-rw-r--r--  jstests/concurrency/fsm_libs/cluster.js  2
-rw-r--r--  jstests/core/dbcase.js  2
-rw-r--r--  jstests/core/dropdb.js  2
-rw-r--r--  jstests/core/startup_log.js  6
-rw-r--r--  jstests/fail_point/set_failpoint_through_set_parameter.js  2
-rw-r--r--  jstests/gle/get_last_error.js  10
-rw-r--r--  jstests/noPassthrough/drop_oplog_should_fail_if_storage_engine_supports_replSetResizeOplog_command.js  6
-rw-r--r--  jstests/noPassthrough/indexbg_drop.js  18
-rw-r--r--  jstests/noPassthrough/minvalid.js  6
-rw-r--r--  jstests/noPassthrough/noncapped_oplog_creation.js  2
-rw-r--r--  jstests/noPassthrough/wt_delayed_secondary_read_concern_majority.js  6
-rw-r--r--  jstests/noPassthroughWithMongod/indexbg_interrupts.js  14
-rw-r--r--  jstests/noPassthroughWithMongod/sharding_rs_arb1.js  4
-rw-r--r--  jstests/noPassthroughWithMongod/ttl_repl_secondary_disabled.js  22
-rw-r--r--  jstests/replsets/apply_batch_only_goes_forward.js  30
-rw-r--r--  jstests/replsets/apply_ops_wc.js  4
-rw-r--r--  jstests/replsets/auth2.js  9
-rw-r--r--  jstests/replsets/auth_no_pri.js  4
-rw-r--r--  jstests/replsets/commands_that_write_accept_wc.js  4
-rw-r--r--  jstests/replsets/dbcheck.js  74
-rw-r--r--  jstests/replsets/drop_oplog.js  6
-rw-r--r--  jstests/replsets/fsync_lock_read_secondaries.js  2
-rw-r--r--  jstests/replsets/groupAndMapReduce.js  15
-rw-r--r--  jstests/replsets/index_delete.js  32
-rw-r--r--  jstests/replsets/initial_sync_move_forward.js  18
-rw-r--r--  jstests/replsets/initial_sync_with_write_load.js  10
-rw-r--r--  jstests/replsets/maintenance.js  12
-rw-r--r--  jstests/replsets/no_chaining.js  8
-rw-r--r--  jstests/replsets/optime.js  24
-rw-r--r--  jstests/replsets/rename_collection_temp.js  8
-rw-r--r--  jstests/replsets/replset8.js  14
-rw-r--r--  jstests/replsets/replsetadd_profile.js  8
-rw-r--r--  jstests/replsets/replsetarb2.js  22
-rw-r--r--  jstests/replsets/replsethostnametrim.js  8
-rw-r--r--  jstests/replsets/replsetprio1.js  20
-rw-r--r--  jstests/replsets/retryable_commit_transaction_after_failover.js  2
-rw-r--r--  jstests/replsets/rollback_creates_rollback_directory.js  16
-rw-r--r--  jstests/replsets/server8070.js  26
-rw-r--r--  jstests/replsets/slave_delay_clean_shutdown.js  8
-rw-r--r--  jstests/replsets/sync2.js  12
-rw-r--r--  jstests/replsets/tags2.js  16
-rw-r--r--  jstests/replsets/tags_with_reconfig.js  16
-rw-r--r--  jstests/replsets/user_management_wc.js  6
-rw-r--r--  jstests/sharding/addshard4.js  4
-rw-r--r--  jstests/sharding/auth.js  6
-rw-r--r--  jstests/sharding/count_slaveok.js  6
-rw-r--r--  jstests/sharding/cursor_valid_after_shard_stepdown.js  2
-rw-r--r--  jstests/sharding/read_pref.js  4
-rw-r--r--  jstests/sharding/recovering_slaveok.js  4
-rw-r--r--  jstests/sharding/shard_insert_getlasterror_w2.js  2
-rw-r--r--  jstests/sharding/warm_up_connection_pool.js  14
-rw-r--r--  jstests/slow1/replsets_priority1.js  26
52 files changed, 301 insertions, 303 deletions
diff --git a/jstests/concurrency/fsm_libs/cluster.js b/jstests/concurrency/fsm_libs/cluster.js
index 4085a369152..b33f5f465c4 100644
--- a/jstests/concurrency/fsm_libs/cluster.js
+++ b/jstests/concurrency/fsm_libs/cluster.js
@@ -554,8 +554,6 @@ var Cluster = function(options) {
replSets.forEach(rst => {
var startTime = Date.now();
var res;
-
- // Use '_master' instead of getPrimary() to avoid the detection of a new primary.
var primary = rst.getPrimary();
if (shouldCheckDBHashes) {
diff --git a/jstests/core/dbcase.js b/jstests/core/dbcase.js
index 73b3b34e97a..1bcab6bb132 100644
--- a/jstests/core/dbcase.js
+++ b/jstests/core/dbcase.js
@@ -20,7 +20,7 @@ b.dropDatabase();
ai = db.getMongo().getDBNames().indexOf(a.getName());
bi = db.getMongo().getDBNames().indexOf(b.getName());
-// One of these dbs may exist if there is a slave active, but they must
+// One of these dbs may exist if there is a secondary active, but they must
// not both exist.
assert(ai == -1 || bi == -1);
printjson(db.getMongo().getDBs().databases);
diff --git a/jstests/core/dropdb.js b/jstests/core/dropdb.js
index bf3c5d18b34..8b60336b185 100644
--- a/jstests/core/dropdb.js
+++ b/jstests/core/dropdb.js
@@ -1,5 +1,5 @@
// Test that a db does not exist after it is dropped.
-// Disabled in the small oplog suite because the slave may create a master db
+// Disabled in the small oplog suite because the secondary may create a primary db
// with the same name as the dropped db when requesting a clone.
m = db.getMongo();
diff --git a/jstests/core/startup_log.js b/jstests/core/startup_log.js
index 6f13bada629..7fda5f35b87 100644
--- a/jstests/core/startup_log.js
+++ b/jstests/core/startup_log.js
@@ -68,7 +68,7 @@ var buildinfo = db.runCommand("buildinfo");
delete buildinfo.ok; // Delete extra meta info not in startup_log
delete buildinfo.operationTime; // Delete extra meta info not in startup_log
delete buildinfo.$clusterTime; // Delete extra meta info not in startup_log
-var isMaster = db._adminCommand("ismaster");
+var hello = db._adminCommand("hello");
// Test buildinfo has the expected keys
var expectedKeys = [
@@ -107,7 +107,7 @@ assert.eq(
version, versionArrayCleaned.join('.'), "version doesn't match that from the versionArray");
var jsEngine = latestStartUpLog.buildinfo.javascriptEngine;
assert((jsEngine == "none") || jsEngine.startsWith("mozjs"));
-assert.eq(isMaster.maxBsonObjectSize,
+assert.eq(hello.maxBsonObjectSize,
latestStartUpLog.buildinfo.maxBsonObjectSize,
- "maxBsonObjectSize doesn't match one from ismaster");
+ "maxBsonObjectSize doesn't match one from hello");
})();
diff --git a/jstests/fail_point/set_failpoint_through_set_parameter.js b/jstests/fail_point/set_failpoint_through_set_parameter.js
index d35ea87e240..4f8c311592b 100644
--- a/jstests/fail_point/set_failpoint_through_set_parameter.js
+++ b/jstests/fail_point/set_failpoint_through_set_parameter.js
@@ -7,7 +7,7 @@
"use strict";
var assertStartupSucceeds = function(conn) {
- assert.commandWorked(conn.adminCommand({ismaster: 1}));
+ assert.commandWorked(conn.adminCommand({hello: 1}));
};
var assertStartupFails = function(conn) {
diff --git a/jstests/gle/get_last_error.js b/jstests/gle/get_last_error.js
index 9309ac2921d..39c6ce93303 100644
--- a/jstests/gle/get_last_error.js
+++ b/jstests/gle/get_last_error.js
@@ -5,13 +5,13 @@ var replTest =
new ReplSetTest({name: name, oplogSize: 1, nodes: 3, settings: {chainingAllowed: false}});
var nodes = replTest.startSet();
replTest.initiate();
-var master = replTest.getPrimary();
-var mdb = master.getDB("test");
+var primary = replTest.getPrimary();
+var mdb = primary.getDB("test");
// synchronize replication
assert.commandWorked(mdb.foo.insert({_id: "1"}, {writeConcern: {w: 3, wtimeout: 5 * 60 * 1000}}));
-var gle = master.getDB("test").runCommand({getLastError: 1, j: true});
+var gle = primary.getDB("test").runCommand({getLastError: 1, j: true});
print('Trying j=true');
printjson(gle);
if (gle.err === null) {
@@ -48,8 +48,8 @@ assert.eq(gle.wtimeout, null);
// take a node down and GLE for more nodes than are up
replTest.stop(2);
-master = replTest.getPrimary();
-mdb = master.getDB("test");
+primary = replTest.getPrimary();
+mdb = primary.getDB("test");
// do w:2 write so secondary is caught up before calling {gle w:3}.
assert.commandWorked(mdb.foo.insert({_id: "3"}, {writeConcern: {w: 2, wtimeout: 5 * 60 * 1000}}));
gle = mdb.getLastErrorObj(3, 1000);
diff --git a/jstests/noPassthrough/drop_oplog_should_fail_if_storage_engine_supports_replSetResizeOplog_command.js b/jstests/noPassthrough/drop_oplog_should_fail_if_storage_engine_supports_replSetResizeOplog_command.js
index 66912aa4551..20d4b93049b 100644
--- a/jstests/noPassthrough/drop_oplog_should_fail_if_storage_engine_supports_replSetResizeOplog_command.js
+++ b/jstests/noPassthrough/drop_oplog_should_fail_if_storage_engine_supports_replSetResizeOplog_command.js
@@ -20,8 +20,8 @@
load("jstests/libs/storage_engine_utils.js");
// Start a standalone node.
-let master = MongoRunner.runMongod();
-let localDB = master.getDB('local');
+let primary = MongoRunner.runMongod();
+let localDB = primary.getDB('local');
// Standalone nodes don't start with an oplog; create one. The size of the oplog doesn't
// matter. We are capping the oplog because some storage engines do not allow the creation
@@ -36,5 +36,5 @@ if (storageEngineIsWiredTiger()) {
assert.commandWorked(localDB.runCommand({drop: 'oplog.rs'}));
}
-MongoRunner.stopMongod(master);
+MongoRunner.stopMongod(primary);
}());
\ No newline at end of file
diff --git a/jstests/noPassthrough/indexbg_drop.js b/jstests/noPassthrough/indexbg_drop.js
index 0cd991ab5c0..c7ef4245fc5 100644
--- a/jstests/noPassthrough/indexbg_drop.js
+++ b/jstests/noPassthrough/indexbg_drop.js
@@ -28,19 +28,19 @@ const replTest = new ReplSetTest({
const nodes = replTest.startSet();
replTest.initiate();
-var master = replTest.getPrimary();
+var primary = replTest.getPrimary();
var second = replTest.getSecondary();
-var masterDB = master.getDB(dbname);
+var primaryDB = primary.getDB(dbname);
var secondDB = second.getDB(dbname);
var dc = {dropIndexes: collection, index: "i_1"};
// Setup collections.
-masterDB.dropDatabase();
+primaryDB.dropDatabase();
jsTest.log("Creating test data " + size + " documents");
Random.setRandomSeed();
-var bulk = masterDB.getCollection(collection).initializeUnorderedBulkOp();
+var bulk = primaryDB.getCollection(collection).initializeUnorderedBulkOp();
for (i = 0; i < size; ++i) {
bulk.insert({i: Random.rand()});
}
@@ -52,22 +52,22 @@ assert.commandWorked(
jsTest.log("Starting background indexing for test of: " + tojson(dc));
// Add another index to be sure the drop command works.
-masterDB.getCollection(collection).ensureIndex({b: 1});
-masterDB.getCollection(collection).ensureIndex({i: 1}, {background: true});
+primaryDB.getCollection(collection).ensureIndex({b: 1});
+primaryDB.getCollection(collection).ensureIndex({i: 1}, {background: true});
// Make sure the index build has started on the secondary.
IndexBuildTest.waitForIndexBuildToStart(secondDB);
jsTest.log("Dropping indexes");
-masterDB.runCommand({dropIndexes: collection, index: "*"});
+primaryDB.runCommand({dropIndexes: collection, index: "*"});
jsTest.log("Waiting on replication");
assert.commandWorked(
secondDB.adminCommand({configureFailPoint: "hangAfterStartingIndexBuild", mode: "off"}));
replTest.awaitReplication();
-print("Index list on master:");
-masterDB.getCollection(collection).getIndexes().forEach(printjson);
+print("Index list on primary:");
+primaryDB.getCollection(collection).getIndexes().forEach(printjson);
// Need to assert.soon because the drop only marks the index for removal
// the removal itself is asynchronous and may take another moment before it happens.
diff --git a/jstests/noPassthrough/minvalid.js b/jstests/noPassthrough/minvalid.js
index fea0012bdc3..b30fc892bb5 100644
--- a/jstests/noPassthrough/minvalid.js
+++ b/jstests/noPassthrough/minvalid.js
@@ -10,14 +10,14 @@ var host = getHostName();
var nodes = replTest.startSet();
replTest.initiate();
-var master = replTest.getPrimary();
-var mdb = master.getDB("foo");
+var primary = replTest.getPrimary();
+var mdb = primary.getDB("foo");
print("1: initial insert");
mdb.foo.save({a: 1000});
print("2. get last op");
-var local = master.getDB("local");
+var local = primary.getDB("local");
var lastOp = local.oplog.rs.find().sort({$natural: -1}).limit(1).next();
printjson(lastOp);
diff --git a/jstests/noPassthrough/noncapped_oplog_creation.js b/jstests/noPassthrough/noncapped_oplog_creation.js
index 9d2f0865fdb..2762aab9818 100644
--- a/jstests/noPassthrough/noncapped_oplog_creation.js
+++ b/jstests/noPassthrough/noncapped_oplog_creation.js
@@ -23,7 +23,7 @@ assert.commandFailed(localDB.createCollection('oplog.fake', {capped: false}));
// collection.
assert.writeError(localDB.oplog.rs.insert({}));
-// Test that inserting into the master-slave oplog fails when implicitly creating a non-capped
+// Test that inserting into the oplog fails when implicitly creating a non-capped
// collection.
assert.commandFailed(localDB.runCommand({godinsert: 'oplog.$main', obj: {}}));
diff --git a/jstests/noPassthrough/wt_delayed_secondary_read_concern_majority.js b/jstests/noPassthrough/wt_delayed_secondary_read_concern_majority.js
index a861aa00aff..6d3c9f586c7 100644
--- a/jstests/noPassthrough/wt_delayed_secondary_read_concern_majority.js
+++ b/jstests/noPassthrough/wt_delayed_secondary_read_concern_majority.js
@@ -52,14 +52,14 @@ if (storageEngine !== "wiredTiger") {
doNotWaitForReplication: true,
doNotWaitForNewlyAddedRemovals: true
});
- var master = rst.getPrimary(); // Waits for PRIMARY state.
+ var primary = rst.getPrimary(); // Waits for PRIMARY state.
// Reconfigure primary with a small cache size so less data needs to be
// inserted to make the cache full while trying to trigger a stall.
- assert.commandWorked(master.adminCommand(
+ assert.commandWorked(primary.adminCommand(
{setParameter: 1, "wiredTigerEngineRuntimeConfig": "cache_size=100MB"}));
- var coll = master.getCollection("test.coll");
+ var coll = primary.getCollection("test.coll");
var bigstr = "a".repeat(4000);
// Do not insert with a writeConcern because we want the delayed slave
diff --git a/jstests/noPassthroughWithMongod/indexbg_interrupts.js b/jstests/noPassthroughWithMongod/indexbg_interrupts.js
index 420fb2b6d96..19055c16cf5 100644
--- a/jstests/noPassthroughWithMongod/indexbg_interrupts.js
+++ b/jstests/noPassthroughWithMongod/indexbg_interrupts.js
@@ -50,10 +50,10 @@ replTest.initiate({
]
});
-var master = replTest.getPrimary();
+var primary = replTest.getPrimary();
var second = replTest.getSecondary();
-var masterDB = master.getDB(dbname);
+var primaryDB = primary.getDB(dbname);
var secondDB = second.getDB(dbname);
var dropAction = [
@@ -69,17 +69,17 @@ for (var idx = 0; idx < dropAction.length; idx++) {
jsTest.log("Setting up collection " + collection + " for test of: " + JSON.stringify(dc));
// set up collections
- masterDB.dropDatabase();
+ primaryDB.dropDatabase();
jsTest.log("creating test data " + size + " documents");
- var bulk = masterDB.getCollection(collection).initializeUnorderedBulkOp();
+ var bulk = primaryDB.getCollection(collection).initializeUnorderedBulkOp();
for (var i = 0; i < size; ++i) {
bulk.insert({i: i});
}
assert.commandWorked(bulk.execute());
jsTest.log("Starting background indexing for test of: " + JSON.stringify(dc));
- masterDB.getCollection(collection).ensureIndex({i: 1}, {background: true});
- assert.eq(2, masterDB.getCollection(collection).getIndexes().length);
+ primaryDB.getCollection(collection).ensureIndex({i: 1}, {background: true});
+ assert.eq(2, primaryDB.getCollection(collection).getIndexes().length);
// Wait for the secondary to get the index entry
assert.soon(function() {
@@ -89,7 +89,7 @@ for (var idx = 0; idx < dropAction.length; idx++) {
jsTest.log("Index created and index info exists on secondary");
jsTest.log("running command " + JSON.stringify(dc));
- assert.commandWorked(masterDB.runCommand(dc));
+ assert.commandWorked(primaryDB.runCommand(dc));
jsTest.log("Waiting on replication");
replTest.awaitReplication();
diff --git a/jstests/noPassthroughWithMongod/sharding_rs_arb1.js b/jstests/noPassthroughWithMongod/sharding_rs_arb1.js
index 389bd7cb80a..9a84d93d71b 100644
--- a/jstests/noPassthroughWithMongod/sharding_rs_arb1.js
+++ b/jstests/noPassthroughWithMongod/sharding_rs_arb1.js
@@ -15,8 +15,8 @@ replTest.initiate({
replTest.awaitReplication();
-var master = replTest.getPrimary();
-var db = master.getDB("test");
+var primary = replTest.getPrimary();
+var db = primary.getDB("test");
printjson(rs.status());
var st = new ShardingTest({numShards: 0});
diff --git a/jstests/noPassthroughWithMongod/ttl_repl_secondary_disabled.js b/jstests/noPassthroughWithMongod/ttl_repl_secondary_disabled.js
index 654d755b694..84718bfb914 100644
--- a/jstests/noPassthroughWithMongod/ttl_repl_secondary_disabled.js
+++ b/jstests/noPassthroughWithMongod/ttl_repl_secondary_disabled.js
@@ -22,31 +22,31 @@ const rt = new ReplSetTest({
});
const nodes = rt.startSet();
rt.initiate();
-let master = rt.getPrimary();
+let primary = rt.getPrimary();
rt.awaitSecondaryNodes();
-let slave1 = rt.getSecondary();
+let secondary1 = rt.getSecondary();
// shortcuts
-let masterdb = master.getDB('d');
-let slave1db = slave1.getDB('d');
-let mastercol = masterdb['c'];
-let slave1col = slave1db['c'];
+let primarydb = primary.getDB('d');
+let secondary1db = secondary1.getDB('d');
+let primarycol = primarydb['c'];
+let secondary1col = secondary1db['c'];
// create TTL index, wait for TTL monitor to kick in, then check things
-mastercol.ensureIndex({x: 1}, {expireAfterSeconds: 10});
+primarycol.ensureIndex({x: 1}, {expireAfterSeconds: 10});
rt.awaitReplication();
// increase logging
-assert.commandWorked(slave1col.getDB().adminCommand({setParameter: 1, logLevel: 1}));
+assert.commandWorked(secondary1col.getDB().adminCommand({setParameter: 1, logLevel: 1}));
// insert old doc (10 minutes old) directly on secondary using godinsert
-assert.commandWorked(slave1col.runCommand(
+assert.commandWorked(secondary1col.runCommand(
"godinsert", {obj: {_id: new Date(), x: new Date((new Date()).getTime() - 600000)}}));
-assert.eq(1, slave1col.count(), "missing inserted doc");
+assert.eq(1, secondary1col.count(), "missing inserted doc");
sleep(70 * 1000); // wait for 70seconds
-assert.eq(1, slave1col.count(), "ttl deleted my doc!");
+assert.eq(1, secondary1col.count(), "ttl deleted my doc!");
// finish up
rt.stopSet();
diff --git a/jstests/replsets/apply_batch_only_goes_forward.js b/jstests/replsets/apply_batch_only_goes_forward.js
index b7b1e0b81da..366c69d0fe8 100644
--- a/jstests/replsets/apply_batch_only_goes_forward.js
+++ b/jstests/replsets/apply_batch_only_goes_forward.js
@@ -30,24 +30,24 @@ var replTest =
var nodes = replTest.startSet();
replTest.initiate();
-var master = replTest.getPrimary();
-var mTest = master.getDB("test");
-var mLocal = master.getDB("local");
-var mMinvalid = mLocal["replset.minvalid"];
+var primary = replTest.getPrimary();
+var pTest = primary.getDB("test");
+var pLocal = primary.getDB("local");
+var mMinvalid = pLocal["replset.minvalid"];
-var slave = replTest.getSecondary();
-var sTest = slave.getDB("test");
-var sLocal = slave.getDB("local");
+var secondary = replTest.getSecondary();
+var sTest = secondary.getDB("test");
+var sLocal = secondary.getDB("local");
var sMinvalid = sLocal["replset.minvalid"];
var stepDownSecs = 30;
var stepDownCmd = {replSetStepDown: stepDownSecs, force: true};
// Write op
assert.commandWorked(
- mTest.foo.save({}, {writeConcern: {w: 'majority', wtimeout: ReplSetTest.kDefaultTimeoutMS}}));
-replTest.waitForState(slave, ReplSetTest.State.SECONDARY);
+ pTest.foo.save({}, {writeConcern: {w: 'majority', wtimeout: ReplSetTest.kDefaultTimeoutMS}}));
+replTest.waitForState(secondary, ReplSetTest.State.SECONDARY);
assert.commandWorked(
- mTest.foo.save({}, {writeConcern: {w: 'majority', wtimeout: ReplSetTest.kDefaultTimeoutMS}}));
+ pTest.foo.save({}, {writeConcern: {w: 'majority', wtimeout: ReplSetTest.kDefaultTimeoutMS}}));
// Set minvalid to something far in the future for the current primary, to simulate recovery.
// Note: This is so far in the future (5 days) that it will never become secondary.
@@ -68,16 +68,16 @@ printjson(assert.commandWorked(mMinvalid.update(
minValidUpdate,
{upsert: true, writeConcern: {w: 1, wtimeout: ReplSetTest.kDefaultTimeoutMS}})));
-jsTest.log('Restarting primary ' + master.host +
+jsTest.log('Restarting primary ' + primary.host +
' with updated minValid. This node will go into RECOVERING upon restart. ' +
- 'Secondary ' + slave.host + ' will become new primary.');
+ 'Secondary ' + secondary.host + ' will become new primary.');
clearRawMongoProgramOutput();
-replTest.restart(master);
+replTest.restart(primary);
printjson(sLocal.adminCommand("hello"));
-replTest.waitForState(master, ReplSetTest.State.RECOVERING);
+replTest.waitForState(primary, ReplSetTest.State.RECOVERING);
replTest.awaitNodesAgreeOnPrimary();
-// Slave is now master... Do a write to advance the optime on the primary so that it will be
+// Secondary is now primary... Do a write to advance the optime on the primary so that it will be
// considered as a sync source - this is more relevant to PV0 because we do not write a new
// entry to the oplog on becoming primary.
assert.commandWorked(replTest.getPrimary().getDB("test").foo.save(
diff --git a/jstests/replsets/apply_ops_wc.js b/jstests/replsets/apply_ops_wc.js
index 9ffdca065df..7ca6eb97f58 100644
--- a/jstests/replsets/apply_ops_wc.js
+++ b/jstests/replsets/apply_ops_wc.js
@@ -24,8 +24,8 @@ replTest.initiate(cfg);
var testDB = "applyOps-wc-test";
// Get test collection.
-var master = replTest.getPrimary();
-var db = master.getDB(testDB);
+var primary = replTest.getPrimary();
+var db = primary.getDB(testDB);
var coll = db.apply_ops_wc;
function dropTestCollection() {
diff --git a/jstests/replsets/auth2.js b/jstests/replsets/auth2.js
index e0e888c9f28..86f54f748e8 100644
--- a/jstests/replsets/auth2.js
+++ b/jstests/replsets/auth2.js
@@ -43,12 +43,11 @@ replSetTest.initiate({
]
});
-var master = replSetTest.getPrimary();
+var primary = replSetTest.getPrimary();
jsTestLog("add an admin user");
-master.getDB("admin").createUser({user: "foo", pwd: "bar", roles: jsTest.adminUserRoles},
- {w: 3, wtimeout: replSetTest.kDefaultTimeoutMS});
-var m = replSetTest.nodes[0];
+primary.getDB("admin").createUser({user: "foo", pwd: "bar", roles: jsTest.adminUserRoles},
+ {w: 3, wtimeout: replSetTest.kDefaultTimeoutMS});
jsTestLog("starting 1 and 2 with key file");
replSetTest.stop(1);
@@ -64,7 +63,7 @@ testInvalidAuthStates(replSetTest);
jsTestLog("restart mongod with bad keyFile");
replSetTest.stop(0);
-m = replSetTest.restart(0, {"keyFile": key2});
+replSetTest.restart(0, {"keyFile": key2});
jsTestLog("restart nodes 1 and 2");
replSetTest.restart(1, {"keyFile": key1});
diff --git a/jstests/replsets/auth_no_pri.js b/jstests/replsets/auth_no_pri.js
index bc606dc913a..35629b3a383 100644
--- a/jstests/replsets/auth_no_pri.js
+++ b/jstests/replsets/auth_no_pri.js
@@ -8,8 +8,8 @@ var nodes = rs.startSet();
rs.initiate();
// Add user
-var master = rs.getPrimary();
-master.getDB("admin").createUser({user: "admin", pwd: "pwd", roles: ["root"]}, {w: NODE_COUNT});
+var primary = rs.getPrimary();
+primary.getDB("admin").createUser({user: "admin", pwd: "pwd", roles: ["root"]}, {w: NODE_COUNT});
// Can authenticate replset connection when whole set is up.
var conn = new Mongo(rs.getURL());
diff --git a/jstests/replsets/commands_that_write_accept_wc.js b/jstests/replsets/commands_that_write_accept_wc.js
index c718e2ce82d..8a8adffad5e 100644
--- a/jstests/replsets/commands_that_write_accept_wc.js
+++ b/jstests/replsets/commands_that_write_accept_wc.js
@@ -17,9 +17,9 @@ var replTest = new ReplSetTest({
replTest.startSet();
replTest.initiate();
-var master = replTest.getPrimary();
+var primary = replTest.getPrimary();
var dbName = "wc-test";
-var db = master.getDB(dbName);
+var db = primary.getDB(dbName);
var collName = 'leaves';
var coll = db[collName];
diff --git a/jstests/replsets/dbcheck.js b/jstests/replsets/dbcheck.js
index 99ab6261b00..959b8203126 100644
--- a/jstests/replsets/dbcheck.js
+++ b/jstests/replsets/dbcheck.js
@@ -151,17 +151,17 @@ function checkTotalCounts(conn, coll) {
// First check behavior when everything is consistent.
function simpleTestConsistent() {
- let master = replSet.getPrimary();
+ let primary = replSet.getPrimary();
clearLog();
- assert.neq(master, undefined);
- let db = master.getDB(dbName);
+ assert.neq(primary, undefined);
+ let db = primary.getDB(dbName);
assert.commandWorked(db.runCommand({"dbCheck": multiBatchSimpleCollName}));
awaitDbCheckCompletion(db);
- checkLogAllConsistent(master);
- checkTotalCounts(master, db[multiBatchSimpleCollName]);
+ checkLogAllConsistent(primary);
+ checkTotalCounts(primary, db[multiBatchSimpleCollName]);
forEachSecondary(function(secondary) {
checkLogAllConsistent(secondary);
@@ -171,9 +171,9 @@ function simpleTestConsistent() {
// Same thing, but now with concurrent updates.
function concurrentTestConsistent() {
- let master = replSet.getPrimary();
+ let primary = replSet.getPrimary();
- let db = master.getDB(dbName);
+ let db = primary.getDB(dbName);
// Add enough documents that dbCheck will take a few seconds.
db[collName].insertMany([...Array(10000).keys()].map(x => ({i: x})));
@@ -190,7 +190,7 @@ function concurrentTestConsistent() {
awaitDbCheckCompletion(db);
- checkLogAllConsistent(master);
+ checkLogAllConsistent(primary);
// Omit check for total counts, which might have changed with concurrent updates.
forEachSecondary(secondary => checkLogAllConsistent(secondary, true));
@@ -201,8 +201,8 @@ concurrentTestConsistent();
// Test the various other parameters.
function testDbCheckParameters() {
- let master = replSet.getPrimary();
- let db = master.getDB(dbName);
+ let primary = replSet.getPrimary();
+ let db = primary.getDB(dbName);
// Clean up for the test.
clearLog();
@@ -269,11 +269,11 @@ testDbCheckParameters();
// Now, test some unusual cases where the command should fail.
function testErrorOnNonexistent() {
- let master = replSet.getPrimary();
- let db = master.getDB("this-probably-doesnt-exist");
+ let primary = replSet.getPrimary();
+ let db = primary.getDB("this-probably-doesnt-exist");
assert.commandFailed(db.runCommand({dbCheck: 1}),
"dbCheck spuriously succeeded on nonexistent database");
- db = master.getDB(dbName);
+ db = primary.getDB(dbName);
assert.commandFailed(db.runCommand({dbCheck: "this-also-probably-doesnt-exist"}),
"dbCheck spuriously succeeded on nonexistent collection");
}
@@ -285,12 +285,12 @@ function testErrorOnSecondary() {
}
function testErrorOnUnreplicated() {
- let master = replSet.getPrimary();
- let db = master.getDB("local");
+ let primary = replSet.getPrimary();
+ let db = primary.getDB("local");
assert.commandFailed(db.runCommand({dbCheck: "oplog.rs"}),
"dbCheck spuriously succeeded on oplog");
- assert.commandFailed(master.getDB(dbName).runCommand({dbCheck: "system.profile"}),
+ assert.commandFailed(primary.getDB(dbName).runCommand({dbCheck: "system.profile"}),
"dbCheck spuriously succeeded on system.profile");
}
@@ -300,21 +300,21 @@ testErrorOnUnreplicated();
// Test stepdown.
function testSucceedsOnStepdown() {
- let master = replSet.getPrimary();
- let db = master.getDB(dbName);
+ let primary = replSet.getPrimary();
+ let db = primary.getDB(dbName);
- let nodeId = replSet.getNodeId(master);
+ let nodeId = replSet.getNodeId(primary);
assert.commandWorked(db.runCommand({dbCheck: multiBatchSimpleCollName}));
- // Step down the master.
- assert.commandWorked(master.getDB("admin").runCommand({replSetStepDown: 0, force: true}));
+ // Step down the primary.
+ assert.commandWorked(primary.getDB("admin").runCommand({replSetStepDown: 0, force: true}));
// Wait for the cluster to come up.
replSet.awaitSecondaryNodes();
// Find the node we ran dbCheck on.
db = replSet.getSecondaries()
- .filter(function isPreviousMaster(node) {
+ .filter(function isPreviousPrimary(node) {
return replSet.getNodeId(node) === nodeId;
})[0]
.getDB(dbName);
@@ -337,16 +337,16 @@ function collectionUuid(db, collName) {
}
function getDummyOplogEntry() {
- let master = replSet.getPrimary();
- let coll = master.getDB(dbName)[collName];
+ let primary = replSet.getPrimary();
+ let coll = primary.getDB(dbName)[collName];
let replSetStatus =
- assert.commandWorked(master.getDB("admin").runCommand({replSetGetStatus: 1}));
+ assert.commandWorked(primary.getDB("admin").runCommand({replSetGetStatus: 1}));
let connStatus = replSetStatus.members.filter(m => m.self)[0];
let lastOpTime = connStatus.optime;
- let entry = master.getDB("local").oplog.rs.find().sort({$natural: -1})[0];
- entry["ui"] = collectionUuid(master.getDB(dbName), collName);
+ let entry = primary.getDB("local").oplog.rs.find().sort({$natural: -1})[0];
+ entry["ui"] = collectionUuid(primary.getDB(dbName), collName);
entry["ns"] = coll.stats().ns;
entry["ts"] = new Timestamp();
@@ -355,17 +355,17 @@ function getDummyOplogEntry() {
// Create various inconsistencies, and check that dbCheck spots them.
function insertOnSecondaries(doc) {
- let master = replSet.getPrimary();
+ let primary = replSet.getPrimary();
let entry = getDummyOplogEntry();
entry["op"] = "i";
entry["o"] = doc;
- master.getDB("local").oplog.rs.insertOne(entry);
+ primary.getDB("local").oplog.rs.insertOne(entry);
}
// Run an apply-ops-ish command on a secondary.
function runCommandOnSecondaries(doc, ns) {
- let master = replSet.getPrimary();
+ let primary = replSet.getPrimary();
let entry = getDummyOplogEntry();
entry["op"] = "c";
entry["o"] = doc;
@@ -374,23 +374,23 @@ function runCommandOnSecondaries(doc, ns) {
entry["ns"] = ns;
}
- master.getDB("local").oplog.rs.insertOne(entry);
+ primary.getDB("local").oplog.rs.insertOne(entry);
}
// And on a primary.
function runCommandOnPrimary(doc) {
- let master = replSet.getPrimary();
+ let primary = replSet.getPrimary();
let entry = getDummyOplogEntry();
entry["op"] = "c";
entry["o"] = doc;
- master.getDB("admin").runCommand({applyOps: [entry]});
+ primary.getDB("admin").runCommand({applyOps: [entry]});
}
// Just add an extra document, and test that it catches it.
function simpleTestCatchesExtra() {
- let master = replSet.getPrimary();
- let db = master.getDB(dbName);
+ let primary = replSet.getPrimary();
+ let db = primary.getDB(dbName);
clearLog();
@@ -410,8 +410,8 @@ function simpleTestCatchesExtra() {
// Test that dbCheck catches changing various pieces of collection metadata.
function testCollectionMetadataChanges() {
- let master = replSet.getPrimary();
- let db = master.getDB(dbName);
+ let primary = replSet.getPrimary();
+ let db = primary.getDB(dbName);
db[collName].drop();
clearLog();
diff --git a/jstests/replsets/drop_oplog.js b/jstests/replsets/drop_oplog.js
index 2a64ef6c436..09ebf00b77b 100644
--- a/jstests/replsets/drop_oplog.js
+++ b/jstests/replsets/drop_oplog.js
@@ -7,8 +7,8 @@ let rt = new ReplSetTest({name: "drop_oplog", nodes: 1, oplogSize: 30});
let nodes = rt.startSet();
rt.initiate();
-let master = rt.getPrimary();
-let localDB = master.getDB('local');
+let primary = rt.getPrimary();
+let localDB = primary.getDB('local');
let threw = false;
@@ -19,7 +19,7 @@ let dropOutput = localDB.dropDatabase();
assert.eq(dropOutput.ok, 0);
assert.eq(dropOutput.errmsg, "Cannot drop 'local' database while replication is active");
-let adminDB = master.getDB('admin');
+let adminDB = primary.getDB('admin');
dropOutput = adminDB.dropDatabase();
assert.eq(dropOutput.ok, 0);
assert.eq(dropOutput.errmsg, "Dropping the 'admin' database is prohibited.");
diff --git a/jstests/replsets/fsync_lock_read_secondaries.js b/jstests/replsets/fsync_lock_read_secondaries.js
index daed9de7ad6..c409b632f7b 100644
--- a/jstests/replsets/fsync_lock_read_secondaries.js
+++ b/jstests/replsets/fsync_lock_read_secondaries.js
@@ -12,7 +12,7 @@
* 1) Create a replica set.
* 2) Add some documents to primary.
* 3) Wait until the secondary nodes are in state "SECONDARY".
- * 4) Set slaveOk on secondary.
+ * 4) Set secondaryOk on secondary.
* 5) Take the fsync lock on a secondary. This will stop replication.
* 6) Insert some more documents to primary.
* 7) Expect to be able to read from the secondary; the count of documents should
diff --git a/jstests/replsets/groupAndMapReduce.js b/jstests/replsets/groupAndMapReduce.js
index 2723f800a07..22935531bcf 100644
--- a/jstests/replsets/groupAndMapReduce.js
+++ b/jstests/replsets/groupAndMapReduce.js
@@ -45,7 +45,7 @@ doTest = function(signal) {
var one = secondary.getDB("foo").foo.findOne();
printjson(one);
- print("Calling inline mr() with slaveOk=true, must succeed");
+ print("Calling inline mr() with secondaryOk=true, must succeed");
secondary.setSecondaryOk();
map = function() {
emit(this.a, 1);
@@ -59,7 +59,7 @@ doTest = function(signal) {
};
secondary.getDB("foo").foo.mapReduce(map, reduce, {out: {"inline": 1}});
- print("Calling mr() to collection with slaveOk=true, must fail");
+ print("Calling mr() to collection with secondaryOk=true, must fail");
try {
secondary.getDB("foo").foo.mapReduce(map, reduce, "output");
assert(false, "mapReduce() to collection succeeded on secondary");
@@ -67,18 +67,19 @@ doTest = function(signal) {
print("Received exception: " + e);
}
- print("Calling inline mr() with slaveOk=false, must fail");
- secondary.slaveOk = false;
+ print("Calling inline mr() with secondaryOk=false, must fail");
+ secondary.setSecondaryOk(false);
try {
secondary.getDB("foo").foo.mapReduce(map, reduce, {out: {"inline": 1}});
- assert(false, "mapReduce() succeeded on secondary with slaveOk=false");
+ assert(false, "mapReduce() succeeded on secondary with secondaryOk=false");
} catch (e) {
print("Received exception: " + e);
}
- print("Calling mr() to collection with slaveOk=false, must fail");
+ print("Calling mr() to collection with secondaryOk=false, must fail");
try {
secondary.getDB("foo").foo.mapReduce(map, reduce, "output");
- assert(false, "mapReduce() to collection succeeded on secondary with slaveOk=false");
+ assert(false,
+ "mapReduce() to collection succeeded on secondary with secondaryOk=false");
} catch (e) {
print("Received exception: " + e);
}
diff --git a/jstests/replsets/index_delete.js b/jstests/replsets/index_delete.js
index 6e79071cd59..07b9091e439 100644
--- a/jstests/replsets/index_delete.js
+++ b/jstests/replsets/index_delete.js
@@ -30,9 +30,9 @@ replTest.initiate();
var dbName = 'foo';
var collName = 'coll';
-var master = replTest.getPrimary();
+var primary = replTest.getPrimary();
var second = replTest.getSecondary();
-var masterDB = master.getDB(dbName);
+var primaryDB = primary.getDB(dbName);
var secondDB = second.getDB(dbName);
var size = 100;
@@ -41,7 +41,7 @@ var size = 100;
assert.commandWorked(
secondDB.adminCommand({configureFailPoint: 'hangAfterStartingIndexBuild', mode: 'alwaysOn'}));
-var bulk = masterDB[collName].initializeUnorderedBulkOp();
+var bulk = primaryDB[collName].initializeUnorderedBulkOp();
for (var i = 0; i < size; ++i) {
bulk.insert({i: i, j: i, k: i});
}
@@ -50,8 +50,8 @@ assert.commandWorked(bulk.execute());
// This test create indexes with fail point enabled on secondary which prevents secondary from
// voting. So, disabling index build commit quorum.
jsTest.log("Creating index");
-assert.commandWorked(masterDB[collName].createIndex({i: 1}, {}, 0));
-assert.eq(2, masterDB[collName].getIndexes().length);
+assert.commandWorked(primaryDB[collName].createIndex({i: 1}, {}, 0));
+assert.eq(2, primaryDB[collName].getIndexes().length);
try {
assert.soon(function() {
@@ -64,14 +64,14 @@ try {
}
jsTest.log("Index created on secondary");
-masterDB.runCommand({dropIndexes: collName, index: "i_1"});
-assert.eq(1, masterDB[collName].getIndexes().length);
+primaryDB.runCommand({dropIndexes: collName, index: "i_1"});
+assert.eq(1, primaryDB[collName].getIndexes().length);
jsTest.log("Waiting on replication of first index drop");
replTest.awaitReplication();
print("Primary indexes");
-masterDB[collName].getIndexes().forEach(printjson);
+primaryDB[collName].getIndexes().forEach(printjson);
print("Secondary indexes");
secondDB[collName].getIndexes().forEach(printjson);
assert.soon(function() {
@@ -81,15 +81,15 @@ assert.eq(1, secondDB[collName].getIndexes().length);
// Secondary index builds have been unblocked, so we can build indexes with commit quorum enabled.
jsTest.log("Creating two more indexes on primary");
-assert.commandWorked(masterDB[collName].createIndex({j: 1}));
-assert.commandWorked(masterDB[collName].createIndex({k: 1}));
-assert.eq(3, masterDB[collName].getIndexes().length);
+assert.commandWorked(primaryDB[collName].createIndex({j: 1}));
+assert.commandWorked(primaryDB[collName].createIndex({k: 1}));
+assert.eq(3, primaryDB[collName].getIndexes().length);
jsTest.log("Waiting on replication of second index creations");
replTest.awaitReplication();
print("Primary indexes");
-masterDB[collName].getIndexes().forEach(printjson);
+primaryDB[collName].getIndexes().forEach(printjson);
print("Secondary indexes");
secondDB[collName].getIndexes().forEach(printjson);
assert.soon(function() {
@@ -99,11 +99,11 @@ assert.eq(3, secondDB[collName].getIndexes().length);
jsTest.log("Dropping the rest of the indexes");
-masterDB.runCommand({deleteIndexes: collName, index: "*"});
-assert.eq(1, masterDB[collName].getIndexes().length);
+primaryDB.runCommand({deleteIndexes: collName, index: "*"});
+assert.eq(1, primaryDB[collName].getIndexes().length);
// Assert that we normalize 'dropIndexes' oplog entries properly.
-master.getCollection('local.oplog.rs').find().forEach(function(entry) {
+primary.getCollection('local.oplog.rs').find().forEach(function(entry) {
assert.neq(entry.o.index, "*");
assert(!entry.o.deleteIndexes);
if (entry.o.dropIndexes) {
@@ -118,7 +118,7 @@ jsTest.log("Waiting on replication of second index drops");
replTest.awaitReplication();
print("Primary indexes");
-masterDB[collName].getIndexes().forEach(printjson);
+primaryDB[collName].getIndexes().forEach(printjson);
print("Secondary indexes");
secondDB[collName].getIndexes().forEach(printjson);
assert.soon(function() {
diff --git a/jstests/replsets/initial_sync_move_forward.js b/jstests/replsets/initial_sync_move_forward.js
index c99b529a700..311ab315536 100644
--- a/jstests/replsets/initial_sync_move_forward.js
+++ b/jstests/replsets/initial_sync_move_forward.js
@@ -21,12 +21,12 @@ var rst = new ReplSetTest({name: "initial_sync_move_forward", nodes: 1});
rst.startSet();
rst.initiate();
-var masterColl = rst.getPrimary().getDB("test").coll;
+var primaryColl = rst.getPrimary().getDB("test").coll;
// Insert 500000 documents. Make the last two documents larger, so that {_id: 0, x: 0} and {_id:
// 1, x: 1} will fit into their positions when we grow them.
var count = 500000;
-var bulk = masterColl.initializeUnorderedBulkOp();
+var bulk = primaryColl.initializeUnorderedBulkOp();
for (var i = 0; i < count - 2; ++i) {
bulk.insert({_id: i, x: i});
}
@@ -36,7 +36,7 @@ bulk.insert({_id: count - 1, x: count - 1, longString: longString});
assert.commandWorked(bulk.execute());
// Create a unique index on {x: 1}.
-assert.commandWorked(masterColl.ensureIndex({x: 1}, {unique: true}));
+assert.commandWorked(primaryColl.ensureIndex({x: 1}, {unique: true}));
// Add a secondary.
var secondary =
@@ -54,16 +54,16 @@ failPoint.wait();
// Delete {_id: count - 2} to make a hole. Grow {_id: 0} so that it moves into that hole. This
// will cause the secondary to clone {_id: 0} again.
// Change the value for 'x' so that we are not testing the uniqueness of 'x' in this case.
-assert.commandWorked(masterColl.remove({_id: 0, x: 0}));
-assert.commandWorked(masterColl.remove({_id: count - 2, x: count - 2}));
-assert.commandWorked(masterColl.insert({_id: 0, x: count, longString: longString}));
+assert.commandWorked(primaryColl.remove({_id: 0, x: 0}));
+assert.commandWorked(primaryColl.remove({_id: count - 2, x: count - 2}));
+assert.commandWorked(primaryColl.insert({_id: 0, x: count, longString: longString}));
// Delete {_id: count - 1} to make a hole. Grow {x: 1} so that it moves into that hole. This
// will cause the secondary to clone {x: 1} again.
// Change the value for _id so that we are not testing the uniqueness of _id in this case.
-assert.commandWorked(masterColl.remove({_id: 1, x: 1}));
-assert.commandWorked(masterColl.remove({_id: count - 1, x: count - 1}));
-assert.commandWorked(masterColl.insert({_id: count, x: 1, longString: longString}));
+assert.commandWorked(primaryColl.remove({_id: 1, x: 1}));
+assert.commandWorked(primaryColl.remove({_id: count - 1, x: count - 1}));
+assert.commandWorked(primaryColl.insert({_id: count, x: 1, longString: longString}));
// Resume initial sync.
failPoint.off();
diff --git a/jstests/replsets/initial_sync_with_write_load.js b/jstests/replsets/initial_sync_with_write_load.js
index 0474c1f9c10..c696b347b0b 100644
--- a/jstests/replsets/initial_sync_with_write_load.js
+++ b/jstests/replsets/initial_sync_with_write_load.js
@@ -20,8 +20,8 @@ var config = {
};
var r = replTest.initiate(config);
replTest.waitForState(replTest.nodes[0], ReplSetTest.State.PRIMARY);
-// Make sure we have a master
-var master = replTest.getPrimary();
+// Make sure we have a primary
+var primary = replTest.getPrimary();
var a_conn = conns[0];
var b_conn = conns[1];
a_conn.setSecondaryOk();
@@ -31,8 +31,8 @@ var B = b_conn.getDB("test");
var AID = replTest.getNodeId(a_conn);
var BID = replTest.getNodeId(b_conn);
-assert(master == conns[0], "conns[0] assumed to be master");
-assert(a_conn.host == master.host);
+assert(primary == conns[0], "conns[0] assumed to be primary");
+assert(a_conn.host == primary.host);
// create an oplog entry with an insert
assert.commandWorked(
@@ -67,7 +67,7 @@ var loadGen = startParallelShell(work, replTest.ports[0]);
// wait for document to appear to continue
assert.soon(function() {
try {
- return 1 == master.getDB("test")["timeToStartTrigger"].find().itcount();
+ return 1 == primary.getDB("test")["timeToStartTrigger"].find().itcount();
} catch (e) {
print(e);
return false;
diff --git a/jstests/replsets/maintenance.js b/jstests/replsets/maintenance.js
index 7becd2e0422..c034ac404d8 100644
--- a/jstests/replsets/maintenance.js
+++ b/jstests/replsets/maintenance.js
@@ -7,14 +7,14 @@ config.members[0].priority = 2;
replTest.initiate(config);
replTest.waitForState(replTest.nodes[0], ReplSetTest.State.PRIMARY);
-// Make sure we have a master
-var master = replTest.getPrimary();
+// Make sure we have a primary
+var primary = replTest.getPrimary();
for (i = 0; i < 20; i++) {
- master.getDB("bar").foo.insert({x: 1, y: i, abc: 123, str: "foo bar baz"});
+ primary.getDB("bar").foo.insert({x: 1, y: i, abc: 123, str: "foo bar baz"});
}
for (i = 0; i < 20; i++) {
- master.getDB("bar").foo.update({y: i}, {$push: {foo: "barrrrrrrrrrrrrrrrrrrrrrrrrrrrrrr"}});
+ primary.getDB("bar").foo.update({y: i}, {$push: {foo: "barrrrrrrrrrrrrrrrrrrrrrrrrrrrrrr"}});
}
replTest.awaitReplication();
@@ -43,7 +43,7 @@ var secondarySoon = function() {
secondarySoon();
print("make sure compact works on a secondary (SERVER-3923)");
-master.getDB("foo").bar.drop();
+primary.getDB("foo").bar.drop();
replTest.awaitReplication();
var result = conns[1].getDB("foo").runCommand({compact: "bar"});
assert.eq(result.ok, 0, tojson(result));
@@ -53,7 +53,7 @@ secondarySoon();
print("use replSetMaintenance command to go in/out of maintence mode");
print("primary cannot go into maintence mode");
-result = master.getDB("admin").runCommand({replSetMaintenance: 1});
+result = primary.getDB("admin").runCommand({replSetMaintenance: 1});
assert.eq(result.ok, 0, tojson(result));
print("check getMore works on a secondary, not on a recovering node");
diff --git a/jstests/replsets/no_chaining.js b/jstests/replsets/no_chaining.js
index c7847b71504..7f8f8fb2087 100644
--- a/jstests/replsets/no_chaining.js
+++ b/jstests/replsets/no_chaining.js
@@ -17,16 +17,16 @@ replTest.initiate({
"settings": {"chainingAllowed": false}
});
-var master = replTest.getPrimary();
+var primary = replTest.getPrimary();
replTest.awaitReplication();
var breakNetwork = function() {
nodes[0].disconnect(nodes[2]);
- master = replTest.getPrimary();
+ primary = replTest.getPrimary();
};
var checkNoChaining = function() {
- master.getDB("test").foo.insert({x: 1});
+ primary.getDB("test").foo.insert({x: 1});
assert.soon(function() {
return nodes[1].getDB("test").foo.findOne() != null;
@@ -57,7 +57,7 @@ if (!_isWindows()) {
print("check that forcing sync target still works");
forceSync();
- var config = master.getDB("local").system.replset.findOne();
+ var config = primary.getDB("local").system.replset.findOne();
assert.eq(false, config.settings.chainingAllowed, tojson(config));
}
diff --git a/jstests/replsets/optime.js b/jstests/replsets/optime.js
index fac4fdd67fc..1413df45548 100644
--- a/jstests/replsets/optime.js
+++ b/jstests/replsets/optime.js
@@ -65,18 +65,18 @@ const nodes = replTest.startSet();
assert.commandFailedWithCode(nodes[0].getDB('admin').serverStatus({oplog: true}), 17347);
replTest.initiate();
-var master = replTest.getPrimary();
+var primary = replTest.getPrimary();
replTest.awaitReplication();
replTest.awaitSecondaryNodes();
-const isPersistent = master.getDB('admin').serverStatus().storageEngine.persistent;
+const isPersistent = primary.getDB('admin').serverStatus().storageEngine.persistent;
// Check initial optimes
assert.soon(function() {
return optimesAndWallTimesAreEqual(replTest, isPersistent);
});
-var initialInfo = master.getDB('admin').serverStatus({oplog: true}).oplog;
-let initialReplStatusInfo = master.getDB('admin').runCommand({replSetGetStatus: 1});
+var initialInfo = primary.getDB('admin').serverStatus({oplog: true}).oplog;
+let initialReplStatusInfo = primary.getDB('admin').runCommand({replSetGetStatus: 1});
// Do an insert to increment optime, but without rolling the oplog
// latestOptime should be updated, but earliestOptime should be unchanged
@@ -85,15 +85,15 @@ if (isPersistent) {
// Ensure the durable optime is advanced.
options.writeConcern.j = true;
}
-assert.commandWorked(master.getDB('test').foo.insert({a: 1}, options));
+assert.commandWorked(primary.getDB('test').foo.insert({a: 1}, options));
assert.soon(function() {
return optimesAndWallTimesAreEqual(replTest, isPersistent);
});
-var info = master.getDB('admin').serverStatus({oplog: true}).oplog;
-var entry = master.getDB('local').oplog.rs.findOne().ts;
+var info = primary.getDB('admin').serverStatus({oplog: true}).oplog;
+var entry = primary.getDB('local').oplog.rs.findOne().ts;
jsTest.log("First entry's timestamp is " + tojson(entry));
-let replStatusInfo = master.getDB('admin').runCommand({replSetGetStatus: 1});
+let replStatusInfo = primary.getDB('admin').runCommand({replSetGetStatus: 1});
const dumpInfoFn = function() {
jsTestLog("Initial server status: " + tojsononeline(initialInfo));
@@ -118,21 +118,21 @@ assert.eq(timestampCompare(info.earliestOptime, initialInfo.earliestOptime), 0,
// Insert some large documents to force the oplog to roll over
var largeString = new Array(1024 * 10).toString();
for (var i = 0; i < 2000; i++) {
- master.getDB('test').foo.insert({largeString: largeString}, options);
+ primary.getDB('test').foo.insert({largeString: largeString}, options);
}
assert.soon(function() {
return optimesAndWallTimesAreEqual(replTest, isPersistent);
});
-entry = master.getDB('local').oplog.rs.findOne().ts;
+entry = primary.getDB('local').oplog.rs.findOne().ts;
jsTest.log("First entry's timestamp is now " + tojson(entry) + " after oplog rollover");
// This block requires a fresh stable checkpoint.
assert.soon(function() {
// Test that earliestOptime was updated
- info = master.getDB('admin').serverStatus({oplog: true}).oplog;
+ info = primary.getDB('admin').serverStatus({oplog: true}).oplog;
jsTest.log("Earliest optime is now " + tojson(info.earliestOptime) +
"; looking for it to be different from " + tojson(initialInfo.earliestOptime));
- replStatusInfo = master.getDB('admin').runCommand({replSetGetStatus: 1});
+ replStatusInfo = primary.getDB('admin').runCommand({replSetGetStatus: 1});
return timestampCompare(info.latestOptime, initialInfo.latestOptime) > 0 &&
wallTimeCompare(replStatusInfo.optimes.lastAppliedWallTime,
initialReplStatusInfo.optimes.lastAppliedWallTime) > 0 &&
diff --git a/jstests/replsets/rename_collection_temp.js b/jstests/replsets/rename_collection_temp.js
index 1bf716784d6..f95ea76e1fe 100644
--- a/jstests/replsets/rename_collection_temp.js
+++ b/jstests/replsets/rename_collection_temp.js
@@ -33,10 +33,10 @@ var nodes = replTest.startSet();
replTest.initiate();
-var master = replTest.getPrimary();
+var primary = replTest.getPrimary();
// Create a temporary collection.
-var dbFoo = master.getDB("foo");
+var dbFoo = primary.getDB("foo");
assert.commandWorked(dbFoo.runCommand(
{applyOps: [{op: "c", ns: dbFoo.getName() + ".$cmd", o: {create: "tempColl", temp: true}}]}));
@@ -44,7 +44,7 @@ checkCollectionTemp(dbFoo, "tempColl", true);
// Rename the collection.
assert.commandWorked(
- master.adminCommand({renameCollection: "foo.tempColl", to: "foo.permanentColl"}));
+ primary.adminCommand({renameCollection: "foo.tempColl", to: "foo.permanentColl"}));
// Confirm that it is no longer temporary.
checkCollectionTemp(dbFoo, "permanentColl", false);
@@ -70,7 +70,7 @@ checkCollectionTemp(dbFoo, "tempColl", true);
assert.commandWorked(dbFoo.runCommand({create: "permanentColl"}));
// Rename, dropping "permanentColl" and replacing it.
-assert.commandWorked(master.adminCommand(
+assert.commandWorked(primary.adminCommand(
{renameCollection: "foo.tempColl", to: "foo.permanentColl", dropTarget: true}));
checkCollectionTemp(dbFoo, "permanentColl", false);
diff --git a/jstests/replsets/replset8.js b/jstests/replsets/replset8.js
index 87eb6d6361c..ec603e5cf5d 100644
--- a/jstests/replsets/replset8.js
+++ b/jstests/replsets/replset8.js
@@ -7,9 +7,9 @@ var rt = new ReplSetTest({name: "replset8", nodes: 1});
var nodes = rt.startSet();
rt.initiate();
-var master = rt.getPrimary();
+var primary = rt.getPrimary();
var bigstring = "a";
-var md = master.getDB('d');
+var md = primary.getDB('d');
var mdc = md['c'];
// prep the data
@@ -56,7 +56,7 @@ assert.eq(doccount, result.nRemoved);
assert.eq(doccount + 1, mdc.find().itcount());
// add a secondary
-var slave = rt.add();
+var secondary = rt.add();
rt.reInitiate();
jsTestLog('reinitiation complete after adding new node to replicaset');
rt.awaitSecondaryNodes();
@@ -76,10 +76,10 @@ assert.eq(doccount - 1, result.nModified);
assert.eq(doccount + 1,
mdc.find().itcount(),
'incorrect collection size on primary (fast count: ' + mdc.count() + ')');
-assert.eq(
- doccount + 1,
- slave.getDB('d')['c'].find().itcount(),
- 'incorrect collection size on secondary (fast count: ' + slave.getDB('d')['c'].count() + ')');
+assert.eq(doccount + 1,
+ secondary.getDB('d')['c'].find().itcount(),
+ 'incorrect collection size on secondary (fast count: ' +
+ secondary.getDB('d')['c'].count() + ')');
jsTestLog("finished");
rt.stopSet();
diff --git a/jstests/replsets/replsetadd_profile.js b/jstests/replsets/replsetadd_profile.js
index 2e396a61eb7..dfb27ade23e 100644
--- a/jstests/replsets/replsetadd_profile.js
+++ b/jstests/replsets/replsetadd_profile.js
@@ -11,9 +11,9 @@ var collectionName = 'jstests_replsetadd_profile';
var replTest = new ReplSetTest({name: 'ReplSetAddProfileTestSet', nodes: [{profile: 2}]});
replTest.startSet();
replTest.initiate();
-var master = replTest.getPrimary();
-var masterCollection = master.getDB('test').getCollection(collectionName);
-masterCollection.save({a: 1});
+var primary = replTest.getPrimary();
+var primaryCollection = primary.getDB('test').getCollection(collectionName);
+primaryCollection.save({a: 1});
// Add a new node with no profiling level.
var newNode = replTest.add();
@@ -26,7 +26,7 @@ replTest.awaitReplication();
var newNodeCollection = newNode.getDB('test').getCollection(collectionName);
assert.eq(1,
newNodeCollection.find({a: 1}).itcount(),
- 'expect documents to be present in slave after replication');
+ 'expect documents to be present in secondary after replication');
var signal = 15;
replTest.stopSet(signal);
diff --git a/jstests/replsets/replsetarb2.js b/jstests/replsets/replsetarb2.js
index 14066bf7762..d8583f99f1e 100644
--- a/jstests/replsets/replsetarb2.js
+++ b/jstests/replsets/replsetarb2.js
@@ -1,4 +1,4 @@
-// Election when master fails and remaining nodes are an arbiter and a slave.
+// Election when primary fails and remaining nodes are an arbiter and a secondary.
(function() {
"use strict";
@@ -16,8 +16,8 @@ var r = replTest.initiate({
]
});
-// Make sure we have a master
-var master = replTest.getPrimary();
+// Make sure we have a primary
+var primary = replTest.getPrimary();
// Make sure we have an arbiter
assert.soon(function() {
@@ -31,18 +31,18 @@ assert(result.arbiterOnly);
assert(!result.passive);
// Wait for initial replication
-master.getDB("foo").foo.insert({a: "foo"});
+primary.getDB("foo").foo.insert({a: "foo"});
replTest.awaitReplication();
-// Now kill the original master
-var mId = replTest.getNodeId(master);
-replTest.stop(mId);
+// Now kill the original primary
+var pId = replTest.getNodeId(primary);
+replTest.stop(pId);
-// And make sure that the slave is promoted
-var new_master = replTest.getPrimary();
+// And make sure that the secondary is promoted
+var new_primary = replTest.getPrimary();
-var newMasterId = replTest.getNodeId(new_master);
-assert.neq(newMasterId, mId, "Secondary wasn't promoted to new primary");
+var newPrimaryId = replTest.getNodeId(new_primary);
+assert.neq(newPrimaryId, pId, "Secondary wasn't promoted to new primary");
replTest.stopSet(15);
}());
diff --git a/jstests/replsets/replsethostnametrim.js b/jstests/replsets/replsethostnametrim.js
index 51edf551f96..e66bac8acaf 100644
--- a/jstests/replsets/replsethostnametrim.js
+++ b/jstests/replsets/replsethostnametrim.js
@@ -4,17 +4,17 @@ var replTest = new ReplSetTest({name: 'testSet', nodes: 1});
var nodes = replTest.startSet();
replTest.initiate();
-var master = replTest.getPrimary();
-var config = master.getDB("local").system.replset.findOne();
+var primary = replTest.getPrimary();
+var config = primary.getDB("local").system.replset.findOne();
config.version++;
var origHost = config.members[0].host;
config.members[0].host = origHost + " ";
-var result = master.adminCommand({replSetReconfig: config});
+var result = primary.adminCommand({replSetReconfig: config});
assert.eq(result.ok, 1, tojson(result));
// print("current (bad) config:"); printjson(config);
// check new config to make sure it doesn't have a space in the hostname
-config = master.getDB("local").system.replset.findOne();
+config = primary.getDB("local").system.replset.findOne();
assert.eq(origHost, config.members[0].host);
// print("current (good) config:"); printjson(config);
diff --git a/jstests/replsets/replsetprio1.js b/jstests/replsets/replsetprio1.js
index 0113b47b7a3..870f5316e1a 100644
--- a/jstests/replsets/replsetprio1.js
+++ b/jstests/replsets/replsetprio1.js
@@ -15,10 +15,10 @@ replTest.initiateWithAnyNodeAsPrimary({
]
});
-// 2 should be master (give this a while to happen, as other nodes might first be elected)
+// 2 should be primary (give this a while to happen, as other nodes might first be elected)
replTest.awaitNodesAgreeOnPrimary(replTest.kDefaultTimeoutMS, nodes, nodes[2]);
-// wait for 1 to not appear to be master (we are about to make it master and need a clean slate
+// wait for 1 to not appear to be primary (we are about to make it primary and need a clean slate
// here)
replTest.waitForState(nodes[1], ReplSetTest.State.SECONDARY);
@@ -28,28 +28,28 @@ replTest.awaitReplication();
// kill 2, 1 should take over
replTest.stop(2);
-// 1 should eventually be master
+// 1 should eventually be primary
replTest.waitForState(nodes[1], ReplSetTest.State.PRIMARY);
// do some writes on 1
-var master = replTest.getPrimary();
+var primary = replTest.getPrimary();
for (var i = 0; i < 1000; i++) {
- assert.commandWorked(master.getDB("foo").bar.insert({i: i}, {writeConcern: {w: 'majority'}}));
+ assert.commandWorked(primary.getDB("foo").bar.insert({i: i}, {writeConcern: {w: 'majority'}}));
}
for (i = 0; i < 1000; i++) {
- assert.commandWorked(master.getDB("bar").baz.insert({i: i}, {writeConcern: {w: 'majority'}}));
+ assert.commandWorked(primary.getDB("bar").baz.insert({i: i}, {writeConcern: {w: 'majority'}}));
}
-// bring 2 back up, 2 should wait until caught up and then become master
+// bring 2 back up, 2 should wait until caught up and then become primary
replTest.restart(2);
replTest.awaitNodesAgreeOnPrimary(replTest.kDefaultTimeoutMS, nodes, nodes[2]);
// make sure nothing was rolled back
-master = replTest.getPrimary();
+primary = replTest.getPrimary();
for (i = 0; i < 1000; i++) {
- assert(master.getDB("foo").bar.findOne({i: i}) != null, 'checking ' + i);
- assert(master.getDB("bar").baz.findOne({i: i}) != null, 'checking ' + i);
+ assert(primary.getDB("foo").bar.findOne({i: i}) != null, 'checking ' + i);
+ assert(primary.getDB("bar").baz.findOne({i: i}) != null, 'checking ' + i);
}
replTest.stopSet();
}());
diff --git a/jstests/replsets/retryable_commit_transaction_after_failover.js b/jstests/replsets/retryable_commit_transaction_after_failover.js
index d5e9efacc60..1970d5c2bca 100644
--- a/jstests/replsets/retryable_commit_transaction_after_failover.js
+++ b/jstests/replsets/retryable_commit_transaction_after_failover.js
@@ -73,7 +73,7 @@ rst.stepUp(oldSecondary);
assert.eq(oldSecondary, rst.getPrimary());
// Reconnect the connection to the new primary.
sessionDb.getMongo()._markNodeAsFailed(
- oldPrimary.host, ErrorCodes.NotWritablePrimary, "Notice that primary is not master");
+ oldPrimary.host, ErrorCodes.NotWritablePrimary, "Notice that primary is not writable");
reconnect(sessionDb);
jsTest.log("commitTransaction command is retryable after failover");
diff --git a/jstests/replsets/rollback_creates_rollback_directory.js b/jstests/replsets/rollback_creates_rollback_directory.js
index db795769bdf..961a7300fe3 100644
--- a/jstests/replsets/rollback_creates_rollback_directory.js
+++ b/jstests/replsets/rollback_creates_rollback_directory.js
@@ -26,9 +26,9 @@ function runRollbackDirectoryTest(shouldCreateRollbackFiles) {
],
});
- // Make sure we have a master
+ // Make sure we have a primary
replTest.waitForState(replTest.nodes[0], ReplSetTest.State.PRIMARY);
- var master = replTest.getPrimary();
+ var primary = replTest.getPrimary();
var a_conn = conns[0];
var b_conn = conns[1];
a_conn.setSecondaryOk();
@@ -37,8 +37,8 @@ function runRollbackDirectoryTest(shouldCreateRollbackFiles) {
var B = b_conn.getDB("test");
var Apath = replTest.getDbPath(a_conn) + '/';
var Bpath = replTest.getDbPath(b_conn) + '/';
- assert(master == conns[0], "conns[0] assumed to be master");
- assert(a_conn.host == master.host);
+ assert(primary == conns[0], "conns[0] assumed to be primary");
+ assert(a_conn.host == primary.host);
// Make sure we have an arbiter
assert.soon(function() {
@@ -51,15 +51,15 @@ function runRollbackDirectoryTest(shouldCreateRollbackFiles) {
var AID = replTest.getNodeId(a_conn);
replTest.stop(AID);
- master = replTest.getPrimary();
- assert(b_conn.host == master.host);
+ primary = replTest.getPrimary();
+ assert(b_conn.host == primary.host);
options = {writeConcern: {w: 1, wtimeout: replTest.kDefaultTimeoutMS}, upsert: true};
assert.commandWorked(B.foo.update({key: 'value1'}, {$set: {res: 'res'}}, options));
var BID = replTest.getNodeId(b_conn);
replTest.stop(BID);
replTest.restart(AID);
- master = replTest.getPrimary();
- assert(a_conn.host == master.host);
+ primary = replTest.getPrimary();
+ assert(a_conn.host == primary.host);
options = {writeConcern: {w: 1, wtimeout: replTest.kDefaultTimeoutMS}, upsert: true};
assert.commandWorked(A.foo.update({key: 'value2'}, {$set: {req: 'req'}}, options));
replTest.restart(BID); // should rollback
diff --git a/jstests/replsets/server8070.js b/jstests/replsets/server8070.js
index 5bc4fd8f60d..92546a56c0a 100644
--- a/jstests/replsets/server8070.js
+++ b/jstests/replsets/server8070.js
@@ -34,32 +34,32 @@ replSet.initiate({
});
// set up common points of access
-var master = replSet.getPrimary();
-var primary = master.getDB("foo");
+var primary = replSet.getPrimary();
+var primaryDB = primary.getDB("foo");
replSet.nodes[1].setSecondaryOk();
replSet.nodes[2].setSecondaryOk();
var member2 = replSet.nodes[1].getDB("admin");
var member3 = replSet.nodes[2].getDB("admin");
// Do an initial write
-master.getDB("foo").bar.insert({x: 1});
+primary.getDB("foo").bar.insert({x: 1});
replSet.awaitReplication();
jsTest.log("Make sure 2 & 3 are syncing from the primary");
-assert.eq(master, replSet.nodes[0]);
-syncFrom(replSet.nodes[1], master, replSet);
-syncFrom(replSet.nodes[2], master, replSet);
+assert.eq(primary, replSet.nodes[0]);
+syncFrom(replSet.nodes[1], primary, replSet);
+syncFrom(replSet.nodes[2], primary, replSet);
jsTest.log("Stop 2's replication");
member2.runCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'alwaysOn'});
jsTest.log("Do a few writes");
for (var i = 0; i < 25; i++) {
- primary.bar.insert({x: i});
+ primaryDB.bar.insert({x: i});
}
jsTest.log("Make sure 3 is at write #25");
-waitForSameOplogPosition(primary, member3, "node 3 failed to catch up to the primary");
+waitForSameOplogPosition(primaryDB, member3, "node 3 failed to catch up to the primary");
// This means 3's buffer is empty
jsTest.log("Stop 3's replication");
@@ -73,11 +73,11 @@ member2.runCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'off'});
jsTest.log("Do some writes");
for (var i = 25; i < 50; i++) {
- primary.bar.insert({x: i});
+ primaryDB.bar.insert({x: i});
}
jsTest.log("Make sure 2 is at write #50");
-waitForSameOplogPosition(primary, member2, "node 2 failed to catch up to the primary");
+waitForSameOplogPosition(primaryDB, member2, "node 2 failed to catch up to the primary");
// This means 2's buffer is empty
jsTest.log("Stop 2's replication");
@@ -85,11 +85,11 @@ member2.runCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'alwaysOn'});
jsTest.log("Do some writes - 2 & 3 should have up to write #75 in their buffers, but unapplied");
for (var i = 50; i < 75; i++) {
- primary.bar.insert({x: i});
+ primaryDB.bar.insert({x: i});
}
-var primaryCollectionSize = primary.bar.find().itcount();
+var primaryCollectionSize = primaryDB.bar.find().itcount();
jsTest.log("primary collection size: " + primaryCollectionSize);
-var last = primary.getSiblingDB("local").oplog.rs.find().sort({$natural: -1}).limit(1).next();
+var last = primaryDB.getSiblingDB("local").oplog.rs.find().sort({$natural: -1}).limit(1).next();
jsTest.log("waiting a bit for the secondaries to get the write");
sleep(10000);
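The test above drives everything through the 'rsSyncApplyStop' failpoint, which pauses oplog application on a secondary while fetched entries keep accumulating in its buffer. A self-contained sketch of that toggle, with the two-node set and the 'foo.bar' write being assumptions for illustration:

var rst = new ReplSetTest({nodes: 2});
rst.startSet();
rst.initiate();
var secondaryAdmin = rst.getSecondary().getDB("admin");
// Pause application: new oplog entries are still fetched and buffered, but not applied.
assert.commandWorked(
    secondaryAdmin.runCommand({configureFailPoint: "rsSyncApplyStop", mode: "alwaysOn"}));
assert.commandWorked(rst.getPrimary().getDB("foo").bar.insert({x: 1}));
// Resume application and wait for the secondary to drain its buffer.
assert.commandWorked(
    secondaryAdmin.runCommand({configureFailPoint: "rsSyncApplyStop", mode: "off"}));
rst.awaitReplication();
rst.stopSet();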
diff --git a/jstests/replsets/slave_delay_clean_shutdown.js b/jstests/replsets/slave_delay_clean_shutdown.js
index 8a68a2d0a58..73050e18daa 100644
--- a/jstests/replsets/slave_delay_clean_shutdown.js
+++ b/jstests/replsets/slave_delay_clean_shutdown.js
@@ -23,10 +23,10 @@ conf.members[1].slaveDelay = 0; // Set later.
rst.startSet();
rst.initiate(conf);
-var master = rst.getPrimary(); // Waits for PRIMARY state.
+var primary = rst.getPrimary(); // Waits for PRIMARY state.
// Push some ops through before setting slave delay.
-assert.commandWorked(master.getCollection(ns).insert([{}, {}, {}], {writeConcern: {w: 2}}));
+assert.commandWorked(primary.getCollection(ns).insert([{}, {}, {}], {writeConcern: {w: 2}}));
// Set slaveDelay and wait for secondary to receive the change.
conf = rst.getReplSetConfigFromNode();
@@ -40,10 +40,10 @@ sleep(2000); // The secondary apply loop only checks for slaveDelay changes onc
var secondary = rst.getSecondary();
const lastOp = getLatestOp(secondary);
-assert.commandWorked(master.getCollection(ns).insert([{}, {}, {}]));
+assert.commandWorked(primary.getCollection(ns).insert([{}, {}, {}]));
assert.soon(() => secondary.adminCommand('serverStatus').metrics.repl.buffer.count > 0,
() => secondary.adminCommand('serverStatus').metrics.repl);
-assert.neq(getLatestOp(master), lastOp);
+assert.neq(getLatestOp(primary), lastOp);
assert.eq(getLatestOp(secondary), lastOp);
sleep(2000); // Prevent the test from passing by chance.
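For context, a hedged sketch of how a delayed member is configured in tests of this era: the delay is set through a reconfig, and the delayed member must have priority 0 (it is usually hidden as well). Newer server releases rename the field to secondaryDelaySecs; the two-node set below is an assumption for brevity.

var rst = new ReplSetTest({nodes: 2});
rst.startSet();
rst.initiate();
var conf = rst.getReplSetConfigFromNode();
var delayedId = rst.getNodeId(rst.getSecondary());
conf.version++;
conf.members[delayedId].priority = 0;      // a delayed member must not be electable
conf.members[delayedId].hidden = true;     // and is typically hidden from clients
conf.members[delayedId].slaveDelay = 3600; // apply oplog entries one hour behind the primary
assert.commandWorked(rst.getPrimary().adminCommand({replSetReconfig: conf}));
rst.stopSet();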
diff --git a/jstests/replsets/sync2.js b/jstests/replsets/sync2.js
index 281acafa667..7a8bc1ddd48 100644
--- a/jstests/replsets/sync2.js
+++ b/jstests/replsets/sync2.js
@@ -12,10 +12,10 @@ var replTest = new ReplSetTest({
var conns = replTest.startSet();
replTest.initiate();
-var master = replTest.getPrimary();
+var primary = replTest.getPrimary();
jsTestLog("Replica set test initialized");
-master.getDB("foo").bar.insert({x: 1});
+primary.getDB("foo").bar.insert({x: 1});
replTest.awaitReplication();
conns[0].disconnect(conns[4]);
@@ -28,8 +28,8 @@ conns[4].disconnect(conns[1]);
conns[4].disconnect(conns[3]);
assert.soon(function() {
- master = replTest.getPrimary();
- return master === conns[0];
+ primary = replTest.getPrimary();
+ return primary === conns[0];
}, "node 0 should become primary before timeout", replTest.kDefaultTimeoutMS);
replTest.awaitReplication();
@@ -39,13 +39,13 @@ var option = {writeConcern: {w: conns.length - 1, wtimeout: replTest.kDefaultTim
// to bridging, it will not change sync sources and receive the write in time. This was not a
// problem in 3.0 because the old version of mongobridge caused all the nodes to restart during
// partitioning, forcing the set to rebuild the spanning tree.
-assert.commandWorked(master.getDB("foo").bar.insert({x: 1}, option));
+assert.commandWorked(primary.getDB("foo").bar.insert({x: 1}, option));
// 4 is connected to 3
conns[4].disconnect(conns[2]);
conns[4].reconnect(conns[3]);
-assert.commandWorked(master.getDB("foo").bar.insert({x: 1}, option));
+assert.commandWorked(primary.getDB("foo").bar.insert({x: 1}, option));
replTest.stopSet();
}());
diff --git a/jstests/replsets/tags2.js b/jstests/replsets/tags2.js
index 4c7ea5b3995..16fa2157cde 100644
--- a/jstests/replsets/tags2.js
+++ b/jstests/replsets/tags2.js
@@ -31,8 +31,8 @@ replTest.initiate(conf);
replTest.awaitReplication();
-var master = replTest.getPrimary();
-var db = master.getDB("test");
+var primary = replTest.getPrimary();
+var db = primary.getDB("test");
var wtimeout = ReplSetTest.kDefaultTimeoutMS;
assert.commandWorked(db.foo.insert({x: 1}, {writeConcern: {w: 'backedUp', wtimeout: wtimeout}}));
@@ -40,21 +40,21 @@ assert.commandWorked(db.foo.insert({x: 1}, {writeConcern: {w: 'backedUp', wtimeo
var nextVersion = replTest.getReplSetConfigFromNode().version + 1;
conf.version = nextVersion;
conf.settings.getLastErrorModes.backedUp.backup = 3;
-master.getDB("admin").runCommand({replSetReconfig: conf});
+primary.getDB("admin").runCommand({replSetReconfig: conf});
replTest.awaitReplication();
-master = replTest.getPrimary();
-var db = master.getDB("test");
+primary = replTest.getPrimary();
+var db = primary.getDB("test");
assert.commandWorked(db.foo.insert({x: 2}, {writeConcern: {w: 'backedUp', wtimeout: wtimeout}}));
nextVersion++;
conf.version = nextVersion;
conf.members[0].priority = 3;
conf.members[2].priority = 0;
-master.getDB("admin").runCommand({replSetReconfig: conf});
+primary.getDB("admin").runCommand({replSetReconfig: conf});
-master = replTest.getPrimary();
-var db = master.getDB("test");
+primary = replTest.getPrimary();
+var db = primary.getDB("test");
assert.commandWorked(db.foo.insert({x: 3}, {writeConcern: {w: 'backedUp', wtimeout: wtimeout}}));
replTest.stopSet();
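The 'backedUp' mode above is a tag-based write concern defined in settings.getLastErrorModes; the number attached to a tag name is how many distinct values of that tag must acknowledge the write. A small sketch with made-up tag values:

var rst = new ReplSetTest({nodes: 3});
rst.startSet();
var conf = rst.getReplSetConfig();
conf.members[0].tags = {backup: "a"};
conf.members[1].tags = {backup: "b"};
conf.members[2].tags = {backup: "c"};
// 'backedUp' is satisfied once members covering two distinct 'backup' values have the write.
conf.settings = {getLastErrorModes: {backedUp: {backup: 2}}};
rst.initiate(conf);
assert.commandWorked(rst.getPrimary().getDB("test").foo.insert(
    {x: 1}, {writeConcern: {w: "backedUp", wtimeout: ReplSetTest.kDefaultTimeoutMS}}));
rst.stopSet();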
diff --git a/jstests/replsets/tags_with_reconfig.js b/jstests/replsets/tags_with_reconfig.js
index 7aadef7c5b8..75c1cf112da 100644
--- a/jstests/replsets/tags_with_reconfig.js
+++ b/jstests/replsets/tags_with_reconfig.js
@@ -1,5 +1,5 @@
// Test for SERVER-9333
-// Previously, we were not clearing the cache of slaves in the primary at reconfig
+// Previously, we were not clearing the cache of secondaries in the primary at reconfig
// time. This would cause us to update stale items in the cache when secondaries
// reported their progress to a primary.
@@ -32,8 +32,8 @@ replTest.initiate(conf);
replTest.awaitReplication();
var wtimeout = ReplSetTest.kDefaultTimeoutMS;
-var master = replTest.getPrimary();
-var db = master.getDB("test");
+var primary = replTest.getPrimary();
+var db = primary.getDB("test");
// Insert a document with write concern : anydc
assert.commandWorked(db.foo.insert({x: 1}, {writeConcern: {w: 'anydc', wtimeout: wtimeout}}));
@@ -42,14 +42,14 @@ assert.commandWorked(db.foo.insert({x: 1}, {writeConcern: {w: 'anydc', wtimeout:
assert.commandWorked(db.foo.insert({x: 2}, {writeConcern: {w: 'alldc', wtimeout: wtimeout}}));
// Add a new tag to the replica set
-var config = master.getDB("local").system.replset.findOne();
+var config = primary.getDB("local").system.replset.findOne();
printjson(config);
var modes = config.settings.getLastErrorModes;
config.version++;
config.members[0].tags.newtag = "newtag";
try {
- master.getDB("admin").runCommand({replSetReconfig: config});
+ primary.getDB("admin").runCommand({replSetReconfig: config});
} catch (e) {
print(e);
}
@@ -57,11 +57,11 @@ try {
replTest.awaitReplication();
// Print the new config for replica set
-var config = master.getDB("local").system.replset.findOne();
+var config = primary.getDB("local").system.replset.findOne();
printjson(config);
-master = replTest.getPrimary();
-var db = master.getDB("test");
+primary = replTest.getPrimary();
+var db = primary.getDB("test");
// Insert a document with write concern : anydc
assert.commandWorked(db.foo.insert({x: 3}, {writeConcern: {w: 'anydc', wtimeout: wtimeout}}));
diff --git a/jstests/replsets/user_management_wc.js b/jstests/replsets/user_management_wc.js
index 6d782fa1536..fddc2ef7686 100644
--- a/jstests/replsets/user_management_wc.js
+++ b/jstests/replsets/user_management_wc.js
@@ -19,10 +19,10 @@ var replTest =
replTest.startSet();
replTest.initiate();
-var master = replTest.getPrimary();
+var primary = replTest.getPrimary();
var dbName = "user-management-wc-test";
-var db = master.getDB(dbName);
-var adminDB = master.getDB('admin');
+var db = primary.getDB(dbName);
+var adminDB = primary.getDB('admin');
function dropUsersAndRoles() {
db.dropUser('username');
diff --git a/jstests/sharding/addshard4.js b/jstests/sharding/addshard4.js
index a23d0a13797..49e9de7b48c 100644
--- a/jstests/sharding/addshard4.js
+++ b/jstests/sharding/addshard4.js
@@ -15,7 +15,7 @@ r.initiate(config);
// to pre-allocate files on slow systems
r.awaitReplication();
-var master = r.getPrimary();
+var primary = r.getPrimary();
var members = config.members.map(function(elem) {
return elem.host;
@@ -49,7 +49,7 @@ r.initiate(config);
// to pre-allocate files on slow systems
r.awaitReplication();
-master = r.getPrimary();
+primary = r.getPrimary();
print("adding shard addshard42");
diff --git a/jstests/sharding/auth.js b/jstests/sharding/auth.js
index 96e867458df..24273b29884 100644
--- a/jstests/sharding/auth.js
+++ b/jstests/sharding/auth.js
@@ -39,8 +39,8 @@ function logout(userObj, thingToUse) {
}
function getShardName(rsTest) {
- var master = rsTest.getPrimary();
- var config = master.getDB("local").system.replset.findOne();
+ var primary = rsTest.getPrimary();
+ var config = primary.getDB("local").system.replset.findOne();
var members = config.members.map(function(elem) {
return elem.host;
});
@@ -110,7 +110,7 @@ d1.stopSet();
d1.startSet({keyFile: "jstests/libs/key1", restart: true});
d1.initiate();
-var master = d1.getPrimary();
+var primary = d1.getPrimary();
print("adding shard w/auth " + shardName);
diff --git a/jstests/sharding/count_slaveok.js b/jstests/sharding/count_slaveok.js
index 23612d96220..7ad5007be48 100644
--- a/jstests/sharding/count_slaveok.js
+++ b/jstests/sharding/count_slaveok.js
@@ -41,18 +41,18 @@ rst.awaitReplication();
var primary = rst.getPrimary();
var sec = rst.getSecondary();
-// Data now inserted... stop the master, since only two in set, other will still be secondary
+// Data now inserted... stop the primary, since only two in set, other will still be secondary
rst.stop(rst.getPrimary());
printjson(rst.status());
-// Wait for the mongos to recognize the slave
+// Wait for the mongos to recognize the secondary
awaitRSClientHosts(conn, sec, {ok: true, secondary: true});
// Make sure that mongos realizes that primary is already down
awaitRSClientHosts(conn, primary, {ok: false});
// Need to check secondaryOk=true first, since secondaryOk=false will destroy conn in pool when
-// master is down
+// primary is down
conn.setSecondaryOk();
// count using the command path
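A compact sketch of the secondaryOk behavior this test depends on, using an assumed 'test.countTest' collection: a direct read against a secondary is rejected until the connection opts in with setSecondaryOk().

var rst = new ReplSetTest({nodes: 2});
rst.startSet();
rst.initiate();
assert.commandWorked(rst.getPrimary().getDB("test").countTest.insert({i: 1}));
rst.awaitReplication();
var sec = rst.getSecondary();
var secColl = sec.getDB("test").countTest;
assert.throws(() => secColl.find().itcount());  // rejected: secondaryOk not set on this connection
sec.setSecondaryOk();
assert.eq(1, secColl.find().itcount());         // the secondary now serves the read
rst.stopSet();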
diff --git a/jstests/sharding/cursor_valid_after_shard_stepdown.js b/jstests/sharding/cursor_valid_after_shard_stepdown.js
index 574eb603e54..00c31a8ef0f 100644
--- a/jstests/sharding/cursor_valid_after_shard_stepdown.js
+++ b/jstests/sharding/cursor_valid_after_shard_stepdown.js
@@ -26,7 +26,7 @@ var coll = db.TestColl;
assert.commandWorked(coll.insert({x: 1, value: 'Test value 1'}));
assert.commandWorked(coll.insert({x: 2, value: 'Test value 2'}));
-// Establish a cursor on the primary (by not using slaveOk read)
+// Establish a cursor on the primary (by not using secondaryOk read)
var findCursor = assert.commandWorked(db.runCommand({find: 'TestColl', batchSize: 1})).cursor;
var shardVersionBeforeStepdown =
diff --git a/jstests/sharding/read_pref.js b/jstests/sharding/read_pref.js
index 9267cb18430..8435ef319da 100644
--- a/jstests/sharding/read_pref.js
+++ b/jstests/sharding/read_pref.js
@@ -129,14 +129,14 @@ var doTest = function(useDollarQuerySyntax) {
return serverInfo.host + ":" + serverInfo.port.toString();
};
- // Read pref should work without slaveOk
+ // Read pref should work without secondaryOk
var explain = getExplain("secondary");
var explainServer = getExplainServer(explain);
assert.neq(primaryNode.name, explainServer);
conn.setSecondaryOk();
- // It should also work with slaveOk
+ // It should also work with secondaryOk
explain = getExplain("secondary");
explainServer = getExplainServer(explain);
assert.neq(primaryNode.name, explainServer);
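A sketch of the two paths being compared, under the assumption of a one-shard cluster and a hypothetical 'test.user' collection: a per-cursor read preference routes the query to a secondary on its own, and setting the connection-wide secondaryOk flag afterwards does not change that.

var st = new ShardingTest({shards: 1, rs: {nodes: 2}});
var coll = st.s.getDB("test").user;
assert.commandWorked(coll.insert({x: 1}, {writeConcern: {w: 2}}));
// Per-cursor read preference; no secondaryOk needed on the mongos connection.
assert.eq(1, coll.find().readPref("secondary").itcount());
// The connection-wide flag is also honored, as the test re-checks after setSecondaryOk().
st.s.setSecondaryOk();
assert.eq(1, coll.find().readPref("secondary").itcount());
st.stop();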
diff --git a/jstests/sharding/recovering_slaveok.js b/jstests/sharding/recovering_slaveok.js
index d9bcd44da87..7956b154361 100644
--- a/jstests/sharding/recovering_slaveok.js
+++ b/jstests/sharding/recovering_slaveok.js
@@ -1,6 +1,6 @@
/**
- * This tests that secondaryOk'd queries in sharded setups get correctly routed when a slave goes
- * into RECOVERING state, and don't break
+ * This tests that secondaryOk'd queries in sharded setups get correctly routed when a secondary
+ * goes into RECOVERING state, and don't break
*/
// Shard secondaries are restarted, which may cause that shard's primary to stepdown while it does
diff --git a/jstests/sharding/shard_insert_getlasterror_w2.js b/jstests/sharding/shard_insert_getlasterror_w2.js
index a4a0f5c540f..9db83bf5781 100644
--- a/jstests/sharding/shard_insert_getlasterror_w2.js
+++ b/jstests/sharding/shard_insert_getlasterror_w2.js
@@ -62,7 +62,7 @@ for (var i = numDocs; i < 2 * numDocs; i++) {
}
assert.commandWorked(bulk.execute({w: replNodes, wtimeout: 30000}));
-// Take down two nodes and make sure slaveOk reads still work
+// Take down two nodes and make sure secondaryOk reads still work
var primary = replSet1.getPrimary();
var [secondary1, secondary2] = replSet1.getSecondaries();
replSet1.stop(secondary1);
diff --git a/jstests/sharding/warm_up_connection_pool.js b/jstests/sharding/warm_up_connection_pool.js
index bd00206cdd0..ad87cbe21c6 100644
--- a/jstests/sharding/warm_up_connection_pool.js
+++ b/jstests/sharding/warm_up_connection_pool.js
@@ -24,11 +24,11 @@ function runTest(setParams, connPoolStatsCheck, extraOptions) {
assert.commandWorked(test.s0.adminCommand(shardCommand));
var primary;
- var mId;
+ var pId;
if (extraOptions !== undefined) {
const resp = extraOptions(test);
primary = resp.connString;
- mId = resp.nodeId;
+ pId = resp.nodeId;
}
test.restartMongos(0);
@@ -48,7 +48,7 @@ function runTest(setParams, connPoolStatsCheck, extraOptions) {
});
if (extraOptions !== undefined) {
- test.rs0.restart(mId);
+ test.rs0.restart(pId);
}
test.stop();
@@ -109,11 +109,11 @@ var shutdownNodeConnPoolStatsCheck = function(connPoolStats, currentShard, prima
};
var shutdownNodeExtraOptions = function(test) {
const nodeList = test.rs0.nodeList();
- const master = test.rs0.getPrimary();
- var mId = test.rs0.getNodeId(master);
+ const primary = test.rs0.getPrimary();
+ var pId = test.rs0.getNodeId(primary);
- test.rs0.stop(mId);
- return {connString: nodeList[mId], nodeId: mId};
+ test.rs0.stop(pId);
+ return {connString: nodeList[pId], nodeId: pId};
};
runTest(shutdownNodeParams, shutdownNodeConnPoolStatsCheck, shutdownNodeExtraOptions);
diff --git a/jstests/slow1/replsets_priority1.js b/jstests/slow1/replsets_priority1.js
index 4dea828c793..dad18abc6ba 100644
--- a/jstests/slow1/replsets_priority1.js
+++ b/jstests/slow1/replsets_priority1.js
@@ -19,13 +19,13 @@ var rs = new ReplSetTest({name: 'testSet', nodes: 3, nodeOptions: {verbose: 2}})
var nodes = rs.startSet();
rs.initiate();
-var master = rs.getPrimary();
+var primary = rs.getPrimary();
var everyoneOkSoon = function() {
var status;
assert.soon(function() {
var ok = true;
- status = master.adminCommand({replSetGetStatus: 1});
+ status = primary.adminCommand({replSetGetStatus: 1});
if (!status.members) {
return false;
@@ -50,12 +50,12 @@ var checkPrimaryIs = function(node) {
var ok = true;
try {
- status = master.adminCommand({replSetGetStatus: 1});
+ status = primary.adminCommand({replSetGetStatus: 1});
} catch (e) {
print(e);
print("nreplsets_priority1.js checkPrimaryIs reconnecting");
- reconnect(master);
- status = master.adminCommand({replSetGetStatus: 1});
+ reconnect(primary);
+ status = primary.adminCommand({replSetGetStatus: 1});
}
var str = "goal: " + node.host + "==1 states: ";
@@ -92,7 +92,7 @@ everyoneOkSoon();
jsTestLog("replsets_priority1.js initial sync");
// initial sync
-master.getDB("foo").bar.insert({x: 1});
+primary.getDB("foo").bar.insert({x: 1});
rs.awaitReplication();
jsTestLog("replsets_priority1.js starting loop");
@@ -103,8 +103,8 @@ for (var i = 0; i < n; i++) {
var max = null;
var second = null;
- master = rs.getPrimary();
- var config = master.getDB("local").system.replset.findOne();
+ primary = rs.getPrimary();
+ var config = primary.getDB("local").system.replset.findOne();
var version = config.version;
config.version++;
@@ -129,10 +129,10 @@ for (var i = 0; i < n; i++) {
}
jsTestLog("replsets_priority1.js max is " + max.host + " with priority " + max.priority +
- ", reconfiguring on " + master.host);
+ ", reconfiguring on " + primary.host);
- assert.soon(() => isConfigCommitted(master));
- assert.commandWorked(master.adminCommand({replSetReconfig: config}));
+ assert.soon(() => isConfigCommitted(primary));
+ assert.commandWorked(primary.adminCommand({replSetReconfig: config}));
jsTestLog("replsets_priority1.js wait for 2 secondaries");
@@ -170,7 +170,7 @@ for (var i = 0; i < n; i++) {
rs.stop(max._id);
- master = rs.getPrimary();
+ primary = rs.getPrimary();
jsTestLog("killed max primary. Checking statuses.");
@@ -186,7 +186,7 @@ for (var i = 0; i < n; i++) {
jsTestLog("restart max " + max._id);
rs.restart(max._id);
- master = rs.getPrimary();
+ primary = rs.getPrimary();
jsTestLog("max restarted. Checking statuses.");
checkPrimaryIs(max);