author    Ali Mir <ali.mir@mongodb.com>    2020-08-27 14:07:45 -0400
committer Evergreen Agent <no-reply@evergreen.mongodb.com>    2020-09-09 23:29:23 +0000
commit    2b5dc35f019a3606c2dfa845cdfb320ffbac8014 (patch)
tree      0b7c0bd55ba7b745986e7a248835fcfca1316109 /jstests/sharding
parent    ee106b978c7466bdd325cfb9f3f029d3769b1c1b (diff)
download  mongo-2b5dc35f019a3606c2dfa845cdfb320ffbac8014.tar.gz
SERVER-50581 Replace setSlaveOk and getSlaveOk occurrences in jstests
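For context, the rename this patch applies looks like the following in a typical jstest. This is an illustrative sketch only, not code from the patch; the ShardingTest `st` and the test collection are assumed.

// Sketch of the API rename (assumes a running ShardingTest `st`).
var conn = new Mongo(st.s.host);
var testDB = conn.getDB('test');

// Old spelling, removed by this patch:
//     testDB.setSlaveOk(true);

// New spelling; with no argument it defaults to true, and
// setSecondaryOk(false) restricts reads to the primary again.
testDB.setSecondaryOk();
assert.neq(null, testDB.user.findOne());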
Diffstat (limited to 'jstests/sharding')
-rw-r--r--  jstests/sharding/agg_mongos_slaveok.js                                     |  6
-rw-r--r--  jstests/sharding/all_shard_and_config_hosts_brought_down_one_by_one.js     |  2
-rw-r--r--  jstests/sharding/auth_repl.js                                               | 12
-rw-r--r--  jstests/sharding/auth_slaveok_routing.js                                    |  8
-rw-r--r--  jstests/sharding/autodiscover_config_rs_from_secondary.js                   |  2
-rw-r--r--  jstests/sharding/balance_repl.js                                            |  2
-rw-r--r--  jstests/sharding/cluster_create_indexes_always_routes_through_primary.js   |  4
-rw-r--r--  jstests/sharding/config_rs_no_primary.js                                    |  4
-rw-r--r--  jstests/sharding/count_config_servers.js                                    |  2
-rw-r--r--  jstests/sharding/count_slaveok.js                                           | 14
-rw-r--r--  jstests/sharding/error_propagation.js                                       |  2
-rw-r--r--  jstests/sharding/mongos_rs_auth_shard_failure_tolerance.js                  | 28
-rw-r--r--  jstests/sharding/mongos_rs_shard_failure_tolerance.js                       | 46
-rw-r--r--  jstests/sharding/query/explain_read_pref.js                                 |  2
-rw-r--r--  jstests/sharding/read_pref.js                                               |  2
-rw-r--r--  jstests/sharding/read_pref_cmd.js                                           |  4
-rw-r--r--  jstests/sharding/recovering_slaveok.js                                      | 16
-rw-r--r--  jstests/sharding/session_info_in_oplog.js                                   |  4
-rw-r--r--  jstests/sharding/shard_aware_init_secondaries.js                            |  4
-rw-r--r--  jstests/sharding/shard_identity_config_update.js                            |  4
-rw-r--r--  jstests/sharding/shard_identity_rollback.js                                 |  4
-rw-r--r--  jstests/sharding/shard_insert_getlasterror_w2.js                            |  2
22 files changed, 87 insertions, 87 deletions
diff --git a/jstests/sharding/agg_mongos_slaveok.js b/jstests/sharding/agg_mongos_slaveok.js
index 287902092bc..01fb4286429 100644
--- a/jstests/sharding/agg_mongos_slaveok.js
+++ b/jstests/sharding/agg_mongos_slaveok.js
@@ -1,5 +1,5 @@
/**
- * Tests aggregate command against mongos with slaveOk. For more tests on read preference,
+ * Tests aggregate command against mongos with secondaryOk. For more tests on read preference,
* please refer to jstests/sharding/read_pref_cmd.js.
* @tags: [
* requires_replication,
@@ -21,12 +21,12 @@ var doTest = function(st, doSharded) {
}
testDB.user.insert({x: 10}, {writeConcern: {w: NODES}});
- testDB.setSlaveOk(true);
+ testDB.setSecondaryOk();
var secNode = st.rs0.getSecondary();
secNode.getDB('test').setProfilingLevel(2);
- // wait for mongos to recognize that the slave is up
+ // wait for mongos to recognize that the secondary is up
awaitRSClientHosts(st.s, secNode, {ok: true});
var res = testDB.runCommand({aggregate: 'user', pipeline: [{$project: {x: 1}}], cursor: {}});
diff --git a/jstests/sharding/all_shard_and_config_hosts_brought_down_one_by_one.js b/jstests/sharding/all_shard_and_config_hosts_brought_down_one_by_one.js
index b0bd0f59e8c..a11f8dbc694 100644
--- a/jstests/sharding/all_shard_and_config_hosts_brought_down_one_by_one.js
+++ b/jstests/sharding/all_shard_and_config_hosts_brought_down_one_by_one.js
@@ -39,7 +39,7 @@ jsTest.log('Config nodes up: 1 of 3, shard nodes up: 1 of 2: ' +
'Only queries will work (no shard primary)');
st.rs0.stop(0);
st.restartMongos(0);
-st.s0.setSlaveOk(true);
+st.s0.setSecondaryOk();
assert.eq([{_id: 0, count: 3}], st.s0.getDB('TestDB').TestColl.find().toArray());
jsTest.log('Config nodes up: 1 of 3, shard nodes up: 0 of 2: ' +
diff --git a/jstests/sharding/auth_repl.js b/jstests/sharding/auth_repl.js
index cd89c91f136..b806090fc3a 100644
--- a/jstests/sharding/auth_repl.js
+++ b/jstests/sharding/auth_repl.js
@@ -19,7 +19,7 @@ var testColl = testDB.user;
// before setting up authentication
assert.commandWorked(adminDB.runCommand({replSetGetStatus: 1}));
-conn.setSlaveOk();
+conn.setSecondaryOk();
assert.commandWorked(adminDB.runCommand({replSetGetStatus: 1}));
// Add admin user using direct connection to primary to simulate connection from remote host
@@ -38,19 +38,19 @@ assert.eq(1, testDB.auth('a', 'a'));
jsTest.log('Sending an authorized query that should be ok');
assert.commandWorked(testColl.insert({x: 1}, {writeConcern: {w: nodeCount}}));
-conn.setSlaveOk(true);
+conn.setSecondaryOk();
doc = testColl.findOne();
assert(doc != null);
doc = testColl.find().readPref('secondary').next();
assert(doc != null);
-conn.setSlaveOk(false);
+conn.setSecondaryOk(false);
doc = testColl.findOne();
assert(doc != null);
var queryToPriShouldFail = function() {
- conn.setSlaveOk(false);
+ conn.setSecondaryOk(false);
assert.throws(function() {
testColl.findOne();
@@ -63,7 +63,7 @@ var queryToPriShouldFail = function() {
};
var queryToSecShouldFail = function() {
- conn.setSlaveOk(true);
+ conn.setSecondaryOk();
assert.throws(function() {
testColl.findOne();
@@ -104,7 +104,7 @@ queryToPriShouldFail();
assert.eq(1, testDB.auth('a', 'a'));
// Find out the current cached secondary in the repl connection
-conn.setSlaveOk(true);
+conn.setSecondaryOk();
var serverInfo = testColl.find().readPref('secondary').explain().serverInfo;
var secNodeIdx = -1;
var secPortStr = serverInfo.port.toString();
diff --git a/jstests/sharding/auth_slaveok_routing.js b/jstests/sharding/auth_slaveok_routing.js
index 8eff7833c9b..1e573fc7c9e 100644
--- a/jstests/sharding/auth_slaveok_routing.js
+++ b/jstests/sharding/auth_slaveok_routing.js
@@ -1,5 +1,5 @@
/**
- * This tests whether slaveOk reads are properly routed through mongos in
+ * This tests whether secondaryOk reads are properly routed through mongos in
* an authenticated environment. This test also includes restarting the
* entire set, then querying afterwards.
*
@@ -59,11 +59,11 @@ priAdminDB.createUser({user: 'user', pwd: 'password', roles: jsTest.adminUserRol
{w: 3, wtimeout: 30000});
coll.drop();
-coll.setSlaveOk(true);
+coll.setSecondaryOk();
/* Secondaries should be up here, but they can still be in RECOVERY
* state, which will make the ReplicaSetMonitor mark them as
- * ok = false and not eligible for slaveOk queries.
+ * ok = false and not eligible for secondaryOk queries.
*/
awaitRSClientHosts(mongos, replTest.getSecondaries(), {ok: true, secondary: true});
@@ -90,7 +90,7 @@ for (var n = 0; n < nodeCount; n++) {
replTest.awaitSecondaryNodes();
-coll.setSlaveOk(true);
+coll.setSecondaryOk();
/* replSetMonitor does not refresh the nodes information when getting secondaries.
* A node that is previously labeled as secondary can now be a primary, so we
diff --git a/jstests/sharding/autodiscover_config_rs_from_secondary.js b/jstests/sharding/autodiscover_config_rs_from_secondary.js
index 9d9bd4adbd5..cc6ca3c11ae 100644
--- a/jstests/sharding/autodiscover_config_rs_from_secondary.js
+++ b/jstests/sharding/autodiscover_config_rs_from_secondary.js
@@ -53,7 +53,7 @@ var mongos = MongoRunner.runMongos({configdb: seedList});
rst.stop(1);
var admin = mongos.getDB('admin');
-mongos.setSlaveOk(true);
+mongos.setSecondaryOk();
assert.eq(1, admin.foo.findOne().a);
MongoRunner.stopMongos(mongos);
rst.stopSet();
diff --git a/jstests/sharding/balance_repl.js b/jstests/sharding/balance_repl.js
index 83c92ff37b1..fb501c979cb 100644
--- a/jstests/sharding/balance_repl.js
+++ b/jstests/sharding/balance_repl.js
@@ -44,7 +44,7 @@ var collPrimary = (new Mongo(s.s0.host)).getDB('TestDB').TestColl;
assert.eq(2100, collPrimary.find().itcount());
var collSlaveOk = (new Mongo(s.s0.host)).getDB('TestDB').TestColl;
-collSlaveOk.setSlaveOk();
+collSlaveOk.setSecondaryOk();
assert.eq(2100, collSlaveOk.find().itcount());
assert.commandWorked(s.s0.adminCommand({
diff --git a/jstests/sharding/cluster_create_indexes_always_routes_through_primary.js b/jstests/sharding/cluster_create_indexes_always_routes_through_primary.js
index 6c661e0abac..6b61bd12a68 100644
--- a/jstests/sharding/cluster_create_indexes_always_routes_through_primary.js
+++ b/jstests/sharding/cluster_create_indexes_always_routes_through_primary.js
@@ -1,5 +1,5 @@
// Ensure that a call to createIndexes in a sharded cluster will route to the primary, even when
-// setSlaveOk() is set to true.
+// setSecondaryOk() is set to true.
(function() {
'use strict';
@@ -12,7 +12,7 @@ assert.commandWorked(testDB.adminCommand({enableSharding: testDBName}));
assert.commandWorked(
testDB.adminCommand({shardCollection: testDB[collName].getFullName(), key: {x: 1}}));
-st.s.setSlaveOk(true);
+st.s.setSecondaryOk();
assert.commandWorked(
testDB.runCommand({createIndexes: collName, indexes: [{key: {a: 1}, name: "index"}]}));
diff --git a/jstests/sharding/config_rs_no_primary.js b/jstests/sharding/config_rs_no_primary.js
index 91ce74de45d..8bcf7e54cd4 100644
--- a/jstests/sharding/config_rs_no_primary.js
+++ b/jstests/sharding/config_rs_no_primary.js
@@ -43,9 +43,9 @@ var testOps = function(mongos) {
assert.throws(function() {
mongos.getDB('config').shards.findOne();
});
- mongos.setSlaveOk(true);
+ mongos.setSecondaryOk();
var shardDoc = mongos.getDB('config').shards.findOne();
- mongos.setSlaveOk(false);
+ mongos.setSecondaryOk(false);
assert.neq(null, shardDoc);
jsTestLog("Doing ops that require metadata writes and thus should fail against: " + mongos);
diff --git a/jstests/sharding/count_config_servers.js b/jstests/sharding/count_config_servers.js
index ded75607cd0..0904a873e52 100644
--- a/jstests/sharding/count_config_servers.js
+++ b/jstests/sharding/count_config_servers.js
@@ -13,7 +13,7 @@ TestData.skipCheckOrphans = true;
"use strict";
var st = new ShardingTest({name: 'sync_conn_cmd', shards: 0});
-st.s.setSlaveOk(true);
+st.s.setSecondaryOk();
var configDB = st.config;
var coll = configDB.test;
diff --git a/jstests/sharding/count_slaveok.js b/jstests/sharding/count_slaveok.js
index e527128a7cd..23612d96220 100644
--- a/jstests/sharding/count_slaveok.js
+++ b/jstests/sharding/count_slaveok.js
@@ -1,5 +1,5 @@
/**
- * Tests count and distinct using slaveOk. Also tests a scenario querying a set where only one
+ * Tests count and distinct using secondaryOk. Also tests a scenario querying a set where only one
* secondary is up.
*/
@@ -20,7 +20,7 @@ var rst = st.rs0;
// Insert data into replica set
var conn = new Mongo(st.s.host);
-var coll = conn.getCollection('test.countSlaveOk');
+var coll = conn.getCollection('test.countSecondaryOk');
coll.drop();
var bulk = coll.initializeUnorderedBulkOp();
@@ -51,9 +51,9 @@ awaitRSClientHosts(conn, sec, {ok: true, secondary: true});
// Make sure that mongos realizes that primary is already down
awaitRSClientHosts(conn, primary, {ok: false});
-// Need to check slaveOk=true first, since slaveOk=false will destroy conn in pool when
+// Need to check secondaryOk=true first, since secondaryOk=false will destroy conn in pool when
// master is down
-conn.setSlaveOk();
+conn.setSecondaryOk();
// count using the command path
assert.eq(30, coll.find({i: 0}).count());
@@ -62,14 +62,14 @@ assert.eq(30, coll.find({i: 0}).itcount());
assert.eq(10, coll.distinct("i").length);
try {
- conn.setSlaveOk(false);
- // Should throw exception, since not slaveOk'd
+ conn.setSecondaryOk(false);
+ // Should throw exception, since not secondaryOk'd
coll.find({i: 0}).count();
print("Should not reach here!");
assert(false);
} catch (e) {
- print("Non-slaveOk'd connection failed.");
+ print("Non-secondaryOk'd connection failed.");
}
st.stop();
diff --git a/jstests/sharding/error_propagation.js b/jstests/sharding/error_propagation.js
index 6f47075f753..6fa9b7da74c 100644
--- a/jstests/sharding/error_propagation.js
+++ b/jstests/sharding/error_propagation.js
@@ -8,7 +8,7 @@
var st = new ShardingTest({mongos: 1, shards: 1, rs: {nodes: 3}});
var db = st.getDB('test');
-db.setSlaveOk(true);
+db.setSecondaryOk();
assert.commandWorked(db.foo.insert({a: 1}, {writeConcern: {w: 3}}));
assert.commandWorked(db.runCommand(
diff --git a/jstests/sharding/mongos_rs_auth_shard_failure_tolerance.js b/jstests/sharding/mongos_rs_auth_shard_failure_tolerance.js
index 5cb277197b1..466c4314d45 100644
--- a/jstests/sharding/mongos_rs_auth_shard_failure_tolerance.js
+++ b/jstests/sharding/mongos_rs_auth_shard_failure_tolerance.js
@@ -160,9 +160,9 @@ gc(); // Clean up new connections
jsTest.log("Stopping primary of second shard...");
-mongosConnActive.setSlaveOk();
+mongosConnActive.setSecondaryOk();
mongosConnIdle = authDBUsers(new Mongo(mongos.host));
-mongosConnIdle.setSlaveOk();
+mongosConnIdle.setSecondaryOk();
// Need to save this node for later
var rs1Secondary = st.rs1.getSecondary();
@@ -192,13 +192,13 @@ assert.neq(null, mongosConnIdle.getCollection(collUnsharded.toString()).findOne(
jsTest.log("Testing new connections with second primary down...");
mongosConnNew = authDBUsers(new Mongo(mongos.host));
-mongosConnNew.setSlaveOk();
+mongosConnNew.setSecondaryOk();
assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: -1}));
mongosConnNew = authDBUsers(new Mongo(mongos.host));
-mongosConnNew.setSlaveOk();
+mongosConnNew.setSecondaryOk();
assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: 1}));
mongosConnNew = authDBUsers(new Mongo(mongos.host));
-mongosConnNew.setSlaveOk();
+mongosConnNew.setSecondaryOk();
assert.neq(null, mongosConnNew.getCollection(collUnsharded.toString()).findOne({_id: 1}));
mongosConnNew = authDBUsers(new Mongo(mongos.host));
@@ -212,9 +212,9 @@ gc(); // Clean up new connections
jsTest.log("Stopping primary of first shard...");
-mongosConnActive.setSlaveOk();
+mongosConnActive.setSecondaryOk();
mongosConnIdle = authDBUsers(new Mongo(mongos.host));
-mongosConnIdle.setSlaveOk();
+mongosConnIdle.setSecondaryOk();
st.rs0.stop(st.rs0.getPrimary());
@@ -241,13 +241,13 @@ assert.neq(null, mongosConnIdle.getCollection(collUnsharded.toString()).findOne(
jsTest.log("Testing new connections with first primary down...");
mongosConnNew = authDBUsers(new Mongo(mongos.host));
-mongosConnNew.setSlaveOk();
+mongosConnNew.setSecondaryOk();
assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: -1}));
mongosConnNew = authDBUsers(new Mongo(mongos.host));
-mongosConnNew.setSlaveOk();
+mongosConnNew.setSecondaryOk();
assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: 1}));
mongosConnNew = authDBUsers(new Mongo(mongos.host));
-mongosConnNew.setSlaveOk();
+mongosConnNew.setSecondaryOk();
assert.neq(null, mongosConnNew.getCollection(collUnsharded.toString()).findOne({_id: 1}));
mongosConnNew = authDBUsers(new Mongo(mongos.host));
@@ -261,9 +261,9 @@ gc(); // Clean up new connections
jsTest.log("Stopping second shard...");
-mongosConnActive.setSlaveOk();
+mongosConnActive.setSecondaryOk();
mongosConnIdle = authDBUsers(new Mongo(mongos.host));
-mongosConnIdle.setSlaveOk();
+mongosConnIdle.setSecondaryOk();
st.rs1.stop(rs1Secondary);
@@ -288,10 +288,10 @@ assert.neq(null, mongosConnIdle.getCollection(collUnsharded.toString()).findOne(
jsTest.log("Testing new connections with second shard down...");
mongosConnNew = authDBUsers(new Mongo(mongos.host));
-mongosConnNew.setSlaveOk();
+mongosConnNew.setSecondaryOk();
assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: -1}));
mongosConnNew = authDBUsers(new Mongo(mongos.host));
-mongosConnNew.setSlaveOk();
+mongosConnNew.setSecondaryOk();
assert.neq(null, mongosConnNew.getCollection(collUnsharded.toString()).findOne({_id: 1}));
mongosConnNew = authDBUsers(new Mongo(mongos.host));
diff --git a/jstests/sharding/mongos_rs_shard_failure_tolerance.js b/jstests/sharding/mongos_rs_shard_failure_tolerance.js
index 34d68c45f6e..89dc4c07986 100644
--- a/jstests/sharding/mongos_rs_shard_failure_tolerance.js
+++ b/jstests/sharding/mongos_rs_shard_failure_tolerance.js
@@ -131,11 +131,11 @@ st.rs1.stop(st.rs1.getPrimary());
jsTest.log("Testing active connection with second primary down...");
// Reads with read prefs
-mongosConnActive.setSlaveOk();
+mongosConnActive.setSecondaryOk();
assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: -1}));
assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: 1}));
assert.neq(null, mongosConnActive.getCollection(collUnsharded.toString()).findOne({_id: 1}));
-mongosConnActive.setSlaveOk(false);
+mongosConnActive.setSecondaryOk(false);
mongosConnActive.setReadPref("primary");
assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: -1}));
@@ -145,14 +145,14 @@ assert.throws(function() {
assert.neq(null, mongosConnActive.getCollection(collUnsharded.toString()).findOne({_id: 1}));
// Ensure read prefs override slaveOK
-mongosConnActive.setSlaveOk();
+mongosConnActive.setSecondaryOk();
mongosConnActive.setReadPref("primary");
assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: -1}));
assert.throws(function() {
mongosConnActive.getCollection(collSharded.toString()).findOne({_id: 1});
});
assert.neq(null, mongosConnActive.getCollection(collUnsharded.toString()).findOne({_id: 1}));
-mongosConnActive.setSlaveOk(false);
+mongosConnActive.setSecondaryOk(false);
mongosConnActive.setReadPref("secondary");
assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: -1}));
@@ -187,11 +187,11 @@ assert.writeError(mongosConnIdle.getCollection(collSharded.toString()).insert({_
assert.commandWorked(mongosConnIdle.getCollection(collUnsharded.toString()).insert({_id: 6}, wc));
// Reads with read prefs
-mongosConnIdle.setSlaveOk();
+mongosConnIdle.setSecondaryOk();
assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: -1}));
assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: 1}));
assert.neq(null, mongosConnIdle.getCollection(collUnsharded.toString()).findOne({_id: 1}));
-mongosConnIdle.setSlaveOk(false);
+mongosConnIdle.setSecondaryOk(false);
mongosConnIdle.setReadPref("primary");
assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: -1}));
@@ -201,14 +201,14 @@ assert.throws(function() {
assert.neq(null, mongosConnIdle.getCollection(collUnsharded.toString()).findOne({_id: 1}));
// Ensure read prefs override slaveOK
-mongosConnIdle.setSlaveOk();
+mongosConnIdle.setSecondaryOk();
mongosConnIdle.setReadPref("primary");
assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: -1}));
assert.throws(function() {
mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: 1});
});
assert.neq(null, mongosConnIdle.getCollection(collUnsharded.toString()).findOne({_id: 1}));
-mongosConnIdle.setSlaveOk(false);
+mongosConnIdle.setSecondaryOk(false);
mongosConnIdle.setReadPref("secondary");
assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: -1}));
@@ -234,13 +234,13 @@ jsTest.log("Testing new connections with second primary down...");
// Reads with read prefs
mongosConnNew = new Mongo(mongos.host);
-mongosConnNew.setSlaveOk();
+mongosConnNew.setSecondaryOk();
assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: -1}));
mongosConnNew = new Mongo(mongos.host);
-mongosConnNew.setSlaveOk();
+mongosConnNew.setSecondaryOk();
assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: 1}));
mongosConnNew = new Mongo(mongos.host);
-mongosConnNew.setSlaveOk();
+mongosConnNew.setSecondaryOk();
assert.neq(null, mongosConnNew.getCollection(collUnsharded.toString()).findOne({_id: 1}));
gc(); // Clean up new connections incrementally to compensate for slow win32 machine.
@@ -261,17 +261,17 @@ gc(); // Clean up new connections incrementally to compensate for slow win32 ma
// Ensure read prefs override slaveok
mongosConnNew = new Mongo(mongos.host);
-mongosConnNew.setSlaveOk();
+mongosConnNew.setSecondaryOk();
mongosConnNew.setReadPref("primary");
assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: -1}));
mongosConnNew = new Mongo(mongos.host);
-mongosConnNew.setSlaveOk();
+mongosConnNew.setSecondaryOk();
mongosConnNew.setReadPref("primary");
assert.throws(function() {
mongosConnNew.getCollection(collSharded.toString()).findOne({_id: 1});
});
mongosConnNew = new Mongo(mongos.host);
-mongosConnNew.setSlaveOk();
+mongosConnNew.setSecondaryOk();
mongosConnNew.setReadPref("primary");
assert.neq(null, mongosConnNew.getCollection(collUnsharded.toString()).findOne({_id: 1}));
@@ -343,7 +343,7 @@ st.rs0.stop(st.rs0.getPrimary());
jsTest.log("Testing active connection with first primary down...");
-mongosConnActive.setSlaveOk();
+mongosConnActive.setSecondaryOk();
assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: -1}));
assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: 1}));
assert.neq(null, mongosConnActive.getCollection(collUnsharded.toString()).findOne({_id: 1}));
@@ -358,7 +358,7 @@ assert.writeError(mongosConnIdle.getCollection(collSharded.toString()).insert({_
assert.writeError(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: 9}));
assert.writeError(mongosConnIdle.getCollection(collUnsharded.toString()).insert({_id: 9}));
-mongosConnIdle.setSlaveOk();
+mongosConnIdle.setSecondaryOk();
assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: -1}));
assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: 1}));
assert.neq(null, mongosConnIdle.getCollection(collUnsharded.toString()).findOne({_id: 1}));
@@ -366,13 +366,13 @@ assert.neq(null, mongosConnIdle.getCollection(collUnsharded.toString()).findOne(
jsTest.log("Testing new connections with first primary down...");
mongosConnNew = new Mongo(mongos.host);
-mongosConnNew.setSlaveOk();
+mongosConnNew.setSecondaryOk();
assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: -1}));
mongosConnNew = new Mongo(mongos.host);
-mongosConnNew.setSlaveOk();
+mongosConnNew.setSecondaryOk();
assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: 1}));
mongosConnNew = new Mongo(mongos.host);
-mongosConnNew.setSlaveOk();
+mongosConnNew.setSecondaryOk();
assert.neq(null, mongosConnNew.getCollection(collUnsharded.toString()).findOne({_id: 1}));
mongosConnNew = new Mongo(mongos.host);
@@ -392,7 +392,7 @@ st.rs1.stop(rs1Secondary);
jsTest.log("Testing active connection with second shard down...");
-mongosConnActive.setSlaveOk();
+mongosConnActive.setSecondaryOk();
assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: -1}));
assert.neq(null, mongosConnActive.getCollection(collUnsharded.toString()).findOne({_id: 1}));
@@ -406,17 +406,17 @@ assert.writeError(mongosConnIdle.getCollection(collSharded.toString()).insert({_
assert.writeError(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: 12}));
assert.writeError(mongosConnIdle.getCollection(collUnsharded.toString()).insert({_id: 12}));
-mongosConnIdle.setSlaveOk();
+mongosConnIdle.setSecondaryOk();
assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: -1}));
assert.neq(null, mongosConnIdle.getCollection(collUnsharded.toString()).findOne({_id: 1}));
jsTest.log("Testing new connections with second shard down...");
mongosConnNew = new Mongo(mongos.host);
-mongosConnNew.setSlaveOk();
+mongosConnNew.setSecondaryOk();
assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: -1}));
mongosConnNew = new Mongo(mongos.host);
-mongosConnNew.setSlaveOk();
+mongosConnNew.setSecondaryOk();
assert.neq(null, mongosConnNew.getCollection(collUnsharded.toString()).findOne({_id: 1}));
mongosConnNew = new Mongo(mongos.host);
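As the "Ensure read prefs override slaveOK" comments in this file indicate, a query-level read preference takes precedence over the connection-level secondaryOk flag. A minimal sketch of that interaction, reusing the names from the hunks above (mongos, collSharded) as assumptions:

// Sketch only: an explicit readPref("primary") wins over setSecondaryOk().
var conn = new Mongo(mongos.host);
conn.setSecondaryOk();          // allow secondary reads by default
conn.setReadPref("primary");    // query-level preference overrides the flag
// With that shard's primary down, the read fails despite secondaryOk:
assert.throws(function() {
    conn.getCollection(collSharded.toString()).findOne({_id: 1});
});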
diff --git a/jstests/sharding/query/explain_read_pref.js b/jstests/sharding/query/explain_read_pref.js
index ce5e2cf47af..c3c51d85756 100644
--- a/jstests/sharding/query/explain_read_pref.js
+++ b/jstests/sharding/query/explain_read_pref.js
@@ -58,7 +58,7 @@ var testAllModes = function(conn, isMongos) {
var mode = args[0], tagSets = args[1], secExpected = args[2];
var testDB = conn.getDB('TestDB');
- conn.setSlaveOk(false); // purely rely on readPref
+ conn.setSecondaryOk(false); // purely rely on readPref
jsTest.log('Testing mode: ' + mode + ', tag sets: ' + tojson(tagSets));
// .explain().find()
diff --git a/jstests/sharding/read_pref.js b/jstests/sharding/read_pref.js
index 95c0e9697c3..9267cb18430 100644
--- a/jstests/sharding/read_pref.js
+++ b/jstests/sharding/read_pref.js
@@ -134,7 +134,7 @@ var doTest = function(useDollarQuerySyntax) {
var explainServer = getExplainServer(explain);
assert.neq(primaryNode.name, explainServer);
- conn.setSlaveOk();
+ conn.setSecondaryOk();
// It should also work with slaveOk
explain = getExplain("secondary");
diff --git a/jstests/sharding/read_pref_cmd.js b/jstests/sharding/read_pref_cmd.js
index 2c2a7f3332b..f94dd924f45 100644
--- a/jstests/sharding/read_pref_cmd.js
+++ b/jstests/sharding/read_pref_cmd.js
@@ -165,7 +165,7 @@ let testConnReadPreference = function(conn, isMongos, rsNodes, {readPref, expect
let testDB = conn.getDB(kDbName);
let shardedColl = conn.getCollection(kShardedNs);
- conn.setSlaveOk(false); // purely rely on readPref
+ conn.setSecondaryOk(false); // purely rely on readPref
conn.setReadPref(readPref.mode, readPref.tagSets, readPref.hedge);
/**
@@ -387,7 +387,7 @@ let testCursorReadPreference = function(conn, isMongos, rsNodes, {readPref, expe
tojson(readPref.tagSets)}, hedge ${tojson(readPref.hedge)}`);
let testColl = conn.getCollection(kShardedNs);
- conn.setSlaveOk(false); // purely rely on readPref
+ conn.setSecondaryOk(false); // purely rely on readPref
let bulk = testColl.initializeUnorderedBulkOp();
for (let i = 0; i < kNumDocs; ++i) {
diff --git a/jstests/sharding/recovering_slaveok.js b/jstests/sharding/recovering_slaveok.js
index 512719b08b6..d9bcd44da87 100644
--- a/jstests/sharding/recovering_slaveok.js
+++ b/jstests/sharding/recovering_slaveok.js
@@ -1,6 +1,6 @@
/**
- * This tests that slaveOk'd queries in sharded setups get correctly routed when a slave goes into
- * RECOVERING state, and don't break
+ * This tests that secondaryOk'd queries in sharded setups get correctly routed when a slave goes
+ * into RECOVERING state, and don't break
*/
// Shard secondaries are restarted, which may cause that shard's primary to stepdown while it does
@@ -12,11 +12,11 @@ TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
load("jstests/replsets/rslib.js");
var shardTest =
- new ShardingTest({name: "recovering_slaveok", shards: 2, mongos: 2, other: {rs: true}});
+ new ShardingTest({name: "recovering_secondaryok", shards: 2, mongos: 2, other: {rs: true}});
var mongos = shardTest.s0;
var mongosSOK = shardTest.s1;
-mongosSOK.setSlaveOk();
+mongosSOK.setSecondaryOk();
var admin = mongos.getDB("admin");
var config = mongos.getDB("config");
@@ -50,7 +50,7 @@ shardTest.shardColl(coll,
/* dbname */ null,
/* waitForDelete */ true);
-print("3: test normal and slaveOk queries");
+print("3: test normal and secondaryOk queries");
// Make shardA and rsA the same
var shardA = shardTest.getShard(coll, {_id: -1});
@@ -87,7 +87,7 @@ print("6: stop non-RECOVERING secondary");
rsA.stop(goodSec);
-print("7: check our regular and slaveOk query");
+print("7: check our regular and secondaryOk query");
assert.eq(2, coll.find().itcount());
assert.eq(2, collSOk.find().itcount());
@@ -100,7 +100,7 @@ print("9: wait for recovery");
rsA.waitForState(rsA.getSecondaries(), ReplSetTest.State.SECONDARY, 5 * 60 * 1000);
-print("10: check our regular and slaveOk query");
+print("10: check our regular and secondaryOk query");
// We need to make sure our nodes are considered accessible from mongos - otherwise we fail
// See SERVER-7274
@@ -112,7 +112,7 @@ awaitRSClientHosts(coll.getMongo(), rsB.nodes, {ok: true});
awaitRSClientHosts(collSOk.getMongo(), [rsA.getSecondaries()[0]], {secondary: true, ok: true});
awaitRSClientHosts(collSOk.getMongo(), [rsB.getSecondaries()[0]], {secondary: true, ok: true});
-print("SlaveOK Query...");
+print("SecondaryOk Query...");
var sOKCount = collSOk.find().itcount();
var collCount = null;
diff --git a/jstests/sharding/session_info_in_oplog.js b/jstests/sharding/session_info_in_oplog.js
index 617d5759207..a7644fca599 100644
--- a/jstests/sharding/session_info_in_oplog.js
+++ b/jstests/sharding/session_info_in_oplog.js
@@ -329,7 +329,7 @@ replTest.initiate();
var priConn = replTest.getPrimary();
var secConn = replTest.getSecondary();
-secConn.setSlaveOk(true);
+secConn.setSecondaryOk();
runTests(priConn, priConn, secConn);
@@ -338,7 +338,7 @@ replTest.stopSet();
var st = new ShardingTest({shards: {rs0: {nodes: kNodes}}});
secConn = st.rs0.getSecondary();
-secConn.setSlaveOk(true);
+secConn.setSecondaryOk();
runTests(st.s, st.rs0.getPrimary(), secConn);
st.stop();
diff --git a/jstests/sharding/shard_aware_init_secondaries.js b/jstests/sharding/shard_aware_init_secondaries.js
index 59a8542f44b..f852c6e58a1 100644
--- a/jstests/sharding/shard_aware_init_secondaries.js
+++ b/jstests/sharding/shard_aware_init_secondaries.js
@@ -41,7 +41,7 @@ assert.commandWorked(priConn.getDB('admin').system.version.update(
shardIdentityQuery, shardIdentityUpdate, {upsert: true, writeConcern: {w: 2}}));
var secConn = replTest.getSecondary();
-secConn.setSlaveOk(true);
+secConn.setSecondaryOk();
var res = secConn.getDB('admin').runCommand({shardingState: 1});
@@ -55,7 +55,7 @@ replTest.waitForPrimary();
replTest.awaitSecondaryNodes();
secConn = replTest.getSecondary();
-secConn.setSlaveOk(true);
+secConn.setSecondaryOk();
res = secConn.getDB('admin').runCommand({shardingState: 1});
diff --git a/jstests/sharding/shard_identity_config_update.js b/jstests/sharding/shard_identity_config_update.js
index 3e668c5903c..43c10bbbd22 100644
--- a/jstests/sharding/shard_identity_config_update.js
+++ b/jstests/sharding/shard_identity_config_update.js
@@ -63,7 +63,7 @@ assert.soon(function() {
});
var secConn = st.rs0.getSecondary();
-secConn.setSlaveOk(true);
+secConn.setSecondaryOk();
assert.soon(function() {
return checkConfigStrUpdated(secConn, expectedConfigStr);
});
@@ -96,7 +96,7 @@ assert.soon(function() {
});
secConn = st.rs0.getSecondary();
-secConn.setSlaveOk(true);
+secConn.setSecondaryOk();
assert.soon(function() {
return checkConfigStrUpdated(secConn, origConfigConnStr);
});
diff --git a/jstests/sharding/shard_identity_rollback.js b/jstests/sharding/shard_identity_rollback.js
index d6e47fa3137..25dbc2e19e4 100644
--- a/jstests/sharding/shard_identity_rollback.js
+++ b/jstests/sharding/shard_identity_rollback.js
@@ -52,7 +52,7 @@ assert.eq(shardIdentityDoc.clusterId, res.clusterId);
// Ensure sharding state on the secondaries was *not* initialized
secondaries.forEach(function(secondary) {
- secondary.setSlaveOk(true);
+ secondary.setSecondaryOk();
res = secondary.getDB('admin').runCommand({shardingState: 1});
assert(!res.enabled, tojson(res));
});
@@ -105,7 +105,7 @@ try {
// specified. We do want to wait to be able to connect to the node here however, so we need to pass
// {waitForConnect: true}.
priConn = replTest.start(priConn.nodeId, {shardsvr: '', waitForConnect: true}, true);
-priConn.setSlaveOk();
+priConn.setSecondaryOk();
// Wait for the old primary to replicate the document that was written to the new primary while
// it was shut down.
diff --git a/jstests/sharding/shard_insert_getlasterror_w2.js b/jstests/sharding/shard_insert_getlasterror_w2.js
index 7bde30b2dc5..a4a0f5c540f 100644
--- a/jstests/sharding/shard_insert_getlasterror_w2.js
+++ b/jstests/sharding/shard_insert_getlasterror_w2.js
@@ -70,7 +70,7 @@ replSet1.stop(secondary2);
replSet1.waitForState(primary, ReplSetTest.State.SECONDARY);
testDB.getMongo().adminCommand({setParameter: 1, logLevel: 1});
-testDB.getMongo().setSlaveOk();
+testDB.getMongo().setSecondaryOk();
print("trying some queries");
assert.soon(function() {
try {