summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorAndy Schwerin <schwerin@mongodb.com>2017-01-07 15:08:22 -0500
committerAndy Schwerin <schwerin@mongodb.com>2017-01-07 15:08:22 -0500
commitcdc7af4c6d453b8c1ce2319d0cd3b50074609d87 (patch)
tree681a5704ee7bdd32052dfe43ab1be03680b7a478
parentef364240d7d48a8a352afb91df5cddbd34394021 (diff)
downloadmongo-cdc7af4c6d453b8c1ce2319d0cd3b50074609d87.tar.gz
SERVER-27490 Stop consulting storage engine isDurable flag in ReplicationCoordinatorImpl
... and simplify ReplCoordTestFixture.

ReplicationCoordinatorImpl consults the storage engine's isDurable flag for two purposes: 1. To choose whether to present the durable or applied optime when standing for election in pv1 2. To decide how to interpret w:majority without an explicit j field when waiting for write concern. In the first case, it is unnecessary to choose which optime to apply based on the isDurable flag. It is always safe and correct to present the applied optime, because if the node presenting it wins election, it will attempt to commit that applied optime. That means that voters may safely vote for that node. In the second case, using the value of the local node's storage engine's isDurable flag to adjust the meaning of w:majority is out of spec. Whether w:majority writes wait for journaling is a function only of the writeConcernMajorityJournalDefault flag when a write concern omits the "j" field. This patch removes the unnecessary consultation of the isDurable flag, and uses the opportunity to simplify the constructor of ReplicationCoordinatorImpl and its test fixture.
-rw-r--r--buildscripts/resmokelib/testing/fixtures/replicaset.py12
-rw-r--r--jstests/auth/copyauth.js3
-rw-r--r--jstests/auth/resource_pattern_matching.js3
-rw-r--r--jstests/ssl/initial_sync1_x509.js22
-rw-r--r--jstests/ssl/upgrade_to_x509_ssl.js21
-rw-r--r--jstests/sslSpecial/upgrade_to_x509_ssl_nossl.js15
-rw-r--r--src/mongo/db/db.cpp2
-rw-r--r--src/mongo/db/repl/replication_coordinator_impl.cpp44
-rw-r--r--src/mongo/db/repl/replication_coordinator_impl.h25
-rw-r--r--src/mongo/db/repl/replication_coordinator_impl_elect_v1.cpp7
-rw-r--r--src/mongo/db/repl/replication_coordinator_test_fixture.cpp15
-rw-r--r--src/mongo/db/repl/replication_coordinator_test_fixture.h16
-rw-r--r--src/mongo/shell/replsettest.js55
13 files changed, 122 insertions, 118 deletions
diff --git a/buildscripts/resmokelib/testing/fixtures/replicaset.py b/buildscripts/resmokelib/testing/fixtures/replicaset.py
index 69e82c76f4b..1b59457b181 100644
--- a/buildscripts/resmokelib/testing/fixtures/replicaset.py
+++ b/buildscripts/resmokelib/testing/fixtures/replicaset.py
@@ -108,9 +108,6 @@ class ReplicaSetFixture(interface.ReplFixture):
initiate_cmd_obj = {"replSetInitiate": {"_id": self.replset_name, "members": members}}
- if self.write_concern_majority_journal_default is not None:
- initiate_cmd_obj["replSetInitiate"]["writeConcernMajorityJournalDefault"] = self.write_concern_majority_journal_default
-
client = utils.new_mongo_client(port=self.port)
if self.auth_options is not None:
auth_db = client[self.auth_options["authenticationDatabase"]]
@@ -118,6 +115,15 @@ class ReplicaSetFixture(interface.ReplFixture):
password=self.auth_options["password"],
mechanism=self.auth_options["authenticationMechanism"])
+ if self.write_concern_majority_journal_default is not None:
+ initiate_cmd_obj["replSetInitiate"]["writeConcernMajorityJournalDefault"] = self.write_concern_majority_journal_default
+ else:
+ serverStatus = client.admin.command({"serverStatus": 1})
+ cmdLineOpts = client.admin.command({"getCmdLineOpts": 1})
+ if not (serverStatus["storageEngine"]["persistent"] and
+ cmdLineOpts["parsed"].get("storage", {}).get("journal", {}).get("enabled", True)):
+ initiate_cmd_obj["replSetInitiate"]["writeConcernMajorityJournalDefault"] = False
+
if self.replset_config_options.get("configsvr", False):
initiate_cmd_obj["replSetInitiate"]["configsvr"] = True
if self.replset_config_options.get("settings"):
diff --git a/jstests/auth/copyauth.js b/jstests/auth/copyauth.js
index 91e17edc669..6960692ef24 100644
--- a/jstests/auth/copyauth.js
+++ b/jstests/auth/copyauth.js
@@ -51,7 +51,8 @@ function ClusterSpawnHelper(clusterType, startWithAuth, startWithTransitionToAut
var replSetTestConfig = {
name: baseName + "_source",
nodes: 3,
- nodeOptions: singleNodeConfig
+ nodeOptions: singleNodeConfig,
+ keyFile: singleNodeConfig.keyFile
};
var replSetTest = new ReplSetTest(replSetTestConfig);
replSetTest.startSet();
diff --git a/jstests/auth/resource_pattern_matching.js b/jstests/auth/resource_pattern_matching.js
index 36d6711ea52..0d9781efd4b 100644
--- a/jstests/auth/resource_pattern_matching.js
+++ b/jstests/auth/resource_pattern_matching.js
@@ -231,7 +231,8 @@ print('--- replica set test ---');
var rst = new ReplSetTest({
name: 'testset',
nodes: 2,
- nodeOptions: {'auth': null, 'httpinterface': null, 'keyFile': keyfile}
+ nodeOptions: {'auth': null, 'httpinterface': null},
+ keyFile: keyfile
});
rst.startSet();
diff --git a/jstests/ssl/initial_sync1_x509.js b/jstests/ssl/initial_sync1_x509.js
index 85198604b57..96a707c6e17 100644
--- a/jstests/ssl/initial_sync1_x509.js
+++ b/jstests/ssl/initial_sync1_x509.js
@@ -11,12 +11,26 @@ var common_options = {
function runInitialSyncTest() {
load("jstests/replsets/rslib.js");
+ // The mongo shell cannot authenticate as the internal __system user in tests that use x509 for
+ // cluster authentication. Choosing the default value for wcMajorityJournalDefault in
+ // ReplSetTest cannot be done automatically without the shell performing such authentication, so
+ // in this test we must make the choice explicitly, based on the global test options.
+ var wcMajorityJournalDefault;
+ if (jsTestOptions().noJournal || jsTestOptions().storageEngine == "ephemeralForTest" ||
+ jsTestOptions().storageEngine == "inMemory") {
+ wcMajorityJournalDefault = false;
+ } else {
+ wcMajorityJournalDefault = true;
+ }
print("1. Bring up set");
- var replTest = new ReplSetTest(
- {name: "jstests_initsync1_x509", nodes: {node0: x509_options1, node1: x509_options2}});
-
+ var replTest = new ReplSetTest({
+ name: "jstests_initsync1_x509",
+ nodes: {node0: x509_options1, node1: x509_options2},
+ });
var conns = replTest.startSet();
- replTest.initiate();
+ replTest.initiate(
+ Object.extend(replTest.getReplSetConfig(),
+ {writeConcernMajorityJournalDefault: wcMajorityJournalDefault}));
var master = replTest.getPrimary();
var foo = master.getDB("foo");
diff --git a/jstests/ssl/upgrade_to_x509_ssl.js b/jstests/ssl/upgrade_to_x509_ssl.js
index 2072d6c51c5..0a0a3338de8 100644
--- a/jstests/ssl/upgrade_to_x509_ssl.js
+++ b/jstests/ssl/upgrade_to_x509_ssl.js
@@ -19,6 +19,18 @@ function authAllNodes() {
load("jstests/ssl/libs/ssl_helpers.js");
+// The mongo shell cannot authenticate as the internal __system user in tests that use x509 for
+// cluster authentication. Choosing the default value for wcMajorityJournalDefault in
+// ReplSetTest cannot be done automatically without the shell performing such authentication, so
+// in this test we must make the choice explicitly, based on the global test options.
+var wcMajorityJournalDefault;
+if (jsTestOptions().noJournal || jsTestOptions().storageEngine == "ephemeralForTest" ||
+ jsTestOptions().storageEngine == "inMemory") {
+ wcMajorityJournalDefault = false;
+} else {
+ wcMajorityJournalDefault = true;
+}
+
opts = {
sslMode: "allowSSL",
sslPEMKeyFile: SERVER_CERT,
@@ -28,9 +40,14 @@ opts = {
sslCAFile: CA_CERT
};
var NUM_NODES = 3;
-var rst = new ReplSetTest({name: 'sslSet', nodes: NUM_NODES, nodeOptions: opts});
+var rst = new ReplSetTest({
+ name: 'sslSet',
+ nodes: NUM_NODES,
+ nodeOptions: opts,
+});
rst.startSet();
-rst.initiate();
+rst.initiate(Object.extend(rst.getReplSetConfig(),
+ {writeConcernMajorityJournalDefault: wcMajorityJournalDefault}));
// Connect to master and do some basic operations
var rstConn1 = rst.getPrimary();
diff --git a/jstests/sslSpecial/upgrade_to_x509_ssl_nossl.js b/jstests/sslSpecial/upgrade_to_x509_ssl_nossl.js
index 47cc303069c..990f4983a91 100644
--- a/jstests/sslSpecial/upgrade_to_x509_ssl_nossl.js
+++ b/jstests/sslSpecial/upgrade_to_x509_ssl_nossl.js
@@ -19,6 +19,18 @@ function authAllNodes() {
}
}
+// The mongo shell cannot authenticate as the internal __system user in tests that use x509 for
+// cluster authentication. Choosing the default value for wcMajorityJournalDefault in
+// ReplSetTest cannot be done automatically without the shell performing such authentication, so
+// in this test we must make the choice explicitly, based on the global test options.
+var wcMajorityJournalDefault;
+if (jsTestOptions().noJournal || jsTestOptions().storageEngine == "ephemeralForTest" ||
+ jsTestOptions().storageEngine == "inMemory") {
+ wcMajorityJournalDefault = false;
+} else {
+ wcMajorityJournalDefault = true;
+}
+
opts = {
sslMode: "disabled",
clusterAuthMode: "keyFile",
@@ -27,7 +39,8 @@ opts = {
var NUM_NODES = 3;
var rst = new ReplSetTest({name: 'sslSet', nodes: NUM_NODES, nodeOptions: opts});
rst.startSet();
-rst.initiate();
+rst.initiate(Object.extend(rst.getReplSetConfig(),
+ {writeConcernMajorityJournalDefault: wcMajorityJournalDefault}));
// Connect to master and do some basic operations
var rstConn1 = rst.getPrimary();
diff --git a/src/mongo/db/db.cpp b/src/mongo/db/db.cpp
index 8f93af50cf0..d57bd570700 100644
--- a/src/mongo/db/db.cpp
+++ b/src/mongo/db/db.cpp
@@ -878,7 +878,7 @@ static void startupConfigActions(const std::vector<std::string>& args) {
}
MONGO_INITIALIZER_WITH_PREREQUISITES(CreateReplicationManager,
- ("SetGlobalEnvironment", "SSLManager"))
+ ("SetGlobalEnvironment", "SSLManager", "default"))
(InitializerContext* context) {
auto serviceContext = getGlobalServiceContext();
repl::StorageInterface::set(serviceContext, stdx::make_unique<repl::StorageInterfaceImpl>());
diff --git a/src/mongo/db/repl/replication_coordinator_impl.cpp b/src/mongo/db/repl/replication_coordinator_impl.cpp
index 532363030e3..cb4b33a6ef6 100644
--- a/src/mongo/db/repl/replication_coordinator_impl.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl.cpp
@@ -305,17 +305,14 @@ std::string ReplicationCoordinatorImpl::SnapshotInfo::toString() const {
ReplicationCoordinatorImpl::ReplicationCoordinatorImpl(
const ReplSettings& settings,
ReplicationCoordinatorExternalState* externalState,
+ NetworkInterface* network,
TopologyCoordinator* topCoord,
StorageInterface* storage,
- int64_t prngSeed,
- NetworkInterface* network,
- ReplicationExecutor* replExec,
- stdx::function<bool()>* isDurableStorageEngineFn)
+ int64_t prngSeed)
: _settings(settings),
_replMode(getReplicationModeFromSettings(settings)),
_topCoord(topCoord),
- _replExecutorIfOwned(replExec ? nullptr : new ReplicationExecutor(network, prngSeed)),
- _replExecutor(replExec ? *replExec : *_replExecutorIfOwned),
+ _replExecutor(network, prngSeed),
_externalState(externalState),
_inShutdown(false),
_memberState(MemberState::RS_STARTUP),
@@ -325,10 +322,7 @@ ReplicationCoordinatorImpl::ReplicationCoordinatorImpl(
_sleptLastElection(false),
_canAcceptNonLocalWrites(!(settings.usingReplSets() || settings.isSlave())),
_canServeNonLocalReads(0U),
- _storage(storage),
- _isDurableStorageEngine(isDurableStorageEngineFn ? *isDurableStorageEngineFn : []() -> bool {
- return getGlobalServiceContext()->getGlobalStorageEngine()->isDurable();
- }) {
+ _storage(storage) {
if (!isReplEnabled()) {
return;
}
@@ -348,33 +342,6 @@ ReplicationCoordinatorImpl::ReplicationCoordinatorImpl(
_externalState->setupNoopWriter(kNoopWriterPeriod);
}
-ReplicationCoordinatorImpl::ReplicationCoordinatorImpl(
- const ReplSettings& settings,
- ReplicationCoordinatorExternalState* externalState,
- NetworkInterface* network,
- TopologyCoordinator* topCoord,
- StorageInterface* storage,
- int64_t prngSeed)
- : ReplicationCoordinatorImpl(
- settings, externalState, topCoord, storage, prngSeed, network, nullptr, nullptr) {}
-
-ReplicationCoordinatorImpl::ReplicationCoordinatorImpl(
- const ReplSettings& settings,
- ReplicationCoordinatorExternalState* externalState,
- TopologyCoordinator* topCoord,
- StorageInterface* storage,
- ReplicationExecutor* replExec,
- int64_t prngSeed,
- stdx::function<bool()>* isDurableStorageEngineFn)
- : ReplicationCoordinatorImpl(settings,
- externalState,
- topCoord,
- storage,
- prngSeed,
- nullptr,
- replExec,
- isDurableStorageEngineFn) {}
-
ReplicationCoordinatorImpl::~ReplicationCoordinatorImpl() {}
void ReplicationCoordinatorImpl::waitForStartUpComplete_forTest() {
@@ -3650,9 +3617,10 @@ void ReplicationCoordinatorImpl::_scheduleElectionWinNotification() {
WriteConcernOptions ReplicationCoordinatorImpl::populateUnsetWriteConcernOptionsSyncMode(
WriteConcernOptions wc) {
+
WriteConcernOptions writeConcern(wc);
if (writeConcern.syncMode == WriteConcernOptions::SyncMode::UNSET) {
- if (writeConcern.wMode == WriteConcernOptions::kMajority && _isDurableStorageEngine() &&
+ if (writeConcern.wMode == WriteConcernOptions::kMajority &&
getWriteConcernMajorityShouldJournal()) {
writeConcern.syncMode = WriteConcernOptions::SyncMode::JOURNAL;
} else {
diff --git a/src/mongo/db/repl/replication_coordinator_impl.h b/src/mongo/db/repl/replication_coordinator_impl.h
index 0478624183f..9b7eb084d2e 100644
--- a/src/mongo/db/repl/replication_coordinator_impl.h
+++ b/src/mongo/db/repl/replication_coordinator_impl.h
@@ -95,14 +95,7 @@ public:
TopologyCoordinator* topoCoord,
StorageInterface* storage,
int64_t prngSeed);
- // Takes ownership of the "externalState" and "topCoord" objects.
- ReplicationCoordinatorImpl(const ReplSettings& settings,
- ReplicationCoordinatorExternalState* externalState,
- TopologyCoordinator* topoCoord,
- StorageInterface* storage,
- ReplicationExecutor* replExec,
- int64_t prngSeed,
- stdx::function<bool()>* isDurableStorageEngineFn);
+
virtual ~ReplicationCoordinatorImpl();
// ================== Members of public ReplicationCoordinator API ===================
@@ -421,14 +414,6 @@ private:
class LoseElectionGuardV1;
class LoseElectionDryRunGuardV1;
- ReplicationCoordinatorImpl(const ReplSettings& settings,
- ReplicationCoordinatorExternalState* externalState,
- TopologyCoordinator* topCoord,
- StorageInterface* storage,
- int64_t prngSeed,
- executor::NetworkInterface* network,
- ReplicationExecutor* replExec,
- stdx::function<bool()>* isDurableStorageEngineFn);
/**
* Configuration states for a replica set node.
*
@@ -1177,11 +1162,8 @@ private:
// Pointer to the TopologyCoordinator owned by this ReplicationCoordinator.
std::unique_ptr<TopologyCoordinator> _topCoord; // (X)
- // If the executer is owned then this will be set, but should not be used.
- // This is only used to clean up and destroy the replExec if owned
- std::unique_ptr<ReplicationExecutor> _replExecutorIfOwned; // (S)
// Executor that drives the topology coordinator.
- ReplicationExecutor& _replExecutor; // (S)
+ ReplicationExecutor _replExecutor; // (S)
// Pointer to the ReplicationCoordinatorExternalState owned by this ReplicationCoordinator.
std::unique_ptr<ReplicationCoordinatorExternalState> _externalState; // (PS)
@@ -1360,9 +1342,6 @@ private:
// Cached copy of the current config protocol version.
AtomicInt64 _protVersion; // (S)
- // Lambda indicating durability of storageEngine.
- stdx::function<bool()> _isDurableStorageEngine; // (R)
-
// This setting affects the Applier prefetcher behavior.
mutable stdx::mutex _indexPrefetchMutex;
ReplSettings::IndexPrefetchConfig _indexPrefetchConfig =
diff --git a/src/mongo/db/repl/replication_coordinator_impl_elect_v1.cpp b/src/mongo/db/repl/replication_coordinator_impl_elect_v1.cpp
index 9e72fdf9bc3..f97cb00acd3 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_elect_v1.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_elect_v1.cpp
@@ -123,9 +123,7 @@ void ReplicationCoordinatorImpl::_startElectSelfV1() {
invariant(_rsConfig.getMemberAt(_selfIndex).isElectable());
- // Note: If we aren't durable, send last applied.
- const auto lastOpTime = _isDurableStorageEngine() ? _getMyLastDurableOpTime_inlock()
- : _getMyLastAppliedOpTime_inlock();
+ const auto lastOpTime = _getMyLastAppliedOpTime_inlock();
if (lastOpTime == OpTime()) {
log() << "not trying to elect self, "
@@ -234,8 +232,7 @@ void ReplicationCoordinatorImpl::_startVoteRequester(long long newTerm) {
LockGuard lk(_topoMutex);
- const auto lastOpTime =
- _isDurableStorageEngine() ? getMyLastDurableOpTime() : getMyLastAppliedOpTime();
+ const auto lastOpTime = getMyLastAppliedOpTime();
_voteRequester.reset(new VoteRequester);
StatusWith<ReplicationExecutor::EventHandle> nextPhaseEvh = _voteRequester->start(
diff --git a/src/mongo/db/repl/replication_coordinator_test_fixture.cpp b/src/mongo/db/repl/replication_coordinator_test_fixture.cpp
index f7101ca6fa5..f293486b946 100644
--- a/src/mongo/db/repl/replication_coordinator_test_fixture.cpp
+++ b/src/mongo/db/repl/replication_coordinator_test_fixture.cpp
@@ -54,6 +54,10 @@ using executor::NetworkInterfaceMock;
using executor::RemoteCommandRequest;
using executor::RemoteCommandResponse;
+ReplicationExecutor* ReplCoordTest::getReplExec() {
+ return _repl->getExecutor();
+}
+
ReplicaSetConfig ReplCoordTest::assertMakeRSConfig(const BSONObj& configBson) {
ReplicaSetConfig config;
ASSERT_OK(config.initialize(configBson));
@@ -118,17 +122,10 @@ void ReplCoordTest::init() {
TopologyCoordinatorImpl::Options settings;
_topo = new TopologyCoordinatorImpl(settings);
- stdx::function<bool()> _durablityLambda = [this]() -> bool { return _isStorageEngineDurable; };
_net = new NetworkInterfaceMock;
- _replExec = stdx::make_unique<ReplicationExecutor>(_net, seed);
_externalState = new ReplicationCoordinatorExternalStateMock;
- _repl.reset(new ReplicationCoordinatorImpl(_settings,
- _externalState,
- _topo,
- storageInterface,
- _replExec.get(),
- seed,
- &_durablityLambda));
+ _repl.reset(new ReplicationCoordinatorImpl(
+ _settings, _externalState, _net, _topo, storageInterface, seed));
auto service = getGlobalServiceContext();
service->setFastClockSource(stdx::make_unique<executor::NetworkInterfaceMockClockSource>(_net));
service->setPreciseClockSource(
diff --git a/src/mongo/db/repl/replication_coordinator_test_fixture.h b/src/mongo/db/repl/replication_coordinator_test_fixture.h
index fc34114cd0d..3d8587ffbd1 100644
--- a/src/mongo/db/repl/replication_coordinator_test_fixture.h
+++ b/src/mongo/db/repl/replication_coordinator_test_fixture.h
@@ -103,9 +103,7 @@ protected:
/**
* Gets the replication executor under test.
*/
- ReplicationExecutor* getReplExec() {
- return _replExec.get();
- }
+ ReplicationExecutor* getReplExec();
/**
* Gets the replication coordinator under test.
@@ -243,16 +241,6 @@ protected:
void replyToReceivedHeartbeat();
void replyToReceivedHeartbeatV1();
- /**
- * Sets how the test fixture reports the storage engine's durability feature.
- */
- void setStorageEngineDurable(bool val = true) {
- _isStorageEngineDurable = val;
- }
- bool isStorageEngineDurable() const {
- return _isStorageEngineDurable;
- }
-
void simulateEnoughHeartbeatsForAllNodesUp();
/**
@@ -276,12 +264,10 @@ private:
TopologyCoordinatorImpl* _topo = nullptr;
// Owned by ReplicationExecutor
executor::NetworkInterfaceMock* _net = nullptr;
- std::unique_ptr<ReplicationExecutor> _replExec;
// Owned by ReplicationCoordinatorImpl
ReplicationCoordinatorExternalStateMock* _externalState = nullptr;
ReplSettings _settings;
bool _callShutdown = false;
- bool _isStorageEngineDurable = true;
ServiceContext::UniqueClient _client = getGlobalServiceContext()->makeClient("testClient");
};
diff --git a/src/mongo/shell/replsettest.js b/src/mongo/shell/replsettest.js
index ec3c4d7d5fb..56574991153 100644
--- a/src/mongo/shell/replsettest.js
+++ b/src/mongo/shell/replsettest.js
@@ -136,12 +136,40 @@ var ReplSetTest = function(opts) {
return self.liveNodes.master || false;
}
+ function asCluster(conn, fn) {
+ if (self.keyFile) {
+ return authutil.asCluster(conn, self.keyFile, fn);
+ } else {
+ return fn();
+ }
+ }
+
/**
- * Returns 'true' if the test has been configured to run without journaling enabled.
+ * Returns 'true' if the "conn" has been configured to run without journaling enabled.
*/
- function _isRunningWithoutJournaling() {
- return jsTestOptions().noJournal || jsTestOptions().storageEngine == 'inMemory' ||
- jsTestOptions().storageEngine == 'ephemeralForTest';
+ function _isRunningWithoutJournaling(conn) {
+ var result = asCluster(conn, function() {
+ var serverStatus = assert.commandWorked(conn.adminCommand({serverStatus: 1}));
+ if (serverStatus.storageEngine.hasOwnProperty('persistent')) {
+ if (!serverStatus.storageEngine.persistent) {
+ return true;
+ }
+ } else if (serverStatus.storageEngine.name == 'inMemory' ||
+ serverStatus.storageEngine.name == 'ephemeralForTest') {
+ return true;
+ }
+ var cmdLineOpts = assert.commandWorked(conn.adminCommand({getCmdLineOpts: 1}));
+ var getWithDefault = function(dict, key, dflt) {
+ if (dict[key] === undefined)
+ return dflt;
+ return dict[key];
+ };
+ return !getWithDefault(
+ getWithDefault(getWithDefault(cmdLineOpts.parsed, "storage", {}), "journal", {}),
+ "enabled",
+ true);
+ });
+ return result;
}
/**
@@ -194,16 +222,9 @@ var ReplSetTest = function(opts) {
if (!conn)
return false;
- var getStatusFunc = function() {
+ asCluster(conn, function() {
status = conn.getDB('admin').runCommand({replSetGetStatus: 1});
- };
-
- if (self.keyFile) {
- // Authenticate connection used for running replSetGetStatus if needed
- authutil.asCluster(conn, self.keyFile, getStatusFunc);
- } else {
- getStatusFunc();
- }
+ });
} catch (ex) {
print("ReplSetTest waitForIndicator could not get status: " + tojson(ex));
return false;
@@ -319,7 +340,7 @@ var ReplSetTest = function(opts) {
assert.commandWorked(conn.getDB("admin").runCommand({replSetGetStatus: 1}));
var opTimeType = "durableOpTime";
- if (_isRunningWithoutJournaling()) {
+ if (_isRunningWithoutJournaling(conn)) {
opTimeType = "appliedOpTime";
}
var opTime = replSetStatus.optimes[opTimeType];
@@ -451,6 +472,10 @@ var ReplSetTest = function(opts) {
this.startSet = function(options) {
print("ReplSetTest starting set");
+ if (options && options.keyFile) {
+ self.keyFile = options.keyFile;
+ }
+
var nodes = [];
for (var n = 0; n < this.ports.length; n++) {
nodes.push(this.start(n, options));
@@ -649,7 +674,7 @@ var ReplSetTest = function(opts) {
return config;
}
- if (_isRunningWithoutJournaling()) {
+ if (_isRunningWithoutJournaling(replNode)) {
config[wcMajorityJournalField] = false;
}