-rw-r--r--  src/mongo/db/commands/compact.cpp                              2
-rw-r--r--  src/mongo/db/dbcommands.cpp                                    2
-rw-r--r--  src/mongo/db/repl/bgsync.cpp                                   6
-rw-r--r--  src/mongo/db/repl/oplogreader.cpp                              2
-rw-r--r--  src/mongo/db/repl/repl_coordinator.h                           4
-rw-r--r--  src/mongo/db/repl/repl_coordinator_impl.cpp                   90
-rw-r--r--  src/mongo/db/repl/repl_coordinator_impl.h                     14
-rw-r--r--  src/mongo/db/repl/repl_coordinator_impl_elect.cpp              8
-rw-r--r--  src/mongo/db/repl/repl_coordinator_impl_elect_test.cpp         4
-rw-r--r--  src/mongo/db/repl/repl_coordinator_impl_heartbeat.cpp         18
-rw-r--r--  src/mongo/db/repl/repl_coordinator_impl_heartbeat_test.cpp     2
-rw-r--r--  src/mongo/db/repl/repl_coordinator_impl_reconfig_test.cpp      2
-rw-r--r--  src/mongo/db/repl/repl_coordinator_impl_test.cpp              86
-rw-r--r--  src/mongo/db/repl/repl_coordinator_mock.cpp                    2
-rw-r--r--  src/mongo/db/repl/repl_coordinator_mock.h                      2
-rw-r--r--  src/mongo/db/repl/repl_coordinator_test_fixture.cpp           26
-rw-r--r--  src/mongo/db/repl/resync.cpp                                    2
-rw-r--r--  src/mongo/db/repl/rs_rollback.cpp                               4
-rw-r--r--  src/mongo/db/repl/rs_sync.cpp                                   4
-rw-r--r--  src/mongo/db/repl/sync_source_feedback.cpp                      6
-rw-r--r--  src/mongo/db/repl/sync_tail.cpp                                 8
-rw-r--r--  src/mongo/db/repl/topology_coordinator_impl.cpp               176
-rw-r--r--  src/mongo/db/repl/topology_coordinator_impl.h                   3
-rw-r--r--  src/mongo/db/ttl.cpp                                            2
24 files changed, 238 insertions, 237 deletions
diff --git a/src/mongo/db/commands/compact.cpp b/src/mongo/db/commands/compact.cpp
index 00d754b6b99..62007df42bc 100644
--- a/src/mongo/db/commands/compact.cpp
+++ b/src/mongo/db/commands/compact.cpp
@@ -87,7 +87,7 @@ namespace mongo {
const std::string nsToCompact = parseNsCollectionRequired(db, cmdObj);
repl::ReplicationCoordinator* replCoord = repl::getGlobalReplicationCoordinator();
- if (replCoord->getCurrentMemberState().primary() && !cmdObj["force"].trueValue()) {
+ if (replCoord->getMemberState().primary() && !cmdObj["force"].trueValue()) {
errmsg = "will not run compact on an active replica set primary as this is a slow blocking operation. use force:true to force";
return false;
}
diff --git a/src/mongo/db/dbcommands.cpp b/src/mongo/db/dbcommands.cpp
index 509cbf0f200..45b8fcad1d8 100644
--- a/src/mongo/db/dbcommands.cpp
+++ b/src/mongo/db/dbcommands.cpp
@@ -1412,7 +1412,7 @@ namespace mongo {
if (!c->maintenanceOk()
&& replCoord->getReplicationMode() == repl::ReplicationCoordinator::modeReplSet
&& !replCoord->canAcceptWritesForDatabase(dbname)
- && !replCoord->getCurrentMemberState().secondary()) {
+ && !replCoord->getMemberState().secondary()) {
result.append( "note" , "from execCommand" );
appendCommandStatus(result, false, "node is recovering");
return;
diff --git a/src/mongo/db/repl/bgsync.cpp b/src/mongo/db/repl/bgsync.cpp
index 4ca424574fc..6766c273f6d 100644
--- a/src/mongo/db/repl/bgsync.cpp
+++ b/src/mongo/db/repl/bgsync.cpp
@@ -160,7 +160,7 @@ namespace {
}
void BackgroundSync::_producerThread() {
- const MemberState state = _replCoord->getCurrentMemberState();
+ const MemberState state = _replCoord->getMemberState();
// we want to pause when the state changes to primary
if (_replCoord->isWaitingForApplierToDrain() || state.primary()) {
if (!_pause) {
@@ -279,7 +279,7 @@ namespace {
// If we are transitioning to primary state, we need to leave
// this loop in order to go into bgsync-pause mode.
if (_replCoord->isWaitingForApplierToDrain() ||
- _replCoord->getCurrentMemberState().primary()) {
+ _replCoord->getMemberState().primary()) {
return;
}
@@ -321,7 +321,7 @@ namespace {
// If we are transitioning to primary state, we need to leave
// this loop in order to go into bgsync-pause mode.
if (_replCoord->isWaitingForApplierToDrain() ||
- _replCoord->getCurrentMemberState().primary()) {
+ _replCoord->getMemberState().primary()) {
LOG(1) << "waiting for draining or we are primary, not adding more ops to buffer";
return;
}
diff --git a/src/mongo/db/repl/oplogreader.cpp b/src/mongo/db/repl/oplogreader.cpp
index 45089126825..07384ae01ba 100644
--- a/src/mongo/db/repl/oplogreader.cpp
+++ b/src/mongo/db/repl/oplogreader.cpp
@@ -173,7 +173,7 @@ namespace repl {
if (!worked) {
warning() << "Failed to transition into "
<< MemberState(MemberState::RS_RECOVERING)
- << ". Current state: " << replCoord->getCurrentMemberState();
+ << ". Current state: " << replCoord->getMemberState();
}
return;
}
diff --git a/src/mongo/db/repl/repl_coordinator.h b/src/mongo/db/repl/repl_coordinator.h
index 5a9e2d972a3..90564f52f31 100644
--- a/src/mongo/db/repl/repl_coordinator.h
+++ b/src/mongo/db/repl/repl_coordinator.h
@@ -131,7 +131,7 @@ namespace repl {
* Returns the current replica set state of this node (PRIMARY, SECONDARY, STARTUP, etc).
* It is invalid to call this unless getReplicationMode() == modeReplSet.
*/
- virtual MemberState getCurrentMemberState() const = 0;
+ virtual MemberState getMemberState() const = 0;
/**
* Returns true if this node is in state PRIMARY or SECONDARY.
@@ -139,7 +139,7 @@ namespace repl {
* It is invalid to call this unless getReplicationMode() == modeReplSet.
*
* This method may be optimized to reduce synchronization overhead compared to
- * reading the current member state with getCurrentMemberState().
+ * reading the current member state with getMemberState().
*/
virtual bool isInPrimaryOrSecondaryState() const = 0;
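For reference, a minimal caller sketch of the renamed accessor, following the call sites shown above in compact.cpp and dbcommands.cpp. The header path and the example function name are assumptions for illustration only and are not part of this change:

    // Illustrative sketch, not part of the commit; header path assumed.
    #include "mongo/db/repl/repl_coordinator_global.h"

    namespace mongo {
        bool exampleIsPrimary() {
            repl::ReplicationCoordinator* replCoord = repl::getGlobalReplicationCoordinator();
            // Per the contract above, getMemberState() is only valid in replica-set mode.
            if (replCoord->getReplicationMode() != repl::ReplicationCoordinator::modeReplSet) {
                return false;
            }
            return replCoord->getMemberState().primary();
        }
    }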
diff --git a/src/mongo/db/repl/repl_coordinator_impl.cpp b/src/mongo/db/repl/repl_coordinator_impl.cpp
index 37e937a5a2b..5b333ab70e6 100644
--- a/src/mongo/db/repl/repl_coordinator_impl.cpp
+++ b/src/mongo/db/repl/repl_coordinator_impl.cpp
@@ -157,10 +157,10 @@ namespace {
_replExecutor(network, prngSeed),
_externalState(externalState),
_inShutdown(false),
- _currentState(MemberState::RS_STARTUP),
+ _memberState(MemberState::RS_STARTUP),
_isWaitingForDrainToComplete(false),
_rsConfigState(kConfigPreStart),
- _thisMembersConfigIndex(-1),
+ _selfIndex(-1),
_sleptLastElection(false),
_canAcceptNonLocalWrites(!(settings.usingReplSets() || settings.slave)),
_canServeNonLocalReads(0U) {
@@ -383,13 +383,13 @@ namespace {
return _replMode;
}
- MemberState ReplicationCoordinatorImpl::getCurrentMemberState() const {
+ MemberState ReplicationCoordinatorImpl::getMemberState() const {
boost::lock_guard<boost::mutex> lk(_mutex);
- return _getCurrentMemberState_inlock();
+ return _getMemberState_inlock();
}
- MemberState ReplicationCoordinatorImpl::_getCurrentMemberState_inlock() const {
- return _currentState;
+ MemberState ReplicationCoordinatorImpl::_getMemberState_inlock() const {
+ return _memberState;
}
Seconds ReplicationCoordinatorImpl::getSlaveDelaySecs() const {
@@ -397,8 +397,8 @@ namespace {
invariant(_rsConfig.isInitialized());
uassert(28524,
"Node not a member of the current set configuration",
- _thisMembersConfigIndex != -1);
- return _rsConfig.getMemberAt(_thisMembersConfigIndex).getSlaveDelay();
+ _selfIndex != -1);
+ return _rsConfig.getMemberAt(_selfIndex).getSlaveDelay();
}
void ReplicationCoordinatorImpl::clearSyncSourceBlacklist() {
@@ -488,7 +488,7 @@ namespace {
_topCoord->setFollowerMode(newState.s);
const PostMemberStateUpdateAction action =
- _updateCurrentMemberStateFromTopologyCoordinator_inlock();
+ _updateMemberStateFromTopologyCoordinator_inlock();
*success = true;
_replExecutor.signalEvent(finishedSettingFollowerMode);
lk.unlock();
@@ -590,7 +590,7 @@ namespace {
SlaveInfoVector oldSlaveInfos;
_slaveInfo.swap(oldSlaveInfos);
- if (_thisMembersConfigIndex == -1) {
+ if (_selfIndex == -1) {
// If we aren't in the config then the only data we care about is for ourself
for (SlaveInfoVector::const_iterator it = oldSlaveInfos.begin();
it != oldSlaveInfos.end(); ++it) {
@@ -615,7 +615,7 @@ namespace {
for (SlaveInfoVector::const_iterator it = oldSlaveInfos.begin();
it != oldSlaveInfos.end(); ++it) {
if ((it->memberId == memberId && it->hostAndPort == memberHostAndPort)
- || (i == _thisMembersConfigIndex && it->self)) {
+ || (i == _selfIndex && it->self)) {
slaveInfo = *it;
}
}
@@ -635,12 +635,12 @@ namespace {
}
else {
invariant(_settings.usingReplSets());
- if (_thisMembersConfigIndex == -1) {
+ if (_selfIndex == -1) {
invariant(_slaveInfo.size() == 1);
return 0;
}
else {
- return _thisMembersConfigIndex;
+ return _selfIndex;
}
}
}
@@ -697,7 +697,7 @@ namespace {
if (_getReplicationMode_inlock() != modeReplSet) {
return;
}
- if (_getCurrentMemberState_inlock().primary()) {
+ if (_getMemberState_inlock().primary()) {
return;
}
lock->unlock();
@@ -724,7 +724,7 @@ namespace {
Status ReplicationCoordinatorImpl::_setLastOptime_inlock(
const UpdatePositionArgs::UpdateInfo& args) {
- if (_thisMembersConfigIndex == -1) {
+ if (_selfIndex == -1) {
// Ignore updates when we're in state REMOVED
return Status(ErrorCodes::NotMasterOrSecondaryCode,
"Received replSetUpdatePosition command but we are in state REMOVED");
@@ -732,7 +732,7 @@ namespace {
invariant(_getReplicationMode_inlock() == modeReplSet);
if (args.rid == _getMyRID_inlock() ||
- args.memberId == _rsConfig.getMemberAt(_thisMembersConfigIndex).getId()) {
+ args.memberId == _rsConfig.getMemberAt(_selfIndex).getId()) {
// Do not let remote nodes tell us what our optime is.
return Status::OK();
}
@@ -937,7 +937,7 @@ namespace {
return StatusAndDuration(Status::OK(), Milliseconds(timer->millis()));
}
- if (replMode == modeReplSet && !_currentState.primary()) {
+ if (replMode == modeReplSet && !_memberState.primary()) {
return StatusAndDuration(Status(ErrorCodes::NotMaster,
"Not master while waiting for replication"),
Milliseconds(timer->millis()));
@@ -1011,7 +1011,7 @@ namespace {
const Date_t stepDownUntil(startTime.millis + stepdownTime.total_milliseconds());
const Date_t waitUntil(startTime.millis + waitTime.total_milliseconds());
- if (!getCurrentMemberState().primary()) {
+ if (!getMemberState().primary()) {
// Note this check is inherently racy - it's always possible for the node to
// stepdown from some other path before we acquire the global shared lock, but
// that's okay because we are resiliant to that happening in _stepDownContinue.
@@ -1137,7 +1137,7 @@ namespace {
boost::unique_lock<boost::mutex> lk(_mutex);
const PostMemberStateUpdateAction action =
- _updateCurrentMemberStateFromTopologyCoordinator_inlock();
+ _updateMemberStateFromTopologyCoordinator_inlock();
lk.unlock();
_performPostMemberStateUpdateAction(action);
*result = Status::OK();
@@ -1194,7 +1194,7 @@ namespace {
if (_settings.usingReplSets()) {
boost::lock_guard<boost::mutex> lock(_mutex);
if (_getReplicationMode_inlock() == modeReplSet &&
- _getCurrentMemberState_inlock().primary()) {
+ _getMemberState_inlock().primary()) {
return true;
}
return false;
@@ -1278,7 +1278,7 @@ namespace {
return false;
}
// see SERVER-6671
- MemberState ms = _getCurrentMemberState_inlock();
+ MemberState ms = _getMemberState_inlock();
switch ( ms.s ) {
case MemberState::RS_SECONDARY:
case MemberState::RS_RECOVERING:
@@ -1310,7 +1310,7 @@ namespace {
}
int ReplicationCoordinatorImpl::_getMyId_inlock() const {
- const MemberConfig& self = _rsConfig.getMemberAt(_thisMembersConfigIndex);
+ const MemberConfig& self = _rsConfig.getMemberAt(_selfIndex);
return self.getId();
}
@@ -1428,7 +1428,7 @@ namespace {
entry.append("optime", itr->opTime);
entry.append("host", itr->hostAndPort.toString());
if (_getReplicationMode_inlock() == modeReplSet) {
- if (_thisMembersConfigIndex == -1) {
+ if (_selfIndex == -1) {
continue;
}
invariant(itr->memberId >= 0);
@@ -1498,7 +1498,7 @@ namespace {
}
boost::unique_lock<boost::mutex> lk(_mutex);
- if (_getCurrentMemberState_inlock().primary()) {
+ if (_getMemberState_inlock().primary()) {
*result = Status(ErrorCodes::NotSecondary, "primaries can't modify maintenance mode");
return;
}
@@ -1523,7 +1523,7 @@ namespace {
}
const PostMemberStateUpdateAction action =
- _updateCurrentMemberStateFromTopologyCoordinator_inlock();
+ _updateMemberStateFromTopologyCoordinator_inlock();
*result = Status::OK();
lk.unlock();
_performPostMemberStateUpdateAction(action);
@@ -1631,7 +1631,7 @@ namespace {
getMyLastOptime(),
response);
if ((outStatus->isOK() || *outStatus == ErrorCodes::InvalidReplicaSetConfig) &&
- _thisMembersConfigIndex < 0) {
+ _selfIndex < 0) {
// If this node does not belong to the configuration it knows about, send heartbeats
// back to any node that sends us a heartbeat, in case one of those remote nodes has
// a configuration that contains us. Chances are excellent that it will, since that
@@ -1675,10 +1675,10 @@ namespace {
invariant(_rsConfig.isInitialized());
- if (!args.force && !_getCurrentMemberState_inlock().primary()) {
+ if (!args.force && !_getMemberState_inlock().primary()) {
return Status(ErrorCodes::NotMaster, str::stream() <<
"replSetReconfig should only be run on PRIMARY, but my state is " <<
- _getCurrentMemberState_inlock().toString() <<
+ _getMemberState_inlock().toString() <<
"; use the \"force\" argument to override");
}
@@ -1891,16 +1891,16 @@ namespace {
}
ReplicationCoordinatorImpl::PostMemberStateUpdateAction
- ReplicationCoordinatorImpl::_updateCurrentMemberStateFromTopologyCoordinator_inlock() {
+ ReplicationCoordinatorImpl::_updateMemberStateFromTopologyCoordinator_inlock() {
const MemberState newState = _topCoord->getMemberState();
- if (newState == _currentState) {
+ if (newState == _memberState) {
if (_topCoord->getRole() == TopologyCoordinator::Role::candidate) {
return kActionWinElection;
}
return kActionNone;
}
PostMemberStateUpdateAction result;
- if (_currentState.primary() || newState.removed()) {
+ if (_memberState.primary() || newState.removed()) {
// Wake up any threads blocked in awaitReplication, close connections, etc.
for (std::vector<WaiterInfo*>::iterator it = _replicationWaiterList.begin();
it != _replicationWaiterList.end(); ++it) {
@@ -1913,7 +1913,7 @@ namespace {
result = kActionCloseAllConnections;
}
else {
- if (_currentState.secondary() && !newState.primary()) {
+ if (_memberState.secondary() && !newState.primary()) {
// Switching out of SECONDARY, but not to PRIMARY.
_canServeNonLocalReads.store(0U);
}
@@ -1930,7 +1930,7 @@ namespace {
result = kActionWinElection;
}
- _currentState = newState;
+ _memberState = newState;
log() << "transition to " << newState.toString() << rsLog;
return result;
}
@@ -1954,7 +1954,7 @@ namespace {
_topCoord->processWinElection(_electionId, getNextGlobalOptime());
_isWaitingForDrainToComplete = true;
const PostMemberStateUpdateAction nextAction =
- _updateCurrentMemberStateFromTopologyCoordinator_inlock();
+ _updateMemberStateFromTopologyCoordinator_inlock();
invariant(nextAction != kActionWinElection);
lk.unlock();
_performPostMemberStateUpdateAction(nextAction);
@@ -2057,12 +2057,12 @@ namespace {
myOptime);
_rsConfig = newConfig;
log() << "new replica set config in use: " << _rsConfig.toBSON() << rsLog;
- _thisMembersConfigIndex = myIndex;
+ _selfIndex = myIndex;
const PostMemberStateUpdateAction action =
- _updateCurrentMemberStateFromTopologyCoordinator_inlock();
+ _updateMemberStateFromTopologyCoordinator_inlock();
_updateSlaveInfoFromConfig_inlock();
- if (_thisMembersConfigIndex >= 0) {
+ if (_selfIndex >= 0) {
// Don't send heartbeats if we're not in the config, if we get re-added one of the
// nodes in the set will contact us.
_startHeartbeats();
@@ -2097,7 +2097,7 @@ namespace {
somethingChanged = true;
}
- if (somethingChanged && !_getCurrentMemberState_inlock().primary()) {
+ if (somethingChanged && !_getMemberState_inlock().primary()) {
lock.unlock();
_externalState->forwardSlaveProgress(); // Must do this outside _mutex
}
@@ -2110,7 +2110,7 @@ namespace {
boost::unique_lock<boost::mutex> lock(_mutex);
if (_getReplicationMode_inlock() == modeReplSet) {
- if (_thisMembersConfigIndex == -1) {
+ if (_selfIndex == -1) {
// Ignore updates when we're in state REMOVED
return Status(ErrorCodes::NotMasterOrSecondaryCode,
"Received replSetUpdatePosition command but we are in state REMOVED");
@@ -2131,7 +2131,7 @@ namespace {
slaveInfo->rid = handshake.getRid();
slaveInfo->hostAndPort = member->getHostAndPort();
- if (!_getCurrentMemberState_inlock().primary()) {
+ if (!_getMemberState_inlock().primary()) {
lock.unlock();
_externalState->forwardSlaveHandshake(); // must do outside _mutex
}
@@ -2156,10 +2156,10 @@ namespace {
bool ReplicationCoordinatorImpl::buildsIndexes() {
boost::lock_guard<boost::mutex> lk(_mutex);
- if (_thisMembersConfigIndex == -1) {
+ if (_selfIndex == -1) {
return true;
}
- const MemberConfig& self = _rsConfig.getMemberAt(_thisMembersConfigIndex);
+ const MemberConfig& self = _rsConfig.getMemberAt(_selfIndex);
return self.shouldBuildIndexes();
}
@@ -2187,12 +2187,12 @@ namespace {
invariant(_settings.usingReplSets());
std::vector<HostAndPort> nodes;
- if (_thisMembersConfigIndex == -1) {
+ if (_selfIndex == -1) {
return nodes;
}
for (int i = 0; i < _rsConfig.getNumMembers(); ++i) {
- if (i == _thisMembersConfigIndex)
+ if (i == _selfIndex)
continue;
nodes.push_back(_rsConfig.getMemberAt(i).getHostAndPort());
@@ -2242,7 +2242,7 @@ namespace {
return Status(ErrorCodes::NoReplicationEnabled, "not running with --replSet");
}
- if (getCurrentMemberState().startup()) {
+ if (getMemberState().startup()) {
result->append("info", "run rs.initiate(...) if not yet done for the set");
return Status(ErrorCodes::NotYetInitialized, "no replset config has been received");
}
diff --git a/src/mongo/db/repl/repl_coordinator_impl.h b/src/mongo/db/repl/repl_coordinator_impl.h
index a197500f2d8..3ba31cc9cfc 100644
--- a/src/mongo/db/repl/repl_coordinator_impl.h
+++ b/src/mongo/db/repl/repl_coordinator_impl.h
@@ -86,7 +86,7 @@ namespace repl {
virtual Mode getReplicationMode() const;
- virtual MemberState getCurrentMemberState() const;
+ virtual MemberState getMemberState() const;
virtual bool isInPrimaryOrSecondaryState() const;
@@ -275,7 +275,7 @@ namespace repl {
};
/**
- * Type describing actions to take after a change to the MemberState _currentState.
+ * Type describing actions to take after a change to the MemberState _memberState.
*/
enum PostMemberStateUpdateAction {
kActionNone,
@@ -557,7 +557,7 @@ namespace repl {
int targetIndex);
- MemberState _getCurrentMemberState_inlock() const;
+ MemberState _getMemberState_inlock() const;
/**
* Returns the current replication mode. This method requires the caller to be holding
@@ -608,14 +608,14 @@ namespace repl {
void _setConfigState_inlock(ConfigState newState);
/**
- * Updates the cached value, _currentState, to match _topCoord's reported
+ * Updates the cached value, _memberState, to match _topCoord's reported
* member state, from getMemberState().
*
* Returns an enum indicating what action to take after releasing _mutex, if any.
* Call performPostMemberStateUpdateAction on the return value after releasing
* _mutex.
*/
- PostMemberStateUpdateAction _updateCurrentMemberStateFromTopologyCoordinator_inlock();
+ PostMemberStateUpdateAction _updateMemberStateFromTopologyCoordinator_inlock();
/**
* Performs a post member-state update action. Do not call while holding _mutex.
@@ -813,7 +813,7 @@ namespace repl {
SlaveInfoVector _slaveInfo; // (M)
// Current ReplicaSet state.
- MemberState _currentState; // (MX)
+ MemberState _memberState; // (MX)
// True if we are waiting for the applier to finish draining.
bool _isWaitingForDrainToComplete; // (M)
@@ -831,7 +831,7 @@ namespace repl {
ReplicaSetConfig _rsConfig; // (MX)
// This member's index position in the current config.
- int _thisMembersConfigIndex; // (MX)
+ int _selfIndex; // (MX)
// Vector of events that should be signaled whenever new heartbeat data comes in.
std::vector<ReplicationExecutor::EventHandle> _stepDownWaiters; // (X)
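For reference, the update pattern the comments above describe, condensed from the repl_coordinator_impl.cpp hunks in this commit (illustrative sketch only; the enclosing ReplicationCoordinatorImpl member function is assumed):

    // Inside a ReplicationCoordinatorImpl member function:
    boost::unique_lock<boost::mutex> lk(_mutex);
    const PostMemberStateUpdateAction action =
        _updateMemberStateFromTopologyCoordinator_inlock();  // refresh the cached _memberState (MX)
    lk.unlock();  // the post-update action must run without holding _mutex
    _performPostMemberStateUpdateAction(action);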
diff --git a/src/mongo/db/repl/repl_coordinator_impl_elect.cpp b/src/mongo/db/repl/repl_coordinator_impl_elect.cpp
index cc2b3f27faf..f6c99d5e0ec 100644
--- a/src/mongo/db/repl/repl_coordinator_impl_elect.cpp
+++ b/src/mongo/db/repl/repl_coordinator_impl_elect.cpp
@@ -119,7 +119,7 @@ namespace {
&_electionFinishedEvent);
- invariant(_rsConfig.getMemberAt(_thisMembersConfigIndex).isElectable());
+ invariant(_rsConfig.getMemberAt(_selfIndex).isElectable());
OpTime lastOpTimeApplied(_getMyLastOptime_inlock());
if (lastOpTimeApplied == OpTime()) {
@@ -133,7 +133,7 @@ namespace {
&_replExecutor,
lastOpTimeApplied,
_rsConfig,
- _thisMembersConfigIndex,
+ _selfIndex,
_topCoord->getMaybeUpHostAndPorts(),
stdx::bind(&ReplicationCoordinatorImpl::_onFreshnessCheckComplete, this));
if (nextPhaseEvh.getStatus() == ErrorCodes::ShutdownInProgress) {
@@ -166,7 +166,7 @@ namespace {
case FreshnessChecker::None:
break;
case FreshnessChecker::FreshnessTie:
- if ((_thisMembersConfigIndex != 0) && !_sleptLastElection) {
+ if ((_selfIndex != 0) && !_sleptLastElection) {
const long long ms = _replExecutor.nextRandomInt64(1000) + 50;
const Date_t nextCandidateTime = now + ms;
log() << "replSet possible election tie; sleeping " << ms << "ms until " <<
@@ -204,7 +204,7 @@ namespace {
StatusWith<ReplicationExecutor::EventHandle> nextPhaseEvh = _electCmdRunner->start(
&_replExecutor,
_rsConfig,
- _thisMembersConfigIndex,
+ _selfIndex,
_topCoord->getMaybeUpHostAndPorts(),
stdx::bind(&ReplicationCoordinatorImpl::_onElectCmdRunnerComplete, this));
if (nextPhaseEvh.getStatus() == ErrorCodes::ShutdownInProgress) {
diff --git a/src/mongo/db/repl/repl_coordinator_impl_elect_test.cpp b/src/mongo/db/repl/repl_coordinator_impl_elect_test.cpp
index 9405109799e..f08fe206e88 100644
--- a/src/mongo/db/repl/repl_coordinator_impl_elect_test.cpp
+++ b/src/mongo/db/repl/repl_coordinator_impl_elect_test.cpp
@@ -140,8 +140,8 @@ namespace {
getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY);
- ASSERT(getReplCoord()->getCurrentMemberState().primary()) <<
- getReplCoord()->getCurrentMemberState().toString();
+ ASSERT(getReplCoord()->getMemberState().primary()) <<
+ getReplCoord()->getMemberState().toString();
ASSERT(getReplCoord()->isWaitingForApplierToDrain());
// Since we're still in drain mode, expect that we report ismaster: false, issecondary:true.
diff --git a/src/mongo/db/repl/repl_coordinator_impl_heartbeat.cpp b/src/mongo/db/repl/repl_coordinator_impl_heartbeat.cpp
index a24b56c7857..20627cda7c2 100644
--- a/src/mongo/db/repl/repl_coordinator_impl_heartbeat.cpp
+++ b/src/mongo/db/repl/repl_coordinator_impl_heartbeat.cpp
@@ -178,7 +178,7 @@ namespace {
void ReplicationCoordinatorImpl::_updateOpTimeFromHeartbeat_inlock(int targetIndex,
OpTime optime) {
- invariant(_thisMembersConfigIndex >= 0);
+ invariant(_selfIndex >= 0);
invariant(targetIndex >= 0);
SlaveInfo& slaveInfo = _slaveInfo[targetIndex];
@@ -197,10 +197,10 @@ namespace {
switch (action.getAction()) {
case HeartbeatResponseAction::NoAction:
// Update the cached member state if different than the current topology member state
- if (_currentState != _topCoord->getMemberState()) {
+ if (_memberState != _topCoord->getMemberState()) {
boost::unique_lock<boost::mutex> lk(_mutex);
const PostMemberStateUpdateAction postUpdateAction =
- _updateCurrentMemberStateFromTopologyCoordinator_inlock();
+ _updateMemberStateFromTopologyCoordinator_inlock();
lk.unlock();
_performPostMemberStateUpdateAction(postUpdateAction);
}
@@ -213,11 +213,11 @@ namespace {
_startElectSelf();
break;
case HeartbeatResponseAction::StepDownSelf:
- invariant(action.getPrimaryConfigIndex() == _thisMembersConfigIndex);
+ invariant(action.getPrimaryConfigIndex() == _selfIndex);
_heartbeatStepDownStart();
break;
case HeartbeatResponseAction::StepDownRemotePrimary: {
- invariant(action.getPrimaryConfigIndex() != _thisMembersConfigIndex);
+ invariant(action.getPrimaryConfigIndex() != _selfIndex);
_requestRemotePrimaryStepdown(
_rsConfig.getMemberAt(action.getPrimaryConfigIndex()).getHostAndPort());
break;
@@ -282,7 +282,7 @@ namespace {
boost::unique_lock<boost::mutex> lk(_mutex);
_topCoord->stepDownIfPending();
const PostMemberStateUpdateAction action =
- _updateCurrentMemberStateFromTopologyCoordinator_inlock();
+ _updateMemberStateFromTopologyCoordinator_inlock();
lk.unlock();
_performPostMemberStateUpdateAction(action);
}
@@ -442,7 +442,7 @@ namespace {
// Make sure that the reconfigFinishFn doesn't finish until we've reset
// _heartbeatReconfigThread.
lk.lock();
- if (_currentState.primary()) {
+ if (_memberState.primary()) {
// If the primary is receiving a heartbeat reconfig, that strongly suggests
// that there has been a force reconfiguration. In any event, it might lead
// to this node stepping down as primary, so we'd better do it with the global
@@ -467,7 +467,7 @@ namespace {
invariant(!_rsConfig.isInitialized() ||
_rsConfig.getConfigVersion() < newConfig.getConfigVersion());
- if (_getCurrentMemberState_inlock().primary() && !cbData.txn) {
+ if (_getMemberState_inlock().primary() && !cbData.txn) {
// Not having an OperationContext in the CallbackData means we definitely aren't holding
// the global lock. Since we're primary and this reconfig could cause us to stepdown,
// reschedule this work with the global exclusive lock so the stepdown is safe.
@@ -538,7 +538,7 @@ namespace {
const Date_t now = _replExecutor.now();
_seedList.clear();
for (int i = 0; i < _rsConfig.getNumMembers(); ++i) {
- if (i == _thisMembersConfigIndex) {
+ if (i == _selfIndex) {
continue;
}
_scheduleHeartbeatToTarget(_rsConfig.getMemberAt(i).getHostAndPort(), i, now);
diff --git a/src/mongo/db/repl/repl_coordinator_impl_heartbeat_test.cpp b/src/mongo/db/repl/repl_coordinator_impl_heartbeat_test.cpp
index f3360062eec..10ad25a23bb 100644
--- a/src/mongo/db/repl/repl_coordinator_impl_heartbeat_test.cpp
+++ b/src/mongo/db/repl/repl_coordinator_impl_heartbeat_test.cpp
@@ -57,7 +57,7 @@ namespace {
};
void ReplCoordHBTest::assertMemberState(const MemberState expected, std::string msg) {
- const MemberState actual = getReplCoord()->getCurrentMemberState();
+ const MemberState actual = getReplCoord()->getMemberState();
ASSERT(expected == actual) << "Expected coordinator to report state " <<
expected.toString() << " but found " << actual.toString() << " - " << msg;
}
diff --git a/src/mongo/db/repl/repl_coordinator_impl_reconfig_test.cpp b/src/mongo/db/repl/repl_coordinator_impl_reconfig_test.cpp
index 0ac01b9e63f..521ce871e85 100644
--- a/src/mongo/db/repl/repl_coordinator_impl_reconfig_test.cpp
+++ b/src/mongo/db/repl/repl_coordinator_impl_reconfig_test.cpp
@@ -393,7 +393,7 @@ namespace {
ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
getReplCoord()->setMyLastOptime(OpTime(100,0));
simulateSuccessfulElection();
- ASSERT_TRUE(getReplCoord()->getCurrentMemberState().primary());
+ ASSERT_TRUE(getReplCoord()->getMemberState().primary());
// set hbreconfig to hang while in progress
getExternalState()->setStoreLocalConfigDocumentToHang(true);
diff --git a/src/mongo/db/repl/repl_coordinator_impl_test.cpp b/src/mongo/db/repl/repl_coordinator_impl_test.cpp
index ae56e42c10f..afb4b73d16b 100644
--- a/src/mongo/db/repl/repl_coordinator_impl_test.cpp
+++ b/src/mongo/db/repl/repl_coordinator_impl_test.cpp
@@ -101,7 +101,7 @@ namespace {
start();
stopCapturingLogMessages();
ASSERT_EQUALS(1, countLogLinesContaining("Did not find local "));
- ASSERT_EQUALS(MemberState::RS_STARTUP, getReplCoord()->getCurrentMemberState().s);
+ ASSERT_EQUALS(MemberState::RS_STARTUP, getReplCoord()->getMemberState().s);
}
TEST_F(ReplCoordTest, InitiateFailsWithEmptyConfig) {
@@ -111,14 +111,14 @@ namespace {
BSONObjBuilder result;
ASSERT_EQUALS(ErrorCodes::InvalidReplicaSetConfig,
getReplCoord()->processReplSetInitiate(&txn, BSONObj(), &result));
- ASSERT_EQUALS(MemberState::RS_STARTUP, getReplCoord()->getCurrentMemberState().s);
+ ASSERT_EQUALS(MemberState::RS_STARTUP, getReplCoord()->getMemberState().s);
}
TEST_F(ReplCoordTest, InitiateSucceedsWithOneNodeConfig) {
OperationContextNoop txn;
init("mySet");
start(HostAndPort("node1", 12345));
- ASSERT_EQUALS(MemberState::RS_STARTUP, getReplCoord()->getCurrentMemberState().s);
+ ASSERT_EQUALS(MemberState::RS_STARTUP, getReplCoord()->getMemberState().s);
// Starting uninitialized, show that we can perform the initiate behavior.
BSONObjBuilder result1;
@@ -153,7 +153,7 @@ namespace {
BSONObjBuilder result;
ASSERT_EQUALS(ErrorCodes::InvalidReplicaSetConfig,
getReplCoord()->processReplSetInitiate(&txn, BSONObj(), &result));
- ASSERT_EQUALS(MemberState::RS_STARTUP, getReplCoord()->getCurrentMemberState().s);
+ ASSERT_EQUALS(MemberState::RS_STARTUP, getReplCoord()->getMemberState().s);
// Having failed to initiate once, show that we can now initiate.
BSONObjBuilder result1;
@@ -216,7 +216,7 @@ namespace {
TEST_F(ReplCoordTest, InitiateFailsIfQuorumNotMet) {
init("mySet");
start(HostAndPort("node1", 12345));
- ASSERT_EQUALS(MemberState::RS_STARTUP, getReplCoord()->getCurrentMemberState().s);
+ ASSERT_EQUALS(MemberState::RS_STARTUP, getReplCoord()->getMemberState().s);
ReplSetHeartbeatArgs hbArgs;
hbArgs.setSetName("mySet");
@@ -241,13 +241,13 @@ namespace {
ASSERT_EQUALS(startDate + 10, getNet()->now());
prsiThread.join();
ASSERT_EQUALS(ErrorCodes::NodeNotFound, status);
- ASSERT_EQUALS(MemberState::RS_STARTUP, getReplCoord()->getCurrentMemberState().s);
+ ASSERT_EQUALS(MemberState::RS_STARTUP, getReplCoord()->getMemberState().s);
}
TEST_F(ReplCoordTest, InitiatePassesIfQuorumMet) {
init("mySet");
start(HostAndPort("node1", 12345));
- ASSERT_EQUALS(MemberState::RS_STARTUP, getReplCoord()->getCurrentMemberState().s);
+ ASSERT_EQUALS(MemberState::RS_STARTUP, getReplCoord()->getMemberState().s);
ReplSetHeartbeatArgs hbArgs;
hbArgs.setSetName("mySet");
@@ -284,7 +284,7 @@ namespace {
OperationContextNoop txn;
init("mySet");
start(HostAndPort("node1", 12345));
- ASSERT_EQUALS(MemberState::RS_STARTUP, getReplCoord()->getCurrentMemberState().s);
+ ASSERT_EQUALS(MemberState::RS_STARTUP, getReplCoord()->getMemberState().s);
BSONObjBuilder result1;
ASSERT_EQUALS(
@@ -296,14 +296,14 @@ namespace {
"members" << BSON_ARRAY(
BSON("_id" << 0 << "host" << "node1:12345"))),
&result1));
- ASSERT_EQUALS(MemberState::RS_STARTUP, getReplCoord()->getCurrentMemberState().s);
+ ASSERT_EQUALS(MemberState::RS_STARTUP, getReplCoord()->getMemberState().s);
}
TEST_F(ReplCoordTest, InitiateFailsWithoutReplSetFlag) {
OperationContextNoop txn;
init("");
start(HostAndPort("node1", 12345));
- ASSERT_EQUALS(MemberState::RS_STARTUP, getReplCoord()->getCurrentMemberState().s);
+ ASSERT_EQUALS(MemberState::RS_STARTUP, getReplCoord()->getMemberState().s);
BSONObjBuilder result1;
ASSERT_EQUALS(
@@ -315,14 +315,14 @@ namespace {
"members" << BSON_ARRAY(
BSON("_id" << 0 << "host" << "node1:12345"))),
&result1));
- ASSERT_EQUALS(MemberState::RS_STARTUP, getReplCoord()->getCurrentMemberState().s);
+ ASSERT_EQUALS(MemberState::RS_STARTUP, getReplCoord()->getMemberState().s);
}
TEST_F(ReplCoordTest, InitiateFailsWhileStoringLocalConfigDocument) {
OperationContextNoop txn;
init("mySet");
start(HostAndPort("node1", 12345));
- ASSERT_EQUALS(MemberState::RS_STARTUP, getReplCoord()->getCurrentMemberState().s);
+ ASSERT_EQUALS(MemberState::RS_STARTUP, getReplCoord()->getMemberState().s);
BSONObjBuilder result1;
getExternalState()->setStoreLocalConfigDocumentStatus(Status(ErrorCodes::OutOfDiskSpace,
@@ -336,7 +336,7 @@ namespace {
"members" << BSON_ARRAY(
BSON("_id" << 0 << "host" << "node1:12345"))),
&result1));
- ASSERT_EQUALS(MemberState::RS_STARTUP, getReplCoord()->getCurrentMemberState().s);
+ ASSERT_EQUALS(MemberState::RS_STARTUP, getReplCoord()->getMemberState().s);
}
TEST_F(ReplCoordTest, CheckReplEnabledForCommandNotRepl) {
@@ -1039,7 +1039,7 @@ namespace {
Status status = getReplCoord()->stepDown(&txn, false, Milliseconds(0), Milliseconds(0));
ASSERT_EQUALS(ErrorCodes::NotMaster, status);
- ASSERT_TRUE(getReplCoord()->getCurrentMemberState().secondary());
+ ASSERT_TRUE(getReplCoord()->getMemberState().secondary());
}
TEST_F(StepDownTest, StepDownTimeoutAcquiringGlobalLock) {
@@ -1057,7 +1057,7 @@ namespace {
Status status = getReplCoord()->stepDown(&txn, false, Milliseconds(0), Milliseconds(1000));
ASSERT_EQUALS(ErrorCodes::ExceededTimeLimit, status);
- ASSERT_TRUE(getReplCoord()->getCurrentMemberState().primary());
+ ASSERT_TRUE(getReplCoord()->getMemberState().primary());
}
TEST_F(StepDownTest, StepDownNoWaiting) {
@@ -1095,13 +1095,13 @@ namespace {
exitNetwork();
- ASSERT_TRUE(getReplCoord()->getCurrentMemberState().primary());
+ ASSERT_TRUE(getReplCoord()->getMemberState().primary());
ASSERT_OK(getReplCoord()->stepDown(&txn, false, Milliseconds(0), Milliseconds(1000)));
enterNetwork(); // So we can safely inspect the topology coordinator
ASSERT_EQUALS(Date_t(getNet()->now().millis + 1000), getTopoCoord().getStepDownTime());
ASSERT_TRUE(getTopoCoord().getMemberState().secondary());
exitNetwork();
- ASSERT_TRUE(getReplCoord()->getCurrentMemberState().secondary());
+ ASSERT_TRUE(getReplCoord()->getMemberState().secondary());
}
TEST_F(ReplCoordTest, StepDownAndBackUpSingleNode) {
@@ -1115,13 +1115,13 @@ namespace {
OperationContextReplMock txn;
getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY);
- ASSERT_TRUE(getReplCoord()->getCurrentMemberState().primary());
+ ASSERT_TRUE(getReplCoord()->getMemberState().primary());
ASSERT_OK(getReplCoord()->stepDown(&txn, true, Milliseconds(0), Milliseconds(1000)));
getNet()->enterNetwork(); // Must do this before inspecting the topocoord
Date_t stepdownUntil = Date_t(getNet()->now().millis + 1000);
ASSERT_EQUALS(stepdownUntil, getTopoCoord().getStepDownTime());
ASSERT_TRUE(getTopoCoord().getMemberState().secondary());
- ASSERT_TRUE(getReplCoord()->getCurrentMemberState().secondary());
+ ASSERT_TRUE(getReplCoord()->getMemberState().secondary());
// Now run time forward and make sure that the node becomes primary again when the stepdown
// period ends.
@@ -1129,7 +1129,7 @@ namespace {
ASSERT_EQUALS(stepdownUntil, getNet()->now());
ASSERT_TRUE(getTopoCoord().getMemberState().primary());
getNet()->exitNetwork();
- ASSERT_TRUE(getReplCoord()->getCurrentMemberState().primary());
+ ASSERT_TRUE(getReplCoord()->getMemberState().primary());
}
/**
@@ -1216,7 +1216,7 @@ namespace {
runner.start(&txn);
Status status = runner.getResult();
ASSERT_EQUALS(ErrorCodes::ExceededTimeLimit, status);
- ASSERT_TRUE(getReplCoord()->getCurrentMemberState().primary());
+ ASSERT_TRUE(getReplCoord()->getMemberState().primary());
// Now use "force" to force it to step down even though no one is caught up
runner.reset();
@@ -1229,12 +1229,12 @@ namespace {
getNet()->runUntil(startDate + 1000);
}
getNet()->exitNetwork();
- ASSERT_TRUE(getReplCoord()->getCurrentMemberState().primary());
+ ASSERT_TRUE(getReplCoord()->getMemberState().primary());
runner.setForce(true);
runner.start(&txn);
status = runner.getResult();
ASSERT_OK(status);
- ASSERT_TRUE(getReplCoord()->getCurrentMemberState().secondary());
+ ASSERT_TRUE(getReplCoord()->getMemberState().secondary());
}
@@ -1283,7 +1283,7 @@ namespace {
exitNetwork();
ASSERT_OK(runner.getResult());
- ASSERT_TRUE(getReplCoord()->getCurrentMemberState().secondary());
+ ASSERT_TRUE(getReplCoord()->getMemberState().secondary());
}
TEST_F(StepDownTest, InterruptStepDown) {
@@ -1302,7 +1302,7 @@ namespace {
runner.setStepDownTime(Milliseconds(60000));
simulateSuccessfulElection();
- ASSERT_TRUE(getReplCoord()->getCurrentMemberState().primary());
+ ASSERT_TRUE(getReplCoord()->getMemberState().primary());
runner.start(&txn);
@@ -1312,12 +1312,12 @@ namespace {
getReplCoord()->interrupt(opID);
ASSERT_EQUALS(ErrorCodes::Interrupted, runner.getResult());
- ASSERT_TRUE(getReplCoord()->getCurrentMemberState().primary());
+ ASSERT_TRUE(getReplCoord()->getMemberState().primary());
}
TEST_F(ReplCoordTest, GetReplicationModeNone) {
init();
- ASSERT_EQUALS(MemberState::RS_STARTUP, getReplCoord()->getCurrentMemberState().s);
+ ASSERT_EQUALS(MemberState::RS_STARTUP, getReplCoord()->getMemberState().s);
}
TEST_F(ReplCoordTest, GetReplicationModeMaster) {
@@ -1344,7 +1344,7 @@ namespace {
settings.replSet = "mySet/node1:12345";
init(settings);
ASSERT_EQUALS(ReplicationCoordinator::modeReplSet, getReplCoord()->getReplicationMode());
- ASSERT_EQUALS(MemberState::RS_STARTUP, getReplCoord()->getCurrentMemberState().s);
+ ASSERT_EQUALS(MemberState::RS_STARTUP, getReplCoord()->getMemberState().s);
assertStartSuccess(
BSON("_id" << "mySet" <<
"version" << 2 <<
@@ -1483,19 +1483,19 @@ namespace {
// Can't unset maintenance mode if it was never set to begin with.
Status status = getReplCoord()->setMaintenanceMode(false);
ASSERT_EQUALS(ErrorCodes::OperationFailed, status);
- ASSERT_TRUE(getReplCoord()->getCurrentMemberState().secondary());
+ ASSERT_TRUE(getReplCoord()->getMemberState().secondary());
// valid set
ASSERT_OK(getReplCoord()->setMaintenanceMode(true));
- ASSERT_TRUE(getReplCoord()->getCurrentMemberState().recovering());
+ ASSERT_TRUE(getReplCoord()->getMemberState().recovering());
// If we go into rollback while in maintenance mode, our state changes to RS_ROLLBACK.
getReplCoord()->setFollowerMode(MemberState::RS_ROLLBACK);
- ASSERT_TRUE(getReplCoord()->getCurrentMemberState().rollback());
+ ASSERT_TRUE(getReplCoord()->getMemberState().rollback());
// When we go back to SECONDARY, we still observe RECOVERING because of maintenance mode.
getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY);
- ASSERT_TRUE(getReplCoord()->getCurrentMemberState().recovering());
+ ASSERT_TRUE(getReplCoord()->getMemberState().recovering());
// Can set multiple times
ASSERT_OK(getReplCoord()->setMaintenanceMode(true));
@@ -1510,35 +1510,35 @@ namespace {
ASSERT_EQUALS(ErrorCodes::OperationFailed, status);
// Unsetting maintenance mode changes our state to secondary if maintenance mode was
// the only thinking keeping us out of it.
- ASSERT_TRUE(getReplCoord()->getCurrentMemberState().secondary());
+ ASSERT_TRUE(getReplCoord()->getMemberState().secondary());
// From rollback, entering and exiting maintenance mode doesn't change perceived
// state.
getReplCoord()->setFollowerMode(MemberState::RS_ROLLBACK);
- ASSERT_TRUE(getReplCoord()->getCurrentMemberState().rollback());
+ ASSERT_TRUE(getReplCoord()->getMemberState().rollback());
ASSERT_OK(getReplCoord()->setMaintenanceMode(true));
- ASSERT_TRUE(getReplCoord()->getCurrentMemberState().rollback());
+ ASSERT_TRUE(getReplCoord()->getMemberState().rollback());
ASSERT_OK(getReplCoord()->setMaintenanceMode(false));
- ASSERT_TRUE(getReplCoord()->getCurrentMemberState().rollback());
+ ASSERT_TRUE(getReplCoord()->getMemberState().rollback());
// Rollback is sticky even if entered while in maintenance mode.
getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY);
- ASSERT_TRUE(getReplCoord()->getCurrentMemberState().secondary());
+ ASSERT_TRUE(getReplCoord()->getMemberState().secondary());
ASSERT_OK(getReplCoord()->setMaintenanceMode(true));
- ASSERT_TRUE(getReplCoord()->getCurrentMemberState().recovering());
+ ASSERT_TRUE(getReplCoord()->getMemberState().recovering());
getReplCoord()->setFollowerMode(MemberState::RS_ROLLBACK);
- ASSERT_TRUE(getReplCoord()->getCurrentMemberState().rollback());
+ ASSERT_TRUE(getReplCoord()->getMemberState().rollback());
ASSERT_OK(getReplCoord()->setMaintenanceMode(false));
- ASSERT_TRUE(getReplCoord()->getCurrentMemberState().rollback());
+ ASSERT_TRUE(getReplCoord()->getMemberState().rollback());
getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY);
- ASSERT_TRUE(getReplCoord()->getCurrentMemberState().secondary());
+ ASSERT_TRUE(getReplCoord()->getMemberState().secondary());
// Can't modify maintenance mode when PRIMARY
simulateSuccessfulElection();
status = getReplCoord()->setMaintenanceMode(true);
ASSERT_EQUALS(ErrorCodes::NotSecondary, status);
- ASSERT_TRUE(getReplCoord()->getCurrentMemberState().primary());
+ ASSERT_TRUE(getReplCoord()->getMemberState().primary());
simulateStepDownOnIsolation();
@@ -1684,7 +1684,7 @@ namespace {
"key2" << "value2")))),
h4);
getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY);
- ASSERT_TRUE(getReplCoord()->getCurrentMemberState().secondary());
+ ASSERT_TRUE(getReplCoord()->getMemberState().secondary());
IsMasterResponse response;
getReplCoord()->fillIsMasterForReplSet(&response);
diff --git a/src/mongo/db/repl/repl_coordinator_mock.cpp b/src/mongo/db/repl/repl_coordinator_mock.cpp
index e0eb58a67bd..ed86767b589 100644
--- a/src/mongo/db/repl/repl_coordinator_mock.cpp
+++ b/src/mongo/db/repl/repl_coordinator_mock.cpp
@@ -61,7 +61,7 @@ namespace repl {
return modeNone;
}
- MemberState ReplicationCoordinatorMock::getCurrentMemberState() const {
+ MemberState ReplicationCoordinatorMock::getMemberState() const {
// TODO
invariant(false);
}
diff --git a/src/mongo/db/repl/repl_coordinator_mock.h b/src/mongo/db/repl/repl_coordinator_mock.h
index 10600f3065b..b9ed123c031 100644
--- a/src/mongo/db/repl/repl_coordinator_mock.h
+++ b/src/mongo/db/repl/repl_coordinator_mock.h
@@ -56,7 +56,7 @@ namespace repl {
virtual Mode getReplicationMode() const;
- virtual MemberState getCurrentMemberState() const;
+ virtual MemberState getMemberState() const;
virtual bool isInPrimaryOrSecondaryState() const;
diff --git a/src/mongo/db/repl/repl_coordinator_test_fixture.cpp b/src/mongo/db/repl/repl_coordinator_test_fixture.cpp
index 9b480974a00..2ecb4eb7342 100644
--- a/src/mongo/db/repl/repl_coordinator_test_fixture.cpp
+++ b/src/mongo/db/repl/repl_coordinator_test_fixture.cpp
@@ -155,7 +155,7 @@ namespace {
const BSONObj& configDoc,
const HostAndPort& selfHost) {
start(configDoc, selfHost);
- ASSERT_NE(MemberState::RS_STARTUP, getReplCoord()->getCurrentMemberState().s);
+ ASSERT_NE(MemberState::RS_STARTUP, getReplCoord()->getMemberState().s);
}
ResponseStatus ReplCoordTest::makeResponseStatus(const BSONObj& doc, Milliseconds millis) {
@@ -168,10 +168,10 @@ namespace {
ReplicationCoordinatorImpl* replCoord = getReplCoord();
NetworkInterfaceMock* net = getNet();
ReplicaSetConfig rsConfig = replCoord->getReplicaSetConfig_forTest();
- ASSERT(replCoord->getCurrentMemberState().secondary()) <<
- replCoord->getCurrentMemberState().toString();
- while (!replCoord->getCurrentMemberState().primary()) {
- log() << "Waiting on network in state " << replCoord->getCurrentMemberState();
+ ASSERT(replCoord->getMemberState().secondary()) <<
+ replCoord->getMemberState().toString();
+ while (!replCoord->getMemberState().primary()) {
+ log() << "Waiting on network in state " << replCoord->getMemberState();
getNet()->enterNetwork();
const NetworkInterfaceMock::NetworkOperationIterator noi = net->getNextReadyRequest();
const ReplicationExecutor::RemoteCommandRequest& request = noi->getRequest();
@@ -209,8 +209,8 @@ namespace {
getNet()->exitNetwork();
}
ASSERT(replCoord->isWaitingForApplierToDrain());
- ASSERT(replCoord->getCurrentMemberState().primary()) <<
- replCoord->getCurrentMemberState().toString();
+ ASSERT(replCoord->getMemberState().primary()) <<
+ replCoord->getMemberState().toString();
IsMasterResponse imResponse;
replCoord->fillIsMasterForReplSet(&imResponse);
@@ -221,18 +221,18 @@ namespace {
ASSERT_TRUE(imResponse.isMaster()) << imResponse.toBSON().toString();
ASSERT_FALSE(imResponse.isSecondary()) << imResponse.toBSON().toString();
- ASSERT(replCoord->getCurrentMemberState().primary()) <<
- replCoord->getCurrentMemberState().toString();
+ ASSERT(replCoord->getMemberState().primary()) <<
+ replCoord->getMemberState().toString();
}
void ReplCoordTest::simulateStepDownOnIsolation() {
ReplicationCoordinatorImpl* replCoord = getReplCoord();
NetworkInterfaceMock* net = getNet();
ReplicaSetConfig rsConfig = replCoord->getReplicaSetConfig_forTest();
- ASSERT(replCoord->getCurrentMemberState().primary()) <<
- replCoord->getCurrentMemberState().toString();
- while (replCoord->getCurrentMemberState().primary()) {
- log() << "Waiting on network in state " << replCoord->getCurrentMemberState();
+ ASSERT(replCoord->getMemberState().primary()) <<
+ replCoord->getMemberState().toString();
+ while (replCoord->getMemberState().primary()) {
+ log() << "Waiting on network in state " << replCoord->getMemberState();
getNet()->enterNetwork();
net->runUntil(net->now() + 10000);
const NetworkInterfaceMock::NetworkOperationIterator noi = net->getNextReadyRequest();
diff --git a/src/mongo/db/repl/resync.cpp b/src/mongo/db/repl/resync.cpp
index 3ae3159bfec..067b8b86dd1 100644
--- a/src/mongo/db/repl/resync.cpp
+++ b/src/mongo/db/repl/resync.cpp
@@ -72,7 +72,7 @@ namespace repl {
ReplicationCoordinator* replCoord = getGlobalReplicationCoordinator();
if (getGlobalReplicationCoordinator()->getSettings().usingReplSets()) {
- const MemberState memberState = replCoord->getCurrentMemberState();
+ const MemberState memberState = replCoord->getMemberState();
if (memberState.startup()) {
return appendCommandStatus(result, Status(ErrorCodes::NotYetInitialized,
"no replication yet active"));
diff --git a/src/mongo/db/repl/rs_rollback.cpp b/src/mongo/db/repl/rs_rollback.cpp
index a5820837384..289317bdf42 100644
--- a/src/mongo/db/repl/rs_rollback.cpp
+++ b/src/mongo/db/repl/rs_rollback.cpp
@@ -728,7 +728,7 @@ namespace {
* also, this is better for status reporting - we know what is happening.
*/
if (!replCoord->setFollowerMode(MemberState::RS_ROLLBACK)) {
- warning() << "Cannot transition from " << replCoord->getCurrentMemberState() <<
+ warning() << "Cannot transition from " << replCoord->getMemberState() <<
" to " << MemberState(MemberState::RS_ROLLBACK);
return 0;
}
@@ -782,7 +782,7 @@ namespace {
if (!replCoord->setFollowerMode(MemberState::RS_RECOVERING)) {
warning() << "Failed to transition into " << MemberState(MemberState::RS_RECOVERING) <<
"; expected to be in state " << MemberState(MemberState::RS_ROLLBACK) <<
- "but found self in " << replCoord->getCurrentMemberState();
+ "but found self in " << replCoord->getMemberState();
}
return 0;
diff --git a/src/mongo/db/repl/rs_sync.cpp b/src/mongo/db/repl/rs_sync.cpp
index 8288b33e142..2f6753e64b6 100644
--- a/src/mongo/db/repl/rs_sync.cpp
+++ b/src/mongo/db/repl/rs_sync.cpp
@@ -89,13 +89,13 @@ namespace repl {
// check that we are in the set (and not an arbiter) before
// trying to sync with other replicas.
// TODO(spencer): Use a condition variable to await loading a config
- if (replCoord->getCurrentMemberState().startup()) {
+ if (replCoord->getMemberState().startup()) {
log() << "replSet warning did not receive a valid config yet, sleeping 5 seconds ";
sleepsecs(5);
continue;
}
- const MemberState memberState = replCoord->getCurrentMemberState();
+ const MemberState memberState = replCoord->getMemberState();
// An arbiter can never transition to any other state, and doesn't replicate, ever
if (memberState.arbiter()) {
diff --git a/src/mongo/db/repl/sync_source_feedback.cpp b/src/mongo/db/repl/sync_source_feedback.cpp
index a7bdacc0c6b..c338396d9fb 100644
--- a/src/mongo/db/repl/sync_source_feedback.cpp
+++ b/src/mongo/db/repl/sync_source_feedback.cpp
@@ -107,7 +107,7 @@ namespace repl {
bool SyncSourceFeedback::replHandshake(OperationContext* txn) {
ReplicationCoordinator* replCoord = getGlobalReplicationCoordinator();
- if (replCoord->getCurrentMemberState().primary()) {
+ if (replCoord->getMemberState().primary()) {
// primary has no one to handshake to
return true;
}
@@ -194,7 +194,7 @@ namespace repl {
Status SyncSourceFeedback::updateUpstream(OperationContext* txn) {
ReplicationCoordinator* replCoord = getGlobalReplicationCoordinator();
- if (replCoord->getCurrentMemberState().primary()) {
+ if (replCoord->getMemberState().primary()) {
// primary has no one to update to
return Status::OK();
}
@@ -266,7 +266,7 @@ namespace repl {
_handshakeNeeded = false;
}
- MemberState state = replCoord->getCurrentMemberState();
+ MemberState state = replCoord->getMemberState();
if (state.primary() || state.startup()) {
_resetConnection();
continue;
diff --git a/src/mongo/db/repl/sync_tail.cpp b/src/mongo/db/repl/sync_tail.cpp
index 34c070e0aa5..63284a674f6 100644
--- a/src/mongo/db/repl/sync_tail.cpp
+++ b/src/mongo/db/repl/sync_tail.cpp
@@ -365,7 +365,7 @@ namespace repl {
Lock::ParallelBatchWriterMode pbwm;
ReplicationCoordinator* replCoord = getGlobalReplicationCoordinator();
- if (replCoord->getCurrentMemberState().primary() &&
+ if (replCoord->getMemberState().primary() &&
!replCoord->isWaitingForApplierToDrain()) {
severe() << "attempting to replicate ops while primary";
@@ -486,7 +486,7 @@ namespace {
}
// Only state RECOVERING can transition to SECONDARY.
- MemberState state(replCoord->getCurrentMemberState());
+ MemberState state(replCoord->getMemberState());
if (!state.recovering()) {
return;
}
@@ -499,7 +499,7 @@ namespace {
bool worked = replCoord->setFollowerMode(MemberState::RS_SECONDARY);
if (!worked) {
warning() << "Failed to transition into " << MemberState(MemberState::RS_SECONDARY)
- << ". Current state: " << replCoord->getCurrentMemberState();
+ << ". Current state: " << replCoord->getMemberState();
}
}
}
@@ -681,7 +681,7 @@ namespace {
// ignore slaveDelay if the box is still initializing. once
// it becomes secondary we can worry about it.
- if( slaveDelaySecs > 0 && replCoord->getCurrentMemberState().secondary() ) {
+ if( slaveDelaySecs > 0 && replCoord->getMemberState().secondary() ) {
const OpTime ts = lastOp["ts"]._opTime();
long long a = ts.getSecs();
long long b = time(0);
diff --git a/src/mongo/db/repl/topology_coordinator_impl.cpp b/src/mongo/db/repl/topology_coordinator_impl.cpp
index 0f5e9c4985d..ff83fee57fc 100644
--- a/src/mongo/db/repl/topology_coordinator_impl.cpp
+++ b/src/mongo/db/repl/topology_coordinator_impl.cpp
@@ -138,7 +138,7 @@ namespace {
}
void TopologyCoordinatorImpl::setForceSyncSourceIndex(int index) {
- invariant(_forceSyncSourceIndex < _currentConfig.getNumMembers());
+ invariant(_forceSyncSourceIndex < _rsConfig.getNumMembers());
_forceSyncSourceIndex = index;
}
@@ -157,8 +157,8 @@ namespace {
// if we have a target we've requested to sync from, use it
if (_forceSyncSourceIndex != -1) {
- invariant(_forceSyncSourceIndex < _currentConfig.getNumMembers());
- _syncSource = _currentConfig.getMemberAt(_forceSyncSourceIndex).getHostAndPort();
+ invariant(_forceSyncSourceIndex < _rsConfig.getNumMembers());
+ _syncSource = _rsConfig.getMemberAt(_forceSyncSourceIndex).getHostAndPort();
_forceSyncSourceIndex = -1;
std::string msg(str::stream() << "syncing from: "
<< _syncSource.toString() << " by request");
@@ -178,7 +178,7 @@ namespace {
}
// If we are only allowed to sync from the primary, set that
- if (!_currentConfig.isChainingAllowed()) {
+ if (!_rsConfig.isChainingAllowed()) {
if (_currentPrimaryIndex == -1) {
LOG(1) << "Cannot select sync source because chaining is"
" not allowed and primary is unknown/down";
@@ -192,7 +192,7 @@ namespace {
return _syncSource;
}
else {
- _syncSource = _currentConfig.getMemberAt(_currentPrimaryIndex).getHostAndPort();
+ _syncSource = _rsConfig.getMemberAt(_currentPrimaryIndex).getHostAndPort();
std::string msg(str::stream() << "syncing from primary: "
<< _syncSource.toString());
log() << msg << rsLog;
@@ -247,7 +247,7 @@ namespace {
continue;
}
- const MemberConfig& itMemberConfig(_currentConfig.getMemberAt(itIndex));
+ const MemberConfig& itMemberConfig(_rsConfig.getMemberAt(itIndex));
// Candidate must build indexes if we build indexes, to be considered.
if (_selfConfig().shouldBuildIndexes()) {
@@ -270,7 +270,7 @@ namespace {
// omit nodes that are more latent than anything we've already considered
if ((closestIndex != -1) &&
(_getPing(itMemberConfig.getHostAndPort())
- > _getPing(_currentConfig.getMemberAt(closestIndex).getHostAndPort()))) {
+ > _getPing(_rsConfig.getMemberAt(closestIndex).getHostAndPort()))) {
continue;
}
@@ -303,7 +303,7 @@ namespace {
_syncSource = HostAndPort();
return _syncSource;
}
- _syncSource = _currentConfig.getMemberAt(closestIndex).getHostAndPort();
+ _syncSource = _rsConfig.getMemberAt(closestIndex).getHostAndPort();
std::string msg(str::stream() << "syncing from: " << _syncSource.toString(), 0);
log() << msg << rsLog;
setMyHeartbeatMessage(now, msg);
@@ -361,17 +361,17 @@ namespace {
return;
}
- ReplicaSetConfig::MemberIterator targetConfig = _currentConfig.membersEnd();
+ ReplicaSetConfig::MemberIterator targetConfig = _rsConfig.membersEnd();
int targetIndex = 0;
- for (ReplicaSetConfig::MemberIterator it = _currentConfig.membersBegin();
- it != _currentConfig.membersEnd(); ++it) {
+ for (ReplicaSetConfig::MemberIterator it = _rsConfig.membersBegin();
+ it != _rsConfig.membersEnd(); ++it) {
if (it->getHostAndPort() == target) {
targetConfig = it;
break;
}
++targetIndex;
}
- if (targetConfig == _currentConfig.membersEnd()) {
+ if (targetConfig == _rsConfig.membersEnd()) {
*result = Status(ErrorCodes::NodeNotFound,
str::stream() << "Could not find member \"" << target.toString() <<
"\" in replica set");
@@ -439,10 +439,10 @@ namespace {
return;
}
- if (args.setName != _currentConfig.getReplSetName()) {
+ if (args.setName != _rsConfig.getReplSetName()) {
*result = Status(ErrorCodes::ReplicaSetNotFound,
str::stream() << "Wrong repl set name. Expected: " <<
- _currentConfig.getReplSetName() <<
+ _rsConfig.getReplSetName() <<
", received: " << args.setName);
return;
}
@@ -455,7 +455,7 @@ namespace {
}
bool weAreFresher = false;
- if( _currentConfig.getConfigVersion() > args.cfgver ) {
+ if( _rsConfig.getConfigVersion() > args.cfgver ) {
log() << "replSet member " << args.who << " is not yet aware its cfg version "
<< args.cfgver << " is stale";
response->append("info", "config version stale");
@@ -483,7 +483,7 @@ namespace {
const OpTime& lastOpApplied,
std::string* errmsg) const {
- if (_currentConfig.getConfigVersion() < args.cfgver) {
+ if (_rsConfig.getConfigVersion() < args.cfgver) {
// We are stale; do not veto.
return false;
}
@@ -502,7 +502,7 @@ namespace {
// hbinfo is not updated for ourself, so if we are primary we have to check the
// primary's last optime separately
*errmsg = str::stream() << "I am already primary, " <<
- _currentConfig.getMemberAt(hopefulIndex).getHostAndPort().toString() <<
+ _rsConfig.getMemberAt(hopefulIndex).getHostAndPort().toString() <<
" can try again once I've stepped down";
return true;
}
@@ -513,16 +513,16 @@ namespace {
_hbdata[hopefulIndex].getOpTime())) {
// other members might be aware of more up-to-date nodes
*errmsg = str::stream() <<
- _currentConfig.getMemberAt(hopefulIndex).getHostAndPort().toString() <<
+ _rsConfig.getMemberAt(hopefulIndex).getHostAndPort().toString() <<
" is trying to elect itself but " <<
- _currentConfig.getMemberAt(_currentPrimaryIndex).getHostAndPort().toString() <<
+ _rsConfig.getMemberAt(_currentPrimaryIndex).getHostAndPort().toString() <<
" is already primary and more up-to-date";
return true;
}
if ((highestPriorityIndex != -1)) {
- const MemberConfig& hopefulMember = _currentConfig.getMemberAt(hopefulIndex);
- const MemberConfig& priorityMember = _currentConfig.getMemberAt(highestPriorityIndex);
+ const MemberConfig& hopefulMember = _rsConfig.getMemberAt(hopefulIndex);
+ const MemberConfig& priorityMember = _rsConfig.getMemberAt(highestPriorityIndex);
if (priorityMember.getPriority() > hopefulMember.getPriority()) {
*errmsg = str::stream()
@@ -539,7 +539,7 @@ namespace {
if (reason) {
*errmsg = str::stream()
<< "I don't think "
- << _currentConfig.getMemberAt(hopefulIndex).getHostAndPort().toString()
+ << _rsConfig.getMemberAt(hopefulIndex).getHostAndPort().toString()
<< " is electable because the " << _getUnelectableReasonString(reason);
return true;
}
@@ -561,19 +561,19 @@ namespace {
return;
}
- const long long myver = _currentConfig.getConfigVersion();
+ const long long myver = _rsConfig.getConfigVersion();
const int highestPriorityIndex = _getHighestPriorityElectableIndex(now, lastOpApplied);
const MemberConfig* primary = _currentPrimaryMember();
- const MemberConfig* hopeful = _currentConfig.findMemberByID(args.whoid);
+ const MemberConfig* hopeful = _rsConfig.findMemberByID(args.whoid);
const MemberConfig* highestPriority = highestPriorityIndex == -1 ? NULL :
- &_currentConfig.getMemberAt(highestPriorityIndex);
+ &_rsConfig.getMemberAt(highestPriorityIndex);
int vote = 0;
- if (args.set != _currentConfig.getReplSetName()) {
+ if (args.set != _rsConfig.getReplSetName()) {
log() << "replSet error received an elect request for '" << args.set
<< "' but our set name is '" <<
- _currentConfig.getReplSetName() << "'";
+ _rsConfig.getReplSetName() << "'";
}
else if ( myver < args.cfgver ) {
// we are stale. don't vote
@@ -663,7 +663,7 @@ namespace {
}
}
else {
- invariant(_currentConfig.getReplSetName() == args.getSetName());
+ invariant(_rsConfig.getReplSetName() == args.getSetName());
if (args.getSenderId() == _selfConfig().getId()) {
return Status(ErrorCodes::BadValue,
str::stream() << "Received heartbeat from member with the same "
@@ -675,7 +675,7 @@ namespace {
response->noteReplSet();
// For 2.6 compatibility
- if (_currentConfig.isInitialized()) {
+ if (_rsConfig.isInitialized()) {
response->setSetName(ourSetName);
}
response->setState(myState.s);
@@ -695,16 +695,16 @@ namespace {
response->setSyncingTo(_syncSource.toString());
}
- if (!_currentConfig.isInitialized()) {
+ if (!_rsConfig.isInitialized()) {
response->setVersion(-2);
return Status::OK();
}
- const long long v = _currentConfig.getConfigVersion();
+ const long long v = _rsConfig.getConfigVersion();
response->setVersion(v);
// Deliver new config if caller's version is older than ours
if (v > args.getConfigVersion()) {
- response->setConfig(_currentConfig);
+ response->setConfig(_rsConfig);
}
// Resolve the caller's id in our Member list
@@ -731,8 +731,8 @@ namespace {
int TopologyCoordinatorImpl::_getMemberIndex(int id) const {
int index = 0;
- for (ReplicaSetConfig::MemberIterator it = _currentConfig.membersBegin();
- it != _currentConfig.membersEnd();
+ for (ReplicaSetConfig::MemberIterator it = _rsConfig.membersBegin();
+ it != _rsConfig.membersEnd();
++it, ++index) {
if (it->getId() == id) {
return index;
@@ -748,9 +748,9 @@ namespace {
PingStats& hbStats = _pings[target];
Milliseconds alreadyElapsed(now.asInt64() - hbStats.getLastHeartbeatStartDate().asInt64());
- if (!_currentConfig.isInitialized() ||
+ if (!_rsConfig.isInitialized() ||
(hbStats.getNumFailuresSinceLastStart() > kMaxHeartbeatRetries) ||
- (alreadyElapsed >= _currentConfig.getHeartbeatTimeoutPeriodMillis())) {
+ (alreadyElapsed >= _rsConfig.getHeartbeatTimeoutPeriodMillis())) {
// This is either the first request ever for "target", or the heartbeat timeout has
// passed, so we're starting a "new" heartbeat.
@@ -760,9 +760,9 @@ namespace {
ReplSetHeartbeatArgs hbArgs;
hbArgs.setProtocolVersion(1);
hbArgs.setCheckEmpty(false);
- if (_currentConfig.isInitialized()) {
- hbArgs.setSetName(_currentConfig.getReplSetName());
- hbArgs.setConfigVersion(_currentConfig.getConfigVersion());
+ if (_rsConfig.isInitialized()) {
+ hbArgs.setSetName(_rsConfig.getReplSetName());
+ hbArgs.setConfigVersion(_rsConfig.getConfigVersion());
if (_selfIndex >= 0) {
const MemberConfig& me = _selfConfig();
hbArgs.setSenderHost(me.getHostAndPort());
@@ -775,8 +775,8 @@ namespace {
}
const Milliseconds timeoutPeriod(
- _currentConfig.isInitialized() ?
- _currentConfig.getHeartbeatTimeoutPeriodMillis() :
+ _rsConfig.isInitialized() ?
+ _rsConfig.getHeartbeatTimeoutPeriodMillis() :
Milliseconds(
ReplicaSetConfig::kDefaultHeartbeatTimeoutPeriod.total_milliseconds()));
const Milliseconds timeout(
@@ -811,9 +811,9 @@ namespace {
Milliseconds alreadyElapsed(now.asInt64() - hbStats.getLastHeartbeatStartDate().asInt64());
Date_t nextHeartbeatStartDate;
- if (_currentConfig.isInitialized() &&
+ if (_rsConfig.isInitialized() &&
(hbStats.getNumFailuresSinceLastStart() <= kMaxHeartbeatRetries) &&
- (alreadyElapsed < _currentConfig.getHeartbeatTimeoutPeriodMillis())) {
+ (alreadyElapsed < _rsConfig.getHeartbeatTimeoutPeriodMillis())) {
if (!hbResponse.isOK() && !isUnauthorized) {
LOG(1) << "Bad heartbeat response from " << target <<
@@ -833,7 +833,7 @@ namespace {
if (hbResponse.isOK() && hbResponse.getValue().hasConfig()) {
const long long currentConfigVersion =
- _currentConfig.isInitialized() ? _currentConfig.getConfigVersion() : -2;
+ _rsConfig.isInitialized() ? _rsConfig.getConfigVersion() : -2;
const ReplicaSetConfig& newConfig = hbResponse.getValue().getConfig();
if (newConfig.getConfigVersion() > currentConfigVersion) {
HeartbeatResponseAction nextAction = HeartbeatResponseAction::makeReconfigAction();
@@ -853,8 +853,8 @@ namespace {
MongoLogDefaultComponent_component,
::mongo::LogstreamBuilder::severityCast(2))) {
LogstreamBuilder lsb = log();
- if (_currentConfig.isInitialized()) {
- lsb << "Current config: " << _currentConfig.toBSON() << "; ";
+ if (_rsConfig.isInitialized()) {
+ lsb << "Current config: " << _rsConfig.toBSON() << "; ";
}
lsb << "Config in heartbeat: " << newConfig.toBSON();
}
@@ -863,15 +863,15 @@ namespace {
// Check if the heartbeat target is in our config. If it isn't, there's nothing left to do,
// so return early.
- if (!_currentConfig.isInitialized()) {
+ if (!_rsConfig.isInitialized()) {
HeartbeatResponseAction nextAction = HeartbeatResponseAction::makeNoAction();
nextAction.setNextHeartbeatStartDate(nextHeartbeatStartDate);
return nextAction;
}
- const int memberIndex = _currentConfig.findMemberIndexByHostAndPort(target);
+ const int memberIndex = _rsConfig.findMemberIndexByHostAndPort(target);
if (memberIndex == -1) {
LOG(1) << "replset: Could not find " << target << " in current config so ignoring --"
- " current config: " << _currentConfig.toBSON();
+ " current config: " << _rsConfig.toBSON();
HeartbeatResponseAction nextAction = HeartbeatResponseAction::makeNoAction();
nextAction.setNextHeartbeatStartDate(nextHeartbeatStartDate);
return nextAction;
@@ -879,7 +879,7 @@ namespace {
invariant(memberIndex != _selfIndex);
MemberHeartbeatData& hbData = _hbdata[memberIndex];
- const MemberConfig member = _currentConfig.getMemberAt(memberIndex);
+ const MemberConfig member = _rsConfig.getMemberAt(memberIndex);
if (!hbResponse.isOK()) {
if (isUnauthorized) {
LOG(3) << "setAuthIssue: heartbeat response failed due to authentication"
@@ -951,9 +951,9 @@ namespace {
const int highestPriorityIndex = _getHighestPriorityElectableIndex(now, lastOpApplied);
if (highestPriorityIndex != -1) {
const MemberConfig& currentPrimaryMember =
- _currentConfig.getMemberAt(_currentPrimaryIndex);
+ _rsConfig.getMemberAt(_currentPrimaryIndex);
const MemberConfig& highestPriorityMember =
- _currentConfig.getMemberAt(highestPriorityIndex);
+ _rsConfig.getMemberAt(highestPriorityIndex);
const OpTime highestPriorityMemberOptime = highestPriorityIndex == _selfIndex ?
lastOpApplied : _hbdata[highestPriorityIndex].getOpTime();
@@ -1133,11 +1133,11 @@ namespace {
++it) {
const int itIndex = indexOfIterator(_hbdata, it);
if (itIndex == _selfIndex || it->up()) {
- vUp += _currentConfig.getMemberAt(itIndex).getNumVotes();
+ vUp += _rsConfig.getMemberAt(itIndex).getNumVotes();
}
}
- return vUp * 2 > _currentConfig.getTotalVotingMembers();
+ return vUp * 2 > _rsConfig.getTotalVotingMembers();
}
bool TopologyCoordinatorImpl::_isOpTimeCloseEnoughToLatestToElect(
@@ -1186,14 +1186,14 @@ namespace {
if (memberTwoIndex == -1)
return true;
- return _currentConfig.getMemberAt(memberOneIndex).getPriority() >
- _currentConfig.getMemberAt(memberTwoIndex).getPriority();
+ return _rsConfig.getMemberAt(memberOneIndex).getPriority() >
+ _rsConfig.getMemberAt(memberTwoIndex).getPriority();
}
int TopologyCoordinatorImpl::_getHighestPriorityElectableIndex(
Date_t now, OpTime lastOpApplied) const {
int maxIndex = -1;
- for (int currentIndex = 0; currentIndex < _currentConfig.getNumMembers(); currentIndex++) {
+ for (int currentIndex = 0; currentIndex < _rsConfig.getNumMembers(); currentIndex++) {
UnelectableReasonMask reason = currentIndex == _selfIndex ?
_getMyUnelectableReason(now, lastOpApplied) :
_getUnelectableReason(currentIndex, lastOpApplied);
@@ -1263,7 +1263,7 @@ namespace {
hbResponse.setHbMsg("");
_hbdata[primaryIndex].setUpValues(
_hbdata[primaryIndex].getLastHeartbeat(),
- _currentConfig.getMemberAt(primaryIndex).getHostAndPort(),
+ _rsConfig.getMemberAt(primaryIndex).getHostAndPort(),
hbResponse);
}
_currentPrimaryIndex = primaryIndex;
@@ -1274,7 +1274,7 @@ namespace {
if (_currentPrimaryIndex == -1)
return NULL;
- return &(_currentConfig.getMemberAt(_currentPrimaryIndex));
+ return &(_rsConfig.getMemberAt(_currentPrimaryIndex));
}
void TopologyCoordinatorImpl::prepareStatusResponse(
@@ -1345,13 +1345,13 @@ namespace {
bb.append("electionTime", _electionTime);
bb.appendDate("electionDate", Date_t(_electionTime.getSecs() * 1000ULL));
}
- bb.appendIntOrLL("configVersion", _currentConfig.getConfigVersion());
+ bb.appendIntOrLL("configVersion", _rsConfig.getConfigVersion());
bb.append("self", true);
membersOut.push_back(bb.obj());
}
else {
// add non-self member
- const MemberConfig& itConfig = _currentConfig.getMemberAt(itIndex);
+ const MemberConfig& itConfig = _rsConfig.getMemberAt(itIndex);
BSONObjBuilder bb;
bb.append("_id", itConfig.getId());
bb.append("name", itConfig.getHostAndPort().toString());
@@ -1406,7 +1406,7 @@ namespace {
sort(membersOut.begin(), membersOut.end());
response->append("set",
- _currentConfig.isInitialized() ? _currentConfig.getReplSetName() : "");
+ _rsConfig.isInitialized() ? _rsConfig.getReplSetName() : "");
response->append("date", now);
response->append("myState", myState.s);
@@ -1422,19 +1422,19 @@ namespace {
void TopologyCoordinatorImpl::fillIsMasterForReplSet(IsMasterResponse* response) {
const MemberState myState = getMemberState();
- if (!_currentConfig.isInitialized() || myState.removed()) {
+ if (!_rsConfig.isInitialized() || myState.removed()) {
response->markAsNoConfig();
return;
}
- response->setReplSetName(_currentConfig.getReplSetName());
- response->setReplSetVersion(_currentConfig.getConfigVersion());
+ response->setReplSetName(_rsConfig.getReplSetName());
+ response->setReplSetVersion(_rsConfig.getConfigVersion());
response->setIsMaster(myState.primary());
response->setIsSecondary(myState.secondary());
{
- for (ReplicaSetConfig::MemberIterator it = _currentConfig.membersBegin();
- it != _currentConfig.membersEnd(); ++it) {
+ for (ReplicaSetConfig::MemberIterator it = _rsConfig.membersBegin();
+ it != _rsConfig.membersEnd(); ++it) {
if (it->isHidden() || it->getSlaveDelay().total_seconds() > 0) {
continue;
}
@@ -1456,7 +1456,7 @@ namespace {
response->setPrimary(curPrimary->getHostAndPort());
}
- const MemberConfig& selfConfig = _currentConfig.getMemberAt(_selfIndex);
+ const MemberConfig& selfConfig = _rsConfig.getMemberAt(_selfIndex);
if (selfConfig.isArbiter()) {
response->setIsArbiterOnly(true);
}
@@ -1472,7 +1472,7 @@ namespace {
if (!selfConfig.shouldBuildIndexes()) {
response->setShouldBuildIndexes(false);
}
- const ReplicaSetTagConfig tagConfig = _currentConfig.getTagConfig();
+ const ReplicaSetTagConfig tagConfig = _rsConfig.getTagConfig();
if (selfConfig.hasTags(tagConfig)) {
for (MemberConfig::TagIterator tag = selfConfig.tagsBegin();
tag != selfConfig.tagsEnd(); ++tag) {
@@ -1499,9 +1499,9 @@ namespace {
response->append("info", "unfreezing");
if (_followerMode == MemberState::RS_SECONDARY &&
- _currentConfig.getNumMembers() == 1 &&
+ _rsConfig.getNumMembers() == 1 &&
_selfIndex == 0 &&
- _currentConfig.getMemberAt(_selfIndex).isElectable()) {
+ _rsConfig.getMemberAt(_selfIndex).isElectable()) {
// If we are a one-node replica set, we're the one member,
// we're electable, and we are currently in followerMode SECONDARY,
// we must transition to candidate now that our stepdown period
@@ -1529,9 +1529,9 @@ namespace {
}
if (_followerMode == MemberState::RS_SECONDARY &&
- _currentConfig.getNumMembers() == 1 &&
+ _rsConfig.getNumMembers() == 1 &&
_selfIndex == 0 &&
- _currentConfig.getMemberAt(_selfIndex).isElectable()) {
+ _rsConfig.getMemberAt(_selfIndex).isElectable()) {
// If the new config describes a one-node replica set, we're the one member,
// we're electable, and we are currently in followerMode SECONDARY,
                // we must transition to candidate, in lieu of heartbeats.
@@ -1581,8 +1581,8 @@ namespace {
}
else {
MemberHeartbeatData newHeartbeatData;
- for (int oldIndex = 0; oldIndex < _currentConfig.getNumMembers(); ++oldIndex) {
- const MemberConfig& oldMemberConfig = _currentConfig.getMemberAt(oldIndex);
+ for (int oldIndex = 0; oldIndex < _rsConfig.getNumMembers(); ++oldIndex) {
+ const MemberConfig& oldMemberConfig = _rsConfig.getMemberAt(oldIndex);
if (oldMemberConfig.getId() == newMemberConfig.getId() &&
oldMemberConfig.getHostAndPort() == newMemberConfig.getHostAndPort()) {
// This member existed in the old config with the same member ID and
@@ -1606,7 +1606,7 @@ namespace {
invariant(selfIndex < newConfig.getNumMembers());
_updateHeartbeatDataForReconfig(newConfig, selfIndex, now);
- _currentConfig = newConfig;
+ _rsConfig = newConfig;
_selfIndex = selfIndex;
_forceSyncSourceIndex = -1;
@@ -1630,9 +1630,9 @@ namespace {
_stepDownPending = false;
if (_followerMode == MemberState::RS_SECONDARY &&
- _currentConfig.getNumMembers() == 1 &&
+ _rsConfig.getNumMembers() == 1 &&
_selfIndex == 0 &&
- _currentConfig.getMemberAt(_selfIndex).isElectable()) {
+ _rsConfig.getMemberAt(_selfIndex).isElectable()) {
// If the new config describes a one-node replica set, we're the one member,
// we're electable, and we are currently in followerMode SECONDARY,
                // we must transition to candidate, in lieu of heartbeats.
@@ -1654,14 +1654,14 @@ namespace {
}
const MemberConfig& TopologyCoordinatorImpl::_selfConfig() const {
- return _currentConfig.getMemberAt(_selfIndex);
+ return _rsConfig.getMemberAt(_selfIndex);
}
TopologyCoordinatorImpl::UnelectableReasonMask TopologyCoordinatorImpl::_getUnelectableReason(
int index,
const OpTime& lastOpApplied) const {
invariant(index != _selfIndex);
- const MemberConfig& memberConfig = _currentConfig.getMemberAt(index);
+ const MemberConfig& memberConfig = _rsConfig.getMemberAt(index);
const MemberHeartbeatData& hbData = _hbdata[index];
UnelectableReasonMask result = None;
if (memberConfig.isArbiter()) {
@@ -1708,7 +1708,7 @@ namespace {
result |= StepDownPeriodActive;
}
if (_lastVote.whoId != -1 &&
- _lastVote.whoId !=_currentConfig.getMemberAt(_selfIndex).getId() &&
+ _lastVote.whoId !=_rsConfig.getMemberAt(_selfIndex).getId() &&
_lastVote.when.millis + LastVote::leaseTime.total_milliseconds() >= now.millis) {
result |= VotedTooRecently;
}
@@ -1836,7 +1836,7 @@ namespace {
continue; // skip DOWN nodes
}
- upHosts.push_back(_currentConfig.getMemberAt(itIndex).getHostAndPort());
+ upHosts.push_back(_rsConfig.getMemberAt(itIndex).getHostAndPort());
}
return upHosts;
}
@@ -1861,7 +1861,7 @@ namespace {
MemberState TopologyCoordinatorImpl::getMemberState() const {
if (_selfIndex == -1) {
- if (_currentConfig.isInitialized()) {
+ if (_rsConfig.isInitialized()) {
return MemberState::RS_REMOVED;
}
return MemberState::RS_STARTUP;
@@ -1907,7 +1907,7 @@ namespace {
bool TopologyCoordinatorImpl::stepDown(Date_t until, bool force, OpTime lastOpApplied) {
bool canStepDown = force;
- for (int i = 0; !canStepDown && i < _currentConfig.getNumMembers(); ++i) {
+ for (int i = 0; !canStepDown && i < _rsConfig.getNumMembers(); ++i) {
if (i == _selfIndex) {
continue;
}
@@ -1946,9 +1946,9 @@ namespace {
// be a candidate here. This is necessary because a single node replica set has no
// heartbeats that would normally change the role to candidate.
- if (_currentConfig.getNumMembers() == 1 &&
+ if (_rsConfig.getNumMembers() == 1 &&
_selfIndex == 0 &&
- _currentConfig.getMemberAt(_selfIndex).isElectable()) {
+ _rsConfig.getMemberAt(_selfIndex).isElectable()) {
_role = Role::candidate;
}
}
@@ -2013,7 +2013,7 @@ namespace {
return true;
}
- const int currentMemberIndex = _currentConfig.findMemberIndexByHostAndPort(currentSource);
+ const int currentMemberIndex = _rsConfig.findMemberIndexByHostAndPort(currentSource);
if (currentMemberIndex == -1) {
return true;
}
@@ -2032,7 +2032,7 @@ namespace {
it != _hbdata.end();
++it) {
const int itIndex = indexOfIterator(_hbdata, it);
- const MemberConfig& candidateConfig = _currentConfig.getMemberAt(itIndex);
+ const MemberConfig& candidateConfig = _rsConfig.getMemberAt(itIndex);
if (it->up() &&
(candidateConfig.shouldBuildIndexes() || !_selfConfig().shouldBuildIndexes()) &&
it->getState().readable() &&
diff --git a/src/mongo/db/repl/topology_coordinator_impl.h b/src/mongo/db/repl/topology_coordinator_impl.h
index c7dab508ea7..3e6c4dc824a 100644
--- a/src/mongo/db/repl/topology_coordinator_impl.h
+++ b/src/mongo/db/repl/topology_coordinator_impl.h
@@ -348,7 +348,8 @@ namespace repl {
int _selfIndex; // this node's index in _members and _currentConfig
- ReplicaSetConfig _currentConfig; // The current config, including a vector of MemberConfigs
+ ReplicaSetConfig _rsConfig; // The current config, including a vector of MemberConfigs
+
// heartbeat data for each member. It is guaranteed that this vector will be maintained
// in the same order as the MemberConfigs in _currentConfig, therefore the member config
// index can be used to index into this vector as well.
diff --git a/src/mongo/db/ttl.cpp b/src/mongo/db/ttl.cpp
index a2b5a1fdca3..d3da1f13137 100644
--- a/src/mongo/db/ttl.cpp
+++ b/src/mongo/db/ttl.cpp
@@ -100,7 +100,7 @@ namespace mongo {
// if part of replSet but not in a readable state (e.g. during initial sync), skip.
if (repl::getGlobalReplicationCoordinator()->getReplicationMode() ==
repl::ReplicationCoordinator::modeReplSet &&
- !repl::getGlobalReplicationCoordinator()->getCurrentMemberState().readable())
+ !repl::getGlobalReplicationCoordinator()->getMemberState().readable())
continue;
set<string> dbs;