author    Eric Milkie <milkie@10gen.com>  2015-04-06 09:10:57 -0400
committer Eric Milkie <milkie@10gen.com>  2015-04-07 16:25:34 -0400
commit    0f58d1037bcbfbf932e73e623772c4f815c361ad
tree      ae7ea2cf29830f0dc39e326142ddc1b6bc468804 /src/mongo/db/repl/replication_coordinator_impl.cpp
parent    72543912dca4117e1deb45a56c599657a1bf747c
SERVER-17880 Rename OpTime to Timestamp
Diffstat (limited to 'src/mongo/db/repl/replication_coordinator_impl.cpp')
-rw-r--r-- src/mongo/db/repl/replication_coordinator_impl.cpp | 60
1 file changed, 30 insertions(+), 30 deletions(-)
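
This commit is a mechanical rename across the replication coordinator: the old OpTime class, a wrapper around a BSON timestamp (32-bit seconds since the epoch plus a 32-bit increment that orders operations within the same second), becomes Timestamp, freeing the OpTime name for later reuse. As a rough, hypothetical sketch of the shape of such a type (not MongoDB's actual implementation, which differs in detail):

#include <cstdint>
#include <tuple>

// Hypothetical sketch of a BSON-timestamp-like type of the kind this commit
// renames from OpTime to Timestamp; the real class differs in detail.
class Timestamp {
public:
    Timestamp() = default;
    Timestamp(uint32_t secs, uint32_t inc) : _secs(secs), _inc(inc) {}

    uint32_t getSecs() const { return _secs; }
    uint32_t getInc() const { return _inc; }

    // Ordered first by wall-clock seconds, then by the per-second counter;
    // comparisons like slaveTime >= opTime in the diff below rely on this
    // total order.
    bool operator<(const Timestamp& rhs) const {
        return std::tie(_secs, _inc) < std::tie(rhs._secs, rhs._inc);
    }
    bool operator<=(const Timestamp& rhs) const { return !(rhs < *this); }
    bool operator>=(const Timestamp& rhs) const { return !(*this < rhs); }
    bool operator==(const Timestamp& rhs) const {
        return _secs == rhs._secs && _inc == rhs._inc;
    }

private:
    uint32_t _secs = 0;  // seconds since the Unix epoch
    uint32_t _inc = 0;   // ordinal of the operation within that second
};
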
diff --git a/src/mongo/db/repl/replication_coordinator_impl.cpp b/src/mongo/db/repl/replication_coordinator_impl.cpp
index 3ee5b1227ea..4f9e9514509 100644
--- a/src/mongo/db/repl/replication_coordinator_impl.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl.cpp
@@ -37,7 +37,7 @@
#include "mongo/base/status.h"
#include "mongo/db/concurrency/d_concurrency.h"
-#include "mongo/db/global_optime.h"
+#include "mongo/db/global_timestamp.h"
#include "mongo/db/index/index_descriptor.h"
#include "mongo/db/operation_context_noop.h"
#include "mongo/db/repl/check_quorum_for_config_change.h"
@@ -109,7 +109,7 @@ namespace {
*/
WaiterInfo(std::vector<WaiterInfo*>* _list,
unsigned int _opID,
- const OpTime* _opTime,
+ const Timestamp* _opTime,
const WriteConcernOptions* _writeConcern,
boost::condition_variable* _condVar) : list(_list),
master(true),
@@ -127,7 +127,7 @@ namespace {
std::vector<WaiterInfo*>* list;
bool master; // Set to false to indicate that stepDown was called while waiting
const unsigned int opID;
- const OpTime* opTime;
+ const Timestamp* opTime;
const WriteConcernOptions* writeConcern;
boost::condition_variable* condVar;
};
@@ -213,7 +213,7 @@ namespace {
fassertFailedNoTrace(28545);
}
- StatusWith<OpTime> lastOpTimeStatus = _externalState->loadLastOpTime(txn);
+ StatusWith<Timestamp> lastOpTimeStatus = _externalState->loadLastOpTime(txn);
// Use a callback here, because _finishLoadLocalConfig calls isself() which requires
// that the server's networking layer be up and running and accepting connections, which
@@ -230,7 +230,7 @@ namespace {
void ReplicationCoordinatorImpl::_finishLoadLocalConfig(
const ReplicationExecutor::CallbackData& cbData,
const ReplicaSetConfig& localConfig,
- const StatusWith<OpTime>& lastOpTimeStatus) {
+ const StatusWith<Timestamp>& lastOpTimeStatus) {
if (!cbData.status.isOK()) {
LOG(1) << "Loading local replica set configuration failed due to " << cbData.status;
return;
@@ -266,7 +266,7 @@ namespace {
// Do not check optime, if this node is an arbiter.
bool isArbiter = myIndex.getValue() != -1 &&
localConfig.getMemberAt(myIndex.getValue()).isArbiter();
- OpTime lastOpTime(0, 0);
+ Timestamp lastOpTime(0, 0);
if (!isArbiter) {
if (!lastOpTimeStatus.isOK()) {
warning() << "Failed to load timestamp of most recently applied operation; " <<
@@ -282,7 +282,7 @@ namespace {
const PostMemberStateUpdateAction action =
_setCurrentRSConfig_inlock(localConfig, myIndex.getValue());
_setMyLastOptime_inlock(&lk, lastOpTime, false);
- _externalState->setGlobalOpTime(lastOpTime);
+ _externalState->setGlobalTimestamp(lastOpTime);
if (lk.owns_lock()) {
lk.unlock();
}
@@ -575,7 +575,7 @@ namespace {
}
void ReplicationCoordinatorImpl::_updateSlaveInfoOptime_inlock(SlaveInfo* slaveInfo,
- OpTime ts) {
+ Timestamp ts) {
slaveInfo->opTime = ts;
@@ -646,7 +646,7 @@ namespace {
}
Status ReplicationCoordinatorImpl::setLastOptimeForSlave(const OID& rid,
- const OpTime& ts) {
+ const Timestamp& ts) {
boost::unique_lock<boost::mutex> lock(_mutex);
massert(28576,
"Received an old style replication progress update, which is only used for Master/"
@@ -682,18 +682,18 @@ namespace {
_replExecutor.wait(cbh.getValue());
}
- void ReplicationCoordinatorImpl::setMyLastOptime(const OpTime& ts) {
+ void ReplicationCoordinatorImpl::setMyLastOptime(const Timestamp& ts) {
boost::unique_lock<boost::mutex> lock(_mutex);
_setMyLastOptime_inlock(&lock, ts, false);
}
void ReplicationCoordinatorImpl::resetMyLastOptime() {
boost::unique_lock<boost::mutex> lock(_mutex);
- _setMyLastOptime_inlock(&lock, OpTime(), true);
+ _setMyLastOptime_inlock(&lock, Timestamp(), true);
}
void ReplicationCoordinatorImpl::_setMyLastOptime_inlock(
- boost::unique_lock<boost::mutex>* lock, const OpTime& ts, bool isRollbackAllowed) {
+ boost::unique_lock<boost::mutex>* lock, const Timestamp& ts, bool isRollbackAllowed) {
invariant(lock->owns_lock());
SlaveInfo* mySlaveInfo = &_slaveInfo[_getMyIndexInSlaveInfo_inlock()];
invariant(isRollbackAllowed || mySlaveInfo->opTime <= ts);
@@ -709,18 +709,18 @@ namespace {
_externalState->forwardSlaveProgress(); // Must do this outside _mutex
}
- OpTime ReplicationCoordinatorImpl::getMyLastOptime() const {
+ Timestamp ReplicationCoordinatorImpl::getMyLastOptime() const {
boost::lock_guard<boost::mutex> lock(_mutex);
return _getMyLastOptime_inlock();
}
- OpTime ReplicationCoordinatorImpl::_getMyLastOptime_inlock() const {
+ Timestamp ReplicationCoordinatorImpl::_getMyLastOptime_inlock() const {
return _slaveInfo[_getMyIndexInSlaveInfo_inlock()].opTime;
}
Status ReplicationCoordinatorImpl::setLastOptime_forTest(long long cfgVer,
long long memberId,
- const OpTime& ts) {
+ const Timestamp& ts) {
boost::lock_guard<boost::mutex> lock(_mutex);
invariant(_getReplicationMode_inlock() == modeReplSet);
@@ -824,7 +824,7 @@ namespace {
}
bool ReplicationCoordinatorImpl::_doneWaitingForReplication_inlock(
- const OpTime& opTime, const WriteConcernOptions& writeConcern) {
+ const Timestamp& opTime, const WriteConcernOptions& writeConcern) {
Status status = _checkIfWriteConcernCanBeSatisfied_inlock(writeConcern);
if (!status.isOK()) {
return true;
@@ -850,7 +850,7 @@ namespace {
}
}
- bool ReplicationCoordinatorImpl::_haveNumNodesReachedOpTime_inlock(const OpTime& opTime,
+ bool ReplicationCoordinatorImpl::_haveNumNodesReachedOpTime_inlock(const Timestamp& opTime,
int numNodes) {
if (_getMyLastOptime_inlock() < opTime) {
// Secondaries that are for some reason ahead of us should not allow us to
@@ -861,7 +861,7 @@ namespace {
for (SlaveInfoVector::iterator it = _slaveInfo.begin();
it != _slaveInfo.end(); ++it) {
- const OpTime& slaveTime = it->opTime;
+ const Timestamp& slaveTime = it->opTime;
if (slaveTime >= opTime) {
--numNodes;
}
@@ -874,13 +874,13 @@ namespace {
}
bool ReplicationCoordinatorImpl::_haveTaggedNodesReachedOpTime_inlock(
- const OpTime& opTime, const ReplicaSetTagPattern& tagPattern) {
+ const Timestamp& opTime, const ReplicaSetTagPattern& tagPattern) {
ReplicaSetTagMatch matcher(tagPattern);
for (SlaveInfoVector::iterator it = _slaveInfo.begin();
it != _slaveInfo.end(); ++it) {
- const OpTime& slaveTime = it->opTime;
+ const Timestamp& slaveTime = it->opTime;
if (slaveTime >= opTime) {
// This node has reached the desired optime, now we need to check if it is a part
// of the tagPattern.
@@ -899,7 +899,7 @@ namespace {
ReplicationCoordinator::StatusAndDuration ReplicationCoordinatorImpl::awaitReplication(
const OperationContext* txn,
- const OpTime& opTime,
+ const Timestamp& opTime,
const WriteConcernOptions& writeConcern) {
Timer timer;
boost::unique_lock<boost::mutex> lock(_mutex);
@@ -924,7 +924,7 @@ namespace {
const Timer* timer,
boost::unique_lock<boost::mutex>* lock,
const OperationContext* txn,
- const OpTime& opTime,
+ const Timestamp& opTime,
const WriteConcernOptions& writeConcern) {
const Mode replMode = _getReplicationMode_inlock();
@@ -1951,7 +1951,7 @@ namespace {
case kActionWinElection: {
boost::unique_lock<boost::mutex> lk(_mutex);
_electionId = OID::gen();
- _topCoord->processWinElection(_electionId, getNextGlobalOptime());
+ _topCoord->processWinElection(_electionId, getNextGlobalTimestamp());
_isWaitingForDrainToComplete = true;
const PostMemberStateUpdateAction nextAction =
_updateMemberStateFromTopologyCoordinator_inlock();
@@ -2049,7 +2049,7 @@ namespace {
invariant(_settings.usingReplSets());
_cancelHeartbeats();
_setConfigState_inlock(kConfigSteady);
- OpTime myOptime = _getMyLastOptime_inlock(); // Must get this before changing our config.
+ Timestamp myOptime = _getMyLastOptime_inlock(); // Must get this before changing our config.
_topCoord->updateConfig(
newConfig,
myIndex,
@@ -2146,7 +2146,7 @@ namespace {
return self.shouldBuildIndexes();
}
- std::vector<HostAndPort> ReplicationCoordinatorImpl::getHostsWrittenTo(const OpTime& op) {
+ std::vector<HostAndPort> ReplicationCoordinatorImpl::getHostsWrittenTo(const Timestamp& op) {
std::vector<HostAndPort> hosts;
boost::lock_guard<boost::mutex> lk(_mutex);
for (size_t i = 0; i < _slaveInfo.size(); ++i) {
@@ -2305,8 +2305,8 @@ namespace {
}
void ReplicationCoordinatorImpl::resetLastOpTimeFromOplog(OperationContext* txn) {
- StatusWith<OpTime> lastOpTimeStatus = _externalState->loadLastOpTime(txn);
- OpTime lastOpTime(0, 0);
+ StatusWith<Timestamp> lastOpTimeStatus = _externalState->loadLastOpTime(txn);
+ Timestamp lastOpTime(0, 0);
if (!lastOpTimeStatus.isOK()) {
warning() << "Failed to load timestamp of most recently applied operation; " <<
lastOpTimeStatus.getStatus();
@@ -2316,7 +2316,7 @@ namespace {
}
boost::unique_lock<boost::mutex> lk(_mutex);
_setMyLastOptime_inlock(&lk, lastOpTime, true);
- _externalState->setGlobalOpTime(lastOpTime);
+ _externalState->setGlobalTimestamp(lastOpTime);
}
void ReplicationCoordinatorImpl::_shouldChangeSyncSource(
@@ -2355,7 +2355,7 @@ namespace {
invariant(tagPattern.isOK());
ReplicaSetTagMatch matcher{tagPattern.getValue()};
- std::vector<OpTime> votingNodesOpTimes;
+ std::vector<Timestamp> votingNodesOpTimes;
for (const auto& sI : _slaveInfo) {
auto memberConfig = _rsConfig.findMemberByID(sI.memberId);
@@ -2375,7 +2375,7 @@ namespace {
_lastCommittedOpTime = votingNodesOpTimes[(votingNodesOpTimes.size() - 1) / 2];
}
- OpTime ReplicationCoordinatorImpl::getLastCommittedOpTime() const {
+ Timestamp ReplicationCoordinatorImpl::getLastCommittedOpTime() const {
boost::unique_lock<boost::mutex> lk(_mutex);
return _lastCommittedOpTime;
}
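
The final hunk touches the commit-point calculation: _lastCommittedOpTime is set to the median of the voting nodes' last-applied timestamps. With the vector sorted ascending (the sort happens outside this hunk), the element at index (n - 1) / 2 leaves floor(n / 2) + 1 elements at or above it, so it is the newest timestamp that a strict majority of voters has reached. A minimal standalone sketch of that selection, using hypothetical names and the Timestamp sketch above:

#include <algorithm>
#include <cassert>
#include <vector>

// Given each voting node's last-applied timestamp, return the newest
// timestamp that a majority of voters has reached. Sorting ascending and
// picking index (n - 1) / 2 leaves floor(n / 2) + 1 elements at or above
// the chosen value -- a strict majority.
Timestamp majorityCommitPoint(std::vector<Timestamp> votingNodesOpTimes) {
    assert(!votingNodesOpTimes.empty());
    std::sort(votingNodesOpTimes.begin(), votingNodesOpTimes.end());
    return votingNodesOpTimes[(votingNodesOpTimes.size() - 1) / 2];
}

// For example, with voter timestamps {10,0} {12,0} {11,3} {12,1} {11,0},
// the sorted middle element is {11,3}: three of the five voters ({11,3},
// {12,0}, {12,1}) are at or past it.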