author    Andy Schwerin <schwerin@mongodb.com>  2015-05-12 10:01:26 -0400
committer Andy Schwerin <schwerin@mongodb.com>  2015-05-12 10:01:26 -0400
commit    3681d3dbca85b25735fa0ec676828d1da191732d (patch)
tree      5106b1e29186e92f31736278f79d39b4e1947b93 /src/mongo/db
parent    9aac625685811873ffbc2d3e8d09531eff1ce10e (diff)
download  mongo-3681d3dbca85b25735fa0ec676828d1da191732d.tar.gz
Revert "SERVER-13874 Make mongo::Milliseconds et al. aliases for equivalent stdx::chrono types."
This reverts commit 9aac625685811873ffbc2d3e8d09531eff1ce10e. Committed in error.
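For context, the reverted parent commit had made mongo::Milliseconds and related helpers aliases of the stdx::chrono duration types, so call sites used count()-style duration arithmetic; this revert restores the earlier boost::posix_time durations and raw millisecond integers. A minimal sketch of the two styles, using simplified assumed names rather than the actual mongo headers:

    #include <chrono>
    #include <boost/date_time/posix_time/posix_time_types.hpp>

    int main() {
        // Reverted style: the duration type is an alias of a std::chrono duration.
        using ChronoMillis = std::chrono::milliseconds;
        ChronoMillis a{100};
        long long aCount = a.count();                  // 100

        // Restored style: boost::posix_time duration, read back via total_*().
        boost::posix_time::milliseconds b(100);
        long long bCount = b.total_milliseconds();     // 100

        return aCount == bCount ? 0 : 1;               // both spell 100 ms
    }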
Diffstat (limited to 'src/mongo/db')
-rw-r--r--  src/mongo/db/auth/user_cache_invalidator_job.cpp | 11
-rw-r--r--  src/mongo/db/concurrency/lock_state.cpp | 2
-rw-r--r--  src/mongo/db/db.cpp | 2
-rw-r--r--  src/mongo/db/dbcommands.cpp | 4
-rw-r--r--  src/mongo/db/dbhelpers.cpp | 6
-rw-r--r--  src/mongo/db/pipeline/expression.cpp | 4
-rw-r--r--  src/mongo/db/pipeline/value.cpp | 7
-rw-r--r--  src/mongo/db/pipeline/value.h | 6
-rw-r--r--  src/mongo/db/range_deleter.cpp | 40
-rw-r--r--  src/mongo/db/repl/base_cloner_test_fixture.cpp | 4
-rw-r--r--  src/mongo/db/repl/check_quorum_for_config_change_test.cpp | 62
-rw-r--r--  src/mongo/db/repl/data_replicator.cpp | 4
-rw-r--r--  src/mongo/db/repl/data_replicator_test.cpp | 2
-rw-r--r--  src/mongo/db/repl/elect_cmd_runner_test.cpp | 6
-rw-r--r--  src/mongo/db/repl/fetcher_test.cpp | 2
-rw-r--r--  src/mongo/db/repl/freshness_checker.cpp | 2
-rw-r--r--  src/mongo/db/repl/freshness_checker_test.cpp | 80
-rw-r--r--  src/mongo/db/repl/heartbeat_response_action.cpp | 6
-rw-r--r--  src/mongo/db/repl/is_master_response.cpp | 2
-rw-r--r--  src/mongo/db/repl/master_slave.cpp | 4
-rw-r--r--  src/mongo/db/repl/member_config.cpp | 4
-rw-r--r--  src/mongo/db/repl/member_config_test.cpp | 4
-rw-r--r--  src/mongo/db/repl/member_heartbeat_data.cpp | 9
-rw-r--r--  src/mongo/db/repl/network_interface_impl.cpp | 10
-rw-r--r--  src/mongo/db/repl/oplog.cpp | 2
-rw-r--r--  src/mongo/db/repl/oplogreader.cpp | 10
-rw-r--r--  src/mongo/db/repl/read_after_optime_args_test.cpp | 8
-rw-r--r--  src/mongo/db/repl/read_after_optime_response.cpp | 11
-rw-r--r--  src/mongo/db/repl/read_after_optime_response.h | 10
-rw-r--r--  src/mongo/db/repl/read_after_optime_response_test.cpp | 4
-rw-r--r--  src/mongo/db/repl/repl_set_heartbeat_response.cpp | 7
-rw-r--r--  src/mongo/db/repl/repl_set_heartbeat_response_test.cpp | 18
-rw-r--r--  src/mongo/db/repl/repl_set_html_summary.cpp | 7
-rw-r--r--  src/mongo/db/repl/replica_set_config.cpp | 4
-rw-r--r--  src/mongo/db/repl/replica_set_config.h | 2
-rw-r--r--  src/mongo/db/repl/replica_set_config_test.cpp | 6
-rw-r--r--  src/mongo/db/repl/replication_coordinator.h | 4
-rw-r--r--  src/mongo/db/repl/replication_coordinator_impl.cpp | 41
-rw-r--r--  src/mongo/db/repl/replication_coordinator_impl_elect.cpp | 6
-rw-r--r--  src/mongo/db/repl/replication_coordinator_impl_elect_test.cpp | 2
-rw-r--r--  src/mongo/db/repl/replication_coordinator_impl_heartbeat_test.cpp | 12
-rw-r--r--  src/mongo/db/repl/replication_coordinator_impl_reconfig_test.cpp | 4
-rw-r--r--  src/mongo/db/repl/replication_coordinator_impl_test.cpp | 32
-rw-r--r--  src/mongo/db/repl/replication_coordinator_test_fixture.cpp | 2
-rw-r--r--  src/mongo/db/repl/replication_executor.cpp | 7
-rw-r--r--  src/mongo/db/repl/replication_executor.h | 2
-rw-r--r--  src/mongo/db/repl/replication_executor_test.cpp | 18
-rw-r--r--  src/mongo/db/repl/replication_info.cpp | 5
-rw-r--r--  src/mongo/db/repl/replset_commands.cpp | 4
-rw-r--r--  src/mongo/db/repl/reporter_test.cpp | 2
-rw-r--r--  src/mongo/db/repl/rs_initialsync.cpp | 2
-rw-r--r--  src/mongo/db/repl/scatter_gather_test.cpp | 33
-rw-r--r--  src/mongo/db/repl/sync_source_feedback.cpp | 6
-rw-r--r--  src/mongo/db/repl/sync_tail.cpp | 6
-rw-r--r--  src/mongo/db/repl/topology_coordinator_impl.cpp | 87
-rw-r--r--  src/mongo/db/repl/topology_coordinator_impl.h | 3
-rw-r--r--  src/mongo/db/repl/topology_coordinator_impl_test.cpp | 124
-rw-r--r--  src/mongo/db/stats/range_deleter_server_status.cpp | 6
-rw-r--r--  src/mongo/db/storage/key_string.cpp | 4
-rw-r--r--  src/mongo/db/storage/mmap_v1/data_file_sync.cpp | 2
-rw-r--r--  src/mongo/db/storage/mmap_v1/dur.cpp | 3
-rw-r--r--  src/mongo/db/ttl.cpp | 7
-rw-r--r--  src/mongo/db/write_concern.cpp | 4
63 files changed, 407 insertions(+), 393 deletions(-)
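Most hunks below apply the same mechanical substitutions in reverse: condition-variable waits go from wait_for(lock, duration) back to timed_wait(lock, posix_duration), durationCount<Seconds>(d) becomes d.total_seconds(), and Date_t::fromMillisSinceEpoch(x) / Date_t::now() become Date_t(x) / curTimeMillis64(). As an illustration of the condition-variable case only, using generic boost types as an assumption rather than code from this tree:

    #include <boost/thread/condition_variable.hpp>
    #include <boost/thread/mutex.hpp>
    #include <boost/chrono.hpp>
    #include <boost/date_time/posix_time/posix_time_types.hpp>

    // Both calls block for up to 100 ms or until the condition variable is notified.
    void waitBothWays(boost::condition_variable& cv, boost::mutex& m) {
        boost::unique_lock<boost::mutex> lk(m);

        // chrono-style wait (the style this commit reverts away from):
        cv.wait_for(lk, boost::chrono::milliseconds(100));

        // posix_time-style wait (the style this commit restores); returns false on timeout:
        cv.timed_wait(lk, boost::posix_time::milliseconds(100));
    }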
diff --git a/src/mongo/db/auth/user_cache_invalidator_job.cpp b/src/mongo/db/auth/user_cache_invalidator_job.cpp
index f6a17cba04a..7669d0201bc 100644
--- a/src/mongo/db/auth/user_cache_invalidator_job.cpp
+++ b/src/mongo/db/auth/user_cache_invalidator_job.cpp
@@ -138,11 +138,14 @@ namespace {
while (true) {
boost::unique_lock<boost::mutex> lock(invalidationIntervalMutex);
- Date_t sleepUntil = lastInvalidationTime + Seconds(userCacheInvalidationIntervalSecs);
+ Date_t sleepUntil = Date_t(
+ lastInvalidationTime.millis + userCacheInvalidationIntervalSecs * 1000);
Date_t now(curTimeMillis64());
- while (now < sleepUntil) {
- invalidationIntervalChangedCondition.wait_for(lock, sleepUntil - now);
- sleepUntil = lastInvalidationTime + Seconds(userCacheInvalidationIntervalSecs);
+ while (now.millis < sleepUntil.millis) {
+ invalidationIntervalChangedCondition.timed_wait(lock,
+ Milliseconds(sleepUntil - now));
+ sleepUntil = Date_t(
+ lastInvalidationTime.millis + (userCacheInvalidationIntervalSecs * 1000));
now = Date_t(curTimeMillis64());
}
lastInvalidationTime = now;
diff --git a/src/mongo/db/concurrency/lock_state.cpp b/src/mongo/db/concurrency/lock_state.cpp
index b2047138669..d5d5654fbc5 100644
--- a/src/mongo/db/concurrency/lock_state.cpp
+++ b/src/mongo/db/concurrency/lock_state.cpp
@@ -229,7 +229,7 @@ namespace {
LockResult CondVarLockGrantNotification::wait(unsigned timeoutMs) {
boost::unique_lock<boost::mutex> lock(_mutex);
while (_result == LOCK_INVALID) {
- if (boost::cv_status::timeout == _cond.wait_for(lock, Milliseconds(timeoutMs))) {
+ if (!_cond.timed_wait(lock, Milliseconds(timeoutMs))) {
// Timeout
return LOCK_TIMEOUT;
}
diff --git a/src/mongo/db/db.cpp b/src/mongo/db/db.cpp
index fa360169fa0..1b4963fa4b0 100644
--- a/src/mongo/db/db.cpp
+++ b/src/mongo/db/db.cpp
@@ -205,7 +205,7 @@ namespace mongo {
toLog.append( "hostname", getHostNameCached() );
toLog.appendTimeT( "startTime", time(0) );
- toLog.append( "startTimeLocal", dateToCtimeString(Date_t::now()) );
+ toLog.append( "startTimeLocal", dateToCtimeString(curTimeMillis64()) );
toLog.append("cmdLine", serverGlobalParams.parsedOpts);
toLog.append( "pid", ProcessId::getCurrent().asLongLong() );
diff --git a/src/mongo/db/dbcommands.cpp b/src/mongo/db/dbcommands.cpp
index c59661c2e4d..89159347ed0 100644
--- a/src/mongo/db/dbcommands.cpp
+++ b/src/mongo/db/dbcommands.cpp
@@ -136,8 +136,8 @@ namespace mongo {
Status status = repl::getGlobalReplicationCoordinator()->stepDown(
txn,
force,
- Seconds(timeoutSecs),
- Seconds(120));
+ repl::ReplicationCoordinator::Milliseconds(timeoutSecs * 1000),
+ repl::ReplicationCoordinator::Milliseconds(120 * 1000));
if (!status.isOK() && status.code() != ErrorCodes::NotMaster) { // ignore not master
return appendCommandStatus(result, status);
}
diff --git a/src/mongo/db/dbhelpers.cpp b/src/mongo/db/dbhelpers.cpp
index 24be8b1f4f3..1d7cae4632c 100644
--- a/src/mongo/db/dbhelpers.cpp
+++ b/src/mongo/db/dbhelpers.cpp
@@ -355,7 +355,7 @@ namespace mongo {
long long numDeleted = 0;
- Milliseconds millisWaitingForReplication{0};
+ long long millisWaitingForReplication = 0;
while ( 1 ) {
// Scoping for write lock.
@@ -468,14 +468,14 @@ namespace mongo {
else {
massertStatusOK(replStatus.status);
}
- millisWaitingForReplication += replStatus.duration;
+ millisWaitingForReplication += replStatus.duration.total_milliseconds();
}
}
if (writeConcern.shouldWaitForOtherNodes())
log(LogComponent::kSharding)
<< "Helpers::removeRangeUnlocked time spent waiting for replication: "
- << durationCount<Milliseconds>(millisWaitingForReplication) << "ms" << endl;
+ << millisWaitingForReplication << "ms" << endl;
MONGO_LOG_COMPONENT(1, LogComponent::kSharding)
<< "end removal of " << min << " to " << max << " in " << ns
diff --git a/src/mongo/db/pipeline/expression.cpp b/src/mongo/db/pipeline/expression.cpp
index e8c121c2b04..defc38d8c12 100644
--- a/src/mongo/db/pipeline/expression.cpp
+++ b/src/mongo/db/pipeline/expression.cpp
@@ -422,7 +422,7 @@ namespace {
if (haveDate) {
if (totalType == NumberDouble)
longTotal = static_cast<long long>(doubleTotal);
- return Value(Date_t::fromMillisSinceEpoch(longTotal));
+ return Value(Date_t(longTotal));
}
else if (totalType == NumberLong) {
return Value(longTotal);
@@ -2548,7 +2548,7 @@ namespace {
}
else if (rhs.numeric()) {
long long millisSinceEpoch = lhs.getDate() - rhs.coerceToLong();
- return Value(Date_t::fromMillisSinceEpoch(millisSinceEpoch));
+ return Value(Date_t(millisSinceEpoch));
}
else {
uasserted(16613, str::stream() << "cant $subtract a "
diff --git a/src/mongo/db/pipeline/value.cpp b/src/mongo/db/pipeline/value.cpp
index c949601ac4d..b156d290613 100644
--- a/src/mongo/db/pipeline/value.cpp
+++ b/src/mongo/db/pipeline/value.cpp
@@ -186,7 +186,8 @@ namespace mongo {
break;
case Date:
- _storage.dateValue = elem.date().toMillisSinceEpoch();
+ // this is really signed but typed as unsigned for historical reasons
+ _storage.dateValue = static_cast<long long>(elem.date().millis);
break;
case RegEx: {
@@ -287,7 +288,7 @@ namespace mongo {
case NumberDouble: return builder << val.getDouble();
case String: return builder << val.getStringData();
case Bool: return builder << val.getBool();
- case Date: return builder << Date_t::fromMillisSinceEpoch(val.getDate());
+ case Date: return builder << Date_t(val.getDate());
case bsonTimestamp: return builder << val.getTimestamp();
case Object: return builder << val.getDocument();
case Symbol: return builder << BSONSymbol(val.getStringData());
@@ -1017,7 +1018,7 @@ namespace mongo {
case NumberLong: return Value(buf.read<long long>());
case NumberDouble: return Value(buf.read<double>());
case Bool: return Value(bool(buf.read<char>()));
- case Date: return Value(Date_t::fromMillisSinceEpoch(buf.read<long long>()));
+ case Date: return Value(Date_t(buf.read<long long>()));
case bsonTimestamp: return Value(buf.read<Timestamp>());
// types that are like strings
diff --git a/src/mongo/db/pipeline/value.h b/src/mongo/db/pipeline/value.h
index ecc66719d11..5324cc1b432 100644
--- a/src/mongo/db/pipeline/value.h
+++ b/src/mongo/db/pipeline/value.h
@@ -91,7 +91,9 @@ namespace mongo {
explicit Value(const UndefinedLabeler&) : _storage(Undefined) {} // BSONUndefined
explicit Value(const MinKeyLabeler&) : _storage(MinKey) {} // MINKEY
explicit Value(const MaxKeyLabeler&) : _storage(MaxKey) {} // MAXKEY
- explicit Value(const Date_t& date) : _storage(Date, date.toMillisSinceEpoch()) {}
+ explicit Value(const Date_t& date)
+ : _storage(Date, static_cast<long long>(date.millis)) // millis really signed
+ {}
// TODO: add an unsafe version that can share storage with the BSONElement
/// Deep-convert from BSONElement to Value
@@ -315,7 +317,7 @@ namespace mongo {
inline Timestamp Value::getTimestamp() const {
verify(getType() == bsonTimestamp);
- return Timestamp(_storage.timestampValue);
+ return Date_t(_storage.timestampValue);
}
inline const char* Value::getRegex() const {
diff --git a/src/mongo/db/range_deleter.cpp b/src/mongo/db/range_deleter.cpp
index caa2c63804c..bde2e3ad36c 100644
--- a/src/mongo/db/range_deleter.cpp
+++ b/src/mongo/db/range_deleter.cpp
@@ -83,28 +83,28 @@ namespace mongo {
// We always log the first cursors waiting message (so we have cursor ids in the logs).
// After 15 minutes (the cursor timeout period), we start logging additional messages at
// a 1 minute interval.
- static const auto kLogCursorsThreshold = stdx::chrono::minutes{15};
- static const auto kLogCursorsInterval = stdx::chrono::minutes{1};
+ static const long long kLogCursorsThresholdMillis = 15 * 60 * 1000;
+ static const long long kLogCursorsIntervalMillis = 1 * 60 * 1000;
Date_t currentTime = jsTime();
- Milliseconds elapsedMillisSinceQueued{0};
+ long long elapsedMillisSinceQueued = 0;
// We always log the first message when lastLoggedTime == 0
- if (entry->lastLoggedTS != Date_t()) {
+ if (entry->lastLoggedTS != 0) {
if (currentTime > entry->stats.queueStartTS)
elapsedMillisSinceQueued = currentTime - entry->stats.queueStartTS;
// Not logging, threshold not passed
- if (elapsedMillisSinceQueued < kLogCursorsThreshold)
+ if (elapsedMillisSinceQueued < kLogCursorsThresholdMillis)
return;
- Milliseconds elapsedMillisSinceLog{0};
+ long long elapsedMillisSinceLog = 0;
if (currentTime > entry->lastLoggedTS)
elapsedMillisSinceLog = currentTime - entry->lastLoggedTS;
// Not logging, logged a short time ago
- if (elapsedMillisSinceLog < kLogCursorsInterval)
+ if (elapsedMillisSinceLog < kLogCursorsIntervalMillis)
return;
}
@@ -119,10 +119,9 @@ namespace mongo {
log() << "waiting for open cursors before removing range "
<< "[" << entry->options.range.minKey << ", " << entry->options.range.maxKey << ") "
<< "in " << entry->options.range.ns
- << (entry->lastLoggedTS == Date_t() ?
+ << (entry->lastLoggedTS == 0 ?
string("") :
- string(str::stream() << ", elapsed secs: " <<
- durationCount<Seconds>(elapsedMillisSinceQueued)))
+ string(str::stream() << ", elapsed secs: " << elapsedMillisSinceQueued / 1000))
<< ", cursor ids: [" << string(cursorList) << "]";
entry->lastLoggedTS = currentTime;
@@ -277,23 +276,21 @@ namespace {
repl::ReplicationCoordinator::StatusAndDuration replStatus =
repl::getGlobalReplicationCoordinator()->awaitReplicationOfLastOpForClient(
txn, writeConcern);
- Milliseconds elapsedTime = replStatus.duration;
+ repl::ReplicationCoordinator::Milliseconds elapsedTime = replStatus.duration;
if (replStatus.status.code() == ErrorCodes::ExceededTimeLimit) {
*errMsg = str::stream() << "rangeDeleter timed out after "
- << durationCount<Seconds>(elapsedTime)
- << " seconds while waiting"
- " for deletions to be replicated to majority nodes";
+ << elapsedTime.total_seconds() << " seconds while waiting"
+ << " for deletions to be replicated to majority nodes";
log() << *errMsg;
}
else if (replStatus.status.code() == ErrorCodes::NotMaster) {
*errMsg = str::stream() << "rangeDeleter no longer PRIMARY after "
- << durationCount<Seconds>(elapsedTime)
- << " seconds while waiting"
- " for deletions to be replicated to majority nodes";
+ << elapsedTime.total_seconds() << " seconds while waiting"
+ << " for deletions to be replicated to majority nodes";
}
else {
- LOG(elapsedTime < Seconds(30) ? 1 : 0)
- << "rangeDeleter took " << durationCount<Seconds>(elapsedTime) << " seconds "
+ LOG(elapsedTime.total_seconds() < 30 ? 1 : 0)
+ << "rangeDeleter took " << elapsedTime.total_seconds() << " seconds "
<< " waiting for deletes to be replicated to majority nodes";
fassert(18512, replStatus.status);
@@ -604,7 +601,10 @@ namespace {
}
RangeDeleteEntry::RangeDeleteEntry(const RangeDeleterOptions& options)
- : options(options), notifyDone(NULL) {}
+ : options(options),
+ notifyDone(NULL),
+ lastLoggedTS(0) {
+ }
BSONObj RangeDeleteEntry::toBSON() const {
BSONObjBuilder builder;
diff --git a/src/mongo/db/repl/base_cloner_test_fixture.cpp b/src/mongo/db/repl/base_cloner_test_fixture.cpp
index 2abe7d75a01..45d1ca8b29d 100644
--- a/src/mongo/db/repl/base_cloner_test_fixture.cpp
+++ b/src/mongo/db/repl/base_cloner_test_fixture.cpp
@@ -131,7 +131,7 @@ namespace repl {
boost::unique_lock<boost::mutex> lk(_mutex);
if (_status == getDefaultStatus()) {
try {
- _setStatusCondition.wait_for(lk, Milliseconds(1000));
+ _setStatusCondition.timed_wait(lk, ReplicationExecutor::Milliseconds(1000));
}
catch (const boost::thread_interrupted&) {
}
@@ -142,7 +142,7 @@ namespace repl {
void BaseClonerTest::scheduleNetworkResponse(NetworkOperationIterator noi,
const BSONObj& obj) {
auto net = getNet();
- Milliseconds millis(0);
+ ReplicationExecutor::Milliseconds millis(0);
RemoteCommandResponse response(obj, millis);
ReplicationExecutor::ResponseStatus responseStatus(response);
net->scheduleResponse(noi, net->now(), responseStatus);
diff --git a/src/mongo/db/repl/check_quorum_for_config_change_test.cpp b/src/mongo/db/repl/check_quorum_for_config_change_test.cpp
index dbb5fb562f1..677141c727e 100644
--- a/src/mongo/db/repl/check_quorum_for_config_change_test.cpp
+++ b/src/mongo/db/repl/check_quorum_for_config_change_test.cpp
@@ -190,12 +190,12 @@ namespace {
const int numCommandsExpected = config.getNumMembers() - 1;
for (int i = 0; i < numCommandsExpected; ++i) {
_net->scheduleResponse(_net->getNextReadyRequest(),
- startDate + Milliseconds(10),
+ startDate + 10,
ResponseStatus(ErrorCodes::NoSuchKey, "No reply"));
}
- _net->runUntil(startDate + Milliseconds(10));
+ _net->runUntil(startDate + 10);
_net->exitNetwork();
- ASSERT_EQUALS(startDate + Milliseconds(10), _net->now());
+ ASSERT_EQUALS(startDate + 10, _net->now());
Status status = waitForQuorumCheck();
ASSERT_EQUALS(ErrorCodes::NodeNotFound, status);
ASSERT_REASON_CONTAINS(
@@ -248,11 +248,11 @@ namespace {
ASSERT(seenHosts.insert(request.target).second) <<
"Already saw " << request.target.toString();
_net->scheduleResponse(noi,
- startDate + Milliseconds(10),
+ startDate + 10,
ResponseStatus(RemoteCommandResponse(
BSON("ok" << 1), Milliseconds(8))));
}
- _net->runUntil(startDate + Milliseconds(10));
+ _net->runUntil(startDate + 10);
_net->exitNetwork();
ASSERT_OK(waitForQuorumCheck());
}
@@ -291,17 +291,17 @@ namespace {
"Already saw " << request.target.toString();
if (request.target == HostAndPort("h2", 1)) {
_net->scheduleResponse(noi,
- startDate + Milliseconds(10),
+ startDate + 10,
ResponseStatus(ErrorCodes::NoSuchKey, "No response"));
}
else {
_net->scheduleResponse(noi,
- startDate + Milliseconds(10),
+ startDate + 10,
ResponseStatus(RemoteCommandResponse(
BSON("ok" << 1), Milliseconds(8))));
}
}
- _net->runUntil(startDate + Milliseconds(10));
+ _net->runUntil(startDate + 10);
_net->exitNetwork();
Status status = waitForQuorumCheck();
ASSERT_EQUALS(ErrorCodes::NodeNotFound, status);
@@ -346,19 +346,19 @@ namespace {
"Already saw " << request.target.toString();
if (request.target == HostAndPort("h4", 1)) {
_net->scheduleResponse(noi,
- startDate + Milliseconds(10),
+ startDate + 10,
ResponseStatus(RemoteCommandResponse(
BSON("ok" << 0 << "mismatch" << true),
Milliseconds(8))));
}
else {
_net->scheduleResponse(noi,
- startDate + Milliseconds(10),
+ startDate + 10,
ResponseStatus(RemoteCommandResponse(
BSON("ok" << 1), Milliseconds(8))));
}
}
- _net->runUntil(startDate + Milliseconds(10));
+ _net->runUntil(startDate + 10);
_net->exitNetwork();
Status status = waitForQuorumCheck();
ASSERT_EQUALS(ErrorCodes::NewReplicaSetConfigurationIncompatible, status);
@@ -402,7 +402,7 @@ namespace {
"Already saw " << request.target.toString();
if (request.target == HostAndPort("h5", 1)) {
_net->scheduleResponse(noi,
- startDate + Milliseconds(10),
+ startDate + 10,
ResponseStatus(RemoteCommandResponse(
BSON("ok" << 0 <<
"set" << "rs0" <<
@@ -411,12 +411,12 @@ namespace {
}
else {
_net->scheduleResponse(noi,
- startDate + Milliseconds(10),
+ startDate + 10,
ResponseStatus(RemoteCommandResponse(
BSON("ok" << 1), Milliseconds(8))));
}
}
- _net->runUntil(startDate + Milliseconds(10));
+ _net->runUntil(startDate + 10);
_net->exitNetwork();
Status status = waitForQuorumCheck();
ASSERT_EQUALS(ErrorCodes::NewReplicaSetConfigurationIncompatible, status);
@@ -463,7 +463,7 @@ namespace {
"Already saw " << request.target.toString();
if (request.target == HostAndPort("h5", 1)) {
_net->scheduleResponse(noi,
- startDate + Milliseconds(10),
+ startDate + 10,
ResponseStatus(RemoteCommandResponse(
BSON("ok" << 0 <<
"set" << "rs0" <<
@@ -474,7 +474,7 @@ namespace {
_net->blackHole(noi);
}
}
- _net->runUntil(startDate + Milliseconds(10));
+ _net->runUntil(startDate + 10);
_net->exitNetwork();
Status status = waitForQuorumCheck();
ASSERT_EQUALS(ErrorCodes::NewReplicaSetConfigurationIncompatible, status);
@@ -522,7 +522,7 @@ namespace {
hbResp.noteHasData();
if (request.target == HostAndPort("h5", 1)) {
_net->scheduleResponse(noi,
- startDate + Milliseconds(10),
+ startDate + 10,
ResponseStatus(RemoteCommandResponse(
hbResp.toBSON(),
Milliseconds(8))));
@@ -531,7 +531,7 @@ namespace {
_net->blackHole(noi);
}
}
- _net->runUntil(startDate + Milliseconds(10));
+ _net->runUntil(startDate + 10);
_net->exitNetwork();
Status status = waitForQuorumCheck();
ASSERT_EQUALS(ErrorCodes::CannotInitializeNodeWithData, status);
@@ -571,7 +571,7 @@ namespace {
"Already saw " << request.target.toString();
if (request.target == HostAndPort("h1", 1)) {
_net->scheduleResponse(noi,
- startDate + Milliseconds(10),
+ startDate + 10,
ResponseStatus(RemoteCommandResponse(
BSON("ok" << 0 <<
"set" << "rs0" <<
@@ -582,7 +582,7 @@ namespace {
_net->blackHole(noi);
}
}
- _net->runUntil(startDate + Milliseconds(10));
+ _net->runUntil(startDate + 10);
_net->exitNetwork();
Status status = waitForQuorumCheck();
ASSERT_EQUALS(ErrorCodes::NewReplicaSetConfigurationIncompatible, status);
@@ -623,18 +623,18 @@ namespace {
"Already saw " << request.target.toString();
if (request.target == HostAndPort("h2", 1)) {
_net->scheduleResponse(noi,
- startDate + Milliseconds(10),
+ startDate + 10,
ResponseStatus(RemoteCommandResponse(
BSON("ok" << 0 << "mismatch" << true),
Milliseconds(8))));
}
else {
_net->scheduleResponse(noi,
- startDate + Milliseconds(10),
+ startDate + 10,
ResponseStatus(ErrorCodes::NoSuchKey, "No response"));
}
}
- _net->runUntil(startDate + Milliseconds(10));
+ _net->runUntil(startDate + 10);
_net->exitNetwork();
Status status = waitForQuorumCheck();
ASSERT_EQUALS(ErrorCodes::NewReplicaSetConfigurationIncompatible, status);
@@ -677,18 +677,18 @@ namespace {
"Already saw " << request.target.toString();
if (request.target == HostAndPort("h1", 1) || request.target == HostAndPort("h5", 1)) {
_net->scheduleResponse(noi,
- startDate + Milliseconds(10),
+ startDate + 10,
ResponseStatus(RemoteCommandResponse(
BSON("ok" << 1),
Milliseconds(8))));
}
else {
_net->scheduleResponse(noi,
- startDate + Milliseconds(10),
+ startDate + 10,
ResponseStatus(ErrorCodes::NoSuchKey, "No response"));
}
}
- _net->runUntil(startDate + Milliseconds(10));
+ _net->runUntil(startDate + 10);
_net->exitNetwork();
Status status = waitForQuorumCheck();
ASSERT_EQUALS(ErrorCodes::NodeNotFound, status);
@@ -730,18 +730,18 @@ namespace {
"Already saw " << request.target.toString();
if (request.target == HostAndPort("h5", 1)) {
_net->scheduleResponse(noi,
- startDate + Milliseconds(10),
+ startDate + 10,
ResponseStatus(RemoteCommandResponse(
BSON("ok" << 1),
Milliseconds(8))));
}
else {
_net->scheduleResponse(noi,
- startDate + Milliseconds(10),
+ startDate + 10,
ResponseStatus(ErrorCodes::NoSuchKey, "No response"));
}
}
- _net->runUntil(startDate + Milliseconds(10));
+ _net->runUntil(startDate + 10);
_net->exitNetwork();
Status status = waitForQuorumCheck();
ASSERT_EQUALS(ErrorCodes::NodeNotFound, status);
@@ -779,7 +779,7 @@ namespace {
"Already saw " << request.target.toString();
if (request.target == HostAndPort("h1", 1) || request.target == HostAndPort("h2", 1)) {
_net->scheduleResponse(noi,
- startDate + Milliseconds(10),
+ startDate + 10,
ResponseStatus(RemoteCommandResponse(
BSON("ok" << 1),
Milliseconds(8))));
@@ -788,7 +788,7 @@ namespace {
_net->blackHole(noi);
}
}
- _net->runUntil(startDate + Milliseconds(10));
+ _net->runUntil(startDate + 10);
_net->exitNetwork();
ASSERT_OK(waitForQuorumCheck());
}
diff --git a/src/mongo/db/repl/data_replicator.cpp b/src/mongo/db/repl/data_replicator.cpp
index 288f218c19f..4d1964c59ae 100644
--- a/src/mongo/db/repl/data_replicator.cpp
+++ b/src/mongo/db/repl/data_replicator.cpp
@@ -61,7 +61,7 @@ namespace repl {
MONGO_FP_DECLARE(failInitialSyncWithBadHost);
namespace {
- const Milliseconds NoSyncSourceRetryDelayMS{4000};
+ int NoSyncSourceRetryDelayMS = 4000;
std::string toString(DataReplicatiorState s) {
switch (s) {
@@ -499,7 +499,7 @@ namespace repl {
if (status.code() == ErrorCodes::InvalidSyncSource) {
// Error, sync source
- Date_t until{};
+ Date_t until = 0;
_replCoord->blacklistSyncSource(_syncSource, until);
_syncSource = HostAndPort();
}
diff --git a/src/mongo/db/repl/data_replicator_test.cpp b/src/mongo/db/repl/data_replicator_test.cpp
index 8fc727c7b38..db8ed712737 100644
--- a/src/mongo/db/repl/data_replicator_test.cpp
+++ b/src/mongo/db/repl/data_replicator_test.cpp
@@ -84,7 +84,7 @@ namespace {
void scheduleNetworkResponse(const BSONObj& obj) {
NetworkInterfaceMock* net = getNet();
ASSERT_TRUE(net->hasReadyRequests());
- Milliseconds millis(0);
+ ReplicationExecutor::Milliseconds millis(0);
RemoteCommandResponse response(obj, millis);
ReplicationExecutor::ResponseStatus responseStatus(response);
net->scheduleResponse(net->getNextReadyRequest(), net->now(), responseStatus);
diff --git a/src/mongo/db/repl/elect_cmd_runner_test.cpp b/src/mongo/db/repl/elect_cmd_runner_test.cpp
index f4d4c913bf1..ef9751dc084 100644
--- a/src/mongo/db/repl/elect_cmd_runner_test.cpp
+++ b/src/mongo/db/repl/elect_cmd_runner_test.cpp
@@ -199,15 +199,15 @@ namespace {
ASSERT_EQUALS(stripRound(electRequest), stripRound(noi->getRequest().cmdObj));
ASSERT_EQUALS(HostAndPort("h1"), noi->getRequest().target);
_net->scheduleResponse(noi,
- startDate + Milliseconds(10),
+ startDate + 10,
ResponseStatus(RemoteCommandResponse(
BSON("ok" << 1 <<
"vote" << 1 <<
"round" << 380865962699346850ll),
Milliseconds(8))));
- _net->runUntil(startDate + Milliseconds(10));
+ _net->runUntil(startDate + 10);
_net->exitNetwork();
- ASSERT_EQUALS(startDate + Milliseconds(10), _net->now());
+ ASSERT_EQUALS(startDate + 10, _net->now());
waitForTest();
ASSERT_EQUALS(electCmdRunner.getReceivedVotes(), 2);
}
diff --git a/src/mongo/db/repl/fetcher_test.cpp b/src/mongo/db/repl/fetcher_test.cpp
index c78ade64872..427f888ff50 100644
--- a/src/mongo/db/repl/fetcher_test.cpp
+++ b/src/mongo/db/repl/fetcher_test.cpp
@@ -109,7 +109,7 @@ namespace {
void FetcherTest::scheduleNetworkResponse(const BSONObj& obj) {
NetworkInterfaceMock* net = getNet();
ASSERT_TRUE(net->hasReadyRequests());
- Milliseconds millis(0);
+ ReplicationExecutor::Milliseconds millis(0);
RemoteCommandResponse response(obj, millis);
ReplicationExecutor::ResponseStatus responseStatus(response);
net->scheduleResponse(net->getNextReadyRequest(), net->now(), responseStatus);
diff --git a/src/mongo/db/repl/freshness_checker.cpp b/src/mongo/db/repl/freshness_checker.cpp
index 17a501f2ce4..2c5ef4872b8 100644
--- a/src/mongo/db/repl/freshness_checker.cpp
+++ b/src/mongo/db/repl/freshness_checker.cpp
@@ -88,7 +88,7 @@ namespace repl {
BSONObjBuilder freshCmdBuilder;
freshCmdBuilder.append("replSetFresh", 1);
freshCmdBuilder.append("set", _rsConfig.getReplSetName());
- freshCmdBuilder.append("opTime", Date_t::fromMillisSinceEpoch(_lastOpTimeApplied.asLL()));
+ freshCmdBuilder.append("opTime", Date_t(_lastOpTimeApplied.asULL()));
freshCmdBuilder.append("who", selfConfig.getHostAndPort().toString());
freshCmdBuilder.appendIntOrLL("cfgver", _rsConfig.getConfigVersion());
freshCmdBuilder.append("id", selfConfig.getId());
diff --git a/src/mongo/db/repl/freshness_checker_test.cpp b/src/mongo/db/repl/freshness_checker_test.cpp
index 6dbf3e666bc..08766cfd7ca 100644
--- a/src/mongo/db/repl/freshness_checker_test.cpp
+++ b/src/mongo/db/repl/freshness_checker_test.cpp
@@ -186,7 +186,7 @@ namespace {
ASSERT_EQUALS(HostAndPort("h1"), noi->getRequest().target);
_net->scheduleResponse(
noi,
- startDate + Milliseconds(10),
+ startDate + 10,
ResponseStatus(RemoteCommandResponse(
BSON("ok" << 1 <<
"id" << 2 <<
@@ -196,9 +196,9 @@ namespace {
"opTime" << Date_t(Timestamp(0,0).asULL())),
Milliseconds(8))));
}
- _net->runUntil(startDate + Milliseconds(10));
+ _net->runUntil(startDate + 10);
_net->exitNetwork();
- ASSERT_EQUALS(startDate + Milliseconds(10), _net->now());
+ ASSERT_EQUALS(startDate + 10, _net->now());
waitOnChecker();
ASSERT_EQUALS(shouldAbortElection(), FreshnessChecker::FreshnessTie);
}
@@ -254,7 +254,7 @@ namespace {
ASSERT_EQUALS(HostAndPort("h1"), noi->getRequest().target);
_net->scheduleResponse(
noi,
- startDate + Milliseconds(10),
+ startDate + 10,
ResponseStatus(RemoteCommandResponse(
BSON("ok" << 1 <<
"id" << 2 <<
@@ -265,9 +265,9 @@ namespace {
"opTime" << Date_t(Timestamp(0,0).asULL())),
Milliseconds(8))));
}
- _net->runUntil(startDate + Milliseconds(10));
+ _net->runUntil(startDate + 10);
_net->exitNetwork();
- ASSERT_EQUALS(startDate + Milliseconds(10), _net->now());
+ ASSERT_EQUALS(startDate + 10, _net->now());
waitOnChecker();
stopCapturingLogMessages();
@@ -300,7 +300,7 @@ namespace {
ASSERT_EQUALS(HostAndPort("h1"), noi->getRequest().target);
_net->scheduleResponse(
noi,
- startDate + Milliseconds(10),
+ startDate + 10,
ResponseStatus(RemoteCommandResponse(
BSON("ok" << 1 <<
"id" << 2 <<
@@ -310,9 +310,9 @@ namespace {
"opTime" << Date_t(Timestamp(10,0).asULL())),
Milliseconds(8))));
}
- _net->runUntil(startDate + Milliseconds(10));
+ _net->runUntil(startDate + 10);
_net->exitNetwork();
- ASSERT_EQUALS(startDate + Milliseconds(10), _net->now());
+ ASSERT_EQUALS(startDate + 10, _net->now());
waitOnChecker();
stopCapturingLogMessages();
@@ -344,7 +344,7 @@ namespace {
ASSERT_EQUALS(HostAndPort("h1"), noi->getRequest().target);
_net->scheduleResponse(
noi,
- startDate + Milliseconds(10),
+ startDate + 10,
ResponseStatus(RemoteCommandResponse(
BSON("ok" << 1 <<
"id" << 2 <<
@@ -354,9 +354,9 @@ namespace {
"opTime" << 3),
Milliseconds(8))));
}
- _net->runUntil(startDate + Milliseconds(10));
+ _net->runUntil(startDate + 10);
_net->exitNetwork();
- ASSERT_EQUALS(startDate + Milliseconds(10), _net->now());
+ ASSERT_EQUALS(startDate + 10, _net->now());
waitOnChecker();
stopCapturingLogMessages();
@@ -391,7 +391,7 @@ namespace {
ASSERT_EQUALS(HostAndPort("h1"), noi->getRequest().target);
_net->scheduleResponse(
noi,
- startDate + Milliseconds(10),
+ startDate + 10,
ResponseStatus(RemoteCommandResponse(
BSON("ok" << 1 <<
"id" << 2 <<
@@ -403,9 +403,9 @@ namespace {
"opTime" << Date_t(Timestamp(0,0).asULL())),
Milliseconds(8))));
}
- _net->runUntil(startDate + Milliseconds(10));
+ _net->runUntil(startDate + 10);
_net->exitNetwork();
- ASSERT_EQUALS(startDate + Milliseconds(10), _net->now());
+ ASSERT_EQUALS(startDate + 10, _net->now());
waitOnChecker();
stopCapturingLogMessages();
@@ -466,14 +466,14 @@ namespace {
}
_net->scheduleResponse(
noi,
- startDate + Milliseconds(10),
+ startDate + 10,
ResponseStatus(RemoteCommandResponse(
responseBuilder.obj(),
Milliseconds(8))));
}
- _net->runUntil(startDate + Milliseconds(10));
+ _net->runUntil(startDate + 10);
_net->exitNetwork();
- ASSERT_EQUALS(startDate + Milliseconds(10), _net->now());
+ ASSERT_EQUALS(startDate + 10, _net->now());
waitOnChecker();
stopCapturingLogMessages();
ASSERT_EQUALS(shouldAbortElection(), FreshnessChecker::FresherNodeFound);
@@ -527,7 +527,7 @@ namespace {
"opTime" << Date_t(Timestamp(20,0).asULL());
_net->scheduleResponse(
noi,
- startDate + Milliseconds(20),
+ startDate + 20,
ResponseStatus(RemoteCommandResponse(
responseBuilder.obj(),
Milliseconds(8))));
@@ -542,17 +542,17 @@ namespace {
"opTime" << Date_t(Timestamp(10,0).asULL());
_net->scheduleResponse(
noi,
- startDate + Milliseconds(10),
+ startDate + 10,
ResponseStatus(RemoteCommandResponse(
responseBuilder.obj(),
Milliseconds(8))));
}
}
- _net->runUntil(startDate + Milliseconds(10));
- ASSERT_EQUALS(startDate + Milliseconds(10), _net->now());
+ _net->runUntil(startDate + 10);
+ ASSERT_EQUALS(startDate + 10, _net->now());
ASSERT_EQUALS(0, countLogLinesContaining("not electing self, we are not freshest"));
- _net->runUntil(startDate + Milliseconds(20));
- ASSERT_EQUALS(startDate + Milliseconds(20), _net->now());
+ _net->runUntil(startDate + 20);
+ ASSERT_EQUALS(startDate + 20, _net->now());
_net->exitNetwork();
waitOnChecker();
stopCapturingLogMessages();
@@ -606,14 +606,14 @@ namespace {
}
_net->scheduleResponse(
noi,
- startDate + Milliseconds(10),
+ startDate + 10,
ResponseStatus(RemoteCommandResponse(
responseBuilder.obj(),
Milliseconds(8))));
}
- _net->runUntil(startDate + Milliseconds(10));
+ _net->runUntil(startDate + 10);
_net->exitNetwork();
- ASSERT_EQUALS(startDate + Milliseconds(10), _net->now());
+ ASSERT_EQUALS(startDate + 10, _net->now());
waitOnChecker();
stopCapturingLogMessages();
ASSERT_EQUALS(shouldAbortElection(), FreshnessChecker::FresherNodeFound);
@@ -666,14 +666,14 @@ namespace {
}
_net->scheduleResponse(
noi,
- startDate + Milliseconds(10),
+ startDate + 10,
ResponseStatus(RemoteCommandResponse(
responseBuilder.obj(),
Milliseconds(8))));
}
- _net->runUntil(startDate + Milliseconds(10));
+ _net->runUntil(startDate + 10);
_net->exitNetwork();
- ASSERT_EQUALS(startDate + Milliseconds(10), _net->now());
+ ASSERT_EQUALS(startDate + 10, _net->now());
waitOnChecker();
stopCapturingLogMessages();
ASSERT_EQUALS(shouldAbortElection(), FreshnessChecker::FresherNodeFound);
@@ -730,7 +730,7 @@ namespace {
"opTime" << Date_t(Timestamp(10,0).asULL());
_net->scheduleResponse(
noi,
- startDate + Milliseconds(20),
+ startDate + 20,
ResponseStatus(RemoteCommandResponse(
responseBuilder.obj(),
Milliseconds(8))));
@@ -745,18 +745,18 @@ namespace {
"opTime" << Date_t(Timestamp(10,0).asULL());
_net->scheduleResponse(
noi,
- startDate + Milliseconds(10),
+ startDate + 10,
ResponseStatus(RemoteCommandResponse(
responseBuilder.obj(),
Milliseconds(8))));
}
}
- _net->runUntil(startDate + Milliseconds(10));
- ASSERT_EQUALS(startDate + Milliseconds(10), _net->now());
+ _net->runUntil(startDate + 10);
+ ASSERT_EQUALS(startDate + 10, _net->now());
ASSERT_EQUALS(0, countLogLinesContaining("not electing self, h4:27017 would veto with '"
"errmsg: \"I'd rather you didn't\"'"));
- _net->runUntil(startDate + Milliseconds(20));
- ASSERT_EQUALS(startDate + Milliseconds(20), _net->now());
+ _net->runUntil(startDate + 20);
+ ASSERT_EQUALS(startDate + 20, _net->now());
_net->exitNetwork();
waitOnChecker();
stopCapturingLogMessages();
@@ -799,7 +799,7 @@ namespace {
if (target.host() == "h2" || target.host() == "h3") {
_net->scheduleResponse(
noi,
- startDate + Milliseconds(10),
+ startDate + 10,
ResponseStatus(ErrorCodes::NoSuchKey, "No response"));
}
else {
@@ -813,15 +813,15 @@ namespace {
"opTime" << Date_t(Timestamp(0,0).asULL());
_net->scheduleResponse(
noi,
- startDate + Milliseconds(10),
+ startDate + 10,
ResponseStatus(RemoteCommandResponse(
responseBuilder.obj(),
Milliseconds(8))));
}
}
- _net->runUntil(startDate + Milliseconds(10));
+ _net->runUntil(startDate + 10);
_net->exitNetwork();
- ASSERT_EQUALS(startDate + Milliseconds(10), _net->now());
+ ASSERT_EQUALS(startDate + 10, _net->now());
waitOnChecker();
ASSERT_EQUALS(shouldAbortElection(),FreshnessChecker::None);
}
diff --git a/src/mongo/db/repl/heartbeat_response_action.cpp b/src/mongo/db/repl/heartbeat_response_action.cpp
index 49ed33c4780..4f26bc2953e 100644
--- a/src/mongo/db/repl/heartbeat_response_action.cpp
+++ b/src/mongo/db/repl/heartbeat_response_action.cpp
@@ -63,7 +63,11 @@ namespace repl {
return result;
}
- HeartbeatResponseAction::HeartbeatResponseAction() : _action(NoAction), _primaryIndex(-1) {}
+ HeartbeatResponseAction::HeartbeatResponseAction() :
+ _action(NoAction),
+ _primaryIndex(-1),
+ _nextHeartbeatStartDate(0) {
+ }
void HeartbeatResponseAction::setNextHeartbeatStartDate(Date_t when) {
_nextHeartbeatStartDate = when;
diff --git a/src/mongo/db/repl/is_master_response.cpp b/src/mongo/db/repl/is_master_response.cpp
index ab38c295d14..a789fd7b6dd 100644
--- a/src/mongo/db/repl/is_master_response.cpp
+++ b/src/mongo/db/repl/is_master_response.cpp
@@ -152,7 +152,7 @@ namespace {
if (_buildIndexesSet)
builder->append(kBuildIndexesFieldName, _buildIndexes);
if (_slaveDelaySet)
- builder->appendIntOrLL(kSlaveDelayFieldName, durationCount<Seconds>(_slaveDelay));
+ builder->append(kSlaveDelayFieldName, _slaveDelay.total_seconds());
if (_tagsSet) {
BSONObjBuilder tags(builder->subobjStart(kTagsFieldName));
for (unordered_map<std::string, std::string>::const_iterator it = _tags.begin();
diff --git a/src/mongo/db/repl/master_slave.cpp b/src/mongo/db/repl/master_slave.cpp
index 9ebf1ce6cf1..7d9624f8849 100644
--- a/src/mongo/db/repl/master_slave.cpp
+++ b/src/mongo/db/repl/master_slave.cpp
@@ -326,7 +326,7 @@ namespace repl {
DBDirectClient c(txn);
BSONObj op = c.findOne( "local.oplog.$main", QUERY( "op" << NE << "n" ).sort( BSON( "$natural" << -1 ) ) );
if ( !op.isEmpty() ) {
- tmp.syncedTo = op[ "ts" ].timestamp();
+ tmp.syncedTo = op[ "ts" ].date();
}
}
addSourceToList(txn, v, tmp, old);
@@ -541,7 +541,7 @@ namespace repl {
return true;
}
BSONElement ts = op.getField( "ts" );
- if ( ( ts.type() == Date || ts.type() == bsonTimestamp ) && ___databaseIgnorer.ignoreAt( db, ts.timestamp() ) ) {
+ if ( ( ts.type() == Date || ts.type() == bsonTimestamp ) && ___databaseIgnorer.ignoreAt( db, ts.date() ) ) {
// Database is ignored due to a previous indication that it is
// missing from master after optime "ts".
return false;
diff --git a/src/mongo/db/repl/member_config.cpp b/src/mongo/db/repl/member_config.cpp
index 02711adedd3..6f3bcf40501 100644
--- a/src/mongo/db/repl/member_config.cpp
+++ b/src/mongo/db/repl/member_config.cpp
@@ -259,7 +259,7 @@ namespace {
}
if (_slaveDelay < Seconds(0) || _slaveDelay > kMaxSlaveDelay) {
return Status(ErrorCodes::BadValue, str::stream() << kSlaveDelayFieldName <<
- " field value of " << durationCount<Seconds>(_slaveDelay) <<
+ " field value of " << _slaveDelay.total_seconds() <<
" seconds is out of range");
}
if (_slaveDelay > Seconds(0) && _priority != 0) {
@@ -310,7 +310,7 @@ namespace {
}
tags.done();
- configBuilder.append("slaveDelay", durationCount<Seconds>(_slaveDelay));
+ configBuilder.append("slaveDelay", _slaveDelay.total_seconds());
configBuilder.append("votes", getNumVotes());
return configBuilder.obj();
}
diff --git a/src/mongo/db/repl/member_config_test.cpp b/src/mongo/db/repl/member_config_test.cpp
index e69b17c237b..9d3a0f7b276 100644
--- a/src/mongo/db/repl/member_config_test.cpp
+++ b/src/mongo/db/repl/member_config_test.cpp
@@ -46,7 +46,7 @@ namespace {
ASSERT_EQUALS(0, mc.getId());
ASSERT_EQUALS(HostAndPort("localhost", 12345), mc.getHostAndPort());
ASSERT_EQUALS(1.0, mc.getPriority());
- ASSERT_EQUALS(Seconds(0), mc.getSlaveDelay());
+ ASSERT_EQUALS(0, mc.getSlaveDelay().total_seconds());
ASSERT_TRUE(mc.isVoter());
ASSERT_FALSE(mc.isHidden());
ASSERT_FALSE(mc.isArbiter());
@@ -188,7 +188,7 @@ namespace {
MemberConfig mc;
ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host" << "h" << "slaveDelay" << 100),
&tagConfig));
- ASSERT_EQUALS(Seconds(100), mc.getSlaveDelay());
+ ASSERT_EQUALS(100, mc.getSlaveDelay().total_seconds());
}
TEST(MemberConfig, ParseTags) {
diff --git a/src/mongo/db/repl/member_heartbeat_data.cpp b/src/mongo/db/repl/member_heartbeat_data.cpp
index c4c99f5bd28..5ae8f1fbffc 100644
--- a/src/mongo/db/repl/member_heartbeat_data.cpp
+++ b/src/mongo/db/repl/member_heartbeat_data.cpp
@@ -41,6 +41,9 @@ namespace repl {
MemberHeartbeatData::MemberHeartbeatData() :
_health(-1),
+ _upSince(0),
+ _lastHeartbeat(0),
+ _lastHeartbeatRecv(0),
_authIssue(false) {
_lastResponse.setState(MemberState::RS_UNKNOWN);
@@ -52,7 +55,7 @@ namespace repl {
const HostAndPort& host,
ReplSetHeartbeatResponse hbResponse) {
_health = 1;
- if (_upSince == Date_t()) {
+ if (_upSince == 0) {
_upSince = now;
}
_authIssue = false;
@@ -79,7 +82,7 @@ namespace repl {
void MemberHeartbeatData::setDownValues(Date_t now, const std::string& heartbeatMessage) {
_health = 0;
- _upSince = Date_t();
+ _upSince = 0;
_lastHeartbeat = now;
_authIssue = false;
@@ -93,7 +96,7 @@ namespace repl {
void MemberHeartbeatData::setAuthIssue(Date_t now) {
_health = 0; // set health to 0 so that this doesn't count towards majority.
- _upSince = Date_t();
+ _upSince = 0;
_lastHeartbeat = now;
_authIssue = true;
diff --git a/src/mongo/db/repl/network_interface_impl.cpp b/src/mongo/db/repl/network_interface_impl.cpp
index 69ab9dc921e..3dd08e51a14 100644
--- a/src/mongo/db/repl/network_interface_impl.cpp
+++ b/src/mongo/db/repl/network_interface_impl.cpp
@@ -159,7 +159,7 @@ namespace {
if (waitTime <= Milliseconds(0)) {
break;
}
- _isExecutorRunnableCondition.wait_for(lk, waitTime);
+ _isExecutorRunnableCondition.timed_wait(lk, waitTime);
}
_isExecutorRunnable = false;
}
@@ -184,13 +184,13 @@ namespace {
if (_threads.size() > kMinThreads) {
const Date_t nowDate = now();
const Date_t nextThreadRetirementDate =
- _lastFullUtilizationDate + kMaxIdleThreadAge;
+ _lastFullUtilizationDate + kMaxIdleThreadAge.total_milliseconds();
if (nowDate > nextThreadRetirementDate) {
_lastFullUtilizationDate = nowDate;
break;
}
}
- _hasPending.wait_for(lk, kMaxIdleThreadAge);
+ _hasPending.timed_wait(lk, kMaxIdleThreadAge);
continue;
}
CommandData todo = _pending.front();
@@ -243,7 +243,7 @@ namespace {
_startNewNetworkThread_inlock();
}
if (_numIdleThreads <= _pending.size()) {
- _lastFullUtilizationDate = Date_t::now();
+ _lastFullUtilizationDate = curTimeMillis64();
}
_hasPending.notify_one();
}
@@ -270,7 +270,7 @@ namespace {
}
Date_t NetworkInterfaceImpl::now() {
- return Date_t::now();
+ return curTimeMillis64();
}
OperationContext* NetworkInterfaceImpl::createOperationContext() {
diff --git a/src/mongo/db/repl/oplog.cpp b/src/mongo/db/repl/oplog.cpp
index 717ca32a578..00bd96fe12a 100644
--- a/src/mongo/db/repl/oplog.cpp
+++ b/src/mongo/db/repl/oplog.cpp
@@ -886,7 +886,7 @@ namespace {
if (!lastOp.isEmpty()) {
LOG(1) << "replSet setting last OpTime";
- setNewOptime(lastOp[ "ts" ].timestamp());
+ setNewOptime(lastOp[ "ts" ].date());
}
}
diff --git a/src/mongo/db/repl/oplogreader.cpp b/src/mongo/db/repl/oplogreader.cpp
index bea0484fc44..5a0e1054d12 100644
--- a/src/mongo/db/repl/oplogreader.cpp
+++ b/src/mongo/db/repl/oplogreader.cpp
@@ -144,7 +144,7 @@ namespace repl {
void OplogReader::connectToSyncSource(OperationContext* txn,
Timestamp lastOpTimeFetched,
ReplicationCoordinator* replCoord) {
- const Timestamp sentinel(duration_cast<Seconds>(Milliseconds(curTimeMillis64())), 0);
+ const Timestamp sentinel(Milliseconds(curTimeMillis64()).total_seconds(), 0);
Timestamp oldestOpTimeSeen = sentinel;
invariant(conn() == NULL);
@@ -182,7 +182,7 @@ namespace repl {
LOG(2) << "can't connect to " << candidate.toString() <<
" to read operations";
resetConnection();
- replCoord->blacklistSyncSource(candidate, Date_t::now() + Seconds(10));
+ replCoord->blacklistSyncSource(candidate, Date_t(curTimeMillis64() + 10*1000));
continue;
}
// Read the first (oldest) op and confirm that it's not newer than our last
@@ -193,7 +193,8 @@ namespace repl {
// This member's got a bad op in its oplog.
warning() << "oplog invalid format on node " << candidate.toString();
resetConnection();
- replCoord->blacklistSyncSource(candidate, Date_t::now() + Minutes(10));
+ replCoord->blacklistSyncSource(candidate,
+ Date_t(curTimeMillis64() + 600*1000));
continue;
}
Timestamp remoteOldOpTime = tsElem.timestamp();
@@ -201,7 +202,8 @@ namespace repl {
if (lastOpTimeFetched < remoteOldOpTime) {
// We're too stale to use this sync source.
resetConnection();
- replCoord->blacklistSyncSource(candidate, Date_t::now() + Minutes(10));
+ replCoord->blacklistSyncSource(candidate,
+ Date_t(curTimeMillis64() + 600*1000));
if (oldestOpTimeSeen > remoteOldOpTime) {
warning() << "we are too stale to use " << candidate.toString() <<
" as a sync source";
diff --git a/src/mongo/db/repl/read_after_optime_args_test.cpp b/src/mongo/db/repl/read_after_optime_args_test.cpp
index 557767c2886..d780500d9c2 100644
--- a/src/mongo/db/repl/read_after_optime_args_test.cpp
+++ b/src/mongo/db/repl/read_after_optime_args_test.cpp
@@ -46,7 +46,7 @@ namespace {
ASSERT_EQ(Timestamp(20, 30), readAfterOpTime.getOpTime().getTimestamp());
ASSERT_EQ(2, readAfterOpTime.getOpTime().getTerm());
- ASSERT_EQ(Milliseconds(100), readAfterOpTime.getTimeout());
+ ASSERT_EQ(100, readAfterOpTime.getTimeout().total_milliseconds());
}
TEST(ReadAfterParse, Empty) {
@@ -54,7 +54,7 @@ namespace {
ASSERT_OK(readAfterOpTime.initialize(BSON("find" << "test")));
ASSERT(readAfterOpTime.getOpTime().getTimestamp().isNull());
- ASSERT_EQ(Milliseconds::zero(), readAfterOpTime.getTimeout());
+ ASSERT_EQ(0, readAfterOpTime.getTimeout().total_milliseconds());
}
TEST(ReadAfterParse, BadRootType) {
@@ -133,7 +133,7 @@ namespace {
ASSERT_EQ(Timestamp(1, 0), readAfterOpTime.getOpTime().getTimestamp());
ASSERT_EQ(2, readAfterOpTime.getOpTime().getTerm());
- ASSERT_EQ(Milliseconds::zero(), readAfterOpTime.getTimeout());
+ ASSERT_EQ(0, readAfterOpTime.getTimeout().total_milliseconds());
}
TEST(ReadAfterParse, BadTimeoutType) {
@@ -170,7 +170,7 @@ namespace {
ASSERT_EQ(Timestamp(20, 30), readAfterOpTime.getOpTime().getTimestamp());
ASSERT_EQ(2, readAfterOpTime.getOpTime().getTerm());
- ASSERT_EQ(Milliseconds::zero(), readAfterOpTime.getTimeout());
+ ASSERT_EQ(0, readAfterOpTime.getTimeout().total_milliseconds());
}
} // unnamed namespace
diff --git a/src/mongo/db/repl/read_after_optime_response.cpp b/src/mongo/db/repl/read_after_optime_response.cpp
index 3a6d5fc9962..7caffe09a96 100644
--- a/src/mongo/db/repl/read_after_optime_response.cpp
+++ b/src/mongo/db/repl/read_after_optime_response.cpp
@@ -42,7 +42,7 @@ namespace repl {
const string ReadAfterOpTimeResponse::kWaitedMSFieldName("waitedMS");
ReadAfterOpTimeResponse::ReadAfterOpTimeResponse(Status status):
- ReadAfterOpTimeResponse(status, stdx::chrono::milliseconds(0), false) {
+ ReadAfterOpTimeResponse(status, boost::posix_time::milliseconds(0), false) {
}
ReadAfterOpTimeResponse::ReadAfterOpTimeResponse():
@@ -50,12 +50,12 @@ namespace repl {
}
ReadAfterOpTimeResponse::ReadAfterOpTimeResponse(Status status,
- stdx::chrono::milliseconds duration):
+ boost::posix_time::milliseconds duration):
ReadAfterOpTimeResponse(status, duration, true) {
}
ReadAfterOpTimeResponse::ReadAfterOpTimeResponse(Status status,
- stdx::chrono::milliseconds duration,
+ boost::posix_time::milliseconds duration,
bool waited):
_waited(waited),
_duration(duration),
@@ -67,14 +67,15 @@ namespace repl {
return;
}
- builder->append(kWaitedMSFieldName, durationCount<Milliseconds>(_duration));
+ builder->append(kWaitedMSFieldName,
+ static_cast<long long>(_duration.total_milliseconds()));
}
bool ReadAfterOpTimeResponse::didWait() const {
return _waited;
}
- stdx::chrono::milliseconds ReadAfterOpTimeResponse::getDuration() const {
+ boost::posix_time::milliseconds ReadAfterOpTimeResponse::getDuration() const {
return _duration;
}
diff --git a/src/mongo/db/repl/read_after_optime_response.h b/src/mongo/db/repl/read_after_optime_response.h
index 33163131363..b906dc196d4 100644
--- a/src/mongo/db/repl/read_after_optime_response.h
+++ b/src/mongo/db/repl/read_after_optime_response.h
@@ -28,10 +28,10 @@
#pragma once
+#include <boost/date_time/posix_time/posix_time_types.hpp>
#include <string>
#include "mongo/base/status.h"
-#include "mongo/stdx/chrono.h"
namespace mongo {
@@ -56,7 +56,7 @@ namespace repl {
/**
* Constructs a response with wait set to true along with the given parameters.
*/
- ReadAfterOpTimeResponse(Status status, stdx::chrono::milliseconds duration);
+ ReadAfterOpTimeResponse(Status status, boost::posix_time::milliseconds duration);
/**
* Appends to the builder the timeout and duration info if didWait() is true.
@@ -70,7 +70,7 @@ namespace repl {
* Returns the amount of duration waiting for opTime to pass.
* Valid only if didWait is true.
*/
- stdx::chrono::milliseconds getDuration() const;
+ boost::posix_time::milliseconds getDuration() const;
/**
* Returns more details about an error if it occurred.
@@ -79,11 +79,11 @@ namespace repl {
private:
ReadAfterOpTimeResponse(Status status,
- stdx::chrono::milliseconds duration,
+ boost::posix_time::milliseconds duration,
bool waited);
bool _waited;
- stdx::chrono::milliseconds _duration;
+ boost::posix_time::milliseconds _duration;
Status _status;
};
diff --git a/src/mongo/db/repl/read_after_optime_response_test.cpp b/src/mongo/db/repl/read_after_optime_response_test.cpp
index 09d70204255..a30824a57b8 100644
--- a/src/mongo/db/repl/read_after_optime_response_test.cpp
+++ b/src/mongo/db/repl/read_after_optime_response_test.cpp
@@ -62,10 +62,10 @@ namespace {
TEST(ReadAfterResponse, WaitedWithDuration) {
ReadAfterOpTimeResponse response(Status(ErrorCodes::InternalError, "test"),
- stdx::chrono::milliseconds(7));
+ boost::posix_time::milliseconds(7));
ASSERT_TRUE(response.didWait());
- ASSERT_EQUALS(Milliseconds(7), response.getDuration());
+ ASSERT_EQUALS(7, response.getDuration().total_milliseconds());
ASSERT_EQ(ErrorCodes::InternalError, response.getStatus().code());
BSONObjBuilder builder;
diff --git a/src/mongo/db/repl/repl_set_heartbeat_response.cpp b/src/mongo/db/repl/repl_set_heartbeat_response.cpp
index 8ea4bd0028f..8dac175ddf9 100644
--- a/src/mongo/db/repl/repl_set_heartbeat_response.cpp
+++ b/src/mongo/db/repl/repl_set_heartbeat_response.cpp
@@ -90,14 +90,13 @@ namespace {
builder->append(kOkFieldName, 1.0);
if (_opTimeSet) {
- builder->appendDate(kOpTimeFieldName, Date_t::fromMillisSinceEpoch(_opTime.asLL()));
+ builder->appendDate(kOpTimeFieldName, _opTime.asULL());
}
if (_timeSet) {
- *builder << kTimeFieldName << durationCount<Seconds>(_time);
+ *builder << kTimeFieldName << _time.total_seconds();
}
if (_electionTimeSet) {
- builder->appendDate(kElectionTimeFieldName,
- Date_t::fromMillisSinceEpoch(_electionTime.asLL()));
+ builder->appendDate(kElectionTimeFieldName, _electionTime.asULL());
}
if (_configSet) {
*builder << kConfigFieldName << _config.toBSON();
diff --git a/src/mongo/db/repl/repl_set_heartbeat_response_test.cpp b/src/mongo/db/repl/repl_set_heartbeat_response_test.cpp
index 76cf0a99448..05e634fbc03 100644
--- a/src/mongo/db/repl/repl_set_heartbeat_response_test.cpp
+++ b/src/mongo/db/repl/repl_set_heartbeat_response_test.cpp
@@ -201,7 +201,7 @@ namespace {
ASSERT_EQUALS(1, hbResponse.getVersion());
ASSERT_EQUALS(Timestamp(10,0), hbResponse.getElectionTime());
ASSERT_EQUALS(Timestamp(0,10), hbResponse.getOpTime());
- ASSERT_EQUALS(Seconds(10), hbResponse.getTime());
+ ASSERT_EQUALS(10, hbResponse.getTime().total_seconds());
hbResponseObj = hbResponse.toBSON();
ASSERT_EQUALS(fieldsSet, hbResponseObj.nFields());
@@ -234,7 +234,7 @@ namespace {
ASSERT_EQUALS(1, hbResponse.getVersion());
ASSERT_EQUALS(Timestamp(10,0), hbResponse.getElectionTime());
ASSERT_EQUALS(Timestamp(0,10), hbResponse.getOpTime());
- ASSERT_EQUALS(Seconds(10), hbResponse.getTime());
+ ASSERT_EQUALS(10, hbResponse.getTime().total_seconds());
ASSERT_EQUALS(true, hbResponse.isElectable());
hbResponseObj = hbResponse.toBSON();
@@ -270,7 +270,7 @@ namespace {
ASSERT_EQUALS(1, hbResponse.getVersion());
ASSERT_EQUALS(Timestamp(10,0), hbResponse.getElectionTime());
ASSERT_EQUALS(Timestamp(0,10), hbResponse.getOpTime());
- ASSERT_EQUALS(Seconds(10), hbResponse.getTime());
+ ASSERT_EQUALS(10, hbResponse.getTime().total_seconds());
ASSERT_EQUALS(true, hbResponse.isElectable());
ASSERT_EQUALS(config.toBSON().toString(), hbResponse.getConfig().toBSON().toString());
@@ -309,7 +309,7 @@ namespace {
ASSERT_EQUALS(1, hbResponse.getVersion());
ASSERT_EQUALS(Timestamp(10,0), hbResponse.getElectionTime());
ASSERT_EQUALS(Timestamp(0,10), hbResponse.getOpTime());
- ASSERT_EQUALS(Seconds(10), hbResponse.getTime());
+ ASSERT_EQUALS(10, hbResponse.getTime().total_seconds());
ASSERT_EQUALS(true, hbResponse.isElectable());
ASSERT_EQUALS(config.toBSON().toString(), hbResponse.getConfig().toBSON().toString());
@@ -349,7 +349,7 @@ namespace {
ASSERT_EQUALS(1, hbResponse.getVersion());
ASSERT_EQUALS(Timestamp(10,0), hbResponse.getElectionTime());
ASSERT_EQUALS(Timestamp(0,10), hbResponse.getOpTime());
- ASSERT_EQUALS(Seconds(10), hbResponse.getTime());
+ ASSERT_EQUALS(10, hbResponse.getTime().total_seconds());
ASSERT_EQUALS(true, hbResponse.isElectable());
ASSERT_EQUALS(config.toBSON().toString(), hbResponse.getConfig().toBSON().toString());
@@ -391,7 +391,7 @@ namespace {
ASSERT_EQUALS(1, hbResponse.getVersion());
ASSERT_EQUALS(Timestamp(10,0), hbResponse.getElectionTime());
ASSERT_EQUALS(Timestamp(0,10), hbResponse.getOpTime());
- ASSERT_EQUALS(Seconds(10), hbResponse.getTime());
+ ASSERT_EQUALS(10, hbResponse.getTime().total_seconds());
ASSERT_EQUALS(true, hbResponse.isElectable());
ASSERT_EQUALS(config.toBSON().toString(), hbResponse.getConfig().toBSON().toString());
@@ -434,7 +434,7 @@ namespace {
ASSERT_EQUALS(1, hbResponse.getVersion());
ASSERT_EQUALS(Timestamp(10,0), hbResponse.getElectionTime());
ASSERT_EQUALS(Timestamp(0,10), hbResponse.getOpTime());
- ASSERT_EQUALS(Seconds(10), hbResponse.getTime());
+ ASSERT_EQUALS(10, hbResponse.getTime().total_seconds());
ASSERT_EQUALS(true, hbResponse.isElectable());
ASSERT_EQUALS(config.toBSON().toString(), hbResponse.getConfig().toBSON().toString());
@@ -477,7 +477,7 @@ namespace {
ASSERT_EQUALS(1, hbResponse.getVersion());
ASSERT_EQUALS(Timestamp(10,0), hbResponse.getElectionTime());
ASSERT_EQUALS(Timestamp(0,10), hbResponse.getOpTime());
- ASSERT_EQUALS(Seconds(10), hbResponse.getTime());
+ ASSERT_EQUALS(10, hbResponse.getTime().total_seconds());
ASSERT_EQUALS(true, hbResponse.isElectable());
ASSERT_EQUALS(config.toBSON().toString(), hbResponse.getConfig().toBSON().toString());
@@ -520,7 +520,7 @@ namespace {
ASSERT_EQUALS(1, hbResponse.getVersion());
ASSERT_EQUALS(Timestamp(10,0), hbResponse.getElectionTime());
ASSERT_EQUALS(Timestamp(0,10), hbResponse.getOpTime());
- ASSERT_EQUALS(Seconds(10), hbResponse.getTime());
+ ASSERT_EQUALS(10, hbResponse.getTime().total_seconds());
ASSERT_EQUALS(true, hbResponse.isElectable());
ASSERT_EQUALS(config.toBSON().toString(), hbResponse.getConfig().toBSON().toString());
diff --git a/src/mongo/db/repl/repl_set_html_summary.cpp b/src/mongo/db/repl/repl_set_html_summary.cpp
index 65524427b4a..465bfdeb95e 100644
--- a/src/mongo/db/repl/repl_set_html_summary.cpp
+++ b/src/mongo/db/repl/repl_set_html_summary.cpp
@@ -68,7 +68,8 @@ namespace {
}
unsigned int timeDifference(Date_t now, Date_t past) {
- return static_cast<unsigned int>(past != Date_t() ? durationCount<Seconds>(now - past) : 0);
+ return static_cast<unsigned int> ((past ?
+ (now - past) / 1000 /* convert millis to secs */ : 0));
}
std::string stateAsHtml(const MemberState& s) {
@@ -173,7 +174,7 @@ namespace {
memberTable << td(red(str::stream() << memberHB.getHealth(), !up));
const unsigned int uptime = timeDifference(_now, memberHB.getUpSince());
memberTable << td(ago(uptime));
- if (memberHB.getLastHeartbeat() == Date_t()) {
+ if (memberHB.getLastHeartbeat() == 0) {
memberTable << td("never");
}
else {
@@ -190,7 +191,7 @@ namespace {
memberTable << td( grey(str::stream() << "(was " << state << ')', true) );
}
memberTable << td(grey(memberHB.getLastHeartbeatMsg(), !up));
- memberTable << td(memberHB.getLastHeartbeat() == Date_t() ?
+ memberTable << td(memberHB.getLastHeartbeat() == 0 ?
"?" : memberHB.getOpTime().toString());
}
memberTable << _tr();
diff --git a/src/mongo/db/repl/replica_set_config.cpp b/src/mongo/db/repl/replica_set_config.cpp
index dda9e25cf55..1e7c343db7e 100644
--- a/src/mongo/db/repl/replica_set_config.cpp
+++ b/src/mongo/db/repl/replica_set_config.cpp
@@ -274,7 +274,7 @@ namespace {
if (_heartbeatTimeoutPeriod < Seconds(0)) {
return Status(ErrorCodes::BadValue, str::stream() << kSettingsFieldName << '.' <<
kHeartbeatTimeoutFieldName << " field value must be non-negative, "
- "but found " << _heartbeatTimeoutPeriod.count());
+ "but found " << _heartbeatTimeoutPeriod.total_seconds());
}
if (_members.size() > kMaxMembers || _members.empty()) {
return Status(ErrorCodes::BadValue, str::stream() <<
@@ -539,7 +539,7 @@ namespace {
BSONObjBuilder settingsBuilder(configBuilder.subobjStart(kSettingsFieldName));
settingsBuilder.append(kChainingAllowedFieldName, _chainingAllowed);
- settingsBuilder.appendIntOrLL(kHeartbeatTimeoutFieldName, _heartbeatTimeoutPeriod.count());
+ settingsBuilder.append(kHeartbeatTimeoutFieldName, _heartbeatTimeoutPeriod.total_seconds());
BSONObjBuilder gleModes(settingsBuilder.subobjStart(kGetLastErrorModesFieldName));
for (StringMap<ReplicaSetTagPattern>::const_iterator mode =
diff --git a/src/mongo/db/repl/replica_set_config.h b/src/mongo/db/repl/replica_set_config.h
index fcd880705ea..c51ef2ebb40 100644
--- a/src/mongo/db/repl/replica_set_config.h
+++ b/src/mongo/db/repl/replica_set_config.h
@@ -161,7 +161,7 @@ namespace repl {
* Seconds object.
*/
Milliseconds getHeartbeatTimeoutPeriodMillis() const {
- return _heartbeatTimeoutPeriod;
+ return Milliseconds(_heartbeatTimeoutPeriod.total_milliseconds());
}
/**
diff --git a/src/mongo/db/repl/replica_set_config_test.cpp b/src/mongo/db/repl/replica_set_config_test.cpp
index 8bc5a247498..bf2b8f91151 100644
--- a/src/mongo/db/repl/replica_set_config_test.cpp
+++ b/src/mongo/db/repl/replica_set_config_test.cpp
@@ -50,7 +50,7 @@ namespace {
ASSERT_EQUALS(0, config.membersBegin()->getId());
ASSERT_EQUALS(1, config.getDefaultWriteConcern().wNumNodes);
ASSERT_EQUALS("", config.getDefaultWriteConcern().wMode);
- ASSERT_EQUALS(Seconds(10), config.getHeartbeatTimeoutPeriod());
+ ASSERT_EQUALS(10, config.getHeartbeatTimeoutPeriod().total_seconds());
ASSERT_TRUE(config.isChainingAllowed());
ASSERT_EQUALS(0, config.getProtocolVersion());
}
@@ -78,7 +78,7 @@ namespace {
ASSERT_EQUALS(0, config.getDefaultWriteConcern().wNumNodes);
ASSERT_EQUALS("majority", config.getDefaultWriteConcern().wMode);
ASSERT_FALSE(config.isChainingAllowed());
- ASSERT_EQUALS(Seconds(120), config.getHeartbeatTimeoutPeriod());
+ ASSERT_EQUALS(120, config.getHeartbeatTimeoutPeriod().total_seconds());
ASSERT_EQUALS(2, config.getProtocolVersion());
}
@@ -603,7 +603,7 @@ namespace {
"host" << "localhost:12345")) <<
"settings" << BSON("heartbeatTimeoutSecs" << 20))));
ASSERT_OK(config.validate());
- ASSERT_EQUALS(Seconds(20), config.getHeartbeatTimeoutPeriod());
+ ASSERT_EQUALS(20, config.getHeartbeatTimeoutPeriod().total_seconds());
ASSERT_OK(config.initialize(
BSON("_id" << "rs0" <<
diff --git a/src/mongo/db/repl/replication_coordinator.h b/src/mongo/db/repl/replication_coordinator.h
index a1a25f5b1f1..9201f4af326 100644
--- a/src/mongo/db/repl/replication_coordinator.h
+++ b/src/mongo/db/repl/replication_coordinator.h
@@ -28,6 +28,7 @@
#pragma once
+#include <boost/date_time/posix_time/posix_time_types.hpp>
#include <vector>
#include "mongo/base/disallow_copying.h"
@@ -36,7 +37,6 @@
#include "mongo/db/repl/repl_settings.h"
#include "mongo/db/repl/reporter.h"
#include "mongo/util/net/hostandport.h"
-#include "mongo/util/time_support.h"
namespace mongo {
@@ -89,6 +89,8 @@ namespace repl {
public:
+ typedef boost::posix_time::milliseconds Milliseconds;
+
struct StatusAndDuration {
public:
Status status;
diff --git a/src/mongo/db/repl/replication_coordinator_impl.cpp b/src/mongo/db/repl/replication_coordinator_impl.cpp
index 070487253bd..a83b37b9b76 100644
--- a/src/mongo/db/repl/replication_coordinator_impl.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl.cpp
@@ -801,7 +801,8 @@ namespace {
while (ts > _getMyLastOptime_inlock()) {
Status interruptedStatus = txn->checkForInterruptNoAssert();
if (!interruptedStatus.isOK()) {
- return ReadAfterOpTimeResponse(interruptedStatus, Milliseconds(timer.millis()));
+ return ReadAfterOpTimeResponse(interruptedStatus,
+ Milliseconds(timer.millis()));
}
if (_inShutdown) {
@@ -810,13 +811,14 @@ namespace {
Milliseconds(timer.millis()));
}
- const Microseconds elapsedTime{timer.micros()};
- if (timeout > Microseconds::zero() && elapsedTime > timeout) {
+ const auto elapsedMS = timer.millis();
+ if (timeout.total_milliseconds() > 0 &&
+ elapsedMS > timeout.total_milliseconds()) {
return ReadAfterOpTimeResponse(
Status(ErrorCodes::ReadAfterOptimeTimeout,
str::stream() << "timed out waiting for opTime: "
<< ts.toStringPretty()),
- duration_cast<Milliseconds>(elapsedTime));
+ Milliseconds(timer.millis()));
}
boost::condition_variable condVar;
@@ -826,19 +828,21 @@ namespace {
nullptr, // Don't care about write concern.
&condVar);
- const Microseconds maxTimeMicrosRemaining{txn->getRemainingMaxTimeMicros()};
- Microseconds waitTime = Microseconds::max();
- if (maxTimeMicrosRemaining != Microseconds::zero()) {
- waitTime = maxTimeMicrosRemaining;
- }
- if (timeout != Microseconds::zero()) {
- waitTime = std::min<Microseconds>(timeout - elapsedTime, waitTime);
- }
- if (waitTime == Microseconds::max()) {
+ uint64_t maxTimeMicrosRemaining = txn->getRemainingMaxTimeMicros();
+ auto maxTimeMSRemaining = (maxTimeMicrosRemaining == 0) ?
+ std::numeric_limits<uint64_t>::max() : (maxTimeMicrosRemaining / 1000);
+
+ auto timeoutMSRemaining = (timeout.total_milliseconds() == 0) ?
+ std::numeric_limits<uint64_t>::max() :
+ static_cast<uint64_t>(timeout.total_milliseconds() - elapsedMS);
+
+ auto sleepTimeMS = std::min(maxTimeMSRemaining, timeoutMSRemaining);
+
+ if (sleepTimeMS == std::numeric_limits<uint64_t>::max()) {
condVar.wait(lock);
}
else {
- condVar.wait_for(lock, waitTime);
+ condVar.timed_wait(lock, Milliseconds(sleepTimeMS));
}
}
@@ -1138,7 +1142,7 @@ namespace {
condVar.wait(*lock);
}
else {
- condVar.wait_for(*lock, Milliseconds(writeConcern.wTimeout - elapsed));
+ condVar.timed_wait(*lock, Milliseconds(writeConcern.wTimeout - elapsed));
}
} catch (const boost::thread_interrupted&) {}
}
@@ -1156,8 +1160,8 @@ namespace {
const Milliseconds& waitTime,
const Milliseconds& stepdownTime) {
const Date_t startTime = _replExecutor.now();
- const Date_t stepDownUntil = startTime + stepdownTime;
- const Date_t waitUntil = startTime + waitTime;
+ const Date_t stepDownUntil(startTime.millis + stepdownTime.total_milliseconds());
+ const Date_t waitUntil(startTime.millis + waitTime.total_milliseconds());
if (!getMemberState().primary()) {
// Note this check is inherently racy - it's always possible for the node to
@@ -1173,8 +1177,7 @@ namespace {
_externalState->killAllUserOperations(txn);
if (lockState == LOCK_WAITING) {
- lockState = txn->lockState()->lockGlobalComplete(
- durationCount<Milliseconds>(stepdownTime));
+ lockState = txn->lockState()->lockGlobalComplete(stepdownTime.total_milliseconds());
if (lockState == LOCK_TIMEOUT) {
return Status(ErrorCodes::ExceededTimeLimit,
"Could not acquire the global shared lock within the amount of time "
diff --git a/src/mongo/db/repl/replication_coordinator_impl_elect.cpp b/src/mongo/db/repl/replication_coordinator_impl_elect.cpp
index 387f52d2b84..dffe1e3793f 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_elect.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_elect.cpp
@@ -173,9 +173,9 @@ namespace {
break;
case FreshnessChecker::FreshnessTie:
if ((_selfIndex != 0) && !_sleptLastElection) {
- const auto ms = Milliseconds(_replExecutor.nextRandomInt64(1000) + 50);
+ const long long ms = _replExecutor.nextRandomInt64(1000) + 50;
const Date_t nextCandidateTime = now + ms;
- log() << "possible election tie; sleeping " << ms.count() << "ms until " <<
+ log() << "possible election tie; sleeping " << ms << "ms until " <<
dateToISOStringLocal(nextCandidateTime);
_topCoord->setElectionSleepUntil(nextCandidateTime);
_replExecutor.scheduleWorkAt(
@@ -241,7 +241,7 @@ namespace {
" votes, but needed at least " << _rsConfig.getMajorityVoteCount();
// Suppress ourselves from standing for election again, giving other nodes a chance
// to win their elections.
- const auto ms = Milliseconds(_replExecutor.nextRandomInt64(1000) + 50);
+ const long long ms = _replExecutor.nextRandomInt64(1000) + 50;
const Date_t now(_replExecutor.now());
const Date_t nextCandidateTime = now + ms;
log() << "waiting until " << nextCandidateTime << " before standing for election again";
diff --git a/src/mongo/db/repl/replication_coordinator_impl_elect_test.cpp b/src/mongo/db/repl/replication_coordinator_impl_elect_test.cpp
index 29add436720..db4104ed761 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_elect_test.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_elect_test.cpp
@@ -347,7 +347,7 @@ namespace {
BSONObjBuilder respObj2;
respObj2 << "ok" << 1;
hbResp2.addToBSON(&respObj2);
- net->runUntil(net->now() + Seconds(10)); // run until we've sent a heartbeat request
+ net->runUntil(net->now() + 10*1000); // run until we've sent a heartbeat request
const NetworkInterfaceMock::NetworkOperationIterator noi2 = net->getNextReadyRequest();
net->scheduleResponse(noi2, net->now(), makeResponseStatus(respObj2.obj()));
net->runReadyNetworkOperations();
diff --git a/src/mongo/db/repl/replication_coordinator_impl_heartbeat_test.cpp b/src/mongo/db/repl/replication_coordinator_impl_heartbeat_test.cpp
index 0dec8c5b1a6..30c564284b1 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_heartbeat_test.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_heartbeat_test.cpp
@@ -115,10 +115,8 @@ namespace {
BSONObjBuilder responseBuilder;
responseBuilder << "ok" << 1;
hbResp.addToBSON(&responseBuilder);
- net->scheduleResponse(noi,
- startDate + Milliseconds(200),
- makeResponseStatus(responseBuilder.obj()));
- assertRunUntil(startDate + Milliseconds(200));
+ net->scheduleResponse(noi, startDate + 200, makeResponseStatus(responseBuilder.obj()));
+ assertRunUntil(startDate + 200);
// Because the new config is stored using an out-of-band thread, we need to perform some
// extra synchronization to let the executor finish the heartbeat reconfig. We know that
@@ -176,10 +174,8 @@ namespace {
BSONObjBuilder responseBuilder;
responseBuilder << "ok" << 1;
hbResp.addToBSON(&responseBuilder);
- net->scheduleResponse(noi,
- startDate + Milliseconds(200),
- makeResponseStatus(responseBuilder.obj()));
- assertRunUntil(startDate + Milliseconds(2200));
+ net->scheduleResponse(noi, startDate + 200, makeResponseStatus(responseBuilder.obj()));
+ assertRunUntil(startDate + 2200);
// Because the new config is stored using an out-of-band thread, we need to perform some
// extra synchronization to let the executor finish the heartbeat reconfig. We know that
diff --git a/src/mongo/db/repl/replication_coordinator_impl_reconfig_test.cpp b/src/mongo/db/repl/replication_coordinator_impl_reconfig_test.cpp
index 5b438741b29..086a574d630 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_reconfig_test.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_reconfig_test.cpp
@@ -415,7 +415,7 @@ namespace {
BSONObjBuilder respObj2;
respObj2 << "ok" << 1;
hbResp2.addToBSON(&respObj2);
- net->runUntil(net->now() + Seconds(10)); // run until we've sent a heartbeat request
+ net->runUntil(net->now() + 10*1000); // run until we've sent a heartbeat request
const NetworkInterfaceMock::NetworkOperationIterator noi2 = net->getNextReadyRequest();
net->scheduleResponse(noi2, net->now(), makeResponseStatus(respObj2.obj()));
net->runReadyNetworkOperations();
@@ -457,7 +457,7 @@ namespace {
net->blackHole(net->getNextReadyRequest());
// schedule hb reconfig
- net->runUntil(net->now() + Seconds(10)); // run until we've sent a heartbeat request
+ net->runUntil(net->now() + 10*1000); // run until we've sent a heartbeat request
const NetworkInterfaceMock::NetworkOperationIterator noi = net->getNextReadyRequest();
ReplSetHeartbeatResponse hbResp;
ReplicaSetConfig config;
diff --git a/src/mongo/db/repl/replication_coordinator_impl_test.cpp b/src/mongo/db/repl/replication_coordinator_impl_test.cpp
index 006db55e81b..b33b87b1a83 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_test.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_test.cpp
@@ -241,11 +241,11 @@ namespace {
ASSERT_EQUALS(HostAndPort("node2", 54321), noi->getRequest().target);
ASSERT_EQUALS("admin", noi->getRequest().dbname);
ASSERT_EQUALS(hbArgs.toBSON(), noi->getRequest().cmdObj);
- getNet()->scheduleResponse(noi, startDate + Milliseconds(10),
- ResponseStatus(ErrorCodes::NoSuchKey, "No response"));
- getNet()->runUntil(startDate + Milliseconds(10));
+ getNet()->scheduleResponse(noi, startDate + 10, ResponseStatus(ErrorCodes::NoSuchKey,
+ "No response"));
+ getNet()->runUntil(startDate + 10);
getNet()->exitNetwork();
- ASSERT_EQUALS(startDate + Milliseconds(10), getNet()->now());
+ ASSERT_EQUALS(startDate + 10, getNet()->now());
prsiThread.join();
ASSERT_EQUALS(ErrorCodes::NodeNotFound, status);
ASSERT_EQUALS(MemberState::RS_STARTUP, getReplCoord()->getMemberState().s);
@@ -276,11 +276,11 @@ namespace {
hbResp.setVersion(0);
getNet()->scheduleResponse(
noi,
- startDate + Milliseconds(10),
+ startDate + 10,
ResponseStatus(RemoteCommandResponse(hbResp.toBSON(), Milliseconds(8))));
- getNet()->runUntil(startDate + Milliseconds(10));
+ getNet()->runUntil(startDate + 10);
getNet()->exitNetwork();
- ASSERT_EQUALS(startDate + Milliseconds(10), getNet()->now());
+ ASSERT_EQUALS(startDate + 10, getNet()->now());
prsiThread.join();
ASSERT_OK(status);
ASSERT_EQUALS(ReplicationCoordinator::modeReplSet, getReplCoord()->getReplicationMode());
@@ -647,7 +647,7 @@ namespace {
ReplicationAwaiter(ReplicationCoordinatorImpl* replCoord, OperationContext* txn) :
_replCoord(replCoord), _finished(false),
_result(ReplicationCoordinator::StatusAndDuration(
- Status::OK(), Milliseconds(0))) {}
+ Status::OK(), ReplicationCoordinator::Milliseconds(0))) {}
void setOpTime(const Timestamp& ot) {
_optime = ot;
@@ -675,7 +675,7 @@ namespace {
ASSERT(_finished);
_finished = false;
_result = ReplicationCoordinator::StatusAndDuration(
- Status::OK(), Milliseconds(0));
+ Status::OK(), ReplicationCoordinator::Milliseconds(0));
}
private:
@@ -952,7 +952,7 @@ namespace {
simulateSuccessfulElection();
enterNetwork();
- getNet()->runUntil(getNet()->now() + Seconds(2));
+ getNet()->runUntil(getNet()->now() + 2000);
ASSERT(getNet()->hasReadyRequests());
NetworkInterfaceMock::NetworkOperationIterator noi = getNet()->getNextReadyRequest();
RemoteCommandRequest request = noi->getRequest();
@@ -979,7 +979,7 @@ namespace {
ASSERT_TRUE(getReplCoord()->getMemberState().primary());
ASSERT_OK(getReplCoord()->stepDown(&txn, false, Milliseconds(0), Milliseconds(1000)));
enterNetwork(); // So we can safely inspect the topology coordinator
- ASSERT_EQUALS(getNet()->now() + Seconds(1), getTopoCoord().getStepDownTime());
+ ASSERT_EQUALS(Date_t(getNet()->now().millis + 1000), getTopoCoord().getStepDownTime());
ASSERT_TRUE(getTopoCoord().getMemberState().secondary());
exitNetwork();
ASSERT_TRUE(getReplCoord()->getMemberState().secondary());
@@ -999,7 +999,7 @@ namespace {
ASSERT_TRUE(getReplCoord()->getMemberState().primary());
ASSERT_OK(getReplCoord()->stepDown(&txn, true, Milliseconds(0), Milliseconds(1000)));
getNet()->enterNetwork(); // Must do this before inspecting the topocoord
- Date_t stepdownUntil = getNet()->now() + Seconds(1);
+ Date_t stepdownUntil = Date_t(getNet()->now().millis + 1000);
ASSERT_EQUALS(stepdownUntil, getTopoCoord().getStepDownTime());
ASSERT_TRUE(getTopoCoord().getMemberState().secondary());
ASSERT_TRUE(getReplCoord()->getMemberState().secondary());
@@ -1103,11 +1103,11 @@ namespace {
runner.reset();
getNet()->enterNetwork();
const Date_t startDate = getNet()->now();
- while (startDate + Milliseconds(1000) < getNet()->now()) {
+ while (startDate + 1000 < getNet()->now()) {
while (getNet()->hasReadyRequests()) {
getNet()->blackHole(getNet()->getNextReadyRequest());
}
- getNet()->runUntil(startDate + Milliseconds(1000));
+ getNet()->runUntil(startDate + 1000);
}
getNet()->exitNetwork();
ASSERT_TRUE(getReplCoord()->getMemberState().primary());
@@ -1140,7 +1140,7 @@ namespace {
// Make a secondary actually catch up
enterNetwork();
- getNet()->runUntil(getNet()->now() + Milliseconds(2000));
+ getNet()->runUntil(getNet()->now() + 2000);
ASSERT(getNet()->hasReadyRequests());
NetworkInterfaceMock::NetworkOperationIterator noi = getNet()->getNextReadyRequest();
RemoteCommandRequest request = noi->getRequest();
@@ -1497,7 +1497,7 @@ namespace {
ASSERT_TRUE(response.isPassive());
ASSERT_FALSE(response.isHidden());
ASSERT_TRUE(response.shouldBuildIndexes());
- ASSERT_EQUALS(Seconds(0), response.getSlaveDelay());
+ ASSERT_EQUALS(0, response.getSlaveDelay().total_seconds());
ASSERT_EQUALS(h4, response.getMe());
std::vector<HostAndPort> hosts = response.getHosts();
diff --git a/src/mongo/db/repl/replication_coordinator_test_fixture.cpp b/src/mongo/db/repl/replication_coordinator_test_fixture.cpp
index 24c3ee06006..160cb55281d 100644
--- a/src/mongo/db/repl/replication_coordinator_test_fixture.cpp
+++ b/src/mongo/db/repl/replication_coordinator_test_fixture.cpp
@@ -234,7 +234,7 @@ namespace {
while (replCoord->getMemberState().primary()) {
log() << "Waiting on network in state " << replCoord->getMemberState();
getNet()->enterNetwork();
- net->runUntil(net->now() + Seconds(10));
+ net->runUntil(net->now() + 10000);
const NetworkInterfaceMock::NetworkOperationIterator noi = net->getNextReadyRequest();
const RemoteCommandRequest& request = noi->getRequest();
log() << request.target.toString() << " processing " << request.cmdObj;
diff --git a/src/mongo/db/repl/replication_executor.cpp b/src/mongo/db/repl/replication_executor.cpp
index 29d5056e067..be8dbb0ba9f 100644
--- a/src/mongo/db/repl/replication_executor.cpp
+++ b/src/mongo/db/repl/replication_executor.cpp
@@ -293,7 +293,8 @@ namespace {
scheduledRequest.expirationDate = kNoExpirationDate;
}
else {
- scheduledRequest.expirationDate = _networkInterface->now() + scheduledRequest.timeout;
+ scheduledRequest.expirationDate =
+ _networkInterface->now() + scheduledRequest.timeout.total_milliseconds();
}
boost::lock_guard<boost::mutex> lk(_mutex);
StatusWith<CallbackHandle> handle = enqueueWork_inlock(
@@ -465,7 +466,7 @@ namespace {
return std::make_pair(WorkItem(), CallbackHandle());
}
lk.unlock();
- if (nextWakeupDate == Date_t::max()) {
+ if (nextWakeupDate == Date_t(~0ULL)) {
_networkInterface->waitForWork();
}
else {
@@ -492,7 +493,7 @@ namespace {
_readyQueue.splice(_readyQueue.end(), _sleepersQueue, _sleepersQueue.begin(), iter);
if (iter == _sleepersQueue.end()) {
// indicate no sleeper to wait for
- return Date_t::max();
+ return Date_t(~0ULL);
}
return iter->readyDate;
}
diff --git a/src/mongo/db/repl/replication_executor.h b/src/mongo/db/repl/replication_executor.h
index c01bc48e648..ea19ac411c2 100644
--- a/src/mongo/db/repl/replication_executor.h
+++ b/src/mongo/db/repl/replication_executor.h
@@ -28,6 +28,7 @@
#pragma once
+#include <boost/date_time/posix_time/posix_time_types.hpp>
#include <boost/shared_ptr.hpp>
#include <boost/thread/condition_variable.hpp>
#include <boost/thread/mutex.hpp>
@@ -109,6 +110,7 @@ namespace repl {
class ReplicationExecutor {
MONGO_DISALLOW_COPYING(ReplicationExecutor);
public:
+ typedef boost::posix_time::milliseconds Milliseconds;
struct CallbackData;
class CallbackHandle;
class EventHandle;
diff --git a/src/mongo/db/repl/replication_executor_test.cpp b/src/mongo/db/repl/replication_executor_test.cpp
index a2894d5be15..5b1c62ea13d 100644
--- a/src/mongo/db/repl/replication_executor_test.cpp
+++ b/src/mongo/db/repl/replication_executor_test.cpp
@@ -284,22 +284,22 @@ namespace {
Status status3(ErrorCodes::InternalError, "Not mutated");
const Date_t now = net->now();
const ReplicationExecutor::CallbackHandle cb1 =
- unittest::assertGet(executor.scheduleWorkAt(now + Milliseconds(100),
+ unittest::assertGet(executor.scheduleWorkAt(Date_t(now.millis + 100),
stdx::bind(setStatus,
stdx::placeholders::_1,
&status1)));
- unittest::assertGet(executor.scheduleWorkAt(now + Milliseconds(5000),
+ unittest::assertGet(executor.scheduleWorkAt(Date_t(now.millis + 5000),
stdx::bind(setStatus,
stdx::placeholders::_1,
&status3)));
const ReplicationExecutor::CallbackHandle cb2 =
- unittest::assertGet(executor.scheduleWorkAt(now + Milliseconds(200),
+ unittest::assertGet(executor.scheduleWorkAt(Date_t(now.millis + 200),
stdx::bind(setStatusAndShutdown,
stdx::placeholders::_1,
&status2)));
const Date_t startTime = net->now();
- net->runUntil(startTime + Milliseconds(200));
- ASSERT_EQUALS(startTime + Milliseconds(200), net->now());
+ net->runUntil(startTime + 200 /*ms*/);
+ ASSERT_EQUALS(startTime + 200, net->now());
executor.wait(cb1);
executor.wait(cb2);
ASSERT_OK(status1);
@@ -457,7 +457,7 @@ namespace {
HostAndPort("lazy", 27017),
"admin",
BSON("sleep" << 1),
- Milliseconds(1));
+ ReplicationExecutor::Milliseconds(1));
ReplicationExecutor::CallbackHandle cbHandle = unittest::assertGet(
executor.scheduleRemoteCommand(
request,
@@ -469,10 +469,10 @@ namespace {
const Date_t startTime = net->now();
NetworkInterfaceMock::NetworkOperationIterator noi = net->getNextReadyRequest();
net->scheduleResponse(noi,
- startTime + Milliseconds(2),
+ startTime + 2,
ResponseStatus(ErrorCodes::ExceededTimeLimit, "I took too long"));
- net->runUntil(startTime + Milliseconds(2));
- ASSERT_EQUALS(startTime + Milliseconds(2), net->now());
+ net->runUntil(startTime + 2);
+ ASSERT_EQUALS(startTime + 2, net->now());
executor.wait(cbHandle);
ASSERT_EQUALS(ErrorCodes::ExceededTimeLimit, status);
}
diff --git a/src/mongo/db/repl/replication_info.cpp b/src/mongo/db/repl/replication_info.cpp
index fe843605d10..a5303072fbd 100644
--- a/src/mongo/db/repl/replication_info.cpp
+++ b/src/mongo/db/repl/replication_info.cpp
@@ -126,9 +126,8 @@ namespace repl {
Query().sort( BSON( "$natural" << -1 ) ) );
bb.appendDate( "masterFirst" , first["ts"].timestampTime() );
bb.appendDate( "masterLast" , last["ts"].timestampTime() );
- const auto lag =
- (last["ts"].timestampTime() - s["syncedTo"].timestampTime());
- bb.append("lagSeconds", durationCount<Milliseconds>(lag) / 1000.0);
+ double lag = (double) (last["ts"].timestampTime() - s["syncedTo"].timestampTime());
+ bb.append( "lagSeconds" , lag / 1000 );
}
conn.done();
}
diff --git a/src/mongo/db/repl/replset_commands.cpp b/src/mongo/db/repl/replset_commands.cpp
index c91cc42ec2d..342ee8e514b 100644
--- a/src/mongo/db/repl/replset_commands.cpp
+++ b/src/mongo/db/repl/replset_commands.cpp
@@ -534,8 +534,8 @@ namespace {
status = getGlobalReplicationCoordinator()->stepDown(
txn,
force,
- Seconds(secondaryCatchUpPeriodSecs),
- Seconds(stepDownForSecs));
+ ReplicationCoordinator::Milliseconds(secondaryCatchUpPeriodSecs * 1000),
+ ReplicationCoordinator::Milliseconds(stepDownForSecs * 1000));
return appendCommandStatus(result, status);
}
} cmdReplSetStepDown;
diff --git a/src/mongo/db/repl/reporter_test.cpp b/src/mongo/db/repl/reporter_test.cpp
index 098096461c1..c060f714339 100644
--- a/src/mongo/db/repl/reporter_test.cpp
+++ b/src/mongo/db/repl/reporter_test.cpp
@@ -99,7 +99,7 @@ namespace {
void ReporterTest::scheduleNetworkResponse(const BSONObj& obj) {
NetworkInterfaceMock* net = getNet();
ASSERT_TRUE(net->hasReadyRequests());
- Milliseconds millis(0);
+ ReplicationExecutor::Milliseconds millis(0);
RemoteCommandResponse response(obj, millis);
ReplicationExecutor::ResponseStatus responseStatus(response);
net->scheduleResponse(net->getNextReadyRequest(), net->now(), responseStatus);
diff --git a/src/mongo/db/repl/rs_initialsync.cpp b/src/mongo/db/repl/rs_initialsync.cpp
index 71e80ceb6f6..22817bc7746 100644
--- a/src/mongo/db/repl/rs_initialsync.cpp
+++ b/src/mongo/db/repl/rs_initialsync.cpp
@@ -354,7 +354,7 @@ namespace {
truncateAndResetOplog(&txn, replCoord, bgsync);
OplogReader r;
- Timestamp now(duration_cast<Seconds>(Milliseconds(curTimeMillis64())), 0);
+ Timestamp now(Milliseconds(curTimeMillis64()).total_seconds(), 0);
while (r.getHost().empty()) {
// We must prime the sync source selector so that it considers all candidates regardless
diff --git a/src/mongo/db/repl/scatter_gather_test.cpp b/src/mongo/db/repl/scatter_gather_test.cpp
index 1fc6765e58e..a98c5a1f51f 100644
--- a/src/mongo/db/repl/scatter_gather_test.cpp
+++ b/src/mongo/db/repl/scatter_gather_test.cpp
@@ -186,29 +186,29 @@ namespace {
net->enterNetwork();
NetworkInterfaceMock::NetworkOperationIterator noi = net->getNextReadyRequest();
net->scheduleResponse(noi,
- net->now() + Seconds(2),
+ net->now()+2000,
ResponseStatus(RemoteCommandResponse(
BSON("ok" << 1),
- Milliseconds(10))));
+ boost::posix_time::milliseconds(10))));
ASSERT_FALSE(ranCompletion);
noi = net->getNextReadyRequest();
net->scheduleResponse(noi,
- net->now() + Seconds(2),
+ net->now()+2000,
ResponseStatus(RemoteCommandResponse(
BSON("ok" << 1),
- Milliseconds(10))));
+ boost::posix_time::milliseconds(10))));
ASSERT_FALSE(ranCompletion);
noi = net->getNextReadyRequest();
net->scheduleResponse(noi,
- net->now() + Seconds(5),
+ net->now()+5000,
ResponseStatus(RemoteCommandResponse(
BSON("ok" << 1),
- Milliseconds(10))));
+ boost::posix_time::milliseconds(10))));
ASSERT_FALSE(ranCompletion);
- net->runUntil(net->now() + Seconds(2));
+ net->runUntil(net->now()+2000);
ASSERT_TRUE(ranCompletion);
delete sga;
@@ -290,31 +290,32 @@ namespace {
net->enterNetwork();
NetworkInterfaceMock::NetworkOperationIterator noi = net->getNextReadyRequest();
net->scheduleResponse(noi,
- net->now() + Seconds(2),
+ net->now()+2000,
ResponseStatus(RemoteCommandResponse(
BSON("ok" << 1),
- Milliseconds(10))));
+ boost::posix_time::milliseconds(10))));
ASSERT_FALSE(ranCompletion);
noi = net->getNextReadyRequest();
net->scheduleResponse(noi,
- net->now() + Seconds(2),
+ net->now()+2000,
ResponseStatus(RemoteCommandResponse(
BSON("ok" << 1),
- Milliseconds(10))));
+ boost::posix_time::milliseconds(10))));
ASSERT_FALSE(ranCompletion);
noi = net->getNextReadyRequest();
net->scheduleResponse(noi,
- net->now() + Seconds(5),
+ net->now()+5000,
ResponseStatus(RemoteCommandResponse(
BSON("ok" << 1),
- Milliseconds(10))));
+ boost::posix_time::milliseconds(10))));
ASSERT_FALSE(ranCompletion);
- net->runUntil(net->now() + Seconds(2));
+ net->runUntil(net->now()+2000);
ASSERT_TRUE(ranCompletion);
+
net->runReadyNetworkOperations();
// the third response should not be processed, so the count should not increment
ASSERT_EQUALS(2, sga.getResponseCount());
@@ -399,7 +400,7 @@ namespace {
net->now(),
ResponseStatus(RemoteCommandResponse(
BSON("ok" << 1),
- Milliseconds(10))));
+ boost::posix_time::milliseconds(10))));
net->runReadyNetworkOperations();
noi = net->getNextReadyRequest();
@@ -411,7 +412,7 @@ namespace {
net->now(),
ResponseStatus(RemoteCommandResponse(
BSON("ok" << 1),
- Milliseconds(10))));
+ boost::posix_time::milliseconds(10))));
net->runReadyNetworkOperations();
net->exitNetwork();
diff --git a/src/mongo/db/repl/sync_source_feedback.cpp b/src/mongo/db/repl/sync_source_feedback.cpp
index 6773758ef7c..674555f7c53 100644
--- a/src/mongo/db/repl/sync_source_feedback.cpp
+++ b/src/mongo/db/repl/sync_source_feedback.cpp
@@ -127,7 +127,8 @@ namespace repl {
catch (const DBException& e) {
log() << "SyncSourceFeedback error sending update: " << e.what() << endl;
// blacklist sync target for .5 seconds and find a new one
- replCoord->blacklistSyncSource(_syncTarget, Date_t::now() + Milliseconds(500));
+ replCoord->blacklistSyncSource(_syncTarget,
+ Date_t(curTimeMillis64() + 500));
BackgroundSync::get()->clearSyncTarget();
_resetConnection();
return e.toStatus();
@@ -140,7 +141,8 @@ namespace repl {
// to the syncsource having a newer config
if (status != ErrorCodes::InvalidReplicaSetConfig || res["cfgver"].eoo() ||
res["cfgver"].numberLong() < replCoord->getConfig().getConfigVersion()) {
- replCoord->blacklistSyncSource(_syncTarget, Date_t::now() + Milliseconds(500));
+ replCoord->blacklistSyncSource(_syncTarget,
+ Date_t(curTimeMillis64() + 500));
BackgroundSync::get()->clearSyncTarget();
_resetConnection();
}
diff --git a/src/mongo/db/repl/sync_tail.cpp b/src/mongo/db/repl/sync_tail.cpp
index 4b37ea14c89..f3d20488f69 100644
--- a/src/mongo/db/repl/sync_tail.cpp
+++ b/src/mongo/db/repl/sync_tail.cpp
@@ -500,7 +500,7 @@ namespace {
tryToGoLiveAsASecondary(&txn, replCoord);
}
- const int slaveDelaySecs = replCoord->getSlaveDelaySecs().count();
+ const int slaveDelaySecs = replCoord->getSlaveDelaySecs().total_seconds();
if (!ops.empty() && slaveDelaySecs > 0) {
const BSONObj& lastOp = ops.getDeque().back();
const unsigned int opTimestampSecs = lastOp["ts"].timestamp().getSecs();
@@ -620,7 +620,7 @@ namespace {
void SyncTail::handleSlaveDelay(const BSONObj& lastOp) {
ReplicationCoordinator* replCoord = getGlobalReplicationCoordinator();
- int slaveDelaySecs = replCoord->getSlaveDelaySecs().count();
+ int slaveDelaySecs = replCoord->getSlaveDelaySecs().total_seconds();
// ignore slaveDelay if the box is still initializing. once
// it becomes secondary we can worry about it.
@@ -645,7 +645,7 @@ namespace {
sleepsecs(6);
// Handle reconfigs that changed the slave delay
- if (replCoord->getSlaveDelaySecs().count() != slaveDelaySecs)
+ if (replCoord->getSlaveDelaySecs().total_seconds() != slaveDelaySecs)
break;
}
}
diff --git a/src/mongo/db/repl/topology_coordinator_impl.cpp b/src/mongo/db/repl/topology_coordinator_impl.cpp
index 30df7dca0f4..784f682e1ab 100644
--- a/src/mongo/db/repl/topology_coordinator_impl.cpp
+++ b/src/mongo/db/repl/topology_coordinator_impl.cpp
@@ -68,7 +68,7 @@ namespace {
// Interval between the time the last heartbeat from a node was received successfully, or
// the time when we gave up retrying, and when the next heartbeat should be sent to a target.
- const auto kHeartbeatInterval = Seconds{2};
+ const Milliseconds kHeartbeatInterval(Seconds(2).total_milliseconds());
// Maximum number of retries for a failed heartbeat.
const int kMaxHeartbeatRetries = 2;
@@ -103,6 +103,7 @@ namespace {
PingStats::PingStats() :
count(0),
value(std::numeric_limits<unsigned int>::max()),
+ _lastHeartbeatStartDate(0),
_numFailuresSinceLastStart(std::numeric_limits<int>::max()) {
}
@@ -129,6 +130,8 @@ namespace {
_maxSyncSourceLagSecs(maxSyncSourceLagSecs),
_selfIndex(-1),
_stepDownPending(false),
+ _stepDownUntil(0),
+ _electionSleepUntil(0),
_maintenanceModeCalls(0),
_followerMode(MemberState::RS_STARTUP2)
{
@@ -217,17 +220,17 @@ namespace {
}
else {
// choose a time that will exclude no candidates, since we don't see a primary
- primaryOpTime = Timestamp(_maxSyncSourceLagSecs, 0);
+ primaryOpTime = Timestamp(_maxSyncSourceLagSecs.total_seconds(), 0);
}
if (primaryOpTime.getSecs() <
- static_cast<unsigned int>(_maxSyncSourceLagSecs.count())) {
+ static_cast<unsigned int>(_maxSyncSourceLagSecs.total_seconds())) {
// erh - I think this means there was just a new election
// and we don't yet know the new primary's optime
- primaryOpTime = Timestamp(_maxSyncSourceLagSecs, 0);
+ primaryOpTime = Timestamp(_maxSyncSourceLagSecs.total_seconds(), 0);
}
- Timestamp oldestSyncOpTime(primaryOpTime.getSecs() - _maxSyncSourceLagSecs.count(), 0);
+ Timestamp oldestSyncOpTime(primaryOpTime.getSecs() - _maxSyncSourceLagSecs.total_seconds(), 0);
int closestIndex = -1;
@@ -478,7 +481,7 @@ namespace {
else if (args.opTime < _latestKnownOpTime(lastOpApplied)) {
weAreFresher = true;
}
- response->appendDate("opTime", Date_t::fromMillisSinceEpoch(lastOpApplied.asLL()));
+ response->appendDate("opTime", lastOpApplied.asULL());
response->append("fresher", weAreFresher);
std::string errmsg;
@@ -619,12 +622,12 @@ namespace {
<< highestPriority->getHostAndPort().toString();
vote = -10000;
}
- else if (_voteLease.when + VoteLease::leaseTime >= now &&
+ else if (_voteLease.when.millis + VoteLease::leaseTime.total_milliseconds() >= now.millis &&
_voteLease.whoId != args.whoid) {
log() << "replSet voting no for "
<< hopeful->getHostAndPort().toString()
<< "; voted for " << _voteLease.whoHostAndPort.toString() << ' '
- << durationCount<Seconds>(now - _voteLease.when) << " secs ago";
+ << (now.millis - _voteLease.when.millis) / 1000 << " secs ago";
}
else {
_voteLease.when = now;
@@ -701,7 +704,7 @@ namespace {
// Heartbeat status message
response->setHbMsg(_getHbmsg(now));
- response->setTime(duration_cast<Seconds>(now - Date_t{}));
+ response->setTime(Seconds(Milliseconds(now.asInt64()).total_seconds()));
response->setOpTime(lastOpApplied);
if (!_syncSource.empty()) {
@@ -760,7 +763,7 @@ namespace {
const HostAndPort& target) {
PingStats& hbStats = _pings[target];
- Milliseconds alreadyElapsed = now - hbStats.getLastHeartbeatStartDate();
+ Milliseconds alreadyElapsed(now.asInt64() - hbStats.getLastHeartbeatStartDate().asInt64());
if (!_rsConfig.isInitialized() ||
(hbStats.getNumFailuresSinceLastStart() > kMaxHeartbeatRetries) ||
(alreadyElapsed >= _rsConfig.getHeartbeatTimeoutPeriodMillis())) {
@@ -790,8 +793,10 @@ namespace {
const Milliseconds timeoutPeriod(
_rsConfig.isInitialized() ?
_rsConfig.getHeartbeatTimeoutPeriodMillis() :
- ReplicaSetConfig::kDefaultHeartbeatTimeoutPeriod);
- const Milliseconds timeout = timeoutPeriod - alreadyElapsed;
+ Milliseconds(
+ ReplicaSetConfig::kDefaultHeartbeatTimeoutPeriod.total_milliseconds()));
+ const Milliseconds timeout(
+ timeoutPeriod.total_milliseconds() - alreadyElapsed.total_milliseconds());
return std::make_pair(hbArgs, timeout);
}
@@ -804,12 +809,12 @@ namespace {
const MemberState originalState = getMemberState();
PingStats& hbStats = _pings[target];
- invariant(hbStats.getLastHeartbeatStartDate() != Date_t());
+ invariant(hbStats.getLastHeartbeatStartDate() != Date_t(0));
if (!hbResponse.isOK()) {
hbStats.miss();
}
else {
- hbStats.hit(networkRoundTripTime.count());
+ hbStats.hit(networkRoundTripTime.total_milliseconds());
// Log diagnostics.
if (hbResponse.getValue().isStateDisagreement()) {
LOG(1) << target <<
@@ -821,7 +826,7 @@ namespace {
(hbResponse.getStatus().code() == ErrorCodes::Unauthorized) ||
(hbResponse.getStatus().code() == ErrorCodes::AuthenticationFailed);
- const Milliseconds alreadyElapsed = now - hbStats.getLastHeartbeatStartDate();
+ Milliseconds alreadyElapsed(now.asInt64() - hbStats.getLastHeartbeatStartDate().asInt64());
Date_t nextHeartbeatStartDate;
if (_rsConfig.isInitialized() &&
(hbStats.getNumFailuresSinceLastStart() <= kMaxHeartbeatRetries) &&
@@ -831,16 +836,16 @@ namespace {
LOG(1) << "Bad heartbeat response from " << target <<
"; trying again; Retries left: " <<
(kMaxHeartbeatRetries - hbStats.getNumFailuresSinceLastStart()) <<
- "; " << alreadyElapsed.count() << "ms have already elapsed";
+ "; " << alreadyElapsed.total_milliseconds() << "ms have already elapsed";
}
if (isUnauthorized) {
- nextHeartbeatStartDate = now + kHeartbeatInterval;
+ nextHeartbeatStartDate = now + kHeartbeatInterval.total_milliseconds();
} else {
nextHeartbeatStartDate = now;
}
}
else {
- nextHeartbeatStartDate = now + kHeartbeatInterval;
+ nextHeartbeatStartDate = now + kHeartbeatInterval.total_milliseconds();
}
if (hbResponse.isOK() && hbResponse.getValue().hasConfig()) {
@@ -987,7 +992,9 @@ namespace {
<< highestPriorityMember.getPriority() << " and is only "
<< (latestOpTime.getSecs() - highestPriorityMemberOptime.getSecs())
<< " seconds behind me";
- const Date_t until = now + VoteLease::leaseTime + kHeartbeatInterval;
+ const Date_t until = now +
+ VoteLease::leaseTime.total_milliseconds() +
+ kHeartbeatInterval.total_milliseconds();
if (_electionSleepUntil < until) {
_electionSleepUntil = until;
}
@@ -1325,8 +1332,7 @@ namespace {
response->append("stateStr", myState.toString());
response->append("uptime", selfUptime);
response->append("optime", lastOpApplied);
- response->appendDate("optimeDate",
- Date_t::fromDurationSinceEpoch(Seconds(lastOpApplied.getSecs())));
+ response->appendDate("optimeDate", Date_t(lastOpApplied.getSecs() * 1000ULL));
if (_maintenanceModeCalls) {
response->append("maintenanceMode", _maintenanceModeCalls);
}
@@ -1353,8 +1359,7 @@ namespace {
bb.append("uptime", selfUptime);
if (!_selfConfig().isArbiter()) {
bb.append("optime", lastOpApplied);
- bb.appendDate("optimeDate",
- Date_t::fromDurationSinceEpoch(Seconds(lastOpApplied.getSecs())));
+ bb.appendDate("optimeDate", Date_t(lastOpApplied.getSecs() * 1000ULL));
}
if (!_syncSource.empty() && !_iAmPrimary()) {
@@ -1371,8 +1376,7 @@ namespace {
if (myState.primary()) {
bb.append("electionTime", _electionTime);
- bb.appendDate("electionDate",
- Date_t::fromDurationSinceEpoch(Seconds(_electionTime.getSecs())));
+ bb.appendDate("electionDate", Date_t(_electionTime.getSecs() * 1000ULL));
}
bb.appendIntOrLL("configVersion", _rsConfig.getConfigVersion());
bb.append("self", true);
@@ -1397,16 +1401,12 @@ namespace {
bb.append("stateStr", it->getState().toString());
}
- const unsigned int uptime = static_cast<unsigned int>(
- (it->getUpSince() != Date_t()?
- durationCount<Seconds>(now - it->getUpSince()) :
- 0));
+ const unsigned int uptime = static_cast<unsigned int> ((it->getUpSince() ?
+ (now - it->getUpSince()) / 1000 /* convert millis to secs */ : 0));
bb.append("uptime", uptime);
if (!itConfig.isArbiter()) {
bb.append("optime", it->getOpTime());
- bb.appendDate(
- "optimeDate",
- Date_t::fromDurationSinceEpoch(Seconds(it->getOpTime().getSecs())));
+ bb.appendDate("optimeDate", Date_t(it->getOpTime().getSecs() * 1000ULL));
}
bb.appendDate("lastHeartbeat", it->getLastHeartbeat());
bb.appendDate("lastHeartbeatRecv", it->getLastHeartbeatRecv());
@@ -1428,8 +1428,7 @@ namespace {
if (state == MemberState::RS_PRIMARY) {
bb.append("electionTime", it->getElectionTime());
bb.appendDate("electionDate",
- Date_t::fromDurationSinceEpoch(
- Seconds(it->getElectionTime().getSecs())));
+ Date_t(it->getElectionTime().getSecs() * 1000ULL));
}
bb.appendIntOrLL("configVersion", it->getConfigVersion());
membersOut.push_back(bb.obj());
@@ -1469,7 +1468,7 @@ namespace {
{
for (ReplicaSetConfig::MemberIterator it = _rsConfig.membersBegin();
it != _rsConfig.membersEnd(); ++it) {
- if (it->isHidden() || it->getSlaveDelay() > Seconds{0}) {
+ if (it->isHidden() || it->getSlaveDelay().total_seconds() > 0) {
continue;
}
@@ -1497,7 +1496,7 @@ namespace {
else if (selfConfig.getPriority() == 0) {
response->setIsPassive(true);
}
- if (selfConfig.getSlaveDelay().count()) {
+ if (selfConfig.getSlaveDelay().total_seconds()) {
response->setSlaveDelay(selfConfig.getSlaveDelay());
}
if (selfConfig.isHidden()) {
@@ -1548,7 +1547,7 @@ namespace {
response->append("warning", "you really want to freeze for only 1 second?");
if (!_iAmPrimary()) {
- _stepDownUntil = std::max(_stepDownUntil, now + Seconds(secs));
+ _stepDownUntil = std::max(_stepDownUntil, Date_t(now + (secs * 1000)));
log() << "'freezing' for " << secs << " seconds";
}
else {
@@ -1675,7 +1674,7 @@ namespace {
}
std::string TopologyCoordinatorImpl::_getHbmsg(Date_t now) const {
// ignore messages over 2 minutes old
- if ((now - _hbmsgTime) > Seconds{120}) {
+ if ((now - _hbmsgTime) > 120) {
return "";
}
return _hbmsg;
@@ -1743,7 +1742,7 @@ namespace {
}
if (_voteLease.whoId != -1 &&
_voteLease.whoId !=_rsConfig.getMemberAt(_selfIndex).getId() &&
- _voteLease.when + VoteLease::leaseTime >= now) {
+ _voteLease.when.millis + VoteLease::leaseTime.total_milliseconds() >= now.millis) {
result |= VotedTooRecently;
}
@@ -1880,11 +1879,11 @@ namespace {
return false;
}
int selfId = _selfConfig().getId();
- if ((_voteLease.when + VoteLease::leaseTime >= now)
+ if ((_voteLease.when + VoteLease::leaseTime.total_milliseconds() >= now)
&& (_voteLease.whoId != selfId)) {
log() << "not voting yea for " << selfId <<
" voted for " << _voteLease.whoHostAndPort.toString() << ' ' <<
- durationCount<Seconds>(now - _voteLease.when) << " secs ago";
+ (now - _voteLease.when) / 1000 << " secs ago";
return false;
}
_voteLease.when = now;
@@ -1937,7 +1936,7 @@ namespace {
// Clear voteLease time, if we voted for ourselves in this election.
// This will allow us to vote for others.
if (_voteLease.whoId == _selfConfig().getId()) {
- _voteLease.when = Date_t();
+ _voteLease.when = 0;
}
}
@@ -2062,7 +2061,7 @@ namespace {
return false;
}
unsigned int currentSecs = currentOpTime.getSecs();
- unsigned int goalSecs = currentSecs + _maxSyncSourceLagSecs.count();
+ unsigned int goalSecs = currentSecs + _maxSyncSourceLagSecs.total_seconds();
for (std::vector<MemberHeartbeatData>::const_iterator it = _hbdata.begin();
it != _hbdata.end();
@@ -2076,7 +2075,7 @@ namespace {
goalSecs < it->getOpTime().getSecs()) {
log() << "changing sync target because current sync target's most recent OpTime is "
<< currentOpTime.toStringLong() << " which is more than "
- << _maxSyncSourceLagSecs.count() << " seconds behind member "
+ << _maxSyncSourceLagSecs.total_seconds() << " seconds behind member "
<< candidateConfig.getHostAndPort().toString()
<< " whose most recent OpTime is " << it->getOpTime().toStringLong();
invariant(itIndex != _selfIndex);
diff --git a/src/mongo/db/repl/topology_coordinator_impl.h b/src/mongo/db/repl/topology_coordinator_impl.h
index 0bb373cad31..39d186c7308 100644
--- a/src/mongo/db/repl/topology_coordinator_impl.h
+++ b/src/mongo/db/repl/topology_coordinator_impl.h
@@ -403,8 +403,9 @@ namespace repl {
static const Seconds leaseTime;
+ VoteLease() : when(0), whoId(-1) { }
Date_t when;
- int whoId = -1;
+ int whoId;
HostAndPort whoHostAndPort;
} _voteLease;
diff --git a/src/mongo/db/repl/topology_coordinator_impl_test.cpp b/src/mongo/db/repl/topology_coordinator_impl_test.cpp
index 50f462900d7..1e12a7ac2cf 100644
--- a/src/mongo/db/repl/topology_coordinator_impl_test.cpp
+++ b/src/mongo/db/repl/topology_coordinator_impl_test.cpp
@@ -115,7 +115,7 @@ namespace {
else {
invariant(now > _now);
getTopoCoord().updateConfig(config, selfIndex, now, lastOp);
- _now = now + Milliseconds(1);
+ _now = now + 1;
}
}
@@ -772,7 +772,7 @@ namespace {
Date_t startupTime(100);
Date_t heartbeatTime = 5000;
Seconds uptimeSecs(10);
- Date_t curTime = heartbeatTime + uptimeSecs;
+ Date_t curTime = heartbeatTime + uptimeSecs.total_milliseconds();
Timestamp electionTime(1, 2);
Timestamp oplogProgress(3, 4);
std::string setName = "mySet";
@@ -784,13 +784,13 @@ namespace {
BSON("_id" << 2 << "host" << "test2:1234") <<
BSON("_id" << 3 << "host" << "test3:1234"))),
3,
- startupTime + Milliseconds(1));
+ startupTime + 1);
// Now that the replica set is setup, put the members into the states we want them in.
HostAndPort member = HostAndPort("test0:1234");
StatusWith<ReplSetHeartbeatResponse> hbResponse =
StatusWith<ReplSetHeartbeatResponse>(Status(ErrorCodes::HostUnreachable, ""));
- getTopoCoord().prepareHeartbeatRequest(startupTime + Milliseconds(2), setName, member);
+ getTopoCoord().prepareHeartbeatRequest(startupTime + 2, setName, member);
getTopoCoord().processHeartbeatResponse(heartbeatTime,
Milliseconds(0),
member,
@@ -805,7 +805,7 @@ namespace {
hb.setHbMsg("READY");
hb.setOpTime(oplogProgress);
hbResponse = StatusWith<ReplSetHeartbeatResponse>(hb);
- getTopoCoord().prepareHeartbeatRequest(startupTime + Milliseconds(2),
+ getTopoCoord().prepareHeartbeatRequest(startupTime + 2,
setName,
member);
getTopoCoord().processHeartbeatResponse(heartbeatTime,
@@ -820,7 +820,7 @@ namespace {
Status resultStatus(ErrorCodes::InternalError, "prepareStatusResponse didn't set result");
getTopoCoord().prepareStatusResponse(cbData(),
curTime,
- uptimeSecs.count(),
+ uptimeSecs.total_seconds(),
oplogProgress,
&statusBuilder,
&resultStatus);
@@ -845,8 +845,8 @@ namespace {
ASSERT_EQUALS(0, member0Status["uptime"].numberInt());
ASSERT_EQUALS(Timestamp(), Timestamp(member0Status["optime"].timestampValue()));
ASSERT_TRUE(member0Status.hasField("optimeDate"));
- ASSERT_EQUALS(Date_t::fromMillisSinceEpoch(Timestamp().getSecs() * 1000ULL),
- member0Status["optimeDate"].Date());
+ ASSERT_EQUALS(Date_t(Timestamp().getSecs() * 1000ULL),
+ member0Status["optimeDate"].Date().millis);
ASSERT_EQUALS(heartbeatTime, member0Status["lastHeartbeat"].date());
ASSERT_EQUALS(Date_t(), member0Status["lastHeartbeatRecv"].date());
@@ -857,11 +857,11 @@ namespace {
ASSERT_EQUALS(MemberState::RS_SECONDARY, member1Status["state"].numberInt());
ASSERT_EQUALS(MemberState(MemberState::RS_SECONDARY).toString(),
member1Status["stateStr"].String());
- ASSERT_EQUALS(uptimeSecs.count(), member1Status["uptime"].numberInt());
+ ASSERT_EQUALS(uptimeSecs.total_seconds(), member1Status["uptime"].numberInt());
ASSERT_EQUALS(oplogProgress, Timestamp(member1Status["optime"].timestampValue()));
ASSERT_TRUE(member1Status.hasField("optimeDate"));
- ASSERT_EQUALS(Date_t::fromMillisSinceEpoch(oplogProgress.getSecs() * 1000ULL),
- member1Status["optimeDate"].Date());
+ ASSERT_EQUALS(Date_t(oplogProgress.getSecs() * 1000ULL),
+ member1Status["optimeDate"].Date().millis);
ASSERT_EQUALS(heartbeatTime, member1Status["lastHeartbeat"].date());
ASSERT_EQUALS(Date_t(), member1Status["lastHeartbeatRecv"].date());
ASSERT_EQUALS("READY", member1Status["lastHeartbeatMessage"].str());
@@ -889,11 +889,11 @@ namespace {
ASSERT_EQUALS(MemberState::RS_PRIMARY, selfStatus["state"].numberInt());
ASSERT_EQUALS(MemberState(MemberState::RS_PRIMARY).toString(),
selfStatus["stateStr"].str());
- ASSERT_EQUALS(uptimeSecs.count(), selfStatus["uptime"].numberInt());
+ ASSERT_EQUALS(uptimeSecs.total_seconds(), selfStatus["uptime"].numberInt());
ASSERT_EQUALS(oplogProgress, Timestamp(selfStatus["optime"].timestampValue()));
ASSERT_TRUE(selfStatus.hasField("optimeDate"));
- ASSERT_EQUALS(Date_t::fromMillisSinceEpoch(oplogProgress.getSecs() * 1000ULL),
- selfStatus["optimeDate"].Date());
+ ASSERT_EQUALS(Date_t(oplogProgress.getSecs() * 1000ULL),
+ selfStatus["optimeDate"].Date().millis);
// TODO(spencer): Test electionTime and pingMs are set properly
}
@@ -904,7 +904,7 @@ namespace {
Date_t startupTime(100);
Date_t heartbeatTime = 5000;
Seconds uptimeSecs(10);
- Date_t curTime = heartbeatTime + uptimeSecs;
+ Date_t curTime = heartbeatTime + uptimeSecs.total_milliseconds();
Timestamp oplogProgress(3, 4);
std::string setName = "mySet";
@@ -914,13 +914,13 @@ namespace {
BSON("_id" << 1 << "host" << "test1:1234") <<
BSON("_id" << 2 << "host" << "test2:1234"))),
-1, // This one is not part of the replica set.
- startupTime + Milliseconds(1));
+ startupTime + 1);
BSONObjBuilder statusBuilder;
Status resultStatus(ErrorCodes::InternalError, "prepareStatusResponse didn't set result");
getTopoCoord().prepareStatusResponse(cbData(),
curTime,
- uptimeSecs.count(),
+ uptimeSecs.total_seconds(),
oplogProgress,
&statusBuilder,
&resultStatus);
@@ -1211,12 +1211,12 @@ namespace {
"rs0",
_target);
// 5 seconds to successfully complete the heartbeat before the timeout expires.
- ASSERT_EQUALS(5000, request.second.count());
+ ASSERT_EQUALS(5000, request.second.total_milliseconds());
// Initial heartbeat request fails at t + 4000ms
HeartbeatResponseAction action =
getTopoCoord().processHeartbeatResponse(
- _firstRequestDate + Seconds(4), // 4 seconds elapsed, retry allowed.
+ _firstRequestDate + 4000, // 4 seconds elapsed, retry allowed.
Milliseconds(3990), // Spent 3.99 of the 4 seconds in the network.
_target,
StatusWith<ReplSetHeartbeatResponse>(ErrorCodes::ExceededTimeLimit,
@@ -1226,17 +1226,16 @@ namespace {
ASSERT_EQUALS(HeartbeatResponseAction::NoAction, action.getAction());
ASSERT_TRUE(TopologyCoordinator::Role::follower == getTopoCoord().getRole());
// Because the heartbeat failed without timing out, we expect to retry immediately.
- ASSERT_EQUALS(Date_t(_firstRequestDate + Seconds(4)),
- action.getNextHeartbeatStartDate());
+ ASSERT_EQUALS(Date_t(_firstRequestDate + 4000), action.getNextHeartbeatStartDate());
// First heartbeat retry prepared, at t + 4000ms.
request =
getTopoCoord().prepareHeartbeatRequest(
- _firstRequestDate + Milliseconds(4000),
+ _firstRequestDate + 4000,
"rs0",
_target);
// One second left to complete the heartbeat.
- ASSERT_EQUALS(1000, request.second.count());
+ ASSERT_EQUALS(1000, request.second.total_milliseconds());
}
Date_t firstRequestDate() {
@@ -1260,8 +1259,7 @@ namespace {
// First retry fails at t + 4500ms
HeartbeatResponseAction action =
getTopoCoord().processHeartbeatResponse(
- firstRequestDate() + Milliseconds(4500), // 4.5 of the 5 seconds elapsed;
- // could retry.
+ firstRequestDate() + 4500, // 4.5 of the 5 seconds elapsed; could retry.
Milliseconds(400), // Spent 0.4 of the 0.5 seconds in the network.
target(),
StatusWith<ReplSetHeartbeatResponse>(ErrorCodes::NodeNotFound, "Bad DNS?"),
@@ -1269,17 +1267,16 @@ namespace {
ASSERT_EQUALS(HeartbeatResponseAction::NoAction, action.getAction());
ASSERT_TRUE(TopologyCoordinator::Role::follower == getTopoCoord().getRole());
// Because the first retry failed without timing out, we expect to retry immediately.
- ASSERT_EQUALS(firstRequestDate() + Milliseconds(4500),
- action.getNextHeartbeatStartDate());
+ ASSERT_EQUALS(Date_t(firstRequestDate() + 4500), action.getNextHeartbeatStartDate());
// Second retry prepared at t + 4500ms.
std::pair<ReplSetHeartbeatArgs, Milliseconds> request =
getTopoCoord().prepareHeartbeatRequest(
- firstRequestDate() + Milliseconds(4500),
+ firstRequestDate() + 4500,
"rs0",
target());
// 500ms left to complete the heartbeat.
- ASSERT_EQUALS(500, request.second.count());
+ ASSERT_EQUALS(500, request.second.total_milliseconds());
}
};
@@ -1439,14 +1436,14 @@ namespace {
reconfigResponse.setConfig(newConfig);
HeartbeatResponseAction action =
getTopoCoord().processHeartbeatResponse(
- firstRequestDate() + Milliseconds(4500), // Time is left.
+ firstRequestDate() + 4500, // Time is left.
Milliseconds(400), // Spent 0.4 of the 0.5 second in the network.
target(),
StatusWith<ReplSetHeartbeatResponse>(reconfigResponse),
Timestamp(0, 0)); // We've never applied anything.
ASSERT_EQUALS(HeartbeatResponseAction::Reconfig, action.getAction());
ASSERT_TRUE(TopologyCoordinator::Role::follower == getTopoCoord().getRole());
- ASSERT_EQUALS(firstRequestDate() + Milliseconds(6500), action.getNextHeartbeatStartDate());
+ ASSERT_EQUALS(Date_t(firstRequestDate() + 6500), action.getNextHeartbeatStartDate());
}
TEST_F(HeartbeatResponseTestOneRetry, DecideToStepDownRemotePrimary) {
@@ -1467,14 +1464,14 @@ namespace {
electedMoreRecentlyResponse.setVersion(5);
HeartbeatResponseAction action =
getTopoCoord().processHeartbeatResponse(
- firstRequestDate() + Milliseconds(4500), // Time is left.
+ firstRequestDate() + 4500, // Time is left.
Milliseconds(400), // Spent 0.4 of the 0.5 second in the network.
target(),
StatusWith<ReplSetHeartbeatResponse>(electedMoreRecentlyResponse),
Timestamp(0,0)); // We've never applied anything.
ASSERT_EQUALS(HeartbeatResponseAction::StepDownRemotePrimary, action.getAction());
ASSERT_EQUALS(1, action.getPrimaryConfigIndex());
- ASSERT_EQUALS(firstRequestDate() + Milliseconds(6500), action.getNextHeartbeatStartDate());
+ ASSERT_EQUALS(Date_t(firstRequestDate() + 6500), action.getNextHeartbeatStartDate());
}
TEST_F(HeartbeatResponseTestOneRetry, DecideToStepDownSelf) {
@@ -1499,14 +1496,14 @@ namespace {
electedMoreRecentlyResponse.setVersion(5);
action =
getTopoCoord().processHeartbeatResponse(
- firstRequestDate() + Milliseconds(4500), // Time is left.
+ firstRequestDate() + 4500, // Time is left.
Milliseconds(400), // Spent 0.4 of the 0.5 second in the network.
target(),
StatusWith<ReplSetHeartbeatResponse>(electedMoreRecentlyResponse),
Timestamp(0, 0)); // We've never applied anything.
ASSERT_EQUALS(HeartbeatResponseAction::StepDownSelf, action.getAction());
ASSERT_EQUALS(0, action.getPrimaryConfigIndex());
- ASSERT_EQUALS(firstRequestDate() + Milliseconds(6500), action.getNextHeartbeatStartDate());
+ ASSERT_EQUALS(Date_t(firstRequestDate() + 6500), action.getNextHeartbeatStartDate());
// Doesn't actually do the stepdown until stepDownIfPending is called
ASSERT_TRUE(TopologyCoordinator::Role::leader == getTopoCoord().getRole());
ASSERT_EQUALS(0, getCurrentPrimaryIndex());
@@ -1542,14 +1539,14 @@ namespace {
startElectionResponse.setVersion(5);
action =
getTopoCoord().processHeartbeatResponse(
- firstRequestDate() + Milliseconds(4500), // Time is left.
+ firstRequestDate() + 4500, // Time is left.
Milliseconds(400), // Spent 0.4 of the 0.5 second in the network.
target(),
StatusWith<ReplSetHeartbeatResponse>(startElectionResponse),
election);
ASSERT_EQUALS(HeartbeatResponseAction::StartElection, action.getAction());
ASSERT_TRUE(TopologyCoordinator::Role::candidate == getTopoCoord().getRole());
- ASSERT_EQUALS(firstRequestDate() + Milliseconds(6500), action.getNextHeartbeatStartDate());
+ ASSERT_EQUALS(Date_t(firstRequestDate() + 6500), action.getNextHeartbeatStartDate());
}
TEST_F(HeartbeatResponseTestTwoRetries, HeartbeatRetriesAtMostTwice) {
@@ -1565,8 +1562,7 @@ namespace {
// Second retry fails at t + 4800ms
HeartbeatResponseAction action =
getTopoCoord().processHeartbeatResponse(
- firstRequestDate() + Milliseconds(4800), // 4.8 of the 5 seconds elapsed;
- // could still retry.
+ firstRequestDate() + 4800, // 4.8 of the 5 seconds elapsed; could still retry.
Milliseconds(100), // Spent 0.1 of the 0.3 seconds in the network.
target(),
StatusWith<ReplSetHeartbeatResponse>(ErrorCodes::NodeNotFound, "Bad DNS?"),
@@ -1575,7 +1571,7 @@ namespace {
ASSERT_TRUE(TopologyCoordinator::Role::follower == getTopoCoord().getRole());
// Because this is the second retry, rather than retry again, we expect to wait for the
// heartbeat interval of 2 seconds to elapse.
- ASSERT_EQUALS(firstRequestDate() + Milliseconds(6800), action.getNextHeartbeatStartDate());
+ ASSERT_EQUALS(Date_t(firstRequestDate() + 6800), action.getNextHeartbeatStartDate());
}
TEST_F(HeartbeatResponseTestTwoRetries, DecideToStepDownRemotePrimary) {
@@ -1596,14 +1592,14 @@ namespace {
electedMoreRecentlyResponse.setVersion(5);
HeartbeatResponseAction action =
getTopoCoord().processHeartbeatResponse(
- firstRequestDate() + Milliseconds(5000), // Time is left.
+ firstRequestDate() + 5000, // Time is left.
Milliseconds(400), // Spent 0.4 of the 0.5 second in the network.
target(),
StatusWith<ReplSetHeartbeatResponse>(electedMoreRecentlyResponse),
Timestamp(0,0)); // We've never applied anything.
ASSERT_EQUALS(HeartbeatResponseAction::StepDownRemotePrimary, action.getAction());
ASSERT_EQUALS(1, action.getPrimaryConfigIndex());
- ASSERT_EQUALS(firstRequestDate() + Milliseconds(7000), action.getNextHeartbeatStartDate());
+ ASSERT_EQUALS(Date_t(firstRequestDate() + 7000), action.getNextHeartbeatStartDate());
}
TEST_F(HeartbeatResponseTestTwoRetries, DecideToStepDownSelf) {
@@ -1628,14 +1624,14 @@ namespace {
electedMoreRecentlyResponse.setVersion(5);
action =
getTopoCoord().processHeartbeatResponse(
- firstRequestDate() + Milliseconds(5000), // Time is left.
+ firstRequestDate() + 5000, // Time is left.
Milliseconds(400), // Spent 0.4 of the 0.5 second in the network.
target(),
StatusWith<ReplSetHeartbeatResponse>(electedMoreRecentlyResponse),
Timestamp(0, 0)); // We've never applied anything.
ASSERT_EQUALS(HeartbeatResponseAction::StepDownSelf, action.getAction());
ASSERT_EQUALS(0, action.getPrimaryConfigIndex());
- ASSERT_EQUALS(firstRequestDate() + Milliseconds(7000), action.getNextHeartbeatStartDate());
+ ASSERT_EQUALS(Date_t(firstRequestDate() + 7000), action.getNextHeartbeatStartDate());
// Doesn't actually do the stepdown until stepDownIfPending is called
ASSERT_TRUE(TopologyCoordinator::Role::leader == getTopoCoord().getRole());
ASSERT_EQUALS(0, getCurrentPrimaryIndex());
@@ -1671,14 +1667,14 @@ namespace {
startElectionResponse.setVersion(5);
action =
getTopoCoord().processHeartbeatResponse(
- firstRequestDate() + Milliseconds(5000), // Time is left.
+ firstRequestDate() + 5000, // Time is left.
Milliseconds(400), // Spent 0.4 of the 0.5 second in the network.
target(),
StatusWith<ReplSetHeartbeatResponse>(startElectionResponse),
election);
ASSERT_EQUALS(HeartbeatResponseAction::StartElection, action.getAction());
ASSERT_TRUE(TopologyCoordinator::Role::candidate == getTopoCoord().getRole());
- ASSERT_EQUALS(firstRequestDate() + Milliseconds(7000), action.getNextHeartbeatStartDate());
+ ASSERT_EQUALS(Date_t(firstRequestDate() + 7000), action.getNextHeartbeatStartDate());
}
TEST_F(HeartbeatResponseTest, HeartbeatTimeoutSuppressesFirstRetry) {
@@ -1694,13 +1690,12 @@ namespace {
"rs0",
target);
// 5 seconds to successfully complete the heartbeat before the timeout expires.
- ASSERT_EQUALS(5000, request.second.count());
+ ASSERT_EQUALS(5000, request.second.total_milliseconds());
// Initial heartbeat request fails at t + 5000ms
HeartbeatResponseAction action =
getTopoCoord().processHeartbeatResponse(
- firstRequestDate + Milliseconds(5000), // Entire heartbeat period elapsed;
- // no retry allowed.
+ firstRequestDate + 5000, // Entire heartbeat period elapsed; no retry allowed.
Milliseconds(4990), // Spent 4.99 of the 5 seconds in the network.
target,
StatusWith<ReplSetHeartbeatResponse>(ErrorCodes::ExceededTimeLimit,
@@ -1710,7 +1705,7 @@ namespace {
ASSERT_EQUALS(HeartbeatResponseAction::NoAction, action.getAction());
ASSERT_TRUE(TopologyCoordinator::Role::follower == getTopoCoord().getRole());
// Because the heartbeat timed out, we'll retry in 2 seconds.
- ASSERT_EQUALS(firstRequestDate + Milliseconds(7000), action.getNextHeartbeatStartDate());
+ ASSERT_EQUALS(Date_t(firstRequestDate + 7000), action.getNextHeartbeatStartDate());
}
TEST_F(HeartbeatResponseTestOneRetry, HeartbeatTimeoutSuppressesSecondRetry) {
@@ -1718,8 +1713,7 @@ namespace {
// the heartbeat timeout period expired before the first retry completed.
HeartbeatResponseAction action =
getTopoCoord().processHeartbeatResponse(
- firstRequestDate() + Milliseconds(5010), // Entire heartbeat period elapsed;
- // no retry allowed.
+ firstRequestDate() + 5010, // Entire heartbeat period elapsed; no retry allowed.
Milliseconds(1000), // Spent 1 of the 1.01 seconds in the network.
target(),
StatusWith<ReplSetHeartbeatResponse>(ErrorCodes::ExceededTimeLimit,
@@ -1729,7 +1723,7 @@ namespace {
ASSERT_EQUALS(HeartbeatResponseAction::NoAction, action.getAction());
ASSERT_TRUE(TopologyCoordinator::Role::follower == getTopoCoord().getRole());
// Because the heartbeat timed out, we'll retry in 2 seconds.
- ASSERT_EQUALS(firstRequestDate() + Milliseconds(7010), action.getNextHeartbeatStartDate());
+ ASSERT_EQUALS(Date_t(firstRequestDate() + 7010), action.getNextHeartbeatStartDate());
}
TEST_F(HeartbeatResponseTest, UpdateHeartbeatDataNewPrimary) {
@@ -3225,7 +3219,7 @@ namespace {
ASSERT_TRUE(response.isReplSet());
ASSERT_EQUALS(MemberState::RS_SECONDARY, response.getState().s);
ASSERT_EQUALS(Timestamp(0,0), response.getOpTime());
- ASSERT_EQUALS(0, response.getTime().count());
+ ASSERT_EQUALS(Seconds(0).total_milliseconds(), response.getTime().total_milliseconds());
ASSERT_EQUALS("", response.getHbMsg());
ASSERT_EQUALS("rs0", response.getReplicaSetName());
ASSERT_EQUALS(1, response.getVersion());
@@ -3248,7 +3242,7 @@ namespace {
ASSERT_TRUE(response.isReplSet());
ASSERT_EQUALS(MemberState::RS_SECONDARY, response.getState().s);
ASSERT_EQUALS(Timestamp(0,0), response.getOpTime());
- ASSERT_EQUALS(0, response.getTime().count());
+ ASSERT_EQUALS(Seconds(0).total_milliseconds(), response.getTime().total_milliseconds());
ASSERT_EQUALS("", response.getHbMsg());
ASSERT_EQUALS("rs0", response.getReplicaSetName());
ASSERT_EQUALS(1, response.getVersion());
@@ -3272,7 +3266,7 @@ namespace {
ASSERT_TRUE(response.isReplSet());
ASSERT_EQUALS(MemberState::RS_SECONDARY, response.getState().s);
ASSERT_EQUALS(Timestamp(0,0), response.getOpTime());
- ASSERT_EQUALS(0, response.getTime().count());
+ ASSERT_EQUALS(Seconds(0).total_milliseconds(), response.getTime().total_milliseconds());
ASSERT_EQUALS("", response.getHbMsg());
ASSERT_EQUALS("rs0", response.getReplicaSetName());
ASSERT_EQUALS(1, response.getVersion());
@@ -3296,7 +3290,7 @@ namespace {
ASSERT_TRUE(response.isReplSet());
ASSERT_EQUALS(MemberState::RS_SECONDARY, response.getState().s);
ASSERT_EQUALS(Timestamp(0,0), response.getOpTime());
- ASSERT_EQUALS(0, response.getTime().count());
+ ASSERT_EQUALS(Seconds(0).total_milliseconds(), response.getTime().total_milliseconds());
ASSERT_EQUALS("", response.getHbMsg());
ASSERT_EQUALS("rs0", response.getReplicaSetName());
ASSERT_EQUALS(1, response.getVersion());
@@ -3319,7 +3313,7 @@ namespace {
ASSERT_TRUE(response.isReplSet());
ASSERT_EQUALS(MemberState::RS_SECONDARY, response.getState().s);
ASSERT_EQUALS(Timestamp(0,0), response.getOpTime());
- ASSERT_EQUALS(0, response.getTime().count());
+ ASSERT_EQUALS(Seconds(0).total_milliseconds(), response.getTime().total_milliseconds());
ASSERT_EQUALS("", response.getHbMsg());
ASSERT_EQUALS("rs0", response.getReplicaSetName());
ASSERT_EQUALS(1, response.getVersion());
@@ -3345,7 +3339,7 @@ namespace {
ASSERT_TRUE(response.isReplSet());
ASSERT_EQUALS(MemberState::RS_SECONDARY, response.getState().s);
ASSERT_EQUALS(Timestamp(100,0), response.getOpTime());
- ASSERT_EQUALS(0, response.getTime().count());
+ ASSERT_EQUALS(Seconds(0).total_milliseconds(), response.getTime().total_milliseconds());
ASSERT_EQUALS("", response.getHbMsg());
ASSERT_EQUALS("rs0", response.getReplicaSetName());
ASSERT_EQUALS(1, response.getVersion());
@@ -3371,7 +3365,7 @@ namespace {
ASSERT_TRUE(response.isReplSet());
ASSERT_EQUALS(MemberState::RS_STARTUP, response.getState().s);
ASSERT_EQUALS(Timestamp(0,0), response.getOpTime());
- ASSERT_EQUALS(0, response.getTime().count());
+ ASSERT_EQUALS(Seconds(0).total_milliseconds(), response.getTime().total_milliseconds());
ASSERT_EQUALS("", response.getHbMsg());
ASSERT_EQUALS("", response.getReplicaSetName());
ASSERT_EQUALS(-2, response.getVersion());
@@ -3398,7 +3392,7 @@ namespace {
ASSERT_EQUALS(MemberState::RS_PRIMARY, response.getState().s);
ASSERT_EQUALS(Timestamp(11,0), response.getOpTime());
ASSERT_EQUALS(Timestamp(10,0), response.getElectionTime());
- ASSERT_EQUALS(0, response.getTime().count());
+ ASSERT_EQUALS(Seconds(0).total_milliseconds(), response.getTime().total_milliseconds());
ASSERT_EQUALS("", response.getHbMsg());
ASSERT_EQUALS("rs0", response.getReplicaSetName());
ASSERT_EQUALS(1, response.getVersion());
@@ -3428,7 +3422,7 @@ namespace {
ASSERT_TRUE(response.isReplSet());
ASSERT_EQUALS(MemberState::RS_SECONDARY, response.getState().s);
ASSERT_EQUALS(Timestamp(100,0), response.getOpTime());
- ASSERT_EQUALS(0, response.getTime().count());
+ ASSERT_EQUALS(Seconds(0).total_milliseconds(), response.getTime().total_milliseconds());
// changed to a syncing message because our sync source changed recently
ASSERT_EQUALS("syncing from: h2:27017", response.getHbMsg());
ASSERT_EQUALS("rs0", response.getReplicaSetName());
@@ -3854,17 +3848,17 @@ namespace {
fresherLastOpTimeApplied,
lastOpTimeApplied);
ASSERT_NO_ACTION(nextAction.getAction());
- getTopoCoord().blacklistSyncSource(HostAndPort("host3"), now() + Milliseconds(100));
+ getTopoCoord().blacklistSyncSource(HostAndPort("host3"), now() + 100);
// set up complete, time for actual check
ASSERT_FALSE(getTopoCoord().shouldChangeSyncSource(HostAndPort("host2"), now()));
// unblacklist with too early a time (node should remain blacklisted)
- getTopoCoord().unblacklistSyncSource(HostAndPort("host3"), now() + Milliseconds(90));
+ getTopoCoord().unblacklistSyncSource(HostAndPort("host3"), now() + 90);
ASSERT_FALSE(getTopoCoord().shouldChangeSyncSource(HostAndPort("host2"), now()));
// unblacklist and it should succeed
- getTopoCoord().unblacklistSyncSource(HostAndPort("host3"), now() + Milliseconds(100));
+ getTopoCoord().unblacklistSyncSource(HostAndPort("host3"), now() + 100);
startCapturingLogMessages();
ASSERT_TRUE(getTopoCoord().shouldChangeSyncSource(HostAndPort("host2"), now()));
stopCapturingLogMessages();
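// A minimal standalone sketch (std::chrono, not MongoDB's Date_t/Milliseconds types)
// of the two addition styles the hunks above switch between: with chrono-style types,
// a time point plus a typed Milliseconds duration is another time point, while the
// reverted integer representation treats "date + 7000" as plain millisecond arithmetic,
// which is why the expected values above are re-wrapped in Date_t(...).
#include <cassert>
#include <chrono>
#include <cstdint>

int main() {
    using namespace std::chrono;

    // chrono style: typed arithmetic on a time point.
    const system_clock::time_point firstRequestDate = system_clock::now();
    const system_clock::time_point nextHeartbeat = firstRequestDate + milliseconds(7000);

    // integer style: the date is a raw millisecond count since the epoch.
    const int64_t firstRequestMillis =
        duration_cast<milliseconds>(firstRequestDate.time_since_epoch()).count();
    const int64_t nextHeartbeatMillis = firstRequestMillis + 7000;

    assert(duration_cast<milliseconds>(nextHeartbeat.time_since_epoch()).count() ==
           nextHeartbeatMillis);
    return 0;
}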
diff --git a/src/mongo/db/stats/range_deleter_server_status.cpp b/src/mongo/db/stats/range_deleter_server_status.cpp
index e625e16f01b..408f7049920 100644
--- a/src/mongo/db/stats/range_deleter_server_status.cpp
+++ b/src/mongo/db/stats/range_deleter_server_status.cpp
@@ -74,16 +74,16 @@ namespace mongo {
BSONObjBuilder entryBuilder;
entryBuilder.append("deletedDocs", (*it)->deletedDocCount);
- if ((*it)->queueEndTS > Date_t()) {
+ if ((*it)->queueEndTS.millis > 0) {
entryBuilder.append("queueStart", (*it)->queueStartTS);
entryBuilder.append("queueEnd", (*it)->queueEndTS);
}
- if ((*it)->deleteEndTS > Date_t()) {
+ if ((*it)->deleteEndTS.millis > 0) {
entryBuilder.append("deleteStart", (*it)->deleteStartTS);
entryBuilder.append("deleteEnd", (*it)->deleteEndTS);
- if ((*it)->waitForReplEndTS > Date_t()) {
+ if ((*it)->waitForReplEndTS.millis > 0) {
entryBuilder.append("waitForReplStart", (*it)->waitForReplStartTS);
entryBuilder.append("waitForReplEnd", (*it)->waitForReplEndTS);
}
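// Standalone sketch (std::chrono, not the actual Date_t) of the "has this timestamp
// been set?" check in the range-deleter hunk above: the pre-revert code compares
// against a default-constructed Date_t, while the reverted code inspects the raw
// .millis field directly.
#include <cassert>
#include <chrono>

int main() {
    using std::chrono::system_clock;
    using time_point = system_clock::time_point;

    time_point queueEndTS{};              // default-constructed: the epoch, i.e. "not set"
    assert(!(queueEndTS > time_point())); // pre-revert style of the check

    queueEndTS = system_clock::now();     // stamped once the queue phase finishes
    assert(queueEndTS > time_point());

    // Reverted style: treat a positive milliseconds-since-epoch value as "set".
    const long long millis = std::chrono::duration_cast<std::chrono::milliseconds>(
                                 queueEndTS.time_since_epoch())
                                 .count();
    assert(millis > 0);
    return 0;
}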
diff --git a/src/mongo/db/storage/key_string.cpp b/src/mongo/db/storage/key_string.cpp
index c77e5fc2e72..0df4a8d8096 100644
--- a/src/mongo/db/storage/key_string.cpp
+++ b/src/mongo/db/storage/key_string.cpp
@@ -755,8 +755,8 @@ namespace mongo {
case CType::kBoolFalse: *stream << false; break;
case CType::kDate:
- *stream << Date_t::fromMillisSinceEpoch(
- endian::bigToNative(readType<uint64_t>(reader, inverted)) ^ (1LL << 63));
+ *stream << Date_t(endian::bigToNative(readType<uint64_t>(reader,
+ inverted)) ^ (1LL << 63));
break;
case CType::kTimestamp:
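// Standalone sketch of the kDate decoding step above: KeyString stores the date as a
// big-endian 64-bit value with the sign bit flipped so that signed milliseconds-since-
// epoch compare correctly as unsigned bytes; decoding flips the bit back and wraps the
// value in a date (Date_t::fromMillisSinceEpoch before the revert, a plain Date_t
// constructor after). Only the bit manipulation is shown here.
#include <cassert>
#include <cstdint>

uint64_t encodeDateMillis(int64_t millisSinceEpoch) {
    // Flipping the sign bit maps the signed range onto the unsigned range while
    // preserving order, so encoded keys can be compared byte-wise.
    return static_cast<uint64_t>(millisSinceEpoch) ^ (1ULL << 63);
}

int64_t decodeDateMillis(uint64_t encoded) {
    return static_cast<int64_t>(encoded ^ (1ULL << 63));
}

int main() {
    const int64_t beforeEpoch = -42;       // dates before 1970 are negative
    const int64_t recent = 1431439286000;  // some positive millisecond count
    assert(decodeDateMillis(encodeDateMillis(beforeEpoch)) == beforeEpoch);
    assert(encodeDateMillis(beforeEpoch) < encodeDateMillis(recent));  // order preserved
    return 0;
}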
diff --git a/src/mongo/db/storage/mmap_v1/data_file_sync.cpp b/src/mongo/db/storage/mmap_v1/data_file_sync.cpp
index e8b9d6d5bd4..cf1d1abca88 100644
--- a/src/mongo/db/storage/mmap_v1/data_file_sync.cpp
+++ b/src/mongo/db/storage/mmap_v1/data_file_sync.cpp
@@ -86,7 +86,7 @@ namespace mongo {
Date_t start = jsTime();
StorageEngine* storageEngine = getGlobalServiceContext()->getGlobalStorageEngine();
int numFiles = storageEngine->flushAllFiles( true );
- time_flushing = (jsTime() - start).count();
+ time_flushing = (int) (jsTime() - start);
_flushed(time_flushing);
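// Standalone sketch (std::chrono) of the flush-timing line above: with typed durations,
// subtracting two time points yields a duration whose millisecond value is read via
// .count(); with the reverted integer Date_t, the subtraction already is a millisecond
// count and is cast straight to int.
#include <chrono>
#include <iostream>
#include <thread>

int main() {
    using namespace std::chrono;

    const steady_clock::time_point start = steady_clock::now();
    std::this_thread::sleep_for(milliseconds(5));  // stand-in for flushAllFiles(true)
    const steady_clock::duration elapsed = steady_clock::now() - start;

    const int time_flushing =
        static_cast<int>(duration_cast<milliseconds>(elapsed).count());
    std::cout << "flushed files in " << time_flushing << "ms" << std::endl;
    return 0;
}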
diff --git a/src/mongo/db/storage/mmap_v1/dur.cpp b/src/mongo/db/storage/mmap_v1/dur.cpp
index b12098199f1..3a1498093bc 100644
--- a/src/mongo/db/storage/mmap_v1/dur.cpp
+++ b/src/mongo/db/storage/mmap_v1/dur.cpp
@@ -701,8 +701,7 @@ namespace {
boost::unique_lock<boost::mutex> lock(flushMutex);
for (unsigned i = 0; i <= 2; i++) {
- if (boost::cv_status::no_timeout == flushRequested.wait_for(
- lock, Milliseconds(oneThird))) {
+ if (flushRequested.timed_wait(lock, Milliseconds(oneThird))) {
// Someone forced a flush
break;
}
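// Standalone sketch (std::condition_variable) of the wait loop above: the pre-revert
// code checks wait_for() against cv_status::no_timeout, while the reverted boost code
// calls timed_wait(), which returns bool. Nothing signals the condition variable here,
// so each wait simply times out.
#include <chrono>
#include <condition_variable>
#include <mutex>

int main() {
    std::mutex flushMutex;
    std::condition_variable flushRequested;
    const std::chrono::milliseconds oneThird(30);  // stand-in for one third of the journal interval

    std::unique_lock<std::mutex> lock(flushMutex);
    for (unsigned i = 0; i <= 2; i++) {
        if (std::cv_status::no_timeout == flushRequested.wait_for(lock, oneThird)) {
            break;  // someone forced a flush
        }
        // timed out: fall through and wait for the next third of the interval
    }
    return 0;
}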
diff --git a/src/mongo/db/ttl.cpp b/src/mongo/db/ttl.cpp
index facbb58082c..43845ffaa34 100644
--- a/src/mongo/db/ttl.cpp
+++ b/src/mongo/db/ttl.cpp
@@ -231,7 +231,7 @@ namespace mongo {
// Read the current time outside of the while loop, so that we don't expand our index
// bounds after every WriteConflictException.
- const Date_t now = Date_t::now();
+ unsigned long long now = curTimeMillis64();
long long numDeleted = 0;
int attempt = 1;
@@ -284,11 +284,10 @@ namespace mongo {
return true;
}
- const Date_t kDawnOfTime =
- Date_t::fromMillisSinceEpoch(std::numeric_limits<long long>::min());
+ const Date_t kDawnOfTime(std::numeric_limits<long long>::min());
const BSONObj startKey = BSON("" << kDawnOfTime);
const BSONObj endKey =
- BSON("" << now - Seconds(secondsExpireElt.numberLong()));
+ BSON("" << Date_t(now - (1000 * secondsExpireElt.numberLong())));
const bool endKeyInclusive = true;
// The canonical check as to whether a key pattern element is "ascending" or
// "descending" is (elt.number() >= 0). This is defined by the Ordering class.
diff --git a/src/mongo/db/write_concern.cpp b/src/mongo/db/write_concern.cpp
index dd591ae4975..7bb8edd0fa2 100644
--- a/src/mongo/db/write_concern.cpp
+++ b/src/mongo/db/write_concern.cpp
@@ -269,8 +269,8 @@ namespace mongo {
}
// Add stats
result->writtenTo = repl::getGlobalReplicationCoordinator()->getHostsWrittenTo(replOpTime);
- gleWtimeStats.recordMillis(replStatus.duration.count());
- result->wTime = replStatus.duration.count();
+ gleWtimeStats.recordMillis(replStatus.duration.total_milliseconds());
+ result->wTime = replStatus.duration.total_milliseconds();
return replStatus.status;
}
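// Standalone sketch (std::chrono) of the wTime reporting change above: the chrono-style
// Milliseconds alias exposes its raw tick count via .count(), whereas the legacy
// duration type restored by this revert exposes total_milliseconds(), as the hunk shows.
#include <chrono>
#include <iostream>

int main() {
    const std::chrono::milliseconds replicationDuration(12);  // hypothetical replication wait
    const long long wTime = replicationDuration.count();      // value is in milliseconds by type
    std::cout << "wtimeMillis: " << wTime << std::endl;
    return 0;
}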