author    Maria van Keulen <maria@mongodb.com>  2019-03-19 22:52:39 -0400
committer Maria van Keulen <maria@mongodb.com>  2019-03-21 18:26:52 -0400
commit    ca3e2c1a9063b5b93547c710a93ffcc440f1b32c (patch)
tree      7e88a9aefe63e8814a7d40f575d99b3de91478a3 /src
parent    3789c3e4f1721e4ebd9cd076339a77d505051857 (diff)
SERVER-40228 Refactor OpTimeAndWallTime into new struct
Diffstat (limited to 'src')
-rw-r--r--  src/mongo/db/repl/initial_syncer.cpp                              |  60
-rw-r--r--  src/mongo/db/repl/initial_syncer.h                                |  15
-rw-r--r--  src/mongo/db/repl/initial_syncer_test.cpp                         | 148
-rw-r--r--  src/mongo/db/repl/member_data.cpp                                 |  33
-rw-r--r--  src/mongo/db/repl/member_data.h                                   |   2
-rw-r--r--  src/mongo/db/repl/oplog.cpp                                       |   3
-rw-r--r--  src/mongo/db/repl/optime.cpp                                      |   4
-rw-r--r--  src/mongo/db/repl/optime.h                                        |   6
-rw-r--r--  src/mongo/db/repl/replication_consistency_markers_impl.cpp        |   2
-rw-r--r--  src/mongo/db/repl/replication_coordinator.h                       |   2
-rw-r--r--  src/mongo/db/repl/replication_coordinator_external_state.h        |   3
-rw-r--r--  src/mongo/db/repl/replication_coordinator_external_state_impl.cpp |  25
-rw-r--r--  src/mongo/db/repl/replication_coordinator_external_state_impl.h   |   3
-rw-r--r--  src/mongo/db/repl/replication_coordinator_external_state_mock.cpp |   7
-rw-r--r--  src/mongo/db/repl/replication_coordinator_external_state_mock.h   |   3
-rw-r--r--  src/mongo/db/repl/replication_coordinator_impl.cpp                | 102
-rw-r--r--  src/mongo/db/repl/replication_coordinator_impl.h                  |   9
-rw-r--r--  src/mongo/db/repl/replication_coordinator_mock.cpp                |  24
-rw-r--r--  src/mongo/db/repl/replication_coordinator_test_fixture.h          |   9
-rw-r--r--  src/mongo/db/repl/sync_tail.cpp                                   |   9
-rw-r--r--  src/mongo/db/repl/topology_coordinator.cpp                        |  14
-rw-r--r--  src/mongo/db/repl/topology_coordinator_v1_test.cpp                |   7
-rw-r--r--  src/mongo/db/write_concern.cpp                                    |   1
-rw-r--r--  src/mongo/dbtests/storage_timestamp_tests.cpp                     |   2
24 files changed, 241 insertions(+), 252 deletions(-)
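
The change replaces the positional alias "using OpTimeAndWallTime = std::tuple<OpTime, Date_t>;" with a named struct, so call sites read x.opTime and x.wallTime instead of std::get<0>(x) and std::get<1>(x). A minimal before/after sketch, using stand-in types for illustration only (the real OpTime and Date_t are MongoDB classes defined elsewhere in the tree):

    // Stand-ins; not the real repl::OpTime / mongo::Date_t.
    #include <iostream>
    #include <tuple>

    struct OpTime { long long ts = 0; };
    struct Date_t { long long millis = 0; };

    // Before: positional access via std::tuple.
    using OldOpTimeAndWallTime = std::tuple<OpTime, Date_t>;

    // After: named members document intent at every use site.
    struct OpTimeAndWallTime {
        OpTime opTime;
        Date_t wallTime;
    };

    int main() {
        OldOpTimeAndWallTime oldStyle = std::make_tuple(OpTime{5}, Date_t{1000});
        OpTimeAndWallTime newStyle = {OpTime{5}, Date_t{1000}};

        std::cout << std::get<0>(oldStyle).ts << "\n";  // what is element 0?
        std::cout << newStyle.opTime.ts << "\n";        // self-describing
        return 0;
    }
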
diff --git a/src/mongo/db/repl/initial_syncer.cpp b/src/mongo/db/repl/initial_syncer.cpp
index 7183fb8b5f2..b074341d4a3 100644
--- a/src/mongo/db/repl/initial_syncer.cpp
+++ b/src/mongo/db/repl/initial_syncer.cpp
@@ -149,16 +149,15 @@ StatusWith<Timestamp> parseTimestampStatus(const QueryResponseStatus& fetchResul
}
}
-StatusWith<std::tuple<OpTime, Date_t>> parseOpTimeAndWallTime(
- const QueryResponseStatus& fetchResult) {
+StatusWith<OpTimeAndWallTime> parseOpTimeAndWallTime(const QueryResponseStatus& fetchResult) {
if (!fetchResult.isOK()) {
return fetchResult.getStatus();
}
const auto docs = fetchResult.getValue().documents;
const auto hasDoc = docs.begin() != docs.end();
if (!hasDoc) {
- return StatusWith<std::tuple<OpTime, Date_t>>{ErrorCodes::NoMatchingDocument,
- "no oplog entry found"};
+ return StatusWith<OpTimeAndWallTime>{ErrorCodes::NoMatchingDocument,
+ "no oplog entry found"};
}
auto opTimeStatus = OpTime::parseFromOplogEntry(docs.front());
@@ -169,7 +168,8 @@ StatusWith<std::tuple<OpTime, Date_t>> parseOpTimeAndWallTime(
if (!wallTimeStatus.getStatus().isOK()) {
return wallTimeStatus.getStatus();
}
- return std::make_tuple(opTimeStatus.getValue(), wallTimeStatus.getValue());
+ OpTimeAndWallTime result = {opTimeStatus.getValue(), wallTimeStatus.getValue()};
+ return result;
}
/**
@@ -320,7 +320,7 @@ InitialSyncer::State InitialSyncer::getState_forTest() const {
Date_t InitialSyncer::getWallClockTime_forTest() const {
stdx::lock_guard<stdx::mutex> lk(_mutex);
- return std::get<1>(_lastApplied);
+ return _lastApplied.wallTime;
}
bool InitialSyncer::_isShuttingDown() const {
@@ -420,7 +420,7 @@ void InitialSyncer::_setUp_inlock(OperationContext* opCtx, std::uint32_t initial
}
void InitialSyncer::_tearDown_inlock(OperationContext* opCtx,
- const StatusWith<std::tuple<OpTime, Date_t>>& lastApplied) {
+ const StatusWith<OpTimeAndWallTime>& lastApplied) {
_stats.initialSyncEnd = _exec->now();
// This might not be necessary if we failed initial sync.
@@ -430,7 +430,7 @@ void InitialSyncer::_tearDown_inlock(OperationContext* opCtx,
if (!lastApplied.isOK()) {
return;
}
- const auto lastAppliedOpTime = std::get<0>(lastApplied.getValue());
+ const auto lastAppliedOpTime = lastApplied.getValue().opTime;
// A node coming out of initial sync must guarantee at least one oplog document is visible
// such that others can sync from this node. Oplog visibility is not rigorously advanced
@@ -477,10 +477,9 @@ void InitialSyncer::_startInitialSyncAttemptCallback(
// This completion guard invokes _finishInitialSyncAttempt on destruction.
auto cancelRemainingWorkInLock = [this]() { _cancelRemainingWork_inlock(); };
- auto finishInitialSyncAttemptFn =
- [this](const StatusWith<std::tuple<OpTime, Date_t>>& lastApplied) {
- _finishInitialSyncAttempt(lastApplied);
- };
+ auto finishInitialSyncAttemptFn = [this](const StatusWith<OpTimeAndWallTime>& lastApplied) {
+ _finishInitialSyncAttempt(lastApplied);
+ };
auto onCompletionGuard =
std::make_shared<OnCompletionGuard>(cancelRemainingWorkInLock, finishInitialSyncAttemptFn);
@@ -495,7 +494,7 @@ void InitialSyncer::_startInitialSyncAttemptCallback(
LOG(2) << "Resetting all optimes before starting this initial sync attempt.";
_opts.resetOptimes();
- _lastApplied = std::make_tuple(OpTime(), Date_t::min());
+ _lastApplied = {OpTime(), Date_t::min()};
_lastFetched = {};
LOG(2) << "Resetting feature compatibility version to last-stable. If the sync source is in "
@@ -732,7 +731,7 @@ void InitialSyncer::_lastOplogEntryFetcherCallbackForBeginApplyingTimestamp(
return;
}
- const auto& lastOpTime = std::get<0>(opTimeResult.getValue());
+ const auto& lastOpTime = opTimeResult.getValue().opTime;
BSONObjBuilder queryBob;
queryBob.append("find", NamespaceString::kServerConfigurationNamespace.coll());
@@ -1025,7 +1024,7 @@ void InitialSyncer::_databasesClonerCallback(const Status& databaseClonerFinishS
void InitialSyncer::_lastOplogEntryFetcherCallbackForStopTimestamp(
const StatusWith<Fetcher::QueryResponse>& result,
std::shared_ptr<OnCompletionGuard> onCompletionGuard) {
- auto resultOpTimeAndWallTime = std::make_tuple(OpTime(), Date_t::min());
+ OpTimeAndWallTime resultOpTimeAndWallTime = {OpTime(), Date_t::min()};
{
stdx::lock_guard<stdx::mutex> lock(_mutex);
auto status = _checkForShutdownAndConvertStatus_inlock(
@@ -1043,13 +1042,13 @@ void InitialSyncer::_lastOplogEntryFetcherCallbackForStopTimestamp(
}
resultOpTimeAndWallTime = optimeStatus.getValue();
- _initialSyncState->stopTimestamp = std::get<0>(resultOpTimeAndWallTime).getTimestamp();
+ _initialSyncState->stopTimestamp = resultOpTimeAndWallTime.opTime.getTimestamp();
// If the beginFetchingTimestamp is different from the stopTimestamp, it indicates that
// there are oplog entries fetched by the oplog fetcher that need to be written to the oplog
// and/or there are operations that need to be applied.
if (_initialSyncState->beginFetchingTimestamp != _initialSyncState->stopTimestamp) {
- invariant(std::get<0>(_lastApplied).isNull());
+ invariant(_lastApplied.opTime.isNull());
_checkApplierProgressAndScheduleGetNextApplierBatch_inlock(lock, onCompletionGuard);
return;
}
@@ -1071,8 +1070,8 @@ void InitialSyncer::_lastOplogEntryFetcherCallbackForStopTimestamp(
auto status = _storage->insertDocument(
opCtx.get(),
_opts.localOplogNS,
- TimestampedBSONObj{oplogSeedDoc, std::get<0>(resultOpTimeAndWallTime).getTimestamp()},
- std::get<0>(resultOpTimeAndWallTime).getTerm());
+ TimestampedBSONObj{oplogSeedDoc, resultOpTimeAndWallTime.opTime.getTimestamp()},
+ resultOpTimeAndWallTime.opTime.getTerm());
if (!status.isOK()) {
stdx::lock_guard<stdx::mutex> lock(_mutex);
onCompletionGuard->setResultAndCancelRemainingWork_inlock(lock, status);
@@ -1080,7 +1079,7 @@ void InitialSyncer::_lastOplogEntryFetcherCallbackForStopTimestamp(
}
const bool orderedCommit = true;
_storage->oplogDiskLocRegister(
- opCtx.get(), std::get<0>(resultOpTimeAndWallTime).getTimestamp(), orderedCommit);
+ opCtx.get(), resultOpTimeAndWallTime.opTime.getTimestamp(), orderedCommit);
}
stdx::lock_guard<stdx::mutex> lock(_mutex);
@@ -1139,7 +1138,7 @@ void InitialSyncer::_getNextApplierBatchCallback(
auto numApplied = ops.size();
MultiApplier::CallbackFn onCompletionFn = [=](const Status& s) {
return _multiApplierCallback(
- s, std::make_tuple(lastApplied, lastAppliedWall), numApplied, onCompletionGuard);
+ s, {lastApplied, lastAppliedWall}, numApplied, onCompletionGuard);
};
_applier = stdx::make_unique<MultiApplier>(
@@ -1159,7 +1158,7 @@ void InitialSyncer::_getNextApplierBatchCallback(
std::string msg = str::stream()
<< "The oplog fetcher is no longer running and we have applied all the oplog entries "
"in the oplog buffer. Aborting this initial sync attempt. Last applied: "
- << std::get<0>(_lastApplied).toString() << ". Last fetched: " << _lastFetched.toString()
+ << _lastApplied.opTime.toString() << ". Last fetched: " << _lastFetched.toString()
<< ". Number of operations applied: " << _initialSyncState->appliedOps;
log() << msg;
status = Status(ErrorCodes::RemoteResultsUnavailable, msg);
@@ -1183,7 +1182,7 @@ void InitialSyncer::_getNextApplierBatchCallback(
}
void InitialSyncer::_multiApplierCallback(const Status& multiApplierStatus,
- std::tuple<OpTime, Date_t> lastApplied,
+ OpTimeAndWallTime lastApplied,
std::uint32_t numApplied,
std::shared_ptr<OnCompletionGuard> onCompletionGuard) {
stdx::lock_guard<stdx::mutex> lock(_mutex);
@@ -1197,7 +1196,7 @@ void InitialSyncer::_multiApplierCallback(const Status& multiApplierStatus,
_initialSyncState->appliedOps += numApplied;
_lastApplied = lastApplied;
- const auto lastAppliedOpTime = std::get<0>(_lastApplied);
+ const auto lastAppliedOpTime = _lastApplied.opTime;
_opts.setMyLastOptime(_lastApplied, ReplicationCoordinator::DataConsistency::Inconsistent);
// Update oplog visibility after applying a batch so that while applying transaction oplog
@@ -1252,7 +1251,7 @@ void InitialSyncer::_lastOplogEntryFetcherCallbackAfterFetchingMissingDocuments(
onCompletionGuard->setResultAndCancelRemainingWork_inlock(lock, optimeStatus.getStatus());
return;
}
- auto&& optime = std::get<0>(optimeStatus.getValue());
+ auto&& optime = optimeStatus.getValue().opTime;
const auto newOplogEnd = optime.getTimestamp();
LOG(2) << "Pushing back minValid from " << _initialSyncState->stopTimestamp << " to "
@@ -1301,8 +1300,7 @@ void InitialSyncer::_rollbackCheckerCheckForRollbackCallback(
onCompletionGuard->setResultAndCancelRemainingWork_inlock(lock, _lastApplied);
}
-void InitialSyncer::_finishInitialSyncAttempt(
- const StatusWith<std::tuple<OpTime, Date_t>>& lastApplied) {
+void InitialSyncer::_finishInitialSyncAttempt(const StatusWith<OpTimeAndWallTime>& lastApplied) {
// Since _finishInitialSyncAttempt can be called from any component's callback function or
// scheduled task, it is possible that we may not be in a TaskExecutor-managed thread when this
// function is invoked.
@@ -1389,7 +1387,7 @@ void InitialSyncer::_finishInitialSyncAttempt(
finishCallbackGuard.dismiss();
}
-void InitialSyncer::_finishCallback(StatusWith<std::tuple<OpTime, Date_t>> lastApplied) {
+void InitialSyncer::_finishCallback(StatusWith<OpTimeAndWallTime> lastApplied) {
// After running callback function, clear '_onCompletion' to release any resources that might be
// held by this function object.
// '_onCompletion' must be moved to a temporary copy and destroyed outside the lock in case
@@ -1485,7 +1483,7 @@ void InitialSyncer::_checkApplierProgressAndScheduleGetNextApplierBatch_inlock(
return;
}
- if (std::get<0>(_lastApplied).isNull()) {
+ if (_lastApplied.opTime.isNull()) {
// Check if any ops occurred while cloning or any ops need to be fetched.
invariant(_initialSyncState->beginFetchingTimestamp < _initialSyncState->stopTimestamp);
log() << "Writing to the oplog and applying operations until "
@@ -1494,9 +1492,9 @@ void InitialSyncer::_checkApplierProgressAndScheduleGetNextApplierBatch_inlock(
<< _initialSyncState->beginFetchingTimestamp.toBSON() << " and applying at "
<< _initialSyncState->beginApplyingTimestamp.toBSON() << ")";
// Fall through to scheduling _getNextApplierBatchCallback().
- } else if (std::get<0>(_lastApplied).getTimestamp() >= _initialSyncState->stopTimestamp) {
+ } else if (_lastApplied.opTime.getTimestamp() >= _initialSyncState->stopTimestamp) {
// Check for rollback if we have applied far enough to be consistent.
- invariant(!std::get<0>(_lastApplied).getTimestamp().isNull());
+ invariant(!_lastApplied.opTime.getTimestamp().isNull());
_scheduleRollbackCheckerCheckForRollback_inlock(lock, onCompletionGuard);
return;
}
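
A small C++ detail visible in the hunks above: a braced initializer list is not deduced as OpTimeAndWallTime when the enclosing function returns StatusWith<OpTimeAndWallTime>, which is why the new code names a local ("OpTimeAndWallTime result = {...}; return result;") rather than writing "return {opTime, wallTime};". A sketch with a simplified wrapper, not the real mongo::StatusWith:

    #include <iostream>

    struct OpTime { long long ts = 0; };
    struct Date_t { long long millis = 0; };
    struct OpTimeAndWallTime {
        OpTime opTime;
        Date_t wallTime;
    };

    template <typename T>
    class Wrapper {  // stand-in for StatusWith<T>
    public:
        Wrapper(T value) : _value(value) {}
        const T& getValue() const { return _value; }
    private:
        T _value;
    };

    Wrapper<OpTimeAndWallTime> parse() {
        OpTime opTime{7};
        Date_t wall{1234};
        // return {opTime, wall};  // would not compile: the braced list is not
        //                         // treated as an OpTimeAndWallTime here
        OpTimeAndWallTime result = {opTime, wall};
        return result;  // converts through Wrapper(T)
        // return OpTimeAndWallTime{opTime, wall};  // an equivalent one-liner
    }

    int main() {
        std::cout << parse().getValue().opTime.ts << "\n";
        return 0;
    }
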
diff --git a/src/mongo/db/repl/initial_syncer.h b/src/mongo/db/repl/initial_syncer.h
index d9c13ec9635..658db40d705 100644
--- a/src/mongo/db/repl/initial_syncer.h
+++ b/src/mongo/db/repl/initial_syncer.h
@@ -145,13 +145,12 @@ public:
/**
* Callback function to report last applied optime of initial sync.
*/
- typedef stdx::function<void(const StatusWith<std::tuple<OpTime, Date_t>>& lastApplied)>
- OnCompletionFn;
+ typedef stdx::function<void(const StatusWith<OpTimeAndWallTime>& lastApplied)> OnCompletionFn;
/**
* Callback completion guard for initial syncer.
*/
- using OnCompletionGuard = CallbackCompletionGuard<StatusWith<std::tuple<OpTime, Date_t>>>;
+ using OnCompletionGuard = CallbackCompletionGuard<StatusWith<OpTimeAndWallTime>>;
using StartCollectionClonerFn = DatabaseCloner::StartCollectionClonerFn;
@@ -371,7 +370,7 @@ private:
* Tears down internal state before reporting final status to caller.
*/
void _tearDown_inlock(OperationContext* opCtx,
- const StatusWith<std::tuple<OpTime, Date_t>>& lastApplied);
+ const StatusWith<OpTimeAndWallTime>& lastApplied);
/**
* Callback to start a single initial sync attempt.
@@ -472,7 +471,7 @@ private:
* Callback for MultiApplier completion.
*/
void _multiApplierCallback(const Status& status,
- std::tuple<OpTime, Date_t> lastApplied,
+ OpTimeAndWallTime lastApplied,
std::uint32_t numApplied,
std::shared_ptr<OnCompletionGuard> onCompletionGuard);
@@ -498,12 +497,12 @@ private:
* Reports result of current initial sync attempt. May schedule another initial sync attempt
* depending on shutdown state and whether we've exhausted all initial sync retries.
*/
- void _finishInitialSyncAttempt(const StatusWith<std::tuple<OpTime, Date_t>>& lastApplied);
+ void _finishInitialSyncAttempt(const StatusWith<OpTimeAndWallTime>& lastApplied);
/**
* Invokes completion callback and transitions state to State::kComplete.
*/
- void _finishCallback(StatusWith<std::tuple<OpTime, Date_t>> lastApplied);
+ void _finishCallback(StatusWith<OpTimeAndWallTime> lastApplied);
// Obtains a valid sync source from the sync source selector.
// Returns error if a sync source cannot be found.
@@ -650,7 +649,7 @@ private:
std::unique_ptr<MultiApplier> _applier; // (M)
HostAndPort _syncSource; // (M)
OpTime _lastFetched; // (MX)
- std::tuple<OpTime, Date_t> _lastApplied; // (MX)
+ OpTimeAndWallTime _lastApplied; // (MX)
std::unique_ptr<OplogBuffer> _oplogBuffer; // (M)
std::unique_ptr<OplogApplier::Observer> _observer; // (S)
std::unique_ptr<OplogApplier> _oplogApplier; // (M)
diff --git a/src/mongo/db/repl/initial_syncer_test.cpp b/src/mongo/db/repl/initial_syncer_test.cpp
index 68373b8a0d5..a8ed2079d4d 100644
--- a/src/mongo/db/repl/initial_syncer_test.cpp
+++ b/src/mongo/db/repl/initial_syncer_test.cpp
@@ -128,8 +128,8 @@ public:
void reset() {
_setMyLastOptime = [this](const OpTimeAndWallTime& opTimeAndWallTime,
ReplicationCoordinator::DataConsistency consistency) {
- _myLastOpTime = std::get<0>(opTimeAndWallTime);
- _myLastWallTime = std::get<1>(opTimeAndWallTime);
+ _myLastOpTime = opTimeAndWallTime.opTime;
+ _myLastWallTime = opTimeAndWallTime.wallTime;
};
_myLastOpTime = OpTime();
_myLastWallTime = Date_t::min();
@@ -769,7 +769,7 @@ TEST_F(InitialSyncerTest, InitialSyncerReturnsCallbackCanceledIfShutdownImmediat
initialSyncer->join();
- ASSERT_EQUALS(ErrorCodes::CallbackCanceled, _lastApplied.getStatus());
+ ASSERT_EQUALS(ErrorCodes::CallbackCanceled, _lastApplied);
}
TEST_F(InitialSyncerTest,
@@ -829,7 +829,7 @@ TEST_F(
initialSyncer->join();
- ASSERT_EQUALS(ErrorCodes::InitialSyncOplogSourceMissing, _lastApplied.getStatus());
+ ASSERT_EQUALS(ErrorCodes::InitialSyncOplogSourceMissing, _lastApplied);
}
// Confirms that InitialSyncer keeps retrying initial sync.
@@ -853,7 +853,7 @@ TEST_F(InitialSyncerTest,
initialSyncer->join();
- ASSERT_EQUALS(ErrorCodes::InitialSyncOplogSourceMissing, _lastApplied.getStatus());
+ ASSERT_EQUALS(ErrorCodes::InitialSyncOplogSourceMissing, _lastApplied);
// Check number of failed attempts in stats.
auto progress = initialSyncer->getInitialSyncProgress();
@@ -874,7 +874,7 @@ TEST_F(InitialSyncerTest, InitialSyncerResetsOptimesOnNewAttempt) {
// Set the last optime to an arbitrary nonzero value. The value of the 'consistency' argument
// doesn't matter. Also set last wall time to an arbitrary non-minimum value.
auto origOptime = OpTime(Timestamp(1000, 1), 1);
- _setMyLastOptime(std::make_tuple(origOptime, Date_t::max()),
+ _setMyLastOptime({origOptime, Date_t::max()},
ReplicationCoordinator::DataConsistency::Inconsistent);
// Start initial sync.
@@ -916,7 +916,7 @@ TEST_F(InitialSyncerTest,
initialSyncer->join();
- ASSERT_EQUALS(ErrorCodes::CallbackCanceled, _lastApplied.getStatus());
+ ASSERT_EQUALS(ErrorCodes::CallbackCanceled, _lastApplied);
}
TEST_F(InitialSyncerTest,
@@ -930,7 +930,7 @@ TEST_F(InitialSyncerTest,
initialSyncer->join();
- ASSERT_EQUALS(ErrorCodes::OperationFailed, _lastApplied.getStatus());
+ ASSERT_EQUALS(ErrorCodes::OperationFailed, _lastApplied);
}
TEST_F(InitialSyncerTest,
@@ -957,7 +957,7 @@ TEST_F(InitialSyncerTest,
initialSyncer->join();
ASSERT_EQUALS(InitialSyncer::State::kComplete, initialSyncer->getState_forTest());
- ASSERT_EQUALS(ErrorCodes::OperationFailed, _lastApplied.getStatus());
+ ASSERT_EQUALS(ErrorCodes::OperationFailed, _lastApplied);
}
// This test verifies that the initial syncer will still transition to a complete state even if
@@ -977,7 +977,7 @@ TEST_F(InitialSyncerTest, InitialSyncerTransitionsToCompleteWhenFinishCallbackTh
ASSERT_OK(initialSyncer->shutdown());
initialSyncer->join();
- ASSERT_EQUALS(ErrorCodes::CallbackCanceled, _lastApplied.getStatus());
+ ASSERT_EQUALS(ErrorCodes::CallbackCanceled, _lastApplied);
}
class SharedCallbackState {
@@ -1053,7 +1053,7 @@ TEST_F(InitialSyncerTest, InitialSyncerTruncatesOplogAndDropsReplicatedDatabases
ASSERT_OK(initialSyncer->startup(opCtx.get(), maxAttempts));
initialSyncer->join();
- ASSERT_EQUALS(ErrorCodes::OperationFailed, _lastApplied.getStatus());
+ ASSERT_EQUALS(ErrorCodes::OperationFailed, _lastApplied);
LockGuard lock(_storageInterfaceWorkDoneMutex);
ASSERT_TRUE(_storageInterfaceWorkDone.truncateCalled);
@@ -1078,7 +1078,7 @@ TEST_F(InitialSyncerTest, InitialSyncerPassesThroughGetRollbackIdScheduleError)
ASSERT_OK(initialSyncer->startup(opCtx.get(), maxAttempts));
initialSyncer->join();
- ASSERT_EQUALS(ErrorCodes::OperationFailed, _lastApplied.getStatus());
+ ASSERT_EQUALS(ErrorCodes::OperationFailed, _lastApplied);
ASSERT_EQUALS("admin", request.dbname);
assertRemoteCommandNameEquals("replSetGetRBID", request);
@@ -1106,7 +1106,7 @@ TEST_F(
ASSERT_OK(initialSyncer->startup(opCtx.get(), maxAttempts));
initialSyncer->join();
- ASSERT_EQUALS(ErrorCodes::ShutdownInProgress, _lastApplied.getStatus());
+ ASSERT_EQUALS(ErrorCodes::ShutdownInProgress, _lastApplied);
LockGuard lock(_storageInterfaceWorkDoneMutex);
ASSERT_TRUE(_storageInterfaceWorkDone.truncateCalled);
@@ -1145,7 +1145,7 @@ TEST_F(InitialSyncerTest, InitialSyncerCancelsRollbackCheckerOnShutdown) {
initialSyncer->join();
ASSERT_EQUALS(InitialSyncer::State::kComplete, initialSyncer->getState_forTest());
- ASSERT_EQUALS(ErrorCodes::CallbackCanceled, _lastApplied.getStatus());
+ ASSERT_EQUALS(ErrorCodes::CallbackCanceled, _lastApplied);
}
TEST_F(InitialSyncerTest, InitialSyncerPassesThroughRollbackCheckerCallbackError) {
@@ -1166,7 +1166,7 @@ TEST_F(InitialSyncerTest, InitialSyncerPassesThroughRollbackCheckerCallbackError
}
initialSyncer->join();
- ASSERT_EQUALS(ErrorCodes::OperationFailed, _lastApplied.getStatus());
+ ASSERT_EQUALS(ErrorCodes::OperationFailed, _lastApplied);
}
TEST_F(InitialSyncerTest, InitialSyncerPassesThroughGetBeginFetchingTimestampScheduleError) {
@@ -1197,7 +1197,7 @@ TEST_F(InitialSyncerTest, InitialSyncerPassesThroughGetBeginFetchingTimestampSch
}
initialSyncer->join();
- ASSERT_EQUALS(ErrorCodes::OperationFailed, _lastApplied.getStatus());
+ ASSERT_EQUALS(ErrorCodes::OperationFailed, _lastApplied);
ASSERT_EQUALS(syncSource, request.target);
ASSERT_EQUALS(NamespaceString::kAdminDb, request.dbname);
@@ -1227,7 +1227,7 @@ TEST_F(InitialSyncerTest, InitialSyncerPassesThroughGetBeginFetchingTimestampCal
}
initialSyncer->join();
- ASSERT_EQUALS(ErrorCodes::OperationFailed, _lastApplied.getStatus());
+ ASSERT_EQUALS(ErrorCodes::OperationFailed, _lastApplied);
}
TEST_F(InitialSyncerTest, InitialSyncerPassesThroughLastOplogEntryFetcherScheduleError) {
@@ -1262,7 +1262,7 @@ TEST_F(InitialSyncerTest, InitialSyncerPassesThroughLastOplogEntryFetcherSchedul
}
initialSyncer->join();
- ASSERT_EQUALS(ErrorCodes::OperationFailed, _lastApplied.getStatus());
+ ASSERT_EQUALS(ErrorCodes::OperationFailed, _lastApplied);
ASSERT_EQUALS(syncSource, request.target);
ASSERT_EQUALS(_options.localOplogNS.db(), request.dbname);
@@ -1299,7 +1299,7 @@ TEST_F(InitialSyncerTest, InitialSyncerPassesThroughLastOplogEntryFetcherCallbac
}
initialSyncer->join();
- ASSERT_EQUALS(ErrorCodes::OperationFailed, _lastApplied.getStatus());
+ ASSERT_EQUALS(ErrorCodes::OperationFailed, _lastApplied);
}
TEST_F(InitialSyncerTest, InitialSyncerCancelsLastOplogEntryFetcherOnShutdown) {
@@ -1329,7 +1329,7 @@ TEST_F(InitialSyncerTest, InitialSyncerCancelsLastOplogEntryFetcherOnShutdown) {
executor::NetworkInterfaceMock::InNetworkGuard(net)->runReadyNetworkOperations();
initialSyncer->join();
- ASSERT_EQUALS(ErrorCodes::CallbackCanceled, _lastApplied.getStatus());
+ ASSERT_EQUALS(ErrorCodes::CallbackCanceled, _lastApplied);
}
TEST_F(InitialSyncerTest,
@@ -1358,7 +1358,7 @@ TEST_F(InitialSyncerTest,
}
initialSyncer->join();
- ASSERT_EQUALS(ErrorCodes::NoMatchingDocument, _lastApplied.getStatus());
+ ASSERT_EQUALS(ErrorCodes::NoMatchingDocument, _lastApplied);
}
TEST_F(InitialSyncerTest,
@@ -1419,7 +1419,7 @@ TEST_F(InitialSyncerTest,
}
initialSyncer->join();
- ASSERT_EQUALS(ErrorCodes::NoSuchKey, _lastApplied.getStatus());
+ ASSERT_EQUALS(ErrorCodes::NoSuchKey, _lastApplied);
}
TEST_F(InitialSyncerTest,
@@ -1453,7 +1453,7 @@ TEST_F(InitialSyncerTest,
}
initialSyncer->join();
- ASSERT_EQUALS(ErrorCodes::OperationFailed, _lastApplied.getStatus());
+ ASSERT_EQUALS(ErrorCodes::OperationFailed, _lastApplied);
}
TEST_F(InitialSyncerTest, InitialSyncerPassesThroughFCVFetcherScheduleError) {
@@ -1492,7 +1492,7 @@ TEST_F(InitialSyncerTest, InitialSyncerPassesThroughFCVFetcherScheduleError) {
}
initialSyncer->join();
- ASSERT_EQUALS(ErrorCodes::OperationFailed, _lastApplied.getStatus());
+ ASSERT_EQUALS(ErrorCodes::OperationFailed, _lastApplied);
ASSERT_EQUALS(syncSource, request.target);
assertFCVRequest(request);
@@ -1530,7 +1530,7 @@ TEST_F(InitialSyncerTest, InitialSyncerPassesThroughFCVFetcherCallbackError) {
}
initialSyncer->join();
- ASSERT_EQUALS(ErrorCodes::OperationFailed, _lastApplied.getStatus());
+ ASSERT_EQUALS(ErrorCodes::OperationFailed, _lastApplied);
}
TEST_F(InitialSyncerTest, InitialSyncerCancelsFCVFetcherOnShutdown) {
@@ -1563,7 +1563,7 @@ TEST_F(InitialSyncerTest, InitialSyncerCancelsFCVFetcherOnShutdown) {
executor::NetworkInterfaceMock::InNetworkGuard(net)->runReadyNetworkOperations();
initialSyncer->join();
- ASSERT_EQUALS(ErrorCodes::CallbackCanceled, _lastApplied.getStatus());
+ ASSERT_EQUALS(ErrorCodes::CallbackCanceled, _lastApplied);
}
TEST_F(InitialSyncerTest, InitialSyncerResendsFindCommandIfFCVFetcherReturnsRetriableError) {
@@ -1628,7 +1628,7 @@ void InitialSyncerTest::runInitialSyncWithBadFCVResponse(std::vector<BSONObj> do
}
initialSyncer->join();
- ASSERT_EQUALS(expectedError, _lastApplied.getStatus());
+ ASSERT_EQUALS(expectedError, _lastApplied);
}
TEST_F(InitialSyncerTest,
@@ -1704,7 +1704,7 @@ TEST_F(InitialSyncerTest, InitialSyncerSucceedsWhenFCVFetcherReturnsOldVersion)
executor::NetworkInterfaceMock::InNetworkGuard(net)->runReadyNetworkOperations();
initialSyncer->join();
- ASSERT_EQUALS(ErrorCodes::CallbackCanceled, _lastApplied.getStatus());
+ ASSERT_EQUALS(ErrorCodes::CallbackCanceled, _lastApplied);
}
TEST_F(InitialSyncerTest, InitialSyncerPassesThroughOplogFetcherScheduleError) {
@@ -1753,7 +1753,7 @@ TEST_F(InitialSyncerTest, InitialSyncerPassesThroughOplogFetcherScheduleError) {
net->runReadyNetworkOperations();
}
initialSyncer->join();
- ASSERT_EQUALS(ErrorCodes::OperationFailed, _lastApplied.getStatus());
+ ASSERT_EQUALS(ErrorCodes::OperationFailed, _lastApplied);
ASSERT_EQUALS(syncSource, request.target);
ASSERT_EQUALS(_options.localOplogNS.db(), request.dbname);
@@ -1808,7 +1808,7 @@ TEST_F(InitialSyncerTest, InitialSyncerPassesThroughOplogFetcherCallbackError) {
}
initialSyncer->join();
- ASSERT_EQUALS(ErrorCodes::OperationFailed, _lastApplied.getStatus());
+ ASSERT_EQUALS(ErrorCodes::OperationFailed, _lastApplied);
}
TEST_F(InitialSyncerTest,
@@ -1868,8 +1868,8 @@ TEST_F(InitialSyncerTest,
initialSyncer->join();
ASSERT_OK(_lastApplied.getStatus());
auto dummyEntry = makeOplogEntry(1);
- ASSERT_EQUALS(dummyEntry.getOpTime(), std::get<0>(_lastApplied.getValue()));
- ASSERT_EQUALS(dummyEntry.getWallClockTime().get(), std::get<1>(_lastApplied.getValue()));
+ ASSERT_EQUALS(dummyEntry.getOpTime(), _lastApplied.getValue().opTime);
+ ASSERT_EQUALS(dummyEntry.getWallClockTime().get(), _lastApplied.getValue().wallTime);
}
TEST_F(
@@ -1928,8 +1928,8 @@ TEST_F(
initialSyncer->join();
ASSERT_OK(_lastApplied.getStatus());
auto dummyEntry = makeOplogEntry(3);
- ASSERT_EQUALS(dummyEntry.getOpTime(), std::get<0>(_lastApplied.getValue()));
- ASSERT_EQUALS(dummyEntry.getWallClockTime().get(), std::get<1>(_lastApplied.getValue()));
+ ASSERT_EQUALS(dummyEntry.getOpTime(), _lastApplied.getValue().opTime);
+ ASSERT_EQUALS(dummyEntry.getWallClockTime().get(), _lastApplied.getValue().wallTime);
}
TEST_F(
@@ -1983,7 +1983,7 @@ TEST_F(
}
initialSyncer->join();
- ASSERT_EQUALS(ErrorCodes::RemoteResultsUnavailable, _lastApplied.getStatus());
+ ASSERT_EQUALS(ErrorCodes::RemoteResultsUnavailable, _lastApplied);
}
TEST_F(InitialSyncerTest,
@@ -2036,7 +2036,7 @@ TEST_F(InitialSyncerTest,
}
initialSyncer->join();
- ASSERT_EQUALS(ErrorCodes::OperationFailed, _lastApplied.getStatus());
+ ASSERT_EQUALS(ErrorCodes::OperationFailed, _lastApplied);
ASSERT_EQUALS(syncSource, request.target);
ASSERT_EQUALS("admin", request.dbname);
@@ -2083,7 +2083,7 @@ TEST_F(InitialSyncerTest,
}
initialSyncer->join();
- ASSERT_EQUALS(ErrorCodes::FailedToParse, _lastApplied.getStatus());
+ ASSERT_EQUALS(ErrorCodes::FailedToParse, _lastApplied);
}
TEST_F(InitialSyncerTest, InitialSyncerIgnoresLocalDatabasesWhenCloningDatabases) {
@@ -2150,7 +2150,7 @@ TEST_F(InitialSyncerTest, InitialSyncerIgnoresLocalDatabasesWhenCloningDatabases
getExecutor().shutdown();
initialSyncer->join();
- ASSERT_EQUALS(ErrorCodes::CallbackCanceled, _lastApplied.getStatus());
+ ASSERT_EQUALS(ErrorCodes::CallbackCanceled, _lastApplied);
}
TEST_F(InitialSyncerTest,
@@ -2225,7 +2225,7 @@ TEST_F(InitialSyncerTest,
getExecutor().shutdown();
initialSyncer->join();
- ASSERT_EQUALS(ErrorCodes::CallbackCanceled, _lastApplied.getStatus());
+ ASSERT_EQUALS(ErrorCodes::CallbackCanceled, _lastApplied);
}
TEST_F(InitialSyncerTest, InitialSyncerCancelsBothOplogFetcherAndDatabasesClonerOnShutdown) {
@@ -2259,7 +2259,7 @@ TEST_F(InitialSyncerTest, InitialSyncerCancelsBothOplogFetcherAndDatabasesCloner
executor::NetworkInterfaceMock::InNetworkGuard(net)->runReadyNetworkOperations();
initialSyncer->join();
- ASSERT_EQUALS(ErrorCodes::CallbackCanceled, _lastApplied.getStatus());
+ ASSERT_EQUALS(ErrorCodes::CallbackCanceled, _lastApplied);
}
TEST_F(InitialSyncerTest,
@@ -2322,7 +2322,7 @@ TEST_F(InitialSyncerTest,
}
initialSyncer->join();
- ASSERT_EQUALS(ErrorCodes::OperationFailed, _lastApplied.getStatus());
+ ASSERT_EQUALS(ErrorCodes::OperationFailed, _lastApplied);
}
TEST_F(InitialSyncerTest,
@@ -2382,7 +2382,7 @@ TEST_F(InitialSyncerTest,
}
initialSyncer->join();
- ASSERT_EQUALS(ErrorCodes::OperationFailed, _lastApplied.getStatus());
+ ASSERT_EQUALS(ErrorCodes::OperationFailed, _lastApplied);
}
TEST_F(InitialSyncerTest,
@@ -2437,7 +2437,7 @@ TEST_F(InitialSyncerTest,
executor::NetworkInterfaceMock::InNetworkGuard(net)->runReadyNetworkOperations();
initialSyncer->join();
- ASSERT_EQUALS(ErrorCodes::CallbackCanceled, _lastApplied.getStatus());
+ ASSERT_EQUALS(ErrorCodes::CallbackCanceled, _lastApplied);
}
TEST_F(InitialSyncerTest,
@@ -2501,7 +2501,7 @@ TEST_F(InitialSyncerTest,
}
initialSyncer->join();
- ASSERT_EQUALS(ErrorCodes::OperationFailed, _lastApplied.getStatus());
+ ASSERT_EQUALS(ErrorCodes::OperationFailed, _lastApplied);
}
TEST_F(
@@ -2561,7 +2561,7 @@ TEST_F(
}
initialSyncer->join();
- ASSERT_EQUALS(ErrorCodes::TypeMismatch, _lastApplied.getStatus());
+ ASSERT_EQUALS(ErrorCodes::TypeMismatch, _lastApplied);
}
TEST_F(InitialSyncerTest,
@@ -2615,7 +2615,7 @@ TEST_F(InitialSyncerTest,
}
initialSyncer->join();
- ASSERT_EQUALS(ErrorCodes::OplogOutOfOrder, _lastApplied.getStatus());
+ ASSERT_EQUALS(ErrorCodes::OplogOutOfOrder, _lastApplied);
}
TEST_F(
@@ -2686,7 +2686,7 @@ TEST_F(
}
initialSyncer->join();
- ASSERT_EQUALS(ErrorCodes::OperationFailed, _lastApplied.getStatus());
+ ASSERT_EQUALS(ErrorCodes::OperationFailed, _lastApplied);
ASSERT_EQUALS(_options.localOplogNS, insertDocumentNss);
ASSERT_BSONOBJ_EQ(oplogEntry, insertDocumentDoc.obj);
}
@@ -2760,7 +2760,7 @@ TEST_F(
}
initialSyncer->join();
- ASSERT_EQUALS(ErrorCodes::CallbackCanceled, _lastApplied.getStatus());
+ ASSERT_EQUALS(ErrorCodes::CallbackCanceled, _lastApplied);
ASSERT_EQUALS(_options.localOplogNS, insertDocumentNss);
ASSERT_BSONOBJ_EQ(oplogEntry, insertDocumentDoc.obj);
}
@@ -2834,7 +2834,7 @@ TEST_F(
}
initialSyncer->join();
- ASSERT_EQUALS(ErrorCodes::OperationFailed, _lastApplied.getStatus());
+ ASSERT_EQUALS(ErrorCodes::OperationFailed, _lastApplied);
}
TEST_F(
@@ -2897,7 +2897,7 @@ TEST_F(
}
initialSyncer->join();
- ASSERT_EQUALS(ErrorCodes::OperationFailed, _lastApplied.getStatus());
+ ASSERT_EQUALS(ErrorCodes::OperationFailed, _lastApplied);
}
TEST_F(InitialSyncerTest, InitialSyncerCancelsLastRollbackCheckerOnShutdown) {
@@ -2959,7 +2959,7 @@ TEST_F(InitialSyncerTest, InitialSyncerCancelsLastRollbackCheckerOnShutdown) {
executor::NetworkInterfaceMock::InNetworkGuard(net)->runReadyNetworkOperations();
initialSyncer->join();
- ASSERT_EQUALS(ErrorCodes::CallbackCanceled, _lastApplied.getStatus());
+ ASSERT_EQUALS(ErrorCodes::CallbackCanceled, _lastApplied);
}
TEST_F(InitialSyncerTest, InitialSyncerCancelsLastRollbackCheckerOnOplogFetcherCallbackError) {
@@ -3024,7 +3024,7 @@ TEST_F(InitialSyncerTest, InitialSyncerCancelsLastRollbackCheckerOnOplogFetcherC
}
initialSyncer->join();
- ASSERT_EQUALS(ErrorCodes::OperationFailed, _lastApplied.getStatus());
+ ASSERT_EQUALS(ErrorCodes::OperationFailed, _lastApplied);
}
TEST_F(InitialSyncerTest,
@@ -3081,7 +3081,7 @@ TEST_F(InitialSyncerTest,
}
initialSyncer->join();
- ASSERT_EQUALS(ErrorCodes::UnrecoverableRollbackError, _lastApplied.getStatus());
+ ASSERT_EQUALS(ErrorCodes::UnrecoverableRollbackError, _lastApplied);
}
TEST_F(InitialSyncerTest, LastOpTimeShouldBeSetEvenIfNoOperationsAreAppliedAfterCloning) {
@@ -3173,8 +3173,8 @@ TEST_F(InitialSyncerTest, LastOpTimeShouldBeSetEvenIfNoOperationsAreAppliedAfter
initialSyncer->join();
ASSERT_OK(_lastApplied.getStatus());
- ASSERT_EQUALS(oplogEntry.getOpTime(), std::get<0>(_lastApplied.getValue()));
- ASSERT_EQUALS(oplogEntry.getWallClockTime().get(), std::get<1>(_lastApplied.getValue()));
+ ASSERT_EQUALS(oplogEntry.getOpTime(), _lastApplied.getValue().opTime);
+ ASSERT_EQUALS(oplogEntry.getWallClockTime().get(), _lastApplied.getValue().wallTime);
ASSERT_FALSE(_replicationProcess->getConsistencyMarkers()->getInitialSyncFlag(opCtx.get()));
}
@@ -3237,7 +3237,7 @@ TEST_F(InitialSyncerTest, InitialSyncerPassesThroughGetNextApplierBatchScheduleE
}
initialSyncer->join();
- ASSERT_EQUALS(ErrorCodes::OperationFailed, _lastApplied.getStatus());
+ ASSERT_EQUALS(ErrorCodes::OperationFailed, _lastApplied);
}
TEST_F(InitialSyncerTest, InitialSyncerPassesThroughSecondGetNextApplierBatchScheduleError) {
@@ -3299,7 +3299,7 @@ TEST_F(InitialSyncerTest, InitialSyncerPassesThroughSecondGetNextApplierBatchSch
}
initialSyncer->join();
- ASSERT_EQUALS(ErrorCodes::OperationFailed, _lastApplied.getStatus());
+ ASSERT_EQUALS(ErrorCodes::OperationFailed, _lastApplied);
}
TEST_F(InitialSyncerTest, InitialSyncerCancelsGetNextApplierBatchOnShutdown) {
@@ -3357,7 +3357,7 @@ TEST_F(InitialSyncerTest, InitialSyncerCancelsGetNextApplierBatchOnShutdown) {
executor::NetworkInterfaceMock::InNetworkGuard(net)->runReadyNetworkOperations();
initialSyncer->join();
- ASSERT_EQUALS(ErrorCodes::CallbackCanceled, _lastApplied.getStatus());
+ ASSERT_EQUALS(ErrorCodes::CallbackCanceled, _lastApplied);
}
TEST_F(InitialSyncerTest, InitialSyncerPassesThroughGetNextApplierBatchInLockError) {
@@ -3421,7 +3421,7 @@ TEST_F(InitialSyncerTest, InitialSyncerPassesThroughGetNextApplierBatchInLockErr
}
initialSyncer->join();
- ASSERT_EQUALS(ErrorCodes::BadValue, _lastApplied.getStatus());
+ ASSERT_EQUALS(ErrorCodes::BadValue, _lastApplied);
}
TEST_F(
@@ -3498,7 +3498,7 @@ TEST_F(
executor::NetworkInterfaceMock::InNetworkGuard(net)->runReadyNetworkOperations();
initialSyncer->join();
- ASSERT_EQUALS(ErrorCodes::CallbackCanceled, _lastApplied.getStatus());
+ ASSERT_EQUALS(ErrorCodes::CallbackCanceled, _lastApplied);
}
TEST_F(InitialSyncerTest, InitialSyncerPassesThroughMultiApplierScheduleError) {
@@ -3575,7 +3575,7 @@ TEST_F(InitialSyncerTest, InitialSyncerPassesThroughMultiApplierScheduleError) {
}
initialSyncer->join();
- ASSERT_EQUALS(ErrorCodes::OperationFailed, _lastApplied.getStatus());
+ ASSERT_EQUALS(ErrorCodes::OperationFailed, _lastApplied);
}
TEST_F(InitialSyncerTest, InitialSyncerPassesThroughMultiApplierCallbackError) {
@@ -3633,7 +3633,7 @@ TEST_F(InitialSyncerTest, InitialSyncerPassesThroughMultiApplierCallbackError) {
}
initialSyncer->join();
- ASSERT_EQUALS(ErrorCodes::OperationFailed, _lastApplied.getStatus());
+ ASSERT_EQUALS(ErrorCodes::OperationFailed, _lastApplied);
}
TEST_F(InitialSyncerTest, InitialSyncerCancelsGetNextApplierBatchCallbackOnOplogFetcherError) {
@@ -3691,7 +3691,7 @@ TEST_F(InitialSyncerTest, InitialSyncerCancelsGetNextApplierBatchCallbackOnOplog
}
initialSyncer->join();
- ASSERT_EQUALS(ErrorCodes::OperationFailed, _lastApplied.getStatus());
+ ASSERT_EQUALS(ErrorCodes::OperationFailed, _lastApplied);
}
OplogEntry InitialSyncerTest::doInitialSyncWithOneBatch(bool shouldSetFCV) {
@@ -3771,8 +3771,8 @@ void InitialSyncerTest::doSuccessfulInitialSyncWithOneBatch(bool shouldSetFCV) {
auto lastOp = doInitialSyncWithOneBatch(shouldSetFCV);
serverGlobalParams.featureCompatibility.reset();
ASSERT_OK(_lastApplied.getStatus());
- ASSERT_EQUALS(lastOp.getOpTime(), std::get<0>(_lastApplied.getValue()));
- ASSERT_EQUALS(lastOp.getWallClockTime().get(), std::get<1>(_lastApplied.getValue()));
+ ASSERT_EQUALS(lastOp.getOpTime(), _lastApplied.getValue().opTime);
+ ASSERT_EQUALS(lastOp.getWallClockTime().get(), _lastApplied.getValue().wallTime);
ASSERT_EQUALS(lastOp.getOpTime().getTimestamp(), _storageInterface->getInitialDataTimestamp());
}
@@ -3890,8 +3890,8 @@ TEST_F(InitialSyncerTest,
initialSyncer->join();
ASSERT_OK(_lastApplied.getStatus());
- ASSERT_EQUALS(lastOp.getOpTime(), std::get<0>(_lastApplied.getValue()));
- ASSERT_EQUALS(lastOp.getWallClockTime().get(), std::get<1>(_lastApplied.getValue()));
+ ASSERT_EQUALS(lastOp.getOpTime(), _lastApplied.getValue().opTime);
+ ASSERT_EQUALS(lastOp.getWallClockTime().get(), _lastApplied.getValue().wallTime);
}
TEST_F(
@@ -3986,8 +3986,8 @@ TEST_F(
initialSyncer->join();
ASSERT_OK(_lastApplied.getStatus());
- ASSERT_EQUALS(lastOp.getOpTime(), std::get<0>(_lastApplied.getValue()));
- ASSERT_EQUALS(lastOp.getWallClockTime().get(), std::get<1>(_lastApplied.getValue()));
+ ASSERT_EQUALS(lastOp.getOpTime(), _lastApplied.getValue().opTime);
+ ASSERT_EQUALS(lastOp.getWallClockTime().get(), _lastApplied.getValue().wallTime);
ASSERT_TRUE(fetchCountIncremented);
@@ -4010,7 +4010,7 @@ TEST_F(InitialSyncerTest,
ASSERT_OK(initialSyncer->startup(opCtx.get(), maxAttempts));
initialSyncer->join();
- ASSERT_EQUALS(ErrorCodes::InvalidSyncSource, _lastApplied.getStatus());
+ ASSERT_EQUALS(ErrorCodes::InvalidSyncSource, _lastApplied);
}
TEST_F(InitialSyncerTest, OplogOutOfOrderOnOplogFetchFinish) {
@@ -4067,7 +4067,7 @@ TEST_F(InitialSyncerTest, OplogOutOfOrderOnOplogFetchFinish) {
}
initialSyncer->join();
- ASSERT_EQUALS(ErrorCodes::OplogOutOfOrder, _lastApplied.getStatus());
+ ASSERT_EQUALS(ErrorCodes::OplogOutOfOrder, _lastApplied);
}
TEST_F(InitialSyncerTest, GetInitialSyncProgressReturnsCorrectProgress) {
@@ -4308,8 +4308,8 @@ TEST_F(InitialSyncerTest, GetInitialSyncProgressReturnsCorrectProgress) {
initialSyncer->join();
ASSERT_OK(_lastApplied.getStatus());
auto dummyEntry = makeOplogEntry(7);
- ASSERT_EQUALS(dummyEntry.getOpTime(), std::get<0>(_lastApplied.getValue()));
- ASSERT_EQUALS(dummyEntry.getWallClockTime().get(), std::get<1>(_lastApplied.getValue()));
+ ASSERT_EQUALS(dummyEntry.getOpTime(), _lastApplied.getValue().opTime);
+ ASSERT_EQUALS(dummyEntry.getWallClockTime().get(), _lastApplied.getValue().wallTime);
progress = initialSyncer->getInitialSyncProgress();
log() << "Progress at end: " << progress;
@@ -4436,7 +4436,7 @@ TEST_F(InitialSyncerTest, InitialSyncerUpgradeNonReplicatedUniqueIndexesError) {
doInitialSyncWithOneBatch(true);
// Ensure the upgradeNonReplicatedUniqueIndexes status was captured.
- ASSERT_EQUALS(ErrorCodes::NamespaceNotFound, _lastApplied.getStatus());
+ ASSERT_EQUALS(ErrorCodes::NamespaceNotFound, _lastApplied);
}
} // namespace
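
Most assertions above drop .getStatus() and compare the StatusWith directly against an error code. The production code in this diff already relies on such a comparison ("opTimeStatus == ErrorCodes::CallbackCanceled" in replication_coordinator_impl.cpp below), so the shortened form is equivalent. A simplified sketch of the kind of operator that makes it work:

    #include <cassert>

    enum class ErrorCode { OK, CallbackCanceled, OperationFailed };

    template <typename T>
    class StatusWithLike {  // simplified stand-in, not the real mongo::StatusWith
    public:
        explicit StatusWithLike(ErrorCode code) : _code(code) {}
        ErrorCode getStatus() const { return _code; }
    private:
        ErrorCode _code;
    };

    // Comparing the wrapper to a code delegates to the underlying status.
    template <typename T>
    bool operator==(const StatusWithLike<T>& sw, ErrorCode code) {
        return sw.getStatus() == code;
    }

    int main() {
        StatusWithLike<int> failed(ErrorCode::CallbackCanceled);
        assert(failed.getStatus() == ErrorCode::CallbackCanceled);  // old form
        assert(failed == ErrorCode::CallbackCanceled);              // new form
        return 0;
    }
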
diff --git a/src/mongo/db/repl/member_data.cpp b/src/mongo/db/repl/member_data.cpp
index bed402172f3..18cb910496e 100644
--- a/src/mongo/db/repl/member_data.cpp
+++ b/src/mongo/db/repl/member_data.cpp
@@ -125,40 +125,39 @@ void MemberData::setAuthIssue(Date_t now) {
void MemberData::setLastAppliedOpTimeAndWallTime(OpTimeAndWallTime opTime, Date_t now) {
_lastUpdate = now;
_lastUpdateStale = false;
- _lastAppliedOpTime = std::get<0>(opTime);
- _lastAppliedWallTime = std::get<1>(opTime);
+ _lastAppliedOpTime = opTime.opTime;
+ _lastAppliedWallTime = opTime.wallTime;
}
void MemberData::setLastAppliedOpTime(OpTime opTime, Date_t now) {
- setLastAppliedOpTimeAndWallTime(std::make_tuple(opTime, Date_t::min()), now);
+ setLastAppliedOpTimeAndWallTime({opTime, Date_t::min()}, now);
}
void MemberData::setLastDurableOpTimeAndWallTime(OpTimeAndWallTime opTime, Date_t now) {
_lastUpdate = now;
_lastUpdateStale = false;
- if (_lastAppliedOpTime < std::get<0>(opTime)) {
+ if (_lastAppliedOpTime < opTime.opTime) {
// TODO(russotto): We think this should never happen, rollback or no rollback. Make this an
// invariant and see what happens.
- log() << "Durable progress (" << std::get<0>(opTime)
- << ") is ahead of the applied progress (" << _lastAppliedOpTime
- << ". This is likely due to a "
- "rollback."
+ log() << "Durable progress (" << opTime.opTime << ") is ahead of the applied progress ("
+          << _lastAppliedOpTime << "). This is likely due to a "
+ "rollback."
<< " memberid: " << _memberId << _hostAndPort.toString()
<< " previous durable progress: " << _lastDurableOpTime;
} else {
- _lastDurableOpTime = std::get<0>(opTime);
- _lastDurableWallTime = std::get<1>(opTime);
+ _lastDurableOpTime = opTime.opTime;
+ _lastDurableWallTime = opTime.wallTime;
}
}
void MemberData::setLastDurableOpTime(OpTime opTime, Date_t now) {
- setLastDurableOpTimeAndWallTime(std::make_tuple(opTime, Date_t::min()), now);
+ setLastDurableOpTimeAndWallTime({opTime, Date_t::min()}, now);
}
bool MemberData::advanceLastAppliedOpTimeAndWallTime(OpTimeAndWallTime opTime, Date_t now) {
_lastUpdate = now;
_lastUpdateStale = false;
- if (_lastAppliedOpTime < std::get<0>(opTime)) {
+ if (_lastAppliedOpTime < opTime.opTime) {
setLastAppliedOpTimeAndWallTime(opTime, now);
return true;
}
@@ -166,22 +165,22 @@ bool MemberData::advanceLastAppliedOpTimeAndWallTime(OpTimeAndWallTime opTime, D
}
bool MemberData::advanceLastAppliedOpTime(OpTime opTime, Date_t now) {
- return advanceLastAppliedOpTimeAndWallTime(std::make_tuple(opTime, Date_t::min()), now);
+ return advanceLastAppliedOpTimeAndWallTime({opTime, Date_t::min()}, now);
}
bool MemberData::advanceLastDurableOpTimeAndWallTime(OpTimeAndWallTime opTime, Date_t now) {
_lastUpdate = now;
_lastUpdateStale = false;
- if (_lastDurableOpTime < std::get<0>(opTime)) {
- _lastDurableOpTime = std::get<0>(opTime);
- _lastDurableWallTime = std::get<1>(opTime);
+ if (_lastDurableOpTime < opTime.opTime) {
+ _lastDurableOpTime = opTime.opTime;
+ _lastDurableWallTime = opTime.wallTime;
return true;
}
return false;
}
bool MemberData::advanceLastDurableOpTime(OpTime opTime, Date_t now) {
- return advanceLastDurableOpTimeAndWallTime(std::make_tuple(opTime, Date_t::min()), now);
+ return advanceLastDurableOpTimeAndWallTime({opTime, Date_t::min()}, now);
}
} // namespace repl
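
The OpTime-only setters remain as thin wrappers that delegate to the *AndWallTime variants, with Date_t::min() standing in for an unknown wall time. A stand-in sketch of that delegation pattern:

    #include <limits>

    struct OpTime { long long ts = 0; };
    struct Date_t {
        long long millis = 0;
        static Date_t min() { return {std::numeric_limits<long long>::min()}; }
    };
    struct OpTimeAndWallTime {
        OpTime opTime;
        Date_t wallTime;
    };

    class MemberDataSketch {
    public:
        void setLastAppliedOpTimeAndWallTime(OpTimeAndWallTime t) {
            _lastAppliedOpTime = t.opTime;
            _lastAppliedWallTime = t.wallTime;
        }
        // Legacy entry point: callers without a wall time pass a sentinel.
        void setLastAppliedOpTime(OpTime opTime) {
            setLastAppliedOpTimeAndWallTime({opTime, Date_t::min()});
        }
    private:
        OpTime _lastAppliedOpTime;
        Date_t _lastAppliedWallTime;
    };

    int main() {
        MemberDataSketch m;
        m.setLastAppliedOpTime(OpTime{42});  // wall time recorded as Date_t::min()
        return 0;
    }
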
diff --git a/src/mongo/db/repl/member_data.h b/src/mongo/db/repl/member_data.h
index cc65df0c309..cae0e30b4c9 100644
--- a/src/mongo/db/repl/member_data.h
+++ b/src/mongo/db/repl/member_data.h
@@ -37,8 +37,6 @@
namespace mongo {
namespace repl {
-using OpTimeAndWallTime = std::tuple<OpTime, Date_t>;
-
/**
* This class contains the data from heartbeat responses and replSetUpdatePosition commands for one
* member of a replica set.
diff --git a/src/mongo/db/repl/oplog.cpp b/src/mongo/db/repl/oplog.cpp
index 7e6d5e08800..7b9df3d6add 100644
--- a/src/mongo/db/repl/oplog.cpp
+++ b/src/mongo/db/repl/oplog.cpp
@@ -551,8 +551,7 @@ void _logOpsInner(OperationContext* opCtx,
// Optimes on the primary should always represent consistent database states.
replCoord->setMyLastAppliedOpTimeAndWallTimeForward(
- std::make_tuple(finalOpTime, wallTime),
- ReplicationCoordinator::DataConsistency::Consistent);
+ {finalOpTime, wallTime}, ReplicationCoordinator::DataConsistency::Consistent);
// We set the last op on the client to 'finalOpTime', because that contains the
// timestamp of the operation that the client actually performed.
diff --git a/src/mongo/db/repl/optime.cpp b/src/mongo/db/repl/optime.cpp
index b5632a21b02..0c68be9e1a8 100644
--- a/src/mongo/db/repl/optime.cpp
+++ b/src/mongo/db/repl/optime.cpp
@@ -103,6 +103,10 @@ std::ostream& operator<<(std::ostream& out, const OpTime& opTime) {
return out << opTime.toString();
}
+std::ostream& operator<<(std::ostream& out, const OpTimeAndWallTime& opTime) {
+ return out << opTime.opTime.toString() << ", " << opTime.wallTime.toString();
+}
+
void OpTime::appendAsQuery(BSONObjBuilder* builder) const {
builder->append(kTimestampFieldName, _timestamp);
if (_term == kUninitializedTerm) {
diff --git a/src/mongo/db/repl/optime.h b/src/mongo/db/repl/optime.h
index 675d2854fde..c1b377f2845 100644
--- a/src/mongo/db/repl/optime.h
+++ b/src/mongo/db/repl/optime.h
@@ -159,8 +159,12 @@ private:
Timestamp _timestamp;
long long _term = kInitialTerm;
};
-using OpTimeAndWallTime = std::tuple<OpTime, Date_t>;
+struct OpTimeAndWallTime {
+ OpTime opTime;
+ Date_t wallTime;
+};
+std::ostream& operator<<(std::ostream& out, const OpTimeAndWallTime& opTime);
} // namespace repl
/**
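
The new OpTimeAndWallTime is a plain aggregate: brace-initializable, with no constructors and no comparison operators, so call sites in this diff compare the .opTime member explicitly. The only operation added is the stream operator in optime.cpp above. A stand-in sketch of the resulting shape:

    #include <iostream>
    #include <string>

    struct OpTime {
        long long ts = 0;
        std::string toString() const { return std::to_string(ts); }
        bool operator>(const OpTime& rhs) const { return ts > rhs.ts; }
    };
    struct Date_t {
        long long millis = 0;
        std::string toString() const { return std::to_string(millis); }
    };

    struct OpTimeAndWallTime {
        OpTime opTime;
        Date_t wallTime;
    };

    // Mirrors the operator<< added in optime.cpp: "<optime>, <walltime>".
    std::ostream& operator<<(std::ostream& out, const OpTimeAndWallTime& t) {
        return out << t.opTime.toString() << ", " << t.wallTime.toString();
    }

    int main() {
        OpTimeAndWallTime a = {OpTime{10}, Date_t{5000}};
        OpTimeAndWallTime b = {OpTime{20}, Date_t{6000}};
        std::cout << a << "\n";
        if (b.opTime > a.opTime) {  // member-wise comparison at the call site
            std::cout << "b is newer\n";
        }
        return 0;
    }
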
diff --git a/src/mongo/db/repl/replication_consistency_markers_impl.cpp b/src/mongo/db/repl/replication_consistency_markers_impl.cpp
index cbeec1a1e28..2c98528aa35 100644
--- a/src/mongo/db/repl/replication_consistency_markers_impl.cpp
+++ b/src/mongo/db/repl/replication_consistency_markers_impl.cpp
@@ -145,7 +145,7 @@ void ReplicationConsistencyMarkersImpl::clearInitialSyncFlag(OperationContext* o
auto replCoord = repl::ReplicationCoordinator::get(opCtx);
OpTimeAndWallTime opTimeAndWallTime = replCoord->getMyLastAppliedOpTimeAndWallTime();
- const auto time = std::get<0>(opTimeAndWallTime);
+ const auto time = opTimeAndWallTime.opTime;
TimestampedBSONObj update;
update.obj = BSON("$unset" << kInitialSyncFlag << "$set"
<< BSON(MinValidDocument::kMinValidTimestampFieldName
diff --git a/src/mongo/db/repl/replication_coordinator.h b/src/mongo/db/repl/replication_coordinator.h
index 69a96ce5932..6b63d156b17 100644
--- a/src/mongo/db/repl/replication_coordinator.h
+++ b/src/mongo/db/repl/replication_coordinator.h
@@ -66,11 +66,11 @@ class ReplSetMetadata;
} // namespace rpc
namespace repl {
-using OpTimeAndWallTime = std::tuple<OpTime, Date_t>;
class BackgroundSync;
class IsMasterResponse;
class OpTime;
+struct OpTimeAndWallTime;
class ReadConcernArgs;
class ReplSetConfig;
class ReplSetHeartbeatArgsV1;
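
Unlike the old tuple alias, a struct can be forward-declared, which is what lets this header drop the alias and declare "struct OpTimeAndWallTime;" alongside "class OpTime;". A sketch of the difference:

    struct OpTimeAndWallTime;  // fine: incomplete type, defined later

    // A type alias cannot be forward-declared; with the old
    // "using OpTimeAndWallTime = std::tuple<OpTime, Date_t>;" every header
    // needed the complete tuple (and OpTime, Date_t) definitions.

    void report(const OpTimeAndWallTime& t);  // a declaration needs only the name

    struct OpTime {};
    struct Date_t {};
    struct OpTimeAndWallTime {  // the single definition, as in optime.h
        OpTime opTime;
        Date_t wallTime;
    };

    void report(const OpTimeAndWallTime& t) { (void)t; }

    int main() {
        OpTimeAndWallTime t{OpTime{}, Date_t{}};
        report(t);
        return 0;
    }
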
diff --git a/src/mongo/db/repl/replication_coordinator_external_state.h b/src/mongo/db/repl/replication_coordinator_external_state.h
index 91d5a0bc81a..a7bb4c6bdd0 100644
--- a/src/mongo/db/repl/replication_coordinator_external_state.h
+++ b/src/mongo/db/repl/replication_coordinator_external_state.h
@@ -188,8 +188,7 @@ public:
* Gets the last optime, and corresponding wall clock time, of an operation performed on this
* host, from stable storage.
*/
- virtual StatusWith<std::tuple<OpTime, Date_t>> loadLastOpTimeAndWallTime(
- OperationContext* opCtx) = 0;
+ virtual StatusWith<OpTimeAndWallTime> loadLastOpTimeAndWallTime(OperationContext* opCtx) = 0;
/**
* Returns the HostAndPort of the remote client connected to us that initiated the operation
diff --git a/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp b/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
index 7b57c2f74dd..2811ec062ca 100644
--- a/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
+++ b/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
@@ -372,7 +372,7 @@ void ReplicationCoordinatorExternalStateImpl::shutdown(OperationContext* opCtx)
auto loadLastOpTimeAndWallTimeResult = loadLastOpTimeAndWallTime(opCtx);
if (_replicationProcess->getConsistencyMarkers()->getOplogTruncateAfterPoint(opCtx).isNull() &&
loadLastOpTimeAndWallTimeResult.isOK() &&
- std::get<0>(loadLastOpTimeAndWallTimeResult.getValue()) ==
+ loadLastOpTimeAndWallTimeResult.getValue().opTime ==
_replicationProcess->getConsistencyMarkers()->getAppliedThrough(opCtx)) {
// Clear the appliedThrough marker to indicate we are consistent with the top of the
// oplog. We record this update at the 'lastAppliedOpTime'. If there are any outstanding
@@ -485,7 +485,7 @@ OpTime ReplicationCoordinatorExternalStateImpl::onTransitionToPrimary(OperationC
});
const auto loadLastOpTimeAndWallTimeResult = loadLastOpTimeAndWallTime(opCtx);
fassert(28665, loadLastOpTimeAndWallTimeResult);
- auto opTimeToReturn = std::get<0>(loadLastOpTimeAndWallTimeResult.getValue());
+ auto opTimeToReturn = loadLastOpTimeAndWallTimeResult.getValue().opTime;
_shardingOnTransitionToPrimaryHook(opCtx);
@@ -627,8 +627,8 @@ bool ReplicationCoordinatorExternalStateImpl::oplogExists(OperationContext* opCt
return oplog.getCollection() != nullptr;
}
-StatusWith<std::tuple<OpTime, Date_t>>
-ReplicationCoordinatorExternalStateImpl::loadLastOpTimeAndWallTime(OperationContext* opCtx) {
+StatusWith<OpTimeAndWallTime> ReplicationCoordinatorExternalStateImpl::loadLastOpTimeAndWallTime(
+ OperationContext* opCtx) {
// TODO: handle WriteConflictExceptions below
try {
// If we are doing an initial sync do not read from the oplog.
@@ -643,14 +643,14 @@ ReplicationCoordinatorExternalStateImpl::loadLastOpTimeAndWallTime(OperationCont
return Helpers::getLast(
opCtx, NamespaceString::kRsOplogNamespace.ns().c_str(), oplogEntry);
})) {
- return StatusWith<std::tuple<OpTime, Date_t>>(
- ErrorCodes::NoMatchingDocument,
- str::stream() << "Did not find any entries in "
- << NamespaceString::kRsOplogNamespace.ns());
+ return StatusWith<OpTimeAndWallTime>(ErrorCodes::NoMatchingDocument,
+ str::stream()
+ << "Did not find any entries in "
+ << NamespaceString::kRsOplogNamespace.ns());
}
BSONElement tsElement = oplogEntry[tsFieldName];
if (tsElement.eoo()) {
- return StatusWith<std::tuple<OpTime, Date_t>>(
+ return StatusWith<OpTimeAndWallTime>(
ErrorCodes::NoSuchKey,
str::stream() << "Most recent entry in " << NamespaceString::kRsOplogNamespace.ns()
<< " missing \""
@@ -658,7 +658,7 @@ ReplicationCoordinatorExternalStateImpl::loadLastOpTimeAndWallTime(OperationCont
<< "\" field");
}
if (tsElement.type() != bsonTimestamp) {
- return StatusWith<std::tuple<OpTime, Date_t>>(
+ return StatusWith<OpTimeAndWallTime>(
ErrorCodes::TypeMismatch,
str::stream() << "Expected type of \"" << tsFieldName << "\" in most recent "
<< NamespaceString::kRsOplogNamespace.ns()
@@ -674,9 +674,10 @@ ReplicationCoordinatorExternalStateImpl::loadLastOpTimeAndWallTime(OperationCont
if (!wallTimeStatus.isOK()) {
return wallTimeStatus.getStatus();
}
- return std::make_tuple(opTimeStatus.getValue(), wallTimeStatus.getValue());
+ OpTimeAndWallTime parseResult = {opTimeStatus.getValue(), wallTimeStatus.getValue()};
+ return parseResult;
} catch (const DBException& ex) {
- return StatusWith<std::tuple<OpTime, Date_t>>(ex.toStatus());
+ return StatusWith<OpTimeAndWallTime>(ex.toStatus());
}
}
diff --git a/src/mongo/db/repl/replication_coordinator_external_state_impl.h b/src/mongo/db/repl/replication_coordinator_external_state_impl.h
index 5ba0c5aa57e..fb6ea07d80f 100644
--- a/src/mongo/db/repl/replication_coordinator_external_state_impl.h
+++ b/src/mongo/db/repl/replication_coordinator_external_state_impl.h
@@ -89,8 +89,7 @@ public:
virtual void setGlobalTimestamp(ServiceContext* service, const Timestamp& newTime);
virtual Timestamp getGlobalTimestamp(ServiceContext* service);
bool oplogExists(OperationContext* opCtx) final;
- virtual StatusWith<std::tuple<OpTime, Date_t>> loadLastOpTimeAndWallTime(
- OperationContext* opCtx);
+ virtual StatusWith<OpTimeAndWallTime> loadLastOpTimeAndWallTime(OperationContext* opCtx);
virtual HostAndPort getClientHostAndPort(const OperationContext* opCtx);
virtual void closeConnections();
virtual void shardingOnStepDownHook();
diff --git a/src/mongo/db/repl/replication_coordinator_external_state_mock.cpp b/src/mongo/db/repl/replication_coordinator_external_state_mock.cpp
index 3f2cbb67a35..b73312fcd06 100644
--- a/src/mongo/db/repl/replication_coordinator_external_state_mock.cpp
+++ b/src/mongo/db/repl/replication_coordinator_external_state_mock.cpp
@@ -169,11 +169,12 @@ bool ReplicationCoordinatorExternalStateMock::oplogExists(OperationContext* opCt
return true;
}
-StatusWith<std::tuple<OpTime, Date_t>>
-ReplicationCoordinatorExternalStateMock::loadLastOpTimeAndWallTime(OperationContext* opCtx) {
+StatusWith<OpTimeAndWallTime> ReplicationCoordinatorExternalStateMock::loadLastOpTimeAndWallTime(
+ OperationContext* opCtx) {
if (_lastOpTime.getStatus().isOK()) {
if (_lastWallTime.getStatus().isOK()) {
- return std::make_tuple(_lastOpTime.getValue(), _lastWallTime.getValue());
+ OpTimeAndWallTime result = {_lastOpTime.getValue(), _lastWallTime.getValue()};
+ return result;
} else {
return _lastWallTime.getStatus();
}
diff --git a/src/mongo/db/repl/replication_coordinator_external_state_mock.h b/src/mongo/db/repl/replication_coordinator_external_state_mock.h
index 1a6b2c4a6be..6311dd446b1 100644
--- a/src/mongo/db/repl/replication_coordinator_external_state_mock.h
+++ b/src/mongo/db/repl/replication_coordinator_external_state_mock.h
@@ -79,8 +79,7 @@ public:
virtual void setGlobalTimestamp(ServiceContext* service, const Timestamp& newTime);
virtual Timestamp getGlobalTimestamp(ServiceContext* service);
bool oplogExists(OperationContext* opCtx) override;
- virtual StatusWith<std::tuple<OpTime, Date_t>> loadLastOpTimeAndWallTime(
- OperationContext* opCtx);
+ virtual StatusWith<OpTimeAndWallTime> loadLastOpTimeAndWallTime(OperationContext* opCtx);
virtual void closeConnections();
virtual void shardingOnStepDownHook();
virtual void signalApplierToChooseNewSyncSource();
diff --git a/src/mongo/db/repl/replication_coordinator_impl.cpp b/src/mongo/db/repl/replication_coordinator_impl.cpp
index 95e5ac6dc7e..c0a6e03c39e 100644
--- a/src/mongo/db/repl/replication_coordinator_impl.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl.cpp
@@ -527,7 +527,7 @@ bool ReplicationCoordinatorImpl::_startLoadLocalConfig(OperationContext* opCtx)
void ReplicationCoordinatorImpl::_finishLoadLocalConfig(
const executor::TaskExecutor::CallbackArgs& cbData,
const ReplSetConfig& localConfig,
- const StatusWith<std::tuple<OpTime, Date_t>>& lastOpTimeAndWallTimeStatus,
+ const StatusWith<OpTimeAndWallTime>& lastOpTimeAndWallTimeStatus,
const StatusWith<LastVote>& lastVoteStatus) {
if (!cbData.status.isOK()) {
LOG(1) << "Loading local replica set configuration failed due to " << cbData.status;
@@ -573,7 +573,7 @@ void ReplicationCoordinatorImpl::_finishLoadLocalConfig(
// Do not check optime, if this node is an arbiter.
bool isArbiter =
myIndex.getValue() != -1 && localConfig.getMemberAt(myIndex.getValue()).isArbiter();
- OpTimeAndWallTime lastOpTimeAndWallTime = std::make_tuple(OpTime(), Date_t::min());
+ OpTimeAndWallTime lastOpTimeAndWallTime = {OpTime(), Date_t::min()};
if (!isArbiter) {
if (!lastOpTimeAndWallTimeStatus.isOK()) {
warning() << "Failed to load timestamp and/or wall clock time of most recently applied "
@@ -590,7 +590,7 @@ void ReplicationCoordinatorImpl::_finishLoadLocalConfig(
}
}
- const auto lastOpTime = std::get<0>(lastOpTimeAndWallTime);
+ const auto lastOpTime = lastOpTimeAndWallTime.opTime;
// Restore the current term according to the terms of last oplog entry and last vote.
// The initial term of OpTime() is 0.
long long term = lastOpTime.getTerm();
@@ -691,45 +691,44 @@ void ReplicationCoordinatorImpl::_startDataReplication(OperationContext* opCtx,
return;
}
- auto onCompletion =
- [this, startCompleted](const StatusWith<std::tuple<OpTime, Date_t>>& opTimeStatus) {
- {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
- if (opTimeStatus == ErrorCodes::CallbackCanceled) {
- log() << "Initial Sync has been cancelled: " << opTimeStatus.getStatus();
+ auto onCompletion = [this, startCompleted](const StatusWith<OpTimeAndWallTime>& opTimeStatus) {
+ {
+ stdx::lock_guard<stdx::mutex> lock(_mutex);
+ if (opTimeStatus == ErrorCodes::CallbackCanceled) {
+ log() << "Initial Sync has been cancelled: " << opTimeStatus.getStatus();
+ return;
+ } else if (!opTimeStatus.isOK()) {
+ if (_inShutdown) {
+ log() << "Initial Sync failed during shutdown due to "
+ << opTimeStatus.getStatus();
return;
- } else if (!opTimeStatus.isOK()) {
- if (_inShutdown) {
- log() << "Initial Sync failed during shutdown due to "
- << opTimeStatus.getStatus();
- return;
- } else {
- error() << "Initial sync failed, shutting down now. Restart the server "
- "to attempt a new initial sync.";
- fassertFailedWithStatusNoTrace(40088, opTimeStatus.getStatus());
- }
+ } else {
+ error() << "Initial sync failed, shutting down now. Restart the server "
+ "to attempt a new initial sync.";
+ fassertFailedWithStatusNoTrace(40088, opTimeStatus.getStatus());
}
-
- const auto lastApplied = opTimeStatus.getValue();
- _setMyLastAppliedOpTimeAndWallTime(
- lock, lastApplied, false, DataConsistency::Consistent);
}
- // Clear maint. mode.
- while (getMaintenanceMode()) {
- setMaintenanceMode(false).transitional_ignore();
- }
+ const auto lastApplied = opTimeStatus.getValue();
+ _setMyLastAppliedOpTimeAndWallTime(
+ lock, lastApplied, false, DataConsistency::Consistent);
+ }
- if (startCompleted) {
- startCompleted();
- }
- // Because initial sync completed, we can only be in STARTUP2, not REMOVED.
- // Transition from STARTUP2 to RECOVERING and start the producer and the applier.
- invariant(getMemberState().startup2());
- invariant(setFollowerMode(MemberState::RS_RECOVERING));
- auto opCtxHolder = cc().makeOperationContext();
- _externalState->startSteadyStateReplication(opCtxHolder.get(), this);
- };
+ // Clear maint. mode.
+ while (getMaintenanceMode()) {
+ setMaintenanceMode(false).transitional_ignore();
+ }
+
+ if (startCompleted) {
+ startCompleted();
+ }
+ // Because initial sync completed, we can only be in STARTUP2, not REMOVED.
+ // Transition from STARTUP2 to RECOVERING and start the producer and the applier.
+ invariant(getMemberState().startup2());
+ invariant(setFollowerMode(MemberState::RS_RECOVERING));
+ auto opCtxHolder = cc().makeOperationContext();
+ _externalState->startSteadyStateReplication(opCtxHolder.get(), this);
+ };
std::shared_ptr<InitialSyncer> initialSyncerCopy;
try {
@@ -1080,7 +1079,7 @@ void ReplicationCoordinatorImpl::setMyLastAppliedOpTimeAndWallTimeForward(
const OpTimeAndWallTime& opTimeAndWallTime, DataConsistency consistency) {
// Update the global timestamp before setting the last applied opTime forward so the last
// applied optime is never greater than the latest cluster time in the logical clock.
- const auto opTime = std::get<0>(opTimeAndWallTime);
+ const auto opTime = opTimeAndWallTime.opTime;
_externalState->setGlobalTimestamp(getServiceContext(), opTime.getTimestamp());
stdx::unique_lock<stdx::mutex> lock(_mutex);
@@ -1110,7 +1109,7 @@ void ReplicationCoordinatorImpl::setMyLastAppliedOpTimeAndWallTimeForward(
void ReplicationCoordinatorImpl::setMyLastDurableOpTimeAndWallTimeForward(
const OpTimeAndWallTime& opTimeAndWallTime) {
stdx::unique_lock<stdx::mutex> lock(_mutex);
- if (std::get<0>(opTimeAndWallTime) > _getMyLastDurableOpTime_inlock()) {
+ if (opTimeAndWallTime.opTime > _getMyLastDurableOpTime_inlock()) {
_setMyLastDurableOpTimeAndWallTime(lock, opTimeAndWallTime, false);
_reportUpstream_inlock(std::move(lock));
}
@@ -1118,7 +1117,7 @@ void ReplicationCoordinatorImpl::setMyLastDurableOpTimeAndWallTimeForward(
void ReplicationCoordinatorImpl::setMyLastAppliedOpTimeAndWallTime(
const OpTimeAndWallTime& opTimeAndWallTime) {
- const auto opTime = std::get<0>(opTimeAndWallTime);
+ const auto opTime = opTimeAndWallTime.opTime;
// Update the global timestamp before setting the last applied opTime forward so the last
// applied optime is never greater than the latest cluster time in the logical clock.
_externalState->setGlobalTimestamp(getServiceContext(), opTime.getTimestamp());
@@ -1146,12 +1145,9 @@ void ReplicationCoordinatorImpl::_resetMyLastOpTimes(WithLock lk) {
LOG(1) << "resetting durable/applied optimes.";
// Reset to uninitialized OpTime
bool isRollbackAllowed = true;
- _setMyLastAppliedOpTimeAndWallTime(lk,
- std::make_tuple(OpTime(), Date_t::min()),
- isRollbackAllowed,
- DataConsistency::Inconsistent);
- _setMyLastDurableOpTimeAndWallTime(
- lk, std::make_tuple(OpTime(), Date_t::min()), isRollbackAllowed);
+ _setMyLastAppliedOpTimeAndWallTime(
+ lk, {OpTime(), Date_t::min()}, isRollbackAllowed, DataConsistency::Inconsistent);
+ _setMyLastDurableOpTimeAndWallTime(lk, {OpTime(), Date_t::min()}, isRollbackAllowed);
_stableOpTimeCandidates.clear();
}
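_resetMyLastOpTimes seeds both optimes with the same sentinel pair, now spelled inline as a braced list. If a named constant were wanted, it would read as follows (hypothetical helper, not part of this commit):

    // Hypothetical; shown only to give the {OpTime(), Date_t::min()} sentinel a name.
    const OpTimeAndWallTime kUnsetOpTimeAndWallTime = {OpTime(), Date_t::min()};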
@@ -1176,7 +1172,7 @@ void ReplicationCoordinatorImpl::_setMyLastAppliedOpTimeAndWallTime(
const OpTimeAndWallTime& opTimeAndWallTime,
bool isRollbackAllowed,
DataConsistency consistency) {
- const auto opTime = std::get<0>(opTimeAndWallTime);
+ const auto opTime = opTimeAndWallTime.opTime;
// The last applied opTime should never advance beyond the global timestamp (i.e. the latest
// cluster time). Not enforced if the logical clock is disabled, e.g. for arbiters.
@@ -2696,7 +2692,7 @@ Status ReplicationCoordinatorImpl::processReplSetInitiate(OperationContext* opCt
// Sets the initial data timestamp on the storage engine so it can assign a timestamp
// to data on disk. We do this after writing the "initiating set" oplog entry.
_storage->setInitialDataTimestamp(getServiceContext(),
- std::get<0>(lastAppliedOpTimeAndWallTime).getTimestamp());
+ lastAppliedOpTimeAndWallTime.opTime.getTimestamp());
_finishReplSetInitiate(opCtx, newConfig, myIndex.getValue());
@@ -3295,7 +3291,7 @@ void ReplicationCoordinatorImpl::blacklistSyncSource(const HostAndPort& host, Da
void ReplicationCoordinatorImpl::resetLastOpTimesFromOplog(OperationContext* opCtx,
DataConsistency consistency) {
auto lastOpTimeAndWallTimeStatus = _externalState->loadLastOpTimeAndWallTime(opCtx);
- OpTimeAndWallTime lastOpTimeAndWallTime = std::make_tuple(OpTime(), Date_t::min());
+ OpTimeAndWallTime lastOpTimeAndWallTime = {OpTime(), Date_t::min()};
if (!lastOpTimeAndWallTimeStatus.getStatus().isOK()) {
warning() << "Failed to load timestamp and/or wall clock time of most recently applied "
"operation; "
@@ -3307,7 +3303,7 @@ void ReplicationCoordinatorImpl::resetLastOpTimesFromOplog(OperationContext* opC
// Update the global timestamp before setting last applied opTime forward so the last applied
// optime is never greater than the latest in-memory cluster time.
_externalState->setGlobalTimestamp(opCtx->getServiceContext(),
- std::get<0>(lastOpTimeAndWallTime).getTimestamp());
+ lastOpTimeAndWallTime.opTime.getTimestamp());
stdx::unique_lock<stdx::mutex> lock(_mutex);
bool isRollbackAllowed = true;
@@ -3531,10 +3527,8 @@ void ReplicationCoordinatorImpl::_advanceCommitPoint(WithLock lk, const OpTime&
if (_getMemberState_inlock().arbiter()) {
// Arbiters do not store replicated data, so we consider their data trivially
// consistent.
- _setMyLastAppliedOpTimeAndWallTime(lk,
- std::make_tuple(committedOpTime, Date_t::min()),
- false,
- DataConsistency::Consistent);
+ _setMyLastAppliedOpTimeAndWallTime(
+ lk, {committedOpTime, Date_t::min()}, false, DataConsistency::Consistent);
}
_setStableTimestampForStorage(lk);
diff --git a/src/mongo/db/repl/replication_coordinator_impl.h b/src/mongo/db/repl/replication_coordinator_impl.h
index 59eaa7ac157..195e11d799b 100644
--- a/src/mongo/db/repl/replication_coordinator_impl.h
+++ b/src/mongo/db/repl/replication_coordinator_impl.h
@@ -875,11 +875,10 @@ private:
* Callback that finishes the work started in _startLoadLocalConfig and sets _rsConfigState
* to kConfigSteady, so that we can begin processing heartbeats and reconfigs.
*/
- void _finishLoadLocalConfig(
- const executor::TaskExecutor::CallbackArgs& cbData,
- const ReplSetConfig& localConfig,
- const StatusWith<std::tuple<OpTime, Date_t>>& lastOpTimeAndWallTimeStatus,
- const StatusWith<LastVote>& lastVoteStatus);
+ void _finishLoadLocalConfig(const executor::TaskExecutor::CallbackArgs& cbData,
+ const ReplSetConfig& localConfig,
+ const StatusWith<OpTimeAndWallTime>& lastOpTimeAndWallTimeStatus,
+ const StatusWith<LastVote>& lastVoteStatus);
/**
* Starts replicating data, doing an initial sync first if needed.
diff --git a/src/mongo/db/repl/replication_coordinator_mock.cpp b/src/mongo/db/repl/replication_coordinator_mock.cpp
index 3e3435d15cb..5eec6c5ae09 100644
--- a/src/mongo/db/repl/replication_coordinator_mock.cpp
+++ b/src/mongo/db/repl/replication_coordinator_mock.cpp
@@ -199,29 +199,29 @@ void ReplicationCoordinatorMock::setMyHeartbeatMessage(const std::string& msg) {
void ReplicationCoordinatorMock::setMyLastAppliedOpTimeAndWallTime(
const OpTimeAndWallTime& opTimeAndWallTime) {
- _myLastAppliedOpTime = std::get<0>(opTimeAndWallTime);
- _myLastAppliedWallTime = std::get<1>(opTimeAndWallTime);
+ _myLastAppliedOpTime = opTimeAndWallTime.opTime;
+ _myLastAppliedWallTime = opTimeAndWallTime.wallTime;
}
void ReplicationCoordinatorMock::setMyLastDurableOpTimeAndWallTime(
const OpTimeAndWallTime& opTimeAndWallTime) {
- _myLastDurableOpTime = std::get<0>(opTimeAndWallTime);
- _myLastDurableWallTime = std::get<1>(opTimeAndWallTime);
+ _myLastDurableOpTime = opTimeAndWallTime.opTime;
+ _myLastDurableWallTime = opTimeAndWallTime.wallTime;
}
void ReplicationCoordinatorMock::setMyLastAppliedOpTimeAndWallTimeForward(
const OpTimeAndWallTime& opTimeAndWallTime, DataConsistency consistency) {
- if (std::get<0>(opTimeAndWallTime) > _myLastAppliedOpTime) {
- _myLastAppliedOpTime = std::get<0>(opTimeAndWallTime);
- _myLastAppliedWallTime = std::get<1>(opTimeAndWallTime);
+ if (opTimeAndWallTime.opTime > _myLastAppliedOpTime) {
+ _myLastAppliedOpTime = opTimeAndWallTime.opTime;
+ _myLastAppliedWallTime = opTimeAndWallTime.wallTime;
}
}
void ReplicationCoordinatorMock::setMyLastDurableOpTimeAndWallTimeForward(
const OpTimeAndWallTime& opTimeAndWallTime) {
- if (std::get<0>(opTimeAndWallTime) > _myLastDurableOpTime) {
- _myLastDurableOpTime = std::get<0>(opTimeAndWallTime);
- _myLastDurableWallTime = std::get<1>(opTimeAndWallTime);
+ if (opTimeAndWallTime.opTime > _myLastDurableOpTime) {
+ _myLastDurableOpTime = opTimeAndWallTime.opTime;
+ _myLastDurableWallTime = opTimeAndWallTime.wallTime;
}
}
@@ -231,7 +231,7 @@ void ReplicationCoordinatorMock::resetMyLastOpTimes() {
}
OpTimeAndWallTime ReplicationCoordinatorMock::getMyLastAppliedOpTimeAndWallTime() const {
- return std::make_tuple(_myLastAppliedOpTime, _myLastAppliedWallTime);
+ return {_myLastAppliedOpTime, _myLastAppliedWallTime};
}
OpTime ReplicationCoordinatorMock::getMyLastAppliedOpTime() const {
@@ -239,7 +239,7 @@ OpTime ReplicationCoordinatorMock::getMyLastAppliedOpTime() const {
}
OpTimeAndWallTime ReplicationCoordinatorMock::getMyLastDurableOpTimeAndWallTime() const {
- return std::make_tuple(_myLastDurableOpTime, _myLastDurableWallTime);
+ return {_myLastDurableOpTime, _myLastDurableWallTime};
}
OpTime ReplicationCoordinatorMock::getMyLastDurableOpTime() const {
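Returning a braced list from the mock getters copy-list-initializes the declared OpTimeAndWallTime return type, again relying on the type being an aggregate. The equivalent spelled-out form, for illustration only:

    OpTimeAndWallTime result;
    result.opTime = _myLastAppliedOpTime;
    result.wallTime = _myLastAppliedWallTime;
    return result;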
diff --git a/src/mongo/db/repl/replication_coordinator_test_fixture.h b/src/mongo/db/repl/replication_coordinator_test_fixture.h
index 41b2b1c3b82..db1406e1227 100644
--- a/src/mongo/db/repl/replication_coordinator_test_fixture.h
+++ b/src/mongo/db/repl/replication_coordinator_test_fixture.h
@@ -106,23 +106,22 @@ protected:
}
void replCoordSetMyLastAppliedOpTime(const OpTime& opTime, Date_t wallTime = Date_t::min()) {
- getReplCoord()->setMyLastAppliedOpTimeAndWallTime(std::make_tuple(opTime, wallTime));
+ getReplCoord()->setMyLastAppliedOpTimeAndWallTime({opTime, wallTime});
}
void replCoordSetMyLastAppliedOpTimeForward(const OpTime& opTime,
ReplicationCoordinator::DataConsistency consistency,
Date_t wallTime = Date_t::min()) {
- getReplCoord()->setMyLastAppliedOpTimeAndWallTimeForward(std::make_tuple(opTime, wallTime),
- consistency);
+ getReplCoord()->setMyLastAppliedOpTimeAndWallTimeForward({opTime, wallTime}, consistency);
}
void replCoordSetMyLastDurableOpTime(const OpTime& opTime, Date_t wallTime = Date_t::min()) {
- getReplCoord()->setMyLastDurableOpTimeAndWallTime(std::make_tuple(opTime, wallTime));
+ getReplCoord()->setMyLastDurableOpTimeAndWallTime({opTime, wallTime});
}
void replCoordSetMyLastDurableOpTimeForward(const OpTime& opTime,
Date_t wallTime = Date_t::min()) {
- getReplCoord()->setMyLastDurableOpTimeAndWallTimeForward(std::make_tuple(opTime, wallTime));
+ getReplCoord()->setMyLastDurableOpTimeAndWallTimeForward({opTime, wallTime});
}
/**
diff --git a/src/mongo/db/repl/sync_tail.cpp b/src/mongo/db/repl/sync_tail.cpp
index 9602191eb7e..5870c6e57e9 100644
--- a/src/mongo/db/repl/sync_tail.cpp
+++ b/src/mongo/db/repl/sync_tail.cpp
@@ -189,11 +189,11 @@ void ApplyBatchFinalizerForJournal::_run() {
Client::initThread("ApplyBatchFinalizerForJournal");
while (true) {
- OpTimeAndWallTime latestOpTimeAndWallTime = std::make_tuple(OpTime(), Date_t::min());
+ OpTimeAndWallTime latestOpTimeAndWallTime = {OpTime(), Date_t::min()};
{
stdx::unique_lock<stdx::mutex> lock(_mutex);
- while (std::get<0>(_latestOpTimeAndWallTime).isNull() && !_shutdownSignaled) {
+ while (_latestOpTimeAndWallTime.opTime.isNull() && !_shutdownSignaled) {
_cond.wait(lock);
}
@@ -202,7 +202,7 @@ void ApplyBatchFinalizerForJournal::_run() {
}
latestOpTimeAndWallTime = _latestOpTimeAndWallTime;
- _latestOpTimeAndWallTime = std::make_tuple(OpTime(), Date_t::min());
+ _latestOpTimeAndWallTime = {OpTime(), Date_t::min()};
}
auto opCtx = cc().makeOperationContext();
@@ -805,8 +805,7 @@ void SyncTail::_oplogApplication(OplogBuffer* oplogBuffer,
: ReplicationCoordinator::DataConsistency::Inconsistent;
// Wall clock time is non-optional post 3.6.
invariant(lastWallTimeInBatch);
- finalizer->record(std::make_tuple(lastOpTimeInBatch, lastWallTimeInBatch.get()),
- consistency);
+ finalizer->record({lastOpTimeInBatch, lastWallTimeInBatch.get()}, consistency);
}
}
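In ApplyBatchFinalizerForJournal a null OpTime doubles as the "nothing recorded" sentinel: the loop waits until _latestOpTimeAndWallTime.opTime is non-null, takes the value under _mutex, and resets the slot to {OpTime(), Date_t::min()}. The producer side's declaration is not shown in this hunk; inferred from the record() call above (an assumption, not the verbatim declaration):

    // Inferred from finalizer->record({lastOpTimeInBatch, lastWallTimeInBatch.get()}, consistency);
    void record(const OpTimeAndWallTime& newOpTimeAndWallTime,
                ReplicationCoordinator::DataConsistency consistency);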
diff --git a/src/mongo/db/repl/topology_coordinator.cpp b/src/mongo/db/repl/topology_coordinator.cpp
index a9921fe31c8..3783e6338e1 100644
--- a/src/mongo/db/repl/topology_coordinator.cpp
+++ b/src/mongo/db/repl/topology_coordinator.cpp
@@ -958,14 +958,13 @@ OpTime TopologyCoordinator::getMyLastAppliedOpTime() const {
}
OpTimeAndWallTime TopologyCoordinator::getMyLastAppliedOpTimeAndWallTime() const {
- return std::make_tuple(_selfMemberData().getLastAppliedOpTime(),
- _selfMemberData().getLastAppliedWallTime());
+ return {_selfMemberData().getLastAppliedOpTime(), _selfMemberData().getLastAppliedWallTime()};
}
void TopologyCoordinator::setMyLastAppliedOpTimeAndWallTime(OpTimeAndWallTime opTimeAndWallTime,
Date_t now,
bool isRollbackAllowed) {
- auto opTime = std::get<0>(opTimeAndWallTime);
+ auto opTime = opTimeAndWallTime.opTime;
auto& myMemberData = _selfMemberData();
auto myLastAppliedOpTime = myMemberData.getLastAppliedOpTime();
@@ -986,14 +985,13 @@ OpTime TopologyCoordinator::getMyLastDurableOpTime() const {
}
OpTimeAndWallTime TopologyCoordinator::getMyLastDurableOpTimeAndWallTime() const {
- return std::tuple(_selfMemberData().getLastDurableOpTime(),
- _selfMemberData().getLastDurableWallTime());
+ return {_selfMemberData().getLastDurableOpTime(), _selfMemberData().getLastDurableWallTime()};
}
void TopologyCoordinator::setMyLastDurableOpTimeAndWallTime(OpTimeAndWallTime opTimeAndWallTime,
Date_t now,
bool isRollbackAllowed) {
- auto opTime = std::get<0>(opTimeAndWallTime);
+ auto opTime = opTimeAndWallTime.opTime;
auto& myMemberData = _selfMemberData();
invariant(isRollbackAllowed || opTime >= myMemberData.getLastDurableOpTime());
myMemberData.setLastDurableOpTimeAndWallTime(opTimeAndWallTime, now);
@@ -1423,9 +1421,9 @@ void TopologyCoordinator::prepareStatusResponse(const ReplSetStatusArgs& rsStatu
const MemberState myState = getMemberState();
const Date_t now = rsStatusArgs.now;
const OpTime lastOpApplied = getMyLastAppliedOpTime();
- const Date_t lastOpAppliedWall = std::get<1>(getMyLastAppliedOpTimeAndWallTime());
+ const Date_t lastOpAppliedWall = getMyLastAppliedOpTimeAndWallTime().wallTime;
const OpTime lastOpDurable = getMyLastDurableOpTime();
- const Date_t lastOpDurableWall = std::get<1>(getMyLastDurableOpTimeAndWallTime());
+ const Date_t lastOpDurableWall = getMyLastDurableOpTimeAndWallTime().wallTime;
const BSONObj& initialSyncStatus = rsStatusArgs.initialSyncStatus;
const boost::optional<Timestamp>& lastStableRecoveryTimestamp =
rsStatusArgs.lastStableRecoveryTimestamp;
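Incidentally, the two removed getters here disagreed with each other (std::make_tuple in one, CTAD std::tuple in the other); the braced form removes that drift. The setters keep taking the struct by value, as the signature in this hunk shows:

    // Signature as it appears above; the struct is passed by value.
    void setMyLastAppliedOpTimeAndWallTime(OpTimeAndWallTime opTimeAndWallTime,
                                           Date_t now,
                                           bool isRollbackAllowed);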
diff --git a/src/mongo/db/repl/topology_coordinator_v1_test.cpp b/src/mongo/db/repl/topology_coordinator_v1_test.cpp
index 4dc9c528404..15da15af5d8 100644
--- a/src/mongo/db/repl/topology_coordinator_v1_test.cpp
+++ b/src/mongo/db/repl/topology_coordinator_v1_test.cpp
@@ -117,8 +117,7 @@ protected:
}
void setMyOpTime(const OpTime& opTime, const Date_t wallTime = Date_t::min()) {
- getTopoCoord().setMyLastAppliedOpTimeAndWallTime(
- std::make_tuple(opTime, wallTime), now(), false);
+ getTopoCoord().setMyLastAppliedOpTimeAndWallTime({opTime, wallTime}, now(), false);
}
void topoCoordSetMyLastAppliedOpTime(const OpTime& opTime,
@@ -126,7 +125,7 @@ protected:
bool isRollbackAllowed,
const Date_t wallTime = Date_t::min()) {
getTopoCoord().setMyLastAppliedOpTimeAndWallTime(
- std::make_tuple(opTime, wallTime), now, isRollbackAllowed);
+ {opTime, wallTime}, now, isRollbackAllowed);
}
void topoCoordSetMyLastDurableOpTime(const OpTime& opTime,
@@ -134,7 +133,7 @@ protected:
bool isRollbackAllowed,
const Date_t wallTime = Date_t::min()) {
getTopoCoord().setMyLastDurableOpTimeAndWallTime(
- std::make_tuple(opTime, wallTime), now, isRollbackAllowed);
+ {opTime, wallTime}, now, isRollbackAllowed);
}
void setSelfMemberState(const MemberState& newState) {
diff --git a/src/mongo/db/write_concern.cpp b/src/mongo/db/write_concern.cpp
index 5b5fd9222f9..a2cc613282d 100644
--- a/src/mongo/db/write_concern.cpp
+++ b/src/mongo/db/write_concern.cpp
@@ -53,6 +53,7 @@ namespace mongo {
using std::string;
using repl::OpTime;
+using repl::OpTimeAndWallTime;
static TimerStats gleWtimeStats;
static ServerStatusMetricField<TimerStats> displayGleLatency("getLastError.wtime", &gleWtimeStats);
diff --git a/src/mongo/dbtests/storage_timestamp_tests.cpp b/src/mongo/dbtests/storage_timestamp_tests.cpp
index 6806f07548a..2a326ced824 100644
--- a/src/mongo/dbtests/storage_timestamp_tests.cpp
+++ b/src/mongo/dbtests/storage_timestamp_tests.cpp
@@ -448,7 +448,7 @@ public:
void setReplCoordAppliedOpTime(const repl::OpTime& opTime, Date_t wallTime = Date_t::min()) {
repl::ReplicationCoordinator::get(getGlobalServiceContext())
- ->setMyLastAppliedOpTimeAndWallTime(std::make_tuple(opTime, wallTime));
+ ->setMyLastAppliedOpTimeAndWallTime({opTime, wallTime});
ASSERT_OK(repl::ReplicationCoordinator::get(getGlobalServiceContext())
->updateTerm(_opCtx, opTime.getTerm()));
}
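Taken together, the migration is mechanical at every call site: std::make_tuple(op, wall) becomes {op, wall}, std::get<0> becomes .opTime, and std::get<1> becomes .wallTime. A self-contained before/after sketch under the struct shape assumed earlier (test-style values, not taken from this commit):

    OpTimeAndWallTime t = {OpTime(Timestamp(100, 1), 1), Date_t::now()};
    OpTime op = t.opTime;      // was: std::get<0>(t)
    Date_t wall = t.wallTime;  // was: std::get<1>(t)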