Diffstat (limited to 'src/mongo/db/repl')
-rw-r--r--  src/mongo/db/repl/SConscript                                  |   9
-rw-r--r--  src/mongo/db/repl/collection_cloner.cpp                       |   6
-rw-r--r--  src/mongo/db/repl/initial_syncer.cpp                          | 114
-rw-r--r--  src/mongo/db/repl/multiapplier.cpp                            |   2
-rw-r--r--  src/mongo/db/repl/replication_coordinator_impl.cpp            |  33
-rw-r--r--  src/mongo/db/repl/replication_coordinator_impl_elect_v1.cpp   |   4
-rw-r--r--  src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp  |  56
-rw-r--r--  src/mongo/db/repl/replication_coordinator_impl_test.cpp       |   2
-rw-r--r--  src/mongo/db/repl/reporter.cpp                                |   4
-rw-r--r--  src/mongo/db/repl/sync_source_resolver.cpp                    |  14
-rw-r--r--  src/mongo/db/repl/tenant_file_cloner.cpp                      |   7
-rw-r--r--  src/mongo/db/repl/tenant_oplog_applier.cpp                    |  25
12 files changed, 146 insertions, 130 deletions
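Most of the hunks below rewrite lambda captures from [=] to [=, this]. This looks like a C++20-compatibility cleanup: C++20 deprecates implicitly capturing `this` under a by-copy capture default (P0806), so compilers flag the old spelling with deprecation warnings. The SConscript hunk is related only loosely: it clones the build environment for one library to add -Wno-deprecated on non-Windows targets, per its TODO about a pending MozJS update. A minimal sketch of the capture change follows; `Executor` and `Syncer` are illustrative stand-ins, not the mongo::executor::TaskExecutor API.

// Minimal sketch of the [=] -> [=, this] change. Compile with -std=c++20.
#include <functional>
#include <iostream>
#include <utility>
#include <vector>

struct Executor {
    std::vector<std::function<void()>> work;
    void schedule(std::function<void()> fn) {
        work.push_back(std::move(fn));
    }
    void drain() {
        for (auto& fn : work)
            fn();
        work.clear();
    }
};

class Syncer {
public:
    explicit Syncer(Executor* exec) : _exec(exec) {}

    void start(int attempt) {
        // Deprecated in C++20 (GCC warns under -Wdeprecated, Clang under
        // -Wdeprecated-this-capture):
        //     _exec->schedule([=] { _runAttempt(attempt); });
        // New spelling: keep the by-copy default for locals such as `attempt`,
        // but name `this` explicitly.
        _exec->schedule([=, this] { _runAttempt(attempt); });
    }

private:
    void _runAttempt(int attempt) {
        std::cout << "running attempt " << attempt << "\n";
    }

    Executor* _exec;
};

int main() {
    Executor exec;
    Syncer syncer(&exec);
    syncer.start(1);
    exec.drain();  // prints "running attempt 1"
    return 0;
}

Note that [=, this] still captures the object by pointer, exactly as [=] did before, so the lifetime requirements on these callbacks are unchanged; only the capture list is explicit (capturing a copy of the object would be [=, *this]).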
diff --git a/src/mongo/db/repl/SConscript b/src/mongo/db/repl/SConscript
index 302dd4186d2..f313f2942fa 100644
--- a/src/mongo/db/repl/SConscript
+++ b/src/mongo/db/repl/SConscript
@@ -2031,8 +2031,13 @@ env.Library(
'hello_command',
],
)
-
-env.Library(
+wait_for_majority_service_env = env.Clone()
+# TODO(SERVER-77205): Review and Possibly Remove '-Wno-deprecated' After Mozjs Update
+wait_for_majority_service_env.Append(
+ CXXFLAGS=[] if wait_for_majority_service_env.TargetOSIs('windows') else [
+ '-Wno-deprecated',
+ ], )
+wait_for_majority_service_env.Library(
target='wait_for_majority_service',
source=[
'wait_for_majority_service.cpp',
diff --git a/src/mongo/db/repl/collection_cloner.cpp b/src/mongo/db/repl/collection_cloner.cpp
index 1680c6d91fc..a3358a67a92 100644
--- a/src/mongo/db/repl/collection_cloner.cpp
+++ b/src/mongo/db/repl/collection_cloner.cpp
@@ -411,8 +411,10 @@ void CollectionCloner::handleNextBatch(DBClientCursor& cursor) {
}
// Schedule the next document batch insertion.
- auto&& scheduleResult = _scheduleDbWorkFn(
- [=](const executor::TaskExecutor::CallbackArgs& cbd) { insertDocumentsCallback(cbd); });
+ auto&& scheduleResult =
+ _scheduleDbWorkFn([=, this](const executor::TaskExecutor::CallbackArgs& cbd) {
+ insertDocumentsCallback(cbd);
+ });
if (!scheduleResult.isOK()) {
Status newStatus = scheduleResult.getStatus().withContext(
diff --git a/src/mongo/db/repl/initial_syncer.cpp b/src/mongo/db/repl/initial_syncer.cpp
index cd4bba6c986..21d12d6ecb9 100644
--- a/src/mongo/db/repl/initial_syncer.cpp
+++ b/src/mongo/db/repl/initial_syncer.cpp
@@ -284,7 +284,7 @@ Status InitialSyncer::startup(OperationContext* opCtx,
_clonerAttemptExec = std::make_unique<executor::ScopedTaskExecutor>(
_clonerExec, Status(ErrorCodes::CallbackCanceled, "Initial Sync Attempt Canceled"));
auto status = _scheduleWorkAndSaveHandle_inlock(
- [=](const executor::TaskExecutor::CallbackArgs& args) {
+ [=, this](const executor::TaskExecutor::CallbackArgs& args) {
_startInitialSyncAttemptCallback(args, initialSyncAttempt, initialSyncMaxAttempts);
},
&_startInitialSyncAttemptHandle,
@@ -681,7 +681,7 @@ void InitialSyncer::_startInitialSyncAttemptCallback(
// _scheduleWorkAndSaveHandle_inlock() is shutdown-aware.
status = _scheduleWorkAndSaveHandle_inlock(
- [=](const executor::TaskExecutor::CallbackArgs& args) {
+ [=, this](const executor::TaskExecutor::CallbackArgs& args) {
_chooseSyncSourceCallback(
args, chooseSyncSourceAttempt, chooseSyncSourceMaxAttempts, onCompletionGuard);
},
@@ -745,7 +745,7 @@ void InitialSyncer::_chooseSyncSourceCallback(
"numInitialSyncConnectAttempts"_attr = numInitialSyncConnectAttempts.load());
auto status = _scheduleWorkAtAndSaveHandle_inlock(
when,
- [=](const executor::TaskExecutor::CallbackArgs& args) {
+ [=, this](const executor::TaskExecutor::CallbackArgs& args) {
_chooseSyncSourceCallback(args,
chooseSyncSourceAttempt + 1,
chooseSyncSourceMaxAttempts,
@@ -786,7 +786,7 @@ void InitialSyncer::_chooseSyncSourceCallback(
// Schedule rollback ID checker.
_rollbackChecker = std::make_unique<RollbackChecker>(*_attemptExec, _syncSource);
- auto scheduleResult = _rollbackChecker->reset([=](const RollbackChecker::Result& result) {
+ auto scheduleResult = _rollbackChecker->reset([=, this](const RollbackChecker::Result& result) {
return _rollbackCheckerResetCallback(result, onCompletionGuard);
});
status = scheduleResult.getStatus();
@@ -868,9 +868,9 @@ void InitialSyncer::_rollbackCheckerResetCallback(
// which retries up to 'numInitialSyncOplogFindAttempts' times'. This will fail relatively
// quickly in the presence of network errors, allowing us to choose a different sync source.
status = _scheduleLastOplogEntryFetcher_inlock(
- [=](const StatusWith<mongo::Fetcher::QueryResponse>& response,
- mongo::Fetcher::NextAction*,
- mongo::BSONObjBuilder*) mutable {
+ [=, this](const StatusWith<mongo::Fetcher::QueryResponse>& response,
+ mongo::Fetcher::NextAction*,
+ mongo::BSONObjBuilder*) mutable {
_lastOplogEntryFetcherCallbackForDefaultBeginFetchingOpTime(response,
onCompletionGuard);
},
@@ -947,9 +947,9 @@ Status InitialSyncer::_scheduleGetBeginFetchingOpTime_inlock(
_syncSource,
NamespaceString::kSessionTransactionsTableNamespace.db().toString(),
cmd.obj(),
- [=](const StatusWith<mongo::Fetcher::QueryResponse>& response,
- mongo::Fetcher::NextAction*,
- mongo::BSONObjBuilder*) mutable {
+ [=, this](const StatusWith<mongo::Fetcher::QueryResponse>& response,
+ mongo::Fetcher::NextAction*,
+ mongo::BSONObjBuilder*) mutable {
_getBeginFetchingOpTimeCallback(
response, onCompletionGuard, defaultBeginFetchingOpTime);
},
@@ -1019,9 +1019,9 @@ void InitialSyncer::_getBeginFetchingOpTimeCallback(
// which retries up to 'numInitialSyncOplogFindAttempts' times'. This will fail relatively
// quickly in the presence of network errors, allowing us to choose a different sync source.
status = _scheduleLastOplogEntryFetcher_inlock(
- [=](const StatusWith<mongo::Fetcher::QueryResponse>& response,
- mongo::Fetcher::NextAction*,
- mongo::BSONObjBuilder*) mutable {
+ [=, this](const StatusWith<mongo::Fetcher::QueryResponse>& response,
+ mongo::Fetcher::NextAction*,
+ mongo::BSONObjBuilder*) mutable {
_lastOplogEntryFetcherCallbackForBeginApplyingTimestamp(
response, onCompletionGuard, beginFetchingOpTime);
},
@@ -1075,9 +1075,9 @@ void InitialSyncer::_lastOplogEntryFetcherCallbackForBeginApplyingTimestamp(
_syncSource,
NamespaceString::kServerConfigurationNamespace.db().toString(),
queryBob.obj(),
- [=](const StatusWith<mongo::Fetcher::QueryResponse>& response,
- mongo::Fetcher::NextAction*,
- mongo::BSONObjBuilder*) mutable {
+ [=, this](const StatusWith<mongo::Fetcher::QueryResponse>& response,
+ mongo::Fetcher::NextAction*,
+ mongo::BSONObjBuilder*) mutable {
_fcvFetcherCallback(response, onCompletionGuard, lastOpTime, beginFetchingOpTime);
},
ReadPreferenceSetting::secondaryPreferredMetadata(),
@@ -1227,12 +1227,12 @@ void InitialSyncer::_fcvFetcherCallback(const StatusWith<Fetcher::QueryResponse>
std::make_unique<OplogFetcherRestartDecisionInitialSyncer>(
_sharedData.get(), _opts.oplogFetcherMaxFetcherRestarts),
_dataReplicatorExternalState.get(),
- [=](OplogFetcher::Documents::const_iterator first,
- OplogFetcher::Documents::const_iterator last,
- const OplogFetcher::DocumentsInfo& info) {
+ [=, this](OplogFetcher::Documents::const_iterator first,
+ OplogFetcher::Documents::const_iterator last,
+ const OplogFetcher::DocumentsInfo& info) {
return _enqueueDocuments(first, last, info);
},
- [=](const Status& s, int rbid) { _oplogFetcherCallback(s, onCompletionGuard); },
+ [=, this](const Status& s, int rbid) { _oplogFetcherCallback(s, onCompletionGuard); },
std::move(oplogFetcherConfig));
LOGV2_DEBUG(21178,
@@ -1385,9 +1385,9 @@ void InitialSyncer::_allDatabaseClonerCallback(
// strategy used when retrieving collection data, and avoids retrieving all the data and then
// throwing it away due to a transient network outage.
status = _scheduleLastOplogEntryFetcher_inlock(
- [=](const StatusWith<mongo::Fetcher::QueryResponse>& status,
- mongo::Fetcher::NextAction*,
- mongo::BSONObjBuilder*) {
+ [=, this](const StatusWith<mongo::Fetcher::QueryResponse>& status,
+ mongo::Fetcher::NextAction*,
+ mongo::BSONObjBuilder*) {
_lastOplogEntryFetcherCallbackForStopTimestamp(status, onCompletionGuard);
},
kInitialSyncerHandlesRetries);
@@ -1409,31 +1409,31 @@ void InitialSyncer::_lastOplogEntryFetcherCallbackForStopTimestamp(
if (_shouldRetryError(lock, status)) {
auto scheduleStatus =
(*_attemptExec)
- ->scheduleWork(
- [this, onCompletionGuard](executor::TaskExecutor::CallbackArgs args) {
- // It is not valid to schedule the retry from within this callback,
- // hence we schedule a lambda to schedule the retry.
- stdx::lock_guard<Latch> lock(_mutex);
- // Since the stopTimestamp is retrieved after we have done all the
- // work of retrieving collection data, we handle retries within this
- // class by retrying for
- // 'initialSyncTransientErrorRetryPeriodSeconds' (default 24 hours).
- // This is the same retry strategy used when retrieving collection
- // data, and avoids retrieving all the data and then throwing it
- // away due to a transient network outage.
- auto status = _scheduleLastOplogEntryFetcher_inlock(
- [=](const StatusWith<mongo::Fetcher::QueryResponse>& status,
- mongo::Fetcher::NextAction*,
- mongo::BSONObjBuilder*) {
- _lastOplogEntryFetcherCallbackForStopTimestamp(
- status, onCompletionGuard);
- },
- kInitialSyncerHandlesRetries);
- if (!status.isOK()) {
- onCompletionGuard->setResultAndCancelRemainingWork_inlock(
- lock, status);
- }
- });
+ ->scheduleWork([this, onCompletionGuard](
+ executor::TaskExecutor::CallbackArgs args) {
+ // It is not valid to schedule the retry from within this callback,
+ // hence we schedule a lambda to schedule the retry.
+ stdx::lock_guard<Latch> lock(_mutex);
+ // Since the stopTimestamp is retrieved after we have done all the
+ // work of retrieving collection data, we handle retries within this
+ // class by retrying for
+ // 'initialSyncTransientErrorRetryPeriodSeconds' (default 24 hours).
+ // This is the same retry strategy used when retrieving collection
+ // data, and avoids retrieving all the data and then throwing it
+ // away due to a transient network outage.
+ auto status = _scheduleLastOplogEntryFetcher_inlock(
+ [=, this](const StatusWith<mongo::Fetcher::QueryResponse>& status,
+ mongo::Fetcher::NextAction*,
+ mongo::BSONObjBuilder*) {
+ _lastOplogEntryFetcherCallbackForStopTimestamp(
+ status, onCompletionGuard);
+ },
+ kInitialSyncerHandlesRetries);
+ if (!status.isOK()) {
+ onCompletionGuard->setResultAndCancelRemainingWork_inlock(lock,
+ status);
+ }
+ });
if (scheduleStatus.isOK())
return;
// If scheduling failed, we're shutting down and cannot retry.
@@ -1569,7 +1569,7 @@ void InitialSyncer::_getNextApplierBatchCallback(
Date_t lastAppliedWall = ops.back().getWallClockTime();
auto numApplied = ops.size();
- MultiApplier::CallbackFn onCompletionFn = [=](const Status& s) {
+ MultiApplier::CallbackFn onCompletionFn = [=, this](const Status& s) {
return _multiApplierCallback(
s, {lastApplied, lastAppliedWall}, numApplied, onCompletionGuard);
};
@@ -1611,7 +1611,9 @@ void InitialSyncer::_getNextApplierBatchCallback(
auto when = (*_attemptExec)->now() + _opts.getApplierBatchCallbackRetryWait;
status = _scheduleWorkAtAndSaveHandle_inlock(
when,
- [=](const CallbackArgs& args) { _getNextApplierBatchCallback(args, onCompletionGuard); },
+ [=, this](const CallbackArgs& args) {
+ _getNextApplierBatchCallback(args, onCompletionGuard);
+ },
&_getNextApplierBatchHandle,
"_getNextApplierBatchCallback");
if (!status.isOK()) {
@@ -1722,8 +1724,10 @@ void InitialSyncer::_finishInitialSyncAttempt(const StatusWith<OpTimeAndWallTime
// declare the scope guard before the lock guard.
auto result = lastApplied;
ScopeGuard finishCallbackGuard([this, &result] {
- auto scheduleResult = _exec->scheduleWork(
- [=](const mongo::executor::TaskExecutor::CallbackArgs&) { _finishCallback(result); });
+ auto scheduleResult =
+ _exec->scheduleWork([=, this](const mongo::executor::TaskExecutor::CallbackArgs&) {
+ _finishCallback(result);
+ });
if (!scheduleResult.isOK()) {
LOGV2_WARNING(21197,
"Unable to schedule initial syncer completion task due to "
@@ -1811,7 +1815,7 @@ void InitialSyncer::_finishInitialSyncAttempt(const StatusWith<OpTimeAndWallTime
auto when = (*_attemptExec)->now() + _opts.initialSyncRetryWait;
auto status = _scheduleWorkAtAndSaveHandle_inlock(
when,
- [=](const executor::TaskExecutor::CallbackArgs& args) {
+ [=, this](const executor::TaskExecutor::CallbackArgs& args) {
_startInitialSyncAttemptCallback(
args, _stats.failedInitialSyncAttempts, _stats.maxFailedInitialSyncAttempts);
},
@@ -1987,7 +1991,7 @@ void InitialSyncer::_checkApplierProgressAndScheduleGetNextApplierBatch_inlock(
// Get another batch to apply.
// _scheduleWorkAndSaveHandle_inlock() is shutdown-aware.
auto status = _scheduleWorkAndSaveHandle_inlock(
- [=](const executor::TaskExecutor::CallbackArgs& args) {
+ [=, this](const executor::TaskExecutor::CallbackArgs& args) {
return _getNextApplierBatchCallback(args, onCompletionGuard);
},
&_getNextApplierBatchHandle,
@@ -2013,7 +2017,7 @@ void InitialSyncer::_scheduleRollbackCheckerCheckForRollback_inlock(
}
auto scheduleResult =
- _rollbackChecker->checkForRollback([=](const RollbackChecker::Result& result) {
+ _rollbackChecker->checkForRollback([=, this](const RollbackChecker::Result& result) {
_rollbackCheckerCheckForRollbackCallback(result, onCompletionGuard);
});
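A second detail worth noting in initial_syncer.cpp: the rewritten callbacks keep the by-copy default because they also capture locals such as onCompletionGuard by value, and that copy is what keeps the completion state alive until the scheduled work runs. A sketch of that lifetime effect under the same hypothetical `Executor`/`Syncer` names as above, with a plain std::shared_ptr guard standing in for MongoDB's completion guard:

// Sketch: the by-copy capture keeps a shared guard alive until the deferred
// callback fires. Compile with -std=c++20.
#include <functional>
#include <iostream>
#include <memory>
#include <utility>
#include <vector>

struct Executor {
    std::vector<std::function<void()>> work;
    void schedule(std::function<void()> fn) { work.push_back(std::move(fn)); }
    void drain() {
        for (auto& fn : work)
            fn();
        work.clear();  // destroying the closures releases their captures
    }
};

struct CompletionGuard {
    ~CompletionGuard() { std::cout << "attempt completion signalled\n"; }
};

class Syncer {
public:
    explicit Syncer(Executor* exec) : _exec(exec) {}

    void runAttempt() {
        auto onCompletionGuard = std::make_shared<CompletionGuard>();
        // The by-copy default copies the shared_ptr into the closure, so the
        // guard cannot fire before the scheduled callback has run.
        _exec->schedule([=, this] { _finish(onCompletionGuard); });
    }  // the local shared_ptr dies here; the closure's copy keeps the guard alive

private:
    void _finish(const std::shared_ptr<CompletionGuard>& guard) {
        std::cout << "callback ran (guard use_count=" << guard.use_count() << ")\n";
    }

    Executor* _exec;
};

int main() {
    Executor exec;
    Syncer syncer(&exec);
    syncer.runAttempt();
    exec.drain();  // prints "callback ran ..." and then "attempt completion signalled"
    return 0;
}

This is why the diff spells the captures [=, this] rather than narrowing them to an explicit [this, ...] list: the copy semantics of the existing code are preserved verbatim.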
diff --git a/src/mongo/db/repl/multiapplier.cpp b/src/mongo/db/repl/multiapplier.cpp
index e4c9a20079f..b967899e217 100644
--- a/src/mongo/db/repl/multiapplier.cpp
+++ b/src/mongo/db/repl/multiapplier.cpp
@@ -84,7 +84,7 @@ Status MultiApplier::startup() noexcept {
}
auto scheduleResult = _executor->scheduleWork(
- [=](const executor::TaskExecutor::CallbackArgs& cbd) { return _callback(cbd); });
+ [=, this](const executor::TaskExecutor::CallbackArgs& cbd) { return _callback(cbd); });
if (!scheduleResult.isOK()) {
_state = State::kComplete;
return scheduleResult.getStatus();
diff --git a/src/mongo/db/repl/replication_coordinator_impl.cpp b/src/mongo/db/repl/replication_coordinator_impl.cpp
index 695e8d55298..c402aca5bdd 100644
--- a/src/mongo/db/repl/replication_coordinator_impl.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl.cpp
@@ -552,7 +552,7 @@ bool ReplicationCoordinatorImpl::_startLoadLocalConfig(
// that the server's networking layer be up and running and accepting connections, which
// doesn't happen until startReplication finishes.
auto handle =
- _replExecutor->scheduleWork([=](const executor::TaskExecutor::CallbackArgs& args) {
+ _replExecutor->scheduleWork([=, this](const executor::TaskExecutor::CallbackArgs& args) {
_finishLoadLocalConfig(args, localConfig, lastOpTimeAndWallTimeResult, lastVote);
});
if (handle == ErrorCodes::ShutdownInProgress) {
@@ -816,15 +816,16 @@ void ReplicationCoordinatorImpl::_initialSyncerCompletionFunction(
"error"_attr = opTimeStatus.getStatus());
lock.unlock();
clearSyncSourceDenylist();
- _scheduleWorkAt(_replExecutor->now(),
- [=](const mongo::executor::TaskExecutor::CallbackArgs& cbData) {
- _startInitialSync(
- cc().makeOperationContext().get(),
- [this](const StatusWith<OpTimeAndWallTime>& opTimeStatus) {
- _initialSyncerCompletionFunction(opTimeStatus);
- },
- true /* fallbackToLogical */);
- });
+ _scheduleWorkAt(
+ _replExecutor->now(),
+ [=, this](const mongo::executor::TaskExecutor::CallbackArgs& cbData) {
+ _startInitialSync(
+ cc().makeOperationContext().get(),
+ [this](const StatusWith<OpTimeAndWallTime>& opTimeStatus) {
+ _initialSyncerCompletionFunction(opTimeStatus);
+ },
+ true /* fallbackToLogical */);
+ });
return;
} else {
LOGV2_ERROR(21416,
@@ -2930,7 +2931,7 @@ void ReplicationCoordinatorImpl::stepDown(OperationContext* opCtx,
updateMemberState();
// Schedule work to (potentially) step back up once the stepdown period has ended.
- _scheduleWorkAt(stepDownUntil, [=](const executor::TaskExecutor::CallbackArgs& cbData) {
+ _scheduleWorkAt(stepDownUntil, [=, this](const executor::TaskExecutor::CallbackArgs& cbData) {
_handleTimePassing(cbData);
});
@@ -3535,7 +3536,7 @@ Status ReplicationCoordinatorImpl::processReplSetSyncFrom(OperationContext* opCt
}
Status ReplicationCoordinatorImpl::processReplSetFreeze(int secs, BSONObjBuilder* resultObj) {
- auto result = [=]() {
+ auto result = [=, this]() {
stdx::lock_guard<Latch> lock(_mutex);
return _topCoord->prepareFreezeResponse(_replExecutor->now(), secs, resultObj);
}();
@@ -3767,7 +3768,7 @@ Status ReplicationCoordinatorImpl::_doReplSetReconfig(OperationContext* opCtx,
_setConfigState_inlock(kConfigReconfiguring);
auto configStateGuard =
- ScopeGuard([&] { lockAndCall(&lk, [=] { _setConfigState_inlock(kConfigSteady); }); });
+ ScopeGuard([&] { lockAndCall(&lk, [=, this] { _setConfigState_inlock(kConfigSteady); }); });
ReplSetConfig oldConfig = _rsConfig;
int myIndex = _selfIndex;
@@ -4316,7 +4317,7 @@ Status ReplicationCoordinatorImpl::processReplSetInitiate(OperationContext* opCt
_setConfigState_inlock(kConfigInitiating);
ScopeGuard configStateGuard = [&] {
- lockAndCall(&lk, [=] { _setConfigState_inlock(kConfigUninitialized); });
+ lockAndCall(&lk, [=, this] { _setConfigState_inlock(kConfigUninitialized); });
};
// When writing our first oplog entry below, disable advancement of the stable timestamp so that
@@ -4670,7 +4671,7 @@ ReplicationCoordinatorImpl::_updateMemberStateFromTopologyCoordinator(WithLock l
if (_memberState.removed() && !newState.arbiter()) {
LOGV2(5268000, "Scheduling a task to begin or continue replication");
_scheduleWorkAt(_replExecutor->now(),
- [=](const mongo::executor::TaskExecutor::CallbackArgs& cbData) {
+ [=, this](const mongo::executor::TaskExecutor::CallbackArgs& cbData) {
_externalState->startThreads();
auto opCtx = cc().makeOperationContext();
_startDataReplication(opCtx.get());
@@ -5347,7 +5348,7 @@ void ReplicationCoordinatorImpl::_undenylistSyncSource(
void ReplicationCoordinatorImpl::denylistSyncSource(const HostAndPort& host, Date_t until) {
stdx::lock_guard<Latch> lock(_mutex);
_topCoord->denylistSyncSource(host, until);
- _scheduleWorkAt(until, [=](const executor::TaskExecutor::CallbackArgs& cbData) {
+ _scheduleWorkAt(until, [=, this](const executor::TaskExecutor::CallbackArgs& cbData) {
_undenylistSyncSource(cbData, host);
});
}
diff --git a/src/mongo/db/repl/replication_coordinator_impl_elect_v1.cpp b/src/mongo/db/repl/replication_coordinator_impl_elect_v1.cpp
index 7a4ce7c783a..48f96efcba9 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_elect_v1.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_elect_v1.cpp
@@ -221,7 +221,7 @@ void ReplicationCoordinatorImpl::ElectionState::start(WithLock lk, StartElection
fassert(28685, nextPhaseEvh.getStatus());
_replExecutor
->onEvent(nextPhaseEvh.getValue(),
- [=](const executor::TaskExecutor::CallbackArgs&) {
+ [=, this](const executor::TaskExecutor::CallbackArgs&) {
_processDryRunResult(term, reason);
})
.status_with_transitional_ignore();
@@ -402,7 +402,7 @@ void ReplicationCoordinatorImpl::ElectionState::_requestVotesForRealElection(
fassert(28643, nextPhaseEvh.getStatus());
_replExecutor
->onEvent(nextPhaseEvh.getValue(),
- [=](const executor::TaskExecutor::CallbackArgs&) {
+ [=, this](const executor::TaskExecutor::CallbackArgs&) {
if (MONGO_unlikely(hangBeforeOnVoteRequestCompleteCallback.shouldFail())) {
LOGV2(7277400,
"Hang due to hangBeforeOnVoteRequestCompleteCallback failpoint");
diff --git a/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp b/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp
index 4330baaf036..ce1c96b6c8b 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp
@@ -123,7 +123,7 @@ void ReplicationCoordinatorImpl::_doMemberHeartbeat(executor::TaskExecutor::Call
const RemoteCommandRequest request(
target, "admin", heartbeatObj, BSON(rpc::kReplSetMetadataFieldName << 1), nullptr, timeout);
const executor::TaskExecutor::RemoteCommandCallbackFn callback =
- [=](const executor::TaskExecutor::RemoteCommandCallbackArgs& cbData) {
+ [=, this](const executor::TaskExecutor::RemoteCommandCallbackArgs& cbData) {
return _handleHeartbeatResponse(cbData, replSetName);
};
@@ -149,7 +149,7 @@ void ReplicationCoordinatorImpl::_scheduleHeartbeatToTarget_inlock(const HostAnd
"when"_attr = when);
_trackHeartbeatHandle_inlock(
_replExecutor->scheduleWorkAt(when,
- [=, replSetName = std::move(replSetName)](
+ [=, this, replSetName = std::move(replSetName)](
const executor::TaskExecutor::CallbackArgs& cbData) {
_doMemberHeartbeat(cbData, target, replSetName);
}),
@@ -354,7 +354,7 @@ void ReplicationCoordinatorImpl::_handleHeartbeatResponse(
if (mem && mem->isNewlyAdded()) {
const auto memId = mem->getId();
auto status = _replExecutor->scheduleWork(
- [=](const executor::TaskExecutor::CallbackArgs& cbData) {
+ [=, this](const executor::TaskExecutor::CallbackArgs& cbData) {
_reconfigToRemoveNewlyAddedField(
cbData, memId, _rsConfig.getConfigVersionAndTerm());
});
@@ -445,7 +445,8 @@ stdx::unique_lock<Latch> ReplicationCoordinatorImpl::_handleHeartbeatResponseAct
"Scheduling priority takeover",
"when"_attr = _priorityTakeoverWhen);
_priorityTakeoverCbh = _scheduleWorkAt(
- _priorityTakeoverWhen, [=](const mongo::executor::TaskExecutor::CallbackArgs&) {
+ _priorityTakeoverWhen,
+ [=, this](const mongo::executor::TaskExecutor::CallbackArgs&) {
_startElectSelfIfEligibleV1(StartElectionReasonEnum::kPriorityTakeover);
});
}
@@ -462,7 +463,8 @@ stdx::unique_lock<Latch> ReplicationCoordinatorImpl::_handleHeartbeatResponseAct
"Scheduling catchup takeover",
"when"_attr = _catchupTakeoverWhen);
_catchupTakeoverCbh = _scheduleWorkAt(
- _catchupTakeoverWhen, [=](const mongo::executor::TaskExecutor::CallbackArgs&) {
+ _catchupTakeoverWhen,
+ [=, this](const mongo::executor::TaskExecutor::CallbackArgs&) {
_startElectSelfIfEligibleV1(StartElectionReasonEnum::kCatchupTakeover);
});
}
@@ -512,7 +514,7 @@ executor::TaskExecutor::EventHandle ReplicationCoordinatorImpl::_stepDownStart()
}
_replExecutor
- ->scheduleWork([=](const executor::TaskExecutor::CallbackArgs& cbData) {
+ ->scheduleWork([=, this](const executor::TaskExecutor::CallbackArgs& cbData) {
_stepDownFinish(cbData, finishEvent);
})
.status_with_transitional_ignore();
@@ -658,7 +660,7 @@ void ReplicationCoordinatorImpl::_scheduleHeartbeatReconfig(WithLock lk,
_rsConfig.getConfigVersionAndTerm() < newConfig.getConfigVersionAndTerm() ||
_selfIndex < 0);
_replExecutor
- ->scheduleWork([=](const executor::TaskExecutor::CallbackArgs& cbData) {
+ ->scheduleWork([=, this](const executor::TaskExecutor::CallbackArgs& cbData) {
const auto [swConfig, isSplitRecipientConfig] = _resolveConfigToApply(newConfig);
if (!swConfig.isOK()) {
LOGV2_WARNING(
@@ -679,24 +681,24 @@ void ReplicationCoordinatorImpl::_scheduleHeartbeatReconfig(WithLock lk,
}
LOGV2(8423366, "Waiting for oplog buffer to drain before applying recipient config.");
- _drainForShardSplit().getAsync(
- [this,
- resolvedConfig = swConfig.getValue(),
- replExecutor = _replExecutor.get(),
- isSplitRecipientConfig = isSplitRecipientConfig](Status status) {
- if (!status.isOK()) {
- stdx::lock_guard<Latch> lg(_mutex);
- _setConfigState_inlock(!_rsConfig.isInitialized() ? kConfigUninitialized
- : kConfigSteady);
- return;
- }
-
- replExecutor
- ->scheduleWork([=](const executor::TaskExecutor::CallbackArgs& cbData) {
- _heartbeatReconfigStore(cbData, resolvedConfig, isSplitRecipientConfig);
- })
- .status_with_transitional_ignore();
- });
+ _drainForShardSplit().getAsync([this,
+ resolvedConfig = swConfig.getValue(),
+ replExecutor = _replExecutor.get(),
+ isSplitRecipientConfig =
+ isSplitRecipientConfig](Status status) {
+ if (!status.isOK()) {
+ stdx::lock_guard<Latch> lg(_mutex);
+ _setConfigState_inlock(!_rsConfig.isInitialized() ? kConfigUninitialized
+ : kConfigSteady);
+ return;
+ }
+
+ replExecutor
+ ->scheduleWork([=, this](const executor::TaskExecutor::CallbackArgs& cbData) {
+ _heartbeatReconfigStore(cbData, resolvedConfig, isSplitRecipientConfig);
+ })
+ .status_with_transitional_ignore();
+ });
})
.status_with_transitional_ignore();
}
@@ -938,7 +940,7 @@ void ReplicationCoordinatorImpl::_heartbeatReconfigFinish(
"_heartbeatReconfigFinish until fail point is disabled");
_replExecutor
->scheduleWorkAt(_replExecutor->now() + Milliseconds{10},
- [=](const executor::TaskExecutor::CallbackArgs& cbData) {
+ [=, this](const executor::TaskExecutor::CallbackArgs& cbData) {
_heartbeatReconfigFinish(
cbData, newConfig, myIndex, isSplitRecipientConfig);
})
@@ -963,7 +965,7 @@ void ReplicationCoordinatorImpl::_heartbeatReconfigFinish(
// Wait for the election to complete and the node's Role to be set to follower.
_replExecutor
->onEvent(electionFinishedEvent,
- [=](const executor::TaskExecutor::CallbackArgs& cbData) {
+ [=, this](const executor::TaskExecutor::CallbackArgs& cbData) {
_heartbeatReconfigFinish(
cbData, newConfig, myIndex, isSplitRecipientConfig);
})
diff --git a/src/mongo/db/repl/replication_coordinator_impl_test.cpp b/src/mongo/db/repl/replication_coordinator_impl_test.cpp
index 7754fcddb6d..ae89606f267 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_test.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_test.cpp
@@ -1561,7 +1561,7 @@ protected:
bool force, Milliseconds waitTime, Milliseconds stepDownTime) {
using PromisedClientAndOperation = stdx::promise<SharedClientAndOperation>;
auto task = stdx::packaged_task<boost::optional<Status>(PromisedClientAndOperation)>(
- [=](PromisedClientAndOperation operationPromise) -> boost::optional<Status> {
+ [=, this](PromisedClientAndOperation operationPromise) -> boost::optional<Status> {
auto result = SharedClientAndOperation::make(getServiceContext());
operationPromise.set_value(result);
try {
diff --git a/src/mongo/db/repl/reporter.cpp b/src/mongo/db/repl/reporter.cpp
index c135c179877..5ef1405d4df 100644
--- a/src/mongo/db/repl/reporter.cpp
+++ b/src/mongo/db/repl/reporter.cpp
@@ -142,7 +142,7 @@ Status Reporter::trigger() {
}
auto scheduleResult =
- _executor->scheduleWork([=](const executor::TaskExecutor::CallbackArgs& args) {
+ _executor->scheduleWork([=, this](const executor::TaskExecutor::CallbackArgs& args) {
_prepareAndSendCommandCallback(args, true);
});
@@ -252,7 +252,7 @@ void Reporter::_processResponseCallback(
auto when = _executor->now() + _keepAliveInterval;
bool fromTrigger = false;
auto scheduleResult = _executor->scheduleWorkAt(
- when, [=](const executor::TaskExecutor::CallbackArgs& args) {
+ when, [=, this](const executor::TaskExecutor::CallbackArgs& args) {
_prepareAndSendCommandCallback(args, fromTrigger);
});
_status = scheduleResult.getStatus();
diff --git a/src/mongo/db/repl/sync_source_resolver.cpp b/src/mongo/db/repl/sync_source_resolver.cpp
index 8460ce006d4..29cfb6f1b13 100644
--- a/src/mongo/db/repl/sync_source_resolver.cpp
+++ b/src/mongo/db/repl/sync_source_resolver.cpp
@@ -174,9 +174,9 @@ std::unique_ptr<Fetcher> SyncSourceResolver::_makeFirstOplogEntryFetcher(
<< BSON(OplogEntryBase::kTimestampFieldName
<< 1 << OplogEntryBase::kTermFieldName << 1)
<< ReadConcernArgs::kReadConcernFieldName << ReadConcernArgs::kLocal),
- [=](const StatusWith<Fetcher::QueryResponse>& response,
- Fetcher::NextAction*,
- BSONObjBuilder*) {
+ [=, this](const StatusWith<Fetcher::QueryResponse>& response,
+ Fetcher::NextAction*,
+ BSONObjBuilder*) {
return _firstOplogEntryFetcherCallback(response, candidate, earliestOpTimeSeen);
},
ReadPreferenceSetting::secondaryPreferredMetadata(),
@@ -198,9 +198,9 @@ std::unique_ptr<Fetcher> SyncSourceResolver::_makeRequiredOpTimeFetcher(HostAndP
<< BSON("ts" << BSON("$gte" << _requiredOpTime.getTimestamp() << "$lte"
<< _requiredOpTime.getTimestamp()))
<< ReadConcernArgs::kReadConcernFieldName << ReadConcernArgs::kLocal),
- [=](const StatusWith<Fetcher::QueryResponse>& response,
- Fetcher::NextAction*,
- BSONObjBuilder*) {
+ [=, this](const StatusWith<Fetcher::QueryResponse>& response,
+ Fetcher::NextAction*,
+ BSONObjBuilder*) {
return _requiredOpTimeFetcherCallback(response, candidate, earliestOpTimeSeen, rbid);
},
ReadPreferenceSetting::secondaryPreferredMetadata(),
@@ -401,7 +401,7 @@ Status SyncSourceResolver::_scheduleRBIDRequest(HostAndPort candidate, OpTime ea
invariant(_state == State::kRunning);
auto handle = _taskExecutor->scheduleRemoteCommand(
{candidate, "admin", BSON("replSetGetRBID" << 1), nullptr, kFetcherTimeout},
- [=](const executor::TaskExecutor::RemoteCommandCallbackArgs& rbidReply) {
+ [=, this](const executor::TaskExecutor::RemoteCommandCallbackArgs& rbidReply) {
_rbidRequestCallback(candidate, earliestOpTimeSeen, rbidReply);
});
if (!handle.isOK()) {
diff --git a/src/mongo/db/repl/tenant_file_cloner.cpp b/src/mongo/db/repl/tenant_file_cloner.cpp
index 0da47917aab..3b0ec54cbeb 100644
--- a/src/mongo/db/repl/tenant_file_cloner.cpp
+++ b/src/mongo/db/repl/tenant_file_cloner.cpp
@@ -231,9 +231,10 @@ void TenantFileCloner::handleNextBatch(DBClientCursor& cursor) {
}
// Schedule the next set of writes.
- auto&& scheduleResult = _scheduleFsWorkFn([=](const executor::TaskExecutor::CallbackArgs& cbd) {
- writeDataToFilesystemCallback(cbd);
- });
+ auto&& scheduleResult =
+ _scheduleFsWorkFn([=, this](const executor::TaskExecutor::CallbackArgs& cbd) {
+ writeDataToFilesystemCallback(cbd);
+ });
if (!scheduleResult.isOK()) {
Status newStatus = scheduleResult.getStatus().withContext(
diff --git a/src/mongo/db/repl/tenant_oplog_applier.cpp b/src/mongo/db/repl/tenant_oplog_applier.cpp
index 41b9c1a1738..e7cfd088d24 100644
--- a/src/mongo/db/repl/tenant_oplog_applier.cpp
+++ b/src/mongo/db/repl/tenant_oplog_applier.cpp
@@ -502,7 +502,7 @@ TenantOplogApplier::OpTimePair TenantOplogApplier::_writeNoOpEntries(
if (thread == numOplogThreads - 1) {
numOps = numOpsRemaining;
}
- _writerPool->schedule([=, &status = statusVector.at(thread)](auto scheduleStatus) {
+ _writerPool->schedule([=, this, &status = statusVector.at(thread)](auto scheduleStatus) {
if (!scheduleStatus.isOK()) {
status = scheduleStatus;
} else {
@@ -521,18 +521,19 @@ TenantOplogApplier::OpTimePair TenantOplogApplier::_writeNoOpEntries(
// Dispatch noop writes for oplog entries from the same session into the same writer thread.
size_t sessionThreadNum = 0;
for (const auto& s : sessionOps) {
- _writerPool->schedule([=, &status = statusVector.at(numOplogThreads + sessionThreadNum)](
- auto scheduleStatus) {
- if (!scheduleStatus.isOK()) {
- status = scheduleStatus;
- } else {
- try {
- _writeSessionNoOpsForRange(s.second.begin(), s.second.end());
- } catch (const DBException& e) {
- status = e.toStatus();
+ _writerPool->schedule(
+ [=, this, &status = statusVector.at(numOplogThreads + sessionThreadNum)](
+ auto scheduleStatus) {
+ if (!scheduleStatus.isOK()) {
+ status = scheduleStatus;
+ } else {
+ try {
+ _writeSessionNoOpsForRange(s.second.begin(), s.second.end());
+ } catch (const DBException& e) {
+ status = e.toStatus();
+ }
}
- }
- });
+ });
sessionThreadNum++;
}
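The tenant_oplog_applier.cpp hunks combine the explicit `this` with a reference init-capture, `&status = statusVector.at(...)`, so each writer reports its outcome through its own pre-allocated slot while everything else is copied. A small sketch of that shape, with std::thread standing in for the writer pool and std::string standing in for mongo::Status; `Applier`, `writeEntries`, and `_applyBatch` are hypothetical names:

// Sketch of the [=, this, &status = ...] init-capture shape used in
// _writeNoOpEntries. Compile with -std=c++20 -pthread.
#include <iostream>
#include <stdexcept>
#include <string>
#include <thread>
#include <vector>

class Applier {
public:
    void writeEntries(int numThreads) {
        std::vector<std::string> statusVector(numThreads, "OK");
        std::vector<std::thread> workers;
        for (int thread = 0; thread < numThreads; ++thread) {
            // `&status = statusVector.at(thread)` binds a reference to this
            // worker's own slot; `=` and `this` copy everything else.
            workers.emplace_back([=, this, &status = statusVector.at(thread)] {
                try {
                    _applyBatch(thread);
                } catch (const std::exception& e) {
                    status = e.what();  // report failure through the caller's slot
                }
            });
        }
        for (auto& w : workers)
            w.join();
        for (int i = 0; i < numThreads; ++i)
            std::cout << "writer " << i << ": " << statusVector[i] << "\n";
    }

private:
    void _applyBatch(int thread) {
        if (thread == 1)
            throw std::runtime_error("simulated batch failure");
    }
};

int main() {
    Applier{}.writeEntries(3);
    return 0;
}

The sketch joins the workers before statusVector goes out of scope, which mirrors the obvious requirement on the real statusVector: the referenced slots must outlive every scheduled task.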