Diffstat (limited to 'src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp')
-rw-r--r--    src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp    56
1 file changed, 29 insertions, 27 deletions
diff --git a/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp b/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp
index 4330baaf036..ce1c96b6c8b 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp
@@ -123,7 +123,7 @@ void ReplicationCoordinatorImpl::_doMemberHeartbeat(executor::TaskExecutor::Call
const RemoteCommandRequest request(
target, "admin", heartbeatObj, BSON(rpc::kReplSetMetadataFieldName << 1), nullptr, timeout);
const executor::TaskExecutor::RemoteCommandCallbackFn callback =
- [=](const executor::TaskExecutor::RemoteCommandCallbackArgs& cbData) {
+ [=, this](const executor::TaskExecutor::RemoteCommandCallbackArgs& cbData) {
return _handleHeartbeatResponse(cbData, replSetName);
};
@@ -149,7 +149,7 @@ void ReplicationCoordinatorImpl::_scheduleHeartbeatToTarget_inlock(const HostAnd
"when"_attr = when);
_trackHeartbeatHandle_inlock(
_replExecutor->scheduleWorkAt(when,
- [=, replSetName = std::move(replSetName)](
+ [=, this, replSetName = std::move(replSetName)](
const executor::TaskExecutor::CallbackArgs& cbData) {
_doMemberHeartbeat(cbData, target, replSetName);
}),
@@ -354,7 +354,7 @@ void ReplicationCoordinatorImpl::_handleHeartbeatResponse(
if (mem && mem->isNewlyAdded()) {
const auto memId = mem->getId();
auto status = _replExecutor->scheduleWork(
- [=](const executor::TaskExecutor::CallbackArgs& cbData) {
+ [=, this](const executor::TaskExecutor::CallbackArgs& cbData) {
_reconfigToRemoveNewlyAddedField(
cbData, memId, _rsConfig.getConfigVersionAndTerm());
});
@@ -445,7 +445,8 @@ stdx::unique_lock<Latch> ReplicationCoordinatorImpl::_handleHeartbeatResponseAct
"Scheduling priority takeover",
"when"_attr = _priorityTakeoverWhen);
_priorityTakeoverCbh = _scheduleWorkAt(
- _priorityTakeoverWhen, [=](const mongo::executor::TaskExecutor::CallbackArgs&) {
+ _priorityTakeoverWhen,
+ [=, this](const mongo::executor::TaskExecutor::CallbackArgs&) {
_startElectSelfIfEligibleV1(StartElectionReasonEnum::kPriorityTakeover);
});
}
@@ -462,7 +463,8 @@ stdx::unique_lock<Latch> ReplicationCoordinatorImpl::_handleHeartbeatResponseAct
"Scheduling catchup takeover",
"when"_attr = _catchupTakeoverWhen);
_catchupTakeoverCbh = _scheduleWorkAt(
- _catchupTakeoverWhen, [=](const mongo::executor::TaskExecutor::CallbackArgs&) {
+ _catchupTakeoverWhen,
+ [=, this](const mongo::executor::TaskExecutor::CallbackArgs&) {
_startElectSelfIfEligibleV1(StartElectionReasonEnum::kCatchupTakeover);
});
}
@@ -512,7 +514,7 @@ executor::TaskExecutor::EventHandle ReplicationCoordinatorImpl::_stepDownStart()
}
_replExecutor
- ->scheduleWork([=](const executor::TaskExecutor::CallbackArgs& cbData) {
+ ->scheduleWork([=, this](const executor::TaskExecutor::CallbackArgs& cbData) {
_stepDownFinish(cbData, finishEvent);
})
.status_with_transitional_ignore();
@@ -658,7 +660,7 @@ void ReplicationCoordinatorImpl::_scheduleHeartbeatReconfig(WithLock lk,
_rsConfig.getConfigVersionAndTerm() < newConfig.getConfigVersionAndTerm() ||
_selfIndex < 0);
_replExecutor
- ->scheduleWork([=](const executor::TaskExecutor::CallbackArgs& cbData) {
+ ->scheduleWork([=, this](const executor::TaskExecutor::CallbackArgs& cbData) {
const auto [swConfig, isSplitRecipientConfig] = _resolveConfigToApply(newConfig);
if (!swConfig.isOK()) {
LOGV2_WARNING(
@@ -679,24 +681,24 @@ void ReplicationCoordinatorImpl::_scheduleHeartbeatReconfig(WithLock lk,
}
LOGV2(8423366, "Waiting for oplog buffer to drain before applying recipient config.");
- _drainForShardSplit().getAsync(
- [this,
- resolvedConfig = swConfig.getValue(),
- replExecutor = _replExecutor.get(),
- isSplitRecipientConfig = isSplitRecipientConfig](Status status) {
- if (!status.isOK()) {
- stdx::lock_guard<Latch> lg(_mutex);
- _setConfigState_inlock(!_rsConfig.isInitialized() ? kConfigUninitialized
- : kConfigSteady);
- return;
- }
-
- replExecutor
- ->scheduleWork([=](const executor::TaskExecutor::CallbackArgs& cbData) {
- _heartbeatReconfigStore(cbData, resolvedConfig, isSplitRecipientConfig);
- })
- .status_with_transitional_ignore();
- });
+ _drainForShardSplit().getAsync([this,
+ resolvedConfig = swConfig.getValue(),
+ replExecutor = _replExecutor.get(),
+ isSplitRecipientConfig =
+ isSplitRecipientConfig](Status status) {
+ if (!status.isOK()) {
+ stdx::lock_guard<Latch> lg(_mutex);
+ _setConfigState_inlock(!_rsConfig.isInitialized() ? kConfigUninitialized
+ : kConfigSteady);
+ return;
+ }
+
+ replExecutor
+ ->scheduleWork([=, this](const executor::TaskExecutor::CallbackArgs& cbData) {
+ _heartbeatReconfigStore(cbData, resolvedConfig, isSplitRecipientConfig);
+ })
+ .status_with_transitional_ignore();
+ });
})
.status_with_transitional_ignore();
}
@@ -938,7 +940,7 @@ void ReplicationCoordinatorImpl::_heartbeatReconfigFinish(
"_heartbeatReconfigFinish until fail point is disabled");
_replExecutor
->scheduleWorkAt(_replExecutor->now() + Milliseconds{10},
- [=](const executor::TaskExecutor::CallbackArgs& cbData) {
+ [=, this](const executor::TaskExecutor::CallbackArgs& cbData) {
_heartbeatReconfigFinish(
cbData, newConfig, myIndex, isSplitRecipientConfig);
})
@@ -963,7 +965,7 @@ void ReplicationCoordinatorImpl::_heartbeatReconfigFinish(
// Wait for the election to complete and the node's Role to be set to follower.
_replExecutor
->onEvent(electionFinishedEvent,
- [=](const executor::TaskExecutor::CallbackArgs& cbData) {
+ [=, this](const executor::TaskExecutor::CallbackArgs& cbData) {
_heartbeatReconfigFinish(
cbData, newConfig, myIndex, isSplitRecipientConfig);
})