Diffstat (limited to 'src/mongo/db/repl')
-rw-r--r--  src/mongo/db/repl/collection_bulk_loader_impl.cpp               2
-rw-r--r--  src/mongo/db/repl/initial_syncer.cpp                            2
-rw-r--r--  src/mongo/db/repl/oplog.cpp                                     2
-rw-r--r--  src/mongo/db/repl/repl_set_commands.cpp                         2
-rw-r--r--  src/mongo/db/repl/replication_coordinator_impl.cpp             11
-rw-r--r--  src/mongo/db/repl/scatter_gather_runner.cpp                     2
-rw-r--r--  src/mongo/db/repl/tenant_migration_recipient_service_test.cpp  2
-rw-r--r--  src/mongo/db/repl/topology_version_observer.cpp                 2
-rw-r--r--  src/mongo/db/repl/transaction_oplog_application.cpp             2
-rw-r--r--  src/mongo/db/repl/vote_requester.cpp                            2
10 files changed, 15 insertions, 14 deletions
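
Every hunk below makes the same mechanical change: the makeGuard() factory is replaced by constructing ScopeGuard directly, which C++17 class template argument deduction (CTAD) makes possible. The following is a minimal sketch of that pattern, assuming C++17; it is not MongoDB's util/scope_guard.h, and dismiss() is assumed here only because the "If we're not dismissed" comment in topology_version_observer.cpp below implies such a member.

    // Sketch only -- not MongoDB's util/scope_guard.h. Assumes C++17 so that
    // class template argument deduction can infer the callable type F.
    #include <utility>

    template <typename F>
    class ScopeGuard {
    public:
        ScopeGuard(F f) : _func(std::move(f)) {}  // non-explicit: also allows ScopeGuard g = lambda;
        ScopeGuard(ScopeGuard&& other) : _func(std::move(other._func)), _active(other._active) {
            other._active = false;  // the moved-from guard must not run the cleanup again
        }
        ScopeGuard(const ScopeGuard&) = delete;
        ScopeGuard& operator=(const ScopeGuard&) = delete;

        ~ScopeGuard() {
            if (_active)
                _func();  // run the cleanup when the guard goes out of scope
        }

        void dismiss() noexcept {
            _active = false;  // cancel the cleanup, e.g. on the success path
        }

    private:
        F _func;
        bool _active = true;
    };

    // The pre-CTAD spelling this diff removes: a factory so callers never had
    // to name the deduced callable type.
    template <typename F>
    ScopeGuard<F> makeGuard(F f) {
        return ScopeGuard<F>(std::move(f));
    }

With a class like this, ScopeGuard guard([this] { _releaseResources(); }); deduces the lambda type on its own, so the factory call at each site below becomes redundant.
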
diff --git a/src/mongo/db/repl/collection_bulk_loader_impl.cpp b/src/mongo/db/repl/collection_bulk_loader_impl.cpp
index 5224bc5c7bb..c06603c61ab 100644
--- a/src/mongo/db/repl/collection_bulk_loader_impl.cpp
+++ b/src/mongo/db/repl/collection_bulk_loader_impl.cpp
@@ -362,7 +362,7 @@ void CollectionBulkLoaderImpl::_releaseResources() {
template <typename F>
Status CollectionBulkLoaderImpl::_runTaskReleaseResourcesOnFailure(const F& task) noexcept {
AlternativeClientRegion acr(_client);
- auto guard = makeGuard([this] { _releaseResources(); });
+ ScopeGuard guard([this] { _releaseResources(); });
try {
const auto status = task();
if (status.isOK()) {
diff --git a/src/mongo/db/repl/initial_syncer.cpp b/src/mongo/db/repl/initial_syncer.cpp
index 5820d2e2ab5..a404f7b6ccf 100644
--- a/src/mongo/db/repl/initial_syncer.cpp
+++ b/src/mongo/db/repl/initial_syncer.cpp
@@ -1700,7 +1700,7 @@ void InitialSyncer::_finishInitialSyncAttempt(const StatusWith<OpTimeAndWallTime
// if the task scheduling fails and we have to invoke _finishCallback() synchronously), we
// declare the scope guard before the lock guard.
auto result = lastApplied;
- auto finishCallbackGuard = makeGuard([this, &result] {
+ ScopeGuard finishCallbackGuard([this, &result] {
auto scheduleResult = _exec->scheduleWork(
[=](const mongo::executor::TaskExecutor::CallbackArgs&) { _finishCallback(result); });
if (!scheduleResult.isOK()) {
diff --git a/src/mongo/db/repl/oplog.cpp b/src/mongo/db/repl/oplog.cpp
index 1c48b3efd87..d4d35779ded 100644
--- a/src/mongo/db/repl/oplog.cpp
+++ b/src/mongo/db/repl/oplog.cpp
@@ -428,7 +428,7 @@ OpTime logOp(OperationContext* opCtx, MutableOplogEntry* oplogEntry) {
// again. For example, if the WUOW gets aborted within a writeConflictRetry loop, we need to
// reset the OpTime to null so a new OpTime will be assigned on retry.
OplogSlot slot = oplogEntry->getOpTime();
- auto resetOpTimeGuard = makeGuard([&, resetOpTimeOnExit = bool(slot.isNull())] {
+ ScopeGuard resetOpTimeGuard([&, resetOpTimeOnExit = bool(slot.isNull())] {
if (resetOpTimeOnExit)
oplogEntry->setOpTime(OplogSlot());
});
diff --git a/src/mongo/db/repl/repl_set_commands.cpp b/src/mongo/db/repl/repl_set_commands.cpp
index 135bdd13614..de1b9a458db 100644
--- a/src/mongo/db/repl/repl_set_commands.cpp
+++ b/src/mongo/db/repl/repl_set_commands.cpp
@@ -514,7 +514,7 @@ public:
_stepDownCmdsWithForceExecuted.increment();
}
- auto onExitGuard = makeGuard([&] {
+ ScopeGuard onExitGuard([&] {
if (force) {
_stepDownCmdsWithForceFailed.increment();
}
diff --git a/src/mongo/db/repl/replication_coordinator_impl.cpp b/src/mongo/db/repl/replication_coordinator_impl.cpp
index 44b1c72a7c9..5e5d1834ea0 100644
--- a/src/mongo/db/repl/replication_coordinator_impl.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl.cpp
@@ -2583,7 +2583,7 @@ void ReplicationCoordinatorImpl::stepDown(OperationContext* opCtx,
_waitingForRSTLAtStepDown++;
_fulfillTopologyChangePromise(lk);
}
- auto clearStepDownFlag = makeGuard([&] {
+ ScopeGuard clearStepDownFlag([&] {
stdx::lock_guard lk(_mutex);
_waitingForRSTLAtStepDown--;
_fulfillTopologyChangePromise(lk);
@@ -2651,7 +2651,7 @@ void ReplicationCoordinatorImpl::stepDown(OperationContext* opCtx,
_performPostMemberStateUpdateAction(action);
};
- auto onExitGuard = makeGuard([&] {
+ ScopeGuard onExitGuard([&] {
abortFn();
updateMemberState();
});
@@ -3492,7 +3492,7 @@ Status ReplicationCoordinatorImpl::_doReplSetReconfig(OperationContext* opCtx,
_setConfigState_inlock(kConfigReconfiguring);
auto configStateGuard =
- makeGuard([&] { lockAndCall(&lk, [=] { _setConfigState_inlock(kConfigSteady); }); });
+ ScopeGuard([&] { lockAndCall(&lk, [=] { _setConfigState_inlock(kConfigSteady); }); });
ReplSetConfig oldConfig = _rsConfig;
int myIndex = _selfIndex;
@@ -3933,8 +3933,9 @@ Status ReplicationCoordinatorImpl::processReplSetInitiate(OperationContext* opCt
invariant(!_rsConfig.isInitialized());
_setConfigState_inlock(kConfigInitiating);
- auto configStateGuard =
- makeGuard([&] { lockAndCall(&lk, [=] { _setConfigState_inlock(kConfigUninitialized); }); });
+ ScopeGuard configStateGuard = [&] {
+ lockAndCall(&lk, [=] { _setConfigState_inlock(kConfigUninitialized); });
+ };
// When writing our first oplog entry below, disable advancement of the stable timestamp so that
// we don't set it before setting our initial data timestamp. We will set it after we set our
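
Note that the processReplSetInitiate hunk above switches to copy-initializing the guard from the lambda (ScopeGuard configStateGuard = [&] { ... };) instead of passing the lambda to the constructor. Under CTAD the two spellings are equivalent as long as the constructor is not explicit. A small sketch, assuming the ScopeGuard sketch shown after the diffstat and a hypothetical cleanup() function that is not part of this diff:

    void cleanup() {}

    void example() {
        ScopeGuard direct([] { cleanup(); });   // direct initialization, as in most hunks here
        ScopeGuard copied = [] { cleanup(); };  // copy initialization, as in processReplSetInitiate
    }  // both guards run cleanup() when example() returns
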
diff --git a/src/mongo/db/repl/scatter_gather_runner.cpp b/src/mongo/db/repl/scatter_gather_runner.cpp
index c45401d53f0..f688c5cf0cd 100644
--- a/src/mongo/db/repl/scatter_gather_runner.cpp
+++ b/src/mongo/db/repl/scatter_gather_runner.cpp
@@ -101,7 +101,7 @@ StatusWith<EventHandle> ScatterGatherRunner::RunnerImpl::start(
return evh;
}
_sufficientResponsesReceived = evh.getValue();
- auto earlyReturnGuard = makeGuard([this] { _signalSufficientResponsesReceived(); });
+ ScopeGuard earlyReturnGuard([this] { _signalSufficientResponsesReceived(); });
std::vector<RemoteCommandRequest> requests = _algorithm->getRequests();
for (size_t i = 0; i < requests.size(); ++i) {
diff --git a/src/mongo/db/repl/tenant_migration_recipient_service_test.cpp b/src/mongo/db/repl/tenant_migration_recipient_service_test.cpp
index a303a3cc442..b30dee191a0 100644
--- a/src/mongo/db/repl/tenant_migration_recipient_service_test.cpp
+++ b/src/mongo/db/repl/tenant_migration_recipient_service_test.cpp
@@ -1560,7 +1560,7 @@ TEST_F(TenantMigrationRecipientServiceTest, TenantMigrationRecipientStartsCloner
stopFailPointEnableBlock fp("fpAfterCollectionClonerDone");
auto taskFp = globalFailPointRegistry().find("hangBeforeTaskCompletion");
- auto taskFpGuard = makeGuard([&taskFp] { taskFp->setMode(FailPoint::off); });
+ ScopeGuard taskFpGuard([&taskFp] { taskFp->setMode(FailPoint::off); });
auto initialTimesEntered = taskFp->setMode(FailPoint::alwaysOn);
diff --git a/src/mongo/db/repl/topology_version_observer.cpp b/src/mongo/db/repl/topology_version_observer.cpp
index 877466bf7f6..b0882e76b8d 100644
--- a/src/mongo/db/repl/topology_version_observer.cpp
+++ b/src/mongo/db/repl/topology_version_observer.cpp
@@ -129,7 +129,7 @@ void TopologyVersionObserver::_cacheHelloResponse(
LOGV2_DEBUG(4794600, 3, "Waiting for a topology change");
{
- auto cacheGuard = makeGuard([&] {
+ ScopeGuard cacheGuard([&] {
// If we're not dismissed, reset the _cache.
stdx::lock_guard lk(_mutex);
_cache.reset();
diff --git a/src/mongo/db/repl/transaction_oplog_application.cpp b/src/mongo/db/repl/transaction_oplog_application.cpp
index 103a5c4e149..844971df246 100644
--- a/src/mongo/db/repl/transaction_oplog_application.cpp
+++ b/src/mongo/db/repl/transaction_oplog_application.cpp
@@ -433,7 +433,7 @@ Status _applyPrepareTransaction(OperationContext* opCtx,
// Release the WUOW, transaction lock resources and abort storage transaction so that the
// writeConflictRetry loop will be able to retry applying transactional ops on WCE error.
- auto abortOnError = makeGuard([&txnParticipant, opCtx] {
+ ScopeGuard abortOnError([&txnParticipant, opCtx] {
// Abort the transaction and invalidate the session it is associated with.
txnParticipant.abortTransaction(opCtx);
txnParticipant.invalidate(opCtx);
diff --git a/src/mongo/db/repl/vote_requester.cpp b/src/mongo/db/repl/vote_requester.cpp
index 6a29e4785c8..f57765dc73c 100644
--- a/src/mongo/db/repl/vote_requester.cpp
+++ b/src/mongo/db/repl/vote_requester.cpp
@@ -117,7 +117,7 @@ void VoteRequester::Algorithm::processResponse(const RemoteCommandRequest& reque
// All local variables captured in logAttrs need to be above the guard that logs.
logv2::DynamicAttributes logAttrs;
auto logAtExit =
- makeGuard([&logAttrs]() { LOGV2(51799, "VoteRequester processResponse", logAttrs); });
+ ScopeGuard([&logAttrs]() { LOGV2(51799, "VoteRequester processResponse", logAttrs); });
logAttrs.add("term", _term);
logAttrs.add("dryRun", _dryRun);