author     Billy Donahue <billy.donahue@mongodb.com>         2021-09-03 19:11:40 +0000
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>  2021-09-08 07:54:48 +0000
commit     c90f38192378c8526354465991830524eda90c1f (patch)
tree       73ce6a76f4dc6f8eaa206309becf5881aaf20a32 /src/mongo/db
parent     a0a0d7eecccf38f028a77c4cc8b0f13f8673f675 (diff)
download   mongo-c90f38192378c8526354465991830524eda90c1f.tar.gz
SERVER-59782 migrate makeGuard calls to ScopeGuard
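For context: the patch replaces the makeGuard() factory with direct construction of ScopeGuard, relying on C++17 class template argument deduction (CTAD), so call sites no longer go through a helper function to name the guard type. The sketch below is a minimal illustration of the idiom only, not MongoDB's actual implementation (the real class lives in src/mongo/util/scopeguard.h):

    #include <utility>

    // Minimal scope-guard sketch (illustrative only; not the MongoDB implementation).
    template <typename F>
    class ScopeGuard {
    public:
        ScopeGuard(F func) : _func(std::move(func)) {}  // non-explicit, so "ScopeGuard g = lambda;" also compiles
        ~ScopeGuard() {
            if (_active)
                _func();  // run the cleanup unless dismiss() was called
        }
        void dismiss() noexcept {
            _active = false;  // skip the cleanup (e.g. on the success path)
        }
        ScopeGuard(const ScopeGuard&) = delete;
        ScopeGuard& operator=(const ScopeGuard&) = delete;

    private:
        F _func;
        bool _active = true;
    };

    // Before this patch:  auto guard = makeGuard([&] { cleanup(); });
    // After this patch:   ScopeGuard guard([&] { cleanup(); });    // direct-init, most call sites
    //                     ScopeGuard guard = [&] { cleanup(); };   // copy-init, used for multi-line lambdas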
Diffstat (limited to 'src/mongo/db')
-rw-r--r--  src/mongo/db/catalog/catalog_control.cpp | 2
-rw-r--r--  src/mongo/db/catalog/database_holder_impl.cpp | 2
-rw-r--r--  src/mongo/db/catalog/drop_database.cpp | 8
-rw-r--r--  src/mongo/db/catalog/multi_index_block.cpp | 4
-rw-r--r--  src/mongo/db/catalog/rename_collection.cpp | 2
-rw-r--r--  src/mongo/db/client_out_of_line_executor.cpp | 2
-rw-r--r--  src/mongo/db/commands/drop_indexes.cpp | 2
-rw-r--r--  src/mongo/db/commands/getmore_cmd.cpp | 2
-rw-r--r--  src/mongo/db/commands/run_aggregate.cpp | 2
-rw-r--r--  src/mongo/db/commands/user_management_commands.cpp | 6
-rw-r--r--  src/mongo/db/commands/write_commands.cpp | 2
-rw-r--r--  src/mongo/db/concurrency/d_concurrency.cpp | 4
-rw-r--r--  src/mongo/db/concurrency/d_concurrency_test.cpp | 2
-rw-r--r--  src/mongo/db/concurrency/lock_state.cpp | 6
-rw-r--r--  src/mongo/db/default_baton.cpp | 2
-rw-r--r--  src/mongo/db/exec/delete.cpp | 2
-rw-r--r--  src/mongo/db/exec/js_function.cpp | 4
-rw-r--r--  src/mongo/db/exec/update_stage.cpp | 2
-rw-r--r--  src/mongo/db/field_parser.h | 2
-rw-r--r--  src/mongo/db/index/index_build_interceptor.cpp | 2
-rw-r--r--  src/mongo/db/index_builds_coordinator.cpp | 6
-rw-r--r--  src/mongo/db/index_builds_coordinator_mongod.cpp | 4
-rw-r--r--  src/mongo/db/logical_session_cache_impl.cpp | 6
-rw-r--r--  src/mongo/db/repl/collection_bulk_loader_impl.cpp | 2
-rw-r--r--  src/mongo/db/repl/initial_syncer.cpp | 2
-rw-r--r--  src/mongo/db/repl/oplog.cpp | 2
-rw-r--r--  src/mongo/db/repl/repl_set_commands.cpp | 2
-rw-r--r--  src/mongo/db/repl/replication_coordinator_impl.cpp | 11
-rw-r--r--  src/mongo/db/repl/scatter_gather_runner.cpp | 2
-rw-r--r--  src/mongo/db/repl/tenant_migration_recipient_service_test.cpp | 2
-rw-r--r--  src/mongo/db/repl/topology_version_observer.cpp | 2
-rw-r--r--  src/mongo/db/repl/transaction_oplog_application.cpp | 2
-rw-r--r--  src/mongo/db/repl/vote_requester.cpp | 2
-rw-r--r--  src/mongo/db/s/balancer/migration_manager.cpp | 4
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager.cpp | 2
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp | 2
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_shard_operations.cpp | 2
-rw-r--r--  src/mongo/db/s/dist_lock_manager.cpp | 2
-rw-r--r--  src/mongo/db/s/dist_lock_manager_replset.cpp | 2
-rw-r--r--  src/mongo/db/s/migration_destination_manager.cpp | 4
-rw-r--r--  src/mongo/db/s/migration_source_manager.cpp | 10
-rw-r--r--  src/mongo/db/s/move_primary_source_manager.cpp | 6
-rw-r--r--  src/mongo/db/s/resharding/resharding_collection_cloner.cpp | 2
-rw-r--r--  src/mongo/db/s/resharding/resharding_donor_oplog_iterator.cpp | 2
-rw-r--r--  src/mongo/db/s/resharding/resharding_op_observer.cpp | 2
-rw-r--r--  src/mongo/db/s/resharding/resharding_oplog_application.cpp | 2
-rw-r--r--  src/mongo/db/s/resharding/resharding_service_test_helpers.cpp | 2
-rw-r--r--  src/mongo/db/s/resharding/resharding_txn_cloner.cpp | 2
-rw-r--r--  src/mongo/db/s/transaction_coordinator_service_test.cpp | 4
-rw-r--r--  src/mongo/db/server_options_test.cpp | 8
-rw-r--r--  src/mongo/db/service_context.cpp | 6
-rw-r--r--  src/mongo/db/service_entry_point_common.cpp | 2
-rw-r--r--  src/mongo/db/sessions_collection_rs.cpp | 2
-rw-r--r--  src/mongo/db/startup_recovery.cpp | 4
-rw-r--r--  src/mongo/db/startup_warnings_common.cpp | 2
-rw-r--r--  src/mongo/db/stats/operation_latency_histogram_test.cpp | 2
-rw-r--r--  src/mongo/db/storage/storage_engine_impl.cpp | 2
-rw-r--r--  src/mongo/db/storage/storage_engine_init.cpp | 2
-rw-r--r--  src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp | 2
-rw-r--r--  src/mongo/db/storage/wiredtiger/wiredtiger_oplog_manager.cpp | 2
-rw-r--r--  src/mongo/db/storage/wiredtiger/wiredtiger_util.cpp | 10
-rw-r--r--  src/mongo/db/traffic_reader.cpp | 2
-rw-r--r--  src/mongo/db/transaction_participant.cpp | 8
-rw-r--r--  src/mongo/db/transaction_participant_test.cpp | 2
-rw-r--r--  src/mongo/db/views/view_graph.cpp | 2
65 files changed, 108 insertions(+), 105 deletions(-)
diff --git a/src/mongo/db/catalog/catalog_control.cpp b/src/mongo/db/catalog/catalog_control.cpp
index c5c1bb95f63..2df8a69f650 100644
--- a/src/mongo/db/catalog/catalog_control.cpp
+++ b/src/mongo/db/catalog/catalog_control.cpp
@@ -82,7 +82,7 @@ MinVisibleTimestampMap closeCatalog(OperationContext* opCtx) {
}
// Need to mark the CollectionCatalog as open if our closeAll fails, dismissed if successful.
- auto reopenOnFailure = makeGuard([opCtx] {
+ ScopeGuard reopenOnFailure([opCtx] {
CollectionCatalog::write(opCtx,
[&](CollectionCatalog& catalog) { catalog.onOpenCatalog(opCtx); });
});
diff --git a/src/mongo/db/catalog/database_holder_impl.cpp b/src/mongo/db/catalog/database_holder_impl.cpp
index b3b81174a77..35fa81a7e3e 100644
--- a/src/mongo/db/catalog/database_holder_impl.cpp
+++ b/src/mongo/db/catalog/database_holder_impl.cpp
@@ -137,7 +137,7 @@ Database* DatabaseHolderImpl::openDb(OperationContext* opCtx, StringData ns, boo
return db;
// We've inserted a nullptr entry for dbname: make sure to remove it on unsuccessful exit.
- auto removeDbGuard = makeGuard([this, &lk, dbname] {
+ ScopeGuard removeDbGuard([this, &lk, dbname] {
if (!lk.owns_lock())
lk.lock();
_dbs.erase(dbname);
diff --git a/src/mongo/db/catalog/drop_database.cpp b/src/mongo/db/catalog/drop_database.cpp
index 953b19e599a..a75e574a8b6 100644
--- a/src/mongo/db/catalog/drop_database.cpp
+++ b/src/mongo/db/catalog/drop_database.cpp
@@ -93,7 +93,7 @@ void _finishDropDatabase(OperationContext* opCtx,
invariant(opCtx->lockState()->isDbLockedForMode(dbName, MODE_X));
// If DatabaseHolder::dropDb() fails, we should reset the drop-pending state on Database.
- auto dropPendingGuard = makeGuard([db, opCtx] { db->setDropPending(opCtx, false); });
+ ScopeGuard dropPendingGuard([db, opCtx] { db->setDropPending(opCtx, false); });
if (!abortIndexBuilds) {
IndexBuildsCoordinator::get(opCtx)->assertNoBgOpInProgForDb(dbName);
@@ -170,7 +170,7 @@ Status _dropDatabase(OperationContext* opCtx, const std::string& dbName, bool ab
// If Database::dropCollectionEventIfSystem() fails, we should reset the drop-pending state
// on Database.
- auto dropPendingGuard = makeGuard([&db, opCtx] { db->setDropPending(opCtx, false); });
+ ScopeGuard dropPendingGuard([&db, opCtx] { db->setDropPending(opCtx, false); });
auto indexBuildsCoord = IndexBuildsCoordinator::get(opCtx);
if (abortIndexBuilds) {
@@ -180,7 +180,7 @@ Status _dropDatabase(OperationContext* opCtx, const std::string& dbName, bool ab
// Create a scope guard to reset the drop-pending state on the database to false if
// there is a replica state change that kills this operation while the locks were
// yielded.
- auto dropPendingGuardWhileUnlocked = makeGuard([dbName, opCtx, &dropPendingGuard] {
+ ScopeGuard dropPendingGuardWhileUnlocked([dbName, opCtx, &dropPendingGuard] {
UninterruptibleLockGuard noInterrupt(opCtx->lockState());
AutoGetDb autoDB(opCtx, dbName, MODE_IX);
if (auto db = autoDB.getDb()) {
@@ -296,7 +296,7 @@ Status _dropDatabase(OperationContext* opCtx, const std::string& dbName, bool ab
// Create a scope guard to reset the drop-pending state on the Database to false if there are
// any errors while we await the replication of any collection drops and then reacquire the
// locks (which can throw) needed to finish the drop database.
- auto dropPendingGuardWhileUnlocked = makeGuard([dbName, opCtx] {
+ ScopeGuard dropPendingGuardWhileUnlocked([dbName, opCtx] {
UninterruptibleLockGuard noInterrupt(opCtx->lockState());
AutoGetDb autoDB(opCtx, dbName, MODE_IX);
if (auto db = autoDB.getDb()) {
diff --git a/src/mongo/db/catalog/multi_index_block.cpp b/src/mongo/db/catalog/multi_index_block.cpp
index 5b99bf84e1e..137793a4c42 100644
--- a/src/mongo/db/catalog/multi_index_block.cpp
+++ b/src/mongo/db/catalog/multi_index_block.cpp
@@ -365,7 +365,7 @@ Status MultiIndexBlock::insertAllDocumentsInCollection(
// Refrain from persisting any multikey updates as a result from building the index. Instead,
// accumulate them in the `MultikeyPathTracker` and do the write as part of the update that
// commits the index.
- auto stopTracker = makeGuard(
+ ScopeGuard stopTracker(
[this, opCtx] { MultikeyPathTracker::get(opCtx).stopTrackingMultikeyPathInfo(); });
if (MultikeyPathTracker::get(opCtx).isTrackingMultikeyPathInfo()) {
stopTracker.dismiss();
@@ -846,7 +846,7 @@ Status MultiIndexBlock::commit(OperationContext* opCtx,
}
// Do not interfere with writing multikey information when committing index builds.
- auto restartTracker = makeGuard(
+ ScopeGuard restartTracker(
[this, opCtx] { MultikeyPathTracker::get(opCtx).startTrackingMultikeyPathInfo(); });
if (!MultikeyPathTracker::get(opCtx).isTrackingMultikeyPathInfo()) {
restartTracker.dismiss();
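The multi_index_block.cpp hunks above also show the dismiss() idiom most of these call sites follow: the guard is armed immediately after the state change it protects and dismissed once the cleanup is known to be unnecessary (here, when multikey path tracking is already owned by someone else) or once the success path is reached. A rough usage sketch, assuming the minimal ScopeGuard shown after the commit message; the tracking functions are stand-ins, not MongoDB APIs:

    #include <stdexcept>

    // Stand-in state and helpers, only for illustration.
    static bool g_tracking = false;
    void startTracking() { g_tracking = true; }
    void stopTracking() { g_tracking = false; }

    void doGuardedWork(bool fail) {
        startTracking();
        // Undo the state change if anything below throws.
        ScopeGuard stopTracker([] { stopTracking(); });
        if (fail)
            throw std::runtime_error("simulated failure");  // guard fires; tracking is stopped
        stopTracker.dismiss();  // success path: leave tracking enabled
    }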
diff --git a/src/mongo/db/catalog/rename_collection.cpp b/src/mongo/db/catalog/rename_collection.cpp
index 06f1fa8bc35..dddf2f4ffb2 100644
--- a/src/mongo/db/catalog/rename_collection.cpp
+++ b/src/mongo/db/catalog/rename_collection.cpp
@@ -573,7 +573,7 @@ Status renameBetweenDBs(OperationContext* opCtx,
}
// Dismissed on success
- auto tmpCollectionDropper = makeGuard([&] {
+ ScopeGuard tmpCollectionDropper([&] {
Status status = Status::OK();
try {
status = dropCollectionForApplyOps(
diff --git a/src/mongo/db/client_out_of_line_executor.cpp b/src/mongo/db/client_out_of_line_executor.cpp
index f82612266ba..785b090043b 100644
--- a/src/mongo/db/client_out_of_line_executor.cpp
+++ b/src/mongo/db/client_out_of_line_executor.cpp
@@ -113,7 +113,7 @@ void ClientOutOfLineExecutor::consumeAllTasks() noexcept {
}
void ClientOutOfLineExecutor::QueueHandle::schedule(Task&& task) {
- auto guard = makeGuard(
+ ScopeGuard guard(
[&task] { task(Status(ErrorCodes::CallbackCanceled, "Client no longer exists")); });
if (auto queue = _weakQueue.lock()) {
diff --git a/src/mongo/db/commands/drop_indexes.cpp b/src/mongo/db/commands/drop_indexes.cpp
index 781e71259e6..dfbf68fa9c8 100644
--- a/src/mongo/db/commands/drop_indexes.cpp
+++ b/src/mongo/db/commands/drop_indexes.cpp
@@ -232,7 +232,7 @@ public:
});
// The 'indexer' can throw, so ensure build cleanup occurs.
- auto abortOnExit = makeGuard([&] {
+ ScopeGuard abortOnExit([&] {
indexer->abortIndexBuild(opCtx, collection, MultiIndexBlock::kNoopOnCleanUpFn);
});
diff --git a/src/mongo/db/commands/getmore_cmd.cpp b/src/mongo/db/commands/getmore_cmd.cpp
index 9268249469a..75fba16a90b 100644
--- a/src/mongo/db/commands/getmore_cmd.cpp
+++ b/src/mongo/db/commands/getmore_cmd.cpp
@@ -512,7 +512,7 @@ public:
}
// On early return, get rid of the cursor.
- auto cursorFreer = makeGuard([&] { cursorPin.deleteUnderlying(); });
+ ScopeGuard cursorFreer([&] { cursorPin.deleteUnderlying(); });
// If the 'waitAfterPinningCursorBeforeGetMoreBatch' fail point is enabled, set the
// 'msg' field of this operation's CurOp to signal that we've hit this point and then
diff --git a/src/mongo/db/commands/run_aggregate.cpp b/src/mongo/db/commands/run_aggregate.cpp
index 87b8454f518..0102b719aac 100644
--- a/src/mongo/db/commands/run_aggregate.cpp
+++ b/src/mongo/db/commands/run_aggregate.cpp
@@ -844,7 +844,7 @@ Status runAggregate(OperationContext* opCtx,
std::vector<ClientCursorPin> pins;
std::vector<ClientCursor*> cursors;
- auto cursorFreer = makeGuard([&] {
+ ScopeGuard cursorFreer([&] {
for (auto& p : pins) {
p.deleteUnderlying();
}
diff --git a/src/mongo/db/commands/user_management_commands.cpp b/src/mongo/db/commands/user_management_commands.cpp
index d4da340dc5b..5990af74f85 100644
--- a/src/mongo/db/commands/user_management_commands.cpp
+++ b/src/mongo/db/commands/user_management_commands.cpp
@@ -1836,7 +1836,7 @@ void CmdUMCTyped<DropRoleCommand>::Invocation::typedRun(OperationContext* opCtx)
uassertStatusOK(authzManager->rolesExist(opCtx, {roleName}));
// From here on, we always want to invalidate the user cache before returning.
- auto invalidateGuard = makeGuard([&] {
+ ScopeGuard invalidateGuard([&] {
try {
authzManager->invalidateUserCache(opCtx);
} catch (const AssertionException& ex) {
@@ -1896,7 +1896,7 @@ DropAllRolesFromDatabaseReply CmdUMCTyped<DropAllRolesFromDatabaseCommand>::Invo
auto lk = uassertStatusOK(requireWritableAuthSchema28SCRAM(opCtx, authzManager));
// From here on, we always want to invalidate the user cache before returning.
- auto invalidateGuard = makeGuard([opCtx, authzManager] {
+ ScopeGuard invalidateGuard([opCtx, authzManager] {
try {
authzManager->invalidateUserCache(opCtx);
} catch (const AssertionException& ex) {
@@ -2394,7 +2394,7 @@ void CmdMergeAuthzCollections::Invocation::typedRun(OperationContext* opCtx) {
auto lk = uassertStatusOK(requireWritableAuthSchema28SCRAM(opCtx, authzManager));
// From here on, we always want to invalidate the user cache before returning.
- auto invalidateGuard = makeGuard([&] { authzManager->invalidateUserCache(opCtx); });
+ ScopeGuard invalidateGuard([&] { authzManager->invalidateUserCache(opCtx); });
const auto db = cmd.getDb();
const bool drop = cmd.getDrop();
diff --git a/src/mongo/db/commands/write_commands.cpp b/src/mongo/db/commands/write_commands.cpp
index 3d78ebe912c..3de92c37fc3 100644
--- a/src/mongo/db/commands/write_commands.cpp
+++ b/src/mongo/db/commands/write_commands.cpp
@@ -667,7 +667,7 @@ public:
return;
}
// Now that the batch is prepared, make sure we clean up if we throw.
- auto batchGuard = makeGuard([&] { bucketCatalog.abort(batch); });
+ ScopeGuard batchGuard([&] { bucketCatalog.abort(batch); });
hangTimeseriesInsertBeforeWrite.pauseWhileSet();
diff --git a/src/mongo/db/concurrency/d_concurrency.cpp b/src/mongo/db/concurrency/d_concurrency.cpp
index 115540aaa64..5e8bd537b55 100644
--- a/src/mongo/db/concurrency/d_concurrency.cpp
+++ b/src/mongo/db/concurrency/d_concurrency.cpp
@@ -154,7 +154,7 @@ Lock::GlobalLock::GlobalLock(OperationContext* opCtx,
if (_opCtx->lockState()->shouldConflictWithSecondaryBatchApplication()) {
_pbwm.lock(opCtx, MODE_IS, deadline);
}
- auto unlockPBWM = makeGuard([this] {
+ ScopeGuard unlockPBWM([this] {
if (_opCtx->lockState()->shouldConflictWithSecondaryBatchApplication()) {
_pbwm.unlock();
}
@@ -184,7 +184,7 @@ void Lock::GlobalLock::_takeGlobalLockOnly(LockMode lockMode, Date_t deadline) {
void Lock::GlobalLock::_takeGlobalAndRSTLLocks(LockMode lockMode, Date_t deadline) {
_opCtx->lockState()->lock(_opCtx, resourceIdReplicationStateTransitionLock, MODE_IX, deadline);
- auto unlockRSTL = makeGuard(
+ ScopeGuard unlockRSTL(
[this] { _opCtx->lockState()->unlock(resourceIdReplicationStateTransitionLock); });
_opCtx->lockState()->lockGlobal(_opCtx, lockMode, deadline);
diff --git a/src/mongo/db/concurrency/d_concurrency_test.cpp b/src/mongo/db/concurrency/d_concurrency_test.cpp
index 6154cb14b49..9af772ff4fe 100644
--- a/src/mongo/db/concurrency/d_concurrency_test.cpp
+++ b/src/mongo/db/concurrency/d_concurrency_test.cpp
@@ -113,7 +113,7 @@ public:
auto result = task.get_future();
stdx::thread taskThread{std::move(task)};
- auto taskThreadJoiner = makeGuard([&] { taskThread.join(); });
+ ScopeGuard taskThreadJoiner([&] { taskThread.join(); });
{
stdx::lock_guard<Client> clientLock(*opCtx->getClient());
diff --git a/src/mongo/db/concurrency/lock_state.cpp b/src/mongo/db/concurrency/lock_state.cpp
index eb833039667..e984ef4fffc 100644
--- a/src/mongo/db/concurrency/lock_state.cpp
+++ b/src/mongo/db/concurrency/lock_state.cpp
@@ -367,7 +367,7 @@ bool LockerImpl::_acquireTicket(OperationContext* opCtx, LockMode mode, Date_t d
_clientState.store(reader ? kQueuedReader : kQueuedWriter);
// If the ticket wait is interrupted, restore the state of the client.
- auto restoreStateOnErrorGuard = makeGuard([&] { _clientState.store(kInactive); });
+ ScopeGuard restoreStateOnErrorGuard([&] { _clientState.store(kInactive); });
// Acquiring a ticket is a potentially blocking operation. This must not be called after a
// transaction timestamp has been set, indicating this transaction has created an oplog
@@ -937,7 +937,7 @@ void LockerImpl::_lockComplete(OperationContext* opCtx,
}
// Clean up the state on any failed lock attempts.
- auto unlockOnErrorGuard = makeGuard([&] {
+ ScopeGuard unlockOnErrorGuard([&] {
LockRequestsMap::Iterator it = _requests.find(resId);
invariant(it);
_unlockImpl(&it);
@@ -1037,7 +1037,7 @@ void LockerImpl::getFlowControlTicket(OperationContext* opCtx, LockMode lockMode
// method must not exit with a side-effect on the clientState. That value is also used for
// tracking whether other resources need to be released.
_clientState.store(kQueuedWriter);
- auto restoreState = makeGuard([&] { _clientState.store(kInactive); });
+ ScopeGuard restoreState([&] { _clientState.store(kInactive); });
// Acquiring a ticket is a potentially blocking operation. This must not be called after a
// transaction timestamp has been set, indicating this transaction has created an oplog
// hole.
diff --git a/src/mongo/db/default_baton.cpp b/src/mongo/db/default_baton.cpp
index b361cf38346..3f7e73fa59d 100644
--- a/src/mongo/db/default_baton.cpp
+++ b/src/mongo/db/default_baton.cpp
@@ -107,7 +107,7 @@ Waitable::TimeoutState DefaultBaton::run_until(ClockSource* clkSource,
stdx::unique_lock<Latch> lk(_mutex);
// We'll fulfill promises and run jobs on the way out, ensuring we don't hold any locks
- const auto guard = makeGuard([&] {
+ const ScopeGuard guard([&] {
// While we have scheduled work, keep running jobs
while (_scheduled.size()) {
auto toRun = std::exchange(_scheduled, {});
diff --git a/src/mongo/db/exec/delete.cpp b/src/mongo/db/exec/delete.cpp
index b5095bb8eda..8cc3c5fafb6 100644
--- a/src/mongo/db/exec/delete.cpp
+++ b/src/mongo/db/exec/delete.cpp
@@ -139,7 +139,7 @@ PlanStage::StageState DeleteStage::doWork(WorkingSetID* out) {
WorkingSetMember* member = _ws->get(id);
// We want to free this member when we return, unless we need to retry deleting or returning it.
- auto memberFreer = makeGuard([&] { _ws->free(id); });
+ ScopeGuard memberFreer([&] { _ws->free(id); });
invariant(member->hasRecordId());
RecordId recordId = member->recordId;
diff --git a/src/mongo/db/exec/js_function.cpp b/src/mongo/db/exec/js_function.cpp
index ae6e479c521..58e3a880b00 100644
--- a/src/mongo/db/exec/js_function.cpp
+++ b/src/mongo/db/exec/js_function.cpp
@@ -64,7 +64,7 @@ JsFunction::JsFunction(OperationContext* opCtx,
const auto userToken = getAuthenticatedUserNamesToken(opCtx->getClient());
_scope = getGlobalScriptEngine()->getPooledScope(opCtx, dbName, "where" + userToken);
- const auto guard = makeGuard([&] { _scope->unregisterOperation(); });
+ const ScopeGuard guard([&] { _scope->unregisterOperation(); });
_func = _scope->createFunction(code.c_str());
uassert(ErrorCodes::BadValue, "$where compile error", _func);
@@ -72,7 +72,7 @@ JsFunction::JsFunction(OperationContext* opCtx,
bool JsFunction::runAsPredicate(const BSONObj& obj) const {
_scope->registerOperation(Client::getCurrent()->getOperationContext());
- const auto scopeOpCtxGuard = makeGuard([&] { _scope->unregisterOperation(); });
+ const ScopeGuard scopeOpCtxGuard([&] { _scope->unregisterOperation(); });
_scope->advanceGeneration();
_scope->setObject("obj", obj);
diff --git a/src/mongo/db/exec/update_stage.cpp b/src/mongo/db/exec/update_stage.cpp
index 708c25dfe30..5859e85e72f 100644
--- a/src/mongo/db/exec/update_stage.cpp
+++ b/src/mongo/db/exec/update_stage.cpp
@@ -402,7 +402,7 @@ PlanStage::StageState UpdateStage::doWork(WorkingSetID* out) {
// We want to free this member when we return, unless we need to retry updating or returning
// it.
- auto memberFreer = makeGuard([&] { _ws->free(id); });
+ ScopeGuard memberFreer([&] { _ws->free(id); });
invariant(member->hasRecordId());
recordId = member->recordId;
diff --git a/src/mongo/db/field_parser.h b/src/mongo/db/field_parser.h
index 45cbf50506b..078668d28f9 100644
--- a/src/mongo/db/field_parser.h
+++ b/src/mongo/db/field_parser.h
@@ -548,7 +548,7 @@ FieldParser::FieldState FieldParser::extract(BSONObj doc,
}
auto tempVector = std::make_unique<std::vector<T*>>();
- auto guard = makeGuard([&tempVector] {
+ ScopeGuard guard([&tempVector] {
if (tempVector) {
for (T*& raw : *tempVector) {
delete raw;
diff --git a/src/mongo/db/index/index_build_interceptor.cpp b/src/mongo/db/index/index_build_interceptor.cpp
index 77d13478b30..88a0690a8c6 100644
--- a/src/mongo/db/index/index_build_interceptor.cpp
+++ b/src/mongo/db/index/index_build_interceptor.cpp
@@ -80,7 +80,7 @@ IndexBuildInterceptor::IndexBuildInterceptor(OperationContext* opCtx,
_skippedRecordTracker(opCtx, entry, skippedRecordTrackerIdent),
_skipNumAppliedCheck(true) {
- auto finalizeTableOnFailure = makeGuard([&] {
+ ScopeGuard finalizeTableOnFailure([&] {
_sideWritesTable->finalizeTemporaryTable(opCtx,
TemporaryRecordStore::FinalizationAction::kDelete);
});
diff --git a/src/mongo/db/index_builds_coordinator.cpp b/src/mongo/db/index_builds_coordinator.cpp
index cb1f3252884..18ca33e224b 100644
--- a/src/mongo/db/index_builds_coordinator.cpp
+++ b/src/mongo/db/index_builds_coordinator.cpp
@@ -1578,7 +1578,7 @@ void IndexBuildsCoordinator::createIndex(OperationContext* opCtx,
throw;
}
- auto abortOnExit = makeGuard([&] {
+ ScopeGuard abortOnExit([&] {
_indexBuildsManager.abortIndexBuild(
opCtx, collection, buildUUID, MultiIndexBlock::kNoopOnCleanUpFn);
});
@@ -2329,7 +2329,7 @@ void IndexBuildsCoordinator::_scanCollectionAndInsertSortedKeysIntoIndex(
// Collection scan and insert into index.
{
- auto scopeGuard = makeGuard([&] {
+ ScopeGuard scopeGuard([&] {
opCtx->recoveryUnit()->setTimestampReadSource(RecoveryUnit::ReadSource::kNoTimestamp);
});
@@ -2504,7 +2504,7 @@ IndexBuildsCoordinator::CommitResult IndexBuildsCoordinator::_insertKeysFromSide
// While we are still holding the RSTL and before returning, ensure the metrics collected for
// this index build are attributed to the primary that commits or aborts the index build.
- auto metricsGuard = makeGuard([&]() {
+ ScopeGuard metricsGuard([&]() {
auto& collector = ResourceConsumption::MetricsCollector::get(opCtx);
bool wasCollecting = collector.endScopedCollecting();
if (!isPrimary || !wasCollecting || !ResourceConsumption::isMetricsAggregationEnabled()) {
diff --git a/src/mongo/db/index_builds_coordinator_mongod.cpp b/src/mongo/db/index_builds_coordinator_mongod.cpp
index 87603bc2869..d89065fc419 100644
--- a/src/mongo/db/index_builds_coordinator_mongod.cpp
+++ b/src/mongo/db/index_builds_coordinator_mongod.cpp
@@ -218,7 +218,7 @@ IndexBuildsCoordinatorMongod::_startIndexBuild(OperationContext* opCtx,
}
}
- auto onScopeExitGuard = makeGuard([&] {
+ ScopeGuard onScopeExitGuard([&] {
stdx::unique_lock<Latch> lk(_throttlingMutex);
_numActiveIndexBuilds--;
_indexBuildFinished.notify_one();
@@ -328,7 +328,7 @@ IndexBuildsCoordinatorMongod::_startIndexBuild(OperationContext* opCtx,
resumeInfo,
impersonatedClientAttrs = std::move(impersonatedClientAttrs)
](auto status) mutable noexcept {
- auto onScopeExitGuard = makeGuard([&] {
+ ScopeGuard onScopeExitGuard([&] {
stdx::unique_lock<Latch> lk(_throttlingMutex);
_numActiveIndexBuilds--;
_indexBuildFinished.notify_one();
diff --git a/src/mongo/db/logical_session_cache_impl.cpp b/src/mongo/db/logical_session_cache_impl.cpp
index 52bee61ed64..43f800c6820 100644
--- a/src/mongo/db/logical_session_cache_impl.cpp
+++ b/src/mongo/db/logical_session_cache_impl.cpp
@@ -258,7 +258,7 @@ void LogicalSessionCacheImpl::_refresh(Client* client) {
}
// This will finish timing _refresh for our stats no matter when we return.
- const auto timeRefreshJob = makeGuard([this] {
+ const ScopeGuard timeRefreshJob([this] {
stdx::lock_guard<Latch> lk(_mutex);
auto millis = _service->now() - _stats.getLastSessionsCollectionJobTimestamp();
_stats.setLastSessionsCollectionJobDurationMillis(millis.count());
@@ -299,9 +299,9 @@ void LogicalSessionCacheImpl::_refresh(Client* client) {
member.emplace(it);
}
};
- auto activeSessionsBackSwapper = makeGuard([&] { backSwap(_activeSessions, activeSessions); });
+ ScopeGuard activeSessionsBackSwapper([&] { backSwap(_activeSessions, activeSessions); });
auto explicitlyEndingBackSwaper =
- makeGuard([&] { backSwap(_endingSessions, explicitlyEndingSessions); });
+ ScopeGuard([&] { backSwap(_endingSessions, explicitlyEndingSessions); });
// remove all explicitlyEndingSessions from activeSessions
for (const auto& lsid : explicitlyEndingSessions) {
diff --git a/src/mongo/db/repl/collection_bulk_loader_impl.cpp b/src/mongo/db/repl/collection_bulk_loader_impl.cpp
index 5224bc5c7bb..c06603c61ab 100644
--- a/src/mongo/db/repl/collection_bulk_loader_impl.cpp
+++ b/src/mongo/db/repl/collection_bulk_loader_impl.cpp
@@ -362,7 +362,7 @@ void CollectionBulkLoaderImpl::_releaseResources() {
template <typename F>
Status CollectionBulkLoaderImpl::_runTaskReleaseResourcesOnFailure(const F& task) noexcept {
AlternativeClientRegion acr(_client);
- auto guard = makeGuard([this] { _releaseResources(); });
+ ScopeGuard guard([this] { _releaseResources(); });
try {
const auto status = task();
if (status.isOK()) {
diff --git a/src/mongo/db/repl/initial_syncer.cpp b/src/mongo/db/repl/initial_syncer.cpp
index 5820d2e2ab5..a404f7b6ccf 100644
--- a/src/mongo/db/repl/initial_syncer.cpp
+++ b/src/mongo/db/repl/initial_syncer.cpp
@@ -1700,7 +1700,7 @@ void InitialSyncer::_finishInitialSyncAttempt(const StatusWith<OpTimeAndWallTime
// if the task scheduling fails and we have to invoke _finishCallback() synchronously), we
// declare the scope guard before the lock guard.
auto result = lastApplied;
- auto finishCallbackGuard = makeGuard([this, &result] {
+ ScopeGuard finishCallbackGuard([this, &result] {
auto scheduleResult = _exec->scheduleWork(
[=](const mongo::executor::TaskExecutor::CallbackArgs&) { _finishCallback(result); });
if (!scheduleResult.isOK()) {
diff --git a/src/mongo/db/repl/oplog.cpp b/src/mongo/db/repl/oplog.cpp
index 1c48b3efd87..d4d35779ded 100644
--- a/src/mongo/db/repl/oplog.cpp
+++ b/src/mongo/db/repl/oplog.cpp
@@ -428,7 +428,7 @@ OpTime logOp(OperationContext* opCtx, MutableOplogEntry* oplogEntry) {
// again. For example, if the WUOW gets aborted within a writeConflictRetry loop, we need to
// reset the OpTime to null so a new OpTime will be assigned on retry.
OplogSlot slot = oplogEntry->getOpTime();
- auto resetOpTimeGuard = makeGuard([&, resetOpTimeOnExit = bool(slot.isNull())] {
+ ScopeGuard resetOpTimeGuard([&, resetOpTimeOnExit = bool(slot.isNull())] {
if (resetOpTimeOnExit)
oplogEntry->setOpTime(OplogSlot());
});
diff --git a/src/mongo/db/repl/repl_set_commands.cpp b/src/mongo/db/repl/repl_set_commands.cpp
index 135bdd13614..de1b9a458db 100644
--- a/src/mongo/db/repl/repl_set_commands.cpp
+++ b/src/mongo/db/repl/repl_set_commands.cpp
@@ -514,7 +514,7 @@ public:
_stepDownCmdsWithForceExecuted.increment();
}
- auto onExitGuard = makeGuard([&] {
+ ScopeGuard onExitGuard([&] {
if (force) {
_stepDownCmdsWithForceFailed.increment();
}
diff --git a/src/mongo/db/repl/replication_coordinator_impl.cpp b/src/mongo/db/repl/replication_coordinator_impl.cpp
index 44b1c72a7c9..5e5d1834ea0 100644
--- a/src/mongo/db/repl/replication_coordinator_impl.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl.cpp
@@ -2583,7 +2583,7 @@ void ReplicationCoordinatorImpl::stepDown(OperationContext* opCtx,
_waitingForRSTLAtStepDown++;
_fulfillTopologyChangePromise(lk);
}
- auto clearStepDownFlag = makeGuard([&] {
+ ScopeGuard clearStepDownFlag([&] {
stdx::lock_guard lk(_mutex);
_waitingForRSTLAtStepDown--;
_fulfillTopologyChangePromise(lk);
@@ -2651,7 +2651,7 @@ void ReplicationCoordinatorImpl::stepDown(OperationContext* opCtx,
_performPostMemberStateUpdateAction(action);
};
- auto onExitGuard = makeGuard([&] {
+ ScopeGuard onExitGuard([&] {
abortFn();
updateMemberState();
});
@@ -3492,7 +3492,7 @@ Status ReplicationCoordinatorImpl::_doReplSetReconfig(OperationContext* opCtx,
_setConfigState_inlock(kConfigReconfiguring);
auto configStateGuard =
- makeGuard([&] { lockAndCall(&lk, [=] { _setConfigState_inlock(kConfigSteady); }); });
+ ScopeGuard([&] { lockAndCall(&lk, [=] { _setConfigState_inlock(kConfigSteady); }); });
ReplSetConfig oldConfig = _rsConfig;
int myIndex = _selfIndex;
@@ -3933,8 +3933,9 @@ Status ReplicationCoordinatorImpl::processReplSetInitiate(OperationContext* opCt
invariant(!_rsConfig.isInitialized());
_setConfigState_inlock(kConfigInitiating);
- auto configStateGuard =
- makeGuard([&] { lockAndCall(&lk, [=] { _setConfigState_inlock(kConfigUninitialized); }); });
+ ScopeGuard configStateGuard = [&] {
+ lockAndCall(&lk, [=] { _setConfigState_inlock(kConfigUninitialized); });
+ };
// When writing our first oplog entry below, disable advancement of the stable timestamp so that
// we don't set it before setting our initial data timestamp. We will set it after we set our
diff --git a/src/mongo/db/repl/scatter_gather_runner.cpp b/src/mongo/db/repl/scatter_gather_runner.cpp
index c45401d53f0..f688c5cf0cd 100644
--- a/src/mongo/db/repl/scatter_gather_runner.cpp
+++ b/src/mongo/db/repl/scatter_gather_runner.cpp
@@ -101,7 +101,7 @@ StatusWith<EventHandle> ScatterGatherRunner::RunnerImpl::start(
return evh;
}
_sufficientResponsesReceived = evh.getValue();
- auto earlyReturnGuard = makeGuard([this] { _signalSufficientResponsesReceived(); });
+ ScopeGuard earlyReturnGuard([this] { _signalSufficientResponsesReceived(); });
std::vector<RemoteCommandRequest> requests = _algorithm->getRequests();
for (size_t i = 0; i < requests.size(); ++i) {
diff --git a/src/mongo/db/repl/tenant_migration_recipient_service_test.cpp b/src/mongo/db/repl/tenant_migration_recipient_service_test.cpp
index a303a3cc442..b30dee191a0 100644
--- a/src/mongo/db/repl/tenant_migration_recipient_service_test.cpp
+++ b/src/mongo/db/repl/tenant_migration_recipient_service_test.cpp
@@ -1560,7 +1560,7 @@ TEST_F(TenantMigrationRecipientServiceTest, TenantMigrationRecipientStartsCloner
stopFailPointEnableBlock fp("fpAfterCollectionClonerDone");
auto taskFp = globalFailPointRegistry().find("hangBeforeTaskCompletion");
- auto taskFpGuard = makeGuard([&taskFp] { taskFp->setMode(FailPoint::off); });
+ ScopeGuard taskFpGuard([&taskFp] { taskFp->setMode(FailPoint::off); });
auto initialTimesEntered = taskFp->setMode(FailPoint::alwaysOn);
diff --git a/src/mongo/db/repl/topology_version_observer.cpp b/src/mongo/db/repl/topology_version_observer.cpp
index 877466bf7f6..b0882e76b8d 100644
--- a/src/mongo/db/repl/topology_version_observer.cpp
+++ b/src/mongo/db/repl/topology_version_observer.cpp
@@ -129,7 +129,7 @@ void TopologyVersionObserver::_cacheHelloResponse(
LOGV2_DEBUG(4794600, 3, "Waiting for a topology change");
{
- auto cacheGuard = makeGuard([&] {
+ ScopeGuard cacheGuard([&] {
// If we're not dismissed, reset the _cache.
stdx::lock_guard lk(_mutex);
_cache.reset();
diff --git a/src/mongo/db/repl/transaction_oplog_application.cpp b/src/mongo/db/repl/transaction_oplog_application.cpp
index 103a5c4e149..844971df246 100644
--- a/src/mongo/db/repl/transaction_oplog_application.cpp
+++ b/src/mongo/db/repl/transaction_oplog_application.cpp
@@ -433,7 +433,7 @@ Status _applyPrepareTransaction(OperationContext* opCtx,
// Release the WUOW, transaction lock resources and abort storage transaction so that the
// writeConflictRetry loop will be able to retry applying transactional ops on WCE error.
- auto abortOnError = makeGuard([&txnParticipant, opCtx] {
+ ScopeGuard abortOnError([&txnParticipant, opCtx] {
// Abort the transaction and invalidate the session it is associated with.
txnParticipant.abortTransaction(opCtx);
txnParticipant.invalidate(opCtx);
diff --git a/src/mongo/db/repl/vote_requester.cpp b/src/mongo/db/repl/vote_requester.cpp
index 6a29e4785c8..f57765dc73c 100644
--- a/src/mongo/db/repl/vote_requester.cpp
+++ b/src/mongo/db/repl/vote_requester.cpp
@@ -117,7 +117,7 @@ void VoteRequester::Algorithm::processResponse(const RemoteCommandRequest& reque
// All local variables captured in logAttrs need to be above the guard that logs.
logv2::DynamicAttributes logAttrs;
auto logAtExit =
- makeGuard([&logAttrs]() { LOGV2(51799, "VoteRequester processResponse", logAttrs); });
+ ScopeGuard([&logAttrs]() { LOGV2(51799, "VoteRequester processResponse", logAttrs); });
logAttrs.add("term", _term);
logAttrs.add("dryRun", _dryRun);
diff --git a/src/mongo/db/s/balancer/migration_manager.cpp b/src/mongo/db/s/balancer/migration_manager.cpp
index b2c34f7f555..6935621ef44 100644
--- a/src/mongo/db/s/balancer/migration_manager.cpp
+++ b/src/mongo/db/s/balancer/migration_manager.cpp
@@ -223,7 +223,7 @@ void MigrationManager::startRecoveryAndAcquireDistLocks(OperationContext* opCtx)
_state = State::kRecovering;
}
- auto scopedGuard = makeGuard([&] {
+ ScopeGuard scopedGuard([&] {
_migrationRecoveryMap.clear();
_abandonActiveMigrationsAndEnableManager(opCtx);
});
@@ -313,7 +313,7 @@ void MigrationManager::finishRecovery(OperationContext* opCtx,
invariant(_state == State::kRecovering);
}
- auto scopedGuard = makeGuard([&] {
+ ScopeGuard scopedGuard([&] {
_migrationRecoveryMap.clear();
_abandonActiveMigrationsAndEnableManager(opCtx);
});
diff --git a/src/mongo/db/s/config/sharding_catalog_manager.cpp b/src/mongo/db/s/config/sharding_catalog_manager.cpp
index d2d0055d931..295fa1bc183 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager.cpp
@@ -837,7 +837,7 @@ void ShardingCatalogManager::withTransaction(
AuthorizationSession::get(client)->grantInternalAuthorization(client);
TxnNumber txnNumber = 0;
- auto guard = makeGuard([opCtx = asr.opCtx(), &txnNumber] {
+ ScopeGuard guard([opCtx = asr.opCtx(), &txnNumber] {
try {
abortTransaction(opCtx, txnNumber);
} catch (DBException& e) {
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
index 45b1212452d..3e65ecf8903 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
@@ -1400,7 +1400,7 @@ void ShardingCatalogManager::ensureChunkVersionIsGreaterThan(OperationContext* o
const BSONObj& minKey,
const BSONObj& maxKey,
const ChunkVersion& version) {
- auto earlyReturnBeforeDoingWriteGuard = makeGuard([&] {
+ ScopeGuard earlyReturnBeforeDoingWriteGuard([&] {
// Ensure waiting for writeConcern of the data read.
repl::ReplClientInfo::forClient(opCtx->getClient()).setLastOpToSystemLastOpTime(opCtx);
});
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_shard_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_shard_operations.cpp
index 525d3385b9a..ed00416ef5d 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_shard_operations.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_shard_operations.cpp
@@ -595,7 +595,7 @@ StatusWith<std::string> ShardingCatalogManager::addShard(
const std::shared_ptr<Shard> shard{shardRegistry->createConnection(shardConnectionString)};
auto targeter = shard->getTargeter();
- auto stopMonitoringGuard = makeGuard([&] {
+ ScopeGuard stopMonitoringGuard([&] {
if (shardConnectionString.type() == ConnectionString::ConnectionType::kReplicaSet) {
// This is a workaround for the case where we could have some bad shard being
// requested to be added and we put that bad connection string on the global replica set
diff --git a/src/mongo/db/s/dist_lock_manager.cpp b/src/mongo/db/s/dist_lock_manager.cpp
index a2921a1c49c..96b68bc25b7 100644
--- a/src/mongo/db/s/dist_lock_manager.cpp
+++ b/src/mongo/db/s/dist_lock_manager.cpp
@@ -127,7 +127,7 @@ DistLockManager::ScopedLock DistLockManager::lockDirectLocally(OperationContext*
} else {
auto nsLock = iter->second;
nsLock->numWaiting++;
- auto guard = makeGuard([&] { nsLock->numWaiting--; });
+ ScopeGuard guard([&] { nsLock->numWaiting--; });
if (!opCtx->waitForConditionOrInterruptFor(
nsLock->cvLocked, lock, waitFor, [nsLock]() { return !nsLock->isInProgress; })) {
uasserted(ErrorCodes::LockBusy,
diff --git a/src/mongo/db/s/dist_lock_manager_replset.cpp b/src/mongo/db/s/dist_lock_manager_replset.cpp
index d4c5cf0df8a..a41839870fe 100644
--- a/src/mongo/db/s/dist_lock_manager_replset.cpp
+++ b/src/mongo/db/s/dist_lock_manager_replset.cpp
@@ -689,7 +689,7 @@ long long ReplSetDistLockManager::_waitForRecovery(OperationContext* opCtx) {
LOGV2(570181, "Recovering dist lock manager", "term"_attr = term);
- auto anotherThreadMustRecoverGuard = makeGuard([&] {
+ ScopeGuard anotherThreadMustRecoverGuard([&] {
lk.lock();
if (term == _recoveryTerm) {
_recoveryState = kMustRecover;
diff --git a/src/mongo/db/s/migration_destination_manager.cpp b/src/mongo/db/s/migration_destination_manager.cpp
index 5f81beb94fa..51df79c8c46 100644
--- a/src/mongo/db/s/migration_destination_manager.cpp
+++ b/src/mongo/db/s/migration_destination_manager.cpp
@@ -471,7 +471,7 @@ repl::OpTime MigrationDestinationManager::fetchAndApplyBatch(
auto applicationOpCtx = CancelableOperationContext(
cc().makeOperationContext(), opCtx->getCancellationToken(), executor);
- auto consumerGuard = makeGuard([&] {
+ ScopeGuard consumerGuard([&] {
batches.closeConsumerEnd();
lastOpApplied =
repl::ReplClientInfo::forClient(applicationOpCtx->getClient()).getLastOp();
@@ -496,7 +496,7 @@ repl::OpTime MigrationDestinationManager::fetchAndApplyBatch(
{
- auto applicationThreadJoinGuard = makeGuard([&] {
+ ScopeGuard applicationThreadJoinGuard([&] {
batches.closeProducerEnd();
applicationThread.join();
});
diff --git a/src/mongo/db/s/migration_source_manager.cpp b/src/mongo/db/s/migration_source_manager.cpp
index 13deb093da8..41c029b2153 100644
--- a/src/mongo/db/s/migration_source_manager.cpp
+++ b/src/mongo/db/s/migration_source_manager.cpp
@@ -214,7 +214,7 @@ NamespaceString MigrationSourceManager::getNss() const {
Status MigrationSourceManager::startClone() {
invariant(!_opCtx->lockState()->isLocked());
invariant(_state == kCreated);
- auto scopedGuard = makeGuard([&] { cleanupOnError(); });
+ ScopeGuard scopedGuard([&] { cleanupOnError(); });
_stats.countDonorMoveChunkStarted.addAndFetch(1);
const Status logStatus = ShardingLogging::get(_opCtx)->logChangeChecked(
@@ -297,7 +297,7 @@ Status MigrationSourceManager::startClone() {
Status MigrationSourceManager::awaitToCatchUp() {
invariant(!_opCtx->lockState()->isLocked());
invariant(_state == kCloning);
- auto scopedGuard = makeGuard([&] { cleanupOnError(); });
+ ScopeGuard scopedGuard([&] { cleanupOnError(); });
_stats.totalDonorChunkCloneTimeMillis.addAndFetch(_cloneAndCommitTimer.millis());
_cloneAndCommitTimer.reset();
@@ -316,7 +316,7 @@ Status MigrationSourceManager::awaitToCatchUp() {
Status MigrationSourceManager::enterCriticalSection() {
invariant(!_opCtx->lockState()->isLocked());
invariant(_state == kCloneCaughtUp);
- auto scopedGuard = makeGuard([&] { cleanupOnError(); });
+ ScopeGuard scopedGuard([&] { cleanupOnError(); });
_stats.totalDonorChunkCloneTimeMillis.addAndFetch(_cloneAndCommitTimer.millis());
_cloneAndCommitTimer.reset();
@@ -371,7 +371,7 @@ Status MigrationSourceManager::enterCriticalSection() {
Status MigrationSourceManager::commitChunkOnRecipient() {
invariant(!_opCtx->lockState()->isLocked());
invariant(_state == kCriticalSection);
- auto scopedGuard = makeGuard([&] { cleanupOnError(); });
+ ScopeGuard scopedGuard([&] { cleanupOnError(); });
// Tell the recipient shard to fetch the latest changes.
auto commitCloneStatus = _cloneDriver->commitClone(_opCtx);
@@ -395,7 +395,7 @@ Status MigrationSourceManager::commitChunkOnRecipient() {
Status MigrationSourceManager::commitChunkMetadataOnConfig() {
invariant(!_opCtx->lockState()->isLocked());
invariant(_state == kCloneCompleted);
- auto scopedGuard = makeGuard([&] { cleanupOnError(); });
+ ScopeGuard scopedGuard([&] { cleanupOnError(); });
// If we have chunks left on the FROM shard, bump the version of one of them as well. This will
// change the local collection major version, which indicates to other processes that the chunk
diff --git a/src/mongo/db/s/move_primary_source_manager.cpp b/src/mongo/db/s/move_primary_source_manager.cpp
index 99bf70f5ba7..e7b2e87ccef 100644
--- a/src/mongo/db/s/move_primary_source_manager.cpp
+++ b/src/mongo/db/s/move_primary_source_manager.cpp
@@ -79,7 +79,7 @@ NamespaceString MovePrimarySourceManager::getNss() const {
Status MovePrimarySourceManager::clone(OperationContext* opCtx) {
invariant(!opCtx->lockState()->isLocked());
invariant(_state == kCreated);
- auto scopedGuard = makeGuard([&] { cleanupOnError(opCtx); });
+ ScopeGuard scopedGuard([&] { cleanupOnError(opCtx); });
LOGV2(22042,
"Moving {db} primary from: {fromShard} to: {toShard}",
@@ -155,7 +155,7 @@ Status MovePrimarySourceManager::clone(OperationContext* opCtx) {
Status MovePrimarySourceManager::enterCriticalSection(OperationContext* opCtx) {
invariant(!opCtx->lockState()->isLocked());
invariant(_state == kCloneCaughtUp);
- auto scopedGuard = makeGuard([&] { cleanupOnError(opCtx); });
+ ScopeGuard scopedGuard([&] { cleanupOnError(opCtx); });
// Mark the shard as running a critical operation that requires recovery on crash.
auto startMetadataOpStatus = ShardingStateRecovery::startMetadataOp(opCtx);
@@ -213,7 +213,7 @@ Status MovePrimarySourceManager::enterCriticalSection(OperationContext* opCtx) {
Status MovePrimarySourceManager::commitOnConfig(OperationContext* opCtx) {
invariant(!opCtx->lockState()->isLocked());
invariant(_state == kCriticalSection);
- auto scopedGuard = makeGuard([&] { cleanupOnError(opCtx); });
+ ScopeGuard scopedGuard([&] { cleanupOnError(opCtx); });
{
AutoGetDb autoDb(opCtx, getNss().toString(), MODE_X);
diff --git a/src/mongo/db/s/resharding/resharding_collection_cloner.cpp b/src/mongo/db/s/resharding/resharding_collection_cloner.cpp
index 0225c21cc69..65668d804d7 100644
--- a/src/mongo/db/s/resharding/resharding_collection_cloner.cpp
+++ b/src/mongo/db/s/resharding/resharding_collection_cloner.cpp
@@ -305,7 +305,7 @@ SemiFuture<void> ReshardingCollectionCloner::run(
}
auto opCtx = factory.makeOperationContext(&cc());
- auto guard = makeGuard([&] {
+ ScopeGuard guard([&] {
chainCtx->pipeline->dispose(opCtx.get());
chainCtx->pipeline.reset();
});
diff --git a/src/mongo/db/s/resharding/resharding_donor_oplog_iterator.cpp b/src/mongo/db/s/resharding/resharding_donor_oplog_iterator.cpp
index 9d8a1949cb5..17a5d8fb966 100644
--- a/src/mongo/db/s/resharding/resharding_donor_oplog_iterator.cpp
+++ b/src/mongo/db/s/resharding/resharding_donor_oplog_iterator.cpp
@@ -205,7 +205,7 @@ ExecutorFuture<std::vector<repl::OplogEntry>> ReshardingDonorOplogIterator::getN
auto batch = [&] {
auto opCtx = factory.makeOperationContext(&cc());
- auto guard = makeGuard([&] { dispose(opCtx.get()); });
+ ScopeGuard guard([&] { dispose(opCtx.get()); });
// A primary which steps down may briefly continue running the ReshardingDonorOplogIterator
// as a secondary. AutoGetCollectionForReadBase forbids reads on a secondary from using the
diff --git a/src/mongo/db/s/resharding/resharding_op_observer.cpp b/src/mongo/db/s/resharding/resharding_op_observer.cpp
index d84c59c7edd..0027b9e0d62 100644
--- a/src/mongo/db/s/resharding/resharding_op_observer.cpp
+++ b/src/mongo/db/s/resharding/resharding_op_observer.cpp
@@ -100,7 +100,7 @@ boost::optional<Timestamp> _calculatePin(OperationContext* opCtx) {
// If the RecoveryUnit already had an open snapshot, keep the snapshot open. Otherwise abandon
// the snapshot when exiting the function.
- auto scopeGuard = makeGuard([&] { opCtx->recoveryUnit()->abandonSnapshot(); });
+ ScopeGuard scopeGuard([&] { opCtx->recoveryUnit()->abandonSnapshot(); });
if (opCtx->recoveryUnit()->isActive()) {
scopeGuard.dismiss();
}
diff --git a/src/mongo/db/s/resharding/resharding_oplog_application.cpp b/src/mongo/db/s/resharding/resharding_oplog_application.cpp
index 5a71b14a7da..f704bb10181 100644
--- a/src/mongo/db/s/resharding/resharding_oplog_application.cpp
+++ b/src/mongo/db/s/resharding/resharding_oplog_application.cpp
@@ -76,7 +76,7 @@ void runWithTransaction(OperationContext* opCtx, unique_function<void(OperationC
MongoDOperationContextSession ocs(asr.opCtx());
auto txnParticipant = TransactionParticipant::get(asr.opCtx());
- auto guard = makeGuard([opCtx = asr.opCtx(), &txnParticipant] {
+ ScopeGuard guard([opCtx = asr.opCtx(), &txnParticipant] {
try {
txnParticipant.abortTransaction(opCtx);
} catch (DBException& e) {
diff --git a/src/mongo/db/s/resharding/resharding_service_test_helpers.cpp b/src/mongo/db/s/resharding/resharding_service_test_helpers.cpp
index 01b800167e6..865042348df 100644
--- a/src/mongo/db/s/resharding/resharding_service_test_helpers.cpp
+++ b/src/mongo/db/s/resharding/resharding_service_test_helpers.cpp
@@ -67,7 +67,7 @@ template <class StateEnum>
void StateTransitionController<StateEnum>::_notifyNewStateAndWaitUntilUnpaused(
OperationContext* opCtx, StateEnum newState) {
stdx::unique_lock lk(_mutex);
- auto guard = makeGuard([this, prevState = _state] { _state = prevState; });
+ ScopeGuard guard([this, prevState = _state] { _state = prevState; });
_state = newState;
_waitUntilUnpausedCond.notify_all();
opCtx->waitForConditionOrInterrupt(_pauseDuringTransitionCond, lk, [this, newState] {
diff --git a/src/mongo/db/s/resharding/resharding_txn_cloner.cpp b/src/mongo/db/s/resharding/resharding_txn_cloner.cpp
index 2ffa4e40ab0..d4b2cc88bee 100644
--- a/src/mongo/db/s/resharding/resharding_txn_cloner.cpp
+++ b/src/mongo/db/s/resharding/resharding_txn_cloner.cpp
@@ -220,7 +220,7 @@ SemiFuture<void> ReshardingTxnCloner::run(
// due to a prepared transaction having been in progress.
if (!chainCtx->donorRecord) {
auto opCtx = factory.makeOperationContext(&cc());
- auto guard = makeGuard([&] {
+ ScopeGuard guard([&] {
chainCtx->pipeline->dispose(opCtx.get());
chainCtx->pipeline.reset();
});
diff --git a/src/mongo/db/s/transaction_coordinator_service_test.cpp b/src/mongo/db/s/transaction_coordinator_service_test.cpp
index 06086fa3f4a..c63753bd3b2 100644
--- a/src/mongo/db/s/transaction_coordinator_service_test.cpp
+++ b/src/mongo/db/s/transaction_coordinator_service_test.cpp
@@ -213,7 +213,7 @@ TEST_F(TransactionCoordinatorServiceStepUpStepDownTest, OperationsFailBeforeStep
TEST_F(TransactionCoordinatorServiceStepUpStepDownTest, OperationsBlockBeforeStepUpCompletes) {
service()->onStepUp(operationContext(), Milliseconds(1));
- auto stepDownGuard = makeGuard([&] { service()->onStepDown(); });
+ ScopeGuard stepDownGuard([&] { service()->onStepDown(); });
ASSERT_THROWS_CODE(operationContext()->runWithDeadline(
Date_t::now() + Milliseconds{5},
@@ -246,7 +246,7 @@ TEST_F(TransactionCoordinatorServiceStepUpStepDownTest, StepUpFailsDueToBadCoord
ASSERT_EQ(1, response["n"].Int());
service()->onStepUp(operationContext());
- auto stepDownGuard = makeGuard([&] { service()->onStepDown(); });
+ ScopeGuard stepDownGuard([&] { service()->onStepDown(); });
ASSERT_THROWS_CODE(service()->coordinateCommit(
operationContext(), makeLogicalSessionIdForTest(), 0, kTwoShardIdSet),
diff --git a/src/mongo/db/server_options_test.cpp b/src/mongo/db/server_options_test.cpp
index 5cc26ccd3c1..55e67ba6d15 100644
--- a/src/mongo/db/server_options_test.cpp
+++ b/src/mongo/db/server_options_test.cpp
@@ -723,10 +723,10 @@ TEST(SetupOptions, DeepCwd) {
sb << "/tmp/deepcwd-" << getpid();
boost::filesystem::path deepBaseDir = sb.str();
- auto cleanup = ::mongo::makeGuard([&] {
+ ::mongo::ScopeGuard cleanup = [&] {
boost::filesystem::current_path(cwd, ec);
boost::filesystem::remove_all(deepBaseDir, ec);
- });
+ };
// Clear out any old base dir, and create an empty dir.
boost::filesystem::remove_all(deepBaseDir, ec);
@@ -780,12 +780,12 @@ TEST(SetupOptions, UnlinkedCwd) {
std::string unlinkDir;
- auto cleanup = ::mongo::makeGuard([&] {
+ ::mongo::ScopeGuard cleanup = [&] {
boost::filesystem::current_path(cwd, ec);
if (!unlinkDir.empty()) {
boost::filesystem::remove(cwd / unlinkDir, ec);
}
- });
+ };
// mkdir our own unlink dir
unsigned int i = 0;
diff --git a/src/mongo/db/service_context.cpp b/src/mongo/db/service_context.cpp
index eb3901c01c1..551bc7cdf34 100644
--- a/src/mongo/db/service_context.cpp
+++ b/src/mongo/db/service_context.cpp
@@ -242,14 +242,14 @@ ServiceContext::UniqueOperationContext ServiceContext::makeOperationContext(Clie
_numCurrentOps.addAndFetch(1);
}
- auto numOpsGuard = makeGuard([&] {
+ ScopeGuard numOpsGuard([&] {
if (client->session()) {
_numCurrentOps.subtractAndFetch(1);
}
});
onCreate(opCtx.get(), _clientObservers);
- auto onCreateGuard = makeGuard([&] { onDestroy(opCtx.get(), _clientObservers); });
+ ScopeGuard onCreateGuard([&] { onDestroy(opCtx.get(), _clientObservers); });
if (!opCtx->lockState()) {
opCtx->setLockState(std::make_unique<LockerNoop>());
@@ -265,7 +265,7 @@ ServiceContext::UniqueOperationContext ServiceContext::makeOperationContext(Clie
makeBaton(opCtx.get());
}
- auto batonGuard = makeGuard([&] { opCtx->getBaton()->detach(); });
+ ScopeGuard batonGuard([&] { opCtx->getBaton()->detach(); });
{
stdx::lock_guard<Client> lk(*client);
diff --git a/src/mongo/db/service_entry_point_common.cpp b/src/mongo/db/service_entry_point_common.cpp
index 64cf8a9ff1b..aec93184463 100644
--- a/src/mongo/db/service_entry_point_common.cpp
+++ b/src/mongo/db/service_entry_point_common.cpp
@@ -898,7 +898,7 @@ void CheckoutSessionAndInvokeCommand::_checkOutSession() {
// transactions on failure to unstash the transaction resources to opCtx. We don't want to
// have this error guard for beginOrContinue as it can abort the transaction for any
// accidental invalid statements in the transaction.
- auto abortOnError = makeGuard([&] {
+ ScopeGuard abortOnError([&] {
if (_txnParticipant->transactionIsInProgress()) {
_txnParticipant->abortTransaction(opCtx);
}
diff --git a/src/mongo/db/sessions_collection_rs.cpp b/src/mongo/db/sessions_collection_rs.cpp
index fc7f8977717..8d53352c455 100644
--- a/src/mongo/db/sessions_collection_rs.cpp
+++ b/src/mongo/db/sessions_collection_rs.cpp
@@ -107,7 +107,7 @@ auto SessionsCollectionRS::_dispatch(const NamespaceString& ns,
auto conn = _makePrimaryConnection(opCtx);
DBClientBase* client = conn->get();
- auto guard = makeGuard([&] { conn->done(); });
+ ScopeGuard guard([&] { conn->done(); });
try {
return std::forward<RemoteCallback>(remoteCallback)(client);
} catch (...) {
diff --git a/src/mongo/db/startup_recovery.cpp b/src/mongo/db/startup_recovery.cpp
index 655af5e36dd..179d4f35be3 100644
--- a/src/mongo/db/startup_recovery.cpp
+++ b/src/mongo/db/startup_recovery.cpp
@@ -160,7 +160,7 @@ bool checkIdIndexExists(OperationContext* opCtx, const CollectionPtr& coll) {
Status buildMissingIdIndex(OperationContext* opCtx, Collection* collection) {
LOGV2(4805002, "Building missing _id index", logAttrs(*collection));
MultiIndexBlock indexer;
- auto abortOnExit = makeGuard([&] {
+ ScopeGuard abortOnExit([&] {
CollectionWriter collWriter(collection);
indexer.abortIndexBuild(opCtx, collWriter, MultiIndexBlock::kNoopOnCleanUpFn);
});
@@ -465,7 +465,7 @@ void startupRepair(OperationContext* opCtx, StorageEngine* storageEngine) {
// document.
// If we fail to load the FCV document due to upgrade problems, we need to abort the repair in
// order to allow downgrading to older binary versions.
- auto abortRepairOnFCVErrors = makeGuard(
+ ScopeGuard abortRepairOnFCVErrors(
[&] { StorageRepairObserver::get(opCtx->getServiceContext())->onRepairDone(opCtx); });
if (auto fcvColl = CollectionCatalog::get(opCtx)->lookupCollectionByNamespace(
opCtx, NamespaceString::kServerConfigurationNamespace)) {
diff --git a/src/mongo/db/startup_warnings_common.cpp b/src/mongo/db/startup_warnings_common.cpp
index aa7dafc5cf8..947f6feab64 100644
--- a/src/mongo/db/startup_warnings_common.cpp
+++ b/src/mongo/db/startup_warnings_common.cpp
@@ -63,7 +63,7 @@ bool CheckPrivilegeEnabled(const wchar_t* name) {
return false;
}
- const auto accessTokenGuard = makeGuard([&] { CloseHandle(accessToken); });
+ const ScopeGuard accessTokenGuard([&] { CloseHandle(accessToken); });
BOOL ret;
PRIVILEGE_SET privileges;
diff --git a/src/mongo/db/stats/operation_latency_histogram_test.cpp b/src/mongo/db/stats/operation_latency_histogram_test.cpp
index c1cf84e082a..046def97078 100644
--- a/src/mongo/db/stats/operation_latency_histogram_test.cpp
+++ b/src/mongo/db/stats/operation_latency_histogram_test.cpp
@@ -106,7 +106,7 @@ TEST(OperationLatencyHistogram, CheckBucketCountsAndTotalLatencySlowBuckets) {
auto orig = serverGlobalParams.slowMS;
serverGlobalParams.slowMS = 100;
- auto g1 = makeGuard([orig] { serverGlobalParams.slowMS = orig; });
+ ScopeGuard g1 = [orig] { serverGlobalParams.slowMS = orig; };
// The additional +1 because of the first boundary.
uint64_t expectedSum = 3 * std::accumulate(kLowerBounds.begin(), kLowerBounds.end(), 0ULL) + 1;
diff --git a/src/mongo/db/storage/storage_engine_impl.cpp b/src/mongo/db/storage/storage_engine_impl.cpp
index edebfe78423..d8e990925b5 100644
--- a/src/mongo/db/storage/storage_engine_impl.cpp
+++ b/src/mongo/db/storage/storage_engine_impl.cpp
@@ -799,7 +799,7 @@ Status StorageEngineImpl::_dropCollectionsNoTimestamp(OperationContext* opCtx,
}
// Ensure the method exits with the same "commit timestamp" state that it was called with.
- auto addCommitTimestamp = makeGuard([&opCtx, commitTs] {
+ ScopeGuard addCommitTimestamp([&opCtx, commitTs] {
if (!commitTs.isNull()) {
opCtx->recoveryUnit()->setCommitTimestamp(commitTs);
}
diff --git a/src/mongo/db/storage/storage_engine_init.cpp b/src/mongo/db/storage/storage_engine_init.cpp
index 01f0278f175..d94179be8e4 100644
--- a/src/mongo/db/storage/storage_engine_init.cpp
+++ b/src/mongo/db/storage/storage_engine_init.cpp
@@ -149,7 +149,7 @@ StorageEngine::LastShutdownState initializeStorageEngine(OperationContext* opCtx
uassertStatusOK(factory->validateMetadata(*metadata, storageGlobalParams));
}
- auto guard = makeGuard([&] {
+ ScopeGuard guard([&] {
auto& lockFile = StorageEngineLockFile::get(service);
if (lockFile) {
lockFile->close();
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp
index 449fbc4bb55..f53d39bf0e8 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp
@@ -1210,7 +1210,7 @@ WiredTigerKVEngine::beginNonBlockingBackup(OperationContext* opCtx,
// Oplog truncation thread won't remove oplog since the checkpoint pinned by the backup cursor.
stdx::lock_guard<Latch> lock(_oplogPinnedByBackupMutex);
_oplogPinnedByBackup = Timestamp(_oplogNeededForCrashRecovery.load());
- auto pinOplogGuard = makeGuard([&] { _oplogPinnedByBackup = boost::none; });
+ ScopeGuard pinOplogGuard([&] { _oplogPinnedByBackup = boost::none; });
// Persist the sizeStorer information to disk before opening the backup cursor. We aren't
// guaranteed to have the most up-to-date size information after the backup as writes can still
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_oplog_manager.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_oplog_manager.cpp
index 7c68769472e..332f3bb0519 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_oplog_manager.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_oplog_manager.cpp
@@ -148,7 +148,7 @@ void WiredTigerOplogManager::waitForAllEarlierOplogWritesToBeVisible(
// this wait excessively.
++_opsWaitingForOplogVisibilityUpdate;
invariant(_opsWaitingForOplogVisibilityUpdate > 0);
- auto exitGuard = makeGuard([&] { --_opsWaitingForOplogVisibilityUpdate; });
+ ScopeGuard exitGuard([&] { --_opsWaitingForOplogVisibilityUpdate; });
// Out of order writes to the oplog always call triggerOplogVisibilityUpdate() on commit to
// prompt the OplogVisibilityThread to run and update the oplog visibility. We simply need to
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_util.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_util.cpp
index 3ef3a2fce2a..9376542bfa2 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_util.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_util.cpp
@@ -227,8 +227,9 @@ StatusWith<std::string> WiredTigerUtil::getMetadataCreate(OperationContext* opCt
LOGV2_FATAL_NOTRACE(51257, "Cursor not found", "error"_attr = ex);
}
invariant(cursor);
- auto releaser = makeGuard(
- [&] { session->releaseCursor(WiredTigerSession::kMetadataCreateTableId, cursor, ""); });
+ ScopeGuard releaser = [&] {
+ session->releaseCursor(WiredTigerSession::kMetadataCreateTableId, cursor, "");
+ };
return _getMetadata(cursor, uri);
}
@@ -257,8 +258,9 @@ StatusWith<std::string> WiredTigerUtil::getMetadata(OperationContext* opCtx, Str
LOGV2_FATAL_NOTRACE(31293, "Cursor not found", "error"_attr = ex);
}
invariant(cursor);
- auto releaser =
- makeGuard([&] { session->releaseCursor(WiredTigerSession::kMetadataTableId, cursor, ""); });
+ ScopeGuard releaser = [&] {
+ session->releaseCursor(WiredTigerSession::kMetadataTableId, cursor, "");
+ };
return _getMetadata(cursor, uri);
}
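The wiredtiger_util.cpp and replication_coordinator_impl.cpp hunks use the copy-initialization spelling (ScopeGuard releaser = [&] { ... };) instead of direct initialization, presumably because it reads better when the lambda body spans several lines. Both spellings deduce the same guard type through CTAD. A small check of the two forms, again assuming the minimal ScopeGuard sketched after the commit message (names here are illustrative only):

    #include <cstdio>

    int main() {
        // Direct-initialization, as in most call sites in this patch.
        ScopeGuard direct([] { std::puts("direct-init cleanup"); });

        // Copy-initialization, as in the multi-line lambdas above; same deduced type.
        ScopeGuard assigned = [] {
            std::puts("copy-init cleanup");
        };

        return 0;  // both guards run here, in reverse order of construction
    }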
diff --git a/src/mongo/db/traffic_reader.cpp b/src/mongo/db/traffic_reader.cpp
index b6de5022d0b..d6b5bc27a8f 100644
--- a/src/mongo/db/traffic_reader.cpp
+++ b/src/mongo/db/traffic_reader.cpp
@@ -217,7 +217,7 @@ BSONArray trafficRecordingFileToBSONArr(const std::string& inputFile) {
str::stream() << "Specified file does not exist (" << inputFile << ")",
inputFd > 0);
- const auto guard = makeGuard([&] { ::close(inputFd); });
+ const ScopeGuard guard([&] { ::close(inputFd); });
auto buf = SharedBuffer::allocate(MaxMessageSizeBytes);
while (auto packet = readPacket(buf.get(), inputFd)) {
diff --git a/src/mongo/db/transaction_participant.cpp b/src/mongo/db/transaction_participant.cpp
index c588ec4d22a..9c018294513 100644
--- a/src/mongo/db/transaction_participant.cpp
+++ b/src/mongo/db/transaction_participant.cpp
@@ -826,7 +826,7 @@ TransactionParticipant::TxnResources::~TxnResources() {
void TransactionParticipant::TxnResources::release(OperationContext* opCtx) {
// Perform operations that can fail the release before marking the TxnResources as released.
- auto onError = makeGuard([&] {
+ ScopeGuard onError([&] {
// Release any locks acquired as part of lock restoration.
if (_lockSnapshot) {
// WUOW should be released before unlocking.
@@ -1002,7 +1002,7 @@ void TransactionParticipant::Participant::_releaseTransactionResourcesToOpCtx(
}
();
- auto releaseOnError = makeGuard([&] {
+ ScopeGuard releaseOnError([&] {
// Restore the lock resources back to transaction participant.
using std::swap;
stdx::lock_guard<Client> lk(*opCtx->getClient());
@@ -1165,7 +1165,7 @@ void TransactionParticipant::Participant::refreshLocksForPreparedTransaction(
Timestamp TransactionParticipant::Participant::prepareTransaction(
OperationContext* opCtx, boost::optional<repl::OpTime> prepareOptime) {
- auto abortGuard = makeGuard([&] {
+ ScopeGuard abortGuard([&] {
// Prepare transaction on secondaries should always succeed.
invariant(!prepareOptime);
@@ -1415,7 +1415,7 @@ void TransactionParticipant::Participant::commitPreparedTransaction(
// Prepared transactions cannot hold the RSTL, or else they will deadlock with state
// transitions. If we do not commit the transaction we must unlock the RSTL explicitly so two
// phase locking doesn't hold onto it.
- auto unlockGuard = makeGuard([&] { invariant(opCtx->lockState()->unlockRSTLforPrepare()); });
+ ScopeGuard unlockGuard([&] { invariant(opCtx->lockState()->unlockRSTLforPrepare()); });
const auto replCoord = repl::ReplicationCoordinator::get(opCtx);
diff --git a/src/mongo/db/transaction_participant_test.cpp b/src/mongo/db/transaction_participant_test.cpp
index ea5f53ca6bb..d7567b1b003 100644
--- a/src/mongo/db/transaction_participant_test.cpp
+++ b/src/mongo/db/transaction_participant_test.cpp
@@ -1310,7 +1310,7 @@ TEST_F(TxnParticipantTest, CannotStartNewTransactionWhilePreparedTransactionInPr
txnParticipant.stashTransactionResources(opCtx());
OperationContextSession::checkIn(opCtx());
{
- auto guard = makeGuard([&]() { OperationContextSession::checkOut(opCtx()); });
+ ScopeGuard guard([&]() { OperationContextSession::checkOut(opCtx()); });
// Try to start a new transaction while there is already a prepared transaction on the
// session. This should fail with a PreparedTransactionInProgress error.
runFunctionFromDifferentOpCtx([lsid = *opCtx()->getLogicalSessionId(),
diff --git a/src/mongo/db/views/view_graph.cpp b/src/mongo/db/views/view_graph.cpp
index def5d50154d..4282a0469a4 100644
--- a/src/mongo/db/views/view_graph.cpp
+++ b/src/mongo/db/views/view_graph.cpp
@@ -65,7 +65,7 @@ Status ViewGraph::insertAndValidate(const ViewDefinition& view,
// If the graph fails validation for any reason, the insert is automatically rolled back on
// exiting this method.
- auto guard = makeGuard([&] { remove(viewNss); });
+ ScopeGuard guard([&] { remove(viewNss); });
// Check for cycles and get the height of the children.
StatsMap statsMap;