-rw-r--r--  src/mongo/base/secure_allocator.cpp  2
-rw-r--r--  src/mongo/bson/bsonobjbuilder.h  2
-rw-r--r--  src/mongo/client/dbclient_connection.cpp  6
-rw-r--r--  src/mongo/client/fetcher.cpp  2
-rw-r--r--  src/mongo/db/catalog/catalog_control.cpp  2
-rw-r--r--  src/mongo/db/catalog/database_holder_impl.cpp  2
-rw-r--r--  src/mongo/db/catalog/drop_database.cpp  8
-rw-r--r--  src/mongo/db/catalog/multi_index_block.cpp  4
-rw-r--r--  src/mongo/db/catalog/rename_collection.cpp  2
-rw-r--r--  src/mongo/db/client_out_of_line_executor.cpp  2
-rw-r--r--  src/mongo/db/commands/drop_indexes.cpp  2
-rw-r--r--  src/mongo/db/commands/getmore_cmd.cpp  2
-rw-r--r--  src/mongo/db/commands/run_aggregate.cpp  2
-rw-r--r--  src/mongo/db/commands/user_management_commands.cpp  6
-rw-r--r--  src/mongo/db/commands/write_commands.cpp  2
-rw-r--r--  src/mongo/db/concurrency/d_concurrency.cpp  4
-rw-r--r--  src/mongo/db/concurrency/d_concurrency_test.cpp  2
-rw-r--r--  src/mongo/db/concurrency/lock_state.cpp  6
-rw-r--r--  src/mongo/db/default_baton.cpp  2
-rw-r--r--  src/mongo/db/exec/delete.cpp  2
-rw-r--r--  src/mongo/db/exec/js_function.cpp  4
-rw-r--r--  src/mongo/db/exec/update_stage.cpp  2
-rw-r--r--  src/mongo/db/field_parser.h  2
-rw-r--r--  src/mongo/db/index/index_build_interceptor.cpp  2
-rw-r--r--  src/mongo/db/index_builds_coordinator.cpp  6
-rw-r--r--  src/mongo/db/index_builds_coordinator_mongod.cpp  4
-rw-r--r--  src/mongo/db/logical_session_cache_impl.cpp  6
-rw-r--r--  src/mongo/db/repl/collection_bulk_loader_impl.cpp  2
-rw-r--r--  src/mongo/db/repl/initial_syncer.cpp  2
-rw-r--r--  src/mongo/db/repl/oplog.cpp  2
-rw-r--r--  src/mongo/db/repl/repl_set_commands.cpp  2
-rw-r--r--  src/mongo/db/repl/replication_coordinator_impl.cpp  11
-rw-r--r--  src/mongo/db/repl/scatter_gather_runner.cpp  2
-rw-r--r--  src/mongo/db/repl/tenant_migration_recipient_service_test.cpp  2
-rw-r--r--  src/mongo/db/repl/topology_version_observer.cpp  2
-rw-r--r--  src/mongo/db/repl/transaction_oplog_application.cpp  2
-rw-r--r--  src/mongo/db/repl/vote_requester.cpp  2
-rw-r--r--  src/mongo/db/s/balancer/migration_manager.cpp  4
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager.cpp  2
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp  2
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_shard_operations.cpp  2
-rw-r--r--  src/mongo/db/s/dist_lock_manager.cpp  2
-rw-r--r--  src/mongo/db/s/dist_lock_manager_replset.cpp  2
-rw-r--r--  src/mongo/db/s/migration_destination_manager.cpp  4
-rw-r--r--  src/mongo/db/s/migration_source_manager.cpp  10
-rw-r--r--  src/mongo/db/s/move_primary_source_manager.cpp  6
-rw-r--r--  src/mongo/db/s/resharding/resharding_collection_cloner.cpp  2
-rw-r--r--  src/mongo/db/s/resharding/resharding_donor_oplog_iterator.cpp  2
-rw-r--r--  src/mongo/db/s/resharding/resharding_op_observer.cpp  2
-rw-r--r--  src/mongo/db/s/resharding/resharding_oplog_application.cpp  2
-rw-r--r--  src/mongo/db/s/resharding/resharding_service_test_helpers.cpp  2
-rw-r--r--  src/mongo/db/s/resharding/resharding_txn_cloner.cpp  2
-rw-r--r--  src/mongo/db/s/transaction_coordinator_service_test.cpp  4
-rw-r--r--  src/mongo/db/server_options_test.cpp  8
-rw-r--r--  src/mongo/db/service_context.cpp  6
-rw-r--r--  src/mongo/db/service_entry_point_common.cpp  2
-rw-r--r--  src/mongo/db/sessions_collection_rs.cpp  2
-rw-r--r--  src/mongo/db/startup_recovery.cpp  4
-rw-r--r--  src/mongo/db/startup_warnings_common.cpp  2
-rw-r--r--  src/mongo/db/stats/operation_latency_histogram_test.cpp  2
-rw-r--r--  src/mongo/db/storage/storage_engine_impl.cpp  2
-rw-r--r--  src/mongo/db/storage/storage_engine_init.cpp  2
-rw-r--r--  src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp  2
-rw-r--r--  src/mongo/db/storage/wiredtiger/wiredtiger_oplog_manager.cpp  2
-rw-r--r--  src/mongo/db/storage/wiredtiger/wiredtiger_util.cpp  10
-rw-r--r--  src/mongo/db/traffic_reader.cpp  2
-rw-r--r--  src/mongo/db/transaction_participant.cpp  8
-rw-r--r--  src/mongo/db/transaction_participant_test.cpp  2
-rw-r--r--  src/mongo/db/views/view_graph.cpp  2
-rw-r--r--  src/mongo/dbtests/dbtests.cpp  2
-rw-r--r--  src/mongo/dbtests/indexupdatetests.cpp  8
-rw-r--r--  src/mongo/dbtests/mock/mock_dbclient_connection.cpp  4
-rw-r--r--  src/mongo/dbtests/querytests.cpp  2
-rw-r--r--  src/mongo/dbtests/storage_timestamp_tests.cpp  10
-rw-r--r--  src/mongo/dbtests/validate_tests.cpp  74
-rw-r--r--  src/mongo/dbtests/wildcard_multikey_persistence_test.cpp  2
-rw-r--r--  src/mongo/embedded/embedded.cpp  4
-rw-r--r--  src/mongo/embedded/mongo_embedded/mongo_embedded_test.cpp  2
-rw-r--r--  src/mongo/embedded/stitch_support/stitch_support_test.cpp  1
-rw-r--r--  src/mongo/executor/connection_pool_test.cpp  4
-rw-r--r--  src/mongo/executor/network_interface_perf_test.cpp  2
-rw-r--r--  src/mongo/executor/network_interface_thread_pool.cpp  4
-rw-r--r--  src/mongo/installer/msi/ca/CustomAction.cpp  2
-rw-r--r--  src/mongo/logv2/log_manager.cpp  2
-rw-r--r--  src/mongo/logv2/logv2_test.cpp  2
-rw-r--r--  src/mongo/rpc/metadata/client_metadata_test.cpp  2
-rw-r--r--  src/mongo/s/commands/strategy.cpp  2
-rw-r--r--  src/mongo/s/query/cluster_find.cpp  2
-rw-r--r--  src/mongo/s/transaction_router.cpp  4
-rw-r--r--  src/mongo/scripting/mozjs/implscope.cpp  6
-rw-r--r--  src/mongo/scripting/mozjs/proxyscope.cpp  4
-rw-r--r--  src/mongo/shell/encrypted_dbclient_base.cpp  2
-rw-r--r--  src/mongo/shell/mongo_main.cpp  2
-rw-r--r--  src/mongo/tools/bridge.cpp  2
-rw-r--r--  src/mongo/transport/baton_asio_linux.h  2
-rw-r--r--  src/mongo/transport/service_executor_reserved.cpp  2
-rw-r--r--  src/mongo/transport/service_executor_test.cpp  2
-rw-r--r--  src/mongo/transport/service_executor_utils.cpp  2
-rw-r--r--  src/mongo/transport/transport_layer_asio_integration_test.cpp  8
-rw-r--r--  src/mongo/unittest/death_test.cpp  6
-rw-r--r--  src/mongo/unittest/thread_assertion_monitor.h  2
-rw-r--r--  src/mongo/util/decoration_registry.h  4
-rw-r--r--  src/mongo/util/fail_point_test.cpp  6
-rw-r--r--  src/mongo/util/log_with_sampling_test.cpp  2
-rw-r--r--  src/mongo/util/net/hostname_canonicalization.cpp  2
-rw-r--r--  src/mongo/util/net/http_client_winhttp.cpp  2
-rw-r--r--  src/mongo/util/net/openssl_init.cpp  2
-rw-r--r--  src/mongo/util/net/ssl_manager_openssl.cpp  4
-rw-r--r--  src/mongo/util/options_parser/options_parser.cpp  4
-rw-r--r--  src/mongo/util/perfctr_collect.cpp  2
-rw-r--r--  src/mongo/util/procparser.cpp  2
-rw-r--r--  src/mongo/util/producer_consumer_queue.h  4
-rw-r--r--  src/mongo/util/stacktrace_threads.cpp  2
113 files changed, 219 insertions, 217 deletions
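
The change below is mechanical: every use of the makeGuard() factory function is replaced with direct construction of ScopeGuard, relying on C++17 class template argument deduction (CTAD). To make the idiom concrete, here is a minimal stand-in guard type; this is a sketch only, not MongoDB's actual implementation (which lives in src/mongo/util/scopeguard.h and carries more machinery):

// scopeguard_sketch.cpp -- illustrative stand-in, not MongoDB's implementation.
#include <cstdio>
#include <utility>

template <typename F>
class ScopeGuard {
public:
    ScopeGuard(F f) : _func(std::move(f)) {}  // non-explicit: also permits "= lambda"
    ScopeGuard(const ScopeGuard&) = delete;
    ScopeGuard& operator=(const ScopeGuard&) = delete;
    ~ScopeGuard() {
        if (_active)
            _func();  // run the cleanup on scope exit
    }
    void dismiss() noexcept {
        _active = false;  // cancel the cleanup
    }

private:
    F _func;
    bool _active = true;
};

// The old spelling: a factory so the template argument is deduced at the call site.
template <typename F>
ScopeGuard<F> makeGuard(F f) {
    return ScopeGuard<F>(std::move(f));
}

int main() {
    auto oldStyle = makeGuard([] { std::puts("old-style cleanup"); });
    ScopeGuard newStyle([] { std::puts("new-style cleanup"); });  // CTAD, post-commit spelling
    return 0;  // guards fire in reverse construction order on scope exit
}

Both spellings are equivalent at runtime; the commit simply drops the now-redundant factory indirection.
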
diff --git a/src/mongo/base/secure_allocator.cpp b/src/mongo/base/secure_allocator.cpp
index fca3e4ab82c..f59aee7b96c 100644
--- a/src/mongo/base/secure_allocator.cpp
+++ b/src/mongo/base/secure_allocator.cpp
@@ -86,7 +86,7 @@ void EnablePrivilege(const wchar_t* name) {
return;
}
- const auto accessTokenGuard = makeGuard([&] { CloseHandle(accessToken); });
+ const ScopeGuard accessTokenGuard([&] { CloseHandle(accessToken); });
TOKEN_PRIVILEGES privileges = {0};
diff --git a/src/mongo/bson/bsonobjbuilder.h b/src/mongo/bson/bsonobjbuilder.h
index 50ee973d931..3a527c72850 100644
--- a/src/mongo/bson/bsonobjbuilder.h
+++ b/src/mongo/bson/bsonobjbuilder.h
@@ -585,7 +585,7 @@ public:
// None of the code which resets this builder to the not-done state is expected to throw.
// If it does, that would be a violation of our expectations.
- auto resetObjectState = makeGuard([this]() noexcept {
+ ScopeGuard resetObjectState([this]() noexcept {
// Immediately after the buffer for the ephemeral space created by the call to `_done()`
// is ready, reset our state to not-done.
_doneCalled = false;
diff --git a/src/mongo/client/dbclient_connection.cpp b/src/mongo/client/dbclient_connection.cpp
index 9770437645b..48b7977992c 100644
--- a/src/mongo/client/dbclient_connection.cpp
+++ b/src/mongo/client/dbclient_connection.cpp
@@ -704,7 +704,7 @@ DBClientConnection::DBClientConnection(bool _autoReconnect,
void DBClientConnection::say(Message& toSend, bool isRetry, string* actualServer) {
checkConnection();
- auto killSessionOnError = makeGuard([this] { _markFailed(kEndSession); });
+ ScopeGuard killSessionOnError([this] { _markFailed(kEndSession); });
toSend.header().setId(nextMessageId());
toSend.header().setResponseToMsgId(0);
@@ -723,7 +723,7 @@ void DBClientConnection::say(Message& toSend, bool isRetry, string* actualServer
}
Status DBClientConnection::recv(Message& m, int lastRequestId) {
- auto killSessionOnError = makeGuard([this] { _markFailed(kEndSession); });
+ ScopeGuard killSessionOnError([this] { _markFailed(kEndSession); });
auto swm = _session->sourceMessage();
if (!swm.isOK()) {
return swm.getStatus();
@@ -747,7 +747,7 @@ bool DBClientConnection::call(Message& toSend,
bool assertOk,
string* actualServer) {
checkConnection();
- auto killSessionOnError = makeGuard([this] { _markFailed(kEndSession); });
+ ScopeGuard killSessionOnError([this] { _markFailed(kEndSession); });
auto maybeThrow = [&](const auto& errStatus) {
if (assertOk)
uassertStatusOKWithContext(errStatus,
diff --git a/src/mongo/client/fetcher.cpp b/src/mongo/client/fetcher.cpp
index 4dc580c5d12..75cbf7f1d0d 100644
--- a/src/mongo/client/fetcher.cpp
+++ b/src/mongo/client/fetcher.cpp
@@ -339,7 +339,7 @@ Status Fetcher::_scheduleGetMore(const BSONObj& cmdObj) {
void Fetcher::_callback(const RemoteCommandCallbackArgs& rcbd, const char* batchFieldName) {
QueryResponse batchData;
- auto finishCallbackGuard = makeGuard([this, &batchData] {
+ ScopeGuard finishCallbackGuard([this, &batchData] {
if (batchData.cursorId && !batchData.nss.isEmpty()) {
_sendKillCursors(batchData.cursorId, batchData.nss);
}
diff --git a/src/mongo/db/catalog/catalog_control.cpp b/src/mongo/db/catalog/catalog_control.cpp
index c5c1bb95f63..2df8a69f650 100644
--- a/src/mongo/db/catalog/catalog_control.cpp
+++ b/src/mongo/db/catalog/catalog_control.cpp
@@ -82,7 +82,7 @@ MinVisibleTimestampMap closeCatalog(OperationContext* opCtx) {
}
// Need to mark the CollectionCatalog as open if our closeAll fails, dismissed if successful.
- auto reopenOnFailure = makeGuard([opCtx] {
+ ScopeGuard reopenOnFailure([opCtx] {
CollectionCatalog::write(opCtx,
[&](CollectionCatalog& catalog) { catalog.onOpenCatalog(opCtx); });
});
diff --git a/src/mongo/db/catalog/database_holder_impl.cpp b/src/mongo/db/catalog/database_holder_impl.cpp
index b3b81174a77..35fa81a7e3e 100644
--- a/src/mongo/db/catalog/database_holder_impl.cpp
+++ b/src/mongo/db/catalog/database_holder_impl.cpp
@@ -137,7 +137,7 @@ Database* DatabaseHolderImpl::openDb(OperationContext* opCtx, StringData ns, boo
return db;
// We've inserted a nullptr entry for dbname: make sure to remove it on unsuccessful exit.
- auto removeDbGuard = makeGuard([this, &lk, dbname] {
+ ScopeGuard removeDbGuard([this, &lk, dbname] {
if (!lk.owns_lock())
lk.lock();
_dbs.erase(dbname);
diff --git a/src/mongo/db/catalog/drop_database.cpp b/src/mongo/db/catalog/drop_database.cpp
index 953b19e599a..a75e574a8b6 100644
--- a/src/mongo/db/catalog/drop_database.cpp
+++ b/src/mongo/db/catalog/drop_database.cpp
@@ -93,7 +93,7 @@ void _finishDropDatabase(OperationContext* opCtx,
invariant(opCtx->lockState()->isDbLockedForMode(dbName, MODE_X));
// If DatabaseHolder::dropDb() fails, we should reset the drop-pending state on Database.
- auto dropPendingGuard = makeGuard([db, opCtx] { db->setDropPending(opCtx, false); });
+ ScopeGuard dropPendingGuard([db, opCtx] { db->setDropPending(opCtx, false); });
if (!abortIndexBuilds) {
IndexBuildsCoordinator::get(opCtx)->assertNoBgOpInProgForDb(dbName);
@@ -170,7 +170,7 @@ Status _dropDatabase(OperationContext* opCtx, const std::string& dbName, bool ab
// If Database::dropCollectionEventIfSystem() fails, we should reset the drop-pending state
// on Database.
- auto dropPendingGuard = makeGuard([&db, opCtx] { db->setDropPending(opCtx, false); });
+ ScopeGuard dropPendingGuard([&db, opCtx] { db->setDropPending(opCtx, false); });
auto indexBuildsCoord = IndexBuildsCoordinator::get(opCtx);
if (abortIndexBuilds) {
@@ -180,7 +180,7 @@ Status _dropDatabase(OperationContext* opCtx, const std::string& dbName, bool ab
// Create a scope guard to reset the drop-pending state on the database to false if
// there is a replica state change that kills this operation while the locks were
// yielded.
- auto dropPendingGuardWhileUnlocked = makeGuard([dbName, opCtx, &dropPendingGuard] {
+ ScopeGuard dropPendingGuardWhileUnlocked([dbName, opCtx, &dropPendingGuard] {
UninterruptibleLockGuard noInterrupt(opCtx->lockState());
AutoGetDb autoDB(opCtx, dbName, MODE_IX);
if (auto db = autoDB.getDb()) {
@@ -296,7 +296,7 @@ Status _dropDatabase(OperationContext* opCtx, const std::string& dbName, bool ab
// Create a scope guard to reset the drop-pending state on the Database to false if there are
// any errors while we await the replication of any collection drops and then reacquire the
// locks (which can throw) needed to finish the drop database.
- auto dropPendingGuardWhileUnlocked = makeGuard([dbName, opCtx] {
+ ScopeGuard dropPendingGuardWhileUnlocked([dbName, opCtx] {
UninterruptibleLockGuard noInterrupt(opCtx->lockState());
AutoGetDb autoDB(opCtx, dbName, MODE_IX);
if (auto db = autoDB.getDb()) {
diff --git a/src/mongo/db/catalog/multi_index_block.cpp b/src/mongo/db/catalog/multi_index_block.cpp
index 5b99bf84e1e..137793a4c42 100644
--- a/src/mongo/db/catalog/multi_index_block.cpp
+++ b/src/mongo/db/catalog/multi_index_block.cpp
@@ -365,7 +365,7 @@ Status MultiIndexBlock::insertAllDocumentsInCollection(
// Refrain from persisting any multikey updates as a result from building the index. Instead,
// accumulate them in the `MultikeyPathTracker` and do the write as part of the update that
// commits the index.
- auto stopTracker = makeGuard(
+ ScopeGuard stopTracker(
[this, opCtx] { MultikeyPathTracker::get(opCtx).stopTrackingMultikeyPathInfo(); });
if (MultikeyPathTracker::get(opCtx).isTrackingMultikeyPathInfo()) {
stopTracker.dismiss();
@@ -846,7 +846,7 @@ Status MultiIndexBlock::commit(OperationContext* opCtx,
}
// Do not interfere with writing multikey information when committing index builds.
- auto restartTracker = makeGuard(
+ ScopeGuard restartTracker(
[this, opCtx] { MultikeyPathTracker::get(opCtx).startTrackingMultikeyPathInfo(); });
if (!MultikeyPathTracker::get(opCtx).isTrackingMultikeyPathInfo()) {
restartTracker.dismiss();
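
The two multi_index_block.cpp hunks above also show the dismiss() pattern: the guard is armed unconditionally and then disarmed once the cleanup turns out to be unnecessary (here, when the MultikeyPathTracker is already in the desired state). A self-contained sketch of that shape, using a stand-in guard like the one shown after the diffstat:

#include <cstdio>
#include <utility>

template <typename F>
struct ScopeGuard {  // stand-in, as in the earlier sketch
    F func;
    bool active = true;
    ScopeGuard(F f) : func(std::move(f)) {}
    ~ScopeGuard() { if (active) func(); }
    void dismiss() noexcept { active = false; }
};

static bool alreadyTracking = false;  // hypothetical stand-in for the tracker state

int main() {
    ScopeGuard stopTracker([] { std::puts("stop tracking"); });
    if (alreadyTracking) {
        // A broader operation owns the tracker; leave it running on exit.
        stopTracker.dismiss();
    }
    std::puts("build index");
    return 0;  // prints "build index", then the guard prints "stop tracking"
}
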
diff --git a/src/mongo/db/catalog/rename_collection.cpp b/src/mongo/db/catalog/rename_collection.cpp
index 06f1fa8bc35..dddf2f4ffb2 100644
--- a/src/mongo/db/catalog/rename_collection.cpp
+++ b/src/mongo/db/catalog/rename_collection.cpp
@@ -573,7 +573,7 @@ Status renameBetweenDBs(OperationContext* opCtx,
}
// Dismissed on success
- auto tmpCollectionDropper = makeGuard([&] {
+ ScopeGuard tmpCollectionDropper([&] {
Status status = Status::OK();
try {
status = dropCollectionForApplyOps(
diff --git a/src/mongo/db/client_out_of_line_executor.cpp b/src/mongo/db/client_out_of_line_executor.cpp
index f82612266ba..785b090043b 100644
--- a/src/mongo/db/client_out_of_line_executor.cpp
+++ b/src/mongo/db/client_out_of_line_executor.cpp
@@ -113,7 +113,7 @@ void ClientOutOfLineExecutor::consumeAllTasks() noexcept {
}
void ClientOutOfLineExecutor::QueueHandle::schedule(Task&& task) {
- auto guard = makeGuard(
+ ScopeGuard guard(
[&task] { task(Status(ErrorCodes::CallbackCanceled, "Client no longer exists")); });
if (auto queue = _weakQueue.lock()) {
diff --git a/src/mongo/db/commands/drop_indexes.cpp b/src/mongo/db/commands/drop_indexes.cpp
index 781e71259e6..dfbf68fa9c8 100644
--- a/src/mongo/db/commands/drop_indexes.cpp
+++ b/src/mongo/db/commands/drop_indexes.cpp
@@ -232,7 +232,7 @@ public:
});
// The 'indexer' can throw, so ensure build cleanup occurs.
- auto abortOnExit = makeGuard([&] {
+ ScopeGuard abortOnExit([&] {
indexer->abortIndexBuild(opCtx, collection, MultiIndexBlock::kNoopOnCleanUpFn);
});
diff --git a/src/mongo/db/commands/getmore_cmd.cpp b/src/mongo/db/commands/getmore_cmd.cpp
index 9268249469a..75fba16a90b 100644
--- a/src/mongo/db/commands/getmore_cmd.cpp
+++ b/src/mongo/db/commands/getmore_cmd.cpp
@@ -512,7 +512,7 @@ public:
}
// On early return, get rid of the cursor.
- auto cursorFreer = makeGuard([&] { cursorPin.deleteUnderlying(); });
+ ScopeGuard cursorFreer([&] { cursorPin.deleteUnderlying(); });
// If the 'waitAfterPinningCursorBeforeGetMoreBatch' fail point is enabled, set the
// 'msg' field of this operation's CurOp to signal that we've hit this point and then
diff --git a/src/mongo/db/commands/run_aggregate.cpp b/src/mongo/db/commands/run_aggregate.cpp
index 87b8454f518..0102b719aac 100644
--- a/src/mongo/db/commands/run_aggregate.cpp
+++ b/src/mongo/db/commands/run_aggregate.cpp
@@ -844,7 +844,7 @@ Status runAggregate(OperationContext* opCtx,
std::vector<ClientCursorPin> pins;
std::vector<ClientCursor*> cursors;
- auto cursorFreer = makeGuard([&] {
+ ScopeGuard cursorFreer([&] {
for (auto& p : pins) {
p.deleteUnderlying();
}
diff --git a/src/mongo/db/commands/user_management_commands.cpp b/src/mongo/db/commands/user_management_commands.cpp
index d4da340dc5b..5990af74f85 100644
--- a/src/mongo/db/commands/user_management_commands.cpp
+++ b/src/mongo/db/commands/user_management_commands.cpp
@@ -1836,7 +1836,7 @@ void CmdUMCTyped<DropRoleCommand>::Invocation::typedRun(OperationContext* opCtx)
uassertStatusOK(authzManager->rolesExist(opCtx, {roleName}));
// From here on, we always want to invalidate the user cache before returning.
- auto invalidateGuard = makeGuard([&] {
+ ScopeGuard invalidateGuard([&] {
try {
authzManager->invalidateUserCache(opCtx);
} catch (const AssertionException& ex) {
@@ -1896,7 +1896,7 @@ DropAllRolesFromDatabaseReply CmdUMCTyped<DropAllRolesFromDatabaseCommand>::Invo
auto lk = uassertStatusOK(requireWritableAuthSchema28SCRAM(opCtx, authzManager));
// From here on, we always want to invalidate the user cache before returning.
- auto invalidateGuard = makeGuard([opCtx, authzManager] {
+ ScopeGuard invalidateGuard([opCtx, authzManager] {
try {
authzManager->invalidateUserCache(opCtx);
} catch (const AssertionException& ex) {
@@ -2394,7 +2394,7 @@ void CmdMergeAuthzCollections::Invocation::typedRun(OperationContext* opCtx) {
auto lk = uassertStatusOK(requireWritableAuthSchema28SCRAM(opCtx, authzManager));
// From here on, we always want to invalidate the user cache before returning.
- auto invalidateGuard = makeGuard([&] { authzManager->invalidateUserCache(opCtx); });
+ ScopeGuard invalidateGuard([&] { authzManager->invalidateUserCache(opCtx); });
const auto db = cmd.getDb();
const bool drop = cmd.getDrop();
diff --git a/src/mongo/db/commands/write_commands.cpp b/src/mongo/db/commands/write_commands.cpp
index 3d78ebe912c..3de92c37fc3 100644
--- a/src/mongo/db/commands/write_commands.cpp
+++ b/src/mongo/db/commands/write_commands.cpp
@@ -667,7 +667,7 @@ public:
return;
}
// Now that the batch is prepared, make sure we clean up if we throw.
- auto batchGuard = makeGuard([&] { bucketCatalog.abort(batch); });
+ ScopeGuard batchGuard([&] { bucketCatalog.abort(batch); });
hangTimeseriesInsertBeforeWrite.pauseWhileSet();
diff --git a/src/mongo/db/concurrency/d_concurrency.cpp b/src/mongo/db/concurrency/d_concurrency.cpp
index 115540aaa64..5e8bd537b55 100644
--- a/src/mongo/db/concurrency/d_concurrency.cpp
+++ b/src/mongo/db/concurrency/d_concurrency.cpp
@@ -154,7 +154,7 @@ Lock::GlobalLock::GlobalLock(OperationContext* opCtx,
if (_opCtx->lockState()->shouldConflictWithSecondaryBatchApplication()) {
_pbwm.lock(opCtx, MODE_IS, deadline);
}
- auto unlockPBWM = makeGuard([this] {
+ ScopeGuard unlockPBWM([this] {
if (_opCtx->lockState()->shouldConflictWithSecondaryBatchApplication()) {
_pbwm.unlock();
}
@@ -184,7 +184,7 @@ void Lock::GlobalLock::_takeGlobalLockOnly(LockMode lockMode, Date_t deadline) {
void Lock::GlobalLock::_takeGlobalAndRSTLLocks(LockMode lockMode, Date_t deadline) {
_opCtx->lockState()->lock(_opCtx, resourceIdReplicationStateTransitionLock, MODE_IX, deadline);
- auto unlockRSTL = makeGuard(
+ ScopeGuard unlockRSTL(
[this] { _opCtx->lockState()->unlock(resourceIdReplicationStateTransitionLock); });
_opCtx->lockState()->lockGlobal(_opCtx, lockMode, deadline);
diff --git a/src/mongo/db/concurrency/d_concurrency_test.cpp b/src/mongo/db/concurrency/d_concurrency_test.cpp
index 6154cb14b49..9af772ff4fe 100644
--- a/src/mongo/db/concurrency/d_concurrency_test.cpp
+++ b/src/mongo/db/concurrency/d_concurrency_test.cpp
@@ -113,7 +113,7 @@ public:
auto result = task.get_future();
stdx::thread taskThread{std::move(task)};
- auto taskThreadJoiner = makeGuard([&] { taskThread.join(); });
+ ScopeGuard taskThreadJoiner([&] { taskThread.join(); });
{
stdx::lock_guard<Client> clientLock(*opCtx->getClient());
diff --git a/src/mongo/db/concurrency/lock_state.cpp b/src/mongo/db/concurrency/lock_state.cpp
index eb833039667..e984ef4fffc 100644
--- a/src/mongo/db/concurrency/lock_state.cpp
+++ b/src/mongo/db/concurrency/lock_state.cpp
@@ -367,7 +367,7 @@ bool LockerImpl::_acquireTicket(OperationContext* opCtx, LockMode mode, Date_t d
_clientState.store(reader ? kQueuedReader : kQueuedWriter);
// If the ticket wait is interrupted, restore the state of the client.
- auto restoreStateOnErrorGuard = makeGuard([&] { _clientState.store(kInactive); });
+ ScopeGuard restoreStateOnErrorGuard([&] { _clientState.store(kInactive); });
// Acquiring a ticket is a potentially blocking operation. This must not be called after a
// transaction timestamp has been set, indicating this transaction has created an oplog
@@ -937,7 +937,7 @@ void LockerImpl::_lockComplete(OperationContext* opCtx,
}
// Clean up the state on any failed lock attempts.
- auto unlockOnErrorGuard = makeGuard([&] {
+ ScopeGuard unlockOnErrorGuard([&] {
LockRequestsMap::Iterator it = _requests.find(resId);
invariant(it);
_unlockImpl(&it);
@@ -1037,7 +1037,7 @@ void LockerImpl::getFlowControlTicket(OperationContext* opCtx, LockMode lockMode
// method must not exit with a side-effect on the clientState. That value is also used for
// tracking whether other resources need to be released.
_clientState.store(kQueuedWriter);
- auto restoreState = makeGuard([&] { _clientState.store(kInactive); });
+ ScopeGuard restoreState([&] { _clientState.store(kInactive); });
// Acquiring a ticket is a potentially blocking operation. This must not be called after a
// transaction timestamp has been set, indicating this transaction has created an oplog
// hole.
diff --git a/src/mongo/db/default_baton.cpp b/src/mongo/db/default_baton.cpp
index b361cf38346..3f7e73fa59d 100644
--- a/src/mongo/db/default_baton.cpp
+++ b/src/mongo/db/default_baton.cpp
@@ -107,7 +107,7 @@ Waitable::TimeoutState DefaultBaton::run_until(ClockSource* clkSource,
stdx::unique_lock<Latch> lk(_mutex);
// We'll fulfill promises and run jobs on the way out, ensuring we don't hold any locks
- const auto guard = makeGuard([&] {
+ const ScopeGuard guard([&] {
// While we have scheduled work, keep running jobs
while (_scheduled.size()) {
auto toRun = std::exchange(_scheduled, {});
diff --git a/src/mongo/db/exec/delete.cpp b/src/mongo/db/exec/delete.cpp
index b5095bb8eda..8cc3c5fafb6 100644
--- a/src/mongo/db/exec/delete.cpp
+++ b/src/mongo/db/exec/delete.cpp
@@ -139,7 +139,7 @@ PlanStage::StageState DeleteStage::doWork(WorkingSetID* out) {
WorkingSetMember* member = _ws->get(id);
// We want to free this member when we return, unless we need to retry deleting or returning it.
- auto memberFreer = makeGuard([&] { _ws->free(id); });
+ ScopeGuard memberFreer([&] { _ws->free(id); });
invariant(member->hasRecordId());
RecordId recordId = member->recordId;
diff --git a/src/mongo/db/exec/js_function.cpp b/src/mongo/db/exec/js_function.cpp
index ae6e479c521..58e3a880b00 100644
--- a/src/mongo/db/exec/js_function.cpp
+++ b/src/mongo/db/exec/js_function.cpp
@@ -64,7 +64,7 @@ JsFunction::JsFunction(OperationContext* opCtx,
const auto userToken = getAuthenticatedUserNamesToken(opCtx->getClient());
_scope = getGlobalScriptEngine()->getPooledScope(opCtx, dbName, "where" + userToken);
- const auto guard = makeGuard([&] { _scope->unregisterOperation(); });
+ const ScopeGuard guard([&] { _scope->unregisterOperation(); });
_func = _scope->createFunction(code.c_str());
uassert(ErrorCodes::BadValue, "$where compile error", _func);
@@ -72,7 +72,7 @@ JsFunction::JsFunction(OperationContext* opCtx,
bool JsFunction::runAsPredicate(const BSONObj& obj) const {
_scope->registerOperation(Client::getCurrent()->getOperationContext());
- const auto scopeOpCtxGuard = makeGuard([&] { _scope->unregisterOperation(); });
+ const ScopeGuard scopeOpCtxGuard([&] { _scope->unregisterOperation(); });
_scope->advanceGeneration();
_scope->setObject("obj", obj);
diff --git a/src/mongo/db/exec/update_stage.cpp b/src/mongo/db/exec/update_stage.cpp
index 708c25dfe30..5859e85e72f 100644
--- a/src/mongo/db/exec/update_stage.cpp
+++ b/src/mongo/db/exec/update_stage.cpp
@@ -402,7 +402,7 @@ PlanStage::StageState UpdateStage::doWork(WorkingSetID* out) {
// We want to free this member when we return, unless we need to retry updating or returning
// it.
- auto memberFreer = makeGuard([&] { _ws->free(id); });
+ ScopeGuard memberFreer([&] { _ws->free(id); });
invariant(member->hasRecordId());
recordId = member->recordId;
diff --git a/src/mongo/db/field_parser.h b/src/mongo/db/field_parser.h
index 45cbf50506b..078668d28f9 100644
--- a/src/mongo/db/field_parser.h
+++ b/src/mongo/db/field_parser.h
@@ -548,7 +548,7 @@ FieldParser::FieldState FieldParser::extract(BSONObj doc,
}
auto tempVector = std::make_unique<std::vector<T*>>();
- auto guard = makeGuard([&tempVector] {
+ ScopeGuard guard([&tempVector] {
if (tempVector) {
for (T*& raw : *tempVector) {
delete raw;
diff --git a/src/mongo/db/index/index_build_interceptor.cpp b/src/mongo/db/index/index_build_interceptor.cpp
index 77d13478b30..88a0690a8c6 100644
--- a/src/mongo/db/index/index_build_interceptor.cpp
+++ b/src/mongo/db/index/index_build_interceptor.cpp
@@ -80,7 +80,7 @@ IndexBuildInterceptor::IndexBuildInterceptor(OperationContext* opCtx,
_skippedRecordTracker(opCtx, entry, skippedRecordTrackerIdent),
_skipNumAppliedCheck(true) {
- auto finalizeTableOnFailure = makeGuard([&] {
+ ScopeGuard finalizeTableOnFailure([&] {
_sideWritesTable->finalizeTemporaryTable(opCtx,
TemporaryRecordStore::FinalizationAction::kDelete);
});
diff --git a/src/mongo/db/index_builds_coordinator.cpp b/src/mongo/db/index_builds_coordinator.cpp
index cb1f3252884..18ca33e224b 100644
--- a/src/mongo/db/index_builds_coordinator.cpp
+++ b/src/mongo/db/index_builds_coordinator.cpp
@@ -1578,7 +1578,7 @@ void IndexBuildsCoordinator::createIndex(OperationContext* opCtx,
throw;
}
- auto abortOnExit = makeGuard([&] {
+ ScopeGuard abortOnExit([&] {
_indexBuildsManager.abortIndexBuild(
opCtx, collection, buildUUID, MultiIndexBlock::kNoopOnCleanUpFn);
});
@@ -2329,7 +2329,7 @@ void IndexBuildsCoordinator::_scanCollectionAndInsertSortedKeysIntoIndex(
// Collection scan and insert into index.
{
- auto scopeGuard = makeGuard([&] {
+ ScopeGuard scopeGuard([&] {
opCtx->recoveryUnit()->setTimestampReadSource(RecoveryUnit::ReadSource::kNoTimestamp);
});
@@ -2504,7 +2504,7 @@ IndexBuildsCoordinator::CommitResult IndexBuildsCoordinator::_insertKeysFromSide
// While we are still holding the RSTL and before returning, ensure the metrics collected for
// this index build are attributed to the primary that commits or aborts the index build.
- auto metricsGuard = makeGuard([&]() {
+ ScopeGuard metricsGuard([&]() {
auto& collector = ResourceConsumption::MetricsCollector::get(opCtx);
bool wasCollecting = collector.endScopedCollecting();
if (!isPrimary || !wasCollecting || !ResourceConsumption::isMetricsAggregationEnabled()) {
diff --git a/src/mongo/db/index_builds_coordinator_mongod.cpp b/src/mongo/db/index_builds_coordinator_mongod.cpp
index 87603bc2869..d89065fc419 100644
--- a/src/mongo/db/index_builds_coordinator_mongod.cpp
+++ b/src/mongo/db/index_builds_coordinator_mongod.cpp
@@ -218,7 +218,7 @@ IndexBuildsCoordinatorMongod::_startIndexBuild(OperationContext* opCtx,
}
}
- auto onScopeExitGuard = makeGuard([&] {
+ ScopeGuard onScopeExitGuard([&] {
stdx::unique_lock<Latch> lk(_throttlingMutex);
_numActiveIndexBuilds--;
_indexBuildFinished.notify_one();
@@ -328,7 +328,7 @@ IndexBuildsCoordinatorMongod::_startIndexBuild(OperationContext* opCtx,
resumeInfo,
impersonatedClientAttrs = std::move(impersonatedClientAttrs)
](auto status) mutable noexcept {
- auto onScopeExitGuard = makeGuard([&] {
+ ScopeGuard onScopeExitGuard([&] {
stdx::unique_lock<Latch> lk(_throttlingMutex);
_numActiveIndexBuilds--;
_indexBuildFinished.notify_one();
diff --git a/src/mongo/db/logical_session_cache_impl.cpp b/src/mongo/db/logical_session_cache_impl.cpp
index 52bee61ed64..43f800c6820 100644
--- a/src/mongo/db/logical_session_cache_impl.cpp
+++ b/src/mongo/db/logical_session_cache_impl.cpp
@@ -258,7 +258,7 @@ void LogicalSessionCacheImpl::_refresh(Client* client) {
}
// This will finish timing _refresh for our stats no matter when we return.
- const auto timeRefreshJob = makeGuard([this] {
+ const ScopeGuard timeRefreshJob([this] {
stdx::lock_guard<Latch> lk(_mutex);
auto millis = _service->now() - _stats.getLastSessionsCollectionJobTimestamp();
_stats.setLastSessionsCollectionJobDurationMillis(millis.count());
@@ -299,9 +299,9 @@ void LogicalSessionCacheImpl::_refresh(Client* client) {
member.emplace(it);
}
};
- auto activeSessionsBackSwapper = makeGuard([&] { backSwap(_activeSessions, activeSessions); });
+ ScopeGuard activeSessionsBackSwapper([&] { backSwap(_activeSessions, activeSessions); });
auto explicitlyEndingBackSwaper =
- makeGuard([&] { backSwap(_endingSessions, explicitlyEndingSessions); });
+ ScopeGuard([&] { backSwap(_endingSessions, explicitlyEndingSessions); });
// remove all explicitlyEndingSessions from activeSessions
for (const auto& lsid : explicitlyEndingSessions) {
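
Note that the hunk above keeps the `auto name = ScopeGuard([...]);` spelling. Under C++17's guaranteed copy elision, the prvalue on the right initializes the variable directly, so this form compiles even if the guard type is neither copyable nor movable, which makes the rewrite behavior-preserving. A standalone sketch (stand-in type, not MongoDB's):

#include <cstdio>

template <typename F>
struct ScopeGuard {
    F func;
    ScopeGuard(F f) : func(f) {}
    ScopeGuard(const ScopeGuard&) = delete;  // neither copyable...
    ScopeGuard(ScopeGuard&&) = delete;       // ...nor movable
    ~ScopeGuard() { func(); }
};

int main() {
    // Guaranteed elision (C++17): no copy or move constructor is needed here.
    auto g = ScopeGuard([] { std::puts("fired"); });
    return 0;
}
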
diff --git a/src/mongo/db/repl/collection_bulk_loader_impl.cpp b/src/mongo/db/repl/collection_bulk_loader_impl.cpp
index 5224bc5c7bb..c06603c61ab 100644
--- a/src/mongo/db/repl/collection_bulk_loader_impl.cpp
+++ b/src/mongo/db/repl/collection_bulk_loader_impl.cpp
@@ -362,7 +362,7 @@ void CollectionBulkLoaderImpl::_releaseResources() {
template <typename F>
Status CollectionBulkLoaderImpl::_runTaskReleaseResourcesOnFailure(const F& task) noexcept {
AlternativeClientRegion acr(_client);
- auto guard = makeGuard([this] { _releaseResources(); });
+ ScopeGuard guard([this] { _releaseResources(); });
try {
const auto status = task();
if (status.isOK()) {
diff --git a/src/mongo/db/repl/initial_syncer.cpp b/src/mongo/db/repl/initial_syncer.cpp
index 5820d2e2ab5..a404f7b6ccf 100644
--- a/src/mongo/db/repl/initial_syncer.cpp
+++ b/src/mongo/db/repl/initial_syncer.cpp
@@ -1700,7 +1700,7 @@ void InitialSyncer::_finishInitialSyncAttempt(const StatusWith<OpTimeAndWallTime
// if the task scheduling fails and we have to invoke _finishCallback() synchronously), we
// declare the scope guard before the lock guard.
auto result = lastApplied;
- auto finishCallbackGuard = makeGuard([this, &result] {
+ ScopeGuard finishCallbackGuard([this, &result] {
auto scheduleResult = _exec->scheduleWork(
[=](const mongo::executor::TaskExecutor::CallbackArgs&) { _finishCallback(result); });
if (!scheduleResult.isOK()) {
diff --git a/src/mongo/db/repl/oplog.cpp b/src/mongo/db/repl/oplog.cpp
index 1c48b3efd87..d4d35779ded 100644
--- a/src/mongo/db/repl/oplog.cpp
+++ b/src/mongo/db/repl/oplog.cpp
@@ -428,7 +428,7 @@ OpTime logOp(OperationContext* opCtx, MutableOplogEntry* oplogEntry) {
// again. For example, if the WUOW gets aborted within a writeConflictRetry loop, we need to
// reset the OpTime to null so a new OpTime will be assigned on retry.
OplogSlot slot = oplogEntry->getOpTime();
- auto resetOpTimeGuard = makeGuard([&, resetOpTimeOnExit = bool(slot.isNull())] {
+ ScopeGuard resetOpTimeGuard([&, resetOpTimeOnExit = bool(slot.isNull())] {
if (resetOpTimeOnExit)
oplogEntry->setOpTime(OplogSlot());
});
diff --git a/src/mongo/db/repl/repl_set_commands.cpp b/src/mongo/db/repl/repl_set_commands.cpp
index 135bdd13614..de1b9a458db 100644
--- a/src/mongo/db/repl/repl_set_commands.cpp
+++ b/src/mongo/db/repl/repl_set_commands.cpp
@@ -514,7 +514,7 @@ public:
_stepDownCmdsWithForceExecuted.increment();
}
- auto onExitGuard = makeGuard([&] {
+ ScopeGuard onExitGuard([&] {
if (force) {
_stepDownCmdsWithForceFailed.increment();
}
diff --git a/src/mongo/db/repl/replication_coordinator_impl.cpp b/src/mongo/db/repl/replication_coordinator_impl.cpp
index 44b1c72a7c9..5e5d1834ea0 100644
--- a/src/mongo/db/repl/replication_coordinator_impl.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl.cpp
@@ -2583,7 +2583,7 @@ void ReplicationCoordinatorImpl::stepDown(OperationContext* opCtx,
_waitingForRSTLAtStepDown++;
_fulfillTopologyChangePromise(lk);
}
- auto clearStepDownFlag = makeGuard([&] {
+ ScopeGuard clearStepDownFlag([&] {
stdx::lock_guard lk(_mutex);
_waitingForRSTLAtStepDown--;
_fulfillTopologyChangePromise(lk);
@@ -2651,7 +2651,7 @@ void ReplicationCoordinatorImpl::stepDown(OperationContext* opCtx,
_performPostMemberStateUpdateAction(action);
};
- auto onExitGuard = makeGuard([&] {
+ ScopeGuard onExitGuard([&] {
abortFn();
updateMemberState();
});
@@ -3492,7 +3492,7 @@ Status ReplicationCoordinatorImpl::_doReplSetReconfig(OperationContext* opCtx,
_setConfigState_inlock(kConfigReconfiguring);
auto configStateGuard =
- makeGuard([&] { lockAndCall(&lk, [=] { _setConfigState_inlock(kConfigSteady); }); });
+ ScopeGuard([&] { lockAndCall(&lk, [=] { _setConfigState_inlock(kConfigSteady); }); });
ReplSetConfig oldConfig = _rsConfig;
int myIndex = _selfIndex;
@@ -3933,8 +3933,9 @@ Status ReplicationCoordinatorImpl::processReplSetInitiate(OperationContext* opCt
invariant(!_rsConfig.isInitialized());
_setConfigState_inlock(kConfigInitiating);
- auto configStateGuard =
- makeGuard([&] { lockAndCall(&lk, [=] { _setConfigState_inlock(kConfigUninitialized); }); });
+ ScopeGuard configStateGuard = [&] {
+ lockAndCall(&lk, [=] { _setConfigState_inlock(kConfigUninitialized); });
+ };
// When writing our first oplog entry below, disable advancement of the stable timestamp so that
// we don't set it before setting our initial data timestamp. We will set it after we set our
diff --git a/src/mongo/db/repl/scatter_gather_runner.cpp b/src/mongo/db/repl/scatter_gather_runner.cpp
index c45401d53f0..f688c5cf0cd 100644
--- a/src/mongo/db/repl/scatter_gather_runner.cpp
+++ b/src/mongo/db/repl/scatter_gather_runner.cpp
@@ -101,7 +101,7 @@ StatusWith<EventHandle> ScatterGatherRunner::RunnerImpl::start(
return evh;
}
_sufficientResponsesReceived = evh.getValue();
- auto earlyReturnGuard = makeGuard([this] { _signalSufficientResponsesReceived(); });
+ ScopeGuard earlyReturnGuard([this] { _signalSufficientResponsesReceived(); });
std::vector<RemoteCommandRequest> requests = _algorithm->getRequests();
for (size_t i = 0; i < requests.size(); ++i) {
diff --git a/src/mongo/db/repl/tenant_migration_recipient_service_test.cpp b/src/mongo/db/repl/tenant_migration_recipient_service_test.cpp
index a303a3cc442..b30dee191a0 100644
--- a/src/mongo/db/repl/tenant_migration_recipient_service_test.cpp
+++ b/src/mongo/db/repl/tenant_migration_recipient_service_test.cpp
@@ -1560,7 +1560,7 @@ TEST_F(TenantMigrationRecipientServiceTest, TenantMigrationRecipientStartsCloner
stopFailPointEnableBlock fp("fpAfterCollectionClonerDone");
auto taskFp = globalFailPointRegistry().find("hangBeforeTaskCompletion");
- auto taskFpGuard = makeGuard([&taskFp] { taskFp->setMode(FailPoint::off); });
+ ScopeGuard taskFpGuard([&taskFp] { taskFp->setMode(FailPoint::off); });
auto initialTimesEntered = taskFp->setMode(FailPoint::alwaysOn);
diff --git a/src/mongo/db/repl/topology_version_observer.cpp b/src/mongo/db/repl/topology_version_observer.cpp
index 877466bf7f6..b0882e76b8d 100644
--- a/src/mongo/db/repl/topology_version_observer.cpp
+++ b/src/mongo/db/repl/topology_version_observer.cpp
@@ -129,7 +129,7 @@ void TopologyVersionObserver::_cacheHelloResponse(
LOGV2_DEBUG(4794600, 3, "Waiting for a topology change");
{
- auto cacheGuard = makeGuard([&] {
+ ScopeGuard cacheGuard([&] {
// If we're not dismissed, reset the _cache.
stdx::lock_guard lk(_mutex);
_cache.reset();
diff --git a/src/mongo/db/repl/transaction_oplog_application.cpp b/src/mongo/db/repl/transaction_oplog_application.cpp
index 103a5c4e149..844971df246 100644
--- a/src/mongo/db/repl/transaction_oplog_application.cpp
+++ b/src/mongo/db/repl/transaction_oplog_application.cpp
@@ -433,7 +433,7 @@ Status _applyPrepareTransaction(OperationContext* opCtx,
// Release the WUOW, transaction lock resources and abort storage transaction so that the
// writeConflictRetry loop will be able to retry applying transactional ops on WCE error.
- auto abortOnError = makeGuard([&txnParticipant, opCtx] {
+ ScopeGuard abortOnError([&txnParticipant, opCtx] {
// Abort the transaction and invalidate the session it is associated with.
txnParticipant.abortTransaction(opCtx);
txnParticipant.invalidate(opCtx);
diff --git a/src/mongo/db/repl/vote_requester.cpp b/src/mongo/db/repl/vote_requester.cpp
index 6a29e4785c8..f57765dc73c 100644
--- a/src/mongo/db/repl/vote_requester.cpp
+++ b/src/mongo/db/repl/vote_requester.cpp
@@ -117,7 +117,7 @@ void VoteRequester::Algorithm::processResponse(const RemoteCommandRequest& reque
// All local variables captured in logAttrs need to be above the guard that logs.
logv2::DynamicAttributes logAttrs;
auto logAtExit =
- makeGuard([&logAttrs]() { LOGV2(51799, "VoteRequester processResponse", logAttrs); });
+ ScopeGuard([&logAttrs]() { LOGV2(51799, "VoteRequester processResponse", logAttrs); });
logAttrs.add("term", _term);
logAttrs.add("dryRun", _dryRun);
diff --git a/src/mongo/db/s/balancer/migration_manager.cpp b/src/mongo/db/s/balancer/migration_manager.cpp
index b2c34f7f555..6935621ef44 100644
--- a/src/mongo/db/s/balancer/migration_manager.cpp
+++ b/src/mongo/db/s/balancer/migration_manager.cpp
@@ -223,7 +223,7 @@ void MigrationManager::startRecoveryAndAcquireDistLocks(OperationContext* opCtx)
_state = State::kRecovering;
}
- auto scopedGuard = makeGuard([&] {
+ ScopeGuard scopedGuard([&] {
_migrationRecoveryMap.clear();
_abandonActiveMigrationsAndEnableManager(opCtx);
});
@@ -313,7 +313,7 @@ void MigrationManager::finishRecovery(OperationContext* opCtx,
invariant(_state == State::kRecovering);
}
- auto scopedGuard = makeGuard([&] {
+ ScopeGuard scopedGuard([&] {
_migrationRecoveryMap.clear();
_abandonActiveMigrationsAndEnableManager(opCtx);
});
diff --git a/src/mongo/db/s/config/sharding_catalog_manager.cpp b/src/mongo/db/s/config/sharding_catalog_manager.cpp
index d2d0055d931..295fa1bc183 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager.cpp
@@ -837,7 +837,7 @@ void ShardingCatalogManager::withTransaction(
AuthorizationSession::get(client)->grantInternalAuthorization(client);
TxnNumber txnNumber = 0;
- auto guard = makeGuard([opCtx = asr.opCtx(), &txnNumber] {
+ ScopeGuard guard([opCtx = asr.opCtx(), &txnNumber] {
try {
abortTransaction(opCtx, txnNumber);
} catch (DBException& e) {
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
index 45b1212452d..3e65ecf8903 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
@@ -1400,7 +1400,7 @@ void ShardingCatalogManager::ensureChunkVersionIsGreaterThan(OperationContext* o
const BSONObj& minKey,
const BSONObj& maxKey,
const ChunkVersion& version) {
- auto earlyReturnBeforeDoingWriteGuard = makeGuard([&] {
+ ScopeGuard earlyReturnBeforeDoingWriteGuard([&] {
// Ensure waiting for writeConcern of the data read.
repl::ReplClientInfo::forClient(opCtx->getClient()).setLastOpToSystemLastOpTime(opCtx);
});
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_shard_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_shard_operations.cpp
index 525d3385b9a..ed00416ef5d 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_shard_operations.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_shard_operations.cpp
@@ -595,7 +595,7 @@ StatusWith<std::string> ShardingCatalogManager::addShard(
const std::shared_ptr<Shard> shard{shardRegistry->createConnection(shardConnectionString)};
auto targeter = shard->getTargeter();
- auto stopMonitoringGuard = makeGuard([&] {
+ ScopeGuard stopMonitoringGuard([&] {
if (shardConnectionString.type() == ConnectionString::ConnectionType::kReplicaSet) {
// This is a workaround for the case where we could have some bad shard being
// requested to be added and we put that bad connection string on the global replica set
diff --git a/src/mongo/db/s/dist_lock_manager.cpp b/src/mongo/db/s/dist_lock_manager.cpp
index a2921a1c49c..96b68bc25b7 100644
--- a/src/mongo/db/s/dist_lock_manager.cpp
+++ b/src/mongo/db/s/dist_lock_manager.cpp
@@ -127,7 +127,7 @@ DistLockManager::ScopedLock DistLockManager::lockDirectLocally(OperationContext*
} else {
auto nsLock = iter->second;
nsLock->numWaiting++;
- auto guard = makeGuard([&] { nsLock->numWaiting--; });
+ ScopeGuard guard([&] { nsLock->numWaiting--; });
if (!opCtx->waitForConditionOrInterruptFor(
nsLock->cvLocked, lock, waitFor, [nsLock]() { return !nsLock->isInProgress; })) {
uasserted(ErrorCodes::LockBusy,
diff --git a/src/mongo/db/s/dist_lock_manager_replset.cpp b/src/mongo/db/s/dist_lock_manager_replset.cpp
index d4c5cf0df8a..a41839870fe 100644
--- a/src/mongo/db/s/dist_lock_manager_replset.cpp
+++ b/src/mongo/db/s/dist_lock_manager_replset.cpp
@@ -689,7 +689,7 @@ long long ReplSetDistLockManager::_waitForRecovery(OperationContext* opCtx) {
LOGV2(570181, "Recovering dist lock manager", "term"_attr = term);
- auto anotherThreadMustRecoverGuard = makeGuard([&] {
+ ScopeGuard anotherThreadMustRecoverGuard([&] {
lk.lock();
if (term == _recoveryTerm) {
_recoveryState = kMustRecover;
diff --git a/src/mongo/db/s/migration_destination_manager.cpp b/src/mongo/db/s/migration_destination_manager.cpp
index 5f81beb94fa..51df79c8c46 100644
--- a/src/mongo/db/s/migration_destination_manager.cpp
+++ b/src/mongo/db/s/migration_destination_manager.cpp
@@ -471,7 +471,7 @@ repl::OpTime MigrationDestinationManager::fetchAndApplyBatch(
auto applicationOpCtx = CancelableOperationContext(
cc().makeOperationContext(), opCtx->getCancellationToken(), executor);
- auto consumerGuard = makeGuard([&] {
+ ScopeGuard consumerGuard([&] {
batches.closeConsumerEnd();
lastOpApplied =
repl::ReplClientInfo::forClient(applicationOpCtx->getClient()).getLastOp();
@@ -496,7 +496,7 @@ repl::OpTime MigrationDestinationManager::fetchAndApplyBatch(
{
- auto applicationThreadJoinGuard = makeGuard([&] {
+ ScopeGuard applicationThreadJoinGuard([&] {
batches.closeProducerEnd();
applicationThread.join();
});
diff --git a/src/mongo/db/s/migration_source_manager.cpp b/src/mongo/db/s/migration_source_manager.cpp
index 13deb093da8..41c029b2153 100644
--- a/src/mongo/db/s/migration_source_manager.cpp
+++ b/src/mongo/db/s/migration_source_manager.cpp
@@ -214,7 +214,7 @@ NamespaceString MigrationSourceManager::getNss() const {
Status MigrationSourceManager::startClone() {
invariant(!_opCtx->lockState()->isLocked());
invariant(_state == kCreated);
- auto scopedGuard = makeGuard([&] { cleanupOnError(); });
+ ScopeGuard scopedGuard([&] { cleanupOnError(); });
_stats.countDonorMoveChunkStarted.addAndFetch(1);
const Status logStatus = ShardingLogging::get(_opCtx)->logChangeChecked(
@@ -297,7 +297,7 @@ Status MigrationSourceManager::startClone() {
Status MigrationSourceManager::awaitToCatchUp() {
invariant(!_opCtx->lockState()->isLocked());
invariant(_state == kCloning);
- auto scopedGuard = makeGuard([&] { cleanupOnError(); });
+ ScopeGuard scopedGuard([&] { cleanupOnError(); });
_stats.totalDonorChunkCloneTimeMillis.addAndFetch(_cloneAndCommitTimer.millis());
_cloneAndCommitTimer.reset();
@@ -316,7 +316,7 @@ Status MigrationSourceManager::awaitToCatchUp() {
Status MigrationSourceManager::enterCriticalSection() {
invariant(!_opCtx->lockState()->isLocked());
invariant(_state == kCloneCaughtUp);
- auto scopedGuard = makeGuard([&] { cleanupOnError(); });
+ ScopeGuard scopedGuard([&] { cleanupOnError(); });
_stats.totalDonorChunkCloneTimeMillis.addAndFetch(_cloneAndCommitTimer.millis());
_cloneAndCommitTimer.reset();
@@ -371,7 +371,7 @@ Status MigrationSourceManager::enterCriticalSection() {
Status MigrationSourceManager::commitChunkOnRecipient() {
invariant(!_opCtx->lockState()->isLocked());
invariant(_state == kCriticalSection);
- auto scopedGuard = makeGuard([&] { cleanupOnError(); });
+ ScopeGuard scopedGuard([&] { cleanupOnError(); });
// Tell the recipient shard to fetch the latest changes.
auto commitCloneStatus = _cloneDriver->commitClone(_opCtx);
@@ -395,7 +395,7 @@ Status MigrationSourceManager::commitChunkOnRecipient() {
Status MigrationSourceManager::commitChunkMetadataOnConfig() {
invariant(!_opCtx->lockState()->isLocked());
invariant(_state == kCloneCompleted);
- auto scopedGuard = makeGuard([&] { cleanupOnError(); });
+ ScopeGuard scopedGuard([&] { cleanupOnError(); });
// If we have chunks left on the FROM shard, bump the version of one of them as well. This will
// change the local collection major version, which indicates to other processes that the chunk
diff --git a/src/mongo/db/s/move_primary_source_manager.cpp b/src/mongo/db/s/move_primary_source_manager.cpp
index 99bf70f5ba7..e7b2e87ccef 100644
--- a/src/mongo/db/s/move_primary_source_manager.cpp
+++ b/src/mongo/db/s/move_primary_source_manager.cpp
@@ -79,7 +79,7 @@ NamespaceString MovePrimarySourceManager::getNss() const {
Status MovePrimarySourceManager::clone(OperationContext* opCtx) {
invariant(!opCtx->lockState()->isLocked());
invariant(_state == kCreated);
- auto scopedGuard = makeGuard([&] { cleanupOnError(opCtx); });
+ ScopeGuard scopedGuard([&] { cleanupOnError(opCtx); });
LOGV2(22042,
"Moving {db} primary from: {fromShard} to: {toShard}",
@@ -155,7 +155,7 @@ Status MovePrimarySourceManager::clone(OperationContext* opCtx) {
Status MovePrimarySourceManager::enterCriticalSection(OperationContext* opCtx) {
invariant(!opCtx->lockState()->isLocked());
invariant(_state == kCloneCaughtUp);
- auto scopedGuard = makeGuard([&] { cleanupOnError(opCtx); });
+ ScopeGuard scopedGuard([&] { cleanupOnError(opCtx); });
// Mark the shard as running a critical operation that requires recovery on crash.
auto startMetadataOpStatus = ShardingStateRecovery::startMetadataOp(opCtx);
@@ -213,7 +213,7 @@ Status MovePrimarySourceManager::enterCriticalSection(OperationContext* opCtx) {
Status MovePrimarySourceManager::commitOnConfig(OperationContext* opCtx) {
invariant(!opCtx->lockState()->isLocked());
invariant(_state == kCriticalSection);
- auto scopedGuard = makeGuard([&] { cleanupOnError(opCtx); });
+ ScopeGuard scopedGuard([&] { cleanupOnError(opCtx); });
{
AutoGetDb autoDb(opCtx, getNss().toString(), MODE_X);
diff --git a/src/mongo/db/s/resharding/resharding_collection_cloner.cpp b/src/mongo/db/s/resharding/resharding_collection_cloner.cpp
index 0225c21cc69..65668d804d7 100644
--- a/src/mongo/db/s/resharding/resharding_collection_cloner.cpp
+++ b/src/mongo/db/s/resharding/resharding_collection_cloner.cpp
@@ -305,7 +305,7 @@ SemiFuture<void> ReshardingCollectionCloner::run(
}
auto opCtx = factory.makeOperationContext(&cc());
- auto guard = makeGuard([&] {
+ ScopeGuard guard([&] {
chainCtx->pipeline->dispose(opCtx.get());
chainCtx->pipeline.reset();
});
diff --git a/src/mongo/db/s/resharding/resharding_donor_oplog_iterator.cpp b/src/mongo/db/s/resharding/resharding_donor_oplog_iterator.cpp
index 9d8a1949cb5..17a5d8fb966 100644
--- a/src/mongo/db/s/resharding/resharding_donor_oplog_iterator.cpp
+++ b/src/mongo/db/s/resharding/resharding_donor_oplog_iterator.cpp
@@ -205,7 +205,7 @@ ExecutorFuture<std::vector<repl::OplogEntry>> ReshardingDonorOplogIterator::getN
auto batch = [&] {
auto opCtx = factory.makeOperationContext(&cc());
- auto guard = makeGuard([&] { dispose(opCtx.get()); });
+ ScopeGuard guard([&] { dispose(opCtx.get()); });
// A primary which steps down may briefly continue running the ReshardingDonorOplogIterator
// as a secondary. AutoGetCollectionForReadBase forbids reads on a secondary from using the
diff --git a/src/mongo/db/s/resharding/resharding_op_observer.cpp b/src/mongo/db/s/resharding/resharding_op_observer.cpp
index d84c59c7edd..0027b9e0d62 100644
--- a/src/mongo/db/s/resharding/resharding_op_observer.cpp
+++ b/src/mongo/db/s/resharding/resharding_op_observer.cpp
@@ -100,7 +100,7 @@ boost::optional<Timestamp> _calculatePin(OperationContext* opCtx) {
// If the RecoveryUnit already had an open snapshot, keep the snapshot open. Otherwise abandon
// the snapshot when exiting the function.
- auto scopeGuard = makeGuard([&] { opCtx->recoveryUnit()->abandonSnapshot(); });
+ ScopeGuard scopeGuard([&] { opCtx->recoveryUnit()->abandonSnapshot(); });
if (opCtx->recoveryUnit()->isActive()) {
scopeGuard.dismiss();
}
diff --git a/src/mongo/db/s/resharding/resharding_oplog_application.cpp b/src/mongo/db/s/resharding/resharding_oplog_application.cpp
index 5a71b14a7da..f704bb10181 100644
--- a/src/mongo/db/s/resharding/resharding_oplog_application.cpp
+++ b/src/mongo/db/s/resharding/resharding_oplog_application.cpp
@@ -76,7 +76,7 @@ void runWithTransaction(OperationContext* opCtx, unique_function<void(OperationC
MongoDOperationContextSession ocs(asr.opCtx());
auto txnParticipant = TransactionParticipant::get(asr.opCtx());
- auto guard = makeGuard([opCtx = asr.opCtx(), &txnParticipant] {
+ ScopeGuard guard([opCtx = asr.opCtx(), &txnParticipant] {
try {
txnParticipant.abortTransaction(opCtx);
} catch (DBException& e) {
diff --git a/src/mongo/db/s/resharding/resharding_service_test_helpers.cpp b/src/mongo/db/s/resharding/resharding_service_test_helpers.cpp
index 01b800167e6..865042348df 100644
--- a/src/mongo/db/s/resharding/resharding_service_test_helpers.cpp
+++ b/src/mongo/db/s/resharding/resharding_service_test_helpers.cpp
@@ -67,7 +67,7 @@ template <class StateEnum>
void StateTransitionController<StateEnum>::_notifyNewStateAndWaitUntilUnpaused(
OperationContext* opCtx, StateEnum newState) {
stdx::unique_lock lk(_mutex);
- auto guard = makeGuard([this, prevState = _state] { _state = prevState; });
+ ScopeGuard guard([this, prevState = _state] { _state = prevState; });
_state = newState;
_waitUntilUnpausedCond.notify_all();
opCtx->waitForConditionOrInterrupt(_pauseDuringTransitionCond, lk, [this, newState] {
diff --git a/src/mongo/db/s/resharding/resharding_txn_cloner.cpp b/src/mongo/db/s/resharding/resharding_txn_cloner.cpp
index 2ffa4e40ab0..d4b2cc88bee 100644
--- a/src/mongo/db/s/resharding/resharding_txn_cloner.cpp
+++ b/src/mongo/db/s/resharding/resharding_txn_cloner.cpp
@@ -220,7 +220,7 @@ SemiFuture<void> ReshardingTxnCloner::run(
// due to a prepared transaction having been in progress.
if (!chainCtx->donorRecord) {
auto opCtx = factory.makeOperationContext(&cc());
- auto guard = makeGuard([&] {
+ ScopeGuard guard([&] {
chainCtx->pipeline->dispose(opCtx.get());
chainCtx->pipeline.reset();
});
diff --git a/src/mongo/db/s/transaction_coordinator_service_test.cpp b/src/mongo/db/s/transaction_coordinator_service_test.cpp
index 06086fa3f4a..c63753bd3b2 100644
--- a/src/mongo/db/s/transaction_coordinator_service_test.cpp
+++ b/src/mongo/db/s/transaction_coordinator_service_test.cpp
@@ -213,7 +213,7 @@ TEST_F(TransactionCoordinatorServiceStepUpStepDownTest, OperationsFailBeforeStep
TEST_F(TransactionCoordinatorServiceStepUpStepDownTest, OperationsBlockBeforeStepUpCompletes) {
service()->onStepUp(operationContext(), Milliseconds(1));
- auto stepDownGuard = makeGuard([&] { service()->onStepDown(); });
+ ScopeGuard stepDownGuard([&] { service()->onStepDown(); });
ASSERT_THROWS_CODE(operationContext()->runWithDeadline(
Date_t::now() + Milliseconds{5},
@@ -246,7 +246,7 @@ TEST_F(TransactionCoordinatorServiceStepUpStepDownTest, StepUpFailsDueToBadCoord
ASSERT_EQ(1, response["n"].Int());
service()->onStepUp(operationContext());
- auto stepDownGuard = makeGuard([&] { service()->onStepDown(); });
+ ScopeGuard stepDownGuard([&] { service()->onStepDown(); });
ASSERT_THROWS_CODE(service()->coordinateCommit(
operationContext(), makeLogicalSessionIdForTest(), 0, kTwoShardIdSet),
diff --git a/src/mongo/db/server_options_test.cpp b/src/mongo/db/server_options_test.cpp
index 5cc26ccd3c1..55e67ba6d15 100644
--- a/src/mongo/db/server_options_test.cpp
+++ b/src/mongo/db/server_options_test.cpp
@@ -723,10 +723,10 @@ TEST(SetupOptions, DeepCwd) {
sb << "/tmp/deepcwd-" << getpid();
boost::filesystem::path deepBaseDir = sb.str();
- auto cleanup = ::mongo::makeGuard([&] {
+ ::mongo::ScopeGuard cleanup = [&] {
boost::filesystem::current_path(cwd, ec);
boost::filesystem::remove_all(deepBaseDir, ec);
- });
+ };
// Clear out any old base dir, and create an empty dir.
boost::filesystem::remove_all(deepBaseDir, ec);
@@ -780,12 +780,12 @@ TEST(SetupOptions, UnlinkedCwd) {
std::string unlinkDir;
- auto cleanup = ::mongo::makeGuard([&] {
+ ::mongo::ScopeGuard cleanup = [&] {
boost::filesystem::current_path(cwd, ec);
if (!unlinkDir.empty()) {
boost::filesystem::remove(cwd / unlinkDir, ec);
}
- });
+ };
// mkdir our own unlink dir
unsigned int i = 0;
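
The test hunks above use a third spelling, copy-initializing the guard straight from a lambda. CTAD still deduces the template argument, but this form additionally requires the converting constructor to be non-explicit, which the stand-in sketches here assume. For illustration:

#include <cstdio>
#include <utility>

template <typename F>
struct ScopeGuard {
    F func;
    ScopeGuard(F f) : func(std::move(f)) {}  // non-explicit enables "= lambda"
    ~ScopeGuard() { func(); }
};

int main() {
    ScopeGuard a([] { std::puts("a"); });   // direct-initialization
    ScopeGuard b = [] { std::puts("b"); };  // copy-initialization, as in the tests above
    return 0;  // destructors run in reverse order: prints "b" then "a"
}
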
diff --git a/src/mongo/db/service_context.cpp b/src/mongo/db/service_context.cpp
index eb3901c01c1..551bc7cdf34 100644
--- a/src/mongo/db/service_context.cpp
+++ b/src/mongo/db/service_context.cpp
@@ -242,14 +242,14 @@ ServiceContext::UniqueOperationContext ServiceContext::makeOperationContext(Clie
_numCurrentOps.addAndFetch(1);
}
- auto numOpsGuard = makeGuard([&] {
+ ScopeGuard numOpsGuard([&] {
if (client->session()) {
_numCurrentOps.subtractAndFetch(1);
}
});
onCreate(opCtx.get(), _clientObservers);
- auto onCreateGuard = makeGuard([&] { onDestroy(opCtx.get(), _clientObservers); });
+ ScopeGuard onCreateGuard([&] { onDestroy(opCtx.get(), _clientObservers); });
if (!opCtx->lockState()) {
opCtx->setLockState(std::make_unique<LockerNoop>());
@@ -265,7 +265,7 @@ ServiceContext::UniqueOperationContext ServiceContext::makeOperationContext(Clie
makeBaton(opCtx.get());
}
- auto batonGuard = makeGuard([&] { opCtx->getBaton()->detach(); });
+ ScopeGuard batonGuard([&] { opCtx->getBaton()->detach(); });
{
stdx::lock_guard<Client> lk(*client);
diff --git a/src/mongo/db/service_entry_point_common.cpp b/src/mongo/db/service_entry_point_common.cpp
index 64cf8a9ff1b..aec93184463 100644
--- a/src/mongo/db/service_entry_point_common.cpp
+++ b/src/mongo/db/service_entry_point_common.cpp
@@ -898,7 +898,7 @@ void CheckoutSessionAndInvokeCommand::_checkOutSession() {
// transactions on failure to unstash the transaction resources to opCtx. We don't want to
// have this error guard for beginOrContinue as it can abort the transaction for any
// accidental invalid statements in the transaction.
- auto abortOnError = makeGuard([&] {
+ ScopeGuard abortOnError([&] {
if (_txnParticipant->transactionIsInProgress()) {
_txnParticipant->abortTransaction(opCtx);
}
diff --git a/src/mongo/db/sessions_collection_rs.cpp b/src/mongo/db/sessions_collection_rs.cpp
index fc7f8977717..8d53352c455 100644
--- a/src/mongo/db/sessions_collection_rs.cpp
+++ b/src/mongo/db/sessions_collection_rs.cpp
@@ -107,7 +107,7 @@ auto SessionsCollectionRS::_dispatch(const NamespaceString& ns,
auto conn = _makePrimaryConnection(opCtx);
DBClientBase* client = conn->get();
- auto guard = makeGuard([&] { conn->done(); });
+ ScopeGuard guard([&] { conn->done(); });
try {
return std::forward<RemoteCallback>(remoteCallback)(client);
} catch (...) {
diff --git a/src/mongo/db/startup_recovery.cpp b/src/mongo/db/startup_recovery.cpp
index 655af5e36dd..179d4f35be3 100644
--- a/src/mongo/db/startup_recovery.cpp
+++ b/src/mongo/db/startup_recovery.cpp
@@ -160,7 +160,7 @@ bool checkIdIndexExists(OperationContext* opCtx, const CollectionPtr& coll) {
Status buildMissingIdIndex(OperationContext* opCtx, Collection* collection) {
LOGV2(4805002, "Building missing _id index", logAttrs(*collection));
MultiIndexBlock indexer;
- auto abortOnExit = makeGuard([&] {
+ ScopeGuard abortOnExit([&] {
CollectionWriter collWriter(collection);
indexer.abortIndexBuild(opCtx, collWriter, MultiIndexBlock::kNoopOnCleanUpFn);
});
@@ -465,7 +465,7 @@ void startupRepair(OperationContext* opCtx, StorageEngine* storageEngine) {
// document.
// If we fail to load the FCV document due to upgrade problems, we need to abort the repair in
// order to allow downgrading to older binary versions.
- auto abortRepairOnFCVErrors = makeGuard(
+ ScopeGuard abortRepairOnFCVErrors(
[&] { StorageRepairObserver::get(opCtx->getServiceContext())->onRepairDone(opCtx); });
if (auto fcvColl = CollectionCatalog::get(opCtx)->lookupCollectionByNamespace(
opCtx, NamespaceString::kServerConfigurationNamespace)) {
diff --git a/src/mongo/db/startup_warnings_common.cpp b/src/mongo/db/startup_warnings_common.cpp
index aa7dafc5cf8..947f6feab64 100644
--- a/src/mongo/db/startup_warnings_common.cpp
+++ b/src/mongo/db/startup_warnings_common.cpp
@@ -63,7 +63,7 @@ bool CheckPrivilegeEnabled(const wchar_t* name) {
return false;
}
- const auto accessTokenGuard = makeGuard([&] { CloseHandle(accessToken); });
+ const ScopeGuard accessTokenGuard([&] { CloseHandle(accessToken); });
BOOL ret;
PRIVILEGE_SET privileges;
diff --git a/src/mongo/db/stats/operation_latency_histogram_test.cpp b/src/mongo/db/stats/operation_latency_histogram_test.cpp
index c1cf84e082a..046def97078 100644
--- a/src/mongo/db/stats/operation_latency_histogram_test.cpp
+++ b/src/mongo/db/stats/operation_latency_histogram_test.cpp
@@ -106,7 +106,7 @@ TEST(OperationLatencyHistogram, CheckBucketCountsAndTotalLatencySlowBuckets) {
auto orig = serverGlobalParams.slowMS;
serverGlobalParams.slowMS = 100;
- auto g1 = makeGuard([orig] { serverGlobalParams.slowMS = orig; });
+ ScopeGuard g1 = [orig] { serverGlobalParams.slowMS = orig; };
// The additional +1 because of the first boundary.
uint64_t expectedSum = 3 * std::accumulate(kLowerBounds.begin(), kLowerBounds.end(), 0ULL) + 1;
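
The g1 guard in this hunk is an instance of a save-and-restore idiom that recurs in this diff (slowMS here, sampleRate in log_with_sampling_test.cpp, errno in stacktrace_threads.cpp): capture the old value, mutate the global, restore it on scope exit. A sketch using the minimal ScopeGuard from the first note above, with a hypothetical global standing in for the server parameter:

    int gSlowMs = 10;  // hypothetical stand-in for serverGlobalParams.slowMS

    void runWithSlowMs100() {
        ScopeGuard restore([saved = gSlowMs] { gSlowMs = saved; });
        gSlowMs = 100;
        // ... body that observes gSlowMs == 100 and may return or throw ...
    }  // gSlowMs is back to its prior value on every exit path
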
diff --git a/src/mongo/db/storage/storage_engine_impl.cpp b/src/mongo/db/storage/storage_engine_impl.cpp
index edebfe78423..d8e990925b5 100644
--- a/src/mongo/db/storage/storage_engine_impl.cpp
+++ b/src/mongo/db/storage/storage_engine_impl.cpp
@@ -799,7 +799,7 @@ Status StorageEngineImpl::_dropCollectionsNoTimestamp(OperationContext* opCtx,
}
// Ensure the method exits with the same "commit timestamp" state that it was called with.
- auto addCommitTimestamp = makeGuard([&opCtx, commitTs] {
+ ScopeGuard addCommitTimestamp([&opCtx, commitTs] {
if (!commitTs.isNull()) {
opCtx->recoveryUnit()->setCommitTimestamp(commitTs);
}
diff --git a/src/mongo/db/storage/storage_engine_init.cpp b/src/mongo/db/storage/storage_engine_init.cpp
index 01f0278f175..d94179be8e4 100644
--- a/src/mongo/db/storage/storage_engine_init.cpp
+++ b/src/mongo/db/storage/storage_engine_init.cpp
@@ -149,7 +149,7 @@ StorageEngine::LastShutdownState initializeStorageEngine(OperationContext* opCtx
uassertStatusOK(factory->validateMetadata(*metadata, storageGlobalParams));
}
- auto guard = makeGuard([&] {
+ ScopeGuard guard([&] {
auto& lockFile = StorageEngineLockFile::get(service);
if (lockFile) {
lockFile->close();
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp
index 449fbc4bb55..f53d39bf0e8 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp
@@ -1210,7 +1210,7 @@ WiredTigerKVEngine::beginNonBlockingBackup(OperationContext* opCtx,
// The oplog truncation thread won't remove the oplog, since the checkpoint is pinned by the backup cursor.
stdx::lock_guard<Latch> lock(_oplogPinnedByBackupMutex);
_oplogPinnedByBackup = Timestamp(_oplogNeededForCrashRecovery.load());
- auto pinOplogGuard = makeGuard([&] { _oplogPinnedByBackup = boost::none; });
+ ScopeGuard pinOplogGuard([&] { _oplogPinnedByBackup = boost::none; });
// Persist the sizeStorer information to disk before opening the backup cursor. We aren't
// guaranteed to have the most up-to-date size information after the backup as writes can still
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_oplog_manager.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_oplog_manager.cpp
index 7c68769472e..332f3bb0519 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_oplog_manager.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_oplog_manager.cpp
@@ -148,7 +148,7 @@ void WiredTigerOplogManager::waitForAllEarlierOplogWritesToBeVisible(
// this wait excessively.
++_opsWaitingForOplogVisibilityUpdate;
invariant(_opsWaitingForOplogVisibilityUpdate > 0);
- auto exitGuard = makeGuard([&] { --_opsWaitingForOplogVisibilityUpdate; });
+ ScopeGuard exitGuard([&] { --_opsWaitingForOplogVisibilityUpdate; });
// Out of order writes to the oplog always call triggerOplogVisibilityUpdate() on commit to
// prompt the OplogVisibilityThread to run and update the oplog visibility. We simply need to
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_util.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_util.cpp
index 3ef3a2fce2a..9376542bfa2 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_util.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_util.cpp
@@ -227,8 +227,9 @@ StatusWith<std::string> WiredTigerUtil::getMetadataCreate(OperationContext* opCt
LOGV2_FATAL_NOTRACE(51257, "Cursor not found", "error"_attr = ex);
}
invariant(cursor);
- auto releaser = makeGuard(
- [&] { session->releaseCursor(WiredTigerSession::kMetadataCreateTableId, cursor, ""); });
+ ScopeGuard releaser = [&] {
+ session->releaseCursor(WiredTigerSession::kMetadataCreateTableId, cursor, "");
+ };
return _getMetadata(cursor, uri);
}
@@ -257,8 +258,9 @@ StatusWith<std::string> WiredTigerUtil::getMetadata(OperationContext* opCtx, Str
LOGV2_FATAL_NOTRACE(31293, "Cursor not found", "error"_attr = ex);
}
invariant(cursor);
- auto releaser =
- makeGuard([&] { session->releaseCursor(WiredTigerSession::kMetadataTableId, cursor, ""); });
+ ScopeGuard releaser = [&] {
+ session->releaseCursor(WiredTigerSession::kMetadataTableId, cursor, "");
+ };
return _getMetadata(cursor, uri);
}
diff --git a/src/mongo/db/traffic_reader.cpp b/src/mongo/db/traffic_reader.cpp
index b6de5022d0b..d6b5bc27a8f 100644
--- a/src/mongo/db/traffic_reader.cpp
+++ b/src/mongo/db/traffic_reader.cpp
@@ -217,7 +217,7 @@ BSONArray trafficRecordingFileToBSONArr(const std::string& inputFile) {
str::stream() << "Specified file does not exist (" << inputFile << ")",
inputFd > 0);
- const auto guard = makeGuard([&] { ::close(inputFd); });
+ const ScopeGuard guard([&] { ::close(inputFd); });
auto buf = SharedBuffer::allocate(MaxMessageSizeBytes);
while (auto packet = readPacket(buf.get(), inputFd)) {
diff --git a/src/mongo/db/transaction_participant.cpp b/src/mongo/db/transaction_participant.cpp
index c588ec4d22a..9c018294513 100644
--- a/src/mongo/db/transaction_participant.cpp
+++ b/src/mongo/db/transaction_participant.cpp
@@ -826,7 +826,7 @@ TransactionParticipant::TxnResources::~TxnResources() {
void TransactionParticipant::TxnResources::release(OperationContext* opCtx) {
// Perform operations that can fail the release before marking the TxnResources as released.
- auto onError = makeGuard([&] {
+ ScopeGuard onError([&] {
// Release any locks acquired as part of lock restoration.
if (_lockSnapshot) {
// WUOW should be released before unlocking.
@@ -1002,7 +1002,7 @@ void TransactionParticipant::Participant::_releaseTransactionResourcesToOpCtx(
}
();
- auto releaseOnError = makeGuard([&] {
+ ScopeGuard releaseOnError([&] {
// Restore the lock resources back to transaction participant.
using std::swap;
stdx::lock_guard<Client> lk(*opCtx->getClient());
@@ -1165,7 +1165,7 @@ void TransactionParticipant::Participant::refreshLocksForPreparedTransaction(
Timestamp TransactionParticipant::Participant::prepareTransaction(
OperationContext* opCtx, boost::optional<repl::OpTime> prepareOptime) {
- auto abortGuard = makeGuard([&] {
+ ScopeGuard abortGuard([&] {
// Preparing a transaction on secondaries should always succeed.
invariant(!prepareOptime);
@@ -1415,7 +1415,7 @@ void TransactionParticipant::Participant::commitPreparedTransaction(
// Prepared transactions cannot hold the RSTL, or else they will deadlock with state
// transitions. If we do not commit the transaction we must unlock the RSTL explicitly so two
// phase locking doesn't hold onto it.
- auto unlockGuard = makeGuard([&] { invariant(opCtx->lockState()->unlockRSTLforPrepare()); });
+ ScopeGuard unlockGuard([&] { invariant(opCtx->lockState()->unlockRSTLforPrepare()); });
const auto replCoord = repl::ReplicationCoordinator::get(opCtx);
diff --git a/src/mongo/db/transaction_participant_test.cpp b/src/mongo/db/transaction_participant_test.cpp
index ea5f53ca6bb..d7567b1b003 100644
--- a/src/mongo/db/transaction_participant_test.cpp
+++ b/src/mongo/db/transaction_participant_test.cpp
@@ -1310,7 +1310,7 @@ TEST_F(TxnParticipantTest, CannotStartNewTransactionWhilePreparedTransactionInPr
txnParticipant.stashTransactionResources(opCtx());
OperationContextSession::checkIn(opCtx());
{
- auto guard = makeGuard([&]() { OperationContextSession::checkOut(opCtx()); });
+ ScopeGuard guard([&]() { OperationContextSession::checkOut(opCtx()); });
// Try to start a new transaction while there is already a prepared transaction on the
// session. This should fail with a PreparedTransactionInProgress error.
runFunctionFromDifferentOpCtx([lsid = *opCtx()->getLogicalSessionId(),
diff --git a/src/mongo/db/views/view_graph.cpp b/src/mongo/db/views/view_graph.cpp
index def5d50154d..4282a0469a4 100644
--- a/src/mongo/db/views/view_graph.cpp
+++ b/src/mongo/db/views/view_graph.cpp
@@ -65,7 +65,7 @@ Status ViewGraph::insertAndValidate(const ViewDefinition& view,
// If the graph fails validation for any reason, the insert is automatically rolled back on
// exiting this method.
- auto guard = makeGuard([&] { remove(viewNss); });
+ ScopeGuard guard([&] { remove(viewNss); });
// Check for cycles and get the height of the children.
StatsMap statsMap;
diff --git a/src/mongo/dbtests/dbtests.cpp b/src/mongo/dbtests/dbtests.cpp
index c34817f10f4..6f0b0f944db 100644
--- a/src/mongo/dbtests/dbtests.cpp
+++ b/src/mongo/dbtests/dbtests.cpp
@@ -116,7 +116,7 @@ Status createIndexFromSpec(OperationContext* opCtx, StringData ns, const BSONObj
}
MultiIndexBlock indexer;
CollectionWriter collection(coll);
- auto abortOnExit = makeGuard(
+ ScopeGuard abortOnExit(
[&] { indexer.abortIndexBuild(opCtx, collection, MultiIndexBlock::kNoopOnCleanUpFn); });
Status status = indexer
.init(opCtx,
diff --git a/src/mongo/dbtests/indexupdatetests.cpp b/src/mongo/dbtests/indexupdatetests.cpp
index e9989e1c90d..13f405678bd 100644
--- a/src/mongo/dbtests/indexupdatetests.cpp
+++ b/src/mongo/dbtests/indexupdatetests.cpp
@@ -92,7 +92,7 @@ protected:
try {
MultiIndexBlock indexer;
- auto abortOnExit = makeGuard([&] {
+ ScopeGuard abortOnExit([&] {
indexer.abortIndexBuild(_opCtx, collection(), MultiIndexBlock::kNoopOnCleanUpFn);
});
@@ -160,7 +160,7 @@ public:
<< static_cast<int>(kIndexVersion) << "unique" << true
<< "background" << background);
- auto abortOnExit = makeGuard([&] {
+ ScopeGuard abortOnExit([&] {
indexer.abortIndexBuild(_opCtx, collection(), MultiIndexBlock::kNoopOnCleanUpFn);
});
@@ -215,7 +215,7 @@ public:
<< "key" << BSON("a" << 1) << "v"
<< static_cast<int>(kIndexVersion) << "unique" << true
<< "background" << background);
- auto abortOnExit = makeGuard([&] {
+ ScopeGuard abortOnExit([&] {
indexer.abortIndexBuild(_opCtx, collection(), MultiIndexBlock::kNoopOnCleanUpFn);
});
@@ -331,7 +331,7 @@ Status IndexBuildBase::createIndex(const BSONObj& indexSpec) {
Lock::CollectionLock collLk(_opCtx, _nss, MODE_X);
MultiIndexBlock indexer;
- auto abortOnExit = makeGuard(
+ ScopeGuard abortOnExit(
[&] { indexer.abortIndexBuild(_opCtx, collection(), MultiIndexBlock::kNoopOnCleanUpFn); });
Status status =
indexer.init(_opCtx, collection(), indexSpec, MultiIndexBlock::kNoopOnInitFn).getStatus();
diff --git a/src/mongo/dbtests/mock/mock_dbclient_connection.cpp b/src/mongo/dbtests/mock/mock_dbclient_connection.cpp
index e08ab6c84f7..1de5ad73484 100644
--- a/src/mongo/dbtests/mock/mock_dbclient_connection.cpp
+++ b/src/mongo/dbtests/mock/mock_dbclient_connection.cpp
@@ -252,7 +252,7 @@ bool MockDBClientConnection::call(mongo::Message& toSend,
}
}
- auto killSessionOnDisconnect = makeGuard([this] { shutdown(); });
+ ScopeGuard killSessionOnDisconnect([this] { shutdown(); });
stdx::unique_lock lk(_netMutex);
checkConnection();
@@ -279,7 +279,7 @@ bool MockDBClientConnection::call(mongo::Message& toSend,
}
Status MockDBClientConnection::recv(mongo::Message& m, int lastRequestId) {
- auto killSessionOnDisconnect = makeGuard([this] { shutdown(); });
+ ScopeGuard killSessionOnDisconnect([this] { shutdown(); });
stdx::unique_lock lk(_netMutex);
if (!isStillConnected() || !_remoteServer->isRunning()) {
diff --git a/src/mongo/dbtests/querytests.cpp b/src/mongo/dbtests/querytests.cpp
index 624b9bf3cea..d8628e30b0b 100644
--- a/src/mongo/dbtests/querytests.cpp
+++ b/src/mongo/dbtests/querytests.cpp
@@ -104,7 +104,7 @@ protected:
CollectionWriter collection(&_opCtx, _collection->ns());
MultiIndexBlock indexer;
- auto abortOnExit = makeGuard([&] {
+ ScopeGuard abortOnExit([&] {
indexer.abortIndexBuild(&_opCtx, collection, MultiIndexBlock::kNoopOnCleanUpFn);
});
{
diff --git a/src/mongo/dbtests/storage_timestamp_tests.cpp b/src/mongo/dbtests/storage_timestamp_tests.cpp
index 067ab52d4cd..bcd823446a7 100644
--- a/src/mongo/dbtests/storage_timestamp_tests.cpp
+++ b/src/mongo/dbtests/storage_timestamp_tests.cpp
@@ -275,7 +275,7 @@ public:
// Build an index.
MultiIndexBlock indexer;
- auto abortOnExit = makeGuard(
+ ScopeGuard abortOnExit(
[&] { indexer.abortIndexBuild(_opCtx, coll, MultiIndexBlock::kNoopOnCleanUpFn); });
BSONObj indexInfoObj;
@@ -2108,7 +2108,7 @@ public:
// Build an index on `{a: 1}`. This index will be multikey.
MultiIndexBlock indexer;
- auto abortOnExit = makeGuard(
+ ScopeGuard abortOnExit(
[&] { indexer.abortIndexBuild(_opCtx, coll, MultiIndexBlock::kNoopOnCleanUpFn); });
const LogicalTime beforeIndexBuild = _clock->tickClusterTime(2);
BSONObj indexInfoObj;
@@ -2222,7 +2222,7 @@ public:
// Build an index on `{a: 1}`.
MultiIndexBlock indexer;
- auto abortOnExit = makeGuard([&] {
+ ScopeGuard abortOnExit([&] {
indexer.abortIndexBuild(_opCtx, collection, MultiIndexBlock::kNoopOnCleanUpFn);
});
const LogicalTime beforeIndexBuild = _clock->tickClusterTime(2);
@@ -2963,7 +2963,7 @@ public:
const IndexCatalogEntry* buildingIndex = nullptr;
MultiIndexBlock indexer;
- auto abortOnExit = makeGuard([&] {
+ ScopeGuard abortOnExit([&] {
indexer.abortIndexBuild(_opCtx, collection, MultiIndexBlock::kNoopOnCleanUpFn);
});
@@ -3114,7 +3114,7 @@ public:
auto taskFuture = task.get_future();
stdx::thread taskThread{std::move(task)};
- auto joinGuard = makeGuard([&] {
+ ScopeGuard joinGuard([&] {
batchInProgress.promise.emplaceValue(false);
taskThread.join();
});
diff --git a/src/mongo/dbtests/validate_tests.cpp b/src/mongo/dbtests/validate_tests.cpp
index f474b1ac558..cd589503c0e 100644
--- a/src/mongo/dbtests/validate_tests.cpp
+++ b/src/mongo/dbtests/validate_tests.cpp
@@ -145,7 +145,7 @@ protected:
void ensureValidateWorked() {
ValidateResults results = runValidate();
- auto dumpOnErrorGuard = makeGuard([&] {
+ ScopeGuard dumpOnErrorGuard([&] {
StorageDebugUtil::printValidateResults(results);
StorageDebugUtil::printCollectionAndIndexTableEntries(&_opCtx, _nss);
});
@@ -158,7 +158,7 @@ protected:
void ensureValidateFailed() {
ValidateResults results = runValidate();
- auto dumpOnErrorGuard = makeGuard([&] {
+ ScopeGuard dumpOnErrorGuard([&] {
StorageDebugUtil::printValidateResults(results);
StorageDebugUtil::printCollectionAndIndexTableEntries(&_opCtx, _nss);
});
@@ -1180,7 +1180,7 @@ public:
&output,
kTurnOnExtraLoggingForTest));
- auto dumpOnErrorGuard = makeGuard([&] {
+ ScopeGuard dumpOnErrorGuard([&] {
StorageDebugUtil::printValidateResults(results);
StorageDebugUtil::printCollectionAndIndexTableEntries(&_opCtx, coll->ns());
});
@@ -1298,7 +1298,7 @@ public:
&output,
kTurnOnExtraLoggingForTest));
- auto dumpOnErrorGuard = makeGuard([&] {
+ ScopeGuard dumpOnErrorGuard([&] {
StorageDebugUtil::printValidateResults(results);
StorageDebugUtil::printCollectionAndIndexTableEntries(&_opCtx, coll->ns());
});
@@ -1387,7 +1387,7 @@ public:
&output,
kTurnOnExtraLoggingForTest));
- auto dumpOnErrorGuard = makeGuard([&] {
+ ScopeGuard dumpOnErrorGuard([&] {
StorageDebugUtil::printValidateResults(results);
StorageDebugUtil::printCollectionAndIndexTableEntries(&_opCtx, coll->ns());
});
@@ -1499,7 +1499,7 @@ public:
&output,
kTurnOnExtraLoggingForTest));
- auto dumpOnErrorGuard = makeGuard([&] {
+ ScopeGuard dumpOnErrorGuard([&] {
StorageDebugUtil::printValidateResults(results);
StorageDebugUtil::printCollectionAndIndexTableEntries(&_opCtx, coll->ns());
});
@@ -1528,7 +1528,7 @@ public:
&output,
kTurnOnExtraLoggingForTest));
- auto dumpOnErrorGuard = makeGuard([&] {
+ ScopeGuard dumpOnErrorGuard([&] {
StorageDebugUtil::printValidateResults(results);
StorageDebugUtil::printCollectionAndIndexTableEntries(&_opCtx, coll->ns());
});
@@ -1564,7 +1564,7 @@ public:
&output,
kTurnOnExtraLoggingForTest));
- auto dumpOnErrorGuard = makeGuard([&] {
+ ScopeGuard dumpOnErrorGuard([&] {
StorageDebugUtil::printValidateResults(results);
StorageDebugUtil::printCollectionAndIndexTableEntries(&_opCtx, coll->ns());
});
@@ -1683,7 +1683,7 @@ public:
&output,
kTurnOnExtraLoggingForTest));
- auto dumpOnErrorGuard = makeGuard([&] {
+ ScopeGuard dumpOnErrorGuard([&] {
StorageDebugUtil::printValidateResults(results);
StorageDebugUtil::printCollectionAndIndexTableEntries(&_opCtx, coll->ns());
});
@@ -1714,7 +1714,7 @@ public:
&output,
kTurnOnExtraLoggingForTest));
- auto dumpOnErrorGuard = makeGuard([&] {
+ ScopeGuard dumpOnErrorGuard([&] {
StorageDebugUtil::printValidateResults(results);
StorageDebugUtil::printCollectionAndIndexTableEntries(&_opCtx, coll->ns());
});
@@ -1746,7 +1746,7 @@ public:
&output,
kTurnOnExtraLoggingForTest));
- auto dumpOnErrorGuard = makeGuard([&] {
+ ScopeGuard dumpOnErrorGuard([&] {
StorageDebugUtil::printValidateResults(results);
StorageDebugUtil::printCollectionAndIndexTableEntries(&_opCtx, coll->ns());
});
@@ -1836,7 +1836,7 @@ public:
&output,
kTurnOnExtraLoggingForTest));
- auto dumpOnErrorGuard = makeGuard([&] {
+ ScopeGuard dumpOnErrorGuard([&] {
StorageDebugUtil::printValidateResults(results);
StorageDebugUtil::printCollectionAndIndexTableEntries(&_opCtx, coll->ns());
});
@@ -1867,7 +1867,7 @@ public:
&output,
kTurnOnExtraLoggingForTest));
- auto dumpOnErrorGuard = makeGuard([&] {
+ ScopeGuard dumpOnErrorGuard([&] {
StorageDebugUtil::printValidateResults(results);
StorageDebugUtil::printCollectionAndIndexTableEntries(&_opCtx, coll->ns());
});
@@ -1898,7 +1898,7 @@ public:
&output,
kTurnOnExtraLoggingForTest));
- auto dumpOnErrorGuard = makeGuard([&] {
+ ScopeGuard dumpOnErrorGuard([&] {
StorageDebugUtil::printValidateResults(results);
StorageDebugUtil::printCollectionAndIndexTableEntries(&_opCtx, coll->ns());
});
@@ -2089,7 +2089,7 @@ public:
&output,
kTurnOnExtraLoggingForTest));
- auto dumpOnErrorGuard = makeGuard([&] {
+ ScopeGuard dumpOnErrorGuard([&] {
StorageDebugUtil::printValidateResults(results);
StorageDebugUtil::printCollectionAndIndexTableEntries(&_opCtx, coll->ns());
});
@@ -2119,7 +2119,7 @@ public:
&output,
kTurnOnExtraLoggingForTest));
- auto dumpOnErrorGuard = makeGuard([&] {
+ ScopeGuard dumpOnErrorGuard([&] {
StorageDebugUtil::printValidateResults(results);
StorageDebugUtil::printCollectionAndIndexTableEntries(&_opCtx, coll->ns());
});
@@ -2240,7 +2240,7 @@ public:
&output,
kTurnOnExtraLoggingForTest));
- auto dumpOnErrorGuard = makeGuard([&] {
+ ScopeGuard dumpOnErrorGuard([&] {
StorageDebugUtil::printValidateResults(results);
StorageDebugUtil::printCollectionAndIndexTableEntries(&_opCtx, coll->ns());
});
@@ -2271,7 +2271,7 @@ public:
&output,
kTurnOnExtraLoggingForTest));
- auto dumpOnErrorGuard = makeGuard([&] {
+ ScopeGuard dumpOnErrorGuard([&] {
StorageDebugUtil::printValidateResults(results);
StorageDebugUtil::printCollectionAndIndexTableEntries(&_opCtx, coll->ns());
});
@@ -2303,7 +2303,7 @@ public:
&output,
kTurnOnExtraLoggingForTest));
- auto dumpOnErrorGuard = makeGuard([&] {
+ ScopeGuard dumpOnErrorGuard([&] {
StorageDebugUtil::printValidateResults(results);
StorageDebugUtil::printCollectionAndIndexTableEntries(&_opCtx, coll->ns());
});
@@ -2656,7 +2656,7 @@ public:
ValidateResults results = runValidate();
- auto dumpOnErrorGuard = makeGuard([&] {
+ ScopeGuard dumpOnErrorGuard([&] {
StorageDebugUtil::printValidateResults(results);
StorageDebugUtil::printCollectionAndIndexTableEntries(&_opCtx, coll->ns());
});
@@ -2721,7 +2721,7 @@ public:
&output,
kTurnOnExtraLoggingForTest));
- auto dumpOnErrorGuard = makeGuard([&] {
+ ScopeGuard dumpOnErrorGuard([&] {
StorageDebugUtil::printValidateResults(results);
StorageDebugUtil::printCollectionAndIndexTableEntries(&_opCtx, coll->ns());
});
@@ -2789,7 +2789,7 @@ public:
&output,
kTurnOnExtraLoggingForTest));
- auto dumpOnErrorGuard = makeGuard([&] {
+ ScopeGuard dumpOnErrorGuard([&] {
StorageDebugUtil::printValidateResults(results);
StorageDebugUtil::printCollectionAndIndexTableEntries(&_opCtx, coll->ns());
});
@@ -2820,7 +2820,7 @@ public:
&output,
kTurnOnExtraLoggingForTest));
- auto dumpOnErrorGuard = makeGuard([&] {
+ ScopeGuard dumpOnErrorGuard([&] {
StorageDebugUtil::printValidateResults(results);
StorageDebugUtil::printCollectionAndIndexTableEntries(&_opCtx, coll->ns());
});
@@ -2855,7 +2855,7 @@ public:
&output,
kTurnOnExtraLoggingForTest));
- auto dumpOnErrorGuard = makeGuard([&] {
+ ScopeGuard dumpOnErrorGuard([&] {
StorageDebugUtil::printValidateResults(results);
StorageDebugUtil::printCollectionAndIndexTableEntries(&_opCtx, coll->ns());
});
@@ -2886,7 +2886,7 @@ public:
&output,
kTurnOnExtraLoggingForTest));
- auto dumpOnErrorGuard = makeGuard([&] {
+ ScopeGuard dumpOnErrorGuard([&] {
StorageDebugUtil::printValidateResults(results);
StorageDebugUtil::printCollectionAndIndexTableEntries(&_opCtx, coll->ns());
});
@@ -3055,7 +3055,7 @@ public:
&output,
kTurnOnExtraLoggingForTest));
- auto dumpOnErrorGuard = makeGuard([&] {
+ ScopeGuard dumpOnErrorGuard([&] {
StorageDebugUtil::printValidateResults(results);
StorageDebugUtil::printCollectionAndIndexTableEntries(&_opCtx, coll->ns());
});
@@ -3084,7 +3084,7 @@ public:
&output,
kTurnOnExtraLoggingForTest));
- auto dumpOnErrorGuard = makeGuard([&] {
+ ScopeGuard dumpOnErrorGuard([&] {
StorageDebugUtil::printValidateResults(results);
StorageDebugUtil::printCollectionAndIndexTableEntries(&_opCtx, coll->ns());
});
@@ -3114,7 +3114,7 @@ public:
&output,
kTurnOnExtraLoggingForTest));
- auto dumpOnErrorGuard = makeGuard([&] {
+ ScopeGuard dumpOnErrorGuard([&] {
StorageDebugUtil::printValidateResults(results);
StorageDebugUtil::printCollectionAndIndexTableEntries(&_opCtx, coll->ns());
});
@@ -3262,7 +3262,7 @@ public:
&output,
kTurnOnExtraLoggingForTest));
- auto dumpOnErrorGuard = makeGuard([&] {
+ ScopeGuard dumpOnErrorGuard([&] {
StorageDebugUtil::printValidateResults(results);
StorageDebugUtil::printCollectionAndIndexTableEntries(&_opCtx, coll->ns());
});
@@ -3291,7 +3291,7 @@ public:
&output,
kTurnOnExtraLoggingForTest));
- auto dumpOnErrorGuard = makeGuard([&] {
+ ScopeGuard dumpOnErrorGuard([&] {
StorageDebugUtil::printValidateResults(results);
StorageDebugUtil::printCollectionAndIndexTableEntries(&_opCtx, coll->ns());
});
@@ -3320,7 +3320,7 @@ public:
&output,
kTurnOnExtraLoggingForTest));
- auto dumpOnErrorGuard = makeGuard([&] {
+ ScopeGuard dumpOnErrorGuard([&] {
StorageDebugUtil::printValidateResults(results);
StorageDebugUtil::printCollectionAndIndexTableEntries(&_opCtx, coll->ns());
});
@@ -3425,7 +3425,7 @@ public:
&output,
kTurnOnExtraLoggingForTest));
- auto dumpOnErrorGuard = makeGuard([&] {
+ ScopeGuard dumpOnErrorGuard([&] {
StorageDebugUtil::printValidateResults(results);
StorageDebugUtil::printCollectionAndIndexTableEntries(&_opCtx, coll->ns());
});
@@ -3458,7 +3458,7 @@ public:
&output,
kTurnOnExtraLoggingForTest));
- auto dumpOnErrorGuard = makeGuard([&] {
+ ScopeGuard dumpOnErrorGuard([&] {
StorageDebugUtil::printValidateResults(results);
StorageDebugUtil::printCollectionAndIndexTableEntries(&_opCtx, coll->ns());
});
@@ -3522,7 +3522,7 @@ public:
&output,
kTurnOnExtraLoggingForTest));
- auto dumpOnErrorGuard = makeGuard([&] {
+ ScopeGuard dumpOnErrorGuard([&] {
StorageDebugUtil::printValidateResults(results);
StorageDebugUtil::printCollectionAndIndexTableEntries(&_opCtx, coll->ns());
});
@@ -3622,7 +3622,7 @@ public:
&output,
kTurnOnExtraLoggingForTest));
- auto dumpOnErrorGuard = makeGuard([&] {
+ ScopeGuard dumpOnErrorGuard([&] {
StorageDebugUtil::printValidateResults(results);
StorageDebugUtil::printCollectionAndIndexTableEntries(&_opCtx, coll->ns());
});
@@ -3719,7 +3719,7 @@ public:
&output,
kTurnOnExtraLoggingForTest));
- auto dumpOnErrorGuard = makeGuard([&] {
+ ScopeGuard dumpOnErrorGuard([&] {
StorageDebugUtil::printValidateResults(results);
StorageDebugUtil::printCollectionAndIndexTableEntries(&_opCtx, coll->ns());
});
@@ -3748,7 +3748,7 @@ public:
&output,
kTurnOnExtraLoggingForTest));
- auto dumpOnErrorGuard = makeGuard([&] {
+ ScopeGuard dumpOnErrorGuard([&] {
StorageDebugUtil::printValidateResults(results);
StorageDebugUtil::printCollectionAndIndexTableEntries(&_opCtx, coll->ns());
});
diff --git a/src/mongo/dbtests/wildcard_multikey_persistence_test.cpp b/src/mongo/dbtests/wildcard_multikey_persistence_test.cpp
index 89595a0c762..112a0205348 100644
--- a/src/mongo/dbtests/wildcard_multikey_persistence_test.cpp
+++ b/src/mongo/dbtests/wildcard_multikey_persistence_test.cpp
@@ -206,7 +206,7 @@ protected:
CollectionWriter coll(autoColl);
MultiIndexBlock indexer;
- auto abortOnExit = makeGuard(
+ ScopeGuard abortOnExit(
[&] { indexer.abortIndexBuild(opCtx(), coll, MultiIndexBlock::kNoopOnCleanUpFn); });
// Initialize the index builder and add all documents currently in the collection.
diff --git a/src/mongo/embedded/embedded.cpp b/src/mongo/embedded/embedded.cpp
index c08fde3c60a..c6692fbee6b 100644
--- a/src/mongo/embedded/embedded.cpp
+++ b/src/mongo/embedded/embedded.cpp
@@ -195,12 +195,12 @@ ServiceContext* initialize(const char* yaml_config) {
Status status = mongo::runGlobalInitializers(std::vector<std::string>{});
uassertStatusOKWithContext(status, "Global initialization failed");
- auto giGuard = makeGuard([] { mongo::runGlobalDeinitializers().ignore(); });
+ ScopeGuard giGuard([] { mongo::runGlobalDeinitializers().ignore(); });
setGlobalServiceContext(ServiceContext::make());
Client::initThread("initandlisten");
// Make sure the current thread has no Client set in thread_local storage when we leave this function
- auto clientGuard = makeGuard([] { Client::releaseCurrent(); });
+ ScopeGuard clientGuard([] { Client::releaseCurrent(); });
auto serviceContext = getGlobalServiceContext();
serviceContext->setServiceEntryPoint(std::make_unique<ServiceEntryPointEmbedded>());
diff --git a/src/mongo/embedded/mongo_embedded/mongo_embedded_test.cpp b/src/mongo/embedded/mongo_embedded/mongo_embedded_test.cpp
index 955fbf62fff..3785a11754e 100644
--- a/src/mongo/embedded/mongo_embedded/mongo_embedded_test.cpp
+++ b/src/mongo/embedded/mongo_embedded/mongo_embedded_test.cpp
@@ -387,7 +387,7 @@ TEST_F(MongodbCAPITest, KillOp) {
ASSERT(outputBSON.getField("ok").numberDouble() == 1.0);
});
- auto guard = mongo::makeGuard([&] { killOpThread.join(); });
+ mongo::ScopeGuard guard = [&] { killOpThread.join(); };
mongo::BSONObj sleepObj = mongo::fromjson("{'sleep': {'secs': 1000}}");
auto sleepOpMsg = mongo::OpMsgRequest::fromDBAndBody("admin", sleepObj);
diff --git a/src/mongo/embedded/stitch_support/stitch_support_test.cpp b/src/mongo/embedded/stitch_support/stitch_support_test.cpp
index d8984f43367..0b2807c9d02 100644
--- a/src/mongo/embedded/stitch_support/stitch_support_test.cpp
+++ b/src/mongo/embedded/stitch_support/stitch_support_test.cpp
@@ -41,7 +41,6 @@
namespace {
-using mongo::makeGuard;
using mongo::ScopeGuard;
class StitchSupportTest : public mongo::unittest::Test {
diff --git a/src/mongo/executor/connection_pool_test.cpp b/src/mongo/executor/connection_pool_test.cpp
index b08f8d3900a..9ac648e0bad 100644
--- a/src/mongo/executor/connection_pool_test.cpp
+++ b/src/mongo/executor/connection_pool_test.cpp
@@ -157,7 +157,7 @@ TEST_F(ConnectionPoolTest, ConnectionsAreAcquiredInMRUOrder) {
// to the pool by destroying the 'connections' vector. Otherwise,
// this test would cause an invariant failure instead of a normal
// test failure if it fails, which would be confusing.
- const auto guard = makeGuard([&] {
+ const ScopeGuard guard([&] {
while (!connections.empty()) {
try {
ConnectionPool::ConnectionHandle conn = std::move(connections.back());
@@ -233,7 +233,7 @@ TEST_F(ConnectionPoolTest, ConnectionsNotUsedRecentlyArePurged) {
// to the pool by destroying the 'connections' vector. Otherwise,
// this test would cause an invariant failure instead of a normal
// test failure if it fails, which would be confusing.
- const auto guard = makeGuard([&] {
+ const ScopeGuard guard([&] {
while (!connections.empty()) {
try {
ConnectionPool::ConnectionHandle conn = std::move(connections.back());
diff --git a/src/mongo/executor/network_interface_perf_test.cpp b/src/mongo/executor/network_interface_perf_test.cpp
index d8ece73bdd2..ca9c05bf3d4 100644
--- a/src/mongo/executor/network_interface_perf_test.cpp
+++ b/src/mongo/executor/network_interface_perf_test.cpp
@@ -60,7 +60,7 @@ const std::size_t numOperations = 16384;
int timeNetworkTestMillis(std::size_t operations, NetworkInterface* net) {
net->startup();
- auto guard = makeGuard([&] { net->shutdown(); });
+ ScopeGuard guard([&] { net->shutdown(); });
auto fixture = unittest::getFixtureConnectionString();
auto server = fixture.getServers()[0];
diff --git a/src/mongo/executor/network_interface_thread_pool.cpp b/src/mongo/executor/network_interface_thread_pool.cpp
index f9b3faa899d..bc03820a173 100644
--- a/src/mongo/executor/network_interface_thread_pool.cpp
+++ b/src/mongo/executor/network_interface_thread_pool.cpp
@@ -149,7 +149,7 @@ void NetworkInterfaceThreadPool::_consumeTasks(stdx::unique_lock<Latch> lk) {
void NetworkInterfaceThreadPool::_consumeTasksInline(stdx::unique_lock<Latch> lk) noexcept {
_consumeState = ConsumeState::kConsuming;
- const auto consumingTasksGuard = makeGuard([&] { _consumeState = ConsumeState::kNeutral; });
+ const ScopeGuard consumingTasksGuard([&] { _consumeState = ConsumeState::kNeutral; });
decltype(_tasks) tasks;
@@ -158,7 +158,7 @@ void NetworkInterfaceThreadPool::_consumeTasksInline(stdx::unique_lock<Latch> lk
swap(tasks, _tasks);
lk.unlock();
- const auto lkGuard = makeGuard([&] { lk.lock(); });
+ const ScopeGuard lkGuard([&] { lk.lock(); });
for (auto&& task : tasks) {
task(Status::OK());
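
The lkGuard in this hunk shows another recurring shape (also in proxyscope.cpp further down): drop a lock around user callbacks, and guarantee it is reacquired even if a callback throws. A sketch with standard types, where std::mutex stands in for mongo's Latch and the minimal ScopeGuard from the first note is assumed:

    #include <mutex>

    std::mutex gMutex;

    void consume(void (*callback)()) {
        std::unique_lock lk(gMutex);
        // ... take work off a queue while holding the lock ...
        lk.unlock();
        ScopeGuard relock([&] { lk.lock(); });  // reacquired even if callback throws
        callback();
    }  // relock fires first (re-locks), then lk's destructor releases the mutex
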
diff --git a/src/mongo/installer/msi/ca/CustomAction.cpp b/src/mongo/installer/msi/ca/CustomAction.cpp
index b5a5dd1c15c..466ab326a95 100644
--- a/src/mongo/installer/msi/ca/CustomAction.cpp
+++ b/src/mongo/installer/msi/ca/CustomAction.cpp
@@ -294,7 +294,7 @@ extern "C" UINT __stdcall UpdateMongoYAML(MSIHANDLE hInstall) {
CHECKGLE_AND_LOG("Failed to open yaml file");
}
- const auto handleGuard = mongo::makeGuard([&] { CloseHandle(hFile); });
+ const mongo::ScopeGuard handleGuard = [&] { CloseHandle(hFile); };
LARGE_INTEGER fileSize;
if (GetFileSizeEx(hFile, &fileSize) == 0) {
diff --git a/src/mongo/logv2/log_manager.cpp b/src/mongo/logv2/log_manager.cpp
index f9841d7fd79..8cc4dba7187 100644
--- a/src/mongo/logv2/log_manager.cpp
+++ b/src/mongo/logv2/log_manager.cpp
@@ -55,7 +55,7 @@ LogManager::LogManager() {
boost::log::core::get()->set_exception_handler([]() {
thread_local uint32_t depth = 0;
- auto depthGuard = makeGuard([]() { --depth; });
+ ScopeGuard depthGuard([]() { --depth; });
++depth;
// Try and log that we failed to log
if (depth == 1) {
diff --git a/src/mongo/logv2/logv2_test.cpp b/src/mongo/logv2/logv2_test.cpp
index 9eca2ecbe16..fed5d05f2d1 100644
--- a/src/mongo/logv2/logv2_test.cpp
+++ b/src/mongo/logv2/logv2_test.cpp
@@ -198,7 +198,7 @@ public:
sink->set_formatter(PlainFormatter());
boost::log::core::get()->add_sink(sink);
- auto enabledGuard = makeGuard([this] { enabled = false; });
+ ScopeGuard enabledGuard([this] { enabled = false; });
LOGV2(20001, "log during init");
ASSERT_EQUALS(lines.back(), "log during init");
}
diff --git a/src/mongo/rpc/metadata/client_metadata_test.cpp b/src/mongo/rpc/metadata/client_metadata_test.cpp
index 6dbaace6281..e6d414c9336 100644
--- a/src/mongo/rpc/metadata/client_metadata_test.cpp
+++ b/src/mongo/rpc/metadata/client_metadata_test.cpp
@@ -260,7 +260,7 @@ TEST(ClientMetadataTest, TestNegativeWrongTypes) {
// Negative: document larger than 512 bytes
TEST(ClientMetadataTest, TestNegativeLargeDocument) {
bool savedMongos = isMongos();
- auto unsetMongoS = makeGuard([&] { setMongos(savedMongos); });
+ ScopeGuard unsetMongoS([&] { setMongos(savedMongos); });
setMongos(true);
{
diff --git a/src/mongo/s/commands/strategy.cpp b/src/mongo/s/commands/strategy.cpp
index c512ccc9867..595c90f49a4 100644
--- a/src/mongo/s/commands/strategy.cpp
+++ b/src/mongo/s/commands/strategy.cpp
@@ -914,7 +914,7 @@ void ParseAndRunCommand::RunAndRetry::_checkRetryForTransaction(Status& status)
if (!txnRouter)
return;
- auto abortGuard = makeGuard([&] { txnRouter.implicitlyAbortTransaction(opCtx, status); });
+ ScopeGuard abortGuard([&] { txnRouter.implicitlyAbortTransaction(opCtx, status); });
if (!_canRetry()) {
addContextForTransactionAbortingError(
diff --git a/src/mongo/s/query/cluster_find.cpp b/src/mongo/s/query/cluster_find.cpp
index 180b0b32449..d90fd43ba1d 100644
--- a/src/mongo/s/query/cluster_find.cpp
+++ b/src/mongo/s/query/cluster_find.cpp
@@ -660,7 +660,7 @@ void validateTxnNumber(OperationContext* opCtx,
void validateOperationSessionInfo(OperationContext* opCtx,
int64_t cursorId,
ClusterCursorManager::PinnedCursor* cursor) {
- auto returnCursorGuard = makeGuard(
+ ScopeGuard returnCursorGuard(
[cursor] { cursor->returnCursor(ClusterCursorManager::CursorState::NotExhausted); });
validateLSID(opCtx, cursorId, *cursor);
validateTxnNumber(opCtx, cursorId, *cursor);
diff --git a/src/mongo/s/transaction_router.cpp b/src/mongo/s/transaction_router.cpp
index 64742e972c7..32bdb81b368 100644
--- a/src/mongo/s/transaction_router.cpp
+++ b/src/mongo/s/transaction_router.cpp
@@ -1221,7 +1221,7 @@ BSONObj TransactionRouter::Router::abortTransaction(OperationContext* opCtx) {
// Update stats on scope exit so the transaction is considered "active" while waiting on abort
// responses.
- auto updateStatsGuard = makeGuard([&] { _onExplicitAbort(opCtx); });
+ ScopeGuard updateStatsGuard([&] { _onExplicitAbort(opCtx); });
// The router has yet to send any commands to a remote shard for this transaction.
// Return the same error that would have been returned by a shard.
@@ -1297,7 +1297,7 @@ void TransactionRouter::Router::implicitlyAbortTransaction(OperationContext* opC
// Update stats on scope exit so the transaction is considered "active" while waiting on abort
// responses.
- auto updateStatsGuard = makeGuard([&] { _onImplicitAbort(opCtx, status); });
+ ScopeGuard updateStatsGuard([&] { _onImplicitAbort(opCtx, status); });
if (o().participants.empty()) {
return;
diff --git a/src/mongo/scripting/mozjs/implscope.cpp b/src/mongo/scripting/mozjs/implscope.cpp
index 097ecd1304a..7cf6e8c28c8 100644
--- a/src/mongo/scripting/mozjs/implscope.cpp
+++ b/src/mongo/scripting/mozjs/implscope.cpp
@@ -201,7 +201,7 @@ bool MozJSImplScope::_interruptCallback(JSContext* cx) {
auto scope = getScope(cx);
JS_DisableInterruptCallback(scope->_context);
- auto guard = makeGuard([&]() { JS_ResetInterruptCallback(scope->_context, false); });
+ ScopeGuard guard([&]() { JS_ResetInterruptCallback(scope->_context, false); });
if (scope->_pendingGC.load() || closeToMaxMemory()) {
scope->_pendingGC.store(false);
@@ -712,7 +712,7 @@ int MozJSImplScope::invoke(ScriptingFunction func,
JS::RootedValue out(_context);
{
- auto guard = makeGuard([&] { _engine->getDeadlineMonitor().stopDeadline(this); });
+ ScopeGuard guard([&] { _engine->getDeadlineMonitor().stopDeadline(this); });
JS::RootedObject obj(_context, smrecv.toObjectOrNull());
@@ -766,7 +766,7 @@ bool MozJSImplScope::exec(StringData code,
JS::RootedValue out(_context);
{
- auto guard = makeGuard([&] { _engine->getDeadlineMonitor().stopDeadline(this); });
+ ScopeGuard guard([&] { _engine->getDeadlineMonitor().stopDeadline(this); });
success = JS_ExecuteScript(_context, script, &out);
diff --git a/src/mongo/scripting/mozjs/proxyscope.cpp b/src/mongo/scripting/mozjs/proxyscope.cpp
index a474eb1a719..4002704e2d6 100644
--- a/src/mongo/scripting/mozjs/proxyscope.cpp
+++ b/src/mongo/scripting/mozjs/proxyscope.cpp
@@ -355,7 +355,7 @@ void MozJSProxyScope::implThread(MozJSProxyScope* proxy) {
// This is mostly to silence coverity, so that it sees that the
// ProxyScope doesn't hold a reference to the ImplScope after it
// is deleted by the unique_ptr.
- const auto unbindImplScope = makeGuard([&proxy] { proxy->_implScope = nullptr; });
+ const ScopeGuard unbindImplScope([&proxy] { proxy->_implScope = nullptr; });
while (true) {
stdx::unique_lock<Latch> lk(proxy->_mutex);
@@ -371,7 +371,7 @@ void MozJSProxyScope::implThread(MozJSProxyScope* proxy) {
try {
lk.unlock();
- const auto unlockGuard = makeGuard([&] { lk.lock(); });
+ const ScopeGuard unlockGuard([&] { lk.lock(); });
proxy->_function();
} catch (...) {
proxy->_status = exceptionToStatus();
diff --git a/src/mongo/shell/encrypted_dbclient_base.cpp b/src/mongo/shell/encrypted_dbclient_base.cpp
index c547e51df59..d843b095aef 100644
--- a/src/mongo/shell/encrypted_dbclient_base.cpp
+++ b/src/mongo/shell/encrypted_dbclient_base.cpp
@@ -120,7 +120,7 @@ BSONObj EncryptedDBClientBase::encryptDecryptCommand(const BSONObj& object,
// decrypt payload throw an exception, the stack's destructor will fire. Because a stack's
// variables are not guaranteed to be destroyed in any order, we need to add a guard
// to ensure the stack is destroyed in order.
- const auto frameStackGuard = makeGuard([&] {
+ const ScopeGuard frameStackGuard([&] {
while (!frameStack.empty()) {
frameStack.pop();
}
diff --git a/src/mongo/shell/mongo_main.cpp b/src/mongo/shell/mongo_main.cpp
index 6d95fca1ec7..811f896535c 100644
--- a/src/mongo/shell/mongo_main.cpp
+++ b/src/mongo/shell/mongo_main.cpp
@@ -837,7 +837,7 @@ int mongo_main(int argc, char* argv[]) {
mongo::getGlobalScriptEngine()->enableJavaScriptProtection(
shellGlobalParams.javascriptProtection);
- auto poolGuard = makeGuard([] { ScriptEngine::dropScopeCache(); });
+ ScopeGuard poolGuard([] { ScriptEngine::dropScopeCache(); });
std::unique_ptr<mongo::Scope> scope(mongo::getGlobalScriptEngine()->newScope());
shellMainScope = scope.get();
diff --git a/src/mongo/tools/bridge.cpp b/src/mongo/tools/bridge.cpp
index d78db632536..3ba0fe41d3c 100644
--- a/src/mongo/tools/bridge.cpp
+++ b/src/mongo/tools/bridge.cpp
@@ -269,7 +269,7 @@ Future<DbResponse> ServiceEntryPointBridge::handleRequest(OperationContext* opCt
// If the bridge decides to return something other than a response from an active exhaust
// stream, make sure we close the exhaust stream properly.
- auto earlyExhaustExitGuard = makeGuard([&] {
+ ScopeGuard earlyExhaustExitGuard([&] {
if (dest.inExhaust()) {
LOGV2(4622301, "mongobridge shutting down exhaust stream", "remote"_attr = dest);
dest.setInExhaust(false);
diff --git a/src/mongo/transport/baton_asio_linux.h b/src/mongo/transport/baton_asio_linux.h
index becc205cd2b..19cebe3ae3a 100644
--- a/src/mongo/transport/baton_asio_linux.h
+++ b/src/mongo/transport/baton_asio_linux.h
@@ -289,7 +289,7 @@ public:
std::vector<Promise<void>> toFulfill;
// We'll fulfill promises and run jobs on the way out, ensuring we don't hold any locks
- const auto guard = makeGuard([&] {
+ const ScopeGuard guard([&] {
for (auto& promise : toFulfill) {
promise.emplaceValue();
}
diff --git a/src/mongo/transport/service_executor_reserved.cpp b/src/mongo/transport/service_executor_reserved.cpp
index 4a086efb01d..e8745d9e5f7 100644
--- a/src/mongo/transport/service_executor_reserved.cpp
+++ b/src/mongo/transport/service_executor_reserved.cpp
@@ -100,7 +100,7 @@ Status ServiceExecutorReserved::_startWorker() {
return launchServiceWorkerThread([this] {
stdx::unique_lock<Latch> lk(_mutex);
_numRunningWorkerThreads.addAndFetch(1);
- auto numRunningGuard = makeGuard([&] {
+ ScopeGuard numRunningGuard([&] {
_numRunningWorkerThreads.subtractAndFetch(1);
_shutdownCondition.notify_one();
});
diff --git a/src/mongo/transport/service_executor_test.cpp b/src/mongo/transport/service_executor_test.cpp
index 1b93c7d8280..303063d22b1 100644
--- a/src/mongo/transport/service_executor_test.cpp
+++ b/src/mongo/transport/service_executor_test.cpp
@@ -277,7 +277,7 @@ TEST_F(ServiceExecutorFixedTest, ScheduleSucceedsBeforeShutdown) {
handle.start();
stdx::thread scheduleClient;
- auto joinGuard = makeGuard([&] { scheduleClient.join(); });
+ ScopeGuard joinGuard([&] { scheduleClient.join(); });
{
FailPointEnableBlock failpoint("hangBeforeSchedulingServiceExecutorFixedTask");
diff --git a/src/mongo/transport/service_executor_utils.cpp b/src/mongo/transport/service_executor_utils.cpp
index 13cba5d994d..f4382074734 100644
--- a/src/mongo/transport/service_executor_utils.cpp
+++ b/src/mongo/transport/service_executor_utils.cpp
@@ -75,7 +75,7 @@ Status launchServiceWorkerThread(unique_function<void()> task) noexcept {
#else
pthread_attr_t attrs;
pthread_attr_init(&attrs);
- auto attrsGuard = makeGuard([&attrs] { pthread_attr_destroy(&attrs); });
+ ScopeGuard attrsGuard([&attrs] { pthread_attr_destroy(&attrs); });
pthread_attr_setdetachstate(&attrs, PTHREAD_CREATE_DETACHED);
static const rlim_t kStackSize =
diff --git a/src/mongo/transport/transport_layer_asio_integration_test.cpp b/src/mongo/transport/transport_layer_asio_integration_test.cpp
index 2b8fea1f8f5..9f6eafa21ad 100644
--- a/src/mongo/transport/transport_layer_asio_integration_test.cpp
+++ b/src/mongo/transport/transport_layer_asio_integration_test.cpp
@@ -110,7 +110,7 @@ TEST(TransportLayerASIO, ShortReadsAndWritesWork) {
auto reactor = sc->getTransportLayer()->getReactor(transport::TransportLayer::kNewReactor);
stdx::thread thread([&] { reactor->run(); });
- const auto threadGuard = makeGuard([&] {
+ const ScopeGuard threadGuard([&] {
reactor->stop();
thread.join();
});
@@ -143,7 +143,7 @@ TEST(TransportLayerASIO, asyncConnectTimeoutCleansUpSocket) {
stdx::thread thread([&] { reactor->run(); });
- const auto threadGuard = makeGuard([&] {
+ const ScopeGuard threadGuard([&] {
reactor->stop();
thread.join();
});
@@ -163,7 +163,7 @@ TEST(TransportLayerASIO, exhaustIsMasterShouldReceiveMultipleReplies) {
auto reactor = sc->getTransportLayer()->getReactor(transport::TransportLayer::kNewReactor);
stdx::thread thread([&] { reactor->run(); });
- const auto threadGuard = makeGuard([&] {
+ const ScopeGuard threadGuard([&] {
reactor->stop();
thread.join();
});
@@ -245,7 +245,7 @@ TEST(TransportLayerASIO, exhaustIsMasterShouldStopOnFailure) {
auto reactor = sc->getTransportLayer()->getReactor(transport::TransportLayer::kNewReactor);
stdx::thread thread([&] { reactor->run(); });
- const auto threadGuard = makeGuard([&] {
+ const ScopeGuard threadGuard([&] {
reactor->stop();
thread.join();
});
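
The threadGuard in these tests stops the reactor and joins the helper thread on every exit path, so a failed assertion (which throws in this framework) cannot leave a running thread behind. A generic sketch of the join-guard idiom, again assuming the minimal ScopeGuard from the first note:

    #include <atomic>
    #include <thread>

    void runWithWorker() {
        std::atomic<bool> stop{false};
        std::thread worker([&] {
            while (!stop.load()) { /* ... */ }
        });
        ScopeGuard joinGuard([&] {
            stop.store(true);  // ask the worker to finish, then wait for it
            worker.join();
        });
        // ... test body; any throw still unwinds through joinGuard ...
    }  // joinGuard joins first, so worker's destructor sees a joined thread
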
diff --git a/src/mongo/unittest/death_test.cpp b/src/mongo/unittest/death_test.cpp
index 0afe8800d09..55b011ab437 100644
--- a/src/mongo/unittest/death_test.cpp
+++ b/src/mongo/unittest/death_test.cpp
@@ -119,16 +119,16 @@ void DeathTestBase::_doTest() {
FILE* pf = 0;
if ((pf = fdopen(pipes[0], "r")) == NULL)
logAndThrowWithErrno("fdopen(pipe[0], \"r\")");
- auto pfGuard = makeGuard([&] {
+ ScopeGuard pfGuard([&] {
if (fclose(pf) != 0)
logAndThrowWithErrno("fclose(pf)");
});
LOGV2(5042601, "Death test starting");
- auto alwaysLogExit = makeGuard([] { LOGV2(5042602, "Death test finishing"); });
+ ScopeGuard alwaysLogExit([] { LOGV2(5042602, "Death test finishing"); });
char* lineBuf = nullptr;
size_t lineBufSize = 0;
- auto lineBufGuard = makeGuard([&] { free(lineBuf); });
+ ScopeGuard lineBufGuard([&] { free(lineBuf); });
while (true) {
errno = 0; // Needed as getline can return -1 without setting errno.
ssize_t bytesRead = getline(&lineBuf, &lineBufSize, pf);
diff --git a/src/mongo/unittest/thread_assertion_monitor.h b/src/mongo/unittest/thread_assertion_monitor.h
index 157eb3c4885..b9ce8cba8cb 100644
--- a/src/mongo/unittest/thread_assertion_monitor.h
+++ b/src/mongo/unittest/thread_assertion_monitor.h
@@ -65,7 +65,7 @@ public:
template <typename F>
stdx::thread spawnController(F&& f) {
return spawn([this, f = std::move(f)]() mutable {
- auto notifyDoneGuard = makeGuard([this] { notifyDone(); });
+ ScopeGuard notifyDoneGuard([this] { notifyDone(); });
exec(std::move(f));
});
}
diff --git a/src/mongo/util/decoration_registry.h b/src/mongo/util/decoration_registry.h
index c434b0c350e..bd47fa96abd 100644
--- a/src/mongo/util/decoration_registry.h
+++ b/src/mongo/util/decoration_registry.h
@@ -124,7 +124,7 @@ public:
});
};
- auto cleanup = makeGuard(std::move(cleanupFunction));
+ ScopeGuard cleanup(std::move(cleanupFunction));
using std::cend;
@@ -157,7 +157,7 @@ public:
});
};
- auto cleanup = makeGuard(std::move(cleanupFunction));
+ ScopeGuard cleanup(std::move(cleanupFunction));
using std::cend;
diff --git a/src/mongo/util/fail_point_test.cpp b/src/mongo/util/fail_point_test.cpp
index cb882853548..7ad3ba4f0b0 100644
--- a/src/mongo/util/fail_point_test.cpp
+++ b/src/mongo/util/fail_point_test.cpp
@@ -209,13 +209,13 @@ TEST(FailPoint, Stress) {
FailPoint fp("testFP");
fp.setMode(FailPoint::alwaysOn, 0, BSON("a" << 44));
auto fpGuard =
- mongo::makeGuard([&] { fp.setMode(FailPoint::off, 0, BSON("a" << 66)); });
+ mongo::ScopeGuard([&] { fp.setMode(FailPoint::off, 0, BSON("a" << 66)); });
std::vector<stdx::thread> tasks;
- auto joinGuard = mongo::makeGuard([&] {
+ mongo::ScopeGuard joinGuard = [&] {
for (auto&& t : tasks)
if (t.joinable())
t.join();
- });
+ };
auto launchLoop = [&](auto&& f) {
tasks.push_back(monitor.spawn([&, f] {
while (!done.load())
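
This hunk mixes the two spellings the migration allows. Both rely on CTAD; the copy-initialization form additionally requires the converting constructor to be non-explicit, and the `auto fpGuard = mongo::ScopeGuard(...)` form compiles even for a non-movable guard thanks to C++17 guaranteed copy elision. Side by side, using the minimal sketch from the first note:

    void spellings() {
        ScopeGuard g1([] { /* cleanup */ });         // direct-initialization
        ScopeGuard g2 = [] { /* cleanup */ };        // copy-initialization
        auto g3 = ScopeGuard([] { /* cleanup */ });  // prvalue; guaranteed elision
    }  // destroyed g3, g2, g1: reverse order of construction
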
diff --git a/src/mongo/util/log_with_sampling_test.cpp b/src/mongo/util/log_with_sampling_test.cpp
index 140abe36a8f..61121221a80 100644
--- a/src/mongo/util/log_with_sampling_test.cpp
+++ b/src/mongo/util/log_with_sampling_test.cpp
@@ -61,7 +61,7 @@ auto scenario(bool debugLogEnabled, bool slowOp, bool forceSample) {
auto loggedSeverityGuard = unittest::MinimumLoggedSeverityGuard(
component, debugLogEnabled ? logv2::LogSeverity::Debug(1) : logv2::LogSeverity::Info());
- auto sampleRateGuard = makeGuard(
+ ScopeGuard sampleRateGuard(
[savedRate = serverGlobalParams.sampleRate] { serverGlobalParams.sampleRate = savedRate; });
serverGlobalParams.sampleRate = forceSample ? 1.0 : 0.0;
diff --git a/src/mongo/util/net/hostname_canonicalization.cpp b/src/mongo/util/net/hostname_canonicalization.cpp
index b329d0f6149..3cae5f64440 100644
--- a/src/mongo/util/net/hostname_canonicalization.cpp
+++ b/src/mongo/util/net/hostname_canonicalization.cpp
@@ -98,7 +98,7 @@ std::vector<std::string> getHostFQDNs(std::string hostName, HostnameCanonicaliza
"error"_attr = getAddrInfoStrError(err));
return results;
}
- const auto guard = makeGuard(shim_freeaddrinfo);
+ const ScopeGuard guard(shim_freeaddrinfo);
if (mode == HostnameCanonicalizationMode::kForward) {
results.emplace_back(shim_fromNativeString(info->ai_canonname));
diff --git a/src/mongo/util/net/http_client_winhttp.cpp b/src/mongo/util/net/http_client_winhttp.cpp
index 9e877966167..d972e60065e 100644
--- a/src/mongo/util/net/http_client_winhttp.cpp
+++ b/src/mongo/util/net/http_client_winhttp.cpp
@@ -188,7 +188,7 @@ public:
// Cleanup handled in a guard rather than UniquePtrs to ensure order.
HINTERNET session = nullptr, connect = nullptr, request = nullptr;
- auto guard = makeGuard([&] {
+ ScopeGuard guard([&] {
if (request) {
WinHttpCloseHandle(request);
}
diff --git a/src/mongo/util/net/openssl_init.cpp b/src/mongo/util/net/openssl_init.cpp
index 0f2c439e5f9..6dd58bb23c4 100644
--- a/src/mongo/util/net/openssl_init.cpp
+++ b/src/mongo/util/net/openssl_init.cpp
@@ -77,7 +77,7 @@ public:
// The `guard` callback will cause an invocation of `getID`, so it must be destroyed first.
static thread_local ManagedId managedId;
- static thread_local auto guard = makeGuard([] { ERR_remove_state(0); });
+ static thread_local ScopeGuard guard([] { ERR_remove_state(0); });
return managedId.id;
}
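
The comment in this hunk depends on a guarantee worth spelling out: objects with thread storage duration are destroyed in reverse order of construction, so a thread_local guard constructed after managedId runs its callback while managedId is still alive. A self-contained sketch:

    #include <iostream>

    struct Tracer {
        const char* name;
        Tracer(const char* n) : name(n) { std::cout << "ctor " << name << '\n'; }
        ~Tracer() { std::cout << "dtor " << name << '\n'; }
    };

    void touch() {
        static thread_local Tracer first("first");    // plays the role of managedId
        static thread_local Tracer second("second");  // plays the role of guard
    }

    int main() {
        touch();
    }
    // Prints: ctor first, ctor second, then at thread exit
    // dtor second (while "first" is still alive), and finally dtor first.
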
diff --git a/src/mongo/util/net/ssl_manager_openssl.cpp b/src/mongo/util/net/ssl_manager_openssl.cpp
index 1a143bb6a91..d50556b7ec3 100644
--- a/src/mongo/util/net/ssl_manager_openssl.cpp
+++ b/src/mongo/util/net/ssl_manager_openssl.cpp
@@ -693,7 +693,7 @@ StatusWith<std::vector<std::string>> addOCSPUrlToMap(
for (int i = 0; i < sk_OPENSSL_STRING_num(aiaOCSP.get()); i++) {
int useSSL = 0;
char *host, *port, *path;
- auto OCSPStrGuard = makeGuard([&] {
+ ScopeGuard OCSPStrGuard([&] {
if (host) {
OPENSSL_free(host);
}
@@ -2820,7 +2820,7 @@ bool SSLManagerOpenSSL::_setupPEMFromBIO(SSL_CTX* context,
LOGV2_ERROR(23251, "Cannot read PEM key", errorAttrs);
return false;
}
- const auto privateKeyGuard = makeGuard([&privateKey]() { EVP_PKEY_free(privateKey); });
+ const ScopeGuard privateKeyGuard([&privateKey]() { EVP_PKEY_free(privateKey); });
if (SSL_CTX_use_PrivateKey(context, privateKey) != 1) {
CaptureSSLErrorInAttrs capture(errorAttrs);
diff --git a/src/mongo/util/options_parser/options_parser.cpp b/src/mongo/util/options_parser/options_parser.cpp
index 315c62dd448..ad51118032b 100644
--- a/src/mongo/util/options_parser/options_parser.cpp
+++ b/src/mongo/util/options_parser/options_parser.cpp
@@ -1444,9 +1444,9 @@ Status OptionsParser::readConfigFile(const std::string& filename,
#ifdef _WIN32
// The checks below are only performed on POSIX systems
// due to differing permission models.
- auto fdguard = makeGuard([&fd] { ::_close(fd); });
+ ScopeGuard fdguard([&fd] { ::_close(fd); });
#else
- auto fdguard = makeGuard([&fd] { ::close(fd); });
+ ScopeGuard fdguard([&fd] { ::close(fd); });
if (configExpand.rest) {
auto status = checkFileOwnershipAndMode(fd, S_IRGRP | S_IROTH, "readable"_sd);
diff --git a/src/mongo/util/perfctr_collect.cpp b/src/mongo/util/perfctr_collect.cpp
index 6e9ae1dc0f2..7b9c6413608 100644
--- a/src/mongo/util/perfctr_collect.cpp
+++ b/src/mongo/util/perfctr_collect.cpp
@@ -81,7 +81,7 @@ std::string errnoWithPdhDescription(PDH_STATUS status) {
return str::stream() << "Format message failed with " << gle << " for status " << status;
}
- auto errorTextGuard = makeGuard([errorText] { LocalFree(errorText); });
+ ScopeGuard errorTextGuard([errorText] { LocalFree(errorText); });
std::string utf8ErrorText = toUtf8String(errorText);
auto size = utf8ErrorText.find_first_of("\r\n");
diff --git a/src/mongo/util/procparser.cpp b/src/mongo/util/procparser.cpp
index 24b9d1e2c95..7c8bf38c247 100644
--- a/src/mongo/util/procparser.cpp
+++ b/src/mongo/util/procparser.cpp
@@ -96,7 +96,7 @@ StatusWith<std::string> readFileAsString(StringData filename) {
str::stream() << "Failed to open file " << filename
<< " with error: " << errnoWithDescription(err));
}
- auto scopedGuard = makeGuard([fd] { close(fd); });
+ ScopeGuard scopedGuard([fd] { close(fd); });
BufBuilder builder(kFileBufferSize);
std::array<char, kFileBufferSize> buf;
diff --git a/src/mongo/util/producer_consumer_queue.h b/src/mongo/util/producer_consumer_queue.h
index d3ef0749e52..65f0313d049 100644
--- a/src/mongo/util/producer_consumer_queue.h
+++ b/src/mongo/util/producer_consumer_queue.h
@@ -807,7 +807,7 @@ private:
_checkProducerClosed(lk);
- const auto guard = makeGuard([&] { _notifyIfNecessary(lk); });
+ const ScopeGuard guard([&] { _notifyIfNecessary(lk); });
return cb(lk);
}
@@ -818,7 +818,7 @@ private:
_checkConsumerClosed(lk);
- const auto guard = makeGuard([&] { _notifyIfNecessary(lk); });
+ const ScopeGuard guard([&] { _notifyIfNecessary(lk); });
return cb(lk);
}
diff --git a/src/mongo/util/stacktrace_threads.cpp b/src/mongo/util/stacktrace_threads.cpp
index d2ee29d24b4..d7157d0e212 100644
--- a/src/mongo/util/stacktrace_threads.cpp
+++ b/src/mongo/util/stacktrace_threads.cpp
@@ -615,7 +615,7 @@ void State::printToEmitter(AbstractEmitter& emitter) {
}
void State::action(siginfo_t* si) {
- const auto errnoGuard = makeGuard([e = errno] { errno = e; });
+ const ScopeGuard errnoGuard([e = errno] { errno = e; });
switch (si->si_code) {
case SI_USER:
case SI_QUEUE:
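
One footnote on the many abortOnExit and abortGuard call sites above: these hunks only change how the guard is constructed. On success paths the callers typically neutralize the guard, and the guard type exposes a dismiss() member for that, mirrored in the minimal sketch from the first note. A hypothetical end-to-end shape (startWork, abortWork, and commitWork are stand-in names for illustration, not mongo APIs):

    #include <iostream>

    void startWork()  { std::cout << "start\n"; }
    void abortWork()  { std::cout << "abort\n"; }
    void commitWork() { std::cout << "commit\n"; }

    bool doBuild(bool failMidway) {
        startWork();
        ScopeGuard abortOnExit([] { abortWork(); });  // fires on any early exit
        if (failMidway)
            return false;  // abortWork() runs here
        commitWork();
        abortOnExit.dismiss();  // success: suppress the cleanup
        return true;
    }

    int main() {
        doBuild(true);   // prints: start, abort
        doBuild(false);  // prints: start, commit
    }
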