summaryrefslogtreecommitdiff
path: root/src/mongo
diff options
context:
space:
mode:
authorBilly Donahue <billy.donahue@mongodb.com>2019-01-09 17:32:12 -0500
committerBilly Donahue <billy.donahue@mongodb.com>2019-01-11 10:50:23 -0500
commitbbf28648de0d8695c502e13922a8d9e5ca1b51e8 (patch)
tree6382810d03fb698d9b2d49f488be90e604324811 /src/mongo
parent17514947cc816df2500aa0e919506586d4d56aa0 (diff)
downloadmongo-bbf28648de0d8695c502e13922a8d9e5ca1b51e8.tar.gz
SERVER-30711: scope_guard rewrite, to avoid -Werror=noexcept-type
Macro ON_BLOCK_EXIT(...) now takes a single callable. Some renames: Dismiss -> dismiss, MakeGuard -> makeGuard
Diffstat (limited to 'src/mongo')
-rw-r--r--src/mongo/base/secure_allocator.cpp2
-rw-r--r--src/mongo/bson/bsonelement.cpp4
-rw-r--r--src/mongo/client/dbclient_connection.cpp12
-rw-r--r--src/mongo/client/fetcher.cpp4
-rw-r--r--src/mongo/client/sasl_sspi.cpp2
-rw-r--r--src/mongo/db/auth/authorization_session_impl.cpp6
-rw-r--r--src/mongo/db/catalog/catalog_control.cpp4
-rw-r--r--src/mongo/db/catalog/database_holder_impl.cpp4
-rw-r--r--src/mongo/db/catalog/drop_database.cpp16
-rw-r--r--src/mongo/db/catalog/multi_index_block.cpp8
-rw-r--r--src/mongo/db/catalog/rename_collection.cpp4
-rw-r--r--src/mongo/db/commands/create_indexes.cpp4
-rw-r--r--src/mongo/db/commands/getmore_cmd.cpp4
-rw-r--r--src/mongo/db/commands/run_aggregate.cpp14
-rw-r--r--src/mongo/db/commands/user_management_commands.cpp6
-rw-r--r--src/mongo/db/concurrency/d_concurrency_test.cpp2
-rw-r--r--src/mongo/db/concurrency/lock_state.cpp8
-rw-r--r--src/mongo/db/exec/delete.cpp10
-rw-r--r--src/mongo/db/exec/update.cpp10
-rw-r--r--src/mongo/db/logical_session_cache_impl.cpp38
-rw-r--r--src/mongo/db/operation_context.cpp2
-rw-r--r--src/mongo/db/repl/collection_bulk_loader_impl.cpp16
-rw-r--r--src/mongo/db/repl/collection_bulk_loader_impl.h2
-rw-r--r--src/mongo/db/repl/initial_syncer.cpp4
-rw-r--r--src/mongo/db/repl/isself.cpp4
-rw-r--r--src/mongo/db/repl/isself_test.cpp4
-rw-r--r--src/mongo/db/repl/replication_coordinator_impl.cpp20
-rw-r--r--src/mongo/db/repl/scatter_gather_runner.cpp4
-rw-r--r--src/mongo/db/s/balancer/migration_manager.cpp8
-rw-r--r--src/mongo/db/s/config/sharding_catalog_manager_shard_operations.cpp4
-rw-r--r--src/mongo/db/s/migration_destination_manager.cpp6
-rw-r--r--src/mongo/db/s/migration_source_manager.cpp20
-rw-r--r--src/mongo/db/s/move_primary_source_manager.cpp12
-rw-r--r--src/mongo/db/server_options_test.cpp4
-rw-r--r--src/mongo/db/service_entry_point_common.cpp6
-rw-r--r--src/mongo/db/storage/kv/kv_storage_engine.cpp2
-rw-r--r--src/mongo/db/storage/storage_engine_init.cpp4
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp6
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_oplog_manager.cpp6
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_oplog_manager.h2
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.cpp2
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_size_storer.cpp2
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_util.cpp8
-rw-r--r--src/mongo/db/transaction_participant.cpp2
-rw-r--r--src/mongo/db/transaction_reaper.cpp2
-rw-r--r--src/mongo/db/views/view_graph.cpp7
-rw-r--r--src/mongo/dbtests/storage_timestamp_tests.cpp4
-rw-r--r--src/mongo/executor/connection_pool.cpp4
-rw-r--r--src/mongo/executor/connection_pool_test.cpp4
-rw-r--r--src/mongo/executor/network_interface_perf_test.cpp2
-rw-r--r--src/mongo/executor/network_interface_thread_pool.cpp4
-rw-r--r--src/mongo/installer/msi/ca/CustomAction.cpp2
-rw-r--r--src/mongo/platform/stack_locator_freebsd.cpp2
-rw-r--r--src/mongo/platform/stack_locator_pthread_getattr_np.cpp2
-rw-r--r--src/mongo/rpc/metadata/client_metadata_test.cpp2
-rw-r--r--src/mongo/rpc/object_check_test.cpp2
-rw-r--r--src/mongo/s/client/shard_registry.cpp2
-rw-r--r--src/mongo/s/commands/strategy.cpp12
-rw-r--r--src/mongo/s/query/cluster_find.cpp4
-rw-r--r--src/mongo/s/service_entry_point_mongos.cpp2
-rw-r--r--src/mongo/scripting/mozjs/implscope.cpp4
-rw-r--r--src/mongo/scripting/mozjs/proxyscope.cpp2
-rw-r--r--src/mongo/shell/dbshell.cpp2
-rw-r--r--src/mongo/shell/shell_utils_extended.cpp10
-rw-r--r--src/mongo/shell/shell_utils_launcher.cpp12
-rw-r--r--src/mongo/transport/baton_asio_linux.h4
-rw-r--r--src/mongo/transport/service_executor_adaptive.cpp4
-rw-r--r--src/mongo/transport/service_executor_adaptive_test.cpp10
-rw-r--r--src/mongo/transport/service_executor_reserved.cpp2
-rw-r--r--src/mongo/transport/service_executor_test.cpp4
-rw-r--r--src/mongo/transport/transport_layer_asio_integration_test.cpp6
-rw-r--r--src/mongo/unittest/unittest.cpp2
-rw-r--r--src/mongo/util/decoration_registry.h4
-rw-r--r--src/mongo/util/net/hostname_canonicalization.cpp2
-rw-r--r--src/mongo/util/net/http_client_winhttp.cpp2
-rw-r--r--src/mongo/util/net/ssl_manager_openssl.cpp16
-rw-r--r--src/mongo/util/perfctr_collect.cpp2
-rw-r--r--src/mongo/util/processinfo_solaris.cpp2
-rw-r--r--src/mongo/util/procparser.cpp2
-rw-r--r--src/mongo/util/producer_consumer_queue.h4
-rw-r--r--src/mongo/util/scopeguard.h418
-rw-r--r--src/mongo/util/signal_handlers.cpp2
82 files changed, 283 insertions, 602 deletions
diff --git a/src/mongo/base/secure_allocator.cpp b/src/mongo/base/secure_allocator.cpp
index a8b504516d3..f5f696e1078 100644
--- a/src/mongo/base/secure_allocator.cpp
+++ b/src/mongo/base/secure_allocator.cpp
@@ -89,7 +89,7 @@ void EnablePrivilege(const wchar_t* name) {
return;
}
- const auto accessTokenGuard = MakeGuard([&] { CloseHandle(accessToken); });
+ const auto accessTokenGuard = makeGuard([&] { CloseHandle(accessToken); });
TOKEN_PRIVILEGES privileges = {0};
diff --git a/src/mongo/bson/bsonelement.cpp b/src/mongo/bson/bsonelement.cpp
index caa2af3f879..b87de5f5d26 100644
--- a/src/mongo/bson/bsonelement.cpp
+++ b/src/mongo/bson/bsonelement.cpp
@@ -93,7 +93,7 @@ void BSONElement::jsonStringStream(JsonStringFormat format,
if (number() >= -std::numeric_limits<double>::max() &&
number() <= std::numeric_limits<double>::max()) {
auto origPrecision = s.precision();
- auto guard = MakeGuard([&s, origPrecision]() { s.precision(origPrecision); });
+ auto guard = makeGuard([&s, origPrecision]() { s.precision(origPrecision); });
s.precision(16);
s << number();
}
@@ -217,7 +217,7 @@ void BSONElement::jsonStringStream(JsonStringFormat format,
auto origFill = s.fill();
auto origFmtF = s.flags();
auto origWidth = s.width();
- auto guard = MakeGuard([&s, origFill, origFmtF, origWidth] {
+ auto guard = makeGuard([&s, origFill, origFmtF, origWidth] {
s.fill(origFill);
s.setf(origFmtF);
s.width(origWidth);
diff --git a/src/mongo/client/dbclient_connection.cpp b/src/mongo/client/dbclient_connection.cpp
index 51940bd819a..4f1c8acac54 100644
--- a/src/mongo/client/dbclient_connection.cpp
+++ b/src/mongo/client/dbclient_connection.cpp
@@ -554,17 +554,17 @@ DBClientConnection::DBClientConnection(bool _autoReconnect,
void DBClientConnection::say(Message& toSend, bool isRetry, string* actualServer) {
checkConnection();
- auto killSessionOnError = MakeGuard([this] { _markFailed(kEndSession); });
+ auto killSessionOnError = makeGuard([this] { _markFailed(kEndSession); });
toSend.header().setId(nextMessageId());
toSend.header().setResponseToMsgId(0);
uassertStatusOK(
_session->sinkMessage(uassertStatusOK(_compressorManager.compressMessage(toSend))));
- killSessionOnError.Dismiss();
+ killSessionOnError.dismiss();
}
bool DBClientConnection::recv(Message& m, int lastRequestId) {
- auto killSessionOnError = MakeGuard([this] { _markFailed(kEndSession); });
+ auto killSessionOnError = makeGuard([this] { _markFailed(kEndSession); });
auto swm = _session->sourceMessage();
if (!swm.isOK()) {
return false;
@@ -579,7 +579,7 @@ bool DBClientConnection::recv(Message& m, int lastRequestId) {
m = uassertStatusOK(_compressorManager.decompressMessage(m));
}
- killSessionOnError.Dismiss();
+ killSessionOnError.dismiss();
return true;
}
@@ -588,7 +588,7 @@ bool DBClientConnection::call(Message& toSend,
bool assertOk,
string* actualServer) {
checkConnection();
- auto killSessionOnError = MakeGuard([this] { _markFailed(kEndSession); });
+ auto killSessionOnError = makeGuard([this] { _markFailed(kEndSession); });
auto maybeThrow = [&](const auto& errStatus) {
if (assertOk)
uasserted(10278,
@@ -620,7 +620,7 @@ bool DBClientConnection::call(Message& toSend,
response = uassertStatusOK(_compressorManager.decompressMessage(response));
}
- killSessionOnError.Dismiss();
+ killSessionOnError.dismiss();
return true;
}
diff --git a/src/mongo/client/fetcher.cpp b/src/mongo/client/fetcher.cpp
index 65540e088c4..c3cbea95e10 100644
--- a/src/mongo/client/fetcher.cpp
+++ b/src/mongo/client/fetcher.cpp
@@ -333,7 +333,7 @@ Status Fetcher::_scheduleGetMore(const BSONObj& cmdObj) {
void Fetcher::_callback(const RemoteCommandCallbackArgs& rcbd, const char* batchFieldName) {
QueryResponse batchData;
- auto finishCallbackGuard = MakeGuard([this, &batchData] {
+ auto finishCallbackGuard = makeGuard([this, &batchData] {
if (batchData.cursorId && !batchData.nss.isEmpty()) {
_sendKillCursors(batchData.cursorId, batchData.nss);
}
@@ -403,7 +403,7 @@ void Fetcher::_callback(const RemoteCommandCallbackArgs& rcbd, const char* batch
return;
}
- finishCallbackGuard.Dismiss();
+ finishCallbackGuard.dismiss();
}
void Fetcher::_sendKillCursors(const CursorId id, const NamespaceString& nss) {
diff --git a/src/mongo/client/sasl_sspi.cpp b/src/mongo/client/sasl_sspi.cpp
index fa0b915c377..9daa7739bcf 100644
--- a/src/mongo/client/sasl_sspi.cpp
+++ b/src/mongo/client/sasl_sspi.cpp
@@ -411,7 +411,7 @@ int sspiClientMechStep(void* conn_context,
return SASL_FAIL;
}
- ON_BLOCK_EXIT(FreeContextBuffer, outbuf.pBuffers[0].pvBuffer);
+ ON_BLOCK_EXIT([&] { FreeContextBuffer(outbuf.pBuffers[0].pvBuffer); });
pcctx->haveCtxt = true;
if (status == SEC_E_OK) {
diff --git a/src/mongo/db/auth/authorization_session_impl.cpp b/src/mongo/db/auth/authorization_session_impl.cpp
index 9302c2b3763..a92ba21068b 100644
--- a/src/mongo/db/auth/authorization_session_impl.cpp
+++ b/src/mongo/db/auth/authorization_session_impl.cpp
@@ -734,7 +734,7 @@ void AuthorizationSessionImpl::_refreshUserInfoAsNeeded(OperationContext* opCtx)
if (!user->isValid()) {
// The user is invalid, so make sure that we erase it from _authenticateUsers at the
// end of this block.
- auto removeGuard = MakeGuard([&] { _authenticatedUsers.removeAt(it++); });
+ auto removeGuard = makeGuard([&] { _authenticatedUsers.removeAt(it++); });
// Make a good faith effort to acquire an up-to-date user object, since the one
// we've cached is marked "out-of-date."
@@ -768,7 +768,7 @@ void AuthorizationSessionImpl::_refreshUserInfoAsNeeded(OperationContext* opCtx)
}
// Success! Replace the old User object with the updated one.
- removeGuard.Dismiss();
+ removeGuard.dismiss();
_authenticatedUsers.replaceAt(it, std::move(updatedUser));
LOG(1) << "Updated session cache of user information for " << name;
break;
@@ -792,7 +792,7 @@ void AuthorizationSessionImpl::_refreshUserInfoAsNeeded(OperationContext* opCtx)
warning() << "Could not fetch updated user privilege information for " << name
<< "; continuing to use old information. Reason is "
<< redact(status);
- removeGuard.Dismiss();
+ removeGuard.dismiss();
break;
}
}
diff --git a/src/mongo/db/catalog/catalog_control.cpp b/src/mongo/db/catalog/catalog_control.cpp
index 122630ef4e4..fa6ed43e9e5 100644
--- a/src/mongo/db/catalog/catalog_control.cpp
+++ b/src/mongo/db/catalog/catalog_control.cpp
@@ -70,7 +70,7 @@ MinVisibleTimestampMap closeCatalog(OperationContext* opCtx) {
}
// Need to mark the UUIDCatalog as open if we our closeAll fails, dismissed if successful.
- auto reopenOnFailure = MakeGuard([opCtx] { UUIDCatalog::get(opCtx).onOpenCatalog(opCtx); });
+ auto reopenOnFailure = makeGuard([opCtx] { UUIDCatalog::get(opCtx).onOpenCatalog(opCtx); });
// Closing UUID Catalog: only lookupNSSByUUID will fall back to using pre-closing state to
// allow authorization for currently unknown UUIDs. This is needed because authorization needs
// to work before acquiring locks, and might otherwise spuriously regard a UUID as unknown
@@ -87,7 +87,7 @@ MinVisibleTimestampMap closeCatalog(OperationContext* opCtx) {
log() << "closeCatalog: closing storage engine catalog";
opCtx->getServiceContext()->getStorageEngine()->closeCatalog(opCtx);
- reopenOnFailure.Dismiss();
+ reopenOnFailure.dismiss();
return minVisibleTimestampMap;
}
diff --git a/src/mongo/db/catalog/database_holder_impl.cpp b/src/mongo/db/catalog/database_holder_impl.cpp
index c1671326474..483e96e4f17 100644
--- a/src/mongo/db/catalog/database_holder_impl.cpp
+++ b/src/mongo/db/catalog/database_holder_impl.cpp
@@ -116,7 +116,7 @@ Database* DatabaseHolderImpl::openDb(OperationContext* opCtx, StringData ns, boo
return db;
// We've inserted a nullptr entry for dbname: make sure to remove it on unsuccessful exit.
- auto removeDbGuard = MakeGuard([this, &lk, dbname] {
+ auto removeDbGuard = makeGuard([this, &lk, dbname] {
if (!lk.owns_lock())
lk.lock();
_dbs.erase(dbname);
@@ -151,7 +151,7 @@ Database* DatabaseHolderImpl::openDb(OperationContext* opCtx, StringData ns, boo
newDb->init(opCtx);
// Finally replace our nullptr entry with the new Database pointer.
- removeDbGuard.Dismiss();
+ removeDbGuard.dismiss();
lk.lock();
auto it = _dbs.find(dbname);
invariant(it != _dbs.end() && it->second == nullptr);
diff --git a/src/mongo/db/catalog/drop_database.cpp b/src/mongo/db/catalog/drop_database.cpp
index c48f4e40214..1130ece6357 100644
--- a/src/mongo/db/catalog/drop_database.cpp
+++ b/src/mongo/db/catalog/drop_database.cpp
@@ -67,11 +67,11 @@ Status _finishDropDatabase(OperationContext* opCtx,
Database* db,
std::size_t numCollections) {
// If DatabaseHolder::dropDb() fails, we should reset the drop-pending state on Database.
- auto dropPendingGuard = MakeGuard([db, opCtx] { db->setDropPending(opCtx, false); });
+ auto dropPendingGuard = makeGuard([db, opCtx] { db->setDropPending(opCtx, false); });
auto databaseHolder = DatabaseHolder::get(opCtx);
databaseHolder->dropDb(opCtx, db);
- dropPendingGuard.Dismiss();
+ dropPendingGuard.dismiss();
log() << "dropDatabase " << dbName << " - dropped " << numCollections << " collection(s)";
log() << "dropDatabase " << dbName << " - finished";
@@ -142,7 +142,7 @@ Status dropDatabase(OperationContext* opCtx, const std::string& dbName) {
// If Database::dropCollectionEventIfSystem() fails, we should reset the drop-pending state
// on Database.
- auto dropPendingGuard = MakeGuard([&db, opCtx] { db->setDropPending(opCtx, false); });
+ auto dropPendingGuard = makeGuard([&db, opCtx] { db->setDropPending(opCtx, false); });
std::vector<NamespaceString> collectionsToDrop;
for (Collection* collection : *db) {
@@ -183,7 +183,7 @@ Status dropDatabase(OperationContext* opCtx, const std::string& dbName) {
fassert(40476, db->dropCollectionEvenIfSystem(opCtx, nss));
wunit.commit();
}
- dropPendingGuard.Dismiss();
+ dropPendingGuard.dismiss();
// If there are no collection drops to wait for, we complete the drop database operation.
if (numCollectionsToDrop == 0U && latestDropPendingOpTime.isNull()) {
@@ -199,7 +199,7 @@ Status dropDatabase(OperationContext* opCtx, const std::string& dbName) {
// If waitForWriteConcern() returns an error or throws an exception, we should reset the
// drop-pending state on Database.
- auto dropPendingGuardWhileAwaitingReplication = MakeGuard([dbName, opCtx] {
+ auto dropPendingGuardWhileAwaitingReplication = makeGuard([dbName, opCtx] {
UninterruptibleLockGuard noInterrupt(opCtx->lockState());
AutoGetDb autoDB(opCtx, dbName, MODE_IX);
if (auto db = autoDB.getDb()) {
@@ -272,7 +272,7 @@ Status dropDatabase(OperationContext* opCtx, const std::string& dbName) {
<< result.duration << ". dropping database";
}
- dropPendingGuardWhileAwaitingReplication.Dismiss();
+ dropPendingGuardWhileAwaitingReplication.dismiss();
if (MONGO_FAIL_POINT(dropDatabaseHangAfterAllCollectionsDrop)) {
log() << "dropDatabase - fail point dropDatabaseHangAfterAllCollectionsDrop enabled. "
@@ -294,7 +294,7 @@ Status dropDatabase(OperationContext* opCtx, const std::string& dbName) {
// If we fail to complete the database drop, we should reset the drop-pending state on
// Database.
- auto dropPendingGuard = MakeGuard([&db, opCtx] { db->setDropPending(opCtx, false); });
+ auto dropPendingGuard = makeGuard([&db, opCtx] { db->setDropPending(opCtx, false); });
bool userInitiatedWritesAndNotPrimary =
opCtx->writesAreReplicated() && !replCoord->canAcceptWritesForDatabase(opCtx, dbName);
@@ -309,7 +309,7 @@ Status dropDatabase(OperationContext* opCtx, const std::string& dbName) {
<< " pending collection drop(s).");
}
- dropPendingGuard.Dismiss();
+ dropPendingGuard.dismiss();
return _finishDropDatabase(opCtx, dbName, db, numCollections);
});
}
diff --git a/src/mongo/db/catalog/multi_index_block.cpp b/src/mongo/db/catalog/multi_index_block.cpp
index d24b3fe49d8..b4fb4a481be 100644
--- a/src/mongo/db/catalog/multi_index_block.cpp
+++ b/src/mongo/db/catalog/multi_index_block.cpp
@@ -302,9 +302,9 @@ Status MultiIndexBlock::insertAllDocumentsInCollection() {
// accumulate them in the `MultikeyPathTracker` and do the write as part of the update that
// commits the index.
auto stopTracker =
- MakeGuard([this] { MultikeyPathTracker::get(_opCtx).stopTrackingMultikeyPathInfo(); });
+ makeGuard([this] { MultikeyPathTracker::get(_opCtx).stopTrackingMultikeyPathInfo(); });
if (MultikeyPathTracker::get(_opCtx).isTrackingMultikeyPathInfo()) {
- stopTracker.Dismiss();
+ stopTracker.dismiss();
}
MultikeyPathTracker::get(_opCtx).startTrackingMultikeyPathInfo();
@@ -613,9 +613,9 @@ Status MultiIndexBlock::commit(stdx::function<void(const BSONObj& spec)> onCreat
// Do not interfere with writing multikey information when committing index builds.
auto restartTracker =
- MakeGuard([this] { MultikeyPathTracker::get(_opCtx).startTrackingMultikeyPathInfo(); });
+ makeGuard([this] { MultikeyPathTracker::get(_opCtx).startTrackingMultikeyPathInfo(); });
if (!MultikeyPathTracker::get(_opCtx).isTrackingMultikeyPathInfo()) {
- restartTracker.Dismiss();
+ restartTracker.dismiss();
}
MultikeyPathTracker::get(_opCtx).stopTrackingMultikeyPathInfo();
diff --git a/src/mongo/db/catalog/rename_collection.cpp b/src/mongo/db/catalog/rename_collection.cpp
index 0d05cd8c227..c0bb212b6dd 100644
--- a/src/mongo/db/catalog/rename_collection.cpp
+++ b/src/mongo/db/catalog/rename_collection.cpp
@@ -411,7 +411,7 @@ Status renameCollectionCommon(OperationContext* opCtx,
}
// Dismissed on success
- auto tmpCollectionDropper = MakeGuard([&] {
+ auto tmpCollectionDropper = makeGuard([&] {
BSONObjBuilder unusedResult;
Status status = Status::OK();
try {
@@ -563,7 +563,7 @@ Status renameCollectionCommon(OperationContext* opCtx,
if (!status.isOK()) {
return status;
}
- tmpCollectionDropper.Dismiss();
+ tmpCollectionDropper.dismiss();
BSONObjBuilder unusedResult;
return dropCollection(opCtx,
diff --git a/src/mongo/db/commands/create_indexes.cpp b/src/mongo/db/commands/create_indexes.cpp
index c1bed1a0dd2..f6169d8f858 100644
--- a/src/mongo/db/commands/create_indexes.cpp
+++ b/src/mongo/db/commands/create_indexes.cpp
@@ -390,7 +390,7 @@ bool runCreateIndexes(OperationContext* opCtx,
dbLock.relockWithMode(MODE_IX);
}
- auto relockOnErrorGuard = MakeGuard([&] {
+ auto relockOnErrorGuard = makeGuard([&] {
// Must have exclusive DB lock before we clean up the index build via the
// destructor of 'indexer'.
if (indexer.getBuildInBackground()) {
@@ -443,7 +443,7 @@ bool runCreateIndexes(OperationContext* opCtx,
MONGO_FAIL_POINT_PAUSE_WHILE_SET(hangAfterIndexBuildSecondDrain);
}
- relockOnErrorGuard.Dismiss();
+ relockOnErrorGuard.dismiss();
// Need to return db lock back to exclusive, to complete the index build.
if (indexer.getBuildInBackground()) {
diff --git a/src/mongo/db/commands/getmore_cmd.cpp b/src/mongo/db/commands/getmore_cmd.cpp
index ea669c4a209..8f3eee6d760 100644
--- a/src/mongo/db/commands/getmore_cmd.cpp
+++ b/src/mongo/db/commands/getmore_cmd.cpp
@@ -401,7 +401,7 @@ public:
}
// On early return, get rid of the cursor.
- ScopeGuard cursorFreer = MakeGuard(&ClientCursorPin::deleteUnderlying, &ccPin);
+ auto cursorFreer = makeGuard([&] { ccPin.deleteUnderlying(); });
// We must respect the read concern from the cursor.
applyCursorReadConcern(opCtx, cursor->getReadConcernArgs());
@@ -528,7 +528,7 @@ public:
curOp->debug().nreturned = numResults;
if (respondWithId) {
- cursorFreer.Dismiss();
+ cursorFreer.dismiss();
}
// We're about to unpin or delete the cursor as the ClientCursorPin goes out of scope.
diff --git a/src/mongo/db/commands/run_aggregate.cpp b/src/mongo/db/commands/run_aggregate.cpp
index 501b16d154e..55678fb52ea 100644
--- a/src/mongo/db/commands/run_aggregate.cpp
+++ b/src/mongo/db/commands/run_aggregate.cpp
@@ -622,13 +622,11 @@ Status runAggregate(OperationContext* opCtx,
std::vector<ClientCursorPin> pins;
std::vector<ClientCursor*> cursors;
- ScopeGuard cursorFreer = MakeGuard(
- [](std::vector<ClientCursorPin>* pins) {
- for (auto& p : *pins) {
- p.deleteUnderlying();
- }
- },
- &pins);
+ auto cursorFreer = makeGuard([&] {
+ for (auto& p : pins) {
+ p.deleteUnderlying();
+ }
+ });
for (size_t idx = 0; idx < execs.size(); ++idx) {
ClientCursorParams cursorParams(
@@ -660,7 +658,7 @@ Status runAggregate(OperationContext* opCtx,
const bool keepCursor =
handleCursorCommand(opCtx, origNss, std::move(cursors), request, result);
if (keepCursor) {
- cursorFreer.Dismiss();
+ cursorFreer.dismiss();
}
}
diff --git a/src/mongo/db/commands/user_management_commands.cpp b/src/mongo/db/commands/user_management_commands.cpp
index df1d33ba54f..f1cd1574494 100644
--- a/src/mongo/db/commands/user_management_commands.cpp
+++ b/src/mongo/db/commands/user_management_commands.cpp
@@ -1981,7 +1981,7 @@ public:
uassertStatusOK(status);
// From here on, we always want to invalidate the user cache before returning.
- auto invalidateGuard = MakeGuard([&] {
+ auto invalidateGuard = makeGuard([&] {
try {
authzManager->invalidateUserCache(opCtx);
} catch (const DBException& e) {
@@ -2102,7 +2102,7 @@ public:
auto lk = uassertStatusOK(requireWritableAuthSchema28SCRAM(opCtx, authzManager));
// From here on, we always want to invalidate the user cache before returning.
- auto invalidateGuard = MakeGuard([&] {
+ auto invalidateGuard = makeGuard([&] {
try {
authzManager->invalidateUserCache(opCtx);
} catch (const DBException& e) {
@@ -2712,7 +2712,7 @@ public:
auto lk = uassertStatusOK(requireWritableAuthSchema28SCRAM(opCtx, authzManager));
// From here on, we always want to invalidate the user cache before returning.
- auto invalidateGuard = MakeGuard([&] {
+ auto invalidateGuard = makeGuard([&] {
try {
authzManager->invalidateUserCache(opCtx);
} catch (const DBException& e) {
diff --git a/src/mongo/db/concurrency/d_concurrency_test.cpp b/src/mongo/db/concurrency/d_concurrency_test.cpp
index 8723904ba81..991cdadf86d 100644
--- a/src/mongo/db/concurrency/d_concurrency_test.cpp
+++ b/src/mongo/db/concurrency/d_concurrency_test.cpp
@@ -108,7 +108,7 @@ public:
auto result = task.get_future();
stdx::thread taskThread{std::move(task)};
- auto taskThreadJoiner = MakeGuard([&] { taskThread.join(); });
+ auto taskThreadJoiner = makeGuard([&] { taskThread.join(); });
{
stdx::lock_guard<Client> clientLock(*opCtx->getClient());
diff --git a/src/mongo/db/concurrency/lock_state.cpp b/src/mongo/db/concurrency/lock_state.cpp
index 8ffbee8c523..11a4028e7fb 100644
--- a/src/mongo/db/concurrency/lock_state.cpp
+++ b/src/mongo/db/concurrency/lock_state.cpp
@@ -335,7 +335,7 @@ LockResult LockerImpl::_acquireTicket(OperationContext* opCtx, LockMode mode, Da
}
// If the ticket wait is interrupted, restore the state of the client.
- auto restoreStateOnErrorGuard = MakeGuard([&] { _clientState.store(kInactive); });
+ auto restoreStateOnErrorGuard = makeGuard([&] { _clientState.store(kInactive); });
OperationContext* interruptible = _uninterruptibleLocksRequested ? nullptr : opCtx;
if (deadline == Date_t::max()) {
@@ -343,7 +343,7 @@ LockResult LockerImpl::_acquireTicket(OperationContext* opCtx, LockMode mode, Da
} else if (!holder->waitForTicketUntil(interruptible, deadline)) {
return LOCK_TIMEOUT;
}
- restoreStateOnErrorGuard.Dismiss();
+ restoreStateOnErrorGuard.dismiss();
}
_clientState.store(reader ? kActiveReader : kActiveWriter);
return LOCK_OK;
@@ -835,7 +835,7 @@ LockResult LockerImpl::lockComplete(OperationContext* opCtx,
uint64_t startOfCurrentWaitTime = startOfTotalWaitTime;
// Clean up the state on any failed lock attempts.
- auto unlockOnErrorGuard = MakeGuard([&] {
+ auto unlockOnErrorGuard = makeGuard([&] {
LockRequestsMap::Iterator it = _requests.find(resId);
_unlockImpl(&it);
});
@@ -892,7 +892,7 @@ LockResult LockerImpl::lockComplete(OperationContext* opCtx,
// lock was still granted after all, but we don't try to take advantage of that and will return
// a timeout.
if (result == LOCK_OK) {
- unlockOnErrorGuard.Dismiss();
+ unlockOnErrorGuard.dismiss();
}
return result;
}
diff --git a/src/mongo/db/exec/delete.cpp b/src/mongo/db/exec/delete.cpp
index 49bbef02748..b1ae8b66942 100644
--- a/src/mongo/db/exec/delete.cpp
+++ b/src/mongo/db/exec/delete.cpp
@@ -152,7 +152,7 @@ PlanStage::StageState DeleteStage::doWork(WorkingSetID* out) {
WorkingSetMember* member = _ws->get(id);
// We want to free this member when we return, unless we need to retry deleting or returning it.
- ScopeGuard memberFreer = MakeGuard(&WorkingSet::free, _ws, id);
+ auto memberFreer = makeGuard([&] { _ws->free(id); });
invariant(member->hasRecordId());
RecordId recordId = member->recordId;
@@ -167,7 +167,7 @@ PlanStage::StageState DeleteStage::doWork(WorkingSetID* out) {
collection(), getOpCtx(), _ws, id, _params.canonicalQuery);
} catch (const WriteConflictException&) {
// There was a problem trying to detect if the document still exists, so retry.
- memberFreer.Dismiss();
+ memberFreer.dismiss();
return prepareToRetryWSM(id, out);
}
@@ -213,7 +213,7 @@ PlanStage::StageState DeleteStage::doWork(WorkingSetID* out) {
: Collection::StoreDeletedDoc::Off);
wunit.commit();
} catch (const WriteConflictException&) {
- memberFreer.Dismiss(); // Keep this member around so we can retry deleting it.
+ memberFreer.dismiss(); // Keep this member around so we can retry deleting it.
return prepareToRetryWSM(id, out);
}
}
@@ -240,7 +240,7 @@ PlanStage::StageState DeleteStage::doWork(WorkingSetID* out) {
_idReturning = id;
// Keep this member around so that we can return it on the next work() call.
- memberFreer.Dismiss();
+ memberFreer.dismiss();
}
*out = WorkingSet::INVALID_ID;
return NEED_YIELD;
@@ -250,7 +250,7 @@ PlanStage::StageState DeleteStage::doWork(WorkingSetID* out) {
// member->obj should refer to the deleted document.
invariant(member->getState() == WorkingSetMember::OWNED_OBJ);
- memberFreer.Dismiss(); // Keep this member around so we can return it.
+ memberFreer.dismiss(); // Keep this member around so we can return it.
*out = id;
return PlanStage::ADVANCED;
}
diff --git a/src/mongo/db/exec/update.cpp b/src/mongo/db/exec/update.cpp
index 60e081322ed..83f07245294 100644
--- a/src/mongo/db/exec/update.cpp
+++ b/src/mongo/db/exec/update.cpp
@@ -643,7 +643,7 @@ PlanStage::StageState UpdateStage::doWork(WorkingSetID* out) {
// We want to free this member when we return, unless we need to retry updating or returning
// it.
- ScopeGuard memberFreer = MakeGuard(&WorkingSet::free, _ws, id);
+ auto memberFreer = makeGuard([&] { _ws->free(id); });
invariant(member->hasRecordId());
recordId = member->recordId;
@@ -666,7 +666,7 @@ PlanStage::StageState UpdateStage::doWork(WorkingSetID* out) {
collection(), getOpCtx(), _ws, id, _params.canonicalQuery);
} catch (const WriteConflictException&) {
// There was a problem trying to detect if the document still exists, so retry.
- memberFreer.Dismiss();
+ memberFreer.dismiss();
return prepareToRetryWSM(id, out);
}
@@ -702,7 +702,7 @@ PlanStage::StageState UpdateStage::doWork(WorkingSetID* out) {
// Do the update, get us the new version of the doc.
newObj = transformAndUpdate(member->obj, recordId);
} catch (const WriteConflictException&) {
- memberFreer.Dismiss(); // Keep this member around so we can retry updating it.
+ memberFreer.dismiss(); // Keep this member around so we can retry updating it.
return prepareToRetryWSM(id, out);
}
@@ -738,7 +738,7 @@ PlanStage::StageState UpdateStage::doWork(WorkingSetID* out) {
_idReturning = id;
// Keep this member around so that we can return it on the next work() call.
- memberFreer.Dismiss();
+ memberFreer.dismiss();
}
*out = WorkingSet::INVALID_ID;
return NEED_YIELD;
@@ -748,7 +748,7 @@ PlanStage::StageState UpdateStage::doWork(WorkingSetID* out) {
// member->obj should refer to the document we want to return.
invariant(member->getState() == WorkingSetMember::OWNED_OBJ);
- memberFreer.Dismiss(); // Keep this member around so we can return it.
+ memberFreer.dismiss(); // Keep this member around so we can return it.
*out = id;
return PlanStage::ADVANCED;
}
diff --git a/src/mongo/db/logical_session_cache_impl.cpp b/src/mongo/db/logical_session_cache_impl.cpp
index ce5a013f20f..f7afce2fc3a 100644
--- a/src/mongo/db/logical_session_cache_impl.cpp
+++ b/src/mongo/db/logical_session_cache_impl.cpp
@@ -289,7 +289,7 @@ void LogicalSessionCacheImpl::_refresh(Client* client) {
}
// This will finish timing _refresh for our stats no matter when we return.
- const auto timeRefreshJob = MakeGuard([this] {
+ const auto timeRefreshJob = makeGuard([this] {
stdx::lock_guard<stdx::mutex> lk(_cacheMutex);
auto millis = now() - _stats.getLastSessionsCollectionJobTimestamp();
_stats.setLastSessionsCollectionJobDurationMillis(millis.count());
@@ -320,29 +320,27 @@ void LogicalSessionCacheImpl::_refresh(Client* client) {
LogicalSessionIdSet explicitlyEndingSessions;
LogicalSessionIdMap<LogicalSessionRecord> activeSessions;
- // backSwapper creates a guard that in the case of a exception
- // replaces the ending or active sessions that swapped out of of LogicalSessionCache,
- // and merges in any records that had been added since we swapped them
- // out.
- auto backSwapper = [this](auto& member, auto& temp) {
- return MakeGuard([this, &member, &temp] {
- stdx::lock_guard<stdx::mutex> lk(_cacheMutex);
- using std::swap;
- swap(member, temp);
- for (const auto& it : temp) {
- member.emplace(it);
- }
- });
- };
-
{
using std::swap;
stdx::lock_guard<stdx::mutex> lk(_cacheMutex);
swap(explicitlyEndingSessions, _endingSessions);
swap(activeSessions, _activeSessions);
}
- auto activeSessionsBackSwapper = backSwapper(_activeSessions, activeSessions);
- auto explicitlyEndingBackSwaper = backSwapper(_endingSessions, explicitlyEndingSessions);
+
+ // Create guards that in the case of a exception replace the ending or active sessions that
+ // swapped out of LogicalSessionCache, and merges in any records that had been added since we
+ // swapped them out.
+ auto backSwap = [this](auto& member, auto& temp) {
+ stdx::lock_guard<stdx::mutex> lk(_cacheMutex);
+ using std::swap;
+ swap(member, temp);
+ for (const auto& it : temp) {
+ member.emplace(it);
+ }
+ };
+ auto activeSessionsBackSwapper = makeGuard([&] { backSwap(_activeSessions, activeSessions); });
+ auto explicitlyEndingBackSwaper =
+ makeGuard([&] { backSwap(_endingSessions, explicitlyEndingSessions); });
// remove all explicitlyEndingSessions from activeSessions
for (const auto& lsid : explicitlyEndingSessions) {
@@ -368,7 +366,7 @@ void LogicalSessionCacheImpl::_refresh(Client* client) {
// Refresh the active sessions in the sessions collection.
uassertStatusOK(_sessionsColl->refreshSessions(opCtx, activeSessionRecords));
- activeSessionsBackSwapper.Dismiss();
+ activeSessionsBackSwapper.dismiss();
{
stdx::lock_guard<stdx::mutex> lk(_cacheMutex);
_stats.setLastSessionsCollectionJobEntriesRefreshed(activeSessionRecords.size());
@@ -376,7 +374,7 @@ void LogicalSessionCacheImpl::_refresh(Client* client) {
// Remove the ending sessions from the sessions collection.
uassertStatusOK(_sessionsColl->removeRecords(opCtx, explicitlyEndingSessions));
- explicitlyEndingBackSwaper.Dismiss();
+ explicitlyEndingBackSwaper.dismiss();
{
stdx::lock_guard<stdx::mutex> lk(_cacheMutex);
_stats.setLastSessionsCollectionJobEntriesEnded(explicitlyEndingSessions.size());
diff --git a/src/mongo/db/operation_context.cpp b/src/mongo/db/operation_context.cpp
index 88af8855212..87080f39195 100644
--- a/src/mongo/db/operation_context.cpp
+++ b/src/mongo/db/operation_context.cpp
@@ -327,7 +327,7 @@ void OperationContext::markKilled(ErrorCodes::Error killCode) {
if (_waitMutex) {
invariant(++_numKillers > 0);
getClient()->unlock();
- ON_BLOCK_EXIT([this]() noexcept {
+ ON_BLOCK_EXIT([this] {
getClient()->lock();
invariant(--_numKillers >= 0);
});
diff --git a/src/mongo/db/repl/collection_bulk_loader_impl.cpp b/src/mongo/db/repl/collection_bulk_loader_impl.cpp
index 9b2e09dc08c..b529bfc33be 100644
--- a/src/mongo/db/repl/collection_bulk_loader_impl.cpp
+++ b/src/mongo/db/repl/collection_bulk_loader_impl.cpp
@@ -109,7 +109,7 @@ Status CollectionBulkLoaderImpl::init(const std::vector<BSONObj>& secondaryIndex
Status CollectionBulkLoaderImpl::insertDocuments(const std::vector<BSONObj>::const_iterator begin,
const std::vector<BSONObj>::const_iterator end) {
int count = 0;
- return _runTaskReleaseResourcesOnFailure([&]() -> Status {
+ return _runTaskReleaseResourcesOnFailure([&] {
UnreplicatedWritesBlock uwb(_opCtx.get());
for (auto iter = begin; iter != end; ++iter) {
@@ -154,7 +154,7 @@ Status CollectionBulkLoaderImpl::insertDocuments(const std::vector<BSONObj>::con
}
Status CollectionBulkLoaderImpl::commit() {
- return _runTaskReleaseResourcesOnFailure([this]() -> Status {
+ return _runTaskReleaseResourcesOnFailure([&] {
_stats.startBuildingIndexes = Date_t::now();
LOG(2) << "Creating indexes for ns: " << _nss.ns();
UnreplicatedWritesBlock uwb(_opCtx.get());
@@ -247,17 +247,13 @@ void CollectionBulkLoaderImpl::_releaseResources() {
}
template <typename F>
-Status CollectionBulkLoaderImpl::_runTaskReleaseResourcesOnFailure(F task) noexcept {
-
+Status CollectionBulkLoaderImpl::_runTaskReleaseResourcesOnFailure(const F& task) noexcept {
AlternativeClientRegion acr(_client);
- ScopeGuard guard = MakeGuard(&CollectionBulkLoaderImpl::_releaseResources, this);
+ auto guard = makeGuard([this] { _releaseResources(); });
try {
- const auto status = [&task]() noexcept {
- return task();
- }
- ();
+ const auto status = task();
if (status.isOK()) {
- guard.Dismiss();
+ guard.dismiss();
}
return status;
} catch (...) {
diff --git a/src/mongo/db/repl/collection_bulk_loader_impl.h b/src/mongo/db/repl/collection_bulk_loader_impl.h
index 2c81b5588d4..88da9d32256 100644
--- a/src/mongo/db/repl/collection_bulk_loader_impl.h
+++ b/src/mongo/db/repl/collection_bulk_loader_impl.h
@@ -82,7 +82,7 @@ private:
void _releaseResources();
template <typename F>
- Status _runTaskReleaseResourcesOnFailure(F task) noexcept;
+ Status _runTaskReleaseResourcesOnFailure(const F& task) noexcept;
/**
* Adds document and associated RecordId to index blocks after inserting into RecordStore.
diff --git a/src/mongo/db/repl/initial_syncer.cpp b/src/mongo/db/repl/initial_syncer.cpp
index 6bad2095c27..7937ae81fdd 100644
--- a/src/mongo/db/repl/initial_syncer.cpp
+++ b/src/mongo/db/repl/initial_syncer.cpp
@@ -1205,7 +1205,7 @@ void InitialSyncer::_finishInitialSyncAttempt(const StatusWith<OpTimeWithHash>&
// if the task scheduling fails and we have to invoke _finishCallback() synchronously), we
// declare the scope guard before the lock guard.
auto result = lastApplied;
- auto finishCallbackGuard = MakeGuard([this, &result] {
+ auto finishCallbackGuard = makeGuard([this, &result] {
auto scheduleResult = _exec->scheduleWork(
[=](const mongo::executor::TaskExecutor::CallbackArgs&) { _finishCallback(result); });
if (!scheduleResult.isOK()) {
@@ -1273,7 +1273,7 @@ void InitialSyncer::_finishInitialSyncAttempt(const StatusWith<OpTimeWithHash>&
// Next initial sync attempt scheduled successfully and we do not need to call _finishCallback()
// until the next initial sync attempt finishes.
- finishCallbackGuard.Dismiss();
+ finishCallbackGuard.dismiss();
}
void InitialSyncer::_finishCallback(StatusWith<OpTimeWithHash> lastApplied) {
diff --git a/src/mongo/db/repl/isself.cpp b/src/mongo/db/repl/isself.cpp
index a8f77bc6378..d700c5d5c1d 100644
--- a/src/mongo/db/repl/isself.cpp
+++ b/src/mongo/db/repl/isself.cpp
@@ -125,7 +125,7 @@ std::vector<std::string> getAddrsForHost(const std::string& iporhost,
return out;
}
- ON_BLOCK_EXIT(freeaddrinfo, addrs);
+ ON_BLOCK_EXIT([&] { freeaddrinfo(addrs); });
for (addrinfo* addr = addrs; addr != NULL; addr = addr->ai_next) {
int family = addr->ai_family;
@@ -233,7 +233,7 @@ std::vector<std::string> getBoundAddrs(const bool ipv6enabled) {
warning() << "getifaddrs failure: " << errnoWithDescription(err) << std::endl;
return out;
}
- ON_BLOCK_EXIT(freeifaddrs, addrs);
+ ON_BLOCK_EXIT([&] { freeifaddrs(addrs); });
// based on example code from linux getifaddrs manpage
for (ifaddrs* addr = addrs; addr != NULL; addr = addr->ifa_next) {
diff --git a/src/mongo/db/repl/isself_test.cpp b/src/mongo/db/repl/isself_test.cpp
index d06b512aab9..fcc9f59aede 100644
--- a/src/mongo/db/repl/isself_test.cpp
+++ b/src/mongo/db/repl/isself_test.cpp
@@ -50,7 +50,7 @@ TEST_F(ServiceContextTest, DetectsSameHostIPv4) {
#if defined(_WIN32) || defined(__linux__) || defined(__APPLE__)
bool wasEnabled = IPv6Enabled();
enableIPv6(false);
- ON_BLOCK_EXIT(enableIPv6, wasEnabled);
+ ON_BLOCK_EXIT([&] { enableIPv6(wasEnabled); });
// first we get the addrs bound on this host
const std::vector<std::string> addrs = getBoundAddrs(false);
// Fastpath should agree with the result of getBoundAddrs
@@ -67,7 +67,7 @@ TEST_F(ServiceContextTest, DetectsSameHostIPv6) {
#if defined(_WIN32) || defined(__linux__) || defined(__APPLE__)
bool wasEnabled = IPv6Enabled();
enableIPv6(true);
- ON_BLOCK_EXIT(enableIPv6, wasEnabled);
+ ON_BLOCK_EXIT([&] { enableIPv6(wasEnabled); });
// first we get the addrs bound on this host
const std::vector<std::string> addrs = getBoundAddrs(true);
// Fastpath should agree with the result of getBoundAddrs
diff --git a/src/mongo/db/repl/replication_coordinator_impl.cpp b/src/mongo/db/repl/replication_coordinator_impl.cpp
index 5cf9d75ea6a..8a8739db0c5 100644
--- a/src/mongo/db/repl/replication_coordinator_impl.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl.cpp
@@ -1682,7 +1682,7 @@ Status ReplicationCoordinatorImpl::_awaitReplication_inlock(
ThreadWaiter waiter(opTime, &writeConcern, &condVar);
WaiterGuard guard(*lock, &_replicationWaiterList, &waiter);
- ScopeGuard failGuard = MakeGuard([&]() {
+ auto failGuard = makeGuard([&] {
if (getTestCommandsEnabled()) {
log() << "Replication failed for write concern: " << writeConcern.toBSON()
<< ", waitInfo: " << waiter << ", opID: " << opCtx->getOpID()
@@ -1716,7 +1716,7 @@ Status ReplicationCoordinatorImpl::_awaitReplication_inlock(
return satisfiableStatus;
}
- failGuard.Dismiss();
+ failGuard.dismiss();
return Status::OK();
}
@@ -1801,7 +1801,7 @@ void ReplicationCoordinatorImpl::stepDown(OperationContext* opCtx,
_performPostMemberStateUpdateAction(action);
};
- ScopeGuard onExitGuard = MakeGuard([&] {
+ auto onExitGuard = makeGuard([&] {
abortFn();
updateMemberState();
});
@@ -1855,7 +1855,7 @@ void ReplicationCoordinatorImpl::stepDown(OperationContext* opCtx,
}
// Stepdown success!
- onExitGuard.Dismiss();
+ onExitGuard.dismiss();
updateMemberState();
// Schedule work to (potentially) step back up once the stepdown period has ended.
@@ -2319,8 +2319,8 @@ Status ReplicationCoordinatorImpl::processReplSetReconfig(OperationContext* opCt
}
_setConfigState_inlock(kConfigReconfiguring);
- ScopeGuard configStateGuard =
- MakeGuard(lockAndCall, &lk, [=] { _setConfigState_inlock(kConfigSteady); });
+ auto configStateGuard =
+ makeGuard([&] { lockAndCall(&lk, [=] { _setConfigState_inlock(kConfigSteady); }); });
ReplSetConfig oldConfig = _rsConfig;
lk.unlock();
@@ -2380,7 +2380,7 @@ Status ReplicationCoordinatorImpl::processReplSetReconfig(OperationContext* opCt
const executor::TaskExecutor::CallbackArgs& cbData) {
_finishReplSetReconfig(cbData, newConfig, f, v, reconfigFinished);
}));
- configStateGuard.Dismiss();
+ configStateGuard.dismiss();
_replExecutor->waitForEvent(reconfigFinished);
return Status::OK();
}
@@ -2455,8 +2455,8 @@ Status ReplicationCoordinatorImpl::processReplSetInitiate(OperationContext* opCt
invariant(!_rsConfig.isInitialized());
_setConfigState_inlock(kConfigInitiating);
- ScopeGuard configStateGuard =
- MakeGuard(lockAndCall, &lk, [=] { _setConfigState_inlock(kConfigUninitialized); });
+ auto configStateGuard =
+ makeGuard([&] { lockAndCall(&lk, [=] { _setConfigState_inlock(kConfigUninitialized); }); });
lk.unlock();
ReplSetConfig newConfig;
@@ -2521,7 +2521,7 @@ Status ReplicationCoordinatorImpl::processReplSetInitiate(OperationContext* opCt
_externalState->startThreads(_settings);
_startDataReplication(opCtx);
- configStateGuard.Dismiss();
+ configStateGuard.dismiss();
return Status::OK();
}
diff --git a/src/mongo/db/repl/scatter_gather_runner.cpp b/src/mongo/db/repl/scatter_gather_runner.cpp
index 9be747e60c4..e615589b09b 100644
--- a/src/mongo/db/repl/scatter_gather_runner.cpp
+++ b/src/mongo/db/repl/scatter_gather_runner.cpp
@@ -102,7 +102,7 @@ StatusWith<EventHandle> ScatterGatherRunner::RunnerImpl::start(
return evh;
}
_sufficientResponsesReceived = evh.getValue();
- ScopeGuard earlyReturnGuard = MakeGuard(&RunnerImpl::_signalSufficientResponsesReceived, this);
+ auto earlyReturnGuard = makeGuard([this] { _signalSufficientResponsesReceived(); });
std::vector<RemoteCommandRequest> requests = _algorithm->getRequests();
for (size_t i = 0; i < requests.size(); ++i) {
@@ -122,7 +122,7 @@ StatusWith<EventHandle> ScatterGatherRunner::RunnerImpl::start(
_signalSufficientResponsesReceived();
}
- earlyReturnGuard.Dismiss();
+ earlyReturnGuard.dismiss();
return evh;
}
diff --git a/src/mongo/db/s/balancer/migration_manager.cpp b/src/mongo/db/s/balancer/migration_manager.cpp
index 296ff07573e..59d10dc8b31 100644
--- a/src/mongo/db/s/balancer/migration_manager.cpp
+++ b/src/mongo/db/s/balancer/migration_manager.cpp
@@ -217,7 +217,7 @@ void MigrationManager::startRecoveryAndAcquireDistLocks(OperationContext* opCtx)
_state = State::kRecovering;
}
- auto scopedGuard = MakeGuard([&] {
+ auto scopedGuard = makeGuard([&] {
_migrationRecoveryMap.clear();
_abandonActiveMigrationsAndEnableManager(opCtx);
});
@@ -279,7 +279,7 @@ void MigrationManager::startRecoveryAndAcquireDistLocks(OperationContext* opCtx)
it->second.push_back(std::move(migrateType));
}
- scopedGuard.Dismiss();
+ scopedGuard.dismiss();
}
void MigrationManager::finishRecovery(OperationContext* opCtx,
@@ -301,7 +301,7 @@ void MigrationManager::finishRecovery(OperationContext* opCtx,
invariant(_state == State::kRecovering);
}
- auto scopedGuard = MakeGuard([&] {
+ auto scopedGuard = makeGuard([&] {
_migrationRecoveryMap.clear();
_abandonActiveMigrationsAndEnableManager(opCtx);
});
@@ -365,7 +365,7 @@ void MigrationManager::finishRecovery(OperationContext* opCtx,
}
_migrationRecoveryMap.clear();
- scopedGuard.Dismiss();
+ scopedGuard.dismiss();
{
stdx::lock_guard<stdx::mutex> lock(_mutex);
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_shard_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_shard_operations.cpp
index 76b1ba10087..9a87e4fafd9 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_shard_operations.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_shard_operations.cpp
@@ -581,7 +581,7 @@ StatusWith<std::string> ShardingCatalogManager::addShard(
invariant(shard);
auto targeter = shard->getTargeter();
- auto stopMonitoringGuard = MakeGuard([&] {
+ auto stopMonitoringGuard = makeGuard([&] {
if (shardConnectionString.type() == ConnectionString::SET) {
// This is a workaround for the case where we could have some bad shard being
// requested to be added and we put that bad connection string on the global replica set
@@ -770,7 +770,7 @@ StatusWith<std::string> ShardingCatalogManager::addShard(
"Could not find shard metadata for shard after adding it. This most likely "
"indicates that the shard was removed immediately after it was added."};
}
- stopMonitoringGuard.Dismiss();
+ stopMonitoringGuard.dismiss();
return shardType.getName();
}
diff --git a/src/mongo/db/s/migration_destination_manager.cpp b/src/mongo/db/s/migration_destination_manager.cpp
index 921c9864336..cacf25a608c 100644
--- a/src/mongo/db/s/migration_destination_manager.cpp
+++ b/src/mongo/db/s/migration_destination_manager.cpp
@@ -389,7 +389,7 @@ void MigrationDestinationManager::cloneDocumentsFromDonor(
stdx::thread inserterThread{[&] {
ThreadClient tc("chunkInserter", opCtx->getServiceContext());
auto inserterOpCtx = Client::getCurrent()->makeOperationContext();
- auto consumerGuard = MakeGuard([&] { batches.closeConsumerEnd(); });
+ auto consumerGuard = makeGuard([&] { batches.closeConsumerEnd(); });
try {
while (true) {
auto nextBatch = batches.pop(inserterOpCtx.get());
@@ -405,7 +405,7 @@ void MigrationDestinationManager::cloneDocumentsFromDonor(
log() << "Batch insertion failed " << causedBy(redact(exceptionToStatus()));
}
}};
- auto inserterThreadJoinGuard = MakeGuard([&] {
+ auto inserterThreadJoinGuard = makeGuard([&] {
batches.closeProducerEnd();
inserterThread.join();
});
@@ -419,7 +419,7 @@ void MigrationDestinationManager::cloneDocumentsFromDonor(
batches.push(res.getOwned(), opCtx);
auto arr = res["objects"].Obj();
if (arr.isEmpty()) {
- inserterThreadJoinGuard.Dismiss();
+ inserterThreadJoinGuard.dismiss();
inserterThread.join();
opCtx->checkForInterrupt();
break;
diff --git a/src/mongo/db/s/migration_source_manager.cpp b/src/mongo/db/s/migration_source_manager.cpp
index e5ef3fe1d57..31f432bedf1 100644
--- a/src/mongo/db/s/migration_source_manager.cpp
+++ b/src/mongo/db/s/migration_source_manager.cpp
@@ -217,7 +217,7 @@ NamespaceString MigrationSourceManager::getNss() const {
Status MigrationSourceManager::startClone(OperationContext* opCtx) {
invariant(!opCtx->lockState()->isLocked());
invariant(_state == kCreated);
- auto scopedGuard = MakeGuard([&] { cleanupOnError(opCtx); });
+ auto scopedGuard = makeGuard([&] { cleanupOnError(opCtx); });
_stats.countDonorMoveChunkStarted.addAndFetch(1);
const Status logStatus = ShardingLogging::get(opCtx)->logChangeChecked(
@@ -257,14 +257,14 @@ Status MigrationSourceManager::startClone(OperationContext* opCtx) {
return startCloneStatus;
}
- scopedGuard.Dismiss();
+ scopedGuard.dismiss();
return Status::OK();
}
Status MigrationSourceManager::awaitToCatchUp(OperationContext* opCtx) {
invariant(!opCtx->lockState()->isLocked());
invariant(_state == kCloning);
- auto scopedGuard = MakeGuard([&] { cleanupOnError(opCtx); });
+ auto scopedGuard = makeGuard([&] { cleanupOnError(opCtx); });
_stats.totalDonorChunkCloneTimeMillis.addAndFetch(_cloneAndCommitTimer.millis());
_cloneAndCommitTimer.reset();
@@ -276,14 +276,14 @@ Status MigrationSourceManager::awaitToCatchUp(OperationContext* opCtx) {
}
_state = kCloneCaughtUp;
- scopedGuard.Dismiss();
+ scopedGuard.dismiss();
return Status::OK();
}
Status MigrationSourceManager::enterCriticalSection(OperationContext* opCtx) {
invariant(!opCtx->lockState()->isLocked());
invariant(_state == kCloneCaughtUp);
- auto scopedGuard = MakeGuard([&] { cleanupOnError(opCtx); });
+ auto scopedGuard = makeGuard([&] { cleanupOnError(opCtx); });
_stats.totalDonorChunkCloneTimeMillis.addAndFetch(_cloneAndCommitTimer.millis());
_cloneAndCommitTimer.reset();
@@ -324,14 +324,14 @@ Status MigrationSourceManager::enterCriticalSection(OperationContext* opCtx) {
log() << "Migration successfully entered critical section";
- scopedGuard.Dismiss();
+ scopedGuard.dismiss();
return Status::OK();
}
Status MigrationSourceManager::commitChunkOnRecipient(OperationContext* opCtx) {
invariant(!opCtx->lockState()->isLocked());
invariant(_state == kCriticalSection);
- auto scopedGuard = MakeGuard([&] { cleanupOnError(opCtx); });
+ auto scopedGuard = makeGuard([&] { cleanupOnError(opCtx); });
// Tell the recipient shard to fetch the latest changes.
auto commitCloneStatus = _cloneDriver->commitClone(opCtx);
@@ -348,14 +348,14 @@ Status MigrationSourceManager::commitChunkOnRecipient(OperationContext* opCtx) {
_recipientCloneCounts = commitCloneStatus.getValue()["counts"].Obj().getOwned();
_state = kCloneCompleted;
- scopedGuard.Dismiss();
+ scopedGuard.dismiss();
return Status::OK();
}
Status MigrationSourceManager::commitChunkMetadataOnConfig(OperationContext* opCtx) {
invariant(!opCtx->lockState()->isLocked());
invariant(_state == kCloneCompleted);
- auto scopedGuard = MakeGuard([&] { cleanupOnError(opCtx); });
+ auto scopedGuard = makeGuard([&] { cleanupOnError(opCtx); });
// If we have chunks left on the FROM shard, bump the version of one of them as well. This will
// change the local collection major version, which indicates to other processes that the chunk
@@ -525,7 +525,7 @@ Status MigrationSourceManager::commitChunkMetadataOnConfig(OperationContext* opC
MONGO_FAIL_POINT_PAUSE_WHILE_SET(hangBeforeLeavingCriticalSection);
- scopedGuard.Dismiss();
+ scopedGuard.dismiss();
_stats.totalCriticalSectionCommitTimeMillis.addAndFetch(t.millis());
diff --git a/src/mongo/db/s/move_primary_source_manager.cpp b/src/mongo/db/s/move_primary_source_manager.cpp
index c8c1cc00c90..4ce743ce0d6 100644
--- a/src/mongo/db/s/move_primary_source_manager.cpp
+++ b/src/mongo/db/s/move_primary_source_manager.cpp
@@ -73,7 +73,7 @@ NamespaceString MovePrimarySourceManager::getNss() const {
Status MovePrimarySourceManager::clone(OperationContext* opCtx) {
invariant(!opCtx->lockState()->isLocked());
invariant(_state == kCreated);
- auto scopedGuard = MakeGuard([&] { cleanupOnError(opCtx); });
+ auto scopedGuard = makeGuard([&] { cleanupOnError(opCtx); });
log() << "Moving " << _dbname << " primary from: " << _fromShard << " to: " << _toShard;
@@ -122,14 +122,14 @@ Status MovePrimarySourceManager::clone(OperationContext* opCtx) {
}
_state = kCloneCaughtUp;
- scopedGuard.Dismiss();
+ scopedGuard.dismiss();
return Status::OK();
}
Status MovePrimarySourceManager::enterCriticalSection(OperationContext* opCtx) {
invariant(!opCtx->lockState()->isLocked());
invariant(_state == kCloneCaughtUp);
- auto scopedGuard = MakeGuard([&] { cleanupOnError(opCtx); });
+ auto scopedGuard = makeGuard([&] { cleanupOnError(opCtx); });
// Mark the shard as running a critical operation that requires recovery on crash.
uassertStatusOK(ShardingStateRecovery::startMetadataOp(opCtx));
@@ -173,14 +173,14 @@ Status MovePrimarySourceManager::enterCriticalSection(OperationContext* opCtx) {
log() << "movePrimary successfully entered critical section";
- scopedGuard.Dismiss();
+ scopedGuard.dismiss();
return Status::OK();
}
Status MovePrimarySourceManager::commitOnConfig(OperationContext* opCtx) {
invariant(!opCtx->lockState()->isLocked());
invariant(_state == kCriticalSection);
- auto scopedGuard = MakeGuard([&] { cleanupOnError(opCtx); });
+ auto scopedGuard = makeGuard([&] { cleanupOnError(opCtx); });
ConfigsvrCommitMovePrimary commitMovePrimaryRequest;
commitMovePrimaryRequest.set_configsvrCommitMovePrimary(getNss().ns());
@@ -290,7 +290,7 @@ Status MovePrimarySourceManager::commitOnConfig(OperationContext* opCtx) {
_buildMoveLogEntry(_dbname.toString(), _fromShard.toString(), _toShard.toString()),
ShardingCatalogClient::kMajorityWriteConcern));
- scopedGuard.Dismiss();
+ scopedGuard.dismiss();
_state = kNeedCleanStaleData;
diff --git a/src/mongo/db/server_options_test.cpp b/src/mongo/db/server_options_test.cpp
index 03b768a3690..8ef3c8a843d 100644
--- a/src/mongo/db/server_options_test.cpp
+++ b/src/mongo/db/server_options_test.cpp
@@ -806,7 +806,7 @@ TEST(SetupOptions, DeepCwd) {
sb << "/tmp/deepcwd-" << getpid();
boost::filesystem::path deepBaseDir = sb.str();
- auto cleanup = ::mongo::MakeGuard([&] {
+ auto cleanup = ::mongo::makeGuard([&] {
boost::filesystem::current_path(cwd, ec);
boost::filesystem::remove_all(deepBaseDir, ec);
});
@@ -863,7 +863,7 @@ TEST(SetupOptions, UnlinkedCwd) {
std::string unlinkDir;
- auto cleanup = ::mongo::MakeGuard([&] {
+ auto cleanup = ::mongo::makeGuard([&] {
boost::filesystem::current_path(cwd, ec);
if (!unlinkDir.empty()) {
boost::filesystem::remove(cwd / unlinkDir, ec);
diff --git a/src/mongo/db/service_entry_point_common.cpp b/src/mongo/db/service_entry_point_common.cpp
index cb2042addcb..18cf78b0109 100644
--- a/src/mongo/db/service_entry_point_common.cpp
+++ b/src/mongo/db/service_entry_point_common.cpp
@@ -381,7 +381,7 @@ void invokeWithSessionCheckedOut(OperationContext* opCtx,
txnParticipant->unstashTransactionResources(opCtx, invocation->definition()->getName());
}
- ScopeGuard guard = MakeGuard([&txnParticipant, opCtx]() {
+ auto guard = makeGuard([&txnParticipant, opCtx] {
txnParticipant->abortActiveUnpreparedOrStashPreparedTransaction(opCtx);
});
@@ -406,7 +406,7 @@ void invokeWithSessionCheckedOut(OperationContext* opCtx,
// If this shard has completed an earlier statement for this transaction, it must already be
// in the transaction's participant list, so it is guaranteed to learn its outcome.
txnParticipant->stashTransactionResources(opCtx);
- guard.Dismiss();
+ guard.dismiss();
throw;
}
@@ -419,7 +419,7 @@ void invokeWithSessionCheckedOut(OperationContext* opCtx,
// Stash or commit the transaction when the command succeeds.
txnParticipant->stashTransactionResources(opCtx);
- guard.Dismiss();
+ guard.dismiss();
} catch (const ExceptionFor<ErrorCodes::NoSuchTransaction>&) {
// We make our decision about the transaction state based on the oplog we have, so
// we set the client last op to the last optime observed by the system to ensure that
diff --git a/src/mongo/db/storage/kv/kv_storage_engine.cpp b/src/mongo/db/storage/kv/kv_storage_engine.cpp
index 902a242645d..a09d4c7a743 100644
--- a/src/mongo/db/storage/kv/kv_storage_engine.cpp
+++ b/src/mongo/db/storage/kv/kv_storage_engine.cpp
@@ -578,7 +578,7 @@ Status KVStorageEngine::_dropCollectionsNoTimestamp(OperationContext* opCtx,
}
// Ensure the method exits with the same "commit timestamp" state that it was called with.
- auto addCommitTimestamp = MakeGuard([&opCtx, commitTs] {
+ auto addCommitTimestamp = makeGuard([&opCtx, commitTs] {
if (!commitTs.isNull()) {
opCtx->recoveryUnit()->setCommitTimestamp(commitTs);
}
diff --git a/src/mongo/db/storage/storage_engine_init.cpp b/src/mongo/db/storage/storage_engine_init.cpp
index ede2b647bf7..1ff25dd6b2d 100644
--- a/src/mongo/db/storage/storage_engine_init.cpp
+++ b/src/mongo/db/storage/storage_engine_init.cpp
@@ -176,7 +176,7 @@ void initializeStorageEngine(ServiceContext* service, const StorageEngineInitFla
uassertStatusOK(factory->validateMetadata(*metadata, storageGlobalParams));
}
- ScopeGuard guard = MakeGuard([&] {
+ auto guard = makeGuard([&] {
auto& lockFile = StorageEngineLockFile::get(service);
if (lockFile) {
lockFile->close();
@@ -201,7 +201,7 @@ void initializeStorageEngine(ServiceContext* service, const StorageEngineInitFla
uassertStatusOK(metadata->write());
}
- guard.Dismiss();
+ guard.dismiss();
_supportsDocLocking = service->getStorageEngine()->supportsDocLocking();
}
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp
index d04603d1705..ef00dd7a21b 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp
@@ -925,7 +925,7 @@ StatusWith<std::vector<std::string>> WiredTigerKVEngine::beginNonBlockingBackup(
// Oplog truncation thread won't remove oplog since the checkpoint pinned by the backup cursor.
stdx::lock_guard<stdx::mutex> lock(_oplogPinnedByBackupMutex);
_checkpointThread->assignOplogNeededForCrashRecoveryTo(&_oplogPinnedByBackup);
- auto pinOplogGuard = MakeGuard([&] { _oplogPinnedByBackup = boost::none; });
+ auto pinOplogGuard = makeGuard([&] { _oplogPinnedByBackup = boost::none; });
// This cursor will be freed by the backupSession being closed as the session is uncached
auto sessionRaii = stdx::make_unique<WiredTigerSession>(_conn);
@@ -943,7 +943,7 @@ StatusWith<std::vector<std::string>> WiredTigerKVEngine::beginNonBlockingBackup(
return swFilesToCopy;
}
- pinOplogGuard.Dismiss();
+ pinOplogGuard.dismiss();
_backupSession = std::move(sessionRaii);
_backupCursor = cursor;
@@ -1380,7 +1380,7 @@ bool WiredTigerKVEngine::_hasUri(WT_SESSION* session, const std::string& uri) co
if (ret == ENOENT)
return false;
invariantWTOK(ret);
- ON_BLOCK_EXIT(c->close, c);
+ ON_BLOCK_EXIT([&] { c->close(c); });
c->set_key(c, uri.c_str());
return c->search(c) == 0;
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_oplog_manager.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_oplog_manager.cpp
index d04ba36dc7f..c21088ddfaa 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_oplog_manager.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_oplog_manager.cpp
@@ -125,7 +125,7 @@ void WiredTigerOplogManager::waitForAllEarlierOplogWritesToBeVisible(
// Prevent any scheduled journal flushes from being delayed and blocking this wait excessively.
_opsWaitingForVisibility++;
invariant(_opsWaitingForVisibility > 0);
- auto exitGuard = MakeGuard([&] { _opsWaitingForVisibility--; });
+ auto exitGuard = makeGuard([&] { _opsWaitingForVisibility--; });
opCtx->waitForConditionOrInterrupt(_opsBecameVisibleCV, lk, [&] {
auto newLatestVisibleTimestamp = getOplogReadTimestamp();
@@ -155,8 +155,8 @@ void WiredTigerOplogManager::triggerJournalFlush() {
}
}
-void WiredTigerOplogManager::_oplogJournalThreadLoop(
- WiredTigerSessionCache* sessionCache, WiredTigerRecordStore* oplogRecordStore) noexcept {
+void WiredTigerOplogManager::_oplogJournalThreadLoop(WiredTigerSessionCache* sessionCache,
+ WiredTigerRecordStore* oplogRecordStore) {
Client::initThread("WTOplogJournalThread");
// This thread updates the oplog read timestamp, the timestamp used to read from the oplog with
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_oplog_manager.h b/src/mongo/db/storage/wiredtiger/wiredtiger_oplog_manager.h
index 3a84c939dfc..c51a650dea5 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_oplog_manager.h
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_oplog_manager.h
@@ -85,7 +85,7 @@ public:
private:
void _oplogJournalThreadLoop(WiredTigerSessionCache* sessionCache,
- WiredTigerRecordStore* oplogRecordStore) noexcept;
+ WiredTigerRecordStore* oplogRecordStore);
void _setOplogReadTimestamp(WithLock, uint64_t newTimestamp);
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.cpp
index e65b027686e..ddb9f38c2c2 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.cpp
@@ -113,7 +113,7 @@ void WiredTigerOperationStats::fetchStats(WT_SESSION* session,
uassert(ErrorCodes::CursorNotFound, "Unable to open statistics cursor", ret == 0);
invariant(c);
- ON_BLOCK_EXIT(c->close, c);
+ ON_BLOCK_EXIT([&] { c->close(c); });
const char* desc;
uint64_t value;
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_size_storer.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_size_storer.cpp
index c055f004761..c2e0e752e62 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_size_storer.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_size_storer.cpp
@@ -101,7 +101,7 @@ std::shared_ptr<WiredTigerSizeStorer::SizeInfo> WiredTigerSizeStorer::load(Strin
stdx::lock_guard<stdx::mutex> cursorLock(_cursorMutex);
// Intentionally ignoring return value.
- ON_BLOCK_EXIT(_cursor->reset, _cursor);
+ ON_BLOCK_EXIT([&] { _cursor->reset(_cursor); });
_cursor->reset(_cursor);
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_util.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_util.cpp
index 12ea893ba01..845a7b2ebfe 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_util.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_util.cpp
@@ -138,7 +138,7 @@ StatusWith<std::string> WiredTigerUtil::getMetadata(OperationContext* opCtx, Str
session->getCursor("metadata:create", WiredTigerSession::kMetadataTableId, false);
invariant(cursor);
auto releaser =
- MakeGuard([&] { session->releaseCursor(WiredTigerSession::kMetadataTableId, cursor); });
+ makeGuard([&] { session->releaseCursor(WiredTigerSession::kMetadataTableId, cursor); });
std::string strUri = uri.toString();
cursor->set_key(cursor, strUri.c_str());
@@ -326,7 +326,7 @@ StatusWith<uint64_t> WiredTigerUtil::getStatisticsValue(WT_SESSION* session,
<< wiredtiger_strerror(ret));
}
invariant(cursor);
- ON_BLOCK_EXIT(cursor->close, cursor);
+ ON_BLOCK_EXIT([&] { cursor->close(cursor); });
cursor->set_key(cursor, statisticsKey);
ret = cursor->search(cursor);
@@ -519,7 +519,7 @@ int WiredTigerUtil::verifyTable(OperationContext* opCtx,
WT_CONNECTION* conn = WiredTigerRecoveryUnit::get(opCtx)->getSessionCache()->conn();
WT_SESSION* session;
invariantWTOK(conn->open_session(conn, &eventHandler, NULL, &session));
- ON_BLOCK_EXIT(session->close, session, "");
+ ON_BLOCK_EXIT([&] { session->close(session, ""); });
// Do the verify. Weird parens prevent treating "verify" as a macro.
return (session->verify)(session, uri.c_str(), NULL);
@@ -616,7 +616,7 @@ Status WiredTigerUtil::exportTableToBSON(WT_SESSION* session,
}
bob->append("uri", uri);
invariant(c);
- ON_BLOCK_EXIT(c->close, c);
+ ON_BLOCK_EXIT([&] { c->close(c); });
std::map<string, BSONObjBuilder*> subs;
const char* desc;
diff --git a/src/mongo/db/transaction_participant.cpp b/src/mongo/db/transaction_participant.cpp
index 087f158c2cd..49151824ea9 100644
--- a/src/mongo/db/transaction_participant.cpp
+++ b/src/mongo/db/transaction_participant.cpp
@@ -830,7 +830,7 @@ Timestamp TransactionParticipant::prepareTransaction(OperationContext* opCtx,
// session kill and migration, which do not check out the session.
_checkIsActiveTransaction(lk, *opCtx->getTxnNumber(), true);
- ScopeGuard abortGuard = MakeGuard([&] {
+ auto abortGuard = makeGuard([&] {
// Prepare transaction on secondaries should always succeed.
invariant(!prepareOptime);
diff --git a/src/mongo/db/transaction_reaper.cpp b/src/mongo/db/transaction_reaper.cpp
index 02ed1e0783b..e87d267d146 100644
--- a/src/mongo/db/transaction_reaper.cpp
+++ b/src/mongo/db/transaction_reaper.cpp
@@ -161,7 +161,7 @@ int removeSessionsRecords(OperationContext* opCtx,
Locker::LockSnapshot snapshot;
invariant(locker->saveLockStateAndUnlock(&snapshot));
- const auto guard = MakeGuard([&] {
+ const auto guard = makeGuard([&] {
UninterruptibleLockGuard noInterrupt(opCtx->lockState());
locker->restoreLockState(opCtx, snapshot);
});
diff --git a/src/mongo/db/views/view_graph.cpp b/src/mongo/db/views/view_graph.cpp
index a4131ad2ec3..9c89e000e4b 100644
--- a/src/mongo/db/views/view_graph.cpp
+++ b/src/mongo/db/views/view_graph.cpp
@@ -66,10 +66,7 @@ Status ViewGraph::insertAndValidate(const ViewDefinition& view,
// If the graph fails validation for any reason, the insert is automatically rolled back on
// exiting this method.
- auto rollBackInsert = [&]() -> auto {
- remove(viewNss);
- };
- auto guard = MakeGuard(rollBackInsert);
+ auto guard = makeGuard([&] { remove(viewNss); });
// Check for cycles and get the height of the children.
StatsMap statsMap;
@@ -118,7 +115,7 @@ Status ViewGraph::insertAndValidate(const ViewDefinition& view,
<< " bytes"};
}
- guard.Dismiss();
+ guard.dismiss();
return Status::OK();
}
diff --git a/src/mongo/dbtests/storage_timestamp_tests.cpp b/src/mongo/dbtests/storage_timestamp_tests.cpp
index 131ff253790..d856592ff64 100644
--- a/src/mongo/dbtests/storage_timestamp_tests.cpp
+++ b/src/mongo/dbtests/storage_timestamp_tests.cpp
@@ -2188,7 +2188,7 @@ public:
auto taskFuture = task.get_future();
stdx::thread taskThread{std::move(task)};
- auto joinGuard = MakeGuard([&] {
+ auto joinGuard = makeGuard([&] {
batchInProgress.promise.emplaceValue(false);
taskThread.join();
});
@@ -2241,7 +2241,7 @@ public:
auto lastOpTime = unittest::assertGet(syncTail.multiApply(_opCtx, {insertOp}));
ASSERT_EQ(insertOp.getOpTime(), lastOpTime);
- joinGuard.Dismiss();
+ joinGuard.dismiss();
taskThread.join();
// Read on the local snapshot to verify the document was inserted.
diff --git a/src/mongo/executor/connection_pool.cpp b/src/mongo/executor/connection_pool.cpp
index b3d37472c45..54107e04a2e 100644
--- a/src/mongo/executor/connection_pool.cpp
+++ b/src/mongo/executor/connection_pool.cpp
@@ -743,7 +743,7 @@ void ConnectionPool::SpecificPool::fulfillRequests(stdx::unique_lock<stdx::mutex
return;
_inFulfillRequests = true;
- auto guard = MakeGuard([&] { _inFulfillRequests = false; });
+ auto guard = makeGuard([&] { _inFulfillRequests = false; });
while (_requests.size()) {
// Caution: If this returns with a value, it's important that we not throw until we've
@@ -781,7 +781,7 @@ void ConnectionPool::SpecificPool::spawnConnections(stdx::unique_lock<stdx::mute
return;
_inSpawnConnections = true;
- auto guard = MakeGuard([&] { _inSpawnConnections = false; });
+ auto guard = makeGuard([&] { _inSpawnConnections = false; });
// We want minConnections <= outstanding requests <= maxConnections
auto target = [&] {
diff --git a/src/mongo/executor/connection_pool_test.cpp b/src/mongo/executor/connection_pool_test.cpp
index cd2ca6db149..ccb7af42fd0 100644
--- a/src/mongo/executor/connection_pool_test.cpp
+++ b/src/mongo/executor/connection_pool_test.cpp
@@ -117,7 +117,7 @@ TEST_F(ConnectionPoolTest, ConnectionsAreAcquiredInMRUOrder) {
// to the pool by destroying the 'connections' vector. Otherwise,
// this test would cause an invariant failure instead of a normal
// test failure if it fails, which would be confusing.
- const auto guard = MakeGuard([&] {
+ const auto guard = makeGuard([&] {
while (!connections.empty()) {
try {
ConnectionPool::ConnectionHandle conn = std::move(connections.back());
@@ -190,7 +190,7 @@ TEST_F(ConnectionPoolTest, ConnectionsNotUsedRecentlyArePurged) {
// to the pool by destroying the 'connections' vector. Otherwise,
// this test would cause an invariant failure instead of a normal
// test failure if it fails, which would be confusing.
- const auto guard = MakeGuard([&] {
+ const auto guard = makeGuard([&] {
while (!connections.empty()) {
try {
ConnectionPool::ConnectionHandle conn = std::move(connections.back());
diff --git a/src/mongo/executor/network_interface_perf_test.cpp b/src/mongo/executor/network_interface_perf_test.cpp
index c49a9056e94..2a76bf55e13 100644
--- a/src/mongo/executor/network_interface_perf_test.cpp
+++ b/src/mongo/executor/network_interface_perf_test.cpp
@@ -61,7 +61,7 @@ const std::size_t numOperations = 16384;
int timeNetworkTestMillis(std::size_t operations, NetworkInterface* net) {
net->startup();
- auto guard = MakeGuard([&] { net->shutdown(); });
+ auto guard = makeGuard([&] { net->shutdown(); });
auto fixture = unittest::getFixtureConnectionString();
auto server = fixture.getServers()[0];
diff --git a/src/mongo/executor/network_interface_thread_pool.cpp b/src/mongo/executor/network_interface_thread_pool.cpp
index 46c30f112b1..8fbfade41e8 100644
--- a/src/mongo/executor/network_interface_thread_pool.cpp
+++ b/src/mongo/executor/network_interface_thread_pool.cpp
@@ -147,7 +147,7 @@ void NetworkInterfaceThreadPool::consumeTasks(stdx::unique_lock<stdx::mutex> lk)
}
_consumingTasks = true;
- const auto consumingTasksGuard = MakeGuard([&] { _consumingTasks = false; });
+ const auto consumingTasksGuard = makeGuard([&] { _consumingTasks = false; });
decltype(_tasks) tasks;
@@ -156,7 +156,7 @@ void NetworkInterfaceThreadPool::consumeTasks(stdx::unique_lock<stdx::mutex> lk)
swap(tasks, _tasks);
lk.unlock();
- const auto lkGuard = MakeGuard([&] { lk.lock(); });
+ const auto lkGuard = makeGuard([&] { lk.lock(); });
for (auto&& task : tasks) {
try {
diff --git a/src/mongo/installer/msi/ca/CustomAction.cpp b/src/mongo/installer/msi/ca/CustomAction.cpp
index 37c5064aa20..f26e68321a1 100644
--- a/src/mongo/installer/msi/ca/CustomAction.cpp
+++ b/src/mongo/installer/msi/ca/CustomAction.cpp
@@ -285,7 +285,7 @@ extern "C" UINT __stdcall UpdateMongoYAML(MSIHANDLE hInstall) {
CHECKGLE_AND_LOG("Failed to open yaml file");
}
- const auto handleGuard = mongo::MakeGuard([&] { CloseHandle(hFile); });
+ const auto handleGuard = mongo::makeGuard([&] { CloseHandle(hFile); });
LARGE_INTEGER fileSize;
if (GetFileSizeEx(hFile, &fileSize) == 0) {
diff --git a/src/mongo/platform/stack_locator_freebsd.cpp b/src/mongo/platform/stack_locator_freebsd.cpp
index 6020eed4f06..beb1e805fcb 100644
--- a/src/mongo/platform/stack_locator_freebsd.cpp
+++ b/src/mongo/platform/stack_locator_freebsd.cpp
@@ -47,7 +47,7 @@ StackLocator::StackLocator() {
pthread_t self = pthread_self();
invariant(pthread_attr_init(&attr) == 0);
- ON_BLOCK_EXIT(pthread_attr_destroy, &attr);
+ ON_BLOCK_EXIT([&] { pthread_attr_destroy(&attr); });
invariant(pthread_attr_get_np(self, &attr) == 0);
diff --git a/src/mongo/platform/stack_locator_pthread_getattr_np.cpp b/src/mongo/platform/stack_locator_pthread_getattr_np.cpp
index 88d5bf1c846..72542b560d8 100644
--- a/src/mongo/platform/stack_locator_pthread_getattr_np.cpp
+++ b/src/mongo/platform/stack_locator_pthread_getattr_np.cpp
@@ -44,7 +44,7 @@ StackLocator::StackLocator() {
pthread_attr_t selfAttrs;
invariant(pthread_attr_init(&selfAttrs) == 0);
invariant(pthread_getattr_np(self, &selfAttrs) == 0);
- ON_BLOCK_EXIT(pthread_attr_destroy, &selfAttrs);
+ ON_BLOCK_EXIT([&] { pthread_attr_destroy(&selfAttrs); });
void* base = nullptr;
size_t size = 0;
diff --git a/src/mongo/rpc/metadata/client_metadata_test.cpp b/src/mongo/rpc/metadata/client_metadata_test.cpp
index fd02aee23c3..30985af8b3c 100644
--- a/src/mongo/rpc/metadata/client_metadata_test.cpp
+++ b/src/mongo/rpc/metadata/client_metadata_test.cpp
@@ -257,7 +257,7 @@ TEST(ClientMetadatTest, TestNegativeWrongTypes) {
// Negative: document larger than 512 bytes
TEST(ClientMetadatTest, TestNegativeLargeDocument) {
bool savedMongos = isMongos();
- auto unsetMongoS = MakeGuard(&setMongos, savedMongos);
+ auto unsetMongoS = makeGuard([&] { setMongos(savedMongos); });
setMongos(true);
{
diff --git a/src/mongo/rpc/object_check_test.cpp b/src/mongo/rpc/object_check_test.cpp
index f9d96f7f242..80e3e179fbc 100644
--- a/src/mongo/rpc/object_check_test.cpp
+++ b/src/mongo/rpc/object_check_test.cpp
@@ -48,7 +48,7 @@ TEST(DataTypeValidated, BSONValidationEnabled) {
bool wasEnabled = serverGlobalParams.objcheck;
const auto setValidation = [&](bool enabled) { serverGlobalParams.objcheck = enabled; };
- ON_BLOCK_EXIT(setValidation, wasEnabled);
+ ON_BLOCK_EXIT([=] { setValidation(wasEnabled); });
using std::begin;
using std::end;
diff --git a/src/mongo/s/client/shard_registry.cpp b/src/mongo/s/client/shard_registry.cpp
index ef02fb16186..812c505e885 100644
--- a/src/mongo/s/client/shard_registry.cpp
+++ b/src/mongo/s/client/shard_registry.cpp
@@ -311,7 +311,7 @@ bool ShardRegistry::reload(OperationContext* opCtx) {
auto nextReloadState = ReloadState::Failed;
- auto failGuard = MakeGuard([&] {
+ auto failGuard = makeGuard([&] {
if (!reloadLock.owns_lock()) {
reloadLock.lock();
}
diff --git a/src/mongo/s/commands/strategy.cpp b/src/mongo/s/commands/strategy.cpp
index 837c0a67855..92e4b26d694 100644
--- a/src/mongo/s/commands/strategy.cpp
+++ b/src/mongo/s/commands/strategy.cpp
@@ -492,11 +492,11 @@ void runCommand(OperationContext* opCtx,
// Update transaction tracking state for a possible retry. Throws and aborts the
// transaction if it cannot continue.
if (auto txnRouter = TransactionRouter::get(opCtx)) {
- auto abortGuard = MakeGuard(
+ auto abortGuard = makeGuard(
[&] { txnRouter->implicitlyAbortTransaction(opCtx, ex.toStatus()); });
handleCanRetryInTransaction(opCtx, txnRouter, canRetry, ex);
txnRouter->onStaleShardOrDbError(commandName, ex.toStatus());
- abortGuard.Dismiss();
+ abortGuard.dismiss();
}
if (canRetry) {
@@ -511,11 +511,11 @@ void runCommand(OperationContext* opCtx,
// Update transaction tracking state for a possible retry. Throws and aborts the
// transaction if it cannot continue.
if (auto txnRouter = TransactionRouter::get(opCtx)) {
- auto abortGuard = MakeGuard(
+ auto abortGuard = makeGuard(
[&] { txnRouter->implicitlyAbortTransaction(opCtx, ex.toStatus()); });
handleCanRetryInTransaction(opCtx, txnRouter, canRetry, ex);
txnRouter->onStaleShardOrDbError(commandName, ex.toStatus());
- abortGuard.Dismiss();
+ abortGuard.dismiss();
}
if (canRetry) {
@@ -528,11 +528,11 @@ void runCommand(OperationContext* opCtx,
// Update transaction tracking state for a possible retry. Throws and aborts the
// transaction if it cannot continue.
if (auto txnRouter = TransactionRouter::get(opCtx)) {
- auto abortGuard = MakeGuard(
+ auto abortGuard = makeGuard(
[&] { txnRouter->implicitlyAbortTransaction(opCtx, ex.toStatus()); });
handleCanRetryInTransaction(opCtx, txnRouter, canRetry, ex);
txnRouter->onSnapshotError(ex.toStatus());
- abortGuard.Dismiss();
+ abortGuard.dismiss();
}
if (canRetry) {
diff --git a/src/mongo/s/query/cluster_find.cpp b/src/mongo/s/query/cluster_find.cpp
index 9770c4a02be..ebaae43ca5e 100644
--- a/src/mongo/s/query/cluster_find.cpp
+++ b/src/mongo/s/query/cluster_find.cpp
@@ -538,11 +538,11 @@ void validateTxnNumber(OperationContext* opCtx,
void validateOperationSessionInfo(OperationContext* opCtx,
const GetMoreRequest& request,
ClusterCursorManager::PinnedCursor* cursor) {
- ScopeGuard returnCursorGuard = MakeGuard(
+ auto returnCursorGuard = makeGuard(
[cursor] { cursor->returnCursor(ClusterCursorManager::CursorState::NotExhausted); });
validateLSID(opCtx, request, cursor);
validateTxnNumber(opCtx, request, cursor);
- returnCursorGuard.Dismiss();
+ returnCursorGuard.dismiss();
}
StatusWith<CursorResponse> ClusterFind::runGetMore(OperationContext* opCtx,
diff --git a/src/mongo/s/service_entry_point_mongos.cpp b/src/mongo/s/service_entry_point_mongos.cpp
index d50fee624e3..ddb598e7c69 100644
--- a/src/mongo/s/service_entry_point_mongos.cpp
+++ b/src/mongo/s/service_entry_point_mongos.cpp
@@ -64,7 +64,7 @@ BSONObj buildErrReply(const DBException& ex) {
DbResponse ServiceEntryPointMongos::handleRequest(OperationContext* opCtx, const Message& message) {
// Release any cached egress connections for client back to pool before destroying
- auto guard = MakeGuard(ShardConnection::releaseMyConnections);
+ auto guard = makeGuard(ShardConnection::releaseMyConnections);
const int32_t msgId = message.header().getId();
const NetworkOp op = message.operation();
diff --git a/src/mongo/scripting/mozjs/implscope.cpp b/src/mongo/scripting/mozjs/implscope.cpp
index 0f3859306fd..830d2df320a 100644
--- a/src/mongo/scripting/mozjs/implscope.cpp
+++ b/src/mongo/scripting/mozjs/implscope.cpp
@@ -130,7 +130,7 @@ void MozJSImplScope::_reportError(JSContext* cx, const char* message, JSErrorRep
}
scope->_inReportError = true;
- const auto guard = MakeGuard([&] { scope->_inReportError = false; });
+ const auto guard = makeGuard([&] { scope->_inReportError = false; });
if (!JSREPORT_IS_WARNING(report->flags)) {
@@ -243,7 +243,7 @@ bool MozJSImplScope::_interruptCallback(JSContext* cx) {
auto scope = getScope(cx);
JS_SetInterruptCallback(scope->_runtime, nullptr);
- auto guard = MakeGuard([&]() { JS_SetInterruptCallback(scope->_runtime, _interruptCallback); });
+ auto guard = makeGuard([&]() { JS_SetInterruptCallback(scope->_runtime, _interruptCallback); });
if (scope->_pendingGC.load() || closeToMaxMemory()) {
scope->_pendingGC.store(false);
diff --git a/src/mongo/scripting/mozjs/proxyscope.cpp b/src/mongo/scripting/mozjs/proxyscope.cpp
index d62eb913b97..8add7340d6a 100644
--- a/src/mongo/scripting/mozjs/proxyscope.cpp
+++ b/src/mongo/scripting/mozjs/proxyscope.cpp
@@ -344,7 +344,7 @@ void MozJSProxyScope::implThread(void* arg) {
// This is mostly to silence coverity, so that it sees that the
// ProxyScope doesn't hold a reference to the ImplScope after it
// is deleted by the unique_ptr.
- const auto unbindImplScope = MakeGuard([&proxy] { proxy->_implScope = nullptr; });
+ const auto unbindImplScope = makeGuard([&proxy] { proxy->_implScope = nullptr; });
while (true) {
stdx::unique_lock<stdx::mutex> lk(proxy->_mutex);
diff --git a/src/mongo/shell/dbshell.cpp b/src/mongo/shell/dbshell.cpp
index 9306a719b6f..afe45fe337e 100644
--- a/src/mongo/shell/dbshell.cpp
+++ b/src/mongo/shell/dbshell.cpp
@@ -889,7 +889,7 @@ int _main(int argc, char* argv[], char** envp) {
mongo::getGlobalScriptEngine()->enableJavaScriptProtection(
shellGlobalParams.javascriptProtection);
- auto poolGuard = MakeGuard([] { ScriptEngine::dropScopeCache(); });
+ auto poolGuard = makeGuard([] { ScriptEngine::dropScopeCache(); });
unique_ptr<mongo::Scope> scope(mongo::getGlobalScriptEngine()->newScope());
shellMainScope = scope.get();
diff --git a/src/mongo/shell/shell_utils_extended.cpp b/src/mongo/shell/shell_utils_extended.cpp
index c4a0edee046..3b6b1999b36 100644
--- a/src/mongo/shell/shell_utils_extended.cpp
+++ b/src/mongo/shell/shell_utils_extended.cpp
@@ -200,7 +200,7 @@ BSONObj md5sumFile(const BSONObj& args, void* data) {
stringstream ss;
FILE* f = fopen(e.valuestrsafe(), "rb");
uassert(CANT_OPEN_FILE, "couldn't open file", f);
- ON_BLOCK_EXIT(fclose, f);
+ ON_BLOCK_EXIT([&] { fclose(f); });
md5digest d;
md5_state_t st;
@@ -275,10 +275,10 @@ BSONObj copyFile(const BSONObj& args, void* data) {
BSONObj writeFile(const BSONObj& args, void* data) {
// Parse the arguments.
- uassert(
- 40340,
- "writeFile requires at least 2 arguments: writeFile(filePath, content, [useBinaryMode])",
- args.nFields() >= 2);
+ uassert(40340,
+ "writeFile requires at least 2 arguments: writeFile(filePath, content, "
+ "[useBinaryMode])",
+ args.nFields() >= 2);
BSONObjIterator it(args);
diff --git a/src/mongo/shell/shell_utils_launcher.cpp b/src/mongo/shell/shell_utils_launcher.cpp
index 62cfb509b9b..58b33fe83aa 100644
--- a/src/mongo/shell/shell_utils_launcher.cpp
+++ b/src/mongo/shell/shell_utils_launcher.cpp
@@ -338,12 +338,10 @@ ProgramRunner::ProgramRunner(const BSONObj& args, const BSONObj& env, bool isMon
// we explicitly override them.
#ifdef _WIN32
wchar_t* processEnv = GetEnvironmentStringsW();
- ON_BLOCK_EXIT(
- [](wchar_t* toFree) {
- if (toFree)
- FreeEnvironmentStringsW(toFree);
- },
- processEnv);
+ ON_BLOCK_EXIT([processEnv] {
+ if (processEnv)
+ FreeEnvironmentStringsW(processEnv);
+ });
// Windows' GetEnvironmentStringsW returns a NULL terminated array of NULL separated
// <key>=<value> pairs.
@@ -959,7 +957,7 @@ inline void kill_wrapper(ProcessId pid, int sig, int port, const BSONObj& opt) {
return;
}
- ON_BLOCK_EXIT(CloseHandle, event);
+ ON_BLOCK_EXIT([&] { CloseHandle(event); });
bool result = SetEvent(event);
if (!result) {
diff --git a/src/mongo/transport/baton_asio_linux.h b/src/mongo/transport/baton_asio_linux.h
index e8dcf79deeb..63bfe67da3f 100644
--- a/src/mongo/transport/baton_asio_linux.h
+++ b/src/mongo/transport/baton_asio_linux.h
@@ -248,7 +248,7 @@ public:
std::vector<Promise<void>> toFulfill;
// We'll fulfill promises and run jobs on the way out, ensuring we don't hold any locks
- const auto guard = MakeGuard([&] {
+ const auto guard = makeGuard([&] {
for (auto& promise : toFulfill) {
promise.emplaceValue();
}
@@ -315,7 +315,7 @@ public:
pollSet.size(),
deadline ? Milliseconds(*deadline - now).count() : -1);
- const auto pollGuard = MakeGuard([&] {
+ const auto pollGuard = makeGuard([&] {
lk.lock();
_inPoll = false;
});
diff --git a/src/mongo/transport/service_executor_adaptive.cpp b/src/mongo/transport/service_executor_adaptive.cpp
index d7370bce318..649382086b1 100644
--- a/src/mongo/transport/service_executor_adaptive.cpp
+++ b/src/mongo/transport/service_executor_adaptive.cpp
@@ -224,7 +224,7 @@ Status ServiceExecutorAdaptive::schedule(ServiceExecutorAdaptive::Task task,
_localThreadState->executing.markRunning();
_threadsInUse.addAndFetch(1);
}
- const auto guard = MakeGuard([this, taskName] {
+ const auto guard = makeGuard([this, taskName] {
if (--_localThreadState->recursionDepth == 0) {
_localThreadState->executingCurRun += _localThreadState->executing.markStopped();
_threadsInUse.subtractAndFetch(1);
@@ -563,7 +563,7 @@ void ServiceExecutorAdaptive::_workerThreadRoutine(
log() << "Started new database worker thread " << threadId;
bool guardThreadsRunning = true;
- const auto guard = MakeGuard([this, &guardThreadsRunning, state] {
+ const auto guard = makeGuard([this, &guardThreadsRunning, state] {
if (guardThreadsRunning)
_threadsRunning.subtractAndFetch(1);
_pastThreadsSpentRunning.addAndFetch(state->running.totalTime());
diff --git a/src/mongo/transport/service_executor_adaptive_test.cpp b/src/mongo/transport/service_executor_adaptive_test.cpp
index 66d00b1dfd9..04dff386f4b 100644
--- a/src/mongo/transport/service_executor_adaptive_test.cpp
+++ b/src/mongo/transport/service_executor_adaptive_test.cpp
@@ -168,7 +168,7 @@ TEST_F(ServiceExecutorAdaptiveFixture, TestStuckTask) {
stdx::unique_lock<stdx::mutex> blockedLock(blockedMutex);
auto exec = makeAndStartExecutor<TestOptions>();
- auto guard = MakeGuard([&] {
+ auto guard = makeGuard([&] {
if (blockedLock)
blockedLock.unlock();
ASSERT_OK(exec->shutdown(config->workerThreadRunTime() * 2));
@@ -213,7 +213,7 @@ TEST_F(ServiceExecutorAdaptiveFixture, TestStuckThreads) {
stdx::unique_lock<stdx::mutex> blockedLock(blockedMutex);
auto exec = makeAndStartExecutor<TestOptions>();
- auto guard = MakeGuard([&] {
+ auto guard = makeGuard([&] {
if (blockedLock)
blockedLock.unlock();
ASSERT_OK(exec->shutdown(config->workerThreadRunTime() * 2));
@@ -264,7 +264,7 @@ TEST_F(ServiceExecutorAdaptiveFixture, TestStarvation) {
// Mutex so we don't attempt to call schedule and shutdown concurrently
stdx::mutex scheduleMutex;
- auto guard = MakeGuard([&] { ASSERT_OK(exec->shutdown(config->workerThreadRunTime() * 2)); });
+ auto guard = makeGuard([&] { ASSERT_OK(exec->shutdown(config->workerThreadRunTime() * 2)); });
bool scheduleNew{true};
@@ -316,7 +316,7 @@ TEST_F(ServiceExecutorAdaptiveFixture, TestRecursion) {
stdx::condition_variable cv;
stdx::function<void()> task;
- auto guard = MakeGuard([&] { ASSERT_OK(exec->shutdown(config->workerThreadRunTime() * 2)); });
+ auto guard = makeGuard([&] { ASSERT_OK(exec->shutdown(config->workerThreadRunTime() * 2)); });
task = [this, &task, &exec, &mutex, &cv, &remainingTasks] {
if (remainingTasks.subtractAndFetch(1) == 0) {
@@ -358,7 +358,7 @@ TEST_F(ServiceExecutorAdaptiveFixture, TestDeferredTasks) {
stdx::unique_lock<stdx::mutex> blockedLock(blockedMutex);
auto exec = makeAndStartExecutor<TestOptions>();
- auto guard = MakeGuard([&] {
+ auto guard = makeGuard([&] {
if (blockedLock)
blockedLock.unlock();
ASSERT_OK(exec->shutdown(config->workerThreadRunTime() * 2));
diff --git a/src/mongo/transport/service_executor_reserved.cpp b/src/mongo/transport/service_executor_reserved.cpp
index 93409ba9e26..beeb7618bd0 100644
--- a/src/mongo/transport/service_executor_reserved.cpp
+++ b/src/mongo/transport/service_executor_reserved.cpp
@@ -88,7 +88,7 @@ Status ServiceExecutorReserved::_startWorker() {
return launchServiceWorkerThread([this] {
stdx::unique_lock<stdx::mutex> lk(_mutex);
_numRunningWorkerThreads.addAndFetch(1);
- auto numRunningGuard = MakeGuard([&] {
+ auto numRunningGuard = makeGuard([&] {
_numRunningWorkerThreads.subtractAndFetch(1);
_shutdownCondition.notify_one();
});
diff --git a/src/mongo/transport/service_executor_test.cpp b/src/mongo/transport/service_executor_test.cpp
index f6c66ec98b7..9d2d28d12c1 100644
--- a/src/mongo/transport/service_executor_test.cpp
+++ b/src/mongo/transport/service_executor_test.cpp
@@ -198,7 +198,7 @@ void scheduleBasicTask(ServiceExecutor* exec, bool expectSuccess) {
TEST_F(ServiceExecutorAdaptiveFixture, BasicTaskRuns) {
ASSERT_OK(executor->start());
- auto guard = MakeGuard([this] { ASSERT_OK(executor->shutdown(kShutdownTime)); });
+ auto guard = makeGuard([this] { ASSERT_OK(executor->shutdown(kShutdownTime)); });
scheduleBasicTask(executor.get(), true);
}
@@ -209,7 +209,7 @@ TEST_F(ServiceExecutorAdaptiveFixture, ScheduleFailsBeforeStartup) {
TEST_F(ServiceExecutorSynchronousFixture, BasicTaskRuns) {
ASSERT_OK(executor->start());
- auto guard = MakeGuard([this] { ASSERT_OK(executor->shutdown(kShutdownTime)); });
+ auto guard = makeGuard([this] { ASSERT_OK(executor->shutdown(kShutdownTime)); });
scheduleBasicTask(executor.get(), true);
}
diff --git a/src/mongo/transport/transport_layer_asio_integration_test.cpp b/src/mongo/transport/transport_layer_asio_integration_test.cpp
index 39edbea0be8..00b73b246db 100644
--- a/src/mongo/transport/transport_layer_asio_integration_test.cpp
+++ b/src/mongo/transport/transport_layer_asio_integration_test.cpp
@@ -106,7 +106,7 @@ TEST(TransportLayerASIO, ShortReadsAndWritesWork) {
auto reactor = sc->getTransportLayer()->getReactor(transport::TransportLayer::kNewReactor);
stdx::thread thread([&] { reactor->run(); });
- const auto threadGuard = MakeGuard([&] {
+ const auto threadGuard = makeGuard([&] {
reactor->stop();
thread.join();
});
@@ -129,7 +129,7 @@ TEST(TransportLayerASIO, ShortReadsAndWritesWork) {
if (auto baton = sc->getTransportLayer()->makeBaton(opCtx.get())) {
auto future = handle->runCommandRequest(ecr, baton);
- const auto batonGuard = MakeGuard([&] { baton->detach(); });
+ const auto batonGuard = makeGuard([&] { baton->detach(); });
future.get(opCtx.get());
}
@@ -144,7 +144,7 @@ TEST(TransportLayerASIO, asyncConnectTimeoutCleansUpSocket) {
stdx::thread thread([&] { reactor->run(); });
- const auto threadGuard = MakeGuard([&] {
+ const auto threadGuard = makeGuard([&] {
reactor->stop();
thread.join();
});
diff --git a/src/mongo/unittest/unittest.cpp b/src/mongo/unittest/unittest.cpp
index eea2d7afac3..ac8c1947f7d 100644
--- a/src/mongo/unittest/unittest.cpp
+++ b/src/mongo/unittest/unittest.cpp
@@ -162,7 +162,7 @@ private:
};
template <typename F>
-inline UnsafeScopeGuard<F> MakeUnsafeScopeGuard(F fun) {
+UnsafeScopeGuard<F> MakeUnsafeScopeGuard(F fun) {
return UnsafeScopeGuard<F>(std::move(fun));
}
diff --git a/src/mongo/util/decoration_registry.h b/src/mongo/util/decoration_registry.h
index 921d8f9b009..6f852b387e2 100644
--- a/src/mongo/util/decoration_registry.h
+++ b/src/mongo/util/decoration_registry.h
@@ -100,7 +100,7 @@ public:
});
};
- auto cleanup = MakeGuard(std::move(cleanupFunction));
+ auto cleanup = makeGuard(std::move(cleanupFunction));
using std::cend;
@@ -108,7 +108,7 @@ public:
iter->constructor(container->getDecoration(iter->descriptor));
}
- cleanup.Dismiss();
+ cleanup.dismiss();
}
/**
diff --git a/src/mongo/util/net/hostname_canonicalization.cpp b/src/mongo/util/net/hostname_canonicalization.cpp
index 9a52454dcb4..ae39f103e79 100644
--- a/src/mongo/util/net/hostname_canonicalization.cpp
+++ b/src/mongo/util/net/hostname_canonicalization.cpp
@@ -95,7 +95,7 @@ std::vector<std::string> getHostFQDNs(std::string hostName, HostnameCanonicaliza
<< getAddrInfoStrError(err);
return results;
}
- const auto guard = MakeGuard(shim_freeaddrinfo);
+ const auto guard = makeGuard(shim_freeaddrinfo);
if (mode == HostnameCanonicalizationMode::kForward) {
results.emplace_back(shim_fromNativeString(info->ai_canonname));
diff --git a/src/mongo/util/net/http_client_winhttp.cpp b/src/mongo/util/net/http_client_winhttp.cpp
index 9155580489c..de095199e2d 100644
--- a/src/mongo/util/net/http_client_winhttp.cpp
+++ b/src/mongo/util/net/http_client_winhttp.cpp
@@ -181,7 +181,7 @@ private:
// Cleanup handled in a guard rather than UniquePtrs to ensure order.
HINTERNET session = nullptr, connect = nullptr, request = nullptr;
- auto guard = MakeGuard([&] {
+ auto guard = makeGuard([&] {
if (request) {
WinHttpCloseHandle(request);
}
diff --git a/src/mongo/util/net/ssl_manager_openssl.cpp b/src/mongo/util/net/ssl_manager_openssl.cpp
index 44521e0115c..1ab1ce6660a 100644
--- a/src/mongo/util/net/ssl_manager_openssl.cpp
+++ b/src/mongo/util/net/ssl_manager_openssl.cpp
@@ -1061,7 +1061,7 @@ bool SSLManagerOpenSSL::_initSynchronousSSLContext(UniqueSSLContext* contextPtr,
unsigned long long SSLManagerOpenSSL::_convertASN1ToMillis(ASN1_TIME* asn1time) {
BIO* outBIO = BIO_new(BIO_s_mem());
int timeError = ASN1_TIME_print(outBIO, asn1time);
- ON_BLOCK_EXIT(BIO_free, outBIO);
+ ON_BLOCK_EXIT([&] { BIO_free(outBIO); });
if (timeError <= 0) {
error() << "ASN1_TIME_print failed or wrote no data.";
@@ -1104,7 +1104,7 @@ bool SSLManagerOpenSSL::_parseAndValidateCertificate(const std::string& keyFile,
return false;
}
- ON_BLOCK_EXIT(BIO_free, inBIO);
+ ON_BLOCK_EXIT([&] { BIO_free(inBIO); });
if (BIO_read_filename(inBIO, keyFile.c_str()) <= 0) {
error() << "cannot read key file when setting subject name: " << keyFile << ' '
<< getSSLErrorMessage(ERR_get_error());
@@ -1118,7 +1118,7 @@ bool SSLManagerOpenSSL::_parseAndValidateCertificate(const std::string& keyFile,
<< getSSLErrorMessage(ERR_get_error());
return false;
}
- ON_BLOCK_EXIT(X509_free, x509);
+ ON_BLOCK_EXIT([&] { X509_free(x509); });
*subjectName = getCertificateSubjectX509Name(x509);
if (serverCertificateExpirationDate != NULL) {
@@ -1159,7 +1159,7 @@ bool SSLManagerOpenSSL::_setupPEM(SSL_CTX* context,
error() << "failed to allocate BIO object: " << getSSLErrorMessage(ERR_get_error());
return false;
}
- const auto bioGuard = MakeGuard([&inBio]() { BIO_free(inBio); });
+ const auto bioGuard = makeGuard([&inBio]() { BIO_free(inBio); });
if (BIO_read_filename(inBio, keyFile.c_str()) <= 0) {
error() << "cannot read PEM key file: " << keyFile << ' '
@@ -1176,7 +1176,7 @@ bool SSLManagerOpenSSL::_setupPEM(SSL_CTX* context,
<< getSSLErrorMessage(ERR_get_error());
return false;
}
- const auto privateKeyGuard = MakeGuard([&privateKey]() { EVP_PKEY_free(privateKey); });
+ const auto privateKeyGuard = makeGuard([&privateKey]() { EVP_PKEY_free(privateKey); });
if (SSL_CTX_use_PrivateKey(context, privateKey) != 1) {
error() << "cannot use PEM key file: " << keyFile << ' '
@@ -1243,7 +1243,7 @@ Status importCertStoreToX509_STORE(const wchar_t* storeName,
return {ErrorCodes::InvalidSSLConfiguration,
str::stream() << "error opening system CA store: " << errnoWithDescription()};
}
- auto systemStoreGuard = MakeGuard([systemStore]() { CertCloseStore(systemStore, 0); });
+ auto systemStoreGuard = makeGuard([systemStore]() { CertCloseStore(systemStore, 0); });
PCCERT_CONTEXT certCtx = NULL;
while ((certCtx = CertEnumCertificatesInStore(systemStore, certCtx)) != NULL) {
@@ -1254,7 +1254,7 @@ Status importCertStoreToX509_STORE(const wchar_t* storeName,
str::stream() << "Error parsing X509 object from Windows certificate store"
<< SSLManagerInterface::getSSLErrorMessage(ERR_get_error())};
}
- const auto x509ObjGuard = MakeGuard([&x509Obj]() { X509_free(x509Obj); });
+ const auto x509ObjGuard = makeGuard([&x509Obj]() { X509_free(x509Obj); });
if (X509_STORE_add_cert(verifyStore, x509Obj) != 1) {
auto status = checkX509_STORE_error();
@@ -1539,7 +1539,7 @@ StatusWith<boost::optional<SSLPeerInfo>> SSLManagerOpenSSL::parseAndValidatePeer
return Status(ErrorCodes::SSLHandshakeFailed, msg);
}
}
- ON_BLOCK_EXIT(X509_free, peerCert);
+ ON_BLOCK_EXIT([&] { X509_free(peerCert); });
long result = SSL_get_verify_result(conn);
diff --git a/src/mongo/util/perfctr_collect.cpp b/src/mongo/util/perfctr_collect.cpp
index 6e195d664b2..acf6f3a79e8 100644
--- a/src/mongo/util/perfctr_collect.cpp
+++ b/src/mongo/util/perfctr_collect.cpp
@@ -84,7 +84,7 @@ std::string errnoWithPdhDescription(PDH_STATUS status) {
return str::stream() << "Format message failed with " << gle << " for status " << status;
}
- ScopeGuard errorTextGuard = MakeGuard([errorText]() { LocalFree(errorText); });
+ auto errorTextGuard = makeGuard([errorText] { LocalFree(errorText); });
std::string utf8ErrorText = toUtf8String(errorText);
auto size = utf8ErrorText.find_first_of("\r\n");
diff --git a/src/mongo/util/processinfo_solaris.cpp b/src/mongo/util/processinfo_solaris.cpp
index b115a41a008..779b3b6ac6c 100644
--- a/src/mongo/util/processinfo_solaris.cpp
+++ b/src/mongo/util/processinfo_solaris.cpp
@@ -207,7 +207,7 @@ bool ProcessInfo::checkNumaEnabled() {
return false;
}
- ON_BLOCK_EXIT(lgrp_fini, cookie);
+ ON_BLOCK_EXIT([&] { lgrp_fini(cookie); });
int groups = lgrp_nlgrps(cookie);
diff --git a/src/mongo/util/procparser.cpp b/src/mongo/util/procparser.cpp
index 625b8b7466a..90d148f3219 100644
--- a/src/mongo/util/procparser.cpp
+++ b/src/mongo/util/procparser.cpp
@@ -97,7 +97,7 @@ StatusWith<std::string> readFileAsString(StringData filename) {
str::stream() << "Failed to open file " << filename << " with error: "
<< errnoWithDescription(err));
}
- auto scopedGuard = MakeGuard([fd] { close(fd); });
+ auto scopedGuard = makeGuard([fd] { close(fd); });
BufBuilder builder(kFileBufferSize);
std::array<char, kFileBufferSize> buf;
diff --git a/src/mongo/util/producer_consumer_queue.h b/src/mongo/util/producer_consumer_queue.h
index 44576fd667d..626370489e4 100644
--- a/src/mongo/util/producer_consumer_queue.h
+++ b/src/mongo/util/producer_consumer_queue.h
@@ -814,7 +814,7 @@ private:
_checkProducerClosed(lk);
- const auto guard = MakeGuard([&] { _notifyIfNecessary(lk); });
+ const auto guard = makeGuard([&] { _notifyIfNecessary(lk); });
return cb(lk);
}
@@ -825,7 +825,7 @@ private:
_checkConsumerClosed(lk);
- const auto guard = MakeGuard([&] { _notifyIfNecessary(lk); });
+ const auto guard = makeGuard([&] { _notifyIfNecessary(lk); });
return cb(lk);
}
diff --git a/src/mongo/util/scopeguard.h b/src/mongo/util/scopeguard.h
index f9b9862c858..144b067b031 100644
--- a/src/mongo/util/scopeguard.h
+++ b/src/mongo/util/scopeguard.h
@@ -1,379 +1,76 @@
-////////////////////////////////////////////////////////////////////////////////
-// The Loki Library
-// Copyright (c) 2000 Andrei Alexandrescu
-// Copyright (c) 2000 Petru Marginean
-// Copyright (c) 2005 Joshua Lehrer
-//
-// Permission to use, copy, modify, distribute and sell this software for any
-// purpose is hereby granted without fee, provided that the above copyright
-// notice appear in all copies and that both that copyright notice and this
-// permission notice appear in supporting documentation.
-// The author makes no representations about the
-// suitability of this software for any purpose. It is provided "as is"
-// without express or implied warranty.
-////////////////////////////////////////////////////////////////////////////////
+/**
+ * Copyright (C) 2018-present MongoDB, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the Server Side Public License, version 1,
+ * as published by MongoDB, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * Server Side Public License for more details.
+ *
+ * You should have received a copy of the Server Side Public License
+ * along with this program. If not, see
+ * <http://www.mongodb.com/licensing/server-side-public-license>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the Server Side Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
#pragma once
-#include <exception>
+#include <type_traits>
#include "mongo/platform/compiler.h"
namespace mongo {
-////////////////////////////////////////////////////////////////////////////////
-/// \class RefToValue
-///
-/// Transports a reference as a value
-/// Serves to implement the Colvin/Gibbons trick for SmartPtr/ScopeGuard
-////////////////////////////////////////////////////////////////////////////////
-
-template <class T>
-class RefToValue {
-public:
- RefToValue(T& ref) : ref_(ref) {}
-
- RefToValue(const RefToValue& rhs) : ref_(rhs.ref_) {}
-
- operator T&() const {
- return ref_;
- }
-
-private:
- // Disable - not implemented
- RefToValue();
- RefToValue& operator=(const RefToValue&);
-
- T& ref_;
-};
-
-
-////////////////////////////////////////////////////////////////////////////////
-/// RefToValue creator.
-////////////////////////////////////////////////////////////////////////////////
-
-template <class T>
-inline RefToValue<T> ByRef(T& t) {
- return RefToValue<T>(t);
-}
-
-
-////////////////////////////////////////////
-/// ScopeGuard
-/*
- Trivial example for use:
-
- FILE* f = fopen("myfile.txt", "w+");
- if (!f)
- return error;
- ON_BLOCK_EXIT(fclose, f);
-
-
- More complicated example:
-
- ScopeGuard guard = MakeGuard(my_rollback_func, myparam);
- ...
- if (successful) {
- guard.Dismiss();
- return;
- }
- // guard is still active here and will fire at scope exit
- ...
-
-
-*/
-
-
-class ScopeGuardImplBase {
- ScopeGuardImplBase& operator=(const ScopeGuardImplBase&);
-
-protected:
- ~ScopeGuardImplBase() {}
-
- ScopeGuardImplBase(const ScopeGuardImplBase& other) throw() : dismissed_(other.dismissed_) {
- other.Dismiss();
- }
-
- template <typename J>
- static void SafeExecute(J& j) throw() {
- if (!j.dismissed_)
- try {
- j.Execute();
- } catch (...) {
- std::terminate();
- }
- }
-
- mutable bool dismissed_;
-
-public:
- ScopeGuardImplBase() throw() : dismissed_(false) {}
-
- void Dismiss() const throw() {
- dismissed_ = true;
- }
-};
-
-////////////////////////////////////////////////////////////////
-///
-/// \typedef typedef const ScopeGuardImplBase& ScopeGuard
-///
-/// See Andrei's and Petru Marginean's CUJ article
-/// http://www.cuj.com/documents/s=8000/cujcexp1812alexandr/alexandr.htm
-///
-/// Changes to the original code by Joshua Lehrer:
-/// http://www.lehrerfamily.com/scopeguard.html
-////////////////////////////////////////////////////////////////
-
-typedef const ScopeGuardImplBase& ScopeGuard;
-
template <typename F>
-class ScopeGuardImpl0 : public ScopeGuardImplBase {
-public:
- static ScopeGuardImpl0<F> MakeGuard(F fun) {
- return ScopeGuardImpl0<F>(fun);
- }
-
- ~ScopeGuardImpl0() throw() {
- SafeExecute(*this);
- }
-
- void Execute() {
- fun_();
- }
-
-protected:
- ScopeGuardImpl0(F fun) : fun_(fun) {}
-
- F fun_;
-};
-
-template <typename F>
-inline ScopeGuardImpl0<F> MakeGuard(F fun) {
- return ScopeGuardImpl0<F>::MakeGuard(fun);
-}
-
-template <typename F, typename P1>
-class ScopeGuardImpl1 : public ScopeGuardImplBase {
-public:
- static ScopeGuardImpl1<F, P1> MakeGuard(F fun, P1 p1) {
- return ScopeGuardImpl1<F, P1>(fun, p1);
- }
-
- ~ScopeGuardImpl1() throw() {
- SafeExecute(*this);
- }
-
- void Execute() {
- fun_(p1_);
- }
-
-protected:
- ScopeGuardImpl1(F fun, P1 p1) : fun_(fun), p1_(p1) {}
-
- F fun_;
- const P1 p1_;
-};
-
-template <typename F, typename P1>
-inline ScopeGuardImpl1<F, P1> MakeGuard(F fun, P1 p1) {
- return ScopeGuardImpl1<F, P1>::MakeGuard(fun, p1);
-}
-
-template <typename F, typename P1, typename P2>
-class ScopeGuardImpl2 : public ScopeGuardImplBase {
+class ScopeGuard {
public:
- static ScopeGuardImpl2<F, P1, P2> MakeGuard(F fun, P1 p1, P2 p2) {
- return ScopeGuardImpl2<F, P1, P2>(fun, p1, p2);
- }
-
- ~ScopeGuardImpl2() throw() {
- SafeExecute(*this);
- }
-
- void Execute() {
- fun_(p1_, p2_);
- }
-
-protected:
- ScopeGuardImpl2(F fun, P1 p1, P2 p2) : fun_(fun), p1_(p1), p2_(p2) {}
-
- F fun_;
- const P1 p1_;
- const P2 p2_;
-};
-
-template <typename F, typename P1, typename P2>
-inline ScopeGuardImpl2<F, P1, P2> MakeGuard(F fun, P1 p1, P2 p2) {
- return ScopeGuardImpl2<F, P1, P2>::MakeGuard(fun, p1, p2);
-}
-
-template <typename F, typename P1, typename P2, typename P3>
-class ScopeGuardImpl3 : public ScopeGuardImplBase {
-public:
- static ScopeGuardImpl3<F, P1, P2, P3> MakeGuard(F fun, P1 p1, P2 p2, P3 p3) {
- return ScopeGuardImpl3<F, P1, P2, P3>(fun, p1, p2, p3);
- }
-
- ~ScopeGuardImpl3() throw() {
- SafeExecute(*this);
- }
-
- void Execute() {
- fun_(p1_, p2_, p3_);
- }
-
-protected:
- ScopeGuardImpl3(F fun, P1 p1, P2 p2, P3 p3) : fun_(fun), p1_(p1), p2_(p2), p3_(p3) {}
-
- F fun_;
- const P1 p1_;
- const P2 p2_;
- const P3 p3_;
-};
+ explicit ScopeGuard(const F& f) : _func(f) {}
+ explicit ScopeGuard(F&& f) : _func(std::move(f)) {}
-template <typename F, typename P1, typename P2, typename P3>
-inline ScopeGuardImpl3<F, P1, P2, P3> MakeGuard(F fun, P1 p1, P2 p2, P3 p3) {
- return ScopeGuardImpl3<F, P1, P2, P3>::MakeGuard(fun, p1, p2, p3);
-}
-
-//************************************************************
-
-template <class Obj, typename MemFun>
-class ObjScopeGuardImpl0 : public ScopeGuardImplBase {
-public:
- static ObjScopeGuardImpl0<Obj, MemFun> MakeObjGuard(Obj& obj, MemFun memFun) {
- return ObjScopeGuardImpl0<Obj, MemFun>(obj, memFun);
+ // Movable but not copyable, so a guard can be returned from a function; the moved-from guard is dismissed and will not fire.
+ ScopeGuard(const ScopeGuard&) = delete;
+ ScopeGuard(ScopeGuard&& o) : _func(std::move(o._func)), _dismissed(o._dismissed) {
+ o._dismissed = true;
}
+ ScopeGuard& operator=(const ScopeGuard&) = delete;
+ ScopeGuard& operator=(ScopeGuard&&) = delete;
- ~ObjScopeGuardImpl0() throw() {
- SafeExecute(*this);
+ ~ScopeGuard() {
+ if (_dismissed)
+ return;
+ try {
+ _func();
+ } catch (...) {
+ std::terminate();
+ }
}
- void Execute() {
- (obj_.*memFun_)();
+ void dismiss() noexcept {
+ _dismissed = true;
}
-protected:
- ObjScopeGuardImpl0(Obj& obj, MemFun memFun) : obj_(obj), memFun_(memFun) {}
-
- Obj& obj_;
- MemFun memFun_;
-};
-
-template <class Obj, typename MemFun>
-inline ObjScopeGuardImpl0<Obj, MemFun> MakeObjGuard(Obj& obj, MemFun memFun) {
- return ObjScopeGuardImpl0<Obj, MemFun>::MakeObjGuard(obj, memFun);
-}
-
-template <typename Ret, class Obj1, class Obj2>
-inline ObjScopeGuardImpl0<Obj1, Ret (Obj2::*)()> MakeGuard(Ret (Obj2::*memFun)(), Obj1& obj) {
- return ObjScopeGuardImpl0<Obj1, Ret (Obj2::*)()>::MakeObjGuard(obj, memFun);
-}
-
-template <typename Ret, class Obj1, class Obj2>
-inline ObjScopeGuardImpl0<Obj1, Ret (Obj2::*)()> MakeGuard(Ret (Obj2::*memFun)(), Obj1* obj) {
- return ObjScopeGuardImpl0<Obj1, Ret (Obj2::*)()>::MakeObjGuard(*obj, memFun);
-}
-
-template <class Obj, typename MemFun, typename P1>
-class ObjScopeGuardImpl1 : public ScopeGuardImplBase {
-public:
- static ObjScopeGuardImpl1<Obj, MemFun, P1> MakeObjGuard(Obj& obj, MemFun memFun, P1 p1) {
- return ObjScopeGuardImpl1<Obj, MemFun, P1>(obj, memFun, p1);
- }
-
- ~ObjScopeGuardImpl1() throw() {
- SafeExecute(*this);
- }
-
- void Execute() {
- (obj_.*memFun_)(p1_);
- }
-
-protected:
- ObjScopeGuardImpl1(Obj& obj, MemFun memFun, P1 p1) : obj_(obj), memFun_(memFun), p1_(p1) {}
-
- Obj& obj_;
- MemFun memFun_;
- const P1 p1_;
-};
-
-template <class Obj, typename MemFun, typename P1>
-inline ObjScopeGuardImpl1<Obj, MemFun, P1> MakeObjGuard(Obj& obj, MemFun memFun, P1 p1) {
- return ObjScopeGuardImpl1<Obj, MemFun, P1>::MakeObjGuard(obj, memFun, p1);
-}
-
-template <typename Ret, class Obj1, class Obj2, typename P1a, typename P1b>
-inline ObjScopeGuardImpl1<Obj1, Ret (Obj2::*)(P1a), P1b> MakeGuard(Ret (Obj2::*memFun)(P1a),
- Obj1& obj,
- P1b p1) {
- return ObjScopeGuardImpl1<Obj1, Ret (Obj2::*)(P1a), P1b>::MakeObjGuard(obj, memFun, p1);
-}
-
-template <typename Ret, class Obj1, class Obj2, typename P1a, typename P1b>
-inline ObjScopeGuardImpl1<Obj1, Ret (Obj2::*)(P1a), P1b> MakeGuard(Ret (Obj2::*memFun)(P1a),
- Obj1* obj,
- P1b p1) {
- return ObjScopeGuardImpl1<Obj1, Ret (Obj2::*)(P1a), P1b>::MakeObjGuard(*obj, memFun, p1);
-}
-
-template <class Obj, typename MemFun, typename P1, typename P2>
-class ObjScopeGuardImpl2 : public ScopeGuardImplBase {
-public:
- static ObjScopeGuardImpl2<Obj, MemFun, P1, P2> MakeObjGuard(Obj& obj,
- MemFun memFun,
- P1 p1,
- P2 p2) {
- return ObjScopeGuardImpl2<Obj, MemFun, P1, P2>(obj, memFun, p1, p2);
- }
-
- ~ObjScopeGuardImpl2() throw() {
- SafeExecute(*this);
- }
-
- void Execute() {
- (obj_.*memFun_)(p1_, p2_);
- }
-
-protected:
- ObjScopeGuardImpl2(Obj& obj, MemFun memFun, P1 p1, P2 p2)
- : obj_(obj), memFun_(memFun), p1_(p1), p2_(p2) {}
-
- Obj& obj_;
- MemFun memFun_;
- const P1 p1_;
- const P2 p2_;
+private:
+ F _func;
+ bool _dismissed = false;
};
-template <class Obj, typename MemFun, typename P1, typename P2>
-inline ObjScopeGuardImpl2<Obj, MemFun, P1, P2> MakeObjGuard(Obj& obj, MemFun memFun, P1 p1, P2 p2) {
- return ObjScopeGuardImpl2<Obj, MemFun, P1, P2>::MakeObjGuard(obj, memFun, p1, p2);
-}
-
-template <typename Ret,
- class Obj1,
- class Obj2,
- typename P1a,
- typename P1b,
- typename P2a,
- typename P2b>
-inline ObjScopeGuardImpl2<Obj1, Ret (Obj2::*)(P1a, P2a), P1b, P2b> MakeGuard(
- Ret (Obj2::*memFun)(P1a, P2a), Obj1& obj, P1b p1, P2b p2) {
- return ObjScopeGuardImpl2<Obj1, Ret (Obj2::*)(P1a, P2a), P1b, P2b>::MakeObjGuard(
- obj, memFun, p1, p2);
-}
-
-template <typename Ret,
- class Obj1,
- class Obj2,
- typename P1a,
- typename P1b,
- typename P2a,
- typename P2b>
-inline ObjScopeGuardImpl2<Obj1, Ret (Obj2::*)(P1a, P2a), P1b, P2b> MakeGuard(
- Ret (Obj2::*memFun)(P1a, P2a), Obj1* obj, P1b p1, P2b p2) {
- return ObjScopeGuardImpl2<Obj1, Ret (Obj2::*)(P1a, P2a), P1b, P2b>::MakeObjGuard(
- *obj, memFun, p1, p2);
+template <typename F>
+auto makeGuard(F&& fun) {
+ return ScopeGuard<std::decay_t<F>>(std::forward<F>(fun));
}
} // namespace mongo
@@ -382,7 +79,4 @@ inline ObjScopeGuardImpl2<Obj1, Ret (Obj2::*)(P1a, P2a), P1b, P2b> MakeGuard(
#define MONGO_SCOPEGUARD_CAT(s1, s2) MONGO_SCOPEGUARD_CAT2(s1, s2)
#define MONGO_SCOPEGUARD_ANON(str) MONGO_SCOPEGUARD_CAT(str, __LINE__)
-#define ON_BLOCK_EXIT \
- MONGO_COMPILER_VARIABLE_UNUSED ScopeGuard MONGO_SCOPEGUARD_ANON(scopeGuard) = MakeGuard
-#define ON_BLOCK_EXIT_OBJ \
- MONGO_COMPILER_VARIABLE_UNUSED ScopeGuard MONGO_SCOPEGUARD_ANON(scopeGuard) = MakeObjGuard
+#define ON_BLOCK_EXIT(...) auto MONGO_SCOPEGUARD_ANON(onBlockExit) = makeGuard(__VA_ARGS__)
diff --git a/src/mongo/util/signal_handlers.cpp b/src/mongo/util/signal_handlers.cpp
index 7bf957c9e96..53d33787b05 100644
--- a/src/mongo/util/signal_handlers.cpp
+++ b/src/mongo/util/signal_handlers.cpp
@@ -138,7 +138,7 @@ void eventProcessingThread() {
return;
}
- ON_BLOCK_EXIT(CloseHandle, event);
+ ON_BLOCK_EXIT([&] { CloseHandle(event); });
int returnCode = WaitForSingleObject(event, INFINITE);
if (returnCode != WAIT_OBJECT_0) {