author     Juan Gu <juan.gu@mongodb.com>  2023-05-05 17:59:44 +0000
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>  2023-05-17 22:34:33 +0000
commit     02bf3f55f88874bf397b42758c9cd36093633f9e
tree       3964271e581d4b735252c2462e9a2bdf32df47f6
parent     41c95363ac628dd0a89fb18c369b0fdef42496e2
download   mongo-02bf3f55f88874bf397b42758c9cd36093633f9e.tar.gz
SERVER-60173 Ban implicit capture of `this` via `[=]` in lambda expressions
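
For context: C++20 deprecates the implicit capture of `this` performed by a `[=]` capture-default, so every lambda in this patch that touches members now spells the capture as `[=, this]`. A minimal before/after sketch (the class and member names are illustrative, not taken from this change):

    class Counter {
    public:
        auto makeCallback() {
            // Deprecated in C++20: `[=]` still captures `this`, but only implicitly.
            // auto cb = [=] { return _count + 1; };

            // Same behavior, with the `this` capture spelled out explicitly.
            auto cb = [=, this] { return _count + 1; };
            return cb;
        }

    private:
        int _count = 0;
    };

The capture semantics are unchanged in either form (`this` is captured as a pointer, not by copying the object); the explicit spelling only keeps the code warning-clean once the build moves to `-std=c++20`.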
Diffstat (lines changed per file):
   9  SConstruct
   2  src/mongo/client/sdam/topology_state_machine.cpp
   4  src/mongo/db/catalog/multi_index_block.cpp
  11  src/mongo/db/index/index_build_interceptor.cpp
   2  src/mongo/db/operation_context_test.cpp
   2  src/mongo/db/process_health/fault_manager.cpp
   2  src/mongo/db/process_health/fault_manager_test_suite.h
   9  src/mongo/db/repl/SConscript
   6  src/mongo/db/repl/collection_cloner.cpp
 114  src/mongo/db/repl/initial_syncer.cpp
   2  src/mongo/db/repl/multiapplier.cpp
  33  src/mongo/db/repl/replication_coordinator_impl.cpp
   4  src/mongo/db/repl/replication_coordinator_impl_elect_v1.cpp
  56  src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp
   2  src/mongo/db/repl/replication_coordinator_impl_test.cpp
   4  src/mongo/db/repl/reporter.cpp
  14  src/mongo/db/repl/sync_source_resolver.cpp
   7  src/mongo/db/repl/tenant_file_cloner.cpp
  25  src/mongo/db/repl/tenant_oplog_applier.cpp
   2  src/mongo/db/s/config/set_cluster_parameter_coordinator.h
   2  src/mongo/db/s/config/set_user_write_block_mode_coordinator.h
   2  src/mongo/db/s/metrics/sharding_data_transform_metrics_test_fixture.h
   2  src/mongo/db/s/rename_collection_participant_service.h
   2  src/mongo/db/s/session_catalog_migration_destination.cpp
   2  src/mongo/db/s/shard_server_catalog_cache_loader.cpp
   2  src/mongo/db/s/sharding_ddl_coordinator.h
   8  src/mongo/db/session/sessions_collection_mock.cpp
   4  src/mongo/dbtests/threadedtests.cpp
   4  src/mongo/embedded/stitch_support/stitch_support_test.cpp
   2  src/mongo/executor/mock_async_rpc.h
   8  src/mongo/executor/task_executor_test_common.cpp
  12  src/mongo/scripting/SConscript
   3  src/mongo/shell/SConscript
   7  src/mongo/transport/asio/asio_transport_layer.cpp
   4  src/mongo/transport/session_workflow_test.cpp
  13  src/third_party/mozjs/SConscript
36 files changed, 204 insertions, 183 deletions
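
Alongside the lambda changes, the global `-Wno-deprecated` workaround for C++20 builds is dropped from SConstruct, and the suppression is re-scoped to the individual targets that still need it until the MozJS update tracked in SERVER-77205 (the repl, scripting, shell, and vendored mozjs SConscripts below). A minimal sketch of that per-target pattern, assuming a hypothetical library name:

    # Clone the environment so the suppression stays local to one target.
    suppressed_env = env.Clone()
    # TODO(SERVER-77205)-style suppression; skipped on Windows, where MSVC does not take this flag.
    suppressed_env.Append(
        CXXFLAGS=[] if suppressed_env.TargetOSIs('windows') else ['-Wno-deprecated'], )
    suppressed_env.Library(
        target='example_library',          # hypothetical target
        source=['example_library.cpp'],    # hypothetical source
    )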
diff --git a/SConstruct b/SConstruct
index aa246316f39..f9ab97fd905 100644
--- a/SConstruct
+++ b/SConstruct
@@ -3716,15 +3716,6 @@ def doConfigure(myenv):
# only) flag that turns it on.
myenv.AddToCXXFLAGSIfSupported("-Wunused-exception-parameter")
- # TODO(SERVER-60151): Avoid the dilemma identified in
- # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=100493. Unfortunately,
- # we don't have a more targeted warning suppression we can use
- # other than disabling all deprecation warnings. We will
- # revisit this once we are fully on C++20 and can commit the
- # C++20 style code.
- if get_option('cxx-std') == "20":
- myenv.AddToCXXFLAGSIfSupported('-Wno-deprecated')
-
# TODO SERVER-58675 - Remove this suppression after abseil is upgraded
myenv.AddToCXXFLAGSIfSupported("-Wno-deprecated-builtins")
diff --git a/src/mongo/client/sdam/topology_state_machine.cpp b/src/mongo/client/sdam/topology_state_machine.cpp
index 6cdff657f82..237ae887635 100644
--- a/src/mongo/client/sdam/topology_state_machine.cpp
+++ b/src/mongo/client/sdam/topology_state_machine.cpp
@@ -61,7 +61,7 @@ inline int idx(T enumType) {
*/
void mongo::sdam::TopologyStateMachine::initTransitionTable() {
auto bindThis = [&](auto&& pmf) {
- return [=](auto&&... a) {
+ return [=, this](auto&&... a) {
(this->*pmf)(a...);
};
};
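
The `bindThis` helper changed above (and again in fault_manager.cpp further down) is a spot where the implicit capture is easy to miss: the inner lambda only uses `this` through a pointer-to-member-function call. A self-contained sketch of the pattern with the explicit capture, using illustrative names:

    #include <iostream>

    class StateMachine {
    public:
        void run() {
            auto bindThis = [&](auto&& pmf) {
                // `this` must be captured for `(this->*pmf)(...)` to compile;
                // C++20 wants that capture written out rather than implied by `[=]`.
                return [=, this](auto&&... a) { (this->*pmf)(a...); };
            };
            auto onEvent = bindThis(&StateMachine::handleEvent);
            onEvent(42);
        }

    private:
        void handleEvent(int code) {
            std::cout << "handled event " << code << "\n";
        }
    };

    int main() {
        StateMachine{}.run();
        return 0;
    }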
diff --git a/src/mongo/db/catalog/multi_index_block.cpp b/src/mongo/db/catalog/multi_index_block.cpp
index 5d72129db49..4c6d796c613 100644
--- a/src/mongo/db/catalog/multi_index_block.cpp
+++ b/src/mongo/db/catalog/multi_index_block.cpp
@@ -865,7 +865,7 @@ Status MultiIndexBlock::dumpInsertsFromBulk(
collection,
dupsAllowed,
kYieldIterations,
- [=](const KeyString::Value& duplicateKey) {
+ [=, this](const KeyString::Value& duplicateKey) {
// Do not record duplicates when explicitly ignored. This may be the case on
// secondaries.
return writeConflictRetry(
@@ -1226,7 +1226,7 @@ Status MultiIndexBlock::_failPointHangDuringBuild(OperationContext* opCtx,
unsigned long long iteration) const {
try {
fp->executeIf(
- [=, &doc](const BSONObj& data) {
+ [=, this, &doc](const BSONObj& data) {
LOGV2(20386,
"Hanging index build during collection scan phase",
"where"_attr = where,
diff --git a/src/mongo/db/index/index_build_interceptor.cpp b/src/mongo/db/index/index_build_interceptor.cpp
index a8704fd9239..436ace5c29b 100644
--- a/src/mongo/db/index/index_build_interceptor.cpp
+++ b/src/mongo/db/index/index_build_interceptor.cpp
@@ -307,10 +307,11 @@ Status IndexBuildInterceptor::_applyWrite(OperationContext* opCtx,
// Sorted index types may choose to disallow duplicates (enforcing an unique index). Columnar
// indexes are not sorted and therefore cannot enforce uniqueness constraints. Only sorted
// indexes will use this lambda passed through the IndexAccessMethod interface.
- IndexAccessMethod::KeyHandlerFn onDuplicateKeyFn = [=](const KeyString::Value& duplicateKey) {
- return trackDups == TrackDuplicates::kTrack ? recordDuplicateKey(opCtx, duplicateKey)
- : Status::OK();
- };
+ IndexAccessMethod::KeyHandlerFn onDuplicateKeyFn =
+ [=, this](const KeyString::Value& duplicateKey) {
+ return trackDups == TrackDuplicates::kTrack ? recordDuplicateKey(opCtx, duplicateKey)
+ : Status::OK();
+ };
return _indexCatalogEntry->accessMethod()->applyIndexBuildSideWrite(
opCtx, coll, operation, options, std::move(onDuplicateKeyFn), keysInserted, keysDeleted);
@@ -549,7 +550,7 @@ void IndexBuildInterceptor::_checkDrainPhaseFailPoint(OperationContext* opCtx,
FailPoint* fp,
long long iteration) const {
fp->executeIf(
- [=](const BSONObj& data) {
+ [=, this](const BSONObj& data) {
LOGV2(4841800,
"Hanging index build during drain writes phase",
"iteration"_attr = iteration,
diff --git a/src/mongo/db/operation_context_test.cpp b/src/mongo/db/operation_context_test.cpp
index 11205ad7a19..01b8c559d7a 100644
--- a/src/mongo/db/operation_context_test.cpp
+++ b/src/mongo/db/operation_context_test.cpp
@@ -748,7 +748,7 @@ public:
boost::optional<Date_t> maxTime,
WaitFn waitFn) {
auto barrier = std::make_shared<unittest::Barrier>(2);
- task = stdx::packaged_task<bool()>([=] {
+ task = stdx::packaged_task<bool()>([=, this] {
if (maxTime)
opCtx->setDeadlineByDate(*maxTime, ErrorCodes::ExceededTimeLimit);
stdx::unique_lock<Latch> lk(mutex);
diff --git a/src/mongo/db/process_health/fault_manager.cpp b/src/mongo/db/process_health/fault_manager.cpp
index 26872eed400..c2d508df311 100644
--- a/src/mongo/db/process_health/fault_manager.cpp
+++ b/src/mongo/db/process_health/fault_manager.cpp
@@ -223,7 +223,7 @@ void FaultManager::setupStateMachine() {
});
auto bindThis = [&](auto&& pmf) {
- return [=](auto&&... a) {
+ return [=, this](auto&&... a) {
return (this->*pmf)(a...);
};
};
diff --git a/src/mongo/db/process_health/fault_manager_test_suite.h b/src/mongo/db/process_health/fault_manager_test_suite.h
index 224a14a3553..1fc93f79e00 100644
--- a/src/mongo/db/process_health/fault_manager_test_suite.h
+++ b/src/mongo/db/process_health/fault_manager_test_suite.h
@@ -283,7 +283,7 @@ public:
}
void waitForTransitionIntoState(FaultState state) {
- assertSoon([=]() { return manager().getFaultState() == state; });
+ assertSoon([=, this]() { return manager().getFaultState() == state; });
}
private:
diff --git a/src/mongo/db/repl/SConscript b/src/mongo/db/repl/SConscript
index 302dd4186d2..f313f2942fa 100644
--- a/src/mongo/db/repl/SConscript
+++ b/src/mongo/db/repl/SConscript
@@ -2031,8 +2031,13 @@ env.Library(
'hello_command',
],
)
-
-env.Library(
+wait_for_majority_service_env = env.Clone()
+# TODO(SERVER-77205): Review and Possibly Remove '-Wno-deprecated' After Mozjs Update
+wait_for_majority_service_env.Append(
+ CXXFLAGS=[] if wait_for_majority_service_env.TargetOSIs('windows') else [
+ '-Wno-deprecated',
+ ], )
+wait_for_majority_service_env.Library(
target='wait_for_majority_service',
source=[
'wait_for_majority_service.cpp',
diff --git a/src/mongo/db/repl/collection_cloner.cpp b/src/mongo/db/repl/collection_cloner.cpp
index 1680c6d91fc..a3358a67a92 100644
--- a/src/mongo/db/repl/collection_cloner.cpp
+++ b/src/mongo/db/repl/collection_cloner.cpp
@@ -411,8 +411,10 @@ void CollectionCloner::handleNextBatch(DBClientCursor& cursor) {
}
// Schedule the next document batch insertion.
- auto&& scheduleResult = _scheduleDbWorkFn(
- [=](const executor::TaskExecutor::CallbackArgs& cbd) { insertDocumentsCallback(cbd); });
+ auto&& scheduleResult =
+ _scheduleDbWorkFn([=, this](const executor::TaskExecutor::CallbackArgs& cbd) {
+ insertDocumentsCallback(cbd);
+ });
if (!scheduleResult.isOK()) {
Status newStatus = scheduleResult.getStatus().withContext(
diff --git a/src/mongo/db/repl/initial_syncer.cpp b/src/mongo/db/repl/initial_syncer.cpp
index cd4bba6c986..21d12d6ecb9 100644
--- a/src/mongo/db/repl/initial_syncer.cpp
+++ b/src/mongo/db/repl/initial_syncer.cpp
@@ -284,7 +284,7 @@ Status InitialSyncer::startup(OperationContext* opCtx,
_clonerAttemptExec = std::make_unique<executor::ScopedTaskExecutor>(
_clonerExec, Status(ErrorCodes::CallbackCanceled, "Initial Sync Attempt Canceled"));
auto status = _scheduleWorkAndSaveHandle_inlock(
- [=](const executor::TaskExecutor::CallbackArgs& args) {
+ [=, this](const executor::TaskExecutor::CallbackArgs& args) {
_startInitialSyncAttemptCallback(args, initialSyncAttempt, initialSyncMaxAttempts);
},
&_startInitialSyncAttemptHandle,
@@ -681,7 +681,7 @@ void InitialSyncer::_startInitialSyncAttemptCallback(
// _scheduleWorkAndSaveHandle_inlock() is shutdown-aware.
status = _scheduleWorkAndSaveHandle_inlock(
- [=](const executor::TaskExecutor::CallbackArgs& args) {
+ [=, this](const executor::TaskExecutor::CallbackArgs& args) {
_chooseSyncSourceCallback(
args, chooseSyncSourceAttempt, chooseSyncSourceMaxAttempts, onCompletionGuard);
},
@@ -745,7 +745,7 @@ void InitialSyncer::_chooseSyncSourceCallback(
"numInitialSyncConnectAttempts"_attr = numInitialSyncConnectAttempts.load());
auto status = _scheduleWorkAtAndSaveHandle_inlock(
when,
- [=](const executor::TaskExecutor::CallbackArgs& args) {
+ [=, this](const executor::TaskExecutor::CallbackArgs& args) {
_chooseSyncSourceCallback(args,
chooseSyncSourceAttempt + 1,
chooseSyncSourceMaxAttempts,
@@ -786,7 +786,7 @@ void InitialSyncer::_chooseSyncSourceCallback(
// Schedule rollback ID checker.
_rollbackChecker = std::make_unique<RollbackChecker>(*_attemptExec, _syncSource);
- auto scheduleResult = _rollbackChecker->reset([=](const RollbackChecker::Result& result) {
+ auto scheduleResult = _rollbackChecker->reset([=, this](const RollbackChecker::Result& result) {
return _rollbackCheckerResetCallback(result, onCompletionGuard);
});
status = scheduleResult.getStatus();
@@ -868,9 +868,9 @@ void InitialSyncer::_rollbackCheckerResetCallback(
// which retries up to 'numInitialSyncOplogFindAttempts' times'. This will fail relatively
// quickly in the presence of network errors, allowing us to choose a different sync source.
status = _scheduleLastOplogEntryFetcher_inlock(
- [=](const StatusWith<mongo::Fetcher::QueryResponse>& response,
- mongo::Fetcher::NextAction*,
- mongo::BSONObjBuilder*) mutable {
+ [=, this](const StatusWith<mongo::Fetcher::QueryResponse>& response,
+ mongo::Fetcher::NextAction*,
+ mongo::BSONObjBuilder*) mutable {
_lastOplogEntryFetcherCallbackForDefaultBeginFetchingOpTime(response,
onCompletionGuard);
},
@@ -947,9 +947,9 @@ Status InitialSyncer::_scheduleGetBeginFetchingOpTime_inlock(
_syncSource,
NamespaceString::kSessionTransactionsTableNamespace.db().toString(),
cmd.obj(),
- [=](const StatusWith<mongo::Fetcher::QueryResponse>& response,
- mongo::Fetcher::NextAction*,
- mongo::BSONObjBuilder*) mutable {
+ [=, this](const StatusWith<mongo::Fetcher::QueryResponse>& response,
+ mongo::Fetcher::NextAction*,
+ mongo::BSONObjBuilder*) mutable {
_getBeginFetchingOpTimeCallback(
response, onCompletionGuard, defaultBeginFetchingOpTime);
},
@@ -1019,9 +1019,9 @@ void InitialSyncer::_getBeginFetchingOpTimeCallback(
// which retries up to 'numInitialSyncOplogFindAttempts' times'. This will fail relatively
// quickly in the presence of network errors, allowing us to choose a different sync source.
status = _scheduleLastOplogEntryFetcher_inlock(
- [=](const StatusWith<mongo::Fetcher::QueryResponse>& response,
- mongo::Fetcher::NextAction*,
- mongo::BSONObjBuilder*) mutable {
+ [=, this](const StatusWith<mongo::Fetcher::QueryResponse>& response,
+ mongo::Fetcher::NextAction*,
+ mongo::BSONObjBuilder*) mutable {
_lastOplogEntryFetcherCallbackForBeginApplyingTimestamp(
response, onCompletionGuard, beginFetchingOpTime);
},
@@ -1075,9 +1075,9 @@ void InitialSyncer::_lastOplogEntryFetcherCallbackForBeginApplyingTimestamp(
_syncSource,
NamespaceString::kServerConfigurationNamespace.db().toString(),
queryBob.obj(),
- [=](const StatusWith<mongo::Fetcher::QueryResponse>& response,
- mongo::Fetcher::NextAction*,
- mongo::BSONObjBuilder*) mutable {
+ [=, this](const StatusWith<mongo::Fetcher::QueryResponse>& response,
+ mongo::Fetcher::NextAction*,
+ mongo::BSONObjBuilder*) mutable {
_fcvFetcherCallback(response, onCompletionGuard, lastOpTime, beginFetchingOpTime);
},
ReadPreferenceSetting::secondaryPreferredMetadata(),
@@ -1227,12 +1227,12 @@ void InitialSyncer::_fcvFetcherCallback(const StatusWith<Fetcher::QueryResponse>
std::make_unique<OplogFetcherRestartDecisionInitialSyncer>(
_sharedData.get(), _opts.oplogFetcherMaxFetcherRestarts),
_dataReplicatorExternalState.get(),
- [=](OplogFetcher::Documents::const_iterator first,
- OplogFetcher::Documents::const_iterator last,
- const OplogFetcher::DocumentsInfo& info) {
+ [=, this](OplogFetcher::Documents::const_iterator first,
+ OplogFetcher::Documents::const_iterator last,
+ const OplogFetcher::DocumentsInfo& info) {
return _enqueueDocuments(first, last, info);
},
- [=](const Status& s, int rbid) { _oplogFetcherCallback(s, onCompletionGuard); },
+ [=, this](const Status& s, int rbid) { _oplogFetcherCallback(s, onCompletionGuard); },
std::move(oplogFetcherConfig));
LOGV2_DEBUG(21178,
@@ -1385,9 +1385,9 @@ void InitialSyncer::_allDatabaseClonerCallback(
// strategy used when retrieving collection data, and avoids retrieving all the data and then
// throwing it away due to a transient network outage.
status = _scheduleLastOplogEntryFetcher_inlock(
- [=](const StatusWith<mongo::Fetcher::QueryResponse>& status,
- mongo::Fetcher::NextAction*,
- mongo::BSONObjBuilder*) {
+ [=, this](const StatusWith<mongo::Fetcher::QueryResponse>& status,
+ mongo::Fetcher::NextAction*,
+ mongo::BSONObjBuilder*) {
_lastOplogEntryFetcherCallbackForStopTimestamp(status, onCompletionGuard);
},
kInitialSyncerHandlesRetries);
@@ -1409,31 +1409,31 @@ void InitialSyncer::_lastOplogEntryFetcherCallbackForStopTimestamp(
if (_shouldRetryError(lock, status)) {
auto scheduleStatus =
(*_attemptExec)
- ->scheduleWork(
- [this, onCompletionGuard](executor::TaskExecutor::CallbackArgs args) {
- // It is not valid to schedule the retry from within this callback,
- // hence we schedule a lambda to schedule the retry.
- stdx::lock_guard<Latch> lock(_mutex);
- // Since the stopTimestamp is retrieved after we have done all the
- // work of retrieving collection data, we handle retries within this
- // class by retrying for
- // 'initialSyncTransientErrorRetryPeriodSeconds' (default 24 hours).
- // This is the same retry strategy used when retrieving collection
- // data, and avoids retrieving all the data and then throwing it
- // away due to a transient network outage.
- auto status = _scheduleLastOplogEntryFetcher_inlock(
- [=](const StatusWith<mongo::Fetcher::QueryResponse>& status,
- mongo::Fetcher::NextAction*,
- mongo::BSONObjBuilder*) {
- _lastOplogEntryFetcherCallbackForStopTimestamp(
- status, onCompletionGuard);
- },
- kInitialSyncerHandlesRetries);
- if (!status.isOK()) {
- onCompletionGuard->setResultAndCancelRemainingWork_inlock(
- lock, status);
- }
- });
+ ->scheduleWork([this, onCompletionGuard](
+ executor::TaskExecutor::CallbackArgs args) {
+ // It is not valid to schedule the retry from within this callback,
+ // hence we schedule a lambda to schedule the retry.
+ stdx::lock_guard<Latch> lock(_mutex);
+ // Since the stopTimestamp is retrieved after we have done all the
+ // work of retrieving collection data, we handle retries within this
+ // class by retrying for
+ // 'initialSyncTransientErrorRetryPeriodSeconds' (default 24 hours).
+ // This is the same retry strategy used when retrieving collection
+ // data, and avoids retrieving all the data and then throwing it
+ // away due to a transient network outage.
+ auto status = _scheduleLastOplogEntryFetcher_inlock(
+ [=, this](const StatusWith<mongo::Fetcher::QueryResponse>& status,
+ mongo::Fetcher::NextAction*,
+ mongo::BSONObjBuilder*) {
+ _lastOplogEntryFetcherCallbackForStopTimestamp(
+ status, onCompletionGuard);
+ },
+ kInitialSyncerHandlesRetries);
+ if (!status.isOK()) {
+ onCompletionGuard->setResultAndCancelRemainingWork_inlock(lock,
+ status);
+ }
+ });
if (scheduleStatus.isOK())
return;
// If scheduling failed, we're shutting down and cannot retry.
@@ -1569,7 +1569,7 @@ void InitialSyncer::_getNextApplierBatchCallback(
Date_t lastAppliedWall = ops.back().getWallClockTime();
auto numApplied = ops.size();
- MultiApplier::CallbackFn onCompletionFn = [=](const Status& s) {
+ MultiApplier::CallbackFn onCompletionFn = [=, this](const Status& s) {
return _multiApplierCallback(
s, {lastApplied, lastAppliedWall}, numApplied, onCompletionGuard);
};
@@ -1611,7 +1611,9 @@ void InitialSyncer::_getNextApplierBatchCallback(
auto when = (*_attemptExec)->now() + _opts.getApplierBatchCallbackRetryWait;
status = _scheduleWorkAtAndSaveHandle_inlock(
when,
- [=](const CallbackArgs& args) { _getNextApplierBatchCallback(args, onCompletionGuard); },
+ [=, this](const CallbackArgs& args) {
+ _getNextApplierBatchCallback(args, onCompletionGuard);
+ },
&_getNextApplierBatchHandle,
"_getNextApplierBatchCallback");
if (!status.isOK()) {
@@ -1722,8 +1724,10 @@ void InitialSyncer::_finishInitialSyncAttempt(const StatusWith<OpTimeAndWallTime
// declare the scope guard before the lock guard.
auto result = lastApplied;
ScopeGuard finishCallbackGuard([this, &result] {
- auto scheduleResult = _exec->scheduleWork(
- [=](const mongo::executor::TaskExecutor::CallbackArgs&) { _finishCallback(result); });
+ auto scheduleResult =
+ _exec->scheduleWork([=, this](const mongo::executor::TaskExecutor::CallbackArgs&) {
+ _finishCallback(result);
+ });
if (!scheduleResult.isOK()) {
LOGV2_WARNING(21197,
"Unable to schedule initial syncer completion task due to "
@@ -1811,7 +1815,7 @@ void InitialSyncer::_finishInitialSyncAttempt(const StatusWith<OpTimeAndWallTime
auto when = (*_attemptExec)->now() + _opts.initialSyncRetryWait;
auto status = _scheduleWorkAtAndSaveHandle_inlock(
when,
- [=](const executor::TaskExecutor::CallbackArgs& args) {
+ [=, this](const executor::TaskExecutor::CallbackArgs& args) {
_startInitialSyncAttemptCallback(
args, _stats.failedInitialSyncAttempts, _stats.maxFailedInitialSyncAttempts);
},
@@ -1987,7 +1991,7 @@ void InitialSyncer::_checkApplierProgressAndScheduleGetNextApplierBatch_inlock(
// Get another batch to apply.
// _scheduleWorkAndSaveHandle_inlock() is shutdown-aware.
auto status = _scheduleWorkAndSaveHandle_inlock(
- [=](const executor::TaskExecutor::CallbackArgs& args) {
+ [=, this](const executor::TaskExecutor::CallbackArgs& args) {
return _getNextApplierBatchCallback(args, onCompletionGuard);
},
&_getNextApplierBatchHandle,
@@ -2013,7 +2017,7 @@ void InitialSyncer::_scheduleRollbackCheckerCheckForRollback_inlock(
}
auto scheduleResult =
- _rollbackChecker->checkForRollback([=](const RollbackChecker::Result& result) {
+ _rollbackChecker->checkForRollback([=, this](const RollbackChecker::Result& result) {
_rollbackCheckerCheckForRollbackCallback(result, onCompletionGuard);
});
diff --git a/src/mongo/db/repl/multiapplier.cpp b/src/mongo/db/repl/multiapplier.cpp
index e4c9a20079f..b967899e217 100644
--- a/src/mongo/db/repl/multiapplier.cpp
+++ b/src/mongo/db/repl/multiapplier.cpp
@@ -84,7 +84,7 @@ Status MultiApplier::startup() noexcept {
}
auto scheduleResult = _executor->scheduleWork(
- [=](const executor::TaskExecutor::CallbackArgs& cbd) { return _callback(cbd); });
+ [=, this](const executor::TaskExecutor::CallbackArgs& cbd) { return _callback(cbd); });
if (!scheduleResult.isOK()) {
_state = State::kComplete;
return scheduleResult.getStatus();
diff --git a/src/mongo/db/repl/replication_coordinator_impl.cpp b/src/mongo/db/repl/replication_coordinator_impl.cpp
index 695e8d55298..c402aca5bdd 100644
--- a/src/mongo/db/repl/replication_coordinator_impl.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl.cpp
@@ -552,7 +552,7 @@ bool ReplicationCoordinatorImpl::_startLoadLocalConfig(
// that the server's networking layer be up and running and accepting connections, which
// doesn't happen until startReplication finishes.
auto handle =
- _replExecutor->scheduleWork([=](const executor::TaskExecutor::CallbackArgs& args) {
+ _replExecutor->scheduleWork([=, this](const executor::TaskExecutor::CallbackArgs& args) {
_finishLoadLocalConfig(args, localConfig, lastOpTimeAndWallTimeResult, lastVote);
});
if (handle == ErrorCodes::ShutdownInProgress) {
@@ -816,15 +816,16 @@ void ReplicationCoordinatorImpl::_initialSyncerCompletionFunction(
"error"_attr = opTimeStatus.getStatus());
lock.unlock();
clearSyncSourceDenylist();
- _scheduleWorkAt(_replExecutor->now(),
- [=](const mongo::executor::TaskExecutor::CallbackArgs& cbData) {
- _startInitialSync(
- cc().makeOperationContext().get(),
- [this](const StatusWith<OpTimeAndWallTime>& opTimeStatus) {
- _initialSyncerCompletionFunction(opTimeStatus);
- },
- true /* fallbackToLogical */);
- });
+ _scheduleWorkAt(
+ _replExecutor->now(),
+ [=, this](const mongo::executor::TaskExecutor::CallbackArgs& cbData) {
+ _startInitialSync(
+ cc().makeOperationContext().get(),
+ [this](const StatusWith<OpTimeAndWallTime>& opTimeStatus) {
+ _initialSyncerCompletionFunction(opTimeStatus);
+ },
+ true /* fallbackToLogical */);
+ });
return;
} else {
LOGV2_ERROR(21416,
@@ -2930,7 +2931,7 @@ void ReplicationCoordinatorImpl::stepDown(OperationContext* opCtx,
updateMemberState();
// Schedule work to (potentially) step back up once the stepdown period has ended.
- _scheduleWorkAt(stepDownUntil, [=](const executor::TaskExecutor::CallbackArgs& cbData) {
+ _scheduleWorkAt(stepDownUntil, [=, this](const executor::TaskExecutor::CallbackArgs& cbData) {
_handleTimePassing(cbData);
});
@@ -3535,7 +3536,7 @@ Status ReplicationCoordinatorImpl::processReplSetSyncFrom(OperationContext* opCt
}
Status ReplicationCoordinatorImpl::processReplSetFreeze(int secs, BSONObjBuilder* resultObj) {
- auto result = [=]() {
+ auto result = [=, this]() {
stdx::lock_guard<Latch> lock(_mutex);
return _topCoord->prepareFreezeResponse(_replExecutor->now(), secs, resultObj);
}();
@@ -3767,7 +3768,7 @@ Status ReplicationCoordinatorImpl::_doReplSetReconfig(OperationContext* opCtx,
_setConfigState_inlock(kConfigReconfiguring);
auto configStateGuard =
- ScopeGuard([&] { lockAndCall(&lk, [=] { _setConfigState_inlock(kConfigSteady); }); });
+ ScopeGuard([&] { lockAndCall(&lk, [=, this] { _setConfigState_inlock(kConfigSteady); }); });
ReplSetConfig oldConfig = _rsConfig;
int myIndex = _selfIndex;
@@ -4316,7 +4317,7 @@ Status ReplicationCoordinatorImpl::processReplSetInitiate(OperationContext* opCt
_setConfigState_inlock(kConfigInitiating);
ScopeGuard configStateGuard = [&] {
- lockAndCall(&lk, [=] { _setConfigState_inlock(kConfigUninitialized); });
+ lockAndCall(&lk, [=, this] { _setConfigState_inlock(kConfigUninitialized); });
};
// When writing our first oplog entry below, disable advancement of the stable timestamp so that
@@ -4670,7 +4671,7 @@ ReplicationCoordinatorImpl::_updateMemberStateFromTopologyCoordinator(WithLock l
if (_memberState.removed() && !newState.arbiter()) {
LOGV2(5268000, "Scheduling a task to begin or continue replication");
_scheduleWorkAt(_replExecutor->now(),
- [=](const mongo::executor::TaskExecutor::CallbackArgs& cbData) {
+ [=, this](const mongo::executor::TaskExecutor::CallbackArgs& cbData) {
_externalState->startThreads();
auto opCtx = cc().makeOperationContext();
_startDataReplication(opCtx.get());
@@ -5347,7 +5348,7 @@ void ReplicationCoordinatorImpl::_undenylistSyncSource(
void ReplicationCoordinatorImpl::denylistSyncSource(const HostAndPort& host, Date_t until) {
stdx::lock_guard<Latch> lock(_mutex);
_topCoord->denylistSyncSource(host, until);
- _scheduleWorkAt(until, [=](const executor::TaskExecutor::CallbackArgs& cbData) {
+ _scheduleWorkAt(until, [=, this](const executor::TaskExecutor::CallbackArgs& cbData) {
_undenylistSyncSource(cbData, host);
});
}
diff --git a/src/mongo/db/repl/replication_coordinator_impl_elect_v1.cpp b/src/mongo/db/repl/replication_coordinator_impl_elect_v1.cpp
index 7a4ce7c783a..48f96efcba9 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_elect_v1.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_elect_v1.cpp
@@ -221,7 +221,7 @@ void ReplicationCoordinatorImpl::ElectionState::start(WithLock lk, StartElection
fassert(28685, nextPhaseEvh.getStatus());
_replExecutor
->onEvent(nextPhaseEvh.getValue(),
- [=](const executor::TaskExecutor::CallbackArgs&) {
+ [=, this](const executor::TaskExecutor::CallbackArgs&) {
_processDryRunResult(term, reason);
})
.status_with_transitional_ignore();
@@ -402,7 +402,7 @@ void ReplicationCoordinatorImpl::ElectionState::_requestVotesForRealElection(
fassert(28643, nextPhaseEvh.getStatus());
_replExecutor
->onEvent(nextPhaseEvh.getValue(),
- [=](const executor::TaskExecutor::CallbackArgs&) {
+ [=, this](const executor::TaskExecutor::CallbackArgs&) {
if (MONGO_unlikely(hangBeforeOnVoteRequestCompleteCallback.shouldFail())) {
LOGV2(7277400,
"Hang due to hangBeforeOnVoteRequestCompleteCallback failpoint");
diff --git a/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp b/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp
index 4330baaf036..ce1c96b6c8b 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp
@@ -123,7 +123,7 @@ void ReplicationCoordinatorImpl::_doMemberHeartbeat(executor::TaskExecutor::Call
const RemoteCommandRequest request(
target, "admin", heartbeatObj, BSON(rpc::kReplSetMetadataFieldName << 1), nullptr, timeout);
const executor::TaskExecutor::RemoteCommandCallbackFn callback =
- [=](const executor::TaskExecutor::RemoteCommandCallbackArgs& cbData) {
+ [=, this](const executor::TaskExecutor::RemoteCommandCallbackArgs& cbData) {
return _handleHeartbeatResponse(cbData, replSetName);
};
@@ -149,7 +149,7 @@ void ReplicationCoordinatorImpl::_scheduleHeartbeatToTarget_inlock(const HostAnd
"when"_attr = when);
_trackHeartbeatHandle_inlock(
_replExecutor->scheduleWorkAt(when,
- [=, replSetName = std::move(replSetName)](
+ [=, this, replSetName = std::move(replSetName)](
const executor::TaskExecutor::CallbackArgs& cbData) {
_doMemberHeartbeat(cbData, target, replSetName);
}),
@@ -354,7 +354,7 @@ void ReplicationCoordinatorImpl::_handleHeartbeatResponse(
if (mem && mem->isNewlyAdded()) {
const auto memId = mem->getId();
auto status = _replExecutor->scheduleWork(
- [=](const executor::TaskExecutor::CallbackArgs& cbData) {
+ [=, this](const executor::TaskExecutor::CallbackArgs& cbData) {
_reconfigToRemoveNewlyAddedField(
cbData, memId, _rsConfig.getConfigVersionAndTerm());
});
@@ -445,7 +445,8 @@ stdx::unique_lock<Latch> ReplicationCoordinatorImpl::_handleHeartbeatResponseAct
"Scheduling priority takeover",
"when"_attr = _priorityTakeoverWhen);
_priorityTakeoverCbh = _scheduleWorkAt(
- _priorityTakeoverWhen, [=](const mongo::executor::TaskExecutor::CallbackArgs&) {
+ _priorityTakeoverWhen,
+ [=, this](const mongo::executor::TaskExecutor::CallbackArgs&) {
_startElectSelfIfEligibleV1(StartElectionReasonEnum::kPriorityTakeover);
});
}
@@ -462,7 +463,8 @@ stdx::unique_lock<Latch> ReplicationCoordinatorImpl::_handleHeartbeatResponseAct
"Scheduling catchup takeover",
"when"_attr = _catchupTakeoverWhen);
_catchupTakeoverCbh = _scheduleWorkAt(
- _catchupTakeoverWhen, [=](const mongo::executor::TaskExecutor::CallbackArgs&) {
+ _catchupTakeoverWhen,
+ [=, this](const mongo::executor::TaskExecutor::CallbackArgs&) {
_startElectSelfIfEligibleV1(StartElectionReasonEnum::kCatchupTakeover);
});
}
@@ -512,7 +514,7 @@ executor::TaskExecutor::EventHandle ReplicationCoordinatorImpl::_stepDownStart()
}
_replExecutor
- ->scheduleWork([=](const executor::TaskExecutor::CallbackArgs& cbData) {
+ ->scheduleWork([=, this](const executor::TaskExecutor::CallbackArgs& cbData) {
_stepDownFinish(cbData, finishEvent);
})
.status_with_transitional_ignore();
@@ -658,7 +660,7 @@ void ReplicationCoordinatorImpl::_scheduleHeartbeatReconfig(WithLock lk,
_rsConfig.getConfigVersionAndTerm() < newConfig.getConfigVersionAndTerm() ||
_selfIndex < 0);
_replExecutor
- ->scheduleWork([=](const executor::TaskExecutor::CallbackArgs& cbData) {
+ ->scheduleWork([=, this](const executor::TaskExecutor::CallbackArgs& cbData) {
const auto [swConfig, isSplitRecipientConfig] = _resolveConfigToApply(newConfig);
if (!swConfig.isOK()) {
LOGV2_WARNING(
@@ -679,24 +681,24 @@ void ReplicationCoordinatorImpl::_scheduleHeartbeatReconfig(WithLock lk,
}
LOGV2(8423366, "Waiting for oplog buffer to drain before applying recipient config.");
- _drainForShardSplit().getAsync(
- [this,
- resolvedConfig = swConfig.getValue(),
- replExecutor = _replExecutor.get(),
- isSplitRecipientConfig = isSplitRecipientConfig](Status status) {
- if (!status.isOK()) {
- stdx::lock_guard<Latch> lg(_mutex);
- _setConfigState_inlock(!_rsConfig.isInitialized() ? kConfigUninitialized
- : kConfigSteady);
- return;
- }
-
- replExecutor
- ->scheduleWork([=](const executor::TaskExecutor::CallbackArgs& cbData) {
- _heartbeatReconfigStore(cbData, resolvedConfig, isSplitRecipientConfig);
- })
- .status_with_transitional_ignore();
- });
+ _drainForShardSplit().getAsync([this,
+ resolvedConfig = swConfig.getValue(),
+ replExecutor = _replExecutor.get(),
+ isSplitRecipientConfig =
+ isSplitRecipientConfig](Status status) {
+ if (!status.isOK()) {
+ stdx::lock_guard<Latch> lg(_mutex);
+ _setConfigState_inlock(!_rsConfig.isInitialized() ? kConfigUninitialized
+ : kConfigSteady);
+ return;
+ }
+
+ replExecutor
+ ->scheduleWork([=, this](const executor::TaskExecutor::CallbackArgs& cbData) {
+ _heartbeatReconfigStore(cbData, resolvedConfig, isSplitRecipientConfig);
+ })
+ .status_with_transitional_ignore();
+ });
})
.status_with_transitional_ignore();
}
@@ -938,7 +940,7 @@ void ReplicationCoordinatorImpl::_heartbeatReconfigFinish(
"_heartbeatReconfigFinish until fail point is disabled");
_replExecutor
->scheduleWorkAt(_replExecutor->now() + Milliseconds{10},
- [=](const executor::TaskExecutor::CallbackArgs& cbData) {
+ [=, this](const executor::TaskExecutor::CallbackArgs& cbData) {
_heartbeatReconfigFinish(
cbData, newConfig, myIndex, isSplitRecipientConfig);
})
@@ -963,7 +965,7 @@ void ReplicationCoordinatorImpl::_heartbeatReconfigFinish(
// Wait for the election to complete and the node's Role to be set to follower.
_replExecutor
->onEvent(electionFinishedEvent,
- [=](const executor::TaskExecutor::CallbackArgs& cbData) {
+ [=, this](const executor::TaskExecutor::CallbackArgs& cbData) {
_heartbeatReconfigFinish(
cbData, newConfig, myIndex, isSplitRecipientConfig);
})
diff --git a/src/mongo/db/repl/replication_coordinator_impl_test.cpp b/src/mongo/db/repl/replication_coordinator_impl_test.cpp
index 7754fcddb6d..ae89606f267 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_test.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_test.cpp
@@ -1561,7 +1561,7 @@ protected:
bool force, Milliseconds waitTime, Milliseconds stepDownTime) {
using PromisedClientAndOperation = stdx::promise<SharedClientAndOperation>;
auto task = stdx::packaged_task<boost::optional<Status>(PromisedClientAndOperation)>(
- [=](PromisedClientAndOperation operationPromise) -> boost::optional<Status> {
+ [=, this](PromisedClientAndOperation operationPromise) -> boost::optional<Status> {
auto result = SharedClientAndOperation::make(getServiceContext());
operationPromise.set_value(result);
try {
diff --git a/src/mongo/db/repl/reporter.cpp b/src/mongo/db/repl/reporter.cpp
index c135c179877..5ef1405d4df 100644
--- a/src/mongo/db/repl/reporter.cpp
+++ b/src/mongo/db/repl/reporter.cpp
@@ -142,7 +142,7 @@ Status Reporter::trigger() {
}
auto scheduleResult =
- _executor->scheduleWork([=](const executor::TaskExecutor::CallbackArgs& args) {
+ _executor->scheduleWork([=, this](const executor::TaskExecutor::CallbackArgs& args) {
_prepareAndSendCommandCallback(args, true);
});
@@ -252,7 +252,7 @@ void Reporter::_processResponseCallback(
auto when = _executor->now() + _keepAliveInterval;
bool fromTrigger = false;
auto scheduleResult = _executor->scheduleWorkAt(
- when, [=](const executor::TaskExecutor::CallbackArgs& args) {
+ when, [=, this](const executor::TaskExecutor::CallbackArgs& args) {
_prepareAndSendCommandCallback(args, fromTrigger);
});
_status = scheduleResult.getStatus();
diff --git a/src/mongo/db/repl/sync_source_resolver.cpp b/src/mongo/db/repl/sync_source_resolver.cpp
index 8460ce006d4..29cfb6f1b13 100644
--- a/src/mongo/db/repl/sync_source_resolver.cpp
+++ b/src/mongo/db/repl/sync_source_resolver.cpp
@@ -174,9 +174,9 @@ std::unique_ptr<Fetcher> SyncSourceResolver::_makeFirstOplogEntryFetcher(
<< BSON(OplogEntryBase::kTimestampFieldName
<< 1 << OplogEntryBase::kTermFieldName << 1)
<< ReadConcernArgs::kReadConcernFieldName << ReadConcernArgs::kLocal),
- [=](const StatusWith<Fetcher::QueryResponse>& response,
- Fetcher::NextAction*,
- BSONObjBuilder*) {
+ [=, this](const StatusWith<Fetcher::QueryResponse>& response,
+ Fetcher::NextAction*,
+ BSONObjBuilder*) {
return _firstOplogEntryFetcherCallback(response, candidate, earliestOpTimeSeen);
},
ReadPreferenceSetting::secondaryPreferredMetadata(),
@@ -198,9 +198,9 @@ std::unique_ptr<Fetcher> SyncSourceResolver::_makeRequiredOpTimeFetcher(HostAndP
<< BSON("ts" << BSON("$gte" << _requiredOpTime.getTimestamp() << "$lte"
<< _requiredOpTime.getTimestamp()))
<< ReadConcernArgs::kReadConcernFieldName << ReadConcernArgs::kLocal),
- [=](const StatusWith<Fetcher::QueryResponse>& response,
- Fetcher::NextAction*,
- BSONObjBuilder*) {
+ [=, this](const StatusWith<Fetcher::QueryResponse>& response,
+ Fetcher::NextAction*,
+ BSONObjBuilder*) {
return _requiredOpTimeFetcherCallback(response, candidate, earliestOpTimeSeen, rbid);
},
ReadPreferenceSetting::secondaryPreferredMetadata(),
@@ -401,7 +401,7 @@ Status SyncSourceResolver::_scheduleRBIDRequest(HostAndPort candidate, OpTime ea
invariant(_state == State::kRunning);
auto handle = _taskExecutor->scheduleRemoteCommand(
{candidate, "admin", BSON("replSetGetRBID" << 1), nullptr, kFetcherTimeout},
- [=](const executor::TaskExecutor::RemoteCommandCallbackArgs& rbidReply) {
+ [=, this](const executor::TaskExecutor::RemoteCommandCallbackArgs& rbidReply) {
_rbidRequestCallback(candidate, earliestOpTimeSeen, rbidReply);
});
if (!handle.isOK()) {
diff --git a/src/mongo/db/repl/tenant_file_cloner.cpp b/src/mongo/db/repl/tenant_file_cloner.cpp
index 0da47917aab..3b0ec54cbeb 100644
--- a/src/mongo/db/repl/tenant_file_cloner.cpp
+++ b/src/mongo/db/repl/tenant_file_cloner.cpp
@@ -231,9 +231,10 @@ void TenantFileCloner::handleNextBatch(DBClientCursor& cursor) {
}
// Schedule the next set of writes.
- auto&& scheduleResult = _scheduleFsWorkFn([=](const executor::TaskExecutor::CallbackArgs& cbd) {
- writeDataToFilesystemCallback(cbd);
- });
+ auto&& scheduleResult =
+ _scheduleFsWorkFn([=, this](const executor::TaskExecutor::CallbackArgs& cbd) {
+ writeDataToFilesystemCallback(cbd);
+ });
if (!scheduleResult.isOK()) {
Status newStatus = scheduleResult.getStatus().withContext(
diff --git a/src/mongo/db/repl/tenant_oplog_applier.cpp b/src/mongo/db/repl/tenant_oplog_applier.cpp
index 41b9c1a1738..e7cfd088d24 100644
--- a/src/mongo/db/repl/tenant_oplog_applier.cpp
+++ b/src/mongo/db/repl/tenant_oplog_applier.cpp
@@ -502,7 +502,7 @@ TenantOplogApplier::OpTimePair TenantOplogApplier::_writeNoOpEntries(
if (thread == numOplogThreads - 1) {
numOps = numOpsRemaining;
}
- _writerPool->schedule([=, &status = statusVector.at(thread)](auto scheduleStatus) {
+ _writerPool->schedule([=, this, &status = statusVector.at(thread)](auto scheduleStatus) {
if (!scheduleStatus.isOK()) {
status = scheduleStatus;
} else {
@@ -521,18 +521,19 @@ TenantOplogApplier::OpTimePair TenantOplogApplier::_writeNoOpEntries(
// Dispatch noop writes for oplog entries from the same session into the same writer thread.
size_t sessionThreadNum = 0;
for (const auto& s : sessionOps) {
- _writerPool->schedule([=, &status = statusVector.at(numOplogThreads + sessionThreadNum)](
- auto scheduleStatus) {
- if (!scheduleStatus.isOK()) {
- status = scheduleStatus;
- } else {
- try {
- _writeSessionNoOpsForRange(s.second.begin(), s.second.end());
- } catch (const DBException& e) {
- status = e.toStatus();
+ _writerPool->schedule(
+ [=, this, &status = statusVector.at(numOplogThreads + sessionThreadNum)](
+ auto scheduleStatus) {
+ if (!scheduleStatus.isOK()) {
+ status = scheduleStatus;
+ } else {
+ try {
+ _writeSessionNoOpsForRange(s.second.begin(), s.second.end());
+ } catch (const DBException& e) {
+ status = e.toStatus();
+ }
}
- }
- });
+ });
sessionThreadNum++;
}
diff --git a/src/mongo/db/s/config/set_cluster_parameter_coordinator.h b/src/mongo/db/s/config/set_cluster_parameter_coordinator.h
index 13d6c755bb5..aa821ed92d8 100644
--- a/src/mongo/db/s/config/set_cluster_parameter_coordinator.h
+++ b/src/mongo/db/s/config/set_cluster_parameter_coordinator.h
@@ -79,7 +79,7 @@ private:
template <typename Func>
auto _buildPhaseHandler(const Phase& newPhase, Func&& handlerFn) {
- return [=] {
+ return [=, this] {
const auto& currPhase = _doc.getPhase();
if (currPhase > newPhase) {
diff --git a/src/mongo/db/s/config/set_user_write_block_mode_coordinator.h b/src/mongo/db/s/config/set_user_write_block_mode_coordinator.h
index d30bb66e471..23dbd874e72 100644
--- a/src/mongo/db/s/config/set_user_write_block_mode_coordinator.h
+++ b/src/mongo/db/s/config/set_user_write_block_mode_coordinator.h
@@ -63,7 +63,7 @@ private:
template <typename Func>
auto _buildPhaseHandler(const Phase& newPhase, Func&& handlerFn) {
- return [=] {
+ return [=, this] {
const auto& currPhase = _doc.getPhase();
if (currPhase > newPhase) {
diff --git a/src/mongo/db/s/metrics/sharding_data_transform_metrics_test_fixture.h b/src/mongo/db/s/metrics/sharding_data_transform_metrics_test_fixture.h
index 73a6fa3fe9f..2858cf3d519 100644
--- a/src/mongo/db/s/metrics/sharding_data_transform_metrics_test_fixture.h
+++ b/src/mongo/db/s/metrics/sharding_data_transform_metrics_test_fixture.h
@@ -359,7 +359,7 @@ protected:
: kNoSpecialBehavior;
auto& done = threadPFs[i].promise;
threads.emplace_back(
- [=, &storage, specialBehavior = std::move(specialBehavior), &done] {
+ [=, this, &storage, specialBehavior = std::move(specialBehavior), &done] {
performRandomOperations(
storage, kIterations, kRemovalOdds, seed, specialBehavior);
done.emplaceValue();
diff --git a/src/mongo/db/s/rename_collection_participant_service.h b/src/mongo/db/s/rename_collection_participant_service.h
index 041bc31df58..920c4bdcc96 100644
--- a/src/mongo/db/s/rename_collection_participant_service.h
+++ b/src/mongo/db/s/rename_collection_participant_service.h
@@ -153,7 +153,7 @@ private:
template <typename Func>
auto _buildPhaseHandler(const Phase& newPhase, Func&& handlerFn) {
- return [=] {
+ return [=, this] {
const auto& currPhase = _doc.getPhase();
if (currPhase > newPhase) {
diff --git a/src/mongo/db/s/session_catalog_migration_destination.cpp b/src/mongo/db/s/session_catalog_migration_destination.cpp
index 7a3ca5b75bd..9ea0a5021b7 100644
--- a/src/mongo/db/s/session_catalog_migration_destination.cpp
+++ b/src/mongo/db/s/session_catalog_migration_destination.cpp
@@ -217,7 +217,7 @@ void SessionCatalogMigrationDestination::start(ServiceContext* service) {
_state = State::Migrating;
}
- _thread = stdx::thread([=] {
+ _thread = stdx::thread([=, this] {
try {
_retrieveSessionStateFromSource(service);
} catch (const DBException& ex) {
diff --git a/src/mongo/db/s/shard_server_catalog_cache_loader.cpp b/src/mongo/db/s/shard_server_catalog_cache_loader.cpp
index c3f0ad9d603..5f79e3f35b9 100644
--- a/src/mongo/db/s/shard_server_catalog_cache_loader.cpp
+++ b/src/mongo/db/s/shard_server_catalog_cache_loader.cpp
@@ -453,7 +453,7 @@ SemiFuture<CollectionAndChangedChunks> ShardServerCatalogCacheLoader::getChunksS
}();
return ExecutorFuture<void>(_executor)
- .then([=]() {
+ .then([=, this]() {
ThreadClient tc("ShardServerCatalogCacheLoader::getChunksSince",
getGlobalServiceContext());
auto context = _contexts.makeOperationContext(*tc);
diff --git a/src/mongo/db/s/sharding_ddl_coordinator.h b/src/mongo/db/s/sharding_ddl_coordinator.h
index db0827d1142..722388c00ed 100644
--- a/src/mongo/db/s/sharding_ddl_coordinator.h
+++ b/src/mongo/db/s/sharding_ddl_coordinator.h
@@ -277,7 +277,7 @@ protected:
template <typename Func>
auto _buildPhaseHandler(const Phase& newPhase, Func&& handlerFn) {
- return [=] {
+ return [=, this] {
const auto& currPhase = _doc.getPhase();
if (currPhase > newPhase) {
diff --git a/src/mongo/db/session/sessions_collection_mock.cpp b/src/mongo/db/session/sessions_collection_mock.cpp
index 793d996cd29..4836e728d1c 100644
--- a/src/mongo/db/session/sessions_collection_mock.cpp
+++ b/src/mongo/db/session/sessions_collection_mock.cpp
@@ -35,8 +35,8 @@
namespace mongo {
MockSessionsCollectionImpl::MockSessionsCollectionImpl()
- : _refresh([=](const LogicalSessionRecordSet& sessions) { _refreshSessions(sessions); }),
- _remove([=](const LogicalSessionIdSet& sessions) { _removeRecords(sessions); }) {}
+ : _refresh([=, this](const LogicalSessionRecordSet& sessions) { _refreshSessions(sessions); }),
+ _remove([=, this](const LogicalSessionIdSet& sessions) { _removeRecords(sessions); }) {}
void MockSessionsCollectionImpl::setRefreshHook(RefreshHook hook) {
_refresh = std::move(hook);
@@ -47,10 +47,10 @@ void MockSessionsCollectionImpl::setRemoveHook(RemoveHook hook) {
}
void MockSessionsCollectionImpl::clearHooks() {
- _refresh = [=](const LogicalSessionRecordSet& sessions) {
+ _refresh = [=, this](const LogicalSessionRecordSet& sessions) {
_refreshSessions(sessions);
};
- _remove = [=](const LogicalSessionIdSet& sessions) {
+ _remove = [=, this](const LogicalSessionIdSet& sessions) {
_removeRecords(sessions);
};
}
diff --git a/src/mongo/dbtests/threadedtests.cpp b/src/mongo/dbtests/threadedtests.cpp
index 9486227a1cc..8d046cc504a 100644
--- a/src/mongo/dbtests/threadedtests.cpp
+++ b/src/mongo/dbtests/threadedtests.cpp
@@ -79,7 +79,7 @@ private:
if (!remaining)
return;
- stdx::thread athread([=] { subthread(remaining); });
+ stdx::thread athread([=, this] { subthread(remaining); });
launch_subthreads(remaining - 1);
athread.join();
}
@@ -134,7 +134,7 @@ public:
tp.startup();
for (unsigned i = 0; i < iterations; i++) {
- tp.schedule([=](auto status) {
+ tp.schedule([=, this](auto status) {
ASSERT_OK(status);
increment(2);
});
diff --git a/src/mongo/embedded/stitch_support/stitch_support_test.cpp b/src/mongo/embedded/stitch_support/stitch_support_test.cpp
index f1c0ab59b4b..443464596f9 100644
--- a/src/mongo/embedded/stitch_support/stitch_support_test.cpp
+++ b/src/mongo/embedded/stitch_support/stitch_support_test.cpp
@@ -98,7 +98,7 @@ protected:
ASSERT(matcher);
ON_BLOCK_EXIT([matcher] { stitch_support_v1_matcher_destroy(matcher); });
return std::all_of(
- documentsJSON.begin(), documentsJSON.end(), [=](const char* documentJSON) {
+ documentsJSON.begin(), documentsJSON.end(), [=, this](const char* documentJSON) {
bool isMatch;
stitch_support_v1_check_match(
matcher, toBSONForAPI(documentJSON).first, &isMatch, nullptr);
@@ -153,7 +153,7 @@ protected:
std::transform(documentsJSON.begin(),
documentsJSON.end(),
std::back_inserter(results),
- [=](const char* documentJSON) {
+ [=, this](const char* documentJSON) {
auto bson = stitch_support_v1_projection_apply(
projection, toBSONForAPI(documentJSON).first, nullptr);
auto result = fromBSONForAPI(bson);
diff --git a/src/mongo/executor/mock_async_rpc.h b/src/mongo/executor/mock_async_rpc.h
index 5af1ae70d07..5e0b5820a5e 100644
--- a/src/mongo/executor/mock_async_rpc.h
+++ b/src/mongo/executor/mock_async_rpc.h
@@ -109,7 +109,7 @@ public:
.onError([](Status s) -> StatusWith<std::vector<HostAndPort>> {
return Status{AsyncRPCErrorInfo(s), "Remote command execution failed"};
})
- .then([=, f = std::move(f), p = std::move(p), dbName = dbName.toString()](
+ .then([=, this, f = std::move(f), p = std::move(p), dbName = dbName.toString()](
auto&& targets) mutable {
stdx::lock_guard lg{_m};
*targetsAttempted = targets;
diff --git a/src/mongo/executor/task_executor_test_common.cpp b/src/mongo/executor/task_executor_test_common.cpp
index b54c5a6adfb..4e2eb775a74 100644
--- a/src/mongo/executor/task_executor_test_common.cpp
+++ b/src/mongo/executor/task_executor_test_common.cpp
@@ -287,7 +287,8 @@ EventChainAndWaitingTest::~EventChainAndWaitingTest() {
}
void EventChainAndWaitingTest::run() {
- executor->onEvent(goEvent, [=](const TaskExecutor::CallbackArgs& cbData) { onGo(cbData); })
+ executor
+ ->onEvent(goEvent, [=, this](const TaskExecutor::CallbackArgs& cbData) { onGo(cbData); })
.status_with_transitional_ignore();
executor->signalEvent(goEvent);
executor->waitForEvent(goEvent);
@@ -340,8 +341,9 @@ void EventChainAndWaitingTest::onGo(const TaskExecutor::CallbackArgs& cbData) {
return;
}
- cbHandle = executor->onEvent(
- goEvent, [=](const TaskExecutor::CallbackArgs& cbData) { onGoAfterTriggered(cbData); });
+ cbHandle = executor->onEvent(goEvent, [=, this](const TaskExecutor::CallbackArgs& cbData) {
+ onGoAfterTriggered(cbData);
+ });
if (!cbHandle.isOK()) {
status1 = cbHandle.getStatus();
executor->shutdown();
diff --git a/src/mongo/scripting/SConscript b/src/mongo/scripting/SConscript
index fb5b8ccc41e..c9b283bd822 100644
--- a/src/mongo/scripting/SConscript
+++ b/src/mongo/scripting/SConscript
@@ -7,7 +7,14 @@ Import([
'serverJs',
])
-env.Library(
+scripting_common_env = env.Clone()
+# TODO(SERVER-77205): Review and Possibly Remove '-Wno-deprecated' After Mozjs Update
+scripting_common_env.Append(
+ CXXFLAGS=[] if scripting_common_env.TargetOSIs('windows') else [
+ '-Wno-deprecated',
+ ], )
+
+scripting_common_env.Library(
target='scripting_common',
source=[
'deadline_monitor.cpp',
@@ -54,8 +61,9 @@ if jsEngine:
# TODO(SERVER-59992): Remove -Wno-class-memacces where possible.
'-Wno-unknown-warning-option',
'-Wno-class-memaccess',
+ # TODO(SERVER-77205): Review and Possibly Remove '-Wno-deprecated' After Mozjs Update
+ '-Wno-deprecated',
], )
-
scriptingEnv.InjectMozJS()
scriptingEnv.JSHeader(
diff --git a/src/mongo/shell/SConscript b/src/mongo/shell/SConscript
index e43690d06db..a8af390807f 100644
--- a/src/mongo/shell/SConscript
+++ b/src/mongo/shell/SConscript
@@ -210,8 +210,9 @@ if get_option('ssl') == 'on':
# TODO(SERVER-59992): Remove -Wno-class-memacces where possible.
'-Wno-unknown-warning-option',
'-Wno-class-memaccess',
+ # TODO(SERVER-77205): Review and Possibly Remove '-Wno-deprecated' After Mozjs Update
+ '-Wno-deprecated',
], )
-
scriptingEnv.InjectMozJS()
scriptingEnv.Library(
diff --git a/src/mongo/transport/asio/asio_transport_layer.cpp b/src/mongo/transport/asio/asio_transport_layer.cpp
index a31bec2aa06..0c1997d05aa 100644
--- a/src/mongo/transport/asio/asio_transport_layer.cpp
+++ b/src/mongo/transport/asio/asio_transport_layer.cpp
@@ -483,7 +483,7 @@ public:
// Then, if the numeric (IP address) lookup failed, we fall back to DNS or return the error
// from the resolver.
return _resolve(peer, flags | Resolver::numeric_host, enableIPv6)
- .onError([=](Status) { return _resolve(peer, flags, enableIPv6); })
+ .onError([=, this](Status) { return _resolve(peer, flags, enableIPv6); })
.getNoThrow();
}
@@ -495,9 +495,8 @@ public:
// We follow the same numeric -> hostname fallback procedure as the synchronous resolver
// function for setting resolver flags (see above).
const auto flags = Resolver::numeric_service;
- return _asyncResolve(peer, flags | Resolver::numeric_host, enableIPv6).onError([=](Status) {
- return _asyncResolve(peer, flags, enableIPv6);
- });
+ return _asyncResolve(peer, flags | Resolver::numeric_host, enableIPv6)
+ .onError([=, this](Status) { return _asyncResolve(peer, flags, enableIPv6); });
}
void cancel() {
diff --git a/src/mongo/transport/session_workflow_test.cpp b/src/mongo/transport/session_workflow_test.cpp
index ad87fad7a29..7406f369ffa 100644
--- a/src/mongo/transport/session_workflow_test.cpp
+++ b/src/mongo/transport/session_workflow_test.cpp
@@ -336,7 +336,7 @@ private:
std::unique_ptr<MockServiceEntryPoint> _makeServiceEntryPoint(ServiceContext* sc) {
auto sep = std::make_unique<MockServiceEntryPoint>(sc);
- sep->handleRequestCb = [=](OperationContext* opCtx, const Message& msg) {
+ sep->handleRequestCb = [=, this](OperationContext* opCtx, const Message& msg) {
if (!gInitialUseDedicatedThread) {
// Simulates an async command implemented under the borrowed
// thread model. The returned future will be fulfilled while
@@ -359,7 +359,7 @@ private:
}
return _onMockEvent<Event::sepHandleRequest>(std::tie(opCtx, msg));
};
- sep->onEndSessionCb = [=](const std::shared_ptr<Session>& session) {
+ sep->onEndSessionCb = [=, this](const std::shared_ptr<Session>& session) {
_onMockEvent<Event::sepEndSession>(std::tie(session));
};
sep->derivedOnClientDisconnectCb = [&](Client*) {
diff --git a/src/third_party/mozjs/SConscript b/src/third_party/mozjs/SConscript
index a861c315d5f..43d34113c26 100644
--- a/src/third_party/mozjs/SConscript
+++ b/src/third_party/mozjs/SConscript
@@ -78,11 +78,14 @@ if env.TargetOSIs('windows'):
'/wd4003',
], )
else:
- env.Append(CXXFLAGS=[
- '-Wno-non-virtual-dtor',
- '-Wno-invalid-offsetof',
- '-Wno-sign-compare',
- ], )
+ env.Append(
+ CXXFLAGS=[
+ '-Wno-non-virtual-dtor',
+ '-Wno-invalid-offsetof',
+ '-Wno-sign-compare',
+ # TODO(SERVER-77205): Review and Possibly Remove '-Wno-deprecated' After Mozjs Update
+ '-Wno-deprecated',
+ ], )
# js/src, js/public and mfbt are the only required sources right now, that
# could change in the future