Diffstat (limited to 'src')
21 files changed, 126 insertions, 167 deletions
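The commit below replaces stdx::bind / stdx::placeholders call sites with equivalent lambdas. As a rough guide to reading the hunks, the following standalone sketch (plain std:: rather than MongoDB's stdx:: shim, with a hypothetical Worker class) shows the shape of the translation: a member function bound with a forwarded placeholder and a fixed trailing argument becomes a lambda that captures this and forwards the argument explicitly.

#include <functional>
#include <iostream>
#include <string>

// Stand-in for a class whose member function is handed out as a callback.
class Worker {
public:
    void handle(int value, const std::string& tag) {
        std::cout << tag << ": " << value << "\n";
    }

    // Old style: bind the member function, forward the first call argument
    // via a placeholder, and fix the trailing argument up front.
    std::function<void(int)> makeCallbackWithBind() {
        return std::bind(&Worker::handle, this, std::placeholders::_1, std::string("bound"));
    }

    // New style: capture `this` and forward the argument explicitly.
    std::function<void(int)> makeCallbackWithLambda() {
        return [this](int value) { handle(value, "lambda"); };
    }
};

int main() {
    Worker w;
    w.makeCallbackWithBind()(1);    // prints "bound: 1"
    w.makeCallbackWithLambda()(2);  // prints "lambda: 2"
}

The lambda form is what most hunks below use; where the bound argument has to vary per registration (the REGISTER_EXPRESSION sites in expression.cpp), the commit uses a small functor instead, sketched after the diff.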
diff --git a/src/mongo/bson/util/bson_extract_test.cpp b/src/mongo/bson/util/bson_extract_test.cpp index 7681fbbf7a4..6778c01aa94 100644 --- a/src/mongo/bson/util/bson_extract_test.cpp +++ b/src/mongo/bson/util/bson_extract_test.cpp @@ -140,7 +140,7 @@ TEST(ExtractBSON, ExtractIntegerField) { ASSERT_EQUALS(-(1LL << 55), v); ASSERT_OK(bsonExtractIntegerField(BSON("a" << 5178), "a", &v)); ASSERT_EQUALS(5178, v); - auto pred = stdx::bind(std::greater<long long>(), stdx::placeholders::_1, 0); + auto pred = [](long long x) { return x > 0; }; ASSERT_OK(bsonExtractIntegerFieldWithDefaultIf(BSON("a" << 1), "a", -1LL, pred, &v)); ASSERT_OK(bsonExtractIntegerFieldWithDefaultIf(BSON("a" << 1), "b", 1LL, pred, &v)); auto msg = "'a' has to be greater than zero"; diff --git a/src/mongo/client/fetcher.cpp b/src/mongo/client/fetcher.cpp index 7128c2800bd..05ca4159810 100644 --- a/src/mongo/client/fetcher.cpp +++ b/src/mongo/client/fetcher.cpp @@ -186,7 +186,7 @@ Fetcher::Fetcher(executor::TaskExecutor* executor, _firstRemoteCommandScheduler( _executor, RemoteCommandRequest(_source, _dbname, _cmdObj, _metadata, nullptr, _findNetworkTimeout), - stdx::bind(&Fetcher::_callback, this, stdx::placeholders::_1, kFirstBatchFieldName), + [this](const auto& x) { return _callback(x, kFirstBatchFieldName); }, std::move(firstCommandRetryPolicy)) { uassert(ErrorCodes::BadValue, "callback function cannot be null", work); } @@ -318,7 +318,7 @@ Status Fetcher::_scheduleGetMore(const BSONObj& cmdObj) { _executor->scheduleRemoteCommand( RemoteCommandRequest( _source, _dbname, cmdObj, _metadata, nullptr, _getMoreNetworkTimeout), - stdx::bind(&Fetcher::_callback, this, stdx::placeholders::_1, kNextBatchFieldName)); + [this](const auto& x) { return _callback(x, kNextBatchFieldName); }); if (!scheduleResult.isOK()) { return scheduleResult.getStatus(); diff --git a/src/mongo/client/fetcher_test.cpp b/src/mongo/client/fetcher_test.cpp index 1bc8b9008b6..684d7de0532 100644 --- a/src/mongo/client/fetcher_test.cpp +++ b/src/mongo/client/fetcher_test.cpp @@ -101,11 +101,7 @@ FetcherTest::FetcherTest() : status(getDetectableErrorStatus()), cursorId(-1), nextAction(Fetcher::NextAction::kInvalid) {} Fetcher::CallbackFn FetcherTest::makeCallback() { - return stdx::bind(&FetcherTest::_callback, - this, - stdx::placeholders::_1, - stdx::placeholders::_2, - stdx::placeholders::_3); + return [this](const auto& x, const auto& y, const auto& z) { return _callback(x, y, z); }; } void FetcherTest::setUp() { @@ -1012,13 +1008,9 @@ TEST_F(FetcherTest, ShutdownDuringSecondBatch) { const BSONObj doc2 = BSON("_id" << 2); bool isShutdownCalled = false; - callbackHook = stdx::bind(shutdownDuringSecondBatch, - stdx::placeholders::_1, - stdx::placeholders::_2, - stdx::placeholders::_3, - doc2, - &getExecutor(), - &isShutdownCalled); + callbackHook = [this, doc2, &isShutdownCalled](const auto& x, const auto& y, const auto& z) { + return shutdownDuringSecondBatch(x, y, z, doc2, &getExecutor(), &isShutdownCalled); + }; processNetworkResponse(BSON("cursor" << BSON("id" << 1LL << "ns" << "db.coll" diff --git a/src/mongo/client/remote_command_retry_scheduler.cpp b/src/mongo/client/remote_command_retry_scheduler.cpp index 08e8d4477a8..017f8ebf308 100644 --- a/src/mongo/client/remote_command_retry_scheduler.cpp +++ b/src/mongo/client/remote_command_retry_scheduler.cpp @@ -223,9 +223,7 @@ std::string RemoteCommandRetryScheduler::toString() const { Status RemoteCommandRetryScheduler::_schedule_inlock() { ++_currentAttempt; auto scheduleResult = 
_executor->scheduleRemoteCommand( - _request, - stdx::bind( - &RemoteCommandRetryScheduler::_remoteCommandCallback, this, stdx::placeholders::_1)); + _request, [this](const auto& x) { return _remoteCommandCallback(x); }); if (!scheduleResult.isOK()) { return scheduleResult.getStatus(); diff --git a/src/mongo/db/auth/authz_manager_external_state_local.cpp b/src/mongo/db/auth/authz_manager_external_state_local.cpp index ad8b34fbce5..3c340c5cada 100644 --- a/src/mongo/db/auth/authz_manager_external_state_local.cpp +++ b/src/mongo/db/auth/authz_manager_external_state_local.cpp @@ -493,7 +493,7 @@ Status AuthzManagerExternalStateLocal::_initializeRoleGraph(OperationContext* op AuthorizationManager::rolesCollectionNamespace, BSONObj(), BSONObj(), - stdx::bind(addRoleFromDocumentOrWarn, &newRoleGraph, stdx::placeholders::_1)); + [p = &newRoleGraph](const auto& x) { return addRoleFromDocumentOrWarn(p, x); }); if (!status.isOK()) return status; diff --git a/src/mongo/db/catalog/database_impl.cpp b/src/mongo/db/catalog/database_impl.cpp index dc044477718..fdbbc1db591 100644 --- a/src/mongo/db/catalog/database_impl.cpp +++ b/src/mongo/db/catalog/database_impl.cpp @@ -1079,20 +1079,17 @@ auto mongo::userCreateNSImpl(OperationContext* opCtx, } status = - validateStorageOptions(collectionOptions.storageEngine, - stdx::bind(&StorageEngine::Factory::validateCollectionStorageOptions, - stdx::placeholders::_1, - stdx::placeholders::_2)); + validateStorageOptions(collectionOptions.storageEngine, [](const auto& x, const auto& y) { + return x->validateCollectionStorageOptions(y); + }); if (!status.isOK()) return status; if (auto indexOptions = collectionOptions.indexOptionDefaults["storageEngine"]) { - status = - validateStorageOptions(indexOptions.Obj(), - stdx::bind(&StorageEngine::Factory::validateIndexStorageOptions, - stdx::placeholders::_1, - stdx::placeholders::_2)); + status = validateStorageOptions(indexOptions.Obj(), [](const auto& x, const auto& y) { + return x->validateIndexStorageOptions(y); + }); if (!status.isOK()) { return status; diff --git a/src/mongo/db/catalog/index_catalog_impl.cpp b/src/mongo/db/catalog/index_catalog_impl.cpp index 071a134963a..b92c23b02f5 100644 --- a/src/mongo/db/catalog/index_catalog_impl.cpp +++ b/src/mongo/db/catalog/index_catalog_impl.cpp @@ -734,10 +734,9 @@ Status IndexCatalogImpl::_isSpecOk(OperationContext* opCtx, const BSONObj& spec) "Please remove the field or include valid options."); } Status storageEngineStatus = - validateStorageOptions(storageEngineOptions, - stdx::bind(&StorageEngine::Factory::validateIndexStorageOptions, - stdx::placeholders::_1, - stdx::placeholders::_2)); + validateStorageOptions(storageEngineOptions, [](const auto& x, const auto& y) { + return x->validateIndexStorageOptions(y); + }); if (!storageEngineStatus.isOK()) { return storageEngineStatus; } diff --git a/src/mongo/db/ftdc/controller.cpp b/src/mongo/db/ftdc/controller.cpp index bc3d4df444c..c8748cc8b02 100644 --- a/src/mongo/db/ftdc/controller.cpp +++ b/src/mongo/db/ftdc/controller.cpp @@ -139,7 +139,7 @@ void FTDCController::start() { << _path.generic_string() << "'"; // Start the thread - _thread = stdx::thread(stdx::bind(&FTDCController::doLoop, this)); + _thread = stdx::thread([this] { doLoop(); }); { stdx::lock_guard<stdx::mutex> lock(_mutex); diff --git a/src/mongo/db/keys_collection_manager_sharding.cpp b/src/mongo/db/keys_collection_manager_sharding.cpp index b60db4dc112..8baf74d073c 100644 --- a/src/mongo/db/keys_collection_manager_sharding.cpp +++ 
b/src/mongo/db/keys_collection_manager_sharding.cpp @@ -323,12 +323,9 @@ void KeysCollectionManagerSharding::PeriodicRunner::start(ServiceContext* servic invariant(!_backgroundThread.joinable()); invariant(!_inShutdown); - _backgroundThread = - stdx::thread(stdx::bind(&KeysCollectionManagerSharding::PeriodicRunner::_doPeriodicRefresh, - this, - service, - threadName, - refreshInterval)); + _backgroundThread = stdx::thread([this, service, threadName, refreshInterval] { + _doPeriodicRefresh(service, threadName, refreshInterval); + }); } void KeysCollectionManagerSharding::PeriodicRunner::stop() { diff --git a/src/mongo/db/pipeline/expression.cpp b/src/mongo/db/pipeline/expression.cpp index 28f57d7061b..3c3156b3435 100644 --- a/src/mongo/db/pipeline/expression.cpp +++ b/src/mongo/db/pipeline/expression.cpp @@ -705,48 +705,26 @@ Value ExpressionCoerceToBool::serialize(bool explain) const { /* ----------------------- ExpressionCompare --------------------------- */ -REGISTER_EXPRESSION(cmp, - stdx::bind(ExpressionCompare::parse, - stdx::placeholders::_1, - stdx::placeholders::_2, - stdx::placeholders::_3, - ExpressionCompare::CMP)); -REGISTER_EXPRESSION(eq, - stdx::bind(ExpressionCompare::parse, - stdx::placeholders::_1, - stdx::placeholders::_2, - stdx::placeholders::_3, - ExpressionCompare::EQ)); -REGISTER_EXPRESSION(gt, - stdx::bind(ExpressionCompare::parse, - stdx::placeholders::_1, - stdx::placeholders::_2, - stdx::placeholders::_3, - ExpressionCompare::GT)); -REGISTER_EXPRESSION(gte, - stdx::bind(ExpressionCompare::parse, - stdx::placeholders::_1, - stdx::placeholders::_2, - stdx::placeholders::_3, - ExpressionCompare::GTE)); -REGISTER_EXPRESSION(lt, - stdx::bind(ExpressionCompare::parse, - stdx::placeholders::_1, - stdx::placeholders::_2, - stdx::placeholders::_3, - ExpressionCompare::LT)); -REGISTER_EXPRESSION(lte, - stdx::bind(ExpressionCompare::parse, - stdx::placeholders::_1, - stdx::placeholders::_2, - stdx::placeholders::_3, - ExpressionCompare::LTE)); -REGISTER_EXPRESSION(ne, - stdx::bind(ExpressionCompare::parse, - stdx::placeholders::_1, - stdx::placeholders::_2, - stdx::placeholders::_3, - ExpressionCompare::NE)); +namespace { +struct BoundOp { + ExpressionCompare::CmpOp op; + + auto operator()(const boost::intrusive_ptr<ExpressionContext>& expCtx, + BSONElement bsonExpr, + const VariablesParseState& vps) const { + return ExpressionCompare::parse(expCtx, std::move(bsonExpr), vps, op); + } +}; +} // namespace + +REGISTER_EXPRESSION(cmp, BoundOp{ExpressionCompare::CMP}); +REGISTER_EXPRESSION(eq, BoundOp{ExpressionCompare::EQ}); +REGISTER_EXPRESSION(gt, BoundOp{ExpressionCompare::GT}); +REGISTER_EXPRESSION(gte, BoundOp{ExpressionCompare::GTE}); +REGISTER_EXPRESSION(lt, BoundOp{ExpressionCompare::LT}); +REGISTER_EXPRESSION(lte, BoundOp{ExpressionCompare::LTE}); +REGISTER_EXPRESSION(ne, BoundOp{ExpressionCompare::NE}); + intrusive_ptr<Expression> ExpressionCompare::parse( const boost::intrusive_ptr<ExpressionContext>& expCtx, BSONElement bsonExpr, diff --git a/src/mongo/db/repl/bgsync.cpp b/src/mongo/db/repl/bgsync.cpp index 24024b8a4af..1a8246d9136 100644 --- a/src/mongo/db/repl/bgsync.cpp +++ b/src/mongo/db/repl/bgsync.cpp @@ -155,7 +155,7 @@ void BackgroundSync::startup(OperationContext* opCtx) { _oplogBuffer->startup(opCtx); invariant(!_producerThread); - _producerThread.reset(new stdx::thread(stdx::bind(&BackgroundSync::_run, this))); + _producerThread.reset(new stdx::thread([this] { _run(); })); } void BackgroundSync::shutdown(OperationContext* opCtx) { @@ -444,11 +444,9 @@ 
void BackgroundSync::_produce() { syncSourceResp.rbid, true /* requireFresherSyncSource */, &dataReplicatorExternalState, - stdx::bind(&BackgroundSync::_enqueueDocuments, - this, - stdx::placeholders::_1, - stdx::placeholders::_2, - stdx::placeholders::_3), + [this](const auto& a1, const auto& a2, const auto& a3) { + return _enqueueDocuments(a1, a2, a3); + }, onOplogFetcherShutdownCallbackFn, bgSyncOplogFetcherBatchSize); stdx::lock_guard<stdx::mutex> lock(_mutex); diff --git a/src/mongo/db/repl/check_quorum_for_config_change_test.cpp b/src/mongo/db/repl/check_quorum_for_config_change_test.cpp index 696e66764e3..a81c5cd9d8c 100644 --- a/src/mongo/db/repl/check_quorum_for_config_change_test.cpp +++ b/src/mongo/db/repl/check_quorum_for_config_change_test.cpp @@ -97,7 +97,7 @@ void CheckQuorumTest::startQuorumCheck(const ReplSetConfig& config, int myIndex) ASSERT_FALSE(_quorumCheckThread); _isQuorumCheckDone = false; _quorumCheckThread.reset( - new stdx::thread(stdx::bind(&CheckQuorumTest::_runQuorumCheck, this, config, myIndex))); + new stdx::thread([this, config, myIndex] { _runQuorumCheck(config, myIndex); })); } Status CheckQuorumTest::waitForQuorumCheck() { diff --git a/src/mongo/db/repl/collection_cloner_test.cpp b/src/mongo/db/repl/collection_cloner_test.cpp index dfe09f3ee2f..c3f2bd92cef 100644 --- a/src/mongo/db/repl/collection_cloner_test.cpp +++ b/src/mongo/db/repl/collection_cloner_test.cpp @@ -63,8 +63,13 @@ public: BaseCloner* getCloner() const override; protected: + auto setStatusCallback() { + return [this](const Status& s) { setStatus(s); }; + } + void setUp() override; void tearDown() override; + std::vector<BSONObj> makeSecondaryIndexSpecs(const NamespaceString& nss); // A simple arbitrary value to use as the default batch size. @@ -84,16 +89,15 @@ void CollectionClonerTest::setUp() { BaseClonerTest::setUp(); options = {}; collectionCloner.reset(nullptr); - collectionCloner = stdx::make_unique<CollectionCloner>( - &getExecutor(), - dbWorkThreadPool.get(), - target, - nss, - options, - stdx::bind(&CollectionClonerTest::setStatus, this, stdx::placeholders::_1), - storageInterface.get(), - defaultBatchSize, - defaultNumCloningCursors); + collectionCloner = stdx::make_unique<CollectionCloner>(&getExecutor(), + dbWorkThreadPool.get(), + target, + nss, + options, + setStatusCallback(), + storageInterface.get(), + defaultBatchSize, + defaultNumCloningCursors); collectionStats = CollectionMockStats(); storageInterface->createCollectionForBulkFn = [this](const NamespaceString& nss, @@ -362,16 +366,15 @@ TEST_F(CollectionClonerTest, return str::equals("listIndexes", request.cmdObj.firstElementFieldName()); }); - collectionCloner = stdx::make_unique<CollectionCloner>( - &_executorProxy, - dbWorkThreadPool.get(), - target, - nss, - options, - stdx::bind(&CollectionClonerTest::setStatus, this, stdx::placeholders::_1), - storageInterface.get(), - defaultBatchSize, - defaultNumCloningCursors); + collectionCloner = stdx::make_unique<CollectionCloner>(&_executorProxy, + dbWorkThreadPool.get(), + target, + nss, + options, + setStatusCallback(), + storageInterface.get(), + defaultBatchSize, + defaultNumCloningCursors); ASSERT_OK(collectionCloner->startup()); @@ -386,16 +389,15 @@ TEST_F(CollectionClonerTest, TEST_F(CollectionClonerTest, DoNotCreateIDIndexIfAutoIndexIdUsed) { options = {}; options.autoIndexId = CollectionOptions::NO; - collectionCloner.reset(new CollectionCloner( - &getExecutor(), - dbWorkThreadPool.get(), - target, - nss, - options, - 
stdx::bind(&CollectionClonerTest::setStatus, this, stdx::placeholders::_1), - storageInterface.get(), - defaultBatchSize, - defaultNumCloningCursors)); + collectionCloner.reset(new CollectionCloner(&getExecutor(), + dbWorkThreadPool.get(), + target, + nss, + options, + setStatusCallback(), + storageInterface.get(), + defaultBatchSize, + defaultNumCloningCursors)); NamespaceString collNss; CollectionOptions collOptions; @@ -1409,16 +1411,15 @@ protected: void startupWithUUID(int maxNumCloningCursors = 1) { collectionCloner.reset(); options.uuid = UUID::gen(); - collectionCloner = stdx::make_unique<CollectionCloner>( - &getExecutor(), - dbWorkThreadPool.get(), - target, - alternateNss, - options, - stdx::bind(&CollectionClonerTest::setStatus, this, stdx::placeholders::_1), - storageInterface.get(), - defaultBatchSize, - maxNumCloningCursors); + collectionCloner = stdx::make_unique<CollectionCloner>(&getExecutor(), + dbWorkThreadPool.get(), + target, + alternateNss, + options, + setStatusCallback(), + storageInterface.get(), + defaultBatchSize, + maxNumCloningCursors); ASSERT_OK(collectionCloner->startup()); } @@ -1575,6 +1576,10 @@ protected: void tearDown() override; std::vector<BSONObj> generateDocs(std::size_t numDocs); + auto setStatusCallback() { + return [this](const Status& s) { setStatus(s); }; + } + // A simple arbitrary value to use as the default batch size. const int defaultBatchSize = 1024; @@ -1592,16 +1597,15 @@ void ParallelCollectionClonerTest::setUp() { BaseClonerTest::setUp(); options = {}; collectionCloner.reset(nullptr); - collectionCloner = stdx::make_unique<CollectionCloner>( - &getExecutor(), - dbWorkThreadPool.get(), - target, - nss, - options, - stdx::bind(&CollectionClonerTest::setStatus, this, stdx::placeholders::_1), - storageInterface.get(), - defaultBatchSize, - defaultNumCloningCursors); + collectionCloner = stdx::make_unique<CollectionCloner>(&getExecutor(), + dbWorkThreadPool.get(), + target, + nss, + options, + setStatusCallback(), + storageInterface.get(), + defaultBatchSize, + defaultNumCloningCursors); collectionStats = CollectionMockStats(); storageInterface->createCollectionForBulkFn = [this](const NamespaceString& nss, diff --git a/src/mongo/db/repl/databases_cloner.cpp b/src/mongo/db/repl/databases_cloner.cpp index 886ccf753c3..3aac650d501 100644 --- a/src/mongo/db/repl/databases_cloner.cpp +++ b/src/mongo/db/repl/databases_cloner.cpp @@ -210,7 +210,7 @@ Status DatabasesCloner::startup() noexcept { _listDBsScheduler = stdx::make_unique<RemoteCommandRetryScheduler>( _exec, listDBsReq, - stdx::bind(&DatabasesCloner::_onListDatabaseFinish, this, stdx::placeholders::_1), + [this](const auto& x) { _onListDatabaseFinish(x); }, RemoteCommandRetryScheduler::makeRetryPolicy( numInitialSyncListDatabasesAttempts.load(), executor::RemoteCommandRequest::kNoTimeout, diff --git a/src/mongo/db/repl/multiapplier.cpp b/src/mongo/db/repl/multiapplier.cpp index 32efe2c68a7..e748710ef6e 100644 --- a/src/mongo/db/repl/multiapplier.cpp +++ b/src/mongo/db/repl/multiapplier.cpp @@ -91,8 +91,8 @@ Status MultiApplier::startup() noexcept { return Status(ErrorCodes::ShutdownInProgress, "multi applier completed"); } - auto scheduleResult = - _executor->scheduleWork(stdx::bind(&MultiApplier::_callback, this, stdx::placeholders::_1)); + auto scheduleResult = _executor->scheduleWork( + [=](const executor::TaskExecutor::CallbackArgs& cbd) { return _callback(cbd); }); if (!scheduleResult.isOK()) { _state = State::kComplete; return scheduleResult.getStatus(); diff --git 
a/src/mongo/db/repl/repl_set_config.cpp b/src/mongo/db/repl/repl_set_config.cpp index 910e7c386d4..03619b1a859 100644 --- a/src/mongo/db/repl/repl_set_config.cpp +++ b/src/mongo/db/repl/repl_set_config.cpp @@ -242,8 +242,8 @@ Status ReplSetConfig::_parseSettingsSubdocument(const BSONObj& settings) { // // Parse electionTimeoutMillis // - auto greaterThanZero = stdx::bind(std::greater<long long>(), stdx::placeholders::_1, 0); long long electionTimeoutMillis; + auto greaterThanZero = [](const auto& x) { return x > 0; }; auto electionTimeoutStatus = bsonExtractIntegerFieldWithDefaultIf( settings, kElectionTimeoutFieldName, @@ -706,13 +706,10 @@ StatusWith<ReplSetTagPattern> ReplSetConfig::findCustomWriteMode(StringData patt } void ReplSetConfig::_calculateMajorities() { - const int voters = std::count_if(_members.begin(), - _members.end(), - stdx::bind(&MemberConfig::isVoter, stdx::placeholders::_1)); - const int arbiters = - std::count_if(_members.begin(), - _members.end(), - stdx::bind(&MemberConfig::isArbiter, stdx::placeholders::_1)); + const int voters = + std::count_if(begin(_members), end(_members), [](const auto& x) { return x.isVoter(); }); + const int arbiters = std::count_if( + begin(_members), end(_members), [](const auto& x) { return x.isArbiter(); }); _totalVotingMembers = voters; _majorityVoteCount = voters / 2 + 1; _writeMajority = std::min(_majorityVoteCount, voters - arbiters); diff --git a/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp b/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp index c26edc2f70f..cd822f9fb0c 100644 --- a/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp +++ b/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp @@ -242,11 +242,10 @@ void ReplicationCoordinatorExternalStateImpl::startSteadyStateReplication( _applierThread->startup(); log() << "Starting replication reporter thread"; invariant(!_syncSourceFeedbackThread); - _syncSourceFeedbackThread.reset(new stdx::thread(stdx::bind(&SyncSourceFeedback::run, - &_syncSourceFeedback, - _taskExecutor.get(), - _bgSync.get(), - replCoord))); + _syncSourceFeedbackThread = stdx::make_unique<stdx::thread>( + [this, replCoord] { + _syncSourceFeedback.run(_taskExecutor.get(), _bgSync.get(), replCoord); + }); } void ReplicationCoordinatorExternalStateImpl::stopDataReplication(OperationContext* opCtx) { diff --git a/src/mongo/db/repl/replication_coordinator_impl_elect.cpp b/src/mongo/db/repl/replication_coordinator_impl_elect.cpp index ac6e07ce3ee..b50252225a0 100644 --- a/src/mongo/db/repl/replication_coordinator_impl_elect.cpp +++ b/src/mongo/db/repl/replication_coordinator_impl_elect.cpp @@ -144,7 +144,9 @@ void ReplicationCoordinatorImpl::_startElectSelf_inlock() { fassert(18681, nextPhaseEvh.getStatus()); _replExecutor ->onEvent(nextPhaseEvh.getValue(), - stdx::bind(&ReplicationCoordinatorImpl::_onFreshnessCheckComplete, this)) + [this](const mongo::executor::TaskExecutor::CallbackArgs&) { + _onFreshnessCheckComplete(); + }) .status_with_transitional_ignore(); lossGuard.dismiss(); } @@ -180,9 +182,9 @@ void ReplicationCoordinatorImpl::_onFreshnessCheckComplete() { << dateToISOStringLocal(nextCandidateTime); _topCoord->setElectionSleepUntil(nextCandidateTime); _scheduleWorkAt(nextCandidateTime, - stdx::bind(&ReplicationCoordinatorImpl::_recoverFromElectionTie, - this, - stdx::placeholders::_1)); + [=](const executor::TaskExecutor::CallbackArgs& cbData) { + _recoverFromElectionTie(cbData); + }); _sleptLastElection = true; return; } @@ -220,7 
+222,7 @@ void ReplicationCoordinatorImpl::_onFreshnessCheckComplete() { _replExecutor ->onEvent(nextPhaseEvh.getValue(), - stdx::bind(&ReplicationCoordinatorImpl::_onElectCmdRunnerComplete, this)) + [=](const executor::TaskExecutor::CallbackArgs&) { _onElectCmdRunnerComplete(); }) .status_with_transitional_ignore(); lossGuard.dismiss(); } @@ -252,10 +254,9 @@ void ReplicationCoordinatorImpl::_onElectCmdRunnerComplete() { const Date_t nextCandidateTime = now + ms; log() << "waiting until " << nextCandidateTime << " before standing for election again"; _topCoord->setElectionSleepUntil(nextCandidateTime); - _scheduleWorkAt(nextCandidateTime, - stdx::bind(&ReplicationCoordinatorImpl::_recoverFromElectionTie, - this, - stdx::placeholders::_1)); + _scheduleWorkAt(nextCandidateTime, [=](const executor::TaskExecutor::CallbackArgs& cbData) { + _recoverFromElectionTie(cbData); + }); return; } diff --git a/src/mongo/util/background.cpp b/src/mongo/util/background.cpp index 7c0575daa46..155ad760413 100644 --- a/src/mongo/util/background.cpp +++ b/src/mongo/util/background.cpp @@ -177,8 +177,7 @@ void BackgroundJob::go() { // If the job is already 'done', for instance because it was cancelled or already // finished, ignore additional requests to run the job. if (_status->state == NotStarted) { - stdx::thread t(stdx::bind(&BackgroundJob::jobBody, this)); - t.detach(); + stdx::thread{[this] { jobBody(); }}.detach(); _status->state = Running; } } diff --git a/src/mongo/util/concurrency/spin_lock_test.cpp b/src/mongo/util/concurrency/spin_lock_test.cpp index a830ba07db6..6d41251a2a9 100644 --- a/src/mongo/util/concurrency/spin_lock_test.cpp +++ b/src/mongo/util/concurrency/spin_lock_test.cpp @@ -50,7 +50,7 @@ public: } void start(int increments) { - _t = new stdx::thread(mongo::stdx::bind(&LockTester::test, this, increments)); + _t = new stdx::thread([this, increments] { test(increments); }); } void join() { diff --git a/src/mongo/util/concurrency/thread_pool.cpp b/src/mongo/util/concurrency/thread_pool.cpp index 06cff9f3b4d..411e19ebd30 100644 --- a/src/mongo/util/concurrency/thread_pool.cpp +++ b/src/mongo/util/concurrency/thread_pool.cpp @@ -382,7 +382,7 @@ void ThreadPool::_startWorkerThread_inlock() { invariant(_threads.size() < _options.maxThreads); const std::string threadName = str::stream() << _options.threadNamePrefix << _nextThreadId++; try { - _threads.emplace_back(stdx::bind(&ThreadPool::_workerThreadBody, this, threadName)); + _threads.emplace_back([this, threadName] { _workerThreadBody(this, threadName); }); ++_numIdleThreads; } catch (const std::exception& ex) { error() << "Failed to start " << threadName << "; " << _threads.size() |
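The REGISTER_EXPRESSION hunks in expression.cpp above take a different route: because the same parse function is registered seven times, each with a different trailing CmpOp, the commit introduces a small BoundOp functor that stores the operator and forwards it to ExpressionCompare::parse. A minimal standalone sketch of that idea, using hypothetical CmpOp / parseCompare stand-ins rather than the real MongoDB declarations:

#include <iostream>
#include <string>

// Illustrative stand-ins; the real code forwards to ExpressionCompare::parse.
enum class CmpOp { EQ, GT, LT };

std::string parseCompare(const std::string& expr, CmpOp op) {
    switch (op) {
        case CmpOp::EQ: return "eq(" + expr + ")";
        case CmpOp::GT: return "gt(" + expr + ")";
        case CmpOp::LT: return "lt(" + expr + ")";
    }
    return {};
}

// The functor stores the operator once and forwards it on every call,
// which is what stdx::bind with a fixed trailing argument used to do.
struct BoundOp {
    CmpOp op;
    std::string operator()(const std::string& expr) const {
        return parseCompare(expr, op);
    }
};

int main() {
    BoundOp eq{CmpOp::EQ};
    BoundOp gt{CmpOp::GT};
    std::cout << eq("$a") << "\n";  // prints "eq($a)"
    std::cout << gt("$b") << "\n";  // prints "gt($b)"
}

Each REGISTER_EXPRESSION site then passes BoundOp{ExpressionCompare::EQ} (and so on) where it previously passed a stdx::bind expression.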