Diffstat (limited to 'src/mongo/executor')
-rw-r--r--  src/mongo/executor/connection_pool.cpp                        21
-rw-r--r--  src/mongo/executor/connection_pool_test.cpp                    4
-rw-r--r--  src/mongo/executor/connection_pool_test_fixture.cpp            8
-rw-r--r--  src/mongo/executor/connection_pool_tl.cpp                     12
-rw-r--r--  src/mongo/executor/connection_pool_tl.h                        2
-rw-r--r--  src/mongo/executor/egress_tag_closer.h                         7
-rw-r--r--  src/mongo/executor/network_interface_integration_test.cpp    23
-rw-r--r--  src/mongo/executor/network_interface_mock.cpp                 52
-rw-r--r--  src/mongo/executor/network_interface_tl.cpp                   55
-rw-r--r--  src/mongo/executor/scoped_task_executor.cpp                    4
-rw-r--r--  src/mongo/executor/task_executor.h                             1
-rw-r--r--  src/mongo/executor/task_executor_cursor_integration_test.cpp   3
-rw-r--r--  src/mongo/executor/task_executor_cursor_test.cpp              48
-rw-r--r--  src/mongo/executor/task_executor_test_common.cpp               7
-rw-r--r--  src/mongo/executor/task_executor_test_common.h                 7
-rw-r--r--  src/mongo/executor/thread_pool_task_executor.cpp               2
16 files changed, 117 insertions, 139 deletions
diff --git a/src/mongo/executor/connection_pool.cpp b/src/mongo/executor/connection_pool.cpp
index e425d72ea5f..76077ae0809 100644
--- a/src/mongo/executor/connection_pool.cpp
+++ b/src/mongo/executor/connection_pool.cpp
@@ -74,7 +74,7 @@ void emplaceOrInvariant(Map&& map, Args&&... args) noexcept {
invariant(ret.second, "Element already existed in map/set");
}
-} // anonymous
+} // namespace
namespace executor {
@@ -164,7 +164,8 @@ public:
const auto& data = getOrInvariant(_poolData, id);
return {
- getPool()->_options.maxConnecting, data.target,
+ getPool()->_options.maxConnecting,
+ data.target,
};
}
@@ -219,11 +220,11 @@ public:
template <typename Callback>
auto guardCallback(Callback&& cb) {
return
- [ this, cb = std::forward<Callback>(cb), anchor = shared_from_this() ](auto&&... args) {
- stdx::lock_guard lk(_parent->_mutex);
- cb(std::forward<decltype(args)>(args)...);
- updateState();
- };
+ [this, cb = std::forward<Callback>(cb), anchor = shared_from_this()](auto&&... args) {
+ stdx::lock_guard lk(_parent->_mutex);
+ cb(std::forward<decltype(args)>(args)...);
+ updateState();
+ };
}
SpecificPool(std::shared_ptr<ConnectionPool> parent,
@@ -519,7 +520,7 @@ void ConnectionPool::get_forTest(const HostAndPort& hostAndPort,
Milliseconds timeout,
GetConnectionCallback cb) {
// We kick ourselves onto the executor queue to prevent us from deadlocking with our own thread
- auto getConnectionFunc = [ this, hostAndPort, timeout, cb = std::move(cb) ](Status &&) mutable {
+ auto getConnectionFunc = [this, hostAndPort, timeout, cb = std::move(cb)](Status&&) mutable {
get(hostAndPort, transport::kGlobalSSLMode, timeout)
.thenRunOn(_factory->getExecutor())
.getAsync(std::move(cb));
@@ -650,7 +651,7 @@ Future<ConnectionPool::ConnectionHandle> ConnectionPool::SpecificPool::getConnec
}
auto ConnectionPool::SpecificPool::makeHandle(ConnectionInterface* connection) -> ConnectionHandle {
- auto deleter = [ this, anchor = shared_from_this() ](ConnectionInterface * connection) {
+ auto deleter = [this, anchor = shared_from_this()](ConnectionInterface* connection) {
stdx::lock_guard lk(_parent->_mutex);
returnConnection(connection);
_lastActiveTime = _parent->_factory->now();
@@ -1120,7 +1121,7 @@ void ConnectionPool::SpecificPool::updateState() {
}
ExecutorFuture(ExecutorPtr(_parent->_factory->getExecutor())) //
- .getAsync([ this, anchor = shared_from_this() ](Status && status) mutable {
+ .getAsync([this, anchor = shared_from_this()](Status&& status) mutable {
invariant(status);
stdx::lock_guard lk(_parent->_mutex);
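The guardCallback hunks above reflow a recurring idiom in this file: a lambda that captures a shared_from_this() anchor so the pool outlives its own callback, takes the pool lock, runs the wrapped callback, then refreshes state. A minimal standalone sketch of that idiom, using a hypothetical Widget type rather than the real SpecificPool:

#include <iostream>
#include <memory>
#include <mutex>
#include <utility>

class Widget : public std::enable_shared_from_this<Widget> {
public:
    // Wrap a callback so that (1) the Widget stays alive as long as the
    // callback can still run, and (2) the callback always executes under
    // _mutex and is followed by a state refresh.
    template <typename Callback>
    auto guardCallback(Callback&& cb) {
        return
            [this, cb = std::forward<Callback>(cb), anchor = shared_from_this()](auto&&... args) {
                std::lock_guard<std::mutex> lk(_mutex);
                cb(std::forward<decltype(args)>(args)...);
                _updateState();
            };
    }

private:
    void _updateState() { std::cout << "state refreshed\n"; }
    std::mutex _mutex;
};

int main() {
    auto w = std::make_shared<Widget>();
    auto guarded = w->guardCallback([](int v) { std::cout << "got " << v << "\n"; });
    w.reset();   // the anchor capture keeps the Widget alive
    guarded(42); // still safe: runs under the still-live Widget's mutex
    return 0;
}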
diff --git a/src/mongo/executor/connection_pool_test.cpp b/src/mongo/executor/connection_pool_test.cpp
index 16dbffeb56e..96ec25ba64d 100644
--- a/src/mongo/executor/connection_pool_test.cpp
+++ b/src/mongo/executor/connection_pool_test.cpp
@@ -80,14 +80,14 @@ protected:
template <typename... Args>
auto getFromPool(Args&&... args) {
return ExecutorFuture(_executor)
- .then([ pool = _pool, args... ]() { return pool->get(args...); })
+ .then([pool = _pool, args...]() { return pool->get(args...); })
.semi();
}
void doneWith(ConnectionPool::ConnectionHandle& conn) {
dynamic_cast<ConnectionImpl*>(conn.get())->indicateSuccess();
- ExecutorFuture(_executor).getAsync([conn = std::move(conn)](auto){});
+ ExecutorFuture(_executor).getAsync([conn = std::move(conn)](auto) {});
}
using StatusWithConn = StatusWith<ConnectionPool::ConnectionHandle>;
diff --git a/src/mongo/executor/connection_pool_test_fixture.cpp b/src/mongo/executor/connection_pool_test_fixture.cpp
index 9b28d752157..5cbfdf8d1d5 100644
--- a/src/mongo/executor/connection_pool_test_fixture.cpp
+++ b/src/mongo/executor/connection_pool_test_fixture.cpp
@@ -74,9 +74,7 @@ void TimerImpl::fireIfNecessary() {
for (auto&& x : timers) {
if (_timers.count(x) && (x->_expiration <= x->now())) {
- auto execCB = [cb = std::move(x->_cb)](auto&&) mutable {
- std::move(cb)();
- };
+ auto execCB = [cb = std::move(x->_cb)](auto&&) mutable { std::move(cb)(); };
auto global = x->_global;
_timers.erase(x);
global->_executor->schedule(std::move(execCB));
@@ -122,7 +120,7 @@ void ConnectionImpl::processSetup() {
_setupQueue.pop_front();
_pushSetupQueue.pop_front();
- connPtr->_global->_executor->schedule([ connPtr, callback = std::move(callback) ](auto&&) {
+ connPtr->_global->_executor->schedule([connPtr, callback = std::move(callback)](auto&&) {
auto cb = std::move(connPtr->_setupCallback);
connPtr->indicateUsed();
cb(connPtr, callback());
@@ -152,7 +150,7 @@ void ConnectionImpl::processRefresh() {
_refreshQueue.pop_front();
_pushRefreshQueue.pop_front();
- connPtr->_global->_executor->schedule([ connPtr, callback = std::move(callback) ](auto&&) {
+ connPtr->_global->_executor->schedule([connPtr, callback = std::move(callback)](auto&&) {
auto cb = std::move(connPtr->_refreshCallback);
connPtr->indicateUsed();
cb(connPtr, callback());
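The fixture hunks reflow another one-liner idiom: a one-shot callback is moved into the scheduled lambda and invoked as an rvalue via std::move(cb)(), signalling single use and letting the callable give up its resources. A small sketch under that reading, with no executor involved:

#include <iostream>
#include <memory>
#include <utility>

int main() {
    // A move-only, one-shot callback: it owns its payload.
    auto cb = [payload = std::make_unique<int>(7)]() mutable {
        std::cout << "fired with " << *payload << "\n";
    };

    // Move the callback into the "scheduled" task and invoke it as an
    // rvalue, mirroring the fixture's
    //   [cb = std::move(x->_cb)](auto&&) mutable { std::move(cb)(); };
    auto task = [cb = std::move(cb)](auto&&...) mutable { std::move(cb)(); };

    task(); // one shot: cb's resources were moved into task and consumed here
    return 0;
}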
diff --git a/src/mongo/executor/connection_pool_tl.cpp b/src/mongo/executor/connection_pool_tl.cpp
index a7ab984b600..e2f7711cca7 100644
--- a/src/mongo/executor/connection_pool_tl.cpp
+++ b/src/mongo/executor/connection_pool_tl.cpp
@@ -139,7 +139,7 @@ AsyncDBClient* TLConnection::client() {
void TLConnection::setTimeout(Milliseconds timeout, TimeoutCallback cb) {
auto anchor = shared_from_this();
- _timer->setTimeout(timeout, [ cb = std::move(cb), anchor = std::move(anchor) ] { cb(); });
+ _timer->setTimeout(timeout, [cb = std::move(cb), anchor = std::move(anchor)] { cb(); });
}
void TLConnection::cancelTimeout() {
@@ -213,14 +213,14 @@ void TLConnection::setup(Milliseconds timeout, SetupCallback cb) {
auto pf = makePromiseFuture<void>();
auto handler = std::make_shared<TimeoutHandler>(std::move(pf.promise));
std::move(pf.future).thenRunOn(_reactor).getAsync(
- [ this, cb = std::move(cb), anchor ](Status status) { cb(this, std::move(status)); });
+ [this, cb = std::move(cb), anchor](Status status) { cb(this, std::move(status)); });
setTimeout(timeout, [this, handler, timeout] {
if (handler->done.swap(true)) {
return;
}
- std::string reason = str::stream() << "Timed out connecting to " << _peer << " after "
- << timeout;
+ std::string reason = str::stream()
+ << "Timed out connecting to " << _peer << " after " << timeout;
handler->promise.setError(
Status(ErrorCodes::NetworkInterfaceExceededTimeLimit, std::move(reason)));
@@ -286,7 +286,7 @@ void TLConnection::refresh(Milliseconds timeout, RefreshCallback cb) {
auto pf = makePromiseFuture<void>();
auto handler = std::make_shared<TimeoutHandler>(std::move(pf.promise));
std::move(pf.future).thenRunOn(_reactor).getAsync(
- [ this, cb = std::move(cb), anchor ](Status status) { cb(this, status); });
+ [this, cb = std::move(cb), anchor](Status status) { cb(this, status); });
setTimeout(timeout, [this, handler] {
if (handler->done.swap(true)) {
@@ -361,4 +361,4 @@ Date_t TLTypeFactory::now() {
} // namespace connection_pool_tl
} // namespace executor
-} // namespace
+} // namespace mongo
diff --git a/src/mongo/executor/connection_pool_tl.h b/src/mongo/executor/connection_pool_tl.h
index 7a138589055..7297713b92b 100644
--- a/src/mongo/executor/connection_pool_tl.h
+++ b/src/mongo/executor/connection_pool_tl.h
@@ -182,6 +182,6 @@ private:
AsyncDBClient::Handle _client;
};
-} // namespace connection_pool_asio
+} // namespace connection_pool_tl
} // namespace executor
} // namespace mongo
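Several hunks above re-break str::stream() << ... chains; the idiom works because the builder accumulates operands through operator<< and converts implicitly to std::string at the end, and clang-format 7 prefers to break before the whole chain rather than after an arbitrary operand. A minimal look-alike builder (hypothetical, not MongoDB's actual str::stream):

#include <iostream>
#include <sstream>
#include <string>

// Hypothetical look-alike of mongo::str::stream: accumulate with <<,
// then convert implicitly to std::string.
class stream_builder {
public:
    template <typename T>
    stream_builder& operator<<(const T& v) {
        _ss << v;
        return *this;
    }
    operator std::string() const {
        return _ss.str();
    }

private:
    std::ostringstream _ss;
};

int main() {
    int timeoutMs = 250;
    // Same shape as the TLConnection hunk: one << chain initializes a string.
    std::string reason = stream_builder()
        << "Timed out connecting to " << "example:27017" << " after " << timeoutMs << "ms";
    std::cout << reason << "\n";
    return 0;
}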
diff --git a/src/mongo/executor/egress_tag_closer.h b/src/mongo/executor/egress_tag_closer.h
index b1ab2430ed6..43d2078f4dd 100644
--- a/src/mongo/executor/egress_tag_closer.h
+++ b/src/mongo/executor/egress_tag_closer.h
@@ -50,9 +50,10 @@ public:
virtual void dropConnections(const HostAndPort& hostAndPort) = 0;
- virtual void mutateTags(const HostAndPort& hostAndPort,
- const std::function<transport::Session::TagMask(
- transport::Session::TagMask)>& mutateFunc) = 0;
+ virtual void mutateTags(
+ const HostAndPort& hostAndPort,
+ const std::function<transport::Session::TagMask(transport::Session::TagMask)>&
+ mutateFunc) = 0;
protected:
EgressTagCloser() {}
diff --git a/src/mongo/executor/network_interface_integration_test.cpp b/src/mongo/executor/network_interface_integration_test.cpp
index 163415c923c..2723f66bde7 100644
--- a/src/mongo/executor/network_interface_integration_test.cpp
+++ b/src/mongo/executor/network_interface_integration_test.cpp
@@ -87,8 +87,7 @@ class HangingHook : public executor::NetworkConnectionHook {
"admin",
BSON("sleep" << 1 << "lock"
<< "none"
- << "secs"
- << 100000000),
+ << "secs" << 100000000),
BSONObj(),
nullptr))};
}
@@ -274,8 +273,7 @@ TEST_F(NetworkInterfaceTest, AsyncOpTimeout) {
auto request = makeTestCommand(Milliseconds{1000});
request.cmdObj = BSON("sleep" << 1 << "lock"
<< "none"
- << "secs"
- << 1000000000);
+ << "secs" << 1000000000);
auto deferred = runCommand(cb, request);
waitForIsMaster();
@@ -322,14 +320,15 @@ TEST_F(NetworkInterfaceTest, SetAlarm) {
Date_t expiration = net().now() + Milliseconds(100);
auto makeTimerFuture = [&] {
auto pf = makePromiseFuture<Date_t>();
- return std::make_pair([ this, promise = std::move(pf.promise) ](Status status) mutable {
- if (status.isOK()) {
- promise.emplaceValue(net().now());
- } else {
- promise.setError(status);
- }
- },
- std::move(pf.future));
+ return std::make_pair(
+ [this, promise = std::move(pf.promise)](Status status) mutable {
+ if (status.isOK()) {
+ promise.emplaceValue(net().now());
+ } else {
+ promise.setError(status);
+ }
+ },
+ std::move(pf.future));
};
auto futurePair = makeTimerFuture();
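The SetAlarm hunk reflows a pattern worth spelling out: build a promise/future pair, return a std::pair of (callback that settles the promise, future), and hand the callback to the async API while the test waits on the future. A rough standard-library equivalent, with std::promise standing in for MongoDB's makePromiseFuture:

#include <chrono>
#include <future>
#include <iostream>
#include <stdexcept>
#include <utility>

int main() {
    // Build a (callback, future) pair: the callback settles the promise,
    // the future lets the caller wait on the result.
    auto makeTimerFuture = [] {
        std::promise<std::chrono::steady_clock::time_point> promise;
        auto future = promise.get_future();
        auto callback = [promise = std::move(promise)](bool ok) mutable {
            if (ok) {
                promise.set_value(std::chrono::steady_clock::now());
            } else {
                promise.set_exception(
                    std::make_exception_ptr(std::runtime_error("alarm failed")));
            }
        };
        return std::make_pair(std::move(callback), std::move(future));
    };

    auto [callback, future] = makeTimerFuture();
    callback(true); // a real network interface would invoke this when the alarm fires
    std::cout << "future ready: "
              << (future.wait_for(std::chrono::seconds(0)) == std::future_status::ready)
              << "\n";
    return 0;
}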
diff --git a/src/mongo/executor/network_interface_mock.cpp b/src/mongo/executor/network_interface_mock.cpp
index da9aa915b54..86e1144b81e 100644
--- a/src/mongo/executor/network_interface_mock.cpp
+++ b/src/mongo/executor/network_interface_mock.cpp
@@ -487,12 +487,14 @@ void NetworkInterfaceMock::_enqueueOperation_inlock(
ResponseStatus rs(
ErrorCodes::NetworkInterfaceExceededTimeLimit, "Network timeout", Milliseconds(0));
std::vector<NetworkOperationList*> queuesToCheck{&_unscheduled, &_blackHoled, &_scheduled};
- _alarms.emplace(cbh, _now_inlock() + timeout, [
- this,
- cbh = std::move(cbh),
- queuesToCheck = std::move(queuesToCheck),
- rs = std::move(rs)
- ](Status) { _interruptWithResponse_inlock(cbh, queuesToCheck, rs); });
+ _alarms.emplace(cbh,
+ _now_inlock() + timeout,
+ [this,
+ cbh = std::move(cbh),
+ queuesToCheck = std::move(queuesToCheck),
+ rs = std::move(rs)](Status) {
+ _interruptWithResponse_inlock(cbh, queuesToCheck, rs);
+ });
}
}
@@ -535,25 +537,25 @@ void NetworkInterfaceMock::_connectThenEnqueueOperation_inlock(const HostAndPort
auto cbh = op.getCallbackHandle();
// The completion handler for the postconnect command schedules the original command.
auto postconnectCompletionHandler =
- [ this, op = std::move(op) ](TaskExecutor::ResponseOnAnyStatus rs) mutable {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
- if (!rs.isOK()) {
- op.setResponse(_now_inlock(), rs);
- op.finishResponse();
- return;
- }
-
- auto handleStatus = _hook->handleReply(op.getRequest().target, std::move(rs));
-
- if (!handleStatus.isOK()) {
- op.setResponse(_now_inlock(), handleStatus);
- op.finishResponse();
- return;
- }
-
- _connections.emplace(op.getRequest().target);
- _enqueueOperation_inlock(std::move(op));
- };
+ [this, op = std::move(op)](TaskExecutor::ResponseOnAnyStatus rs) mutable {
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
+ if (!rs.isOK()) {
+ op.setResponse(_now_inlock(), rs);
+ op.finishResponse();
+ return;
+ }
+
+ auto handleStatus = _hook->handleReply(op.getRequest().target, std::move(rs));
+
+ if (!handleStatus.isOK()) {
+ op.setResponse(_now_inlock(), handleStatus);
+ op.finishResponse();
+ return;
+ }
+
+ _connections.emplace(op.getRequest().target);
+ _enqueueOperation_inlock(std::move(op));
+ };
auto postconnectOp = NetworkOperation(cbh,
std::move(*hookPostconnectCommand),
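The postconnectCompletionHandler hunk above moves the whole operation into its own completion handler; the lambda must be mutable because the handler mutates its captured copy. A compact sketch with a hypothetical Operation type in place of NetworkOperation:

#include <iostream>
#include <string>
#include <utility>

// Hypothetical stand-in for NetworkOperation: owns its request state.
struct Operation {
    std::string target;
    bool done = false;
    void finish(bool ok) {
        done = true;
        std::cout << target << (ok ? " ran" : " failed") << "\n";
    }
};

int main() {
    Operation op{"example:27017"};

    // Move the operation into the handler; `mutable` is required because
    // the handler mutates (finishes) the captured copy.
    auto completionHandler = [op = std::move(op)](bool ok) mutable {
        if (!ok) {
            op.finish(false);
            return;
        }
        op.finish(true); // in the mock this re-enqueues the original command
    };

    completionHandler(true);
    return 0;
}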
diff --git a/src/mongo/executor/network_interface_tl.cpp b/src/mongo/executor/network_interface_tl.cpp
index c7528397951..cbd3484bf1c 100644
--- a/src/mongo/executor/network_interface_tl.cpp
+++ b/src/mongo/executor/network_interface_tl.cpp
@@ -239,21 +239,21 @@ Status NetworkInterfaceTL::startCommand(const TaskExecutor::CallbackHandle& cbHa
auto executor = baton ? ExecutorPtr(baton) : ExecutorPtr(_reactor);
std::move(cmdPF.future)
.thenRunOn(executor)
- .onError([requestId = cmdState->requestOnAny.id](auto error)
- ->StatusWith<RemoteCommandOnAnyResponse> {
- LOG(2) << "Failed to get connection from pool for request " << requestId
- << ": " << redact(error);
-
- // The TransportLayer has, for historical reasons returned SocketException
- // for network errors, but sharding assumes HostUnreachable on network
- // errors.
- if (error == ErrorCodes::SocketException) {
- error = Status(ErrorCodes::HostUnreachable, error.reason());
- }
- return error;
- })
- .getAsync([ this, cmdState, onFinish = std::move(onFinish) ](
- StatusWith<RemoteCommandOnAnyResponse> response) {
+ .onError([requestId = cmdState->requestOnAny.id](
+ auto error) -> StatusWith<RemoteCommandOnAnyResponse> {
+ LOG(2) << "Failed to get connection from pool for request " << requestId << ": "
+ << redact(error);
+
+ // The TransportLayer has, for historical reasons returned SocketException
+ // for network errors, but sharding assumes HostUnreachable on network
+ // errors.
+ if (error == ErrorCodes::SocketException) {
+ error = Status(ErrorCodes::HostUnreachable, error.reason());
+ }
+ return error;
+ })
+ .getAsync([this, cmdState, onFinish = std::move(onFinish)](
+ StatusWith<RemoteCommandOnAnyResponse> response) {
auto duration = now() - cmdState->start;
if (!response.isOK()) {
onFinish(RemoteCommandOnAnyResponse(boost::none, response.getStatus(), duration));
@@ -270,7 +270,7 @@ Status NetworkInterfaceTL::startCommand(const TaskExecutor::CallbackHandle& cbHa
return Status::OK();
}
- auto[connPromise, connFuture] = makePromiseFuture<ConnectionPool::ConnectionHandle>();
+ auto [connPromise, connFuture] = makePromiseFuture<ConnectionPool::ConnectionHandle>();
std::move(connFuture).thenRunOn(executor).getAsync([this, cmdState, baton](auto swConn) {
auto status = swConn.getStatus();
@@ -360,8 +360,7 @@ void NetworkInterfaceTL::_onAcquireConn(std::shared_ptr<CommandState> state,
uasserted(ErrorCodes::NetworkInterfaceExceededTimeLimit,
str::stream() << "Remote command timed out while waiting to get a "
"connection from the pool, took "
- << connDuration
- << ", timeout was set to "
+ << connDuration << ", timeout was set to "
<< state->requestOnAny.timeout);
}
@@ -509,14 +508,14 @@ Status NetworkInterfaceTL::setAlarm(const TaskExecutor::CallbackHandle& cbHandle
// If a user has already scheduled an alarm with a handle, make sure they intentionally
// override it by canceling and setting a new one.
auto alarmPair = std::make_pair(cbHandle, std::shared_ptr<AlarmState>(alarmState));
- auto && [ _, wasInserted ] = _inProgressAlarms.insert(std::move(alarmPair));
+ auto&& [_, wasInserted] = _inProgressAlarms.insert(std::move(alarmPair));
invariant(wasInserted);
}
- alarmState->timer->waitUntil(alarmState->when, nullptr).getAsync([
- this,
- state = std::move(alarmState)
- ](Status status) mutable { _answerAlarm(status, state); });
+ alarmState->timer->waitUntil(alarmState->when, nullptr)
+ .getAsync([this, state = std::move(alarmState)](Status status) mutable {
+ _answerAlarm(status, state);
+ });
return Status::OK();
}
@@ -546,7 +545,7 @@ void NetworkInterfaceTL::_cancelAllAlarms() {
return std::exchange(_inProgressAlarms, {});
}();
- for (auto && [ cbHandle, state ] : alarms) {
+ for (auto&& [cbHandle, state] : alarms) {
state->timer->cancel();
state->promise.setError(Status(ErrorCodes::CallbackCanceled, "Alarm cancelled"));
}
@@ -566,10 +565,10 @@ void NetworkInterfaceTL::_answerAlarm(Status status, std::shared_ptr<AlarmState>
if (status.isOK() && currentTime < state->when) {
LOG(2) << "Alarm returned early. Expected at: " << state->when
<< ", fired at: " << currentTime;
- state->timer->waitUntil(state->when, nullptr).getAsync([
- this,
- state = std::move(state)
- ](Status status) mutable { _answerAlarm(status, state); });
+ state->timer->waitUntil(state->when, nullptr)
+ .getAsync([this, state = std::move(state)](Status status) mutable {
+ _answerAlarm(status, state);
+ });
return;
}
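Several hunks in this file fix the formatting of C++17 structured bindings: clang-format 7 writes auto [a, b] = ... and auto&& [k, v] : map with a space after auto, where the old release glued them together. A compact sketch of both uses, mirroring the setAlarm insert check and the _cancelAllAlarms loop:

#include <iostream>
#include <map>
#include <string>

int main() {
    std::map<int, std::string> alarms;

    // Pair-returning insert, unpacked in place (like the _inProgressAlarms check).
    auto [it, wasInserted] = alarms.insert({1, "wake up"});
    std::cout << "inserted: " << wasInserted << " -> " << it->second << "\n";

    // insert() refuses duplicates; the binding makes that easy to check.
    auto&& [dup, wasInsertedAgain] = alarms.insert({1, "duplicate"});
    std::cout << "inserted again: " << wasInsertedAgain << " (kept: " << dup->second << ")\n";

    // Iterating key/value pairs, as in _cancelAllAlarms.
    for (auto&& [id, message] : alarms) {
        std::cout << id << " -> " << message << "\n";
    }
    return 0;
}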
diff --git a/src/mongo/executor/scoped_task_executor.cpp b/src/mongo/executor/scoped_task_executor.cpp
index 0f718242163..6f2b4823139 100644
--- a/src/mongo/executor/scoped_task_executor.cpp
+++ b/src/mongo/executor/scoped_task_executor.cpp
@@ -68,7 +68,7 @@ public:
return _cbHandles;
}();
- for (auto & [ id, handle ] : handles) {
+ for (auto& [id, handle] : handles) {
// If we don't have a handle yet, it means there's a scheduling thread that's
// dropped the lock but hasn't yet stashed it (or failed to schedule it on the
// underlying executor).
@@ -223,7 +223,7 @@ private:
// State 2 - Indeterminate state. We don't know yet if the task will get scheduled.
auto swCbHandle = std::forward<ScheduleCall>(schedule)(
- [ id, work = std::forward<Work>(work), self = shared_from_this() ](const auto& cargs) {
+ [id, work = std::forward<Work>(work), self = shared_from_this()](const auto& cargs) {
using ArgsT = std::decay_t<decltype(cargs)>;
stdx::unique_lock<stdx::mutex> lk(self->_mutex);
diff --git a/src/mongo/executor/task_executor.h b/src/mongo/executor/task_executor.h
index a392f72b974..d36f5c9bac6 100644
--- a/src/mongo/executor/task_executor.h
+++ b/src/mongo/executor/task_executor.h
@@ -30,7 +30,6 @@
#pragma once
#include <functional>
-#include <functional>
#include <memory>
#include <string>
diff --git a/src/mongo/executor/task_executor_cursor_integration_test.cpp b/src/mongo/executor/task_executor_cursor_integration_test.cpp
index e65068990dc..55b75650083 100644
--- a/src/mongo/executor/task_executor_cursor_integration_test.cpp
+++ b/src/mongo/executor/task_executor_cursor_integration_test.cpp
@@ -90,8 +90,7 @@ TEST_F(TaskExecutorCursorFixture, Basic) {
"test",
BSON("find"
<< "test"
- << "batchSize"
- << 10),
+ << "batchSize" << 10),
opCtx.get());
TaskExecutorCursor tec(executor(), rcr, [] {
diff --git a/src/mongo/executor/task_executor_cursor_test.cpp b/src/mongo/executor/task_executor_cursor_test.cpp
index 7fc7af43c65..57719c44a2c 100644
--- a/src/mongo/executor/task_executor_cursor_test.cpp
+++ b/src/mongo/executor/task_executor_cursor_test.cpp
@@ -95,14 +95,10 @@ public:
NetworkInterfaceMock::InNetworkGuard ing(getNet());
ASSERT(getNet()->hasReadyRequests());
- auto rcr = getNet()->scheduleSuccessfulResponse(BSON(
- "cursorsKilled" << BSON_ARRAY((long long)(cursorId)) << "cursorsNotFound" << BSONArray()
- << "cursorsAlive"
- << BSONArray()
- << "cursorsUnknown"
- << BSONArray()
- << "ok"
- << 1));
+ auto rcr = getNet()->scheduleSuccessfulResponse(
+ BSON("cursorsKilled" << BSON_ARRAY((long long)(cursorId)) << "cursorsNotFound"
+ << BSONArray() << "cursorsAlive" << BSONArray() << "cursorsUnknown"
+ << BSONArray() << "ok" << 1));
getNet()->runReadyNetworkOperations();
return rcr.cmdObj.getOwned();
@@ -124,8 +120,7 @@ public:
TEST_F(TaskExecutorCursorFixture, SingleBatchWorks) {
auto findCmd = BSON("find"
<< "test"
- << "batchSize"
- << 2);
+ << "batchSize" << 2);
RemoteCommandRequest rcr(HostAndPort("localhost"), "test", findCmd, opCtx.get());
@@ -150,8 +145,7 @@ TEST_F(TaskExecutorCursorFixture, FailureInFind) {
"test",
BSON("find"
<< "test"
- << "batchSize"
- << 2),
+ << "batchSize" << 2),
opCtx.get());
TaskExecutorCursor tec(&getExecutor(), rcr);
@@ -175,8 +169,7 @@ TEST_F(TaskExecutorCursorFixture, EarlyReturnKillsCursor) {
"test",
BSON("find"
<< "test"
- << "batchSize"
- << 2),
+ << "batchSize" << 2),
opCtx.get());
{
@@ -189,8 +182,7 @@ TEST_F(TaskExecutorCursorFixture, EarlyReturnKillsCursor) {
ASSERT_BSONOBJ_EQ(BSON("killCursors"
<< "test"
- << "cursors"
- << BSON_ARRAY(1)),
+ << "cursors" << BSON_ARRAY(1)),
scheduleSuccessfulKillCursorResponse(1));
}
@@ -202,8 +194,7 @@ TEST_F(TaskExecutorCursorFixture, MultipleBatchesWorks) {
"test",
BSON("find"
<< "test"
- << "batchSize"
- << 2),
+ << "batchSize" << 2),
opCtx.get());
TaskExecutorCursor tec(&getExecutor(), rcr, [] {
@@ -230,8 +221,7 @@ TEST_F(TaskExecutorCursorFixture, MultipleBatchesWorks) {
// We can pick up after that interruption though
ASSERT_BSONOBJ_EQ(BSON("getMore" << (long long)(1) << "collection"
<< "test"
- << "batchSize"
- << 3),
+ << "batchSize" << 3),
scheduleSuccessfulCursorResponse("nextBatch", 3, 5, 1));
ASSERT_EQUALS(tec.getNext(opCtx.get()).get()["x"].Int(), 3);
@@ -257,8 +247,7 @@ TEST_F(TaskExecutorCursorFixture, LsidIsPassed) {
auto findCmd = BSON("find"
<< "test"
- << "batchSize"
- << 1);
+ << "batchSize" << 1);
RemoteCommandRequest rcr(HostAndPort("localhost"), "test", findCmd, opCtx.get());
@@ -272,10 +261,7 @@ TEST_F(TaskExecutorCursorFixture, LsidIsPassed) {
// lsid in the first batch
ASSERT_BSONOBJ_EQ(BSON("find"
<< "test"
- << "batchSize"
- << 1
- << "lsid"
- << lsid.toBSON()),
+ << "batchSize" << 1 << "lsid" << lsid.toBSON()),
scheduleSuccessfulCursorResponse("firstBatch", 1, 1, 1));
ASSERT_EQUALS(tec->getNext(opCtx.get()).get()["x"].Int(), 1);
@@ -283,10 +269,7 @@ TEST_F(TaskExecutorCursorFixture, LsidIsPassed) {
// lsid in the getmore
ASSERT_BSONOBJ_EQ(BSON("getMore" << (long long)(1) << "collection"
<< "test"
- << "batchSize"
- << 1
- << "lsid"
- << lsid.toBSON()),
+ << "batchSize" << 1 << "lsid" << lsid.toBSON()),
scheduleSuccessfulCursorResponse("nextBatch", 2, 2, 1));
tec.reset();
@@ -294,10 +277,7 @@ TEST_F(TaskExecutorCursorFixture, LsidIsPassed) {
// lsid in the killcursor
ASSERT_BSONOBJ_EQ(BSON("killCursors"
<< "test"
- << "cursors"
- << BSON_ARRAY(1)
- << "lsid"
- << lsid.toBSON()),
+ << "cursors" << BSON_ARRAY(1) << "lsid" << lsid.toBSON()),
scheduleSuccessfulKillCursorResponse(1));
ASSERT_FALSE(hasReadyRequests());
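Most of this file's churn is clang-format re-packing BSON macro chains such as BSON("find" << "test" << "batchSize" << 2), where the macro alternates keys and values through operator<<. A toy builder with the same alternating shape (hypothetical; MongoDB's real BSONObjBuilder machinery is far more involved):

#include <iostream>
#include <map>
#include <string>

// Toy alternating key/value builder: "key" << value << "key" << value ...
class DocBuilder {
public:
    DocBuilder& operator<<(const std::string& s) {
        if (_pendingKey.empty()) {
            _pendingKey = s; // even position: a key
        } else {
            _doc[_pendingKey] = s; // odd position: a string value
            _pendingKey.clear();
        }
        return *this;
    }
    DocBuilder& operator<<(int v) {
        _doc[_pendingKey] = std::to_string(v);
        _pendingKey.clear();
        return *this;
    }
    void print() const {
        for (const auto& [k, v] : _doc)
            std::cout << k << ": " << v << "\n";
    }

private:
    std::string _pendingKey;
    std::map<std::string, std::string> _doc;
};

int main() {
    DocBuilder b;
    // Same alternating shape as BSON("find" << "test" << "batchSize" << 2).
    b << "find" << "test" << "batchSize" << 2;
    b.print();
    return 0;
}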
diff --git a/src/mongo/executor/task_executor_test_common.cpp b/src/mongo/executor/task_executor_test_common.cpp
index 02cc4367bba..a80a96ff55e 100644
--- a/src/mongo/executor/task_executor_test_common.cpp
+++ b/src/mongo/executor/task_executor_test_common.cpp
@@ -149,10 +149,9 @@ auto makeSetStatusOnRemoteCommandCompletionClosure(const RemoteCommandRequest* e
return str::stream() << "Request(" << request.target.toString() << ", "
<< request.dbname << ", " << request.cmdObj << ')';
};
- *outStatus =
- Status(ErrorCodes::BadValue,
- str::stream() << "Actual request: " << desc(cbData.request) << "; expected: "
- << desc(*expectedRequest));
+ *outStatus = Status(ErrorCodes::BadValue,
+ str::stream() << "Actual request: " << desc(cbData.request)
+ << "; expected: " << desc(*expectedRequest));
return;
}
*outStatus = cbData.response.status;
diff --git a/src/mongo/executor/task_executor_test_common.h b/src/mongo/executor/task_executor_test_common.h
index 1cb87db0422..69e5a37a0e9 100644
--- a/src/mongo/executor/task_executor_test_common.h
+++ b/src/mongo/executor/task_executor_test_common.h
@@ -49,9 +49,10 @@ class TaskExecutor;
* presumably after the release of MSVC2015, the signature can be changed to take the unique_ptr
* by value.
*/
-void addTestsForExecutor(const std::string& suiteName,
- std::function<std::unique_ptr<TaskExecutor>(
- std::unique_ptr<NetworkInterfaceMock>)> makeExecutor);
+void addTestsForExecutor(
+ const std::string& suiteName,
+ std::function<std::unique_ptr<TaskExecutor>(std::unique_ptr<NetworkInterfaceMock>)>
+ makeExecutor);
} // namespace executor
} // namespace mongo
diff --git a/src/mongo/executor/thread_pool_task_executor.cpp b/src/mongo/executor/thread_pool_task_executor.cpp
index 7ef0669aea0..808b2a7350c 100644
--- a/src/mongo/executor/thread_pool_task_executor.cpp
+++ b/src/mongo/executor/thread_pool_task_executor.cpp
@@ -360,7 +360,7 @@ StatusWith<TaskExecutor::CallbackHandle> ThreadPoolTaskExecutor::scheduleWorkAt(
lk.unlock();
auto status = _net->setAlarm(
- cbHandle.getValue(), when, [ this, cbHandle = cbHandle.getValue() ](Status status) {
+ cbHandle.getValue(), when, [this, cbHandle = cbHandle.getValue()](Status status) {
if (status == ErrorCodes::CallbackCanceled) {
return;
}