diff options
author | ADAM David Alan Martin <adam.martin@10gen.com> | 2017-06-18 23:22:02 -0400 |
---|---|---|
committer | ADAM David Alan Martin <adam.martin@10gen.com> | 2017-06-18 23:46:57 -0400 |
commit | 9abef6f25aadfd04309cb2219068097f93dc961d (patch) | |
tree | f88c7f183f201813f363d5d68c1a4a76781ca7ef /src/mongo/executor | |
parent | a5f0a84c79b6ce41fef33da920c62be0ecc8f07b (diff) | |
download | mongo-9abef6f25aadfd04309cb2219068097f93dc961d.tar.gz |
SERVER-27244 Status usage compile-time facilities.
There are numerous places in the codebase where `mongo::Status` or
`mongo::StatusWith< T >` objects are returned and never checked.
Many of these are innocuous, but many of them are potentially
severe bugs. This change introduces facilities to permit
compile-time warning of unchecked `Status` and `StatusWith` usage
on clang compilers. It introduces an `ignore` function which is
useful to state that a specific "ignored status" case was
intentional. It is not presently an error, in clang builds, to
forget to check a `Status` -- this will come in a later commit.
This also introduces a `transitional_ignore` function, which allows
for easy continual auditing of the codebase for current "whitelisted"
unchecked-status instances. All present "ignored status" cases
have been marked `transitional_ignore`.
Diffstat (limited to 'src/mongo/executor')
7 files changed, 82 insertions, 61 deletions
diff --git a/src/mongo/executor/connection_pool_asio_integration_test.cpp b/src/mongo/executor/connection_pool_asio_integration_test.cpp index 6c60cd78ecd..da245a03fd4 100644 --- a/src/mongo/executor/connection_pool_asio_integration_test.cpp +++ b/src/mongo/executor/connection_pool_asio_integration_test.cpp @@ -108,9 +108,10 @@ TEST(ConnectionPoolASIO, TestPing) { RemoteCommandRequest request{ fixture.getServers()[0], "admin", BSON("ping" << 1), BSONObj(), nullptr}; net.startCommand( - makeCallbackHandle(), request, [&deferred](RemoteCommandResponse resp) { - deferred.emplace(std::move(resp)); - }); + makeCallbackHandle(), + request, + [&deferred](RemoteCommandResponse resp) { deferred.emplace(std::move(resp)); }) + .transitional_ignore(); ASSERT_OK(deferred.get().status); }); @@ -143,9 +144,10 @@ TEST(ConnectionPoolASIO, TestHostTimeoutRace) { Deferred<RemoteCommandResponse> deferred; RemoteCommandRequest request{ fixture.getServers()[0], "admin", BSON("ping" << 1), BSONObj(), nullptr}; - net.startCommand(makeCallbackHandle(), request, [&](RemoteCommandResponse resp) { - deferred.emplace(std::move(resp)); - }); + net.startCommand(makeCallbackHandle(), + request, + [&](RemoteCommandResponse resp) { deferred.emplace(std::move(resp)); }) + .transitional_ignore(); ASSERT_OK(deferred.get().status); sleepmillis(1); @@ -171,9 +173,10 @@ TEST(ConnectionPoolASIO, ConnSetupTimeout) { Deferred<RemoteCommandResponse> deferred; RemoteCommandRequest request{ fixture.getServers()[0], "admin", BSON("ping" << 1), BSONObj(), nullptr}; - net.startCommand(makeCallbackHandle(), request, [&](RemoteCommandResponse resp) { - deferred.emplace(std::move(resp)); - }); + net.startCommand(makeCallbackHandle(), + request, + [&](RemoteCommandResponse resp) { deferred.emplace(std::move(resp)); }) + .transitional_ignore(); ASSERT_EQ(deferred.get().status.code(), ErrorCodes::ExceededTimeLimit); } @@ -206,9 +209,10 @@ TEST(ConnectionPoolASIO, ConnRefreshHappens) { nullptr}; for (auto& deferred : 
deferreds) { - net.startCommand(makeCallbackHandle(), request, [&](RemoteCommandResponse resp) { - deferred.emplace(std::move(resp)); - }); + net.startCommand(makeCallbackHandle(), + request, + [&](RemoteCommandResponse resp) { deferred.emplace(std::move(resp)); }) + .transitional_ignore(); } for (auto& deferred : deferreds) { @@ -243,9 +247,10 @@ TEST(ConnectionPoolASIO, ConnRefreshSurvivesFailure) { RemoteCommandRequest request{ fixture.getServers()[0], "admin", BSON("ping" << 1), BSONObj(), nullptr}; - net.startCommand(makeCallbackHandle(), request, [&](RemoteCommandResponse resp) { - deferred.emplace(std::move(resp)); - }); + net.startCommand(makeCallbackHandle(), + request, + [&](RemoteCommandResponse resp) { deferred.emplace(std::move(resp)); }) + .transitional_ignore(); deferred.get(); @@ -301,11 +306,14 @@ TEST(ConnectionPoolASIO, ConnSetupSurvivesFailure) { << 3), BSONObj(), nullptr}; - net.startCommand(makeCallbackHandle(), request, [&](RemoteCommandResponse resp) { - if (!unfinished.subtractAndFetch(1)) { - condvar.notify_one(); - } - }); + net.startCommand(makeCallbackHandle(), + request, + [&](RemoteCommandResponse resp) { + if (!unfinished.subtractAndFetch(1)) { + condvar.notify_one(); + } + }) + .transitional_ignore(); unstarted.subtractAndFetch(1); } }); @@ -313,7 +321,7 @@ TEST(ConnectionPoolASIO, ConnSetupSurvivesFailure) { stdx::thread timerThrasher([&] { while (unstarted.load()) { - net.setAlarm(Date_t::now() + Seconds(1), [] {}); + net.setAlarm(Date_t::now() + Seconds(1), [] {}).transitional_ignore(); } }); diff --git a/src/mongo/executor/network_interface_asio_integration_fixture.cpp b/src/mongo/executor/network_interface_asio_integration_fixture.cpp index 76f0e451a71..718904b3acb 100644 --- a/src/mongo/executor/network_interface_asio_integration_fixture.cpp +++ b/src/mongo/executor/network_interface_asio_integration_fixture.cpp @@ -87,15 +87,18 @@ void NetworkInterfaceASIOIntegrationFixture::startCommand( const TaskExecutor::CallbackHandle& 
cbHandle, RemoteCommandRequest& request, StartCommandCB onFinish) { - net().startCommand(cbHandle, request, onFinish); + net().startCommand(cbHandle, request, onFinish).transitional_ignore(); } Deferred<RemoteCommandResponse> NetworkInterfaceASIOIntegrationFixture::runCommand( const TaskExecutor::CallbackHandle& cbHandle, RemoteCommandRequest& request) { Deferred<RemoteCommandResponse> deferred; - net().startCommand(cbHandle, request, [deferred](RemoteCommandResponse resp) mutable { - deferred.emplace(std::move(resp)); - }); + net() + .startCommand( + cbHandle, + request, + [deferred](RemoteCommandResponse resp) mutable { deferred.emplace(std::move(resp)); }) + .transitional_ignore(); return deferred; } diff --git a/src/mongo/executor/network_interface_mock.cpp b/src/mongo/executor/network_interface_mock.cpp index bf45981490b..32dd7f49e3d 100644 --- a/src/mongo/executor/network_interface_mock.cpp +++ b/src/mongo/executor/network_interface_mock.cpp @@ -308,7 +308,8 @@ void NetworkInterfaceMock::scheduleResponse(NetworkOperationIterator noi, // If no RemoteCommandResponse was returned (for example, on a simulated network error), then // do not attempt to run the metadata hook, since there is no returned metadata. 
if (_metadataHook && response.isOK()) { - _metadataHook->readReplyMetadata(noi->getRequest().target.toString(), response.metadata); + _metadataHook->readReplyMetadata(noi->getRequest().target.toString(), response.metadata) + .transitional_ignore(); } noi->setResponse(when, response); diff --git a/src/mongo/executor/network_interface_perf_test.cpp b/src/mongo/executor/network_interface_perf_test.cpp index 0ec9966dd56..1159844a06b 100644 --- a/src/mongo/executor/network_interface_perf_test.cpp +++ b/src/mongo/executor/network_interface_perf_test.cpp @@ -87,7 +87,7 @@ int timeNetworkTestMillis(std::size_t operations, NetworkInterface* net) { func = [&]() { RemoteCommandRequest request{ server, "admin", bsonObjPing, BSONObj(), nullptr, Milliseconds(-1)}; - net->startCommand(makeCallbackHandle(), request, callback); + net->startCommand(makeCallbackHandle(), request, callback).transitional_ignore(); }; func(); diff --git a/src/mongo/executor/network_interface_thread_pool.cpp b/src/mongo/executor/network_interface_thread_pool.cpp index f556b2aab35..1e26b02f2a9 100644 --- a/src/mongo/executor/network_interface_thread_pool.cpp +++ b/src/mongo/executor/network_interface_thread_pool.cpp @@ -132,11 +132,13 @@ void NetworkInterfaceThreadPool::consumeTasks(stdx::unique_lock<stdx::mutex> lk) if (!_registeredAlarm) { _registeredAlarm = true; lk.unlock(); - _net->setAlarm(_net->now(), [this] { - stdx::unique_lock<stdx::mutex> lk(_mutex); - _registeredAlarm = false; - consumeTasks(std::move(lk)); - }); + _net->setAlarm(_net->now(), + [this] { + stdx::unique_lock<stdx::mutex> lk(_mutex); + _registeredAlarm = false; + consumeTasks(std::move(lk)); + }) + .transitional_ignore(); } return; diff --git a/src/mongo/executor/task_executor_test_common.cpp b/src/mongo/executor/task_executor_test_common.cpp index 94984a32381..7532dc09569 100644 --- a/src/mongo/executor/task_executor_test_common.cpp +++ b/src/mongo/executor/task_executor_test_common.cpp @@ -252,8 +252,10 @@ 
EventChainAndWaitingTest::~EventChainAndWaitingTest() { } void EventChainAndWaitingTest::run() { - executor->onEvent(goEvent, - stdx::bind(&EventChainAndWaitingTest::onGo, this, stdx::placeholders::_1)); + executor + ->onEvent(goEvent, + stdx::bind(&EventChainAndWaitingTest::onGo, this, stdx::placeholders::_1)) + .status_with_transitional_ignore(); executor->signalEvent(goEvent); executor->waitForEvent(goEvent); executor->waitForEvent(event2); diff --git a/src/mongo/executor/thread_pool_task_executor.cpp b/src/mongo/executor/thread_pool_task_executor.cpp index dbcf465f8d8..f5a33b8e451 100644 --- a/src/mongo/executor/thread_pool_task_executor.cpp +++ b/src/mongo/executor/thread_pool_task_executor.cpp @@ -318,17 +318,20 @@ StatusWith<TaskExecutor::CallbackHandle> ThreadPoolTaskExecutor::scheduleWorkAt( return cbHandle; } lk.unlock(); - _net->setAlarm(when, [this, when, cbHandle] { - auto cbState = checked_cast<CallbackState*>(getCallbackFromHandle(cbHandle.getValue())); - if (cbState->canceled.load()) { - return; - } - stdx::unique_lock<stdx::mutex> lk(_mutex); - if (cbState->canceled.load()) { - return; - } - scheduleIntoPool_inlock(&_sleepersQueue, cbState->iter, std::move(lk)); - }); + _net->setAlarm(when, + [this, when, cbHandle] { + auto cbState = + checked_cast<CallbackState*>(getCallbackFromHandle(cbHandle.getValue())); + if (cbState->canceled.load()) { + return; + } + stdx::unique_lock<stdx::mutex> lk(_mutex); + if (cbState->canceled.load()) { + return; + } + scheduleIntoPool_inlock(&_sleepersQueue, cbState->iter, std::move(lk)); + }) + .transitional_ignore(); return cbHandle; } @@ -384,22 +387,24 @@ StatusWith<TaskExecutor::CallbackHandle> ThreadPoolTaskExecutor::scheduleRemoteC LOG(3) << "Scheduling remote command request: " << redact(scheduledRequest.toString()); lk.unlock(); _net->startCommand( - cbHandle.getValue(), - scheduledRequest, - [this, scheduledRequest, cbState, cb](const ResponseStatus& response) { - using std::swap; - CallbackFn newCb = [cb, 
scheduledRequest, response](const CallbackArgs& cbData) { - remoteCommandFinished(cbData, cb, scheduledRequest, response); - }; - stdx::unique_lock<stdx::mutex> lk(_mutex); - if (_inShutdown_inlock()) { - return; - } - LOG(3) << "Received remote response: " - << redact(response.isOK() ? response.toString() : response.status.toString()); - swap(cbState->callback, newCb); - scheduleIntoPool_inlock(&_networkInProgressQueue, cbState->iter, std::move(lk)); - }); + cbHandle.getValue(), + scheduledRequest, + [this, scheduledRequest, cbState, cb](const ResponseStatus& response) { + using std::swap; + CallbackFn newCb = [cb, scheduledRequest, response](const CallbackArgs& cbData) { + remoteCommandFinished(cbData, cb, scheduledRequest, response); + }; + stdx::unique_lock<stdx::mutex> lk(_mutex); + if (_inShutdown_inlock()) { + return; + } + LOG(3) << "Received remote response: " + << redact(response.isOK() ? response.toString() + : response.status.toString()); + swap(cbState->callback, newCb); + scheduleIntoPool_inlock(&_networkInProgressQueue, cbState->iter, std::move(lk)); + }) + .transitional_ignore(); return cbHandle; } |