Diffstat (limited to 'src/mongo/s/async_requests_sender.cpp')
 src/mongo/s/async_requests_sender.cpp | 97
 1 file changed, 48 insertions(+), 49 deletions(-)
diff --git a/src/mongo/s/async_requests_sender.cpp b/src/mongo/s/async_requests_sender.cpp
index 045404c4fad..0eba85b1893 100644
--- a/src/mongo/s/async_requests_sender.cpp
+++ b/src/mongo/s/async_requests_sender.cpp
@@ -151,9 +151,10 @@ AsyncRequestsSender::RemoteData::RemoteData(AsyncRequestsSender* ars,
BSONObj cmdObj)
: _ars(ars), _shardId(std::move(shardId)), _cmdObj(std::move(cmdObj)) {}
-std::shared_ptr<Shard> AsyncRequestsSender::RemoteData::getShard() {
- // TODO: Pass down an OperationContext* to use here.
- return Grid::get(getGlobalServiceContext())->shardRegistry()->getShardNoReload(_shardId);
+SemiFuture<std::shared_ptr<Shard>> AsyncRequestsSender::RemoteData::getShard() noexcept {
+ return Grid::get(getGlobalServiceContext())
+ ->shardRegistry()
+ ->getShard(*_ars->_subBaton, _shardId);
}
void AsyncRequestsSender::RemoteData::executeRequest() {
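For orientation, a minimal sketch (not part of this patch) of how the future-returning getShard() is consumed: the caller binds an executor with thenRunOn() before attaching continuations, and a failed registry lookup now surfaces as an error Status on the future rather than as a null shared_ptr. The `executor` below is a hypothetical stand-in for the ARS sub-baton:

    // Illustrative sketch only; `executor` stands in for *_ars->_subBaton.
    getShard()
        .thenRunOn(executor)  // choose where the continuation runs
        .then([](std::shared_ptr<Shard> shard) {
            // Lookup succeeded; the shard is valid here.
        })
        .getAsync([](Status status) {
            // Errors (e.g. ShardNotFound) arrive here, not as a nullptr.
        });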
@@ -173,7 +174,12 @@ void AsyncRequestsSender::RemoteData::executeRequest() {
auto AsyncRequestsSender::RemoteData::scheduleRequest()
-> SemiFuture<RemoteCommandOnAnyCallbackArgs> {
- return resolveShardIdToHostAndPorts(_ars->_readPreference)
+ return getShard()
+ .thenRunOn(*_ars->_subBaton)
+ .then([this](auto&& shard) {
+ return shard->getTargeter()->findHosts(_ars->_readPreference,
+ CancellationToken::uncancelable());
+ })
.thenRunOn(*_ars->_subBaton)
.then([this](auto&& hostAndPorts) {
_shardHostAndPort.emplace(hostAndPorts.front());
@@ -183,17 +189,6 @@ auto AsyncRequestsSender::RemoteData::scheduleRequest()
.semi();
}
-SemiFuture<std::vector<HostAndPort>> AsyncRequestsSender::RemoteData::resolveShardIdToHostAndPorts(
- const ReadPreferenceSetting& readPref) {
- const auto shard = getShard();
- if (!shard) {
- return Status(ErrorCodes::ShardNotFound,
- str::stream() << "Could not find shard " << _shardId);
- }
-
- return shard->getTargeter()->findHosts(readPref, CancellationToken::uncancelable());
-}
-
auto AsyncRequestsSender::RemoteData::scheduleRemoteCommand(std::vector<HostAndPort>&& hostAndPorts)
-> SemiFuture<RemoteCommandOnAnyCallbackArgs> {
hangBeforeSchedulingRemoteCommand.executeIf(
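The deleted resolveShardIdToHostAndPorts() folded an explicit null-shard check into a synchronous helper; with getShard() returning a SemiFuture, that check is subsumed by ordinary error propagation, since an errored future skips its .then() continuations. A hedged sketch of that behavior, again using a hypothetical `executor`:

    // Illustrative only: an error Status short-circuits the chain, making
    // a per-call-site ShardNotFound branch unnecessary.
    auto failed = Future<std::shared_ptr<Shard>>::makeReady(
                      Status(ErrorCodes::ShardNotFound, "Could not find shard"))
                      .semi();
    std::move(failed)
        .thenRunOn(executor)
        .then([](std::shared_ptr<Shard>) { /* skipped on error */ })
        .getAsync([](Status status) {
            invariant(status == ErrorCodes::ShardNotFound);
        });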
@@ -259,43 +254,47 @@ auto AsyncRequestsSender::RemoteData::handleResponse(RemoteCommandOnAnyCallbackA
}
// There was an error with either the response or the command.
- auto shard = getShard();
- if (!shard) {
- uasserted(ErrorCodes::ShardNotFound, str::stream() << "Could not find shard " << _shardId);
- } else {
- std::vector<HostAndPort> failedTargets;
-
- if (rcr.response.target) {
- failedTargets = {*rcr.response.target};
- } else {
- failedTargets = rcr.request.target;
- }
+ return getShard()
+ .thenRunOn(*_ars->_subBaton)
+ .then([this, status = std::move(status), rcr = std::move(rcr)](
+ std::shared_ptr<mongo::Shard>&& shard) {
+ std::vector<HostAndPort> failedTargets;
- shard->updateReplSetMonitor(failedTargets.front(), status);
- bool isStartingTransaction = _cmdObj.getField("startTransaction").booleanSafe();
- if (!_ars->_stopRetrying && shard->isRetriableError(status.code(), _ars->_retryPolicy) &&
- _retryCount < kMaxNumFailedHostRetryAttempts && !isStartingTransaction) {
-
- LOGV2_DEBUG(4615637,
- 1,
- "Command to remote {shardId} for hosts {hosts} failed with retryable error "
- "{error} and will be retried",
- "Command to remote shard failed with retryable error and will be retried",
- "shardId"_attr = _shardId,
- "hosts"_attr = failedTargets,
- "error"_attr = redact(status));
- ++_retryCount;
- _shardHostAndPort.reset();
- // retry through recursion
- return scheduleRequest();
- }
- }
+ if (rcr.response.target) {
+ failedTargets = {*rcr.response.target};
+ } else {
+ failedTargets = rcr.request.target;
+ }
- // Status' in the response.status field that aren't retried get converted to top level errors
- uassertStatusOK(rcr.response.status);
+ shard->updateReplSetMonitor(failedTargets.front(), status);
+ bool isStartingTransaction = _cmdObj.getField("startTransaction").booleanSafe();
+ if (!_ars->_stopRetrying &&
+ shard->isRetriableError(status.code(), _ars->_retryPolicy) &&
+ _retryCount < kMaxNumFailedHostRetryAttempts && !isStartingTransaction) {
+
+ LOGV2_DEBUG(
+ 4615637,
+ 1,
+ "Command to remote {shardId} for hosts {hosts} failed with retryable error "
+ "{error} and will be retried",
+ "Command to remote shard failed with retryable error and will be retried",
+ "shardId"_attr = _shardId,
+ "hosts"_attr = failedTargets,
+ "error"_attr = redact(status));
+ ++_retryCount;
+ _shardHostAndPort.reset();
+ // retry through recursion
+ return scheduleRequest();
+ }
+
+            // Statuses in the response.status field that aren't retried get converted to
+            // top-level errors
+ uassertStatusOK(rcr.response.status);
- // We're not okay (on the remote), but still not going to retry
- return std::move(rcr);
+ // We're not okay (on the remote), but still not going to retry
+ return Future<RemoteCommandOnAnyCallbackArgs>::makeReady(std::move(rcr)).semi();
+ })
+ .semi();
};
} // namespace mongo
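The "retry through recursion" comment above names a common shape for future-based retries: a future-returning function re-invokes itself while the error is retriable and the attempt budget holds. A generic, hypothetical sketch (sendOnce(), isRetriable(), Response, executor, and kMaxAttempts are all stand-ins, not ARS code):

    // Hypothetical sketch of retry-through-recursion over MongoDB futures.
    SemiFuture<Response> sendWithRetry(int attempt) {
        return sendOnce()
            .thenRunOn(executor)
            .onError([attempt](Status status) -> SemiFuture<Response> {
                if (isRetriable(status) && attempt < kMaxAttempts) {
                    // Re-enter, exactly as scheduleRequest() does above.
                    return sendWithRetry(attempt + 1);
                }
                // Out of retries: surface the error to the caller.
                return Future<Response>::makeReady(std::move(status)).semi();
            })
            .semi();
    }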