author | Spencer T Brody <spencer@mongodb.com> | 2016-05-02 16:31:22 -0400
committer | Spencer T Brody <spencer@mongodb.com> | 2016-05-02 18:45:07 -0400
commit | 913ad63919332d7f512396286ea114aeb4fd9665 (patch)
tree | ea6f9e9dc67ba05bc96b4a5f3542e79bd2095aed /src/mongo/s
parent | d809a6221545cc3feaac4c69e5af372b3f82e53a (diff)
download | mongo-913ad63919332d7f512396286ea114aeb4fd9665.tar.gz
SERVER-23211 Remove all callers of ShardRegistry::runCommandOnConfigWithRetries
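The pattern repeated at every call site below is the same: drop ShardRegistry::runCommandOnConfigWithRetries(), which took an explicit set of retriable error codes (ShardRegistry::kNotMasterErrors or ShardRegistry::kAllRetriableErrors), and instead call Shard::runCommand() on the config shard with a declared Shard::RetryPolicy (kNotIdempotent, kIdempotent, or kNoRetry). The sketch below is a self-contained model of that contract using stand-in types, not the real MongoDB headers; the enum values mirror the patch, but the error-classification rules are illustrative assumptions.

// Self-contained model of the RetryPolicy contract (stand-in types, not the
// real MongoDB headers). Callers declare idempotency; the shard decides which
// error codes are worth retrying under that declaration.
#include <iostream>

enum class ErrorCode { OK, NotMaster, HostUnreachable, DuplicateKey };
enum class RetryPolicy { kNoRetry, kNotIdempotent, kIdempotent };

// Assumed classification for illustration: "not master" errors are safe to
// retry for any retryable command, while network-level errors are only safe
// when the command is idempotent (a retry cannot double-apply the write).
bool isRetriableError(ErrorCode code, RetryPolicy policy) {
    switch (policy) {
        case RetryPolicy::kNoRetry:
            return false;
        case RetryPolicy::kNotIdempotent:
            return code == ErrorCode::NotMaster;
        case RetryPolicy::kIdempotent:
            return code == ErrorCode::NotMaster || code == ErrorCode::HostUnreachable;
    }
    return false;
}

int main() {
    std::cout << std::boolalpha
              << isRetriableError(ErrorCode::HostUnreachable, RetryPolicy::kNotIdempotent) << '\n'  // false
              << isRetriableError(ErrorCode::HostUnreachable, RetryPolicy::kIdempotent) << '\n';    // true
}

Centralizing the classification behind the policy enum means a call site can no longer pick an error set that silently disagrees with the idempotency of its command.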
Diffstat (limited to 'src/mongo/s')
-rw-r--r-- | src/mongo/s/catalog/replset/catalog_manager_replica_set.cpp | 147
-rw-r--r-- | src/mongo/s/catalog/replset/catalog_manager_replica_set.h | 2
-rw-r--r-- | src/mongo/s/catalog/replset/dist_lock_catalog_impl.cpp | 116
-rw-r--r-- | src/mongo/s/client/shard.cpp | 4
-rw-r--r-- | src/mongo/s/client/shard.h | 16
-rw-r--r-- | src/mongo/s/client/shard_local.cpp | 2
-rw-r--r-- | src/mongo/s/client/shard_local.h | 4
-rw-r--r-- | src/mongo/s/client/shard_remote.cpp | 2
-rw-r--r-- | src/mongo/s/client/shard_remote.h | 4
9 files changed, 178 insertions, 119 deletions
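Several hunks below also replace the raw BSONObj command result with Shard::CommandResponse, which carries the reply body plus two pre-parsed statuses, and every converted call site checks them in a fixed order: the transport-level status first, then commandStatus, then writeConcernStatus, and only then is the body used. A minimal sketch of that unpacking order, with simplified stand-in types in place of mongo::Status and BSONObj:

// Stand-in model of Shard::CommandResponse and the layered check the
// converted call sites perform. Types here are simplified assumptions; the
// real struct holds a BSONObj plus two mongo::Status fields.
#include <iostream>
#include <optional>
#include <string>

struct Status {
    bool ok;
    std::string reason;
};

struct CommandResponse {
    std::string response;       // stands in for the BSONObj command reply
    Status commandStatus;       // did the command itself succeed?
    Status writeConcernStatus;  // was the write concern satisfied?
};

// Returns the reply body only if every layer succeeded; otherwise reports the
// first failing layer through 'out'. Transport failures would be caught even
// earlier, before a CommandResponse exists at all.
std::optional<std::string> unpack(const CommandResponse& r, Status* out) {
    if (!r.commandStatus.ok) {
        *out = r.commandStatus;
        return std::nullopt;
    }
    if (!r.writeConcernStatus.ok) {
        *out = r.writeConcernStatus;
        return std::nullopt;
    }
    *out = {true, ""};
    return r.response;
}

int main() {
    // Command succeeded but the majority write concern was not satisfied:
    CommandResponse r{"{ ok: 1 }", {true, ""}, {false, "waiting for replication timed out"}};
    Status s;
    if (auto body = unpack(r, &s)) {
        std::cout << *body << '\n';
    } else {
        std::cout << "failed: " << s.reason << '\n';  // write concern failure surfaces
    }
}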
diff --git a/src/mongo/s/catalog/replset/catalog_manager_replica_set.cpp b/src/mongo/s/catalog/replset/catalog_manager_replica_set.cpp
index e7e5bc9495d..8db4eeb4f5e 100644
--- a/src/mongo/s/catalog/replset/catalog_manager_replica_set.cpp
+++ b/src/mongo/s/catalog/replset/catalog_manager_replica_set.cpp
@@ -126,6 +126,37 @@ void toBatchError(const Status& status, BatchedCommandResponse* response) {
     response->setOk(false);
 }

+/**
+ * Takes the response from running a batch write command and writes the appropriate response into
+ * *batchResponse, while also returning the Status of the operation.
+ */
+Status _processBatchWriteResponse(StatusWith<Shard::CommandResponse> response,
+                                  BatchedCommandResponse* batchResponse) {
+    Status status(ErrorCodes::InternalError, "status not set");
+
+    if (!response.isOK()) {
+        status = response.getStatus();
+    } else if (!response.getValue().commandStatus.isOK()) {
+        status = response.getValue().commandStatus;
+    } else if (!response.getValue().writeConcernStatus.isOK()) {
+        status = response.getValue().writeConcernStatus;
+    } else {
+        string errmsg;
+        if (!batchResponse->parseBSON(response.getValue().response, &errmsg)) {
+            status = Status(ErrorCodes::FailedToParse,
+                            str::stream() << "Failed to parse config server response: " << errmsg);
+        } else {
+            status = batchResponse->toStatus();
+        }
+    }
+
+    if (!status.isOK()) {
+        toBatchError(status, batchResponse);
+    }
+
+    return status;
+}
+
 }  // namespace

 StatusWith<ShardType> CatalogManagerReplicaSet::_validateHostAsShard(
@@ -555,7 +586,7 @@ Status CatalogManagerReplicaSet::logAction(OperationContext* txn,
     if (_actionLogCollectionCreated.load() == 0) {
         Status result = _createCappedConfigCollection(
             txn, kActionLogCollectionName, kActionLogCollectionSizeMB);
-        if (result.isOK() || result == ErrorCodes::NamespaceExists) {
+        if (result.isOK()) {
             _actionLogCollectionCreated.store(1);
         } else {
             log() << "couldn't create config.actionlog collection:" << causedBy(result);
@@ -573,7 +604,7 @@ Status CatalogManagerReplicaSet::logChange(OperationContext* txn,
     if (_changeLogCollectionCreated.load() == 0) {
         Status result = _createCappedConfigCollection(
             txn, kChangeLogCollectionName, kChangeLogCollectionSizeMB);
-        if (result.isOK() || result == ErrorCodes::NamespaceExists) {
+        if (result.isOK()) {
             _changeLogCollectionCreated.store(1);
         } else {
             log() << "couldn't create config.changelog collection:" << causedBy(result);
@@ -1426,15 +1457,25 @@ bool CatalogManagerReplicaSet::runUserManagementWriteCommand(OperationContext* t
         cmdToRun = modifiedCmd.obj();
     }

-    auto response = grid.shardRegistry()->runCommandOnConfigWithRetries(
-        txn, dbname, cmdToRun, ShardRegistry::kNotMasterErrors);
+    auto response = Grid::get(txn)->shardRegistry()->getConfigShard()->runCommand(
+        txn,
+        ReadPreferenceSetting{ReadPreference::PrimaryOnly},
+        dbname,
+        cmdToRun,
+        Shard::RetryPolicy::kNotIdempotent);

     if (!response.isOK()) {
         return Command::appendCommandStatus(*result, response.getStatus());
     }
+    if (!response.getValue().commandStatus.isOK()) {
+        return Command::appendCommandStatus(*result, response.getValue().commandStatus);
+    }
+    if (!response.getValue().writeConcernStatus.isOK()) {
+        return Command::appendCommandStatus(*result, response.getValue().writeConcernStatus);
+    }

-    result->appendElements(response.getValue());
-    return getStatusFromCommandResult(response.getValue()).isOK();
+    result->appendElements(response.getValue().response);
+    return true;
 }

 bool CatalogManagerReplicaSet::runReadCommandForTest(OperationContext* txn,
@@ -1477,14 +1518,20 @@ Status CatalogManagerReplicaSet::applyChunkOpsDeprecated(OperationContext* txn,
     BSONObj cmd = BSON("applyOps" << updateOps << "preCondition" << preCondition
                                   << kWriteConcernField << kMajorityWriteConcern.toBSON());

-    auto response = grid.shardRegistry()->runCommandOnConfigWithRetries(
-        txn, "config", cmd, ShardRegistry::kAllRetriableErrors);
+    auto response = Grid::get(txn)->shardRegistry()->getConfigShard()->runCommand(
+        txn,
+        ReadPreferenceSetting{ReadPreference::PrimaryOnly},
+        "config",
+        cmd,
+        Shard::RetryPolicy::kIdempotent);

     if (!response.isOK()) {
         return response.getStatus();
     }

-    Status status = getStatusFromCommandResult(response.getValue());
+    Status status = response.getValue().commandStatus.isOK()
+        ? std::move(response.getValue().writeConcernStatus)
+        : std::move(response.getValue().commandStatus);

     if (MONGO_FAIL_POINT(failApplyChunkOps)) {
         status = Status(ErrorCodes::InternalError, "Failpoint 'failApplyChunkOps' generated error");
@@ -1517,7 +1564,7 @@ Status CatalogManagerReplicaSet::applyChunkOpsDeprecated(OperationContext* txn,
                    << "operation metadata: " << causedBy(chunkStatus)
                    << ". applyChunkOpsDeprecated failed to get confirmation "
                    << "of commit. Unable to save chunk ops. Command: " << cmd
-                   << ". Result: " << response.getValue();
+                   << ". Result: " << response.getValue().response;
         } else if (!newestChunk.empty()) {
             invariant(newestChunk.size() == 1);
             log() << "chunk operation commit confirmed";
@@ -1526,7 +1573,7 @@ Status CatalogManagerReplicaSet::applyChunkOpsDeprecated(OperationContext* txn,
             errMsg = str::stream() << "chunk operation commit failed: version "
                                    << lastChunkVersion.toString() << " doesn't exist in namespace"
                                    << nss << ". Unable to save chunk ops. Command: " << cmd
-                                   << ". Result: " << response.getValue();
+                                   << ". Result: " << response.getValue().response;
         }
         return Status(status.code(), errMsg);
     }
@@ -1551,14 +1598,13 @@ void CatalogManagerReplicaSet::writeConfigServerDirect(OperationContext* txn,
         return;
     }

-    _runBatchWriteCommand(txn, batchRequest, batchResponse, ShardRegistry::kNotMasterErrors);
+    _runBatchWriteCommand(txn, batchRequest, batchResponse, Shard::RetryPolicy::kNotIdempotent);
 }

-void CatalogManagerReplicaSet::_runBatchWriteCommand(
-    OperationContext* txn,
-    const BatchedCommandRequest& batchRequest,
-    BatchedCommandResponse* batchResponse,
-    const ShardRegistry::ErrorCodesSet& errorsToCheck) {
+void CatalogManagerReplicaSet::_runBatchWriteCommand(OperationContext* txn,
+                                                     const BatchedCommandRequest& batchRequest,
+                                                     BatchedCommandResponse* batchResponse,
+                                                     Shard::RetryPolicy retryPolicy) {
     const std::string dbname = batchRequest.getNS().db().toString();
     invariant(dbname == "config" || dbname == "admin");

@@ -1567,30 +1613,19 @@ void CatalogManagerReplicaSet::_runBatchWriteCommand(
     const BSONObj cmdObj = batchRequest.toBSON();

     for (int retry = 1; retry <= kMaxWriteRetry; ++retry) {
-        // runCommandOnConfigWithRetries already does its own retries based on the generic command
-        // errors. If this fails, we know that it has done all the retries that it could do so there
-        // is no need to retry anymore.
-        auto response =
-            grid.shardRegistry()->runCommandOnConfigWithRetries(txn, dbname, cmdObj, errorsToCheck);
-        if (!response.isOK()) {
-            toBatchError(response.getStatus(), batchResponse);
-            return;
-        }
-
-        string errmsg;
-        if (!batchResponse->parseBSON(response.getValue(), &errmsg)) {
-            toBatchError(
-                Status(ErrorCodes::FailedToParse,
-                       str::stream() << "Failed to parse config server response: " << errmsg),
-                batchResponse);
-            return;
-        }
-
-        // If one of the write operations failed (which is reported in the error details), see if
-        // this is a retriable error as well.
-        Status status = batchResponse->toStatus();
-        if (errorsToCheck.count(status.code()) && retry < kMaxWriteRetry) {
-            LOG(1) << "Command failed with retriable error and will be retried" << causedBy(status);
+        auto configShard = Grid::get(txn)->shardRegistry()->getConfigShard();
+        auto response = configShard->runCommand(
+            txn,
+            ReadPreferenceSetting{ReadPreference::PrimaryOnly},
+            dbname,
+            cmdObj,
+            Shard::RetryPolicy::kNoRetry);  // We're handling our own retries here.
+
+        Status status = _processBatchWriteResponse(response, batchResponse);
+        if (retry < kMaxWriteRetry && configShard->isRetriableError(status.code(), retryPolicy)) {
+            batchResponse->clear();
+            LOG(1) << "Batch write command failed with retriable error and will be retried"
+                   << causedBy(status);
             continue;
         }

@@ -1618,7 +1653,7 @@ Status CatalogManagerReplicaSet::insertConfigDocument(OperationContext* txn,

     for (int retry = 1; retry <= kMaxWriteRetry; retry++) {
         BatchedCommandResponse response;
-        _runBatchWriteCommand(txn, request, &response, ShardRegistry::kNotMasterErrors);
+        _runBatchWriteCommand(txn, request, &response, Shard::RetryPolicy::kNotIdempotent);

         Status status = response.toStatus();

@@ -1694,7 +1729,7 @@ StatusWith<bool> CatalogManagerReplicaSet::updateConfigDocument(OperationContext
     request.setWriteConcern(kMajorityWriteConcern.toBSON());

     BatchedCommandResponse response;
-    _runBatchWriteCommand(txn, request, &response, ShardRegistry::kAllRetriableErrors);
+    _runBatchWriteCommand(txn, request, &response, Shard::RetryPolicy::kIdempotent);

     Status status = response.toStatus();
     if (!status.isOK()) {
@@ -1724,7 +1759,7 @@ Status CatalogManagerReplicaSet::removeConfigDocuments(OperationContext* txn,
     request.setWriteConcern(kMajorityWriteConcern.toBSON());

     BatchedCommandResponse response;
-    _runBatchWriteCommand(txn, request, &response, ShardRegistry::kAllRetriableErrors);
+    _runBatchWriteCommand(txn, request, &response, Shard::RetryPolicy::kIdempotent);

     return response.toStatus();
 }
@@ -1814,13 +1849,31 @@ Status CatalogManagerReplicaSet::_createCappedConfigCollection(OperationContext*
                                                                StringData collName,
                                                                int cappedSize) {
     BSONObj createCmd = BSON("create" << collName << "capped" << true << "size" << cappedSize);
-    auto result = grid.shardRegistry()->runCommandOnConfigWithRetries(
-        txn, "config", createCmd, ShardRegistry::kAllRetriableErrors);
+
+    auto result = Grid::get(txn)->shardRegistry()->getConfigShard()->runCommand(
+        txn,
+        ReadPreferenceSetting{ReadPreference::PrimaryOnly},
+        "config",
+        createCmd,
+        Shard::RetryPolicy::kIdempotent);
+
     if (!result.isOK()) {
         return result.getStatus();
     }

-    return getStatusFromCommandResult(result.getValue());
+    if (!result.getValue().commandStatus.isOK()) {
+        if (result.getValue().commandStatus == ErrorCodes::NamespaceExists) {
+            if (result.getValue().writeConcernStatus.isOK()) {
+                return Status::OK();
+            } else {
+                return result.getValue().writeConcernStatus;
+            }
+        } else {
+            return result.getValue().commandStatus;
+        }
+    }
+
+    return result.getValue().writeConcernStatus;
 }

 StatusWith<long long> CatalogManagerReplicaSet::_runCountCommandOnConfig(OperationContext* txn,
diff --git a/src/mongo/s/catalog/replset/catalog_manager_replica_set.h b/src/mongo/s/catalog/replset/catalog_manager_replica_set.h
index ccabbb958c9..db0e7d686bd 100644
--- a/src/mongo/s/catalog/replset/catalog_manager_replica_set.h
+++ b/src/mongo/s/catalog/replset/catalog_manager_replica_set.h
@@ -259,7 +259,7 @@ private:
     void _runBatchWriteCommand(OperationContext* txn,
                                const BatchedCommandRequest& request,
                                BatchedCommandResponse* response,
-                               const ShardRegistry::ErrorCodesSet& errorsToCheck);
+                               Shard::RetryPolicy retryPolicy);

     /**
      * Helper method for running a count command against the config server with appropriate
diff --git a/src/mongo/s/catalog/replset/dist_lock_catalog_impl.cpp b/src/mongo/s/catalog/replset/dist_lock_catalog_impl.cpp
index fd8b4037100..f986d760678 100644
--- a/src/mongo/s/catalog/replset/dist_lock_catalog_impl.cpp
+++ b/src/mongo/s/catalog/replset/dist_lock_catalog_impl.cpp
@@ -75,13 +75,19 @@ const WriteConcernOptions kMajorityWriteConcern(WriteConcernOptions::kMajority,
  * the findAndModify command did not modify any document.
  * This also checks for errors in the response object.
  */
-StatusWith<BSONObj> extractFindAndModifyNewObj(const BSONObj& responseObj) {
-    auto cmdStatus = getStatusFromCommandResult(responseObj);
-
-    if (!cmdStatus.isOK()) {
-        return cmdStatus;
+StatusWith<BSONObj> extractFindAndModifyNewObj(StatusWith<Shard::CommandResponse> response) {
+    if (!response.isOK()) {
+        return response.getStatus();
+    }
+    if (!response.getValue().commandStatus.isOK()) {
+        return response.getValue().commandStatus;
+    }
+    if (!response.getValue().writeConcernStatus.isOK()) {
+        return response.getValue().writeConcernStatus;
     }

+    auto responseObj = std::move(response.getValue().response);
+
     if (const auto& newDocElem = responseObj[kFindAndModifyResponseResultDocField]) {
         if (newDocElem.isNull()) {
             return {ErrorCodes::LockStateChangeFailed,
@@ -95,7 +101,7 @@ StatusWith<BSONObj> extractFindAndModifyNewObj(const BSONObj& responseObj) {
                     << "'field, got: " << newDocElem};
         }

-        return newDocElem.Obj();
+        return newDocElem.Obj().getOwned();
     }

     return {ErrorCodes::UnsupportedFormat,
@@ -167,15 +173,14 @@ Status DistLockCatalogImpl::ping(OperationContext* txn, StringData processID, Da
     request.setUpsert(true);
     request.setWriteConcern(kMajorityWriteConcern);

-    auto resultStatus = _client->runCommandOnConfigWithRetries(
-        txn, _locksNS.db().toString(), request.toBSON(), ShardRegistry::kNotMasterErrors);
+    auto resultStatus =
+        _client->getConfigShard()->runCommand(txn,
+                                              ReadPreferenceSetting{ReadPreference::PrimaryOnly},
+                                              _locksNS.db().toString(),
+                                              request.toBSON(),
+                                              Shard::RetryPolicy::kNotIdempotent);

-    if (!resultStatus.isOK()) {
-        return resultStatus.getStatus();
-    }
-
-    BSONObj responseObj(resultStatus.getValue());
-    auto findAndModifyStatus = extractFindAndModifyNewObj(responseObj);
+    auto findAndModifyStatus = extractFindAndModifyNewObj(std::move(resultStatus));

     return findAndModifyStatus.getStatus();
 }
@@ -199,16 +204,14 @@ StatusWith<LocksType> DistLockCatalogImpl::grabLock(OperationContext* txn,
     request.setShouldReturnNew(true);
     request.setWriteConcern(kMajorityWriteConcern);

-    auto resultStatus = _client->runCommandOnConfigWithRetries(
-        txn, _locksNS.db().toString(), request.toBSON(), ShardRegistry::kNotMasterErrors);
-
-    if (!resultStatus.isOK()) {
-        return resultStatus.getStatus();
-    }
-
-    BSONObj responseObj(resultStatus.getValue());
+    auto resultStatus =
+        _client->getConfigShard()->runCommand(txn,
+                                              ReadPreferenceSetting{ReadPreference::PrimaryOnly},
+                                              _locksNS.db().toString(),
+                                              request.toBSON(),
+                                              Shard::RetryPolicy::kNotIdempotent);

-    auto findAndModifyStatus = extractFindAndModifyNewObj(responseObj);
+    auto findAndModifyStatus = extractFindAndModifyNewObj(std::move(resultStatus));
     if (!findAndModifyStatus.isOK()) {
         if (findAndModifyStatus == ErrorCodes::DuplicateKey) {
             // Another thread won the upsert race. Also see SERVER-14322.
@@ -253,16 +256,14 @@ StatusWith<LocksType> DistLockCatalogImpl::overtakeLock(OperationContext* txn,
     request.setShouldReturnNew(true);
     request.setWriteConcern(kMajorityWriteConcern);

-    auto resultStatus = _client->runCommandOnConfigWithRetries(
-        txn, _locksNS.db().toString(), request.toBSON(), ShardRegistry::kNotMasterErrors);
-
-    if (!resultStatus.isOK()) {
-        return resultStatus.getStatus();
-    }
-
-    BSONObj responseObj(resultStatus.getValue());
+    auto resultStatus =
+        _client->getConfigShard()->runCommand(txn,
+                                              ReadPreferenceSetting{ReadPreference::PrimaryOnly},
+                                              _locksNS.db().toString(),
+                                              request.toBSON(),
+                                              Shard::RetryPolicy::kNotIdempotent);

-    auto findAndModifyStatus = extractFindAndModifyNewObj(responseObj);
+    auto findAndModifyStatus = extractFindAndModifyNewObj(std::move(resultStatus));
     if (!findAndModifyStatus.isOK()) {
         return findAndModifyStatus.getStatus();
     }
@@ -285,17 +286,14 @@ Status DistLockCatalogImpl::unlock(OperationContext* txn, const OID& lockSession
                                           BSON("$set" << BSON(LocksType::state(LocksType::UNLOCKED))));
     request.setWriteConcern(kMajorityWriteConcern);

-    auto resultStatus = _client->runCommandOnConfigWithRetries(
-        txn, _locksNS.db().toString(), request.toBSON(), ShardRegistry::kAllRetriableErrors);
-
-    if (!resultStatus.isOK()) {
-        return resultStatus.getStatus();
-    }
-
-    BSONObj responseObj(resultStatus.getValue());
-
-    auto findAndModifyStatus = extractFindAndModifyNewObj(responseObj).getStatus();
+    auto resultStatus =
+        _client->getConfigShard()->runCommand(txn,
+                                              ReadPreferenceSetting{ReadPreference::PrimaryOnly},
+                                              _locksNS.db().toString(),
+                                              request.toBSON(),
+                                              Shard::RetryPolicy::kIdempotent);

+    auto findAndModifyStatus = extractFindAndModifyNewObj(std::move(resultStatus));
     if (findAndModifyStatus == ErrorCodes::LockStateChangeFailed) {
         // Did not modify any document, which implies that the lock already has a
         // a different owner. This is ok since it means that the objective of
@@ -303,7 +301,7 @@ Status DistLockCatalogImpl::unlock(OperationContext* txn, const OID& lockSession
         return Status::OK();
     }

-    return findAndModifyStatus;
+    return findAndModifyStatus.getStatus();
 }

 Status DistLockCatalogImpl::unlockAll(OperationContext* txn, const std::string& processID) {
@@ -322,16 +320,26 @@ Status DistLockCatalogImpl::unlockAll(OperationContext* txn, const std::string&

     BSONObj cmdObj = request.toBSON();

-    auto response = _client->runCommandOnConfigWithRetries(
-        txn, "config", cmdObj, ShardRegistry::kAllRetriableErrors);
+    auto response =
+        _client->getConfigShard()->runCommand(txn,
+                                              ReadPreferenceSetting{ReadPreference::PrimaryOnly},
+                                              _locksNS.db().toString(),
+                                              cmdObj,
+                                              Shard::RetryPolicy::kIdempotent);

     if (!response.isOK()) {
         return response.getStatus();
     }
+    if (!response.getValue().commandStatus.isOK()) {
+        return response.getValue().commandStatus;
+    }
+    if (!response.getValue().writeConcernStatus.isOK()) {
+        return response.getValue().writeConcernStatus;
+    }

     BatchedCommandResponse batchResponse;
     std::string errmsg;
-    if (!batchResponse.parseBSON(response.getValue(), &errmsg)) {
+    if (!batchResponse.parseBSON(response.getValue().response, &errmsg)) {
         return Status(ErrorCodes::FailedToParse,
                       str::stream()
                           << "Failed to parse config server response to batch request for "
@@ -428,16 +436,14 @@ Status DistLockCatalogImpl::stopPing(OperationContext* txn, StringData processId
         FindAndModifyRequest::makeRemove(_lockPingNS, BSON(LockpingsType::process() << processId));
     request.setWriteConcern(kMajorityWriteConcern);

-    auto resultStatus = _client->runCommandOnConfigWithRetries(
-        txn, _locksNS.db().toString(), request.toBSON(), ShardRegistry::kNotMasterErrors);
-
-    if (!resultStatus.isOK()) {
-        return resultStatus.getStatus();
-    }
-
-    BSONObj responseObj(resultStatus.getValue());
+    auto resultStatus =
+        _client->getConfigShard()->runCommand(txn,
+                                              ReadPreferenceSetting{ReadPreference::PrimaryOnly},
+                                              _locksNS.db().toString(),
+                                              request.toBSON(),
+                                              Shard::RetryPolicy::kNotIdempotent);

-    auto findAndModifyStatus = extractFindAndModifyNewObj(responseObj);
+    auto findAndModifyStatus = extractFindAndModifyNewObj(std::move(resultStatus));

     return findAndModifyStatus.getStatus();
 }
diff --git a/src/mongo/s/client/shard.cpp b/src/mongo/s/client/shard.cpp
index 036405581ca..98c7cdae768 100644
--- a/src/mongo/s/client/shard.cpp
+++ b/src/mongo/s/client/shard.cpp
@@ -80,7 +80,7 @@ StatusWith<Shard::CommandResponse> Shard::runCommand(OperationContext* txn,
         auto swCmdResponse = _runCommand(txn, readPref, dbName, cmdObj);
         auto commandStatus = _getEffectiveCommandStatus(swCmdResponse);

-        if (retry < kOnErrorNumRetries && _isRetriableError(commandStatus.code(), retryPolicy)) {
+        if (retry < kOnErrorNumRetries && isRetriableError(commandStatus.code(), retryPolicy)) {
             LOG(3) << "Command " << cmdObj << " failed with retriable error and will be retried"
                    << causedBy(commandStatus);
             continue;
@@ -102,7 +102,7 @@ StatusWith<Shard::QueryResponse> Shard::exhaustiveFindOnConfig(
         auto result = _exhaustiveFindOnConfig(txn, readPref, nss, query, sort, limit);

         if (retry < kOnErrorNumRetries &&
-            _isRetriableError(result.getStatus().code(), RetryPolicy::kIdempotent)) {
+            isRetriableError(result.getStatus().code(), RetryPolicy::kIdempotent)) {
             continue;
         }

diff --git a/src/mongo/s/client/shard.h b/src/mongo/s/client/shard.h
index 68ff1ba7f5f..f343e750b37 100644
--- a/src/mongo/s/client/shard.h
+++ b/src/mongo/s/client/shard.h
@@ -119,6 +119,14 @@ public:
     virtual std::string toString() const = 0;

     /**
+     * Returns whether a server operation which failed with the given error code should be retried
+     * (i.e. is safe to retry and has the potential to succeed next time). The 'options' argument
+     * describes whether the operation that generated the given code was idempotent, which affects
+     * which codes are safe to retry on.
+     */
+    virtual bool isRetriableError(ErrorCodes::Error code, RetryPolicy options) = 0;
+
+    /**
      * Runs a command against this shard and returns the BSON command response, as well as the
      * already-parsed out Status of the command response and write concern error (if present).
      * Retries failed operations according to the given "retryPolicy".
@@ -145,14 +153,6 @@ protected:
     Shard(const ShardId& id);

 private:
-    /**
-     * Returns whether a server operation which failed with the given error code should be retried
-     * (i.e. is safe to retry and has the potential to succeed next time). The 'options' argument
-     * describes whether the operation that generated the given code was idempotent, which affects
-     * which codes are safe to retry on.
-     */
-    virtual bool _isRetriableError(ErrorCodes::Error code, RetryPolicy options) = 0;
-
     virtual StatusWith<CommandResponse> _runCommand(OperationContext* txn,
                                                     const ReadPreferenceSetting& readPref,
                                                     const std::string& dbname,
diff --git a/src/mongo/s/client/shard_local.cpp b/src/mongo/s/client/shard_local.cpp
index b4ec88e636a..3249d2d88c1 100644
--- a/src/mongo/s/client/shard_local.cpp
+++ b/src/mongo/s/client/shard_local.cpp
@@ -71,7 +71,7 @@ std::string ShardLocal::toString() const {
     return getId() + ":<local>";
 }

-bool ShardLocal::_isRetriableError(ErrorCodes::Error code, RetryPolicy options) {
+bool ShardLocal::isRetriableError(ErrorCodes::Error code, RetryPolicy options) {
     if (options == RetryPolicy::kNoRetry) {
         return false;
     }
diff --git a/src/mongo/s/client/shard_local.h b/src/mongo/s/client/shard_local.h
index a5e990a7927..5d1455a72cf 100644
--- a/src/mongo/s/client/shard_local.h
+++ b/src/mongo/s/client/shard_local.h
@@ -54,9 +54,9 @@ public:

     std::string toString() const override;

-private:
-    bool _isRetriableError(ErrorCodes::Error code, RetryPolicy options) final;
+    bool isRetriableError(ErrorCodes::Error code, RetryPolicy options) final;

+private:
     StatusWith<Shard::CommandResponse> _runCommand(OperationContext* txn,
                                                    const ReadPreferenceSetting& unused,
                                                    const std::string& dbName,
diff --git a/src/mongo/s/client/shard_remote.cpp b/src/mongo/s/client/shard_remote.cpp
index cccc6daf38f..4af861d8e16 100644
--- a/src/mongo/s/client/shard_remote.cpp
+++ b/src/mongo/s/client/shard_remote.cpp
@@ -167,7 +167,7 @@ ShardRemote::ShardRemote(const ShardId& id,

 ShardRemote::~ShardRemote() = default;

-bool ShardRemote::_isRetriableError(ErrorCodes::Error code, RetryPolicy options) {
+bool ShardRemote::isRetriableError(ErrorCodes::Error code, RetryPolicy options) {
     if (options == RetryPolicy::kNoRetry) {
         return false;
     }
diff --git a/src/mongo/s/client/shard_remote.h b/src/mongo/s/client/shard_remote.h
index a2ec30a9e9d..1ef844d9a5b 100644
--- a/src/mongo/s/client/shard_remote.h
+++ b/src/mongo/s/client/shard_remote.h
@@ -68,9 +68,9 @@ public:

     std::string toString() const override;

-private:
-    bool _isRetriableError(ErrorCodes::Error code, RetryPolicy options) final;
+    bool isRetriableError(ErrorCodes::Error code, RetryPolicy options) final;

+private:
     /**
      * Returns the metadata that should be used when running commands against this shard with
      * the given read preference.
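A closing note on the shard.h, shard_local.h, and shard_remote.h hunks: _isRetriableError() is renamed to isRetriableError() and moved from private to public so that callers managing their own retry loops, as _runBatchWriteCommand() now does, can run each attempt with RetryPolicy::kNoRetry and consult the predicate themselves between attempts. A rough sketch of that loop shape, again with stand-in types and an assumed retry budget (kMaxAttempts here is illustrative, not a value taken from the patch):

// Caller-managed retry loop in the shape of _runBatchWriteCommand(): the shard
// runs each attempt with kNoRetry, and the caller decides whether to go again
// using the now-public isRetriableError().
#include <functional>
#include <iostream>

enum class RetryPolicy { kNoRetry, kNotIdempotent, kIdempotent };

struct Status {
    bool ok;
    bool transient;  // simplified stand-in for a retriable error code
};

// Illustrative predicate: retry only transient failures, and never under kNoRetry.
bool isRetriableError(const Status& s, RetryPolicy policy) {
    return policy != RetryPolicy::kNoRetry && !s.ok && s.transient;
}

constexpr int kMaxAttempts = 3;  // assumed budget for the sketch

Status runWithRetries(const std::function<Status()>& runOnce,
                      const std::function<void()>& clearResponse,
                      RetryPolicy policy) {
    Status status{false, false};
    for (int attempt = 1; attempt <= kMaxAttempts; ++attempt) {
        status = runOnce();  // each attempt itself runs with kNoRetry
        if (attempt < kMaxAttempts && isRetriableError(status, policy)) {
            clearResponse();  // discard the failed attempt's partial response
            continue;
        }
        break;
    }
    return status;
}

int main() {
    int calls = 0;
    auto status = runWithRetries(
        [&] { return ++calls < 3 ? Status{false, true} : Status{true, false}; },
        [] { std::cout << "clearing partial response\n"; },
        RetryPolicy::kIdempotent);
    std::cout << "attempts: " << calls << ", ok: " << status.ok << '\n';  // attempts: 3, ok: 1
}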