22 files changed, 207 insertions, 242 deletions
diff --git a/src/mongo/client/connpool.h b/src/mongo/client/connpool.h
index 509965971ff..586f1958319 100644
--- a/src/mongo/client/connpool.h
+++ b/src/mongo/client/connpool.h
@@ -51,8 +51,6 @@ struct ConnectionPoolStats;
  * thread safety is handled by DBConnectionPool
  */
 class PoolForHost {
-    MONGO_DISALLOW_COPYING(PoolForHost);
-
 public:
     // Sentinel value indicating pool has no cleanup limit
     static const int kPoolSizeUnlimited;
@@ -64,6 +62,16 @@ public:
           _maxPoolSize(kPoolSizeUnlimited),
           _checkedOut(0) {}
 
+    PoolForHost(const PoolForHost& other)
+        : _created(other._created),
+          _minValidCreationTimeMicroSec(other._minValidCreationTimeMicroSec),
+          _type(other._type),
+          _maxPoolSize(other._maxPoolSize),
+          _checkedOut(other._checkedOut) {
+        verify(_created == 0);
+        verify(other._pool.size() == 0);
+    }
+
     ~PoolForHost();
 
     /**
diff --git a/src/mongo/client/dbclient.cpp b/src/mongo/client/dbclient.cpp
index a2aff7de7b1..d8199e3d315 100644
--- a/src/mongo/client/dbclient.cpp
+++ b/src/mongo/client/dbclient.cpp
@@ -1432,13 +1432,12 @@ void DBClientConnection::handleNotMasterResponse(const BSONElement& elemToCheck)
         return;
     }
 
+    MONGO_LOG_COMPONENT(1, logger::LogComponent::kReplication)
+        << "got not master from: " << _serverAddress << " of repl set: " << _parentReplSetName;
+
     ReplicaSetMonitorPtr monitor = ReplicaSetMonitor::get(_parentReplSetName);
     if (monitor) {
-        monitor->failedHost(_serverAddress,
-                            {ErrorCodes::NotMaster,
-                             str::stream() << "got not master from: " << _serverAddress
-                                           << " of repl set: "
-                                           << _parentReplSetName});
+        monitor->failedHost(_serverAddress);
     }
 
     _failed = true;
diff --git a/src/mongo/client/dbclient_rs.cpp b/src/mongo/client/dbclient_rs.cpp
index 6261fa093c3..bd8ec5863c9 100644
--- a/src/mongo/client/dbclient_rs.cpp
+++ b/src/mongo/client/dbclient_rs.cpp
@@ -80,11 +80,6 @@ public:
 } _populateReadPrefSecOkCmdList;
 
 /**
- * Maximum number of retries to make for auto-retry logic when performing a slave ok operation.
- */
-const size_t MAX_RETRY = 3;
-
-/**
  * Extracts the read preference settings from the query document. Note that this method
  * assumes that the query is ok for secondaries so it defaults to
  * ReadPreference::SecondaryPreferred when nothing is specified. Supports the following
@@ -129,13 +124,13 @@ ReadPreferenceSetting* _extractReadPref(const BSONObj& query, int queryOptions)
         : mongo::ReadPreference::PrimaryOnly;
     return new ReadPreferenceSetting(pref, TagSet());
 }
-
 }  // namespace
 
 // --------------------------------
 // ----- DBClientReplicaSet ---------
 // --------------------------------
 
+const size_t DBClientReplicaSet::MAX_RETRY = 3;
 bool DBClientReplicaSet::_authPooledSecondaryConn = true;
 
 DBClientReplicaSet::DBClientReplicaSet(const string& name,
@@ -298,8 +293,7 @@ DBClientConnection* DBClientReplicaSet::checkMaster() {
         if (!_master->isFailed())
             return _master.get();
 
-        monitor->failedHost(
-            _masterHost, {ErrorCodes::fromInt(40332), "Last known master host cannot be reached"});
+        monitor->failedHost(_masterHost);
         h = monitor->getMasterOrUassert();  // old master failed, try again.
     }
 
@@ -321,11 +315,13 @@ DBClientConnection* DBClientReplicaSet::checkMaster() {
     }
 
     if (newConn == NULL || !errmsg.empty()) {
-        const std::string message = str::stream() << "can't connect to new replica set master ["
-                                                  << _masterHost.toString() << "]"
-                                                  << (errmsg.empty() ? "" : ", err: ") << errmsg;
-        monitor->failedHost(_masterHost, {ErrorCodes::fromInt(40333), message});
-        uasserted(ErrorCodes::FailedToSatisfyReadPreference, message);
+        monitor->failedHost(_masterHost);
+        uasserted(ErrorCodes::FailedToSatisfyReadPreference,
+                  str::stream() << "can't connect to new replica set master ["
+                                << _masterHost.toString()
+                                << "]"
+                                << (errmsg.empty() ? "" : ", err: ")
+                                << errmsg);
     }
 
     resetMaster();
@@ -353,8 +349,7 @@ bool DBClientReplicaSet::checkLastHost(const ReadPreferenceSetting* readPref) {
     // Make sure we don't think the host is down.
     if (_lastSlaveOkConn->isFailed() || !_getMonitor()->isHostUp(_lastSlaveOkHost)) {
-        _invalidateLastSlaveOkCache(
-            {ErrorCodes::fromInt(40334), "Last slave connection is no longer available"});
+        invalidateLastSlaveOkCache();
         return false;
     }
 
@@ -380,7 +375,7 @@ void DBClientReplicaSet::logoutAll(DBClientConnection* conn) {
             conn->logout(i->first, response);
         } catch (const UserException& ex) {
             warning() << "Failed to logout: " << conn->getServerAddress() << " on db: " << i->first
-                      << causedBy(redact(ex));
+                      << endl;
         }
     }
 }
@@ -453,13 +448,13 @@ void DBClientReplicaSet::_auth(const BSONObj& params) {
                 throw;
             }
 
-            const Status status = ex.toStatus();
-            lastNodeStatus = {status.code(),
-                              str::stream() << "can't authenticate against replica set node "
-                                            << _lastSlaveOkHost
-                                            << ": "
-                                            << status.reason()};
-            _invalidateLastSlaveOkCache(lastNodeStatus);
+            StringBuilder errMsgB;
+            errMsgB << "can't authenticate against replica set node "
+                    << _lastSlaveOkHost.toString();
+            lastNodeStatus = ex.toStatus(errMsgB.str());
+
+            LOG(1) << lastNodeStatus.reason() << endl;
+            invalidateLastSlaveOkCache();
         }
     }
 
@@ -542,12 +537,14 @@ unique_ptr<DBClientCursor> DBClientReplicaSet::query(const string& ns,
                     ns, query, nToReturn, nToSkip, fieldsToReturn, queryOptions, batchSize);
 
                 return checkSlaveQueryResult(std::move(cursor));
-            } catch (const DBException& ex) {
-                const Status status = ex.toStatus();
-                _invalidateLastSlaveOkCache(
-                    {status.code(),
-                     str::stream() << "can't query replica set node " << _lastSlaveOkHost << ": "
-                                   << status.reason()});
+            } catch (const DBException& dbExcep) {
+                StringBuilder errMsgBuilder;
+                errMsgBuilder << "can't query replica set node " << _lastSlaveOkHost.toString()
+                              << ": " << causedBy(redact(dbExcep));
+                lastNodeErrMsg = errMsgBuilder.str();
+
+                LOG(1) << lastNodeErrMsg << endl;
+                invalidateLastSlaveOkCache();
             }
         }
 
@@ -592,13 +589,14 @@ BSONObj DBClientReplicaSet::findOne(const string& ns,
                 }
 
                 return conn->findOne(ns, query, fieldsToReturn, queryOptions);
-            } catch (const DBException& ex) {
-                const Status status = ex.toStatus();
-                _invalidateLastSlaveOkCache({status.code(),
-                                             str::stream() << "can't findone replica set node "
-                                                           << _lastSlaveOkHost.toString()
-                                                           << ": "
-                                                           << status.reason()});
+            } catch (const DBException& dbExcep) {
+                StringBuilder errMsgBuilder;
+                errMsgBuilder << "can't findone replica set node " << _lastSlaveOkHost.toString()
+                              << ": " << causedBy(redact(dbExcep));
+                lastNodeErrMsg = errMsgBuilder.str();
+
+                LOG(1) << lastNodeErrMsg << endl;
+                invalidateLastSlaveOkCache();
             }
         }
 
@@ -625,11 +623,10 @@ void DBClientReplicaSet::killCursor(long long cursorID) {
 }
 
 void DBClientReplicaSet::isntMaster() {
-    // Can't use _getMonitor because that will create a new monitor from the cached seed if the
-    // monitor doesn't exist.
-    _rsm->failedHost(
-        _masterHost,
-        {ErrorCodes::NotMaster, str::stream() << "got not master for: " << _masterHost});
+    log() << "got not master for: " << _masterHost << endl;
+    // Can't use _getMonitor because that will create a new monitor from the cached seed if
+    // the monitor doesn't exist.
+    _rsm->failedHost(_masterHost);
 
     resetMaster();
 }
@@ -659,11 +656,9 @@ unique_ptr<DBClientCursor> DBClientReplicaSet::checkSlaveQueryResult(
 }
 
 void DBClientReplicaSet::isntSecondary() {
+    log() << "slave no longer has secondary status: " << _lastSlaveOkHost << endl;
     // Failover to next slave
-    _getMonitor()->failedHost(
-        _lastSlaveOkHost,
-        {ErrorCodes::NotMasterOrSecondary,
-         str::stream() << "slave no longer has secondary status: " << _lastSlaveOkHost});
+    _getMonitor()->failedHost(_lastSlaveOkHost);
 
     resetSlaveOkConn();
 }
@@ -779,13 +774,15 @@ void DBClientReplicaSet::say(Message& toSend, bool isRetry, string* actualServer
                 _lazyState._lastOp = lastOp;
                 _lazyState._secondaryQueryOk = true;
                 _lazyState._lastClient = conn;
-            } catch (const DBException& e) {
-                const Status status = e.toStatus();
-                _invalidateLastSlaveOkCache({status.code(),
-                                             str::stream() << "can't callLazy replica set node "
-                                                           << _lastSlaveOkHost.toString()
-                                                           << ": "
-                                                           << status.reason()});
+            } catch (const DBException& DBExcep) {
+                StringBuilder errMsgBuilder;
+                errMsgBuilder << "can't callLazy replica set node "
+                              << _lastSlaveOkHost.toString() << ": "
+                              << causedBy(redact(DBExcep));
+                lastNodeErrMsg = errMsgBuilder.str();
+
+                LOG(1) << lastNodeErrMsg << endl;
+                invalidateLastSlaveOkCache();
 
                 continue;
             }
@@ -955,10 +952,10 @@ DBClientReplicaSet::runCommandWithMetadataAndTarget(StringData database,
             return std::make_tuple(
                 conn->runCommandWithMetadata(database, command, metadata, commandArgs), conn);
         } catch (const DBException& ex) {
-            _invalidateLastSlaveOkCache(ex.toStatus());
+            log() << exceptionToStatus();
+            invalidateLastSlaveOkCache();
         }
     }
-
     uasserted(ErrorCodes::NodeNotFound,
               str::stream() << "Could not satisfy $readPreference of '" << readPref.toBSON() << "' "
                             << "while attempting to run command "
@@ -1001,15 +998,14 @@ bool DBClientReplicaSet::call(Message& toSend,
                 }
 
                 return conn->call(toSend, response, assertOk, nullptr);
-            } catch (const DBException& ex) {
+            } catch (const DBException& dbExcep) {
+                LOG(1) << "can't call replica set node " << _lastSlaveOkHost << ": "
+                       << causedBy(redact(dbExcep));
+
                 if (actualServer)
                     *actualServer = "";
 
-                const Status status = ex.toStatus();
-                _invalidateLastSlaveOkCache(
-                    {status.code(),
-                     str::stream() << "can't call replica set node " << _lastSlaveOkHost << ": "
-                                   << status.reason()});
+                invalidateLastSlaveOkCache();
             }
         }
 
@@ -1044,11 +1040,12 @@ bool DBClientReplicaSet::call(Message& toSend,
     return true;
 }
 
-void DBClientReplicaSet::_invalidateLastSlaveOkCache(const Status& status) {
-    // This is not wrapped in with if (_lastSlaveOkConn && _lastSlaveOkConn->isFailed()) because
-    // there are certain exceptions that will not make the connection be labeled as failed. For
-    // example, asserts 13079, 13080, 16386
-    _getMonitor()->failedHost(_lastSlaveOkHost, status);
+void DBClientReplicaSet::invalidateLastSlaveOkCache() {
+    /* This is not wrapped in with if (_lastSlaveOkConn && _lastSlaveOkConn->isFailed())
+     * because there are certain exceptions that will not make the connection be labeled
+     * as failed. For example, asserts 13079, 13080, 16386
+     */
+    _getMonitor()->failedHost(_lastSlaveOkHost);
     resetSlaveOkConn();
 }
 
diff --git a/src/mongo/client/dbclient_rs.h b/src/mongo/client/dbclient_rs.h
index 02e9db27f65..17f25904c25 100644
--- a/src/mongo/client/dbclient_rs.h
+++ b/src/mongo/client/dbclient_rs.h
@@ -269,10 +269,9 @@ private:
     bool checkLastHost(const ReadPreferenceSetting* readPref);
 
     /**
-     * Destroys all cached information about the last slaveOk operation and reports the host as
-     * failed in the replica set monitor with the specified 'status'.
+     * Destroys all cached information about the last slaveOk operation.
      */
-    void _invalidateLastSlaveOkCache(const Status& status);
+    void invalidateLastSlaveOkCache();
 
     void _authConnection(DBClientConnection* conn);
 
@@ -292,6 +291,12 @@ private:
      */
    void resetSlaveOkConn();
 
+    /**
+     * Maximum number of retries to make for auto-retry logic when performing a slave ok
+     * operation.
+     */
+    static const size_t MAX_RETRY;
+
     // TODO: remove this when processes other than mongos uses the driver version.
     static bool _authPooledSecondaryConn;
 
diff --git a/src/mongo/client/dbclient_rs_test.cpp b/src/mongo/client/dbclient_rs_test.cpp
index b900070e755..e101695cdc8 100644
--- a/src/mongo/client/dbclient_rs_test.cpp
+++ b/src/mongo/client/dbclient_rs_test.cpp
@@ -691,8 +691,7 @@ TEST_F(TaggedFiveMemberRS, ConnShouldNotPinIfHostMarkedAsFailed) {
     // This is the only difference from ConnShouldPinIfSameSettings which tests that we *do* pin
     // in if the host is still marked as up. Note that this only notifies the RSM, and does not
     // directly effect the DBClientRS.
-    ReplicaSetMonitor::get(replSet->getSetName())
-        ->failedHost(HostAndPort(dest), {ErrorCodes::InternalError, "Test error"});
+    ReplicaSetMonitor::get(replSet->getSetName())->failedHost(HostAndPort(dest));
 
     {
         Query query;
diff --git a/src/mongo/client/remote_command_targeter.h b/src/mongo/client/remote_command_targeter.h
index 98c6fa0c72c..ca751be0dec 100644
--- a/src/mongo/client/remote_command_targeter.h
+++ b/src/mongo/client/remote_command_targeter.h
@@ -93,18 +93,18 @@ public:
     }
 
     /**
-     * Reports to the targeter that a 'status' indicating a not master error was received when
-     * communicating with 'host', and so it should update its bookkeeping to avoid giving out the
-     * host again on a subsequent request for the primary.
+     * Reports to the targeter that a NotMaster response was received when communicating with
+     * "host', and so it should update its bookkeeping to avoid giving out the host again on a
+     * subsequent request for the primary.
      */
-    virtual void markHostNotMaster(const HostAndPort& host, const Status& status) = 0;
+    virtual void markHostNotMaster(const HostAndPort& host) = 0;
 
     /**
-     * Reports to the targeter that a 'status' indicating a network error was received when trying
-     * to communicate with 'host', and so it should update its bookkeeping to avoid giving out the
-     * host again on a subsequent request for the primary.
+     * Similar to markHostNotMaster(). Reports to the targeter that a HostUnreachable response was
+     * received when communicating with "host". The targeter should update its bookkeeping to avoid
+     * giving out the same host on a subsequent request.
      */
-    virtual void markHostUnreachable(const HostAndPort& host, const Status& status) = 0;
+    virtual void markHostUnreachable(const HostAndPort& host) = 0;
 
 protected:
     RemoteCommandTargeter() = default;
diff --git a/src/mongo/client/remote_command_targeter_factory_mock.cpp b/src/mongo/client/remote_command_targeter_factory_mock.cpp
index 41cf0949f61..3d30edf7a32 100644
--- a/src/mongo/client/remote_command_targeter_factory_mock.cpp
+++ b/src/mongo/client/remote_command_targeter_factory_mock.cpp
@@ -57,12 +57,12 @@ public:
         return _mock->findHostWithMaxWait(readPref, maxWait);
     }
 
-    void markHostNotMaster(const HostAndPort& host, const Status& status) override {
-        _mock->markHostNotMaster(host, status);
+    void markHostNotMaster(const HostAndPort& host) override {
+        _mock->markHostNotMaster(host);
     }
 
-    void markHostUnreachable(const HostAndPort& host, const Status& status) override {
-        _mock->markHostUnreachable(host, status);
+    void markHostUnreachable(const HostAndPort& host) override {
+        _mock->markHostUnreachable(host);
     }
 
 private:
diff --git a/src/mongo/client/remote_command_targeter_mock.cpp b/src/mongo/client/remote_command_targeter_mock.cpp
index b216a717a29..617efb699fc 100644
--- a/src/mongo/client/remote_command_targeter_mock.cpp
+++ b/src/mongo/client/remote_command_targeter_mock.cpp
@@ -63,10 +63,9 @@ StatusWith<HostAndPort> RemoteCommandTargeterMock::findHostWithMaxWait(
     return _findHostReturnValue;
 }
 
-void RemoteCommandTargeterMock::markHostNotMaster(const HostAndPort& host, const Status& status) {}
+void RemoteCommandTargeterMock::markHostNotMaster(const HostAndPort& host) {}
 
-void RemoteCommandTargeterMock::markHostUnreachable(const HostAndPort& host, const Status& status) {
-}
+void RemoteCommandTargeterMock::markHostUnreachable(const HostAndPort& host) {}
 
 void RemoteCommandTargeterMock::setConnectionStringReturnValue(const ConnectionString returnValue) {
     _connectionStringReturnValue = std::move(returnValue);
diff --git a/src/mongo/client/remote_command_targeter_mock.h b/src/mongo/client/remote_command_targeter_mock.h
index 23dbb92a6ba..6183a635f61 100644
--- a/src/mongo/client/remote_command_targeter_mock.h
+++ b/src/mongo/client/remote_command_targeter_mock.h
@@ -63,12 +63,12 @@ public:
     /**
     * No-op for the mock.
     */
-    void markHostNotMaster(const HostAndPort& host, const Status& status) override;
+    void markHostNotMaster(const HostAndPort& host) override;
 
     /**
     * No-op for the mock.
     */
-    void markHostUnreachable(const HostAndPort& host, const Status& status) override;
+    void markHostUnreachable(const HostAndPort& host) override;
 
     /**
     * Sets the return value for the next call to connectionString.
diff --git a/src/mongo/client/remote_command_targeter_rs.cpp b/src/mongo/client/remote_command_targeter_rs.cpp
index 59ae0d9cdbf..b389df23f0e 100644
--- a/src/mongo/client/remote_command_targeter_rs.cpp
+++ b/src/mongo/client/remote_command_targeter_rs.cpp
@@ -88,16 +88,16 @@ StatusWith<HostAndPort> RemoteCommandTargeterRS::findHost(OperationContext* txn,
     }
 }
 
-void RemoteCommandTargeterRS::markHostNotMaster(const HostAndPort& host, const Status& status) {
+void RemoteCommandTargeterRS::markHostNotMaster(const HostAndPort& host) {
     invariant(_rsMonitor);
 
-    _rsMonitor->failedHost(host, status);
+    _rsMonitor->failedHost(host);
 }
 
-void RemoteCommandTargeterRS::markHostUnreachable(const HostAndPort& host, const Status& status) {
+void RemoteCommandTargeterRS::markHostUnreachable(const HostAndPort& host) {
     invariant(_rsMonitor);
 
-    _rsMonitor->failedHost(host, status);
+    _rsMonitor->failedHost(host);
 }
 
 }  // namespace mongo
diff --git a/src/mongo/client/remote_command_targeter_rs.h b/src/mongo/client/remote_command_targeter_rs.h
index 7a8851c01ee..9917613834e 100644
--- a/src/mongo/client/remote_command_targeter_rs.h
+++ b/src/mongo/client/remote_command_targeter_rs.h
@@ -58,9 +58,9 @@ public:
     StatusWith<HostAndPort> findHostWithMaxWait(const ReadPreferenceSetting& readPref,
                                                 Milliseconds maxWait) override;
 
-    void markHostNotMaster(const HostAndPort& host, const Status& status) override;
+    void markHostNotMaster(const HostAndPort& host) override;
 
-    void markHostUnreachable(const HostAndPort& host, const Status& status) override;
+    void markHostUnreachable(const HostAndPort& host) override;
 
 private:
     // Name of the replica set which this targeter maintains
diff --git a/src/mongo/client/remote_command_targeter_standalone.cpp b/src/mongo/client/remote_command_targeter_standalone.cpp
index b44cb1b4c44..a5afaef951a 100644
--- a/src/mongo/client/remote_command_targeter_standalone.cpp
+++ b/src/mongo/client/remote_command_targeter_standalone.cpp
@@ -52,13 +52,11 @@ StatusWith<HostAndPort> RemoteCommandTargeterStandalone::findHost(
     return _hostAndPort;
 }
 
-void RemoteCommandTargeterStandalone::markHostNotMaster(const HostAndPort& host,
-                                                        const Status& status) {
+void RemoteCommandTargeterStandalone::markHostNotMaster(const HostAndPort& host) {
     dassert(host == _hostAndPort);
 }
 
-void RemoteCommandTargeterStandalone::markHostUnreachable(const HostAndPort& host,
-                                                          const Status& status) {
+void RemoteCommandTargeterStandalone::markHostUnreachable(const HostAndPort& host) {
     dassert(host == _hostAndPort);
 }
 
diff --git a/src/mongo/client/remote_command_targeter_standalone.h b/src/mongo/client/remote_command_targeter_standalone.h
index dd9e97a9df2..ab7ae099243 100644
--- a/src/mongo/client/remote_command_targeter_standalone.h
+++ b/src/mongo/client/remote_command_targeter_standalone.h
@@ -49,9 +49,9 @@ public:
     StatusWith<HostAndPort> findHostWithMaxWait(const ReadPreferenceSetting& readPref,
                                                 Milliseconds maxWait) override;
 
-    void markHostNotMaster(const HostAndPort& host, const Status& status) override;
+    void markHostNotMaster(const HostAndPort& host) override;
 
-    void markHostUnreachable(const HostAndPort& host, const Status& status) override;
+    void markHostUnreachable(const HostAndPort& host) override;
 
 private:
     const HostAndPort _hostAndPort;
diff --git a/src/mongo/client/replica_set_monitor.cpp b/src/mongo/client/replica_set_monitor.cpp
index d27cd68d162..60f58bab9f1 100644
--- a/src/mongo/client/replica_set_monitor.cpp
+++ b/src/mongo/client/replica_set_monitor.cpp
@@ -310,11 +310,11 @@ Refresher ReplicaSetMonitor::startOrContinueRefresh() {
     return out;
 }
 
-void ReplicaSetMonitor::failedHost(const HostAndPort& host, const Status& status) {
+void ReplicaSetMonitor::failedHost(const HostAndPort& host) {
     stdx::lock_guard<stdx::mutex> lk(_state->mutex);
 
     Node* node = _state->findNode(host);
     if (node)
-        node->markFailed(status);
+        node->markFailed();
 
     DEV _state->checkInvariants();
 }
@@ -545,10 +545,9 @@ void Refresher::receivedIsMaster(const HostAndPort& from,
     _scan->waitingFor.erase(from);
 
     const IsMasterReply reply(from, latencyMicros, replyObj);
 
-    // Handle various failure cases
     if (!reply.ok) {
-        failedHost(from, {ErrorCodes::CommandFailed, "Failed to execute 'ismaster' command"});
+        failedHost(from);
         return;
     }
 
@@ -566,19 +565,17 @@ void Refresher::receivedIsMaster(const HostAndPort& from,
             warning() << "node: " << from << " isn't a part of set: " << _set->name
                       << " ismaster: " << replyObj;
         }
-
-        failedHost(from,
-                   {ErrorCodes::InconsistentReplicaSetNames,
-                    str::stream() << "Target replica set name " << reply.setName
-                                  << " does not match the monitored set name "
-                                  << _set->name});
+        failedHost(from);
         return;
     }
 
     if (reply.isMaster) {
-        Status status = receivedIsMasterFromMaster(from, reply);
-        if (!status.isOK()) {
-            failedHost(from, status);
+        if (!receivedIsMasterFromMaster(reply)) {
+            log() << "node " << from << " believes it is primary, but its election id of "
+                  << reply.electionId << " and config version of " << reply.configVersion
+                  << " is older than the most recent election id " << _set->maxElectionId
+                  << " and config version of " << _set->configVersion;
+            failedHost(from);
             return;
         }
     }
@@ -601,7 +598,7 @@ void Refresher::receivedIsMaster(const HostAndPort& from,
     DEV _set->checkInvariants();
 }
 
-void Refresher::failedHost(const HostAndPort& host, const Status& status) {
+void Refresher::failedHost(const HostAndPort& host) {
     _scan->waitingFor.erase(host);
 
     // Failed hosts can't pass criteria, so the only way they'd effect the _refreshUntilMatches
@@ -611,7 +608,7 @@ void Refresher::failedHost(const HostAndPort& host, const Status& status) {
 
     Node* node = _set->findNode(host);
     if (node)
-        node->markFailed(status);
+        node->markFailed();
 }
 
 ScanStatePtr Refresher::startNewScan(const SetState* set) {
@@ -649,18 +646,13 @@ ScanStatePtr Refresher::startNewScan(const SetState* set) {
     return scan;
 }
 
-Status Refresher::receivedIsMasterFromMaster(const HostAndPort& from, const IsMasterReply& reply) {
+bool Refresher::receivedIsMasterFromMaster(const IsMasterReply& reply) {
     invariant(reply.isMaster);
 
     // Reject if config version is older. This is for backwards compatibility with nodes in pv0
     // since they don't have the same ordering with pv1 electionId.
     if (reply.configVersion < _set->configVersion) {
-        return {ErrorCodes::NotMaster,
-                str::stream() << "Node " << from
-                              << " believes it is primary, but its config version "
-                              << reply.configVersion
-                              << " is older than the most recent config version "
-                              << _set->configVersion};
+        return false;
     }
 
     if (reply.electionId.isSet()) {
@@ -669,12 +661,7 @@ Status Refresher::receivedIsMasterFromMaster(const HostAndPort& from, const IsMa
        // because configVersion needs to be incremented whenever the protocol version is changed.
        if (reply.configVersion == _set->configVersion && _set->maxElectionId.isSet() &&
            _set->maxElectionId.compare(reply.electionId) > 0) {
-            return {ErrorCodes::NotMaster,
-                    str::stream() << "Node " << from
-                                  << " believes it is primary, but its election id "
-                                  << reply.electionId
-                                  << " is older than the most recent election id "
-                                  << _set->maxElectionId};
+            return false;
        }
 
        _set->maxElectionId = reply.electionId;
@@ -757,7 +744,7 @@ Status Refresher::receivedIsMasterFromMaster(const HostAndPort& from, const IsMa
     _scan->foundUpMaster = true;
     _set->lastSeenMaster = reply.host;
 
-    return Status::OK();
+    return true;
 }
 
 void Refresher::receivedIsMasterBeforeFoundMaster(const IsMasterReply& reply) {
@@ -802,23 +789,19 @@ HostAndPort Refresher::_refreshUntilMatches(const ReadPreferenceSetting* criteri
                 continue;
 
             case NextStep::CONTACT_HOST: {
-                StatusWith<BSONObj> isMasterReplyStatus{ErrorCodes::InternalError,
-                                                        "Uninitialized variable"};
+                BSONObj reply;  // empty on error
                 int64_t pingMicros = 0;
 
-                // Do not do network calls while holding a mutex
-                lk.unlock();
+                lk.unlock();  // relocked after attempting to call isMaster
                 try {
                     ScopedDbConnection conn(ConnectionString(ns.host), socketTimeoutSecs);
                     bool ignoredOutParam = false;
                     Timer timer;
-                    BSONObj reply;
                     conn->isMaster(ignoredOutParam, &reply);
-                    isMasterReplyStatus = reply;
                     pingMicros = timer.micros();
                     conn.done();  // return to pool on success.
-                } catch (const DBException& ex) {
-                    isMasterReplyStatus = ex.toStatus();
+                } catch (...) {
+                    reply = BSONObj();  // should be a no-op but want to be sure
                 }
 
                 lk.lock();
@@ -827,10 +810,10 @@ HostAndPort Refresher::_refreshUntilMatches(const ReadPreferenceSetting* criteri
                 if (_scan != _set->currentScan)
                     return criteria ? _set->getMatchingHost(*criteria) : HostAndPort();
 
-                if (isMasterReplyStatus.isOK())
-                    receivedIsMaster(ns.host, pingMicros, isMasterReplyStatus.getValue());
+                if (reply.isEmpty())
+                    failedHost(ns.host);
                 else
-                    failedHost(ns.host, isMasterReplyStatus.getStatus());
+                    receivedIsMaster(ns.host, pingMicros, reply);
             }
         }
     }
@@ -889,13 +872,10 @@ void IsMasterReply::parse(const BSONObj& obj) {
 
 Node::Node(const HostAndPort& host) : host(host), latencyMicros(unknownLatency) {}
 
-void Node::markFailed(const Status& status) {
-    if (isUp) {
-        log() << "Marking host " << host << " as failed" << causedBy(redact(status));
-
-        isUp = false;
-    }
+void Node::markFailed() {
+    LOG(1) << "Marking host " << host << " as failed";
+    isUp = false;
     isMaster = false;
 }
 
diff --git a/src/mongo/client/replica_set_monitor.h b/src/mongo/client/replica_set_monitor.h
index e83f41e4556..1b2db2c906f 100644
--- a/src/mongo/client/replica_set_monitor.h
+++ b/src/mongo/client/replica_set_monitor.h
@@ -106,14 +106,12 @@ public:
     Refresher startOrContinueRefresh();
 
     /**
-     * Notifies this Monitor that a host has failed because of the specified error 'status' and
-     * should be considered down.
+     * Notifies this Monitor that a host has failed and should be considered down.
      *
-     * Call this when you get a connection error. If you get an error while trying to refresh our
-     * view of a host, call Refresher::failedHost instead because it bypasses taking the monitor's
-     * mutex.
+     * Call this when you get a connection error. If you get an error while trying to refresh
+     * our view of a host, call Refresher::hostFailed() instead.
     */
-    void failedHost(const HostAndPort& host, const Status& status);
+    void failedHost(const HostAndPort& host);
 
     /**
     * Returns true if this node is the master based ONLY on local data. Be careful, return may
@@ -341,7 +339,7 @@ public:
     /**
     * Call this if a host returned from getNextStep failed to reply to an isMaster call.
     */
-    void failedHost(const HostAndPort& host, const Status& status);
+    void failedHost(const HostAndPort& host);
 
     /**
     * True if this Refresher started a new full scan rather than joining an existing one.
@@ -357,17 +355,15 @@ public:
 private:
     /**
-     * First, checks that the "reply" is not from a stale primary by comparing the electionId of
-     * "reply" to the maxElectionId recorded by the SetState and returns OK status if "reply"
-     * belongs to a non-stale primary. Otherwise returns a failed status.
-     *
-     * The 'from' parameter specifies the node from which the response is received.
+     * First, checks that the "reply" is not from a stale primary by
+     * comparing the electionId of "reply" to the maxElectionId recorded by the SetState.
+     * Returns true if "reply" belongs to a non-stale primary.
      *
     * Updates _set and _scan based on set-membership information from a master.
     * Applies _scan->unconfirmedReplies to confirmed nodes.
     * Does not update this host's node in _set->nodes.
     */
-    Status receivedIsMasterFromMaster(const HostAndPort& from, const IsMasterReply& reply);
+    bool receivedIsMasterFromMaster(const IsMasterReply& reply);
 
     /**
     * Adjusts the _scan work queue based on information from this host.
diff --git a/src/mongo/client/replica_set_monitor_internal.h b/src/mongo/client/replica_set_monitor_internal.h
index 237401b1f41..300452398b7 100644
--- a/src/mongo/client/replica_set_monitor_internal.h
+++ b/src/mongo/client/replica_set_monitor_internal.h
@@ -93,7 +93,7 @@ public:
     struct Node {
         explicit Node(const HostAndPort& host);
 
-        void markFailed(const Status& status);
+        void markFailed();
 
         bool matches(const ReadPreference pref) const;
 
diff --git a/src/mongo/client/replica_set_monitor_read_preference_test.cpp b/src/mongo/client/replica_set_monitor_read_preference_test.cpp
index 0344a08ff9f..956f40003bc 100644
--- a/src/mongo/client/replica_set_monitor_read_preference_test.cpp
+++ b/src/mongo/client/replica_set_monitor_read_preference_test.cpp
@@ -36,9 +36,9 @@
 #include "mongo/stdx/memory.h"
 #include "mongo/unittest/unittest.h"
 
-namespace mongo {
 namespace {
 
+using namespace mongo;
 using std::set;
 using std::vector;
 
@@ -145,7 +145,7 @@ TEST(ReplSetMonitorReadPref, PrimaryOnlyPriNotOk) {
     vector<Node> nodes = getThreeMemberWithTags();
     TagSet tags(getDefaultTagSet());
 
-    nodes[1].markFailed({ErrorCodes::InternalError, "Test error"});
+    nodes[1].markFailed();
 
     bool isPrimarySelected = false;
     HostAndPort host =
@@ -183,7 +183,7 @@ TEST(ReplSetMonitorReadPref, PriPrefWithPriNotOk) {
     vector<Node> nodes = getThreeMemberWithTags();
     TagSet tags(getDefaultTagSet());
 
-    nodes[1].markFailed({ErrorCodes::InternalError, "Test error"});
+    nodes[1].markFailed();
 
     bool isPrimarySelected = false;
     HostAndPort host =
@@ -197,7 +197,7 @@ TEST(ReplSetMonitorReadPref, SecOnly) {
     vector<Node> nodes = getThreeMemberWithTags();
     TagSet tags(getDefaultTagSet());
 
-    nodes[2].markFailed({ErrorCodes::InternalError, "Test error"});
+    nodes[2].markFailed();
 
     bool isPrimarySelected = false;
     HostAndPort host =
@@ -211,8 +211,8 @@ TEST(ReplSetMonitorReadPref, SecOnlyOnlyPriOk) {
     vector<Node> nodes = getThreeMemberWithTags();
     TagSet tags(getDefaultTagSet());
 
-    nodes[0].markFailed({ErrorCodes::InternalError, "Test error"});
-    nodes[2].markFailed({ErrorCodes::InternalError, "Test error"});
+    nodes[0].markFailed();
+    nodes[2].markFailed();
 
     bool isPrimarySelected = false;
     HostAndPort host =
@@ -225,7 +225,7 @@ TEST(ReplSetMonitorReadPref, SecPref) {
     vector<Node> nodes = getThreeMemberWithTags();
     TagSet tags(getDefaultTagSet());
 
-    nodes[2].markFailed({ErrorCodes::InternalError, "Test error"});
+    nodes[2].markFailed();
 
     bool isPrimarySelected = false;
     HostAndPort host =
@@ -239,8 +239,8 @@ TEST(ReplSetMonitorReadPref, SecPrefWithNoSecOk) {
     vector<Node> nodes = getThreeMemberWithTags();
     TagSet tags(getDefaultTagSet());
 
-    nodes[0].markFailed({ErrorCodes::InternalError, "Test error"});
-    nodes[2].markFailed({ErrorCodes::InternalError, "Test error"});
+    nodes[0].markFailed();
+    nodes[2].markFailed();
 
     bool isPrimarySelected = false;
     HostAndPort host =
@@ -254,9 +254,9 @@ TEST(ReplSetMonitorReadPref, SecPrefWithNoNodeOk) {
     vector<Node> nodes = getThreeMemberWithTags();
     TagSet tags(getDefaultTagSet());
 
-    nodes[0].markFailed({ErrorCodes::InternalError, "Test error"});
-    nodes[1].markFailed({ErrorCodes::InternalError, "Test error"});
-    nodes[2].markFailed({ErrorCodes::InternalError, "Test error"});
+    nodes[0].markFailed();
+    nodes[1].markFailed();
+    nodes[2].markFailed();
 
     bool isPrimarySelected = false;
     HostAndPort host =
@@ -315,7 +315,7 @@ TEST(ReplSetMonitorReadPref, PriPrefPriNotOkWithTags) {
     vector<Node> nodes = getThreeMemberWithTags();
     TagSet tags(getP2TagSet());
 
-    nodes[1].markFailed({ErrorCodes::InternalError, "Test error"});
+    nodes[1].markFailed();
 
     bool isPrimarySelected = false;
     HostAndPort host =
@@ -341,7 +341,7 @@ TEST(ReplSetMonitorReadPref, PriPrefPriNotOkWithTagsNoMatch) {
     vector<Node> nodes = getThreeMemberWithTags();
     TagSet tags(getSingleNoMatchTag());
 
-    nodes[1].markFailed({ErrorCodes::InternalError, "Test error"});
+    nodes[1].markFailed();
 
     bool isPrimarySelected = false;
     HostAndPort host =
@@ -397,7 +397,7 @@ TEST(ReplSetMonitorReadPref, SecPrefSecNotOkWithTags) {
                            << "nyc"));
     TagSet tags(arrayBuilder.arr());
 
-    nodes[2].markFailed({ErrorCodes::InternalError, "Test error"});
+    nodes[2].markFailed();
 
     bool isPrimarySelected = false;
     HostAndPort host =
@@ -423,7 +423,7 @@ TEST(ReplSetMonitorReadPref, SecPrefPriNotOkWithTagsNoMatch) {
     vector<Node> nodes = getThreeMemberWithTags();
     TagSet tags(getSingleNoMatchTag());
 
-    nodes[1].markFailed({ErrorCodes::InternalError, "Test error"});
+    nodes[1].markFailed();
 
     bool isPrimarySelected = false;
     HostAndPort host =
@@ -487,7 +487,7 @@ TEST(ReplSetMonitorReadPref, MultiPriOnlyPriNotOkTag) {
     vector<Node> nodes = getThreeMemberWithTags();
     TagSet tags(getMultiNoMatchTag());
 
-    nodes[1].markFailed({ErrorCodes::InternalError, "Test error"});
+    nodes[1].markFailed();
 
     bool isPrimarySelected = false;
     HostAndPort host =
@@ -595,7 +595,7 @@ private:
 TEST_F(MultiTags, MultiTagsMatchesFirst) {
     vector<Node> nodes = getThreeMemberWithTags();
 
-    nodes[1].markFailed({ErrorCodes::InternalError, "Test error"});
+    nodes[1].markFailed();
 
     bool isPrimarySelected = false;
     HostAndPort host = selectNode(nodes,
@@ -611,8 +611,8 @@ TEST_F(MultiTags, MultiTagsMatchesFirst) {
 TEST_F(MultiTags, PriPrefPriNotOkMatchesFirstNotOk) {
     vector<Node> nodes = getThreeMemberWithTags();
 
-    nodes[0].markFailed({ErrorCodes::InternalError, "Test error"});
-    nodes[1].markFailed({ErrorCodes::InternalError, "Test error"});
+    nodes[0].markFailed();
+    nodes[1].markFailed();
 
     bool isPrimarySelected = false;
     HostAndPort host = selectNode(nodes,
@@ -628,7 +628,7 @@ TEST_F(MultiTags, PriPrefPriNotOkMatchesFirstNotOk) {
 TEST_F(MultiTags, PriPrefPriNotOkMatchesSecondTest) {
     vector<Node> nodes = getThreeMemberWithTags();
 
-    nodes[1].markFailed({ErrorCodes::InternalError, "Test error"});
+    nodes[1].markFailed();
 
     bool isPrimarySelected = false;
     HostAndPort host = selectNode(nodes,
@@ -644,8 +644,8 @@ TEST_F(MultiTags, PriPrefPriNotOkMatchesSecondTest) {
 TEST_F(MultiTags, PriPrefPriNotOkMatchesSecondNotOkTest) {
     vector<Node> nodes = getThreeMemberWithTags();
 
-    nodes[1].markFailed({ErrorCodes::InternalError, "Test error"});
-    nodes[2].markFailed({ErrorCodes::InternalError, "Test error"});
+    nodes[1].markFailed();
+    nodes[2].markFailed();
 
     bool isPrimarySelected = false;
     HostAndPort host = selectNode(nodes,
@@ -661,7 +661,7 @@ TEST_F(MultiTags, PriPrefPriNotOkMatchesSecondNotOkTest) {
 TEST_F(MultiTags, PriPrefPriNotOkMatchesLastTest) {
     vector<Node> nodes = getThreeMemberWithTags();
 
-    nodes[1].markFailed({ErrorCodes::InternalError, "Test error"});
+    nodes[1].markFailed();
 
     bool isPrimarySelected = false;
     HostAndPort host = selectNode(nodes,
@@ -677,8 +677,8 @@ TEST_F(MultiTags, PriPrefPriNotOkMatchesLastTest) {
 TEST_F(MultiTags, PriPrefPriNotOkMatchesLastNotOkTest) {
     vector<Node> nodes = getThreeMemberWithTags();
 
-    nodes[0].markFailed({ErrorCodes::InternalError, "Test error"});
-    nodes[1].markFailed({ErrorCodes::InternalError, "Test error"});
+    nodes[0].markFailed();
+    nodes[1].markFailed();
 
     bool isPrimarySelected = false;
     HostAndPort host = selectNode(nodes,
@@ -707,7 +707,7 @@ TEST(MultiTags, PriPrefPriNotOkNoMatch) {
     vector<Node> nodes = getThreeMemberWithTags();
     TagSet tags(getMultiNoMatchTag());
 
-    nodes[1].markFailed({ErrorCodes::InternalError, "Test error"});
+    nodes[1].markFailed();
 
     bool isPrimarySelected = false;
     HostAndPort host =
@@ -733,7 +733,7 @@ TEST_F(MultiTags, SecOnlyMatchesFirstTest) {
 TEST_F(MultiTags, SecOnlyMatchesFirstNotOk) {
     vector<Node> nodes = getThreeMemberWithTags();
 
-    nodes[0].markFailed({ErrorCodes::InternalError, "Test error"});
+    nodes[0].markFailed();
 
     bool isPrimarySelected = false;
     HostAndPort host = selectNode(nodes,
@@ -763,7 +763,7 @@ TEST_F(MultiTags, SecOnlyMatchesSecond) {
 TEST_F(MultiTags, SecOnlyMatchesSecondNotOk) {
     vector<Node> nodes = getThreeMemberWithTags();
 
-    nodes[2].markFailed({ErrorCodes::InternalError, "Test error"});
+    nodes[2].markFailed();
 
     bool isPrimarySelected = false;
     HostAndPort host = selectNode(nodes,
@@ -790,7 +790,7 @@ TEST_F(MultiTags, SecOnlyMatchesLast) {
 TEST_F(MultiTags, SecOnlyMatchesLastNotOk) {
     vector<Node> nodes = getThreeMemberWithTags();
 
-    nodes[0].markFailed({ErrorCodes::InternalError, "Test error"});
+    nodes[0].markFailed();
 
     bool isPrimarySelected = false;
     HostAndPort host = selectNode(
@@ -838,7 +838,7 @@ TEST_F(MultiTags, SecPrefMatchesFirst) {
 TEST_F(MultiTags, SecPrefMatchesFirstNotOk) {
     vector<Node> nodes = getThreeMemberWithTags();
 
-    nodes[0].markFailed({ErrorCodes::InternalError, "Test error"});
+    nodes[0].markFailed();
 
     bool isPrimarySelected = false;
     HostAndPort host = selectNode(nodes,
@@ -868,7 +868,7 @@ TEST_F(MultiTags, SecPrefMatchesSecond) {
 TEST_F(MultiTags, SecPrefMatchesSecondNotOk) {
     vector<Node> nodes = getThreeMemberWithTags();
 
-    nodes[2].markFailed({ErrorCodes::InternalError, "Test error"});
+    nodes[2].markFailed();
 
     bool isPrimarySelected = false;
     HostAndPort host = selectNode(nodes,
@@ -898,7 +898,7 @@ TEST_F(MultiTags, SecPrefMatchesLast) {
 TEST_F(MultiTags, SecPrefMatchesLastNotOk) {
     vector<Node> nodes = getThreeMemberWithTags();
 
-    nodes[0].markFailed({ErrorCodes::InternalError, "Test error"});
+    nodes[0].markFailed();
 
     bool isPrimarySelected = false;
     HostAndPort host = selectNode(nodes,
@@ -941,7 +941,7 @@ TEST(MultiTags, SecPrefMultiTagsNoMatchPriNotOk) {
     vector<Node> nodes = getThreeMemberWithTags();
     TagSet tags(getMultiNoMatchTag());
 
-    nodes[1].markFailed({ErrorCodes::InternalError, "Test error"});
+    nodes[1].markFailed();
 
     bool isPrimarySelected = false;
     HostAndPort host =
@@ -972,7 +972,7 @@ TEST(MultiTags, NearestMatchesFirstNotOk) {
 
     TagSet tags(arrayBuilder.arr());
 
-    nodes[0].markFailed({ErrorCodes::InternalError, "Test error"});
+    nodes[0].markFailed();
 
     bool isPrimarySelected = false;
     HostAndPort host =
@@ -1006,7 +1006,7 @@ TEST_F(MultiTags, NearestMatchesSecondNotOk) {
 
     TagSet tags(arrayBuilder.arr());
 
-    nodes[2].markFailed({ErrorCodes::InternalError, "Test error"});
+    nodes[2].markFailed();
 
     bool isPrimarySelected = false;
     HostAndPort host =
@@ -1030,7 +1030,7 @@ TEST_F(MultiTags, NearestMatchesLast) {
 TEST_F(MultiTags, NeatestMatchesLastNotOk) {
     vector<Node> nodes = getThreeMemberWithTags();
 
-    nodes[0].markFailed({ErrorCodes::InternalError, "Test error"});
+    nodes[0].markFailed();
 
     bool isPrimarySelected = false;
     HostAndPort host = selectNode(
@@ -1065,6 +1065,4 @@ TEST(TagSet, DefaultConstructorMatchesAll) {
     TagSet tags;
     ASSERT_BSONOBJ_EQ(tags.getTagBSON(), BSON_ARRAY(BSONObj()));
 }
-
-}  // namespace
-}  // namespace mongo
+}
diff --git a/src/mongo/client/replica_set_monitor_test.cpp b/src/mongo/client/replica_set_monitor_test.cpp
index 8720176b183..45cd93cc7e2 100644
--- a/src/mongo/client/replica_set_monitor_test.cpp
+++ b/src/mongo/client/replica_set_monitor_test.cpp
@@ -28,13 +28,15 @@
 #include "mongo/platform/basic.h"
 
+
 #include "mongo/client/replica_set_monitor.h"
 #include "mongo/client/replica_set_monitor_internal.h"
 #include "mongo/unittest/unittest.h"
 
-namespace mongo {
+
 namespace {
 
+using namespace mongo;
 using std::set;
 
 // Pull nested types to top-level scope
@@ -940,7 +942,7 @@ TEST(ReplicaSetMonitor, OutOfBandFailedHost) {
 
         if (i >= 1) {
             HostAndPort a("a");
-            rsm->failedHost(a, {ErrorCodes::InternalError, "Test error"});
+            rsm->failedHost(a);
             Node* node = state->findNode(a);
             ASSERT(node);
             ASSERT(!node->isUp);
@@ -1510,9 +1512,9 @@ TEST(ReplicaSetMonitor, MaxStalenessMSAllFailed) {
     ASSERT_EQUALS(ns.step, NextStep::DONE);
 
     // make sure all secondaries are in the scan
-    refresher.failedHost(HostAndPort("a"), {ErrorCodes::InternalError, "Test error"});
-    refresher.failedHost(HostAndPort("b"), {ErrorCodes::InternalError, "Test error"});
-    refresher.failedHost(HostAndPort("c"), {ErrorCodes::InternalError, "Test error"});
+    refresher.failedHost(HostAndPort("a"));
+    refresher.failedHost(HostAndPort("b"));
+    refresher.failedHost(HostAndPort("c"));
 
     HostAndPort notStale = state->getMatchingHost(secondary);
     ASSERT_EQUALS(notStale.host(), "");
@@ -1562,8 +1564,8 @@ TEST(ReplicaSetMonitor, MaxStalenessMSAllButPrimaryFailed) {
 
     // make sure the primary is in the scan
     ASSERT(state->findNode(HostAndPort("a")));
-    refresher.failedHost(HostAndPort("b"), {ErrorCodes::InternalError, "Test error"});
-    refresher.failedHost(HostAndPort("c"), {ErrorCodes::InternalError, "Test error"});
+    refresher.failedHost(HostAndPort("b"));
+    refresher.failedHost(HostAndPort("c"));
 
     // No match because the request needs secondaryOnly host
     HostAndPort notStale = state->getMatchingHost(secondary);
@@ -1614,7 +1616,7 @@ TEST(ReplicaSetMonitor, MaxStalenessMSOneSecondaryFailed) {
     ASSERT(state->findNode(HostAndPort("a")));
     ASSERT(state->findNode(HostAndPort("b")));
-    refresher.failedHost(HostAndPort("c"), {ErrorCodes::InternalError, "Test error"});
+    refresher.failedHost(HostAndPort("c"));
 
     // No match because the write date is stale
     HostAndPort notStale = state->getMatchingHost(secondary);
@@ -1665,9 +1667,9 @@ TEST(ReplicaSetMonitor, MaxStalenessMSNonStaleSecondaryMatched) {
     // Ensure that we have heard from all hosts and scan is done
     ASSERT_EQUALS(ns.step, NextStep::DONE);
 
-    refresher.failedHost(HostAndPort("a"), {ErrorCodes::InternalError, "Test error"});
+    refresher.failedHost(HostAndPort("a"));
     ASSERT(state->findNode(HostAndPort("b")));
-    refresher.failedHost(HostAndPort("c"), {ErrorCodes::InternalError, "Test error"});
+    refresher.failedHost(HostAndPort("c"));
 
     HostAndPort notStale = state->getMatchingHost(secondary);
     ASSERT_EQUALS(notStale.host(), "b");
@@ -1895,5 +1897,5 @@ TEST(ReplicaSetMonitor, MinOpTimeIgnored) {
     ASSERT_EQUALS(notStale.host(), "c");
 }
 
+
 }  // namespace
-}  // namespace mongo
diff --git a/src/mongo/dbtests/replica_set_monitor_test.cpp b/src/mongo/dbtests/replica_set_monitor_test.cpp
index 69cbe39e1d6..d6454fc50c2 100644
--- a/src/mongo/dbtests/replica_set_monitor_test.cpp
+++ b/src/mongo/dbtests/replica_set_monitor_test.cpp
@@ -28,9 +28,6 @@
 
 #include "mongo/platform/basic.h"
 
-#include <set>
-#include <vector>
-
 #include "mongo/client/connpool.h"
 #include "mongo/client/dbclient_rs.h"
 #include "mongo/client/dbclientinterface.h"
@@ -40,9 +37,13 @@
 #include "mongo/dbtests/mock/mock_replica_set.h"
 #include "mongo/unittest/unittest.h"
 
-namespace mongo {
+#include <set>
+#include <vector>
+
 namespace {
 
+using namespace mongo;
+
 using std::map;
 using std::vector;
 using std::set;
@@ -302,5 +303,4 @@ TEST_F(TwoNodeWithTags, SecDownRetryWithTag) {
     monitor.reset();
 }
 
-}  // namespace
 }  // namespace mongo
diff --git a/src/mongo/s/client/shard_remote.cpp b/src/mongo/s/client/shard_remote.cpp
index 64d4f58ff1f..997c32c2387 100644
--- a/src/mongo/s/client/shard_remote.cpp
+++ b/src/mongo/s/client/shard_remote.cpp
@@ -126,13 +126,13 @@ void ShardRemote::updateReplSetMonitor(const HostAndPort& remoteHost,
     if (ErrorCodes::isNotMasterError(remoteCommandStatus.code()) ||
         (remoteCommandStatus == ErrorCodes::InterruptedDueToReplStateChange) ||
         (remoteCommandStatus == ErrorCodes::PrimarySteppedDown)) {
-        _targeter->markHostNotMaster(remoteHost, remoteCommandStatus);
+        _targeter->markHostNotMaster(remoteHost);
     } else if (ErrorCodes::isNetworkError(remoteCommandStatus.code())) {
-        _targeter->markHostUnreachable(remoteHost, remoteCommandStatus);
+        _targeter->markHostUnreachable(remoteHost);
     } else if (remoteCommandStatus == ErrorCodes::NotMasterOrSecondary) {
-        _targeter->markHostUnreachable(remoteHost, remoteCommandStatus);
+        _targeter->markHostUnreachable(remoteHost);
    } else if (remoteCommandStatus == ErrorCodes::ExceededTimeLimit) {
-        _targeter->markHostUnreachable(remoteHost, remoteCommandStatus);
+        _targeter->markHostUnreachable(remoteHost);
     }
 }
 
diff --git a/src/mongo/util/net/hostandport.cpp b/src/mongo/util/net/hostandport.cpp
index abd8f2d6ade..04088d21861 100644
--- a/src/mongo/util/net/hostandport.cpp
+++ b/src/mongo/util/net/hostandport.cpp
@@ -180,16 +180,6 @@ std::ostream& operator<<(std::ostream& os, const HostAndPort& hp) {
     return os << hp.toString();
 }
 
-template <typename Allocator>
-StringBuilderImpl<Allocator>& operator<<(StringBuilderImpl<Allocator>& os, const HostAndPort& hp) {
-    return os << hp.toString();
-}
-
-template StringBuilderImpl<StackAllocator>& operator<<(StringBuilderImpl<StackAllocator>&,
-                                                       const HostAndPort&);
-template StringBuilderImpl<SharedBufferAllocator>& operator<<(
-    StringBuilderImpl<SharedBufferAllocator>&, const HostAndPort&);
-
 }  // namespace mongo
 
 MONGO_HASH_NAMESPACE_START
diff --git a/src/mongo/util/net/hostandport.h b/src/mongo/util/net/hostandport.h
index 53209bbaa04..6f7bd46b43c 100644
--- a/src/mongo/util/net/hostandport.h
+++ b/src/mongo/util/net/hostandport.h
@@ -34,10 +34,7 @@
 #include "mongo/platform/hash_namespace.h"
 
 namespace mongo {
-
 class Status;
-template <typename Allocator>
-class StringBuilderImpl;
 class StringData;
 template <typename T>
 class StatusWith;
@@ -127,9 +124,6 @@ private:
 
 std::ostream& operator<<(std::ostream& os, const HostAndPort& hp);
 
-template <typename Allocator>
-StringBuilderImpl<Allocator>& operator<<(StringBuilderImpl<Allocator>& os, const HostAndPort& hp);
-
 }  // namespace mongo
 
 MONGO_HASH_NAMESPACE_START
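
For readers skimming the patch, the common thread across these files is the shape of the host-failure reporting call: the pre-revert code passed a Status explaining why a host failed (so the monitor could log the cause when it marks the node down), while the reverted code passes only the host and leaves any logging to the caller. The standalone C++ sketch below illustrates just that calling-convention difference; StubMonitor, StubHostAndPort, and StubStatus are hypothetical stand-ins for illustration only, not the real mongo::ReplicaSetMonitor, HostAndPort, or Status classes, and the example host name is made up.

    // Illustration only: stub types, not MongoDB's actual classes.
    #include <iostream>
    #include <string>

    struct StubHostAndPort {
        std::string host;
        int port;
    };

    struct StubStatus {
        int code;
        std::string reason;
    };

    struct StubMonitor {
        // Post-revert call shape: the monitor only learns *which* host failed;
        // the caller logs the reason itself (cf. the LOG(1) calls in dbclient_rs.cpp).
        void failedHost(const StubHostAndPort& host) {
            std::cout << "marking " << host.host << ":" << host.port << " as failed\n";
        }

        // Pre-revert call shape: the failure reason travels with the call, so the
        // monitor can include it when it logs that the node is down.
        void failedHost(const StubHostAndPort& host, const StubStatus& status) {
            std::cout << "marking " << host.host << ":" << host.port
                      << " as failed, cause: " << status.reason << "\n";
        }
    };

    int main() {
        StubMonitor monitor;
        StubHostAndPort primary{"rs0-primary.example.net", 27017};

        monitor.failedHost(primary);                             // post-revert call shape
        monitor.failedHost(primary, {10107, "got not master"});  // pre-revert call shape
        return 0;
    }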