-rw-r--r--  src/mongo/client/parallel.cpp | 20
-rw-r--r--  src/mongo/db/catalog/database_holder.h | 1
-rw-r--r--  src/mongo/db/exec/working_set_common.h | 2
-rw-r--r--  src/mongo/db/repl/bgsync.h | 3
-rw-r--r--  src/mongo/db/repl/oplogreader.h | 8
-rw-r--r--  src/mongo/db/repl/replication_coordinator_external_state_impl.cpp | 2
-rw-r--r--  src/mongo/dbtests/chunktests.cpp | 2
-rw-r--r--  src/mongo/dbtests/sharding.cpp | 4
-rw-r--r--  src/mongo/s/catalog/legacy/catalog_manager_legacy.cpp | 3
-rw-r--r--  src/mongo/s/chunk.cpp | 4
-rw-r--r--  src/mongo/s/client/shard.cpp | 24
-rw-r--r--  src/mongo/s/client/shard.h | 22
-rw-r--r--  src/mongo/s/client/shard_connection.cpp | 21
-rw-r--r--  src/mongo/s/client/shard_connection.h | 15
-rw-r--r--  src/mongo/s/client/shard_connection_test.cpp | 50
-rw-r--r--  src/mongo/s/client/shard_registry.cpp | 25
-rw-r--r--  src/mongo/s/client/shard_test.cpp | 46
-rw-r--r--  src/mongo/s/cluster_explain.cpp | 2
-rw-r--r--  src/mongo/s/commands/cluster_map_reduce_cmd.cpp | 4
-rw-r--r--  src/mongo/s/commands/cluster_merge_chunks_cmd.cpp | 5
-rw-r--r--  src/mongo/s/commands/cluster_move_primary_cmd.cpp | 41
-rw-r--r--  src/mongo/s/commands/cluster_netstat_cmd.cpp | 5
-rw-r--r--  src/mongo/s/commands/cluster_pipeline_cmd.cpp | 3
-rw-r--r--  src/mongo/s/commands/cluster_reset_error_cmd.cpp | 2
-rw-r--r--  src/mongo/s/commands/commands_public.cpp | 20
-rw-r--r--  src/mongo/s/commands/run_on_all_shards_cmd.cpp | 2
-rw-r--r--  src/mongo/s/config.cpp | 6
-rw-r--r--  src/mongo/s/config.h | 3
-rw-r--r--  src/mongo/s/d_migrate.cpp | 10
-rw-r--r--  src/mongo/s/d_state.cpp | 4
-rw-r--r--  src/mongo/s/dbclient_shard_resolver.cpp | 2
-rw-r--r--  src/mongo/s/server.cpp | 1
-rw-r--r--  src/mongo/s/strategy.cpp | 2
-rw-r--r--  src/mongo/s/version_manager.cpp | 3
-rw-r--r--  src/mongo/util/exit_code.h | 4
35 files changed, 195 insertions, 176 deletions
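
The common thread in the hunks below: Shard::getConnString() now returns a typed ConnectionString (the string-returning accessor and the old getAddress() are removed), so call sites that still need the raw host string convert explicitly with toString(). A minimal sketch of that call-site pattern, assuming an already-constructed mongo::Shard; the helper name is illustrative and not part of the patch:

    #include <string>

    #include "mongo/client/connection_string.h"
    #include "mongo/s/client/shard.h"

    // Illustrative helper only: getConnString() now yields a ConnectionString,
    // so callers that previously received a std::string call toString() on it.
    std::string shardHostString(const mongo::Shard& shard) {
        const mongo::ConnectionString& cs = shard.getConnString();
        return cs.toString();
    }
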
diff --git a/src/mongo/client/parallel.cpp b/src/mongo/client/parallel.cpp
index 11ee0a32e42..27a5666106a 100644
--- a/src/mongo/client/parallel.cpp
+++ b/src/mongo/client/parallel.cpp
@@ -1131,11 +1131,10 @@ namespace mongo {
// Put the cursors in the legacy format
int index = 0;
for( map< Shard, PCMData >::iterator i = _cursorMap.begin(), end = _cursorMap.end(); i != end; ++i ){
-
PCMData& mdata = i->second;
_cursors[ index ].reset( mdata.pcState->cursor.get(), &mdata );
- _servers.insert(i->first.getConnString());
+ _servers.insert(i->first.getConnString().toString());
index++;
}
@@ -1250,14 +1249,19 @@ namespace mongo {
// This may be the first time connecting to this shard, if so we can get an error here
try {
- conns.push_back(shared_ptr<ShardConnection>(new ShardConnection(serverHost, _ns)));
+ conns.push_back(
+ shared_ptr<ShardConnection>(
+ new ShardConnection(uassertStatusOK(
+ ConnectionString::parse(serverHost)),
+ _ns)));
}
- catch( std::exception& e ){
+ catch( std::exception& e ) {
socketExs.push_back( e.what() + errLoc );
if( ! returnPartial ){
num--;
break;
}
+
conns.push_back( shared_ptr<ShardConnection>() );
continue;
}
@@ -1558,7 +1562,7 @@ namespace mongo {
for( set<Shard>::iterator i = shards.begin(), end = shards.end(); i != end; ++i ){
// TODO: Make this the shard name, not address
- list<BSONObj>& l = out[ i->getAddress().toString() ];
+ list<BSONObj>& l = out[i->getConnString().toString()];
l.push_back( getShardCursor( *i )->peekFirst().getOwned() );
}
@@ -1589,11 +1593,15 @@ namespace mongo {
try {
if ( ! _conn ){
if ( _useShardConn) {
- _connHolder.reset( new ShardConnection( _server, "" ));
+ _connHolder.reset(
+ new ShardConnection(uassertStatusOK(ConnectionString::parse(_server)),
+ "",
+ NULL));
}
else {
_connHolder.reset( new ScopedDbConnection( _server ) );
}
+
_conn = _connHolder->get();
}
diff --git a/src/mongo/db/catalog/database_holder.h b/src/mongo/db/catalog/database_holder.h
index 9919af2d91a..264a03b36b3 100644
--- a/src/mongo/db/catalog/database_holder.h
+++ b/src/mongo/db/catalog/database_holder.h
@@ -38,6 +38,7 @@
namespace mongo {
class Database;
+ class OperationContext;
/**
* Registry of opened databases.
diff --git a/src/mongo/db/exec/working_set_common.h b/src/mongo/db/exec/working_set_common.h
index e1e207d22bc..bc4ceda2da0 100644
--- a/src/mongo/db/exec/working_set_common.h
+++ b/src/mongo/db/exec/working_set_common.h
@@ -31,8 +31,10 @@
#include "mongo/db/exec/working_set.h"
namespace mongo {
+
class CanonicalQuery;
class Collection;
+ class OperationContext;
class WorkingSetCommon {
public:
diff --git a/src/mongo/db/repl/bgsync.h b/src/mongo/db/repl/bgsync.h
index 5fb3d4eeba8..41654d55469 100644
--- a/src/mongo/db/repl/bgsync.h
+++ b/src/mongo/db/repl/bgsync.h
@@ -35,6 +35,9 @@
#include "mongo/db/jsobj.h"
namespace mongo {
+
+ class OperationContext;
+
namespace repl {
class Member;
diff --git a/src/mongo/db/repl/oplogreader.h b/src/mongo/db/repl/oplogreader.h
index fc19c83d5cb..017e2613281 100644
--- a/src/mongo/db/repl/oplogreader.h
+++ b/src/mongo/db/repl/oplogreader.h
@@ -38,12 +38,16 @@
#include "mongo/util/net/hostandport.h"
namespace mongo {
-namespace repl {
- extern const BSONObj reverseNaturalObj; // {"$natural": -1 }
+ class OperationContext;
+
+namespace repl {
class ReplicationCoordinator;
+ // {"$natural": -1 }
+ extern const BSONObj reverseNaturalObj;
+
/**
* Authenticates conn using the server's cluster-membership credentials.
*
diff --git a/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp b/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
index fdbe881da62..c5ba2565810 100644
--- a/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
+++ b/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
@@ -315,7 +315,7 @@ namespace {
OperationContext* ReplicationCoordinatorExternalStateImpl::createOperationContext(
const std::string& threadName) {
Client::initThreadIfNotAlready(threadName.c_str());
- return new OperationContextImpl;
+ return new OperationContextImpl();
}
void ReplicationCoordinatorExternalStateImpl::dropAllTempCollections(OperationContext* txn) {
diff --git a/src/mongo/dbtests/chunktests.cpp b/src/mongo/dbtests/chunktests.cpp
index f4530d9cee3..a85e629770f 100644
--- a/src/mongo/dbtests/chunktests.cpp
+++ b/src/mongo/dbtests/chunktests.cpp
@@ -59,7 +59,7 @@ namespace mongo {
for( unsigned i = 1; i < mySplitPoints.size(); ++i ) {
string name = str::stream() << (i-1);
Shard shard(name,
- name,
+ ConnectionString(HostAndPort(name)),
0 /* maxSize */,
false /* draining */);
shards.insert( shard );
diff --git a/src/mongo/dbtests/sharding.cpp b/src/mongo/dbtests/sharding.cpp
index 9ee59759d98..f4230a92ff0 100644
--- a/src/mongo/dbtests/sharding.cpp
+++ b/src/mongo/dbtests/sharding.cpp
@@ -108,7 +108,7 @@ namespace ShardingTests {
// Since we've redirected the conns, the host doesn't matter here so long as it's
// prefixed with a "$"
_shard = Shard("shard0000",
- "$hostFooBar:27017",
+ ConnectionString(HostAndPort("$hostFooBar:27017")),
0 /* maxSize */,
false /* draining */);
// Need to run this to ensure the shard is in the global lookup table
@@ -116,7 +116,7 @@ namespace ShardingTests {
// Add dummy shard to config DB
_client.insert(ShardType::ConfigNS,
BSON(ShardType::name() << _shard.getName() <<
- ShardType::host() << _shard.getConnString()));
+ ShardType::host() << _shard.getConnString().toString()));
// Create an index so that diffing works correctly, otherwise no cursors from S&O
ASSERT_OK(dbtests::createIndex(
diff --git a/src/mongo/s/catalog/legacy/catalog_manager_legacy.cpp b/src/mongo/s/catalog/legacy/catalog_manager_legacy.cpp
index e77bc9f3b7d..08c518f662a 100644
--- a/src/mongo/s/catalog/legacy/catalog_manager_legacy.cpp
+++ b/src/mongo/s/catalog/legacy/catalog_manager_legacy.cpp
@@ -983,7 +983,8 @@ namespace {
conn.done();
continue;
}
- errors[shard.getConnString()] = info;
+
+ errors[shard.getConnString().toString()] = info;
}
conn.done();
diff --git a/src/mongo/s/chunk.cpp b/src/mongo/s/chunk.cpp
index 3694c576878..06435c446f1 100644
--- a/src/mongo/s/chunk.cpp
+++ b/src/mongo/s/chunk.cpp
@@ -480,8 +480,8 @@ namespace {
BSONObjBuilder builder;
builder.append("moveChunk", _manager->getns());
- builder.append("from", from.getAddress().toString());
- builder.append("to", to.getAddress().toString());
+ builder.append("from", from.getConnString().toString());
+ builder.append("to", to.getConnString().toString());
// NEEDED FOR 2.0 COMPATIBILITY
builder.append("fromShard", from.getName());
builder.append("toShard", to.getName());
diff --git a/src/mongo/s/client/shard.cpp b/src/mongo/s/client/shard.cpp
index 5d1a761623a..35734ae9800 100644
--- a/src/mongo/s/client/shard.cpp
+++ b/src/mongo/s/client/shard.cpp
@@ -93,36 +93,21 @@ namespace {
Shard::Shard()
: _name(""),
- _addr(""),
_maxSizeMB(0),
_isDraining(false) {
}
Shard::Shard(const std::string& name,
- const std::string& addr,
- long long maxSizeMB,
- bool isDraining)
- : _name(name),
- _addr(addr),
- _maxSizeMB(maxSizeMB),
- _isDraining(isDraining) {
-
- if (!_addr.empty()) {
- _cs = ConnectionString(addr, ConnectionString::SET);
- }
- }
-
- Shard::Shard(const std::string& name,
const ConnectionString& connStr,
long long maxSizeMB,
bool isDraining)
: _name(name),
- _addr(connStr.toString()),
_cs(connStr),
_maxSizeMB(maxSizeMB),
_isDraining(isDraining) {
+ invariant(_cs.isValid());
}
Shard Shard::findIfExists( const string& shardName ) {
@@ -138,8 +123,9 @@ namespace {
}
bool Shard::containsNode( const string& node ) const {
- if ( _addr == node )
+ if (_cs.toString() == node) {
return true;
+ }
if ( _cs.type() == ConnectionString::SET ) {
ReplicaSetMonitorPtr rs = ReplicaSetMonitor::get( _cs.getSetName(), true );
@@ -233,8 +219,8 @@ namespace {
ShardStatus Shard::getStatus() const {
return ShardStatus(*this,
- getShardDataSizeBytes(getConnString()),
- getShardMongoVersion(getConnString()));
+ getShardDataSizeBytes(getConnString().toString()),
+ getShardMongoVersion(getConnString().toString()));
}
void Shard::reloadShardInfo() {
diff --git a/src/mongo/s/client/shard.h b/src/mongo/s/client/shard.h
index 3a5652294ff..94364cafb86 100644
--- a/src/mongo/s/client/shard.h
+++ b/src/mongo/s/client/shard.h
@@ -40,17 +40,11 @@ namespace mongo {
/*
* A "shard" one partition of the overall database (and a replica set typically).
*/
-
class Shard {
public:
Shard();
Shard(const std::string& name,
- const std::string& addr,
- long long maxSizeMB,
- bool isDraining);
-
- Shard(const std::string& name,
const ConnectionString& connStr,
long long maxSizeMB,
bool isDraining);
@@ -82,9 +76,8 @@ namespace mongo {
*/
void reset( const std::string& ident );
- const ConnectionString& getAddress() const { return _cs; }
const std::string& getName() const { return _name; }
- const std::string& getConnString() const { return _addr; }
+ const ConnectionString& getConnString() const { return _cs; }
long long getMaxSizeMB() const {
return _maxSizeMB;
@@ -95,7 +88,7 @@ namespace mongo {
}
std::string toString() const {
- return _name + ":" + _addr;
+ return _name + ":" + _cs.toString();
}
friend std::ostream& operator << (std::ostream& out, const Shard& s) {
@@ -112,19 +105,11 @@ namespace mongo {
return ! ( *this == s );
}
- bool operator==( const std::string& s ) const {
- return _name == s || _addr == s;
- }
-
- bool operator!=( const std::string& s ) const {
- return _name != s && _addr != s;
- }
-
bool operator<(const Shard& o) const {
return _name < o._name;
}
- bool ok() const { return _addr.size() > 0; }
+ bool ok() const { return _cs.isValid(); }
BSONObj runCommand(const std::string& db, const std::string& simple) const;
BSONObj runCommand(const std::string& db, const BSONObj& cmd) const;
@@ -175,7 +160,6 @@ namespace mongo {
private:
std::string _name;
- std::string _addr;
ConnectionString _cs;
long long _maxSizeMB; // in MBytes, 0 is unlimited
bool _isDraining; // shard is currently being removed
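
With the string-address overload gone, only the ConnectionString-taking constructor remains, so shards are built as in the chunktests.cpp and sharding.cpp hunks above. A hedged construction example (the host name is purely illustrative):

    #include "mongo/client/connection_string.h"
    #include "mongo/s/client/shard.h"
    #include "mongo/util/net/hostandport.h"

    // Sketch only: a Shard is now built from a ConnectionString wrapping the host
    // rather than from a raw address string.
    mongo::Shard exampleShard("shard0000",
                              mongo::ConnectionString(mongo::HostAndPort("example.net:27017")),
                              0 /* maxSizeMB */,
                              false /* isDraining */);
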
diff --git a/src/mongo/s/client/shard_connection.cpp b/src/mongo/s/client/shard_connection.cpp
index f27041c7167..8c86bb79495 100644
--- a/src/mongo/s/client/shard_connection.cpp
+++ b/src/mongo/s/client/shard_connection.cpp
@@ -272,7 +272,6 @@ namespace {
Status* ss = i->second;
if ( ss->avail )
ss->avail->getLastError();
-
}
}
@@ -289,7 +288,7 @@ namespace {
Shard& shard = all[i];
try {
- string sconnString = shard.getConnString();
+ string sconnString = shard.getConnString().toString();
Status* s = _getStatus( sconnString );
if( ! s->avail ) {
@@ -410,8 +409,10 @@ namespace {
void usingAShardConnection(const string& addr);
- ShardConnection::ShardConnection(const string& addr, const string& ns, ChunkManagerPtr manager)
- : _addr(addr),
+ ShardConnection::ShardConnection(const ConnectionString& connectionString,
+ const string& ns,
+ boost::shared_ptr<ChunkManager> manager)
+ : _cs(connectionString),
_ns(ns),
_manager(manager) {
@@ -441,10 +442,10 @@ namespace {
}
void ShardConnection::_init() {
- verify( _addr.size() );
- _conn = ClientConnections::threadInstance()->get( _addr , _ns );
+ invariant(_cs.isValid());
+ _conn = ClientConnections::threadInstance()->get(_cs.toString(), _ns);
_finishedInit = false;
- usingAShardConnection( _addr );
+ usingAShardConnection(_cs.toString());
}
void ShardConnection::_finishInit() {
@@ -467,7 +468,7 @@ namespace {
void ShardConnection::done() {
if ( _conn ) {
- ClientConnections::threadInstance()->done( _addr , _conn );
+ ClientConnections::threadInstance()->done(_cs.toString(), _conn);
_conn = 0;
_finishedInit = true;
}
@@ -481,7 +482,7 @@ namespace {
if (_conn->isFailed()) {
// Let the pool know about the bad connection and also delegate disposal to it.
- ClientConnections::threadInstance()->done(_addr, _conn);
+ ClientConnections::threadInstance()->done(_cs.toString(), _conn);
}
else {
delete _conn;
@@ -528,7 +529,7 @@ namespace {
Shard s = Shard::make(conn.getServerAddress());
cmdBuilder.append("shard", s.getName());
- cmdBuilder.append("shardHost", s.getConnString());
+ cmdBuilder.append("shardHost", s.getConnString().toString());
if (ns.size() > 0) {
version.addToBSON(cmdBuilder);
diff --git a/src/mongo/s/client/shard_connection.h b/src/mongo/s/client/shard_connection.h
index 5bb77ffbb84..41af573320a 100644
--- a/src/mongo/s/client/shard_connection.h
+++ b/src/mongo/s/client/shard_connection.h
@@ -42,7 +42,9 @@ namespace mongo {
class ShardConnection : public AScopedConnection {
public:
- ShardConnection(const std::string& addr, const std::string& ns, ChunkManagerPtr manager = ChunkManagerPtr());
+ ShardConnection(const ConnectionString& connectionString,
+ const std::string& ns,
+ boost::shared_ptr<ChunkManager> manager = nullptr);
~ShardConnection();
@@ -77,14 +79,14 @@ namespace mongo {
}
std::string getHost() const {
- return _addr;
+ return _cs.toString();
}
std::string getNS() const {
return _ns;
}
- ChunkManagerPtr getManager() const {
+ boost::shared_ptr<ChunkManager> getManager() const {
return _manager;
}
@@ -126,9 +128,10 @@ namespace mongo {
void _init();
void _finishInit();
- std::string _addr;
- std::string _ns;
- ChunkManagerPtr _manager;
+ const ConnectionString _cs;
+ const std::string _ns;
+
+ boost::shared_ptr<ChunkManager> _manager;
bool _finishedInit;
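
The ShardConnection constructor now also takes a ConnectionString. A hedged sketch of the two ways the patch builds that argument (hosts and namespace below are illustrative only):

    #include "mongo/client/connection_string.h"
    #include "mongo/s/client/shard_connection.h"
    #include "mongo/util/assert_util.h"
    #include "mongo/util/net/hostandport.h"

    namespace {

        // Illustrative only; mirrors the call sites changed in this patch.
        void openShardConnections() {
            using namespace mongo;

            // Single host: wrap a HostAndPort directly, as the tests below do.
            ShardConnection direct(ConnectionString(HostAndPort("localhost:27017")), "test.user");

            // Arbitrary host string (possibly a replica set): parse it and unwrap
            // the StatusWith, as parallel.cpp does above.
            ShardConnection parsed(
                uassertStatusOK(ConnectionString::parse("rs0/a.example.net:27017,b.example.net:27017")),
                "test.user");

            direct.done();
            parsed.done();
        }

    } // namespace
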
diff --git a/src/mongo/s/client/shard_connection_test.cpp b/src/mongo/s/client/shard_connection_test.cpp
index 632bee4e603..ff55380ad44 100644
--- a/src/mongo/s/client/shard_connection_test.cpp
+++ b/src/mongo/s/client/shard_connection_test.cpp
@@ -114,7 +114,7 @@ namespace {
size_t newConnsToCreate) {
vector<ShardConnection*> newConnList;
for (size_t x = 0; x < newConnsToCreate; x++) {
- ShardConnection* newConn = new ShardConnection(TARGET_HOST, "test.user");
+ ShardConnection* newConn = new ShardConnection(ConnectionString(HostAndPort(TARGET_HOST)), "test.user");
checkFunc(newConn->get()->getSockCreationMicroSec(), arg2);
newConnList.push_back(newConn);
}
@@ -131,7 +131,7 @@ namespace {
// Check that connections created after the purge was put back to the pool.
for (size_t x = 0; x < newConnsToCreate; x++) {
- ShardConnection* newConn = new ShardConnection(TARGET_HOST, "test.user");
+ ShardConnection* newConn = new ShardConnection(ConnectionString(HostAndPort(TARGET_HOST)), "test.user");
ASSERT_LESS_THAN(newConn->get()->getSockCreationMicroSec(), oldCreationTime);
newConnList.push_back(newConn);
}
@@ -149,13 +149,13 @@ namespace {
};
TEST_F(ShardConnFixture, BasicShardConnection) {
- ShardConnection conn1(TARGET_HOST, "test.user");
- ShardConnection conn2(TARGET_HOST, "test.user");
+ ShardConnection conn1(ConnectionString(HostAndPort(TARGET_HOST)), "test.user");
+ ShardConnection conn2(ConnectionString(HostAndPort(TARGET_HOST)), "test.user");
DBClientBase* conn1Ptr = conn1.get();
conn1.done();
- ShardConnection conn3(TARGET_HOST, "test.user");
+ ShardConnection conn3(ConnectionString(HostAndPort(TARGET_HOST)), "test.user");
ASSERT_EQUALS(conn1Ptr, conn3.get());
conn2.done();
@@ -163,9 +163,9 @@ namespace {
}
TEST_F(ShardConnFixture, InvalidateBadConnInPool) {
- ShardConnection conn1(TARGET_HOST, "test.user");
- ShardConnection conn2(TARGET_HOST, "test.user");
- ShardConnection conn3(TARGET_HOST, "test.user");
+ ShardConnection conn1(ConnectionString(HostAndPort(TARGET_HOST)), "test.user");
+ ShardConnection conn2(ConnectionString(HostAndPort(TARGET_HOST)), "test.user");
+ ShardConnection conn3(ConnectionString(HostAndPort(TARGET_HOST)), "test.user");
conn1.done();
conn3.done();
@@ -186,9 +186,9 @@ namespace {
}
TEST_F(ShardConnFixture, DontReturnKnownBadConnToPool) {
- ShardConnection conn1(TARGET_HOST, "test.user");
- ShardConnection conn2(TARGET_HOST, "test.user");
- ShardConnection conn3(TARGET_HOST, "test.user");
+ ShardConnection conn1(ConnectionString(HostAndPort(TARGET_HOST)), "test.user");
+ ShardConnection conn2(ConnectionString(HostAndPort(TARGET_HOST)), "test.user");
+ ShardConnection conn3(ConnectionString(HostAndPort(TARGET_HOST)), "test.user");
conn1.done();
killServer();
@@ -210,9 +210,9 @@ namespace {
}
TEST_F(ShardConnFixture, BadConnClearsPoolWhenKilled) {
- ShardConnection conn1(TARGET_HOST, "test.user");
- ShardConnection conn2(TARGET_HOST, "test.user");
- ShardConnection conn3(TARGET_HOST, "test.user");
+ ShardConnection conn1(ConnectionString(HostAndPort(TARGET_HOST)), "test.user");
+ ShardConnection conn2(ConnectionString(HostAndPort(TARGET_HOST)), "test.user");
+ ShardConnection conn3(ConnectionString(HostAndPort(TARGET_HOST)), "test.user");
conn1.done();
killServer();
@@ -234,9 +234,9 @@ namespace {
}
TEST_F(ShardConnFixture, KilledGoodConnShouldNotClearPool) {
- ShardConnection conn1(TARGET_HOST, "test.user");
- ShardConnection conn2(TARGET_HOST, "test.user");
- ShardConnection conn3(TARGET_HOST, "test.user");
+ ShardConnection conn1(ConnectionString(HostAndPort(TARGET_HOST)), "test.user");
+ ShardConnection conn2(ConnectionString(HostAndPort(TARGET_HOST)), "test.user");
+ ShardConnection conn3(ConnectionString(HostAndPort(TARGET_HOST)), "test.user");
const uint64_t upperBoundCreationTime =
conn3.get()->getSockCreationMicroSec();
@@ -247,8 +247,8 @@ namespace {
conn2.done();
- ShardConnection conn4(TARGET_HOST, "test.user");
- ShardConnection conn5(TARGET_HOST, "test.user");
+ ShardConnection conn4(ConnectionString(HostAndPort(TARGET_HOST)), "test.user");
+ ShardConnection conn5(ConnectionString(HostAndPort(TARGET_HOST)), "test.user");
ASSERT_GREATER_THAN(conn4.get()->getSockCreationMicroSec(), badCreationTime);
ASSERT_LESS_THAN_OR_EQUALS(conn4.get()->getSockCreationMicroSec(),
@@ -264,9 +264,9 @@ namespace {
TEST_F(ShardConnFixture, InvalidateBadConnEvenWhenPoolIsFull) {
mongo::shardConnectionPool.setMaxPoolSize(2);
- ShardConnection conn1(TARGET_HOST, "test.user");
- ShardConnection conn2(TARGET_HOST, "test.user");
- ShardConnection conn3(TARGET_HOST, "test.user");
+ ShardConnection conn1(ConnectionString(HostAndPort(TARGET_HOST)), "test.user");
+ ShardConnection conn2(ConnectionString(HostAndPort(TARGET_HOST)), "test.user");
+ ShardConnection conn3(ConnectionString(HostAndPort(TARGET_HOST)), "test.user");
conn1.done();
conn3.done();
@@ -287,13 +287,13 @@ namespace {
}
TEST_F(ShardConnFixture, DontReturnConnGoneBadToPool) {
- ShardConnection conn1(TARGET_HOST, "test.user");
+ ShardConnection conn1(ConnectionString(HostAndPort(TARGET_HOST)), "test.user");
const uint64_t conn1CreationTime = conn1.get()->getSockCreationMicroSec();
uint64_t conn2CreationTime = 0;
{
- ShardConnection conn2(TARGET_HOST, "test.user");
+ ShardConnection conn2(ConnectionString(HostAndPort(TARGET_HOST)), "test.user");
conn2CreationTime = conn2.get()->getSockCreationMicroSec();
conn1.done();
@@ -304,7 +304,7 @@ namespace {
// also not invalidate older connections since it didn't encounter
// a socket exception.
- ShardConnection conn1Again(TARGET_HOST, "test.user");
+ ShardConnection conn1Again(ConnectionString(HostAndPort(TARGET_HOST)), "test.user");
ASSERT_EQUALS(conn1CreationTime, conn1Again.get()->getSockCreationMicroSec());
checkNewConns(assertNotEqual, conn2CreationTime, 10);
diff --git a/src/mongo/s/client/shard_registry.cpp b/src/mongo/s/client/shard_registry.cpp
index f811a18cb1e..d9c145fe4ce 100644
--- a/src/mongo/s/client/shard_registry.cpp
+++ b/src/mongo/s/client/shard_registry.cpp
@@ -87,22 +87,31 @@ namespace mongo {
for (const ShardType& shardData : shards) {
uassertStatusOK(shardData.validate());
+ // This validation should ideally go inside the ShardType::validate call. However,
+ // doing it there would prevent us from loading previously faulty shard hosts, which
+ // might have been stored (i.e., the entire getAllShards call would fail).
+ auto shardHostStatus = ConnectionString::parse(shardData.getHost());
+ if (!shardHostStatus.isOK()) {
+ warning() << "Unable to parse shard host "
+ << shardHostStatus.getStatus().toString();
+ }
+
+ const ConnectionString& shardHost(shardHostStatus.getValue());
+
shared_ptr<Shard> shard = boost::make_shared<Shard>(shardData.getName(),
- shardData.getHost(),
+ shardHost,
shardData.getMaxSize(),
shardData.getDraining());
_lookup[shardData.getName()] = shard;
_lookup[shardData.getHost()] = shard;
- const ConnectionString& cs = shard->getAddress();
-
- if (cs.type() == ConnectionString::SET) {
- if (cs.getSetName().size()) {
+ if (shardHost.type() == ConnectionString::SET) {
+ if (shardHost.getSetName().size()) {
boost::lock_guard<boost::mutex> lk(_rsMutex);
- _rsLookup[cs.getSetName()] = shard;
+ _rsLookup[shardHost.getSetName()] = shard;
}
- vector<HostAndPort> servers = cs.getServers();
+ vector<HostAndPort> servers = shardHost.getServers();
for (unsigned i = 0; i < servers.size(); i++) {
_lookup[servers[i].toString()] = shard;
}
@@ -237,7 +246,7 @@ namespace mongo {
boost::lock_guard<boost::mutex> lk(_mutex);
for (ShardMap::const_iterator i = _lookup.begin(); i != _lookup.end(); ++i) {
- b.append(i->first, i->second->getConnString());
+ b.append(i->first, i->second->getConnString().toString());
}
result->append("map", b.obj());
diff --git a/src/mongo/s/client/shard_test.cpp b/src/mongo/s/client/shard_test.cpp
index 0c57d6450da..24983d7c6c5 100644
--- a/src/mongo/s/client/shard_test.cpp
+++ b/src/mongo/s/client/shard_test.cpp
@@ -26,31 +26,47 @@
* it in the license file.
*/
+#include "mongo/platform/basic.h"
+
+#include "mongo/client/connection_string.h"
#include "mongo/s/client/shard.h"
#include "mongo/unittest/unittest.h"
namespace {
using namespace mongo;
+ using unittest::assertGet;
+
+ TEST(Shard, EqualityRs) {
+ Shard a("foo", assertGet(ConnectionString::parse("bar/a,b")), 0, false);
- TEST( Shard, EqualityRs ) {
- Shard a("foo", "bar/a,b", 0, false);
- Shard b("foo", "bar/a,b", 0, false);
- ASSERT_EQUALS( a, b );
+ {
+ Shard s("foo", assertGet(ConnectionString::parse("bar/a,b")), 0, false);
+ ASSERT_EQUALS(a, s);
+ }
- b = Shard("foo", "bar/b,a", 0, false);
- ASSERT_EQUALS( a, b );
+ {
+ Shard s("foo", assertGet(ConnectionString::parse("bar/b,a")), 0, false);
+ ASSERT_EQUALS(a, s);
+ }
}
- TEST( Shard, EqualitySingle ) {
- ASSERT_EQUALS(Shard("foo", "b.foo.com:123", 0, false),
- Shard("foo", "b.foo.com:123", 0, false));
- ASSERT_NOT_EQUALS(Shard("foo", "b.foo.com:123", 0, false),
- Shard("foo", "a.foo.com:123", 0, false));
- ASSERT_NOT_EQUALS(Shard("foo", "b.foo.com:123", 0, false),
- Shard("foo", "b.foo.com:124", 0, false));
- ASSERT_NOT_EQUALS(Shard("foo", "b.foo.com:123", 0, false),
- Shard("foa", "b.foo.com:123", 0, false));
+ TEST(Shard, EqualitySingle) {
+ ASSERT_EQUALS(
+ Shard("foo", assertGet(ConnectionString::parse("b.foo.com:123")), 0, false),
+ Shard("foo", assertGet(ConnectionString::parse("b.foo.com:123")), 0, false));
+
+ ASSERT_NOT_EQUALS(
+ Shard("foo", assertGet(ConnectionString::parse("b.foo.com:123")), 0, false),
+ Shard("foo", assertGet(ConnectionString::parse("a.foo.com:123")), 0, false));
+
+ ASSERT_NOT_EQUALS(
+ Shard("foo", assertGet(ConnectionString::parse("b.foo.com:123")), 0, false),
+ Shard("foo", assertGet(ConnectionString::parse("b.foo.com:124")), 0, false));
+
+ ASSERT_NOT_EQUALS(
+ Shard("foo", assertGet(ConnectionString::parse("b.foo.com:123")), 0, false),
+ Shard("foa", assertGet(ConnectionString::parse("b.foo.com:123")), 0, false));
}
} // namespace
diff --git a/src/mongo/s/cluster_explain.cpp b/src/mongo/s/cluster_explain.cpp
index 0443a0f8396..7bd9569a69c 100644
--- a/src/mongo/s/cluster_explain.cpp
+++ b/src/mongo/s/cluster_explain.cpp
@@ -211,7 +211,7 @@ namespace mongo {
BSONObj serverInfo = shardResults[i].result["serverInfo"].Obj();
singleShardBob.append("shardName", shardResults[i].shardTarget.getName());
- std::string connStr = shardResults[i].shardTarget.getAddress().toString();
+ std::string connStr = shardResults[i].shardTarget.getConnString().toString();
singleShardBob.append("connectionString", connStr);
appendIfRoom(&singleShardBob, serverInfo, "serverInfo");
appendElementsIfRoom(&singleShardBob, queryPlanner);
diff --git a/src/mongo/s/commands/cluster_map_reduce_cmd.cpp b/src/mongo/s/commands/cluster_map_reduce_cmd.cpp
index 926ade44806..ec9aa1734d7 100644
--- a/src/mongo/s/commands/cluster_map_reduce_cmd.cpp
+++ b/src/mongo/s/commands/cluster_map_reduce_cmd.cpp
@@ -329,7 +329,7 @@ namespace {
for (const auto& result : results) {
// Need to gather list of all servers even if an error happened
- const string server = result.shardTarget.getConnString();
+ const string server = result.shardTarget.getConnString().toString();
servers.insert(server);
if (!ok) {
@@ -497,7 +497,7 @@ namespace {
for (vector<Strategy::CommandResult>::iterator i = results.begin();
i != results.end(); ++i) {
- string server = i->shardTarget.getConnString();
+ const string server = i->shardTarget.getConnString().toString();
singleResult = i->result;
ok = singleResult["ok"].trueValue();
if (!ok) break;
diff --git a/src/mongo/s/commands/cluster_merge_chunks_cmd.cpp b/src/mongo/s/commands/cluster_merge_chunks_cmd.cpp
index 34db493370e..ee00655cf5a 100644
--- a/src/mongo/s/commands/cluster_merge_chunks_cmd.cpp
+++ b/src/mongo/s/commands/cluster_merge_chunks_cmd.cpp
@@ -169,14 +169,15 @@ namespace {
remoteCmdObjB.append( cmdObj[ ClusterMergeChunksCommand::nsField() ] );
remoteCmdObjB.append( cmdObj[ ClusterMergeChunksCommand::boundsField() ] );
remoteCmdObjB.append( ClusterMergeChunksCommand::configField(),
- configServer.getPrimary().getAddress().toString() );
+ configServer.getPrimary().getConnString().toString() );
remoteCmdObjB.append( ClusterMergeChunksCommand::shardNameField(),
shard.getName() );
BSONObj remoteResult;
+
// Throws, but handled at level above. Don't want to rewrap to preserve exception
// formatting.
- ScopedDbConnection conn( shard.getAddress() );
+ ScopedDbConnection conn(shard.getConnString());
bool ok = conn->runCommand( "admin", remoteCmdObjB.obj(), remoteResult );
conn.done();
diff --git a/src/mongo/s/commands/cluster_move_primary_cmd.cpp b/src/mongo/s/commands/cluster_move_primary_cmd.cpp
index 79f22547f1f..bb9d5d4bdb5 100644
--- a/src/mongo/s/commands/cluster_move_primary_cmd.cpp
+++ b/src/mongo/s/commands/cluster_move_primary_cmd.cpp
@@ -45,6 +45,7 @@
#include "mongo/s/catalog/catalog_cache.h"
#include "mongo/s/catalog/catalog_manager.h"
#include "mongo/s/catalog/dist_lock_manager.h"
+#include "mongo/s/client/shard_registry.h"
#include "mongo/s/config.h"
#include "mongo/s/grid.h"
#include "mongo/util/log.h"
@@ -130,29 +131,28 @@ namespace {
return false;
}
- Shard s = Shard::findIfExists(to);
- if (!s.ok()) {
- string msg(str::stream() <<
- "Could not move database '" << dbname <<
- "' to shard '" << to <<
- "' because that shard does not exist");
+ shared_ptr<Shard> s = grid.shardRegistry()->findIfExists(to);
+ if (!s) {
+ string msg(str::stream() << "Could not move database '" << dbname
+ << "' to shard '" << to
+ << "' because the shard does not exist");
log() << msg;
return appendCommandStatus(result,
Status(ErrorCodes::ShardNotFound, msg));
}
- if (config->getPrimary() == s.getConnString()) {
+ if (config->getPrimary().getConnString().sameLogicalEndpoint(s->getConnString())) {
errmsg = "it is already the primary";
return false;
}
- if (!grid.catalogManager()->isShardHost(s.getAddress())) {
+ if (!grid.catalogManager()->isShardHost(s->getConnString())) {
errmsg = "that server isn't known to me";
return false;
}
log() << "Moving " << dbname << " primary from: "
- << config->getPrimary().toString() << " to: " << s.toString();
+ << config->getPrimary().toString() << " to: " << s->toString();
string whyMessage(str::stream() << "Moving primary shard of " << dbname);
auto scopedDistLock = grid.catalogManager()->getDistLockManager()->lock(
@@ -168,7 +168,7 @@ namespace {
// Record start in changelog
BSONObj moveStartDetails = _buildMoveEntry(dbname,
config->getPrimary().toString(),
- s.toString(),
+ s->toString(),
shardedColls);
grid.catalogManager()->logChange(txn, "movePrimary.start", dbname, moveStartDetails);
@@ -176,16 +176,17 @@ namespace {
BSONArrayBuilder barr;
barr.append(shardedColls);
- ScopedDbConnection toconn(s.getConnString());
+ ScopedDbConnection toconn(s->getConnString());
// TODO ERH - we need a clone command which replays operations from clone start to now
// can just use local.oplog.$main
BSONObj cloneRes;
- bool worked = toconn->runCommand(dbname.c_str(),
- BSON("clone" << config->getPrimary().getConnString()
- << "collsToIgnore" << barr.arr()
- << bypassDocumentValidationCommandOption() << true),
- cloneRes);
+ bool worked = toconn->runCommand(
+ dbname.c_str(),
+ BSON("clone" << config->getPrimary().getConnString().toString()
+ << "collsToIgnore" << barr.arr()
+ << bypassDocumentValidationCommandOption() << true),
+ cloneRes);
toconn.done();
if (!worked) {
@@ -194,11 +195,11 @@ namespace {
return false;
}
- string oldPrimary = config->getPrimary().getConnString();
+ const string oldPrimary = config->getPrimary().getConnString().toString();
ScopedDbConnection fromconn(config->getPrimary().getConnString());
- config->setPrimary(s.getConnString());
+ config->setPrimary(s->getConnString().toString());
if (shardedColls.empty()){
@@ -247,12 +248,12 @@ namespace {
fromconn.done();
- result << "primary " << s.toString();
+ result << "primary " << s->toString();
// Record finish in changelog
BSONObj moveFinishDetails = _buildMoveEntry(dbname,
oldPrimary,
- s.toString(),
+ s->toString(),
shardedColls);
grid.catalogManager()->logChange(txn, "movePrimary", dbname, moveFinishDetails);
diff --git a/src/mongo/s/commands/cluster_netstat_cmd.cpp b/src/mongo/s/commands/cluster_netstat_cmd.cpp
index 33411e246b8..ca1bf2fd9af 100644
--- a/src/mongo/s/commands/cluster_netstat_cmd.cpp
+++ b/src/mongo/s/commands/cluster_netstat_cmd.cpp
@@ -29,7 +29,8 @@
#include "mongo/platform/basic.h"
#include "mongo/db/commands.h"
-#include "mongo/s/config.h"
+#include "mongo/s/catalog/catalog_manager.h"
+#include "mongo/s/grid.h"
namespace mongo {
namespace {
@@ -69,7 +70,7 @@ namespace {
std::string& errmsg,
BSONObjBuilder& result) {
- result.append("configserver", configServer.getPrimary().getConnString());
+ result.append("configserver", grid.catalogManager()->connectionString().toString());
result.append("isdbgrid", 1);
return true;
}
diff --git a/src/mongo/s/commands/cluster_pipeline_cmd.cpp b/src/mongo/s/commands/cluster_pipeline_cmd.cpp
index 4a8aa2443e6..b9d75d76243 100644
--- a/src/mongo/s/commands/cluster_pipeline_cmd.cpp
+++ b/src/mongo/s/commands/cluster_pipeline_cmd.cpp
@@ -194,8 +194,7 @@ namespace {
// Run merging command on primary shard of database. Need to use ShardConnection so
// that the merging mongod is sent the config servers on connection init.
- const string mergeServer = conf->getPrimary().getConnString();
- ShardConnection conn(mergeServer, outputNsOrEmpty);
+ ShardConnection conn(conf->getPrimary().getConnString(), outputNsOrEmpty);
BSONObj mergedResults = aggRunCommand(conn.get(),
dbname,
mergeCmd.freeze().toBson(),
diff --git a/src/mongo/s/commands/cluster_reset_error_cmd.cpp b/src/mongo/s/commands/cluster_reset_error_cmd.cpp
index 8bc2af039f3..efa56f524b3 100644
--- a/src/mongo/s/commands/cluster_reset_error_cmd.cpp
+++ b/src/mongo/s/commands/cluster_reset_error_cmd.cpp
@@ -77,7 +77,7 @@ namespace {
const std::string shardName = *i;
- ShardConnection conn(shardName, "");
+ ShardConnection conn(ConnectionString(shardName, ConnectionString::SET), "");
BSONObj res;
diff --git a/src/mongo/s/commands/commands_public.cpp b/src/mongo/s/commands/commands_public.cpp
index cb632e59a37..b038a789105 100644
--- a/src/mongo/s/commands/commands_public.cpp
+++ b/src/mongo/s/commands/commands_public.cpp
@@ -638,7 +638,7 @@ namespace mongo {
}
}
- b.append("fromhost", confFrom->getPrimary().getConnString());
+ b.append("fromhost", confFrom->getPrimary().getConnString().toString());
BSONObj fixed = b.obj();
return adminPassthrough( confTo , fixed , result );
@@ -871,7 +871,7 @@ namespace mongo {
Strategy::CommandResult cmdResult;
cmdResult.shardTarget = shard;
- cmdResult.target = shard.getAddress();
+ cmdResult.target = shard.getConnString();
cmdResult.result = result.obj();
vector<Strategy::CommandResult> shardResults;
@@ -1395,7 +1395,10 @@ namespace mongo {
list< shared_ptr<Future::CommandResult> > futures;
BSONArrayBuilder shardArray;
for ( set<Shard>::const_iterator i=shards.begin(), end=shards.end() ; i != end ; i++ ) {
- futures.push_back( Future::spawnCommand( i->getConnString() , dbName , cmdObj, options ) );
+ futures.push_back(Future::spawnCommand(i->getConnString().toString(),
+ dbName,
+ cmdObj,
+ options));
shardArray.append(i->getName());
}
@@ -1579,8 +1582,9 @@ namespace mongo {
shared_ptr<DBConfig> conf = status.getValue();
bool retval = passthrough( conf, cmdObj, result );
- Status storeCursorStatus = storePossibleCursor(conf->getPrimary().getConnString(),
- result.asTempObj());
+ Status storeCursorStatus =
+ storePossibleCursor(conf->getPrimary().getConnString().toString(),
+ result.asTempObj());
if (!storeCursorStatus.isOK()) {
return appendCommandStatus(result, storeCursorStatus);
}
@@ -1611,14 +1615,16 @@ namespace mongo {
auto conf = uassertStatusOK(grid.catalogCache()->getDatabase(dbName));
bool retval = passthrough( conf, cmdObj, result );
- Status storeCursorStatus = storePossibleCursor(conf->getPrimary().getConnString(),
- result.asTempObj());
+ Status storeCursorStatus =
+ storePossibleCursor(conf->getPrimary().getConnString().toString(),
+ result.asTempObj());
if (!storeCursorStatus.isOK()) {
return appendCommandStatus(result, storeCursorStatus);
}
return retval;
}
+
} cmdListIndexes;
class AvailableQueryOptions : public Command {
diff --git a/src/mongo/s/commands/run_on_all_shards_cmd.cpp b/src/mongo/s/commands/run_on_all_shards_cmd.cpp
index 5d36639064e..4fbcc568784 100644
--- a/src/mongo/s/commands/run_on_all_shards_cmd.cpp
+++ b/src/mongo/s/commands/run_on_all_shards_cmd.cpp
@@ -84,7 +84,7 @@ namespace mongo {
// TODO: Future is deprecated, replace with commandOp()
std::list< boost::shared_ptr<Future::CommandResult> > futures;
for (std::set<Shard>::const_iterator i=shards.begin(), end=shards.end() ; i != end ; i++) {
- futures.push_back( Future::spawnCommand( i->getConnString(),
+ futures.push_back( Future::spawnCommand( i->getConnString().toString(),
dbName,
cmdObj,
0,
diff --git a/src/mongo/s/config.cpp b/src/mongo/s/config.cpp
index f92d03a032b..e34f690e89a 100644
--- a/src/mongo/s/config.cpp
+++ b/src/mongo/s/config.cpp
@@ -656,11 +656,7 @@ namespace mongo {
const std::string& ConfigServer::modelServer() const {
uassert(10190, "ConfigServer not setup", _primary.ok());
- return _primary.getConnString();
- }
-
- ConnectionString ConfigServer::getConnectionString() const {
- return _primary.getAddress();
+ return _primary.getConnString().toString();
}
bool ConfigServer::init( const ConnectionString& configCS ) {
diff --git a/src/mongo/s/config.h b/src/mongo/s/config.h
index 3be7bc1bba7..a2fa72877a3 100644
--- a/src/mongo/s/config.h
+++ b/src/mongo/s/config.h
@@ -30,7 +30,6 @@
#include <boost/shared_ptr.hpp>
-#include "mongo/client/dbclient_rs.h"
#include "mongo/s/client/shard.h"
#include "mongo/s/shard_key_pattern.h"
#include "mongo/util/concurrency/mutex.h"
@@ -206,8 +205,6 @@ namespace mongo {
void reloadSettings();
- ConnectionString getConnectionString() const;
-
void replicaSetChange(const std::string& setName, const std::string& newConnectionString);
static int VERSION;
diff --git a/src/mongo/s/d_migrate.cpp b/src/mongo/s/d_migrate.cpp
index 7e11cf2287b..f3deb22ff7d 100644
--- a/src/mongo/s/d_migrate.cpp
+++ b/src/mongo/s/d_migrate.cpp
@@ -1304,7 +1304,7 @@ namespace mongo {
BSONObjBuilder recvChunkStartBuilder;
recvChunkStartBuilder.append("_recvChunkStart", ns);
- recvChunkStartBuilder.append("from", fromShard.getConnString());
+ recvChunkStartBuilder.append("from", fromShard.getConnString().toString());
recvChunkStartBuilder.append("fromShardName", fromShard.getName());
recvChunkStartBuilder.append("toShardName", toShard.getName());
recvChunkStartBuilder.append("min", min);
@@ -1369,7 +1369,7 @@ namespace mongo {
conn.done();
if ( res["ns"].str() != ns ||
- res["from"].str() != fromShard.getConnString() ||
+ res["from"].str() != fromShard.getConnString().toString() ||
!res["min"].isABSONObj() ||
res["min"].Obj().woCompare(min) != 0 ||
!res["max"].isABSONObj() ||
@@ -1490,9 +1490,9 @@ namespace mongo {
}
catch ( DBException& e ) {
errmsg = str::stream() << "moveChunk could not contact to: shard "
- << toShard.getConnString() << " to commit transfer"
- << causedBy( e );
- warning() << errmsg << endl;
+ << toShard.getConnString().toString()
+ << " to commit transfer" << causedBy(e);
+ warning() << errmsg;
ok = false;
}
diff --git a/src/mongo/s/d_state.cpp b/src/mongo/s/d_state.cpp
index 58f17718f8e..c41f4502877 100644
--- a/src/mongo/s/d_state.cpp
+++ b/src/mongo/s/d_state.cpp
@@ -94,7 +94,7 @@ namespace mongo {
boost::lock_guard<boost::mutex> lk(_mutex);
invariant(_enabled);
- return configServer.getConnectionString().toString();
+ return grid.catalogManager()->connectionString().toString();
}
void ShardingState::initialize(const string& server) {
@@ -796,7 +796,7 @@ namespace mongo {
return;
}
- builder.append("configServer", configServer.getConnectionString().toString());
+ builder.append("configServer", grid.catalogManager()->connectionString().toString());
builder.append("shardName", _shardName);
BSONObjBuilder versionB(builder.subobjStart("versions"));
diff --git a/src/mongo/s/dbclient_shard_resolver.cpp b/src/mongo/s/dbclient_shard_resolver.cpp
index cc8f89130a0..a1648e7cd5c 100644
--- a/src/mongo/s/dbclient_shard_resolver.cpp
+++ b/src/mongo/s/dbclient_shard_resolver.cpp
@@ -61,7 +61,7 @@ namespace mongo {
return Status( ErrorCodes::ShardNotFound,
string("unknown shard name ") + shardName );
}
- return findMaster(shard.getConnString(), shardHost);
+ return findMaster(shard.getConnString().toString(), shardHost);
}
Status DBClientShardResolver::findMaster( const std::string connString,
diff --git a/src/mongo/s/server.cpp b/src/mongo/s/server.cpp
index eb6ad02d06b..40c296da303 100644
--- a/src/mongo/s/server.cpp
+++ b/src/mongo/s/server.cpp
@@ -40,6 +40,7 @@
#include "mongo/base/initializer.h"
#include "mongo/base/status.h"
#include "mongo/client/connpool.h"
+#include "mongo/client/dbclient_rs.h"
#include "mongo/client/replica_set_monitor.h"
#include "mongo/config.h"
#include "mongo/db/audit.h"
diff --git a/src/mongo/s/strategy.cpp b/src/mongo/s/strategy.cpp
index 87eccbe14f9..6e6b129ca10 100644
--- a/src/mongo/s/strategy.cpp
+++ b/src/mongo/s/strategy.cpp
@@ -534,7 +534,7 @@ namespace mongo {
// Fill out the command result.
cmdResult->shardTarget = primaryShard;
cmdResult->result = shardResult;
- cmdResult->target = primaryShard.getAddress();
+ cmdResult->target = primaryShard.getConnString();
return Status::OK();
}
diff --git a/src/mongo/s/version_manager.cpp b/src/mongo/s/version_manager.cpp
index 87e9a1d93c6..9939d2ced18 100644
--- a/src/mongo/s/version_manager.cpp
+++ b/src/mongo/s/version_manager.cpp
@@ -36,6 +36,7 @@
#include <boost/shared_ptr.hpp>
+#include "mongo/client/dbclient_rs.h"
#include "mongo/db/namespace_string.h"
#include "mongo/s/catalog/catalog_cache.h"
#include "mongo/s/chunk_manager.h"
@@ -319,7 +320,7 @@ namespace mongo {
<< refVersion.toString()
<< " : " << refManager->getSequenceNumber() << ") "
<< "on shard " << shard.getName()
- << " (" << shard.getAddress().toString() << ")");
+ << " (" << shard.getConnString().toString() << ")");
throw SendStaleConfigException(ns,
msg,
diff --git a/src/mongo/util/exit_code.h b/src/mongo/util/exit_code.h
index bff937ba808..d6609849bcf 100644
--- a/src/mongo/util/exit_code.h
+++ b/src/mongo/util/exit_code.h
@@ -32,10 +32,7 @@
#pragma once
-#include <cstring>
-
namespace mongo {
- class OperationContext;
enum ExitCode {
EXIT_CLEAN = 0 ,
@@ -73,4 +70,5 @@ namespace mongo {
* proper NT Service shutdown.
*/
void signalShutdown();
+
} // namespace mongo