summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--src/mongo/client/connpool.cpp27
-rw-r--r--src/mongo/client/connpool.h28
-rw-r--r--src/mongo/client/dbclient_rs.cpp5
-rw-r--r--src/mongo/client/global_conn_pool.cpp4
-rw-r--r--src/mongo/client/global_conn_pool.h6
-rw-r--r--src/mongo/client/replica_set_monitor.cpp2
-rw-r--r--src/mongo/client/scoped_db_conn_test.cpp153
-rw-r--r--src/mongo/db/commands.cpp11
-rw-r--r--src/mongo/db/conn_pool_options.cpp5
-rw-r--r--src/mongo/s/catalog/catalog_manager.h6
-rw-r--r--src/mongo/s/catalog/legacy/catalog_manager_legacy.cpp4
-rw-r--r--src/mongo/s/catalog/legacy/catalog_manager_legacy.h2
-rw-r--r--src/mongo/s/catalog/replset/catalog_manager_replica_set.cpp4
-rw-r--r--src/mongo/s/catalog/replset/catalog_manager_replica_set.h2
-rw-r--r--src/mongo/s/d_state.cpp6
-rw-r--r--src/mongo/s/server.cpp7
16 files changed, 137 insertions, 135 deletions
diff --git a/src/mongo/client/connpool.cpp b/src/mongo/client/connpool.cpp
index 457f4c0de5a..79f0aa9801c 100644
--- a/src/mongo/client/connpool.cpp
+++ b/src/mongo/client/connpool.cpp
@@ -467,6 +467,31 @@ namespace mongo {
// ------ ScopedDbConnection ------
+ ScopedDbConnection::ScopedDbConnection(const std::string& host, double socketTimeout)
+ : _host(host),
+ _conn(globalConnPool.get(host, socketTimeout)),
+ _socketTimeout(socketTimeout) {
+
+ _setSocketTimeout();
+ }
+
+ ScopedDbConnection::ScopedDbConnection(const ConnectionString& host, double socketTimeout)
+ : _host(host.toString()),
+ _conn(globalConnPool.get(host, socketTimeout)),
+ _socketTimeout(socketTimeout) {
+
+ _setSocketTimeout();
+ }
+
+ void ScopedDbConnection::done() {
+ if (!_conn) {
+ return;
+ }
+
+ globalConnPool.release(_host, _conn);
+ _conn = NULL;
+ }
+
void ScopedDbConnection::_setSocketTimeout(){
if( ! _conn ) return;
if( _conn->type() == ConnectionString::MASTER )
@@ -498,7 +523,7 @@ namespace mongo {
}
void ScopedDbConnection::clearPool() {
- pool.clear();
+ globalConnPool.clear();
}
AtomicInt32 AScopedConnection::_numConnections;
diff --git a/src/mongo/client/connpool.h b/src/mongo/client/connpool.h
index 5c9735af4a9..1c6728d3352 100644
--- a/src/mongo/client/connpool.h
+++ b/src/mongo/client/connpool.h
@@ -266,8 +266,6 @@ namespace mongo {
};
- extern DBConnectionPool pool;
-
class AScopedConnection : boost::noncopyable {
public:
AScopedConnection() { _numConnections.fetchAndAdd(1); }
@@ -300,13 +298,8 @@ namespace mongo {
/** the main constructor you want to use
throws UserException if can't connect
*/
- explicit ScopedDbConnection(const std::string& host, double socketTimeout = 0) : _host(host), _conn( pool.get(host, socketTimeout) ), _socketTimeout( socketTimeout ) {
- _setSocketTimeout();
- }
-
- explicit ScopedDbConnection(const ConnectionString& host, double socketTimeout = 0) : _host(host.toString()), _conn( pool.get(host, socketTimeout) ), _socketTimeout( socketTimeout ) {
- _setSocketTimeout();
- }
+ explicit ScopedDbConnection(const std::string& host, double socketTimeout = 0);
+ explicit ScopedDbConnection(const ConnectionString& host, double socketTimeout = 0);
ScopedDbConnection() : _host( "" ) , _conn(0), _socketTimeout( 0 ) {}
@@ -315,10 +308,10 @@ namespace mongo {
_setSocketTimeout();
}
- static void clearPool();
-
~ScopedDbConnection();
+ static void clearPool();
+
/** get the associated connection object */
DBClientBase* operator->() {
uassert( 11004 , "connection was returned to the pool already" , _conn );
@@ -355,18 +348,7 @@ namespace mongo {
we can't be sure we fully read all expected data of a reply on the socket. so
we don't try to reuse the connection in that situation.
*/
- void done() {
- if ( ! _conn )
- return;
-
- /* we could do this, but instead of assume one is using autoreconnect mode on the connection
- if ( _conn->isFailed() )
- kill();
- else
- */
- pool.release(_host, _conn);
- _conn = 0;
- }
+ void done();
private:
diff --git a/src/mongo/client/dbclient_rs.cpp b/src/mongo/client/dbclient_rs.cpp
index f6bdd726006..30c91049c3f 100644
--- a/src/mongo/client/dbclient_rs.cpp
+++ b/src/mongo/client/dbclient_rs.cpp
@@ -37,6 +37,7 @@
#include "mongo/bson/util/builder.h"
#include "mongo/client/connpool.h"
#include "mongo/client/dbclientcursor.h"
+#include "mongo/client/global_conn_pool.h"
#include "mongo/client/replica_set_monitor.h"
#include "mongo/client/sasl_client_authenticate.h"
#include "mongo/db/dbmessage.h"
@@ -702,7 +703,7 @@ namespace {
// callback. We should eventually not need this after we remove the
// callback.
DBClientConnection* newConn = dynamic_cast<DBClientConnection*>(
- pool.get(_lastSlaveOkHost.toString(), _so_timeout));
+ globalConnPool.get(_lastSlaveOkHost.toString(), _so_timeout));
// Assert here instead of returning NULL since the contract of this method is such
// that returning NULL means none of the nodes were good, which is not the case here.
@@ -1027,7 +1028,7 @@ namespace {
}
// If the connection was bad, the pool will clean it up.
- pool.release(_lastSlaveOkHost.toString(), _lastSlaveOkConn.release());
+ globalConnPool.release(_lastSlaveOkHost.toString(), _lastSlaveOkConn.release());
}
_lastSlaveOkHost = HostAndPort();
diff --git a/src/mongo/client/global_conn_pool.cpp b/src/mongo/client/global_conn_pool.cpp
index 86451da40ed..f63ff7012fb 100644
--- a/src/mongo/client/global_conn_pool.cpp
+++ b/src/mongo/client/global_conn_pool.cpp
@@ -30,11 +30,9 @@
#include "mongo/client/global_conn_pool.h"
-#include "mongo/client/connpool.h"
-
namespace mongo {
- DBConnectionPool pool;
+ DBConnectionPool globalConnPool;
ReplicaSetMonitorManager globalRSMonitorManager;
diff --git a/src/mongo/client/global_conn_pool.h b/src/mongo/client/global_conn_pool.h
index 68e72311758..b5c37d2a415 100644
--- a/src/mongo/client/global_conn_pool.h
+++ b/src/mongo/client/global_conn_pool.h
@@ -28,11 +28,17 @@
#pragma once
+#include "mongo/client/connpool.h"
#include "mongo/client/replica_set_monitor_manager.h"
namespace mongo {
/**
+ * Global connection pool (used by all references to the internal DB client).
+ */
+ extern DBConnectionPool globalConnPool;
+
+ /**
* Maintains the replica set monitors associated with the global connection pool.
*/
extern ReplicaSetMonitorManager globalRSMonitorManager;
diff --git a/src/mongo/client/replica_set_monitor.cpp b/src/mongo/client/replica_set_monitor.cpp
index 4d02a045f1f..6a6063c852e 100644
--- a/src/mongo/client/replica_set_monitor.cpp
+++ b/src/mongo/client/replica_set_monitor.cpp
@@ -347,7 +347,7 @@ namespace {
// Kill all pooled ReplicaSetConnections for this set. They will not function correctly
// after we kill the ReplicaSetMonitor.
- pool.removeHost(name);
+ globalConnPool.removeHost(name);
}
void ReplicaSetMonitor::setConfigChangeHook(ConfigChangeHook hook) {
diff --git a/src/mongo/client/scoped_db_conn_test.cpp b/src/mongo/client/scoped_db_conn_test.cpp
index ef68972dabc..1a39b1dbe69 100644
--- a/src/mongo/client/scoped_db_conn_test.cpp
+++ b/src/mongo/client/scoped_db_conn_test.cpp
@@ -27,9 +27,15 @@
#define MONGO_LOG_DEFAULT_COMPONENT ::mongo::logger::LogComponent::kDefault
-#include "mongo/base/init.h"
+#include "mongo/platform/basic.h"
+
+#include <mutex>
+#include <vector>
+#include <string>
+#include <thread>
+
#include "mongo/client/connpool.h"
-#include "mongo/platform/cstdint.h"
+#include "mongo/client/global_conn_pool.h"
#include "mongo/util/net/listen.h"
#include "mongo/util/net/message_port.h"
#include "mongo/util/net/message_server.h"
@@ -40,9 +46,6 @@
#include "mongo/util/timer.h"
#include "mongo/unittest/unittest.h"
-#include <vector>
-#include <boost/thread/thread.hpp>
-
/**
* Tests for ScopedDbConnection, particularly in connection pool management.
* The tests also indirectly tests DBClientConnection's failure detection
@@ -50,40 +53,37 @@
* connection).
*/
-using std::unique_ptr;
-using mongo::DBClientBase;
-using mongo::FailPoint;
-using mongo::ScopedDbConnection;
-using std::string;
-using std::vector;
+namespace mongo {
+
+ using std::unique_ptr;
+ using std::string;
+ using std::vector;
+
+ class Client;
+ class OperationContext;
namespace {
- const string TARGET_HOST = "localhost:27017";
- const int TARGET_PORT = 27017;
- mongo::mutex shutDownMutex;
+ std::mutex shutDownMutex;
bool shuttingDown = false;
-}
-namespace mongo {
-
- class Client;
- class OperationContext;
+} // namespace
// Symbols defined to build the binary correctly.
-
bool inShutdown() {
- boost::lock_guard<boost::mutex> sl(shutDownMutex);
+ std::lock_guard<std::mutex> sl(shutDownMutex);
return shuttingDown;
}
- void signalShutdown() {}
+ void signalShutdown() { }
- DBClientBase* createDirectClient(OperationContext* txn) { return NULL; }
+ DBClientBase* createDirectClient(OperationContext* txn) {
+ return NULL;
+ }
- void dbexit(ExitCode rc, const char *why){
+ void dbexit(ExitCode rc, const char *why) {
{
- boost::lock_guard<boost::mutex> sl(shutDownMutex);
+ std::lock_guard<std::mutex> sl(shutDownMutex);
shuttingDown = true;
}
@@ -98,18 +98,22 @@ namespace mongo {
return false;
}
- class DummyMessageHandler: public MessageHandler {
+namespace {
+
+ const string TARGET_HOST = "localhost:27017";
+ const int TARGET_PORT = 27017;
+
+ class DummyMessageHandler final : public MessageHandler {
public:
virtual void connected(AbstractMessagingPort* p) {
+
}
virtual void process(Message& m, AbstractMessagingPort* por) {
+
}
- };
-}
-namespace mongo_test {
- mongo::DummyMessageHandler dummyHandler;
+ } dummyHandler;
// TODO: Take this out and make it as a reusable class in a header file. The only
// thing that is preventing this from happening is the dependency on the inShutdown
@@ -131,7 +135,8 @@ namespace mongo_test {
*
* @param port the port number to listen to.
*/
- DummyServer(int port): _port(port), _server(NULL) {
+ DummyServer(int port) : _port(port) {
+
}
~DummyServer() {
@@ -144,88 +149,88 @@ namespace mongo_test {
* @param messageHandler the message handler to use for this server. Ownership
* of this object is passed to this server.
*/
- void run(mongo::MessageHandler* messsageHandler) {
+ void run(MessageHandler* messsageHandler) {
if (_server != NULL) {
return;
}
- mongo::MessageServer::Options options;
+ MessageServer::Options options;
options.port = _port;
{
- boost::lock_guard<boost::mutex> sl(shutDownMutex);
+ std::lock_guard<std::mutex> sl(shutDownMutex);
shuttingDown = false;
}
- _server = mongo::createServer(options, messsageHandler);
- _serverThread = boost::thread(runServer, _server);
+ _server.reset(createServer(options, messsageHandler));
+ _serverThread = std::thread(runServer, _server.get());
}
/**
* Stops the server if it is running.
*/
void stop() {
- if (_server == NULL) {
+ if (!_server) {
return;
}
{
- boost::lock_guard<boost::mutex> sl(shutDownMutex);
+ std::lock_guard<std::mutex> sl(shutDownMutex);
shuttingDown = true;
}
- mongo::ListeningSockets::get()->closeAll();
+ ListeningSockets::get()->closeAll();
_serverThread.join();
- int connCount = mongo::Listener::globalTicketHolder.used();
+ int connCount = Listener::globalTicketHolder.used();
size_t iterCount = 0;
while (connCount > 0) {
if ((++iterCount % 20) == 0) {
- mongo::log() << "DummyServer: Waiting for " << connCount
+ log() << "DummyServer: Waiting for " << connCount
<< " connections to close." << std::endl;
}
- mongo::sleepmillis(500);
- connCount = mongo::Listener::globalTicketHolder.used();
+ sleepmillis(500);
+ connCount = Listener::globalTicketHolder.used();
}
- delete _server;
- _server = NULL;
+            _server.reset();
}
/**
* Helper method for running the server on a separate thread.
*/
- static void runServer(mongo::MessageServer* server) {
+ static void runServer(MessageServer* server) {
server->setupSockets();
server->run();
}
private:
const int _port;
- boost::thread _serverThread;
- mongo::MessageServer* _server;
+
+ std::thread _serverThread;
+ unique_ptr<MessageServer> _server;
};
/**
* Warning: cannot run in parallel
*/
- class DummyServerFixture: public mongo::unittest::Test {
+ class DummyServerFixture: public unittest::Test {
public:
void setUp() {
- _maxPoolSizePerHost = mongo::pool.getMaxPoolSize();
+ _maxPoolSizePerHost = globalConnPool.getMaxPoolSize();
_dummyServer = new DummyServer(TARGET_PORT);
_dummyServer->run(&dummyHandler);
- mongo::DBClientConnection conn;
- mongo::Timer timer;
+ DBClientConnection conn;
+ Timer timer;
// Make sure the dummy server is up and running before proceeding
while (true) {
try {
conn.connect(TARGET_HOST);
break;
- } catch (const mongo::ConnectException&) {
+ } catch (const ConnectException&) {
if (timer.seconds() > 20) {
FAIL("Timed out connecting to dummy server");
}
@@ -237,7 +242,7 @@ namespace mongo_test {
ScopedDbConnection::clearPool();
delete _dummyServer;
- mongo::pool.setMaxPoolSize(_maxPoolSizePerHost);
+ globalConnPool.setMaxPoolSize(_maxPoolSizePerHost);
}
protected:
@@ -251,7 +256,7 @@ namespace mongo_test {
/**
* Tries to grab a series of connections from the pool, perform checks on
- * them, then put them back into the pool. After that, it checks these
+     * them, then put them back into the global connection pool. After that, it checks these
* connections can be retrieved again from the pool.
*
* @param checkFunc method for comparing new connections and arg2.
@@ -267,7 +272,7 @@ namespace mongo_test {
newConnList.push_back(newConn);
}
- const uint64_t oldCreationTime = mongo::curTimeMicros64();
+ const uint64_t oldCreationTime = curTimeMicros64();
for (vector<ScopedDbConnection*>::iterator iter = newConnList.begin();
iter != newConnList.end(); ++iter) {
@@ -292,7 +297,7 @@ namespace mongo_test {
}
private:
- static void runServer(mongo::MessageServer* server) {
+ static void runServer(MessageServer* server) {
server->setupSockets();
server->run();
}
@@ -323,18 +328,18 @@ namespace mongo_test {
conn1.done();
conn3.done();
- const uint64_t badCreationTime = mongo::curTimeMicros64();
+ const uint64_t badCreationTime = curTimeMicros64();
- mongo::getGlobalFailPointRegistry()->getFailPoint("throwSockExcep")->
+ getGlobalFailPointRegistry()->getFailPoint("throwSockExcep")->
setMode(FailPoint::alwaysOn);
try {
- conn2->query("test.user", mongo::Query());
+ conn2->query("test.user", Query());
}
- catch (const mongo::SocketException&) {
+ catch (const SocketException&) {
}
- mongo::getGlobalFailPointRegistry()->getFailPoint("throwSockExcep")->
+ getGlobalFailPointRegistry()->getFailPoint("throwSockExcep")->
setMode(FailPoint::off);
conn2.done();
@@ -348,16 +353,16 @@ namespace mongo_test {
conn1.done();
- mongo::getGlobalFailPointRegistry()->getFailPoint("throwSockExcep")->
+ getGlobalFailPointRegistry()->getFailPoint("throwSockExcep")->
setMode(FailPoint::alwaysOn);
try {
- conn3->query("test.user", mongo::Query());
+ conn3->query("test.user", Query());
}
- catch (const mongo::SocketException&) {
+ catch (const SocketException&) {
}
- mongo::getGlobalFailPointRegistry()->getFailPoint("throwSockExcep")->
+ getGlobalFailPointRegistry()->getFailPoint("throwSockExcep")->
setMode(FailPoint::off);
const uint64_t badCreationTime = conn3->getSockCreationMicroSec();
@@ -369,7 +374,7 @@ namespace mongo_test {
}
TEST_F(DummyServerFixture, InvalidateBadConnEvenWhenPoolIsFull) {
- mongo::pool.setMaxPoolSize(2);
+ globalConnPool.setMaxPoolSize(2);
ScopedDbConnection conn1(TARGET_HOST);
ScopedDbConnection conn2(TARGET_HOST);
@@ -378,18 +383,18 @@ namespace mongo_test {
conn1.done();
conn3.done();
- const uint64_t badCreationTime = mongo::curTimeMicros64();
+ const uint64_t badCreationTime = curTimeMicros64();
- mongo::getGlobalFailPointRegistry()->getFailPoint("throwSockExcep")->
+ getGlobalFailPointRegistry()->getFailPoint("throwSockExcep")->
setMode(FailPoint::alwaysOn);
try {
- conn2->query("test.user", mongo::Query());
+ conn2->query("test.user", Query());
}
- catch (const mongo::SocketException&) {
+ catch (const SocketException&) {
}
- mongo::getGlobalFailPointRegistry()->getFailPoint("throwSockExcep")->
+ getGlobalFailPointRegistry()->getFailPoint("throwSockExcep")->
setMode(FailPoint::off);
conn2.done();
@@ -422,4 +427,6 @@ namespace mongo_test {
conn1Again.done();
}
-}
+
+} // namespace
+} // namespace mongo
diff --git a/src/mongo/db/commands.cpp b/src/mongo/db/commands.cpp
index 493537f049e..e0a3c2aec17 100644
--- a/src/mongo/db/commands.cpp
+++ b/src/mongo/db/commands.cpp
@@ -41,6 +41,7 @@
#include "mongo/bson/bsonobjbuilder.h"
#include "mongo/bson/mutable/document.h"
#include "mongo/client/connpool.h"
+#include "mongo/client/global_conn_pool.h"
#include "mongo/db/audit.h"
#include "mongo/db/auth/action_set.h"
#include "mongo/db/auth/action_type.h"
@@ -54,6 +55,7 @@
#include "mongo/db/server_parameters.h"
#include "mongo/rpc/get_status_from_command_result.h"
#include "mongo/rpc/metadata.h"
+#include "mongo/s/client/shard_connection.h"
#include "mongo/s/stale_exception.h"
#include "mongo/s/write_ops/wc_error_detail.h"
#include "mongo/util/log.h"
@@ -512,10 +514,6 @@ namespace {
}
}
- extern DBConnectionPool pool;
- // This is mainly used by the internal writes using write commands.
- extern DBConnectionPool shardConnectionPool;
-
class PoolFlushCmd : public Command {
public:
PoolFlushCmd() : Command( "connPoolSync" , false , "connpoolsync" ) {}
@@ -536,7 +534,7 @@ namespace {
std::string&,
mongo::BSONObjBuilder& result) {
shardConnectionPool.flush();
- pool.flush();
+ globalConnPool.flush();
return true;
}
virtual bool slaveOk() const {
@@ -563,7 +561,8 @@ namespace {
int,
std::string&,
mongo::BSONObjBuilder& result) {
- pool.appendInfo( result );
+
+ globalConnPool.appendInfo(result);
result.append( "numDBClientConnection" , DBClientConnection::getNumConnections() );
result.append( "numAScopedConnection" , AScopedConnection::getNumConnections() );
return true;
diff --git a/src/mongo/db/conn_pool_options.cpp b/src/mongo/db/conn_pool_options.cpp
index ae37e1a3155..5fd4c1ffb9d 100644
--- a/src/mongo/db/conn_pool_options.cpp
+++ b/src/mongo/db/conn_pool_options.cpp
@@ -32,6 +32,7 @@
#include "mongo/base/init.h"
#include "mongo/client/connpool.h"
+#include "mongo/client/global_conn_pool.h"
#include "mongo/db/server_parameters.h"
#include "mongo/s/client/shard_connection.h"
@@ -64,8 +65,8 @@ namespace mongo {
// - The connection hooks for sharding are added on startup (mongos) or on first sharded
// operation (mongod)
- pool.setName("connection pool");
- pool.setMaxPoolSize(ConnPoolOptions::maxConnsPerHost);
+ globalConnPool.setName("connection pool");
+ globalConnPool.setMaxPoolSize(ConnPoolOptions::maxConnsPerHost);
shardConnectionPool.setName("sharded connection pool");
shardConnectionPool.setMaxPoolSize(ConnPoolOptions::maxShardedConnsPerHost);
diff --git a/src/mongo/s/catalog/catalog_manager.h b/src/mongo/s/catalog/catalog_manager.h
index 8d2283666ce..6ff9b7e9b91 100644
--- a/src/mongo/s/catalog/catalog_manager.h
+++ b/src/mongo/s/catalog/catalog_manager.h
@@ -269,12 +269,6 @@ namespace mongo {
virtual bool isShardHost(const ConnectionString& shardConnectionString) = 0;
/**
- * Returns true if there are any shards in the sharded cluster.
- * Otherwise, returns false.
- */
- virtual bool doShardsExist() = 0;
-
- /**
* Runs a user management command on the config servers.
* @param commandName: name of command
* @param dbname: database for which the user management command is invoked
diff --git a/src/mongo/s/catalog/legacy/catalog_manager_legacy.cpp b/src/mongo/s/catalog/legacy/catalog_manager_legacy.cpp
index 5d92e23a7a4..27b4854403b 100644
--- a/src/mongo/s/catalog/legacy/catalog_manager_legacy.cpp
+++ b/src/mongo/s/catalog/legacy/catalog_manager_legacy.cpp
@@ -1345,10 +1345,6 @@ namespace {
return _getShardCount(BSON(ShardType::host(connectionString.toString())));
}
- bool CatalogManagerLegacy::doShardsExist() {
- return _getShardCount() > 0;
- }
-
bool CatalogManagerLegacy::runUserManagementWriteCommand(const string& commandName,
const string& dbname,
const BSONObj& cmdObj,
diff --git a/src/mongo/s/catalog/legacy/catalog_manager_legacy.h b/src/mongo/s/catalog/legacy/catalog_manager_legacy.h
index 1d405cd3676..471a7727a2d 100644
--- a/src/mongo/s/catalog/legacy/catalog_manager_legacy.h
+++ b/src/mongo/s/catalog/legacy/catalog_manager_legacy.h
@@ -105,8 +105,6 @@ namespace mongo {
bool isShardHost(const ConnectionString& shardConnectionString) override;
- bool doShardsExist() override;
-
/**
* Grabs a distributed lock and runs the command on all config servers.
*/
diff --git a/src/mongo/s/catalog/replset/catalog_manager_replica_set.cpp b/src/mongo/s/catalog/replset/catalog_manager_replica_set.cpp
index 4bc67c3ce2f..e8ff8b6e4d0 100644
--- a/src/mongo/s/catalog/replset/catalog_manager_replica_set.cpp
+++ b/src/mongo/s/catalog/replset/catalog_manager_replica_set.cpp
@@ -225,10 +225,6 @@ namespace {
return false;
}
- bool CatalogManagerReplicaSet::doShardsExist() {
- return false;
- }
-
bool CatalogManagerReplicaSet::runUserManagementWriteCommand(const std::string& commandName,
const std::string& dbname,
const BSONObj& cmdObj,
diff --git a/src/mongo/s/catalog/replset/catalog_manager_replica_set.h b/src/mongo/s/catalog/replset/catalog_manager_replica_set.h
index c85b661c317..ccaa16a0863 100644
--- a/src/mongo/s/catalog/replset/catalog_manager_replica_set.h
+++ b/src/mongo/s/catalog/replset/catalog_manager_replica_set.h
@@ -118,8 +118,6 @@ namespace executor {
bool isShardHost(const ConnectionString& shardConnectionString) override;
- bool doShardsExist() override;
-
bool runUserManagementWriteCommand(const std::string& commandName,
const std::string& dbname,
const BSONObj& cmdObj,
diff --git a/src/mongo/s/d_state.cpp b/src/mongo/s/d_state.cpp
index f6f7af53a6c..44c42890e46 100644
--- a/src/mongo/s/d_state.cpp
+++ b/src/mongo/s/d_state.cpp
@@ -39,6 +39,7 @@
#include <vector>
#include "mongo/client/connpool.h"
+#include "mongo/client/global_conn_pool.h"
#include "mongo/client/remote_command_runner_impl.h"
#include "mongo/client/remote_command_targeter_factory_impl.h"
#include "mongo/db/auth/action_set.h"
@@ -892,8 +893,9 @@ namespace mongo {
boost::lock_guard<boost::mutex> lk(lock);
if (!done) {
log() << "first cluster operation detected, adding sharding hook to enable versioning "
- "and authentication to remote servers" << endl;
- pool.addHook(new ShardingConnectionHook(false));
+ "and authentication to remote servers";
+
+ globalConnPool.addHook(new ShardingConnectionHook(false));
shardConnectionPool.addHook(new ShardingConnectionHook(true));
done = true;
}
diff --git a/src/mongo/s/server.cpp b/src/mongo/s/server.cpp
index 855931afaf0..4c1b9d810fd 100644
--- a/src/mongo/s/server.cpp
+++ b/src/mongo/s/server.cpp
@@ -39,6 +39,7 @@
#include "mongo/base/status.h"
#include "mongo/client/connpool.h"
#include "mongo/client/dbclient_rs.h"
+#include "mongo/client/global_conn_pool.h"
#include "mongo/client/remote_command_runner_impl.h"
#include "mongo/client/remote_command_targeter_factory_impl.h"
#include "mongo/client/replica_set_monitor.h"
@@ -204,11 +205,9 @@ static ExitCode runMongosServer( bool doUpgrade ) {
setThreadName( "mongosMain" );
printShardingVersionInfo( false );
- // set some global state
-
// Add sharding hooks to both connection pools - ShardingConnectionHook includes auth hooks
- pool.addHook( new ShardingConnectionHook( false ) );
- shardConnectionPool.addHook( new ShardingConnectionHook( true ) );
+ globalConnPool.addHook(new ShardingConnectionHook(false));
+ shardConnectionPool.addHook(new ShardingConnectionHook(true));
// Mongos shouldn't lazily kill cursors, otherwise we can end up with extras from migration
DBClientConnection::setLazyKillCursor( false );