diff options
author | Kaloian Manassiev <kaloian.manassiev@mongodb.com> | 2015-06-14 13:13:04 -0400 |
---|---|---|
committer | Kaloian Manassiev <kaloian.manassiev@mongodb.com> | 2015-06-15 12:21:13 -0400 |
commit | ba6a3b8e5075f8dec1c8d3dadee6c91b7c7dc112 (patch) | |
tree | 496ca545c1a61266a0fe2e43061d79881ee30e08 /src/mongo/client/scoped_db_conn_test.cpp | |
parent | 80d45077ae98fa1961c5e9daa1c377c633ff177a (diff) | |
download | mongo-ba6a3b8e5075f8dec1c8d3dadee6c91b7c7dc112.tar.gz |
SERVER-18947 Remove unused catalog manager code
Diffstat (limited to 'src/mongo/client/scoped_db_conn_test.cpp')
-rw-r--r-- | src/mongo/client/scoped_db_conn_test.cpp | 153 |
1 file changed, 80 insertions, 73 deletions
diff --git a/src/mongo/client/scoped_db_conn_test.cpp b/src/mongo/client/scoped_db_conn_test.cpp index ef68972dabc..1a39b1dbe69 100644 --- a/src/mongo/client/scoped_db_conn_test.cpp +++ b/src/mongo/client/scoped_db_conn_test.cpp @@ -27,9 +27,15 @@ #define MONGO_LOG_DEFAULT_COMPONENT ::mongo::logger::LogComponent::kDefault -#include "mongo/base/init.h" +#include "mongo/platform/basic.h" + +#include <mutex> +#include <vector> +#include <string> +#include <thread> + #include "mongo/client/connpool.h" -#include "mongo/platform/cstdint.h" +#include "mongo/client/global_conn_pool.h" #include "mongo/util/net/listen.h" #include "mongo/util/net/message_port.h" #include "mongo/util/net/message_server.h" @@ -40,9 +46,6 @@ #include "mongo/util/timer.h" #include "mongo/unittest/unittest.h" -#include <vector> -#include <boost/thread/thread.hpp> - /** * Tests for ScopedDbConnection, particularly in connection pool management. * The tests also indirectly tests DBClientConnection's failure detection @@ -50,40 +53,37 @@ * connection). */ -using std::unique_ptr; -using mongo::DBClientBase; -using mongo::FailPoint; -using mongo::ScopedDbConnection; -using std::string; -using std::vector; +namespace mongo { + + using std::unique_ptr; + using std::string; + using std::vector; + + class Client; + class OperationContext; namespace { - const string TARGET_HOST = "localhost:27017"; - const int TARGET_PORT = 27017; - mongo::mutex shutDownMutex; + std::mutex shutDownMutex; bool shuttingDown = false; -} -namespace mongo { - - class Client; - class OperationContext; +} // namespace // Symbols defined to build the binary correctly. 
- bool inShutdown() { - boost::lock_guard<boost::mutex> sl(shutDownMutex); + std::lock_guard<std::mutex> sl(shutDownMutex); return shuttingDown; } - void signalShutdown() {} + void signalShutdown() { } - DBClientBase* createDirectClient(OperationContext* txn) { return NULL; } + DBClientBase* createDirectClient(OperationContext* txn) { + return NULL; + } - void dbexit(ExitCode rc, const char *why){ + void dbexit(ExitCode rc, const char *why) { { - boost::lock_guard<boost::mutex> sl(shutDownMutex); + std::lock_guard<std::mutex> sl(shutDownMutex); shuttingDown = true; } @@ -98,18 +98,22 @@ namespace mongo { return false; } - class DummyMessageHandler: public MessageHandler { +namespace { + + const string TARGET_HOST = "localhost:27017"; + const int TARGET_PORT = 27017; + + class DummyMessageHandler final : public MessageHandler { public: virtual void connected(AbstractMessagingPort* p) { + } virtual void process(Message& m, AbstractMessagingPort* por) { + } - }; -} -namespace mongo_test { - mongo::DummyMessageHandler dummyHandler; + } dummyHandler; // TODO: Take this out and make it as a reusable class in a header file. The only // thing that is preventing this from happening is the dependency on the inShutdown @@ -131,7 +135,8 @@ namespace mongo_test { * * @param port the port number to listen to. */ - DummyServer(int port): _port(port), _server(NULL) { + DummyServer(int port) : _port(port) { + } ~DummyServer() { @@ -144,88 +149,88 @@ namespace mongo_test { * @param messageHandler the message handler to use for this server. Ownership * of this object is passed to this server. 
*/ - void run(mongo::MessageHandler* messsageHandler) { + void run(MessageHandler* messsageHandler) { if (_server != NULL) { return; } - mongo::MessageServer::Options options; + MessageServer::Options options; options.port = _port; { - boost::lock_guard<boost::mutex> sl(shutDownMutex); + std::lock_guard<std::mutex> sl(shutDownMutex); shuttingDown = false; } - _server = mongo::createServer(options, messsageHandler); - _serverThread = boost::thread(runServer, _server); + _server.reset(createServer(options, messsageHandler)); + _serverThread = std::thread(runServer, _server.get()); } /** * Stops the server if it is running. */ void stop() { - if (_server == NULL) { + if (!_server) { return; } { - boost::lock_guard<boost::mutex> sl(shutDownMutex); + std::lock_guard<std::mutex> sl(shutDownMutex); shuttingDown = true; } - mongo::ListeningSockets::get()->closeAll(); + ListeningSockets::get()->closeAll(); _serverThread.join(); - int connCount = mongo::Listener::globalTicketHolder.used(); + int connCount = Listener::globalTicketHolder.used(); size_t iterCount = 0; while (connCount > 0) { if ((++iterCount % 20) == 0) { - mongo::log() << "DummyServer: Waiting for " << connCount + log() << "DummyServer: Waiting for " << connCount << " connections to close." << std::endl; } - mongo::sleepmillis(500); - connCount = mongo::Listener::globalTicketHolder.used(); + sleepmillis(500); + connCount = Listener::globalTicketHolder.used(); } - delete _server; - _server = NULL; + _server.release(); } /** * Helper method for running the server on a separate thread. 
*/ - static void runServer(mongo::MessageServer* server) { + static void runServer(MessageServer* server) { server->setupSockets(); server->run(); } private: const int _port; - boost::thread _serverThread; - mongo::MessageServer* _server; + + std::thread _serverThread; + unique_ptr<MessageServer> _server; }; /** * Warning: cannot run in parallel */ - class DummyServerFixture: public mongo::unittest::Test { + class DummyServerFixture: public unittest::Test { public: void setUp() { - _maxPoolSizePerHost = mongo::pool.getMaxPoolSize(); + _maxPoolSizePerHost = globalConnPool.getMaxPoolSize(); _dummyServer = new DummyServer(TARGET_PORT); _dummyServer->run(&dummyHandler); - mongo::DBClientConnection conn; - mongo::Timer timer; + DBClientConnection conn; + Timer timer; // Make sure the dummy server is up and running before proceeding while (true) { try { conn.connect(TARGET_HOST); break; - } catch (const mongo::ConnectException&) { + } catch (const ConnectException&) { if (timer.seconds() > 20) { FAIL("Timed out connecting to dummy server"); } @@ -237,7 +242,7 @@ namespace mongo_test { ScopedDbConnection::clearPool(); delete _dummyServer; - mongo::pool.setMaxPoolSize(_maxPoolSizePerHost); + globalConnPool.setMaxPoolSize(_maxPoolSizePerHost); } protected: @@ -251,7 +256,7 @@ namespace mongo_test { /** * Tries to grab a series of connections from the pool, perform checks on - * them, then put them back into the pool. After that, it checks these + * them, then put them back into the globalConnPool. After that, it checks these * connections can be retrieved again from the pool. * * @param checkFunc method for comparing new connections and arg2. 
@@ -267,7 +272,7 @@ namespace mongo_test { newConnList.push_back(newConn); } - const uint64_t oldCreationTime = mongo::curTimeMicros64(); + const uint64_t oldCreationTime = curTimeMicros64(); for (vector<ScopedDbConnection*>::iterator iter = newConnList.begin(); iter != newConnList.end(); ++iter) { @@ -292,7 +297,7 @@ namespace mongo_test { } private: - static void runServer(mongo::MessageServer* server) { + static void runServer(MessageServer* server) { server->setupSockets(); server->run(); } @@ -323,18 +328,18 @@ namespace mongo_test { conn1.done(); conn3.done(); - const uint64_t badCreationTime = mongo::curTimeMicros64(); + const uint64_t badCreationTime = curTimeMicros64(); - mongo::getGlobalFailPointRegistry()->getFailPoint("throwSockExcep")-> + getGlobalFailPointRegistry()->getFailPoint("throwSockExcep")-> setMode(FailPoint::alwaysOn); try { - conn2->query("test.user", mongo::Query()); + conn2->query("test.user", Query()); } - catch (const mongo::SocketException&) { + catch (const SocketException&) { } - mongo::getGlobalFailPointRegistry()->getFailPoint("throwSockExcep")-> + getGlobalFailPointRegistry()->getFailPoint("throwSockExcep")-> setMode(FailPoint::off); conn2.done(); @@ -348,16 +353,16 @@ namespace mongo_test { conn1.done(); - mongo::getGlobalFailPointRegistry()->getFailPoint("throwSockExcep")-> + getGlobalFailPointRegistry()->getFailPoint("throwSockExcep")-> setMode(FailPoint::alwaysOn); try { - conn3->query("test.user", mongo::Query()); + conn3->query("test.user", Query()); } - catch (const mongo::SocketException&) { + catch (const SocketException&) { } - mongo::getGlobalFailPointRegistry()->getFailPoint("throwSockExcep")-> + getGlobalFailPointRegistry()->getFailPoint("throwSockExcep")-> setMode(FailPoint::off); const uint64_t badCreationTime = conn3->getSockCreationMicroSec(); @@ -369,7 +374,7 @@ namespace mongo_test { } TEST_F(DummyServerFixture, InvalidateBadConnEvenWhenPoolIsFull) { - mongo::pool.setMaxPoolSize(2); + 
globalConnPool.setMaxPoolSize(2); ScopedDbConnection conn1(TARGET_HOST); ScopedDbConnection conn2(TARGET_HOST); @@ -378,18 +383,18 @@ namespace mongo_test { conn1.done(); conn3.done(); - const uint64_t badCreationTime = mongo::curTimeMicros64(); + const uint64_t badCreationTime = curTimeMicros64(); - mongo::getGlobalFailPointRegistry()->getFailPoint("throwSockExcep")-> + getGlobalFailPointRegistry()->getFailPoint("throwSockExcep")-> setMode(FailPoint::alwaysOn); try { - conn2->query("test.user", mongo::Query()); + conn2->query("test.user", Query()); } - catch (const mongo::SocketException&) { + catch (const SocketException&) { } - mongo::getGlobalFailPointRegistry()->getFailPoint("throwSockExcep")-> + getGlobalFailPointRegistry()->getFailPoint("throwSockExcep")-> setMode(FailPoint::off); conn2.done(); @@ -422,4 +427,6 @@ namespace mongo_test { conn1Again.done(); } -} + +} // namespace +} // namespace mongo |