author | Andy Schwerin <schwerin@mongodb.com> | 2015-04-07 16:49:06 -0400
committer | Andy Schwerin <schwerin@mongodb.com> | 2015-04-20 14:06:16 -0400
commit | 84c4b7d15c6b98c7bb648bc60ade93b6af62b129 (patch)
tree | 6a0afa97d6f911c07089ccdf7bc29aa2deaed314
parent | 8e5b16fe0d64d587e0741dab7cabe64b0a818e51 (diff)
download | mongo-84c4b7d15c6b98c7bb648bc60ade93b6af62b129.tar.gz
SERVER-17817 Make ServiceContext create and manage Client objects.
Also deduplicates the Client::* method implementations and guards the identity of a
Client's current CurOp with the Client's _mutex instead of the mutex guarding the
list of all clients.
Makes the currentClient object private to client.cpp; all access to the thread-bound
client now goes through the haveClient() and cc() free functions in the mongo
namespace.
Removes the vestigial Client::shutdown() methods.
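
Below, as a minimal usage sketch (not part of the patch), is how a worker thread binds and reads its thread-local Client after this change. The function and thread names are invented for illustration, and the includes assume the mongod source tree shown in the diff.

```cpp
// Minimal sketch, not part of the patch: binding and reading the thread-local
// Client with the new free functions. Function and thread names are invented.
#include "mongo/db/client.h"         // Client, cc(), haveClient()
#include "mongo/util/assert_util.h"  // invariant()

namespace mongo {

    void exampleWorkerThreadBody() {
        // Binds a ServiceContext-owned Client to this thread unless one is
        // already bound; replaces the old manual currentClient.get() checks.
        Client::initThreadIfNotAlready("exampleWorker");

        invariant(haveClient());  // the thread-bound Client is now set
        Client& client = cc();    // cc() verifies the pointer before returning it

        // ... do work on behalf of `client` ...

        // No Client::shutdown() on the way out: the thread-local UniqueClient's
        // deleter is expected to unregister and delete the Client at thread exit.
    }

}  // namespace mongo
```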
47 files changed, 353 insertions, 298 deletions
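
For the owning side of the change, a hedged sketch (again not from the patch) of the ServiceContext API introduced below: makeClient(), registerClientObserver(), and LockedClientsCursor, driven against the ServiceContextNoop test service. The observer class, the function name, and the service_context_noop.h include path are assumptions for illustration.

```cpp
// Sketch only, assuming the headers below exist at these paths: exercising
// ServiceContext::makeClient(), registerClientObserver(), and
// LockedClientsCursor with the ServiceContextNoop test service.
#include "mongo/db/client.h"
#include "mongo/db/service_context_noop.h"
#include "mongo/stdx/memory.h"
#include "mongo/util/assert_util.h"

namespace mongo {

    class ExampleClientObserver final : public ServiceContext::ClientObserver {
    public:
        void onCreateClient(ServiceContext* service, Client* client) override {
            // Runs inside makeClient(), before the UniqueClient is handed out.
        }
        void onDestroyClient(ServiceContext* service, Client* client) override {
            // Runs from ClientDeleter, just before the Client is deleted.
        }
    };

    void exampleClientManagement() {
        ServiceContextNoop service;

        // All observers must be registered before any clients are made.
        service.registerClientObserver(stdx::make_unique<ExampleClientObserver>());

        // UniqueClient is std::unique_ptr<Client, ClientDeleter>; destroying it
        // removes the Client from the service's set and fires onDestroyClient.
        ServiceContext::UniqueClient client = service.makeClient("exampleClient");

        // Enumerate live clients while holding the service's _mutex.
        for (ServiceContext::LockedClientsCursor cursor(&service);
             Client* c = cursor.next();) {
            invariant(c);
        }
    }

}  // namespace mongo
```

The in-tree use of this pattern in the patch is AuthzClientObserver in src/mongo/db/auth/auth_decorations.cpp, which installs an AuthorizationSession on every new Client once the AuthorizationManager is set on the service.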
diff --git a/src/mongo/SConscript b/src/mongo/SConscript index 4f446d4c1be..6b97e491766 100644 --- a/src/mongo/SConscript +++ b/src/mongo/SConscript @@ -681,10 +681,13 @@ env.Library('update_index_data', [ 'db/update_index_data.cpp' ], LIBDEPS=[ 'db/c env.Library( target='service_context', source=[ + 'db/client.cpp', + 'db/client_basic.cpp', 'db/service_context.cpp', 'db/service_context_noop.cpp', ], LIBDEPS=[ + 'lasterror', # TODO(schwerin): REMOVE! 'util/decorable', ]) @@ -717,7 +720,6 @@ serverOnlyFiles = [ "db/background.cpp", "db/catalog/index_catalog.cpp", "db/catalog/index_catalog_entry.cpp", "db/catalog/index_create.cpp", - "db/client.cpp", "db/db_raii.cpp", "db/clientcursor.cpp", "db/cloner.cpp", @@ -1118,7 +1120,6 @@ mongodOnlyFiles = [ "db/db.cpp", "db/mongod_options_init.cpp" ] env.Library( target='coreserver', source=[ - 'db/client_basic.cpp', 'db/conn_pool_options.cpp', 'db/log_process_details.cpp', 'db/stats/counters.cpp', diff --git a/src/mongo/client/scoped_db_conn_test.cpp b/src/mongo/client/scoped_db_conn_test.cpp index 9f19c3a7ce7..6adfca400cd 100644 --- a/src/mongo/client/scoped_db_conn_test.cpp +++ b/src/mongo/client/scoped_db_conn_test.cpp @@ -108,9 +108,6 @@ namespace mongo { LastError * le) { boost::this_thread::interruption_point(); } - - virtual void disconnected(AbstractMessagingPort* p) { - } }; } diff --git a/src/mongo/db/auth/auth_decorations.cpp b/src/mongo/db/auth/auth_decorations.cpp index 46a38c64ac1..48194435bbe 100644 --- a/src/mongo/db/auth/auth_decorations.cpp +++ b/src/mongo/db/auth/auth_decorations.cpp @@ -34,8 +34,9 @@ #include "mongo/db/auth/authentication_session.h" #include "mongo/db/auth/authorization_manager.h" #include "mongo/db/auth/authorization_session.h" -#include "mongo/db/client_basic.h" +#include "mongo/db/client.h" #include "mongo/db/service_context.h" +#include "mongo/stdx/memory.h" #include "mongo/util/assert_util.h" namespace mongo { @@ -50,6 +51,17 @@ namespace { const auto getAuthorizationSession = ClientBasic::declareDecoration<std::unique_ptr<AuthorizationSession>>(); + class AuthzClientObserver final : public ServiceContext::ClientObserver { + public: + void onCreateClient(ServiceContext* service, Client* client) override { + AuthorizationSession::set( + client, + AuthorizationManager::get(service)->makeAuthorizationSession()); + } + + void onDestroyClient(ServiceContext* service, Client* client) override {} + }; + } // namespace void AuthenticationSession::set( @@ -79,6 +91,7 @@ namespace { invariant(authzManager); invariant(!manager); manager = std::move(authzManager); + service->registerClientObserver(stdx::make_unique<AuthzClientObserver>()); } AuthorizationSession* AuthorizationSession::get(ClientBasic* client) { diff --git a/src/mongo/db/catalog/database.cpp b/src/mongo/db/catalog/database.cpp index 46b58a16618..52902be6161 100644 --- a/src/mongo/db/catalog/database.cpp +++ b/src/mongo/db/catalog/database.cpp @@ -380,7 +380,7 @@ namespace mongo { BackgroundOperation::assertNoBgOpInProgForNs( fullns ); - audit::logDropCollection( currentClient.get(), fullns ); + audit::logDropCollection( &cc(), fullns ); Status s = collection->getIndexCatalog()->dropAllIndexes(txn, true); if ( !s.isOK() ) { @@ -450,7 +450,7 @@ namespace mongo { StringData toNS, bool stayTemp ) { - audit::logRenameCollection( currentClient.get(), fromNS, toNS ); + audit::logRenameCollection( &cc(), fromNS, toNS ); invariant(txn->lockState()->isDbLockedForMode(name(), MODE_X)); { // remove anything cached @@ -508,7 +508,7 @@ namespace mongo { 
NamespaceString nss( ns ); uassert( 17316, "cannot create a blank collection", nss.coll() > 0 ); - audit::logCreateCollection( currentClient.get(), ns ); + audit::logCreateCollection( &cc(), ns ); txn->recoveryUnit()->registerChange( new AddCollectionChange(this, ns) ); @@ -577,7 +577,7 @@ namespace mongo { BackgroundOperation::assertNoBgOpInProgForDb(name.c_str()); - audit::logDropDatabase( currentClient.get(), name ); + audit::logDropDatabase( &cc(), name ); dbHolder().close( txn, name ); db = NULL; // d is now deleted diff --git a/src/mongo/db/catalog/database_holder.cpp b/src/mongo/db/catalog/database_holder.cpp index e8bb0138d88..9b23d193557 100644 --- a/src/mongo/db/catalog/database_holder.cpp +++ b/src/mongo/db/catalog/database_holder.cpp @@ -129,7 +129,7 @@ namespace { invariant(entry); const bool exists = entry->exists(); if (!exists) { - audit::logCreateDatabase(currentClient.get(), dbname); + audit::logCreateDatabase(&cc(), dbname); } if (justCreated) { diff --git a/src/mongo/db/catalog/index_catalog.cpp b/src/mongo/db/catalog/index_catalog.cpp index 483d2f1ad2b..ba45d066805 100644 --- a/src/mongo/db/catalog/index_catalog.cpp +++ b/src/mongo/db/catalog/index_catalog.cpp @@ -864,7 +864,7 @@ namespace { // --------- START REAL WORK ---------- - audit::logDropIndex( currentClient.get(), indexName, _collection->ns().ns() ); + audit::logDropIndex( &cc(), indexName, _collection->ns().ns() ); invariant(_entries.release(entry->descriptor()) == entry); txn->recoveryUnit()->registerChange(new IndexRemoveChange(txn, _collection, diff --git a/src/mongo/db/client.cpp b/src/mongo/db/client.cpp index d471bcd5ae4..cc4a7c69cab 100644 --- a/src/mongo/db/client.cpp +++ b/src/mongo/db/client.cpp @@ -40,8 +40,6 @@ #include <vector> #include "mongo/base/status.h" -#include "mongo/db/auth/authorization_manager_global.h" -#include "mongo/db/auth/authorization_session.h" #include "mongo/db/lasterror.h" #include "mongo/db/service_context.h" #include "mongo/util/concurrency/thread_name.h" @@ -52,17 +50,29 @@ namespace mongo { using logger::LogComponent; - boost::mutex Client::clientsMutex; - ClientSet Client::clients; + TSP_DECLARE(ServiceContext::UniqueClient, currentClient) + TSP_DEFINE(ServiceContext::UniqueClient, currentClient) - TSP_DEFINE(Client, currentClient) + void Client::initThreadIfNotAlready(const char* desc) { + if (currentClient.getMake()->get()) + return; + initThread(desc); + } + + void Client::initThreadIfNotAlready() { + initThreadIfNotAlready(getThreadName().c_str()); + } + + void Client::initThread(const char *desc, AbstractMessagingPort *mp) { + initThread(desc, getGlobalServiceContext(), mp); + } /** * This must be called whenever a new thread is started, so that active threads can be tracked * so each thread has a Client object in TLS. 
*/ - void Client::initThread(const char *desc, AbstractMessagingPort *mp) { - invariant(currentClient.get() == 0); + void Client::initThread(const char *desc, ServiceContext* service, AbstractMessagingPort *mp) { + invariant(currentClient.getMake()->get() == nullptr); std::string fullDesc; if (mp != NULL) { @@ -76,43 +86,16 @@ namespace mongo { mongo::lastError.initThread(); // Create the client obj, attach to thread - Client* client = new Client(fullDesc, getGlobalServiceContext(), mp); - AuthorizationSession::set(client, - getGlobalAuthorizationManager()->makeAuthorizationSession()); - - currentClient.reset(client); - - // This makes the client visible to maintenance threads - boost::lock_guard<boost::mutex> clientLock(clientsMutex); - clients.insert(client); + *currentClient.get() = service->makeClient(fullDesc, mp); } - Client::Client(const std::string& desc, + Client::Client(std::string desc, ServiceContext* serviceContext, AbstractMessagingPort *p) : ClientBasic(serviceContext, p), - _desc(desc), + _desc(std::move(desc)), _threadId(boost::this_thread::get_id()), - _connectionId(p ? p->connectionId() : 0), - _inDirectClient(false), - _txn(NULL) { - } - - Client::~Client() { - if ( ! inShutdown() ) { - // we can't clean up safely once we're in shutdown - { - boost::lock_guard<boost::mutex> clientLock(clientsMutex); - clients.erase(this); - } - } - } - - void Client::shutdown() { - if (!inShutdown()) { - boost::lock_guard<boost::mutex> clientLock(clientsMutex); - clients.erase(this); - } + _connectionId(p ? p->connectionId() : 0) { } void Client::reportState(BSONObjBuilder& builder) { @@ -152,12 +135,15 @@ namespace mongo { } ClientBasic* ClientBasic::getCurrent() { - return currentClient.get(); + return currentClient.getMake()->get(); } - void saveGLEStats(const BSONObj& result, const std::string& conn) { - // This can be called in mongod, which is unfortunate. To fix this, - // we can redesign how connection pooling works on mongod for sharded operations. + Client& cc() { + Client* c = currentClient.getMake()->get(); + verify(c); + return *c; } + bool haveClient() { return currentClient.getMake()->get(); } + } // namespace mongo diff --git a/src/mongo/db/client.h b/src/mongo/db/client.h index b01656d3407..f81bc91bf37 100644 --- a/src/mongo/db/client.h +++ b/src/mongo/db/client.h @@ -43,6 +43,7 @@ #include "mongo/db/lasterror.h" #include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" #include "mongo/platform/unordered_set.h" #include "mongo/util/concurrency/spin_lock.h" #include "mongo/util/concurrency/threadlocal.h" @@ -52,21 +53,11 @@ namespace mongo { class Collection; class AbstractMessagingPort; - TSP_DECLARE(Client, currentClient) - typedef long long ConnectionId; - typedef unordered_set<Client*> ClientSet; - /** the database's concept of an outside "client" */ class Client : public ClientBasic { public: - // A set of currently active clients along with a mutex to protect the list - static boost::mutex clientsMutex; - static ClientSet clients; - - ~Client(); - /** each thread which does db operations has a Client object in TLS. * call this when your thread starts. */ @@ -79,25 +70,12 @@ namespace mongo { * Inits a thread if that thread has not already been init'd, setting the thread name to * "desc". 
*/ - static void initThreadIfNotAlready(const char* desc) { - if (currentClient.get()) - return; - initThread(desc); - } + static void initThreadIfNotAlready(const char* desc); /** * Inits a thread if that thread has not already been init'd, using the existing thread name */ - static void initThreadIfNotAlready() { - if (currentClient.get()) - return; - initThread(getThreadName().c_str()); - } - - /** this has to be called as the client goes away, but before thread termination - * @return true if anything was done - */ - void shutdown(); + static void initThreadIfNotAlready(); std::string clientAddress(bool includePort = false) const; const std::string& desc() const { return _desc; } @@ -123,7 +101,8 @@ namespace mongo { bool isFromUserConnection() const { return _connectionId > 0; } private: - Client(const std::string& desc, + friend class ServiceContext; + Client(std::string desc, ServiceContext* serviceContext, AbstractMessagingPort *p = 0); @@ -141,19 +120,15 @@ namespace mongo { mutable SpinLock _lock; // Whether this client is running as DBDirectClient - bool _inDirectClient; + bool _inDirectClient = false; // If != NULL, then contains the currently active OperationContext - OperationContext* _txn; + OperationContext* _txn = nullptr; }; /** get the Client object for this thread. */ - inline Client& cc() { - Client * c = currentClient.get(); - verify( c ); - return *c; - } + Client& cc(); - inline bool haveClient() { return currentClient.get() != NULL; } + bool haveClient(); }; diff --git a/src/mongo/db/client_basic.h b/src/mongo/db/client_basic.h index 573923cc147..3fc6409c99d 100644 --- a/src/mongo/db/client_basic.h +++ b/src/mongo/db/client_basic.h @@ -50,8 +50,6 @@ namespace mongo { class ClientBasic : public Decorable<ClientBasic> { MONGO_DISALLOW_COPYING(ClientBasic); public: - virtual ~ClientBasic(); - bool getIsLocalHostConnection() { if (!hasRemote()) { return false; @@ -59,8 +57,8 @@ namespace mongo { return getRemote().isLocalHost(); } - virtual bool hasRemote() const { return _messagingPort; } - virtual HostAndPort getRemote() const { + bool hasRemote() const { return _messagingPort; } + HostAndPort getRemote() const { verify( _messagingPort ); return _messagingPort->remote(); } @@ -79,6 +77,7 @@ namespace mongo { protected: ClientBasic(ServiceContext* serviceContext, AbstractMessagingPort* messagingPort); + ~ClientBasic(); private: ServiceContext* const _serviceContext; diff --git a/src/mongo/db/clientcursor.cpp b/src/mongo/db/clientcursor.cpp index d0919fce1ab..b26fe10a583 100644 --- a/src/mongo/db/clientcursor.cpp +++ b/src/mongo/db/clientcursor.cpp @@ -290,7 +290,6 @@ namespace mongo { void run() { Client::initThread("clientcursormon"); - Client& client = cc(); Timer t; const int Secs = 4; while (!inShutdown()) { @@ -299,7 +298,6 @@ namespace mongo { CursorManager::timeoutCursorsGlobal(&txn, t.millisReset())); sleepsecs(Secs); } - client.shutdown(); } }; diff --git a/src/mongo/db/clientlistplugin.cpp b/src/mongo/db/clientlistplugin.cpp index ba2069d2b69..7f59234a27e 100644 --- a/src/mongo/db/clientlistplugin.cpp +++ b/src/mongo/db/clientlistplugin.cpp @@ -76,21 +76,19 @@ namespace { << "</tr>\n"; - _processAllClients(ss); + _processAllClients(txn->getClient()->getServiceContext(), ss); ss << "</table>\n"; } private: - static void _processAllClients(std::stringstream& ss) { + static void _processAllClients(ServiceContext* service, std::stringstream& ss) { using namespace html; - boost::lock_guard<boost::mutex> scopedLock(Client::clientsMutex); + for 
(ServiceContext::LockedClientsCursor cursor(service); + Client* client = cursor.next();) { - ClientSet::const_iterator it = Client::clients.begin(); - for (; it != Client::clients.end(); it++) { - Client* client = *it; invariant(client); // Make the client stable @@ -186,7 +184,9 @@ namespace { filter.reset( res.getValue() ); } - result.appendArray("operations", _processAllClients(filter.get())); + result.appendArray( + "operations", + _processAllClients(txn->getClient()->getServiceContext(), filter.get())); return true; } @@ -194,14 +194,12 @@ namespace { private: - static BSONArray _processAllClients(MatchExpression* matcher) { + static BSONArray _processAllClients(ServiceContext* service, MatchExpression* matcher) { BSONArrayBuilder array; - boost::lock_guard<boost::mutex> scopedLock(Client::clientsMutex); + for (ServiceContext::LockedClientsCursor cursor(service); + Client* client = cursor.next();) { - ClientSet::const_iterator it = Client::clients.begin(); - for (; it != Client::clients.end(); it++) { - Client* client = *it; invariant(client); BSONObjBuilder b; diff --git a/src/mongo/db/commands/current_op.cpp b/src/mongo/db/commands/current_op.cpp index f5306d9dbe0..0bb4576c954 100644 --- a/src/mongo/db/commands/current_op.cpp +++ b/src/mongo/db/commands/current_op.cpp @@ -101,11 +101,9 @@ namespace mongo { BSONArrayBuilder inprogBuilder(result.subarrayStart("inprog")); - boost::lock_guard<boost::mutex> scopedLock(Client::clientsMutex); + for (ServiceContext::LockedClientsCursor cursor(txn->getClient()->getServiceContext()); + Client* client = cursor.next();) { - ClientSet::const_iterator it = Client::clients.begin(); - for ( ; it != Client::clients.end(); it++) { - Client* client = *it; invariant(client); boost::unique_lock<Client> uniqueLock(*client); diff --git a/src/mongo/db/commands/fsync.cpp b/src/mongo/db/commands/fsync.cpp index fb4e1dd167b..90418dce52d 100644 --- a/src/mongo/db/commands/fsync.cpp +++ b/src/mongo/db/commands/fsync.cpp @@ -75,7 +75,6 @@ namespace mongo { catch ( std::exception& e ) { error() << "FSyncLockThread exception: " << e.what() << endl; } - cc().shutdown(); } }; diff --git a/src/mongo/db/curop.cpp b/src/mongo/db/curop.cpp index 83ad73d0c4c..46b4e336cbf 100644 --- a/src/mongo/db/curop.cpp +++ b/src/mongo/db/curop.cpp @@ -58,7 +58,7 @@ namespace mongo { class CurOp::ClientCuropStack { MONGO_DISALLOW_COPYING(ClientCuropStack); public: - ClientCuropStack() : _base(this) {} + ClientCuropStack() : _base(nullptr, this) {} /** * Returns the top of the CurOp stack. @@ -68,8 +68,19 @@ namespace mongo { /** * Adds "curOp" to the top of the CurOp stack for a client. Called by CurOp's constructor. */ - void push(CurOp* curOp) { - boost::lock_guard<boost::mutex> clientLock(Client::clientsMutex); + void push(Client* client, CurOp* curOp) { + invariant(client); + if (_client) { + invariant(_client == client); + } + else { + _client = client; + } + boost::lock_guard<Client> lk(*_client); + push_nolock(curOp); + } + + void push_nolock(CurOp* curOp) { invariant(!curOp->_parent); curOp->_parent = _top; _top = curOp; @@ -79,14 +90,31 @@ namespace mongo { * Pops the top off the CurOp stack for a Client. Called by CurOp's destructor. */ CurOp* pop() { - boost::lock_guard<boost::mutex> clientLock(Client::clientsMutex); + // It is not necessary to lock when popping the final item off of the curop stack. This + // is because the item at the base of the stack is owned by the stack itself, and is not + // popped until the stack is being destroyed. 
By the time the stack is being destroyed, + // no other threads can be observing the Client that owns the stack, because it has been + // removed from its ServiceContext's set of owned clients. Further, because the last + // item is popped in the destructor of the stack, and that destructor runs during + // destruction of the owning client, it is not safe to access other member variables of + // the client during the final pop. + const bool shouldLock = _top->_parent; + if (shouldLock) { + invariant(_client); + _client->lock(); + } invariant(_top); CurOp* retval = _top; _top = _top->_parent; + if (shouldLock) { + _client->unlock(); + } return retval; } private: + Client* _client = nullptr; + // Top of the stack of CurOps for a Client. CurOp* _top = nullptr; @@ -118,10 +146,15 @@ namespace mongo { CurOp* CurOp::get(const Client* client) { return _curopStack(client).top(); } CurOp* CurOp::get(const Client& client) { return _curopStack(client).top(); } - CurOp::CurOp(Client* client) : CurOp(&_curopStack(client)) {} + CurOp::CurOp(Client* client) : CurOp(client, &_curopStack(client)) {} - CurOp::CurOp(ClientCuropStack* stack) : _stack(stack) { - _stack->push(this); + CurOp::CurOp(Client* client, ClientCuropStack* stack) : _stack(stack) { + if (client) { + _stack->push(client, this); + } + else { + _stack->push_nolock(this); + } _start = 0; _active = false; _reset(); diff --git a/src/mongo/db/curop.h b/src/mongo/db/curop.h index c6cb57c8a17..634a897c16e 100644 --- a/src/mongo/db/curop.h +++ b/src/mongo/db/curop.h @@ -327,7 +327,7 @@ namespace mongo { static const Client::Decoration<ClientCuropStack> _curopStack; - explicit CurOp(ClientCuropStack*); + CurOp(Client*, ClientCuropStack*); void _reset(); diff --git a/src/mongo/db/curop_test.cpp b/src/mongo/db/curop_test.cpp index 26a4472078c..17ac612c3a3 100644 --- a/src/mongo/db/curop_test.cpp +++ b/src/mongo/db/curop_test.cpp @@ -31,9 +31,6 @@ #include <boost/thread/thread.hpp> #include "mongo/base/init.h" -#include "mongo/db/auth/authorization_manager.h" -#include "mongo/db/auth/authorization_manager_global.h" -#include "mongo/db/auth/authz_manager_external_state_mock.h" #include "mongo/db/client.h" #include "mongo/db/curop.h" #include "mongo/db/service_context.h" @@ -73,19 +70,14 @@ namespace mongo { while (Listener::getElapsedTimeMillis() == 0) { sleepmillis(10); } - - auto service = stdx::make_unique<ServiceContextNoop>(); - AuthorizationManager::set( - service.get(), - stdx::make_unique<AuthorizationManager>(new AuthzManagerExternalStateMock())); - setGlobalServiceContext(std::move(service)); - Client::initThread("CurOpTestMain"); return Status::OK(); } // Long operation + short timeout => time should expire. TEST(TimeHasExpired, PosSimple) { - CurOp curOp(&cc()); + auto service = stdx::make_unique<ServiceContextNoop>(); + auto client = service->makeClient("CurOpTest"); + CurOp curOp(client.get()); curOp.setMaxTimeMicros(intervalShort); curOp.ensureStarted(); sleepmicros(intervalLong); @@ -94,7 +86,9 @@ namespace mongo { // Short operation + long timeout => time should not expire. 
TEST(TimeHasExpired, NegSimple) { - CurOp curOp(&cc()); + auto service = stdx::make_unique<ServiceContextNoop>(); + auto client = service->makeClient("CurOpTest"); + CurOp curOp(client.get()); curOp.setMaxTimeMicros(intervalLong); curOp.ensureStarted(); sleepmicros(intervalShort); diff --git a/src/mongo/db/db.cpp b/src/mongo/db/db.cpp index 734e71ae4b1..92bb4177c91 100644 --- a/src/mongo/db/db.cpp +++ b/src/mongo/db/db.cpp @@ -199,12 +199,6 @@ namespace mongo { break; } } - - virtual void disconnected( AbstractMessagingPort* p ) { - Client * c = currentClient.get(); - if( c ) c->shutdown(); - } - }; static void logStartup() { @@ -516,7 +510,6 @@ namespace mongo { if (storageGlobalParams.upgrade) { log() << "finished checking dbs" << endl; - cc().shutdown(); exitCleanly(EXIT_CLEAN); } diff --git a/src/mongo/db/dbcommands_generic.cpp b/src/mongo/db/dbcommands_generic.cpp index fa640d0fa47..c07dc30a1dc 100644 --- a/src/mongo/db/dbcommands_generic.cpp +++ b/src/mongo/db/dbcommands_generic.cpp @@ -288,12 +288,8 @@ namespace mongo { } ::abort(); } - Client * c = currentClient.get(); - if ( c ) { - c->shutdown(); - } - log() << "terminating, shutdown command received" << endl; + log() << "terminating, shutdown command received"; #if defined(_WIN32) // Signal the ServiceMain thread to shutdown. diff --git a/src/mongo/db/dbwebserver.cpp b/src/mongo/db/dbwebserver.cpp index ef14455bcfa..9a919b8967b 100644 --- a/src/mongo/db/dbwebserver.cpp +++ b/src/mongo/db/dbwebserver.cpp @@ -591,8 +591,6 @@ namespace { Client::initThread("websvr"); dbWebServer->initAndListen(); - - cc().shutdown(); } } // namespace mongo diff --git a/src/mongo/db/index_builder.cpp b/src/mongo/db/index_builder.cpp index 35ebcee4bb4..1d1914712ff 100644 --- a/src/mongo/db/index_builder.cpp +++ b/src/mongo/db/index_builder.cpp @@ -101,8 +101,6 @@ namespace { error() << "IndexBuilder could not build index: " << status.toString(); fassert(28555, ErrorCodes::isInterruption(status.code())); } - - txn.getClient()->shutdown(); } Status IndexBuilder::buildInForeground(OperationContext* txn, Database* db) const { diff --git a/src/mongo/db/instance.cpp b/src/mongo/db/instance.cpp index 4085ffbed55..80bf95116ce 100644 --- a/src/mongo/db/instance.cpp +++ b/src/mongo/db/instance.cpp @@ -1263,7 +1263,7 @@ namespace { } NOINLINE_DECL void dbexit( ExitCode rc, const char *why ) { - audit::logShutdown(currentClient.get()); + audit::logShutdown(&cc()); log(LogComponent::kControl) << "dbexit: " << why << " rc: " << rc; diff --git a/src/mongo/db/operation_context_impl.cpp b/src/mongo/db/operation_context_impl.cpp index ca9cfa3a61d..bd29ce23251 100644 --- a/src/mongo/db/operation_context_impl.cpp +++ b/src/mongo/db/operation_context_impl.cpp @@ -74,7 +74,7 @@ namespace { using std::string; OperationContextImpl::OperationContextImpl() - : _client(currentClient.get()), + : _client(&cc()), _locker(clientOperationInfoDecoration(_client).getLocker()), _writesAreReplicated(true) { diff --git a/src/mongo/db/range_deleter_db_env.cpp b/src/mongo/db/range_deleter_db_env.cpp index a717a2ba6fc..5756d97616a 100644 --- a/src/mongo/db/range_deleter_db_env.cpp +++ b/src/mongo/db/range_deleter_db_env.cpp @@ -50,8 +50,7 @@ namespace mongo { using std::string; void RangeDeleterDBEnv::initThread() { - if ( currentClient.get() == NULL ) - Client::initThread( "RangeDeleter" ); + Client::initThreadIfNotAlready("RangeDeleter"); } /** @@ -76,11 +75,7 @@ namespace mongo { const bool fromMigrate = taskDetails.options.fromMigrate; const bool onlyRemoveOrphans = 
taskDetails.options.onlyRemoveOrphanedDocs; - const bool initiallyHaveClient = haveClient(); - - if (!initiallyHaveClient) { - Client::initThread("RangeDeleter"); - } + Client::initThreadIfNotAlready("RangeDeleter"); *deletedDocs = 0; ShardForceVersionOkModeBlock forceVersion; @@ -119,10 +114,6 @@ namespace mongo { *errMsg = "collection or index dropped before data could be cleaned"; warning() << *errMsg << endl; - if (!initiallyHaveClient) { - txn->getClient()->shutdown(); - } - return false; } @@ -139,18 +130,10 @@ namespace mongo { << " -> " << exclusiveUpper << ", cause by:" << causedBy(ex); - if (!initiallyHaveClient) { - txn->getClient()->shutdown(); - } - return false; } } - if (!initiallyHaveClient) { - txn->getClient()->shutdown(); - } - return true; } diff --git a/src/mongo/db/repl/bgsync.cpp b/src/mongo/db/repl/bgsync.cpp index 270789226f9..b1343272d52 100644 --- a/src/mongo/db/repl/bgsync.cpp +++ b/src/mongo/db/repl/bgsync.cpp @@ -162,8 +162,6 @@ namespace { fassertFailed(28546); } } - - cc().shutdown(); } void BackgroundSync::_producerThread() { diff --git a/src/mongo/db/repl/master_slave.cpp b/src/mongo/db/repl/master_slave.cpp index 10497525c89..c8011d7aed1 100644 --- a/src/mongo/db/repl/master_slave.cpp +++ b/src/mongo/db/repl/master_slave.cpp @@ -1362,11 +1362,7 @@ namespace repl { int _dummy_z; void pretouchN(vector<BSONObj>& v, unsigned a, unsigned b) { - Client *c = currentClient.get(); - if( c == 0 ) { - Client::initThread("pretouchN"); - c = &cc(); - } + Client::initThreadIfNotAlready("pretouchN"); OperationContextImpl txn; // XXX ScopedTransaction transaction(&txn, MODE_S); diff --git a/src/mongo/db/repl/rs_sync.cpp b/src/mongo/db/repl/rs_sync.cpp index dbb914d6c6e..f13973c2532 100644 --- a/src/mongo/db/repl/rs_sync.cpp +++ b/src/mongo/db/repl/rs_sync.cpp @@ -142,7 +142,6 @@ namespace repl { sleepsecs(10); } } - cc().shutdown(); } } // namespace repl diff --git a/src/mongo/db/repl/sync_source_feedback.cpp b/src/mongo/db/repl/sync_source_feedback.cpp index 3b5f6ac389e..ae909cfd95b 100644 --- a/src/mongo/db/repl/sync_source_feedback.cpp +++ b/src/mongo/db/repl/sync_source_feedback.cpp @@ -208,7 +208,6 @@ namespace repl { _positionChanged = true; } } - cc().shutdown(); } } // namespace repl } // namespace mongo diff --git a/src/mongo/db/service_context.cpp b/src/mongo/db/service_context.cpp index 64e24d33a27..4dade021ccc 100644 --- a/src/mongo/db/service_context.cpp +++ b/src/mongo/db/service_context.cpp @@ -31,7 +31,9 @@ #include "mongo/db/service_context.h" #include "mongo/bson/bsonobj.h" +#include "mongo/db/client.h" #include "mongo/db/operation_context.h" +#include "mongo/stdx/memory.h" #include "mongo/util/assert_util.h" #include "mongo/util/mongoutils/str.h" @@ -107,4 +109,71 @@ namespace mongo { return Status::OK(); } + ServiceContext::~ServiceContext() { + boost::lock_guard<boost::mutex> lk(_mutex); + invariant(_clients.empty()); + } + + ServiceContext::UniqueClient ServiceContext::makeClient(std::string desc, + AbstractMessagingPort* p) { + std::unique_ptr<Client> client(new Client(std::move(desc), this, p)); + auto observer = _clientObservers.cbegin(); + try { + for (; observer != _clientObservers.cend(); ++observer) { + observer->get()->onCreateClient(this, client.get()); + } + } + catch (...) { + try { + while (observer != _clientObservers.cbegin()) { + --observer; + observer->get()->onDestroyClient(this, client.get()); + } + } + catch (...) 
{ + std::terminate(); + } + throw; + } + { + boost::lock_guard<boost::mutex> lk(_mutex); + invariant(_clients.insert(client.get()).second); + } + return UniqueClient(client.release()); + } + + void ServiceContext::ClientDeleter::operator()(Client* client) const { + ServiceContext* const service = client->getServiceContext(); + { + boost::lock_guard<boost::mutex> lk(service->_mutex); + invariant(service->_clients.erase(client)); + } + try { + for (const auto& observer : service->_clientObservers) { + observer->onDestroyClient(service, client); + } + } + catch (...) { + std::terminate(); + } + delete client; + } + + void ServiceContext::registerClientObserver(std::unique_ptr<ClientObserver> observer) { + _clientObservers.push_back(std::move(observer)); + } + + ServiceContext::LockedClientsCursor::LockedClientsCursor(ServiceContext* service) + : _lock(service->_mutex), + _curr(service->_clients.cbegin()), + _end(service->_clients.cend()) {} + + Client* ServiceContext::LockedClientsCursor::next() { + if (_curr == _end) + return nullptr; + Client* result = *_curr; + ++_curr; + return result; + } + } // namespace mongo diff --git a/src/mongo/db/service_context.h b/src/mongo/db/service_context.h index 0e5bd6b413d..ba1476656f7 100644 --- a/src/mongo/db/service_context.h +++ b/src/mongo/db/service_context.h @@ -28,13 +28,19 @@ #pragma once +#include <memory> +#include <vector> + #include "mongo/base/disallow_copying.h" #include "mongo/db/storage/storage_engine.h" -#include "mongo/util/decorable.h" +#include "mongo/platform/unordered_set.h" #include "mongo/stdx/functional.h" +#include "mongo/util/decorable.h" namespace mongo { + class AbstractMessagingPort; + class Client; class OperationContext; class OpObserver; @@ -68,10 +74,103 @@ namespace mongo { StorageFactoriesIterator() { } }; + /** + * Class representing the context of a service, such as a MongoD database service or + * a MongoS routing service. + * + * A ServiceContext is the root of a hierarchy of contexts. A ServiceContext owns + * zero or more Clients, which in turn each own OperationContexts. + */ class ServiceContext : public Decorable<ServiceContext> { MONGO_DISALLOW_COPYING(ServiceContext); public: - virtual ~ServiceContext() { } + /** + * Special deleter used for cleaning up Client objects owned by a ServiceContext. + * See UniqueClient, below. + */ + class ClientDeleter { + public: + void operator()(Client* client) const; + }; + + /** + * Observer interface implemented to hook client creation and destruction. + */ + class ClientObserver { + public: + virtual ~ClientObserver() = default; + + /** + * Hook called after a new client "client" is created on "service" by + * service->makeClient(). + * + * For a given client and registered instance of ClientObserver, if onCreateClient + * returns without throwing an exception, onDestroyClient will be called when "client" + * is deleted. + */ + virtual void onCreateClient(ServiceContext* service, Client* client) = 0; + + /** + * Hook called on a "client" created by "service" before deleting "client". + * + * Like a destructor, must not throw exceptions. + */ + virtual void onDestroyClient(ServiceContext* service, Client* client) = 0; + }; + + using ClientSet = unordered_set<Client*>; + + /** + * Cursor for enumerating the live Client objects belonging to a ServiceContext. + * + * Lifetimes of this type are synchronized with client creation and destruction. 
+ */ + class LockedClientsCursor { + public: + /** + * Constructs a cursor for enumerating the clients of "service", blocking "service" from + * creating or destroying Client objects until this instance is destroyed. + */ + explicit LockedClientsCursor(ServiceContext* service); + + /** + * Returns the next client in the enumeration, or nullptr if there are no more clients. + */ + Client* next(); + + private: + boost::unique_lock<boost::mutex> _lock; + ClientSet::const_iterator _curr; + ClientSet::const_iterator _end; + }; + + /** + * This is the unique handle type for Clients created by a ServiceContext. + */ + using UniqueClient = std::unique_ptr<Client, ClientDeleter>; + + virtual ~ServiceContext(); + + /** + * Registers an observer of lifecycle events on Clients created by this ServiceContext. + * + * See the ClientObserver type, above, for details. + * + * All calls to registerClientObserver must complete before ServiceContext + * is used in multi-threaded operation, or is used to create clients via calls + * to makeClient. + */ + void registerClientObserver(std::unique_ptr<ClientObserver> observer); + + /** + * Creates a new Client object representing a client session associated with this + * ServiceContext. + * + * The "desc" string is used to set a descriptive name for the client, used in logging. + * + * If supplied, "p" is the communication channel used for communicating with the client. + */ + UniqueClient makeClient(std::string desc, AbstractMessagingPort* p = nullptr); // // Storage @@ -174,7 +273,20 @@ namespace mongo { virtual OpObserver* getOpObserver() = 0; protected: - ServiceContext() { } + ServiceContext() = default; + + /** + * Mutex used to synchronize access to mutable state of this ServiceContext instance, + * including possibly by its subclasses. + */ + boost::mutex _mutex; + + private: + /** + * Vector of registered observers. 
+ */ + std::vector<std::unique_ptr<ClientObserver>> _clientObservers; + ClientSet _clients; }; /** diff --git a/src/mongo/db/service_context_d.cpp b/src/mongo/db/service_context_d.cpp index 50a9b660394..d78cf0be802 100644 --- a/src/mongo/db/service_context_d.cpp +++ b/src/mongo/db/service_context_d.cpp @@ -28,6 +28,8 @@ #define MONGO_LOG_DEFAULT_COMPONENT ::mongo::logger::LogComponent::kDefault +#include "mongo/platform/basic.h" + #include "mongo/db/service_context_d.h" #include "mongo/base/init.h" @@ -169,7 +171,7 @@ namespace mongo { } void ServiceContextMongoD::setKillAllOperations() { - boost::lock_guard<boost::mutex> clientLock(Client::clientsMutex); + boost::lock_guard<boost::mutex> clientLock(_mutex); _globalKill = true; for (size_t i = 0; i < _killOpListeners.size(); i++) { try { @@ -210,13 +212,7 @@ namespace mongo { } bool ServiceContextMongoD::killOperation(unsigned int opId) { - boost::lock_guard<boost::mutex> clientLock(Client::clientsMutex); - - for(ClientSet::const_iterator j = Client::clients.begin(); - j != Client::clients.end(); ++j) { - - Client* client = *j; - + for (LockedClientsCursor cursor(this); Client* client = cursor.next();) { bool found = _killOperationsAssociatedWithClientAndOpId_inlock(client, opId); if (found) { return true; @@ -227,11 +223,7 @@ namespace mongo { } void ServiceContextMongoD::killAllUserOperations(const OperationContext* txn) { - boost::lock_guard<boost::mutex> scopedLock(Client::clientsMutex); - for (ClientSet::const_iterator i = Client::clients.begin(); - i != Client::clients.end(); i++) { - - Client* client = *i; + for (LockedClientsCursor cursor(this); Client* client = cursor.next();) { if (!client->isFromUserConnection()) { // Don't kill system operations. continue; @@ -256,7 +248,7 @@ namespace mongo { } void ServiceContextMongoD::registerKillOpListener(KillOpListenerInterface* listener) { - boost::lock_guard<boost::mutex> clientLock(Client::clientsMutex); + boost::lock_guard<boost::mutex> clientLock(_mutex); _killOpListeners.push_back(listener); } diff --git a/src/mongo/db/service_context_d.h b/src/mongo/db/service_context_d.h index cae5841ee7a..caa6ba17bdf 100644 --- a/src/mongo/db/service_context_d.h +++ b/src/mongo/db/service_context_d.h @@ -33,7 +33,6 @@ #include "mongo/db/service_context.h" #include "mongo/platform/unordered_set.h" -#include "mongo/util/concurrency/mutex.h" namespace mongo { @@ -85,7 +84,7 @@ namespace mongo { bool _globalKill; - // protected by Client::clientsMutex + // protected by parent class's _mutex std::vector<KillOpListenerInterface*> _killOpListeners; boost::scoped_ptr<StorageEngineLockFile> _lockFile; diff --git a/src/mongo/db/stats/lock_server_status_section.cpp b/src/mongo/db/stats/lock_server_status_section.cpp index 1e81904b158..7b1350af9c0 100644 --- a/src/mongo/db/stats/lock_server_status_section.cpp +++ b/src/mongo/db/stats/lock_server_status_section.cpp @@ -55,35 +55,28 @@ namespace { int numWaitingWrite = 0; // This returns the blocked lock states - { - boost::lock_guard<boost::mutex> scopedLock(Client::clientsMutex); - - // Count all clients - numTotal = Client::clients.size(); + for (ServiceContext::LockedClientsCursor cursor(txn->getClient()->getServiceContext()); + Client* client = cursor.next();) { - ClientSet::const_iterator it = Client::clients.begin(); - for (; it != Client::clients.end(); it++) { - Client* client = *it; - invariant(client); + invariant(client); + ++numTotal; + boost::unique_lock<Client> uniqueLock(*client); - boost::unique_lock<Client> uniqueLock(*client); + const 
OperationContext* opCtx = client->getOperationContext(); + if (opCtx == NULL) continue; - const OperationContext* opCtx = client->getOperationContext(); - if (opCtx == NULL) continue; + if (opCtx->lockState()->isWriteLocked()) { + numWriteLocked++; - if (opCtx->lockState()->isWriteLocked()) { - numWriteLocked++; - - if (opCtx->lockState()->getWaitingResource().isValid()) { - numWaitingWrite++; - } + if (opCtx->lockState()->getWaitingResource().isValid()) { + numWaitingWrite++; } - else if (opCtx->lockState()->isReadLocked()) { - numReadLocked++; + } + else if (opCtx->lockState()->isReadLocked()) { + numReadLocked++; - if (opCtx->lockState()->getWaitingResource().isValid()) { - numWaitingRead++; - } + if (opCtx->lockState()->getWaitingResource().isValid()) { + numWaitingRead++; } } } diff --git a/src/mongo/db/stats/snapshots.cpp b/src/mongo/db/stats/snapshots.cpp index addf361bcb6..2e2d2f9398f 100644 --- a/src/mongo/db/stats/snapshots.cpp +++ b/src/mongo/db/stats/snapshots.cpp @@ -109,8 +109,6 @@ namespace mongo { void SnapshotThread::run() { Client::initThread("snapshot"); - Client& client = cc(); - while ( ! inShutdown() ) { try { statsSnapshots.takeSnapshot(); @@ -121,8 +119,6 @@ namespace mongo { sleepsecs(4); } - - client.shutdown(); } Snapshots statsSnapshots; diff --git a/src/mongo/db/storage/mmap_v1/dur.cpp b/src/mongo/db/storage/mmap_v1/dur.cpp index 840a5410edd..bc6f4060a90 100644 --- a/src/mongo/db/storage/mmap_v1/dur.cpp +++ b/src/mongo/db/storage/mmap_v1/dur.cpp @@ -889,8 +889,6 @@ namespace { journalWriter.shutdown(); log() << "Durability thread stopped"; - - cc().shutdown(); } diff --git a/src/mongo/db/storage/mmap_v1/dur_journal_writer.cpp b/src/mongo/db/storage/mmap_v1/dur_journal_writer.cpp index 56a9d827671..121cbcf9d0e 100644 --- a/src/mongo/db/storage/mmap_v1/dur_journal_writer.cpp +++ b/src/mongo/db/storage/mmap_v1/dur_journal_writer.cpp @@ -279,8 +279,6 @@ namespace { } log() << "Journal writer thread stopped"; - - cc().shutdown(); } diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_mongod.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_mongod.cpp index a8681ff2b61..22829ecc6ac 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_mongod.cpp +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_mongod.cpp @@ -133,8 +133,6 @@ namespace mongo { } } - cc().shutdown(); - log() << "shutting down"; } diff --git a/src/mongo/dbtests/framework.cpp b/src/mongo/dbtests/framework.cpp index 98dca4eed2e..edae5e7744b 100644 --- a/src/mongo/dbtests/framework.cpp +++ b/src/mongo/dbtests/framework.cpp @@ -132,7 +132,6 @@ namespace mongo { frameworkGlobalParams.runsPerTest); - cc().shutdown(); exitCleanly( (ExitCode)ret ); // so everything shuts down cleanly return ret; } diff --git a/src/mongo/dbtests/perftests.cpp b/src/mongo/dbtests/perftests.cpp index 1da93f0e3d3..90c0d74c632 100644 --- a/src/mongo/dbtests/perftests.cpp +++ b/src/mongo/dbtests/perftests.cpp @@ -399,7 +399,6 @@ namespace PerfTests { if( stop ) break; } - cc().shutdown(); } unsigned long long launchThreads(int remaining) { diff --git a/src/mongo/dbtests/threadedtests.cpp b/src/mongo/dbtests/threadedtests.cpp index 4d959ace021..ae94c32fb1d 100644 --- a/src/mongo/dbtests/threadedtests.cpp +++ b/src/mongo/dbtests/threadedtests.cpp @@ -246,7 +246,6 @@ namespace ThreadedTests { } pm.hit(); } - cc().shutdown(); } virtual void validate() { @@ -568,8 +567,6 @@ namespace ThreadedTests { default: ASSERT(false); } - - cc().shutdown(); } }; @@ -736,7 +733,6 @@ namespace 
ThreadedTests { LOG(Z) << t.millis() << endl; ASSERT( t.millis() > 50 ); } - cc().shutdown(); } }; @@ -800,9 +796,6 @@ namespace ThreadedTests { mongo::unittest::log() << "checked in " << i << " times..." << endl; } - - cc().shutdown(); - } virtual void validate() { diff --git a/src/mongo/s/catalog/legacy/catalog_manager_legacy.cpp b/src/mongo/s/catalog/legacy/catalog_manager_legacy.cpp index b18c509dabe..9c134925961 100644 --- a/src/mongo/s/catalog/legacy/catalog_manager_legacy.cpp +++ b/src/mongo/s/catalog/legacy/catalog_manager_legacy.cpp @@ -912,7 +912,16 @@ namespace { const string changeID = changeIdBuilder.str(); - Client* const client = (opCtx ? opCtx->getClient() : currentClient.get()); + Client* client; + if (opCtx) { + client = opCtx->getClient(); + } + else if (haveClient()) { + client = &cc(); + } + else { + client = nullptr; + } // Send a copy of the message to the local log in case it doesn't manage to reach // config.changelog diff --git a/src/mongo/s/d_migrate.cpp b/src/mongo/s/d_migrate.cpp index 87126fc952b..bc594bcd2d7 100644 --- a/src/mongo/s/d_migrate.cpp +++ b/src/mongo/s/d_migrate.cpp @@ -2654,7 +2654,6 @@ namespace mongo { txn.getCurOp()->reset(); migrateStatus.go(&txn, ns, min, max, shardKeyPattern, fromShard, epoch, writeConcern); - cc().shutdown(); } /** diff --git a/src/mongo/s/d_state.cpp b/src/mongo/s/d_state.cpp index 88b2dfd0267..f4ada947354 100644 --- a/src/mongo/s/d_state.cpp +++ b/src/mongo/s/d_state.cpp @@ -1433,4 +1433,12 @@ namespace mongo { void usingAShardConnection( const string& addr ) { } + + void saveGLEStats(const BSONObj& result, const std::string& conn) { + // Declared in cluster_last_error_info.h. + // + // This can be called in mongod, which is unfortunate. To fix this, + // we can redesign how connection pooling works on mongod for sharded operations. + } + } diff --git a/src/mongo/s/s_only.cpp b/src/mongo/s/s_only.cpp index 11784b4e0a0..3329d708fe7 100644 --- a/src/mongo/s/s_only.cpp +++ b/src/mongo/s/s_only.cpp @@ -58,53 +58,6 @@ namespace mongo { ClusterLastErrorInfo::get(cc()).addShardHost(addr); } - TSP_DEFINE(Client,currentClient) - - Client::Client(const string& desc, ServiceContext* serviceContext, AbstractMessagingPort *p) : - ClientBasic(serviceContext, p), - _desc(desc), - _connectionId(), - _inDirectClient(false) { - } - Client::~Client() {} - void Client::shutdown() {} - - void Client::initThread(const char *desc, AbstractMessagingPort *mp) { - initThread(desc, getGlobalServiceContext(), mp); - } - - void Client::initThread(const char* desc, - ServiceContext* serviceContext, - AbstractMessagingPort *mp) { - - verify(currentClient.get() == 0); - - string fullDesc = desc; - if ( str::equals( "conn" , desc ) && mp != NULL ) - fullDesc = str::stream() << desc << mp->connectionId(); - - setThreadName( fullDesc.c_str() ); - - Client *c = new Client(fullDesc, serviceContext, mp); - currentClient.reset(c); - mongo::lastError.initThread(); - AuthorizationSession::set(c, getGlobalAuthorizationManager()->makeAuthorizationSession()); - } - - ClientBasic* ClientBasic::getCurrent() { - return currentClient.get(); - } - - string Client::clientAddress(bool includePort) const { - if (!hasRemote()) { - return ""; - } - if (includePort) { - return getRemote().toString(); - } - return getRemote().host(); - } - // Need a version that takes a Client to match the mongod interface so the web server can call // execCommand and not need to worry if it's in a mongod or mongos. 
void Command::execCommand(OperationContext* txn, diff --git a/src/mongo/s/server.cpp b/src/mongo/s/server.cpp index a9353f5b1a5..f2f21915b5b 100644 --- a/src/mongo/s/server.cpp +++ b/src/mongo/s/server.cpp @@ -184,10 +184,6 @@ namespace mongo { // Release connections back to pool, if any still cached ShardConnection::releaseMyConnections(); } - - virtual void disconnected( AbstractMessagingPort* p ) { - // all things are thread local - } }; void start( const MessageServer::Options& opts ) { diff --git a/src/mongo/shell/dbshell.cpp b/src/mongo/shell/dbshell.cpp index 3db06409eff..8abfcecbc9f 100644 --- a/src/mongo/shell/dbshell.cpp +++ b/src/mongo/shell/dbshell.cpp @@ -179,7 +179,6 @@ void killOps() { // Stubs for signal_handlers.cpp namespace mongo { - void Client::initThread(const char *desc, mongo::AbstractMessagingPort *mp) {} void logProcessDetailsForLogRotate() {} void exitCleanly(ExitCode code) { diff --git a/src/mongo/util/net/message_server.h b/src/mongo/util/net/message_server.h index 384a0fea5b7..66bdec58ad7 100644 --- a/src/mongo/util/net/message_server.h +++ b/src/mongo/util/net/message_server.h @@ -54,11 +54,6 @@ namespace mongo { * handler is responsible for responding to client */ virtual void process( Message& m , AbstractMessagingPort* p , LastError * err ) = 0; - - /** - * called once when a socket is disconnected - */ - virtual void disconnected( AbstractMessagingPort* p ) = 0; }; class MessageServer { diff --git a/src/mongo/util/net/message_server_port.cpp b/src/mongo/util/net/message_server_port.cpp index f84192f8cd8..8db034fbe6e 100644 --- a/src/mongo/util/net/message_server_port.cpp +++ b/src/mongo/util/net/message_server_port.cpp @@ -260,7 +260,6 @@ namespace { if (manager) manager->cleanupThreadLocals(); #endif - handler->disconnected(portWithHandler.get()); return NULL; } |