diff options
author | Kaloian Manassiev <kaloian.manassiev@mongodb.com> | 2014-06-13 15:35:51 -0400 |
---|---|---|
committer | Kaloian Manassiev <kaloian.manassiev@mongodb.com> | 2014-06-27 22:54:43 -0400 |
commit | 52edab726185cbba1401cb46de221fb3d1cb0408 (patch) | |
tree | 165f9e8fded43db1dac6ab9a1c46cc2367362e5b /src/mongo/db | |
parent | ecfc44d7bce08660804fa4475b45f9a09d203f09 (diff) | |
download | mongo-52edab726185cbba1401cb46de221fb3d1cb0408.tar.gz |
SERVER-13961 Add OperationContext argument to Client::Context
Time tracking and database access in Client::Context require access to the
OperationContext. Adding it as an argument.
This is in preparation for removing LockState from Client.
Diffstat (limited to 'src/mongo/db')
37 files changed, 156 insertions, 138 deletions
diff --git a/src/mongo/db/catalog/collection_cursor_cache.cpp b/src/mongo/db/catalog/collection_cursor_cache.cpp index 2e941dd1f40..c8e8feae704 100644 --- a/src/mongo/db/catalog/collection_cursor_cache.cpp +++ b/src/mongo/db/catalog/collection_cursor_cache.cpp @@ -191,7 +191,7 @@ namespace mongo { Database* db = dbHolder().get(txn, ns); if ( !db ) return false; - Client::Context context( ns, db ); + Client::Context context(txn, ns, db ); Collection* collection = db->getCollection( txn, ns ); if ( !collection ) { if ( checkAuth ) @@ -221,7 +221,7 @@ namespace mongo { Database* db = dbHolder().get(txn, ns); if ( !db ) continue; - Client::Context context( ns, db ); + Client::Context context(txn, ns, db ); Collection* collection = db->getCollection( txn, ns ); if ( collection == NULL ) { continue; diff --git a/src/mongo/db/catalog/database.cpp b/src/mongo/db/catalog/database.cpp index affa5d64195..1238072ba59 100644 --- a/src/mongo/db/catalog/database.cpp +++ b/src/mongo/db/catalog/database.cpp @@ -513,7 +513,7 @@ namespace mongo { log() << "dropAllDatabasesExceptLocal " << n.size() << endl; for( vector<string>::iterator i = n.begin(); i != n.end(); i++ ) { if( *i != "local" ) { - Client::Context ctx(*i); + Client::Context ctx(txn, *i); dropDatabase(txn, ctx.db()); } } diff --git a/src/mongo/db/catalog/database_holder.cpp b/src/mongo/db/catalog/database_holder.cpp index e1f83b53e98..438be2fd6aa 100644 --- a/src/mongo/db/catalog/database_holder.cpp +++ b/src/mongo/db/catalog/database_holder.cpp @@ -141,8 +141,10 @@ namespace mongo { int nNotClosed = 0; for( set< string >::iterator i = dbs.begin(); i != dbs.end(); ++i ) { string name = *i; + LOG(2) << "DatabaseHolder::closeAll name:" << name; - Client::Context ctx( name ); + Client::Context ctx(txn, name); + if( !force && BackgroundOperation::inProgForDb(name) ) { log() << "WARNING: can't close database " << name diff --git a/src/mongo/db/catalog/index_catalog.cpp b/src/mongo/db/catalog/index_catalog.cpp index 
309775aed85..8d70c6b123c 100644 --- a/src/mongo/db/catalog/index_catalog.cpp +++ b/src/mongo/db/catalog/index_catalog.cpp @@ -420,7 +420,8 @@ namespace mongo { return; } - Client::Context context( _collection->ns().ns(), + Client::Context context( _txn, + _collection->ns().ns(), _collection->_database ); // if we're here, the index build failed or was interrupted diff --git a/src/mongo/db/client.cpp b/src/mongo/db/client.cpp index fb87dc5b9a7..6cfefce96a6 100644 --- a/src/mongo/db/client.cpp +++ b/src/mongo/db/client.cpp @@ -54,7 +54,7 @@ #include "mongo/db/instance.h" #include "mongo/db/json.h" #include "mongo/db/jsobj.h" -#include "mongo/db/operation_context_impl.h" +#include "mongo/db/operation_context.h" #include "mongo/db/repl/repl_coordinator_global.h" #include "mongo/db/repl/rs.h" #include "mongo/db/storage_options.h" @@ -162,23 +162,27 @@ namespace mongo { } BSONObj CachedBSONObjBase::_tooBig = fromjson("{\"$msg\":\"query not recording (too large)\"}"); - Client::Context::Context(const std::string& ns , Database * db) : - _client( currentClient.get() ), - _justCreated(false), - _doVersion( true ), - _ns( ns ), - _db(db) - { + + Client::Context::Context(OperationContext* txn, const std::string& ns, Database * db) + : _client( currentClient.get() ), + _justCreated(false), + _doVersion( true ), + _ns( ns ), + _db(db), + _txn(txn) { } - Client::Context::Context(const string& ns, bool doVersion) : - _client( currentClient.get() ), - _justCreated(false), // set for real in finishInit - _doVersion(doVersion), - _ns( ns ), - _db(0) - { + Client::Context::Context(OperationContext* txn, + const string& ns, + bool doVersion) + : _client( currentClient.get() ), + _justCreated(false), // set for real in finishInit + _doVersion(doVersion), + _ns( ns ), + _db(NULL), + _txn(txn) { + _finishInit(); } @@ -191,7 +195,7 @@ namespace mongo { _lk.reset(new Lock::DBRead(txn->lockState(), ns)); Database *db = dbHolder().get(txn, ns); if( db ) { - _c.reset(new Context(ns, db, 
doVersion)); + _c.reset(new Context(txn, ns, db, doVersion)); return; } } @@ -202,18 +206,18 @@ namespace mongo { if (txn->lockState()->isW()) { // write locked already DEV RARELY log() << "write locked on ReadContext construction " << ns << endl; - _c.reset(new Context(ns, doVersion)); + _c.reset(new Context(txn, ns, doVersion)); } else if (!txn->lockState()->isRecursive()) { _lk.reset(0); { Lock::GlobalWrite w(txn->lockState()); - Context c(ns, doVersion); + Context c(txn, ns, doVersion); } // db could be closed at this interim point -- that is ok, we will throw, and don't mind throwing. _lk.reset(new Lock::DBRead(txn->lockState(), ns)); - _c.reset(new Context(ns, doVersion)); + _c.reset(new Context(txn, ns, doVersion)); } else { uasserted(15928, str::stream() << "can't open a database from a nested read lock " << ns); @@ -228,7 +232,8 @@ namespace mongo { Client::WriteContext::WriteContext( OperationContext* opCtx, const std::string& ns, bool doVersion) : _lk(opCtx->lockState(), ns), - _c(ns, doVersion) { + _c(opCtx, ns, doVersion) { + } @@ -252,21 +257,24 @@ namespace mongo { } // invoked from ReadContext - Client::Context::Context(const string& ns, Database *db, bool doVersion) : - _client( currentClient.get() ), - _justCreated(false), - _doVersion( doVersion ), - _ns( ns ), - _db(db) - { + Client::Context::Context(OperationContext* txn, + const string& ns, + Database *db, + bool doVersion) + : _client( currentClient.get() ), + _justCreated(false), + _doVersion( doVersion ), + _ns( ns ), + _db(db), + _txn(txn) { + verify(_db); if (_doVersion) checkNotStale(); _client->_curOp->enter( this ); } void Client::Context::_finishInit() { - OperationContextImpl txn; // TODO get rid of this once reads require transactions - _db = dbHolder().getOrCreate(&txn, _ns, _justCreated); + _db = dbHolder().getOrCreate(_txn, _ns, _justCreated); invariant(_db); if( _doVersion ) checkNotStale(); @@ -276,7 +284,11 @@ namespace mongo { Client::Context::~Context() { DEV verify( _client 
== currentClient.get() ); - _client->_curOp->recordGlobalTime( _timer.micros() ); + + // Lock must still be held + invariant(_txn->lockState()->isLocked()); + + _client->_curOp->recordGlobalTime(_txn->lockState()->isWriteLocked(), _timer.micros()); } void Client::appendLastOp( BSONObjBuilder& b ) const { diff --git a/src/mongo/db/client.h b/src/mongo/db/client.h index 48a8d74dd18..cf5c9f38964 100644 --- a/src/mongo/db/client.h +++ b/src/mongo/db/client.h @@ -157,20 +157,20 @@ namespace mongo { /* Set database we want to use, then, restores when we finish (are out of scope) Note this is also helpful if an exception happens as the state if fixed up. */ - class Context : boost::noncopyable { + class Context { + MONGO_DISALLOW_COPYING(Context); public: /** this is probably what you want */ - Context(const std::string& ns, - bool doVersion = true); + Context(OperationContext* txn, const std::string& ns, bool doVersion = true); /** note: this does not call finishInit -- i.e., does not call shardVersionOk() for example. see also: reset(). 
*/ - Context(const std::string& ns , Database * db); + Context(OperationContext* txn, const std::string& ns, Database * db); // used by ReadContext - Context(const std::string& ns, Database *db, bool doVersion ); + Context(OperationContext* txn, const std::string& ns, Database *db, bool doVersion); ~Context(); Client* getClient() const { return _client; } @@ -199,6 +199,7 @@ namespace mongo { bool _doVersion; const std::string _ns; Database * _db; + OperationContext* _txn; Timer _timer; }; // class Client::Context diff --git a/src/mongo/db/commands/apply_ops.cpp b/src/mongo/db/commands/apply_ops.cpp index 88c8c66f484..b3fa1b6ce2e 100644 --- a/src/mongo/db/commands/apply_ops.cpp +++ b/src/mongo/db/commands/apply_ops.cpp @@ -133,7 +133,7 @@ namespace mongo { Lock::DBWrite lk(txn->lockState(), ns); invariant(txn->lockState()->isRecursive()); - Client::Context ctx(ns); + Client::Context ctx(txn, ns); bool failed = repl::applyOperation_inlock(txn, ctx.db(), temp, diff --git a/src/mongo/db/commands/collection_to_capped.cpp b/src/mongo/db/commands/collection_to_capped.cpp index 7299f0abf29..476d80edf08 100644 --- a/src/mongo/db/commands/collection_to_capped.cpp +++ b/src/mongo/db/commands/collection_to_capped.cpp @@ -60,7 +60,7 @@ namespace mongo { // create new collection { - Client::Context ctx( toNs ); + Client::Context ctx(txn, toNs ); BSONObjBuilder spec; spec.appendBool( "capped", true ); spec.append( "size", size ); @@ -153,7 +153,7 @@ namespace mongo { } Lock::DBWrite dbXLock(txn->lockState(), dbname); - Client::Context ctx(dbname); + Client::Context ctx(txn, dbname); Status status = cloneCollectionAsCapped( txn, ctx.db(), from, to, size, temp, true ); return appendCommandStatus( result, status ); @@ -200,7 +200,7 @@ namespace mongo { // calls renamecollection which does a global lock, so we must too: // Lock::GlobalWrite globalWriteLock(txn->lockState()); - Client::Context ctx(dbname); + Client::Context ctx(txn, dbname); Database* db = ctx.db(); diff --git 
a/src/mongo/db/commands/compact.cpp b/src/mongo/db/commands/compact.cpp index e9d4ca106d8..c0df090d29a 100644 --- a/src/mongo/db/commands/compact.cpp +++ b/src/mongo/db/commands/compact.cpp @@ -142,7 +142,7 @@ namespace mongo { Lock::DBWrite lk(txn->lockState(), ns.ns()); BackgroundOperation::assertNoBgOpInProgForNs(ns.ns()); - Client::Context ctx(ns); + Client::Context ctx(txn, ns); Collection* collection = ctx.db()->getCollection(txn, ns.ns()); if( ! collection ) { diff --git a/src/mongo/db/commands/count.cpp b/src/mongo/db/commands/count.cpp index 48ebb4422e1..aa43882ea78 100644 --- a/src/mongo/db/commands/count.cpp +++ b/src/mongo/db/commands/count.cpp @@ -73,7 +73,7 @@ namespace mongo { string &err, int &errCode) { // Lock 'ns'. - Client::Context cx(ns); + Client::Context cx(txn, ns); Collection* collection = cx.db()->getCollection(txn, ns); if (NULL == collection) { diff --git a/src/mongo/db/commands/cpuprofile.cpp b/src/mongo/db/commands/cpuprofile.cpp index b6c6f923f83..de614b8a553 100644 --- a/src/mongo/db/commands/cpuprofile.cpp +++ b/src/mongo/db/commands/cpuprofile.cpp @@ -136,7 +136,7 @@ namespace mongo { BSONObjBuilder &result, bool fromRepl ) { Lock::DBWrite dbXLock(db); - Client::Context ctx(db); + Client::Context ctx(txn, db); std::string profileFilename = cmdObj[commandName]["profileFilename"].String(); if ( ! 
::ProfilerStart( profileFilename.c_str() ) ) { @@ -154,7 +154,7 @@ namespace mongo { BSONObjBuilder &result, bool fromRepl ) { Lock::DBWrite dbXLock(db); - Client::Context ctx(db); + Client::Context ctx(txn, db); ::ProfilerStop(); return true; diff --git a/src/mongo/db/commands/drop_indexes.cpp b/src/mongo/db/commands/drop_indexes.cpp index c659454d684..d5491d166e7 100644 --- a/src/mongo/db/commands/drop_indexes.cpp +++ b/src/mongo/db/commands/drop_indexes.cpp @@ -110,7 +110,7 @@ namespace mongo { LOG(0) << "CMD: dropIndexes " << toDeleteNs << endl; } - Client::Context ctx(toDeleteNs); + Client::Context ctx(txn, toDeleteNs); Database* db = ctx.db(); Collection* collection = db->getCollection( txn, toDeleteNs ); @@ -222,7 +222,7 @@ namespace mongo { LOG(0) << "CMD: reIndex " << toDeleteNs << endl; Lock::DBWrite dbXLock(txn->lockState(), dbname); - Client::Context ctx(toDeleteNs); + Client::Context ctx(txn, toDeleteNs); Collection* collection = ctx.db()->getCollection( txn, toDeleteNs ); diff --git a/src/mongo/db/commands/find_and_modify.cpp b/src/mongo/db/commands/find_and_modify.cpp index 7c3221cbbb6..3981705ed00 100644 --- a/src/mongo/db/commands/find_and_modify.cpp +++ b/src/mongo/db/commands/find_and_modify.cpp @@ -94,7 +94,7 @@ namespace mongo { } Lock::DBWrite dbXLock(txn->lockState(), dbname); - Client::Context ctx(ns); + Client::Context ctx(txn, ns); return runNoDirectClient( txn, ns , query , fields , update , @@ -134,7 +134,7 @@ namespace mongo { string& errmsg) { Lock::DBWrite lk(txn->lockState(), ns); - Client::Context cx( ns ); + Client::Context cx(txn, ns); Collection* collection = cx.db()->getCollection( txn, ns ); const WhereCallbackReal whereCallback = WhereCallbackReal(StringData(ns)); @@ -330,7 +330,7 @@ namespace mongo { } Lock::DBWrite dbXLock(txn->lockState(), dbname); - Client::Context ctx(ns); + Client::Context ctx(txn, ns); BSONObj out = db.findOne(ns, q, fields); if (out.isEmpty()) { diff --git a/src/mongo/db/commands/mr.cpp 
b/src/mongo/db/commands/mr.cpp index 38dbceefe4d..b359a6fa3e4 100644 --- a/src/mongo/db/commands/mr.cpp +++ b/src/mongo/db/commands/mr.cpp @@ -588,7 +588,7 @@ namespace mongo { bool found; { - Client::Context tx( _config.outputOptions.finalNamespace ); + Client::Context tx(txn, _config.outputOptions.finalNamespace); Collection* coll = tx.db()->getCollection(_txn, _config.outputOptions.finalNamespace); found = Helpers::findOne(_txn, @@ -1290,7 +1290,7 @@ namespace mongo { // This context does no version check, safe b/c we checked earlier and have an // open cursor - scoped_ptr<Client::Context> ctx(new Client::Context(config.ns, false)); + scoped_ptr<Client::Context> ctx(new Client::Context(txn, config.ns, false)); const NamespaceString nss(config.ns); const WhereCallbackReal whereCallback(nss.db()); @@ -1349,7 +1349,8 @@ namespace mongo { lock.reset(); state.reduceAndSpillInMemoryStateIfNeeded(); lock.reset(new Lock::DBRead(txn->lockState(), config.ns)); - ctx.reset(new Client::Context(config.ns, false)); + + ctx.reset(new Client::Context(txn, config.ns, false)); reduceTime += t.micros(); diff --git a/src/mongo/db/commands/rename_collection.cpp b/src/mongo/db/commands/rename_collection.cpp index bae48fcc17f..a25b2e19a09 100644 --- a/src/mongo/db/commands/rename_collection.cpp +++ b/src/mongo/db/commands/rename_collection.cpp @@ -139,7 +139,7 @@ namespace mongo { std::vector<BSONObj> indexesInProg; { - Client::Context srcCtx( source ); + Client::Context srcCtx(txn, source); Collection* sourceColl = srcCtx.db()->getCollection( txn, source ); if ( !sourceColl ) { @@ -182,7 +182,7 @@ namespace mongo { } { - Client::Context ctx( target ); + Client::Context ctx(txn, target ); // Check if the target namespace exists and if dropTarget is true. // If target exists and dropTarget is not true, return false. 
@@ -245,7 +245,7 @@ namespace mongo { Collection* sourceColl = NULL; { - Client::Context srcCtx( source ); + Client::Context srcCtx(txn, source); sourceColl = srcCtx.db()->getCollection( txn, source ); sourceIt.reset( sourceColl->getIterator( DiskLoc(), false, CollectionScanParams::FORWARD ) ); } @@ -254,12 +254,12 @@ namespace mongo { while ( !sourceIt->isEOF() ) { BSONObj o; { - Client::Context srcCtx( source ); + Client::Context srcCtx(txn, source); o = sourceColl->docFor(sourceIt->getNext()); } // Insert and check return status of insert. { - Client::Context ctx( target ); + Client::Context ctx(txn, target ); if ( !targetColl ) targetColl = ctx.db()->getCollection( txn, target ); // No logOp necessary because the entire renameCollection command is one logOp. @@ -275,7 +275,7 @@ namespace mongo { // If inserts were unsuccessful, drop the target collection and return false. if ( !insertSuccessful ) { - Client::Context ctx( target ); + Client::Context ctx(txn, target ); Status s = ctx.db()->dropCollection( txn, target ); if ( !s.isOK() ) errmsg = s.toString(); @@ -287,7 +287,7 @@ namespace mongo { vector<BSONObj> copiedIndexes; bool indexSuccessful = true; { - Client::Context srcCtx( source ); + Client::Context srcCtx(txn, source); IndexCatalog::IndexIterator sourceIndIt = sourceColl->getIndexCatalog()->getIndexIterator( true ); @@ -313,7 +313,7 @@ namespace mongo { } { - Client::Context ctx( target ); + Client::Context ctx(txn, target ); if ( !targetColl ) targetColl = ctx.db()->getCollection( txn, target ); @@ -339,7 +339,7 @@ namespace mongo { // Drop the source collection. 
{ - Client::Context srcCtx( source ); + Client::Context srcCtx(txn, source); Status s = srcCtx.db()->dropCollection( txn, source ); if ( !s.isOK() ) { errmsg = s.toString(); diff --git a/src/mongo/db/commands/test_commands.cpp b/src/mongo/db/commands/test_commands.cpp index 58d5e143a11..20175588afc 100644 --- a/src/mongo/db/commands/test_commands.cpp +++ b/src/mongo/db/commands/test_commands.cpp @@ -62,7 +62,7 @@ namespace mongo { BSONObj obj = cmdObj[ "obj" ].embeddedObjectUserCheck(); Lock::DBWrite lk(txn->lockState(), ns); - Client::Context ctx( ns ); + Client::Context ctx(txn, ns ); Database* db = ctx.db(); Collection* collection = db->getCollection( txn, ns ); if ( !collection ) { diff --git a/src/mongo/db/commands/write_commands/batch_executor.cpp b/src/mongo/db/commands/write_commands/batch_executor.cpp index 52acb7aae6f..39b5e7c0014 100644 --- a/src/mongo/db/commands/write_commands/batch_executor.cpp +++ b/src/mongo/db/commands/write_commands/batch_executor.cpp @@ -918,8 +918,9 @@ namespace mongo { if (!checkIndexConstraints(txn, &shardingState, *request, result)) { return false; } - _context.reset(new Client::Context(request->getNS(), - false /* don't check version */)); + + _context.reset(new Client::Context(txn, request->getNS(), false)); + Database* database = _context->db(); dassert(database); _collection = database->getCollection(txn, request->getTargetingNS()); @@ -1098,7 +1099,7 @@ namespace mongo { if (!checkShardVersion(txn, &shardingState, *updateItem.getRequest(), result)) return; - Client::Context ctx(nsString.ns(), false /* don't check version */); + Client::Context ctx(txn, nsString.ns(), false /* don't check version */); try { UpdateResult res = executor.execute(txn, ctx.db()); @@ -1159,8 +1160,7 @@ namespace mongo { // Context once we're locked, to set more details in currentOp() // TODO: better constructor? 
- Client::Context writeContext( nss.ns(), - false /* don't check version */); + Client::Context writeContext(txn, nss.ns(), false /* don't check version */); try { result->getStats().n = executor.execute(txn, writeContext.db()); diff --git a/src/mongo/db/curop.cpp b/src/mongo/db/curop.cpp index 178166402d3..2f968ca4979 100644 --- a/src/mongo/db/curop.cpp +++ b/src/mongo/db/curop.cpp @@ -158,12 +158,8 @@ namespace mongo { _dbprofile = std::max( context->_db ? context->_db->getProfilingLevel() : 0 , _dbprofile ); } - void CurOp::recordGlobalTime( long long micros ) const { - if ( _client ) { - const LockState& ls = _client->lockState(); - verify( ls.threadState() ); - Top::global.record( _ns , _op , ls.isWriteLocked() ? 1 : -1 , micros , _isCommand ); - } + void CurOp::recordGlobalTime(bool isWriteLocked, long long micros) const { + Top::global.record(_ns, _op, isWriteLocked ? 1 : -1, micros, _isCommand); } void CurOp::reportState(BSONObjBuilder* builder) { diff --git a/src/mongo/db/curop.h b/src/mongo/db/curop.h index f24da292c98..ee52d2682a9 100644 --- a/src/mongo/db/curop.h +++ b/src/mongo/db/curop.h @@ -305,7 +305,7 @@ namespace mongo { long long getExpectedLatencyMs() const { return _expectedLatencyMs; } void setExpectedLatencyMs( long long latency ) { _expectedLatencyMs = latency; } - void recordGlobalTime( long long micros ) const; + void recordGlobalTime(bool isWriteLocked, long long micros) const; const LockStat& lockStat() const { return _lockStat; } LockStat& lockStat() { return _lockStat; } diff --git a/src/mongo/db/db.cpp b/src/mongo/db/db.cpp index 913419ef4f0..75242557c32 100644 --- a/src/mongo/db/db.cpp +++ b/src/mongo/db/db.cpp @@ -335,7 +335,7 @@ namespace mongo { string dbName = *i; LOG(1) << "\t" << dbName << endl; - Client::Context ctx( dbName ); + Client::Context ctx(&txn, dbName ); if (repl::replSettings.usingReplSets()) { // we only care about the _id index if we are in a replset diff --git a/src/mongo/db/dbcommands.cpp 
b/src/mongo/db/dbcommands.cpp index 4d7f8000f15..8dfc574c064 100644 --- a/src/mongo/db/dbcommands.cpp +++ b/src/mongo/db/dbcommands.cpp @@ -181,7 +181,7 @@ namespace mongo { // this is suboptimal but syncDataAndTruncateJournal is called from dropDatabase, // and that may need a global lock. Lock::GlobalWrite lk(txn->lockState()); - Client::Context context(dbname); + Client::Context context(txn, dbname); log() << "dropDatabase " << dbname << " starting" << endl; @@ -258,7 +258,7 @@ namespace mongo { // SERVER-4328 todo don't lock globally. currently syncDataAndTruncateJournal is being // called within, and that requires a global lock i believe. Lock::GlobalWrite lk(txn->lockState()); - Client::Context context( dbname ); + Client::Context context(txn, dbname ); log() << "repairDatabase " << dbname; std::vector<BSONObj> indexesInProg = stopIndexBuilds(txn, context.db(), cmdObj); @@ -331,7 +331,7 @@ namespace mongo { // in the local database. // Lock::DBWrite dbXLock(txn->lockState(), dbname); - Client::Context ctx(dbname); + Client::Context ctx(txn, dbname); BSONElement e = cmdObj.firstElement(); result.append("was", ctx.db()->getProfilingLevel()); @@ -381,7 +381,7 @@ namespace mongo { // locking, but originally the lock was set to be WRITE, so preserving the behaviour. // Lock::DBWrite dbXLock(txn->lockState(), dbname); - Client::Context ctx(dbname); + Client::Context ctx(txn, dbname); int was = _diaglog.setLevel( cmdObj.firstElement().numberInt() ); _diaglog.flush(); @@ -436,7 +436,7 @@ namespace mongo { } Lock::DBWrite dbXLock(txn->lockState(), dbname); - Client::Context ctx(nsToDrop); + Client::Context ctx(txn, nsToDrop); Database* db = ctx.db(); Collection* coll = db->getCollection( txn, nsToDrop ); @@ -534,7 +534,7 @@ namespace mongo { options.hasField("$nExtents")); Lock::DBWrite dbXLock(txn->lockState(), dbname); - Client::Context ctx(ns); + Client::Context ctx(txn, ns); // Create collection. 
return appendCommandStatus( result, @@ -648,7 +648,7 @@ namespace mongo { bool run(OperationContext* txn, const string& dbname , BSONObj& jsobj, int, string& errmsg, BSONObjBuilder& result, bool /*fromRepl*/) { Lock::GlobalWrite globalWriteLock(txn->lockState()); - Client::Context ctx(dbname); + Client::Context ctx(txn, dbname); try { return dbHolder().closeAll(txn, result, false); @@ -1032,7 +1032,7 @@ namespace mongo { const string ns = dbname + "." + jsobj.firstElement().valuestr(); Lock::DBWrite dbXLock(txn->lockState(), dbname); - Client::Context ctx( ns ); + Client::Context ctx(txn, ns ); Collection* coll = ctx.db()->getCollection( txn, ns ); if ( !coll ) { diff --git a/src/mongo/db/dbeval.cpp b/src/mongo/db/dbeval.cpp index f2076e97178..e5bd01890e7 100644 --- a/src/mongo/db/dbeval.cpp +++ b/src/mongo/db/dbeval.cpp @@ -143,7 +143,7 @@ namespace mongo { } Lock::GlobalWrite lk(txn->lockState()); - Client::Context ctx( dbname ); + Client::Context ctx(txn, dbname ); return dbEval(dbname, cmdObj, result, errmsg); } diff --git a/src/mongo/db/dbhelpers.cpp b/src/mongo/db/dbhelpers.cpp index abbeea15a86..0de77233f93 100644 --- a/src/mongo/db/dbhelpers.cpp +++ b/src/mongo/db/dbhelpers.cpp @@ -181,7 +181,7 @@ namespace mongo { Returns: true if object exists. 
*/ bool Helpers::getSingleton(OperationContext* txn, const char *ns, BSONObj& result) { - Client::Context context(ns); + Client::Context context(txn, ns); auto_ptr<Runner> runner(InternalPlanner::collectionScan(ns, context.db()->getCollection(txn, ns))); @@ -191,7 +191,7 @@ namespace mongo { } bool Helpers::getLast(OperationContext* txn, const char *ns, BSONObj& result) { - Client::Context ctx(ns); + Client::Context ctx(txn, ns); Collection* coll = ctx.db()->getCollection( txn, ns ); auto_ptr<Runner> runner(InternalPlanner::collectionScan(ns, coll, @@ -209,7 +209,7 @@ namespace mongo { BSONObj id = e.wrap(); OpDebug debug; - Client::Context context(ns); + Client::Context context(txn, ns); const NamespaceString requestNs(ns); UpdateRequest request(requestNs); @@ -227,7 +227,7 @@ namespace mongo { void Helpers::putSingleton(OperationContext* txn, const char *ns, BSONObj obj) { OpDebug debug; - Client::Context context(ns); + Client::Context context(txn, ns); const NamespaceString requestNs(ns); UpdateRequest request(requestNs); @@ -245,7 +245,7 @@ namespace mongo { void Helpers::putSingletonGod(OperationContext* txn, const char *ns, BSONObj obj, bool logTheOp) { OpDebug debug; - Client::Context context(ns); + Client::Context context(txn, ns); const NamespaceString requestNs(ns); UpdateRequest request(requestNs); @@ -546,7 +546,7 @@ namespace mongo { void Helpers::emptyCollection(OperationContext* txn, const char *ns) { - Client::Context context(ns); + Client::Context context(txn, ns); deleteObjects(txn, context.db(), ns, BSONObj(), false); } diff --git a/src/mongo/db/instance.cpp b/src/mongo/db/instance.cpp index 745ab93bd52..3edb27e5e65 100644 --- a/src/mongo/db/instance.cpp +++ b/src/mongo/db/instance.cpp @@ -615,7 +615,7 @@ namespace mongo { if ( ! broadcast && handlePossibleShardedMessage( m , 0 ) ) return; - Client::Context ctx( ns ); + Client::Context ctx(txn, ns ); UpdateResult res = executor.execute(txn, ctx.db()); @@ -654,7 +654,7 @@ namespace mongo { if ( ! 
broadcast && handlePossibleShardedMessage( m , 0 ) ) return; - Client::Context ctx(ns); + Client::Context ctx(txn, ns); long long n = executor.execute(txn, ctx.db()); lastError.getSafe()->recordDelete( n ); @@ -895,7 +895,7 @@ namespace mongo { if ( handlePossibleShardedMessage( m , 0 ) ) return; - Client::Context ctx(ns); + Client::Context ctx(txn, ns); if (multi.size() > 1) { const bool keepGoing = d.reservedField() & InsertOption_ContinueOnError; diff --git a/src/mongo/db/introspect.cpp b/src/mongo/db/introspect.cpp index 667964aee1a..87b74e6201a 100644 --- a/src/mongo/db/introspect.cpp +++ b/src/mongo/db/introspect.cpp @@ -138,8 +138,7 @@ namespace { // we're sometimes inside the lock already Lock::DBWrite lk(txn->lockState(), currentOp.getNS() ); if (dbHolder().get(txn, nsToDatabase(currentOp.getNS())) != NULL) { - - Client::Context cx(currentOp.getNS(), false); + Client::Context cx(txn, currentOp.getNS(), false); _profile(txn, c, cx.db(), currentOp, profileBufBuilder); } else { diff --git a/src/mongo/db/operation_context_impl.h b/src/mongo/db/operation_context_impl.h index 3acadbca4a5..ed681f1940a 100644 --- a/src/mongo/db/operation_context_impl.h +++ b/src/mongo/db/operation_context_impl.h @@ -25,13 +25,13 @@ * exception statement from all source files in the program, then also delete * it in the license file. */ +#pragma once #include <boost/scoped_ptr.hpp> #include <string> #include "mongo/db/operation_context.h" -#pragma once namespace mongo { diff --git a/src/mongo/db/operation_context_noop.h b/src/mongo/db/operation_context_noop.h index 527348b1537..f3964494936 100644 --- a/src/mongo/db/operation_context_noop.h +++ b/src/mongo/db/operation_context_noop.h @@ -25,13 +25,13 @@ * exception statement from all source files in the program, then also delete * it in the license file. 
*/ +#pragma once #include "mongo/db/operation_context.h" #include "mongo/db/client.h" #include "mongo/db/curop.h" - #include "mongo/db/storage/recovery_unit_noop.h" -#pragma once + namespace mongo { diff --git a/src/mongo/db/pipeline/document_source_cursor.cpp b/src/mongo/db/pipeline/document_source_cursor.cpp index 62b8b52645a..6c4d8c5d6c0 100644 --- a/src/mongo/db/pipeline/document_source_cursor.cpp +++ b/src/mongo/db/pipeline/document_source_cursor.cpp @@ -80,7 +80,7 @@ namespace mongo { // We have already validated the sharding version when we constructed the Runner // so we shouldn't check it again. Lock::DBRead lk(pExpCtx->opCtx->lockState(), _ns); - Client::Context ctx(_ns, /*doVersion=*/false); + Client::Context ctx(pExpCtx->opCtx, _ns, /*doVersion=*/false); _runner->restoreState(pExpCtx->opCtx); @@ -200,7 +200,8 @@ namespace { scoped_ptr<TypeExplain> plan; { Lock::DBRead lk(pExpCtx->opCtx->lockState(), _ns); - Client::Context ctx(_ns, /*doVersion=*/false); + Client::Context ctx(pExpCtx->opCtx, _ns, /*doVersion=*/ false); + massert(17392, "No _runner. 
Were we disposed before explained?", _runner); diff --git a/src/mongo/db/repl/master_slave.cpp b/src/mongo/db/repl/master_slave.cpp index 2b3eca3de42..bdbbc14cc41 100644 --- a/src/mongo/db/repl/master_slave.cpp +++ b/src/mongo/db/repl/master_slave.cpp @@ -196,9 +196,10 @@ namespace repl { LOG( 1 ) << "Saving repl source: " << o << endl; { - OpDebug debug; - Client::Context ctx("local.sources"); OperationContextImpl txn; + OpDebug debug; + + Client::Context ctx(&txn, "local.sources"); const NamespaceString requestNs("local.sources"); UpdateRequest request(requestNs); @@ -234,7 +235,7 @@ namespace repl { */ void ReplSource::loadAll(OperationContext* txn, SourceVector &v) { const char* localSources = "local.sources"; - Client::Context ctx(localSources); + Client::Context ctx(txn, localSources); SourceVector old = v; v.clear(); @@ -364,7 +365,7 @@ namespace repl { void ReplSource::resyncDrop( OperationContext* txn, const string& db ) { log() << "resync: dropping database " << db; - Client::Context ctx(db); + Client::Context ctx(txn, db); dropDatabase(txn, ctx.db()); } @@ -513,8 +514,9 @@ namespace repl { ___databaseIgnorer.doIgnoreUntilAfter( *i, lastTime ); incompleteCloneDbs.erase(*i); addDbNextPass.erase(*i); - Client::Context ctx(*i); - dropDatabase(txn, ctx.db() ); + + Client::Context ctx(txn, *i); + dropDatabase(txn, ctx.db()); } massert(14034, "Duplicate database names present after attempting to delete duplicates", @@ -626,11 +628,11 @@ namespace repl { if (!handleDuplicateDbName(txn, op, ns, clientName)) { return; } - + // This code executes on the slaves only, so it doesn't need to be sharding-aware since // mongos will not send requests there. That's why the last argument is false (do not do // version checking). 
- Client::Context ctx(ns, false); + Client::Context ctx(txn, ns, false); ctx.getClient()->curop()->reset(); bool empty = ctx.db()->getDatabaseCatalogEntry()->isEmpty(); @@ -661,7 +663,7 @@ namespace repl { log() << "An earlier initial clone of '" << clientName << "' did not complete, now resyncing." << endl; } save(); - Client::Context ctx(ns); + Client::Context ctx(txn, ns); nClonedThisPass++; resync(txn, ctx.db()->name()); addDbNextPass.erase(clientName); @@ -1285,7 +1287,7 @@ namespace repl { BSONObjBuilder b; b.append(_id); BSONObj result; - Client::Context ctx( ns ); + Client::Context ctx(&txn, ns); if( Helpers::findById(&txn, ctx.db(), ns, b.done(), result) ) _dummy_z += result.objsize(); // touch } diff --git a/src/mongo/db/repl/oplog.cpp b/src/mongo/db/repl/oplog.cpp index 632986c9426..18754046e2f 100644 --- a/src/mongo/db/repl/oplog.cpp +++ b/src/mongo/db/repl/oplog.cpp @@ -123,7 +123,8 @@ namespace repl { { if ( localOplogRSCollection == 0 ) { - Client::Context ctx(rsoplog); + Client::Context ctx(&txn, rsoplog); + localDB = ctx.db(); verify( localDB ); localOplogRSCollection = localDB->getCollection( &txn, rsoplog ); @@ -131,7 +132,7 @@ namespace repl { "local.oplog.rs missing. did you drop it? if so restart server", localOplogRSCollection); } - Client::Context ctx(rsoplog, localDB); + Client::Context ctx(&txn, rsoplog, localDB); checkOplogInsert( localOplogRSCollection->insertDocument( &txn, op, false ) ); /* todo: now() has code to handle clock skew. but if the skew server to server is large it will get unhappy. @@ -283,14 +284,14 @@ namespace repl { DEV verify( logNS == 0 ); // check this was never a master/slave master if ( localOplogRSCollection == 0 ) { - Client::Context ctx(rsoplog); + Client::Context ctx(txn, rsoplog); localDB = ctx.db(); verify( localDB ); localOplogRSCollection = localDB->getCollection( txn, rsoplog ); massert(13347, "local.oplog.rs missing. did you drop it? 
if so restart server", localOplogRSCollection); } - Client::Context ctx(rsoplog, localDB); + Client::Context ctx(txn, rsoplog, localDB); OplogDocWriter writer( partial, obj ); checkOplogInsert( localOplogRSCollection->insertDocument( txn, &writer, false ) ); @@ -361,14 +362,14 @@ namespace repl { } if ( localOplogMainCollection == 0 ) { - Client::Context ctx(logNS); + Client::Context ctx(txn, logNS); localDB = ctx.db(); verify( localDB ); localOplogMainCollection = localDB->getCollection(txn, logNS); verify( localOplogMainCollection ); } - Client::Context ctx(logNS , localDB); + Client::Context ctx(txn, logNS , localDB); OplogDocWriter writer( partial, obj ); checkOplogInsert( localOplogMainCollection->insertDocument( txn, &writer, false ) ); @@ -454,7 +455,7 @@ namespace repl { if( rs ) ns = rsoplog; - Client::Context ctx(ns); + Client::Context ctx(&txn, ns); Collection* collection = ctx.db()->getCollection( &txn, ns ); if ( collection ) { diff --git a/src/mongo/db/repl/repl_set_impl.cpp b/src/mongo/db/repl/repl_set_impl.cpp index 5a9d393ae4e..030da5fc532 100644 --- a/src/mongo/db/repl/repl_set_impl.cpp +++ b/src/mongo/db/repl/repl_set_impl.cpp @@ -105,7 +105,7 @@ namespace { if (*it == "local") continue; - Client::Context ctx(*it); + Client::Context ctx(&txn, *it); ctx.db()->clearTmpCollections(&txn); } } diff --git a/src/mongo/db/repl/resync.cpp b/src/mongo/db/repl/resync.cpp index d1c693d4b92..ca16a00139a 100644 --- a/src/mongo/db/repl/resync.cpp +++ b/src/mongo/db/repl/resync.cpp @@ -68,7 +68,7 @@ namespace repl { const std::string ns = parseNs(dbname, cmdObj); Lock::GlobalWrite globalWriteLock(txn->lockState()); - Client::Context ctx(ns); + Client::Context ctx(txn, ns); if (replSettings.usingReplSets()) { if (!theReplSet) { errmsg = "no replication yet active"; diff --git a/src/mongo/db/repl/rs_rollback.cpp b/src/mongo/db/repl/rs_rollback.cpp index 985d751aa8e..19a2b70fbf7 100644 --- a/src/mongo/db/repl/rs_rollback.cpp +++ b/src/mongo/db/repl/rs_rollback.cpp 
@@ -230,7 +230,7 @@ namespace repl { int getRBID(DBClientConnection*); static void syncRollbackFindCommonPoint(OperationContext* txn, DBClientConnection* them, FixUpInfo& fixUpInfo) { - Client::Context ctx(rsoplog); + Client::Context ctx(txn, rsoplog); boost::scoped_ptr<Runner> runner( InternalPlanner::collectionScan(rsoplog, @@ -499,13 +499,13 @@ namespace repl { for (set<string>::iterator it = fixUpInfo.toDrop.begin(); it != fixUpInfo.toDrop.end(); it++) { - Client::Context ctx(*it); + Client::Context ctx(txn, *it); log() << "replSet rollback drop: " << *it << rsLog; ctx.db()->dropCollection(txn, *it); } sethbmsg("rollback 4.7"); - Client::Context ctx(rsoplog); + Client::Context ctx(txn, rsoplog); Collection* oplogCollection = ctx.db()->getCollection(txn, rsoplog); uassert(13423, str::stream() << "replSet error in rollback can't find " << rsoplog, @@ -543,7 +543,7 @@ namespace repl { removeSaver.reset(new Helpers::RemoveSaver("rollback", "", doc.ns)); // todo: lots of overhead in context, this can be faster - Client::Context ctx(doc.ns); + Client::Context ctx(txn, doc.ns); // Add the doc to our rollback file BSONObj obj; diff --git a/src/mongo/db/repl/rs_sync.cpp b/src/mongo/db/repl/rs_sync.cpp index 41b7247bd52..b0a16002336 100644 --- a/src/mongo/db/repl/rs_sync.cpp +++ b/src/mongo/db/repl/rs_sync.cpp @@ -228,8 +228,9 @@ namespace repl { bool ReplSetImpl::resync(string& errmsg) { changeState(MemberState::RS_RECOVERING); - Client::Context ctx("local"); OperationContextImpl txn; + Client::Context ctx(&txn, "local"); + ctx.db()->dropCollection(&txn, "local.oplog.rs"); { boost::unique_lock<boost::mutex> lock(theReplSet->initialSyncMutex); diff --git a/src/mongo/db/repl/sync.cpp b/src/mongo/db/repl/sync.cpp index 153e7e048e2..c9f43ebf894 100644 --- a/src/mongo/db/repl/sync.cpp +++ b/src/mongo/db/repl/sync.cpp @@ -109,7 +109,7 @@ namespace repl { // should already have write lock const char *ns = o.getStringField("ns"); - Client::Context ctx(ns); + Client::Context 
ctx(txn, ns); // we don't have the object yet, which is possible on initial sync. get it. log() << "replication info adding missing object" << endl; // rare enough we can log diff --git a/src/mongo/db/repl/sync_tail.cpp b/src/mongo/db/repl/sync_tail.cpp index 9719dceeeee..ce4e3902a59 100644 --- a/src/mongo/db/repl/sync_tail.cpp +++ b/src/mongo/db/repl/sync_tail.cpp @@ -106,7 +106,7 @@ namespace repl { lk.reset(new Lock::DBWrite(txn->lockState(), ns)); } - Client::Context ctx(ns); + Client::Context ctx(txn, ns); ctx.getClient()->curop()->reset(); // For non-initial-sync, we convert updates to upserts // to suppress errors when replaying oplog entries. diff --git a/src/mongo/db/storage/mmap_v1/repair_database.cpp b/src/mongo/db/storage/mmap_v1/repair_database.cpp index f1061147a70..dc9e50f1f6c 100644 --- a/src/mongo/db/storage/mmap_v1/repair_database.cpp +++ b/src/mongo/db/storage/mmap_v1/repair_database.cpp @@ -247,7 +247,9 @@ namespace mongo { try { _txn->recoveryUnit()->syncDataAndTruncateJournal(); + globalStorageEngine->flushAllFiles(true); // need both in case journaling is disabled + MONGO_ASSERT_ON_EXCEPTION( boost::filesystem::remove_all( _path ) ); } catch ( DBException& e ) { @@ -335,7 +337,7 @@ namespace mongo { map<string,CollectionOptions> namespacesToCopy; { string ns = dbName + ".system.namespaces"; - Client::Context ctx( ns ); + Client::Context ctx(txn, ns ); Collection* coll = originalDatabase->getCollection( txn, ns ); if ( coll ) { scoped_ptr<RecordIterator> it( coll->getIterator( DiskLoc(), @@ -377,11 +379,11 @@ namespace mongo { Collection* tempCollection = NULL; { - Client::Context tempContext( ns, tempDatabase ); + Client::Context tempContext(txn, ns, tempDatabase ); tempCollection = tempDatabase->createCollection( txn, ns, options, true, false ); } - Client::Context readContext( ns, originalDatabase ); + Client::Context readContext(txn, ns, originalDatabase); Collection* originalCollection = originalDatabase->getCollection( txn, ns ); 
invariant( originalCollection ); @@ -397,7 +399,7 @@ namespace mongo { indexes.push_back( desc->infoObj() ); } - Client::Context tempContext( ns, tempDatabase ); + Client::Context tempContext(txn, ns, tempDatabase); Status status = indexBlock.init( indexes ); if ( !status.isOK() ) return status; @@ -413,7 +415,7 @@ namespace mongo { BSONObj doc = originalCollection->docFor( loc ); - Client::Context tempContext( ns, tempDatabase ); + Client::Context tempContext(txn, ns, tempDatabase); StatusWith<DiskLoc> result = tempCollection->insertDocument( txn, doc, indexBlock ); if ( !result.isOK() ) return result.getStatus(); @@ -423,7 +425,7 @@ namespace mongo { } { - Client::Context tempContext( ns, tempDatabase ); + Client::Context tempContext(txn, ns, tempDatabase); Status status = indexBlock.commit(); if ( !status.isOK() ) return status; @@ -435,7 +437,6 @@ namespace mongo { globalStorageEngine->flushAllFiles(true); // need both in case journaling is disabled txn->checkForInterrupt(false); - } // at this point if we abort, we don't want to delete new files @@ -444,7 +445,7 @@ namespace mongo { if ( repairFileDeleter.get() ) repairFileDeleter->success(); - Client::Context ctx( dbName ); + Client::Context ctx(txn, dbName); Database::closeDatabase(txn, dbName); if ( backupOriginalFiles ) { |