55 files changed, 207 insertions, 212 deletions
diff --git a/jstests/core/stages_and_sorted.js b/jstests/core/stages_and_sorted.js
index fd96ab24153..96fe82049c5 100644
--- a/jstests/core/stages_and_sorted.js
+++ b/jstests/core/stages_and_sorted.js
@@ -39,6 +39,7 @@ ixscan3 = {ixscan: {args:{name: "stages_and_sorted", keyPattern:{baz: 1},
 // Intersect foo==1 with bar==1 with baz==12.
 andix1ix2 = {andSorted: {args: {nodes: [ixscan1, ixscan2, ixscan3]}}};
 res = db.runCommand({stageDebug: andix1ix2});
+printjson(res);
 assert.eq(res.ok, 1);
 assert.eq(res.results.length, N);
diff --git a/src/mongo/db/catalog/database.cpp b/src/mongo/db/catalog/database.cpp
index ece0e04db27..19bb36e66b3 100644
--- a/src/mongo/db/catalog/database.cpp
+++ b/src/mongo/db/catalog/database.cpp
@@ -359,8 +359,6 @@ namespace mongo {
             return true;
         }

-        verify( cc().database() == this );
-
        if (!getOrCreateProfileCollection(this, true, &errmsg))
            return false;
diff --git a/src/mongo/db/client.h b/src/mongo/db/client.h
index 8f3e5817c25..2ebfce97d2b 100644
--- a/src/mongo/db/client.h
+++ b/src/mongo/db/client.h
@@ -94,7 +94,6 @@ namespace mongo {
         string clientAddress(bool includePort=false) const;
         CurOp* curop() const { return _curOp; }
         Context* getContext() const { return _context; }
-        Database* database() const { return _context ? _context->db() : 0; }
         const StringData desc() const { return _desc; }
         void setLastOp( OpTime op ) { _lastOp = op; }
         OpTime getLastOp() const { return _lastOp; }
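The change running through this whole commit is visible in the client.h hunk above: Client::database() is removed, so code can no longer reach the current Database through thread-local state, and callers must hand it in. A self-contained sketch of the before/after shape, using stand-in types rather than MongoDB's real classes:

    #include <cassert>
    #include <string>

    struct Database { std::string name; };

    // After the refactor: the dependency is explicit in the signature, so the
    // function is testable and its assumptions are visible at every call site.
    std::string qualify(Database* db, const std::string& coll) {
        assert(db);                   // mirrors the invariant(db) checks added here
        return db->name + "." + coll; // no hidden cc().database() lookup
    }

    int main() {
        Database db{"test"};
        return qualify(&db, "foo") == "test.foo" ? 0 : 1;
    }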
diff --git a/src/mongo/db/cloner.cpp b/src/mongo/db/cloner.cpp
index 06a6a37a312..aa37056f340 100644
--- a/src/mongo/db/cloner.cpp
+++ b/src/mongo/db/cloner.cpp
@@ -84,7 +84,7 @@ namespace mongo {
        we need to fix up the value in the "ns" parameter so that the name prefix is correct
        on a copy to a new name. */
-    BSONObj fixindex(BSONObj o) {
+    BSONObj fixindex(const string& newDbName, BSONObj o) {
         BSONObjBuilder b;
         BSONObjIterator i(o);
         while ( i.moreWithEOO() ) {
@@ -101,7 +101,7 @@ namespace mongo {
                 uassert( 10024 , "bad ns field for index during dbcopy", e.type() == String);
                 const char *p = strchr(e.valuestr(), '.');
                 uassert( 10025 , "bad ns field for index during dbcopy [2]", p);
-                string newname = cc().database()->name() + p;
+                string newname = newDbName + p;
                 b.append("ns", newname);
             }
             else
@@ -174,7 +174,7 @@ namespace mongo {
             BSONObj js = tmp;
             if ( isindex ) {
                 verify(nsToCollectionSubstring(from_collection) == "system.indexes");
-                js = fixindex(tmp);
+                js = fixindex(context.db()->name(), tmp);
                 indexesToBuild->push_back( js.getOwned() );
                 continue;
             }
@@ -315,7 +315,7 @@ namespace mongo {
         string temp = ctx.ctx().db()->name() + ".system.namespaces";
         BSONObj config = _conn->findOne(temp , BSON("name" << ns));
         if (config["options"].isABSONObj()) {
-            Status status = userCreateNS(ns.c_str(), config["options"].Obj(), logForRepl, 0);
+            Status status = userCreateNS(ctx.ctx().db(), ns, config["options"].Obj(), logForRepl, 0);
             if ( !status.isOK() ) {
                 errmsg = status.toString();
                 return false;
@@ -351,13 +351,13 @@ namespace mongo {
         }
         massert( 10289 , "useReplAuth is not written to replication log", !opts.useReplAuth || !opts.logForRepl );
-        string todb = cc().database()->name();
+        string todb = context.db()->name();
         stringstream a,b;
         a << "localhost:" << serverGlobalParams.port;
         b << "127.0.0.1:" << serverGlobalParams.port;
         bool masterSameProcess = ( a.str() == masterHost || b.str() == masterHost );
         if ( masterSameProcess ) {
-            if (opts.fromDB == todb && cc().database()->path() == storageGlobalParams.dbpath) {
+            if (opts.fromDB == todb && context.db()->path() == storageGlobalParams.dbpath) {
                 // guard against an "infinite" loop
                 /* if you are replicating, the local.sources config may be wrong if you get this */
                 errmsg = "can't clone from self (localhost).";
@@ -469,7 +469,7 @@ namespace mongo {
             {
                 /* we defer building id index for performance - building it in batch is much faster */
-                userCreateNS(to_name, options, opts.logForRepl, false);
+                userCreateNS(context.db(), to_name, options, opts.logForRepl, false);
             }
             LOG(1) << "\t\t cloning " << from_name << " -> " << to_name << endl;
             Query q;
@@ -485,7 +485,7 @@ namespace mongo {
             bool old = inDBRepair;
             try {
                 inDBRepair = true;
-                Collection* c = cc().database()->getCollection( to_name );
+                Collection* c = context.db()->getCollection( to_name );
                 if ( c )
                     c->getIndexCatalog()->ensureHaveIdIndex();
                 inDBRepair = old;
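fixindex() is a good example of passing only what the callee needs: instead of the whole Database fished out of thread state, it now receives just the destination database name. A minimal model of that ns rewrite, with the real function's BSON plumbing and uasserts elided:

    #include <iostream>
    #include <string>

    // Rewrite "olddb.coll" to "newdb.coll", as fixindex does for an index's
    // "ns" field during a cross-database clone. The "bad ns" error checks on
    // a missing '.' are elided here.
    std::string fixNs(const std::string& newDbName, const std::string& ns) {
        return newDbName + ns.substr(ns.find('.'));
    }

    int main() {
        std::cout << fixNs("copy", "old.people") << "\n"; // prints "copy.people"
        return 0;
    }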
diff --git a/src/mongo/db/commands/apply_ops.cpp b/src/mongo/db/commands/apply_ops.cpp
index 9e2a18a1bf8..828fb61aa69 100644
--- a/src/mongo/db/commands/apply_ops.cpp
+++ b/src/mongo/db/commands/apply_ops.cpp
@@ -126,7 +126,7 @@ namespace mongo {
                 invariant(Lock::nested());

                 Client::Context ctx(ns);
-                bool failed = applyOperation_inlock(temp, false, alwaysUpsert);
+                bool failed = applyOperation_inlock(ctx.db(), temp, false, alwaysUpsert);
                 ab.append(!failed);
                 if ( failed )
                     errors++;
diff --git a/src/mongo/db/commands/collection_to_capped.cpp b/src/mongo/db/commands/collection_to_capped.cpp
index d7304afc31d..6e9c1f64056 100644
--- a/src/mongo/db/commands/collection_to_capped.cpp
+++ b/src/mongo/db/commands/collection_to_capped.cpp
@@ -66,7 +66,7 @@ namespace mongo {
             if ( temp )
                 spec.appendBool( "temp", true );

-            Status status = userCreateNS( toNs.c_str(), spec.done(), logForReplication );
+            Status status = userCreateNS( ctx.db(), toNs, spec.done(), logForReplication );
             if ( !status.isOK() )
                 return status;
         }
@@ -154,7 +154,7 @@ namespace mongo {
             Lock::DBWrite dbXLock(dbname);
             Client::Context ctx(dbname);

-            Status status = cloneCollectionAsCapped( cc().database(), from, to, size, temp, true );
+            Status status = cloneCollectionAsCapped( ctx.db(), from, to, size, temp, true );
             return appendCommandStatus( result, status );
         }
     } cmdCloneCollectionAsCapped;
@@ -200,7 +200,7 @@ namespace mongo {
             Lock::GlobalWrite globalWriteLock;
             Client::Context ctx(dbname);

-            Database* db = cc().database();
+            Database* db = ctx.db();

             stopIndexBuilds(db, jsobj);
             BackgroundOperation::assertNoBgOpInProgForDb(dbname.c_str());
diff --git a/src/mongo/db/commands/dbhash.cpp b/src/mongo/db/commands/dbhash.cpp
index 61e75e61e61..1b6936c6877 100644
--- a/src/mongo/db/commands/dbhash.cpp
+++ b/src/mongo/db/commands/dbhash.cpp
@@ -67,7 +67,7 @@ namespace mongo {
         out->push_back(Privilege(ResourcePattern::forDatabaseName(dbname), actions));
     }

-    string DBHashCmd::hashCollection( const string& fullCollectionName, bool* fromCache ) {
+    string DBHashCmd::hashCollection( Database* db, const string& fullCollectionName, bool* fromCache ) {

         scoped_ptr<scoped_lock> cachedHashedLock;

@@ -81,7 +81,7 @@ namespace mongo {
         }

         *fromCache = false;
-        Collection* collection = cc().database()->getCollection( fullCollectionName );
+        Collection* collection = db->getCollection( fullCollectionName );
         if ( !collection )
             return "";

@@ -147,11 +147,11 @@ namespace mongo {
             }
         }

+        list<string> colls;
         const string ns = parseNs(dbname, cmdObj);
-        Client::ReadContext ctx(ns);
-        list<string> colls;
-        Database* db = cc().database();
+
+        Client::ReadContext ctx(ns);
+        Database* db = ctx.ctx().db();
         if ( db )
             db->namespaceIndex().getNamespaces( colls );
         colls.sort();
@@ -181,7 +181,7 @@
                 continue;

             bool fromCache = false;
-            string hash = hashCollection( fullCollectionName, &fromCache );
+            string hash = hashCollection( db, fullCollectionName, &fromCache );

             bb.append( shortCollectionName, hash );
diff --git a/src/mongo/db/commands/dbhash.h b/src/mongo/db/commands/dbhash.h
index 175c5df100e..3853d5532e1 100644
--- a/src/mongo/db/commands/dbhash.h
+++ b/src/mongo/db/commands/dbhash.h
@@ -59,7 +59,7 @@ namespace mongo {

         bool isCachable( const StringData& ns ) const;

-        string hashCollection( const string& fullCollectionName, bool* fromCache );
+        string hashCollection( Database* db, const string& fullCollectionName, bool* fromCache );

         map<string,string> _cachedHashed;
         mutex _cachedHashedMutex;
diff --git a/src/mongo/db/commands/distinct.cpp b/src/mongo/db/commands/distinct.cpp
index 9e52a41074a..236867d4a1f 100644
--- a/src/mongo/db/commands/distinct.cpp
+++ b/src/mongo/db/commands/distinct.cpp
@@ -92,7 +92,7 @@ namespace mongo {

             Client::ReadContext ctx(ns);

-            Collection* collection = cc().database()->getCollection( ns );
+            Collection* collection = ctx.ctx().db()->getCollection( ns );

             if (!collection) {
                 result.appendArray( "values" , BSONObj() );
diff --git a/src/mongo/db/commands/drop_indexes.cpp b/src/mongo/db/commands/drop_indexes.cpp
index 27e01a7da5a..1936a6e9e07 100644
--- a/src/mongo/db/commands/drop_indexes.cpp
+++ b/src/mongo/db/commands/drop_indexes.cpp
@@ -102,14 +102,15 @@ namespace mongo {

             Lock::DBWrite dbXLock(dbname);
             Client::Context ctx(toDeleteNs);
+            Database* db = ctx.db();

-            Collection* collection = cc().database()->getCollection( toDeleteNs );
+            Collection* collection = db->getCollection( toDeleteNs );
             if ( ! collection ) {
                 errmsg = "ns not found";
                 return false;
             }

-            stopIndexBuilds(cc().database(), jsobj);
+            stopIndexBuilds(db, jsobj);

             IndexCatalog* indexCatalog = collection->getIndexCatalog();
             anObjBuilder.appendNumber("nIndexesWas", indexCatalog->numIndexesTotal() );
@@ -214,7 +215,7 @@ namespace mongo {
             Lock::DBWrite dbXLock(dbname);
             Client::Context ctx(toDeleteNs);

-            Collection* collection = cc().database()->getCollection( toDeleteNs );
+            Collection* collection = ctx.db()->getCollection( toDeleteNs );

             if ( !collection ) {
                 errmsg = "ns not found";
@@ -223,7 +224,7 @@ namespace mongo {

             BackgroundOperation::assertNoBgOpInProgForNs( toDeleteNs );

-            std::vector<BSONObj> indexesInProg = stopIndexBuilds(cc().database(), jsobj);
+            std::vector<BSONObj> indexesInProg = stopIndexBuilds(ctx.db(), jsobj);

             list<BSONObj> all;
             auto_ptr<DBClientCursor> i = db.query( dbname + ".system.indexes" , BSON( "ns" << toDeleteNs ) , 0 , 0 , 0 , QueryOption_SlaveOk );
diff --git a/src/mongo/db/commands/geonear.cpp b/src/mongo/db/commands/geonear.cpp
index 1df9385c43e..1fbd03524df 100644
--- a/src/mongo/db/commands/geonear.cpp
+++ b/src/mongo/db/commands/geonear.cpp
@@ -79,7 +79,7 @@ namespace mongo {

             Client::ReadContext ctx(ns);

-            Database* db = cc().database();
+            Database* db = ctx.ctx().db();
             if ( !db ) {
                 errmsg = "can't find ns";
                 return false;
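The dbhash change above shows the mechanical half of the refactor: the declaration in dbhash.h, the definition in dbhash.cpp, and each call site gain the leading Database* together. The same three-part edit in miniature, with toy types standing in for the real command:

    #include <string>

    struct Database { std::string name; };

    class DBHashCmd {
    public:
        // was: string hashCollection( const string& fullCollectionName, bool* fromCache );
        std::string hashCollection(Database* db, const std::string& fullName, bool* fromCache);
    };

    std::string DBHashCmd::hashCollection(Database* db, const std::string& fullName,
                                          bool* fromCache) {
        *fromCache = false;                         // cache-miss path only, for brevity
        return db ? db->name + "/" + fullName : ""; // the real code hashes records
    }

    int main() {
        Database db{"test"};
        bool fromCache = false;
        DBHashCmd cmd;
        return cmd.hashCollection(&db, "test.c", &fromCache).empty() ? 1 : 0;
    }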
diff --git a/src/mongo/db/commands/group.cpp b/src/mongo/db/commands/group.cpp
index e0e28264b13..95059c64380 100644
--- a/src/mongo/db/commands/group.cpp
+++ b/src/mongo/db/commands/group.cpp
@@ -87,7 +87,7 @@ namespace mongo {
             return obj.extractFields( keyPattern , true ).getOwned();
         }

-        bool group( const std::string& realdbname,
+        bool group( Database* db,
                     const std::string& ns,
                     const BSONObj& query,
                     BSONObj keyPattern,
@@ -101,7 +101,7 @@ namespace mongo {

             const string userToken = ClientBasic::getCurrent()->getAuthorizationSession()
                                                               ->getAuthenticatedUserNamesToken();
-            auto_ptr<Scope> s = globalScriptEngine->getPooledScope(realdbname, "group" + userToken);
+            auto_ptr<Scope> s = globalScriptEngine->getPooledScope(db->name(), "group" + userToken);
             if ( reduceScope )
                 s->init( reduceScope );

@@ -131,7 +131,7 @@ namespace mongo {
             double keysize = keyPattern.objsize() * 3;
             double keynum = 1;

-            Collection* collection = cc().database()->getCollection( ns );
+            Collection* collection = db->getCollection( ns );

             map<BSONObj,int,BSONObjCmp> map;
             list<BSONObj> blah;
@@ -255,7 +255,7 @@ namespace mongo {
             const string ns = parseNs(dbname, jsobj);
             Client::ReadContext ctx(ns);

-            return group( dbname , ns , q ,
+            return group( ctx.ctx().db() , ns , q ,
                           key , keyf , reduce._asCode() ,
                           reduce.type() != CodeWScope ? 0 : reduce.codeWScopeScopeDataUnsafe() ,
                           initial.embeddedObject() , finalize ,
                           errmsg , result );
diff --git a/src/mongo/db/commands/index_stats.cpp b/src/mongo/db/commands/index_stats.cpp
index 3ebf31b58e9..92bb1a944da 100644
--- a/src/mongo/db/commands/index_stats.cpp
+++ b/src/mongo/db/commands/index_stats.cpp
@@ -500,7 +500,7 @@ namespace mongo {

             Client::ReadContext ctx(nss.ns());

-            const Collection* collection = cc().database()->getCollection( nss.ns() );
+            const Collection* collection = ctx.ctx().db()->getCollection( nss.ns() );
             if (!collection) {
                 errmsg = "ns not found";
                 return false;
diff --git a/src/mongo/db/commands/parallel_collection_scan.cpp b/src/mongo/db/commands/parallel_collection_scan.cpp
index 32196ceff4b..57302169ae8 100644
--- a/src/mongo/db/commands/parallel_collection_scan.cpp
+++ b/src/mongo/db/commands/parallel_collection_scan.cpp
@@ -190,7 +190,7 @@ namespace mongo {

             Client::ReadContext ctx(ns.ns());

-            Database* db = cc().database();
+            Database* db = ctx.ctx().db();
             Collection* collection = db->getCollection( ns );

             if ( !collection )
diff --git a/src/mongo/db/commands/rename_collection.cpp b/src/mongo/db/commands/rename_collection.cpp
index ba90f5120a5..e7cc87a2a13 100644
--- a/src/mongo/db/commands/rename_collection.cpp
+++ b/src/mongo/db/commands/rename_collection.cpp
@@ -182,7 +182,7 @@ namespace mongo {
                     return false;
                 }

-                Status s = cc().database()->dropCollection( target );
+                Status s = ctx.db()->dropCollection( target );
                 if ( !s.isOK() ) {
                     errmsg = s.toString();
                     restoreIndexBuildsOnSource( indexesInProg, source );
diff --git a/src/mongo/db/commands/test_commands.cpp b/src/mongo/db/commands/test_commands.cpp
index 0553f314a12..a3560d6c15d 100644
--- a/src/mongo/db/commands/test_commands.cpp
+++ b/src/mongo/db/commands/test_commands.cpp
@@ -187,10 +187,11 @@ namespace mongo {
             NamespaceString nss( dbname, coll );

             Client::WriteContext ctx( nss.ns() );
-            Collection* collection = ctx.ctx().db()->getCollection( nss.ns() );
+            Database* db = ctx.ctx().db();
+            Collection* collection = db->getCollection( nss.ns() );
             massert( 13429, "emptycapped no such collection", collection );

-            std::vector<BSONObj> indexes = stopIndexBuilds(cc().database(), cmdObj);
+            std::vector<BSONObj> indexes = stopIndexBuilds(db, cmdObj);

             Status status = collection->truncate();
             if ( !status.isOK() )
diff --git a/src/mongo/db/commands/validate.cpp b/src/mongo/db/commands/validate.cpp
index a416085d32f..2cb7c623a0b 100644
--- a/src/mongo/db/commands/validate.cpp
+++ b/src/mongo/db/commands/validate.cpp
@@ -77,8 +77,7 @@ namespace mongo {
             }

             Client::ReadContext ctx(ns_string.ns());
-
-            Database* db = cc().database();
+            Database* db = ctx.ctx().db();
             if ( !db ) {
                 errmsg = "database not found";
                 return false;
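Call sites under a read lock all follow one idiom after this commit: construct the RAII lock object first, then read the Database* off it. Client::ReadContext wraps a Client::Context, which is why the hunks above spell it ctx.ctx().db() while plain Context users write ctx.db(). A compressed model of that accessor chain, an assumed shape rather than the real classes:

    struct Database {};

    struct Context {                      // Client::Context: owns the db pointer
        Database* db() const { return _db; }
        Database* _db;
    };

    struct ReadContext {                  // Client::ReadContext: lock plus Context
        explicit ReadContext(Database* d) : _ctx{d} {}
        Context& ctx() { return _ctx; }   // hence the ctx.ctx().db() spelling above
        Context _ctx;
    };

    int main() {
        Database db;
        ReadContext ctx(&db);
        return ctx.ctx().db() == &db ? 0 : 1;
    }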
diff --git a/src/mongo/db/dbcommands.cpp b/src/mongo/db/dbcommands.cpp
index fdedbb125a1..00dfa9179fa 100644
--- a/src/mongo/db/dbcommands.cpp
+++ b/src/mongo/db/dbcommands.cpp
@@ -214,7 +214,7 @@ namespace mongo {
                 Client::Context context(dbname);

                 stopIndexBuilds(context.db(), cmdObj);
-                dropDatabase(dbname);
+                dropDatabase(context.db());

                 log() << "dropDatabase " << dbname << " finished";
             }
@@ -358,7 +358,7 @@ namespace mongo {
             Client::Context ctx(dbname);

             BSONElement e = cmdObj.firstElement();
-            result.append("was", cc().database()->getProfilingLevel());
+            result.append("was", ctx.db()->getProfilingLevel());
             result.append("slowms", serverGlobalParams.slowMS);

             int p = (int) e.number();
@@ -367,7 +367,7 @@ namespace mongo {
             if ( p == -1 )
                 ok = true;
             else if ( p >= 0 && p <= 2 ) {
-                ok = cc().database()->setProfilingLevel( p , errmsg );
+                ok = ctx.db()->setProfilingLevel( p , errmsg );
             }

             BSONElement slow = cmdObj["slowms"];
@@ -482,8 +482,9 @@ namespace mongo {

             Lock::DBWrite dbXLock(dbname);
             Client::Context ctx(nsToDrop);
+            Database* db = ctx.db();

-            Collection* coll = cc().database()->getCollection( nsToDrop );
+            Collection* coll = db->getCollection( nsToDrop );
             // If collection does not exist, short circuit and return.
             if ( !coll ) {
                 errmsg = "ns not found";
@@ -492,12 +493,12 @@ namespace mongo {

             int numIndexes = coll->getIndexCatalog()->numIndexesTotal();

-            stopIndexBuilds(cc().database(), cmdObj);
+            stopIndexBuilds(db, cmdObj);

             result.append( "ns", nsToDrop );
             result.append( "nIndexesWas", numIndexes );

-            Status s = cc().database()->dropCollection( nsToDrop );
+            Status s = db->dropCollection( nsToDrop );

             if ( s.isOK() )
                 return true;
@@ -649,7 +650,7 @@ namespace mongo {

             // Create collection.
             return appendCommandStatus( result,
-                                        userCreateNS(ns.c_str(), options, !fromRepl) );
+                                        userCreateNS(ctx.db(), ns.c_str(), options, !fromRepl) );
         }
     } cmdCreate;
@@ -1339,10 +1340,10 @@ namespace mongo {
             }

             const string ns = parseNs(dbname, jsobj);
-            Client::ReadContext ctx(ns);

-            list<string> collections;
-            Database* d = cc().database();
+            list<string> collections;
+
+            Client::ReadContext ctx(ns);
+            Database* d = ctx.ctx().db();
             if ( d && ( d->isEmpty() || d->getExtentManager().numFiles() == 0 ) )
                 d = NULL;
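Where a single function consulted cc().database() repeatedly, as in the drop and profile commands above, the diff hoists one local Database* from the context and reuses it, which keeps every use visibly tied to the same lock scope. The same micro-refactor with stand-in types:

    struct Database {
        int profilingLevel = 0;
        bool dropCollection(const char*) { return true; }
    };

    struct Context { Database* db() const { return _db; } Database* _db; };

    bool runDrop(Context& ctx, const char* ns) {
        Database* db = ctx.db();       // fetched once...
        int was = db->profilingLevel;  // ...used for the reply,
        (void)was;
        return db->dropCollection(ns); // ...and for the actual work
    }

    int main() {
        Database d;
        Context c{&d};
        return runDrop(c, "test.c") ? 0 : 1;
    }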
diff --git a/src/mongo/db/dbhelpers.cpp b/src/mongo/db/dbhelpers.cpp
index 6ffaaf6c2fc..91a433cef4a 100644
--- a/src/mongo/db/dbhelpers.cpp
+++ b/src/mongo/db/dbhelpers.cpp
@@ -60,17 +60,6 @@ namespace mongo {

     const BSONObj reverseNaturalObj = BSON( "$natural" << -1 );

-    void Helpers::ensureIndex(const char *ns, BSONObj keyPattern, bool unique, const char *name) {
-        Database* db = cc().database();
-        verify(db);
-
-        Collection* collection = db->getCollection( ns );
-        if ( !collection )
-            return;
-
-        ensureIndex( collection, keyPattern, unique, name );
-    }
-
     void Helpers::ensureIndex(Collection* collection,
                               BSONObj keyPattern, bool unique, const char *name) {
         BSONObjBuilder b;
@@ -122,11 +111,10 @@ namespace mongo {
         return DiskLoc();
     }

-    bool Helpers::findById(Client& c, const char *ns, BSONObj query, BSONObj& result ,
+    bool Helpers::findById(Database* database, const char *ns, BSONObj query, BSONObj& result ,
                            bool* nsFound , bool* indexFound ) {
         Lock::assertAtLeastReadLocked(ns);
-        Database *database = c.database();
-        verify( database );
+        invariant( database );

         Collection* collection = database->getCollection( ns );
         if ( !collection ) {
@@ -295,11 +283,11 @@ namespace mongo {
         return kpBuilder.obj();
     }

-    bool findShardKeyIndexPattern_inlock( const string& ns,
-                                          const BSONObj& shardKeyPattern,
-                                          BSONObj* indexPattern ) {
-        verify( Lock::isLocked() );
-        Collection* collection = cc().database()->getCollection( ns );
+    bool findShardKeyIndexPattern( const string& ns,
+                                   const BSONObj& shardKeyPattern,
+                                   BSONObj* indexPattern ) {
+        Client::ReadContext context( ns );
+        Collection* collection = context.ctx().db()->getCollection( ns );
         if ( !collection )
             return false;

@@ -316,13 +304,6 @@ namespace mongo {
         return true;
     }

-    bool findShardKeyIndexPattern( const string& ns,
-                                   const BSONObj& shardKeyPattern,
-                                   BSONObj* indexPattern ) {
-        Client::ReadContext context( ns );
-        return findShardKeyIndexPattern_inlock( ns, shardKeyPattern, indexPattern );
-    }
-
     long long Helpers::removeRange( const KeyRange& range,
                                     bool maxInclusive,
                                     bool secondaryThrottle,
@@ -372,7 +353,8 @@ namespace mongo {
         {
             Client::WriteContext ctx(ns);
             Collection* collection = ctx.ctx().db()->getCollection( ns );
-            if ( !collection ) break;
+            if ( !collection )
+                break;

             IndexDescriptor* desc =
                 collection->getIndexCatalog()->findIndexByKeyPattern( indexKeyPattern.toBSON() );
@@ -383,7 +365,6 @@ namespace mongo {
                                                            InternalPlanner::IXSCAN_FETCH));
             runner->setYieldPolicy(Runner::YIELD_AUTO);

-            DiskLoc rloc;
             BSONObj obj;
             Runner::RunnerState state;
@@ -422,12 +403,11 @@ namespace mongo {
                     break;
                 }
             }
-
             if ( callback )
                 callback->goingToDelete( obj );

             logOp("d", ns.c_str(), obj["_id"].wrap(), 0, 0, fromMigrate);
-            c.database()->getCollection( ns )->deleteDocument( rloc );
+            collection->deleteDocument( rloc );
             numDeleted++;
         }
diff --git a/src/mongo/db/dbhelpers.h b/src/mongo/db/dbhelpers.h
index 15aa8d96e77..86a40682ced 100644
--- a/src/mongo/db/dbhelpers.h
+++ b/src/mongo/db/dbhelpers.h
@@ -63,9 +63,6 @@ namespace mongo {
            Note: does nothing if collection does not yet exist.
         */
-        static void ensureIndex(const char *ns, BSONObj keyPattern, bool unique, const char *name);
-
-        // same as other ensureIndex
         static void ensureIndex(Collection* collection,
                                 BSONObj keyPattern, bool unique, const char *name);

@@ -94,8 +91,8 @@ namespace mongo {
          * @param foundIndex if passed in will be set to 1 if ns and index found
          * @return true if object found
          */
-        static bool findById(Client&, const char *ns, BSONObj query, BSONObj& result ,
-                             bool * nsFound = 0 , bool * indexFound = 0 );
+        static bool findById(Database* db, const char *ns, BSONObj query, BSONObj& result,
+                             bool* nsFound = 0, bool* indexFound = 0 );

         /* TODO: should this move into Collection?
          * uasserts if no _id index.
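Note that dbhelpers deletes the old Client&-based findById and the ns-based ensureIndex outright rather than deprecating them, so any caller this commit misses fails to compile instead of silently reading thread state. The new findById shape, with its defaulted out-parameters, looks roughly like this sketch (toy types, trivial body):

    #include <cstring>

    struct Database {
        bool hasCollection(const char* ns) const { return std::strlen(ns) != 0; }
    };

    bool findById(Database* db, const char* ns, int query, int& result,
                  bool* nsFound = 0, bool* indexFound = 0) {
        if (!db || !db->hasCollection(ns))
            return false;
        if (nsFound) *nsFound = true;       // optional out-params, defaulted to 0
        if (indexFound) *indexFound = true;
        result = query;                     // the real helper does an _id index lookup
        return true;
    }

    int main() {
        Database db;
        int out = 0;
        return (findById(&db, "test.c", 7, out) && out == 7) ? 0 : 1;
    }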
diff --git a/src/mongo/db/exec/stagedebug_cmd.cpp b/src/mongo/db/exec/stagedebug_cmd.cpp
index f2af118dc6d..c0596bc16c1 100644
--- a/src/mongo/db/exec/stagedebug_cmd.cpp
+++ b/src/mongo/db/exec/stagedebug_cmd.cpp
@@ -114,10 +114,9 @@ namespace mongo {
             OwnedPointerVector<MatchExpression> exprs;
             auto_ptr<WorkingSet> ws(new WorkingSet());

-            const string ns = parseNs(dbname, cmdObj);
-            Client::ReadContext ctx(ns);
+            Client::ReadContext ctx(dbname);

-            PlanStage* userRoot = parseQuery(dbname, argObj, ws.get(), &exprs);
+            PlanStage* userRoot = parseQuery(ctx.ctx().db(), argObj, ws.get(), &exprs);
             uassert(16911, "Couldn't parse plan from " + argObj.toString(), NULL != userRoot);

             // Add a fetch at the top for the user so we can get obj back for sure.
@@ -136,7 +135,7 @@ namespace mongo {
             return true;
         }

-        PlanStage* parseQuery(const string& dbname, BSONObj obj, WorkingSet* workingSet,
+        PlanStage* parseQuery(Database* db, BSONObj obj, WorkingSet* workingSet,
                               OwnedPointerVector<MatchExpression>* exprs) {
             BSONElement firstElt = obj.firstElement();
             if (!firstElt.isABSONObj()) { return NULL; }
@@ -176,8 +175,7 @@ namespace mongo {

             if ("ixscan" == nodeName) {
-                Database* db = cc().database();
-                Collection* collection = db->getCollection( dbname + "." + nodeArgs["name"].String() );
+                Collection* collection = db->getCollection( db->name() + "." + nodeArgs["name"].String() );
                 uassert(16913, "Can't find collection " + nodeArgs["name"].String(), collection);

                 IndexDescriptor* desc =
@@ -207,7 +205,7 @@ namespace mongo {
                     uassert(16922, "node of AND isn't an obj?: " + e.toString(),
                             e.isABSONObj());

-                    PlanStage* subNode = parseQuery(dbname, e.Obj(), workingSet, exprs);
+                    PlanStage* subNode = parseQuery(db, e.Obj(), workingSet, exprs);
                     uassert(16923, "Can't parse sub-node of AND: " + e.Obj().toString(),
                             NULL != subNode);

                     // takes ownership
@@ -233,7 +231,7 @@ namespace mongo {
                     uassert(16925, "node of AND isn't an obj?: " + e.toString(),
                             e.isABSONObj());

-                    PlanStage* subNode = parseQuery(dbname, e.Obj(), workingSet, exprs);
+                    PlanStage* subNode = parseQuery(db, e.Obj(), workingSet, exprs);
                     uassert(16926, "Can't parse sub-node of AND: " + e.Obj().toString(),
                             NULL != subNode);

                     // takes ownership
@@ -256,7 +254,7 @@ namespace mongo {
                 while (it.more()) {
                     BSONElement e = it.next();
                     if (!e.isABSONObj()) { return NULL; }
-                    PlanStage* subNode = parseQuery(dbname, e.Obj(), workingSet, exprs);
+                    PlanStage* subNode = parseQuery(db, e.Obj(), workingSet, exprs);
                     uassert(16936, "Can't parse sub-node of OR: " + e.Obj().toString(),
                             NULL != subNode);

                     // takes ownership
@@ -268,7 +266,7 @@ namespace mongo {
             else if ("fetch" == nodeName) {
                 uassert(16929, "Node argument must be provided to fetch",
                         nodeArgs["node"].isABSONObj());
-                PlanStage* subNode = parseQuery(dbname, nodeArgs["node"].Obj(), workingSet, exprs);
+                PlanStage* subNode = parseQuery(db, nodeArgs["node"].Obj(), workingSet, exprs);
                 return new FetchStage(workingSet, subNode, matcher);
             }
             else if ("limit" == nodeName) {
@@ -278,7 +276,7 @@ namespace mongo {
                         nodeArgs["node"].isABSONObj());
                 uassert(16931, "Num argument must be provided to limit",
                         nodeArgs["num"].isNumber());
-                PlanStage* subNode = parseQuery(dbname, nodeArgs["node"].Obj(), workingSet, exprs);
+                PlanStage* subNode = parseQuery(db, nodeArgs["node"].Obj(), workingSet, exprs);
                 return new LimitStage(nodeArgs["num"].numberInt(), workingSet, subNode);
             }
             else if ("skip" == nodeName) {
@@ -288,15 +286,15 @@ namespace mongo {
                         nodeArgs["node"].isABSONObj());
                 uassert(16933, "Num argument must be provided to skip",
                         nodeArgs["num"].isNumber());
-                PlanStage* subNode = parseQuery(dbname, nodeArgs["node"].Obj(), workingSet, exprs);
+                PlanStage* subNode = parseQuery(db, nodeArgs["node"].Obj(), workingSet, exprs);
                 return new SkipStage(nodeArgs["num"].numberInt(), workingSet, subNode);
             }
             else if ("cscan" == nodeName) {
                 CollectionScanParams params;

                 // What collection?
-                string ns = dbname + "." + nodeArgs["name"].String();
-                params.collection = cc().database()->getCollection(ns);
+                string ns = db->name() + "." + nodeArgs["name"].String();
+                params.collection = db->getCollection(ns);
                 uassert(16962, "Can't find collection " + ns, NULL != params.collection );

                 // What direction?
@@ -318,7 +316,7 @@ namespace mongo {
                         nodeArgs["node"].isABSONObj());
                 uassert(16970, "Pattern argument must be provided to sort",
                         nodeArgs["pattern"].isABSONObj());
-                PlanStage* subNode = parseQuery(dbname, nodeArgs["node"].Obj(), workingSet, exprs);
+                PlanStage* subNode = parseQuery(db, nodeArgs["node"].Obj(), workingSet, exprs);
                 SortStageParams params;
                 params.pattern = nodeArgs["pattern"].Obj();
                 return new SortStage(params, workingSet, subNode);
@@ -342,7 +340,7 @@ namespace mongo {
                     uassert(16973, "node of mergeSort isn't an obj?: " + e.toString(),
                             e.isABSONObj());

-                    PlanStage* subNode = parseQuery(dbname, e.Obj(), workingSet, exprs);
+                    PlanStage* subNode = parseQuery(db, e.Obj(), workingSet, exprs);
                     uassert(16974, "Can't parse sub-node of mergeSort: " + e.Obj().toString(),
                             NULL != subNode);

                     // takes ownership
@@ -353,7 +351,6 @@ namespace mongo {
             else if ("text" == nodeName) {
                 string ns = nodeArgs["name"].String();
                 string search = nodeArgs["search"].String();
-                Database* db = cc().database();
                 Collection* collection = db->getCollection( ns );
                 uassert(17193, "Can't find namespace " + ns, collection);
                 vector<IndexDescriptor*> idxMatches;
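parseQuery() is recursive, so the Database* must ride along on every self-call; one missed recursion would have reintroduced the global lookup. The threading pattern in isolation:

    #include <vector>

    struct Database { /* resolves collections for each stage */ };

    struct Node { std::vector<Node> children; };

    // db is threaded into every recursion, exactly like parseQuery(db, ...)
    int countStages(Database* db, const Node& n) {
        int total = 1;
        for (const Node& c : n.children)
            total += countStages(db, c);
        return total;
    }

    int main() {
        Database db;
        Node root{{Node{}, Node{{Node{}}}}};  // root with two children, one grandchild
        return countStages(&db, root) == 4 ? 0 : 1;
    }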
diff --git a/src/mongo/db/geo/haystack.cpp b/src/mongo/db/geo/haystack.cpp
index b8250ee1a30..7b8699533cb 100644
--- a/src/mongo/db/geo/haystack.cpp
+++ b/src/mongo/db/geo/haystack.cpp
@@ -73,7 +73,7 @@ namespace mongo {
             const string ns = dbname + "." + cmdObj.firstElement().valuestr();

             Client::ReadContext ctx(ns);

-            Database* db = cc().database();
+            Database* db = ctx.ctx().db();
             if ( !db ) {
                 errmsg = "can't find ns";
                 return false;
diff --git a/src/mongo/db/index_rebuilder.cpp b/src/mongo/db/index_rebuilder.cpp
index 734b7bf75b2..35969190807 100644
--- a/src/mongo/db/index_rebuilder.cpp
+++ b/src/mongo/db/index_rebuilder.cpp
@@ -62,7 +62,7 @@ namespace mongo {
              dbName < dbNames.end();
              dbName++) {
             Client::ReadContext ctx(*dbName);
-            Database* db = cc().database();
+            Database* db = ctx.ctx().db();
             db->namespaceIndex().getNamespaces(collNames, /* onlyCollections */ true);
         }
         checkNS(collNames);
diff --git a/src/mongo/db/introspect.cpp b/src/mongo/db/introspect.cpp
index 9d466d51249..4f18011d2fb 100644
--- a/src/mongo/db/introspect.cpp
+++ b/src/mongo/db/introspect.cpp
@@ -79,9 +79,9 @@ namespace {
         }
     } // namespace

-    static void _profile(const Client& c, CurOp& currentOp, BufBuilder& profileBufBuilder) {
-        Database *db = c.database();
-        DEV verify( db );
+    static void _profile(const Client& c, Database* db,
+                         CurOp& currentOp, BufBuilder& profileBufBuilder) {
+        dassert( db );

         // build object
         BSONObjBuilder b(profileBufBuilder);
@@ -137,7 +137,8 @@ namespace {
             Lock::DBWrite lk( currentOp.getNS() );
             if (dbHolder()._isLoaded(nsToDatabase(currentOp.getNS()), storageGlobalParams.dbpath)) {
                 Client::Context cx(currentOp.getNS(), storageGlobalParams.dbpath, false);
-                _profile(c, currentOp, profileBufBuilder);
+                _profile(c, cx.db(),
+                         currentOp, profileBufBuilder);
             }
             else {
                 mongo::log() << "note: not profiling because db went away - probably a close on: "
diff --git a/src/mongo/db/ops/delete_executor.cpp b/src/mongo/db/ops/delete_executor.cpp
index c5afe38374a..a7007e8df2c 100644
--- a/src/mongo/db/ops/delete_executor.cpp
+++ b/src/mongo/db/ops/delete_executor.cpp
@@ -101,12 +101,14 @@ namespace mongo {
             }
         }

+        Database* db = currentClient.get()->getContext()->db();
+
         massert(17418,
                 mongoutils::str::stream() <<
-                "dbname = " << currentClient.get()->database()->name() <<
+                "dbname = " << db->name() <<
                 "; ns = " << ns.ns(),
-                currentClient.get()->database()->name() == nsToDatabaseSubstring(ns.ns()));
-        Collection* collection = currentClient.get()->database()->getCollection(ns.ns());
+                db->name() == nsToDatabaseSubstring(ns.ns()));
+        Collection* collection = db->getCollection(ns.ns());
         if (NULL == collection) {
             return 0;
         }
diff --git a/src/mongo/db/ops/update.cpp b/src/mongo/db/ops/update.cpp
index 8bdb532e860..50ff3a30379 100644
--- a/src/mongo/db/ops/update.cpp
+++ b/src/mongo/db/ops/update.cpp
@@ -405,7 +405,7 @@ namespace mongo {
             }

             Collection* oldCollection = collection;
-            collection = cc().database()->getCollection(nsString.ns());
+            collection = cc().getContext()->db()->getCollection(nsString.ns());

             // We should not get a new pointer to the same collection...
             if (oldCollection && (oldCollection != collection))
@@ -500,7 +500,7 @@ namespace mongo {
         const NamespaceString& nsString = request.getNamespaceString();
         UpdateLifecycle* lifecycle = request.getLifecycle();
         const CurOp* curOp = cc().curop();
-        Collection* collection = cc().database()->getCollection(nsString.ns());
+        Collection* collection = cc().getContext()->db()->getCollection(nsString.ns());

         validateUpdate(nsString.ns().c_str(), request.getUpdates(), request.getQuery());

@@ -874,9 +874,10 @@ namespace mongo {

         // Only create the collection if the doc will be inserted.
         if (!collection) {
-            collection = cc().database()->getCollection(request.getNamespaceString().ns());
+            Database* db = cc().getContext()->db();
+            collection = db->getCollection(request.getNamespaceString().ns());
             if (!collection) {
-                collection = cc().database()->createCollection(request.getNamespaceString().ns());
+                collection = db->createCollection(request.getNamespaceString().ns());
             }
         }
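update.cpp, like record.cpp and the plan runners further down, is only half-converted: it still reads thread-local state, but through cc().getContext()->db() rather than the deleted Client::database(), leaving one seam to cut in a later pass. A toy model of that remaining seam, with assumed shapes:

    struct Database {};
    struct Context { Database* db() const { return _db; } Database* _db; };

    struct Client {
        Context* getContext() const { return _ctx; } // the one surviving hook
        Context* _ctx;
    };

    // stand-in for the real thread-local accessor cc()
    Client& cc(Client* set = 0) {
        static Client* current = 0;
        if (set) current = set;
        return *current;
    }

    int main() {
        Database db;
        Context ctx{&db};
        Client c{&ctx};
        cc(&c);
        return cc().getContext()->db() == &db ? 0 : 1;
    }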
diff --git a/src/mongo/db/pdfile.cpp b/src/mongo/db/pdfile.cpp
index 655c1a6041e..4cc0552ebc0 100644
--- a/src/mongo/db/pdfile.cpp
+++ b/src/mongo/db/pdfile.cpp
@@ -106,19 +106,20 @@ namespace mongo {
      * @param createDefaultIndexes - if false, defers id (and other) index creation.
      * @return true if successful
     */
-    Status userCreateNS( const StringData& ns,
+    Status userCreateNS( Database* db,
+                         const StringData& ns,
                          BSONObj options,
                          bool logForReplication,
                          bool createDefaultIndexes ) {

+        invariant( db );
+
         LOG(1) << "create collection " << ns << ' ' << options;

         if ( !NamespaceString::validCollectionComponent(ns) )
             return Status( ErrorCodes::InvalidNamespace,
                            str::stream() << "invalid ns: " << ns );

-        Database* db = cc().database();
-
         Collection* collection = db->getCollection( ns );

         if ( collection )
@@ -156,21 +157,22 @@
         for( vector<string>::iterator i = n.begin(); i != n.end(); i++ ) {
             if( *i != "local" ) {
                 Client::Context ctx(*i);
-                dropDatabase(*i);
+                dropDatabase(ctx.db());
             }
         }
     }

-    void dropDatabase(const std::string& db) {
-        LOG(1) << "dropDatabase " << db << endl;
-        Lock::assertWriteLocked(db);
-        Database *d = cc().database();
-        verify( d );
-        verify( d->name() == db );
+    void dropDatabase(Database* db ) {
+        invariant( db );
+
+        string name = db->name(); // just to have safe
+        LOG(1) << "dropDatabase " << name << endl;
+
+        Lock::assertWriteLocked( name );

-        BackgroundOperation::assertNoBgOpInProgForDb(d->name().c_str());
+        BackgroundOperation::assertNoBgOpInProgForDb(name.c_str());

-        audit::logDropDatabase( currentClient.get(), db );
+        audit::logDropDatabase( currentClient.get(), name );

         // Not sure we need this here, so removed.  If we do, we need to move it down
         // within other calls both (1) as they could be called from elsewhere and
@@ -181,10 +183,10 @@

         getDur().syncDataAndTruncateJournal();

-        Database::closeDatabase( d->name(), d->path() );
-        d = 0; // d is now deleted
+        Database::closeDatabase( name, db->path() );
+        db = 0; // d is now deleted

-        _deleteDataFiles( db );
+        _deleteDataFiles( name );
     }

 } // namespace mongo
diff --git a/src/mongo/db/pdfile.h b/src/mongo/db/pdfile.h
index ca89b1586da..0d347d09e64 100644
--- a/src/mongo/db/pdfile.h
+++ b/src/mongo/db/pdfile.h
@@ -44,9 +44,12 @@ namespace mongo {

-    void dropDatabase(const std::string& db);
+    class Database;

-    Status userCreateNS( const StringData& ns,
+    void dropDatabase(Database* db);
+
+    Status userCreateNS( Database* db,
+                         const StringData& ns,
                          BSONObj options,
                          bool logForReplication,
                          bool createDefaultIndexes = true );
diff --git a/src/mongo/db/prefetch.cpp b/src/mongo/db/prefetch.cpp
index ee1646288e8..fdcd3bc2c75 100644
--- a/src/mongo/db/prefetch.cpp
+++ b/src/mongo/db/prefetch.cpp
@@ -56,7 +56,7 @@ namespace mongo {
                                                     &prefetchDocStats );

     // prefetch for an oplog operation
-    void prefetchPagesForReplicatedOp(const BSONObj& op) {
+    void prefetchPagesForReplicatedOp(Database* db, const BSONObj& op) {
         const char *opField;
         const char *opType = op.getStringField("op");
         switch (*opType) {
@@ -75,10 +75,6 @@ namespace mongo {
         BSONObj obj = op.getObjectField(opField);
         const char *ns = op.getStringField("ns");

-        Database* db = cc().database();
-        if ( !db )
-            return;
-
         Collection* collection = db->getCollection( ns );
         if ( !collection )
             return;
@@ -185,12 +181,12 @@ namespace mongo {
             // we can probably use Client::Context here instead of ReadContext as we
             // have locked higher up the call stack already
             Client::ReadContext ctx( ns );
-            if( Helpers::findById(cc(), ns, builder.done(), result) ) {
+            if( Helpers::findById(ctx.ctx().db(), ns, builder.done(), result) ) {
                 // do we want to use Record::touch() here?  it's pretty similar.
                 volatile char _dummy_char = '\0';

                 // Touch the first word on every page in order to fault it into memory
-                for (int i = 0; i < result.objsize(); i += g_minOSPageSizeBytes) {
-                    _dummy_char += *(result.objdata() + i);
+                for (int i = 0; i < result.objsize(); i += g_minOSPageSizeBytes) {
+                    _dummy_char += *(result.objdata() + i);
                 }
                 // hit the last page, in case we missed it above
                 _dummy_char += *(result.objdata() + result.objsize() - 1);
diff --git a/src/mongo/db/prefetch.h b/src/mongo/db/prefetch.h
index d70a662a754..3e97753da23 100644
--- a/src/mongo/db/prefetch.h
+++ b/src/mongo/db/prefetch.h
@@ -32,8 +32,10 @@ namespace mongo {

     class Collection;
+    class Database;
+
     // page in both index and data pages for an op from the oplog
-    void prefetchPagesForReplicatedOp(const BSONObj& op);
+    void prefetchPagesForReplicatedOp(Database* db, const BSONObj& op);

     // page in pages needed for all index lookups on a given object
     void prefetchIndexPages(Collection *nsd, const BSONObj& obj);
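pdfile.h and prefetch.h (like master_slave.h and sync.h below) only name Database in pointer position, so a forward declaration suffices and consumers of these headers pay for no new #include. The general rule:

    class Database;                  // forward declaration: no definition needed

    void dropDatabase(Database* db); // pointers and references compile fine
    // Database makeDatabase();      // returning by value would need the full type

    // only the translation unit that dereferences db needs the definition
    class Database {
    public:
        int refCount = 0;
    };

    void dropDatabase(Database* db) {
        if (db)
            db->refCount = 0;        // dereference: full type required here
    }

    int main() {
        Database d;
        d.refCount = 3;
        dropDatabase(&d);
        return d.refCount;
    }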
diff --git a/src/mongo/db/query/cached_plan_runner.cpp b/src/mongo/db/query/cached_plan_runner.cpp
index 08c944b9b8d..60d72b4aa12 100644
--- a/src/mongo/db/query/cached_plan_runner.cpp
+++ b/src/mongo/db/query/cached_plan_runner.cpp
@@ -185,7 +185,7 @@ namespace mongo {
         // the CachedPlanRunner. In some cases, the db or collection could be dropped without kill()
        // being called on the runner (for example, timeout of a ClientCursor holding the runner).
         // XXX - this whole thing is odd
-        Database* db = cc().database();
+        Database* db = cc().getContext()->db();
         if (NULL == db) { return; }
         Collection* collection = db->getCollection(_canonicalQuery->ns());
         if (NULL == collection) { return; }
diff --git a/src/mongo/db/query/multi_plan_runner.cpp b/src/mongo/db/query/multi_plan_runner.cpp
index 4547b92e3e2..316fddc7689 100644
--- a/src/mongo/db/query/multi_plan_runner.cpp
+++ b/src/mongo/db/query/multi_plan_runner.cpp
@@ -293,7 +293,7 @@ namespace mongo {
             // cached plan runner to fall back on a different solution
             // if the best solution fails. Alternatively we could try to
             // defer cache insertion to be after the first produced result.
-            Database* db = cc().database();
+            Database* db = cc().getContext()->db();
             verify(NULL != db);
             Collection* collection = db->getCollection(_query->ns());
             verify(NULL != collection);
@@ -397,7 +397,7 @@ namespace mongo {
             const PlanStageStats* bestStats = _ranking->stats.vector()[0];
             if (PlanCache::shouldCacheQuery(*_query)
                 && (!_alreadyProduced.empty() || bestStats->common.isEOF)) {
-                Database* db = cc().database();
+                Database* db = cc().getContext()->db();
                 verify(NULL != db);
                 Collection* collection = db->getCollection(_query->ns());
                 verify(NULL != collection);
diff --git a/src/mongo/db/repair_database.cpp b/src/mongo/db/repair_database.cpp
index b28859b5237..31db42f7fa3 100644
--- a/src/mongo/db/repair_database.cpp
+++ b/src/mongo/db/repair_database.cpp
@@ -279,9 +279,6 @@ namespace mongo {

         log() << "repairDatabase " << dbName << endl;

-        invariant( cc().database()->name() == dbName );
-        invariant( cc().database()->path() == storageGlobalParams.dbpath );
-
         BackgroundOperation::assertNoBgOpInProgForDb(dbName);

         getDur().syncDataAndTruncateJournal(); // Must be done before and after repair
diff --git a/src/mongo/db/repl/is_master.h b/src/mongo/db/repl/is_master.h
index 7457364bb60..93d7047357e 100644
--- a/src/mongo/db/repl/is_master.h
+++ b/src/mongo/db/repl/is_master.h
@@ -74,11 +74,15 @@ namespace mongo {
         if( _isMaster() )
             return true;
         if ( ! dbname ) {
-            Database *database = cc().database();
-            verify( database );
-            dbname = database->name().c_str();
+            // XXX: remove this magic and make dbname required?
+            if ( cc().getContext() ) {
+                Database *database = cc().getContext()->db();
+                if ( database ) {
+                    dbname = database->name().c_str();
+                }
+            }
         }
-        return strcmp( dbname , "local" ) == 0;
+        return strcmp( dbname, "local" ) == 0;
     }
     inline bool isMasterNs( const char *ns ) {
         if ( _isMaster() )
diff --git a/src/mongo/db/repl/master_slave.cpp b/src/mongo/db/repl/master_slave.cpp
index 83e1527ebcc..fb83eb842c5 100644
--- a/src/mongo/db/repl/master_slave.cpp
+++ b/src/mongo/db/repl/master_slave.cpp
@@ -354,7 +354,7 @@ namespace mongo {
     void ReplSource::resyncDrop( const string& db ) {
         log() << "resync: dropping database " << db;
         Client::Context ctx(db);
-        dropDatabase(db);
+        dropDatabase(ctx.db());
     }

     /* grab initial copy of a database from the master */
@@ -489,7 +489,7 @@ namespace mongo {
                 incompleteCloneDbs.erase(*i);
                 addDbNextPass.erase(*i);
                 Client::Context ctx(*i);
-                dropDatabase(*i);
+                dropDatabase(ctx.db());
             }

             massert( 14034, "Duplicate database names present after attempting to delete duplicates",
@@ -497,13 +497,15 @@ namespace mongo {
         return true;
     }

-    void ReplSource::applyOperation(const BSONObj& op) {
+    void ReplSource::applyOperation(Database* db, const BSONObj& op) {
         try {
-            bool failedUpdate = applyOperation_inlock( op );
+            bool failedUpdate = applyOperation_inlock( db, op );
             if (failedUpdate) {
                 Sync sync(hostName);
                 if (sync.shouldRetry(op)) {
-                    uassert(15914, "Failure retrying initial sync update", !applyOperation_inlock(op));
+                    uassert(15914,
+                            "Failure retrying initial sync update",
+                            !applyOperation_inlock(db, op));
                 }
             }
         }
@@ -611,7 +613,7 @@ namespace mongo {
         // always apply admin command command
         // this is a bit hacky -- the semantics of replication/commands aren't well specified
         if ( strcmp( clientName, "admin" ) == 0 && *op.getStringField( "op" ) == 'c' ) {
-            applyOperation( op );
+            applyOperation( ctx.db(), op );
             return;
         }

@@ -640,7 +642,7 @@ namespace mongo {
                 save();
             }
             else {
-                applyOperation( op );
+                applyOperation( ctx.db(), op );
                 addDbNextPass.erase( clientName );
             }
         }
@@ -1254,7 +1256,7 @@ namespace mongo {
                         b.append(_id);
                         BSONObj result;
                         Client::Context ctx( ns );
-                        if( Helpers::findById(cc(), ns, b.done(), result) )
+                        if( Helpers::findById(ctx.db(), ns, b.done(), result) )
                             _dummy_z += result.objsize(); // touch
                     }
@@ -1288,7 +1290,7 @@ namespace mongo {
                     b.append(_id);
                     BSONObj result;
                     Client::ReadContext ctx( ns );
-                    if( Helpers::findById(cc(), ns, b.done(), result) )
+                    if( Helpers::findById(ctx.ctx().db(), ns, b.done(), result) )
                         _dummy_z += result.objsize(); // touch
                 }
diff --git a/src/mongo/db/repl/master_slave.h b/src/mongo/db/repl/master_slave.h
index 66f21eeb53d..24279627ab2 100644
--- a/src/mongo/db/repl/master_slave.h
+++ b/src/mongo/db/repl/master_slave.h
@@ -41,6 +41,8 @@ namespace mongo {

+    class Database;
+
     // Main entry point for master/slave at startup time.
     void startMasterSlave();

@@ -117,7 +119,7 @@ namespace mongo {
     public:
         OplogReader oplogReader;

-        void applyOperation(const BSONObj& op);
+        void applyOperation(Database* db, const BSONObj& op);
         string hostName;    // ip addr or hostname plus optionally, ":<port>"
         string _sourceName;  // a logical source name.
         string sourceName() const { return _sourceName.empty() ? "main" : _sourceName; }
diff --git a/src/mongo/db/repl/oplog.cpp b/src/mongo/db/repl/oplog.cpp
index c1308a85e43..e90542808ca 100644
--- a/src/mongo/db/repl/oplog.cpp
+++ b/src/mongo/db/repl/oplog.cpp
@@ -466,7 +466,8 @@ namespace mongo {
     /** @param fromRepl false if from ApplyOpsCmd
         @return true if was and update should have happened and the document DNE.  see replset initial sync code.
      */
-    bool applyOperation_inlock(const BSONObj& op, bool fromRepl, bool convertUpdateToUpsert) {
+    bool applyOperation_inlock(Database* db, const BSONObj& op,
+                               bool fromRepl, bool convertUpdateToUpsert) {
         LOG(3) << "applying op: " << op << endl;
         bool failedUpdate = false;

@@ -495,7 +496,7 @@ namespace mongo {

         Lock::assertWriteLocked(ns);

-        Collection* collection = cc().database()->getCollection( ns );
+        Collection* collection = db->getCollection( ns );
         IndexCatalog* indexCatalog = collection == NULL ? NULL : collection->getIndexCatalog();

         // operation type -- see logOp() comments for types
diff --git a/src/mongo/db/repl/oplog.h b/src/mongo/db/repl/oplog.h
index 7e788f36bb3..bf32e1e062c 100644
--- a/src/mongo/db/repl/oplog.h
+++ b/src/mongo/db/repl/oplog.h
@@ -90,7 +90,8 @@ namespace mongo {
      * @param convertUpdateToUpsert convert some updates to upserts for idempotency reasons
      * Returns if the op was an update that could not be applied (true on failure)
      */
-    bool applyOperation_inlock(const BSONObj& op,
-                               bool fromRepl = true,
+    bool applyOperation_inlock(Database* db,
+                               const BSONObj& op,
+                               bool fromRepl = true,
                                bool convertUpdateToUpsert = false);
 }
diff --git a/src/mongo/db/repl/rs.cpp b/src/mongo/db/repl/rs.cpp
index 0e733b7f489..9a6c79fbd5f 100644
--- a/src/mongo/db/repl/rs.cpp
+++ b/src/mongo/db/repl/rs.cpp
@@ -124,7 +124,7 @@ namespace {
                 continue;

             Client::Context ctx(*it);
-            cc().database()->clearTmpCollections();
+            ctx.db()->clearTmpCollections();
         }
     }
 }
diff --git a/src/mongo/db/repl/rs_rollback.cpp b/src/mongo/db/repl/rs_rollback.cpp
index 250c55b0a71..3a22f7a07bb 100644
--- a/src/mongo/db/repl/rs_rollback.cpp
+++ b/src/mongo/db/repl/rs_rollback.cpp
@@ -553,12 +553,12 @@ namespace mongo {
             // did we just empty the collection?  if so let's check if it even exists on the source.
             if( collection->numRecords() == 0 ) {
                 try {
-                    string sys = cc().database()->name() + ".system.namespaces";
+                    string sys = c.db()->name() + ".system.namespaces";
                     bo o = them->findOne(sys, QUERY("name"<<d.ns));
                     if( o.isEmpty() ) {
                         // we should drop
                         try {
-                            cc().database()->dropCollection(d.ns);
+                            c.db()->dropCollection(d.ns);
                         }
                         catch(...) {
                             log() << "replset error rolling back collection " << d.ns << rsLog;
diff --git a/src/mongo/db/repl/rs_sync.cpp b/src/mongo/db/repl/rs_sync.cpp
index 88275b981d2..31ac2902f89 100644
--- a/src/mongo/db/repl/rs_sync.cpp
+++ b/src/mongo/db/repl/rs_sync.cpp
@@ -128,7 +128,7 @@ namespace replset {
                 ctx.getClient()->curop()->reset();
                 // For non-initial-sync, we convert updates to upserts
                 // to suppress errors when replaying oplog entries.
-                bool ok = !applyOperation_inlock(op, true, convertUpdateToUpsert);
+                bool ok = !applyOperation_inlock(ctx.db(), op, true, convertUpdateToUpsert);
                 opsAppliedStats.increment();
                 getDur().commitIfNeeded();

@@ -219,7 +219,7 @@ namespace replset {
                 // one possible tweak here would be to stay in the read lock for this database
                 // for multiple prefetches if they are for the same database.
                 Client::ReadContext ctx(ns);
-                prefetchPagesForReplicatedOp(op);
+                prefetchPagesForReplicatedOp(ctx.ctx().db(), op);
             }
             catch (const DBException& e) {
                 LOG(2) << "ignoring exception in prefetchOp(): " << e.what() << endl;
@@ -786,7 +786,7 @@ namespace replset {
             changeState(MemberState::RS_RECOVERING);

             Client::Context ctx("local");
-            cc().database()->dropCollection("local.oplog.rs");
+            ctx.db()->dropCollection("local.oplog.rs");
             {
                 boost::unique_lock<boost::mutex> lock(theReplSet->initialSyncMutex);
                 theReplSet->initialSyncRequested = true;
diff --git a/src/mongo/db/repl/sync.cpp b/src/mongo/db/repl/sync.cpp
index f09d06355a5..87380803105 100644
--- a/src/mongo/db/repl/sync.cpp
+++ b/src/mongo/db/repl/sync.cpp
@@ -47,12 +47,12 @@ namespace mongo {
         hn = hostname;
     }

-    BSONObj Sync::getMissingDoc(const BSONObj& o) {
+    BSONObj Sync::getMissingDoc(Database* db, const BSONObj& o) {
         OplogReader missingObjReader; // why are we using OplogReader to run a non-oplog query?
         const char *ns = o.getStringField("ns");

         // capped collections
-        Collection* collection = cc().database()->getCollection(ns);
+        Collection* collection = db->getCollection(ns);
         if ( collection && collection->isCapped() ) {
             log() << "replication missing doc, but this is okay for a capped collection (" << ns << ")" << endl;
             return BSONObj();
@@ -113,7 +113,7 @@ namespace mongo {
             // we don't have the object yet, which is possible on initial sync.  get it.
             log() << "replication info adding missing object" << endl; // rare enough we can log

-            BSONObj missingObj = getMissingDoc(o);
+            BSONObj missingObj = getMissingDoc(ctx.db(), o);

             if( missingObj.isEmpty() ) {
                 log() << "replication missing object not found on source. presumably deleted later in oplog" << endl;
diff --git a/src/mongo/db/repl/sync.h b/src/mongo/db/repl/sync.h
index cf11dfbb7c2..9edca0b585b 100644
--- a/src/mongo/db/repl/sync.h
+++ b/src/mongo/db/repl/sync.h
@@ -32,7 +32,9 @@

 #include "mongo/db/jsobj.h"

-namespace mongo {
+namespace mongo {
+
+    class Database;

     class Sync {
     protected:
@@ -40,7 +42,7 @@ namespace mongo {
     public:
         Sync(const string& hostname) : hn(hostname) {}
         virtual ~Sync() {}
-        virtual BSONObj getMissingDoc(const BSONObj& o);
+        virtual BSONObj getMissingDoc(Database* db, const BSONObj& o);

         /**
          * If applyOperation_inlock should be called again after an update fails.
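Sync::getMissingDoc() is virtual, so the new parameter has to land on every override in the same change; the SyncTest override in repltests.cpp below is updated in lockstep. An override left on the old signature would silently become an unrelated function. C++11's override keyword, not available to this tree, turns that mistake into a compile error:

    struct Database {};
    struct BSONObj {};

    struct Sync {
        virtual ~Sync() {}
        virtual BSONObj getMissingDoc(Database* db, const BSONObj& o) {
            (void)db;
            return o;
        }
    };

    struct SyncTest : Sync {
        // drop the Database* here and 'override' makes the compiler object,
        // instead of quietly leaving the base version in play
        BSONObj getMissingDoc(Database* db, const BSONObj& o) override {
            (void)db;
            return o;
        }
    };

    int main() {
        Database db;
        SyncTest t;
        t.getMissingDoc(&db, BSONObj());
        return 0;
    }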
diff --git a/src/mongo/db/storage/record.cpp b/src/mongo/db/storage/record.cpp
index 2db9815902d..9da02d5d6c4 100644
--- a/src/mongo/db/storage/record.cpp
+++ b/src/mongo/db/storage/record.cpp
@@ -526,12 +526,12 @@ namespace mongo {
     Record* DiskLoc::rec() const {
         // XXX-ERH
         verify(a() != -1);
-        return cc().database()->getExtentManager().recordFor( *this );
+        return cc().getContext()->db()->getExtentManager().recordFor( *this );
     }

     Extent* DiskLoc::ext() const {
         verify( a() != -1 );
-        return cc().database()->getExtentManager().getExtent(*this);
+        return cc().getContext()->db()->getExtentManager().getExtent(*this);
     }

     BSONObj DiskLoc::obj() const {
@@ -543,7 +543,7 @@ namespace mongo {
             return;

         const Client& client = cc();
-        Database* db = client.database();
+        Database* db = client.getContext()->db();

         recordStats.accessesNotInMemory.fetchAndAdd(1);
         if ( db )
diff --git a/src/mongo/db/structure/catalog/namespace_details.cpp b/src/mongo/db/structure/catalog/namespace_details.cpp
index 5acd5d98585..b9d25980541 100644
--- a/src/mongo/db/structure/catalog/namespace_details.cpp
+++ b/src/mongo/db/structure/catalog/namespace_details.cpp
@@ -413,7 +413,7 @@ namespace mongo {
         Lock::assertWriteLocked( ns );

         string system_namespaces = nsToDatabaseSubstring(ns).toString() + ".system.namespaces";
-        Collection* coll = cc().database()->getCollection( system_namespaces );
+        Collection* coll = cc().getContext()->db()->getCollection( system_namespaces );

         DiskLoc oldLocation = Helpers::findOne( coll, BSON( "name" << ns ), false );
         fassert( 17247, !oldLocation.isNull() );
diff --git a/src/mongo/dbtests/indexupdatetests.cpp b/src/mongo/dbtests/indexupdatetests.cpp
index 2160975eb70..a1d594fb201 100644
--- a/src/mongo/dbtests/indexupdatetests.cpp
+++ b/src/mongo/dbtests/indexupdatetests.cpp
@@ -475,7 +475,7 @@ namespace IndexUpdateTests {
             // Request an interrupt.
             killCurrentOp.killAll();
             // The call is not interrupted.
-            Helpers::ensureIndex( _ns, BSON( "a" << 1 ), false, "a_1" );
+            Helpers::ensureIndex( collection(), BSON( "a" << 1 ), false, "a_1" );
             // only want to interrupt the index build
             killCurrentOp.reset();
             // The new index is listed in system.indexes because the index build completed.
diff --git a/src/mongo/dbtests/namespacetests.cpp b/src/mongo/dbtests/namespacetests.cpp
index 687d26776b6..039f8a6651f 100644
--- a/src/mongo/dbtests/namespacetests.cpp
+++ b/src/mongo/dbtests/namespacetests.cpp
@@ -1041,12 +1041,12 @@ namespace NamespaceTests {
             virtual ~Base() {
                 if ( !nsd() )
                     return;
-                cc().database()->dropCollection( ns() );
+                _context.db()->dropCollection( ns() );
             }
         protected:
             void create() {
                 Lock::GlobalWrite lk;
-                ASSERT( userCreateNS( ns(), fromjson( spec() ), false ).isOK() );
+                ASSERT( userCreateNS( db(), ns(), fromjson( spec() ), false ).isOK() );
             }
             virtual string spec() const {
                 return "{\"capped\":true,\"size\":512,\"$nExtents\":1}";
diff --git a/src/mongo/dbtests/query_multi_plan_runner.cpp b/src/mongo/dbtests/query_multi_plan_runner.cpp
index 717035c5ce1..8f3d36f0cfe 100644
--- a/src/mongo/dbtests/query_multi_plan_runner.cpp
+++ b/src/mongo/dbtests/query_multi_plan_runner.cpp
@@ -63,8 +63,8 @@ namespace QueryMultiPlanRunner {
             _client.ensureIndex(ns(), obj);
         }

-        IndexDescriptor* getIndex(const BSONObj& obj) {
-            Collection* collection = cc().database()->getCollection( ns() );
+        IndexDescriptor* getIndex(Database* db, const BSONObj& obj) {
+            Collection* collection = db->getCollection( ns() );
             return collection->getIndexCatalog()->findIndexByKeyPattern(obj);
         }

@@ -102,7 +102,7 @@ namespace QueryMultiPlanRunner {
             // Every call to work() returns something so this should clearly win (by current scoring
             // at least).
             IndexScanParams ixparams;
-            ixparams.descriptor = getIndex(BSON("foo" << 1));
+            ixparams.descriptor = getIndex(ctx.ctx().db(), BSON("foo" << 1));
             ixparams.bounds.isSimpleRange = true;
             ixparams.bounds.startKey = BSON("" << 7);
             ixparams.bounds.endKey = BSON("" << 7);
diff --git a/src/mongo/dbtests/query_single_solution_runner.cpp b/src/mongo/dbtests/query_single_solution_runner.cpp
index 2d6064b90d7..beae1e0ec7e 100644
--- a/src/mongo/dbtests/query_single_solution_runner.cpp
+++ b/src/mongo/dbtests/query_single_solution_runner.cpp
@@ -120,7 +120,7 @@ namespace QuerySingleSolutionRunner {
                                                     BSONObj& indexSpec, int start, int end) {
             // Build the index scan stage.
             IndexScanParams ixparams;
-            ixparams.descriptor = getIndex(indexSpec);
+            ixparams.descriptor = getIndex(context.db(), indexSpec);
             ixparams.bounds.isSimpleRange = true;
             ixparams.bounds.startKey = BSON("" << start);
             ixparams.bounds.endKey = BSON("" << end);
@@ -163,8 +163,8 @@ namespace QuerySingleSolutionRunner {
         }

     private:
-        IndexDescriptor* getIndex(const BSONObj& obj) {
-            Collection* collection = cc().database()->getCollection( ns() );
+        IndexDescriptor* getIndex(Database* db, const BSONObj& obj) {
+            Collection* collection = db->getCollection( ns() );
             return collection->getIndexCatalog()->findIndexByKeyPattern(obj);
         }
diff --git a/src/mongo/dbtests/query_stage_collscan.cpp b/src/mongo/dbtests/query_stage_collscan.cpp
index 47360cdf9a7..b6ff997e914 100644
--- a/src/mongo/dbtests/query_stage_collscan.cpp
+++ b/src/mongo/dbtests/query_stage_collscan.cpp
@@ -62,7 +62,7 @@ namespace QueryStageCollectionScan {
             stringstream spec;
             spec << "{\"capped\":true,\"size\":2000,\"$nExtents\":" << nExtents() << "}";

-            ASSERT( userCreateNS( ns(), fromjson( spec.str() ), false ).isOK() );
+            ASSERT( userCreateNS( db(), ns(), fromjson( spec.str() ), false ).isOK() );

             // Tell the test to add data/extents/etc.
             insertTestData();
diff --git a/src/mongo/dbtests/querytests.cpp b/src/mongo/dbtests/querytests.cpp
index 82454857fab..c8634a6c6b1 100644
--- a/src/mongo/dbtests/querytests.cpp
+++ b/src/mongo/dbtests/querytests.cpp
@@ -1044,7 +1044,7 @@ namespace QueryTests {
             Lock::GlobalWrite lk;
             Client::Context ctx( "unittests.DirectLocking" );
             client().remove( "a.b", BSONObj() );
-            ASSERT_EQUALS( "unittests", cc().database()->name() );
+            ASSERT_EQUALS( "unittests", ctx.db()->name() );
         }
         const char *ns;
     };
@@ -1171,7 +1171,8 @@ namespace QueryTests {
             Client::WriteContext ctx( "unittests" );

             // note that extents are always at least 4KB now - so this will get rounded up a bit.
-            ASSERT( userCreateNS( ns(), fromjson( "{ capped : true, size : 2000 }" ), false ).isOK() );
+            ASSERT( userCreateNS( ctx.ctx().db(), ns(),
+                                  fromjson( "{ capped : true, size : 2000 }" ), false ).isOK() );
             for ( int i=0; i<200; i++ ) {
                 insertNext();
                 // cout << count() << endl;
@@ -1230,10 +1231,10 @@ namespace QueryTests {
                                          BSON( "_id" << 20 ) , res , true ) );
             ASSERT_EQUALS( 40 , res["x"].numberInt() );

-            ASSERT( Helpers::findById( cc(), ns() , BSON( "_id" << 20 ) , res ) );
+            ASSERT( Helpers::findById( ctx.ctx().db(), ns() , BSON( "_id" << 20 ) , res ) );
             ASSERT_EQUALS( 40 , res["x"].numberInt() );

-            ASSERT( ! Helpers::findById( cc(), ns() , BSON( "_id" << 200 ) , res ) );
+            ASSERT( ! Helpers::findById( ctx.ctx().db(), ns() , BSON( "_id" << 200 ) , res ) );

             unsigned long long slow , fast;

@@ -1250,7 +1251,7 @@ namespace QueryTests {
             {
                 Timer t;
                 for ( int i=0; i<n; i++ ) {
-                    ASSERT( Helpers::findById( cc(), ns() , BSON( "_id" << 20 ) , res ) );
+                    ASSERT( Helpers::findById( ctx.ctx().db(), ns() , BSON( "_id" << 20 ) , res ) );
                 }
                 fast = t.micros();
             }
@@ -1278,7 +1279,7 @@ namespace QueryTests {

             BSONObj res;
             for ( int i=0; i<1000; i++ ) {
-                bool found = Helpers::findById( cc(), ns() , BSON( "_id" << i ) , res );
+                bool found = Helpers::findById( ctx.ctx().db(), ns() , BSON( "_id" << i ) , res );
                 ASSERT_EQUALS( i % 2 , int(found) );
             }
diff --git a/src/mongo/dbtests/queryutiltests.cpp b/src/mongo/dbtests/queryutiltests.cpp
index ae0ebdd48dc..8b75aa21b41 100644
--- a/src/mongo/dbtests/queryutiltests.cpp
+++ b/src/mongo/dbtests/queryutiltests.cpp
@@ -1587,7 +1587,7 @@ namespace QueryUtilTests {
             Client::Context _ctx;
         public:
             IndexBase() : _lk(ns()), _ctx( ns() ) , indexNum_( 0 ) {
-                userCreateNS( ns(), BSONObj(), false );
+                userCreateNS( _ctx.db(), ns(), BSONObj(), false );
             }
             ~IndexBase() {
                 if ( !nsd() )
diff --git a/src/mongo/dbtests/replsettests.cpp b/src/mongo/dbtests/replsettests.cpp
index f334cc2f3b1..e1779e2465d 100644
--- a/src/mongo/dbtests/replsettests.cpp
+++ b/src/mongo/dbtests/replsettests.cpp
@@ -316,7 +316,7 @@ namespace ReplSetTests {

         void create() {
             Client::Context c(_cappedNs);
-            ASSERT( userCreateNS( _cappedNs, fromjson( spec() ), false ).isOK() );
+            ASSERT( userCreateNS( c.db(), _cappedNs, fromjson( spec() ), false ).isOK() );
         }

         void dropCapped() {
@@ -359,7 +359,7 @@ namespace ReplSetTests {
         bool apply(const BSONObj& op) {
             Client::Context ctx( _cappedNs );
             // in an annoying twist of api, returns true on failure
-            return !applyOperation_inlock(op, true);
+            return !applyOperation_inlock(ctx.db(), op, true);
         }

         void run() {
diff --git a/src/mongo/dbtests/repltests.cpp b/src/mongo/dbtests/repltests.cpp
index 10c08845122..2dfa2f99fbc 100644
--- a/src/mongo/dbtests/repltests.cpp
+++ b/src/mongo/dbtests/repltests.cpp
@@ -172,7 +172,7 @@ namespace ReplTests {
                     if ( 0 ) {
                         mongo::unittest::log() << "op: " << *i << endl;
                     }
-                    a.applyOperation( *i );
+                    a.applyOperation( ctx.db(), *i );
                 }
             }
         }
@@ -244,6 +244,9 @@ namespace ReplTests {
             b.appendElements( fromjson( json ) );
             return b.obj();
         }
+        Database* db() {
+            return _context.db();
+        }
     private:
         static DBDirectClient client_;
     };
@@ -1384,7 +1387,7 @@ namespace ReplTests {
         bool returnEmpty;
         SyncTest() : Sync(""), returnEmpty(false) {}
         virtual ~SyncTest() {}
-        virtual BSONObj getMissingDoc(const BSONObj& o) {
+        virtual BSONObj getMissingDoc(Database* db, const BSONObj& o) {
             if (returnEmpty) {
                 BSONObj o;
                 return o;
@@ -1402,7 +1405,7 @@ namespace ReplTests {
             // this should fail because we can't connect
             try {
                 Sync badSource("localhost:123");
-                badSource.getMissingDoc(o);
+                badSource.getMissingDoc(db(), o);
             }
             catch (DBException&) {
                 threw = true;
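The test fixtures take the same medicine: repltests.cpp adds a protected db() accessor to its Base class so individual tests stop touching cc(). The fixture pattern in isolation, with stand-in types:

    struct Database {};
    struct Context {
        Database* db() const { return _db; }
        Database* _db;
    };

    // repltests.cpp adds exactly this kind of accessor to its Base fixture so
    // tests can say db() instead of reaching into thread state.
    class Base {
    public:
        explicit Base(Database* d) : _context{d} {}
    protected:
        Database* db() { return _context.db(); }
        Context _context;
    };

    class SomeTest : Base {
    public:
        explicit SomeTest(Database* d) : Base(d) {}
        bool run() { return db() != 0; }
    };

    int main() {
        Database d;
        SomeTest t(&d);
        return t.run() ? 0 : 1;
    }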
diff --git a/src/mongo/s/d_migrate.cpp b/src/mongo/s/d_migrate.cpp
index 0f5088fbc66..4a789d3067f 100644
--- a/src/mongo/s/d_migrate.cpp
+++ b/src/mongo/s/d_migrate.cpp
@@ -320,7 +320,8 @@ namespace mongo {
                 break;

             case 'u':
-                if ( ! Helpers::findById( cc() , _ns.c_str() , ide.wrap() , it ) ) {
+                Client::Context ctx( _ns );
+                if ( ! Helpers::findById( ctx.db(), _ns.c_str(), ide.wrap(), it ) ) {
                     warning() << "logOpForSharding couldn't find: " << ide
                               << " even though should have" << migrateLog;
                     return;
@@ -335,7 +336,7 @@ namespace mongo {
             _memoryUsed += ide.size() + 5;
         }

-        void xfer( list<BSONObj> * l , BSONObjBuilder& b , const char * name , long long& size , bool explode ) {
+        void xfer( Database* db, list<BSONObj> * l , BSONObjBuilder& b , const char * name , long long& size , bool explode ) {
             const long long maxSize = 1024 * 1024;

             if ( l->size() == 0 || size > maxSize )
@@ -349,7 +350,7 @@ namespace mongo {
                 BSONObj t = *i;
                 if ( explode ) {
                     BSONObj it;
-                    if ( Helpers::findById( cc() , _ns.c_str() , t, it ) ) {
+                    if ( Helpers::findById( db , _ns.c_str() , t, it ) ) {
                         arr.append( it );
                         size += it.objsize();
                     }
@@ -379,8 +380,8 @@ namespace mongo {
             {
                 Client::ReadContext cx( _ns );

-                xfer( &_deleted , b , "deleted" , size , false );
-                xfer( &_reload , b , "reload" , size , true );
+                xfer( cx.ctx().db(), &_deleted, b, "deleted", size, false );
+                xfer( cx.ctx().db(), &_reload, b, "reload", size, true );
             }

             b.append( "size" , size );
@@ -1652,7 +1653,7 @@ namespace mongo {
                 string system_namespaces = nsToDatabase(ns) + ".system.namespaces";
                 BSONObj entry = conn->findOne( system_namespaces, BSON( "name" << ns ) );
                 if ( entry["options"].isABSONObj() ) {
-                    Status status = userCreateNS( ns, entry["options"].Obj(), true, 0 );
+                    Status status = userCreateNS( db, ns, entry["options"].Obj(), true, 0 );
                     if ( !status.isOK() ) {
                         warning() << "failed to create collection [" << ns << "] "
                                   << " with options: " << status;
@@ -1778,7 +1779,7 @@ namespace mongo {
                     Client::WriteContext cx( ns );

                     BSONObj localDoc;
-                    if ( willOverrideLocalId( o, &localDoc ) ) {
+                    if ( willOverrideLocalId( cx.ctx().db(), o, &localDoc ) ) {
                         string errMsg =
                             str::stream() << "cannot migrate chunk, local document "
                                           << localDoc
@@ -1981,7 +1982,7 @@ namespace mongo {

                         // do not apply deletes if they do not belong to the chunk being migrated
                         BSONObj fullObj;
-                        if ( Helpers::findById( cc() , ns.c_str() , id, fullObj ) ) {
+                        if ( Helpers::findById( cx.ctx().db(), ns.c_str(), id, fullObj ) ) {
                             if ( ! isInRange( fullObj , min , max , shardKeyPattern ) ) {
                                 log() << "not applying out of range deletion: " << fullObj << migrateLog;
@@ -2014,7 +2015,7 @@ namespace mongo {
                         BSONObj it = i.next().Obj();
                         BSONObj localDoc;

-                        if ( willOverrideLocalId( it, &localDoc ) ) {
+                        if ( willOverrideLocalId( cx.ctx().db(), it, &localDoc ) ) {
                             string errMsg =
                                 str::stream() << "cannot migrate chunk, local document "
                                               << localDoc
@@ -2044,10 +2045,10 @@ namespace mongo {
          * Must be in WriteContext to avoid races and DBHelper errors.
          * TODO: Could optimize this check out if sharding on _id.
          */
-        bool willOverrideLocalId( BSONObj remoteDoc, BSONObj* localDoc ) {
+        bool willOverrideLocalId( Database* db, BSONObj remoteDoc, BSONObj* localDoc ) {

             *localDoc = BSONObj();
-            if ( Helpers::findById( cc(), ns.c_str(), remoteDoc, *localDoc ) ) {
+            if ( Helpers::findById( db, ns.c_str(), remoteDoc, *localDoc ) ) {
                 return !isInRange( *localDoc , min , max , shardKeyPattern );
             }