| author | Eliot Horowitz <eliot@10gen.com> | 2014-06-17 23:21:25 -0400 |
|---|---|---|
| committer | Eliot Horowitz <eliot@10gen.com> | 2014-06-18 14:43:30 -0400 |
| commit | 4e13ac7a63b747fc0b990d65193c737215bb9e4f (patch) | |
| tree | 76fb95e2c11aafa258c9d9ce0b5ed659e96e63fa | |
| parent | e11156fe20421c0f09758cfc6355964159640616 (diff) | |
| download | mongo-4e13ac7a63b747fc0b990d65193c737215bb9e4f.tar.gz | |
SERVER-13635: remove path concept from DatabaseHolder, and move repair and listDatabase into storageEngine
38 files changed, 203 insertions, 216 deletions
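In short, this patch makes database enumeration and repair the storage engine's job and drops the per-path keying from DatabaseHolder. Below is a minimal, self-contained sketch of the resulting shape: the method signatures mirror the storage_engine.h and database_holder.h hunks further down, while Status, OperationContext and Database are toy stand-ins, and the SimpleMutex guard plus closeAll() are omitted for brevity.

```cpp
// Sketch only: interfaces as they stand after this patch, condensed from the diff below.
// Status/OperationContext/Database are stand-ins so the sketch compiles on its own.
#include <map>
#include <set>
#include <string>
#include <vector>

struct Status { bool ok; };          // stand-in for mongo::Status
struct OperationContext {};          // stand-in for mongo::OperationContext
struct Database {};                  // stand-in for mongo::Database

// listDatabases() and repairDatabase() move onto the storage engine interface.
class StorageEngine {
public:
    virtual ~StorageEngine() {}

    virtual void listDatabases(std::vector<std::string>* out) const = 0;

    virtual int flushAllFiles(bool sync) = 0;

    virtual Status repairDatabase(OperationContext* txn,
                                  const std::string& dbName,
                                  bool preserveClonedFilesOnFailure = false,
                                  bool backupOriginalFiles = false) = 0;
};

// DatabaseHolder now maps dbname -> Database directly; the old
// path -> (dbname -> Database) nesting and the _size counter are gone.
class DatabaseHolder {
    typedef std::map<std::string, Database*> DBs;
    DBs _dbs;
public:
    Database* get(OperationContext* txn, const std::string& ns) const;
    Database* getOrCreate(OperationContext* txn, const std::string& ns, bool& justCreated);
    void erase(OperationContext* txn, const std::string& ns);

    void getAllShortNames(std::set<std::string>& all) const {
        for (DBs::const_iterator j = _dbs.begin(); j != _dbs.end(); ++j)
            all.insert(j->first);
    }
};
```

Keying the holder by database name alone works because the MMAPV1DatabaseCatalogEntry constructor now receives storageGlobalParams.dbpath directly (along with a new transient flag used by repair), so callers no longer thread a path through the catalog layer.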
diff --git a/src/mongo/SConscript b/src/mongo/SConscript index e968a3900b0..e3542eaf830 100644 --- a/src/mongo/SConscript +++ b/src/mongo/SConscript @@ -559,6 +559,7 @@ serverOnlyFiles = [ "db/curop.cpp", "db/storage/mmap_v1/dur_recovery_unit.cpp", "db/storage/mmap_v1/mmap_v1_database_catalog_entry.cpp", "db/storage/mmap_v1/mmap_v1_engine.cpp", + "db/storage/mmap_v1/repair_database.cpp", "db/operation_context_impl.cpp", "db/storage/mmap_v1/mmap_v1_extent_manager.cpp", "db/introspect.cpp", @@ -627,7 +628,6 @@ serverOnlyFiles = [ "db/curop.cpp", "db/catalog/database_holder.cpp", "db/background.cpp", "db/pdfile.cpp", - "db/repair_database.cpp", "db/structure/catalog/index_details.cpp", "db/index_builder.cpp", "db/index_rebuilder.cpp", diff --git a/src/mongo/db/auth/authz_manager_external_state_d.cpp b/src/mongo/db/auth/authz_manager_external_state_d.cpp index 0878364f8a3..2079ffb4e42 100644 --- a/src/mongo/db/auth/authz_manager_external_state_d.cpp +++ b/src/mongo/db/auth/authz_manager_external_state_d.cpp @@ -40,6 +40,7 @@ #include "mongo/db/instance.h" #include "mongo/db/jsobj.h" #include "mongo/db/operation_context_impl.h" +#include "mongo/db/storage/storage_engine.h" #include "mongo/util/assert_util.h" #include "mongo/util/mongoutils/str.h" @@ -99,8 +100,7 @@ namespace mongo { Status AuthzManagerExternalStateMongod::getAllDatabaseNames( OperationContext* txn, std::vector<std::string>* dbnames) { - Lock::GlobalRead lk(txn->lockState()); - getDatabaseNames(*dbnames); + globalStorageEngine->listDatabases( dbnames ); return Status::OK(); } diff --git a/src/mongo/db/catalog/collection_cursor_cache.cpp b/src/mongo/db/catalog/collection_cursor_cache.cpp index 2059ab2b983..e9962c9441f 100644 --- a/src/mongo/db/catalog/collection_cursor_cache.cpp +++ b/src/mongo/db/catalog/collection_cursor_cache.cpp @@ -188,7 +188,7 @@ namespace mongo { } Lock::DBRead lock(txn->lockState(), ns); - Database* db = dbHolder().get(txn, ns, storageGlobalParams.dbpath); + Database* db = dbHolder().get(txn, ns); if ( !db ) return false; Client::Context context( ns, db ); @@ -218,7 +218,7 @@ namespace mongo { for ( unsigned i = 0; i < todo.size(); i++ ) { const string& ns = todo[i]; Lock::DBRead lock(txn->lockState(), ns); - Database* db = dbHolder().get(txn, ns, storageGlobalParams.dbpath); + Database* db = dbHolder().get(txn, ns); if ( !db ) continue; Client::Context context( ns, db ); diff --git a/src/mongo/db/catalog/database.cpp b/src/mongo/db/catalog/database.cpp index 6143bdb9c0a..2334ca24934 100644 --- a/src/mongo/db/catalog/database.cpp +++ b/src/mongo/db/catalog/database.cpp @@ -50,6 +50,7 @@ #include "mongo/db/pdfile.h" #include "mongo/db/server_parameters.h" #include "mongo/db/storage_options.h" +#include "mongo/db/storage/storage_engine.h" #include "mongo/db/catalog/collection.h" namespace mongo { @@ -124,13 +125,13 @@ namespace mongo { /*static*/ - string Database::duplicateUncasedName(const string &name, const string &path, set< string > *duplicates) { + string Database::duplicateUncasedName(const string &name, set< string > *duplicates) { if ( duplicates ) { duplicates->clear(); } vector<string> others; - getDatabaseNames( others , path ); + globalStorageEngine->listDatabases( &others ); set<string> allShortNames; dbHolder().getAllShortNames(allShortNames); diff --git a/src/mongo/db/catalog/database.h b/src/mongo/db/catalog/database.h index 72009ffd4c1..cf944668a80 100644 --- a/src/mongo/db/catalog/database.h +++ b/src/mongo/db/catalog/database.h @@ -64,8 +64,11 @@ namespace mongo { /* you must use this 
to close - there is essential code in this method that is not in the ~Database destructor. thus the destructor is private. this could be cleaned up one day... */ - static void closeDatabase( - OperationContext* txn, const std::string& db, const std::string& path); + static void closeDatabase(OperationContext* txn, + const std::string& db); + + // do not use! + ~Database(); // closes files and other cleanup see below. const std::string& name() const { return _name; } @@ -129,7 +132,6 @@ namespace mongo { // TODO move??? */ static string duplicateUncasedName( const std::string &name, - const std::string &path, std::set< std::string > *duplicates = 0 ); static Status validateDBName( const StringData& dbname ); @@ -141,8 +143,6 @@ namespace mongo { void _clearCollectionCache_inlock( const StringData& fullns ); - ~Database(); // closes files and other cleanup see below. - const std::string _name; // "alleyinsider" boost::scoped_ptr<DatabaseCatalogEntry> _dbEntry; diff --git a/src/mongo/db/catalog/database_holder.cpp b/src/mongo/db/catalog/database_holder.cpp index 35b0b25c797..c58a0ac0f38 100644 --- a/src/mongo/db/catalog/database_holder.cpp +++ b/src/mongo/db/catalog/database_holder.cpp @@ -47,25 +47,21 @@ namespace mongo { Database* DatabaseHolder::get(OperationContext* txn, - const std::string& ns, - const std::string& path) const { + const std::string& ns) const { txn->lockState()->assertAtLeastReadLocked(ns); SimpleMutex::scoped_lock lk(_m); - Paths::const_iterator x = _paths.find( path ); - if ( x == _paths.end() ) - return 0; - const DBs& m = x->second; const std::string db = _todb( ns ); - DBs::const_iterator it = m.find(db); - if ( it != m.end() ) + DBs::const_iterator it = _dbs.find(db); + if ( it != _dbs.end() ) return it->second; return NULL; } - Database* DatabaseHolder::getOrCreate( - OperationContext* txn, const string& ns, const string& path, bool& justCreated) { + Database* DatabaseHolder::getOrCreate(OperationContext* txn, + const string& ns, + bool& justCreated) { const string dbname = _todb( ns ); invariant(txn->lockState()->isAtLeastReadLocked(dbname)); @@ -76,10 +72,9 @@ namespace mongo { { SimpleMutex::scoped_lock lk(_m); - DBs& m = _paths[path]; { - DBs::iterator i = m.find(dbname); - if( i != m.end() ) { + DBs::iterator i = _dbs.find(dbname); + if( i != _dbs.end() ) { justCreated = false; return i->second; } @@ -90,10 +85,8 @@ namespace mongo { // perhaps just log it, which is what we do here with the "> 40" : bool cant = !txn->lockState()->isWriteLocked(ns); if( logger::globalLogDomain()->shouldLog(logger::LogSeverity::Debug(1)) || - m.size() > 40 || cant || DEBUG_BUILD ) { - log() << "opening db: " - << (path == storageGlobalParams.dbpath ? "" : path) << ' ' << dbname - << endl; + _dbs.size() > 40 || cant || DEBUG_BUILD ) { + log() << "opening db: " << dbname; } massert(15927, "can't open database in a read lock. if db was just closed, consider retrying the query. 
might otherwise indicate an internal error", !cant); } @@ -107,43 +100,36 @@ namespace mongo { dbname, justCreated, new MMAPV1DatabaseCatalogEntry(txn, - dbname, - path, - storageGlobalParams.directoryperdb)); + dbname, + storageGlobalParams.dbpath, + storageGlobalParams.directoryperdb, + false)); { SimpleMutex::scoped_lock lk(_m); - DBs& m = _paths[path]; - verify( m[dbname] == 0 ); - m[dbname] = db; - _size++; + _dbs[dbname] = db; } return db; } void DatabaseHolder::erase(OperationContext* txn, - const std::string& ns, - const std::string& path) { + const std::string& ns) { invariant(txn->lockState()->isW()); SimpleMutex::scoped_lock lk(_m); - DBs& m = _paths[path]; - _size -= (int)m.erase(_todb(ns)); + _dbs.erase(_todb(ns)); } - bool DatabaseHolder::closeAll( - OperationContext* txn, const string& path, BSONObjBuilder& result, bool force) { - log() << "DatabaseHolder::closeAll path:" << path << endl; + bool DatabaseHolder::closeAll(OperationContext* txn, + BSONObjBuilder& result, + bool force) { invariant(txn->lockState()->isW()); getDur().commitNow(txn); // bad things happen if we close a DB with outstanding writes - map<string,Database*>& m = _paths[path]; - _size -= m.size(); - set< string > dbs; - for ( map<string,Database*>::iterator i = m.begin(); i != m.end(); i++ ) { + for ( map<string,Database*>::iterator i = _dbs.begin(); i != _dbs.end(); i++ ) { dbs.insert( i->first ); } @@ -152,8 +138,8 @@ namespace mongo { int nNotClosed = 0; for( set< string >::iterator i = dbs.begin(); i != dbs.end(); ++i ) { string name = *i; - LOG(2) << "DatabaseHolder::closeAll path:" << path << " name:" << name << endl; - Client::Context ctx( name , path ); + LOG(2) << "DatabaseHolder::closeAll name:" << name; + Client::Context ctx( name ); if( !force && BackgroundOperation::inProgForDb(name) ) { log() << "WARNING: can't close database " << name @@ -162,7 +148,7 @@ namespace mongo { nNotClosed++; } else { - Database::closeDatabase(txn, name.c_str(), path); + Database::closeDatabase(txn, name.c_str()); bb.append( bb.numStr( n++ ) , name ); } } diff --git a/src/mongo/db/catalog/database_holder.h b/src/mongo/db/catalog/database_holder.h index 5ffd4534744..07f40f47b85 100644 --- a/src/mongo/db/catalog/database_holder.h +++ b/src/mongo/db/catalog/database_holder.h @@ -34,52 +34,40 @@ #include "mongo/db/d_concurrency.h" #include "mongo/db/namespace_string.h" -namespace mongo { +namespace mongo { /** - * path + dbname -> Database + * dbname -> Database */ class DatabaseHolder { typedef std::map<std::string,Database*> DBs; - typedef std::map<std::string,DBs> Paths; // todo: we want something faster than this if called a lot: mutable SimpleMutex _m; - Paths _paths; - int _size; + DBs _dbs; public: - DatabaseHolder() : _m("dbholder"),_size(0) { } + DatabaseHolder() : _m("dbholder"){ } Database* get(OperationContext* txn, - const std::string& ns, - const std::string& path) const; + const std::string& ns) const; Database* getOrCreate(OperationContext* txn, const std::string& ns, - const std::string& path, bool& justCreated); - void erase(OperationContext* txn, const std::string& ns, const std::string& path); + void erase(OperationContext* txn, const std::string& ns); /** @param force - force close even if something underway - use at shutdown */ bool closeAll(OperationContext* txn, - const std::string& path, BSONObjBuilder& result, bool force); - // "info" as this is informational only could change on you if you are not write locked - int sizeInfo() const { return _size; } - /** - * gets all unique db names, 
ignoring paths * need some lock */ void getAllShortNames( std::set<std::string>& all ) const { SimpleMutex::scoped_lock lk(_m); - for ( Paths::const_iterator i=_paths.begin(); i!=_paths.end(); i++ ) { - DBs m = i->second; - for( DBs::const_iterator j=m.begin(); j!=m.end(); j++ ) { - all.insert( j->first ); - } + for( DBs::const_iterator j=_dbs.begin(); j!=_dbs.end(); j++ ) { + all.insert( j->first ); } } diff --git a/src/mongo/db/client.cpp b/src/mongo/db/client.cpp index 75812b5214a..2abfc67e358 100644 --- a/src/mongo/db/client.cpp +++ b/src/mongo/db/client.cpp @@ -164,8 +164,6 @@ namespace mongo { BSONObj CachedBSONObjBase::_tooBig = fromjson("{\"$msg\":\"query not recording (too large)\"}"); Client::Context::Context(const std::string& ns , Database * db) : _client( currentClient.get() ), - _path(storageGlobalParams.dbpath), // is this right? could be a different db? - // may need a dassert for this _justCreated(false), _doVersion( true ), _ns( ns ), @@ -174,9 +172,8 @@ namespace mongo { } - Client::Context::Context(const string& ns, const std::string& path, bool doVersion) : + Client::Context::Context(const string& ns, bool doVersion) : _client( currentClient.get() ), - _path( path ), _justCreated(false), // set for real in finishInit _doVersion(doVersion), _ns( ns ), @@ -192,9 +189,9 @@ namespace mongo { OperationContext* txn, const string& ns, bool doVersion) { { _lk.reset(new Lock::DBRead(txn->lockState(), ns)); - Database *db = dbHolder().get(txn, ns, storageGlobalParams.dbpath); + Database *db = dbHolder().get(txn, ns); if( db ) { - _c.reset(new Context(storageGlobalParams.dbpath, ns, db, doVersion)); + _c.reset(new Context(ns, db, doVersion)); return; } } @@ -205,18 +202,18 @@ namespace mongo { if (txn->lockState()->isW()) { // write locked already DEV RARELY log() << "write locked on ReadContext construction " << ns << endl; - _c.reset(new Context(ns, storageGlobalParams.dbpath, doVersion)); + _c.reset(new Context(ns, doVersion)); } else if (!txn->lockState()->isRecursive()) { _lk.reset(0); { Lock::GlobalWrite w(txn->lockState()); - Context c(ns, storageGlobalParams.dbpath, doVersion); + Context c(ns, doVersion); } // db could be closed at this interim point -- that is ok, we will throw, and don't mind throwing. 
_lk.reset(new Lock::DBRead(txn->lockState(), ns)); - _c.reset(new Context(ns, storageGlobalParams.dbpath, doVersion)); + _c.reset(new Context(ns, doVersion)); } else { uasserted(15928, str::stream() << "can't open a database from a nested read lock " << ns); @@ -231,7 +228,7 @@ namespace mongo { Client::WriteContext::WriteContext( OperationContext* opCtx, const std::string& ns, bool doVersion) : _lk(opCtx->lockState(), ns), - _c(ns, storageGlobalParams.dbpath, doVersion) { + _c(ns, doVersion) { } @@ -255,9 +252,8 @@ namespace mongo { } // invoked from ReadContext - Client::Context::Context(const string& path, const string& ns, Database *db, bool doVersion) : + Client::Context::Context(const string& ns, Database *db, bool doVersion) : _client( currentClient.get() ), - _path( path ), _justCreated(false), _doVersion( doVersion ), _ns( ns ), @@ -270,7 +266,7 @@ namespace mongo { void Client::Context::_finishInit() { OperationContextImpl txn; // TODO get rid of this once reads require transactions - _db = dbHolder().getOrCreate(&txn, _ns, _path, _justCreated); + _db = dbHolder().getOrCreate(&txn, _ns, _justCreated); invariant(_db); if( _doVersion ) checkNotStale(); diff --git a/src/mongo/db/client.h b/src/mongo/db/client.h index 6775bf579e7..13dcaab77c9 100644 --- a/src/mongo/db/client.h +++ b/src/mongo/db/client.h @@ -159,7 +159,7 @@ namespace mongo { class Context : boost::noncopyable { public: /** this is probably what you want */ - Context(const std::string& ns, const std::string& path=storageGlobalParams.dbpath, + Context(const std::string& ns, bool doVersion = true); /** note: this does not call finishInit -- i.e., does not call @@ -169,15 +169,12 @@ namespace mongo { Context(const std::string& ns , Database * db); // used by ReadContext - Context(const std::string& path, const std::string& ns, Database *db, bool doVersion = true); + Context(const std::string& ns, Database *db, bool doVersion ); ~Context(); Client* getClient() const { return _client; } Database* db() const { return _db; } const char * ns() const { return _ns.c_str(); } - bool equals(const std::string& ns, const std::string& path=storageGlobalParams.dbpath) const { - return _ns == ns && _path == path; - } /** @return if the db was created by this Context */ bool justCreated() const { return _justCreated; } @@ -197,7 +194,6 @@ namespace mongo { void checkNsAccess( bool doauth ); void checkNsAccess( bool doauth, int lockState ); Client * const _client; - const std::string _path; bool _justCreated; bool _doVersion; const std::string _ns; diff --git a/src/mongo/db/cloner.cpp b/src/mongo/db/cloner.cpp index 2ec997888e2..67da6bba6d6 100644 --- a/src/mongo/db/cloner.cpp +++ b/src/mongo/db/cloner.cpp @@ -106,8 +106,7 @@ namespace mongo { // Make sure database still exists after we resume from the temp release bool unused; - Database* db = dbHolder().getOrCreate( - txn, _dbName, storageGlobalParams.dbpath, unused); + Database* db = dbHolder().getOrCreate(txn, _dbName, unused); bool createdCollection = false; Collection* collection = NULL; @@ -231,7 +230,7 @@ namespace mongo { // We are under lock here again, so reload the database in case it may have disappeared // during the temp release bool unused; - Database* db = dbHolder().getOrCreate(txn, toDBName, storageGlobalParams.dbpath, unused); + Database* db = dbHolder().getOrCreate(txn, toDBName, unused); if ( indexesToBuild.size() ) { for (list<BSONObj>::const_iterator i = indexesToBuild.begin(); @@ -305,7 +304,7 @@ namespace mongo { const string dbName = nss.db().toString(); bool 
unused; - Database* db = dbHolder().getOrCreate(txn, dbName, storageGlobalParams.dbpath, unused); + Database* db = dbHolder().getOrCreate(txn, dbName, unused); // config string temp = dbName + ".system.namespaces"; @@ -482,8 +481,7 @@ namespace mongo { // throw if the database has changed in between, but for now preserve the existing // behaviour. bool unused; - Database* db = - dbHolder().getOrCreate(txn, toDBName, storageGlobalParams.dbpath, unused); + Database* db = dbHolder().getOrCreate(txn, toDBName, unused); /* we defer building id index for performance - building it in batch is much faster */ Status createStatus = userCreateNS( txn, db, to_name, options, diff --git a/src/mongo/db/commands/fsync.cpp b/src/mongo/db/commands/fsync.cpp index 27abba2d754..2f40aac1fe2 100644 --- a/src/mongo/db/commands/fsync.cpp +++ b/src/mongo/db/commands/fsync.cpp @@ -136,8 +136,6 @@ namespace mongo { Lock::GlobalWrite w(txn->lockState()); getDur().commitNow(txn); } - // question : is it ok this is not in the dblock? i think so but this is a change from past behavior, - // please advise. result.append( "numFiles" , globalStorageEngine->flushAllFiles( sync ) ); } return 1; diff --git a/src/mongo/db/commands/mr.cpp b/src/mongo/db/commands/mr.cpp index 04e80d043df..a92f99e42ef 100644 --- a/src/mongo/db/commands/mr.cpp +++ b/src/mongo/db/commands/mr.cpp @@ -1290,7 +1290,7 @@ namespace mongo { // This context does no version check, safe b/c we checked earlier and have an // open cursor - scoped_ptr<Client::Context> ctx(new Client::Context(config.ns, storageGlobalParams.dbpath, false)); + scoped_ptr<Client::Context> ctx(new Client::Context(config.ns, false)); const NamespaceString nss(config.ns); const WhereCallbackReal whereCallback(nss.db()); @@ -1349,7 +1349,7 @@ namespace mongo { lock.reset(); state.reduceAndSpillInMemoryStateIfNeeded(); lock.reset(new Lock::DBRead(txn->lockState(), config.ns)); - ctx.reset(new Client::Context(config.ns, storageGlobalParams.dbpath, false)); + ctx.reset(new Client::Context(config.ns, false)); reduceTime += t.micros(); diff --git a/src/mongo/db/commands/write_commands/batch_executor.cpp b/src/mongo/db/commands/write_commands/batch_executor.cpp index ab88d578cd6..b4b5a5e44dd 100644 --- a/src/mongo/db/commands/write_commands/batch_executor.cpp +++ b/src/mongo/db/commands/write_commands/batch_executor.cpp @@ -919,7 +919,6 @@ namespace mongo { return false; } _context.reset(new Client::Context(request->getNS(), - storageGlobalParams.dbpath, false /* don't check version */)); Database* database = _context->db(); dassert(database); @@ -1100,7 +1099,6 @@ namespace mongo { return; Client::Context ctx( nsString.ns(), - storageGlobalParams.dbpath, false /* don't check version */ ); try { @@ -1163,7 +1161,6 @@ namespace mongo { // Context once we're locked, to set more details in currentOp() // TODO: better constructor? 
Client::Context writeContext( nss.ns(), - storageGlobalParams.dbpath, false /* don't check version */); try { diff --git a/src/mongo/db/db.cpp b/src/mongo/db/db.cpp index 092760bd42c..bb57f42023e 100644 --- a/src/mongo/db/db.cpp +++ b/src/mongo/db/db.cpp @@ -338,7 +338,8 @@ namespace mongo { Lock::GlobalWrite lk(txn.lockState()); vector< string > dbNames; - getDatabaseNames( dbNames ); + globalStorageEngine->listDatabases( &dbNames ); + for ( vector< string >::iterator i = dbNames.begin(); i != dbNames.end(); ++i ) { string dbName = *i; LOG(1) << "\t" << dbName << endl; @@ -354,7 +355,7 @@ namespace mongo { ctx.db()->clearTmpCollections(&txn); if ( mongodGlobalParams.repair ) { - fassert(18506, repairDatabase(&txn, dbName)); + fassert(18506, globalStorageEngine->repairDatabase(&txn, dbName)); } else if (!ctx.db()->getDatabaseCatalogEntry()->currentFilesCompatible(&txn)) { log() << "****"; @@ -401,7 +402,7 @@ namespace mongo { warning() << "Internal error while reading collection " << systemIndexes; } - Database::closeDatabase(&txn, dbName.c_str(), storageGlobalParams.dbpath); + Database::closeDatabase(&txn, dbName.c_str()); } } diff --git a/src/mongo/db/dbcommands.cpp b/src/mongo/db/dbcommands.cpp index b1c8b76f250..3e9810ef0ea 100644 --- a/src/mongo/db/dbcommands.cpp +++ b/src/mongo/db/dbcommands.cpp @@ -51,6 +51,7 @@ #include "mongo/db/commands/shutdown.h" #include "mongo/db/db.h" #include "mongo/db/dbhelpers.h" +#include "mongo/db/storage/storage_engine.h" #include "mongo/db/storage/mmap_v1/dur_stats.h" #include "mongo/db/index_builder.h" #include "mongo/db/instance.h" @@ -186,7 +187,7 @@ namespace mongo { log() << "dropDatabase " << dbname << " starting" << endl; stopIndexBuilds(txn, context.db(), cmdObj); - dropDatabase(txn, context.db(), storageGlobalParams.dbpath); + dropDatabase(txn, context.db()); log() << "dropDatabase " << dbname << " finished"; @@ -267,8 +268,10 @@ namespace mongo { bool preserveClonedFilesOnFailure = e.isBoolean() && e.boolean(); e = cmdObj.getField( "backupOriginalFiles" ); bool backupOriginalFiles = e.isBoolean() && e.boolean(); - Status status = - repairDatabase( txn, dbname, preserveClonedFilesOnFailure, backupOriginalFiles ); + Status status = globalStorageEngine->repairDatabase( txn, + dbname, + preserveClonedFilesOnFailure, + backupOriginalFiles ); IndexBuilder::restoreIndexes(indexesInProg); @@ -629,7 +632,8 @@ namespace mongo { CmdListDatabases() : Command("listDatabases" , true ) {} bool run(OperationContext* txn, const string& dbname , BSONObj& jsobj, int, string& errmsg, BSONObjBuilder& result, bool /*fromRepl*/) { vector< string > dbNames; - getDatabaseNames( dbNames ); + globalStorageEngine->listDatabases( &dbNames ); + vector< BSONObj > dbInfos; set<string> seen; @@ -714,7 +718,7 @@ namespace mongo { Client::Context ctx(dbname); try { - return dbHolder().closeAll(txn, storageGlobalParams.dbpath, result, false); + return dbHolder().closeAll(txn, result, false); } catch(DBException&) { throw; diff --git a/src/mongo/db/index_builder.cpp b/src/mongo/db/index_builder.cpp index ec3381900d5..c99a51d15be 100644 --- a/src/mongo/db/index_builder.cpp +++ b/src/mongo/db/index_builder.cpp @@ -66,7 +66,7 @@ namespace mongo { NamespaceString ns(_index["ns"].String()); Client::WriteContext ctx(&txn, ns.getSystemIndexesCollection()); - Database* db = dbHolder().get(&txn, ns.db().toString(), storageGlobalParams.dbpath); + Database* db = dbHolder().get(&txn, ns.db().toString()); Status status = build(&txn, db); if ( !status.isOK() ) { diff --git 
a/src/mongo/db/index_rebuilder.cpp b/src/mongo/db/index_rebuilder.cpp index e7a74014be3..9226866c8a4 100644 --- a/src/mongo/db/index_rebuilder.cpp +++ b/src/mongo/db/index_rebuilder.cpp @@ -38,6 +38,7 @@ #include "mongo/db/pdfile.h" #include "mongo/db/repl/rs.h" #include "mongo/db/operation_context_impl.h" +#include "mongo/db/storage/storage_engine.h" #include "mongo/util/scopeguard.h" namespace mongo { @@ -56,7 +57,7 @@ namespace mongo { cc().getAuthorizationSession()->grantInternalAuthorization(); std::vector<std::string> dbNames; - getDatabaseNames(dbNames); + globalStorageEngine->listDatabases( &dbNames ); try { std::list<std::string> collNames; diff --git a/src/mongo/db/instance.cpp b/src/mongo/db/instance.cpp index 2ed438d5e35..8223c0b225a 100644 --- a/src/mongo/db/instance.cpp +++ b/src/mongo/db/instance.cpp @@ -546,11 +546,11 @@ namespace mongo { } /*static*/ - void Database::closeDatabase(OperationContext* txn, const string& db, const string& path) { + void Database::closeDatabase(OperationContext* txn, const string& db) { // XXX? - Do we need to close database under global lock or just DB-lock is sufficient ? invariant(txn->lockState()->isW()); - Database* database = dbHolder().get(txn, db, path); + Database* database = dbHolder().get(txn, db); invariant(database != NULL); repl::oplogCheckCloseDatabase(txn, database); // oplog caches some things, dirty its caches @@ -559,16 +559,12 @@ namespace mongo { log() << "warning: bg op in prog during close db? " << db << endl; } - /* important: kill all open cursors on the database */ - string prefix(db); - prefix += '.'; - // Before the files are closed, flush any potentially outstanding changes, which might // reference this database. Otherwise we will assert when subsequent commit if needed // is called and it happens to have write intents for the removed files. txn->recoveryUnit()->commitIfNeeded(true); - dbHolder().erase(txn, db, path); + dbHolder().erase(txn, db); delete database; // closes files } @@ -911,32 +907,14 @@ namespace mongo { } } - void getDatabaseNames( vector< string > &names , const string& usePath ) { - boost::filesystem::path path( usePath ); - for ( boost::filesystem::directory_iterator i( path ); - i != boost::filesystem::directory_iterator(); ++i ) { - if (storageGlobalParams.directoryperdb) { - boost::filesystem::path p = *i; - string dbName = p.leaf().string(); - p /= ( dbName + ".ns" ); - if ( exists( p ) ) - names.push_back( dbName ); - } - else { - string fileName = boost::filesystem::path(*i).leaf().string(); - if ( fileName.length() > 3 && fileName.substr( fileName.length() - 3, 3 ) == ".ns" ) - names.push_back( fileName.substr( 0, fileName.length() - 3 ) ); - } - } - } - /* returns true if there is data on this server. useful when starting replication. local database does NOT count except for rsoplog collection. used to set the hasData field on replset heartbeat command response */ bool replHasDatabases(OperationContext* txn) { vector<string> names; - getDatabaseNames(names); + globalStorageEngine->listDatabases( &names ); + if( names.size() >= 2 ) return true; if( names.size() == 1 ) { if( names[0] != "local" ) @@ -1265,10 +1243,10 @@ namespace { // even prealloc files, then it means that it is mounted so we can continue. // Previously there was an issue (SERVER-5056) where we would fail to start up // if killed during prealloc. 
- + vector<string> dbnames; - getDatabaseNames( dbnames ); - + globalStorageEngine->listDatabases( &dbnames ); + if ( dbnames.size() == 0 ) { // this means that mongod crashed // between initial startup and when journaling was initialized diff --git a/src/mongo/db/instance.h b/src/mongo/db/instance.h index b7039b30c4c..d5fef449645 100644 --- a/src/mongo/db/instance.h +++ b/src/mongo/db/instance.h @@ -73,9 +73,6 @@ namespace mongo { DbResponse& dbresponse, const HostAndPort &client ); - void getDatabaseNames(std::vector<std::string> &names, - const std::string& usePath = storageGlobalParams.dbpath); - /* returns true if there is no data on this server. useful when starting replication. local database does NOT count. */ diff --git a/src/mongo/db/introspect.cpp b/src/mongo/db/introspect.cpp index 5f9651cc41d..fe55d39de5a 100644 --- a/src/mongo/db/introspect.cpp +++ b/src/mongo/db/introspect.cpp @@ -138,10 +138,9 @@ namespace { // NOTE: It's kind of weird that we lock the op's namespace, but have to for now since // we're sometimes inside the lock already Lock::DBWrite lk(txn->lockState(), currentOp.getNS() ); - if (dbHolder().get( - txn, nsToDatabase(currentOp.getNS()), storageGlobalParams.dbpath) != NULL) { + if (dbHolder().get(txn, nsToDatabase(currentOp.getNS())) != NULL) { - Client::Context cx(currentOp.getNS(), storageGlobalParams.dbpath, false); + Client::Context cx(currentOp.getNS(), false); _profile(txn, c, cx.db(), currentOp, profileBufBuilder); } else { diff --git a/src/mongo/db/pdfile.cpp b/src/mongo/db/pdfile.cpp index 6d693eac627..a2bd737670f 100644 --- a/src/mongo/db/pdfile.cpp +++ b/src/mongo/db/pdfile.cpp @@ -60,7 +60,6 @@ _ disallow system* manipulations from the database. #include "mongo/db/index_names.h" #include "mongo/db/index/index_descriptor.h" #include "mongo/db/index/index_access_method.h" -#include "mongo/db/instance.h" #include "mongo/db/lasterror.h" #include "mongo/db/namespace_string.h" #include "mongo/db/ops/delete.h" @@ -68,6 +67,7 @@ _ disallow system* manipulations from the database. 
#include "mongo/db/repl/is_master.h" #include "mongo/db/repl/oplog.h" #include "mongo/db/storage_options.h" +#include "mongo/db/storage/storage_engine.h" #include "mongo/db/catalog/collection.h" #include "mongo/util/assert_util.h" #include "mongo/util/file.h" @@ -141,18 +141,18 @@ namespace mongo { Lock::GlobalWrite lk(txn->lockState()); vector<string> n; - getDatabaseNames(n); + globalStorageEngine->listDatabases( &n ); if( n.size() == 0 ) return; log() << "dropAllDatabasesExceptLocal " << n.size() << endl; for( vector<string>::iterator i = n.begin(); i != n.end(); i++ ) { if( *i != "local" ) { Client::Context ctx(*i); - dropDatabase(txn, ctx.db(), storageGlobalParams.dbpath); + dropDatabase(txn, ctx.db()); } } } - void dropDatabase(OperationContext* txn, Database* db, const std::string& path ) { + void dropDatabase(OperationContext* txn, Database* db ) { invariant( db ); string name = db->name(); // just to have safe @@ -173,7 +173,7 @@ namespace mongo { txn->recoveryUnit()->syncDataAndTruncateJournal(); - Database::closeDatabase(txn, name, path ); + Database::closeDatabase(txn, name ); db = 0; // d is now deleted _deleteDataFiles( name ); diff --git a/src/mongo/db/pdfile.h b/src/mongo/db/pdfile.h index fdad750b70b..a81ca752ade 100644 --- a/src/mongo/db/pdfile.h +++ b/src/mongo/db/pdfile.h @@ -47,7 +47,7 @@ namespace mongo { class Database; class OperationContext; - void dropDatabase(OperationContext* txn, Database* db, const std::string& path ); + void dropDatabase(OperationContext* txn, Database* db ); void dropAllDatabasesExceptLocal(OperationContext* txn); diff --git a/src/mongo/db/pipeline/document_source_cursor.cpp b/src/mongo/db/pipeline/document_source_cursor.cpp index 05b384fd3a6..62b8b52645a 100644 --- a/src/mongo/db/pipeline/document_source_cursor.cpp +++ b/src/mongo/db/pipeline/document_source_cursor.cpp @@ -80,7 +80,7 @@ namespace mongo { // We have already validated the sharding version when we constructed the Runner // so we shouldn't check it again. Lock::DBRead lk(pExpCtx->opCtx->lockState(), _ns); - Client::Context ctx(_ns, storageGlobalParams.dbpath, /*doVersion=*/false); + Client::Context ctx(_ns, /*doVersion=*/false); _runner->restoreState(pExpCtx->opCtx); @@ -200,7 +200,7 @@ namespace { scoped_ptr<TypeExplain> plan; { Lock::DBRead lk(pExpCtx->opCtx->lockState(), _ns); - Client::Context ctx(_ns, storageGlobalParams.dbpath, /*doVersion=*/false); + Client::Context ctx(_ns, /*doVersion=*/false); massert(17392, "No _runner. 
Were we disposed before explained?", _runner); diff --git a/src/mongo/db/repair_database.h b/src/mongo/db/repair_database.h index eb4f0f9b4e7..00999efb441 100644 --- a/src/mongo/db/repair_database.h +++ b/src/mongo/db/repair_database.h @@ -44,11 +44,4 @@ namespace mongo { // TODO: move void _deleteDataFiles(const std::string& database); - // must have a global lock - Status repairDatabase( OperationContext* txn, - std::string db, - bool preserveClonedFilesOnFailure = false, - bool backupOriginalFiles = false ); - - } // namespace mongo diff --git a/src/mongo/db/repl/master_slave.cpp b/src/mongo/db/repl/master_slave.cpp index e44d86483d1..2b3eca3de42 100644 --- a/src/mongo/db/repl/master_slave.cpp +++ b/src/mongo/db/repl/master_slave.cpp @@ -365,7 +365,7 @@ namespace repl { void ReplSource::resyncDrop( OperationContext* txn, const string& db ) { log() << "resync: dropping database " << db; Client::Context ctx(db); - dropDatabase(txn, ctx.db(), storageGlobalParams.dbpath); + dropDatabase(txn, ctx.db()); } /* grab initial copy of a database from the master */ @@ -440,7 +440,7 @@ namespace repl { const char* ns, const char* db ) { // We are already locked at this point - if (dbHolder().get(txn, ns, storageGlobalParams.dbpath) != NULL) { + if (dbHolder().get(txn, ns) != NULL) { // Database is already present. return true; } @@ -450,7 +450,7 @@ namespace repl { // missing from master after optime "ts". return false; } - if (Database::duplicateUncasedName(db, storageGlobalParams.dbpath).empty()) { + if (Database::duplicateUncasedName(db).empty()) { // No duplicate database names are present. return true; } @@ -505,7 +505,7 @@ namespace repl { // Check for duplicates again, since we released the lock above. set< string > duplicates; - Database::duplicateUncasedName(db, storageGlobalParams.dbpath, &duplicates); + Database::duplicateUncasedName(db, &duplicates); // The database is present on the master and no conflicting databases // are present on the master. Drop any local conflicts. @@ -514,11 +514,11 @@ namespace repl { incompleteCloneDbs.erase(*i); addDbNextPass.erase(*i); Client::Context ctx(*i); - dropDatabase(txn, ctx.db(), storageGlobalParams.dbpath ); + dropDatabase(txn, ctx.db() ); } massert(14034, "Duplicate database names present after attempting to delete duplicates", - Database::duplicateUncasedName(db, storageGlobalParams.dbpath).empty()); + Database::duplicateUncasedName(db).empty()); return true; } @@ -630,7 +630,7 @@ namespace repl { // This code executes on the slaves only, so it doesn't need to be sharding-aware since // mongos will not send requests there. That's why the last argument is false (do not do // version checking). 
- Client::Context ctx(ns, storageGlobalParams.dbpath, false); + Client::Context ctx(ns, false); ctx.getClient()->curop()->reset(); bool empty = ctx.db()->getDatabaseCatalogEntry()->isEmpty(); diff --git a/src/mongo/db/repl/oplog.cpp b/src/mongo/db/repl/oplog.cpp index c0019342ad5..9af9056c784 100644 --- a/src/mongo/db/repl/oplog.cpp +++ b/src/mongo/db/repl/oplog.cpp @@ -123,7 +123,7 @@ namespace repl { { if ( localOplogRSCollection == 0 ) { - Client::Context ctx(rsoplog, storageGlobalParams.dbpath); + Client::Context ctx(rsoplog); localDB = ctx.db(); verify( localDB ); localOplogRSCollection = localDB->getCollection( &txn, rsoplog ); @@ -282,7 +282,7 @@ namespace repl { DEV verify( logNS == 0 ); // check this was never a master/slave master if ( localOplogRSCollection == 0 ) { - Client::Context ctx(rsoplog, storageGlobalParams.dbpath); + Client::Context ctx(rsoplog); localDB = ctx.db(); verify( localDB ); localOplogRSCollection = localDB->getCollection( txn, rsoplog ); @@ -338,8 +338,6 @@ namespace repl { OpTime ts(getNextGlobalOptime()); newOptimeNotifier.notify_all(); - Client::Context context("", 0); - /* we jump through a bunch of hoops here to avoid copying the obj buffer twice -- instead we do a single copy to the destination position in the memory mapped file. */ @@ -362,7 +360,7 @@ namespace repl { } if ( localOplogMainCollection == 0 ) { - Client::Context ctx(logNS, storageGlobalParams.dbpath); + Client::Context ctx(logNS); localDB = ctx.db(); verify( localDB ); localOplogMainCollection = localDB->getCollection(txn, logNS); @@ -373,7 +371,7 @@ namespace repl { OplogDocWriter writer( partial, obj ); checkOplogInsert( localOplogMainCollection->insertDocument( txn, &writer, false ) ); - context.getClient()->setLastOp( ts ); + ctx.getClient()->setLastOp( ts ); } static void (*_logOp)(OperationContext* txn, diff --git a/src/mongo/db/repl/repl_set_impl.cpp b/src/mongo/db/repl/repl_set_impl.cpp index a9550c7a941..0d2a2f0203b 100644 --- a/src/mongo/db/repl/repl_set_impl.cpp +++ b/src/mongo/db/repl/repl_set_impl.cpp @@ -32,13 +32,13 @@ #include "mongo/db/catalog/database.h" #include "mongo/db/commands/get_last_error.h" #include "mongo/db/dbhelpers.h" -#include "mongo/db/instance.h" #include "mongo/db/operation_context_impl.h" #include "mongo/db/repl/bgsync.h" #include "mongo/db/repl/connections.h" #include "mongo/db/repl/oplog.h" #include "mongo/db/repl/repl_set_seed_list.h" #include "mongo/db/repl/repl_settings.h" // replSettings +#include "mongo/db/storage/storage_engine.h" #include "mongo/s/d_logic.h" #include "mongo/util/background.h" #include "mongo/util/exit.h" @@ -95,7 +95,8 @@ namespace repl { namespace { void dropAllTempCollections() { vector<string> dbNames; - getDatabaseNames(dbNames); + globalStorageEngine->listDatabases( &dbNames ); + OperationContextImpl txn; for (vector<string>::const_iterator it = dbNames.begin(); it != dbNames.end(); ++it) { // The local db is special because it isn't replicated. 
It is cleared at startup even on diff --git a/src/mongo/db/repl/rs_rollback.cpp b/src/mongo/db/repl/rs_rollback.cpp index 7c9fe5a886f..f19bf4113fa 100644 --- a/src/mongo/db/repl/rs_rollback.cpp +++ b/src/mongo/db/repl/rs_rollback.cpp @@ -438,8 +438,7 @@ namespace repl { const NamespaceString nss(ns); bool unused; - Database* db = dbHolder().getOrCreate( - txn, nss.db().toString(), storageGlobalParams.dbpath, unused); + Database* db = dbHolder().getOrCreate(txn, nss.db().toString(), unused); invariant(db); db->dropCollection(txn, ns); @@ -652,7 +651,7 @@ namespace repl { } removeSavers.clear(); // this effectively closes all of them - + sethbmsg(str::stream() << "rollback 5 d:" << deletes << " u:" << updates); sethbmsg("rollback 6"); // clean up oplog diff --git a/src/mongo/db/restapi.cpp b/src/mongo/db/restapi.cpp index ed40676d54f..cc6b7eceed2 100644 --- a/src/mongo/db/restapi.cpp +++ b/src/mongo/db/restapi.cpp @@ -274,7 +274,6 @@ namespace mongo { void _gotLock( int millis , stringstream& ss ) { ss << "<pre>\n"; ss << "time to get readlock: " << millis << "ms\n"; - ss << "# databases: " << dbHolder().sizeInfo() << '\n'; ss << "# Cursors: " << ClientCursor::totalOpen() << '\n'; ss << "replication: "; if (*repl::replInfo) diff --git a/src/mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.cpp b/src/mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.cpp index 6084a737bbd..08f36b9e8a1 100644 --- a/src/mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.cpp +++ b/src/mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.cpp @@ -53,16 +53,18 @@ namespace mongo { MONGO_EXPORT_SERVER_PARAMETER(newCollectionsUsePowerOf2Sizes, bool, true); MMAPV1DatabaseCatalogEntry::MMAPV1DatabaseCatalogEntry( OperationContext* txn, - const StringData& name, - const StringData& path, - bool directoryPerDB ) + const StringData& name, + const StringData& path, + bool directoryPerDB, + bool transient ) : DatabaseCatalogEntry( name ), _path( path.toString() ), _extentManager( name, path, directoryPerDB ), _namespaceIndex( _path, name.toString() ) { try { - _checkDuplicateUncasedNames(); + if ( !transient ) + _checkDuplicateUncasedNames(); Status s = _extentManager.init(txn); if ( !s.isOK() ) { @@ -373,7 +375,7 @@ namespace mongo { } void MMAPV1DatabaseCatalogEntry::_checkDuplicateUncasedNames() const { - string duplicate = Database::duplicateUncasedName(name(), _path); + string duplicate = Database::duplicateUncasedName(name()); if ( !duplicate.empty() ) { stringstream ss; ss << "db already exists with different case already have: [" << duplicate diff --git a/src/mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.h b/src/mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.h index 1d832872199..d4b9e45c72e 100644 --- a/src/mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.h +++ b/src/mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.h @@ -50,9 +50,10 @@ namespace mongo { class MMAPV1DatabaseCatalogEntry : public DatabaseCatalogEntry { public: MMAPV1DatabaseCatalogEntry( OperationContext* txn, - const StringData& name, - const StringData& path, - bool directoryperdb ); + const StringData& name, + const StringData& path, + bool directoryperdb, + bool transient ); virtual ~MMAPV1DatabaseCatalogEntry(); diff --git a/src/mongo/db/storage/mmap_v1/mmap_v1_engine.cpp b/src/mongo/db/storage/mmap_v1/mmap_v1_engine.cpp index 75f7dbaa9b0..91c71d0f131 100644 --- a/src/mongo/db/storage/mmap_v1/mmap_v1_engine.cpp +++ b/src/mongo/db/storage/mmap_v1/mmap_v1_engine.cpp @@ -30,6 +30,10 @@ #include 
"mongo/db/storage/mmap_v1/mmap_v1_engine.h" +#include <boost/filesystem/path.hpp> +#include <boost/filesystem/operations.hpp> + +#include "mongo/db/storage_options.h" #include "mongo/util/mmap.h" namespace mongo { @@ -37,6 +41,31 @@ namespace mongo { MMAPV1Engine::~MMAPV1Engine() { } + void MMAPV1Engine::listDatabases( std::vector<std::string>* out ) const { + _listDatabases( storageGlobalParams.dbpath, out ); + } + + void MMAPV1Engine::_listDatabases( const std::string& directory, + std::vector<std::string>* out ) { + boost::filesystem::path path( directory ); + for ( boost::filesystem::directory_iterator i( path ); + i != boost::filesystem::directory_iterator(); + ++i ) { + if (storageGlobalParams.directoryperdb) { + boost::filesystem::path p = *i; + string dbName = p.leaf().string(); + p /= ( dbName + ".ns" ); + if ( exists( p ) ) + out->push_back( dbName ); + } + else { + string fileName = boost::filesystem::path(*i).leaf().string(); + if ( fileName.length() > 3 && fileName.substr( fileName.length() - 3, 3 ) == ".ns" ) + out->push_back( fileName.substr( 0, fileName.length() - 3 ) ); + } + } + } + int MMAPV1Engine::flushAllFiles( bool sync ) { return MongoFile::flushAll( sync ); } diff --git a/src/mongo/db/storage/mmap_v1/mmap_v1_engine.h b/src/mongo/db/storage/mmap_v1/mmap_v1_engine.h index 94129f30895..55a6540ed0a 100644 --- a/src/mongo/db/storage/mmap_v1/mmap_v1_engine.h +++ b/src/mongo/db/storage/mmap_v1/mmap_v1_engine.h @@ -37,6 +37,17 @@ namespace mongo { class MMAPV1Engine : public StorageEngine { public: virtual ~MMAPV1Engine(); + + void listDatabases( std::vector<std::string>* out ) const; int flushAllFiles( bool sync ); + + Status repairDatabase( OperationContext* tnx, + const std::string& dbName, + bool preserveClonedFilesOnFailure, + bool backupOriginalFiles ); + + private: + static void _listDatabases( const std::string& directory, + std::vector<std::string>* out ); }; } diff --git a/src/mongo/db/repair_database.cpp b/src/mongo/db/storage/mmap_v1/repair_database.cpp index 2eca6f969e7..9bfc6ad2d7f 100644 --- a/src/mongo/db/repair_database.cpp +++ b/src/mongo/db/storage/mmap_v1/repair_database.cpp @@ -28,7 +28,7 @@ * it in the license file. */ -#include "mongo/db/repair_database.h" +#include "mongo/db/storage/mmap_v1/mmap_v1_engine.h" #include <boost/filesystem/operations.hpp> @@ -39,7 +39,7 @@ #include "mongo/db/catalog/index_create.h" #include "mongo/db/client.h" #include "mongo/db/index/index_descriptor.h" -#include "mongo/db/storage/storage_engine.h" +#include "mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.h" #include "mongo/util/file.h" #include "mongo/util/file_allocator.h" #include "mongo/util/mmap.h" @@ -248,10 +248,6 @@ namespace mongo { try { _txn->recoveryUnit()->syncDataAndTruncateJournal(); globalStorageEngine->flushAllFiles(true); // need both in case journaling is disabled - { - Client::Context tempContext( _dbName, _pathString ); - Database::closeDatabase(_txn, _dbName, _pathString); - } MONGO_ASSERT_ON_EXCEPTION( boost::filesystem::remove_all( _path ) ); } catch ( DBException& e ) { @@ -273,16 +269,16 @@ namespace mongo { bool _success; }; - Status repairDatabase( OperationContext* txn, - string dbName, - bool preserveClonedFilesOnFailure, - bool backupOriginalFiles ) { + Status MMAPV1Engine::repairDatabase( OperationContext* txn, + const std::string& dbName, + bool preserveClonedFilesOnFailure, + bool backupOriginalFiles ) { // We must hold some form of lock here invariant(txn->lockState()->threadState()); + invariant( dbName.find( '.' 
) == string::npos ); scoped_ptr<RepairFileDeleter> repairFileDeleter; doingRepair dr; - dbName = nsToDatabase( dbName ); log() << "repairDatabase " << dbName << endl; @@ -315,17 +311,26 @@ namespace mongo { reservedPath ) ); { - Database* originalDatabase = - dbHolder().get(txn, dbName, storageGlobalParams.dbpath); + Database* originalDatabase = + dbHolder().get(txn, dbName); if (originalDatabase == NULL) { return Status(ErrorCodes::NamespaceNotFound, "database does not exist to repair"); } - Database* tempDatabase = NULL; + scoped_ptr<Database> tempDatabase; { + MMAPV1DatabaseCatalogEntry* entry = + new MMAPV1DatabaseCatalogEntry( txn, + dbName, + reservedPathString, + storageGlobalParams.directoryperdb, + true ); bool justCreated = false; - tempDatabase = - dbHolder().getOrCreate(txn, dbName, reservedPathString, justCreated); + tempDatabase.reset( new Database( txn, + dbName, + justCreated, + entry ) ); + invariant( justCreated ); } @@ -433,8 +438,6 @@ namespace mongo { txn->checkForInterrupt(false); - Client::Context tempContext( dbName, reservedPathString ); - Database::closeDatabase(txn, dbName, reservedPathString); } // at this point if we abort, we don't want to delete new files @@ -444,7 +447,7 @@ namespace mongo { repairFileDeleter->success(); Client::Context ctx( dbName ); - Database::closeDatabase(txn, dbName, storageGlobalParams.dbpath); + Database::closeDatabase(txn, dbName); if ( backupOriginalFiles ) { _renameForBackup( dbName, reservedPath ); diff --git a/src/mongo/db/storage/storage_engine.h b/src/mongo/db/storage/storage_engine.h index fe53564499c..193ca79e038 100644 --- a/src/mongo/db/storage/storage_engine.h +++ b/src/mongo/db/storage/storage_engine.h @@ -30,19 +30,30 @@ #pragma once +#include <string> +#include <vector> + #include "mongo/base/status.h" namespace mongo { + class OperationContext; + class StorageEngine { public: virtual ~StorageEngine() {} + virtual void listDatabases( std::vector<std::string>* out ) const = 0; + /** * @return number of files flushed */ virtual int flushAllFiles( bool sync ) = 0; + virtual Status repairDatabase( OperationContext* tnx, + const std::string& dbName, + bool preserveClonedFilesOnFailure = false, + bool backupOriginalFiles = false ) = 0; }; // TODO: this is temporary diff --git a/src/mongo/dbtests/basictests.cpp b/src/mongo/dbtests/basictests.cpp index 6d8d172b18c..8149254024b 100644 --- a/src/mongo/dbtests/basictests.cpp +++ b/src/mongo/dbtests/basictests.cpp @@ -386,11 +386,12 @@ namespace BasicTests { // if that changes, should put this on the stack { MMAPV1DatabaseCatalogEntry* temp = new MMAPV1DatabaseCatalogEntry(&txn, - "dbtests_basictests_ownsns", - storageGlobalParams.dbpath, - storageGlobalParams.directoryperdb); + "dbtests_basictests_ownsns", + storageGlobalParams.dbpath, + storageGlobalParams.directoryperdb, + true); Database * db = new Database( &txn, "dbtests_basictests_ownsns", isNew, temp ); - verify( isNew ); + ASSERT( isNew ); ASSERT( db->ownsNS( "dbtests_basictests_ownsns.x" ) ); ASSERT( db->ownsNS( "dbtests_basictests_ownsns.x.y" ) ); diff --git a/src/mongo/dbtests/dbhelper_tests.cpp b/src/mongo/dbtests/dbhelper_tests.cpp index 90717127cae..dac9e27ca73 100644 --- a/src/mongo/dbtests/dbhelper_tests.cpp +++ b/src/mongo/dbtests/dbhelper_tests.cpp @@ -150,8 +150,7 @@ namespace mongo { ASSERT_NOT_EQUALS( estSizeBytes, 0 ); ASSERT_LESS_THAN( estSizeBytes, maxSizeBytes ); - Database* db = dbHolder().get( - &txn, nsToDatabase(range.ns), storageGlobalParams.dbpath); + Database* db = dbHolder().get( &txn, 
nsToDatabase(range.ns) ); const Collection* collection = db->getCollection(&txn, ns); // Make sure all the disklocs actually correspond to the right info diff --git a/src/mongo/tools/dump.cpp b/src/mongo/tools/dump.cpp index 0e774545897..c4aab6b1bf4 100644 --- a/src/mongo/tools/dump.cpp +++ b/src/mongo/tools/dump.cpp @@ -374,7 +374,7 @@ public: OperationContextImpl txn; Client::WriteContext cx(&txn, dbname); - Database* db = dbHolder().get(&txn, dbname, storageGlobalParams.dbpath); + Database* db = dbHolder().get(&txn, dbname); list<string> namespaces; db->getDatabaseCatalogEntry()->getCollectionNamespaces( &namespaces );
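The same mechanical substitution repeats across all 38 files. As a summary, here is the typical call-site migration, with the old and new lines copied from the hunks above and shown as comments since the surrounding code is elided:

```cpp
// Call-site pattern repeated throughout this diff (lines taken from the hunks above).

// database lookup: the dbpath argument is gone
//   old: dbHolder().get(txn, ns, storageGlobalParams.dbpath);
//   new: dbHolder().get(txn, ns);

// listing databases: free function replaced by the storage engine
//   old: getDatabaseNames(dbNames);
//   new: globalStorageEngine->listDatabases(&dbNames);

// repair: free function replaced by the storage engine
//   old: repairDatabase(txn, dbname, preserveClonedFilesOnFailure, backupOriginalFiles);
//   new: globalStorageEngine->repairDatabase(txn, dbname, preserveClonedFilesOnFailure, backupOriginalFiles);

// closing a database: path argument dropped
//   old: Database::closeDatabase(txn, dbName, storageGlobalParams.dbpath);
//   new: Database::closeDatabase(txn, dbName);

// Client::Context: the path overload is gone
//   old: Client::Context ctx(ns, storageGlobalParams.dbpath, /*doVersion=*/false);
//   new: Client::Context ctx(ns, /*doVersion=*/false);
```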