author    Kaloian Manassiev <kaloian.manassiev@mongodb.com>  2014-09-26 14:02:49 -0400
committer Kaloian Manassiev <kaloian.manassiev@mongodb.com>  2014-10-06 17:30:12 -0400
commit    101e026f45dea5e9e68520238495c89a476e6172 (patch)
tree      bbdd3710ffc5721527ad9f5682ef0dbb4876dfee
parent    10c86dc6cad9853514148e0ab59894a0d29353b9 (diff)
SERVER-14668/SERVER-15294 Collection-level locking for all read paths
 jstests/core/dbcase2.js                                                  | 12
 src/mongo/db/auth/auth_index_d.cpp                                       | 21
 src/mongo/db/auth/auth_index_d.h                                         | 7
 src/mongo/db/auth/authz_manager_external_state_d.cpp                     | 4
 src/mongo/db/catalog/collection_cursor_cache.cpp                         | 38
 src/mongo/db/catalog/collection_cursor_cache.h                           | 2
 src/mongo/db/catalog/database.cpp                                        | 13
 src/mongo/db/catalog/database_holder.cpp                                 | 89
 src/mongo/db/catalog/database_holder.h                                   | 59
 src/mongo/db/client.cpp                                                  | 130
 src/mongo/db/client.h                                                    | 83
 src/mongo/db/cloner.cpp                                                  | 12
 src/mongo/db/cloner.h                                                    | 9
 src/mongo/db/commands/count.cpp                                          | 18
 src/mongo/db/commands/dbhash.cpp                                         | 14
 src/mongo/db/commands/distinct.cpp                                       | 7
 src/mongo/db/commands/drop_indexes.cpp                                   | 1
 src/mongo/db/commands/find_cmd.cpp                                       | 12
 src/mongo/db/commands/geo_near_cmd.cpp                                   | 20
 src/mongo/db/commands/group.cpp                                          | 10
 src/mongo/db/commands/index_filter_commands.cpp                          | 20
 src/mongo/db/commands/list_collections.cpp                               | 4
 src/mongo/db/commands/list_databases.cpp                                 | 22
 src/mongo/db/commands/list_indexes.cpp                                   | 8
 src/mongo/db/commands/mr.cpp                                             | 47
 src/mongo/db/commands/parallel_collection_scan.cpp                       | 6
 src/mongo/db/commands/pipeline_command.cpp                               | 6
 src/mongo/db/commands/plan_cache_commands.cpp                            | 18
 src/mongo/db/commands/rename_collection.cpp                              | 3
 src/mongo/db/commands/touch.cpp                                          | 11
 src/mongo/db/commands/validate.cpp                                       | 12
 src/mongo/db/commands/write_commands/batch_executor.cpp                  | 4
 src/mongo/db/commands/write_commands/write_commands.cpp                  | 1
 src/mongo/db/curop.cpp                                                   | 10
 src/mongo/db/curop.h                                                     | 4
 src/mongo/db/db.cpp                                                      | 28
 src/mongo/db/dbcommands.cpp                                              | 80
 src/mongo/db/dbhelpers.cpp                                               | 32
 src/mongo/db/dbhelpers.h                                                 | 3
 src/mongo/db/fts/fts_command_mongod.cpp                                  | 7
 src/mongo/db/geo/haystack.cpp                                            | 10
 src/mongo/db/index_builder.cpp                                           | 2
 src/mongo/db/index_builder.h                                             | 9
 src/mongo/db/index_rebuilder.cpp                                         | 12
 src/mongo/db/index_rebuilder.h                                           | 4
 src/mongo/db/instance.h                                                  | 1
 src/mongo/db/introspect.cpp                                              | 3
 src/mongo/db/ops/update.h                                                | 1
 src/mongo/db/pipeline/document_source_cursor.cpp                         | 9
 src/mongo/db/pipeline/pipeline_d.cpp                                     | 8
 src/mongo/db/pipeline/pipeline_d.h                                       | 9
 src/mongo/db/prefetch.cpp                                                | 21
 src/mongo/db/query/new_find.cpp                                          | 27
 src/mongo/db/range_deleter_db_env.cpp                                    | 7
 src/mongo/db/repl/bgsync.cpp                                             | 1
 src/mongo/db/repl/heartbeat.cpp                                          | 9
 src/mongo/db/repl/master_slave.cpp                                       | 15
 src/mongo/db/repl/minvalid.cpp                                           | 2
 src/mongo/db/repl/repl_coordinator_external_state_impl.cpp               | 2
 src/mongo/db/repl/repl_coordinator_legacy.cpp                            | 2
 src/mongo/db/repl/repl_info.cpp                                          | 13
 src/mongo/db/repl/repl_set_impl.cpp                                      | 6
 src/mongo/db/repl/repl_set_impl.h                                        | 3
 src/mongo/db/repl/rs_rollback.cpp                                        | 3
 src/mongo/db/repl/sync_tail.cpp                                          | 6
 src/mongo/db/storage/mmap_v1/catalog/namespace_details_rsv1_metadata.cpp | 4
 src/mongo/db/storage/mmap_v1/catalog/namespace_index.cpp                 | 15
 src/mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.cpp          | 65
 src/mongo/db/storage/mmap_v1/repair_database.cpp                         | 14
 src/mongo/db/ttl.cpp                                                     | 9
 src/mongo/dbtests/dbhelper_tests.cpp                                     | 3
 src/mongo/dbtests/matchertests.cpp                                       | 2
 src/mongo/dbtests/namespacetests.cpp                                     | 4
 src/mongo/dbtests/plan_ranking.cpp                                       | 4
 src/mongo/dbtests/query_multi_plan_runner.cpp                            | 24
 src/mongo/dbtests/query_plan_executor.cpp                                | 49
 src/mongo/dbtests/query_stage_collscan.cpp                               | 12
 src/mongo/dbtests/query_stage_distinct.cpp                               | 16
 src/mongo/dbtests/query_stage_tests.cpp                                  | 8
 src/mongo/dbtests/query_stage_update.cpp                                 | 8
 src/mongo/dbtests/querytests.cpp                                         | 21
 src/mongo/s/d_migrate.cpp                                                | 20
 src/mongo/s/d_split.cpp                                                  | 12
 src/mongo/s/d_state.cpp                                                  | 2
 src/mongo/tools/dump.cpp                                                 | 14
 src/mongo/tools/shim.cpp                                                 | 16
 src/mongo/tools/tool.cpp                                                 | 1
87 files changed, 738 insertions(+), 657 deletions(-)
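The unifying change across this patch: read paths stop taking a database-level read lock through Client::ReadContext and instead use the new RAII type AutoGetCollectionForRead, which takes the DB lock in IS-mode, locks the collection resource (IS, or S when the engine does not support document-level locking), and runs the shard version check under the collection lock. A minimal before/after sketch of the call-site pattern; the surrounding command body, and the txn/ns variables, are hypothetical, not part of the patch:

    {
        // Before: DB-level read lock; the context opens the database
        // as a side effect, possibly under the global write lock.
        Client::ReadContext ctx(txn, ns);
        Collection* collection = ctx.ctx().db()->getCollection(txn, ns);
    }
    {
        // After: IS-mode DB lock plus a collection-level lock; the shard
        // version check runs under the collection lock and nothing gets
        // created if the database or collection does not exist.
        AutoGetCollectionForRead ctx(txn, ns);
        Collection* collection = ctx.getCollection();  // NULL if ns is missing
    }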
diff --git a/jstests/core/dbcase2.js b/jstests/core/dbcase2.js
index f9973d98837..937527e752e 100644
--- a/jstests/core/dbcase2.js
+++ b/jstests/core/dbcase2.js
@@ -1,9 +1,13 @@
// SERVER-2111 Check that an in memory db name will block creation of a db with a similar but differently cased name.
-a = db.getSisterDB( "dbcase2test_dbnamea" )
-b = db.getSisterDB( "dbcase2test_dbnameA" )
+var dbLowerCase = db.getSisterDB( "dbcase2test_dbnamea" )
+var dbUpperCase = db.getSisterDB( "dbcase2test_dbnameA" )
-a.c.count();
-assert.throws( function() { b.c.count() } );
+var resultLower = dbLowerCase.c.insert({});
+assert.eq(1, resultLower.nInserted);
+
+var resultUpper = dbUpperCase.c.insert({});
+assert.eq(0, resultUpper.nInserted);
+assert.eq(13297, resultUpper.getWriteError().code);
assert.eq( -1, db.getMongo().getDBNames().indexOf( "dbcase2test_dbnameA" ) );
diff --git a/src/mongo/db/auth/auth_index_d.cpp b/src/mongo/db/auth/auth_index_d.cpp
index 65c02466fff..20f2bd2eb5d 100644
--- a/src/mongo/db/auth/auth_index_d.cpp
+++ b/src/mongo/db/auth/auth_index_d.cpp
@@ -72,7 +72,7 @@ namespace {
} // namespace
- void configureSystemIndexes(OperationContext* txn, const StringData& dbname) {
+ void configureSystemIndexes(OperationContext* txn) {
int authzVersion;
Status status = getGlobalAuthorizationManager()->getAuthorizationVersion(
txn, &authzVersion);
@@ -80,22 +80,29 @@ namespace {
return;
}
- if (dbname == "admin" && authzVersion >= AuthorizationManager::schemaVersion26Final) {
- NamespaceString systemUsers(dbname, "system.users");
+ if (authzVersion >= AuthorizationManager::schemaVersion26Final) {
+ const NamespaceString systemUsers("admin", "system.users");
// Make sure the old unique index from v2.4 on system.users doesn't exist.
- Client::WriteContext wctx(txn, systemUsers);
- Collection* collection = wctx.ctx().db()->getCollection(txn,
- NamespaceString(systemUsers));
+ AutoGetDb autoDb(txn, systemUsers.db(), newlm::MODE_X);
+ if (!autoDb.getDb()) {
+ return;
+ }
+
+ Collection* collection = autoDb.getDb()->getCollection(txn,
+ NamespaceString(systemUsers));
if (!collection) {
return;
}
+
IndexCatalog* indexCatalog = collection->getIndexCatalog();
IndexDescriptor* oldIndex = NULL;
+
+ WriteUnitOfWork wunit(txn);
while ((oldIndex = indexCatalog->findIndexByKeyPattern(txn, v1SystemUsersKeyPattern))) {
indexCatalog->dropIndex(txn, oldIndex);
}
- wctx.commit();
+ wunit.commit();
}
}
diff --git a/src/mongo/db/auth/auth_index_d.h b/src/mongo/db/auth/auth_index_d.h
index 964eec9fbb0..71568b7b377 100644
--- a/src/mongo/db/auth/auth_index_d.h
+++ b/src/mongo/db/auth/auth_index_d.h
@@ -45,12 +45,9 @@ namespace authindex {
/**
* Ensures that exactly the appropriate indexes to support authentication and authorization
- * are present for the given database.
- *
- * It is appropriate to call this function on new or existing databases, though it is
- * primarily intended for use on existing databases.
+ * are present in the admin database.
*/
- void configureSystemIndexes(OperationContext* txn, const StringData& dbname);
+ void configureSystemIndexes(OperationContext* txn);
} // namespace authindex
} // namespace mongo
diff --git a/src/mongo/db/auth/authz_manager_external_state_d.cpp b/src/mongo/db/auth/authz_manager_external_state_d.cpp
index 124d4b396b5..525c5ec6757 100644
--- a/src/mongo/db/auth/authz_manager_external_state_d.cpp
+++ b/src/mongo/db/auth/authz_manager_external_state_d.cpp
@@ -83,11 +83,11 @@ namespace mongo {
const BSONObj& query,
BSONObj* result) {
- Client::ReadContext ctx(txn, collectionName.ns());
+ AutoGetCollectionForRead ctx(txn, collectionName);
BSONObj found;
if (Helpers::findOne(txn,
- ctx.ctx().db()->getCollection(txn, collectionName),
+ ctx.getCollection(),
query,
found)) {
*result = found.getOwned();
diff --git a/src/mongo/db/catalog/collection_cursor_cache.cpp b/src/mongo/db/catalog/collection_cursor_cache.cpp
index f6c2a7793b7..057f6ddcd17 100644
--- a/src/mongo/db/catalog/collection_cursor_cache.cpp
+++ b/src/mongo/db/catalog/collection_cursor_cache.cpp
@@ -37,7 +37,7 @@
#include "mongo/db/catalog/database.h"
#include "mongo/db/catalog/database_holder.h"
#include "mongo/db/client.h"
-#include "mongo/db/operation_context_impl.h"
+#include "mongo/db/operation_context.h"
#include "mongo/db/query/plan_executor.h"
#include "mongo/platform/random.h"
#include "mongo/util/startup_test.h"
@@ -173,14 +173,14 @@ namespace mongo {
ns = it->second;
}
- NamespaceString nss( ns );
+ const NamespaceString nss( ns );
if ( checkAuth ) {
AuthorizationSession* as = txn->getClient()->getAuthorizationSession();
bool isAuthorized = as->isAuthorizedForActionsOnNamespace(
nss, ActionType::killCursors);
if ( !isAuthorized ) {
- audit::logKillCursorsAuthzCheck( currentClient.get(),
+ audit::logKillCursorsAuthzCheck( txn->getClient(),
nss,
id,
ErrorCodes::Unauthorized );
@@ -188,22 +188,22 @@ namespace mongo {
}
}
- Lock::DBRead lock(txn->lockState(), ns);
- Database* db = dbHolder().get(txn, ns);
- if ( !db )
+ AutoGetCollectionForRead ctx(txn, nss);
+ if (!ctx.getDb()) {
return false;
- Client::Context context(txn, ns, db );
- Collection* collection = db->getCollection( txn, ns );
+ }
+
+ Collection* collection = ctx.getCollection();
if ( !collection ) {
if ( checkAuth )
- audit::logKillCursorsAuthzCheck( currentClient.get(),
+ audit::logKillCursorsAuthzCheck( txn->getClient(),
nss,
id,
ErrorCodes::CursorNotFound );
return false;
}
- return collection->cursorCache()->eraseCursor( id, checkAuth );
+ return collection->cursorCache()->eraseCursor(txn, id, checkAuth);
}
std::size_t GlobalCursorIdCache::timeoutCursors(OperationContext* txn, int millisSinceLastCall) {
@@ -217,13 +217,12 @@ namespace mongo {
size_t totalTimedOut = 0;
for ( unsigned i = 0; i < todo.size(); i++ ) {
- const string& ns = todo[i];
- Lock::DBRead lock(txn->lockState(), ns);
- Database* db = dbHolder().get(txn, ns);
- if ( !db )
+ AutoGetCollectionForRead ctx(txn, todo[i]);
+ if (!ctx.getDb()) {
continue;
- Client::Context context(txn, ns, db );
- Collection* collection = db->getCollection( txn, ns );
+ }
+
+ Collection* collection = ctx.getCollection();
if ( collection == NULL ) {
continue;
}
@@ -464,14 +463,13 @@ namespace mongo {
_deregisterCursor_inlock( cc );
}
- bool CollectionCursorCache::eraseCursor( CursorId id, bool checkAuth ) {
-
+ bool CollectionCursorCache::eraseCursor(OperationContext* txn, CursorId id, bool checkAuth) {
SimpleMutex::scoped_lock lk( _mutex );
CursorMap::iterator it = _cursors.find( id );
if ( it == _cursors.end() ) {
if ( checkAuth )
- audit::logKillCursorsAuthzCheck( currentClient.get(),
+ audit::logKillCursorsAuthzCheck( txn->getClient(),
_nss,
id,
ErrorCodes::CursorNotFound );
@@ -481,7 +479,7 @@ namespace mongo {
ClientCursor* cursor = it->second;
if ( checkAuth )
- audit::logKillCursorsAuthzCheck( currentClient.get(),
+ audit::logKillCursorsAuthzCheck( txn->getClient(),
_nss,
id,
ErrorCodes::OK );
diff --git a/src/mongo/db/catalog/collection_cursor_cache.h b/src/mongo/db/catalog/collection_cursor_cache.h
index 77c0df16557..2aff607168b 100644
--- a/src/mongo/db/catalog/collection_cursor_cache.h
+++ b/src/mongo/db/catalog/collection_cursor_cache.h
@@ -94,7 +94,7 @@ namespace mongo {
CursorId registerCursor( ClientCursor* cc );
void deregisterCursor( ClientCursor* cc );
- bool eraseCursor( CursorId id, bool checkAuth );
+ bool eraseCursor(OperationContext* txn, CursorId id, bool checkAuth );
void getCursorIds( std::set<CursorId>* openCursors );
std::size_t numCursors();
diff --git a/src/mongo/db/catalog/database.cpp b/src/mongo/db/catalog/database.cpp
index ed12d41d38f..850ce3e3863 100644
--- a/src/mongo/db/catalog/database.cpp
+++ b/src/mongo/db/catalog/database.cpp
@@ -269,7 +269,6 @@ namespace mongo {
indexSize += collection->getIndexSize(opCtx);
}
- output->append ( "db" , _name );
output->appendNumber( "collections" , ncollections );
output->appendNumber( "objects" , objects );
output->append ( "avgObjSize" , objects == 0 ? 0 : double(size) / double(objects) );
@@ -507,11 +506,13 @@ namespace mongo {
if( n.size() == 0 ) return;
log() << "dropAllDatabasesExceptLocal " << n.size() << endl;
- for( vector<string>::iterator i = n.begin(); i != n.end(); i++ ) {
- if( *i != "local" ) {
+ for (vector<string>::iterator i = n.begin(); i != n.end(); i++) {
+ if (*i != "local") {
+ Database* db = dbHolder().get(txn, *i);
+ invariant(db);
+
WriteUnitOfWork wunit(txn);
- Client::Context ctx(txn, *i);
- dropDatabase(txn, ctx.db());
+ dropDatabase(txn, db);
wunit.commit();
}
}
@@ -539,7 +540,7 @@ namespace mongo {
txn->recoveryUnit()->syncDataAndTruncateJournal();
dbHolder().close( txn, name );
- db = 0; // d is now deleted
+ db = NULL; // d is now deleted
getGlobalEnvironment()->getGlobalStorageEngine()->dropDatabase( txn, name );
}
diff --git a/src/mongo/db/catalog/database_holder.cpp b/src/mongo/db/catalog/database_holder.cpp
index 7be6afb0744..16aa4b9e668 100644
--- a/src/mongo/db/catalog/database_holder.cpp
+++ b/src/mongo/db/catalog/database_holder.cpp
@@ -47,6 +47,23 @@ namespace mongo {
static DatabaseHolder _dbHolder;
+namespace {
+ static StringData _todb(const StringData& ns) {
+ size_t i = ns.find('.');
+ if (i == std::string::npos) {
+ uassert(13074, "db name can't be empty", ns.size());
+ return ns;
+ }
+
+ uassert(13075, "db name can't be empty", i > 0);
+
+ const StringData d = ns.substr(0, i);
+ uassert(13280, "invalid db name: " + ns.toString(), NamespaceString::validDBName(d));
+
+ return d;
+ }
+}
+
DatabaseHolder& dbHolder() {
return _dbHolder;
}
@@ -54,88 +71,88 @@ namespace mongo {
Database* DatabaseHolder::get(OperationContext* txn,
const StringData& ns) const {
- const StringData db = _todb( ns );
- txn->lockState()->assertAtLeastReadLocked(db);
+ const StringData db = _todb(ns);
+ invariant(txn->lockState()->isAtLeastReadLocked(db));
SimpleMutex::scoped_lock lk(_m);
DBs::const_iterator it = _dbs.find(db);
- if ( it != _dbs.end() )
+ if (it != _dbs.end()) {
return it->second;
+ }
+
return NULL;
}
- Database* DatabaseHolder::getOrCreate(OperationContext* txn,
- const StringData& ns,
- bool& justCreated) {
+ Database* DatabaseHolder::openDb(OperationContext* txn,
+ const StringData& ns,
+ bool* justCreated) {
- const StringData dbname = _todb( ns );
- invariant(txn->lockState()->isAtLeastReadLocked(dbname));
+ const StringData dbname = _todb(ns);
+ invariant(txn->lockState()->isWriteLocked(dbname));
Database* db = get(txn, ns);
if (db) {
- justCreated = false;
- return db;
- }
+ if (justCreated) {
+ *justCreated = false;
+ }
- // todo: protect against getting sprayed with requests for different db names that DNE -
- // that would make the DBs map very large. not clear what to do to handle though,
- // perhaps just log it, which is what we do here with the "> 40" :
- bool cant = !txn->lockState()->isWriteLocked(ns);
- if( logger::globalLogDomain()->shouldLog(logger::LogSeverity::Debug(1)) ||
- _dbs.size() > 40 || cant || DEBUG_BUILD ) {
- log() << "opening db: " << dbname;
+ return db;
}
- massert(15927, "can't open database in a read lock. if db was just closed, consider retrying the query. might otherwise indicate an internal error", !cant);
-
+ // Check casing
const string duplicate = Database::duplicateUncasedName(dbname.toString());
- if ( !duplicate.empty() ) {
+ if (!duplicate.empty()) {
stringstream ss;
ss << "db already exists with different case already have: ["
<< duplicate
<< "] trying to create ["
<< dbname.toString()
<< "]";
- uasserted( DatabaseDifferCaseCode , ss.str() );
+ uasserted(DatabaseDifferCaseCode, ss.str());
}
+
StorageEngine* storageEngine = getGlobalEnvironment()->getGlobalStorageEngine();
invariant(storageEngine);
DatabaseCatalogEntry* entry = storageEngine->getDatabaseCatalogEntry(txn, dbname);
invariant(entry);
- justCreated = !entry->exists();
+ if (justCreated) {
+ *justCreated = !entry->exists();
+ }
- db = new Database(dbname, entry);
+ // Only one thread can be inside this method for the same DB name, because of the
+ // requirement for X-lock on the database. So there is no way we can insert two different
+ // databases for the same name.
+ SimpleMutex::scoped_lock lk(_m);
- {
- SimpleMutex::scoped_lock lk(_m);
- _dbs[dbname] = db;
- }
+ db = new Database(dbname, entry);
+ _dbs[dbname] = db;
return db;
}
void DatabaseHolder::close(OperationContext* txn,
const StringData& ns) {
+ // TODO: This should be fine if only a DB X-lock
invariant(txn->lockState()->isW());
- StringData db = _todb(ns);
+ const StringData dbName = _todb(ns);
SimpleMutex::scoped_lock lk(_m);
- DBs::const_iterator it = _dbs.find(db);
- if ( it == _dbs.end() )
+
+ DBs::const_iterator it = _dbs.find(dbName);
+ if (it == _dbs.end()) {
return;
+ }
it->second->close( txn );
delete it->second;
- _dbs.erase( db );
+ _dbs.erase(it);
- getGlobalEnvironment()->getGlobalStorageEngine()->closeDatabase( txn, db.toString() );
+ getGlobalEnvironment()->getGlobalStorageEngine()->closeDatabase(txn, dbName.toString());
}
- bool DatabaseHolder::closeAll(OperationContext* txn,
- BSONObjBuilder& result,
- bool force) {
+ bool DatabaseHolder::closeAll(OperationContext* txn, BSONObjBuilder& result, bool force) {
invariant(txn->lockState()->isW());
SimpleMutex::scoped_lock lk(_m);
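For reference, the _todb() helper that moved into the anonymous namespace above implements a simple rule: the database name is everything before the first '.', or the whole string if there is no dot, and it must be non-empty (the validDBName() check then rejects invalid characters). A standalone sketch of that rule in plain C++, with uassert replaced by assert and the validity check elided:

    #include <cassert>
    #include <string>

    // Extracts the database component of a namespace string.
    std::string todb(const std::string& ns) {
        const size_t i = ns.find('.');
        if (i == std::string::npos) {
            assert(!ns.empty());   // 13074: "db name can't be empty"
            return ns;             // no dot: the whole string is the db name
        }
        assert(i > 0);             // 13075: "db name can't be empty"
        return ns.substr(0, i);    // 13280: validDBName() check elided here
    }

    // todb("test.foo") == "test"; todb("admin") == "admin"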
diff --git a/src/mongo/db/catalog/database_holder.h b/src/mongo/db/catalog/database_holder.h
index c4ca5a47280..dd3afdf2470 100644
--- a/src/mongo/db/catalog/database_holder.h
+++ b/src/mongo/db/catalog/database_holder.h
@@ -39,30 +39,41 @@ namespace mongo {
* Registry of opened databases.
*/
class DatabaseHolder {
- typedef StringMap<Database*> DBs;
- // todo: we want something faster than this if called a lot:
- mutable SimpleMutex _m;
- DBs _dbs;
public:
DatabaseHolder() : _m("dbholder") { }
- Database* get(OperationContext* txn,
- const StringData& ns) const;
-
- Database* getOrCreate(OperationContext* txn,
- const StringData& ns,
- bool& justCreated);
+ /**
+ * Retrieves an already opened database or returns NULL. Must be called with the database
+ * locked in at least IS-mode.
+ */
+ Database* get(OperationContext* txn, const StringData& ns) const;
+ /**
+ * Retrieves a database reference if it is already opened, or opens it if it hasn't been
+ * opened/created yet. Must be called with the database locked in X-mode.
+ *
+ * @param justCreated Returns whether the database was newly created (true) or it already
+ * existed (false). Can be NULL if this information is not necessary.
+ */
+ Database* openDb(OperationContext* txn, const StringData& ns, bool* justCreated = NULL);
+ /**
+ * Closes the specified database. Must be called with the database locked in X-mode.
+ */
void close(OperationContext* txn, const StringData& ns);
- /** @param force - force close even if something underway - use at shutdown */
- bool closeAll(OperationContext* txn,
- BSONObjBuilder& result,
- bool force);
+ /**
+ * Closes all opened databases. Must be called with the global lock acquired in X-mode.
+ *
+ * @param result Populated with the names of the databases that were closed.
+ * @param force Force close even if something underway - use at shutdown
+ */
+ bool closeAll(OperationContext* txn, BSONObjBuilder& result, bool force);
/**
- * need some lock
+ * Retrieves the names of all currently opened databases. Does not require locking, but
+ * the returned set of names is not guaranteed to remain valid unless a global lock is
+ * held, which would prevent databases from disappearing or being created.
*/
void getAllShortNames( std::set<std::string>& all ) const {
SimpleMutex::scoped_lock lk(_m);
@@ -72,20 +83,10 @@ namespace mongo {
}
private:
- static StringData _todb( const StringData& ns ) {
- StringData d = __todb( ns );
- uassert(13280, "invalid db name: " + ns.toString(), NamespaceString::validDBName(d));
- return d;
- }
- static StringData __todb( const StringData& ns ) {
- size_t i = ns.find( '.' );
- if ( i == std::string::npos ) {
- uassert( 13074 , "db name can't be empty" , ns.size() );
- return ns;
- }
- uassert( 13075 , "db name can't be empty" , i > 0 );
- return ns.substr( 0 , i );
- }
+ typedef StringMap<Database*> DBs;
+
+ mutable SimpleMutex _m;
+ DBs _dbs;
};
DatabaseHolder& dbHolder();
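The header now spells out the locking contracts: get() needs at least an IS-mode lock and may return NULL, while openDb() needs the database X-lock, creates the database if necessary, and never returns NULL. A hedged usage sketch (lock acquisition and the txn variable are assumed to exist at the call site):

    // Caller must already hold the database lock in X-mode.
    bool justCreated;
    Database* db = dbHolder().openDb(txn, "test", &justCreated);
    invariant(db);  // openDb() always returns a database

    // Read-only lookup; requires at least an IS-mode lock on "test".
    Database* maybeDb = dbHolder().get(txn, "test");  // NULL if not open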
diff --git a/src/mongo/db/client.cpp b/src/mongo/db/client.cpp
index 17901be98de..d2f58534349 100644
--- a/src/mongo/db/client.cpp
+++ b/src/mongo/db/client.cpp
@@ -186,56 +186,77 @@ namespace mongo {
_finishInit();
}
-
- /** "read lock, and set my context, all in one operation"
- * This handles (if not recursively locked) opening an unopened database.
- */
- Client::ReadContext::ReadContext(
- OperationContext* txn, const string& ns, bool doVersion) {
- {
- _lk.reset(new Lock::DBRead(txn->lockState(), ns));
- Database *db = dbHolder().get(txn, ns);
- if( db ) {
- _c.reset(new Context(txn, ns, db, doVersion));
- return;
- }
- }
- // we usually don't get here, so doesn't matter how fast this part is
- {
- DEV log(LogComponent::kStorage)
- << "_DEBUG ReadContext db wasn't open, will try to open " << ns << endl;
- if (txn->lockState()->isW()) {
- // write locked already
- WriteUnitOfWork wunit(txn);
- DEV RARELY log(LogComponent::kStorage)
- << "write locked on ReadContext construction " << ns << endl;
- _c.reset(new Context(txn, ns, doVersion));
- wunit.commit();
- }
- else if (!txn->lockState()->isRecursive()) {
- _lk.reset(0);
- {
- Lock::GlobalWrite w(txn->lockState());
- WriteUnitOfWork wunit(txn);
- Context c(txn, ns, doVersion);
- wunit.commit();
- }
- // db could be closed at this interim point -- that is ok, we will throw, and don't mind throwing.
- _lk.reset(new Lock::DBRead(txn->lockState(), ns));
- _c.reset(new Context(txn, ns, doVersion));
- }
- else {
- uasserted(15928, str::stream() << "can't open a database from a nested read lock " << ns);
- }
+ AutoGetDb::AutoGetDb(OperationContext* txn, const StringData& ns, newlm::LockMode mode)
+ : _dbLock(txn->lockState(), ns, mode),
+ _db(dbHolder().get(txn, ns)) {
+
+ }
+
+
+ AutoGetCollectionForRead::AutoGetCollectionForRead(OperationContext* txn,
+ const std::string& ns)
+ : _txn(txn),
+ _nss(ns),
+ _dbLock(_txn->lockState(), _nss.db(), newlm::MODE_IS),
+ _db(NULL),
+ _coll(NULL) {
+
+ _init();
+ }
+
+ AutoGetCollectionForRead::AutoGetCollectionForRead(OperationContext* txn,
+ const NamespaceString& nss)
+ : _txn(txn),
+ _nss(nss),
+ _dbLock(_txn->lockState(), _nss.db(), newlm::MODE_IS),
+ _db(NULL),
+ _coll(NULL) {
+
+ _init();
+ }
+
+ void AutoGetCollectionForRead::_init() {
+ invariant(!_nss.coll().empty());
+
+ // TODO: Client::Context legacy, needs to be removed
+ _txn->getCurOp()->ensureStarted();
+ _txn->getCurOp()->setNS(_nss.toString());
+
+ // Lock both the DB and the collection (DB is locked in the constructor), because this is
+ // necessary in order to do shard version checking.
+ const newlm::ResourceId resId(newlm::RESOURCE_COLLECTION, _nss);
+ const newlm::LockMode collLockMode = supportsDocLocking() ? newlm::MODE_IS : newlm::MODE_S;
+
+ invariant(newlm::LOCK_OK == _txn->lockState()->lock(resId, collLockMode));
+
+ // Shard version check needs to be performed under the collection lock
+ ensureShardVersionOKOrThrow(_nss);
+
+ // At this point, we are locked in shared mode for the database by the DB lock in the
+ // constructor, so it is safe to load the DB pointer.
+ _db = dbHolder().get(_txn, _nss.db());
+ if (_db != NULL) {
+ // TODO: Client::Context legacy, needs to be removed
+ _txn->getCurOp()->enter(_nss.toString().c_str(), _db->getProfilingLevel());
+
+ _coll = _db->getCollection(_txn, _nss);
+ }
+ }
+
+ AutoGetCollectionForRead::~AutoGetCollectionForRead() {
+ // If the database is NULL, we would never have tried to lock the collection resource
+ if (_db) {
+ const newlm::ResourceId resId(newlm::RESOURCE_COLLECTION, _nss);
+ _txn->lockState()->unlock(resId);
}
- // todo: are receipts of thousands of queries for a nonexisting database a potential
- // cause of bad performance due to the write lock acquisition above? let's fix that.
- // it would be easy to first check that there is at least a .ns file, or something similar.
+ // Report time spent in read lock
+ _txn->getCurOp()->recordGlobalTime(false, _timer.micros());
}
+
Client::WriteContext::WriteContext(
OperationContext* opCtx, const std::string& ns, bool doVersion)
: _lk(opCtx->lockState(), ns),
@@ -258,31 +279,14 @@ namespace mongo {
}
}
}
-
- // invoked from ReadContext
- Client::Context::Context(OperationContext* txn,
- const string& ns,
- Database *db,
- bool doVersion)
- : _client( currentClient.get() ),
- _justCreated(false),
- _doVersion( doVersion ),
- _ns( ns ),
- _db(db),
- _txn(txn) {
-
- verify(_db);
- if (_doVersion) checkNotStale();
- _client->_curOp->enter( this );
- }
void Client::Context::_finishInit() {
- _db = dbHolder().getOrCreate(_txn, _ns, _justCreated);
+ _db = dbHolder().openDb(_txn, _ns, &_justCreated);
invariant(_db);
if( _doVersion ) checkNotStale();
- _client->_curOp->enter( this );
+ _client->_curOp->enter(_ns.c_str(), _db->getProfilingLevel());
}
Client::Context::~Context() {
diff --git a/src/mongo/db/client.h b/src/mongo/db/client.h
index ef78f899154..a3eb0cee79e 100644
--- a/src/mongo/db/client.h
+++ b/src/mongo/db/client.h
@@ -39,6 +39,7 @@
#include "mongo/db/client_basic.h"
#include "mongo/db/concurrency/d_concurrency.h"
#include "mongo/db/lasterror.h"
+#include "mongo/db/namespace_string.h"
#include "mongo/db/operation_context.h"
#include "mongo/util/concurrency/threadlocal.h"
#include "mongo/util/paths.h"
@@ -50,6 +51,7 @@ namespace mongo {
class Database;
class CurOp;
class Client;
+ class Collection;
class AbstractMessagingPort;
@@ -128,22 +130,6 @@ namespace mongo {
public:
- class Context;
-
- /** "read lock, and set my context, all in one operation"
- * This handles (if not recursively locked) opening an unopened database.
- */
- class ReadContext : boost::noncopyable {
- public:
- ReadContext(OperationContext* txn,
- const std::string& ns,
- bool doVersion = true);
- Context& ctx() { return *_c.get(); }
- private:
- scoped_ptr<Lock::DBRead> _lk;
- scoped_ptr<Context> _c;
- };
-
/* Set database we want to use, then, restores when we finish (are out of scope)
Note this is also helpful if an exception happens as the state is fixed up.
*/
@@ -159,9 +145,6 @@ namespace mongo {
*/
Context(OperationContext* txn, const std::string& ns, Database * db);
- // used by ReadContext
- Context(OperationContext* txn, const std::string& ns, Database *db, bool doVersion);
-
~Context();
Client* getClient() const { return _client; }
Database* db() const { return _db; }
@@ -194,6 +177,7 @@ namespace mongo {
Timer _timer;
}; // class Client::Context
+
class WriteContext : boost::noncopyable {
public:
WriteContext(OperationContext* opCtx, const std::string& ns, bool doVersion = true);
@@ -209,10 +193,69 @@ namespace mongo {
Context _c;
};
-
}; // class Client
+ /**
+ * RAII-style class, which acquires a lock on the specified database in the requested mode and
+ * obtains a reference to the database. Used as a shortcut for calls to dbHolder().get().
+ *
+ * It is guaranteed that locks will be released when this object goes out of scope, therefore
+ * the database reference returned by this class should not be retained.
+ *
+ * TODO: This should be moved outside of client.h (maybe dbhelpers.h)
+ */
+ class AutoGetDb {
+ MONGO_DISALLOW_COPYING(AutoGetDb);
+ public:
+ AutoGetDb(OperationContext* txn, const StringData& ns, newlm::LockMode mode);
+
+ Database* getDb() const {
+ return _db;
+ }
+
+ private:
+ const Lock::DBLock _dbLock;
+ Database* const _db;
+ };
+
+ /**
+ * RAII-style class, which acquires the appropriate hierarchy of locks for obtaining
+ * a particular collection and retrieves a reference to the collection.
+ *
+ * It is guaranteed that locks will be released when this object goes out of scope, therefore
+ * database and collection references returned by this class should not be retained.
+ *
+ * TODO: This should be moved outside of client.h (maybe dbhelpers.h)
+ */
+ class AutoGetCollectionForRead {
+ MONGO_DISALLOW_COPYING(AutoGetCollectionForRead);
+ public:
+ AutoGetCollectionForRead(OperationContext* txn, const std::string& ns);
+ AutoGetCollectionForRead(OperationContext* txn, const NamespaceString& nss);
+ ~AutoGetCollectionForRead();
+
+ Database* getDb() const {
+ return _db;
+ }
+
+ Collection* getCollection() const {
+ return _coll;
+ }
+
+ private:
+ void _init();
+
+ const Timer _timer;
+ OperationContext* const _txn;
+ const NamespaceString _nss;
+ const Lock::DBLock _dbLock;
+
+ Database* _db;
+ Collection* _coll;
+ };
+
+
/** get the Client object for this thread. */
inline Client& cc() {
Client * c = currentClient.get();
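Different callers in this patch pick different lock modes for AutoGetDb: S-mode where a stable snapshot of the whole database is needed (dbhash, listCollections, listIndexes) and X-mode where catalog writes follow (the auth index repair earlier in this diff). A sketch of the mode choice; the namespaces are illustrative only:

    {
        // Shared mode: concurrent readers allowed, writers blocked, so the
        // set of collections cannot change while the lock is held.
        AutoGetDb autoDb(txn, "test", newlm::MODE_S);
        if (Database* db = autoDb.getDb()) {
            // enumerate collections against a stable snapshot
        }
    }
    {
        // Exclusive mode: required before catalog writes such as dropIndex.
        AutoGetDb autoDb(txn, "admin", newlm::MODE_X);
        if (Database* db = autoDb.getDb()) {
            // safe to modify the catalog under a WriteUnitOfWork
        }
    }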
diff --git a/src/mongo/db/cloner.cpp b/src/mongo/db/cloner.cpp
index 1686cfe2c10..eb75db0a55c 100644
--- a/src/mongo/db/cloner.cpp
+++ b/src/mongo/db/cloner.cpp
@@ -110,8 +110,7 @@ namespace mongo {
Lock::GlobalWrite lk(txn->lockState());
// Make sure database still exists after we resume from the temp release
- bool unused;
- Database* db = dbHolder().getOrCreate(txn, _dbName, unused);
+ Database* db = dbHolder().openDb(txn, _dbName);
bool createdCollection = false;
Collection* collection = NULL;
@@ -254,8 +253,7 @@ namespace mongo {
// We are under lock here again, so reload the database in case it may have disappeared
// during the temp release
- bool unused;
- Database* db = dbHolder().getOrCreate(txn, toDBName, unused);
+ Database* db = dbHolder().openDb(txn, toDBName);
Collection* collection = db->getCollection( txn, to_collection );
if ( !collection ) {
@@ -312,8 +310,7 @@ namespace mongo {
const string dbName = nss.db().toString();
- bool unused;
- Database* db = dbHolder().getOrCreate(txn, dbName, unused);
+ Database* db = dbHolder().openDb(txn, dbName);
// config
string temp = dbName + ".system.namespaces";
@@ -487,8 +484,7 @@ namespace mongo {
// Copy releases the lock, so we need to re-load the database. This should
// probably throw if the database has changed in between, but for now preserve
// the existing behaviour.
- bool unused;
- db = dbHolder().getOrCreate(txn, toDBName, unused);
+ db = dbHolder().openDb(txn, toDBName);
// we defer building id index for performance - building it in batch is much
// faster
diff --git a/src/mongo/db/cloner.h b/src/mongo/db/cloner.h
index 24f27befe47..169460257cc 100644
--- a/src/mongo/db/cloner.h
+++ b/src/mongo/db/cloner.h
@@ -31,19 +31,18 @@
#pragma once
#include "mongo/client/dbclientinterface.h"
-#include "mongo/db/client.h"
-#include "mongo/db/jsobj.h"
+#include "mongo/base/disallow_copying.h"
namespace mongo {
struct CloneOptions;
class DBClientBase;
- class DBClientCursor;
class NamespaceString;
class OperationContext;
- class Query;
- class Cloner: boost::noncopyable {
+
+ class Cloner {
+ MONGO_DISALLOW_COPYING(Cloner);
public:
Cloner();
diff --git a/src/mongo/db/commands/count.cpp b/src/mongo/db/commands/count.cpp
index e91b6e12fb5..d102bedf5df 100644
--- a/src/mongo/db/commands/count.cpp
+++ b/src/mongo/db/commands/count.cpp
@@ -77,8 +77,8 @@ namespace mongo {
request.explain = true;
// Acquire the db read lock.
- Client::ReadContext ctx(txn, request.ns);
- Collection* collection = ctx.ctx().db()->getCollection(txn, request.ns);
+ AutoGetCollectionForRead ctx(txn, request.ns);
+ Collection* collection = ctx.getCollection();
PlanExecutor* rawExec;
Status getExecStatus = getExecutorCount(txn, collection, request, &rawExec);
@@ -105,8 +105,8 @@ namespace mongo {
return appendCommandStatus(result, parseStatus);
}
- Client::ReadContext ctx(txn, request.ns);
- Collection* collection = ctx.ctx().db()->getCollection(txn, request.ns);
+ AutoGetCollectionForRead ctx(txn, request.ns);
+ Collection* collection = ctx.getCollection();
PlanExecutor* rawExec;
Status getExecStatus = getExecutorCount(txn, collection, request, &rawExec);
@@ -234,19 +234,19 @@ namespace mongo {
string &err,
int &errCode) {
- // Lock 'ns'.
- Client::ReadContext ctx(txn, ns);
- Collection* collection = ctx.ctx().db()->getCollection(txn, ns);
- const string& dbname = ctx.ctx().db()->name();
+ AutoGetCollectionForRead ctx(txn, ns);
+ Collection* collection = ctx.getCollection();
if (NULL == collection) {
err = "ns missing";
return -1;
}
+ const NamespaceString nss(ns);
+
CountRequest request;
CmdCount* countComm = static_cast<CmdCount*>(Command::findCommand("count"));
- Status parseStatus = countComm->parseRequest(dbname, cmd, &request);
+ Status parseStatus = countComm->parseRequest(nss.db().toString(), cmd, &request);
if (!parseStatus.isOK()) {
err = parseStatus.reason();
errCode = parseStatus.code();
diff --git a/src/mongo/db/commands/dbhash.cpp b/src/mongo/db/commands/dbhash.cpp
index 7518365a53a..596675d1ef6 100644
--- a/src/mongo/db/commands/dbhash.cpp
+++ b/src/mongo/db/commands/dbhash.cpp
@@ -64,7 +64,6 @@ namespace mongo {
}
string DBHashCmd::hashCollection( OperationContext* opCtx, Database* db, const string& fullCollectionName, bool* fromCache ) {
-
scoped_ptr<scoped_lock> cachedHashedLock;
if ( isCachable( fullCollectionName ) ) {
@@ -148,11 +147,14 @@ namespace mongo {
list<string> colls;
const string ns = parseNs(dbname, cmdObj);
- Client::ReadContext ctx(txn, ns);
- Database* db = ctx.ctx().db();
- if ( db )
- db->getDatabaseCatalogEntry()->getCollectionNamespaces( &colls );
- colls.sort();
+ // We lock the entire database in S-mode in order to ensure that the contents will not
+ // change for the snapshot.
+ AutoGetDb autoDb(txn, ns, newlm::MODE_S);
+ Database* db = autoDb.getDb();
+ if (db) {
+ db->getDatabaseCatalogEntry()->getCollectionNamespaces(&colls);
+ colls.sort();
+ }
result.appendNumber( "numCollections" , (long long)colls.size() );
result.append( "host" , prettyHostName() );
diff --git a/src/mongo/db/commands/distinct.cpp b/src/mongo/db/commands/distinct.cpp
index de6ce85b6c3..4f34ea89aba 100644
--- a/src/mongo/db/commands/distinct.cpp
+++ b/src/mongo/db/commands/distinct.cpp
@@ -70,7 +70,6 @@ namespace mongo {
bool fromRepl ) {
Timer t;
- string ns = dbname + '.' + cmdObj.firstElement().valuestr();
// ensure that the key is a string
uassert(18510,
@@ -98,10 +97,10 @@ namespace mongo {
BSONArrayBuilder arr( bb );
BSONElementSet values;
- Client::ReadContext ctx(txn, ns);
-
- Collection* collection = ctx.ctx().db()->getCollection( txn, ns );
+ const string ns = parseNs(dbname, cmdObj);
+ AutoGetCollectionForRead ctx(txn, ns);
+ Collection* collection = ctx.getCollection();
if (!collection) {
result.appendArray( "values" , BSONObj() );
result.append("stats", BSON("n" << 0 <<
diff --git a/src/mongo/db/commands/drop_indexes.cpp b/src/mongo/db/commands/drop_indexes.cpp
index ba93d95bc54..26ea4d8df9c 100644
--- a/src/mongo/db/commands/drop_indexes.cpp
+++ b/src/mongo/db/commands/drop_indexes.cpp
@@ -36,6 +36,7 @@
#include <vector>
#include "mongo/db/background.h"
+#include "mongo/db/client.h"
#include "mongo/db/commands.h"
#include "mongo/db/dbdirectclient.h"
#include "mongo/db/index_builder.h"
diff --git a/src/mongo/db/commands/find_cmd.cpp b/src/mongo/db/commands/find_cmd.cpp
index f1aef7ba2e5..9bd8d9935e9 100644
--- a/src/mongo/db/commands/find_cmd.cpp
+++ b/src/mongo/db/commands/find_cmd.cpp
@@ -73,22 +73,24 @@ namespace mongo {
}
auto_ptr<LiteParsedQuery> lpq(rawLpq);
- Client::ReadContext ctx(txn, fullns);
- // The collection may be NULL. If so, getExecutor() should handle it by returning
- // an execution tree with an EOFStage.
- Collection* collection = ctx.ctx().db()->getCollection(txn, fullns);
+ const NamespaceString nss(fullns);
// Finish the parsing step by using the LiteParsedQuery to create a CanonicalQuery.
// This requires a lock on the collection in case we're parsing $where: where-specific
// parsing code assumes we have a lock and creates execution machinery that requires it.
CanonicalQuery* rawCq;
- WhereCallbackReal whereCallback(txn, ctx.ctx().db()->name());
+ WhereCallbackReal whereCallback(txn, nss.db());
Status canonStatus = CanonicalQuery::canonicalize(lpq.release(), &rawCq, whereCallback);
if (!canonStatus.isOK()) {
return canonStatus;
}
auto_ptr<CanonicalQuery> cq(rawCq);
+ AutoGetCollectionForRead ctx(txn, nss);
+ // The collection may be NULL. If so, getExecutor() should handle it by returning
+ // an execution tree with an EOFStage.
+ Collection* collection = ctx.getCollection();
+
// We have a parsed query. Time to get the execution plan for it.
PlanExecutor* rawExec;
Status execStatus = Status::OK();
diff --git a/src/mongo/db/commands/geo_near_cmd.cpp b/src/mongo/db/commands/geo_near_cmd.cpp
index 2268eb18f6f..388e4c7bb27 100644
--- a/src/mongo/db/commands/geo_near_cmd.cpp
+++ b/src/mongo/db/commands/geo_near_cmd.cpp
@@ -32,6 +32,7 @@
#include "mongo/db/auth/action_type.h"
#include "mongo/db/auth/privilege.h"
#include "mongo/db/catalog/database.h"
+#include "mongo/db/client.h"
#include "mongo/db/commands.h"
#include "mongo/db/curop.h"
#include "mongo/db/geo/geoconstants.h"
@@ -69,22 +70,15 @@ namespace mongo {
}
bool run(OperationContext* txn, const string& dbname, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
- const string ns = dbname + "." + cmdObj.firstElement().valuestr();
-
if (!cmdObj["start"].eoo()) {
errmsg = "using deprecated 'start' argument to geoNear";
return false;
}
- Client::ReadContext ctx(txn, ns);
-
- Database* db = ctx.ctx().db();
- if ( !db ) {
- errmsg = "can't find ns";
- return false;
- }
+ const NamespaceString nss(parseNs(dbname, cmdObj));
+ AutoGetCollectionForRead ctx(txn, nss);
- Collection* collection = db->getCollection( txn, ns );
+ Collection* collection = ctx.getCollection();
if ( !collection ) {
errmsg = "can't find ns";
return false;
@@ -131,7 +125,7 @@ namespace mongo {
}
if (!cmdObj["uniqueDocs"].eoo()) {
- warning() << ns << ": ignoring deprecated uniqueDocs option in geoNear command";
+ warning() << nss << ": ignoring deprecated uniqueDocs option in geoNear command";
}
// And, build the full query expression.
@@ -170,11 +164,9 @@ namespace mongo {
"$dis" << BSON("$meta" << LiteParsedQuery::metaGeoNearDistance));
CanonicalQuery* cq;
-
- const NamespaceString nss(dbname);
const WhereCallbackReal whereCallback(txn, nss.db());
- if (!CanonicalQuery::canonicalize(ns,
+ if (!CanonicalQuery::canonicalize(nss,
rewritten,
BSONObj(),
projObj,
diff --git a/src/mongo/db/commands/group.cpp b/src/mongo/db/commands/group.cpp
index 1310ad381f3..376ed35823a 100644
--- a/src/mongo/db/commands/group.cpp
+++ b/src/mongo/db/commands/group.cpp
@@ -35,7 +35,7 @@
#include "mongo/db/auth/authorization_session.h"
#include "mongo/db/auth/privilege.h"
#include "mongo/db/catalog/database.h"
-#include "mongo/db/client_basic.h"
+#include "mongo/db/client.h"
#include "mongo/db/exec/group.h"
#include "mongo/db/exec/working_set_common.h"
#include "mongo/db/query/get_executor.h"
@@ -134,8 +134,8 @@ namespace mongo {
return appendCommandStatus(out, parseRequestStatus);
}
- Client::ReadContext ctx(txn, groupRequest.ns);
- Collection* coll = ctx.ctx().db()->getCollection(txn, groupRequest.ns);
+ AutoGetCollectionForRead ctx(txn, groupRequest.ns);
+ Collection* coll = ctx.getCollection();
PlanExecutor *rawPlanExecutor;
Status getExecStatus = getExecutorGroup(txn, coll, groupRequest, &rawPlanExecutor);
@@ -186,8 +186,8 @@ namespace mongo {
groupRequest.explain = true;
- Client::ReadContext ctx(txn, groupRequest.ns);
- Collection* coll = ctx.ctx().db()->getCollection(txn, groupRequest.ns);
+ AutoGetCollectionForRead ctx(txn, groupRequest.ns);
+ Collection* coll = ctx.getCollection();
PlanExecutor *rawPlanExecutor;
Status getExecStatus = getExecutorGroup(txn, coll, groupRequest, &rawPlanExecutor);
diff --git a/src/mongo/db/commands/index_filter_commands.cpp b/src/mongo/db/commands/index_filter_commands.cpp
index ab47fe40fe7..a876a0a509a 100644
--- a/src/mongo/db/commands/index_filter_commands.cpp
+++ b/src/mongo/db/commands/index_filter_commands.cpp
@@ -169,11 +169,12 @@ namespace mongo {
BSONObj& cmdObj,
BSONObjBuilder* bob) {
// This is a read lock. The query settings is owned by the collection.
- Client::ReadContext ctx(txn, ns);
- Collection* collection = ctx.ctx().db()->getCollection(txn, ns);
+ AutoGetCollectionForRead ctx(txn, ns);
+
QuerySettings* querySettings;
PlanCache* unused;
- Status status = getQuerySettingsAndPlanCache(txn, collection, ns, &querySettings, &unused);
+ Status status =
+ getQuerySettingsAndPlanCache(txn, ctx.getCollection(), ns, &querySettings, &unused);
if (!status.isOK()) {
// No collection - return empty array of filters.
BSONArrayBuilder hintsBuilder(bob->subarrayStart("filters"));
@@ -231,12 +232,12 @@ namespace mongo {
BSONObj& cmdObj,
BSONObjBuilder* bob) {
// This is a read lock. The query settings is owned by the collection.
- Client::ReadContext ctx(txn, ns);
- Collection* collection = ctx.ctx().db()->getCollection(txn, ns);
+ AutoGetCollectionForRead ctx(txn, ns);
+
QuerySettings* querySettings;
PlanCache* planCache;
Status status =
- getQuerySettingsAndPlanCache(txn, collection, ns, &querySettings, &planCache);
+ getQuerySettingsAndPlanCache(txn, ctx.getCollection(), ns, &querySettings, &planCache);
if (!status.isOK()) {
// No collection - do nothing.
return Status::OK();
@@ -326,12 +327,13 @@ namespace mongo {
BSONObj& cmdObj,
BSONObjBuilder* bob) {
// This is a read lock. The query settings is owned by the collection.
- Client::ReadContext ctx(txn, ns);
- Collection* collection = ctx.ctx().db()->getCollection(txn, ns);
+ const NamespaceString nss(ns);
+ AutoGetCollectionForRead ctx(txn, nss);
+
QuerySettings* querySettings;
PlanCache* planCache;
Status status =
- getQuerySettingsAndPlanCache(txn, collection, ns, &querySettings, &planCache);
+ getQuerySettingsAndPlanCache(txn, ctx.getCollection(), ns, &querySettings, &planCache);
if (!status.isOK()) {
return status;
}
diff --git a/src/mongo/db/commands/list_collections.cpp b/src/mongo/db/commands/list_collections.cpp
index 720542cd580..e00872f983e 100644
--- a/src/mongo/db/commands/list_collections.cpp
+++ b/src/mongo/db/commands/list_collections.cpp
@@ -67,9 +67,9 @@ namespace mongo {
BSONObjBuilder& result,
bool /*fromRepl*/) {
- Lock::DBRead lk( txn->lockState(), dbname );
+ AutoGetDb autoDb(txn, dbname, newlm::MODE_S);
- const Database* d = dbHolder().get( txn, dbname );
+ const Database* d = autoDb.getDb();
const DatabaseCatalogEntry* dbEntry = NULL;
list<string> names;
diff --git a/src/mongo/db/commands/list_databases.cpp b/src/mongo/db/commands/list_databases.cpp
index 4c87043f9c9..8a11e595231 100644
--- a/src/mongo/db/commands/list_databases.cpp
+++ b/src/mongo/db/commands/list_databases.cpp
@@ -88,10 +88,7 @@ namespace mongo {
b.append( "sizeOnDisk", (double) size );
totalSize += size;
- {
- Client::ReadContext rc(txn, *i );
- b.appendBool( "empty", rc.ctx().db()->getDatabaseCatalogEntry()->isEmpty() );
- }
+ b.appendBool("empty", size == 0);
dbInfos.push_back( b.obj() );
@@ -99,24 +96,27 @@ namespace mongo {
}
set<string> allShortNames;
- {
- Lock::GlobalRead lk(txn->lockState());
- dbHolder().getAllShortNames(allShortNames);
- }
+ dbHolder().getAllShortNames(allShortNames);
for ( set<string>::iterator i = allShortNames.begin(); i != allShortNames.end(); i++ ) {
string name = *i;
- if ( seen.count( name ) )
+ if (seen.count(name)) {
continue;
+ }
+
+ // This should never happen once the write collection locking changes are in
+ // invariant(false);
BSONObjBuilder b;
b.append( "name" , name );
b.append( "sizeOnDisk" , (double)1.0 );
{
- Client::ReadContext ctx(txn, name);
- b.appendBool( "empty", ctx.ctx().db()->getDatabaseCatalogEntry()->isEmpty() );
+ // This will open the database, if it was closed
+ AutoGetDb autoDb(txn, *i, newlm::MODE_S);
+ Database* db = autoDb.getDb();
+ b.appendBool("empty", db->getDatabaseCatalogEntry()->isEmpty());
}
dbInfos.push_back( b.obj() );
diff --git a/src/mongo/db/commands/list_indexes.cpp b/src/mongo/db/commands/list_indexes.cpp
index 7849417e370..bc526a834b8 100644
--- a/src/mongo/db/commands/list_indexes.cpp
+++ b/src/mongo/db/commands/list_indexes.cpp
@@ -67,15 +67,15 @@ namespace mongo {
BSONObjBuilder& result,
bool /*fromRepl*/) {
- string ns = parseNs( dbname, cmdObj );
-
- Lock::DBRead lock( txn->lockState(), dbname );
- const Database* d = dbHolder().get( txn, dbname );
+ AutoGetDb autoDb(txn, dbname, newlm::MODE_S);
+ const Database* d = autoDb.getDb();
if ( !d ) {
return appendCommandStatus( result, Status( ErrorCodes::NamespaceNotFound,
"no database" ) );
}
+ const string ns = parseNs(dbname, cmdObj);
+
const DatabaseCatalogEntry* dbEntry = d->getDatabaseCatalogEntry();
const CollectionCatalogEntry* cce = dbEntry->getCollectionCatalogEntry( txn, ns );
if ( !cce ) {
diff --git a/src/mongo/db/commands/mr.cpp b/src/mongo/db/commands/mr.cpp
index e58e001a221..d5613aea554 100644
--- a/src/mongo/db/commands/mr.cpp
+++ b/src/mongo/db/commands/mr.cpp
@@ -983,7 +983,7 @@ namespace mongo {
verify( foundIndex );
}
- scoped_ptr<Client::ReadContext> ctx(new Client::ReadContext(_txn, _config.incLong));
+ scoped_ptr<AutoGetCollectionForRead> ctx(new AutoGetCollectionForRead(_txn, _config.incLong));
BSONObj prev;
BSONList all;
@@ -1004,7 +1004,7 @@ namespace mongo {
whereCallback).isOK());
PlanExecutor* rawExec;
- verify(getExecutor(_txn, getCollectionOrUassert(ctx->ctx().db(), _config.incLong),
+ verify(getExecutor(_txn, getCollectionOrUassert(ctx->getDb(), _config.incLong),
cq, &rawExec, QueryPlannerParams::NO_TABLE_SCAN).isOK());
auto_ptr<PlanExecutor> exec(rawExec);
@@ -1039,7 +1039,7 @@ namespace mongo {
// reduce a finalize array
finalReduce( all );
- ctx.reset(new Client::ReadContext(_txn, _config.incLong));
+ ctx.reset(new AutoGetCollectionForRead(_txn, _config.incLong));
all.clear();
prev = o;
@@ -1055,7 +1055,7 @@ namespace mongo {
ctx.reset();
// reduce and finalize last array
finalReduce( all );
- ctx.reset(new Client::ReadContext(_txn, _config.incLong));
+ ctx.reset(new AutoGetCollectionForRead(_txn, _config.incLong));
pm.finished();
}
@@ -1270,10 +1270,12 @@ namespace mongo {
// Prevent sharding state from changing during the MR.
auto_ptr<RangePreserver> rangePreserver;
{
- Client::ReadContext ctx(txn, config.ns);
- Collection* collection = ctx.ctx().db()->getCollection( txn, config.ns );
- if ( collection )
+ AutoGetCollectionForRead ctx(txn, config.ns);
+
+ Collection* collection = ctx.getCollection();
+ if (collection) {
rangePreserver.reset(new RangePreserver(collection));
+ }
// Get metadata before we check our version, to make sure it doesn't increment
// in the meantime. Need to do this in the same lock scope as the block.
@@ -1332,14 +1334,11 @@ namespace mongo {
{
// We've got a cursor preventing migrations off, now re-establish our useful cursor
- // Need lock and context to use it
- scoped_ptr<Lock::DBRead> lock(new Lock::DBRead(txn->lockState(), config.ns));
+ const NamespaceString nss(config.ns);
- // This context does no version check, safe b/c we checked earlier and have an
- // open cursor
- scoped_ptr<Client::Context> ctx(new Client::Context(txn, config.ns, false));
+ // Need lock and context to use it
+ scoped_ptr<Lock::DBRead> lock(new Lock::DBRead(txn->lockState(), nss.db()));
- const NamespaceString nss(config.ns);
const WhereCallbackReal whereCallback(txn, nss.db());
CanonicalQuery* cq;
@@ -1353,8 +1352,11 @@ namespace mongo {
return 0;
}
+ Database* db = dbHolder().get(txn, nss.db());
+ invariant(db);
+
PlanExecutor* rawExec;
- if (!getExecutor(txn, state.getCollectionOrUassert(ctx->db(), config.ns),
+ if (!getExecutor(txn, state.getCollectionOrUassert(db, config.ns),
cq, &rawExec).isOK()) {
uasserted(17239, "Can't get executor for query "
+ config.filter.toString());
@@ -1396,12 +1398,21 @@ namespace mongo {
// TODO: As an optimization, we might want to do the save/restore
// state and yield inside the reduceAndSpillInMemoryState method, so
// it only happens if necessary.
- ctx.reset();
lock.reset();
state.reduceAndSpillInMemoryStateIfNeeded();
- lock.reset(new Lock::DBRead(txn->lockState(), config.ns));
-
- ctx.reset(new Client::Context(txn, config.ns, false));
+ lock.reset(new Lock::DBRead(txn->lockState(), nss.db()));
+
+ // Need to reload the database, in case it was dropped after we
+ // released the lock
+ db = dbHolder().get(txn, nss.db());
+ if (db == NULL) {
+ // Database was deleted after we freed the lock
+ StringBuilder sb;
+ sb << "Database "
+ << nss.db()
+ << " was deleted in the middle of the reduce job.";
+ uasserted(28523, sb.str());
+ }
reduceTime += t.micros();
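The map/reduce change above illustrates the pattern this commit uses whenever a lock is released mid-operation: after reacquiring the lock, the Database pointer must be fetched again from dbHolder() and re-checked, since the database may have been dropped in the interim. Schematically (condensed from the hunk above; the patch itself builds the 28523 message with a StringBuilder):

    lock.reset();                                    // yield the DB lock
    state.reduceAndSpillInMemoryStateIfNeeded();     // work while unlocked
    lock.reset(new Lock::DBRead(txn->lockState(), nss.db()));

    Database* db = dbHolder().get(txn, nss.db());    // reload after yield
    uassert(28523,
            "Database was deleted in the middle of the reduce job.",
            db != NULL);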
diff --git a/src/mongo/db/commands/parallel_collection_scan.cpp b/src/mongo/db/commands/parallel_collection_scan.cpp
index 2e653880b80..449254bc138 100644
--- a/src/mongo/db/commands/parallel_collection_scan.cpp
+++ b/src/mongo/db/commands/parallel_collection_scan.cpp
@@ -74,11 +74,9 @@ namespace mongo {
NamespaceString ns( dbname, cmdObj[name].String() );
- Client::ReadContext ctx(txn, ns.ns());
-
- Database* db = ctx.ctx().db();
- Collection* collection = db->getCollection( txn, ns );
+ AutoGetCollectionForRead ctx(txn, ns.ns());
+ Collection* collection = ctx.getCollection();
if ( !collection )
return appendCommandStatus( result,
Status( ErrorCodes::NamespaceNotFound,
diff --git a/src/mongo/db/commands/pipeline_command.cpp b/src/mongo/db/commands/pipeline_command.cpp
index f360d550f7f..dc2581b3a12 100644
--- a/src/mongo/db/commands/pipeline_command.cpp
+++ b/src/mongo/db/commands/pipeline_command.cpp
@@ -26,7 +26,7 @@
* it in the license file.
*/
-#include "mongo/pch.h"
+#include "mongo/platform/basic.h"
#include <boost/smart_ptr.hpp>
#include <vector>
@@ -223,9 +223,9 @@ namespace mongo {
// sharding version that we synchronize on here. This is also why we always need to
// create a ClientCursor even when we aren't outputting to a cursor. See the comment
// on ShardFilterStage for more details.
- Client::ReadContext ctx(txn, ns);
+ AutoGetCollectionForRead ctx(txn, ns);
- Collection* collection = ctx.ctx().db()->getCollection(txn, ns);
+ Collection* collection = ctx.getCollection();
// This does mongod-specific stuff like creating the input PlanExecutor and adding
// it to the front of the pipeline if needed.
diff --git a/src/mongo/db/commands/plan_cache_commands.cpp b/src/mongo/db/commands/plan_cache_commands.cpp
index 601e9b47318..3f9223aa15c 100644
--- a/src/mongo/db/commands/plan_cache_commands.cpp
+++ b/src/mongo/db/commands/plan_cache_commands.cpp
@@ -225,10 +225,10 @@ namespace mongo {
BSONObj& cmdObj,
BSONObjBuilder* bob) {
// This is a read lock. The query cache is owned by the collection.
- Client::ReadContext ctx(txn, ns);
- Collection* collection = ctx.ctx().db()->getCollection(txn, ns);
+ AutoGetCollectionForRead ctx(txn, ns);
+
PlanCache* planCache;
- Status status = getPlanCache(txn, collection, ns, &planCache);
+ Status status = getPlanCache(txn, ctx.getCollection(), ns, &planCache);
if (!status.isOK()) {
// No collection - return results with empty shapes array.
BSONArrayBuilder arrayBuilder(bob->subarrayStart("shapes"));
@@ -273,10 +273,10 @@ namespace mongo {
BSONObj& cmdObj,
BSONObjBuilder* bob) {
// This is a read lock. The query cache is owned by the collection.
- Client::ReadContext ctx(txn, ns);
- Collection* collection = ctx.ctx().db()->getCollection(txn, ns);
+ AutoGetCollectionForRead ctx(txn, ns);
+
PlanCache* planCache;
- Status status = getPlanCache(txn, collection, ns, &planCache);
+ Status status = getPlanCache(txn, ctx.getCollection(), ns, &planCache);
if (!status.isOK()) {
// No collection - nothing to do. Return OK status.
return Status::OK();
@@ -347,10 +347,10 @@ namespace mongo {
const std::string& ns,
BSONObj& cmdObj,
BSONObjBuilder* bob) {
- Client::ReadContext ctx(txn, ns);
- Collection* collection = ctx.ctx().db()->getCollection(txn, ns);
+ AutoGetCollectionForRead ctx(txn, ns);
+
PlanCache* planCache;
- Status status = getPlanCache(txn, collection, ns, &planCache);
+ Status status = getPlanCache(txn, ctx.getCollection(), ns, &planCache);
if (!status.isOK()) {
// No collection - return empty plans array.
BSONArrayBuilder plansBuilder(bob->subarrayStart("plans"));
diff --git a/src/mongo/db/commands/rename_collection.cpp b/src/mongo/db/commands/rename_collection.cpp
index dd613705eac..12b44ad77ad 100644
--- a/src/mongo/db/commands/rename_collection.cpp
+++ b/src/mongo/db/commands/rename_collection.cpp
@@ -178,8 +178,7 @@ namespace mongo {
// Dismissed on success
ScopeGuard indexBuildRestorer = MakeGuard(IndexBuilder::restoreIndexes, indexesInProg);
- bool unused;
- Database* const targetDB = dbHolder().getOrCreate(txn, nsToDatabase(target), unused);
+ Database* const targetDB = dbHolder().openDb(txn, nsToDatabase(target));
{
WriteUnitOfWork wunit(txn);
diff --git a/src/mongo/db/commands/touch.cpp b/src/mongo/db/commands/touch.cpp
index 187ba27ab04..34cdc891c78 100644
--- a/src/mongo/db/commands/touch.cpp
+++ b/src/mongo/db/commands/touch.cpp
@@ -30,7 +30,7 @@
* it in the license file.
*/
-#include "mongo/pch.h"
+#include "mongo/platform/basic.h"
#include <string>
#include <vector>
@@ -41,8 +41,8 @@
#include "mongo/db/auth/privilege.h"
#include "mongo/db/catalog/collection.h"
#include "mongo/db/catalog/database.h"
+#include "mongo/db/client.h"
#include "mongo/db/commands.h"
-#include "mongo/db/curop.h"
#include "mongo/db/concurrency/d_concurrency.h"
#include "mongo/db/jsobj.h"
#include "mongo/db/operation_context_impl.h"
@@ -85,7 +85,7 @@ namespace mongo {
return false;
}
- NamespaceString nss( dbname, coll );
+ const NamespaceString nss( dbname, coll );
if ( ! nss.isNormal() ) {
errmsg = "bad namespace name";
return false;
@@ -99,10 +99,9 @@ namespace mongo {
return false;
}
- Client::ReadContext context(txn, nss.ns());
+ AutoGetCollectionForRead context(txn, nss);
- Database* db = context.ctx().db();
- Collection* collection = db->getCollection( txn, nss.ns() );
+ Collection* collection = context.getCollection();
if ( !collection ) {
errmsg = "collection not found";
return false;
diff --git a/src/mongo/db/commands/validate.cpp b/src/mongo/db/commands/validate.cpp
index bbddae56505..650087760fc 100644
--- a/src/mongo/db/commands/validate.cpp
+++ b/src/mongo/db/commands/validate.cpp
@@ -77,17 +77,11 @@ namespace mongo {
LOG(0) << "CMD: validate " << ns << endl;
}
- Client::ReadContext ctx(txn, ns_string.ns());
+ AutoGetCollectionForRead ctx(txn, ns_string.ns());
- Database* db = ctx.ctx().db();
- if ( !db ) {
- errmsg = "database not found";
- return false;
- }
-
- Collection* collection = db->getCollection( txn, ns );
+ Collection* collection = ctx.getCollection();
if ( !collection ) {
- errmsg = "collection not found";
+ errmsg = "ns not found";
return false;
}
diff --git a/src/mongo/db/commands/write_commands/batch_executor.cpp b/src/mongo/db/commands/write_commands/batch_executor.cpp
index 8b21578ce89..1e433e82e66 100644
--- a/src/mongo/db/commands/write_commands/batch_executor.cpp
+++ b/src/mongo/db/commands/write_commands/batch_executor.cpp
@@ -914,7 +914,9 @@ namespace mongo {
bool WriteBatchExecutor::ExecInsertsState::_lockAndCheckImpl(WriteOpResult* result) {
if (hasLock()) {
- txn->getCurOp()->enter(_context.get());
+ // TODO: Client::Context legacy, needs to be removed
+ txn->getCurOp()->enter(_context->ns(),
+ _context->db() ? _context->db()->getProfilingLevel() : 0);
return true;
}
diff --git a/src/mongo/db/commands/write_commands/write_commands.cpp b/src/mongo/db/commands/write_commands/write_commands.cpp
index a51fac45f9d..fb014322e4a 100644
--- a/src/mongo/db/commands/write_commands/write_commands.cpp
+++ b/src/mongo/db/commands/write_commands/write_commands.cpp
@@ -31,6 +31,7 @@
#include "mongo/base/init.h"
#include "mongo/bson/mutable/document.h"
#include "mongo/bson/mutable/element.h"
+#include "mongo/db/client.h"
#include "mongo/db/commands/write_commands/batch_executor.h"
#include "mongo/db/commands/write_commands/write_commands_common.h"
#include "mongo/db/curop.h"
diff --git a/src/mongo/db/curop.cpp b/src/mongo/db/curop.cpp
index 561b6253c1c..55ab25d6570 100644
--- a/src/mongo/db/curop.cpp
+++ b/src/mongo/db/curop.cpp
@@ -28,9 +28,11 @@
#include "mongo/platform/basic.h"
+#include "mongo/db/curop.h"
+
#include "mongo/base/counter.h"
+#include "mongo/db/client.h"
#include "mongo/db/commands/server_status_metric.h"
-#include "mongo/db/curop.h"
#include "mongo/db/catalog/database.h"
#include "mongo/db/stats/top.h"
#include "mongo/util/fail_point_service.h"
@@ -145,10 +147,10 @@ namespace mongo {
}
}
- void CurOp::enter( Client::Context * context ) {
+ void CurOp::enter(const char* ns, int dbProfileLevel) {
ensureStarted();
- _ns = context->ns();
- _dbprofile = std::max( context->_db ? context->_db->getProfilingLevel() : 0 , _dbprofile );
+ _ns = ns;
+ _dbprofile = std::max(dbProfileLevel, _dbprofile);
}
void CurOp::recordGlobalTime(bool isWriteLocked, long long micros) const {
diff --git a/src/mongo/db/curop.h b/src/mongo/db/curop.h
index 84da239c085..65660d8889a 100644
--- a/src/mongo/db/curop.h
+++ b/src/mongo/db/curop.h
@@ -43,6 +43,7 @@
namespace mongo {
+ class Client;
class Command;
class CurOp;
@@ -201,12 +202,11 @@ namespace mongo {
BSONObj query() const { return _query.get(); }
void appendQuery( BSONObjBuilder& b , const StringData& name ) const { _query.append( b , name ); }
- void enter( Client::Context * context );
+ void enter(const char* ns, int dbProfileLevel);
void reset();
void reset( const HostAndPort& remote, int op );
void markCommand() { _isCommand = true; }
OpDebug& debug() { return _debug; }
- int profileLevel() const { return _dbprofile; }
string getNS() const { return _ns.toString(); }
bool shouldDBProfile( int ms ) const {
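Note: CurOp::enter() no longer takes a Client::Context*, which removes curop's dependency on client.h; callers pass the namespace and profiling level explicitly. A sketch of the updated caller shape, mirroring the batch_executor.cpp hunk above (the _context member there is assumed to still be a Client::Context):

    txn->getCurOp()->enter(_context->ns(),
                           _context->db() ? _context->db()->getProfilingLevel() : 0);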
diff --git a/src/mongo/db/db.cpp b/src/mongo/db/db.cpp
index 70ca0ab1a81..d6327660110 100644
--- a/src/mongo/db/db.cpp
+++ b/src/mongo/db/db.cpp
@@ -335,12 +335,11 @@ namespace mongo {
|| replSettings.usingReplSets()
|| replSettings.slave == repl::SimpleSlave);
- for ( vector< string >::iterator i = dbNames.begin(); i != dbNames.end(); ++i ) {
+ for (vector<string>::const_iterator i = dbNames.begin(); i != dbNames.end(); ++i) {
const string dbName = *i;
LOG(1) << " Recovering database: " << dbName << endl;
- bool unusedJustCreated;
- Database* db = dbHolder().getOrCreate(&txn, dbName, unusedJustCreated);
+ Database* db = dbHolder().openDb(&txn, dbName);
invariant(db);
// First thing after opening the database is to check for file compatibility,
@@ -410,6 +409,7 @@ namespace mongo {
const repl::ReplSettings& replSettings =
repl::getGlobalReplicationCoordinator()->getSettings();
+
{
ProcessId pid = ProcessId::getCurrent();
LogstreamBuilder l = log(LogComponent::kDefault);
@@ -512,16 +512,18 @@ namespace mongo {
{
OperationContextImpl txn;
- const unsigned long long missingRepl = checkIfReplMissingFromCommandLine(&txn);
+ const unsigned long long missingRepl = checkIfReplMissingFromCommandLine(&txn);
if (missingRepl) {
log() << startupWarningsLog;
log() << "** WARNING: mongod started without --replSet yet " << missingRepl
<< " documents are present in local.system.replset" << startupWarningsLog;
- log() << "** Restart with --replSet unless you are doing maintenance and no"
- << " other clients are connected." << startupWarningsLog;
- log() << "** The TTL collection monitor will not start because of this." << startupWarningsLog;
- log() << "** For more info see http://dochub.mongodb.org/core/ttlcollections" << startupWarningsLog;
+ log() << "** Restart with --replSet unless you are doing maintenance and "
+ << " no other clients are connected." << startupWarningsLog;
+ log() << "** The TTL collection monitor will not start because of this."
+ << startupWarningsLog;
+ log() << "** ";
+ log() << " For more info see http://dochub.mongodb.org/core/ttlcollections";
log() << startupWarningsLog;
}
else {
@@ -532,13 +534,7 @@ namespace mongo {
mongo::signalForkSuccess();
#endif
- if (getGlobalAuthorizationManager()->isAuthEnabled()) {
- // open admin db in case we need to use it later. TODO this is not the right way to
- // resolve this.
- Client::WriteContext ctx(&txn, "admin");
- }
-
- authindex::configureSystemIndexes(&txn, "admin");
+ authindex::configureSystemIndexes(&txn);
// SERVER-14090: Verify that auth schema version is schemaVersion26Final.
int foundSchemaVersion;
@@ -564,7 +560,7 @@ namespace mongo {
getDeleter()->startWorkers();
- restartInProgressIndexesFromLastShutdown();
+ restartInProgressIndexesFromLastShutdown(&txn);
repl::getGlobalReplicationCoordinator()->startReplication(&txn);
}
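Note: dbHolder().getOrCreate() is renamed to openDb() in this commit, with the justCreated out-parameter made optional: the db.cpp, rs_rollback.cpp, and repair_database.cpp hunks drop it, while the namespacetests.cpp hunks pass it by pointer. A sketch of both forms, assuming the appropriate write lock is already held:

    Database* db = dbHolder().openDb(&txn, dbName);                 // don't care if created

    bool justCreated;
    Database* db2 = dbHolder().openDb(&txn, dbName, &justCreated);  // out-param is optional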
diff --git a/src/mongo/db/dbcommands.cpp b/src/mongo/db/dbcommands.cpp
index 31b0c8a8475..181db9e4012 100644
--- a/src/mongo/db/dbcommands.cpp
+++ b/src/mongo/db/dbcommands.cpp
@@ -350,7 +350,6 @@ namespace mongo {
// in the local database.
//
Lock::DBWrite dbXLock(txn->lockState(), dbname);
- WriteUnitOfWork wunit(txn);
Client::Context ctx(txn, dbname);
BSONElement e = cmdObj.firstElement();
@@ -367,10 +366,10 @@ namespace mongo {
}
BSONElement slow = cmdObj["slowms"];
- if ( slow.isNumber() )
+ if (slow.isNumber()) {
serverGlobalParams.slowMS = slow.numberInt();
+ }
- wunit.commit();
return ok;
}
} cmdProfile;
@@ -637,8 +636,8 @@ namespace mongo {
// Check shard version at startup.
// This will throw before we've done any work if shard version is outdated
- Client::ReadContext ctx(txn, ns);
- Collection* coll = ctx.ctx().db()->getCollection(txn, ns);
+ AutoGetCollectionForRead ctx(txn, ns);
+ Collection* coll = ctx.getCollection();
CanonicalQuery* cq;
if (!CanonicalQuery::canonicalize(ns, query, sort, BSONObj(), &cq).isOK()) {
@@ -748,9 +747,9 @@ namespace mongo {
BSONObj keyPattern = jsobj.getObjectField( "keyPattern" );
bool estimate = jsobj["estimate"].trueValue();
- Client::ReadContext ctx(txn, ns);
+ AutoGetCollectionForRead ctx(txn, ns);
- Collection* collection = ctx.ctx().db()->getCollection( txn, ns );
+ Collection* collection = ctx.getCollection();
if ( !collection || collection->numRecords(txn) == 0 ) {
result.appendNumber( "size" , 0 );
@@ -864,17 +863,6 @@ namespace mongo {
}
bool run(OperationContext* txn, const string& dbname, BSONObj& jsobj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
- const string ns = dbname + "." + jsobj.firstElement().valuestr();
- Client::ReadContext cx(txn, ns);
- Database* db = cx.ctx().db();
- Collection* collection = db->getCollection( txn, ns );
- if ( !collection ) {
- errmsg = "Collection [" + ns + "] not found.";
- return false;
- }
-
- result.append( "ns" , ns.c_str() );
-
int scale = 1;
if ( jsobj["scale"].isNumber() ) {
scale = jsobj["scale"].numberInt();
@@ -890,6 +878,27 @@ namespace mongo {
bool verbose = jsobj["verbose"].trueValue();
+ const NamespaceString nss(parseNs(dbname, jsobj));
+
+ if (nss.coll().empty()) {
+ errmsg = "No collection name specified";
+ return false;
+ }
+
+ AutoGetCollectionForRead ctx(txn, nss);
+ if (!ctx.getDb()) {
+ errmsg = "Database [" + nss.db().toString() + "] not found.";
+ return false;
+ }
+
+ Collection* collection = ctx.getCollection();
+ if (!collection) {
+ errmsg = "Collection [" + nss.toString() + "] not found.";
+ return false;
+ }
+
+ result.append( "ns" , nss );
+
long long size = collection->dataSize(txn) / scale;
long long numRecords = collection->numRecords(txn);
result.appendNumber( "count" , numRecords );
@@ -1079,10 +1088,39 @@ namespace mongo {
const string ns = parseNs(dbname, jsobj);
- Client::ReadContext ctx(txn, ns);
- Database* d = ctx.ctx().db();
+ // TODO: Client::Context legacy, needs to be removed
+ txn->getCurOp()->ensureStarted();
+ txn->getCurOp()->setNS(dbname);
+
+ // We lock the entire database in S-mode in order to ensure that the contents will not
+ // change for the stats snapshot. This might be unnecessary and if it becomes a
+ // performance issue, we can take IS lock and then lock collection-by-collection.
+ AutoGetDb autoDb(txn, ns, newlm::MODE_S);
+
+ result.append("db", ns);
+
+ Database* db = autoDb.getDb();
+ if (!db) {
+ // TODO: This preserves the old behaviour, where we used to create empty
+ // database metadata even when the database was accessed only for reads.
+ // Without this, several unit-tests fail, though they would be fairly easy to
+ // fix. If backwards compatibility is not needed for the missing DB case, we
+ // can just do what CollectionStats does.
+ result.appendNumber("collections", 0);
+ result.appendNumber("objects", 0);
+ result.append("avgObjSize", 0);
+ result.appendNumber("dataSize", 0);
+ result.appendNumber("storageSize", 0);
+ result.appendNumber("numExtents", 0);
+ result.appendNumber("indexes", 0);
+ result.appendNumber("indexSize", 0);
+ }
+ else {
+ // TODO: Client::Context legacy, needs to be removed
+ txn->getCurOp()->enter(dbname.c_str(), db->getProfilingLevel());
- d->getStats( txn, &result, scale );
+ db->getStats(txn, &result, scale);
+ }
return true;
}
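Note: dbStats now locks the whole database in S mode so the statistics come from a stable snapshot, and it reports zeroed counters instead of creating the database when it is missing. A condensed sketch of the new control flow, using only names from the hunk:

    AutoGetDb autoDb(txn, ns, newlm::MODE_S);  // shared lock: stable stats snapshot
    if (Database* db = autoDb.getDb()) {
        db->getStats(txn, &result, scale);
    }
    else {
        // append zeroed counters; the database is intentionally not created
    }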
diff --git a/src/mongo/db/dbhelpers.cpp b/src/mongo/db/dbhelpers.cpp
index 9a2be8a7599..a2f5e964a70 100644
--- a/src/mongo/db/dbhelpers.cpp
+++ b/src/mongo/db/dbhelpers.cpp
@@ -195,20 +195,20 @@ namespace mongo {
Returns: true if object exists.
*/
bool Helpers::getSingleton(OperationContext* txn, const char *ns, BSONObj& result) {
- Client::Context context(txn, ns);
- auto_ptr<PlanExecutor> exec(
- InternalPlanner::collectionScan(txn, ns, context.db()->getCollection(txn, ns)));
+ AutoGetCollectionForRead ctx(txn, ns);
+ auto_ptr<PlanExecutor> exec(InternalPlanner::collectionScan(txn, ns, ctx.getCollection()));
PlanExecutor::ExecState state = exec->getNext(&result, NULL);
- context.getClient()->curop()->done();
+ txn->getCurOp()->done();
return PlanExecutor::ADVANCED == state;
}
bool Helpers::getLast(OperationContext* txn, const char *ns, BSONObj& result) {
- Client::Context ctx(txn, ns);
- Collection* coll = ctx.db()->getCollection( txn, ns );
- auto_ptr<PlanExecutor> exec(
- InternalPlanner::collectionScan(txn, ns, coll, InternalPlanner::BACKWARD));
+ AutoGetCollectionForRead autoColl(txn, ns);
+ auto_ptr<PlanExecutor> exec(InternalPlanner::collectionScan(txn,
+ ns,
+ autoColl.getCollection(),
+ InternalPlanner::BACKWARD));
PlanExecutor::ExecState state = exec->getNext(&result, NULL);
return PlanExecutor::ADVANCED == state;
@@ -295,10 +295,11 @@ namespace mongo {
const BSONObj& shardKeyPattern,
BSONObj* indexPattern ) {
- Client::ReadContext context(txn, ns);
- Collection* collection = context.ctx().db()->getCollection( txn, ns );
- if ( !collection )
+ AutoGetCollectionForRead ctx(txn, ns);
+ Collection* collection = ctx.getCollection();
+ if (!collection) {
return false;
+ }
// Allow multiKey based on the invariant that shard keys must be single-valued.
// Therefore, any multi-key index prefixed by shard key cannot be multikey over
@@ -492,9 +493,12 @@ namespace mongo {
*estChunkSizeBytes = 0;
*numDocs = 0;
- Client::ReadContext ctx(txn, ns);
- Collection* collection = ctx.ctx().db()->getCollection( txn, ns );
- if ( !collection ) return Status( ErrorCodes::NamespaceNotFound, ns );
+ AutoGetCollectionForRead ctx(txn, ns);
+
+ Collection* collection = ctx.getCollection();
+ if (!collection) {
+ return Status(ErrorCodes::NamespaceNotFound, ns);
+ }
// Require single key
IndexDescriptor *idx =
diff --git a/src/mongo/db/dbhelpers.h b/src/mongo/db/dbhelpers.h
index 865543c74f9..d657a970331 100644
--- a/src/mongo/db/dbhelpers.h
+++ b/src/mongo/db/dbhelpers.h
@@ -115,9 +115,6 @@ namespace mongo {
static bool getSingleton(OperationContext* txn, const char *ns, BSONObj& result);
static void putSingleton(OperationContext* txn, const char *ns, BSONObj obj);
static void putSingletonGod(OperationContext* txn, const char *ns, BSONObj obj, bool logTheOp);
- static bool getFirst(OperationContext* txn, const char *ns, BSONObj& result) {
- return getSingleton(txn, ns, result);
- }
/**
* get the last object in the collection; e.g. {$natural : -1}
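Note: Helpers::getFirst() was a trivial alias for getSingleton() and is deleted here; the remaining callers (heartbeat.cpp, repl_coordinator_legacy.cpp, repl_set_impl.cpp below) switch to getSingleton(), which per the dbhelpers.cpp hunk now takes its own AutoGetCollectionForRead instead of assuming a caller-held lock. Sketch of a converted call site ("local.oplog.rs" is just an example namespace):

    BSONObj entry;
    if (Helpers::getSingleton(txn, "local.oplog.rs", entry)) {  // locks internally now
        // ... use 'entry' ...
    }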
diff --git a/src/mongo/db/fts/fts_command_mongod.cpp b/src/mongo/db/fts/fts_command_mongod.cpp
index 610f2438edb..08396d50d9c 100644
--- a/src/mongo/db/fts/fts_command_mongod.cpp
+++ b/src/mongo/db/fts/fts_command_mongod.cpp
@@ -94,7 +94,7 @@ namespace mongo {
projBob.appendElements(sortSpec);
BSONObj projObj = projBob.obj();
- Client::ReadContext ctx(txn, ns);
+ AutoGetCollectionForRead ctx(txn, ns);
CanonicalQuery* cq;
Status canonicalizeStatus =
@@ -106,15 +106,14 @@ namespace mongo {
limit,
BSONObj(),
&cq,
- WhereCallbackReal(txn, StringData(dbname)));
+ WhereCallbackReal(txn, dbname));
if (!canonicalizeStatus.isOK()) {
errmsg = canonicalizeStatus.reason();
return false;
}
PlanExecutor* rawExec;
- Status getExecStatus = getExecutor(
- txn, ctx.ctx().db()->getCollection(txn, ns), cq, &rawExec);
+ Status getExecStatus = getExecutor(txn, ctx.getCollection(), cq, &rawExec);
if (!getExecStatus.isOK()) {
errmsg = getExecStatus.reason();
return false;
diff --git a/src/mongo/db/geo/haystack.cpp b/src/mongo/db/geo/haystack.cpp
index 42cae74d1e6..aad8bca021f 100644
--- a/src/mongo/db/geo/haystack.cpp
+++ b/src/mongo/db/geo/haystack.cpp
@@ -70,15 +70,9 @@ namespace mongo {
bool run(OperationContext* txn, const string& dbname, BSONObj& cmdObj, int,
string& errmsg, BSONObjBuilder& result, bool fromRepl) {
const string ns = dbname + "." + cmdObj.firstElement().valuestr();
- Client::ReadContext ctx(txn, ns);
+ AutoGetCollectionForRead ctx(txn, ns);
- Database* db = ctx.ctx().db();
- if ( !db ) {
- errmsg = "can't find ns";
- return false;
- }
-
- Collection* collection = db->getCollection( txn, ns );
+ Collection* collection = ctx.getCollection();
if ( !collection ) {
errmsg = "can't find ns";
return false;
diff --git a/src/mongo/db/index_builder.cpp b/src/mongo/db/index_builder.cpp
index 6d477063ee5..9e7e7c09312 100644
--- a/src/mongo/db/index_builder.cpp
+++ b/src/mongo/db/index_builder.cpp
@@ -66,7 +66,7 @@ namespace mongo {
Client::initThread(name().c_str());
Lock::ParallelBatchWriterMode::iAmABatchParticipant(txn.lockState());
- cc().getAuthorizationSession()->grantInternalAuthorization();
+ txn.getClient()->getAuthorizationSession()->grantInternalAuthorization();
txn.getCurOp()->reset(HostAndPort(), dbInsert);
NamespaceString ns(_index["ns"].String());
diff --git a/src/mongo/db/index_builder.h b/src/mongo/db/index_builder.h
index 9c71ec7c85d..29364757526 100644
--- a/src/mongo/db/index_builder.h
+++ b/src/mongo/db/index_builder.h
@@ -30,20 +30,21 @@
#include <string>
+#include "mongo/base/status.h"
#include "mongo/db/catalog/index_catalog.h"
-#include "mongo/db/client.h"
#include "mongo/db/jsobj.h"
#include "mongo/platform/atomic_word.h"
#include "mongo/util/background.h"
-/**
- * Forks off a thread to build an index.
- */
namespace mongo {
class Collection;
+ class Database;
class OperationContext;
+ /**
+ * Forks off a thread to build an index.
+ */
class IndexBuilder : public BackgroundJob {
public:
IndexBuilder(const BSONObj& index);
diff --git a/src/mongo/db/index_rebuilder.cpp b/src/mongo/db/index_rebuilder.cpp
index cbde00dbf04..22d8e055382 100644
--- a/src/mongo/db/index_rebuilder.cpp
+++ b/src/mongo/db/index_rebuilder.cpp
@@ -134,10 +134,8 @@ namespace {
}
} // namespace
- void restartInProgressIndexesFromLastShutdown() {
- OperationContextImpl txn;
-
- txn.getClient()->getAuthorizationSession()->grantInternalAuthorization();
+ void restartInProgressIndexesFromLastShutdown(OperationContext* txn) {
+ txn->getClient()->getAuthorizationSession()->grantInternalAuthorization();
std::vector<std::string> dbNames;
@@ -149,12 +147,12 @@ namespace {
for (std::vector<std::string>::const_iterator dbName = dbNames.begin();
dbName < dbNames.end();
++dbName) {
- Client::ReadContext ctx(&txn, *dbName);
+ AutoGetDb autoDb(txn, *dbName, newlm::MODE_S);
- Database* db = ctx.ctx().db();
+ Database* db = autoDb.getDb();
db->getDatabaseCatalogEntry()->getCollectionNamespaces(&collNames);
}
- checkNS(&txn, collNames);
+ checkNS(txn, collNames);
}
catch (const DBException& e) {
error() << "Index rebuilding did not complete: " << e.toString();
diff --git a/src/mongo/db/index_rebuilder.h b/src/mongo/db/index_rebuilder.h
index 4fd59b14966..26ddea997fb 100644
--- a/src/mongo/db/index_rebuilder.h
+++ b/src/mongo/db/index_rebuilder.h
@@ -30,9 +30,11 @@
namespace mongo {
+ class OperationContext;
+
/**
* Restarts building indexes that were in progress during shutdown.
* Only call this at startup before taking requests.
*/
- void restartInProgressIndexesFromLastShutdown();
+ void restartInProgressIndexesFromLastShutdown(OperationContext* txn);
}
diff --git a/src/mongo/db/instance.h b/src/mongo/db/instance.h
index 8b14a0e37e3..9b42cfc32f0 100644
--- a/src/mongo/db/instance.h
+++ b/src/mongo/db/instance.h
@@ -32,7 +32,6 @@
#pragma once
#include "mongo/client/dbclientinterface.h"
-#include "mongo/db/client.h"
#include "mongo/db/curop.h"
#include "mongo/db/dbmessage.h"
#include "mongo/db/operation_context.h"
diff --git a/src/mongo/db/introspect.cpp b/src/mongo/db/introspect.cpp
index c2f11b34e6d..f3172fce6a4 100644
--- a/src/mongo/db/introspect.cpp
+++ b/src/mongo/db/introspect.cpp
@@ -195,8 +195,11 @@ namespace {
collectionOptions.capped = true;
collectionOptions.cappedSize = 1024 * 1024;
+ WriteUnitOfWork wunit(txn);
collection = db->createCollection( txn, profileName, collectionOptions );
invariant( collection );
+ wunit.commit();
+
return collection;
}
diff --git a/src/mongo/db/ops/update.h b/src/mongo/db/ops/update.h
index b28d722b28b..801c058a763 100644
--- a/src/mongo/db/ops/update.h
+++ b/src/mongo/db/ops/update.h
@@ -38,6 +38,7 @@
namespace mongo {
class CanonicalQuery;
+ class Database;
class OperationContext;
class UpdateDriver;
diff --git a/src/mongo/db/pipeline/document_source_cursor.cpp b/src/mongo/db/pipeline/document_source_cursor.cpp
index 79270e2297a..ab7d631ca1d 100644
--- a/src/mongo/db/pipeline/document_source_cursor.cpp
+++ b/src/mongo/db/pipeline/document_source_cursor.cpp
@@ -30,6 +30,7 @@
#include "mongo/db/pipeline/document_source.h"
+#include "mongo/db/catalog/database_holder.h"
#include "mongo/db/exec/working_set_common.h"
#include "mongo/db/instance.h"
#include "mongo/db/pipeline/document.h"
@@ -79,8 +80,8 @@ namespace mongo {
// We have already validated the sharding version when we constructed the PlanExecutor
// so we shouldn't check it again.
- Lock::DBRead lk(pExpCtx->opCtx->lockState(), _ns);
- Client::Context ctx(pExpCtx->opCtx, _ns, /*doVersion=*/false);
+ const NamespaceString nss(_ns);
+ AutoGetCollectionForRead autoColl(pExpCtx->opCtx, nss);
_exec->restoreState(pExpCtx->opCtx);
@@ -159,8 +160,8 @@ namespace mongo {
BSONObjBuilder explainBuilder;
Status explainStatus(ErrorCodes::InternalError, "");
{
- Lock::DBRead lk(pExpCtx->opCtx->lockState(), _ns);
- Client::Context ctx(pExpCtx->opCtx, _ns, /*doVersion=*/ false);
+ const NamespaceString nss(_ns);
+ AutoGetCollectionForRead autoColl(pExpCtx->opCtx, nss);
massert(17392, "No _exec. Were we disposed before explained?", _exec);
diff --git a/src/mongo/db/pipeline/pipeline_d.cpp b/src/mongo/db/pipeline/pipeline_d.cpp
index 1a923abc7b7..b9d4ca9795e 100644
--- a/src/mongo/db/pipeline/pipeline_d.cpp
+++ b/src/mongo/db/pipeline/pipeline_d.cpp
@@ -26,7 +26,7 @@
* it in the license file.
*/
-#include "mongo/pch.h"
+#include "mongo/platform/basic.h"
#include "mongo/db/pipeline/pipeline_d.h"
@@ -63,8 +63,8 @@ namespace {
}
bool isCapped(const NamespaceString& ns) {
- Client::ReadContext ctx(_ctx->opCtx, ns.ns());
- Collection* collection = ctx.ctx().db()->getCollection(_ctx->opCtx, ns);
+ AutoGetCollectionForRead ctx(_ctx->opCtx, ns.ns());
+ Collection* collection = ctx.getCollection();
return collection && collection->isCapped();
}
@@ -74,7 +74,7 @@ namespace {
};
}
- boost::shared_ptr<PlanExecutor> PipelineD::prepareCursorSource(
+ shared_ptr<PlanExecutor> PipelineD::prepareCursorSource(
OperationContext* txn,
Collection* collection,
const intrusive_ptr<Pipeline>& pPipeline,
diff --git a/src/mongo/db/pipeline/pipeline_d.h b/src/mongo/db/pipeline/pipeline_d.h
index 1147755f5b6..40c7668d75d 100644
--- a/src/mongo/db/pipeline/pipeline_d.h
+++ b/src/mongo/db/pipeline/pipeline_d.h
@@ -28,7 +28,8 @@
#pragma once
-#include <boost/smart_ptr.hpp>
+#include <boost/intrusive_ptr.hpp>
+#include <boost/shared_ptr.hpp>
namespace mongo {
class Collection;
@@ -63,7 +64,7 @@ namespace mongo {
*
* The cursor is added to the front of the pipeline's sources.
*
- * Must have a ReadContext before entering.
+ * Must have an AutoGetCollectionForRead before entering.
*
* If the returned PlanExecutor is non-null, you are responsible for ensuring
* it receives appropriate invalidate and kill messages.
@@ -74,8 +75,8 @@ namespace mongo {
static boost::shared_ptr<PlanExecutor> prepareCursorSource(
OperationContext* txn,
Collection* collection,
- const intrusive_ptr<Pipeline> &pPipeline,
- const intrusive_ptr<ExpressionContext> &pExpCtx);
+ const boost::intrusive_ptr<Pipeline> &pPipeline,
+ const boost::intrusive_ptr<ExpressionContext> &pExpCtx);
private:
PipelineD(); // does not exist: prevent instantiation
diff --git a/src/mongo/db/prefetch.cpp b/src/mongo/db/prefetch.cpp
index 0f4a6c76a77..2bda9526e42 100644
--- a/src/mongo/db/prefetch.cpp
+++ b/src/mongo/db/prefetch.cpp
@@ -117,7 +117,11 @@ namespace {
}
// page in the data pages for a record associated with an object
- void prefetchRecordPages(OperationContext* txn, const char* ns, const BSONObj& obj) {
+ void prefetchRecordPages(OperationContext* txn,
+ Database* db,
+ const char* ns,
+ const BSONObj& obj) {
+
BSONElement _id;
if( obj.getObjectID(_id) ) {
TimerHolder timer(&prefetchDocStats);
@@ -125,12 +129,10 @@ namespace {
builder.append(_id);
BSONObj result;
try {
- // we can probably use Client::Context here instead of ReadContext as we
- // have locked higher up the call stack already
- Client::ReadContext ctx(txn, ns);
- if( Helpers::findById(txn, ctx.ctx().db(), ns, builder.done(), result) ) {
+ if (Helpers::findById(txn, db, ns, builder.done(), result)) {
// do we want to use Record::touch() here? it's pretty similar.
volatile char _dummy_char = '\0';
+
// Touch the first word on every page in order to fault it into memory
for (int i = 0; i < result.objsize(); i += g_minOSPageSizeBytes) {
_dummy_char += *(result.objdata() + i);
@@ -170,14 +172,15 @@ namespace {
BSONObj obj = op.getObjectField(opField);
const char *ns = op.getStringField("ns");
+ txn->lockState()->assertAtLeastReadLocked(ns);
+
Collection* collection = db->getCollection( txn, ns );
- if ( !collection )
+ if (!collection) {
return;
+ }
LOG(4) << "index prefetch for op " << *opType << endl;
- DEV txn->lockState()->assertAtLeastReadLocked(ns);
-
// should we prefetch index pages on updates? if the update is in-place and doesn't change
// indexed values, it is actually slower - a lot slower if there are a dozen indexes or
// lots of multikeys. possible variations (not all mutually exclusive):
@@ -205,7 +208,7 @@ namespace {
// do not prefetch the data for capped collections because
// they typically do not have an _id index for findById() to use.
!collection->isCapped()) {
- prefetchRecordPages(txn, ns, obj);
+ prefetchRecordPages(txn, db, ns, obj);
}
}
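Note: prefetchRecordPages() now receives the Database* its caller already holds locked instead of opening a fresh Client::ReadContext per record, and the lock assertion is promoted from DEV-only to unconditional. The caller-side shape after the change:

    txn->lockState()->assertAtLeastReadLocked(ns);  // always checked now
    prefetchRecordPages(txn, db, ns, obj);          // reuse the caller's Database*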
diff --git a/src/mongo/db/query/new_find.cpp b/src/mongo/db/query/new_find.cpp
index 4db350e0eab..bd456ebf9ae 100644
--- a/src/mongo/db/query/new_find.cpp
+++ b/src/mongo/db/query/new_find.cpp
@@ -35,6 +35,7 @@
#include "mongo/client/dbclientinterface.h"
#include "mongo/db/clientcursor.h"
#include "mongo/db/commands.h"
+#include "mongo/db/catalog/database_holder.h"
#include "mongo/db/exec/filter.h"
#include "mongo/db/exec/oplogstart.h"
#include "mongo/db/exec/working_set_common.h"
@@ -184,8 +185,9 @@ namespace mongo {
exhaust = false;
// This is a read lock.
- scoped_ptr<Client::ReadContext> ctx(new Client::ReadContext(txn, ns));
- Collection* collection = ctx->ctx().db()->getCollection(txn, ns);
+ const NamespaceString nss(ns);
+ scoped_ptr<AutoGetCollectionForRead> ctx(new AutoGetCollectionForRead(txn, nss));
+ Collection* collection = ctx->getCollection();
uassert( 17356, "collection dropped between getMore calls", collection );
QLOG() << "Running getMore, cursorid: " << cursorid << endl;
@@ -196,7 +198,7 @@ namespace mongo {
// reads are not okay.
Status status = repl::getGlobalReplicationCoordinator()->checkCanServeReadsFor(
txn,
- NamespaceString(ns),
+ nss,
true);
uassertStatusOK(status);
@@ -527,20 +529,15 @@ namespace mongo {
return "";
}
- // This is a read lock. We require this because if we're parsing a $where, the
- // where-specific parsing code assumes we have a lock and creates execution machinery that
- // requires it.
- Client::ReadContext ctx(txn, q.ns);
- Collection* collection = ctx.ctx().db()->getCollection( txn, ns );
+ const NamespaceString nss(q.ns);
// Parse the qm into a CanonicalQuery.
CanonicalQuery* cq;
Status canonStatus = CanonicalQuery::canonicalize(
- q, &cq, WhereCallbackReal(txn, StringData(ctx.ctx().db()->name())));
+ q, &cq, WhereCallbackReal(txn, StringData(nss.db())));
if (!canonStatus.isOK()) {
uasserted(17287, str::stream() << "Can't canonicalize query: " << canonStatus.toString());
}
- verify(cq);
QLOG() << "Running query:\n" << cq->toString();
LOG(2) << "Running query: " << cq->toStringShort();
@@ -551,6 +548,13 @@ namespace mongo {
// We use this a lot below.
const LiteParsedQuery& pq = cq->getParsed();
+ AutoGetCollectionForRead ctx(txn, nss);
+
+ const int dbProfilingLevel = (ctx.getDb() != NULL) ? ctx.getDb()->getProfilingLevel() :
+ serverGlobalParams.defaultProfile;
+
+ Collection* collection = ctx.getCollection();
+
// We'll now try to get the query executor that will execute this query for us. There
// are a few cases in which we know upfront which executor we should get and, therefore,
// we shortcut the selection process here.
@@ -744,7 +748,6 @@ namespace mongo {
// If we're tailing a capped collection, we don't bother saving the cursor if the
// collection is empty. Otherwise, the semantics of the tailable cursor is that the
// client will keep trying to read from it. So we'll keep it around.
- Collection* collection = ctx.ctx().db()->getCollection(txn, cq->ns());
if (collection && collection->numRecords(txn) != 0 && pq.getNumToReturn() != 1) {
saveClientCursor = true;
}
@@ -763,7 +766,7 @@ namespace mongo {
const logger::LogSeverity logLevelOne = logger::LogSeverity::Debug(1);
// Set debug information for consumption by the profiler.
- if (ctx.ctx().db()->getProfilingLevel() > 0 ||
+ if (dbProfilingLevel > 0 ||
curop.elapsedMillis() > serverGlobalParams.slowMS ||
logger::globalLogDomain()->shouldLog(queryLogComponent, logLevelOne)) {
PlanSummaryStats newStats;
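Note: the query path in new_find.cpp now canonicalizes the query before taking any lock and captures the profiling level once, so the later profiler check no longer dereferences the Database. A sketch of the reordered steps, with error handling condensed:

    const NamespaceString nss(q.ns);
    CanonicalQuery* cq;
    uassertStatusOK(CanonicalQuery::canonicalize(
        q, &cq, WhereCallbackReal(txn, StringData(nss.db()))));

    AutoGetCollectionForRead ctx(txn, nss);  // lock acquired only after parsing
    const int dbProfilingLevel = ctx.getDb() ? ctx.getDb()->getProfilingLevel()
                                             : serverGlobalParams.defaultProfile;
    Collection* collection = ctx.getCollection();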
diff --git a/src/mongo/db/range_deleter_db_env.cpp b/src/mongo/db/range_deleter_db_env.cpp
index 0c7f5676c36..caa95550a06 100644
--- a/src/mongo/db/range_deleter_db_env.cpp
+++ b/src/mongo/db/range_deleter_db_env.cpp
@@ -153,10 +153,11 @@ namespace mongo {
void RangeDeleterDBEnv::getCursorIds(OperationContext* txn,
const StringData& ns,
std::set<CursorId>* openCursors) {
- Client::ReadContext ctx(txn, ns.toString());
- Collection* collection = ctx.ctx().db()->getCollection( txn, ns );
- if ( !collection )
+ AutoGetCollectionForRead ctx(txn, ns.toString());
+ Collection* collection = ctx.getCollection();
+ if (!collection) {
return;
+ }
collection->cursorCache()->getCursorIds( openCursors );
}
diff --git a/src/mongo/db/repl/bgsync.cpp b/src/mongo/db/repl/bgsync.cpp
index 743a7d6300e..dfc1b4717dc 100644
--- a/src/mongo/db/repl/bgsync.cpp
+++ b/src/mongo/db/repl/bgsync.cpp
@@ -495,7 +495,6 @@ namespace {
}
void BackgroundSync::loadLastAppliedHash(OperationContext* txn) {
- Lock::DBRead lk(txn->lockState(), rsoplog);
BSONObj oplogEntry;
try {
if (!Helpers::getLast(txn, rsoplog, oplogEntry)) {
diff --git a/src/mongo/db/repl/heartbeat.cpp b/src/mongo/db/repl/heartbeat.cpp
index d37358a5dda..3bf3e328a24 100644
--- a/src/mongo/db/repl/heartbeat.cpp
+++ b/src/mongo/db/repl/heartbeat.cpp
@@ -74,12 +74,11 @@ namespace {
if( names.size() == 1 ) {
if( names[0] != "local" )
return true;
+
// we have a local database. return true if oplog isn't empty
- {
- Lock::DBRead lk(txn->lockState(), repl::rsoplog);
- BSONObj o;
- if( Helpers::getFirst(txn, repl::rsoplog, o) )
- return true;
+ BSONObj o;
+ if (Helpers::getSingleton(txn, repl::rsoplog, o)) {
+ return true;
}
}
return false;
diff --git a/src/mongo/db/repl/master_slave.cpp b/src/mongo/db/repl/master_slave.cpp
index ed212ec276f..77a4b09e9d2 100644
--- a/src/mongo/db/repl/master_slave.cpp
+++ b/src/mongo/db/repl/master_slave.cpp
@@ -163,12 +163,10 @@ namespace repl {
void ReplSource::ensureMe(OperationContext* txn) {
string myname = getHostName();
- bool exists = false;
- {
- Client::ReadContext ctx(txn, "local");
- // local.me is an identifier for a server for getLastError w:2+
- exists = Helpers::getSingleton(txn, "local.me", _me);
- }
+
+ // local.me is an identifier for a server for getLastError w:2+
+ bool exists = Helpers::getSingleton(txn, "local.me", _me);
+
if (!exists || !_me.hasField("host") || _me["host"].String() != myname) {
Client::WriteContext ctx(txn, "local");
// clean out local.me
@@ -1375,9 +1373,10 @@ namespace repl {
BSONObjBuilder b;
b.append(_id);
BSONObj result;
- Client::ReadContext ctx(txn, ns );
- if( Helpers::findById(txn, ctx.ctx().db(), ns, b.done(), result) )
+ AutoGetCollectionForRead ctx(txn, ns);
+ if (Helpers::findById(txn, ctx.getDb(), ns, b.done(), result)) {
_dummy_z += result.objsize(); // touch
+ }
}
}
catch( DBException& ) {
diff --git a/src/mongo/db/repl/minvalid.cpp b/src/mongo/db/repl/minvalid.cpp
index f2b0f4c189e..c1d4db3f221 100644
--- a/src/mongo/db/repl/minvalid.cpp
+++ b/src/mongo/db/repl/minvalid.cpp
@@ -64,7 +64,6 @@ namespace {
bool getInitialSyncFlag() {
OperationContextImpl txn; // XXX?
- Lock::DBRead lk (txn.lockState(), "local");
BSONObj mv;
if (Helpers::getSingleton(&txn, minvalidNS, mv)) {
return mv[initialSyncFlagString].trueValue();
@@ -80,7 +79,6 @@ namespace {
}
OpTime getMinValid(OperationContext* txn) {
- Lock::DBRead lk(txn->lockState(), "local.replset.minvalid");
BSONObj mv;
if (Helpers::getSingleton(txn, minvalidNS, mv)) {
return mv["ts"]._opTime();
diff --git a/src/mongo/db/repl/repl_coordinator_external_state_impl.cpp b/src/mongo/db/repl/repl_coordinator_external_state_impl.cpp
index e1d1b52be7b..8f1ea7638cf 100644
--- a/src/mongo/db/repl/repl_coordinator_external_state_impl.cpp
+++ b/src/mongo/db/repl/repl_coordinator_external_state_impl.cpp
@@ -123,7 +123,6 @@ namespace {
OperationContext* txn) {
try {
BSONObj config;
- Lock::DBRead dbReadLock(txn->lockState(), configCollectionName);
if (!Helpers::getSingleton(txn, configCollectionName, config)) {
return StatusWith<BSONObj>(
ErrorCodes::NoMatchingDocument,
@@ -154,7 +153,6 @@ namespace {
OperationContext* txn) {
try {
- Lock::DBRead lk(txn->lockState(), rsoplog);
BSONObj oplogEntry;
if (!Helpers::getLast(txn, rsoplog, oplogEntry)) {
return StatusWith<OpTime>(
diff --git a/src/mongo/db/repl/repl_coordinator_legacy.cpp b/src/mongo/db/repl/repl_coordinator_legacy.cpp
index ac5d8d85644..27056341b82 100644
--- a/src/mongo/db/repl/repl_coordinator_legacy.cpp
+++ b/src/mongo/db/repl/repl_coordinator_legacy.cpp
@@ -814,7 +814,7 @@ namespace {
it is ok if the initiating member has *other* data than that.
*/
BSONObj o;
- if( Helpers::getFirst(txn, rsoplog, o) ) {
+ if (Helpers::getSingleton(txn, rsoplog, o)) {
return Status(ErrorCodes::AlreadyInitialized,
rsoplog + string(" is not empty on the initiating member. "
"cannot initiate."));
diff --git a/src/mongo/db/repl/repl_info.cpp b/src/mongo/db/repl/repl_info.cpp
index 49eabbedd05..6859a2b6260 100644
--- a/src/mongo/db/repl/repl_info.cpp
+++ b/src/mongo/db/repl/repl_info.cpp
@@ -26,6 +26,8 @@
* it in the license file.
*/
+#include "mongo/platform/basic.h"
+
#include <list>
#include <vector>
#include <boost/scoped_ptr.hpp>
@@ -74,12 +76,9 @@ namespace repl {
list<BSONObj> src;
{
const char* localSources = "local.sources";
- Client::ReadContext ctx(txn, localSources);
+ AutoGetCollectionForRead ctx(txn, localSources);
auto_ptr<PlanExecutor> exec(
- InternalPlanner::collectionScan(txn,
- localSources,
- ctx.ctx().db()->getCollection(txn,
- localSources)));
+ InternalPlanner::collectionScan(txn, localSources, ctx.getCollection()));
BSONObj obj;
PlanExecutor::ExecState state;
while (PlanExecutor::ADVANCED == (state = exec->getNext(&obj, NULL))) {
@@ -156,10 +155,12 @@ namespace repl {
if (!theReplSet)
return BSONObj();
+ OperationContextImpl txn;
+
BSONObjBuilder result;
result.appendTimestamp("latestOptime", theReplSet->lastOpTimeWritten.asDate());
result.appendTimestamp("earliestOptime",
- theReplSet->getEarliestOpTimeWritten().asDate());
+ theReplSet->getEarliestOpTimeWritten(&txn).asDate());
return result.obj();
}
diff --git a/src/mongo/db/repl/repl_set_impl.cpp b/src/mongo/db/repl/repl_set_impl.cpp
index 86e880151ef..e1be2899866 100644
--- a/src/mongo/db/repl/repl_set_impl.cpp
+++ b/src/mongo/db/repl/repl_set_impl.cpp
@@ -403,12 +403,10 @@ namespace {
}
}
- OpTime ReplSetImpl::getEarliestOpTimeWritten() const {
- OperationContextImpl txn; // XXX?
- Lock::DBRead lk(txn.lockState(), rsoplog);
+ OpTime ReplSetImpl::getEarliestOpTimeWritten(OperationContext* txn) const {
BSONObj o;
uassert(17347, "Problem reading earliest entry from oplog",
- Helpers::getFirst(&txn, rsoplog, o));
+ Helpers::getSingleton(txn, rsoplog, o));
return o["ts"]._opTime();
}
diff --git a/src/mongo/db/repl/repl_set_impl.h b/src/mongo/db/repl/repl_set_impl.h
index 79c909c194e..e7b58c66805 100644
--- a/src/mongo/db/repl/repl_set_impl.h
+++ b/src/mongo/db/repl/repl_set_impl.h
@@ -88,7 +88,7 @@ namespace repl {
SyncSourceFeedback syncSourceFeedback;
OpTime lastOpTimeWritten;
- OpTime getEarliestOpTimeWritten() const;
+ OpTime getEarliestOpTimeWritten(OperationContext* txn) const;
Status forceSyncFrom(const string& host, BSONObjBuilder* result);
// Check if the current sync target is suboptimal. This must be called while holding a mutex
@@ -275,7 +275,6 @@ namespace repl {
const Member* source);
void _initialSync();
void syncTail();
- void syncFixUp(OperationContext* txn, FixUpInfo& h, OplogReader& r);
public:
// keep a list of hosts that we've tried recently that didn't work
diff --git a/src/mongo/db/repl/rs_rollback.cpp b/src/mongo/db/repl/rs_rollback.cpp
index 8bdf9ceba2d..20212711222 100644
--- a/src/mongo/db/repl/rs_rollback.cpp
+++ b/src/mongo/db/repl/rs_rollback.cpp
@@ -455,8 +455,7 @@ namespace {
const NamespaceString nss(ns);
- bool unused;
- Database* db = dbHolder().getOrCreate(txn, nss.db().toString(), unused);
+ Database* db = dbHolder().openDb(txn, nss.db().toString());
invariant(db);
{
diff --git a/src/mongo/db/repl/sync_tail.cpp b/src/mongo/db/repl/sync_tail.cpp
index cb8dd5552c9..53a934ed504 100644
--- a/src/mongo/db/repl/sync_tail.cpp
+++ b/src/mongo/db/repl/sync_tail.cpp
@@ -147,10 +147,8 @@ namespace repl {
// one possible tweak here would be to stay in the read lock for this database
// for multiple prefetches if they are for the same database.
OperationContextImpl txn;
- Client::ReadContext ctx(&txn, ns);
- prefetchPagesForReplicatedOp(&txn,
- ctx.ctx().db(),
- op);
+ AutoGetCollectionForRead ctx(&txn, ns);
+ prefetchPagesForReplicatedOp(&txn, ctx.getDb(), op);
}
catch (const DBException& e) {
LOG(2) << "ignoring exception in prefetchOp(): " << e.what() << endl;
diff --git a/src/mongo/db/storage/mmap_v1/catalog/namespace_details_rsv1_metadata.cpp b/src/mongo/db/storage/mmap_v1/catalog/namespace_details_rsv1_metadata.cpp
index 464c874e8b7..54d74ae1d2e 100644
--- a/src/mongo/db/storage/mmap_v1/catalog/namespace_details_rsv1_metadata.cpp
+++ b/src/mongo/db/storage/mmap_v1/catalog/namespace_details_rsv1_metadata.cpp
@@ -28,9 +28,11 @@
* it in the license file.
*/
-#include "mongo/db/ops/update.h"
#include "mongo/db/storage/mmap_v1/catalog/namespace_details_rsv1_metadata.h"
+#include "mongo/db/operation_context.h"
+#include "mongo/db/ops/update.h"
+
namespace mongo {
NamespaceDetailsRSV1MetaData::NamespaceDetailsRSV1MetaData( const StringData& ns,
NamespaceDetails* details,
diff --git a/src/mongo/db/storage/mmap_v1/catalog/namespace_index.cpp b/src/mongo/db/storage/mmap_v1/catalog/namespace_index.cpp
index ac091b167f7..f4f0b5fd9fb 100644
--- a/src/mongo/db/storage/mmap_v1/catalog/namespace_index.cpp
+++ b/src/mongo/db/storage/mmap_v1/catalog/namespace_index.cpp
@@ -134,20 +134,7 @@ namespace mongo {
}
NOINLINE_DECL void NamespaceIndex::_init( OperationContext* txn ) {
- verify( !_ht.get() );
-
- txn->lockState()->assertWriteLocked(_database);
-
- /* if someone manually deleted the datafiles for a database,
- we need to be sure to clear any cached info for the database in
- local.*.
- */
- /*
- if ( "local" != _database ) {
- DBInfo i(_database.c_str());
- i.dbDropped();
- }
- */
+ invariant(!_ht.get());
unsigned long long len = 0;
boost::filesystem::path nsPath = path();
diff --git a/src/mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.cpp b/src/mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.cpp
index 671b353847b..6c3a9ee679b 100644
--- a/src/mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.cpp
+++ b/src/mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.cpp
@@ -1,32 +1,30 @@
-// mmap_v1_database_catalog_entry.cpp
-
/**
-* Copyright (C) 2014 MongoDB Inc.
-*
-* This program is free software: you can redistribute it and/or modify
-* it under the terms of the GNU Affero General Public License, version 3,
-* as published by the Free Software Foundation.
-*
-* This program is distributed in the hope that it will be useful,
-* but WITHOUT ANY WARRANTY; without even the implied warranty of
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-* GNU Affero General Public License for more details.
-*
-* You should have received a copy of the GNU Affero General Public License
-* along with this program. If not, see <http://www.gnu.org/licenses/>.
-*
-* As a special exception, the copyright holders give permission to link the
-* code of portions of this program with the OpenSSL library under certain
-* conditions as described in each individual source file and distribute
-* linked combinations including the program with the OpenSSL library. You
-* must comply with the GNU Affero General Public License in all respects for
-* all of the code used other than as permitted herein. If you modify file(s)
-* with this exception, you may extend this exception to your version of the
-* file(s), but you are not obligated to do so. If you do not wish to do so,
-* delete this exception statement from your version. If you delete this
-* exception statement from all source files in the program, then also delete
-* it in the license file.
-*/
+ * Copyright (C) 2014 MongoDB Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the GNU Affero General Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
#include "mongo/platform/basic.h"
@@ -118,15 +116,12 @@ namespace mongo {
_extentManager( name, path, directoryPerDB ),
_namespaceIndex( _path, name.toString() ) {
- uassert(17507,
- "Cannot open or create database while out of disk space",
- !FileAllocator::get()->hasFailed());
+ invariant(txn->lockState()->isWriteLocked(name));
try {
- // we mark our thread as having done writes now as we do not want any exceptions
- // once we start creating a new database
- // TODO(Mathias): Remove this when rollback is enabled.
- txn->getClient()->writeHappened();
+ uassert(17507,
+ "Cannot open or create database while out of disk space",
+ !FileAllocator::get()->hasFailed());
WriteUnitOfWork wunit(txn);
diff --git a/src/mongo/db/storage/mmap_v1/repair_database.cpp b/src/mongo/db/storage/mmap_v1/repair_database.cpp
index 7394f718296..a34fd1bd29d 100644
--- a/src/mongo/db/storage/mmap_v1/repair_database.cpp
+++ b/src/mongo/db/storage/mmap_v1/repair_database.cpp
@@ -306,8 +306,7 @@ namespace mongo {
reservedPath ) );
{
- bool unusedJustCreated;
- Database* originalDatabase = dbHolder().getOrCreate(txn, dbName, unusedJustCreated);
+ Database* originalDatabase = dbHolder().openDb(txn, dbName);
if (originalDatabase == NULL) {
return Status(ErrorCodes::NamespaceNotFound, "database does not exist to repair");
}
@@ -448,7 +447,8 @@ namespace mongo {
if ( repairFileDeleter.get() )
repairFileDeleter->success();
- dbHolder().close( txn, dbName );
+ // Close the database so we can rename/delete the original data files
+ dbHolder().close(txn, dbName);
if ( backupOriginalFiles ) {
_renameForBackup( dbName, reservedPath );
@@ -470,8 +470,12 @@ namespace mongo {
_replaceWithRecovered( dbName, reservedPathString.c_str() );
- if ( !backupOriginalFiles )
- MONGO_ASSERT_ON_EXCEPTION( boost::filesystem::remove_all( reservedPath ) );
+ if (!backupOriginalFiles) {
+ MONGO_ASSERT_ON_EXCEPTION(boost::filesystem::remove_all(reservedPath));
+ }
+
+ // Reopen the database so it's discoverable
+ dbHolder().openDb(txn, dbName);
return Status::OK();
}
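Note: repairDatabase must close the database so its original data files can be renamed or deleted, and after this patch it reopens the database at the end so it stays registered with the DatabaseHolder. Condensed sketch:

    dbHolder().close(txn, dbName);    // release files for rename/delete
    // ... move repaired files into place ...
    dbHolder().openDb(txn, dbName);   // re-register so the db stays discoverable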
diff --git a/src/mongo/db/ttl.cpp b/src/mongo/db/ttl.cpp
index 1d9989c54dc..7ea062c0784 100644
--- a/src/mongo/db/ttl.cpp
+++ b/src/mongo/db/ttl.cpp
@@ -128,7 +128,7 @@ namespace mongo {
}
if (!repl::getGlobalReplicationCoordinator()->canAcceptWritesForDatabase(
- collection->ns().db())) {
+ dbName)) {
// we've stepped down since we started this function,
// so we should stop working as we only do deletes on the primary
break;
@@ -181,11 +181,8 @@ namespace mongo {
continue;
set<string> dbs;
- {
- Lock::DBRead lk(txn.lockState(), "local");
- dbHolder().getAllShortNames( dbs );
- }
-
+ dbHolder().getAllShortNames( dbs );
+
ttlPasses.increment();
for ( set<string>::const_iterator i=dbs.begin(); i!=dbs.end(); ++i ) {
diff --git a/src/mongo/dbtests/dbhelper_tests.cpp b/src/mongo/dbtests/dbhelper_tests.cpp
index 4801b711a1c..3706b5dac48 100644
--- a/src/mongo/dbtests/dbhelper_tests.cpp
+++ b/src/mongo/dbtests/dbhelper_tests.cpp
@@ -187,7 +187,6 @@ namespace mongo {
long long estSizeBytes;
{
Lock::DBRead lk(txn.lockState(), ns);
- Client::Context ctx(&txn, ns );
// search invalid index range
KeyRange range( ns,
@@ -233,7 +232,7 @@ namespace mongo {
long long estSizeBytes;
{
Lock::DBRead lk(txn.lockState(), ns);
- Client::Context ctx(&txn, ns );
+
KeyRange range( ns,
BSON( "_id" << 0 ),
BSON( "_id" << numDocsInserted ),
diff --git a/src/mongo/dbtests/matchertests.cpp b/src/mongo/dbtests/matchertests.cpp
index e4e1bc91316..591afcfd383 100644
--- a/src/mongo/dbtests/matchertests.cpp
+++ b/src/mongo/dbtests/matchertests.cpp
@@ -207,7 +207,7 @@ namespace MatcherTests {
public:
void run() {
OperationContextImpl txn;
- Client::ReadContext ctx(&txn, "unittests.matchertests");
+ AutoGetCollectionForRead ctx(&txn, "unittests.matchertests");
M m(BSON("$where" << "function(){ return this.a == 1; }"),
WhereCallbackReal(&txn, StringData("unittests")));
diff --git a/src/mongo/dbtests/namespacetests.cpp b/src/mongo/dbtests/namespacetests.cpp
index 72a27859efa..a33f33ef06c 100644
--- a/src/mongo/dbtests/namespacetests.cpp
+++ b/src/mongo/dbtests/namespacetests.cpp
@@ -527,7 +527,7 @@ namespace NamespaceTests {
Lock::DBWrite lk(txn.lockState(), dbName);
bool justCreated;
- Database* db = dbHolder().getOrCreate(&txn, dbName, justCreated);
+ Database* db = dbHolder().openDb(&txn, dbName, &justCreated);
ASSERT(justCreated);
Collection* committedColl;
@@ -570,7 +570,7 @@ namespace NamespaceTests {
Lock::DBWrite lk(txn.lockState(), dbName);
bool justCreated;
- Database* db = dbHolder().getOrCreate(&txn, dbName, justCreated);
+ Database* db = dbHolder().openDb(&txn, dbName, &justCreated);
ASSERT(justCreated);
{
diff --git a/src/mongo/dbtests/plan_ranking.cpp b/src/mongo/dbtests/plan_ranking.cpp
index cbd44009ee9..d570d5dd645 100644
--- a/src/mongo/dbtests/plan_ranking.cpp
+++ b/src/mongo/dbtests/plan_ranking.cpp
@@ -100,8 +100,8 @@ namespace PlanRankingTests {
* Takes ownership of 'cq'. Caller DOES NOT own the returned QuerySolution*.
*/
QuerySolution* pickBestPlan(CanonicalQuery* cq) {
- Client::ReadContext ctx(&_txn, ns);
- Collection* collection = ctx.ctx().db()->getCollection(&_txn, ns);
+ AutoGetCollectionForRead ctx(&_txn, ns);
+ Collection* collection = ctx.getCollection();
QueryPlannerParams plannerParams;
fillOutPlannerParams(&_txn, collection, cq, &plannerParams);
diff --git a/src/mongo/dbtests/query_multi_plan_runner.cpp b/src/mongo/dbtests/query_multi_plan_runner.cpp
index 99eafa85908..a4b0a96ce51 100644
--- a/src/mongo/dbtests/query_multi_plan_runner.cpp
+++ b/src/mongo/dbtests/query_multi_plan_runner.cpp
@@ -87,11 +87,6 @@ namespace QueryMultiPlanRunner {
ctx.commit();
}
- IndexDescriptor* getIndex(OperationContext* txn, Database* db, const BSONObj& obj) {
- const Collection* collection = db->getCollection( txn, ns() );
- return collection->getIndexCatalog()->findIndexByKeyPattern(txn, obj);
- }
-
void insert(const BSONObj& obj) {
Client::WriteContext ctx(&_txn, ns());
_client.insert(ns(), obj);
@@ -124,14 +119,14 @@ namespace QueryMultiPlanRunner {
addIndex(BSON("foo" << 1));
- Client::ReadContext ctx(&_txn, ns());
- const Collection* coll = ctx.ctx().db()->getCollection(&_txn, ns());
+ AutoGetCollectionForRead ctx(&_txn, ns());
+ const Collection* coll = ctx.getCollection();
// Plan 0: IXScan over foo == 7
// Every call to work() returns something so this should clearly win (by current scoring
// at least).
IndexScanParams ixparams;
- ixparams.descriptor = getIndex(&_txn, ctx.ctx().db(), BSON("foo" << 1));
+ ixparams.descriptor = coll->getIndexCatalog()->findIndexByKeyPattern(&_txn, BSON("foo" << 1));
ixparams.bounds.isSimpleRange = true;
ixparams.bounds.startKey = BSON("" << 7);
ixparams.bounds.endKey = BSON("" << 7);
@@ -144,7 +139,7 @@ namespace QueryMultiPlanRunner {
// Plan 1: CollScan with matcher.
CollectionScanParams csparams;
- csparams.collection = ctx.ctx().db()->getCollection( &_txn, ns() );
+ csparams.collection = coll;
csparams.direction = CollectionScanParams::FORWARD;
// Make the filter.
@@ -161,9 +156,7 @@ namespace QueryMultiPlanRunner {
verify(CanonicalQuery::canonicalize(ns(), BSON("foo" << 7), &cq).isOK());
verify(NULL != cq);
- MultiPlanStage* mps = new MultiPlanStage(&_txn,
- ctx.ctx().db()->getCollection(&_txn, ns()),
- cq);
+ MultiPlanStage* mps = new MultiPlanStage(&_txn, ctx.getCollection(), cq);
mps->addPlan(createQuerySolution(), firstRoot.release(), sharedWs.get());
mps->addPlan(createQuerySolution(), secondRoot.release(), sharedWs.get());
@@ -172,9 +165,8 @@ namespace QueryMultiPlanRunner {
ASSERT(mps->bestPlanChosen());
ASSERT_EQUALS(0, mps->bestPlanIdx());
- Collection* collection = ctx.ctx().db()->getCollection(&_txn, ns());
// Takes ownership of arguments other than 'collection'.
- PlanExecutor exec(&_txn, sharedWs.release(), mps, cq, collection);
+ PlanExecutor exec(&_txn, sharedWs.release(), mps, cq, coll);
// Get all our results out.
int results = 0;
@@ -200,8 +192,8 @@ namespace QueryMultiPlanRunner {
addIndex(BSON("a" << 1));
addIndex(BSON("b" << 1));
- Client::ReadContext ctx(&_txn, ns());
- Collection* collection = ctx.ctx().db()->getCollection(&_txn, ns());
+ AutoGetCollectionForRead ctx(&_txn, ns());
+ Collection* collection = ctx.getCollection();
// Query for both 'a' and 'b' and sort on 'b'.
CanonicalQuery* cq;
diff --git a/src/mongo/dbtests/query_plan_executor.cpp b/src/mongo/dbtests/query_plan_executor.cpp
index 0168af6f9d1..8b77c8f4eb4 100644
--- a/src/mongo/dbtests/query_plan_executor.cpp
+++ b/src/mongo/dbtests/query_plan_executor.cpp
@@ -83,9 +83,9 @@ namespace QueryPlanExecutor {
*
* The caller takes ownership of the returned PlanExecutor*.
*/
- PlanExecutor* makeCollScanExec(Client::Context& ctx, BSONObj& filterObj) {
+ PlanExecutor* makeCollScanExec(Collection* coll, BSONObj& filterObj) {
CollectionScanParams csparams;
- csparams.collection = ctx.db()->getCollection( &_txn, ns() );
+ csparams.collection = coll;
csparams.direction = CollectionScanParams::FORWARD;
auto_ptr<WorkingSet> ws(new WorkingSet());
// Parse the filter.
@@ -100,8 +100,7 @@ namespace QueryPlanExecutor {
verify(NULL != cq);
// Hand the plan off to the executor.
- PlanExecutor* exec = new PlanExecutor(&_txn, ws.release(), root.release(), cq,
- ctx.db()->getCollection(&_txn, ns()));
+ PlanExecutor* exec = new PlanExecutor(&_txn, ws.release(), root.release(), cq, coll);
return exec;
}
@@ -146,22 +145,24 @@ namespace QueryPlanExecutor {
static const char* ns() { return "unittests.QueryPlanExecutor"; }
size_t numCursors() {
- Client::ReadContext ctx(&_txn, ns() );
- Collection* collection = ctx.ctx().db()->getCollection( &_txn, ns() );
+ AutoGetCollectionForRead ctx(&_txn, ns() );
+ Collection* collection = ctx.getCollection();
if ( !collection )
return 0;
return collection->cursorCache()->numCursors();
}
void registerExec( PlanExecutor* exec ) {
- Client::ReadContext ctx(&_txn, ns());
- Collection* collection = ctx.ctx().db()->getOrCreateCollection( &_txn, ns() );
+ // TODO: This is not correct (create collection under S-lock)
+ AutoGetCollectionForRead ctx(&_txn, ns());
+ Collection* collection = ctx.getDb()->getOrCreateCollection(&_txn, ns());
collection->cursorCache()->registerExecutor( exec );
}
void deregisterExec( PlanExecutor* exec ) {
- Client::ReadContext ctx(&_txn, ns());
- Collection* collection = ctx.ctx().db()->getOrCreateCollection( &_txn, ns() );
+ // TODO: This is not correct (create collection under S-lock)
+ AutoGetCollectionForRead ctx(&_txn, ns());
+ Collection* collection = ctx.getDb()->getOrCreateCollection(&_txn, ns());
collection->cursorCache()->deregisterExecutor( exec );
}
@@ -189,7 +190,9 @@ namespace QueryPlanExecutor {
insert(BSON("_id" << 2));
BSONObj filterObj = fromjson("{_id: {$gt: 0}}");
- scoped_ptr<PlanExecutor> exec(makeCollScanExec(ctx.ctx(),filterObj));
+
+ Collection* coll = ctx.ctx().db()->getCollection(&_txn, ns());
+ scoped_ptr<PlanExecutor> exec(makeCollScanExec(coll, filterObj));
registerExec(exec.get());
BSONObj objOut;
@@ -339,7 +342,9 @@ namespace QueryPlanExecutor {
setupCollection();
BSONObj filterObj = fromjson("{a: {$gte: 2}}");
- scoped_ptr<PlanExecutor> exec(makeCollScanExec(ctx.ctx(),filterObj));
+
+ Collection* coll = ctx.ctx().db()->getCollection(&_txn, ns());
+ scoped_ptr<PlanExecutor> exec(makeCollScanExec(coll, filterObj));
BSONObj objOut;
ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&objOut, NULL));
@@ -397,11 +402,12 @@ namespace QueryPlanExecutor {
insert(BSON("a" << 1 << "b" << 1));
BSONObj filterObj = fromjson("{_id: {$gt: 0}, b: {$gt: 0}}");
- PlanExecutor* exec = makeCollScanExec(ctx.ctx(),filterObj);
+
+ Collection* coll = ctx.ctx().db()->getCollection(&_txn, ns());
+ PlanExecutor* exec = makeCollScanExec(coll, filterObj);
// Make a client cursor from the runner.
- new ClientCursor(ctx.ctx().db()->getCollection(&_txn, ns()),
- exec, 0, BSONObj());
+ new ClientCursor(coll, exec, 0, BSONObj());
// There should be one cursor before invalidation,
// and zero cursors after invalidation.
@@ -425,12 +431,11 @@ namespace QueryPlanExecutor {
Collection* collection = ctx.ctx().db()->getCollection(&_txn, ns());
BSONObj filterObj = fromjson("{_id: {$gt: 0}, b: {$gt: 0}}");
- PlanExecutor* exec = makeCollScanExec(ctx.ctx(),filterObj);
+ PlanExecutor* exec = makeCollScanExec(collection, filterObj);
// Make a client cursor from the runner.
- ClientCursor* cc = new ClientCursor(collection,
- exec, 0, BSONObj());
- ClientCursorPin ccPin(collection,cc->cursorid());
+ ClientCursor* cc = new ClientCursor(collection, exec, 0, BSONObj());
+ ClientCursorPin ccPin(collection, cc->cursorid());
// If the cursor is pinned, it sticks around,
// even after invalidation.
@@ -464,11 +469,11 @@ namespace QueryPlanExecutor {
}
{
- Client::ReadContext ctx(&_txn, ns());
- Collection* collection = ctx.ctx().db()->getCollection(&_txn, ns());
+ AutoGetCollectionForRead ctx(&_txn, ns());
+ Collection* collection = ctx.getCollection();
BSONObj filterObj = fromjson("{_id: {$gt: 0}, b: {$gt: 0}}");
- PlanExecutor* exec = makeCollScanExec(ctx.ctx(),filterObj);
+ PlanExecutor* exec = makeCollScanExec(collection, filterObj);
// Make a client cursor from the runner.
new ClientCursor(collection, exec, 0, BSONObj());
diff --git a/src/mongo/dbtests/query_stage_collscan.cpp b/src/mongo/dbtests/query_stage_collscan.cpp
index 536a9446cff..689ce3677e9 100644
--- a/src/mongo/dbtests/query_stage_collscan.cpp
+++ b/src/mongo/dbtests/query_stage_collscan.cpp
@@ -74,11 +74,11 @@ namespace QueryStageCollectionScan {
}
int countResults(CollectionScanParams::Direction direction, const BSONObj& filterObj) {
- Client::ReadContext ctx(&_txn, ns());
+ AutoGetCollectionForRead ctx(&_txn, ns());
// Configure the scan.
CollectionScanParams params;
- params.collection = ctx.ctx().db()->getCollection( &_txn, ns() );
+ params.collection = ctx.getCollection();
params.direction = direction;
params.tailable = false;
@@ -184,11 +184,11 @@ namespace QueryStageCollectionScan {
class QueryStageCollscanObjectsInOrderForward : public QueryStageCollectionScanBase {
public:
void run() {
- Client::ReadContext ctx(&_txn, ns());
+ AutoGetCollectionForRead ctx(&_txn, ns());
// Configure the scan.
CollectionScanParams params;
- params.collection = ctx.ctx().db()->getCollection( &_txn, ns() );
+ params.collection = ctx.getCollection();
params.direction = CollectionScanParams::FORWARD;
params.tailable = false;
@@ -215,10 +215,10 @@ namespace QueryStageCollectionScan {
class QueryStageCollscanObjectsInOrderBackward : public QueryStageCollectionScanBase {
public:
void run() {
- Client::ReadContext ctx(&_txn, ns());
+ AutoGetCollectionForRead ctx(&_txn, ns());
CollectionScanParams params;
- params.collection = ctx.ctx().db()->getCollection( &_txn, ns() );
+ params.collection = ctx.getCollection();
params.direction = CollectionScanParams::BACKWARD;
params.tailable = false;
diff --git a/src/mongo/dbtests/query_stage_distinct.cpp b/src/mongo/dbtests/query_stage_distinct.cpp
index fc0d422e8d7..31f543e4170 100644
--- a/src/mongo/dbtests/query_stage_distinct.cpp
+++ b/src/mongo/dbtests/query_stage_distinct.cpp
@@ -62,12 +62,6 @@ namespace QueryStageDistinct {
_client.insert(ns(), obj);
}
- IndexDescriptor* getIndex(const BSONObj& obj) {
- Client::ReadContext ctx(&_txn, ns());
- Collection* collection = ctx.ctx().db()->getCollection( &_txn, ns() );
- return collection->getIndexCatalog()->findIndexByKeyPattern( &_txn, obj );
- }
-
/**
* Returns the projected value from the working set that would
* be returned in the 'values' field of the distinct command result.
@@ -123,11 +117,12 @@ namespace QueryStageDistinct {
// Make an index on a:1
addIndex(BSON("a" << 1));
- Client::ReadContext ctx(&_txn, ns());
+ AutoGetCollectionForRead ctx(&_txn, ns());
+ Collection* coll = ctx.getCollection();
// Set up the distinct stage.
DistinctParams params;
- params.descriptor = getIndex(BSON("a" << 1));
+ params.descriptor = coll->getIndexCatalog()->findIndexByKeyPattern(&_txn, BSON("a" << 1));
verify(params.descriptor);
params.direction = 1;
// Distinct-ing over the 0-th field of the keypattern.
@@ -186,11 +181,12 @@ namespace QueryStageDistinct {
// Make an index on a:1
addIndex(BSON("a" << 1));
- Client::ReadContext ctx(&_txn, ns());
+ AutoGetCollectionForRead ctx(&_txn, ns());
+ Collection* coll = ctx.getCollection();
// Set up the distinct stage.
DistinctParams params;
- params.descriptor = getIndex(BSON("a" << 1));
+ params.descriptor = coll->getIndexCatalog()->findIndexByKeyPattern(&_txn, BSON("a" << 1));
ASSERT_TRUE(params.descriptor->isMultikey(&_txn));
verify(params.descriptor);
diff --git a/src/mongo/dbtests/query_stage_tests.cpp b/src/mongo/dbtests/query_stage_tests.cpp
index 34ea6fe798b..ea59b791c5a 100644
--- a/src/mongo/dbtests/query_stage_tests.cpp
+++ b/src/mongo/dbtests/query_stage_tests.cpp
@@ -75,7 +75,7 @@ namespace QueryStageTests {
}
int countResults(const IndexScanParams& params, BSONObj filterObj = BSONObj()) {
- Client::ReadContext ctx(&_txn, ns());
+ AutoGetCollectionForRead ctx(&_txn, ns());
StatusWithMatchExpression swme = MatchExpressionParser::parse(filterObj);
verify(swme.isOK());
@@ -85,7 +85,7 @@ namespace QueryStageTests {
PlanExecutor runner(&_txn,
ws,
new IndexScan(&_txn, params, ws, filterExpr.get()),
- ctx.ctx().db()->getCollection(&_txn, ns()));
+ ctx.getCollection());
int count = 0;
for (DiskLoc dl; PlanExecutor::ADVANCED == runner.getNext(NULL, &dl); ) {
@@ -107,8 +107,8 @@ namespace QueryStageTests {
}
IndexDescriptor* getIndex(const BSONObj& obj) {
- Client::ReadContext ctx(&_txn, ns());
- Collection* collection = ctx.ctx().db()->getCollection( &_txn, ns() );
+ AutoGetCollectionForRead ctx(&_txn, ns());
+ Collection* collection = ctx.getCollection();
return collection->getIndexCatalog()->findIndexByKeyPattern( &_txn, obj );
}
diff --git a/src/mongo/dbtests/query_stage_update.cpp b/src/mongo/dbtests/query_stage_update.cpp
index 356045358af..2ca29438a93 100644
--- a/src/mongo/dbtests/query_stage_update.cpp
+++ b/src/mongo/dbtests/query_stage_update.cpp
@@ -223,8 +223,8 @@ namespace QueryStageUpdate {
// Verify the contents of the resulting collection.
{
- Client::ReadContext ctx(&_txn, ns());
- Collection* collection = ctx.ctx().db()->getCollection(&_txn, ns());
+ AutoGetCollectionForRead ctx(&_txn, ns());
+ Collection* collection = ctx.getCollection();
vector<BSONObj> objs;
getCollContents(collection, &objs);
@@ -332,8 +332,8 @@ namespace QueryStageUpdate {
// Check the contents of the collection.
{
- Client::ReadContext ctx(&_txn, ns());
- Collection* collection = ctx.ctx().db()->getCollection(&_txn, ns());
+ AutoGetCollectionForRead ctx(&_txn, ns());
+ Collection* collection = ctx.getCollection();
vector<BSONObj> objs;
getCollContents(collection, &objs);
diff --git a/src/mongo/dbtests/querytests.cpp b/src/mongo/dbtests/querytests.cpp
index 7140f489d73..f092eb926a4 100644
--- a/src/mongo/dbtests/querytests.cpp
+++ b/src/mongo/dbtests/querytests.cpp
@@ -28,7 +28,7 @@
* then also delete it in the license file.
*/
-#include "mongo/pch.h"
+#include "mongo/platform/basic.h"
#include "mongo/client/dbclientcursor.h"
#include "mongo/db/clientcursor.h"
@@ -307,8 +307,8 @@ namespace QueryTests {
// Check that the cursor has been removed.
{
- Client::ReadContext ctx(&_txn, ns);
- ASSERT(0 == ctx.ctx().db()->getCollection(&_txn, ns)->cursorCache()->numCursors());
+ AutoGetCollectionForRead ctx(&_txn, ns);
+ ASSERT(0 == ctx.getCollection()->cursorCache()->numCursors());
}
ASSERT_FALSE(CollectionCursorCache::eraseCursorGlobal(&_txn, cursorId));
@@ -357,9 +357,9 @@ namespace QueryTests {
// Check that the cursor still exists
{
- Client::ReadContext ctx(&_txn, ns);
- ASSERT( 1 == ctx.ctx().db()->getCollection( &_txn, ns )->cursorCache()->numCursors() );
- ASSERT( ctx.ctx().db()->getCollection( &_txn, ns )->cursorCache()->find( cursorId, false ) );
+ AutoGetCollectionForRead ctx(&_txn, ns);
+ ASSERT(1 == ctx.getCollection()->cursorCache()->numCursors());
+ ASSERT(ctx.getCollection()->cursorCache()->find(cursorId, false));
}
// Check that the cursor can be iterated until all documents are returned.
@@ -1122,8 +1122,8 @@ namespace QueryTests {
}
size_t numCursorsOpen() {
- Client::ReadContext ctx(&_txn, _ns);
- Collection* collection = ctx.ctx().db()->getCollection( &_txn, _ns );
+ AutoGetCollectionForRead ctx(&_txn, _ns);
+ Collection* collection = ctx.getCollection();
if ( !collection )
return 0;
return collection->cursorCache()->numCursors();
@@ -1451,9 +1451,8 @@ namespace QueryTests {
ClientCursor *clientCursor = 0;
{
- Client::ReadContext ctx(&_txn, ns());
- ClientCursorPin clientCursorPointer( ctx.ctx().db()->getCollection( &_txn, ns() ),
- cursorId );
+ AutoGetCollectionForRead ctx(&_txn, ns());
+ ClientCursorPin clientCursorPointer(ctx.getCollection(), cursorId);
clientCursor = clientCursorPointer.c();
// clientCursorPointer destructor unpins the cursor.
}
diff --git a/src/mongo/s/d_migrate.cpp b/src/mongo/s/d_migrate.cpp
index 6f8ba1b0bf0..434dbce863d 100644
--- a/src/mongo/s/d_migrate.cpp
+++ b/src/mongo/s/d_migrate.cpp
@@ -396,10 +396,10 @@ namespace mongo {
long long size = 0;
{
- Client::ReadContext cx(txn, _ns);
+ AutoGetCollectionForRead ctx(txn, _ns);
- xfer( txn, cx.ctx().db(), &_deleted, b, "deleted", size, false );
- xfer( txn, cx.ctx().db(), &_reload, b, "reload", size, true );
+ xfer(txn, ctx.getDb(), &_deleted, b, "deleted", size, false);
+ xfer(txn, ctx.getDb(), &_reload, b, "reload", size, true);
}
b.append( "size" , size );
@@ -418,8 +418,8 @@ namespace mongo {
long long maxChunkSize,
string& errmsg,
BSONObjBuilder& result ) {
- Client::ReadContext ctx(txn, _ns);
- Collection* collection = ctx.ctx().db()->getCollection( txn, _ns );
+ AutoGetCollectionForRead ctx(txn, _ns);
+ Collection* collection = ctx.getCollection();
if ( !collection ) {
errmsg = "ns not found, should be impossible";
return false;
@@ -515,9 +515,9 @@ namespace mongo {
int allocSize;
{
- Client::ReadContext ctx(txn, _ns);
- Collection* collection = ctx.ctx().db()->getCollection( txn, _ns );
- verify( collection );
+ AutoGetCollectionForRead ctx(txn, _ns);
+ Collection* collection = ctx.getCollection();
+ invariant(collection);
scoped_spinlock lk( _trackerLocks );
allocSize =
std::min(BSONObjMaxUserSize,
@@ -528,8 +528,8 @@ namespace mongo {
while ( 1 ) {
bool filledBuffer = false;
- Client::ReadContext ctx(txn, _ns);
- Collection* collection = ctx.ctx().db()->getCollection( txn, _ns );
+ AutoGetCollectionForRead ctx(txn, _ns);
+ Collection* collection = ctx.getCollection();
scoped_spinlock lk( _trackerLocks );
set<DiskLoc>::iterator i = _cloneLocs.begin();
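
[Annotation] Two details in the d_migrate.cpp hunks are easy to miss: verify(collection) becomes
invariant(collection), apparently trading the old throwing assertion for a hard abort when the
collection is missing, and the in-memory _trackerLocks spinlock is still taken only after the RAII
collection lock. A sketch of that ordering, with names as in the diff:

    // Lock ordering as used above: storage-level read lock first, then the spinlock.
    AutoGetCollectionForRead ctx(txn, _ns);
    Collection* collection = ctx.getCollection();
    invariant(collection);              // aborts rather than throwing like the old verify()
    scoped_spinlock lk(_trackerLocks);  // guards _cloneLocs while the read lock is held
    // ... walk _cloneLocs / compute allocSize under both locks ...
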
diff --git a/src/mongo/s/d_split.cpp b/src/mongo/s/d_split.cpp
index 721678695e5..344e0bd2698 100644
--- a/src/mongo/s/d_split.cpp
+++ b/src/mongo/s/d_split.cpp
@@ -122,8 +122,8 @@ namespace mongo {
return false;
}
- Client::ReadContext ctx(txn, ns);
- Collection* collection = ctx.ctx().db()->getCollection( txn, ns );
+ AutoGetCollectionForRead ctx(txn, ns);
+ Collection* collection = ctx.getCollection();
if ( !collection ) {
errmsg = "ns not found";
return false;
@@ -280,8 +280,8 @@ namespace mongo {
{
// Get the size estimate for this namespace
- Client::ReadContext ctx(txn, ns);
- Collection* collection = ctx.ctx().db()->getCollection( txn, ns );
+ AutoGetCollectionForRead ctx(txn, ns);
+ Collection* collection = ctx.getCollection();
if ( !collection ) {
errmsg = "ns not found";
return false;
@@ -829,8 +829,8 @@ namespace mongo {
dassert(newChunks.size() > 1);
{
- Client::ReadContext ctx(txn, ns);
- Collection* collection = ctx.ctx().db()->getCollection(txn, ns);
+ AutoGetCollectionForRead ctx(txn, ns);
+ Collection* collection = ctx.getCollection();
invariant(collection);
// Allow multiKey based on the invariant that shard keys must be
diff --git a/src/mongo/s/d_state.cpp b/src/mongo/s/d_state.cpp
index 24928c69400..93b3a19b62f 100644
--- a/src/mongo/s/d_state.cpp
+++ b/src/mongo/s/d_state.cpp
@@ -1396,7 +1396,7 @@ namespace mongo {
ChunkVersion wanted;
if (!shardVersionOk(ns, errmsg, received, wanted)) {
StringBuilder sb;
- sb << "[" << ns << "] shard version not ok in Client::Context: " << errmsg;
+ sb << "[" << ns << "] shard version not ok: " << errmsg;
throw SendStaleConfigException(ns, sb.str(), received, wanted);
}
}
diff --git a/src/mongo/tools/dump.cpp b/src/mongo/tools/dump.cpp
index 53a89d2efd9..97772f54655 100644
--- a/src/mongo/tools/dump.cpp
+++ b/src/mongo/tools/dump.cpp
@@ -26,7 +26,7 @@
* then also delete it in the license file.
*/
-#include "mongo/pch.h"
+#include "mongo/platform/basic.h"
#include <boost/filesystem/operations.hpp>
#include <boost/filesystem/convenience.hpp>
@@ -39,10 +39,11 @@
#include "mongo/client/dbclientcursor.h"
#include "mongo/db/auth/authorization_manager.h"
#include "mongo/db/catalog/database_catalog_entry.h"
-#include "mongo/db/db.h"
+#include "mongo/db/catalog/database_holder.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/operation_context_impl.h"
#include "mongo/db/catalog/collection.h"
+#include "mongo/db/catalog/database.h"
#include "mongo/tools/mongodump_options.h"
#include "mongo/tools/tool.h"
#include "mongo/util/options_parser/option_section.h"
@@ -360,7 +361,9 @@ public:
Writer w( f , &m );
try {
+ WriteUnitOfWork wunit(opCtx);
_repairExtents(opCtx, collection, w);
+ wunit.commit();
}
catch ( DBException& e ){
toolError() << "Repair scan failed: " << e.toString() << std::endl;
@@ -373,9 +376,7 @@ public:
int _repairByName(string dbname) {
OperationContextImpl txn;
- Client::WriteContext cx(&txn, dbname);
-
- Database* db = dbHolder().get(&txn, dbname);
+ Database* db = dbHolder().openDb(&txn, dbname);
list<string> namespaces;
db->getDatabaseCatalogEntry()->getCollectionNamespaces( &namespaces );
@@ -409,8 +410,7 @@ public:
toolError() << "ERROR recovering: " << ns << " " << e.toString() << std::endl;
}
}
- cx.commit();
-
+
return 0;
}
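
[Annotation] The dump.cpp changes pair two adjustments: the Client::WriteContext (and its
cx.commit() call) disappears in favor of dbHolder().openDb(), which opens the database rather than
merely fetching an already-loaded one, and the repair scan is wrapped in an explicit
WriteUnitOfWork. A sketch of the resulting commit/rollback shape, assuming the unit rolls back on
destruction unless commit() is reached:

    try {
        WriteUnitOfWork wunit(opCtx);            // writes below belong to this unit
        _repairExtents(opCtx, collection, w);
        wunit.commit();                          // only reached on success
    }                                            // an exception skips commit();
    catch (DBException& e) {                     // the unit is then rolled back
        toolError() << "Repair scan failed: " << e.toString() << std::endl;
    }
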
diff --git a/src/mongo/tools/shim.cpp b/src/mongo/tools/shim.cpp
index 5da90bfd7ae..2badc79261c 100644
--- a/src/mongo/tools/shim.cpp
+++ b/src/mongo/tools/shim.cpp
@@ -43,7 +43,6 @@
#include "mongo/db/catalog/collection.h"
#include "mongo/db/catalog/database.h"
#include "mongo/db/catalog/database_holder.h"
-#include "mongo/db/client.h"
#include "mongo/db/json.h"
#include "mongo/db/operation_context_impl.h"
#include "mongo/db/storage/record_store.h"
@@ -237,12 +236,16 @@ public:
virtual void generateOutputDocuments(std::ostream* out) const {
invariant(out);
toolInfoLog() << "going to try to recover data from: " << _ns << std::endl;
+
OperationContextImpl txn;
- Client::WriteContext cx(&txn, toolGlobalParams.db);
- Database* db = dbHolder().get(&txn, toolGlobalParams.db);
- Collection* collection = db->getCollection(&txn, _ns);
+ Database* db = dbHolder().openDb(&txn, toolGlobalParams.db);
+ if (!db) {
+ toolError() << "Database does not exist: " << toolGlobalParams.db << std::endl;
+ return;
+ }
+ Collection* collection = db->getCollection(&txn, _ns);
if (!collection) {
toolError() << "Collection does not exist: " << toolGlobalParams.coll << std::endl;
return;
@@ -251,6 +254,8 @@ public:
toolInfoLog() << "nrecords: " << collection->numRecords(&txn)
<< " datasize: " << collection->dataSize(&txn);
try {
+ WriteUnitOfWork wunit(&txn);
+
boost::scoped_ptr<RecordIterator> iter(
collection->getRecordStore()->getIteratorForRepair(&txn));
for (DiskLoc currLoc = iter->getNext(); !currLoc.isNull(); currLoc = iter->getNext()) {
@@ -287,11 +292,12 @@ public:
}
}
}
+
+ wunit.commit();
}
catch (DBException& e) {
toolError() << "ERROR recovering: " << _ns << " " << e.toString();
}
- cx.commit();
}
private:
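
[Annotation] shim.cpp gets the same treatment plus a defensive check the old code lacked: the
previous path dereferenced the result of dbHolder().get() unconditionally, while the new one
reports a missing database and returns. The shape, as in the hunk above:

    Database* db = dbHolder().openDb(&txn, toolGlobalParams.db);
    if (!db) {   // previously this result was dereferenced without a check
        toolError() << "Database does not exist: " << toolGlobalParams.db << std::endl;
        return;
    }
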
diff --git a/src/mongo/tools/tool.cpp b/src/mongo/tools/tool.cpp
index c80a140bffb..c6675da43f3 100644
--- a/src/mongo/tools/tool.cpp
+++ b/src/mongo/tools/tool.cpp
@@ -42,7 +42,6 @@
#include "mongo/db/auth/authorization_manager.h"
#include "mongo/db/auth/authorization_manager_global.h"
#include "mongo/db/auth/authz_manager_external_state_mock.h"
-#include "mongo/db/client.h"
#include "mongo/db/dbdirectclient.h"
#include "mongo/db/global_environment_experiment.h"
#include "mongo/db/global_environment_d.h"