author     Kaloian Manassiev <kaloian.manassiev@mongodb.com>    2014-06-02 16:32:25 -0400
committer  Kaloian Manassiev <kaloian.manassiev@mongodb.com>    2014-06-02 22:55:05 -0400
commit     e5da18f2dfbd71fb997734b524e5e4306d0af550 (patch)
tree       25d2d291a47414967b4f5b5974ce7be80f7ff843
parent     8c9fcc939f9f1a2b593e606bd790cc87efd4064f (diff)
SERVER-13961 Remove all 'checking' variants of dbHolder
All call sites of dbHolder are verifiably under the appropriate lock, so the checking variants are no longer needed. This allows the Lock::isLocked assertion to be removed from the accessor.
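The direction of the change can be sketched outside the MongoDB tree: rather than exposing dassert-wrapped checking variants (dbHolder(), dbHolderW()) alongside dbHolderUnchecked(), there is a single dbHolder() accessor, and each call site that needs a lock asserts its own lock state, as repairDatabase() now does below. The following is a minimal, hypothetical C++ sketch; LockState, invariant, and the DatabaseHolder contents here are simplified stand-ins, not the real MongoDB types.

// Minimal, self-contained sketch of the refactoring direction
// (hypothetical stand-in types; not the real MongoDB classes).
#include <cassert>
#include <iostream>
#include <map>
#include <string>

// Stand-in for MongoDB's invariant(); with NDEBUG this becomes a no-op.
#define invariant(expr) assert(expr)

// Stand-in for the per-thread lock state reachable from the operation context.
struct LockState {
    bool locked = false;
    bool threadState() const { return locked; }
};

struct Database { std::string name; };

// Single holder type: no checked/unchecked split.
class DatabaseHolder {
public:
    Database* getOrCreate(const std::string& ns) {
        return &_dbs[ns];   // creates an entry on first access
    }
private:
    std::map<std::string, Database> _dbs;
};

// Before: dbHolder() dasserted Lock::isLocked() and dbHolderUnchecked()
// skipped the check. After: one accessor with no locking assumptions baked in.
DatabaseHolder& dbHolder() {
    static DatabaseHolder holder;
    return holder;
}

// Call sites that genuinely need a lock assert it themselves.
Database* openDatabase(LockState* lockState, const std::string& ns) {
    invariant(lockState->threadState());   // we must hold some form of lock here
    return dbHolder().getOrCreate(ns);
}

int main() {
    LockState ls;
    ls.locked = true;                      // pretend the caller took a DB lock
    Database* db = openDatabase(&ls, "test");
    db->name = "test";
    std::cout << "opened " << db->name << "\n";
    return 0;
}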
-rw-r--r--  src/mongo/db/catalog/collection_cursor_cache.cpp     2
-rw-r--r--  src/mongo/db/catalog/database.cpp                    6
-rw-r--r--  src/mongo/db/catalog/database.h                      3
-rw-r--r--  src/mongo/db/catalog/database_holder.h              11
-rw-r--r--  src/mongo/db/client.cpp                              2
-rw-r--r--  src/mongo/db/db.h                                    2
-rw-r--r--  src/mongo/db/dbcommands.cpp                          4
-rw-r--r--  src/mongo/db/instance.cpp                            2
-rw-r--r--  src/mongo/db/introspect.cpp                          7
-rw-r--r--  src/mongo/db/pdfile.cpp                              2
-rw-r--r--  src/mongo/db/repair_database.cpp                    14
-rw-r--r--  src/mongo/db/repl/master_slave.cpp                  11
-rw-r--r--  src/mongo/db/storage/mmap_v1/mmap_v1_engine.cpp      2
-rw-r--r--  src/mongo/dbtests/dbhelper_tests.cpp                 3
-rw-r--r--  src/mongo/s/d_migrate.cpp                            4
-rw-r--r--  src/mongo/tools/dump.cpp                             2
16 files changed, 36 insertions, 41 deletions
diff --git a/src/mongo/db/catalog/collection_cursor_cache.cpp b/src/mongo/db/catalog/collection_cursor_cache.cpp
index dea303ed0e7..d73d16ce489 100644
--- a/src/mongo/db/catalog/collection_cursor_cache.cpp
+++ b/src/mongo/db/catalog/collection_cursor_cache.cpp
@@ -218,7 +218,7 @@ namespace mongo {
for ( unsigned i = 0; i < todo.size(); i++ ) {
const string& ns = todo[i];
Lock::DBRead lock(txn->lockState(), ns);
- Database* db = dbHolder().get( ns, storageGlobalParams.dbpath );
+ Database* db = dbHolder().get(ns, storageGlobalParams.dbpath);
if ( !db )
continue;
Client::Context context( ns, db );
diff --git a/src/mongo/db/catalog/database.cpp b/src/mongo/db/catalog/database.cpp
index 4a2207d638f..8a0d25dd1be 100644
--- a/src/mongo/db/catalog/database.cpp
+++ b/src/mongo/db/catalog/database.cpp
@@ -130,9 +130,7 @@ namespace mongo {
/*static*/
- string Database::duplicateUncasedName( bool inholderlock, const string &name, const string &path, set< string > *duplicates ) {
- Lock::assertAtLeastReadLocked(name);
-
+ string Database::duplicateUncasedName(const string &name, const string &path, set< string > *duplicates) {
if ( duplicates ) {
duplicates->clear();
}
@@ -141,7 +139,7 @@ namespace mongo {
getDatabaseNames( others , path );
set<string> allShortNames;
- dbHolder().getAllShortNames( allShortNames );
+ dbHolder().getAllShortNames(allShortNames);
others.insert( others.end(), allShortNames.begin(), allShortNames.end() );
diff --git a/src/mongo/db/catalog/database.h b/src/mongo/db/catalog/database.h
index aae37fdde46..94b52680931 100644
--- a/src/mongo/db/catalog/database.h
+++ b/src/mongo/db/catalog/database.h
@@ -148,8 +148,7 @@ namespace mongo {
* 'duplicates' is specified, it is filled with all duplicate names.
// TODO move???
*/
- static string duplicateUncasedName( bool inholderlockalready,
- const std::string &name,
+ static string duplicateUncasedName( const std::string &name,
const std::string &path,
std::set< std::string > *duplicates = 0 );
diff --git a/src/mongo/db/catalog/database_holder.h b/src/mongo/db/catalog/database_holder.h
index ad40b8601c4..751215518c6 100644
--- a/src/mongo/db/catalog/database_holder.h
+++ b/src/mongo/db/catalog/database_holder.h
@@ -131,14 +131,5 @@ namespace mongo {
}
};
- DatabaseHolder& dbHolderUnchecked();
- inline const DatabaseHolder& dbHolder() {
- dassert( Lock::isLocked() );
- return dbHolderUnchecked();
- }
- inline DatabaseHolder& dbHolderW() {
- dassert( Lock::isW() );
- return dbHolderUnchecked();
- }
-
+ DatabaseHolder& dbHolder();
}
diff --git a/src/mongo/db/client.cpp b/src/mongo/db/client.cpp
index 4a28e280ad6..46f2c2c10da 100644
--- a/src/mongo/db/client.cpp
+++ b/src/mongo/db/client.cpp
@@ -282,7 +282,7 @@ namespace mongo {
}
OperationContextImpl txn; // TODO get rid of this once reads require transactions
- _db = dbHolderUnchecked().getOrCreate(&txn, _ns, _path, _justCreated);
+ _db = dbHolder().getOrCreate(&txn, _ns, _path, _justCreated);
verify(_db);
if( _doVersion ) checkNotStale();
massert(16107,
diff --git a/src/mongo/db/db.h b/src/mongo/db/db.h
index 32cadc2ac1c..0ab445d94b6 100644
--- a/src/mongo/db/db.h
+++ b/src/mongo/db/db.h
@@ -53,7 +53,7 @@ namespace mongo {
dbtemprelease(LockState* lockState) {
const Client& c = cc();
_context = c.getContext();
- verify( Lock::isLocked() );
+ invariant(lockState->threadState());
if( Lock::nested() ) {
massert(10298 , "can't temprelease nested lock", false);
}
diff --git a/src/mongo/db/dbcommands.cpp b/src/mongo/db/dbcommands.cpp
index a9d1da2431e..921dd10a519 100644
--- a/src/mongo/db/dbcommands.cpp
+++ b/src/mongo/db/dbcommands.cpp
@@ -684,7 +684,7 @@ namespace mongo {
set<string> allShortNames;
{
Lock::GlobalRead lk(txn->lockState());
- dbHolder().getAllShortNames( allShortNames );
+ dbHolder().getAllShortNames(allShortNames);
}
for ( set<string>::iterator i = allShortNames.begin(); i != allShortNames.end(); i++ ) {
@@ -741,7 +741,7 @@ namespace mongo {
Client::Context ctx(dbname);
try {
- return dbHolderW().closeAll(storageGlobalParams.dbpath, result, false);
+ return dbHolder().closeAll(storageGlobalParams.dbpath, result, false);
}
catch(DBException&) {
throw;
diff --git a/src/mongo/db/instance.cpp b/src/mongo/db/instance.cpp
index 467217f0425..ac0323ac517 100644
--- a/src/mongo/db/instance.cpp
+++ b/src/mongo/db/instance.cpp
@@ -560,7 +560,7 @@ namespace mongo {
string prefix(db);
prefix += '.';
- dbHolderW().erase( db, path );
+ dbHolder().erase(db, path);
ctx->_clear();
delete database; // closes files
}
diff --git a/src/mongo/db/introspect.cpp b/src/mongo/db/introspect.cpp
index ff9004ab6e8..a59e08024df 100644
--- a/src/mongo/db/introspect.cpp
+++ b/src/mongo/db/introspect.cpp
@@ -138,10 +138,11 @@ namespace {
// NOTE: It's kind of weird that we lock the op's namespace, but have to for now since
// we're sometimes inside the lock already
Lock::DBWrite lk(txn->lockState(), currentOp.getNS() );
- if (dbHolder()._isLoaded(nsToDatabase(currentOp.getNS()), storageGlobalParams.dbpath)) {
+ if (dbHolder()._isLoaded(
+ nsToDatabase(currentOp.getNS()), storageGlobalParams.dbpath)) {
+
Client::Context cx(currentOp.getNS(), storageGlobalParams.dbpath, false);
- _profile(txn, c, cx.db(),
- currentOp, profileBufBuilder);
+ _profile(txn, c, cx.db(), currentOp, profileBufBuilder);
}
else {
mongo::log() << "note: not profiling because db went away - probably a close on: "
diff --git a/src/mongo/db/pdfile.cpp b/src/mongo/db/pdfile.cpp
index ed7630b4ea9..bed1717e790 100644
--- a/src/mongo/db/pdfile.cpp
+++ b/src/mongo/db/pdfile.cpp
@@ -86,7 +86,7 @@ namespace mongo {
DatabaseHolder _dbHolder;
- DatabaseHolder& dbHolderUnchecked() {
+ DatabaseHolder& dbHolder() {
return _dbHolder;
}
diff --git a/src/mongo/db/repair_database.cpp b/src/mongo/db/repair_database.cpp
index aac1c1b3eca..23f617877f2 100644
--- a/src/mongo/db/repair_database.cpp
+++ b/src/mongo/db/repair_database.cpp
@@ -276,6 +276,9 @@ namespace mongo {
string dbName,
bool preserveClonedFilesOnFailure,
bool backupOriginalFiles ) {
+ // We must hold some form of lock here
+ invariant(txn->lockState()->threadState());
+
scoped_ptr<RepairFileDeleter> repairFileDeleter;
doingRepair dr;
dbName = nsToDatabase( dbName );
@@ -311,14 +314,17 @@ namespace mongo {
reservedPath ) );
{
- Database* originalDatabase = dbHolder().get( dbName, storageGlobalParams.dbpath );
- if ( originalDatabase == NULL )
- return Status( ErrorCodes::NamespaceNotFound, "database does not exist to repair" );
+ Database* originalDatabase =
+ dbHolder().get(dbName, storageGlobalParams.dbpath);
+ if (originalDatabase == NULL) {
+ return Status(ErrorCodes::NamespaceNotFound, "database does not exist to repair");
+ }
Database* tempDatabase = NULL;
{
bool justCreated = false;
- tempDatabase = dbHolderW().getOrCreate(txn, dbName, reservedPathString, justCreated);
+ tempDatabase =
+ dbHolder().getOrCreate(txn, dbName, reservedPathString, justCreated);
invariant( justCreated );
}
diff --git a/src/mongo/db/repl/master_slave.cpp b/src/mongo/db/repl/master_slave.cpp
index 5405307dd5c..6cebb00f087 100644
--- a/src/mongo/db/repl/master_slave.cpp
+++ b/src/mongo/db/repl/master_slave.cpp
@@ -427,6 +427,7 @@ namespace repl {
const BSONObj &op,
const char* ns,
const char* db ) {
+ // We are already locked at this point
if (dbHolder()._isLoaded(ns, storageGlobalParams.dbpath)) {
// Database is already present.
return true;
@@ -437,7 +438,7 @@ namespace repl {
// missing from master after optime "ts".
return false;
}
- if (Database::duplicateUncasedName(false, db, storageGlobalParams.dbpath).empty()) {
+ if (Database::duplicateUncasedName(db, storageGlobalParams.dbpath).empty()) {
// No duplicate database names are present.
return true;
}
@@ -446,7 +447,7 @@ namespace repl {
bool dbOk = false;
{
dbtemprelease release(txn->lockState());
-
+
// We always log an operation after executing it (never before), so
// a database list will always be valid as of an oplog entry generated
// before it was retrieved.
@@ -490,7 +491,7 @@ namespace repl {
// Check for duplicates again, since we released the lock above.
set< string > duplicates;
- Database::duplicateUncasedName(false, db, storageGlobalParams.dbpath, &duplicates);
+ Database::duplicateUncasedName(db, storageGlobalParams.dbpath, &duplicates);
// The database is present on the master and no conflicting databases
// are present on the master. Drop any local conflicts.
@@ -502,8 +503,8 @@ namespace repl {
dropDatabase(txn, ctx.db());
}
- massert( 14034, "Duplicate database names present after attempting to delete duplicates",
- Database::duplicateUncasedName(false, db, storageGlobalParams.dbpath).empty() );
+ massert(14034, "Duplicate database names present after attempting to delete duplicates",
+ Database::duplicateUncasedName(db, storageGlobalParams.dbpath).empty());
return true;
}
diff --git a/src/mongo/db/storage/mmap_v1/mmap_v1_engine.cpp b/src/mongo/db/storage/mmap_v1/mmap_v1_engine.cpp
index f246405d64d..f875aabac72 100644
--- a/src/mongo/db/storage/mmap_v1/mmap_v1_engine.cpp
+++ b/src/mongo/db/storage/mmap_v1/mmap_v1_engine.cpp
@@ -110,7 +110,7 @@ namespace mongo {
}
void MMAP1DatabaseCatalogEntry::_checkDuplicateUncasedNames() const {
- string duplicate = Database::duplicateUncasedName(true, name(), _path );
+ string duplicate = Database::duplicateUncasedName(name(), _path);
if ( !duplicate.empty() ) {
stringstream ss;
ss << "db already exists with different case already have: [" << duplicate
diff --git a/src/mongo/dbtests/dbhelper_tests.cpp b/src/mongo/dbtests/dbhelper_tests.cpp
index 3c74b2af526..1339deb0074 100644
--- a/src/mongo/dbtests/dbhelper_tests.cpp
+++ b/src/mongo/dbtests/dbhelper_tests.cpp
@@ -150,7 +150,8 @@ namespace mongo {
ASSERT_NOT_EQUALS( estSizeBytes, 0 );
ASSERT_LESS_THAN( estSizeBytes, maxSizeBytes );
- Database* db = dbHolder().get(nsToDatabase(range.ns), storageGlobalParams.dbpath);
+ Database* db = dbHolder().get(
+ nsToDatabase(range.ns), storageGlobalParams.dbpath);
const Collection* collection = db->getCollection(&txn, ns);
// Make sure all the disklocs actually correspond to the right info
diff --git a/src/mongo/s/d_migrate.cpp b/src/mongo/s/d_migrate.cpp
index c3515288ed6..c44d207dd9c 100644
--- a/src/mongo/s/d_migrate.cpp
+++ b/src/mongo/s/d_migrate.cpp
@@ -556,8 +556,6 @@ namespace mongo {
* @return true if we are NOT in the critical section
*/
bool waitTillNotInCriticalSection( int maxSecondsToWait ) {
- verify( !Lock::isLocked() );
-
boost::xtime xt;
boost::xtime_get(&xt, MONGO_BOOST_TIME_UTC);
xt.sec += maxSecondsToWait;
@@ -1094,7 +1092,7 @@ namespace mongo {
// Track last result from TO shard for sanity check
BSONObj res;
for ( int i=0; i<86400; i++ ) { // don't want a single chunk move to take more than a day
- verify( !Lock::isLocked() );
+ verify(!txn->lockState()->threadState());
// Exponential sleep backoff, up to 1024ms. Don't sleep much on the first few
// iterations, since we want empty chunk migrations to be fast.
sleepmillis( 1 << std::min( i , 10 ) );
diff --git a/src/mongo/tools/dump.cpp b/src/mongo/tools/dump.cpp
index 83382b6c1ff..9144743c414 100644
--- a/src/mongo/tools/dump.cpp
+++ b/src/mongo/tools/dump.cpp
@@ -338,7 +338,7 @@ public:
OperationContextImpl txn;
Client::WriteContext cx(&txn, dbname);
- Database* db = dbHolderUnchecked().get(dbname, storageGlobalParams.dbpath);
+ Database* db = dbHolder().get(dbname, storageGlobalParams.dbpath);
list<string> namespaces;
db->getDatabaseCatalogEntry()->getCollectionNamespaces( &namespaces );