summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--src/mongo/db/catalog/collection_cursor_cache.cpp4
-rw-r--r--src/mongo/db/catalog/collection_info_cache.cpp3
-rw-r--r--src/mongo/db/catalog/collection_info_cache.h5
-rw-r--r--src/mongo/db/catalog/database.cpp13
-rw-r--r--src/mongo/db/catalog/database_holder.cpp30
-rw-r--r--src/mongo/db/catalog/database_holder.h42
-rw-r--r--src/mongo/db/catalog/index_catalog.cpp7
-rw-r--r--src/mongo/db/catalog/index_create.cpp2
-rw-r--r--src/mongo/db/client.cpp10
-rw-r--r--src/mongo/db/clientcursor.cpp2
-rw-r--r--src/mongo/db/clientcursor.h3
-rw-r--r--src/mongo/db/clientlistplugin.cpp2
-rw-r--r--src/mongo/db/commands/apply_ops.cpp2
-rw-r--r--src/mongo/db/commands/create_indexes.cpp7
-rw-r--r--src/mongo/db/commands/write_commands/batch_executor.cpp22
-rw-r--r--src/mongo/db/curop.cpp2
-rw-r--r--src/mongo/db/d_concurrency.cpp53
-rw-r--r--src/mongo/db/d_concurrency.h11
-rw-r--r--src/mongo/db/db.cpp7
-rw-r--r--src/mongo/db/dbhelpers.cpp4
-rw-r--r--src/mongo/db/dbwebserver.cpp8
-rw-r--r--src/mongo/db/dbwebserver.h4
-rw-r--r--src/mongo/db/index_builder.cpp4
-rw-r--r--src/mongo/db/instance.cpp11
-rw-r--r--src/mongo/db/introspect.cpp4
-rw-r--r--src/mongo/db/lockstate.cpp25
-rw-r--r--src/mongo/db/lockstate.h8
-rw-r--r--src/mongo/db/operation_context_impl.cpp2
-rw-r--r--src/mongo/db/pdfile.cpp2
-rw-r--r--src/mongo/db/pipeline/pipeline_d.cpp2
-rw-r--r--src/mongo/db/prefetch.cpp2
-rw-r--r--src/mongo/db/repair_database.cpp2
-rw-r--r--src/mongo/db/repl/bgsync.cpp1
-rw-r--r--src/mongo/db/repl/master_slave.cpp4
-rw-r--r--src/mongo/db/repl/oplog.cpp7
-rw-r--r--src/mongo/db/repl/oplog.h2
-rw-r--r--src/mongo/db/repl/repl_coordinator_impl.cpp2
-rw-r--r--src/mongo/db/repl/rs_rollback.cpp4
-rw-r--r--src/mongo/db/repl/rs_sync.cpp2
-rw-r--r--src/mongo/db/repl/sync.cpp2
-rw-r--r--src/mongo/db/repl/sync_tail.cpp6
-rw-r--r--src/mongo/db/repl/topology_coordinator.h3
-rw-r--r--src/mongo/db/repl/topology_coordinator_impl.cpp5
-rw-r--r--src/mongo/db/repl/topology_coordinator_impl.h5
-rw-r--r--src/mongo/db/repl/topology_coordinator_mock.cpp2
-rw-r--r--src/mongo/db/repl/topology_coordinator_mock.h5
-rw-r--r--src/mongo/db/restapi.cpp2
-rw-r--r--src/mongo/db/stats/snapshots_webplugins.cpp7
-rw-r--r--src/mongo/db/storage/durable_mapped_file.cpp38
-rw-r--r--src/mongo/db/storage/durable_mapped_file.h1
-rw-r--r--src/mongo/db/storage/extent_manager.h2
-rw-r--r--src/mongo/db/storage/mmap_v1/dur.cpp16
-rw-r--r--src/mongo/db/storage/mmap_v1/dur_commitjob.cpp3
-rw-r--r--src/mongo/db/storage/mmap_v1/dur_recovery_unit.cpp3
-rw-r--r--src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.cpp22
-rw-r--r--src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.h2
-rw-r--r--src/mongo/db/structure/catalog/namespace_details.cpp2
-rw-r--r--src/mongo/db/structure/catalog/namespace_index.cpp6
-rw-r--r--src/mongo/db/structure/record_store_v1_test_help.cpp3
-rw-r--r--src/mongo/db/structure/record_store_v1_test_help.h2
-rw-r--r--src/mongo/dbtests/dbhelper_tests.cpp2
-rw-r--r--src/mongo/dbtests/mmaptests.cpp3
-rw-r--r--src/mongo/dbtests/threadedtests.cpp4
-rw-r--r--src/mongo/s/d_logic.h27
-rw-r--r--src/mongo/s/d_merge.cpp2
-rw-r--r--src/mongo/s/d_migrate.cpp10
-rw-r--r--src/mongo/s/d_split.cpp2
-rw-r--r--src/mongo/s/d_state.cpp34
-rw-r--r--src/mongo/tools/dump.cpp2
-rw-r--r--src/mongo/util/mmap_posix.cpp1
-rw-r--r--src/mongo/util/mmap_win.cpp2
71 files changed, 254 insertions, 299 deletions
diff --git a/src/mongo/db/catalog/collection_cursor_cache.cpp b/src/mongo/db/catalog/collection_cursor_cache.cpp
index d73d16ce489..2059ab2b983 100644
--- a/src/mongo/db/catalog/collection_cursor_cache.cpp
+++ b/src/mongo/db/catalog/collection_cursor_cache.cpp
@@ -188,7 +188,7 @@ namespace mongo {
}
Lock::DBRead lock(txn->lockState(), ns);
- Database* db = dbHolder().get(ns, storageGlobalParams.dbpath);
+ Database* db = dbHolder().get(txn, ns, storageGlobalParams.dbpath);
if ( !db )
return false;
Client::Context context( ns, db );
@@ -218,7 +218,7 @@ namespace mongo {
for ( unsigned i = 0; i < todo.size(); i++ ) {
const string& ns = todo[i];
Lock::DBRead lock(txn->lockState(), ns);
- Database* db = dbHolder().get(ns, storageGlobalParams.dbpath);
+ Database* db = dbHolder().get(txn, ns, storageGlobalParams.dbpath);
if ( !db )
continue;
Client::Context context( ns, db );
diff --git a/src/mongo/db/catalog/collection_info_cache.cpp b/src/mongo/db/catalog/collection_info_cache.cpp
index 2775be475ea..bac3cb9a4e1 100644
--- a/src/mongo/db/catalog/collection_info_cache.cpp
+++ b/src/mongo/db/catalog/collection_info_cache.cpp
@@ -46,7 +46,6 @@ namespace mongo {
_querySettings(new QuerySettings()) { }
void CollectionInfoCache::reset() {
- Lock::assertWriteLocked( _collection->ns().ns() );
LOG(1) << _collection->ns().ns() << ": clearing plan cache - collection info cache reset";
clearQueryCache();
_keysComputed = false;
@@ -55,8 +54,6 @@ namespace mongo {
}
void CollectionInfoCache::computeIndexKeys() {
- DEV Lock::assertWriteLocked( _collection->ns().ns() );
-
_indexedPaths.clear();
IndexCatalog::IndexIterator i = _collection->getIndexCatalog()->getIndexIterator( true );
diff --git a/src/mongo/db/catalog/collection_info_cache.h b/src/mongo/db/catalog/collection_info_cache.h
index 223ed4112a3..874018fdc89 100644
--- a/src/mongo/db/catalog/collection_info_cache.h
+++ b/src/mongo/db/catalog/collection_info_cache.h
@@ -50,7 +50,7 @@ namespace mongo {
CollectionInfoCache( Collection* collection );
/*
- * resets entire cache state
+ * Resets entire cache state. Must be called under exclusive DB lock.
*/
void reset();
@@ -106,6 +106,9 @@ namespace mongo {
// Includes index filters.
boost::scoped_ptr<QuerySettings> _querySettings;
+ /**
+ * Must be called under exclusive DB lock.
+ */
void computeIndexKeys();
};
diff --git a/src/mongo/db/catalog/database.cpp b/src/mongo/db/catalog/database.cpp
index d3b67c97557..4b17f49453d 100644
--- a/src/mongo/db/catalog/database.cpp
+++ b/src/mongo/db/catalog/database.cpp
@@ -62,9 +62,7 @@ namespace mongo {
}
Database::~Database() {
- verify( Lock::isW() );
-
- for ( CollectionMap::const_iterator i = _collections.begin(); i != _collections.end(); ++i )
+ for (CollectionMap::const_iterator i = _collections.begin(); i != _collections.end(); ++i)
delete i->second;
}
@@ -104,8 +102,8 @@ namespace mongo {
}
Database::Database(OperationContext* txn, const char *nm, bool& newDb, const string& path )
- : _name(nm), _path(path),
- _dbEntry(new MMAP1DatabaseCatalogEntry( txn, _name, _path, storageGlobalParams.directoryperdb) ),
+ : _name(nm),
+ _path(path),
_profileName(_name + ".system.profile"),
_namespacesName(_name + ".system.namespaces"),
_indexesName(_name + ".system.indexes"),
@@ -117,6 +115,9 @@ namespace mongo {
uasserted( 10028, status.toString() );
}
+ _dbEntry.reset(new MMAP1DatabaseCatalogEntry(
+ txn, _name, _path, storageGlobalParams.directoryperdb));
+
_profile = serverGlobalParams.defaultProfile;
newDb = !_dbEntry->exists();
}
@@ -158,7 +159,7 @@ namespace mongo {
void Database::clearTmpCollections(OperationContext* txn) {
- Lock::assertWriteLocked( _name );
+ txn->lockState()->assertWriteLocked( _name );
// Note: we build up a toDelete vector rather than dropping the collection inside the loop
// to avoid modifying the system.namespaces collection while iterating over it since that
diff --git a/src/mongo/db/catalog/database_holder.cpp b/src/mongo/db/catalog/database_holder.cpp
index 0cd14a4f7f9..9d71d4b75f7 100644
--- a/src/mongo/db/catalog/database_holder.cpp
+++ b/src/mongo/db/catalog/database_holder.cpp
@@ -43,13 +43,31 @@
namespace mongo {
+ Database* DatabaseHolder::get(OperationContext* txn,
+ const std::string& ns,
+ const std::string& path) const {
+
+ txn->lockState()->assertAtLeastReadLocked(ns);
+
+ SimpleMutex::scoped_lock lk(_m);
+ Paths::const_iterator x = _paths.find( path );
+ if ( x == _paths.end() )
+ return 0;
+ const DBs& m = x->second;
+ const std::string db = _todb( ns );
+ DBs::const_iterator it = m.find(db);
+ if ( it != m.end() )
+ return it->second;
+ return NULL;
+ }
+
Database* DatabaseHolder::getOrCreate(
OperationContext* txn, const string& ns, const string& path, bool& justCreated) {
const string dbname = _todb( ns );
invariant(txn->lockState()->isAtLeastReadLocked(dbname));
- if (txn->lockState()->hasAnyWriteLock() && FileAllocator::get()->hasFailed()) {
+ if (txn->lockState()->isWriteLocked() && FileAllocator::get()->hasFailed()) {
uassert(17507, "Can't take a write lock while out of disk space", false);
}
@@ -95,6 +113,16 @@ namespace mongo {
return db;
}
+ void DatabaseHolder::erase(OperationContext* txn,
+ const std::string& ns,
+ const std::string& path) {
+ invariant(txn->lockState()->isW());
+
+ SimpleMutex::scoped_lock lk(_m);
+ DBs& m = _paths[path];
+ _size -= (int)m.erase(_todb(ns));
+ }
+
bool DatabaseHolder::closeAll(
OperationContext* txn, const string& path, BSONObjBuilder& result, bool force) {
log() << "DatabaseHolder::closeAll path:" << path << endl;
diff --git a/src/mongo/db/catalog/database_holder.h b/src/mongo/db/catalog/database_holder.h
index 007ed7a3871..5ffd4534744 100644
--- a/src/mongo/db/catalog/database_holder.h
+++ b/src/mongo/db/catalog/database_holder.h
@@ -49,50 +49,16 @@ namespace mongo {
public:
DatabaseHolder() : _m("dbholder"),_size(0) { }
- bool __isLoaded( const std::string& ns , const std::string& path ) const {
- SimpleMutex::scoped_lock lk(_m);
- Paths::const_iterator x = _paths.find( path );
- if ( x == _paths.end() )
- return false;
- const DBs& m = x->second;
-
- std::string db = _todb( ns );
-
- DBs::const_iterator it = m.find(db);
- return it != m.end();
- }
- // must be write locked as otherwise isLoaded could go false->true on you
- // in the background and you might not expect that.
- bool _isLoaded( const std::string& ns , const std::string& path ) const {
- Lock::assertWriteLocked(ns);
- return __isLoaded(ns,path);
- }
-
- Database * get( const std::string& ns , const std::string& path ) const {
- SimpleMutex::scoped_lock lk(_m);
- Lock::assertAtLeastReadLocked(ns);
- Paths::const_iterator x = _paths.find( path );
- if ( x == _paths.end() )
- return 0;
- const DBs& m = x->second;
- std::string db = _todb( ns );
- DBs::const_iterator it = m.find(db);
- if ( it != m.end() )
- return it->second;
- return 0;
- }
+ Database* get(OperationContext* txn,
+ const std::string& ns,
+ const std::string& path) const;
Database* getOrCreate(OperationContext* txn,
const std::string& ns,
const std::string& path,
bool& justCreated);
- void erase( const std::string& ns , const std::string& path ) {
- SimpleMutex::scoped_lock lk(_m);
- verify( Lock::isW() );
- DBs& m = _paths[path];
- _size -= (int)m.erase( _todb( ns ) );
- }
+ void erase(OperationContext* txn, const std::string& ns, const std::string& path);
/** @param force - force close even if something underway - use at shutdown */
bool closeAll(OperationContext* txn,
diff --git a/src/mongo/db/catalog/index_catalog.cpp b/src/mongo/db/catalog/index_catalog.cpp
index 6f8559d48d2..a8dfa6b35a8 100644
--- a/src/mongo/db/catalog/index_catalog.cpp
+++ b/src/mongo/db/catalog/index_catalog.cpp
@@ -276,7 +276,7 @@ namespace mongo {
BSONObj spec,
bool mayInterrupt,
ShutdownBehavior shutdownBehavior ) {
- Lock::assertWriteLocked( _collection->_database->name() );
+ txn->lockState()->assertWriteLocked( _collection->_database->name() );
_checkMagic();
Status status = _checkUnfinished();
@@ -640,7 +640,7 @@ namespace mongo {
Status IndexCatalog::dropAllIndexes(OperationContext* txn,
bool includingIdIndex) {
- Lock::assertWriteLocked( _collection->_database->name() );
+ txn->lockState()->assertWriteLocked( _collection->_database->name() );
BackgroundOperation::assertNoBgOpInProgForNs( _collection->ns().ns() );
@@ -724,7 +724,7 @@ namespace mongo {
Status IndexCatalog::dropIndex(OperationContext* txn,
IndexDescriptor* desc ) {
- Lock::assertWriteLocked( _collection->_database->name() );
+ txn->lockState()->assertWriteLocked( _collection->_database->name() );
IndexCatalogEntry* entry = _entries.find( desc );
if ( !entry )
@@ -1170,7 +1170,6 @@ namespace mongo {
std::vector<BSONObj>
IndexCatalog::killMatchingIndexBuilds(const IndexCatalog::IndexKillCriteria& criteria) {
- verify(Lock::somethingWriteLocked());
std::vector<BSONObj> indexes;
for (InProgressIndexesMap::iterator it = _inProgressIndexes.begin();
it != _inProgressIndexes.end();
diff --git a/src/mongo/db/catalog/index_create.cpp b/src/mongo/db/catalog/index_create.cpp
index c0739c2075c..62c851d0260 100644
--- a/src/mongo/db/catalog/index_create.cpp
+++ b/src/mongo/db/catalog/index_create.cpp
@@ -219,7 +219,7 @@ namespace mongo {
backgroundOperation.reset( new BackgroundOperation(ns) );
uassert( 13130,
"can't start bg index b/c in recursive lock (db.eval?)",
- !Lock::nested() );
+ !txn->lockState()->isRecursive() );
log() << "\t building index in background";
}
diff --git a/src/mongo/db/client.cpp b/src/mongo/db/client.cpp
index 14f38e4fd31..75812b5214a 100644
--- a/src/mongo/db/client.cpp
+++ b/src/mongo/db/client.cpp
@@ -192,7 +192,7 @@ namespace mongo {
OperationContext* txn, const string& ns, bool doVersion) {
{
_lk.reset(new Lock::DBRead(txn->lockState(), ns));
- Database *db = dbHolder().get(ns, storageGlobalParams.dbpath);
+ Database *db = dbHolder().get(txn, ns, storageGlobalParams.dbpath);
if( db ) {
_c.reset(new Context(storageGlobalParams.dbpath, ns, db, doVersion));
return;
@@ -202,12 +202,12 @@ namespace mongo {
// we usually don't get here, so doesn't matter how fast this part is
{
DEV log() << "_DEBUG ReadContext db wasn't open, will try to open " << ns << endl;
- if( Lock::isW() ) {
+ if (txn->lockState()->isW()) {
// write locked already
DEV RARELY log() << "write locked on ReadContext construction " << ns << endl;
_c.reset(new Context(ns, storageGlobalParams.dbpath, doVersion));
}
- else if( !Lock::nested() ) {
+ else if (!txn->lockState()->isRecursive()) {
_lk.reset(0);
{
Lock::GlobalWrite w(txn->lockState());
@@ -376,7 +376,7 @@ namespace mongo {
Client* c = *i;
if ( c->lockState().hasLockPending() ) {
num++;
- if ( c->lockState().hasAnyWriteLock() )
+ if ( c->lockState().isWriteLocked() )
w++;
else
r++;
@@ -409,7 +409,7 @@ namespace mongo {
if ( ! c->curop()->active() )
continue;
- if ( c->lockState().hasAnyWriteLock() )
+ if ( c->lockState().isWriteLocked() )
writers++;
if ( c->lockState().hasAnyReadLock() )
readers++;
diff --git a/src/mongo/db/clientcursor.cpp b/src/mongo/db/clientcursor.cpp
index 428d7f7b2d2..2174949678e 100644
--- a/src/mongo/db/clientcursor.cpp
+++ b/src/mongo/db/clientcursor.cpp
@@ -100,8 +100,6 @@ namespace mongo {
_pinValue = 0;
_pos = 0;
- Lock::assertAtLeastReadLocked(_ns);
-
if (_queryOptions & QueryOption_NoCursorTimeout) {
// cursors normally timeout after an inactivity period to prevent excess memory use
// setting this prevents timeout of the cursor in question.
diff --git a/src/mongo/db/clientcursor.h b/src/mongo/db/clientcursor.h
index ae6640fa39f..252212a712a 100644
--- a/src/mongo/db/clientcursor.h
+++ b/src/mongo/db/clientcursor.h
@@ -146,7 +146,8 @@ namespace mongo {
friend class CollectionCursorCache;
/**
- * Initialization common between both constructors for the ClientCursor.
+ * Initialization common between both constructors for the ClientCursor. The database must
+ * be stable when this is called, because cursors hang off the collection.
*/
void init();
diff --git a/src/mongo/db/clientlistplugin.cpp b/src/mongo/db/clientlistplugin.cpp
index 9c5f6d3ce60..7cf07358056 100644
--- a/src/mongo/db/clientlistplugin.cpp
+++ b/src/mongo/db/clientlistplugin.cpp
@@ -40,7 +40,7 @@ namespace {
ClientListPlugin() : WebStatusPlugin( "clients" , 20 ) {}
virtual void init() {}
- virtual void run( std::stringstream& ss ) {
+ virtual void run(OperationContext* txn, std::stringstream& ss ) {
using namespace mongoutils::html;
ss << "\n<table border=1 cellpadding=2 cellspacing=0>";
diff --git a/src/mongo/db/commands/apply_ops.cpp b/src/mongo/db/commands/apply_ops.cpp
index 2a1fd60bba6..eec416cbfbc 100644
--- a/src/mongo/db/commands/apply_ops.cpp
+++ b/src/mongo/db/commands/apply_ops.cpp
@@ -128,7 +128,7 @@ namespace mongo {
// a DBWrite on the namespace creates a nested lock, and yields are disallowed for
// operations that hold a nested lock.
Lock::DBWrite lk(txn->lockState(), ns);
- invariant(Lock::nested());
+ invariant(txn->lockState()->isRecursive());
Client::Context ctx(ns);
bool failed = repl::applyOperation_inlock(txn,
diff --git a/src/mongo/db/commands/create_indexes.cpp b/src/mongo/db/commands/create_indexes.cpp
index 125f2b76f11..502f3e94f79 100644
--- a/src/mongo/db/commands/create_indexes.cpp
+++ b/src/mongo/db/commands/create_indexes.cpp
@@ -178,7 +178,7 @@ namespace mongo {
BSONObj spec = specs[i];
if ( spec["unique"].trueValue() ) {
- status = checkUniqueIndexConstraints( ns.ns(), spec["key"].Obj() );
+ status = checkUniqueIndexConstraints(txn, ns.ns(), spec["key"].Obj());
if ( !status.isOK() ) {
appendCommandStatus( result, status );
@@ -210,9 +210,10 @@ namespace mongo {
}
private:
- static Status checkUniqueIndexConstraints(const StringData& ns,
+ static Status checkUniqueIndexConstraints(OperationContext* txn,
+ const StringData& ns,
const BSONObj& newIdxKey) {
- Lock::assertWriteLocked( ns );
+ txn->lockState()->assertWriteLocked( ns );
if ( shardingState.enabled() ) {
CollectionMetadataPtr metadata(
diff --git a/src/mongo/db/commands/write_commands/batch_executor.cpp b/src/mongo/db/commands/write_commands/batch_executor.cpp
index 1ffa34ba920..ab88d578cd6 100644
--- a/src/mongo/db/commands/write_commands/batch_executor.cpp
+++ b/src/mongo/db/commands/write_commands/batch_executor.cpp
@@ -405,12 +405,13 @@ namespace mongo {
error->setErrMessage( errMsg );
}
- static bool checkShardVersion(ShardingState* shardingState,
+ static bool checkShardVersion(OperationContext* txn,
+ ShardingState* shardingState,
const BatchedCommandRequest& request,
WriteOpResult* result) {
const NamespaceString nss( request.getTargetingNS() );
- Lock::assertWriteLocked( nss.ns() );
+ txn->lockState()->assertWriteLocked( nss.ns() );
ChunkVersion requestShardVersion =
request.isMetadataSet() && request.getMetadata()->isShardVersionSet() ?
@@ -457,12 +458,13 @@ namespace mongo {
error->setErrMessage( errMsg );
}
- static bool checkIndexConstraints(ShardingState* shardingState,
+ static bool checkIndexConstraints(OperationContext* txn,
+ ShardingState* shardingState,
const BatchedCommandRequest& request,
WriteOpResult* result) {
const NamespaceString nss( request.getTargetingNS() );
- Lock::assertWriteLocked( nss.ns() );
+ txn->lockState()->assertWriteLocked( nss.ns() );
if ( !request.isUniqueIndexRequest() )
return true;
@@ -910,10 +912,10 @@ namespace mongo {
if (!checkIsMasterForDatabase(request->getNS(), result)) {
return false;
}
- if (!checkShardVersion(&shardingState, *request, result)) {
+ if (!checkShardVersion(txn, &shardingState, *request, result)) {
return false;
}
- if (!checkIndexConstraints(&shardingState, *request, result)) {
+ if (!checkIndexConstraints(txn, &shardingState, *request, result)) {
return false;
}
_context.reset(new Client::Context(request->getNS(),
@@ -1026,7 +1028,7 @@ namespace mongo {
const string& insertNS = collection->ns().ns();
- Lock::assertWriteLocked( insertNS );
+ txn->lockState()->assertWriteLocked( insertNS );
StatusWith<DiskLoc> status = collection->insertDocument( txn, docToInsert, true );
@@ -1053,7 +1055,7 @@ namespace mongo {
const string indexNS = collection->ns().getSystemIndexesCollection();
- Lock::assertWriteLocked( indexNS );
+ txn->lockState()->assertWriteLocked( indexNS );
Status status = collection->getIndexCatalog()->createIndex(txn, indexDesc, true);
@@ -1094,7 +1096,7 @@ namespace mongo {
Lock::DBWrite writeLock(txn->lockState(), nsString.ns());
///////////////////////////////////////////
- if ( !checkShardVersion( &shardingState, *updateItem.getRequest(), result ) )
+ if (!checkShardVersion(txn, &shardingState, *updateItem.getRequest(), result))
return;
Client::Context ctx( nsString.ns(),
@@ -1153,7 +1155,7 @@ namespace mongo {
// Check version once we're locked
- if ( !checkShardVersion( &shardingState, *removeItem.getRequest(), result ) ) {
+ if (!checkShardVersion(txn, &shardingState, *removeItem.getRequest(), result)) {
// Version error
return;
}
diff --git a/src/mongo/db/curop.cpp b/src/mongo/db/curop.cpp
index 01eaf32be32..b5b535b3ba9 100644
--- a/src/mongo/db/curop.cpp
+++ b/src/mongo/db/curop.cpp
@@ -162,7 +162,7 @@ namespace mongo {
if ( _client ) {
const LockState& ls = _client->lockState();
verify( ls.threadState() );
- Top::global.record( _ns , _op , ls.hasAnyWriteLock() ? 1 : -1 , micros , _isCommand );
+ Top::global.record( _ns , _op , ls.isWriteLocked() ? 1 : -1 , micros , _isCommand );
}
}
diff --git a/src/mongo/db/d_concurrency.cpp b/src/mongo/db/d_concurrency.cpp
index 073b8c7673c..c13ea3e9866 100644
--- a/src/mongo/db/d_concurrency.cpp
+++ b/src/mongo/db/d_concurrency.cpp
@@ -42,6 +42,7 @@
#include "mongo/util/concurrency/qlock.h"
#include "mongo/util/concurrency/rwlock.h"
#include "mongo/util/concurrency/threadlocal.h"
+#include "mongo/util/mongoutils/str.h"
#include "mongo/util/stacktrace.h"
// oplog locking
@@ -50,11 +51,7 @@
// oplog now
// yielding
-namespace mongo {
-
- inline LockState& lockStateTempOnly() {
- return cc().lockState();
- }
+namespace mongo {
class DBTryLockTimeoutException : public std::exception {
public:
@@ -152,51 +149,11 @@ namespace mongo {
LockStat* Lock::globalLockStat() {
return &qlk.stats;
}
-
- int Lock::isLocked() {
- return lockStateTempOnly().threadState();
- }
- int Lock::somethingWriteLocked() {
- return lockStateTempOnly().threadState() == 'W' || lockStateTempOnly().threadState() == 'w';
- }
- bool Lock::isRW() {
- return lockStateTempOnly().threadState() == 'W' || lockStateTempOnly().threadState() == 'R';
- }
- bool Lock::isW() {
- return lockStateTempOnly().threadState() == 'W';
- }
- bool Lock::isR() {
- return lockStateTempOnly().threadState() == 'R';
- }
- bool Lock::nested() {
- // note this doesn't tell us much actually, it tells us if we are nesting locks but
- // they could be the a global lock twice or a global and a specific or two specifics
- // (such as including local)
- return lockStateTempOnly().recursiveCount() > 1;
- }
-
- bool Lock::isWriteLocked(const StringData& ns) {
- return lockStateTempOnly().isWriteLocked(ns);
- }
- void Lock::assertAtLeastReadLocked(const StringData& ns) {
- if( !atLeastReadLocked(ns) ) {
- LockState &ls = lockStateTempOnly();
- log() << "error expected " << ns << " to be locked " << endl;
- ls.dump();
- msgasserted(16104, str::stream() << "expected to be read locked for " << ns);
- }
- }
- void Lock::assertWriteLocked(const StringData& ns) {
- if( !Lock::isWriteLocked(ns) ) {
- lockStateTempOnly().dump();
- msgasserted(16105, str::stream() << "expected to be write locked for " << ns);
- }
- }
RWLockRecursive &Lock::ParallelBatchWriterMode::_batchLock = *(new RWLockRecursive("special"));
- void Lock::ParallelBatchWriterMode::iAmABatchParticipant() {
- lockStateTempOnly()._batchWriter = true;
+ void Lock::ParallelBatchWriterMode::iAmABatchParticipant(LockState* lockState) {
+ lockState->_batchWriter = true;
}
Lock::ScopedLock::ParallelBatchWriterSupport::ParallelBatchWriterSupport(LockState* lockState)
@@ -267,7 +224,7 @@ namespace mongo {
}
Lock::TempRelease::TempRelease(LockState* lockState)
- : cant(lockState->isNested()), _lockState(lockState) {
+ : cant(lockState->isRecursive()), _lockState(lockState) {
if( cant )
return;
diff --git a/src/mongo/db/d_concurrency.h b/src/mongo/db/d_concurrency.h
index 35e5b032d8d..4696626b704 100644
--- a/src/mongo/db/d_concurrency.h
+++ b/src/mongo/db/d_concurrency.h
@@ -48,15 +48,6 @@ namespace mongo {
class Lock : boost::noncopyable {
public:
enum Nestable { notnestable=0, local, admin };
- static int isLocked(); // true if *anything* is locked (by us)
- static int somethingWriteLocked(); // w or W
- static bool isW(); // W
- static bool isR();
- static bool isRW(); // R or W. i.e., we are write-exclusive
- static bool nested();
- static bool isWriteLocked(const StringData& ns);
- static void assertAtLeastReadLocked(const StringData& ns);
- static void assertWriteLocked(const StringData& ns);
static LockStat* globalLockStat();
static LockStat* nestableLockStat( Nestable db );
@@ -83,7 +74,7 @@ namespace mongo {
RWLockRecursive::Exclusive _lk;
public:
ParallelBatchWriterMode() : _lk(_batchLock) {}
- static void iAmABatchParticipant();
+ static void iAmABatchParticipant(LockState* lockState);
static RWLockRecursive &_batchLock;
};
diff --git a/src/mongo/db/db.cpp b/src/mongo/db/db.cpp
index 5c3a09b89cf..c54f5547872 100644
--- a/src/mongo/db/db.cpp
+++ b/src/mongo/db/db.cpp
@@ -358,11 +358,10 @@ namespace mongo {
if (shouldClearNonLocalTmpCollections || dbName == "local")
ctx.db()->clearTmpCollections(&txn);
- OperationContextImpl opCtx;
if ( mongodGlobalParams.repair ) {
- fassert( 18506, repairDatabase( &opCtx, dbName ) );
+ fassert(18506, repairDatabase(&txn, dbName));
}
- else if ( !ctx.db()->getDatabaseCatalogEntry()->currentFilesCompatible( &opCtx ) ) {
+ else if (!ctx.db()->getDatabaseCatalogEntry()->currentFilesCompatible(&txn)) {
log() << "****";
log() << "cannot do this upgrade without an upgrade in the middle";
log() << "please do a --repair with 2.6 and then start this version";
@@ -382,7 +381,7 @@ namespace mongo {
const BSONObj key = index.getObjectField("key");
const string plugin = IndexNames::findPluginName(key);
- if (ctx.db()->getDatabaseCatalogEntry()->isOlderThan24( &opCtx )) {
+ if (ctx.db()->getDatabaseCatalogEntry()->isOlderThan24(&txn)) {
if (IndexNames::existedBefore24(plugin))
continue;
diff --git a/src/mongo/db/dbhelpers.cpp b/src/mongo/db/dbhelpers.cpp
index f36ea3c745a..d3315e29200 100644
--- a/src/mongo/db/dbhelpers.cpp
+++ b/src/mongo/db/dbhelpers.cpp
@@ -130,7 +130,7 @@ namespace mongo {
BSONObj& result,
bool* nsFound,
bool* indexFound) {
- Lock::assertAtLeastReadLocked(ns);
+ txn->lockState()->assertAtLeastReadLocked(ns);
invariant( database );
Collection* collection = database->getCollection( txn, ns );
@@ -434,7 +434,7 @@ namespace mongo {
millisWaitingForReplication += secondaryThrottleTime.millis();
}
- if ( ! Lock::isLocked() ) {
+ if (!txn->lockState()->isLocked()) {
int micros = ( 2 * Client::recommendedYieldMicros() ) - secondaryThrottleTime.micros();
if ( micros > 0 ) {
LOG(1) << "Helpers::removeRangeUnlocked going to sleep for " << micros << " micros" << endl;
diff --git a/src/mongo/db/dbwebserver.cpp b/src/mongo/db/dbwebserver.cpp
index 9c2357fa43b..f37dcb32c61 100644
--- a/src/mongo/db/dbwebserver.cpp
+++ b/src/mongo/db/dbwebserver.cpp
@@ -293,7 +293,7 @@ namespace mongo {
doUnlockedStuff(ss);
- WebStatusPlugin::runAll( ss );
+ WebStatusPlugin::runAll(txn.get(), ss);
ss << "</body></html>\n";
responseMsg = ss.str();
@@ -334,7 +334,7 @@ namespace mongo {
(*_plugins)[i]->init();
}
- void WebStatusPlugin::runAll( stringstream& ss ) {
+ void WebStatusPlugin::runAll(OperationContext* txn, stringstream& ss) {
if ( ! _plugins )
return;
@@ -347,7 +347,7 @@ namespace mongo {
ss << "<br>\n";
- p->run(ss);
+ p->run(txn, ss);
}
}
@@ -364,7 +364,7 @@ namespace mongo {
virtual void init() {}
- virtual void run( stringstream& ss ) {
+ virtual void run(OperationContext* txn, stringstream& ss ) {
_log->toHTML( ss );
}
RamLog * _log;
diff --git a/src/mongo/db/dbwebserver.h b/src/mongo/db/dbwebserver.h
index e03eb0e1388..8a47dde547e 100644
--- a/src/mongo/db/dbwebserver.h
+++ b/src/mongo/db/dbwebserver.h
@@ -84,12 +84,12 @@ namespace mongo {
WebStatusPlugin( const std::string& secionName , double priority , const std::string& subheader = "" );
virtual ~WebStatusPlugin() {}
- virtual void run( std::stringstream& ss ) = 0;
+ virtual void run(OperationContext* txn, std::stringstream& ss) = 0;
/** called when web server stats up */
virtual void init() = 0;
static void initAll();
- static void runAll( std::stringstream& ss );
+ static void runAll(OperationContext* txn, std::stringstream& ss);
private:
std::string _name;
std::string _subHeading;
diff --git a/src/mongo/db/index_builder.cpp b/src/mongo/db/index_builder.cpp
index 23cae43bd2b..ec3381900d5 100644
--- a/src/mongo/db/index_builder.cpp
+++ b/src/mongo/db/index_builder.cpp
@@ -58,7 +58,7 @@ namespace mongo {
OperationContextImpl txn;
Client::initThread(name().c_str());
- Lock::ParallelBatchWriterMode::iAmABatchParticipant();
+ Lock::ParallelBatchWriterMode::iAmABatchParticipant(txn.lockState());
repl::replLocalAuth();
@@ -66,7 +66,7 @@ namespace mongo {
NamespaceString ns(_index["ns"].String());
Client::WriteContext ctx(&txn, ns.getSystemIndexesCollection());
- Database* db = dbHolder().get(ns.db().toString(), storageGlobalParams.dbpath);
+ Database* db = dbHolder().get(&txn, ns.db().toString(), storageGlobalParams.dbpath);
Status status = build(&txn, db);
if ( !status.isOK() ) {
diff --git a/src/mongo/db/instance.cpp b/src/mongo/db/instance.cpp
index d015d65ea08..b7dc42102c8 100644
--- a/src/mongo/db/instance.cpp
+++ b/src/mongo/db/instance.cpp
@@ -546,10 +546,10 @@ namespace mongo {
// XXX? - Do we need to close database under global lock or just DB-lock is sufficient ?
invariant(txn->lockState()->isW());
- Database* database = dbHolder().get(db, path);
+ Database* database = dbHolder().get(txn, db, path);
invariant(database != NULL);
- repl::oplogCheckCloseDatabase(database); // oplog caches some things, dirty its caches
+ repl::oplogCheckCloseDatabase(txn, database); // oplog caches some things, dirty its caches
if( BackgroundOperation::inProgForDb(db) ) {
log() << "warning: bg op in prog during close db? " << db << endl;
@@ -559,7 +559,12 @@ namespace mongo {
string prefix(db);
prefix += '.';
- dbHolder().erase(db, path);
+ // Before the files are closed, flush any potentially outstanding changes, which
+ // might reference this database. Otherwise, a later commitIfNeeded call would
+ // assert if it happens to still hold write intents for the removed files.
+ txn->recoveryUnit()->commitIfNeeded(true);
+
+ dbHolder().erase(txn, db, path);
delete database; // closes files
}
diff --git a/src/mongo/db/introspect.cpp b/src/mongo/db/introspect.cpp
index a59e08024df..5f9651cc41d 100644
--- a/src/mongo/db/introspect.cpp
+++ b/src/mongo/db/introspect.cpp
@@ -138,8 +138,8 @@ namespace {
// NOTE: It's kind of weird that we lock the op's namespace, but have to for now since
// we're sometimes inside the lock already
Lock::DBWrite lk(txn->lockState(), currentOp.getNS() );
- if (dbHolder()._isLoaded(
- nsToDatabase(currentOp.getNS()), storageGlobalParams.dbpath)) {
+ if (dbHolder().get(
+ txn, nsToDatabase(currentOp.getNS()), storageGlobalParams.dbpath) != NULL) {
Client::Context cx(currentOp.getNS(), storageGlobalParams.dbpath, false);
_profile(txn, c, cx.db(), currentOp, profileBufBuilder);
diff --git a/src/mongo/db/lockstate.cpp b/src/mongo/db/lockstate.cpp
index f0aec4a7666..30168ea8623 100644
--- a/src/mongo/db/lockstate.cpp
+++ b/src/mongo/db/lockstate.cpp
@@ -62,10 +62,6 @@ namespace mongo {
return _threadState == 'r' || _threadState == 'R';
}
- bool LockState::hasAnyWriteLock() const {
- return _threadState == 'w' || _threadState == 'W';
- }
-
bool LockState::isLocked( const StringData& ns ) const {
char db[MaxDatabaseNameLen];
nsToDatabase(ns, db);
@@ -112,10 +108,27 @@ namespace mongo {
return threadState() == 'R' || threadState() == 'W';
}
- bool LockState::isNested() const {
+ bool LockState::isRecursive() const {
return recursiveCount() > 1;
}
+ void LockState::assertWriteLocked(const StringData& ns) const {
+ if (!isWriteLocked(ns)) {
+ dump();
+ msgasserted(
+ 16105, mongoutils::str::stream() << "expected to be write locked for " << ns);
+ }
+ }
+
+ void LockState::assertAtLeastReadLocked(const StringData& ns) const {
+ if (!isAtLeastReadLocked(ns)) {
+ log() << "error expected " << ns << " to be locked " << endl;
+ dump();
+ msgasserted(
+ 16104, mongoutils::str::stream() << "expected to be read locked for " << ns);
+ }
+ }
+
void LockState::lockedStart( char newState ) {
_threadState = newState;
}
@@ -176,7 +189,7 @@ namespace mongo {
res.append( "waitingForLock" , _lockPending );
}
- void LockState::dump() {
+ void LockState::dump() const {
char s = _threadState;
stringstream ss;
ss << "lock status: ";
diff --git a/src/mongo/db/lockstate.h b/src/mongo/db/lockstate.h
index 302861b37a9..8f17c793632 100644
--- a/src/mongo/db/lockstate.h
+++ b/src/mongo/db/lockstate.h
@@ -42,7 +42,7 @@ namespace mongo {
public:
LockState();
- void dump();
+ void dump() const;
BSONObj reportState();
void reportState(BSONObjBuilder& b);
@@ -57,7 +57,6 @@ namespace mongo {
bool isRW() const; // RW
bool isW() const; // W
bool hasAnyReadLock() const; // explicitly rR
- bool hasAnyWriteLock() const; // wW
bool isLocked(const StringData& ns) const; // rwRW
bool isLocked() const;
@@ -65,7 +64,10 @@ namespace mongo {
bool isWriteLocked(const StringData& ns) const;
bool isAtLeastReadLocked(const StringData& ns) const;
bool isLockedForCommitting() const;
- bool isNested() const;
+ bool isRecursive() const;
+
+ void assertWriteLocked(const StringData& ns) const;
+ void assertAtLeastReadLocked(const StringData& ns) const;
/** pending means we are currently trying to get a lock */
bool hasLockPending() const { return _lockPending || _lockPendingParallelWriter; }
diff --git a/src/mongo/db/operation_context_impl.cpp b/src/mongo/db/operation_context_impl.cpp
index 84111a0f3a6..756a4f0120b 100644
--- a/src/mongo/db/operation_context_impl.cpp
+++ b/src/mongo/db/operation_context_impl.cpp
@@ -116,7 +116,7 @@ namespace mongo {
void OperationContextImpl::checkForInterrupt(bool heedMutex) const {
Client& c = cc();
- if (heedMutex && Lock::somethingWriteLocked() && c.hasWrittenSinceCheckpoint()) {
+ if (heedMutex && lockState()->isWriteLocked() && c.hasWrittenSinceCheckpoint()) {
return;
}
diff --git a/src/mongo/db/pdfile.cpp b/src/mongo/db/pdfile.cpp
index 741affbe6c3..4d6b04110f8 100644
--- a/src/mongo/db/pdfile.cpp
+++ b/src/mongo/db/pdfile.cpp
@@ -158,7 +158,7 @@ namespace mongo {
string name = db->name(); // just to have safe
LOG(1) << "dropDatabase " << name << endl;
- Lock::assertWriteLocked( name );
+ txn->lockState()->assertWriteLocked( name );
BackgroundOperation::assertNoBgOpInProgForDb(name.c_str());
diff --git a/src/mongo/db/pipeline/pipeline_d.cpp b/src/mongo/db/pipeline/pipeline_d.cpp
index b790fe4b4d3..625a5f1d0b7 100644
--- a/src/mongo/db/pipeline/pipeline_d.cpp
+++ b/src/mongo/db/pipeline/pipeline_d.cpp
@@ -81,7 +81,7 @@ namespace {
const intrusive_ptr<ExpressionContext>& pExpCtx) {
// get the full "namespace" name
const string& fullName = pExpCtx->ns.ns();
- Lock::assertAtLeastReadLocked(fullName);
+ pExpCtx->opCtx->lockState()->assertAtLeastReadLocked(fullName);
// We will be modifying the source vector as we go
Pipeline::SourceContainer& sources = pPipeline->sources;
diff --git a/src/mongo/db/prefetch.cpp b/src/mongo/db/prefetch.cpp
index 7e463a592f6..9b2a0837a55 100644
--- a/src/mongo/db/prefetch.cpp
+++ b/src/mongo/db/prefetch.cpp
@@ -85,7 +85,7 @@ namespace mongo {
LOG(4) << "index prefetch for op " << *opType << endl;
- DEV Lock::assertAtLeastReadLocked(ns);
+ DEV txn->lockState()->assertAtLeastReadLocked(ns);
// should we prefetch index pages on updates? if the update is in-place and doesn't change
// indexed values, it is actually slower - a lot slower if there are a dozen indexes or
diff --git a/src/mongo/db/repair_database.cpp b/src/mongo/db/repair_database.cpp
index 6e7fec2a020..17a2a9e18fd 100644
--- a/src/mongo/db/repair_database.cpp
+++ b/src/mongo/db/repair_database.cpp
@@ -315,7 +315,7 @@ namespace mongo {
{
Database* originalDatabase =
- dbHolder().get(dbName, storageGlobalParams.dbpath);
+ dbHolder().get(txn, dbName, storageGlobalParams.dbpath);
if (originalDatabase == NULL) {
return Status(ErrorCodes::NamespaceNotFound, "database does not exist to repair");
}
diff --git a/src/mongo/db/repl/bgsync.cpp b/src/mongo/db/repl/bgsync.cpp
index 6d81029e37f..0922a1cc5e2 100644
--- a/src/mongo/db/repl/bgsync.cpp
+++ b/src/mongo/db/repl/bgsync.cpp
@@ -229,7 +229,6 @@ namespace repl {
// the inference here is basically if the batch is really small, we are
// "caught up".
//
- dassert( !Lock::isLocked() );
sleepmillis(SleepToAllowBatchingMillis);
}
diff --git a/src/mongo/db/repl/master_slave.cpp b/src/mongo/db/repl/master_slave.cpp
index febd43e3c12..283d3cbfe38 100644
--- a/src/mongo/db/repl/master_slave.cpp
+++ b/src/mongo/db/repl/master_slave.cpp
@@ -440,7 +440,7 @@ namespace repl {
const char* ns,
const char* db ) {
// We are already locked at this point
- if (dbHolder()._isLoaded(ns, storageGlobalParams.dbpath)) {
+ if (dbHolder().get(txn, ns, storageGlobalParams.dbpath) != NULL) {
// Database is already present.
return true;
}
@@ -1256,8 +1256,6 @@ namespace repl {
int _dummy_z;
void pretouchN(vector<BSONObj>& v, unsigned a, unsigned b) {
- DEV verify( ! Lock::isW() );
-
Client *c = currentClient.get();
if( c == 0 ) {
Client::initThread("pretouchN");
diff --git a/src/mongo/db/repl/oplog.cpp b/src/mongo/db/repl/oplog.cpp
index 3578b7acd0b..7527c841281 100644
--- a/src/mongo/db/repl/oplog.cpp
+++ b/src/mongo/db/repl/oplog.cpp
@@ -83,8 +83,9 @@ namespace repl {
newOptimeNotifier.notify_all();
}
- void oplogCheckCloseDatabase( Database* db ) {
- verify( Lock::isW() );
+ void oplogCheckCloseDatabase(OperationContext* txn, Database* db) {
+ invariant(txn->lockState()->isW());
+
localDB = NULL;
localOplogMainCollection = NULL;
localOplogRSCollection = NULL;
@@ -542,7 +543,7 @@ namespace repl {
bool valueB = fieldB.booleanSafe();
- Lock::assertWriteLocked(ns);
+ txn->lockState()->assertWriteLocked(ns);
Collection* collection = db->getCollection( txn, ns );
IndexCatalog* indexCatalog = collection == NULL ? NULL : collection->getIndexCatalog();
diff --git a/src/mongo/db/repl/oplog.h b/src/mongo/db/repl/oplog.h
index 6728811667e..20536de9e08 100644
--- a/src/mongo/db/repl/oplog.h
+++ b/src/mongo/db/repl/oplog.h
@@ -88,7 +88,7 @@ namespace repl {
// Flush out the cached pointers to the local database and oplog.
// Used by the closeDatabase command to ensure we don't cache closed things.
- void oplogCheckCloseDatabase( Database * db );
+ void oplogCheckCloseDatabase(OperationContext* txn, Database * db);
/**
* take an op and apply locally
diff --git a/src/mongo/db/repl/repl_coordinator_impl.cpp b/src/mongo/db/repl/repl_coordinator_impl.cpp
index 798d4c8ac74..d5d79a15824 100644
--- a/src/mongo/db/repl/repl_coordinator_impl.cpp
+++ b/src/mongo/db/repl/repl_coordinator_impl.cpp
@@ -144,7 +144,7 @@ namespace repl {
if (cbh.getStatus() == ErrorCodes::ShutdownInProgress) {
return Status(ErrorCodes::ShutdownInProgress, "replication shutdown in progress");
}
- fassert(18507, cbh.getStatus());
+ fassert(18508, cbh.getStatus());
_replExecutor->wait(cbh.getValue());
return result;
}
diff --git a/src/mongo/db/repl/rs_rollback.cpp b/src/mongo/db/repl/rs_rollback.cpp
index 34cfb5d9e9f..645c4506e86 100644
--- a/src/mongo/db/repl/rs_rollback.cpp
+++ b/src/mongo/db/repl/rs_rollback.cpp
@@ -421,8 +421,8 @@ namespace repl {
bool warn = false;
- verify(!fixUpInfo.commonPointOurDiskloc.isNull());
- verify(Lock::isW());
+ invariant(!fixUpInfo.commonPointOurDiskloc.isNull());
+ invariant(txn->lockState()->isW());
// we have items we are writing that aren't from a point-in-time. thus best not to come
// online until we get to that point in freshness.
diff --git a/src/mongo/db/repl/rs_sync.cpp b/src/mongo/db/repl/rs_sync.cpp
index ad9c8a5ebe7..895a1e8d1a6 100644
--- a/src/mongo/db/repl/rs_sync.cpp
+++ b/src/mongo/db/repl/rs_sync.cpp
@@ -260,8 +260,6 @@ namespace repl {
return;
}
- fassert(16113, !Lock::isLocked());
-
try {
_syncThread();
}
diff --git a/src/mongo/db/repl/sync.cpp b/src/mongo/db/repl/sync.cpp
index 16e6225a1fb..45940715004 100644
--- a/src/mongo/db/repl/sync.cpp
+++ b/src/mongo/db/repl/sync.cpp
@@ -106,7 +106,7 @@ namespace repl {
}
bool Sync::shouldRetry(OperationContext* txn, const BSONObj& o) {
- invariant(txn->lockState()->hasAnyWriteLock());
+ invariant(txn->lockState()->isWriteLocked());
// should already have write lock
const char *ns = o.getStringField("ns");
diff --git a/src/mongo/db/repl/sync_tail.cpp b/src/mongo/db/repl/sync_tail.cpp
index 07d6eab4243..ef01eb2b60e 100644
--- a/src/mongo/db/repl/sync_tail.cpp
+++ b/src/mongo/db/repl/sync_tail.cpp
@@ -288,8 +288,6 @@ namespace repl {
while( 1 ) {
OpQueue ops;
- verify( !Lock::isLocked() );
-
Timer batchTimer;
int lastTimeChecked = 0;
@@ -533,14 +531,14 @@ namespace repl {
static AtomicUInt32 replWriterWorkerId;
- void initializeWriterThread() {
+ static void initializeWriterThread() {
// Only do this once per thread
if (!ClientBasic::getCurrent()) {
string threadName = str::stream() << "repl writer worker "
<< replWriterWorkerId.addAndFetch(1);
Client::initThread( threadName.c_str() );
// allow us to get through the magic barrier
- Lock::ParallelBatchWriterMode::iAmABatchParticipant();
+ Lock::ParallelBatchWriterMode::iAmABatchParticipant(&cc().lockState());
replLocalAuth();
}
}
diff --git a/src/mongo/db/repl/topology_coordinator.h b/src/mongo/db/repl/topology_coordinator.h
index 4bcf7b47e01..d37a79fc2cc 100644
--- a/src/mongo/db/repl/topology_coordinator.h
+++ b/src/mongo/db/repl/topology_coordinator.h
@@ -37,6 +37,7 @@
namespace mongo {
+ class OperationContext;
class OpTime;
namespace repl {
@@ -154,7 +155,7 @@ namespace repl {
virtual void updateHeartbeatInfo(Date_t now, const HeartbeatInfo& newInfo) = 0;
// transition PRIMARY to SECONDARY; caller must already be holding an appropriate dblock
- virtual void relinquishPrimary() = 0;
+ virtual void relinquishPrimary(OperationContext* txn) = 0;
protected:
TopologyCoordinator() {}
};
diff --git a/src/mongo/db/repl/topology_coordinator_impl.cpp b/src/mongo/db/repl/topology_coordinator_impl.cpp
index d25392c5f27..2425205ee21 100644
--- a/src/mongo/db/repl/topology_coordinator_impl.cpp
+++ b/src/mongo/db/repl/topology_coordinator_impl.cpp
@@ -28,6 +28,7 @@
#include "mongo/db/repl/topology_coordinator_impl.h"
+#include "mongo/db/operation_context.h"
#include "mongo/db/repl/member.h"
#include "mongo/db/repl/repl_settings.h"
#include "mongo/db/repl/replication_executor.h"
@@ -187,9 +188,9 @@ namespace repl {
}
- void TopologyCoordinatorImpl::relinquishPrimary() {
+ void TopologyCoordinatorImpl::relinquishPrimary(OperationContext* txn) {
LOG(2) << "replSet attempting to relinquish" << endl;
- invariant(Lock::somethingWriteLocked());
+ invariant(txn->lockState()->isWriteLocked());
if (_memberState != MemberState::RS_PRIMARY) {
// Already relinquished?
log() << "replSet warning attempted to relinquish but not primary";
diff --git a/src/mongo/db/repl/topology_coordinator_impl.h b/src/mongo/db/repl/topology_coordinator_impl.h
index a7c49f58f61..2eaaf7e6965 100644
--- a/src/mongo/db/repl/topology_coordinator_impl.h
+++ b/src/mongo/db/repl/topology_coordinator_impl.h
@@ -38,6 +38,9 @@
#include "mongo/util/time_support.h"
namespace mongo {
+
+ class OperationContext;
+
namespace repl {
class TopologyCoordinatorImpl : public TopologyCoordinator {
@@ -88,7 +91,7 @@ namespace repl {
virtual void updateHeartbeatInfo(Date_t now, const HeartbeatInfo& newInfo);
// transition PRIMARY to SECONDARY; caller must already be holding an appropriate dblock
- virtual void relinquishPrimary();
+ virtual void relinquishPrimary(OperationContext* txn);
private:
diff --git a/src/mongo/db/repl/topology_coordinator_mock.cpp b/src/mongo/db/repl/topology_coordinator_mock.cpp
index cecffb1e575..467744c4f38 100644
--- a/src/mongo/db/repl/topology_coordinator_mock.cpp
+++ b/src/mongo/db/repl/topology_coordinator_mock.cpp
@@ -53,7 +53,7 @@ namespace repl {
void TopologyCoordinatorMock::signalDrainComplete() {}
- void TopologyCoordinatorMock::relinquishPrimary() {}
+ void TopologyCoordinatorMock::relinquishPrimary(OperationContext* txn) {}
bool TopologyCoordinatorMock::prepareRequestVoteResponse(const BSONObj& cmdObj,
std::string& errmsg,
diff --git a/src/mongo/db/repl/topology_coordinator_mock.h b/src/mongo/db/repl/topology_coordinator_mock.h
index 691fa61fa78..7b7a8fddf84 100644
--- a/src/mongo/db/repl/topology_coordinator_mock.h
+++ b/src/mongo/db/repl/topology_coordinator_mock.h
@@ -31,6 +31,9 @@
#include "mongo/db/repl/topology_coordinator.h"
namespace mongo {
+
+ class OperationContext;
+
namespace repl {
class TopologyCoordinatorMock : public TopologyCoordinator {
@@ -69,7 +72,7 @@ namespace repl {
virtual void updateHeartbeatInfo(Date_t now, const HeartbeatInfo& newInfo);
- virtual void relinquishPrimary();
+ virtual void relinquishPrimary(OperationContext* txn);
};
diff --git a/src/mongo/db/restapi.cpp b/src/mongo/db/restapi.cpp
index e6a33c4fd11..ed40676d54f 100644
--- a/src/mongo/db/restapi.cpp
+++ b/src/mongo/db/restapi.cpp
@@ -295,7 +295,7 @@ namespace mongo {
ss << "</pre>\n";
}
- virtual void run( stringstream& ss ) {
+ virtual void run(OperationContext* txn, stringstream& ss ) {
Timer t;
LockState lockState;
readlocktry lk(&lockState, 300);
diff --git a/src/mongo/db/stats/snapshots_webplugins.cpp b/src/mongo/db/stats/snapshots_webplugins.cpp
index f558595a45c..caa4b611109 100644
--- a/src/mongo/db/stats/snapshots_webplugins.cpp
+++ b/src/mongo/db/stats/snapshots_webplugins.cpp
@@ -30,6 +30,7 @@
#include "mongo/db/d_concurrency.h"
#include "mongo/db/dbwebserver.h"
+#include "mongo/db/operation_context.h"
#include "mongo/db/stats/snapshots.h"
#include "mongo/util/mongoutils/html.h"
@@ -44,13 +45,13 @@ namespace {
WriteLockStatus() : WebStatusPlugin( "write lock" , 51 , "% time in write lock, by 4 sec periods" ) {}
virtual void init() {}
- virtual void run( stringstream& ss ) {
+ virtual void run(OperationContext* txn, stringstream& ss) {
statsSnapshots.outputLockInfoHTML( ss );
ss << "<a "
"href=\"http://dochub.mongodb.org/core/concurrency\" "
"title=\"snapshot: was the db in the write lock when this page was generated?\">";
- ss << "write locked now:</a> " << (Lock::isW() ? "true" : "false") << "\n";
+ ss << "write locked now:</a> " << (txn->lockState()->isW() ? "true" : "false") << "\n";
}
} writeLockStatus;
@@ -91,7 +92,7 @@ namespace {
ss << "</tr>\n";
}
- void run( stringstream& ss ) {
+ void run(OperationContext* txn, stringstream& ss) {
auto_ptr<SnapshotDelta> delta = statsSnapshots.computeDelta();
if ( ! delta.get() )
return;
diff --git a/src/mongo/db/storage/durable_mapped_file.cpp b/src/mongo/db/storage/durable_mapped_file.cpp
index 03ed8d68efc..f1354fb79aa 100644
--- a/src/mongo/db/storage/durable_mapped_file.cpp
+++ b/src/mongo/db/storage/durable_mapped_file.cpp
@@ -175,40 +175,24 @@ namespace mongo {
_view_write = _view_private = 0;
}
- DurableMappedFile::~DurableMappedFile() {
- try {
- close();
- }
- catch(...) { error() << "exception in ~DurableMappedFile" << endl; }
- }
-
namespace dur {
void closingFileNotification();
}
- /*virtual*/ void DurableMappedFile::close() {
- LOG(3) << "mmf close " << filename() << endl;
+ DurableMappedFile::~DurableMappedFile() {
+ try {
+ LOG(3) << "mmf close " << filename() << endl;
- if( view_write() /*actually was opened*/ ) {
- if (storageGlobalParams.dur) {
+ // Only notify the durability system if the file was actually opened
+ if (view_write()) {
dur::closingFileNotification();
}
- /* todo: is it ok to close files if we are not globally locked exclusively?
- probably, but need to review. also note the lock assert below is
- rather vague and not checking if the right database is locked
- */
- if( !Lock::somethingWriteLocked() ) {
- verify( inShutdown() );
- DEV {
- log() << "is it really ok to close a mongommf outside a write lock? file:" << filename() << endl;
- }
- }
- }
- LockMongoFilesExclusive lk;
- privateViews.remove(_view_private);
- _view_write = _view_private = 0;
- MemoryMappedFile::close();
+ LockMongoFilesExclusive lk;
+ privateViews.remove(_view_private);
+ _view_write = _view_private = 0;
+ MemoryMappedFile::close();
+ }
+ catch(...) { error() << "exception in ~DurableMappedFile" << endl; }
}
-
}
diff --git a/src/mongo/db/storage/durable_mapped_file.h b/src/mongo/db/storage/durable_mapped_file.h
index 628327252f5..cf1fa59b9d6 100644
--- a/src/mongo/db/storage/durable_mapped_file.h
+++ b/src/mongo/db/storage/durable_mapped_file.h
@@ -47,7 +47,6 @@ namespace mongo {
public:
DurableMappedFile();
virtual ~DurableMappedFile();
- virtual void close();
/** @return true if opened ok. */
bool open(const std::string& fname, bool sequentialHint /*typically we open with this false*/);
diff --git a/src/mongo/db/storage/extent_manager.h b/src/mongo/db/storage/extent_manager.h
index 671f4271698..21f8a3813b4 100644
--- a/src/mongo/db/storage/extent_manager.h
+++ b/src/mongo/db/storage/extent_manager.h
@@ -73,8 +73,6 @@ namespace mongo {
virtual size_t numFiles() const = 0;
virtual long long fileSize() const = 0;
- virtual void flushFiles( bool sync ) = 0;
-
// must call Extent::reuse on the returned extent
virtual DiskLoc allocateExtent( OperationContext* txn,
bool capped,
diff --git a/src/mongo/db/storage/mmap_v1/dur.cpp b/src/mongo/db/storage/mmap_v1/dur.cpp
index c6a82d7a092..812ac5dd823 100644
--- a/src/mongo/db/storage/mmap_v1/dur.cpp
+++ b/src/mongo/db/storage/mmap_v1/dur.cpp
@@ -474,7 +474,7 @@ namespace mongo {
LOG(4) << "journal REMAPPRIVATEVIEW" << endl;
invariant(txn->lockState()->isW());
- verify( !commitJob.hasWritten() );
+ invariant(!commitJob.hasWritten());
// we want to remap all private views about every 2 seconds. there could be ~1000 views so
// we do a little each pass; beyond the remap time, more significantly, there will be copy on write
@@ -780,13 +780,13 @@ namespace mongo {
if (!storageGlobalParams.dur)
return;
- if( Lock::isLocked() ) {
- getDur().commitIfNeeded(true);
- }
- else {
- verify( inShutdown() );
- if( commitJob.hasWritten() ) {
- log() << "journal warning files are closing outside locks with writes pending" << endl;
+ if (commitJob.hasWritten()) {
+ if (inShutdown()) {
+ log() << "journal warning files are closing outside locks with writes pending"
+ << endl;
+ }
+ else {
+ fassert(18507, !"File is closing while there are unwritten changes.");
}
}
}
diff --git a/src/mongo/db/storage/mmap_v1/dur_commitjob.cpp b/src/mongo/db/storage/mmap_v1/dur_commitjob.cpp
index dfa78d45663..60a5c932a8c 100644
--- a/src/mongo/db/storage/mmap_v1/dur_commitjob.cpp
+++ b/src/mongo/db/storage/mmap_v1/dur_commitjob.cpp
@@ -44,7 +44,6 @@ namespace mongo {
/** base declare write intent function that all the helpers call. */
/** we batch up our write intents so that we do not have to synchronize too often */
void DurableImpl::declareWriteIntent(void *p, unsigned len) {
- dassert( Lock::somethingWriteLocked() );
MemoryMappedFile::makeWritable(p, len);
commitJob.note(p, len);
}
@@ -85,7 +84,6 @@ namespace mongo {
/** note an operation other than a "basic write" */
void CommitJob::noteOp(shared_ptr<DurOp> p) {
- dassert( Lock::somethingWriteLocked() );
dassert(storageGlobalParams.dur);
// DurOp's are rare so it is ok to have the lock cost here
SimpleMutex::scoped_lock lk(groupCommitMutex);
@@ -116,7 +114,6 @@ namespace mongo {
}
void CommitJob::note(void* p, int len) {
- dassert( Lock::somethingWriteLocked() );
SimpleMutex::scoped_lock lk(groupCommitMutex);
_hasWritten = true;
diff --git a/src/mongo/db/storage/mmap_v1/dur_recovery_unit.cpp b/src/mongo/db/storage/mmap_v1/dur_recovery_unit.cpp
index 1785966a783..814c159bd69 100644
--- a/src/mongo/db/storage/mmap_v1/dur_recovery_unit.cpp
+++ b/src/mongo/db/storage/mmap_v1/dur_recovery_unit.cpp
@@ -28,6 +28,7 @@
#include "mongo/db/storage/mmap_v1/dur_recovery_unit.h"
+#include "mongo/db/operation_context.h"
#include "mongo/db/storage/mmap_v1/dur.h"
// Remove once we are ready to enable
@@ -153,6 +154,8 @@ namespace mongo {
_hasWrittenSinceCheckpoint = true;
return data;
#else
+ invariant(_txn->lockState()->isWriteLocked());
+
_hasWrittenSinceCheckpoint = true;
return getDur().writingPtr(data, len);
#endif
diff --git a/src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.cpp b/src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.cpp
index 79115de2df2..23360fb05c7 100644
--- a/src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.cpp
+++ b/src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.cpp
@@ -87,6 +87,10 @@ namespace mongo {
auto_ptr<DataFile> df( new DataFile(n) );
Status s = df->openExisting( txn, fullNameString.c_str() );
+
+ // openExisting may upgrade the files, so make sure to commit its changes
+ txn->recoveryUnit()->commitIfNeeded(true);
+
if ( !s.isOK() ) {
return s;
}
@@ -103,11 +107,9 @@ namespace mongo {
}
const DataFile* MmapV1ExtentManager::_getOpenFile( int n ) const {
- verify(this);
- DEV Lock::assertAtLeastReadLocked( _dbname );
if ( n < 0 || n >= static_cast<int>(_files.size()) )
log() << "uh oh: " << n;
- verify( n >= 0 && n < static_cast<int>(_files.size()) );
+ invariant(n >= 0 && n < static_cast<int>(_files.size()));
return _files[n];
}
@@ -118,7 +120,7 @@ namespace mongo {
int sizeNeeded ,
bool preallocateOnly) {
verify(this);
- DEV Lock::assertAtLeastReadLocked( _dbname );
+ DEV txn->lockState()->assertAtLeastReadLocked( _dbname );
if ( n < 0 || n >= DiskLoc::MaxFiles ) {
log() << "getFile(): n=" << n << endl;
@@ -145,7 +147,7 @@ namespace mongo {
}
if ( p == 0 ) {
if ( n == 0 ) audit::logCreateDatabase( currentClient.get(), _dbname );
- DEV Lock::assertWriteLocked( _dbname );
+ DEV txn->lockState()->assertWriteLocked( _dbname );
boost::filesystem::path fullName = fileName( n );
string fullNameString = fullName.string();
p = new DataFile(n);
@@ -177,7 +179,7 @@ namespace mongo {
DataFile* MmapV1ExtentManager::_addAFile( OperationContext* txn,
int sizeNeeded,
bool preallocateNextFile ) {
- DEV Lock::assertWriteLocked( _dbname );
+ DEV txn->lockState()->assertWriteLocked(_dbname);
int n = (int) _files.size();
DataFile *ret = getFile( txn, n, sizeNeeded );
if ( preallocateNextFile )
@@ -196,14 +198,6 @@ namespace mongo {
return size;
}
- void MmapV1ExtentManager::flushFiles( bool sync ) {
- DEV Lock::assertAtLeastReadLocked( _dbname );
- for( vector<DataFile*>::iterator i = _files.begin(); i != _files.end(); i++ ) {
- DataFile *f = *i;
- f->flush(sync);
- }
- }
-
Record* MmapV1ExtentManager::recordForV1( const DiskLoc& loc ) const {
loc.assertOk();
const DataFile* df = _getOpenFile( loc.a() );
diff --git a/src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.h b/src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.h
index 1ba960fcac3..728d1540831 100644
--- a/src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.h
+++ b/src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.h
@@ -94,8 +94,6 @@ namespace mongo {
int sizeNeeded = 0,
bool preallocateOnly = false );
- void flushFiles( bool sync );
-
// must call Extent::reuse on the returned extent
DiskLoc allocateExtent( OperationContext* txn,
bool capped,
diff --git a/src/mongo/db/structure/catalog/namespace_details.cpp b/src/mongo/db/structure/catalog/namespace_details.cpp
index 30678e7d05a..a48f5ac75df 100644
--- a/src/mongo/db/structure/catalog/namespace_details.cpp
+++ b/src/mongo/db/structure/catalog/namespace_details.cpp
@@ -89,7 +89,7 @@ namespace mongo {
const StringData& ns,
NamespaceIndex& ni,
int nindexessofar) {
- Lock::assertWriteLocked(ns);
+ txn->lockState()->assertWriteLocked(ns);
int i = (nindexessofar - NIndexesBase) / NIndexesExtra;
verify( i >= 0 && i <= 1 );
diff --git a/src/mongo/db/structure/catalog/namespace_index.cpp b/src/mongo/db/structure/catalog/namespace_index.cpp
index e4ce0bdca27..118297c311d 100644
--- a/src/mongo/db/structure/catalog/namespace_index.cpp
+++ b/src/mongo/db/structure/catalog/namespace_index.cpp
@@ -65,14 +65,14 @@ namespace mongo {
void NamespaceIndex::add_ns( OperationContext* txn,
const Namespace& ns, const NamespaceDetails* details ) {
string nsString = ns.toString();
- Lock::assertWriteLocked( nsString );
+ txn->lockState()->assertWriteLocked( nsString );
massert( 17315, "no . in ns", nsString.find( '.' ) != string::npos );
init( txn );
uassert( 10081, "too many namespaces/collections", _ht->put(txn, ns, *details));
}
void NamespaceIndex::kill_ns( OperationContext* txn, const StringData& ns) {
- Lock::assertWriteLocked(ns);
+ txn->lockState()->assertWriteLocked(ns);
if ( !_ht.get() )
return;
Namespace n(ns);
@@ -132,7 +132,7 @@ namespace mongo {
NOINLINE_DECL void NamespaceIndex::_init( OperationContext* txn ) {
verify( !_ht.get() );
- Lock::assertWriteLocked(_database);
+ txn->lockState()->assertWriteLocked(_database);
/* if someone manually deleted the datafiles for a database,
we need to be sure to clear any cached info for the database in
diff --git a/src/mongo/db/structure/record_store_v1_test_help.cpp b/src/mongo/db/structure/record_store_v1_test_help.cpp
index cebb035b87c..251c930fdcd 100644
--- a/src/mongo/db/structure/record_store_v1_test_help.cpp
+++ b/src/mongo/db/structure/record_store_v1_test_help.cpp
@@ -214,9 +214,6 @@ namespace mongo {
return -1;
}
- void DummyExtentManager::flushFiles( bool sync ) {
- }
-
DiskLoc DummyExtentManager::allocateExtent( OperationContext* txn,
bool capped,
int size,
diff --git a/src/mongo/db/structure/record_store_v1_test_help.h b/src/mongo/db/structure/record_store_v1_test_help.h
index b4694f6acb9..1631713b8f8 100644
--- a/src/mongo/db/structure/record_store_v1_test_help.h
+++ b/src/mongo/db/structure/record_store_v1_test_help.h
@@ -119,8 +119,6 @@ namespace mongo {
virtual size_t numFiles() const;
virtual long long fileSize() const;
- virtual void flushFiles( bool sync );
-
virtual DiskLoc allocateExtent( OperationContext* txn,
bool capped,
int size,
diff --git a/src/mongo/dbtests/dbhelper_tests.cpp b/src/mongo/dbtests/dbhelper_tests.cpp
index 1339deb0074..90717127cae 100644
--- a/src/mongo/dbtests/dbhelper_tests.cpp
+++ b/src/mongo/dbtests/dbhelper_tests.cpp
@@ -151,7 +151,7 @@ namespace mongo {
ASSERT_LESS_THAN( estSizeBytes, maxSizeBytes );
Database* db = dbHolder().get(
- nsToDatabase(range.ns), storageGlobalParams.dbpath);
+ &txn, nsToDatabase(range.ns), storageGlobalParams.dbpath);
const Collection* collection = db->getCollection(&txn, ns);
// Make sure all the disklocs actually correspond to the right info
diff --git a/src/mongo/dbtests/mmaptests.cpp b/src/mongo/dbtests/mmaptests.cpp
index 743286f044b..33b26cfe4ac 100644
--- a/src/mongo/dbtests/mmaptests.cpp
+++ b/src/mongo/dbtests/mmaptests.cpp
@@ -58,7 +58,8 @@ namespace MMapTests {
try { boost::filesystem::remove(fn); }
catch(...) { }
- Lock::GlobalWrite lk(&cc().lockState());
+ LockState lockState;
+ Lock::GlobalWrite lk(&lockState);
{
DurableMappedFile f;
diff --git a/src/mongo/dbtests/threadedtests.cpp b/src/mongo/dbtests/threadedtests.cpp
index 3907ff3855e..998ba2c2bed 100644
--- a/src/mongo/dbtests/threadedtests.cpp
+++ b/src/mongo/dbtests/threadedtests.cpp
@@ -195,9 +195,9 @@ namespace ThreadedTests {
char what = 'r';
Lock::DBRead r(&lockState, "foo");
ASSERT(lockState.threadState() == what && lockState.isAtLeastReadLocked("foo"));
- ASSERT( !lockState.isNested() );
+ ASSERT(!lockState.isRecursive());
Lock::DBRead r2(&lockState, "foo");
- ASSERT(lockState.isNested());
+ ASSERT(lockState.isRecursive());
ASSERT(lockState.threadState() == what && lockState.isAtLeastReadLocked("foo"));
Lock::DBRead r3(&lockState, "local");
if( sometimes ) {
diff --git a/src/mongo/s/d_logic.h b/src/mongo/s/d_logic.h
index d23bc6f9c2e..970eee5b8ad 100644
--- a/src/mongo/s/d_logic.h
+++ b/src/mongo/s/d_logic.h
@@ -152,7 +152,11 @@ namespace mongo {
* @param min max the chunk to eliminate from the current metadata
* @param version at which the new metadata should be at
*/
- void donateChunk( const std::string& ns , const BSONObj& min , const BSONObj& max , ChunkVersion version );
+ void donateChunk(OperationContext* txn,
+ const std::string& ns,
+ const BSONObj& min,
+ const BSONObj& max,
+ ChunkVersion version);
/**
* Creates and installs new chunk metadata for a given collection by reclaiming a previously
@@ -168,7 +172,9 @@ namespace mongo {
* @param ns the collection
* @param prevMetadata the previous metadata before we donated a chunk
*/
- void undoDonateChunk( const std::string& ns, CollectionMetadataPtr prevMetadata );
+ void undoDonateChunk(OperationContext* txn,
+ const std::string& ns,
+ CollectionMetadataPtr prevMetadata);
/**
* Remembers a chunk range between 'min' and 'max' as a range which will have data migrated
@@ -179,7 +185,8 @@ namespace mongo {
*
* @return false with errMsg if the range is owned by this shard
*/
- bool notePending( const std::string& ns,
+ bool notePending(OperationContext* txn,
+ const std::string& ns,
const BSONObj& min,
const BSONObj& max,
const OID& epoch,
@@ -197,7 +204,8 @@ namespace mongo {
* @return false with errMsg if the range is owned by the shard or the epoch of the metadata
* has changed
*/
- bool forgetPending( const std::string& ns,
+ bool forgetPending(OperationContext* txn,
+ const std::string& ns,
const BSONObj& min,
const BSONObj& max,
const OID& epoch,
@@ -216,8 +224,12 @@ namespace mongo {
* @param splitKeys point in which to split
* @param version at which the new metadata should be at
*/
- void splitChunk( const std::string& ns , const BSONObj& min , const BSONObj& max , const std::vector<BSONObj>& splitKeys ,
- ChunkVersion version );
+ void splitChunk(OperationContext* txn,
+ const std::string& ns,
+ const BSONObj& min,
+ const BSONObj& max,
+ const std::vector<BSONObj>& splitKeys,
+ ChunkVersion version );
/**
* Creates and installs a new chunk metadata for a given collection by merging a range of
@@ -232,7 +244,8 @@ namespace mongo {
* @param minKey maxKey the range which should be merged
* @param newShardVersion the shard version the newly merged chunk should have
*/
- void mergeChunks( const std::string& ns,
+ void mergeChunks(OperationContext* txn,
+ const std::string& ns,
const BSONObj& minKey,
const BSONObj& maxKey,
ChunkVersion mergedVersion );
diff --git a/src/mongo/s/d_merge.cpp b/src/mongo/s/d_merge.cpp
index 9badce32443..caf678ab00c 100644
--- a/src/mongo/s/d_merge.cpp
+++ b/src/mongo/s/d_merge.cpp
@@ -293,7 +293,7 @@ namespace mongo {
{
OperationContextImpl txn; // XXX?
Lock::DBWrite writeLk(txn.lockState(), nss.ns());
- shardingState.mergeChunks( nss.ns(), minKey, maxKey, mergeVersion );
+ shardingState.mergeChunks(&txn, nss.ns(), minKey, maxKey, mergeVersion);
}
//
diff --git a/src/mongo/s/d_migrate.cpp b/src/mongo/s/d_migrate.cpp
index a4de23b3d1c..0dd5ccbf39d 100644
--- a/src/mongo/s/d_migrate.cpp
+++ b/src/mongo/s/d_migrate.cpp
@@ -1216,7 +1216,7 @@ namespace mongo {
// bump the metadata's version up and "forget" about the chunk being moved
// this is not the commit point but in practice the state in this shard won't
// until the commit it done
- shardingState.donateChunk( ns , min , max , myVersion );
+ shardingState.donateChunk(txn, ns, min, max, myVersion);
}
log() << "moveChunk setting version to: " << myVersion << migrateLog;
@@ -1252,7 +1252,7 @@ namespace mongo {
// revert the chunk manager back to the state before "forgetting" about the
// chunk
- shardingState.undoDonateChunk( ns, origCollMetadata );
+ shardingState.undoDonateChunk(txn, ns, origCollMetadata);
}
log() << "Shard version successfully reset to clean up failed migration"
<< endl;
@@ -1415,7 +1415,7 @@ namespace mongo {
// Revert the metadata back to the state before "forgetting"
// about the chunk.
- shardingState.undoDonateChunk( ns, origCollMetadata );
+ shardingState.undoDonateChunk(txn, ns, origCollMetadata);
}
log() << "Shard version successfully reset to clean up failed migration" << endl;
@@ -1619,7 +1619,7 @@ namespace mongo {
// Unprotect the range if needed/possible on unsuccessful TO migration
Lock::DBWrite lk(txn->lockState(), ns);
string errMsg;
- if ( !shardingState.forgetPending( ns, min, max, epoch, &errMsg ) ) {
+ if (!shardingState.forgetPending(txn, ns, min, max, epoch, &errMsg)) {
warning() << errMsg << endl;
}
}
@@ -1738,7 +1738,7 @@ namespace mongo {
{
// Protect the range by noting that we're now starting a migration to it
Lock::DBWrite lk(txn->lockState(), ns);
- if ( !shardingState.notePending( ns, min, max, epoch, &errmsg ) ) {
+ if (!shardingState.notePending(txn, ns, min, max, epoch, &errmsg)) {
warning() << errmsg << endl;
setState(FAIL);
return;
diff --git a/src/mongo/s/d_split.cpp b/src/mongo/s/d_split.cpp
index 6c99a1d8237..aad554d5265 100644
--- a/src/mongo/s/d_split.cpp
+++ b/src/mongo/s/d_split.cpp
@@ -825,7 +825,7 @@ namespace mongo {
{
Lock::DBWrite writeLk(txn->lockState(), ns);
- shardingState.splitChunk( ns , min , max , splitKeys , maxVersion );
+ shardingState.splitChunk(txn, ns, min, max, splitKeys, maxVersion);
}
//
diff --git a/src/mongo/s/d_state.cpp b/src/mongo/s/d_state.cpp
index c266b245cae..c843b01709f 100644
--- a/src/mongo/s/d_state.cpp
+++ b/src/mongo/s/d_state.cpp
@@ -185,9 +185,13 @@ namespace mongo {
}
}
- void ShardingState::donateChunk( const string& ns , const BSONObj& min , const BSONObj& max , ChunkVersion version ) {
+ void ShardingState::donateChunk(OperationContext* txn,
+ const string& ns,
+ const BSONObj& min,
+ const BSONObj& max,
+ ChunkVersion version) {
- Lock::assertWriteLocked( ns );
+ txn->lockState()->assertWriteLocked( ns );
scoped_lock lk( _mutex );
CollectionMetadataMap::const_iterator it = _collMetadata.find( ns );
@@ -213,9 +217,11 @@ namespace mongo {
_collMetadata[ns] = cloned;
}
- void ShardingState::undoDonateChunk( const string& ns, CollectionMetadataPtr prevMetadata ) {
+ void ShardingState::undoDonateChunk(OperationContext* txn,
+ const string& ns,
+ CollectionMetadataPtr prevMetadata) {
- Lock::assertWriteLocked( ns );
+ txn->lockState()->assertWriteLocked( ns );
scoped_lock lk( _mutex );
log() << "ShardingState::undoDonateChunk acquired _mutex" << endl;
@@ -225,13 +231,14 @@ namespace mongo {
it->second = prevMetadata;
}
- bool ShardingState::notePending( const string& ns,
+ bool ShardingState::notePending(OperationContext* txn,
+ const string& ns,
const BSONObj& min,
const BSONObj& max,
const OID& epoch,
string* errMsg ) {
- Lock::assertWriteLocked( ns );
+ txn->lockState()->assertWriteLocked( ns );
scoped_lock lk( _mutex );
CollectionMetadataMap::const_iterator it = _collMetadata.find( ns );
@@ -269,13 +276,14 @@ namespace mongo {
return true;
}
- bool ShardingState::forgetPending( const string& ns,
+ bool ShardingState::forgetPending(OperationContext* txn,
+ const string& ns,
const BSONObj& min,
const BSONObj& max,
const OID& epoch,
string* errMsg ) {
- Lock::assertWriteLocked( ns );
+ txn->lockState()->assertWriteLocked( ns );
scoped_lock lk( _mutex );
CollectionMetadataMap::const_iterator it = _collMetadata.find( ns );
@@ -313,13 +321,14 @@ namespace mongo {
return true;
}
- void ShardingState::splitChunk( const string& ns,
+ void ShardingState::splitChunk(OperationContext* txn,
+ const string& ns,
const BSONObj& min,
const BSONObj& max,
const vector<BSONObj>& splitKeys,
ChunkVersion version ) {
- Lock::assertWriteLocked( ns );
+ txn->lockState()->assertWriteLocked( ns );
scoped_lock lk( _mutex );
CollectionMetadataMap::const_iterator it = _collMetadata.find( ns );
@@ -337,12 +346,13 @@ namespace mongo {
_collMetadata[ns] = cloned;
}
- void ShardingState::mergeChunks( const string& ns,
+ void ShardingState::mergeChunks(OperationContext* txn,
+ const string& ns,
const BSONObj& minKey,
const BSONObj& maxKey,
ChunkVersion mergedVersion ) {
- Lock::assertWriteLocked( ns );
+ txn->lockState()->assertWriteLocked( ns );
scoped_lock lk( _mutex );
CollectionMetadataMap::const_iterator it = _collMetadata.find( ns );
diff --git a/src/mongo/tools/dump.cpp b/src/mongo/tools/dump.cpp
index 64da13a0d85..36cd51a387b 100644
--- a/src/mongo/tools/dump.cpp
+++ b/src/mongo/tools/dump.cpp
@@ -342,7 +342,7 @@ public:
OperationContextImpl txn;
Client::WriteContext cx(&txn, dbname);
- Database* db = dbHolder().get(dbname, storageGlobalParams.dbpath);
+ Database* db = dbHolder().get(&txn, dbname, storageGlobalParams.dbpath);
list<string> namespaces;
db->getDatabaseCatalogEntry()->getCollectionNamespaces( &namespaces );
diff --git a/src/mongo/util/mmap_posix.cpp b/src/mongo/util/mmap_posix.cpp
index 22367dbe2e0..110bda3253f 100644
--- a/src/mongo/util/mmap_posix.cpp
+++ b/src/mongo/util/mmap_posix.cpp
@@ -232,7 +232,6 @@ namespace mongo {
void* MemoryMappedFile::remapPrivateView(void *oldPrivateAddr) {
#if defined(__sunos__) // SERVER-8795
- verify( Lock::isW() );
LockMongoFilesExclusive lockMongoFiles;
#endif
diff --git a/src/mongo/util/mmap_win.cpp b/src/mongo/util/mmap_win.cpp
index 27f72ce5e1c..061a3bf228a 100644
--- a/src/mongo/util/mmap_win.cpp
+++ b/src/mongo/util/mmap_win.cpp
@@ -453,8 +453,6 @@ namespace mongo {
}
void* MemoryMappedFile::remapPrivateView(void *oldPrivateAddr) {
- verify( Lock::isW() );
-
LockMongoFilesExclusive lockMongoFiles;
clearWritableBits(oldPrivateAddr);