-rw-r--r--  src/mongo/db/catalog/database.cpp |  8
-rw-r--r--  src/mongo/db/catalog/index_catalog.cpp | 11
-rw-r--r--  src/mongo/db/commands/create_indexes.cpp |  3
-rw-r--r--  src/mongo/db/commands/write_commands/batch_executor.cpp |  7
-rw-r--r--  src/mongo/db/concurrency/d_concurrency_test.cpp |  5
-rw-r--r--  src/mongo/db/concurrency/lock_state.cpp | 25
-rw-r--r--  src/mongo/db/concurrency/lock_state.h |  5
-rw-r--r--  src/mongo/db/concurrency/locker.h |  5
-rw-r--r--  src/mongo/db/instance.cpp |  2
-rw-r--r--  src/mongo/db/repl/oplog.cpp |  2
-rw-r--r--  src/mongo/db/storage/mmap_v1/catalog/namespace_details.cpp |  8
-rw-r--r--  src/mongo/db/storage/mmap_v1/catalog/namespace_index.cpp | 22
-rw-r--r--  src/mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.cpp |  2
-rw-r--r--  src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.cpp |  4
-rw-r--r--  src/mongo/dbtests/documentsourcetests.cpp | 22
-rw-r--r--  src/mongo/dbtests/threadedtests.cpp |  2
-rw-r--r--  src/mongo/s/d_state.cpp | 14
17 files changed, 81 insertions(+), 66 deletions(-)
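
Taken together, this change reverts the mode-specific lock invariants (invariant(lockState->isDbLockedForMode(...)) and invariant(lockState->isCollectionLockedForMode(...))) back to the namespace-based isWriteLocked(ns)/assertWriteLocked(ns) checks, and renames isReadLocked() to hasAnyReadLock(). The minimal sketch below only illustrates the call-site shape after the change; LockerStub and dropCollectionSketch are hypothetical stand-ins, not code from this tree.

    #include <stdexcept>
    #include <string>

    // Hypothetical stub exposing just the two calls this diff introduces at
    // call sites; the real interface is Locker in db/concurrency/locker.h.
    struct LockerStub {
        bool writeLockedForNs = false;
        bool isWriteLocked(const std::string&) const { return writeLockedForNs; }
        void assertWriteLocked(const std::string& ns) const {
            if (!isWriteLocked(ns))
                throw std::runtime_error("expected to be write locked for " + ns);
        }
    };

    // Shape of a typical call site after the change (made-up helper).
    void dropCollectionSketch(LockerStub* lockState, const std::string& ns) {
        // Before: invariant(lockState->isCollectionLockedForMode(ns, MODE_X));
        // After:  assert the coarser "write locked for this namespace" property.
        lockState->assertWriteLocked(ns);
        // ... the catalog mutation would go here ...
    }

    int main() {
        LockerStub locker;
        locker.writeLockedForNs = true;      // pretend the DB X lock is already held
        dropCollectionSketch(&locker, "test.coll");
    }
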
diff --git a/src/mongo/db/catalog/database.cpp b/src/mongo/db/catalog/database.cpp
index 24c0ea01821..88c46cce27c 100644
--- a/src/mongo/db/catalog/database.cpp
+++ b/src/mongo/db/catalog/database.cpp
@@ -218,7 +218,8 @@ namespace mongo {
}
void Database::clearTmpCollections(OperationContext* txn) {
- invariant(txn->lockState()->isDbLockedForMode(name(), MODE_X));
+
+ txn->lockState()->assertWriteLocked( _name );
list<string> collections;
_dbEntry->getCollectionNamespaces( &collections );
@@ -553,11 +554,10 @@ namespace mongo {
void dropDatabase(OperationContext* txn, Database* db ) {
invariant( db );
- // Store the name so we have if for after the db object is deleted
- const string name = db->name();
+ string name = db->name(); // just to have safe
LOG(1) << "dropDatabase " << name << endl;
- invariant(txn->lockState()->isDbLockedForMode(name, MODE_X));
+ txn->lockState()->assertWriteLocked( name );
BackgroundOperation::assertNoBgOpInProgForDb(name.c_str());
diff --git a/src/mongo/db/catalog/index_catalog.cpp b/src/mongo/db/catalog/index_catalog.cpp
index 8d54729040a..379b78ca1ee 100644
--- a/src/mongo/db/catalog/index_catalog.cpp
+++ b/src/mongo/db/catalog/index_catalog.cpp
@@ -344,7 +344,7 @@ namespace {
} // namespace
Status IndexCatalog::createIndexOnEmptyCollection(OperationContext* txn, BSONObj spec) {
- invariant(txn->lockState()->isDbLockedForMode(_collection->_database->name(), MODE_X));
+ txn->lockState()->assertWriteLocked( _collection->_database->name() );
invariant(_collection->numRecords(txn) == 0);
_checkMagic();
@@ -696,7 +696,7 @@ namespace {
Status IndexCatalog::dropAllIndexes(OperationContext* txn,
bool includingIdIndex) {
- invariant(txn->lockState()->isDbLockedForMode(_collection->_database->name(), MODE_X));
+ txn->lockState()->assertWriteLocked( _collection->_database->name() );
BackgroundOperation::assertNoBgOpInProgForNs( _collection->ns().ns() );
@@ -768,7 +768,7 @@ namespace {
Status IndexCatalog::dropIndex(OperationContext* txn,
IndexDescriptor* desc ) {
- invariant(txn->lockState()->isDbLockedForMode(_collection->_database->name(), MODE_X));
+ txn->lockState()->assertWriteLocked( _collection->_database->name() );
IndexCatalogEntry* entry = _entries.find( desc );
if ( !entry )
@@ -1038,10 +1038,9 @@ namespace {
const IndexDescriptor* IndexCatalog::refreshEntry( OperationContext* txn,
const IndexDescriptor* oldDesc ) {
- invariant(txn->lockState()->isDbLockedForMode(_collection->_database->name(), MODE_X));
-
- const std::string indexName = oldDesc->indexName();
+ txn->lockState()->assertWriteLocked( _collection->_database->name() );
+ std::string indexName = oldDesc->indexName();
invariant( _collection->getCatalogEntry()->isIndexReady( txn, indexName ) );
// Notify other users of the IndexCatalog that we're about to invalidate 'oldDesc'.
diff --git a/src/mongo/db/commands/create_indexes.cpp b/src/mongo/db/commands/create_indexes.cpp
index 9c9bb8905d0..67cd6bdff33 100644
--- a/src/mongo/db/commands/create_indexes.cpp
+++ b/src/mongo/db/commands/create_indexes.cpp
@@ -247,8 +247,7 @@ namespace mongo {
static Status checkUniqueIndexConstraints(OperationContext* txn,
const StringData& ns,
const BSONObj& newIdxKey) {
-
- invariant(txn->lockState()->isCollectionLockedForMode(ns, MODE_X));
+ txn->lockState()->assertWriteLocked( ns );
if ( shardingState.enabled() ) {
CollectionMetadataPtr metadata(
diff --git a/src/mongo/db/commands/write_commands/batch_executor.cpp b/src/mongo/db/commands/write_commands/batch_executor.cpp
index 760696862f2..1be8e88f502 100644
--- a/src/mongo/db/commands/write_commands/batch_executor.cpp
+++ b/src/mongo/db/commands/write_commands/batch_executor.cpp
@@ -433,7 +433,7 @@ namespace mongo {
WriteOpResult* result) {
const NamespaceString& nss = request.getTargetingNSS();
- dassert(txn->lockState()->isCollectionLockedForMode(nss.ns(), MODE_IX));
+ txn->lockState()->assertWriteLocked( nss.ns() );
ChunkVersion requestShardVersion =
request.isMetadataSet() && request.getMetadata()->isShardVersionSet() ?
@@ -486,7 +486,7 @@ namespace mongo {
WriteOpResult* result) {
const NamespaceString& nss = request.getTargetingNSS();
- dassert(txn->lockState()->isCollectionLockedForMode(nss.ns(), MODE_IX));
+ txn->lockState()->assertWriteLocked( nss.ns() );
if ( !request.isUniqueIndexRequest() )
return true;
@@ -1103,7 +1103,8 @@ namespace mongo {
WriteOpResult* result ) {
const string& insertNS = collection->ns().ns();
- invariant(txn->lockState()->isCollectionLockedForMode(insertNS, MODE_IX));
+
+ txn->lockState()->assertWriteLocked( insertNS );
WriteUnitOfWork wunit(txn);
StatusWith<RecordId> status = collection->insertDocument( txn, docToInsert, true );
diff --git a/src/mongo/db/concurrency/d_concurrency_test.cpp b/src/mongo/db/concurrency/d_concurrency_test.cpp
index b874a1c1b4d..cdc28b34d25 100644
--- a/src/mongo/db/concurrency/d_concurrency_test.cpp
+++ b/src/mongo/db/concurrency/d_concurrency_test.cpp
@@ -176,7 +176,7 @@ namespace mongo {
Lock::DBLock r1(&ls, "db1", MODE_X);
Lock::DBLock r2(&ls, "db1", MODE_X);
- ASSERT(ls.isDbLockedForMode("db1", MODE_X));
+ ASSERT(ls.isWriteLocked("db1"));
}
TEST(DConcurrency, MultipleConflictingDBLocksOnSameThread) {
@@ -185,8 +185,7 @@ namespace mongo {
Lock::DBLock r1(&ls, "db1", MODE_X);
Lock::DBLock r2(&ls, "db1", MODE_S);
- ASSERT(ls.isDbLockedForMode("db1", MODE_X));
- ASSERT(ls.isDbLockedForMode("db1", MODE_S));
+ ASSERT(ls.isWriteLocked("db1"));
}
TEST(DConcurrency, IsDbLockedForSMode) {
diff --git a/src/mongo/db/concurrency/lock_state.cpp b/src/mongo/db/concurrency/lock_state.cpp
index 87eedcc1bda..a80a2a966a4 100644
--- a/src/mongo/db/concurrency/lock_state.cpp
+++ b/src/mongo/db/concurrency/lock_state.cpp
@@ -161,6 +161,11 @@ namespace {
}
template<bool IsForMMAPV1>
+ bool LockerImpl<IsForMMAPV1>::hasAnyReadLock() const {
+ return isLockHeldForMode(resourceIdGlobal, MODE_IS);
+ }
+
+ template<bool IsForMMAPV1>
bool LockerImpl<IsForMMAPV1>::isLocked() const {
return getLockMode(resourceIdGlobal) != MODE_NONE;
}
@@ -171,8 +176,24 @@ namespace {
}
template<bool IsForMMAPV1>
- bool LockerImpl<IsForMMAPV1>::isReadLocked() const {
- return isLockHeldForMode(resourceIdGlobal, MODE_IS);
+ bool LockerImpl<IsForMMAPV1>::isWriteLocked(const StringData& ns) const {
+ if (isWriteLocked()) {
+ return true;
+ }
+
+ const StringData db = nsToDatabaseSubstring(ns);
+ const ResourceId resIdNs(RESOURCE_DATABASE, db);
+
+ return isLockHeldForMode(resIdNs, MODE_X);
+ }
+
+ template<bool IsForMMAPV1>
+ void LockerImpl<IsForMMAPV1>::assertWriteLocked(const StringData& ns) const {
+ if (!isWriteLocked(ns)) {
+ dump();
+ msgasserted(
+ 16105, mongoutils::str::stream() << "expected to be write locked for " << ns);
+ }
}
template<bool IsForMMAPV1>
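
The lock_state.cpp hunk above carries the core semantics: isWriteLocked(ns) is satisfied either by a global write lock or by an exclusive (MODE_X) lock on the namespace's database, and assertWriteLocked(ns) dumps the lock state and raises a fatal assertion (msgasserted 16105) otherwise. Below is a minimal standalone model of that decision, assuming a simplified per-database mode map in place of the real lock manager state.

    #include <cstdlib>
    #include <iostream>
    #include <map>
    #include <string>

    enum LockMode { MODE_NONE, MODE_IS, MODE_IX, MODE_S, MODE_X };

    // Simplified model: one global mode plus a per-database mode map.
    struct MiniLocker {
        LockMode globalMode = MODE_NONE;
        std::map<std::string, LockMode> dbMode;

        // Mirrors nsToDatabaseSubstring(): everything before the first '.'.
        static std::string dbOf(const std::string& ns) {
            return ns.substr(0, ns.find('.'));
        }

        // Simplification of the global isWriteLocked(): any write lock on
        // the global resource (X or IX) counts.
        bool isWriteLocked() const {
            return globalMode == MODE_X || globalMode == MODE_IX;
        }

        bool isWriteLocked(const std::string& ns) const {
            if (isWriteLocked())
                return true;                     // covered by the global lock
            auto it = dbMode.find(dbOf(ns));
            return it != dbMode.end() && it->second == MODE_X;
        }

        void assertWriteLocked(const std::string& ns) const {
            if (!isWriteLocked(ns)) {
                std::cerr << "expected to be write locked for " << ns << "\n";
                std::abort();                    // stands in for msgasserted(16105, ...)
            }
        }
    };

    int main() {
        MiniLocker locker;
        locker.dbMode["db1"] = MODE_X;           // DB-level exclusive lock
        locker.assertWriteLocked("db1.coll");    // passes: db1 is X-locked
        std::cout << locker.isWriteLocked("db2.coll") << "\n";  // prints 0
    }
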
diff --git a/src/mongo/db/concurrency/lock_state.h b/src/mongo/db/concurrency/lock_state.h
index 8918d5b3ee3..d034b158e8b 100644
--- a/src/mongo/db/concurrency/lock_state.h
+++ b/src/mongo/db/concurrency/lock_state.h
@@ -213,10 +213,13 @@ namespace mongo {
virtual bool isW() const;
virtual bool isR() const;
+ virtual bool hasAnyReadLock() const;
virtual bool isLocked() const;
virtual bool isWriteLocked() const;
- virtual bool isReadLocked() const;
+ virtual bool isWriteLocked(const StringData& ns) const;
+
+ virtual void assertWriteLocked(const StringData& ns) const;
virtual bool hasLockPending() const { return getWaitingResource().isValid() || _lockPendingParallelWriter; }
diff --git a/src/mongo/db/concurrency/locker.h b/src/mongo/db/concurrency/locker.h
index 352ab67328c..fff17db48ef 100644
--- a/src/mongo/db/concurrency/locker.h
+++ b/src/mongo/db/concurrency/locker.h
@@ -253,10 +253,13 @@ namespace mongo {
virtual bool isW() const = 0;
virtual bool isR() const = 0;
+ virtual bool hasAnyReadLock() const = 0; // explicitly r or R
virtual bool isLocked() const = 0;
virtual bool isWriteLocked() const = 0;
- virtual bool isReadLocked() const = 0;
+ virtual bool isWriteLocked(const StringData& ns) const = 0;
+
+ virtual void assertWriteLocked(const StringData& ns) const = 0;
/**
* Pending means we are currently trying to get a lock (could be the parallel batch writer
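
One consumer of the renamed hasAnyReadLock() is the profiling branch in instance.cpp below: writing a profile entry would need a write lock, which cannot safely be taken while a (possibly recursive) read lock is already held, so the code only logs a note. A hedged sketch of that decision, with hypothetical stand-ins for the locker and the log call:

    #include <iostream>

    // Hypothetical view of the Locker interface declared above.
    struct LockerView {
        bool readLocked = false;
        bool hasAnyReadLock() const { return readLocked; }   // explicitly r or R
    };

    // Mirrors the shape of the profiling check: skip (and say why) when a
    // read lock is already held, otherwise allow the profile write.
    bool shouldWriteProfileEntry(const LockerView& locker) {
        if (locker.hasAnyReadLock()) {
            std::cout << "note: not profiling because recursive read lock\n";
            return false;
        }
        return true;
    }

    int main() {
        LockerView locker;
        locker.readLocked = true;
        shouldWriteProfileEntry(locker);         // prints the note, returns false
    }
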
diff --git a/src/mongo/db/instance.cpp b/src/mongo/db/instance.cpp
index 51464fa184d..8ba0cfbe52e 100644
--- a/src/mongo/db/instance.cpp
+++ b/src/mongo/db/instance.cpp
@@ -490,7 +490,7 @@ namespace {
if ( currentOp.shouldDBProfile( debug.executionTime ) ) {
// performance profiling is on
- if (txn->lockState()->isReadLocked()) {
+ if (txn->lockState()->hasAnyReadLock()) {
MONGO_LOG_COMPONENT(1, logComponentForOp(op))
<< "note: not profiling because recursive read lock" << endl;
}
diff --git a/src/mongo/db/repl/oplog.cpp b/src/mongo/db/repl/oplog.cpp
index 0c928943a4a..ea1259519b1 100644
--- a/src/mongo/db/repl/oplog.cpp
+++ b/src/mongo/db/repl/oplog.cpp
@@ -579,7 +579,7 @@ namespace repl {
bool valueB = fieldB.booleanSafe();
- invariant(txn->lockState()->isCollectionLockedForMode(ns, MODE_X));
+ txn->lockState()->assertWriteLocked(ns);
Collection* collection = db->getCollection( txn, ns );
IndexCatalog* indexCatalog = collection == NULL ? NULL : collection->getIndexCatalog();
diff --git a/src/mongo/db/storage/mmap_v1/catalog/namespace_details.cpp b/src/mongo/db/storage/mmap_v1/catalog/namespace_details.cpp
index 825178ea8c4..5df604dca16 100644
--- a/src/mongo/db/storage/mmap_v1/catalog/namespace_details.cpp
+++ b/src/mongo/db/storage/mmap_v1/catalog/namespace_details.cpp
@@ -53,6 +53,9 @@
namespace mongo {
+
+ BSONObj idKeyPattern = fromjson("{\"_id\":1}");
+
NamespaceDetails::NamespaceDetails( const DiskLoc &loc, bool capped ) {
BOOST_STATIC_ASSERT( sizeof(NamespaceDetails::Extra) <= sizeof(NamespaceDetails) );
@@ -88,10 +91,7 @@ namespace mongo {
const StringData& ns,
NamespaceIndex& ni,
int nindexessofar) {
-
- // Namespace details must always be changed under an exclusive DB lock
- const NamespaceString nss(ns);
- invariant(txn->lockState()->isDbLockedForMode(nss.db(), MODE_X));
+ txn->lockState()->assertWriteLocked(ns);
int i = (nindexessofar - NIndexesBase) / NIndexesExtra;
verify( i >= 0 && i <= 1 );
diff --git a/src/mongo/db/storage/mmap_v1/catalog/namespace_index.cpp b/src/mongo/db/storage/mmap_v1/catalog/namespace_index.cpp
index 97b8a9d74ce..02c7c04122a 100644
--- a/src/mongo/db/storage/mmap_v1/catalog/namespace_index.cpp
+++ b/src/mongo/db/storage/mmap_v1/catalog/namespace_index.cpp
@@ -69,27 +69,19 @@ namespace mongo {
}
void NamespaceIndex::add_ns( OperationContext* txn,
- const Namespace& ns,
- const NamespaceDetails* details ) {
-
- const NamespaceString nss(ns.toString());
- invariant(txn->lockState()->isDbLockedForMode(nss.db(), MODE_X));
-
- massert(17315, "no . in ns", nsIsFull(nss.toString()));
-
+ const Namespace& ns, const NamespaceDetails* details ) {
+ string nsString = ns.toString();
+ txn->lockState()->assertWriteLocked( nsString );
+ massert( 17315, "no . in ns", nsString.find( '.' ) != string::npos );
init( txn );
uassert( 10081, "too many namespaces/collections", _ht->put(txn, ns, *details));
}
void NamespaceIndex::kill_ns( OperationContext* txn, const StringData& ns) {
- const NamespaceString nss(ns.toString());
- invariant(txn->lockState()->isDbLockedForMode(nss.db(), MODE_X));
-
- if (!_ht.get()) {
+ txn->lockState()->assertWriteLocked(ns);
+ if ( !_ht.get() )
return;
- }
-
- const Namespace n(ns);
+ Namespace n(ns);
_ht->kill(txn, n);
if (ns.size() <= Namespace::MaxNsColletionLen) {
diff --git a/src/mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.cpp b/src/mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.cpp
index 0fe029516f5..405aafa6994 100644
--- a/src/mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.cpp
+++ b/src/mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.cpp
@@ -227,7 +227,7 @@ namespace mongo {
}
Status MMAPV1DatabaseCatalogEntry::dropCollection(OperationContext* txn, const StringData& ns) {
- invariant(txn->lockState()->isCollectionLockedForMode(ns, MODE_X));
+ invariant(txn->lockState()->isWriteLocked(ns));
_removeFromCache(txn->recoveryUnit(), ns);
NamespaceDetails* details = _namespaceIndex.details( ns );
diff --git a/src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.cpp b/src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.cpp
index a579723a964..e14a5f7b4c8 100644
--- a/src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.cpp
+++ b/src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.cpp
@@ -190,9 +190,7 @@ namespace mongo {
int sizeNeeded,
bool preallocateNextFile) {
- // Database must be stable and we need to be in some sort of an update operation in order
- // to add a new file.
- invariant(txn->lockState()->isDbLockedForMode(_dbname, MODE_IX));
+ invariant(txn->lockState()->isWriteLocked(_dbname));
const int allocFileId = _files.size();
diff --git a/src/mongo/dbtests/documentsourcetests.cpp b/src/mongo/dbtests/documentsourcetests.cpp
index 67476ff42b2..4e5a44742a9 100644
--- a/src/mongo/dbtests/documentsourcetests.cpp
+++ b/src/mongo/dbtests/documentsourcetests.cpp
@@ -205,11 +205,11 @@ namespace DocumentSourceTests {
void run() {
createSource();
// The DocumentSourceCursor doesn't hold a read lock.
- ASSERT( !_opCtx.lockState()->isReadLocked() );
+ ASSERT( !_opCtx.lockState()->hasAnyReadLock() );
// The collection is empty, so the source produces no results.
ASSERT( !source()->getNext() );
// Exhausting the source releases the read lock.
- ASSERT( !_opCtx.lockState()->isReadLocked() );
+ ASSERT( !_opCtx.lockState()->hasAnyReadLock() );
}
};
@@ -220,7 +220,7 @@ namespace DocumentSourceTests {
client.insert( ns, BSON( "a" << 1 ) );
createSource();
// The DocumentSourceCursor doesn't hold a read lock.
- ASSERT( !_opCtx.lockState()->isReadLocked() );
+ ASSERT( !_opCtx.lockState()->hasAnyReadLock() );
// The cursor will produce the expected result.
boost::optional<Document> next = source()->getNext();
ASSERT(bool(next));
@@ -228,7 +228,7 @@ namespace DocumentSourceTests {
// There are no more results.
ASSERT( !source()->getNext() );
// Exhausting the source releases the read lock.
- ASSERT( !_opCtx.lockState()->isReadLocked() );
+ ASSERT( !_opCtx.lockState()->hasAnyReadLock() );
}
};
@@ -238,10 +238,10 @@ namespace DocumentSourceTests {
void run() {
createSource();
// The DocumentSourceCursor doesn't hold a read lock.
- ASSERT( !_opCtx.lockState()->isReadLocked() );
+ ASSERT( !_opCtx.lockState()->hasAnyReadLock() );
source()->dispose();
// Releasing the cursor releases the read lock.
- ASSERT( !_opCtx.lockState()->isReadLocked() );
+ ASSERT( !_opCtx.lockState()->hasAnyReadLock() );
// The source is marked as exhausted.
ASSERT( !source()->getNext() );
}
@@ -264,10 +264,10 @@ namespace DocumentSourceTests {
ASSERT(bool(next));
ASSERT_EQUALS(Value(2), next->getField("a"));
// The DocumentSourceCursor doesn't hold a read lock.
- ASSERT( !_opCtx.lockState()->isReadLocked() );
+ ASSERT( !_opCtx.lockState()->hasAnyReadLock() );
source()->dispose();
// Disposing of the source releases the lock.
- ASSERT( !_opCtx.lockState()->isReadLocked() );
+ ASSERT( !_opCtx.lockState()->hasAnyReadLock() );
// The source cannot be advanced further.
ASSERT( !source()->getNext() );
}
@@ -356,7 +356,7 @@ namespace DocumentSourceTests {
client.insert( ns, BSON( "a" << 2 ) );
createSource();
// The DocumentSourceCursor doesn't hold a read lock.
- ASSERT( !_opCtx.lockState()->isReadLocked() );
+ ASSERT( !_opCtx.lockState()->hasAnyReadLock() );
createLimit( 1 );
limit()->setSource( source() );
// The limit's result is as expected.
@@ -366,7 +366,7 @@ namespace DocumentSourceTests {
// The limit is exhausted.
ASSERT( !limit()->getNext() );
// The limit disposes the source, releasing the read lock.
- ASSERT( !_opCtx.lockState()->isReadLocked() );
+ ASSERT( !_opCtx.lockState()->hasAnyReadLock() );
}
};
@@ -395,7 +395,7 @@ namespace DocumentSourceTests {
ASSERT( !limit()->getNext() );
// The limit disposes the match, which disposes the source and releases the read
// lock.
- ASSERT( !_opCtx.lockState()->isReadLocked() );
+ ASSERT( !_opCtx.lockState()->hasAnyReadLock() );
}
};
diff --git a/src/mongo/dbtests/threadedtests.cpp b/src/mongo/dbtests/threadedtests.cpp
index 2a708b4bfe9..fe6e3945e53 100644
--- a/src/mongo/dbtests/threadedtests.cpp
+++ b/src/mongo/dbtests/threadedtests.cpp
@@ -125,7 +125,7 @@ namespace ThreadedTests {
}
else if( i % 7 == 1 ) {
Lock::GlobalRead r(txn.lockState());
- ASSERT(txn.lockState()->isReadLocked());
+ ASSERT(txn.lockState()->hasAnyReadLock());
}
else if( i % 7 == 4 &&
tnumber == 1 /*only one upgrader legal*/ ) {
diff --git a/src/mongo/s/d_state.cpp b/src/mongo/s/d_state.cpp
index e45afddb60e..4addb2b72e1 100644
--- a/src/mongo/s/d_state.cpp
+++ b/src/mongo/s/d_state.cpp
@@ -198,7 +198,7 @@ namespace mongo {
const BSONObj& max,
ChunkVersion version) {
- invariant(txn->lockState()->isCollectionLockedForMode(ns, MODE_X));
+ txn->lockState()->assertWriteLocked( ns );
scoped_lock lk( _mutex );
CollectionMetadataMap::const_iterator it = _collMetadata.find( ns );
@@ -227,8 +227,8 @@ namespace mongo {
void ShardingState::undoDonateChunk(OperationContext* txn,
const string& ns,
CollectionMetadataPtr prevMetadata) {
-
- invariant(txn->lockState()->isCollectionLockedForMode(ns, MODE_X));
+
+ txn->lockState()->assertWriteLocked( ns );
scoped_lock lk( _mutex );
log() << "ShardingState::undoDonateChunk acquired _mutex" << endl;
@@ -245,7 +245,7 @@ namespace mongo {
const OID& epoch,
string* errMsg ) {
- invariant(txn->lockState()->isCollectionLockedForMode(ns, MODE_X));
+ txn->lockState()->assertWriteLocked( ns );
scoped_lock lk( _mutex );
CollectionMetadataMap::const_iterator it = _collMetadata.find( ns );
@@ -290,7 +290,7 @@ namespace mongo {
const OID& epoch,
string* errMsg ) {
- invariant(txn->lockState()->isCollectionLockedForMode(ns, MODE_X));
+ txn->lockState()->assertWriteLocked( ns );
scoped_lock lk( _mutex );
CollectionMetadataMap::const_iterator it = _collMetadata.find( ns );
@@ -335,7 +335,7 @@ namespace mongo {
const vector<BSONObj>& splitKeys,
ChunkVersion version ) {
- invariant(txn->lockState()->isCollectionLockedForMode(ns, MODE_X));
+ txn->lockState()->assertWriteLocked( ns );
scoped_lock lk( _mutex );
CollectionMetadataMap::const_iterator it = _collMetadata.find( ns );
@@ -359,7 +359,7 @@ namespace mongo {
const BSONObj& maxKey,
ChunkVersion mergedVersion ) {
- invariant(txn->lockState()->isCollectionLockedForMode(ns, MODE_X));
+ txn->lockState()->assertWriteLocked( ns );
scoped_lock lk( _mutex );
CollectionMetadataMap::const_iterator it = _collMetadata.find( ns );