-rw-r--r--  src/mongo/db/concurrency/lock_state.cpp  124
-rw-r--r--  src/mongo/db/concurrency/lock_state.h      85
-rw-r--r--  src/mongo/dbtests/framework.cpp             4
3 files changed, 92 insertions, 121 deletions
diff --git a/src/mongo/db/concurrency/lock_state.cpp b/src/mongo/db/concurrency/lock_state.cpp
index 233e1f2f78e..3fc6602f178 100644
--- a/src/mongo/db/concurrency/lock_state.cpp
+++ b/src/mongo/db/concurrency/lock_state.cpp
@@ -43,10 +43,8 @@ namespace mongo {
// Dispenses unique Locker instance identifiers
AtomicUInt64 idCounter(0);
- // Global lock manager instance. We have a pointer an an instance so that they can be
- // changed and restored for unit-tests.
- LockManager globalLockManagerInstance;
- LockManager* globalLockManagerPtr = &globalLockManagerInstance;
+ // Global lock manager instance.
+ LockManager globalLockManager;
// Global lock. Every server operation, which uses the Locker must acquire this lock at
// least once. See comments in the header file (begin/endTransaction) for more information
@@ -367,13 +365,13 @@ namespace mongo {
return globalLockResult;
}
- // Obey the requested timeout
- const unsigned elapsedTimeMs = timer.millis();
- const unsigned remainingTimeMs =
- elapsedTimeMs < timeoutMs ? (timeoutMs - elapsedTimeMs) : 0;
-
+ // Special handling for MMAP V1 concurrency control
if (request == NULL) {
- // Special-handling for MMAP V1.
+ // Obey the requested timeout
+ const unsigned elapsedTimeMs = timer.millis();
+ const unsigned remainingTimeMs =
+ elapsedTimeMs < timeoutMs ? (timeoutMs - elapsedTimeMs) : 0;
+
LockResult flushLockResult =
lock(resourceIdMMAPV1Flush, getLockMode(resourceIdGlobal), remainingTimeMs);
@@ -402,8 +400,8 @@ namespace mongo {
invariant(flushLockRequest->mode == MODE_X);
invariant(flushLockRequest->recursiveCount == 1);
- globalLockManagerPtr->downgrade(globalLockRequest, MODE_S);
- globalLockManagerPtr->downgrade(flushLockRequest, MODE_S);
+ globalLockManager.downgrade(globalLockRequest, MODE_S);
+ globalLockManager.downgrade(flushLockRequest, MODE_S);
}
bool LockerImpl::unlockAll() {
@@ -466,18 +464,15 @@ namespace mongo {
LockResult LockerImpl::lock(const ResourceId& resId, LockMode mode, unsigned timeoutMs) {
_notify.clear();
- _lock.lock();
LockRequest* request = _find(resId);
+
+ _lock.lock();
if (request == NULL) {
request = new LockRequest();
request->initNew(this, &_notify);
_requests.insert(LockRequestsPair(resId, request));
}
- else {
- invariant(request->recursiveCount > 0);
- request->notify = &_notify;
- }
_lock.unlock();
// Methods on the Locker class are always called single-threadly, so it is safe to release
@@ -485,29 +480,29 @@ namespace mongo {
// state of the request is deadlock detection, which however would synchronize on the
// LockManager calls.
- LockResult result = globalLockManagerPtr->lock(resId, request, mode);
+ LockResult result = globalLockManager.lock(resId, request, mode);
if (result == LOCK_WAITING) {
- // Under MMAP V1 engine a deadlock can occur if a thread goes to sleep waiting on DB
- // lock, while holding the flush lock, so it has to be released. This is only correct
- // to do if not in a write unit of work.
- bool unlockedFlushLock = false;
-
- if (!inAWriteUnitOfWork() &&
- (resId != resourceIdGlobal) &&
- (resId != resourceIdMMAPV1Flush) &&
- (resId != resourceIdLocalDB)) {
-
- invariant(unlock(resourceIdMMAPV1Flush));
+ // Under MMAP V1 engine a deadlock can occur if a thread goes to sleep waiting on DB
+ // lock, while holding the flush lock, so it has to be released. This is only correct
+ // to do if not in a write unit of work.
+ bool unlockedFlushLock = false;
+
+ if (!inAWriteUnitOfWork() &&
+ (resId != resourceIdGlobal) &&
+ (resId != resourceIdMMAPV1Flush) &&
+ (resId != resourceIdLocalDB)) {
+
+ invariant(unlock(resourceIdMMAPV1Flush));
unlockedFlushLock = true;
}
// Do the blocking outside of the flush lock (if not in a write unit of work)
result = _notify.wait(timeoutMs);
- if (unlockedFlushLock) {
- // We cannot obey the timeout here, because it is not correct to return from the
- // lock request with the flush lock released.
- invariant(LOCK_OK ==
+ if (unlockedFlushLock) {
+ // We cannot obey the timeout here, because it is not correct to return from the
+ // lock request with the flush lock released.
+ invariant(LOCK_OK ==
lock(resourceIdMMAPV1Flush, getLockMode(resourceIdGlobal), UINT_MAX));
}
}
@@ -516,7 +511,10 @@ namespace mongo {
// Can only be LOCK_TIMEOUT, because the lock manager does not return any other errors
// at this point. Could be LOCK_DEADLOCK, when deadlock detection is implemented.
invariant(result == LOCK_TIMEOUT);
- _unlockAndUpdateRequestsList(resId, request);
+
+ if (globalLockManager.unlock(request)) {
+ _freeRequest(resId, request);
+ }
}
return result;
@@ -538,7 +536,12 @@ namespace mongo {
return false;
}
- return _unlockAndUpdateRequestsList(resId, request);
+ if (globalLockManager.unlock(request)) {
+ _freeRequest(resId, request);
+ return true;
+ }
+
+ return false;
}
LockMode LockerImpl::getLockMode(const ResourceId& resId) const {
@@ -664,11 +667,6 @@ namespace mongo {
}
}
- // Static
- void LockerImpl::dumpGlobalLockManager() {
- globalLockManagerPtr->dump();
- }
-
LockRequest* LockerImpl::_find(const ResourceId& resId) const {
LockRequestsMap::const_iterator it = _requests.find(resId);
@@ -676,26 +674,17 @@ namespace mongo {
return it->second;
}
- bool LockerImpl::_unlockAndUpdateRequestsList(const ResourceId& resId, LockRequest* request) {
- globalLockManagerPtr->unlock(request);
-
- const int recursiveCount = request->recursiveCount;
-
- if (recursiveCount == 0) {
- _lock.lock();
-
- const int numErased = _requests.erase(resId);
- invariant(numErased == 1);
+ void LockerImpl::_freeRequest(const ResourceId& resId, LockRequest* request) {
+ _lock.lock();
+ const int numErased = _requests.erase(resId);
+ _lock.unlock();
- _lock.unlock();
+ invariant(numErased == 1);
- // TODO: At some point we might want to cache a couple of these at least for the locks
- // which are acquired frequently (Global/Flush/DB) in order to reduce the number of
- // memory allocations.
- delete request;
- }
-
- return recursiveCount == 0;
+ // TODO: At some point we might want to cache a couple of these at least for the locks
+ // which are acquired frequently (Global/Flush/DB) in order to reduce the number of
+ // memory allocations.
+ delete request;
}
void LockerImpl::_yieldFlushLockForMMAPV1() {
@@ -706,16 +695,6 @@ namespace mongo {
}
}
- // Static
- void LockerImpl::changeGlobalLockManagerForTestingOnly(LockManager* newLockMgr) {
- if (newLockMgr != NULL) {
- globalLockManagerPtr = newLockMgr;
- }
- else {
- globalLockManagerPtr = &globalLockManagerInstance;
- }
- }
-
//
// Auto classes
@@ -744,4 +723,13 @@ namespace mongo {
invariant(_locker->unlock(resourceIdMMAPV1Flush));
}
+
+ //
+ // Standalone functions
+ //
+
+ LockManager* getGlobalLockManager() {
+ return &globalLockManager;
+ }
+
} // namespace mongo
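
Editor's note: the core refactoring in lock_state.cpp is that LockManager::unlock() now reports to the caller whether the request was fully released, and the Locker then frees the LockRequest itself via _freeRequest(), replacing the old _unlockAndUpdateRequestsList(). Below is a minimal standalone sketch of that contract; the types are simplified stand-ins for illustration only, not the real MongoDB classes.

#include <cassert>
#include <map>

// Simplified stand-ins for LockRequest, LockManager and Locker. They only
// illustrate the "unlock returns true when fully released" contract above.
struct LockRequest {
    int recursiveCount;
};

struct LockManager {
    // Returns true when the request's recursive count reaches zero, i.e. the
    // request is fully released and may be freed by the caller.
    bool unlock(LockRequest* request) {
        assert(request->recursiveCount > 0);
        return --request->recursiveCount == 0;
    }
};

struct Locker {
    std::map<int, LockRequest*> requests;   // keyed by a simplified resource id

    bool unlock(LockManager& mgr, int resId) {
        std::map<int, LockRequest*>::iterator it = requests.find(resId);
        if (it == requests.end()) return false;

        LockRequest* request = it->second;
        if (mgr.unlock(request)) {    // fully released?
            requests.erase(it);       // _freeRequest: remove from the map...
            delete request;           // ...and deallocate the request
            return true;
        }
        return false;                 // still held recursively
    }
};

The real LockerImpl additionally guards _requests with its spinlock; the sketch omits that, since the diff's own comment notes that Locker methods are called from a single thread.
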
diff --git a/src/mongo/db/concurrency/lock_state.h b/src/mongo/db/concurrency/lock_state.h
index 8a6c13e1f02..14bedf314df 100644
--- a/src/mongo/db/concurrency/lock_state.h
+++ b/src/mongo/db/concurrency/lock_state.h
@@ -114,55 +114,20 @@ namespace mongo {
virtual void restoreLockState(const LockSnapshot& stateToRestore);
- /**
- * Dumps all locks, on the global lock manager to the log for debugging purposes.
- */
- static void dumpGlobalLockManager();
-
-
- //
- // Methods used for unit-testing only
- //
-
- /**
- * Used for testing purposes only - changes the global lock manager. Doesn't delete the
- * previous instance, so make sure that it doesn't leak.
- *
- * @param newLockMgr New lock manager to be used. If NULL is passed, the original lock
- * manager is restored.
- */
- static void changeGlobalLockManagerForTestingOnly(LockManager* newLockMgr);
-
private:
+ typedef unordered_map<ResourceId, LockRequest*> LockRequestsMap;
+ typedef LockRequestsMap::value_type LockRequestsPair;
+
/**
* Shortcut to do the lookup in _requests. Must be called with the spinlock acquired.
*/
LockRequest* _find(const ResourceId& resId) const;
- bool _unlockAndUpdateRequestsList(const ResourceId& resId, LockRequest* request);
-
- // BEGIN MMAP V1 SPECIFIC
- //
-
- // These methods, along with the resourceIdMMAPV1Flush lock, implement the MMAP V1 storage
- // engine durability system synchronization. This is the way it works:
- //
- // Every operation, which starts calls lockGlobal, which acquires the global and flush
- // locks in the appropriate mode (IS for read operations, IX for write operations). Having
- // the flush lock in these modes indicates that there is an active reader/write
- // respectively.
- //
- // Whenever the flush thread (dur.cpp) activates, it goes through the following steps:
- // - Acquires the flush lock in X-mode (by creating a stack instance of
- // AutoAcquireFlushLockForMMAPV1Commit). This waits till all activity on the system
- // completes and does not allow new operations to start.
- //
- // This works, because as long as an operation is not in a write transaction
- // (beginWriteUnitOfWork has not been called), occasionally, on each lock acquisition
- // point, the locker will yield the flush lock and then acquire it again, so that the flush
- // thread can take turn. This is safe to do outside of a write transaction, because there
- // are no partially written changes.
+ /**
+ * Removes the specified lock request from the resources list and deallocates it.
+ */
+ void _freeRequest(const ResourceId& resId, LockRequest* request);
/**
* Temporarily yields the flush lock, if not in a write unit of work so that the commit
@@ -171,12 +136,6 @@ namespace mongo {
*/
void _yieldFlushLockForMMAPV1();
- //
- // END MMAP V1 SPECIFIC
-
-
- typedef unordered_map<ResourceId, LockRequest*> LockRequestsMap;
- typedef LockRequestsMap::value_type LockRequestsPair;
const uint64_t _id;
@@ -283,9 +242,27 @@ namespace mongo {
/**
- * There should be only one instance of this class used anywhere (outside of unit-tests) and it
- * should be in dur.cpp. See the comments above, in the MMAP V1 SPECIFIC section for more
- * information on how this is used.
+ * The resourceIdMMAPV1Flush lock is used to implement the MMAP V1 storage engine durability
+ * system synchronization. This is how it works:
+ *
+ * Every server operation (OperationContext) must call lockGlobal as its first locking
+ * action (it is illegal to acquire any other locks without calling this first). This
+ * acquires the global and flush locks in the appropriate modes (IS for read operations,
+ * IX for write operations). Holding the flush lock in one of these modes indicates to
+ * the flush thread that there is an active reader or writer.
+ *
+ * Whenever the flush thread (dur.cpp) activates, it goes through the following steps:
+ *
+ * - Acquire the flush lock in S-mode by creating a stack instance of
+ *   AutoAcquireFlushLockForMMAPV1Commit. This waits until all write activity on the system
+ *   completes and does not allow new write operations to start. Readers may still proceed.
+ *
+ * - Once the flush lock is granted in S-mode, the flush thread writes the journal entries
+ *   to disk and applies them to the shared view. After that, it upgrades the S-lock to X
+ *   and remaps the private view.
+ *
+ * NOTE: There should be only one user of this class, and it should be in dur.cpp.
+ *
*/
class AutoAcquireFlushLockForMMAPV1Commit {
public:
@@ -296,4 +273,10 @@ namespace mongo {
Locker* _locker;
};
+
+ /**
+ * Retrieves the global lock manager instance.
+ */
+ LockManager* getGlobalLockManager();
+
} // namespace mongo
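
Editor's note: for context, here is a hypothetical sketch of the flush-thread commit cycle that the new comment on AutoAcquireFlushLockForMMAPV1Commit describes. It assumes the constructor takes a Locker*, as the _locker member and the destructor in lock_state.cpp suggest; the journal and remap steps are summarized as comments because their implementation lives in dur.cpp and is not part of this diff.

#include "mongo/db/concurrency/lock_state.h"

namespace mongo {

    // Hypothetical outline of one commit cycle of the flush thread (dur.cpp),
    // following the protocol in the comment above. Only the RAII acquisition is
    // taken from this header; the remaining steps are summarized as comments.
    void flushThreadCommitCycleSketch(Locker* locker) {
        // Acquire the flush lock in S-mode: waits for active writers (IX holders)
        // to drain and blocks new writers, while readers (IS holders) proceed.
        AutoAcquireFlushLockForMMAPV1Commit flushLock(locker);

        // 1. Write the accumulated journal entries to disk.
        // 2. Apply them to the shared view.
        // 3. Upgrade the flush lock from S to X (done inside the commit path)
        //    and remap the private view while no operations are active.

    }  // The destructor releases the flush lock and normal activity resumes.

}  // namespace mongo

As lock_state.cpp shows, the destructor releases the flush lock via _locker->unlock(resourceIdMMAPV1Flush), which is why a stack instance is sufficient.
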
diff --git a/src/mongo/dbtests/framework.cpp b/src/mongo/dbtests/framework.cpp
index 5c3f37421e8..800647cdea6 100644
--- a/src/mongo/dbtests/framework.cpp
+++ b/src/mongo/dbtests/framework.cpp
@@ -94,9 +94,9 @@ namespace mongo {
}
else if (minutesRunning > 1){
warning() << currentTestName << " has been running for more than " << minutesRunning-1 << " minutes." << endl;
-
+
// See what is stuck
- LockerImpl::dumpGlobalLockManager();
+ getGlobalLockManager()->dump();
}
}
}