author    Mark Benvenuto <mark.benvenuto@mongodb.com>    2015-06-20 00:22:50 -0400
committer Mark Benvenuto <mark.benvenuto@mongodb.com>    2015-06-20 10:56:02 -0400
commit    9c2ed42daa8fbbef4a919c21ec564e2db55e8d60 (patch)
tree      3814f79c10d7b490948d8cb7b112ac1dd41ceff1 /src/mongo/db/concurrency/lock_state.cpp
parent    01965cf52bce6976637ecb8f4a622aeb05ab256a (diff)
download  mongo-9c2ed42daa8fbbef4a919c21ec564e2db55e8d60.tar.gz
SERVER-18579: Clang-Format - reformat code, no comment reflow
Diffstat (limited to 'src/mongo/db/concurrency/lock_state.cpp')
 -rw-r--r--  src/mongo/db/concurrency/lock_state.cpp | 1293 +++++++++++-----------
 1 file changed, 643 insertions(+), 650 deletions(-)
diff --git a/src/mongo/db/concurrency/lock_state.cpp b/src/mongo/db/concurrency/lock_state.cpp
index 6c12a8ae1b1..37ca3c7f611 100644
--- a/src/mongo/db/concurrency/lock_state.cpp
+++ b/src/mongo/db/concurrency/lock_state.cpp
@@ -46,104 +46,102 @@
namespace mongo {
namespace {
- /**
- * Partitioned global lock statistics, so we don't hit the same bucket.
- */
- class PartitionedInstanceWideLockStats {
- MONGO_DISALLOW_COPYING(PartitionedInstanceWideLockStats);
- public:
+/**
+ * Partitioned global lock statistics, so we don't hit the same bucket.
+ */
+class PartitionedInstanceWideLockStats {
+ MONGO_DISALLOW_COPYING(PartitionedInstanceWideLockStats);
- PartitionedInstanceWideLockStats() { }
+public:
+ PartitionedInstanceWideLockStats() {}
- void recordAcquisition(LockerId id, ResourceId resId, LockMode mode) {
- _get(id).recordAcquisition(resId, mode);
- }
+ void recordAcquisition(LockerId id, ResourceId resId, LockMode mode) {
+ _get(id).recordAcquisition(resId, mode);
+ }
- void recordWait(LockerId id, ResourceId resId, LockMode mode) {
- _get(id).recordWait(resId, mode);
- }
+ void recordWait(LockerId id, ResourceId resId, LockMode mode) {
+ _get(id).recordWait(resId, mode);
+ }
- void recordWaitTime(LockerId id, ResourceId resId, LockMode mode, uint64_t waitMicros) {
- _get(id).recordWaitTime(resId, mode, waitMicros);
- }
+ void recordWaitTime(LockerId id, ResourceId resId, LockMode mode, uint64_t waitMicros) {
+ _get(id).recordWaitTime(resId, mode, waitMicros);
+ }
- void recordDeadlock(ResourceId resId, LockMode mode) {
- _get(resId).recordDeadlock(resId, mode);
- }
+ void recordDeadlock(ResourceId resId, LockMode mode) {
+ _get(resId).recordDeadlock(resId, mode);
+ }
- void report(SingleThreadedLockStats* outStats) const {
- for (int i = 0; i < NumPartitions; i++) {
- outStats->append(_partitions[i].stats);
- }
+ void report(SingleThreadedLockStats* outStats) const {
+ for (int i = 0; i < NumPartitions; i++) {
+ outStats->append(_partitions[i].stats);
}
+ }
- void reset() {
- for (int i = 0; i < NumPartitions; i++) {
- _partitions[i].stats.reset();
- }
+ void reset() {
+ for (int i = 0; i < NumPartitions; i++) {
+ _partitions[i].stats.reset();
}
+ }
- private:
-
- // This alignment is a best effort approach to ensure that each partition falls on a
- // separate page/cache line in order to avoid false sharing.
- struct MONGO_COMPILER_ALIGN_TYPE(128) AlignedLockStats {
- AtomicLockStats stats;
- };
+private:
+ // This alignment is a best effort approach to ensure that each partition falls on a
+ // separate page/cache line in order to avoid false sharing.
+ struct MONGO_COMPILER_ALIGN_TYPE(128) AlignedLockStats {
+ AtomicLockStats stats;
+ };
- enum { NumPartitions = 8 };
+ enum { NumPartitions = 8 };
- AtomicLockStats& _get(LockerId id) {
- return _partitions[id % NumPartitions].stats;
- }
+ AtomicLockStats& _get(LockerId id) {
+ return _partitions[id % NumPartitions].stats;
+ }
- AlignedLockStats _partitions[NumPartitions];
- };
+ AlignedLockStats _partitions[NumPartitions];
+};
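
The class above is the standard striping trick for hot, instance-wide counters: hash the caller's LockerId onto one of a few cache-line-aligned slots so concurrent writers rarely touch the same line, and fold the slots together only when reporting. A minimal standalone sketch of the same idea (ShardedCounter and its members are hypothetical names, not MongoDB code; alignas(64) stands in for MONGO_COMPILER_ALIGN_TYPE(128)):

    #include <atomic>
    #include <cstdint>

    // Hypothetical striped counter. alignas(64) plays the role of
    // MONGO_COMPILER_ALIGN_TYPE(128): each slot gets its own cache line,
    // so writers hashed to different slots avoid false sharing.
    class ShardedCounter {
    public:
        void record(uint64_t lockerId) {
            _slot(lockerId).fetch_add(1, std::memory_order_relaxed);
        }

        // Analogous to report(): fold all partitions into one total.
        uint64_t total() const {
            uint64_t sum = 0;
            for (int i = 0; i < kNumPartitions; i++)
                sum += _partitions[i].value.load(std::memory_order_relaxed);
            return sum;
        }

    private:
        static const int kNumPartitions = 8;

        struct alignas(64) Slot {
            std::atomic<uint64_t> value{0};
        };

        std::atomic<uint64_t>& _slot(uint64_t id) {
            return _partitions[id % kNumPartitions].value;
        }

        Slot _partitions[kNumPartitions];
    };
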
- // Global lock manager instance.
- LockManager globalLockManager;
+// Global lock manager instance.
+LockManager globalLockManager;
- // Global lock. Every server operation which uses the Locker must acquire this lock at least
- // once. See comments in the header file (begin/endTransaction) for more information.
- const ResourceId resourceIdGlobal = ResourceId(RESOURCE_GLOBAL,
- ResourceId::SINGLETON_GLOBAL);
+// Global lock. Every server operation which uses the Locker must acquire this lock at least
+// once. See comments in the header file (begin/endTransaction) for more information.
+const ResourceId resourceIdGlobal = ResourceId(RESOURCE_GLOBAL, ResourceId::SINGLETON_GLOBAL);
- // Flush lock. This is only used for the MMAP V1 storage engine and synchronizes journal writes
- // to the shared view and remaps. See the comments in the header for information on how MMAP V1
- // concurrency control works.
- const ResourceId resourceIdMMAPV1Flush = ResourceId(RESOURCE_MMAPV1_FLUSH,
- ResourceId::SINGLETON_MMAPV1_FLUSH);
+// Flush lock. This is only used for the MMAP V1 storage engine and synchronizes journal writes
+// to the shared view and remaps. See the comments in the header for information on how MMAP V1
+// concurrency control works.
+const ResourceId resourceIdMMAPV1Flush =
+ ResourceId(RESOURCE_MMAPV1_FLUSH, ResourceId::SINGLETON_MMAPV1_FLUSH);
- // How often (in millis) to check for deadlock if a lock has not been granted for some time
- const unsigned DeadlockTimeoutMs = 500;
+// How often (in millis) to check for deadlock if a lock has not been granted for some time
+const unsigned DeadlockTimeoutMs = 500;
- // Dispenses unique LockerId identifiers
- AtomicUInt64 idCounter(0);
+// Dispenses unique LockerId identifiers
+AtomicUInt64 idCounter(0);
- // Partitioned global lock statistics, so we don't hit the same bucket
- PartitionedInstanceWideLockStats globalStats;
+// Partitioned global lock statistics, so we don't hit the same bucket
+PartitionedInstanceWideLockStats globalStats;
- /**
- * Whether the particular lock's release should be delayed until the end of the operation. We
- * delay release of exclusive locks (locks that are for write operations) in order to ensure
- * that the data they protect is committed successfully.
- */
- bool shouldDelayUnlock(ResourceId resId, LockMode mode) {
- // Global and flush lock are not used to protect transactional resources and as such, they
- // need to be acquired and released when requested.
- if (resId.getType() == RESOURCE_GLOBAL) {
- return false;
- }
+/**
+ * Whether the particular lock's release should be delayed until the end of the operation. We
+ * delay release of exclusive locks (locks that are for write operations) in order to ensure
+ * that the data they protect is committed successfully.
+ */
+bool shouldDelayUnlock(ResourceId resId, LockMode mode) {
+ // Global and flush lock are not used to protect transactional resources and as such, they
+ // need to be acquired and released when requested.
+ if (resId.getType() == RESOURCE_GLOBAL) {
+ return false;
+ }
- if (resId == resourceIdMMAPV1Flush) {
- return false;
- }
+ if (resId == resourceIdMMAPV1Flush) {
+ return false;
+ }
- switch (mode) {
+ switch (mode) {
case MODE_X:
case MODE_IX:
return true;
@@ -154,614 +152,612 @@ namespace {
default:
invariant(false);
- }
}
+}
-} // namespace
+} // namespace
- template<bool IsForMMAPV1>
- bool LockerImpl<IsForMMAPV1>::isW() const {
- return getLockMode(resourceIdGlobal) == MODE_X;
- }
+template <bool IsForMMAPV1>
+bool LockerImpl<IsForMMAPV1>::isW() const {
+ return getLockMode(resourceIdGlobal) == MODE_X;
+}
- template<bool IsForMMAPV1>
- bool LockerImpl<IsForMMAPV1>::isR() const {
- return getLockMode(resourceIdGlobal) == MODE_S;
- }
+template <bool IsForMMAPV1>
+bool LockerImpl<IsForMMAPV1>::isR() const {
+ return getLockMode(resourceIdGlobal) == MODE_S;
+}
- template<bool IsForMMAPV1>
- bool LockerImpl<IsForMMAPV1>::isLocked() const {
- return getLockMode(resourceIdGlobal) != MODE_NONE;
- }
+template <bool IsForMMAPV1>
+bool LockerImpl<IsForMMAPV1>::isLocked() const {
+ return getLockMode(resourceIdGlobal) != MODE_NONE;
+}
- template<bool IsForMMAPV1>
- bool LockerImpl<IsForMMAPV1>::isWriteLocked() const {
- return isLockHeldForMode(resourceIdGlobal, MODE_IX);
- }
+template <bool IsForMMAPV1>
+bool LockerImpl<IsForMMAPV1>::isWriteLocked() const {
+ return isLockHeldForMode(resourceIdGlobal, MODE_IX);
+}
- template<bool IsForMMAPV1>
- bool LockerImpl<IsForMMAPV1>::isReadLocked() const {
- return isLockHeldForMode(resourceIdGlobal, MODE_IS);
- }
+template <bool IsForMMAPV1>
+bool LockerImpl<IsForMMAPV1>::isReadLocked() const {
+ return isLockHeldForMode(resourceIdGlobal, MODE_IS);
+}
- template<bool IsForMMAPV1>
- void LockerImpl<IsForMMAPV1>::assertEmptyAndReset() {
- invariant(!inAWriteUnitOfWork());
- invariant(_resourcesToUnlockAtEndOfUnitOfWork.empty());
- invariant(_requests.empty());
+template <bool IsForMMAPV1>
+void LockerImpl<IsForMMAPV1>::assertEmptyAndReset() {
+ invariant(!inAWriteUnitOfWork());
+ invariant(_resourcesToUnlockAtEndOfUnitOfWork.empty());
+ invariant(_requests.empty());
- // Reset the locking statistics so the object can be reused
- _stats.reset();
- }
+ // Reset the locking statistics so the object can be reused
+ _stats.reset();
+}
- template<bool IsForMMAPV1>
- void LockerImpl<IsForMMAPV1>::dump() const {
- StringBuilder ss;
- ss << "Locker id " << _id << " status: ";
+template <bool IsForMMAPV1>
+void LockerImpl<IsForMMAPV1>::dump() const {
+ StringBuilder ss;
+ ss << "Locker id " << _id << " status: ";
- _lock.lock();
- LockRequestsMap::ConstIterator it = _requests.begin();
- while (!it.finished()) {
- ss << it.key().toString() << " "
- << lockRequestStatusName(it->status) << " in "
- << modeName(it->mode) << "; ";
- it.next();
- }
- _lock.unlock();
-
- log() << ss.str() << std::endl;
+ _lock.lock();
+ LockRequestsMap::ConstIterator it = _requests.begin();
+ while (!it.finished()) {
+ ss << it.key().toString() << " " << lockRequestStatusName(it->status) << " in "
+ << modeName(it->mode) << "; ";
+ it.next();
}
+ _lock.unlock();
+ log() << ss.str() << std::endl;
+}
- //
- // CondVarLockGrantNotification
- //
- CondVarLockGrantNotification::CondVarLockGrantNotification() {
- clear();
- }
+//
+// CondVarLockGrantNotification
+//
- void CondVarLockGrantNotification::clear() {
- _result = LOCK_INVALID;
- }
+CondVarLockGrantNotification::CondVarLockGrantNotification() {
+ clear();
+}
- LockResult CondVarLockGrantNotification::wait(unsigned timeoutMs) {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
- while (_result == LOCK_INVALID) {
- if (boost::cv_status::timeout == _cond.wait_for(lock, Milliseconds(timeoutMs))) {
- // Timeout
- return LOCK_TIMEOUT;
- }
- }
+void CondVarLockGrantNotification::clear() {
+ _result = LOCK_INVALID;
+}
- return _result;
+LockResult CondVarLockGrantNotification::wait(unsigned timeoutMs) {
+ stdx::unique_lock<stdx::mutex> lock(_mutex);
+ while (_result == LOCK_INVALID) {
+ if (boost::cv_status::timeout == _cond.wait_for(lock, Milliseconds(timeoutMs))) {
+ // Timeout
+ return LOCK_TIMEOUT;
+ }
}
- void CondVarLockGrantNotification::notify(ResourceId resId, LockResult result) {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
- invariant(_result == LOCK_INVALID);
- _result = result;
+ return _result;
+}
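
CondVarLockGrantNotification is a one-shot grant mailbox: wait() loops on the predicate so spurious wakeups are harmless, and a timeout is detected through cv_status::timeout. A self-contained sketch of the same handshake (GrantBox and its Result enum are hypothetical):

    #include <chrono>
    #include <condition_variable>
    #include <mutex>

    enum Result { kInvalid, kGranted, kTimedOut };

    // Hypothetical one-shot grant notification: one waiter, one notifier.
    class GrantBox {
    public:
        Result wait(unsigned timeoutMs) {
            std::unique_lock<std::mutex> lock(_mutex);
            // Loop on the predicate: a spurious wakeup just re-checks it.
            while (_result == kInvalid) {
                if (_cond.wait_for(lock, std::chrono::milliseconds(timeoutMs)) ==
                    std::cv_status::timeout) {
                    return kTimedOut;
                }
            }
            return _result;
        }

        void notify(Result result) {
            std::lock_guard<std::mutex> lock(_mutex);
            _result = result;  // set under the mutex before waking the waiter
            _cond.notify_all();
        }

    private:
        std::mutex _mutex;
        std::condition_variable _cond;
        Result _result = kInvalid;
    };
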
- _cond.notify_all();
- }
+void CondVarLockGrantNotification::notify(ResourceId resId, LockResult result) {
+ stdx::unique_lock<stdx::mutex> lock(_mutex);
+ invariant(_result == LOCK_INVALID);
+ _result = result;
+ _cond.notify_all();
+}
- //
- // Locker
- //
- template<bool IsForMMAPV1>
- LockerImpl<IsForMMAPV1>::LockerImpl()
- : _id(idCounter.addAndFetch(1)),
- _requestStartTime(0),
- _wuowNestingLevel(0),
- _batchWriter(false) {
- }
+//
+// Locker
+//
- template<bool IsForMMAPV1>
- LockerImpl<IsForMMAPV1>::~LockerImpl() {
- // Cannot delete the Locker while there are still outstanding requests, because the
- // LockManager may attempt to access deleted memory. Besides, it is probably incorrect
- // to delete with unaccounted locks anyway.
- assertEmptyAndReset();
- }
+template <bool IsForMMAPV1>
+LockerImpl<IsForMMAPV1>::LockerImpl()
+ : _id(idCounter.addAndFetch(1)),
+ _requestStartTime(0),
+ _wuowNestingLevel(0),
+ _batchWriter(false) {}
- template<bool IsForMMAPV1>
- LockResult LockerImpl<IsForMMAPV1>::lockGlobal(LockMode mode, unsigned timeoutMs) {
- LockResult result = lockGlobalBegin(mode);
- if (result == LOCK_WAITING) {
- result = lockGlobalComplete(timeoutMs);
- }
+template <bool IsForMMAPV1>
+LockerImpl<IsForMMAPV1>::~LockerImpl() {
+ // Cannot delete the Locker while there are still outstanding requests, because the
+ // LockManager may attempt to access deleted memory. Besides, it is probably incorrect
+ // to delete with unaccounted locks anyway.
+ assertEmptyAndReset();
+}
- if (result == LOCK_OK) {
- lockMMAPV1Flush();
- }
+template <bool IsForMMAPV1>
+LockResult LockerImpl<IsForMMAPV1>::lockGlobal(LockMode mode, unsigned timeoutMs) {
+ LockResult result = lockGlobalBegin(mode);
+ if (result == LOCK_WAITING) {
+ result = lockGlobalComplete(timeoutMs);
+ }
- return result;
+ if (result == LOCK_OK) {
+ lockMMAPV1Flush();
}
- template<bool IsForMMAPV1>
- LockResult LockerImpl<IsForMMAPV1>::lockGlobalBegin(LockMode mode) {
- const LockResult result = lockBegin(resourceIdGlobal, mode);
- if (result == LOCK_OK) return LOCK_OK;
+ return result;
+}
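
lockGlobal splits acquisition into a non-blocking begin phase and a blocking complete phase, which lets callers overlap other setup work with time spent queued behind conflicting lockers. A hedged sketch of the calling pattern (tryAcquire/waitForGrant are hypothetical stand-ins for lockGlobalBegin/lockGlobalComplete; the stubs exist only so the sketch compiles):

    #include <iostream>

    enum LockResult { LOCK_OK, LOCK_WAITING, LOCK_TIMEOUT };

    LockResult tryAcquire() {
        return LOCK_WAITING;  // stub: enqueue the request, never block
    }

    LockResult waitForGrant(unsigned /*timeoutMs*/) {
        return LOCK_OK;  // stub: block until granted or timed out
    }

    LockResult acquireGlobal(unsigned timeoutMs) {
        LockResult result = tryAcquire();  // fast path: uncontended grant
        if (result == LOCK_WAITING) {
            // The request is already queued; any work done here overlaps
            // with conflicting lockers draining ahead of us.
            result = waitForGrant(timeoutMs);  // slow path: bounded block
        }
        return result;
    }

    int main() {
        std::cout << (acquireGlobal(500) == LOCK_OK) << "\n";
    }
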
- // Currently, deadlock detection does not happen inline with lock acquisition so the only
- // unsuccessful result that the lock manager would return is LOCK_WAITING.
- invariant(result == LOCK_WAITING);
+template <bool IsForMMAPV1>
+LockResult LockerImpl<IsForMMAPV1>::lockGlobalBegin(LockMode mode) {
+ const LockResult result = lockBegin(resourceIdGlobal, mode);
+ if (result == LOCK_OK)
+ return LOCK_OK;
- return result;
- }
+ // Currently, deadlock detection does not happen inline with lock acquisition so the only
+ // unsuccessful result that the lock manager would return is LOCK_WAITING.
+ invariant(result == LOCK_WAITING);
- template<bool IsForMMAPV1>
- LockResult LockerImpl<IsForMMAPV1>::lockGlobalComplete(unsigned timeoutMs) {
- return lockComplete(resourceIdGlobal, getLockMode(resourceIdGlobal), timeoutMs, false);
- }
+ return result;
+}
- template<bool IsForMMAPV1>
- void LockerImpl<IsForMMAPV1>::lockMMAPV1Flush() {
- if (!IsForMMAPV1) return;
+template <bool IsForMMAPV1>
+LockResult LockerImpl<IsForMMAPV1>::lockGlobalComplete(unsigned timeoutMs) {
+ return lockComplete(resourceIdGlobal, getLockMode(resourceIdGlobal), timeoutMs, false);
+}
- // The flush lock always has a reference count of 1, because it is dropped at the end of
- // each write unit of work in order to allow the flush thread to run. See the comments in
- // the header for information on how the MMAP V1 journaling system works.
- LockRequest* globalLockRequest = _requests.find(resourceIdGlobal).objAddr();
- if (globalLockRequest->recursiveCount == 1) {
- invariant(LOCK_OK == lock(resourceIdMMAPV1Flush, _getModeForMMAPV1FlushLock()));
- }
+template <bool IsForMMAPV1>
+void LockerImpl<IsForMMAPV1>::lockMMAPV1Flush() {
+ if (!IsForMMAPV1)
+ return;
- dassert(getLockMode(resourceIdMMAPV1Flush) == _getModeForMMAPV1FlushLock());
+ // The flush lock always has a reference count of 1, because it is dropped at the end of
+ // each write unit of work in order to allow the flush thread to run. See the comments in
+ // the header for information on how the MMAP V1 journaling system works.
+ LockRequest* globalLockRequest = _requests.find(resourceIdGlobal).objAddr();
+ if (globalLockRequest->recursiveCount == 1) {
+ invariant(LOCK_OK == lock(resourceIdMMAPV1Flush, _getModeForMMAPV1FlushLock()));
}
- template<bool IsForMMAPV1>
- void LockerImpl<IsForMMAPV1>::downgradeGlobalXtoSForMMAPV1() {
- invariant(!inAWriteUnitOfWork());
+ dassert(getLockMode(resourceIdMMAPV1Flush) == _getModeForMMAPV1FlushLock());
+}
- LockRequest* globalLockRequest = _requests.find(resourceIdGlobal).objAddr();
- invariant(globalLockRequest->mode == MODE_X);
- invariant(globalLockRequest->recursiveCount == 1);
+template <bool IsForMMAPV1>
+void LockerImpl<IsForMMAPV1>::downgradeGlobalXtoSForMMAPV1() {
+ invariant(!inAWriteUnitOfWork());
- // Making this call here will record lock downgrades as acquisitions, which is acceptable
- globalStats.recordAcquisition(_id, resourceIdGlobal, MODE_S);
- _stats.recordAcquisition(resourceIdGlobal, MODE_S);
+ LockRequest* globalLockRequest = _requests.find(resourceIdGlobal).objAddr();
+ invariant(globalLockRequest->mode == MODE_X);
+ invariant(globalLockRequest->recursiveCount == 1);
- globalLockManager.downgrade(globalLockRequest, MODE_S);
+ // Making this call here will record lock downgrades as acquisitions, which is acceptable
+ globalStats.recordAcquisition(_id, resourceIdGlobal, MODE_S);
+ _stats.recordAcquisition(resourceIdGlobal, MODE_S);
- if (IsForMMAPV1) {
- invariant(unlock(resourceIdMMAPV1Flush));
- }
+ globalLockManager.downgrade(globalLockRequest, MODE_S);
+
+ if (IsForMMAPV1) {
+ invariant(unlock(resourceIdMMAPV1Flush));
}
+}
- template<bool IsForMMAPV1>
- bool LockerImpl<IsForMMAPV1>::unlockAll() {
- if (!unlock(resourceIdGlobal)) {
- return false;
- }
+template <bool IsForMMAPV1>
+bool LockerImpl<IsForMMAPV1>::unlockAll() {
+ if (!unlock(resourceIdGlobal)) {
+ return false;
+ }
- LockRequestsMap::Iterator it = _requests.begin();
- while (!it.finished()) {
- // If we're here we should only have one reference to any lock. It is a programming
- // error for any lock to have more references than the global lock, because every
- // scope starts by calling lockGlobal.
- if (it.key().getType() == RESOURCE_GLOBAL) {
- it.next();
- }
- else {
- invariant(_unlockImpl(it));
- }
+ LockRequestsMap::Iterator it = _requests.begin();
+ while (!it.finished()) {
+ // If we're here we should only have one reference to any lock. It is a programming
+ // error for any lock to have more references than the global lock, because every
+ // scope starts by calling lockGlobal.
+ if (it.key().getType() == RESOURCE_GLOBAL) {
+ it.next();
+ } else {
+ invariant(_unlockImpl(it));
}
-
- return true;
}
- template<bool IsForMMAPV1>
- void LockerImpl<IsForMMAPV1>::beginWriteUnitOfWork() {
- // Sanity check that write transactions under MMAP V1 have acquired the flush lock, so we
- // don't allow partial changes to be written.
- dassert(!IsForMMAPV1 || isLockHeldForMode(resourceIdMMAPV1Flush, MODE_IX));
+ return true;
+}
- _wuowNestingLevel++;
- }
+template <bool IsForMMAPV1>
+void LockerImpl<IsForMMAPV1>::beginWriteUnitOfWork() {
+ // Sanity check that write transactions under MMAP V1 have acquired the flush lock, so we
+ // don't allow partial changes to be written.
+ dassert(!IsForMMAPV1 || isLockHeldForMode(resourceIdMMAPV1Flush, MODE_IX));
- template<bool IsForMMAPV1>
- void LockerImpl<IsForMMAPV1>::endWriteUnitOfWork() {
- invariant(_wuowNestingLevel > 0);
+ _wuowNestingLevel++;
+}
- if (--_wuowNestingLevel > 0) {
- // Don't do anything unless leaving outermost WUOW.
- return;
- }
+template <bool IsForMMAPV1>
+void LockerImpl<IsForMMAPV1>::endWriteUnitOfWork() {
+ invariant(_wuowNestingLevel > 0);
- while (!_resourcesToUnlockAtEndOfUnitOfWork.empty()) {
- unlock(_resourcesToUnlockAtEndOfUnitOfWork.front());
- _resourcesToUnlockAtEndOfUnitOfWork.pop();
- }
+ if (--_wuowNestingLevel > 0) {
+ // Don't do anything unless leaving outermost WUOW.
+ return;
+ }
- // For MMAP V1, we need to yield the flush lock so that the flush thread can run
- if (IsForMMAPV1) {
- invariant(unlock(resourceIdMMAPV1Flush));
- invariant(LOCK_OK == lock(resourceIdMMAPV1Flush, _getModeForMMAPV1FlushLock()));
- }
+ while (!_resourcesToUnlockAtEndOfUnitOfWork.empty()) {
+ unlock(_resourcesToUnlockAtEndOfUnitOfWork.front());
+ _resourcesToUnlockAtEndOfUnitOfWork.pop();
}
- template<bool IsForMMAPV1>
- LockResult LockerImpl<IsForMMAPV1>::lock(ResourceId resId,
- LockMode mode,
- unsigned timeoutMs,
- bool checkDeadlock) {
+ // For MMAP V1, we need to yield the flush lock so that the flush thread can run
+ if (IsForMMAPV1) {
+ invariant(unlock(resourceIdMMAPV1Flush));
+ invariant(LOCK_OK == lock(resourceIdMMAPV1Flush, _getModeForMMAPV1FlushLock()));
+ }
+}
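
begin/endWriteUnitOfWork implement nested units of work with a counter: only the outermost end drains the deferred-unlock queue, which is where releases parked by shouldDelayUnlock finally happen. A simplified model of just that bookkeeping (MiniLocker is hypothetical):

    #include <cassert>
    #include <queue>
    #include <string>

    // Hypothetical locker showing only the unit-of-work bookkeeping.
    class MiniLocker {
    public:
        void beginWUOW() {
            _wuowNestingLevel++;
        }

        void endWUOW() {
            assert(_wuowNestingLevel > 0);
            if (--_wuowNestingLevel > 0)
                return;  // still inside an enclosing unit of work
            // Outermost end: release everything parked by unlock().
            while (!_deferred.empty()) {
                reallyUnlock(_deferred.front());
                _deferred.pop();
            }
        }

        // Exclusive locks released inside a WUOW are deferred to commit,
        // mirroring shouldDelayUnlock().
        void unlock(const std::string& res, bool exclusive) {
            if (_wuowNestingLevel > 0 && exclusive) {
                _deferred.push(res);
                return;
            }
            reallyUnlock(res);
        }

    private:
        void reallyUnlock(const std::string&) { /* hand back to the lock manager */ }

        int _wuowNestingLevel = 0;
        std::queue<std::string> _deferred;
    };
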
- const LockResult result = lockBegin(resId, mode);
+template <bool IsForMMAPV1>
+LockResult LockerImpl<IsForMMAPV1>::lock(ResourceId resId,
+ LockMode mode,
+ unsigned timeoutMs,
+ bool checkDeadlock) {
+ const LockResult result = lockBegin(resId, mode);
- // Fast, uncontended path
- if (result == LOCK_OK) return LOCK_OK;
+ // Fast, uncontended path
+ if (result == LOCK_OK)
+ return LOCK_OK;
- // Currently, deadlock detection does not happen inline with lock acquisition so the only
- // unsuccessful result that the lock manager would return is LOCK_WAITING.
- invariant(result == LOCK_WAITING);
+ // Currently, deadlock detection does not happen inline with lock acquisition so the only
+ // unsuccessful result that the lock manager would return is LOCK_WAITING.
+ invariant(result == LOCK_WAITING);
- return lockComplete(resId, mode, timeoutMs, checkDeadlock);
- }
+ return lockComplete(resId, mode, timeoutMs, checkDeadlock);
+}
- template<bool IsForMMAPV1>
- void LockerImpl<IsForMMAPV1>::downgrade(ResourceId resId, LockMode newMode) {
- LockRequestsMap::Iterator it = _requests.find(resId);
- globalLockManager.downgrade(it.objAddr(), newMode);
- }
+template <bool IsForMMAPV1>
+void LockerImpl<IsForMMAPV1>::downgrade(ResourceId resId, LockMode newMode) {
+ LockRequestsMap::Iterator it = _requests.find(resId);
+ globalLockManager.downgrade(it.objAddr(), newMode);
+}
- template<bool IsForMMAPV1>
- bool LockerImpl<IsForMMAPV1>::unlock(ResourceId resId) {
- LockRequestsMap::Iterator it = _requests.find(resId);
- return _unlockImpl(it);
- }
+template <bool IsForMMAPV1>
+bool LockerImpl<IsForMMAPV1>::unlock(ResourceId resId) {
+ LockRequestsMap::Iterator it = _requests.find(resId);
+ return _unlockImpl(it);
+}
- template<bool IsForMMAPV1>
- LockMode LockerImpl<IsForMMAPV1>::getLockMode(ResourceId resId) const {
- scoped_spinlock scopedLock(_lock);
+template <bool IsForMMAPV1>
+LockMode LockerImpl<IsForMMAPV1>::getLockMode(ResourceId resId) const {
+ scoped_spinlock scopedLock(_lock);
- const LockRequestsMap::ConstIterator it = _requests.find(resId);
- if (!it) return MODE_NONE;
+ const LockRequestsMap::ConstIterator it = _requests.find(resId);
+ if (!it)
+ return MODE_NONE;
- return it->mode;
- }
+ return it->mode;
+}
- template<bool IsForMMAPV1>
- bool LockerImpl<IsForMMAPV1>::isLockHeldForMode(ResourceId resId, LockMode mode) const {
- return isModeCovered(mode, getLockMode(resId));
- }
+template <bool IsForMMAPV1>
+bool LockerImpl<IsForMMAPV1>::isLockHeldForMode(ResourceId resId, LockMode mode) const {
+ return isModeCovered(mode, getLockMode(resId));
+}
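
isLockHeldForMode delegates to a covering relation between modes: for example, a held MODE_X satisfies a request for MODE_IX or MODE_S. One plausible encoding of such a relation as a bitmask table (illustrative only; the real isModeCovered table lives in the lock manager, not here):

    // Illustrative mode-covering table: kCoverMask[held] lists, as a
    // bitmask, which requested modes a held mode satisfies.
    enum Mode { MODE_NONE, MODE_IS, MODE_IX, MODE_S, MODE_X, ModesCount };

    static const int kCoverMask[ModesCount] = {
        0,                                // held MODE_NONE
        (1 << MODE_IS),                   // held MODE_IS
        (1 << MODE_IS) | (1 << MODE_IX),  // held MODE_IX
        (1 << MODE_IS) | (1 << MODE_S),   // held MODE_S
        (1 << MODE_IS) | (1 << MODE_IX) | (1 << MODE_S) | (1 << MODE_X),  // held MODE_X
    };

    bool modeCovered(Mode requested, Mode held) {
        return (kCoverMask[held] & (1 << requested)) != 0;
    }
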
- template<bool IsForMMAPV1>
- bool LockerImpl<IsForMMAPV1>::isDbLockedForMode(StringData dbName,
- LockMode mode) const {
- invariant(nsIsDbOnly(dbName));
+template <bool IsForMMAPV1>
+bool LockerImpl<IsForMMAPV1>::isDbLockedForMode(StringData dbName, LockMode mode) const {
+ invariant(nsIsDbOnly(dbName));
- if (isW()) return true;
- if (isR() && isSharedLockMode(mode)) return true;
+ if (isW())
+ return true;
+ if (isR() && isSharedLockMode(mode))
+ return true;
- const ResourceId resIdDb(RESOURCE_DATABASE, dbName);
- return isLockHeldForMode(resIdDb, mode);
- }
+ const ResourceId resIdDb(RESOURCE_DATABASE, dbName);
+ return isLockHeldForMode(resIdDb, mode);
+}
- template<bool IsForMMAPV1>
- bool LockerImpl<IsForMMAPV1>::isCollectionLockedForMode(StringData ns,
- LockMode mode) const {
- invariant(nsIsFull(ns));
+template <bool IsForMMAPV1>
+bool LockerImpl<IsForMMAPV1>::isCollectionLockedForMode(StringData ns, LockMode mode) const {
+ invariant(nsIsFull(ns));
- if (isW()) return true;
- if (isR() && isSharedLockMode(mode)) return true;
+ if (isW())
+ return true;
+ if (isR() && isSharedLockMode(mode))
+ return true;
- const NamespaceString nss(ns);
- const ResourceId resIdDb(RESOURCE_DATABASE, nss.db());
+ const NamespaceString nss(ns);
+ const ResourceId resIdDb(RESOURCE_DATABASE, nss.db());
- LockMode dbMode = getLockMode(resIdDb);
+ LockMode dbMode = getLockMode(resIdDb);
- switch (dbMode) {
- case MODE_NONE: return false;
- case MODE_X: return true;
- case MODE_S: return isSharedLockMode(mode);
+ switch (dbMode) {
+ case MODE_NONE:
+ return false;
+ case MODE_X:
+ return true;
+ case MODE_S:
+ return isSharedLockMode(mode);
case MODE_IX:
- case MODE_IS:
- {
- const ResourceId resIdColl(RESOURCE_COLLECTION, ns);
- return isLockHeldForMode(resIdColl, mode);
- }
- break;
+ case MODE_IS: {
+ const ResourceId resIdColl(RESOURCE_COLLECTION, ns);
+ return isLockHeldForMode(resIdColl, mode);
+ } break;
case LockModesCount:
break;
- }
-
- invariant(false);
- return false;
}
- template<bool IsForMMAPV1>
- ResourceId LockerImpl<IsForMMAPV1>::getWaitingResource() const {
- scoped_spinlock scopedLock(_lock);
+ invariant(false);
+ return false;
+}
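
The collection check above walks the lock hierarchy top-down: a global W or R lock answers immediately, a database-level X or S answers for all of that database's collections, and only the intent modes fall through to the collection resource itself. The same decision as a standalone function (a sketch; covers() is a simplified stand-in for the real compatibility rules):

    // Illustrative top-down hierarchy walk for "is this collection locked
    // for mode m?". globalMode/dbMode/collMode would come from _requests.
    enum M { NONE, IS, IX, S, X };

    bool isShared(M m) {
        return m == S || m == IS;
    }

    // Simplified covering relation between a held mode and a requested one.
    bool covers(M held, M req) {
        if (held == X)
            return true;
        if (held == S)
            return isShared(req);
        if (held == IX)
            return req == IX || req == IS;
        if (held == IS)
            return req == IS;
        return false;
    }

    bool collectionLockedFor(M globalMode, M dbMode, M collMode, M m) {
        if (globalMode == X)
            return true;  // isW(): global exclusive covers everything
        if (globalMode == S && isShared(m))
            return true;  // isR(): global shared covers reads
        switch (dbMode) {
            case NONE:
                return false;  // no db lock => no collection lock
            case X:
                return true;  // db X covers every collection in it
            case S:
                return isShared(m);  // db S covers shared requests
            case IX:
            case IS:
                return covers(collMode, m);  // check the collection itself
        }
        return false;
    }
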
- LockRequestsMap::ConstIterator it = _requests.begin();
- while (!it.finished()) {
- if (it->status != LockRequest::STATUS_GRANTED) {
- return it.key();
- }
+template <bool IsForMMAPV1>
+ResourceId LockerImpl<IsForMMAPV1>::getWaitingResource() const {
+ scoped_spinlock scopedLock(_lock);
- it.next();
+ LockRequestsMap::ConstIterator it = _requests.begin();
+ while (!it.finished()) {
+ if (it->status != LockRequest::STATUS_GRANTED) {
+ return it.key();
}
- return ResourceId();
+ it.next();
}
- template<bool IsForMMAPV1>
- void LockerImpl<IsForMMAPV1>::getLockerInfo(LockerInfo* lockerInfo) const {
- invariant(lockerInfo);
+ return ResourceId();
+}
- // Zero-out the contents
- lockerInfo->locks.clear();
- lockerInfo->waitingResource = ResourceId();
- lockerInfo->stats.reset();
+template <bool IsForMMAPV1>
+void LockerImpl<IsForMMAPV1>::getLockerInfo(LockerInfo* lockerInfo) const {
+ invariant(lockerInfo);
- _lock.lock();
- LockRequestsMap::ConstIterator it = _requests.begin();
- while (!it.finished()) {
- OneLock info;
- info.resourceId = it.key();
- info.mode = it->mode;
+ // Zero-out the contents
+ lockerInfo->locks.clear();
+ lockerInfo->waitingResource = ResourceId();
+ lockerInfo->stats.reset();
- lockerInfo->locks.push_back(info);
- it.next();
- }
- _lock.unlock();
-
- std::sort(lockerInfo->locks.begin(), lockerInfo->locks.end());
+ _lock.lock();
+ LockRequestsMap::ConstIterator it = _requests.begin();
+ while (!it.finished()) {
+ OneLock info;
+ info.resourceId = it.key();
+ info.mode = it->mode;
- lockerInfo->waitingResource = getWaitingResource();
- lockerInfo->stats.append(_stats);
+ lockerInfo->locks.push_back(info);
+ it.next();
}
+ _lock.unlock();
- template<bool IsForMMAPV1>
- bool LockerImpl<IsForMMAPV1>::saveLockStateAndUnlock(Locker::LockSnapshot* stateOut) {
- // We shouldn't be saving and restoring lock state from inside a WriteUnitOfWork.
- invariant(!inAWriteUnitOfWork());
-
- // Clear out whatever is in stateOut.
- stateOut->locks.clear();
- stateOut->globalMode = MODE_NONE;
-
- // First, we look at the global lock. There is special handling for this (as the flush
- // lock goes along with it) so we store it separately from the more pedestrian locks.
- LockRequestsMap::Iterator globalRequest = _requests.find(resourceIdGlobal);
- if (!globalRequest) {
- // If there's no global lock there isn't really anything to do.
- invariant(_requests.empty());
- return false;
- }
-
- // If the global lock has been acquired more than once, we're probably somewhere in a
- // DBDirectClient call. It's not safe to release and reacquire locks -- the context using
- // the DBDirectClient is probably not prepared for lock release.
- if (globalRequest->recursiveCount > 1) {
- return false;
- }
+ std::sort(lockerInfo->locks.begin(), lockerInfo->locks.end());
- // The global lock must have been acquired just once
- stateOut->globalMode = globalRequest->mode;
- invariant(unlock(resourceIdGlobal));
+ lockerInfo->waitingResource = getWaitingResource();
+ lockerInfo->stats.append(_stats);
+}
- // Next, the non-global locks.
- for (LockRequestsMap::Iterator it = _requests.begin(); !it.finished(); it.next()) {
- const ResourceId resId = it.key();
+template <bool IsForMMAPV1>
+bool LockerImpl<IsForMMAPV1>::saveLockStateAndUnlock(Locker::LockSnapshot* stateOut) {
+ // We shouldn't be saving and restoring lock state from inside a WriteUnitOfWork.
+ invariant(!inAWriteUnitOfWork());
- // We should never have to save and restore metadata locks.
- invariant((IsForMMAPV1 && (resourceIdMMAPV1Flush == resId)) ||
- RESOURCE_DATABASE == resId.getType() ||
- RESOURCE_COLLECTION == resId.getType() ||
- (RESOURCE_GLOBAL == resId.getType() && isSharedLockMode(it->mode)));
+ // Clear out whatever is in stateOut.
+ stateOut->locks.clear();
+ stateOut->globalMode = MODE_NONE;
- // And, stuff the info into the out parameter.
- OneLock info;
- info.resourceId = resId;
- info.mode = it->mode;
+ // First, we look at the global lock. There is special handling for this (as the flush
+ // lock goes along with it) so we store it separately from the more pedestrian locks.
+ LockRequestsMap::Iterator globalRequest = _requests.find(resourceIdGlobal);
+ if (!globalRequest) {
+ // If there's no global lock there isn't really anything to do.
+ invariant(_requests.empty());
+ return false;
+ }
- stateOut->locks.push_back(info);
+ // If the global lock has been acquired more than once, we're probably somewhere in a
+ // DBDirectClient call. It's not safe to release and reacquire locks -- the context using
+ // the DBDirectClient is probably not prepared for lock release.
+ if (globalRequest->recursiveCount > 1) {
+ return false;
+ }
- invariant(unlock(resId));
- }
+ // The global lock must have been acquired just once
+ stateOut->globalMode = globalRequest->mode;
+ invariant(unlock(resourceIdGlobal));
- // Sort locks by ResourceId. They'll later be acquired in this canonical locking order.
- std::sort(stateOut->locks.begin(), stateOut->locks.end());
+ // Next, the non-global locks.
+ for (LockRequestsMap::Iterator it = _requests.begin(); !it.finished(); it.next()) {
+ const ResourceId resId = it.key();
- return true;
- }
+ // We should never have to save and restore metadata locks.
+ invariant((IsForMMAPV1 && (resourceIdMMAPV1Flush == resId)) ||
+ RESOURCE_DATABASE == resId.getType() || RESOURCE_COLLECTION == resId.getType() ||
+ (RESOURCE_GLOBAL == resId.getType() && isSharedLockMode(it->mode)));
- template<bool IsForMMAPV1>
- void LockerImpl<IsForMMAPV1>::restoreLockState(const Locker::LockSnapshot& state) {
- // We shouldn't be saving and restoring lock state from inside a WriteUnitOfWork.
- invariant(!inAWriteUnitOfWork());
+ // And, stuff the info into the out parameter.
+ OneLock info;
+ info.resourceId = resId;
+ info.mode = it->mode;
- std::vector<OneLock>::const_iterator it = state.locks.begin();
- // If we locked the PBWM, it must be locked before the resourceIdGlobal resource.
- if (it != state.locks.end() && it->resourceId == resourceIdParallelBatchWriterMode) {
- invariant(LOCK_OK == lock(it->resourceId, it->mode));
- it++;
- }
+ stateOut->locks.push_back(info);
- invariant(LOCK_OK == lockGlobal(state.globalMode));
- for (; it != state.locks.end(); it++) {
- // This is a sanity check that lockGlobal restored the MMAP V1 flush lock in the
- // expected mode.
- if (IsForMMAPV1 && (it->resourceId == resourceIdMMAPV1Flush)) {
- invariant(it->mode == _getModeForMMAPV1FlushLock());
- }
- else {
- invariant(LOCK_OK == lock(it->resourceId, it->mode));
- }
- }
+ invariant(unlock(resId));
}
- template<bool IsForMMAPV1>
- LockResult LockerImpl<IsForMMAPV1>::lockBegin(ResourceId resId, LockMode mode) {
- dassert(!getWaitingResource().isValid());
+ // Sort locks by ResourceId. They'll later be acquired in this canonical locking order.
+ std::sort(stateOut->locks.begin(), stateOut->locks.end());
- LockRequest* request;
- bool isNew = true;
+ return true;
+}
- LockRequestsMap::Iterator it = _requests.find(resId);
- if (!it) {
- scoped_spinlock scopedLock(_lock);
- LockRequestsMap::Iterator itNew = _requests.insert(resId);
- itNew->initNew(this, &_notify);
+template <bool IsForMMAPV1>
+void LockerImpl<IsForMMAPV1>::restoreLockState(const Locker::LockSnapshot& state) {
+ // We shouldn't be saving and restoring lock state from inside a WriteUnitOfWork.
+ invariant(!inAWriteUnitOfWork());
- request = itNew.objAddr();
- }
- else {
- request = it.objAddr();
- isNew = false;
- }
+ std::vector<OneLock>::const_iterator it = state.locks.begin();
+ // If we locked the PBWM, it must be locked before the resourceIdGlobal resource.
+ if (it != state.locks.end() && it->resourceId == resourceIdParallelBatchWriterMode) {
+ invariant(LOCK_OK == lock(it->resourceId, it->mode));
+ it++;
+ }
- // Making this call here will record lock re-acquisitions and conversions as well.
- globalStats.recordAcquisition(_id, resId, mode);
- _stats.recordAcquisition(resId, mode);
-
- // Give priority to the full modes for global, parallel batch writer mode,
- // and flush lock so we don't stall global operations such as shutdown or flush.
- const ResourceType resType = resId.getType();
- if (resType == RESOURCE_GLOBAL || (IsForMMAPV1 && resId == resourceIdMMAPV1Flush)) {
- if (mode == MODE_S || mode == MODE_X) {
- request->enqueueAtFront = true;
- request->compatibleFirst = true;
- }
- }
- else {
- // These are sanity checks that the global and flush locks are always acquired
- // before any other lock and that they stay in sync with the nesting.
- DEV {
- const LockRequestsMap::Iterator itGlobal = _requests.find(resourceIdGlobal);
- invariant(itGlobal->recursiveCount > 0);
- invariant(itGlobal->mode != MODE_NONE);
-
- // Check the MMAP V1 flush lock is held in the appropriate mode
- invariant(!IsForMMAPV1 || isLockHeldForMode(resourceIdMMAPV1Flush,
- _getModeForMMAPV1FlushLock()));
- };
+ invariant(LOCK_OK == lockGlobal(state.globalMode));
+ for (; it != state.locks.end(); it++) {
+ // This is a sanity check that lockGlobal restored the MMAP V1 flush lock in the
+ // expected mode.
+ if (IsForMMAPV1 && (it->resourceId == resourceIdMMAPV1Flush)) {
+ invariant(it->mode == _getModeForMMAPV1FlushLock());
+ } else {
+ invariant(LOCK_OK == lock(it->resourceId, it->mode));
}
+ }
+}
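
saveLockStateAndUnlock and restoreLockState together implement lock yielding: snapshot what is held, release everything, let other operations run, then reacquire in a canonical sorted order so two restoring lockers cannot deadlock against each other. A toy model of that shape (YieldLocker is hypothetical and tracks resources by name only):

    #include <algorithm>
    #include <string>
    #include <vector>

    // Hypothetical mini-locker showing the save/restore yield shape.
    struct YieldSnapshot {
        std::vector<std::string> resources;  // what was held, by name
    };

    struct YieldLocker {
        std::vector<std::string> held;

        bool saveAndUnlockAll(YieldSnapshot* out) {
            if (held.empty())
                return false;  // nothing held: nothing to yield
            out->resources = held;
            held.clear();  // release everything while yielded
            return true;
        }

        void restore(const YieldSnapshot& snap) {
            // Reacquire in canonical (sorted) order so two lockers
            // restoring concurrently cannot deadlock against each other.
            std::vector<std::string> sorted = snap.resources;
            std::sort(sorted.begin(), sorted.end());
            held = sorted;  // in the real code, each entry is lock()-ed
        }
    };
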
- // The notification object must be cleared before we invoke the lock manager, because
- // otherwise we might reset state if the lock becomes granted very fast.
- _notify.clear();
-
- LockResult result = isNew ? globalLockManager.lock(resId, request, mode) :
- globalLockManager.convert(resId, request, mode);
+template <bool IsForMMAPV1>
+LockResult LockerImpl<IsForMMAPV1>::lockBegin(ResourceId resId, LockMode mode) {
+ dassert(!getWaitingResource().isValid());
- if (result == LOCK_WAITING) {
- // Start counting the wait time so that lockComplete can update that metric
- _requestStartTime = curTimeMicros64();
- globalStats.recordWait(_id, resId, mode);
- _stats.recordWait(resId, mode);
- }
+ LockRequest* request;
+ bool isNew = true;
- return result;
+ LockRequestsMap::Iterator it = _requests.find(resId);
+ if (!it) {
+ scoped_spinlock scopedLock(_lock);
+ LockRequestsMap::Iterator itNew = _requests.insert(resId);
+ itNew->initNew(this, &_notify);
+
+ request = itNew.objAddr();
+ } else {
+ request = it.objAddr();
+ isNew = false;
+ }
+
+ // Making this call here will record lock re-acquisitions and conversions as well.
+ globalStats.recordAcquisition(_id, resId, mode);
+ _stats.recordAcquisition(resId, mode);
+
+ // Give priority to the full modes for global, parallel batch writer mode,
+ // and flush lock so we don't stall global operations such as shutdown or flush.
+ const ResourceType resType = resId.getType();
+ if (resType == RESOURCE_GLOBAL || (IsForMMAPV1 && resId == resourceIdMMAPV1Flush)) {
+ if (mode == MODE_S || mode == MODE_X) {
+ request->enqueueAtFront = true;
+ request->compatibleFirst = true;
+ }
+ } else {
+ // These are sanity checks that the global and flush locks are always acquired
+ // before any other lock and that they stay in sync with the nesting.
+ DEV {
+ const LockRequestsMap::Iterator itGlobal = _requests.find(resourceIdGlobal);
+ invariant(itGlobal->recursiveCount > 0);
+ invariant(itGlobal->mode != MODE_NONE);
+
+ // Check the MMAP V1 flush lock is held in the appropriate mode
+ invariant(!IsForMMAPV1 ||
+ isLockHeldForMode(resourceIdMMAPV1Flush, _getModeForMMAPV1FlushLock()));
+ };
}
- template<bool IsForMMAPV1>
- LockResult LockerImpl<IsForMMAPV1>::lockComplete(ResourceId resId,
- LockMode mode,
- unsigned timeoutMs,
- bool checkDeadlock) {
+ // The notification object must be cleared before we invoke the lock manager, because
+ // otherwise we might reset state if the lock becomes granted very fast.
+ _notify.clear();
- // Under the MMAP V1 engine a deadlock can occur if a thread goes to sleep waiting on a
- // DB lock while holding the flush lock, so the flush lock has to be released. This is
- // only correct to do if not in a write unit of work.
- const bool yieldFlushLock = IsForMMAPV1 &&
- !inAWriteUnitOfWork() &&
- resId.getType() != RESOURCE_GLOBAL &&
- resId != resourceIdMMAPV1Flush;
- if (yieldFlushLock) {
- invariant(unlock(resourceIdMMAPV1Flush));
- }
+ LockResult result = isNew ? globalLockManager.lock(resId, request, mode)
+ : globalLockManager.convert(resId, request, mode);
- LockResult result;
+ if (result == LOCK_WAITING) {
+ // Start counting the wait time so that lockComplete can update that metric
+ _requestStartTime = curTimeMicros64();
+ globalStats.recordWait(_id, resId, mode);
+ _stats.recordWait(resId, mode);
+ }
- // Don't go sleeping without bound in order to be able to report long waits or wake up for
- // deadlock detection.
- unsigned waitTimeMs = std::min(timeoutMs, DeadlockTimeoutMs);
- while (true) {
- // It is OK if this call wakes up spuriously, because we re-evaluate the remaining
- // wait time anyway.
- result = _notify.wait(waitTimeMs);
+ return result;
+}
- // Account for the time spent waiting on the notification object
- const uint64_t elapsedTimeMicros = curTimeMicros64() - _requestStartTime;
- globalStats.recordWaitTime(_id, resId, mode, elapsedTimeMicros);
- _stats.recordWaitTime(resId, mode, elapsedTimeMicros);
+template <bool IsForMMAPV1>
+LockResult LockerImpl<IsForMMAPV1>::lockComplete(ResourceId resId,
+ LockMode mode,
+ unsigned timeoutMs,
+ bool checkDeadlock) {
+ // Under the MMAP V1 engine a deadlock can occur if a thread goes to sleep waiting on a
+ // DB lock while holding the flush lock, so the flush lock has to be released. This is
+ // only correct to do if not in a write unit of work.
+ const bool yieldFlushLock = IsForMMAPV1 && !inAWriteUnitOfWork() &&
+ resId.getType() != RESOURCE_GLOBAL && resId != resourceIdMMAPV1Flush;
+ if (yieldFlushLock) {
+ invariant(unlock(resourceIdMMAPV1Flush));
+ }
- if (result == LOCK_OK) break;
+ LockResult result;
- if (checkDeadlock) {
- DeadlockDetector wfg(globalLockManager, this);
- if (wfg.check().hasCycle()) {
- warning() << "Deadlock found: " << wfg.toString();
+ // Don't go sleeping without bound in order to be able to report long waits or wake up for
+ // deadlock detection.
+ unsigned waitTimeMs = std::min(timeoutMs, DeadlockTimeoutMs);
+ while (true) {
+ // It is OK if this call wakes up spuriously, because we re-evaluate the remaining
+ // wait time anyway.
+ result = _notify.wait(waitTimeMs);
- globalStats.recordDeadlock(resId, mode);
- _stats.recordDeadlock(resId, mode);
+ // Account for the time spent waiting on the notification object
+ const uint64_t elapsedTimeMicros = curTimeMicros64() - _requestStartTime;
+ globalStats.recordWaitTime(_id, resId, mode, elapsedTimeMicros);
+ _stats.recordWaitTime(resId, mode, elapsedTimeMicros);
- result = LOCK_DEADLOCK;
- break;
- }
- }
+ if (result == LOCK_OK)
+ break;
- // If infinite timeout was requested, just keep waiting
- if (timeoutMs == UINT_MAX) {
- continue;
- }
+ if (checkDeadlock) {
+ DeadlockDetector wfg(globalLockManager, this);
+ if (wfg.check().hasCycle()) {
+ warning() << "Deadlock found: " << wfg.toString();
- const unsigned elapsedTimeMs = elapsedTimeMicros / 1000;
- waitTimeMs = (elapsedTimeMs < timeoutMs) ?
- std::min(timeoutMs - elapsedTimeMs, DeadlockTimeoutMs) : 0;
+ globalStats.recordDeadlock(resId, mode);
+ _stats.recordDeadlock(resId, mode);
- if (waitTimeMs == 0) {
+ result = LOCK_DEADLOCK;
break;
}
}
- // Clean up the state, since this lock is now unused
- if (result != LOCK_OK) {
- LockRequestsMap::Iterator it = _requests.find(resId);
- if (globalLockManager.unlock(it.objAddr())) {
- scoped_spinlock scopedLock(_lock);
- it.remove();
- }
- }
-
- if (yieldFlushLock) {
- // We cannot obey the timeout here, because it is not correct to return from the lock
- // request with the flush lock released.
- invariant(LOCK_OK == lock(resourceIdMMAPV1Flush, _getModeForMMAPV1FlushLock()));
+ // If infinite timeout was requested, just keep waiting
+ if (timeoutMs == UINT_MAX) {
+ continue;
}
- return result;
- }
+ const unsigned elapsedTimeMs = elapsedTimeMicros / 1000;
+ waitTimeMs = (elapsedTimeMs < timeoutMs)
+ ? std::min(timeoutMs - elapsedTimeMs, DeadlockTimeoutMs)
+ : 0;
- template<bool IsForMMAPV1>
- bool LockerImpl<IsForMMAPV1>::_unlockImpl(LockRequestsMap::Iterator& it) {
- if (inAWriteUnitOfWork() && shouldDelayUnlock(it.key(), it->mode)) {
- _resourcesToUnlockAtEndOfUnitOfWork.push(it.key());
- return false;
+ if (waitTimeMs == 0) {
+ break;
}
+ }
+ // Clean up the state, since this lock is now unused
+ if (result != LOCK_OK) {
+ LockRequestsMap::Iterator it = _requests.find(resId);
if (globalLockManager.unlock(it.objAddr())) {
scoped_spinlock scopedLock(_lock);
it.remove();
-
- return true;
}
+ }
+ if (yieldFlushLock) {
+ // We cannot obey the timeout here, because it is not correct to return from the lock
+ // request with the flush lock released.
+ invariant(LOCK_OK == lock(resourceIdMMAPV1Flush, _getModeForMMAPV1FlushLock()));
+ }
+
+ return result;
+}
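
lockComplete never sleeps unboundedly: each wait is capped at DeadlockTimeoutMs so the thread periodically wakes to run deadlock detection, and the remaining budget is recomputed from elapsed time. The timeout arithmetic in isolation (nextWaitMs is a hypothetical extraction of the logic above):

    #include <algorithm>
    #include <cassert>
    #include <climits>

    const unsigned kDeadlockTimeoutMs = 500;

    // Remaining wait budget for the next wakeup. UINT_MAX means "wait
    // forever", but still sliced into kDeadlockTimeoutMs chunks so
    // deadlock detection keeps running while we wait.
    unsigned nextWaitMs(unsigned timeoutMs, unsigned elapsedMs) {
        if (timeoutMs == UINT_MAX)
            return kDeadlockTimeoutMs;
        return (elapsedMs < timeoutMs) ? std::min(timeoutMs - elapsedMs, kDeadlockTimeoutMs) : 0;
    }

    int main() {
        assert(nextWaitMs(2000, 0) == 500);     // clamped to one detection slice
        assert(nextWaitMs(2000, 1800) == 200);  // last partial slice
        assert(nextWaitMs(2000, 2500) == 0);    // budget exhausted: LOCK_TIMEOUT
        assert(nextWaitMs(UINT_MAX, 9999) == 500);
    }
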
+
+template <bool IsForMMAPV1>
+bool LockerImpl<IsForMMAPV1>::_unlockImpl(LockRequestsMap::Iterator& it) {
+ if (inAWriteUnitOfWork() && shouldDelayUnlock(it.key(), it->mode)) {
+ _resourcesToUnlockAtEndOfUnitOfWork.push(it.key());
return false;
}
- template<bool IsForMMAPV1>
- LockMode LockerImpl<IsForMMAPV1>::_getModeForMMAPV1FlushLock() const {
- invariant(IsForMMAPV1);
+ if (globalLockManager.unlock(it.objAddr())) {
+ scoped_spinlock scopedLock(_lock);
+ it.remove();
+
+ return true;
+ }
- LockMode mode = getLockMode(resourceIdGlobal);
- switch (mode) {
+ return false;
+}
+
+template <bool IsForMMAPV1>
+LockMode LockerImpl<IsForMMAPV1>::_getModeForMMAPV1FlushLock() const {
+ invariant(IsForMMAPV1);
+
+ LockMode mode = getLockMode(resourceIdGlobal);
+ switch (mode) {
case MODE_X:
case MODE_IX:
return MODE_IX;
@@ -771,153 +767,150 @@ namespace {
default:
invariant(false);
return MODE_NONE;
- }
}
+}
- template<bool IsForMMAPV1>
- bool LockerImpl<IsForMMAPV1>::hasStrongLocks() const {
- if (!isLocked()) return false;
-
- stdx::lock_guard<SpinLock> lk(_lock);
- LockRequestsMap::ConstIterator it = _requests.begin();
- while (!it.finished()) {
- if (it->mode == MODE_X || it->mode == MODE_S) {
- return true;
- }
+template <bool IsForMMAPV1>
+bool LockerImpl<IsForMMAPV1>::hasStrongLocks() const {
+ if (!isLocked())
+ return false;
- it.next();
+ stdx::lock_guard<SpinLock> lk(_lock);
+ LockRequestsMap::ConstIterator it = _requests.begin();
+ while (!it.finished()) {
+ if (it->mode == MODE_X || it->mode == MODE_S) {
+ return true;
}
- return false;
+ it.next();
}
+ return false;
+}
- //
- // Auto classes
- //
- AutoYieldFlushLockForMMAPV1Commit::AutoYieldFlushLockForMMAPV1Commit(Locker* locker)
- : _locker(static_cast<MMAPV1LockerImpl*>(locker)) {
+//
+// Auto classes
+//
- // Explicit yielding of the flush lock should happen only at global synchronization points
- // such as database drop. There should not be any active writes at these points.
- invariant(!_locker->inAWriteUnitOfWork());
+AutoYieldFlushLockForMMAPV1Commit::AutoYieldFlushLockForMMAPV1Commit(Locker* locker)
+ : _locker(static_cast<MMAPV1LockerImpl*>(locker)) {
+ // Explicit yielding of the flush lock should happen only at global synchronization points
+ // such as database drop. There should not be any active writes at these points.
+ invariant(!_locker->inAWriteUnitOfWork());
- if (isMMAPV1()) {
- invariant(_locker->unlock(resourceIdMMAPV1Flush));
- }
+ if (isMMAPV1()) {
+ invariant(_locker->unlock(resourceIdMMAPV1Flush));
}
+}
- AutoYieldFlushLockForMMAPV1Commit::~AutoYieldFlushLockForMMAPV1Commit() {
- if (isMMAPV1()) {
- invariant(LOCK_OK == _locker->lock(resourceIdMMAPV1Flush,
- _locker->_getModeForMMAPV1FlushLock()));
- }
+AutoYieldFlushLockForMMAPV1Commit::~AutoYieldFlushLockForMMAPV1Commit() {
+ if (isMMAPV1()) {
+ invariant(LOCK_OK ==
+ _locker->lock(resourceIdMMAPV1Flush, _locker->_getModeForMMAPV1FlushLock()));
}
+}
- AutoAcquireFlushLockForMMAPV1Commit::AutoAcquireFlushLockForMMAPV1Commit(Locker* locker)
- : _locker(locker),
- _released(false) {
-
- // The journal thread acquiring the journal lock in S-mode opens an opportunity for deadlock
- // involving operations which do not acquire and release the Oplog collection's X lock
- // inside a WUOW (see SERVER-17416 for the sequence of events), therefore acquire it with a
- // deadlock check and back off if one is encountered.
- //
- // This exposes a theoretical chance that we might starve the journaling system, but given
- // that these deadlocks happen extremely rarely and are usually due to incorrect locking
- // policy, and we have the deadlock counters as part of the locking statistics, this is a
- // reasonable way to handle them.
- //
- // In the worst case, if we do starve the journaling system, the server will shut down
- // due to too much uncommitted in-memory journal, but won't have corruption.
+AutoAcquireFlushLockForMMAPV1Commit::AutoAcquireFlushLockForMMAPV1Commit(Locker* locker)
+ : _locker(locker), _released(false) {
+ // The journal thread acquiring the journal lock in S-mode opens an opportunity for deadlock
+ // involving operations which do not acquire and release the Oplog collection's X lock
+ // inside a WUOW (see SERVER-17416 for the sequence of events), therefore acquire it with a
+ // deadlock check and back off if one is encountered.
+ //
+ // This exposes a theoretical chance that we might starve the journaling system, but given
+ // that these deadlocks happen extremely rarely and are usually due to incorrect locking
+ // policy, and we have the deadlock counters as part of the locking statistics, this is a
+ // reasonable way to handle them.
+ //
+ // In the worst case, if we do starve the journaling system, the server will shut down
+ // due to too much uncommitted in-memory journal, but won't have corruption.
- while (true) {
- LockResult result = _locker->lock(resourceIdMMAPV1Flush, MODE_S, UINT_MAX, true);
- if (result == LOCK_OK) {
- break;
- }
+ while (true) {
+ LockResult result = _locker->lock(resourceIdMMAPV1Flush, MODE_S, UINT_MAX, true);
+ if (result == LOCK_OK) {
+ break;
+ }
- invariant(result == LOCK_DEADLOCK);
+ invariant(result == LOCK_DEADLOCK);
- warning() << "Delayed journaling in order to avoid deadlock during MMAP V1 journal " <<
- "lock acquisition. See the previous messages for information on the " <<
- "involved threads.";
- }
+ warning() << "Delayed journaling in order to avoid deadlock during MMAP V1 journal "
+ << "lock acquisition. See the previous messages for information on the "
+ << "involved threads.";
}
+}
- void AutoAcquireFlushLockForMMAPV1Commit::upgradeFlushLockToExclusive() {
- // This should not be able to deadlock, since we already hold the S journal lock, which
- // means all writers are kicked out. Readers always yield the journal lock if they block
- // waiting on any other lock.
- invariant(LOCK_OK == _locker->lock(resourceIdMMAPV1Flush, MODE_X, UINT_MAX, false));
+void AutoAcquireFlushLockForMMAPV1Commit::upgradeFlushLockToExclusive() {
+ // This should not be able to deadlock, since we already hold the S journal lock, which
+ // means all writers are kicked out. Readers always yield the journal lock if they block
+ // waiting on any other lock.
+ invariant(LOCK_OK == _locker->lock(resourceIdMMAPV1Flush, MODE_X, UINT_MAX, false));
- // Lock bumps the recursive count. Drop it back down so that the destructor doesn't
- // complain.
- invariant(!_locker->unlock(resourceIdMMAPV1Flush));
- }
+ // Lock bumps the recursive count. Drop it back down so that the destructor doesn't
+ // complain.
+ invariant(!_locker->unlock(resourceIdMMAPV1Flush));
+}
- void AutoAcquireFlushLockForMMAPV1Commit::release() {
- if (!_released) {
- invariant(_locker->unlock(resourceIdMMAPV1Flush));
- _released = true;
- }
+void AutoAcquireFlushLockForMMAPV1Commit::release() {
+ if (!_released) {
+ invariant(_locker->unlock(resourceIdMMAPV1Flush));
+ _released = true;
}
+}
- AutoAcquireFlushLockForMMAPV1Commit::~AutoAcquireFlushLockForMMAPV1Commit() {
- release();
- }
+AutoAcquireFlushLockForMMAPV1Commit::~AutoAcquireFlushLockForMMAPV1Commit() {
+ release();
+}
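
The Auto* classes wrap flush-lock manipulation in RAII so the yield and the reacquisition cannot get out of sync with scope exits or early returns. Reduced to a generic scoped yield (ScopedYield and the LockerT member names are hypothetical):

    #include <cassert>

    // Hypothetical RAII yield: release a lock for the lifetime of the
    // scope and reacquire on destruction, mirroring the shape of
    // AutoYieldFlushLockForMMAPV1Commit. LockerT's members are assumed.
    template <typename LockerT>
    class ScopedYield {
    public:
        explicit ScopedYield(LockerT* locker) : _locker(locker) {
            assert(!_locker->inUnitOfWork());  // only yield at safe points
            _locker->unlockFlush();
        }

        ~ScopedYield() {
            _locker->lockFlush();  // restored even on early return or throw
        }

    private:
        LockerT* const _locker;
    };
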
namespace {
- /**
- * Periodically purges unused lock buckets. The first time the lock is used again after
- * cleanup it needs to be allocated, and similarly, every first use by a client for an intent
- * mode may need to create a partitioned lock head. Cleanup is done roughly once a minute.
- */
- class UnusedLockCleaner : PeriodicTask {
- public:
- std::string taskName() const {
- return "UnusedLockCleaner";
- }
+/**
+ * Periodically purges unused lock buckets. The first time the lock is used again after
+ * cleanup it needs to be allocated, and similarly, every first use by a client for an intent
+ * mode may need to create a partitioned lock head. Cleanup is done roughly once a minute.
+ */
+class UnusedLockCleaner : PeriodicTask {
+public:
+ std::string taskName() const {
+ return "UnusedLockCleaner";
+ }
- void taskDoWork() {
- LOG(2) << "cleaning up unused lock buckets of the global lock manager";
- getGlobalLockManager()->cleanupUnusedLocks();
- }
- } unusedLockCleaner;
-} // namespace
+ void taskDoWork() {
+ LOG(2) << "cleaning up unused lock buckets of the global lock manager";
+ getGlobalLockManager()->cleanupUnusedLocks();
+ }
+} unusedLockCleaner;
+} // namespace
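
UnusedLockCleaner plugs into the server's PeriodicTask machinery; the same shape with a plain std::thread looks roughly like this (a sketch, not MongoDB's PeriodicTask API; the interval and stop handling are simplified):

    #include <atomic>
    #include <chrono>
    #include <thread>

    // Hypothetical periodic runner: invoke fn roughly once per interval
    // until destroyed. MongoDB's PeriodicTask scheduler plays this role.
    class PeriodicRunner {
    public:
        template <typename Fn>
        PeriodicRunner(Fn fn, std::chrono::seconds interval)
            : _stop(false), _thread([this, fn, interval] {
                  while (!_stop.load()) {
                      std::this_thread::sleep_for(interval);
                      if (!_stop.load())
                          fn();  // e.g. cleanupUnusedLocks()
                  }
              }) {}

        ~PeriodicRunner() {
            _stop.store(true);
            _thread.join();  // note: may wait up to one full interval
        }

    private:
        std::atomic<bool> _stop;
        std::thread _thread;
    };
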
- //
- // Standalone functions
- //
+//
+// Standalone functions
+//
- LockManager* getGlobalLockManager() {
- return &globalLockManager;
- }
+LockManager* getGlobalLockManager() {
+ return &globalLockManager;
+}
- void reportGlobalLockingStats(SingleThreadedLockStats* outStats) {
- globalStats.report(outStats);
- }
+void reportGlobalLockingStats(SingleThreadedLockStats* outStats) {
+ globalStats.report(outStats);
+}
+
+void resetGlobalLockStats() {
+ globalStats.reset();
+}
- void resetGlobalLockStats() {
- globalStats.reset();
- }
-
- // Ensures that there are two instances compiled for LockerImpl for the two values of the
- // template argument.
- template class LockerImpl<true>;
- template class LockerImpl<false>;
+// Ensures that there are two instances compiled for LockerImpl for the two values of the
+// template argument.
+template class LockerImpl<true>;
+template class LockerImpl<false>;
- // Definition for the hardcoded localdb and oplog collection info
- const ResourceId resourceIdLocalDB = ResourceId(RESOURCE_DATABASE, StringData("local"));
- const ResourceId resourceIdOplog =
- ResourceId(RESOURCE_COLLECTION, StringData("local.oplog.rs"));
- const ResourceId resourceIdAdminDB = ResourceId(RESOURCE_DATABASE, StringData("admin"));
- const ResourceId resourceIdParallelBatchWriterMode =
- ResourceId(RESOURCE_GLOBAL, ResourceId::SINGLETON_PARALLEL_BATCH_WRITER_MODE);
+// Definition for the hardcoded localdb and oplog collection info
+const ResourceId resourceIdLocalDB = ResourceId(RESOURCE_DATABASE, StringData("local"));
+const ResourceId resourceIdOplog = ResourceId(RESOURCE_COLLECTION, StringData("local.oplog.rs"));
+const ResourceId resourceIdAdminDB = ResourceId(RESOURCE_DATABASE, StringData("admin"));
+const ResourceId resourceIdParallelBatchWriterMode =
+ ResourceId(RESOURCE_GLOBAL, ResourceId::SINGLETON_PARALLEL_BATCH_WRITER_MODE);
-} // namespace mongo
+} // namespace mongo