Diffstat (limited to 'src/mongo/db/concurrency')
-rw-r--r--  src/mongo/db/concurrency/d_concurrency.cpp             254
-rw-r--r--  src/mongo/db/concurrency/d_concurrency.h                405
-rw-r--r--  src/mongo/db/concurrency/d_concurrency_test.cpp         370
-rw-r--r--  src/mongo/db/concurrency/deadlock_detection_test.cpp    230
-rw-r--r--  src/mongo/db/concurrency/fast_map_noalloc.h             357
-rw-r--r--  src/mongo/db/concurrency/fast_map_noalloc_test.cpp      182
-rw-r--r--  src/mongo/db/concurrency/lock_manager.cpp              1697
-rw-r--r--  src/mongo/db/concurrency/lock_manager.h                 418
-rw-r--r--  src/mongo/db/concurrency/lock_manager_defs.h            626
-rw-r--r--  src/mongo/db/concurrency/lock_manager_test.cpp         1289
-rw-r--r--  src/mongo/db/concurrency/lock_manager_test_help.h        60
-rw-r--r--  src/mongo/db/concurrency/lock_request_list.h            127
-rw-r--r--  src/mongo/db/concurrency/lock_state.cpp                1293
-rw-r--r--  src/mongo/db/concurrency/lock_state.h                   475
-rw-r--r--  src/mongo/db/concurrency/lock_state_test.cpp            412
-rw-r--r--  src/mongo/db/concurrency/lock_stats.cpp                 182
-rw-r--r--  src/mongo/db/concurrency/lock_stats.h                   257
-rw-r--r--  src/mongo/db/concurrency/lock_stats_test.cpp             96
-rw-r--r--  src/mongo/db/concurrency/locker.h                       511
-rw-r--r--  src/mongo/db/concurrency/locker_noop.h                  212
-rw-r--r--  src/mongo/db/concurrency/write_conflict_exception.cpp    65
-rw-r--r--  src/mongo/db/concurrency/write_conflict_exception.h      76
22 files changed, 4742 insertions, 4852 deletions
diff --git a/src/mongo/db/concurrency/d_concurrency.cpp b/src/mongo/db/concurrency/d_concurrency.cpp
index 1851e84dcc2..ae0526bb535 100644
--- a/src/mongo/db/concurrency/d_concurrency.cpp
+++ b/src/mongo/db/concurrency/d_concurrency.cpp
@@ -42,184 +42,168 @@
namespace mongo {
namespace {
- // SERVER-14668: Remove or invert sense once MMAPv1 CLL can be default
- MONGO_EXPORT_STARTUP_SERVER_PARAMETER(enableCollectionLocking, bool, true);
-} // namespace
+// SERVER-14668: Remove or invert sense once MMAPv1 CLL can be default
+MONGO_EXPORT_STARTUP_SERVER_PARAMETER(enableCollectionLocking, bool, true);
+} // namespace
- Lock::TempRelease::TempRelease(Locker* lockState)
- : _lockState(lockState),
- _lockSnapshot(),
- _locksReleased(_lockState->saveLockStateAndUnlock(&_lockSnapshot)) {
+Lock::TempRelease::TempRelease(Locker* lockState)
+ : _lockState(lockState),
+ _lockSnapshot(),
+ _locksReleased(_lockState->saveLockStateAndUnlock(&_lockSnapshot)) {}
+Lock::TempRelease::~TempRelease() {
+ if (_locksReleased) {
+ invariant(!_lockState->isLocked());
+ _lockState->restoreLockState(_lockSnapshot);
}
+}
- Lock::TempRelease::~TempRelease() {
- if (_locksReleased) {
- invariant(!_lockState->isLocked());
- _lockState->restoreLockState(_lockSnapshot);
- }
- }
-
- Lock::GlobalLock::GlobalLock(Locker* locker)
- : _locker(locker),
- _result(LOCK_INVALID),
- _pbwm(locker, resourceIdParallelBatchWriterMode) { }
-
- Lock::GlobalLock::GlobalLock(Locker* locker, LockMode lockMode, unsigned timeoutMs)
- : _locker(locker),
- _result(LOCK_INVALID),
- _pbwm(locker, resourceIdParallelBatchWriterMode) {
- _lock(lockMode, timeoutMs);
- }
-
+Lock::GlobalLock::GlobalLock(Locker* locker)
+ : _locker(locker), _result(LOCK_INVALID), _pbwm(locker, resourceIdParallelBatchWriterMode) {}
+Lock::GlobalLock::GlobalLock(Locker* locker, LockMode lockMode, unsigned timeoutMs)
+ : _locker(locker), _result(LOCK_INVALID), _pbwm(locker, resourceIdParallelBatchWriterMode) {
+ _lock(lockMode, timeoutMs);
+}
- void Lock::GlobalLock::_lock(LockMode lockMode, unsigned timeoutMs) {
- if (!_locker->isBatchWriter()) {
- _pbwm.lock(MODE_IS);
- }
- _result = _locker->lockGlobalBegin(lockMode);
- if (_result == LOCK_WAITING) {
- _result = _locker->lockGlobalComplete(timeoutMs);
- }
-
- if (_result != LOCK_OK && !_locker->isBatchWriter()) {
- _pbwm.unlock();
- }
+void Lock::GlobalLock::_lock(LockMode lockMode, unsigned timeoutMs) {
+ if (!_locker->isBatchWriter()) {
+ _pbwm.lock(MODE_IS);
}
- void Lock::GlobalLock::_unlock() {
- if (isLocked()) {
- _locker->unlockAll();
- _result = LOCK_INVALID;
- }
+ _result = _locker->lockGlobalBegin(lockMode);
+ if (_result == LOCK_WAITING) {
+ _result = _locker->lockGlobalComplete(timeoutMs);
}
+ if (_result != LOCK_OK && !_locker->isBatchWriter()) {
+ _pbwm.unlock();
+ }
+}
- Lock::DBLock::DBLock(Locker* locker, StringData db, LockMode mode)
- : _id(RESOURCE_DATABASE, db),
- _locker(locker),
- _mode(mode),
- _globalLock(locker, isSharedLockMode(_mode) ? MODE_IS : MODE_IX, UINT_MAX) {
+void Lock::GlobalLock::_unlock() {
+ if (isLocked()) {
+ _locker->unlockAll();
+ _result = LOCK_INVALID;
+ }
+}
- massert(28539, "need a valid database name", !db.empty() && nsIsDbOnly(db));
- // Need to acquire the flush lock
- _locker->lockMMAPV1Flush();
+Lock::DBLock::DBLock(Locker* locker, StringData db, LockMode mode)
+ : _id(RESOURCE_DATABASE, db),
+ _locker(locker),
+ _mode(mode),
+ _globalLock(locker, isSharedLockMode(_mode) ? MODE_IS : MODE_IX, UINT_MAX) {
+ massert(28539, "need a valid database name", !db.empty() && nsIsDbOnly(db));
- if (supportsDocLocking() || enableCollectionLocking) {
- // The check for the admin db is to ensure direct writes to auth collections
- // are serialized (see SERVER-16092).
- if ((_id == resourceIdAdminDB) && !isSharedLockMode(_mode)) {
- _mode = MODE_X;
- }
+ // Need to acquire the flush lock
+ _locker->lockMMAPV1Flush();
- invariant(LOCK_OK == _locker->lock(_id, _mode));
- }
- else {
- invariant(LOCK_OK == _locker->lock(_id, isSharedLockMode(_mode) ? MODE_S : MODE_X));
+ if (supportsDocLocking() || enableCollectionLocking) {
+ // The check for the admin db is to ensure direct writes to auth collections
+ // are serialized (see SERVER-16092).
+ if ((_id == resourceIdAdminDB) && !isSharedLockMode(_mode)) {
+ _mode = MODE_X;
}
- }
- Lock::DBLock::~DBLock() {
- _locker->unlock(_id);
+ invariant(LOCK_OK == _locker->lock(_id, _mode));
+ } else {
+ invariant(LOCK_OK == _locker->lock(_id, isSharedLockMode(_mode) ? MODE_S : MODE_X));
}
+}
- void Lock::DBLock::relockWithMode(LockMode newMode) {
- // 2PL would delay the unlocking
- invariant(!_locker->inAWriteUnitOfWork());
+Lock::DBLock::~DBLock() {
+ _locker->unlock(_id);
+}
- // Not allowed to change global intent
- invariant(!isSharedLockMode(_mode) || isSharedLockMode(newMode));
+void Lock::DBLock::relockWithMode(LockMode newMode) {
+ // 2PL would delay the unlocking
+ invariant(!_locker->inAWriteUnitOfWork());
- _locker->unlock(_id);
- _mode = newMode;
+ // Not allowed to change global intent
+ invariant(!isSharedLockMode(_mode) || isSharedLockMode(newMode));
- if (supportsDocLocking() || enableCollectionLocking) {
- invariant(LOCK_OK == _locker->lock(_id, _mode));
- }
- else {
- invariant(LOCK_OK == _locker->lock(_id, isSharedLockMode(_mode) ? MODE_S : MODE_X));
- }
- }
+ _locker->unlock(_id);
+ _mode = newMode;
+ if (supportsDocLocking() || enableCollectionLocking) {
+ invariant(LOCK_OK == _locker->lock(_id, _mode));
+ } else {
+ invariant(LOCK_OK == _locker->lock(_id, isSharedLockMode(_mode) ? MODE_S : MODE_X));
+ }
+}
- Lock::CollectionLock::CollectionLock(Locker* lockState,
- StringData ns,
- LockMode mode)
- : _id(RESOURCE_COLLECTION, ns),
- _lockState(lockState) {
- massert(28538, "need a non-empty collection name", nsIsFull(ns));
+Lock::CollectionLock::CollectionLock(Locker* lockState, StringData ns, LockMode mode)
+ : _id(RESOURCE_COLLECTION, ns), _lockState(lockState) {
+ massert(28538, "need a non-empty collection name", nsIsFull(ns));
- dassert(_lockState->isDbLockedForMode(nsToDatabaseSubstring(ns),
- isSharedLockMode(mode) ? MODE_IS : MODE_IX));
- if (supportsDocLocking()) {
- _lockState->lock(_id, mode);
- }
- else if (enableCollectionLocking) {
- _lockState->lock(_id, isSharedLockMode(mode) ? MODE_S : MODE_X);
- }
+ dassert(_lockState->isDbLockedForMode(nsToDatabaseSubstring(ns),
+ isSharedLockMode(mode) ? MODE_IS : MODE_IX));
+ if (supportsDocLocking()) {
+ _lockState->lock(_id, mode);
+ } else if (enableCollectionLocking) {
+ _lockState->lock(_id, isSharedLockMode(mode) ? MODE_S : MODE_X);
}
+}
- Lock::CollectionLock::~CollectionLock() {
- if (supportsDocLocking() || enableCollectionLocking) {
- _lockState->unlock(_id);
- }
+Lock::CollectionLock::~CollectionLock() {
+ if (supportsDocLocking() || enableCollectionLocking) {
+ _lockState->unlock(_id);
}
+}
- void Lock::CollectionLock::relockAsDatabaseExclusive(Lock::DBLock& dbLock) {
- if (supportsDocLocking() || enableCollectionLocking) {
- _lockState->unlock(_id);
- }
+void Lock::CollectionLock::relockAsDatabaseExclusive(Lock::DBLock& dbLock) {
+ if (supportsDocLocking() || enableCollectionLocking) {
+ _lockState->unlock(_id);
+ }
- dbLock.relockWithMode(MODE_X);
+ dbLock.relockWithMode(MODE_X);
- if (supportsDocLocking() || enableCollectionLocking) {
- // don't need the lock, but need something to unlock in the destructor
- _lockState->lock(_id, MODE_IX);
- }
+ if (supportsDocLocking() || enableCollectionLocking) {
+ // don't need the lock, but need something to unlock in the destructor
+ _lockState->lock(_id, MODE_IX);
}
+}
namespace {
- stdx::mutex oplogSerialization; // for OplogIntentWriteLock
-} // namespace
+stdx::mutex oplogSerialization; // for OplogIntentWriteLock
+} // namespace
- Lock::OplogIntentWriteLock::OplogIntentWriteLock(Locker* lockState)
- : _lockState(lockState),
- _serialized(false) {
- _lockState->lock(resourceIdOplog, MODE_IX);
- }
+Lock::OplogIntentWriteLock::OplogIntentWriteLock(Locker* lockState)
+ : _lockState(lockState), _serialized(false) {
+ _lockState->lock(resourceIdOplog, MODE_IX);
+}
- Lock::OplogIntentWriteLock::~OplogIntentWriteLock() {
- if (_serialized) {
- oplogSerialization.unlock();
- }
- _lockState->unlock(resourceIdOplog);
+Lock::OplogIntentWriteLock::~OplogIntentWriteLock() {
+ if (_serialized) {
+ oplogSerialization.unlock();
}
+ _lockState->unlock(resourceIdOplog);
+}
- void Lock::OplogIntentWriteLock::serializeIfNeeded() {
- if (!supportsDocLocking() && !_serialized) {
- oplogSerialization.lock();
- _serialized = true;
- }
+void Lock::OplogIntentWriteLock::serializeIfNeeded() {
+ if (!supportsDocLocking() && !_serialized) {
+ oplogSerialization.lock();
+ _serialized = true;
}
+}
- Lock::ParallelBatchWriterMode::ParallelBatchWriterMode(Locker* lockState)
- : _pbwm(lockState, resourceIdParallelBatchWriterMode, MODE_X) { }
+Lock::ParallelBatchWriterMode::ParallelBatchWriterMode(Locker* lockState)
+ : _pbwm(lockState, resourceIdParallelBatchWriterMode, MODE_X) {}
- void Lock::ResourceLock::lock(LockMode mode) {
- invariant(_result == LOCK_INVALID);
- _result = _locker->lock(_rid, mode);
- invariant(_result == LOCK_OK);
- }
+void Lock::ResourceLock::lock(LockMode mode) {
+ invariant(_result == LOCK_INVALID);
+ _result = _locker->lock(_rid, mode);
+ invariant(_result == LOCK_OK);
+}
- void Lock::ResourceLock::unlock() {
- if (_result == LOCK_OK) {
- _locker->unlock(_rid);
- _result = LOCK_INVALID;
- }
+void Lock::ResourceLock::unlock() {
+ if (_result == LOCK_OK) {
+ _locker->unlock(_rid);
+ _result = LOCK_INVALID;
}
+}
-} // namespace mongo
+} // namespace mongo
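
The hunks above are whitespace-only: the RAII behavior of the lock guards is unchanged. As a minimal sketch of that behavior (not part of this commit; it assumes the d_concurrency.h API above and the MMAPV1LockerImpl used by the tests later in this diff, mirroring the TempReleaseGlobalWrite test):

    // Sketch only; not part of this commit.
    MMAPV1LockerImpl ls;
    {
        Lock::GlobalWrite globalWrite(&ls);      // takes the global lock in MODE_X
        invariant(ls.isW());
        {
            Lock::TempRelease tempRelease(&ls);  // drops all locks for this scope
            invariant(!ls.isLocked());
        }                                        // locks restored by ~TempRelease
    }                                            // global lock released by ~GlobalWrite
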
diff --git a/src/mongo/db/concurrency/d_concurrency.h b/src/mongo/db/concurrency/d_concurrency.h
index f21d356994e..b95b7b46fec 100644
--- a/src/mongo/db/concurrency/d_concurrency.h
+++ b/src/mongo/db/concurrency/d_concurrency.h
@@ -28,257 +28,256 @@
#pragma once
-#include <climits> // For UINT_MAX
+#include <climits> // For UINT_MAX
#include "mongo/db/concurrency/locker.h"
#include "mongo/util/timer.h"
namespace mongo {
- class StringData;
+class StringData;
+
+class Lock {
+public:
+ /**
+ * NOTE: DO NOT add any new usages of TempRelease. It is being deprecated/removed.
+ */
+ class TempRelease {
+ MONGO_DISALLOW_COPYING(TempRelease);
- class Lock {
public:
+ explicit TempRelease(Locker* lockState);
+ ~TempRelease();
- /**
- * NOTE: DO NOT add any new usages of TempRelease. It is being deprecated/removed.
- */
- class TempRelease {
- MONGO_DISALLOW_COPYING(TempRelease);
- public:
- explicit TempRelease(Locker* lockState);
- ~TempRelease();
+ private:
+ // Not owned
+ Locker* const _lockState;
- private:
- // Not owned
- Locker* const _lockState;
+ // If _locksReleased is true, this stores the persisted lock information to be restored
+ // in the destructor. Otherwise it is empty.
+ Locker::LockSnapshot _lockSnapshot;
- // If _locksReleased is true, this stores the persisted lock information to be restored
- // in the destructor. Otherwise it is empty.
- Locker::LockSnapshot _lockSnapshot;
+ // False if locks could not be released because of recursive locking
+ const bool _locksReleased;
+ };
- // False if locks could not be released because of recursive locking
- const bool _locksReleased;
- };
+ /**
+ * General purpose RAII wrapper for a resource managed by the lock manager
+ *
+ * See LockMode for the supported modes. Unlike DBLock/Collection lock, this will not do
+ * any additional checks/upgrades or global locking. Use ResourceLock for locking
+ * resources other than RESOURCE_GLOBAL, RESOURCE_DATABASE and RESOURCE_COLLECTION.
+ */
+ class ResourceLock {
+ MONGO_DISALLOW_COPYING(ResourceLock);
- /**
- * General purpose RAII wrapper for a resource managed by the lock manager
- *
- * See LockMode for the supported modes. Unlike DBLock/Collection lock, this will not do
- * any additional checks/upgrades or global locking. Use ResourceLock for locking
- * resources other than RESOURCE_GLOBAL, RESOURCE_DATABASE and RESOURCE_COLLECTION.
- */
- class ResourceLock {
- MONGO_DISALLOW_COPYING(ResourceLock);
-
- public:
- ResourceLock(Locker* locker, ResourceId rid)
- : _rid(rid),
- _locker(locker),
- _result(LOCK_INVALID) {
- }
-
- ResourceLock(Locker* locker, ResourceId rid, LockMode mode)
- : _rid(rid),
- _locker(locker),
- _result(LOCK_INVALID) {
- lock(mode);
- }
+ public:
+ ResourceLock(Locker* locker, ResourceId rid)
+ : _rid(rid), _locker(locker), _result(LOCK_INVALID) {}
- ~ResourceLock() {
- unlock();
- }
+ ResourceLock(Locker* locker, ResourceId rid, LockMode mode)
+ : _rid(rid), _locker(locker), _result(LOCK_INVALID) {
+ lock(mode);
+ }
- void lock(LockMode mode);
- void unlock();
+ ~ResourceLock() {
+ unlock();
+ }
- bool isLocked() const { return _result == LOCK_OK; }
+ void lock(LockMode mode);
+ void unlock();
- private:
- const ResourceId _rid;
- Locker* const _locker;
+ bool isLocked() const {
+ return _result == LOCK_OK;
+ }
- LockResult _result;
- };
+ private:
+ const ResourceId _rid;
+ Locker* const _locker;
+ LockResult _result;
+ };
- /**
- * Global lock.
- *
- * Grabs global resource lock. Allows further (recursive) acquisition of the global lock
- * in any mode, see LockMode.
- * NOTE: Does not acquire flush lock.
- */
- class GlobalLock {
- public:
- explicit GlobalLock(Locker* locker);
- GlobalLock(Locker* locker, LockMode lockMode, unsigned timeoutMs);
- ~GlobalLock() {
- _unlock();
- }
+ /**
+ * Global lock.
+ *
+ * Grabs global resource lock. Allows further (recursive) acquisition of the global lock
+ * in any mode, see LockMode.
+ * NOTE: Does not acquire flush lock.
+ */
+ class GlobalLock {
+ public:
+ explicit GlobalLock(Locker* locker);
+ GlobalLock(Locker* locker, LockMode lockMode, unsigned timeoutMs);
- bool isLocked() const { return _result == LOCK_OK; }
+ ~GlobalLock() {
+ _unlock();
+ }
- private:
+ bool isLocked() const {
+ return _result == LOCK_OK;
+ }
- void _lock(LockMode lockMode, unsigned timeoutMs);
- void _unlock();
+ private:
+ void _lock(LockMode lockMode, unsigned timeoutMs);
+ void _unlock();
- Locker* const _locker;
- LockResult _result;
- ResourceLock _pbwm;
- };
+ Locker* const _locker;
+ LockResult _result;
+ ResourceLock _pbwm;
+ };
- /**
- * Global exclusive lock
- *
- * Allows exclusive write access to all databases and collections, blocking all other
- * access. Allows further (recursive) acquisition of the global lock in any mode,
- * see LockMode.
- */
- class GlobalWrite : public GlobalLock {
- public:
- explicit GlobalWrite(Locker* locker, unsigned timeoutMs = UINT_MAX)
- : GlobalLock(locker, MODE_X, timeoutMs) {
-
- if (isLocked()) {
- locker->lockMMAPV1Flush();
- }
+ /**
+ * Global exclusive lock
+ *
+ * Allows exclusive write access to all databases and collections, blocking all other
+ * access. Allows further (recursive) acquisition of the global lock in any mode,
+ * see LockMode.
+ */
+ class GlobalWrite : public GlobalLock {
+ public:
+ explicit GlobalWrite(Locker* locker, unsigned timeoutMs = UINT_MAX)
+ : GlobalLock(locker, MODE_X, timeoutMs) {
+ if (isLocked()) {
+ locker->lockMMAPV1Flush();
}
- };
+ }
+ };
- /**
- * Global shared lock
- *
- * Allows concurrent read access to all databases and collections, blocking any writers.
- * Allows further (recursive) acquisition of the global lock in shared (S) or intent-shared
- * (IS) mode, see LockMode.
- */
- class GlobalRead : public GlobalLock {
- public:
- explicit GlobalRead(Locker* locker, unsigned timeoutMs = UINT_MAX)
- : GlobalLock(locker, MODE_S, timeoutMs) {
-
- if (isLocked()) {
- locker->lockMMAPV1Flush();
- }
+ /**
+ * Global shared lock
+ *
+ * Allows concurrent read access to all databases and collections, blocking any writers.
+ * Allows further (recursive) acquisition of the global lock in shared (S) or intent-shared
+ * (IS) mode, see LockMode.
+ */
+ class GlobalRead : public GlobalLock {
+ public:
+ explicit GlobalRead(Locker* locker, unsigned timeoutMs = UINT_MAX)
+ : GlobalLock(locker, MODE_S, timeoutMs) {
+ if (isLocked()) {
+ locker->lockMMAPV1Flush();
}
- };
+ }
+ };
+
+ /**
+ * Database lock with support for collection- and document-level locking
+ *
+ * This lock supports four modes (see Lock_Mode):
+ * MODE_IS: concurrent database access, requiring further collection read locks
+ * MODE_IX: concurrent database access, requiring further collection read or write locks
+ * MODE_S: shared read access to the database, blocking any writers
+ * MODE_X: exclusive access to the database, blocking all other readers and writers
+ *
+ * For MODE_IS or MODE_S also acquires global lock in intent-shared (IS) mode, and
+ * for MODE_IX or MODE_X also acquires global lock in intent-exclusive (IX) mode.
+ * For storage engines that do not support collection-level locking, MODE_IS will be
+ * upgraded to MODE_S and MODE_IX will be upgraded to MODE_X.
+ */
+ class DBLock {
+ public:
+ DBLock(Locker* locker, StringData db, LockMode mode);
+ ~DBLock();
/**
- * Database lock with support for collection- and document-level locking
- *
- * This lock supports four modes (see Lock_Mode):
- * MODE_IS: concurrent database access, requiring further collection read locks
- * MODE_IX: concurrent database access, requiring further collection read or write locks
- * MODE_S: shared read access to the database, blocking any writers
- * MODE_X: exclusive access to the database, blocking all other readers and writers
- *
- * For MODE_IS or MODE_S also acquires global lock in intent-shared (IS) mode, and
- * for MODE_IX or MODE_X also acquires global lock in intent-exclusive (IX) mode.
- * For storage engines that do not support collection-level locking, MODE_IS will be
- * upgraded to MODE_S and MODE_IX will be upgraded to MODE_X.
+ * Releases the DBLock and reacquires it with the new mode. The global intent
+ * lock is retained (so the database can't disappear). Relocking from MODE_IS or
+ * MODE_S to MODE_IX or MODE_X is not allowed to avoid violating the global intent.
+ * Use relockWithMode() instead of upgrading to avoid deadlock.
*/
- class DBLock {
- public:
- DBLock(Locker* locker, StringData db, LockMode mode);
- ~DBLock();
+ void relockWithMode(LockMode newMode);
- /**
- * Releases the DBLock and reacquires it with the new mode. The global intent
- * lock is retained (so the database can't disappear). Relocking from MODE_IS or
- * MODE_S to MODE_IX or MODE_X is not allowed to avoid violating the global intent.
- * Use relockWithMode() instead of upgrading to avoid deadlock.
- */
- void relockWithMode(LockMode newMode);
+ private:
+ const ResourceId _id;
+ Locker* const _locker;
- private:
- const ResourceId _id;
- Locker* const _locker;
+ // May be changed through relockWithMode. The global lock mode won't change though,
+ // because we never change from IS/S to IX/X or vice versa, just convert locks from
+ // IX -> X.
+ LockMode _mode;
- // May be changed through relockWithMode. The global lock mode won't change though,
- // because we never change from IS/S to IX/X or vice versa, just convert locks from
- // IX -> X.
- LockMode _mode;
+ // Acquires the global lock on our behalf.
+ GlobalLock _globalLock;
+ };
- // Acquires the global lock on our behalf.
- GlobalLock _globalLock;
- };
+ /**
+ * Collection lock with support for document-level locking
+ *
+ * This lock supports four modes (see Lock_Mode):
+ * MODE_IS: concurrent collection access, requiring document level locking read locks
+ * MODE_IX: concurrent collection access, requiring document level read or write locks
+ * MODE_S: shared read access to the collection, blocking any writers
+ * MODE_X: exclusive access to the collection, blocking all other readers and writers
+ *
+ * An appropriate DBLock must already be held before locking a collection: it is an error,
+ * checked with a dassert(), to not have a suitable database lock before locking the
+ * collection. For storage engines that do not support document-level locking, MODE_IS
+ * will be upgraded to MODE_S and MODE_IX will be upgraded to MODE_X.
+ */
+ class CollectionLock {
+ MONGO_DISALLOW_COPYING(CollectionLock);
- /**
- * Collection lock with support for document-level locking
- *
- * This lock supports four modes (see Lock_Mode):
- * MODE_IS: concurrent collection access, requiring document level locking read locks
- * MODE_IX: concurrent collection access, requiring document level read or write locks
- * MODE_S: shared read access to the collection, blocking any writers
- * MODE_X: exclusive access to the collection, blocking all other readers and writers
- *
- * An appropriate DBLock must already be held before locking a collection: it is an error,
- * checked with a dassert(), to not have a suitable database lock before locking the
- * collection. For storage engines that do not support document-level locking, MODE_IS
- * will be upgraded to MODE_S and MODE_IX will be upgraded to MODE_X.
- */
- class CollectionLock {
- MONGO_DISALLOW_COPYING(CollectionLock);
- public:
- CollectionLock(Locker* lockState, StringData ns, LockMode mode);
- ~CollectionLock();
-
- /**
- * When holding the collection in MODE_IX or MODE_X, calling this will release the
- * collection and database locks, and relocks the database in MODE_X. This is typically
- * used if the collection still needs to be created. Upgrading would not be safe as
- * it could lead to deadlock, similarly for relocking the database without releasing
- * the collection lock. The collection lock will also be reacquired even though it is
- * not really needed, as it simplifies invariant checking: the CollectionLock class
- * has as invariant that a collection lock is being held.
- */
- void relockAsDatabaseExclusive(Lock::DBLock& dbLock);
-
- private:
- const ResourceId _id;
- Locker* const _lockState;
- };
+ public:
+ CollectionLock(Locker* lockState, StringData ns, LockMode mode);
+ ~CollectionLock();
/**
- * Like the CollectionLock, but optimized for the local oplog. Always locks in MODE_IX,
- * must call serializeIfNeeded() before doing any concurrent operations in order to
- * support storage engines without document level locking. It is an error, checked with a
- * dassert(), to not have a suitable database lock when taking this lock.
+ * When holding the collection in MODE_IX or MODE_X, calling this will release the
+ * collection and database locks, and relocks the database in MODE_X. This is typically
+ * used if the collection still needs to be created. Upgrading would not be safe as
+ * it could lead to deadlock, similarly for relocking the database without releasing
+ * the collection lock. The collection lock will also be reacquired even though it is
+ * not really needed, as it simplifies invariant checking: the CollectionLock class
+ * has as invariant that a collection lock is being held.
*/
- class OplogIntentWriteLock {
- MONGO_DISALLOW_COPYING(OplogIntentWriteLock);
- public:
- explicit OplogIntentWriteLock(Locker* lockState);
- ~OplogIntentWriteLock();
- void serializeIfNeeded();
- private:
- Locker* const _lockState;
- bool _serialized;
- };
+ void relockAsDatabaseExclusive(Lock::DBLock& dbLock);
+ private:
+ const ResourceId _id;
+ Locker* const _lockState;
+ };
- /**
- * Turn on "parallel batch writer mode" by locking the global ParallelBatchWriterMode
- * resource in exclusive mode. This mode is off by default.
- * Note that only one thread creates a ParallelBatchWriterMode object; the other batch
- * writers just call setIsBatchWriter().
- */
- class ParallelBatchWriterMode {
- MONGO_DISALLOW_COPYING(ParallelBatchWriterMode);
+ /**
+ * Like the CollectionLock, but optimized for the local oplog. Always locks in MODE_IX,
+ * must call serializeIfNeeded() before doing any concurrent operations in order to
+ * support storage engines without document level locking. It is an error, checked with a
+ * dassert(), to not have a suitable database lock when taking this lock.
+ */
+ class OplogIntentWriteLock {
+ MONGO_DISALLOW_COPYING(OplogIntentWriteLock);
+
+ public:
+ explicit OplogIntentWriteLock(Locker* lockState);
+ ~OplogIntentWriteLock();
+ void serializeIfNeeded();
- public:
- explicit ParallelBatchWriterMode(Locker* lockState);
+ private:
+ Locker* const _lockState;
+ bool _serialized;
+ };
+
+
+ /**
+ * Turn on "parallel batch writer mode" by locking the global ParallelBatchWriterMode
+ * resource in exclusive mode. This mode is off by default.
+ * Note that only one thread creates a ParallelBatchWriterMode object; the other batch
+ * writers just call setIsBatchWriter().
+ */
+ class ParallelBatchWriterMode {
+ MONGO_DISALLOW_COPYING(ParallelBatchWriterMode);
+
+ public:
+ explicit ParallelBatchWriterMode(Locker* lockState);
- private:
- ResourceLock _pbwm;
- };
+ private:
+ ResourceLock _pbwm;
};
+};
}
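
As the header comments above describe, DBLock also acquires the global lock in the matching intent mode, and CollectionLock requires a suitable DBLock to already be held. A sketch of the intended nesting, including the IX -> X conversion that relockWithMode() permits (assumptions: DefaultLockerImpl from the tests that follow, and placeholder names "db1"/"db1.coll"):

    // Sketch only; not part of this commit.
    DefaultLockerImpl ls;
    Lock::DBLock dbLock(&ls, "db1", MODE_IX);  // also takes the global lock in MODE_IX
    {
        Lock::CollectionLock collLock(&ls, "db1.coll", MODE_IX);
        // ... document-level work; engines without document-level locking
        // upgrade this to MODE_X on the collection ...
    }
    dbLock.relockWithMode(MODE_X);  // allowed: IX -> X, global intent unchanged
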
diff --git a/src/mongo/db/concurrency/d_concurrency_test.cpp b/src/mongo/db/concurrency/d_concurrency_test.cpp
index 245424cdf39..855a17d99c6 100644
--- a/src/mongo/db/concurrency/d_concurrency_test.cpp
+++ b/src/mongo/db/concurrency/d_concurrency_test.cpp
@@ -36,274 +36,274 @@
namespace mongo {
- using std::string;
+using std::string;
- TEST(DConcurrency, GlobalRead) {
- MMAPV1LockerImpl ls;
- Lock::GlobalRead globalRead(&ls);
- ASSERT(ls.isR());
- }
-
- TEST(DConcurrency, GlobalWrite) {
- MMAPV1LockerImpl ls;
- Lock::GlobalWrite globalWrite(&ls);
- ASSERT(ls.isW());
- }
+TEST(DConcurrency, GlobalRead) {
+ MMAPV1LockerImpl ls;
+ Lock::GlobalRead globalRead(&ls);
+ ASSERT(ls.isR());
+}
- TEST(DConcurrency, GlobalWriteAndGlobalRead) {
- MMAPV1LockerImpl ls;
+TEST(DConcurrency, GlobalWrite) {
+ MMAPV1LockerImpl ls;
+ Lock::GlobalWrite globalWrite(&ls);
+ ASSERT(ls.isW());
+}
- Lock::GlobalWrite globalWrite(&ls);
- ASSERT(ls.isW());
+TEST(DConcurrency, GlobalWriteAndGlobalRead) {
+ MMAPV1LockerImpl ls;
- {
- Lock::GlobalRead globalRead(&ls);
- ASSERT(ls.isW());
- }
+ Lock::GlobalWrite globalWrite(&ls);
+ ASSERT(ls.isW());
+ {
+ Lock::GlobalRead globalRead(&ls);
ASSERT(ls.isW());
}
- TEST(DConcurrency, GlobalLockS_Timeout) {
- MMAPV1LockerImpl ls;
- Lock::GlobalLock globalWrite(&ls, MODE_X, 0);
- ASSERT(globalWrite.isLocked());
-
- {
- MMAPV1LockerImpl lsTry;
- Lock::GlobalLock globalReadTry(&lsTry, MODE_S, 1);
- ASSERT(!globalReadTry.isLocked());
- }
- }
-
- TEST(DConcurrency, GlobalLockX_Timeout) {
- MMAPV1LockerImpl ls;
- Lock::GlobalLock globalWrite(&ls, MODE_X, 0);
- ASSERT(globalWrite.isLocked());
-
- {
- MMAPV1LockerImpl lsTry;
- Lock::GlobalLock globalWriteTry(&lsTry, MODE_X, 1);
- ASSERT(!globalWriteTry.isLocked());
- }
- }
+ ASSERT(ls.isW());
+}
- TEST(DConcurrency, GlobalLockS_NoTimeoutDueToGlobalLockS) {
- MMAPV1LockerImpl ls;
- Lock::GlobalRead globalRead(&ls);
+TEST(DConcurrency, GlobalLockS_Timeout) {
+ MMAPV1LockerImpl ls;
+ Lock::GlobalLock globalWrite(&ls, MODE_X, 0);
+ ASSERT(globalWrite.isLocked());
+ {
MMAPV1LockerImpl lsTry;
Lock::GlobalLock globalReadTry(&lsTry, MODE_S, 1);
-
- ASSERT(globalReadTry.isLocked());
+ ASSERT(!globalReadTry.isLocked());
}
+}
- TEST(DConcurrency, GlobalLockX_TimeoutDueToGlobalLockS) {
- MMAPV1LockerImpl ls;
- Lock::GlobalRead globalRead(&ls);
+TEST(DConcurrency, GlobalLockX_Timeout) {
+ MMAPV1LockerImpl ls;
+ Lock::GlobalLock globalWrite(&ls, MODE_X, 0);
+ ASSERT(globalWrite.isLocked());
+ {
MMAPV1LockerImpl lsTry;
Lock::GlobalLock globalWriteTry(&lsTry, MODE_X, 1);
-
ASSERT(!globalWriteTry.isLocked());
}
+}
- TEST(DConcurrency, GlobalLockS_TimeoutDueToGlobalLockX) {
- MMAPV1LockerImpl ls;
- Lock::GlobalWrite globalWrite(&ls);
+TEST(DConcurrency, GlobalLockS_NoTimeoutDueToGlobalLockS) {
+ MMAPV1LockerImpl ls;
+ Lock::GlobalRead globalRead(&ls);
- MMAPV1LockerImpl lsTry;
- Lock::GlobalLock globalReadTry(&lsTry, MODE_S, 1);
+ MMAPV1LockerImpl lsTry;
+ Lock::GlobalLock globalReadTry(&lsTry, MODE_S, 1);
- ASSERT(!globalReadTry.isLocked());
- }
+ ASSERT(globalReadTry.isLocked());
+}
- TEST(DConcurrency, GlobalLockX_TimeoutDueToGlobalLockX) {
- MMAPV1LockerImpl ls;
- Lock::GlobalWrite globalWrite(&ls);
+TEST(DConcurrency, GlobalLockX_TimeoutDueToGlobalLockS) {
+ MMAPV1LockerImpl ls;
+ Lock::GlobalRead globalRead(&ls);
- MMAPV1LockerImpl lsTry;
- Lock::GlobalLock globalWriteTry(&lsTry, MODE_X, 1);
+ MMAPV1LockerImpl lsTry;
+ Lock::GlobalLock globalWriteTry(&lsTry, MODE_X, 1);
- ASSERT(!globalWriteTry.isLocked());
- }
+ ASSERT(!globalWriteTry.isLocked());
+}
- TEST(DConcurrency, TempReleaseGlobalWrite) {
- MMAPV1LockerImpl ls;
- Lock::GlobalWrite globalWrite(&ls);
+TEST(DConcurrency, GlobalLockS_TimeoutDueToGlobalLockX) {
+ MMAPV1LockerImpl ls;
+ Lock::GlobalWrite globalWrite(&ls);
- {
- Lock::TempRelease tempRelease(&ls);
- ASSERT(!ls.isLocked());
- }
+ MMAPV1LockerImpl lsTry;
+ Lock::GlobalLock globalReadTry(&lsTry, MODE_S, 1);
- ASSERT(ls.isW());
+ ASSERT(!globalReadTry.isLocked());
+}
+
+TEST(DConcurrency, GlobalLockX_TimeoutDueToGlobalLockX) {
+ MMAPV1LockerImpl ls;
+ Lock::GlobalWrite globalWrite(&ls);
+
+ MMAPV1LockerImpl lsTry;
+ Lock::GlobalLock globalWriteTry(&lsTry, MODE_X, 1);
+
+ ASSERT(!globalWriteTry.isLocked());
+}
+
+TEST(DConcurrency, TempReleaseGlobalWrite) {
+ MMAPV1LockerImpl ls;
+ Lock::GlobalWrite globalWrite(&ls);
+
+ {
+ Lock::TempRelease tempRelease(&ls);
+ ASSERT(!ls.isLocked());
}
- TEST(DConcurrency, TempReleaseRecursive) {
- MMAPV1LockerImpl ls;
- Lock::GlobalWrite globalWrite(&ls);
- Lock::DBLock lk(&ls, "SomeDBName", MODE_X);
+ ASSERT(ls.isW());
+}
- {
- Lock::TempRelease tempRelease(&ls);
- ASSERT(ls.isW());
- ASSERT(ls.isDbLockedForMode("SomeDBName", MODE_X));
- }
+TEST(DConcurrency, TempReleaseRecursive) {
+ MMAPV1LockerImpl ls;
+ Lock::GlobalWrite globalWrite(&ls);
+ Lock::DBLock lk(&ls, "SomeDBName", MODE_X);
+ {
+ Lock::TempRelease tempRelease(&ls);
ASSERT(ls.isW());
+ ASSERT(ls.isDbLockedForMode("SomeDBName", MODE_X));
}
- TEST(DConcurrency, DBLockTakesS) {
- MMAPV1LockerImpl ls;
+ ASSERT(ls.isW());
+}
- Lock::DBLock dbRead(&ls, "db", MODE_S);
+TEST(DConcurrency, DBLockTakesS) {
+ MMAPV1LockerImpl ls;
- const ResourceId resIdDb(RESOURCE_DATABASE, string("db"));
- ASSERT(ls.getLockMode(resIdDb) == MODE_S);
- }
+ Lock::DBLock dbRead(&ls, "db", MODE_S);
- TEST(DConcurrency, DBLockTakesX) {
- MMAPV1LockerImpl ls;
+ const ResourceId resIdDb(RESOURCE_DATABASE, string("db"));
+ ASSERT(ls.getLockMode(resIdDb) == MODE_S);
+}
- Lock::DBLock dbWrite(&ls, "db", MODE_X);
+TEST(DConcurrency, DBLockTakesX) {
+ MMAPV1LockerImpl ls;
- const ResourceId resIdDb(RESOURCE_DATABASE, string("db"));
- ASSERT(ls.getLockMode(resIdDb) == MODE_X);
- }
+ Lock::DBLock dbWrite(&ls, "db", MODE_X);
- TEST(DConcurrency, DBLockTakesISForAdminIS) {
- DefaultLockerImpl ls;
+ const ResourceId resIdDb(RESOURCE_DATABASE, string("db"));
+ ASSERT(ls.getLockMode(resIdDb) == MODE_X);
+}
- Lock::DBLock dbRead(&ls, "admin", MODE_IS);
+TEST(DConcurrency, DBLockTakesISForAdminIS) {
+ DefaultLockerImpl ls;
- ASSERT(ls.getLockMode(resourceIdAdminDB) == MODE_IS);
- }
+ Lock::DBLock dbRead(&ls, "admin", MODE_IS);
- TEST(DConcurrency, DBLockTakesSForAdminS) {
- DefaultLockerImpl ls;
+ ASSERT(ls.getLockMode(resourceIdAdminDB) == MODE_IS);
+}
- Lock::DBLock dbRead(&ls, "admin", MODE_S);
+TEST(DConcurrency, DBLockTakesSForAdminS) {
+ DefaultLockerImpl ls;
- ASSERT(ls.getLockMode(resourceIdAdminDB) == MODE_S);
- }
+ Lock::DBLock dbRead(&ls, "admin", MODE_S);
- TEST(DConcurrency, DBLockTakesXForAdminIX) {
- DefaultLockerImpl ls;
+ ASSERT(ls.getLockMode(resourceIdAdminDB) == MODE_S);
+}
- Lock::DBLock dbWrite(&ls, "admin", MODE_IX);
+TEST(DConcurrency, DBLockTakesXForAdminIX) {
+ DefaultLockerImpl ls;
- ASSERT(ls.getLockMode(resourceIdAdminDB) == MODE_X);
- }
+ Lock::DBLock dbWrite(&ls, "admin", MODE_IX);
- TEST(DConcurrency, DBLockTakesXForAdminX) {
- DefaultLockerImpl ls;
+ ASSERT(ls.getLockMode(resourceIdAdminDB) == MODE_X);
+}
- Lock::DBLock dbWrite(&ls, "admin", MODE_X);
+TEST(DConcurrency, DBLockTakesXForAdminX) {
+ DefaultLockerImpl ls;
- ASSERT(ls.getLockMode(resourceIdAdminDB) == MODE_X);
- }
+ Lock::DBLock dbWrite(&ls, "admin", MODE_X);
- TEST(DConcurrency, MultipleWriteDBLocksOnSameThread) {
- MMAPV1LockerImpl ls;
+ ASSERT(ls.getLockMode(resourceIdAdminDB) == MODE_X);
+}
- Lock::DBLock r1(&ls, "db1", MODE_X);
- Lock::DBLock r2(&ls, "db1", MODE_X);
+TEST(DConcurrency, MultipleWriteDBLocksOnSameThread) {
+ MMAPV1LockerImpl ls;
- ASSERT(ls.isDbLockedForMode("db1", MODE_X));
- }
+ Lock::DBLock r1(&ls, "db1", MODE_X);
+ Lock::DBLock r2(&ls, "db1", MODE_X);
- TEST(DConcurrency, MultipleConflictingDBLocksOnSameThread) {
- MMAPV1LockerImpl ls;
+ ASSERT(ls.isDbLockedForMode("db1", MODE_X));
+}
- Lock::DBLock r1(&ls, "db1", MODE_X);
- Lock::DBLock r2(&ls, "db1", MODE_S);
+TEST(DConcurrency, MultipleConflictingDBLocksOnSameThread) {
+ MMAPV1LockerImpl ls;
- ASSERT(ls.isDbLockedForMode("db1", MODE_X));
- ASSERT(ls.isDbLockedForMode("db1", MODE_S));
- }
+ Lock::DBLock r1(&ls, "db1", MODE_X);
+ Lock::DBLock r2(&ls, "db1", MODE_S);
- TEST(DConcurrency, IsDbLockedForSMode) {
- const std::string dbName("db");
+ ASSERT(ls.isDbLockedForMode("db1", MODE_X));
+ ASSERT(ls.isDbLockedForMode("db1", MODE_S));
+}
- MMAPV1LockerImpl ls;
+TEST(DConcurrency, IsDbLockedForSMode) {
+ const std::string dbName("db");
- Lock::DBLock dbLock(&ls, dbName, MODE_S);
+ MMAPV1LockerImpl ls;
- ASSERT(ls.isDbLockedForMode(dbName, MODE_IS));
- ASSERT(!ls.isDbLockedForMode(dbName, MODE_IX));
- ASSERT(ls.isDbLockedForMode(dbName, MODE_S));
- ASSERT(!ls.isDbLockedForMode(dbName, MODE_X));
- }
+ Lock::DBLock dbLock(&ls, dbName, MODE_S);
- TEST(DConcurrency, IsDbLockedForXMode) {
- const std::string dbName("db");
+ ASSERT(ls.isDbLockedForMode(dbName, MODE_IS));
+ ASSERT(!ls.isDbLockedForMode(dbName, MODE_IX));
+ ASSERT(ls.isDbLockedForMode(dbName, MODE_S));
+ ASSERT(!ls.isDbLockedForMode(dbName, MODE_X));
+}
- MMAPV1LockerImpl ls;
+TEST(DConcurrency, IsDbLockedForXMode) {
+ const std::string dbName("db");
- Lock::DBLock dbLock(&ls, dbName, MODE_X);
+ MMAPV1LockerImpl ls;
- ASSERT(ls.isDbLockedForMode(dbName, MODE_IS));
- ASSERT(ls.isDbLockedForMode(dbName, MODE_IX));
- ASSERT(ls.isDbLockedForMode(dbName, MODE_S));
- ASSERT(ls.isDbLockedForMode(dbName, MODE_X));
- }
+ Lock::DBLock dbLock(&ls, dbName, MODE_X);
- TEST(DConcurrency, IsCollectionLocked_DB_Locked_IS) {
- const std::string ns("db1.coll");
+ ASSERT(ls.isDbLockedForMode(dbName, MODE_IS));
+ ASSERT(ls.isDbLockedForMode(dbName, MODE_IX));
+ ASSERT(ls.isDbLockedForMode(dbName, MODE_S));
+ ASSERT(ls.isDbLockedForMode(dbName, MODE_X));
+}
- MMAPV1LockerImpl ls;
+TEST(DConcurrency, IsCollectionLocked_DB_Locked_IS) {
+ const std::string ns("db1.coll");
- Lock::DBLock dbLock(&ls, "db1", MODE_IS);
+ MMAPV1LockerImpl ls;
- {
- Lock::CollectionLock collLock(&ls, ns, MODE_IS);
+ Lock::DBLock dbLock(&ls, "db1", MODE_IS);
- ASSERT(ls.isCollectionLockedForMode(ns, MODE_IS));
- ASSERT(!ls.isCollectionLockedForMode(ns, MODE_IX));
+ {
+ Lock::CollectionLock collLock(&ls, ns, MODE_IS);
- // TODO: This is TRUE because Lock::CollectionLock converts IS lock to S
- ASSERT(ls.isCollectionLockedForMode(ns, MODE_S));
+ ASSERT(ls.isCollectionLockedForMode(ns, MODE_IS));
+ ASSERT(!ls.isCollectionLockedForMode(ns, MODE_IX));
- ASSERT(!ls.isCollectionLockedForMode(ns, MODE_X));
- }
+ // TODO: This is TRUE because Lock::CollectionLock converts IS lock to S
+ ASSERT(ls.isCollectionLockedForMode(ns, MODE_S));
- {
- Lock::CollectionLock collLock(&ls, ns, MODE_S);
+ ASSERT(!ls.isCollectionLockedForMode(ns, MODE_X));
+ }
- ASSERT(ls.isCollectionLockedForMode(ns, MODE_IS));
- ASSERT(!ls.isCollectionLockedForMode(ns, MODE_IX));
- ASSERT(ls.isCollectionLockedForMode(ns, MODE_S));
- ASSERT(!ls.isCollectionLockedForMode(ns, MODE_X));
- }
+ {
+ Lock::CollectionLock collLock(&ls, ns, MODE_S);
+
+ ASSERT(ls.isCollectionLockedForMode(ns, MODE_IS));
+ ASSERT(!ls.isCollectionLockedForMode(ns, MODE_IX));
+ ASSERT(ls.isCollectionLockedForMode(ns, MODE_S));
+ ASSERT(!ls.isCollectionLockedForMode(ns, MODE_X));
}
+}
- TEST(DConcurrency, IsCollectionLocked_DB_Locked_IX) {
- const std::string ns("db1.coll");
+TEST(DConcurrency, IsCollectionLocked_DB_Locked_IX) {
+ const std::string ns("db1.coll");
- MMAPV1LockerImpl ls;
+ MMAPV1LockerImpl ls;
- Lock::DBLock dbLock(&ls, "db1", MODE_IX);
+ Lock::DBLock dbLock(&ls, "db1", MODE_IX);
- {
- Lock::CollectionLock collLock(&ls, ns, MODE_IX);
+ {
+ Lock::CollectionLock collLock(&ls, ns, MODE_IX);
- // TODO: This is TRUE because Lock::CollectionLock converts IX lock to X
- ASSERT(ls.isCollectionLockedForMode(ns, MODE_IS));
+ // TODO: This is TRUE because Lock::CollectionLock converts IX lock to X
+ ASSERT(ls.isCollectionLockedForMode(ns, MODE_IS));
- ASSERT(ls.isCollectionLockedForMode(ns, MODE_IX));
- ASSERT(ls.isCollectionLockedForMode(ns, MODE_S));
- ASSERT(ls.isCollectionLockedForMode(ns, MODE_X));
- }
+ ASSERT(ls.isCollectionLockedForMode(ns, MODE_IX));
+ ASSERT(ls.isCollectionLockedForMode(ns, MODE_S));
+ ASSERT(ls.isCollectionLockedForMode(ns, MODE_X));
+ }
- {
- Lock::CollectionLock collLock(&ls, ns, MODE_X);
+ {
+ Lock::CollectionLock collLock(&ls, ns, MODE_X);
- ASSERT(ls.isCollectionLockedForMode(ns, MODE_IS));
- ASSERT(ls.isCollectionLockedForMode(ns, MODE_IX));
- ASSERT(ls.isCollectionLockedForMode(ns, MODE_S));
- ASSERT(ls.isCollectionLockedForMode(ns, MODE_X));
- }
+ ASSERT(ls.isCollectionLockedForMode(ns, MODE_IS));
+ ASSERT(ls.isCollectionLockedForMode(ns, MODE_IX));
+ ASSERT(ls.isCollectionLockedForMode(ns, MODE_S));
+ ASSERT(ls.isCollectionLockedForMode(ns, MODE_X));
}
+}
-} // namespace mongo
+} // namespace mongo
diff --git a/src/mongo/db/concurrency/deadlock_detection_test.cpp b/src/mongo/db/concurrency/deadlock_detection_test.cpp
index 87274255635..ce29fd37b01 100644
--- a/src/mongo/db/concurrency/deadlock_detection_test.cpp
+++ b/src/mongo/db/concurrency/deadlock_detection_test.cpp
@@ -31,161 +31,161 @@
namespace mongo {
- TEST(Deadlock, NoDeadlock) {
- const ResourceId resId(RESOURCE_DATABASE, std::string("A"));
+TEST(Deadlock, NoDeadlock) {
+ const ResourceId resId(RESOURCE_DATABASE, std::string("A"));
- LockerForTests locker1(MODE_IS);
- LockerForTests locker2(MODE_IS);
+ LockerForTests locker1(MODE_IS);
+ LockerForTests locker2(MODE_IS);
- ASSERT_EQUALS(LOCK_OK, locker1.lockBegin(resId, MODE_S));
- ASSERT_EQUALS(LOCK_OK, locker2.lockBegin(resId, MODE_S));
+ ASSERT_EQUALS(LOCK_OK, locker1.lockBegin(resId, MODE_S));
+ ASSERT_EQUALS(LOCK_OK, locker2.lockBegin(resId, MODE_S));
- DeadlockDetector wfg1(*getGlobalLockManager(), &locker1);
- ASSERT(!wfg1.check().hasCycle());
+ DeadlockDetector wfg1(*getGlobalLockManager(), &locker1);
+ ASSERT(!wfg1.check().hasCycle());
- DeadlockDetector wfg2(*getGlobalLockManager(), &locker2);
- ASSERT(!wfg2.check().hasCycle());
- }
+ DeadlockDetector wfg2(*getGlobalLockManager(), &locker2);
+ ASSERT(!wfg2.check().hasCycle());
+}
- TEST(Deadlock, Simple) {
- const ResourceId resIdA(RESOURCE_DATABASE, std::string("A"));
- const ResourceId resIdB(RESOURCE_DATABASE, std::string("B"));
+TEST(Deadlock, Simple) {
+ const ResourceId resIdA(RESOURCE_DATABASE, std::string("A"));
+ const ResourceId resIdB(RESOURCE_DATABASE, std::string("B"));
- LockerForTests locker1(MODE_IX);
- LockerForTests locker2(MODE_IX);
+ LockerForTests locker1(MODE_IX);
+ LockerForTests locker2(MODE_IX);
- ASSERT_EQUALS(LOCK_OK, locker1.lockBegin(resIdA, MODE_X));
- ASSERT_EQUALS(LOCK_OK, locker2.lockBegin(resIdB, MODE_X));
+ ASSERT_EQUALS(LOCK_OK, locker1.lockBegin(resIdA, MODE_X));
+ ASSERT_EQUALS(LOCK_OK, locker2.lockBegin(resIdB, MODE_X));
- // 1 -> 2
- ASSERT_EQUALS(LOCK_WAITING, locker1.lockBegin(resIdB, MODE_X));
+ // 1 -> 2
+ ASSERT_EQUALS(LOCK_WAITING, locker1.lockBegin(resIdB, MODE_X));
- // 2 -> 1
- ASSERT_EQUALS(LOCK_WAITING, locker2.lockBegin(resIdA, MODE_X));
+ // 2 -> 1
+ ASSERT_EQUALS(LOCK_WAITING, locker2.lockBegin(resIdA, MODE_X));
- DeadlockDetector wfg1(*getGlobalLockManager(), &locker1);
- ASSERT(wfg1.check().hasCycle());
+ DeadlockDetector wfg1(*getGlobalLockManager(), &locker1);
+ ASSERT(wfg1.check().hasCycle());
- DeadlockDetector wfg2(*getGlobalLockManager(), &locker2);
- ASSERT(wfg2.check().hasCycle());
+ DeadlockDetector wfg2(*getGlobalLockManager(), &locker2);
+ ASSERT(wfg2.check().hasCycle());
- // Cleanup, so that LockerImpl doesn't complain about leaked locks
- locker1.unlock(resIdB);
- locker2.unlock(resIdA);
- }
+ // Cleanup, so that LockerImpl doesn't complain about leaked locks
+ locker1.unlock(resIdB);
+ locker2.unlock(resIdA);
+}
- TEST(Deadlock, SimpleUpgrade) {
- const ResourceId resId(RESOURCE_DATABASE, std::string("A"));
+TEST(Deadlock, SimpleUpgrade) {
+ const ResourceId resId(RESOURCE_DATABASE, std::string("A"));
- LockerForTests locker1(MODE_IX);
- LockerForTests locker2(MODE_IX);
+ LockerForTests locker1(MODE_IX);
+ LockerForTests locker2(MODE_IX);
- // Both acquire lock in intent mode
- ASSERT_EQUALS(LOCK_OK, locker1.lockBegin(resId, MODE_IX));
- ASSERT_EQUALS(LOCK_OK, locker2.lockBegin(resId, MODE_IX));
+ // Both acquire lock in intent mode
+ ASSERT_EQUALS(LOCK_OK, locker1.lockBegin(resId, MODE_IX));
+ ASSERT_EQUALS(LOCK_OK, locker2.lockBegin(resId, MODE_IX));
- // Both try to upgrade
- ASSERT_EQUALS(LOCK_WAITING, locker1.lockBegin(resId, MODE_X));
- ASSERT_EQUALS(LOCK_WAITING, locker2.lockBegin(resId, MODE_X));
+ // Both try to upgrade
+ ASSERT_EQUALS(LOCK_WAITING, locker1.lockBegin(resId, MODE_X));
+ ASSERT_EQUALS(LOCK_WAITING, locker2.lockBegin(resId, MODE_X));
- DeadlockDetector wfg1(*getGlobalLockManager(), &locker1);
- ASSERT(wfg1.check().hasCycle());
+ DeadlockDetector wfg1(*getGlobalLockManager(), &locker1);
+ ASSERT(wfg1.check().hasCycle());
- DeadlockDetector wfg2(*getGlobalLockManager(), &locker2);
- ASSERT(wfg2.check().hasCycle());
+ DeadlockDetector wfg2(*getGlobalLockManager(), &locker2);
+ ASSERT(wfg2.check().hasCycle());
- // Cleanup, so that LockerImpl doesn't complain about leaked locks
- locker1.unlock(resId);
- locker2.unlock(resId);
- }
+ // Cleanup, so that LockerImpl doesn't complain about leaked locks
+ locker1.unlock(resId);
+ locker2.unlock(resId);
+}
- TEST(Deadlock, Indirect) {
- const ResourceId resIdA(RESOURCE_DATABASE, std::string("A"));
- const ResourceId resIdB(RESOURCE_DATABASE, std::string("B"));
+TEST(Deadlock, Indirect) {
+ const ResourceId resIdA(RESOURCE_DATABASE, std::string("A"));
+ const ResourceId resIdB(RESOURCE_DATABASE, std::string("B"));
- LockerForTests locker1(MODE_IX);
- LockerForTests locker2(MODE_IX);
- LockerForTests lockerIndirect(MODE_IX);
+ LockerForTests locker1(MODE_IX);
+ LockerForTests locker2(MODE_IX);
+ LockerForTests lockerIndirect(MODE_IX);
- ASSERT_EQUALS(LOCK_OK, locker1.lockBegin(resIdA, MODE_X));
- ASSERT_EQUALS(LOCK_OK, locker2.lockBegin(resIdB, MODE_X));
+ ASSERT_EQUALS(LOCK_OK, locker1.lockBegin(resIdA, MODE_X));
+ ASSERT_EQUALS(LOCK_OK, locker2.lockBegin(resIdB, MODE_X));
- // 1 -> 2
- ASSERT_EQUALS(LOCK_WAITING, locker1.lockBegin(resIdB, MODE_X));
+ // 1 -> 2
+ ASSERT_EQUALS(LOCK_WAITING, locker1.lockBegin(resIdB, MODE_X));
- // 2 -> 1
- ASSERT_EQUALS(LOCK_WAITING, locker2.lockBegin(resIdA, MODE_X));
+ // 2 -> 1
+ ASSERT_EQUALS(LOCK_WAITING, locker2.lockBegin(resIdA, MODE_X));
- // 3 -> 2
- ASSERT_EQUALS(LOCK_WAITING, lockerIndirect.lockBegin(resIdA, MODE_X));
+ // 3 -> 2
+ ASSERT_EQUALS(LOCK_WAITING, lockerIndirect.lockBegin(resIdA, MODE_X));
- DeadlockDetector wfg1(*getGlobalLockManager(), &locker1);
- ASSERT(wfg1.check().hasCycle());
+ DeadlockDetector wfg1(*getGlobalLockManager(), &locker1);
+ ASSERT(wfg1.check().hasCycle());
- DeadlockDetector wfg2(*getGlobalLockManager(), &locker2);
- ASSERT(wfg2.check().hasCycle());
+ DeadlockDetector wfg2(*getGlobalLockManager(), &locker2);
+ ASSERT(wfg2.check().hasCycle());
- // Indirect locker should not report the cycle since it does not participate in it
- DeadlockDetector wfgIndirect(*getGlobalLockManager(), &lockerIndirect);
- ASSERT(!wfgIndirect.check().hasCycle());
+ // Indirect locker should not report the cycle since it does not participate in it
+ DeadlockDetector wfgIndirect(*getGlobalLockManager(), &lockerIndirect);
+ ASSERT(!wfgIndirect.check().hasCycle());
- // Cleanup, so that LockerImpl doesn't complain about leaked locks
- locker1.unlock(resIdB);
- locker2.unlock(resIdA);
- }
+ // Cleanup, so that LockerImpl doesn't complain about leaked locks
+ locker1.unlock(resIdB);
+ locker2.unlock(resIdA);
+}
- TEST(Deadlock, IndirectWithUpgrade) {
- const ResourceId resIdFlush(RESOURCE_MMAPV1_FLUSH, 1);
- const ResourceId resIdDb(RESOURCE_DATABASE, 2);
+TEST(Deadlock, IndirectWithUpgrade) {
+ const ResourceId resIdFlush(RESOURCE_MMAPV1_FLUSH, 1);
+ const ResourceId resIdDb(RESOURCE_DATABASE, 2);
- LockerForTests flush(MODE_IX);
- LockerForTests reader(MODE_IS);
- LockerForTests writer(MODE_IX);
+ LockerForTests flush(MODE_IX);
+ LockerForTests reader(MODE_IS);
+ LockerForTests writer(MODE_IX);
- // This sequence simulates the deadlock which occurs during flush
- ASSERT_EQUALS(LOCK_OK, writer.lockBegin(resIdFlush, MODE_IX));
- ASSERT_EQUALS(LOCK_OK, writer.lockBegin(resIdDb, MODE_X));
+ // This sequence simulates the deadlock which occurs during flush
+ ASSERT_EQUALS(LOCK_OK, writer.lockBegin(resIdFlush, MODE_IX));
+ ASSERT_EQUALS(LOCK_OK, writer.lockBegin(resIdDb, MODE_X));
- ASSERT_EQUALS(LOCK_OK, reader.lockBegin(resIdFlush, MODE_IS));
+ ASSERT_EQUALS(LOCK_OK, reader.lockBegin(resIdFlush, MODE_IS));
- // R -> W
- ASSERT_EQUALS(LOCK_WAITING, reader.lockBegin(resIdDb, MODE_S));
+ // R -> W
+ ASSERT_EQUALS(LOCK_WAITING, reader.lockBegin(resIdDb, MODE_S));
- // R -> W
- // F -> W
- ASSERT_EQUALS(LOCK_WAITING, flush.lockBegin(resIdFlush, MODE_S));
+ // R -> W
+ // F -> W
+ ASSERT_EQUALS(LOCK_WAITING, flush.lockBegin(resIdFlush, MODE_S));
- // W yields its flush lock, so now f is granted in mode S
- //
- // R -> W
- writer.unlock(resIdFlush);
+ // W yields its flush lock, so now f is granted in mode S
+ //
+ // R -> W
+ writer.unlock(resIdFlush);
- // Flush thread upgrades S -> X in order to do the remap
- //
- // R -> W
- // F -> R
- ASSERT_EQUALS(LOCK_WAITING, flush.lockBegin(resIdFlush, MODE_X));
+ // Flush thread upgrades S -> X in order to do the remap
+ //
+ // R -> W
+ // F -> R
+ ASSERT_EQUALS(LOCK_WAITING, flush.lockBegin(resIdFlush, MODE_X));
- // W comes back from the commit and tries to re-acquire the flush lock
- //
- // R -> W
- // F -> R
- // W -> F
- ASSERT_EQUALS(LOCK_WAITING, writer.lockBegin(resIdFlush, MODE_IX));
+ // W comes back from the commit and tries to re-acquire the flush lock
+ //
+ // R -> W
+ // F -> R
+ // W -> F
+ ASSERT_EQUALS(LOCK_WAITING, writer.lockBegin(resIdFlush, MODE_IX));
- // Run deadlock detection from the point of view of each of the involved lockers
- DeadlockDetector wfgF(*getGlobalLockManager(), &flush);
- ASSERT(wfgF.check().hasCycle());
+ // Run deadlock detection from the point of view of each of the involved lockers
+ DeadlockDetector wfgF(*getGlobalLockManager(), &flush);
+ ASSERT(wfgF.check().hasCycle());
- DeadlockDetector wfgR(*getGlobalLockManager(), &reader);
- ASSERT(wfgR.check().hasCycle());
+ DeadlockDetector wfgR(*getGlobalLockManager(), &reader);
+ ASSERT(wfgR.check().hasCycle());
- DeadlockDetector wfgW(*getGlobalLockManager(), &writer);
- ASSERT(wfgW.check().hasCycle());
+ DeadlockDetector wfgW(*getGlobalLockManager(), &writer);
+ ASSERT(wfgW.check().hasCycle());
- // Cleanup, so that LockerImpl doesn't complain about leaked locks
- flush.unlock(resIdFlush);
- writer.unlock(resIdFlush);
- }
+ // Cleanup, so that LockerImpl doesn't complain about leaked locks
+ flush.unlock(resIdFlush);
+ writer.unlock(resIdFlush);
+}
-} // namespace mongo
+} // namespace mongo
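
The tests above build wait-for cycles by making two lockers acquire the same resources in opposite orders. For contrast, a sketch of the conflict-without-deadlock case, where both lockers respect a single acquisition order (same test helpers as above; not part of this commit):

    // Sketch only; not part of this commit. Both lockers take resIdA first,
    // so one simply waits and no cycle forms in the wait-for graph.
    const ResourceId resIdA(RESOURCE_DATABASE, std::string("A"));

    LockerForTests locker1(MODE_IX);
    LockerForTests locker2(MODE_IX);

    ASSERT_EQUALS(LOCK_OK, locker1.lockBegin(resIdA, MODE_X));
    ASSERT_EQUALS(LOCK_WAITING, locker2.lockBegin(resIdA, MODE_X));

    DeadlockDetector wfg2(*getGlobalLockManager(), &locker2);
    ASSERT(!wfg2.check().hasCycle());

    // Cleanup, so that LockerImpl doesn't complain about leaked locks
    locker2.unlock(resIdA);
    locker1.unlock(resIdA);
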
diff --git a/src/mongo/db/concurrency/fast_map_noalloc.h b/src/mongo/db/concurrency/fast_map_noalloc.h
index e7077a56222..cc2ccdb64ae 100644
--- a/src/mongo/db/concurrency/fast_map_noalloc.h
+++ b/src/mongo/db/concurrency/fast_map_noalloc.h
@@ -32,244 +32,229 @@
#include "mongo/util/assert_util.h"
namespace mongo {
-
+
+/**
+ * NOTE: This structure should not be used for anything other than the Lock Manager.
+ *
+ * This is a simple implementation of an unordered associative array with minimal
+ * functionality, used by the lock manager. It keeps a small number of memory entries to store
+ * values, in order to avoid memory allocations, which dominate the cost of the lock manager
+ * calls by a wide margin.
+ *
+ * This class is not thread-safe.
+ */
+template <class KeyType, class ValueType, int PreallocCount>
+class FastMapNoAlloc {
+public:
/**
- * NOTE: This structure should not be used for anything other than the Lock Manager.
- *
- * This is a simple implementation of an unordered associative array with minimal
- * functionality, used by the lock manager. It keeps a small number of memory entries to store
- * values, in order to avoid memory allocations, which dominate the cost of the lock manager
- * calls by a wide margin.
- *
- * This class is not thread-safe.
+ * Forward-only iterator. Does not synchronize with the underlying collection in any way.
+ * In other words, do not modify the collection while there is an open iterator on it.
*/
- template <class KeyType, class ValueType, int PreallocCount>
- class FastMapNoAlloc {
+ template <class MapType, class IteratorValueType>
+ class IteratorImpl {
public:
+ IteratorImpl(const IteratorImpl& other) : _map(other._map), _idx(other._idx) {}
- /**
- * Forward-only iterator. Does not synchronize with the underlying collection in any way.
- * In other words, do not modify the collection while there is an open iterator on it.
- */
- template<class MapType, class IteratorValueType>
- class IteratorImpl {
- public:
-
- IteratorImpl(const IteratorImpl& other)
- : _map(other._map),
- _idx(other._idx) {
-
- }
+ //
+ // Operators
+ //
- //
- // Operators
- //
+ bool operator!() const {
+ return finished();
+ }
- bool operator!() const {
- return finished();
- }
+ IteratorValueType& operator*() const {
+ return *objAddr();
+ }
- IteratorValueType& operator*() const {
- return *objAddr();
- }
+ IteratorValueType* operator->() const {
+ return objAddr();
+ }
- IteratorValueType* operator->() const {
- return objAddr();
- }
+ //
+ // Other methods
+ //
- //
- // Other methods
- //
+ /**
+ * Returns whether the iterator has been exhausted through calls to next. This value
+ * can be used to determine whether a previous call to find has found something.
+ */
+ bool finished() const {
+ return (MONGO_unlikely(_idx == PreallocCount));
+ }
- /**
- * Returns whether the iterator has been exhausted through calls to next. This value
- * can be used to determine whether a previous call to find has found something.
- */
- bool finished() const {
- return (MONGO_unlikely(_idx == PreallocCount));
- }
+ /**
+ * Returns the address of the object at the current position. Cannot be called with an
+ * uninitialized iterator, or iterator which has reached the end.
+ */
+ IteratorValueType* objAddr() const {
+ invariant(!finished());
- /**
- * Returns the address of the object at the current position. Cannot be called with an
- * uninitialized iterator, or iterator which has reached the end.
- */
- IteratorValueType* objAddr() const {
- invariant(!finished());
+ return &_map._fastAccess[_idx].value;
+ }
- return &_map._fastAccess[_idx].value;
- }
+ /**
+ * Returns the key of the value at the current position. Cannot be called with an
+ * uninitialized iterator or iterator which has reached the end.
+ */
+ const KeyType& key() const {
+ invariant(!finished());
- /**
- * Returns the key of the value at the current position. Cannot be called with an
- * uninitialized iterator or iterator which has reached the end.
- */
- const KeyType& key() const {
- invariant(!finished());
+ return _map._fastAccess[_idx].key;
+ }
- return _map._fastAccess[_idx].key;
- }
+ /**
+ * Advances the iterator to the next entry. No particular order of iteration is
+ * guaranteed.
+ */
+ void next() {
+ invariant(!finished());
- /**
- * Advances the iterator to the next entry. No particular order of iteration is
- * guaranteed.
- */
- void next() {
- invariant(!finished());
-
- while (++_idx < PreallocCount) {
- if (_map._fastAccess[_idx].inUse) {
- return;
- }
+ while (++_idx < PreallocCount) {
+ if (_map._fastAccess[_idx].inUse) {
+ return;
}
}
+ }
- /**
- * Removes the element at the current position and moves the iterator to the next,
- * which might be the last entry on the map.
- */
- void remove() {
- invariant(!finished());
- invariant(_map._fastAccess[_idx].inUse);
-
- _map._fastAccess[_idx].inUse = false;
- _map._fastAccessUsedSize--;
-
- next();
- }
-
-
- private:
-
- friend class FastMapNoAlloc<KeyType, ValueType, PreallocCount>;
+ /**
+ * Removes the element at the current position and moves the iterator to the next,
+ * which might be the last entry on the map.
+ */
+ void remove() {
+ invariant(!finished());
+ invariant(_map._fastAccess[_idx].inUse);
- // Used for iteration of the complete map
- IteratorImpl(MapType& map)
- : _map(map),
- _idx(-1) {
+ _map._fastAccess[_idx].inUse = false;
+ _map._fastAccessUsedSize--;
- next();
- }
+ next();
+ }
- // Used for iterator starting at a position
- IteratorImpl(MapType& map, int idx)
- : _map(map),
- _idx(idx) {
- invariant(_idx >= 0);
- }
+ private:
+ friend class FastMapNoAlloc<KeyType, ValueType, PreallocCount>;
- // Used for iteration starting at a particular key
- IteratorImpl(MapType& map, const KeyType& key)
- : _map(map),
- _idx(0) {
+ // Used for iteration of the complete map
+ IteratorImpl(MapType& map) : _map(map), _idx(-1) {
+ next();
+ }
- while (_idx < PreallocCount) {
- if (_map._fastAccess[_idx].inUse && (_map._fastAccess[_idx].key == key)) {
- return;
- }
+ // Used for iterator starting at a position
+ IteratorImpl(MapType& map, int idx) : _map(map), _idx(idx) {
+ invariant(_idx >= 0);
+ }
- ++_idx;
+ // Used for iteration starting at a particular key
+ IteratorImpl(MapType& map, const KeyType& key) : _map(map), _idx(0) {
+ while (_idx < PreallocCount) {
+ if (_map._fastAccess[_idx].inUse && (_map._fastAccess[_idx].key == key)) {
+ return;
}
- }
+ ++_idx;
+ }
+ }
- // The map being iterated on
- MapType& _map;
-
- // Index to the current entry being iterated
- int _idx;
- };
+ // The map being iterated on
+ MapType& _map;
- typedef IteratorImpl<FastMapNoAlloc<KeyType, ValueType, PreallocCount>,
- ValueType> Iterator;
+ // Index to the current entry being iterated
+ int _idx;
+ };
- typedef IteratorImpl<const FastMapNoAlloc<KeyType, ValueType, PreallocCount>,
- const ValueType> ConstIterator;
+ typedef IteratorImpl<FastMapNoAlloc<KeyType, ValueType, PreallocCount>, ValueType> Iterator;
- FastMapNoAlloc() : _fastAccess(),
- _fastAccessUsedSize(0) { }
+ typedef IteratorImpl<const FastMapNoAlloc<KeyType, ValueType, PreallocCount>, const ValueType>
+ ConstIterator;
- /**
- * Inserts the specified entry in the map and returns a reference to the memory for the
- * entry just inserted.
- */
- Iterator insert(const KeyType& key) {
- // Find the first unused slot. This could probably be even further optimized by adding
- // a field pointing to the first unused location.
- int idx = 0;
- for (; _fastAccess[idx].inUse; idx++);
- invariant(idx < PreallocCount);
+ FastMapNoAlloc() : _fastAccess(), _fastAccessUsedSize(0) {}
- _fastAccess[idx].inUse = true;
- _fastAccess[idx].key = key;
- _fastAccessUsedSize++;
+ /**
+ * Inserts the specified entry in the map and returns a reference to the memory for the
+ * entry just inserted.
+ */
+ Iterator insert(const KeyType& key) {
+ // Find the first unused slot. This could probably be even further optimized by adding
+ // a field pointing to the first unused location.
+ int idx = 0;
+ for (; _fastAccess[idx].inUse; idx++)
+ ;
- return Iterator(*this, idx);
- }
+ invariant(idx < PreallocCount);
- /**
- * Returns an iterator to the first element in the map.
- */
- Iterator begin() {
- return Iterator(*this);
- }
+ _fastAccess[idx].inUse = true;
+ _fastAccess[idx].key = key;
+ _fastAccessUsedSize++;
- ConstIterator begin() const {
- return ConstIterator(*this);
- }
+ return Iterator(*this, idx);
+ }
- /**
- * Returns an iterator pointing to the first position, which has entry with the specified
- * key. Before dereferencing the returned iterator, it should be checked for validity using
- * the finished() method or the ! operator. If no element was found, finished() will return
- * false.
- *
- * While it is allowed to call next() on the returned iterator, this is not very useful,
- * because the container is not ordered.
- */
- Iterator find(const KeyType& key) {
- return Iterator(*this, key);
- }
+ /**
+ * Returns an iterator to the first element in the map.
+ */
+ Iterator begin() {
+ return Iterator(*this);
+ }
- ConstIterator find(const KeyType& key) const {
- return ConstIterator(*this, key);
- }
+ ConstIterator begin() const {
+ return ConstIterator(*this);
+ }
- int size() const { return _fastAccessUsedSize; }
- bool empty() const { return (_fastAccessUsedSize == 0); }
+ /**
+     * Returns an iterator pointing to the first position which has an entry with the
+     * specified key. Before dereferencing the returned iterator, check it for validity
+     * using the finished() method or the ! operator. If no element was found, finished()
+     * will return true.
+ *
+ * While it is allowed to call next() on the returned iterator, this is not very useful,
+ * because the container is not ordered.
+ */
+ Iterator find(const KeyType& key) {
+ return Iterator(*this, key);
+ }
- private:
+ ConstIterator find(const KeyType& key) const {
+ return ConstIterator(*this, key);
+ }
- // Empty and very large maps do not make sense since there will be no performance gain, so
- // disallow them.
- BOOST_STATIC_ASSERT(PreallocCount > 0);
- BOOST_STATIC_ASSERT(PreallocCount < 32);
+ int size() const {
+ return _fastAccessUsedSize;
+ }
+ bool empty() const {
+ return (_fastAccessUsedSize == 0);
+ }
- // Iterator accesses the map directly
- friend class IteratorImpl<FastMapNoAlloc<KeyType, ValueType, PreallocCount>,
- ValueType>;
+private:
+ // Empty and very large maps do not make sense since there will be no performance gain, so
+ // disallow them.
+ BOOST_STATIC_ASSERT(PreallocCount > 0);
+ BOOST_STATIC_ASSERT(PreallocCount < 32);
- friend class IteratorImpl<const FastMapNoAlloc<KeyType, ValueType, PreallocCount>,
- const ValueType>;
+ // Iterator accesses the map directly
+ friend class IteratorImpl<FastMapNoAlloc<KeyType, ValueType, PreallocCount>, ValueType>;
+ friend class IteratorImpl<const FastMapNoAlloc<KeyType, ValueType, PreallocCount>,
+ const ValueType>;
- struct PreallocEntry {
- PreallocEntry() : inUse(false) { }
- bool inUse;
+ struct PreallocEntry {
+ PreallocEntry() : inUse(false) {}
- KeyType key;
- ValueType value;
- };
+ bool inUse;
- // Pre-allocated memory for entries
- PreallocEntry _fastAccess[PreallocCount];
- int _fastAccessUsedSize;
+ KeyType key;
+ ValueType value;
};
-} // namespace mongo
+ // Pre-allocated memory for entries
+ PreallocEntry _fastAccess[PreallocCount];
+ int _fastAccessUsedSize;
+};
+
+} // namespace mongo
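
For reference, a minimal usage sketch of the FastMapNoAlloc API reformatted above. This is
not part of the patch; it assumes fast_map_noalloc.h and ResourceId (from
lock_manager_defs.h) are available, and Entry is a stand-in value type, like TestStruct in
the tests below. Only calls that the tests exercise are used:

    #include "mongo/db/concurrency/fast_map_noalloc.h"
    #include "mongo/db/concurrency/lock_manager_defs.h"

    namespace mongo {

    struct Entry {
        int id;
    };

    void fastMapSketch() {
        // Up to 4 entries, all preallocated inline; no heap allocation ever happens.
        FastMapNoAlloc<ResourceId, Entry, 4> map;

        // insert() hands back an iterator to the newly occupied slot.
        map.insert(ResourceId(RESOURCE_COLLECTION, 1))->id = 101;

        // find() must be validity-checked before dereferencing: finished() is true
        // (and operator! fires) when no entry matched.
        FastMapNoAlloc<ResourceId, Entry, 4>::Iterator it =
            map.find(ResourceId(RESOURCE_COLLECTION, 1));
        if (!it.finished()) {
            invariant(it->id == 101);
            it.remove();  // frees the slot and advances the iterator
        }

        invariant(map.empty());
    }

    }  // namespace mongo
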
diff --git a/src/mongo/db/concurrency/fast_map_noalloc_test.cpp b/src/mongo/db/concurrency/fast_map_noalloc_test.cpp
index 70810cb6b5a..e9a013427df 100644
--- a/src/mongo/db/concurrency/fast_map_noalloc_test.cpp
+++ b/src/mongo/db/concurrency/fast_map_noalloc_test.cpp
@@ -36,127 +36,125 @@
namespace mongo {
- struct TestStruct {
-
- void initNew(int newId, const std::string& newValue) {
- id = newId;
- value = newValue;
- }
-
- int id;
- std::string value;
- };
+struct TestStruct {
+ void initNew(int newId, const std::string& newValue) {
+ id = newId;
+ value = newValue;
+ }
- typedef class FastMapNoAlloc<ResourceId, TestStruct, 6> TestFastMapNoAlloc;
+ int id;
+ std::string value;
+};
+typedef class FastMapNoAlloc<ResourceId, TestStruct, 6> TestFastMapNoAlloc;
- TEST(FastMapNoAlloc, Empty) {
- TestFastMapNoAlloc map;
- ASSERT(map.empty());
- TestFastMapNoAlloc::Iterator it = map.begin();
- ASSERT(it.finished());
- }
+TEST(FastMapNoAlloc, Empty) {
+ TestFastMapNoAlloc map;
+ ASSERT(map.empty());
- TEST(FastMapNoAlloc, NotEmpty) {
- TestFastMapNoAlloc map;
+ TestFastMapNoAlloc::Iterator it = map.begin();
+ ASSERT(it.finished());
+}
- map.insert(ResourceId(RESOURCE_COLLECTION, 1))->initNew(101, "Item101");
- map.insert(ResourceId(RESOURCE_COLLECTION, 2))->initNew(102, "Item102");
- ASSERT(!map.empty());
+TEST(FastMapNoAlloc, NotEmpty) {
+ TestFastMapNoAlloc map;
- TestFastMapNoAlloc::Iterator it = map.begin();
- ASSERT(!it.finished());
- ASSERT(!!it);
+ map.insert(ResourceId(RESOURCE_COLLECTION, 1))->initNew(101, "Item101");
+ map.insert(ResourceId(RESOURCE_COLLECTION, 2))->initNew(102, "Item102");
+ ASSERT(!map.empty());
- ASSERT(it->id == 101);
- ASSERT(it->value == "Item101");
+ TestFastMapNoAlloc::Iterator it = map.begin();
+ ASSERT(!it.finished());
+ ASSERT(!!it);
- it.next();
- ASSERT(!it.finished());
- ASSERT(!!it);
+ ASSERT(it->id == 101);
+ ASSERT(it->value == "Item101");
- ASSERT(it->id == 102);
- ASSERT(it->value == "Item102");
+ it.next();
+ ASSERT(!it.finished());
+ ASSERT(!!it);
- // We are at the last element
- it.next();
- ASSERT(it.finished());
- ASSERT(!it);
- }
+ ASSERT(it->id == 102);
+ ASSERT(it->value == "Item102");
- TEST(FastMapNoAlloc, FindNonExisting) {
- TestFastMapNoAlloc map;
+ // We are at the last element
+ it.next();
+ ASSERT(it.finished());
+ ASSERT(!it);
+}
- ASSERT(!map.find(ResourceId(RESOURCE_COLLECTION, 0)));
- }
+TEST(FastMapNoAlloc, FindNonExisting) {
+ TestFastMapNoAlloc map;
- TEST(FastMapNoAlloc, FindAndRemove) {
- TestFastMapNoAlloc map;
+ ASSERT(!map.find(ResourceId(RESOURCE_COLLECTION, 0)));
+}
- for (int i = 0; i < 6; i++) {
- map.insert(ResourceId(RESOURCE_COLLECTION, i))->initNew(
- i, "Item" + boost::lexical_cast<std::string>(i));
- }
+TEST(FastMapNoAlloc, FindAndRemove) {
+ TestFastMapNoAlloc map;
- for (int i = 0; i < 6; i++) {
- ASSERT(!map.find(ResourceId(RESOURCE_COLLECTION, i)).finished());
+ for (int i = 0; i < 6; i++) {
+ map.insert(ResourceId(RESOURCE_COLLECTION, i))
+ ->initNew(i, "Item" + boost::lexical_cast<std::string>(i));
+ }
- ASSERT_EQUALS(i, map.find(ResourceId(RESOURCE_COLLECTION, i))->id);
+ for (int i = 0; i < 6; i++) {
+ ASSERT(!map.find(ResourceId(RESOURCE_COLLECTION, i)).finished());
- ASSERT_EQUALS("Item" + boost::lexical_cast<std::string>(i),
- map.find(ResourceId(RESOURCE_COLLECTION, i))->value);
- }
+ ASSERT_EQUALS(i, map.find(ResourceId(RESOURCE_COLLECTION, i))->id);
- // Remove a middle entry
- map.find(ResourceId(RESOURCE_COLLECTION, 2)).remove();
- ASSERT(!map.find(ResourceId(RESOURCE_COLLECTION, 2)));
+ ASSERT_EQUALS("Item" + boost::lexical_cast<std::string>(i),
+ map.find(ResourceId(RESOURCE_COLLECTION, i))->value);
+ }
- // Remove entry after first
- map.find(ResourceId(RESOURCE_COLLECTION, 1)).remove();
- ASSERT(!map.find(ResourceId(RESOURCE_COLLECTION, 1)));
+ // Remove a middle entry
+ map.find(ResourceId(RESOURCE_COLLECTION, 2)).remove();
+ ASSERT(!map.find(ResourceId(RESOURCE_COLLECTION, 2)));
- // Remove entry before last
- map.find(ResourceId(RESOURCE_COLLECTION, 4)).remove();
- ASSERT(!map.find(ResourceId(RESOURCE_COLLECTION, 4)));
+ // Remove entry after first
+ map.find(ResourceId(RESOURCE_COLLECTION, 1)).remove();
+ ASSERT(!map.find(ResourceId(RESOURCE_COLLECTION, 1)));
- // Remove first entry
- map.find(ResourceId(RESOURCE_COLLECTION, 0)).remove();
- ASSERT(!map.find(ResourceId(RESOURCE_COLLECTION, 0)));
+ // Remove entry before last
+ map.find(ResourceId(RESOURCE_COLLECTION, 4)).remove();
+ ASSERT(!map.find(ResourceId(RESOURCE_COLLECTION, 4)));
- // Remove last entry
- map.find(ResourceId(RESOURCE_COLLECTION, 5)).remove();
- ASSERT(!map.find(ResourceId(RESOURCE_COLLECTION, 5)));
+ // Remove first entry
+ map.find(ResourceId(RESOURCE_COLLECTION, 0)).remove();
+ ASSERT(!map.find(ResourceId(RESOURCE_COLLECTION, 0)));
- // Remove final entry
- map.find(ResourceId(RESOURCE_COLLECTION, 3)).remove();
- ASSERT(!map.find(ResourceId(RESOURCE_COLLECTION, 3)));
- }
+ // Remove last entry
+ map.find(ResourceId(RESOURCE_COLLECTION, 5)).remove();
+ ASSERT(!map.find(ResourceId(RESOURCE_COLLECTION, 5)));
- TEST(FastMapNoAlloc, RemoveAll) {
- TestFastMapNoAlloc map;
- unordered_map<ResourceId, TestStruct> checkMap;
+ // Remove final entry
+ map.find(ResourceId(RESOURCE_COLLECTION, 3)).remove();
+ ASSERT(!map.find(ResourceId(RESOURCE_COLLECTION, 3)));
+}
- for (int i = 1; i <= 6; i++) {
- map.insert(ResourceId(RESOURCE_COLLECTION, i))->initNew(
- i, "Item" + boost::lexical_cast<std::string>(i));
+TEST(FastMapNoAlloc, RemoveAll) {
+ TestFastMapNoAlloc map;
+ unordered_map<ResourceId, TestStruct> checkMap;
- checkMap[ResourceId(RESOURCE_COLLECTION, i)].initNew(
- i, "Item" + boost::lexical_cast<std::string>(i));
- }
+ for (int i = 1; i <= 6; i++) {
+ map.insert(ResourceId(RESOURCE_COLLECTION, i))
+ ->initNew(i, "Item" + boost::lexical_cast<std::string>(i));
- TestFastMapNoAlloc::Iterator it = map.begin();
- while (!it.finished()) {
- ASSERT_EQUALS(it->id, checkMap[it.key()].id);
- ASSERT_EQUALS(
- "Item" + boost::lexical_cast<std::string>(it->id), checkMap[it.key()].value);
+ checkMap[ResourceId(RESOURCE_COLLECTION, i)].initNew(
+ i, "Item" + boost::lexical_cast<std::string>(i));
+ }
- checkMap.erase(it.key());
- it.remove();
- }
+ TestFastMapNoAlloc::Iterator it = map.begin();
+ while (!it.finished()) {
+ ASSERT_EQUALS(it->id, checkMap[it.key()].id);
+ ASSERT_EQUALS("Item" + boost::lexical_cast<std::string>(it->id), checkMap[it.key()].value);
- ASSERT(map.empty());
- ASSERT(checkMap.empty());
+ checkMap.erase(it.key());
+ it.remove();
}
-} // namespace mongo
+ ASSERT(map.empty());
+ ASSERT(checkMap.empty());
+}
+
+} // namespace mongo
diff --git a/src/mongo/db/concurrency/lock_manager.cpp b/src/mongo/db/concurrency/lock_manager.cpp
index 7a0e094ea30..c82e2d0ee5c 100644
--- a/src/mongo/db/concurrency/lock_manager.cpp
+++ b/src/mongo/db/concurrency/lock_manager.cpp
@@ -43,997 +43,933 @@
namespace mongo {
- using std::string;
+using std::string;
namespace {
- /**
- * Map of conflicts. 'LockConflictsTable[newMode] & existingMode != 0' means that a new request
- * with the given 'newMode' conflicts with an existing request with mode 'existingMode'.
- */
- static const int LockConflictsTable[] = {
- // MODE_NONE
- 0,
+/**
+ * Map of conflicts. '(LockConflictsTable[newMode] & (1 << existingMode)) != 0' means that a
+ * new request with the given 'newMode' conflicts with an existing request held in mode
+ * 'existingMode'.
+ */
+static const int LockConflictsTable[] = {
+ // MODE_NONE
+ 0,
- // MODE_IS
- (1 << MODE_X),
+ // MODE_IS
+ (1 << MODE_X),
- // MODE_IX
- (1 << MODE_S) | (1 << MODE_X),
+ // MODE_IX
+ (1 << MODE_S) | (1 << MODE_X),
- // MODE_S
- (1 << MODE_IX) | (1 << MODE_X),
+ // MODE_S
+ (1 << MODE_IX) | (1 << MODE_X),
- // MODE_X
- (1 << MODE_S) | (1 << MODE_X) | (1 << MODE_IS) | (1 << MODE_IX),
- };
+ // MODE_X
+ (1 << MODE_S) | (1 << MODE_X) | (1 << MODE_IS) | (1 << MODE_IX),
+};
- // Mask of modes
- const uint64_t intentModes = (1 << MODE_IS) | (1<< MODE_IX);
+// Mask of the intent lock modes
+const uint64_t intentModes = (1 << MODE_IS) | (1 << MODE_IX);
- // Ensure we do not add new modes without updating the conflicts table
- BOOST_STATIC_ASSERT(
- (sizeof(LockConflictsTable) / sizeof(LockConflictsTable[0])) == LockModesCount);
+// Ensure we do not add new modes without updating the conflicts table
+BOOST_STATIC_ASSERT((sizeof(LockConflictsTable) / sizeof(LockConflictsTable[0])) == LockModesCount);
- /**
- * Maps the mode id to a string.
- */
- static const char* LockModeNames[] = {
- "NONE", "IS", "IX", "S", "X"
- };
+/**
+ * Maps the mode id to a string.
+ */
+static const char* LockModeNames[] = {"NONE", "IS", "IX", "S", "X"};
- static const char* LegacyLockModeNames[] = {
- "", "r", "w", "R", "W"
- };
+static const char* LegacyLockModeNames[] = {"", "r", "w", "R", "W"};
- // Ensure we do not add new modes without updating the names array
- BOOST_STATIC_ASSERT((sizeof(LockModeNames) / sizeof(LockModeNames[0])) == LockModesCount);
- BOOST_STATIC_ASSERT(
- (sizeof(LegacyLockModeNames) / sizeof(LegacyLockModeNames[0])) == LockModesCount);
+// Ensure we do not add new modes without updating the names array
+BOOST_STATIC_ASSERT((sizeof(LockModeNames) / sizeof(LockModeNames[0])) == LockModesCount);
+BOOST_STATIC_ASSERT((sizeof(LegacyLockModeNames) / sizeof(LegacyLockModeNames[0])) ==
+ LockModesCount);
- // Helper functions for the lock modes
- bool conflicts(LockMode newMode, uint32_t existingModesMask) {
- return (LockConflictsTable[newMode] & existingModesMask) != 0;
- }
+// Helper functions for the lock modes
+bool conflicts(LockMode newMode, uint32_t existingModesMask) {
+ return (LockConflictsTable[newMode] & existingModesMask) != 0;
+}
- uint32_t modeMask(LockMode mode) {
- return 1 << mode;
- }
+uint32_t modeMask(LockMode mode) {
+ return 1 << mode;
+}
- /**
- * Maps the resource id to a human-readable string.
- */
- static const char* ResourceTypeNames[] = {
- "Invalid",
- "Global",
- "MMAPV1Journal",
- "Database",
- "Collection",
- "Metadata",
- };
+/**
+ * Maps the resource id to a human-readable string.
+ */
+static const char* ResourceTypeNames[] = {
+ "Invalid", "Global", "MMAPV1Journal", "Database", "Collection", "Metadata",
+};
- // Ensure we do not add new types without updating the names array
- BOOST_STATIC_ASSERT(
- (sizeof(ResourceTypeNames) / sizeof(ResourceTypeNames[0])) == ResourceTypesCount);
+// Ensure we do not add new types without updating the names array
+BOOST_STATIC_ASSERT((sizeof(ResourceTypeNames) / sizeof(ResourceTypeNames[0])) ==
+ ResourceTypesCount);
- /**
- * Maps the LockRequest status to a human-readable string.
- */
- static const char* LockRequestStatusNames[] = {
- "new",
- "granted",
- "waiting",
- "converting",
- };
+/**
+ * Maps the LockRequest status to a human-readable string.
+ */
+static const char* LockRequestStatusNames[] = {
+ "new", "granted", "waiting", "converting",
+};
- // Ensure we do not add new status types without updating the names array
- BOOST_STATIC_ASSERT(
- (sizeof(LockRequestStatusNames) / sizeof(LockRequestStatusNames[0]))
- == LockRequest::StatusCount);
+// Ensure we do not add new status types without updating the names array
+BOOST_STATIC_ASSERT((sizeof(LockRequestStatusNames) / sizeof(LockRequestStatusNames[0])) ==
+ LockRequest::StatusCount);
-} // namespace
+} // namespace
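
To make the bit-mask encoding above concrete, here is a standalone sketch (not part of the
patch) that restates the conflicts table, assuming the mode numbering implied by the names
array above (MODE_NONE = 0, MODE_IS = 1, MODE_IX = 2, MODE_S = 3, MODE_X = 4), and
spot-checks a few pairs:

    #include <cassert>
    #include <cstdint>

    enum Mode { kNone, kIS, kIX, kS, kX };

    const int kConflicts[] = {
        0,                                                // NONE
        (1 << kX),                                        // IS
        (1 << kS) | (1 << kX),                            // IX
        (1 << kIX) | (1 << kX),                           // S
        (1 << kS) | (1 << kX) | (1 << kIS) | (1 << kIX),  // X
    };

    bool conflicts(Mode newMode, uint32_t existingModesMask) {
        return (kConflicts[newMode] & existingModesMask) != 0;
    }

    int main() {
        assert(!conflicts(kIS, 1u << kIX));  // intent modes are mutually compatible
        assert(conflicts(kS, 1u << kIX));    // S must wait behind a granted IX
        assert(conflicts(kX, 1u << kIS));    // X conflicts with any granted mode
        return 0;
    }
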
+/**
+ * There is one of these objects for each resource that has a lock request. Empty objects
+ * (i.e. LockHead with no requests) are allowed to exist on the lock manager's hash table.
+ *
+ * The memory and lifetime is controlled entirely by the LockManager class.
+ *
+ * Not thread-safe and should only be accessed under the LockManager's bucket lock.
+ * Must be locked before locking a partition, not after.
+ */
+struct LockHead {
/**
- * There is one of these objects for each resource that has a lock request. Empty objects
- * (i.e. LockHead with no requests) are allowed to exist on the lock manager's hash table.
- *
- * The memory and lifetime is controlled entirely by the LockManager class.
- *
- * Not thread-safe and should only be accessed under the LockManager's bucket lock.
- * Must be locked before locking a partition, not after.
+ * Used for initialization of a LockHead, which might have been retrieved from cache and
+ * also in order to keep the LockHead structure a POD.
*/
- struct LockHead {
+ void initNew(ResourceId resId) {
+ resourceId = resId;
- /**
- * Used for initialization of a LockHead, which might have been retrieved from cache and
- * also in order to keep the LockHead structure a POD.
- */
- void initNew(ResourceId resId) {
- resourceId = resId;
+ grantedList.reset();
+ memset(grantedCounts, 0, sizeof(grantedCounts));
+ grantedModes = 0;
- grantedList.reset();
- memset(grantedCounts, 0, sizeof(grantedCounts));
- grantedModes = 0;
+ conflictList.reset();
+ memset(conflictCounts, 0, sizeof(conflictCounts));
+ conflictModes = 0;
- conflictList.reset();
- memset(conflictCounts, 0, sizeof(conflictCounts));
- conflictModes = 0;
+ conversionsCount = 0;
+ compatibleFirstCount = 0;
+ }
- conversionsCount = 0;
- compatibleFirstCount = 0;
- }
+ /**
+ * True iff there may be partitions with granted requests for this
+ * resource.
+ */
+ bool partitioned() const {
+ return !partitions.empty();
+ }
- /**
- * True iff there may be partitions with granted requests for this
- * resource.
- */
- bool partitioned() const {
- return !partitions.empty();
+ /**
+ * Locates the request corresponding to the particular locker or returns NULL. Must be
+ * called with the bucket holding this lock head locked.
+ */
+ LockRequest* findRequest(LockerId lockerId) const {
+ // Check the granted queue first
+ for (LockRequest* it = grantedList._front; it != NULL; it = it->next) {
+ if (it->locker->getId() == lockerId) {
+ return it;
+ }
}
- /**
- * Locates the request corresponding to the particular locker or returns NULL. Must be
- * called with the bucket holding this lock head locked.
- */
- LockRequest* findRequest(LockerId lockerId) const {
- // Check the granted queue first
- for (LockRequest* it = grantedList._front; it != NULL; it = it->next) {
- if (it->locker->getId() == lockerId) {
- return it;
- }
+ // Check the conflict queue second
+ for (LockRequest* it = conflictList._front; it != NULL; it = it->next) {
+ if (it->locker->getId() == lockerId) {
+ return it;
}
+ }
- // Check the conflict queue second
- for (LockRequest* it = conflictList._front; it != NULL; it = it->next) {
- if (it->locker->getId() == lockerId) {
- return it;
- }
- }
+ return NULL;
+ }
- return NULL;
+ /**
+     * Finishes creation of the request and puts it on the lockhead's conflict or granted
+     * queue. Returns LOCK_WAITING in the conflict case and LOCK_OK otherwise.
+ */
+ LockResult newRequest(LockRequest* request, LockMode mode) {
+ request->mode = mode;
+ request->lock = this;
+ request->partitionedLock = NULL;
+ if (!partitioned()) {
+ request->recursiveCount = 1;
}
-
- /**
- * Finish creation of request and put it on the lockhead's conflict or granted queues.
- * Returns LOCK_WAITING for conflict case and LOCK_OK otherwise.
- */
- LockResult newRequest(LockRequest* request, LockMode mode) {
- request->mode = mode;
- request->lock = this;
- request->partitionedLock = NULL;
- if (!partitioned()) {
- request->recursiveCount = 1;
+ // request->partitioned cannot be set to false, as this might be a migration, in
+ // which case access to that field is not protected. The 'partitioned' member instead
+ // indicates if a request was initially partitioned.
+
+ // New lock request. Queue after all granted modes and after any already requested
+ // conflicting modes.
+ if (conflicts(mode, grantedModes) ||
+ (!compatibleFirstCount && conflicts(mode, conflictModes))) {
+ request->status = LockRequest::STATUS_WAITING;
+
+ // Put it on the conflict queue. Conflicts are granted front to back.
+ if (request->enqueueAtFront) {
+ conflictList.push_front(request);
+ } else {
+ conflictList.push_back(request);
}
- // request->partitioned cannot be set to false, as this might be a migration, in
- // which case access to that field is not protected. The 'partitioned' member instead
- // indicates if a request was initially partitioned.
-
- // New lock request. Queue after all granted modes and after any already requested
- // conflicting modes.
- if (conflicts(mode, grantedModes) ||
- (!compatibleFirstCount && conflicts(mode, conflictModes))) {
- request->status = LockRequest::STATUS_WAITING;
-
- // Put it on the conflict queue. Conflicts are granted front to back.
- if (request->enqueueAtFront) {
- conflictList.push_front(request);
- }
- else {
- conflictList.push_back(request);
- }
- incConflictModeCount(mode);
+ incConflictModeCount(mode);
- return LOCK_WAITING;
- }
-
- // No conflict, new request
- request->status = LockRequest::STATUS_GRANTED;
+ return LOCK_WAITING;
+ }
- grantedList.push_back(request);
- incGrantedModeCount(mode);
+ // No conflict, new request
+ request->status = LockRequest::STATUS_GRANTED;
- if (request->compatibleFirst) {
- compatibleFirstCount++;
- }
+ grantedList.push_back(request);
+ incGrantedModeCount(mode);
- return LOCK_OK;
+ if (request->compatibleFirst) {
+ compatibleFirstCount++;
}
- /**
- * Lock each partitioned LockHead in turn, and move any (granted) intent mode requests for
- * lock->resourceId to lock, which must itself already be locked.
- */
- void migratePartitionedLockHeads();
-
- // Methods to maintain the granted queue
- void incGrantedModeCount(LockMode mode) {
- invariant(grantedCounts[mode] >= 0);
- if (++grantedCounts[mode] == 1) {
- invariant((grantedModes & modeMask(mode)) == 0);
- grantedModes |= modeMask(mode);
- }
- }
+ return LOCK_OK;
+ }
- void decGrantedModeCount(LockMode mode) {
- invariant(grantedCounts[mode] >= 1);
- if (--grantedCounts[mode] == 0) {
- invariant((grantedModes & modeMask(mode)) == modeMask(mode));
- grantedModes &= ~modeMask(mode);
- }
+ /**
+ * Lock each partitioned LockHead in turn, and move any (granted) intent mode requests for
+ * lock->resourceId to lock, which must itself already be locked.
+ */
+ void migratePartitionedLockHeads();
+
+ // Methods to maintain the granted queue
+ void incGrantedModeCount(LockMode mode) {
+ invariant(grantedCounts[mode] >= 0);
+ if (++grantedCounts[mode] == 1) {
+ invariant((grantedModes & modeMask(mode)) == 0);
+ grantedModes |= modeMask(mode);
}
+ }
- // Methods to maintain the conflict queue
- void incConflictModeCount(LockMode mode) {
- invariant(conflictCounts[mode] >= 0);
- if (++conflictCounts[mode] == 1) {
- invariant((conflictModes & modeMask(mode)) == 0);
- conflictModes |= modeMask(mode);
- }
+ void decGrantedModeCount(LockMode mode) {
+ invariant(grantedCounts[mode] >= 1);
+ if (--grantedCounts[mode] == 0) {
+ invariant((grantedModes & modeMask(mode)) == modeMask(mode));
+ grantedModes &= ~modeMask(mode);
}
+ }
- void decConflictModeCount(LockMode mode) {
- invariant(conflictCounts[mode] >= 1);
- if (--conflictCounts[mode] == 0) {
- invariant((conflictModes & modeMask(mode)) == modeMask(mode));
- conflictModes &= ~modeMask(mode);
- }
+ // Methods to maintain the conflict queue
+ void incConflictModeCount(LockMode mode) {
+ invariant(conflictCounts[mode] >= 0);
+ if (++conflictCounts[mode] == 1) {
+ invariant((conflictModes & modeMask(mode)) == 0);
+ conflictModes |= modeMask(mode);
}
+ }
+ void decConflictModeCount(LockMode mode) {
+ invariant(conflictCounts[mode] >= 1);
+ if (--conflictCounts[mode] == 0) {
+ invariant((conflictModes & modeMask(mode)) == modeMask(mode));
+ conflictModes &= ~modeMask(mode);
+ }
+ }
- // Id of the resource which this lock protects
- ResourceId resourceId;
-
- //
- // Granted queue
- //
-
- // Doubly-linked list of requests, which have been granted. Newly granted requests go to
- // the end of the queue. Conversion requests are granted from the beginning forward.
- LockRequestList grantedList;
-
- // Counts the grants and coversion counts for each of the supported lock modes. These
- // counts should exactly match the aggregated modes on the granted list.
- uint32_t grantedCounts[LockModesCount];
-
- // Bit-mask of the granted + converting modes on the granted queue. Maintained in lock-step
- // with the grantedCounts array.
- uint32_t grantedModes;
-
-
- //
- // Conflict queue
- //
- // Doubly-linked list of requests, which have not been granted yet because they conflict
- // with the set of granted modes. Requests are queued at the end of the queue and are
- // granted from the beginning forward, which gives these locks FIFO ordering. Exceptions
- // are high-priorty locks, such as the MMAP V1 flush lock.
- LockRequestList conflictList;
+ // Id of the resource which this lock protects
+ ResourceId resourceId;
- // Counts the conflicting requests for each of the lock modes. These counts should exactly
- // match the aggregated modes on the conflicts list.
- uint32_t conflictCounts[LockModesCount];
+ //
+ // Granted queue
+ //
- // Bit-mask of the conflict modes on the conflict queue. Maintained in lock-step with the
- // conflictCounts array.
- uint32_t conflictModes;
+ // Doubly-linked list of requests, which have been granted. Newly granted requests go to
+ // the end of the queue. Conversion requests are granted from the beginning forward.
+ LockRequestList grantedList;
- // References partitions that may have PartitionedLockHeads for this LockHead.
- // Non-empty implies the lock has no conflicts and only has intent modes as grantedModes.
- // TODO: Remove this vector and make LockHead a POD
- std::vector<LockManager::Partition *> partitions;
+    // Counts the grants and conversions for each of the supported lock modes. These
+    // counts should exactly match the aggregated modes on the granted list.
+ uint32_t grantedCounts[LockModesCount];
- //
- // Conversion
- //
+ // Bit-mask of the granted + converting modes on the granted queue. Maintained in lock-step
+ // with the grantedCounts array.
+ uint32_t grantedModes;
- // Counts the number of requests on the granted queue, which have requested any kind of
- // conflicting conversion and are blocked (i.e. all requests which are currently
- // STATUS_CONVERTING). This is an optimization for unlocking in that we do not need to
- // check the granted queue for requests in STATUS_CONVERTING if this count is zero. This
- // saves cycles in the regular case and only burdens the less-frequent lock upgrade case.
- uint32_t conversionsCount;
- // Counts the number of requests on the granted queue, which have requested that the policy
- // be switched to compatible-first. As long as this value is > 0, the policy will stay
- // compatible-first.
- uint32_t compatibleFirstCount;
- };
+ //
+ // Conflict queue
+ //
- /**
- * The PartitionedLockHead allows optimizing the case where requests overwhelmingly use
- * the intent lock modes MODE_IS and MODE_IX, which are compatible with each other.
- * Having to use a single LockHead causes contention where none would be needed.
- * So, each Locker is associated with a specific partition containing a mapping
- * of resourceId to PartitionedLockHead.
- *
- * As long as all lock requests for a resource have an intent mode, as opposed to a conflicting
- * mode, its LockHead may reference ParitionedLockHeads. A partitioned LockHead will not have
- * any conflicts. The total set of granted requests (with intent mode) is the union of
- * its grantedList and all grantedLists in PartitionedLockHeads.
- *
- * The existence of a PartitionedLockHead for a resource implies that its LockHead is
- * partitioned. If a conflicting request is made on a LockHead, all requests from
- * PartitionedLockHeads are migrated to that LockHead and the LockHead no longer partitioned.
- *
- * Not thread-safe, must be accessed under its partition lock.
- * May not lock a LockManager bucket while holding a partition lock.
- */
- struct PartitionedLockHead {
- void initNew(ResourceId resId) {
- grantedList.reset();
- }
+ // Doubly-linked list of requests, which have not been granted yet because they conflict
+ // with the set of granted modes. Requests are queued at the end of the queue and are
+ // granted from the beginning forward, which gives these locks FIFO ordering. Exceptions
+    // are high-priority locks, such as the MMAP V1 flush lock.
+ LockRequestList conflictList;
- void newRequest(LockRequest* request, LockMode mode) {
- request->lock = NULL;
- request->partitionedLock = this;
- request->recursiveCount = 1;
- request->status = LockRequest::STATUS_GRANTED;
- request->partitioned = true;
- request->mode = mode;
+ // Counts the conflicting requests for each of the lock modes. These counts should exactly
+ // match the aggregated modes on the conflicts list.
+ uint32_t conflictCounts[LockModesCount];
- grantedList.push_back(request);
- }
+ // Bit-mask of the conflict modes on the conflict queue. Maintained in lock-step with the
+ // conflictCounts array.
+ uint32_t conflictModes;
- //
- // Granted queue
- //
-
- // Doubly-linked list of requests, which have been granted. Newly granted requests go to
- // the end of the queue. The PartitionedLockHead never contains anything but granted
- // requests with intent modes.
- LockRequestList grantedList;
- };
-
- void LockHead::migratePartitionedLockHeads() {
- invariant(partitioned());
- // There can't be non-intent modes or conflicts when the lock is partitioned
- invariant(!(grantedModes & ~intentModes) && !conflictModes);
-
- // Migration time: lock each partition in turn and transfer its requests, if any
- while(partitioned()) {
- LockManager::Partition* partition = partitions.back();
- stdx::lock_guard<SimpleMutex> scopedLock(partition->mutex);
-
- LockManager::Partition::Map::iterator it = partition->data.find(resourceId);
- if (it != partition->data.end()) {
- PartitionedLockHead* partitionedLock = it->second;
-
- while (!partitionedLock->grantedList.empty()) {
- LockRequest* request = partitionedLock->grantedList._front;
- partitionedLock->grantedList.remove(request);
- // Ordering is important here, as the next/prev fields are shared.
- // Note that newRequest() will preserve the recursiveCount in this case
- LockResult res = newRequest(request, request->mode);
- invariant(res == LOCK_OK); // Lock must still be granted
- }
- partition->data.erase(it);
- delete partitionedLock;
- }
- // Don't pop-back to early as otherwise the lock will be considered not partioned in
- // newRequest().
- partitions.pop_back();
- }
- }
+ // References partitions that may have PartitionedLockHeads for this LockHead.
+ // Non-empty implies the lock has no conflicts and only has intent modes as grantedModes.
+ // TODO: Remove this vector and make LockHead a POD
+ std::vector<LockManager::Partition*> partitions;
//
- // LockManager
+ // Conversion
//
- // Have more buckets than CPUs to reduce contention on lock and caches
- const unsigned LockManager::_numLockBuckets(128);
+ // Counts the number of requests on the granted queue, which have requested any kind of
+ // conflicting conversion and are blocked (i.e. all requests which are currently
+ // STATUS_CONVERTING). This is an optimization for unlocking in that we do not need to
+ // check the granted queue for requests in STATUS_CONVERTING if this count is zero. This
+ // saves cycles in the regular case and only burdens the less-frequent lock upgrade case.
+ uint32_t conversionsCount;
- // Balance scalability of intent locks against potential added cost of conflicting locks.
- // The exact value doesn't appear very important, but should be power of two
- const unsigned LockManager::_numPartitions = 32;
+ // Counts the number of requests on the granted queue, which have requested that the policy
+ // be switched to compatible-first. As long as this value is > 0, the policy will stay
+ // compatible-first.
+ uint32_t compatibleFirstCount;
+};
- LockManager::LockManager() {
- _lockBuckets = new LockBucket[_numLockBuckets];
- _partitions = new Partition[_numPartitions];
+/**
+ * The PartitionedLockHead allows optimizing the case where requests overwhelmingly use
+ * the intent lock modes MODE_IS and MODE_IX, which are compatible with each other.
+ * Having to use a single LockHead causes contention where none would be needed.
+ * So, each Locker is associated with a specific partition containing a mapping
+ * of resourceId to PartitionedLockHead.
+ *
+ * As long as all lock requests for a resource have an intent mode, as opposed to a conflicting
+ * mode, its LockHead may reference PartitionedLockHeads. A partitioned LockHead will not have
+ * any conflicts. The total set of granted requests (with intent mode) is the union of
+ * its grantedList and all grantedLists in PartitionedLockHeads.
+ *
+ * The existence of a PartitionedLockHead for a resource implies that its LockHead is
+ * partitioned. If a conflicting request is made on a LockHead, all requests from
+ * PartitionedLockHeads are migrated to that LockHead, and the LockHead is no longer
+ * partitioned.
+ *
+ * Not thread-safe, must be accessed under its partition lock.
+ * May not lock a LockManager bucket while holding a partition lock.
+ */
+struct PartitionedLockHead {
+ void initNew(ResourceId resId) {
+ grantedList.reset();
}
- LockManager::~LockManager() {
- cleanupUnusedLocks();
-
- for (unsigned i = 0; i < _numLockBuckets; i++) {
- // TODO: dump more information about the non-empty bucket to see what locks were leaked
- invariant(_lockBuckets[i].data.empty());
- }
+ void newRequest(LockRequest* request, LockMode mode) {
+ request->lock = NULL;
+ request->partitionedLock = this;
+ request->recursiveCount = 1;
+ request->status = LockRequest::STATUS_GRANTED;
+ request->partitioned = true;
+ request->mode = mode;
- delete[] _lockBuckets;
- delete[] _partitions;
+ grantedList.push_back(request);
}
- LockResult LockManager::lock(ResourceId resId, LockRequest* request, LockMode mode) {
- // Sanity check that requests are not being reused without proper cleanup
- invariant(request->status == LockRequest::STATUS_NEW);
-
- request->partitioned = (mode == MODE_IX || mode == MODE_IS);
-
- // For intent modes, try the PartitionedLockHead
- if (request->partitioned) {
- Partition* partition = _getPartition(request);
- stdx::lock_guard<SimpleMutex> scopedLock(partition->mutex);
-
- // Fast path for intent locks
- PartitionedLockHead* partitionedLock = partition->find(resId);
+ //
+ // Granted queue
+ //
- if (partitionedLock) {
- partitionedLock->newRequest(request, mode);
- return LOCK_OK;
+ // Doubly-linked list of requests, which have been granted. Newly granted requests go to
+ // the end of the queue. The PartitionedLockHead never contains anything but granted
+ // requests with intent modes.
+ LockRequestList grantedList;
+};
+
+void LockHead::migratePartitionedLockHeads() {
+ invariant(partitioned());
+ // There can't be non-intent modes or conflicts when the lock is partitioned
+ invariant(!(grantedModes & ~intentModes) && !conflictModes);
+
+ // Migration time: lock each partition in turn and transfer its requests, if any
+ while (partitioned()) {
+ LockManager::Partition* partition = partitions.back();
+ stdx::lock_guard<SimpleMutex> scopedLock(partition->mutex);
+
+ LockManager::Partition::Map::iterator it = partition->data.find(resourceId);
+ if (it != partition->data.end()) {
+ PartitionedLockHead* partitionedLock = it->second;
+
+ while (!partitionedLock->grantedList.empty()) {
+ LockRequest* request = partitionedLock->grantedList._front;
+ partitionedLock->grantedList.remove(request);
+ // Ordering is important here, as the next/prev fields are shared.
+ // Note that newRequest() will preserve the recursiveCount in this case
+ LockResult res = newRequest(request, request->mode);
+ invariant(res == LOCK_OK); // Lock must still be granted
}
- // Unsuccessful: there was no PartitionedLockHead yet, so use regular LockHead.
- // Must not hold any locks. It is OK for requests with intent modes to be on
- // both a PartitionedLockHead and a regular LockHead, so the race here is benign.
- }
-
- // Use regular LockHead, maybe start partitioning
- LockBucket* bucket = _getBucket(resId);
- stdx::lock_guard<SimpleMutex> scopedLock(bucket->mutex);
-
- LockHead* lock = bucket->findOrInsert(resId);
-
- // Start a partitioned lock if possible
- if (request->partitioned && !(lock->grantedModes & (~intentModes))
- && !lock->conflictModes) {
- Partition* partition = _getPartition(request);
- stdx::lock_guard<SimpleMutex> scopedLock(partition->mutex);
- PartitionedLockHead* partitionedLock = partition->findOrInsert(resId);
- invariant(partitionedLock);
- lock->partitions.push_back(partition);
- partitionedLock->newRequest(request, mode);
- return LOCK_OK;
+ partition->data.erase(it);
+ delete partitionedLock;
}
-
- // For the first lock with a non-intent mode, migrate requests from partitioned lock heads
- if (lock->partitioned()) {
- lock->migratePartitionedLockHeads();
- }
-
- request->partitioned = false;
- return lock->newRequest(request, mode);
+        // Don't pop_back too early, as otherwise the lock would be considered not
+        // partitioned in newRequest().
+ partitions.pop_back();
}
+}
- LockResult LockManager::convert(ResourceId resId, LockRequest* request, LockMode newMode) {
- // If we are here, we already hold the lock in some mode. In order to keep it simple, we do
- // not allow requesting a conversion while a lock is already waiting or pending conversion.
- invariant(request->status == LockRequest::STATUS_GRANTED);
- invariant(request->recursiveCount > 0);
+//
+// LockManager
+//
- request->recursiveCount++;
+// Have more buckets than CPUs to reduce contention on locks and caches
+const unsigned LockManager::_numLockBuckets(128);
- // Fast path for acquiring the same lock multiple times in modes, which are already covered
- // by the current mode. It is safe to do this without locking, because 1) all calls for the
- // same lock request must be done on the same thread and 2) if there are lock requests
- // hanging off a given LockHead, then this lock will never disappear.
- if ((LockConflictsTable[request->mode] | LockConflictsTable[newMode]) ==
- LockConflictsTable[request->mode]) {
- return LOCK_OK;
- }
+// Balance scalability of intent locks against potential added cost of conflicting locks.
+// The exact value doesn't appear very important, but should be power of two
+const unsigned LockManager::_numPartitions = 32;
- // TODO: For the time being we do not need conversions between unrelated lock modes (i.e.,
- // modes which both add and remove to the conflicts set), so these are not implemented yet
- // (e.g., S -> IX).
- invariant((LockConflictsTable[request->mode] | LockConflictsTable[newMode]) ==
- LockConflictsTable[newMode]);
+LockManager::LockManager() {
+ _lockBuckets = new LockBucket[_numLockBuckets];
+ _partitions = new Partition[_numPartitions];
+}
- LockBucket* bucket = _getBucket(resId);
- stdx::lock_guard<SimpleMutex> scopedLock(bucket->mutex);
+LockManager::~LockManager() {
+ cleanupUnusedLocks();
- LockBucket::Map::iterator it = bucket->data.find(resId);
- invariant(it != bucket->data.end());
+ for (unsigned i = 0; i < _numLockBuckets; i++) {
+ // TODO: dump more information about the non-empty bucket to see what locks were leaked
+ invariant(_lockBuckets[i].data.empty());
+ }
- LockHead* const lock = it->second;
+ delete[] _lockBuckets;
+ delete[] _partitions;
+}
- if (lock->partitioned()) {
- lock->migratePartitionedLockHeads();
- }
+LockResult LockManager::lock(ResourceId resId, LockRequest* request, LockMode mode) {
+ // Sanity check that requests are not being reused without proper cleanup
+ invariant(request->status == LockRequest::STATUS_NEW);
- // Construct granted mask without our current mode, so that it is not counted as
- // conflicting
- uint32_t grantedModesWithoutCurrentRequest = 0;
+ request->partitioned = (mode == MODE_IX || mode == MODE_IS);
- // We start the counting at 1 below, because LockModesCount also includes MODE_NONE
- // at position 0, which can never be acquired/granted.
- for (uint32_t i = 1; i < LockModesCount; i++) {
- const uint32_t currentRequestHolds =
- (request->mode == static_cast<LockMode>(i) ? 1 : 0);
+ // For intent modes, try the PartitionedLockHead
+ if (request->partitioned) {
+ Partition* partition = _getPartition(request);
+ stdx::lock_guard<SimpleMutex> scopedLock(partition->mutex);
- if (lock->grantedCounts[i] > currentRequestHolds) {
- grantedModesWithoutCurrentRequest |= modeMask(static_cast<LockMode>(i));
- }
- }
-
- // This check favours conversion requests over pending requests. For example:
- //
- // T1 requests lock L in IS
- // T2 requests lock L in X
- // T1 then upgrades L from IS -> S
- //
- // Because the check does not look into the conflict modes bitmap, it will grant L to
- // T1 in S mode, instead of block, which would otherwise cause deadlock.
- if (conflicts(newMode, grantedModesWithoutCurrentRequest)) {
- request->status = LockRequest::STATUS_CONVERTING;
- invariant(request->recursiveCount > 1);
- request->convertMode = newMode;
-
- lock->conversionsCount++;
- lock->incGrantedModeCount(request->convertMode);
-
- return LOCK_WAITING;
- }
- else { // No conflict, existing request
- lock->incGrantedModeCount(newMode);
- lock->decGrantedModeCount(request->mode);
- request->mode = newMode;
+ // Fast path for intent locks
+ PartitionedLockHead* partitionedLock = partition->find(resId);
+ if (partitionedLock) {
+ partitionedLock->newRequest(request, mode);
return LOCK_OK;
}
+ // Unsuccessful: there was no PartitionedLockHead yet, so use regular LockHead.
+ // Must not hold any locks. It is OK for requests with intent modes to be on
+ // both a PartitionedLockHead and a regular LockHead, so the race here is benign.
}
- bool LockManager::unlock(LockRequest* request) {
- // Fast path for decrementing multiple references of the same lock. It is safe to do this
- // without locking, because 1) all calls for the same lock request must be done on the same
- // thread and 2) if there are lock requests hanging of a given LockHead, then this lock
- // will never disappear.
- invariant(request->recursiveCount > 0);
- request->recursiveCount--;
- if ((request->status == LockRequest::STATUS_GRANTED) && (request->recursiveCount > 0)) {
- return false;
- }
-
- if (request->partitioned) {
- // Unlocking a lock that was acquired as partitioned. The lock request may since have
- // moved to the lock head, but there is no safe way to find out without synchronizing
- // thorough the partition mutex. Migrations are expected to be rare.
- invariant(request->status == LockRequest::STATUS_GRANTED
- || request->status == LockRequest::STATUS_CONVERTING);
- Partition* partition = _getPartition(request);
- stdx::lock_guard<SimpleMutex> scopedLock(partition->mutex);
- // Fast path: still partitioned.
- if (request->partitionedLock) {
- request->partitionedLock->grantedList.remove(request);
- return true;
- }
-
- // not partitioned anymore, fall through to regular case
- }
- invariant(request->lock);
+ // Use regular LockHead, maybe start partitioning
+ LockBucket* bucket = _getBucket(resId);
+ stdx::lock_guard<SimpleMutex> scopedLock(bucket->mutex);
+
+ LockHead* lock = bucket->findOrInsert(resId);
+
+ // Start a partitioned lock if possible
+ if (request->partitioned && !(lock->grantedModes & (~intentModes)) && !lock->conflictModes) {
+ Partition* partition = _getPartition(request);
+ stdx::lock_guard<SimpleMutex> scopedLock(partition->mutex);
+ PartitionedLockHead* partitionedLock = partition->findOrInsert(resId);
+ invariant(partitionedLock);
+ lock->partitions.push_back(partition);
+ partitionedLock->newRequest(request, mode);
+ return LOCK_OK;
+ }
- LockHead* lock = request->lock;
- LockBucket* bucket = _getBucket(lock->resourceId);
- stdx::lock_guard<SimpleMutex> scopedLock(bucket->mutex);
+ // For the first lock with a non-intent mode, migrate requests from partitioned lock heads
+ if (lock->partitioned()) {
+ lock->migratePartitionedLockHeads();
+ }
- if (request->status == LockRequest::STATUS_GRANTED) {
- // This releases a currently held lock and is the most common path, so it should be
- // as efficient as possible. The fast path for decrementing multiple references did
- // already ensure request->recursiveCount == 0.
+ request->partitioned = false;
+ return lock->newRequest(request, mode);
+}
+
+LockResult LockManager::convert(ResourceId resId, LockRequest* request, LockMode newMode) {
+ // If we are here, we already hold the lock in some mode. In order to keep it simple, we do
+ // not allow requesting a conversion while a lock is already waiting or pending conversion.
+ invariant(request->status == LockRequest::STATUS_GRANTED);
+ invariant(request->recursiveCount > 0);
+
+ request->recursiveCount++;
+
+ // Fast path for acquiring the same lock multiple times in modes, which are already covered
+ // by the current mode. It is safe to do this without locking, because 1) all calls for the
+ // same lock request must be done on the same thread and 2) if there are lock requests
+ // hanging off a given LockHead, then this lock will never disappear.
+ if ((LockConflictsTable[request->mode] | LockConflictsTable[newMode]) ==
+ LockConflictsTable[request->mode]) {
+ return LOCK_OK;
+ }
- // Remove from the granted list
- lock->grantedList.remove(request);
- lock->decGrantedModeCount(request->mode);
+ // TODO: For the time being we do not need conversions between unrelated lock modes (i.e.,
+ // modes which both add and remove to the conflicts set), so these are not implemented yet
+ // (e.g., S -> IX).
+ invariant((LockConflictsTable[request->mode] | LockConflictsTable[newMode]) ==
+ LockConflictsTable[newMode]);
- if (request->compatibleFirst) {
- lock->compatibleFirstCount--;
- }
+ LockBucket* bucket = _getBucket(resId);
+ stdx::lock_guard<SimpleMutex> scopedLock(bucket->mutex);
- _onLockModeChanged(lock, lock->grantedCounts[request->mode] == 0);
- }
- else if (request->status == LockRequest::STATUS_WAITING) {
- // This cancels a pending lock request
- invariant(request->recursiveCount == 0);
+ LockBucket::Map::iterator it = bucket->data.find(resId);
+ invariant(it != bucket->data.end());
- lock->conflictList.remove(request);
- lock->decConflictModeCount(request->mode);
- }
- else if (request->status == LockRequest::STATUS_CONVERTING) {
- // This cancels a pending convert request
- invariant(request->recursiveCount > 0);
+ LockHead* const lock = it->second;
- // Lock only goes from GRANTED to CONVERTING, so cancelling the conversion request
- // brings it back to the previous granted mode.
- request->status = LockRequest::STATUS_GRANTED;
+ if (lock->partitioned()) {
+ lock->migratePartitionedLockHeads();
+ }
- lock->conversionsCount--;
- lock->decGrantedModeCount(request->convertMode);
+ // Construct granted mask without our current mode, so that it is not counted as
+ // conflicting
+ uint32_t grantedModesWithoutCurrentRequest = 0;
- request->convertMode = MODE_NONE;
+ // We start the counting at 1 below, because LockModesCount also includes MODE_NONE
+ // at position 0, which can never be acquired/granted.
+ for (uint32_t i = 1; i < LockModesCount; i++) {
+ const uint32_t currentRequestHolds = (request->mode == static_cast<LockMode>(i) ? 1 : 0);
- _onLockModeChanged(lock, lock->grantedCounts[request->convertMode] == 0);
- }
- else {
- // Invalid request status
- invariant(false);
+ if (lock->grantedCounts[i] > currentRequestHolds) {
+ grantedModesWithoutCurrentRequest |= modeMask(static_cast<LockMode>(i));
}
-
- return (request->recursiveCount == 0);
}
- void LockManager::downgrade(LockRequest* request, LockMode newMode) {
- invariant(request->lock);
- invariant(request->status == LockRequest::STATUS_GRANTED);
- invariant(request->recursiveCount > 0);
-
- // The conflict set of the newMode should be a subset of the conflict set of the old mode.
- // Can't downgrade from S -> IX for example.
- invariant((LockConflictsTable[request->mode] | LockConflictsTable[newMode])
- == LockConflictsTable[request->mode]);
-
- LockHead* lock = request->lock;
-
- LockBucket* bucket = _getBucket(lock->resourceId);
- stdx::lock_guard<SimpleMutex> scopedLock(bucket->mutex);
-
+ // This check favours conversion requests over pending requests. For example:
+ //
+ // T1 requests lock L in IS
+ // T2 requests lock L in X
+ // T1 then upgrades L from IS -> S
+ //
+ // Because the check does not look into the conflict modes bitmap, it will grant L to
+ // T1 in S mode, instead of block, which would otherwise cause deadlock.
+ if (conflicts(newMode, grantedModesWithoutCurrentRequest)) {
+ request->status = LockRequest::STATUS_CONVERTING;
+ invariant(request->recursiveCount > 1);
+ request->convertMode = newMode;
+
+ lock->conversionsCount++;
+ lock->incGrantedModeCount(request->convertMode);
+
+ return LOCK_WAITING;
+ } else { // No conflict, existing request
lock->incGrantedModeCount(newMode);
lock->decGrantedModeCount(request->mode);
request->mode = newMode;
- _onLockModeChanged(lock, true);
+ return LOCK_OK;
+ }
+}
+
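
Both the fast path in convert() above and the invariant in downgrade() below reduce to one
subset test over conflict sets: '(LockConflictsTable[a] | LockConflictsTable[b]) ==
LockConflictsTable[a]' holds exactly when mode 'b' conflicts with no more than mode 'a'
does. A standalone check of that predicate (not part of the patch; it repeats the table
from the earlier sketch so it compiles on its own):

    #include <cassert>

    enum Mode { kNone, kIS, kIX, kS, kX };

    const int kConflicts[] = {
        0,                                               // NONE
        (1 << kX),                                       // IS
        (1 << kS) | (1 << kX),                           // IX
        (1 << kIX) | (1 << kX),                          // S
        (1 << kS) | (1 << kX) | (1 << kIS) | (1 << kIX)  // X
    };

    // True when 'wanted' conflicts with a subset of what 'held' conflicts with.
    bool covers(Mode held, Mode wanted) {
        return (kConflicts[held] | kConflicts[wanted]) == kConflicts[held];
    }

    int main() {
        assert(covers(kX, kIS));   // already holding X: converting to IS is a no-op
        assert(!covers(kIS, kS));  // IS -> S is a real conversion and may wait
        assert(covers(kS, kIS));   // S -> IS is a legal downgrade
        assert(!covers(kS, kIX));  // S -> IX would grow the conflict set
        return 0;
    }
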
+bool LockManager::unlock(LockRequest* request) {
+ // Fast path for decrementing multiple references of the same lock. It is safe to do this
+ // without locking, because 1) all calls for the same lock request must be done on the same
+    // thread and 2) if there are lock requests hanging off a given LockHead, then this lock
+ // will never disappear.
+ invariant(request->recursiveCount > 0);
+ request->recursiveCount--;
+ if ((request->status == LockRequest::STATUS_GRANTED) && (request->recursiveCount > 0)) {
+ return false;
}
- void LockManager::cleanupUnusedLocks() {
- size_t deletedLockHeads = 0;
- for (unsigned i = 0; i < _numLockBuckets; i++) {
- LockBucket* bucket = &_lockBuckets[i];
- stdx::lock_guard<SimpleMutex> scopedLock(bucket->mutex);
-
- LockBucket::Map::iterator it = bucket->data.begin();
- while (it != bucket->data.end()) {
- LockHead* lock = it->second;
- if (lock->partitioned()) {
- lock->migratePartitionedLockHeads();
- }
- if (lock->grantedModes == 0) {
- invariant(lock->grantedModes == 0);
- invariant(lock->grantedList._front == NULL);
- invariant(lock->grantedList._back == NULL);
- invariant(lock->conflictModes == 0);
- invariant(lock->conflictList._front == NULL);
- invariant(lock->conflictList._back == NULL);
- invariant(lock->conversionsCount == 0);
- invariant(lock->compatibleFirstCount == 0);
-
- bucket->data.erase(it++);
- deletedLockHeads++;
- delete lock;
- }
- else {
- it++;
- }
- }
+ if (request->partitioned) {
+ // Unlocking a lock that was acquired as partitioned. The lock request may since have
+ // moved to the lock head, but there is no safe way to find out without synchronizing
+        // through the partition mutex. Migrations are expected to be rare.
+ invariant(request->status == LockRequest::STATUS_GRANTED ||
+ request->status == LockRequest::STATUS_CONVERTING);
+ Partition* partition = _getPartition(request);
+ stdx::lock_guard<SimpleMutex> scopedLock(partition->mutex);
+ // Fast path: still partitioned.
+ if (request->partitionedLock) {
+ request->partitionedLock->grantedList.remove(request);
+ return true;
}
+
+ // not partitioned anymore, fall through to regular case
}
+ invariant(request->lock);
- void LockManager::_onLockModeChanged(LockHead* lock, bool checkConflictQueue) {
- // Unblock any converting requests (because conversions are still counted as granted and
- // are on the granted queue).
- for (LockRequest* iter = lock->grantedList._front;
- (iter != NULL) && (lock->conversionsCount > 0);
- iter = iter->next) {
+ LockHead* lock = request->lock;
+ LockBucket* bucket = _getBucket(lock->resourceId);
+ stdx::lock_guard<SimpleMutex> scopedLock(bucket->mutex);
- // Conversion requests are going in a separate queue
- if (iter->status == LockRequest::STATUS_CONVERTING) {
- invariant(iter->convertMode != 0);
+ if (request->status == LockRequest::STATUS_GRANTED) {
+ // This releases a currently held lock and is the most common path, so it should be
+        // as efficient as possible. The fast path for decrementing multiple references has
+        // already ensured that request->recursiveCount == 0.
- // Construct granted mask without our current mode, so that it is not accounted as
- // a conflict
- uint32_t grantedModesWithoutCurrentRequest = 0;
+ // Remove from the granted list
+ lock->grantedList.remove(request);
+ lock->decGrantedModeCount(request->mode);
- // We start the counting at 1 below, because LockModesCount also includes
- // MODE_NONE at position 0, which can never be acquired/granted.
- for (uint32_t i = 1; i < LockModesCount; i++) {
- const uint32_t currentRequestHolds =
- (iter->mode == static_cast<LockMode>(i) ? 1 : 0);
+ if (request->compatibleFirst) {
+ lock->compatibleFirstCount--;
+ }
- const uint32_t currentRequestWaits =
- (iter->convertMode == static_cast<LockMode>(i) ? 1 : 0);
+ _onLockModeChanged(lock, lock->grantedCounts[request->mode] == 0);
+ } else if (request->status == LockRequest::STATUS_WAITING) {
+ // This cancels a pending lock request
+ invariant(request->recursiveCount == 0);
- // We cannot both hold and wait on the same lock mode
- invariant(currentRequestHolds + currentRequestWaits <= 1);
+ lock->conflictList.remove(request);
+ lock->decConflictModeCount(request->mode);
+ } else if (request->status == LockRequest::STATUS_CONVERTING) {
+ // This cancels a pending convert request
+ invariant(request->recursiveCount > 0);
- if (lock->grantedCounts[i] > (currentRequestHolds + currentRequestWaits)) {
- grantedModesWithoutCurrentRequest |= modeMask(static_cast<LockMode>(i));
- }
- }
+ // Lock only goes from GRANTED to CONVERTING, so cancelling the conversion request
+ // brings it back to the previous granted mode.
+ request->status = LockRequest::STATUS_GRANTED;
- if (!conflicts(iter->convertMode, grantedModesWithoutCurrentRequest)) {
- lock->conversionsCount--;
- lock->decGrantedModeCount(iter->mode);
- iter->status = LockRequest::STATUS_GRANTED;
- iter->mode = iter->convertMode;
- iter->convertMode = MODE_NONE;
+ lock->conversionsCount--;
+ lock->decGrantedModeCount(request->convertMode);
- iter->notify->notify(lock->resourceId, LOCK_OK);
- }
- }
- }
+ request->convertMode = MODE_NONE;
- // Grant any conflicting requests, which might now be unblocked. Note that the loop below
- // slightly violates fairness in that it will grant *all* compatible requests on the line
- // even though there might be conflicting ones interspersed between them. For example,
- // consider an X lock was just freed and the conflict queue looked like this:
- //
- // IS -> IS -> X -> X -> S -> IS
- //
- // In strict FIFO, we should grant the first two IS modes and then stop when we reach the
- // first X mode (the third request on the queue). However, the loop below would actually
- // grant all IS + S modes and once they all drain it will grant X.
+ _onLockModeChanged(lock, lock->grantedCounts[request->convertMode] == 0);
+ } else {
+ // Invalid request status
+ invariant(false);
+ }
- LockRequest* iterNext = NULL;
+ return (request->recursiveCount == 0);
+}
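
unlock() above is reference-counted: convert() bumps recursiveCount on a re-acquisition,
and only the call that drops the count to zero actually releases the request. A tiny
standalone model of just that counting contract (the real classes are not involved;
RequestModel is hypothetical):

    #include <cassert>

    struct RequestModel {
        int recursiveCount = 1;  // as set by LockHead::newRequest()

        // Mirrors the granted-request fast path of LockManager::unlock().
        bool unlock() {
            return --recursiveCount == 0;
        }
    };

    int main() {
        RequestModel r;
        r.recursiveCount++;   // as convert() does when the mode is already covered
        assert(!r.unlock());  // still held by the outer acquisition
        assert(r.unlock());   // last reference: the lock is actually released
        return 0;
    }
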
- for (LockRequest* iter = lock->conflictList._front;
- (iter != NULL) && checkConflictQueue;
- iter = iterNext) {
+void LockManager::downgrade(LockRequest* request, LockMode newMode) {
+ invariant(request->lock);
+ invariant(request->status == LockRequest::STATUS_GRANTED);
+ invariant(request->recursiveCount > 0);
- invariant(iter->status == LockRequest::STATUS_WAITING);
+ // The conflict set of the newMode should be a subset of the conflict set of the old mode.
+ // Can't downgrade from S -> IX for example.
+ invariant((LockConflictsTable[request->mode] | LockConflictsTable[newMode]) ==
+ LockConflictsTable[request->mode]);
- // Store the actual next pointer, because we muck with the iter below and move it to
- // the granted queue.
- iterNext = iter->next;
+ LockHead* lock = request->lock;
- if (conflicts(iter->mode, lock->grantedModes)) {
- continue;
- }
+ LockBucket* bucket = _getBucket(lock->resourceId);
+ stdx::lock_guard<SimpleMutex> scopedLock(bucket->mutex);
- iter->status = LockRequest::STATUS_GRANTED;
+ lock->incGrantedModeCount(newMode);
+ lock->decGrantedModeCount(request->mode);
+ request->mode = newMode;
- lock->conflictList.remove(iter);
- lock->grantedList.push_back(iter);
+ _onLockModeChanged(lock, true);
+}
- lock->incGrantedModeCount(iter->mode);
- lock->decConflictModeCount(iter->mode);
+void LockManager::cleanupUnusedLocks() {
+ size_t deletedLockHeads = 0;
+ for (unsigned i = 0; i < _numLockBuckets; i++) {
+ LockBucket* bucket = &_lockBuckets[i];
+ stdx::lock_guard<SimpleMutex> scopedLock(bucket->mutex);
- if (iter->compatibleFirst) {
- lock->compatibleFirstCount++;
+ LockBucket::Map::iterator it = bucket->data.begin();
+ while (it != bucket->data.end()) {
+ LockHead* lock = it->second;
+ if (lock->partitioned()) {
+ lock->migratePartitionedLockHeads();
}
-
- iter->notify->notify(lock->resourceId, LOCK_OK);
-
- // Small optimization - nothing is compatible with MODE_X, so no point in looking
- // further in the conflict queue.
- if (iter->mode == MODE_X) {
- break;
+ if (lock->grantedModes == 0) {
+ invariant(lock->grantedModes == 0);
+ invariant(lock->grantedList._front == NULL);
+ invariant(lock->grantedList._back == NULL);
+ invariant(lock->conflictModes == 0);
+ invariant(lock->conflictList._front == NULL);
+ invariant(lock->conflictList._back == NULL);
+ invariant(lock->conversionsCount == 0);
+ invariant(lock->compatibleFirstCount == 0);
+
+ bucket->data.erase(it++);
+ deletedLockHeads++;
+ delete lock;
+ } else {
+ it++;
}
}
-
- // This is a convenient place to check that the state of the two request queues is in sync
- // with the bitmask on the modes.
- invariant((lock->grantedModes == 0) ^ (lock->grantedList._front != NULL));
- invariant((lock->conflictModes == 0) ^ (lock->conflictList._front != NULL));
}
+}
+
+void LockManager::_onLockModeChanged(LockHead* lock, bool checkConflictQueue) {
+ // Unblock any converting requests (because conversions are still counted as granted and
+ // are on the granted queue).
+ for (LockRequest* iter = lock->grantedList._front;
+ (iter != NULL) && (lock->conversionsCount > 0);
+ iter = iter->next) {
+ // Conversion requests are going in a separate queue
+ if (iter->status == LockRequest::STATUS_CONVERTING) {
+ invariant(iter->convertMode != 0);
+
+ // Construct granted mask without our current mode, so that it is not accounted as
+ // a conflict
+ uint32_t grantedModesWithoutCurrentRequest = 0;
+
+ // We start the counting at 1 below, because LockModesCount also includes
+ // MODE_NONE at position 0, which can never be acquired/granted.
+ for (uint32_t i = 1; i < LockModesCount; i++) {
+ const uint32_t currentRequestHolds =
+ (iter->mode == static_cast<LockMode>(i) ? 1 : 0);
+
+ const uint32_t currentRequestWaits =
+ (iter->convertMode == static_cast<LockMode>(i) ? 1 : 0);
+
+ // We cannot both hold and wait on the same lock mode
+ invariant(currentRequestHolds + currentRequestWaits <= 1);
+
+ if (lock->grantedCounts[i] > (currentRequestHolds + currentRequestWaits)) {
+ grantedModesWithoutCurrentRequest |= modeMask(static_cast<LockMode>(i));
+ }
+ }
- LockManager::LockBucket* LockManager::_getBucket(ResourceId resId) const {
- return &_lockBuckets[resId % _numLockBuckets];
- }
-
- LockManager::Partition* LockManager::_getPartition(LockRequest* request) const {
- return &_partitions[request->locker->getId() % _numPartitions];
- }
-
- void LockManager::dump() const {
- log() << "Dumping LockManager @ " << static_cast<const void*>(this) << '\n';
-
- for (unsigned i = 0; i < _numLockBuckets; i++) {
- LockBucket* bucket = &_lockBuckets[i];
- stdx::lock_guard<SimpleMutex> scopedLock(bucket->mutex);
+ if (!conflicts(iter->convertMode, grantedModesWithoutCurrentRequest)) {
+ lock->conversionsCount--;
+ lock->decGrantedModeCount(iter->mode);
+ iter->status = LockRequest::STATUS_GRANTED;
+ iter->mode = iter->convertMode;
+ iter->convertMode = MODE_NONE;
- if (!bucket->data.empty()) {
- _dumpBucket(bucket);
+ iter->notify->notify(lock->resourceId, LOCK_OK);
}
}
}
- void LockManager::_dumpBucket(const LockBucket* bucket) const {
- for (LockBucket::Map::const_iterator it = bucket->data.begin();
- it != bucket->data.end();
- it++) {
+ // Grant any conflicting requests, which might now be unblocked. Note that the loop below
+ // slightly violates fairness in that it will grant *all* compatible requests on the line
+ // even though there might be conflicting ones interspersed between them. For example,
+ // consider an X lock was just freed and the conflict queue looked like this:
+ //
+ // IS -> IS -> X -> X -> S -> IS
+ //
+ // In strict FIFO, we should grant the first two IS modes and then stop when we reach the
+ // first X mode (the third request on the queue). However, the loop below would actually
+ // grant all IS + S modes and once they all drain it will grant X.
- const LockHead* lock = it->second;
+ LockRequest* iterNext = NULL;
- if (lock->grantedList.empty()) {
- // If there are no granted requests, this lock is empty, so no need to print it
- continue;
- }
+ for (LockRequest* iter = lock->conflictList._front; (iter != NULL) && checkConflictQueue;
+ iter = iterNext) {
+ invariant(iter->status == LockRequest::STATUS_WAITING);
- StringBuilder sb;
- sb << "Lock @ " << lock << ": " << lock->resourceId.toString() << '\n';
-
- sb << "GRANTED:\n";
- for (const LockRequest* iter = lock->grantedList._front;
- iter != NULL;
- iter = iter->next) {
-
- sb << '\t'
- << "LockRequest " << iter->locker->getId() << " @ " << iter->locker << ": "
- << "Mode = " << modeName(iter->mode) << "; "
- << "ConvertMode = " << modeName(iter->convertMode) << "; "
- << "EnqueueAtFront = " << iter->enqueueAtFront << "; "
- << "CompatibleFirst = " << iter->compatibleFirst << "; "
- << '\n';
- }
+ // Store the actual next pointer, because we muck with the iter below and move it to
+ // the granted queue.
+ iterNext = iter->next;
- sb << '\n';
+ if (conflicts(iter->mode, lock->grantedModes)) {
+ continue;
+ }
- sb << "PENDING:\n";
- for (const LockRequest* iter = lock->conflictList._front;
- iter != NULL;
- iter = iter->next) {
+ iter->status = LockRequest::STATUS_GRANTED;
- sb << '\t'
- << "LockRequest " << iter->locker->getId() << " @ " << iter->locker << ": "
- << "Mode = " << modeName(iter->mode) << "; "
- << "ConvertMode = " << modeName(iter->convertMode) << "; "
- << "EnqueueAtFront = " << iter->enqueueAtFront << "; "
- << "CompatibleFirst = " << iter->compatibleFirst << "; "
- << '\n';
- }
+ lock->conflictList.remove(iter);
+ lock->grantedList.push_back(iter);
- log() << sb.str();
- }
- }
+ lock->incGrantedModeCount(iter->mode);
+ lock->decConflictModeCount(iter->mode);
- PartitionedLockHead* LockManager::Partition::find(ResourceId resId) {
- Map::iterator it = data.find(resId);
- return it == data.end() ? NULL : it->second;
- }
+ if (iter->compatibleFirst) {
+ lock->compatibleFirstCount++;
+ }
- PartitionedLockHead* LockManager::Partition::findOrInsert(ResourceId resId) {
- PartitionedLockHead* lock;
- Map::iterator it = data.find(resId);
- if (it == data.end()) {
- lock = new PartitionedLockHead();
- lock->initNew(resId);
+ iter->notify->notify(lock->resourceId, LOCK_OK);
- data.insert(Map::value_type(resId, lock));
+ // Small optimization - nothing is compatible with MODE_X, so no point in looking
+ // further in the conflict queue.
+ if (iter->mode == MODE_X) {
+ break;
}
- else {
- lock = it->second;
- }
- return lock;
}
- LockHead* LockManager::LockBucket::findOrInsert(ResourceId resId) {
- LockHead* lock;
- Map::iterator it = data.find(resId);
- if (it == data.end()) {
- lock = new LockHead();
- lock->initNew(resId);
+ // This is a convenient place to check that the state of the two request queues is in sync
+ // with the bitmask on the modes.
+ invariant((lock->grantedModes == 0) ^ (lock->grantedList._front != NULL));
+ invariant((lock->conflictModes == 0) ^ (lock->conflictList._front != NULL));
+}
- data.insert(Map::value_type(resId, lock));
- }
- else {
- lock = it->second;
- }
- return lock;
- }
+LockManager::LockBucket* LockManager::_getBucket(ResourceId resId) const {
+ return &_lockBuckets[resId % _numLockBuckets];
+}
- //
- // DeadlockDetector
- //
+LockManager::Partition* LockManager::_getPartition(LockRequest* request) const {
+ return &_partitions[request->locker->getId() % _numPartitions];
+}
- DeadlockDetector::DeadlockDetector(const LockManager& lockMgr, const Locker* initialLocker)
- : _lockMgr(lockMgr),
- _initialLockerId(initialLocker->getId()),
- _foundCycle(false) {
+void LockManager::dump() const {
+ log() << "Dumping LockManager @ " << static_cast<const void*>(this) << '\n';
- const ResourceId resId = initialLocker->getWaitingResource();
+ for (unsigned i = 0; i < _numLockBuckets; i++) {
+ LockBucket* bucket = &_lockBuckets[i];
+ stdx::lock_guard<SimpleMutex> scopedLock(bucket->mutex);
- // If there is no resource waiting there is nothing to do
- if (resId.isValid()) {
- _queue.push_front(UnprocessedNode(_initialLockerId, resId));
+ if (!bucket->data.empty()) {
+ _dumpBucket(bucket);
}
}
+}
- bool DeadlockDetector::next() {
- if (_queue.empty()) return false;
+void LockManager::_dumpBucket(const LockBucket* bucket) const {
+ for (LockBucket::Map::const_iterator it = bucket->data.begin(); it != bucket->data.end();
+ it++) {
+ const LockHead* lock = it->second;
- UnprocessedNode front = _queue.front();
- _queue.pop_front();
+ if (lock->grantedList.empty()) {
+ // If there are no granted requests, this lock is empty, so no need to print it
+ continue;
+ }
- _processNextNode(front);
+ StringBuilder sb;
+ sb << "Lock @ " << lock << ": " << lock->resourceId.toString() << '\n';
+
+ sb << "GRANTED:\n";
+ for (const LockRequest* iter = lock->grantedList._front; iter != NULL; iter = iter->next) {
+ sb << '\t' << "LockRequest " << iter->locker->getId() << " @ " << iter->locker << ": "
+ << "Mode = " << modeName(iter->mode) << "; "
+ << "ConvertMode = " << modeName(iter->convertMode) << "; "
+ << "EnqueueAtFront = " << iter->enqueueAtFront << "; "
+ << "CompatibleFirst = " << iter->compatibleFirst << "; " << '\n';
+ }
- return !_queue.empty();
- }
+ sb << '\n';
- bool DeadlockDetector::hasCycle() const {
- invariant(_queue.empty());
+ sb << "PENDING:\n";
+ for (const LockRequest* iter = lock->conflictList._front; iter != NULL; iter = iter->next) {
+ sb << '\t' << "LockRequest " << iter->locker->getId() << " @ " << iter->locker << ": "
+ << "Mode = " << modeName(iter->mode) << "; "
+ << "ConvertMode = " << modeName(iter->convertMode) << "; "
+ << "EnqueueAtFront = " << iter->enqueueAtFront << "; "
+ << "CompatibleFirst = " << iter->compatibleFirst << "; " << '\n';
+ }
- return _foundCycle;
+ log() << sb.str();
+ }
+}
+
+PartitionedLockHead* LockManager::Partition::find(ResourceId resId) {
+ Map::iterator it = data.find(resId);
+ return it == data.end() ? NULL : it->second;
+}
+
+PartitionedLockHead* LockManager::Partition::findOrInsert(ResourceId resId) {
+ PartitionedLockHead* lock;
+ Map::iterator it = data.find(resId);
+ if (it == data.end()) {
+ lock = new PartitionedLockHead();
+ lock->initNew(resId);
+
+ data.insert(Map::value_type(resId, lock));
+ } else {
+ lock = it->second;
}
+ return lock;
+}
+
+LockHead* LockManager::LockBucket::findOrInsert(ResourceId resId) {
+ LockHead* lock;
+ Map::iterator it = data.find(resId);
+ if (it == data.end()) {
+ lock = new LockHead();
+ lock->initNew(resId);
+
+ data.insert(Map::value_type(resId, lock));
+ } else {
+ lock = it->second;
+ }
+ return lock;
+}
- string DeadlockDetector::toString() const {
- StringBuilder sb;
+//
+// DeadlockDetector
+//
- for (WaitForGraph::const_iterator it = _graph.begin(); it != _graph.end(); it++) {
- sb << "Locker " << it->first << " waits for resource " << it->second.resId.toString()
- << " held by [";
+DeadlockDetector::DeadlockDetector(const LockManager& lockMgr, const Locker* initialLocker)
+ : _lockMgr(lockMgr), _initialLockerId(initialLocker->getId()), _foundCycle(false) {
+ const ResourceId resId = initialLocker->getWaitingResource();
- const ConflictingOwnersList owners = it->second.owners;
- for (ConflictingOwnersList::const_iterator itW = owners.begin();
- itW != owners.end();
- itW++) {
+ // If there is no resource waiting there is nothing to do
+ if (resId.isValid()) {
+ _queue.push_front(UnprocessedNode(_initialLockerId, resId));
+ }
+}
- sb << *itW << ", ";
- }
+bool DeadlockDetector::next() {
+ if (_queue.empty())
+ return false;
- sb << "]\n";
- }
+ UnprocessedNode front = _queue.front();
+ _queue.pop_front();
- return sb.str();
- }
+ _processNextNode(front);
- void DeadlockDetector::_processNextNode(const UnprocessedNode& node) {
- // Locate the request
- LockManager::LockBucket* bucket = _lockMgr._getBucket(node.resId);
- stdx::lock_guard<SimpleMutex> scopedLock(bucket->mutex);
+ return !_queue.empty();
+}
- LockManager::LockBucket::Map::const_iterator iter = bucket->data.find(node.resId);
- if (iter == bucket->data.end()) {
- return;
- }
+bool DeadlockDetector::hasCycle() const {
+ invariant(_queue.empty());
+
+ return _foundCycle;
+}
- const LockHead* lock = iter->second;
+string DeadlockDetector::toString() const {
+ StringBuilder sb;
- LockRequest* request = lock->findRequest(node.lockerId);
+ for (WaitForGraph::const_iterator it = _graph.begin(); it != _graph.end(); it++) {
+ sb << "Locker " << it->first << " waits for resource " << it->second.resId.toString()
+ << " held by [";
- // It is possible that a request which was thought to be waiting suddenly became
- // granted, so check that before proceeding
- if (!request || (request->status == LockRequest::STATUS_GRANTED)) {
- return;
+ const ConflictingOwnersList owners = it->second.owners;
+ for (ConflictingOwnersList::const_iterator itW = owners.begin(); itW != owners.end();
+ itW++) {
+ sb << *itW << ", ";
}
- std::pair<WaitForGraph::iterator, bool> val =
- _graph.insert(WaitForGraphPair(node.lockerId, Edges(node.resId)));
- if (!val.second) {
- // We already saw this locker id, which means we have a cycle.
- if (!_foundCycle) {
- _foundCycle = (node.lockerId == _initialLockerId);
- }
+ sb << "]\n";
+ }
- return;
- }
+ return sb.str();
+}
- Edges& edges = val.first->second;
+void DeadlockDetector::_processNextNode(const UnprocessedNode& node) {
+ // Locate the request
+ LockManager::LockBucket* bucket = _lockMgr._getBucket(node.resId);
+ stdx::lock_guard<SimpleMutex> scopedLock(bucket->mutex);
- bool seen = false;
- for (LockRequest* it = lock->grantedList._back; it != NULL; it = it->prev) {
- // We can't conflict with ourselves
- if (it == request) {
- seen = true;
- continue;
- }
+ LockManager::LockBucket::Map::const_iterator iter = bucket->data.find(node.resId);
+ if (iter == bucket->data.end()) {
+ return;
+ }
- // If we are a regular conflicting request, both granted and conversion modes need to
- // be checked for conflict, since conversions will be granted first.
- if (request->status == LockRequest::STATUS_WAITING) {
- if (conflicts(request->mode, modeMask(it->mode)) ||
- conflicts(request->mode, modeMask(it->convertMode))) {
+ const LockHead* lock = iter->second;
- const LockerId lockerId = it->locker->getId();
- const ResourceId waitResId = it->locker->getWaitingResource();
+ LockRequest* request = lock->findRequest(node.lockerId);
- if (waitResId.isValid()) {
- _queue.push_front(UnprocessedNode(lockerId, waitResId));
- edges.owners.push_back(lockerId);
- }
- }
+ // It is possible that a request which was thought to be waiting suddenly became
+ // granted, so check that before proceeding
+ if (!request || (request->status == LockRequest::STATUS_GRANTED)) {
+ return;
+ }
- continue;
- }
+ std::pair<WaitForGraph::iterator, bool> val =
+ _graph.insert(WaitForGraphPair(node.lockerId, Edges(node.resId)));
+ if (!val.second) {
+ // We already saw this locker id, which means we have a cycle.
+ if (!_foundCycle) {
+ _foundCycle = (node.lockerId == _initialLockerId);
+ }
- // If we are a conversion request, only requests, which are before us need to be
- // accounted for.
- invariant(request->status == LockRequest::STATUS_CONVERTING);
+ return;
+ }
- if (conflicts(request->convertMode, modeMask(it->mode)) ||
- (seen && conflicts(request->convertMode, modeMask(it->convertMode)))) {
+ Edges& edges = val.first->second;
+ bool seen = false;
+ for (LockRequest* it = lock->grantedList._back; it != NULL; it = it->prev) {
+ // We can't conflict with ourselves
+ if (it == request) {
+ seen = true;
+ continue;
+ }
+
+ // If we are a regular conflicting request, both granted and conversion modes need to
+ // be checked for conflict, since conversions will be granted first.
+ if (request->status == LockRequest::STATUS_WAITING) {
+ if (conflicts(request->mode, modeMask(it->mode)) ||
+ conflicts(request->mode, modeMask(it->convertMode))) {
const LockerId lockerId = it->locker->getId();
const ResourceId waitResId = it->locker->getWaitingResource();
@@ -1042,117 +978,132 @@ namespace {
edges.owners.push_back(lockerId);
}
}
+
+ continue;
}
- // All conflicting waits, which would be granted before us
- for (LockRequest* it = request->prev;
- (request->status == LockRequest::STATUS_WAITING) && (it != NULL);
- it = it->prev) {
+ // If we are a conversion request, only requests, which are before us need to be
+ // accounted for.
+ invariant(request->status == LockRequest::STATUS_CONVERTING);
- // We started from the previous element, so we should never see ourselves
- invariant(it != request);
+ if (conflicts(request->convertMode, modeMask(it->mode)) ||
+ (seen && conflicts(request->convertMode, modeMask(it->convertMode)))) {
+ const LockerId lockerId = it->locker->getId();
+ const ResourceId waitResId = it->locker->getWaitingResource();
- if (conflicts(request->mode, modeMask(it->mode))) {
- const LockerId lockerId = it->locker->getId();
- const ResourceId waitResId = it->locker->getWaitingResource();
+ if (waitResId.isValid()) {
+ _queue.push_front(UnprocessedNode(lockerId, waitResId));
+ edges.owners.push_back(lockerId);
+ }
+ }
+ }
- if (waitResId.isValid()) {
- _queue.push_front(UnprocessedNode(lockerId, waitResId));
- edges.owners.push_back(lockerId);
- }
+ // All conflicting waits, which would be granted before us
+ for (LockRequest* it = request->prev;
+ (request->status == LockRequest::STATUS_WAITING) && (it != NULL);
+ it = it->prev) {
+ // We started from the previous element, so we should never see ourselves
+ invariant(it != request);
+
+ if (conflicts(request->mode, modeMask(it->mode))) {
+ const LockerId lockerId = it->locker->getId();
+ const ResourceId waitResId = it->locker->getWaitingResource();
+
+ if (waitResId.isValid()) {
+ _queue.push_front(UnprocessedNode(lockerId, waitResId));
+ edges.owners.push_back(lockerId);
}
}
}
+}
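For reference, a minimal sketch of how the detector implemented above can be driven (lockMgr and blockedLocker are assumed instances, not part of this diff):

    // Build the wait-for graph starting from a blocked locker, then inspect it.
    DeadlockDetector wfg(lockMgr, blockedLocker);
    if (wfg.check().hasCycle()) {
        log() << "Deadlock detected:\n" << wfg.toString();
    }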
- //
- // ResourceId
- //
+//
+// ResourceId
+//
- static const StringData::Hasher stringDataHashFunction = StringData::Hasher();
+static const StringData::Hasher stringDataHashFunction = StringData::Hasher();
- uint64_t ResourceId::fullHash(ResourceType type, uint64_t hashId) {
- return (static_cast<uint64_t>(type) << (64 - resourceTypeBits))
- + (hashId & (std::numeric_limits<uint64_t>::max() >> resourceTypeBits));
- }
+uint64_t ResourceId::fullHash(ResourceType type, uint64_t hashId) {
+ return (static_cast<uint64_t>(type) << (64 - resourceTypeBits)) +
+ (hashId & (std::numeric_limits<uint64_t>::max() >> resourceTypeBits));
+}
- ResourceId::ResourceId(ResourceType type, StringData ns)
- : _fullHash(fullHash(type, stringDataHashFunction(ns))) {
+ResourceId::ResourceId(ResourceType type, StringData ns)
+ : _fullHash(fullHash(type, stringDataHashFunction(ns))) {
#ifdef MONGO_CONFIG_DEBUG_BUILD
- _nsCopy = ns.toString();
+ _nsCopy = ns.toString();
#endif
- }
+}
- ResourceId::ResourceId(ResourceType type, const string& ns)
- : _fullHash(fullHash(type, stringDataHashFunction(ns))) {
+ResourceId::ResourceId(ResourceType type, const string& ns)
+ : _fullHash(fullHash(type, stringDataHashFunction(ns))) {
#ifdef MONGO_CONFIG_DEBUG_BUILD
- _nsCopy = ns;
+ _nsCopy = ns;
#endif
- }
+}
- ResourceId::ResourceId(ResourceType type, uint64_t hashId)
- : _fullHash(fullHash(type, hashId)) { }
+ResourceId::ResourceId(ResourceType type, uint64_t hashId) : _fullHash(fullHash(type, hashId)) {}
- string ResourceId::toString() const {
- StringBuilder ss;
- ss << "{" << _fullHash << ": " << resourceTypeName(getType())
- << ", " << getHashId();
+string ResourceId::toString() const {
+ StringBuilder ss;
+ ss << "{" << _fullHash << ": " << resourceTypeName(getType()) << ", " << getHashId();
#ifdef MONGO_CONFIG_DEBUG_BUILD
- ss << ", " << _nsCopy;
+ ss << ", " << _nsCopy;
#endif
- ss << "}";
+ ss << "}";
- return ss.str();
- }
+ return ss.str();
+}
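As a hedged illustration of the hashing scheme (the namespace strings are made up):

    // The resource type occupies the top resourceTypeBits bits of the hash, so
    // ids of different types never compare equal even if their low bits collide.
    const ResourceId dbId(RESOURCE_DATABASE, std::string("test"));
    const ResourceId collId(RESOURCE_COLLECTION, std::string("test.coll"));
    invariant(dbId.isValid() && collId.isValid());
    invariant(dbId.getType() == RESOURCE_DATABASE);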
- //
- // LockRequest
- //
+//
+// LockRequest
+//
- void LockRequest::initNew(Locker* locker, LockGrantNotification* notify) {
- this->locker = locker;
- this->notify = notify;
-
- enqueueAtFront = false;
- compatibleFirst = false;
- recursiveCount = 0;
-
- lock = NULL;
- prev = NULL;
- next = NULL;
- status = STATUS_NEW;
- partitioned = false;
- mode = MODE_NONE;
- convertMode = MODE_NONE;
- }
+void LockRequest::initNew(Locker* locker, LockGrantNotification* notify) {
+ this->locker = locker;
+ this->notify = notify;
+ enqueueAtFront = false;
+ compatibleFirst = false;
+ recursiveCount = 0;
- //
- // Helper calls
- //
+ lock = NULL;
+ prev = NULL;
+ next = NULL;
+ status = STATUS_NEW;
+ partitioned = false;
+ mode = MODE_NONE;
+ convertMode = MODE_NONE;
+}
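A hedged lifecycle sketch for the initializer above (lockMgr, locker, notification and resId are assumed instances; both lock() calls are assumed to return LOCK_OK):

    LockRequest request;
    request.initNew(&locker, &notification);  // fresh, or recycled from a cache
    lockMgr.lock(resId, &request, MODE_IS);   // recursiveCount == 1
    lockMgr.lock(resId, &request, MODE_IS);   // recursiveCount == 2
    invariant(!lockMgr.unlock(&request));     // still referenced
    invariant(lockMgr.unlock(&request));      // last reference released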
- const char* modeName(LockMode mode) {
- return LockModeNames[mode];
- }
- const char* legacyModeName(LockMode mode) {
- return LegacyLockModeNames[mode];
- }
+//
+// Helper calls
+//
- bool isModeCovered(LockMode mode, LockMode coveringMode) {
- return (LockConflictsTable[coveringMode] | LockConflictsTable[mode]) ==
- LockConflictsTable[coveringMode];
- }
+const char* modeName(LockMode mode) {
+ return LockModeNames[mode];
+}
- const char* resourceTypeName(ResourceType resourceType) {
- return ResourceTypeNames[resourceType];
- }
+const char* legacyModeName(LockMode mode) {
+ return LegacyLockModeNames[mode];
+}
- const char* lockRequestStatusName(LockRequest::Status status) {
- return LockRequestStatusNames[status];
- }
+bool isModeCovered(LockMode mode, LockMode coveringMode) {
+ return (LockConflictsTable[coveringMode] | LockConflictsTable[mode]) ==
+ LockConflictsTable[coveringMode];
+}
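A few concrete instances of the covering relation, derivable from the compatibility matrix in lock_manager_defs.h:

    invariant(isModeCovered(MODE_IS, MODE_S));   // S blocks everything IS blocks
    invariant(isModeCovered(MODE_S, MODE_X));    // X blocks everything S blocks
    invariant(!isModeCovered(MODE_IX, MODE_S));  // IX conflicts with S; S itself does not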
+
+const char* resourceTypeName(ResourceType resourceType) {
+ return ResourceTypeNames[resourceType];
+}
+
+const char* lockRequestStatusName(LockRequest::Status status) {
+ return LockRequestStatusNames[status];
+}
-} // namespace mongo
+} // namespace mongo
diff --git a/src/mongo/db/concurrency/lock_manager.h b/src/mongo/db/concurrency/lock_manager.h
index badad082214..991768f54c4 100644
--- a/src/mongo/db/concurrency/lock_manager.h
+++ b/src/mongo/db/concurrency/lock_manager.h
@@ -43,247 +43,241 @@
namespace mongo {
+/**
+ * Entry point for the lock manager scheduling functionality. Don't use it directly, but
+ * instead go through the Locker interface.
+ */
+class LockManager {
+ MONGO_DISALLOW_COPYING(LockManager);
+
+public:
+ LockManager();
+ ~LockManager();
+
/**
- * Entry point for the lock manager scheduling functionality. Don't use it directly, but
- * instead go through the Locker interface.
- */
- class LockManager {
- MONGO_DISALLOW_COPYING(LockManager);
- public:
- LockManager();
- ~LockManager();
-
- /**
- * Acquires lock on the specified resource in the specified mode and returns the outcome
- * of the operation. See the details for LockResult for more information on what the
- * different results mean.
- *
- * Locking the same resource twice increments the reference count of the lock so each call
- * to lock must be matched with a call to unlock with the same resource.
- *
- * @param resId Id of the resource to be locked.
- * @param request LockRequest structure on which the state of the request will be tracked.
- * This value cannot be NULL and the notify value must be set. If the
- * return value is not LOCK_WAITING, this pointer can be freed and will
- * not be used any more.
- *
- * If the return value is LOCK_WAITING, the notification method will be
- * called at some point into the future, when the lock either becomes
- * granted or a deadlock is discovered. If unlock is called before the
- * lock becomes granted, the notification will not be invoked.
- *
- * If the return value is LOCK_WAITING, the notification object *must*
- * live at least until the notify method has been invoked or unlock has
- * been called for the resource it was assigned to. Failure to do so will
- * cause the lock manager to call into an invalid memory location.
- * @param mode Mode in which the resource should be locked. Lock upgrades are allowed.
- *
- * @return See comments for LockResult.
- */
- LockResult lock(ResourceId resId, LockRequest* request, LockMode mode);
- LockResult convert(ResourceId resId, LockRequest* request, LockMode newMode);
-
- /**
- * Decrements the reference count of a previously locked request and, if the reference count
- * becomes zero, removes the request and proceeds to grant any conflicting requests.
- *
- * This method always succeeds and never blocks.
- *
- * @param request A previously locked request. Calling unlock more times than lock was
- * called for the same LockRequest is an error.
- *
- * @return true if this is the last reference for the request; false otherwise
- */
- bool unlock(LockRequest* request);
-
- /**
- * Downgrades the mode in which an already granted request is held, without changing the
- * reference count of the lock request. This call never blocks, will always succeed and may
- * potentially allow other blocked lock requests to proceed.
- *
- * @param request Request, already in granted mode through a previous call to lock.
- * @param newMode Mode, which is less-restrictive than the mode in which the request is
- * already held. I.e., the conflict set of newMode must be a sub-set of
- * the conflict set of the request's current mode.
- */
- void downgrade(LockRequest* request, LockMode newMode);
-
- /**
- * Iterates through all buckets and deletes all locks, which have no requests on them. This
- * call is kind of expensive and should only be used for reducing the memory footprint of
- * the lock manager.
- */
- void cleanupUnusedLocks();
-
- /**
- * Dumps the contents of all locks to the log.
- */
- void dump() const;
-
- private:
- // The deadlock detector needs to access the buckets and locks directly
- friend class DeadlockDetector;
-
- // The lockheads need access to the partitions
- friend struct LockHead;
-
- // These types describe the locks hash table
-
- struct LockBucket {
- SimpleMutex mutex;
- typedef unordered_map<ResourceId, LockHead*> Map;
- Map data;
- LockHead* findOrInsert(ResourceId resId);
- };
-
- // Each locker maps to a partition that is used for resources acquired in intent modes
- // and potentially other modes that don't conflict with themselves. This avoids
- // contention on the regular LockHead in the lock manager.
- struct Partition {
- PartitionedLockHead* find(ResourceId resId);
- PartitionedLockHead* findOrInsert(ResourceId resId);
- typedef unordered_map<ResourceId, PartitionedLockHead*> Map;
- SimpleMutex mutex;
- Map data;
- };
-
- /**
- * Retrieves the bucket in which the particular resource must reside. There is no need to
- * hold a lock when calling this function.
- */
- LockBucket* _getBucket(ResourceId resId) const;
-
-
- /**
- * Retrieves the Partition that a particular LockRequest should use for intent locking.
- */
- Partition* _getPartition(LockRequest* request) const;
-
- /**
- * Prints the contents of a bucket to the log.
- */
- void _dumpBucket(const LockBucket* bucket) const;
-
- /**
- * Should be invoked when the state of a lock changes in a way, which could potentially
- * allow other blocked requests to proceed.
- *
- * MUST be called under the lock bucket's mutex.
- *
- * @param lock Lock whose grant state should be recalculated.
- * @param checkConflictQueue Whether to go through the conflict queue. This is an
- * optimisation in that we only need to check the conflict queue if one of the
- * granted modes, which was conflicting before, became zero.
- */
- void _onLockModeChanged(LockHead* lock, bool checkConflictQueue);
-
- static const unsigned _numLockBuckets;
- LockBucket* _lockBuckets;
-
- static const unsigned _numPartitions;
- Partition* _partitions;
- };
+ * Acquires lock on the specified resource in the specified mode and returns the outcome
+ * of the operation. See the details for LockResult for more information on what the
+ * different results mean.
+ *
+ * Locking the same resource twice increments the reference count of the lock so each call
+ * to lock must be matched with a call to unlock with the same resource.
+ *
+ * @param resId Id of the resource to be locked.
+ * @param request LockRequest structure on which the state of the request will be tracked.
+ * This value cannot be NULL and the notify value must be set. If the
+ * return value is not LOCK_WAITING, this pointer can be freed and will
+ * not be used any more.
+ *
+ * If the return value is LOCK_WAITING, the notification method will be
+ * called at some point into the future, when the lock either becomes
+ * granted or a deadlock is discovered. If unlock is called before the
+ * lock becomes granted, the notification will not be invoked.
+ *
+ * If the return value is LOCK_WAITING, the notification object *must*
+ * live at least until the notify method has been invoked or unlock has
+ * been called for the resource it was assigned to. Failure to do so will
+ * cause the lock manager to call into an invalid memory location.
+ * @param mode Mode in which the resource should be locked. Lock upgrades are allowed.
+ *
+ * @return See comments for LockResult.
+ */
+ LockResult lock(ResourceId resId, LockRequest* request, LockMode mode);
+ LockResult convert(ResourceId resId, LockRequest* request, LockMode newMode);
+ /**
+ * Decrements the reference count of a previously locked request and, if the reference count
+ * becomes zero, removes the request and proceeds to grant any conflicting requests.
+ *
+ * This method always succeeds and never blocks.
+ *
+ * @param request A previously locked request. Calling unlock more times than lock was
+ * called for the same LockRequest is an error.
+ *
+ * @return true if this is the last reference for the request; false otherwise
+ */
+ bool unlock(LockRequest* request);
/**
- * Iteratively builds the wait-for graph, starting from a given blocked Locker and stops either
- * when all reachable nodes have been checked or when a cycle is detected. This class is
- * thread-safe. Because locks may come and go in parallel with deadlock detection, it may
- * report false positives, but if there is a stable cycle it will be discovered.
+ * Downgrades the mode in which an already granted request is held, without changing the
+ * reference count of the lock request. This call never blocks, will always succeed and may
+ * potentially allow other blocked lock requests to proceed.
*
- * Implemented as a separate class in order to facilitate diagnostics and also unit-testing for
- * cases where locks come and go in parallel with deadlock detection.
+ * @param request Request, already in granted mode through a previous call to lock.
+ * @param newMode Mode, which is less-restrictive than the mode in which the request is
+ * already held. I.e., the conflict set of newMode must be a sub-set of
+ * the conflict set of the request's current mode.
*/
- class DeadlockDetector {
- public:
+ void downgrade(LockRequest* request, LockMode newMode);
- /**
- * Initializes the wait-for graph builder with the LM to operate on and a locker object
- * from which to start the search. Deadlock will only be reported if there is a wait cycle
- * in which the initial locker participates.
- */
- DeadlockDetector(const LockManager& lockMgr, const Locker* initialLocker);
+ /**
+ * Iterates through all buckets and deletes all locks, which have no requests on them. This
+ * call is kind of expensive and should only be used for reducing the memory footprint of
+ * the lock manager.
+ */
+ void cleanupUnusedLocks();
- DeadlockDetector& check() {
- while (next()) {
+ /**
+ * Dumps the contents of all locks to the log.
+ */
+ void dump() const;
- }
+private:
+ // The deadlock detector needs to access the buckets and locks directly
+ friend class DeadlockDetector;
- return *this;
- }
+ // The lockheads need access to the partitions
+ friend struct LockHead;
- /**
- * Processes the next wait-for node and queues up its set of owners to the unprocessed
- * queue.
- *
- * @return true if there are more unprocessed nodes and no cycle has been discovered yet;
- *         false if either all reachable nodes have been processed or a cycle has been discovered.
- */
- bool next();
+ // These types describe the locks hash table
- /**
- * Checks whether a cycle exists in the wait-for graph, which has been built so far. It's
- * only useful to call this after next() has returned false.
- */
- bool hasCycle() const;
+ struct LockBucket {
+ SimpleMutex mutex;
+ typedef unordered_map<ResourceId, LockHead*> Map;
+ Map data;
+ LockHead* findOrInsert(ResourceId resId);
+ };
+
+ // Each locker maps to a partition that is used for resources acquired in intent modes
+ // and potentially other modes that don't conflict with themselves. This avoids
+ // contention on the regular LockHead in the lock manager.
+ struct Partition {
+ PartitionedLockHead* find(ResourceId resId);
+ PartitionedLockHead* findOrInsert(ResourceId resId);
+ typedef unordered_map<ResourceId, PartitionedLockHead*> Map;
+ SimpleMutex mutex;
+ Map data;
+ };
- /**
- * Produces a string containing the wait-for graph that has been built so far.
- */
- std::string toString() const;
+ /**
+ * Retrieves the bucket in which the particular resource must reside. There is no need to
+ * hold a lock when calling this function.
+ */
+ LockBucket* _getBucket(ResourceId resId) const;
- private:
- // An entry in the owners list below means that some locker L is blocked on some resource
- // resId, which is currently held by the given set of owners. The reason to store it in
- // such form is in order to avoid storing pointers to the lockers or to have to look them
- // up by id, both of which require some form of synchronization other than locking the
- // bucket for the resource. Instead, given the resId, we can lock the bucket for the lock
- // and find the respective LockRequests and continue our scan forward.
- typedef std::vector<LockerId> ConflictingOwnersList;
+ /**
+ * Retrieves the Partition that a particular LockRequest should use for intent locking.
+ */
+ Partition* _getPartition(LockRequest* request) const;
- struct Edges {
- explicit Edges(ResourceId resId) : resId(resId) { }
+ /**
+ * Prints the contents of a bucket to the log.
+ */
+ void _dumpBucket(const LockBucket* bucket) const;
- // Resource id indicating the lock node
- ResourceId resId;
+ /**
+ * Should be invoked when the state of a lock changes in a way, which could potentially
+ * allow other blocked requests to proceed.
+ *
+ * MUST be called under the lock bucket's mutex.
+ *
+ * @param lock Lock whose grant state should be recalculated.
+ * @param checkConflictQueue Whether to go through the conflict queue. This is an
+ * optimisation in that we only need to check the conflict queue if one of the
+ * granted modes, which was conflicting before, became zero.
+ */
+ void _onLockModeChanged(LockHead* lock, bool checkConflictQueue);
- // List of lock owners/participants with which the initial locker conflicts for
- // obtaining the lock
- ConflictingOwnersList owners;
- };
+ static const unsigned _numLockBuckets;
+ LockBucket* _lockBuckets;
- typedef std::map<LockerId, Edges> WaitForGraph;
- typedef WaitForGraph::value_type WaitForGraphPair;
+ static const unsigned _numPartitions;
+ Partition* _partitions;
+};
- // We don't want to hold locks between iteration cycles, so just store the resourceId and
- // the lockerId so we can directly find them from the lock manager.
- struct UnprocessedNode {
- UnprocessedNode(LockerId lockerId, ResourceId resId)
- : lockerId(lockerId),
- resId(resId) {
+/**
+ * Iteratively builds the wait-for graph, starting from a given blocked Locker and stops either
+ * when all reachable nodes have been checked or when a cycle is detected. This class is
+ * thread-safe. Because locks may come and go in parallel with deadlock detection, it may
+ * report false positives, but if there is a stable cycle it will be discovered.
+ *
+ * Implemented as a separate class in order to facilitate diagnostics and also unit-testing for
+ * cases where locks come and go in parallel with deadlock detection.
+ */
+class DeadlockDetector {
+public:
+ /**
+ * Initializes the wait-for graph builder with the LM to operate on and a locker object
+ * from which to start the search. Deadlock will only be reported if there is a wait cycle
+ * in which the initial locker participates.
+ */
+ DeadlockDetector(const LockManager& lockMgr, const Locker* initialLocker);
- }
+ DeadlockDetector& check() {
+ while (next()) {
+ }
- LockerId lockerId;
- ResourceId resId;
- };
+ return *this;
+ }
- typedef std::deque<UnprocessedNode> UnprocessedNodesQueue;
+ /**
+ * Processes the next wait-for node and queues up its set of owners to the unprocessed
+ * queue.
+ *
+ * @return true if there are more unprocessed nodes and no cycle has been discovered yet;
+ *         false if either all reachable nodes have been processed or a cycle has been discovered.
+ */
+ bool next();
+ /**
+ * Checks whether a cycle exists in the wait-for graph, which has been built so far. It's
+ * only useful to call this after next() has returned false.
+ */
+ bool hasCycle() const;
- void _processNextNode(const UnprocessedNode& node);
+ /**
+ * Produces a string containing the wait-for graph that has been built so far.
+ */
+ std::string toString() const;
+
+private:
+ // An entry in the owners list below means that some locker L is blocked on some resource
+ // resId, which is currently held by the given set of owners. The reason to store it in
+ // such form is in order to avoid storing pointers to the lockers or to have to look them
+ // up by id, both of which require some form of synchronization other than locking the
+ // bucket for the resource. Instead, given the resId, we can lock the bucket for the lock
+ // and find the respective LockRequests and continue our scan forward.
+ typedef std::vector<LockerId> ConflictingOwnersList;
+
+ struct Edges {
+ explicit Edges(ResourceId resId) : resId(resId) {}
+
+ // Resource id indicating the lock node
+ ResourceId resId;
+
+ // List of lock owners/participants with which the initial locker conflicts for
+ // obtaining the lock
+ ConflictingOwnersList owners;
+ };
+ typedef std::map<LockerId, Edges> WaitForGraph;
+ typedef WaitForGraph::value_type WaitForGraphPair;
- // Not owned. Lifetime must be longer than that of the graph builder.
- const LockManager& _lockMgr;
- const LockerId _initialLockerId;
- UnprocessedNodesQueue _queue;
- WaitForGraph _graph;
+ // We don't want to hold locks between iteration cycles, so just store the resourceId and
+ // the lockerId so we can directly find them from the lock manager.
+ struct UnprocessedNode {
+ UnprocessedNode(LockerId lockerId, ResourceId resId) : lockerId(lockerId), resId(resId) {}
- bool _foundCycle;
+ LockerId lockerId;
+ ResourceId resId;
};
-} // namespace mongo
+ typedef std::deque<UnprocessedNode> UnprocessedNodesQueue;
+
+
+ void _processNextNode(const UnprocessedNode& node);
+
+
+ // Not owned. Lifetime must be longer than that of the graph builder.
+ const LockManager& _lockMgr;
+ const LockerId _initialLockerId;
+
+ UnprocessedNodesQueue _queue;
+ WaitForGraph _graph;
+
+ bool _foundCycle;
+};
+
+} // namespace mongo
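To tie the contract above together, a hedged usage sketch. The locker instance is an assumption, CondVarLockGrantNotification is the blocking implementation mentioned in lock_manager_defs.h, and in the server these calls normally go through the Locker interface rather than being made directly:

    LockManager lockMgr;
    LockRequest request;
    CondVarLockGrantNotification notify;  // blocks the caller until notified
    request.initNew(&locker, &notify);

    const ResourceId resId(RESOURCE_COLLECTION, std::string("db.coll"));
    if (lockMgr.lock(resId, &request, MODE_IX) == LOCK_WAITING) {
        // 'notify' must stay alive until its notify() method fires or until
        // unlock() cancels the wait; otherwise the manager touches freed memory.
    }
    // ... use the resource ...
    lockMgr.unlock(&request);  // one unlock per successful lock() call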
diff --git a/src/mongo/db/concurrency/lock_manager_defs.h b/src/mongo/db/concurrency/lock_manager_defs.h
index 33cc0279ea9..2f93a4ca017 100644
--- a/src/mongo/db/concurrency/lock_manager_defs.h
+++ b/src/mongo/db/concurrency/lock_manager_defs.h
@@ -39,369 +39,371 @@
namespace mongo {
- class Locker;
+class Locker;
- struct LockHead;
- struct PartitionedLockHead;
+struct LockHead;
+struct PartitionedLockHead;
+
+/**
+ * Lock modes.
+ *
+ * Compatibility Matrix
+ * Granted mode
+ * ---------------.--------------------------------------------------------.
+ * Requested Mode | MODE_NONE MODE_IS MODE_IX MODE_S MODE_X |
+ * MODE_IS | + + + + - |
+ * MODE_IX | + + + - - |
+ * MODE_S | + + - + - |
+ * MODE_X | + - - - - |
+ */
+enum LockMode {
+ MODE_NONE = 0,
+ MODE_IS = 1,
+ MODE_IX = 2,
+ MODE_S = 3,
+ MODE_X = 4,
+
+ // Counts the lock modes. Used for array size allocations, etc. Always insert new lock
+ // modes above this entry.
+ LockModesCount
+};
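One way to read the matrix in code: a sketch of conflict bitmasks reconstructed from it (the real LockConflictsTable lives in lock_manager.cpp; the literals below are derived from the matrix above, not quoted from the source):

    // Each entry is the set of granted modes that blocks the requested mode.
    static const uint32_t kConflictsSketch[LockModesCount] = {
        0,                                                                // MODE_NONE
        (1 << MODE_X),                                                    // MODE_IS
        (1 << MODE_S) | (1 << MODE_X),                                    // MODE_IX
        (1 << MODE_IX) | (1 << MODE_X),                                   // MODE_S
        (1 << MODE_IS) | (1 << MODE_IX) | (1 << MODE_S) | (1 << MODE_X)   // MODE_X
    };

    // A requested mode conflicts with a set of granted modes iff the masks intersect.
    inline bool conflictsSketch(LockMode requested, uint32_t grantedMask) {
        return (kConflictsSketch[requested] & grantedMask) != 0;
    }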
+
+/**
+ * Returns a human-readable name for the specified lock mode.
+ */
+const char* modeName(LockMode mode);
+
+/**
+ * Legacy lock mode names in parity for 2.6 reports.
+ */
+const char* legacyModeName(LockMode mode);
+
+/**
+ * Mode A is covered by mode B if the set of conflicts for mode A is a subset of the set of
+ * conflicts for mode B. For example S is covered by X. IS is covered by S. However, IX is not
+ * covered by S or IS.
+ */
+bool isModeCovered(LockMode mode, LockMode coveringMode);
+
+/**
+ * Returns whether the passed in mode is S or IS. Used for validation checks.
+ */
+inline bool isSharedLockMode(LockMode mode) {
+ return (mode == MODE_IS || mode == MODE_S);
+}
+
+
+/**
+ * Return values for the locking functions of the lock manager.
+ */
+enum LockResult {
/**
- * Lock modes.
- *
- * Compatibility Matrix
- * Granted mode
- * ---------------.--------------------------------------------------------.
- * Requested Mode | MODE_NONE MODE_IS MODE_IX MODE_S MODE_X |
- * MODE_IS | + + + + - |
- * MODE_IX | + + + - - |
- * MODE_S | + + - + - |
- * MODE_X | + - - - - |
+ * The lock request was granted and is now on the granted list for the specified resource.
*/
- enum LockMode {
- MODE_NONE = 0,
- MODE_IS = 1,
- MODE_IX = 2,
- MODE_S = 3,
- MODE_X = 4,
-
- // Counts the lock modes. Used for array size allocations, etc. Always insert new lock
- // modes above this entry.
- LockModesCount
- };
+ LOCK_OK,
/**
- * Returns a human-readable name for the specified lock mode.
+ * The lock request was not granted because of conflict. If this value is returned, the
+ * request was placed on the conflict queue of the specified resource and a call to the
+ * LockGrantNotification::notify callback should be expected with the resource whose lock
+ * was requested.
*/
- const char* modeName(LockMode mode);
+ LOCK_WAITING,
/**
- * Legacy lock mode names, kept in parity with 2.6 reports.
+ * The lock request waited, but timed out before it could be granted. This value is never
+ * returned by the LockManager methods here, but by the Locker class, which offers
+ * capability to block while waiting for locks.
*/
- const char* legacyModeName(LockMode mode);
+ LOCK_TIMEOUT,
/**
- * Mode A is covered by mode B if the set of conflicts for mode A is a subset of the set of
- * conflicts for mode B. For example S is covered by X. IS is covered by S. However, IX is not
- * covered by S or IS.
+ * The lock request was not granted because it would result in a deadlock. No changes to
+ * the state of the Locker would be made if this value is returned (i.e., it will not be
+ * killed due to deadlock). It is up to the caller to decide how to recover from this
+ * return value - either release some locks and try again, or just bail out with an
+ * error and let some upper layer handle it.
*/
- bool isModeCovered(LockMode mode, LockMode coveringMode);
+ LOCK_DEADLOCK,
/**
- * Returns whether the passed in mode is S or IS. Used for validation checks.
+ * This is used as an initialiser value. Should never be returned.
*/
- inline bool isSharedLockMode(LockMode mode) {
- return (mode == MODE_IS || mode == MODE_S);
- }
+ LOCK_INVALID
+};
- /**
- * Return values for the locking functions of the lock manager.
- */
- enum LockResult {
-
- /**
- * The lock request was granted and is now on the granted list for the specified resource.
- */
- LOCK_OK,
-
- /**
- * The lock request was not granted because of conflict. If this value is returned, the
- * request was placed on the conflict queue of the specified resource and a call to the
- * LockGrantNotification::notify callback should be expected with the resource whose lock
- * was requested.
- */
- LOCK_WAITING,
-
- /**
- * The lock request waited, but timed out before it could be granted. This value is never
- * returned by the LockManager methods here, but by the Locker class, which offers
- * capability to block while waiting for locks.
- */
- LOCK_TIMEOUT,
-
- /**
- * The lock request was not granted because it would result in a deadlock. No changes to
- * the state of the Locker would be made if this value is returned (i.e., it will not be
- * killed due to deadlock). It is up to the caller to decide how to recover from this
- * return value - either release some locks and try again, or just bail out with an
- * error and let some upper layer handle it.
- */
- LOCK_DEADLOCK,
-
- /**
- * This is used as an initialiser value. Should never be returned.
- */
- LOCK_INVALID
- };
+/**
+ * Hierarchy of resource types. The lock manager knows nothing about this hierarchy; it is
+ * purely logical. Resources of different types will never conflict with each other.
+ *
+ * While the lock manager does not know or care about ordering, the general policy is that
+ * resources are acquired in the order below. For example, one might first acquire a
+ * RESOURCE_GLOBAL and then the desired RESOURCE_DATABASE, both using intent modes, and
+ * finally a RESOURCE_COLLECTION in exclusive mode. When locking multiple resources of the
+ * same type, the canonical order is by resourceId order.
+ *
+ * It is OK to lock resources out of order, but it is the user's responsibility to ensure
+ * ordering is consistent so deadlock cannot occur.
+ */
+enum ResourceType {
+ // Types used for special resources, use with a hash id from ResourceId::SingletonHashIds.
+ RESOURCE_INVALID = 0,
+ RESOURCE_GLOBAL, // Used for mode changes or global exclusive operations
+ RESOURCE_MMAPV1_FLUSH, // Necessary only for the MMAPv1 engine
+
+ // Generic resources
+ RESOURCE_DATABASE,
+ RESOURCE_COLLECTION,
+ RESOURCE_METADATA,
+
+ // Counts the rest. Always insert new resource types above this entry.
+ ResourceTypesCount
+};
+
+/**
+ * Returns a human-readable name for the specified resource type.
+ */
+const char* resourceTypeName(ResourceType resourceType);
+/**
+ * Uniquely identifies a lockable resource.
+ */
+class ResourceId {
+ // We only use 3 bits for the resource type in the ResourceId hash
+ enum { resourceTypeBits = 3 };
+ BOOST_STATIC_ASSERT(ResourceTypesCount <= (1 << resourceTypeBits));
+public:
/**
- * Hierarchy of resource types. The lock manager knows nothing about this hierarchy; it is
- * purely logical. Resources of different types will never conflict with each other.
- *
- * While the lock manager does not know or care about ordering, the general policy is that
- * resources are acquired in the order below. For example, one might first acquire a
- * RESOURCE_GLOBAL and then the desired RESOURCE_DATABASE, both using intent modes, and
- * finally a RESOURCE_COLLECTION in exclusive mode. When locking multiple resources of the
- * same type, the canonical order is by resourceId order.
- *
- * It is OK to lock resources out of order, but it is the user's responsibility to ensure
- * ordering is consistent so deadlock cannot occur.
+ * Assign hash ids for special resources to avoid accidental reuse of ids. For ids used
+ * with the same ResourceType, the order here must be the same as the locking order.
*/
- enum ResourceType {
- // Types used for special resources, use with a hash id from ResourceId::SingletonHashIds.
- RESOURCE_INVALID = 0,
- RESOURCE_GLOBAL, // Used for mode changes or global exclusive operations
- RESOURCE_MMAPV1_FLUSH, // Necessary only for the MMAPv1 engine
-
- // Generic resources
- RESOURCE_DATABASE,
- RESOURCE_COLLECTION,
- RESOURCE_METADATA,
-
- // Counts the rest. Always insert new resource types above this entry.
- ResourceTypesCount
+ enum SingletonHashIds {
+ SINGLETON_INVALID = 0,
+ SINGLETON_PARALLEL_BATCH_WRITER_MODE,
+ SINGLETON_GLOBAL,
+ SINGLETON_MMAPV1_FLUSH
};
- /**
- * Returns a human-readable name for the specified resource type.
- */
- const char* resourceTypeName(ResourceType resourceType);
+ ResourceId() : _fullHash(0) {}
+ ResourceId(ResourceType type, StringData ns);
+ ResourceId(ResourceType type, const std::string& ns);
+ ResourceId(ResourceType type, uint64_t hashId);
+
+ bool isValid() const {
+ return getType() != RESOURCE_INVALID;
+ }
+
+ operator uint64_t() const {
+ return _fullHash;
+ }
+ // This defines the canonical locking order, first by type and then hash id
+ bool operator<(const ResourceId& rhs) const {
+ return _fullHash < rhs._fullHash;
+ }
+
+ ResourceType getType() const {
+ return static_cast<ResourceType>(_fullHash >> (64 - resourceTypeBits));
+ }
+
+ uint64_t getHashId() const {
+ return _fullHash & (std::numeric_limits<uint64_t>::max() >> resourceTypeBits);
+ }
+
+ std::string toString() const;
+
+private:
/**
- * Uniquely identifies a lockable resource.
+ * The top 'resourceTypeBits' bits of '_fullHash' represent the resource type,
+ * while the remaining bits contain the bottom bits of the hashId. This avoids false
+ * conflicts between resources of different types, which is necessary to prevent deadlocks.
*/
- class ResourceId {
- // We only use 3 bits for the resource type in the ResourceId hash
- enum {resourceTypeBits = 3};
- BOOST_STATIC_ASSERT(ResourceTypesCount <= (1 << resourceTypeBits));
-
- public:
- /**
- * Assign hash ids for special resources to avoid accidental reuse of ids. For ids used
- * with the same ResourceType, the order here must be the same as the locking order.
- */
- enum SingletonHashIds {
- SINGLETON_INVALID = 0,
- SINGLETON_PARALLEL_BATCH_WRITER_MODE,
- SINGLETON_GLOBAL,
- SINGLETON_MMAPV1_FLUSH
- };
-
- ResourceId() : _fullHash(0) { }
- ResourceId(ResourceType type, StringData ns);
- ResourceId(ResourceType type, const std::string& ns);
- ResourceId(ResourceType type, uint64_t hashId);
-
- bool isValid() const { return getType() != RESOURCE_INVALID; }
-
- operator uint64_t() const {
- return _fullHash;
- }
-
- // This defines the canonical locking order, first by type and then hash id
- bool operator<(const ResourceId& rhs) const {
- return _fullHash < rhs._fullHash;
- }
-
- ResourceType getType() const {
- return static_cast<ResourceType>(_fullHash >> (64 - resourceTypeBits));
- }
-
- uint64_t getHashId() const {
- return _fullHash & (std::numeric_limits<uint64_t>::max() >> resourceTypeBits);
- }
-
- std::string toString() const;
-
- private:
- /**
- * The top 'resourceTypeBits' bits of '_fullHash' represent the resource type,
- * while the remaining bits contain the bottom bits of the hashId. This avoids false
- * conflicts between resources of different types, which is necessary to prevent deadlocks.
- */
- uint64_t _fullHash;
-
- static uint64_t fullHash(ResourceType type, uint64_t hashId);
+ uint64_t _fullHash;
+
+ static uint64_t fullHash(ResourceType type, uint64_t hashId);
#ifdef MONGO_CONFIG_DEBUG_BUILD
- // Keep the complete namespace name for debugging purposes (TODO: this will be
- // removed once we are confident in the robustness of the lock manager).
- std::string _nsCopy;
+ // Keep the complete namespace name for debugging purposes (TODO: this will be
+ // removed once we are confident in the robustness of the lock manager).
+ std::string _nsCopy;
#endif
- };
+};
#ifndef MONGO_CONFIG_DEBUG_BUILD
- // Treat the resource ids as 64-bit integers in release mode in order to ensure we do
- // not spend too much time doing comparisons for hashing.
- BOOST_STATIC_ASSERT(sizeof(ResourceId) == sizeof(uint64_t));
+// Treat the resource ids as 64-bit integers in release mode in order to ensure we do
+// not spend too much time doing comparisons for hashing.
+BOOST_STATIC_ASSERT(sizeof(ResourceId) == sizeof(uint64_t));
#endif
- // Type to uniquely identify a given locker object
- typedef uint64_t LockerId;
+// Type to uniquely identify a given locker object
+typedef uint64_t LockerId;
- // Hardcoded resource id for the oplog collection, which is special-cased both for resource
- // acquisition purposes and for statistics reporting.
- extern const ResourceId resourceIdLocalDB;
- extern const ResourceId resourceIdOplog;
+// Hardcoded resource id for the oplog collection, which is special-cased both for resource
+// acquisition purposes and for statistics reporting.
+extern const ResourceId resourceIdLocalDB;
+extern const ResourceId resourceIdOplog;
- // Hardcoded resource id for admin db. This is to ensure direct writes to auth collections
- // are serialized (see SERVER-16092)
- extern const ResourceId resourceIdAdminDB;
+// Hardcoded resource id for admin db. This is to ensure direct writes to auth collections
+// are serialized (see SERVER-16092)
+extern const ResourceId resourceIdAdminDB;
- // Hardcoded resource id for ParallelBatchWriterMode. We use the same resource type
- // as resourceIdGlobal. This will also ensure the waits are reported as global, which
- // is appropriate. The lock will never be contended unless the parallel batch writers
- // must stop all other accesses globally. This resource must be locked before all other
- // resources (including resourceIdGlobal). Replication applier threads don't take this
- // lock.
- // TODO: Merge this with resourceIdGlobal
- extern const ResourceId resourceIdParallelBatchWriterMode;
+// Hardcoded resource id for ParallelBatchWriterMode. We use the same resource type
+// as resourceIdGlobal. This will also ensure the waits are reported as global, which
+// is appropriate. The lock will never be contended unless the parallel batch writers
+// must stop all other accesses globally. This resource must be locked before all other
+// resources (including resourceIdGlobal). Replication applier threads don't take this
+// lock.
+// TODO: Merge this with resourceIdGlobal
+extern const ResourceId resourceIdParallelBatchWriterMode;
+
+/**
+ * Interface on which granted lock requests will be notified. See the contract for the notify
+ * method for more information and also the LockManager::lock call.
+ *
+ * The default implementation of this method would simply block on an event until notify has
+ * been invoked (see CondVarLockGrantNotification).
+ *
+ * Test implementations could just count the number of notifications and their outcome so that
+ * they can validate locks are granted as desired and drive the test execution.
+ */
+class LockGrantNotification {
+public:
+ virtual ~LockGrantNotification() {}
/**
- * Interface on which granted lock requests will be notified. See the contract for the notify
- * method for more information and also the LockManager::lock call.
+ * This method is invoked at most once for each lock request and indicates the outcome of
+ * the lock acquisition for the specified resource id.
+ *
+ * The case where it won't be called is if a lock acquisition (be it in waiting or converting
+ * state) is cancelled through a call to unlock.
*
- * The default implementation of this method would simply block on an event until notify has
- * been invoked (see CondVarLockGrantNotification).
+ * IMPORTANT: This callback runs under a spinlock for the lock manager, so the work done
+ * inside must be kept to a minimum and no locks or operations which may block
+ * should be run. Also, no methods which call back into the lock manager should
+ * be invoked from within this method (LockManager is not reentrant).
*
- * Test implementations could just count the number of notifications and their outcome so that
- * they can validate locks are granted as desired and drive the test execution.
+ * @resId ResourceId for which a lock operation was previously called.
+ * @result Outcome of the lock operation.
*/
- class LockGrantNotification {
- public:
- virtual ~LockGrantNotification() {}
-
- /**
- * This method is invoked at most once for each lock request and indicates the outcome of
- * the lock acquisition for the specified resource id.
- *
- * The case where it won't be called is if a lock acquisition (be it in waiting or converting
- * state) is cancelled through a call to unlock.
- *
- * IMPORTANT: This callback runs under a spinlock for the lock manager, so the work done
- * inside must be kept to a minimum and no locks or operations which may block
- * should be run. Also, no methods which call back into the lock manager should
- * be invoked from within this method (LockManager is not reentrant).
- *
- * @resId ResourceId for which a lock operation was previously called.
- * @result Outcome of the lock operation.
- */
- virtual void notify(ResourceId resId, LockResult result) = 0;
- };
+ virtual void notify(ResourceId resId, LockResult result) = 0;
+};
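A minimal test-style implementation along the lines the comment above suggests (the class and member names are illustrative):

    class CountingNotification : public LockGrantNotification {
    public:
        CountingNotification() : numNotifies(0), lastResult(LOCK_INVALID) {}

        virtual void notify(ResourceId resId, LockResult result) {
            // Keep this cheap: it runs under the lock manager's spinlock.
            numNotifies++;
            lastResId = resId;
            lastResult = result;
        }

        int numNotifies;
        ResourceId lastResId;
        LockResult lastResult;
    };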
- /**
- * There is one of these entries per request for a lock. They hang on a linked list off
- * the LockHead or off a PartitionedLockHead and also are in a map for each Locker. This
- * structure is not thread-safe.
- *
- * LockRequests are owned by the Locker class, which controls their lifetime. They should not
- * be deleted while on the LockManager though (see the contract for the lock/unlock methods).
- */
- struct LockRequest {
-
- enum Status {
- STATUS_NEW,
- STATUS_GRANTED,
- STATUS_WAITING,
- STATUS_CONVERTING,
-
- // Counts the rest. Always insert new status types above this entry.
- StatusCount
- };
-
- /**
- * Used for initialization of a LockRequest, which might have been retrieved from cache.
- */
- void initNew(Locker* locker, LockGrantNotification* notify);
-
-
- //
- // These fields are maintained by the Locker class
- //
-
- // This is the Locker, which created this LockRequest. Pointer is not owned, just
- // referenced. Must outlive the LockRequest.
- Locker* locker;
-
- // Not owned, just referenced. If a request is in the WAITING or CONVERTING state, this
- // must live at least until the request is cancelled through LockManager::unlock or the
- // notification has been invoked.
- LockGrantNotification* notify;
-
-
- //
- // These fields are maintained by both the LockManager and Locker class
- //
-
- // If the request cannot be granted right away, whether to put it at the front or at the
- // end of the queue. By default, requests are put at the back. Putting a request at the
- // front effectively bypasses fairness. Defaults to false.
- bool enqueueAtFront;
-
- // When this request is granted and as long as it is on the granted queue, the particular
- // resource's policy will be changed to "compatibleFirst". This means that even if there
- // are pending requests on the conflict queue, if a compatible request comes in it will be
- // granted immediately. This effectively turns off fairness.
- bool compatibleFirst;
-
- // When set, an attempt is made to execute this request using partitioned lockheads.
- // This speeds up the common case where all requested locking modes are compatible with
- // each other, at the cost of extra overhead for conflicting modes.
- bool partitioned;
-
- // How many times has LockManager::lock been called for this request. Locks are released
- // when their recursive count drops to zero.
- unsigned recursiveCount;
-
- //
- // These fields are owned and maintained by the LockManager class exclusively
- //
-
-
- // Pointer to the lock to which this request belongs, or null if this request has not yet
- // been assigned to a lock or if it belongs to the PartitionedLockHead for the locker. The
- // LockHead should be alive as long as there are LockRequests on it, so it is safe to have
- // this pointer hanging around.
- LockHead* lock;
-
- // Pointer to the partitioned lock to which this request belongs, or null if it is not
- // partitioned. Only one of 'lock' and 'partitionedLock' is non-NULL, and a request can
- // only transition from 'partitionedLock' to 'lock', never the other way around.
- PartitionedLockHead* partitionedLock;
-
- // An intrusive linked list is used instead of std::list so that known entries can be
- // removed from the middle of the list in O(1) time without having to search for them.
- // Iterators cannot be persisted, because the list can be modified while an iterator is
- // held.
- LockRequest* prev;
- LockRequest* next;
-
- // Current status of this request.
- Status status;
-
- // If not granted, the mode which has been requested for this lock. If granted, the mode
- // in which it is currently granted.
- LockMode mode;
-
- // This value is different from MODE_NONE only if a conversion is requested for a lock and
- // that conversion cannot be immediately granted.
- LockMode convertMode;
+/**
+ * There is one of these entries for each request for a lock. They hang on a linked list off
+ * the LockHead or off a PartitionedLockHead and also are in a map for each Locker. This
+ * structure is not thread-safe.
+ *
+ * LockRequests are owned by the Locker class, which controls their lifetime. They should not
+ * be deleted while on the LockManager though (see the contract for the lock/unlock methods).
+ */
+struct LockRequest {
+ enum Status {
+ STATUS_NEW,
+ STATUS_GRANTED,
+ STATUS_WAITING,
+ STATUS_CONVERTING,
+
+ // Counts the rest. Always insert new status types above this entry.
+ StatusCount
};
/**
- * Returns a human readable status name for the specified LockRequest status.
+ * Used for initialization of a LockRequest, which might have been retrieved from cache.
*/
- const char* lockRequestStatusName(LockRequest::Status status);
+ void initNew(Locker* locker, LockGrantNotification* notify);
+
-} // namespace mongo
+ //
+ // These fields are maintained by the Locker class
+ //
+
+ // This is the Locker that created this LockRequest. The pointer is not owned, just
+ // referenced. The Locker must outlive the LockRequest.
+ Locker* locker;
+
+ // Not owned, just referenced. If a request is in the WAITING or CONVERTING state, this
+ // must live at least until the request is cancelled through LockManager::unlock or the
+ // notification has been invoked.
+ LockGrantNotification* notify;
+
+
+ //
+ // These fields are maintained by both the LockManager and Locker class
+ //
+
+ // If the request cannot be granted right away, whether to put it at the front or at the
+ // end of the queue. By default, requests are put at the back. Putting a request at the
+ // front effectively bypasses fairness. Defaults to false.
+ bool enqueueAtFront;
+
+ // When this request is granted and as long as it is on the granted queue, the particular
+ // resource's policy will be changed to "compatibleFirst". This means that even if there
+ // are pending requests on the conflict queue, if a compatible request comes in it will be
+ // granted immediately. This effectively turns off fairness.
+ bool compatibleFirst;
+
+ // When set, an attempt is made to execute this request using partitioned lockheads.
+ // This speeds up the common case where all requested locking modes are compatible with
+ // each other, at the cost of extra overhead for conflicting modes.
+ bool partitioned;
+
+ // How many times has LockManager::lock been called for this request. Locks are released
+ // when their recursive count drops to zero.
+ unsigned recursiveCount;
+
+ //
+ // These fields are owned and maintained by the LockManager class exclusively
+ //
+
+
+ // Pointer to the lock to which this request belongs, or null if this request has not yet
+ // been assigned to a lock or if it belongs to the PartitionedLockHead for the locker. The
+ // LockHead should be alive as long as there are LockRequests on it, so it is safe to have
+ // this pointer hanging around.
+ LockHead* lock;
+
+ // Pointer to the partitioned lock to which this request belongs, or null if it is not
+ // partitioned. Only one of 'lock' and 'partitionedLock' is non-NULL, and a request can
+ // only transition from 'partitionedLock' to 'lock', never the other way around.
+ PartitionedLockHead* partitionedLock;
+
+ // An intrusive linked list is used instead of std::list so that known entries can be
+ // removed from the middle of the list in O(1) time without having to search for them.
+ // Iterators cannot be persisted, because the list can be modified while an iterator is
+ // held.
+ LockRequest* prev;
+ LockRequest* next;
+
+ // Current status of this request.
+ Status status;
+
+ // If not granted, the mode which has been requested for this lock. If granted, the mode
+ // in which it is currently granted.
+ LockMode mode;
+
+ // This value is different from MODE_NONE only if a conversion is requested for a lock and
+ // that conversion cannot be immediately granted.
+ LockMode convertMode;
+};
+
+/**
+ * Returns a human readable status name for the specified LockRequest status.
+ */
+const char* lockRequestStatusName(LockRequest::Status status);
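A plausible definition of this helper, given the Status enum above, is a simple switch; the exact strings returned are assumptions of this sketch, not necessarily what the .cpp file uses:

const char* lockRequestStatusName(LockRequest::Status status) {
    switch (status) {
        case LockRequest::STATUS_NEW:
            return "new";
        case LockRequest::STATUS_GRANTED:
            return "granted";
        case LockRequest::STATUS_WAITING:
            return "waiting";
        case LockRequest::STATUS_CONVERTING:
            return "converting";
        default:
            // StatusCount is a counter, not a real status
            invariant(false);
            return "";
    }
}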
+
+} // namespace mongo
MONGO_HASH_NAMESPACE_START
- template <> struct hash<mongo::ResourceId> {
- size_t operator()(const mongo::ResourceId& resource) const {
- return resource;
- }
- };
+template <>
+struct hash<mongo::ResourceId> {
+ size_t operator()(const mongo::ResourceId& resource) const {
+ return resource;
+ }
+};
MONGO_HASH_NAMESPACE_END
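The specialization above makes ResourceId usable directly as a key in hash-based containers; note that it relies on ResourceId being implicitly convertible to a hash value. A hypothetical usage, assuming MONGO_HASH_NAMESPACE resolves to the namespace the container looks in:

#include <unordered_map>

// 'usage' is an illustrative bookkeeping map, not part of the lock manager's API.
void countUsage() {
    std::unordered_map<mongo::ResourceId, int> usage;

    const mongo::ResourceId resId(mongo::RESOURCE_DATABASE, std::string("TestDB"));
    usage[resId]++;  // keyed through the hash<mongo::ResourceId> specialization above
}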
diff --git a/src/mongo/db/concurrency/lock_manager_test.cpp b/src/mongo/db/concurrency/lock_manager_test.cpp
index 50fc9826a9b..ce722b6f572 100644
--- a/src/mongo/db/concurrency/lock_manager_test.cpp
+++ b/src/mongo/db/concurrency/lock_manager_test.cpp
@@ -31,792 +31,789 @@
namespace mongo {
- TEST(ResourceId, Semantics) {
- ResourceId resIdDb(RESOURCE_DATABASE, 324334234);
- ASSERT(resIdDb.getType() == RESOURCE_DATABASE);
- ASSERT(resIdDb.getHashId() == 324334234);
-
- ResourceId resIdColl(RESOURCE_COLLECTION, std::string("TestDB.collection"));
- ASSERT(resIdColl.getType() == RESOURCE_COLLECTION);
-
- // Comparison functions
-
- // Make sure the operator < is defined.
- ASSERT(resIdDb < resIdColl || resIdColl < resIdDb);
-
- ResourceId resId(RESOURCE_DATABASE, 324334234);
- ASSERT_EQUALS(resIdDb, resId);
-
- // Assignment functions
- resId = resIdColl;
- ASSERT_EQUALS(resId, resIdColl);
- }
-
- TEST(ResourceId, Constructors) {
- ResourceId resIdString(RESOURCE_COLLECTION, std::string("TestDB.collection"));
- ResourceId resIdStringData(RESOURCE_COLLECTION, StringData("TestDB.collection"));
-
- ASSERT_EQUALS(resIdString, resIdStringData);
- }
-
- TEST(ResourceId, Masking) {
- const ResourceType maxRes = static_cast<ResourceType>(ResourceTypesCount - 1);
- const uint64_t maxHash = (1ULL<<61) - 1; // Only 61 bits usable for hash
- ResourceType resources[3] = { maxRes, RESOURCE_GLOBAL, RESOURCE_METADATA };
- uint64_t hashes[3] = {maxHash, maxHash / 3, maxHash / 3 * 2};
-
- // The test below verifies that types/hashes are stored/retrieved unchanged
- for (int h = 0; h < 3; h++) {
- for (int r = 0; r < 3; r++) {
- ResourceId id(resources[r], hashes[h]);
- ASSERT_EQUALS(id.getHashId(), hashes[h]);
- ASSERT_EQUALS(id.getType(), resources[r]);
- }
+TEST(ResourceId, Semantics) {
+ ResourceId resIdDb(RESOURCE_DATABASE, 324334234);
+ ASSERT(resIdDb.getType() == RESOURCE_DATABASE);
+ ASSERT(resIdDb.getHashId() == 324334234);
+
+ ResourceId resIdColl(RESOURCE_COLLECTION, std::string("TestDB.collection"));
+ ASSERT(resIdColl.getType() == RESOURCE_COLLECTION);
+
+ // Comparison functions
+
+ // Make sure the operator < is defined.
+ ASSERT(resIdDb < resIdColl || resIdColl < resIdDb);
+
+ ResourceId resId(RESOURCE_DATABASE, 324334234);
+ ASSERT_EQUALS(resIdDb, resId);
+
+ // Assignment functions
+ resId = resIdColl;
+ ASSERT_EQUALS(resId, resIdColl);
+}
+
+TEST(ResourceId, Constructors) {
+ ResourceId resIdString(RESOURCE_COLLECTION, std::string("TestDB.collection"));
+ ResourceId resIdStringData(RESOURCE_COLLECTION, StringData("TestDB.collection"));
+
+ ASSERT_EQUALS(resIdString, resIdStringData);
+}
+
+TEST(ResourceId, Masking) {
+ const ResourceType maxRes = static_cast<ResourceType>(ResourceTypesCount - 1);
+ const uint64_t maxHash = (1ULL << 61) - 1; // Only 61 bits usable for hash
+ ResourceType resources[3] = {maxRes, RESOURCE_GLOBAL, RESOURCE_METADATA};
+ uint64_t hashes[3] = {maxHash, maxHash / 3, maxHash / 3 * 2};
+
+ // The test below verifies that types/hashes are stored/retrieved unchanged
+ for (int h = 0; h < 3; h++) {
+ for (int r = 0; r < 3; r++) {
+ ResourceId id(resources[r], hashes[h]);
+ ASSERT_EQUALS(id.getHashId(), hashes[h]);
+ ASSERT_EQUALS(id.getType(), resources[r]);
}
}
+}
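The Masking test implies that ResourceId packs the type into the high bits of a single 64-bit word and the hash into the low 61 bits. A sketch of such a packing scheme (the exact layout inside ResourceId is an assumption):

#include <cstdint>

const int kHashBits = 61;  // per the test's maxHash of (1ULL << 61) - 1
const std::uint64_t kHashMask = (1ULL << kHashBits) - 1;

std::uint64_t pack(std::uint64_t type, std::uint64_t hash) {
    return (type << kHashBits) | (hash & kHashMask);  // type in the top 3 bits
}

std::uint64_t typeOf(std::uint64_t packed) {
    return packed >> kHashBits;
}

std::uint64_t hashOf(std::uint64_t packed) {
    return packed & kHashMask;
}

// Round-trip property the test asserts: typeOf(pack(t, h)) == t and
// hashOf(pack(t, h)) == h for any h <= kHashMask.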
- //
- // LockManager
- //
+//
+// LockManager
+//
- TEST(LockManager, Grant) {
- LockManager lockMgr;
- const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
+TEST(LockManager, Grant) {
+ LockManager lockMgr;
+ const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
- MMAPV1LockerImpl locker;
- TrackingLockGrantNotification notify;
+ MMAPV1LockerImpl locker;
+ TrackingLockGrantNotification notify;
- LockRequest request;
- request.initNew(&locker, &notify);
+ LockRequest request;
+ request.initNew(&locker, &notify);
- ASSERT(LOCK_OK == lockMgr.lock(resId, &request, MODE_S));
- ASSERT(request.mode == MODE_S);
- ASSERT(request.recursiveCount == 1);
- ASSERT(notify.numNotifies == 0);
+ ASSERT(LOCK_OK == lockMgr.lock(resId, &request, MODE_S));
+ ASSERT(request.mode == MODE_S);
+ ASSERT(request.recursiveCount == 1);
+ ASSERT(notify.numNotifies == 0);
- lockMgr.unlock(&request);
- ASSERT(request.recursiveCount == 0);
- }
+ lockMgr.unlock(&request);
+ ASSERT(request.recursiveCount == 0);
+}
- TEST(LockManager, GrantMultipleNoConflict) {
- LockManager lockMgr;
- const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
+TEST(LockManager, GrantMultipleNoConflict) {
+ LockManager lockMgr;
+ const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
- MMAPV1LockerImpl locker;
- TrackingLockGrantNotification notify;
+ MMAPV1LockerImpl locker;
+ TrackingLockGrantNotification notify;
- LockRequest request[6];
- for (int i = 0; i < 6; i++) {
- request[i].initNew(&locker, &notify);
- ASSERT(LOCK_OK == lockMgr.lock(resId, &request[i], MODE_S));
+ LockRequest request[6];
+ for (int i = 0; i < 6; i++) {
+ request[i].initNew(&locker, &notify);
+ ASSERT(LOCK_OK == lockMgr.lock(resId, &request[i], MODE_S));
- ASSERT(request[i].mode == MODE_S);
- ASSERT(request[i].recursiveCount == 1);
- }
-
- ASSERT(notify.numNotifies == 0);
-
- // Free the first
- lockMgr.unlock(&request[0]);
-
- // Free the last
- lockMgr.unlock(&request[5]);
-
- // Free one in the middle
- lockMgr.unlock(&request[3]);
-
- // Free the remaining so the LockMgr does not complain about leaked locks
- lockMgr.unlock(&request[1]);
- lockMgr.unlock(&request[2]);
- lockMgr.unlock(&request[4]);
+ ASSERT(request[i].mode == MODE_S);
+ ASSERT(request[i].recursiveCount == 1);
}
- TEST(LockManager, GrantMultipleFIFOOrder) {
- LockManager lockMgr;
- const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
+ ASSERT(notify.numNotifies == 0);
- std::unique_ptr<MMAPV1LockerImpl> locker[6];
- for (int i = 0; i < 6; i++) {
- locker[i].reset(new MMAPV1LockerImpl());
- }
+ // Free the first
+ lockMgr.unlock(&request[0]);
- TrackingLockGrantNotification notify[6];
+ // Free the last
+ lockMgr.unlock(&request[5]);
- LockRequest request[6];
- for (int i = 0; i < 6; i++) {
- request[i].initNew(locker[i].get(), &notify[i]);
- lockMgr.lock(resId, &request[i], MODE_X);
+ // Free one in the middle
+ lockMgr.unlock(&request[3]);
- ASSERT(request[i].mode == MODE_X);
- ASSERT(request[i].recursiveCount == 1);
- }
+ // Free the remaining so the LockMgr does not complain about leaked locks
+ lockMgr.unlock(&request[1]);
+ lockMgr.unlock(&request[2]);
+ lockMgr.unlock(&request[4]);
+}
- // Release each lock in turn and ensure the next one, in FIFO order, is granted
- for (int i = 0; i < 5; i++) {
- lockMgr.unlock(&request[i]);
-
- ASSERT(notify[i + 1].numNotifies == 1);
- ASSERT(notify[i + 1].lastResId == resId);
- ASSERT(notify[i + 1].lastResult == LOCK_OK);
- }
+TEST(LockManager, GrantMultipleFIFOOrder) {
+ LockManager lockMgr;
+ const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
- // Release the last one
- lockMgr.unlock(&request[5]);
+ std::unique_ptr<MMAPV1LockerImpl> locker[6];
+ for (int i = 0; i < 6; i++) {
+ locker[i].reset(new MMAPV1LockerImpl());
}
- TEST(LockManager, GrantRecursive) {
- LockManager lockMgr;
- const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
+ TrackingLockGrantNotification notify[6];
- MMAPV1LockerImpl locker;
- LockRequestCombo request(&locker);
+ LockRequest request[6];
+ for (int i = 0; i < 6; i++) {
+ request[i].initNew(locker[i].get(), &notify[i]);
+ lockMgr.lock(resId, &request[i], MODE_X);
- ASSERT(LOCK_OK == lockMgr.lock(resId, &request, MODE_S));
- ASSERT(request.mode == MODE_S);
- ASSERT(request.recursiveCount == 1);
- ASSERT(request.numNotifies == 0);
-
- // Acquire again, in the same mode
- ASSERT(LOCK_OK == lockMgr.convert(resId, &request, MODE_S));
- ASSERT(request.mode == MODE_S);
- ASSERT(request.recursiveCount == 2);
- ASSERT(request.numNotifies == 0);
-
- // Release first acquire
- lockMgr.unlock(&request);
- ASSERT(request.mode == MODE_S);
- ASSERT(request.recursiveCount == 1);
-
- // Release second acquire
- lockMgr.unlock(&request);
- ASSERT(request.recursiveCount == 0);
+ ASSERT(request[i].mode == MODE_X);
+ ASSERT(request[i].recursiveCount == 1);
}
- TEST(LockManager, GrantRecursiveCompatibleConvertUp) {
- LockManager lockMgr;
- const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
-
- MMAPV1LockerImpl locker;
- LockRequestCombo request(&locker);
-
- ASSERT(LOCK_OK == lockMgr.lock(resId, &request, MODE_IS));
- ASSERT(request.mode == MODE_IS);
- ASSERT(request.recursiveCount == 1);
- ASSERT(request.numNotifies == 0);
+ // Release each lock in turn and ensure the next one, in FIFO order, is granted
+ for (int i = 0; i < 5; i++) {
+ lockMgr.unlock(&request[i]);
- // Acquire again, in *compatible*, but stricter mode
- ASSERT(LOCK_OK == lockMgr.convert(resId, &request, MODE_S));
- ASSERT(request.mode == MODE_S);
- ASSERT(request.recursiveCount == 2);
- ASSERT(request.numNotifies == 0);
-
- // Release the first acquire
- lockMgr.unlock(&request);
- ASSERT(request.mode == MODE_S);
- ASSERT(request.recursiveCount == 1);
-
- // Release the second acquire
- lockMgr.unlock(&request);
- ASSERT(request.recursiveCount == 0);
+ ASSERT(notify[i + 1].numNotifies == 1);
+ ASSERT(notify[i + 1].lastResId == resId);
+ ASSERT(notify[i + 1].lastResult == LOCK_OK);
}
- TEST(LockManager, GrantRecursiveNonCompatibleConvertUp) {
- LockManager lockMgr;
- const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
-
- MMAPV1LockerImpl locker;
- LockRequestCombo request(&locker);
-
- ASSERT(LOCK_OK == lockMgr.lock(resId, &request, MODE_S));
- ASSERT(request.mode == MODE_S);
- ASSERT(request.recursiveCount == 1);
- ASSERT(request.numNotifies == 0);
-
- // Acquire again, in *non-compatible*, but stricter mode
- ASSERT(LOCK_OK == lockMgr.convert(resId, &request, MODE_X));
- ASSERT(request.mode == MODE_X);
- ASSERT(request.recursiveCount == 2);
- ASSERT(request.numNotifies == 0);
-
- // Release first acquire
- lockMgr.unlock(&request);
- ASSERT(request.mode == MODE_X);
- ASSERT(request.recursiveCount == 1);
+ // Release the last one
+ lockMgr.unlock(&request[5]);
+}
+
+TEST(LockManager, GrantRecursive) {
+ LockManager lockMgr;
+ const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
+
+ MMAPV1LockerImpl locker;
+ LockRequestCombo request(&locker);
+
+ ASSERT(LOCK_OK == lockMgr.lock(resId, &request, MODE_S));
+ ASSERT(request.mode == MODE_S);
+ ASSERT(request.recursiveCount == 1);
+ ASSERT(request.numNotifies == 0);
+
+ // Acquire again, in the same mode
+ ASSERT(LOCK_OK == lockMgr.convert(resId, &request, MODE_S));
+ ASSERT(request.mode == MODE_S);
+ ASSERT(request.recursiveCount == 2);
+ ASSERT(request.numNotifies == 0);
+
+ // Release first acquire
+ lockMgr.unlock(&request);
+ ASSERT(request.mode == MODE_S);
+ ASSERT(request.recursiveCount == 1);
+
+ // Release second acquire
+ lockMgr.unlock(&request);
+ ASSERT(request.recursiveCount == 0);
+}
+
+TEST(LockManager, GrantRecursiveCompatibleConvertUp) {
+ LockManager lockMgr;
+ const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
+
+ MMAPV1LockerImpl locker;
+ LockRequestCombo request(&locker);
+
+ ASSERT(LOCK_OK == lockMgr.lock(resId, &request, MODE_IS));
+ ASSERT(request.mode == MODE_IS);
+ ASSERT(request.recursiveCount == 1);
+ ASSERT(request.numNotifies == 0);
+
+ // Acquire again, in *compatible*, but stricter mode
+ ASSERT(LOCK_OK == lockMgr.convert(resId, &request, MODE_S));
+ ASSERT(request.mode == MODE_S);
+ ASSERT(request.recursiveCount == 2);
+ ASSERT(request.numNotifies == 0);
+
+ // Release the first acquire
+ lockMgr.unlock(&request);
+ ASSERT(request.mode == MODE_S);
+ ASSERT(request.recursiveCount == 1);
+
+ // Release the second acquire
+ lockMgr.unlock(&request);
+ ASSERT(request.recursiveCount == 0);
+}
+
+TEST(LockManager, GrantRecursiveNonCompatibleConvertUp) {
+ LockManager lockMgr;
+ const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
+
+ MMAPV1LockerImpl locker;
+ LockRequestCombo request(&locker);
+
+ ASSERT(LOCK_OK == lockMgr.lock(resId, &request, MODE_S));
+ ASSERT(request.mode == MODE_S);
+ ASSERT(request.recursiveCount == 1);
+ ASSERT(request.numNotifies == 0);
+
+ // Acquire again, in *non-compatible*, but stricter mode
+ ASSERT(LOCK_OK == lockMgr.convert(resId, &request, MODE_X));
+ ASSERT(request.mode == MODE_X);
+ ASSERT(request.recursiveCount == 2);
+ ASSERT(request.numNotifies == 0);
+
+ // Release first acquire
+ lockMgr.unlock(&request);
+ ASSERT(request.mode == MODE_X);
+ ASSERT(request.recursiveCount == 1);
+
+ // Release second acquire
+ lockMgr.unlock(&request);
+ ASSERT(request.recursiveCount == 0);
+}
+
+TEST(LockManager, GrantRecursiveNonCompatibleConvertDown) {
+ LockManager lockMgr;
+ const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
+
+ MMAPV1LockerImpl locker;
+ LockRequestCombo request(&locker);
+
+ ASSERT(LOCK_OK == lockMgr.lock(resId, &request, MODE_X));
+ ASSERT(request.mode == MODE_X);
+ ASSERT(request.recursiveCount == 1);
+ ASSERT(request.numNotifies == 0);
+
+ // Acquire again, in *non-compatible*, but less strict mode
+ ASSERT(LOCK_OK == lockMgr.convert(resId, &request, MODE_S));
+ ASSERT(request.mode == MODE_X);
+ ASSERT(request.recursiveCount == 2);
+ ASSERT(request.numNotifies == 0);
+
+ // Release first acquire
+ lockMgr.unlock(&request);
+ ASSERT(request.mode == MODE_X);
+ ASSERT(request.recursiveCount == 1);
+
+ // Release second acquire
+ lockMgr.unlock(&request);
+ ASSERT(request.recursiveCount == 0);
+}
+
+TEST(LockManager, Conflict) {
+ LockManager lockMgr;
+ const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
+
+ MMAPV1LockerImpl locker1;
+ MMAPV1LockerImpl locker2;
+
+ LockRequestCombo request1(&locker1);
+ LockRequestCombo request2(&locker2);
+
+ // First request granted right away
+ ASSERT(LOCK_OK == lockMgr.lock(resId, &request1, MODE_S));
+ ASSERT(request1.recursiveCount == 1);
+ ASSERT(request1.numNotifies == 0);
+
+ // Second request must block
+ ASSERT(LOCK_WAITING == lockMgr.lock(resId, &request2, MODE_X));
+ ASSERT(request2.mode == MODE_X);
+ ASSERT(request2.recursiveCount == 1);
+ ASSERT(request2.numNotifies == 0);
+
+ // Release first request
+ lockMgr.unlock(&request1);
+ ASSERT(request1.recursiveCount == 0);
+ ASSERT(request1.numNotifies == 0);
+
+ ASSERT(request2.mode == MODE_X);
+ ASSERT(request2.recursiveCount == 1);
+ ASSERT(request2.numNotifies == 1);
+ ASSERT(request2.lastResult == LOCK_OK);
+
+ // Release second acquire
+ lockMgr.unlock(&request2);
+ ASSERT(request2.recursiveCount == 0);
+
+ ASSERT(request1.numNotifies == 0);
+ ASSERT(request2.numNotifies == 1);
+}
+
+TEST(LockManager, MultipleConflict) {
+ LockManager lockMgr;
+ const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
+
+ MMAPV1LockerImpl locker;
+ TrackingLockGrantNotification notify;
+
+ LockRequest request[6];
+ for (int i = 0; i < 6; i++) {
+ request[i].initNew(&locker, &notify);
+
+ if (i == 0) {
+ ASSERT(LOCK_OK == lockMgr.lock(resId, &request[i], MODE_X));
+ } else {
+ ASSERT(LOCK_WAITING == lockMgr.lock(resId, &request[i], MODE_X));
+ }
- // Release second acquire
- lockMgr.unlock(&request);
- ASSERT(request.recursiveCount == 0);
+ ASSERT(request[i].mode == MODE_X);
+ ASSERT(request[i].recursiveCount == 1);
}
- TEST(LockManager, GrantRecursiveNonCompatibleConvertDown) {
- LockManager lockMgr;
- const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
-
- MMAPV1LockerImpl locker;
- LockRequestCombo request(&locker);
-
- ASSERT(LOCK_OK == lockMgr.lock(resId, &request, MODE_X));
- ASSERT(request.mode == MODE_X);
- ASSERT(request.recursiveCount == 1);
- ASSERT(request.numNotifies == 0);
-
- // Acquire again, in *non-compatible*, but less strict mode
- ASSERT(LOCK_OK == lockMgr.convert(resId, &request, MODE_S));
- ASSERT(request.mode == MODE_X);
- ASSERT(request.recursiveCount == 2);
- ASSERT(request.numNotifies == 0);
+ ASSERT(notify.numNotifies == 0);
- // Release first acquire
- lockMgr.unlock(&request);
- ASSERT(request.mode == MODE_X);
- ASSERT(request.recursiveCount == 1);
+ // Free them one by one and make sure they get granted in the correct order
+ for (int i = 0; i < 6; i++) {
+ lockMgr.unlock(&request[i]);
- // Release second acquire
- lockMgr.unlock(&request);
- ASSERT(request.recursiveCount == 0);
+ if (i < 5) {
+ ASSERT(notify.numNotifies == i + 1);
+ }
}
+}
- TEST(LockManager, Conflict) {
- LockManager lockMgr;
- const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
-
- MMAPV1LockerImpl locker1;
- MMAPV1LockerImpl locker2;
-
- LockRequestCombo request1(&locker1);
- LockRequestCombo request2(&locker2);
+TEST(LockManager, ConflictCancelWaiting) {
+ LockManager lockMgr;
+ const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
- // First request granted right away
- ASSERT(LOCK_OK == lockMgr.lock(resId, &request1, MODE_S));
- ASSERT(request1.recursiveCount == 1);
- ASSERT(request1.numNotifies == 0);
+ MMAPV1LockerImpl locker1;
+ TrackingLockGrantNotification notify1;
- // Second request must block
- ASSERT(LOCK_WAITING == lockMgr.lock(resId, &request2, MODE_X));
- ASSERT(request2.mode == MODE_X);
- ASSERT(request2.recursiveCount == 1);
- ASSERT(request2.numNotifies == 0);
+ MMAPV1LockerImpl locker2;
+ TrackingLockGrantNotification notify2;
- // Release first request
- lockMgr.unlock(&request1);
- ASSERT(request1.recursiveCount == 0);
- ASSERT(request1.numNotifies == 0);
+ LockRequest request1;
+ request1.initNew(&locker1, &notify1);
- ASSERT(request2.mode == MODE_X);
- ASSERT(request2.recursiveCount == 1);
- ASSERT(request2.numNotifies == 1);
- ASSERT(request2.lastResult == LOCK_OK);
+ LockRequest request2;
+ request2.initNew(&locker2, &notify2);
- // Release second acquire
- lockMgr.unlock(&request2);
- ASSERT(request2.recursiveCount == 0);
-
- ASSERT(request1.numNotifies == 0);
- ASSERT(request2.numNotifies == 1);
- }
+ // First request granted right away
+ ASSERT(LOCK_OK == lockMgr.lock(resId, &request1, MODE_S));
+ ASSERT(notify1.numNotifies == 0);
- TEST(LockManager, MultipleConflict) {
- LockManager lockMgr;
- const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
+ ASSERT(LOCK_WAITING == lockMgr.lock(resId, &request2, MODE_X));
- MMAPV1LockerImpl locker;
- TrackingLockGrantNotification notify;
+ // Release second request (which is still in the WAITING mode)
+ lockMgr.unlock(&request2);
+ ASSERT(notify2.numNotifies == 0);
- LockRequest request[6];
- for (int i = 0; i < 6; i++) {
- request[i].initNew(&locker, &notify);
+ ASSERT(request1.mode == MODE_S);
+ ASSERT(request1.recursiveCount == 1);
- if (i == 0) {
- ASSERT(LOCK_OK == lockMgr.lock(resId, &request[i], MODE_X));
- }
- else {
- ASSERT(LOCK_WAITING == lockMgr.lock(resId, &request[i], MODE_X));
- }
+ // Release the first (granted) request
+ lockMgr.unlock(&request1);
+}
- ASSERT(request[i].mode == MODE_X);
- ASSERT(request[i].recursiveCount == 1);
- }
+TEST(LockManager, ConflictCancelMultipleWaiting) {
+ LockManager lockMgr;
+ const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
- ASSERT(notify.numNotifies == 0);
+ MMAPV1LockerImpl locker;
+ TrackingLockGrantNotification notify;
- // Free them one by one and make sure they get granted in the correct order
- for (int i = 0; i < 6; i++) {
- lockMgr.unlock(&request[i]);
+ LockRequest request[6];
+ for (int i = 0; i < 6; i++) {
+ request[i].initNew(&locker, &notify);
+ lockMgr.lock(resId, &request[i], MODE_X);
- if (i < 5) {
- ASSERT(notify.numNotifies == i + 1);
- }
- }
+ ASSERT(request[i].mode == MODE_X);
+ ASSERT(request[i].recursiveCount == 1);
}
- TEST(LockManager, ConflictCancelWaiting) {
- LockManager lockMgr;
- const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
-
- MMAPV1LockerImpl locker1;
- TrackingLockGrantNotification notify1;
+ ASSERT(notify.numNotifies == 0);
- MMAPV1LockerImpl locker2;
- TrackingLockGrantNotification notify2;
+ // Free the second (waiting)
+ lockMgr.unlock(&request[1]);
- LockRequest request1;
- request1.initNew(&locker1, &notify1);
+ // Free the last
+ lockMgr.unlock(&request[5]);
- LockRequest request2;
- request2.initNew(&locker2, &notify2);
+ // Free one in the middle
+ lockMgr.unlock(&request[3]);
- // First request granted right away
- ASSERT(LOCK_OK == lockMgr.lock(resId, &request1, MODE_S));
- ASSERT(notify1.numNotifies == 0);
+ // Free the remaining so the LockMgr does not complain about leaked locks
+ lockMgr.unlock(&request[2]);
+ lockMgr.unlock(&request[4]);
+ lockMgr.unlock(&request[0]);
+}
- ASSERT(LOCK_WAITING == lockMgr.lock(resId, &request2, MODE_X));
+TEST(LockManager, ConflictCancelWaitingConversion) {
+ LockManager lockMgr;
+ const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
- // Release second request (which is still in the WAITING mode)
- lockMgr.unlock(&request2);
- ASSERT(notify2.numNotifies == 0);
+ MMAPV1LockerImpl locker1;
+ MMAPV1LockerImpl locker2;
- ASSERT(request1.mode == MODE_S);
- ASSERT(request1.recursiveCount == 1);
+ LockRequestCombo request1(&locker1);
+ LockRequestCombo request2(&locker2);
- // Release the first (granted) request
- lockMgr.unlock(&request1);
- }
+ // First request granted right away
+ ASSERT(LOCK_OK == lockMgr.lock(resId, &request1, MODE_S));
+ ASSERT(request1.numNotifies == 0);
- TEST(LockManager, ConflictCancelMultipleWaiting) {
- LockManager lockMgr;
- const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
+ // Second request is granted right away
+ ASSERT(LOCK_OK == lockMgr.lock(resId, &request2, MODE_S));
+ ASSERT(request2.numNotifies == 0);
- MMAPV1LockerImpl locker;
- TrackingLockGrantNotification notify;
+ // Convert second request to conflicting
+ ASSERT(LOCK_WAITING == lockMgr.convert(resId, &request2, MODE_X));
+ ASSERT(request2.mode == MODE_S);
+ ASSERT(request2.convertMode == MODE_X);
+ ASSERT(request2.numNotifies == 0);
- LockRequest request[6];
- for (int i = 0; i < 6; i++) {
- request[i].initNew(&locker, &notify);
- lockMgr.lock(resId, &request[i], MODE_X);
+ // Cancel the conflicting upgrade
+ lockMgr.unlock(&request2);
+ ASSERT(request2.mode == MODE_S);
+ ASSERT(request2.convertMode == MODE_NONE);
+ ASSERT(request2.numNotifies == 0);
- ASSERT(request[i].mode == MODE_X);
- ASSERT(request[i].recursiveCount == 1);
- }
+ // Free the remaining locks so the LockManager destructor does not complain
+ lockMgr.unlock(&request1);
+ lockMgr.unlock(&request2);
+}
- ASSERT(notify.numNotifies == 0);
+TEST(LockManager, ConflictingConversion) {
+ LockManager lockMgr;
+ const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
- // Free the second (waiting)
- lockMgr.unlock(&request[1]);
+ MMAPV1LockerImpl locker1;
+ MMAPV1LockerImpl locker2;
- // Free the last
- lockMgr.unlock(&request[5]);
+ LockRequestCombo request1(&locker1);
+ LockRequestCombo request2(&locker2);
- // Free one in the middle
- lockMgr.unlock(&request[3]);
+ // The S requests are granted right away
+ ASSERT(LOCK_OK == lockMgr.lock(resId, &request1, MODE_S));
+ ASSERT(request1.numNotifies == 0);
- // Free the remaining so the LockMgr does not complain about leaked locks
- lockMgr.unlock(&request[2]);
- lockMgr.unlock(&request[4]);
- lockMgr.unlock(&request[0]);
- }
+ ASSERT(LOCK_OK == lockMgr.lock(resId, &request2, MODE_S));
+ ASSERT(request2.numNotifies == 0);
- TEST(LockManager, ConflictCancelWaitingConversion) {
- LockManager lockMgr;
- const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
+ // Convert first request to conflicting
+ ASSERT(LOCK_WAITING == lockMgr.convert(resId, &request1, MODE_X));
+ ASSERT(request1.numNotifies == 0);
- MMAPV1LockerImpl locker1;
- MMAPV1LockerImpl locker2;
+ // Free the second lock and make sure the first is granted
+ lockMgr.unlock(&request2);
+ ASSERT(request1.mode == MODE_X);
+ ASSERT(request1.numNotifies == 1);
+ ASSERT(request2.numNotifies == 0);
- LockRequestCombo request1(&locker1);
- LockRequestCombo request2(&locker2);
+ // Frees the first reference, mode remains X
+ lockMgr.unlock(&request1);
+ ASSERT(request1.mode == MODE_X);
+ ASSERT(request1.recursiveCount == 1);
- // First request granted right away
- ASSERT(LOCK_OK == lockMgr.lock(resId, &request1, MODE_S));
- ASSERT(request1.numNotifies == 0);
+ lockMgr.unlock(&request1);
+}
- // Second request is granted right away
- ASSERT(LOCK_OK == lockMgr.lock(resId, &request2, MODE_S));
- ASSERT(request2.numNotifies == 0);
+TEST(LockManager, ConflictingConversionInTheMiddle) {
+ LockManager lockMgr;
+ const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
- // Convert second request to conflicting
- ASSERT(LOCK_WAITING == lockMgr.convert(resId, &request2, MODE_X));
- ASSERT(request2.mode == MODE_S);
- ASSERT(request2.convertMode == MODE_X);
- ASSERT(request2.numNotifies == 0);
+ MMAPV1LockerImpl locker;
+ TrackingLockGrantNotification notify;
- // Cancel the conflicting upgrade
- lockMgr.unlock(&request2);
- ASSERT(request2.mode == MODE_S);
- ASSERT(request2.convertMode == MODE_NONE);
- ASSERT(request2.numNotifies == 0);
-
- // Free the remaining locks so the LockManager destructor does not complain
- lockMgr.unlock(&request1);
- lockMgr.unlock(&request2);
+ LockRequest request[3];
+ for (int i = 0; i < 3; i++) {
+ request[i].initNew(&locker, &notify);
+ lockMgr.lock(resId, &request[i], MODE_S);
}
- TEST(LockManager, ConflictingConversion) {
- LockManager lockMgr;
- const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
+ // Upgrade the one in the middle (not the first one)
+ ASSERT(LOCK_WAITING == lockMgr.convert(resId, &request[1], MODE_X));
- MMAPV1LockerImpl locker1;
- MMAPV1LockerImpl locker2;
+ ASSERT(notify.numNotifies == 0);
- LockRequestCombo request1(&locker1);
- LockRequestCombo request2(&locker2);
+ // Release the two shared modes
+ lockMgr.unlock(&request[0]);
+ ASSERT(notify.numNotifies == 0);
- // The S requests are granted right away
- ASSERT(LOCK_OK == lockMgr.lock(resId, &request1, MODE_S));
- ASSERT(request1.numNotifies == 0);
+ lockMgr.unlock(&request[2]);
+ ASSERT(notify.numNotifies == 1);
- ASSERT(LOCK_OK == lockMgr.lock(resId, &request2, MODE_S));
- ASSERT(request2.numNotifies == 0);
+ ASSERT(request[1].mode == MODE_X);
- // Convert first request to conflicting
- ASSERT(LOCK_WAITING == lockMgr.convert(resId, &request1, MODE_X));
- ASSERT(request1.numNotifies == 0);
+ // request[1] was locked once and converted once, so it must be unlocked twice
+ lockMgr.unlock(&request[1]);
+ lockMgr.unlock(&request[1]);
+}
- // Free the second lock and make sure the first is granted
- lockMgr.unlock(&request2);
- ASSERT(request1.mode == MODE_X);
- ASSERT(request1.numNotifies == 1);
- ASSERT(request2.numNotifies == 0);
+TEST(LockManager, ConvertUpgrade) {
+ LockManager lockMgr;
+ const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
- // Frees the first reference, mode remains X
- lockMgr.unlock(&request1);
- ASSERT(request1.mode == MODE_X);
- ASSERT(request1.recursiveCount == 1);
+ MMAPV1LockerImpl locker1;
+ LockRequestCombo request1(&locker1);
+ ASSERT(LOCK_OK == lockMgr.lock(resId, &request1, MODE_S));
- lockMgr.unlock(&request1);
- }
+ MMAPV1LockerImpl locker2;
+ LockRequestCombo request2(&locker2);
+ ASSERT(LOCK_OK == lockMgr.lock(resId, &request2, MODE_S));
- TEST(LockManager, ConflictingConversionInTheMiddle) {
- LockManager lockMgr;
- const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
+ // Upgrade the S lock to X
+ ASSERT(LOCK_WAITING == lockMgr.convert(resId, &request1, MODE_X));
- MMAPV1LockerImpl locker;
- TrackingLockGrantNotification notify;
+ ASSERT(!lockMgr.unlock(&request1));
+ ASSERT(lockMgr.unlock(&request1));
- LockRequest request[3];
- for (int i = 0; i < 3; i++) {
- request[i].initNew(&locker, &notify);
- lockMgr.lock(resId, &request[i], MODE_S);
- }
+ ASSERT(lockMgr.unlock(&request2));
+}
- // Upgrade the one in the middle (not the first one)
- ASSERT(LOCK_WAITING == lockMgr.convert(resId, &request[1], MODE_X));
+TEST(LockManager, Downgrade) {
+ LockManager lockMgr;
+ const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
- ASSERT(notify.numNotifies == 0);
+ MMAPV1LockerImpl locker1;
+ LockRequestCombo request1(&locker1);
+ ASSERT(LOCK_OK == lockMgr.lock(resId, &request1, MODE_X));
- // Release the two shared modes
- lockMgr.unlock(&request[0]);
- ASSERT(notify.numNotifies == 0);
+ MMAPV1LockerImpl locker2;
+ LockRequestCombo request2(&locker2);
+ ASSERT(LOCK_WAITING == lockMgr.lock(resId, &request2, MODE_S));
- lockMgr.unlock(&request[2]);
- ASSERT(notify.numNotifies == 1);
+ // Downgrade the X request to S
+ lockMgr.downgrade(&request1, MODE_S);
- ASSERT(request[1].mode == MODE_X);
+ ASSERT(request2.numNotifies == 1);
+ ASSERT(request2.lastResult == LOCK_OK);
+ ASSERT(request2.recursiveCount == 1);
- // request[1] was locked once and converted once, so it must be unlocked twice
- lockMgr.unlock(&request[1]);
- lockMgr.unlock(&request[1]);
- }
+ ASSERT(lockMgr.unlock(&request1));
+ ASSERT(lockMgr.unlock(&request2));
+}
- TEST(LockManager, ConvertUpgrade) {
- LockManager lockMgr;
- const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
- MMAPV1LockerImpl locker1;
- LockRequestCombo request1(&locker1);
- ASSERT(LOCK_OK == lockMgr.lock(resId, &request1, MODE_S));
+// Lock conflict matrix tests
+static void checkConflict(LockMode existingMode, LockMode newMode, bool hasConflict) {
+ LockManager lockMgr;
+ const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
- MMAPV1LockerImpl locker2;
- LockRequestCombo request2(&locker2);
- ASSERT(LOCK_OK == lockMgr.lock(resId, &request2, MODE_S));
+ MMAPV1LockerImpl lockerExisting;
+ TrackingLockGrantNotification notifyExisting;
+ LockRequest requestExisting;
+ requestExisting.initNew(&lockerExisting, &notifyExisting);
- // Upgrade the S lock to X
- ASSERT(LOCK_WAITING == lockMgr.convert(resId, &request1, MODE_X));
+ ASSERT(LOCK_OK == lockMgr.lock(resId, &requestExisting, existingMode));
- ASSERT(!lockMgr.unlock(&request1));
- ASSERT(lockMgr.unlock(&request1));
+ MMAPV1LockerImpl lockerNew;
+ TrackingLockGrantNotification notifyNew;
+ LockRequest requestNew;
+ requestNew.initNew(&lockerNew, &notifyNew);
- ASSERT(lockMgr.unlock(&request2));
+ LockResult result = lockMgr.lock(resId, &requestNew, newMode);
+ if (hasConflict) {
+ ASSERT_EQUALS(LOCK_WAITING, result);
+ } else {
+ ASSERT_EQUALS(LOCK_OK, result);
}
- TEST(LockManager, Downgrade) {
- LockManager lockMgr;
- const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
+ lockMgr.unlock(&requestNew);
+ lockMgr.unlock(&requestExisting);
+}
- MMAPV1LockerImpl locker1;
- LockRequestCombo request1(&locker1);
- ASSERT(LOCK_OK == lockMgr.lock(resId, &request1, MODE_X));
+TEST(LockManager, ValidateConflictMatrix) {
+ checkConflict(MODE_IS, MODE_IS, false);
+ checkConflict(MODE_IS, MODE_IX, false);
+ checkConflict(MODE_IS, MODE_S, false);
+ checkConflict(MODE_IS, MODE_X, true);
- MMAPV1LockerImpl locker2;
- LockRequestCombo request2(&locker2);
- ASSERT(LOCK_WAITING == lockMgr.lock(resId, &request2, MODE_S));
+ checkConflict(MODE_IX, MODE_IS, false);
+ checkConflict(MODE_IX, MODE_IX, false);
+ checkConflict(MODE_IX, MODE_S, true);
+ checkConflict(MODE_IX, MODE_X, true);
- // Downgrade the X request to S
- lockMgr.downgrade(&request1, MODE_S);
+ checkConflict(MODE_S, MODE_IS, false);
+ checkConflict(MODE_S, MODE_IX, true);
+ checkConflict(MODE_S, MODE_S, false);
+ checkConflict(MODE_S, MODE_X, true);
- ASSERT(request2.numNotifies == 1);
- ASSERT(request2.lastResult == LOCK_OK);
- ASSERT(request2.recursiveCount == 1);
+ checkConflict(MODE_X, MODE_IS, true);
+ checkConflict(MODE_X, MODE_IX, true);
+ checkConflict(MODE_X, MODE_S, true);
+ checkConflict(MODE_X, MODE_X, true);
+}
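For reference, the compatibility rules this test pins down can be summarized as a lookup table; this is the same data as the assertions above, with rows as the held mode and columns as the requested mode:

// true means the requested mode conflicts with the held mode.
//                     IS     IX     S      X      (requested)
const bool conflicts[4][4] = {
    /* held IS */ {false, false, false, true},
    /* held IX */ {false, false, true,  true},
    /* held S  */ {false, true,  false, true},
    /* held X  */ {true,  true,  true,  true},
};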
- ASSERT(lockMgr.unlock(&request1));
- ASSERT(lockMgr.unlock(&request2));
- }
+TEST(LockManager, EnqueueAtFront) {
+ LockManager lockMgr;
+ const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
+ MMAPV1LockerImpl lockerX;
+ LockRequestCombo requestX(&lockerX);
+ ASSERT(LOCK_OK == lockMgr.lock(resId, &requestX, MODE_X));
- // Lock conflict matrix tests
- static void checkConflict(LockMode existingMode, LockMode newMode, bool hasConflict) {
- LockManager lockMgr;
- const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
+ // The subsequent request will block
+ MMAPV1LockerImpl lockerLow;
+ LockRequestCombo requestLow(&lockerLow);
- MMAPV1LockerImpl lockerExisting;
- TrackingLockGrantNotification notifyExisting;
- LockRequest requestExisting;
- requestExisting.initNew(&lockerExisting, &notifyExisting);
+ ASSERT(LOCK_WAITING == lockMgr.lock(resId, &requestLow, MODE_X));
- ASSERT(LOCK_OK == lockMgr.lock(resId, &requestExisting, existingMode));
+ // This is a "queue jumping request", which will go before locker 2 above
+ MMAPV1LockerImpl lockerHi;
+ LockRequestCombo requestHi(&lockerHi);
+ requestHi.enqueueAtFront = true;
- MMAPV1LockerImpl lockerNew;
- TrackingLockGrantNotification notifyNew;
- LockRequest requestNew;
- requestNew.initNew(&lockerNew, &notifyNew);
+ ASSERT(LOCK_WAITING == lockMgr.lock(resId, &requestHi, MODE_X));
- LockResult result = lockMgr.lock(resId, &requestNew, newMode);
- if (hasConflict) {
- ASSERT_EQUALS(LOCK_WAITING, result);
- }
- else {
- ASSERT_EQUALS(LOCK_OK, result);
- }
-
- lockMgr.unlock(&requestNew);
- lockMgr.unlock(&requestExisting);
- }
-
- TEST(LockManager, ValidateConflictMatrix) {
- checkConflict(MODE_IS, MODE_IS, false);
- checkConflict(MODE_IS, MODE_IX, false);
- checkConflict(MODE_IS, MODE_S, false);
- checkConflict(MODE_IS, MODE_X, true);
-
- checkConflict(MODE_IX, MODE_IS, false);
- checkConflict(MODE_IX, MODE_IX, false);
- checkConflict(MODE_IX, MODE_S, true);
- checkConflict(MODE_IX, MODE_X, true);
-
- checkConflict(MODE_S, MODE_IS, false);
- checkConflict(MODE_S, MODE_IX, true);
- checkConflict(MODE_S, MODE_S, false);
- checkConflict(MODE_S, MODE_X, true);
-
- checkConflict(MODE_X, MODE_IS, true);
- checkConflict(MODE_X, MODE_IX, true);
- checkConflict(MODE_X, MODE_S, true);
- checkConflict(MODE_X, MODE_X, true);
- }
+ // Once the X request is gone, lockerHi should be granted, because it's queue jumping
+ ASSERT(lockMgr.unlock(&requestX));
- TEST(LockManager, EnqueueAtFront) {
- LockManager lockMgr;
- const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
+ ASSERT(requestHi.lastResId == resId);
+ ASSERT(requestHi.lastResult == LOCK_OK);
- MMAPV1LockerImpl lockerX;
- LockRequestCombo requestX(&lockerX);
+ // Finally lockerLow should be granted
+ ASSERT(lockMgr.unlock(&requestHi));
- ASSERT(LOCK_OK == lockMgr.lock(resId, &requestX, MODE_X));
+ ASSERT(requestLow.lastResId == resId);
+ ASSERT(requestLow.lastResult == LOCK_OK);
- // The subsequent request will block
- MMAPV1LockerImpl lockerLow;
- LockRequestCombo requestLow(&lockerLow);
+ // This avoids the lock manager asserting on leaked locks
+ ASSERT(lockMgr.unlock(&requestLow));
+}
- ASSERT(LOCK_WAITING == lockMgr.lock(resId, &requestLow, MODE_X));
+TEST(LockManager, CompatibleFirstImmediateGrant) {
+ LockManager lockMgr;
+ const ResourceId resId(RESOURCE_GLOBAL, 0);
- // This is a "queue jumping request", which will go before locker 2 above
- MMAPV1LockerImpl lockerHi;
- LockRequestCombo requestHi(&lockerHi);
- requestHi.enqueueAtFront = true;
+ MMAPV1LockerImpl locker1;
+ LockRequestCombo request1(&locker1);
- ASSERT(LOCK_WAITING == lockMgr.lock(resId, &requestHi, MODE_X));
+ MMAPV1LockerImpl locker2;
+ LockRequestCombo request2(&locker2);
+ request2.compatibleFirst = true;
- // Once the X request is gone, lockerHi should be granted, because it's queue jumping
- ASSERT(lockMgr.unlock(&requestX));
+ MMAPV1LockerImpl locker3;
+ LockRequestCombo request3(&locker3);
- ASSERT(requestHi.lastResId == resId);
- ASSERT(requestHi.lastResult == LOCK_OK);
+ // Lock all in IS mode
+ ASSERT(LOCK_OK == lockMgr.lock(resId, &request1, MODE_IS));
+ ASSERT(LOCK_OK == lockMgr.lock(resId, &request2, MODE_IS));
+ ASSERT(LOCK_OK == lockMgr.lock(resId, &request3, MODE_IS));
- // Finally lockerLow should be granted
- ASSERT(lockMgr.unlock(&requestHi));
+ // Now an exclusive mode comes, which would block
+ MMAPV1LockerImpl lockerX;
+ LockRequestCombo requestX(&lockerX);
- ASSERT(requestLow.lastResId == resId);
- ASSERT(requestLow.lastResult == LOCK_OK);
+ ASSERT(LOCK_WAITING == lockMgr.lock(resId, &requestX, MODE_X));
- // This avoids the lock manager asserting on leaked locks
- ASSERT(lockMgr.unlock(&requestLow));
+ // If an S comes, it should be granted, because of request2
+ {
+ MMAPV1LockerImpl lockerS;
+ LockRequestCombo requestS(&lockerS);
+ ASSERT(LOCK_OK == lockMgr.lock(resId, &requestS, MODE_S));
+ ASSERT(lockMgr.unlock(&requestS));
}
- TEST(LockManager, CompatibleFirstImmediateGrant) {
- LockManager lockMgr;
- const ResourceId resId(RESOURCE_GLOBAL, 0);
-
- MMAPV1LockerImpl locker1;
- LockRequestCombo request1(&locker1);
-
- MMAPV1LockerImpl locker2;
- LockRequestCombo request2(&locker2);
- request2.compatibleFirst = true;
-
- MMAPV1LockerImpl locker3;
- LockRequestCombo request3(&locker3);
-
- // Lock all in IS mode
- ASSERT(LOCK_OK == lockMgr.lock(resId, &request1, MODE_IS));
- ASSERT(LOCK_OK == lockMgr.lock(resId, &request2, MODE_IS));
- ASSERT(LOCK_OK == lockMgr.lock(resId, &request3, MODE_IS));
+ // If request1 goes away, the policy should still be compatible-first, because of request2
+ ASSERT(lockMgr.unlock(&request1));
- // Now an exclusive mode comes, which would block
- MMAPV1LockerImpl lockerX;
- LockRequestCombo requestX(&lockerX);
-
- ASSERT(LOCK_WAITING == lockMgr.lock(resId, &requestX, MODE_X));
-
- // If an S comes, it should be granted, because of request2
- {
- MMAPV1LockerImpl lockerS;
- LockRequestCombo requestS(&lockerS);
- ASSERT(LOCK_OK == lockMgr.lock(resId, &requestS, MODE_S));
- ASSERT(lockMgr.unlock(&requestS));
- }
-
- // If request1 goes away, the policy should still be compatible-first, because of request2
- ASSERT(lockMgr.unlock(&request1));
-
- // If S comes again, it should be granted, because of request2 still there
- {
- MMAPV1LockerImpl lockerS;
- LockRequestCombo requestS(&lockerS);
- ASSERT(LOCK_OK == lockMgr.lock(resId, &requestS, MODE_S));
- ASSERT(lockMgr.unlock(&requestS));
- }
+ // If S comes again, it should be granted, because of request2 still there
+ {
+ MMAPV1LockerImpl lockerS;
+ LockRequestCombo requestS(&lockerS);
+ ASSERT(LOCK_OK == lockMgr.lock(resId, &requestS, MODE_S));
+ ASSERT(lockMgr.unlock(&requestS));
+ }
- // With request2 gone the policy should go back to FIFO, even though request3 is active
- ASSERT(lockMgr.unlock(&request2));
+ // With request2 gone the policy should go back to FIFO, even though request3 is active
+ ASSERT(lockMgr.unlock(&request2));
- {
- MMAPV1LockerImpl lockerS;
- LockRequestCombo requestS(&lockerS);
- ASSERT(LOCK_WAITING == lockMgr.lock(resId, &requestS, MODE_S));
- ASSERT(lockMgr.unlock(&requestS));
- }
-
- // Unlock request3 so the lock manager does not assert about leaked locks
- ASSERT(lockMgr.unlock(&request3));
- ASSERT(lockMgr.unlock(&requestX));
+ {
+ MMAPV1LockerImpl lockerS;
+ LockRequestCombo requestS(&lockerS);
+ ASSERT(LOCK_WAITING == lockMgr.lock(resId, &requestS, MODE_S));
+ ASSERT(lockMgr.unlock(&requestS));
}
- TEST(LockManager, CompatibleFirstDelayedGrant) {
- LockManager lockMgr;
- const ResourceId resId(RESOURCE_GLOBAL, 0);
-
- MMAPV1LockerImpl lockerXInitial;
- LockRequestCombo requestXInitial(&lockerXInitial);
- ASSERT(LOCK_OK == lockMgr.lock(resId, &requestXInitial, MODE_X));
-
- MMAPV1LockerImpl locker1;
- LockRequestCombo request1(&locker1);
-
- MMAPV1LockerImpl locker2;
- LockRequestCombo request2(&locker2);
- request2.compatibleFirst = true;
-
- MMAPV1LockerImpl locker3;
- LockRequestCombo request3(&locker3);
-
- // Lock all in IS mode (should block behind the global lock)
- ASSERT(LOCK_WAITING == lockMgr.lock(resId, &request1, MODE_IS));
- ASSERT(LOCK_WAITING == lockMgr.lock(resId, &request2, MODE_IS));
- ASSERT(LOCK_WAITING == lockMgr.lock(resId, &request3, MODE_IS));
-
- // Now an exclusive mode comes, which would block behind the IS modes
- MMAPV1LockerImpl lockerX;
- LockRequestCombo requestX(&lockerX);
- ASSERT(LOCK_WAITING == lockMgr.lock(resId, &requestX, MODE_X));
-
- // Free the first X lock so all IS modes are granted
- ASSERT(lockMgr.unlock(&requestXInitial));
- ASSERT(request1.lastResult == LOCK_OK);
- ASSERT(request2.lastResult == LOCK_OK);
- ASSERT(request3.lastResult == LOCK_OK);
-
- // If an S comes, it should be granted, because of request2
- {
- MMAPV1LockerImpl lockerS;
- LockRequestCombo requestS(&lockerS);
- ASSERT(LOCK_OK == lockMgr.lock(resId, &requestS, MODE_S));
- ASSERT(lockMgr.unlock(&requestS));
- }
-
- // If request1 goes away, the policy should still be compatible-first, because of request2
- ASSERT(lockMgr.unlock(&request1));
+ // Unlock request3 so the lock manager does not assert about leaked locks
+ ASSERT(lockMgr.unlock(&request3));
+ ASSERT(lockMgr.unlock(&requestX));
+}
+
+TEST(LockManager, CompatibleFirstDelayedGrant) {
+ LockManager lockMgr;
+ const ResourceId resId(RESOURCE_GLOBAL, 0);
+
+ MMAPV1LockerImpl lockerXInitial;
+ LockRequestCombo requestXInitial(&lockerXInitial);
+ ASSERT(LOCK_OK == lockMgr.lock(resId, &requestXInitial, MODE_X));
+
+ MMAPV1LockerImpl locker1;
+ LockRequestCombo request1(&locker1);
+
+ MMAPV1LockerImpl locker2;
+ LockRequestCombo request2(&locker2);
+ request2.compatibleFirst = true;
+
+ MMAPV1LockerImpl locker3;
+ LockRequestCombo request3(&locker3);
+
+ // Lock all in IS mode (should block behind the global lock)
+ ASSERT(LOCK_WAITING == lockMgr.lock(resId, &request1, MODE_IS));
+ ASSERT(LOCK_WAITING == lockMgr.lock(resId, &request2, MODE_IS));
+ ASSERT(LOCK_WAITING == lockMgr.lock(resId, &request3, MODE_IS));
+
+ // Now an exclusive mode comes, which would block behind the IS modes
+ MMAPV1LockerImpl lockerX;
+ LockRequestCombo requestX(&lockerX);
+ ASSERT(LOCK_WAITING == lockMgr.lock(resId, &requestX, MODE_X));
+
+ // Free the first X lock so all IS modes are granted
+ ASSERT(lockMgr.unlock(&requestXInitial));
+ ASSERT(request1.lastResult == LOCK_OK);
+ ASSERT(request2.lastResult == LOCK_OK);
+ ASSERT(request3.lastResult == LOCK_OK);
+
+ // If an S comes, it should be granted, because of request2
+ {
+ MMAPV1LockerImpl lockerS;
+ LockRequestCombo requestS(&lockerS);
+ ASSERT(LOCK_OK == lockMgr.lock(resId, &requestS, MODE_S));
+ ASSERT(lockMgr.unlock(&requestS));
+ }
- // If S comes again, it should be granted, because of request2 still there
- {
- MMAPV1LockerImpl lockerS;
- LockRequestCombo requestS(&lockerS);
- ASSERT(LOCK_OK == lockMgr.lock(resId, &requestS, MODE_S));
- ASSERT(lockMgr.unlock(&requestS));
- }
+ // If request1 goes away, the policy should still be compatible-first, because of request2
+ ASSERT(lockMgr.unlock(&request1));
- // With request2 gone the policy should go back to FIFO, even though request3 is active
- ASSERT(lockMgr.unlock(&request2));
+ // If S comes again, it should be granted, because of request2 still there
+ {
+ MMAPV1LockerImpl lockerS;
+ LockRequestCombo requestS(&lockerS);
+ ASSERT(LOCK_OK == lockMgr.lock(resId, &requestS, MODE_S));
+ ASSERT(lockMgr.unlock(&requestS));
+ }
- {
- MMAPV1LockerImpl lockerS;
- LockRequestCombo requestS(&lockerS);
- ASSERT(LOCK_WAITING == lockMgr.lock(resId, &requestS, MODE_S));
- ASSERT(lockMgr.unlock(&requestS));
- }
+ // With request2 gone the policy should go back to FIFO, even though request3 is active
+ ASSERT(lockMgr.unlock(&request2));
- // Unlock request3 so the lock manager does not assert about leaked locks
- ASSERT(lockMgr.unlock(&request3));
- ASSERT(lockMgr.unlock(&requestX));
+ {
+ MMAPV1LockerImpl lockerS;
+ LockRequestCombo requestS(&lockerS);
+ ASSERT(LOCK_WAITING == lockMgr.lock(resId, &requestS, MODE_S));
+ ASSERT(lockMgr.unlock(&requestS));
}
- TEST(LockManager, CompatibleFirstCancelWaiting) {
- LockManager lockMgr;
- const ResourceId resId(RESOURCE_GLOBAL, 0);
-
- MMAPV1LockerImpl lockerSInitial;
- LockRequestCombo requestSInitial(&lockerSInitial);
- ASSERT(LOCK_OK == lockMgr.lock(resId, &requestSInitial, MODE_S));
-
- MMAPV1LockerImpl lockerX;
- LockRequestCombo requestX(&lockerX);
- ASSERT(LOCK_WAITING == lockMgr.lock(resId, &requestX, MODE_X));
-
- MMAPV1LockerImpl lockerPending;
- LockRequestCombo requestPending(&lockerPending);
- requestPending.compatibleFirst = true;
- ASSERT(LOCK_WAITING == lockMgr.lock(resId, &requestPending, MODE_S));
-
- // The pending S request is not granted yet, so the policy should still be FIFO
- {
- MMAPV1LockerImpl lockerS;
- LockRequestCombo requestS(&lockerS);
- ASSERT(LOCK_WAITING == lockMgr.lock(resId, &requestS, MODE_S));
- ASSERT(lockMgr.unlock(&requestS));
- }
+ // Unlock request3 so the lock manager does not assert about leaked locks
+ ASSERT(lockMgr.unlock(&request3));
+ ASSERT(lockMgr.unlock(&requestX));
+}
+
+TEST(LockManager, CompatibleFirstCancelWaiting) {
+ LockManager lockMgr;
+ const ResourceId resId(RESOURCE_GLOBAL, 0);
+
+ MMAPV1LockerImpl lockerSInitial;
+ LockRequestCombo requestSInitial(&lockerSInitial);
+ ASSERT(LOCK_OK == lockMgr.lock(resId, &requestSInitial, MODE_S));
+
+ MMAPV1LockerImpl lockerX;
+ LockRequestCombo requestX(&lockerX);
+ ASSERT(LOCK_WAITING == lockMgr.lock(resId, &requestX, MODE_X));
+
+ MMAPV1LockerImpl lockerPending;
+ LockRequestCombo requestPending(&lockerPending);
+ requestPending.compatibleFirst = true;
+ ASSERT(LOCK_WAITING == lockMgr.lock(resId, &requestPending, MODE_S));
+
+ // The pending S request is not granted yet, so the policy should still be FIFO
+ {
+ MMAPV1LockerImpl lockerS;
+ LockRequestCombo requestS(&lockerS);
+ ASSERT(LOCK_WAITING == lockMgr.lock(resId, &requestS, MODE_S));
+ ASSERT(lockMgr.unlock(&requestS));
+ }
- // Unlock the pending S request; the policy should still be FIFO
- ASSERT(lockMgr.unlock(&requestPending));
+ // Unlock the pending S request; the policy should still be FIFO
+ ASSERT(lockMgr.unlock(&requestPending));
- {
- MMAPV1LockerImpl lockerS;
- LockRequestCombo requestS(&lockerS);
- ASSERT(LOCK_WAITING == lockMgr.lock(resId, &requestS, MODE_S));
- ASSERT(lockMgr.unlock(&requestS));
- }
-
- // Unlock remaining locks to keep the leak detection logic happy
- ASSERT(lockMgr.unlock(&requestSInitial));
- ASSERT(lockMgr.unlock(&requestX));
+ {
+ MMAPV1LockerImpl lockerS;
+ LockRequestCombo requestS(&lockerS);
+ ASSERT(LOCK_WAITING == lockMgr.lock(resId, &requestS, MODE_S));
+ ASSERT(lockMgr.unlock(&requestS));
}
-} // namespace mongo
+ // Unlock remaining locks to keep the leak detection logic happy
+ ASSERT(lockMgr.unlock(&requestSInitial));
+ ASSERT(lockMgr.unlock(&requestX));
+}
+
+} // namespace mongo
diff --git a/src/mongo/db/concurrency/lock_manager_test_help.h b/src/mongo/db/concurrency/lock_manager_test_help.h
index 1650ca0806a..9344ee67a32 100644
--- a/src/mongo/db/concurrency/lock_manager_test_help.h
+++ b/src/mongo/db/concurrency/lock_manager_test_help.h
@@ -33,43 +33,41 @@
namespace mongo {
- class LockerForTests : public LockerImpl<false> {
- public:
- explicit LockerForTests(LockMode globalLockMode) {
- lockGlobal(globalLockMode);
- }
+class LockerForTests : public LockerImpl<false> {
+public:
+ explicit LockerForTests(LockMode globalLockMode) {
+ lockGlobal(globalLockMode);
+ }
- ~LockerForTests() {
- unlockAll();
- }
- };
+ ~LockerForTests() {
+ unlockAll();
+ }
+};
- class TrackingLockGrantNotification : public LockGrantNotification {
- public:
- TrackingLockGrantNotification() : numNotifies(0), lastResult(LOCK_INVALID) {
+class TrackingLockGrantNotification : public LockGrantNotification {
+public:
+ TrackingLockGrantNotification() : numNotifies(0), lastResult(LOCK_INVALID) {}
- }
+ virtual void notify(ResourceId resId, LockResult result) {
+ numNotifies++;
+ lastResId = resId;
+ lastResult = result;
+ }
- virtual void notify(ResourceId resId, LockResult result) {
- numNotifies++;
- lastResId = resId;
- lastResult = result;
- }
+public:
+ int numNotifies;
- public:
- int numNotifies;
+ ResourceId lastResId;
+ LockResult lastResult;
+};
- ResourceId lastResId;
- LockResult lastResult;
- };
+struct LockRequestCombo : public LockRequest, TrackingLockGrantNotification {
+public:
+ explicit LockRequestCombo(Locker* locker) {
+ initNew(locker, this);
+ }
+};
- struct LockRequestCombo : public LockRequest, TrackingLockGrantNotification {
- public:
- explicit LockRequestCombo (Locker* locker) {
- initNew(locker, this);
- }
- };
-
-} // namespace mongo
+} // namespace mongo
diff --git a/src/mongo/db/concurrency/lock_request_list.h b/src/mongo/db/concurrency/lock_request_list.h
index 066a09dcd98..3501504ac4f 100644
--- a/src/mongo/db/concurrency/lock_request_list.h
+++ b/src/mongo/db/concurrency/lock_request_list.h
@@ -33,81 +33,76 @@
namespace mongo {
- /**
- * Simple intrusive list implementation for the lock's granted and conflicting lists. Does not
- * own its contents, just uses the intrusive pointers on the LockRequest structure to link them
- * together. Therefore requests must outlive this list.
- *
- * Intentionally implemented as a POD in order to avoid constructor/destructor invocations.
- *
- * NOTE: This class should not be used for generic purposes and should not be used outside of
- * the Lock Manager library.
- */
- class LockRequestList {
- public:
-
- void push_front(LockRequest* request) {
- // Sanity check that we do not reuse entries without cleaning them up
- invariant(request->next == NULL);
- invariant(request->prev == NULL);
-
- if (_front == NULL) {
- _front = _back = request;
- }
- else {
- request->next = _front;
-
- _front->prev = request;
- _front = request;
- }
+/**
+ * Simple intrusive list implementation for the lock's granted and conflicting lists. Does not
+ * own its contents, just uses the intrusive pointers on the LockRequest structure to link them
+ * together. Therefore requests must outlive this list.
+ *
+ * Intentionally implemented as a POD in order to avoid constructor/destructor invocations.
+ *
+ * NOTE: This class should not be used for generic purposes and should not be used outside of
+ * the Lock Manager library.
+ */
+class LockRequestList {
+public:
+ void push_front(LockRequest* request) {
+ // Sanity check that we do not reuse entries without cleaning them up
+ invariant(request->next == NULL);
+ invariant(request->prev == NULL);
+
+ if (_front == NULL) {
+ _front = _back = request;
+ } else {
+ request->next = _front;
+
+ _front->prev = request;
+ _front = request;
}
+ }
- void push_back(LockRequest* request) {
- // Sanity check that we do not reuse entries without cleaning them up
- invariant(request->next == NULL);
- invariant(request->prev == NULL);
+ void push_back(LockRequest* request) {
+ // Sanity check that we do not reuse entries without cleaning them up
+ invariant(request->next == NULL);
+ invariant(request->prev == NULL);
- if (_front == NULL) {
- _front = _back = request;
- }
- else {
- request->prev = _back;
+ if (_front == NULL) {
+ _front = _back = request;
+ } else {
+ request->prev = _back;
- _back->next = request;
- _back = request;
- }
+ _back->next = request;
+ _back = request;
}
+ }
- void remove(LockRequest* request) {
- if (request->prev != NULL) {
- request->prev->next = request->next;
- }
- else {
- _front = request->next;
- }
-
- if (request->next != NULL) {
- request->next->prev = request->prev;
- }
- else {
- _back = request->prev;
- }
-
- request->prev = NULL;
- request->next = NULL;
+ void remove(LockRequest* request) {
+ if (request->prev != NULL) {
+ request->prev->next = request->next;
+ } else {
+ _front = request->next;
}
- void reset() {
- _front = _back = NULL;
+ if (request->next != NULL) {
+ request->next->prev = request->prev;
+ } else {
+ _back = request->prev;
}
- bool empty() const {
- return _front == NULL;
- }
+ request->prev = NULL;
+ request->next = NULL;
+ }
+
+ void reset() {
+ _front = _back = NULL;
+ }
+
+ bool empty() const {
+ return _front == NULL;
+ }
- // Pointers to the beginning and the end of the list
- LockRequest* _front;
- LockRequest* _back;
- };
+ // Pointers to the beginning and the end of the list
+ LockRequest* _front;
+ LockRequest* _back;
+};
-} // namespace mongo
+} // namespace mongo
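
Because LockRequestList is deliberately a POD, it must be reset() before first use and its entries must be unlinked before insertion. A small illustrative sketch, assuming the prev/next field names from the LockRequest definition in lock_manager_defs.h:

    LockRequestList list;
    list.reset(); // No constructor runs for a POD, so initialize explicitly.

    LockRequest a, b;
    a.prev = a.next = NULL; // push_* invariants require unlinked entries
    b.prev = b.next = NULL;

    list.push_back(&a);  // List: a
    list.push_front(&b); // List: b, a

    list.remove(&a); // Unlinks a and NULLs its prev/next pointers.
    invariant(!list.empty());

    list.remove(&b);
    invariant(list.empty());
    // The list never owns its entries; a and b must outlive it.
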
diff --git a/src/mongo/db/concurrency/lock_state.cpp b/src/mongo/db/concurrency/lock_state.cpp
index 6c12a8ae1b1..37ca3c7f611 100644
--- a/src/mongo/db/concurrency/lock_state.cpp
+++ b/src/mongo/db/concurrency/lock_state.cpp
@@ -46,104 +46,102 @@
namespace mongo {
namespace {
- /**
- * Partitioned global lock statistics, so we don't hit the same bucket.
- */
- class PartitionedInstanceWideLockStats {
- MONGO_DISALLOW_COPYING(PartitionedInstanceWideLockStats);
- public:
+/**
+ * Partitioned global lock statistics, so we don't hit the same bucket.
+ */
+class PartitionedInstanceWideLockStats {
+ MONGO_DISALLOW_COPYING(PartitionedInstanceWideLockStats);
- PartitionedInstanceWideLockStats() { }
+public:
+ PartitionedInstanceWideLockStats() {}
- void recordAcquisition(LockerId id, ResourceId resId, LockMode mode) {
- _get(id).recordAcquisition(resId, mode);
- }
+ void recordAcquisition(LockerId id, ResourceId resId, LockMode mode) {
+ _get(id).recordAcquisition(resId, mode);
+ }
- void recordWait(LockerId id, ResourceId resId, LockMode mode) {
- _get(id).recordWait(resId, mode);
- }
+ void recordWait(LockerId id, ResourceId resId, LockMode mode) {
+ _get(id).recordWait(resId, mode);
+ }
- void recordWaitTime(LockerId id, ResourceId resId, LockMode mode, uint64_t waitMicros) {
- _get(id).recordWaitTime(resId, mode, waitMicros);
- }
+ void recordWaitTime(LockerId id, ResourceId resId, LockMode mode, uint64_t waitMicros) {
+ _get(id).recordWaitTime(resId, mode, waitMicros);
+ }
- void recordDeadlock(ResourceId resId, LockMode mode) {
- _get(resId).recordDeadlock(resId, mode);
- }
+ void recordDeadlock(ResourceId resId, LockMode mode) {
+ _get(resId).recordDeadlock(resId, mode);
+ }
- void report(SingleThreadedLockStats* outStats) const {
- for (int i = 0; i < NumPartitions; i++) {
- outStats->append(_partitions[i].stats);
- }
+ void report(SingleThreadedLockStats* outStats) const {
+ for (int i = 0; i < NumPartitions; i++) {
+ outStats->append(_partitions[i].stats);
}
+ }
- void reset() {
- for (int i = 0; i < NumPartitions; i++) {
- _partitions[i].stats.reset();
- }
+ void reset() {
+ for (int i = 0; i < NumPartitions; i++) {
+ _partitions[i].stats.reset();
}
+ }
- private:
-
- // This alignment is a best effort approach to ensure that each partition falls on a
- // separate page/cache line in order to avoid false sharing.
- struct MONGO_COMPILER_ALIGN_TYPE(128) AlignedLockStats {
- AtomicLockStats stats;
- };
+private:
+ // This alignment is a best effort approach to ensure that each partition falls on a
+ // separate page/cache line in order to avoid false sharing.
+ struct MONGO_COMPILER_ALIGN_TYPE(128) AlignedLockStats {
+ AtomicLockStats stats;
+ };
- enum { NumPartitions = 8 };
+ enum { NumPartitions = 8 };
- AtomicLockStats& _get(LockerId id) {
- return _partitions[id % NumPartitions].stats;
- }
+ AtomicLockStats& _get(LockerId id) {
+ return _partitions[id % NumPartitions].stats;
+ }
- AlignedLockStats _partitions[NumPartitions];
- };
+ AlignedLockStats _partitions[NumPartitions];
+};
- // Global lock manager instance.
- LockManager globalLockManager;
+// Global lock manager instance.
+LockManager globalLockManager;
- // Global lock. Every server operation, which uses the Locker must acquire this lock at least
- // once. See comments in the header file (begin/endTransaction) for more information.
- const ResourceId resourceIdGlobal = ResourceId(RESOURCE_GLOBAL,
- ResourceId::SINGLETON_GLOBAL);
+// Global lock. Every server operation that uses the Locker must acquire this lock at least
+// once. See comments in the header file (begin/endTransaction) for more information.
+const ResourceId resourceIdGlobal = ResourceId(RESOURCE_GLOBAL, ResourceId::SINGLETON_GLOBAL);
- // Flush lock. This is only used for the MMAP V1 storage engine and synchronizes journal writes
- // to the shared view and remaps. See the comments in the header for information on how MMAP V1
- // concurrency control works.
- const ResourceId resourceIdMMAPV1Flush = ResourceId(RESOURCE_MMAPV1_FLUSH,
- ResourceId::SINGLETON_MMAPV1_FLUSH);
+// Flush lock. This is only used for the MMAP V1 storage engine and synchronizes journal writes
+// to the shared view and remaps. See the comments in the header for information on how MMAP V1
+// concurrency control works.
+const ResourceId resourceIdMMAPV1Flush =
+ ResourceId(RESOURCE_MMAPV1_FLUSH, ResourceId::SINGLETON_MMAPV1_FLUSH);
- // How often (in millis) to check for deadlock if a lock has not been granted for some time
- const unsigned DeadlockTimeoutMs = 500;
+// How often (in millis) to check for deadlock if a lock has not been granted for some time
+const unsigned DeadlockTimeoutMs = 500;
- // Dispenses unique LockerId identifiers
- AtomicUInt64 idCounter(0);
+// Dispenses unique LockerId identifiers
+AtomicUInt64 idCounter(0);
- // Partitioned global lock statistics, so we don't hit the same bucket
- PartitionedInstanceWideLockStats globalStats;
+// Partitioned global lock statistics, so we don't hit the same bucket
+PartitionedInstanceWideLockStats globalStats;
- /**
- * Whether the particular lock's release should be held until the end of the operation. We
- * delay release of exclusive locks (locks that are for write operations) in order to ensure
- * that the data they protect is committed successfully.
- */
- bool shouldDelayUnlock(ResourceId resId, LockMode mode) {
- // Global and flush lock are not used to protect transactional resources and as such, they
- // need to be acquired and released when requested.
- if (resId.getType() == RESOURCE_GLOBAL) {
- return false;
- }
+/**
+ * Whether the particular lock's release should be held until the end of the operation. We
+ * delay release of exclusive locks (locks that are for write operations) in order to ensure
+ * that the data they protect is committed successfully.
+ */
+bool shouldDelayUnlock(ResourceId resId, LockMode mode) {
+ // Global and flush lock are not used to protect transactional resources and as such, they
+ // need to be acquired and released when requested.
+ if (resId.getType() == RESOURCE_GLOBAL) {
+ return false;
+ }
- if (resId == resourceIdMMAPV1Flush) {
- return false;
- }
+ if (resId == resourceIdMMAPV1Flush) {
+ return false;
+ }
- switch (mode) {
+ switch (mode) {
case MODE_X:
case MODE_IX:
return true;
@@ -154,614 +152,612 @@ namespace {
default:
invariant(false);
- }
}
+}
-} // namespace
+} // namespace
- template<bool IsForMMAPV1>
- bool LockerImpl<IsForMMAPV1>::isW() const {
- return getLockMode(resourceIdGlobal) == MODE_X;
- }
+template <bool IsForMMAPV1>
+bool LockerImpl<IsForMMAPV1>::isW() const {
+ return getLockMode(resourceIdGlobal) == MODE_X;
+}
- template<bool IsForMMAPV1>
- bool LockerImpl<IsForMMAPV1>::isR() const {
- return getLockMode(resourceIdGlobal) == MODE_S;
- }
+template <bool IsForMMAPV1>
+bool LockerImpl<IsForMMAPV1>::isR() const {
+ return getLockMode(resourceIdGlobal) == MODE_S;
+}
- template<bool IsForMMAPV1>
- bool LockerImpl<IsForMMAPV1>::isLocked() const {
- return getLockMode(resourceIdGlobal) != MODE_NONE;
- }
+template <bool IsForMMAPV1>
+bool LockerImpl<IsForMMAPV1>::isLocked() const {
+ return getLockMode(resourceIdGlobal) != MODE_NONE;
+}
- template<bool IsForMMAPV1>
- bool LockerImpl<IsForMMAPV1>::isWriteLocked() const {
- return isLockHeldForMode(resourceIdGlobal, MODE_IX);
- }
+template <bool IsForMMAPV1>
+bool LockerImpl<IsForMMAPV1>::isWriteLocked() const {
+ return isLockHeldForMode(resourceIdGlobal, MODE_IX);
+}
- template<bool IsForMMAPV1>
- bool LockerImpl<IsForMMAPV1>::isReadLocked() const {
- return isLockHeldForMode(resourceIdGlobal, MODE_IS);
- }
+template <bool IsForMMAPV1>
+bool LockerImpl<IsForMMAPV1>::isReadLocked() const {
+ return isLockHeldForMode(resourceIdGlobal, MODE_IS);
+}
- template<bool IsForMMAPV1>
- void LockerImpl<IsForMMAPV1>::assertEmptyAndReset() {
- invariant(!inAWriteUnitOfWork());
- invariant(_resourcesToUnlockAtEndOfUnitOfWork.empty());
- invariant(_requests.empty());
+template <bool IsForMMAPV1>
+void LockerImpl<IsForMMAPV1>::assertEmptyAndReset() {
+ invariant(!inAWriteUnitOfWork());
+ invariant(_resourcesToUnlockAtEndOfUnitOfWork.empty());
+ invariant(_requests.empty());
- // Reset the locking statistics so the object can be reused
- _stats.reset();
- }
+ // Reset the locking statistics so the object can be reused
+ _stats.reset();
+}
- template<bool IsForMMAPV1>
- void LockerImpl<IsForMMAPV1>::dump() const {
- StringBuilder ss;
- ss << "Locker id " << _id << " status: ";
+template <bool IsForMMAPV1>
+void LockerImpl<IsForMMAPV1>::dump() const {
+ StringBuilder ss;
+ ss << "Locker id " << _id << " status: ";
- _lock.lock();
- LockRequestsMap::ConstIterator it = _requests.begin();
- while (!it.finished()) {
- ss << it.key().toString() << " "
- << lockRequestStatusName(it->status) << " in "
- << modeName(it->mode) << "; ";
- it.next();
- }
- _lock.unlock();
-
- log() << ss.str() << std::endl;
+ _lock.lock();
+ LockRequestsMap::ConstIterator it = _requests.begin();
+ while (!it.finished()) {
+ ss << it.key().toString() << " " << lockRequestStatusName(it->status) << " in "
+ << modeName(it->mode) << "; ";
+ it.next();
}
+ _lock.unlock();
+ log() << ss.str() << std::endl;
+}
- //
- // CondVarLockGrantNotification
- //
- CondVarLockGrantNotification::CondVarLockGrantNotification() {
- clear();
- }
+//
+// CondVarLockGrantNotification
+//
- void CondVarLockGrantNotification::clear() {
- _result = LOCK_INVALID;
- }
+CondVarLockGrantNotification::CondVarLockGrantNotification() {
+ clear();
+}
- LockResult CondVarLockGrantNotification::wait(unsigned timeoutMs) {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
- while (_result == LOCK_INVALID) {
- if (boost::cv_status::timeout == _cond.wait_for(lock, Milliseconds(timeoutMs))) {
- // Timeout
- return LOCK_TIMEOUT;
- }
- }
+void CondVarLockGrantNotification::clear() {
+ _result = LOCK_INVALID;
+}
- return _result;
+LockResult CondVarLockGrantNotification::wait(unsigned timeoutMs) {
+ stdx::unique_lock<stdx::mutex> lock(_mutex);
+ while (_result == LOCK_INVALID) {
+ if (boost::cv_status::timeout == _cond.wait_for(lock, Milliseconds(timeoutMs))) {
+ // Timeout
+ return LOCK_TIMEOUT;
+ }
}
- void CondVarLockGrantNotification::notify(ResourceId resId, LockResult result) {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
- invariant(_result == LOCK_INVALID);
- _result = result;
+ return _result;
+}
- _cond.notify_all();
- }
+void CondVarLockGrantNotification::notify(ResourceId resId, LockResult result) {
+ stdx::unique_lock<stdx::mutex> lock(_mutex);
+ invariant(_result == LOCK_INVALID);
+ _result = result;
+ _cond.notify_all();
+}
- //
- // Locker
- //
- template<bool IsForMMAPV1>
- LockerImpl<IsForMMAPV1>::LockerImpl()
- : _id(idCounter.addAndFetch(1)),
- _requestStartTime(0),
- _wuowNestingLevel(0),
- _batchWriter(false) {
- }
+//
+// Locker
+//
- template<bool IsForMMAPV1>
- LockerImpl<IsForMMAPV1>::~LockerImpl() {
- // Cannot delete the Locker while there are still outstanding requests, because the
- // LockManager may attempt to access deleted memory. Besides it is probably incorrect
- // to delete with unaccounted locks anyways.
- assertEmptyAndReset();
- }
+template <bool IsForMMAPV1>
+LockerImpl<IsForMMAPV1>::LockerImpl()
+ : _id(idCounter.addAndFetch(1)),
+ _requestStartTime(0),
+ _wuowNestingLevel(0),
+ _batchWriter(false) {}
- template<bool IsForMMAPV1>
- LockResult LockerImpl<IsForMMAPV1>::lockGlobal(LockMode mode, unsigned timeoutMs) {
- LockResult result = lockGlobalBegin(mode);
- if (result == LOCK_WAITING) {
- result = lockGlobalComplete(timeoutMs);
- }
+template <bool IsForMMAPV1>
+LockerImpl<IsForMMAPV1>::~LockerImpl() {
+ // Cannot delete the Locker while there are still outstanding requests, because the
+ // LockManager may attempt to access deleted memory. Besides it is probably incorrect
+ // to delete with unaccounted locks anyways.
+ assertEmptyAndReset();
+}
- if (result == LOCK_OK) {
- lockMMAPV1Flush();
- }
+template <bool IsForMMAPV1>
+LockResult LockerImpl<IsForMMAPV1>::lockGlobal(LockMode mode, unsigned timeoutMs) {
+ LockResult result = lockGlobalBegin(mode);
+ if (result == LOCK_WAITING) {
+ result = lockGlobalComplete(timeoutMs);
+ }
- return result;
+ if (result == LOCK_OK) {
+ lockMMAPV1Flush();
}
- template<bool IsForMMAPV1>
- LockResult LockerImpl<IsForMMAPV1>::lockGlobalBegin(LockMode mode) {
- const LockResult result = lockBegin(resourceIdGlobal, mode);
- if (result == LOCK_OK) return LOCK_OK;
+ return result;
+}
- // Currently, deadlock detection does not happen inline with lock acquisition so the only
- // unsuccessful result that the lock manager would return is LOCK_WAITING.
- invariant(result == LOCK_WAITING);
+template <bool IsForMMAPV1>
+LockResult LockerImpl<IsForMMAPV1>::lockGlobalBegin(LockMode mode) {
+ const LockResult result = lockBegin(resourceIdGlobal, mode);
+ if (result == LOCK_OK)
+ return LOCK_OK;
- return result;
- }
+ // Currently, deadlock detection does not happen inline with lock acquisition so the only
+ // unsuccessful result that the lock manager would return is LOCK_WAITING.
+ invariant(result == LOCK_WAITING);
- template<bool IsForMMAPV1>
- LockResult LockerImpl<IsForMMAPV1>::lockGlobalComplete(unsigned timeoutMs) {
- return lockComplete(resourceIdGlobal, getLockMode(resourceIdGlobal), timeoutMs, false);
- }
+ return result;
+}
- template<bool IsForMMAPV1>
- void LockerImpl<IsForMMAPV1>::lockMMAPV1Flush() {
- if (!IsForMMAPV1) return;
+template <bool IsForMMAPV1>
+LockResult LockerImpl<IsForMMAPV1>::lockGlobalComplete(unsigned timeoutMs) {
+ return lockComplete(resourceIdGlobal, getLockMode(resourceIdGlobal), timeoutMs, false);
+}
- // The flush lock always has a reference count of 1, because it is dropped at the end of
- // each write unit of work in order to allow the flush thread to run. See the comments in
- // the header for information on how the MMAP V1 journaling system works.
- LockRequest* globalLockRequest = _requests.find(resourceIdGlobal).objAddr();
- if (globalLockRequest->recursiveCount == 1) {
- invariant(LOCK_OK == lock(resourceIdMMAPV1Flush, _getModeForMMAPV1FlushLock()));
- }
+template <bool IsForMMAPV1>
+void LockerImpl<IsForMMAPV1>::lockMMAPV1Flush() {
+ if (!IsForMMAPV1)
+ return;
- dassert(getLockMode(resourceIdMMAPV1Flush) == _getModeForMMAPV1FlushLock());
+ // The flush lock always has a reference count of 1, because it is dropped at the end of
+ // each write unit of work in order to allow the flush thread to run. See the comments in
+ // the header for information on how the MMAP V1 journaling system works.
+ LockRequest* globalLockRequest = _requests.find(resourceIdGlobal).objAddr();
+ if (globalLockRequest->recursiveCount == 1) {
+ invariant(LOCK_OK == lock(resourceIdMMAPV1Flush, _getModeForMMAPV1FlushLock()));
}
- template<bool IsForMMAPV1>
- void LockerImpl<IsForMMAPV1>::downgradeGlobalXtoSForMMAPV1() {
- invariant(!inAWriteUnitOfWork());
+ dassert(getLockMode(resourceIdMMAPV1Flush) == _getModeForMMAPV1FlushLock());
+}
- LockRequest* globalLockRequest = _requests.find(resourceIdGlobal).objAddr();
- invariant(globalLockRequest->mode == MODE_X);
- invariant(globalLockRequest->recursiveCount == 1);
+template <bool IsForMMAPV1>
+void LockerImpl<IsForMMAPV1>::downgradeGlobalXtoSForMMAPV1() {
+ invariant(!inAWriteUnitOfWork());
- // Making this call here will record lock downgrades as acquisitions, which is acceptable
- globalStats.recordAcquisition(_id, resourceIdGlobal, MODE_S);
- _stats.recordAcquisition(resourceIdGlobal, MODE_S);
+ LockRequest* globalLockRequest = _requests.find(resourceIdGlobal).objAddr();
+ invariant(globalLockRequest->mode == MODE_X);
+ invariant(globalLockRequest->recursiveCount == 1);
- globalLockManager.downgrade(globalLockRequest, MODE_S);
+ // Making this call here will record lock downgrades as acquisitions, which is acceptable
+ globalStats.recordAcquisition(_id, resourceIdGlobal, MODE_S);
+ _stats.recordAcquisition(resourceIdGlobal, MODE_S);
- if (IsForMMAPV1) {
- invariant(unlock(resourceIdMMAPV1Flush));
- }
+ globalLockManager.downgrade(globalLockRequest, MODE_S);
+
+ if (IsForMMAPV1) {
+ invariant(unlock(resourceIdMMAPV1Flush));
}
+}
- template<bool IsForMMAPV1>
- bool LockerImpl<IsForMMAPV1>::unlockAll() {
- if (!unlock(resourceIdGlobal)) {
- return false;
- }
+template <bool IsForMMAPV1>
+bool LockerImpl<IsForMMAPV1>::unlockAll() {
+ if (!unlock(resourceIdGlobal)) {
+ return false;
+ }
- LockRequestsMap::Iterator it = _requests.begin();
- while (!it.finished()) {
- // If we're here we should only have one reference to any lock. It is a programming
- // error for any lock to have more references than the global lock, because every
- // scope starts by calling lockGlobal.
- if (it.key().getType() == RESOURCE_GLOBAL) {
- it.next();
- }
- else {
- invariant(_unlockImpl(it));
- }
+ LockRequestsMap::Iterator it = _requests.begin();
+ while (!it.finished()) {
+ // If we're here we should only have one reference to any lock. It is a programming
+ // error for any lock to have more references than the global lock, because every
+ // scope starts by calling lockGlobal.
+ if (it.key().getType() == RESOURCE_GLOBAL) {
+ it.next();
+ } else {
+ invariant(_unlockImpl(it));
}
-
- return true;
}
- template<bool IsForMMAPV1>
- void LockerImpl<IsForMMAPV1>::beginWriteUnitOfWork() {
- // Sanity check that write transactions under MMAP V1 have acquired the flush lock, so we
- // don't allow partial changes to be written.
- dassert(!IsForMMAPV1 || isLockHeldForMode(resourceIdMMAPV1Flush, MODE_IX));
+ return true;
+}
- _wuowNestingLevel++;
- }
+template <bool IsForMMAPV1>
+void LockerImpl<IsForMMAPV1>::beginWriteUnitOfWork() {
+ // Sanity check that write transactions under MMAP V1 have acquired the flush lock, so we
+ // don't allow partial changes to be written.
+ dassert(!IsForMMAPV1 || isLockHeldForMode(resourceIdMMAPV1Flush, MODE_IX));
- template<bool IsForMMAPV1>
- void LockerImpl<IsForMMAPV1>::endWriteUnitOfWork() {
- invariant(_wuowNestingLevel > 0);
+ _wuowNestingLevel++;
+}
- if (--_wuowNestingLevel > 0) {
- // Don't do anything unless leaving outermost WUOW.
- return;
- }
+template <bool IsForMMAPV1>
+void LockerImpl<IsForMMAPV1>::endWriteUnitOfWork() {
+ invariant(_wuowNestingLevel > 0);
- while (!_resourcesToUnlockAtEndOfUnitOfWork.empty()) {
- unlock(_resourcesToUnlockAtEndOfUnitOfWork.front());
- _resourcesToUnlockAtEndOfUnitOfWork.pop();
- }
+ if (--_wuowNestingLevel > 0) {
+ // Don't do anything unless leaving outermost WUOW.
+ return;
+ }
- // For MMAP V1, we need to yield the flush lock so that the flush thread can run
- if (IsForMMAPV1) {
- invariant(unlock(resourceIdMMAPV1Flush));
- invariant(LOCK_OK == lock(resourceIdMMAPV1Flush, _getModeForMMAPV1FlushLock()));
- }
+ while (!_resourcesToUnlockAtEndOfUnitOfWork.empty()) {
+ unlock(_resourcesToUnlockAtEndOfUnitOfWork.front());
+ _resourcesToUnlockAtEndOfUnitOfWork.pop();
}
- template<bool IsForMMAPV1>
- LockResult LockerImpl<IsForMMAPV1>::lock(ResourceId resId,
- LockMode mode,
- unsigned timeoutMs,
- bool checkDeadlock) {
+ // For MMAP V1, we need to yield the flush lock so that the flush thread can run
+ if (IsForMMAPV1) {
+ invariant(unlock(resourceIdMMAPV1Flush));
+ invariant(LOCK_OK == lock(resourceIdMMAPV1Flush, _getModeForMMAPV1FlushLock()));
+ }
+}
- const LockResult result = lockBegin(resId, mode);
+template <bool IsForMMAPV1>
+LockResult LockerImpl<IsForMMAPV1>::lock(ResourceId resId,
+ LockMode mode,
+ unsigned timeoutMs,
+ bool checkDeadlock) {
+ const LockResult result = lockBegin(resId, mode);
- // Fast, uncontended path
- if (result == LOCK_OK) return LOCK_OK;
+ // Fast, uncontended path
+ if (result == LOCK_OK)
+ return LOCK_OK;
- // Currently, deadlock detection does not happen inline with lock acquisition so the only
- // unsuccessful result that the lock manager would return is LOCK_WAITING.
- invariant(result == LOCK_WAITING);
+ // Currently, deadlock detection does not happen inline with lock acquisition so the only
+ // unsuccessful result that the lock manager would return is LOCK_WAITING.
+ invariant(result == LOCK_WAITING);
- return lockComplete(resId, mode, timeoutMs, checkDeadlock);
- }
+ return lockComplete(resId, mode, timeoutMs, checkDeadlock);
+}
- template<bool IsForMMAPV1>
- void LockerImpl<IsForMMAPV1>::downgrade(ResourceId resId, LockMode newMode) {
- LockRequestsMap::Iterator it = _requests.find(resId);
- globalLockManager.downgrade(it.objAddr(), newMode);
- }
+template <bool IsForMMAPV1>
+void LockerImpl<IsForMMAPV1>::downgrade(ResourceId resId, LockMode newMode) {
+ LockRequestsMap::Iterator it = _requests.find(resId);
+ globalLockManager.downgrade(it.objAddr(), newMode);
+}
- template<bool IsForMMAPV1>
- bool LockerImpl<IsForMMAPV1>::unlock(ResourceId resId) {
- LockRequestsMap::Iterator it = _requests.find(resId);
- return _unlockImpl(it);
- }
+template <bool IsForMMAPV1>
+bool LockerImpl<IsForMMAPV1>::unlock(ResourceId resId) {
+ LockRequestsMap::Iterator it = _requests.find(resId);
+ return _unlockImpl(it);
+}
- template<bool IsForMMAPV1>
- LockMode LockerImpl<IsForMMAPV1>::getLockMode(ResourceId resId) const {
- scoped_spinlock scopedLock(_lock);
+template <bool IsForMMAPV1>
+LockMode LockerImpl<IsForMMAPV1>::getLockMode(ResourceId resId) const {
+ scoped_spinlock scopedLock(_lock);
- const LockRequestsMap::ConstIterator it = _requests.find(resId);
- if (!it) return MODE_NONE;
+ const LockRequestsMap::ConstIterator it = _requests.find(resId);
+ if (!it)
+ return MODE_NONE;
- return it->mode;
- }
+ return it->mode;
+}
- template<bool IsForMMAPV1>
- bool LockerImpl<IsForMMAPV1>::isLockHeldForMode(ResourceId resId, LockMode mode) const {
- return isModeCovered(mode, getLockMode(resId));
- }
+template <bool IsForMMAPV1>
+bool LockerImpl<IsForMMAPV1>::isLockHeldForMode(ResourceId resId, LockMode mode) const {
+ return isModeCovered(mode, getLockMode(resId));
+}
- template<bool IsForMMAPV1>
- bool LockerImpl<IsForMMAPV1>::isDbLockedForMode(StringData dbName,
- LockMode mode) const {
- invariant(nsIsDbOnly(dbName));
+template <bool IsForMMAPV1>
+bool LockerImpl<IsForMMAPV1>::isDbLockedForMode(StringData dbName, LockMode mode) const {
+ invariant(nsIsDbOnly(dbName));
- if (isW()) return true;
- if (isR() && isSharedLockMode(mode)) return true;
+ if (isW())
+ return true;
+ if (isR() && isSharedLockMode(mode))
+ return true;
- const ResourceId resIdDb(RESOURCE_DATABASE, dbName);
- return isLockHeldForMode(resIdDb, mode);
- }
+ const ResourceId resIdDb(RESOURCE_DATABASE, dbName);
+ return isLockHeldForMode(resIdDb, mode);
+}
- template<bool IsForMMAPV1>
- bool LockerImpl<IsForMMAPV1>::isCollectionLockedForMode(StringData ns,
- LockMode mode) const {
- invariant(nsIsFull(ns));
+template <bool IsForMMAPV1>
+bool LockerImpl<IsForMMAPV1>::isCollectionLockedForMode(StringData ns, LockMode mode) const {
+ invariant(nsIsFull(ns));
- if (isW()) return true;
- if (isR() && isSharedLockMode(mode)) return true;
+ if (isW())
+ return true;
+ if (isR() && isSharedLockMode(mode))
+ return true;
- const NamespaceString nss(ns);
- const ResourceId resIdDb(RESOURCE_DATABASE, nss.db());
+ const NamespaceString nss(ns);
+ const ResourceId resIdDb(RESOURCE_DATABASE, nss.db());
- LockMode dbMode = getLockMode(resIdDb);
+ LockMode dbMode = getLockMode(resIdDb);
- switch (dbMode) {
- case MODE_NONE: return false;
- case MODE_X: return true;
- case MODE_S: return isSharedLockMode(mode);
+ switch (dbMode) {
+ case MODE_NONE:
+ return false;
+ case MODE_X:
+ return true;
+ case MODE_S:
+ return isSharedLockMode(mode);
case MODE_IX:
- case MODE_IS:
- {
- const ResourceId resIdColl(RESOURCE_COLLECTION, ns);
- return isLockHeldForMode(resIdColl, mode);
- }
- break;
+ case MODE_IS: {
+ const ResourceId resIdColl(RESOURCE_COLLECTION, ns);
+ return isLockHeldForMode(resIdColl, mode);
+ } break;
case LockModesCount:
break;
- }
-
- invariant(false);
- return false;
}
- template<bool IsForMMAPV1>
- ResourceId LockerImpl<IsForMMAPV1>::getWaitingResource() const {
- scoped_spinlock scopedLock(_lock);
+ invariant(false);
+ return false;
+}
- LockRequestsMap::ConstIterator it = _requests.begin();
- while (!it.finished()) {
- if (it->status != LockRequest::STATUS_GRANTED) {
- return it.key();
- }
+template <bool IsForMMAPV1>
+ResourceId LockerImpl<IsForMMAPV1>::getWaitingResource() const {
+ scoped_spinlock scopedLock(_lock);
- it.next();
+ LockRequestsMap::ConstIterator it = _requests.begin();
+ while (!it.finished()) {
+ if (it->status != LockRequest::STATUS_GRANTED) {
+ return it.key();
}
- return ResourceId();
+ it.next();
}
- template<bool IsForMMAPV1>
- void LockerImpl<IsForMMAPV1>::getLockerInfo(LockerInfo* lockerInfo) const {
- invariant(lockerInfo);
+ return ResourceId();
+}
- // Zero-out the contents
- lockerInfo->locks.clear();
- lockerInfo->waitingResource = ResourceId();
- lockerInfo->stats.reset();
+template <bool IsForMMAPV1>
+void LockerImpl<IsForMMAPV1>::getLockerInfo(LockerInfo* lockerInfo) const {
+ invariant(lockerInfo);
- _lock.lock();
- LockRequestsMap::ConstIterator it = _requests.begin();
- while (!it.finished()) {
- OneLock info;
- info.resourceId = it.key();
- info.mode = it->mode;
+ // Zero-out the contents
+ lockerInfo->locks.clear();
+ lockerInfo->waitingResource = ResourceId();
+ lockerInfo->stats.reset();
- lockerInfo->locks.push_back(info);
- it.next();
- }
- _lock.unlock();
-
- std::sort(lockerInfo->locks.begin(), lockerInfo->locks.end());
+ _lock.lock();
+ LockRequestsMap::ConstIterator it = _requests.begin();
+ while (!it.finished()) {
+ OneLock info;
+ info.resourceId = it.key();
+ info.mode = it->mode;
- lockerInfo->waitingResource = getWaitingResource();
- lockerInfo->stats.append(_stats);
+ lockerInfo->locks.push_back(info);
+ it.next();
}
+ _lock.unlock();
- template<bool IsForMMAPV1>
- bool LockerImpl<IsForMMAPV1>::saveLockStateAndUnlock(Locker::LockSnapshot* stateOut) {
- // We shouldn't be saving and restoring lock state from inside a WriteUnitOfWork.
- invariant(!inAWriteUnitOfWork());
-
- // Clear out whatever is in stateOut.
- stateOut->locks.clear();
- stateOut->globalMode = MODE_NONE;
-
- // First, we look at the global lock. There is special handling for this (as the flush
- // lock goes along with it) so we store it separately from the more pedestrian locks.
- LockRequestsMap::Iterator globalRequest = _requests.find(resourceIdGlobal);
- if (!globalRequest) {
- // If there's no global lock there isn't really anything to do.
- invariant(_requests.empty());
- return false;
- }
-
- // If the global lock has been acquired more than once, we're probably somewhere in a
- // DBDirectClient call. It's not safe to release and reacquire locks -- the context using
- // the DBDirectClient is probably not prepared for lock release.
- if (globalRequest->recursiveCount > 1) {
- return false;
- }
+ std::sort(lockerInfo->locks.begin(), lockerInfo->locks.end());
- // The global lock must have been acquired just once
- stateOut->globalMode = globalRequest->mode;
- invariant(unlock(resourceIdGlobal));
+ lockerInfo->waitingResource = getWaitingResource();
+ lockerInfo->stats.append(_stats);
+}
- // Next, the non-global locks.
- for (LockRequestsMap::Iterator it = _requests.begin(); !it.finished(); it.next()) {
- const ResourceId resId = it.key();
+template <bool IsForMMAPV1>
+bool LockerImpl<IsForMMAPV1>::saveLockStateAndUnlock(Locker::LockSnapshot* stateOut) {
+ // We shouldn't be saving and restoring lock state from inside a WriteUnitOfWork.
+ invariant(!inAWriteUnitOfWork());
- // We should never have to save and restore metadata locks.
- invariant((IsForMMAPV1 && (resourceIdMMAPV1Flush == resId)) ||
- RESOURCE_DATABASE == resId.getType() ||
- RESOURCE_COLLECTION == resId.getType() ||
- (RESOURCE_GLOBAL == resId.getType() && isSharedLockMode(it->mode)));
+ // Clear out whatever is in stateOut.
+ stateOut->locks.clear();
+ stateOut->globalMode = MODE_NONE;
- // And, stuff the info into the out parameter.
- OneLock info;
- info.resourceId = resId;
- info.mode = it->mode;
+ // First, we look at the global lock. There is special handling for this (as the flush
+ // lock goes along with it) so we store it separately from the more pedestrian locks.
+ LockRequestsMap::Iterator globalRequest = _requests.find(resourceIdGlobal);
+ if (!globalRequest) {
+ // If there's no global lock there isn't really anything to do.
+ invariant(_requests.empty());
+ return false;
+ }
- stateOut->locks.push_back(info);
+ // If the global lock has been acquired more than once, we're probably somewhere in a
+ // DBDirectClient call. It's not safe to release and reacquire locks -- the context using
+ // the DBDirectClient is probably not prepared for lock release.
+ if (globalRequest->recursiveCount > 1) {
+ return false;
+ }
- invariant(unlock(resId));
- }
+ // The global lock must have been acquired just once
+ stateOut->globalMode = globalRequest->mode;
+ invariant(unlock(resourceIdGlobal));
- // Sort locks by ResourceId. They'll later be acquired in this canonical locking order.
- std::sort(stateOut->locks.begin(), stateOut->locks.end());
+ // Next, the non-global locks.
+ for (LockRequestsMap::Iterator it = _requests.begin(); !it.finished(); it.next()) {
+ const ResourceId resId = it.key();
- return true;
- }
+ // We should never have to save and restore metadata locks.
+ invariant((IsForMMAPV1 && (resourceIdMMAPV1Flush == resId)) ||
+ RESOURCE_DATABASE == resId.getType() || RESOURCE_COLLECTION == resId.getType() ||
+ (RESOURCE_GLOBAL == resId.getType() && isSharedLockMode(it->mode)));
- template<bool IsForMMAPV1>
- void LockerImpl<IsForMMAPV1>::restoreLockState(const Locker::LockSnapshot& state) {
- // We shouldn't be saving and restoring lock state from inside a WriteUnitOfWork.
- invariant(!inAWriteUnitOfWork());
+ // And, stuff the info into the out parameter.
+ OneLock info;
+ info.resourceId = resId;
+ info.mode = it->mode;
- std::vector<OneLock>::const_iterator it = state.locks.begin();
- // If we locked the PBWM, it must be locked before the resourceIdGlobal resource.
- if (it != state.locks.end() && it->resourceId == resourceIdParallelBatchWriterMode) {
- invariant(LOCK_OK == lock(it->resourceId, it->mode));
- it++;
- }
+ stateOut->locks.push_back(info);
- invariant(LOCK_OK == lockGlobal(state.globalMode));
- for (; it != state.locks.end(); it++) {
- // This is a sanity check that lockGlobal restored the MMAP V1 flush lock in the
- // expected mode.
- if (IsForMMAPV1 && (it->resourceId == resourceIdMMAPV1Flush)) {
- invariant(it->mode == _getModeForMMAPV1FlushLock());
- }
- else {
- invariant(LOCK_OK == lock(it->resourceId, it->mode));
- }
- }
+ invariant(unlock(resId));
}
- template<bool IsForMMAPV1>
- LockResult LockerImpl<IsForMMAPV1>::lockBegin(ResourceId resId, LockMode mode) {
- dassert(!getWaitingResource().isValid());
+ // Sort locks by ResourceId. They'll later be acquired in this canonical locking order.
+ std::sort(stateOut->locks.begin(), stateOut->locks.end());
- LockRequest* request;
- bool isNew = true;
+ return true;
+}
- LockRequestsMap::Iterator it = _requests.find(resId);
- if (!it) {
- scoped_spinlock scopedLock(_lock);
- LockRequestsMap::Iterator itNew = _requests.insert(resId);
- itNew->initNew(this, &_notify);
+template <bool IsForMMAPV1>
+void LockerImpl<IsForMMAPV1>::restoreLockState(const Locker::LockSnapshot& state) {
+ // We shouldn't be saving and restoring lock state from inside a WriteUnitOfWork.
+ invariant(!inAWriteUnitOfWork());
- request = itNew.objAddr();
- }
- else {
- request = it.objAddr();
- isNew = false;
- }
+ std::vector<OneLock>::const_iterator it = state.locks.begin();
+ // If we locked the PBWM, it must be locked before the resourceIdGlobal resource.
+ if (it != state.locks.end() && it->resourceId == resourceIdParallelBatchWriterMode) {
+ invariant(LOCK_OK == lock(it->resourceId, it->mode));
+ it++;
+ }
- // Making this call here will record lock re-acquisitions and conversions as well.
- globalStats.recordAcquisition(_id, resId, mode);
- _stats.recordAcquisition(resId, mode);
-
- // Give priority to the full modes for global, parallel batch writer mode,
- // and flush lock so we don't stall global operations such as shutdown or flush.
- const ResourceType resType = resId.getType();
- if (resType == RESOURCE_GLOBAL || (IsForMMAPV1 && resId == resourceIdMMAPV1Flush)) {
- if (mode == MODE_S || mode == MODE_X) {
- request->enqueueAtFront = true;
- request->compatibleFirst = true;
- }
- }
- else {
- // This is all sanity checks that the global and flush locks are always be acquired
- // before any other lock has been acquired and they must be in sync with the nesting.
- DEV {
- const LockRequestsMap::Iterator itGlobal = _requests.find(resourceIdGlobal);
- invariant(itGlobal->recursiveCount > 0);
- invariant(itGlobal->mode != MODE_NONE);
-
- // Check the MMAP V1 flush lock is held in the appropriate mode
- invariant(!IsForMMAPV1 || isLockHeldForMode(resourceIdMMAPV1Flush,
- _getModeForMMAPV1FlushLock()));
- };
+ invariant(LOCK_OK == lockGlobal(state.globalMode));
+ for (; it != state.locks.end(); it++) {
+ // This is a sanity check that lockGlobal restored the MMAP V1 flush lock in the
+ // expected mode.
+ if (IsForMMAPV1 && (it->resourceId == resourceIdMMAPV1Flush)) {
+ invariant(it->mode == _getModeForMMAPV1FlushLock());
+ } else {
+ invariant(LOCK_OK == lock(it->resourceId, it->mode));
}
+ }
+}
- // The notification object must be cleared before we invoke the lock manager, because
- // otherwise we might reset state if the lock becomes granted very fast.
- _notify.clear();
-
- LockResult result = isNew ? globalLockManager.lock(resId, request, mode) :
- globalLockManager.convert(resId, request, mode);
+template <bool IsForMMAPV1>
+LockResult LockerImpl<IsForMMAPV1>::lockBegin(ResourceId resId, LockMode mode) {
+ dassert(!getWaitingResource().isValid());
- if (result == LOCK_WAITING) {
- // Start counting the wait time so that lockComplete can update that metric
- _requestStartTime = curTimeMicros64();
- globalStats.recordWait(_id, resId, mode);
- _stats.recordWait(resId, mode);
- }
+ LockRequest* request;
+ bool isNew = true;
- return result;
+ LockRequestsMap::Iterator it = _requests.find(resId);
+ if (!it) {
+ scoped_spinlock scopedLock(_lock);
+ LockRequestsMap::Iterator itNew = _requests.insert(resId);
+ itNew->initNew(this, &_notify);
+
+ request = itNew.objAddr();
+ } else {
+ request = it.objAddr();
+ isNew = false;
+ }
+
+ // Making this call here will record lock re-acquisitions and conversions as well.
+ globalStats.recordAcquisition(_id, resId, mode);
+ _stats.recordAcquisition(resId, mode);
+
+ // Give priority to the full modes for global, parallel batch writer mode,
+ // and flush lock so we don't stall global operations such as shutdown or flush.
+ const ResourceType resType = resId.getType();
+ if (resType == RESOURCE_GLOBAL || (IsForMMAPV1 && resId == resourceIdMMAPV1Flush)) {
+ if (mode == MODE_S || mode == MODE_X) {
+ request->enqueueAtFront = true;
+ request->compatibleFirst = true;
+ }
+ } else {
+ // These are sanity checks that the global and flush locks are always acquired
+ // before any other lock and that they stay in sync with the nesting.
+ DEV {
+ const LockRequestsMap::Iterator itGlobal = _requests.find(resourceIdGlobal);
+ invariant(itGlobal->recursiveCount > 0);
+ invariant(itGlobal->mode != MODE_NONE);
+
+ // Check the MMAP V1 flush lock is held in the appropriate mode
+ invariant(!IsForMMAPV1 ||
+ isLockHeldForMode(resourceIdMMAPV1Flush, _getModeForMMAPV1FlushLock()));
+ };
}
- template<bool IsForMMAPV1>
- LockResult LockerImpl<IsForMMAPV1>::lockComplete(ResourceId resId,
- LockMode mode,
- unsigned timeoutMs,
- bool checkDeadlock) {
+ // The notification object must be cleared before we invoke the lock manager, because
+ // otherwise we might reset state if the lock becomes granted very fast.
+ _notify.clear();
- // Under MMAP V1 engine a deadlock can occur if a thread goes to sleep waiting on
- // DB lock, while holding the flush lock, so it has to be released. This is only
- // correct to do if not in a write unit of work.
- const bool yieldFlushLock = IsForMMAPV1 &&
- !inAWriteUnitOfWork() &&
- resId.getType() != RESOURCE_GLOBAL &&
- resId != resourceIdMMAPV1Flush;
- if (yieldFlushLock) {
- invariant(unlock(resourceIdMMAPV1Flush));
- }
+ LockResult result = isNew ? globalLockManager.lock(resId, request, mode)
+ : globalLockManager.convert(resId, request, mode);
- LockResult result;
+ if (result == LOCK_WAITING) {
+ // Start counting the wait time so that lockComplete can update that metric
+ _requestStartTime = curTimeMicros64();
+ globalStats.recordWait(_id, resId, mode);
+ _stats.recordWait(resId, mode);
+ }
- // Don't go sleeping without bound in order to be able to report long waits or wake up for
- // deadlock detection.
- unsigned waitTimeMs = std::min(timeoutMs, DeadlockTimeoutMs);
- while (true) {
- // It is OK if this call wakes up spuriously, because we re-evaluate the remaining
- // wait time anyways.
- result = _notify.wait(waitTimeMs);
+ return result;
+}
- // Account for the time spent waiting on the notification object
- const uint64_t elapsedTimeMicros = curTimeMicros64() - _requestStartTime;
- globalStats.recordWaitTime(_id, resId, mode, elapsedTimeMicros);
- _stats.recordWaitTime(resId, mode, elapsedTimeMicros);
+template <bool IsForMMAPV1>
+LockResult LockerImpl<IsForMMAPV1>::lockComplete(ResourceId resId,
+ LockMode mode,
+ unsigned timeoutMs,
+ bool checkDeadlock) {
+ // Under MMAP V1 engine a deadlock can occur if a thread goes to sleep waiting on
+ // DB lock, while holding the flush lock, so it has to be released. This is only
+ // correct to do if not in a write unit of work.
+ const bool yieldFlushLock = IsForMMAPV1 && !inAWriteUnitOfWork() &&
+ resId.getType() != RESOURCE_GLOBAL && resId != resourceIdMMAPV1Flush;
+ if (yieldFlushLock) {
+ invariant(unlock(resourceIdMMAPV1Flush));
+ }
- if (result == LOCK_OK) break;
+ LockResult result;
- if (checkDeadlock) {
- DeadlockDetector wfg(globalLockManager, this);
- if (wfg.check().hasCycle()) {
- warning() << "Deadlock found: " << wfg.toString();
+ // Don't go sleeping without bound in order to be able to report long waits or wake up for
+ // deadlock detection.
+ unsigned waitTimeMs = std::min(timeoutMs, DeadlockTimeoutMs);
+ while (true) {
+ // It is OK if this call wakes up spuriously, because we re-evaluate the remaining
+ // wait time anyways.
+ result = _notify.wait(waitTimeMs);
- globalStats.recordDeadlock(resId, mode);
- _stats.recordDeadlock(resId, mode);
+ // Account for the time spent waiting on the notification object
+ const uint64_t elapsedTimeMicros = curTimeMicros64() - _requestStartTime;
+ globalStats.recordWaitTime(_id, resId, mode, elapsedTimeMicros);
+ _stats.recordWaitTime(resId, mode, elapsedTimeMicros);
- result = LOCK_DEADLOCK;
- break;
- }
- }
+ if (result == LOCK_OK)
+ break;
- // If infinite timeout was requested, just keep waiting
- if (timeoutMs == UINT_MAX) {
- continue;
- }
+ if (checkDeadlock) {
+ DeadlockDetector wfg(globalLockManager, this);
+ if (wfg.check().hasCycle()) {
+ warning() << "Deadlock found: " << wfg.toString();
- const unsigned elapsedTimeMs = elapsedTimeMicros / 1000;
- waitTimeMs = (elapsedTimeMs < timeoutMs) ?
- std::min(timeoutMs - elapsedTimeMs, DeadlockTimeoutMs) : 0;
+ globalStats.recordDeadlock(resId, mode);
+ _stats.recordDeadlock(resId, mode);
- if (waitTimeMs == 0) {
+ result = LOCK_DEADLOCK;
break;
}
}
- // Cleanup the state, since this is an unused lock now
- if (result != LOCK_OK) {
- LockRequestsMap::Iterator it = _requests.find(resId);
- if (globalLockManager.unlock(it.objAddr())) {
- scoped_spinlock scopedLock(_lock);
- it.remove();
- }
- }
-
- if (yieldFlushLock) {
- // We cannot obey the timeout here, because it is not correct to return from the lock
- // request with the flush lock released.
- invariant(LOCK_OK == lock(resourceIdMMAPV1Flush, _getModeForMMAPV1FlushLock()));
+ // If infinite timeout was requested, just keep waiting
+ if (timeoutMs == UINT_MAX) {
+ continue;
}
- return result;
- }
+ const unsigned elapsedTimeMs = elapsedTimeMicros / 1000;
+ waitTimeMs = (elapsedTimeMs < timeoutMs)
+ ? std::min(timeoutMs - elapsedTimeMs, DeadlockTimeoutMs)
+ : 0;
- template<bool IsForMMAPV1>
- bool LockerImpl<IsForMMAPV1>::_unlockImpl(LockRequestsMap::Iterator& it) {
- if (inAWriteUnitOfWork() && shouldDelayUnlock(it.key(), it->mode)) {
- _resourcesToUnlockAtEndOfUnitOfWork.push(it.key());
- return false;
+ if (waitTimeMs == 0) {
+ break;
}
+ }
+ // Cleanup the state, since this is an unused lock now
+ if (result != LOCK_OK) {
+ LockRequestsMap::Iterator it = _requests.find(resId);
if (globalLockManager.unlock(it.objAddr())) {
scoped_spinlock scopedLock(_lock);
it.remove();
-
- return true;
}
+ }
+ if (yieldFlushLock) {
+ // We cannot obey the timeout here, because it is not correct to return from the lock
+ // request with the flush lock released.
+ invariant(LOCK_OK == lock(resourceIdMMAPV1Flush, _getModeForMMAPV1FlushLock()));
+ }
+
+ return result;
+}
+
+template <bool IsForMMAPV1>
+bool LockerImpl<IsForMMAPV1>::_unlockImpl(LockRequestsMap::Iterator& it) {
+ if (inAWriteUnitOfWork() && shouldDelayUnlock(it.key(), it->mode)) {
+ _resourcesToUnlockAtEndOfUnitOfWork.push(it.key());
return false;
}
- template<bool IsForMMAPV1>
- LockMode LockerImpl<IsForMMAPV1>::_getModeForMMAPV1FlushLock() const {
- invariant(IsForMMAPV1);
+ if (globalLockManager.unlock(it.objAddr())) {
+ scoped_spinlock scopedLock(_lock);
+ it.remove();
+
+ return true;
+ }
- LockMode mode = getLockMode(resourceIdGlobal);
- switch (mode) {
+ return false;
+}
+
+template <bool IsForMMAPV1>
+LockMode LockerImpl<IsForMMAPV1>::_getModeForMMAPV1FlushLock() const {
+ invariant(IsForMMAPV1);
+
+ LockMode mode = getLockMode(resourceIdGlobal);
+ switch (mode) {
case MODE_X:
case MODE_IX:
return MODE_IX;
@@ -771,153 +767,150 @@ namespace {
default:
invariant(false);
return MODE_NONE;
- }
}
+}
- template<bool IsForMMAPV1>
- bool LockerImpl<IsForMMAPV1>::hasStrongLocks() const {
- if (!isLocked()) return false;
-
- stdx::lock_guard<SpinLock> lk(_lock);
- LockRequestsMap::ConstIterator it = _requests.begin();
- while (!it.finished()) {
- if (it->mode == MODE_X || it->mode == MODE_S) {
- return true;
- }
+template <bool IsForMMAPV1>
+bool LockerImpl<IsForMMAPV1>::hasStrongLocks() const {
+ if (!isLocked())
+ return false;
- it.next();
+ stdx::lock_guard<SpinLock> lk(_lock);
+ LockRequestsMap::ConstIterator it = _requests.begin();
+ while (!it.finished()) {
+ if (it->mode == MODE_X || it->mode == MODE_S) {
+ return true;
}
- return false;
+ it.next();
}
+ return false;
+}
- //
- // Auto classes
- //
- AutoYieldFlushLockForMMAPV1Commit::AutoYieldFlushLockForMMAPV1Commit(Locker* locker)
- : _locker(static_cast<MMAPV1LockerImpl*>(locker)) {
+//
+// Auto classes
+//
- // Explicit yielding of the flush lock should happen only at global synchronization points
- // such as database drop. There should not be any active writes at these points.
- invariant(!_locker->inAWriteUnitOfWork());
+AutoYieldFlushLockForMMAPV1Commit::AutoYieldFlushLockForMMAPV1Commit(Locker* locker)
+ : _locker(static_cast<MMAPV1LockerImpl*>(locker)) {
+ // Explicit yielding of the flush lock should happen only at global synchronization points
+ // such as database drop. There should not be any active writes at these points.
+ invariant(!_locker->inAWriteUnitOfWork());
- if (isMMAPV1()) {
- invariant(_locker->unlock(resourceIdMMAPV1Flush));
- }
+ if (isMMAPV1()) {
+ invariant(_locker->unlock(resourceIdMMAPV1Flush));
}
+}
- AutoYieldFlushLockForMMAPV1Commit::~AutoYieldFlushLockForMMAPV1Commit() {
- if (isMMAPV1()) {
- invariant(LOCK_OK == _locker->lock(resourceIdMMAPV1Flush,
- _locker->_getModeForMMAPV1FlushLock()));
- }
+AutoYieldFlushLockForMMAPV1Commit::~AutoYieldFlushLockForMMAPV1Commit() {
+ if (isMMAPV1()) {
+ invariant(LOCK_OK ==
+ _locker->lock(resourceIdMMAPV1Flush, _locker->_getModeForMMAPV1FlushLock()));
}
+}
- AutoAcquireFlushLockForMMAPV1Commit::AutoAcquireFlushLockForMMAPV1Commit(Locker* locker)
- : _locker(locker),
- _released(false) {
-
- // The journal thread acquiring the journal lock in S-mode opens opportunity for deadlock
- // involving operations which do not acquire and release the Oplog collection's X lock
- // inside a WUOW (see SERVER-17416 for the sequence of events), therefore acquire it with
- // check for deadlock and back-off if one is encountered.
- //
- // This exposes theoretical chance that we might starve the journaling system, but given
- // that these deadlocks happen extremely rarely and are usually due to incorrect locking
- // policy, and we have the deadlock counters as part of the locking statistics, this is a
- // reasonable handling.
- //
- // In the worst case, if we are to starve the journaling system, the server will shut down
- // due to too much uncommitted in-memory journal, but won't have corruption.
+AutoAcquireFlushLockForMMAPV1Commit::AutoAcquireFlushLockForMMAPV1Commit(Locker* locker)
+ : _locker(locker), _released(false) {
+ // The journal thread acquiring the journal lock in S-mode opens opportunity for deadlock
+ // involving operations which do not acquire and release the Oplog collection's X lock
+ // inside a WUOW (see SERVER-17416 for the sequence of events), therefore acquire it with
+ // check for deadlock and back-off if one is encountered.
+ //
+ // This exposes theoretical chance that we might starve the journaling system, but given
+ // that these deadlocks happen extremely rarely and are usually due to incorrect locking
+ // policy, and we have the deadlock counters as part of the locking statistics, this is a
+ // reasonable handling.
+ //
+ // In the worst case, if we are to starve the journaling system, the server will shut down
+ // due to too much uncommitted in-memory journal, but won't have corruption.
- while (true) {
- LockResult result = _locker->lock(resourceIdMMAPV1Flush, MODE_S, UINT_MAX, true);
- if (result == LOCK_OK) {
- break;
- }
+ while (true) {
+ LockResult result = _locker->lock(resourceIdMMAPV1Flush, MODE_S, UINT_MAX, true);
+ if (result == LOCK_OK) {
+ break;
+ }
- invariant(result == LOCK_DEADLOCK);
+ invariant(result == LOCK_DEADLOCK);
- warning() << "Delayed journaling in order to avoid deadlock during MMAP V1 journal " <<
- "lock acquisition. See the previous messages for information on the " <<
- "involved threads.";
- }
+ warning() << "Delayed journaling in order to avoid deadlock during MMAP V1 journal "
+ << "lock acquisition. See the previous messages for information on the "
+ << "involved threads.";
}
+}
- void AutoAcquireFlushLockForMMAPV1Commit::upgradeFlushLockToExclusive() {
- // This should not be able to deadlock, since we already hold the S journal lock, which
- // means all writers are kicked out. Readers always yield the journal lock if they block
- // waiting on any other lock.
- invariant(LOCK_OK == _locker->lock(resourceIdMMAPV1Flush, MODE_X, UINT_MAX, false));
+void AutoAcquireFlushLockForMMAPV1Commit::upgradeFlushLockToExclusive() {
+ // This should not be able to deadlock, since we already hold the S journal lock, which
+ // means all writers are kicked out. Readers always yield the journal lock if they block
+ // waiting on any other lock.
+ invariant(LOCK_OK == _locker->lock(resourceIdMMAPV1Flush, MODE_X, UINT_MAX, false));
- // Lock bumps the recursive count. Drop it back down so that the destructor doesn't
- // complain.
- invariant(!_locker->unlock(resourceIdMMAPV1Flush));
- }
+ // Lock bumps the recursive count. Drop it back down so that the destructor doesn't
+ // complain.
+ invariant(!_locker->unlock(resourceIdMMAPV1Flush));
+}
- void AutoAcquireFlushLockForMMAPV1Commit::release() {
- if (!_released) {
- invariant(_locker->unlock(resourceIdMMAPV1Flush));
- _released = true;
- }
+void AutoAcquireFlushLockForMMAPV1Commit::release() {
+ if (!_released) {
+ invariant(_locker->unlock(resourceIdMMAPV1Flush));
+ _released = true;
}
+}
- AutoAcquireFlushLockForMMAPV1Commit::~AutoAcquireFlushLockForMMAPV1Commit() {
- release();
- }
+AutoAcquireFlushLockForMMAPV1Commit::~AutoAcquireFlushLockForMMAPV1Commit() {
+ release();
+}
namespace {
- /**
- * Periodically purges unused lock buckets. The first time the lock is used again after
- * cleanup it needs to be allocated, and similarly, every first use by a client for an intent
- * mode may need to create a partitioned lock head. Cleanup is done roughtly once a minute.
- */
- class UnusedLockCleaner : PeriodicTask {
- public:
- std::string taskName() const {
- return "UnusedLockCleaner";
- }
+/**
+ * Periodically purges unused lock buckets. The first time the lock is used again after
+ * cleanup it needs to be allocated, and similarly, every first use by a client for an intent
+ * mode may need to create a partitioned lock head. Cleanup is done roughly once a minute.
+ */
+class UnusedLockCleaner : PeriodicTask {
+public:
+ std::string taskName() const {
+ return "UnusedLockCleaner";
+ }
- void taskDoWork() {
- LOG(2) << "cleaning up unused lock buckets of the global lock manager";
- getGlobalLockManager()->cleanupUnusedLocks();
- }
- } unusedLockCleaner;
-} // namespace
+ void taskDoWork() {
+ LOG(2) << "cleaning up unused lock buckets of the global lock manager";
+ getGlobalLockManager()->cleanupUnusedLocks();
+ }
+} unusedLockCleaner;
+} // namespace
- //
- // Standalone functions
- //
+//
+// Standalone functions
+//
- LockManager* getGlobalLockManager() {
- return &globalLockManager;
- }
+LockManager* getGlobalLockManager() {
+ return &globalLockManager;
+}
- void reportGlobalLockingStats(SingleThreadedLockStats* outStats) {
- globalStats.report(outStats);
- }
+void reportGlobalLockingStats(SingleThreadedLockStats* outStats) {
+ globalStats.report(outStats);
+}
+
+void resetGlobalLockStats() {
+ globalStats.reset();
+}
- void resetGlobalLockStats() {
- globalStats.reset();
- }
-
- // Ensures that there are two instances compiled for LockerImpl for the two values of the
- // template argument.
- template class LockerImpl<true>;
- template class LockerImpl<false>;
+// Ensures that there are two instances compiled for LockerImpl for the two values of the
+// template argument.
+template class LockerImpl<true>;
+template class LockerImpl<false>;
- // Definition for the hardcoded localdb and oplog collection info
- const ResourceId resourceIdLocalDB = ResourceId(RESOURCE_DATABASE, StringData("local"));
- const ResourceId resourceIdOplog =
- ResourceId(RESOURCE_COLLECTION, StringData("local.oplog.rs"));
- const ResourceId resourceIdAdminDB = ResourceId(RESOURCE_DATABASE, StringData("admin"));
- const ResourceId resourceIdParallelBatchWriterMode =
- ResourceId(RESOURCE_GLOBAL, ResourceId::SINGLETON_PARALLEL_BATCH_WRITER_MODE);
+// Definition for the hardcoded localdb and oplog collection info
+const ResourceId resourceIdLocalDB = ResourceId(RESOURCE_DATABASE, StringData("local"));
+const ResourceId resourceIdOplog = ResourceId(RESOURCE_COLLECTION, StringData("local.oplog.rs"));
+const ResourceId resourceIdAdminDB = ResourceId(RESOURCE_DATABASE, StringData("admin"));
+const ResourceId resourceIdParallelBatchWriterMode =
+ ResourceId(RESOURCE_GLOBAL, ResourceId::SINGLETON_PARALLEL_BATCH_WRITER_MODE);
-} // namespace mongo
+} // namespace mongo
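
The delayed-unlock behavior of _unlockImpl/shouldDelayUnlock can be seen end to end by driving a locker directly. A minimal sketch for the non-MMAPv1 instantiation, assuming the single-threaded usage the Locker interface requires (the database name is arbitrary):

    LockerImpl<false> locker;
    invariant(LOCK_OK == locker.lockGlobal(MODE_IX));

    const ResourceId resId(RESOURCE_DATABASE, StringData("test"));
    invariant(LOCK_OK == locker.lock(resId, MODE_X));

    locker.beginWriteUnitOfWork();

    // Inside a WUOW exclusive locks are queued rather than released, so
    // unlock() reports that the lock is still held.
    invariant(!locker.unlock(resId));
    invariant(locker.isLockHeldForMode(resId, MODE_X));

    // Leaving the outermost WUOW drains the queued unlock requests.
    locker.endWriteUnitOfWork();
    invariant(locker.getLockMode(resId) == MODE_NONE);

    invariant(locker.unlockAll());
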
diff --git a/src/mongo/db/concurrency/lock_state.h b/src/mongo/db/concurrency/lock_state.h
index ca16767eb49..f8c696b786c 100644
--- a/src/mongo/db/concurrency/lock_state.h
+++ b/src/mongo/db/concurrency/lock_state.h
@@ -36,286 +36,293 @@
namespace mongo {
+/**
+ * Notification callback, which stores the last notification result and signals a condition
+ * variable, which can be waited on.
+ */
+class CondVarLockGrantNotification : public LockGrantNotification {
+ MONGO_DISALLOW_COPYING(CondVarLockGrantNotification);
+
+public:
+ CondVarLockGrantNotification();
+
+ /**
+ * Clears the object so it can be reused.
+ */
+ void clear();
+
+ /**
+ * Uninterruptible blocking method, which waits for the notification to fire.
+ *
+ * @param timeoutMs How many milliseconds to wait before returning LOCK_TIMEOUT.
+ */
+ LockResult wait(unsigned timeoutMs);
+
+private:
+ virtual void notify(ResourceId resId, LockResult result);
+
+ // These two go together to implement the conditional variable pattern.
+ stdx::mutex _mutex;
+ stdx::condition_variable _cond;
+
+ // Result from the last call to notify
+ LockResult _result;
+};
+
+
+/**
+ * Interface for acquiring locks. One of these objects must be instantiated for each
+ * request (transaction).
+ *
+ * Lock/unlock methods must always be called from a single thread.
+ *
+ * All instances reference a single global lock manager.
+ *
+ * @param IsForMMAPV1 Whether to compile-in the flush lock functionality, which is specific to
+ * the way the MMAP V1 (legacy) storage engine does commit concurrency control.
+ */
+template <bool IsForMMAPV1>
+class LockerImpl : public Locker {
+public:
/**
- * Notfication callback, which stores the last notification result and signals a condition
- * variable, which can be waited on.
+ * Instantiates new locker. Must be given a unique identifier for disambiguation. Lockers
+ * having the same identifier will not conflict on lock acquisition.
*/
- class CondVarLockGrantNotification : public LockGrantNotification {
- MONGO_DISALLOW_COPYING(CondVarLockGrantNotification);
- public:
- CondVarLockGrantNotification();
+ LockerImpl();
+
+ virtual ~LockerImpl();
+
+ virtual LockerId getId() const {
+ return _id;
+ }
+
+ virtual LockResult lockGlobal(LockMode mode, unsigned timeoutMs = UINT_MAX);
+ virtual LockResult lockGlobalBegin(LockMode mode);
+ virtual LockResult lockGlobalComplete(unsigned timeoutMs);
+ virtual void lockMMAPV1Flush();
+
+ virtual void downgradeGlobalXtoSForMMAPV1();
+ virtual bool unlockAll();
- /**
- * Clears the object so it can be reused.
- */
- void clear();
+ virtual void beginWriteUnitOfWork();
+ virtual void endWriteUnitOfWork();
- /**
- * Uninterruptible blocking method, which waits for the notification to fire.
- *
- * @param timeoutMs How many milliseconds to wait before returning LOCK_TIMEOUT.
- */
- LockResult wait(unsigned timeoutMs);
+ virtual bool inAWriteUnitOfWork() const {
+ return _wuowNestingLevel > 0;
+ }
- private:
+ virtual LockResult lock(ResourceId resId,
+ LockMode mode,
+ unsigned timeoutMs = UINT_MAX,
+ bool checkDeadlock = false);
- virtual void notify(ResourceId resId, LockResult result);
+ virtual void downgrade(ResourceId resId, LockMode newMode);
- // These two go together to implement the conditional variable pattern.
- stdx::mutex _mutex;
- stdx::condition_variable _cond;
+ virtual bool unlock(ResourceId resId);
- // Result from the last call to notify
- LockResult _result;
- };
+ virtual LockMode getLockMode(ResourceId resId) const;
+ virtual bool isLockHeldForMode(ResourceId resId, LockMode mode) const;
+ virtual bool isDbLockedForMode(StringData dbName, LockMode mode) const;
+ virtual bool isCollectionLockedForMode(StringData ns, LockMode mode) const;
+ virtual ResourceId getWaitingResource() const;
+
+ virtual void getLockerInfo(LockerInfo* lockerInfo) const;
+
+ virtual bool saveLockStateAndUnlock(LockSnapshot* stateOut);
+
+ virtual void restoreLockState(const LockSnapshot& stateToRestore);
/**
- * Interface for acquiring locks. One of those objects will have to be instantiated for each
- * request (transaction).
+ * Allows lock requests to be made in a non-blocking way. There can be only one
+ * outstanding pending lock request per locker object.
+ *
+ * lockBegin posts a request to the lock manager for the specified lock to be acquired,
+ * which either immediately grants the lock, or puts the requestor on the conflict queue
+ * and returns immediately with the result of the acquisition. The result can be one of:
*
- * Lock/unlock methods must always be called from a single thread.
+ * LOCK_OK - Nothing more needs to be done. The lock is granted.
+ * LOCK_WAITING - The request has been queued up and will be granted as soon as the lock
+ * is free. If this result is returned, typically lockComplete needs to be called in
+ * order to wait for the actual grant to occur. If the caller no longer needs to wait
+ * for the grant to happen, unlock needs to be called with the same resource passed
+ * to lockBegin.
*
- * All instances reference a single global lock manager.
+ * In other words, for each call to lockBegin that does not return LOCK_OK, there must
+ * be a corresponding call to either lockComplete or unlock.
*
- * @param IsForMMAPV1 Whether to compile-in the flush lock functionality, which is specific to
- * the way the MMAP V1 (legacy) storag engine does commit concurrency control.
+ * NOTE: These methods are not part of the public interface and should only be used
+ * inside the class implementation and in unit-tests, never called directly.
*/
- template<bool IsForMMAPV1>
- class LockerImpl : public Locker {
- public:
-
- /**
- * Instantiates new locker. Must be given a unique identifier for disambiguation. Lockers
- * having the same identifier will not conflict on lock acquisition.
- */
- LockerImpl();
-
- virtual ~LockerImpl();
-
- virtual LockerId getId() const { return _id; }
-
- virtual LockResult lockGlobal(LockMode mode, unsigned timeoutMs = UINT_MAX);
- virtual LockResult lockGlobalBegin(LockMode mode);
- virtual LockResult lockGlobalComplete(unsigned timeoutMs);
- virtual void lockMMAPV1Flush();
-
- virtual void downgradeGlobalXtoSForMMAPV1();
- virtual bool unlockAll();
-
- virtual void beginWriteUnitOfWork();
- virtual void endWriteUnitOfWork();
-
- virtual bool inAWriteUnitOfWork() const { return _wuowNestingLevel > 0; }
-
- virtual LockResult lock(ResourceId resId,
- LockMode mode,
- unsigned timeoutMs = UINT_MAX,
- bool checkDeadlock = false);
-
- virtual void downgrade(ResourceId resId, LockMode newMode);
-
- virtual bool unlock(ResourceId resId);
-
- virtual LockMode getLockMode(ResourceId resId) const;
- virtual bool isLockHeldForMode(ResourceId resId, LockMode mode) const;
- virtual bool isDbLockedForMode(StringData dbName, LockMode mode) const;
- virtual bool isCollectionLockedForMode(StringData ns, LockMode mode) const;
-
- virtual ResourceId getWaitingResource() const;
-
- virtual void getLockerInfo(LockerInfo* lockerInfo) const;
-
- virtual bool saveLockStateAndUnlock(LockSnapshot* stateOut);
-
- virtual void restoreLockState(const LockSnapshot& stateToRestore);
-
- /**
- * Allows for lock requests to be requested in a non-blocking way. There can be only one
- * outstanding pending lock request per locker object.
- *
- * lockBegin posts a request to the lock manager for the specified lock to be acquired,
- * which either immediately grants the lock, or puts the requestor on the conflict queue
- * and returns immediately with the result of the acquisition. The result can be one of:
- *
- * LOCK_OK - Nothing more needs to be done. The lock is granted.
- * LOCK_WAITING - The request has been queued up and will be granted as soon as the lock
- * is free. If this result is returned, typically lockComplete needs to be called in
- * order to wait for the actual grant to occur. If the caller no longer needs to wait
- * for the grant to happen, unlock needs to be called with the same resource passed
- * to lockBegin.
- *
- * In other words for each call to lockBegin, which does not return LOCK_OK, there needs to
- * be a corresponding call to either lockComplete or unlock.
- *
- * NOTE: These methods are not public and should only be used inside the class
- * implementation and for unit-tests and not called directly.
- */
- LockResult lockBegin(ResourceId resId, LockMode mode);
+ LockResult lockBegin(ResourceId resId, LockMode mode);
- /**
- * Waits for the completion of a lock, previously requested through lockBegin or
- * lockGlobalBegin. Must only be called, if lockBegin returned LOCK_WAITING.
- *
- * @param resId Resource id which was passed to an earlier lockBegin call. Must match.
- * @param mode Mode which was passed to an earlier lockBegin call. Must match.
- * @param timeoutMs How long to wait for the lock acquisition to complete.
- * @param checkDeadlock whether to perform deadlock detection while waiting.
- */
- LockResult lockComplete(ResourceId resId,
- LockMode mode,
- unsigned timeoutMs,
- bool checkDeadlock);
-
- private:
+ /**
+ * Waits for the completion of a lock, previously requested through lockBegin or
+ * lockGlobalBegin. Must only be called if lockBegin returned LOCK_WAITING.
+ *
+ * @param resId Resource id which was passed to an earlier lockBegin call. Must match.
+ * @param mode Mode which was passed to an earlier lockBegin call. Must match.
+ * @param timeoutMs How long to wait for the lock acquisition to complete.
+ * @param checkDeadlock whether to perform deadlock detection while waiting.
+ */
+ LockResult lockComplete(ResourceId resId,
+ LockMode mode,
+ unsigned timeoutMs,
+ bool checkDeadlock);
- friend class AutoYieldFlushLockForMMAPV1Commit;
+private:
+ friend class AutoYieldFlushLockForMMAPV1Commit;
- typedef FastMapNoAlloc<ResourceId, LockRequest, 16> LockRequestsMap;
+ typedef FastMapNoAlloc<ResourceId, LockRequest, 16> LockRequestsMap;
- /**
- * The main functionality of the unlock method, except accepts iterator in order to avoid
- * additional lookups during unlockAll.
- */
- bool _unlockImpl(LockRequestsMap::Iterator& it);
+ /**
+ * The main functionality of the unlock method, except it accepts an iterator in order
+ * to avoid additional lookups during unlockAll.
+ */
+ bool _unlockImpl(LockRequestsMap::Iterator& it);
- /**
- * MMAP V1 locking code yields and re-acquires the flush lock occasionally in order to
- * allow the flush thread proceed. This call returns in what mode the flush lock should be
- * acquired. It is based on the type of the operation (IS for readers, IX for writers).
- */
- LockMode _getModeForMMAPV1FlushLock() const;
+ /**
+ * MMAP V1 locking code yields and re-acquires the flush lock occasionally in order to
+ * allow the flush thread to proceed. This call returns the mode in which the flush lock
+ * should be acquired, based on the type of the operation (IS for readers, IX for writers).
+ */
+ LockMode _getModeForMMAPV1FlushLock() const;
- // Used to disambiguate different lockers
- const LockerId _id;
+ // Used to disambiguate different lockers
+ const LockerId _id;
- // The only reason we have this spin lock here is for the diagnostic tools, which could
- // iterate through the LockRequestsMap on a separate thread and need it to be stable.
- // Apart from that, all accesses to the LockerImpl are always from a single thread.
- //
- // This has to be locked inside const methods, hence the mutable.
- mutable SpinLock _lock;
- LockRequestsMap _requests;
+ // The only reason we have this spin lock here is for the diagnostic tools, which could
+ // iterate through the LockRequestsMap on a separate thread and need it to be stable.
+ // Apart from that, all accesses to the LockerImpl are always from a single thread.
+ //
+ // This has to be locked inside const methods, hence the mutable.
+ mutable SpinLock _lock;
+ LockRequestsMap _requests;
- // Reuse the notification object across requests so we don't have to create a new mutex
- // and condition variable every time.
- CondVarLockGrantNotification _notify;
+ // Reuse the notification object across requests so we don't have to create a new mutex
+ // and condition variable every time.
+ CondVarLockGrantNotification _notify;
- // Timer for measuring duration and timeouts. This value is set when lock acquisition is
- // about to wait and is sampled at grant time.
- uint64_t _requestStartTime;
+ // Timer for measuring duration and timeouts. This value is set when lock acquisition is
+ // about to wait and is sampled at grant time.
+ uint64_t _requestStartTime;
- // Per-locker locking statistics. Reported in the slow-query log message and through
- // db.currentOp. Complementary to the per-instance locking statistics.
- SingleThreadedLockStats _stats;
+ // Per-locker locking statistics. Reported in the slow-query log message and through
+ // db.currentOp. Complementary to the per-instance locking statistics.
+ SingleThreadedLockStats _stats;
- // Delays release of exclusive/intent-exclusive locked resources until the write unit of
- // work completes. Value of 0 means we are not inside a write unit of work.
- int _wuowNestingLevel;
- std::queue<ResourceId> _resourcesToUnlockAtEndOfUnitOfWork;
+ // Delays release of exclusive/intent-exclusive locked resources until the write unit of
+ // work completes. Value of 0 means we are not inside a write unit of work.
+ int _wuowNestingLevel;
+ std::queue<ResourceId> _resourcesToUnlockAtEndOfUnitOfWork;
- //////////////////////////////////////////////////////////////////////////////////////////
- //
- // Methods merged from LockState, which should eventually be removed or changed to methods
- // on the LockerImpl interface.
- //
+ //////////////////////////////////////////////////////////////////////////////////////////
+ //
+ // Methods merged from LockState, which should eventually be removed or changed to methods
+ // on the LockerImpl interface.
+ //
- public:
+public:
+ virtual void dump() const;
- virtual void dump() const;
-
- virtual bool isW() const;
- virtual bool isR() const;
-
- virtual bool isLocked() const;
- virtual bool isWriteLocked() const;
- virtual bool isReadLocked() const;
-
- virtual void assertEmptyAndReset();
-
- virtual bool hasLockPending() const { return getWaitingResource().isValid(); }
+ virtual bool isW() const;
+ virtual bool isR() const;
- virtual void setIsBatchWriter(bool newValue) { _batchWriter = newValue; }
- virtual bool isBatchWriter() const { return _batchWriter; }
+ virtual bool isLocked() const;
+ virtual bool isWriteLocked() const;
+ virtual bool isReadLocked() const;
- virtual bool hasStrongLocks() const;
+ virtual void assertEmptyAndReset();
- private:
- bool _batchWriter;
- };
+ virtual bool hasLockPending() const {
+ return getWaitingResource().isValid();
+ }
- typedef LockerImpl<false> DefaultLockerImpl;
- typedef LockerImpl<true> MMAPV1LockerImpl;
+ virtual void setIsBatchWriter(bool newValue) {
+ _batchWriter = newValue;
+ }
+ virtual bool isBatchWriter() const {
+ return _batchWriter;
+ }
+ virtual bool hasStrongLocks() const;
- /**
- * At global synchronization points, such as drop database we are running under a global
- * exclusive lock and without an active write unit of work, doing changes which require global
- * commit. This utility allows the flush lock to be temporarily dropped so the flush thread
- * could run in such circumstances. Should not be used where write units of work are used,
- * because these have different mechanism of yielding the flush lock.
- */
- class AutoYieldFlushLockForMMAPV1Commit {
- public:
- AutoYieldFlushLockForMMAPV1Commit(Locker* locker);
- ~AutoYieldFlushLockForMMAPV1Commit();
+private:
+ bool _batchWriter;
+};
- private:
- MMAPV1LockerImpl* const _locker;
- };
+typedef LockerImpl<false> DefaultLockerImpl;
+typedef LockerImpl<true> MMAPV1LockerImpl;
- /**
- * This explains how the MMAP V1 durability system is implemented.
- *
- * Every server operation (OperationContext), must call Locker::lockGlobal as the first lock
- * action (it is illegal to acquire any other locks without calling this first). This action
- * acquires the global and flush locks in the appropriate modes (IS for read operations, IX
- * for write operations). Having the flush lock in one of these modes indicates to the flush
- * thread that there is an active reader or writer.
- *
- * Whenever the flush thread(dur.cpp) activates, it goes through the following steps :
- *
- * Acquire the flush lock in S mode using AutoAcquireFlushLockForMMAPV1Commit. This waits until
- * all current write activity on the system completes and does not allow any new operations to
- * start.
- *
- * Once the S lock is granted, the flush thread writes the journal entries to disk (it is
- * guaranteed that there will not be any modifications) and applies them to the shared view.
- *
- * After that, it upgrades the S lock to X and remaps the private view.
- *
- * NOTE: There should be only one usage of this class and this should be in dur.cpp
- */
- class AutoAcquireFlushLockForMMAPV1Commit {
- public:
- AutoAcquireFlushLockForMMAPV1Commit(Locker* locker);
- ~AutoAcquireFlushLockForMMAPV1Commit();
+/**
+ * At global synchronization points, such as dropping a database, we run under a global
+ * exclusive lock and without an active write unit of work, making changes which require
+ * global commit. This utility allows the flush lock to be temporarily dropped so that the
+ * flush thread can run in such circumstances. It should not be used where write units of
+ * work are used, because those have a different mechanism for yielding the flush lock.
+ */
+class AutoYieldFlushLockForMMAPV1Commit {
+public:
+ AutoYieldFlushLockForMMAPV1Commit(Locker* locker);
+ ~AutoYieldFlushLockForMMAPV1Commit();
- /**
- * We need the exclusive lock in order to do the shared view remap.
- */
- void upgradeFlushLockToExclusive();
+private:
+ MMAPV1LockerImpl* const _locker;
+};
- /**
- * Allows the acquired flush lock to be prematurely released. This is helpful for the case
- * where we know that we won't be doing a remap after gathering the write intents, so the
- * rest can be done outside of flush lock.
- */
- void release();
- private:
- Locker* const _locker;
- bool _released;
- };
+/**
+ * This explains how the MMAP V1 durability system is implemented.
+ *
+ * Every server operation (OperationContext) must call Locker::lockGlobal as the first lock
+ * action (it is illegal to acquire any other locks without calling this first). This action
+ * acquires the global and flush locks in the appropriate modes (IS for read operations, IX
+ * for write operations). Having the flush lock in one of these modes indicates to the flush
+ * thread that there is an active reader or writer.
+ *
+ * Whenever the flush thread (dur.cpp) activates, it goes through the following steps:
+ *
+ * Acquire the flush lock in S mode using AutoAcquireFlushLockForMMAPV1Commit. This waits until
+ * all current write activity on the system completes and does not allow any new operations to
+ * start.
+ *
+ * Once the S lock is granted, the flush thread writes the journal entries to disk (it is
+ * guaranteed that there will not be any modifications) and applies them to the shared view.
+ *
+ * After that, it upgrades the S lock to X and remaps the private view.
+ *
+ * NOTE: There should be only one user of this class, and it should be in dur.cpp.
+ */
+class AutoAcquireFlushLockForMMAPV1Commit {
+public:
+ AutoAcquireFlushLockForMMAPV1Commit(Locker* locker);
+ ~AutoAcquireFlushLockForMMAPV1Commit();
+ /**
+ * We need the exclusive lock in order to do the shared view remap.
+ */
+ void upgradeFlushLockToExclusive();
/**
- * Retrieves the global lock manager instance.
+ * Allows the acquired flush lock to be prematurely released. This is helpful for the case
+ * where we know that we won't be doing a remap after gathering the write intents, so the
+ * rest can be done outside of the flush lock.
*/
- LockManager* getGlobalLockManager();
+ void release();
+
+private:
+ Locker* const _locker;
+ bool _released;
+};
+
+
+/**
+ * Retrieves the global lock manager instance.
+ */
+LockManager* getGlobalLockManager();
-} // namespace mongo
+} // namespace mongo
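The lockBegin/lockComplete contract documented in this header is easiest to see end to end. A minimal sketch, assuming the DefaultLockerImpl typedef above and a collection resource; per the comment, every lockBegin that does not return LOCK_OK is paired with a lockComplete (or an unlock of the same resource):

    DefaultLockerImpl locker;
    locker.lockGlobal(MODE_IX);

    const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));

    LockResult result = locker.lockBegin(resId, MODE_X);
    if (result == LOCK_WAITING) {
        // Wait up to 500ms for the grant, without deadlock detection. This
        // call is the required pairing for the lockBegin above.
        result = locker.lockComplete(resId, MODE_X, 500, false);
    }

    if (result == LOCK_OK) {
        // ... operate on the collection ...
        locker.unlock(resId);
    }

    locker.unlockAll();
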
diff --git a/src/mongo/db/concurrency/lock_state_test.cpp b/src/mongo/db/concurrency/lock_state_test.cpp
index 54f305f20df..cb3d87aa1ad 100644
--- a/src/mongo/db/concurrency/lock_state_test.cpp
+++ b/src/mongo/db/concurrency/lock_state_test.cpp
@@ -40,280 +40,278 @@
namespace mongo {
namespace {
- const int NUM_PERF_ITERS = 1000*1000; // numeber of iterations to use for lock perf
+const int NUM_PERF_ITERS = 1000 * 1000;  // number of iterations to use for lock perf
}
- TEST(LockerImpl, LockNoConflict) {
- const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
+TEST(LockerImpl, LockNoConflict) {
+ const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
- MMAPV1LockerImpl locker;
- locker.lockGlobal(MODE_IX);
+ MMAPV1LockerImpl locker;
+ locker.lockGlobal(MODE_IX);
- ASSERT(LOCK_OK == locker.lock(resId, MODE_X));
+ ASSERT(LOCK_OK == locker.lock(resId, MODE_X));
- ASSERT(locker.isLockHeldForMode(resId, MODE_X));
- ASSERT(locker.isLockHeldForMode(resId, MODE_S));
+ ASSERT(locker.isLockHeldForMode(resId, MODE_X));
+ ASSERT(locker.isLockHeldForMode(resId, MODE_S));
- ASSERT(locker.unlock(resId));
+ ASSERT(locker.unlock(resId));
- ASSERT(locker.isLockHeldForMode(resId, MODE_NONE));
+ ASSERT(locker.isLockHeldForMode(resId, MODE_NONE));
- locker.unlockAll();
- }
+ locker.unlockAll();
+}
- TEST(LockerImpl, ReLockNoConflict) {
- const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
+TEST(LockerImpl, ReLockNoConflict) {
+ const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
- MMAPV1LockerImpl locker;
- locker.lockGlobal(MODE_IX);
+ MMAPV1LockerImpl locker;
+ locker.lockGlobal(MODE_IX);
- ASSERT(LOCK_OK == locker.lock(resId, MODE_S));
- ASSERT(LOCK_OK == locker.lock(resId, MODE_X));
+ ASSERT(LOCK_OK == locker.lock(resId, MODE_S));
+ ASSERT(LOCK_OK == locker.lock(resId, MODE_X));
- ASSERT(!locker.unlock(resId));
- ASSERT(locker.isLockHeldForMode(resId, MODE_X));
+ ASSERT(!locker.unlock(resId));
+ ASSERT(locker.isLockHeldForMode(resId, MODE_X));
- ASSERT(locker.unlock(resId));
- ASSERT(locker.isLockHeldForMode(resId, MODE_NONE));
+ ASSERT(locker.unlock(resId));
+ ASSERT(locker.isLockHeldForMode(resId, MODE_NONE));
- ASSERT(locker.unlockAll());
- }
+ ASSERT(locker.unlockAll());
+}
- TEST(LockerImpl, ConflictWithTimeout) {
- const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
+TEST(LockerImpl, ConflictWithTimeout) {
+ const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
- DefaultLockerImpl locker1;
- ASSERT(LOCK_OK == locker1.lockGlobal(MODE_IX));
- ASSERT(LOCK_OK == locker1.lock(resId, MODE_X));
+ DefaultLockerImpl locker1;
+ ASSERT(LOCK_OK == locker1.lockGlobal(MODE_IX));
+ ASSERT(LOCK_OK == locker1.lock(resId, MODE_X));
- DefaultLockerImpl locker2;
- ASSERT(LOCK_OK == locker2.lockGlobal(MODE_IX));
- ASSERT(LOCK_TIMEOUT == locker2.lock(resId, MODE_S, 0));
+ DefaultLockerImpl locker2;
+ ASSERT(LOCK_OK == locker2.lockGlobal(MODE_IX));
+ ASSERT(LOCK_TIMEOUT == locker2.lock(resId, MODE_S, 0));
- ASSERT(locker2.getLockMode(resId) == MODE_NONE);
+ ASSERT(locker2.getLockMode(resId) == MODE_NONE);
- ASSERT(locker1.unlock(resId));
+ ASSERT(locker1.unlock(resId));
- ASSERT(locker1.unlockAll());
- ASSERT(locker2.unlockAll());
- }
+ ASSERT(locker1.unlockAll());
+ ASSERT(locker2.unlockAll());
+}
- TEST(LockerImpl, ConflictUpgradeWithTimeout) {
- const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
+TEST(LockerImpl, ConflictUpgradeWithTimeout) {
+ const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
- DefaultLockerImpl locker1;
- ASSERT(LOCK_OK == locker1.lockGlobal(MODE_IS));
- ASSERT(LOCK_OK == locker1.lock(resId, MODE_S));
+ DefaultLockerImpl locker1;
+ ASSERT(LOCK_OK == locker1.lockGlobal(MODE_IS));
+ ASSERT(LOCK_OK == locker1.lock(resId, MODE_S));
- DefaultLockerImpl locker2;
- ASSERT(LOCK_OK == locker2.lockGlobal(MODE_IS));
- ASSERT(LOCK_OK == locker2.lock(resId, MODE_S));
+ DefaultLockerImpl locker2;
+ ASSERT(LOCK_OK == locker2.lockGlobal(MODE_IS));
+ ASSERT(LOCK_OK == locker2.lock(resId, MODE_S));
- // Try upgrading locker 1, which should block and timeout
- ASSERT(LOCK_TIMEOUT == locker1.lock(resId, MODE_X, 1));
+ // Try upgrading locker 1, which should block and timeout
+ ASSERT(LOCK_TIMEOUT == locker1.lock(resId, MODE_X, 1));
- locker1.unlockAll();
- locker2.unlockAll();
- }
+ locker1.unlockAll();
+ locker2.unlockAll();
+}
- TEST(LockerImpl, ReadTransaction) {
- DefaultLockerImpl locker;
+TEST(LockerImpl, ReadTransaction) {
+ DefaultLockerImpl locker;
- locker.lockGlobal(MODE_IS);
- locker.unlockAll();
+ locker.lockGlobal(MODE_IS);
+ locker.unlockAll();
- locker.lockGlobal(MODE_IX);
- locker.unlockAll();
+ locker.lockGlobal(MODE_IX);
+ locker.unlockAll();
- locker.lockGlobal(MODE_IX);
- locker.lockGlobal(MODE_IS);
- locker.unlockAll();
- locker.unlockAll();
- }
+ locker.lockGlobal(MODE_IX);
+ locker.lockGlobal(MODE_IS);
+ locker.unlockAll();
+ locker.unlockAll();
+}
- /**
- * Test that saveMMAPV1LockerImpl works by examining the output.
- */
- TEST(LockerImpl, saveAndRestoreGlobal) {
- Locker::LockSnapshot lockInfo;
+/**
+ * Test that saveMMAPV1LockerImpl works by examining the output.
+ */
+TEST(LockerImpl, saveAndRestoreGlobal) {
+ Locker::LockSnapshot lockInfo;
- DefaultLockerImpl locker;
+ DefaultLockerImpl locker;
- // No lock requests made, no locks held.
- locker.saveLockStateAndUnlock(&lockInfo);
- ASSERT_EQUALS(0U, lockInfo.locks.size());
+ // No lock requests made, no locks held.
+ locker.saveLockStateAndUnlock(&lockInfo);
+ ASSERT_EQUALS(0U, lockInfo.locks.size());
- // Lock the global lock, but just once.
- locker.lockGlobal(MODE_IX);
+ // Lock the global lock, but just once.
+ locker.lockGlobal(MODE_IX);
- // We've locked the global lock. This should be reflected in the lockInfo.
- locker.saveLockStateAndUnlock(&lockInfo);
- ASSERT(!locker.isLocked());
- ASSERT_EQUALS(MODE_IX, lockInfo.globalMode);
+ // We've locked the global lock. This should be reflected in the lockInfo.
+ locker.saveLockStateAndUnlock(&lockInfo);
+ ASSERT(!locker.isLocked());
+ ASSERT_EQUALS(MODE_IX, lockInfo.globalMode);
- // Restore the lock(s) we had.
- locker.restoreLockState(lockInfo);
+ // Restore the lock(s) we had.
+ locker.restoreLockState(lockInfo);
- ASSERT(locker.isLocked());
- ASSERT(locker.unlockAll());
- }
+ ASSERT(locker.isLocked());
+ ASSERT(locker.unlockAll());
+}
- /**
- * Test that we don't unlock when we have the global lock more than once.
- */
- TEST(LockerImpl, saveAndRestoreGlobalAcquiredTwice) {
- Locker::LockSnapshot lockInfo;
+/**
+ * Test that we don't unlock when we have the global lock more than once.
+ */
+TEST(LockerImpl, saveAndRestoreGlobalAcquiredTwice) {
+ Locker::LockSnapshot lockInfo;
- DefaultLockerImpl locker;
+ DefaultLockerImpl locker;
- // No lock requests made, no locks held.
- locker.saveLockStateAndUnlock(&lockInfo);
- ASSERT_EQUALS(0U, lockInfo.locks.size());
+ // No lock requests made, no locks held.
+ locker.saveLockStateAndUnlock(&lockInfo);
+ ASSERT_EQUALS(0U, lockInfo.locks.size());
- // Lock the global lock.
- locker.lockGlobal(MODE_IX);
- locker.lockGlobal(MODE_IX);
+ // Lock the global lock.
+ locker.lockGlobal(MODE_IX);
+ locker.lockGlobal(MODE_IX);
- // This shouldn't actually unlock as we're in a nested scope.
- ASSERT(!locker.saveLockStateAndUnlock(&lockInfo));
+ // This shouldn't actually unlock as we're in a nested scope.
+ ASSERT(!locker.saveLockStateAndUnlock(&lockInfo));
- ASSERT(locker.isLocked());
+ ASSERT(locker.isLocked());
- // We must unlockAll twice.
- ASSERT(!locker.unlockAll());
- ASSERT(locker.unlockAll());
- }
+ // We must unlockAll twice.
+ ASSERT(!locker.unlockAll());
+ ASSERT(locker.unlockAll());
+}
- /**
- * Tests that restoreMMAPV1LockerImpl works by locking a db and collection and saving + restoring.
- */
- TEST(LockerImpl, saveAndRestoreDBAndCollection) {
- Locker::LockSnapshot lockInfo;
+/**
+ * Tests that restoreMMAPV1LockerImpl works by locking a db and collection and saving + restoring.
+ */
+TEST(LockerImpl, saveAndRestoreDBAndCollection) {
+ Locker::LockSnapshot lockInfo;
- DefaultLockerImpl locker;
+ DefaultLockerImpl locker;
- const ResourceId resIdDatabase(RESOURCE_DATABASE, std::string("TestDB"));
- const ResourceId resIdCollection(RESOURCE_COLLECTION, std::string("TestDB.collection"));
+ const ResourceId resIdDatabase(RESOURCE_DATABASE, std::string("TestDB"));
+ const ResourceId resIdCollection(RESOURCE_COLLECTION, std::string("TestDB.collection"));
- // Lock some stuff.
- locker.lockGlobal(MODE_IX);
- ASSERT_EQUALS(LOCK_OK, locker.lock(resIdDatabase, MODE_IX));
- ASSERT_EQUALS(LOCK_OK, locker.lock(resIdCollection, MODE_X));
- locker.saveLockStateAndUnlock(&lockInfo);
+ // Lock some stuff.
+ locker.lockGlobal(MODE_IX);
+ ASSERT_EQUALS(LOCK_OK, locker.lock(resIdDatabase, MODE_IX));
+ ASSERT_EQUALS(LOCK_OK, locker.lock(resIdCollection, MODE_X));
+ locker.saveLockStateAndUnlock(&lockInfo);
- // Things shouldn't be locked anymore.
- ASSERT_EQUALS(MODE_NONE, locker.getLockMode(resIdDatabase));
- ASSERT_EQUALS(MODE_NONE, locker.getLockMode(resIdCollection));
+ // Things shouldn't be locked anymore.
+ ASSERT_EQUALS(MODE_NONE, locker.getLockMode(resIdDatabase));
+ ASSERT_EQUALS(MODE_NONE, locker.getLockMode(resIdCollection));
- // Restore lock state.
- locker.restoreLockState(lockInfo);
+ // Restore lock state.
+ locker.restoreLockState(lockInfo);
- // Make sure things were re-locked.
- ASSERT_EQUALS(MODE_IX, locker.getLockMode(resIdDatabase));
- ASSERT_EQUALS(MODE_X, locker.getLockMode(resIdCollection));
+ // Make sure things were re-locked.
+ ASSERT_EQUALS(MODE_IX, locker.getLockMode(resIdDatabase));
+ ASSERT_EQUALS(MODE_X, locker.getLockMode(resIdCollection));
- ASSERT(locker.unlockAll());
- }
+ ASSERT(locker.unlockAll());
+}
- TEST(LockerImpl, DefaultLocker) {
- const ResourceId resId(RESOURCE_DATABASE, std::string("TestDB"));
+TEST(LockerImpl, DefaultLocker) {
+ const ResourceId resId(RESOURCE_DATABASE, std::string("TestDB"));
- DefaultLockerImpl locker;
- ASSERT_EQUALS(LOCK_OK, locker.lockGlobal(MODE_IX));
- ASSERT_EQUALS(LOCK_OK, locker.lock(resId, MODE_X));
-
- // Make sure the flush lock IS NOT held
- Locker::LockerInfo info;
- locker.getLockerInfo(&info);
- ASSERT(!info.waitingResource.isValid());
- ASSERT_EQUALS(2U, info.locks.size());
- ASSERT_EQUALS(RESOURCE_GLOBAL, info.locks[0].resourceId.getType());
- ASSERT_EQUALS(resId, info.locks[1].resourceId);
-
- ASSERT(locker.unlockAll());
- }
+ DefaultLockerImpl locker;
+ ASSERT_EQUALS(LOCK_OK, locker.lockGlobal(MODE_IX));
+ ASSERT_EQUALS(LOCK_OK, locker.lock(resId, MODE_X));
- TEST(LockerImpl, MMAPV1Locker) {
- const ResourceId resId(RESOURCE_DATABASE, std::string("TestDB"));
+ // Make sure the flush lock IS NOT held
+ Locker::LockerInfo info;
+ locker.getLockerInfo(&info);
+ ASSERT(!info.waitingResource.isValid());
+ ASSERT_EQUALS(2U, info.locks.size());
+ ASSERT_EQUALS(RESOURCE_GLOBAL, info.locks[0].resourceId.getType());
+ ASSERT_EQUALS(resId, info.locks[1].resourceId);
- MMAPV1LockerImpl locker;
- ASSERT_EQUALS(LOCK_OK, locker.lockGlobal(MODE_IX));
- ASSERT_EQUALS(LOCK_OK, locker.lock(resId, MODE_X));
+ ASSERT(locker.unlockAll());
+}
- // Make sure the flush lock IS held
- Locker::LockerInfo info;
- locker.getLockerInfo(&info);
- ASSERT(!info.waitingResource.isValid());
- ASSERT_EQUALS(3U, info.locks.size());
- ASSERT_EQUALS(RESOURCE_GLOBAL, info.locks[0].resourceId.getType());
- ASSERT_EQUALS(RESOURCE_MMAPV1_FLUSH, info.locks[1].resourceId.getType());
- ASSERT_EQUALS(resId, info.locks[2].resourceId);
+TEST(LockerImpl, MMAPV1Locker) {
+ const ResourceId resId(RESOURCE_DATABASE, std::string("TestDB"));
- ASSERT(locker.unlockAll());
- }
+ MMAPV1LockerImpl locker;
+ ASSERT_EQUALS(LOCK_OK, locker.lockGlobal(MODE_IX));
+ ASSERT_EQUALS(LOCK_OK, locker.lock(resId, MODE_X));
+ // Make sure the flush lock IS held
+ Locker::LockerInfo info;
+ locker.getLockerInfo(&info);
+ ASSERT(!info.waitingResource.isValid());
+ ASSERT_EQUALS(3U, info.locks.size());
+ ASSERT_EQUALS(RESOURCE_GLOBAL, info.locks[0].resourceId.getType());
+ ASSERT_EQUALS(RESOURCE_MMAPV1_FLUSH, info.locks[1].resourceId.getType());
+ ASSERT_EQUALS(resId, info.locks[2].resourceId);
- // These two tests exercise single-threaded performance of uncontended lock acquisition. It
- // is not practical to run them on debug builds.
+ ASSERT(locker.unlockAll());
+}
+
+
+// These two tests exercise single-threaded performance of uncontended lock acquisition. It
+// is not practical to run them on debug builds.
#ifndef MONGO_CONFIG_DEBUG_BUILD
- TEST(Locker, PerformanceBoostSharedMutex) {
- for (int numLockers = 1; numLockers <= 64; numLockers = numLockers * 2) {
- stdx::mutex mtx;
-
- // Do some warm-up loops
- for (int i = 0; i < 1000; i++) {
- mtx.lock();
- mtx.unlock();
- }
-
- // Measure the number of loops
- //
- Timer t;
-
- for (int i = 0; i < NUM_PERF_ITERS; i++) {
- mtx.lock();
- mtx.unlock();
- }
-
- log() << numLockers
- << " locks took: "
- << static_cast<double>(t.micros()) * 1000.0 / static_cast<double>(NUM_PERF_ITERS)
- << " ns";
+TEST(Locker, PerformanceBoostSharedMutex) {
+ for (int numLockers = 1; numLockers <= 64; numLockers = numLockers * 2) {
+ stdx::mutex mtx;
+
+ // Do some warm-up loops
+ for (int i = 0; i < 1000; i++) {
+ mtx.lock();
+ mtx.unlock();
+ }
+
+ // Measure the number of loops
+ //
+ Timer t;
+
+ for (int i = 0; i < NUM_PERF_ITERS; i++) {
+ mtx.lock();
+ mtx.unlock();
}
+
+ log() << numLockers << " locks took: "
+ << static_cast<double>(t.micros()) * 1000.0 / static_cast<double>(NUM_PERF_ITERS)
+ << " ns";
}
+}
+
+TEST(Locker, PerformanceLocker) {
+ for (int numLockers = 1; numLockers <= 64; numLockers = numLockers * 2) {
+ std::vector<std::shared_ptr<LockerForTests>> lockers(numLockers);
+ for (int i = 0; i < numLockers; i++) {
+ lockers[i].reset(new LockerForTests(MODE_S));
+ }
+
+ DefaultLockerImpl locker;
+
+ // Do some warm-up loops
+ for (int i = 0; i < 1000; i++) {
+ locker.lockGlobal(MODE_IS);
+ locker.unlockAll();
+ }
- TEST(Locker, PerformanceLocker) {
- for (int numLockers = 1; numLockers <= 64; numLockers = numLockers * 2) {
- std::vector<std::shared_ptr<LockerForTests> > lockers(numLockers);
- for (int i = 0; i < numLockers; i++) {
- lockers[i].reset(new LockerForTests(MODE_S));
- }
-
- DefaultLockerImpl locker;
-
- // Do some warm-up loops
- for (int i = 0; i < 1000; i++) {
- locker.lockGlobal(MODE_IS);
- locker.unlockAll();
- }
-
- // Measure the number of loops
- Timer t;
-
- for (int i = 0; i < NUM_PERF_ITERS; i++) {
- locker.lockGlobal(MODE_IS);
- locker.unlockAll();
- }
-
- log() << numLockers
- << " locks took: "
- << static_cast<double>(t.micros()) * 1000.0 / static_cast<double>(NUM_PERF_ITERS)
- << " ns";
+ // Measure the number of loops
+ Timer t;
+
+ for (int i = 0; i < NUM_PERF_ITERS; i++) {
+ locker.lockGlobal(MODE_IS);
+ locker.unlockAll();
}
+
+ log() << numLockers << " locks took: "
+ << static_cast<double>(t.micros()) * 1000.0 / static_cast<double>(NUM_PERF_ITERS)
+ << " ns";
}
+}
#endif // MONGO_CONFIG_DEBUG_BUILD
-} // namespace mongo
+} // namespace mongo
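The save/restore tests above exercise the yield pattern used by long-running operations: release everything at a yield point, let other work interleave, then reacquire on resume. A condensed sketch of that pattern, using only calls from the tests (error handling elided):

    DefaultLockerImpl locker;
    locker.lockGlobal(MODE_IX);

    const ResourceId resIdDatabase(RESOURCE_DATABASE, std::string("TestDB"));
    invariant(LOCK_OK == locker.lock(resIdDatabase, MODE_IX));

    Locker::LockSnapshot snapshot;
    if (locker.saveLockStateAndUnlock(&snapshot)) {
        // Nothing is locked here; another operation may interleave.
        locker.restoreLockState(snapshot);  // Reacquires the saved locks.
    }

    locker.unlockAll();
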
diff --git a/src/mongo/db/concurrency/lock_stats.cpp b/src/mongo/db/concurrency/lock_stats.cpp
index c115244f369..809799299a7 100644
--- a/src/mongo/db/concurrency/lock_stats.cpp
+++ b/src/mongo/db/concurrency/lock_stats.cpp
@@ -34,125 +34,121 @@
namespace mongo {
- template<typename CounterType>
- LockStats<CounterType>::LockStats() {
- reset();
+template <typename CounterType>
+LockStats<CounterType>::LockStats() {
+ reset();
+}
+
+template <typename CounterType>
+void LockStats<CounterType>::report(BSONObjBuilder* builder) const {
+ // All indexing below starts from offset 1, because we do not want to report/account
+ // position 0, which is a sentinel value for invalid resource/no lock.
+ for (int i = 1; i < ResourceTypesCount; i++) {
+ _report(builder, resourceTypeName(static_cast<ResourceType>(i)), _stats[i]);
}
- template<typename CounterType>
- void LockStats<CounterType>::report(BSONObjBuilder* builder) const {
- // All indexing below starts from offset 1, because we do not want to report/account
- // position 0, which is a sentinel value for invalid resource/no lock.
- for (int i = 1; i < ResourceTypesCount; i++) {
- _report(builder, resourceTypeName(static_cast<ResourceType>(i)), _stats[i]);
- }
-
- _report(builder, "oplog", _oplogStats);
- }
-
- template<typename CounterType>
- void LockStats<CounterType>::_report(BSONObjBuilder* builder,
- const char* sectionName,
- const PerModeLockStatCounters& stat) const {
-
- std::unique_ptr<BSONObjBuilder> section;
-
- // All indexing below starts from offset 1, because we do not want to report/account
- // position 0, which is a sentinel value for invalid resource/no lock.
-
- // Num acquires
- {
- std::unique_ptr<BSONObjBuilder> numAcquires;
- for (int mode = 1; mode < LockModesCount; mode++) {
- const long long value = CounterOps::get(stat.modeStats[mode].numAcquisitions);
- if (value > 0) {
- if (!numAcquires) {
- if (!section) {
- section.reset(new BSONObjBuilder(builder->subobjStart(sectionName)));
- }
-
- numAcquires.reset(
- new BSONObjBuilder(section->subobjStart("acquireCount")));
+ _report(builder, "oplog", _oplogStats);
+}
+
+template <typename CounterType>
+void LockStats<CounterType>::_report(BSONObjBuilder* builder,
+ const char* sectionName,
+ const PerModeLockStatCounters& stat) const {
+ std::unique_ptr<BSONObjBuilder> section;
+
+ // All indexing below starts from offset 1, because we do not want to report/account
+ // position 0, which is a sentinel value for invalid resource/no lock.
+
+ // Num acquires
+ {
+ std::unique_ptr<BSONObjBuilder> numAcquires;
+ for (int mode = 1; mode < LockModesCount; mode++) {
+ const long long value = CounterOps::get(stat.modeStats[mode].numAcquisitions);
+ if (value > 0) {
+ if (!numAcquires) {
+ if (!section) {
+ section.reset(new BSONObjBuilder(builder->subobjStart(sectionName)));
}
- numAcquires->append(legacyModeName(static_cast<LockMode>(mode)), value);
+
+ numAcquires.reset(new BSONObjBuilder(section->subobjStart("acquireCount")));
}
+ numAcquires->append(legacyModeName(static_cast<LockMode>(mode)), value);
}
}
+ }
- // Num waits
- {
- std::unique_ptr<BSONObjBuilder> numWaits;
- for (int mode = 1; mode < LockModesCount; mode++) {
- const long long value = CounterOps::get(stat.modeStats[mode].numWaits);
- if (value > 0) {
- if (!numWaits) {
- if (!section) {
- section.reset(new BSONObjBuilder(builder->subobjStart(sectionName)));
- }
-
- numWaits.reset(
- new BSONObjBuilder(section->subobjStart("acquireWaitCount")));
+ // Num waits
+ {
+ std::unique_ptr<BSONObjBuilder> numWaits;
+ for (int mode = 1; mode < LockModesCount; mode++) {
+ const long long value = CounterOps::get(stat.modeStats[mode].numWaits);
+ if (value > 0) {
+ if (!numWaits) {
+ if (!section) {
+ section.reset(new BSONObjBuilder(builder->subobjStart(sectionName)));
}
- numWaits->append(legacyModeName(static_cast<LockMode>(mode)), value);
+
+ numWaits.reset(new BSONObjBuilder(section->subobjStart("acquireWaitCount")));
}
+ numWaits->append(legacyModeName(static_cast<LockMode>(mode)), value);
}
}
+ }
- // Total time waiting
- {
- std::unique_ptr<BSONObjBuilder> timeAcquiring;
- for (int mode = 1; mode < LockModesCount; mode++) {
- const long long value = CounterOps::get(stat.modeStats[mode].combinedWaitTimeMicros);
- if (value > 0) {
- if (!timeAcquiring) {
- if (!section) {
- section.reset(new BSONObjBuilder(builder->subobjStart(sectionName)));
- }
-
- timeAcquiring.reset(
- new BSONObjBuilder(section->subobjStart("timeAcquiringMicros")));
+ // Total time waiting
+ {
+ std::unique_ptr<BSONObjBuilder> timeAcquiring;
+ for (int mode = 1; mode < LockModesCount; mode++) {
+ const long long value = CounterOps::get(stat.modeStats[mode].combinedWaitTimeMicros);
+ if (value > 0) {
+ if (!timeAcquiring) {
+ if (!section) {
+ section.reset(new BSONObjBuilder(builder->subobjStart(sectionName)));
}
- timeAcquiring->append(legacyModeName(static_cast<LockMode>(mode)), value);
+
+ timeAcquiring.reset(
+ new BSONObjBuilder(section->subobjStart("timeAcquiringMicros")));
}
+ timeAcquiring->append(legacyModeName(static_cast<LockMode>(mode)), value);
}
}
+ }
- // Deadlocks
- {
- std::unique_ptr<BSONObjBuilder> deadlockCount;
- for (int mode = 1; mode < LockModesCount; mode++) {
- const long long value = CounterOps::get(stat.modeStats[mode].numDeadlocks);
- if (value > 0) {
- if (!deadlockCount) {
- if (!section) {
- section.reset(new BSONObjBuilder(builder->subobjStart(sectionName)));
- }
-
- deadlockCount.reset(
- new BSONObjBuilder(section->subobjStart("deadlockCount")));
+ // Deadlocks
+ {
+ std::unique_ptr<BSONObjBuilder> deadlockCount;
+ for (int mode = 1; mode < LockModesCount; mode++) {
+ const long long value = CounterOps::get(stat.modeStats[mode].numDeadlocks);
+ if (value > 0) {
+ if (!deadlockCount) {
+ if (!section) {
+ section.reset(new BSONObjBuilder(builder->subobjStart(sectionName)));
}
- deadlockCount->append(legacyModeName(static_cast<LockMode>(mode)), value);
+
+ deadlockCount.reset(new BSONObjBuilder(section->subobjStart("deadlockCount")));
}
+ deadlockCount->append(legacyModeName(static_cast<LockMode>(mode)), value);
}
}
}
+}
- template<typename CounterType>
- void LockStats<CounterType>::reset() {
- for (int i = 0; i < ResourceTypesCount; i++) {
- for (int mode = 0; mode < LockModesCount; mode++) {
- _stats[i].modeStats[mode].reset();
- }
- }
-
+template <typename CounterType>
+void LockStats<CounterType>::reset() {
+ for (int i = 0; i < ResourceTypesCount; i++) {
for (int mode = 0; mode < LockModesCount; mode++) {
- _oplogStats.modeStats[mode].reset();
+ _stats[i].modeStats[mode].reset();
}
}
+ for (int mode = 0; mode < LockModesCount; mode++) {
+ _oplogStats.modeStats[mode].reset();
+ }
+}
+
- // Ensures that there are instances compiled for LockStats for AtomicInt64 and int64_t
- template class LockStats<int64_t>;
- template class LockStats<AtomicInt64>;
+// Ensures that there are instances compiled for LockStats for AtomicInt64 and int64_t
+template class LockStats<int64_t>;
+template class LockStats<AtomicInt64>;
-} // namespace mongo
+} // namespace mongo
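For reference, the reporting path above is driven the same way the Reporting test later in this diff does it: collect the instance-wide counters, then serialize them into BSON. A short sketch; the example output shape is illustrative, not captured from a real server:

    SingleThreadedLockStats stats;
    reportGlobalLockingStats(&stats);

    BSONObjBuilder builder;
    stats.report(&builder);

    // Only non-zero counters are emitted, so the result stays compact,
    // e.g. { Global: { acquireCount: { r: 1 } } }.
    BSONObj lockSection = builder.obj();
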
diff --git a/src/mongo/db/concurrency/lock_stats.h b/src/mongo/db/concurrency/lock_stats.h
index 2994fb99e3e..86f4a26d273 100644
--- a/src/mongo/db/concurrency/lock_stats.h
+++ b/src/mongo/db/concurrency/lock_stats.h
@@ -33,172 +33,171 @@
namespace mongo {
- class BSONObjBuilder;
+class BSONObjBuilder;
- /**
- * Operations for manipulating the lock statistics abstracting whether they are atomic or not.
- */
- struct CounterOps {
- static int64_t get(const int64_t& counter) {
- return counter;
- }
+/**
+ * Operations for manipulating the lock statistics, abstracting whether they are atomic or not.
+ */
+struct CounterOps {
+ static int64_t get(const int64_t& counter) {
+ return counter;
+ }
- static int64_t get(const AtomicInt64& counter) {
- return counter.load();
- }
+ static int64_t get(const AtomicInt64& counter) {
+ return counter.load();
+ }
- static void set(int64_t& counter, int64_t value) {
- counter = value;
- }
+ static void set(int64_t& counter, int64_t value) {
+ counter = value;
+ }
- static void set(AtomicInt64& counter, int64_t value) {
- counter.store(value);
- }
+ static void set(AtomicInt64& counter, int64_t value) {
+ counter.store(value);
+ }
- static void add(int64_t& counter, int64_t value) {
- counter += value;
- }
+ static void add(int64_t& counter, int64_t value) {
+ counter += value;
+ }
- static void add(int64_t& counter, const AtomicInt64& value) {
- counter += value.load();
- }
+ static void add(int64_t& counter, const AtomicInt64& value) {
+ counter += value.load();
+ }
- static void add(AtomicInt64& counter, int64_t value) {
- counter.addAndFetch(value);
- }
- };
-
-
- /**
- * Bundle of locking statistics values.
- */
- template<typename CounterType>
- struct LockStatCounters {
-
- template<typename OtherType>
- void append(const LockStatCounters<OtherType>& other) {
- CounterOps::add(numAcquisitions, other.numAcquisitions);
- CounterOps::add(numWaits, other.numWaits);
- CounterOps::add(combinedWaitTimeMicros, other.combinedWaitTimeMicros);
- CounterOps::add(numDeadlocks, other.numDeadlocks);
- }
+ static void add(AtomicInt64& counter, int64_t value) {
+ counter.addAndFetch(value);
+ }
+};
- void reset() {
- CounterOps::set(numAcquisitions, 0);
- CounterOps::set(numWaits, 0);
- CounterOps::set(combinedWaitTimeMicros, 0);
- CounterOps::set(numDeadlocks, 0);
- }
+/**
+ * Bundle of locking statistics values.
+ */
+template <typename CounterType>
+struct LockStatCounters {
+ template <typename OtherType>
+ void append(const LockStatCounters<OtherType>& other) {
+ CounterOps::add(numAcquisitions, other.numAcquisitions);
+ CounterOps::add(numWaits, other.numWaits);
+ CounterOps::add(combinedWaitTimeMicros, other.combinedWaitTimeMicros);
+ CounterOps::add(numDeadlocks, other.numDeadlocks);
+ }
+
+ void reset() {
+ CounterOps::set(numAcquisitions, 0);
+ CounterOps::set(numWaits, 0);
+ CounterOps::set(combinedWaitTimeMicros, 0);
+ CounterOps::set(numDeadlocks, 0);
+ }
+
+
+ CounterType numAcquisitions;
+ CounterType numWaits;
+ CounterType combinedWaitTimeMicros;
+ CounterType numDeadlocks;
+};
- CounterType numAcquisitions;
- CounterType numWaits;
- CounterType combinedWaitTimeMicros;
- CounterType numDeadlocks;
- };
+/**
+ * Templatized lock statistics management class, which can be specialized with atomic integers
+ * for the global stats and with regular integers for the per-locker stats.
+ */
+template <typename CounterType>
+class LockStats {
+public:
+ // Declare the type for the lock counters bundle
+ typedef LockStatCounters<CounterType> LockStatCountersType;
/**
- * Templatized lock statistics management class, which can be specialized with atomic integers
- * for the global stats and with regular integers for the per-locker stats.
+ * Initializes the locking statistics with zeroes (calls reset).
*/
- template<typename CounterType>
- class LockStats {
- public:
- // Declare the type for the lock counters bundle
- typedef LockStatCounters<CounterType> LockStatCountersType;
-
- /**
- * Initializes the locking statistics with zeroes (calls reset).
- */
- LockStats();
-
- void recordAcquisition(ResourceId resId, LockMode mode) {
- CounterOps::add(get(resId, mode).numAcquisitions, 1);
- }
+ LockStats();
- void recordWait(ResourceId resId, LockMode mode) {
- CounterOps::add(get(resId, mode).numWaits, 1);
- }
+ void recordAcquisition(ResourceId resId, LockMode mode) {
+ CounterOps::add(get(resId, mode).numAcquisitions, 1);
+ }
- void recordWaitTime(ResourceId resId, LockMode mode, int64_t waitMicros) {
- CounterOps::add(get(resId, mode).combinedWaitTimeMicros, waitMicros);
- }
+ void recordWait(ResourceId resId, LockMode mode) {
+ CounterOps::add(get(resId, mode).numWaits, 1);
+ }
- void recordDeadlock(ResourceId resId, LockMode mode) {
- CounterOps::add(get(resId, mode).numDeadlocks, 1);
- }
+ void recordWaitTime(ResourceId resId, LockMode mode, int64_t waitMicros) {
+ CounterOps::add(get(resId, mode).combinedWaitTimeMicros, waitMicros);
+ }
- LockStatCountersType& get(ResourceId resId, LockMode mode) {
- if (resId == resourceIdOplog) {
- return _oplogStats.modeStats[mode];
- }
+ void recordDeadlock(ResourceId resId, LockMode mode) {
+ CounterOps::add(get(resId, mode).numDeadlocks, 1);
+ }
- return _stats[resId.getType()].modeStats[mode];
+ LockStatCountersType& get(ResourceId resId, LockMode mode) {
+ if (resId == resourceIdOplog) {
+ return _oplogStats.modeStats[mode];
}
- template<typename OtherType>
- void append(const LockStats<OtherType>& other) {
- typedef LockStatCounters<OtherType> OtherLockStatCountersType;
-
- // Append all lock stats
- for (int i = 0; i < ResourceTypesCount; i++) {
- for (int mode = 0; mode < LockModesCount; mode++) {
- const OtherLockStatCountersType& otherStats = other._stats[i].modeStats[mode];
- LockStatCountersType& thisStats = _stats[i].modeStats[mode];
- thisStats.append(otherStats);
- }
- }
+ return _stats[resId.getType()].modeStats[mode];
+ }
+
+ template <typename OtherType>
+ void append(const LockStats<OtherType>& other) {
+ typedef LockStatCounters<OtherType> OtherLockStatCountersType;
- // Append the oplog stats
+ // Append all lock stats
+ for (int i = 0; i < ResourceTypesCount; i++) {
for (int mode = 0; mode < LockModesCount; mode++) {
- const OtherLockStatCountersType& otherStats = other._oplogStats.modeStats[mode];
- LockStatCountersType& thisStats = _oplogStats.modeStats[mode];
+ const OtherLockStatCountersType& otherStats = other._stats[i].modeStats[mode];
+ LockStatCountersType& thisStats = _stats[i].modeStats[mode];
thisStats.append(otherStats);
}
}
- void report(BSONObjBuilder* builder) const;
- void reset();
+ // Append the oplog stats
+ for (int mode = 0; mode < LockModesCount; mode++) {
+ const OtherLockStatCountersType& otherStats = other._oplogStats.modeStats[mode];
+ LockStatCountersType& thisStats = _oplogStats.modeStats[mode];
+ thisStats.append(otherStats);
+ }
+ }
- private:
- // Necessary for the append call, which accepts argument of type different than our
- // template parameter.
- template<typename T>
- friend class LockStats;
+ void report(BSONObjBuilder* builder) const;
+ void reset();
+private:
+ // Necessary for the append call, which accepts argument of type different than our
+ // template parameter.
+ template <typename T>
+ friend class LockStats;
- // Keep the per-mode lock stats next to each other in case we want to do fancy operations
- // such as atomic operations on 128-bit values.
- struct PerModeLockStatCounters {
- LockStatCountersType modeStats[LockModesCount];
- };
+ // Keep the per-mode lock stats next to each other in case we want to do fancy operations
+ // such as atomic operations on 128-bit values.
+ struct PerModeLockStatCounters {
+ LockStatCountersType modeStats[LockModesCount];
+ };
- void _report(BSONObjBuilder* builder,
- const char* sectionName,
- const PerModeLockStatCounters& stat) const;
+ void _report(BSONObjBuilder* builder,
+ const char* sectionName,
+ const PerModeLockStatCounters& stat) const;
- // Split the lock stats per resource type and special-case the oplog so we can collect
- // more detailed stats for it.
- PerModeLockStatCounters _stats[ResourceTypesCount];
- PerModeLockStatCounters _oplogStats;
- };
- typedef LockStats<int64_t> SingleThreadedLockStats;
- typedef LockStats<AtomicInt64> AtomicLockStats;
+ // Split the lock stats per resource type and special-case the oplog so we can collect
+ // more detailed stats for it.
+ PerModeLockStatCounters _stats[ResourceTypesCount];
+ PerModeLockStatCounters _oplogStats;
+};
+typedef LockStats<int64_t> SingleThreadedLockStats;
+typedef LockStats<AtomicInt64> AtomicLockStats;
- /**
- * Reports instance-wide locking statistics, which can then be converted to BSON or logged.
- */
- void reportGlobalLockingStats(SingleThreadedLockStats* outStats);
- /**
- * Currently used for testing only.
- */
- void resetGlobalLockStats();
+/**
+ * Reports instance-wide locking statistics, which can then be converted to BSON or logged.
+ */
+void reportGlobalLockingStats(SingleThreadedLockStats* outStats);
+
+/**
+ * Currently used for testing only.
+ */
+void resetGlobalLockStats();
-} // namespace mongo
+} // namespace mongo
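The two typedefs above are designed to interoperate: per-operation stats use plain int64_t counters (no atomics needed on a single thread), the process-wide aggregate uses AtomicInt64, and the templated append() bridges the two through CounterOps. A minimal sketch using only the declarations in this header:

    const ResourceId resId(RESOURCE_DATABASE, std::string("TestDB"));

    SingleThreadedLockStats perOpStats;  // LockStats<int64_t>
    perOpStats.recordAcquisition(resId, MODE_IX);
    perOpStats.recordWait(resId, MODE_IX);
    perOpStats.recordWaitTime(resId, MODE_IX, 1500);  // microseconds

    AtomicLockStats globalStats;  // LockStats<AtomicInt64>
    globalStats.append(perOpStats);  // atomic adds of the plain counters
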
diff --git a/src/mongo/db/concurrency/lock_stats_test.cpp b/src/mongo/db/concurrency/lock_stats_test.cpp
index 9b148e606bd..8ae6eb3c010 100644
--- a/src/mongo/db/concurrency/lock_stats_test.cpp
+++ b/src/mongo/db/concurrency/lock_stats_test.cpp
@@ -34,69 +34,69 @@
namespace mongo {
- TEST(LockStats, NoWait) {
- const ResourceId resId(RESOURCE_COLLECTION, std::string("LockStats.NoWait"));
+TEST(LockStats, NoWait) {
+ const ResourceId resId(RESOURCE_COLLECTION, std::string("LockStats.NoWait"));
- resetGlobalLockStats();
+ resetGlobalLockStats();
- LockerForTests locker(MODE_IX);
- locker.lock(resId, MODE_X);
- locker.unlock(resId);
+ LockerForTests locker(MODE_IX);
+ locker.lock(resId, MODE_X);
+ locker.unlock(resId);
- // Make sure that the waits/blocks are zero
- SingleThreadedLockStats stats;
- reportGlobalLockingStats(&stats);
+ // Make sure that the waits/blocks are zero
+ SingleThreadedLockStats stats;
+ reportGlobalLockingStats(&stats);
- ASSERT_EQUALS(1, stats.get(resId, MODE_X).numAcquisitions);
- ASSERT_EQUALS(0, stats.get(resId, MODE_X).numWaits);
- ASSERT_EQUALS(0, stats.get(resId, MODE_X).combinedWaitTimeMicros);
- }
+ ASSERT_EQUALS(1, stats.get(resId, MODE_X).numAcquisitions);
+ ASSERT_EQUALS(0, stats.get(resId, MODE_X).numWaits);
+ ASSERT_EQUALS(0, stats.get(resId, MODE_X).combinedWaitTimeMicros);
+}
- TEST(LockStats, Wait) {
- const ResourceId resId(RESOURCE_COLLECTION, std::string("LockStats.Wait"));
+TEST(LockStats, Wait) {
+ const ResourceId resId(RESOURCE_COLLECTION, std::string("LockStats.Wait"));
- resetGlobalLockStats();
+ resetGlobalLockStats();
- LockerForTests locker(MODE_IX);
- locker.lock(resId, MODE_X);
+ LockerForTests locker(MODE_IX);
+ locker.lock(resId, MODE_X);
- {
- // This will block
- LockerForTests lockerConflict(MODE_IX);
- ASSERT_EQUALS(LOCK_WAITING, lockerConflict.lockBegin(resId, MODE_S));
+ {
+ // This will block
+ LockerForTests lockerConflict(MODE_IX);
+ ASSERT_EQUALS(LOCK_WAITING, lockerConflict.lockBegin(resId, MODE_S));
- // Sleep 1 millisecond so the wait time passes
- ASSERT_EQUALS(LOCK_TIMEOUT, lockerConflict.lockComplete(resId, MODE_S, 1, false));
- }
+ // Sleep 1 millisecond so the wait time passes
+ ASSERT_EQUALS(LOCK_TIMEOUT, lockerConflict.lockComplete(resId, MODE_S, 1, false));
+ }
- // Make sure that the waits/blocks are non-zero
- SingleThreadedLockStats stats;
- reportGlobalLockingStats(&stats);
+ // Make sure that the waits/blocks are non-zero
+ SingleThreadedLockStats stats;
+ reportGlobalLockingStats(&stats);
- ASSERT_EQUALS(1, stats.get(resId, MODE_X).numAcquisitions);
- ASSERT_EQUALS(0, stats.get(resId, MODE_X).numWaits);
- ASSERT_EQUALS(0, stats.get(resId, MODE_X).combinedWaitTimeMicros);
+ ASSERT_EQUALS(1, stats.get(resId, MODE_X).numAcquisitions);
+ ASSERT_EQUALS(0, stats.get(resId, MODE_X).numWaits);
+ ASSERT_EQUALS(0, stats.get(resId, MODE_X).combinedWaitTimeMicros);
- ASSERT_EQUALS(1, stats.get(resId, MODE_S).numAcquisitions);
- ASSERT_EQUALS(1, stats.get(resId, MODE_S).numWaits);
- ASSERT_GREATER_THAN(stats.get(resId, MODE_S).combinedWaitTimeMicros, 0);
- }
+ ASSERT_EQUALS(1, stats.get(resId, MODE_S).numAcquisitions);
+ ASSERT_EQUALS(1, stats.get(resId, MODE_S).numWaits);
+ ASSERT_GREATER_THAN(stats.get(resId, MODE_S).combinedWaitTimeMicros, 0);
+}
- TEST(LockStats, Reporting) {
- const ResourceId resId(RESOURCE_COLLECTION, std::string("LockStats.Reporting"));
+TEST(LockStats, Reporting) {
+ const ResourceId resId(RESOURCE_COLLECTION, std::string("LockStats.Reporting"));
- resetGlobalLockStats();
+ resetGlobalLockStats();
- LockerForTests locker(MODE_IX);
- locker.lock(resId, MODE_X);
- locker.unlock(resId);
+ LockerForTests locker(MODE_IX);
+ locker.lock(resId, MODE_X);
+ locker.unlock(resId);
- // Make sure that the waits/blocks are zero
- SingleThreadedLockStats stats;
- reportGlobalLockingStats(&stats);
+ // Make sure that the waits/blocks are zero
+ SingleThreadedLockStats stats;
+ reportGlobalLockingStats(&stats);
- BSONObjBuilder builder;
- stats.report(&builder);
- }
+ BSONObjBuilder builder;
+ stats.report(&builder);
+}
-} // namespace mongo
+} // namespace mongo
diff --git a/src/mongo/db/concurrency/locker.h b/src/mongo/db/concurrency/locker.h
index 304b92bea9c..7c19f421e5c 100644
--- a/src/mongo/db/concurrency/locker.h
+++ b/src/mongo/db/concurrency/locker.h
@@ -28,269 +28,270 @@
#pragma once
-#include <climits> // For UINT_MAX
+#include <climits> // For UINT_MAX
#include <vector>
#include "mongo/db/concurrency/lock_manager.h"
#include "mongo/db/concurrency/lock_stats.h"
namespace mongo {
-
+
+/**
+ * Interface for acquiring locks. One such object must be instantiated for each request
+ * (transaction).
+ *
+ * Lock/unlock methods must always be called from a single thread.
+ */
+class Locker {
+ MONGO_DISALLOW_COPYING(Locker);
+
+public:
+ virtual ~Locker() {}
+
+ virtual LockerId getId() const = 0;
+
+ /**
+ * This should be the first method invoked for a particular Locker object. It acquires the
+ * Global lock in the specified mode and effectively indicates the mode of the operation.
+ * This is what the lock modes on the global lock mean:
+ *
+ * IX - Regular write operation
+ * IS - Regular read operation
+ * S - Stops all *write* activity. Used for administrative operations (repl, etc).
+ * X - Stops all activity. Used for administrative operations (repl state changes,
+ * shutdown, etc).
+ *
+ * This method can be called recursively, but each call to lockGlobal must be accompanied
+ * by a call to unlockAll.
+ *
+ * @param mode Mode in which the global lock should be acquired. Also indicates the intent
+ * of the operation.
+ * @param timeoutMs How long to wait for the global lock (and the flush lock, for the MMAP
+ * V1 engine) to be acquired.
+ *
+ * @return LOCK_OK, if the global lock (and the flush lock, for the MMAP V1 engine) were
+ * acquired within the specified time bound. Otherwise, the respective failure
+ * code and neither lock will be acquired.
+ */
+ virtual LockResult lockGlobal(LockMode mode, unsigned timeoutMs = UINT_MAX) = 0;
+
+ /**
+ * Requests the global lock to be acquired in the specified mode.
+ *
+ * See the comments for lockBegin/Complete for more information on the semantics.
+ */
+ virtual LockResult lockGlobalBegin(LockMode mode) = 0;
+ virtual LockResult lockGlobalComplete(unsigned timeoutMs) = 0;
+
+ /**
+ * This method is used only in the MMAP V1 storage engine; otherwise it is a no-op. See the
+ * comments in the implementation for more details on how MMAP V1 journaling works.
+ */
+ virtual void lockMMAPV1Flush() = 0;
+
+ /**
+ * Decrements the reference count on the global lock. If the reference count on the
+ * global lock hits zero, the transaction is over, and unlockAll unlocks all other locks.
+ *
+ * @return true if this is the last unlockAll call (i.e., the global lock was
+ * released); false if there are still references on the global lock and it
+ * remains held. This value should not be relied on and is only used for
+ * assertion purposes.
+ */
+ virtual bool unlockAll() = 0;
+
+ /**
+ * This is only necessary for the MMAP V1 engine and in particular, the fsyncLock command
+ * which needs to first acquire the global lock in X-mode for truncating the journal and
+ * then downgrade to S before it blocks.
+ *
+ * The downgrade is necessary in order to be nice and not block readers while under
+ * fsyncLock.
+ */
+ virtual void downgradeGlobalXtoSForMMAPV1() = 0;
+
+ /**
+ * beginWriteUnitOfWork/endWriteUnitOfWork must only be called by WriteUnitOfWork. See
+ * comments there for the semantics of units of work.
+ */
+ virtual void beginWriteUnitOfWork() = 0;
+ virtual void endWriteUnitOfWork() = 0;
+
+ virtual bool inAWriteUnitOfWork() const = 0;
+
+ /**
+ * Acquires a lock on the specified resource in the specified mode and returns the outcome
+ * of the operation. See the details for LockResult for more information on what the
+ * different results mean.
+ *
+ * Each successful acquisition of a lock on a given resource increments the reference count
+ * of the lock. Therefore, each call that returns LOCK_OK must be matched with a
+ * corresponding call to unlock.
+ *
+ * @param resId Id of the resource to be locked.
+ * @param mode Mode in which the resource should be locked. Lock upgrades are allowed.
+ * @param timeoutMs How many milliseconds to wait for the lock to be granted, before
+ * returning LOCK_TIMEOUT. This parameter defaults to UINT_MAX, which means
+ * wait infinitely. If 0 is passed, the request will return immediately, if
+ * the request could not be granted right away.
+ * @param checkDeadlock Whether to enable deadlock detection for this acquisition. This
+ * parameter is put in place until we can handle deadlocks at all places,
+ * which acquire locks.
+ *
+ * @return All LockResults except for LOCK_WAITING, because it blocks.
+ */
+ virtual LockResult lock(ResourceId resId,
+ LockMode mode,
+ unsigned timeoutMs = UINT_MAX,
+ bool checkDeadlock = false) = 0;
+
+ /**
+ * Downgrades the specified resource's lock mode without changing the reference count.
+ */
+ virtual void downgrade(ResourceId resId, LockMode newMode) = 0;
+
+ /**
+ * Releases a lock previously acquired through a lock call. It is an error to try to
+     * release a lock which has not been previously acquired (invariant violation).
+ *
+ * @return true if the lock was actually released; false if only the reference count was
+ * decremented, but the lock is still held.
+ */
+ virtual bool unlock(ResourceId resId) = 0;
+
/**
- * Interface for acquiring locks. One of those objects will have to be instantiated for each
- * request (transaction).
+ * Retrieves the mode in which a lock is held or checks whether the lock held for a
+ * particular resource covers the specified mode.
*
- * Lock/unlock methods must always be called from a single thread.
+ * For example, isLockHeldForMode will return true for MODE_S if MODE_X is already held,
+ * because MODE_X covers MODE_S.
+ */
+ virtual LockMode getLockMode(ResourceId resId) const = 0;
+ virtual bool isLockHeldForMode(ResourceId resId, LockMode mode) const = 0;
+
+    // These are shortcut methods for the above calls. However, they check that the entire
+    // hierarchy is properly locked, which makes them very expensive to call. Do not use
+    // them in performance-critical code paths.
+ virtual bool isDbLockedForMode(StringData dbName, LockMode mode) const = 0;
+ virtual bool isCollectionLockedForMode(StringData ns, LockMode mode) const = 0;
+
+ /**
+ * Returns the resource that this locker is waiting/blocked on (if any). If the locker is
+ * not waiting for a resource the returned value will be invalid (isValid() == false).
+ */
+ virtual ResourceId getWaitingResource() const = 0;
+
+ /**
+ * Describes a single lock acquisition for reporting/serialization purposes.
*/
- class Locker {
- MONGO_DISALLOW_COPYING(Locker);
- public:
- virtual ~Locker() {}
-
- virtual LockerId getId() const = 0;
-
- /**
- * This should be the first method invoked for a particular Locker object. It acquires the
- * Global lock in the specified mode and effectively indicates the mode of the operation.
- * This is what the lock modes on the global lock mean:
- *
- * IX - Regular write operation
- * IS - Regular read operation
- * S - Stops all *write* activity. Used for administrative operations (repl, etc).
- * X - Stops all activity. Used for administrative operations (repl state changes,
- * shutdown, etc).
- *
- * This method can be called recursively, but each call to lockGlobal must be accompanied
- * by a call to unlockAll.
- *
- * @param mode Mode in which the global lock should be acquired. Also indicates the intent
- * of the operation.
- * @param timeoutMs How long to wait for the global lock (and the flush lock, for the MMAP
- * V1 engine) to be acquired.
- *
- * @return LOCK_OK, if the global lock (and the flush lock, for the MMAP V1 engine) were
- * acquired within the specified time bound. Otherwise, the respective failure
- * code and neither lock will be acquired.
- */
- virtual LockResult lockGlobal(LockMode mode, unsigned timeoutMs = UINT_MAX) = 0;
-
- /**
- * Requests the global lock to be acquired in the specified mode.
- *
- * See the comments for lockBegin/Complete for more information on the semantics.
- */
- virtual LockResult lockGlobalBegin(LockMode mode) = 0;
- virtual LockResult lockGlobalComplete(unsigned timeoutMs) = 0;
-
- /**
- * This method is used only in the MMAP V1 storage engine, otherwise it is a no-op. See the
- * comments in the implementation for more details on how MMAP V1 journaling works.
- */
- virtual void lockMMAPV1Flush() = 0;
-
- /**
- * Decrements the reference count on the global lock. If the reference count on the
- * global lock hits zero, the transaction is over, and unlockAll unlocks all other locks.
- *
- * @return true if this is the last endTransaction call (i.e., the global lock was
- * released); false if there are still references on the global lock. This value
- * should not be relied on and is only used for assertion purposes.
- *
- * @return false if the global lock is still held.
- */
- virtual bool unlockAll() = 0;
-
- /**
- * This is only necessary for the MMAP V1 engine and in particular, the fsyncLock command
- * which needs to first acquire the global lock in X-mode for truncating the journal and
- * then downgrade to S before it blocks.
- *
- * The downgrade is necessary in order to be nice and not block readers while under
- * fsyncLock.
- */
- virtual void downgradeGlobalXtoSForMMAPV1() = 0;
-
- /**
- * beginWriteUnitOfWork/endWriteUnitOfWork must only be called by WriteUnitOfWork. See
- * comments there for the semantics of units of work.
- */
- virtual void beginWriteUnitOfWork() = 0;
- virtual void endWriteUnitOfWork() = 0;
-
- virtual bool inAWriteUnitOfWork() const = 0;
-
- /**
- * Acquires lock on the specified resource in the specified mode and returns the outcome
- * of the operation. See the details for LockResult for more information on what the
- * different results mean.
- *
- * Each successful acquisition of a lock on a given resource increments the reference count
- * of the lock. Therefore, each call, which returns LOCK_OK must be matched with a
- * corresponding call to unlock.
- *
- * @param resId Id of the resource to be locked.
- * @param mode Mode in which the resource should be locked. Lock upgrades are allowed.
- * @param timeoutMs How many milliseconds to wait for the lock to be granted, before
- * returning LOCK_TIMEOUT. This parameter defaults to UINT_MAX, which means
- * wait infinitely. If 0 is passed, the request will return immediately, if
- * the request could not be granted right away.
- * @param checkDeadlock Whether to enable deadlock detection for this acquisition. This
- * parameter is put in place until we can handle deadlocks at all places,
- * which acquire locks.
- *
- * @return All LockResults except for LOCK_WAITING, because it blocks.
- */
- virtual LockResult lock(ResourceId resId,
- LockMode mode,
- unsigned timeoutMs = UINT_MAX,
- bool checkDeadlock = false) = 0;
-
- /**
- * Downgrades the specified resource's lock mode without changing the reference count.
- */
- virtual void downgrade(ResourceId resId, LockMode newMode) = 0;
-
- /**
- * Releases a lock previously acquired through a lock call. It is an error to try to
- * release lock which has not been previously acquired (invariant violation).
- *
- * @return true if the lock was actually released; false if only the reference count was
- * decremented, but the lock is still held.
- */
- virtual bool unlock(ResourceId resId) = 0;
-
- /**
- * Retrieves the mode in which a lock is held or checks whether the lock held for a
- * particular resource covers the specified mode.
- *
- * For example isLockHeldForMode will return true for MODE_S, if MODE_X is already held,
- * because MODE_X covers MODE_S.
- */
- virtual LockMode getLockMode(ResourceId resId) const = 0;
- virtual bool isLockHeldForMode(ResourceId resId, LockMode mode) const = 0;
-
- // These are shortcut methods for the above calls. They however check that the entire
- // hierarchy is properly locked and because of this they are very expensive to call.
- // Do not use them in performance critical code paths.
- virtual bool isDbLockedForMode(StringData dbName, LockMode mode) const = 0;
- virtual bool isCollectionLockedForMode(StringData ns, LockMode mode) const = 0;
-
- /**
- * Returns the resource that this locker is waiting/blocked on (if any). If the locker is
- * not waiting for a resource the returned value will be invalid (isValid() == false).
- */
- virtual ResourceId getWaitingResource() const = 0;
-
- /**
- * Describes a single lock acquisition for reporting/serialization purposes.
- */
- struct OneLock {
- // What lock resource is held?
- ResourceId resourceId;
-
- // In what mode is it held?
- LockMode mode;
-
- // Reporting/serialization order is by resourceId, which is the canonical locking order
- bool operator<(const OneLock& rhs) const {
- return resourceId < rhs.resourceId;
- }
- };
-
- /**
- * Returns information and locking statistics for this instance of the locker. Used to
- * support the db.currentOp view. This structure is not thread-safe and ideally should
- * be used only for obtaining the necessary information and then discarded instead of
- * reused.
- */
- struct LockerInfo {
- // List of high-level locks held by this locker, sorted by ResourceId
- std::vector<OneLock> locks;
-
- // If isValid(), then what lock this particular locker is sleeping on
- ResourceId waitingResource;
-
- // Lock timing statistics
- SingleThreadedLockStats stats;
- };
-
- virtual void getLockerInfo(LockerInfo* lockerInfo) const = 0;
-
- /**
- * LockSnapshot captures the state of all resources that are locked, what modes they're
- * locked in, and how many times they've been locked in that mode.
- */
- struct LockSnapshot {
- // The global lock is handled differently from all other locks.
- LockMode globalMode;
-
- // The non-global non-flush locks held, sorted by granularity. That is, locks[i] is
- // coarser or as coarse as locks[i + 1].
- std::vector<OneLock> locks;
- };
-
- /**
- * Retrieves all locks held by this transaction, and what mode they're held in.
- * Stores these locks in 'stateOut', destroying any previous state. Unlocks all locks
- * held by this transaction. This functionality is used for yielding in the MMAPV1
- * storage engine. MMAPV1 uses voluntary/cooperative lock release and reacquisition
- * in order to allow for interleaving of otherwise conflicting long-running operations.
- *
- * This functionality is also used for releasing locks on databases and collections
- * when cursors are dormant and waiting for a getMore request.
- *
- * Returns true if locks are released. It is expected that restoreLockerImpl will be called
- * in the future.
- *
- * Returns false if locks are not released. restoreLockState(...) does not need to be
- * called in this case.
- */
- virtual bool saveLockStateAndUnlock(LockSnapshot* stateOut) = 0;
-
- /**
- * Re-locks all locks whose state was stored in 'stateToRestore'.
- */
- virtual void restoreLockState(const LockSnapshot& stateToRestore) = 0;
-
- //
- // These methods are legacy from LockerImpl and will eventually go away or be converted to
- // calls into the Locker methods
- //
-
- virtual void dump() const = 0;
-
- virtual bool isW() const = 0;
- virtual bool isR() const = 0;
-
- virtual bool isLocked() const = 0;
- virtual bool isWriteLocked() const = 0;
- virtual bool isReadLocked() const = 0;
-
- /**
- * Asserts that the Locker is effectively not in use and resets the locking statistics.
- * This means, there should be no locks on it, no WUOW, etc, so it would be safe to call
- * the destructor or reuse the Locker.
- */
- virtual void assertEmptyAndReset() = 0;
-
- /**
- * Pending means we are currently trying to get a lock (could be the parallel batch writer
- * lock).
- */
- virtual bool hasLockPending() const = 0;
-
- // Used for the replication parallel log op application threads
- virtual void setIsBatchWriter(bool newValue) = 0;
- virtual bool isBatchWriter() const = 0;
-
- /**
- * A string lock is MODE_X or MODE_S.
- * These are incompatible with other locks and therefore are strong.
- */
- virtual bool hasStrongLocks() const = 0;
-
- protected:
- Locker() { }
+ struct OneLock {
+ // What lock resource is held?
+ ResourceId resourceId;
+
+ // In what mode is it held?
+ LockMode mode;
+
+ // Reporting/serialization order is by resourceId, which is the canonical locking order
+ bool operator<(const OneLock& rhs) const {
+ return resourceId < rhs.resourceId;
+ }
};
-} // namespace mongo
+ /**
+ * Returns information and locking statistics for this instance of the locker. Used to
+ * support the db.currentOp view. This structure is not thread-safe and ideally should
+ * be used only for obtaining the necessary information and then discarded instead of
+ * reused.
+ */
+ struct LockerInfo {
+ // List of high-level locks held by this locker, sorted by ResourceId
+ std::vector<OneLock> locks;
+
+ // If isValid(), then what lock this particular locker is sleeping on
+ ResourceId waitingResource;
+
+ // Lock timing statistics
+ SingleThreadedLockStats stats;
+ };
+
+ virtual void getLockerInfo(LockerInfo* lockerInfo) const = 0;
+
+ /**
+ * LockSnapshot captures the state of all resources that are locked, what modes they're
+ * locked in, and how many times they've been locked in that mode.
+ */
+ struct LockSnapshot {
+ // The global lock is handled differently from all other locks.
+ LockMode globalMode;
+
+        // The non-global, non-flush locks held, sorted by granularity. That is, locks[i] is
+        // coarser than or as coarse as locks[i + 1].
+ std::vector<OneLock> locks;
+ };
+
+ /**
+ * Retrieves all locks held by this transaction, and what mode they're held in.
+ * Stores these locks in 'stateOut', destroying any previous state. Unlocks all locks
+ * held by this transaction. This functionality is used for yielding in the MMAPV1
+ * storage engine. MMAPV1 uses voluntary/cooperative lock release and reacquisition
+ * in order to allow for interleaving of otherwise conflicting long-running operations.
+ *
+ * This functionality is also used for releasing locks on databases and collections
+ * when cursors are dormant and waiting for a getMore request.
+ *
+     * Returns true if locks are released. It is expected that restoreLockState will be
+     * called in the future.
+ *
+ * Returns false if locks are not released. restoreLockState(...) does not need to be
+ * called in this case.
+ */
+ virtual bool saveLockStateAndUnlock(LockSnapshot* stateOut) = 0;
+
+ /**
+ * Re-locks all locks whose state was stored in 'stateToRestore'.
+ */
+ virtual void restoreLockState(const LockSnapshot& stateToRestore) = 0;
+
+ //
+ // These methods are legacy from LockerImpl and will eventually go away or be converted to
+ // calls into the Locker methods
+ //
+
+ virtual void dump() const = 0;
+
+ virtual bool isW() const = 0;
+ virtual bool isR() const = 0;
+
+ virtual bool isLocked() const = 0;
+ virtual bool isWriteLocked() const = 0;
+ virtual bool isReadLocked() const = 0;
+
+ /**
+ * Asserts that the Locker is effectively not in use and resets the locking statistics.
+     * This means there should be no locks on it, no WUOW, etc., so it would be safe to call
+ * the destructor or reuse the Locker.
+ */
+ virtual void assertEmptyAndReset() = 0;
+
+ /**
+ * Pending means we are currently trying to get a lock (could be the parallel batch writer
+ * lock).
+ */
+ virtual bool hasLockPending() const = 0;
+
+ // Used for the replication parallel log op application threads
+ virtual void setIsBatchWriter(bool newValue) = 0;
+ virtual bool isBatchWriter() const = 0;
+
+ /**
+     * A strong lock is one held in MODE_X or MODE_S.
+     * These modes are incompatible with other locks and therefore are strong.
+ */
+ virtual bool hasStrongLocks() const = 0;
+
+protected:
+ Locker() {}
+};
+
+} // namespace mongo
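
To make the Locker contract above concrete, the following is a minimal sketch of the
documented lifecycle, assuming a concrete implementation such as DefaultLockerImpl from
lock_state.h; the function name and the resource names are illustrative only, not part
of this change:

    #include "mongo/db/concurrency/lock_state.h"

    namespace mongo {

    // Illustrative only: drives one Locker through lockGlobal/lock/unlock/unlockAll.
    void exampleLockerLifecycle() {
        DefaultLockerImpl locker;

        // First call for this Locker: IX declares a regular write operation.
        invariant(locker.lockGlobal(MODE_IX) == LOCK_OK);

        // Lock down the hierarchy; a database resource id is built from the db name.
        const ResourceId dbId(RESOURCE_DATABASE, std::string("test"));
        invariant(locker.lock(dbId, MODE_IX) == LOCK_OK);

        // A second LOCK_OK on the same resource only bumps its reference count...
        invariant(locker.lock(dbId, MODE_IX) == LOCK_OK);
        invariant(!locker.unlock(dbId));  // ...so this unlock only decrements the count,
        invariant(locker.unlock(dbId));   // and this one actually releases the lock.

        // Yield and reacquire, as used for MMAP V1 yielding and dormant getMore cursors.
        Locker::LockSnapshot snapshot;
        if (locker.saveLockStateAndUnlock(&snapshot)) {
            invariant(!locker.isLocked());
            locker.restoreLockState(snapshot);
        }

        // Matches the initial lockGlobal; true because the last reference was dropped.
        invariant(locker.unlockAll());
    }

    }  // namespace mongo
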
diff --git a/src/mongo/db/concurrency/locker_noop.h b/src/mongo/db/concurrency/locker_noop.h
index 9bfcbc93227..e62c87c862e 100644
--- a/src/mongo/db/concurrency/locker_noop.h
+++ b/src/mongo/db/concurrency/locker_noop.h
@@ -31,144 +31,142 @@
#include "mongo/db/concurrency/locker.h"
namespace mongo {
-
- /**
- * Locker, which cannot be used to lock/unlock resources and just returns true for checks for
- * whether a particular resource is locked. Do not use it for cases where actual locking
- * behaviour is expected or locking is performed.
- */
- class LockerNoop : public Locker {
- public:
- LockerNoop() { }
- virtual LockerId getId() const { invariant(false); }
-
- virtual LockResult lockGlobal(LockMode mode, unsigned timeoutMs) {
- invariant(false);
- }
+/**
+ * Locker which cannot be used to lock/unlock resources; it simply returns true for checks
+ * of whether a particular resource is locked. Do not use it in cases where actual locking
+ * behaviour is expected or locking is performed.
+ */
+class LockerNoop : public Locker {
+public:
+ LockerNoop() {}
- virtual LockResult lockGlobalBegin(LockMode mode) {
- invariant(false);
- }
+ virtual LockerId getId() const {
+ invariant(false);
+ }
- virtual LockResult lockGlobalComplete(unsigned timeoutMs) {
- invariant(false);
- }
+ virtual LockResult lockGlobal(LockMode mode, unsigned timeoutMs) {
+ invariant(false);
+ }
- virtual void lockMMAPV1Flush() {
- invariant(false);
- }
+ virtual LockResult lockGlobalBegin(LockMode mode) {
+ invariant(false);
+ }
- virtual bool unlockAll() {
- invariant(false);
- }
+ virtual LockResult lockGlobalComplete(unsigned timeoutMs) {
+ invariant(false);
+ }
- virtual void downgradeGlobalXtoSForMMAPV1() {
- invariant(false);
- }
+ virtual void lockMMAPV1Flush() {
+ invariant(false);
+ }
- virtual void beginWriteUnitOfWork() {
+ virtual bool unlockAll() {
+ invariant(false);
+ }
- }
+ virtual void downgradeGlobalXtoSForMMAPV1() {
+ invariant(false);
+ }
- virtual void endWriteUnitOfWork() {
+ virtual void beginWriteUnitOfWork() {}
- }
+ virtual void endWriteUnitOfWork() {}
- virtual bool inAWriteUnitOfWork() const {
- invariant(false);
- }
+ virtual bool inAWriteUnitOfWork() const {
+ invariant(false);
+ }
- virtual LockResult lock(ResourceId resId,
- LockMode mode,
- unsigned timeoutMs,
- bool checkDeadlock) {
- invariant(false);
- }
+ virtual LockResult lock(ResourceId resId,
+ LockMode mode,
+ unsigned timeoutMs,
+ bool checkDeadlock) {
+ invariant(false);
+ }
- virtual void downgrade(ResourceId resId, LockMode newMode) {
- invariant(false);
- }
+ virtual void downgrade(ResourceId resId, LockMode newMode) {
+ invariant(false);
+ }
- virtual bool unlock(ResourceId resId) {
- invariant(false);
- }
+ virtual bool unlock(ResourceId resId) {
+ invariant(false);
+ }
- virtual LockMode getLockMode(ResourceId resId) const {
- invariant(false);
- }
+ virtual LockMode getLockMode(ResourceId resId) const {
+ invariant(false);
+ }
- virtual bool isLockHeldForMode(ResourceId resId, LockMode mode) const {
- return true;
- }
+ virtual bool isLockHeldForMode(ResourceId resId, LockMode mode) const {
+ return true;
+ }
- virtual bool isDbLockedForMode(StringData dbName, LockMode mode) const {
- return true;
- }
+ virtual bool isDbLockedForMode(StringData dbName, LockMode mode) const {
+ return true;
+ }
- virtual bool isCollectionLockedForMode(StringData ns, LockMode mode) const {
- return true;
- }
+ virtual bool isCollectionLockedForMode(StringData ns, LockMode mode) const {
+ return true;
+ }
- virtual ResourceId getWaitingResource() const {
- invariant(false);
- }
+ virtual ResourceId getWaitingResource() const {
+ invariant(false);
+ }
- virtual void getLockerInfo(LockerInfo* lockerInfo) const {
- invariant(false);
- }
+ virtual void getLockerInfo(LockerInfo* lockerInfo) const {
+ invariant(false);
+ }
- virtual bool saveLockStateAndUnlock(LockSnapshot* stateOut) {
- invariant(false);
- }
+ virtual bool saveLockStateAndUnlock(LockSnapshot* stateOut) {
+ invariant(false);
+ }
- virtual void restoreLockState(const LockSnapshot& stateToRestore) {
- invariant(false);
- }
+ virtual void restoreLockState(const LockSnapshot& stateToRestore) {
+ invariant(false);
+ }
- virtual void dump() const {
- invariant(false);
- }
+ virtual void dump() const {
+ invariant(false);
+ }
- virtual bool isW() const {
- invariant(false);
- }
+ virtual bool isW() const {
+ invariant(false);
+ }
- virtual bool isR() const {
- invariant(false);
- }
+ virtual bool isR() const {
+ invariant(false);
+ }
- virtual bool isLocked() const {
- invariant(false);
- }
+ virtual bool isLocked() const {
+ invariant(false);
+ }
- virtual bool isWriteLocked() const {
- return false;
- }
+ virtual bool isWriteLocked() const {
+ return false;
+ }
- virtual bool isReadLocked() const {
- invariant(false);
- }
+ virtual bool isReadLocked() const {
+ invariant(false);
+ }
- virtual void assertEmptyAndReset() {
- invariant(false);
- }
+ virtual void assertEmptyAndReset() {
+ invariant(false);
+ }
- virtual bool hasLockPending() const {
- invariant(false);
- }
+ virtual bool hasLockPending() const {
+ invariant(false);
+ }
- virtual void setIsBatchWriter(bool newValue) {
- invariant(false);
- }
+ virtual void setIsBatchWriter(bool newValue) {
+ invariant(false);
+ }
- virtual bool isBatchWriter() const {
- invariant(false);
- }
+ virtual bool isBatchWriter() const {
+ invariant(false);
+ }
- virtual bool hasStrongLocks() const {
- return false;
- }
- };
+ virtual bool hasStrongLocks() const {
+ return false;
+ }
+};
-} // namespace mongo
+} // namespace mongo
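
By contrast, LockerNoop is only suitable for code paths that assert on lock state without
taking locks, such as tests; a hypothetical sketch (names are illustrative):

    #include "mongo/db/concurrency/locker_noop.h"

    namespace mongo {

    // Illustrative only: the hierarchy checks unconditionally report success...
    void exampleLockerNoop() {
        LockerNoop locker;
        invariant(locker.isDbLockedForMode("test", MODE_IX));
        invariant(locker.isCollectionLockedForMode("test.coll", MODE_IX));

        // ...but any attempt at real locking fails an invariant at runtime:
        // locker.lockGlobal(MODE_IX);  // would abort via invariant(false)
    }

    }  // namespace mongo
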
diff --git a/src/mongo/db/concurrency/write_conflict_exception.cpp b/src/mongo/db/concurrency/write_conflict_exception.cpp
index 1bd6859e0c2..a0547976100 100644
--- a/src/mongo/db/concurrency/write_conflict_exception.cpp
+++ b/src/mongo/db/concurrency/write_conflict_exception.cpp
@@ -37,48 +37,37 @@
namespace mongo {
- bool WriteConflictException::trace = false;
-
- WriteConflictException::WriteConflictException()
- : DBException( "WriteConflict", ErrorCodes::WriteConflict ) {
-
- if ( trace ) {
- printStackTrace();
- }
+bool WriteConflictException::trace = false;
+WriteConflictException::WriteConflictException()
+ : DBException("WriteConflict", ErrorCodes::WriteConflict) {
+ if (trace) {
+ printStackTrace();
}
+}
- void WriteConflictException::logAndBackoff(int attempt,
- StringData operation,
- StringData ns) {
-
- LOG(1) << "Caught WriteConflictException doing " << operation
- << " on " << ns
- << ", attempt: " << attempt << " retrying";
-
- // All numbers below chosen by guess and check against a few random benchmarks.
- if (attempt < 4) {
- // no-op
- }
- else if (attempt < 10) {
- sleepmillis(1);
- }
- else if (attempt < 100) {
- sleepmillis(5);
- }
- else {
- sleepmillis(10);
- }
-
- }
+void WriteConflictException::logAndBackoff(int attempt, StringData operation, StringData ns) {
+ LOG(1) << "Caught WriteConflictException doing " << operation << " on " << ns
+ << ", attempt: " << attempt << " retrying";
- namespace {
- // for WriteConflictException
- ExportedServerParameter<bool> TraceWCExceptionsSetting(ServerParameterSet::getGlobal(),
- "traceWriteConflictExceptions",
- &WriteConflictException::trace,
- true, // allowedToChangeAtStartup
- true); // allowedToChangeAtRuntime
+ // All numbers below chosen by guess and check against a few random benchmarks.
+ if (attempt < 4) {
+ // no-op
+ } else if (attempt < 10) {
+ sleepmillis(1);
+ } else if (attempt < 100) {
+ sleepmillis(5);
+ } else {
+ sleepmillis(10);
}
+}
+namespace {
+// for WriteConflictException
+ExportedServerParameter<bool> TraceWCExceptionsSetting(ServerParameterSet::getGlobal(),
+ "traceWriteConflictExceptions",
+ &WriteConflictException::trace,
+ true, // allowedToChangeAtStartup
+ true); // allowedToChangeAtRuntime
+}
}
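
Note that the schedule implemented by logAndBackoff above is stepped rather than truly
exponential. A dependency-free sketch of the same schedule, for reference only:

    #include <chrono>
    #include <thread>

    // Mirrors the attempt thresholds in WriteConflictException::logAndBackoff:
    // attempts 0-3 sleep 0ms, 4-9 sleep 1ms, 10-99 sleep 5ms, 100+ sleep 10ms.
    inline void backoffForAttempt(int attempt) {
        int ms;
        if (attempt < 4) {
            ms = 0;
        } else if (attempt < 10) {
            ms = 1;
        } else if (attempt < 100) {
            ms = 5;
        } else {
            ms = 10;
        }
        if (ms > 0) {
            std::this_thread::sleep_for(std::chrono::milliseconds(ms));
        }
    }
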
diff --git a/src/mongo/db/concurrency/write_conflict_exception.h b/src/mongo/db/concurrency/write_conflict_exception.h
index e94eab741d7..5c7bcd5e87c 100644
--- a/src/mongo/db/concurrency/write_conflict_exception.h
+++ b/src/mongo/db/concurrency/write_conflict_exception.h
@@ -34,45 +34,51 @@
#include "mongo/util/assert_util.h"
-#define MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN do { int wcr__Attempts = 0; do { try
-#define MONGO_WRITE_CONFLICT_RETRY_LOOP_END(PTXN, OPSTR, NSSTR) \
- catch (const ::mongo::WriteConflictException &wce) { \
- const OperationContext* ptxn = (PTXN); \
- ++CurOp::get(ptxn)->debug().writeConflicts; \
- wce.logAndBackoff(wcr__Attempts, (OPSTR), (NSSTR)); \
- ++wcr__Attempts; \
- ptxn->recoveryUnit()->abandonSnapshot(); \
- continue; \
- } \
- break; \
- } while (true); } while (false);
+#define MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN \
+ do { \
+ int wcr__Attempts = 0; \
+ do { \
+ try
+#define MONGO_WRITE_CONFLICT_RETRY_LOOP_END(PTXN, OPSTR, NSSTR) \
+ catch (const ::mongo::WriteConflictException& wce) { \
+ const OperationContext* ptxn = (PTXN); \
+ ++CurOp::get(ptxn)->debug().writeConflicts; \
+ wce.logAndBackoff(wcr__Attempts, (OPSTR), (NSSTR)); \
+ ++wcr__Attempts; \
+ ptxn->recoveryUnit()->abandonSnapshot(); \
+ continue; \
+ } \
+ break; \
+ } \
+ while (true) \
+ ; \
+ } \
+ while (false) \
+ ;
namespace mongo {
+/**
+ * This is thrown if, during a write, two or more operations conflict with each other.
+ * For example if two operations get the same version of a document, and then both try to
+ * modify that document, this exception will get thrown by one of them.
+ */
+class WriteConflictException : public DBException {
+public:
+ WriteConflictException();
+
/**
- * This is thrown if during a write, two or more operations conflict with each other.
- * For example if two operations get the same version of a document, and then both try to
- * modify that document, this exception will get thrown by one of them.
+     * Will log a message if sensible and will do a brief, staged backoff to make sure
+     * we don't hammer the same doc over and over.
+     * @param attempt - what attempt is this, 1 based
+     * @param operation - e.g. "update"
+     * @param ns - the namespace being operated on
*/
- class WriteConflictException : public DBException {
- public:
- WriteConflictException();
-
- /**
- * Will log a message if sensible and will do an exponential backoff to make sure
- * we don't hammer the same doc over and over.
- * @param attempt - what attempt is this, 1 based
- * @param operation - e.g. "update"
- */
- static void logAndBackoff(int attempt,
- StringData operation,
- StringData ns);
-
- /**
- * If true, will call printStackTrace on every WriteConflictException created.
- * Can be set via setParameter named traceWriteConflictExceptions.
- */
- static bool trace;
- };
+ static void logAndBackoff(int attempt, StringData operation, StringData ns);
+ /**
+ * If true, will call printStackTrace on every WriteConflictException created.
+ * Can be set via setParameter named traceWriteConflictExceptions.
+ */
+ static bool trace;
+};
}
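
A sketch of how the retry-loop macros above are meant to wrap a storage operation follows;
the attemptWrite helper is hypothetical and stands in for any call that may throw
WriteConflictException (e.g., an insert inside a WriteUnitOfWork):

    #include <string>

    #include "mongo/db/concurrency/write_conflict_exception.h"
    #include "mongo/db/curop.h"
    #include "mongo/db/operation_context.h"

    namespace mongo {

    // Hypothetical single-attempt write, declared elsewhere.
    void attemptWrite(OperationContext* txn);

    void writeWithRetry(OperationContext* txn, const std::string& ns) {
        MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
            attemptWrite(txn);
        }
        MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "exampleWrite", ns);
        // On WriteConflictException the macros bump the writeConflicts counter, call
        // logAndBackoff, abandon the storage snapshot, and retry; any other exception
        // propagates out unchanged.
    }

    }  // namespace mongo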