author    Geert Bosch <geert@mongodb.com>    2016-03-10 11:48:13 -0500
committer Geert Bosch <geert@mongodb.com>    2016-03-10 11:48:13 -0500
commit    a8c1b9c48cea581b07c3d777c91adfd7c6a49ccd (patch)
tree      cb1c874f40ed20d172e3c4348cb661e869d5ade1 /src/mongo
parent    44d8a4dd0f8f27b72e2040e2bde74c552739eb23 (diff)
download  mongo-a8c1b9c48cea581b07c3d777c91adfd7c6a49ccd.tar.gz
Revert "SERVER-20524: Add new RESOURCE_MUTEX type for use as recursive readers/writers lock"
This reverts commit 67e3d403f80367b3fb648b84bd87070675045815.
Diffstat (limited to 'src/mongo')
-rw-r--r--  src/mongo/db/concurrency/d_concurrency.cpp       |   6
-rw-r--r--  src/mongo/db/concurrency/d_concurrency.h         |  46
-rw-r--r--  src/mongo/db/concurrency/d_concurrency_test.cpp  | 217
-rw-r--r--  src/mongo/db/concurrency/lock_manager.cpp        |   3
-rw-r--r--  src/mongo/db/concurrency/lock_manager_defs.h     |   5
-rw-r--r--  src/mongo/db/concurrency/lock_state.cpp          |  39
-rw-r--r--  src/mongo/db/concurrency/lock_state.h            |   2
-rw-r--r--  src/mongo/db/concurrency/lock_state_test.cpp     |  66
-rw-r--r--  src/mongo/db/concurrency/locker.h                |  12
9 files changed, 88 insertions, 308 deletions
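
For context on what this revert removes: the deleted d_concurrency.h hunk below documents ResourceMutex as a general-purpose readers/writers lock acquired through SharedLock and ExclusiveLock. A minimal usage sketch of that (now removed) API, based only on the deleted comments and constructors; the readPath/writePath wrappers are hypothetical, not MongoDB code:

// Illustrative only: how the removed API was meant to be used, per the deleted
// doc comments. readPath/writePath are hypothetical wrappers, not MongoDB code.
Lock::ResourceMutex resourceMutex;  // each instance allocates a fresh RESOURCE_MUTEX ResourceId

void readPath(Locker* locker) {
    Lock::SharedLock lk(locker, resourceMutex);  // MODE_IS: conflicts only with exclusive holders
    invariant(lk.isLocked());
    // ... read state protected by resourceMutex ...
}

void writePath(Locker* locker) {
    Lock::ExclusiveLock lk(locker, resourceMutex);  // MODE_X: conflicts with all other holders
    invariant(lk.isLocked());
    // ... mutate state protected by resourceMutex ...
}

The DConcurrency.ResourceMutex test removed further down exercises exactly this pairing, including its interaction with TempRelease.
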
diff --git a/src/mongo/db/concurrency/d_concurrency.cpp b/src/mongo/db/concurrency/d_concurrency.cpp
index a9021f310cb..2477b58f02d 100644
--- a/src/mongo/db/concurrency/d_concurrency.cpp
+++ b/src/mongo/db/concurrency/d_concurrency.cpp
@@ -53,12 +53,6 @@ Lock::TempRelease::~TempRelease() {
}
}
-namespace {
-AtomicWord<uint64_t> lastResourceMutexHash{0};
-} // namespace
-
-Lock::ResourceMutex::ResourceMutex() : _rid(RESOURCE_MUTEX, lastResourceMutexHash.fetchAndAdd(1)) {}
-
Lock::GlobalLock::GlobalLock(Locker* locker)
: _locker(locker), _result(LOCK_INVALID), _pbwm(locker, resourceIdParallelBatchWriterMode) {}
diff --git a/src/mongo/db/concurrency/d_concurrency.h b/src/mongo/db/concurrency/d_concurrency.h
index bbaae4630b0..b70187b6bc9 100644
--- a/src/mongo/db/concurrency/d_concurrency.h
+++ b/src/mongo/db/concurrency/d_concurrency.h
@@ -100,52 +100,6 @@ public:
LockResult _result;
};
- class SharedLock;
- class ExclusiveLock;
-
- /**
- * For use as general mutex or readers/writers lock, outside the general multi-granularity
- * model. A ResourceMutex is not affected by yielding/temprelease and two phase locking
- * semantics inside WUOWs. Lock with ResourceLock, SharedLock or ExclusiveLock. Uses same
- * fairness as other LockManager locks.
- */
- class ResourceMutex {
- public:
- ResourceMutex();
-
- private:
- friend class Lock::SharedLock;
- friend class Lock::ExclusiveLock;
-
- /**
- * Each instantiation of this class allocates a new ResourceId.
- */
- ResourceId rid() const {
- return _rid;
- }
-
- const ResourceId _rid;
- };
-
- /**
- * Obtains a ResourceMutex for exclusive use.
- */
- class ExclusiveLock : public ResourceLock {
- public:
- ExclusiveLock(Locker* locker, ResourceMutex mutex)
- : ResourceLock(locker, mutex.rid(), MODE_X) {}
- };
-
- /**
- * Obtains a ResourceMutex for shared/non-exclusive use. This uses MODE_IS rather than MODE_S
- * to take advantage of optimizations in the lock manager for intent modes. This is OK as
- * this just has to conflict with exclusive locks.
- */
- class SharedLock : public ResourceLock {
- public:
- SharedLock(Locker* locker, ResourceMutex mutex)
- : ResourceLock(locker, mutex.rid(), MODE_IS) {}
- };
/**
* Global lock.
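
The deleted SharedLock comment above argues that MODE_IS is sufficient because a ResourceMutex only ever needs to conflict with exclusive holders. A toy sketch of that assumed conflict rule (not the real LockManager compatibility table):

// Toy version of the assumed rule: intent-shared (IS) requests only conflict
// with exclusive (X) holders, so readers never block each other.
bool resourceMutexConflicts(LockMode held, LockMode requested) {
    return held == MODE_X || requested == MODE_X;
}
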
diff --git a/src/mongo/db/concurrency/d_concurrency_test.cpp b/src/mongo/db/concurrency/d_concurrency_test.cpp
index 373a99ccb57..855a17d99c6 100644
--- a/src/mongo/db/concurrency/d_concurrency_test.cpp
+++ b/src/mongo/db/concurrency/d_concurrency_test.cpp
@@ -26,159 +26,18 @@
* it in the license file.
*/
-#define MONGO_LOG_DEFAULT_COMPONENT ::mongo::logger::LogComponent::kDefault
-
#include "mongo/platform/basic.h"
#include <string>
-#include <vector>
#include "mongo/db/concurrency/d_concurrency.h"
#include "mongo/db/concurrency/lock_manager_test_help.h"
-
-#include "mongo/stdx/functional.h"
-#include "mongo/stdx/thread.h"
#include "mongo/unittest/unittest.h"
-#include "mongo/util/log.h"
-#include "mongo/util/debug_util.h"
namespace mongo {
using std::string;
-namespace {
-const int kMaxPerfThreads = 16; // max number of threads to use for lock perf
-const int kMinPerfMillis = 30; // min duration for reliable timing
-
-/**
- * Calls fn the given number of iterations, spread out over up to maxThreads threads.
- * The threadNr passed is an integer between 0 and maxThreads exclusive. Logs timing
- * statistics for all power-of-two thread counts from 1 up to maxThreads.
- */
-void perfTest(stdx::function<void(int threadNr)> fn, int maxThreads) {
- for (int numThreads = 1; numThreads <= maxThreads; numThreads *= 2) {
- std::vector<stdx::thread> threads;
-
- AtomicInt32 ready{0};
- AtomicInt64 elapsedNanos{0};
- AtomicInt64 timedIters{0};
-
- for (int threadId = 0; threadId < numThreads; threadId++)
- threads.emplace_back([&, threadId]() {
- // Busy-wait until everybody is ready
- ready.fetchAndAdd(1);
- while (ready.load() < numThreads) {
- }
-
- uint64_t micros = 0;
- int iters;
- // Ensure at least 16 iterations are done and at least kMinPerfMillis milliseconds are timed
- for (iters = 16; iters < (1 << 30) && micros < kMinPerfMillis * 1000; iters *= 2) {
- // Measure the number of loops
- Timer t;
-
- for (int i = 0; i < iters; i++)
- fn(threadId);
-
- micros = t.micros();
- }
-
- elapsedNanos.fetchAndAdd(micros * 1000);
- timedIters.fetchAndAdd(iters);
- });
-
- for (auto& thread : threads)
- thread.join();
-
- log() << numThreads
- << " threads took: " << elapsedNanos.load() / static_cast<double>(timedIters.load())
- << " ns per call" << (kDebugBuild ? " (DEBUG BUILD!)" : "");
- }
-}
-} // namespace
-
-TEST(DConcurrency, ResourceMutex) {
- Lock::ResourceMutex mtx;
- DefaultLockerImpl locker1;
- DefaultLockerImpl locker2;
- DefaultLockerImpl locker3;
-
- struct State {
- void check(int n) {
- ASSERT_EQ(step.load(), n);
- }
- void finish(int n) {
- auto actual = step.fetchAndAdd(1);
- ASSERT_EQ(actual, n);
- }
- void waitFor(stdx::function<bool()> cond) {
- while (!cond())
- sleepmillis(0);
- }
- void waitFor(int n) {
- waitFor([this, n]() { return this->step.load() == n; });
- }
- AtomicInt32 step{0};
- } state;
-
- stdx::thread t1([&]() {
- // Step 0: Single thread acquires shared lock
- state.waitFor(0);
- Lock::SharedLock lk(&locker1, mtx);
- ASSERT(lk.isLocked());
- state.finish(0);
-
- // Step 4: Wait for t2 to regain its shared lock
- {
- // Check that TempRelease does not actually unlock anything
- Lock::TempRelease yield(&locker1);
-
- state.waitFor(4);
- state.waitFor([&locker2]() { return locker2.getWaitingResource().isValid(); });
- state.finish(4);
- }
-
- // Step 5: After t2 becomes blocked, unlock, yielding the mutex to t3
- lk.unlock();
- ASSERT(!lk.isLocked());
- });
- stdx::thread t2([&]() {
- // Step 1: Two threads acquire shared lock
- state.waitFor(1);
- Lock::SharedLock lk(&locker2, mtx);
- ASSERT(lk.isLocked());
- state.finish(1);
-
- // Step 2: Wait for t3 to attempt the exclusive lock
- state.waitFor([&locker3]() { return locker3.getWaitingResource().isValid(); });
- state.finish(2);
-
- // Step 3: Yield shared lock
- lk.unlock();
- ASSERT(!lk.isLocked());
- state.finish(3);
-
- // Step 4: Try to regain the shared lock // transfers control to t1
- lk.lock(MODE_IS);
-
- // Step 6: Check we actually got back the shared lock
- ASSERT(lk.isLocked());
- state.check(6);
- });
- stdx::thread t3([&]() {
- // Step 2: Third thread attempts to acquire exclusive lock
- state.waitFor(2);
- Lock::ExclusiveLock lk(&locker3, mtx); // transfers control to t2
-
- // Step 5: Actually get the exclusive lock
- ASSERT(lk.isLocked());
- state.finish(5);
- });
- t1.join();
- t2.join();
- t3.join();
-}
-
TEST(DConcurrency, GlobalRead) {
MMAPV1LockerImpl ls;
Lock::GlobalRead globalRead(&ls);
@@ -447,80 +306,4 @@ TEST(DConcurrency, IsCollectionLocked_DB_Locked_IX) {
}
}
-// These tests exercise single- and multi-threaded performance of uncontended lock acquisition. It
-// is neither practical nor useful to run them on debug builds.
-
-extern bool _supportsDocLocking;
-namespace {
-/**
- * Temporarily forces setting of the docLockingSupported global for testing purposes.
- */
-class ForceSupportsDocLocking {
-public:
- explicit ForceSupportsDocLocking(bool supported) : _oldSupportsDocLocking(_supportsDocLocking) {
- _supportsDocLocking = supported;
- }
-
- ~ForceSupportsDocLocking() {
- _supportsDocLocking = _oldSupportsDocLocking;
- }
-
-private:
- bool _oldSupportsDocLocking;
-};
-} // namespace
-
-TEST(Locker, PerformanceStdMutex) {
- stdx::mutex mtx;
- perfTest([&](int threadId) { stdx::unique_lock<stdx::mutex> lk(mtx); }, kMaxPerfThreads);
-}
-
-TEST(Locker, PerformanceResourceMutexShared) {
- Lock::ResourceMutex mtx;
- std::array<DefaultLockerImpl, kMaxPerfThreads> locker;
- perfTest([&](int threadId) { Lock::SharedLock lk(&locker[threadId], mtx); }, kMaxPerfThreads);
-}
-
-TEST(Locker, PerformanceResourceMutexExclusive) {
- Lock::ResourceMutex mtx;
- std::array<DefaultLockerImpl, kMaxPerfThreads> locker;
- perfTest([&](int threadId) { Lock::ExclusiveLock lk(&locker[threadId], mtx); },
- kMaxPerfThreads);
-}
-
-TEST(Locker, PerformanceCollectionIntentSharedLock) {
- std::array<DefaultLockerImpl, kMaxPerfThreads> locker;
- ForceSupportsDocLocking supported(true);
- perfTest([&](int threadId) {
- Lock::DBLock dlk(&locker[threadId], "test", MODE_IS);
- Lock::CollectionLock clk(&locker[threadId], "test.coll", MODE_IS);
- }, kMaxPerfThreads);
-}
-
-TEST(Locker, PerformanceCollectionIntentExclusiveLock) {
- std::array<DefaultLockerImpl, kMaxPerfThreads> locker;
- ForceSupportsDocLocking supported(true);
- perfTest([&](int threadId) {
- Lock::DBLock dlk(&locker[threadId], "test", MODE_IX);
- Lock::CollectionLock clk(&locker[threadId], "test.coll", MODE_IX);
- }, kMaxPerfThreads);
-}
-
-TEST(Locker, PerformanceMMAPv1CollectionSharedLock) {
- std::array<MMAPV1LockerImpl, kMaxPerfThreads> locker;
- ForceSupportsDocLocking supported(false);
- perfTest([&](int threadId) {
- Lock::DBLock dlk(&locker[threadId], "test", MODE_IS);
- Lock::CollectionLock clk(&locker[threadId], "test.coll", MODE_S);
- }, kMaxPerfThreads);
-}
-
-TEST(Locker, PerformanceMMAPv1CollectionExclusive) {
- std::array<MMAPV1LockerImpl, kMaxPerfThreads> locker;
- ForceSupportsDocLocking supported(false);
- perfTest([&](int threadId) {
- Lock::DBLock dlk(&locker[threadId], "test", MODE_IX);
- Lock::CollectionLock clk(&locker[threadId], "test.coll", MODE_X);
- }, kMaxPerfThreads);
-}
} // namespace mongo
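
The deleted perfTest helper above times fn by doubling the iteration count until at least kMinPerfMillis of wall time has been measured, then reporting nanoseconds per call. A standalone sketch of that doubling-until-stable timing idea, using std::chrono and illustrative names rather than the removed MongoDB utilities:

// Minimal sketch of the timing idea behind the removed perfTest helper: double
// the iteration count until the measured window is long enough to be reliable,
// then report nanoseconds per call. Names here are illustrative.
#include <chrono>
#include <cstdint>

template <typename Fn>
double nanosPerCall(Fn fn, int minMillis = 30) {
    using namespace std::chrono;
    std::int64_t iters = 16;
    for (;;) {
        auto start = high_resolution_clock::now();
        for (std::int64_t i = 0; i < iters; i++)
            fn();
        auto elapsed = duration_cast<nanoseconds>(high_resolution_clock::now() - start);
        if (elapsed >= milliseconds(minMillis) || iters >= (1 << 30))
            return static_cast<double>(elapsed.count()) / static_cast<double>(iters);
        iters *= 2;  // not enough signal yet; double the work and retry
    }
}

The removed helper additionally fanned this measurement out over power-of-two thread counts and averaged the per-call cost across threads.
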
diff --git a/src/mongo/db/concurrency/lock_manager.cpp b/src/mongo/db/concurrency/lock_manager.cpp
index cb31e740c91..b2b04e788ef 100644
--- a/src/mongo/db/concurrency/lock_manager.cpp
+++ b/src/mongo/db/concurrency/lock_manager.cpp
@@ -103,7 +103,8 @@ uint32_t modeMask(LockMode mode) {
* Maps the resource id to a human-readable string.
*/
static const char* ResourceTypeNames[] = {
- "Invalid", "Global", "MMAPV1Journal", "Database", "Collection", "Metadata", "Mutex"};
+ "Invalid", "Global", "MMAPV1Journal", "Database", "Collection", "Metadata",
+};
// Ensure we do not add new types without updating the names array
static_assert((sizeof(ResourceTypeNames) / sizeof(ResourceTypeNames[0])) == ResourceTypesCount,
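
The static_assert in this hunk is the guard that forces ResourceTypeNames and the ResourceType enum to stay in sync, which is why the revert shrinks both together. A tiny standalone illustration of the same pattern, with hypothetical names:

// Standalone illustration of the enum/name-table guard used above; the names
// here are hypothetical, not the MongoDB ones.
enum Color { kRed, kGreen, kBlue, ColorCount };
static const char* ColorNames[] = {"Red", "Green", "Blue"};
static_assert(sizeof(ColorNames) / sizeof(ColorNames[0]) == ColorCount,
              "ColorNames must be updated when Color gains or loses an entry");
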
diff --git a/src/mongo/db/concurrency/lock_manager_defs.h b/src/mongo/db/concurrency/lock_manager_defs.h
index 7b9698b8940..48fcb073bbe 100644
--- a/src/mongo/db/concurrency/lock_manager_defs.h
+++ b/src/mongo/db/concurrency/lock_manager_defs.h
@@ -152,14 +152,11 @@ enum ResourceType {
RESOURCE_GLOBAL, // Used for mode changes or global exclusive operations
RESOURCE_MMAPV1_FLUSH, // Necessary only for the MMAPv1 engine
- // Generic resources, used for multi-granularity locking, together with RESOURCE_GLOBAL
+ // Generic resources
RESOURCE_DATABASE,
RESOURCE_COLLECTION,
RESOURCE_METADATA,
- // Resource type used for locking general resources not related to the storage hierarchy.
- RESOURCE_MUTEX,
-
// Counts the rest. Always insert new resource types above this entry.
ResourceTypesCount
};
diff --git a/src/mongo/db/concurrency/lock_state.cpp b/src/mongo/db/concurrency/lock_state.cpp
index b9712046dce..a1da8b0075e 100644
--- a/src/mongo/db/concurrency/lock_state.cpp
+++ b/src/mongo/db/concurrency/lock_state.cpp
@@ -134,19 +134,12 @@ PartitionedInstanceWideLockStats globalStats;
bool shouldDelayUnlock(ResourceId resId, LockMode mode) {
// Global and flush lock are not used to protect transactional resources and as such, they
// need to be acquired and released when requested.
- switch (resId.getType()) {
- case RESOURCE_GLOBAL:
- case RESOURCE_MMAPV1_FLUSH:
- case RESOURCE_MUTEX:
- return false;
-
- case RESOURCE_COLLECTION:
- case RESOURCE_DATABASE:
- case RESOURCE_METADATA:
- break;
+ if (resId.getType() == RESOURCE_GLOBAL) {
+ return false;
+ }
- default:
- MONGO_UNREACHABLE;
+ if (resId == resourceIdMMAPV1Flush) {
+ return false;
}
switch (mode) {
@@ -159,7 +152,7 @@ bool shouldDelayUnlock(ResourceId resId, LockMode mode) {
return false;
default:
- MONGO_UNREACHABLE;
+ invariant(false);
}
}
@@ -384,9 +377,9 @@ bool LockerImpl<IsForMMAPV1>::unlockGlobal() {
LockRequestsMap::Iterator it = _requests.begin();
while (!it.finished()) {
// If we're here we should only have one reference to any lock. It is a programming
- // error for any lock used with multi-granularity locking to have more references than
- // the global lock, because every scope starts by calling lockGlobal.
- if (it.key().getType() == RESOURCE_GLOBAL || it.key().getType() == RESOURCE_MUTEX) {
+ // error for any lock to have more references than the global lock, because every
+ // scope starts by calling lockGlobal.
+ if (it.key().getType() == RESOURCE_GLOBAL) {
it.next();
} else {
invariant(_unlockImpl(&it));
@@ -580,10 +573,8 @@ bool LockerImpl<IsForMMAPV1>::saveLockStateAndUnlock(Locker::LockSnapshot* state
// lock goes along with it) so we store it separately from the more pedestrian locks.
LockRequestsMap::Iterator globalRequest = _requests.find(resourceIdGlobal);
if (!globalRequest) {
- // If there's no global lock there isn't really anything to do. Check that.
- for (auto it = _requests.begin(); !it.finished(); it.next()) {
- invariant(it.key().getType() == RESOURCE_MUTEX);
- }
+ // If there's no global lock there isn't really anything to do.
+ invariant(_requests.empty());
return false;
}
@@ -601,9 +592,6 @@ bool LockerImpl<IsForMMAPV1>::saveLockStateAndUnlock(Locker::LockSnapshot* state
// Next, the non-global locks.
for (LockRequestsMap::Iterator it = _requests.begin(); !it.finished(); it.next()) {
const ResourceId resId = it.key();
- const ResourceType resType = resId.getType();
- if (resType == RESOURCE_MUTEX)
- continue;
// We should never have to save and restore metadata locks.
invariant((IsForMMAPV1 && (resourceIdMMAPV1Flush == resId)) ||
@@ -684,7 +672,7 @@ LockResult LockerImpl<IsForMMAPV1>::lockBegin(ResourceId resId, LockMode mode) {
request->enqueueAtFront = true;
request->compatibleFirst = true;
}
- } else if (resType != RESOURCE_MUTEX) {
+ } else {
// These are all sanity checks that the global and flush locks are always acquired
// before any other lock has been acquired and they must be in sync with the nesting.
DEV {
@@ -722,8 +710,7 @@ LockResult LockerImpl<IsForMMAPV1>::lockComplete(ResourceId resId,
// DB lock, while holding the flush lock, so it has to be released. This is only
// correct to do if not in a write unit of work.
const bool yieldFlushLock = IsForMMAPV1 && !inAWriteUnitOfWork() &&
- resId.getType() != RESOURCE_GLOBAL && resId.getType() != RESOURCE_MUTEX &&
- resId != resourceIdMMAPV1Flush;
+ resId.getType() != RESOURCE_GLOBAL && resId != resourceIdMMAPV1Flush;
if (yieldFlushLock) {
invariant(unlock(resourceIdMMAPV1Flush));
}
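
After the revert, shouldDelayUnlock only special-cases the global and MMAPv1 flush locks; everything else follows two-phase locking, where write-mode locks are held until the WriteUnitOfWork resolves. A condensed sketch of that decision; the exact MODE_* case split is assumed from the surrounding hunk rather than shown in full here:

// Condensed sketch; not a verbatim copy of the reverted function.
bool shouldDelayUnlockSketch(ResourceId resId, LockMode mode) {
    // Global and MMAPv1 flush locks are not two-phase: release them when asked.
    if (resId.getType() == RESOURCE_GLOBAL || resId == resourceIdMMAPV1Flush)
        return false;

    switch (mode) {
        case MODE_X:
        case MODE_IX:
            return true;   // assumed: write locks live until the WriteUnitOfWork resolves
        case MODE_S:
        case MODE_IS:
            return false;  // assumed: read locks can be released immediately
        default:
            invariant(false);
            return false;  // unreachable
    }
}
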
diff --git a/src/mongo/db/concurrency/lock_state.h b/src/mongo/db/concurrency/lock_state.h
index f66b3fc3bf7..b876e8badd4 100644
--- a/src/mongo/db/concurrency/lock_state.h
+++ b/src/mongo/db/concurrency/lock_state.h
@@ -181,7 +181,7 @@ private:
/**
* The main functionality of the unlock method, except accepts iterator in order to avoid
- * additional lookups during unlockGlobal. Frees locks immediately, so must not be called from
+ * additional lookups during unlockGlobal. Frees locks immediately, so must not be called from
* inside a WUOW.
*/
bool _unlockImpl(LockRequestsMap::Iterator* it);
diff --git a/src/mongo/db/concurrency/lock_state_test.cpp b/src/mongo/db/concurrency/lock_state_test.cpp
index 2a80c0b4a42..beebcaf6abc 100644
--- a/src/mongo/db/concurrency/lock_state_test.cpp
+++ b/src/mongo/db/concurrency/lock_state_test.cpp
@@ -40,6 +40,10 @@
#include "mongo/util/timer.h"
namespace mongo {
+namespace {
+const int NUM_PERF_ITERS = 1000 * 1000; // number of iterations to use for lock perf
+}
+
TEST(LockerImpl, LockNoConflict) {
const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
@@ -296,4 +300,66 @@ TEST(LockerImpl, CanceledDeadlockUnblocks) {
ASSERT(locker2.unlockGlobal());
ASSERT(locker3.unlockGlobal());
}
+
+
+// These two tests exercise single-threaded performance of uncontended lock acquisition. It
+// is not practical to run them on debug builds.
+#ifndef MONGO_CONFIG_DEBUG_BUILD
+
+TEST(Locker, PerformanceBoostSharedMutex) {
+ for (int numLockers = 1; numLockers <= 64; numLockers = numLockers * 2) {
+ stdx::mutex mtx;
+
+ // Do some warm-up loops
+ for (int i = 0; i < 1000; i++) {
+ mtx.lock();
+ mtx.unlock();
+ }
+
+ // Measure the number of loops
+ //
+ Timer t;
+
+ for (int i = 0; i < NUM_PERF_ITERS; i++) {
+ mtx.lock();
+ mtx.unlock();
+ }
+
+ log() << numLockers << " locks took: "
+ << static_cast<double>(t.micros()) * 1000.0 / static_cast<double>(NUM_PERF_ITERS)
+ << " ns";
+ }
+}
+
+TEST(Locker, PerformanceLocker) {
+ for (int numLockers = 1; numLockers <= 64; numLockers = numLockers * 2) {
+ std::vector<std::shared_ptr<LockerForTests>> lockers(numLockers);
+ for (int i = 0; i < numLockers; i++) {
+ lockers[i].reset(new LockerForTests(MODE_S));
+ }
+
+ DefaultLockerImpl locker;
+
+ // Do some warm-up loops
+ for (int i = 0; i < 1000; i++) {
+ locker.lockGlobal(MODE_IS);
+ locker.unlockGlobal();
+ }
+
+ // Measure the number of loops
+ Timer t;
+
+ for (int i = 0; i < NUM_PERF_ITERS; i++) {
+ locker.lockGlobal(MODE_IS);
+ locker.unlockGlobal();
+ }
+
+ log() << numLockers << " locks took: "
+ << static_cast<double>(t.micros()) * 1000.0 / static_cast<double>(NUM_PERF_ITERS)
+ << " ns";
+ }
+}
+
+#endif // MONGO_CONFIG_DEBUG_BUILD
+
} // namespace mongo
diff --git a/src/mongo/db/concurrency/locker.h b/src/mongo/db/concurrency/locker.h
index a858a5b0d79..3e50e41b545 100644
--- a/src/mongo/db/concurrency/locker.h
+++ b/src/mongo/db/concurrency/locker.h
@@ -111,8 +111,7 @@ public:
/**
* Decrements the reference count on the global lock. If the reference count on the
- * global lock hits zero, the transaction is over, and unlockGlobal unlocks all other locks
- * except for RESOURCE_MUTEX locks.
+ * global lock hits zero, the transaction is over, and unlockGlobal unlocks all other locks.
*
* @return true if this is the last endTransaction call (i.e., the global lock was
* released); false if there are still references on the global lock. This value
@@ -252,12 +251,11 @@ public:
};
/**
- * Retrieves all locks held by this transaction, other than RESOURCE_MUTEX locks, and what mode
- * they're held in.
+ * Retrieves all locks held by this transaction, and what mode they're held in.
* Stores these locks in 'stateOut', destroying any previous state. Unlocks all locks
- * held by this transaction. This functionality is used for yielding, which is
- * voluntary/cooperative lock release and reacquisition in order to allow for interleaving
- * of otherwise conflicting long-running operations.
+ * held by this transaction. This functionality is used for yielding in the MMAPV1
+ * storage engine. MMAPV1 uses voluntary/cooperative lock release and reacquisition
+ * in order to allow for interleaving of otherwise conflicting long-running operations.
*
* This functionality is also used for releasing locks on databases and collections
* when cursors are dormant and waiting for a getMore request.
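
The comment above describes the yield protocol: saveLockStateAndUnlock records and drops the held locks so long-running operations can interleave, and the locks are later reacquired. A rough sketch of the calling pattern; restoreLockState and its exact signature are assumed from the same header, since this diff does not show it:

// Rough sketch of the yield pattern the comment describes; restoreLockState()
// is assumed, as this diff only documents saveLockStateAndUnlock().
void yieldForLongOperation(Locker* locker) {
    Locker::LockSnapshot snapshot;
    if (!locker->saveLockStateAndUnlock(&snapshot)) {
        return;  // nothing was held (no global lock), so there is nothing to restore
    }
    // ... other operations may interleave here while our locks are released ...
    locker->restoreLockState(snapshot);  // reacquire exactly what was saved
}
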