summaryrefslogtreecommitdiff
path: root/src/mongo/db/concurrency
diff options
context:
space:
mode:
authorCharlie Swanson <charlie.swanson@mongodb.com>2017-03-02 13:54:24 -0500
committerCharlie Swanson <charlie.swanson@mongodb.com>2017-03-15 11:03:44 -0400
commitf05b9437fbdc53deecf55ed3c20e36af3d733953 (patch)
tree8b66acc133f83f27ef00f31600538f225ec2746a /src/mongo/db/concurrency
parenta1f15dfd788a78da77bda6675132d4144691fce1 (diff)
downloadmongo-f05b9437fbdc53deecf55ed3c20e36af3d733953.tar.gz
SERVER-22541 Refactor RAII locking helpers.
Removes the class 'ScopedTransaction' and moves the responsibility of abandoning the snapshot onto the GlobalLock class. Also renames the AutoGetCollectionForRead class to AutoGetCollectionForReadCommand, and adds a new AutoGetCollectionForRead class. Unlike AutoGetCollectionForReadCommand, this new class will not update the namespace on the CurrentOp object, nor will it add an entry to Top.
Diffstat (limited to 'src/mongo/db/concurrency')
-rw-r--r--src/mongo/db/concurrency/SConscript1
-rw-r--r--src/mongo/db/concurrency/d_concurrency.cpp38
-rw-r--r--src/mongo/db/concurrency/d_concurrency.h32
-rw-r--r--src/mongo/db/concurrency/d_concurrency_test.cpp617
4 files changed, 374 insertions, 314 deletions
diff --git a/src/mongo/db/concurrency/SConscript b/src/mongo/db/concurrency/SConscript
index 16a254db7aa..6a41d6e8564 100644
--- a/src/mongo/db/concurrency/SConscript
+++ b/src/mongo/db/concurrency/SConscript
@@ -45,6 +45,7 @@ env.CppUnitTest(
],
LIBDEPS=[
'$BUILD_DIR/mongo/util/progress_meter',
+ '$BUILD_DIR/mongo/db/service_context_noop_init',
'lock_manager',
]
)
diff --git a/src/mongo/db/concurrency/d_concurrency.cpp b/src/mongo/db/concurrency/d_concurrency.cpp
index e12f38a13fe..5360ca958f2 100644
--- a/src/mongo/db/concurrency/d_concurrency.cpp
+++ b/src/mongo/db/concurrency/d_concurrency.cpp
@@ -135,51 +135,53 @@ bool Lock::ResourceMutex::isAtLeastReadLocked(Locker* locker) {
return locker->isLockHeldForMode(_rid, MODE_IS);
}
-Lock::GlobalLock::GlobalLock(Locker* locker, LockMode lockMode, unsigned timeoutMs)
- : GlobalLock(locker, lockMode, EnqueueOnly()) {
+Lock::GlobalLock::GlobalLock(OperationContext* opCtx, LockMode lockMode, unsigned timeoutMs)
+ : GlobalLock(opCtx, lockMode, EnqueueOnly()) {
waitForLock(timeoutMs);
}
-Lock::GlobalLock::GlobalLock(Locker* locker, LockMode lockMode, EnqueueOnly enqueueOnly)
- : _locker(locker), _result(LOCK_INVALID), _pbwm(locker, resourceIdParallelBatchWriterMode) {
+Lock::GlobalLock::GlobalLock(OperationContext* opCtx, LockMode lockMode, EnqueueOnly enqueueOnly)
+ : _opCtx(opCtx),
+ _result(LOCK_INVALID),
+ _pbwm(opCtx->lockState(), resourceIdParallelBatchWriterMode) {
_enqueue(lockMode);
}
void Lock::GlobalLock::_enqueue(LockMode lockMode) {
- if (_locker->shouldConflictWithSecondaryBatchApplication()) {
+ if (_opCtx->lockState()->shouldConflictWithSecondaryBatchApplication()) {
_pbwm.lock(MODE_IS);
}
- _result = _locker->lockGlobalBegin(lockMode);
+ _result = _opCtx->lockState()->lockGlobalBegin(lockMode);
}
void Lock::GlobalLock::waitForLock(unsigned timeoutMs) {
if (_result == LOCK_WAITING) {
- _result = _locker->lockGlobalComplete(timeoutMs);
+ _result = _opCtx->lockState()->lockGlobalComplete(timeoutMs);
}
- if (_result != LOCK_OK && _locker->shouldConflictWithSecondaryBatchApplication()) {
+ if (_result != LOCK_OK && _opCtx->lockState()->shouldConflictWithSecondaryBatchApplication()) {
_pbwm.unlock();
}
}
void Lock::GlobalLock::_unlock() {
if (isLocked()) {
- _locker->unlockGlobal();
+ _opCtx->lockState()->unlockGlobal();
_result = LOCK_INVALID;
}
}
-Lock::DBLock::DBLock(Locker* locker, StringData db, LockMode mode)
+Lock::DBLock::DBLock(OperationContext* opCtx, StringData db, LockMode mode)
: _id(RESOURCE_DATABASE, db),
- _locker(locker),
+ _opCtx(opCtx),
_mode(mode),
- _globalLock(locker, isSharedLockMode(_mode) ? MODE_IS : MODE_IX, UINT_MAX) {
+ _globalLock(opCtx, isSharedLockMode(_mode) ? MODE_IS : MODE_IX, UINT_MAX) {
massert(28539, "need a valid database name", !db.empty() && nsIsDbOnly(db));
// Need to acquire the flush lock
- _locker->lockMMAPV1Flush();
+ _opCtx->lockState()->lockMMAPV1Flush();
// The check for the admin db is to ensure direct writes to auth collections
// are serialized (see SERVER-16092).
@@ -187,24 +189,24 @@ Lock::DBLock::DBLock(Locker* locker, StringData db, LockMode mode)
_mode = MODE_X;
}
- invariant(LOCK_OK == _locker->lock(_id, _mode));
+ invariant(LOCK_OK == _opCtx->lockState()->lock(_id, _mode));
}
Lock::DBLock::~DBLock() {
- _locker->unlock(_id);
+ _opCtx->lockState()->unlock(_id);
}
void Lock::DBLock::relockWithMode(LockMode newMode) {
// 2PL would delay the unlocking
- invariant(!_locker->inAWriteUnitOfWork());
+ invariant(!_opCtx->lockState()->inAWriteUnitOfWork());
// Not allowed to change global intent
invariant(!isSharedLockMode(_mode) || isSharedLockMode(newMode));
- _locker->unlock(_id);
+ _opCtx->lockState()->unlock(_id);
_mode = newMode;
- invariant(LOCK_OK == _locker->lock(_id, _mode));
+ invariant(LOCK_OK == _opCtx->lockState()->lock(_id, _mode));
}
diff --git a/src/mongo/db/concurrency/d_concurrency.h b/src/mongo/db/concurrency/d_concurrency.h
index 0d93b3ef2d0..5eb5de3a5e2 100644
--- a/src/mongo/db/concurrency/d_concurrency.h
+++ b/src/mongo/db/concurrency/d_concurrency.h
@@ -31,6 +31,7 @@
#include <climits> // For UINT_MAX
#include "mongo/db/concurrency/locker.h"
+#include "mongo/db/operation_context.h"
#include "mongo/util/timer.h"
namespace mongo {
@@ -161,23 +162,30 @@ public:
* Global lock.
*
* Grabs global resource lock. Allows further (recursive) acquisition of the global lock
- * in any mode, see LockMode.
+ * in any mode, see LockMode. An outermost GlobalLock calls abandonSnapshot() on destruction, so
+ * that the storage engine can release resources, such as snapshots or locks, that it may have
+ * acquired during the transaction. Note that any writes are committed in nested WriteUnitOfWork
+ * scopes, so write conflicts cannot happen when releasing the GlobalLock.
+ *
* NOTE: Does not acquire flush lock.
*/
class GlobalLock {
public:
class EnqueueOnly {};
- GlobalLock(Locker* locker, LockMode lockMode, unsigned timeoutMs);
+ GlobalLock(OperationContext* opCtx, LockMode lockMode, unsigned timeoutMs);
/**
* Enqueues lock but does not block on lock acquisition.
* Call waitForLock() to complete locking process.
*/
- GlobalLock(Locker* locker, LockMode lockMode, EnqueueOnly enqueueOnly);
+ GlobalLock(OperationContext* opCtx, LockMode lockMode, EnqueueOnly enqueueOnly);
~GlobalLock() {
_unlock();
+ if (!_opCtx->lockState()->isLocked()) {
+ _opCtx->recoveryUnit()->abandonSnapshot();
+ }
}
/**
@@ -193,7 +201,7 @@ public:
void _enqueue(LockMode lockMode);
void _unlock();
- Locker* const _locker;
+ OperationContext* const _opCtx;
LockResult _result;
ResourceLock _pbwm;
};
@@ -208,10 +216,10 @@ public:
*/
class GlobalWrite : public GlobalLock {
public:
- explicit GlobalWrite(Locker* locker, unsigned timeoutMs = UINT_MAX)
- : GlobalLock(locker, MODE_X, timeoutMs) {
+ explicit GlobalWrite(OperationContext* opCtx, unsigned timeoutMs = UINT_MAX)
+ : GlobalLock(opCtx, MODE_X, timeoutMs) {
if (isLocked()) {
- locker->lockMMAPV1Flush();
+ opCtx->lockState()->lockMMAPV1Flush();
}
}
};
@@ -226,10 +234,10 @@ public:
*/
class GlobalRead : public GlobalLock {
public:
- explicit GlobalRead(Locker* locker, unsigned timeoutMs = UINT_MAX)
- : GlobalLock(locker, MODE_S, timeoutMs) {
+ explicit GlobalRead(OperationContext* opCtx, unsigned timeoutMs = UINT_MAX)
+ : GlobalLock(opCtx, MODE_S, timeoutMs) {
if (isLocked()) {
- locker->lockMMAPV1Flush();
+ opCtx->lockState()->lockMMAPV1Flush();
}
}
};
@@ -251,7 +259,7 @@ public:
*/
class DBLock {
public:
- DBLock(Locker* locker, StringData db, LockMode mode);
+ DBLock(OperationContext* opCtx, StringData db, LockMode mode);
~DBLock();
/**
@@ -264,7 +272,7 @@ public:
private:
const ResourceId _id;
- Locker* const _locker;
+ OperationContext* const _opCtx;
// May be changed through relockWithMode. The global lock mode won't change though,
// because we never change from IS/S to IX/X or vice versa, just convert locks from
diff --git a/src/mongo/db/concurrency/d_concurrency_test.cpp b/src/mongo/db/concurrency/d_concurrency_test.cpp
index 1058d8a0163..2c497e4fa84 100644
--- a/src/mongo/db/concurrency/d_concurrency_test.cpp
+++ b/src/mongo/db/concurrency/d_concurrency_test.cpp
@@ -36,6 +36,7 @@
#include "mongo/db/concurrency/d_concurrency.h"
#include "mongo/db/concurrency/lock_manager_test_help.h"
#include "mongo/stdx/functional.h"
+#include "mongo/stdx/memory.h"
#include "mongo/stdx/thread.h"
#include "mongo/unittest/unittest.h"
#include "mongo/util/debug_util.h"
@@ -69,53 +70,94 @@ private:
bool _oldSupportsDocLocking;
};
-/**
- * Calls fn the given number of iterations, spread out over up to maxThreads threads.
- * The threadNr passed is an integer between 0 and maxThreads exclusive. Logs timing
- * statistics for for all power-of-two thread counts from 1 up to maxThreds.
- */
-void perfTest(stdx::function<void(int threadNr)> fn, int maxThreads) {
- for (int numThreads = 1; numThreads <= maxThreads; numThreads *= 2) {
- std::vector<stdx::thread> threads;
-
- AtomicInt32 ready{0};
- AtomicInt64 elapsedNanos{0};
- AtomicInt64 timedIters{0};
-
- for (int threadId = 0; threadId < numThreads; threadId++)
- threads.emplace_back([&, threadId]() {
- // Busy-wait until everybody is ready
- ready.fetchAndAdd(1);
- while (ready.load() < numThreads) {
- }
- uint64_t micros = 0;
- int iters;
- // Ensure at least 16 iterations are done and at least 25 milliseconds is timed
- for (iters = 16; iters < (1 << 30) && micros < kMinPerfMillis * 1000; iters *= 2) {
- // Measure the number of loops
- Timer t;
+class DConcurrencyTestFixture : public unittest::Test {
+public:
+ DConcurrencyTestFixture() : _client(getGlobalServiceContext()->makeClient("testClient")) {}
+
+ /**
+ * Constructs and returns a new OperationContext.
+ */
+ ServiceContext::UniqueOperationContext makeOpCtx() const {
+ auto opCtx = _client->makeOperationContext();
+ opCtx->releaseLockState();
+ return opCtx;
+ }
+
+ /**
+ * Returns a vector of Clients of length 'k', each of which has an OperationContext with its
+ * lockState set to an instance of the 'LockerType' template parameter.
+ */
+ template <typename LockerType>
+ std::vector<std::pair<ServiceContext::UniqueClient, ServiceContext::UniqueOperationContext>>
+ makeKClientsWithLockers(int k) {
+ std::vector<std::pair<ServiceContext::UniqueClient, ServiceContext::UniqueOperationContext>>
+ clients;
+ clients.reserve(k);
+ for (int i = 0; i < k; ++i) {
+ auto client = getGlobalServiceContext()->makeClient(
+ str::stream() << "test client for thread " << i);
+ auto opCtx = client->makeOperationContext();
+ opCtx->releaseLockState();
+ opCtx->setLockState(stdx::make_unique<LockerType>());
+ clients.emplace_back(std::move(client), std::move(opCtx));
+ }
+ return clients;
+ }
+
+ /**
+ * Calls fn the given number of iterations, spread out over up to maxThreads threads.
+ * The threadNr passed is an integer between 0 and maxThreads exclusive. Logs timing
+ * statistics for all power-of-two thread counts from 1 up to maxThreads.
+ */
+ void perfTest(stdx::function<void(int threadNr)> fn, int maxThreads) {
+ for (int numThreads = 1; numThreads <= maxThreads; numThreads *= 2) {
+ std::vector<stdx::thread> threads;
+
+ AtomicInt32 ready{0};
+ AtomicInt64 elapsedNanos{0};
+ AtomicInt64 timedIters{0};
+
+ for (int threadId = 0; threadId < numThreads; threadId++)
+ threads.emplace_back([&, threadId]() {
+ // Busy-wait until everybody is ready
+ ready.fetchAndAdd(1);
+ while (ready.load() < numThreads) {
+ }
- for (int i = 0; i < iters; i++)
- fn(threadId);
+ uint64_t micros = 0;
+ int iters;
+ // Ensure at least 16 iterations are done and at least 25 milliseconds is timed
+ for (iters = 16; iters < (1 << 30) && micros < kMinPerfMillis * 1000;
+ iters *= 2) {
+ // Measure the number of loops
+ Timer t;
- micros = t.micros();
- }
+ for (int i = 0; i < iters; i++)
+ fn(threadId);
- elapsedNanos.fetchAndAdd(micros * 1000);
- timedIters.fetchAndAdd(iters);
- });
+ micros = t.micros();
+ }
+
+ elapsedNanos.fetchAndAdd(micros * 1000);
+ timedIters.fetchAndAdd(iters);
+ });
- for (auto& thread : threads)
- thread.join();
+ for (auto& thread : threads)
+ thread.join();
- log() << numThreads
- << " threads took: " << elapsedNanos.load() / static_cast<double>(timedIters.load())
- << " ns per call" << (kDebugBuild ? " (DEBUG BUILD!)" : "");
+ log() << numThreads << " threads took: "
+ << elapsedNanos.load() / static_cast<double>(timedIters.load()) << " ns per call"
+ << (kDebugBuild ? " (DEBUG BUILD!)" : "");
+ }
}
-}
-TEST(DConcurrency, ResourceMutex) {
+private:
+ ServiceContext::UniqueClient _client;
+};
+
+
+TEST_F(DConcurrencyTestFixture, ResourceMutex) {
Lock::ResourceMutex mtx("testMutex");
DefaultLockerImpl locker1;
DefaultLockerImpl locker2;
@@ -197,284 +239,292 @@ TEST(DConcurrency, ResourceMutex) {
t3.join();
}
-TEST(DConcurrency, GlobalRead) {
- MMAPV1LockerImpl ls;
- Lock::GlobalRead globalRead(&ls);
- ASSERT(ls.isR());
+TEST_F(DConcurrencyTestFixture, GlobalRead) {
+ auto opCtx = makeOpCtx();
+ opCtx->setLockState(stdx::make_unique<MMAPV1LockerImpl>());
+ Lock::GlobalRead globalRead(opCtx.get());
+ ASSERT(opCtx->lockState()->isR());
}
-TEST(DConcurrency, GlobalWrite) {
- MMAPV1LockerImpl ls;
- Lock::GlobalWrite globalWrite(&ls);
- ASSERT(ls.isW());
+TEST_F(DConcurrencyTestFixture, GlobalWrite) {
+ auto opCtx = makeOpCtx();
+ opCtx->setLockState(stdx::make_unique<MMAPV1LockerImpl>());
+ Lock::GlobalWrite globalWrite(opCtx.get());
+ ASSERT(opCtx->lockState()->isW());
}
-TEST(DConcurrency, GlobalWriteAndGlobalRead) {
- MMAPV1LockerImpl ls;
+TEST_F(DConcurrencyTestFixture, GlobalWriteAndGlobalRead) {
+ auto opCtx = makeOpCtx();
+ opCtx->setLockState(stdx::make_unique<MMAPV1LockerImpl>());
+ auto lockState = opCtx->lockState();
- Lock::GlobalWrite globalWrite(&ls);
- ASSERT(ls.isW());
+ Lock::GlobalWrite globalWrite(opCtx.get());
+ ASSERT(lockState->isW());
{
- Lock::GlobalRead globalRead(&ls);
- ASSERT(ls.isW());
+ Lock::GlobalRead globalRead(opCtx.get());
+ ASSERT(lockState->isW());
}
- ASSERT(ls.isW());
+ ASSERT(lockState->isW());
}
-TEST(DConcurrency, GlobalLockS_Timeout) {
- MMAPV1LockerImpl ls;
- Lock::GlobalLock globalWrite(&ls, MODE_X, 0);
+TEST_F(DConcurrencyTestFixture, GlobalLockS_Timeout) {
+ auto clients = makeKClientsWithLockers<MMAPV1LockerImpl>(2);
+
+ Lock::GlobalLock globalWrite(clients[0].second.get(), MODE_X, 0);
ASSERT(globalWrite.isLocked());
- {
- MMAPV1LockerImpl lsTry;
- Lock::GlobalLock globalReadTry(&lsTry, MODE_S, 1);
- ASSERT(!globalReadTry.isLocked());
- }
+ Lock::GlobalLock globalReadTry(clients[1].second.get(), MODE_S, 1);
+ ASSERT(!globalReadTry.isLocked());
}
-TEST(DConcurrency, GlobalLockX_Timeout) {
- MMAPV1LockerImpl ls;
- Lock::GlobalLock globalWrite(&ls, MODE_X, 0);
+TEST_F(DConcurrencyTestFixture, GlobalLockX_Timeout) {
+ auto clients = makeKClientsWithLockers<MMAPV1LockerImpl>(2);
+ Lock::GlobalLock globalWrite(clients[0].second.get(), MODE_X, 0);
ASSERT(globalWrite.isLocked());
- {
- MMAPV1LockerImpl lsTry;
- Lock::GlobalLock globalWriteTry(&lsTry, MODE_X, 1);
- ASSERT(!globalWriteTry.isLocked());
- }
+ Lock::GlobalLock globalWriteTry(clients[1].second.get(), MODE_X, 1);
+ ASSERT(!globalWriteTry.isLocked());
}
-TEST(DConcurrency, GlobalLockS_NoTimeoutDueToGlobalLockS) {
- MMAPV1LockerImpl ls;
- Lock::GlobalRead globalRead(&ls);
+TEST_F(DConcurrencyTestFixture, GlobalLockS_NoTimeoutDueToGlobalLockS) {
+ auto clients = makeKClientsWithLockers<MMAPV1LockerImpl>(2);
- MMAPV1LockerImpl lsTry;
- Lock::GlobalLock globalReadTry(&lsTry, MODE_S, 1);
+ Lock::GlobalRead globalRead(clients[0].second.get());
+ Lock::GlobalLock globalReadTry(clients[1].second.get(), MODE_S, 1);
ASSERT(globalReadTry.isLocked());
}
-TEST(DConcurrency, GlobalLockX_TimeoutDueToGlobalLockS) {
- MMAPV1LockerImpl ls;
- Lock::GlobalRead globalRead(&ls);
+TEST_F(DConcurrencyTestFixture, GlobalLockX_TimeoutDueToGlobalLockS) {
+ auto clients = makeKClientsWithLockers<MMAPV1LockerImpl>(2);
- MMAPV1LockerImpl lsTry;
- Lock::GlobalLock globalWriteTry(&lsTry, MODE_X, 1);
+ Lock::GlobalRead globalRead(clients[0].second.get());
+ Lock::GlobalLock globalWriteTry(clients[1].second.get(), MODE_X, 1);
ASSERT(!globalWriteTry.isLocked());
}
-TEST(DConcurrency, GlobalLockS_TimeoutDueToGlobalLockX) {
- MMAPV1LockerImpl ls;
- Lock::GlobalWrite globalWrite(&ls);
+TEST_F(DConcurrencyTestFixture, GlobalLockS_TimeoutDueToGlobalLockX) {
+ auto clients = makeKClientsWithLockers<MMAPV1LockerImpl>(2);
- MMAPV1LockerImpl lsTry;
- Lock::GlobalLock globalReadTry(&lsTry, MODE_S, 1);
+ Lock::GlobalWrite globalWrite(clients[0].second.get());
+ Lock::GlobalLock globalReadTry(clients[1].second.get(), MODE_S, 1);
ASSERT(!globalReadTry.isLocked());
}
-TEST(DConcurrency, GlobalLockX_TimeoutDueToGlobalLockX) {
- MMAPV1LockerImpl ls;
- Lock::GlobalWrite globalWrite(&ls);
+TEST_F(DConcurrencyTestFixture, GlobalLockX_TimeoutDueToGlobalLockX) {
+ auto clients = makeKClientsWithLockers<MMAPV1LockerImpl>(2);
- MMAPV1LockerImpl lsTry;
- Lock::GlobalLock globalWriteTry(&lsTry, MODE_X, 1);
+ Lock::GlobalWrite globalWrite(clients[0].second.get());
+ Lock::GlobalLock globalWriteTry(clients[1].second.get(), MODE_X, 1);
ASSERT(!globalWriteTry.isLocked());
}
-TEST(DConcurrency, TempReleaseGlobalWrite) {
- MMAPV1LockerImpl ls;
- Lock::GlobalWrite globalWrite(&ls);
+TEST_F(DConcurrencyTestFixture, TempReleaseGlobalWrite) {
+ auto opCtx = makeOpCtx();
+ opCtx->setLockState(stdx::make_unique<MMAPV1LockerImpl>());
+ auto lockState = opCtx->lockState();
+ Lock::GlobalWrite globalWrite(opCtx.get());
{
- Lock::TempRelease tempRelease(&ls);
- ASSERT(!ls.isLocked());
+ Lock::TempRelease tempRelease(lockState);
+ ASSERT(!lockState->isLocked());
}
- ASSERT(ls.isW());
+ ASSERT(lockState->isW());
}
-TEST(DConcurrency, TempReleaseRecursive) {
- MMAPV1LockerImpl ls;
- Lock::GlobalWrite globalWrite(&ls);
- Lock::DBLock lk(&ls, "SomeDBName", MODE_X);
+TEST_F(DConcurrencyTestFixture, TempReleaseRecursive) {
+ auto opCtx = makeOpCtx();
+ opCtx->setLockState(stdx::make_unique<MMAPV1LockerImpl>());
+ auto lockState = opCtx->lockState();
+ Lock::GlobalWrite globalWrite(opCtx.get());
+ Lock::DBLock lk(opCtx.get(), "SomeDBName", MODE_X);
{
- Lock::TempRelease tempRelease(&ls);
- ASSERT(ls.isW());
- ASSERT(ls.isDbLockedForMode("SomeDBName", MODE_X));
+ Lock::TempRelease tempRelease(lockState);
+ ASSERT(lockState->isW());
+ ASSERT(lockState->isDbLockedForMode("SomeDBName", MODE_X));
}
- ASSERT(ls.isW());
+ ASSERT(lockState->isW());
}
-TEST(DConcurrency, DBLockTakesS) {
- MMAPV1LockerImpl ls;
-
- Lock::DBLock dbRead(&ls, "db", MODE_S);
+TEST_F(DConcurrencyTestFixture, DBLockTakesS) {
+ auto opCtx = makeOpCtx();
+ opCtx->setLockState(stdx::make_unique<MMAPV1LockerImpl>());
+ Lock::DBLock dbRead(opCtx.get(), "db", MODE_S);
const ResourceId resIdDb(RESOURCE_DATABASE, std::string("db"));
- ASSERT(ls.getLockMode(resIdDb) == MODE_S);
+ ASSERT(opCtx->lockState()->getLockMode(resIdDb) == MODE_S);
}
-TEST(DConcurrency, DBLockTakesX) {
- MMAPV1LockerImpl ls;
-
- Lock::DBLock dbWrite(&ls, "db", MODE_X);
+TEST_F(DConcurrencyTestFixture, DBLockTakesX) {
+ auto opCtx = makeOpCtx();
+ opCtx->setLockState(stdx::make_unique<MMAPV1LockerImpl>());
+ Lock::DBLock dbWrite(opCtx.get(), "db", MODE_X);
const ResourceId resIdDb(RESOURCE_DATABASE, std::string("db"));
- ASSERT(ls.getLockMode(resIdDb) == MODE_X);
+ ASSERT(opCtx->lockState()->getLockMode(resIdDb) == MODE_X);
}
-TEST(DConcurrency, DBLockTakesISForAdminIS) {
- DefaultLockerImpl ls;
-
- Lock::DBLock dbRead(&ls, "admin", MODE_IS);
+TEST_F(DConcurrencyTestFixture, DBLockTakesISForAdminIS) {
+ auto opCtx = makeOpCtx();
+ opCtx->setLockState(stdx::make_unique<MMAPV1LockerImpl>());
+ Lock::DBLock dbRead(opCtx.get(), "admin", MODE_IS);
- ASSERT(ls.getLockMode(resourceIdAdminDB) == MODE_IS);
+ ASSERT(opCtx->lockState()->getLockMode(resourceIdAdminDB) == MODE_IS);
}
-TEST(DConcurrency, DBLockTakesSForAdminS) {
- DefaultLockerImpl ls;
+TEST_F(DConcurrencyTestFixture, DBLockTakesSForAdminS) {
+ auto opCtx = makeOpCtx();
+ opCtx->setLockState(stdx::make_unique<MMAPV1LockerImpl>());
+ Lock::DBLock dbRead(opCtx.get(), "admin", MODE_S);
- Lock::DBLock dbRead(&ls, "admin", MODE_S);
-
- ASSERT(ls.getLockMode(resourceIdAdminDB) == MODE_S);
+ ASSERT(opCtx->lockState()->getLockMode(resourceIdAdminDB) == MODE_S);
}
-TEST(DConcurrency, DBLockTakesXForAdminIX) {
- DefaultLockerImpl ls;
-
- Lock::DBLock dbWrite(&ls, "admin", MODE_IX);
+TEST_F(DConcurrencyTestFixture, DBLockTakesXForAdminIX) {
+ auto opCtx = makeOpCtx();
+ opCtx->setLockState(stdx::make_unique<MMAPV1LockerImpl>());
+ Lock::DBLock dbWrite(opCtx.get(), "admin", MODE_IX);
- ASSERT(ls.getLockMode(resourceIdAdminDB) == MODE_X);
+ ASSERT(opCtx->lockState()->getLockMode(resourceIdAdminDB) == MODE_X);
}
-TEST(DConcurrency, DBLockTakesXForAdminX) {
- DefaultLockerImpl ls;
+TEST_F(DConcurrencyTestFixture, DBLockTakesXForAdminX) {
+ auto opCtx = makeOpCtx();
+ opCtx->setLockState(stdx::make_unique<MMAPV1LockerImpl>());
+ Lock::DBLock dbWrite(opCtx.get(), "admin", MODE_X);
- Lock::DBLock dbWrite(&ls, "admin", MODE_X);
-
- ASSERT(ls.getLockMode(resourceIdAdminDB) == MODE_X);
+ ASSERT(opCtx->lockState()->getLockMode(resourceIdAdminDB) == MODE_X);
}
-TEST(DConcurrency, MultipleWriteDBLocksOnSameThread) {
- MMAPV1LockerImpl ls;
-
- Lock::DBLock r1(&ls, "db1", MODE_X);
- Lock::DBLock r2(&ls, "db1", MODE_X);
+TEST_F(DConcurrencyTestFixture, MultipleWriteDBLocksOnSameThread) {
+ auto opCtx = makeOpCtx();
+ opCtx->setLockState(stdx::make_unique<MMAPV1LockerImpl>());
+ Lock::DBLock r1(opCtx.get(), "db1", MODE_X);
+ Lock::DBLock r2(opCtx.get(), "db1", MODE_X);
- ASSERT(ls.isDbLockedForMode("db1", MODE_X));
+ ASSERT(opCtx->lockState()->isDbLockedForMode("db1", MODE_X));
}
-TEST(DConcurrency, MultipleConflictingDBLocksOnSameThread) {
- MMAPV1LockerImpl ls;
-
- Lock::DBLock r1(&ls, "db1", MODE_X);
- Lock::DBLock r2(&ls, "db1", MODE_S);
+TEST_F(DConcurrencyTestFixture, MultipleConflictingDBLocksOnSameThread) {
+ auto opCtx = makeOpCtx();
+ opCtx->setLockState(stdx::make_unique<MMAPV1LockerImpl>());
+ auto lockState = opCtx->lockState();
+ Lock::DBLock r1(opCtx.get(), "db1", MODE_X);
+ Lock::DBLock r2(opCtx.get(), "db1", MODE_S);
- ASSERT(ls.isDbLockedForMode("db1", MODE_X));
- ASSERT(ls.isDbLockedForMode("db1", MODE_S));
+ ASSERT(lockState->isDbLockedForMode("db1", MODE_X));
+ ASSERT(lockState->isDbLockedForMode("db1", MODE_S));
}
-TEST(DConcurrency, IsDbLockedForSMode) {
+TEST_F(DConcurrencyTestFixture, IsDbLockedForSMode) {
const std::string dbName("db");
- MMAPV1LockerImpl ls;
+ auto opCtx = makeOpCtx();
+ opCtx->setLockState(stdx::make_unique<MMAPV1LockerImpl>());
+ auto lockState = opCtx->lockState();
+ Lock::DBLock dbLock(opCtx.get(), dbName, MODE_S);
- Lock::DBLock dbLock(&ls, dbName, MODE_S);
-
- ASSERT(ls.isDbLockedForMode(dbName, MODE_IS));
- ASSERT(!ls.isDbLockedForMode(dbName, MODE_IX));
- ASSERT(ls.isDbLockedForMode(dbName, MODE_S));
- ASSERT(!ls.isDbLockedForMode(dbName, MODE_X));
+ ASSERT(lockState->isDbLockedForMode(dbName, MODE_IS));
+ ASSERT(!lockState->isDbLockedForMode(dbName, MODE_IX));
+ ASSERT(lockState->isDbLockedForMode(dbName, MODE_S));
+ ASSERT(!lockState->isDbLockedForMode(dbName, MODE_X));
}
-TEST(DConcurrency, IsDbLockedForXMode) {
+TEST_F(DConcurrencyTestFixture, IsDbLockedForXMode) {
const std::string dbName("db");
- MMAPV1LockerImpl ls;
-
- Lock::DBLock dbLock(&ls, dbName, MODE_X);
+ auto opCtx = makeOpCtx();
+ opCtx->setLockState(stdx::make_unique<MMAPV1LockerImpl>());
+ auto lockState = opCtx->lockState();
+ Lock::DBLock dbLock(opCtx.get(), dbName, MODE_X);
- ASSERT(ls.isDbLockedForMode(dbName, MODE_IS));
- ASSERT(ls.isDbLockedForMode(dbName, MODE_IX));
- ASSERT(ls.isDbLockedForMode(dbName, MODE_S));
- ASSERT(ls.isDbLockedForMode(dbName, MODE_X));
+ ASSERT(lockState->isDbLockedForMode(dbName, MODE_IS));
+ ASSERT(lockState->isDbLockedForMode(dbName, MODE_IX));
+ ASSERT(lockState->isDbLockedForMode(dbName, MODE_S));
+ ASSERT(lockState->isDbLockedForMode(dbName, MODE_X));
}
-TEST(DConcurrency, IsCollectionLocked_DB_Locked_IS) {
+TEST_F(DConcurrencyTestFixture, IsCollectionLocked_DB_Locked_IS) {
const std::string ns("db1.coll");
- MMAPV1LockerImpl ls;
+ auto opCtx = makeOpCtx();
+ opCtx->setLockState(stdx::make_unique<MMAPV1LockerImpl>());
+ auto lockState = opCtx->lockState();
- Lock::DBLock dbLock(&ls, "db1", MODE_IS);
+ Lock::DBLock dbLock(opCtx.get(), "db1", MODE_IS);
{
- Lock::CollectionLock collLock(&ls, ns, MODE_IS);
+ Lock::CollectionLock collLock(lockState, ns, MODE_IS);
- ASSERT(ls.isCollectionLockedForMode(ns, MODE_IS));
- ASSERT(!ls.isCollectionLockedForMode(ns, MODE_IX));
+ ASSERT(lockState->isCollectionLockedForMode(ns, MODE_IS));
+ ASSERT(!lockState->isCollectionLockedForMode(ns, MODE_IX));
// TODO: This is TRUE because Lock::CollectionLock converts IS lock to S
- ASSERT(ls.isCollectionLockedForMode(ns, MODE_S));
+ ASSERT(lockState->isCollectionLockedForMode(ns, MODE_S));
- ASSERT(!ls.isCollectionLockedForMode(ns, MODE_X));
+ ASSERT(!lockState->isCollectionLockedForMode(ns, MODE_X));
}
{
- Lock::CollectionLock collLock(&ls, ns, MODE_S);
+ Lock::CollectionLock collLock(lockState, ns, MODE_S);
- ASSERT(ls.isCollectionLockedForMode(ns, MODE_IS));
- ASSERT(!ls.isCollectionLockedForMode(ns, MODE_IX));
- ASSERT(ls.isCollectionLockedForMode(ns, MODE_S));
- ASSERT(!ls.isCollectionLockedForMode(ns, MODE_X));
+ ASSERT(lockState->isCollectionLockedForMode(ns, MODE_IS));
+ ASSERT(!lockState->isCollectionLockedForMode(ns, MODE_IX));
+ ASSERT(lockState->isCollectionLockedForMode(ns, MODE_S));
+ ASSERT(!lockState->isCollectionLockedForMode(ns, MODE_X));
}
}
-TEST(DConcurrency, IsCollectionLocked_DB_Locked_IX) {
+TEST_F(DConcurrencyTestFixture, IsCollectionLocked_DB_Locked_IX) {
const std::string ns("db1.coll");
- MMAPV1LockerImpl ls;
+ auto opCtx = makeOpCtx();
+ opCtx->setLockState(stdx::make_unique<MMAPV1LockerImpl>());
+ auto lockState = opCtx->lockState();
- Lock::DBLock dbLock(&ls, "db1", MODE_IX);
+ Lock::DBLock dbLock(opCtx.get(), "db1", MODE_IX);
{
- Lock::CollectionLock collLock(&ls, ns, MODE_IX);
+ Lock::CollectionLock collLock(lockState, ns, MODE_IX);
// TODO: This is TRUE because Lock::CollectionLock converts IX lock to X
- ASSERT(ls.isCollectionLockedForMode(ns, MODE_IS));
+ ASSERT(lockState->isCollectionLockedForMode(ns, MODE_IS));
- ASSERT(ls.isCollectionLockedForMode(ns, MODE_IX));
- ASSERT(ls.isCollectionLockedForMode(ns, MODE_S));
- ASSERT(ls.isCollectionLockedForMode(ns, MODE_X));
+ ASSERT(lockState->isCollectionLockedForMode(ns, MODE_IX));
+ ASSERT(lockState->isCollectionLockedForMode(ns, MODE_S));
+ ASSERT(lockState->isCollectionLockedForMode(ns, MODE_X));
}
{
- Lock::CollectionLock collLock(&ls, ns, MODE_X);
+ Lock::CollectionLock collLock(lockState, ns, MODE_X);
- ASSERT(ls.isCollectionLockedForMode(ns, MODE_IS));
- ASSERT(ls.isCollectionLockedForMode(ns, MODE_IX));
- ASSERT(ls.isCollectionLockedForMode(ns, MODE_S));
- ASSERT(ls.isCollectionLockedForMode(ns, MODE_X));
+ ASSERT(lockState->isCollectionLockedForMode(ns, MODE_IS));
+ ASSERT(lockState->isCollectionLockedForMode(ns, MODE_IX));
+ ASSERT(lockState->isCollectionLockedForMode(ns, MODE_S));
+ ASSERT(lockState->isCollectionLockedForMode(ns, MODE_X));
}
}
-TEST(DConcurrency, Stress) {
+TEST_F(DConcurrencyTestFixture, Stress) {
const int kNumIterations = 5000;
ProgressMeter progressMeter(kNumIterations * kMaxStressThreads);
- std::array<DefaultLockerImpl, kMaxStressThreads> locker;
+ std::vector<std::pair<ServiceContext::UniqueClient, ServiceContext::UniqueOperationContext>>
+ clients = makeKClientsWithLockers<DefaultLockerImpl>(kMaxStressThreads);
AtomicInt32 ready{0};
std::vector<stdx::thread> threads;
- for (int threadId = 0; threadId < kMaxStressThreads; threadId++)
+
+ for (int threadId = 0; threadId < kMaxStressThreads; threadId++) {
threads.emplace_back([&, threadId]() {
// Busy-wait until everybody is ready
ready.fetchAndAdd(1);
@@ -485,119 +535,119 @@ TEST(DConcurrency, Stress) {
const bool sometimes = (std::rand() % 15 == 0);
if (i % 7 == 0 && threadId == 0 /* Only one upgrader legal */) {
- Lock::GlobalWrite w(&locker[threadId]);
+ Lock::GlobalWrite w(clients[threadId].second.get());
if (i % 7 == 2) {
- Lock::TempRelease t(&locker[threadId]);
+ Lock::TempRelease t(clients[threadId].second->lockState());
}
- ASSERT(locker[threadId].isW());
+ ASSERT(clients[threadId].second->lockState()->isW());
} else if (i % 7 == 1) {
- Lock::GlobalRead r(&locker[threadId]);
- ASSERT(locker[threadId].isReadLocked());
+ Lock::GlobalRead r(clients[threadId].second.get());
+ ASSERT(clients[threadId].second->lockState()->isReadLocked());
} else if (i % 7 == 2) {
- Lock::GlobalWrite w(&locker[threadId]);
+ Lock::GlobalWrite w(clients[threadId].second.get());
if (sometimes) {
- Lock::TempRelease t(&locker[threadId]);
+ Lock::TempRelease t(clients[threadId].second->lockState());
}
- ASSERT(locker[threadId].isW());
+ ASSERT(clients[threadId].second->lockState()->isW());
} else if (i % 7 == 3) {
- Lock::GlobalWrite w(&locker[threadId]);
- { Lock::TempRelease t(&locker[threadId]); }
+ Lock::GlobalWrite w(clients[threadId].second.get());
+ { Lock::TempRelease t(clients[threadId].second->lockState()); }
- Lock::GlobalRead r(&locker[threadId]);
+ Lock::GlobalRead r(clients[threadId].second.get());
if (sometimes) {
- Lock::TempRelease t(&locker[threadId]);
+ Lock::TempRelease t(clients[threadId].second->lockState());
}
- ASSERT(locker[threadId].isW());
+ ASSERT(clients[threadId].second->lockState()->isW());
} else if (i % 7 == 4) {
- Lock::GlobalRead r(&locker[threadId]);
- Lock::GlobalRead r2(&locker[threadId]);
- ASSERT(locker[threadId].isReadLocked());
+ Lock::GlobalRead r(clients[threadId].second.get());
+ Lock::GlobalRead r2(clients[threadId].second.get());
+ ASSERT(clients[threadId].second->lockState()->isReadLocked());
} else if (i % 7 == 5) {
- { Lock::DBLock r(&locker[threadId], "foo", MODE_S); }
- { Lock::DBLock r(&locker[threadId], "bar", MODE_S); }
+ { Lock::DBLock r(clients[threadId].second.get(), "foo", MODE_S); }
+ { Lock::DBLock r(clients[threadId].second.get(), "bar", MODE_S); }
} else if (i % 7 == 6) {
if (i > kNumIterations / 2) {
int q = i % 11;
if (q == 0) {
- Lock::DBLock r(&locker[threadId], "foo", MODE_S);
- ASSERT(locker[threadId].isDbLockedForMode("foo", MODE_S));
-
- Lock::DBLock r2(&locker[threadId], "foo", MODE_S);
- ASSERT(locker[threadId].isDbLockedForMode("foo", MODE_S));
-
- Lock::DBLock r3(&locker[threadId], "local", MODE_S);
- ASSERT(locker[threadId].isDbLockedForMode("foo", MODE_S));
- ASSERT(locker[threadId].isDbLockedForMode("local", MODE_S));
+ Lock::DBLock r(clients[threadId].second.get(), "foo", MODE_S);
+ ASSERT(clients[threadId].second->lockState()->isDbLockedForMode(
+ "foo", MODE_S));
+
+ Lock::DBLock r2(clients[threadId].second.get(), "foo", MODE_S);
+ ASSERT(clients[threadId].second->lockState()->isDbLockedForMode(
+ "foo", MODE_S));
+
+ Lock::DBLock r3(clients[threadId].second.get(), "local", MODE_S);
+ ASSERT(clients[threadId].second->lockState()->isDbLockedForMode(
+ "foo", MODE_S));
+ ASSERT(clients[threadId].second->lockState()->isDbLockedForMode(
+ "local", MODE_S));
} else if (q == 1) {
// test locking local only -- with no preceding lock
- { Lock::DBLock x(&locker[threadId], "local", MODE_S); }
+ { Lock::DBLock x(clients[threadId].second.get(), "local", MODE_S); }
- Lock::DBLock x(&locker[threadId], "local", MODE_X);
+ Lock::DBLock x(clients[threadId].second.get(), "local", MODE_X);
if (sometimes) {
- Lock::TempRelease t(&locker[threadId]);
+ Lock::TempRelease t(clients[threadId].second.get()->lockState());
}
} else if (q == 2) {
- { Lock::DBLock x(&locker[threadId], "admin", MODE_S); }
- { Lock::DBLock x(&locker[threadId], "admin", MODE_X); }
+ { Lock::DBLock x(clients[threadId].second.get(), "admin", MODE_S); }
+ { Lock::DBLock x(clients[threadId].second.get(), "admin", MODE_X); }
} else if (q == 3) {
- Lock::DBLock x(&locker[threadId], "foo", MODE_X);
- Lock::DBLock y(&locker[threadId], "admin", MODE_S);
+ Lock::DBLock x(clients[threadId].second.get(), "foo", MODE_X);
+ Lock::DBLock y(clients[threadId].second.get(), "admin", MODE_S);
} else if (q == 4) {
- Lock::DBLock x(&locker[threadId], "foo2", MODE_S);
- Lock::DBLock y(&locker[threadId], "admin", MODE_S);
+ Lock::DBLock x(clients[threadId].second.get(), "foo2", MODE_S);
+ Lock::DBLock y(clients[threadId].second.get(), "admin", MODE_S);
} else if (q == 5) {
- Lock::DBLock x(&locker[threadId], "foo", MODE_IS);
+ Lock::DBLock x(clients[threadId].second.get(), "foo", MODE_IS);
} else if (q == 6) {
- Lock::DBLock x(&locker[threadId], "foo", MODE_IX);
- Lock::DBLock y(&locker[threadId], "local", MODE_IX);
+ Lock::DBLock x(clients[threadId].second.get(), "foo", MODE_IX);
+ Lock::DBLock y(clients[threadId].second.get(), "local", MODE_IX);
} else {
- Lock::DBLock w(&locker[threadId], "foo", MODE_X);
+ Lock::DBLock w(clients[threadId].second.get(), "foo", MODE_X);
- { Lock::TempRelease t(&locker[threadId]); }
+ { Lock::TempRelease t(clients[threadId].second->lockState()); }
- Lock::DBLock r2(&locker[threadId], "foo", MODE_S);
- Lock::DBLock r3(&locker[threadId], "local", MODE_S);
+ Lock::DBLock r2(clients[threadId].second.get(), "foo", MODE_S);
+ Lock::DBLock r3(clients[threadId].second.get(), "local", MODE_S);
}
} else {
- Lock::DBLock r(&locker[threadId], "foo", MODE_S);
- Lock::DBLock r2(&locker[threadId], "foo", MODE_S);
- Lock::DBLock r3(&locker[threadId], "local", MODE_S);
+ Lock::DBLock r(clients[threadId].second.get(), "foo", MODE_S);
+ Lock::DBLock r2(clients[threadId].second.get(), "foo", MODE_S);
+ Lock::DBLock r3(clients[threadId].second.get(), "local", MODE_S);
}
}
progressMeter.hit();
}
});
+ }
for (auto& thread : threads)
thread.join();
- {
- MMAPV1LockerImpl ls;
- Lock::GlobalWrite w(&ls);
- }
-
- {
- MMAPV1LockerImpl ls;
- Lock::GlobalRead r(&ls);
- }
+ auto newClients = makeKClientsWithLockers<MMAPV1LockerImpl>(2);
+ { Lock::GlobalWrite w(newClients[0].second.get()); }
+ { Lock::GlobalRead r(newClients[1].second.get()); }
}
-TEST(DConcurrency, StressPartitioned) {
+TEST_F(DConcurrencyTestFixture, StressPartitioned) {
const int kNumIterations = 5000;
ProgressMeter progressMeter(kNumIterations * kMaxStressThreads);
- std::array<DefaultLockerImpl, kMaxStressThreads> locker;
+ std::vector<std::pair<ServiceContext::UniqueClient, ServiceContext::UniqueOperationContext>>
+ clients = makeKClientsWithLockers<DefaultLockerImpl>(kMaxStressThreads);
AtomicInt32 ready{0};
std::vector<stdx::thread> threads;
- for (int threadId = 0; threadId < kMaxStressThreads; threadId++)
+ for (int threadId = 0; threadId < kMaxStressThreads; threadId++) {
threads.emplace_back([&, threadId]() {
// Busy-wait until everybody is ready
ready.fetchAndAdd(1);
@@ -607,10 +657,10 @@ TEST(DConcurrency, StressPartitioned) {
for (int i = 0; i < kNumIterations; i++) {
if (threadId == 0) {
if (i % 100 == 0) {
- Lock::GlobalWrite w(&locker[threadId]);
+ Lock::GlobalWrite w(clients[threadId].second.get());
continue;
} else if (i % 100 == 1) {
- Lock::GlobalRead w(&locker[threadId]);
+ Lock::GlobalRead w(clients[threadId].second.get());
continue;
}
@@ -618,31 +668,26 @@ TEST(DConcurrency, StressPartitioned) {
}
if (i % 2 == 0) {
- Lock::DBLock x(&locker[threadId], "foo", MODE_IS);
+ Lock::DBLock x(clients[threadId].second.get(), "foo", MODE_IS);
} else {
- Lock::DBLock x(&locker[threadId], "foo", MODE_IX);
- Lock::DBLock y(&locker[threadId], "local", MODE_IX);
+ Lock::DBLock x(clients[threadId].second.get(), "foo", MODE_IX);
+ Lock::DBLock y(clients[threadId].second.get(), "local", MODE_IX);
}
progressMeter.hit();
}
});
+ }
for (auto& thread : threads)
thread.join();
- {
- MMAPV1LockerImpl ls;
- Lock::GlobalWrite w(&ls);
- }
-
- {
- MMAPV1LockerImpl ls;
- Lock::GlobalRead r(&ls);
- }
+ auto newClients = makeKClientsWithLockers<MMAPV1LockerImpl>(2);
+ { Lock::GlobalWrite w(newClients[0].second.get()); }
+ { Lock::GlobalRead r(newClients[1].second.get()); }
}
-TEST(DConcurrency, ResourceMutexLabels) {
+TEST_F(DConcurrencyTestFixture, ResourceMutexLabels) {
Lock::ResourceMutex mutex("label");
ASSERT(mutex.getName() == "label");
Lock::ResourceMutex mutex2("label2");
@@ -653,64 +698,68 @@ TEST(DConcurrency, ResourceMutexLabels) {
// These tests exercise single- and multi-threaded performance of uncontended lock acquisition. It
// is neither practical nor useful to run them on debug builds.
-TEST(Locker, PerformanceStdMutex) {
+TEST_F(DConcurrencyTestFixture, PerformanceStdMutex) {
stdx::mutex mtx;
perfTest([&](int threadId) { stdx::unique_lock<stdx::mutex> lk(mtx); }, kMaxPerfThreads);
}
-TEST(Locker, PerformanceResourceMutexShared) {
+TEST_F(DConcurrencyTestFixture, PerformanceResourceMutexShared) {
Lock::ResourceMutex mtx("testMutex");
std::array<DefaultLockerImpl, kMaxPerfThreads> locker;
perfTest([&](int threadId) { Lock::SharedLock lk(&locker[threadId], mtx); }, kMaxPerfThreads);
}
-TEST(Locker, PerformanceResourceMutexExclusive) {
+TEST_F(DConcurrencyTestFixture, PerformanceResourceMutexExclusive) {
Lock::ResourceMutex mtx("testMutex");
std::array<DefaultLockerImpl, kMaxPerfThreads> locker;
perfTest([&](int threadId) { Lock::ExclusiveLock lk(&locker[threadId], mtx); },
kMaxPerfThreads);
}
-TEST(Locker, PerformanceCollectionIntentSharedLock) {
- std::array<DefaultLockerImpl, kMaxPerfThreads> locker;
+TEST_F(DConcurrencyTestFixture, PerformanceCollectionIntentSharedLock) {
+ std::vector<std::pair<ServiceContext::UniqueClient, ServiceContext::UniqueOperationContext>>
+ clients = makeKClientsWithLockers<DefaultLockerImpl>(kMaxPerfThreads);
ForceSupportsDocLocking supported(true);
perfTest(
[&](int threadId) {
- Lock::DBLock dlk(&locker[threadId], "test", MODE_IS);
- Lock::CollectionLock clk(&locker[threadId], "test.coll", MODE_IS);
+ Lock::DBLock dlk(clients[threadId].second.get(), "test", MODE_IS);
+ Lock::CollectionLock clk(clients[threadId].second->lockState(), "test.coll", MODE_IS);
},
kMaxPerfThreads);
}
-TEST(Locker, PerformanceCollectionIntentExclusiveLock) {
- std::array<DefaultLockerImpl, kMaxPerfThreads> locker;
+TEST_F(DConcurrencyTestFixture, PerformanceCollectionIntentExclusiveLock) {
+ std::vector<std::pair<ServiceContext::UniqueClient, ServiceContext::UniqueOperationContext>>
+ clients = makeKClientsWithLockers<DefaultLockerImpl>(kMaxPerfThreads);
ForceSupportsDocLocking supported(true);
perfTest(
[&](int threadId) {
- Lock::DBLock dlk(&locker[threadId], "test", MODE_IX);
- Lock::CollectionLock clk(&locker[threadId], "test.coll", MODE_IX);
+ Lock::DBLock dlk(clients[threadId].second.get(), "test", MODE_IX);
+ Lock::CollectionLock clk(clients[threadId].second->lockState(), "test.coll", MODE_IX);
},
kMaxPerfThreads);
}
-TEST(Locker, PerformanceMMAPv1CollectionSharedLock) {
- std::array<MMAPV1LockerImpl, kMaxPerfThreads> locker;
+TEST_F(DConcurrencyTestFixture, PerformanceMMAPv1CollectionSharedLock) {
+ std::vector<std::pair<ServiceContext::UniqueClient, ServiceContext::UniqueOperationContext>>
+ clients = makeKClientsWithLockers<DefaultLockerImpl>(kMaxPerfThreads);
ForceSupportsDocLocking supported(false);
perfTest(
[&](int threadId) {
- Lock::DBLock dlk(&locker[threadId], "test", MODE_IS);
- Lock::CollectionLock clk(&locker[threadId], "test.coll", MODE_S);
+ Lock::DBLock dlk(clients[threadId].second.get(), "test", MODE_IS);
+ Lock::CollectionLock clk(clients[threadId].second->lockState(), "test.coll", MODE_S);
},
kMaxPerfThreads);
}
-TEST(Locker, PerformanceMMAPv1CollectionExclusive) {
- std::array<MMAPV1LockerImpl, kMaxPerfThreads> locker;
+TEST_F(DConcurrencyTestFixture, PerformanceMMAPv1CollectionExclusive) {
+ std::vector<std::pair<ServiceContext::UniqueClient, ServiceContext::UniqueOperationContext>>
+ clients = makeKClientsWithLockers<DefaultLockerImpl>(kMaxPerfThreads);
ForceSupportsDocLocking supported(false);
perfTest(
[&](int threadId) {
- Lock::DBLock dlk(&locker[threadId], "test", MODE_IX);
- Lock::CollectionLock clk(&locker[threadId], "test.coll", MODE_X);
+ Lock::DBLock dlk(clients[threadId].second.get(), "test", MODE_IX);
+ Lock::CollectionLock clk(clients[threadId].second->lockState(), "test.coll", MODE_X);
},
kMaxPerfThreads);
}