author     Bynn Lee <bynn.lee@mongodb.com>  2020-06-29 19:03:53 +0000
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>  2020-07-01 19:45:58 +0000
commit     e14ec02403b4119b17bf013aa257a5eafeb35073 (patch)
tree       ea7853ce563766adb76cfc0b5b4a95b2bf900267 /src/mongo/db/concurrency
parent     5efdcc1269e8e1e0a7899af8ee853ac407d510f4 (diff)
SERVER-43040 Remove the unit-test-only variation of LockerImpl::lockGlobal that passes a nullptr for opCtx
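
With the nullptr-opCtx overload gone, test code constructs an OperationContext and passes it to lockGlobal explicitly. A minimal sketch of the new call pattern, based on the updated tests below (makeOperationContext and resId come from the test fixture and test body, shown here only for illustration):

    // Before: locker.lockGlobal(MODE_IX);   // overload removed by this change
    // After:  the caller supplies the OperationContext explicitly.
    auto opCtx = makeOperationContext();
    const ResourceId resId(RESOURCE_COLLECTION, "TestDB.collection"_sd);

    LockerImpl locker;
    locker.lockGlobal(opCtx.get(), MODE_IX);  // global intent lock with an explicit opCtx
    locker.lock(resId, MODE_X);               // resource locks are unchanged

    locker.unlock(resId);
    locker.unlockGlobal();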
Diffstat (limited to 'src/mongo/db/concurrency')
-rw-r--r--  src/mongo/db/concurrency/d_concurrency_test.cpp    |  18
-rw-r--r--  src/mongo/db/concurrency/lock_manager_test_help.h  |   4
-rw-r--r--  src/mongo/db/concurrency/lock_state.h              |   3
-rw-r--r--  src/mongo/db/concurrency/lock_state_test.cpp       | 130
-rw-r--r--  src/mongo/db/concurrency/lock_stats_test.cpp       |  36
-rw-r--r--  src/mongo/db/concurrency/locker.h                  |   1
6 files changed, 120 insertions(+), 72 deletions(-)
diff --git a/src/mongo/db/concurrency/d_concurrency_test.cpp b/src/mongo/db/concurrency/d_concurrency_test.cpp
index 3fa649bb04e..2d3d602f4ba 100644
--- a/src/mongo/db/concurrency/d_concurrency_test.cpp
+++ b/src/mongo/db/concurrency/d_concurrency_test.cpp
@@ -2247,6 +2247,8 @@ TEST_F(DConcurrencyTestFixture, RSTLLockGuardResilientToExceptionThrownBeforeWai
}
TEST_F(DConcurrencyTestFixture, FailPointInLockDoesNotFailUninterruptibleGlobalNonIntentLocks) {
+ auto opCtx = makeOperationContext();
+
FailPointEnableBlock failWaitingNonPartitionedLocks("failNonIntentLocksIfWaitNeeded");
LockerImpl locker1;
@@ -2254,12 +2256,12 @@ TEST_F(DConcurrencyTestFixture, FailPointInLockDoesNotFailUninterruptibleGlobalN
LockerImpl locker3;
{
- locker1.lockGlobal(MODE_IX);
+ locker1.lockGlobal(opCtx.get(), MODE_IX);
// MODE_S attempt.
stdx::thread t2([&]() {
UninterruptibleLockGuard noInterrupt(&locker2);
- locker2.lockGlobal(MODE_S);
+ locker2.lockGlobal(opCtx.get(), MODE_S);
});
// Wait for the thread to attempt to acquire the global lock in MODE_S.
@@ -2271,12 +2273,12 @@ TEST_F(DConcurrencyTestFixture, FailPointInLockDoesNotFailUninterruptibleGlobalN
}
{
- locker1.lockGlobal(MODE_IX);
+ locker1.lockGlobal(opCtx.get(), MODE_IX);
// MODE_X attempt.
stdx::thread t3([&]() {
UninterruptibleLockGuard noInterrupt(&locker3);
- locker3.lockGlobal(MODE_X);
+ locker3.lockGlobal(opCtx.get(), MODE_X);
});
// Wait for the thread to attempt to acquire the global lock in MODE_X.
@@ -2289,6 +2291,8 @@ TEST_F(DConcurrencyTestFixture, FailPointInLockDoesNotFailUninterruptibleGlobalN
}
TEST_F(DConcurrencyTestFixture, FailPointInLockDoesNotFailUninterruptibleNonIntentLocks) {
+ auto opCtx = makeOperationContext();
+
FailPointEnableBlock failWaitingNonPartitionedLocks("failNonIntentLocksIfWaitNeeded");
LockerImpl locker1;
@@ -2298,7 +2302,7 @@ TEST_F(DConcurrencyTestFixture, FailPointInLockDoesNotFailUninterruptibleNonInte
// Granted MODE_X lock, fail incoming MODE_S and MODE_X.
const ResourceId resId(RESOURCE_COLLECTION, "TestDB.collection"_sd);
- locker1.lockGlobal(MODE_IX);
+ locker1.lockGlobal(opCtx.get(), MODE_IX);
{
locker1.lock(resId, MODE_X);
@@ -2306,7 +2310,7 @@ TEST_F(DConcurrencyTestFixture, FailPointInLockDoesNotFailUninterruptibleNonInte
// MODE_S attempt.
stdx::thread t2([&]() {
UninterruptibleLockGuard noInterrupt(&locker2);
- locker2.lockGlobal(MODE_IS);
+ locker2.lockGlobal(opCtx.get(), MODE_IS);
locker2.lock(resId, MODE_S);
});
@@ -2325,7 +2329,7 @@ TEST_F(DConcurrencyTestFixture, FailPointInLockDoesNotFailUninterruptibleNonInte
// MODE_X attempt.
stdx::thread t3([&]() {
UninterruptibleLockGuard noInterrupt(&locker3);
- locker3.lockGlobal(MODE_IX);
+ locker3.lockGlobal(opCtx.get(), MODE_IX);
locker3.lock(resId, MODE_X);
});
diff --git a/src/mongo/db/concurrency/lock_manager_test_help.h b/src/mongo/db/concurrency/lock_manager_test_help.h
index 2e998a5b215..d8b1c68b4e0 100644
--- a/src/mongo/db/concurrency/lock_manager_test_help.h
+++ b/src/mongo/db/concurrency/lock_manager_test_help.h
@@ -36,8 +36,8 @@ namespace mongo {
class LockerForTests : public LockerImpl {
public:
- explicit LockerForTests(LockMode globalLockMode) {
- lockGlobal(globalLockMode);
+ explicit LockerForTests(OperationContext* opCtx, LockMode globalLockMode) {
+ lockGlobal(opCtx, globalLockMode);
}
~LockerForTests() {
diff --git a/src/mongo/db/concurrency/lock_state.h b/src/mongo/db/concurrency/lock_state.h
index dd2d5df2445..202530cf8ae 100644
--- a/src/mongo/db/concurrency/lock_state.h
+++ b/src/mongo/db/concurrency/lock_state.h
@@ -135,9 +135,6 @@ public:
virtual void lockGlobal(OperationContext* opCtx,
LockMode mode,
Date_t deadline = Date_t::max());
- virtual void lockGlobal(LockMode mode, Date_t deadline = Date_t::max()) {
- return lockGlobal(nullptr, mode, deadline);
- }
virtual bool unlockGlobal();
diff --git a/src/mongo/db/concurrency/lock_state_test.cpp b/src/mongo/db/concurrency/lock_state_test.cpp
index 8e92e7df44d..8a8db4217f5 100644
--- a/src/mongo/db/concurrency/lock_state_test.cpp
+++ b/src/mongo/db/concurrency/lock_state_test.cpp
@@ -50,10 +50,12 @@ namespace mongo {
class LockerImplTest : public ServiceContextTest {};
TEST_F(LockerImplTest, LockNoConflict) {
+ auto opCtx = makeOperationContext();
+
const ResourceId resId(RESOURCE_COLLECTION, "TestDB.collection"_sd);
LockerImpl locker;
- locker.lockGlobal(MODE_IX);
+ locker.lockGlobal(opCtx.get(), MODE_IX);
locker.lock(resId, MODE_X);
@@ -68,10 +70,12 @@ TEST_F(LockerImplTest, LockNoConflict) {
}
TEST_F(LockerImplTest, ReLockNoConflict) {
+ auto opCtx = makeOperationContext();
+
const ResourceId resId(RESOURCE_COLLECTION, "TestDB.collection"_sd);
LockerImpl locker;
- locker.lockGlobal(MODE_IX);
+ locker.lockGlobal(opCtx.get(), MODE_IX);
locker.lock(resId, MODE_S);
locker.lock(resId, MODE_X);
@@ -86,16 +90,17 @@ TEST_F(LockerImplTest, ReLockNoConflict) {
}
TEST_F(LockerImplTest, ConflictWithTimeout) {
+ auto opCtx = makeOperationContext();
+
const ResourceId resId(RESOURCE_COLLECTION, "TestDB.collection"_sd);
LockerImpl locker1;
- locker1.lockGlobal(MODE_IX);
+ locker1.lockGlobal(opCtx.get(), MODE_IX);
locker1.lock(resId, MODE_X);
LockerImpl locker2;
- locker2.lockGlobal(MODE_IX);
+ locker2.lockGlobal(opCtx.get(), MODE_IX);
- auto opCtx = makeOperationContext();
ASSERT_THROWS_CODE(locker2.lock(opCtx.get(), resId, MODE_S, Date_t::now()),
AssertionException,
ErrorCodes::LockTimeout);
@@ -109,18 +114,19 @@ TEST_F(LockerImplTest, ConflictWithTimeout) {
}
TEST_F(LockerImplTest, ConflictUpgradeWithTimeout) {
+ auto opCtx = makeOperationContext();
+
const ResourceId resId(RESOURCE_COLLECTION, "TestDB.collection"_sd);
LockerImpl locker1;
- locker1.lockGlobal(MODE_IS);
+ locker1.lockGlobal(opCtx.get(), MODE_IS);
locker1.lock(resId, MODE_S);
LockerImpl locker2;
- locker2.lockGlobal(MODE_IS);
+ locker2.lockGlobal(opCtx.get(), MODE_IS);
locker2.lock(resId, MODE_S);
// Try upgrading locker 1, which should block and timeout
- auto opCtx = makeOperationContext();
ASSERT_THROWS_CODE(locker1.lock(opCtx.get(), resId, MODE_X, Date_t::now() + Milliseconds(1)),
AssertionException,
ErrorCodes::LockTimeout);
@@ -138,7 +144,7 @@ TEST_F(LockerImplTest, FailPointInLockFailsGlobalNonIntentLocksIfTheyCannotBeImm
auto newOpCtx = cc().makeOperationContext();
LockerImpl locker1;
- locker1.lockGlobal(MODE_IX);
+ locker1.lockGlobal(newOpCtx.get(), MODE_IX);
{
FailPointEnableBlock failWaitingNonPartitionedLocks("failNonIntentLocksIfWaitNeeded");
@@ -204,16 +210,18 @@ TEST_F(LockerImplTest, FailPointInLockFailsNonIntentLocksIfTheyCannotBeImmediate
}
TEST_F(LockerImplTest, ReadTransaction) {
+ auto opCtx = makeOperationContext();
+
LockerImpl locker;
- locker.lockGlobal(MODE_IS);
+ locker.lockGlobal(opCtx.get(), MODE_IS);
locker.unlockGlobal();
- locker.lockGlobal(MODE_IX);
+ locker.lockGlobal(opCtx.get(), MODE_IX);
locker.unlockGlobal();
- locker.lockGlobal(MODE_IX);
- locker.lockGlobal(MODE_IS);
+ locker.lockGlobal(opCtx.get(), MODE_IX);
+ locker.lockGlobal(opCtx.get(), MODE_IS);
locker.unlockGlobal();
locker.unlockGlobal();
}
@@ -222,6 +230,8 @@ TEST_F(LockerImplTest, ReadTransaction) {
* Test that saveLockerImpl works by examining the output.
*/
TEST_F(LockerImplTest, saveAndRestoreGlobal) {
+ auto opCtx = makeOperationContext();
+
Locker::LockSnapshot lockInfo;
LockerImpl locker;
@@ -231,7 +241,7 @@ TEST_F(LockerImplTest, saveAndRestoreGlobal) {
ASSERT_EQUALS(0U, lockInfo.locks.size());
// Lock the global lock, but just once.
- locker.lockGlobal(MODE_IX);
+ locker.lockGlobal(opCtx.get(), MODE_IX);
// We've locked the global lock. This should be reflected in the lockInfo.
locker.saveLockStateAndUnlock(&lockInfo);
@@ -249,6 +259,8 @@ TEST_F(LockerImplTest, saveAndRestoreGlobal) {
* Test that saveLockerImpl can save and restore the RSTL.
*/
TEST_F(LockerImplTest, saveAndRestoreRSTL) {
+ auto opCtx = makeOperationContext();
+
Locker::LockSnapshot lockInfo;
LockerImpl locker;
@@ -257,7 +269,7 @@ TEST_F(LockerImplTest, saveAndRestoreRSTL) {
// Acquire locks.
locker.lock(resourceIdReplicationStateTransitionLock, MODE_IX);
- locker.lockGlobal(MODE_IX);
+ locker.lockGlobal(opCtx.get(), MODE_IX);
locker.lock(resIdDatabase, MODE_IX);
// Save the lock state.
@@ -286,6 +298,8 @@ TEST_F(LockerImplTest, saveAndRestoreRSTL) {
* Test that we don't unlock when we have the global lock more than once.
*/
TEST_F(LockerImplTest, saveAndRestoreGlobalAcquiredTwice) {
+ auto opCtx = makeOperationContext();
+
Locker::LockSnapshot lockInfo;
LockerImpl locker;
@@ -295,8 +309,8 @@ TEST_F(LockerImplTest, saveAndRestoreGlobalAcquiredTwice) {
ASSERT_EQUALS(0U, lockInfo.locks.size());
// Lock the global lock.
- locker.lockGlobal(MODE_IX);
- locker.lockGlobal(MODE_IX);
+ locker.lockGlobal(opCtx.get(), MODE_IX);
+ locker.lockGlobal(opCtx.get(), MODE_IX);
// This shouldn't actually unlock as we're in a nested scope.
ASSERT(!locker.saveLockStateAndUnlock(&lockInfo));
@@ -312,6 +326,8 @@ TEST_F(LockerImplTest, saveAndRestoreGlobalAcquiredTwice) {
* Tests that restoreLockerImpl works by locking a db and collection and saving + restoring.
*/
TEST_F(LockerImplTest, saveAndRestoreDBAndCollection) {
+ auto opCtx = makeOperationContext();
+
Locker::LockSnapshot lockInfo;
LockerImpl locker;
@@ -320,7 +336,7 @@ TEST_F(LockerImplTest, saveAndRestoreDBAndCollection) {
const ResourceId resIdCollection(RESOURCE_COLLECTION, "TestDB.collection"_sd);
// Lock some stuff.
- locker.lockGlobal(MODE_IX);
+ locker.lockGlobal(opCtx.get(), MODE_IX);
locker.lock(resIdDatabase, MODE_IX);
locker.lock(resIdCollection, MODE_X);
locker.saveLockStateAndUnlock(&lockInfo);
@@ -340,6 +356,8 @@ TEST_F(LockerImplTest, saveAndRestoreDBAndCollection) {
}
TEST_F(LockerImplTest, releaseWriteUnitOfWork) {
+ auto opCtx = makeOperationContext();
+
Locker::LockSnapshot lockInfo;
LockerImpl locker;
@@ -349,7 +367,7 @@ TEST_F(LockerImplTest, releaseWriteUnitOfWork) {
locker.beginWriteUnitOfWork();
// Lock some stuff.
- locker.lockGlobal(MODE_IX);
+ locker.lockGlobal(opCtx.get(), MODE_IX);
locker.lock(resIdDatabase, MODE_IX);
locker.lock(resIdCollection, MODE_X);
// Unlock them so that they will be pending to unlock.
@@ -368,6 +386,8 @@ TEST_F(LockerImplTest, releaseWriteUnitOfWork) {
}
TEST_F(LockerImplTest, restoreWriteUnitOfWork) {
+ auto opCtx = makeOperationContext();
+
Locker::LockSnapshot lockInfo;
LockerImpl locker;
@@ -377,7 +397,7 @@ TEST_F(LockerImplTest, restoreWriteUnitOfWork) {
locker.beginWriteUnitOfWork();
// Lock some stuff.
- locker.lockGlobal(MODE_IX);
+ locker.lockGlobal(opCtx.get(), MODE_IX);
locker.lock(resIdDatabase, MODE_IX);
locker.lock(resIdCollection, MODE_X);
// Unlock them so that they will be pending to unlock.
@@ -408,6 +428,8 @@ TEST_F(LockerImplTest, restoreWriteUnitOfWork) {
}
TEST_F(LockerImplTest, releaseAndRestoreWriteUnitOfWorkWithoutUnlock) {
+ auto opCtx = makeOperationContext();
+
Locker::WUOWLockSnapshot lockInfo;
LockerImpl locker;
@@ -418,12 +440,12 @@ TEST_F(LockerImplTest, releaseAndRestoreWriteUnitOfWorkWithoutUnlock) {
locker.beginWriteUnitOfWork();
// Lock some stuff.
- locker.lockGlobal(MODE_IX);
+ locker.lockGlobal(opCtx.get(), MODE_IX);
locker.lock(resIdDatabase, MODE_IX);
locker.lock(resIdCollection, MODE_X);
// Recursive global lock.
- locker.lockGlobal(MODE_IX);
+ locker.lockGlobal(opCtx.get(), MODE_IX);
ASSERT_EQ(locker.getRequestsForTest().find(resourceIdGlobal).objAddr()->recursiveCount, 2U);
ASSERT_FALSE(locker.unlockGlobal());
@@ -460,7 +482,7 @@ TEST_F(LockerImplTest, releaseAndRestoreWriteUnitOfWorkWithoutUnlock) {
locker.beginWriteUnitOfWork();
// Grab new locks inside the new WUOW.
- locker.lockGlobal(MODE_IX);
+ locker.lockGlobal(opCtx.get(), MODE_IX);
locker.lock(resIdDatabase, MODE_IX);
locker.lock(resIdCollection2, MODE_IX);
@@ -522,6 +544,8 @@ TEST_F(LockerImplTest, releaseAndRestoreWriteUnitOfWorkWithoutUnlock) {
}
TEST_F(LockerImplTest, releaseAndRestoreReadOnlyWriteUnitOfWork) {
+ auto opCtx = makeOperationContext();
+
Locker::LockSnapshot lockInfo;
LockerImpl locker;
@@ -534,7 +558,7 @@ TEST_F(LockerImplTest, releaseAndRestoreReadOnlyWriteUnitOfWork) {
locker.beginWriteUnitOfWork();
// Lock some stuff in IS mode.
- locker.lockGlobal(MODE_IS);
+ locker.lockGlobal(opCtx.get(), MODE_IS);
locker.lock(resIdDatabase, MODE_IS);
locker.lock(resIdCollection, MODE_IS);
// Unlock them.
@@ -586,6 +610,8 @@ TEST_F(LockerImplTest, releaseAndRestoreEmptyWriteUnitOfWork) {
}
TEST_F(LockerImplTest, releaseAndRestoreWriteUnitOfWorkWithRecursiveLocks) {
+ auto opCtx = makeOperationContext();
+
Locker::LockSnapshot lockInfo;
LockerImpl locker;
@@ -595,11 +621,11 @@ TEST_F(LockerImplTest, releaseAndRestoreWriteUnitOfWorkWithRecursiveLocks) {
locker.beginWriteUnitOfWork();
// Lock some stuff.
- locker.lockGlobal(MODE_IX);
+ locker.lockGlobal(opCtx.get(), MODE_IX);
locker.lock(resIdDatabase, MODE_IX);
locker.lock(resIdCollection, MODE_X);
// Recursively lock them again with a weaker mode.
- locker.lockGlobal(MODE_IS);
+ locker.lockGlobal(opCtx.get(), MODE_IS);
locker.lock(resIdDatabase, MODE_IS);
locker.lock(resIdCollection, MODE_S);
@@ -669,10 +695,12 @@ TEST_F(LockerImplTest, releaseAndRestoreWriteUnitOfWorkWithRecursiveLocks) {
}
TEST_F(LockerImplTest, DefaultLocker) {
+ auto opCtx = makeOperationContext();
+
const ResourceId resId(RESOURCE_DATABASE, "TestDB"_sd);
LockerImpl locker;
- locker.lockGlobal(MODE_IX);
+ locker.lockGlobal(opCtx.get(), MODE_IX);
locker.lock(resId, MODE_X);
// Make sure only Global and TestDB resources are locked.
@@ -691,6 +719,8 @@ TEST_F(LockerImplTest, SharedLocksShouldTwoPhaseLockIsTrue) {
// and S locks are postponed until endWriteUnitOfWork() is called. Mode IX and X locks always
// participate in two-phased locking, regardless of the setting.
+ auto opCtx = makeOperationContext();
+
const ResourceId resId1(RESOURCE_DATABASE, "TestDB1"_sd);
const ResourceId resId2(RESOURCE_DATABASE, "TestDB2"_sd);
const ResourceId resId3(RESOURCE_COLLECTION, "TestDB.collection3"_sd);
@@ -699,7 +729,7 @@ TEST_F(LockerImplTest, SharedLocksShouldTwoPhaseLockIsTrue) {
LockerImpl locker;
locker.setSharedLocksShouldTwoPhaseLock(true);
- locker.lockGlobal(MODE_IS);
+ locker.lockGlobal(opCtx.get(), MODE_IS);
ASSERT_EQ(locker.getLockMode(resourceIdGlobal), MODE_IS);
locker.lock(resourceIdReplicationStateTransitionLock, MODE_IS);
@@ -745,6 +775,8 @@ TEST_F(LockerImplTest, ModeIXAndXLockParticipatesInTwoPhaseLocking) {
// Unlock on mode IX and X locks during a WUOW should always be postponed until
// endWriteUnitOfWork() is called. Mode IS and S locks should unlock immediately.
+ auto opCtx = makeOperationContext();
+
const ResourceId resId1(RESOURCE_DATABASE, "TestDB1"_sd);
const ResourceId resId2(RESOURCE_DATABASE, "TestDB2"_sd);
const ResourceId resId3(RESOURCE_COLLECTION, "TestDB.collection3"_sd);
@@ -752,7 +784,7 @@ TEST_F(LockerImplTest, ModeIXAndXLockParticipatesInTwoPhaseLocking) {
LockerImpl locker;
- locker.lockGlobal(MODE_IX);
+ locker.lockGlobal(opCtx.get(), MODE_IX);
ASSERT_EQ(locker.getLockMode(resourceIdGlobal), MODE_IX);
locker.lock(resourceIdReplicationStateTransitionLock, MODE_IX);
@@ -884,6 +916,8 @@ TEST_F(LockerImplTest, RSTLTwoPhaseLockingBehaviorModeIS) {
}
TEST_F(LockerImplTest, OverrideLockRequestTimeout) {
+ auto opCtx = makeOperationContext();
+
const ResourceId resIdFirstDB(RESOURCE_DATABASE, "FirstDB"_sd);
const ResourceId resIdSecondDB(RESOURCE_DATABASE, "SecondDB"_sd);
@@ -893,8 +927,8 @@ TEST_F(LockerImplTest, OverrideLockRequestTimeout) {
// Set up locker2 to override lock requests' provided timeout if greater than 1000 milliseconds.
locker2.setMaxLockTimeout(Milliseconds(1000));
- locker1.lockGlobal(MODE_IX);
- locker2.lockGlobal(MODE_IX);
+ locker1.lockGlobal(opCtx.get(), MODE_IX);
+ locker2.lockGlobal(opCtx.get(), MODE_IX);
// locker1 acquires FirstDB under an exclusive lock.
locker1.lock(resIdFirstDB, MODE_X);
@@ -902,7 +936,6 @@ TEST_F(LockerImplTest, OverrideLockRequestTimeout) {
// locker2's attempt to acquire FirstDB with unlimited wait time should timeout after 1000
// milliseconds and throw because _maxLockRequestTimeout is set to 1000 milliseconds.
- auto opCtx = makeOperationContext();
ASSERT_THROWS_CODE(locker2.lock(opCtx.get(), resIdFirstDB, MODE_X, Date_t::max()),
AssertionException,
ErrorCodes::LockTimeout);
@@ -920,6 +953,8 @@ TEST_F(LockerImplTest, OverrideLockRequestTimeout) {
}
TEST_F(LockerImplTest, DoNotWaitForLockAcquisition) {
+ auto opCtx = makeOperationContext();
+
const ResourceId resIdFirstDB(RESOURCE_DATABASE, "FirstDB"_sd);
const ResourceId resIdSecondDB(RESOURCE_DATABASE, "SecondDB"_sd);
@@ -930,8 +965,8 @@ TEST_F(LockerImplTest, DoNotWaitForLockAcquisition) {
// deadlines in the lock request.
locker2.setMaxLockTimeout(Milliseconds(0));
- locker1.lockGlobal(MODE_IX);
- locker2.lockGlobal(MODE_IX);
+ locker1.lockGlobal(opCtx.get(), MODE_IX);
+ locker2.lockGlobal(opCtx.get(), MODE_IX);
// locker1 acquires FirstDB under an exclusive lock.
locker1.lock(resIdFirstDB, MODE_X);
@@ -939,7 +974,6 @@ TEST_F(LockerImplTest, DoNotWaitForLockAcquisition) {
// locker2's attempt to acquire FirstDB with unlimited wait time should fail immediately and
// throw because _maxLockRequestTimeout was set to 0.
- auto opCtx = makeOperationContext();
ASSERT_THROWS_CODE(locker2.lock(opCtx.get(), resIdFirstDB, MODE_X, Date_t::max()),
AssertionException,
ErrorCodes::LockTimeout);
@@ -974,12 +1008,14 @@ bool lockerInfoContainsLock(const Locker::LockerInfo& lockerInfo,
} // namespace
TEST_F(LockerImplTest, GetLockerInfoShouldReportHeldLocks) {
+ auto opCtx = makeOperationContext();
+
const ResourceId dbId(RESOURCE_DATABASE, "TestDB"_sd);
const ResourceId collectionId(RESOURCE_COLLECTION, "TestDB.collection"_sd);
// Take an exclusive lock on the collection.
LockerImpl locker;
- locker.lockGlobal(MODE_IX);
+ locker.lockGlobal(opCtx.get(), MODE_IX);
locker.lock(dbId, MODE_IX);
locker.lock(collectionId, MODE_X);
@@ -998,18 +1034,20 @@ TEST_F(LockerImplTest, GetLockerInfoShouldReportHeldLocks) {
}
TEST_F(LockerImplTest, GetLockerInfoShouldReportPendingLocks) {
+ auto opCtx = makeOperationContext();
+
const ResourceId dbId(RESOURCE_DATABASE, "TestDB"_sd);
const ResourceId collectionId(RESOURCE_COLLECTION, "TestDB.collection"_sd);
// Take an exclusive lock on the collection.
LockerImpl successfulLocker;
- successfulLocker.lockGlobal(MODE_IX);
+ successfulLocker.lockGlobal(opCtx.get(), MODE_IX);
successfulLocker.lock(dbId, MODE_IX);
successfulLocker.lock(collectionId, MODE_X);
// Now attempt to get conflicting locks.
LockerImpl conflictingLocker;
- conflictingLocker.lockGlobal(MODE_IS);
+ conflictingLocker.lockGlobal(opCtx.get(), MODE_IS);
conflictingLocker.lock(dbId, MODE_IS);
ASSERT_EQ(LOCK_WAITING,
conflictingLocker.lockBeginForTest(nullptr /* opCtx */, collectionId, MODE_IS));
@@ -1042,10 +1080,12 @@ TEST_F(LockerImplTest, GetLockerInfoShouldReportPendingLocks) {
}
TEST_F(LockerImplTest, ReaquireLockPendingUnlock) {
+ auto opCtx = makeOperationContext();
+
const ResourceId resId(RESOURCE_COLLECTION, "TestDB.collection"_sd);
LockerImpl locker;
- locker.lockGlobal(MODE_IS);
+ locker.lockGlobal(opCtx.get(), MODE_IS);
locker.lock(resId, MODE_X);
ASSERT_TRUE(locker.isLockHeldForMode(resId, MODE_X));
@@ -1070,10 +1110,12 @@ TEST_F(LockerImplTest, ReaquireLockPendingUnlock) {
}
TEST_F(LockerImplTest, AcquireLockPendingUnlockWithCoveredMode) {
+ auto opCtx = makeOperationContext();
+
const ResourceId resId(RESOURCE_COLLECTION, "TestDB.collection"_sd);
LockerImpl locker;
- locker.lockGlobal(MODE_IS);
+ locker.lockGlobal(opCtx.get(), MODE_IS);
locker.lock(resId, MODE_X);
ASSERT_TRUE(locker.isLockHeldForMode(resId, MODE_X));
@@ -1098,10 +1140,12 @@ TEST_F(LockerImplTest, AcquireLockPendingUnlockWithCoveredMode) {
}
TEST_F(LockerImplTest, ConvertLockPendingUnlock) {
+ auto opCtx = makeOperationContext();
+
const ResourceId resId(RESOURCE_COLLECTION, "TestDB.collection"_sd);
LockerImpl locker;
- locker.lockGlobal(MODE_IS);
+ locker.lockGlobal(opCtx.get(), MODE_IS);
locker.lock(resId, MODE_IX);
ASSERT_TRUE(locker.isLockHeldForMode(resId, MODE_IX));
@@ -1130,10 +1174,12 @@ TEST_F(LockerImplTest, ConvertLockPendingUnlock) {
}
TEST_F(LockerImplTest, ConvertLockPendingUnlockAndUnlock) {
+ auto opCtx = makeOperationContext();
+
const ResourceId resId(RESOURCE_COLLECTION, "TestDB.collection"_sd);
LockerImpl locker;
- locker.lockGlobal(MODE_IS);
+ locker.lockGlobal(opCtx.get(), MODE_IS);
locker.lock(resId, MODE_IX);
ASSERT_TRUE(locker.isLockHeldForMode(resId, MODE_IX));
diff --git a/src/mongo/db/concurrency/lock_stats_test.cpp b/src/mongo/db/concurrency/lock_stats_test.cpp
index fc10fe33745..c06bb260b9b 100644
--- a/src/mongo/db/concurrency/lock_stats_test.cpp
+++ b/src/mongo/db/concurrency/lock_stats_test.cpp
@@ -43,7 +43,8 @@ TEST_F(LockStatsTest, NoWait) {
resetGlobalLockStats();
- LockerForTests locker(MODE_IX);
+ auto opCtx = makeOperationContext();
+ LockerForTests locker(opCtx.get(), MODE_IX);
locker.lock(resId, MODE_X);
locker.unlock(resId);
@@ -61,14 +62,13 @@ TEST_F(LockStatsTest, Wait) {
resetGlobalLockStats();
- LockerForTests locker(MODE_IX);
- locker.lock(resId, MODE_X);
-
auto opCtx = makeOperationContext();
+ LockerForTests locker(opCtx.get(), MODE_IX);
+ locker.lock(resId, MODE_X);
{
// This will block
- LockerForTests lockerConflict(MODE_IX);
+ LockerForTests lockerConflict(opCtx.get(), MODE_IX);
ASSERT_EQUALS(LOCK_WAITING, lockerConflict.lockBeginForTest(opCtx.get(), resId, MODE_S));
// Sleep 1 millisecond so the wait time passes
@@ -96,7 +96,8 @@ TEST_F(LockStatsTest, Reporting) {
resetGlobalLockStats();
- LockerForTests locker(MODE_IX);
+ auto opCtx = makeOperationContext();
+ LockerForTests locker(opCtx.get(), MODE_IX);
locker.lock(resId, MODE_X);
locker.unlock(resId);
@@ -113,13 +114,12 @@ TEST_F(LockStatsTest, Subtraction) {
resetGlobalLockStats();
- LockerForTests locker(MODE_IX);
- locker.lock(resId, MODE_X);
-
auto opCtx = makeOperationContext();
+ LockerForTests locker(opCtx.get(), MODE_IX);
+ locker.lock(resId, MODE_X);
{
- LockerForTests lockerConflict(MODE_IX);
+ LockerForTests lockerConflict(opCtx.get(), MODE_IX);
ASSERT_THROWS_CODE(
lockerConflict.lock(opCtx.get(), resId, MODE_S, Date_t::now() + Milliseconds(5)),
AssertionException,
@@ -133,7 +133,7 @@ TEST_F(LockStatsTest, Subtraction) {
ASSERT_GREATER_THAN(stats.get(resId, MODE_S).combinedWaitTimeMicros, 0);
{
- LockerForTests lockerConflict(MODE_IX);
+ LockerForTests lockerConflict(opCtx.get(), MODE_IX);
ASSERT_THROWS_CODE(
lockerConflict.lock(opCtx.get(), resId, MODE_S, Date_t::now() + Milliseconds(5)),
AssertionException,
@@ -158,7 +158,7 @@ namespace {
* reported locked if 'rid' is the global lock resource, or unlocked if 'rid' is not the global lock
* resource.
*/
-void assertGlobalAcquisitionStats(ResourceId rid) {
+void assertGlobalAcquisitionStats(OperationContext* opCtx, ResourceId rid) {
resetGlobalLockStats();
SingleThreadedLockStats stats;
@@ -167,7 +167,7 @@ void assertGlobalAcquisitionStats(ResourceId rid) {
LockerImpl locker;
if (rid == resourceIdGlobal) {
- locker.lockGlobal(LockMode::MODE_IX);
+ locker.lockGlobal(opCtx, LockMode::MODE_IX);
} else {
locker.lock(rid, LockMode::MODE_IX);
}
@@ -188,9 +188,10 @@ void assertGlobalAcquisitionStats(ResourceId rid) {
} // namespace
TEST_F(LockStatsTest, GlobalRetrievableSeparately) {
- assertGlobalAcquisitionStats(resourceIdGlobal);
- assertGlobalAcquisitionStats(resourceIdParallelBatchWriterMode);
- assertGlobalAcquisitionStats(resourceIdReplicationStateTransitionLock);
+ auto opCtx = makeOperationContext();
+ assertGlobalAcquisitionStats(opCtx.get(), resourceIdGlobal);
+ assertGlobalAcquisitionStats(opCtx.get(), resourceIdParallelBatchWriterMode);
+ assertGlobalAcquisitionStats(opCtx.get(), resourceIdReplicationStateTransitionLock);
}
TEST_F(LockStatsTest, ServerStatus) {
@@ -205,7 +206,8 @@ TEST_F(LockStatsTest, ServerStatus) {
// Take the global, PBWM and RSTL locks in MODE_IX to create acquisition stats for them.
LockerImpl locker;
- locker.lockGlobal(LockMode::MODE_IX);
+ auto opCtx = makeOperationContext();
+ locker.lockGlobal(opCtx.get(), LockMode::MODE_IX);
locker.lock(resourceIdParallelBatchWriterMode, LockMode::MODE_IX);
locker.lock(resourceIdReplicationStateTransitionLock, LockMode::MODE_IX);
diff --git a/src/mongo/db/concurrency/locker.h b/src/mongo/db/concurrency/locker.h
index 34ac15d8586..c98f2f008e5 100644
--- a/src/mongo/db/concurrency/locker.h
+++ b/src/mongo/db/concurrency/locker.h
@@ -162,7 +162,6 @@ public:
virtual void lockGlobal(OperationContext* opCtx,
LockMode mode,
Date_t deadline = Date_t::max()) = 0;
- virtual void lockGlobal(LockMode mode, Date_t deadline = Date_t::max()) = 0;
/**
* Decrements the reference count on the global lock. If the reference count on the