summaryrefslogtreecommitdiff
path: root/src
diff options
context:
space:
mode:
authorXiangyu Yao <xiangyu.yao@mongodb.com>2019-02-12 15:53:24 -0500
committerXiangyu Yao <xiangyu.yao@mongodb.com>2019-02-20 13:14:13 -0500
commitacddc7f35f0373ccb2e8fe9d45f42304b2b74f95 (patch)
tree8064f3c96f8f265d088b101c1c3bb4dd247e9bce /src
parent70aa69bfad6a66c7a00701403f1979ce77d654af (diff)
downloadmongo-acddc7f35f0373ccb2e8fe9d45f42304b2b74f95.tar.gz
SERVER-39425 Improve lock acquisition contract
Lock acquisition timeout should always throw exceptions rather than fail silently
Diffstat (limited to 'src')
-rw-r--r--src/mongo/db/catalog_raii.cpp4
-rw-r--r--src/mongo/db/commands/oplog_note.cpp2
-rw-r--r--src/mongo/db/concurrency/d_concurrency.cpp64
-rw-r--r--src/mongo/db/concurrency/d_concurrency_test.cpp233
-rw-r--r--src/mongo/db/concurrency/lock_manager_defs.h2
-rw-r--r--src/mongo/db/concurrency/lock_state.cpp125
-rw-r--r--src/mongo/db/concurrency/lock_state.h50
-rw-r--r--src/mongo/db/concurrency/lock_state_test.cpp135
-rw-r--r--src/mongo/db/concurrency/lock_stats_test.cpp16
-rw-r--r--src/mongo/db/concurrency/locker.h54
-rw-r--r--src/mongo/db/concurrency/locker_noop.h21
-rw-r--r--src/mongo/db/concurrency/replication_state_transition_lock_guard.cpp7
-rw-r--r--src/mongo/db/db.cpp4
-rw-r--r--src/mongo/db/repl/replication_coordinator_impl_test.cpp2
-rw-r--r--src/mongo/db/transaction_participant_test.cpp8
15 files changed, 359 insertions, 368 deletions
diff --git a/src/mongo/db/catalog_raii.cpp b/src/mongo/db/catalog_raii.cpp
index 651ba7d1ca6..a9a77a5e7e2 100644
--- a/src/mongo/db/catalog_raii.cpp
+++ b/src/mongo/db/catalog_raii.cpp
@@ -53,7 +53,6 @@ void uassertLockTimeout(std::string resourceName, LockMode lockMode, bool isLock
AutoGetDb::AutoGetDb(OperationContext* opCtx, StringData dbName, LockMode mode, Date_t deadline)
: _dbLock(opCtx, dbName, mode, deadline), _db([&] {
- uassertLockTimeout(str::stream() << "database " << dbName, mode, _dbLock.isLocked());
auto databaseHolder = DatabaseHolder::get(opCtx);
return databaseHolder->getDb(opCtx, dbName);
}()) {
@@ -76,9 +75,6 @@ AutoGetCollection::AutoGetCollection(OperationContext* opCtx,
deadline),
_resolvedNss(resolveNamespaceStringOrUUID(opCtx, nsOrUUID)) {
_collLock.emplace(opCtx->lockState(), _resolvedNss.ns(), modeColl, deadline);
- uassertLockTimeout(
- str::stream() << "collection " << nsOrUUID.toString(), modeColl, _collLock->isLocked());
-
// Wait for a configured amount of time after acquiring locks if the failpoint is enabled
MONGO_FAIL_POINT_BLOCK(setAutoGetCollectionWait, customWait) {
const BSONObj& data = customWait.getData();
diff --git a/src/mongo/db/commands/oplog_note.cpp b/src/mongo/db/commands/oplog_note.cpp
index cdbe58e5989..2899d5b602f 100644
--- a/src/mongo/db/commands/oplog_note.cpp
+++ b/src/mongo/db/commands/oplog_note.cpp
@@ -57,7 +57,7 @@ Status _performNoopWrite(OperationContext* opCtx, BSONObj msgObj, StringData not
// Use GlobalLock instead of DBLock to allow return when the lock is not available. It may
// happen when the primary steps down and a shared global lock is acquired.
Lock::GlobalLock lock(
- opCtx, MODE_IX, Date_t::now() + Milliseconds(1), Lock::InterruptBehavior::kThrow);
+ opCtx, MODE_IX, Date_t::now() + Milliseconds(1), Lock::InterruptBehavior::kLeaveUnlocked);
if (!lock.isLocked()) {
LOG(1) << "Global lock is not available skipping noopWrite";
diff --git a/src/mongo/db/concurrency/d_concurrency.cpp b/src/mongo/db/concurrency/d_concurrency.cpp
index 21db8a583c5..b7101b023bc 100644
--- a/src/mongo/db/concurrency/d_concurrency.cpp
+++ b/src/mongo/db/concurrency/d_concurrency.cpp
@@ -173,26 +173,23 @@ void Lock::GlobalLock::_enqueue(LockMode lockMode, Date_t deadline) {
if (_opCtx->lockState()->shouldConflictWithSecondaryBatchApplication()) {
_pbwm.lock(MODE_IS);
}
-
- _result = _opCtx->lockState()->lock(
- _opCtx, resourceIdReplicationStateTransitionLock, MODE_IX, deadline);
- if (_result != LOCK_OK) {
+ auto unlockPBWM = makeGuard([this] {
if (_opCtx->lockState()->shouldConflictWithSecondaryBatchApplication()) {
_pbwm.unlock();
}
- return;
- }
+ });
- // At this point the RSTL is locked and must be unlocked if acquiring the GlobalLock fails.
- // We only want to unlock the RSTL if we were interrupted acquiring the GlobalLock and not
- // if we were interrupted acquiring the RSTL itself. If we were interrupted acquiring the
- // RSTL then the RSTL will not be locked and we do not want to attempt to unlock it.
- try {
- _result = _opCtx->lockState()->lockGlobalBegin(_opCtx, lockMode, deadline);
- } catch (...) {
- _opCtx->lockState()->unlock(resourceIdReplicationStateTransitionLock);
- throw;
- }
+ _opCtx->lockState()->lock(
+ _opCtx, resourceIdReplicationStateTransitionLock, MODE_IX, deadline);
+
+ auto unlockRSTL = makeGuard(
+ [this] { _opCtx->lockState()->unlock(resourceIdReplicationStateTransitionLock); });
+
+ _result = LOCK_INVALID;
+ _result = _opCtx->lockState()->lockGlobalBegin(_opCtx, lockMode, deadline);
+
+ unlockRSTL.dismiss();
+ unlockPBWM.dismiss();
} catch (const ExceptionForCat<ErrorCategory::Interruption>&) {
// The kLeaveUnlocked behavior suppresses this exception.
if (_interruptBehavior == InterruptBehavior::kThrow)
@@ -203,18 +200,15 @@ void Lock::GlobalLock::_enqueue(LockMode lockMode, Date_t deadline) {
void Lock::GlobalLock::waitForLockUntil(Date_t deadline) {
try {
if (_result == LOCK_WAITING) {
- _result = _opCtx->lockState()->lockGlobalComplete(_opCtx, deadline);
- }
-
- if (_result != LOCK_OK) {
- _opCtx->lockState()->unlock(resourceIdReplicationStateTransitionLock);
-
- if (_opCtx->lockState()->shouldConflictWithSecondaryBatchApplication()) {
- _pbwm.unlock();
- }
+ _result = LOCK_INVALID;
+ _opCtx->lockState()->lockGlobalComplete(_opCtx, deadline);
+ _result = LOCK_OK;
}
} catch (const ExceptionForCat<ErrorCategory::Interruption>&) {
_opCtx->lockState()->unlock(resourceIdReplicationStateTransitionLock);
+ if (_opCtx->lockState()->shouldConflictWithSecondaryBatchApplication()) {
+ _pbwm.unlock();
+ }
// The kLeaveUnlocked behavior suppresses this exception.
if (_interruptBehavior == InterruptBehavior::kThrow)
throw;
@@ -239,19 +233,14 @@ Lock::DBLock::DBLock(OperationContext* opCtx, StringData db, LockMode mode, Date
opCtx, isSharedLockMode(_mode) ? MODE_IS : MODE_IX, deadline, InterruptBehavior::kThrow) {
massert(28539, "need a valid database name", !db.empty() && nsIsDbOnly(db));
- if (!_globalLock.isLocked()) {
- invariant(deadline != Date_t::max() || _opCtx->lockState()->hasMaxLockTimeout());
- return;
- }
-
// The check for the admin db is to ensure direct writes to auth collections
// are serialized (see SERVER-16092).
if ((_id == resourceIdAdminDB) && !isSharedLockMode(_mode)) {
_mode = MODE_X;
}
- _result = _opCtx->lockState()->lock(_opCtx, _id, _mode, deadline);
- invariant(_result == LOCK_OK || _result == LOCK_TIMEOUT);
+ _opCtx->lockState()->lock(_opCtx, _id, _mode, deadline);
+ _result = LOCK_OK;
}
Lock::DBLock::DBLock(DBLock&& otherLock)
@@ -280,7 +269,8 @@ void Lock::DBLock::relockWithMode(LockMode newMode) {
_opCtx->lockState()->unlock(_id);
_mode = newMode;
- invariant(LOCK_OK == _opCtx->lockState()->lock(_opCtx, _id, _mode));
+ _opCtx->lockState()->lock(_opCtx, _id, _mode);
+ _result = LOCK_OK;
}
@@ -298,8 +288,8 @@ Lock::CollectionLock::CollectionLock(Locker* lockState,
actualLockMode = isSharedLockMode(mode) ? MODE_S : MODE_X;
}
- _result = _lockState->lock(_id, actualLockMode, deadline);
- invariant(_result == LOCK_OK || _result == LOCK_TIMEOUT);
+ _lockState->lock(_id, actualLockMode, deadline);
+ _result = LOCK_OK;
}
Lock::CollectionLock::CollectionLock(CollectionLock&& otherLock)
@@ -343,8 +333,8 @@ Lock::ParallelBatchWriterMode::ParallelBatchWriterMode(Locker* lockState)
void Lock::ResourceLock::lock(LockMode mode) {
invariant(_result == LOCK_INVALID);
- _result = _locker->lock(_rid, mode);
- invariant(_result == LOCK_OK);
+ _locker->lock(_rid, mode);
+ _result = LOCK_OK;
}
void Lock::ResourceLock::unlock() {
diff --git a/src/mongo/db/concurrency/d_concurrency_test.cpp b/src/mongo/db/concurrency/d_concurrency_test.cpp
index ebe6619558b..865a8bacb5f 100644
--- a/src/mongo/db/concurrency/d_concurrency_test.cpp
+++ b/src/mongo/db/concurrency/d_concurrency_test.cpp
@@ -425,11 +425,12 @@ TEST_F(DConcurrencyTestFixture, GlobalLockS_Timeout) {
clients[0].second.get(), MODE_X, Date_t::now(), Lock::InterruptBehavior::kThrow);
ASSERT(globalWrite.isLocked());
- Lock::GlobalLock globalReadTry(clients[1].second.get(),
- MODE_S,
- Date_t::now() + Milliseconds(1),
- Lock::InterruptBehavior::kThrow);
- ASSERT(!globalReadTry.isLocked());
+ ASSERT_THROWS_CODE(Lock::GlobalLock(clients[1].second.get(),
+ MODE_S,
+ Date_t::now() + Milliseconds(1),
+ Lock::InterruptBehavior::kThrow),
+ AssertionException,
+ ErrorCodes::LockTimeout);
}
TEST_F(DConcurrencyTestFixture, GlobalLockX_Timeout) {
@@ -438,11 +439,12 @@ TEST_F(DConcurrencyTestFixture, GlobalLockX_Timeout) {
clients[0].second.get(), MODE_X, Date_t::now(), Lock::InterruptBehavior::kThrow);
ASSERT(globalWrite.isLocked());
- Lock::GlobalLock globalWriteTry(clients[1].second.get(),
- MODE_X,
- Date_t::now() + Milliseconds(1),
- Lock::InterruptBehavior::kThrow);
- ASSERT(!globalWriteTry.isLocked());
+ ASSERT_THROWS_CODE(Lock::GlobalLock(clients[1].second.get(),
+ MODE_X,
+ Date_t::now() + Milliseconds(1),
+ Lock::InterruptBehavior::kThrow),
+ AssertionException,
+ ErrorCodes::LockTimeout);
}
TEST_F(DConcurrencyTestFixture, RSTLmodeX_Timeout) {
@@ -453,11 +455,12 @@ TEST_F(DConcurrencyTestFixture, RSTLmodeX_Timeout) {
clients[0].second.get()->lockState()->getLockMode(resourceIdReplicationStateTransitionLock),
MODE_X);
- Lock::GlobalLock globalWriteTry(clients[1].second.get(),
- MODE_X,
- Date_t::now() + Milliseconds(1),
- Lock::InterruptBehavior::kThrow);
- ASSERT(!globalWriteTry.isLocked());
+ ASSERT_THROWS_CODE(Lock::GlobalLock(clients[1].second.get(),
+ MODE_X,
+ Date_t::now() + Milliseconds(1),
+ Lock::InterruptBehavior::kThrow),
+ AssertionException,
+ ErrorCodes::LockTimeout);
ASSERT_EQ(
clients[0].second.get()->lockState()->getLockMode(resourceIdReplicationStateTransitionLock),
MODE_X);
@@ -541,9 +544,11 @@ TEST_F(DConcurrencyTestFixture, GlobalLockXDoesNotSetGlobalWriteLockedWhenLockAc
auto opCtx = clients[1].second.get();
ASSERT_FALSE(GlobalLockAcquisitionTracker::get(opCtx).getGlobalWriteLocked());
{
- Lock::GlobalLock globalWrite1(
- opCtx, MODE_X, Date_t::now() + Milliseconds(1), Lock::InterruptBehavior::kThrow);
- ASSERT_FALSE(globalWrite1.isLocked());
+ ASSERT_THROWS_CODE(
+ Lock::GlobalLock(
+ opCtx, MODE_X, Date_t::now() + Milliseconds(1), Lock::InterruptBehavior::kThrow),
+ AssertionException,
+ ErrorCodes::LockTimeout);
}
ASSERT_FALSE(GlobalLockAcquisitionTracker::get(opCtx).getGlobalWriteLocked());
}
@@ -641,9 +646,11 @@ TEST_F(DConcurrencyTestFixture,
auto opCtx = clients[1].second.get();
ASSERT_FALSE(GlobalLockAcquisitionTracker::get(opCtx).getGlobalSharedLockTaken());
{
- Lock::GlobalLock globalWrite1(
- opCtx, MODE_S, Date_t::now() + Milliseconds(1), Lock::InterruptBehavior::kThrow);
- ASSERT_FALSE(globalWrite1.isLocked());
+ ASSERT_THROWS_CODE(
+ Lock::GlobalLock(
+ opCtx, MODE_X, Date_t::now() + Milliseconds(1), Lock::InterruptBehavior::kThrow),
+ AssertionException,
+ ErrorCodes::LockTimeout);
}
ASSERT_FALSE(GlobalLockAcquisitionTracker::get(opCtx).getGlobalSharedLockTaken());
}
@@ -664,36 +671,36 @@ TEST_F(DConcurrencyTestFixture, GlobalLockX_TimeoutDueToGlobalLockS) {
auto clients = makeKClientsWithLockers(2);
Lock::GlobalRead globalRead(clients[0].second.get());
- Lock::GlobalLock globalWriteTry(clients[1].second.get(),
- MODE_X,
- Date_t::now() + Milliseconds(1),
- Lock::InterruptBehavior::kThrow);
-
- ASSERT(!globalWriteTry.isLocked());
+ ASSERT_THROWS_CODE(Lock::GlobalLock(clients[1].second.get(),
+ MODE_X,
+ Date_t::now() + Milliseconds(1),
+ Lock::InterruptBehavior::kThrow),
+ AssertionException,
+ ErrorCodes::LockTimeout);
}
TEST_F(DConcurrencyTestFixture, GlobalLockS_TimeoutDueToGlobalLockX) {
auto clients = makeKClientsWithLockers(2);
Lock::GlobalWrite globalWrite(clients[0].second.get());
- Lock::GlobalLock globalReadTry(clients[1].second.get(),
- MODE_S,
- Date_t::now() + Milliseconds(1),
- Lock::InterruptBehavior::kThrow);
-
- ASSERT(!globalReadTry.isLocked());
+ ASSERT_THROWS_CODE(Lock::GlobalLock(clients[1].second.get(),
+ MODE_S,
+ Date_t::now() + Milliseconds(1),
+ Lock::InterruptBehavior::kThrow),
+ AssertionException,
+ ErrorCodes::LockTimeout);
}
TEST_F(DConcurrencyTestFixture, GlobalLockX_TimeoutDueToGlobalLockX) {
auto clients = makeKClientsWithLockers(2);
Lock::GlobalWrite globalWrite(clients[0].second.get());
- Lock::GlobalLock globalWriteTry(clients[1].second.get(),
- MODE_X,
- Date_t::now() + Milliseconds(1),
- Lock::InterruptBehavior::kThrow);
-
- ASSERT(!globalWriteTry.isLocked());
+ ASSERT_THROWS_CODE(Lock::GlobalLock(clients[1].second.get(),
+ MODE_X,
+ Date_t::now() + Milliseconds(1),
+ Lock::InterruptBehavior::kThrow),
+ AssertionException,
+ ErrorCodes::LockTimeout);
}
TEST_F(DConcurrencyTestFixture, TempReleaseGlobalWrite) {
@@ -1420,9 +1427,11 @@ TEST_F(DConcurrencyTestFixture, Throttling) {
Date_t t1 = Date_t::now();
{
- Lock::GlobalRead R2(
- opctx2, Date_t::now() + timeoutMillis, Lock::InterruptBehavior::kThrow);
- ASSERT(!R2.isLocked());
+ ASSERT_THROWS_CODE(Lock::GlobalRead(opctx2,
+ Date_t::now() + timeoutMillis,
+ Lock::InterruptBehavior::kThrow),
+ AssertionException,
+ ErrorCodes::LockTimeout);
}
Date_t t2 = Date_t::now();
@@ -1466,8 +1475,9 @@ TEST_F(DConcurrencyTestFixture, ReleaseAndReacquireTicket) {
{
// A second Locker should not be able to acquire a ticket.
- Lock::GlobalRead R2(opctx2, Date_t::now(), Lock::InterruptBehavior::kThrow);
- ASSERT(!R2.isLocked());
+ ASSERT_THROWS_CODE(Lock::GlobalRead(opctx2, Date_t::now(), Lock::InterruptBehavior::kThrow),
+ AssertionException,
+ ErrorCodes::LockTimeout);
}
opctx1->lockState()->releaseTicket();
@@ -1482,8 +1492,9 @@ TEST_F(DConcurrencyTestFixture, ReleaseAndReacquireTicket) {
{
// Now a second Locker cannot acquire a ticket.
- Lock::GlobalRead R2(opctx2, Date_t::now(), Lock::InterruptBehavior::kThrow);
- ASSERT(!R2.isLocked());
+ ASSERT_THROWS_CODE(Lock::GlobalRead(opctx2, Date_t::now(), Lock::InterruptBehavior::kThrow),
+ AssertionException,
+ ErrorCodes::LockTimeout);
}
}
@@ -1497,33 +1508,62 @@ TEST_F(DConcurrencyTestFixture, LockerWithReleasedTicketCanBeUnlocked) {
opctx1->lockState()->releaseTicket();
}
-TEST_F(DConcurrencyTestFixture, TicketAcquireCanBeInterrupted) {
+TEST_F(DConcurrencyTestFixture, TicketAcquireCanThrowDueToKill) {
auto clientOpctxPairs = makeKClientsWithLockers(1);
auto opctx1 = clientOpctxPairs[0].second.get();
// Limit the locker to 0 tickets at a time.
UseGlobalThrottling throttle(opctx1, 0);
- // This thread should block because it cannot acquire a ticket.
+ // This thread should block because it cannot acquire a ticket and then get interrupted.
auto result = runTaskAndKill(opctx1, [&] { Lock::GlobalRead R2(opctx1); });
ASSERT_THROWS_CODE(result.get(), AssertionException, ErrorCodes::Interrupted);
}
-TEST_F(DConcurrencyTestFixture, TicketAcquireRespectsUninterruptibleLockGuard) {
- auto clientOpctxPairs = makeKClientsWithLockers(1);
- auto opCtx = clientOpctxPairs[0].second.get();
- // Limit the locker to 0 tickets at a time.
+TEST_F(DConcurrencyTestFixture, TicketAcquireCanThrowDueToMaxLockTimeout) {
+ auto clients = makeKClientsWithLockers(1);
+ auto opCtx = clients[0].second.get();
+
UseGlobalThrottling throttle(opCtx, 0);
- // This thread should block and return because it cannot acquire a ticket within the deadline.
- auto result = runTaskAndKill(opCtx, [&] {
- UninterruptibleLockGuard noInterrupt(opCtx->lockState());
- Lock::GlobalRead R(
- opCtx, Date_t::now() + Milliseconds(1500), Lock::InterruptBehavior::kThrow);
- ASSERT(!R.isLocked());
- });
+ opCtx->lockState()->setMaxLockTimeout(Milliseconds(100));
+ ASSERT_THROWS_CODE(
+ Lock::GlobalLock(opCtx, MODE_IX), AssertionException, ErrorCodes::LockTimeout);
+}
- result.get(); // This should not throw.
+TEST_F(DConcurrencyTestFixture, TicketAcquireCanThrowDueToDeadline) {
+ auto clients = makeKClientsWithLockers(1);
+ auto opCtx = clients[0].second.get();
+
+ UseGlobalThrottling throttle(opCtx, 0);
+ ASSERT_THROWS_CODE(
+ Lock::GlobalLock(
+ opCtx, MODE_IX, Date_t::now() + Milliseconds(1500), Lock::InterruptBehavior::kThrow),
+ AssertionException,
+ ErrorCodes::LockTimeout);
+}
+
+TEST_F(DConcurrencyTestFixture, TicketAcquireShouldNotThrowIfBehaviorIsLeaveUnlocked1) {
+ auto clients = makeKClientsWithLockers(1);
+ auto opCtx = clients[0].second.get();
+
+ UseGlobalThrottling throttle(opCtx, 0);
+
+ opCtx->lockState()->setMaxLockTimeout(Milliseconds(100));
+ Lock::GlobalLock(opCtx, MODE_IX, Date_t::max(), Lock::InterruptBehavior::kLeaveUnlocked);
+}
+
+TEST_F(DConcurrencyTestFixture, TicketAcquireShouldNotThrowIfBehaviorIsLeaveUnlocked2) {
+ auto clients = makeKClientsWithLockers(1);
+ auto opCtx = clients[0].second.get();
+
+ UseGlobalThrottling throttle(opCtx, 0);
+ boost::optional<Lock::GlobalLock> globalLock;
+ globalLock.emplace(opCtx,
+ MODE_IX,
+ Date_t::now() + Milliseconds(1500),
+ Lock::InterruptBehavior::kLeaveUnlocked);
+ ASSERT(!globalLock->isLocked());
}
TEST_F(DConcurrencyTestFixture, TicketAcquireWithMaxDeadlineRespectsUninterruptibleLockGuard) {
@@ -1568,8 +1608,10 @@ TEST_F(DConcurrencyTestFixture, TicketReacquireCanBeInterrupted) {
{
// A second Locker should not be able to acquire a ticket.
- Lock::GlobalRead R2(opctx2, Date_t::now(), Lock::InterruptBehavior::kThrow);
- ASSERT(!R2.isLocked());
+
+ ASSERT_THROWS_CODE(Lock::GlobalRead(opctx2, Date_t::now(), Lock::InterruptBehavior::kThrow),
+ AssertionException,
+ ErrorCodes::LockTimeout);
}
opctx1->lockState()->releaseTicket();
@@ -1584,31 +1626,6 @@ TEST_F(DConcurrencyTestFixture, TicketReacquireCanBeInterrupted) {
ASSERT_THROWS_CODE(result.get(), AssertionException, ErrorCodes::Interrupted);
}
-TEST_F(DConcurrencyTestFixture,
- GlobalLockTimeoutDueToTicketOutageShouldThrowIfMaxLockTimeoutIsEffective) {
- auto clients = makeKClientsWithLockers(1);
- auto opCtx = clients[0].second.get();
-
- UseGlobalThrottling throttle(opCtx, 0);
-
- boost::optional<Lock::GlobalLock> globalLock;
- opCtx->lockState()->setMaxLockTimeout(Milliseconds(100));
- ASSERT_THROWS_CODE(
- globalLock.emplace(opCtx, MODE_IX), AssertionException, ErrorCodes::LockTimeout);
-}
-
-TEST_F(DConcurrencyTestFixture,
- GlobalLockTimeoutDueToTicketOutageShouldFailSilentlyIfDeadlineIsEffective) {
- auto clients = makeKClientsWithLockers(1);
- auto opCtx = clients[0].second.get();
-
- UseGlobalThrottling throttle(opCtx, 0);
-
- Lock::GlobalLock globalLock(
- opCtx, MODE_IX, Date_t::now() + Milliseconds(100), Lock::InterruptBehavior::kThrow);
- ASSERT(!globalLock.isLocked());
-}
-
TEST_F(DConcurrencyTestFixture, GlobalLockInInterruptedContextThrowsEvenWhenUncontested) {
auto clients = makeKClientsWithLockers(1);
auto opCtx = clients[0].second.get();
@@ -1699,8 +1716,9 @@ TEST_F(DConcurrencyTestFixture, DBLockTimeout) {
ASSERT(L1.isLocked());
Date_t t1 = Date_t::now();
- Lock::DBLock L2(opctx2, "testdb"_sd, MODE_X, Date_t::now() + timeoutMillis);
- ASSERT(!L2.isLocked());
+ ASSERT_THROWS_CODE(Lock::DBLock(opctx2, "testdb"_sd, MODE_X, Date_t::now() + timeoutMillis),
+ AssertionException,
+ ErrorCodes::LockTimeout);
Date_t t2 = Date_t::now();
ASSERT_GTE(t2 - t1, Milliseconds(timeoutMillis));
}
@@ -1716,8 +1734,9 @@ TEST_F(DConcurrencyTestFixture, DBLockTimeoutDueToGlobalLock) {
ASSERT(G1.isLocked());
Date_t t1 = Date_t::now();
- Lock::DBLock L2(opctx2, "testdb"_sd, MODE_X, Date_t::now() + timeoutMillis);
- ASSERT(!L2.isLocked());
+ ASSERT_THROWS_CODE(Lock::DBLock(opctx2, "testdb"_sd, MODE_X, Date_t::now() + timeoutMillis),
+ AssertionException,
+ ErrorCodes::LockTimeout);
Date_t t2 = Date_t::now();
ASSERT_GTE(t2 - t1, Milliseconds(timeoutMillis));
}
@@ -1737,9 +1756,11 @@ TEST_F(DConcurrencyTestFixture, CollectionLockTimeout) {
Date_t t1 = Date_t::now();
Lock::DBLock DBL2(opctx2, "testdb"_sd, MODE_IX, Date_t::max());
ASSERT(opctx2->lockState()->isDbLockedForMode("testdb"_sd, MODE_IX));
- Lock::CollectionLock CL2(
- opctx2->lockState(), "testdb.test"_sd, MODE_X, Date_t::now() + timeoutMillis);
- ASSERT(!CL2.isLocked());
+ ASSERT_THROWS_CODE(
+ Lock::CollectionLock(
+ opctx2->lockState(), "testdb.test"_sd, MODE_X, Date_t::now() + timeoutMillis),
+ AssertionException,
+ ErrorCodes::LockTimeout);
Date_t t2 = Date_t::now();
// 2 terms both can have .9ms rounded away, so we adjust by + 1.
ASSERT_GTE(t2 - t1 + Milliseconds(1), Milliseconds(timeoutMillis));
@@ -1765,7 +1786,8 @@ TEST_F(DConcurrencyTestFixture, CompatibleFirstWithSXIS) {
Lock::GlobalLock lockIS(opctx3, MODE_IS, Date_t::now(), Lock::InterruptBehavior::kThrow);
ASSERT(lockIS.isLocked());
- lockX.waitForLockUntil(Date_t::now());
+ ASSERT_THROWS_CODE(
+ lockX.waitForLockUntil(Date_t::now()), AssertionException, ErrorCodes::LockTimeout);
ASSERT(!lockX.isLocked());
}
@@ -1867,9 +1889,11 @@ TEST_F(DConcurrencyTestFixture, CompatibleFirstWithXSXIXIS) {
lockS->waitForLockUntil(Date_t::now());
ASSERT(lockS->isLocked());
- lockX->waitForLockUntil(Date_t::now());
+ ASSERT_THROWS_CODE(
+ lockX->waitForLockUntil(Date_t::now()), AssertionException, ErrorCodes::LockTimeout);
ASSERT(!lockX->isLocked());
- lockIX.waitForLockUntil(Date_t::now());
+ ASSERT_THROWS_CODE(
+ lockIX.waitForLockUntil(Date_t::now()), AssertionException, ErrorCodes::LockTimeout);
ASSERT(!lockIX.isLocked());
lockIS.waitForLockUntil(Date_t::now());
@@ -1906,8 +1930,9 @@ TEST_F(DConcurrencyTestFixture, CompatibleFirstStress) {
OperationContext* opCtx = clientOpctxPairs[0].second.get();
for (int iters = 0; (t.micros() < endTime); iters++) {
busyWait(0, iters % 20);
- Lock::GlobalRead readLock(
- opCtx, Date_t::now() + Milliseconds(iters % 2), Lock::InterruptBehavior::kThrow);
+ Lock::GlobalRead readLock(opCtx,
+ Date_t::now() + Milliseconds(iters % 2),
+ Lock::InterruptBehavior::kLeaveUnlocked);
if (!readLock.isLocked()) {
timeoutCount[0]++;
continue;
@@ -1940,7 +1965,7 @@ TEST_F(DConcurrencyTestFixture, CompatibleFirstStress) {
lock.emplace(opCtx,
iters % 20 ? MODE_IS : MODE_S,
Date_t::now(),
- Lock::InterruptBehavior::kThrow,
+ Lock::InterruptBehavior::kLeaveUnlocked,
Lock::GlobalLock::EnqueueOnly());
// If thread 0 is holding the MODE_S lock while we tried to acquire a
// MODE_IS or MODE_S lock, the CompatibleFirst policy guarantees success.
@@ -1954,14 +1979,14 @@ TEST_F(DConcurrencyTestFixture, CompatibleFirstStress) {
lock.emplace(opCtx,
MODE_X,
Date_t::now() + Milliseconds(iters % 2),
- Lock::InterruptBehavior::kThrow);
+ Lock::InterruptBehavior::kLeaveUnlocked);
busyWait(threadId, iters % 10);
break;
case 6:
lock.emplace(opCtx,
iters % 25 ? MODE_IX : MODE_S,
Date_t::now() + Milliseconds(iters % 2),
- Lock::InterruptBehavior::kThrow);
+ Lock::InterruptBehavior::kLeaveUnlocked);
busyWait(threadId, iters % 100);
break;
case 7:
@@ -1969,7 +1994,7 @@ TEST_F(DConcurrencyTestFixture, CompatibleFirstStress) {
lock.emplace(opCtx,
iters % 20 ? MODE_IS : MODE_X,
Date_t::now(),
- Lock::InterruptBehavior::kThrow);
+ Lock::InterruptBehavior::kLeaveUnlocked);
break;
default:
MONGO_UNREACHABLE;
@@ -2075,7 +2100,7 @@ TEST_F(DConcurrencyTestFixture, RSTLLockGuardTimeout) {
// The second opCtx times out.
ASSERT_THROWS_CODE(secondRSTL.waitForLockUntil(Date_t::now() + Milliseconds(1)),
AssertionException,
- ErrorCodes::ExceededTimeLimit);
+ ErrorCodes::LockTimeout);
// Check the first opCtx is still holding the RSTL.
ASSERT_TRUE(firstRSTL.isLocked());
diff --git a/src/mongo/db/concurrency/lock_manager_defs.h b/src/mongo/db/concurrency/lock_manager_defs.h
index 5722d3eff69..d35b5677a55 100644
--- a/src/mongo/db/concurrency/lock_manager_defs.h
+++ b/src/mongo/db/concurrency/lock_manager_defs.h
@@ -50,7 +50,7 @@ struct PartitionedLockHead;
* This matrix answers the question, "Is a lock request with mode 'Requested Mode' compatible with
* an existing lock held in mode 'Granted Mode'?"
*
- * | Requested Mode | Granted Mode | | | | |
+ * | Requested Mode | Granted Mode |
* |----------------|:------------:|:-------:|:--------:|:------:|:--------:|
* | | MODE_NONE | MODE_IS | MODE_IX | MODE_S | MODE_X |
* | MODE_IS | + | + | + | + | |
diff --git a/src/mongo/db/concurrency/lock_state.cpp b/src/mongo/db/concurrency/lock_state.cpp
index 27ec015445e..0ce20b0aa3b 100644
--- a/src/mongo/db/concurrency/lock_state.cpp
+++ b/src/mongo/db/concurrency/lock_state.cpp
@@ -290,14 +290,12 @@ Locker::ClientState LockerImpl::getClientState() const {
return state;
}
-LockResult LockerImpl::lockGlobal(OperationContext* opCtx, LockMode mode) {
+void LockerImpl::lockGlobal(OperationContext* opCtx, LockMode mode) {
LockResult result = _lockGlobalBegin(opCtx, mode, Date_t::max());
if (result == LOCK_WAITING) {
- result = lockGlobalComplete(opCtx, Date_t::max());
+ lockGlobalComplete(opCtx, Date_t::max());
}
-
- return result;
}
void LockerImpl::reacquireTicket(OperationContext* opCtx) {
@@ -313,17 +311,19 @@ void LockerImpl::reacquireTicket(OperationContext* opCtx) {
if (clientState != kInactive)
return;
- auto deadline = _maxLockTimeout ? Date_t::now() + *_maxLockTimeout : Date_t::max();
- auto acquireTicketResult = _acquireTicket(opCtx, _modeForTicket, deadline);
- uassert(ErrorCodes::LockTimeout,
- str::stream() << "Unable to acquire ticket with mode '" << _modeForTicket
- << "' within a max lock request timeout of '"
- << *_maxLockTimeout
- << "' milliseconds.",
- acquireTicketResult == LOCK_OK || _uninterruptibleLocksRequested);
+ if (!_maxLockTimeout || _uninterruptibleLocksRequested) {
+ invariant(_acquireTicket(opCtx, _modeForTicket, Date_t::max()));
+ } else {
+ uassert(ErrorCodes::LockTimeout,
+ str::stream() << "Unable to acquire ticket with mode '" << _modeForTicket
+ << "' within a max lock request timeout of '"
+ << *_maxLockTimeout
+ << "' milliseconds.",
+ _acquireTicket(opCtx, _modeForTicket, Date_t::now() + *_maxLockTimeout));
+ }
}
-LockResult LockerImpl::_acquireTicket(OperationContext* opCtx, LockMode mode, Date_t deadline) {
+bool LockerImpl::_acquireTicket(OperationContext* opCtx, LockMode mode, Date_t deadline) {
const bool reader = isSharedLockMode(mode);
auto holder = shouldAcquireTicket() ? ticketHolders[mode] : nullptr;
if (holder) {
@@ -336,32 +336,30 @@ LockResult LockerImpl::_acquireTicket(OperationContext* opCtx, LockMode mode, Da
if (deadline == Date_t::max()) {
holder->waitForTicket(interruptible);
} else if (!holder->waitForTicketUntil(interruptible, deadline)) {
- return LOCK_TIMEOUT;
+ return false;
}
restoreStateOnErrorGuard.dismiss();
}
_clientState.store(reader ? kActiveReader : kActiveWriter);
- return LOCK_OK;
+ return true;
}
LockResult LockerImpl::_lockGlobalBegin(OperationContext* opCtx, LockMode mode, Date_t deadline) {
dassert(isLocked() == (_modeForTicket != MODE_NONE));
if (_modeForTicket == MODE_NONE) {
- auto lockTimeoutDate =
- _maxLockTimeout ? Date_t::now() + _maxLockTimeout.get() : Date_t::max();
- auto useLockTimeout = lockTimeoutDate < deadline;
- auto acquireTicketResult =
- _acquireTicket(opCtx, mode, useLockTimeout ? lockTimeoutDate : deadline);
- if (useLockTimeout) {
+ if (_uninterruptibleLocksRequested) {
+ // Ignore deadline and _maxLockTimeout.
+ invariant(_acquireTicket(opCtx, mode, Date_t::max()));
+ } else {
+ auto beforeAcquire = Date_t::now();
+ deadline = std::min(deadline,
+ _maxLockTimeout ? beforeAcquire + *_maxLockTimeout : Date_t::max());
uassert(ErrorCodes::LockTimeout,
str::stream() << "Unable to acquire ticket with mode '" << _modeForTicket
<< "' within a max lock request timeout of '"
- << *_maxLockTimeout
+ << Date_t::now() - beforeAcquire
<< "' milliseconds.",
- acquireTicketResult == LOCK_OK || _uninterruptibleLocksRequested);
- }
- if (acquireTicketResult != LOCK_OK) {
- return acquireTicketResult;
+ _acquireTicket(opCtx, mode, deadline));
}
_modeForTicket = mode;
}
@@ -374,16 +372,12 @@ LockResult LockerImpl::_lockGlobalBegin(OperationContext* opCtx, LockMode mode,
}
}
const LockResult result = lockBegin(opCtx, resourceIdGlobal, actualLockMode);
- if (result == LOCK_OK)
- return LOCK_OK;
-
- invariant(result == LOCK_WAITING);
-
+ invariant(result == LOCK_OK || result == LOCK_WAITING);
return result;
}
-LockResult LockerImpl::lockGlobalComplete(OperationContext* opCtx, Date_t deadline) {
- return lockComplete(opCtx, resourceIdGlobal, getLockMode(resourceIdGlobal), deadline);
+void LockerImpl::lockGlobalComplete(OperationContext* opCtx, Date_t deadline) {
+ lockComplete(opCtx, resourceIdGlobal, getLockMode(resourceIdGlobal), deadline);
}
bool LockerImpl::unlockGlobal() {
@@ -470,20 +464,17 @@ void LockerImpl::restoreWriteUnitOfWork(OperationContext* opCtx,
beginWriteUnitOfWork();
}
-LockResult LockerImpl::lock(OperationContext* opCtx,
- ResourceId resId,
- LockMode mode,
- Date_t deadline) {
+void LockerImpl::lock(OperationContext* opCtx, ResourceId resId, LockMode mode, Date_t deadline) {
const LockResult result = lockBegin(opCtx, resId, mode);
// Fast, uncontended path
if (result == LOCK_OK)
- return LOCK_OK;
+ return;
invariant(result == LOCK_WAITING);
- return lockComplete(opCtx, resId, mode, deadline);
+ lockComplete(opCtx, resId, mode, deadline);
}
void LockerImpl::downgrade(ResourceId resId, LockMode newMode) {
@@ -721,19 +712,19 @@ void LockerImpl::restoreLockState(OperationContext* opCtx, const Locker::LockSna
// If we locked the PBWM, it must be locked before the resourceIdGlobal and
// resourceIdReplicationStateTransitionLock resources.
if (it != state.locks.end() && it->resourceId == resourceIdParallelBatchWriterMode) {
- invariant(LOCK_OK == lock(opCtx, it->resourceId, it->mode));
+ lock(opCtx, it->resourceId, it->mode);
it++;
}
// If we locked the RSTL, it must be locked before the resourceIdGlobal resource.
if (it != state.locks.end() && it->resourceId == resourceIdReplicationStateTransitionLock) {
- invariant(LOCK_OK == lock(opCtx, it->resourceId, it->mode));
+ lock(opCtx, it->resourceId, it->mode);
it++;
}
- invariant(LOCK_OK == lockGlobal(opCtx, state.globalMode));
+ lockGlobal(opCtx, state.globalMode);
for (; it != state.locks.end(); it++) {
- invariant(LOCK_OK == lock(it->resourceId, it->mode));
+ lock(opCtx, it->resourceId, it->mode);
}
invariant(_modeForTicket != MODE_NONE);
}
@@ -814,10 +805,10 @@ LockResult LockerImpl::lockBegin(OperationContext* opCtx, ResourceId resId, Lock
return result;
}
-LockResult LockerImpl::lockComplete(OperationContext* opCtx,
- ResourceId resId,
- LockMode mode,
- Date_t deadline) {
+void LockerImpl::lockComplete(OperationContext* opCtx,
+ ResourceId resId,
+ LockMode mode,
+ Date_t deadline) {
LockResult result;
Milliseconds timeout;
@@ -828,12 +819,9 @@ LockResult LockerImpl::lockComplete(OperationContext* opCtx,
} else {
timeout = deadline - Date_t::now();
}
-
- // If _maxLockTimeout is set and lower than the given timeout, override it.
- // TODO: there should be an invariant against the simultaneous usage of
- // _uninterruptibleLocksRequested and _maxLockTimeout (SERVER-34951).
- if (_maxLockTimeout && _uninterruptibleLocksRequested == 0) {
- timeout = std::min(timeout, _maxLockTimeout.get());
+ timeout = std::min(timeout, _maxLockTimeout ? *_maxLockTimeout : Milliseconds::max());
+ if (_uninterruptibleLocksRequested) {
+ timeout = Milliseconds::max();
}
// Don't go sleeping without bound in order to be able to report long waits.
@@ -880,28 +868,15 @@ LockResult LockerImpl::lockComplete(OperationContext* opCtx,
waitTime = (totalBlockTime < timeout) ? std::min(timeout - totalBlockTime, MaxWaitTime)
: Milliseconds(0);
- if (waitTime == Milliseconds(0)) {
- // If the caller provided the max deadline then presumably they are not expecting nor
- // checking for lock acquisition failure. In that case, to prevent the caller from
- // continuing under the assumption of a successful lock acquisition, we'll throw.
- if (_maxLockTimeout && deadline == Date_t::max()) {
- uasserted(ErrorCodes::LockTimeout,
- str::stream() << "Unable to acquire lock '" << resId.toString()
- << "' within a max lock request timeout of '"
- << _maxLockTimeout.get()
- << "' milliseconds.");
- }
- break;
- }
+ uassert(ErrorCodes::LockTimeout,
+ str::stream() << "Unable to acquire lock '" << resId.toString() << "' within "
+ << timeout
+ << "' milliseconds.",
+ waitTime > Milliseconds(0));
}
- // Note: in case of the _notify object returning LOCK_TIMEOUT, it is possible to find that the
- // lock was still granted after all, but we don't try to take advantage of that and will return
- // a timeout.
- if (result == LOCK_OK) {
- unlockOnErrorGuard.dismiss();
- }
- return result;
+ invariant(result == LOCK_OK);
+ unlockOnErrorGuard.dismiss();
}
LockResult LockerImpl::lockRSTLBegin(OperationContext* opCtx) {
@@ -909,8 +884,8 @@ LockResult LockerImpl::lockRSTLBegin(OperationContext* opCtx) {
return lockBegin(opCtx, resourceIdReplicationStateTransitionLock, MODE_X);
}
-LockResult LockerImpl::lockRSTLComplete(OperationContext* opCtx, Date_t deadline) {
- return lockComplete(opCtx, resourceIdReplicationStateTransitionLock, MODE_X, deadline);
+void LockerImpl::lockRSTLComplete(OperationContext* opCtx, Date_t deadline) {
+ lockComplete(opCtx, resourceIdReplicationStateTransitionLock, MODE_X, deadline);
}
void LockerImpl::releaseTicket() {
diff --git a/src/mongo/db/concurrency/lock_state.h b/src/mongo/db/concurrency/lock_state.h
index 20364646ee5..44effcb124a 100644
--- a/src/mongo/db/concurrency/lock_state.h
+++ b/src/mongo/db/concurrency/lock_state.h
@@ -128,8 +128,8 @@ public:
_maxLockTimeout = boost::none;
}
- virtual LockResult lockGlobal(OperationContext* opCtx, LockMode mode);
- virtual LockResult lockGlobal(LockMode mode) {
+ virtual void lockGlobal(OperationContext* opCtx, LockMode mode);
+ virtual void lockGlobal(LockMode mode) {
return lockGlobal(nullptr, mode);
}
virtual LockResult lockGlobalBegin(OperationContext* opCtx, LockMode mode, Date_t deadline) {
@@ -138,15 +138,15 @@ public:
virtual LockResult lockGlobalBegin(LockMode mode, Date_t deadline) {
return _lockGlobalBegin(nullptr, mode, deadline);
}
- virtual LockResult lockGlobalComplete(OperationContext* opCtx, Date_t deadline);
- virtual LockResult lockGlobalComplete(Date_t deadline) {
- return lockGlobalComplete(nullptr, deadline);
+ virtual void lockGlobalComplete(OperationContext* opCtx, Date_t deadline);
+ virtual void lockGlobalComplete(Date_t deadline) {
+ lockGlobalComplete(nullptr, deadline);
}
virtual bool unlockGlobal();
virtual LockResult lockRSTLBegin(OperationContext* opCtx);
- virtual LockResult lockRSTLComplete(OperationContext* opCtx, Date_t deadline);
+ virtual void lockRSTLComplete(OperationContext* opCtx, Date_t deadline);
virtual bool unlockRSTLforPrepare();
@@ -163,13 +163,13 @@ public:
* the lock acquisition. A lock operation would otherwise wait until a timeout or the lock is
* granted.
*/
- virtual LockResult lock(OperationContext* opCtx,
- ResourceId resId,
- LockMode mode,
- Date_t deadline = Date_t::max());
+ virtual void lock(OperationContext* opCtx,
+ ResourceId resId,
+ LockMode mode,
+ Date_t deadline = Date_t::max());
- virtual LockResult lock(ResourceId resId, LockMode mode, Date_t deadline = Date_t::max()) {
- return lock(nullptr, resId, mode, deadline);
+ virtual void lock(ResourceId resId, LockMode mode, Date_t deadline = Date_t::max()) {
+ lock(nullptr, resId, mode, deadline);
}
virtual void downgrade(ResourceId resId, LockMode newMode);
@@ -239,14 +239,13 @@ public:
* @param mode Mode which was passed to an earlier lockBegin call. Must match.
* @param deadline The absolute time point when this lock acquisition will time out, if not yet
* granted.
+ *
+ * Throws an exception if it is interrupted.
*/
- LockResult lockComplete(OperationContext* opCtx,
- ResourceId resId,
- LockMode mode,
- Date_t deadline);
+ void lockComplete(OperationContext* opCtx, ResourceId resId, LockMode mode, Date_t deadline);
- LockResult lockComplete(ResourceId resId, LockMode mode, Date_t deadline) {
- return lockComplete(nullptr, resId, mode, deadline);
+ void lockComplete(ResourceId resId, LockMode mode, Date_t deadline) {
+ lockComplete(nullptr, resId, mode, deadline);
}
/**
@@ -261,7 +260,12 @@ private:
typedef FastMapNoAlloc<ResourceId, LockRequest> LockRequestsMap;
/**
- * Like lockGlobalBegin, but accepts a deadline for acquiring a ticket.
+ * Acquires the ticket within the deadline (or _maxLockTimeout) and tries to grab the lock.
+ *
+     * Returns LOCK_OK if the global lock was successfully acquired,
+     * or LOCK_WAITING if the global lock is currently held by someone else.
+ *
+ * The ticket acquisition can be interrupted (by killOp/timeout), thus throwing an exception.
*/
LockResult _lockGlobalBegin(OperationContext* opCtx, LockMode, Date_t deadline);
@@ -293,11 +297,11 @@ private:
/**
* Acquires a ticket for the Locker under 'mode'.
- * Returns LOCK_OK if a ticket is successfully acquired.
- * LOCK_TIMEOUT if it cannot acquire a ticket within 'deadline'.
+     * Returns true if a ticket is successfully acquired, or
+     * false if it cannot acquire a ticket within 'deadline'.
* It may throw an exception when it is interrupted.
*/
- LockResult _acquireTicket(OperationContext* opCtx, LockMode mode, Date_t deadline);
+ bool _acquireTicket(OperationContext* opCtx, LockMode mode, Date_t deadline);
// Used to disambiguate different lockers
const LockerId _id;
@@ -340,7 +344,7 @@ private:
// If this is set, dictates the max number of milliseconds that we will wait for lock
// acquisition. Effectively resets lock acquisition deadlines to time out sooner. If set to 0,
// for example, lock attempts will time out immediately if the lock is not immediately
- // available.
+ // available. Note this will be ineffective if uninterruptible lock guard is set.
boost::optional<Milliseconds> _maxLockTimeout;
//////////////////////////////////////////////////////////////////////////////////////////
diff --git a/src/mongo/db/concurrency/lock_state_test.cpp b/src/mongo/db/concurrency/lock_state_test.cpp
index 27bda6a04d2..115d1b5c42b 100644
--- a/src/mongo/db/concurrency/lock_state_test.cpp
+++ b/src/mongo/db/concurrency/lock_state_test.cpp
@@ -50,7 +50,7 @@ TEST(LockerImpl, LockNoConflict) {
LockerImpl locker;
locker.lockGlobal(MODE_IX);
- ASSERT(LOCK_OK == locker.lock(resId, MODE_X));
+ locker.lock(resId, MODE_X);
ASSERT(locker.isLockHeldForMode(resId, MODE_X));
ASSERT(locker.isLockHeldForMode(resId, MODE_S));
@@ -68,8 +68,8 @@ TEST(LockerImpl, ReLockNoConflict) {
LockerImpl locker;
locker.lockGlobal(MODE_IX);
- ASSERT(LOCK_OK == locker.lock(resId, MODE_S));
- ASSERT(LOCK_OK == locker.lock(resId, MODE_X));
+ locker.lock(resId, MODE_S);
+ locker.lock(resId, MODE_X);
ASSERT(!locker.unlock(resId));
ASSERT(locker.isLockHeldForMode(resId, MODE_X));
@@ -84,12 +84,13 @@ TEST(LockerImpl, ConflictWithTimeout) {
const ResourceId resId(RESOURCE_COLLECTION, "TestDB.collection"_sd);
LockerImpl locker1;
- ASSERT(LOCK_OK == locker1.lockGlobal(MODE_IX));
- ASSERT(LOCK_OK == locker1.lock(resId, MODE_X));
+ locker1.lockGlobal(MODE_IX);
+ locker1.lock(resId, MODE_X);
LockerImpl locker2;
- ASSERT(LOCK_OK == locker2.lockGlobal(MODE_IX));
- ASSERT(LOCK_TIMEOUT == locker2.lock(resId, MODE_S, Date_t::now()));
+ locker2.lockGlobal(MODE_IX);
+ ASSERT_THROWS_CODE(
+ locker2.lock(resId, MODE_S, Date_t::now()), AssertionException, ErrorCodes::LockTimeout);
ASSERT(locker2.getLockMode(resId) == MODE_NONE);
@@ -103,15 +104,17 @@ TEST(LockerImpl, ConflictUpgradeWithTimeout) {
const ResourceId resId(RESOURCE_COLLECTION, "TestDB.collection"_sd);
LockerImpl locker1;
- ASSERT(LOCK_OK == locker1.lockGlobal(MODE_IS));
- ASSERT(LOCK_OK == locker1.lock(resId, MODE_S));
+ locker1.lockGlobal(MODE_IS);
+ locker1.lock(resId, MODE_S);
LockerImpl locker2;
- ASSERT(LOCK_OK == locker2.lockGlobal(MODE_IS));
- ASSERT(LOCK_OK == locker2.lock(resId, MODE_S));
+ locker2.lockGlobal(MODE_IS);
+ locker2.lock(resId, MODE_S);
// Try upgrading locker 1, which should block and timeout
- ASSERT(LOCK_TIMEOUT == locker1.lock(resId, MODE_X, Date_t::now() + Milliseconds(1)));
+ ASSERT_THROWS_CODE(locker1.lock(resId, MODE_X, Date_t::now() + Milliseconds(1)),
+ AssertionException,
+ ErrorCodes::LockTimeout);
locker1.unlockGlobal();
locker2.unlockGlobal();
@@ -171,9 +174,9 @@ TEST(LockerImpl, saveAndRestoreRSTL) {
const ResourceId resIdDatabase(RESOURCE_DATABASE, "TestDB"_sd);
// Acquire locks.
- ASSERT_EQUALS(LOCK_OK, locker.lock(resourceIdReplicationStateTransitionLock, MODE_IX));
+ locker.lock(resourceIdReplicationStateTransitionLock, MODE_IX);
locker.lockGlobal(MODE_IX);
- ASSERT_EQUALS(LOCK_OK, locker.lock(resIdDatabase, MODE_IX));
+ locker.lock(resIdDatabase, MODE_IX);
// Save the lock state.
locker.saveLockStateAndUnlock(&lockInfo);
@@ -236,8 +239,8 @@ TEST(LockerImpl, saveAndRestoreDBAndCollection) {
// Lock some stuff.
locker.lockGlobal(MODE_IX);
- ASSERT_EQUALS(LOCK_OK, locker.lock(resIdDatabase, MODE_IX));
- ASSERT_EQUALS(LOCK_OK, locker.lock(resIdCollection, MODE_X));
+ locker.lock(resIdDatabase, MODE_IX);
+ locker.lock(resIdCollection, MODE_X);
locker.saveLockStateAndUnlock(&lockInfo);
// Things shouldn't be locked anymore.
@@ -265,8 +268,8 @@ TEST(LockerImpl, releaseWriteUnitOfWork) {
locker.beginWriteUnitOfWork();
// Lock some stuff.
locker.lockGlobal(MODE_IX);
- ASSERT_EQUALS(LOCK_OK, locker.lock(resIdDatabase, MODE_IX));
- ASSERT_EQUALS(LOCK_OK, locker.lock(resIdCollection, MODE_X));
+ locker.lock(resIdDatabase, MODE_IX);
+ locker.lock(resIdCollection, MODE_X);
// Unlock them so that they will be pending to unlock.
ASSERT_FALSE(locker.unlock(resIdCollection));
ASSERT_FALSE(locker.unlock(resIdDatabase));
@@ -293,8 +296,8 @@ TEST(LockerImpl, restoreWriteUnitOfWork) {
locker.beginWriteUnitOfWork();
// Lock some stuff.
locker.lockGlobal(MODE_IX);
- ASSERT_EQUALS(LOCK_OK, locker.lock(resIdDatabase, MODE_IX));
- ASSERT_EQUALS(LOCK_OK, locker.lock(resIdCollection, MODE_X));
+ locker.lock(resIdDatabase, MODE_IX);
+ locker.lock(resIdCollection, MODE_X);
// Unlock them so that they will be pending to unlock.
ASSERT_FALSE(locker.unlock(resIdCollection));
ASSERT_FALSE(locker.unlock(resIdDatabase));
@@ -336,8 +339,8 @@ TEST(LockerImpl, releaseAndRestoreReadOnlyWriteUnitOfWork) {
locker.beginWriteUnitOfWork();
// Lock some stuff in IS mode.
locker.lockGlobal(MODE_IS);
- ASSERT_EQUALS(LOCK_OK, locker.lock(resIdDatabase, MODE_IS));
- ASSERT_EQUALS(LOCK_OK, locker.lock(resIdCollection, MODE_IS));
+ locker.lock(resIdDatabase, MODE_IS);
+ locker.lock(resIdCollection, MODE_IS);
// Unlock them.
ASSERT_FALSE(locker.unlock(resIdCollection));
ASSERT_FALSE(locker.unlock(resIdDatabase));
@@ -390,8 +393,8 @@ TEST(LockerImpl, DefaultLocker) {
const ResourceId resId(RESOURCE_DATABASE, "TestDB"_sd);
LockerImpl locker;
- ASSERT_EQUALS(LOCK_OK, locker.lockGlobal(MODE_IX));
- ASSERT_EQUALS(LOCK_OK, locker.lock(resId, MODE_X));
+ locker.lockGlobal(MODE_IX);
+ locker.lock(resId, MODE_X);
// Make sure the flush lock IS NOT held
Locker::LockerInfo info;
@@ -418,16 +421,16 @@ TEST(LockerImpl, SharedLocksShouldTwoPhaseLockIsTrue) {
LockerImpl locker;
locker.setSharedLocksShouldTwoPhaseLock(true);
- ASSERT_EQ(LOCK_OK, locker.lockGlobal(MODE_IS));
+ locker.lockGlobal(MODE_IS);
ASSERT_EQ(locker.getLockMode(globalResId), MODE_IS);
- ASSERT_EQ(LOCK_OK, locker.lock(resourceIdReplicationStateTransitionLock, MODE_IS));
+ locker.lock(resourceIdReplicationStateTransitionLock, MODE_IS);
ASSERT_EQ(locker.getLockMode(resourceIdReplicationStateTransitionLock), MODE_IS);
- ASSERT_EQ(LOCK_OK, locker.lock(resId1, MODE_IS));
- ASSERT_EQ(LOCK_OK, locker.lock(resId2, MODE_IX));
- ASSERT_EQ(LOCK_OK, locker.lock(resId3, MODE_S));
- ASSERT_EQ(LOCK_OK, locker.lock(resId4, MODE_X));
+ locker.lock(resId1, MODE_IS);
+ locker.lock(resId2, MODE_IX);
+ locker.lock(resId3, MODE_S);
+ locker.lock(resId4, MODE_X);
ASSERT_EQ(locker.getLockMode(resId1), MODE_IS);
ASSERT_EQ(locker.getLockMode(resId2), MODE_IX);
ASSERT_EQ(locker.getLockMode(resId3), MODE_S);
@@ -472,16 +475,16 @@ TEST(LockerImpl, ModeIXAndXLockParticipatesInTwoPhaseLocking) {
LockerImpl locker;
- ASSERT_EQ(LOCK_OK, locker.lockGlobal(MODE_IX));
+ locker.lockGlobal(MODE_IX);
ASSERT_EQ(locker.getLockMode(globalResId), MODE_IX);
- ASSERT_EQ(LOCK_OK, locker.lock(resourceIdReplicationStateTransitionLock, MODE_IX));
+ locker.lock(resourceIdReplicationStateTransitionLock, MODE_IX);
ASSERT_EQ(locker.getLockMode(resourceIdReplicationStateTransitionLock), MODE_IX);
- ASSERT_EQ(LOCK_OK, locker.lock(resId1, MODE_IS));
- ASSERT_EQ(LOCK_OK, locker.lock(resId2, MODE_IX));
- ASSERT_EQ(LOCK_OK, locker.lock(resId3, MODE_S));
- ASSERT_EQ(LOCK_OK, locker.lock(resId4, MODE_X));
+ locker.lock(resId1, MODE_IS);
+ locker.lock(resId2, MODE_IX);
+ locker.lock(resId3, MODE_S);
+ locker.lock(resId4, MODE_X);
ASSERT_EQ(locker.getLockMode(resId1), MODE_IS);
ASSERT_EQ(locker.getLockMode(resId2), MODE_IX);
ASSERT_EQ(locker.getLockMode(resId3), MODE_S);
@@ -515,13 +518,13 @@ TEST(LockerImpl, ModeIXAndXLockParticipatesInTwoPhaseLocking) {
TEST(LockerImpl, RSTLUnlocksWithNestedLock) {
LockerImpl locker;
- ASSERT_EQ(LOCK_OK, locker.lock(resourceIdReplicationStateTransitionLock, MODE_IX));
+ locker.lock(resourceIdReplicationStateTransitionLock, MODE_IX);
ASSERT_EQ(locker.getLockMode(resourceIdReplicationStateTransitionLock), MODE_IX);
locker.beginWriteUnitOfWork();
// Do a nested lock acquisition.
- ASSERT_EQ(LOCK_OK, locker.lock(resourceIdReplicationStateTransitionLock, MODE_IX));
+ locker.lock(resourceIdReplicationStateTransitionLock, MODE_IX);
ASSERT_EQ(locker.getLockMode(resourceIdReplicationStateTransitionLock), MODE_IX);
ASSERT(locker.unlockRSTLforPrepare());
@@ -540,7 +543,7 @@ TEST(LockerImpl, RSTLUnlocksWithNestedLock) {
TEST(LockerImpl, RSTLModeIXWithTwoPhaseLockingCanBeUnlockedWhenPrepared) {
LockerImpl locker;
- ASSERT_EQ(LOCK_OK, locker.lock(resourceIdReplicationStateTransitionLock, MODE_IX));
+ locker.lock(resourceIdReplicationStateTransitionLock, MODE_IX);
ASSERT_EQ(locker.getLockMode(resourceIdReplicationStateTransitionLock), MODE_IX);
locker.beginWriteUnitOfWork();
@@ -564,7 +567,7 @@ TEST(LockerImpl, RSTLModeIXWithTwoPhaseLockingCanBeUnlockedWhenPrepared) {
TEST(LockerImpl, RSTLModeISWithTwoPhaseLockingCanBeUnlockedWhenPrepared) {
LockerImpl locker;
- ASSERT_EQ(LOCK_OK, locker.lock(resourceIdReplicationStateTransitionLock, MODE_IS));
+ locker.lock(resourceIdReplicationStateTransitionLock, MODE_IS);
ASSERT_EQ(locker.getLockMode(resourceIdReplicationStateTransitionLock), MODE_IS);
locker.beginWriteUnitOfWork();
@@ -585,7 +588,7 @@ TEST(LockerImpl, RSTLModeISWithTwoPhaseLockingCanBeUnlockedWhenPrepared) {
TEST(LockerImpl, RSTLTwoPhaseLockingBehaviorModeIS) {
LockerImpl locker;
- ASSERT_EQ(LOCK_OK, locker.lock(resourceIdReplicationStateTransitionLock, MODE_IS));
+ locker.lock(resourceIdReplicationStateTransitionLock, MODE_IS);
ASSERT_EQ(locker.getLockMode(resourceIdReplicationStateTransitionLock), MODE_IS);
locker.beginWriteUnitOfWork();
@@ -613,11 +616,11 @@ TEST(LockerImpl, OverrideLockRequestTimeout) {
// Set up locker2 to override lock requests' provided timeout if greater than 1000 milliseconds.
locker2.setMaxLockTimeout(Milliseconds(1000));
- ASSERT_EQ(LOCK_OK, locker1.lockGlobal(MODE_IX));
- ASSERT_EQ(LOCK_OK, locker2.lockGlobal(MODE_IX));
+ locker1.lockGlobal(MODE_IX);
+ locker2.lockGlobal(MODE_IX);
// locker1 acquires FirstDB under an exclusive lock.
- ASSERT_EQ(LOCK_OK, locker1.lock(resIdFirstDB, MODE_X));
+ locker1.lock(resIdFirstDB, MODE_X);
ASSERT_TRUE(locker1.isLockHeldForMode(resIdFirstDB, MODE_X));
// locker2's attempt to acquire FirstDB with unlimited wait time should timeout after 1000
@@ -627,7 +630,7 @@ TEST(LockerImpl, OverrideLockRequestTimeout) {
ErrorCodes::LockTimeout);
// locker2's attempt to acquire an uncontested lock should still succeed normally.
- ASSERT_EQ(LOCK_OK, locker2.lock(resIdSecondDB, MODE_X));
+ locker2.lock(resIdSecondDB, MODE_X);
ASSERT_TRUE(locker1.unlock(resIdFirstDB));
ASSERT_TRUE(locker1.isLockHeldForMode(resIdFirstDB, MODE_NONE));
@@ -649,11 +652,11 @@ TEST(LockerImpl, DoNotWaitForLockAcquisition) {
// deadlines in the lock request.
locker2.setMaxLockTimeout(Milliseconds(0));
- ASSERT_EQ(LOCK_OK, locker1.lockGlobal(MODE_IX));
- ASSERT_EQ(LOCK_OK, locker2.lockGlobal(MODE_IX));
+ locker1.lockGlobal(MODE_IX);
+ locker2.lockGlobal(MODE_IX);
// locker1 acquires FirstDB under an exclusive lock.
- ASSERT_EQ(LOCK_OK, locker1.lock(resIdFirstDB, MODE_X));
+ locker1.lock(resIdFirstDB, MODE_X);
ASSERT_TRUE(locker1.isLockHeldForMode(resIdFirstDB, MODE_X));
// locker2's attempt to acquire FirstDB with unlimited wait time should fail immediately and
@@ -663,7 +666,7 @@ TEST(LockerImpl, DoNotWaitForLockAcquisition) {
ErrorCodes::LockTimeout);
// locker2's attempt to acquire an uncontested lock should still succeed normally.
- ASSERT_EQ(LOCK_OK, locker2.lock(resIdSecondDB, MODE_X));
+ locker2.lock(resIdSecondDB, MODE_X);
ASSERT_TRUE(locker1.unlock(resIdFirstDB));
ASSERT_TRUE(locker1.isLockHeldForMode(resIdFirstDB, MODE_NONE));
@@ -697,9 +700,9 @@ TEST(LockerImpl, GetLockerInfoShouldReportHeldLocks) {
// Take an exclusive lock on the collection.
LockerImpl locker;
- ASSERT_EQ(LOCK_OK, locker.lockGlobal(MODE_IX));
- ASSERT_EQ(LOCK_OK, locker.lock(dbId, MODE_IX));
- ASSERT_EQ(LOCK_OK, locker.lock(collectionId, MODE_X));
+ locker.lockGlobal(MODE_IX);
+ locker.lock(dbId, MODE_IX);
+ locker.lock(collectionId, MODE_X);
// Assert it shows up in the output of getLockerInfo().
Locker::LockerInfo lockerInfo;
@@ -722,14 +725,14 @@ TEST(LockerImpl, GetLockerInfoShouldReportPendingLocks) {
// Take an exclusive lock on the collection.
LockerImpl successfulLocker;
- ASSERT_EQ(LOCK_OK, successfulLocker.lockGlobal(MODE_IX));
- ASSERT_EQ(LOCK_OK, successfulLocker.lock(dbId, MODE_IX));
- ASSERT_EQ(LOCK_OK, successfulLocker.lock(collectionId, MODE_X));
+ successfulLocker.lockGlobal(MODE_IX);
+ successfulLocker.lock(dbId, MODE_IX);
+ successfulLocker.lock(collectionId, MODE_X);
// Now attempt to get conflicting locks.
LockerImpl conflictingLocker;
- ASSERT_EQ(LOCK_OK, conflictingLocker.lockGlobal(MODE_IS));
- ASSERT_EQ(LOCK_OK, conflictingLocker.lock(dbId, MODE_IS));
+ conflictingLocker.lockGlobal(MODE_IS);
+ conflictingLocker.lock(dbId, MODE_IS);
ASSERT_EQ(LOCK_WAITING, conflictingLocker.lockBegin(nullptr, collectionId, MODE_IS));
// Assert the held locks show up in the output of getLockerInfo().
@@ -748,7 +751,7 @@ TEST(LockerImpl, GetLockerInfoShouldReportPendingLocks) {
ASSERT(successfulLocker.unlock(dbId));
ASSERT(successfulLocker.unlockGlobal());
- ASSERT_EQ(LOCK_OK, conflictingLocker.lockComplete(collectionId, MODE_IS, Date_t::now()));
+ conflictingLocker.lockComplete(collectionId, MODE_IS, Date_t::now());
conflictingLocker.getLockerInfo(&lockerInfo, boost::none);
ASSERT_FALSE(lockerInfo.waitingResource.isValid());
@@ -764,7 +767,7 @@ TEST(LockerImpl, ReaquireLockPendingUnlock) {
LockerImpl locker;
locker.lockGlobal(MODE_IS);
- ASSERT_EQ(LOCK_OK, locker.lock(resId, MODE_X));
+ locker.lock(resId, MODE_X);
ASSERT_TRUE(locker.isLockHeldForMode(resId, MODE_X));
locker.beginWriteUnitOfWork();
@@ -775,7 +778,7 @@ TEST(LockerImpl, ReaquireLockPendingUnlock) {
ASSERT(locker.getRequestsForTest().find(resId).objAddr()->unlockPending == 1);
// Reacquire lock pending unlock.
- ASSERT_EQ(LOCK_OK, locker.lock(resId, MODE_X));
+ locker.lock(resId, MODE_X);
ASSERT(locker.numResourcesToUnlockAtEndUnitOfWorkForTest() == 0);
ASSERT(locker.getRequestsForTest().find(resId).objAddr()->unlockPending == 0);
@@ -792,7 +795,7 @@ TEST(LockerImpl, AcquireLockPendingUnlockWithCoveredMode) {
LockerImpl locker;
locker.lockGlobal(MODE_IS);
- ASSERT_EQ(LOCK_OK, locker.lock(resId, MODE_X));
+ locker.lock(resId, MODE_X);
ASSERT_TRUE(locker.isLockHeldForMode(resId, MODE_X));
locker.beginWriteUnitOfWork();
@@ -803,7 +806,7 @@ TEST(LockerImpl, AcquireLockPendingUnlockWithCoveredMode) {
ASSERT(locker.getRequestsForTest().find(resId).objAddr()->unlockPending == 1);
// Attempt to lock the resource with a mode that is covered by the existing mode.
- ASSERT_EQ(LOCK_OK, locker.lock(resId, MODE_IX));
+ locker.lock(resId, MODE_IX);
ASSERT(locker.numResourcesToUnlockAtEndUnitOfWorkForTest() == 0);
ASSERT(locker.getRequestsForTest().find(resId).objAddr()->unlockPending == 0);
@@ -820,7 +823,7 @@ TEST(LockerImpl, ConvertLockPendingUnlock) {
LockerImpl locker;
locker.lockGlobal(MODE_IS);
- ASSERT_EQ(LOCK_OK, locker.lock(resId, MODE_IX));
+ locker.lock(resId, MODE_IX);
ASSERT_TRUE(locker.isLockHeldForMode(resId, MODE_IX));
locker.beginWriteUnitOfWork();
@@ -831,7 +834,7 @@ TEST(LockerImpl, ConvertLockPendingUnlock) {
ASSERT(locker.getRequestsForTest().find(resId).objAddr()->unlockPending == 1);
// Convert lock pending unlock.
- ASSERT_EQ(LOCK_OK, locker.lock(resId, MODE_X));
+ locker.lock(resId, MODE_X);
ASSERT(locker.numResourcesToUnlockAtEndUnitOfWorkForTest() == 1);
ASSERT(locker.getRequestsForTest().find(resId).objAddr()->unlockPending == 1);
@@ -850,7 +853,7 @@ TEST(LockerImpl, ConvertLockPendingUnlockAndUnlock) {
LockerImpl locker;
locker.lockGlobal(MODE_IS);
- ASSERT_EQ(LOCK_OK, locker.lock(resId, MODE_IX));
+ locker.lock(resId, MODE_IX);
ASSERT_TRUE(locker.isLockHeldForMode(resId, MODE_IX));
locker.beginWriteUnitOfWork();
@@ -861,7 +864,7 @@ TEST(LockerImpl, ConvertLockPendingUnlockAndUnlock) {
ASSERT(locker.getRequestsForTest().find(resId).objAddr()->unlockPending == 1);
// Convert lock pending unlock.
- ASSERT_EQ(LOCK_OK, locker.lock(resId, MODE_X));
+ locker.lock(resId, MODE_X);
ASSERT(locker.numResourcesToUnlockAtEndUnitOfWorkForTest() == 1);
ASSERT(locker.getRequestsForTest().find(resId).objAddr()->unlockPending == 1);
diff --git a/src/mongo/db/concurrency/lock_stats_test.cpp b/src/mongo/db/concurrency/lock_stats_test.cpp
index 32fc9bdb570..5876d9aaa65 100644
--- a/src/mongo/db/concurrency/lock_stats_test.cpp
+++ b/src/mongo/db/concurrency/lock_stats_test.cpp
@@ -67,8 +67,10 @@ TEST(LockStats, Wait) {
ASSERT_EQUALS(LOCK_WAITING, lockerConflict.lockBegin(nullptr, resId, MODE_S));
// Sleep 1 millisecond so the wait time passes
- ASSERT_EQUALS(LOCK_TIMEOUT,
- lockerConflict.lockComplete(resId, MODE_S, Date_t::now() + Milliseconds(5)));
+ ASSERT_THROWS_CODE(
+ lockerConflict.lockComplete(resId, MODE_S, Date_t::now() + Milliseconds(5)),
+ AssertionException,
+ ErrorCodes::LockTimeout);
}
// Make sure that the waits/blocks are non-zero
@@ -111,8 +113,9 @@ TEST(LockStats, Subtraction) {
{
LockerForTests lockerConflict(MODE_IX);
- ASSERT_EQUALS(LOCK_TIMEOUT,
- lockerConflict.lock(resId, MODE_S, Date_t::now() + Milliseconds(5)));
+ ASSERT_THROWS_CODE(lockerConflict.lock(resId, MODE_S, Date_t::now() + Milliseconds(5)),
+ AssertionException,
+ ErrorCodes::LockTimeout);
}
SingleThreadedLockStats stats;
@@ -123,8 +126,9 @@ TEST(LockStats, Subtraction) {
{
LockerForTests lockerConflict(MODE_IX);
- ASSERT_EQUALS(LOCK_TIMEOUT,
- lockerConflict.lock(resId, MODE_S, Date_t::now() + Milliseconds(5)));
+ ASSERT_THROWS_CODE(lockerConflict.lock(resId, MODE_S, Date_t::now() + Milliseconds(5)),
+ AssertionException,
+ ErrorCodes::LockTimeout);
}
SingleThreadedLockStats stats2;
diff --git a/src/mongo/db/concurrency/locker.h b/src/mongo/db/concurrency/locker.h
index 66bda894552..3c5653f39bc 100644
--- a/src/mongo/db/concurrency/locker.h
+++ b/src/mongo/db/concurrency/locker.h
@@ -152,19 +152,23 @@ public:
* @param mode Mode in which the global lock should be acquired. Also indicates the intent
* of the operation.
*
- * @return LOCK_OK, if the global lock was acquired within the specified time bound. Otherwise,
- * the failure code and no lock will be acquired.
+ * It may throw an exception if it is interrupted.
*/
- virtual LockResult lockGlobal(OperationContext* opCtx, LockMode mode) = 0;
- virtual LockResult lockGlobal(LockMode mode) = 0;
+ virtual void lockGlobal(OperationContext* opCtx, LockMode mode) = 0;
+ virtual void lockGlobal(LockMode mode) = 0;
/**
* Requests the global lock to be acquired in the specified mode.
*
* See the comments for lockBegin/Complete for more information on the semantics. The deadline
* indicates the absolute time point when this lock acquisition will time out, if not yet
- * granted. The lockGlobalBegin method has a deadline for use with the TicketHolder, if there
- * is one.
+ * granted. The lockGlobalBegin method has a deadline for use with the TicketHolder, if there is
+ * one.
+ *
+ * Returns LOCK_OK if the global lock is successfully acquired,
+ * or LOCK_WAITING if the global lock is currently held by someone else.
+ *
+ * The ticket acquisition phase can be interrupted or time out, thus throwing an exception.
*/
virtual LockResult lockGlobalBegin(OperationContext* opCtx, LockMode mode, Date_t deadline) = 0;
virtual LockResult lockGlobalBegin(LockMode mode, Date_t deadline) = 0;
@@ -172,9 +176,11 @@ public:
/**
* Calling lockGlobalComplete without an OperationContext does not allow the lock acquisition
* to be interrupted.
+ *
+ * It may throw an exception if it is interrupted.
*/
- virtual LockResult lockGlobalComplete(OperationContext* opCtx, Date_t deadline) = 0;
- virtual LockResult lockGlobalComplete(Date_t deadline) = 0;
+ virtual void lockGlobalComplete(OperationContext* opCtx, Date_t deadline) = 0;
+ virtual void lockGlobalComplete(Date_t deadline) = 0;
/**
* Decrements the reference count on the global lock. If the reference count on the
@@ -200,8 +206,10 @@ public:
/**
* Waits for the completion of acquiring the RSTL in mode X. This should only be called inside
* ReplicationStateTransitionLockGuard.
+ *
+ * It may throw an exception if it is interrupted.
*/
- virtual LockResult lockRSTLComplete(OperationContext* opCtx, Date_t deadline) = 0;
+ virtual void lockRSTLComplete(OperationContext* opCtx, Date_t deadline) = 0;
/**
* Unlocks the RSTL when the transaction becomes prepared. This is used to bypass two-phase
@@ -235,28 +243,29 @@ public:
* corresponding call to unlock.
*
* If setLockTimeoutMillis has been called, then a lock request with a Date_t::max() deadline
- * may throw a LockTimeout error. See setMaxLockTimeout() above for details.
+ * may throw a LockTimeout exception. See setMaxLockTimeout() above for details.
*
* @param opCtx If provided, will be used to interrupt a LOCK_WAITING state.
* @param resId Id of the resource to be locked.
* @param mode Mode in which the resource should be locked. Lock upgrades are allowed.
- * @param deadline How long to wait for the lock to be granted, before
- * returning LOCK_TIMEOUT. This parameter defaults to an infinite deadline.
- * If Milliseconds(0) is passed, the request will return immediately, if
- * the request could not be granted right away.
+ * @param deadline How long to wait for the lock to be granted.
+ * This parameter defaults to an infinite deadline.
+ * If Milliseconds(0) is passed, the function will return immediately if the
+ * request could be granted right away, or throws a LockTimeout exception
+ * otherwise.
*
- * @return All LockResults except for LOCK_WAITING, because it blocks.
+ * It may throw an exception if it is interrupted.
*/
- virtual LockResult lock(OperationContext* opCtx,
- ResourceId resId,
- LockMode mode,
- Date_t deadline = Date_t::max()) = 0;
+ virtual void lock(OperationContext* opCtx,
+ ResourceId resId,
+ LockMode mode,
+ Date_t deadline = Date_t::max()) = 0;
/**
* Calling lock without an OperationContext does not allow LOCK_WAITING states to be
* interrupted.
*/
- virtual LockResult lock(ResourceId resId, LockMode mode, Date_t deadline = Date_t::max()) = 0;
+ virtual void lock(ResourceId resId, LockMode mode, Date_t deadline = Date_t::max()) = 0;
/**
* Downgrades the specified resource's lock mode without changing the reference count.
@@ -474,9 +483,8 @@ protected:
/**
* The number of callers that are guarding from lock interruptions.
- * When 0, all lock acquisitions are interruptible. When positive, no lock acquisitions
- * are interruptible. This is only true for database and global locks. Collection locks are
- * never interruptible.
+     * When 0, all lock acquisitions are interruptible. When positive, lock acquisitions
+     * are neither interruptible nor subject to timeout.
*/
int _uninterruptibleLocksRequested = 0;
diff --git a/src/mongo/db/concurrency/locker_noop.h b/src/mongo/db/concurrency/locker_noop.h
index 2ee5f6bc372..bb1dbfe5d86 100644
--- a/src/mongo/db/concurrency/locker_noop.h
+++ b/src/mongo/db/concurrency/locker_noop.h
@@ -82,11 +82,11 @@ public:
MONGO_UNREACHABLE;
}
- virtual LockResult lockGlobal(OperationContext* opCtx, LockMode mode) {
+ virtual void lockGlobal(OperationContext* opCtx, LockMode mode) {
MONGO_UNREACHABLE;
}
- virtual LockResult lockGlobal(LockMode mode) {
+ virtual void lockGlobal(LockMode mode) {
MONGO_UNREACHABLE;
}
@@ -98,11 +98,11 @@ public:
MONGO_UNREACHABLE;
}
- virtual LockResult lockGlobalComplete(OperationContext* opCtx, Date_t deadline) {
+ virtual void lockGlobalComplete(OperationContext* opCtx, Date_t deadline) {
MONGO_UNREACHABLE;
}
- virtual LockResult lockGlobalComplete(Date_t deadline) {
+ virtual void lockGlobalComplete(Date_t deadline) {
MONGO_UNREACHABLE;
}
@@ -122,7 +122,7 @@ public:
MONGO_UNREACHABLE;
}
- virtual LockResult lockRSTLComplete(OperationContext* opCtx, Date_t deadline) {
+ virtual void lockRSTLComplete(OperationContext* opCtx, Date_t deadline) {
MONGO_UNREACHABLE;
}
@@ -130,16 +130,9 @@ public:
MONGO_UNREACHABLE;
}
- virtual LockResult lock(OperationContext* opCtx,
- ResourceId resId,
- LockMode mode,
- Date_t deadline) {
- return LockResult::LOCK_OK;
- }
+ virtual void lock(OperationContext* opCtx, ResourceId resId, LockMode mode, Date_t deadline) {}
- virtual LockResult lock(ResourceId resId, LockMode mode, Date_t deadline) {
- return LockResult::LOCK_OK;
- }
+ virtual void lock(ResourceId resId, LockMode mode, Date_t deadline) {}
virtual void downgrade(ResourceId resId, LockMode newMode) {
MONGO_UNREACHABLE;
diff --git a/src/mongo/db/concurrency/replication_state_transition_lock_guard.cpp b/src/mongo/db/concurrency/replication_state_transition_lock_guard.cpp
index 8308bd3bc85..290018f7f8e 100644
--- a/src/mongo/db/concurrency/replication_state_transition_lock_guard.cpp
+++ b/src/mongo/db/concurrency/replication_state_transition_lock_guard.cpp
@@ -64,11 +64,10 @@ void ReplicationStateTransitionLockGuard::waitForLockUntil(mongo::Date_t deadlin
return;
}
+ _result = LOCK_INVALID;
// Wait for the completion of the lock request for the RSTL in mode X.
- _result = _opCtx->lockState()->lockRSTLComplete(_opCtx, deadline);
- uassert(ErrorCodes::ExceededTimeLimit,
- "Could not acquire the RSTL before the deadline",
- _opCtx->lockState()->isRSTLExclusive());
+ _opCtx->lockState()->lockRSTLComplete(_opCtx, deadline);
+ _result = LOCK_OK;
}
void ReplicationStateTransitionLockGuard::release() {
diff --git a/src/mongo/db/db.cpp b/src/mongo/db/db.cpp
index 75d895a9ae6..7e340097b23 100644
--- a/src/mongo/db/db.cpp
+++ b/src/mongo/db/db.cpp
@@ -981,11 +981,9 @@ void shutdownTask() {
LockerImpl* globalLocker = new LockerImpl();
LockResult result = globalLocker->lockGlobalBegin(MODE_X, Date_t::max());
if (result == LOCK_WAITING) {
- result = globalLocker->lockGlobalComplete(Date_t::max());
+ globalLocker->lockGlobalComplete(Date_t::max());
}
- invariant(LOCK_OK == result);
-
// Global storage engine may not be started in all cases before we exit
if (serviceContext->getStorageEngine()) {
shutdownGlobalStorageEngineCleanly(serviceContext);
diff --git a/src/mongo/db/repl/replication_coordinator_impl_test.cpp b/src/mongo/db/repl/replication_coordinator_impl_test.cpp
index ba1a922fb4a..8ccb4e7747d 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_test.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_test.cpp
@@ -1934,7 +1934,7 @@ TEST_F(StepDownTest,
ASSERT_THROWS_CODE(
getReplCoord()->stepDown(opCtx.get(), false, Milliseconds(0), Milliseconds(1000)),
AssertionException,
- ErrorCodes::ExceededTimeLimit);
+ ErrorCodes::LockTimeout);
ASSERT_TRUE(getReplCoord()->getMemberState().primary());
ASSERT_TRUE(locker->isRSTLExclusive());
diff --git a/src/mongo/db/transaction_participant_test.cpp b/src/mongo/db/transaction_participant_test.cpp
index 75e6ddafbad..adf777840fa 100644
--- a/src/mongo/db/transaction_participant_test.cpp
+++ b/src/mongo/db/transaction_participant_test.cpp
@@ -711,9 +711,7 @@ TEST_F(TxnParticipantTest, StepDownAfterPrepareDoesNotBlock) {
// Test that we can acquire the RSTL in mode X, and then immediately release it so the test can
// complete successfully.
auto func = [&](OperationContext* opCtx) {
- ASSERT_EQ(
- LOCK_OK,
- opCtx->lockState()->lock(opCtx, resourceIdReplicationStateTransitionLock, MODE_X));
+ opCtx->lockState()->lock(opCtx, resourceIdReplicationStateTransitionLock, MODE_X);
opCtx->lockState()->unlock(resourceIdReplicationStateTransitionLock);
};
runFunctionFromDifferentOpCtx(func);
@@ -734,9 +732,7 @@ TEST_F(TxnParticipantTest, StepDownAfterPrepareDoesNotBlockThenCommit) {
// Test that we can acquire the RSTL in mode X, and then immediately release it so the test can
// complete successfully.
auto func = [&](OperationContext* opCtx) {
- ASSERT_EQ(
- LOCK_OK,
- opCtx->lockState()->lock(opCtx, resourceIdReplicationStateTransitionLock, MODE_X));
+ opCtx->lockState()->lock(opCtx, resourceIdReplicationStateTransitionLock, MODE_X);
opCtx->lockState()->unlock(resourceIdReplicationStateTransitionLock);
};
runFunctionFromDifferentOpCtx(func);