summaryrefslogtreecommitdiff
path: root/src/mongo
diff options
context:
space:
mode:
author	Suganthi Mani <suganthi.mani@mongodb.com>	2019-09-05 02:03:45 +0000
committer	evergreen <evergreen@mongodb.com>	2019-09-05 02:03:45 +0000
commit	78bd6e0f1860a7c9a7ea8aa0f5de5cfb7c40465f (patch)
tree	e47020f7c7b4500eb377a0057bb8b88f5e6e77bf /src/mongo
parent	e8e3f6ff2604e33e2d7fe1ec56d6559df066a452 (diff)
download	mongo-78bd6e0f1860a7c9a7ea8aa0f5de5cfb7c40465f.tar.gz
SERVER-38163 Remove enqueue abilities from GlobalLock.
Diffstat (limited to 'src/mongo')
-rw-r--r--src/mongo/db/concurrency/d_concurrency.cpp55
-rw-r--r--src/mongo/db/concurrency/d_concurrency.h20
-rw-r--r--src/mongo/db/concurrency/d_concurrency_test.cpp134
-rw-r--r--src/mongo/db/concurrency/lock_state.cpp48
-rw-r--r--src/mongo/db/concurrency/lock_state.h105
-rw-r--r--src/mongo/db/concurrency/lock_state_test.cpp6
-rw-r--r--src/mongo/db/concurrency/lock_stats_test.cpp4
-rw-r--r--src/mongo/db/concurrency/locker.h38
-rw-r--r--src/mongo/db/concurrency/locker_noop.h20
-rw-r--r--src/mongo/db/db.cpp9
10 files changed, 149 insertions, 290 deletions
diff --git a/src/mongo/db/concurrency/d_concurrency.cpp b/src/mongo/db/concurrency/d_concurrency.cpp
index d574e6c491c..5daed970104 100644
--- a/src/mongo/db/concurrency/d_concurrency.cpp
+++ b/src/mongo/db/concurrency/d_concurrency.cpp
@@ -141,34 +141,11 @@ Lock::GlobalLock::GlobalLock(OperationContext* opCtx,
LockMode lockMode,
Date_t deadline,
InterruptBehavior behavior)
- : GlobalLock(opCtx, lockMode, deadline, behavior, EnqueueOnly()) {
- waitForLockUntil(deadline);
-}
-
-Lock::GlobalLock::GlobalLock(OperationContext* opCtx,
- LockMode lockMode,
- Date_t deadline,
- InterruptBehavior behavior,
- EnqueueOnly enqueueOnly)
: _opCtx(opCtx),
_result(LOCK_INVALID),
_pbwm(opCtx->lockState(), resourceIdParallelBatchWriterMode),
_interruptBehavior(behavior),
_isOutermostLock(!opCtx->lockState()->isLocked()) {
- _enqueue(lockMode, deadline);
-}
-
-Lock::GlobalLock::GlobalLock(GlobalLock&& otherLock)
- : _opCtx(otherLock._opCtx),
- _result(otherLock._result),
- _pbwm(std::move(otherLock._pbwm)),
- _interruptBehavior(otherLock._interruptBehavior),
- _isOutermostLock(otherLock._isOutermostLock) {
- // Mark as moved so the destructor doesn't invalidate the newly-constructed lock.
- otherLock._result = LOCK_INVALID;
-}
-
-void Lock::GlobalLock::_enqueue(LockMode lockMode, Date_t deadline) {
_opCtx->lockState()->getFlowControlTicket(_opCtx, lockMode);
try {
@@ -188,7 +165,8 @@ void Lock::GlobalLock::_enqueue(LockMode lockMode, Date_t deadline) {
[this] { _opCtx->lockState()->unlock(resourceIdReplicationStateTransitionLock); });
_result = LOCK_INVALID;
- _result = _opCtx->lockState()->lockGlobalBegin(_opCtx, lockMode, deadline);
+ _opCtx->lockState()->lockGlobal(_opCtx, lockMode, deadline);
+ _result = LOCK_OK;
unlockRSTL.dismiss();
unlockPBWM.dismiss();
@@ -197,27 +175,18 @@ void Lock::GlobalLock::_enqueue(LockMode lockMode, Date_t deadline) {
if (_interruptBehavior == InterruptBehavior::kThrow)
throw;
}
+ auto acquiredLockMode = _opCtx->lockState()->getLockMode(resourceIdGlobal);
+ _opCtx->lockState()->setGlobalLockTakenInMode(acquiredLockMode);
}
-void Lock::GlobalLock::waitForLockUntil(Date_t deadline) {
- try {
- if (_result == LOCK_WAITING) {
- _result = LOCK_INVALID;
- _opCtx->lockState()->lockGlobalComplete(_opCtx, deadline);
- _result = LOCK_OK;
- }
- } catch (const ExceptionForCat<ErrorCategory::Interruption>&) {
- _opCtx->lockState()->unlock(resourceIdReplicationStateTransitionLock);
- if (_opCtx->lockState()->shouldConflictWithSecondaryBatchApplication()) {
- _pbwm.unlock();
- }
- // The kLeaveUnlocked behavior suppresses this exception.
- if (_interruptBehavior == InterruptBehavior::kThrow)
- throw;
- }
-
- auto lockMode = _opCtx->lockState()->getLockMode(resourceIdGlobal);
- _opCtx->lockState()->setGlobalLockTakenInMode(lockMode);
+Lock::GlobalLock::GlobalLock(GlobalLock&& otherLock)
+ : _opCtx(otherLock._opCtx),
+ _result(otherLock._result),
+ _pbwm(std::move(otherLock._pbwm)),
+ _interruptBehavior(otherLock._interruptBehavior),
+ _isOutermostLock(otherLock._isOutermostLock) {
+ // Mark as moved so the destructor doesn't invalidate the newly-constructed lock.
+ otherLock._result = LOCK_INVALID;
}
void Lock::GlobalLock::_unlock() {
diff --git a/src/mongo/db/concurrency/d_concurrency.h b/src/mongo/db/concurrency/d_concurrency.h
index be71ff93f02..4b8be82dc60 100644
--- a/src/mongo/db/concurrency/d_concurrency.h
+++ b/src/mongo/db/concurrency/d_concurrency.h
@@ -188,8 +188,6 @@ public:
*/
class GlobalLock {
public:
- class EnqueueOnly {};
-
/**
* A GlobalLock without a deadline defaults to Date_t::max() and an InterruptBehavior of
* kThrow.
@@ -207,18 +205,6 @@ public:
GlobalLock(GlobalLock&&);
- /**
- * Enqueues lock but does not block on lock acquisition.
- * Call waitForLockUntil() to complete locking process.
- *
- * Does not set Locker::setGlobalLockTakenInMode(). Call waitForLockUntil to do so.
- */
- GlobalLock(OperationContext* opCtx,
- LockMode lockMode,
- Date_t deadline,
- InterruptBehavior behavior,
- EnqueueOnly enqueueOnly);
-
~GlobalLock() {
// Preserve the original lock result which will be overridden by unlock().
auto lockResult = _result;
@@ -238,17 +224,11 @@ public:
}
}
- /**
- * Waits for lock to be granted. Sets Locker::setGlobalLockTakenInMode().
- */
- void waitForLockUntil(Date_t deadline);
-
bool isLocked() const {
return _result == LOCK_OK;
}
private:
- void _enqueue(LockMode lockMode, Date_t deadline);
void _unlock();
OperationContext* const _opCtx;
diff --git a/src/mongo/db/concurrency/d_concurrency_test.cpp b/src/mongo/db/concurrency/d_concurrency_test.cpp
index a8d93e8e0ee..ebe288562c9 100644
--- a/src/mongo/db/concurrency/d_concurrency_test.cpp
+++ b/src/mongo/db/concurrency/d_concurrency_test.cpp
@@ -850,7 +850,7 @@ TEST_F(DConcurrencyTestFixture,
result.get();
}
-TEST_F(DConcurrencyTestFixture, GlobalLockEnqueueOnlyNotInterruptedWithLeaveUnlockedBehavior) {
+TEST_F(DConcurrencyTestFixture, GlobalLockNotInterruptedWithLeaveUnlockedBehavior) {
auto clients = makeKClientsWithLockers(2);
auto opCtx1 = clients[0].second.get();
@@ -860,40 +860,10 @@ TEST_F(DConcurrencyTestFixture, GlobalLockEnqueueOnlyNotInterruptedWithLeaveUnlo
opCtx1->markKilled();
}
// This should not throw or acquire the lock.
- Lock::GlobalLock g1(opCtx1,
- MODE_S,
- Date_t::max(),
- Lock::InterruptBehavior::kLeaveUnlocked,
- Lock::GlobalLock::EnqueueOnly());
+ Lock::GlobalLock g1(opCtx1, MODE_S, Date_t::max(), Lock::InterruptBehavior::kLeaveUnlocked);
ASSERT(!g1.isLocked());
}
-TEST_F(DConcurrencyTestFixture, GlobalLockWaitForLockUntilNotInterruptedWithLeaveUnlockedBehavior) {
- auto clients = makeKClientsWithLockers(2);
- auto opCtx1 = clients[0].second.get();
- auto opCtx2 = clients[1].second.get();
-
- // The main thread takes an exclusive lock, causing the spawned thread to wait when it attempts
- // to acquire a conflicting lock.
- Lock::GlobalLock g1(opCtx1, MODE_X);
- // Enqueue now so waitForLockUntil can be interrupted.
- Lock::GlobalLock g2(opCtx2,
- MODE_S,
- Date_t::max(),
- Lock::InterruptBehavior::kLeaveUnlocked,
- Lock::GlobalLock::EnqueueOnly());
-
- ASSERT(g1.isLocked());
- ASSERT(!g2.isLocked());
-
- // Killing the lock wait should not interrupt it, but rather leave it lock unlocked.
- auto result = runTaskAndKill(opCtx2, [&]() { g2.waitForLockUntil(Date_t::max()); });
-
- ASSERT(!g2.isLocked());
- // Should not throw an exception.
- result.get();
-}
-
TEST_F(DConcurrencyTestFixture, SetMaxLockTimeoutMillisAndDoNotUsingWithInterruptBehavior) {
auto clients = makeKClientsWithLockers(2);
auto opCtx1 = clients[0].second.get();
@@ -1075,16 +1045,16 @@ TEST_F(DConcurrencyTestFixture, LockCompleteInterruptedWhenUncontested) {
auto opCtx1 = clientOpctxPairs[0].second.get();
auto opCtx2 = clientOpctxPairs[1].second.get();
- boost::optional<Lock::GlobalLock> globalWrite;
- globalWrite.emplace(opCtx1, MODE_IX);
- ASSERT(globalWrite->isLocked());
+ boost::optional<repl::ReplicationStateTransitionLockGuard> lockXGranted;
+ lockXGranted.emplace(opCtx1, MODE_IX, repl::ReplicationStateTransitionLockGuard::EnqueueOnly());
+ ASSERT(lockXGranted->isLocked());
// Attempt to take a conflicting lock, which will fail.
- LockResult result = opCtx2->lockState()->lockGlobalBegin(opCtx2, MODE_X, Date_t::max());
+ LockResult result = opCtx2->lockState()->lockRSTLBegin(opCtx2, MODE_X);
ASSERT_EQ(result, LOCK_WAITING);
// Release the conflicting lock.
- globalWrite.reset();
+ lockXGranted.reset();
{
stdx::lock_guard<Client> clientLock(*opCtx2->getClient());
@@ -1093,7 +1063,7 @@ TEST_F(DConcurrencyTestFixture, LockCompleteInterruptedWhenUncontested) {
// After the operation has been killed, the lockComplete request should fail, even though the
// lock is uncontested.
- ASSERT_THROWS_CODE(opCtx2->lockState()->lockGlobalComplete(opCtx2, Date_t::max()),
+ ASSERT_THROWS_CODE(opCtx2->lockState()->lockRSTLComplete(opCtx2, MODE_X, Date_t::max()),
AssertionException,
ErrorCodes::Interrupted);
}
@@ -1841,23 +1811,25 @@ TEST_F(DConcurrencyTestFixture, CollectionLockTimeout) {
}
TEST_F(DConcurrencyTestFixture, CompatibleFirstWithSXIS) {
+ // Currently, we are allowed to acquire IX and X lock modes for RSTL. To overcome it,
+ // this fail point will allow the test to acquire RSTL in any lock modes.
+ FailPointEnableBlock enableTestOnlyFlag("enableTestOnlyFlagforRSTL");
+
auto clientOpctxPairs = makeKClientsWithLockers(3);
auto opctx1 = clientOpctxPairs[0].second.get();
auto opctx2 = clientOpctxPairs[1].second.get();
auto opctx3 = clientOpctxPairs[2].second.get();
// Build a queue of MODE_S <- MODE_X <- MODE_IS, with MODE_S granted.
- Lock::GlobalRead lockS(opctx1);
+ repl::ReplicationStateTransitionLockGuard lockS(opctx1, MODE_S);
ASSERT(lockS.isLocked());
- Lock::GlobalLock lockX(opctx2,
- MODE_X,
- Date_t::max(),
- Lock::InterruptBehavior::kThrow,
- Lock::GlobalLock::EnqueueOnly());
+
+ repl::ReplicationStateTransitionLockGuard lockX(
+ opctx2, MODE_X, repl::ReplicationStateTransitionLockGuard::EnqueueOnly());
ASSERT(!lockX.isLocked());
// A MODE_IS should be granted due to compatibleFirst policy.
- Lock::GlobalLock lockIS(opctx3, MODE_IS, Date_t::now(), Lock::InterruptBehavior::kThrow);
+ repl::ReplicationStateTransitionLockGuard lockIS(opctx3, MODE_IS);
ASSERT(lockIS.isLocked());
ASSERT_THROWS_CODE(
@@ -1867,6 +1839,10 @@ TEST_F(DConcurrencyTestFixture, CompatibleFirstWithSXIS) {
TEST_F(DConcurrencyTestFixture, CompatibleFirstWithXSIXIS) {
+ // Currently, we are allowed to acquire IX and X lock modes for RSTL. To overcome it,
+ // this fail point will allow the test to acquire RSTL in any lock modes.
+ FailPointEnableBlock enableTestOnlyFlag("enableTestOnlyFlagforRSTL");
+
auto clientOpctxPairs = makeKClientsWithLockers(4);
auto opctx1 = clientOpctxPairs[0].second.get();
auto opctx2 = clientOpctxPairs[1].second.get();
@@ -1874,27 +1850,17 @@ TEST_F(DConcurrencyTestFixture, CompatibleFirstWithXSIXIS) {
auto opctx4 = clientOpctxPairs[3].second.get();
// Build a queue of MODE_X <- MODE_S <- MODE_IX <- MODE_IS, with MODE_X granted.
- boost::optional<Lock::GlobalWrite> lockX;
- lockX.emplace(opctx1);
+ boost::optional<repl::ReplicationStateTransitionLockGuard> lockX;
+ lockX.emplace(opctx1, MODE_X);
ASSERT(lockX->isLocked());
- boost::optional<Lock::GlobalLock> lockS;
- lockS.emplace(opctx2,
- MODE_S,
- Date_t::max(),
- Lock::InterruptBehavior::kThrow,
- Lock::GlobalLock::EnqueueOnly());
+ boost::optional<repl::ReplicationStateTransitionLockGuard> lockS;
+ lockS.emplace(opctx2, MODE_S, repl::ReplicationStateTransitionLockGuard::EnqueueOnly());
ASSERT(!lockS->isLocked());
- Lock::GlobalLock lockIX(opctx3,
- MODE_IX,
- Date_t::max(),
- Lock::InterruptBehavior::kThrow,
- Lock::GlobalLock::EnqueueOnly());
+ repl::ReplicationStateTransitionLockGuard lockIX(
+ opctx3, MODE_IX, repl::ReplicationStateTransitionLockGuard::EnqueueOnly());
ASSERT(!lockIX.isLocked());
- Lock::GlobalLock lockIS(opctx4,
- MODE_IS,
- Date_t::max(),
- Lock::InterruptBehavior::kThrow,
- Lock::GlobalLock::EnqueueOnly());
+ repl::ReplicationStateTransitionLockGuard lockIS(
+ opctx4, MODE_IS, repl::ReplicationStateTransitionLockGuard::EnqueueOnly());
ASSERT(!lockIS.isLocked());
@@ -1913,6 +1879,10 @@ TEST_F(DConcurrencyTestFixture, CompatibleFirstWithXSIXIS) {
}
TEST_F(DConcurrencyTestFixture, CompatibleFirstWithXSXIXIS) {
+ // Currently, we are allowed to acquire IX and X lock modes for RSTL. To overcome it,
+ // this fail point will allow the test to acquire RSTL in any lock modes.
+ FailPointEnableBlock enableTestOnlyFlag("enableTestOnlyFlagforRSTL");
+
auto clientOpctxPairs = makeKClientsWithLockers(5);
auto opctx1 = clientOpctxPairs[0].second.get();
auto opctx2 = clientOpctxPairs[1].second.get();
@@ -1922,38 +1892,24 @@ TEST_F(DConcurrencyTestFixture, CompatibleFirstWithXSXIXIS) {
// Build a queue of MODE_X <- MODE_S <- MODE_X <- MODE_IX <- MODE_IS, with the first MODE_X
// granted and check that releasing it will result in the MODE_IS being granted.
- boost::optional<Lock::GlobalWrite> lockXgranted;
- lockXgranted.emplace(opctx1);
+ boost::optional<repl::ReplicationStateTransitionLockGuard> lockXgranted;
+ lockXgranted.emplace(opctx1, MODE_X);
ASSERT(lockXgranted->isLocked());
- boost::optional<Lock::GlobalLock> lockX;
- lockX.emplace(opctx3,
- MODE_X,
- Date_t::max(),
- Lock::InterruptBehavior::kThrow,
- Lock::GlobalLock::EnqueueOnly());
+ boost::optional<repl::ReplicationStateTransitionLockGuard> lockX;
+ lockX.emplace(opctx3, MODE_X, repl::ReplicationStateTransitionLockGuard::EnqueueOnly());
ASSERT(!lockX->isLocked());
// Now request MODE_S: it will be first in the pending list due to EnqueueAtFront policy.
- boost::optional<Lock::GlobalLock> lockS;
- lockS.emplace(opctx2,
- MODE_S,
- Date_t::max(),
- Lock::InterruptBehavior::kThrow,
- Lock::GlobalLock::EnqueueOnly());
+ boost::optional<repl::ReplicationStateTransitionLockGuard> lockS;
+ lockS.emplace(opctx2, MODE_S, repl::ReplicationStateTransitionLockGuard::EnqueueOnly());
ASSERT(!lockS->isLocked());
- Lock::GlobalLock lockIX(opctx4,
- MODE_IX,
- Date_t::max(),
- Lock::InterruptBehavior::kThrow,
- Lock::GlobalLock::EnqueueOnly());
+ repl::ReplicationStateTransitionLockGuard lockIX(
+ opctx4, MODE_IX, repl::ReplicationStateTransitionLockGuard::EnqueueOnly());
ASSERT(!lockIX.isLocked());
- Lock::GlobalLock lockIS(opctx5,
- MODE_IS,
- Date_t::max(),
- Lock::InterruptBehavior::kThrow,
- Lock::GlobalLock::EnqueueOnly());
+ repl::ReplicationStateTransitionLockGuard lockIS(
+ opctx5, MODE_IS, repl::ReplicationStateTransitionLockGuard::EnqueueOnly());
ASSERT(!lockIS.isLocked());
@@ -2039,13 +1995,11 @@ TEST_F(DConcurrencyTestFixture, CompatibleFirstStress) {
lock.emplace(opCtx,
iters % 20 ? MODE_IS : MODE_S,
Date_t::now(),
- Lock::InterruptBehavior::kLeaveUnlocked,
- Lock::GlobalLock::EnqueueOnly());
+ Lock::InterruptBehavior::kLeaveUnlocked);
// If thread 0 is holding the MODE_S lock while we tried to acquire a
// MODE_IS or MODE_S lock, the CompatibleFirst policy guarantees success.
auto newInterval = readOnlyInterval.load();
invariant(!interval || interval != newInterval || lock->isLocked());
- lock->waitForLockUntil(Date_t::now());
break;
}
case 5:
diff --git a/src/mongo/db/concurrency/lock_state.cpp b/src/mongo/db/concurrency/lock_state.cpp
index 06f9a78fee2..3a162b425b0 100644
--- a/src/mongo/db/concurrency/lock_state.cpp
+++ b/src/mongo/db/concurrency/lock_state.cpp
@@ -52,6 +52,7 @@
namespace mongo {
MONGO_FAIL_POINT_DEFINE(failNonIntentLocksIfWaitNeeded);
+MONGO_FAIL_POINT_DEFINE(enableTestOnlyFlagforRSTL);
namespace {
@@ -303,13 +304,6 @@ Locker::ClientState LockerImpl::getClientState() const {
return state;
}
-void LockerImpl::lockGlobal(OperationContext* opCtx, LockMode mode) {
- LockResult result = _lockGlobalBegin(opCtx, mode, Date_t::max());
- if (result == LOCK_WAITING) {
- lockGlobalComplete(opCtx, Date_t::max());
- }
-}
-
void LockerImpl::reacquireTicket(OperationContext* opCtx) {
invariant(_modeForTicket != MODE_NONE);
auto clientState = _clientState.load();
@@ -355,7 +349,7 @@ bool LockerImpl::_acquireTicket(OperationContext* opCtx, LockMode mode, Date_t d
return true;
}
-LockResult LockerImpl::_lockGlobalBegin(OperationContext* opCtx, LockMode mode, Date_t deadline) {
+void LockerImpl::lockGlobal(OperationContext* opCtx, LockMode mode, Date_t deadline) {
dassert(isLocked() == (_modeForTicket != MODE_NONE));
if (_modeForTicket == MODE_NONE) {
if (_uninterruptibleLocksRequested) {
@@ -382,13 +376,13 @@ LockResult LockerImpl::_lockGlobalBegin(OperationContext* opCtx, LockMode mode,
}
}
- const LockResult result = lockBegin(opCtx, resourceIdGlobal, actualLockMode);
- invariant(result == LOCK_OK || result == LOCK_WAITING);
- return result;
-}
+ const LockResult result = _lockBegin(opCtx, resourceIdGlobal, actualLockMode);
+ // Fast, uncontended path
+ if (result == LOCK_OK)
+ return;
-void LockerImpl::lockGlobalComplete(OperationContext* opCtx, Date_t deadline) {
- lockComplete(opCtx, resourceIdGlobal, getLockMode(resourceIdGlobal), deadline);
+ invariant(result == LOCK_WAITING);
+ _lockComplete(opCtx, resourceIdGlobal, actualLockMode, deadline);
}
bool LockerImpl::unlockGlobal() {
@@ -517,14 +511,14 @@ void LockerImpl::lock(OperationContext* opCtx, ResourceId resId, LockMode mode,
// `lockGlobal` must be called to lock `resourceIdGlobal`.
invariant(resId != resourceIdGlobal);
- const LockResult result = lockBegin(opCtx, resId, mode);
+ const LockResult result = _lockBegin(opCtx, resId, mode);
// Fast, uncontended path
if (result == LOCK_OK)
return;
invariant(result == LOCK_WAITING);
- lockComplete(opCtx, resId, mode, deadline);
+ _lockComplete(opCtx, resId, mode, deadline);
}
void LockerImpl::downgrade(ResourceId resId, LockMode newMode) {
@@ -811,7 +805,7 @@ void LockerImpl::restoreLockState(OperationContext* opCtx, const Locker::LockSna
invariant(_modeForTicket != MODE_NONE);
}
-LockResult LockerImpl::lockBegin(OperationContext* opCtx, ResourceId resId, LockMode mode) {
+LockResult LockerImpl::_lockBegin(OperationContext* opCtx, ResourceId resId, LockMode mode) {
dassert(!getWaitingResource().isValid());
LockRequest* request;
@@ -887,10 +881,10 @@ LockResult LockerImpl::lockBegin(OperationContext* opCtx, ResourceId resId, Lock
return result;
}
-void LockerImpl::lockComplete(OperationContext* opCtx,
- ResourceId resId,
- LockMode mode,
- Date_t deadline) {
+void LockerImpl::_lockComplete(OperationContext* opCtx,
+ ResourceId resId,
+ LockMode mode,
+ Date_t deadline) {
// Clean up the state on any failed lock attempts.
auto unlockOnErrorGuard = makeGuard([&] {
@@ -995,12 +989,18 @@ void LockerImpl::getFlowControlTicket(OperationContext* opCtx, LockMode lockMode
}
LockResult LockerImpl::lockRSTLBegin(OperationContext* opCtx, LockMode mode) {
- invariant(mode == MODE_IX || mode == MODE_X);
- return lockBegin(opCtx, resourceIdReplicationStateTransitionLock, mode);
+ bool testOnly = false;
+
+ if (MONGO_FAIL_POINT(enableTestOnlyFlagforRSTL)) {
+ testOnly = true;
+ }
+
+ invariant(testOnly || mode == MODE_IX || mode == MODE_X);
+ return _lockBegin(opCtx, resourceIdReplicationStateTransitionLock, mode);
}
void LockerImpl::lockRSTLComplete(OperationContext* opCtx, LockMode mode, Date_t deadline) {
- lockComplete(opCtx, resourceIdReplicationStateTransitionLock, mode, deadline);
+ _lockComplete(opCtx, resourceIdReplicationStateTransitionLock, mode, deadline);
}
void LockerImpl::releaseTicket() {
diff --git a/src/mongo/db/concurrency/lock_state.h b/src/mongo/db/concurrency/lock_state.h
index 7a592d1ac11..9371ba0ae13 100644
--- a/src/mongo/db/concurrency/lock_state.h
+++ b/src/mongo/db/concurrency/lock_state.h
@@ -129,19 +129,14 @@ public:
_maxLockTimeout = boost::none;
}
- virtual void lockGlobal(OperationContext* opCtx, LockMode mode);
- virtual void lockGlobal(LockMode mode) {
- return lockGlobal(nullptr, mode);
- }
- virtual LockResult lockGlobalBegin(OperationContext* opCtx, LockMode mode, Date_t deadline) {
- return _lockGlobalBegin(opCtx, mode, deadline);
- }
- virtual LockResult lockGlobalBegin(LockMode mode, Date_t deadline) {
- return _lockGlobalBegin(nullptr, mode, deadline);
- }
- virtual void lockGlobalComplete(OperationContext* opCtx, Date_t deadline);
- virtual void lockGlobalComplete(Date_t deadline) {
- lockGlobalComplete(nullptr, deadline);
+ /**
+ * Acquires the ticket within the deadline (or _maxLockTimeout) and tries to grab the lock.
+ */
+ virtual void lockGlobal(OperationContext* opCtx,
+ LockMode mode,
+ Date_t deadline = Date_t::max());
+ virtual void lockGlobal(LockMode mode, Date_t deadline = Date_t::max()) {
+ return lockGlobal(nullptr, mode, deadline);
}
virtual bool unlockGlobal();
@@ -214,79 +209,81 @@ public:
virtual void releaseTicket();
virtual void reacquireTicket(OperationContext* opCtx);
+ void getFlowControlTicket(OperationContext* opCtx, LockMode lockMode) override;
+
+ FlowControlTicketholder::CurOp getFlowControlStats() const override {
+ return _flowControlStats;
+ }
+
+ //
+ // Below functions are for testing only.
+ //
+
+ FastMapNoAlloc<ResourceId, LockRequest> getRequestsForTest() const {
+ scoped_spinlock scopedLock(_lock);
+ return _requests;
+ }
+
+ LockResult lockBeginForTest(OperationContext* opCtx, ResourceId resId, LockMode mode) {
+ return _lockBegin(opCtx, resId, mode);
+ }
+
+ void lockCompleteForTest(OperationContext* opCtx,
+ ResourceId resId,
+ LockMode mode,
+ Date_t deadline) {
+ _lockComplete(opCtx, resId, mode, deadline);
+ }
+
+private:
+ typedef FastMapNoAlloc<ResourceId, LockRequest> LockRequestsMap;
+
/**
* Allows for lock requests to be requested in a non-blocking way. There can be only one
* outstanding pending lock request per locker object.
*
- * lockBegin posts a request to the lock manager for the specified lock to be acquired,
+ * _lockBegin posts a request to the lock manager for the specified lock to be acquired,
* which either immediately grants the lock, or puts the requestor on the conflict queue
* and returns immediately with the result of the acquisition. The result can be one of:
*
* LOCK_OK - Nothing more needs to be done. The lock is granted.
* LOCK_WAITING - The request has been queued up and will be granted as soon as the lock
- * is free. If this result is returned, typically lockComplete needs to be called in
+ * is free. If this result is returned, typically _lockComplete needs to be called in
* order to wait for the actual grant to occur. If the caller no longer needs to wait
* for the grant to happen, unlock needs to be called with the same resource passed
- * to lockBegin.
+ * to _lockBegin.
*
- * In other words for each call to lockBegin, which does not return LOCK_OK, there needs to
- * be a corresponding call to either lockComplete or unlock.
+ * In other words for each call to _lockBegin, which does not return LOCK_OK, there needs to
+ * be a corresponding call to either _lockComplete or unlock.
*
- * If an operation context is provided that represents an interrupted operation, lockBegin will
+ * If an operation context is provided that represents an interrupted operation, _lockBegin will
* throw an exception whenever it would have been possible to grant the lock with LOCK_OK. This
* behavior can be disabled with an UninterruptibleLockGuard.
*
* NOTE: These methods are not public and should only be used inside the class
* implementation and for unit-tests and not called directly.
*/
- LockResult lockBegin(OperationContext* opCtx, ResourceId resId, LockMode mode);
+ LockResult _lockBegin(OperationContext* opCtx, ResourceId resId, LockMode mode);
/**
- * Waits for the completion of a lock, previously requested through lockBegin or
- * lockGlobalBegin. Must only be called, if lockBegin returned LOCK_WAITING.
+ * Waits for the completion of a lock, previously requested through _lockBegin/
+ * Must only be called, if _lockBegin returned LOCK_WAITING.
*
* @param opCtx Operation context that, if not null, will be used to allow interruptible lock
* acquisition.
- * @param resId Resource id which was passed to an earlier lockBegin call. Must match.
- * @param mode Mode which was passed to an earlier lockBegin call. Must match.
+ * @param resId Resource id which was passed to an earlier _lockBegin call. Must match.
+ * @param mode Mode which was passed to an earlier _lockBegin call. Must match.
* @param deadline The absolute time point when this lock acquisition will time out, if not yet
* granted.
*
* Throws an exception if it is interrupted.
*/
- void lockComplete(OperationContext* opCtx, ResourceId resId, LockMode mode, Date_t deadline);
-
- void lockComplete(ResourceId resId, LockMode mode, Date_t deadline) {
- lockComplete(nullptr, resId, mode, deadline);
- }
-
- void getFlowControlTicket(OperationContext* opCtx, LockMode lockMode) override;
+ void _lockComplete(OperationContext* opCtx, ResourceId resId, LockMode mode, Date_t deadline);
- FlowControlTicketholder::CurOp getFlowControlStats() const override {
- return _flowControlStats;
- }
-
- /**
- * This function is for unit testing only.
- */
- FastMapNoAlloc<ResourceId, LockRequest> getRequestsForTest() const {
- scoped_spinlock scopedLock(_lock);
- return _requests;
+ void _lockComplete(ResourceId resId, LockMode mode, Date_t deadline) {
+ _lockComplete(nullptr, resId, mode, deadline);
}
-private:
- typedef FastMapNoAlloc<ResourceId, LockRequest> LockRequestsMap;
-
- /**
- * Acquires the ticket within the deadline (or _maxLockTimeout) and tries to grab the lock.
- *
- * Returns LOCK_OK if successfully acquired the global lock,
- * or LOCK_WAITING if the global lock is currently held by someone else.
- *
- * The ticket acquisition can be interrupted (by killOp/timeout), thus throwing an exception.
- */
- LockResult _lockGlobalBegin(OperationContext* opCtx, LockMode, Date_t deadline);
-
/**
* The main functionality of the unlock method, except accepts iterator in order to avoid
* additional lookups during unlockGlobal. Frees locks immediately, so must not be called from
diff --git a/src/mongo/db/concurrency/lock_state_test.cpp b/src/mongo/db/concurrency/lock_state_test.cpp
index 4c2d841d28d..f32068aa74b 100644
--- a/src/mongo/db/concurrency/lock_state_test.cpp
+++ b/src/mongo/db/concurrency/lock_state_test.cpp
@@ -992,7 +992,8 @@ TEST_F(LockerImplTest, GetLockerInfoShouldReportPendingLocks) {
LockerImpl conflictingLocker;
conflictingLocker.lockGlobal(MODE_IS);
conflictingLocker.lock(dbId, MODE_IS);
- ASSERT_EQ(LOCK_WAITING, conflictingLocker.lockBegin(nullptr, collectionId, MODE_IS));
+ ASSERT_EQ(LOCK_WAITING,
+ conflictingLocker.lockBeginForTest(nullptr /* opCtx */, collectionId, MODE_IS));
// Assert the held locks show up in the output of getLockerInfo().
Locker::LockerInfo lockerInfo;
@@ -1010,7 +1011,8 @@ TEST_F(LockerImplTest, GetLockerInfoShouldReportPendingLocks) {
ASSERT(successfulLocker.unlock(dbId));
ASSERT(successfulLocker.unlockGlobal());
- conflictingLocker.lockComplete(collectionId, MODE_IS, Date_t::now());
+ conflictingLocker.lockCompleteForTest(
+ nullptr /* opCtx */, collectionId, MODE_IS, Date_t::now());
conflictingLocker.getLockerInfo(&lockerInfo, boost::none);
ASSERT_FALSE(lockerInfo.waitingResource.isValid());
diff --git a/src/mongo/db/concurrency/lock_stats_test.cpp b/src/mongo/db/concurrency/lock_stats_test.cpp
index a83758904dc..fc10fe33745 100644
--- a/src/mongo/db/concurrency/lock_stats_test.cpp
+++ b/src/mongo/db/concurrency/lock_stats_test.cpp
@@ -69,10 +69,10 @@ TEST_F(LockStatsTest, Wait) {
{
// This will block
LockerForTests lockerConflict(MODE_IX);
- ASSERT_EQUALS(LOCK_WAITING, lockerConflict.lockBegin(opCtx.get(), resId, MODE_S));
+ ASSERT_EQUALS(LOCK_WAITING, lockerConflict.lockBeginForTest(opCtx.get(), resId, MODE_S));
// Sleep 1 millisecond so the wait time passes
- ASSERT_THROWS_CODE(lockerConflict.lockComplete(
+ ASSERT_THROWS_CODE(lockerConflict.lockCompleteForTest(
opCtx.get(), resId, MODE_S, Date_t::now() + Milliseconds(5)),
AssertionException,
ErrorCodes::LockTimeout);
diff --git a/src/mongo/db/concurrency/locker.h b/src/mongo/db/concurrency/locker.h
index 746b65fa40d..48674103bd5 100644
--- a/src/mongo/db/concurrency/locker.h
+++ b/src/mongo/db/concurrency/locker.h
@@ -153,36 +153,16 @@ public:
* @param opCtx OperationContext used to interrupt the lock waiting, if provided.
* @param mode Mode in which the global lock should be acquired. Also indicates the intent
* of the operation.
+ * @param deadline indicates the absolute time point when this lock acquisition will time out,
+ * if not yet granted. Deadline will be also used for TicketHolder, if there is one.
*
- * It may throw an exception if it is interrupted.
- */
- virtual void lockGlobal(OperationContext* opCtx, LockMode mode) = 0;
- virtual void lockGlobal(LockMode mode) = 0;
-
- /**
- * Requests the global lock to be acquired in the specified mode.
- *
- * See the comments for lockBegin/Complete for more information on the semantics. The deadline
- * indicates the absolute time point when this lock acquisition will time out, if not yet
- * granted. The lockGlobalBegin method has a deadline for use with the TicketHolder, if there is
- * one.
- *
- * Returns LOCK_OK if the global lock is successfully acquired,
- * or LOCK_WAITING if the global lock is currently held by someone else.
- *
- * The ticket acquisition phase can be interrupted or time out, thus throwing an exception.
- */
- virtual LockResult lockGlobalBegin(OperationContext* opCtx, LockMode mode, Date_t deadline) = 0;
- virtual LockResult lockGlobalBegin(LockMode mode, Date_t deadline) = 0;
-
- /**
- * Calling lockGlobalComplete without an OperationContext does not allow the lock acquisition
- * to be interrupted.
- *
- * It may throw an exception if it is interrupted.
+ * It may throw an exception if it is interrupted. The ticket acquisition phase can also be
+ * interrupted by killOp or time out, thus throwing an exception.
*/
- virtual void lockGlobalComplete(OperationContext* opCtx, Date_t deadline) = 0;
- virtual void lockGlobalComplete(Date_t deadline) = 0;
+ virtual void lockGlobal(OperationContext* opCtx,
+ LockMode mode,
+ Date_t deadline = Date_t::max()) = 0;
+ virtual void lockGlobal(LockMode mode, Date_t deadline = Date_t::max()) = 0;
/**
* Decrements the reference count on the global lock. If the reference count on the
@@ -201,7 +181,7 @@ public:
* Requests the RSTL to be acquired in the requested mode (typically mode X) . This should only
* be called inside ReplicationStateTransitionLockGuard.
*
- * See the comments for lockBegin/Complete for more information on the semantics.
+ * See the comments for _lockBegin/Complete for more information on the semantics.
*/
virtual LockResult lockRSTLBegin(OperationContext* opCtx, LockMode mode) = 0;
diff --git a/src/mongo/db/concurrency/locker_noop.h b/src/mongo/db/concurrency/locker_noop.h
index 3780e4f5c0b..7e60aeeec4a 100644
--- a/src/mongo/db/concurrency/locker_noop.h
+++ b/src/mongo/db/concurrency/locker_noop.h
@@ -82,27 +82,11 @@ public:
MONGO_UNREACHABLE;
}
- virtual void lockGlobal(OperationContext* opCtx, LockMode mode) {
+ virtual void lockGlobal(OperationContext* opCtx, LockMode mode, Date_t deadline) {
MONGO_UNREACHABLE;
}
- virtual void lockGlobal(LockMode mode) {
- MONGO_UNREACHABLE;
- }
-
- virtual LockResult lockGlobalBegin(OperationContext* opCtx, LockMode mode, Date_t deadline) {
- MONGO_UNREACHABLE;
- }
-
- virtual LockResult lockGlobalBegin(LockMode mode, Date_t deadline) {
- MONGO_UNREACHABLE;
- }
-
- virtual void lockGlobalComplete(OperationContext* opCtx, Date_t deadline) {
- MONGO_UNREACHABLE;
- }
-
- virtual void lockGlobalComplete(Date_t deadline) {
+ virtual void lockGlobal(LockMode mode, Date_t deadline) {
MONGO_UNREACHABLE;
}
diff --git a/src/mongo/db/db.cpp b/src/mongo/db/db.cpp
index b24bdf42600..b9025bd8cce 100644
--- a/src/mongo/db/db.cpp
+++ b/src/mongo/db/db.cpp
@@ -1023,18 +1023,11 @@ void shutdownTask(const ShutdownTaskArgs& shutdownArgs) {
// We should always be able to acquire the global lock at shutdown.
//
- // TODO: This call chain uses the locker directly, because we do not want to start an
- // operation context, which also instantiates a recovery unit. Also, using the
- // lockGlobalBegin/lockGlobalComplete sequence, we avoid taking the flush lock.
- //
// For a Windows service, dbexit does not call exit(), so we must leak the lock outside
// of this function to prevent any operations from running that need a lock.
//
LockerImpl* globalLocker = new LockerImpl();
- LockResult result = globalLocker->lockGlobalBegin(MODE_X, Date_t::max());
- if (result == LOCK_WAITING) {
- globalLocker->lockGlobalComplete(Date_t::max());
- }
+ globalLocker->lockGlobal(MODE_X);
// Global storage engine may not be started in all cases before we exit
if (serviceContext->getStorageEngine()) {