author	Suganthi Mani <suganthi.mani@mongodb.com>	2019-03-04 09:40:08 -0500
committer	Suganthi Mani <suganthi.mani@mongodb.com>	2019-03-07 00:49:59 -0500
commit	91daf30b31e60048025162663ccb4f40cc24a837 (patch)
tree	2ea3036266d37c1c9598e8b32ac7585cf6159dcf /src/mongo/db/concurrency
parent	281401e56c2d89f8751203c8be0e5251ff7aaa5c (diff)
download	mongo-91daf30b31e60048025162663ccb4f40cc24a837.tar.gz
SERVER-39092 ReplicationStateTransitionLockGuard should be resilient to exceptions thrown before waitForLockUntil().
Diffstat (limited to 'src/mongo/db/concurrency')
-rw-r--r--	src/mongo/db/concurrency/d_concurrency_test.cpp	36
-rw-r--r--	src/mongo/db/concurrency/replication_state_transition_lock_guard.cpp	14
2 files changed, 41 insertions(+), 9 deletions(-)
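For context on the pattern this commit hardens: the guard supports a two-phase acquisition, where the RSTL request is enqueued without blocking (EnqueueOnly) and the blocking wait happens later in waitForLockUntil(). A minimal sketch of that pattern under stated assumptions: acquireRSTL() and doWorkThatMayThrow() are hypothetical names, and waitForLockUntil() is assumed to take a Date_t deadline.

    // Sketch only: two-phase RSTL acquisition as exercised by the test below.
    #include "mongo/db/concurrency/replication_state_transition_lock_guard.h"

    void acquireRSTL(OperationContext* opCtx, Date_t deadline) {
        // Phase 1: enqueue the MODE_X request without blocking.
        repl::ReplicationStateTransitionLockGuard rstl(
            opCtx, MODE_X, repl::ReplicationStateTransitionLockGuard::EnqueueOnly());

        // Anything run here may throw. Before this commit, an exception at this
        // point left a LOCK_WAITING request behind and tripped the destructor's
        // invariant; with this commit the destructor dequeues the pending request.
        doWorkThatMayThrow(opCtx);  // hypothetical placeholder

        // Phase 2: block until the lock is granted or the deadline passes.
        rstl.waitForLockUntil(deadline);
        invariant(rstl.isLocked());
    }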
diff --git a/src/mongo/db/concurrency/d_concurrency_test.cpp b/src/mongo/db/concurrency/d_concurrency_test.cpp
index 504b3fbe4ab..29852943be9 100644
--- a/src/mongo/db/concurrency/d_concurrency_test.cpp
+++ b/src/mongo/db/concurrency/d_concurrency_test.cpp
@@ -2139,5 +2139,41 @@ TEST_F(DConcurrencyTestFixture, RSTLLockGuardEnqueueAndWait) {
MODE_X);
}
+TEST_F(DConcurrencyTestFixture, RSTLLockGuardResilientToExceptionThrownBeforeWaitForRSTLComplete) {
+ auto clients = makeKClientsWithLockers(2);
+ auto firstOpCtx = clients[0].second.get();
+ auto secondOpCtx = clients[1].second.get();
+
+ // The first opCtx holds the RSTL.
+ repl::ReplicationStateTransitionLockGuard firstRSTL(firstOpCtx, MODE_X);
+ ASSERT_TRUE(firstRSTL.isLocked());
+ ASSERT_TRUE(firstOpCtx->lockState()->isRSTLExclusive());
+
+ {
+ // The second opCtx enqueues the lock request but cannot acquire it.
+ repl::ReplicationStateTransitionLockGuard secondRSTL(
+ secondOpCtx, MODE_X, repl::ReplicationStateTransitionLockGuard::EnqueueOnly());
+ ASSERT_FALSE(secondRSTL.isLocked());
+
+ // secondRSTL goes out of scope while its lock result is still LOCK_WAITING. Its
+ // destructor (~ReplicationStateTransitionLockGuard) should therefore clean the
+ // RSTL lock state out of both the locker and the lock manager.
+ }
+
+ // Verify that the RSTL lock state is cleaned from secondOpCtx's locker.
+ ASSERT_FALSE(secondOpCtx->lockState()->isRSTLLocked());
+
+ // Now have the first opCtx release the lock so we can test reacquiring it.
+ ASSERT_TRUE(firstOpCtx->lockState()->isRSTLExclusive());
+ firstRSTL.release();
+ ASSERT_FALSE(firstRSTL.isLocked());
+
+ // If secondRSTL's destruction did not remove its request from the conflict queue
+ // in the lock manager, the first opCtx cannot reacquire the RSTL in X mode.
+ firstRSTL.reacquire();
+ ASSERT_TRUE(firstRSTL.isLocked());
+ ASSERT_TRUE(firstOpCtx->lockState()->isRSTLExclusive());
+}
+
} // namespace
} // namespace mongo
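The test above drives the unwinding path directly; in real callers the same shape arises when an interruption is thrown between the enqueue and the wait. A hedged sketch of that shape (doInterruptibleWork() is a hypothetical stand-in for code that may throw between the two phases):

    // Hypothetical caller showing why destructor resilience matters.
    try {
        repl::ReplicationStateTransitionLockGuard rstl(
            opCtx, MODE_X, repl::ReplicationStateTransitionLockGuard::EnqueueOnly());
        doInterruptibleWork(opCtx);  // hypothetical; may throw before the wait
        rstl.waitForLockUntil(deadline);
    } catch (const DBException&) {
        // rstl's destructor has already run and dequeued the pending
        // (LOCK_WAITING) request, so other operations can still acquire
        // the RSTL in MODE_X afterwards.
    }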
diff --git a/src/mongo/db/concurrency/replication_state_transition_lock_guard.cpp b/src/mongo/db/concurrency/replication_state_transition_lock_guard.cpp
index 6224f7f5351..540621d88c8 100644
--- a/src/mongo/db/concurrency/replication_state_transition_lock_guard.cpp
+++ b/src/mongo/db/concurrency/replication_state_transition_lock_guard.cpp
@@ -87,15 +87,11 @@ void ReplicationStateTransitionLockGuard::_enqueueLock() {
}
void ReplicationStateTransitionLockGuard::_unlock() {
- // waitForLockUntil() must be called after enqueue. It either times out or succeeds,
- // so we should never see LOCK_WAITING here. This also means between the enqueue and
- // waitForLockUntil(), no exception is accepted. We could call lockRSTLComplete() with
- // timeout 0 here for pending locks to clean up the lock's state, but it's clearer to enforce
- // the exception-free pattern.
- invariant(_result != LOCK_WAITING);
- if (isLocked()) {
- _opCtx->lockState()->unlock(resourceIdReplicationStateTransitionLock);
- }
+ // If the ReplicationStateTransitionLockGuard is used inside a WriteUnitOfWork, no
+ // exception may be thrown between _enqueueLock() and waitForLockUntil(), since that
+ // would delay cleaning the failed RSTL lock attempt's state out of the lock manager.
+ invariant(!(_result == LOCK_WAITING && _opCtx->lockState()->inAWriteUnitOfWork()));
+ _opCtx->lockState()->unlock(resourceIdReplicationStateTransitionLock);
_result = LOCK_INVALID;
}
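A simplified restatement of the destructor-side cleanup this hunk introduces, under the assumption (implied by the test above) that Locker::unlock() on a LOCK_WAITING request dequeues it from the lock manager's conflict queue; unlockPendingRSTL() is illustrative, not the real method name.

    // Illustrative restatement of the cleanup behavior after this commit.
    void unlockPendingRSTL(OperationContext* opCtx, LockResult result) {
        // Unwinding a LOCK_WAITING request inside a WriteUnitOfWork remains a
        // programming error: two-phase locking would defer the unlock and leave
        // the waiting request stuck in the lock manager's conflict queue.
        invariant(!(result == LOCK_WAITING &&
                    opCtx->lockState()->inAWriteUnitOfWork()));

        // For a granted lock this releases it; for a pending one it dequeues the
        // request, which is what lets the test above reacquire the RSTL.
        opCtx->lockState()->unlock(resourceIdReplicationStateTransitionLock);
    }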