author     Siyuan Zhou <siyuan.zhou@mongodb.com>    2019-01-08 16:48:35 -0500
committer  Siyuan Zhou <siyuan.zhou@mongodb.com>    2019-01-11 14:48:12 -0500
commit     a3170e3e75007c037acbc72de09ec5d39313e2b5 (patch)
tree       65a39f62637dab005c58077ba4caa27f6b12bdc3 /src
parent     a83b8477796991c522199cdd5b53800ae08c1e55 (diff)
download   mongo-a3170e3e75007c037acbc72de09ec5d39313e2b5.tar.gz
SERVER-38282 RSTL lock guard refactoring
Diffstat (limited to 'src')
-rw-r--r--  src/mongo/db/concurrency/SConscript                                   |  1
-rw-r--r--  src/mongo/db/concurrency/d_concurrency_test.cpp                       | 57
-rw-r--r--  src/mongo/db/concurrency/replication_state_transition_lock_guard.cpp  | 72
            (renamed from src/mongo/db/repl/replication_state_transition_lock_guard.cpp)
-rw-r--r--  src/mongo/db/concurrency/replication_state_transition_lock_guard.h    | 42
            (renamed from src/mongo/db/repl/replication_state_transition_lock_guard.h)
-rw-r--r--  src/mongo/db/repl/SConscript                                          | 14
-rw-r--r--  src/mongo/db/repl/bgsync.cpp                                          |  2
-rw-r--r--  src/mongo/db/repl/replication_coordinator_impl.cpp                    | 32
-rw-r--r--  src/mongo/db/repl/replication_coordinator_impl.h                      |  5
-rw-r--r--  src/mongo/db/repl/replication_coordinator_impl_elect_v1_test.cpp      |  2
-rw-r--r--  src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp          | 10
-rw-r--r--  src/mongo/db/repl/replication_coordinator_impl_test.cpp               |  2
-rw-r--r--  src/mongo/db/repl/rollback_impl.cpp                                   |  2
-rw-r--r--  src/mongo/db/repl/rs_rollback.cpp                                     |  2
-rw-r--r--  src/mongo/db/repl/sync_tail.cpp                                       |  2
14 files changed, 165 insertions(+), 80 deletions(-)
diff --git a/src/mongo/db/concurrency/SConscript b/src/mongo/db/concurrency/SConscript
index 5ce446c1dae..7af452c201c 100644
--- a/src/mongo/db/concurrency/SConscript
+++ b/src/mongo/db/concurrency/SConscript
@@ -39,6 +39,7 @@ env.Library(
'lock_manager.cpp',
'lock_state.cpp',
'lock_stats.cpp',
+ 'replication_state_transition_lock_guard.cpp',
],
LIBDEPS=[
'$BUILD_DIR/mongo/base',
diff --git a/src/mongo/db/concurrency/d_concurrency_test.cpp b/src/mongo/db/concurrency/d_concurrency_test.cpp
index 991cdadf86d..6b45cabd58f 100644
--- a/src/mongo/db/concurrency/d_concurrency_test.cpp
+++ b/src/mongo/db/concurrency/d_concurrency_test.cpp
@@ -38,6 +38,7 @@
#include "mongo/db/concurrency/d_concurrency.h"
#include "mongo/db/concurrency/global_lock_acquisition_tracker.h"
#include "mongo/db/concurrency/lock_manager_test_help.h"
+#include "mongo/db/concurrency/replication_state_transition_lock_guard.h"
#include "mongo/db/concurrency/write_conflict_exception.h"
#include "mongo/db/service_context_d_test_fixture.h"
#include "mongo/db/storage/recovery_unit_noop.h"
@@ -1930,6 +1931,62 @@ TEST_F(DConcurrencyTestFixture, TestGlobalLockDoesNotAbandonSnapshotWhenInWriteU
opCtx->lockState()->endWriteUnitOfWork();
}
+TEST_F(DConcurrencyTestFixture, RSTLLockGuardTimeout) {
+ auto clients = makeKClientsWithLockers(2);
+ auto firstOpCtx = clients[0].second.get();
+ auto secondOpCtx = clients[1].second.get();
+
+ // The first opCtx holds the RSTL.
+ repl::ReplicationStateTransitionLockGuard firstRSTL(firstOpCtx);
+ ASSERT_TRUE(firstRSTL.isLocked());
+ ASSERT_EQ(firstOpCtx->lockState()->getLockMode(resourceIdReplicationStateTransitionLock),
+ MODE_X);
+
+ // The second opCtx enqueues the lock request but cannot acquire it.
+ repl::ReplicationStateTransitionLockGuard secondRSTL(
+ secondOpCtx, repl::ReplicationStateTransitionLockGuard::EnqueueOnly());
+ ASSERT_FALSE(secondRSTL.isLocked());
+
+ // The second opCtx times out.
+ ASSERT_THROWS_CODE(secondRSTL.waitForLockUntil(Date_t::now() + Milliseconds(1)),
+ AssertionException,
+ ErrorCodes::ExceededTimeLimit);
+
+ // Check the first opCtx is still holding the RSTL.
+ ASSERT_TRUE(firstRSTL.isLocked());
+ ASSERT_EQ(firstOpCtx->lockState()->getLockMode(resourceIdReplicationStateTransitionLock),
+ MODE_X);
+ ASSERT_FALSE(secondRSTL.isLocked());
+}
+
+TEST_F(DConcurrencyTestFixture, RSTLLockGuardEnqueueAndWait) {
+ auto clients = makeKClientsWithLockers(2);
+ auto firstOpCtx = clients[0].second.get();
+ auto secondOpCtx = clients[1].second.get();
+
+ // The first opCtx holds the RSTL.
+ auto firstRSTL = stdx::make_unique<repl::ReplicationStateTransitionLockGuard>(firstOpCtx);
+ ASSERT_TRUE(firstRSTL->isLocked());
+ ASSERT_EQ(firstOpCtx->lockState()->getLockMode(resourceIdReplicationStateTransitionLock),
+ MODE_X);
+
+
+ // The second opCtx enqueues the lock request but cannot acquire it.
+ repl::ReplicationStateTransitionLockGuard secondRSTL(
+ secondOpCtx, repl::ReplicationStateTransitionLockGuard::EnqueueOnly());
+ ASSERT_FALSE(secondRSTL.isLocked());
+
+ // The first opCtx unlocks so the second opCtx acquires it.
+ firstRSTL.reset();
+ ASSERT_EQ(firstOpCtx->lockState()->getLockMode(resourceIdReplicationStateTransitionLock),
+ MODE_NONE);
+
+
+ secondRSTL.waitForLockUntil(Date_t::now());
+ ASSERT_TRUE(secondRSTL.isLocked());
+ ASSERT_EQ(secondOpCtx->lockState()->getLockMode(resourceIdReplicationStateTransitionLock),
+ MODE_X);
+}
} // namespace
} // namespace mongo
diff --git a/src/mongo/db/repl/replication_state_transition_lock_guard.cpp b/src/mongo/db/concurrency/replication_state_transition_lock_guard.cpp
index f9bfb5040b9..54b6e3696e8 100644
--- a/src/mongo/db/repl/replication_state_transition_lock_guard.cpp
+++ b/src/mongo/db/concurrency/replication_state_transition_lock_guard.cpp
@@ -32,61 +32,73 @@
#include "mongo/platform/basic.h"
-#include "mongo/db/repl/replication_state_transition_lock_guard.h"
-
-#include "mongo/db/kill_sessions_local.h"
+#include "mongo/db/concurrency/replication_state_transition_lock_guard.h"
+#include "mongo/db/operation_context.h"
namespace mongo {
namespace repl {
ReplicationStateTransitionLockGuard::ReplicationStateTransitionLockGuard(OperationContext* opCtx)
- : ReplicationStateTransitionLockGuard(opCtx, Args()) {}
+ : ReplicationStateTransitionLockGuard(opCtx, EnqueueOnly()) {
+ waitForLockUntil(Date_t::max());
+}
ReplicationStateTransitionLockGuard::ReplicationStateTransitionLockGuard(OperationContext* opCtx,
- const Args& args)
- : _opCtx(opCtx), _args(args) {
- // Enqueue a lock request for the RSTL in mode X.
- LockResult result = _opCtx->lockState()->lockRSTLBegin(_opCtx);
+ EnqueueOnly)
+ : _opCtx(opCtx) {
+ _enqueueLock();
+}
- if (args.killUserOperations) {
- ServiceContext* environment = opCtx->getServiceContext();
- environment->killAllUserOperations(opCtx, ErrorCodes::InterruptedDueToStepDown);
+ReplicationStateTransitionLockGuard::ReplicationStateTransitionLockGuard(
+ ReplicationStateTransitionLockGuard&& other)
+ : _opCtx(other._opCtx), _result(other._result) {
+ other._result = LOCK_INVALID;
+}
- // Destroy all stashed transaction resources, in order to release locks.
- SessionKiller::Matcher matcherAllSessions(
- KillAllSessionsByPatternSet{makeKillAllSessionsByPattern(opCtx)});
- killSessionsLocalKillTransactions(
- opCtx, matcherAllSessions, ErrorCodes::InterruptedDueToStepDown);
- }
+ReplicationStateTransitionLockGuard::~ReplicationStateTransitionLockGuard() {
+ _unlock();
+}
+void ReplicationStateTransitionLockGuard::waitForLockUntil(mongo::Date_t deadline) {
// We can return early if the lock request was already satisfied.
- if (result == LOCK_OK) {
+ if (_result == LOCK_OK) {
return;
}
// Wait for the completion of the lock request for the RSTL in mode X.
- _opCtx->lockState()->lockRSTLComplete(opCtx, args.lockDeadline);
+ _result = _opCtx->lockState()->lockRSTLComplete(_opCtx, deadline);
uassert(ErrorCodes::ExceededTimeLimit,
"Could not acquire the RSTL before the deadline",
- opCtx->lockState()->isRSTLExclusive());
+ _opCtx->lockState()->isRSTLExclusive());
}
-ReplicationStateTransitionLockGuard::~ReplicationStateTransitionLockGuard() {
- invariant(_opCtx->lockState()->isRSTLExclusive());
- _opCtx->lockState()->unlock(resourceIdReplicationStateTransitionLock);
+void ReplicationStateTransitionLockGuard::release() {
+ _unlock();
}
-void ReplicationStateTransitionLockGuard::releaseRSTL() {
- invariant(_opCtx->lockState()->isRSTLExclusive());
- _opCtx->lockState()->unlock(resourceIdReplicationStateTransitionLock);
+void ReplicationStateTransitionLockGuard::reacquire() {
+ _enqueueLock();
+ waitForLockUntil(Date_t::max());
}
-void ReplicationStateTransitionLockGuard::reacquireRSTL() {
- invariant(!_opCtx->lockState()->isRSTLLocked());
+void ReplicationStateTransitionLockGuard::_enqueueLock() {
+ // Enqueue a lock request for the RSTL in mode X.
+ _result = _opCtx->lockState()->lockRSTLBegin(_opCtx);
+}
- UninterruptibleLockGuard noInterrupt(_opCtx->lockState());
- _opCtx->lockState()->lock(_opCtx, resourceIdReplicationStateTransitionLock, MODE_X);
+void ReplicationStateTransitionLockGuard::_unlock() {
+ // waitForLockUntil() must be called after enqueue. It either times out or succeeds,
+ // so we should never see LOCK_WAITING here. This also means that no exception may be
+ // thrown between the enqueue and waitForLockUntil(). We could call lockRSTLComplete()
+ // with a timeout of 0 here to clean up a pending lock's state, but it's clearer to
+ // enforce the exception-free pattern.
+ invariant(_result != LOCK_WAITING);
+ if (isLocked()) {
+ _opCtx->lockState()->unlock(resourceIdReplicationStateTransitionLock);
+ }
+ _result = LOCK_INVALID;
}
+
} // namespace repl
} // namespace mongo
diff --git a/src/mongo/db/repl/replication_state_transition_lock_guard.h b/src/mongo/db/concurrency/replication_state_transition_lock_guard.h
index d05007a93ee..4e3e7f148a6 100644
--- a/src/mongo/db/repl/replication_state_transition_lock_guard.h
+++ b/src/mongo/db/concurrency/replication_state_transition_lock_guard.h
@@ -33,10 +33,13 @@
#include <boost/optional.hpp>
#include "mongo/base/disallow_copying.h"
-#include "mongo/db/concurrency/d_concurrency.h"
+#include "mongo/db/concurrency/lock_manager_defs.h"
#include "mongo/util/time_support.h"
namespace mongo {
+
+class OperationContext;
+
namespace repl {
/**
@@ -47,14 +50,7 @@ class ReplicationStateTransitionLockGuard {
MONGO_DISALLOW_COPYING(ReplicationStateTransitionLockGuard);
public:
- struct Args {
- // How long to wait for the RSTL in mode X.
- Date_t lockDeadline = Date_t::max();
-
- // If true, will kill all user operations in between enqueuing the RSTL request and waiting
- // for it to be granted.
- bool killUserOperations = false;
- };
+ class EnqueueOnly {};
/**
* Acquires the RSTL in mode X.
@@ -62,27 +58,37 @@ public:
ReplicationStateTransitionLockGuard(OperationContext* opCtx);
/**
- * Acquires the RSTL in mode X and performs any other required actions according to the Args
- * provided.
+ * Enqueues RSTL in mode X but does not block on lock acquisition.
+ * Must call waitForLockUntil() to complete locking process.
*/
- ReplicationStateTransitionLockGuard(OperationContext* opCtx, const Args& args);
+ ReplicationStateTransitionLockGuard(OperationContext* opCtx, EnqueueOnly);
+
+ ReplicationStateTransitionLockGuard(ReplicationStateTransitionLockGuard&&);
+ ReplicationStateTransitionLockGuard& operator=(ReplicationStateTransitionLockGuard&&) = delete;
~ReplicationStateTransitionLockGuard();
/**
- * Temporarily releases the RSTL in mode X. Must be followed by a call to reacquireRSTL().
+ * Waits for RSTL to be granted.
*/
- void releaseRSTL();
+ void waitForLockUntil(Date_t deadline);
/**
- * Re-acquires the RSTL in mode X after it was released via a call to releaseRSTL. Ignores
- * the configured 'lockDeadline' and instead waits forever for the lock to be acquired.
+ * Release and reacquire the RSTL in mode X.
*/
- void reacquireRSTL();
+ void release();
+ void reacquire();
+
+ bool isLocked() const {
+ return _result == LOCK_OK;
+ }
private:
+ void _enqueueLock();
+ void _unlock();
+
OperationContext* const _opCtx;
- Args _args;
+ LockResult _result;
};
} // namespace repl
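
For orientation, a minimal usage sketch of the refactored guard declared above (illustrative only, not part of this commit; the helper function name and its 'deadline' argument are hypothetical):

    #include "mongo/db/concurrency/replication_state_transition_lock_guard.h"
    #include "mongo/db/operation_context.h"
    #include "mongo/util/time_support.h"

    namespace mongo {
    namespace repl {

    // Hypothetical helper: acquire the RSTL in mode X in two phases.
    void acquireRstlForStateTransition(OperationContext* opCtx, Date_t deadline) {
        // Phase 1: enqueue the RSTL request in mode X without blocking.
        ReplicationStateTransitionLockGuard rstlLock(
            opCtx, ReplicationStateTransitionLockGuard::EnqueueOnly());

        // Work that must happen before the lock is granted goes here. Per the
        // comment in _unlock(), this window must not throw, because the guard
        // must never be destroyed while its request is still LOCK_WAITING.

        // Phase 2: block until the lock is granted; throws ExceededTimeLimit if
        // the deadline passes first.
        rstlLock.waitForLockUntil(deadline);

        // rstlLock.isLocked() is now true; the destructor releases the RSTL when
        // the guard goes out of scope.
    }

    }  // namespace repl
    }  // namespace mongo
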
diff --git a/src/mongo/db/repl/SConscript b/src/mongo/db/repl/SConscript
index 1221abc160d..fb8a4a18619 100644
--- a/src/mongo/db/repl/SConscript
+++ b/src/mongo/db/repl/SConscript
@@ -595,13 +595,13 @@ env.Library(
],
LIBDEPS=[
'$BUILD_DIR/mongo/db/concurrency/lock_manager',
+ '$BUILD_DIR/mongo/db/kill_sessions_local',
'$BUILD_DIR/mongo/db/s/sharding_runtime_d',
'$BUILD_DIR/mongo/db/service_context',
'$BUILD_DIR/mongo/db/write_ops',
'$BUILD_DIR/mongo/util/net/network',
'optime',
'repl_coordinator_interface',
- 'repl_state_transition_lock_guard',
'roll_back_local_operations',
],
LIBDEPS_PRIVATE=[
@@ -679,7 +679,6 @@ env.Library(
'oplogreader',
'repl_coordinator_interface',
'repl_settings',
- 'repl_state_transition_lock_guard',
'storage_interface',
],
LIBDEPS_PRIVATE=[
@@ -812,16 +811,6 @@ env.CppUnitTest(
)
env.Library(
- target='repl_state_transition_lock_guard',
- source=[
- 'replication_state_transition_lock_guard.cpp',
- ],
- LIBDEPS=[
- '$BUILD_DIR/mongo/db/kill_sessions_local',
- ]
-)
-
-env.Library(
target='repl_coordinator_impl',
source=[
'check_quorum_for_config_change.cpp',
@@ -848,7 +837,6 @@ env.Library(
'data_replicator_external_state_initial_sync',
'repl_coordinator_interface',
'repl_settings',
- 'repl_state_transition_lock_guard',
'replica_set_messages',
'replication_process',
'reporter',
diff --git a/src/mongo/db/repl/bgsync.cpp b/src/mongo/db/repl/bgsync.cpp
index 7fff33e3aee..ca02b597752 100644
--- a/src/mongo/db/repl/bgsync.cpp
+++ b/src/mongo/db/repl/bgsync.cpp
@@ -41,6 +41,7 @@
#include "mongo/db/auth/authorization_session.h"
#include "mongo/db/client.h"
#include "mongo/db/commands/test_commands_enabled.h"
+#include "mongo/db/concurrency/replication_state_transition_lock_guard.h"
#include "mongo/db/concurrency/write_conflict_exception.h"
#include "mongo/db/dbhelpers.h"
#include "mongo/db/repl/data_replicator_external_state_impl.h"
@@ -50,7 +51,6 @@
#include "mongo/db/repl/replication_coordinator.h"
#include "mongo/db/repl/replication_coordinator_impl.h"
#include "mongo/db/repl/replication_process.h"
-#include "mongo/db/repl/replication_state_transition_lock_guard.h"
#include "mongo/db/repl/rollback_source_impl.h"
#include "mongo/db/repl/rs_rollback.h"
#include "mongo/db/repl/storage_interface.h"
diff --git a/src/mongo/db/repl/replication_coordinator_impl.cpp b/src/mongo/db/repl/replication_coordinator_impl.cpp
index 8a8739db0c5..65ecfe92aa5 100644
--- a/src/mongo/db/repl/replication_coordinator_impl.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl.cpp
@@ -46,7 +46,9 @@
#include "mongo/db/commands.h"
#include "mongo/db/commands/test_commands_enabled.h"
#include "mongo/db/concurrency/d_concurrency.h"
+#include "mongo/db/concurrency/replication_state_transition_lock_guard.h"
#include "mongo/db/index/index_descriptor.h"
+#include "mongo/db/kill_sessions_local.h"
#include "mongo/db/logical_clock.h"
#include "mongo/db/logical_time.h"
#include "mongo/db/logical_time_validator.h"
@@ -63,7 +65,6 @@
#include "mongo/db/repl/repl_set_request_votes_args.h"
#include "mongo/db/repl/repl_settings.h"
#include "mongo/db/repl/replication_process.h"
-#include "mongo/db/repl/replication_state_transition_lock_guard.h"
#include "mongo/db/repl/rslog.h"
#include "mongo/db/repl/storage_interface.h"
#include "mongo/db/repl/topology_coordinator.h"
@@ -1732,6 +1733,17 @@ void ReplicationCoordinatorImpl::waitForStepDownAttempt_forTest() {
}
}
+void ReplicationCoordinatorImpl::_killOperationsOnStepDown(OperationContext* opCtx) {
+ ServiceContext* environment = opCtx->getServiceContext();
+ environment->killAllUserOperations(opCtx, ErrorCodes::InterruptedDueToStepDown);
+
+ // Destroy all stashed transaction resources, in order to release locks.
+ SessionKiller::Matcher matcherAllSessions(
+ KillAllSessionsByPatternSet{makeKillAllSessionsByPattern(opCtx)});
+ killSessionsLocalKillTransactions(
+ opCtx, matcherAllSessions, ErrorCodes::InterruptedDueToStepDown);
+}
+
void ReplicationCoordinatorImpl::stepDown(OperationContext* opCtx,
const bool force,
const Milliseconds& waitTime,
@@ -1746,17 +1758,17 @@ void ReplicationCoordinatorImpl::stepDown(OperationContext* opCtx,
// from acquiring the global X lock unnecessarily.
uassert(ErrorCodes::NotMaster, "not primary so can't step down", getMemberState().primary());
- ReplicationStateTransitionLockGuard::Args transitionArgs;
+ ReplicationStateTransitionLockGuard rstlLock(
+ opCtx, ReplicationStateTransitionLockGuard::EnqueueOnly());
+
// Kill all user operations to help us get the global lock faster, as well as to ensure that
// operations that are no longer safe to run (like writes) get killed.
- transitionArgs.killUserOperations = true;
+ _killOperationsOnStepDown(opCtx);
+
// Using 'force' sets the default for the wait time to zero, which means the stepdown will
// fail if it does not acquire the lock immediately. In such a scenario, we use the
// stepDownUntil deadline instead.
- transitionArgs.lockDeadline = force ? stepDownUntil : waitUntil;
-
- ReplicationStateTransitionLockGuard transitionGuard(opCtx, transitionArgs);
- invariant(opCtx->lockState()->isRSTLExclusive());
+ rstlLock.waitForLockUntil(force ? stepDownUntil : waitUntil);
stdx::unique_lock<stdx::mutex> lk(_mutex);
@@ -1824,7 +1836,7 @@ void ReplicationCoordinatorImpl::stepDown(OperationContext* opCtx,
// The stepdown attempt failed. We now release the RSTL to allow secondaries to read the
// oplog, then wait until enough secondaries are caught up for us to finish stepdown.
- transitionGuard.releaseRSTL();
+ rstlLock.release();
invariant(!opCtx->lockState()->isLocked());
// Make sure we re-acquire the RSTL before returning so that we're always holding the
@@ -1840,8 +1852,10 @@ void ReplicationCoordinatorImpl::stepDown(OperationContext* opCtx,
// clean up a failed stepdown attempt, we might as well spend whatever time we need
// to acquire it now. For the same reason, we also disable lock acquisition
// interruption, to guarantee that we get the lock eventually.
- transitionGuard.reacquireRSTL();
+ UninterruptibleLockGuard noInterrupt(opCtx->lockState());
+ rstlLock.reacquire();
invariant(opCtx->lockState()->isRSTLExclusive());
+
lk.lock();
});
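
Design note on the caller changes above: the kill-user-operations behavior that used to be driven by Args::killUserOperations inside the guard's constructor now lives in ReplicationCoordinatorImpl::_killOperationsOnStepDown(), which the coordinator calls explicitly between enqueueing the RSTL and waiting for it. A condensed sketch of the ordering used in this diff ('deadline' stands in for 'force ? stepDownUntil : waitUntil'):

    ReplicationStateTransitionLockGuard rstlLock(
        opCtx, ReplicationStateTransitionLockGuard::EnqueueOnly());
    _killOperationsOnStepDown(opCtx);     // kill user ops, abort stashed transactions
    rstlLock.waitForLockUntil(deadline);  // _stepDownFinish() passes Date_t::max() here
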
diff --git a/src/mongo/db/repl/replication_coordinator_impl.h b/src/mongo/db/repl/replication_coordinator_impl.h
index f4aa0b4146b..ab6e2c4dd62 100644
--- a/src/mongo/db/repl/replication_coordinator_impl.h
+++ b/src/mongo/db/repl/replication_coordinator_impl.h
@@ -924,6 +924,11 @@ private:
executor::TaskExecutor::EventHandle _stepDownStart();
/**
+ * Kills user operations and aborts unprepared transactions.
+ */
+ void _killOperationsOnStepDown(OperationContext* opCtx);
+
+ /**
* Completes a step-down of the current node. Must be run with a global
* shared or global exclusive lock.
* Signals 'finishedEvent' on successful completion.
diff --git a/src/mongo/db/repl/replication_coordinator_impl_elect_v1_test.cpp b/src/mongo/db/repl/replication_coordinator_impl_elect_v1_test.cpp
index 4936aec01aa..bed6596e6d1 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_elect_v1_test.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_elect_v1_test.cpp
@@ -32,6 +32,7 @@
#include "mongo/platform/basic.h"
+#include "mongo/db/concurrency/replication_state_transition_lock_guard.h"
#include "mongo/db/jsobj.h"
#include "mongo/db/operation_context_noop.h"
#include "mongo/db/repl/is_master_response.h"
@@ -41,7 +42,6 @@
#include "mongo/db/repl/replication_coordinator_external_state_mock.h"
#include "mongo/db/repl/replication_coordinator_impl.h"
#include "mongo/db/repl/replication_coordinator_test_fixture.h"
-#include "mongo/db/repl/replication_state_transition_lock_guard.h"
#include "mongo/db/repl/topology_coordinator.h"
#include "mongo/executor/network_interface_mock.h"
#include "mongo/unittest/unittest.h"
diff --git a/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp b/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp
index 72fc9a9cbfe..dafe36af09c 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp
@@ -39,6 +39,7 @@
#include <algorithm>
#include "mongo/base/status.h"
+#include "mongo/db/concurrency/replication_state_transition_lock_guard.h"
#include "mongo/db/logical_clock.h"
#include "mongo/db/logical_time_validator.h"
#include "mongo/db/operation_context.h"
@@ -48,7 +49,6 @@
#include "mongo/db/repl/repl_set_heartbeat_response.h"
#include "mongo/db/repl/replication_coordinator_impl.h"
#include "mongo/db/repl/replication_process.h"
-#include "mongo/db/repl/replication_state_transition_lock_guard.h"
#include "mongo/db/repl/topology_coordinator.h"
#include "mongo/db/repl/vote_requester.h"
#include "mongo/db/service_context.h"
@@ -381,11 +381,13 @@ void ReplicationCoordinatorImpl::_stepDownFinish(
}
auto opCtx = cc().makeOperationContext();
- ReplicationStateTransitionLockGuard::Args transitionArgs;
+
+ ReplicationStateTransitionLockGuard rstlLock(
+ opCtx.get(), ReplicationStateTransitionLockGuard::EnqueueOnly());
// Kill all user operations to help us get the global lock faster, as well as to ensure that
// operations that are no longer safe to run (like writes) get killed.
- transitionArgs.killUserOperations = true;
- ReplicationStateTransitionLockGuard transitionGuard(opCtx.get(), transitionArgs);
+ _killOperationsOnStepDown(opCtx.get());
+ rstlLock.waitForLockUntil(Date_t::max());
stdx::unique_lock<stdx::mutex> lk(_mutex);
diff --git a/src/mongo/db/repl/replication_coordinator_impl_test.cpp b/src/mongo/db/repl/replication_coordinator_impl_test.cpp
index dcd620715f5..0674d4a88d4 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_test.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_test.cpp
@@ -40,6 +40,7 @@
#include "mongo/bson/util/bson_extract.h"
#include "mongo/db/concurrency/lock_state.h"
+#include "mongo/db/concurrency/replication_state_transition_lock_guard.h"
#include "mongo/db/operation_context_noop.h"
#include "mongo/db/repl/bson_extract_optime.h"
#include "mongo/db/repl/is_master_response.h"
@@ -54,7 +55,6 @@
#include "mongo/db/repl/replication_coordinator_external_state_mock.h"
#include "mongo/db/repl/replication_coordinator_impl.h"
#include "mongo/db/repl/replication_coordinator_test_fixture.h"
-#include "mongo/db/repl/replication_state_transition_lock_guard.h"
#include "mongo/db/repl/storage_interface_mock.h"
#include "mongo/db/repl/topology_coordinator.h"
#include "mongo/db/repl/update_position_args.h"
diff --git a/src/mongo/db/repl/rollback_impl.cpp b/src/mongo/db/repl/rollback_impl.cpp
index 7c7bc94cf38..8e2a8a436b6 100644
--- a/src/mongo/db/repl/rollback_impl.cpp
+++ b/src/mongo/db/repl/rollback_impl.cpp
@@ -40,6 +40,7 @@
#include "mongo/db/catalog/uuid_catalog.h"
#include "mongo/db/commands.h"
#include "mongo/db/concurrency/d_concurrency.h"
+#include "mongo/db/concurrency/replication_state_transition_lock_guard.h"
#include "mongo/db/db_raii.h"
#include "mongo/db/kill_sessions_local.h"
#include "mongo/db/logical_time_validator.h"
@@ -48,7 +49,6 @@
#include "mongo/db/repl/drop_pending_collection_reaper.h"
#include "mongo/db/repl/replication_coordinator.h"
#include "mongo/db/repl/replication_process.h"
-#include "mongo/db/repl/replication_state_transition_lock_guard.h"
#include "mongo/db/repl/roll_back_local_operations.h"
#include "mongo/db/repl/storage_interface.h"
#include "mongo/db/s/shard_identity_rollback_notifier.h"
diff --git a/src/mongo/db/repl/rs_rollback.cpp b/src/mongo/db/repl/rs_rollback.cpp
index 6896955ec78..ba0720d6a5d 100644
--- a/src/mongo/db/repl/rs_rollback.cpp
+++ b/src/mongo/db/repl/rs_rollback.cpp
@@ -49,6 +49,7 @@
#include "mongo/db/catalog_raii.h"
#include "mongo/db/client.h"
#include "mongo/db/commands.h"
+#include "mongo/db/concurrency/replication_state_transition_lock_guard.h"
#include "mongo/db/concurrency/write_conflict_exception.h"
#include "mongo/db/db_raii.h"
#include "mongo/db/dbhelpers.h"
@@ -66,7 +67,6 @@
#include "mongo/db/repl/replication_coordinator.h"
#include "mongo/db/repl/replication_coordinator_impl.h"
#include "mongo/db/repl/replication_process.h"
-#include "mongo/db/repl/replication_state_transition_lock_guard.h"
#include "mongo/db/repl/roll_back_local_operations.h"
#include "mongo/db/repl/rollback_source.h"
#include "mongo/db/repl/rslog.h"
diff --git a/src/mongo/db/repl/sync_tail.cpp b/src/mongo/db/repl/sync_tail.cpp
index e6730e59f3b..45468807acc 100644
--- a/src/mongo/db/repl/sync_tail.cpp
+++ b/src/mongo/db/repl/sync_tail.cpp
@@ -52,6 +52,7 @@
#include "mongo/db/commands/server_status_metric.h"
#include "mongo/db/concurrency/d_concurrency.h"
#include "mongo/db/concurrency/lock_state.h"
+#include "mongo/db/concurrency/replication_state_transition_lock_guard.h"
#include "mongo/db/concurrency/write_conflict_exception.h"
#include "mongo/db/curop.h"
#include "mongo/db/db_raii.h"
@@ -68,7 +69,6 @@
#include "mongo/db/repl/repl_client_info.h"
#include "mongo/db/repl/repl_set_config.h"
#include "mongo/db/repl/replication_coordinator.h"
-#include "mongo/db/repl/replication_state_transition_lock_guard.h"
#include "mongo/db/repl/session_update_tracker.h"
#include "mongo/db/service_context.h"
#include "mongo/db/session.h"