author     Justin Seyster <justin.seyster@mongodb.com>  2018-03-30 15:25:47 -0400
committer  Justin Seyster <justin.seyster@mongodb.com>  2018-04-12 16:05:53 -0400
commit     bc19d43fdc4aab85264def96f638128c0ddb8483 (patch)
tree       09b5a9a15982edd4998d4665d6c67f8fbd7c83e7 /src/mongo/db
parent     c6620182aebd1b62d31879ce4d9456ff197aea22 (diff)
download   mongo-bc19d43fdc4aab85264def96f638128c0ddb8483.tar.gz
SERVER-27534 All writing operations must fail if the term changes.
This SERVER ticket describes a nasty race that can occur if an election happens in between two batches during a large update. (The included test confirms that the race is possible.) To fix this, we want to check the operation context for interrupts with each batch, and the check must happen _after_ the collection lock is taken and before the batch of inserts/updates/deletes executes.

A recent change to locking gives us almost exactly this for free: if a collection lock has to wait, it throws an exception when the operation context is interrupted, ending the operation. If the lock doesn't have to wait, though, there is no check. This patch adds that check, so acquiring a lock now always throws if the operation context is interrupted, which closes the race window in this bug.
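In practice, the new contract means that callers which must take locks even after their operation has been interrupted (cleanup paths, for example) have to opt out explicitly. A minimal sketch of both sides of the contract, modeled on the tests and call sites in this patch; doCleanup() is a hypothetical caller, while Lock::DBLock, UninterruptibleLockGuard, and OperationContext are the real types the patch touches:

    // Sketch only -- illustrates the intended behavior after this change.
    void doCleanup(OperationContext* opCtx) {
        // The operation may already have been interrupted (e.g. killed during an
        // election) by the time cleanup runs. Without the guard below, the DBLock
        // constructor would throw ErrorCodes::Interrupted even if the lock is free.
        UninterruptibleLockGuard noInterrupt(opCtx->lockState());
        Lock::DBLock dbLock(opCtx, "db", MODE_X);  // Does not throw despite the interrupt.
        // ... cleanup work that must run to completion ...
    }

Ordinary operations need no change: their next lock acquisition after an interrupt (for example, at the start of the next batch of a multi-batch write) now fails with ErrorCodes::Interrupted whether or not the lock is contended.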
Diffstat (limited to 'src/mongo/db')
-rw-r--r--  src/mongo/db/catalog/rename_collection.cpp              |  3
-rw-r--r--  src/mongo/db/commands/getmore_cmd.cpp                   |  3
-rw-r--r--  src/mongo/db/concurrency/d_concurrency_test.cpp         | 74
-rw-r--r--  src/mongo/db/concurrency/deadlock_detection_test.cpp    | 44
-rw-r--r--  src/mongo/db/concurrency/lock_state.cpp                 | 18
-rw-r--r--  src/mongo/db/concurrency/lock_state.h                   |  6
-rw-r--r--  src/mongo/db/concurrency/lock_state_test.cpp            |  8
-rw-r--r--  src/mongo/db/concurrency/lock_stats_test.cpp            |  2
-rw-r--r--  src/mongo/db/ops/write_ops_exec.cpp                     |  8
-rw-r--r--  src/mongo/db/repair_database.cpp                        |  3
-rw-r--r--  src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp |  3
-rw-r--r--  src/mongo/db/service_entry_point_common.cpp             |  3
12 files changed, 140 insertions, 35 deletions
diff --git a/src/mongo/db/catalog/rename_collection.cpp b/src/mongo/db/catalog/rename_collection.cpp
index 8aa634a14c5..4d6d08753a5 100644
--- a/src/mongo/db/catalog/rename_collection.cpp
+++ b/src/mongo/db/catalog/rename_collection.cpp
@@ -332,6 +332,9 @@ Status renameCollectionCommon(OperationContext* opCtx,
// Dismissed on success
auto tmpCollectionDropper = MakeGuard([&] {
+ // Ensure that we don't trigger an exception when attempting to take locks.
+ UninterruptibleLockGuard noInterrupt(opCtx->lockState());
+
BSONObjBuilder unusedResult;
auto status =
dropCollection(opCtx,
diff --git a/src/mongo/db/commands/getmore_cmd.cpp b/src/mongo/db/commands/getmore_cmd.cpp
index e62e3667afd..be4d0451fd5 100644
--- a/src/mongo/db/commands/getmore_cmd.cpp
+++ b/src/mongo/db/commands/getmore_cmd.cpp
@@ -294,6 +294,9 @@ public:
// Only used by the failpoints.
const auto dropAndReaquireReadLock = [&readLock, opCtx, &request]() {
+ // Make sure an interrupted operation does not prevent us from reacquiring the lock.
+ UninterruptibleLockGuard noInterrupt(opCtx->lockState());
+
readLock.reset();
readLock.emplace(opCtx, request.nss);
};
diff --git a/src/mongo/db/concurrency/d_concurrency_test.cpp b/src/mongo/db/concurrency/d_concurrency_test.cpp
index 902fe3c2fa9..4058836c478 100644
--- a/src/mongo/db/concurrency/d_concurrency_test.cpp
+++ b/src/mongo/db/concurrency/d_concurrency_test.cpp
@@ -1176,6 +1176,80 @@ TEST_F(DConcurrencyTestFixture, TicketReacquireCanBeInterrupted) {
ASSERT_THROWS_CODE(result.get(), AssertionException, ErrorCodes::Interrupted);
}
+TEST_F(DConcurrencyTestFixture, GlobalLockInInterruptedContextThrowsEvenWhenUncontested) {
+ auto clients = makeKClientsWithLockers<DefaultLockerImpl>(1);
+ auto opCtx = clients[0].second.get();
+
+ opCtx->markKilled();
+
+ boost::optional<Lock::GlobalRead> globalReadLock;
+ ASSERT_THROWS_CODE(
+ globalReadLock.emplace(opCtx, Date_t::now()), AssertionException, ErrorCodes::Interrupted);
+}
+
+TEST_F(DConcurrencyTestFixture, GlobalLockInInterruptedContextThrowsEvenAcquiringRecursively) {
+ auto clients = makeKClientsWithLockers<DefaultLockerImpl>(1);
+ auto opCtx = clients[0].second.get();
+
+ Lock::GlobalWrite globalWriteLock(opCtx, Date_t::now());
+
+ opCtx->markKilled();
+
+ {
+ boost::optional<Lock::GlobalWrite> recursiveGlobalWriteLock;
+ ASSERT_THROWS_CODE(recursiveGlobalWriteLock.emplace(opCtx, Date_t::now()),
+ AssertionException,
+ ErrorCodes::Interrupted);
+ }
+}
+
+TEST_F(DConcurrencyTestFixture, GlobalLockInInterruptedContextRespectsUninterruptibleGuard) {
+ auto clients = makeKClientsWithLockers<DefaultLockerImpl>(1);
+ auto opCtx = clients[0].second.get();
+
+ opCtx->markKilled();
+
+ UninterruptibleLockGuard noInterrupt(opCtx->lockState());
+ Lock::GlobalRead globalReadLock(opCtx, Date_t::now()); // Does not throw.
+}
+
+TEST_F(DConcurrencyTestFixture, DBLockInInterruptedContextThrowsEvenWhenUncontested) {
+ auto clients = makeKClientsWithLockers<DefaultLockerImpl>(1);
+ auto opCtx = clients[0].second.get();
+
+ opCtx->markKilled();
+
+ boost::optional<Lock::DBLock> dbWriteLock;
+ ASSERT_THROWS_CODE(
+ dbWriteLock.emplace(opCtx, "db", MODE_IX), AssertionException, ErrorCodes::Interrupted);
+}
+
+TEST_F(DConcurrencyTestFixture, DBLockInInterruptedContextThrowsEvenWhenAcquiringRecursively) {
+ auto clients = makeKClientsWithLockers<DefaultLockerImpl>(1);
+ auto opCtx = clients[0].second.get();
+
+ Lock::DBLock dbWriteLock(opCtx, "db", MODE_X);
+
+ opCtx->markKilled();
+
+ {
+ boost::optional<Lock::DBLock> recursiveDBWriteLock;
+ ASSERT_THROWS_CODE(recursiveDBWriteLock.emplace(opCtx, "db", MODE_X),
+ AssertionException,
+ ErrorCodes::Interrupted);
+ }
+}
+
+TEST_F(DConcurrencyTestFixture, DBLockInInterruptedContextRespectsUninterruptibleGuard) {
+ auto clients = makeKClientsWithLockers<DefaultLockerImpl>(1);
+ auto opCtx = clients[0].second.get();
+
+ opCtx->markKilled();
+
+ UninterruptibleLockGuard noInterrupt(opCtx->lockState());
+ Lock::DBLock dbWriteLock(opCtx, "db", MODE_X); // Does not throw.
+}
+
TEST_F(DConcurrencyTestFixture, DBLockTimeout) {
auto clientOpctxPairs = makeKClientsWithLockers<DefaultLockerImpl>(2);
auto opctx1 = clientOpctxPairs[0].second.get();
diff --git a/src/mongo/db/concurrency/deadlock_detection_test.cpp b/src/mongo/db/concurrency/deadlock_detection_test.cpp
index ce29fd37b01..ead27261e1a 100644
--- a/src/mongo/db/concurrency/deadlock_detection_test.cpp
+++ b/src/mongo/db/concurrency/deadlock_detection_test.cpp
@@ -37,8 +37,8 @@ TEST(Deadlock, NoDeadlock) {
LockerForTests locker1(MODE_IS);
LockerForTests locker2(MODE_IS);
- ASSERT_EQUALS(LOCK_OK, locker1.lockBegin(resId, MODE_S));
- ASSERT_EQUALS(LOCK_OK, locker2.lockBegin(resId, MODE_S));
+ ASSERT_EQUALS(LOCK_OK, locker1.lockBegin(nullptr, resId, MODE_S));
+ ASSERT_EQUALS(LOCK_OK, locker2.lockBegin(nullptr, resId, MODE_S));
DeadlockDetector wfg1(*getGlobalLockManager(), &locker1);
ASSERT(!wfg1.check().hasCycle());
@@ -54,14 +54,14 @@ TEST(Deadlock, Simple) {
LockerForTests locker1(MODE_IX);
LockerForTests locker2(MODE_IX);
- ASSERT_EQUALS(LOCK_OK, locker1.lockBegin(resIdA, MODE_X));
- ASSERT_EQUALS(LOCK_OK, locker2.lockBegin(resIdB, MODE_X));
+ ASSERT_EQUALS(LOCK_OK, locker1.lockBegin(nullptr, resIdA, MODE_X));
+ ASSERT_EQUALS(LOCK_OK, locker2.lockBegin(nullptr, resIdB, MODE_X));
// 1 -> 2
- ASSERT_EQUALS(LOCK_WAITING, locker1.lockBegin(resIdB, MODE_X));
+ ASSERT_EQUALS(LOCK_WAITING, locker1.lockBegin(nullptr, resIdB, MODE_X));
// 2 -> 1
- ASSERT_EQUALS(LOCK_WAITING, locker2.lockBegin(resIdA, MODE_X));
+ ASSERT_EQUALS(LOCK_WAITING, locker2.lockBegin(nullptr, resIdA, MODE_X));
DeadlockDetector wfg1(*getGlobalLockManager(), &locker1);
ASSERT(wfg1.check().hasCycle());
@@ -81,12 +81,12 @@ TEST(Deadlock, SimpleUpgrade) {
LockerForTests locker2(MODE_IX);
// Both acquire lock in intent mode
- ASSERT_EQUALS(LOCK_OK, locker1.lockBegin(resId, MODE_IX));
- ASSERT_EQUALS(LOCK_OK, locker2.lockBegin(resId, MODE_IX));
+ ASSERT_EQUALS(LOCK_OK, locker1.lockBegin(nullptr, resId, MODE_IX));
+ ASSERT_EQUALS(LOCK_OK, locker2.lockBegin(nullptr, resId, MODE_IX));
// Both try to upgrade
- ASSERT_EQUALS(LOCK_WAITING, locker1.lockBegin(resId, MODE_X));
- ASSERT_EQUALS(LOCK_WAITING, locker2.lockBegin(resId, MODE_X));
+ ASSERT_EQUALS(LOCK_WAITING, locker1.lockBegin(nullptr, resId, MODE_X));
+ ASSERT_EQUALS(LOCK_WAITING, locker2.lockBegin(nullptr, resId, MODE_X));
DeadlockDetector wfg1(*getGlobalLockManager(), &locker1);
ASSERT(wfg1.check().hasCycle());
@@ -107,17 +107,17 @@ TEST(Deadlock, Indirect) {
LockerForTests locker2(MODE_IX);
LockerForTests lockerIndirect(MODE_IX);
- ASSERT_EQUALS(LOCK_OK, locker1.lockBegin(resIdA, MODE_X));
- ASSERT_EQUALS(LOCK_OK, locker2.lockBegin(resIdB, MODE_X));
+ ASSERT_EQUALS(LOCK_OK, locker1.lockBegin(nullptr, resIdA, MODE_X));
+ ASSERT_EQUALS(LOCK_OK, locker2.lockBegin(nullptr, resIdB, MODE_X));
// 1 -> 2
- ASSERT_EQUALS(LOCK_WAITING, locker1.lockBegin(resIdB, MODE_X));
+ ASSERT_EQUALS(LOCK_WAITING, locker1.lockBegin(nullptr, resIdB, MODE_X));
// 2 -> 1
- ASSERT_EQUALS(LOCK_WAITING, locker2.lockBegin(resIdA, MODE_X));
+ ASSERT_EQUALS(LOCK_WAITING, locker2.lockBegin(nullptr, resIdA, MODE_X));
// 3 -> 2
- ASSERT_EQUALS(LOCK_WAITING, lockerIndirect.lockBegin(resIdA, MODE_X));
+ ASSERT_EQUALS(LOCK_WAITING, lockerIndirect.lockBegin(nullptr, resIdA, MODE_X));
DeadlockDetector wfg1(*getGlobalLockManager(), &locker1);
ASSERT(wfg1.check().hasCycle());
@@ -143,17 +143,17 @@ TEST(Deadlock, IndirectWithUpgrade) {
LockerForTests writer(MODE_IX);
// This sequence simulates the deadlock which occurs during flush
- ASSERT_EQUALS(LOCK_OK, writer.lockBegin(resIdFlush, MODE_IX));
- ASSERT_EQUALS(LOCK_OK, writer.lockBegin(resIdDb, MODE_X));
+ ASSERT_EQUALS(LOCK_OK, writer.lockBegin(nullptr, resIdFlush, MODE_IX));
+ ASSERT_EQUALS(LOCK_OK, writer.lockBegin(nullptr, resIdDb, MODE_X));
- ASSERT_EQUALS(LOCK_OK, reader.lockBegin(resIdFlush, MODE_IS));
+ ASSERT_EQUALS(LOCK_OK, reader.lockBegin(nullptr, resIdFlush, MODE_IS));
// R -> W
- ASSERT_EQUALS(LOCK_WAITING, reader.lockBegin(resIdDb, MODE_S));
+ ASSERT_EQUALS(LOCK_WAITING, reader.lockBegin(nullptr, resIdDb, MODE_S));
// R -> W
// F -> W
- ASSERT_EQUALS(LOCK_WAITING, flush.lockBegin(resIdFlush, MODE_S));
+ ASSERT_EQUALS(LOCK_WAITING, flush.lockBegin(nullptr, resIdFlush, MODE_S));
// W yields its flush lock, so now f is granted in mode S
//
@@ -164,14 +164,14 @@ TEST(Deadlock, IndirectWithUpgrade) {
//
// R -> W
// F -> R
- ASSERT_EQUALS(LOCK_WAITING, flush.lockBegin(resIdFlush, MODE_X));
+ ASSERT_EQUALS(LOCK_WAITING, flush.lockBegin(nullptr, resIdFlush, MODE_X));
// W comes back from the commit and tries to re-acquire the flush lock
//
// R -> W
// F -> R
// W -> F
- ASSERT_EQUALS(LOCK_WAITING, writer.lockBegin(resIdFlush, MODE_IX));
+ ASSERT_EQUALS(LOCK_WAITING, writer.lockBegin(nullptr, resIdFlush, MODE_IX));
// Run deadlock detection from the point of view of each of the involved lockers
DeadlockDetector wfgF(*getGlobalLockManager(), &flush);
diff --git a/src/mongo/db/concurrency/lock_state.cpp b/src/mongo/db/concurrency/lock_state.cpp
index 17e79b24e61..0b1d58468ea 100644
--- a/src/mongo/db/concurrency/lock_state.cpp
+++ b/src/mongo/db/concurrency/lock_state.cpp
@@ -347,7 +347,7 @@ LockResult LockerImpl<IsForMMAPV1>::_lockGlobalBegin(OperationContext* opCtx,
}
_modeForTicket = mode;
}
- const LockResult result = lockBegin(resourceIdGlobal, mode);
+ const LockResult result = lockBegin(opCtx, resourceIdGlobal, mode);
if (result == LOCK_OK)
return LOCK_OK;
@@ -469,7 +469,7 @@ template <bool IsForMMAPV1>
LockResult LockerImpl<IsForMMAPV1>::lock(
OperationContext* opCtx, ResourceId resId, LockMode mode, Date_t deadline, bool checkDeadlock) {
- const LockResult result = lockBegin(resId, mode);
+ const LockResult result = lockBegin(opCtx, resId, mode);
// Fast, uncontended path
if (result == LOCK_OK)
@@ -713,7 +713,9 @@ void LockerImpl<IsForMMAPV1>::restoreLockState(OperationContext* opCtx,
}
template <bool IsForMMAPV1>
-LockResult LockerImpl<IsForMMAPV1>::lockBegin(ResourceId resId, LockMode mode) {
+LockResult LockerImpl<IsForMMAPV1>::lockBegin(OperationContext* opCtx,
+ ResourceId resId,
+ LockMode mode) {
dassert(!getWaitingResource().isValid());
LockRequest* request;
@@ -778,6 +780,16 @@ LockResult LockerImpl<IsForMMAPV1>::lockBegin(ResourceId resId, LockMode mode) {
if (result == LOCK_WAITING) {
globalStats.recordWait(_id, resId, mode);
_stats.recordWait(resId, mode);
+ } else if (result == LOCK_OK && opCtx && _uninterruptibleLocksRequested == 0) {
+ // Lock acquisitions are not allowed to succeed when opCtx is marked as interrupted, unless
+ // the caller requested an uninterruptible lock.
+ auto interruptStatus = opCtx->checkForInterruptNoAssert();
+ if (!interruptStatus.isOK()) {
+ auto unlockIt = _requests.find(resId);
+ invariant(unlockIt);
+ _unlockImpl(&unlockIt);
+ uassertStatusOK(interruptStatus);
+ }
}
return result;
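Note on the hunk above: the new check runs only when the request is granted immediately with LOCK_OK (per the commit message, the contended LOCK_WAITING path already reports the interrupt while waiting), so a grant made to an already-interrupted operation has to be undone before the error propagates. A standalone sketch of that grant-then-check-then-roll-back shape in plain standard C++, illustrative only and not MongoDB's lock manager:

    #include <mutex>
    #include <stdexcept>

    // Acquire optimistically; if the operation was interrupted in the meantime,
    // release the just-acquired resource before throwing so nothing is leaked.
    void acquireOrThrow(std::mutex& m, const bool& interrupted) {
        m.lock();            // uncontended fast path: the grant succeeds immediately
        if (interrupted) {   // post-grant interrupt check, as added in lockBegin()
            m.unlock();      // roll back the grant before reporting the interrupt
            throw std::runtime_error("operation interrupted");
        }
    }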
diff --git a/src/mongo/db/concurrency/lock_state.h b/src/mongo/db/concurrency/lock_state.h
index 669d94b8f50..162bf191617 100644
--- a/src/mongo/db/concurrency/lock_state.h
+++ b/src/mongo/db/concurrency/lock_state.h
@@ -202,10 +202,14 @@ public:
* In other words for each call to lockBegin, which does not return LOCK_OK, there needs to
* be a corresponding call to either lockComplete or unlock.
*
+ * If an operation context is provided that represents an interrupted operation, lockBegin will
+ * throw an exception whenever it would have been possible to grant the lock with LOCK_OK. This
+ * behavior can be disabled with an UninterruptibleLockGuard.
+ *
* NOTE: These methods are not public and should only be used inside the class
* implementation and for unit-tests and not called directly.
*/
- LockResult lockBegin(ResourceId resId, LockMode mode);
+ LockResult lockBegin(OperationContext* opCtx, ResourceId resId, LockMode mode);
/**
* Waits for the completion of a lock, previously requested through lockBegin or
diff --git a/src/mongo/db/concurrency/lock_state_test.cpp b/src/mongo/db/concurrency/lock_state_test.cpp
index 5e37fdf5e35..7e299d7cc7f 100644
--- a/src/mongo/db/concurrency/lock_state_test.cpp
+++ b/src/mongo/db/concurrency/lock_state_test.cpp
@@ -268,12 +268,12 @@ TEST(LockerImpl, CanceledDeadlockUnblocks) {
ASSERT(LOCK_OK == locker2.lock(db2, MODE_X));
// Set up locker1 and locker2 for deadlock
- ASSERT(LOCK_WAITING == locker1.lockBegin(db2, MODE_X));
- ASSERT(LOCK_WAITING == locker2.lockBegin(db1, MODE_X));
+ ASSERT(LOCK_WAITING == locker1.lockBegin(nullptr, db2, MODE_X));
+ ASSERT(LOCK_WAITING == locker2.lockBegin(nullptr, db1, MODE_X));
// Locker3 blocks behind locker 2
ASSERT(LOCK_OK == locker3.lockGlobal(MODE_IX));
- ASSERT(LOCK_WAITING == locker3.lockBegin(db1, MODE_S));
+ ASSERT(LOCK_WAITING == locker3.lockBegin(nullptr, db1, MODE_S));
// Detect deadlock, canceling our request
ASSERT(
@@ -442,7 +442,7 @@ TEST(LockerImpl, GetLockerInfoShouldReportPendingLocks) {
DefaultLockerImpl conflictingLocker;
ASSERT_EQ(LOCK_OK, conflictingLocker.lockGlobal(MODE_IS));
ASSERT_EQ(LOCK_OK, conflictingLocker.lock(dbId, MODE_IS));
- ASSERT_EQ(LOCK_WAITING, conflictingLocker.lockBegin(collectionId, MODE_IS));
+ ASSERT_EQ(LOCK_WAITING, conflictingLocker.lockBegin(nullptr, collectionId, MODE_IS));
// Assert the held locks show up in the output of getLockerInfo().
Locker::LockerInfo lockerInfo;
diff --git a/src/mongo/db/concurrency/lock_stats_test.cpp b/src/mongo/db/concurrency/lock_stats_test.cpp
index 1d064a23fcb..53237a5e0f0 100644
--- a/src/mongo/db/concurrency/lock_stats_test.cpp
+++ b/src/mongo/db/concurrency/lock_stats_test.cpp
@@ -63,7 +63,7 @@ TEST(LockStats, Wait) {
{
// This will block
LockerForTests lockerConflict(MODE_IX);
- ASSERT_EQUALS(LOCK_WAITING, lockerConflict.lockBegin(resId, MODE_S));
+ ASSERT_EQUALS(LOCK_WAITING, lockerConflict.lockBegin(nullptr, resId, MODE_S));
// Sleep 1 millisecond so the wait time passes
ASSERT_EQUALS(
diff --git a/src/mongo/db/ops/write_ops_exec.cpp b/src/mongo/db/ops/write_ops_exec.cpp
index 7152285fc0f..bfbeb06bc94 100644
--- a/src/mongo/db/ops/write_ops_exec.cpp
+++ b/src/mongo/db/ops/write_ops_exec.cpp
@@ -85,6 +85,7 @@ namespace {
MONGO_FP_DECLARE(failAllInserts);
MONGO_FP_DECLARE(failAllUpdates);
MONGO_FP_DECLARE(failAllRemoves);
+MONGO_FP_DECLARE(hangDuringBatchInsert);
void updateRetryStats(OperationContext* opCtx, bool containsRetry) {
if (containsRetry) {
@@ -368,7 +369,9 @@ bool insertBatchAndHandleErrors(OperationContext* opCtx,
boost::optional<AutoGetCollection> collection;
auto acquireCollection = [&] {
while (true) {
- opCtx->checkForInterrupt();
+ if (MONGO_FAIL_POINT(hangDuringBatchInsert)) {
+ MONGO_FAIL_POINT_PAUSE_WHILE_SET(hangDuringBatchInsert);
+ }
if (MONGO_FAIL_POINT(failAllInserts)) {
uasserted(ErrorCodes::InternalError, "failAllInserts failpoint active!");
@@ -606,7 +609,6 @@ static SingleWriteResult performSingleUpdateOp(OperationContext* opCtx,
boost::optional<AutoGetCollection> collection;
while (true) {
- opCtx->checkForInterrupt();
if (MONGO_FAIL_POINT(failAllUpdates)) {
uasserted(ErrorCodes::InternalError, "failAllUpdates failpoint active!");
}
@@ -762,8 +764,6 @@ static SingleWriteResult performSingleDeleteOp(OperationContext* opCtx,
ParsedDelete parsedDelete(opCtx, &request);
uassertStatusOK(parsedDelete.parseRequest());
- opCtx->checkForInterrupt();
-
if (MONGO_FAIL_POINT(failAllRemoves)) {
uasserted(ErrorCodes::InternalError, "failAllRemoves failpoint active!");
}
diff --git a/src/mongo/db/repair_database.cpp b/src/mongo/db/repair_database.cpp
index 2ff3eb85056..93773d7fdc4 100644
--- a/src/mongo/db/repair_database.cpp
+++ b/src/mongo/db/repair_database.cpp
@@ -269,6 +269,9 @@ Status repairDatabase(OperationContext* opCtx,
dbHolder().close(opCtx, dbName, "database closed for repair");
ON_BLOCK_EXIT([&dbName, &opCtx] {
try {
+ // Ensure that we don't trigger an exception when attempting to take locks.
+ UninterruptibleLockGuard noInterrupt(opCtx->lockState());
+
// Open the db after everything finishes.
auto db = dbHolder().openDb(opCtx, dbName);
diff --git a/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp b/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
index 220e2658a35..6eea5422c0a 100644
--- a/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
+++ b/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
@@ -538,6 +538,9 @@ void MigrationChunkClonerSourceLegacy::_cleanup(OperationContext* opCtx) {
}
if (_deleteNotifyExec) {
+ // Don't allow an Interrupt exception to prevent _deleteNotifyExec from getting cleaned up.
+ UninterruptibleLockGuard noInterrupt(opCtx->lockState());
+
AutoGetCollection autoColl(opCtx, _args.getNss(), MODE_IS);
const auto cursorManager =
autoColl.getCollection() ? autoColl.getCollection()->getCursorManager() : nullptr;
diff --git a/src/mongo/db/service_entry_point_common.cpp b/src/mongo/db/service_entry_point_common.cpp
index 43ab916f157..02431e9e396 100644
--- a/src/mongo/db/service_entry_point_common.cpp
+++ b/src/mongo/db/service_entry_point_common.cpp
@@ -1117,6 +1117,9 @@ DbResponse receivedGetMore(OperationContext* opCtx,
getMore(opCtx, ns, ntoreturn, cursorid, &exhaust, &isCursorAuthorized);
} catch (AssertionException& e) {
if (isCursorAuthorized) {
+ // Make sure that killCursorGlobal does not throw an exception if it is interrupted.
+ UninterruptibleLockGuard noInterrupt(opCtx->lockState());
+
// If a cursor with id 'cursorid' was authorized, it may have been advanced
// before an exception terminated processGetMore. Erase the ClientCursor
// because it may now be out of sync with the client's iteration state.