author    Ian Boros <ian.boros@10gen.com>  2018-02-26 14:11:30 -0500
committer Ian Boros <ian.boros@10gen.com>  2018-02-26 14:11:50 -0500
commit    d63f1a99ae31f71f37c53613cc30f751658e8453 (patch)
tree      8770a9971365810846cca0eb7f1d3a8be9c74707
parent    3d66f610c72c390f6adb654844e13ff42649ec26 (diff)
SERVER-32957 part 2: remove killPending flag and reapZombieCursors()
-rw-r--r--  src/mongo/s/query/cluster_cursor_cleanup_job.cpp    |   4
-rw-r--r--  src/mongo/s/query/cluster_cursor_manager.cpp        | 183
-rw-r--r--  src/mongo/s/query/cluster_cursor_manager.h          |  84
-rw-r--r--  src/mongo/s/query/cluster_cursor_manager_test.cpp   | 112
4 files changed, 182 insertions, 201 deletions
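At a glance, this change collapses the old two-pass timeout path (mark expired cursors as kill-pending, then reap them on a later reapZombieCursors() pass) into a single call that kills eligible cursors immediately and returns how many were timed out. The condensed view below is abridged from the cluster_cursor_cleanup_job.cpp hunk that follows; the enclosing run loop and shutdown handling are elided.

// Condensed view of the cleanup-job body after this change (abridged from the
// hunk below; the enclosing run loop is elided).
Date_t cutoff = (cursorTimeoutValue > 0)
    ? (Date_t::now() - Milliseconds(cursorTimeoutValue))
    : Date_t::now();

// One call now both kills the expired mortal cursors and reports how many were
// timed out, replacing the old killMortalCursorsInactiveSince() +
// reapZombieCursors() pair.
manager->incrementCursorsTimedOut(
    manager->killMortalCursorsInactiveSince(opCtx.get(), cutoff));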
diff --git a/src/mongo/s/query/cluster_cursor_cleanup_job.cpp b/src/mongo/s/query/cluster_cursor_cleanup_job.cpp
index 963f306b2b2..d1f0438e655 100644
--- a/src/mongo/s/query/cluster_cursor_cleanup_job.cpp
+++ b/src/mongo/s/query/cluster_cursor_cleanup_job.cpp
@@ -62,8 +62,8 @@ void ClusterCursorCleanupJob::run() {
Date_t cutoff = (cursorTimeoutValue > 0)
? (Date_t::now() - Milliseconds(cursorTimeoutValue))
: Date_t::now();
- manager->killMortalCursorsInactiveSince(cutoff);
- manager->incrementCursorsTimedOut(manager->reapZombieCursors(opCtx.get()));
+ manager->incrementCursorsTimedOut(
+ manager->killMortalCursorsInactiveSince(opCtx.get(), cutoff));
MONGO_IDLE_THREAD_BLOCK;
sleepsecs(getClientCursorMonitorFrequencySecs());
diff --git a/src/mongo/s/query/cluster_cursor_manager.cpp b/src/mongo/s/query/cluster_cursor_manager.cpp
index 6715964daa5..8d853eb3e57 100644
--- a/src/mongo/s/query/cluster_cursor_manager.cpp
+++ b/src/mongo/s/query/cluster_cursor_manager.cpp
@@ -189,8 +189,7 @@ void ClusterCursorManager::shutdown(OperationContext* opCtx) {
stdx::lock_guard<stdx::mutex> lk(_mutex);
_inShutdown = true;
}
- killAllCursors();
- reapZombieCursors(opCtx);
+ killAllCursors(opCtx);
}
StatusWith<CursorId> ClusterCursorManager::registerCursor(
@@ -271,9 +270,6 @@ StatusWith<ClusterCursorManager::PinnedCursor> ClusterCursorManager::checkOutCur
if (!entry) {
return cursorNotFoundStatus(nss, cursorId);
}
- if (entry->getKillPending()) {
- return cursorNotFoundStatus(nss, cursorId);
- }
// Check if the user is coauthorized to access this cursor.
auto authCheckStatus = authChecker(entry->getAuthenticatedUsers());
@@ -327,10 +323,8 @@ void ClusterCursorManager::checkInCursor(std::unique_ptr<ClusterClientCursor> cu
CursorEntry* entry = _getEntry(lk, nss, cursorId);
invariant(entry);
- // killPending will be true if killCursor() was called while the cursor was in use or if the
- // ClusterCursorCleanupJob decided that it expired.
- // TODO SERVER-32957: Remove the killPending flag.
- const bool killPending = entry->getKillPending();
+ // killPending will be true if killCursor() was called while the cursor was in use.
+ const bool killPending = entry->isKillPending();
entry->setLastActive(now);
entry->returnCursor(std::move(cursor));
@@ -361,6 +355,17 @@ Status ClusterCursorManager::checkAuthForKillCursors(OperationContext* opCtx,
return authChecker(entry->getAuthenticatedUsers());
}
+void ClusterCursorManager::killOperationUsingCursor(WithLock, CursorEntry* entry) {
+ invariant(entry->getOperationUsingCursor());
+ // Interrupt any operation currently using the cursor.
+ OperationContext* opUsingCursor = entry->getOperationUsingCursor();
+ stdx::lock_guard<Client> lk(*opUsingCursor->getClient());
+ opUsingCursor->getServiceContext()->killOperation(opUsingCursor, ErrorCodes::CursorKilled);
+
+ // Don't delete the cursor, as an operation is using it. It will be cleaned up when the
+ // operation is done.
+}
+
Status ClusterCursorManager::killCursor(OperationContext* opCtx,
const NamespaceString& nss,
CursorId cursorId) {
@@ -375,15 +380,10 @@ Status ClusterCursorManager::killCursor(OperationContext* opCtx,
// Interrupt any operation currently using the cursor, unless if it's the current operation.
OperationContext* opUsingCursor = entry->getOperationUsingCursor();
- entry->setKillPending();
if (opUsingCursor) {
// The caller shouldn't need to call killCursor on their own cursor.
invariant(opUsingCursor != opCtx, "Cannot call killCursor() on your own cursor");
-
- stdx::lock_guard<Client> lk(*opUsingCursor->getClient());
- opUsingCursor->getServiceContext()->killOperation(opUsingCursor, ErrorCodes::CursorKilled);
- // Don't delete the cursor, as an operation is using it. It will be cleaned up when the
- // operation is done.
+ killOperationUsingCursor(lk, entry);
return Status::OK();
}
@@ -408,83 +408,85 @@ void ClusterCursorManager::detachAndKillCursor(stdx::unique_lock<stdx::mutex> lk
detachedCursor.getValue().reset();
}
-void ClusterCursorManager::killMortalCursorsInactiveSince(Date_t cutoff) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+std::size_t ClusterCursorManager::killMortalCursorsInactiveSince(OperationContext* opCtx,
+ Date_t cutoff) {
+ stdx::unique_lock<stdx::mutex> lk(_mutex);
- for (auto& nsContainerPair : _namespaceToContainerMap) {
- for (auto& cursorIdEntryPair : nsContainerPair.second.entryMap) {
- CursorEntry& entry = cursorIdEntryPair.second;
- if (entry.getLifetimeType() == CursorLifetime::Mortal &&
- !entry.getOperationUsingCursor() && entry.getLastActive() <= cutoff) {
- entry.setInactive();
- log() << "Marking cursor id " << cursorIdEntryPair.first
- << " for deletion, idle since " << entry.getLastActive().toString();
- entry.setKillPending();
- }
+ auto pred = [cutoff](CursorId cursorId, const CursorEntry& entry) -> bool {
+ bool res = entry.getLifetimeType() == CursorLifetime::Mortal &&
+ !entry.getOperationUsingCursor() && entry.getLastActive() <= cutoff;
+
+ if (res) {
+ log() << "Marking cursor id " << cursorId << " for deletion, idle since "
+ << entry.getLastActive().toString();
}
- }
-}
-void ClusterCursorManager::killAllCursors() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ return res;
+ };
- for (auto& nsContainerPair : _namespaceToContainerMap) {
- for (auto& cursorIdEntryPair : nsContainerPair.second.entryMap) {
- cursorIdEntryPair.second.setKillPending();
- }
- }
+ return killCursorsSatisfying(std::move(lk), opCtx, std::move(pred));
}
-std::size_t ClusterCursorManager::reapZombieCursors(OperationContext* opCtx) {
- struct CursorDescriptor {
- CursorDescriptor(NamespaceString ns, CursorId cursorId, bool isInactive)
- : ns(std::move(ns)), cursorId(cursorId), isInactive(isInactive) {}
+void ClusterCursorManager::killAllCursors(OperationContext* opCtx) {
+ stdx::unique_lock<stdx::mutex> lk(_mutex);
+ auto pred = [](CursorId, const CursorEntry&) -> bool { return true; };
- NamespaceString ns;
- CursorId cursorId;
- bool isInactive;
- };
+ killCursorsSatisfying(std::move(lk), opCtx, std::move(pred));
+}
- // List all zombie cursors under the manager lock, and kill them one-by-one while not holding
- // the lock (ClusterClientCursor::kill() is blocking, so we don't want to hold a lock while
- // issuing the kill).
+std::size_t ClusterCursorManager::killCursorsSatisfying(
+ stdx::unique_lock<stdx::mutex> lk,
+ OperationContext* opCtx,
+ std::function<bool(CursorId, const CursorEntry&)> pred) {
+ invariant(opCtx);
+ invariant(lk.owns_lock());
+ std::size_t nKilled = 0;
+
+ std::vector<std::unique_ptr<ClusterClientCursor>> cursorsToDestroy;
+ auto nsContainerIt = _namespaceToContainerMap.begin();
+ while (nsContainerIt != _namespaceToContainerMap.end()) {
+ auto&& entryMap = nsContainerIt->second.entryMap;
+ auto cursorIdEntryIt = entryMap.begin();
+ while (cursorIdEntryIt != entryMap.end()) {
+ auto cursorId = cursorIdEntryIt->first;
+ auto& entry = cursorIdEntryIt->second;
+
+ if (!pred(cursorId, entry)) {
+ ++cursorIdEntryIt;
+ continue;
+ }
- stdx::unique_lock<stdx::mutex> lk(_mutex);
- std::vector<CursorDescriptor> zombieCursorDescriptors;
- for (auto& nsContainerPair : _namespaceToContainerMap) {
- const NamespaceString& nss = nsContainerPair.first;
- for (auto& cursorIdEntryPair : nsContainerPair.second.entryMap) {
- CursorId cursorId = cursorIdEntryPair.first;
- const CursorEntry& entry = cursorIdEntryPair.second;
- if (!entry.getKillPending()) {
+ ++nKilled;
+
+ if (entry.getOperationUsingCursor()) {
+ // Mark the OperationContext using the cursor as killed, and move on.
+ killOperationUsingCursor(lk, &entry);
+ ++cursorIdEntryIt;
continue;
}
- zombieCursorDescriptors.emplace_back(nss, cursorId, entry.isInactive());
- }
- }
- std::size_t cursorsTimedOut = 0;
+ cursorsToDestroy.push_back(entry.releaseCursor(nullptr));
- for (auto& cursorDescriptor : zombieCursorDescriptors) {
- StatusWith<std::unique_ptr<ClusterClientCursor>> zombieCursor =
- _detachCursor(lk, cursorDescriptor.ns, cursorDescriptor.cursorId);
- if (!zombieCursor.isOK()) {
- // Cursor in use, or has already been deleted.
- continue;
+ // Destroy the entry and set the iterator to the next element.
+ cursorIdEntryIt = entryMap.erase(cursorIdEntryIt);
}
- lk.unlock();
- // Pass opCtx to kill(), since a cursor which wraps an underlying aggregation pipeline is
- // obliged to call Pipeline::dispose with a valid OperationContext prior to deletion.
- zombieCursor.getValue()->kill(opCtx);
- zombieCursor.getValue().reset();
- lk.lock();
-
- if (cursorDescriptor.isInactive) {
- ++cursorsTimedOut;
+ if (entryMap.empty()) {
+ nsContainerIt = eraseContainer(nsContainerIt);
+ } else {
+ ++nsContainerIt;
}
}
- return cursorsTimedOut;
+
+ // Call kill() outside of the lock, as it may require waiting for callbacks to finish.
+ lk.unlock();
+
+ for (auto&& cursor : cursorsToDestroy) {
+ invariant(cursor.get());
+ cursor->kill(opCtx);
+ }
+
+ return nKilled;
}
ClusterCursorManager::Stats ClusterCursorManager::stats() const {
@@ -496,7 +498,7 @@ ClusterCursorManager::Stats ClusterCursorManager::stats() const {
for (auto& cursorIdEntryPair : nsContainerPair.second.entryMap) {
const CursorEntry& entry = cursorIdEntryPair.second;
- if (entry.getKillPending()) {
+ if (entry.isKillPending()) {
// Killed cursors do not count towards the number of pinned cursors or the number of
// open cursors.
continue;
@@ -527,7 +529,7 @@ void ClusterCursorManager::appendActiveSessions(LogicalSessionIdSet* lsids) cons
for (const auto& cursorIdEntryPair : nsContainerPair.second.entryMap) {
const CursorEntry& entry = cursorIdEntryPair.second;
- if (entry.getKillPending()) {
+ if (entry.isKillPending()) {
// Don't include sessions for killed cursors.
continue;
}
@@ -549,7 +551,7 @@ std::vector<GenericCursor> ClusterCursorManager::getAllCursors() const {
for (const auto& cursorIdEntryPair : nsContainerPair.second.entryMap) {
const CursorEntry& entry = cursorIdEntryPair.second;
- if (entry.getKillPending()) {
+ if (entry.isKillPending()) {
// Don't include sessions for killed cursors.
continue;
}
@@ -586,7 +588,7 @@ stdx::unordered_set<CursorId> ClusterCursorManager::getCursorsForSession(
for (auto&& cursorIdEntryPair : nsContainerPair.second.entryMap) {
const CursorEntry& entry = cursorIdEntryPair.second;
- if (entry.getKillPending()) {
+ if (entry.isKillPending()) {
// Don't include sessions for killed cursors.
continue;
}
@@ -628,6 +630,21 @@ auto ClusterCursorManager::_getEntry(WithLock, NamespaceString const& nss, Curso
return &entryMapIt->second;
}
+auto ClusterCursorManager::eraseContainer(NssToCursorContainerMap::iterator it)
+ -> NssToCursorContainerMap::iterator {
+ auto&& container = it->second;
+ auto&& entryMap = container.entryMap;
+ invariant(entryMap.empty());
+
+ // This was the last cursor remaining in the given namespace. Erase all state associated
+ // with this namespace.
+ size_t numDeleted = _cursorIdPrefixToNamespaceMap.erase(container.containerPrefix);
+ invariant(numDeleted == 1);
+ it = _namespaceToContainerMap.erase(it);
+ invariant(_namespaceToContainerMap.size() == _cursorIdPrefixToNamespaceMap.size());
+ return it;
+}
+
StatusWith<std::unique_ptr<ClusterClientCursor>> ClusterCursorManager::_detachCursor(
WithLock lk, NamespaceString const& nss, CursorId cursorId) {
@@ -650,13 +667,7 @@ StatusWith<std::unique_ptr<ClusterClientCursor>> ClusterCursorManager::_detachCu
size_t eraseResult = entryMap.erase(cursorId);
invariant(1 == eraseResult);
if (entryMap.empty()) {
- // This was the last cursor remaining in the given namespace. Erase all state associated
- // with this namespace.
- size_t numDeleted =
- _cursorIdPrefixToNamespaceMap.erase(nsToContainerIt->second.containerPrefix);
- invariant(numDeleted == 1);
- _namespaceToContainerMap.erase(nsToContainerIt);
- invariant(_namespaceToContainerMap.size() == _cursorIdPrefixToNamespaceMap.size());
+ eraseContainer(nsToContainerIt);
}
return std::move(cursor);
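The heart of the new .cpp code is killCursorsSatisfying(): under the manager mutex it walks every cursor entry, interrupts the pinning operation for checked-out cursors, detaches the rest, and only runs the blocking kill() after the lock is released. The following standalone sketch (hypothetical Cursor/Manager types, not the MongoDB classes; the pinned-cursor branch is omitted for brevity) illustrates the collect-under-the-lock, destroy-outside-the-lock pattern.

// Standalone sketch of the predicate-driven kill loop (hypothetical types, not
// the MongoDB classes; the branch that interrupts a pinning operation is omitted).
#include <cstddef>
#include <functional>
#include <map>
#include <memory>
#include <mutex>
#include <vector>

struct Cursor {
    // Blocking cleanup; must not run while the manager mutex is held.
    void kill() {}
};

class Manager {
public:
    std::size_t killCursorsSatisfying(std::function<bool(int, const Cursor&)> pred) {
        std::unique_lock<std::mutex> lk(_mutex);
        std::vector<std::unique_ptr<Cursor>> toDestroy;

        // Detach matching cursors while holding the lock, erasing entries as we go.
        for (auto it = _cursors.begin(); it != _cursors.end();) {
            if (pred(it->first, *it->second)) {
                toDestroy.push_back(std::move(it->second));
                it = _cursors.erase(it);
            } else {
                ++it;
            }
        }

        // Run the potentially blocking kill() calls only after releasing the lock.
        lk.unlock();
        for (auto& cursor : toDestroy) {
            cursor->kill();
        }
        return toDestroy.size();
    }

private:
    std::mutex _mutex;
    std::map<int, std::unique_ptr<Cursor>> _cursors;
};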
diff --git a/src/mongo/s/query/cluster_cursor_manager.h b/src/mongo/s/query/cluster_cursor_manager.h
index 4f172cb9715..bb7bc857130 100644
--- a/src/mongo/s/query/cluster_cursor_manager.h
+++ b/src/mongo/s/query/cluster_cursor_manager.h
@@ -63,8 +63,7 @@ class StatusWith;
* the manager by calling PinnedCursor::returnCursor().
*
* The manager supports killing of registered cursors, either through the PinnedCursor object or
- * with the kill*() suite of methods. These simply mark the affected cursors as 'kill pending',
- * which can be cleaned up by later calls to the reapZombieCursors() method.
+ * with the kill*() suite of methods.
*
* No public methods throw exceptions, and all public methods are thread-safe.
*
@@ -338,7 +337,7 @@ public:
* A thread which is currently using a cursor may not call killCursor() on it, but rather
* should kill the cursor by checking it back into the manager in the exhausted state.
*
- * Does not block.
+ * May block waiting for other threads to finish, but does not block on the network.
*/
Status killCursor(OperationContext* opCtx, const NamespaceString& nss, CursorId cursorId);
@@ -346,30 +345,21 @@ public:
* Informs the manager that all mortal cursors with a 'last active' time equal to or earlier
* than 'cutoff' should be killed. The cursors need not necessarily be in the 'idle' state.
*
- * Does not block.
- */
- void killMortalCursorsInactiveSince(Date_t cutoff);
-
- /**
- * Informs the manager that all currently-registered cursors should be killed (regardless of
- * pinned status or lifetime type).
+ * May block waiting for other threads to finish, but does not block on the network.
*
- * Does not block.
+ * Returns the number of cursors that were killed due to inactivity.
*/
- void killAllCursors();
+ std::size_t killMortalCursorsInactiveSince(OperationContext* opCtx, Date_t cutoff);
/**
- * Attempts to performs a blocking kill and deletion of all non-pinned cursors that are marked
- * as 'kill pending'. Returns the number of cursors that were marked as inactive.
+ * Kills all cursors which are registered at the time of the call. If a cursor is registered
+ * while this function is running, it may not be killed. If the caller wants to guarantee that
+ * all cursors are killed, shutdown() should be used instead.
*
- * If no other non-const methods are called simultaneously, it is guaranteed that this method
- * will delete all non-pinned cursors marked as 'kill pending'. Otherwise, no such guarantee is
- * made (this is due to the fact that the blocking kill for each cursor is performed outside of
- * the cursor manager lock).
- *
- * Can block.
+ * May block waiting for other threads to finish, but does not block on the network.
*/
- std::size_t reapZombieCursors(OperationContext* opCtx);
+ void killAllCursors(OperationContext* opCtx);
+
/**
* Returns the number of open cursors on a ClusterCursorManager, broken down by type.
@@ -419,16 +409,16 @@ public:
private:
class CursorEntry;
+ struct CursorEntryContainer;
using CursorEntryMap = stdx::unordered_map<CursorId, CursorEntry>;
+ using NssToCursorContainerMap =
+ stdx::unordered_map<NamespaceString, CursorEntryContainer, NamespaceString::Hasher>;
/**
* Transfers ownership of the given pinned cursor back to the manager, and moves the cursor to
* the 'idle' state.
*
- * If 'cursorState' is 'Exhausted', the cursor will be destroyed. However, destruction will be
- * delayed until reapZombieCursors() is called under the following circumstances:
- * - The cursor is already marked as 'kill pending'.
- * - The cursor is managing open remote cursors which still need to be cleaned up.
+ * If 'cursorState' is 'Exhausted', the cursor will be destroyed.
*
* Thread-safe.
*
@@ -470,6 +460,20 @@ private:
CursorId cursorId);
/**
+ * Flags the OperationContext that's using the given cursor as interrupted.
+ */
+ void killOperationUsingCursor(WithLock, CursorEntry* entry);
+
+ /**
+ * Kill the cursors satisfying the given predicate. Assumes that 'lk' is held upon entry.
+ *
+ * Returns the number of cursors killed.
+ */
+ std::size_t killCursorsSatisfying(stdx::unique_lock<stdx::mutex> lk,
+ OperationContext* opCtx,
+ std::function<bool(CursorId, const CursorEntry&)> pred);
+
+ /**
* CursorEntry is a moveable, non-copyable container for a single cursor.
*/
class CursorEntry {
@@ -496,12 +500,11 @@ private:
CursorEntry(CursorEntry&& other) = default;
CursorEntry& operator=(CursorEntry&& other) = default;
- bool getKillPending() const {
- return _killPending;
- }
-
- bool isInactive() const {
- return _isInactive;
+ bool isKillPending() const {
+ // A cursor is kill pending if it's checked out by an OperationContext that was
+ // interrupted.
+ return _operationUsingCursor &&
+ !_operationUsingCursor->checkForInterruptNoAssert().isOK();
}
CursorType getCursorType() const {
@@ -550,14 +553,6 @@ private:
_operationUsingCursor = nullptr;
}
- void setKillPending() {
- _killPending = true;
- }
-
- void setInactive() {
- _isInactive = true;
- }
-
void setLastActive(Date_t lastActive) {
_lastActive = lastActive;
}
@@ -568,8 +563,6 @@ private:
private:
std::unique_ptr<ClusterClientCursor> _cursor;
- bool _killPending = false;
- bool _isInactive = false;
CursorType _cursorType = CursorType::SingleTarget;
CursorLifetime _cursorLifetime = CursorLifetime::Mortal;
Date_t _lastActive;
@@ -601,6 +594,12 @@ private:
CursorEntryMap entryMap;
};
+ /**
+ * Erase the container that 'it' points to and return an iterator to the next one. Assumes 'it'
+ * is an iterator in '_namespaceToContainerMap'.
+ */
+ NssToCursorContainerMap::iterator eraseContainer(NssToCursorContainerMap::iterator it);
+
// Clock source. Used when the 'last active' time for a cursor needs to be set/updated. May be
// concurrently accessed by multiple threads.
ClockSource* _clockSource;
@@ -629,8 +628,7 @@ private:
//
// Entries are added when the first cursor on the given namespace is registered, and removed
// when the last cursor on the given namespace is destroyed.
- stdx::unordered_map<NamespaceString, CursorEntryContainer, NamespaceString::Hasher>
- _namespaceToContainerMap;
+ NssToCursorContainerMap _namespaceToContainerMap;
size_t _cursorsTimedOut = 0;
};
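With the _killPending and _isInactive flags gone, "kill pending" is no longer stored state: an entry is kill-pending exactly when it is checked out by an operation that has already been interrupted, as the new isKillPending() in the header above shows. A minimal standalone sketch of that idea (hypothetical Operation type standing in for OperationContext):

// Minimal sketch of the flag-free kill-pending check (hypothetical Operation type,
// not the real OperationContext).
#include <atomic>

struct Operation {
    std::atomic<bool> killed{false};
    bool isInterrupted() const { return killed.load(); }
};

struct CursorEntry {
    Operation* operationUsingCursor = nullptr;  // non-null only while checked out

    bool isKillPending() const {
        // Kill-pending == pinned by an operation that has already been marked killed.
        // An idle (unpinned) entry can simply be destroyed outright instead.
        return operationUsingCursor && operationUsingCursor->isInterrupted();
    }
};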
diff --git a/src/mongo/s/query/cluster_cursor_manager_test.cpp b/src/mongo/s/query/cluster_cursor_manager_test.cpp
index d9cc6bcd8e3..b9ea435cc99 100644
--- a/src/mongo/s/query/cluster_cursor_manager_test.cpp
+++ b/src/mongo/s/query/cluster_cursor_manager_test.cpp
@@ -138,8 +138,7 @@ private:
}
void tearDown() final {
- _manager.killAllCursors();
- _manager.reapZombieCursors(nullptr);
+ _manager.shutdown(_opCtx.get());
if (_opCtx) {
_opCtx.reset();
@@ -338,9 +337,7 @@ TEST_F(ClusterCursorManagerTest, CheckOutCursorUpdateActiveTime) {
getManager()->checkOutCursor(nss, cursorId, _opCtx.get(), successAuthChecker);
ASSERT_OK(checkedOutCursor.getStatus());
checkedOutCursor.getValue().returnCursor(ClusterCursorManager::CursorState::NotExhausted);
- getManager()->killMortalCursorsInactiveSince(cursorRegistrationTime);
- ASSERT(!isMockCursorKilled(0));
- getManager()->reapZombieCursors(nullptr);
+ getManager()->killMortalCursorsInactiveSince(_opCtx.get(), cursorRegistrationTime);
ASSERT(!isMockCursorKilled(0));
}
@@ -374,9 +371,7 @@ TEST_F(ClusterCursorManagerTest, ReturnCursorUpdateActiveTime) {
ASSERT_OK(checkedOutCursor.getStatus());
getClockSource()->advance(Milliseconds(1));
checkedOutCursor.getValue().returnCursor(ClusterCursorManager::CursorState::NotExhausted);
- getManager()->killMortalCursorsInactiveSince(cursorCheckOutTime);
- ASSERT(!isMockCursorKilled(0));
- getManager()->reapZombieCursors(nullptr);
+ getManager()->killMortalCursorsInactiveSince(_opCtx.get(), cursorCheckOutTime);
ASSERT(!isMockCursorKilled(0));
}
@@ -481,9 +476,7 @@ TEST_F(ClusterCursorManagerTest, KillMortalCursorsInactiveSinceBasic) {
ClusterCursorManager::CursorType::SingleTarget,
ClusterCursorManager::CursorLifetime::Mortal,
UserNameIterator()));
- getManager()->killMortalCursorsInactiveSince(getClockSource()->now());
- ASSERT(!isMockCursorKilled(0));
- getManager()->reapZombieCursors(nullptr);
+ getManager()->killMortalCursorsInactiveSince(_opCtx.get(), getClockSource()->now());
ASSERT(isMockCursorKilled(0));
}
@@ -497,9 +490,7 @@ TEST_F(ClusterCursorManagerTest, KillMortalCursorsInactiveSinceSkipUnexpired) {
ClusterCursorManager::CursorType::SingleTarget,
ClusterCursorManager::CursorLifetime::Mortal,
UserNameIterator()));
- getManager()->killMortalCursorsInactiveSince(timeBeforeCursorCreation);
- ASSERT(!isMockCursorKilled(0));
- getManager()->reapZombieCursors(nullptr);
+ getManager()->killMortalCursorsInactiveSince(_opCtx.get(), timeBeforeCursorCreation);
ASSERT(!isMockCursorKilled(0));
}
@@ -511,9 +502,7 @@ TEST_F(ClusterCursorManagerTest, KillMortalCursorsInactiveSinceSkipImmortal) {
ClusterCursorManager::CursorType::SingleTarget,
ClusterCursorManager::CursorLifetime::Immortal,
UserNameIterator()));
- getManager()->killMortalCursorsInactiveSince(getClockSource()->now());
- ASSERT(!isMockCursorKilled(0));
- getManager()->reapZombieCursors(nullptr);
+ getManager()->killMortalCursorsInactiveSince(_opCtx.get(), getClockSource()->now());
ASSERT(!isMockCursorKilled(0));
}
@@ -529,14 +518,10 @@ TEST_F(ClusterCursorManagerTest, ShouldNotKillPinnedCursors) {
UserNameIterator()));
auto pin =
assertGet(getManager()->checkOutCursor(nss, cursorId, _opCtx.get(), successAuthChecker));
- getManager()->killMortalCursorsInactiveSince(getClockSource()->now());
- ASSERT(!isMockCursorKilled(0));
- getManager()->reapZombieCursors(nullptr);
+ getManager()->killMortalCursorsInactiveSince(_opCtx.get(), getClockSource()->now());
ASSERT(!isMockCursorKilled(0));
pin.returnCursor(ClusterCursorManager::CursorState::NotExhausted);
- getManager()->killMortalCursorsInactiveSince(getClockSource()->now());
- ASSERT(!isMockCursorKilled(0));
- getManager()->reapZombieCursors(nullptr);
+ getManager()->killMortalCursorsInactiveSince(_opCtx.get(), getClockSource()->now());
ASSERT(isMockCursorKilled(0));
}
@@ -558,11 +543,7 @@ TEST_F(ClusterCursorManagerTest, KillMortalCursorsInactiveSinceMultipleCursors)
UserNameIterator()));
getClockSource()->advance(Milliseconds(1));
}
- getManager()->killMortalCursorsInactiveSince(cutoff);
- for (size_t i = 0; i < numCursors; ++i) {
- ASSERT(!isMockCursorKilled(i));
- }
- getManager()->reapZombieCursors(nullptr);
+ getManager()->killMortalCursorsInactiveSince(_opCtx.get(), cutoff);
for (size_t i = 0; i < numCursors; ++i) {
if (i < numKilledCursorsExpected) {
ASSERT(isMockCursorKilled(i));
@@ -583,47 +564,12 @@ TEST_F(ClusterCursorManagerTest, KillAllCursors) {
ClusterCursorManager::CursorLifetime::Mortal,
UserNameIterator()));
}
- getManager()->killAllCursors();
- for (size_t i = 0; i < numCursors; ++i) {
- ASSERT(!isMockCursorKilled(i));
- }
- getManager()->reapZombieCursors(nullptr);
+ getManager()->killAllCursors(_opCtx.get());
for (size_t i = 0; i < numCursors; ++i) {
ASSERT(isMockCursorKilled(i));
}
}
-// Test that reaping does not call kill() on the underlying ClusterClientCursor for a killed cursor
-// that is still pinned.
-TEST_F(ClusterCursorManagerTest, ReapZombieCursorsSkipPinned) {
- auto cursorId =
- assertGet(getManager()->registerCursor(_opCtx.get(),
- allocateMockCursor(),
- nss,
- ClusterCursorManager::CursorType::SingleTarget,
- ClusterCursorManager::CursorLifetime::Mortal,
- UserNameIterator()));
- auto pinnedCursor =
- getManager()->checkOutCursor(nss, cursorId, _opCtx.get(), successAuthChecker);
- ASSERT(!isMockCursorKilled(0));
- getManager()->reapZombieCursors(nullptr);
- ASSERT(!isMockCursorKilled(0));
-}
-
-// Test that reaping does not call kill() on the underlying ClusterClientCursor for cursors that
-// haven't been killed.
-TEST_F(ClusterCursorManagerTest, ReapZombieCursorsSkipNonZombies) {
- ASSERT_OK(getManager()->registerCursor(_opCtx.get(),
- allocateMockCursor(),
- nss,
- ClusterCursorManager::CursorType::SingleTarget,
- ClusterCursorManager::CursorLifetime::Mortal,
- UserNameIterator()));
- ASSERT(!isMockCursorKilled(0));
- getManager()->reapZombieCursors(nullptr);
- ASSERT(!isMockCursorKilled(0));
-}
-
// Test that a new ClusterCursorManager's stats() is initially zero for the cursor counts.
TEST_F(ClusterCursorManagerTest, StatsInitAsZero) {
ASSERT_EQ(0U, getManager()->stats().cursorsMultiTarget);
@@ -1018,8 +964,9 @@ TEST_F(ClusterCursorManagerTest, RemotesExhausted) {
ASSERT_FALSE(pinnedCursor.getValue().remotesExhausted());
}
-// Test that killed cursors which are still pinned are not reaped.
-TEST_F(ClusterCursorManagerTest, DoNotReapKilledPinnedCursors) {
+// Test that killed cursors which are still pinned are not destroyed immediately.
+TEST_F(ClusterCursorManagerTest, DoNotDestroyKilledPinnedCursors) {
+ const Date_t cutoff = getClockSource()->now();
auto cursorId =
assertGet(getManager()->registerCursor(_opCtx.get(),
allocateMockCursor(),
@@ -1036,8 +983,11 @@ TEST_F(ClusterCursorManagerTest, DoNotReapKilledPinnedCursors) {
ASSERT_EQ(_opCtx->checkForInterruptNoAssert(), ErrorCodes::CursorKilled);
ASSERT(!isMockCursorKilled(0));
- // Pinned cursor should remain alive after reaping.
- getManager()->reapZombieCursors(nullptr);
+ // The cursor cleanup system should not destroy the cursor either.
+ getManager()->killMortalCursorsInactiveSince(_opCtx.get(), cutoff);
+
+ // The cursor's operation context should be marked as interrupted, but the cursor itself should
+ // not have been destroyed.
ASSERT(!isMockCursorKilled(0));
// The cursor can be destroyed once it is returned to the manager.
@@ -1054,7 +1004,7 @@ TEST_F(ClusterCursorManagerTest, CannotRegisterCursorDuringShutdown) {
UserNameIterator()));
ASSERT(!isMockCursorKilled(0));
- getManager()->shutdown(nullptr);
+ getManager()->shutdown(_opCtx.get());
ASSERT(isMockCursorKilled(0));
@@ -1067,6 +1017,28 @@ TEST_F(ClusterCursorManagerTest, CannotRegisterCursorDuringShutdown) {
UserNameIterator()));
}
+TEST_F(ClusterCursorManagerTest, PinnedCursorNotKilledOnShutdown) {
+ auto cursorId =
+ assertGet(getManager()->registerCursor(_opCtx.get(),
+ allocateMockCursor(),
+ nss,
+ ClusterCursorManager::CursorType::SingleTarget,
+ ClusterCursorManager::CursorLifetime::Mortal,
+ UserNameIterator()));
+
+ auto pinnedCursor =
+ getManager()->checkOutCursor(nss, cursorId, _opCtx.get(), successAuthChecker);
+ getManager()->shutdown(_opCtx.get());
+
+ ASSERT_EQ(_opCtx->checkForInterruptNoAssert(), ErrorCodes::CursorKilled);
+ ASSERT(!isMockCursorKilled(0));
+
+ // Even if it's checked back in as not exhausted, it should have been marked as killed when
+ // shutdown() was called.
+ pinnedCursor.getValue().returnCursor(ClusterCursorManager::CursorState::NotExhausted);
+ ASSERT(isMockCursorKilled(0));
+}
+
TEST_F(ClusterCursorManagerTest, CannotCheckoutCursorDuringShutdown) {
auto cursorId =
assertGet(getManager()->registerCursor(_opCtx.get(),
@@ -1077,7 +1049,7 @@ TEST_F(ClusterCursorManagerTest, CannotCheckoutCursorDuringShutdown) {
UserNameIterator()));
ASSERT(!isMockCursorKilled(0));
- getManager()->shutdown(nullptr);
+ getManager()->shutdown(_opCtx.get());
ASSERT(isMockCursorKilled(0));