-rw-r--r--  jstests/auth/getMore.js  7
-rw-r--r--  jstests/core/getmore_invalidated_cursors.js  42
-rw-r--r--  jstests/core/operation_latency_histogram.js  10
-rw-r--r--  jstests/core/profile_getmore.js  1
-rw-r--r--  jstests/core/restart_catalog.js  2
-rw-r--r--  jstests/core/tailable_cursor_invalidation.js  6
-rw-r--r--  jstests/noPassthrough/commands_handle_kill.js  4
-rw-r--r--  jstests/noPassthroughWithMongod/captrunc_cursor_invalidation.js  37
-rw-r--r--  src/mongo/db/auth/authorization_session_impl.cpp  2
-rw-r--r--  src/mongo/db/clientcursor.cpp  1
-rw-r--r--  src/mongo/db/clientcursor.h  37
-rw-r--r--  src/mongo/db/commands/find_cmd.cpp  16
-rw-r--r--  src/mongo/db/commands/getmore_cmd.cpp  113
-rw-r--r--  src/mongo/db/commands/killcursors_cmd.cpp  18
-rw-r--r--  src/mongo/db/commands/list_collections.cpp  3
-rw-r--r--  src/mongo/db/commands/list_indexes.cpp  18
-rw-r--r--  src/mongo/db/commands/repair_cursor.cpp  5
-rw-r--r--  src/mongo/db/commands/run_aggregate.cpp  3
-rw-r--r--  src/mongo/db/cursor_manager.cpp  61
-rw-r--r--  src/mongo/db/exec/requires_collection_stage.cpp  10
-rw-r--r--  src/mongo/db/namespace_string.cpp  29
-rw-r--r--  src/mongo/db/namespace_string.h  26
-rw-r--r--  src/mongo/db/namespace_string_test.cpp  79
-rw-r--r--  src/mongo/db/query/find.cpp  428
-rw-r--r--  src/mongo/db/repl/base_cloner_test_fixture.cpp  2
-rw-r--r--  src/mongo/db/repl/collection_cloner.cpp  6
-rw-r--r--  src/mongo/db/repl/collection_cloner_test.cpp  94
-rw-r--r--  src/mongo/db/repl/databases_cloner_test.cpp  93
-rw-r--r--  src/mongo/dbtests/cursor_manager_test.cpp  88
-rw-r--r--  src/mongo/dbtests/plan_executor_invalidation_test.cpp  46
-rw-r--r--  src/mongo/dbtests/querytests.cpp  25
-rw-r--r--  src/mongo/s/commands/commands_public.cpp  7
32 files changed, 641 insertions, 678 deletions
diff --git a/jstests/auth/getMore.js b/jstests/auth/getMore.js
index e232b52bb35..49c60fcf6ca 100644
--- a/jstests/auth/getMore.js
+++ b/jstests/auth/getMore.js
@@ -91,10 +91,9 @@
cursorId = res.cursor.id;
testDB.logout();
assert.eq(1, testDB.auth("Mallory", "pwd"));
- assert.commandFailedWithCode(
- testDB.runCommand({getMore: cursorId, collection: "$cmd.listIndexes.foo"}),
- ErrorCodes.Unauthorized,
- "read from another user's listIndexes cursor");
+ assert.commandFailedWithCode(testDB.runCommand({getMore: cursorId, collection: "foo"}),
+ ErrorCodes.Unauthorized,
+ "read from another user's listIndexes cursor");
testDB.logout();
//
diff --git a/jstests/core/getmore_invalidated_cursors.js b/jstests/core/getmore_invalidated_cursors.js
index 788fb662cba..c244b071716 100644
--- a/jstests/core/getmore_invalidated_cursors.js
+++ b/jstests/core/getmore_invalidated_cursors.js
@@ -45,7 +45,7 @@
// The cursor will be invalidated on mongos, and we won't be able to find it.
assert.neq(-1, error.message.indexOf('didn\'t exist on server'), error.message);
} else {
- assert.eq(error.code, ErrorCodes.OperationFailed, tojson(error));
+ assert.eq(error.code, ErrorCodes.QueryPlanKilled, tojson(error));
assert.neq(-1, error.message.indexOf('collection dropped'), error.message);
}
@@ -56,29 +56,31 @@
cursor.next(); // Send the query to the server.
coll.drop();
-
error = assert.throws(() => cursor.itcount());
- if (isShardedCollection) {
- // The cursor will be invalidated on mongos, and we won't be able to find it.
- if (shellReadMode == 'legacy') {
- assert.neq(-1, error.message.indexOf('didn\'t exist on server'), error.message);
- } else {
- assert.eq(error.code, ErrorCodes.CursorNotFound, tojson(error));
- assert.neq(-1, error.message.indexOf('not found'), error.message);
- }
- } else {
- assert.eq(error.code, ErrorCodes.OperationFailed, tojson(error));
- assert.neq(-1, error.message.indexOf('collection dropped'), error.message);
- }
-
- // Test that dropping an index between a find and a getMore will return an appropriate error
- // code and message.
+ assert.eq(error.code, ErrorCodes.QueryPlanKilled, tojson(error));
+ // In replica sets, collection drops are done in two phases, first renaming the collection to a
+ // "drop pending" namespace, and then later reaping the collection. Therefore, we expect to
+ // either see an error message related to a collection drop, or one related to a collection
+ // rename.
+ const droppedMsg = 'collection dropped';
+ const renamedMsg = 'collection renamed';
+ assert(-1 !== error.message.indexOf(droppedMsg) || -1 !== error.message.indexOf(renamedMsg),
+ error.message);
+
+ // Test that dropping an index between a find and a getMore has no effect on the query if the
+ // query is not using the index.
setupCollection();
cursor = coll.find().batchSize(batchSize);
cursor.next(); // Send the query to the server.
-
assert.commandWorked(testDB.runCommand({dropIndexes: coll.getName(), index: {x: 1}}));
+ assert.eq(cursor.itcount(), nDocs - 1);
+ // Test that dropping the index being scanned by a cursor between a find and a getMore kills the
+ // query with the appropriate code and message.
+ setupCollection();
+ cursor = coll.find().hint({x: 1}).batchSize(batchSize);
+ cursor.next(); // Send the query to the server.
+ assert.commandWorked(testDB.runCommand({dropIndexes: coll.getName(), index: {x: 1}}));
error = assert.throws(() => cursor.itcount());
assert.eq(error.code, ErrorCodes.QueryPlanKilled, tojson(error));
assert.neq(-1, error.message.indexOf('index \'x_1\' dropped'), error.message);
@@ -111,8 +113,8 @@
// Ensure getMore fails with an appropriate error code and message.
error = assert.throws(() => cursor.itcount());
- assert.eq(error.code, ErrorCodes.OperationFailed, tojson(error));
- assert.neq(-1, error.message.indexOf('collection dropped'), error.message);
+ assert.eq(error.code, ErrorCodes.QueryPlanKilled, tojson(error));
+ assert.neq(-1, error.message.indexOf('collection renamed'), error.message);
}
}());
diff --git a/jstests/core/operation_latency_histogram.js b/jstests/core/operation_latency_histogram.js
index a8f0800b327..d3bce1305c9 100644
--- a/jstests/core/operation_latency_histogram.js
+++ b/jstests/core/operation_latency_histogram.js
@@ -81,15 +81,7 @@
for (var i = 0; i < numRecords - 1; i++) {
cursors[i].close();
}
- try {
- // Each close may result in two commands in latencyStats due to separate
- // pinning during auth check and execution.
- lastHistogram = assertHistogramDiffEq(testColl, lastHistogram, 0, 0, 2 * (numRecords - 1));
- } catch (e) {
- // Increment last reads to account for extra getstats call
- ++lastHistogram.reads.ops;
- lastHistogram = assertHistogramDiffEq(testColl, lastHistogram, 0, 0, numRecords - 1);
- }
+ lastHistogram = assertHistogramDiffEq(testColl, lastHistogram, 0, 0, numRecords - 1);
// Remove
for (var i = 0; i < numRecords; i++) {
diff --git a/jstests/core/profile_getmore.js b/jstests/core/profile_getmore.js
index 344800dc011..74c62f0176b 100644
--- a/jstests/core/profile_getmore.js
+++ b/jstests/core/profile_getmore.js
@@ -45,6 +45,7 @@
assert.eq(profileObj.originatingCommand.filter, {a: {$gt: 0}});
assert.eq(profileObj.originatingCommand.sort, {a: 1});
assert.eq(profileObj.planSummary, "IXSCAN { a: 1 }", tojson(profileObj));
+ assert(profileObj.hasOwnProperty("execStats"), tojson(profileObj));
assert(profileObj.execStats.hasOwnProperty("stage"), tojson(profileObj));
assert(profileObj.hasOwnProperty("responseLength"), tojson(profileObj));
assert(profileObj.hasOwnProperty("numYield"), tojson(profileObj));
diff --git a/jstests/core/restart_catalog.js b/jstests/core/restart_catalog.js
index 239c69f5e07..19bd0f9f27c 100644
--- a/jstests/core/restart_catalog.js
+++ b/jstests/core/restart_catalog.js
@@ -134,5 +134,5 @@
assert.commandFailedWithCode(
secondTestDB.runCommand(
{getMore: cursorResponse.cursor.id, collection: foodColl.getName()}),
- ErrorCodes.CursorNotFound);
+ ErrorCodes.QueryPlanKilled);
}());
diff --git a/jstests/core/tailable_cursor_invalidation.js b/jstests/core/tailable_cursor_invalidation.js
index 856dfc9c5c4..97ea96bb8d0 100644
--- a/jstests/core/tailable_cursor_invalidation.js
+++ b/jstests/core/tailable_cursor_invalidation.js
@@ -60,13 +60,13 @@
return findRes.cursor.id;
}
- // Test that a cursor cannot be found if a collection is dropped between a find and a getMore.
+ // Test that the cursor dies on getMore if the collection has been dropped.
let cursorId = openCursor({tailable: true, awaitData: false});
dropAndRecreateColl();
assert.commandFailedWithCode(db.runCommand({getMore: cursorId, collection: collName}),
- ErrorCodes.CursorNotFound);
+ ErrorCodes.QueryPlanKilled);
cursorId = openCursor({tailable: true, awaitData: true});
dropAndRecreateColl();
assert.commandFailedWithCode(db.runCommand({getMore: cursorId, collection: collName}),
- ErrorCodes.CursorNotFound);
+ ErrorCodes.QueryPlanKilled);
}());
diff --git a/jstests/noPassthrough/commands_handle_kill.js b/jstests/noPassthrough/commands_handle_kill.js
index 884f57a5d04..799cfc8cca1 100644
--- a/jstests/noPassthrough/commands_handle_kill.js
+++ b/jstests/noPassthrough/commands_handle_kill.js
@@ -87,8 +87,8 @@
// These are commands that will cause all running PlanExecutors to be invalidated, and the
// error messages that should be propagated when that happens.
const invalidatingCommands = [
- {command: {dropDatabase: 1}, message: 'Collection dropped'},
- {command: {drop: collName}, message: 'Collection dropped'},
+ {command: {dropDatabase: 1}, message: 'collection dropped'},
+ {command: {drop: collName}, message: 'collection dropped'},
];
if (options.usesIndex) {
diff --git a/jstests/noPassthroughWithMongod/captrunc_cursor_invalidation.js b/jstests/noPassthroughWithMongod/captrunc_cursor_invalidation.js
new file mode 100644
index 00000000000..3b1f7337133
--- /dev/null
+++ b/jstests/noPassthroughWithMongod/captrunc_cursor_invalidation.js
@@ -0,0 +1,37 @@
+// Test that when a capped collection is truncated, tailable cursors die on getMore with the error
+// code 'CappedPositionLost'.
+//
+// @tags: [requires_capped]
+(function() {
+ "use strict";
+
+ const coll = db.captrunc_cursor_invalidation;
+ coll.drop();
+
+ // Create a capped collection with four documents.
+ assert.commandWorked(db.createCollection(coll.getName(), {capped: true, size: 1024}));
+ const numDocs = 4;
+ const bulk = coll.initializeUnorderedBulkOp();
+ for (let i = 0; i < numDocs; ++i) {
+ bulk.insert({_id: i});
+ }
+ assert.commandWorked(bulk.execute());
+
+ // Open a tailable cursor against the capped collection.
+ const findRes = assert.commandWorked(db.runCommand({find: coll.getName(), tailable: true}));
+ assert.neq(findRes.cursor.id, 0);
+ assert.eq(findRes.cursor.ns, coll.getFullName());
+ assert.eq(findRes.cursor.firstBatch.length, 4);
+ const cursorId = findRes.cursor.id;
+
+ // Truncate the capped collection so that the cursor's position no longer exists.
+ assert.commandWorked(db.runCommand({captrunc: coll.getName(), n: 2}));
+
+ // A subsequent getMore should fail with 'CappedPositionLost'.
+ assert.commandFailedWithCode(db.runCommand({getMore: cursorId, collection: coll.getName()}),
+ ErrorCodes.CappedPositionLost);
+
+ // The cursor has now been destroyed, so another getMore should fail with 'CursorNotFound'.
+ assert.commandFailedWithCode(db.runCommand({getMore: cursorId, collection: coll.getName()}),
+ ErrorCodes.CursorNotFound);
+}());
diff --git a/src/mongo/db/auth/authorization_session_impl.cpp b/src/mongo/db/auth/authorization_session_impl.cpp
index a92ba21068b..52b1f041fef 100644
--- a/src/mongo/db/auth/authorization_session_impl.cpp
+++ b/src/mongo/db/auth/authorization_session_impl.cpp
@@ -414,8 +414,6 @@ Status AuthorizationSessionImpl::checkAuthForKillCursors(const NamespaceString&
ResourcePattern target;
if (ns.isListCollectionsCursorNS()) {
target = ResourcePattern::forDatabaseName(ns.db());
- } else if (ns.isListIndexesCursorNS()) {
- target = ResourcePattern::forExactNamespace(ns.getTargetNSForListIndexes());
} else {
target = ResourcePattern::forExactNamespace(ns);
}
diff --git a/src/mongo/db/clientcursor.cpp b/src/mongo/db/clientcursor.cpp
index fc7a1110052..ec988a1ce32 100644
--- a/src/mongo/db/clientcursor.cpp
+++ b/src/mongo/db/clientcursor.cpp
@@ -92,6 +92,7 @@ ClientCursor::ClientCursor(ClientCursorParams params,
_cursorManager(cursorManager),
_originatingCommand(params.originatingCommandObj),
_queryOptions(params.queryOptions),
+ _lockPolicy(params.lockPolicy),
_exec(std::move(params.exec)),
_operationUsingCursor(operationUsingCursor),
_lastUseDate(now),
diff --git a/src/mongo/db/clientcursor.h b/src/mongo/db/clientcursor.h
index 10fe242abac..8e17f0e0bec 100644
--- a/src/mongo/db/clientcursor.h
+++ b/src/mongo/db/clientcursor.h
@@ -55,18 +55,40 @@ class RecoveryUnit;
* using a CursorManager. See cursor_manager.h for more details.
*/
struct ClientCursorParams {
+ // Describes whether callers should acquire locks when using a ClientCursor. Not all cursors
+ // have the same locking behavior. In particular, find cursors require the caller to lock the
+ // collection in MODE_IS before calling methods on the underlying plan executor. Aggregate
+ // cursors, on the other hand, may access multiple collections and acquire their own locks on
+ // any involved collections while producing query results. Therefore, the caller need not
+ // explicitly acquire any locks when using a ClientCursor which houses execution machinery for
+ // an aggregate.
+ //
+ // The policy is consulted on getMore in order to determine locking behavior, since during
+ // getMore we otherwise could not easily know what flavor of cursor we're using.
+ enum class LockPolicy {
+ // The caller is responsible for locking the collection over which this ClientCursor
+ // executes.
+ kLockExternally,
+
+        // The caller need not hold any locks; this ClientCursor's plan executor acquires any
+ // necessary locks itself.
+ kLocksInternally,
+ };
+
ClientCursorParams(std::unique_ptr<PlanExecutor, PlanExecutor::Deleter> planExecutor,
NamespaceString nss,
UserNameIterator authenticatedUsersIter,
repl::ReadConcernArgs readConcernArgs,
- BSONObj originatingCommandObj)
+ BSONObj originatingCommandObj,
+ LockPolicy lockPolicy)
: exec(std::move(planExecutor)),
nss(std::move(nss)),
readConcernArgs(readConcernArgs),
queryOptions(exec->getCanonicalQuery()
? exec->getCanonicalQuery()->getQueryRequest().getOptions()
: 0),
- originatingCommandObj(originatingCommandObj.getOwned()) {
+ originatingCommandObj(originatingCommandObj.getOwned()),
+ lockPolicy(lockPolicy) {
while (authenticatedUsersIter.more()) {
authenticatedUsers.emplace_back(authenticatedUsersIter.next());
}
@@ -92,6 +114,7 @@ struct ClientCursorParams {
const repl::ReadConcernArgs readConcernArgs;
int queryOptions = 0;
BSONObj originatingCommandObj;
+ const LockPolicy lockPolicy;
};
/**
@@ -219,6 +242,10 @@ public:
return StringData(_planSummary);
}
+ ClientCursorParams::LockPolicy lockPolicy() const {
+ return _lockPolicy;
+ }
+
/**
* Returns a generic cursor containing diagnostics about this cursor.
* The caller must either have this cursor pinned or hold a mutex from the cursor manager.
@@ -348,6 +375,8 @@ private:
// See the QueryOptions enum in dbclientinterface.h.
const int _queryOptions = 0;
+ const ClientCursorParams::LockPolicy _lockPolicy;
+
// Unused maxTime budget for this cursor.
Microseconds _leftoverMaxTimeMicros = Microseconds::max();
@@ -467,6 +496,10 @@ public:
*/
ClientCursor* getCursor() const;
+ ClientCursor* operator->() {
+ return _cursor;
+ }
+
private:
friend class CursorManager;
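To make the new LockPolicy concrete, here is a minimal sketch, not part of this patch, of how a command registers its plan executor with the global cursor manager while choosing a policy; 'exec', 'nss', and 'cmdObj' are placeholder names, mirroring the find_cmd.cpp and list_indexes.cpp hunks below.

    // Hedged sketch, not part of the patch: registering a cursor with an explicit lock policy.
    // 'exec', 'nss', and 'cmdObj' stand in for the command's plan executor, namespace, and
    // original command object.
    ClientCursorPin pinnedCursor = CursorManager::getGlobalCursorManager()->registerCursor(
        opCtx,
        {std::move(exec),
         nss,
         AuthorizationSession::get(opCtx->getClient())->getAuthenticatedUserNames(),
         repl::ReadConcernArgs::get(opCtx),
         cmdObj,
         // kLockExternally: a find-style cursor whose caller locks the collection in MODE_IS.
         // kLocksInternally: an aggregate/listCollections-style cursor that manages its own locks.
         ClientCursorParams::LockPolicy::kLockExternally});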
diff --git a/src/mongo/db/commands/find_cmd.cpp b/src/mongo/db/commands/find_cmd.cpp
index f92fbbc62de..439c98b0ca4 100644
--- a/src/mongo/db/commands/find_cmd.cpp
+++ b/src/mongo/db/commands/find_cmd.cpp
@@ -388,13 +388,15 @@ public:
if (shouldSaveCursor(opCtx, collection, state, exec.get())) {
// Create a ClientCursor containing this plan executor and register it with the
// cursor manager.
- ClientCursorPin pinnedCursor = collection->getCursorManager()->registerCursor(
- opCtx,
- {std::move(exec),
- nss,
- AuthorizationSession::get(opCtx->getClient())->getAuthenticatedUserNames(),
- repl::ReadConcernArgs::get(opCtx),
- _request.body});
+ ClientCursorPin pinnedCursor =
+ CursorManager::getGlobalCursorManager()->registerCursor(
+ opCtx,
+ {std::move(exec),
+ nss,
+ AuthorizationSession::get(opCtx->getClient())->getAuthenticatedUserNames(),
+ repl::ReadConcernArgs::get(opCtx),
+ _request.body,
+ ClientCursorParams::LockPolicy::kLockExternally});
cursorId = pinnedCursor.getCursor()->cursorid();
invariant(!exec);
diff --git a/src/mongo/db/commands/getmore_cmd.cpp b/src/mongo/db/commands/getmore_cmd.cpp
index 8f3eee6d760..b0a34a2bbf5 100644
--- a/src/mongo/db/commands/getmore_cmd.cpp
+++ b/src/mongo/db/commands/getmore_cmd.cpp
@@ -286,41 +286,41 @@ public:
}
// Cursors come in one of two flavors:
- // - Cursors owned by the collection cursor manager, such as those generated via the
- // find command. For these cursors, we hold the appropriate collection lock for the
- // duration of the getMore using AutoGetCollectionForRead.
- // - Cursors owned by the global cursor manager, e.g. those generated via the aggregate
- // command. These cursors either hold no collection state or manage their collection
- // state internally, so we acquire no locks.
//
- // While we only need to acquire locks in the case of a cursor which is *not* globally
- // owned, we need to create an AutoStatsTracker in either case. This is responsible for
- // updating statistics in CurOp and Top. We avoid using AutoGetCollectionForReadCommand
- // because we may need to drop and reacquire locks when the cursor is awaitData, but we
- // don't want to update the stats twice.
+ // - Cursors which read from a single collection, such as those generated via the
+ // find command. For these cursors, we hold the appropriate collection lock for the
+ // duration of the getMore using AutoGetCollectionForRead. These cursors have the
+ // 'kLockExternally' lock policy.
//
- // Note that we acquire our locks before our ClientCursorPin, in order to ensure that
- // the pin's destructor is called before the lock's destructor (if there is one) so that
- // the cursor cleanup can occur under the lock.
+ // - Cursors which may read from many collections, e.g. those generated via the
+ // aggregate command, or which do not read from a collection at all, e.g. those
+ // generated by the listIndexes command. We don't need to acquire locks to use these
+ // cursors, since they either manage locking themselves or don't access data protected
+ // by collection locks. These cursors have the 'kLocksInternally' lock policy.
+ //
+ // While we only need to acquire locks for 'kLockExternally' cursors, we need to create
+ // an AutoStatsTracker in either case. This is responsible for updating statistics in
+ // CurOp and Top. We avoid using AutoGetCollectionForReadCommand because we may need to
+ // drop and reacquire locks when the cursor is awaitData, but we don't want to update
+ // the stats twice.
boost::optional<AutoGetCollectionForRead> readLock;
boost::optional<AutoStatsTracker> statsTracker;
- CursorManager* cursorManager;
- if (CursorManager::isGloballyManagedCursor(_request.cursorid)) {
- cursorManager = CursorManager::getGlobalCursorManager();
+ auto cursorManager = CursorManager::getGlobalCursorManager();
+ auto cursorPin = uassertStatusOK(cursorManager->pinCursor(opCtx, _request.cursorid));
- if (boost::optional<NamespaceString> nssForCurOp =
- _request.nss.isGloballyManagedNamespace()
- ? _request.nss.getTargetNSForGloballyManagedNamespace()
- : _request.nss) {
+ if (cursorPin->lockPolicy() == ClientCursorParams::LockPolicy::kLocksInternally) {
+ if (!_request.nss.isCollectionlessCursorNamespace()) {
const boost::optional<int> dbProfilingLevel = boost::none;
statsTracker.emplace(opCtx,
- *nssForCurOp,
+ _request.nss,
Top::LockType::NotLocked,
AutoStatsTracker::LogMode::kUpdateTopAndCurop,
dbProfilingLevel);
}
} else {
+ invariant(cursorPin->lockPolicy() ==
+ ClientCursorParams::LockPolicy::kLockExternally);
readLock.emplace(opCtx, _request.nss);
const int doNotChangeProfilingLevel = 0;
statsTracker.emplace(opCtx,
@@ -329,18 +329,8 @@ public:
AutoStatsTracker::LogMode::kUpdateTopAndCurop,
readLock->getDb() ? readLock->getDb()->getProfilingLevel()
: doNotChangeProfilingLevel);
-
- Collection* collection = readLock->getCollection();
- if (!collection) {
- uasserted(ErrorCodes::OperationFailed,
- "collection dropped between getMore calls");
- }
- cursorManager = collection->getCursorManager();
}
- auto ccPin = uassertStatusOK(cursorManager->pinCursor(opCtx, _request.cursorid));
- ClientCursor* cursor = ccPin.getCursor();
-
// Only used by the failpoints.
const auto dropAndReaquireReadLock = [&readLock, opCtx, this]() {
// Make sure an interrupted operation does not prevent us from reacquiring the lock.
@@ -367,22 +357,22 @@ public:
// authenticated when the cursor was created, then at least one of them must be
// authenticated in order to run getMore on the cursor.
if (!AuthorizationSession::get(opCtx->getClient())
- ->isCoauthorizedWith(cursor->getAuthenticatedUsers())) {
+ ->isCoauthorizedWith(cursorPin->getAuthenticatedUsers())) {
uasserted(ErrorCodes::Unauthorized,
str::stream() << "cursor id " << _request.cursorid
<< " was not created by the authenticated user");
}
- if (_request.nss != cursor->nss()) {
+ if (_request.nss != cursorPin->nss()) {
uasserted(ErrorCodes::Unauthorized,
str::stream() << "Requested getMore on namespace '" << _request.nss.ns()
<< "', but cursor belongs to a different namespace "
- << cursor->nss().ns());
+ << cursorPin->nss().ns());
}
// Ensure the lsid and txnNumber of the getMore match that of the originating command.
- validateLSID(opCtx, _request, cursor);
- validateTxnNumber(opCtx, _request, cursor);
+ validateLSID(opCtx, _request, cursorPin.getCursor());
+ validateTxnNumber(opCtx, _request, cursorPin.getCursor());
if (_request.nss.isOplog() && MONGO_FAIL_POINT(rsStopGetMoreCmd)) {
uasserted(ErrorCodes::CommandFailed,
@@ -391,20 +381,20 @@ public:
}
// Validation related to awaitData.
- if (cursor->isAwaitData()) {
- invariant(cursor->isTailable());
+ if (cursorPin->isAwaitData()) {
+ invariant(cursorPin->isTailable());
}
- if (_request.awaitDataTimeout && !cursor->isAwaitData()) {
+ if (_request.awaitDataTimeout && !cursorPin->isAwaitData()) {
uasserted(ErrorCodes::BadValue,
"cannot set maxTimeMS on getMore command for a non-awaitData cursor");
}
// On early return, get rid of the cursor.
- auto cursorFreer = makeGuard([&] { ccPin.deleteUnderlying(); });
+ auto cursorFreer = makeGuard([&] { cursorPin.deleteUnderlying(); });
// We must respect the read concern from the cursor.
- applyCursorReadConcern(opCtx, cursor->getReadConcernArgs());
+ applyCursorReadConcern(opCtx, cursorPin->getReadConcernArgs());
const bool disableAwaitDataFailpointActive =
MONGO_FAIL_POINT(disableAwaitDataForGetMoreCmd);
@@ -416,20 +406,20 @@ public:
// awaitData, then we supply a default time of one second. Otherwise we roll over
// any leftover time from the maxTimeMS of the operation that spawned this cursor,
// applying it to this getMore.
- if (cursor->isAwaitData() && !disableAwaitDataFailpointActive) {
+ if (cursorPin->isAwaitData() && !disableAwaitDataFailpointActive) {
awaitDataState(opCtx).waitForInsertsDeadline =
opCtx->getServiceContext()->getPreciseClockSource()->now() +
_request.awaitDataTimeout.value_or(Seconds{1});
- } else if (cursor->getLeftoverMaxTimeMicros() < Microseconds::max()) {
- opCtx->setDeadlineAfterNowBy(cursor->getLeftoverMaxTimeMicros(),
+ } else if (cursorPin->getLeftoverMaxTimeMicros() < Microseconds::max()) {
+ opCtx->setDeadlineAfterNowBy(cursorPin->getLeftoverMaxTimeMicros(),
ErrorCodes::MaxTimeMSExpired);
}
}
- if (!cursor->isAwaitData()) {
+ if (!cursorPin->isAwaitData()) {
opCtx->checkForInterrupt(); // May trigger maxTimeAlwaysTimeOut fail point.
}
- PlanExecutor* exec = cursor->getExecutor();
+ PlanExecutor* exec = cursorPin->getExecutor();
const auto* cq = exec->getCanonicalQuery();
if (cq && cq->getQueryRequest().isReadOnce()) {
// The readOnce option causes any storage-layer cursors created during plan
@@ -446,13 +436,13 @@ public:
// Ensure that the original query or command object is available in the slow query
// log, profiler and currentOp.
- auto originatingCommand = cursor->getOriginatingCommandObj();
+ auto originatingCommand = cursorPin->getOriginatingCommandObj();
if (!originatingCommand.isEmpty()) {
curOp->setOriginatingCommand_inlock(originatingCommand);
}
// Update the genericCursor stored in curOp with the new cursor stats.
- curOp->setGenericCursor_inlock(cursor->toGenericCursor());
+ curOp->setGenericCursor_inlock(cursorPin->toGenericCursor());
}
CursorId respondWithId = 0;
@@ -469,7 +459,7 @@ public:
Explain::getSummaryStats(*exec, &preExecutionStats);
// Mark this as an AwaitData operation if appropriate.
- if (cursor->isAwaitData() && !disableAwaitDataFailpointActive) {
+ if (cursorPin->isAwaitData() && !disableAwaitDataFailpointActive) {
if (_request.lastKnownCommittedOpTime)
clientsLastKnownCommittedOpTime(opCtx) =
_request.lastKnownCommittedOpTime.get();
@@ -488,8 +478,8 @@ public:
dropAndReaquireReadLock);
}
- uassertStatusOK(
- generateBatch(opCtx, cursor, _request, &nextBatch, &state, &numResults));
+ uassertStatusOK(generateBatch(
+ opCtx, cursorPin.getCursor(), _request, &nextBatch, &state, &numResults));
PlanSummaryStats postExecutionStats;
Explain::getSummaryStats(*exec, &postExecutionStats);
@@ -497,26 +487,27 @@ public:
postExecutionStats.totalDocsExamined -= preExecutionStats.totalDocsExamined;
curOp->debug().setPlanSummaryMetrics(postExecutionStats);
- // We do not report 'execStats' for aggregation or other globally managed cursors, both
- // in the original request and subsequent getMore. It would be useful to have this info
- // for an aggregation, but the source PlanExecutor could be destroyed before we know if
- // we need execStats and we do not want to generate for all operations due to cost.
- if (!CursorManager::isGloballyManagedCursor(_request.cursorid) &&
+ // We do not report 'execStats' for aggregation or other cursors with the
+ // 'kLocksInternally' policy, both in the original request and subsequent getMore. It
+ // would be useful to have this info for an aggregation, but the source PlanExecutor
+ // could be destroyed before we know if we need 'execStats' and we do not want to
+ // generate the stats eagerly for all operations due to cost.
+ if (cursorPin->lockPolicy() != ClientCursorParams::LockPolicy::kLocksInternally &&
curOp->shouldDBProfile()) {
BSONObjBuilder execStatsBob;
Explain::getWinningPlanStats(exec, &execStatsBob);
curOp->debug().execStats = execStatsBob.obj();
}
- if (shouldSaveCursorGetMore(state, exec, cursor->isTailable())) {
+ if (shouldSaveCursorGetMore(state, exec, cursorPin->isTailable())) {
respondWithId = _request.cursorid;
exec->saveState();
exec->detachFromOperationContext();
- cursor->setLeftoverMaxTimeMicros(opCtx->getRemainingMaxTimeMicros());
- cursor->incNReturnedSoFar(numResults);
- cursor->incNBatches();
+ cursorPin->setLeftoverMaxTimeMicros(opCtx->getRemainingMaxTimeMicros());
+ cursorPin->incNReturnedSoFar(numResults);
+ cursorPin->incNBatches();
} else {
curOp->debug().cursorExhausted = true;
}
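Condensed, the lock handling that this getmore_cmd.cpp hunk introduces looks roughly like the sketch below; 'request' stands in for the parsed getMore request, and the AutoStatsTracker setup is omitted.

    // Hedged sketch of the getMore-side decision shown above: pin the cursor through the global
    // manager first, then take the collection read lock only for 'kLockExternally' cursors.
    auto cursorPin = uassertStatusOK(
        CursorManager::getGlobalCursorManager()->pinCursor(opCtx, request.cursorid));

    boost::optional<AutoGetCollectionForRead> readLock;
    if (cursorPin->lockPolicy() == ClientCursorParams::LockPolicy::kLockExternally) {
        readLock.emplace(opCtx, request.nss);  // find-style cursor: getMore holds the lock.
    }
    // 'kLocksInternally' cursors (aggregate, listCollections, listIndexes) run without taking
    // collection locks here; their executors lock internally or touch no collection data.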
diff --git a/src/mongo/db/commands/killcursors_cmd.cpp b/src/mongo/db/commands/killcursors_cmd.cpp
index dc381aae471..74d61b1ebdc 100644
--- a/src/mongo/db/commands/killcursors_cmd.cpp
+++ b/src/mongo/db/commands/killcursors_cmd.cpp
@@ -78,17 +78,13 @@ private:
const NamespaceString& nss,
CursorId id) const final {
boost::optional<AutoStatsTracker> statsTracker;
- if (CursorManager::isGloballyManagedCursor(id)) {
- if (auto nssForCurOp = nss.isGloballyManagedNamespace()
- ? nss.getTargetNSForGloballyManagedNamespace()
- : nss) {
- const boost::optional<int> dbProfilingLevel = boost::none;
- statsTracker.emplace(opCtx,
- *nssForCurOp,
- Top::LockType::NotLocked,
- AutoStatsTracker::LogMode::kUpdateTopAndCurop,
- dbProfilingLevel);
- }
+ if (!nss.isCollectionlessCursorNamespace()) {
+ const boost::optional<int> dbProfilingLevel = boost::none;
+ statsTracker.emplace(opCtx,
+ nss,
+ Top::LockType::NotLocked,
+ AutoStatsTracker::LogMode::kUpdateTopAndCurop,
+ dbProfilingLevel);
}
return CursorManager::withCursorManager(
diff --git a/src/mongo/db/commands/list_collections.cpp b/src/mongo/db/commands/list_collections.cpp
index 9ec3c768989..9cc44ca94d2 100644
--- a/src/mongo/db/commands/list_collections.cpp
+++ b/src/mongo/db/commands/list_collections.cpp
@@ -374,7 +374,8 @@ public:
cursorNss,
AuthorizationSession::get(opCtx->getClient())->getAuthenticatedUserNames(),
repl::ReadConcernArgs::get(opCtx),
- jsobj});
+ jsobj,
+ ClientCursorParams::LockPolicy::kLocksInternally});
appendCursorResponseObject(
pinnedCursor.getCursor()->cursorid(), cursorNss.ns(), firstBatch.arr(), &result);
diff --git a/src/mongo/db/commands/list_indexes.cpp b/src/mongo/db/commands/list_indexes.cpp
index 7e62d8272a3..3c823191c6a 100644
--- a/src/mongo/db/commands/list_indexes.cpp
+++ b/src/mongo/db/commands/list_indexes.cpp
@@ -131,8 +131,8 @@ public:
auto includeIndexBuilds = cmdObj["includeIndexBuilds"].trueValue();
+ NamespaceString nss;
std::unique_ptr<PlanExecutor, PlanExecutor::Deleter> exec;
- NamespaceString cursorNss;
BSONArrayBuilder firstBatch;
{
AutoGetCollectionForReadCommand ctx(opCtx,
@@ -145,7 +145,7 @@ public:
const CollectionCatalogEntry* cce = collection->getCatalogEntry();
invariant(cce);
- const auto nss = ctx.getNss();
+ nss = ctx.getNss();
vector<string> indexNames;
writeConflictRetry(opCtx, "listIndexes", nss.ns(), [&] {
@@ -182,11 +182,8 @@ public:
root->pushBack(id);
}
- cursorNss = NamespaceString::makeListIndexesNSS(dbname, nss.coll());
- invariant(nss == cursorNss.getTargetNSForListIndexes());
-
exec = uassertStatusOK(PlanExecutor::make(
- opCtx, std::move(ws), std::move(root), cursorNss, PlanExecutor::NO_YIELD));
+ opCtx, std::move(ws), std::move(root), nss, PlanExecutor::NO_YIELD));
for (long long objCount = 0; objCount < batchSize; objCount++) {
BSONObj next;
@@ -206,7 +203,7 @@ public:
}
if (exec->isEOF()) {
- appendCursorResponseObject(0LL, cursorNss.ns(), firstBatch.arr(), &result);
+ appendCursorResponseObject(0LL, nss.ns(), firstBatch.arr(), &result);
return true;
}
@@ -218,13 +215,14 @@ public:
const auto pinnedCursor = CursorManager::getGlobalCursorManager()->registerCursor(
opCtx,
{std::move(exec),
- cursorNss,
+ nss,
AuthorizationSession::get(opCtx->getClient())->getAuthenticatedUserNames(),
repl::ReadConcernArgs::get(opCtx),
- cmdObj});
+ cmdObj,
+ ClientCursorParams::LockPolicy::kLocksInternally});
appendCursorResponseObject(
- pinnedCursor.getCursor()->cursorid(), cursorNss.ns(), firstBatch.arr(), &result);
+ pinnedCursor.getCursor()->cursorid(), nss.ns(), firstBatch.arr(), &result);
return true;
}
diff --git a/src/mongo/db/commands/repair_cursor.cpp b/src/mongo/db/commands/repair_cursor.cpp
index ae43ab43e63..a02645209d0 100644
--- a/src/mongo/db/commands/repair_cursor.cpp
+++ b/src/mongo/db/commands/repair_cursor.cpp
@@ -100,13 +100,14 @@ public:
exec->saveState();
exec->detachFromOperationContext();
- auto pinnedCursor = collection->getCursorManager()->registerCursor(
+ auto pinnedCursor = CursorManager::getGlobalCursorManager()->registerCursor(
opCtx,
{std::move(exec),
ns,
AuthorizationSession::get(opCtx->getClient())->getAuthenticatedUserNames(),
repl::ReadConcernArgs::get(opCtx),
- cmdObj});
+ cmdObj,
+ ClientCursorParams::LockPolicy::kLockExternally});
appendCursorResponseObject(
pinnedCursor.getCursor()->cursorid(), ns.ns(), BSONArray(), &result);
diff --git a/src/mongo/db/commands/run_aggregate.cpp b/src/mongo/db/commands/run_aggregate.cpp
index 55678fb52ea..bafb689df8f 100644
--- a/src/mongo/db/commands/run_aggregate.cpp
+++ b/src/mongo/db/commands/run_aggregate.cpp
@@ -634,7 +634,8 @@ Status runAggregate(OperationContext* opCtx,
origNss,
AuthorizationSession::get(opCtx->getClient())->getAuthenticatedUserNames(),
repl::ReadConcernArgs::get(opCtx),
- cmdObj);
+ cmdObj,
+ ClientCursorParams::LockPolicy::kLocksInternally);
if (expCtx->tailableMode == TailableModeEnum::kTailable) {
cursorParams.setTailable(true);
} else if (expCtx->tailableMode == TailableModeEnum::kTailableAndAwaitData) {
diff --git a/src/mongo/db/cursor_manager.cpp b/src/mongo/db/cursor_manager.cpp
index 395555af2b7..071983c65b4 100644
--- a/src/mongo/db/cursor_manager.cpp
+++ b/src/mongo/db/cursor_manager.cpp
@@ -181,7 +181,7 @@ void GlobalCursorIdCache::deregisterCursorManager(uint32_t id, const NamespaceSt
bool GlobalCursorIdCache::killCursor(OperationContext* opCtx, CursorId id, bool checkAuth) {
// Figure out what the namespace of this cursor is.
NamespaceString nss;
- if (CursorManager::isGloballyManagedCursor(id)) {
+ {
auto pin = globalCursorManager->pinCursor(opCtx, id, CursorManager::kNoCheckSession);
if (!pin.isOK()) {
// Either the cursor doesn't exist, or it was killed during the last time it was being
@@ -189,18 +189,19 @@ bool GlobalCursorIdCache::killCursor(OperationContext* opCtx, CursorId id, bool
return false;
}
nss = pin.getValue().getCursor()->nss();
- } else {
- stdx::lock_guard<SimpleMutex> lk(_mutex);
- uint32_t nsid = idFromCursorId(id);
- IdToNssMap::const_iterator it = _idToNss.find(nsid);
- if (it == _idToNss.end()) {
- // No namespace corresponding to this cursor id prefix.
- return false;
- }
- nss = it->second;
}
invariant(nss.isValid());
+ boost::optional<AutoStatsTracker> statsTracker;
+ if (!nss.isCollectionlessCursorNamespace()) {
+ const boost::optional<int> dbProfilingLevel = boost::none;
+ statsTracker.emplace(opCtx,
+ nss,
+ Top::LockType::NotLocked,
+ AutoStatsTracker::LogMode::kUpdateTopAndCurop,
+ dbProfilingLevel);
+ }
+
// Check if we are authorized to kill this cursor.
if (checkAuth) {
auto status = CursorManager::withCursorManager(
@@ -219,33 +220,11 @@ bool GlobalCursorIdCache::killCursor(OperationContext* opCtx, CursorId id, bool
}
}
- // If this cursor is owned by the global cursor manager, ask it to kill the cursor for us.
- if (CursorManager::isGloballyManagedCursor(id)) {
- Status killStatus = globalCursorManager->killCursor(opCtx, id, checkAuth);
- massert(28697,
- killStatus.reason(),
- killStatus.code() == ErrorCodes::OK ||
- killStatus.code() == ErrorCodes::CursorNotFound);
- return killStatus.isOK();
- }
-
- // If not, then the cursor must be owned by a collection. Kill the cursor under the
- // collection lock (to prevent the collection from going away during the erase).
- AutoGetCollectionForReadCommand ctx(opCtx, nss);
- Collection* collection = ctx.getCollection();
- if (!collection) {
- if (checkAuth)
- audit::logKillCursorsAuthzCheck(
- opCtx->getClient(), nss, id, ErrorCodes::CursorNotFound);
- return false;
- }
-
- Status eraseStatus = collection->getCursorManager()->killCursor(opCtx, id, checkAuth);
- uassert(16089,
- eraseStatus.reason(),
- eraseStatus.code() == ErrorCodes::OK ||
- eraseStatus.code() == ErrorCodes::CursorNotFound);
- return eraseStatus.isOK();
+ Status killStatus = globalCursorManager->killCursor(opCtx, id, checkAuth);
+ massert(28697,
+ killStatus.reason(),
+ killStatus.code() == ErrorCodes::OK || killStatus.code() == ErrorCodes::CursorNotFound);
+ return killStatus.isOK();
}
std::size_t GlobalCursorIdCache::timeoutCursors(OperationContext* opCtx, Date_t now) {
@@ -427,8 +406,7 @@ CursorManager::~CursorManager() {
void CursorManager::invalidateAll(OperationContext* opCtx,
bool collectionGoingAway,
const std::string& reason) {
- invariant(!isGlobalManager()); // The global cursor manager should never need to kill cursors.
- dassert(opCtx->lockState()->isCollectionLockedForMode(_nss.ns(), MODE_X));
+ dassert(isGlobalManager() || opCtx->lockState()->isCollectionLockedForMode(_nss.ns(), MODE_X));
fassert(28819, !BackgroundOperation::inProgForNs(_nss));
// Mark all cursors as killed, but keep around those we can in order to provide a useful error
@@ -656,6 +634,11 @@ CursorId CursorManager::allocateCursorId_inlock() {
ClientCursorPin CursorManager::registerCursor(OperationContext* opCtx,
ClientCursorParams&& cursorParams) {
+ // TODO SERVER-37455: Cursors should only ever be registered against the global cursor manager.
+ // Follow-up work is required to actually delete the concept of a per-collection cursor manager
+ // from the code base.
+ invariant(isGlobalManager());
+
// Avoid computing the current time within the critical section.
auto now = opCtx->getServiceContext()->getPreciseClockSource()->now();
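With the per-collection manager removed from the kill path, killing any cursor id reduces to the following sketch; 'cursorId' and 'checkAuth' are placeholders for the caller's arguments.

    // Hedged sketch of the unified kill path above: every CursorId now resolves through the
    // single global CursorManager, with no per-collection lookup or collection lock.
    Status killStatus =
        CursorManager::getGlobalCursorManager()->killCursor(opCtx, cursorId, checkAuth);
    bool killed = killStatus.isOK();  // CursorNotFound is reported as 'false', not an error.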
diff --git a/src/mongo/db/exec/requires_collection_stage.cpp b/src/mongo/db/exec/requires_collection_stage.cpp
index 6d955469cf0..358d2bcc72a 100644
--- a/src/mongo/db/exec/requires_collection_stage.cpp
+++ b/src/mongo/db/exec/requires_collection_stage.cpp
@@ -50,7 +50,7 @@ void RequiresCollectionStageBase<CollectionT>::doRestoreState() {
const UUIDCatalog& catalog = UUIDCatalog::get(getOpCtx());
_collection = catalog.lookupCollectionByUUID(_collectionUUID);
uassert(ErrorCodes::QueryPlanKilled,
- str::stream() << "Collection dropped. UUID " << _collectionUUID << " no longer exists.",
+ str::stream() << "collection dropped. UUID " << _collectionUUID,
_collection);
uassert(ErrorCodes::QueryPlanKilled,
@@ -61,10 +61,10 @@ void RequiresCollectionStageBase<CollectionT>::doRestoreState() {
// TODO SERVER-31695: Allow queries to survive collection rename, rather than throwing here when
// a rename has happened during yield.
uassert(ErrorCodes::QueryPlanKilled,
- str::stream() << "Collection with UUID " << _collectionUUID << " was renamed from '"
- << _nss.ns()
- << "' to '"
- << _collection->ns().ns(),
+ str::stream() << "collection renamed from '" << _nss.ns() << "' to '"
+ << _collection->ns().ns()
+ << "'. UUID "
+ << _collectionUUID,
_nss == _collection->ns());
doRestoreStateRequiresCollection();
diff --git a/src/mongo/db/namespace_string.cpp b/src/mongo/db/namespace_string.cpp
index 4011a9d9dfa..a4548626983 100644
--- a/src/mongo/db/namespace_string.cpp
+++ b/src/mongo/db/namespace_string.cpp
@@ -41,7 +41,6 @@ namespace mongo {
namespace {
constexpr auto listCollectionsCursorCol = "$cmd.listCollections"_sd;
-constexpr auto listIndexesCursorNSPrefix = "$cmd.listIndexes."_sd;
constexpr auto collectionlessAggregateCursorCol = "$cmd.aggregate"_sd;
constexpr auto dropPendingNSPrefix = "system.drop."_sd;
@@ -79,11 +78,6 @@ bool NamespaceString::isListCollectionsCursorNS() const {
return coll() == listCollectionsCursorCol;
}
-bool NamespaceString::isListIndexesCursorNS() const {
- return coll().size() > listIndexesCursorNSPrefix.size() &&
- coll().startsWith(listIndexesCursorNSPrefix);
-}
-
bool NamespaceString::isCollectionlessAggregateNS() const {
return coll() == collectionlessAggregateCursorCol;
}
@@ -126,13 +120,6 @@ NamespaceString NamespaceString::makeListCollectionsNSS(StringData dbName) {
return nss;
}
-NamespaceString NamespaceString::makeListIndexesNSS(StringData dbName, StringData collectionName) {
- NamespaceString nss(dbName, str::stream() << listIndexesCursorNSPrefix << collectionName);
- dassert(nss.isValid());
- dassert(nss.isListIndexesCursorNS());
- return nss;
-}
-
NamespaceString NamespaceString::makeCollectionlessAggregateNSS(StringData dbname) {
NamespaceString nss(dbname, collectionlessAggregateCursorCol);
dassert(nss.isValid());
@@ -140,27 +127,11 @@ NamespaceString NamespaceString::makeCollectionlessAggregateNSS(StringData dbnam
return nss;
}
-NamespaceString NamespaceString::getTargetNSForListIndexes() const {
- dassert(isListIndexesCursorNS());
- return NamespaceString(db(), coll().substr(listIndexesCursorNSPrefix.size()));
-}
-
std::string NamespaceString::getSisterNS(StringData local) const {
verify(local.size() && local[0] != '.');
return db().toString() + "." + local.toString();
}
-boost::optional<NamespaceString> NamespaceString::getTargetNSForGloballyManagedNamespace() const {
- // Globally managed namespaces are of the form '$cmd.commandName.<targetNs>' or simply
- // '$cmd.commandName'.
- dassert(isGloballyManagedNamespace());
- const size_t indexOfNextDot = coll().find('.', 5);
- if (indexOfNextDot == std::string::npos) {
- return boost::none;
- }
- return NamespaceString{db(), coll().substr(indexOfNextDot + 1)};
-}
-
bool NamespaceString::isDropPendingNamespace() const {
return coll().startsWith(dropPendingNSPrefix);
}
diff --git a/src/mongo/db/namespace_string.h b/src/mongo/db/namespace_string.h
index 941221783c5..6920cab7736 100644
--- a/src/mongo/db/namespace_string.h
+++ b/src/mongo/db/namespace_string.h
@@ -151,12 +151,6 @@ public:
static NamespaceString makeListCollectionsNSS(StringData dbName);
/**
- * Constructs a NamespaceString representing a listIndexes namespace. The format for this
- * namespace is "<dbName>.$cmd.listIndexes.<collectionName>".
- */
- static NamespaceString makeListIndexesNSS(StringData dbName, StringData collectionName);
-
- /**
* Note that these values are derived from the mmap_v1 implementation and that is the only
* reason they are constrained as such.
*/
@@ -279,15 +273,16 @@ public:
bool isReplicated() const;
/**
- * Returns true if cursors for this namespace are registered with the global cursor manager.
+     * Some ClientCursors are not associated with any particular collection; for example, this is
+     * true for listCollections cursors and $currentOp agg cursors. Returns true if the namespace
+     * string denotes such a "collectionless" cursor.
*/
- bool isGloballyManagedNamespace() const {
+ bool isCollectionlessCursorNamespace() const {
return coll().startsWith("$cmd."_sd);
}
bool isCollectionlessAggregateNS() const;
bool isListCollectionsCursorNS() const;
- bool isListIndexesCursorNS() const;
/**
* Returns true if a client can modify this namespace even though it is under ".system."
@@ -296,13 +291,6 @@ public:
bool isLegalClientSystemNS() const;
/**
- * Given a NamespaceString for which isGloballyManagedNamespace() returns true, returns the
- * namespace the command targets, or boost::none for commands like 'listCollections' which
- * do not target a collection.
- */
- boost::optional<NamespaceString> getTargetNSForGloballyManagedNamespace() const;
-
- /**
* Returns true if this namespace refers to a drop-pending collection.
*/
bool isDropPendingNamespace() const;
@@ -331,12 +319,6 @@ public:
Status checkLengthForRename(const std::string::size_type longestIndexNameLength) const;
/**
- * Given a NamespaceString for which isListIndexesCursorNS() returns true, returns the
- * NamespaceString for the collection that the "listIndexes" targets.
- */
- NamespaceString getTargetNSForListIndexes() const;
-
- /**
* Returns true if the namespace is valid. Special namespaces for internal use are considered as
* valid.
*/
diff --git a/src/mongo/db/namespace_string_test.cpp b/src/mongo/db/namespace_string_test.cpp
index 87456009bf3..44976b5e8ee 100644
--- a/src/mongo/db/namespace_string_test.cpp
+++ b/src/mongo/db/namespace_string_test.cpp
@@ -152,62 +152,20 @@ TEST(NamespaceStringTest, ListCollectionsCursorNS) {
ASSERT(!NamespaceString("test.$cmd.listIndexes.foo").isListCollectionsCursorNS());
}
-TEST(NamespaceStringTest, ListIndexesCursorNS) {
- NamespaceString ns1("test.$cmd.listIndexes.f");
- ASSERT(ns1.isListIndexesCursorNS());
- ASSERT("test.f" == ns1.getTargetNSForListIndexes().ns());
-
- NamespaceString ns2("test.$cmd.listIndexes.foo");
- ASSERT(ns2.isListIndexesCursorNS());
- ASSERT("test.foo" == ns2.getTargetNSForListIndexes().ns());
-
- NamespaceString ns3("test.$cmd.listIndexes.foo.bar");
- ASSERT(ns3.isListIndexesCursorNS());
- ASSERT("test.foo.bar" == ns3.getTargetNSForListIndexes().ns());
-
- ASSERT(!NamespaceString("test.foo").isListIndexesCursorNS());
- ASSERT(!NamespaceString("test.foo.$cmd.listIndexes").isListIndexesCursorNS());
- ASSERT(!NamespaceString("test.$cmd.").isListIndexesCursorNS());
- ASSERT(!NamespaceString("test.$cmd.foo.").isListIndexesCursorNS());
- ASSERT(!NamespaceString("test.$cmd.listIndexes").isListIndexesCursorNS());
- ASSERT(!NamespaceString("test.$cmd.listIndexes.").isListIndexesCursorNS());
- ASSERT(!NamespaceString("test.$cmd.listCollections").isListIndexesCursorNS());
- ASSERT(!NamespaceString("test.$cmd.listCollections.foo").isListIndexesCursorNS());
-}
-
-TEST(NamespaceStringTest, IsGloballyManagedNamespace) {
- ASSERT_TRUE(NamespaceString{"test.$cmd.aggregate.foo"}.isGloballyManagedNamespace());
- ASSERT_TRUE(NamespaceString{"test.$cmd.listIndexes.foo"}.isGloballyManagedNamespace());
- ASSERT_TRUE(NamespaceString{"test.$cmd.otherCommand.foo"}.isGloballyManagedNamespace());
- ASSERT_TRUE(NamespaceString{"test.$cmd.listCollections"}.isGloballyManagedNamespace());
- ASSERT_TRUE(NamespaceString{"test.$cmd.otherCommand"}.isGloballyManagedNamespace());
- ASSERT_TRUE(NamespaceString{"test.$cmd.aggregate"}.isGloballyManagedNamespace());
- ASSERT_TRUE(NamespaceString{"test.$cmd.listIndexes"}.isGloballyManagedNamespace());
-
- ASSERT_FALSE(NamespaceString{"test.foo"}.isGloballyManagedNamespace());
- ASSERT_FALSE(NamespaceString{"test.$cmd"}.isGloballyManagedNamespace());
-
- ASSERT_FALSE(NamespaceString{"$cmd.aggregate.foo"}.isGloballyManagedNamespace());
- ASSERT_FALSE(NamespaceString{"$cmd.listCollections"}.isGloballyManagedNamespace());
-}
-
-TEST(NamespaceStringTest, GetTargetNSForGloballyManagedNamespace) {
- ASSERT_EQ(
- (NamespaceString{"test", "foo"}),
- NamespaceString{"test.$cmd.aggregate.foo"}.getTargetNSForGloballyManagedNamespace().get());
- ASSERT_EQ((NamespaceString{"test", "foo"}),
- NamespaceString{"test.$cmd.listIndexes.foo"}
- .getTargetNSForGloballyManagedNamespace()
- .get());
- ASSERT_EQ((NamespaceString{"test", "foo"}),
- NamespaceString{"test.$cmd.otherCommand.foo"}
- .getTargetNSForGloballyManagedNamespace()
- .get());
-
- ASSERT_FALSE(
- NamespaceString{"test.$cmd.listCollections"}.getTargetNSForGloballyManagedNamespace());
- ASSERT_FALSE(
- NamespaceString{"test.$cmd.otherCommand"}.getTargetNSForGloballyManagedNamespace());
+TEST(NamespaceStringTest, IsCollectionlessCursorNamespace) {
+ ASSERT_TRUE(NamespaceString{"test.$cmd.aggregate.foo"}.isCollectionlessCursorNamespace());
+ ASSERT_TRUE(NamespaceString{"test.$cmd.listIndexes.foo"}.isCollectionlessCursorNamespace());
+ ASSERT_TRUE(NamespaceString{"test.$cmd.otherCommand.foo"}.isCollectionlessCursorNamespace());
+ ASSERT_TRUE(NamespaceString{"test.$cmd.listCollections"}.isCollectionlessCursorNamespace());
+ ASSERT_TRUE(NamespaceString{"test.$cmd.otherCommand"}.isCollectionlessCursorNamespace());
+ ASSERT_TRUE(NamespaceString{"test.$cmd.aggregate"}.isCollectionlessCursorNamespace());
+ ASSERT_TRUE(NamespaceString{"test.$cmd.listIndexes"}.isCollectionlessCursorNamespace());
+
+ ASSERT_FALSE(NamespaceString{"test.foo"}.isCollectionlessCursorNamespace());
+ ASSERT_FALSE(NamespaceString{"test.$cmd"}.isCollectionlessCursorNamespace());
+
+ ASSERT_FALSE(NamespaceString{"$cmd.aggregate.foo"}.isCollectionlessCursorNamespace());
+ ASSERT_FALSE(NamespaceString{"$cmd.listCollections"}.isCollectionlessCursorNamespace());
}
TEST(NamespaceStringTest, IsDropPendingNamespace) {
@@ -355,15 +313,6 @@ TEST(NamespaceStringTest, makeListCollectionsNSIsCorrect) {
ASSERT(ns.isListCollectionsCursorNS());
}
-TEST(NamespaceStringTest, makeListIndexesNSIsCorrect) {
- NamespaceString ns = NamespaceString::makeListIndexesNSS("DB", "COLL");
- ASSERT_EQUALS("DB", ns.db());
- ASSERT_EQUALS("$cmd.listIndexes.COLL", ns.coll());
- ASSERT(ns.isValid());
- ASSERT(ns.isListIndexesCursorNS());
- ASSERT_EQUALS(NamespaceString("DB.COLL"), ns.getTargetNSForListIndexes());
-}
-
TEST(NamespaceStringTest, EmptyNSStringReturnsEmptyColl) {
NamespaceString nss{};
ASSERT_TRUE(nss.isEmpty());
diff --git a/src/mongo/db/query/find.cpp b/src/mongo/db/query/find.cpp
index a4ad2ad5d91..765e79a7183 100644
--- a/src/mongo/db/query/find.cpp
+++ b/src/mongo/db/query/find.cpp
@@ -215,6 +215,20 @@ void generateBatch(int ntoreturn,
MONGO_UNREACHABLE;
}
+Message makeCursorNotFoundResponse() {
+ const int initialBufSize = 512 + sizeof(QueryResult::Value);
+ BufBuilder bb(initialBufSize);
+ bb.skip(sizeof(QueryResult::Value));
+ QueryResult::View qr = bb.buf();
+ qr.msgdata().setLen(bb.len());
+ qr.msgdata().setOperation(opReply);
+ qr.setResultFlags(ResultFlag_CursorNotFound);
+ qr.setCursorId(0);
+ qr.setStartingFrom(0);
+ qr.setNReturned(0);
+ return Message(bb.release());
+}
+
} // namespace
/**
@@ -228,6 +242,8 @@ Message getMore(OperationContext* opCtx,
bool* isCursorAuthorized) {
invariant(ntoreturn >= 0);
+ LOG(5) << "Running getMore, cursorid: " << cursorid;
+
CurOp& curOp = *CurOp::get(opCtx);
curOp.ensureStarted();
@@ -241,48 +257,52 @@ Message getMore(OperationContext* opCtx,
const NamespaceString nss(ns);
// Cursors come in one of two flavors:
- // - Cursors owned by the collection cursor manager, such as those generated via the find
- // command. For these cursors, we hold the appropriate collection lock for the duration of the
- // getMore using AutoGetCollectionForRead.
- // - Cursors owned by the global cursor manager, such as those generated via the aggregate
- // command. These cursors either hold no collection state or manage their collection state
- // internally, so we acquire no locks.
//
- // While we only need to acquire locks in the case of a cursor which is *not* globally owned, we
- // need to create an AutoStatsTracker in either case. This is responsible for updating
- // statistics in CurOp and Top. We avoid using AutoGetCollectionForReadCommand because we may
- // need to drop and reacquire locks when the cursor is awaitData, but we don't want to update
- // the stats twice.
+ // - Cursors which read from a single collection, such as those generated via the find command.
+ // For these cursors, we hold the appropriate collection lock for the duration of the getMore
+ // using AutoGetCollectionForRead. These cursors have the 'kLockExternally' lock policy.
+ //
+ // - Cursors which may read from many collections, e.g. those generated via the aggregate
+ // command, or which do not read from a collection at all, e.g. those generated by the
+ // listIndexes command. We don't need to acquire locks to use these cursors, since they either
+ // manage locking themselves or don't access data protected by collection locks. These cursors
+ // have the 'kLocksInternally' lock policy.
//
- // Note that we acquire our locks before our ClientCursorPin, in order to ensure that the pin's
- // destructor is called before the lock's destructor (if there is one) so that the cursor
- // cleanup can occur under the lock.
+ // While we only need to acquire locks for 'kLockExternally' cursors, we need to create an
+ // AutoStatsTracker in either case. This is responsible for updating statistics in CurOp and
+ // Top. We avoid using AutoGetCollectionForReadCommand because we may need to drop and reacquire
+ // locks when the cursor is awaitData, but we don't want to update the stats twice.
UninterruptibleLockGuard noInterrupt(opCtx->lockState());
boost::optional<AutoGetCollectionForRead> readLock;
boost::optional<AutoStatsTracker> statsTracker;
- CursorManager* cursorManager;
- if (CursorManager::isGloballyManagedCursor(cursorid)) {
- cursorManager = CursorManager::getGlobalCursorManager();
+ // These are set in the QueryResult msg we return.
+ int resultFlags = ResultFlag_AwaitCapable;
- if (boost::optional<NamespaceString> nssForCurOp = nss.isGloballyManagedNamespace()
- ? nss.getTargetNSForGloballyManagedNamespace()
- : nss) {
- AutoGetDb autoDb(opCtx, nssForCurOp->db(), MODE_IS);
+ auto cursorManager = CursorManager::getGlobalCursorManager();
+ auto statusWithCursorPin = cursorManager->pinCursor(opCtx, cursorid);
+ if (statusWithCursorPin == ErrorCodes::CursorNotFound) {
+ return makeCursorNotFoundResponse();
+ }
+ uassertStatusOK(statusWithCursorPin.getStatus());
+ auto cursorPin = std::move(statusWithCursorPin.getValue());
+
+ if (cursorPin->lockPolicy() == ClientCursorParams::LockPolicy::kLocksInternally) {
+ if (!nss.isCollectionlessCursorNamespace()) {
+ AutoGetDb autoDb(opCtx, nss.db(), MODE_IS);
const auto profilingLevel = autoDb.getDb()
? boost::optional<int>{autoDb.getDb()->getProfilingLevel()}
: boost::none;
statsTracker.emplace(opCtx,
- *nssForCurOp,
+ nss,
Top::LockType::NotLocked,
AutoStatsTracker::LogMode::kUpdateTopAndCurop,
profilingLevel);
- auto view = autoDb.getDb()
- ? autoDb.getDb()->getViewCatalog()->lookup(opCtx, nssForCurOp->ns())
- : nullptr;
+ auto view = autoDb.getDb() ? autoDb.getDb()->getViewCatalog()->lookup(opCtx, nss.ns())
+ : nullptr;
uassert(
ErrorCodes::CommandNotSupportedOnView,
- str::stream() << "Namespace " << nssForCurOp->ns()
+ str::stream() << "Namespace " << nss.ns()
<< " is a view. OP_GET_MORE operations are not supported on views. "
<< "Only clients which support the getMore command can be used to "
"query views.",
@@ -297,10 +317,6 @@ Message getMore(OperationContext* opCtx,
AutoStatsTracker::LogMode::kUpdateTopAndCurop,
readLock->getDb() ? readLock->getDb()->getProfilingLevel()
: doNotChangeProfilingLevel);
- Collection* collection = readLock->getCollection();
- uassert(
- ErrorCodes::OperationFailed, "collection dropped between getMore calls", collection);
- cursorManager = collection->getCursorManager();
// This checks to make sure the operation is allowed on a replicated node. Since we are not
// passing in a query object (necessary to check SlaveOK query option), we allow reads
@@ -309,206 +325,183 @@ Message getMore(OperationContext* opCtx,
repl::ReplicationCoordinator::get(opCtx)->checkCanServeReadsFor(opCtx, nss, true));
}
- LOG(5) << "Running getMore, cursorid: " << cursorid;
-
- // A pin performs a CC lookup and if there is a CC, increments the CC's pin value so it
- // doesn't time out. Also informs ClientCursor that there is somebody actively holding the
- // CC, so don't delete it.
- auto ccPin = cursorManager->pinCursor(opCtx, cursorid);
-
- // These are set in the QueryResult msg we return.
- int resultFlags = ResultFlag_AwaitCapable;
-
std::uint64_t numResults = 0;
int startingResult = 0;
- const int InitialBufSize =
+ const int initialBufSize =
512 + sizeof(QueryResult::Value) + FindCommon::kMaxBytesToReturnToClientAtOnce;
- BufBuilder bb(InitialBufSize);
+ BufBuilder bb(initialBufSize);
bb.skip(sizeof(QueryResult::Value));
- if (!ccPin.isOK()) {
- if (ccPin == ErrorCodes::CursorNotFound) {
- cursorid = 0;
- resultFlags = ResultFlag_CursorNotFound;
- } else {
- uassertStatusOK(ccPin.getStatus());
- }
- } else {
- ClientCursor* cc = ccPin.getValue().getCursor();
-
- // Check for spoofing of the ns such that it does not match the one originally
- // there for the cursor.
- uassert(ErrorCodes::Unauthorized,
- str::stream() << "Requested getMore on namespace " << ns << ", but cursor "
- << cursorid
- << " belongs to namespace "
- << cc->nss().ns(),
- nss == cc->nss());
-
- // A user can only call getMore on their own cursor. If there were multiple users
- // authenticated when the cursor was created, then at least one of them must be
- // authenticated in order to run getMore on the cursor.
- uassert(ErrorCodes::Unauthorized,
- str::stream() << "cursor id " << cursorid
- << " was not created by the authenticated user",
- AuthorizationSession::get(opCtx->getClient())
- ->isCoauthorizedWith(cc->getAuthenticatedUsers()));
-
- *isCursorAuthorized = true;
-
- const auto replicationMode = repl::ReplicationCoordinator::get(opCtx)->getReplicationMode();
-
- if (replicationMode == repl::ReplicationCoordinator::modeReplSet &&
- cc->getReadConcernArgs().getLevel() == repl::ReadConcernLevel::kMajorityReadConcern) {
- opCtx->recoveryUnit()->setTimestampReadSource(
- RecoveryUnit::ReadSource::kMajorityCommitted);
- uassertStatusOK(opCtx->recoveryUnit()->obtainMajorityCommittedSnapshot());
- }
+ // Check for spoofing of the ns such that it does not match the one originally there for the
+ // cursor.
+ uassert(ErrorCodes::Unauthorized,
+ str::stream() << "Requested getMore on namespace " << ns << ", but cursor " << cursorid
+ << " belongs to namespace "
+ << cursorPin->nss().ns(),
+ nss == cursorPin->nss());
+
+ // A user can only call getMore on their own cursor. If there were multiple users authenticated
+ // when the cursor was created, then at least one of them must be authenticated in order to run
+ // getMore on the cursor.
+ uassert(ErrorCodes::Unauthorized,
+ str::stream() << "cursor id " << cursorid
+ << " was not created by the authenticated user",
+ AuthorizationSession::get(opCtx->getClient())
+ ->isCoauthorizedWith(cursorPin->getAuthenticatedUsers()));
+
+ *isCursorAuthorized = true;
+
+ const auto replicationMode = repl::ReplicationCoordinator::get(opCtx)->getReplicationMode();
+
+ if (replicationMode == repl::ReplicationCoordinator::modeReplSet &&
+ cursorPin->getReadConcernArgs().getLevel() ==
+ repl::ReadConcernLevel::kMajorityReadConcern) {
+ opCtx->recoveryUnit()->setTimestampReadSource(RecoveryUnit::ReadSource::kMajorityCommitted);
+ uassertStatusOK(opCtx->recoveryUnit()->obtainMajorityCommittedSnapshot());
+ }
- uassert(40548,
- "OP_GET_MORE operations are not supported on tailable aggregations. Only clients "
- "which support the getMore command can be used on tailable aggregations.",
- readLock || !cc->isAwaitData());
-
- // If the operation that spawned this cursor had a time limit set, apply leftover
- // time to this getmore.
- if (cc->getLeftoverMaxTimeMicros() < Microseconds::max()) {
- uassert(40136,
- "Illegal attempt to set operation deadline within DBDirectClient",
- !opCtx->getClient()->isInDirectClient());
- opCtx->setDeadlineAfterNowBy(cc->getLeftoverMaxTimeMicros(),
- ErrorCodes::MaxTimeMSExpired);
- }
- opCtx->checkForInterrupt(); // May trigger maxTimeAlwaysTimeOut fail point.
-
- // What number result are we starting at? Used to fill out the reply.
- startingResult = cc->nReturnedSoFar();
-
- uint64_t notifierVersion = 0;
- std::shared_ptr<CappedInsertNotifier> notifier;
- if (cc->isAwaitData()) {
- invariant(readLock->getCollection()->isCapped());
- // Retrieve the notifier which we will wait on until new data arrives. We make sure
- // to do this in the lock because once we drop the lock it is possible for the
- // collection to become invalid. The notifier itself will outlive the collection if
- // the collection is dropped, as we keep a shared_ptr to it.
- notifier = readLock->getCollection()->getCappedInsertNotifier();
-
- // Must get the version before we call generateBatch in case a write comes in after
- // that call and before we call wait on the notifier.
- notifierVersion = notifier->getVersion();
- }
+ uassert(40548,
+ "OP_GET_MORE operations are not supported on tailable aggregations. Only clients "
+ "which support the getMore command can be used on tailable aggregations.",
+ readLock || !cursorPin->isAwaitData());
- PlanExecutor* exec = cc->getExecutor();
- exec->reattachToOperationContext(opCtx);
- exec->restoreState();
+ // If the operation that spawned this cursor had a time limit set, apply leftover time to this
+ // getmore.
+ if (cursorPin->getLeftoverMaxTimeMicros() < Microseconds::max()) {
+ uassert(40136,
+ "Illegal attempt to set operation deadline within DBDirectClient",
+ !opCtx->getClient()->isInDirectClient());
+ opCtx->setDeadlineAfterNowBy(cursorPin->getLeftoverMaxTimeMicros(),
+ ErrorCodes::MaxTimeMSExpired);
+ }
+ opCtx->checkForInterrupt(); // May trigger maxTimeAlwaysTimeOut fail point.
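The leftover-time handling works in two halves: the remaining budget stored on the cursor becomes the deadline for this getMore, and whatever is left after execution is written back onto the cursor further down in this hunk via setLeftoverMaxTimeMicros. A minimal sketch of that rollover, with a hypothetical CursorBudget struct standing in for the real ClientCursor:

    #include <chrono>

    struct CursorBudget {
        std::chrono::microseconds leftover{std::chrono::microseconds::max()};
    };

    // Applies the cursor's leftover budget to one getMore and rolls the remainder back
    // onto the cursor for use by future getMore operations.
    std::chrono::microseconds runGetMoreWithBudget(CursorBudget& cursor,
                                                   std::chrono::microseconds timeSpent) {
        if (cursor.leftover == std::chrono::microseconds::max())
            return cursor.leftover;  // no time limit was set on the originating operation
        auto remaining = cursor.leftover > timeSpent ? cursor.leftover - timeSpent
                                                     : std::chrono::microseconds{0};
        cursor.leftover = remaining;
        return remaining;
    }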
- auto planSummary = Explain::getPlanSummary(exec);
- {
- stdx::lock_guard<Client> lk(*opCtx->getClient());
- curOp.setPlanSummary_inlock(planSummary);
-
- // Ensure that the original query object is available in the slow query log, profiler
- // and currentOp. Upconvert _query to resemble a getMore command, and set the original
- // command or upconverted legacy query in the originatingCommand field.
- curOp.setOpDescription_inlock(upconvertGetMoreEntry(nss, cursorid, ntoreturn));
- curOp.setOriginatingCommand_inlock(cc->getOriginatingCommandObj());
- // Update the generic cursor in curOp.
- curOp.setGenericCursor_inlock(cc->toGenericCursor());
- }
+ // What number result are we starting at? Used to fill out the reply.
+ startingResult = cursorPin->nReturnedSoFar();
+
+ uint64_t notifierVersion = 0;
+ std::shared_ptr<CappedInsertNotifier> notifier;
+ if (cursorPin->isAwaitData()) {
+ invariant(readLock->getCollection()->isCapped());
+ // Retrieve the notifier which we will wait on until new data arrives. We make sure to do
+ // this in the lock because once we drop the lock it is possible for the collection to
+ // become invalid. The notifier itself will outlive the collection if the collection is
+ // dropped, as we keep a shared_ptr to it.
+ notifier = readLock->getCollection()->getCappedInsertNotifier();
+
+ // Must get the version before we call generateBatch in case a write comes in after that
+ // call and before we call wait on the notifier.
+ notifierVersion = notifier->getVersion();
+ }
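The version captured here is the key to the await-data protocol later in the hunk: the batch is generated first, and only if it comes back empty at EOF does the thread wait for the notifier's version to move past the value read before generation, so an insert that lands in between is never missed. A self-contained sketch of a version-stamped notifier in that spirit (standard-library only, not the real CappedInsertNotifier):

    #include <chrono>
    #include <condition_variable>
    #include <cstdint>
    #include <mutex>

    class InsertNotifier {
    public:
        std::uint64_t getVersion() const {
            std::lock_guard<std::mutex> lk(_m);
            return _version;
        }
        void notifyInsert() {
            {
                std::lock_guard<std::mutex> lk(_m);
                ++_version;
            }
            _cv.notify_all();
        }
        // Returns true if the version advanced past 'seenVersion' before the deadline.
        bool waitUntil(std::uint64_t seenVersion,
                       std::chrono::steady_clock::time_point deadline) const {
            std::unique_lock<std::mutex> lk(_m);
            return _cv.wait_until(lk, deadline, [&] { return _version != seenVersion; });
        }

    private:
        mutable std::mutex _m;
        mutable std::condition_variable _cv;
        std::uint64_t _version = 0;
    };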
- PlanExecutor::ExecState state;
+ PlanExecutor* exec = cursorPin->getExecutor();
+ exec->reattachToOperationContext(opCtx);
+ exec->restoreState();
- // We report keysExamined and docsExamined to OpDebug for a given getMore operation. To
- // obtain these values we need to take a diff of the pre-execution and post-execution
- // metrics, as they accumulate over the course of a cursor's lifetime.
- PlanSummaryStats preExecutionStats;
- Explain::getSummaryStats(*exec, &preExecutionStats);
- if (MONGO_FAIL_POINT(legacyGetMoreWaitWithCursor)) {
- CurOpFailpointHelpers::waitWhileFailPointEnabled(
- &legacyGetMoreWaitWithCursor, opCtx, "legacyGetMoreWaitWithCursor", nullptr);
- }
+ auto planSummary = Explain::getPlanSummary(exec);
+ {
+ stdx::lock_guard<Client> lk(*opCtx->getClient());
+ curOp.setPlanSummary_inlock(planSummary);
+
+ // Ensure that the original query object is available in the slow query log, profiler and
+ // currentOp. Upconvert _query to resemble a getMore command, and set the original command
+ // or upconverted legacy query in the originatingCommand field.
+ curOp.setOpDescription_inlock(upconvertGetMoreEntry(nss, cursorid, ntoreturn));
+ curOp.setOriginatingCommand_inlock(cursorPin->getOriginatingCommandObj());
+ // Update the generic cursor in curOp.
+ curOp.setGenericCursor_inlock(cursorPin->toGenericCursor());
+ }
- generateBatch(ntoreturn, cc, &bb, &numResults, &state);
-
- // If this is an await data cursor, and we hit EOF without generating any results, then
- // we block waiting for new data to arrive.
- if (cc->isAwaitData() && state == PlanExecutor::IS_EOF && numResults == 0) {
- // Save the PlanExecutor and drop our locks.
- exec->saveState();
- readLock.reset();
-
- // Block waiting for data for up to 1 second. Time spent blocking is not counted towards
- // the total operation latency.
- curOp.pauseTimer();
- Seconds timeout(1);
- notifier->waitUntil(notifierVersion,
- opCtx->getServiceContext()->getPreciseClockSource()->now() +
- timeout);
- notifier.reset();
- curOp.resumeTimer();
-
- // Reacquiring locks.
- readLock.emplace(opCtx, nss);
- exec->restoreState();
-
- // We woke up because either the timed_wait expired, or there was more data. Either
- // way, attempt to generate another batch of results.
- generateBatch(ntoreturn, cc, &bb, &numResults, &state);
- }
+ PlanExecutor::ExecState state;
- PlanSummaryStats postExecutionStats;
- Explain::getSummaryStats(*exec, &postExecutionStats);
- postExecutionStats.totalKeysExamined -= preExecutionStats.totalKeysExamined;
- postExecutionStats.totalDocsExamined -= preExecutionStats.totalDocsExamined;
- curOp.debug().setPlanSummaryMetrics(postExecutionStats);
-
- // We do not report 'execStats' for aggregation or other globally managed cursors, both in
- // the original request and subsequent getMore. It would be useful to have this information
- // for an aggregation, but the source PlanExecutor could be destroyed before we know whether
- // we need execStats and we do not want to generate for all operations due to cost.
- if (!CursorManager::isGloballyManagedCursor(cursorid) && curOp.shouldDBProfile()) {
- BSONObjBuilder execStatsBob;
- Explain::getWinningPlanStats(exec, &execStatsBob);
- curOp.debug().execStats = execStatsBob.obj();
- }
+ // We report keysExamined and docsExamined to OpDebug for a given getMore operation. To obtain
+ // these values we need to take a diff of the pre-execution and post-execution metrics, as they
+ // accumulate over the course of a cursor's lifetime.
+ PlanSummaryStats preExecutionStats;
+ Explain::getSummaryStats(*exec, &preExecutionStats);
+ if (MONGO_FAIL_POINT(legacyGetMoreWaitWithCursor)) {
+ CurOpFailpointHelpers::waitWhileFailPointEnabled(
+ &legacyGetMoreWaitWithCursor, opCtx, "legacyGetMoreWaitWithCursor", nullptr);
+ }
- // Our two possible ClientCursorPin cleanup paths are:
- // 1) If the cursor is not going to be saved, we call deleteUnderlying() on the pin.
- // 2) If the cursor is going to be saved, we simply let the pin go out of scope. In this
- // case, the pin's destructor will be invoked, which will call release() on the pin.
- // Because our ClientCursorPin is declared after our lock is declared, this will happen
- // under the lock if any locking was necessary.
- if (!shouldSaveCursorGetMore(state, exec, cc->isTailable())) {
- ccPin.getValue().deleteUnderlying();
-
- // cc is now invalid, as is the executor
- cursorid = 0;
- cc = nullptr;
- curOp.debug().cursorExhausted = true;
-
- LOG(5) << "getMore NOT saving client cursor, ended with state "
- << PlanExecutor::statestr(state);
- } else {
- // Continue caching the ClientCursor.
- cc->incNReturnedSoFar(numResults);
- cc->incNBatches();
- exec->saveState();
- exec->detachFromOperationContext();
- LOG(5) << "getMore saving client cursor ended with state "
- << PlanExecutor::statestr(state);
-
- *exhaust = cc->queryOptions() & QueryOption_Exhaust;
-
- // We assume that cursors created through a DBDirectClient are always used from their
- // original OperationContext, so we do not need to move time to and from the cursor.
- if (!opCtx->getClient()->isInDirectClient()) {
- // If the getmore had a time limit, remaining time is "rolled over" back to the
- // cursor (for use by future getmore ops).
- cc->setLeftoverMaxTimeMicros(opCtx->getRemainingMaxTimeMicros());
- }
+ generateBatch(ntoreturn, cursorPin.getCursor(), &bb, &numResults, &state);
+
+ // If this is an await data cursor, and we hit EOF without generating any results, then we block
+ // waiting for new data to arrive.
+ if (cursorPin->isAwaitData() && state == PlanExecutor::IS_EOF && numResults == 0) {
+ // Save the PlanExecutor and drop our locks.
+ exec->saveState();
+ readLock.reset();
+
+ // Block waiting for data for up to 1 second. Time spent blocking is not counted towards the
+ // total operation latency.
+ curOp.pauseTimer();
+ Seconds timeout(1);
+ notifier->waitUntil(notifierVersion,
+ opCtx->getServiceContext()->getPreciseClockSource()->now() + timeout);
+ notifier.reset();
+ curOp.resumeTimer();
+
+ // Reacquiring locks.
+ readLock.emplace(opCtx, nss);
+ exec->restoreState();
+
+ // We woke up because either the timed_wait expired, or there was more data. Either way,
+ // attempt to generate another batch of results.
+ generateBatch(ntoreturn, cursorPin.getCursor(), &bb, &numResults, &state);
+ }
+
+ PlanSummaryStats postExecutionStats;
+ Explain::getSummaryStats(*exec, &postExecutionStats);
+ postExecutionStats.totalKeysExamined -= preExecutionStats.totalKeysExamined;
+ postExecutionStats.totalDocsExamined -= preExecutionStats.totalDocsExamined;
+ curOp.debug().setPlanSummaryMetrics(postExecutionStats);
+
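Because PlanSummaryStats accumulate over the whole life of a cursor, the per-getMore figures reported to OpDebug are obtained by subtracting the snapshot taken before execution from the one taken after, as the subtractions above do for keys and docs examined. A trivial sketch of that differencing, with a plain struct standing in for PlanSummaryStats:

    #include <cstddef>

    struct SummaryStats {
        std::size_t keysExamined = 0;
        std::size_t docsExamined = 0;
    };

    // Computes the contribution of a single getMore from lifetime counters.
    SummaryStats perCallDelta(const SummaryStats& before, const SummaryStats& after) {
        return {after.keysExamined - before.keysExamined,
                after.docsExamined - before.docsExamined};
    }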
+ // We do not report 'execStats' for aggregation or other cursors with the 'kLocksInternally'
+ // policy, both in the original request and subsequent getMore. It would be useful to have this
+ // info for an aggregation, but the source PlanExecutor could be destroyed before we know if we
+ // need 'execStats' and we do not want to generate the stats eagerly for all operations due to
+ // cost.
+ if (cursorPin->lockPolicy() != ClientCursorParams::LockPolicy::kLocksInternally &&
+ curOp.shouldDBProfile()) {
+ BSONObjBuilder execStatsBob;
+ Explain::getWinningPlanStats(exec, &execStatsBob);
+ curOp.debug().execStats = execStatsBob.obj();
+ }
+
+ // Our two possible ClientCursorPin cleanup paths are:
+ // 1) If the cursor is not going to be saved, we call deleteUnderlying() on the pin.
+ // 2) If the cursor is going to be saved, we simply let the pin go out of scope. In this case,
+ // the pin's destructor will be invoked, which will call release() on the pin. Because our
+ // ClientCursorPin is declared after our lock is declared, this will happen under the lock if
+ // any locking was necessary.
+ if (!shouldSaveCursorGetMore(state, exec, cursorPin->isTailable())) {
+ cursorPin.deleteUnderlying();
+
+        // The ClientCursor is now deleted, as is its executor.
+ cursorid = 0;
+ curOp.debug().cursorExhausted = true;
+
+ LOG(5) << "getMore NOT saving client cursor, ended with state "
+ << PlanExecutor::statestr(state);
+ } else {
+ // Continue caching the ClientCursor.
+ cursorPin->incNReturnedSoFar(numResults);
+ cursorPin->incNBatches();
+ exec->saveState();
+ exec->detachFromOperationContext();
+ LOG(5) << "getMore saving client cursor ended with state " << PlanExecutor::statestr(state);
+
+ *exhaust = cursorPin->queryOptions() & QueryOption_Exhaust;
+
+ // We assume that cursors created through a DBDirectClient are always used from their
+ // original OperationContext, so we do not need to move time to and from the cursor.
+ if (!opCtx->getClient()->isInDirectClient()) {
+ // If the getmore had a time limit, remaining time is "rolled over" back to the cursor
+ // (for use by future getmore ops).
+ cursorPin->setLeftoverMaxTimeMicros(opCtx->getRemainingMaxTimeMicros());
}
}
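The two cleanup paths described in the comment above boil down to an RAII choice: either the pin explicitly destroys the underlying cursor (exhausted or killed), or the pin simply goes out of scope and its destructor hands the still-registered cursor back for future getMore requests. A toy sketch of that shape, with hypothetical Cursor and release stand-ins rather than the real ClientCursorPin:

    #include <memory>

    struct Cursor {};

    class Pin {
    public:
        explicit Pin(std::unique_ptr<Cursor> c) : _cursor(std::move(c)) {}

        // Path 1: the cursor will not be saved; destroy it immediately.
        void deleteUnderlying() {
            _cursor.reset();
        }

        // Path 2: the pin goes out of scope with the cursor still alive; the destructor
        // releases it back to its manager (elided here) for future getMore requests.
        ~Pin() {
            if (_cursor)
                releaseToManager(std::move(_cursor));
        }

    private:
        static void releaseToManager(std::unique_ptr<Cursor>) { /* registration elided */ }
        std::unique_ptr<Cursor> _cursor;
    };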
@@ -676,13 +669,14 @@ std::string runQuery(OperationContext* opCtx,
const auto& readConcernArgs = repl::ReadConcernArgs::get(opCtx);
// Allocate a new ClientCursor and register it with the cursor manager.
- ClientCursorPin pinnedCursor = collection->getCursorManager()->registerCursor(
+ ClientCursorPin pinnedCursor = CursorManager::getGlobalCursorManager()->registerCursor(
opCtx,
{std::move(exec),
nss,
AuthorizationSession::get(opCtx->getClient())->getAuthenticatedUserNames(),
readConcernArgs,
- upconvertedQuery});
+ upconvertedQuery,
+ ClientCursorParams::LockPolicy::kLockExternally});
ccId = pinnedCursor.getCursor()->cursorid();
LOG(5) << "caching executor with cursorid " << ccId << " after returning " << numResults
diff --git a/src/mongo/db/repl/base_cloner_test_fixture.cpp b/src/mongo/db/repl/base_cloner_test_fixture.cpp
index 3f53cb8ddf9..8e5d8f3cb20 100644
--- a/src/mongo/db/repl/base_cloner_test_fixture.cpp
+++ b/src/mongo/db/repl/base_cloner_test_fixture.cpp
@@ -98,7 +98,7 @@ BSONObj BaseClonerTest::createListCollectionsResponse(CursorId cursorId, const B
BSONObj BaseClonerTest::createListIndexesResponse(CursorId cursorId,
const BSONArray& specs,
const char* batchFieldName) {
- return createCursorResponse(cursorId, "test.$cmd.listIndexes.coll", specs, batchFieldName);
+ return createCursorResponse(cursorId, "test.coll", specs, batchFieldName);
}
// static
diff --git a/src/mongo/db/repl/collection_cloner.cpp b/src/mongo/db/repl/collection_cloner.cpp
index 72df66633f1..393edf5e7c3 100644
--- a/src/mongo/db/repl/collection_cloner.cpp
+++ b/src/mongo/db/repl/collection_cloner.cpp
@@ -574,10 +574,14 @@ void CollectionCloner::_runQuery(const executor::TaskExecutor::CallbackArgs& cal
<< _sourceNss.ns());
stdx::unique_lock<stdx::mutex> lock(_mutex);
if (queryStatus.code() == ErrorCodes::OperationFailed ||
- queryStatus.code() == ErrorCodes::CursorNotFound) {
+ queryStatus.code() == ErrorCodes::CursorNotFound ||
+ queryStatus.code() == ErrorCodes::QueryPlanKilled) {
// With these errors, it's possible the collection was dropped while we were
// cloning. If so, we'll execute the drop during oplog application, so it's OK to
// just stop cloning.
+ //
+ // A 4.2 node should only ever raise QueryPlanKilled, but an older node could raise
+ // OperationFailed or CursorNotFound.
_verifyCollectionWasDropped(lock, queryStatus, onCompletionGuard);
return;
} else if (queryStatus.code() != ErrorCodes::NamespaceNotFound) {
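The widened condition above groups three error codes under one interpretation: QueryPlanKilled from a 4.2 donor, and OperationFailed or CursorNotFound from older donors, can all mean the source collection was dropped mid-clone, so all three route into drop verification instead of failing the clone. A compact sketch of that classification, with a local enum standing in for ErrorCodes:

    // Hypothetical stand-in for the relevant ErrorCodes values.
    enum class QueryError { CursorNotFound, OperationFailed, QueryPlanKilled, NamespaceNotFound, Other };

    // Errors that may simply mean the collection was dropped while it was being cloned.
    bool mayIndicateCollectionDrop(QueryError e) {
        return e == QueryError::CursorNotFound || e == QueryError::OperationFailed ||
            e == QueryError::QueryPlanKilled;
    }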
diff --git a/src/mongo/db/repl/collection_cloner_test.cpp b/src/mongo/db/repl/collection_cloner_test.cpp
index 66fae8cf6d7..3908a8d8501 100644
--- a/src/mongo/db/repl/collection_cloner_test.cpp
+++ b/src/mongo/db/repl/collection_cloner_test.cpp
@@ -1246,15 +1246,15 @@ protected:
/**
* Sets up a test for the CollectionCloner that simulates the collection being dropped while
- * copying the documents.
- * The DBClientConnection returns a CursorNotFound error to indicate a collection drop.
+ * copying the documents by making a query return the given error code.
+ *
+ * The DBClientConnection returns 'code' to indicate a collection drop.
*/
- void setUpVerifyCollectionWasDroppedTest() {
+ void setUpVerifyCollectionWasDroppedTest(ErrorCodes::Error code) {
// Pause the query so we can reliably wait for it to complete.
MockClientPauser pauser(_client);
// Return error response from the query.
- _client->setFailureForQuery(
- {ErrorCodes::CursorNotFound, "collection dropped while copying documents"});
+ _client->setFailureForQuery({code, "collection dropped while copying documents"});
ASSERT_OK(collectionCloner->startup());
{
executor::NetworkInterfaceMock::InNetworkGuard guard(getNet());
@@ -1284,6 +1284,39 @@ protected:
ASSERT_EQUALS(*options.uuid, unittest::assertGet(UUID::parse(firstElement)));
return noi;
}
+
+ /**
+     * Start cloning. While copying the collection, simulate a collection drop by having the
+     * DBClientConnection return error code 'collectionDropErrCode'.
+     *
+     * The CollectionCloner should run a find command on the collection by UUID. Simulate a
+     * successful find command with a drop-pending namespace in the response. The CollectionCloner
+     * should complete with a successful final status.
+ */
+ void runCloningSuccessfulWithCollectionDropTest(ErrorCodes::Error collectionDropErrCode) {
+ setUpVerifyCollectionWasDroppedTest(collectionDropErrCode);
+
+ // CollectionCloner should send a find command with the collection's UUID.
+ {
+ executor::NetworkInterfaceMock::InNetworkGuard guard(getNet());
+ auto noi = getVerifyCollectionDroppedRequest(getNet());
+
+ // Return a drop-pending namespace in the find response instead of the original
+ // collection name passed to CollectionCloner at construction.
+ repl::OpTime dropOpTime(Timestamp(Seconds(100), 0), 1LL);
+ auto dpns = nss.makeDropPendingNamespace(dropOpTime);
+ scheduleNetworkResponse(noi,
+ createCursorResponse(0, dpns.ns(), BSONArray(), "firstBatch"));
+ finishProcessingNetworkResponse();
+ }
+
+        // CollectionCloner treats a change of the collection state to drop-pending during cloning
+        // as a successful clone operation.
+ collectionCloner->join();
+ ASSERT_OK(getStatus());
+ ASSERT_FALSE(collectionCloner->isActive());
+ }
};
TEST_F(CollectionClonerRenamedBeforeStartTest, FirstRemoteCommandWithRenamedCollection) {
@@ -1379,49 +1412,32 @@ TEST_F(CollectionClonerRenamedBeforeStartTest, BeginCollectionWithUUID) {
ASSERT_TRUE(collectionCloner->isActive());
}
-/**
- * Start cloning.
- * While copying collection, simulate a collection drop by having the DBClientConnection return a
- * CursorNotFound error.
- * The CollectionCloner should run a find command on the collection by UUID.
- * Simulate successful find command with a drop-pending namespace in the response.
- * The CollectionCloner should complete with a successful final status.
- */
TEST_F(CollectionClonerRenamedBeforeStartTest,
- CloningIsSuccessfulIfCollectionWasDroppedWhileCopyingDocuments) {
- setUpVerifyCollectionWasDroppedTest();
-
- // CollectionCloner should send a find command with the collection's UUID.
- {
- executor::NetworkInterfaceMock::InNetworkGuard guard(getNet());
- auto noi = getVerifyCollectionDroppedRequest(getNet());
+ CloningIsSuccessfulIfCollectionWasDroppedWithCursorNotFoundWhileCopyingDocuments) {
+ runCloningSuccessfulWithCollectionDropTest(ErrorCodes::CursorNotFound);
+}
- // Return a drop-pending namespace in the find response instead of the original collection
- // name passed to CollectionCloner at construction.
- repl::OpTime dropOpTime(Timestamp(Seconds(100), 0), 1LL);
- auto dpns = nss.makeDropPendingNamespace(dropOpTime);
- scheduleNetworkResponse(noi, createCursorResponse(0, dpns.ns(), BSONArray(), "firstBatch"));
- finishProcessingNetworkResponse();
- }
+TEST_F(CollectionClonerRenamedBeforeStartTest,
+ CloningIsSuccessfulIfCollectionWasDroppedWithOperationFailedWhileCopyingDocuments) {
+ runCloningSuccessfulWithCollectionDropTest(ErrorCodes::OperationFailed);
+}
- // CollectionCloner treats a in collection state to drop-pending during cloning as a successful
- // clone operation.
- collectionCloner->join();
- ASSERT_OK(getStatus());
- ASSERT_FALSE(collectionCloner->isActive());
+TEST_F(CollectionClonerRenamedBeforeStartTest,
+ CloningIsSuccessfulIfCollectionWasDroppedWithQueryPlanKilledWhileCopyingDocuments) {
+ runCloningSuccessfulWithCollectionDropTest(ErrorCodes::QueryPlanKilled);
}
/**
- * Start cloning.
- * While copying collection, simulate a collection drop by having the DBClientConnection return a
- * CursorNotFound error.
- * The CollectionCloner should run a find command on the collection by UUID.
- * Shut the CollectionCloner down.
- * The CollectionCloner should return a CursorNotFound final status.
+ * Start cloning. While copying the collection, simulate a collection drop by having the
+ * DBClientConnection return a CursorNotFound error.
+ *
+ * The CollectionCloner should run a find command on the collection by UUID. Shut the
+ * CollectionCloner down. The CollectionCloner should return a final status corresponding to the
+ * error code from the DBClientConnection.
*/
TEST_F(CollectionClonerRenamedBeforeStartTest,
ShuttingDownCollectionClonerDuringCollectionDropVerificationReturnsCallbackCanceled) {
- setUpVerifyCollectionWasDroppedTest();
+ setUpVerifyCollectionWasDroppedTest(ErrorCodes::CursorNotFound);
// CollectionCloner should send a find command with the collection's UUID.
{
diff --git a/src/mongo/db/repl/databases_cloner_test.cpp b/src/mongo/db/repl/databases_cloner_test.cpp
index 40a3b39a744..4340c0a8d51 100644
--- a/src/mongo/db/repl/databases_cloner_test.cpp
+++ b/src/mongo/db/repl/databases_cloner_test.cpp
@@ -933,13 +933,11 @@ TEST_F(DBsClonerTest, SingleDatabaseCopiesCompletely) {
// count:a
{"count", BSON("n" << 1 << "ok" << 1)},
// listIndexes:a
- {
- "listIndexes",
- fromjson(str::stream()
- << "{ok:1, cursor:{id:NumberLong(0), ns:'a.$cmd.listIndexes.a', firstBatch:["
- "{v:"
- << OplogEntry::kOplogVersion
- << ", key:{_id:1}, name:'_id_', ns:'a.a'}]}}")},
+ {"listIndexes",
+ fromjson(str::stream() << "{ok:1, cursor:{id:NumberLong(0), ns:'a.a', firstBatch:["
+ "{v:"
+ << OplogEntry::kOplogVersion
+ << ", key:{_id:1}, name:'_id_', ns:'a.a'}]}}")},
// Clone Done
};
runCompleteClone(resps);
@@ -952,48 +950,45 @@ TEST_F(DBsClonerTest, TwoDatabasesCopiesCompletely) {
options2.uuid = UUID::gen();
_mockServer->assignCollectionUuid("a.a", *options1.uuid);
_mockServer->assignCollectionUuid("b.b", *options1.uuid);
- const Responses resps =
- {
- // Clone Start
- // listDatabases
- {"listDatabases", fromjson("{ok:1, databases:[{name:'a'}, {name:'b'}]}")},
- // listCollections for "a"
- {"listCollections",
- BSON("ok" << 1 << "cursor" << BSON("id" << 0ll << "ns"
- << "a.$cmd.listCollections"
- << "firstBatch"
- << BSON_ARRAY(BSON("name"
- << "a"
- << "options"
- << options1.toBSON()))))},
- // count:a
- {"count", BSON("n" << 1 << "ok" << 1)},
- // listIndexes:a
- {"listIndexes",
- fromjson(str::stream()
- << "{ok:1, cursor:{id:NumberLong(0), ns:'a.$cmd.listIndexes.a', firstBatch:["
- "{v:"
- << OplogEntry::kOplogVersion
- << ", key:{_id:1}, name:'_id_', ns:'a.a'}]}}")},
- // listCollections for "b"
- {"listCollections",
- BSON("ok" << 1 << "cursor" << BSON("id" << 0ll << "ns"
- << "b.$cmd.listCollections"
- << "firstBatch"
- << BSON_ARRAY(BSON("name"
- << "b"
- << "options"
- << options2.toBSON()))))},
- // count:b
- {"count", BSON("n" << 2 << "ok" << 1)},
- // listIndexes:b
- {"listIndexes",
- fromjson(str::stream()
- << "{ok:1, cursor:{id:NumberLong(0), ns:'b.$cmd.listIndexes.b', firstBatch:["
- "{v:"
- << OplogEntry::kOplogVersion
- << ", key:{_id:1}, name:'_id_', ns:'b.b'}]}}")},
- };
+ const Responses resps = {
+ // Clone Start
+ // listDatabases
+ {"listDatabases", fromjson("{ok:1, databases:[{name:'a'}, {name:'b'}]}")},
+ // listCollections for "a"
+ {"listCollections",
+ BSON("ok" << 1 << "cursor" << BSON("id" << 0ll << "ns"
+ << "a.$cmd.listCollections"
+ << "firstBatch"
+ << BSON_ARRAY(BSON("name"
+ << "a"
+ << "options"
+ << options1.toBSON()))))},
+ // count:a
+ {"count", BSON("n" << 1 << "ok" << 1)},
+ // listIndexes:a
+ {"listIndexes",
+ fromjson(str::stream() << "{ok:1, cursor:{id:NumberLong(0), ns:'a.a', firstBatch:["
+ "{v:"
+ << OplogEntry::kOplogVersion
+ << ", key:{_id:1}, name:'_id_', ns:'a.a'}]}}")},
+ // listCollections for "b"
+ {"listCollections",
+ BSON("ok" << 1 << "cursor" << BSON("id" << 0ll << "ns"
+ << "b.$cmd.listCollections"
+ << "firstBatch"
+ << BSON_ARRAY(BSON("name"
+ << "b"
+ << "options"
+ << options2.toBSON()))))},
+ // count:b
+ {"count", BSON("n" << 2 << "ok" << 1)},
+ // listIndexes:b
+ {"listIndexes",
+ fromjson(str::stream() << "{ok:1, cursor:{id:NumberLong(0), ns:'b.b', firstBatch:["
+ "{v:"
+ << OplogEntry::kOplogVersion
+ << ", key:{_id:1}, name:'_id_', ns:'b.b'}]}}")},
+ };
runCompleteClone(resps);
}
diff --git a/src/mongo/dbtests/cursor_manager_test.cpp b/src/mongo/dbtests/cursor_manager_test.cpp
index 9fd6890baed..08c072dcde0 100644
--- a/src/mongo/dbtests/cursor_manager_test.cpp
+++ b/src/mongo/dbtests/cursor_manager_test.cpp
@@ -114,7 +114,8 @@ public:
kTestNss,
{},
repl::ReadConcernArgs(repl::ReadConcernLevel::kLocalReadConcern),
- BSONObj()};
+ BSONObj(),
+ ClientCursorParams::LockPolicy::kLocksInternally};
}
ClientCursorPin makeCursor(OperationContext* opCtx) {
@@ -135,7 +136,7 @@ protected:
private:
ClockSourceMock* _clock;
- CursorManager _cursorManager{kTestNss};
+ CursorManager _cursorManager{NamespaceString{}};
};
class CursorManagerTestCustomOpCtx : public CursorManagerTest {
@@ -156,52 +157,12 @@ TEST_F(CursorManagerTest, GlobalCursorManagerShouldReportOwnershipOfCursorsItCre
NamespaceString{"test.collection"},
{},
repl::ReadConcernArgs(repl::ReadConcernLevel::kLocalReadConcern),
- BSONObj()});
+ BSONObj(),
+ ClientCursorParams::LockPolicy::kLocksInternally});
ASSERT_TRUE(CursorManager::isGloballyManagedCursor(cursorPin.getCursor()->cursorid()));
}
}
-TEST_F(CursorManagerTest,
- CursorsFromCollectionCursorManagerShouldNotReportBeingManagedByGlobalCursorManager) {
- CursorManager* cursorManager = useCursorManager();
- auto opCtx = cc().makeOperationContext();
- for (int i = 0; i < 1000; i++) {
- auto cursorPin = cursorManager->registerCursor(
- _opCtx.get(),
- {makeFakePlanExecutor(),
- kTestNss,
- {},
- repl::ReadConcernArgs(repl::ReadConcernLevel::kLocalReadConcern),
- BSONObj()});
- ASSERT_FALSE(CursorManager::isGloballyManagedCursor(cursorPin.getCursor()->cursorid()));
- }
-}
-
-uint32_t extractLeading32Bits(CursorId cursorId) {
- return static_cast<uint32_t>((cursorId & 0xFFFFFFFF00000000) >> 32);
-}
-
-TEST_F(CursorManagerTest,
- AllCursorsFromCollectionCursorManagerShouldContainIdentical32BitPrefixes) {
- CursorManager* cursorManager = useCursorManager();
- boost::optional<uint32_t> prefix;
- for (int i = 0; i < 1000; i++) {
- auto cursorPin = cursorManager->registerCursor(
- _opCtx.get(),
- {makeFakePlanExecutor(),
- kTestNss,
- {},
- repl::ReadConcernArgs(repl::ReadConcernLevel::kLocalReadConcern),
- BSONObj()});
- auto cursorId = cursorPin.getCursor()->cursorid();
- if (prefix) {
- ASSERT_EQ(*prefix, extractLeading32Bits(cursorId));
- } else {
- prefix = extractLeading32Bits(cursorId);
- }
- }
-}
-
/**
* Tests that invalidating a cursor without dropping the collection while the cursor is not in use
* will keep the cursor registered. After being invalidated, pinning the cursor should take
@@ -216,7 +177,8 @@ TEST_F(CursorManagerTest, InvalidateCursor) {
kTestNss,
{},
repl::ReadConcernArgs(repl::ReadConcernLevel::kLocalReadConcern),
- BSONObj()});
+ BSONObj(),
+ ClientCursorParams::LockPolicy::kLocksInternally});
auto cursorId = cursorPin.getCursor()->cursorid();
cursorPin.release();
@@ -247,7 +209,8 @@ TEST_F(CursorManagerTest, InvalidateCursorWithDrop) {
kTestNss,
{},
repl::ReadConcernArgs(repl::ReadConcernLevel::kLocalReadConcern),
- BSONObj()});
+ BSONObj(),
+ ClientCursorParams::LockPolicy::kLocksInternally});
auto cursorId = cursorPin.getCursor()->cursorid();
cursorPin.release();
@@ -275,7 +238,8 @@ TEST_F(CursorManagerTest, InvalidatePinnedCursor) {
kTestNss,
{},
repl::ReadConcernArgs(repl::ReadConcernLevel::kLocalReadConcern),
- BSONObj()});
+ BSONObj(),
+ ClientCursorParams::LockPolicy::kLocksInternally});
// If the cursor is pinned, it sticks around, even after invalidation.
ASSERT_EQUALS(1U, cursorManager->numCursors());
@@ -308,7 +272,8 @@ TEST_F(CursorManagerTest, ShouldBeAbleToKillPinnedCursor) {
kTestNss,
{},
repl::ReadConcernArgs(repl::ReadConcernLevel::kLocalReadConcern),
- BSONObj()});
+ BSONObj(),
+ ClientCursorParams::LockPolicy::kLocksInternally});
auto cursorId = cursorPin.getCursor()->cursorid();
ASSERT_OK(cursorManager->killCursor(_opCtx.get(), cursorId, shouldAudit));
@@ -332,7 +297,8 @@ TEST_F(CursorManagerTest, ShouldBeAbleToKillPinnedCursorMultiClient) {
kTestNss,
{},
repl::ReadConcernArgs(repl::ReadConcernLevel::kLocalReadConcern),
- BSONObj()});
+ BSONObj(),
+ ClientCursorParams::LockPolicy::kLocksInternally});
auto cursorId = cursorPin.getCursor()->cursorid();
@@ -366,7 +332,8 @@ TEST_F(CursorManagerTest, InactiveCursorShouldTimeout) {
NamespaceString{"test.collection"},
{},
repl::ReadConcernArgs(repl::ReadConcernLevel::kLocalReadConcern),
- BSONObj()});
+ BSONObj(),
+ ClientCursorParams::LockPolicy::kLocksInternally});
ASSERT_EQ(0UL, cursorManager->timeoutCursors(_opCtx.get(), Date_t()));
@@ -379,7 +346,8 @@ TEST_F(CursorManagerTest, InactiveCursorShouldTimeout) {
NamespaceString{"test.collection"},
{},
repl::ReadConcernArgs(repl::ReadConcernLevel::kLocalReadConcern),
- BSONObj()});
+ BSONObj(),
+ ClientCursorParams::LockPolicy::kLocksInternally});
ASSERT_EQ(1UL, cursorManager->timeoutCursors(_opCtx.get(), Date_t::max()));
ASSERT_EQ(0UL, cursorManager->numCursors());
}
@@ -397,7 +365,8 @@ TEST_F(CursorManagerTest, InactivePinnedCursorShouldNotTimeout) {
NamespaceString{"test.collection"},
{},
repl::ReadConcernArgs(repl::ReadConcernLevel::kLocalReadConcern),
- BSONObj()});
+ BSONObj(),
+ ClientCursorParams::LockPolicy::kLocksInternally});
// The pin is still in scope, so it should not time out.
clock->advance(getDefaultCursorTimeoutMillis());
@@ -418,7 +387,8 @@ TEST_F(CursorManagerTest, InactiveKilledCursorsShouldTimeout) {
NamespaceString{"test.collection"},
{},
repl::ReadConcernArgs(repl::ReadConcernLevel::kLocalReadConcern),
- BSONObj()});
+ BSONObj(),
+ ClientCursorParams::LockPolicy::kLocksInternally});
cursorPin.release();
const bool collectionGoingAway = false;
cursorManager->invalidateAll(
@@ -445,7 +415,8 @@ TEST_F(CursorManagerTest, InactiveKilledCursorsThatAreStillPinnedShouldNotTimeou
NamespaceString{"test.collection"},
{},
repl::ReadConcernArgs(repl::ReadConcernLevel::kLocalReadConcern),
- BSONObj()});
+ BSONObj(),
+ ClientCursorParams::LockPolicy::kLocksInternally});
const bool collectionGoingAway = false;
cursorManager->invalidateAll(
_opCtx.get(), collectionGoingAway, "KilledCursorsShouldTimeoutTest");
@@ -471,7 +442,8 @@ TEST_F(CursorManagerTest, UsingACursorShouldUpdateTimeOfLastUse) {
kTestNss,
{},
repl::ReadConcernArgs(repl::ReadConcernLevel::kLocalReadConcern),
- BSONObj()});
+ BSONObj(),
+ ClientCursorParams::LockPolicy::kLocksInternally});
auto usedCursorId = cursorPin.getCursor()->cursorid();
cursorPin.release();
@@ -482,7 +454,8 @@ TEST_F(CursorManagerTest, UsingACursorShouldUpdateTimeOfLastUse) {
kTestNss,
{},
repl::ReadConcernArgs(repl::ReadConcernLevel::kLocalReadConcern),
- BSONObj()});
+ BSONObj(),
+ ClientCursorParams::LockPolicy::kLocksInternally});
// Advance the clock to simulate time passing.
clock->advance(Milliseconds(1));
@@ -517,7 +490,8 @@ TEST_F(CursorManagerTest, CursorShouldNotTimeOutUntilIdleForLongEnoughAfterBeing
kTestNss,
{},
repl::ReadConcernArgs(repl::ReadConcernLevel::kLocalReadConcern),
- BSONObj()});
+ BSONObj(),
+ ClientCursorParams::LockPolicy::kLocksInternally});
// Advance the clock to simulate time passing.
clock->advance(getDefaultCursorTimeoutMillis() + Milliseconds(1));
diff --git a/src/mongo/dbtests/plan_executor_invalidation_test.cpp b/src/mongo/dbtests/plan_executor_invalidation_test.cpp
index b79ab4f5e8f..ad4bd74b9d9 100644
--- a/src/mongo/dbtests/plan_executor_invalidation_test.cpp
+++ b/src/mongo/dbtests/plan_executor_invalidation_test.cpp
@@ -119,6 +119,12 @@ public:
return _ctx->db()->getCollection(&_opCtx, nss);
}
+ void truncateCollection(Collection* collection) const {
+ WriteUnitOfWork wunit(&_opCtx);
+ ASSERT_OK(collection->truncate(&_opCtx));
+ wunit.commit();
+ }
+
// Order of these is important for initialization
const ServiceContext::UniqueOperationContext _opCtxPtr = cc().makeOperationContext();
OperationContext& _opCtx = *_opCtxPtr;
@@ -400,4 +406,44 @@ TEST_F(PlanExecutorInvalidationTest, CollScanDiesOnRestartCatalog) {
ASSERT_THROWS_CODE(exec->restoreState(), DBException, ErrorCodes::QueryPlanKilled);
}
+TEST_F(PlanExecutorInvalidationTest, IxscanDiesWhenTruncateCollectionDropsAllIndices) {
+ BSONObj keyPattern = BSON("foo" << 1);
+ ASSERT_OK(dbtests::createIndex(&_opCtx, nss.ns(), keyPattern));
+
+ auto exec = makeIxscanPlan(keyPattern, BSON("foo" << 0), BSON("foo" << N()));
+
+ // Partially scan the index.
+ BSONObj obj;
+ for (int i = 0; i < 10; ++i) {
+ ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&obj, NULL));
+ ASSERT_EQUALS(i, obj.firstElement().numberInt());
+ }
+
+ // Call truncate() on the Collection during yield, and verify that yield recovery throws the
+ // expected error code.
+ exec->saveState();
+ truncateCollection(collection());
+ ASSERT_THROWS_CODE(exec->restoreState(), DBException, ErrorCodes::QueryPlanKilled);
+}
+
+TEST_F(PlanExecutorInvalidationTest, CollScanExecutorSurvivesCollectionTruncate) {
+ auto exec = getCollscan();
+
+ // Partially scan the collection.
+ BSONObj obj;
+ for (int i = 0; i < 10; ++i) {
+ ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&obj, NULL));
+ ASSERT_EQUALS(i, obj["foo"].numberInt());
+ }
+
+ // Call truncate() on the Collection during yield. The PlanExecutor should be restored
+ // successfully.
+ exec->saveState();
+ truncateCollection(collection());
+ exec->restoreState();
+
+ // Since all documents in the collection have been deleted, the PlanExecutor should issue EOF.
+ ASSERT_EQUALS(PlanExecutor::IS_EOF, exec->getNext(&obj, NULL));
+}
+
} // namespace mongo
diff --git a/src/mongo/dbtests/querytests.cpp b/src/mongo/dbtests/querytests.cpp
index 5686b2f3836..5c7441c13fc 100644
--- a/src/mongo/dbtests/querytests.cpp
+++ b/src/mongo/dbtests/querytests.cpp
@@ -282,10 +282,10 @@ public:
cursor.reset();
{
- // Check internal server handoff to getmore.
- dbtests::WriteContextForTests ctx(&_opCtx, ns);
+ // Check that a cursor has been registered with the global cursor manager, and has
+ // already returned its first batch of results.
auto pinnedCursor = unittest::assertGet(
- ctx.getCollection()->getCursorManager()->pinCursor(&_opCtx, cursorId));
+ CursorManager::getGlobalCursorManager()->pinCursor(&_opCtx, cursorId));
ASSERT_EQUALS(std::uint64_t(2), pinnedCursor.getCursor()->nReturnedSoFar());
}
@@ -348,6 +348,8 @@ public:
_client.dropCollection("unittests.querytests.GetMoreInvalidRequest");
}
void run() {
+ auto startNumCursors = CursorManager::getGlobalCursorManager()->numCursors();
+
// Create a collection with some data.
const char* ns = "unittests.querytests.GetMoreInvalidRequest";
for (int i = 0; i < 1000; ++i) {
@@ -366,20 +368,16 @@ public:
++count;
}
- // Send a get more with a namespace that is incorrect ('spoofed') for this cursor id.
- // This is the invalaid get more request described in the comment preceding this class.
+ // Send a getMore with a namespace that is incorrect ('spoofed') for this cursor id.
ASSERT_THROWS(
_client.getMore("unittests.querytests.GetMoreInvalidRequest_WRONG_NAMESPACE_FOR_CURSOR",
cursor->getCursorId()),
AssertionException);
- // Check that the cursor still exists
- {
- AutoGetCollectionForReadCommand ctx(&_opCtx, NamespaceString(ns));
- ASSERT(1 == ctx.getCollection()->getCursorManager()->numCursors());
- ASSERT_OK(
- ctx.getCollection()->getCursorManager()->pinCursor(&_opCtx, cursorId).getStatus());
- }
+ // Check that the cursor still exists.
+ ASSERT_EQ(startNumCursors + 1, CursorManager::getGlobalCursorManager()->numCursors());
+ ASSERT_OK(
+ CursorManager::getGlobalCursorManager()->pinCursor(&_opCtx, cursorId).getStatus());
// Check that the cursor can be iterated until all documents are returned.
while (cursor->more()) {
@@ -387,6 +385,9 @@ public:
++count;
}
ASSERT_EQUALS(1000, count);
+
+ // The cursor should no longer exist, since we exhausted it.
+ ASSERT_EQ(startNumCursors, CursorManager::getGlobalCursorManager()->numCursors());
}
};
diff --git a/src/mongo/s/commands/commands_public.cpp b/src/mongo/s/commands/commands_public.cpp
index 207f438c746..eb0a9f75704 100644
--- a/src/mongo/s/commands/commands_public.cpp
+++ b/src/mongo/s/commands/commands_public.cpp
@@ -498,12 +498,7 @@ public:
const auto routingInfo =
uassertStatusOK(Grid::get(opCtx)->catalogCache()->getCollectionRoutingInfo(opCtx, nss));
- return cursorCommandPassthrough(opCtx,
- nss.db(),
- routingInfo.db(),
- cmdObj,
- NamespaceString::makeListIndexesNSS(nss.db(), nss.coll()),
- &result);
+ return cursorCommandPassthrough(opCtx, nss.db(), routingInfo.db(), cmdObj, nss, &result);
}
} cmdListIndexes;