author    Charlie Swanson <charlie.swanson@mongodb.com>  2017-04-05 11:35:23 -0400
committer Charlie Swanson <charlie.swanson@mongodb.com>  2017-04-13 16:15:20 -0400
commit    cc954e9e1d88b30d1ab89ee3bbbd9db0bb15263d (patch)
tree      37df000f0d37d17bc82d5d1ad5436b4911249e4b /src/mongo/dbtests
parent    b02b7f7bb78d4fd0bb006591769faaa216e6f8a7 (diff)
download  mongo-cc954e9e1d88b30d1ab89ee3bbbd9db0bb15263d.tar.gz
SERVER-25694 Eliminate race in PlanExecutor cleanup.
Ensures that a collection lock is held in at least MODE_IS while deregistering a PlanExecutor from the cursor manager. Introduces new PlanExecutor::dispose() and ClientCursor::dispose() methods that must be called before those classes are destroyed, and ensures they are called. These calls thread an OperationContext all the way through to DocumentSource::dispose() for each stage in a Pipeline, giving DocumentSourceCursor a chance to acquire locks and deregister its PlanExecutor if necessary.
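A minimal sketch of the dispose-before-destruction contract described above, with simplified stand-ins for OperationContext and the invariant check (these are assumptions for illustration, not the server's actual definitions):

#include <cassert>
#include <memory>

struct OperationContext {};  // stand-in for the real mongo::OperationContext

class PlanExecutor {
public:
    struct Deleter {
        void operator()(PlanExecutor* exec) const {
            // The new contract: dispose() must have run before destruction.
            assert(exec->_disposed && "PlanExecutor destroyed without dispose()");
            delete exec;
        }
    };

    // Deregisters the executor from the cursor manager while the caller holds
    // the collection lock in at least MODE_IS; in the real code the opCtx is
    // threaded through to DocumentSource::dispose() for each pipeline stage.
    void dispose(OperationContext* opCtx) {
        (void)opCtx;
        _disposed = true;
    }

private:
    bool _disposed = false;
};

// Factory functions return this type, so a forgotten dispose() fails loudly
// instead of racing with collection cleanup.
using UniquePlanExecutor = std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>;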
Diffstat (limited to 'src/mongo/dbtests')
-rw-r--r--  src/mongo/dbtests/documentsourcetests.cpp     |   7
-rw-r--r--  src/mongo/dbtests/executor_registry.cpp       | 139
-rw-r--r--  src/mongo/dbtests/plan_ranking.cpp            |   2
-rw-r--r--  src/mongo/dbtests/query_plan_executor.cpp     | 178
-rw-r--r--  src/mongo/dbtests/query_stage_cached_plan.cpp |   4
-rw-r--r--  src/mongo/dbtests/query_stage_collscan.cpp    |  12
-rw-r--r--  src/mongo/dbtests/query_stage_merge_sort.cpp  |  32
-rw-r--r--  src/mongo/dbtests/query_stage_multiplan.cpp   |  17
-rw-r--r--  src/mongo/dbtests/query_stage_sort.cpp        |  17
-rw-r--r--  src/mongo/dbtests/query_stage_subplan.cpp     |  12
-rw-r--r--  src/mongo/dbtests/query_stage_tests.cpp       |   4
-rw-r--r--  src/mongo/dbtests/querytests.cpp              |  30
12 files changed, 232 insertions(+), 222 deletions(-)
diff --git a/src/mongo/dbtests/documentsourcetests.cpp b/src/mongo/dbtests/documentsourcetests.cpp
index a6588ffc6b2..1db6048e282 100644
--- a/src/mongo/dbtests/documentsourcetests.cpp
+++ b/src/mongo/dbtests/documentsourcetests.cpp
@@ -105,7 +105,7 @@ protected:
&_opCtx, std::move(qr), ExtensionsCallbackDisallowExtensions()));
auto exec = uassertStatusOK(
- getExecutor(&_opCtx, ctx.getCollection(), std::move(cq), PlanExecutor::YIELD_MANUAL));
+ getExecutor(&_opCtx, ctx.getCollection(), std::move(cq), PlanExecutor::NO_YIELD));
exec->saveState();
_source = DocumentSourceCursor::create(ctx.getCollection(), std::move(exec), _ctx);
@@ -270,6 +270,7 @@ public:
void run() {
createSource(BSON("$natural" << 1));
ASSERT_EQ(source()->getOutputSorts().size(), 0U);
+ source()->dispose();
}
};
@@ -281,6 +282,7 @@ public:
ASSERT_EQ(source()->getOutputSorts().size(), 1U);
ASSERT_EQ(source()->getOutputSorts().count(BSON("a" << 1)), 1U);
+ source()->dispose();
}
};
@@ -292,6 +294,7 @@ public:
ASSERT_EQ(source()->getOutputSorts().size(), 1U);
ASSERT_EQ(source()->getOutputSorts().count(BSON("a" << -1)), 1U);
+ source()->dispose();
}
};
@@ -304,6 +307,7 @@ public:
ASSERT_EQ(source()->getOutputSorts().size(), 2U);
ASSERT_EQ(source()->getOutputSorts().count(BSON("a" << 1)), 1U);
ASSERT_EQ(source()->getOutputSorts().count(BSON("a" << 1 << "b" << -1)), 1U);
+ source()->dispose();
}
};
@@ -338,6 +342,7 @@ public:
ASSERT_FALSE(explainResult["$cursor"]["executionStats"].missing());
ASSERT_FALSE(explainResult["$cursor"]["executionStats"]["allPlansExecution"].missing());
}
+ source()->dispose();
}
};
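The recurring change in this file: every test that creates a DocumentSourceCursor must now dispose it before teardown, since destruction alone would violate the dispose-before-destruction contract. Condensed from the first hunk above:

void run() {
    createSource(BSON("$natural" << 1));
    ASSERT_EQ(source()->getOutputSorts().size(), 0U);
    // Required before the fixture destroys the cursor source.
    source()->dispose();
}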
diff --git a/src/mongo/dbtests/executor_registry.cpp b/src/mongo/dbtests/executor_registry.cpp
index cfe2b68efb6..46ec9c1e3a0 100644
--- a/src/mongo/dbtests/executor_registry.cpp
+++ b/src/mongo/dbtests/executor_registry.cpp
@@ -27,8 +27,8 @@
*/
/**
- * This file tests PlanExecutor forced yielding, ClientCursor::registerExecutor, and
- * ClientCursor::deregisterExecutor.
+ * This file tests that a manually yielded PlanExecutor will still receive invalidations and be
+ * marked as killed by events like collection drops.
*/
#include "mongo/platform/basic.h"
@@ -68,7 +68,7 @@ public:
/**
* Return a plan executor that is going over the collection in nss.ns().
*/
- PlanExecutor* getCollscan() {
+ std::unique_ptr<PlanExecutor, PlanExecutor::Deleter> getCollscan() {
unique_ptr<WorkingSet> ws(new WorkingSet());
CollectionScanParams params;
params.collection = collection();
@@ -91,22 +91,7 @@ public:
_ctx->db()->getCollection(&_opCtx, nss),
PlanExecutor::YIELD_MANUAL);
ASSERT_OK(statusWithPlanExecutor.getStatus());
- return statusWithPlanExecutor.getValue().release();
- }
-
- void registerExecutor(PlanExecutor* exec) {
- WriteUnitOfWork wuow(&_opCtx);
- _ctx->db()->getOrCreateCollection(&_opCtx, nss)->getCursorManager()->registerExecutor(exec);
- wuow.commit();
- }
-
- void deregisterExecutor(PlanExecutor* exec) {
- WriteUnitOfWork wuow(&_opCtx);
- _ctx->db()
- ->getOrCreateCollection(&_opCtx, nss)
- ->getCursorManager()
- ->deregisterExecutor(exec);
- wuow.commit();
+ return std::move(statusWithPlanExecutor.getValue());
}
int N() {
@@ -125,7 +110,7 @@ public:
};
-// Test that a registered runner receives invalidation notifications.
+// Test that a registered PlanExecutor receives invalidation notifications.
class ExecutorRegistryDiskLocInvalid : public ExecutorRegistryBase {
public:
void run() {
@@ -133,18 +118,17 @@ public:
return;
}
- unique_ptr<PlanExecutor> run(getCollscan());
+ auto exec = getCollscan();
BSONObj obj;
// Read some of it.
for (int i = 0; i < 10; ++i) {
- ASSERT_EQUALS(PlanExecutor::ADVANCED, run->getNext(&obj, NULL));
+ ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&obj, NULL));
ASSERT_EQUALS(i, obj["foo"].numberInt());
}
// Register it.
- run->saveState();
- registerExecutor(run.get());
+ exec->saveState();
// At this point it's safe to yield. forceYield would do that. Let's now simulate some
// stuff going on in the yield.
@@ -154,174 +138,137 @@ public:
// At this point, we're done yielding. We recover our lock.
- // Unregister the runner.
- deregisterExecutor(run.get());
-
// And clean up anything that happened before.
- run->restoreState();
+ exec->restoreState();
- // Make sure that the runner moved forward over the deleted data. We don't see foo==10
+ // Make sure that the PlanExecutor moved forward over the deleted data. We don't see
+ // foo==10
// or foo==11.
for (int i = 12; i < N(); ++i) {
- ASSERT_EQUALS(PlanExecutor::ADVANCED, run->getNext(&obj, NULL));
+ ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&obj, NULL));
ASSERT_EQUALS(i, obj["foo"].numberInt());
}
- ASSERT_EQUALS(PlanExecutor::IS_EOF, run->getNext(&obj, NULL));
+ ASSERT_EQUALS(PlanExecutor::IS_EOF, exec->getNext(&obj, NULL));
}
};
-// Test that registered runners are killed when their collection is dropped.
+// Test that registered PlanExecutors are killed when their collection is dropped.
class ExecutorRegistryDropCollection : public ExecutorRegistryBase {
public:
void run() {
- unique_ptr<PlanExecutor> run(getCollscan());
+ auto exec = getCollscan();
BSONObj obj;
// Read some of it.
for (int i = 0; i < 10; ++i) {
- ASSERT_EQUALS(PlanExecutor::ADVANCED, run->getNext(&obj, NULL));
+ ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&obj, NULL));
ASSERT_EQUALS(i, obj["foo"].numberInt());
}
// Save state and register.
- run->saveState();
- registerExecutor(run.get());
+ exec->saveState();
// Drop a collection that's not ours.
_client.dropCollection("unittests.someboguscollection");
// Unregister and restore state.
- deregisterExecutor(run.get());
- run->restoreState();
+ exec->restoreState();
- ASSERT_EQUALS(PlanExecutor::ADVANCED, run->getNext(&obj, NULL));
+ ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&obj, NULL));
ASSERT_EQUALS(10, obj["foo"].numberInt());
- // Save state and register.
- run->saveState();
- registerExecutor(run.get());
+ exec->saveState();
- // Drop our collection.
_client.dropCollection(nss.ns());
- // Unregister and restore state.
- deregisterExecutor(run.get());
- run->restoreState();
+ exec->restoreState();
// PlanExecutor was killed.
- ASSERT_EQUALS(PlanExecutor::DEAD, run->getNext(&obj, NULL));
+ ASSERT_EQUALS(PlanExecutor::DEAD, exec->getNext(&obj, NULL));
}
};
-// Test that registered runners are killed when all indices are dropped on the collection.
+// Test that registered PlanExecutors are killed when all indices are dropped on the collection.
class ExecutorRegistryDropAllIndices : public ExecutorRegistryBase {
public:
void run() {
- unique_ptr<PlanExecutor> run(getCollscan());
+ auto exec = getCollscan();
BSONObj obj;
ASSERT_OK(dbtests::createIndex(&_opCtx, nss.ns(), BSON("foo" << 1)));
// Read some of it.
for (int i = 0; i < 10; ++i) {
- ASSERT_EQUALS(PlanExecutor::ADVANCED, run->getNext(&obj, NULL));
+ ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&obj, NULL));
ASSERT_EQUALS(i, obj["foo"].numberInt());
}
- // Save state and register.
- run->saveState();
- registerExecutor(run.get());
-
- // Drop all indices.
+ exec->saveState();
_client.dropIndexes(nss.ns());
-
- // Unregister and restore state.
- deregisterExecutor(run.get());
- run->restoreState();
-
- // PlanExecutor was killed.
- ASSERT_EQUALS(PlanExecutor::DEAD, run->getNext(&obj, NULL));
+ exec->restoreState();
+ ASSERT_EQUALS(PlanExecutor::DEAD, exec->getNext(&obj, NULL));
}
};
-// Test that registered runners are killed when an index is dropped on the collection.
+// Test that registered PlanExecutors are killed when an index is dropped on the collection.
class ExecutorRegistryDropOneIndex : public ExecutorRegistryBase {
public:
void run() {
- unique_ptr<PlanExecutor> run(getCollscan());
+ auto exec = getCollscan();
BSONObj obj;
ASSERT_OK(dbtests::createIndex(&_opCtx, nss.ns(), BSON("foo" << 1)));
// Read some of it.
for (int i = 0; i < 10; ++i) {
- ASSERT_EQUALS(PlanExecutor::ADVANCED, run->getNext(&obj, NULL));
+ ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&obj, NULL));
ASSERT_EQUALS(i, obj["foo"].numberInt());
}
- // Save state and register.
- run->saveState();
- registerExecutor(run.get());
-
- // Drop a specific index.
+ exec->saveState();
_client.dropIndex(nss.ns(), BSON("foo" << 1));
-
- // Unregister and restore state.
- deregisterExecutor(run.get());
- run->restoreState();
-
- // PlanExecutor was killed.
- ASSERT_EQUALS(PlanExecutor::DEAD, run->getNext(&obj, NULL));
+ exec->restoreState();
+ ASSERT_EQUALS(PlanExecutor::DEAD, exec->getNext(&obj, NULL));
}
};
-// Test that registered runners are killed when their database is dropped.
+// Test that registered PlanExecutors are killed when their database is dropped.
class ExecutorRegistryDropDatabase : public ExecutorRegistryBase {
public:
void run() {
- unique_ptr<PlanExecutor> run(getCollscan());
+ auto exec = getCollscan();
BSONObj obj;
// Read some of it.
for (int i = 0; i < 10; ++i) {
- ASSERT_EQUALS(PlanExecutor::ADVANCED, run->getNext(&obj, NULL));
+ ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&obj, NULL));
ASSERT_EQUALS(i, obj["foo"].numberInt());
}
- // Save state and register.
- run->saveState();
- registerExecutor(run.get());
+ exec->saveState();
// Drop a DB that's not ours. We can't have a lock at all to do this as dropping a DB
// requires a "global write lock."
_ctx.reset();
_client.dropDatabase("somesillydb");
_ctx.reset(new OldClientWriteContext(&_opCtx, nss.ns()));
+ exec->restoreState();
- // Unregister and restore state.
- deregisterExecutor(run.get());
- run->restoreState();
-
- ASSERT_EQUALS(PlanExecutor::ADVANCED, run->getNext(&obj, NULL));
+ ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&obj, NULL));
ASSERT_EQUALS(10, obj["foo"].numberInt());
- // Save state and register.
- run->saveState();
- registerExecutor(run.get());
+ exec->saveState();
// Drop our DB. Once again, must give up the lock.
_ctx.reset();
_client.dropDatabase("unittests");
_ctx.reset(new OldClientWriteContext(&_opCtx, nss.ns()));
-
- // Unregister and restore state.
- deregisterExecutor(run.get());
- run->restoreState();
+ exec->restoreState();
_ctx.reset();
// PlanExecutor was killed.
- ASSERT_EQUALS(PlanExecutor::DEAD, run->getNext(&obj, NULL));
+ ASSERT_EQUALS(PlanExecutor::DEAD, exec->getNext(&obj, NULL));
}
};
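With the explicit registerExecutor()/deregisterExecutor() helpers gone, these tests rely on the executor being registered with the cursor manager for its entire lifetime; saveState() alone makes it safe to yield, and invalidations arrive regardless. A condensed sketch of the pattern each test exercises, using the names from the hunks above:

auto exec = getCollscan();            // unique_ptr<PlanExecutor, PlanExecutor::Deleter>
exec->saveState();                    // safe to yield; invalidations still delivered
_client.dropCollection(nss.ns());     // a concurrent event that kills the executor
exec->restoreState();                 // restores without error...
BSONObj obj;
ASSERT_EQUALS(PlanExecutor::DEAD, exec->getNext(&obj, NULL));  // ...but it is dead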
diff --git a/src/mongo/dbtests/plan_ranking.cpp b/src/mongo/dbtests/plan_ranking.cpp
index bbcd0b6a2d0..c376bd605c3 100644
--- a/src/mongo/dbtests/plan_ranking.cpp
+++ b/src/mongo/dbtests/plan_ranking.cpp
@@ -133,7 +133,7 @@ public:
_mps->addPlan(solutions[i], root, ws.get());
}
// This is what sets a backup plan, should we test for it.
- PlanYieldPolicy yieldPolicy(PlanExecutor::YIELD_MANUAL,
+ PlanYieldPolicy yieldPolicy(PlanExecutor::NO_YIELD,
_opCtx.getServiceContext()->getFastClockSource());
_mps->pickBestPlan(&yieldPolicy);
ASSERT(_mps->bestPlanChosen());
diff --git a/src/mongo/dbtests/query_plan_executor.cpp b/src/mongo/dbtests/query_plan_executor.cpp
index 7b4f372c5b0..6796996da0d 100644
--- a/src/mongo/dbtests/query_plan_executor.cpp
+++ b/src/mongo/dbtests/query_plan_executor.cpp
@@ -94,7 +94,8 @@ public:
* Given a match expression, represented as the BSON object 'filterObj', create a PlanExecutor
* capable of executing a simple collection scan.
*/
- unique_ptr<PlanExecutor> makeCollScanExec(Collection* coll, BSONObj& filterObj) {
+ unique_ptr<PlanExecutor, PlanExecutor::Deleter> makeCollScanExec(Collection* coll,
+ BSONObj& filterObj) {
CollectionScanParams csparams;
csparams.collection = coll;
csparams.direction = CollectionScanParams::FORWARD;
@@ -134,10 +135,11 @@ public:
*
* Returns a PlanExecutor capable of executing an index scan
* over the specified index with the specified bounds.
- *
- * The caller takes ownership of the returned PlanExecutor*.
*/
- PlanExecutor* makeIndexScanExec(Database* db, BSONObj& indexSpec, int start, int end) {
+ unique_ptr<PlanExecutor, PlanExecutor::Deleter> makeIndexScanExec(Database* db,
+ BSONObj& indexSpec,
+ int start,
+ int end) {
// Build the index scan stage.
IndexScanParams ixparams;
ixparams.descriptor = getIndex(db, indexSpec);
@@ -168,7 +170,7 @@ public:
coll,
PlanExecutor::YIELD_MANUAL);
ASSERT_OK(statusWithPlanExecutor.getStatus());
- return statusWithPlanExecutor.getValue().release();
+ return std::move(statusWithPlanExecutor.getValue());
}
size_t numCursors() {
@@ -179,24 +181,6 @@ public:
return collection->getCursorManager()->numCursors();
}
- void registerExec(PlanExecutor* exec) {
- // TODO: This is not correct (create collection under S-lock)
- AutoGetCollectionForReadCommand ctx(&_opCtx, nss);
- WriteUnitOfWork wunit(&_opCtx);
- Collection* collection = ctx.getDb()->getOrCreateCollection(&_opCtx, nss);
- collection->getCursorManager()->registerExecutor(exec);
- wunit.commit();
- }
-
- void deregisterExec(PlanExecutor* exec) {
- // TODO: This is not correct (create collection under S-lock)
- AutoGetCollectionForReadCommand ctx(&_opCtx, nss);
- WriteUnitOfWork wunit(&_opCtx);
- Collection* collection = ctx.getDb()->getOrCreateCollection(&_opCtx, nss);
- collection->getCursorManager()->deregisterExecutor(exec);
- wunit.commit();
- }
-
protected:
const ServiceContext::UniqueOperationContext _opCtxPtr = cc().makeOperationContext();
OperationContext& _opCtx = *_opCtxPtr;
@@ -227,8 +211,7 @@ public:
BSONObj filterObj = fromjson("{_id: {$gt: 0}}");
Collection* coll = ctx.getCollection();
- unique_ptr<PlanExecutor> exec(makeCollScanExec(coll, filterObj));
- registerExec(exec.get());
+ auto exec = makeCollScanExec(coll, filterObj);
BSONObj objOut;
ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&objOut, NULL));
@@ -237,8 +220,6 @@ public:
// After dropping the collection, the plan executor should be dead.
dropCollection();
ASSERT_EQUALS(PlanExecutor::DEAD, exec->getNext(&objOut, NULL));
-
- deregisterExec(exec.get());
}
};
@@ -255,8 +236,7 @@ public:
BSONObj indexSpec = BSON("a" << 1);
addIndex(indexSpec);
- unique_ptr<PlanExecutor> exec(makeIndexScanExec(ctx.db(), indexSpec, 7, 10));
- registerExec(exec.get());
+ auto exec = makeIndexScanExec(ctx.db(), indexSpec, 7, 10);
BSONObj objOut;
ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&objOut, NULL));
@@ -265,8 +245,6 @@ public:
// After dropping the collection, the plan executor should be dead.
dropCollection();
ASSERT_EQUALS(PlanExecutor::DEAD, exec->getNext(&objOut, NULL));
-
- deregisterExec(exec.get());
}
};
@@ -293,8 +271,8 @@ public:
// Create an "inner" plan executor and register it with the cursor manager so that it can
// get notified when the collection is dropped.
- unique_ptr<PlanExecutor> innerExec(makeIndexScanExec(ctx.db(), indexSpec, 7, 10));
- registerExec(innerExec.get());
+ unique_ptr<PlanExecutor, PlanExecutor::Deleter> innerExec(
+ makeIndexScanExec(ctx.db(), indexSpec, 7, 10));
// Wrap the "inner" plan executor in a DocumentSourceCursor and add it as the first source
// in the pipeline.
@@ -304,29 +282,20 @@ public:
// Create the output PlanExecutor that pulls results from the pipeline.
auto ws = make_unique<WorkingSet>();
- auto proxy = make_unique<PipelineProxyStage>(&_opCtx, pipeline, ws.get());
+ auto proxy = make_unique<PipelineProxyStage>(&_opCtx, std::move(pipeline), ws.get());
auto statusWithPlanExecutor = PlanExecutor::make(
- &_opCtx, std::move(ws), std::move(proxy), collection, PlanExecutor::YIELD_MANUAL);
+ &_opCtx, std::move(ws), std::move(proxy), collection, PlanExecutor::NO_YIELD);
ASSERT_OK(statusWithPlanExecutor.getStatus());
- unique_ptr<PlanExecutor> outerExec = std::move(statusWithPlanExecutor.getValue());
-
- // Register the "outer" plan executor with the cursor manager so it can get notified when
- // the collection is dropped.
- registerExec(outerExec.get());
+ auto outerExec = std::move(statusWithPlanExecutor.getValue());
dropCollection();
// Verify that the aggregation pipeline returns an error because its "inner" plan executor
// has been killed due to the collection being dropped.
- ASSERT_THROWS_CODE(pipeline->getNext(), UserException, ErrorCodes::QueryPlanKilled);
-
- // Verify that the "outer" plan executor has been killed due to the collection being
- // dropped.
BSONObj objOut;
- ASSERT_EQUALS(PlanExecutor::DEAD, outerExec->getNext(&objOut, nullptr));
-
- deregisterExec(outerExec.get());
+ ASSERT_THROWS_CODE(
+ outerExec->getNext(&objOut, nullptr), UserException, ErrorCodes::QueryPlanKilled);
}
};
@@ -388,7 +357,7 @@ public:
BSONObj filterObj = fromjson("{a: {$gte: 2}}");
Collection* coll = ctx.getCollection();
- unique_ptr<PlanExecutor> exec(makeCollScanExec(coll, filterObj));
+ auto exec = makeCollScanExec(coll, filterObj);
BSONObj objOut;
ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&objOut, NULL));
@@ -415,7 +384,7 @@ public:
addIndex(indexSpec);
BSONObj filterObj = fromjson("{a: {$gte: 2}}");
- unique_ptr<PlanExecutor> exec(makeIndexScanExec(ctx.db(), indexSpec, 2, 5));
+ auto exec = makeIndexScanExec(ctx.db(), indexSpec, 2, 5);
BSONObj objOut;
ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&objOut, NULL));
@@ -435,7 +404,10 @@ namespace ClientCursor {
using mongo::ClientCursor;
/**
- * Test invalidation of ClientCursor.
+ * Tests that invalidating a cursor without dropping the collection while the cursor is not in use
+ * will keep the cursor registered, but marked as killed. Pinning the killed cursor should then
+ * fail with ErrorCodes::QueryPlanKilled, which also cleans the cursor up.
*/
class Invalidate : public PlanExecutorBase {
public:
@@ -449,19 +421,62 @@ public:
auto exec = makeCollScanExec(coll, filterObj);
// Make a client cursor from the plan executor.
- coll->getCursorManager()->registerCursor({std::move(exec), nss, {}, false, BSONObj()});
+ auto cursorPin = coll->getCursorManager()->registerCursor(
+ &_opCtx, {std::move(exec), nss, {}, false, BSONObj()});
+
+ auto cursorId = cursorPin.getCursor()->cursorid();
+ cursorPin.release();
+
+ ASSERT_EQUALS(1U, numCursors());
+ auto invalidateReason = "Invalidate Test";
+ const bool collectionGoingAway = false;
+ coll->getCursorManager()->invalidateAll(&_opCtx, collectionGoingAway, invalidateReason);
+ // Since the collection is not going away, the cursor should remain open, but be killed.
+ ASSERT_EQUALS(1U, numCursors());
+
+ // Pinning a killed cursor should result in an error and clean up the cursor.
+ ASSERT_EQ(ErrorCodes::QueryPlanKilled,
+ coll->getCursorManager()->pinCursor(&_opCtx, cursorId).getStatus());
+ ASSERT_EQUALS(0U, numCursors());
+ }
+};
+
+/**
+ * Tests that invalidating a cursor and dropping the collection while the cursor is not in use will
+ * not keep the cursor registered.
+ */
+class InvalidateWithDrop : public PlanExecutorBase {
+public:
+ void run() {
+ OldClientWriteContext ctx(&_opCtx, nss.ns());
+ insert(BSON("a" << 1 << "b" << 1));
+
+ BSONObj filterObj = fromjson("{_id: {$gt: 0}, b: {$gt: 0}}");
+
+ Collection* coll = ctx.getCollection();
+ auto exec = makeCollScanExec(coll, filterObj);
+
+ // Make a client cursor from the plan executor.
+ auto cursorPin = coll->getCursorManager()->registerCursor(
+ &_opCtx, {std::move(exec), nss, {}, false, BSONObj()});
+
+ auto cursorId = cursorPin.getCursor()->cursorid();
+ cursorPin.release();
- // There should be one cursor before invalidation,
- // and zero cursors after invalidation.
ASSERT_EQUALS(1U, numCursors());
- coll->getCursorManager()->invalidateAll(false, "Invalidate Test");
+ auto invalidateReason = "Invalidate Test";
+ const bool collectionGoingAway = true;
+ coll->getCursorManager()->invalidateAll(&_opCtx, collectionGoingAway, invalidateReason);
+ // Since the collection is going away, the cursor should not remain open.
+ ASSERT_EQ(ErrorCodes::CursorNotFound,
+ coll->getCursorManager()->pinCursor(&_opCtx, cursorId).getStatus());
ASSERT_EQUALS(0U, numCursors());
}
};
/**
- * Test that pinned client cursors persist even after
- * invalidation.
+ * Tests that invalidating a cursor while it is in use will deregister it from the cursor manager,
+ * transferring ownership to the pinned cursor.
*/
class InvalidatePinned : public PlanExecutorBase {
public:
@@ -476,13 +491,13 @@ public:
// Make a client cursor from the plan executor.
auto ccPin = collection->getCursorManager()->registerCursor(
- {std::move(exec), nss, {}, false, BSONObj()});
+ &_opCtx, {std::move(exec), nss, {}, false, BSONObj()});
// If the cursor is pinned, it sticks around, even after invalidation.
ASSERT_EQUALS(1U, numCursors());
const std::string invalidateReason("InvalidatePinned Test");
- collection->getCursorManager()->invalidateAll(false, invalidateReason);
- ASSERT_EQUALS(1U, numCursors());
+ collection->getCursorManager()->invalidateAll(&_opCtx, false, invalidateReason);
+ ASSERT_EQUALS(0U, numCursors());
// The invalidation should have killed the plan executor.
BSONObj objOut;
@@ -499,10 +514,9 @@ public:
};
/**
- * Test that client cursors time out and get
- * deleted.
+ * Test that client cursors time out and get deleted.
*/
-class Timeout : public PlanExecutorBase {
+class ShouldTimeout : public PlanExecutorBase {
public:
void run() {
{
@@ -519,7 +533,41 @@ public:
// Make a client cursor from the plan executor.
collection->getCursorManager()->registerCursor(
- {std::move(exec), nss, {}, false, BSONObj()});
+ &_opCtx, {std::move(exec), nss, {}, false, BSONObj()});
+ }
+
+ // There should be one cursor before timeout,
+ // and zero cursors after timeout.
+ ASSERT_EQUALS(1U, numCursors());
+ CursorManager::timeoutCursorsGlobal(&_opCtx, 600001);
+ ASSERT_EQUALS(0U, numCursors());
+ }
+};
+
+/**
+ * Test that client cursors which have been marked as killed time out and get deleted.
+ */
+class KilledCursorsShouldTimeout : public PlanExecutorBase {
+public:
+ void run() {
+ {
+ OldClientWriteContext ctx(&_opCtx, nss.ns());
+ insert(BSON("a" << 1 << "b" << 1));
+ }
+
+ {
+ AutoGetCollectionForReadCommand ctx(&_opCtx, nss);
+ Collection* collection = ctx.getCollection();
+
+ BSONObj filterObj = fromjson("{_id: {$gt: 0}, b: {$gt: 0}}");
+ auto exec = makeCollScanExec(collection, filterObj);
+
+ // Make a client cursor from the plan executor, and immediately kill it.
+ auto* cursorManager = collection->getCursorManager();
+ cursorManager->registerCursor(&_opCtx, {std::move(exec), nss, {}, false, BSONObj()});
+ const bool collectionGoingAway = false;
+ cursorManager->invalidateAll(
+ &_opCtx, collectionGoingAway, "KilledCursorsShouldTimeoutTest");
}
// There should be one cursor before timeout,
@@ -543,8 +591,10 @@ public:
add<SnapshotControl>();
add<SnapshotTest>();
add<ClientCursor::Invalidate>();
+ add<ClientCursor::InvalidateWithDrop>();
add<ClientCursor::InvalidatePinned>();
- add<ClientCursor::Timeout>();
+ add<ClientCursor::ShouldTimeout>();
+ add<ClientCursor::KilledCursorsShouldTimeout>();
}
};
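Taken together, the Invalidate and InvalidateWithDrop tests pin down the two invalidateAll() modes; a condensed sketch, with calls and assertions as they appear in the hunks above:

// Collection stays: idle cursors remain registered but are marked killed.
coll->getCursorManager()->invalidateAll(&_opCtx, /*collectionGoingAway=*/false, reason);
ASSERT_EQUALS(1U, numCursors());
ASSERT_EQ(ErrorCodes::QueryPlanKilled,
          coll->getCursorManager()->pinCursor(&_opCtx, cursorId).getStatus());
ASSERT_EQUALS(0U, numCursors());  // the failed pin cleaned the cursor up

// Collection goes away: idle cursors are deregistered outright.
coll->getCursorManager()->invalidateAll(&_opCtx, /*collectionGoingAway=*/true, reason);
ASSERT_EQ(ErrorCodes::CursorNotFound,
          coll->getCursorManager()->pinCursor(&_opCtx, cursorId).getStatus());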
diff --git a/src/mongo/dbtests/query_stage_cached_plan.cpp b/src/mongo/dbtests/query_stage_cached_plan.cpp
index 48e9da129a1..1f7cc5abcf3 100644
--- a/src/mongo/dbtests/query_stage_cached_plan.cpp
+++ b/src/mongo/dbtests/query_stage_cached_plan.cpp
@@ -147,7 +147,7 @@ public:
&_opCtx, collection, &_ws, cq.get(), plannerParams, decisionWorks, mockChild.release());
// This should succeed after triggering a replan.
- PlanYieldPolicy yieldPolicy(PlanExecutor::YIELD_MANUAL,
+ PlanYieldPolicy yieldPolicy(PlanExecutor::NO_YIELD,
_opCtx.getServiceContext()->getFastClockSource());
ASSERT_OK(cachedPlanStage.pickBestPlan(&yieldPolicy));
@@ -219,7 +219,7 @@ public:
&_opCtx, collection, &_ws, cq.get(), plannerParams, decisionWorks, mockChild.release());
// This should succeed after triggering a replan.
- PlanYieldPolicy yieldPolicy(PlanExecutor::YIELD_MANUAL,
+ PlanYieldPolicy yieldPolicy(PlanExecutor::NO_YIELD,
_opCtx.getServiceContext()->getFastClockSource());
ASSERT_OK(cachedPlanStage.pickBestPlan(&yieldPolicy));
diff --git a/src/mongo/dbtests/query_stage_collscan.cpp b/src/mongo/dbtests/query_stage_collscan.cpp
index e2db62bc4f4..d8e0088807c 100644
--- a/src/mongo/dbtests/query_stage_collscan.cpp
+++ b/src/mongo/dbtests/query_stage_collscan.cpp
@@ -105,9 +105,9 @@ public:
make_unique<CollectionScan>(&_opCtx, params, ws.get(), filterExpr.get());
auto statusWithPlanExecutor = PlanExecutor::make(
- &_opCtx, std::move(ws), std::move(ps), params.collection, PlanExecutor::YIELD_MANUAL);
+ &_opCtx, std::move(ws), std::move(ps), params.collection, PlanExecutor::NO_YIELD);
ASSERT_OK(statusWithPlanExecutor.getStatus());
- unique_ptr<PlanExecutor> exec = std::move(statusWithPlanExecutor.getValue());
+ auto exec = std::move(statusWithPlanExecutor.getValue());
// Use the runner to count the number of objects scanned.
int count = 0;
@@ -219,9 +219,9 @@ public:
unique_ptr<PlanStage> ps = make_unique<CollectionScan>(&_opCtx, params, ws.get(), nullptr);
auto statusWithPlanExecutor = PlanExecutor::make(
- &_opCtx, std::move(ws), std::move(ps), params.collection, PlanExecutor::YIELD_MANUAL);
+ &_opCtx, std::move(ws), std::move(ps), params.collection, PlanExecutor::NO_YIELD);
ASSERT_OK(statusWithPlanExecutor.getStatus());
- unique_ptr<PlanExecutor> exec = std::move(statusWithPlanExecutor.getValue());
+ auto exec = std::move(statusWithPlanExecutor.getValue());
int count = 0;
PlanExecutor::ExecState state;
@@ -253,9 +253,9 @@ public:
unique_ptr<PlanStage> ps = make_unique<CollectionScan>(&_opCtx, params, ws.get(), nullptr);
auto statusWithPlanExecutor = PlanExecutor::make(
- &_opCtx, std::move(ws), std::move(ps), params.collection, PlanExecutor::YIELD_MANUAL);
+ &_opCtx, std::move(ws), std::move(ps), params.collection, PlanExecutor::NO_YIELD);
ASSERT_OK(statusWithPlanExecutor.getStatus());
- unique_ptr<PlanExecutor> exec = std::move(statusWithPlanExecutor.getValue());
+ auto exec = std::move(statusWithPlanExecutor.getValue());
int count = 0;
PlanExecutor::ExecState state;
diff --git a/src/mongo/dbtests/query_stage_merge_sort.cpp b/src/mongo/dbtests/query_stage_merge_sort.cpp
index 238237617ca..0fd02c81eb3 100644
--- a/src/mongo/dbtests/query_stage_merge_sort.cpp
+++ b/src/mongo/dbtests/query_stage_merge_sort.cpp
@@ -166,9 +166,9 @@ public:
make_unique<FetchStage>(&_opCtx, ws.get(), ms, nullptr, coll);
// Must fetch if we want to easily pull out an obj.
auto statusWithPlanExecutor = PlanExecutor::make(
- &_opCtx, std::move(ws), std::move(fetchStage), coll, PlanExecutor::YIELD_MANUAL);
+ &_opCtx, std::move(ws), std::move(fetchStage), coll, PlanExecutor::NO_YIELD);
ASSERT_OK(statusWithPlanExecutor.getStatus());
- unique_ptr<PlanExecutor> exec = std::move(statusWithPlanExecutor.getValue());
+ auto exec = std::move(statusWithPlanExecutor.getValue());
for (int i = 0; i < N; ++i) {
BSONObj first, second;
@@ -236,9 +236,9 @@ public:
make_unique<FetchStage>(&_opCtx, ws.get(), ms, nullptr, coll);
auto statusWithPlanExecutor = PlanExecutor::make(
- &_opCtx, std::move(ws), std::move(fetchStage), coll, PlanExecutor::YIELD_MANUAL);
+ &_opCtx, std::move(ws), std::move(fetchStage), coll, PlanExecutor::NO_YIELD);
ASSERT_OK(statusWithPlanExecutor.getStatus());
- unique_ptr<PlanExecutor> exec = std::move(statusWithPlanExecutor.getValue());
+ auto exec = std::move(statusWithPlanExecutor.getValue());
for (int i = 0; i < N; ++i) {
BSONObj first, second;
@@ -306,9 +306,9 @@ public:
make_unique<FetchStage>(&_opCtx, ws.get(), ms, nullptr, coll);
auto statusWithPlanExecutor = PlanExecutor::make(
- &_opCtx, std::move(ws), std::move(fetchStage), coll, PlanExecutor::YIELD_MANUAL);
+ &_opCtx, std::move(ws), std::move(fetchStage), coll, PlanExecutor::NO_YIELD);
ASSERT_OK(statusWithPlanExecutor.getStatus());
- unique_ptr<PlanExecutor> exec = std::move(statusWithPlanExecutor.getValue());
+ auto exec = std::move(statusWithPlanExecutor.getValue());
for (int i = 0; i < N; ++i) {
BSONObj first, second;
@@ -379,9 +379,9 @@ public:
make_unique<FetchStage>(&_opCtx, ws.get(), ms, nullptr, coll);
auto statusWithPlanExecutor = PlanExecutor::make(
- &_opCtx, std::move(ws), std::move(fetchStage), coll, PlanExecutor::YIELD_MANUAL);
+ &_opCtx, std::move(ws), std::move(fetchStage), coll, PlanExecutor::NO_YIELD);
ASSERT_OK(statusWithPlanExecutor.getStatus());
- unique_ptr<PlanExecutor> exec = std::move(statusWithPlanExecutor.getValue());
+ auto exec = std::move(statusWithPlanExecutor.getValue());
for (int i = 0; i < N; ++i) {
BSONObj first, second;
@@ -451,9 +451,9 @@ public:
make_unique<FetchStage>(&_opCtx, ws.get(), ms, nullptr, coll);
auto statusWithPlanExecutor = PlanExecutor::make(
- &_opCtx, std::move(ws), std::move(fetchStage), coll, PlanExecutor::YIELD_MANUAL);
+ &_opCtx, std::move(ws), std::move(fetchStage), coll, PlanExecutor::NO_YIELD);
ASSERT_OK(statusWithPlanExecutor.getStatus());
- unique_ptr<PlanExecutor> exec = std::move(statusWithPlanExecutor.getValue());
+ auto exec = std::move(statusWithPlanExecutor.getValue());
// Only getting results from the a:1 index scan.
for (int i = 0; i < N; ++i) {
@@ -510,9 +510,9 @@ public:
make_unique<FetchStage>(&_opCtx, ws.get(), ms, nullptr, coll);
auto statusWithPlanExecutor = PlanExecutor::make(
- &_opCtx, std::move(ws), std::move(fetchStage), coll, PlanExecutor::YIELD_MANUAL);
+ &_opCtx, std::move(ws), std::move(fetchStage), coll, PlanExecutor::NO_YIELD);
ASSERT_OK(statusWithPlanExecutor.getStatus());
- unique_ptr<PlanExecutor> exec = std::move(statusWithPlanExecutor.getValue());
+ auto exec = std::move(statusWithPlanExecutor.getValue());
for (int i = 0; i < numIndices; ++i) {
BSONObj obj;
@@ -794,9 +794,9 @@ public:
make_unique<FetchStage>(&_opCtx, ws.get(), ms, nullptr, coll);
// Must fetch if we want to easily pull out an obj.
auto statusWithPlanExecutor = PlanExecutor::make(
- &_opCtx, std::move(ws), std::move(fetchStage), coll, PlanExecutor::YIELD_MANUAL);
+ &_opCtx, std::move(ws), std::move(fetchStage), coll, PlanExecutor::NO_YIELD);
ASSERT_OK(statusWithPlanExecutor.getStatus());
- unique_ptr<PlanExecutor> exec = std::move(statusWithPlanExecutor.getValue());
+ auto exec = std::move(statusWithPlanExecutor.getValue());
for (int i = 0; i < N; ++i) {
BSONObj first, second;
@@ -868,9 +868,9 @@ public:
make_unique<FetchStage>(&_opCtx, ws.get(), ms, nullptr, coll);
// Must fetch if we want to easily pull out an obj.
auto statusWithPlanExecutor = PlanExecutor::make(
- &_opCtx, std::move(ws), std::move(fetchStage), coll, PlanExecutor::YIELD_MANUAL);
+ &_opCtx, std::move(ws), std::move(fetchStage), coll, PlanExecutor::NO_YIELD);
ASSERT_OK(statusWithPlanExecutor.getStatus());
- unique_ptr<PlanExecutor> exec = std::move(statusWithPlanExecutor.getValue());
+ auto exec = std::move(statusWithPlanExecutor.getValue());
for (int i = 0; i < N; ++i) {
BSONObj first, second;
diff --git a/src/mongo/dbtests/query_stage_multiplan.cpp b/src/mongo/dbtests/query_stage_multiplan.cpp
index 59d06e93d05..703eee3c7c9 100644
--- a/src/mongo/dbtests/query_stage_multiplan.cpp
+++ b/src/mongo/dbtests/query_stage_multiplan.cpp
@@ -187,7 +187,7 @@ public:
mps->addPlan(createQuerySolution(), secondRoot.release(), sharedWs.get());
// Plan 0 aka the first plan aka the index scan should be the best.
- PlanYieldPolicy yieldPolicy(PlanExecutor::YIELD_MANUAL, _clock);
+ PlanYieldPolicy yieldPolicy(PlanExecutor::NO_YIELD, _clock);
mps->pickBestPlan(&yieldPolicy);
ASSERT(mps->bestPlanChosen());
ASSERT_EQUALS(0, mps->bestPlanIdx());
@@ -198,9 +198,9 @@ public:
std::move(mps),
std::move(cq),
coll,
- PlanExecutor::YIELD_MANUAL);
+ PlanExecutor::NO_YIELD);
ASSERT_OK(statusWithPlanExecutor.getStatus());
- std::unique_ptr<PlanExecutor> exec = std::move(statusWithPlanExecutor.getValue());
+ auto exec = std::move(statusWithPlanExecutor.getValue());
// Get all our results out.
int results = 0;
@@ -271,7 +271,7 @@ public:
}
// This sets a backup plan.
- PlanYieldPolicy yieldPolicy(PlanExecutor::YIELD_MANUAL, _clock);
+ PlanYieldPolicy yieldPolicy(PlanExecutor::NO_YIELD, _clock);
mps->pickBestPlan(&yieldPolicy);
ASSERT(mps->bestPlanChosen());
ASSERT(mps->hasBackupPlan());
@@ -351,11 +351,8 @@ public:
mps->addPlan(secondSoln.release(), secondPlan.release(), ws.get());
// Making a PlanExecutor chooses the best plan.
- auto exec = uassertStatusOK(PlanExecutor::make(&_opCtx,
- std::move(ws),
- std::move(mps),
- ctx.getCollection(),
- PlanExecutor::YIELD_MANUAL));
+ auto exec = uassertStatusOK(PlanExecutor::make(
+ &_opCtx, std::move(ws), std::move(mps), ctx.getCollection(), PlanExecutor::NO_YIELD));
auto root = static_cast<MultiPlanStage*>(exec->getRootStage());
ASSERT_TRUE(root->bestPlanChosen());
@@ -422,7 +419,7 @@ public:
auto cq = uassertStatusOK(CanonicalQuery::canonicalize(
opCtx(), std::move(qr), ExtensionsCallbackDisallowExtensions()));
auto exec =
- uassertStatusOK(getExecutor(&_opCtx, coll, std::move(cq), PlanExecutor::YIELD_MANUAL));
+ uassertStatusOK(getExecutor(&_opCtx, coll, std::move(cq), PlanExecutor::NO_YIELD));
ASSERT_EQ(exec->getRootStage()->stageType(), STAGE_MULTI_PLAN);
diff --git a/src/mongo/dbtests/query_stage_sort.cpp b/src/mongo/dbtests/query_stage_sort.cpp
index ea5d4fb87f3..40f03c7a8c4 100644
--- a/src/mongo/dbtests/query_stage_sort.cpp
+++ b/src/mongo/dbtests/query_stage_sort.cpp
@@ -107,7 +107,8 @@ public:
* Wraps a sort stage with a QueuedDataStage in a plan executor. Returns the plan executor,
* which is owned by the caller.
*/
- PlanExecutor* makePlanExecutorWithSortStage(Collection* coll) {
+ unique_ptr<PlanExecutor, PlanExecutor::Deleter> makePlanExecutorWithSortStage(
+ Collection* coll) {
// Build the mock scan stage which feeds the data.
auto ws = make_unique<WorkingSet>();
auto queuedDataStage = make_unique<QueuedDataStage>(&_opCtx, ws.get());
@@ -128,7 +129,7 @@ public:
auto statusWithPlanExecutor = PlanExecutor::make(
&_opCtx, std::move(ws), std::move(ss), coll, PlanExecutor::YIELD_AUTO);
invariant(statusWithPlanExecutor.isOK());
- return statusWithPlanExecutor.getValue().release();
+ return std::move(statusWithPlanExecutor.getValue());
}
// Return a value in the set {-1, 0, 1} to represent the sign of parameter i. Used to
@@ -167,9 +168,9 @@ public:
// Must fetch so we can look at the doc as a BSONObj.
auto statusWithPlanExecutor = PlanExecutor::make(
- &_opCtx, std::move(ws), std::move(fetchStage), coll, PlanExecutor::YIELD_MANUAL);
+ &_opCtx, std::move(ws), std::move(fetchStage), coll, PlanExecutor::NO_YIELD);
ASSERT_OK(statusWithPlanExecutor.getStatus());
- unique_ptr<PlanExecutor> exec = std::move(statusWithPlanExecutor.getValue());
+ auto exec = std::move(statusWithPlanExecutor.getValue());
// Look at pairs of objects to make sure that the sort order is pairwise (and therefore
// totally) correct.
@@ -328,7 +329,7 @@ public:
set<RecordId> recordIds;
getRecordIds(&recordIds, coll);
- unique_ptr<PlanExecutor> exec(makePlanExecutorWithSortStage(coll));
+ auto exec = makePlanExecutorWithSortStage(coll);
SortStage* ss = static_cast<SortStage*>(exec->getRootStage());
SortKeyGeneratorStage* keyGenStage =
static_cast<SortKeyGeneratorStage*>(ss->getChildren()[0].get());
@@ -437,7 +438,7 @@ public:
set<RecordId> recordIds;
getRecordIds(&recordIds, coll);
- unique_ptr<PlanExecutor> exec(makePlanExecutorWithSortStage(coll));
+ auto exec = makePlanExecutorWithSortStage(coll);
SortStage* ss = static_cast<SortStage*>(exec->getRootStage());
SortKeyGeneratorStage* keyGenStage =
static_cast<SortKeyGeneratorStage*>(ss->getChildren()[0].get());
@@ -566,8 +567,8 @@ public:
// We don't get results back since we're sorting some parallel arrays.
auto statusWithPlanExecutor = PlanExecutor::make(
- &_opCtx, std::move(ws), std::move(fetchStage), coll, PlanExecutor::YIELD_MANUAL);
- unique_ptr<PlanExecutor> exec = std::move(statusWithPlanExecutor.getValue());
+ &_opCtx, std::move(ws), std::move(fetchStage), coll, PlanExecutor::NO_YIELD);
+ auto exec = std::move(statusWithPlanExecutor.getValue());
PlanExecutor::ExecState runnerState = exec->getNext(NULL, NULL);
ASSERT_EQUALS(PlanExecutor::FAILURE, runnerState);
diff --git a/src/mongo/dbtests/query_stage_subplan.cpp b/src/mongo/dbtests/query_stage_subplan.cpp
index b89df74ac83..4450434b977 100644
--- a/src/mongo/dbtests/query_stage_subplan.cpp
+++ b/src/mongo/dbtests/query_stage_subplan.cpp
@@ -131,7 +131,7 @@ public:
new SubplanStage(&_opCtx, collection, &ws, plannerParams, cq.get()));
// Plan selection should succeed due to falling back on regular planning.
- PlanYieldPolicy yieldPolicy(PlanExecutor::YIELD_MANUAL, _clock);
+ PlanYieldPolicy yieldPolicy(PlanExecutor::NO_YIELD, _clock);
ASSERT_OK(subplan->pickBestPlan(&yieldPolicy));
}
};
@@ -174,7 +174,7 @@ public:
std::unique_ptr<SubplanStage> subplan(
new SubplanStage(&_opCtx, collection, &ws, plannerParams, cq.get()));
- PlanYieldPolicy yieldPolicy(PlanExecutor::YIELD_MANUAL, _clock);
+ PlanYieldPolicy yieldPolicy(PlanExecutor::NO_YIELD, _clock);
ASSERT_OK(subplan->pickBestPlan(&yieldPolicy));
// Nothing is in the cache yet, so neither branch should have been planned from
@@ -232,7 +232,7 @@ public:
std::unique_ptr<SubplanStage> subplan(
new SubplanStage(&_opCtx, collection, &ws, plannerParams, cq.get()));
- PlanYieldPolicy yieldPolicy(PlanExecutor::YIELD_MANUAL, _clock);
+ PlanYieldPolicy yieldPolicy(PlanExecutor::NO_YIELD, _clock);
ASSERT_OK(subplan->pickBestPlan(&yieldPolicy));
// Nothing is in the cache yet, so neither branch should have been planned from
@@ -291,7 +291,7 @@ public:
std::unique_ptr<SubplanStage> subplan(
new SubplanStage(&_opCtx, collection, &ws, plannerParams, cq.get()));
- PlanYieldPolicy yieldPolicy(PlanExecutor::YIELD_MANUAL, _clock);
+ PlanYieldPolicy yieldPolicy(PlanExecutor::NO_YIELD, _clock);
ASSERT_OK(subplan->pickBestPlan(&yieldPolicy));
// Nothing is in the cache yet, so neither branch should have been planned from
@@ -548,7 +548,7 @@ public:
new SubplanStage(&_opCtx, collection, &ws, plannerParams, cq.get()));
// Plan selection should succeed due to falling back on regular planning.
- PlanYieldPolicy yieldPolicy(PlanExecutor::YIELD_MANUAL, _clock);
+ PlanYieldPolicy yieldPolicy(PlanExecutor::NO_YIELD, _clock);
ASSERT_OK(subplan->pickBestPlan(&yieldPolicy));
// Work the stage until it produces all results.
@@ -608,7 +608,7 @@ public:
std::unique_ptr<SubplanStage> subplan(
new SubplanStage(&_opCtx, collection, &ws, plannerParams, cq.get()));
- PlanYieldPolicy yieldPolicy(PlanExecutor::YIELD_MANUAL, _clock);
+ PlanYieldPolicy yieldPolicy(PlanExecutor::NO_YIELD, _clock);
ASSERT_OK(subplan->pickBestPlan(&yieldPolicy));
size_t numResults = 0;
diff --git a/src/mongo/dbtests/query_stage_tests.cpp b/src/mongo/dbtests/query_stage_tests.cpp
index 852058ac005..d7899aa7564 100644
--- a/src/mongo/dbtests/query_stage_tests.cpp
+++ b/src/mongo/dbtests/query_stage_tests.cpp
@@ -92,9 +92,9 @@ public:
stdx::make_unique<IndexScan>(&_opCtx, params, ws.get(), filterExpr.get());
auto statusWithPlanExecutor = PlanExecutor::make(
- &_opCtx, std::move(ws), std::move(ix), ctx.getCollection(), PlanExecutor::YIELD_MANUAL);
+ &_opCtx, std::move(ws), std::move(ix), ctx.getCollection(), PlanExecutor::NO_YIELD);
ASSERT_OK(statusWithPlanExecutor.getStatus());
- unique_ptr<PlanExecutor> exec = std::move(statusWithPlanExecutor.getValue());
+ auto exec = std::move(statusWithPlanExecutor.getValue());
int count = 0;
PlanExecutor::ExecState state;
diff --git a/src/mongo/dbtests/querytests.cpp b/src/mongo/dbtests/querytests.cpp
index 9a756d054d2..006bb223c72 100644
--- a/src/mongo/dbtests/querytests.cpp
+++ b/src/mongo/dbtests/querytests.cpp
@@ -282,8 +282,8 @@ public:
{
// Check internal server handoff to getmore.
OldClientWriteContext ctx(&_opCtx, ns);
- auto pinnedCursor =
- unittest::assertGet(ctx.getCollection()->getCursorManager()->pinCursor(cursorId));
+ auto pinnedCursor = unittest::assertGet(
+ ctx.getCollection()->getCursorManager()->pinCursor(&_opCtx, cursorId));
ASSERT_EQUALS(2, pinnedCursor.getCursor()->pos());
}
@@ -382,7 +382,8 @@ public:
{
AutoGetCollectionForReadCommand ctx(&_opCtx, NamespaceString(ns));
ASSERT(1 == ctx.getCollection()->getCursorManager()->numCursors());
- ASSERT_OK(ctx.getCollection()->getCursorManager()->pinCursor(cursorId).getStatus());
+ ASSERT_OK(
+ ctx.getCollection()->getCursorManager()->pinCursor(&_opCtx, cursorId).getStatus());
}
// Check that the cursor can be iterated until all documents are returned.
@@ -691,7 +692,7 @@ public:
long long cursorId = c->getCursorId();
auto pinnedCursor = unittest::assertGet(
- ctx.db()->getCollection(&_opCtx, ns)->getCursorManager()->pinCursor(cursorId));
+ ctx.db()->getCollection(&_opCtx, ns)->getCursorManager()->pinCursor(&_opCtx, cursorId));
ASSERT_EQUALS(three.toULL(), pinnedCursor.getCursor()->getSlaveReadTill().asULL());
}
};
@@ -1647,8 +1648,8 @@ public:
ClientCursor* clientCursor = 0;
{
AutoGetCollectionForReadCommand ctx(&_opCtx, NamespaceString(ns()));
- auto clientCursorPin =
- unittest::assertGet(ctx.getCollection()->getCursorManager()->pinCursor(cursorId));
+ auto clientCursorPin = unittest::assertGet(
+ ctx.getCollection()->getCursorManager()->pinCursor(&_opCtx, cursorId));
clientCursor = clientCursorPin.getCursor();
// clientCursorPointer destructor unpins the cursor.
}
@@ -1699,8 +1700,10 @@ public:
{
OldClientWriteContext ctx(&_opCtx, ns());
- auto pinnedCursor = unittest::assertGet(
- ctx.db()->getCollection(&_opCtx, ns())->getCursorManager()->pinCursor(cursorId));
+ auto pinnedCursor = unittest::assertGet(ctx.db()
+ ->getCollection(&_opCtx, ns())
+ ->getCursorManager()
+ ->pinCursor(&_opCtx, cursorId));
string expectedAssertion = str::stream() << "Cannot kill pinned cursor: " << cursorId;
ASSERT_THROWS_WHAT(CursorManager::eraseCursorGlobal(&_opCtx, cursorId),
MsgAssertionException,
@@ -1785,14 +1788,15 @@ public:
class CursorManagerTest {
public:
- std::unique_ptr<PlanExecutor> makeFakePlanExecutor(OperationContext* opCtx) {
+ std::unique_ptr<PlanExecutor, PlanExecutor::Deleter> makeFakePlanExecutor(
+ OperationContext* opCtx) {
auto workingSet = stdx::make_unique<WorkingSet>();
auto queuedDataStage = stdx::make_unique<QueuedDataStage>(opCtx, workingSet.get());
return unittest::assertGet(PlanExecutor::make(opCtx,
std::move(workingSet),
std::move(queuedDataStage),
NamespaceString{"test.collection"},
- PlanExecutor::YieldPolicy::YIELD_MANUAL));
+ PlanExecutor::YieldPolicy::NO_YIELD));
}
};
@@ -1803,8 +1807,10 @@ public:
for (int i = 0; i < 1000; i++) {
auto exec = makeFakePlanExecutor(opCtx.get());
auto cursorPin = CursorManager::getGlobalCursorManager()->registerCursor(
+ opCtx.get(),
{std::move(exec), NamespaceString{"test.collection"}, {}, false, BSONObj()});
ASSERT_TRUE(CursorManager::isGloballyManagedCursor(cursorPin.getCursor()->cursorid()));
+ cursorPin.deleteUnderlying();
}
}
};
@@ -1818,8 +1824,10 @@ public:
for (int i = 0; i < 1000; i++) {
auto exec = makeFakePlanExecutor(opCtx.get());
auto cursorPin = testManager.registerCursor(
+ opCtx.get(),
{std::move(exec), NamespaceString{"test.collection"}, {}, false, BSONObj()});
ASSERT_FALSE(CursorManager::isGloballyManagedCursor(cursorPin.getCursor()->cursorid()));
+ cursorPin.deleteUnderlying();
}
}
};
@@ -1834,6 +1842,7 @@ public:
for (int i = 0; i < 1000; i++) {
auto exec = makeFakePlanExecutor(opCtx.get());
auto cursorPin = testManager.registerCursor(
+ opCtx.get(),
{std::move(exec), NamespaceString{"test.collection"}, {}, false, BSONObj()});
auto cursorId = cursorPin.getCursor()->cursorid();
if (prefix) {
@@ -1841,6 +1850,7 @@ public:
} else {
prefix = extractLeading32Bits(cursorId);
}
+ cursorPin.deleteUnderlying();
}
}
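The CursorManagerTest changes follow the same ownership theme: registerCursor() now takes the OperationContext and hands back a pin, and the tests delete through the pin rather than leaking cursors until a timeout sweep. Condensed, with parameters as in the hunks above:

auto exec = makeFakePlanExecutor(opCtx.get());
auto cursorPin = testManager.registerCursor(
    opCtx.get(), {std::move(exec), NamespaceString{"test.collection"}, {}, false, BSONObj()});
auto cursorId = cursorPin.getCursor()->cursorid();
// Disposal happens through the pin, satisfying ClientCursor::dispose()
// before destruction.
cursorPin.deleteUnderlying();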