summaryrefslogtreecommitdiff
path: root/src/mongo/dbtests
diff options
context:
space:
mode:
authorIan Boros <ian.boros@mongodb.com>2020-01-30 13:10:55 -0500
committerEvergreen Agent <no-reply@evergreen.mongodb.com>2020-02-28 22:16:41 +0000
commitcfa5c05fa1855fb1a04cb3a6e2eb10a7e82bf726 (patch)
tree7ab1e1ce8e2edd6837952c131fe14d43a0633235 /src/mongo/dbtests
parent793ae32c597f197b6445750aa9bfdaabc206132d (diff)
downloadmongo-cfa5c05fa1855fb1a04cb3a6e2eb10a7e82bf726.tar.gz
SERVER-45406 Plumb ExpressionContext through PlanStage
This patch also moves ownership of the collator to the ExpressionContext.
Diffstat (limited to 'src/mongo/dbtests')
-rw-r--r--src/mongo/dbtests/cursor_manager_test.cpp5
-rw-r--r--src/mongo/dbtests/documentsourcetests.cpp28
-rw-r--r--src/mongo/dbtests/matchertests.cpp10
-rw-r--r--src/mongo/dbtests/plan_executor_invalidation_test.cpp7
-rw-r--r--src/mongo/dbtests/plan_ranking.cpp5
-rw-r--r--src/mongo/dbtests/query_plan_executor.cpp17
-rw-r--r--src/mongo/dbtests/query_stage_and.cpp143
-rw-r--r--src/mongo/dbtests/query_stage_cached_plan.cpp44
-rw-r--r--src/mongo/dbtests/query_stage_collscan.cpp24
-rw-r--r--src/mongo/dbtests/query_stage_count.cpp16
-rw-r--r--src/mongo/dbtests/query_stage_count_scan.cpp27
-rw-r--r--src/mongo/dbtests/query_stage_delete.cpp20
-rw-r--r--src/mongo/dbtests/query_stage_distinct.cpp10
-rw-r--r--src/mongo/dbtests/query_stage_ensure_sorted.cpp29
-rw-r--r--src/mongo/dbtests/query_stage_fetch.cpp16
-rw-r--r--src/mongo/dbtests/query_stage_ixscan.cpp9
-rw-r--r--src/mongo/dbtests/query_stage_limit_skip.cpp12
-rw-r--r--src/mongo/dbtests/query_stage_merge_sort.cpp80
-rw-r--r--src/mongo/dbtests/query_stage_multiplan.cpp90
-rw-r--r--src/mongo/dbtests/query_stage_near.cpp20
-rw-r--r--src/mongo/dbtests/query_stage_sort.cpp14
-rw-r--r--src/mongo/dbtests/query_stage_sort_key_generator.cpp53
-rw-r--r--src/mongo/dbtests/query_stage_subplan.cpp30
-rw-r--r--src/mongo/dbtests/query_stage_tests.cpp10
-rw-r--r--src/mongo/dbtests/query_stage_trial.cpp33
-rw-r--r--src/mongo/dbtests/query_stage_update.cpp39
26 files changed, 438 insertions, 353 deletions
diff --git a/src/mongo/dbtests/cursor_manager_test.cpp b/src/mongo/dbtests/cursor_manager_test.cpp
index a4c3d835079..e6227d325e6 100644
--- a/src/mongo/dbtests/cursor_manager_test.cpp
+++ b/src/mongo/dbtests/cursor_manager_test.cpp
@@ -71,8 +71,11 @@ public:
std::unique_ptr<PlanExecutor, PlanExecutor::Deleter> makeFakePlanExecutor(
OperationContext* opCtx) {
+ // Create a mock ExpressionContext.
+ auto expCtx = make_intrusive<ExpressionContext>(opCtx, nullptr, kTestNss);
+
auto workingSet = std::make_unique<WorkingSet>();
- auto queuedDataStage = std::make_unique<QueuedDataStage>(opCtx, workingSet.get());
+ auto queuedDataStage = std::make_unique<QueuedDataStage>(expCtx.get(), workingSet.get());
return unittest::assertGet(PlanExecutor::make(opCtx,
std::move(workingSet),
std::move(queuedDataStage),
diff --git a/src/mongo/dbtests/documentsourcetests.cpp b/src/mongo/dbtests/documentsourcetests.cpp
index bf184315ecd..84738eb6e17 100644
--- a/src/mongo/dbtests/documentsourcetests.cpp
+++ b/src/mongo/dbtests/documentsourcetests.cpp
@@ -301,8 +301,11 @@ TEST_F(DocumentSourceCursorTest, TailableAwaitDataCursorShouldErrorAfterTimeout)
collScanParams.tailable = true;
auto filter = BSON("a" << 1);
auto matchExpression = uassertStatusOK(MatchExpressionParser::parse(filter, ctx()));
- auto collectionScan = std::make_unique<CollectionScan>(
- opCtx(), readLock.getCollection(), collScanParams, workingSet.get(), matchExpression.get());
+ auto collectionScan = std::make_unique<CollectionScan>(ctx().get(),
+ readLock.getCollection(),
+ collScanParams,
+ workingSet.get(),
+ matchExpression.get());
auto queryRequest = std::make_unique<QueryRequest>(nss);
queryRequest->setFilter(filter);
queryRequest->setTailableMode(TailableModeEnum::kTailableAndAwaitData);
@@ -340,8 +343,11 @@ TEST_F(DocumentSourceCursorTest, NonAwaitDataCursorShouldErrorAfterTimeout) {
CollectionScanParams collScanParams;
auto filter = BSON("a" << 1);
auto matchExpression = uassertStatusOK(MatchExpressionParser::parse(filter, ctx()));
- auto collectionScan = std::make_unique<CollectionScan>(
- opCtx(), readLock.getCollection(), collScanParams, workingSet.get(), matchExpression.get());
+ auto collectionScan = std::make_unique<CollectionScan>(ctx().get(),
+ readLock.getCollection(),
+ collScanParams,
+ workingSet.get(),
+ matchExpression.get());
auto queryRequest = std::make_unique<QueryRequest>(nss);
queryRequest->setFilter(filter);
auto canonicalQuery = unittest::assertGet(
@@ -386,8 +392,11 @@ TEST_F(DocumentSourceCursorTest, TailableAwaitDataCursorShouldErrorAfterBeingKil
collScanParams.tailable = true;
auto filter = BSON("a" << 1);
auto matchExpression = uassertStatusOK(MatchExpressionParser::parse(filter, ctx()));
- auto collectionScan = std::make_unique<CollectionScan>(
- opCtx(), readLock.getCollection(), collScanParams, workingSet.get(), matchExpression.get());
+ auto collectionScan = std::make_unique<CollectionScan>(ctx().get(),
+ readLock.getCollection(),
+ collScanParams,
+ workingSet.get(),
+ matchExpression.get());
auto queryRequest = std::make_unique<QueryRequest>(nss);
queryRequest->setFilter(filter);
queryRequest->setTailableMode(TailableModeEnum::kTailableAndAwaitData);
@@ -424,8 +433,11 @@ TEST_F(DocumentSourceCursorTest, NormalCursorShouldErrorAfterBeingKilled) {
CollectionScanParams collScanParams;
auto filter = BSON("a" << 1);
auto matchExpression = uassertStatusOK(MatchExpressionParser::parse(filter, ctx()));
- auto collectionScan = std::make_unique<CollectionScan>(
- opCtx(), readLock.getCollection(), collScanParams, workingSet.get(), matchExpression.get());
+ auto collectionScan = std::make_unique<CollectionScan>(ctx().get(),
+ readLock.getCollection(),
+ collScanParams,
+ workingSet.get(),
+ matchExpression.get());
auto queryRequest = std::make_unique<QueryRequest>(nss);
queryRequest->setFilter(filter);
auto canonicalQuery = unittest::assertGet(
diff --git a/src/mongo/dbtests/matchertests.cpp b/src/mongo/dbtests/matchertests.cpp
index bc3e329816c..1ea2ba9233e 100644
--- a/src/mongo/dbtests/matchertests.cpp
+++ b/src/mongo/dbtests/matchertests.cpp
@@ -229,9 +229,8 @@ public:
const NamespaceString nss("unittests.matchertests");
AutoGetCollectionForReadCommand ctx(&opCtx, nss);
- const CollatorInterface* collator = nullptr;
- const boost::intrusive_ptr<ExpressionContext> expCtx(
- new ExpressionContext(opCtxPtr.get(), collator, kTestNss));
+ const boost::intrusive_ptr<ExpressionContext> expCtx(new ExpressionContext(
+ opCtxPtr.get(), std::unique_ptr<CollatorInterface>(nullptr), kTestNss));
M m(BSON("$where"
<< "function(){ return this.a == 1; }"),
expCtx,
@@ -291,9 +290,10 @@ template <typename M>
class Collator {
public:
void run() {
- CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kAlwaysEqual);
+ auto collator =
+ std::make_unique<CollatorInterfaceMock>(CollatorInterfaceMock::MockType::kAlwaysEqual);
boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
- expCtx->setCollator(&collator);
+ expCtx->setCollator(std::move(collator));
M matcher(BSON("a"
<< "string"),
expCtx);
diff --git a/src/mongo/dbtests/plan_executor_invalidation_test.cpp b/src/mongo/dbtests/plan_executor_invalidation_test.cpp
index 06b33cc30d4..a7e949465bd 100644
--- a/src/mongo/dbtests/plan_executor_invalidation_test.cpp
+++ b/src/mongo/dbtests/plan_executor_invalidation_test.cpp
@@ -58,7 +58,8 @@ static const NamespaceString nss("unittests.PlanExecutorInvalidationTest");
*/
class PlanExecutorInvalidationTest : public unittest::Test {
public:
- PlanExecutorInvalidationTest() : _client(&_opCtx) {
+ PlanExecutorInvalidationTest()
+ : _client(&_opCtx), _expCtx(make_intrusive<ExpressionContext>(&_opCtx, nullptr, nss)) {
_ctx.reset(new dbtests::WriteContextForTests(&_opCtx, nss.ns()));
_client.dropCollection(nss.ns());
@@ -76,7 +77,7 @@ public:
params.direction = CollectionScanParams::FORWARD;
params.tailable = false;
unique_ptr<CollectionScan> scan(
- new CollectionScan(&_opCtx, collection(), params, ws.get(), nullptr));
+ new CollectionScan(_expCtx.get(), collection(), params, ws.get(), nullptr));
// Create a plan executor to hold it
auto qr = std::make_unique<QueryRequest>(nss);
@@ -131,6 +132,8 @@ public:
OperationContext& _opCtx = *_opCtxPtr;
unique_ptr<dbtests::WriteContextForTests> _ctx;
DBDirectClient _client;
+
+ boost::intrusive_ptr<ExpressionContext> _expCtx;
};
TEST_F(PlanExecutorInvalidationTest, ExecutorToleratesDeletedDocumentsDuringYield) {
diff --git a/src/mongo/dbtests/plan_ranking.cpp b/src/mongo/dbtests/plan_ranking.cpp
index 95ff9f4da3d..8831ee951a0 100644
--- a/src/mongo/dbtests/plan_ranking.cpp
+++ b/src/mongo/dbtests/plan_ranking.cpp
@@ -126,7 +126,7 @@ public:
ASSERT_GREATER_THAN_OR_EQUALS(solutions.size(), 1U);
// Fill out the MPR.
- _mps.reset(new MultiPlanStage(&_opCtx, collection, cq));
+ _mps.reset(new MultiPlanStage(_expCtx.get(), collection, cq));
unique_ptr<WorkingSet> ws(new WorkingSet());
// Put each solution from the planner into the MPR.
for (size_t i = 0; i < solutions.size(); ++i) {
@@ -167,6 +167,9 @@ protected:
const ServiceContext::UniqueOperationContext _txnPtr = cc().makeOperationContext();
OperationContext& _opCtx = *_txnPtr;
+ boost::intrusive_ptr<ExpressionContext> _expCtx =
+ make_intrusive<ExpressionContext>(&_opCtx, nullptr, nss);
+
private:
// Holds the value of global "internalQueryForceIntersectionPlans" setParameter flag.
// Restored at end of test invocation regardless of test result.
diff --git a/src/mongo/dbtests/query_plan_executor.cpp b/src/mongo/dbtests/query_plan_executor.cpp
index 0529ec381bb..25182aa327b 100644
--- a/src/mongo/dbtests/query_plan_executor.cpp
+++ b/src/mongo/dbtests/query_plan_executor.cpp
@@ -116,7 +116,7 @@ public:
// Make the stage.
unique_ptr<PlanStage> root(
- new CollectionScan(&_opCtx, coll, csparams, ws.get(), cq.get()->root()));
+ new CollectionScan(cq->getExpCtx().get(), coll, csparams, ws.get(), cq.get()->root()));
// Hand the plan off to the executor.
auto statusWithPlanExecutor =
@@ -153,9 +153,9 @@ public:
unique_ptr<WorkingSet> ws(new WorkingSet());
- auto ixscan = std::make_unique<IndexScan>(&_opCtx, ixparams, ws.get(), nullptr);
+ auto ixscan = std::make_unique<IndexScan>(_expCtx.get(), ixparams, ws.get(), nullptr);
unique_ptr<PlanStage> root =
- std::make_unique<FetchStage>(&_opCtx, ws.get(), std::move(ixscan), nullptr, coll);
+ std::make_unique<FetchStage>(_expCtx.get(), ws.get(), std::move(ixscan), nullptr, coll);
auto qr = std::make_unique<QueryRequest>(nss);
auto statusWithCQ = CanonicalQuery::canonicalize(&_opCtx, std::move(qr));
@@ -174,6 +174,9 @@ protected:
const ServiceContext::UniqueOperationContext _opCtxPtr = cc().makeOperationContext();
OperationContext& _opCtx = *_opCtxPtr;
+ boost::intrusive_ptr<ExpressionContext> _expCtx =
+ make_intrusive<ExpressionContext>(&_opCtx, nullptr, nss);
+
private:
const IndexDescriptor* getIndex(Database* db, const BSONObj& obj) {
Collection* collection =
@@ -203,8 +206,6 @@ TEST_F(PlanExecutorTest, DropIndexScanAgg) {
// Create the aggregation pipeline.
std::vector<BSONObj> rawPipeline = {fromjson("{$match: {a: {$gte: 7, $lte: 10}}}")};
- boost::intrusive_ptr<ExpressionContextForTest> expCtx =
- new ExpressionContextForTest(&_opCtx, AggregationRequest(nss, rawPipeline));
// Create an "inner" plan executor and register it with the cursor manager so that it can
// get notified when the collection is dropped.
@@ -215,12 +216,12 @@ TEST_F(PlanExecutorTest, DropIndexScanAgg) {
// in the pipeline.
innerExec->saveState();
auto cursorSource = DocumentSourceCursor::create(
- collection, std::move(innerExec), expCtx, DocumentSourceCursor::CursorType::kRegular);
- auto pipeline = Pipeline::create({cursorSource}, expCtx);
+ collection, std::move(innerExec), _expCtx, DocumentSourceCursor::CursorType::kRegular);
+ auto pipeline = Pipeline::create({cursorSource}, _expCtx);
// Create the output PlanExecutor that pulls results from the pipeline.
auto ws = std::make_unique<WorkingSet>();
- auto proxy = std::make_unique<PipelineProxyStage>(&_opCtx, std::move(pipeline), ws.get());
+ auto proxy = std::make_unique<PipelineProxyStage>(_expCtx.get(), std::move(pipeline), ws.get());
auto statusWithPlanExecutor = PlanExecutor::make(
&_opCtx, std::move(ws), std::move(proxy), collection, PlanExecutor::NO_YIELD);
diff --git a/src/mongo/dbtests/query_stage_and.cpp b/src/mongo/dbtests/query_stage_and.cpp
index f06e8fb3589..9750b9af706 100644
--- a/src/mongo/dbtests/query_stage_and.cpp
+++ b/src/mongo/dbtests/query_stage_and.cpp
@@ -167,6 +167,9 @@ protected:
const ServiceContext::UniqueOperationContext _txnPtr = cc().makeOperationContext();
OperationContext& _opCtx = *_txnPtr;
+ boost::intrusive_ptr<ExpressionContext> _expCtx =
+ make_intrusive<ExpressionContext>(&_opCtx, nullptr, nss());
+
private:
DBDirectClient _client;
};
@@ -199,18 +202,18 @@ public:
addIndex(BSON("bar" << 1));
WorkingSet ws;
- auto ah = std::make_unique<AndHashStage>(&_opCtx, &ws);
+ auto ah = std::make_unique<AndHashStage>(_expCtx.get(), &ws);
// Foo <= 20.
auto params = makeIndexScanParams(&_opCtx, getIndex(BSON("foo" << 1), coll));
params.bounds.startKey = BSON("" << 20);
params.direction = -1;
- ah->addChild(std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr));
+ ah->addChild(std::make_unique<IndexScan>(_expCtx.get(), params, &ws, nullptr));
// Bar >= 10.
params = makeIndexScanParams(&_opCtx, getIndex(BSON("bar" << 1), coll));
params.bounds.startKey = BSON("" << 10);
- ah->addChild(std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr));
+ ah->addChild(std::make_unique<IndexScan>(_expCtx.get(), params, &ws, nullptr));
// 'ah' reads the first child into its hash table: foo=20, foo=19, ..., foo=0
// in that order. Read half of them.
@@ -286,19 +289,19 @@ public:
addIndex(BSON("baz" << 1));
WorkingSet ws;
- auto ah = std::make_unique<AndHashStage>(&_opCtx, &ws);
+ auto ah = std::make_unique<AndHashStage>(_expCtx.get(), &ws);
// Foo <= 20 (descending).
auto params = makeIndexScanParams(&_opCtx, getIndex(BSON("foo" << 1), coll));
params.bounds.startKey = BSON("" << 20);
params.direction = -1;
- ah->addChild(std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr));
+ ah->addChild(std::make_unique<IndexScan>(_expCtx.get(), params, &ws, nullptr));
// Bar <= 19 (descending).
params = makeIndexScanParams(&_opCtx, getIndex(BSON("bar" << 1), coll));
params.bounds.startKey = BSON("" << 19);
params.direction = -1;
- ah->addChild(std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr));
+ ah->addChild(std::make_unique<IndexScan>(_expCtx.get(), params, &ws, nullptr));
// First call to work reads the first result from the children. The first result for the
// first scan over foo is {foo: 20, bar: 20, baz: 20}. The first result for the second scan
@@ -366,19 +369,19 @@ public:
addIndex(BSON("bar" << 1));
WorkingSet ws;
- auto ah = std::make_unique<AndHashStage>(&_opCtx, &ws);
+ auto ah = std::make_unique<AndHashStage>(_expCtx.get(), &ws);
// Foo <= 20
auto params = makeIndexScanParams(&_opCtx, getIndex(BSON("foo" << 1), coll));
params.bounds.startKey = BSON("" << 20);
params.direction = -1;
- ah->addChild(std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr));
+ ah->addChild(std::make_unique<IndexScan>(_expCtx.get(), params, &ws, nullptr));
// Bar >= 10
params = makeIndexScanParams(&_opCtx, getIndex(BSON("bar" << 1), coll));
params.bounds.startKey = BSON("" << 10);
params.direction = -1;
- ah->addChild(std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr));
+ ah->addChild(std::make_unique<IndexScan>(_expCtx.get(), params, &ws, nullptr));
// foo == bar == baz, and foo<=20, bar>=10, so our values are:
// foo == 10, 11, 12, 13, 14, 15. 16, 17, 18, 19, 20
@@ -415,19 +418,19 @@ public:
// before hashed AND is done reading the first child (stage has to
// hold 21 keys in buffer for Foo <= 20).
WorkingSet ws;
- auto ah = std::make_unique<AndHashStage>(&_opCtx, &ws, 20 * big.size());
+ auto ah = std::make_unique<AndHashStage>(_expCtx.get(), &ws, 20 * big.size());
// Foo <= 20
auto params = makeIndexScanParams(&_opCtx, getIndex(BSON("foo" << 1 << "big" << 1), coll));
params.bounds.startKey = BSON("" << 20 << "" << big);
params.direction = -1;
- ah->addChild(std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr));
+ ah->addChild(std::make_unique<IndexScan>(_expCtx.get(), params, &ws, nullptr));
// Bar >= 10
params = makeIndexScanParams(&_opCtx, getIndex(BSON("bar" << 1), coll));
params.bounds.startKey = BSON("" << 10);
params.direction = -1;
- ah->addChild(std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr));
+ ah->addChild(std::make_unique<IndexScan>(_expCtx.get(), params, &ws, nullptr));
// Stage execution should fail.
ASSERT_EQUALS(-1, countResults(ah.get()));
@@ -462,19 +465,19 @@ public:
// keys in last child's index are not buffered. There are 6 keys
// that satisfy the criteria Foo <= 20 and Bar >= 10 and 5 <= baz <= 15.
WorkingSet ws;
- auto ah = std::make_unique<AndHashStage>(&_opCtx, &ws, 5 * big.size());
+ auto ah = std::make_unique<AndHashStage>(_expCtx.get(), &ws, 5 * big.size());
// Foo <= 20
auto params = makeIndexScanParams(&_opCtx, getIndex(BSON("foo" << 1), coll));
params.bounds.startKey = BSON("" << 20);
params.direction = -1;
- ah->addChild(std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr));
+ ah->addChild(std::make_unique<IndexScan>(_expCtx.get(), params, &ws, nullptr));
// Bar >= 10
params = makeIndexScanParams(&_opCtx, getIndex(BSON("bar" << 1 << "big" << 1), coll));
params.bounds.startKey = BSON("" << 10 << "" << big);
params.direction = -1;
- ah->addChild(std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr));
+ ah->addChild(std::make_unique<IndexScan>(_expCtx.get(), params, &ws, nullptr));
// foo == bar == baz, and foo<=20, bar>=10, so our values are:
// foo == 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20.
@@ -504,24 +507,24 @@ public:
addIndex(BSON("baz" << 1));
WorkingSet ws;
- auto ah = std::make_unique<AndHashStage>(&_opCtx, &ws);
+ auto ah = std::make_unique<AndHashStage>(_expCtx.get(), &ws);
// Foo <= 20
auto params = makeIndexScanParams(&_opCtx, getIndex(BSON("foo" << 1), coll));
params.bounds.startKey = BSON("" << 20);
params.direction = -1;
- ah->addChild(std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr));
+ ah->addChild(std::make_unique<IndexScan>(_expCtx.get(), params, &ws, nullptr));
// Bar >= 10
params = makeIndexScanParams(&_opCtx, getIndex(BSON("bar" << 1), coll));
params.bounds.startKey = BSON("" << 10);
- ah->addChild(std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr));
+ ah->addChild(std::make_unique<IndexScan>(_expCtx.get(), params, &ws, nullptr));
// 5 <= baz <= 15
params = makeIndexScanParams(&_opCtx, getIndex(BSON("baz" << 1), coll));
params.bounds.startKey = BSON("" << 5);
params.bounds.endKey = BSON("" << 15);
- ah->addChild(std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr));
+ ah->addChild(std::make_unique<IndexScan>(_expCtx.get(), params, &ws, nullptr));
// foo == bar == baz, and foo<=20, bar>=10, 5<=baz<=15, so our values are:
// foo == 10, 11, 12, 13, 14, 15.
@@ -562,24 +565,24 @@ public:
// before hashed AND is done reading the second child (stage has to
// hold 11 keys in buffer for Foo <= 20 and Bar >= 10).
WorkingSet ws;
- auto ah = std::make_unique<AndHashStage>(&_opCtx, &ws, 10 * big.size());
+ auto ah = std::make_unique<AndHashStage>(_expCtx.get(), &ws, 10 * big.size());
// Foo <= 20
auto params = makeIndexScanParams(&_opCtx, getIndex(BSON("foo" << 1), coll));
params.bounds.startKey = BSON("" << 20);
params.direction = -1;
- ah->addChild(std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr));
+ ah->addChild(std::make_unique<IndexScan>(_expCtx.get(), params, &ws, nullptr));
// Bar >= 10
params = makeIndexScanParams(&_opCtx, getIndex(BSON("bar" << 1 << "big" << 1), coll));
params.bounds.startKey = BSON("" << 10 << "" << big);
- ah->addChild(std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr));
+ ah->addChild(std::make_unique<IndexScan>(_expCtx.get(), params, &ws, nullptr));
// 5 <= baz <= 15
params = makeIndexScanParams(&_opCtx, getIndex(BSON("baz" << 1), coll));
params.bounds.startKey = BSON("" << 5);
params.bounds.endKey = BSON("" << 15);
- ah->addChild(std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr));
+ ah->addChild(std::make_unique<IndexScan>(_expCtx.get(), params, &ws, nullptr));
// Stage execution should fail.
ASSERT_EQUALS(-1, countResults(ah.get()));
@@ -607,19 +610,19 @@ public:
addIndex(BSON("bar" << 1));
WorkingSet ws;
- auto ah = std::make_unique<AndHashStage>(&_opCtx, &ws);
+ auto ah = std::make_unique<AndHashStage>(_expCtx.get(), &ws);
// Foo <= 20
auto params = makeIndexScanParams(&_opCtx, getIndex(BSON("foo" << 1), coll));
params.bounds.startKey = BSON("" << 20);
params.direction = -1;
- ah->addChild(std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr));
+ ah->addChild(std::make_unique<IndexScan>(_expCtx.get(), params, &ws, nullptr));
// Bar == 5. Index scan should be eof.
params = makeIndexScanParams(&_opCtx, getIndex(BSON("bar" << 1), coll));
params.bounds.startKey = BSON("" << 5);
params.bounds.endKey = BSON("" << 5);
- ah->addChild(std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr));
+ ah->addChild(std::make_unique<IndexScan>(_expCtx.get(), params, &ws, nullptr));
int count = 0;
int works = 0;
@@ -664,12 +667,12 @@ public:
addIndex(BSON("bar" << 1));
WorkingSet ws;
- auto ah = std::make_unique<AndHashStage>(&_opCtx, &ws);
+ auto ah = std::make_unique<AndHashStage>(_expCtx.get(), &ws);
// Foo >= 100
auto params = makeIndexScanParams(&_opCtx, getIndex(BSON("foo" << 1), coll));
params.bounds.startKey = BSON("" << 100);
- ah->addChild(std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr));
+ ah->addChild(std::make_unique<IndexScan>(_expCtx.get(), params, &ws, nullptr));
// Bar <= 100
params = makeIndexScanParams(&_opCtx, getIndex(BSON("bar" << 1), coll));
@@ -681,7 +684,7 @@ public:
<< "");
params.bounds.boundInclusion = BoundInclusion::kIncludeStartKeyOnly;
params.direction = -1;
- ah->addChild(std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr));
+ ah->addChild(std::make_unique<IndexScan>(_expCtx.get(), params, &ws, nullptr));
ASSERT_EQUALS(0, countResults(ah.get()));
}
@@ -711,24 +714,24 @@ public:
addIndex(BSON("bar" << 1));
WorkingSet ws;
- auto ah = std::make_unique<AndHashStage>(&_opCtx, &ws);
+ auto ah = std::make_unique<AndHashStage>(_expCtx.get(), &ws);
// Foo <= 20
auto params = makeIndexScanParams(&_opCtx, getIndex(BSON("foo" << 1), coll));
params.bounds.startKey = BSON("" << 20);
params.direction = -1;
- auto firstScan = std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr);
+ auto firstScan = std::make_unique<IndexScan>(_expCtx.get(), params, &ws, nullptr);
// First child of the AND_HASH stage is a Fetch. The NULL in the
// constructor means there is no filter.
auto fetch =
- std::make_unique<FetchStage>(&_opCtx, &ws, std::move(firstScan), nullptr, coll);
+ std::make_unique<FetchStage>(_expCtx.get(), &ws, std::move(firstScan), nullptr, coll);
ah->addChild(std::move(fetch));
// Bar >= 10
params = makeIndexScanParams(&_opCtx, getIndex(BSON("bar" << 1), coll));
params.bounds.startKey = BSON("" << 10);
- ah->addChild(std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr));
+ ah->addChild(std::make_unique<IndexScan>(_expCtx.get(), params, &ws, nullptr));
// Check that the AndHash stage returns docs {foo: 10, bar: 10}
// through {foo: 20, bar: 20}.
@@ -764,23 +767,23 @@ public:
addIndex(BSON("bar" << 1));
WorkingSet ws;
- auto ah = std::make_unique<AndHashStage>(&_opCtx, &ws);
+ auto ah = std::make_unique<AndHashStage>(_expCtx.get(), &ws);
// Foo <= 20
auto params = makeIndexScanParams(&_opCtx, getIndex(BSON("foo" << 1), coll));
params.bounds.startKey = BSON("" << 20);
params.direction = -1;
- ah->addChild(std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr));
+ ah->addChild(std::make_unique<IndexScan>(_expCtx.get(), params, &ws, nullptr));
// Bar >= 10
params = makeIndexScanParams(&_opCtx, getIndex(BSON("bar" << 1), coll));
params.bounds.startKey = BSON("" << 10);
- auto secondScan = std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr);
+ auto secondScan = std::make_unique<IndexScan>(_expCtx.get(), params, &ws, nullptr);
// Second child of the AND_HASH stage is a Fetch. The NULL in the
// constructor means there is no filter.
auto fetch =
- std::make_unique<FetchStage>(&_opCtx, &ws, std::move(secondScan), nullptr, coll);
+ std::make_unique<FetchStage>(_expCtx.get(), &ws, std::move(secondScan), nullptr, coll);
ah->addChild(std::move(fetch));
// Check that the AndHash stage returns docs {foo: 10, bar: 10}
@@ -813,9 +816,9 @@ public:
// Child2: NEED_TIME, FAILURE
{
WorkingSet ws;
- const auto andHashStage = std::make_unique<AndHashStage>(&_opCtx, &ws);
+ const auto andHashStage = std::make_unique<AndHashStage>(_expCtx.get(), &ws);
- auto childStage1 = std::make_unique<QueuedDataStage>(&_opCtx, &ws);
+ auto childStage1 = std::make_unique<QueuedDataStage>(_expCtx.get(), &ws);
{
WorkingSetID id = ws.allocate();
WorkingSetMember* wsm = ws.get(id);
@@ -825,7 +828,7 @@ public:
childStage1->pushBack(id);
}
- auto childStage2 = std::make_unique<QueuedDataStage>(&_opCtx, &ws);
+ auto childStage2 = std::make_unique<QueuedDataStage>(_expCtx.get(), &ws);
childStage2->pushBack(PlanStage::NEED_TIME);
childStage2->pushBack(PlanStage::FAILURE);
@@ -846,9 +849,9 @@ public:
// Child2: Data
{
WorkingSet ws;
- const auto andHashStage = std::make_unique<AndHashStage>(&_opCtx, &ws);
+ const auto andHashStage = std::make_unique<AndHashStage>(_expCtx.get(), &ws);
- auto childStage1 = std::make_unique<QueuedDataStage>(&_opCtx, &ws);
+ auto childStage1 = std::make_unique<QueuedDataStage>(_expCtx.get(), &ws);
{
WorkingSetID id = ws.allocate();
@@ -860,7 +863,7 @@ public:
}
childStage1->pushBack(PlanStage::FAILURE);
- auto childStage2 = std::make_unique<QueuedDataStage>(&_opCtx, &ws);
+ auto childStage2 = std::make_unique<QueuedDataStage>(_expCtx.get(), &ws);
{
WorkingSetID id = ws.allocate();
WorkingSetMember* wsm = ws.get(id);
@@ -887,9 +890,9 @@ public:
// Child2: Data, FAILURE
{
WorkingSet ws;
- const auto andHashStage = std::make_unique<AndHashStage>(&_opCtx, &ws);
+ const auto andHashStage = std::make_unique<AndHashStage>(_expCtx.get(), &ws);
- auto childStage1 = std::make_unique<QueuedDataStage>(&_opCtx, &ws);
+ auto childStage1 = std::make_unique<QueuedDataStage>(_expCtx.get(), &ws);
{
WorkingSetID id = ws.allocate();
WorkingSetMember* wsm = ws.get(id);
@@ -899,7 +902,7 @@ public:
childStage1->pushBack(id);
}
- auto childStage2 = std::make_unique<QueuedDataStage>(&_opCtx, &ws);
+ auto childStage2 = std::make_unique<QueuedDataStage>(_expCtx.get(), &ws);
{
WorkingSetID id = ws.allocate();
WorkingSetMember* wsm = ws.get(id);
@@ -951,19 +954,19 @@ public:
addIndex(BSON("bar" << 1));
WorkingSet ws;
- auto ah = std::make_unique<AndSortedStage>(&_opCtx, &ws);
+ auto ah = std::make_unique<AndSortedStage>(_expCtx.get(), &ws);
// Scan over foo == 1.
auto params = makeIndexScanParams(&_opCtx, getIndex(BSON("foo" << 1), coll));
params.bounds.startKey = BSON("" << 1);
params.bounds.endKey = BSON("" << 1);
- ah->addChild(std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr));
+ ah->addChild(std::make_unique<IndexScan>(_expCtx.get(), params, &ws, nullptr));
// Scan over bar == 1.
params = makeIndexScanParams(&_opCtx, getIndex(BSON("bar" << 1), coll));
params.bounds.startKey = BSON("" << 1);
params.bounds.endKey = BSON("" << 1);
- ah->addChild(std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr));
+ ah->addChild(std::make_unique<IndexScan>(_expCtx.get(), params, &ws, nullptr));
// Get the set of RecordIds in our collection to use later.
set<RecordId> data;
@@ -1068,25 +1071,25 @@ public:
addIndex(BSON("baz" << 1));
WorkingSet ws;
- auto ah = std::make_unique<AndSortedStage>(&_opCtx, &ws);
+ auto ah = std::make_unique<AndSortedStage>(_expCtx.get(), &ws);
// Scan over foo == 1
auto params = makeIndexScanParams(&_opCtx, getIndex(BSON("foo" << 1), coll));
params.bounds.startKey = BSON("" << 1);
params.bounds.endKey = BSON("" << 1);
- ah->addChild(std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr));
+ ah->addChild(std::make_unique<IndexScan>(_expCtx.get(), params, &ws, nullptr));
// bar == 1
params = makeIndexScanParams(&_opCtx, getIndex(BSON("bar" << 1), coll));
params.bounds.startKey = BSON("" << 1);
params.bounds.endKey = BSON("" << 1);
- ah->addChild(std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr));
+ ah->addChild(std::make_unique<IndexScan>(_expCtx.get(), params, &ws, nullptr));
// baz == 1
params = makeIndexScanParams(&_opCtx, getIndex(BSON("baz" << 1), coll));
params.bounds.startKey = BSON("" << 1);
params.bounds.endKey = BSON("" << 1);
- ah->addChild(std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr));
+ ah->addChild(std::make_unique<IndexScan>(_expCtx.get(), params, &ws, nullptr));
ASSERT_EQUALS(50, countResults(ah.get()));
}
@@ -1113,19 +1116,19 @@ public:
addIndex(BSON("bar" << 1));
WorkingSet ws;
- auto ah = std::make_unique<AndSortedStage>(&_opCtx, &ws);
+ auto ah = std::make_unique<AndSortedStage>(_expCtx.get(), &ws);
// Foo == 7. Should be EOF.
auto params = makeIndexScanParams(&_opCtx, getIndex(BSON("foo" << 1), coll));
params.bounds.startKey = BSON("" << 7);
params.bounds.endKey = BSON("" << 7);
- ah->addChild(std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr));
+ ah->addChild(std::make_unique<IndexScan>(_expCtx.get(), params, &ws, nullptr));
// Bar == 20, not EOF.
params = makeIndexScanParams(&_opCtx, getIndex(BSON("bar" << 1), coll));
params.bounds.startKey = BSON("" << 20);
params.bounds.endKey = BSON("" << 20);
- ah->addChild(std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr));
+ ah->addChild(std::make_unique<IndexScan>(_expCtx.get(), params, &ws, nullptr));
ASSERT_EQUALS(0, countResults(ah.get()));
}
@@ -1156,19 +1159,19 @@ public:
addIndex(BSON("bar" << 1));
WorkingSet ws;
- auto ah = std::make_unique<AndSortedStage>(&_opCtx, &ws);
+ auto ah = std::make_unique<AndSortedStage>(_expCtx.get(), &ws);
// foo == 7.
auto params = makeIndexScanParams(&_opCtx, getIndex(BSON("foo" << 1), coll));
params.bounds.startKey = BSON("" << 7);
params.bounds.endKey = BSON("" << 7);
- ah->addChild(std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr));
+ ah->addChild(std::make_unique<IndexScan>(_expCtx.get(), params, &ws, nullptr));
// bar == 20.
params = makeIndexScanParams(&_opCtx, getIndex(BSON("bar" << 1), coll));
params.bounds.startKey = BSON("" << 20);
params.bounds.endKey = BSON("" << 20);
- ah->addChild(std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr));
+ ah->addChild(std::make_unique<IndexScan>(_expCtx.get(), params, &ws, nullptr));
ASSERT_EQUALS(0, countResults(ah.get()));
}
@@ -1195,19 +1198,19 @@ public:
addIndex(BSON("bar" << 1));
WorkingSet ws;
- auto ah = std::make_unique<AndHashStage>(&_opCtx, &ws);
+ auto ah = std::make_unique<AndHashStage>(_expCtx.get(), &ws);
// Scan over foo == 1
auto params = makeIndexScanParams(&_opCtx, getIndex(BSON("foo" << 1), coll));
params.bounds.startKey = BSON("" << 1);
params.bounds.endKey = BSON("" << 1);
- ah->addChild(std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr));
+ ah->addChild(std::make_unique<IndexScan>(_expCtx.get(), params, &ws, nullptr));
// Intersect with 7 <= bar < 10000
params = makeIndexScanParams(&_opCtx, getIndex(BSON("bar" << 1), coll));
params.bounds.startKey = BSON("" << 7);
params.bounds.endKey = BSON("" << 10000);
- ah->addChild(std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr));
+ ah->addChild(std::make_unique<IndexScan>(_expCtx.get(), params, &ws, nullptr));
WorkingSetID lastId = WorkingSet::INVALID_ID;
@@ -1257,25 +1260,25 @@ public:
addIndex(BSON("bar" << 1));
WorkingSet ws;
- unique_ptr<AndSortedStage> as = std::make_unique<AndSortedStage>(&_opCtx, &ws);
+ unique_ptr<AndSortedStage> as = std::make_unique<AndSortedStage>(_expCtx.get(), &ws);
// Scan over foo == 1
auto params = makeIndexScanParams(&_opCtx, getIndex(BSON("foo" << 1), coll));
params.bounds.startKey = BSON("" << 1);
params.bounds.endKey = BSON("" << 1);
- auto firstScan = std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr);
+ auto firstScan = std::make_unique<IndexScan>(_expCtx.get(), params, &ws, nullptr);
// First child of the AND_SORTED stage is a Fetch. The NULL in the
// constructor means there is no filter.
auto fetch =
- std::make_unique<FetchStage>(&_opCtx, &ws, std::move(firstScan), nullptr, coll);
+ std::make_unique<FetchStage>(_expCtx.get(), &ws, std::move(firstScan), nullptr, coll);
as->addChild(std::move(fetch));
// bar == 1
params = makeIndexScanParams(&_opCtx, getIndex(BSON("bar" << 1), coll));
params.bounds.startKey = BSON("" << 1);
params.bounds.endKey = BSON("" << 1);
- as->addChild(std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr));
+ as->addChild(std::make_unique<IndexScan>(_expCtx.get(), params, &ws, nullptr));
for (int i = 0; i < 50; i++) {
BSONObj obj = getNext(as.get(), &ws);
@@ -1310,24 +1313,24 @@ public:
addIndex(BSON("bar" << 1));
WorkingSet ws;
- unique_ptr<AndSortedStage> as = std::make_unique<AndSortedStage>(&_opCtx, &ws);
+ unique_ptr<AndSortedStage> as = std::make_unique<AndSortedStage>(_expCtx.get(), &ws);
// Scan over foo == 1
auto params = makeIndexScanParams(&_opCtx, getIndex(BSON("foo" << 1), coll));
params.bounds.startKey = BSON("" << 1);
params.bounds.endKey = BSON("" << 1);
- as->addChild(std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr));
+ as->addChild(std::make_unique<IndexScan>(_expCtx.get(), params, &ws, nullptr));
// bar == 1
params = makeIndexScanParams(&_opCtx, getIndex(BSON("bar" << 1), coll));
params.bounds.startKey = BSON("" << 1);
params.bounds.endKey = BSON("" << 1);
- auto secondScan = std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr);
+ auto secondScan = std::make_unique<IndexScan>(_expCtx.get(), params, &ws, nullptr);
// Second child of the AND_SORTED stage is a Fetch. The NULL in the
// constructor means there is no filter.
auto fetch =
- std::make_unique<FetchStage>(&_opCtx, &ws, std::move(secondScan), nullptr, coll);
+ std::make_unique<FetchStage>(_expCtx.get(), &ws, std::move(secondScan), nullptr, coll);
as->addChild(std::move(fetch));
for (int i = 0; i < 50; i++) {
diff --git a/src/mongo/dbtests/query_stage_cached_plan.cpp b/src/mongo/dbtests/query_stage_cached_plan.cpp
index fd3b508fc37..3811a73e3d0 100644
--- a/src/mongo/dbtests/query_stage_cached_plan.cpp
+++ b/src/mongo/dbtests/query_stage_cached_plan.cpp
@@ -152,13 +152,18 @@ public:
const size_t decisionWorks = 10;
const size_t mockWorks =
1U + static_cast<size_t>(internalQueryCacheEvictionRatio * decisionWorks);
- auto mockChild = std::make_unique<QueuedDataStage>(&_opCtx, &_ws);
+ auto mockChild = std::make_unique<QueuedDataStage>(_expCtx.get(), &_ws);
for (size_t i = 0; i < mockWorks; i++) {
mockChild->pushBack(PlanStage::NEED_TIME);
}
- CachedPlanStage cachedPlanStage(
- &_opCtx, collection, &_ws, cq, plannerParams, decisionWorks, std::move(mockChild));
+ CachedPlanStage cachedPlanStage(_expCtx.get(),
+ collection,
+ &_ws,
+ cq,
+ plannerParams,
+ decisionWorks,
+ std::move(mockChild));
// This should succeed after triggering a replan.
PlanYieldPolicy yieldPolicy(PlanExecutor::NO_YIELD,
@@ -171,6 +176,9 @@ protected:
OperationContext& _opCtx = *_opCtxPtr;
WorkingSet _ws;
DBDirectClient _client{&_opCtx};
+
+ boost::intrusive_ptr<ExpressionContext> _expCtx =
+ make_intrusive<ExpressionContext>(&_opCtx, nullptr, nss);
};
/**
@@ -199,13 +207,18 @@ TEST_F(QueryStageCachedPlan, QueryStageCachedPlanFailure) {
fillOutPlannerParams(&_opCtx, collection, cq.get(), &plannerParams);
// Queued data stage will return a failure during the cached plan trial period.
- auto mockChild = std::make_unique<QueuedDataStage>(&_opCtx, &_ws);
+ auto mockChild = std::make_unique<QueuedDataStage>(_expCtx.get(), &_ws);
mockChild->pushBack(PlanStage::FAILURE);
// High enough so that we shouldn't trigger a replan based on works.
const size_t decisionWorks = 50;
- CachedPlanStage cachedPlanStage(
- &_opCtx, collection, &_ws, cq.get(), plannerParams, decisionWorks, std::move(mockChild));
+ CachedPlanStage cachedPlanStage(_expCtx.get(),
+ collection,
+ &_ws,
+ cq.get(),
+ plannerParams,
+ decisionWorks,
+ std::move(mockChild));
// This should succeed after triggering a replan.
PlanYieldPolicy yieldPolicy(PlanExecutor::NO_YIELD,
@@ -249,13 +262,18 @@ TEST_F(QueryStageCachedPlan, QueryStageCachedPlanHitMaxWorks) {
const size_t decisionWorks = 10;
const size_t mockWorks =
1U + static_cast<size_t>(internalQueryCacheEvictionRatio * decisionWorks);
- auto mockChild = std::make_unique<QueuedDataStage>(&_opCtx, &_ws);
+ auto mockChild = std::make_unique<QueuedDataStage>(_expCtx.get(), &_ws);
for (size_t i = 0; i < mockWorks; i++) {
mockChild->pushBack(PlanStage::NEED_TIME);
}
- CachedPlanStage cachedPlanStage(
- &_opCtx, collection, &_ws, cq.get(), plannerParams, decisionWorks, std::move(mockChild));
+ CachedPlanStage cachedPlanStage(_expCtx.get(),
+ collection,
+ &_ws,
+ cq.get(),
+ plannerParams,
+ decisionWorks,
+ std::move(mockChild));
// This should succeed after triggering a replan.
PlanYieldPolicy yieldPolicy(PlanExecutor::NO_YIELD,
@@ -453,13 +471,13 @@ TEST_F(QueryStageCachedPlan, ThrowsOnYieldRecoveryWhenIndexIsDroppedBeforePlanSe
fillOutPlannerParams(&_opCtx, collection, cq.get(), &plannerParams);
const size_t decisionWorks = 10;
- CachedPlanStage cachedPlanStage(&_opCtx,
+ CachedPlanStage cachedPlanStage(_expCtx.get(),
collection,
&_ws,
cq.get(),
plannerParams,
decisionWorks,
- std::make_unique<QueuedDataStage>(&_opCtx, &_ws));
+ std::make_unique<QueuedDataStage>(_expCtx.get(), &_ws));
// Drop an index while the CachedPlanStage is in a saved state. Restoring should fail, since we
// may still need the dropped index for plan selection.
@@ -495,13 +513,13 @@ TEST_F(QueryStageCachedPlan, DoesNotThrowOnYieldRecoveryWhenIndexIsDroppedAferPl
fillOutPlannerParams(&_opCtx, collection, cq.get(), &plannerParams);
const size_t decisionWorks = 10;
- CachedPlanStage cachedPlanStage(&_opCtx,
+ CachedPlanStage cachedPlanStage(_expCtx.get(),
collection,
&_ws,
cq.get(),
plannerParams,
decisionWorks,
- std::make_unique<QueuedDataStage>(&_opCtx, &_ws));
+ std::make_unique<QueuedDataStage>(_expCtx.get(), &_ws));
PlanYieldPolicy yieldPolicy(PlanExecutor::YIELD_MANUAL,
_opCtx.getServiceContext()->getFastClockSource());
diff --git a/src/mongo/dbtests/query_stage_collscan.cpp b/src/mongo/dbtests/query_stage_collscan.cpp
index 5eb79bb6e80..9040569a58c 100644
--- a/src/mongo/dbtests/query_stage_collscan.cpp
+++ b/src/mongo/dbtests/query_stage_collscan.cpp
@@ -94,18 +94,15 @@ public:
params.tailable = false;
// Make the filter.
- const CollatorInterface* collator = nullptr;
- const boost::intrusive_ptr<ExpressionContext> expCtx(
- new ExpressionContext(&_opCtx, collator, nss));
StatusWithMatchExpression statusWithMatcher =
- MatchExpressionParser::parse(filterObj, expCtx);
+ MatchExpressionParser::parse(filterObj, _expCtx);
verify(statusWithMatcher.isOK());
unique_ptr<MatchExpression> filterExpr = std::move(statusWithMatcher.getValue());
// Make a scan and have the runner own it.
unique_ptr<WorkingSet> ws = std::make_unique<WorkingSet>();
unique_ptr<PlanStage> ps = std::make_unique<CollectionScan>(
- &_opCtx, collection, params, ws.get(), filterExpr.get());
+ _expCtx.get(), collection, params, ws.get(), filterExpr.get());
auto statusWithPlanExecutor = PlanExecutor::make(
&_opCtx, std::move(ws), std::move(ps), collection, PlanExecutor::NO_YIELD);
@@ -132,7 +129,7 @@ public:
params.tailable = false;
unique_ptr<CollectionScan> scan(
- new CollectionScan(&_opCtx, collection, params, &ws, nullptr));
+ new CollectionScan(_expCtx.get(), collection, params, &ws, nullptr));
while (!scan->isEOF()) {
WorkingSetID id = WorkingSet::INVALID_ID;
PlanStage::StageState state = scan->work(&id);
@@ -152,6 +149,9 @@ protected:
const ServiceContext::UniqueOperationContext _txnPtr = cc().makeOperationContext();
OperationContext& _opCtx = *_txnPtr;
+ boost::intrusive_ptr<ExpressionContext> _expCtx =
+ make_intrusive<ExpressionContext>(&_opCtx, nullptr, nss);
+
private:
DBDirectClient _client;
};
@@ -192,7 +192,7 @@ TEST_F(QueryStageCollectionScanTest, QueryStageCollscanObjectsInOrderForward) {
// Make a scan and have the runner own it.
unique_ptr<WorkingSet> ws = std::make_unique<WorkingSet>();
unique_ptr<PlanStage> ps =
- std::make_unique<CollectionScan>(&_opCtx, collection, params, ws.get(), nullptr);
+ std::make_unique<CollectionScan>(_expCtx.get(), collection, params, ws.get(), nullptr);
auto statusWithPlanExecutor = PlanExecutor::make(
&_opCtx, std::move(ws), std::move(ps), collection, PlanExecutor::NO_YIELD);
@@ -221,7 +221,7 @@ TEST_F(QueryStageCollectionScanTest, QueryStageCollscanObjectsInOrderBackward) {
unique_ptr<WorkingSet> ws = std::make_unique<WorkingSet>();
unique_ptr<PlanStage> ps =
- std::make_unique<CollectionScan>(&_opCtx, collection, params, ws.get(), nullptr);
+ std::make_unique<CollectionScan>(_expCtx.get(), collection, params, ws.get(), nullptr);
auto statusWithPlanExecutor = PlanExecutor::make(
&_opCtx, std::move(ws), std::move(ps), collection, PlanExecutor::NO_YIELD);
@@ -255,7 +255,7 @@ TEST_F(QueryStageCollectionScanTest, QueryStageCollscanDeleteUpcomingObject) {
params.tailable = false;
WorkingSet ws;
- unique_ptr<PlanStage> scan(new CollectionScan(&_opCtx, coll, params, &ws, nullptr));
+ unique_ptr<PlanStage> scan(new CollectionScan(_expCtx.get(), coll, params, &ws, nullptr));
int count = 0;
while (count < 10) {
@@ -308,7 +308,7 @@ TEST_F(QueryStageCollectionScanTest, QueryStageCollscanDeleteUpcomingObjectBackw
params.tailable = false;
WorkingSet ws;
- unique_ptr<PlanStage> scan(new CollectionScan(&_opCtx, coll, params, &ws, nullptr));
+ unique_ptr<PlanStage> scan(new CollectionScan(_expCtx.get(), coll, params, &ws, nullptr));
int count = 0;
while (count < 10) {
@@ -368,7 +368,7 @@ TEST_F(QueryStageCollectionScanTest, QueryTestCollscanResumeAfterRecordIdSeekSuc
// Create plan stage.
unique_ptr<WorkingSet> ws = std::make_unique<WorkingSet>();
unique_ptr<PlanStage> ps =
- std::make_unique<CollectionScan>(&_opCtx, collection, params, ws.get(), nullptr);
+ std::make_unique<CollectionScan>(_expCtx.get(), collection, params, ws.get(), nullptr);
WorkingSetID id = WorkingSet::INVALID_ID;
@@ -417,7 +417,7 @@ TEST_F(QueryStageCollectionScanTest, QueryTestCollscanResumeAfterRecordIdSeekFai
// Create plan stage.
unique_ptr<WorkingSet> ws = std::make_unique<WorkingSet>();
unique_ptr<PlanStage> ps =
- std::make_unique<CollectionScan>(&_opCtx, coll, params, ws.get(), nullptr);
+ std::make_unique<CollectionScan>(_expCtx.get(), coll, params, ws.get(), nullptr);
WorkingSetID id = WorkingSet::INVALID_ID;
diff --git a/src/mongo/dbtests/query_stage_count.cpp b/src/mongo/dbtests/query_stage_count.cpp
index 0c62e6636f4..304297724be 100644
--- a/src/mongo/dbtests/query_stage_count.cpp
+++ b/src/mongo/dbtests/query_stage_count.cpp
@@ -60,6 +60,7 @@ public:
CountStageTest()
: _dbLock(&_opCtx, nsToDatabaseSubstring(ns()), MODE_X),
_ctx(&_opCtx, ns()),
+ _expCtx(make_intrusive<ExpressionContext>(&_opCtx, nullptr, kTestNss)),
_coll(nullptr) {}
virtual ~CountStageTest() {}
@@ -94,7 +95,8 @@ public:
params.direction = CollectionScanParams::FORWARD;
params.tailable = false;
- unique_ptr<CollectionScan> scan(new CollectionScan(&_opCtx, _coll, params, &ws, nullptr));
+ unique_ptr<CollectionScan> scan(
+ new CollectionScan(_expCtx.get(), _coll, params, &ws, nullptr));
while (!scan->isEOF()) {
WorkingSetID id = WorkingSet::INVALID_ID;
PlanStage::StageState state = scan->work(&id);
@@ -146,11 +148,8 @@ public:
unique_ptr<WorkingSet> ws(new WorkingSet);
- const CollatorInterface* collator = nullptr;
- const boost::intrusive_ptr<ExpressionContext> expCtx(
- new ExpressionContext(&_opCtx, collator, kTestNss));
StatusWithMatchExpression statusWithMatcher =
- MatchExpressionParser::parse(request.getQuery(), expCtx);
+ MatchExpressionParser::parse(request.getQuery(), _expCtx);
ASSERT(statusWithMatcher.isOK());
unique_ptr<MatchExpression> expression = std::move(statusWithMatcher.getValue());
@@ -161,7 +160,7 @@ public:
scan = createCollScan(expression.get(), ws.get());
}
- CountStage countStage(&_opCtx,
+ CountStage countStage(_expCtx.get(),
_coll,
request.getLimit().value_or(0),
request.getSkip().value_or(0),
@@ -216,14 +215,14 @@ public:
params.direction = 1;
// This child stage gets owned and freed by its parent CountStage
- return new IndexScan(&_opCtx, params, ws, expr);
+ return new IndexScan(_expCtx.get(), params, ws, expr);
}
CollectionScan* createCollScan(MatchExpression* expr, WorkingSet* ws) {
CollectionScanParams params;
// This child stage gets owned and freed by its parent CountStage
- return new CollectionScan(&_opCtx, _coll, params, ws, expr);
+ return new CollectionScan(_expCtx.get(), _coll, params, ws, expr);
}
static const char* ns() {
@@ -240,6 +239,7 @@ protected:
OperationContext& _opCtx = *_opCtxPtr;
Lock::DBLock _dbLock;
OldClientContext _ctx;
+ boost::intrusive_ptr<ExpressionContext> _expCtx;
Collection* _coll;
};
diff --git a/src/mongo/dbtests/query_stage_count_scan.cpp b/src/mongo/dbtests/query_stage_count_scan.cpp
index 913baf62ba9..d89316948fc 100644
--- a/src/mongo/dbtests/query_stage_count_scan.cpp
+++ b/src/mongo/dbtests/query_stage_count_scan.cpp
@@ -110,6 +110,9 @@ protected:
const ServiceContext::UniqueOperationContext _txnPtr = cc().makeOperationContext();
OperationContext& _opCtx = *_txnPtr;
+ boost::intrusive_ptr<ExpressionContext> _expCtx =
+ make_intrusive<ExpressionContext>(&_opCtx, nullptr, NamespaceString(ns()));
+
private:
DBDirectClient _client;
};
@@ -138,7 +141,7 @@ public:
params.endKeyInclusive = true;
WorkingSet ws;
- CountScan count(&_opCtx, params, &ws);
+ CountScan count(_expCtx.get(), params, &ws);
int numCounted = runCount(&count);
ASSERT_EQUALS(2, numCounted);
@@ -169,7 +172,7 @@ public:
params.endKeyInclusive = true;
WorkingSet ws;
- CountScan count(&_opCtx, params, &ws);
+ CountScan count(_expCtx.get(), params, &ws);
int numCounted = runCount(&count);
ASSERT_EQUALS(5, numCounted);
@@ -200,7 +203,7 @@ public:
params.endKeyInclusive = false;
WorkingSet ws;
- CountScan count(&_opCtx, params, &ws);
+ CountScan count(_expCtx.get(), params, &ws);
int numCounted = runCount(&count);
ASSERT_EQUALS(3, numCounted);
@@ -227,7 +230,7 @@ public:
params.endKeyInclusive = false;
WorkingSet ws;
- CountScan count(&_opCtx, params, &ws);
+ CountScan count(_expCtx.get(), params, &ws);
int numCounted = runCount(&count);
ASSERT_EQUALS(0, numCounted);
@@ -255,7 +258,7 @@ public:
params.endKeyInclusive = false;
WorkingSet ws;
- CountScan count(&_opCtx, params, &ws);
+ CountScan count(_expCtx.get(), params, &ws);
int numCounted = runCount(&count);
ASSERT_EQUALS(0, numCounted);
@@ -284,7 +287,7 @@ public:
params.endKeyInclusive = true;
WorkingSet ws;
- CountScan count(&_opCtx, params, &ws);
+ CountScan count(_expCtx.get(), params, &ws);
int numCounted = runCount(&count);
ASSERT_EQUALS(0, numCounted);
@@ -314,7 +317,7 @@ public:
params.endKeyInclusive = true;
WorkingSet ws;
- CountScan count(&_opCtx, params, &ws);
+ CountScan count(_expCtx.get(), params, &ws);
WorkingSetID wsid;
int numCounted = 0;
@@ -366,7 +369,7 @@ public:
params.endKeyInclusive = true;
WorkingSet ws;
- CountScan count(&_opCtx, params, &ws);
+ CountScan count(_expCtx.get(), params, &ws);
WorkingSetID wsid;
int numCounted = 0;
@@ -421,7 +424,7 @@ public:
params.endKeyInclusive = true;
WorkingSet ws;
- CountScan count(&_opCtx, params, &ws);
+ CountScan count(_expCtx.get(), params, &ws);
WorkingSetID wsid;
int numCounted = 0;
@@ -483,7 +486,7 @@ public:
params.endKeyInclusive = true;
WorkingSet ws;
- CountScan count(&_opCtx, params, &ws);
+ CountScan count(_expCtx.get(), params, &ws);
int numCounted = runCount(&count);
ASSERT_EQUALS(7, numCounted);
@@ -515,7 +518,7 @@ public:
params.endKeyInclusive = true; // yes?
WorkingSet ws;
- CountScan count(&_opCtx, params, &ws);
+ CountScan count(_expCtx.get(), params, &ws);
int numCounted = runCount(&count);
ASSERT_EQUALS(9, numCounted);
@@ -544,7 +547,7 @@ public:
params.endKeyInclusive = true;
WorkingSet ws;
- CountScan count(&_opCtx, params, &ws);
+ CountScan count(_expCtx.get(), params, &ws);
WorkingSetID wsid;
int numCounted = 0;
diff --git a/src/mongo/dbtests/query_stage_delete.cpp b/src/mongo/dbtests/query_stage_delete.cpp
index db316f5a15f..6ff9419dd17 100644
--- a/src/mongo/dbtests/query_stage_delete.cpp
+++ b/src/mongo/dbtests/query_stage_delete.cpp
@@ -91,7 +91,7 @@ public:
params.tailable = false;
unique_ptr<CollectionScan> scan(
- new CollectionScan(&_opCtx, collection, params, &ws, nullptr));
+ new CollectionScan(_expCtx.get(), collection, params, &ws, nullptr));
while (!scan->isEOF()) {
WorkingSetID id = WorkingSet::INVALID_ID;
PlanStage::StageState state = scan->work(&id);
@@ -119,6 +119,9 @@ protected:
const ServiceContext::UniqueOperationContext _txnPtr = cc().makeOperationContext();
OperationContext& _opCtx = *_txnPtr;
+ boost::intrusive_ptr<ExpressionContext> _expCtx =
+ make_intrusive<ExpressionContext>(&_opCtx, nullptr, nss);
+
private:
DBDirectClient _client;
};
@@ -147,11 +150,12 @@ public:
deleteStageParams->isMulti = true;
WorkingSet ws;
- DeleteStage deleteStage(&_opCtx,
- std::move(deleteStageParams),
- &ws,
- coll,
- new CollectionScan(&_opCtx, coll, collScanParams, &ws, nullptr));
+ DeleteStage deleteStage(
+ _expCtx.get(),
+ std::move(deleteStageParams),
+ &ws,
+ coll,
+ new CollectionScan(_expCtx.get(), coll, collScanParams, &ws, nullptr));
const DeleteStats* stats = static_cast<const DeleteStats*>(deleteStage.getSpecificStats());
@@ -203,7 +207,7 @@ public:
// Configure a QueuedDataStage to pass the first object in the collection back in a
// RID_AND_OBJ state.
- auto qds = std::make_unique<QueuedDataStage>(&_opCtx, ws.get());
+ auto qds = std::make_unique<QueuedDataStage>(_expCtx.get(), ws.get());
WorkingSetID id = ws->allocate();
WorkingSetMember* member = ws->get(id);
member->recordId = recordIds[targetDocIndex];
@@ -218,7 +222,7 @@ public:
deleteParams->canonicalQuery = cq.get();
const auto deleteStage = std::make_unique<DeleteStage>(
- &_opCtx, std::move(deleteParams), ws.get(), coll, qds.release());
+ _expCtx.get(), std::move(deleteParams), ws.get(), coll, qds.release());
const DeleteStats* stats = static_cast<const DeleteStats*>(deleteStage->getSpecificStats());
diff --git a/src/mongo/dbtests/query_stage_distinct.cpp b/src/mongo/dbtests/query_stage_distinct.cpp
index fdfa2c06469..2dd15cd8f6c 100644
--- a/src/mongo/dbtests/query_stage_distinct.cpp
+++ b/src/mongo/dbtests/query_stage_distinct.cpp
@@ -54,7 +54,8 @@ static const NamespaceString nss{"unittests.QueryStageDistinct"};
class DistinctBase {
public:
- DistinctBase() : _client(&_opCtx) {}
+ DistinctBase()
+ : _expCtx(make_intrusive<ExpressionContext>(&_opCtx, nullptr, nss)), _client(&_opCtx) {}
virtual ~DistinctBase() {
_client.dropCollection(nss.ns());
@@ -98,6 +99,7 @@ public:
protected:
const ServiceContext::UniqueOperationContext _txnPtr = cc().makeOperationContext();
OperationContext& _opCtx = *_txnPtr;
+ boost::intrusive_ptr<ExpressionContext> _expCtx;
private:
DBDirectClient _client;
@@ -142,7 +144,7 @@ public:
params.bounds.fields.push_back(oil);
WorkingSet ws;
- DistinctScan distinct(&_opCtx, std::move(params), &ws);
+ DistinctScan distinct(_expCtx.get(), std::move(params), &ws);
WorkingSetID wsid;
// Get our first result.
@@ -210,7 +212,7 @@ public:
params.bounds.fields.push_back(oil);
WorkingSet ws;
- DistinctScan distinct(&_opCtx, std::move(params), &ws);
+ DistinctScan distinct(_expCtx.get(), std::move(params), &ws);
// We should see each number in the range [1, 6] exactly once.
std::set<int> seen;
@@ -279,7 +281,7 @@ public:
params.bounds.fields.push_back(bOil);
WorkingSet ws;
- DistinctScan distinct(&_opCtx, std::move(params), &ws);
+ DistinctScan distinct(_expCtx.get(), std::move(params), &ws);
WorkingSetID wsid;
PlanStage::StageState state;
diff --git a/src/mongo/dbtests/query_stage_ensure_sorted.cpp b/src/mongo/dbtests/query_stage_ensure_sorted.cpp
index e956472d40d..cccca5ac8d4 100644
--- a/src/mongo/dbtests/query_stage_ensure_sorted.cpp
+++ b/src/mongo/dbtests/query_stage_ensure_sorted.cpp
@@ -59,11 +59,15 @@ public:
void testWork(const char* patternStr,
const char* inputStr,
const char* expectedStr,
- CollatorInterface* collator = nullptr) {
+ std::unique_ptr<CollatorInterface> collator = nullptr) {
auto opCtx = _serviceContext.makeOperationContext();
+ // Create a mock ExpressionContext.
+ boost::intrusive_ptr<ExpressionContext> expCtx(
+ make_intrusive<ExpressionContext>(opCtx.get(), std::move(collator), kTestNss));
+
WorkingSet ws;
- auto queuedDataStage = std::make_unique<QueuedDataStage>(opCtx.get(), &ws);
+ auto queuedDataStage = std::make_unique<QueuedDataStage>(expCtx.get(), &ws);
BSONObj inputObj = fromjson(inputStr);
BSONElement inputElt = inputObj["input"];
ASSERT(inputElt.isABSONObj());
@@ -80,16 +84,11 @@ public:
queuedDataStage->pushBack(id);
}
- // Create a mock ExpressionContext.
- boost::intrusive_ptr<ExpressionContext> pExpCtx(
- new ExpressionContext(opCtx.get(), collator, kTestNss));
- pExpCtx->setCollator(collator);
-
// Initialization.
BSONObj pattern = fromjson(patternStr);
auto sortKeyGen = std::make_unique<SortKeyGeneratorStage>(
- pExpCtx, std::move(queuedDataStage), &ws, pattern);
- EnsureSortedStage ess(opCtx.get(), pattern, &ws, std::move(sortKeyGen));
+ expCtx.get(), std::move(queuedDataStage), &ws, pattern);
+ EnsureSortedStage ess(expCtx.get(), pattern, &ws, std::move(sortKeyGen));
WorkingSetID id = WorkingSet::INVALID_ID;
PlanStage::StageState state = PlanStage::NEED_TIME;
@@ -127,10 +126,10 @@ TEST_F(QueryStageEnsureSortedTest, EnsureSortedEmptyWorkingSet) {
new ExpressionContext(opCtx.get(), nullptr, kTestNss));
WorkingSet ws;
- auto queuedDataStage = std::make_unique<QueuedDataStage>(opCtx.get(), &ws);
+ auto queuedDataStage = std::make_unique<QueuedDataStage>(pExpCtx.get(), &ws);
auto sortKeyGen = std::make_unique<SortKeyGeneratorStage>(
pExpCtx, std::move(queuedDataStage), &ws, BSONObj());
- EnsureSortedStage ess(opCtx.get(), BSONObj(), &ws, std::move(sortKeyGen));
+ EnsureSortedStage ess(pExpCtx.get(), BSONObj(), &ws, std::move(sortKeyGen));
WorkingSetID id = WorkingSet::INVALID_ID;
PlanStage::StageState state = PlanStage::NEED_TIME;
@@ -184,8 +183,12 @@ TEST_F(QueryStageEnsureSortedTest, EnsureSortedStringsNullCollator) {
}
TEST_F(QueryStageEnsureSortedTest, EnsureSortedStringsCollator) {
- CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kReverseString);
- testWork("{a: 1}", "{input: [{a: 'abc'}, {a: 'cba'}]}", "{output: [{a: 'abc'}]}", &collator);
+ auto collator =
+ std::make_unique<CollatorInterfaceMock>(CollatorInterfaceMock::MockType::kReverseString);
+ testWork("{a: 1}",
+ "{input: [{a: 'abc'}, {a: 'cba'}]}",
+ "{output: [{a: 'abc'}]}",
+ std::move(collator));
}
} // namespace
diff --git a/src/mongo/dbtests/query_stage_fetch.cpp b/src/mongo/dbtests/query_stage_fetch.cpp
index 53b4e1e0646..163621ce89e 100644
--- a/src/mongo/dbtests/query_stage_fetch.cpp
+++ b/src/mongo/dbtests/query_stage_fetch.cpp
@@ -88,6 +88,9 @@ protected:
const ServiceContext::UniqueOperationContext _opCtxPtr = cc().makeOperationContext();
OperationContext& _opCtx = *_opCtxPtr;
DBDirectClient _client;
+
+ boost::intrusive_ptr<ExpressionContext> _expCtx =
+ make_intrusive<ExpressionContext>(&_opCtx, nullptr, nss());
};
@@ -116,7 +119,7 @@ public:
ASSERT_EQUALS(size_t(1), recordIds.size());
// Create a mock stage that returns the WSM.
- auto mockStage = std::make_unique<QueuedDataStage>(&_opCtx, &ws);
+ auto mockStage = std::make_unique<QueuedDataStage>(_expCtx.get(), &ws);
// Mock data.
{
@@ -140,7 +143,7 @@ public:
}
auto fetchStage =
- std::make_unique<FetchStage>(&_opCtx, &ws, std::move(mockStage), nullptr, coll);
+ std::make_unique<FetchStage>(_expCtx.get(), &ws, std::move(mockStage), nullptr, coll);
WorkingSetID id = WorkingSet::INVALID_ID;
PlanStage::StageState state;
@@ -183,7 +186,7 @@ public:
ASSERT_EQUALS(size_t(1), recordIds.size());
// Create a mock stage that returns the WSM.
- auto mockStage = std::make_unique<QueuedDataStage>(&_opCtx, &ws);
+ auto mockStage = std::make_unique<QueuedDataStage>(_expCtx.get(), &ws);
// Mock data.
{
@@ -200,17 +203,14 @@ public:
// Make the filter.
BSONObj filterObj = BSON("foo" << 6);
- const CollatorInterface* collator = nullptr;
- const boost::intrusive_ptr<ExpressionContext> expCtx(
- new ExpressionContext(&_opCtx, collator, nss()));
StatusWithMatchExpression statusWithMatcher =
- MatchExpressionParser::parse(filterObj, expCtx);
+ MatchExpressionParser::parse(filterObj, _expCtx);
verify(statusWithMatcher.isOK());
unique_ptr<MatchExpression> filterExpr = std::move(statusWithMatcher.getValue());
// Matcher requires that foo==6 but we only have data with foo==5.
auto fetchStage = std::make_unique<FetchStage>(
- &_opCtx, &ws, std::move(mockStage), filterExpr.get(), coll);
+ _expCtx.get(), &ws, std::move(mockStage), filterExpr.get(), coll);
// First call should return a fetch request as it's not in memory.
WorkingSetID id = WorkingSet::INVALID_ID;
diff --git a/src/mongo/dbtests/query_stage_ixscan.cpp b/src/mongo/dbtests/query_stage_ixscan.cpp
index e9ac3c744d8..c7f3d0aa5fd 100644
--- a/src/mongo/dbtests/query_stage_ixscan.cpp
+++ b/src/mongo/dbtests/query_stage_ixscan.cpp
@@ -49,7 +49,8 @@ public:
IndexScanTest()
: _dbLock(&_opCtx, nsToDatabaseSubstring(ns()), MODE_X),
_ctx(&_opCtx, ns()),
- _coll(nullptr) {}
+ _coll(nullptr),
+ _expCtx(make_intrusive<ExpressionContext>(&_opCtx, nullptr, nss())) {}
virtual ~IndexScanTest() {}
@@ -110,7 +111,7 @@ public:
// This child stage gets owned and freed by the caller.
MatchExpression* filter = nullptr;
- return new IndexScan(&_opCtx, params, &_ws, filter);
+ return new IndexScan(_expCtx.get(), params, &_ws, filter);
}
IndexScan* createIndexScan(BSONObj startKey,
@@ -134,7 +135,7 @@ public:
params.bounds.fields.push_back(oil);
MatchExpression* filter = nullptr;
- return new IndexScan(&_opCtx, params, &_ws, filter);
+ return new IndexScan(_expCtx.get(), params, &_ws, filter);
}
static const char* ns() {
@@ -153,6 +154,8 @@ protected:
Collection* _coll;
WorkingSet _ws;
+
+ boost::intrusive_ptr<ExpressionContext> _expCtx;
};
// SERVER-15958: Some IndexScanStats info must be initialized on construction of an IndexScan.
diff --git a/src/mongo/dbtests/query_stage_limit_skip.cpp b/src/mongo/dbtests/query_stage_limit_skip.cpp
index 7d5fba46ae1..41050b305dd 100644
--- a/src/mongo/dbtests/query_stage_limit_skip.cpp
+++ b/src/mongo/dbtests/query_stage_limit_skip.cpp
@@ -56,8 +56,9 @@ using std::unique_ptr;
static const int N = 50;
/* Populate a QueuedDataStage and return it. Caller owns it. */
-std::unique_ptr<QueuedDataStage> getMS(OperationContext* opCtx, WorkingSet* ws) {
- auto ms = std::make_unique<QueuedDataStage>(opCtx, ws);
+std::unique_ptr<QueuedDataStage> getMS(const boost::intrusive_ptr<ExpressionContext>& expCtx,
+ WorkingSet* ws) {
+ auto ms = std::make_unique<QueuedDataStage>(expCtx.get(), ws);
// Put N ADVANCED results into the mock stage, and some other stalling results (YIELD/TIME).
for (int i = 0; i < N; ++i) {
@@ -94,15 +95,18 @@ int countResults(PlanStage* stage) {
class QueryStageLimitSkipBasicTest {
public:
void run() {
+ const boost::intrusive_ptr<ExpressionContext> expCtx(make_intrusive<ExpressionContext>(
+ _opCtx, std::unique_ptr<CollatorInterface>(nullptr), NamespaceString("test.dummyNS")));
+
for (int i = 0; i < 2 * N; ++i) {
WorkingSet ws;
unique_ptr<PlanStage> skip =
- std::make_unique<SkipStage>(_opCtx, i, &ws, getMS(_opCtx, &ws));
+ std::make_unique<SkipStage>(expCtx.get(), i, &ws, getMS(expCtx.get(), &ws));
ASSERT_EQUALS(max(0, N - i), countResults(skip.get()));
unique_ptr<PlanStage> limit =
- std::make_unique<LimitStage>(_opCtx, i, &ws, getMS(_opCtx, &ws));
+ std::make_unique<LimitStage>(expCtx.get(), i, &ws, getMS(expCtx.get(), &ws));
ASSERT_EQUALS(min(N, i), countResults(limit.get()));
}
}
diff --git a/src/mongo/dbtests/query_stage_merge_sort.cpp b/src/mongo/dbtests/query_stage_merge_sort.cpp
index 78fdcfd0ead..b6b1fa9e87e 100644
--- a/src/mongo/dbtests/query_stage_merge_sort.cpp
+++ b/src/mongo/dbtests/query_stage_merge_sort.cpp
@@ -134,6 +134,9 @@ protected:
const ServiceContext::UniqueOperationContext _txnPtr = cc().makeOperationContext();
OperationContext& _opCtx = *_txnPtr;
+ boost::intrusive_ptr<ExpressionContext> _expCtx =
+ make_intrusive<ExpressionContext>(&_opCtx, nullptr, nss());
+
private:
DBDirectClient _client;
};
@@ -170,18 +173,18 @@ public:
// Sort by c:1
MergeSortStageParams msparams;
msparams.pattern = BSON("c" << 1);
- auto ms = std::make_unique<MergeSortStage>(&_opCtx, msparams, ws.get());
+ auto ms = std::make_unique<MergeSortStage>(_expCtx.get(), msparams, ws.get());
// a:1
auto params = makeIndexScanParams(&_opCtx, getIndex(firstIndex, coll));
- ms->addChild(std::make_unique<IndexScan>(&_opCtx, params, ws.get(), nullptr));
+ ms->addChild(std::make_unique<IndexScan>(_expCtx.get(), params, ws.get(), nullptr));
// b:1
params = makeIndexScanParams(&_opCtx, getIndex(secondIndex, coll));
- ms->addChild(std::make_unique<IndexScan>(&_opCtx, params, ws.get(), nullptr));
+ ms->addChild(std::make_unique<IndexScan>(_expCtx.get(), params, ws.get(), nullptr));
unique_ptr<FetchStage> fetchStage =
- make_unique<FetchStage>(&_opCtx, ws.get(), std::move(ms), nullptr, coll);
+ make_unique<FetchStage>(_expCtx.get(), ws.get(), std::move(ms), nullptr, coll);
// Must fetch if we want to easily pull out an obj.
auto statusWithPlanExecutor = PlanExecutor::make(
&_opCtx, std::move(ws), std::move(fetchStage), coll, PlanExecutor::NO_YIELD);
@@ -236,17 +239,17 @@ public:
// Sort by c:1
MergeSortStageParams msparams;
msparams.pattern = BSON("c" << 1);
- auto ms = std::make_unique<MergeSortStage>(&_opCtx, msparams, ws.get());
+ auto ms = std::make_unique<MergeSortStage>(_expCtx.get(), msparams, ws.get());
// a:1
auto params = makeIndexScanParams(&_opCtx, getIndex(firstIndex, coll));
- ms->addChild(std::make_unique<IndexScan>(&_opCtx, params, ws.get(), nullptr));
+ ms->addChild(std::make_unique<IndexScan>(_expCtx.get(), params, ws.get(), nullptr));
// b:1
params = makeIndexScanParams(&_opCtx, getIndex(secondIndex, coll));
- ms->addChild(std::make_unique<IndexScan>(&_opCtx, params, ws.get(), nullptr));
+ ms->addChild(std::make_unique<IndexScan>(_expCtx.get(), params, ws.get(), nullptr));
unique_ptr<FetchStage> fetchStage =
- make_unique<FetchStage>(&_opCtx, ws.get(), std::move(ms), nullptr, coll);
+ make_unique<FetchStage>(_expCtx.get(), ws.get(), std::move(ms), nullptr, coll);
auto statusWithPlanExecutor = PlanExecutor::make(
&_opCtx, std::move(ws), std::move(fetchStage), coll, PlanExecutor::NO_YIELD);
@@ -301,17 +304,17 @@ public:
MergeSortStageParams msparams;
msparams.dedup = false;
msparams.pattern = BSON("c" << 1);
- auto ms = std::make_unique<MergeSortStage>(&_opCtx, msparams, ws.get());
+ auto ms = std::make_unique<MergeSortStage>(_expCtx.get(), msparams, ws.get());
// a:1
auto params = makeIndexScanParams(&_opCtx, getIndex(firstIndex, coll));
- ms->addChild(std::make_unique<IndexScan>(&_opCtx, params, ws.get(), nullptr));
+ ms->addChild(std::make_unique<IndexScan>(_expCtx.get(), params, ws.get(), nullptr));
// b:1
params = makeIndexScanParams(&_opCtx, getIndex(secondIndex, coll));
- ms->addChild(std::make_unique<IndexScan>(&_opCtx, params, ws.get(), nullptr));
+ ms->addChild(std::make_unique<IndexScan>(_expCtx.get(), params, ws.get(), nullptr));
unique_ptr<FetchStage> fetchStage =
- make_unique<FetchStage>(&_opCtx, ws.get(), std::move(ms), nullptr, coll);
+ make_unique<FetchStage>(_expCtx.get(), ws.get(), std::move(ms), nullptr, coll);
auto statusWithPlanExecutor = PlanExecutor::make(
&_opCtx, std::move(ws), std::move(fetchStage), coll, PlanExecutor::NO_YIELD);
@@ -368,21 +371,21 @@ public:
// Sort by c:-1
MergeSortStageParams msparams;
msparams.pattern = BSON("c" << -1);
- auto ms = std::make_unique<MergeSortStage>(&_opCtx, msparams, ws.get());
+ auto ms = std::make_unique<MergeSortStage>(_expCtx.get(), msparams, ws.get());
// a:1
auto params = makeIndexScanParams(&_opCtx, getIndex(firstIndex, coll));
params.bounds.startKey = objWithMaxKey(1);
params.bounds.endKey = objWithMinKey(1);
- ms->addChild(std::make_unique<IndexScan>(&_opCtx, params, ws.get(), nullptr));
+ ms->addChild(std::make_unique<IndexScan>(_expCtx.get(), params, ws.get(), nullptr));
// b:1
params = makeIndexScanParams(&_opCtx, getIndex(secondIndex, coll));
params.bounds.startKey = objWithMaxKey(1);
params.bounds.endKey = objWithMinKey(1);
- ms->addChild(std::make_unique<IndexScan>(&_opCtx, params, ws.get(), nullptr));
+ ms->addChild(std::make_unique<IndexScan>(_expCtx.get(), params, ws.get(), nullptr));
unique_ptr<FetchStage> fetchStage =
- make_unique<FetchStage>(&_opCtx, ws.get(), std::move(ms), nullptr, coll);
+ make_unique<FetchStage>(_expCtx.get(), ws.get(), std::move(ms), nullptr, coll);
auto statusWithPlanExecutor = PlanExecutor::make(
&_opCtx, std::move(ws), std::move(fetchStage), coll, PlanExecutor::NO_YIELD);
@@ -437,19 +440,19 @@ public:
// Sort by c:1
MergeSortStageParams msparams;
msparams.pattern = BSON("c" << 1);
- auto ms = std::make_unique<MergeSortStage>(&_opCtx, msparams, ws.get());
+ auto ms = std::make_unique<MergeSortStage>(_expCtx.get(), msparams, ws.get());
// a:1
auto params = makeIndexScanParams(&_opCtx, getIndex(firstIndex, coll));
- ms->addChild(std::make_unique<IndexScan>(&_opCtx, params, ws.get(), nullptr));
+ ms->addChild(std::make_unique<IndexScan>(_expCtx.get(), params, ws.get(), nullptr));
// b:51 (EOF)
params = makeIndexScanParams(&_opCtx, getIndex(secondIndex, coll));
params.bounds.startKey = BSON("" << 51 << "" << MinKey);
params.bounds.endKey = BSON("" << 51 << "" << MaxKey);
- ms->addChild(std::make_unique<IndexScan>(&_opCtx, params, ws.get(), nullptr));
+ ms->addChild(std::make_unique<IndexScan>(_expCtx.get(), params, ws.get(), nullptr));
unique_ptr<FetchStage> fetchStage =
- make_unique<FetchStage>(&_opCtx, ws.get(), std::move(ms), nullptr, coll);
+ make_unique<FetchStage>(_expCtx.get(), ws.get(), std::move(ms), nullptr, coll);
auto statusWithPlanExecutor = PlanExecutor::make(
&_opCtx, std::move(ws), std::move(fetchStage), coll, PlanExecutor::NO_YIELD);
@@ -488,7 +491,7 @@ public:
// Sort by foo:1
MergeSortStageParams msparams;
msparams.pattern = BSON("foo" << 1);
- auto ms = std::make_unique<MergeSortStage>(&_opCtx, msparams, ws.get());
+ auto ms = std::make_unique<MergeSortStage>(_expCtx.get(), msparams, ws.get());
int numIndices = 20;
for (int i = 0; i < numIndices; ++i) {
@@ -499,10 +502,10 @@ public:
BSONObj indexSpec = BSON(index << 1 << "foo" << 1);
addIndex(indexSpec);
auto params = makeIndexScanParams(&_opCtx, getIndex(indexSpec, coll));
- ms->addChild(std::make_unique<IndexScan>(&_opCtx, params, ws.get(), nullptr));
+ ms->addChild(std::make_unique<IndexScan>(_expCtx.get(), params, ws.get(), nullptr));
}
unique_ptr<FetchStage> fetchStage =
- make_unique<FetchStage>(&_opCtx, ws.get(), std::move(ms), nullptr, coll);
+ make_unique<FetchStage>(_expCtx.get(), ws.get(), std::move(ms), nullptr, coll);
auto statusWithPlanExecutor = PlanExecutor::make(
&_opCtx, std::move(ws), std::move(fetchStage), coll, PlanExecutor::NO_YIELD);
@@ -541,7 +544,7 @@ public:
// Sort by foo:1
MergeSortStageParams msparams;
msparams.pattern = BSON("foo" << 1);
- auto ms = make_unique<MergeSortStage>(&_opCtx, msparams, &ws);
+ auto ms = make_unique<MergeSortStage>(_expCtx.get(), msparams, &ws);
// Index 'a'+i has foo equal to 'i'.
@@ -554,7 +557,7 @@ public:
BSONObj indexSpec = BSON(index << 1 << "foo" << 1);
addIndex(indexSpec);
auto params = makeIndexScanParams(&_opCtx, getIndex(indexSpec, coll));
- ms->addChild(std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr));
+ ms->addChild(std::make_unique<IndexScan>(_expCtx.get(), params, &ws, nullptr));
}
set<RecordId> recordIds;
@@ -672,7 +675,7 @@ public:
WorkingSetMember* member;
MergeSortStageParams msparams;
msparams.pattern = BSON("a" << 1);
- auto ms = std::make_unique<MergeSortStage>(&_opCtx, msparams, &ws);
+ auto ms = std::make_unique<MergeSortStage>(_expCtx.get(), msparams, &ws);
// First child scans [5, 10].
{
@@ -680,9 +683,9 @@ public:
params.bounds.startKey = BSON("" << 5);
params.bounds.endKey = BSON("" << 10);
auto fetchStage = std::make_unique<FetchStage>(
- &_opCtx,
+ _expCtx.get(),
&ws,
- std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr),
+ std::make_unique<IndexScan>(_expCtx.get(), params, &ws, nullptr),
nullptr,
coll);
ms->addChild(std::move(fetchStage));
@@ -694,9 +697,9 @@ public:
params.bounds.startKey = BSON("" << 4);
params.bounds.endKey = BSON("" << 10);
auto fetchStage = std::make_unique<FetchStage>(
- &_opCtx,
+ _expCtx.get(),
&ws,
- std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr),
+ std::make_unique<IndexScan>(_expCtx.get(), params, &ws, nullptr),
nullptr,
coll);
ms->addChild(std::move(fetchStage));
@@ -780,17 +783,18 @@ public:
MergeSortStageParams msparams;
msparams.pattern = BSON("c" << 1 << "d" << 1);
msparams.collator = nullptr;
- auto ms = std::make_unique<MergeSortStage>(&_opCtx, msparams, ws.get());
+ auto ms = std::make_unique<MergeSortStage>(_expCtx.get(), msparams, ws.get());
// a:1
auto params = makeIndexScanParams(&_opCtx, getIndex(firstIndex, coll));
- ms->addChild(std::make_unique<IndexScan>(&_opCtx, params, ws.get(), nullptr));
+ ms->addChild(std::make_unique<IndexScan>(_expCtx.get(), params, ws.get(), nullptr));
// b:1
params = makeIndexScanParams(&_opCtx, getIndex(secondIndex, coll));
- ms->addChild(std::make_unique<IndexScan>(&_opCtx, params, ws.get(), nullptr));
+ ms->addChild(std::make_unique<IndexScan>(_expCtx.get(), params, ws.get(), nullptr));
- auto fetchStage = make_unique<FetchStage>(&_opCtx, ws.get(), std::move(ms), nullptr, coll);
+ auto fetchStage =
+ make_unique<FetchStage>(_expCtx.get(), ws.get(), std::move(ms), nullptr, coll);
// Must fetch if we want to easily pull out an obj.
auto statusWithPlanExecutor = PlanExecutor::make(
&_opCtx, std::move(ws), std::move(fetchStage), coll, PlanExecutor::NO_YIELD);
@@ -848,18 +852,18 @@ public:
msparams.pattern = BSON("c" << 1 << "d" << 1);
CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kReverseString);
msparams.collator = &collator;
- auto ms = std::make_unique<MergeSortStage>(&_opCtx, msparams, ws.get());
+ auto ms = std::make_unique<MergeSortStage>(_expCtx.get(), msparams, ws.get());
// a:1
auto params = makeIndexScanParams(&_opCtx, getIndex(firstIndex, coll));
- ms->addChild(std::make_unique<IndexScan>(&_opCtx, params, ws.get(), nullptr));
+ ms->addChild(std::make_unique<IndexScan>(_expCtx.get(), params, ws.get(), nullptr));
// b:1
params = makeIndexScanParams(&_opCtx, getIndex(secondIndex, coll));
- ms->addChild(std::make_unique<IndexScan>(&_opCtx, params, ws.get(), nullptr));
+ ms->addChild(std::make_unique<IndexScan>(_expCtx.get(), params, ws.get(), nullptr));
unique_ptr<FetchStage> fetchStage =
- make_unique<FetchStage>(&_opCtx, ws.get(), std::move(ms), nullptr, coll);
+ make_unique<FetchStage>(_expCtx.get(), ws.get(), std::move(ms), nullptr, coll);
// Must fetch if we want to easily pull out an obj.
auto statusWithPlanExecutor = PlanExecutor::make(
&_opCtx, std::move(ws), std::move(fetchStage), coll, PlanExecutor::NO_YIELD);
diff --git a/src/mongo/dbtests/query_stage_multiplan.cpp b/src/mongo/dbtests/query_stage_multiplan.cpp
index a9b5a8375fe..38d44b1d8f2 100644
--- a/src/mongo/dbtests/query_stage_multiplan.cpp
+++ b/src/mongo/dbtests/query_stage_multiplan.cpp
@@ -118,6 +118,9 @@ protected:
const ServiceContext::UniqueOperationContext _opCtx = cc().makeOperationContext();
ClockSource* const _clock = _opCtx->getServiceContext()->getFastClockSource();
+ boost::intrusive_ptr<ExpressionContext> _expCtx =
+ make_intrusive<ExpressionContext>(_opCtx.get(), nullptr, nss);
+
DBDirectClient _client;
};
@@ -133,30 +136,28 @@ std::unique_ptr<CanonicalQuery> makeCanonicalQuery(OperationContext* opCtx,
return cq;
}
-unique_ptr<PlanStage> getIxScanPlan(OperationContext* opCtx,
+unique_ptr<PlanStage> getIxScanPlan(ExpressionContext* expCtx,
const Collection* coll,
WorkingSet* sharedWs,
int desiredFooValue) {
std::vector<const IndexDescriptor*> indexes;
- coll->getIndexCatalog()->findIndexesByKeyPattern(opCtx, BSON("foo" << 1), false, &indexes);
+ coll->getIndexCatalog()->findIndexesByKeyPattern(
+ expCtx->opCtx, BSON("foo" << 1), false, &indexes);
ASSERT_EQ(indexes.size(), 1U);
- IndexScanParams ixparams(opCtx, indexes[0]);
+ IndexScanParams ixparams(expCtx->opCtx, indexes[0]);
ixparams.bounds.isSimpleRange = true;
ixparams.bounds.startKey = BSON("" << desiredFooValue);
ixparams.bounds.endKey = BSON("" << desiredFooValue);
ixparams.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys;
ixparams.direction = 1;
- auto ixscan = std::make_unique<IndexScan>(opCtx, ixparams, sharedWs, nullptr);
- return std::make_unique<FetchStage>(opCtx, sharedWs, std::move(ixscan), nullptr, coll);
+ auto ixscan = std::make_unique<IndexScan>(expCtx, ixparams, sharedWs, nullptr);
+ return std::make_unique<FetchStage>(expCtx, sharedWs, std::move(ixscan), nullptr, coll);
}
-unique_ptr<MatchExpression> makeMatchExpressionFromFilter(OperationContext* opCtx,
+unique_ptr<MatchExpression> makeMatchExpressionFromFilter(ExpressionContext* expCtx,
BSONObj filterObj) {
- const CollatorInterface* collator = nullptr;
- const boost::intrusive_ptr<ExpressionContext> expCtx(
- new ExpressionContext(opCtx, collator, nss));
StatusWithMatchExpression statusWithMatcher = MatchExpressionParser::parse(filterObj, expCtx);
ASSERT_OK(statusWithMatcher.getStatus());
unique_ptr<MatchExpression> filter = std::move(statusWithMatcher.getValue());
@@ -165,19 +166,19 @@ unique_ptr<MatchExpression> makeMatchExpressionFromFilter(OperationContext* opCt
}
-unique_ptr<PlanStage> getCollScanPlan(OperationContext* opCtx,
+unique_ptr<PlanStage> getCollScanPlan(ExpressionContext* expCtx,
const Collection* coll,
WorkingSet* sharedWs,
MatchExpression* matchExpr) {
CollectionScanParams csparams;
csparams.direction = CollectionScanParams::FORWARD;
- unique_ptr<PlanStage> root(new CollectionScan(opCtx, coll, csparams, sharedWs, matchExpr));
+ unique_ptr<PlanStage> root(new CollectionScan(expCtx, coll, csparams, sharedWs, matchExpr));
return root;
}
-std::unique_ptr<MultiPlanStage> runMultiPlanner(OperationContext* opCtx,
+std::unique_ptr<MultiPlanStage> runMultiPlanner(ExpressionContext* expCtx,
const NamespaceString& nss,
const Collection* coll,
int desiredFooValue) {
@@ -185,23 +186,24 @@ std::unique_ptr<MultiPlanStage> runMultiPlanner(OperationContext* opCtx,
// Every call to work() returns something so this should clearly win (by current scoring
// at least).
unique_ptr<WorkingSet> sharedWs(new WorkingSet());
- unique_ptr<PlanStage> ixScanRoot = getIxScanPlan(opCtx, coll, sharedWs.get(), desiredFooValue);
+ unique_ptr<PlanStage> ixScanRoot = getIxScanPlan(expCtx, coll, sharedWs.get(), desiredFooValue);
// Plan 1: CollScan.
BSONObj filterObj = BSON("foo" << desiredFooValue);
- unique_ptr<MatchExpression> filter = makeMatchExpressionFromFilter(opCtx, filterObj);
- unique_ptr<PlanStage> collScanRoot = getCollScanPlan(opCtx, coll, sharedWs.get(), filter.get());
+ unique_ptr<MatchExpression> filter = makeMatchExpressionFromFilter(expCtx, filterObj);
+ unique_ptr<PlanStage> collScanRoot =
+ getCollScanPlan(expCtx, coll, sharedWs.get(), filter.get());
// Hand the plans off to the MPS.
- auto cq = makeCanonicalQuery(opCtx, nss, BSON("foo" << desiredFooValue));
+ auto cq = makeCanonicalQuery(expCtx->opCtx, nss, BSON("foo" << desiredFooValue));
- unique_ptr<MultiPlanStage> mps = std::make_unique<MultiPlanStage>(opCtx, coll, cq.get());
+ unique_ptr<MultiPlanStage> mps = std::make_unique<MultiPlanStage>(expCtx, coll, cq.get());
mps->addPlan(createQuerySolution(), std::move(ixScanRoot), sharedWs.get());
mps->addPlan(createQuerySolution(), std::move(collScanRoot), sharedWs.get());
// Plan 0 aka the first plan aka the index scan should be the best.
PlanYieldPolicy yieldPolicy(PlanExecutor::NO_YIELD,
- opCtx->getServiceContext()->getFastClockSource());
+ expCtx->opCtx->getServiceContext()->getFastClockSource());
ASSERT_OK(mps->pickBestPlan(&yieldPolicy));
ASSERT(mps->bestPlanChosen());
ASSERT_EQUALS(0, mps->bestPlanIdx());
@@ -231,19 +233,19 @@ TEST_F(QueryStageMultiPlanTest, MPSCollectionScanVsHighlySelectiveIXScan) {
// Every call to work() returns something so this should clearly win (by current scoring
// at least).
unique_ptr<WorkingSet> sharedWs(new WorkingSet());
- unique_ptr<PlanStage> ixScanRoot = getIxScanPlan(_opCtx.get(), coll, sharedWs.get(), 7);
+ unique_ptr<PlanStage> ixScanRoot = getIxScanPlan(_expCtx.get(), coll, sharedWs.get(), 7);
// Plan 1: CollScan with matcher.
BSONObj filterObj = BSON("foo" << 7);
- unique_ptr<MatchExpression> filter = makeMatchExpressionFromFilter(_opCtx.get(), filterObj);
+ unique_ptr<MatchExpression> filter = makeMatchExpressionFromFilter(_expCtx.get(), filterObj);
unique_ptr<PlanStage> collScanRoot =
- getCollScanPlan(_opCtx.get(), coll, sharedWs.get(), filter.get());
+ getCollScanPlan(_expCtx.get(), coll, sharedWs.get(), filter.get());
// Hand the plans off to the MPS.
auto cq = makeCanonicalQuery(_opCtx.get(), nss, filterObj);
unique_ptr<MultiPlanStage> mps =
- std::make_unique<MultiPlanStage>(_opCtx.get(), ctx.getCollection(), cq.get());
+ std::make_unique<MultiPlanStage>(_expCtx.get(), ctx.getCollection(), cq.get());
mps->addPlan(createQuerySolution(), std::move(ixScanRoot), sharedWs.get());
mps->addPlan(createQuerySolution(), std::move(collScanRoot), sharedWs.get());
@@ -287,7 +289,7 @@ TEST_F(QueryStageMultiPlanTest, MPSDoesNotCreateActiveCacheEntryImmediately) {
const auto cq = makeCanonicalQuery(_opCtx.get(), nss, BSON("foo" << 7));
// Run an index scan and collection scan, searching for {foo: 7}.
- auto mps = runMultiPlanner(_opCtx.get(), nss, coll, 7);
+ auto mps = runMultiPlanner(_expCtx.get(), nss, coll, 7);
// Be sure that an inactive cache entry was added.
PlanCache* cache = CollectionQueryInfo::get(coll).getPlanCache();
@@ -299,7 +301,7 @@ TEST_F(QueryStageMultiPlanTest, MPSDoesNotCreateActiveCacheEntryImmediately) {
// Run the multi-planner again. The index scan will again win, but the number of works
// will be greater, since {foo: 5} appears more frequently in the collection.
- mps = runMultiPlanner(_opCtx.get(), nss, coll, 5);
+ mps = runMultiPlanner(_expCtx.get(), nss, coll, 5);
// The last plan run should have required far more works than the previous plan. This means
// that the 'works' in the cache entry should have doubled.
@@ -310,14 +312,14 @@ TEST_F(QueryStageMultiPlanTest, MPSDoesNotCreateActiveCacheEntryImmediately) {
// Run the exact same query again. This will still take more works than 'works', and
// should cause the cache entry's 'works' to be doubled again.
- mps = runMultiPlanner(_opCtx.get(), nss, coll, 5);
+ mps = runMultiPlanner(_expCtx.get(), nss, coll, 5);
ASSERT_EQ(cache->size(), 1U);
entry = assertGet(cache->getEntry(*cq));
ASSERT_FALSE(entry->isActive);
ASSERT_EQ(firstQueryWorks * 2 * 2, entry->works);
// Run the query yet again. This time, an active cache entry should be created.
- mps = runMultiPlanner(_opCtx.get(), nss, coll, 5);
+ mps = runMultiPlanner(_expCtx.get(), nss, coll, 5);
ASSERT_EQ(cache->size(), 1U);
entry = assertGet(cache->getEntry(*cq));
ASSERT_TRUE(entry->isActive);
@@ -342,14 +344,14 @@ TEST_F(QueryStageMultiPlanTest, MPSDoesCreatesActiveEntryWhenInactiveEntriesDisa
const auto cq = makeCanonicalQuery(_opCtx.get(), nss, BSON("foo" << 7));
// Run an index scan and collection scan, searching for {foo: 7}.
- auto mps = runMultiPlanner(_opCtx.get(), nss, coll, 7);
+ auto mps = runMultiPlanner(_expCtx.get(), nss, coll, 7);
// Be sure that an _active_ cache entry was added.
PlanCache* cache = CollectionQueryInfo::get(coll).getPlanCache();
ASSERT_EQ(cache->get(*cq).state, PlanCache::CacheEntryState::kPresentActive);
// Run the multi-planner again. The entry should still be active.
- mps = runMultiPlanner(_opCtx.get(), nss, coll, 5);
+ mps = runMultiPlanner(_expCtx.get(), nss, coll, 5);
ASSERT_EQ(cache->get(*cq).state, PlanCache::CacheEntryState::kPresentActive);
}
@@ -394,7 +396,7 @@ TEST_F(QueryStageMultiPlanTest, MPSBackupPlan) {
ASSERT_EQUALS(solutions.size(), 3U);
// Fill out the MultiPlanStage.
- unique_ptr<MultiPlanStage> mps(new MultiPlanStage(_opCtx.get(), collection, cq.get()));
+ unique_ptr<MultiPlanStage> mps(new MultiPlanStage(_expCtx.get(), collection, cq.get()));
unique_ptr<WorkingSet> ws(new WorkingSet());
// Put each solution from the planner into the MPR.
for (size_t i = 0; i < solutions.size(); ++i) {
@@ -465,8 +467,8 @@ TEST_F(QueryStageMultiPlanTest, MPSExplainAllPlans) {
const int nDocs = 500;
auto ws = std::make_unique<WorkingSet>();
- auto firstPlan = std::make_unique<QueuedDataStage>(_opCtx.get(), ws.get());
- auto secondPlan = std::make_unique<QueuedDataStage>(_opCtx.get(), ws.get());
+ auto firstPlan = std::make_unique<QueuedDataStage>(_expCtx.get(), ws.get());
+ auto secondPlan = std::make_unique<QueuedDataStage>(_expCtx.get(), ws.get());
for (int i = 0; i < nDocs; ++i) {
addMember(firstPlan.get(), ws.get(), BSON("x" << 1));
@@ -482,7 +484,7 @@ TEST_F(QueryStageMultiPlanTest, MPSExplainAllPlans) {
qr->setFilter(BSON("x" << 1));
auto cq = uassertStatusOK(CanonicalQuery::canonicalize(opCtx(), std::move(qr)));
unique_ptr<MultiPlanStage> mps =
- std::make_unique<MultiPlanStage>(_opCtx.get(), ctx.getCollection(), cq.get());
+ std::make_unique<MultiPlanStage>(_expCtx.get(), ctx.getCollection(), cq.get());
// Put each plan into the MultiPlanStage. Takes ownership of 'firstPlan' and 'secondPlan'.
mps->addPlan(std::make_unique<QuerySolution>(), std::move(firstPlan), ws.get());
@@ -571,20 +573,20 @@ TEST_F(QueryStageMultiPlanTest, ShouldReportErrorIfExceedsTimeLimitDuringPlannin
// Every call to work() returns something so this should clearly win (by current scoring
// at least).
unique_ptr<WorkingSet> sharedWs(new WorkingSet());
- unique_ptr<PlanStage> ixScanRoot = getIxScanPlan(_opCtx.get(), coll, sharedWs.get(), 7);
+ unique_ptr<PlanStage> ixScanRoot = getIxScanPlan(_expCtx.get(), coll, sharedWs.get(), 7);
// Make the filter.
BSONObj filterObj = BSON("foo" << 7);
- unique_ptr<MatchExpression> filter = makeMatchExpressionFromFilter(_opCtx.get(), filterObj);
+ unique_ptr<MatchExpression> filter = makeMatchExpressionFromFilter(_expCtx.get(), filterObj);
unique_ptr<PlanStage> collScanRoot =
- getCollScanPlan(_opCtx.get(), coll, sharedWs.get(), filter.get());
+ getCollScanPlan(_expCtx.get(), coll, sharedWs.get(), filter.get());
auto queryRequest = std::make_unique<QueryRequest>(nss);
queryRequest->setFilter(filterObj);
auto canonicalQuery =
uassertStatusOK(CanonicalQuery::canonicalize(opCtx(), std::move(queryRequest)));
- MultiPlanStage multiPlanStage(opCtx(),
+ MultiPlanStage multiPlanStage(_expCtx.get(),
ctx.getCollection(),
canonicalQuery.get(),
MultiPlanStage::CachingMode::NeverCache);
@@ -615,19 +617,19 @@ TEST_F(QueryStageMultiPlanTest, ShouldReportErrorIfKilledDuringPlanning) {
// Every call to work() returns something so this should clearly win (by current scoring
// at least).
unique_ptr<WorkingSet> sharedWs(new WorkingSet());
- unique_ptr<PlanStage> ixScanRoot = getIxScanPlan(_opCtx.get(), coll, sharedWs.get(), 7);
+ unique_ptr<PlanStage> ixScanRoot = getIxScanPlan(_expCtx.get(), coll, sharedWs.get(), 7);
// Plan 1: CollScan.
BSONObj filterObj = BSON("foo" << 7);
- unique_ptr<MatchExpression> filter = makeMatchExpressionFromFilter(_opCtx.get(), filterObj);
+ unique_ptr<MatchExpression> filter = makeMatchExpressionFromFilter(_expCtx.get(), filterObj);
unique_ptr<PlanStage> collScanRoot =
- getCollScanPlan(_opCtx.get(), coll, sharedWs.get(), filter.get());
+ getCollScanPlan(_expCtx.get(), coll, sharedWs.get(), filter.get());
auto queryRequest = std::make_unique<QueryRequest>(nss);
queryRequest->setFilter(BSON("foo" << BSON("$gte" << 0)));
auto canonicalQuery =
uassertStatusOK(CanonicalQuery::canonicalize(opCtx(), std::move(queryRequest)));
- MultiPlanStage multiPlanStage(opCtx(),
+ MultiPlanStage multiPlanStage(_expCtx.get(),
ctx.getCollection(),
canonicalQuery.get(),
MultiPlanStage::CachingMode::NeverCache);
@@ -649,7 +651,7 @@ protected:
}
public:
- ThrowyPlanStage(OperationContext* opCtx) : PlanStage("throwy", opCtx) {}
+ ThrowyPlanStage(ExpressionContext* expCtx) : PlanStage("throwy", expCtx) {}
bool isEOF() final {
return false;
}
@@ -673,15 +675,15 @@ TEST_F(QueryStageMultiPlanTest, AddsContextDuringException) {
<< "query"));
auto canonicalQuery =
uassertStatusOK(CanonicalQuery::canonicalize(opCtx(), std::move(queryRequest)));
- MultiPlanStage multiPlanStage(opCtx(),
+ MultiPlanStage multiPlanStage(_expCtx.get(),
ctx.getCollection(),
canonicalQuery.get(),
MultiPlanStage::CachingMode::NeverCache);
unique_ptr<WorkingSet> sharedWs(new WorkingSet());
multiPlanStage.addPlan(
- createQuerySolution(), std::make_unique<ThrowyPlanStage>(opCtx()), sharedWs.get());
+ createQuerySolution(), std::make_unique<ThrowyPlanStage>(_expCtx.get()), sharedWs.get());
multiPlanStage.addPlan(
- createQuerySolution(), std::make_unique<ThrowyPlanStage>(opCtx()), sharedWs.get());
+ createQuerySolution(), std::make_unique<ThrowyPlanStage>(_expCtx.get()), sharedWs.get());
PlanYieldPolicy yieldPolicy(PlanExecutor::NO_YIELD, _clock);
ASSERT_THROWS_WITH_CHECK(multiPlanStage.pickBestPlan(&yieldPolicy),
diff --git a/src/mongo/dbtests/query_stage_near.cpp b/src/mongo/dbtests/query_stage_near.cpp
index 77a9f0c81a1..938a98e2f88 100644
--- a/src/mongo/dbtests/query_stage_near.cpp
+++ b/src/mongo/dbtests/query_stage_near.cpp
@@ -60,6 +60,9 @@ const BSONObj kTestKeyPattern = BSON("testIndex" << 1);
class QueryStageNearTest : public unittest::Test {
public:
void setUp() override {
+ _expCtx =
+ make_intrusive<ExpressionContext>(_opCtx, nullptr, NamespaceString(kTestNamespace));
+
directClient.createCollection(kTestNamespace);
ASSERT_OK(dbtests::createIndex(_opCtx, kTestNamespace, kTestKeyPattern));
@@ -76,6 +79,8 @@ protected:
OperationContext* const _opCtx = _uniqOpCtx.get();
DBDirectClient directClient{_opCtx};
+ boost::intrusive_ptr<ExpressionContext> _expCtx;
+
boost::optional<AutoGetCollectionForRead> _autoColl;
const IndexDescriptor* _mockGeoIndex;
};
@@ -95,11 +100,14 @@ public:
double max;
};
- MockNearStage(OperationContext* opCtx,
+ MockNearStage(const boost::intrusive_ptr<ExpressionContext>& expCtx,
WorkingSet* workingSet,
const IndexDescriptor* indexDescriptor)
- : NearStage(
- opCtx, "MOCK_DISTANCE_SEARCH_STAGE", STAGE_UNKNOWN, workingSet, indexDescriptor),
+ : NearStage(expCtx.get(),
+ "MOCK_DISTANCE_SEARCH_STAGE",
+ STAGE_UNKNOWN,
+ workingSet,
+ indexDescriptor),
_pos(0) {}
void addInterval(vector<BSONObj> data, double min, double max) {
@@ -116,7 +124,7 @@ public:
bool lastInterval = _pos == static_cast<int>(_intervals.size());
- auto queuedStage = std::make_unique<QueuedDataStage>(opCtx, workingSet);
+ auto queuedStage = std::make_unique<QueuedDataStage>(expCtx(), workingSet);
for (unsigned int i = 0; i < interval.data.size(); i++) {
// Add all documents from the lastInterval into the QueuedDataStage.
@@ -178,7 +186,7 @@ TEST_F(QueryStageNearTest, Basic) {
vector<BSONObj> mockData;
WorkingSet workingSet;
- MockNearStage nearStage(_opCtx, &workingSet, _mockGeoIndex);
+ MockNearStage nearStage(_expCtx.get(), &workingSet, _mockGeoIndex);
// First set of results
mockData.clear();
@@ -217,7 +225,7 @@ TEST_F(QueryStageNearTest, EmptyResults) {
auto* coll = autoColl.getCollection();
ASSERT(coll);
- MockNearStage nearStage(_opCtx, &workingSet, _mockGeoIndex);
+ MockNearStage nearStage(_expCtx.get(), &workingSet, _mockGeoIndex);
// Empty set of results
mockData.clear();
diff --git a/src/mongo/dbtests/query_stage_sort.cpp b/src/mongo/dbtests/query_stage_sort.cpp
index 3e01a3c97f6..1d6833337bc 100644
--- a/src/mongo/dbtests/query_stage_sort.cpp
+++ b/src/mongo/dbtests/query_stage_sort.cpp
@@ -113,7 +113,7 @@ public:
Collection* coll) {
// Build the mock scan stage which feeds the data.
auto ws = std::make_unique<WorkingSet>();
- auto queuedDataStage = std::make_unique<QueuedDataStage>(&_opCtx, ws.get());
+ auto queuedDataStage = std::make_unique<QueuedDataStage>(_expCtx.get(), ws.get());
insertVarietyOfObjects(ws.get(), queuedDataStage.get(), coll);
auto sortPattern = BSON("foo" << 1);
@@ -152,7 +152,7 @@ public:
*/
void sortAndCheck(int direction, Collection* coll) {
auto ws = std::make_unique<WorkingSet>();
- auto queuedDataStage = std::make_unique<QueuedDataStage>(&_opCtx, ws.get());
+ auto queuedDataStage = std::make_unique<QueuedDataStage>(_expCtx.get(), ws.get());
// Insert a mix of the various types of data.
insertVarietyOfObjects(ws.get(), queuedDataStage.get(), coll);
@@ -169,8 +169,8 @@ public:
false, // addSortKeyMetadata
std::move(keyGenStage));
- auto fetchStage =
- std::make_unique<FetchStage>(&_opCtx, ws.get(), std::move(sortStage), nullptr, coll);
+ auto fetchStage = std::make_unique<FetchStage>(
+ _expCtx.get(), ws.get(), std::move(sortStage), nullptr, coll);
// Must fetch so we can look at the doc as a BSONObj.
auto statusWithPlanExecutor = PlanExecutor::make(
@@ -551,7 +551,7 @@ public:
}
auto ws = std::make_unique<WorkingSet>();
- auto queuedDataStage = std::make_unique<QueuedDataStage>(&_opCtx, ws.get());
+ auto queuedDataStage = std::make_unique<QueuedDataStage>(_expCtx.get(), ws.get());
for (int i = 0; i < numObj(); ++i) {
{
@@ -585,8 +585,8 @@ public:
false, // addSortKeyMetadata
std::move(keyGenStage));
- auto fetchStage =
- std::make_unique<FetchStage>(&_opCtx, ws.get(), std::move(sortStage), nullptr, coll);
+ auto fetchStage = std::make_unique<FetchStage>(
+ _expCtx.get(), ws.get(), std::move(sortStage), nullptr, coll);
// We don't get results back since we're sorting some parallel arrays.
auto statusWithPlanExecutor = PlanExecutor::make(
diff --git a/src/mongo/dbtests/query_stage_sort_key_generator.cpp b/src/mongo/dbtests/query_stage_sort_key_generator.cpp
index 351ad1cca0c..b1a77f75be2 100644
--- a/src/mongo/dbtests/query_stage_sort_key_generator.cpp
+++ b/src/mongo/dbtests/query_stage_sort_key_generator.cpp
@@ -65,15 +65,17 @@ const NamespaceString kTestNss = NamespaceString("db.dummy");
* The 'collator' is used to specify the string comparison semantics that should be used when
* generating the sort key.
*/
-Value extractSortKey(const char* sortSpec, const char* doc, const CollatorInterface* collator) {
+Value extractSortKey(const char* sortSpec,
+ const char* doc,
+ std::unique_ptr<CollatorInterface> collator = nullptr) {
QueryTestServiceContext serviceContext;
auto opCtx = serviceContext.makeOperationContext();
boost::intrusive_ptr<ExpressionContext> pExpCtx(
- new ExpressionContext(opCtx.get(), collator, kTestNss));
+ new ExpressionContext(opCtx.get(), std::move(collator), kTestNss));
WorkingSet workingSet;
- auto mockStage = std::make_unique<QueuedDataStage>(opCtx.get(), &workingSet);
+ auto mockStage = std::make_unique<QueuedDataStage>(pExpCtx.get(), &workingSet);
auto wsid = workingSet.allocate();
auto wsm = workingSet.get(wsid);
wsm->doc = {SnapshotId(), Document{fromjson(doc)}};
@@ -95,15 +97,15 @@ Value extractSortKey(const char* sortSpec, const char* doc, const CollatorInterf
*/
Value extractSortKeyCovered(const char* sortSpec,
const IndexKeyDatum& ikd,
- const CollatorInterface* collator) {
+ std::unique_ptr<CollatorInterface> collator = nullptr) {
QueryTestServiceContext serviceContext;
auto opCtx = serviceContext.makeOperationContext();
boost::intrusive_ptr<ExpressionContext> pExpCtx(
- new ExpressionContext(opCtx.get(), collator, kTestNss));
+ new ExpressionContext(opCtx.get(), std::move(collator), kTestNss));
WorkingSet workingSet;
- auto mockStage = std::make_unique<QueuedDataStage>(opCtx.get(), &workingSet);
+ auto mockStage = std::make_unique<QueuedDataStage>(pExpCtx.get(), &workingSet);
auto wsid = workingSet.allocate();
auto wsm = workingSet.get(wsid);
wsm->keyData.push_back(ikd);
@@ -157,82 +159,76 @@ TEST(SortKeyGeneratorStageTest, SortKeyArray) {
}
TEST(SortKeyGeneratorStageTest, SortKeyCoveredNormal) {
- CollatorInterface* collator = nullptr;
Value actualOut = extractSortKeyCovered(
- "{a: 1}", IndexKeyDatum(BSON("a" << 1), BSON("" << 5), 0, SnapshotId{}), collator);
+ "{a: 1}", IndexKeyDatum(BSON("a" << 1), BSON("" << 5), 0, SnapshotId{}));
Value expectedOut({Value(5)});
ASSERT_VALUE_EQ(actualOut, expectedOut);
}
TEST(SortKeyGeneratorStageTest, SortKeyCoveredEmbedded) {
- CollatorInterface* collator = nullptr;
Value actualOut = extractSortKeyCovered(
"{'a.c': 1}",
- IndexKeyDatum(BSON("a.c" << 1 << "c" << 1), BSON("" << 5 << "" << 6), 0, SnapshotId{}),
- collator);
+ IndexKeyDatum(BSON("a.c" << 1 << "c" << 1), BSON("" << 5 << "" << 6), 0, SnapshotId{}));
Value expectedOut(5);
ASSERT_VALUE_EQ(actualOut, expectedOut);
}
TEST(SortKeyGeneratorStageTest, SortKeyCoveredCompound) {
- CollatorInterface* collator = nullptr;
Value actualOut = extractSortKeyCovered(
"{a: 1, c: 1}",
- IndexKeyDatum(BSON("a" << 1 << "c" << 1), BSON("" << 5 << "" << 6), 0, SnapshotId{}),
- collator);
+ IndexKeyDatum(BSON("a" << 1 << "c" << 1), BSON("" << 5 << "" << 6), 0, SnapshotId{}));
Value expectedOut(std::vector<Value>{Value(5), Value(6)});
ASSERT_VALUE_EQ(actualOut, expectedOut);
}
TEST(SortKeyGeneratorStageTest, SortKeyCoveredCompound2) {
- CollatorInterface* collator = nullptr;
Value actualOut = extractSortKeyCovered("{a: 1, b: 1}",
IndexKeyDatum(BSON("a" << 1 << "b" << 1 << "c" << 1),
BSON("" << 5 << "" << 6 << "" << 4),
0,
- SnapshotId{}),
- collator);
+ SnapshotId{}));
Value expectedOut(std::vector<Value>{Value(5), Value(6)});
ASSERT_VALUE_EQ(actualOut, expectedOut);
}
TEST(SortKeyGeneratorStageTest, SortKeyCoveredCompound3) {
- CollatorInterface* collator = nullptr;
Value actualOut =
extractSortKeyCovered("{b: 1, c: 1}",
IndexKeyDatum(BSON("a" << 1 << "b" << 1 << "c" << 1 << "d" << 1),
BSON("" << 5 << "" << 6 << "" << 4 << "" << 9000),
0,
- SnapshotId{}),
- collator);
+ SnapshotId{}));
Value expectedOut(std::vector<Value>{Value(6), Value(4)});
ASSERT_VALUE_EQ(actualOut, expectedOut);
}
TEST(SortKeyGeneratorStageTest, ExtractStringSortKeyWithCollatorUsesComparisonKey) {
- CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kReverseString);
+ auto collator =
+ std::make_unique<CollatorInterfaceMock>(CollatorInterfaceMock::MockType::kReverseString);
Value actualOut =
- extractSortKey("{a: 1}", "{_id: 0, z: 'thing1', a: 'thing2', b: 16}", &collator);
+ extractSortKey("{a: 1}", "{_id: 0, z: 'thing1', a: 'thing2', b: 16}", std::move(collator));
Value expectedOut = Value("2gniht"_sd);
ASSERT_VALUE_EQ(actualOut, expectedOut);
}
TEST(SortKeyGeneratorStageTest, CollatorHasNoEffectWhenExtractingNonStringSortKey) {
- CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kReverseString);
- Value actualOut = extractSortKey("{a: 1}", "{_id: 0, z: 10, a: 6, b: 16}", &collator);
+ auto collator =
+ std::make_unique<CollatorInterfaceMock>(CollatorInterfaceMock::MockType::kReverseString);
+ Value actualOut = extractSortKey("{a: 1}", "{_id: 0, z: 10, a: 6, b: 16}", std::move(collator));
Value expectedOut = Value(6);
ASSERT_VALUE_EQ(actualOut, expectedOut);
}
TEST(SortKeyGeneratorStageTest, CollatorAppliesWhenExtractingCoveredSortKeyString) {
- CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kReverseString);
+ auto collator =
+ std::make_unique<CollatorInterfaceMock>(CollatorInterfaceMock::MockType::kReverseString);
Value actualOut = extractSortKeyCovered("{b: 1}",
IndexKeyDatum(BSON("a" << 1 << "b" << 1),
BSON("" << 4 << ""
<< "foo"),
0,
SnapshotId{}),
- &collator);
+ std::move(collator));
Value expectedOut = Value("oof"_sd);
ASSERT_VALUE_EQ(actualOut, expectedOut);
}
@@ -244,9 +240,10 @@ TEST(SortKeyGeneratorStageTest, SortKeyGenerationForArraysChoosesCorrectKey) {
}
TEST(SortKeyGeneratorStageTest, EnsureSortKeyGenerationForArraysRespectsCollation) {
- CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kReverseString);
+ auto collator =
+ std::make_unique<CollatorInterfaceMock>(CollatorInterfaceMock::MockType::kReverseString);
Value actualOut =
- extractSortKey("{a: 1}", "{_id: 0, a: ['aaz', 'zza', 'yya', 'zzb']}", &collator);
+ extractSortKey("{a: 1}", "{_id: 0, a: ['aaz', 'zza', 'yya', 'zzb']}", std::move(collator));
Value expectedOut("ayy"_sd);
ASSERT_VALUE_EQ(actualOut, expectedOut);
}
diff --git a/src/mongo/dbtests/query_stage_subplan.cpp b/src/mongo/dbtests/query_stage_subplan.cpp
index be533ba7142..28f657879a1 100644
--- a/src/mongo/dbtests/query_stage_subplan.cpp
+++ b/src/mongo/dbtests/query_stage_subplan.cpp
@@ -79,6 +79,10 @@ public:
return _opCtx.get();
}
+ ExpressionContext* expCtx() {
+ return _expCtx.get();
+ }
+
ServiceContext* serviceContext() {
return _opCtx->getServiceContext();
}
@@ -105,6 +109,8 @@ protected:
const ServiceContext::UniqueOperationContext _opCtx = cc().makeOperationContext();
ClockSource* _clock = _opCtx->getServiceContext()->getFastClockSource();
+ boost::intrusive_ptr<ExpressionContext> _expCtx =
+ make_intrusive<ExpressionContext>(_opCtx.get(), nullptr, nss);
private:
DBDirectClient _client;
@@ -142,7 +148,7 @@ TEST_F(QueryStageSubplanTest, QueryStageSubplanGeo2dOr) {
WorkingSet ws;
std::unique_ptr<SubplanStage> subplan(
- new SubplanStage(opCtx(), collection, &ws, plannerParams, cq.get()));
+ new SubplanStage(_expCtx.get(), collection, &ws, plannerParams, cq.get()));
// Plan selection should succeed due to falling back on regular planning.
PlanYieldPolicy yieldPolicy(PlanExecutor::NO_YIELD, _clock);
@@ -181,7 +187,7 @@ void assertSubplanFromCache(QueryStageSubplanTest* test, const dbtests::WriteCon
WorkingSet ws;
std::unique_ptr<SubplanStage> subplan(
- new SubplanStage(test->opCtx(), collection, &ws, plannerParams, cq.get()));
+ new SubplanStage(test->expCtx(), collection, &ws, plannerParams, cq.get()));
PlanYieldPolicy yieldPolicy(PlanExecutor::NO_YIELD,
test->serviceContext()->getFastClockSource());
@@ -195,7 +201,7 @@ void assertSubplanFromCache(QueryStageSubplanTest* test, const dbtests::WriteCon
// If we repeat the same query, the plan for the first branch should have come from
// the cache.
ws.clear();
- subplan.reset(new SubplanStage(test->opCtx(), collection, &ws, plannerParams, cq.get()));
+ subplan.reset(new SubplanStage(test->expCtx(), collection, &ws, plannerParams, cq.get()));
ASSERT_OK(subplan->pickBestPlan(&yieldPolicy));
@@ -262,7 +268,7 @@ TEST_F(QueryStageSubplanTest, QueryStageSubplanDontCacheZeroResults) {
WorkingSet ws;
std::unique_ptr<SubplanStage> subplan(
- new SubplanStage(opCtx(), collection, &ws, plannerParams, cq.get()));
+ new SubplanStage(_expCtx.get(), collection, &ws, plannerParams, cq.get()));
PlanYieldPolicy yieldPolicy(PlanExecutor::NO_YIELD, _clock);
ASSERT_OK(subplan->pickBestPlan(&yieldPolicy));
@@ -276,7 +282,7 @@ TEST_F(QueryStageSubplanTest, QueryStageSubplanDontCacheZeroResults) {
// from the cache (because the first call to pickBestPlan() refrained from creating any
// cache entries).
ws.clear();
- subplan.reset(new SubplanStage(opCtx(), collection, &ws, plannerParams, cq.get()));
+ subplan.reset(new SubplanStage(_expCtx.get(), collection, &ws, plannerParams, cq.get()));
ASSERT_OK(subplan->pickBestPlan(&yieldPolicy));
@@ -318,7 +324,7 @@ TEST_F(QueryStageSubplanTest, QueryStageSubplanDontCacheTies) {
WorkingSet ws;
std::unique_ptr<SubplanStage> subplan(
- new SubplanStage(opCtx(), collection, &ws, plannerParams, cq.get()));
+ new SubplanStage(_expCtx.get(), collection, &ws, plannerParams, cq.get()));
PlanYieldPolicy yieldPolicy(PlanExecutor::NO_YIELD, _clock);
ASSERT_OK(subplan->pickBestPlan(&yieldPolicy));
@@ -332,7 +338,7 @@ TEST_F(QueryStageSubplanTest, QueryStageSubplanDontCacheTies) {
// from the cache (because the first call to pickBestPlan() refrained from creating any
// cache entries).
ws.clear();
- subplan.reset(new SubplanStage(opCtx(), collection, &ws, plannerParams, cq.get()));
+ subplan.reset(new SubplanStage(_expCtx.get(), collection, &ws, plannerParams, cq.get()));
ASSERT_OK(subplan->pickBestPlan(&yieldPolicy));
@@ -490,7 +496,7 @@ TEST_F(QueryStageSubplanTest, QueryStageSubplanPlanRootedOrNE) {
WorkingSet ws;
std::unique_ptr<SubplanStage> subplan(
- new SubplanStage(opCtx(), collection, &ws, plannerParams, cq.get()));
+ new SubplanStage(_expCtx.get(), collection, &ws, plannerParams, cq.get()));
PlanYieldPolicy yieldPolicy(PlanExecutor::NO_YIELD, _clock);
ASSERT_OK(subplan->pickBestPlan(&yieldPolicy));
@@ -536,7 +542,7 @@ TEST_F(QueryStageSubplanTest, ShouldReportErrorIfExceedsTimeLimitDuringPlanning)
// Create the SubplanStage.
WorkingSet workingSet;
SubplanStage subplanStage(
- opCtx(), ctx.getCollection(), &workingSet, params, canonicalQuery.get());
+ _expCtx.get(), ctx.getCollection(), &workingSet, params, canonicalQuery.get());
AlwaysTimeOutYieldPolicy alwaysTimeOutPolicy(serviceContext()->getFastClockSource());
ASSERT_EQ(ErrorCodes::ExceededTimeLimit, subplanStage.pickBestPlan(&alwaysTimeOutPolicy));
@@ -561,7 +567,7 @@ TEST_F(QueryStageSubplanTest, ShouldReportErrorIfKilledDuringPlanning) {
// Create the SubplanStage.
WorkingSet workingSet;
SubplanStage subplanStage(
- opCtx(), ctx.getCollection(), &workingSet, params, canonicalQuery.get());
+ _expCtx.get(), ctx.getCollection(), &workingSet, params, canonicalQuery.get());
AlwaysPlanKilledYieldPolicy alwaysPlanKilledYieldPolicy(serviceContext()->getFastClockSource());
ASSERT_EQ(ErrorCodes::QueryPlanKilled, subplanStage.pickBestPlan(&alwaysPlanKilledYieldPolicy));
@@ -597,7 +603,7 @@ TEST_F(QueryStageSubplanTest, ShouldThrowOnRestoreIfIndexDroppedBeforePlanSelect
// Create the SubplanStage.
WorkingSet workingSet;
- SubplanStage subplanStage(opCtx(), collection, &workingSet, params, canonicalQuery.get());
+ SubplanStage subplanStage(_expCtx.get(), collection, &workingSet, params, canonicalQuery.get());
// Mimic a yield by saving the state of the subplan stage. Then, drop an index not being used
// while yielded.
@@ -641,7 +647,7 @@ TEST_F(QueryStageSubplanTest, ShouldNotThrowOnRestoreIfIndexDroppedAfterPlanSele
// Create the SubplanStage.
WorkingSet workingSet;
- SubplanStage subplanStage(opCtx(), collection, &workingSet, params, canonicalQuery.get());
+ SubplanStage subplanStage(_expCtx.get(), collection, &workingSet, params, canonicalQuery.get());
PlanYieldPolicy yieldPolicy(PlanExecutor::YIELD_MANUAL, serviceContext()->getFastClockSource());
ASSERT_OK(subplanStage.pickBestPlan(&yieldPolicy));
diff --git a/src/mongo/dbtests/query_stage_tests.cpp b/src/mongo/dbtests/query_stage_tests.cpp
index 43a6c482131..e86d851c7a4 100644
--- a/src/mongo/dbtests/query_stage_tests.cpp
+++ b/src/mongo/dbtests/query_stage_tests.cpp
@@ -83,17 +83,14 @@ public:
int countResults(const IndexScanParams& params, BSONObj filterObj = BSONObj()) {
AutoGetCollectionForReadCommand ctx(&_opCtx, NamespaceString(ns()));
- const CollatorInterface* collator = nullptr;
- const boost::intrusive_ptr<ExpressionContext> expCtx(
- new ExpressionContext(&_opCtx, collator, NamespaceString(ns())));
StatusWithMatchExpression statusWithMatcher =
- MatchExpressionParser::parse(filterObj, expCtx);
+ MatchExpressionParser::parse(filterObj, _expCtx);
verify(statusWithMatcher.isOK());
unique_ptr<MatchExpression> filterExpr = std::move(statusWithMatcher.getValue());
unique_ptr<WorkingSet> ws = std::make_unique<WorkingSet>();
unique_ptr<IndexScan> ix =
- std::make_unique<IndexScan>(&_opCtx, params, ws.get(), filterExpr.get());
+ std::make_unique<IndexScan>(_expCtx.get(), params, ws.get(), filterExpr.get());
auto statusWithPlanExecutor = PlanExecutor::make(
&_opCtx, std::move(ws), std::move(ix), ctx.getCollection(), PlanExecutor::NO_YIELD);
@@ -150,6 +147,9 @@ protected:
const ServiceContext::UniqueOperationContext _txnPtr = cc().makeOperationContext();
OperationContext& _opCtx = *_txnPtr;
+ boost::intrusive_ptr<ExpressionContext> _expCtx =
+ new ExpressionContext(&_opCtx, nullptr, NamespaceString(ns()));
+
private:
DBDirectClient _client;
};
diff --git a/src/mongo/dbtests/query_stage_trial.cpp b/src/mongo/dbtests/query_stage_trial.cpp
index 9f3681f2c89..5b843c7d1ba 100644
--- a/src/mongo/dbtests/query_stage_trial.cpp
+++ b/src/mongo/dbtests/query_stage_trial.cpp
@@ -44,9 +44,13 @@ namespace mongo {
namespace {
+const NamespaceString kTestNss = NamespaceString("db.dummy");
+
class TrialStageTest : public unittest::Test {
public:
- TrialStageTest() : _opCtx(cc().makeOperationContext()) {}
+ TrialStageTest()
+ : _opCtx(cc().makeOperationContext()),
+ _expCtx(make_intrusive<ExpressionContext>(_opCtx.get(), nullptr, kTestNss)) {}
protected:
// Pushes BSONObjs from the given vector into the given QueuedDataStage. Each empty BSONObj in
@@ -98,11 +102,14 @@ protected:
private:
ServiceContext::UniqueOperationContext _opCtx;
WorkingSet _ws;
+
+protected:
+ boost::intrusive_ptr<ExpressionContext> _expCtx;
};
TEST_F(TrialStageTest, AdoptsTrialPlanIfTrialSucceeds) {
- auto trialPlan = std::make_unique<QueuedDataStage>(opCtx(), ws());
- auto backupPlan = std::make_unique<QueuedDataStage>(opCtx(), ws());
+ auto trialPlan = std::make_unique<QueuedDataStage>(_expCtx.get(), ws());
+ auto backupPlan = std::make_unique<QueuedDataStage>(_expCtx.get(), ws());
// Seed the trial plan with 20 results and no NEED_TIMEs.
std::vector<BSONObj> trialResults;
@@ -114,7 +121,7 @@ TEST_F(TrialStageTest, AdoptsTrialPlanIfTrialSucceeds) {
// Set the minimum advanced-to-works ratio to 0.75. Because every work() will result in an
// ADVANCE, the trial plan will succeed.
auto trialStage = std::make_unique<TrialStage>(
- opCtx(), ws(), std::move(trialPlan), std::move(backupPlan), 10, 0.75);
+ _expCtx.get(), ws(), std::move(trialPlan), std::move(backupPlan), 10, 0.75);
ASSERT_OK(trialStage->pickBestPlan(yieldPolicy().get()));
@@ -131,8 +138,8 @@ TEST_F(TrialStageTest, AdoptsTrialPlanIfTrialSucceeds) {
}
TEST_F(TrialStageTest, AdoptsTrialPlanIfTrialPlanHitsEOF) {
- auto trialPlan = std::make_unique<QueuedDataStage>(opCtx(), ws());
- auto backupPlan = std::make_unique<QueuedDataStage>(opCtx(), ws());
+ auto trialPlan = std::make_unique<QueuedDataStage>(_expCtx.get(), ws());
+ auto backupPlan = std::make_unique<QueuedDataStage>(_expCtx.get(), ws());
// Seed the trial plan with 5 results and no NEED_TIMEs.
std::vector<BSONObj> trialResults;
@@ -144,7 +151,7 @@ TEST_F(TrialStageTest, AdoptsTrialPlanIfTrialPlanHitsEOF) {
// We schedule the trial to run for 10 works. Because we hit EOF after 5 results, we will end
// the trial phase early and adopt the successful trial plan.
auto trialStage = std::make_unique<TrialStage>(
- opCtx(), ws(), std::move(trialPlan), std::move(backupPlan), 10, 0.75);
+ _expCtx.get(), ws(), std::move(trialPlan), std::move(backupPlan), 10, 0.75);
ASSERT_OK(trialStage->pickBestPlan(yieldPolicy().get()));
@@ -166,8 +173,8 @@ TEST_F(TrialStageTest, AdoptsTrialPlanIfTrialPlanHitsEOF) {
}
TEST_F(TrialStageTest, AdoptsBackupPlanIfTrialDoesNotSucceed) {
- auto trialPlan = std::make_unique<QueuedDataStage>(opCtx(), ws());
- auto backupPlan = std::make_unique<QueuedDataStage>(opCtx(), ws());
+ auto trialPlan = std::make_unique<QueuedDataStage>(_expCtx.get(), ws());
+ auto backupPlan = std::make_unique<QueuedDataStage>(_expCtx.get(), ws());
// Seed the trial plan with 20 results. Every second result will produce a NEED_TIME.
std::vector<BSONObj> trialResults;
@@ -187,7 +194,7 @@ TEST_F(TrialStageTest, AdoptsBackupPlanIfTrialDoesNotSucceed) {
// Set the minimum advanced-to-works ratio to 0.75. Because every second work() will result in a
// NEED_TIME and the actual ratio is thus 0.5, the trial plan will fail.
auto trialStage = std::make_unique<TrialStage>(
- opCtx(), ws(), std::move(trialPlan), std::move(backupPlan), 10, 0.75);
+ _expCtx.get(), ws(), std::move(trialPlan), std::move(backupPlan), 10, 0.75);
ASSERT_OK(trialStage->pickBestPlan(yieldPolicy().get()));
@@ -204,8 +211,8 @@ TEST_F(TrialStageTest, AdoptsBackupPlanIfTrialDoesNotSucceed) {
}
TEST_F(TrialStageTest, AdoptsBackupPlanIfTrialPlanDies) {
- auto trialPlan = std::make_unique<QueuedDataStage>(opCtx(), ws());
- auto backupPlan = std::make_unique<QueuedDataStage>(opCtx(), ws());
+ auto trialPlan = std::make_unique<QueuedDataStage>(_expCtx.get(), ws());
+ auto backupPlan = std::make_unique<QueuedDataStage>(_expCtx.get(), ws());
// Seed the trial plan with 2 results followed by a PlanStage::FAILURE.
queueData({BSON("_id" << 0), BSON("_id" << 1)}, trialPlan.get());
@@ -222,7 +229,7 @@ TEST_F(TrialStageTest, AdoptsBackupPlanIfTrialPlanDies) {
// We schedule the trial to run for 10 works. Because we will encounter a PlanStage::FAILURE
// before this point, the trial will complete early and the backup plan will be adopted.
auto trialStage = std::make_unique<TrialStage>(
- opCtx(), ws(), std::move(trialPlan), std::move(backupPlan), 10, 0.75);
+ _expCtx.get(), ws(), std::move(trialPlan), std::move(backupPlan), 10, 0.75);
ASSERT_OK(trialStage->pickBestPlan(yieldPolicy().get()));
diff --git a/src/mongo/dbtests/query_stage_update.cpp b/src/mongo/dbtests/query_stage_update.cpp
index 98061f09523..a309f00db8d 100644
--- a/src/mongo/dbtests/query_stage_update.cpp
+++ b/src/mongo/dbtests/query_stage_update.cpp
@@ -133,7 +133,7 @@ public:
params.tailable = false;
unique_ptr<CollectionScan> scan(
- new CollectionScan(&_opCtx, collection, params, &ws, nullptr));
+ new CollectionScan(_expCtx.get(), collection, params, &ws, nullptr));
while (!scan->isEOF()) {
WorkingSetID id = WorkingSet::INVALID_ID;
PlanStage::StageState state = scan->work(&id);
@@ -155,7 +155,7 @@ public:
params.tailable = false;
unique_ptr<CollectionScan> scan(
- new CollectionScan(&_opCtx, collection, params, &ws, nullptr));
+ new CollectionScan(_expCtx.get(), collection, params, &ws, nullptr));
while (!scan->isEOF()) {
WorkingSetID id = WorkingSet::INVALID_ID;
PlanStage::StageState state = scan->work(&id);
@@ -185,6 +185,9 @@ protected:
const ServiceContext::UniqueOperationContext _txnPtr = cc().makeOperationContext();
OperationContext& _opCtx = *_txnPtr;
+ boost::intrusive_ptr<ExpressionContext> _expCtx =
+ make_intrusive<ExpressionContext>(&_opCtx, nullptr, nss);
+
private:
DBDirectClient _client;
};
@@ -200,8 +203,7 @@ public:
dbtests::WriteContextForTests ctx(&_opCtx, nss.ns());
CurOp& curOp = *CurOp::get(_opCtx);
OpDebug* opDebug = &curOp.debug();
- const CollatorInterface* collator = nullptr;
- UpdateDriver driver(new ExpressionContext(&_opCtx, collator, nss));
+ UpdateDriver driver(_expCtx);
Collection* collection = ctx.getCollection();
ASSERT(collection);
@@ -230,10 +232,10 @@ public:
params.canonicalQuery = cq.get();
auto ws = make_unique<WorkingSet>();
- auto eofStage = make_unique<EOFStage>(&_opCtx);
+ auto eofStage = make_unique<EOFStage>(_expCtx.get());
- auto updateStage =
- make_unique<UpsertStage>(&_opCtx, params, ws.get(), collection, eofStage.release());
+ auto updateStage = make_unique<UpsertStage>(
+ _expCtx.get(), params, ws.get(), collection, eofStage.release());
runUpdate(updateStage.get());
}
@@ -271,8 +273,7 @@ public:
CurOp& curOp = *CurOp::get(_opCtx);
OpDebug* opDebug = &curOp.debug();
- const CollatorInterface* collator = nullptr;
- UpdateDriver driver(new ExpressionContext(&_opCtx, collator, nss));
+ UpdateDriver driver(_expCtx);
Collection* coll =
CollectionCatalog::get(&_opCtx).lookupCollectionByNamespace(&_opCtx, nss);
ASSERT(coll);
@@ -309,11 +310,11 @@ public:
updateParams.canonicalQuery = cq.get();
auto ws = make_unique<WorkingSet>();
- auto cs =
- make_unique<CollectionScan>(&_opCtx, coll, collScanParams, ws.get(), cq->root());
+ auto cs = make_unique<CollectionScan>(
+ _expCtx.get(), coll, collScanParams, ws.get(), cq->root());
auto updateStage =
- make_unique<UpdateStage>(&_opCtx, updateParams, ws.get(), coll, cs.release());
+ make_unique<UpdateStage>(_expCtx.get(), updateParams, ws.get(), coll, cs.release());
const UpdateStats* stats =
static_cast<const UpdateStats*>(updateStage->getSpecificStats());
@@ -386,8 +387,7 @@ public:
Collection* coll = ctx.getCollection();
ASSERT(coll);
UpdateRequest request(nss);
- const CollatorInterface* collator = nullptr;
- UpdateDriver driver(new ExpressionContext(&_opCtx, collator, nss));
+ UpdateDriver driver(_expCtx);
const int targetDocIndex = 0; // We'll be working with the first doc in the collection.
const BSONObj query = BSON("foo" << BSON("$gte" << targetDocIndex));
const auto ws = make_unique<WorkingSet>();
@@ -412,7 +412,7 @@ public:
// Configure a QueuedDataStage to pass the first object in the collection back in a
// RID_AND_OBJ state.
- auto qds = make_unique<QueuedDataStage>(&_opCtx, ws.get());
+ auto qds = make_unique<QueuedDataStage>(_expCtx.get(), ws.get());
WorkingSetID id = ws->allocate();
WorkingSetMember* member = ws->get(id);
member->recordId = recordIds[targetDocIndex];
@@ -426,7 +426,7 @@ public:
updateParams.canonicalQuery = cq.get();
const auto updateStage =
- make_unique<UpdateStage>(&_opCtx, updateParams, ws.get(), coll, qds.release());
+ make_unique<UpdateStage>(_expCtx.get(), updateParams, ws.get(), coll, qds.release());
// Should return advanced.
id = WorkingSet::INVALID_ID;
@@ -478,8 +478,7 @@ public:
Collection* coll = ctx.getCollection();
ASSERT(coll);
UpdateRequest request(nss);
- const CollatorInterface* collator = nullptr;
- UpdateDriver driver(new ExpressionContext(&_opCtx, collator, nss));
+ UpdateDriver driver(_expCtx);
const int targetDocIndex = 10;
const BSONObj query = BSON("foo" << BSON("$gte" << targetDocIndex));
const auto ws = make_unique<WorkingSet>();
@@ -504,7 +503,7 @@ public:
// Configure a QueuedDataStage to pass the first object in the collection back in a
// RID_AND_OBJ state.
- auto qds = make_unique<QueuedDataStage>(&_opCtx, ws.get());
+ auto qds = make_unique<QueuedDataStage>(_expCtx.get(), ws.get());
WorkingSetID id = ws->allocate();
WorkingSetMember* member = ws->get(id);
member->recordId = recordIds[targetDocIndex];
@@ -518,7 +517,7 @@ public:
updateParams.canonicalQuery = cq.get();
auto updateStage =
- make_unique<UpdateStage>(&_opCtx, updateParams, ws.get(), coll, qds.release());
+ make_unique<UpdateStage>(_expCtx.get(), updateParams, ws.get(), coll, qds.release());
// Should return advanced.
id = WorkingSet::INVALID_ID;