Diffstat (limited to 'src/mongo/db/query')
-rw-r--r--  src/mongo/db/query/canonical_query.cpp            |  10
-rw-r--r--  src/mongo/db/query/canonical_query.h               |  12
-rw-r--r--  src/mongo/db/query/canonical_query_test.cpp        |  46
-rw-r--r--  src/mongo/db/query/find.cpp                        |  83
-rw-r--r--  src/mongo/db/query/find.h                          |  16
-rw-r--r--  src/mongo/db/query/get_executor.cpp                | 186
-rw-r--r--  src/mongo/db/query/get_executor.h                  |  16
-rw-r--r--  src/mongo/db/query/get_executor_test.cpp           |   4
-rw-r--r--  src/mongo/db/query/internal_plans.cpp              |  42
-rw-r--r--  src/mongo/db/query/internal_plans.h                |  12
-rw-r--r--  src/mongo/db/query/parsed_distinct.cpp             |   4
-rw-r--r--  src/mongo/db/query/parsed_distinct.h               |   2
-rw-r--r--  src/mongo/db/query/parsed_distinct_test.cpp        |  12
-rw-r--r--  src/mongo/db/query/plan_cache_test.cpp             |  28
-rw-r--r--  src/mongo/db/query/plan_executor.cpp               |  14
-rw-r--r--  src/mongo/db/query/plan_executor.h                 |   4
-rw-r--r--  src/mongo/db/query/query_planner_test.cpp          |  10
-rw-r--r--  src/mongo/db/query/query_planner_test_fixture.cpp  |  10
-rw-r--r--  src/mongo/db/query/query_planner_test_fixture.h    |   2
-rw-r--r--  src/mongo/db/query/query_yield.cpp                 |  10
-rw-r--r--  src/mongo/db/query/query_yield.h                   |   2
-rw-r--r--  src/mongo/db/query/stage_builder.cpp               |  89
-rw-r--r--  src/mongo/db/query/stage_builder.h                 |   2
23 files changed, 312 insertions(+), 304 deletions(-)
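
This commit is a mechanical rename of the OperationContext parameter from 'txn' to 'opCtx' throughout the query layer: signatures, call sites, locals, and doc comments change, but behavior does not. A minimal before/after sketch of the pattern, modeled on the canonicalize() overload changed below (illustrative, not a literal excerpt):

    // Before: the OperationContext argument was conventionally named 'txn'.
    static StatusWith<std::unique_ptr<CanonicalQuery>> canonicalize(
        OperationContext* txn, std::unique_ptr<QueryRequest> qr, const ExtensionsCallback&);

    // After: the same argument is named 'opCtx', matching its OperationContext type.
    static StatusWith<std::unique_ptr<CanonicalQuery>> canonicalize(
        OperationContext* opCtx, std::unique_ptr<QueryRequest> qr, const ExtensionsCallback&);
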
diff --git a/src/mongo/db/query/canonical_query.cpp b/src/mongo/db/query/canonical_query.cpp
index 4ef22584133..55d1b49f5cb 100644
--- a/src/mongo/db/query/canonical_query.cpp
+++ b/src/mongo/db/query/canonical_query.cpp
@@ -102,19 +102,19 @@ bool matchExpressionLessThan(const MatchExpression* lhs, const MatchExpression*
// static
StatusWith<std::unique_ptr<CanonicalQuery>> CanonicalQuery::canonicalize(
- OperationContext* txn, const QueryMessage& qm, const ExtensionsCallback& extensionsCallback) {
+ OperationContext* opCtx, const QueryMessage& qm, const ExtensionsCallback& extensionsCallback) {
// Make QueryRequest.
auto qrStatus = QueryRequest::fromLegacyQueryMessage(qm);
if (!qrStatus.isOK()) {
return qrStatus.getStatus();
}
- return CanonicalQuery::canonicalize(txn, std::move(qrStatus.getValue()), extensionsCallback);
+ return CanonicalQuery::canonicalize(opCtx, std::move(qrStatus.getValue()), extensionsCallback);
}
// static
StatusWith<std::unique_ptr<CanonicalQuery>> CanonicalQuery::canonicalize(
- OperationContext* txn,
+ OperationContext* opCtx,
std::unique_ptr<QueryRequest> qr,
const ExtensionsCallback& extensionsCallback) {
auto qrStatus = qr->validate();
@@ -124,7 +124,7 @@ StatusWith<std::unique_ptr<CanonicalQuery>> CanonicalQuery::canonicalize(
std::unique_ptr<CollatorInterface> collator;
if (!qr->getCollation().isEmpty()) {
- auto statusWithCollator = CollatorFactoryInterface::get(txn->getServiceContext())
+ auto statusWithCollator = CollatorFactoryInterface::get(opCtx->getServiceContext())
->makeFromBSON(qr->getCollation());
if (!statusWithCollator.isOK()) {
return statusWithCollator.getStatus();
@@ -154,7 +154,7 @@ StatusWith<std::unique_ptr<CanonicalQuery>> CanonicalQuery::canonicalize(
// static
StatusWith<std::unique_ptr<CanonicalQuery>> CanonicalQuery::canonicalize(
- OperationContext* txn,
+ OperationContext* opCtx,
const CanonicalQuery& baseQuery,
MatchExpression* root,
const ExtensionsCallback& extensionsCallback) {
diff --git a/src/mongo/db/query/canonical_query.h b/src/mongo/db/query/canonical_query.h
index 3ff0ecb2d8a..dfcbaf070f0 100644
--- a/src/mongo/db/query/canonical_query.h
+++ b/src/mongo/db/query/canonical_query.h
@@ -47,13 +47,14 @@ public:
* If parsing succeeds, returns a std::unique_ptr<CanonicalQuery> representing the parsed
* query (which will never be NULL). If parsing fails, returns an error Status.
*
- * 'txn' must point to a valid OperationContext, but 'txn' does not need to outlive the returned
+ * 'opCtx' must point to a valid OperationContext, but 'opCtx' does not need to outlive the
+ * returned
* CanonicalQuery.
*
* Used for legacy find through the OP_QUERY message.
*/
static StatusWith<std::unique_ptr<CanonicalQuery>> canonicalize(
- OperationContext* txn,
+ OperationContext* opCtx,
const QueryMessage& qm,
const ExtensionsCallback& extensionsCallback);
@@ -61,11 +62,12 @@ public:
* If parsing succeeds, returns a std::unique_ptr<CanonicalQuery> representing the parsed
* query (which will never be NULL). If parsing fails, returns an error Status.
*
- * 'txn' must point to a valid OperationContext, but 'txn' does not need to outlive the returned
+ * 'opCtx' must point to a valid OperationContext, but 'opCtx' does not need to outlive the
+ * returned
* CanonicalQuery.
*/
static StatusWith<std::unique_ptr<CanonicalQuery>> canonicalize(
- OperationContext* txn, std::unique_ptr<QueryRequest> qr, const ExtensionsCallback&);
+ OperationContext* opCtx, std::unique_ptr<QueryRequest> qr, const ExtensionsCallback&);
/**
* For testing or for internal clients to use.
@@ -79,7 +81,7 @@ public:
* Does not take ownership of 'root'.
*/
static StatusWith<std::unique_ptr<CanonicalQuery>> canonicalize(
- OperationContext* txn,
+ OperationContext* opCtx,
const CanonicalQuery& baseQuery,
MatchExpression* root,
const ExtensionsCallback& extensionsCallback);
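
As a usage reference for the renamed parameter, here is a condensed sketch of how the QueryRequest overload of canonicalize() is driven in the tests that follow (QueryTestServiceContext, nss, and the assertion macros come from the test fixture; the filter is a placeholder):

    QueryTestServiceContext serviceContext;
    auto opCtx = serviceContext.makeOperationContext();

    auto qr = stdx::make_unique<QueryRequest>(nss);
    qr->setFilter(fromjson("{a: 1}"));

    // 'opCtx' must remain valid for the duration of the call, but need not outlive
    // the returned CanonicalQuery.
    auto statusWithCQ = CanonicalQuery::canonicalize(
        opCtx.get(), std::move(qr), ExtensionsCallbackDisallowExtensions());
    ASSERT_OK(statusWithCQ.getStatus());
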
diff --git a/src/mongo/db/query/canonical_query_test.cpp b/src/mongo/db/query/canonical_query_test.cpp
index 292425167f7..a874f1f89e3 100644
--- a/src/mongo/db/query/canonical_query_test.cpp
+++ b/src/mongo/db/query/canonical_query_test.cpp
@@ -322,7 +322,7 @@ TEST(CanonicalQueryTest, IsValidTextAndSnapshot) {
TEST(CanonicalQueryTest, IsValidSortKeyMetaProjection) {
QueryTestServiceContext serviceContext;
- auto txn = serviceContext.makeOperationContext();
+ auto opCtx = serviceContext.makeOperationContext();
// Passing a sortKey meta-projection without a sort is an error.
{
@@ -330,7 +330,7 @@ TEST(CanonicalQueryTest, IsValidSortKeyMetaProjection) {
auto qr = assertGet(QueryRequest::makeFromFindCommand(
nss, fromjson("{find: 'testcoll', projection: {foo: {$meta: 'sortKey'}}}"), isExplain));
auto cq = CanonicalQuery::canonicalize(
- txn.get(), std::move(qr), ExtensionsCallbackDisallowExtensions());
+ opCtx.get(), std::move(qr), ExtensionsCallbackDisallowExtensions());
ASSERT_NOT_OK(cq.getStatus());
}
@@ -342,7 +342,7 @@ TEST(CanonicalQueryTest, IsValidSortKeyMetaProjection) {
fromjson("{find: 'testcoll', projection: {foo: {$meta: 'sortKey'}}, sort: {bar: 1}}"),
isExplain));
auto cq = CanonicalQuery::canonicalize(
- txn.get(), std::move(qr), ExtensionsCallbackDisallowExtensions());
+ opCtx.get(), std::move(qr), ExtensionsCallbackDisallowExtensions());
ASSERT_OK(cq.getStatus());
}
}
@@ -435,12 +435,12 @@ TEST(CanonicalQueryTest, SortTreeNumChildrenComparison) {
*/
unique_ptr<CanonicalQuery> canonicalize(const char* queryStr) {
QueryTestServiceContext serviceContext;
- auto txn = serviceContext.makeOperationContext();
+ auto opCtx = serviceContext.makeOperationContext();
auto qr = stdx::make_unique<QueryRequest>(nss);
qr->setFilter(fromjson(queryStr));
auto statusWithCQ = CanonicalQuery::canonicalize(
- txn.get(), std::move(qr), ExtensionsCallbackDisallowExtensions());
+ opCtx.get(), std::move(qr), ExtensionsCallbackDisallowExtensions());
ASSERT_OK(statusWithCQ.getStatus());
return std::move(statusWithCQ.getValue());
}
@@ -449,14 +449,14 @@ std::unique_ptr<CanonicalQuery> canonicalize(const char* queryStr,
const char* sortStr,
const char* projStr) {
QueryTestServiceContext serviceContext;
- auto txn = serviceContext.makeOperationContext();
+ auto opCtx = serviceContext.makeOperationContext();
auto qr = stdx::make_unique<QueryRequest>(nss);
qr->setFilter(fromjson(queryStr));
qr->setSort(fromjson(sortStr));
qr->setProj(fromjson(projStr));
auto statusWithCQ = CanonicalQuery::canonicalize(
- txn.get(), std::move(qr), ExtensionsCallbackDisallowExtensions());
+ opCtx.get(), std::move(qr), ExtensionsCallbackDisallowExtensions());
ASSERT_OK(statusWithCQ.getStatus());
return std::move(statusWithCQ.getValue());
}
@@ -561,18 +561,18 @@ TEST(CanonicalQueryTest, NormalizeWithInPreservesCollator) {
TEST(CanonicalQueryTest, CanonicalizeFromBaseQuery) {
QueryTestServiceContext serviceContext;
- auto txn = serviceContext.makeOperationContext();
+ auto opCtx = serviceContext.makeOperationContext();
const bool isExplain = true;
const std::string cmdStr =
"{find:'bogusns', filter:{$or:[{a:1,b:1},{a:1,c:1}]}, projection:{a:1}, sort:{b:1}}";
auto qr = assertGet(QueryRequest::makeFromFindCommand(nss, fromjson(cmdStr), isExplain));
auto baseCq = assertGet(CanonicalQuery::canonicalize(
- txn.get(), std::move(qr), ExtensionsCallbackDisallowExtensions()));
+ opCtx.get(), std::move(qr), ExtensionsCallbackDisallowExtensions()));
MatchExpression* firstClauseExpr = baseCq->root()->getChild(0);
auto childCq = assertGet(CanonicalQuery::canonicalize(
- txn.get(), *baseCq, firstClauseExpr, ExtensionsCallbackDisallowExtensions()));
+ opCtx.get(), *baseCq, firstClauseExpr, ExtensionsCallbackDisallowExtensions()));
// Descriptive test. The childCq's filter should be the relevant $or clause, rather than the
// entire query predicate.
@@ -586,55 +586,55 @@ TEST(CanonicalQueryTest, CanonicalizeFromBaseQuery) {
TEST(CanonicalQueryTest, CanonicalQueryFromQRWithNoCollation) {
QueryTestServiceContext serviceContext;
- auto txn = serviceContext.makeOperationContext();
+ auto opCtx = serviceContext.makeOperationContext();
auto qr = stdx::make_unique<QueryRequest>(nss);
auto cq = assertGet(CanonicalQuery::canonicalize(
- txn.get(), std::move(qr), ExtensionsCallbackDisallowExtensions()));
+ opCtx.get(), std::move(qr), ExtensionsCallbackDisallowExtensions()));
ASSERT_TRUE(cq->getCollator() == nullptr);
}
TEST(CanonicalQueryTest, CanonicalQueryFromQRWithCollation) {
QueryTestServiceContext serviceContext;
- auto txn = serviceContext.makeOperationContext();
+ auto opCtx = serviceContext.makeOperationContext();
auto qr = stdx::make_unique<QueryRequest>(nss);
qr->setCollation(BSON("locale"
<< "reverse"));
auto cq = assertGet(CanonicalQuery::canonicalize(
- txn.get(), std::move(qr), ExtensionsCallbackDisallowExtensions()));
+ opCtx.get(), std::move(qr), ExtensionsCallbackDisallowExtensions()));
CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kReverseString);
ASSERT_TRUE(CollatorInterface::collatorsMatch(cq->getCollator(), &collator));
}
TEST(CanonicalQueryTest, CanonicalQueryFromBaseQueryWithNoCollation) {
QueryTestServiceContext serviceContext;
- auto txn = serviceContext.makeOperationContext();
+ auto opCtx = serviceContext.makeOperationContext();
auto qr = stdx::make_unique<QueryRequest>(nss);
qr->setFilter(fromjson("{$or:[{a:1,b:1},{a:1,c:1}]}"));
auto baseCq = assertGet(CanonicalQuery::canonicalize(
- txn.get(), std::move(qr), ExtensionsCallbackDisallowExtensions()));
+ opCtx.get(), std::move(qr), ExtensionsCallbackDisallowExtensions()));
MatchExpression* firstClauseExpr = baseCq->root()->getChild(0);
auto childCq = assertGet(CanonicalQuery::canonicalize(
- txn.get(), *baseCq, firstClauseExpr, ExtensionsCallbackDisallowExtensions()));
+ opCtx.get(), *baseCq, firstClauseExpr, ExtensionsCallbackDisallowExtensions()));
ASSERT_TRUE(baseCq->getCollator() == nullptr);
ASSERT_TRUE(childCq->getCollator() == nullptr);
}
TEST(CanonicalQueryTest, CanonicalQueryFromBaseQueryWithCollation) {
QueryTestServiceContext serviceContext;
- auto txn = serviceContext.makeOperationContext();
+ auto opCtx = serviceContext.makeOperationContext();
auto qr = stdx::make_unique<QueryRequest>(nss);
qr->setFilter(fromjson("{$or:[{a:1,b:1},{a:1,c:1}]}"));
qr->setCollation(BSON("locale"
<< "reverse"));
auto baseCq = assertGet(CanonicalQuery::canonicalize(
- txn.get(), std::move(qr), ExtensionsCallbackDisallowExtensions()));
+ opCtx.get(), std::move(qr), ExtensionsCallbackDisallowExtensions()));
MatchExpression* firstClauseExpr = baseCq->root()->getChild(0);
auto childCq = assertGet(CanonicalQuery::canonicalize(
- txn.get(), *baseCq, firstClauseExpr, ExtensionsCallbackDisallowExtensions()));
+ opCtx.get(), *baseCq, firstClauseExpr, ExtensionsCallbackDisallowExtensions()));
ASSERT(baseCq->getCollator());
ASSERT(childCq->getCollator());
ASSERT_TRUE(*(childCq->getCollator()) == *(baseCq->getCollator()));
@@ -642,12 +642,12 @@ TEST(CanonicalQueryTest, CanonicalQueryFromBaseQueryWithCollation) {
TEST(CanonicalQueryTest, SettingCollatorUpdatesCollatorAndMatchExpression) {
QueryTestServiceContext serviceContext;
- auto txn = serviceContext.makeOperationContext();
+ auto opCtx = serviceContext.makeOperationContext();
auto qr = stdx::make_unique<QueryRequest>(nss);
qr->setFilter(fromjson("{a: 'foo', b: {$in: ['bar', 'baz']}}"));
auto cq = assertGet(CanonicalQuery::canonicalize(
- txn.get(), std::move(qr), ExtensionsCallbackDisallowExtensions()));
+ opCtx.get(), std::move(qr), ExtensionsCallbackDisallowExtensions()));
ASSERT_EQUALS(2U, cq->root()->numChildren());
auto firstChild = cq->root()->getChild(0);
auto secondChild = cq->root()->getChild(1);
@@ -663,7 +663,7 @@ TEST(CanonicalQueryTest, SettingCollatorUpdatesCollatorAndMatchExpression) {
ASSERT(!inExpr->getCollator());
unique_ptr<CollatorInterface> collator =
- assertGet(CollatorFactoryInterface::get(txn->getServiceContext())
+ assertGet(CollatorFactoryInterface::get(opCtx->getServiceContext())
->makeFromBSON(BSON("locale"
<< "reverse")));
cq->setCollator(std::move(collator));
diff --git a/src/mongo/db/query/find.cpp b/src/mongo/db/query/find.cpp
index f60bbe33ae5..d8c9feb8fde 100644
--- a/src/mongo/db/query/find.cpp
+++ b/src/mongo/db/query/find.cpp
@@ -80,7 +80,7 @@ bool isCursorAwaitData(const ClientCursor* cursor) {
return cursor->queryOptions() & QueryOption_AwaitData;
}
-bool shouldSaveCursor(OperationContext* txn,
+bool shouldSaveCursor(OperationContext* opCtx,
const Collection* collection,
PlanExecutor::ExecState finalState,
PlanExecutor* exec) {
@@ -100,7 +100,7 @@ bool shouldSaveCursor(OperationContext* txn,
// an empty collection. Right now we do not keep a cursor if the collection
// has zero records.
if (qr.isTailable()) {
- return collection && collection->numRecords(txn) != 0U;
+ return collection && collection->numRecords(opCtx) != 0U;
}
return !exec->isEOF();
@@ -120,25 +120,25 @@ bool shouldSaveCursorGetMore(PlanExecutor::ExecState finalState,
return !exec->isEOF();
}
-void beginQueryOp(OperationContext* txn,
+void beginQueryOp(OperationContext* opCtx,
const NamespaceString& nss,
const BSONObj& queryObj,
long long ntoreturn,
long long ntoskip) {
- auto curOp = CurOp::get(txn);
+ auto curOp = CurOp::get(opCtx);
curOp->debug().ntoreturn = ntoreturn;
curOp->debug().ntoskip = ntoskip;
- stdx::lock_guard<Client> lk(*txn->getClient());
+ stdx::lock_guard<Client> lk(*opCtx->getClient());
curOp->setQuery_inlock(queryObj);
curOp->setNS_inlock(nss.ns());
}
-void endQueryOp(OperationContext* txn,
+void endQueryOp(OperationContext* opCtx,
Collection* collection,
const PlanExecutor& exec,
long long numResults,
CursorId cursorId) {
- auto curOp = CurOp::get(txn);
+ auto curOp = CurOp::get(opCtx);
// Fill out basic CurOp query exec properties.
curOp->debug().nreturned = numResults;
@@ -151,7 +151,7 @@ void endQueryOp(OperationContext* txn,
curOp->debug().setPlanSummaryMetrics(summaryStats);
if (collection) {
- collection->infoCache()->notifyOfQuery(txn, summaryStats.indexesUsed);
+ collection->infoCache()->notifyOfQuery(opCtx, summaryStats.indexesUsed);
}
if (curOp->shouldDBProfile()) {
@@ -219,7 +219,7 @@ void generateBatch(int ntoreturn,
/**
* Called by db/instance.cpp. This is the getMore entry point.
*/
-Message getMore(OperationContext* txn,
+Message getMore(OperationContext* opCtx,
const char* ns,
int ntoreturn,
long long cursorid,
@@ -227,7 +227,7 @@ Message getMore(OperationContext* txn,
bool* isCursorAuthorized) {
invariant(ntoreturn >= 0);
- CurOp& curOp = *CurOp::get(txn);
+ CurOp& curOp = *CurOp::get(opCtx);
// For testing, we may want to fail if we receive a getmore.
if (MONGO_FAIL_POINT(failReceivedGetmore)) {
@@ -267,7 +267,7 @@ Message getMore(OperationContext* txn,
// the data within a collection.
cursorManager = CursorManager::getGlobalCursorManager();
} else {
- ctx = stdx::make_unique<AutoGetCollectionOrViewForRead>(txn, nss);
+ ctx = stdx::make_unique<AutoGetCollectionOrViewForRead>(opCtx, nss);
auto viewCtx = static_cast<AutoGetCollectionOrViewForRead*>(ctx.get());
if (viewCtx->getView()) {
uasserted(
@@ -290,7 +290,7 @@ Message getMore(OperationContext* txn,
// reads are allowed is PRIMARY (or master in master/slave). This function uasserts if
// reads are not okay.
Status status =
- repl::getGlobalReplicationCoordinator()->checkCanServeReadsFor_UNSAFE(txn, nss, true);
+ repl::getGlobalReplicationCoordinator()->checkCanServeReadsFor_UNSAFE(opCtx, nss, true);
uassertStatusOK(status);
// A pin performs a CC lookup and if there is a CC, increments the CC's pin value so it
@@ -328,7 +328,7 @@ Message getMore(OperationContext* txn,
*isCursorAuthorized = true;
if (cc->isReadCommitted())
- uassertStatusOK(txn->recoveryUnit()->setReadFromMajorityCommittedSnapshot());
+ uassertStatusOK(opCtx->recoveryUnit()->setReadFromMajorityCommittedSnapshot());
// Reset timeout timer on the cursor since the cursor is still in use.
cc->resetIdleTime();
@@ -338,12 +338,12 @@ Message getMore(OperationContext* txn,
if (cc->getLeftoverMaxTimeMicros() < Microseconds::max()) {
uassert(40136,
"Illegal attempt to set operation deadline within DBDirectClient",
- !txn->getClient()->isInDirectClient());
- txn->setDeadlineAfterNowBy(cc->getLeftoverMaxTimeMicros());
+ !opCtx->getClient()->isInDirectClient());
+ opCtx->setDeadlineAfterNowBy(cc->getLeftoverMaxTimeMicros());
}
- txn->checkForInterrupt(); // May trigger maxTimeAlwaysTimeOut fail point.
+ opCtx->checkForInterrupt(); // May trigger maxTimeAlwaysTimeOut fail point.
- cc->updateSlaveLocation(txn);
+ cc->updateSlaveLocation(opCtx);
if (cc->isAggCursor()) {
// Agg cursors handle their own locking internally.
@@ -372,12 +372,12 @@ Message getMore(OperationContext* txn,
}
PlanExecutor* exec = cc->getExecutor();
- exec->reattachToOperationContext(txn);
+ exec->reattachToOperationContext(opCtx);
exec->restoreState();
auto planSummary = Explain::getPlanSummary(exec);
{
- stdx::lock_guard<Client> lk(*txn->getClient());
+ stdx::lock_guard<Client> lk(*opCtx->getClient());
curOp.setPlanSummary_inlock(planSummary);
// Ensure that the original query object is available in the slow query log, profiler
@@ -414,7 +414,7 @@ Message getMore(OperationContext* txn,
curOp.setExpectedLatencyMs(durationCount<Milliseconds>(timeout));
// Reacquiring locks.
- ctx = make_unique<AutoGetCollectionForRead>(txn, nss);
+ ctx = make_unique<AutoGetCollectionForRead>(opCtx, nss);
exec->restoreState();
// We woke up because either the timed_wait expired, or there was more data. Either
@@ -449,8 +449,9 @@ Message getMore(OperationContext* txn,
// if the cursor is aggregation, we release these locks.
if (cc->isAggCursor()) {
invariant(NULL == ctx.get());
- unpinDBLock = make_unique<Lock::DBLock>(txn->lockState(), nss.db(), MODE_IS);
- unpinCollLock = make_unique<Lock::CollectionLock>(txn->lockState(), nss.ns(), MODE_IS);
+ unpinDBLock = make_unique<Lock::DBLock>(opCtx->lockState(), nss.db(), MODE_IS);
+ unpinCollLock =
+ make_unique<Lock::CollectionLock>(opCtx->lockState(), nss.ns(), MODE_IS);
}
// Our two possible ClientCursorPin cleanup paths are:
@@ -486,7 +487,7 @@ Message getMore(OperationContext* txn,
// If the getmore had a time limit, remaining time is "rolled over" back to the
// cursor (for use by future getmore ops).
- cc->setLeftoverMaxTimeMicros(txn->getRemainingMaxTimeMicros());
+ cc->setLeftoverMaxTimeMicros(opCtx->getRemainingMaxTimeMicros());
}
}
@@ -501,11 +502,11 @@ Message getMore(OperationContext* txn,
return Message(bb.release());
}
-std::string runQuery(OperationContext* txn,
+std::string runQuery(OperationContext* opCtx,
QueryMessage& q,
const NamespaceString& nss,
Message& result) {
- CurOp& curOp = *CurOp::get(txn);
+ CurOp& curOp = *CurOp::get(opCtx);
uassert(ErrorCodes::InvalidNamespace,
str::stream() << "Invalid ns [" << nss.ns() << "]",
@@ -513,11 +514,11 @@ std::string runQuery(OperationContext* txn,
invariant(!nss.isCommand());
// Set CurOp information.
- beginQueryOp(txn, nss, q.query, q.ntoreturn, q.ntoskip);
+ beginQueryOp(opCtx, nss, q.query, q.ntoreturn, q.ntoskip);
// Parse the qm into a CanonicalQuery.
- auto statusWithCQ = CanonicalQuery::canonicalize(txn, q, ExtensionsCallbackReal(txn, &nss));
+ auto statusWithCQ = CanonicalQuery::canonicalize(opCtx, q, ExtensionsCallbackReal(opCtx, &nss));
if (!statusWithCQ.isOK()) {
uasserted(17287,
str::stream() << "Can't canonicalize query: "
@@ -530,7 +531,7 @@ std::string runQuery(OperationContext* txn,
LOG(2) << "Running query: " << redact(cq->toStringShort());
// Parse, canonicalize, plan, transcribe, and get a plan executor.
- AutoGetCollectionOrViewForRead ctx(txn, nss);
+ AutoGetCollectionOrViewForRead ctx(opCtx, nss);
Collection* collection = ctx.getCollection();
if (ctx.getView()) {
@@ -544,7 +545,7 @@ std::string runQuery(OperationContext* txn,
// We have a parsed query. Time to get the execution plan for it.
std::unique_ptr<PlanExecutor> exec = uassertStatusOK(
- getExecutorFind(txn, collection, nss, std::move(cq), PlanExecutor::YIELD_AUTO));
+ getExecutorFind(opCtx, collection, nss, std::move(cq), PlanExecutor::YIELD_AUTO));
const QueryRequest& qr = exec->getCanonicalQuery()->getQueryRequest();
@@ -578,15 +579,15 @@ std::string runQuery(OperationContext* txn,
if (qr.getMaxTimeMS() > 0) {
uassert(40116,
"Illegal attempt to set operation deadline within DBDirectClient",
- !txn->getClient()->isInDirectClient());
- txn->setDeadlineAfterNowBy(Milliseconds{qr.getMaxTimeMS()});
+ !opCtx->getClient()->isInDirectClient());
+ opCtx->setDeadlineAfterNowBy(Milliseconds{qr.getMaxTimeMS()});
}
- txn->checkForInterrupt(); // May trigger maxTimeAlwaysTimeOut fail point.
+ opCtx->checkForInterrupt(); // May trigger maxTimeAlwaysTimeOut fail point.
// uassert if we are not on a primary, and not a secondary with SlaveOk query parameter set.
bool slaveOK = qr.isSlaveOk() || qr.hasReadPref();
Status serveReadsStatus =
- repl::getGlobalReplicationCoordinator()->checkCanServeReadsFor_UNSAFE(txn, nss, slaveOK);
+ repl::getGlobalReplicationCoordinator()->checkCanServeReadsFor_UNSAFE(opCtx, nss, slaveOK);
uassertStatusOK(serveReadsStatus);
// Run the query.
@@ -607,7 +608,7 @@ std::string runQuery(OperationContext* txn,
// Get summary info about which plan the executor is using.
{
- stdx::lock_guard<Client> lk(*txn->getClient());
+ stdx::lock_guard<Client> lk(*opCtx->getClient());
curOp.setPlanSummary_inlock(Explain::getPlanSummary(exec.get()));
}
@@ -657,14 +658,14 @@ std::string runQuery(OperationContext* txn,
// Before saving the cursor, ensure that whatever plan we established happened with the expected
// collection version
- auto css = CollectionShardingState::get(txn, nss);
- css->checkShardVersionOrThrow(txn);
+ auto css = CollectionShardingState::get(opCtx, nss);
+ css->checkShardVersionOrThrow(opCtx);
// Fill out CurOp based on query results. If we have a cursorid, we will fill out CurOp with
// this cursorid later.
long long ccId = 0;
- if (shouldSaveCursor(txn, collection, state, exec.get())) {
+ if (shouldSaveCursor(opCtx, collection, state, exec.get())) {
// We won't use the executor until it's getMore'd.
exec->saveState();
exec->detachFromOperationContext();
@@ -673,7 +674,7 @@ std::string runQuery(OperationContext* txn,
ClientCursorPin pinnedCursor = collection->getCursorManager()->registerCursor(
{exec.release(),
nss.ns(),
- txn->recoveryUnit()->isReadingFromMajorityCommittedSnapshot(),
+ opCtx->recoveryUnit()->isReadingFromMajorityCommittedSnapshot(),
qr.getOptions(),
upconvertQueryEntry(q.query, qr.nss(), q.ntoreturn, q.ntoskip)});
ccId = pinnedCursor.getCursor()->cursorid();
@@ -695,12 +696,12 @@ std::string runQuery(OperationContext* txn,
// If the query had a time limit, remaining time is "rolled over" to the cursor (for
// use by future getmore ops).
- pinnedCursor.getCursor()->setLeftoverMaxTimeMicros(txn->getRemainingMaxTimeMicros());
+ pinnedCursor.getCursor()->setLeftoverMaxTimeMicros(opCtx->getRemainingMaxTimeMicros());
- endQueryOp(txn, collection, *pinnedCursor.getCursor()->getExecutor(), numResults, ccId);
+ endQueryOp(opCtx, collection, *pinnedCursor.getCursor()->getExecutor(), numResults, ccId);
} else {
LOG(5) << "Not caching executor but returning " << numResults << " results.";
- endQueryOp(txn, collection, *exec, numResults, ccId);
+ endQueryOp(opCtx, collection, *exec, numResults, ccId);
}
// Fill out the output buffer's header.
diff --git a/src/mongo/db/query/find.h b/src/mongo/db/query/find.h
index e6c8160b5cb..2795934ec87 100644
--- a/src/mongo/db/query/find.h
+++ b/src/mongo/db/query/find.h
@@ -58,7 +58,7 @@ bool isCursorAwaitData(const ClientCursor* cursor);
* If false, the caller should close the cursor and indicate this to the client by sending back
* a cursor ID of 0.
*/
-bool shouldSaveCursor(OperationContext* txn,
+bool shouldSaveCursor(OperationContext* opCtx,
const Collection* collection,
PlanExecutor::ExecState finalState,
PlanExecutor* exec);
@@ -75,21 +75,21 @@ bool shouldSaveCursorGetMore(PlanExecutor::ExecState finalState,
bool isTailable);
/**
- * Fills out the CurOp for "txn" with information about this query.
+ * Fills out the CurOp for "opCtx" with information about this query.
*/
-void beginQueryOp(OperationContext* txn,
+void beginQueryOp(OperationContext* opCtx,
const NamespaceString& nss,
const BSONObj& queryObj,
long long ntoreturn,
long long ntoskip);
/**
- * 1) Fills out CurOp for "txn" with information regarding this query's execution.
+ * 1) Fills out CurOp for "opCtx" with information regarding this query's execution.
* 2) Reports index usage to the CollectionInfoCache.
*
* Uses explain functionality to extract stats from 'exec'.
*/
-void endQueryOp(OperationContext* txn,
+void endQueryOp(OperationContext* opCtx,
Collection* collection,
const PlanExecutor& exec,
long long numResults,
@@ -103,7 +103,7 @@ void endQueryOp(OperationContext* txn,
* The oplog start finding hack requires that 'cq' has a $gt or $gte predicate over
* a field named 'ts'.
*/
-StatusWith<std::unique_ptr<PlanExecutor>> getOplogStartHack(OperationContext* txn,
+StatusWith<std::unique_ptr<PlanExecutor>> getOplogStartHack(OperationContext* opCtx,
Collection* collection,
std::unique_ptr<CanonicalQuery> cq);
@@ -111,7 +111,7 @@ StatusWith<std::unique_ptr<PlanExecutor>> getOplogStartHack(OperationContext* tx
* Called from the getMore entry point in ops/query.cpp.
* Returned buffer is the message to return to the client.
*/
-Message getMore(OperationContext* txn,
+Message getMore(OperationContext* opCtx,
const char* ns,
int ntoreturn,
long long cursorid,
@@ -121,7 +121,7 @@ Message getMore(OperationContext* txn,
/**
* Run the query 'q' and place the result in 'result'.
*/
-std::string runQuery(OperationContext* txn,
+std::string runQuery(OperationContext* opCtx,
QueryMessage& q,
const NamespaceString& ns,
Message& result);
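
For orientation, a condensed sketch of how runQuery() in find.cpp (changed above) wires these entry points together after the rename; error handling, cursor registration, and reply-buffer handling are elided:

    // Condensed from runQuery() in find.cpp in this diff; simplified sketch only.
    beginQueryOp(opCtx, nss, q.query, q.ntoreturn, q.ntoskip);

    auto statusWithCQ =
        CanonicalQuery::canonicalize(opCtx, q, ExtensionsCallbackReal(opCtx, &nss));
    auto cq = std::move(statusWithCQ.getValue());  // the real code uasserts on error

    std::unique_ptr<PlanExecutor> exec = uassertStatusOK(
        getExecutorFind(opCtx, collection, nss, std::move(cq), PlanExecutor::YIELD_AUTO));

    // ... run 'exec', appending results to the reply and recording 'state' ...

    long long ccId = 0;
    if (shouldSaveCursor(opCtx, collection, state, exec.get())) {
        // The real code registers a ClientCursor here and reports its id via 'ccId'.
    }
    endQueryOp(opCtx, collection, *exec, numResults, ccId);
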
diff --git a/src/mongo/db/query/get_executor.cpp b/src/mongo/db/query/get_executor.cpp
index eea6597cdce..9aa85a7d81a 100644
--- a/src/mongo/db/query/get_executor.cpp
+++ b/src/mongo/db/query/get_executor.cpp
@@ -122,19 +122,19 @@ bool turnIxscanIntoCount(QuerySolution* soln);
} // namespace
-void fillOutPlannerParams(OperationContext* txn,
+void fillOutPlannerParams(OperationContext* opCtx,
Collection* collection,
CanonicalQuery* canonicalQuery,
QueryPlannerParams* plannerParams) {
// If it's not NULL, we may have indices. Access the catalog and fill out IndexEntry(s)
- IndexCatalog::IndexIterator ii = collection->getIndexCatalog()->getIndexIterator(txn, false);
+ IndexCatalog::IndexIterator ii = collection->getIndexCatalog()->getIndexIterator(opCtx, false);
while (ii.more()) {
const IndexDescriptor* desc = ii.next();
IndexCatalogEntry* ice = ii.catalogEntry(desc);
plannerParams->indices.push_back(IndexEntry(desc->keyPattern(),
desc->getAccessMethodName(),
- desc->isMultikey(txn),
- ice->getMultikeyPaths(txn),
+ desc->isMultikey(opCtx),
+ ice->getMultikeyPaths(opCtx),
desc->isSparse(),
desc->unique(),
desc->indexName(),
@@ -174,7 +174,8 @@ void fillOutPlannerParams(OperationContext* txn,
// If the caller wants a shard filter, make sure we're actually sharded.
if (plannerParams->options & QueryPlannerParams::INCLUDE_SHARD_FILTER) {
- auto collMetadata = CollectionShardingState::get(txn, canonicalQuery->nss())->getMetadata();
+ auto collMetadata =
+ CollectionShardingState::get(opCtx, canonicalQuery->nss())->getMetadata();
if (collMetadata) {
plannerParams->shardKey = collMetadata->getKeyPattern();
} else {
@@ -459,21 +460,21 @@ StatusWith<PrepareExecutionResult> prepareExecution(OperationContext* opCtx,
} // namespace
-StatusWith<unique_ptr<PlanExecutor>> getExecutor(OperationContext* txn,
+StatusWith<unique_ptr<PlanExecutor>> getExecutor(OperationContext* opCtx,
Collection* collection,
unique_ptr<CanonicalQuery> canonicalQuery,
PlanExecutor::YieldPolicy yieldPolicy,
size_t plannerOptions) {
unique_ptr<WorkingSet> ws = make_unique<WorkingSet>();
StatusWith<PrepareExecutionResult> executionResult =
- prepareExecution(txn, collection, ws.get(), std::move(canonicalQuery), plannerOptions);
+ prepareExecution(opCtx, collection, ws.get(), std::move(canonicalQuery), plannerOptions);
if (!executionResult.isOK()) {
return executionResult.getStatus();
}
invariant(executionResult.getValue().root);
// We must have a tree of stages in order to have a valid plan executor, but the query
// solution may be null.
- return PlanExecutor::make(txn,
+ return PlanExecutor::make(opCtx,
std::move(ws),
std::move(executionResult.getValue().root),
std::move(executionResult.getValue().querySolution),
@@ -506,7 +507,7 @@ mongo::BSONElement extractOplogTsOptime(const mongo::MatchExpression* me) {
return static_cast<const mongo::ComparisonMatchExpression*>(me)->getData();
}
-StatusWith<unique_ptr<PlanExecutor>> getOplogStartHack(OperationContext* txn,
+StatusWith<unique_ptr<PlanExecutor>> getOplogStartHack(OperationContext* opCtx,
Collection* collection,
unique_ptr<CanonicalQuery> cq) {
invariant(collection);
@@ -555,7 +556,7 @@ StatusWith<unique_ptr<PlanExecutor>> getOplogStartHack(OperationContext* txn,
if (tsElem.type() == bsonTimestamp) {
StatusWith<RecordId> goal = oploghack::keyForOptime(tsElem.timestamp());
if (goal.isOK()) {
- startLoc = collection->getRecordStore()->oplogStartHack(txn, goal.getValue());
+ startLoc = collection->getRecordStore()->oplogStartHack(opCtx, goal.getValue());
}
}
@@ -567,10 +568,10 @@ StatusWith<unique_ptr<PlanExecutor>> getOplogStartHack(OperationContext* txn,
// Fallback to trying the OplogStart stage.
unique_ptr<WorkingSet> oplogws = make_unique<WorkingSet>();
unique_ptr<OplogStart> stage =
- make_unique<OplogStart>(txn, collection, tsExpr, oplogws.get());
+ make_unique<OplogStart>(opCtx, collection, tsExpr, oplogws.get());
// Takes ownership of oplogws and stage.
auto statusWithPlanExecutor = PlanExecutor::make(
- txn, std::move(oplogws), std::move(stage), collection, PlanExecutor::YIELD_AUTO);
+ opCtx, std::move(oplogws), std::move(stage), collection, PlanExecutor::YIELD_AUTO);
invariant(statusWithPlanExecutor.isOK());
unique_ptr<PlanExecutor> exec = std::move(statusWithPlanExecutor.getValue());
@@ -580,7 +581,7 @@ StatusWith<unique_ptr<PlanExecutor>> getOplogStartHack(OperationContext* txn,
// This is normal. The start of the oplog is the beginning of the collection.
if (PlanExecutor::IS_EOF == state) {
- return getExecutor(txn, collection, std::move(cq), PlanExecutor::YIELD_AUTO);
+ return getExecutor(opCtx, collection, std::move(cq), PlanExecutor::YIELD_AUTO);
}
// This is not normal. An error was encountered.
@@ -605,29 +606,30 @@ StatusWith<unique_ptr<PlanExecutor>> getOplogStartHack(OperationContext* txn,
}
unique_ptr<WorkingSet> ws = make_unique<WorkingSet>();
- unique_ptr<CollectionScan> cs = make_unique<CollectionScan>(txn, params, ws.get(), cq->root());
+ unique_ptr<CollectionScan> cs =
+ make_unique<CollectionScan>(opCtx, params, ws.get(), cq->root());
// Takes ownership of 'ws', 'cs', and 'cq'.
return PlanExecutor::make(
- txn, std::move(ws), std::move(cs), std::move(cq), collection, PlanExecutor::YIELD_AUTO);
+ opCtx, std::move(ws), std::move(cs), std::move(cq), collection, PlanExecutor::YIELD_AUTO);
}
} // namespace
-StatusWith<unique_ptr<PlanExecutor>> getExecutorFind(OperationContext* txn,
+StatusWith<unique_ptr<PlanExecutor>> getExecutorFind(OperationContext* opCtx,
Collection* collection,
const NamespaceString& nss,
unique_ptr<CanonicalQuery> canonicalQuery,
PlanExecutor::YieldPolicy yieldPolicy) {
if (NULL != collection && canonicalQuery->getQueryRequest().isOplogReplay()) {
- return getOplogStartHack(txn, collection, std::move(canonicalQuery));
+ return getOplogStartHack(opCtx, collection, std::move(canonicalQuery));
}
size_t options = QueryPlannerParams::DEFAULT;
- if (ShardingState::get(txn)->needCollectionMetadata(txn, nss.ns())) {
+ if (ShardingState::get(opCtx)->needCollectionMetadata(opCtx, nss.ns())) {
options |= QueryPlannerParams::INCLUDE_SHARD_FILTER;
}
return getExecutor(
- txn, collection, std::move(canonicalQuery), PlanExecutor::YIELD_AUTO, options);
+ opCtx, collection, std::move(canonicalQuery), PlanExecutor::YIELD_AUTO, options);
}
namespace {
@@ -639,7 +641,7 @@ namespace {
* If the projection was valid, then return Status::OK() with a pointer to the newly created
* ProjectionStage. Otherwise, return a status indicating the error reason.
*/
-StatusWith<unique_ptr<PlanStage>> applyProjection(OperationContext* txn,
+StatusWith<unique_ptr<PlanStage>> applyProjection(OperationContext* opCtx,
const NamespaceString& nsString,
CanonicalQuery* cq,
const BSONObj& proj,
@@ -670,11 +672,11 @@ StatusWith<unique_ptr<PlanStage>> applyProjection(OperationContext* txn,
"Cannot use a $meta sortKey projection in findAndModify commands."};
}
- ProjectionStageParams params(ExtensionsCallbackReal(txn, &nsString));
+ ProjectionStageParams params(ExtensionsCallbackReal(opCtx, &nsString));
params.projObj = proj;
params.collator = cq->getCollator();
params.fullExpression = cq->root();
- return {make_unique<ProjectionStage>(txn, params, ws, root.release())};
+ return {make_unique<ProjectionStage>(opCtx, params, ws, root.release())};
}
} // namespace
@@ -683,7 +685,7 @@ StatusWith<unique_ptr<PlanStage>> applyProjection(OperationContext* txn,
// Delete
//
-StatusWith<unique_ptr<PlanExecutor>> getExecutorDelete(OperationContext* txn,
+StatusWith<unique_ptr<PlanExecutor>> getExecutorDelete(OperationContext* opCtx,
OpDebug* opDebug,
Collection* collection,
ParsedDelete* parsedDelete) {
@@ -705,8 +707,8 @@ StatusWith<unique_ptr<PlanExecutor>> getExecutorDelete(OperationContext* txn,
str::stream() << "cannot remove from a capped collection: " << nss.ns());
}
- bool userInitiatedWritesAndNotPrimary = txn->writesAreReplicated() &&
- !repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(txn, nss);
+ bool userInitiatedWritesAndNotPrimary = opCtx->writesAreReplicated() &&
+ !repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(opCtx, nss);
if (userInitiatedWritesAndNotPrimary) {
return Status(ErrorCodes::PrimarySteppedDown,
@@ -736,16 +738,17 @@ StatusWith<unique_ptr<PlanExecutor>> getExecutorDelete(OperationContext* txn,
LOG(2) << "Collection " << nss.ns() << " does not exist."
<< " Using EOF stage: " << redact(unparsedQuery);
auto deleteStage = make_unique<DeleteStage>(
- txn, deleteStageParams, ws.get(), nullptr, new EOFStage(txn));
- return PlanExecutor::make(txn, std::move(ws), std::move(deleteStage), nss.ns(), policy);
+ opCtx, deleteStageParams, ws.get(), nullptr, new EOFStage(opCtx));
+ return PlanExecutor::make(
+ opCtx, std::move(ws), std::move(deleteStage), nss.ns(), policy);
}
- const IndexDescriptor* descriptor = collection->getIndexCatalog()->findIdIndex(txn);
+ const IndexDescriptor* descriptor = collection->getIndexCatalog()->findIdIndex(opCtx);
// Construct delete request collator.
std::unique_ptr<CollatorInterface> collator;
if (!request->getCollation().isEmpty()) {
- auto statusWithCollator = CollatorFactoryInterface::get(txn->getServiceContext())
+ auto statusWithCollator = CollatorFactoryInterface::get(opCtx->getServiceContext())
->makeFromBSON(request->getCollation());
if (!statusWithCollator.isOK()) {
return statusWithCollator.getStatus();
@@ -759,11 +762,11 @@ StatusWith<unique_ptr<PlanExecutor>> getExecutorDelete(OperationContext* txn,
request->getProj().isEmpty() && hasCollectionDefaultCollation) {
LOG(2) << "Using idhack: " << redact(unparsedQuery);
- PlanStage* idHackStage =
- new IDHackStage(txn, collection, unparsedQuery["_id"].wrap(), ws.get(), descriptor);
- unique_ptr<DeleteStage> root =
- make_unique<DeleteStage>(txn, deleteStageParams, ws.get(), collection, idHackStage);
- return PlanExecutor::make(txn, std::move(ws), std::move(root), collection, policy);
+ PlanStage* idHackStage = new IDHackStage(
+ opCtx, collection, unparsedQuery["_id"].wrap(), ws.get(), descriptor);
+ unique_ptr<DeleteStage> root = make_unique<DeleteStage>(
+ opCtx, deleteStageParams, ws.get(), collection, idHackStage);
+ return PlanExecutor::make(opCtx, std::move(ws), std::move(root), collection, policy);
}
// If we're here then we don't have a parsed query, but we're also not eligible for
@@ -779,7 +782,7 @@ StatusWith<unique_ptr<PlanExecutor>> getExecutorDelete(OperationContext* txn,
const size_t defaultPlannerOptions = 0;
StatusWith<PrepareExecutionResult> executionResult =
- prepareExecution(txn, collection, ws.get(), std::move(cq), defaultPlannerOptions);
+ prepareExecution(opCtx, collection, ws.get(), std::move(cq), defaultPlannerOptions);
if (!executionResult.isOK()) {
return executionResult.getStatus();
}
@@ -790,14 +793,14 @@ StatusWith<unique_ptr<PlanExecutor>> getExecutorDelete(OperationContext* txn,
deleteStageParams.canonicalQuery = cq.get();
invariant(root);
- root = make_unique<DeleteStage>(txn, deleteStageParams, ws.get(), collection, root.release());
+ root = make_unique<DeleteStage>(opCtx, deleteStageParams, ws.get(), collection, root.release());
if (!request->getProj().isEmpty()) {
invariant(request->shouldReturnDeleted());
const bool allowPositional = true;
StatusWith<unique_ptr<PlanStage>> projStatus = applyProjection(
- txn, nss, cq.get(), request->getProj(), allowPositional, ws.get(), std::move(root));
+ opCtx, nss, cq.get(), request->getProj(), allowPositional, ws.get(), std::move(root));
if (!projStatus.isOK()) {
return projStatus.getStatus();
}
@@ -806,7 +809,7 @@ StatusWith<unique_ptr<PlanExecutor>> getExecutorDelete(OperationContext* txn,
// We must have a tree of stages in order to have a valid plan executor, but the query
// solution may be null.
- return PlanExecutor::make(txn,
+ return PlanExecutor::make(opCtx,
std::move(ws),
std::move(root),
std::move(querySolution),
@@ -837,7 +840,7 @@ inline void validateUpdate(const char* ns, const BSONObj& updateobj, const BSONO
} // namespace
-StatusWith<unique_ptr<PlanExecutor>> getExecutorUpdate(OperationContext* txn,
+StatusWith<unique_ptr<PlanExecutor>> getExecutorUpdate(OperationContext* opCtx,
OpDebug* opDebug,
Collection* collection,
ParsedUpdate* parsedUpdate) {
@@ -871,8 +874,8 @@ StatusWith<unique_ptr<PlanExecutor>> getExecutorUpdate(OperationContext* txn,
// If this is a user-issued update, then we want to return an error: you cannot perform
// writes on a secondary. If this is an update to a secondary from the replication system,
// however, then we make an exception and let the write proceed.
- bool userInitiatedWritesAndNotPrimary = txn->writesAreReplicated() &&
- !repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(txn, nsString);
+ bool userInitiatedWritesAndNotPrimary = opCtx->writesAreReplicated() &&
+ !repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(opCtx, nsString);
if (userInitiatedWritesAndNotPrimary) {
return Status(ErrorCodes::PrimarySteppedDown,
@@ -881,7 +884,7 @@ StatusWith<unique_ptr<PlanExecutor>> getExecutorUpdate(OperationContext* txn,
if (lifecycle) {
lifecycle->setCollection(collection);
- driver->refreshIndexKeys(lifecycle->getIndexKeys(txn));
+ driver->refreshIndexKeys(lifecycle->getIndexKeys(opCtx));
}
const PlanExecutor::YieldPolicy policy = parsedUpdate->yieldPolicy();
@@ -901,12 +904,12 @@ StatusWith<unique_ptr<PlanExecutor>> getExecutorUpdate(OperationContext* txn,
LOG(2) << "Collection " << nsString.ns() << " does not exist."
<< " Using EOF stage: " << redact(unparsedQuery);
auto updateStage = make_unique<UpdateStage>(
- txn, updateStageParams, ws.get(), collection, new EOFStage(txn));
+ opCtx, updateStageParams, ws.get(), collection, new EOFStage(opCtx));
return PlanExecutor::make(
- txn, std::move(ws), std::move(updateStage), nsString.ns(), policy);
+ opCtx, std::move(ws), std::move(updateStage), nsString.ns(), policy);
}
- const IndexDescriptor* descriptor = collection->getIndexCatalog()->findIdIndex(txn);
+ const IndexDescriptor* descriptor = collection->getIndexCatalog()->findIdIndex(opCtx);
const bool hasCollectionDefaultCollation = CollatorInterface::collatorsMatch(
parsedUpdate->getCollator(), collection->getDefaultCollator());
@@ -915,11 +918,11 @@ StatusWith<unique_ptr<PlanExecutor>> getExecutorUpdate(OperationContext* txn,
request->getProj().isEmpty() && hasCollectionDefaultCollation) {
LOG(2) << "Using idhack: " << redact(unparsedQuery);
- PlanStage* idHackStage =
- new IDHackStage(txn, collection, unparsedQuery["_id"].wrap(), ws.get(), descriptor);
- unique_ptr<UpdateStage> root =
- make_unique<UpdateStage>(txn, updateStageParams, ws.get(), collection, idHackStage);
- return PlanExecutor::make(txn, std::move(ws), std::move(root), collection, policy);
+ PlanStage* idHackStage = new IDHackStage(
+ opCtx, collection, unparsedQuery["_id"].wrap(), ws.get(), descriptor);
+ unique_ptr<UpdateStage> root = make_unique<UpdateStage>(
+ opCtx, updateStageParams, ws.get(), collection, idHackStage);
+ return PlanExecutor::make(opCtx, std::move(ws), std::move(root), collection, policy);
}
// If we're here then we don't have a parsed query, but we're also not eligible for
@@ -935,7 +938,7 @@ StatusWith<unique_ptr<PlanExecutor>> getExecutorUpdate(OperationContext* txn,
const size_t defaultPlannerOptions = 0;
StatusWith<PrepareExecutionResult> executionResult =
- prepareExecution(txn, collection, ws.get(), std::move(cq), defaultPlannerOptions);
+ prepareExecution(opCtx, collection, ws.get(), std::move(cq), defaultPlannerOptions);
if (!executionResult.isOK()) {
return executionResult.getStatus();
}
@@ -947,7 +950,7 @@ StatusWith<unique_ptr<PlanExecutor>> getExecutorUpdate(OperationContext* txn,
updateStageParams.canonicalQuery = cq.get();
root = stdx::make_unique<UpdateStage>(
- txn, updateStageParams, ws.get(), collection, root.release());
+ opCtx, updateStageParams, ws.get(), collection, root.release());
if (!request->getProj().isEmpty()) {
invariant(request->shouldReturnAnyDocs());
@@ -956,7 +959,7 @@ StatusWith<unique_ptr<PlanExecutor>> getExecutorUpdate(OperationContext* txn,
// is invalid to use a positional projection because the query expression need not
// match the array element after the update has been applied.
const bool allowPositional = request->shouldReturnOldDocs();
- StatusWith<unique_ptr<PlanStage>> projStatus = applyProjection(txn,
+ StatusWith<unique_ptr<PlanStage>> projStatus = applyProjection(opCtx,
nsString,
cq.get(),
request->getProj(),
@@ -970,8 +973,8 @@ StatusWith<unique_ptr<PlanExecutor>> getExecutorUpdate(OperationContext* txn,
}
// We must have a tree of stages in order to have a valid plan executor, but the query
- // solution may be null. Takes ownership of all args other than 'collection' and 'txn'
- return PlanExecutor::make(txn,
+ // solution may be null. Takes ownership of all args other than 'collection' and 'opCtx'
+ return PlanExecutor::make(opCtx,
std::move(ws),
std::move(root),
std::move(querySolution),
@@ -984,7 +987,7 @@ StatusWith<unique_ptr<PlanExecutor>> getExecutorUpdate(OperationContext* txn,
// Group
//
-StatusWith<unique_ptr<PlanExecutor>> getExecutorGroup(OperationContext* txn,
+StatusWith<unique_ptr<PlanExecutor>> getExecutorGroup(OperationContext* opCtx,
Collection* collection,
const GroupRequest& request,
PlanExecutor::YieldPolicy yieldPolicy) {
@@ -999,10 +1002,10 @@ StatusWith<unique_ptr<PlanExecutor>> getExecutorGroup(OperationContext* txn,
// reporting machinery always assumes that the root stage for a group operation is a
// GroupStage, so in this case we put a GroupStage on top of an EOFStage.
unique_ptr<PlanStage> root =
- make_unique<GroupStage>(txn, request, ws.get(), new EOFStage(txn));
+ make_unique<GroupStage>(opCtx, request, ws.get(), new EOFStage(opCtx));
return PlanExecutor::make(
- txn, std::move(ws), std::move(root), request.ns.ns(), yieldPolicy);
+ opCtx, std::move(ws), std::move(root), request.ns.ns(), yieldPolicy);
}
const NamespaceString nss(request.ns);
@@ -1011,9 +1014,9 @@ StatusWith<unique_ptr<PlanExecutor>> getExecutorGroup(OperationContext* txn,
qr->setCollation(request.collation);
qr->setExplain(request.explain);
- const ExtensionsCallbackReal extensionsCallback(txn, &nss);
+ const ExtensionsCallbackReal extensionsCallback(opCtx, &nss);
- auto statusWithCQ = CanonicalQuery::canonicalize(txn, std::move(qr), extensionsCallback);
+ auto statusWithCQ = CanonicalQuery::canonicalize(opCtx, std::move(qr), extensionsCallback);
if (!statusWithCQ.isOK()) {
return statusWithCQ.getStatus();
}
@@ -1021,7 +1024,7 @@ StatusWith<unique_ptr<PlanExecutor>> getExecutorGroup(OperationContext* txn,
const size_t defaultPlannerOptions = 0;
StatusWith<PrepareExecutionResult> executionResult = prepareExecution(
- txn, collection, ws.get(), std::move(canonicalQuery), defaultPlannerOptions);
+ opCtx, collection, ws.get(), std::move(canonicalQuery), defaultPlannerOptions);
if (!executionResult.isOK()) {
return executionResult.getStatus();
}
@@ -1031,10 +1034,10 @@ StatusWith<unique_ptr<PlanExecutor>> getExecutorGroup(OperationContext* txn,
invariant(root);
- root = make_unique<GroupStage>(txn, request, ws.get(), root.release());
+ root = make_unique<GroupStage>(opCtx, request, ws.get(), root.release());
// We must have a tree of stages in order to have a valid plan executor, but the query
// solution may be null. Takes ownership of all args other than 'collection'.
- return PlanExecutor::make(txn,
+ return PlanExecutor::make(opCtx,
std::move(ws),
std::move(root),
std::move(querySolution),
@@ -1227,7 +1230,7 @@ BSONObj getDistinctProjection(const std::string& field) {
} // namespace
-StatusWith<unique_ptr<PlanExecutor>> getExecutorCount(OperationContext* txn,
+StatusWith<unique_ptr<PlanExecutor>> getExecutorCount(OperationContext* opCtx,
Collection* collection,
const CountRequest& request,
bool explain,
@@ -1241,11 +1244,11 @@ StatusWith<unique_ptr<PlanExecutor>> getExecutorCount(OperationContext* txn,
qr->setExplain(explain);
auto statusWithCQ = CanonicalQuery::canonicalize(
- txn,
+ opCtx,
std::move(qr),
- collection
- ? static_cast<const ExtensionsCallback&>(ExtensionsCallbackReal(txn, &collection->ns()))
- : static_cast<const ExtensionsCallback&>(ExtensionsCallbackNoop()));
+ collection ? static_cast<const ExtensionsCallback&>(
+ ExtensionsCallbackReal(opCtx, &collection->ns()))
+ : static_cast<const ExtensionsCallback&>(ExtensionsCallbackNoop()));
if (!statusWithCQ.isOK()) {
return statusWithCQ.getStatus();
@@ -1258,9 +1261,9 @@ StatusWith<unique_ptr<PlanExecutor>> getExecutorCount(OperationContext* txn,
const bool useRecordStoreCount = false;
CountStageParams params(request, useRecordStoreCount);
unique_ptr<PlanStage> root = make_unique<CountStage>(
- txn, collection, std::move(params), ws.get(), new EOFStage(txn));
+ opCtx, collection, std::move(params), ws.get(), new EOFStage(opCtx));
return PlanExecutor::make(
- txn, std::move(ws), std::move(root), request.getNs().ns(), yieldPolicy);
+ opCtx, std::move(ws), std::move(root), request.getNs().ns(), yieldPolicy);
}
// If the query is empty, then we can determine the count by just asking the collection
@@ -1275,14 +1278,14 @@ StatusWith<unique_ptr<PlanExecutor>> getExecutorCount(OperationContext* txn,
if (useRecordStoreCount) {
unique_ptr<PlanStage> root =
- make_unique<CountStage>(txn, collection, std::move(params), ws.get(), nullptr);
+ make_unique<CountStage>(opCtx, collection, std::move(params), ws.get(), nullptr);
return PlanExecutor::make(
- txn, std::move(ws), std::move(root), request.getNs().ns(), yieldPolicy);
+ opCtx, std::move(ws), std::move(root), request.getNs().ns(), yieldPolicy);
}
const size_t plannerOptions = QueryPlannerParams::IS_COUNT;
StatusWith<PrepareExecutionResult> executionResult =
- prepareExecution(txn, collection, ws.get(), std::move(cq), plannerOptions);
+ prepareExecution(opCtx, collection, ws.get(), std::move(cq), plannerOptions);
if (!executionResult.isOK()) {
return executionResult.getStatus();
}
@@ -1293,10 +1296,10 @@ StatusWith<unique_ptr<PlanExecutor>> getExecutorCount(OperationContext* txn,
invariant(root);
// Make a CountStage to be the new root.
- root = make_unique<CountStage>(txn, collection, std::move(params), ws.get(), root.release());
+ root = make_unique<CountStage>(opCtx, collection, std::move(params), ws.get(), root.release());
// We must have a tree of stages in order to have a valid plan executor, but the query
- // solution may be NULL. Takes ownership of all args other than 'collection' and 'txn'
- return PlanExecutor::make(txn,
+ // solution may be NULL. Takes ownership of all args other than 'collection' and 'opCtx'
+ return PlanExecutor::make(opCtx,
std::move(ws),
std::move(root),
std::move(querySolution),
@@ -1406,16 +1409,16 @@ bool turnIxscanIntoDistinctIxscan(QuerySolution* soln, const string& field) {
return true;
}
-StatusWith<unique_ptr<PlanExecutor>> getExecutorDistinct(OperationContext* txn,
+StatusWith<unique_ptr<PlanExecutor>> getExecutorDistinct(OperationContext* opCtx,
Collection* collection,
const std::string& ns,
ParsedDistinct* parsedDistinct,
PlanExecutor::YieldPolicy yieldPolicy) {
if (!collection) {
// Treat collections that do not exist as empty collections.
- return PlanExecutor::make(txn,
+ return PlanExecutor::make(opCtx,
make_unique<WorkingSet>(),
- make_unique<EOFStage>(txn),
+ make_unique<EOFStage>(opCtx),
parsedDistinct->releaseQuery(),
collection,
yieldPolicy);
@@ -1435,15 +1438,15 @@ StatusWith<unique_ptr<PlanExecutor>> getExecutorDistinct(OperationContext* txn,
QueryPlannerParams plannerParams;
plannerParams.options = QueryPlannerParams::NO_TABLE_SCAN;
- IndexCatalog::IndexIterator ii = collection->getIndexCatalog()->getIndexIterator(txn, false);
+ IndexCatalog::IndexIterator ii = collection->getIndexCatalog()->getIndexIterator(opCtx, false);
while (ii.more()) {
const IndexDescriptor* desc = ii.next();
IndexCatalogEntry* ice = ii.catalogEntry(desc);
if (desc->keyPattern().hasField(parsedDistinct->getKey())) {
plannerParams.indices.push_back(IndexEntry(desc->keyPattern(),
desc->getAccessMethodName(),
- desc->isMultikey(txn),
- ice->getMultikeyPaths(txn),
+ desc->isMultikey(opCtx),
+ ice->getMultikeyPaths(opCtx),
desc->isSparse(),
desc->unique(),
desc->indexName(),
@@ -1453,12 +1456,12 @@ StatusWith<unique_ptr<PlanExecutor>> getExecutorDistinct(OperationContext* txn,
}
}
- const ExtensionsCallbackReal extensionsCallback(txn, &collection->ns());
+ const ExtensionsCallbackReal extensionsCallback(opCtx, &collection->ns());
// If there are no suitable indices for the distinct hack bail out now into regular planning
// with no projection.
if (plannerParams.indices.empty()) {
- return getExecutor(txn, collection, parsedDistinct->releaseQuery(), yieldPolicy);
+ return getExecutor(opCtx, collection, parsedDistinct->releaseQuery(), yieldPolicy);
}
//
@@ -1473,7 +1476,7 @@ StatusWith<unique_ptr<PlanExecutor>> getExecutorDistinct(OperationContext* txn,
auto qr = stdx::make_unique<QueryRequest>(parsedDistinct->getQuery()->getQueryRequest());
qr->setProj(projection);
- auto statusWithCQ = CanonicalQuery::canonicalize(txn, std::move(qr), extensionsCallback);
+ auto statusWithCQ = CanonicalQuery::canonicalize(opCtx, std::move(qr), extensionsCallback);
if (!statusWithCQ.isOK()) {
return statusWithCQ.getStatus();
}
@@ -1518,13 +1521,13 @@ StatusWith<unique_ptr<PlanExecutor>> getExecutorDistinct(OperationContext* txn,
unique_ptr<WorkingSet> ws = make_unique<WorkingSet>();
PlanStage* rawRoot;
- verify(StageBuilder::build(txn, collection, *cq, *soln, ws.get(), &rawRoot));
+ verify(StageBuilder::build(opCtx, collection, *cq, *soln, ws.get(), &rawRoot));
unique_ptr<PlanStage> root(rawRoot);
LOG(2) << "Using fast distinct: " << redact(cq->toStringShort())
<< ", planSummary: " << redact(Explain::getPlanSummary(root.get()));
- return PlanExecutor::make(txn,
+ return PlanExecutor::make(opCtx,
std::move(ws),
std::move(root),
std::move(soln),
@@ -1537,7 +1540,7 @@ StatusWith<unique_ptr<PlanExecutor>> getExecutorDistinct(OperationContext* txn,
vector<QuerySolution*> solutions;
Status status = QueryPlanner::plan(*cq, plannerParams, &solutions);
if (!status.isOK()) {
- return getExecutor(txn, collection, std::move(cq), yieldPolicy);
+ return getExecutor(opCtx, collection, std::move(cq), yieldPolicy);
}
// We look for a solution that has an ixscan we can turn into a distinctixscan
@@ -1554,13 +1557,14 @@ StatusWith<unique_ptr<PlanExecutor>> getExecutorDistinct(OperationContext* txn,
unique_ptr<WorkingSet> ws = make_unique<WorkingSet>();
unique_ptr<QuerySolution> currentSolution(solutions[i]);
PlanStage* rawRoot;
- verify(StageBuilder::build(txn, collection, *cq, *currentSolution, ws.get(), &rawRoot));
+ verify(
+ StageBuilder::build(opCtx, collection, *cq, *currentSolution, ws.get(), &rawRoot));
unique_ptr<PlanStage> root(rawRoot);
LOG(2) << "Using fast distinct: " << redact(cq->toStringShort())
<< ", planSummary: " << redact(Explain::getPlanSummary(root.get()));
- return PlanExecutor::make(txn,
+ return PlanExecutor::make(opCtx,
std::move(ws),
std::move(root),
std::move(currentSolution),
@@ -1577,7 +1581,7 @@ StatusWith<unique_ptr<PlanExecutor>> getExecutorDistinct(OperationContext* txn,
delete solutions[i];
}
- return getExecutor(txn, collection, parsedDistinct->releaseQuery(), yieldPolicy);
+ return getExecutor(opCtx, collection, parsedDistinct->releaseQuery(), yieldPolicy);
}
} // namespace mongo
diff --git a/src/mongo/db/query/get_executor.h b/src/mongo/db/query/get_executor.h
index 48f7eefcf83..65708d135fd 100644
--- a/src/mongo/db/query/get_executor.h
+++ b/src/mongo/db/query/get_executor.h
@@ -58,7 +58,7 @@ void filterAllowedIndexEntries(const AllowedIndicesFilter& allowedIndicesFilter,
* Fill out the provided 'plannerParams' for the 'canonicalQuery' operating on the collection
* 'collection'. Exposed for testing.
*/
-void fillOutPlannerParams(OperationContext* txn,
+void fillOutPlannerParams(OperationContext* opCtx,
Collection* collection,
CanonicalQuery* canonicalQuery,
QueryPlannerParams* plannerParams);
@@ -72,7 +72,7 @@ void fillOutPlannerParams(OperationContext* txn,
* If the query cannot be executed, returns a Status indicating why.
*/
StatusWith<std::unique_ptr<PlanExecutor>> getExecutor(
- OperationContext* txn,
+ OperationContext* opCtx,
Collection* collection,
std::unique_ptr<CanonicalQuery> canonicalQuery,
PlanExecutor::YieldPolicy yieldPolicy,
@@ -87,7 +87,7 @@ StatusWith<std::unique_ptr<PlanExecutor>> getExecutor(
* If the query cannot be executed, returns a Status indicating why.
*/
StatusWith<std::unique_ptr<PlanExecutor>> getExecutorFind(
- OperationContext* txn,
+ OperationContext* opCtx,
Collection* collection,
const NamespaceString& nss,
std::unique_ptr<CanonicalQuery> canonicalQuery,
@@ -110,7 +110,7 @@ bool turnIxscanIntoDistinctIxscan(QuerySolution* soln, const std::string& field)
* body of method for detail).
*/
StatusWith<std::unique_ptr<PlanExecutor>> getExecutorDistinct(
- OperationContext* txn,
+ OperationContext* opCtx,
Collection* collection,
const std::string& ns,
ParsedDistinct* parsedDistinct,
@@ -123,7 +123,7 @@ StatusWith<std::unique_ptr<PlanExecutor>> getExecutorDistinct(
* As such, with certain covered queries, we can skip the overhead of fetching etc. when
* executing a count.
*/
-StatusWith<std::unique_ptr<PlanExecutor>> getExecutorCount(OperationContext* txn,
+StatusWith<std::unique_ptr<PlanExecutor>> getExecutorCount(OperationContext* opCtx,
Collection* collection,
const CountRequest& request,
bool explain,
@@ -145,7 +145,7 @@ StatusWith<std::unique_ptr<PlanExecutor>> getExecutorCount(OperationContext* txn
*
* If the query cannot be executed, returns a Status indicating why.
*/
-StatusWith<std::unique_ptr<PlanExecutor>> getExecutorDelete(OperationContext* txn,
+StatusWith<std::unique_ptr<PlanExecutor>> getExecutorDelete(OperationContext* opCtx,
OpDebug* opDebug,
Collection* collection,
ParsedDelete* parsedDelete);
@@ -167,7 +167,7 @@ StatusWith<std::unique_ptr<PlanExecutor>> getExecutorDelete(OperationContext* tx
*
* If the query cannot be executed, returns a Status indicating why.
*/
-StatusWith<std::unique_ptr<PlanExecutor>> getExecutorUpdate(OperationContext* txn,
+StatusWith<std::unique_ptr<PlanExecutor>> getExecutorUpdate(OperationContext* opCtx,
OpDebug* opDebug,
Collection* collection,
ParsedUpdate* parsedUpdate);
@@ -180,7 +180,7 @@ StatusWith<std::unique_ptr<PlanExecutor>> getExecutorUpdate(OperationContext* tx
*
* If an executor could not be created, returns a Status indicating why.
*/
-StatusWith<std::unique_ptr<PlanExecutor>> getExecutorGroup(OperationContext* txn,
+StatusWith<std::unique_ptr<PlanExecutor>> getExecutorGroup(OperationContext* opCtx,
Collection* collection,
const GroupRequest& request,
PlanExecutor::YieldPolicy yieldPolicy);
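For orientation, a minimal sketch of how a caller drives the declarations above once the rename lands; it mirrors the four-argument getExecutor call in getExecutorDistinct earlier in this patch, while the filter, yield policy, and error handling are illustrative and 'nss' and 'collection' are assumed to already be in scope.

    // Hypothetical caller (not part of this patch): canonicalize, then ask for an executor.
    auto qr = stdx::make_unique<QueryRequest>(nss);
    qr->setFilter(BSON("a" << 1));
    auto statusWithCQ = CanonicalQuery::canonicalize(
        opCtx, std::move(qr), ExtensionsCallbackDisallowExtensions());
    if (!statusWithCQ.isOK()) {
        return statusWithCQ.getStatus();
    }
    auto statusWithExecutor = getExecutor(
        opCtx, collection, std::move(statusWithCQ.getValue()), PlanExecutor::YIELD_AUTO);
    if (!statusWithExecutor.isOK()) {
        return statusWithExecutor.getStatus();
    }
    std::unique_ptr<PlanExecutor> exec = std::move(statusWithExecutor.getValue());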
diff --git a/src/mongo/db/query/get_executor_test.cpp b/src/mongo/db/query/get_executor_test.cpp
index a05c8e08e13..a1dc1f88648 100644
--- a/src/mongo/db/query/get_executor_test.cpp
+++ b/src/mongo/db/query/get_executor_test.cpp
@@ -59,14 +59,14 @@ unique_ptr<CanonicalQuery> canonicalize(const char* queryStr,
const char* sortStr,
const char* projStr) {
QueryTestServiceContext serviceContext;
- auto txn = serviceContext.makeOperationContext();
+ auto opCtx = serviceContext.makeOperationContext();
auto qr = stdx::make_unique<QueryRequest>(nss);
qr->setFilter(fromjson(queryStr));
qr->setSort(fromjson(sortStr));
qr->setProj(fromjson(projStr));
auto statusWithCQ = CanonicalQuery::canonicalize(
- txn.get(), std::move(qr), ExtensionsCallbackDisallowExtensions());
+ opCtx.get(), std::move(qr), ExtensionsCallbackDisallowExtensions());
ASSERT_OK(statusWithCQ.getStatus());
return std::move(statusWithCQ.getValue());
}
diff --git a/src/mongo/db/query/internal_plans.cpp b/src/mongo/db/query/internal_plans.cpp
index 3889e82031b..ade228a0223 100644
--- a/src/mongo/db/query/internal_plans.cpp
+++ b/src/mongo/db/query/internal_plans.cpp
@@ -41,7 +41,7 @@
namespace mongo {
-std::unique_ptr<PlanExecutor> InternalPlanner::collectionScan(OperationContext* txn,
+std::unique_ptr<PlanExecutor> InternalPlanner::collectionScan(OperationContext* opCtx,
StringData ns,
Collection* collection,
PlanExecutor::YieldPolicy yieldPolicy,
@@ -50,27 +50,27 @@ std::unique_ptr<PlanExecutor> InternalPlanner::collectionScan(OperationContext*
std::unique_ptr<WorkingSet> ws = stdx::make_unique<WorkingSet>();
if (NULL == collection) {
- auto eof = stdx::make_unique<EOFStage>(txn);
+ auto eof = stdx::make_unique<EOFStage>(opCtx);
// Takes ownership of 'ws' and 'eof'.
auto statusWithPlanExecutor =
- PlanExecutor::make(txn, std::move(ws), std::move(eof), ns.toString(), yieldPolicy);
+ PlanExecutor::make(opCtx, std::move(ws), std::move(eof), ns.toString(), yieldPolicy);
invariant(statusWithPlanExecutor.isOK());
return std::move(statusWithPlanExecutor.getValue());
}
invariant(ns == collection->ns().ns());
- auto cs = _collectionScan(txn, ws.get(), collection, direction, startLoc);
+ auto cs = _collectionScan(opCtx, ws.get(), collection, direction, startLoc);
// Takes ownership of 'ws' and 'cs'.
auto statusWithPlanExecutor =
- PlanExecutor::make(txn, std::move(ws), std::move(cs), collection, yieldPolicy);
+ PlanExecutor::make(opCtx, std::move(ws), std::move(cs), collection, yieldPolicy);
invariant(statusWithPlanExecutor.isOK());
return std::move(statusWithPlanExecutor.getValue());
}
std::unique_ptr<PlanExecutor> InternalPlanner::deleteWithCollectionScan(
- OperationContext* txn,
+ OperationContext* opCtx,
Collection* collection,
const DeleteStageParams& params,
PlanExecutor::YieldPolicy yieldPolicy,
@@ -78,18 +78,18 @@ std::unique_ptr<PlanExecutor> InternalPlanner::deleteWithCollectionScan(
const RecordId& startLoc) {
auto ws = stdx::make_unique<WorkingSet>();
- auto root = _collectionScan(txn, ws.get(), collection, direction, startLoc);
+ auto root = _collectionScan(opCtx, ws.get(), collection, direction, startLoc);
- root = stdx::make_unique<DeleteStage>(txn, params, ws.get(), collection, root.release());
+ root = stdx::make_unique<DeleteStage>(opCtx, params, ws.get(), collection, root.release());
auto executor =
- PlanExecutor::make(txn, std::move(ws), std::move(root), collection, yieldPolicy);
+ PlanExecutor::make(opCtx, std::move(ws), std::move(root), collection, yieldPolicy);
invariantOK(executor.getStatus());
return std::move(executor.getValue());
}
-std::unique_ptr<PlanExecutor> InternalPlanner::indexScan(OperationContext* txn,
+std::unique_ptr<PlanExecutor> InternalPlanner::indexScan(OperationContext* opCtx,
const Collection* collection,
const IndexDescriptor* descriptor,
const BSONObj& startKey,
@@ -100,7 +100,7 @@ std::unique_ptr<PlanExecutor> InternalPlanner::indexScan(OperationContext* txn,
int options) {
auto ws = stdx::make_unique<WorkingSet>();
- std::unique_ptr<PlanStage> root = _indexScan(txn,
+ std::unique_ptr<PlanStage> root = _indexScan(opCtx,
ws.get(),
collection,
descriptor,
@@ -111,13 +111,13 @@ std::unique_ptr<PlanExecutor> InternalPlanner::indexScan(OperationContext* txn,
options);
auto executor =
- PlanExecutor::make(txn, std::move(ws), std::move(root), collection, yieldPolicy);
+ PlanExecutor::make(opCtx, std::move(ws), std::move(root), collection, yieldPolicy);
invariantOK(executor.getStatus());
return std::move(executor.getValue());
}
std::unique_ptr<PlanExecutor> InternalPlanner::deleteWithIndexScan(
- OperationContext* txn,
+ OperationContext* opCtx,
Collection* collection,
const DeleteStageParams& params,
const IndexDescriptor* descriptor,
@@ -128,7 +128,7 @@ std::unique_ptr<PlanExecutor> InternalPlanner::deleteWithIndexScan(
Direction direction) {
auto ws = stdx::make_unique<WorkingSet>();
- std::unique_ptr<PlanStage> root = _indexScan(txn,
+ std::unique_ptr<PlanStage> root = _indexScan(opCtx,
ws.get(),
collection,
descriptor,
@@ -138,15 +138,15 @@ std::unique_ptr<PlanExecutor> InternalPlanner::deleteWithIndexScan(
direction,
InternalPlanner::IXSCAN_FETCH);
- root = stdx::make_unique<DeleteStage>(txn, params, ws.get(), collection, root.release());
+ root = stdx::make_unique<DeleteStage>(opCtx, params, ws.get(), collection, root.release());
auto executor =
- PlanExecutor::make(txn, std::move(ws), std::move(root), collection, yieldPolicy);
+ PlanExecutor::make(opCtx, std::move(ws), std::move(root), collection, yieldPolicy);
invariantOK(executor.getStatus());
return std::move(executor.getValue());
}
-std::unique_ptr<PlanStage> InternalPlanner::_collectionScan(OperationContext* txn,
+std::unique_ptr<PlanStage> InternalPlanner::_collectionScan(OperationContext* opCtx,
WorkingSet* ws,
const Collection* collection,
Direction direction,
@@ -163,10 +163,10 @@ std::unique_ptr<PlanStage> InternalPlanner::_collectionScan(OperationContext* tx
params.direction = CollectionScanParams::BACKWARD;
}
- return stdx::make_unique<CollectionScan>(txn, params, ws, nullptr);
+ return stdx::make_unique<CollectionScan>(opCtx, params, ws, nullptr);
}
-std::unique_ptr<PlanStage> InternalPlanner::_indexScan(OperationContext* txn,
+std::unique_ptr<PlanStage> InternalPlanner::_indexScan(OperationContext* opCtx,
WorkingSet* ws,
const Collection* collection,
const IndexDescriptor* descriptor,
@@ -186,10 +186,10 @@ std::unique_ptr<PlanStage> InternalPlanner::_indexScan(OperationContext* txn,
params.bounds.endKey = endKey;
params.bounds.boundInclusion = boundInclusion;
- std::unique_ptr<PlanStage> root = stdx::make_unique<IndexScan>(txn, params, ws, nullptr);
+ std::unique_ptr<PlanStage> root = stdx::make_unique<IndexScan>(opCtx, params, ws, nullptr);
if (InternalPlanner::IXSCAN_FETCH & options) {
- root = stdx::make_unique<FetchStage>(txn, ws, root.release(), nullptr, collection);
+ root = stdx::make_unique<FetchStage>(opCtx, ws, root.release(), nullptr, collection);
}
return root;
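For context, the scan helpers above are the internal (non-user-query) entry points; a rough usage sketch follows, where the forward direction and manual yield policy are assumptions rather than anything this patch prescribes, and 'opCtx' and a non-null 'collection' are taken as given.

    // Hypothetical internal caller walking a whole collection.
    auto exec = InternalPlanner::collectionScan(opCtx,
                                                collection->ns().ns(),
                                                collection,
                                                PlanExecutor::YIELD_MANUAL,
                                                InternalPlanner::FORWARD);
    BSONObj doc;
    while (PlanExecutor::ADVANCED == exec->getNext(&doc, nullptr)) {
        // Each 'doc' is one document produced by the scan.
    }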
diff --git a/src/mongo/db/query/internal_plans.h b/src/mongo/db/query/internal_plans.h
index 40b42f75df1..f9da8e89f70 100644
--- a/src/mongo/db/query/internal_plans.h
+++ b/src/mongo/db/query/internal_plans.h
@@ -66,7 +66,7 @@ public:
/**
* Returns a collection scan. Caller owns pointer.
*/
- static std::unique_ptr<PlanExecutor> collectionScan(OperationContext* txn,
+ static std::unique_ptr<PlanExecutor> collectionScan(OperationContext* opCtx,
StringData ns,
Collection* collection,
PlanExecutor::YieldPolicy yieldPolicy,
@@ -77,7 +77,7 @@ public:
* Returns a FETCH => DELETE plan.
*/
static std::unique_ptr<PlanExecutor> deleteWithCollectionScan(
- OperationContext* txn,
+ OperationContext* opCtx,
Collection* collection,
const DeleteStageParams& params,
PlanExecutor::YieldPolicy yieldPolicy,
@@ -87,7 +87,7 @@ public:
/**
* Returns an index scan. Caller owns returned pointer.
*/
- static std::unique_ptr<PlanExecutor> indexScan(OperationContext* txn,
+ static std::unique_ptr<PlanExecutor> indexScan(OperationContext* opCtx,
const Collection* collection,
const IndexDescriptor* descriptor,
const BSONObj& startKey,
@@ -100,7 +100,7 @@ public:
/**
* Returns an IXSCAN => FETCH => DELETE plan.
*/
- static std::unique_ptr<PlanExecutor> deleteWithIndexScan(OperationContext* txn,
+ static std::unique_ptr<PlanExecutor> deleteWithIndexScan(OperationContext* opCtx,
Collection* collection,
const DeleteStageParams& params,
const IndexDescriptor* descriptor,
@@ -116,7 +116,7 @@ private:
*
* Used as a helper for collectionScan() and deleteWithCollectionScan().
*/
- static std::unique_ptr<PlanStage> _collectionScan(OperationContext* txn,
+ static std::unique_ptr<PlanStage> _collectionScan(OperationContext* opCtx,
WorkingSet* ws,
const Collection* collection,
Direction direction,
@@ -127,7 +127,7 @@ private:
*
* Used as a helper for indexScan() and deleteWithIndexScan().
*/
- static std::unique_ptr<PlanStage> _indexScan(OperationContext* txn,
+ static std::unique_ptr<PlanStage> _indexScan(OperationContext* opCtx,
WorkingSet* ws,
const Collection* collection,
const IndexDescriptor* descriptor,
diff --git a/src/mongo/db/query/parsed_distinct.cpp b/src/mongo/db/query/parsed_distinct.cpp
index 02de55ed4e8..8bd80ff405e 100644
--- a/src/mongo/db/query/parsed_distinct.cpp
+++ b/src/mongo/db/query/parsed_distinct.cpp
@@ -97,7 +97,7 @@ StatusWith<BSONObj> ParsedDistinct::asAggregationCommand() const {
return aggregationBuilder.obj();
}
-StatusWith<ParsedDistinct> ParsedDistinct::parse(OperationContext* txn,
+StatusWith<ParsedDistinct> ParsedDistinct::parse(OperationContext* opCtx,
const NamespaceString& nss,
const BSONObj& cmdObj,
const ExtensionsCallback& extensionsCallback,
@@ -142,7 +142,7 @@ StatusWith<ParsedDistinct> ParsedDistinct::parse(OperationContext* txn,
qr->setExplain(isExplain);
- auto cq = CanonicalQuery::canonicalize(txn, std::move(qr), extensionsCallback);
+ auto cq = CanonicalQuery::canonicalize(opCtx, std::move(qr), extensionsCallback);
if (!cq.isOK()) {
return cq.getStatus();
}
diff --git a/src/mongo/db/query/parsed_distinct.h b/src/mongo/db/query/parsed_distinct.h
index 8e07116809b..eb1c4e2b0ab 100644
--- a/src/mongo/db/query/parsed_distinct.h
+++ b/src/mongo/db/query/parsed_distinct.h
@@ -78,7 +78,7 @@ public:
* 'extensionsCallback' allows for additional mongod parsing. If called from mongos, an
* ExtensionsCallbackNoop object should be passed to skip this parsing.
*/
- static StatusWith<ParsedDistinct> parse(OperationContext* txn,
+ static StatusWith<ParsedDistinct> parse(OperationContext* opCtx,
const NamespaceString& nss,
const BSONObj& cmdObj,
const ExtensionsCallback& extensionsCallback,
diff --git a/src/mongo/db/query/parsed_distinct_test.cpp b/src/mongo/db/query/parsed_distinct_test.cpp
index f543a13b95a..d53457e500e 100644
--- a/src/mongo/db/query/parsed_distinct_test.cpp
+++ b/src/mongo/db/query/parsed_distinct_test.cpp
@@ -46,9 +46,9 @@ static const bool isExplain = true;
TEST(ParsedDistinctTest, ConvertToAggregationNoQuery) {
QueryTestServiceContext serviceContext;
auto uniqueTxn = serviceContext.makeOperationContext();
- OperationContext* txn = uniqueTxn.get();
+ OperationContext* opCtx = uniqueTxn.get();
- auto pd = ParsedDistinct::parse(txn,
+ auto pd = ParsedDistinct::parse(opCtx,
testns,
fromjson("{distinct: 'testcoll', key: 'x'}"),
ExtensionsCallbackDisallowExtensions(),
@@ -82,9 +82,9 @@ TEST(ParsedDistinctTest, ConvertToAggregationNoQuery) {
TEST(ParsedDistinctTest, ConvertToAggregationWithQuery) {
QueryTestServiceContext serviceContext;
auto uniqueTxn = serviceContext.makeOperationContext();
- OperationContext* txn = uniqueTxn.get();
+ OperationContext* opCtx = uniqueTxn.get();
- auto pd = ParsedDistinct::parse(txn,
+ auto pd = ParsedDistinct::parse(opCtx,
testns,
fromjson("{distinct: 'testcoll', key: 'y', query: {z: 7}}"),
ExtensionsCallbackDisallowExtensions(),
@@ -119,9 +119,9 @@ TEST(ParsedDistinctTest, ConvertToAggregationWithQuery) {
TEST(ParsedDistinctTest, ConvertToAggregationWithExplain) {
QueryTestServiceContext serviceContext;
auto uniqueTxn = serviceContext.makeOperationContext();
- OperationContext* txn = uniqueTxn.get();
+ OperationContext* opCtx = uniqueTxn.get();
- auto pd = ParsedDistinct::parse(txn,
+ auto pd = ParsedDistinct::parse(opCtx,
testns,
fromjson("{distinct: 'testcoll', key: 'x'}"),
ExtensionsCallbackDisallowExtensions(),
diff --git a/src/mongo/db/query/plan_cache_test.cpp b/src/mongo/db/query/plan_cache_test.cpp
index 752afe99fa2..5672f6cc1af 100644
--- a/src/mongo/db/query/plan_cache_test.cpp
+++ b/src/mongo/db/query/plan_cache_test.cpp
@@ -67,12 +67,12 @@ static const NamespaceString nss("test.collection");
*/
unique_ptr<CanonicalQuery> canonicalize(const BSONObj& queryObj) {
QueryTestServiceContext serviceContext;
- auto txn = serviceContext.makeOperationContext();
+ auto opCtx = serviceContext.makeOperationContext();
auto qr = stdx::make_unique<QueryRequest>(nss);
qr->setFilter(queryObj);
auto statusWithCQ = CanonicalQuery::canonicalize(
- txn.get(), std::move(qr), ExtensionsCallbackDisallowExtensions());
+ opCtx.get(), std::move(qr), ExtensionsCallbackDisallowExtensions());
ASSERT_OK(statusWithCQ.getStatus());
return std::move(statusWithCQ.getValue());
}
@@ -87,7 +87,7 @@ unique_ptr<CanonicalQuery> canonicalize(const char* queryStr,
const char* projStr,
const char* collationStr) {
QueryTestServiceContext serviceContext;
- auto txn = serviceContext.makeOperationContext();
+ auto opCtx = serviceContext.makeOperationContext();
auto qr = stdx::make_unique<QueryRequest>(nss);
qr->setFilter(fromjson(queryStr));
@@ -95,7 +95,7 @@ unique_ptr<CanonicalQuery> canonicalize(const char* queryStr,
qr->setProj(fromjson(projStr));
qr->setCollation(fromjson(collationStr));
auto statusWithCQ = CanonicalQuery::canonicalize(
- txn.get(), std::move(qr), ExtensionsCallbackDisallowExtensions());
+ opCtx.get(), std::move(qr), ExtensionsCallbackDisallowExtensions());
ASSERT_OK(statusWithCQ.getStatus());
return std::move(statusWithCQ.getValue());
}
@@ -109,7 +109,7 @@ unique_ptr<CanonicalQuery> canonicalize(const char* queryStr,
const char* minStr,
const char* maxStr) {
QueryTestServiceContext serviceContext;
- auto txn = serviceContext.makeOperationContext();
+ auto opCtx = serviceContext.makeOperationContext();
auto qr = stdx::make_unique<QueryRequest>(nss);
qr->setFilter(fromjson(queryStr));
@@ -125,7 +125,7 @@ unique_ptr<CanonicalQuery> canonicalize(const char* queryStr,
qr->setMin(fromjson(minStr));
qr->setMax(fromjson(maxStr));
auto statusWithCQ = CanonicalQuery::canonicalize(
- txn.get(), std::move(qr), ExtensionsCallbackDisallowExtensions());
+ opCtx.get(), std::move(qr), ExtensionsCallbackDisallowExtensions());
ASSERT_OK(statusWithCQ.getStatus());
return std::move(statusWithCQ.getValue());
}
@@ -141,7 +141,7 @@ unique_ptr<CanonicalQuery> canonicalize(const char* queryStr,
bool snapshot,
bool explain) {
QueryTestServiceContext serviceContext;
- auto txn = serviceContext.makeOperationContext();
+ auto opCtx = serviceContext.makeOperationContext();
auto qr = stdx::make_unique<QueryRequest>(nss);
qr->setFilter(fromjson(queryStr));
@@ -159,7 +159,7 @@ unique_ptr<CanonicalQuery> canonicalize(const char* queryStr,
qr->setSnapshot(snapshot);
qr->setExplain(explain);
auto statusWithCQ = CanonicalQuery::canonicalize(
- txn.get(), std::move(qr), ExtensionsCallbackDisallowExtensions());
+ opCtx.get(), std::move(qr), ExtensionsCallbackDisallowExtensions());
ASSERT_OK(statusWithCQ.getStatus());
return std::move(statusWithCQ.getValue());
}
@@ -539,7 +539,7 @@ protected:
const BSONObj& maxObj,
bool snapshot) {
QueryTestServiceContext serviceContext;
- auto txn = serviceContext.makeOperationContext();
+ auto opCtx = serviceContext.makeOperationContext();
// Clean up any previous state from a call to runQueryFull or runQueryAsCommand.
for (vector<QuerySolution*>::iterator it = solns.begin(); it != solns.end(); ++it) {
@@ -563,7 +563,7 @@ protected:
qr->setMax(maxObj);
qr->setSnapshot(snapshot);
auto statusWithCQ = CanonicalQuery::canonicalize(
- txn.get(), std::move(qr), ExtensionsCallbackDisallowExtensions());
+ opCtx.get(), std::move(qr), ExtensionsCallbackDisallowExtensions());
ASSERT_OK(statusWithCQ.getStatus());
Status s = QueryPlanner::plan(*statusWithCQ.getValue(), params, &solns);
ASSERT_OK(s);
@@ -571,7 +571,7 @@ protected:
void runQueryAsCommand(const BSONObj& cmdObj) {
QueryTestServiceContext serviceContext;
- auto txn = serviceContext.makeOperationContext();
+ auto opCtx = serviceContext.makeOperationContext();
// Clean up any previous state from a call to runQueryFull or runQueryAsCommand.
for (vector<QuerySolution*>::iterator it = solns.begin(); it != solns.end(); ++it) {
@@ -585,7 +585,7 @@ protected:
assertGet(QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain)));
auto statusWithCQ = CanonicalQuery::canonicalize(
- txn.get(), std::move(qr), ExtensionsCallbackDisallowExtensions());
+ opCtx.get(), std::move(qr), ExtensionsCallbackDisallowExtensions());
ASSERT_OK(statusWithCQ.getStatus());
Status s = QueryPlanner::plan(*statusWithCQ.getValue(), params, &solns);
ASSERT_OK(s);
@@ -658,7 +658,7 @@ protected:
const BSONObj& collation,
const QuerySolution& soln) const {
QueryTestServiceContext serviceContext;
- auto txn = serviceContext.makeOperationContext();
+ auto opCtx = serviceContext.makeOperationContext();
auto qr = stdx::make_unique<QueryRequest>(nss);
qr->setFilter(query);
@@ -666,7 +666,7 @@ protected:
qr->setProj(proj);
qr->setCollation(collation);
auto statusWithCQ = CanonicalQuery::canonicalize(
- txn.get(), std::move(qr), ExtensionsCallbackDisallowExtensions());
+ opCtx.get(), std::move(qr), ExtensionsCallbackDisallowExtensions());
ASSERT_OK(statusWithCQ.getStatus());
unique_ptr<CanonicalQuery> scopedCq = std::move(statusWithCQ.getValue());
diff --git a/src/mongo/db/query/plan_executor.cpp b/src/mongo/db/query/plan_executor.cpp
index d162e998bab..dd3f66164f4 100644
--- a/src/mongo/db/query/plan_executor.cpp
+++ b/src/mongo/db/query/plan_executor.cpp
@@ -134,7 +134,7 @@ StatusWith<unique_ptr<PlanExecutor>> PlanExecutor::make(OperationContext* opCtx,
}
// static
-StatusWith<unique_ptr<PlanExecutor>> PlanExecutor::make(OperationContext* txn,
+StatusWith<unique_ptr<PlanExecutor>> PlanExecutor::make(OperationContext* opCtx,
unique_ptr<WorkingSet> ws,
unique_ptr<PlanStage> rt,
unique_ptr<QuerySolution> qs,
@@ -143,7 +143,7 @@ StatusWith<unique_ptr<PlanExecutor>> PlanExecutor::make(OperationContext* txn,
const string& ns,
YieldPolicy yieldPolicy) {
unique_ptr<PlanExecutor> exec(new PlanExecutor(
- txn, std::move(ws), std::move(rt), std::move(qs), std::move(cq), collection, ns));
+ opCtx, std::move(ws), std::move(rt), std::move(qs), std::move(cq), collection, ns));
// Perform plan selection, if necessary.
Status status = exec->pickBestPlan(yieldPolicy, collection);
@@ -322,21 +322,21 @@ void PlanExecutor::detachFromOperationContext() {
_everDetachedFromOperationContext = true;
}
-void PlanExecutor::reattachToOperationContext(OperationContext* txn) {
+void PlanExecutor::reattachToOperationContext(OperationContext* opCtx) {
invariant(_currentState == kDetached);
// We're reattaching for a getMore now. Reset the yield timer in order to prevent from
// yielding again right away.
_yieldPolicy->resetTimer();
- _opCtx = txn;
- _root->reattachToOperationContext(txn);
+ _opCtx = opCtx;
+ _root->reattachToOperationContext(opCtx);
_currentState = kSaved;
}
-void PlanExecutor::invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
+void PlanExecutor::invalidate(OperationContext* opCtx, const RecordId& dl, InvalidationType type) {
if (!killed()) {
- _root->invalidate(txn, dl, type);
+ _root->invalidate(opCtx, dl, type);
}
}
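The detach/reattach pair touched above is the hand-off a cursor makes between the operation that created it and a later getMore; a minimal sketch of the intended ordering, with 'exec' and 'newOpCtx' standing in for whatever the cursor machinery actually holds.

    // Hypothetical lifecycle (not from this patch): park the executor, then revive it later.
    exec->saveState();
    exec->detachFromOperationContext();   // the creating OperationContext may now go away
    // ... a getMore arrives later with a fresh OperationContext* newOpCtx ...
    exec->reattachToOperationContext(newOpCtx);
    exec->restoreState();                 // re-establish storage-engine state before iterating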
diff --git a/src/mongo/db/query/plan_executor.h b/src/mongo/db/query/plan_executor.h
index 207fc6765c2..1e97963f67b 100644
--- a/src/mongo/db/query/plan_executor.h
+++ b/src/mongo/db/query/plan_executor.h
@@ -336,7 +336,7 @@ public:
* state. As such, if the plan yields, it must be notified of relevant writes so that
* we can ensure that it doesn't crash if we try to access invalid state.
*/
- void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
+ void invalidate(OperationContext* opCtx, const RecordId& dl, InvalidationType type);
/**
* Helper method to aid in displaying an ExecState for debug or other recreational purposes.
@@ -413,7 +413,7 @@ private:
/**
* Public factory methods delegate to this private factory to do their work.
*/
- static StatusWith<std::unique_ptr<PlanExecutor>> make(OperationContext* txn,
+ static StatusWith<std::unique_ptr<PlanExecutor>> make(OperationContext* opCtx,
std::unique_ptr<WorkingSet> ws,
std::unique_ptr<PlanStage> rt,
std::unique_ptr<QuerySolution> qs,
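The invalidate() hook declared above is how write paths notify a yielded plan about documents they are about to remove or mutate; a one-line sketch, assuming 'exec' is a registered executor and 'recordId' names the affected document.

    // Hypothetical notification from a delete path (illustrative only).
    exec->invalidate(opCtx, recordId, INVALIDATION_DELETION);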
diff --git a/src/mongo/db/query/query_planner_test.cpp b/src/mongo/db/query/query_planner_test.cpp
index de3607bf0af..022ce5f533b 100644
--- a/src/mongo/db/query/query_planner_test.cpp
+++ b/src/mongo/db/query/query_planner_test.cpp
@@ -4249,8 +4249,8 @@ TEST_F(QueryPlannerTest, CacheDataFromTaggedTreeFailsOnBadInput) {
auto qr = stdx::make_unique<QueryRequest>(NamespaceString("test.collection"));
qr->setFilter(BSON("a" << 3));
- auto statusWithCQ =
- CanonicalQuery::canonicalize(txn(), std::move(qr), ExtensionsCallbackDisallowExtensions());
+ auto statusWithCQ = CanonicalQuery::canonicalize(
+ opCtx(), std::move(qr), ExtensionsCallbackDisallowExtensions());
ASSERT_OK(statusWithCQ.getStatus());
std::unique_ptr<CanonicalQuery> scopedCq = std::move(statusWithCQ.getValue());
scopedCq->root()->setTag(new IndexTag(1));
@@ -4265,8 +4265,8 @@ TEST_F(QueryPlannerTest, TagAccordingToCacheFailsOnBadInput) {
auto qr = stdx::make_unique<QueryRequest>(nss);
qr->setFilter(BSON("a" << 3));
- auto statusWithCQ =
- CanonicalQuery::canonicalize(txn(), std::move(qr), ExtensionsCallbackDisallowExtensions());
+ auto statusWithCQ = CanonicalQuery::canonicalize(
+ opCtx(), std::move(qr), ExtensionsCallbackDisallowExtensions());
ASSERT_OK(statusWithCQ.getStatus());
std::unique_ptr<CanonicalQuery> scopedCq = std::move(statusWithCQ.getValue());
@@ -4296,7 +4296,7 @@ TEST_F(QueryPlannerTest, TagAccordingToCacheFailsOnBadInput) {
auto newQR = stdx::make_unique<QueryRequest>(nss);
newQR->setFilter(BSON("a" << 3));
statusWithCQ = CanonicalQuery::canonicalize(
- txn(), std::move(newQR), ExtensionsCallbackDisallowExtensions());
+ opCtx(), std::move(newQR), ExtensionsCallbackDisallowExtensions());
ASSERT_OK(statusWithCQ.getStatus());
scopedCq = std::move(statusWithCQ.getValue());
diff --git a/src/mongo/db/query/query_planner_test_fixture.cpp b/src/mongo/db/query/query_planner_test_fixture.cpp
index bb97ce06bde..4e1a051b8a0 100644
--- a/src/mongo/db/query/query_planner_test_fixture.cpp
+++ b/src/mongo/db/query/query_planner_test_fixture.cpp
@@ -56,7 +56,7 @@ void QueryPlannerTest::setUp() {
addIndex(BSON("_id" << 1));
}
-OperationContext* QueryPlannerTest::txn() {
+OperationContext* QueryPlannerTest::opCtx() {
return opCtx.get();
}
@@ -253,7 +253,7 @@ void QueryPlannerTest::runQueryFull(const BSONObj& query,
qr->setMax(maxObj);
qr->setSnapshot(snapshot);
auto statusWithCQ =
- CanonicalQuery::canonicalize(txn(), std::move(qr), ExtensionsCallbackNoop());
+ CanonicalQuery::canonicalize(opCtx(), std::move(qr), ExtensionsCallbackNoop());
ASSERT_OK(statusWithCQ.getStatus());
cq = std::move(statusWithCQ.getValue());
@@ -330,7 +330,7 @@ void QueryPlannerTest::runInvalidQueryFull(const BSONObj& query,
qr->setMax(maxObj);
qr->setSnapshot(snapshot);
auto statusWithCQ =
- CanonicalQuery::canonicalize(txn(), std::move(qr), ExtensionsCallbackNoop());
+ CanonicalQuery::canonicalize(opCtx(), std::move(qr), ExtensionsCallbackNoop());
ASSERT_OK(statusWithCQ.getStatus());
cq = std::move(statusWithCQ.getValue());
@@ -349,7 +349,7 @@ void QueryPlannerTest::runQueryAsCommand(const BSONObj& cmdObj) {
assertGet(QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain)));
auto statusWithCQ =
- CanonicalQuery::canonicalize(txn(), std::move(qr), ExtensionsCallbackNoop());
+ CanonicalQuery::canonicalize(opCtx(), std::move(qr), ExtensionsCallbackNoop());
ASSERT_OK(statusWithCQ.getStatus());
cq = std::move(statusWithCQ.getValue());
@@ -368,7 +368,7 @@ void QueryPlannerTest::runInvalidQueryAsCommand(const BSONObj& cmdObj) {
assertGet(QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain)));
auto statusWithCQ =
- CanonicalQuery::canonicalize(txn(), std::move(qr), ExtensionsCallbackNoop());
+ CanonicalQuery::canonicalize(opCtx(), std::move(qr), ExtensionsCallbackNoop());
ASSERT_OK(statusWithCQ.getStatus());
cq = std::move(statusWithCQ.getValue());
diff --git a/src/mongo/db/query/query_planner_test_fixture.h b/src/mongo/db/query/query_planner_test_fixture.h
index 42738876785..18a586bea61 100644
--- a/src/mongo/db/query/query_planner_test_fixture.h
+++ b/src/mongo/db/query/query_planner_test_fixture.h
@@ -48,7 +48,7 @@ class QueryPlannerTest : public mongo::unittest::Test {
protected:
void setUp();
- OperationContext* txn();
+ OperationContext* opCtx();
//
// Build up test.
diff --git a/src/mongo/db/query/query_yield.cpp b/src/mongo/db/query/query_yield.cpp
index 6548a662a17..5f43cf819ec 100644
--- a/src/mongo/db/query/query_yield.cpp
+++ b/src/mongo/db/query/query_yield.cpp
@@ -45,7 +45,7 @@ MONGO_FP_DECLARE(setYieldAllLocksWait);
} // namespace
// static
-void QueryYield::yieldAllLocks(OperationContext* txn,
+void QueryYield::yieldAllLocks(OperationContext* opCtx,
RecordFetcher* fetcher,
const std::string& planExecNS) {
// Things have to happen here in a specific order:
@@ -55,12 +55,12 @@ void QueryYield::yieldAllLocks(OperationContext* txn,
// 4) Touch the record we're yielding on, if there is one (RecordFetcher::fetch)
// 5) Reacquire lock mgr locks
- Locker* locker = txn->lockState();
+ Locker* locker = opCtx->lockState();
Locker::LockSnapshot snapshot;
if (fetcher) {
- fetcher->setup(txn);
+ fetcher->setup(opCtx);
}
// Nothing was unlocked, just return, yielding is pointless.
@@ -70,7 +70,7 @@ void QueryYield::yieldAllLocks(OperationContext* txn,
// Top-level locks are freed, release any potential low-level (storage engine-specific
// locks). If we are yielding, we are at a safe place to do so.
- txn->recoveryUnit()->abandonSnapshot();
+ opCtx->recoveryUnit()->abandonSnapshot();
MONGO_FAIL_POINT_PAUSE_WHILE_SET(setYieldAllLocksHang);
@@ -83,7 +83,7 @@ void QueryYield::yieldAllLocks(OperationContext* txn,
}
// Track the number of yields in CurOp.
- CurOp::get(txn)->yielded();
+ CurOp::get(opCtx)->yielded();
if (fetcher) {
fetcher->fetch();
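The helper above is what yield policies call to drop and reacquire everything in the order spelled out in its comment; a short sketch of a call site, where a null fetcher simply means there is no document to page in during the yield, and 'opCtx' and 'exec' are assumed to be in scope.

    // Hypothetical call from a yield policy.
    std::unique_ptr<RecordFetcher> fetcher;  // left null: nothing needs to be fetched mid-yield
    QueryYield::yieldAllLocks(opCtx, fetcher.get(), exec->ns());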
diff --git a/src/mongo/db/query/query_yield.h b/src/mongo/db/query/query_yield.h
index 796bf6e797c..a42e29800c9 100644
--- a/src/mongo/db/query/query_yield.h
+++ b/src/mongo/db/query/query_yield.h
@@ -48,7 +48,7 @@ public:
*
* If in a nested context (eg DBDirectClient), does nothing.
*/
- static void yieldAllLocks(OperationContext* txn,
+ static void yieldAllLocks(OperationContext* opCtx,
RecordFetcher* fetcher,
const std::string& planExecNS);
};
diff --git a/src/mongo/db/query/stage_builder.cpp b/src/mongo/db/query/stage_builder.cpp
index e790325a439..dc07281f5da 100644
--- a/src/mongo/db/query/stage_builder.cpp
+++ b/src/mongo/db/query/stage_builder.cpp
@@ -65,7 +65,7 @@ namespace mongo {
using std::unique_ptr;
using stdx::make_unique;
-PlanStage* buildStages(OperationContext* txn,
+PlanStage* buildStages(OperationContext* opCtx,
Collection* collection,
const CanonicalQuery& cq,
const QuerySolution& qsol,
@@ -79,7 +79,7 @@ PlanStage* buildStages(OperationContext* txn,
params.direction =
(csn->direction == 1) ? CollectionScanParams::FORWARD : CollectionScanParams::BACKWARD;
params.maxScan = csn->maxScan;
- return new CollectionScan(txn, params, ws, csn->filter.get());
+ return new CollectionScan(opCtx, params, ws, csn->filter.get());
} else if (STAGE_IXSCAN == root->getType()) {
const IndexScanNode* ixn = static_cast<const IndexScanNode*>(root);
@@ -90,24 +90,24 @@ PlanStage* buildStages(OperationContext* txn,
IndexScanParams params;
- params.descriptor = collection->getIndexCatalog()->findIndexByName(txn, ixn->index.name);
+ params.descriptor = collection->getIndexCatalog()->findIndexByName(opCtx, ixn->index.name);
invariant(params.descriptor);
params.bounds = ixn->bounds;
params.direction = ixn->direction;
params.maxScan = ixn->maxScan;
params.addKeyMetadata = ixn->addKeyMetadata;
- return new IndexScan(txn, params, ws, ixn->filter.get());
+ return new IndexScan(opCtx, params, ws, ixn->filter.get());
} else if (STAGE_FETCH == root->getType()) {
const FetchNode* fn = static_cast<const FetchNode*>(root);
- PlanStage* childStage = buildStages(txn, collection, cq, qsol, fn->children[0], ws);
+ PlanStage* childStage = buildStages(opCtx, collection, cq, qsol, fn->children[0], ws);
if (NULL == childStage) {
return NULL;
}
- return new FetchStage(txn, ws, childStage, fn->filter.get(), collection);
+ return new FetchStage(opCtx, ws, childStage, fn->filter.get(), collection);
} else if (STAGE_SORT == root->getType()) {
const SortNode* sn = static_cast<const SortNode*>(root);
- PlanStage* childStage = buildStages(txn, collection, cq, qsol, sn->children[0], ws);
+ PlanStage* childStage = buildStages(opCtx, collection, cq, qsol, sn->children[0], ws);
if (NULL == childStage) {
return NULL;
}
@@ -115,23 +115,24 @@ PlanStage* buildStages(OperationContext* txn,
params.collection = collection;
params.pattern = sn->pattern;
params.limit = sn->limit;
- return new SortStage(txn, params, ws, childStage);
+ return new SortStage(opCtx, params, ws, childStage);
} else if (STAGE_SORT_KEY_GENERATOR == root->getType()) {
const SortKeyGeneratorNode* keyGenNode = static_cast<const SortKeyGeneratorNode*>(root);
- PlanStage* childStage = buildStages(txn, collection, cq, qsol, keyGenNode->children[0], ws);
+ PlanStage* childStage =
+ buildStages(opCtx, collection, cq, qsol, keyGenNode->children[0], ws);
if (NULL == childStage) {
return NULL;
}
return new SortKeyGeneratorStage(
- txn, childStage, ws, keyGenNode->sortSpec, keyGenNode->queryObj, cq.getCollator());
+ opCtx, childStage, ws, keyGenNode->sortSpec, keyGenNode->queryObj, cq.getCollator());
} else if (STAGE_PROJECTION == root->getType()) {
const ProjectionNode* pn = static_cast<const ProjectionNode*>(root);
- PlanStage* childStage = buildStages(txn, collection, cq, qsol, pn->children[0], ws);
+ PlanStage* childStage = buildStages(opCtx, collection, cq, qsol, pn->children[0], ws);
if (NULL == childStage) {
return NULL;
}
- ProjectionStageParams params(ExtensionsCallbackReal(txn, &collection->ns()));
+ ProjectionStageParams params(ExtensionsCallbackReal(opCtx, &collection->ns()));
params.projObj = pn->projection;
params.collator = cq.getCollator();
@@ -148,26 +149,26 @@ PlanStage* buildStages(OperationContext* txn,
params.projImpl = ProjectionStageParams::SIMPLE_DOC;
}
- return new ProjectionStage(txn, params, ws, childStage);
+ return new ProjectionStage(opCtx, params, ws, childStage);
} else if (STAGE_LIMIT == root->getType()) {
const LimitNode* ln = static_cast<const LimitNode*>(root);
- PlanStage* childStage = buildStages(txn, collection, cq, qsol, ln->children[0], ws);
+ PlanStage* childStage = buildStages(opCtx, collection, cq, qsol, ln->children[0], ws);
if (NULL == childStage) {
return NULL;
}
- return new LimitStage(txn, ln->limit, ws, childStage);
+ return new LimitStage(opCtx, ln->limit, ws, childStage);
} else if (STAGE_SKIP == root->getType()) {
const SkipNode* sn = static_cast<const SkipNode*>(root);
- PlanStage* childStage = buildStages(txn, collection, cq, qsol, sn->children[0], ws);
+ PlanStage* childStage = buildStages(opCtx, collection, cq, qsol, sn->children[0], ws);
if (NULL == childStage) {
return NULL;
}
- return new SkipStage(txn, sn->skip, ws, childStage);
+ return new SkipStage(opCtx, sn->skip, ws, childStage);
} else if (STAGE_AND_HASH == root->getType()) {
const AndHashNode* ahn = static_cast<const AndHashNode*>(root);
- auto ret = make_unique<AndHashStage>(txn, ws, collection);
+ auto ret = make_unique<AndHashStage>(opCtx, ws, collection);
for (size_t i = 0; i < ahn->children.size(); ++i) {
- PlanStage* childStage = buildStages(txn, collection, cq, qsol, ahn->children[i], ws);
+ PlanStage* childStage = buildStages(opCtx, collection, cq, qsol, ahn->children[i], ws);
if (NULL == childStage) {
return NULL;
}
@@ -176,9 +177,9 @@ PlanStage* buildStages(OperationContext* txn,
return ret.release();
} else if (STAGE_OR == root->getType()) {
const OrNode* orn = static_cast<const OrNode*>(root);
- auto ret = make_unique<OrStage>(txn, ws, orn->dedup, orn->filter.get());
+ auto ret = make_unique<OrStage>(opCtx, ws, orn->dedup, orn->filter.get());
for (size_t i = 0; i < orn->children.size(); ++i) {
- PlanStage* childStage = buildStages(txn, collection, cq, qsol, orn->children[i], ws);
+ PlanStage* childStage = buildStages(opCtx, collection, cq, qsol, orn->children[i], ws);
if (NULL == childStage) {
return NULL;
}
@@ -187,9 +188,9 @@ PlanStage* buildStages(OperationContext* txn,
return ret.release();
} else if (STAGE_AND_SORTED == root->getType()) {
const AndSortedNode* asn = static_cast<const AndSortedNode*>(root);
- auto ret = make_unique<AndSortedStage>(txn, ws, collection);
+ auto ret = make_unique<AndSortedStage>(opCtx, ws, collection);
for (size_t i = 0; i < asn->children.size(); ++i) {
- PlanStage* childStage = buildStages(txn, collection, cq, qsol, asn->children[i], ws);
+ PlanStage* childStage = buildStages(opCtx, collection, cq, qsol, asn->children[i], ws);
if (NULL == childStage) {
return NULL;
}
@@ -202,9 +203,9 @@ PlanStage* buildStages(OperationContext* txn,
params.dedup = msn->dedup;
params.pattern = msn->sort;
params.collator = cq.getCollator();
- auto ret = make_unique<MergeSortStage>(txn, params, ws, collection);
+ auto ret = make_unique<MergeSortStage>(opCtx, params, ws, collection);
for (size_t i = 0; i < msn->children.size(); ++i) {
- PlanStage* childStage = buildStages(txn, collection, cq, qsol, msn->children[i], ws);
+ PlanStage* childStage = buildStages(opCtx, collection, cq, qsol, msn->children[i], ws);
if (NULL == childStage) {
return NULL;
}
@@ -222,10 +223,10 @@ PlanStage* buildStages(OperationContext* txn,
params.addDistMeta = node->addDistMeta;
IndexDescriptor* twoDIndex =
- collection->getIndexCatalog()->findIndexByName(txn, node->index.name);
+ collection->getIndexCatalog()->findIndexByName(opCtx, node->index.name);
invariant(twoDIndex);
- GeoNear2DStage* nearStage = new GeoNear2DStage(params, txn, ws, collection, twoDIndex);
+ GeoNear2DStage* nearStage = new GeoNear2DStage(params, opCtx, ws, collection, twoDIndex);
return nearStage;
} else if (STAGE_GEO_NEAR_2DSPHERE == root->getType()) {
@@ -239,14 +240,14 @@ PlanStage* buildStages(OperationContext* txn,
params.addDistMeta = node->addDistMeta;
IndexDescriptor* s2Index =
- collection->getIndexCatalog()->findIndexByName(txn, node->index.name);
+ collection->getIndexCatalog()->findIndexByName(opCtx, node->index.name);
invariant(s2Index);
- return new GeoNear2DSphereStage(params, txn, ws, collection, s2Index);
+ return new GeoNear2DSphereStage(params, opCtx, ws, collection, s2Index);
} else if (STAGE_TEXT == root->getType()) {
const TextNode* node = static_cast<const TextNode*>(root);
IndexDescriptor* desc =
- collection->getIndexCatalog()->findIndexByName(txn, node->index.name);
+ collection->getIndexCatalog()->findIndexByName(opCtx, node->index.name);
invariant(desc);
const FTSAccessMethod* fam =
static_cast<FTSAccessMethod*>(collection->getIndexCatalog()->getIndex(desc));
@@ -260,25 +261,25 @@ PlanStage* buildStages(OperationContext* txn,
// planning a query that contains "no-op" expressions. TODO: make StageBuilder::build()
// fail in this case (this improvement is being tracked by SERVER-21510).
params.query = static_cast<FTSQueryImpl&>(*node->ftsQuery);
- return new TextStage(txn, params, ws, node->filter.get());
+ return new TextStage(opCtx, params, ws, node->filter.get());
} else if (STAGE_SHARDING_FILTER == root->getType()) {
const ShardingFilterNode* fn = static_cast<const ShardingFilterNode*>(root);
- PlanStage* childStage = buildStages(txn, collection, cq, qsol, fn->children[0], ws);
+ PlanStage* childStage = buildStages(opCtx, collection, cq, qsol, fn->children[0], ws);
if (NULL == childStage) {
return NULL;
}
return new ShardFilterStage(
- txn,
- CollectionShardingState::get(txn, collection->ns())->getMetadata(),
+ opCtx,
+ CollectionShardingState::get(opCtx, collection->ns())->getMetadata(),
ws,
childStage);
} else if (STAGE_KEEP_MUTATIONS == root->getType()) {
const KeepMutationsNode* km = static_cast<const KeepMutationsNode*>(root);
- PlanStage* childStage = buildStages(txn, collection, cq, qsol, km->children[0], ws);
+ PlanStage* childStage = buildStages(opCtx, collection, cq, qsol, km->children[0], ws);
if (NULL == childStage) {
return NULL;
}
- return new KeepMutationsStage(txn, km->filter.get(), ws, childStage);
+ return new KeepMutationsStage(opCtx, km->filter.get(), ws, childStage);
} else if (STAGE_DISTINCT_SCAN == root->getType()) {
const DistinctNode* dn = static_cast<const DistinctNode*>(root);
@@ -289,12 +290,12 @@ PlanStage* buildStages(OperationContext* txn,
DistinctParams params;
- params.descriptor = collection->getIndexCatalog()->findIndexByName(txn, dn->index.name);
+ params.descriptor = collection->getIndexCatalog()->findIndexByName(opCtx, dn->index.name);
invariant(params.descriptor);
params.direction = dn->direction;
params.bounds = dn->bounds;
params.fieldNo = dn->fieldNo;
- return new DistinctScan(txn, params, ws);
+ return new DistinctScan(opCtx, params, ws);
} else if (STAGE_COUNT_SCAN == root->getType()) {
const CountScanNode* csn = static_cast<const CountScanNode*>(root);
@@ -305,21 +306,21 @@ PlanStage* buildStages(OperationContext* txn,
CountScanParams params;
- params.descriptor = collection->getIndexCatalog()->findIndexByName(txn, csn->index.name);
+ params.descriptor = collection->getIndexCatalog()->findIndexByName(opCtx, csn->index.name);
invariant(params.descriptor);
params.startKey = csn->startKey;
params.startKeyInclusive = csn->startKeyInclusive;
params.endKey = csn->endKey;
params.endKeyInclusive = csn->endKeyInclusive;
- return new CountScan(txn, params, ws);
+ return new CountScan(opCtx, params, ws);
} else if (STAGE_ENSURE_SORTED == root->getType()) {
const EnsureSortedNode* esn = static_cast<const EnsureSortedNode*>(root);
- PlanStage* childStage = buildStages(txn, collection, cq, qsol, esn->children[0], ws);
+ PlanStage* childStage = buildStages(opCtx, collection, cq, qsol, esn->children[0], ws);
if (NULL == childStage) {
return NULL;
}
- return new EnsureSortedStage(txn, esn->pattern, ws, childStage);
+ return new EnsureSortedStage(opCtx, esn->pattern, ws, childStage);
} else {
mongoutils::str::stream ss;
root->appendToString(&ss, 0);
@@ -330,7 +331,7 @@ PlanStage* buildStages(OperationContext* txn,
}
// static (this one is used for Cached and MultiPlanStage)
-bool StageBuilder::build(OperationContext* txn,
+bool StageBuilder::build(OperationContext* opCtx,
Collection* collection,
const CanonicalQuery& cq,
const QuerySolution& solution,
@@ -349,7 +350,7 @@ bool StageBuilder::build(OperationContext* txn,
if (NULL == solutionNode) {
return false;
}
- return NULL != (*rootOut = buildStages(txn, collection, cq, solution, solutionNode, wsIn));
+ return NULL != (*rootOut = buildStages(opCtx, collection, cq, solution, solutionNode, wsIn));
}
} // namespace mongo
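Finally, a sketch of how the builder above is driven once planning has picked a winning solution; it mirrors the getExecutorDistinct call at the top of this patch, with 'cq', 'solution' and 'ws' standing in for the usual planning outputs and the error handling chosen for illustration.

    // Hypothetical caller: turn a QuerySolution into an executable PlanStage tree.
    PlanStage* rawRoot;
    if (!StageBuilder::build(opCtx, collection, *cq, *solution, ws.get(), &rawRoot)) {
        return Status(ErrorCodes::BadValue, "unable to build execution tree from solution");
    }
    std::unique_ptr<PlanStage> root(rawRoot);  // caller owns the returned tree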
diff --git a/src/mongo/db/query/stage_builder.h b/src/mongo/db/query/stage_builder.h
index ee5ae17bb75..14ab05ab211 100644
--- a/src/mongo/db/query/stage_builder.h
+++ b/src/mongo/db/query/stage_builder.h
@@ -51,7 +51,7 @@ public:
*
* Returns false otherwise. *rootOut and *wsOut are invalid.
*/
- static bool build(OperationContext* txn,
+ static bool build(OperationContext* opCtx,
Collection* collection,
const CanonicalQuery& cq,
const QuerySolution& solution,