author    Qingyang Chen <qingyang.chen@10gen.com>  2015-08-03 12:17:47 -0400
committer Qingyang Chen <qingyang.chen@10gen.com>  2015-08-04 16:12:30 -0400
commit    be8a683771004a2541c730a1ac0e35cd13e03a8b
tree      20b31de1fea9f59011899b568771f069327f113f
parent    84182ff1575cbe868a89e7209f12ca665f4bda19
SERVER-19364 move query stage OperationContext pointer management into the base class
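
Previously, every stage that needed an OperationContext cached its own
_txn pointer and overrode doReattachToOperationContext(OperationContext*)
to swap it on yield recovery. This patch moves the pointer into PlanStage
itself: the constructor takes the OperationContext, the non-virtual
detach/reattach entry points maintain it for the whole stage tree, and
subclasses read it through getOpCtx(). A condensed sketch of the new
base-class contract (simplified from the plan_stage.h/.cpp hunks below;
stats plumbing and unrelated members elided):

    class PlanStage {
    public:
        PlanStage(const char* typeName, OperationContext* opCtx)
            : _commonStats(typeName), _opCtx(opCtx) {}

        void detachFromOperationContext() {
            invariant(_opCtx);
            _opCtx = nullptr;                    // cleared before any hook runs
            for (auto&& child : _children)
                child->detachFromOperationContext();
            doDetachFromOperationContext();      // getOpCtx() is off-limits here
        }

        void reattachToOperationContext(OperationContext* opCtx) {
            invariant(_opCtx == nullptr);
            _opCtx = opCtx;                      // restored before any hook runs
            for (auto&& child : _children)
                child->reattachToOperationContext(opCtx);
            doReattachToOperationContext();      // getOpCtx() is valid again
        }

    protected:
        virtual void doDetachFromOperationContext() {}
        virtual void doReattachToOperationContext() {}

        OperationContext* getOpCtx() const {
            return _opCtx;
        }

        Children _children;

    private:
        OperationContext* _opCtx;                // not owned; managed only here
    };

Stages that hold no storage-engine cursors (e.g. LimitStage, OrStage) can
now drop their overrides entirely, and stages that do hold cursors keep
only the cursor bookkeeping in the hooks.
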
-rw-r--r--  src/mongo/db/commands/list_collections.cpp | 12
-rw-r--r--  src/mongo/db/commands/list_indexes.cpp | 9
-rw-r--r--  src/mongo/db/commands/pipeline_command.cpp | 7
-rw-r--r--  src/mongo/db/exec/and_hash.cpp | 11
-rw-r--r--  src/mongo/db/exec/and_hash.h | 7
-rw-r--r--  src/mongo/db/exec/and_sorted.cpp | 6
-rw-r--r--  src/mongo/db/exec/and_sorted.h | 2
-rw-r--r--  src/mongo/db/exec/cached_plan.cpp | 15
-rw-r--r--  src/mongo/db/exec/cached_plan.h | 4
-rw-r--r--  src/mongo/db/exec/collection_scan.cpp | 17
-rw-r--r--  src/mongo/db/exec/collection_scan.h | 5
-rw-r--r--  src/mongo/db/exec/count.cpp | 9
-rw-r--r--  src/mongo/db/exec/count.h | 5
-rw-r--r--  src/mongo/db/exec/count_scan.cpp | 14
-rw-r--r--  src/mongo/db/exec/count_scan.h | 5
-rw-r--r--  src/mongo/db/exec/delete.cpp | 17
-rw-r--r--  src/mongo/db/exec/delete.h | 4
-rw-r--r--  src/mongo/db/exec/distinct_scan.cpp | 12
-rw-r--r--  src/mongo/db/exec/distinct_scan.h | 5
-rw-r--r--  src/mongo/db/exec/eof.cpp | 2
-rw-r--r--  src/mongo/db/exec/eof.h | 2
-rw-r--r--  src/mongo/db/exec/fetch.cpp | 14
-rw-r--r--  src/mongo/db/exec/fetch.h | 4
-rw-r--r--  src/mongo/db/exec/group.cpp | 10
-rw-r--r--  src/mongo/db/exec/group.h | 4
-rw-r--r--  src/mongo/db/exec/idhack.cpp | 23
-rw-r--r--  src/mongo/db/exec/idhack.h | 5
-rw-r--r--  src/mongo/db/exec/index_scan.cpp | 16
-rw-r--r--  src/mongo/db/exec/index_scan.h | 5
-rw-r--r--  src/mongo/db/exec/keep_mutations.cpp | 5
-rw-r--r--  src/mongo/db/exec/keep_mutations.h | 5
-rw-r--r--  src/mongo/db/exec/limit.cpp | 4
-rw-r--r--  src/mongo/db/exec/limit.h | 2
-rw-r--r--  src/mongo/db/exec/merge_sort.cpp | 5
-rw-r--r--  src/mongo/db/exec/merge_sort.h | 3
-rw-r--r--  src/mongo/db/exec/multi_iterator.cpp | 12
-rw-r--r--  src/mongo/db/exec/multi_iterator.h | 2
-rw-r--r--  src/mongo/db/exec/multi_plan.cpp | 9
-rw-r--r--  src/mongo/db/exec/multi_plan.h | 5
-rw-r--r--  src/mongo/db/exec/near.cpp | 12
-rw-r--r--  src/mongo/db/exec/near.h | 3
-rw-r--r--  src/mongo/db/exec/oplogstart.cpp | 16
-rw-r--r--  src/mongo/db/exec/oplogstart.h | 5
-rw-r--r--  src/mongo/db/exec/or.cpp | 4
-rw-r--r--  src/mongo/db/exec/or.h | 2
-rw-r--r--  src/mongo/db/exec/pipeline_proxy.cpp | 11
-rw-r--r--  src/mongo/db/exec/pipeline_proxy.h | 5
-rw-r--r--  src/mongo/db/exec/plan_stage.cpp | 8
-rw-r--r--  src/mongo/db/exec/plan_stage.h | 19
-rw-r--r--  src/mongo/db/exec/projection.cpp | 5
-rw-r--r--  src/mongo/db/exec/projection.h | 5
-rw-r--r--  src/mongo/db/exec/queued_data_stage.cpp | 3
-rw-r--r--  src/mongo/db/exec/queued_data_stage.h | 2
-rw-r--r--  src/mongo/db/exec/queued_data_stage_test.cpp | 6
-rw-r--r--  src/mongo/db/exec/shard_filter.cpp | 5
-rw-r--r--  src/mongo/db/exec/shard_filter.h | 3
-rw-r--r--  src/mongo/db/exec/skip.cpp | 4
-rw-r--r--  src/mongo/db/exec/skip.h | 2
-rw-r--r--  src/mongo/db/exec/sort.cpp | 7
-rw-r--r--  src/mongo/db/exec/sort.h | 5
-rw-r--r--  src/mongo/db/exec/sort_test.cpp | 8
-rw-r--r--  src/mongo/db/exec/stagedebug_cmd.cpp | 13
-rw-r--r--  src/mongo/db/exec/subplan.cpp | 23
-rw-r--r--  src/mongo/db/exec/subplan.h | 5
-rw-r--r--  src/mongo/db/exec/text.cpp | 5
-rw-r--r--  src/mongo/db/exec/text_match.cpp | 5
-rw-r--r--  src/mongo/db/exec/text_match.h | 3
-rw-r--r--  src/mongo/db/exec/text_or.cpp | 20
-rw-r--r--  src/mongo/db/exec/text_or.h | 3
-rw-r--r--  src/mongo/db/exec/update.cpp | 43
-rw-r--r--  src/mongo/db/exec/update.h | 4
-rw-r--r--  src/mongo/db/query/get_executor.cpp | 30
-rw-r--r--  src/mongo/db/query/internal_plans.cpp | 2
-rw-r--r--  src/mongo/db/query/stage_builder.cpp | 23
-rw-r--r--  src/mongo/db/s/migration_source_manager.cpp | 2
-rw-r--r--  src/mongo/dbtests/query_plan_executor.cpp | 9
-rw-r--r--  src/mongo/dbtests/query_stage_and.cpp | 58
-rw-r--r--  src/mongo/dbtests/query_stage_cached_plan.cpp | 4
-rw-r--r--  src/mongo/dbtests/query_stage_delete.cpp | 17
-rw-r--r--  src/mongo/dbtests/query_stage_fetch.cpp | 8
-rw-r--r--  src/mongo/dbtests/query_stage_keep.cpp | 10
-rw-r--r--  src/mongo/dbtests/query_stage_limit_skip.cpp | 8
-rw-r--r--  src/mongo/dbtests/query_stage_merge_sort.cpp | 14
-rw-r--r--  src/mongo/dbtests/query_stage_near.cpp | 2
-rw-r--r--  src/mongo/dbtests/query_stage_sort.cpp | 22
-rw-r--r--  src/mongo/dbtests/query_stage_update.cpp | 42
86 files changed, 380 insertions, 438 deletions
diff --git a/src/mongo/db/commands/list_collections.cpp b/src/mongo/db/commands/list_collections.cpp
index dd3549d15cc..dd7b2744675 100644
--- a/src/mongo/db/commands/list_collections.cpp
+++ b/src/mongo/db/commands/list_collections.cpp
@@ -45,13 +45,15 @@
#include "mongo/db/query/find_constants.h"
#include "mongo/db/service_context.h"
#include "mongo/db/storage/storage_engine.h"
+#include "mongo/stdx/memory.h"
namespace mongo {
-using std::unique_ptr;
using std::list;
using std::string;
using std::stringstream;
+using std::unique_ptr;
+using stdx::make_unique;
class CmdListCollections : public Command {
public:
@@ -99,7 +101,7 @@ public:
int,
string& errmsg,
BSONObjBuilder& result) {
- std::unique_ptr<MatchExpression> matcher;
+ unique_ptr<MatchExpression> matcher;
BSONElement filterElt = jsobj["filter"];
if (!filterElt.eoo()) {
if (filterElt.type() != mongo::Object) {
@@ -134,8 +136,8 @@ public:
names.sort();
}
- std::unique_ptr<WorkingSet> ws(new WorkingSet());
- std::unique_ptr<QueuedDataStage> root(new QueuedDataStage(ws.get()));
+ auto ws = make_unique<WorkingSet>();
+ auto root = make_unique<QueuedDataStage>(txn, ws.get());
for (std::list<std::string>::const_iterator i = names.begin(); i != names.end(); ++i) {
const std::string& ns = *i;
@@ -175,7 +177,7 @@ public:
if (!statusWithPlanExecutor.isOK()) {
return appendCommandStatus(result, statusWithPlanExecutor.getStatus());
}
- std::unique_ptr<PlanExecutor> exec = std::move(statusWithPlanExecutor.getValue());
+ unique_ptr<PlanExecutor> exec = std::move(statusWithPlanExecutor.getValue());
BSONArrayBuilder firstBatch;
diff --git a/src/mongo/db/commands/list_indexes.cpp b/src/mongo/db/commands/list_indexes.cpp
index 0f46e4e7427..0d4b86a06d8 100644
--- a/src/mongo/db/commands/list_indexes.cpp
+++ b/src/mongo/db/commands/list_indexes.cpp
@@ -45,12 +45,15 @@
#include "mongo/db/query/find_constants.h"
#include "mongo/db/service_context.h"
#include "mongo/db/storage/storage_engine.h"
+#include "mongo/stdx/memory.h"
namespace mongo {
using std::string;
using std::stringstream;
+using std::unique_ptr;
using std::vector;
+using stdx::make_unique;
/**
* Lists the indexes for a given collection.
@@ -143,8 +146,8 @@ public:
}
MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "listIndexes", ns.ns());
- std::unique_ptr<WorkingSet> ws(new WorkingSet());
- std::unique_ptr<QueuedDataStage> root(new QueuedDataStage(ws.get()));
+ auto ws = make_unique<WorkingSet>();
+ auto root = make_unique<QueuedDataStage>(txn, ws.get());
for (size_t i = 0; i < indexNames.size(); i++) {
BSONObj indexSpec;
@@ -173,7 +176,7 @@ public:
if (!statusWithPlanExecutor.isOK()) {
return appendCommandStatus(result, statusWithPlanExecutor.getStatus());
}
- std::unique_ptr<PlanExecutor> exec = std::move(statusWithPlanExecutor.getValue());
+ unique_ptr<PlanExecutor> exec = std::move(statusWithPlanExecutor.getValue());
BSONArrayBuilder firstBatch;
diff --git a/src/mongo/db/commands/pipeline_command.cpp b/src/mongo/db/commands/pipeline_command.cpp
index d559d8c3b3f..520e9dc78d1 100644
--- a/src/mongo/db/commands/pipeline_command.cpp
+++ b/src/mongo/db/commands/pipeline_command.cpp
@@ -51,6 +51,7 @@
#include "mongo/db/query/find_constants.h"
#include "mongo/db/query/get_executor.h"
#include "mongo/db/storage_options.h"
+#include "mongo/stdx/memory.h"
namespace mongo {
@@ -60,6 +61,7 @@ using std::shared_ptr;
using std::string;
using std::stringstream;
using std::unique_ptr;
+using stdx::make_unique;
/**
* Returns true if we need to keep a ClientCursor saved for this pipeline (for future getMore
@@ -230,9 +232,8 @@ public:
// Create the PlanExecutor which returns results from the pipeline. The WorkingSet
// ('ws') and the PipelineProxyStage ('proxy') will be owned by the created
// PlanExecutor.
- unique_ptr<WorkingSet> ws(new WorkingSet());
- unique_ptr<PipelineProxyStage> proxy(
- new PipelineProxyStage(pPipeline, input, ws.get()));
+ auto ws = make_unique<WorkingSet>();
+ auto proxy = make_unique<PipelineProxyStage>(txn, pPipeline, input, ws.get());
auto statusWithPlanExecutor = (NULL == collection)
? PlanExecutor::make(
diff --git a/src/mongo/db/exec/and_hash.cpp b/src/mongo/db/exec/and_hash.cpp
index 1c21b4adf83..3e153316931 100644
--- a/src/mongo/db/exec/and_hash.cpp
+++ b/src/mongo/db/exec/and_hash.cpp
@@ -54,8 +54,8 @@ const size_t AndHashStage::kLookAheadWorks = 10;
// static
const char* AndHashStage::kStageType = "AND_HASH";
-AndHashStage::AndHashStage(WorkingSet* ws, const Collection* collection)
- : PlanStage(kStageType),
+AndHashStage::AndHashStage(OperationContext* opCtx, WorkingSet* ws, const Collection* collection)
+ : PlanStage(kStageType, opCtx),
_collection(collection),
_ws(ws),
_hashingChildren(true),
@@ -63,8 +63,11 @@ AndHashStage::AndHashStage(WorkingSet* ws, const Collection* collection)
_memUsage(0),
_maxMemUsage(kDefaultMaxMemUsageBytes) {}
-AndHashStage::AndHashStage(WorkingSet* ws, const Collection* collection, size_t maxMemUsage)
- : PlanStage(kStageType),
+AndHashStage::AndHashStage(OperationContext* opCtx,
+ WorkingSet* ws,
+ const Collection* collection,
+ size_t maxMemUsage)
+ : PlanStage(kStageType, opCtx),
_collection(collection),
_ws(ws),
_hashingChildren(true),
diff --git a/src/mongo/db/exec/and_hash.h b/src/mongo/db/exec/and_hash.h
index 9f42f93d5e1..8f888fb17ab 100644
--- a/src/mongo/db/exec/and_hash.h
+++ b/src/mongo/db/exec/and_hash.h
@@ -51,12 +51,15 @@ namespace mongo {
*/
class AndHashStage final : public PlanStage {
public:
- AndHashStage(WorkingSet* ws, const Collection* collection);
+ AndHashStage(OperationContext* opCtx, WorkingSet* ws, const Collection* collection);
/**
* For testing only. Allows tests to set memory usage threshold.
*/
- AndHashStage(WorkingSet* ws, const Collection* collection, size_t maxMemUsage);
+ AndHashStage(OperationContext* opCtx,
+ WorkingSet* ws,
+ const Collection* collection,
+ size_t maxMemUsage);
void addChild(PlanStage* child);
diff --git a/src/mongo/db/exec/and_sorted.cpp b/src/mongo/db/exec/and_sorted.cpp
index 873e545a2dd..5844078c762 100644
--- a/src/mongo/db/exec/and_sorted.cpp
+++ b/src/mongo/db/exec/and_sorted.cpp
@@ -44,8 +44,10 @@ using stdx::make_unique;
// static
const char* AndSortedStage::kStageType = "AND_SORTED";
-AndSortedStage::AndSortedStage(WorkingSet* ws, const Collection* collection)
- : PlanStage(kStageType),
+AndSortedStage::AndSortedStage(OperationContext* opCtx,
+ WorkingSet* ws,
+ const Collection* collection)
+ : PlanStage(kStageType, opCtx),
_collection(collection),
_ws(ws),
_targetNode(numeric_limits<size_t>::max()),
diff --git a/src/mongo/db/exec/and_sorted.h b/src/mongo/db/exec/and_sorted.h
index ca6ea3e7410..3ea040b0428 100644
--- a/src/mongo/db/exec/and_sorted.h
+++ b/src/mongo/db/exec/and_sorted.h
@@ -53,7 +53,7 @@ namespace mongo {
*/
class AndSortedStage final : public PlanStage {
public:
- AndSortedStage(WorkingSet* ws, const Collection* collection);
+ AndSortedStage(OperationContext* opCtx, WorkingSet* ws, const Collection* collection);
void addChild(PlanStage* child);
diff --git a/src/mongo/db/exec/cached_plan.cpp b/src/mongo/db/exec/cached_plan.cpp
index dd0b751c326..7dead20013d 100644
--- a/src/mongo/db/exec/cached_plan.cpp
+++ b/src/mongo/db/exec/cached_plan.cpp
@@ -60,8 +60,7 @@ CachedPlanStage::CachedPlanStage(OperationContext* txn,
const QueryPlannerParams& params,
size_t decisionWorks,
PlanStage* root)
- : PlanStage(kStageType),
- _txn(txn),
+ : PlanStage(kStageType, txn),
_collection(collection),
_ws(ws),
_canonicalQuery(cq),
@@ -228,7 +227,7 @@ Status CachedPlanStage::replan(PlanYieldPolicy* yieldPolicy, bool shouldCache) {
PlanStage* newRoot;
// Only one possible plan. Build the stages from the solution.
- verify(StageBuilder::build(_txn, _collection, *solutions[0], _ws, &newRoot));
+ verify(StageBuilder::build(getOpCtx(), _collection, *solutions[0], _ws, &newRoot));
_children.emplace_back(newRoot);
_replannedQs.reset(solutions.popAndReleaseBack());
return Status::OK();
@@ -236,7 +235,8 @@ Status CachedPlanStage::replan(PlanYieldPolicy* yieldPolicy, bool shouldCache) {
// Many solutions. Create a MultiPlanStage to pick the best, update the cache,
// and so on. The working set will be shared by all candidate plans.
- _children.emplace_back(new MultiPlanStage(_txn, _collection, _canonicalQuery, shouldCache));
+ _children.emplace_back(
+ new MultiPlanStage(getOpCtx(), _collection, _canonicalQuery, shouldCache));
MultiPlanStage* multiPlanStage = static_cast<MultiPlanStage*>(child().get());
for (size_t ix = 0; ix < solutions.size(); ++ix) {
@@ -245,7 +245,7 @@ Status CachedPlanStage::replan(PlanYieldPolicy* yieldPolicy, bool shouldCache) {
}
PlanStage* nextPlanRoot;
- verify(StageBuilder::build(_txn, _collection, *solutions[ix], _ws, &nextPlanRoot));
+ verify(StageBuilder::build(getOpCtx(), _collection, *solutions[ix], _ws, &nextPlanRoot));
// Takes ownership of 'solutions[ix]' and 'nextPlanRoot'.
multiPlanStage->addPlan(solutions.releaseAt(ix), nextPlanRoot, _ws);
@@ -291,11 +291,6 @@ PlanStage::StageState CachedPlanStage::work(WorkingSetID* out) {
return childStatus;
}
-
-void CachedPlanStage::doReattachToOperationContext(OperationContext* opCtx) {
- _txn = opCtx;
-}
-
void CachedPlanStage::doInvalidate(OperationContext* txn,
const RecordId& dl,
InvalidationType type) {
diff --git a/src/mongo/db/exec/cached_plan.h b/src/mongo/db/exec/cached_plan.h
index 5216e33eac0..d9f54716acb 100644
--- a/src/mongo/db/exec/cached_plan.h
+++ b/src/mongo/db/exec/cached_plan.h
@@ -65,7 +65,6 @@ public:
StageState work(WorkingSetID* out) final;
- void doReattachToOperationContext(OperationContext* opCtx) final;
void doInvalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) final;
StageType stageType() const final {
@@ -115,9 +114,6 @@ private:
*/
Status tryYield(PlanYieldPolicy* yieldPolicy);
- // Not owned.
- OperationContext* _txn;
-
// Not owned. Must be non-null.
Collection* _collection;
diff --git a/src/mongo/db/exec/collection_scan.cpp b/src/mongo/db/exec/collection_scan.cpp
index ef5fb5f9fee..28ed97662c4 100644
--- a/src/mongo/db/exec/collection_scan.cpp
+++ b/src/mongo/db/exec/collection_scan.cpp
@@ -58,8 +58,7 @@ CollectionScan::CollectionScan(OperationContext* txn,
const CollectionScanParams& params,
WorkingSet* workingSet,
const MatchExpression* filter)
- : PlanStage(kStageType),
- _txn(txn),
+ : PlanStage(kStageType, txn),
_workingSet(workingSet),
_filter(filter),
_params(params),
@@ -98,7 +97,7 @@ PlanStage::StageState CollectionScan::work(WorkingSetID* out) {
try {
if (needToMakeCursor) {
const bool forward = _params.direction == CollectionScanParams::FORWARD;
- _cursor = _params.collection->getCursor(_txn, forward);
+ _cursor = _params.collection->getCursor(getOpCtx(), forward);
if (!_lastSeenId.isNull()) {
invariant(_params.tailable);
@@ -165,7 +164,7 @@ PlanStage::StageState CollectionScan::work(WorkingSetID* out) {
WorkingSetID id = _workingSet->allocate();
WorkingSetMember* member = _workingSet->get(id);
member->loc = record->id;
- member->obj = {_txn->recoveryUnit()->getSnapshotId(), record->data.releaseToBson()};
+ member->obj = {getOpCtx()->recoveryUnit()->getSnapshotId(), record->data.releaseToBson()};
_workingSet->transitionToLocAndObj(id);
return returnIfMatches(member, id, out);
@@ -223,23 +222,21 @@ void CollectionScan::doSaveState() {
void CollectionScan::doRestoreState() {
if (_cursor) {
if (!_cursor->restore()) {
- warning() << "Could not restore RecordCursor for CollectionScan: " << _txn->getNS();
+ warning() << "Could not restore RecordCursor for CollectionScan: "
+ << getOpCtx()->getNS();
_isDead = true;
}
}
}
void CollectionScan::doDetachFromOperationContext() {
- _txn = NULL;
if (_cursor)
_cursor->detachFromOperationContext();
}
-void CollectionScan::doReattachToOperationContext(OperationContext* opCtx) {
- invariant(_txn == NULL);
- _txn = opCtx;
+void CollectionScan::doReattachToOperationContext() {
if (_cursor)
- _cursor->reattachToOperationContext(opCtx);
+ _cursor->reattachToOperationContext(getOpCtx());
}
unique_ptr<PlanStageStats> CollectionScan::getStats() {
diff --git a/src/mongo/db/exec/collection_scan.h b/src/mongo/db/exec/collection_scan.h
index 2cfac584456..c886a6435d6 100644
--- a/src/mongo/db/exec/collection_scan.h
+++ b/src/mongo/db/exec/collection_scan.h
@@ -61,7 +61,7 @@ public:
void doSaveState() final;
void doRestoreState() final;
void doDetachFromOperationContext() final;
- void doReattachToOperationContext(OperationContext* opCtx) final;
+ void doReattachToOperationContext() final;
StageType stageType() const final {
return STAGE_COLLSCAN;
@@ -80,9 +80,6 @@ private:
*/
StageState returnIfMatches(WorkingSetMember* member, WorkingSetID memberID, WorkingSetID* out);
- // transactional context for read locks. Not owned by us
- OperationContext* _txn;
-
// WorkingSet is not owned by us.
WorkingSet* _workingSet;
diff --git a/src/mongo/db/exec/count.cpp b/src/mongo/db/exec/count.cpp
index 40bc4e36c5c..83f7f952fae 100644
--- a/src/mongo/db/exec/count.cpp
+++ b/src/mongo/db/exec/count.cpp
@@ -49,8 +49,7 @@ CountStage::CountStage(OperationContext* txn,
const CountRequest& request,
WorkingSet* ws,
PlanStage* child)
- : PlanStage(kStageType),
- _txn(txn),
+ : PlanStage(kStageType, txn),
_collection(collection),
_request(request),
_leftToSkip(request.getSkip()),
@@ -73,7 +72,7 @@ bool CountStage::isEOF() {
void CountStage::trivialCount() {
invariant(_collection);
- long long nCounted = _collection->numRecords(_txn);
+ long long nCounted = _collection->numRecords(getOpCtx());
if (0 != _request.getSkip()) {
nCounted -= _request.getSkip();
@@ -163,10 +162,6 @@ PlanStage::StageState CountStage::work(WorkingSetID* out) {
return PlanStage::NEED_TIME;
}
-void CountStage::doReattachToOperationContext(OperationContext* opCtx) {
- _txn = opCtx;
-}
-
unique_ptr<PlanStageStats> CountStage::getStats() {
_commonStats.isEOF = isEOF();
unique_ptr<PlanStageStats> ret = make_unique<PlanStageStats>(_commonStats, STAGE_COUNT);
diff --git a/src/mongo/db/exec/count.h b/src/mongo/db/exec/count.h
index b84f0bbe498..63c37face02 100644
--- a/src/mongo/db/exec/count.h
+++ b/src/mongo/db/exec/count.h
@@ -56,8 +56,6 @@ public:
bool isEOF() final;
StageState work(WorkingSetID* out) final;
- void doReattachToOperationContext(OperationContext* opCtx) final;
-
StageType stageType() const final {
return STAGE_COUNT;
}
@@ -75,9 +73,6 @@ private:
*/
void trivialCount();
- // Transactional context for read locks. Not owned by us.
- OperationContext* _txn;
-
// The collection over which we are counting.
Collection* _collection;
diff --git a/src/mongo/db/exec/count_scan.cpp b/src/mongo/db/exec/count_scan.cpp
index 6ba9d710f5d..56b7cf34eee 100644
--- a/src/mongo/db/exec/count_scan.cpp
+++ b/src/mongo/db/exec/count_scan.cpp
@@ -43,8 +43,7 @@ using stdx::make_unique;
const char* CountScan::kStageType = "COUNT_SCAN";
CountScan::CountScan(OperationContext* txn, const CountScanParams& params, WorkingSet* workingSet)
- : PlanStage(kStageType),
- _txn(txn),
+ : PlanStage(kStageType, txn),
_workingSet(workingSet),
_descriptor(params.descriptor),
_iam(params.descriptor->getIndexCatalog()->getIndex(params.descriptor)),
@@ -81,7 +80,7 @@ PlanStage::StageState CountScan::work(WorkingSetID* out) {
if (needInit) {
// First call to work(). Perform cursor init.
- _cursor = _iam->newCursor(_txn);
+ _cursor = _iam->newCursor(getOpCtx());
_cursor->setEndPosition(_params.endKey, _params.endKeyInclusive);
entry = _cursor->seek(_params.startKey, _params.startKeyInclusive, kWantLoc);
@@ -131,20 +130,17 @@ void CountScan::doRestoreState() {
// This can change during yielding.
// TODO this isn't sufficient. See SERVER-17678.
- _shouldDedup = _descriptor->isMultikey(_txn);
+ _shouldDedup = _descriptor->isMultikey(getOpCtx());
}
void CountScan::doDetachFromOperationContext() {
- _txn = NULL;
if (_cursor)
_cursor->detachFromOperationContext();
}
-void CountScan::doReattachToOperationContext(OperationContext* opCtx) {
- invariant(_txn == NULL);
- _txn = opCtx;
+void CountScan::doReattachToOperationContext() {
if (_cursor)
- _cursor->reattachToOperationContext(opCtx);
+ _cursor->reattachToOperationContext(getOpCtx());
}
void CountScan::doInvalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
diff --git a/src/mongo/db/exec/count_scan.h b/src/mongo/db/exec/count_scan.h
index f7f65700134..7bc593d0695 100644
--- a/src/mongo/db/exec/count_scan.h
+++ b/src/mongo/db/exec/count_scan.h
@@ -73,7 +73,7 @@ public:
void doSaveState() final;
void doRestoreState() final;
void doDetachFromOperationContext() final;
- void doReattachToOperationContext(OperationContext* opCtx) final;
+ void doReattachToOperationContext() final;
void doInvalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) final;
StageType stageType() const final {
@@ -87,9 +87,6 @@ public:
static const char* kStageType;
private:
- // transactional context for read locks. Not owned by us
- OperationContext* _txn;
-
// The WorkingSet we annotate with results. Not owned by us.
WorkingSet* _workingSet;
diff --git a/src/mongo/db/exec/delete.cpp b/src/mongo/db/exec/delete.cpp
index 4a9f6638c2d..90b87cce6bc 100644
--- a/src/mongo/db/exec/delete.cpp
+++ b/src/mongo/db/exec/delete.cpp
@@ -58,8 +58,7 @@ DeleteStage::DeleteStage(OperationContext* txn,
WorkingSet* ws,
Collection* collection,
PlanStage* child)
- : PlanStage(kStageType),
- _txn(txn),
+ : PlanStage(kStageType, txn),
_params(params),
_ws(ws),
_collection(collection),
@@ -138,9 +137,9 @@ PlanStage::StageState DeleteStage::work(WorkingSetID* out) {
// If the snapshot changed, then we have to make sure we have the latest copy of the
// doc and that it still matches.
std::unique_ptr<RecordCursor> cursor;
- if (_txn->recoveryUnit()->getSnapshotId() != member->obj.snapshotId()) {
- cursor = _collection->getCursor(_txn);
- if (!WorkingSetCommon::fetch(_txn, _ws, id, cursor)) {
+ if (getOpCtx()->recoveryUnit()->getSnapshotId() != member->obj.snapshotId()) {
+ cursor = _collection->getCursor(getOpCtx());
+ if (!WorkingSetCommon::fetch(getOpCtx(), _ws, id, cursor)) {
// Doc is already deleted. Nothing more to do.
++_commonStats.needTime;
return PlanStage::NEED_TIME;
@@ -179,13 +178,13 @@ PlanStage::StageState DeleteStage::work(WorkingSetID* out) {
// Do the write, unless this is an explain.
if (!_params.isExplain) {
- WriteUnitOfWork wunit(_txn);
+ WriteUnitOfWork wunit(getOpCtx());
const bool deleteCappedOK = false;
const bool deleteNoWarn = false;
BSONObj deletedId;
- _collection->deleteDocument(_txn,
+ _collection->deleteDocument(getOpCtx(),
rloc,
deleteCappedOK,
deleteNoWarn,
@@ -265,10 +264,6 @@ void DeleteStage::doRestoreState() {
repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(ns));
}
-void DeleteStage::doReattachToOperationContext(OperationContext* opCtx) {
- _txn = opCtx;
-}
-
unique_ptr<PlanStageStats> DeleteStage::getStats() {
_commonStats.isEOF = isEOF();
unique_ptr<PlanStageStats> ret = make_unique<PlanStageStats>(_commonStats, STAGE_DELETE);
diff --git a/src/mongo/db/exec/delete.h b/src/mongo/db/exec/delete.h
index 528bad31088..5387e4e8ddf 100644
--- a/src/mongo/db/exec/delete.h
+++ b/src/mongo/db/exec/delete.h
@@ -90,7 +90,6 @@ public:
StageState work(WorkingSetID* out) final;
void doRestoreState() final;
- void doReattachToOperationContext(OperationContext* opCtx) final;
StageType stageType() const final {
return STAGE_DELETE;
@@ -110,9 +109,6 @@ public:
static long long getNumDeleted(const PlanExecutor& exec);
private:
- // Transactional context. Not owned by us.
- OperationContext* _txn;
-
DeleteStageParams _params;
// Not owned by us.
diff --git a/src/mongo/db/exec/distinct_scan.cpp b/src/mongo/db/exec/distinct_scan.cpp
index f663bf99cee..0f00c2669d7 100644
--- a/src/mongo/db/exec/distinct_scan.cpp
+++ b/src/mongo/db/exec/distinct_scan.cpp
@@ -48,8 +48,7 @@ const char* DistinctScan::kStageType = "DISTINCT_SCAN";
DistinctScan::DistinctScan(OperationContext* txn,
const DistinctParams& params,
WorkingSet* workingSet)
- : PlanStage(kStageType),
- _txn(txn),
+ : PlanStage(kStageType, txn),
_workingSet(workingSet),
_descriptor(params.descriptor),
_iam(params.descriptor->getIndexCatalog()->getIndex(params.descriptor)),
@@ -74,7 +73,7 @@ PlanStage::StageState DistinctScan::work(WorkingSetID* out) {
boost::optional<IndexKeyEntry> kv;
try {
if (!_cursor)
- _cursor = _iam->newCursor(_txn, _params.direction == 1);
+ _cursor = _iam->newCursor(getOpCtx(), _params.direction == 1);
kv = _cursor->seek(_seekPoint);
} catch (const WriteConflictException& wce) {
*out = WorkingSet::INVALID_ID;
@@ -140,16 +139,13 @@ void DistinctScan::doRestoreState() {
}
void DistinctScan::doDetachFromOperationContext() {
- _txn = NULL;
if (_cursor)
_cursor->detachFromOperationContext();
}
-void DistinctScan::doReattachToOperationContext(OperationContext* opCtx) {
- invariant(_txn == NULL);
- _txn = opCtx;
+void DistinctScan::doReattachToOperationContext() {
if (_cursor)
- _cursor->reattachToOperationContext(opCtx);
+ _cursor->reattachToOperationContext(getOpCtx());
}
unique_ptr<PlanStageStats> DistinctScan::getStats() {
diff --git a/src/mongo/db/exec/distinct_scan.h b/src/mongo/db/exec/distinct_scan.h
index 6caafeae2ff..d915af433cc 100644
--- a/src/mongo/db/exec/distinct_scan.h
+++ b/src/mongo/db/exec/distinct_scan.h
@@ -81,7 +81,7 @@ public:
void doSaveState() final;
void doRestoreState() final;
void doDetachFromOperationContext() final;
- void doReattachToOperationContext(OperationContext* opCtx) final;
+ void doReattachToOperationContext() final;
StageType stageType() const final {
return STAGE_DISTINCT_SCAN;
@@ -94,9 +94,6 @@ public:
static const char* kStageType;
private:
- // transactional context for read locks. Not owned by us
- OperationContext* _txn;
-
// The WorkingSet we annotate with results. Not owned by us.
WorkingSet* _workingSet;
diff --git a/src/mongo/db/exec/eof.cpp b/src/mongo/db/exec/eof.cpp
index aebb31b0277..f5f127f016a 100644
--- a/src/mongo/db/exec/eof.cpp
+++ b/src/mongo/db/exec/eof.cpp
@@ -42,7 +42,7 @@ using stdx::make_unique;
// static
const char* EOFStage::kStageType = "EOF";
-EOFStage::EOFStage() : PlanStage(kStageType) {}
+EOFStage::EOFStage(OperationContext* opCtx) : PlanStage(kStageType, opCtx) {}
EOFStage::~EOFStage() {}
diff --git a/src/mongo/db/exec/eof.h b/src/mongo/db/exec/eof.h
index 33462ca3aa9..6796e4994e7 100644
--- a/src/mongo/db/exec/eof.h
+++ b/src/mongo/db/exec/eof.h
@@ -38,7 +38,7 @@ namespace mongo {
*/
class EOFStage final : public PlanStage {
public:
- EOFStage();
+ EOFStage(OperationContext* opCtx);
~EOFStage();
diff --git a/src/mongo/db/exec/fetch.cpp b/src/mongo/db/exec/fetch.cpp
index 85c404c17e6..fb76a5f9f81 100644
--- a/src/mongo/db/exec/fetch.cpp
+++ b/src/mongo/db/exec/fetch.cpp
@@ -54,8 +54,7 @@ FetchStage::FetchStage(OperationContext* txn,
PlanStage* child,
const MatchExpression* filter,
const Collection* collection)
- : PlanStage(kStageType),
- _txn(txn),
+ : PlanStage(kStageType, txn),
_collection(collection),
_ws(ws),
_filter(filter),
@@ -109,7 +108,7 @@ PlanStage::StageState FetchStage::work(WorkingSetID* out) {
try {
if (!_cursor)
- _cursor = _collection->getCursor(_txn);
+ _cursor = _collection->getCursor(getOpCtx());
if (auto fetcher = _cursor->fetcherForId(member->loc)) {
// There's something to fetch. Hand the fetcher off to the WSM, and pass up
@@ -123,7 +122,7 @@ PlanStage::StageState FetchStage::work(WorkingSetID* out) {
// The doc is already in memory, so go ahead and grab it. Now we have a RecordId
// as well as an unowned object
- if (!WorkingSetCommon::fetch(_txn, _ws, id, _cursor)) {
+ if (!WorkingSetCommon::fetch(getOpCtx(), _ws, id, _cursor)) {
_ws->free(id);
_commonStats.needTime++;
return NEED_TIME;
@@ -170,16 +169,13 @@ void FetchStage::doRestoreState() {
}
void FetchStage::doDetachFromOperationContext() {
- _txn = NULL;
if (_cursor)
_cursor->detachFromOperationContext();
}
-void FetchStage::doReattachToOperationContext(OperationContext* opCtx) {
- invariant(_txn == NULL);
- _txn = opCtx;
+void FetchStage::doReattachToOperationContext() {
if (_cursor)
- _cursor->reattachToOperationContext(opCtx);
+ _cursor->reattachToOperationContext(getOpCtx());
}
void FetchStage::doInvalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
diff --git a/src/mongo/db/exec/fetch.h b/src/mongo/db/exec/fetch.h
index 4cb2c37bca4..19aa5ac4cf2 100644
--- a/src/mongo/db/exec/fetch.h
+++ b/src/mongo/db/exec/fetch.h
@@ -63,7 +63,7 @@ public:
void doSaveState() final;
void doRestoreState() final;
void doDetachFromOperationContext() final;
- void doReattachToOperationContext(OperationContext* opCtx) final;
+ void doReattachToOperationContext() final;
void doInvalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) final;
StageType stageType() const final {
@@ -83,8 +83,6 @@ private:
*/
StageState returnIfMatches(WorkingSetMember* member, WorkingSetID memberID, WorkingSetID* out);
- OperationContext* _txn;
-
// Collection which is used by this stage. Used to resolve record ids retrieved by child
// stages. The lifetime of the collection must supersede that of the stage.
const Collection* _collection;
diff --git a/src/mongo/db/exec/group.cpp b/src/mongo/db/exec/group.cpp
index 891fc232796..2c0ea523a2f 100644
--- a/src/mongo/db/exec/group.cpp
+++ b/src/mongo/db/exec/group.cpp
@@ -77,8 +77,7 @@ GroupStage::GroupStage(OperationContext* txn,
const GroupRequest& request,
WorkingSet* workingSet,
PlanStage* child)
- : PlanStage(kStageType),
- _txn(txn),
+ : PlanStage(kStageType, txn),
_request(request),
_ws(workingSet),
_specificStats(),
@@ -94,7 +93,8 @@ void GroupStage::initGroupScripting() {
AuthorizationSession::get(ClientBasic::getCurrent())->getAuthenticatedUserNamesToken();
const NamespaceString nss(_request.ns);
- _scope = globalScriptEngine->getPooledScope(_txn, nss.db().toString(), "group" + userToken);
+ _scope =
+ globalScriptEngine->getPooledScope(getOpCtx(), nss.db().toString(), "group" + userToken);
if (!_request.reduceScope.isEmpty()) {
_scope->init(&_request.reduceScope);
}
@@ -257,10 +257,6 @@ bool GroupStage::isEOF() {
return _groupState == GroupState_Done;
}
-void GroupStage::doReattachToOperationContext(OperationContext* opCtx) {
- _txn = opCtx;
-}
-
unique_ptr<PlanStageStats> GroupStage::getStats() {
_commonStats.isEOF = isEOF();
unique_ptr<PlanStageStats> ret = make_unique<PlanStageStats>(_commonStats, STAGE_GROUP);
diff --git a/src/mongo/db/exec/group.h b/src/mongo/db/exec/group.h
index ce969c9335f..97a1a3d587b 100644
--- a/src/mongo/db/exec/group.h
+++ b/src/mongo/db/exec/group.h
@@ -89,7 +89,6 @@ public:
StageState work(WorkingSetID* out) final;
bool isEOF() final;
- void doReattachToOperationContext(OperationContext* opCtx) final;
StageType stageType() const final {
return STAGE_GROUP;
@@ -128,9 +127,6 @@ private:
// array.
BSONObj finalizeResults();
- // Transactional context for read locks. Not owned by us.
- OperationContext* _txn;
-
GroupRequest _request;
// The WorkingSet we annotate with results. Not owned by us.
diff --git a/src/mongo/db/exec/idhack.cpp b/src/mongo/db/exec/idhack.cpp
index a66ad20edda..570f80a19d9 100644
--- a/src/mongo/db/exec/idhack.cpp
+++ b/src/mongo/db/exec/idhack.cpp
@@ -54,8 +54,7 @@ IDHackStage::IDHackStage(OperationContext* txn,
const Collection* collection,
CanonicalQuery* query,
WorkingSet* ws)
- : PlanStage(kStageType),
- _txn(txn),
+ : PlanStage(kStageType, txn),
_collection(collection),
_workingSet(ws),
_key(query->getQueryObj()["_id"].wrap()),
@@ -72,8 +71,7 @@ IDHackStage::IDHackStage(OperationContext* txn,
Collection* collection,
const BSONObj& key,
WorkingSet* ws)
- : PlanStage(kStageType),
- _txn(txn),
+ : PlanStage(kStageType, txn),
_collection(collection),
_workingSet(ws),
_key(key),
@@ -108,7 +106,7 @@ PlanStage::StageState IDHackStage::work(WorkingSetID* out) {
WorkingSetID id = _idBeingPagedIn;
_idBeingPagedIn = WorkingSet::INVALID_ID;
- invariant(WorkingSetCommon::fetchIfUnfetched(_txn, _workingSet, id, _recordCursor));
+ invariant(WorkingSetCommon::fetchIfUnfetched(getOpCtx(), _workingSet, id, _recordCursor));
WorkingSetMember* member = _workingSet->get(id);
return advance(id, member, out);
@@ -120,14 +118,14 @@ PlanStage::StageState IDHackStage::work(WorkingSetID* out) {
const IndexCatalog* catalog = _collection->getIndexCatalog();
// Find the index we use.
- IndexDescriptor* idDesc = catalog->findIdIndex(_txn);
+ IndexDescriptor* idDesc = catalog->findIdIndex(getOpCtx());
if (NULL == idDesc) {
_done = true;
return PlanStage::IS_EOF;
}
// Look up the key by going directly to the index.
- RecordId loc = catalog->getIndex(idDesc)->findSingle(_txn, _key);
+ RecordId loc = catalog->getIndex(idDesc)->findSingle(getOpCtx(), _key);
// Key not found.
if (loc.isNull()) {
@@ -145,7 +143,7 @@ PlanStage::StageState IDHackStage::work(WorkingSetID* out) {
_workingSet->transitionToLocAndIdx(id);
if (!_recordCursor)
- _recordCursor = _collection->getCursor(_txn);
+ _recordCursor = _collection->getCursor(getOpCtx());
// We may need to request a yield while we fetch the document.
if (auto fetcher = _recordCursor->fetcherForId(loc)) {
@@ -159,7 +157,7 @@ PlanStage::StageState IDHackStage::work(WorkingSetID* out) {
}
// The doc was already in memory, so we go ahead and return it.
- if (!WorkingSetCommon::fetch(_txn, _workingSet, id, _recordCursor)) {
+ if (!WorkingSetCommon::fetch(getOpCtx(), _workingSet, id, _recordCursor)) {
// _id is immutable so the index would return the only record that could
// possibly match the query.
_workingSet->free(id);
@@ -210,16 +208,13 @@ void IDHackStage::doRestoreState() {
}
void IDHackStage::doDetachFromOperationContext() {
- _txn = NULL;
if (_recordCursor)
_recordCursor->detachFromOperationContext();
}
-void IDHackStage::doReattachToOperationContext(OperationContext* opCtx) {
- invariant(_txn == NULL);
- _txn = opCtx;
+void IDHackStage::doReattachToOperationContext() {
if (_recordCursor)
- _recordCursor->reattachToOperationContext(opCtx);
+ _recordCursor->reattachToOperationContext(getOpCtx());
}
void IDHackStage::doInvalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
diff --git a/src/mongo/db/exec/idhack.h b/src/mongo/db/exec/idhack.h
index 08a65f084c4..ef3df6f10a2 100644
--- a/src/mongo/db/exec/idhack.h
+++ b/src/mongo/db/exec/idhack.h
@@ -61,7 +61,7 @@ public:
void doSaveState() final;
void doRestoreState() final;
void doDetachFromOperationContext() final;
- void doReattachToOperationContext(OperationContext* opCtx) final;
+ void doReattachToOperationContext() final;
void doInvalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) final;
/**
@@ -87,9 +87,6 @@ private:
*/
StageState advance(WorkingSetID id, WorkingSetMember* member, WorkingSetID* out);
- // transactional context for read locks. Not owned by us
- OperationContext* _txn;
-
// Not owned here.
const Collection* _collection;
diff --git a/src/mongo/db/exec/index_scan.cpp b/src/mongo/db/exec/index_scan.cpp
index 32dfb4c8121..fdf3b037416 100644
--- a/src/mongo/db/exec/index_scan.cpp
+++ b/src/mongo/db/exec/index_scan.cpp
@@ -62,8 +62,7 @@ IndexScan::IndexScan(OperationContext* txn,
const IndexScanParams& params,
WorkingSet* workingSet,
const MatchExpression* filter)
- : PlanStage(kStageType),
- _txn(txn),
+ : PlanStage(kStageType, txn),
_workingSet(workingSet),
_iam(params.descriptor->getIndexCatalog()->getIndex(params.descriptor)),
_keyPattern(params.descriptor->keyPattern().getOwned()),
@@ -77,7 +76,7 @@ IndexScan::IndexScan(OperationContext* txn,
// any info we need for stats reporting out here.
_specificStats.keyPattern = _keyPattern;
_specificStats.indexName = _params.descriptor->indexName();
- _specificStats.isMultiKey = _params.descriptor->isMultikey(_txn);
+ _specificStats.isMultiKey = _params.descriptor->isMultikey(getOpCtx());
_specificStats.isUnique = _params.descriptor->unique();
_specificStats.isSparse = _params.descriptor->isSparse();
_specificStats.isPartial = _params.descriptor->isPartial();
@@ -89,11 +88,11 @@ boost::optional<IndexKeyEntry> IndexScan::initIndexScan() {
_shouldDedup = false;
} else {
// TODO it is incorrect to rely on this not changing. SERVER-17678
- _shouldDedup = _params.descriptor->isMultikey(_txn);
+ _shouldDedup = _params.descriptor->isMultikey(getOpCtx());
}
// Perform the possibly heavy-duty initialization of the underlying index cursor.
- _indexCursor = _iam->newCursor(_txn, _forward);
+ _indexCursor = _iam->newCursor(getOpCtx(), _forward);
if (_params.bounds.isSimpleRange) {
// Start at one key, end at another.
@@ -251,16 +250,13 @@ void IndexScan::doRestoreState() {
}
void IndexScan::doDetachFromOperationContext() {
- _txn = NULL;
if (_indexCursor)
_indexCursor->detachFromOperationContext();
}
-void IndexScan::doReattachToOperationContext(OperationContext* opCtx) {
- invariant(_txn == NULL);
- _txn = opCtx;
+void IndexScan::doReattachToOperationContext() {
if (_indexCursor)
- _indexCursor->reattachToOperationContext(opCtx);
+ _indexCursor->reattachToOperationContext(getOpCtx());
}
void IndexScan::doInvalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
diff --git a/src/mongo/db/exec/index_scan.h b/src/mongo/db/exec/index_scan.h
index aae3d54ec4c..e3e80fc7071 100644
--- a/src/mongo/db/exec/index_scan.h
+++ b/src/mongo/db/exec/index_scan.h
@@ -100,7 +100,7 @@ public:
void doSaveState() final;
void doRestoreState() final;
void doDetachFromOperationContext() final;
- void doReattachToOperationContext(OperationContext* opCtx) final;
+ void doReattachToOperationContext() final;
void doInvalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) final;
StageType stageType() const final {
@@ -119,9 +119,6 @@ private:
*/
boost::optional<IndexKeyEntry> initIndexScan();
- // transactional context for read locks. Not owned by us
- OperationContext* _txn;
-
// The WorkingSet we fill with results. Not owned by us.
WorkingSet* const _workingSet;
diff --git a/src/mongo/db/exec/keep_mutations.cpp b/src/mongo/db/exec/keep_mutations.cpp
index 2770c2b25d3..d43cbb560f8 100644
--- a/src/mongo/db/exec/keep_mutations.cpp
+++ b/src/mongo/db/exec/keep_mutations.cpp
@@ -41,10 +41,11 @@ using stdx::make_unique;
// static
const char* KeepMutationsStage::kStageType = "KEEP_MUTATIONS";
-KeepMutationsStage::KeepMutationsStage(const MatchExpression* filter,
+KeepMutationsStage::KeepMutationsStage(OperationContext* opCtx,
+ const MatchExpression* filter,
WorkingSet* ws,
PlanStage* child)
- : PlanStage(kStageType),
+ : PlanStage(kStageType, opCtx),
_workingSet(ws),
_filter(filter),
_doneReadingChild(false),
diff --git a/src/mongo/db/exec/keep_mutations.h b/src/mongo/db/exec/keep_mutations.h
index 67ff15afca1..878de151549 100644
--- a/src/mongo/db/exec/keep_mutations.h
+++ b/src/mongo/db/exec/keep_mutations.h
@@ -46,7 +46,10 @@ namespace mongo {
*/
class KeepMutationsStage final : public PlanStage {
public:
- KeepMutationsStage(const MatchExpression* filter, WorkingSet* ws, PlanStage* child);
+ KeepMutationsStage(OperationContext* opCtx,
+ const MatchExpression* filter,
+ WorkingSet* ws,
+ PlanStage* child);
~KeepMutationsStage();
bool isEOF() final;
diff --git a/src/mongo/db/exec/limit.cpp b/src/mongo/db/exec/limit.cpp
index a2c1d73b399..c4ea7485c3a 100644
--- a/src/mongo/db/exec/limit.cpp
+++ b/src/mongo/db/exec/limit.cpp
@@ -42,8 +42,8 @@ using stdx::make_unique;
// static
const char* LimitStage::kStageType = "LIMIT";
-LimitStage::LimitStage(long long limit, WorkingSet* ws, PlanStage* child)
- : PlanStage(kStageType), _ws(ws), _numToReturn(limit) {
+LimitStage::LimitStage(OperationContext* opCtx, long long limit, WorkingSet* ws, PlanStage* child)
+ : PlanStage(kStageType, opCtx), _ws(ws), _numToReturn(limit) {
_specificStats.limit = _numToReturn;
_children.emplace_back(child);
}
diff --git a/src/mongo/db/exec/limit.h b/src/mongo/db/exec/limit.h
index de8d1ae2598..f51061d6049 100644
--- a/src/mongo/db/exec/limit.h
+++ b/src/mongo/db/exec/limit.h
@@ -44,7 +44,7 @@ namespace mongo {
*/
class LimitStage final : public PlanStage {
public:
- LimitStage(long long limit, WorkingSet* ws, PlanStage* child);
+ LimitStage(OperationContext* opCtx, long long limit, WorkingSet* ws, PlanStage* child);
~LimitStage();
bool isEOF() final;
diff --git a/src/mongo/db/exec/merge_sort.cpp b/src/mongo/db/exec/merge_sort.cpp
index 5bf7db4b9ea..8faf6cfdeb3 100644
--- a/src/mongo/db/exec/merge_sort.cpp
+++ b/src/mongo/db/exec/merge_sort.cpp
@@ -45,10 +45,11 @@ using stdx::make_unique;
// static
const char* MergeSortStage::kStageType = "SORT_MERGE";
-MergeSortStage::MergeSortStage(const MergeSortStageParams& params,
+MergeSortStage::MergeSortStage(OperationContext* opCtx,
+ const MergeSortStageParams& params,
WorkingSet* ws,
const Collection* collection)
- : PlanStage(kStageType),
+ : PlanStage(kStageType, opCtx),
_collection(collection),
_ws(ws),
_pattern(params.pattern),
diff --git a/src/mongo/db/exec/merge_sort.h b/src/mongo/db/exec/merge_sort.h
index ed9fd2f2a11..f95eb8f9a71 100644
--- a/src/mongo/db/exec/merge_sort.h
+++ b/src/mongo/db/exec/merge_sort.h
@@ -55,7 +55,8 @@ class MergeSortStageParams;
*/
class MergeSortStage final : public PlanStage {
public:
- MergeSortStage(const MergeSortStageParams& params,
+ MergeSortStage(OperationContext* opCtx,
+ const MergeSortStageParams& params,
WorkingSet* ws,
const Collection* collection);
diff --git a/src/mongo/db/exec/multi_iterator.cpp b/src/mongo/db/exec/multi_iterator.cpp
index f08052e06f3..508c58e86dd 100644
--- a/src/mongo/db/exec/multi_iterator.cpp
+++ b/src/mongo/db/exec/multi_iterator.cpp
@@ -46,8 +46,7 @@ const char* MultiIteratorStage::kStageType = "MULTI_ITERATOR";
MultiIteratorStage::MultiIteratorStage(OperationContext* txn,
WorkingSet* ws,
Collection* collection)
- : PlanStage(kStageType),
- _txn(txn),
+ : PlanStage(kStageType, txn),
_collection(collection),
_ws(ws),
_wsidForFetch(_ws->allocate()) {}
@@ -92,7 +91,7 @@ PlanStage::StageState MultiIteratorStage::work(WorkingSetID* out) {
*out = _ws->allocate();
WorkingSetMember* member = _ws->get(*out);
member->loc = record->id;
- member->obj = {_txn->recoveryUnit()->getSnapshotId(), record->data.releaseToBson()};
+ member->obj = {getOpCtx()->recoveryUnit()->getSnapshotId(), record->data.releaseToBson()};
_ws->transitionToLocAndObj(*out);
return PlanStage::ADVANCED;
}
@@ -121,17 +120,14 @@ void MultiIteratorStage::doRestoreState() {
}
void MultiIteratorStage::doDetachFromOperationContext() {
- _txn = NULL;
for (auto&& iterator : _iterators) {
iterator->detachFromOperationContext();
}
}
-void MultiIteratorStage::doReattachToOperationContext(OperationContext* opCtx) {
- invariant(_txn == NULL);
- _txn = opCtx;
+void MultiIteratorStage::doReattachToOperationContext() {
for (auto&& iterator : _iterators) {
- iterator->reattachToOperationContext(opCtx);
+ iterator->reattachToOperationContext(getOpCtx());
}
}
diff --git a/src/mongo/db/exec/multi_iterator.h b/src/mongo/db/exec/multi_iterator.h
index 9d67d29abaa..8c260e2042b 100644
--- a/src/mongo/db/exec/multi_iterator.h
+++ b/src/mongo/db/exec/multi_iterator.h
@@ -60,7 +60,7 @@ public:
void doSaveState() final;
void doRestoreState() final;
void doDetachFromOperationContext() final;
- void doReattachToOperationContext(OperationContext* opCtx) final;
+ void doReattachToOperationContext() final;
void doInvalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) final;
// Returns empty PlanStageStats object
diff --git a/src/mongo/db/exec/multi_plan.cpp b/src/mongo/db/exec/multi_plan.cpp
index 18188ab6b68..1f50464039a 100644
--- a/src/mongo/db/exec/multi_plan.cpp
+++ b/src/mongo/db/exec/multi_plan.cpp
@@ -65,8 +65,7 @@ MultiPlanStage::MultiPlanStage(OperationContext* txn,
const Collection* collection,
CanonicalQuery* cq,
bool shouldCache)
- : PlanStage(kStageType),
- _txn(txn),
+ : PlanStage(kStageType, txn),
_collection(collection),
_shouldCache(shouldCache),
_query(cq),
@@ -219,7 +218,7 @@ Status MultiPlanStage::pickBestPlan(PlanYieldPolicy* yieldPolicy) {
// make sense.
ScopedTimer timer(&_commonStats.executionTimeMillis);
- size_t numWorks = getTrialPeriodWorks(_txn, _collection);
+ size_t numWorks = getTrialPeriodWorks(getOpCtx(), _collection);
size_t numResults = getTrialPeriodNumToReturn(*_query);
// Work the plans, stopping when a plan hits EOF or returns some
@@ -388,10 +387,6 @@ bool MultiPlanStage::workAllPlans(size_t numResults, PlanYieldPolicy* yieldPolic
return !doneWorking;
}
-void MultiPlanStage::doReattachToOperationContext(OperationContext* opCtx) {
- _txn = opCtx;
-}
-
namespace {
void invalidateHelper(OperationContext* txn,
diff --git a/src/mongo/db/exec/multi_plan.h b/src/mongo/db/exec/multi_plan.h
index 85cc3191d13..f0c13916a7d 100644
--- a/src/mongo/db/exec/multi_plan.h
+++ b/src/mongo/db/exec/multi_plan.h
@@ -66,8 +66,6 @@ public:
StageState work(WorkingSetID* out) final;
- void doReattachToOperationContext(OperationContext* opCtx) final;
-
void doInvalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) final;
StageType stageType() const final {
@@ -167,9 +165,6 @@ private:
static const int kNoSuchPlan = -1;
- // Not owned here.
- OperationContext* _txn;
-
// Not owned here. Must be non-null.
const Collection* _collection;
diff --git a/src/mongo/db/exec/near.cpp b/src/mongo/db/exec/near.cpp
index 5cf3018c8a7..1568339cbfe 100644
--- a/src/mongo/db/exec/near.cpp
+++ b/src/mongo/db/exec/near.cpp
@@ -46,8 +46,7 @@ NearStage::NearStage(OperationContext* txn,
StageType type,
WorkingSet* workingSet,
Collection* collection)
- : PlanStage(typeName),
- _txn(txn),
+ : PlanStage(typeName, txn),
_workingSet(workingSet),
_collection(collection),
_searchState(SearchState_Initializing),
@@ -70,7 +69,7 @@ NearStage::CoveredInterval::CoveredInterval(PlanStage* covering,
PlanStage::StageState NearStage::initNext(WorkingSetID* out) {
- PlanStage::StageState state = initialize(_txn, _workingSet, _collection, out);
+ PlanStage::StageState state = initialize(getOpCtx(), _workingSet, _collection, out);
if (state == PlanStage::IS_EOF) {
_searchState = SearchState_Buffering;
return PlanStage::NEED_TIME;
@@ -150,7 +149,8 @@ PlanStage::StageState NearStage::bufferNext(WorkingSetID* toReturn, Status* erro
//
if (!_nextInterval) {
- StatusWith<CoveredInterval*> intervalStatus = nextInterval(_txn, _workingSet, _collection);
+ StatusWith<CoveredInterval*> intervalStatus =
+ nextInterval(getOpCtx(), _workingSet, _collection);
if (!intervalStatus.isOK()) {
_searchState = SearchState_Finished;
*error = intervalStatus.getStatus();
@@ -302,10 +302,6 @@ bool NearStage::isEOF() {
return SearchState_Finished == _searchState;
}
-void NearStage::doReattachToOperationContext(OperationContext* opCtx) {
- _txn = opCtx;
-}
-
void NearStage::doInvalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
// If a result is in _resultBuffer and has a RecordId it will be in _seenDocuments as
// well. It's safe to return the result w/o the RecordId, so just fetch the result.
diff --git a/src/mongo/db/exec/near.h b/src/mongo/db/exec/near.h
index 1a10d9143da..aee013cadc6 100644
--- a/src/mongo/db/exec/near.h
+++ b/src/mongo/db/exec/near.h
@@ -92,7 +92,6 @@ public:
bool isEOF() final;
StageState work(WorkingSetID* out) final;
- void doReattachToOperationContext(OperationContext* opCtx) final;
void doInvalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) final;
StageType stageType() const final;
@@ -161,8 +160,6 @@ private:
//
// Not owned here
- OperationContext* _txn;
- // Not owned here
WorkingSet* const _workingSet;
// Not owned here, used for fetching buffered results before invalidation
Collection* const _collection;
diff --git a/src/mongo/db/exec/oplogstart.cpp b/src/mongo/db/exec/oplogstart.cpp
index f43272caf47..523889df14a 100644
--- a/src/mongo/db/exec/oplogstart.cpp
+++ b/src/mongo/db/exec/oplogstart.cpp
@@ -47,8 +47,7 @@ OplogStart::OplogStart(OperationContext* txn,
const Collection* collection,
MatchExpression* filter,
WorkingSet* ws)
- : PlanStage(kStageType),
- _txn(txn),
+ : PlanStage(kStageType, txn),
_needInit(true),
_backwardsScanning(false),
_extentHopping(false),
@@ -63,7 +62,7 @@ PlanStage::StageState OplogStart::work(WorkingSetID* out) {
CollectionScanParams params;
params.collection = _collection;
params.direction = CollectionScanParams::BACKWARD;
- _children.emplace_back(new CollectionScan(_txn, params, _workingSet, NULL));
+ _children.emplace_back(new CollectionScan(getOpCtx(), params, _workingSet, NULL));
_needInit = false;
_backwardsScanning = true;
@@ -108,7 +107,7 @@ PlanStage::StageState OplogStart::workExtentHopping(WorkingSetID* out) {
WorkingSetID id = _workingSet->allocate();
WorkingSetMember* member = _workingSet->get(id);
member->loc = record->id;
- member->obj = {_txn->recoveryUnit()->getSnapshotId(), std::move(obj)};
+ member->obj = {getOpCtx()->recoveryUnit()->getSnapshotId(), std::move(obj)};
_workingSet->transitionToLocAndObj(id);
*out = id;
return PlanStage::ADVANCED;
@@ -125,7 +124,7 @@ PlanStage::StageState OplogStart::workExtentHopping(WorkingSetID* out) {
void OplogStart::switchToExtentHopping() {
// Set up our extent hopping state.
- _subIterators = _collection->getManyCursors(_txn);
+ _subIterators = _collection->getManyCursors(getOpCtx());
// Transition from backwards scanning to extent hopping.
_backwardsScanning = false;
@@ -197,17 +196,14 @@ void OplogStart::doRestoreState() {
}
void OplogStart::doDetachFromOperationContext() {
- _txn = NULL;
for (auto&& iterator : _subIterators) {
iterator->detachFromOperationContext();
}
}
-void OplogStart::doReattachToOperationContext(OperationContext* opCtx) {
- invariant(_txn == NULL);
- _txn = opCtx;
+void OplogStart::doReattachToOperationContext() {
for (auto&& iterator : _subIterators) {
- iterator->reattachToOperationContext(opCtx);
+ iterator->reattachToOperationContext(getOpCtx());
}
}
diff --git a/src/mongo/db/exec/oplogstart.h b/src/mongo/db/exec/oplogstart.h
index 033f51a8c9d..7d9c10e5df8 100644
--- a/src/mongo/db/exec/oplogstart.h
+++ b/src/mongo/db/exec/oplogstart.h
@@ -73,7 +73,7 @@ public:
void doSaveState() final;
void doRestoreState() final;
void doDetachFromOperationContext() final;
- void doReattachToOperationContext(OperationContext* opCtx) final;
+ void doReattachToOperationContext() final;
// Returns empty PlanStageStats object
std::unique_ptr<PlanStageStats> getStats() final;
@@ -110,9 +110,6 @@ private:
StageState workExtentHopping(WorkingSetID* out);
- // transactional context for read locks. Not owned by us
- OperationContext* _txn;
-
// This is only used for the extent hopping scan.
std::vector<std::unique_ptr<RecordCursor>> _subIterators;
diff --git a/src/mongo/db/exec/or.cpp b/src/mongo/db/exec/or.cpp
index f1ce36bdc5b..0bc195ca236 100644
--- a/src/mongo/db/exec/or.cpp
+++ b/src/mongo/db/exec/or.cpp
@@ -43,8 +43,8 @@ using stdx::make_unique;
// static
const char* OrStage::kStageType = "OR";
-OrStage::OrStage(WorkingSet* ws, bool dedup, const MatchExpression* filter)
- : PlanStage(kStageType), _ws(ws), _filter(filter), _currentChild(0), _dedup(dedup) {}
+OrStage::OrStage(OperationContext* opCtx, WorkingSet* ws, bool dedup, const MatchExpression* filter)
+ : PlanStage(kStageType, opCtx), _ws(ws), _filter(filter), _currentChild(0), _dedup(dedup) {}
void OrStage::addChild(PlanStage* child) {
_children.emplace_back(child);
diff --git a/src/mongo/db/exec/or.h b/src/mongo/db/exec/or.h
index d0fd1a33209..ebe9d7bc10c 100644
--- a/src/mongo/db/exec/or.h
+++ b/src/mongo/db/exec/or.h
@@ -45,7 +45,7 @@ namespace mongo {
*/
class OrStage final : public PlanStage {
public:
- OrStage(WorkingSet* ws, bool dedup, const MatchExpression* filter);
+ OrStage(OperationContext* opCtx, WorkingSet* ws, bool dedup, const MatchExpression* filter);
void addChild(PlanStage* child);
diff --git a/src/mongo/db/exec/pipeline_proxy.cpp b/src/mongo/db/exec/pipeline_proxy.cpp
index 36e2eff10b6..e45318bf3ce 100644
--- a/src/mongo/db/exec/pipeline_proxy.cpp
+++ b/src/mongo/db/exec/pipeline_proxy.cpp
@@ -45,10 +45,11 @@ using stdx::make_unique;
const char* PipelineProxyStage::kStageType = "PIPELINE_PROXY";
-PipelineProxyStage::PipelineProxyStage(intrusive_ptr<Pipeline> pipeline,
+PipelineProxyStage::PipelineProxyStage(OperationContext* opCtx,
+ intrusive_ptr<Pipeline> pipeline,
const std::shared_ptr<PlanExecutor>& child,
WorkingSet* ws)
- : PlanStage(kStageType),
+ : PlanStage(kStageType, opCtx),
_pipeline(pipeline),
_includeMetaData(_pipeline->getContext()->inShard), // send metadata to merger
_childExec(child),
@@ -107,11 +108,11 @@ void PipelineProxyStage::doDetachFromOperationContext() {
}
}
-void PipelineProxyStage::doReattachToOperationContext(OperationContext* opCtx) {
+void PipelineProxyStage::doReattachToOperationContext() {
invariant(_pipeline->getContext()->opCtx == NULL);
- _pipeline->getContext()->opCtx = opCtx;
+ _pipeline->getContext()->opCtx = getOpCtx();
if (auto child = getChildExecutor()) {
- child->reattachToOperationContext(opCtx);
+ child->reattachToOperationContext(getOpCtx());
}
}
diff --git a/src/mongo/db/exec/pipeline_proxy.h b/src/mongo/db/exec/pipeline_proxy.h
index a67d993d4a3..6c85f117470 100644
--- a/src/mongo/db/exec/pipeline_proxy.h
+++ b/src/mongo/db/exec/pipeline_proxy.h
@@ -44,7 +44,8 @@ namespace mongo {
*/
class PipelineProxyStage final : public PlanStage {
public:
- PipelineProxyStage(boost::intrusive_ptr<Pipeline> pipeline,
+ PipelineProxyStage(OperationContext* opCtx,
+ boost::intrusive_ptr<Pipeline> pipeline,
const std::shared_ptr<PlanExecutor>& child,
WorkingSet* ws);
@@ -58,7 +59,7 @@ public:
// Manage our OperationContext.
//
void doDetachFromOperationContext() final;
- void doReattachToOperationContext(OperationContext* opCtx) final;
+ void doReattachToOperationContext() final;
/**
* Make obj the next object returned by getNext().
diff --git a/src/mongo/db/exec/plan_stage.cpp b/src/mongo/db/exec/plan_stage.cpp
index 13303e94acd..95fe9b0ed95 100644
--- a/src/mongo/db/exec/plan_stage.cpp
+++ b/src/mongo/db/exec/plan_stage.cpp
@@ -62,6 +62,9 @@ void PlanStage::invalidate(OperationContext* txn, const RecordId& dl, Invalidati
}
void PlanStage::detachFromOperationContext() {
+ invariant(_opCtx);
+ _opCtx = nullptr;
+
for (auto&& child : _children) {
child->detachFromOperationContext();
}
@@ -70,11 +73,14 @@ void PlanStage::detachFromOperationContext() {
}
void PlanStage::reattachToOperationContext(OperationContext* opCtx) {
+ invariant(_opCtx == nullptr);
+ _opCtx = opCtx;
+
for (auto&& child : _children) {
child->reattachToOperationContext(opCtx);
}
- doReattachToOperationContext(opCtx);
+ doReattachToOperationContext();
}
} // namespace mongo
diff --git a/src/mongo/db/exec/plan_stage.h b/src/mongo/db/exec/plan_stage.h
index cd552144def..6cef4247683 100644
--- a/src/mongo/db/exec/plan_stage.h
+++ b/src/mongo/db/exec/plan_stage.h
@@ -104,7 +104,8 @@ class OperationContext;
*/
class PlanStage {
public:
- PlanStage(const char* typeName) : _commonStats(typeName) {}
+ PlanStage(const char* typeName, OperationContext* opCtx)
+ : _commonStats(typeName), _opCtx(opCtx) {}
virtual ~PlanStage() {}
@@ -323,17 +324,18 @@ protected:
/**
* Does stage-specific detaching.
+ *
+ * Implementations of this method cannot use the pointer returned from getOpCtx().
*/
virtual void doDetachFromOperationContext() {}
/**
* Does stage-specific attaching.
*
- * If the stage needs an OperationContext during its execution, it may keep a handle to the
- * provided OperationContext (which is valid until the next call to
- * doDetachFromOperationContext()).
+ * If an OperationContext* is needed, use getOpCtx(), which will return a valid
+ * OperationContext* (the one to which the stage is reattaching).
*/
- virtual void doReattachToOperationContext(OperationContext* opCtx) {}
+ virtual void doReattachToOperationContext() {}
/**
* Does the stage-specific invalidation work.
@@ -350,8 +352,15 @@ protected:
return _children.front();
}
+ OperationContext* getOpCtx() const {
+ return _opCtx;
+ }
+
Children _children;
CommonStats _commonStats;
+
+private:
+ OperationContext* _opCtx;
};
} // namespace mongo
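
Seen from the executor side, the invariants added above make mismatched
detach/reattach calls fail fast. A hypothetical yield sequence sketching
the intended protocol (caller names assumed for illustration; in practice
PlanExecutor drives these calls):

    // Park the plan tree while the lock and OperationContext are given up.
    root->saveState();
    root->detachFromOperationContext();          // whole tree: _opCtx -> nullptr

    // ... the original OperationContext may be destroyed or replaced here ...

    root->reattachToOperationContext(newOpCtx);  // whole tree: _opCtx -> newOpCtx
    root->restoreState();                        // cursors recover against newOpCtx

Because the base class clears _opCtx before invoking the hooks and restores
it before reinvoking them, the per-stage overrides below (projection,
queued-data, shard-filter, etc.) never need to touch the pointer themselves.
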
diff --git a/src/mongo/db/exec/projection.cpp b/src/mongo/db/exec/projection.cpp
index a5698a266ab..b03fd8f3175 100644
--- a/src/mongo/db/exec/projection.cpp
+++ b/src/mongo/db/exec/projection.cpp
@@ -52,10 +52,11 @@ static const char* kIdField = "_id";
// static
const char* ProjectionStage::kStageType = "PROJECTION";
-ProjectionStage::ProjectionStage(const ProjectionStageParams& params,
+ProjectionStage::ProjectionStage(OperationContext* opCtx,
+ const ProjectionStageParams& params,
WorkingSet* ws,
PlanStage* child)
- : PlanStage(kStageType), _ws(ws), _projImpl(params.projImpl) {
+ : PlanStage(kStageType, opCtx), _ws(ws), _projImpl(params.projImpl) {
_children.emplace_back(child);
_projObj = params.projObj;
diff --git a/src/mongo/db/exec/projection.h b/src/mongo/db/exec/projection.h
index 58588b3c1dc..710a2374822 100644
--- a/src/mongo/db/exec/projection.h
+++ b/src/mongo/db/exec/projection.h
@@ -75,7 +75,10 @@ struct ProjectionStageParams {
*/
class ProjectionStage final : public PlanStage {
public:
- ProjectionStage(const ProjectionStageParams& params, WorkingSet* ws, PlanStage* child);
+ ProjectionStage(OperationContext* opCtx,
+ const ProjectionStageParams& params,
+ WorkingSet* ws,
+ PlanStage* child);
bool isEOF() final;
StageState work(WorkingSetID* out) final;
diff --git a/src/mongo/db/exec/queued_data_stage.cpp b/src/mongo/db/exec/queued_data_stage.cpp
index 36edcab1893..9ea015f9c62 100644
--- a/src/mongo/db/exec/queued_data_stage.cpp
+++ b/src/mongo/db/exec/queued_data_stage.cpp
@@ -40,7 +40,8 @@ using stdx::make_unique;
const char* QueuedDataStage::kStageType = "QUEUED_DATA";
-QueuedDataStage::QueuedDataStage(WorkingSet* ws) : PlanStage(kStageType), _ws(ws) {}
+QueuedDataStage::QueuedDataStage(OperationContext* opCtx, WorkingSet* ws)
+ : PlanStage(kStageType, opCtx), _ws(ws) {}
PlanStage::StageState QueuedDataStage::work(WorkingSetID* out) {
++_commonStats.works;
diff --git a/src/mongo/db/exec/queued_data_stage.h b/src/mongo/db/exec/queued_data_stage.h
index a3e57f6101f..7e44bc7f3d5 100644
--- a/src/mongo/db/exec/queued_data_stage.h
+++ b/src/mongo/db/exec/queued_data_stage.h
@@ -47,7 +47,7 @@ class RecordId;
*/
class QueuedDataStage final : public PlanStage {
public:
- QueuedDataStage(WorkingSet* ws);
+ QueuedDataStage(OperationContext* opCtx, WorkingSet* ws);
StageState work(WorkingSetID* out) final;
diff --git a/src/mongo/db/exec/queued_data_stage_test.cpp b/src/mongo/db/exec/queued_data_stage_test.cpp
index c3aa0e88d63..e26d303a833 100644
--- a/src/mongo/db/exec/queued_data_stage_test.cpp
+++ b/src/mongo/db/exec/queued_data_stage_test.cpp
@@ -32,6 +32,7 @@
#include "mongo/db/exec/queued_data_stage.h"
#include "mongo/db/exec/working_set.h"
+#include "mongo/stdx/memory.h"
#include "mongo/unittest/unittest.h"
using namespace mongo;
@@ -39,13 +40,14 @@ using namespace mongo;
namespace {
using std::unique_ptr;
+using stdx::make_unique;
//
// Basic test that we get out valid stats objects.
//
TEST(QueuedDataStageTest, getValidStats) {
WorkingSet ws;
- unique_ptr<QueuedDataStage> mock(new QueuedDataStage(&ws));
+ auto mock = make_unique<QueuedDataStage>(nullptr, &ws);
const CommonStats* commonStats = mock->getCommonStats();
ASSERT_EQUALS(commonStats->works, static_cast<size_t>(0));
const SpecificStats* specificStats = mock->getSpecificStats();
@@ -60,7 +62,7 @@ TEST(QueuedDataStageTest, getValidStats) {
TEST(QueuedDataStageTest, validateStats) {
WorkingSet ws;
WorkingSetID wsID;
- unique_ptr<QueuedDataStage> mock(new QueuedDataStage(&ws));
+ auto mock = make_unique<QueuedDataStage>(nullptr, &ws);
// make sure that we're at all zero
const CommonStats* stats = mock->getCommonStats();
diff --git a/src/mongo/db/exec/shard_filter.cpp b/src/mongo/db/exec/shard_filter.cpp
index 1339e13bd3e..e62b1bc8549 100644
--- a/src/mongo/db/exec/shard_filter.cpp
+++ b/src/mongo/db/exec/shard_filter.cpp
@@ -50,10 +50,11 @@ using stdx::make_unique;
// static
const char* ShardFilterStage::kStageType = "SHARDING_FILTER";
-ShardFilterStage::ShardFilterStage(const shared_ptr<CollectionMetadata>& metadata,
+ShardFilterStage::ShardFilterStage(OperationContext* opCtx,
+ const shared_ptr<CollectionMetadata>& metadata,
WorkingSet* ws,
PlanStage* child)
- : PlanStage(kStageType), _ws(ws), _metadata(metadata) {
+ : PlanStage(kStageType, opCtx), _ws(ws), _metadata(metadata) {
_children.emplace_back(child);
}
diff --git a/src/mongo/db/exec/shard_filter.h b/src/mongo/db/exec/shard_filter.h
index 393cdbe8975..88a86c3577a 100644
--- a/src/mongo/db/exec/shard_filter.h
+++ b/src/mongo/db/exec/shard_filter.h
@@ -71,7 +71,8 @@ class CollectionMetadata;
*/
class ShardFilterStage final : public PlanStage {
public:
- ShardFilterStage(const std::shared_ptr<CollectionMetadata>& metadata,
+ ShardFilterStage(OperationContext* opCtx,
+ const std::shared_ptr<CollectionMetadata>& metadata,
WorkingSet* ws,
PlanStage* child);
~ShardFilterStage();
diff --git a/src/mongo/db/exec/skip.cpp b/src/mongo/db/exec/skip.cpp
index 140c4a1bdb9..15f829ed8bd 100644
--- a/src/mongo/db/exec/skip.cpp
+++ b/src/mongo/db/exec/skip.cpp
@@ -41,8 +41,8 @@ using stdx::make_unique;
// static
const char* SkipStage::kStageType = "SKIP";
-SkipStage::SkipStage(long long toSkip, WorkingSet* ws, PlanStage* child)
- : PlanStage(kStageType), _ws(ws), _toSkip(toSkip) {
+SkipStage::SkipStage(OperationContext* opCtx, long long toSkip, WorkingSet* ws, PlanStage* child)
+ : PlanStage(kStageType, opCtx), _ws(ws), _toSkip(toSkip) {
_children.emplace_back(child);
}
diff --git a/src/mongo/db/exec/skip.h b/src/mongo/db/exec/skip.h
index 45e01bbd120..e97a75b9b64 100644
--- a/src/mongo/db/exec/skip.h
+++ b/src/mongo/db/exec/skip.h
@@ -43,7 +43,7 @@ namespace mongo {
*/
class SkipStage final : public PlanStage {
public:
- SkipStage(long long toSkip, WorkingSet* ws, PlanStage* child);
+ SkipStage(OperationContext* opCtx, long long toSkip, WorkingSet* ws, PlanStage* child);
~SkipStage();
bool isEOF() final;
diff --git a/src/mongo/db/exec/sort.cpp b/src/mongo/db/exec/sort.cpp
index 18bb6b2faa0..b61fa7e7982 100644
--- a/src/mongo/db/exec/sort.cpp
+++ b/src/mongo/db/exec/sort.cpp
@@ -278,8 +278,11 @@ bool SortStage::WorkingSetComparator::operator()(const SortableDataItem& lhs,
return lhs.loc < rhs.loc;
}
-SortStage::SortStage(const SortStageParams& params, WorkingSet* ws, PlanStage* child)
- : PlanStage(kStageType),
+SortStage::SortStage(OperationContext* opCtx,
+ const SortStageParams& params,
+ WorkingSet* ws,
+ PlanStage* child)
+ : PlanStage(kStageType, opCtx),
_collection(params.collection),
_ws(ws),
_pattern(params.pattern),
diff --git a/src/mongo/db/exec/sort.h b/src/mongo/db/exec/sort.h
index f0a29b84efc..2d9f4e6c331 100644
--- a/src/mongo/db/exec/sort.h
+++ b/src/mongo/db/exec/sort.h
@@ -141,7 +141,10 @@ private:
*/
class SortStage final : public PlanStage {
public:
- SortStage(const SortStageParams& params, WorkingSet* ws, PlanStage* child);
+ SortStage(OperationContext* opCtx,
+ const SortStageParams& params,
+ WorkingSet* ws,
+ PlanStage* child);
~SortStage();
bool isEOF() final;
diff --git a/src/mongo/db/exec/sort_test.cpp b/src/mongo/db/exec/sort_test.cpp
index a60146ef1f9..a998304028d 100644
--- a/src/mongo/db/exec/sort_test.cpp
+++ b/src/mongo/db/exec/sort_test.cpp
@@ -45,9 +45,9 @@ TEST(SortStageTest, SortEmptyWorkingSet) {
WorkingSet ws;
// QueuedDataStage will be owned by SortStage.
- QueuedDataStage* ms = new QueuedDataStage(&ws);
+ QueuedDataStage* ms = new QueuedDataStage(nullptr, &ws);
SortStageParams params;
- SortStage sort(params, &ws, ms);
+ SortStage sort(nullptr, params, &ws, ms);
// Check initial EOF state.
ASSERT_TRUE(ms->isEOF());
@@ -87,7 +87,7 @@ void testWork(const char* patternStr,
WorkingSet ws;
// QueuedDataStage will be owned by SortStage.
- QueuedDataStage* ms = new QueuedDataStage(&ws);
+ QueuedDataStage* ms = new QueuedDataStage(nullptr, &ws);
BSONObj inputObj = fromjson(inputStr);
BSONElement inputElt = inputObj.getField("input");
ASSERT(inputElt.isABSONObj());
@@ -112,7 +112,7 @@ void testWork(const char* patternStr,
params.query = fromjson(queryStr);
params.limit = limit;
- SortStage sort(params, &ws, ms);
+ SortStage sort(nullptr, params, &ws, ms);
WorkingSetID id = WorkingSet::INVALID_ID;
PlanStage::StageState state = PlanStage::NEED_TIME;
diff --git a/src/mongo/db/exec/stagedebug_cmd.cpp b/src/mongo/db/exec/stagedebug_cmd.cpp
index 38bffc6dcb0..765b851d89b 100644
--- a/src/mongo/db/exec/stagedebug_cmd.cpp
+++ b/src/mongo/db/exec/stagedebug_cmd.cpp
@@ -273,7 +273,7 @@ public:
uassert(
16921, "Nodes argument must be provided to AND", nodeArgs["nodes"].isABSONObj());
- unique_ptr<AndHashStage> andStage(new AndHashStage(workingSet, collection));
+ auto andStage = make_unique<AndHashStage>(txn, workingSet, collection);
int nodesAdded = 0;
BSONObjIterator it(nodeArgs["nodes"].Obj());
@@ -296,7 +296,7 @@ public:
uassert(
16924, "Nodes argument must be provided to AND", nodeArgs["nodes"].isABSONObj());
- unique_ptr<AndSortedStage> andStage(new AndSortedStage(workingSet, collection));
+ auto andStage = make_unique<AndSortedStage>(txn, workingSet, collection);
int nodesAdded = 0;
BSONObjIterator it(nodeArgs["nodes"].Obj());
@@ -320,7 +320,7 @@ public:
16934, "Nodes argument must be provided to AND", nodeArgs["nodes"].isABSONObj());
uassert(16935, "Dedup argument must be provided to OR", !nodeArgs["dedup"].eoo());
BSONObjIterator it(nodeArgs["nodes"].Obj());
- unique_ptr<OrStage> orStage(new OrStage(workingSet, nodeArgs["dedup"].Bool(), matcher));
+ auto orStage = make_unique<OrStage>(txn, workingSet, nodeArgs["dedup"].Bool(), matcher);
while (it.more()) {
BSONElement e = it.next();
if (!e.isABSONObj()) {
@@ -354,7 +354,7 @@ public:
uassert(28732,
"Can't parse sub-node of LIMIT: " + nodeArgs["node"].Obj().toString(),
NULL != subNode);
- return new LimitStage(nodeArgs["num"].numberInt(), workingSet, subNode);
+ return new LimitStage(txn, nodeArgs["num"].numberInt(), workingSet, subNode);
} else if ("skip" == nodeName) {
uassert(
16938, "Skip stage doesn't have a filter (put it on the child)", NULL == matcher);
@@ -365,7 +365,7 @@ public:
uassert(28733,
"Can't parse sub-node of SKIP: " + nodeArgs["node"].Obj().toString(),
NULL != subNode);
- return new SkipStage(nodeArgs["num"].numberInt(), workingSet, subNode);
+ return new SkipStage(txn, nodeArgs["num"].numberInt(), workingSet, subNode);
} else if ("cscan" == nodeName) {
CollectionScanParams params;
params.collection = collection;
@@ -406,8 +406,7 @@ public:
params.pattern = nodeArgs["pattern"].Obj();
// Dedup is true by default.
- unique_ptr<MergeSortStage> mergeStage(
- new MergeSortStage(params, workingSet, collection));
+ auto mergeStage = make_unique<MergeSortStage>(txn, params, workingSet, collection);
BSONObjIterator it(nodeArgs["nodes"].Obj());
while (it.more()) {
diff --git a/src/mongo/db/exec/subplan.cpp b/src/mongo/db/exec/subplan.cpp
index 7c6e5584d13..ccdc040bc38 100644
--- a/src/mongo/db/exec/subplan.cpp
+++ b/src/mongo/db/exec/subplan.cpp
@@ -59,8 +59,7 @@ SubplanStage::SubplanStage(OperationContext* txn,
WorkingSet* ws,
const QueryPlannerParams& params,
CanonicalQuery* cq)
- : PlanStage(kStageType),
- _txn(txn),
+ : PlanStage(kStageType, txn),
_collection(collection),
_ws(ws),
_plannerParams(params),
@@ -186,7 +185,7 @@ Status SubplanStage::planSubqueries() {
LOG(5) << "Subplanner: index " << i << " is " << ie.toString();
}
- const WhereCallbackReal whereCallback(_txn, _collection->ns().db());
+ const WhereCallbackReal whereCallback(getOpCtx(), _collection->ns().db());
for (size_t i = 0; i < _orExpression->numChildren(); ++i) {
// We need a place to shove the results from planning this branch.
@@ -323,13 +322,14 @@ Status SubplanStage::choosePlanForSubqueries(PlanYieldPolicy* yieldPolicy) {
_ws->clear();
- MultiPlanStage multiPlanStage(_txn, _collection, branchResult->canonicalQuery.get());
+ MultiPlanStage multiPlanStage(
+ getOpCtx(), _collection, branchResult->canonicalQuery.get());
// Dump all the solutions into the MPS.
for (size_t ix = 0; ix < branchResult->solutions.size(); ++ix) {
PlanStage* nextPlanRoot;
invariant(StageBuilder::build(
- _txn, _collection, *branchResult->solutions[ix], _ws, &nextPlanRoot));
+ getOpCtx(), _collection, *branchResult->solutions[ix], _ws, &nextPlanRoot));
// Takes ownership of solution with index 'ix' and 'nextPlanRoot'.
multiPlanStage.addPlan(branchResult->solutions.releaseAt(ix), nextPlanRoot, _ws);
@@ -408,7 +408,7 @@ Status SubplanStage::choosePlanForSubqueries(PlanYieldPolicy* yieldPolicy) {
// and set that solution as our child stage.
_ws->clear();
PlanStage* root;
- invariant(StageBuilder::build(_txn, _collection, *_compositeSolution.get(), _ws, &root));
+ invariant(StageBuilder::build(getOpCtx(), _collection, *_compositeSolution.get(), _ws, &root));
invariant(_children.empty());
_children.emplace_back(root);
@@ -441,7 +441,7 @@ Status SubplanStage::choosePlanWholeQuery(PlanYieldPolicy* yieldPolicy) {
if (1 == solutions.size()) {
PlanStage* root;
// Only one possible plan. Run it. Build the stages from the solution.
- verify(StageBuilder::build(_txn, _collection, *solutions[0], _ws, &root));
+ verify(StageBuilder::build(getOpCtx(), _collection, *solutions[0], _ws, &root));
invariant(_children.empty());
_children.emplace_back(root);
@@ -453,7 +453,7 @@ Status SubplanStage::choosePlanWholeQuery(PlanYieldPolicy* yieldPolicy) {
// Many solutions. Create a MultiPlanStage to pick the best, update the cache,
// and so on. The working set will be shared by all candidate plans.
invariant(_children.empty());
- _children.emplace_back(new MultiPlanStage(_txn, _collection, _query));
+ _children.emplace_back(new MultiPlanStage(getOpCtx(), _collection, _query));
MultiPlanStage* multiPlanStage = static_cast<MultiPlanStage*>(child().get());
for (size_t ix = 0; ix < solutions.size(); ++ix) {
@@ -463,7 +463,8 @@ Status SubplanStage::choosePlanWholeQuery(PlanYieldPolicy* yieldPolicy) {
// version of StageBuild::build when WorkingSet is shared
PlanStage* nextPlanRoot;
- verify(StageBuilder::build(_txn, _collection, *solutions[ix], _ws, &nextPlanRoot));
+ verify(
+ StageBuilder::build(getOpCtx(), _collection, *solutions[ix], _ws, &nextPlanRoot));
// Takes ownership of 'solutions[ix]' and 'nextPlanRoot'.
multiPlanStage->addPlan(solutions.releaseAt(ix), nextPlanRoot, _ws);
@@ -530,10 +531,6 @@ PlanStage::StageState SubplanStage::work(WorkingSetID* out) {
return state;
}
-void SubplanStage::doReattachToOperationContext(OperationContext* opCtx) {
- _txn = opCtx;
-}
-
unique_ptr<PlanStageStats> SubplanStage::getStats() {
_commonStats.isEOF = isEOF();
unique_ptr<PlanStageStats> ret = make_unique<PlanStageStats>(_commonStats, STAGE_SUBPLAN);
diff --git a/src/mongo/db/exec/subplan.h b/src/mongo/db/exec/subplan.h
index 47b54eea46f..bbb401d337a 100644
--- a/src/mongo/db/exec/subplan.h
+++ b/src/mongo/db/exec/subplan.h
@@ -77,8 +77,6 @@ public:
bool isEOF() final;
StageState work(WorkingSetID* out) final;
- void doReattachToOperationContext(OperationContext* opCtx) final;
-
StageType stageType() const final {
return STAGE_SUBPLAN;
}
@@ -174,9 +172,6 @@ private:
*/
Status choosePlanWholeQuery(PlanYieldPolicy* yieldPolicy);
- // transactional context for read locks. Not owned by us
- OperationContext* _txn;
-
// Not owned here. Must be non-null.
Collection* _collection;
diff --git a/src/mongo/db/exec/text.cpp b/src/mongo/db/exec/text.cpp
index b7cc0789286..2988fa1a305 100644
--- a/src/mongo/db/exec/text.cpp
+++ b/src/mongo/db/exec/text.cpp
@@ -60,7 +60,7 @@ TextStage::TextStage(OperationContext* txn,
const TextStageParams& params,
WorkingSet* ws,
const MatchExpression* filter)
- : PlanStage(kStageType), _params(params) {
+ : PlanStage(kStageType, txn), _params(params) {
_children.emplace_back(buildTextTree(txn, ws, filter));
_specificStats.indexPrefix = _params.indexPrefix;
_specificStats.indexName = _params.index->indexName();
@@ -138,7 +138,8 @@ unique_ptr<PlanStage> TextStage::buildTextTree(OperationContext* txn,
auto fetcher = make_unique<FetchStage>(
txn, ws, textScorer.release(), nullptr, _params.index->getCollection());
- auto matcher = make_unique<TextMatchStage>(std::move(fetcher), _params.query, _params.spec, ws);
+ auto matcher =
+ make_unique<TextMatchStage>(txn, std::move(fetcher), _params.query, _params.spec, ws);
unique_ptr<PlanStage> treeRoot = std::move(matcher);
return treeRoot;
diff --git a/src/mongo/db/exec/text_match.cpp b/src/mongo/db/exec/text_match.cpp
index bf92b38a57b..6e5ffe951e6 100644
--- a/src/mongo/db/exec/text_match.cpp
+++ b/src/mongo/db/exec/text_match.cpp
@@ -45,11 +45,12 @@ using stdx::make_unique;
const char* TextMatchStage::kStageType = "TEXT_MATCH";
-TextMatchStage::TextMatchStage(unique_ptr<PlanStage> child,
+TextMatchStage::TextMatchStage(OperationContext* opCtx,
+ unique_ptr<PlanStage> child,
const FTSQuery& query,
const FTSSpec& spec,
WorkingSet* ws)
- : PlanStage(kStageType), _ftsMatcher(query, spec), _ws(ws) {
+ : PlanStage(kStageType, opCtx), _ftsMatcher(query, spec), _ws(ws) {
_children.emplace_back(std::move(child));
}
diff --git a/src/mongo/db/exec/text_match.h b/src/mongo/db/exec/text_match.h
index df3e3b9ae68..7b707efda5b 100644
--- a/src/mongo/db/exec/text_match.h
+++ b/src/mongo/db/exec/text_match.h
@@ -57,7 +57,8 @@ class RecordID;
*/
class TextMatchStage final : public PlanStage {
public:
- TextMatchStage(unique_ptr<PlanStage> child,
+ TextMatchStage(OperationContext* opCtx,
+ unique_ptr<PlanStage> child,
const FTSQuery& query,
const FTSSpec& spec,
WorkingSet* ws);
diff --git a/src/mongo/db/exec/text_or.cpp b/src/mongo/db/exec/text_or.cpp
index bbdbabdd4d0..bf45ba45ec6 100644
--- a/src/mongo/db/exec/text_or.cpp
+++ b/src/mongo/db/exec/text_or.cpp
@@ -59,12 +59,11 @@ TextOrStage::TextOrStage(OperationContext* txn,
WorkingSet* ws,
const MatchExpression* filter,
IndexDescriptor* index)
- : PlanStage(kStageType),
+ : PlanStage(kStageType, txn),
_ftsSpec(ftsSpec),
_ws(ws),
_scoreIterator(_scores.end()),
_filter(filter),
- _txn(txn),
_idRetrying(WorkingSet::INVALID_ID),
_index(index) {}
@@ -91,16 +90,13 @@ void TextOrStage::doRestoreState() {
}
void TextOrStage::doDetachFromOperationContext() {
- _txn = NULL;
if (_recordCursor)
_recordCursor->detachFromOperationContext();
}
-void TextOrStage::doReattachToOperationContext(OperationContext* opCtx) {
- invariant(_txn == NULL);
- _txn = opCtx;
+void TextOrStage::doReattachToOperationContext() {
if (_recordCursor)
- _recordCursor->reattachToOperationContext(opCtx);
+ _recordCursor->reattachToOperationContext(getOpCtx());
}
void TextOrStage::doInvalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
@@ -186,7 +182,7 @@ PlanStage::StageState TextOrStage::work(WorkingSetID* out) {
PlanStage::StageState TextOrStage::initStage(WorkingSetID* out) {
*out = WorkingSet::INVALID_ID;
try {
- _recordCursor = _index->getCollection()->getCursor(_txn);
+ _recordCursor = _index->getCollection()->getCursor(getOpCtx());
_internalState = State::kReadingTerms;
return PlanStage::NEED_TIME;
} catch (const WriteConflictException& wce) {
@@ -369,8 +365,12 @@ PlanStage::StageState TextOrStage::addTerm(WorkingSetID wsid, WorkingSetID* out)
bool shouldKeep;
bool wasDeleted = false;
try {
- TextMatchableDocument tdoc(
- _txn, newKeyData.indexKeyPattern, newKeyData.keyData, _ws, wsid, _recordCursor);
+ TextMatchableDocument tdoc(getOpCtx(),
+ newKeyData.indexKeyPattern,
+ newKeyData.keyData,
+ _ws,
+ wsid,
+ _recordCursor);
shouldKeep = _filter->matches(&tdoc);
} catch (const WriteConflictException& wce) {
_idRetrying = wsid;
diff --git a/src/mongo/db/exec/text_or.h b/src/mongo/db/exec/text_or.h
index 5d9df26f439..0b86ad145b1 100644
--- a/src/mongo/db/exec/text_or.h
+++ b/src/mongo/db/exec/text_or.h
@@ -89,7 +89,7 @@ public:
void doSaveState() final;
void doRestoreState() final;
void doDetachFromOperationContext() final;
- void doReattachToOperationContext(OperationContext* opCtx) final;
+ void doReattachToOperationContext() final;
void doInvalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) final;
StageType stageType() const final {
@@ -157,7 +157,6 @@ private:
// Members needed only for using the TextMatchableDocument.
const MatchExpression* _filter;
- OperationContext* _txn;
WorkingSetID _idRetrying;
std::unique_ptr<RecordCursor> _recordCursor;
IndexDescriptor* _index;
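TextOrStage above is the template for stages that cache per-operation state such as a storage cursor: drop the local _txn, detach the cursor in doDetachFromOperationContext(), and re-bind it through getOpCtx() in doReattachToOperationContext(). A condensed sketch of that pattern, reusing the stand-in PlanStage and OperationContext from the earlier sketch; CursorOwningStage and the RecordCursor stub are hypothetical names, not mongo's:

    // Illustrative sketch only; builds on the stand-in PlanStage above.
    #include <memory>

    struct RecordCursor {  // stand-in for a storage-engine cursor
        void detachFromOperationContext() {}
        void reattachToOperationContext(OperationContext*) {}
    };

    class CursorOwningStage final : public PlanStage {
    public:
        explicit CursorOwningStage(OperationContext* opCtx)
            : PlanStage("CURSOR_OWNING", opCtx) {}

    protected:
        void doDetachFromOperationContext() final {
            // getOpCtx() is already null here; only park per-operation state.
            if (_cursor)
                _cursor->detachFromOperationContext();
        }

        void doReattachToOperationContext() final {
            // getOpCtx() now returns the OperationContext being attached.
            if (_cursor)
                _cursor->reattachToOperationContext(getOpCtx());
        }

    private:
        std::unique_ptr<RecordCursor> _cursor;  // set lazily during execution
    };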
diff --git a/src/mongo/db/exec/update.cpp b/src/mongo/db/exec/update.cpp
index a5147ed6e3b..05c44190b5e 100644
--- a/src/mongo/db/exec/update.cpp
+++ b/src/mongo/db/exec/update.cpp
@@ -415,8 +415,7 @@ UpdateStage::UpdateStage(OperationContext* txn,
WorkingSet* ws,
Collection* collection,
PlanStage* child)
- : PlanStage(kStageType),
- _txn(txn),
+ : PlanStage(kStageType, txn),
_params(params),
_ws(ws),
_collection(collection),
@@ -506,7 +505,7 @@ BSONObj UpdateStage::transformAndUpdate(const Snapshotted<BSONObj>& oldObj, Reco
if (docWasModified) {
// Verify that no immutable fields were changed and data is valid for storage.
- if (!(!_txn->writesAreReplicated() || request->isFromMigration())) {
+ if (!(!getOpCtx()->writesAreReplicated() || request->isFromMigration())) {
const std::vector<FieldRef*>* immutableFields = NULL;
if (lifecycle)
immutableFields = lifecycle->getImmutableFields();
@@ -516,7 +515,7 @@ BSONObj UpdateStage::transformAndUpdate(const Snapshotted<BSONObj>& oldObj, Reco
}
// Prepare to write back the modified document
- WriteUnitOfWork wunit(_txn);
+ WriteUnitOfWork wunit(getOpCtx());
RecordId newLoc;
@@ -532,7 +531,7 @@ BSONObj UpdateStage::transformAndUpdate(const Snapshotted<BSONObj>& oldObj, Reco
args.criteria = idQuery;
args.fromMigrate = request->isFromMigration();
_collection->updateDocumentWithDamages(
- _txn,
+ getOpCtx(),
loc,
Snapshotted<RecordData>(oldObj.snapshotId(), oldRec),
source,
@@ -559,7 +558,7 @@ BSONObj UpdateStage::transformAndUpdate(const Snapshotted<BSONObj>& oldObj, Reco
args.update = logObj;
args.criteria = idQuery;
args.fromMigrate = request->isFromMigration();
- StatusWith<RecordId> res = _collection->updateDocument(_txn,
+ StatusWith<RecordId> res = _collection->updateDocument(getOpCtx(),
loc,
oldObj,
newObj,
@@ -572,7 +571,7 @@ BSONObj UpdateStage::transformAndUpdate(const Snapshotted<BSONObj>& oldObj, Reco
}
}
- invariant(oldObj.snapshotId() == _txn->recoveryUnit()->getSnapshotId());
+ invariant(oldObj.snapshotId() == getOpCtx()->recoveryUnit()->getSnapshotId());
wunit.commit();
// If the document moved, we might see it again in a collection scan (maybe it's
@@ -676,7 +675,7 @@ void UpdateStage::doInsert() {
_specificStats.inserted = true;
const UpdateRequest* request = _params.request;
- bool isInternalRequest = !_txn->writesAreReplicated() || request->isFromMigration();
+ bool isInternalRequest = !getOpCtx()->writesAreReplicated() || request->isFromMigration();
// Reset the document we will be writing to.
_doc.reset();
@@ -698,17 +697,17 @@ void UpdateStage::doInsert() {
return;
}
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- WriteUnitOfWork wunit(_txn);
+ WriteUnitOfWork wunit(getOpCtx());
invariant(_collection);
const bool enforceQuota = !request->isGod();
- uassertStatusOK(
- _collection->insertDocument(_txn, newObj, enforceQuota, request->isFromMigration()));
+ uassertStatusOK(_collection->insertDocument(
+ getOpCtx(), newObj, enforceQuota, request->isFromMigration()));
// Technically, we should save/restore state here, but since we are going to return
// immediately after, it would just be wasted work.
wunit.commit();
}
- MONGO_WRITE_CONFLICT_RETRY_LOOP_END(_txn, "upsert", _collection->ns().ns());
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(getOpCtx(), "upsert", _collection->ns().ns());
}
bool UpdateStage::doneUpdating() {
@@ -757,8 +756,8 @@ PlanStage::StageState UpdateStage::work(WorkingSetID* out) {
BSONObj newObj = _specificStats.objInserted;
*out = _ws->allocate();
WorkingSetMember* member = _ws->get(*out);
- member->obj =
- Snapshotted<BSONObj>(_txn->recoveryUnit()->getSnapshotId(), newObj.getOwned());
+ member->obj = Snapshotted<BSONObj>(getOpCtx()->recoveryUnit()->getSnapshotId(),
+ newObj.getOwned());
member->transitionToOwnedObj();
++_commonStats.advanced;
return PlanStage::ADVANCED;
@@ -835,10 +834,10 @@ PlanStage::StageState UpdateStage::work(WorkingSetID* out) {
try {
std::unique_ptr<RecordCursor> cursor;
- if (_txn->recoveryUnit()->getSnapshotId() != member->obj.snapshotId()) {
- cursor = _collection->getCursor(_txn);
+ if (getOpCtx()->recoveryUnit()->getSnapshotId() != member->obj.snapshotId()) {
+ cursor = _collection->getCursor(getOpCtx());
// our snapshot has changed, refetch
- if (!WorkingSetCommon::fetch(_txn, _ws, id, cursor)) {
+ if (!WorkingSetCommon::fetch(getOpCtx(), _ws, id, cursor)) {
// document was deleted, we're done here
++_commonStats.needTime;
return PlanStage::NEED_TIME;
@@ -877,7 +876,7 @@ PlanStage::StageState UpdateStage::work(WorkingSetID* out) {
// Set member's obj to be the doc we want to return.
if (_params.request->shouldReturnAnyDocs()) {
if (_params.request->shouldReturnNewDocs()) {
- member->obj = Snapshotted<BSONObj>(_txn->recoveryUnit()->getSnapshotId(),
+ member->obj = Snapshotted<BSONObj>(getOpCtx()->recoveryUnit()->getSnapshotId(),
newObj.getOwned());
} else {
invariant(_params.request->shouldReturnOldDocs());
@@ -963,7 +962,7 @@ Status UpdateStage::restoreUpdateState() {
const NamespaceString& nsString(request.getNamespaceString());
// We may have stepped down during the yield.
- bool userInitiatedWritesAndNotPrimary = _txn->writesAreReplicated() &&
+ bool userInitiatedWritesAndNotPrimary = getOpCtx()->writesAreReplicated() &&
!repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(nsString);
if (userInitiatedWritesAndNotPrimary) {
@@ -982,7 +981,7 @@ Status UpdateStage::restoreUpdateState() {
17270);
}
- _params.driver->refreshIndexKeys(lifecycle->getIndexKeys(_txn));
+ _params.driver->refreshIndexKeys(lifecycle->getIndexKeys(getOpCtx()));
}
return Status::OK();
@@ -992,10 +991,6 @@ void UpdateStage::doRestoreState() {
uassertStatusOK(restoreUpdateState());
}
-void UpdateStage::doReattachToOperationContext(OperationContext* opCtx) {
- _txn = opCtx;
-}
-
unique_ptr<PlanStageStats> UpdateStage::getStats() {
_commonStats.isEOF = isEOF();
unique_ptr<PlanStageStats> ret = make_unique<PlanStageStats>(_commonStats, STAGE_UPDATE);
diff --git a/src/mongo/db/exec/update.h b/src/mongo/db/exec/update.h
index 87859c9ff5f..a6427ee7f20 100644
--- a/src/mongo/db/exec/update.h
+++ b/src/mongo/db/exec/update.h
@@ -85,7 +85,6 @@ public:
StageState work(WorkingSetID* out) final;
void doRestoreState() final;
- void doReattachToOperationContext(OperationContext* opCtx) final;
StageType stageType() const final {
return STAGE_UPDATE;
@@ -166,9 +165,6 @@ private:
*/
Status restoreUpdateState();
- // Transactional context. Not owned by us.
- OperationContext* _txn;
-
UpdateStageParams _params;
// Not owned by us.
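SubplanStage, TextOrStage, and UpdateStage all follow the same three-step recipe visible in the hunks above, and any out-of-tree stage needs the same treatment: forward the constructor's OperationContext* to the PlanStage base instead of storing it, replace every _txn use in the .cpp with getOpCtx(), and delete the now-redundant _txn member and its declaration from the header. Stages whose doReattachToOperationContext() did nothing but reassign _txn (SubplanStage, UpdateStage here) drop the override entirely, since the base class now performs that bookkeeping.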
diff --git a/src/mongo/db/query/get_executor.cpp b/src/mongo/db/query/get_executor.cpp
index 1f7f7859e5f..7b23c1c10e5 100644
--- a/src/mongo/db/query/get_executor.cpp
+++ b/src/mongo/db/query/get_executor.cpp
@@ -228,7 +228,7 @@ Status prepareExecution(OperationContext* opCtx,
const string& ns = canonicalQuery->ns();
LOG(2) << "Collection " << ns << " does not exist."
<< " Using EOF plan: " << canonicalQuery->toStringShort();
- *rootOut = new EOFStage();
+ *rootOut = new EOFStage(opCtx);
return Status::OK();
}
@@ -246,7 +246,8 @@ Status prepareExecution(OperationContext* opCtx,
// Might have to filter out orphaned docs.
if (plannerParams.options & QueryPlannerParams::INCLUDE_SHARD_FILTER) {
- *rootOut = new ShardFilterStage(ShardingState::get(getGlobalServiceContext())
+ *rootOut = new ShardFilterStage(opCtx,
+ ShardingState::get(getGlobalServiceContext())
->getCollectionMetadata(collection->ns().ns()),
ws,
*rootOut);
@@ -268,7 +269,7 @@ Status prepareExecution(OperationContext* opCtx,
params.projImpl = ProjectionStageParams::SIMPLE_DOC;
}
- *rootOut = new ProjectionStage(params, ws, *rootOut);
+ *rootOut = new ProjectionStage(opCtx, params, ws, *rootOut);
}
return Status::OK();
@@ -446,8 +447,8 @@ StatusWith<unique_ptr<PlanExecutor>> getExecutor(OperationContext* txn,
if (!collection) {
LOG(2) << "Collection " << ns << " does not exist."
<< " Using EOF stage: " << unparsedQuery.toString();
- unique_ptr<EOFStage> eofStage = make_unique<EOFStage>();
- unique_ptr<WorkingSet> ws = make_unique<WorkingSet>();
+ auto eofStage = make_unique<EOFStage>(txn);
+ auto ws = make_unique<WorkingSet>();
return PlanExecutor::make(txn, std::move(ws), std::move(eofStage), ns, yieldPolicy);
}
@@ -473,7 +474,8 @@ StatusWith<unique_ptr<PlanExecutor>> getExecutor(OperationContext* txn,
// Might have to filter out orphaned docs.
if (plannerOptions & QueryPlannerParams::INCLUDE_SHARD_FILTER) {
- root = make_unique<ShardFilterStage>(ShardingState::get(getGlobalServiceContext())
+ root = make_unique<ShardFilterStage>(txn,
+ ShardingState::get(getGlobalServiceContext())
->getCollectionMetadata(collection->ns().ns()),
ws.get(),
root.release());
@@ -648,7 +650,7 @@ StatusWith<unique_ptr<PlanStage>> applyProjection(OperationContext* txn,
ProjectionStageParams params(WhereCallbackReal(txn, nsString.db()));
params.projObj = proj;
params.fullExpression = cq->root();
- return {make_unique<ProjectionStage>(params, ws, root.release())};
+ return {make_unique<ProjectionStage>(txn, params, ws, root.release())};
}
} // namespace
@@ -708,8 +710,8 @@ StatusWith<unique_ptr<PlanExecutor>> getExecutorDelete(OperationContext* txn,
// a DeleteStage, so in this case we put a DeleteStage on top of an EOFStage.
LOG(2) << "Collection " << nss.ns() << " does not exist."
<< " Using EOF stage: " << unparsedQuery.toString();
- unique_ptr<DeleteStage> deleteStage =
- make_unique<DeleteStage>(txn, deleteStageParams, ws.get(), nullptr, new EOFStage());
+ auto deleteStage = make_unique<DeleteStage>(
+ txn, deleteStageParams, ws.get(), nullptr, new EOFStage(txn));
return PlanExecutor::make(txn, std::move(ws), std::move(deleteStage), nss.ns(), policy);
}
@@ -850,8 +852,8 @@ StatusWith<unique_ptr<PlanExecutor>> getExecutorUpdate(OperationContext* txn,
// an UpdateStage, so in this case we put an UpdateStage on top of an EOFStage.
LOG(2) << "Collection " << nsString.ns() << " does not exist."
<< " Using EOF stage: " << unparsedQuery.toString();
- unique_ptr<UpdateStage> updateStage = make_unique<UpdateStage>(
- txn, updateStageParams, ws.get(), collection, new EOFStage());
+ auto updateStage = make_unique<UpdateStage>(
+ txn, updateStageParams, ws.get(), collection, new EOFStage(txn));
return PlanExecutor::make(
txn, std::move(ws), std::move(updateStage), nsString.ns(), policy);
}
@@ -943,7 +945,7 @@ StatusWith<unique_ptr<PlanExecutor>> getExecutorGroup(OperationContext* txn,
// reporting machinery always assumes that the root stage for a group operation is a
// GroupStage, so in this case we put a GroupStage on top of an EOFStage.
unique_ptr<PlanStage> root =
- make_unique<GroupStage>(txn, request, ws.get(), new EOFStage());
+ make_unique<GroupStage>(txn, request, ws.get(), new EOFStage(txn));
return PlanExecutor::make(txn, std::move(ws), std::move(root), request.ns, yieldPolicy);
}
@@ -1210,7 +1212,7 @@ StatusWith<unique_ptr<PlanExecutor>> getExecutorCount(OperationContext* txn,
// reporting machinery always assumes that the root stage for a count operation is
// a CountStage, so in this case we put a CountStage on top of an EOFStage.
unique_ptr<PlanStage> root =
- make_unique<CountStage>(txn, collection, request, ws.get(), new EOFStage());
+ make_unique<CountStage>(txn, collection, request, ws.get(), new EOFStage(txn));
return PlanExecutor::make(
txn, std::move(ws), std::move(root), request.getNs().ns(), yieldPolicy);
}
@@ -1304,7 +1306,7 @@ StatusWith<unique_ptr<PlanExecutor>> getExecutorDistinct(OperationContext* txn,
if (!collection) {
// Treat collections that do not exist as empty collections.
return PlanExecutor::make(
- txn, make_unique<WorkingSet>(), make_unique<EOFStage>(), ns, yieldPolicy);
+ txn, make_unique<WorkingSet>(), make_unique<EOFStage>(txn), ns, yieldPolicy);
}
// TODO: check for idhack here?
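One pattern worth noting across get_executor.cpp: even the EOF placeholder plans now receive the OperationContext* (new EOFStage(opCtx), make_unique<EOFStage>(txn)). Presumably this is so any plan tree, however trivial, can be detached and reattached through the base class without special-casing stages that never touch storage; an EOFStage constructed with a null context would trip the new invariant(_opCtx) on detach.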
diff --git a/src/mongo/db/query/internal_plans.cpp b/src/mongo/db/query/internal_plans.cpp
index f85b8723b3b..07f0a39c927 100644
--- a/src/mongo/db/query/internal_plans.cpp
+++ b/src/mongo/db/query/internal_plans.cpp
@@ -50,7 +50,7 @@ std::unique_ptr<PlanExecutor> InternalPlanner::collectionScan(OperationContext*
std::unique_ptr<WorkingSet> ws = stdx::make_unique<WorkingSet>();
if (NULL == collection) {
- std::unique_ptr<EOFStage> eof = stdx::make_unique<EOFStage>();
+ auto eof = stdx::make_unique<EOFStage>(txn);
// Takes ownership of 'ws' and 'eof'.
auto statusWithPlanExecutor = PlanExecutor::make(
txn, std::move(ws), std::move(eof), ns.toString(), PlanExecutor::YIELD_MANUAL);
diff --git a/src/mongo/db/query/stage_builder.cpp b/src/mongo/db/query/stage_builder.cpp
index c0c29efa91a..ba4e5fe6f99 100644
--- a/src/mongo/db/query/stage_builder.cpp
+++ b/src/mongo/db/query/stage_builder.cpp
@@ -54,11 +54,13 @@
#include "mongo/db/catalog/collection.h"
#include "mongo/db/catalog/database.h"
#include "mongo/db/s/sharding_state.h"
+#include "mongo/stdx/memory.h"
#include "mongo/util/log.h"
namespace mongo {
using std::unique_ptr;
+using stdx::make_unique;
PlanStage* buildStages(OperationContext* txn,
Collection* collection,
@@ -115,7 +117,7 @@ PlanStage* buildStages(OperationContext* txn,
params.pattern = sn->pattern;
params.query = sn->query;
params.limit = sn->limit;
- return new SortStage(params, ws, childStage);
+ return new SortStage(txn, params, ws, childStage);
} else if (STAGE_PROJECTION == root->getType()) {
const ProjectionNode* pn = static_cast<const ProjectionNode*>(root);
PlanStage* childStage = buildStages(txn, collection, qsol, pn->children[0], ws);
@@ -139,24 +141,24 @@ PlanStage* buildStages(OperationContext* txn,
params.projImpl = ProjectionStageParams::SIMPLE_DOC;
}
- return new ProjectionStage(params, ws, childStage);
+ return new ProjectionStage(txn, params, ws, childStage);
} else if (STAGE_LIMIT == root->getType()) {
const LimitNode* ln = static_cast<const LimitNode*>(root);
PlanStage* childStage = buildStages(txn, collection, qsol, ln->children[0], ws);
if (NULL == childStage) {
return NULL;
}
- return new LimitStage(ln->limit, ws, childStage);
+ return new LimitStage(txn, ln->limit, ws, childStage);
} else if (STAGE_SKIP == root->getType()) {
const SkipNode* sn = static_cast<const SkipNode*>(root);
PlanStage* childStage = buildStages(txn, collection, qsol, sn->children[0], ws);
if (NULL == childStage) {
return NULL;
}
- return new SkipStage(sn->skip, ws, childStage);
+ return new SkipStage(txn, sn->skip, ws, childStage);
} else if (STAGE_AND_HASH == root->getType()) {
const AndHashNode* ahn = static_cast<const AndHashNode*>(root);
- unique_ptr<AndHashStage> ret(new AndHashStage(ws, collection));
+ auto ret = make_unique<AndHashStage>(txn, ws, collection);
for (size_t i = 0; i < ahn->children.size(); ++i) {
PlanStage* childStage = buildStages(txn, collection, qsol, ahn->children[i], ws);
if (NULL == childStage) {
@@ -167,7 +169,7 @@ PlanStage* buildStages(OperationContext* txn,
return ret.release();
} else if (STAGE_OR == root->getType()) {
const OrNode* orn = static_cast<const OrNode*>(root);
- unique_ptr<OrStage> ret(new OrStage(ws, orn->dedup, orn->filter.get()));
+ auto ret = make_unique<OrStage>(txn, ws, orn->dedup, orn->filter.get());
for (size_t i = 0; i < orn->children.size(); ++i) {
PlanStage* childStage = buildStages(txn, collection, qsol, orn->children[i], ws);
if (NULL == childStage) {
@@ -178,7 +180,7 @@ PlanStage* buildStages(OperationContext* txn,
return ret.release();
} else if (STAGE_AND_SORTED == root->getType()) {
const AndSortedNode* asn = static_cast<const AndSortedNode*>(root);
- unique_ptr<AndSortedStage> ret(new AndSortedStage(ws, collection));
+ auto ret = make_unique<AndSortedStage>(txn, ws, collection);
for (size_t i = 0; i < asn->children.size(); ++i) {
PlanStage* childStage = buildStages(txn, collection, qsol, asn->children[i], ws);
if (NULL == childStage) {
@@ -192,7 +194,7 @@ PlanStage* buildStages(OperationContext* txn,
MergeSortStageParams params;
params.dedup = msn->dedup;
params.pattern = msn->sort;
- unique_ptr<MergeSortStage> ret(new MergeSortStage(params, ws, collection));
+ auto ret = make_unique<MergeSortStage>(txn, params, ws, collection);
for (size_t i = 0; i < msn->children.size(); ++i) {
PlanStage* childStage = buildStages(txn, collection, qsol, msn->children[i], ws);
if (NULL == childStage) {
@@ -283,7 +285,8 @@ PlanStage* buildStages(OperationContext* txn,
if (NULL == childStage) {
return NULL;
}
- return new ShardFilterStage(ShardingState::get(getGlobalServiceContext())
+ return new ShardFilterStage(txn,
+ ShardingState::get(getGlobalServiceContext())
->getCollectionMetadata(collection->ns().ns()),
ws,
childStage);
@@ -293,7 +296,7 @@ PlanStage* buildStages(OperationContext* txn,
if (NULL == childStage) {
return NULL;
}
- return new KeepMutationsStage(km->filter.get(), ws, childStage);
+ return new KeepMutationsStage(txn, km->filter.get(), ws, childStage);
} else if (STAGE_DISTINCT_SCAN == root->getType()) {
const DistinctNode* dn = static_cast<const DistinctNode*>(root);
diff --git a/src/mongo/db/s/migration_source_manager.cpp b/src/mongo/db/s/migration_source_manager.cpp
index ef7250393fe..87e7ed4e0c9 100644
--- a/src/mongo/db/s/migration_source_manager.cpp
+++ b/src/mongo/db/s/migration_source_manager.cpp
@@ -69,7 +69,7 @@ Tee* migrateLog = RamLog::get("migrate");
class DeleteNotificationStage final : public PlanStage {
public:
DeleteNotificationStage(MigrationSourceManager* migrationSourceManager)
- : PlanStage("NOTIFY_DELETE"), _migrationSourceManager(migrationSourceManager) {}
+ : PlanStage("NOTIFY_DELETE", nullptr), _migrationSourceManager(migrationSourceManager) {}
void doInvalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) override {
if (type == INVALIDATION_DELETION) {
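The remaining call sites show the escape hatch for code that has no OperationContext at hand: DeleteNotificationStage here, MockStage in query_stage_near.cpp, and several unit tests simply pass nullptr to the PlanStage base. That is safe only as long as the stage never calls getOpCtx() and is never run through a detach/reattach cycle, since detachFromOperationContext() would fail its invariant on a null pointer.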
diff --git a/src/mongo/dbtests/query_plan_executor.cpp b/src/mongo/dbtests/query_plan_executor.cpp
index 6ceb45ef81c..2b45aaf7c0b 100644
--- a/src/mongo/dbtests/query_plan_executor.cpp
+++ b/src/mongo/dbtests/query_plan_executor.cpp
@@ -46,12 +46,14 @@
#include "mongo/db/query/plan_executor.h"
#include "mongo/db/query/query_solution.h"
#include "mongo/dbtests/dbtests.h"
+#include "mongo/stdx/memory.h"
namespace QueryPlanExecutor {
using std::shared_ptr;
using std::string;
using std::unique_ptr;
+using stdx::make_unique;
static const NamespaceString nss("unittests.QueryPlanExecutor");
@@ -277,15 +279,14 @@ public:
ASSERT_EQUALS(errmsg, "");
// Create the output PlanExecutor that pulls results from the pipeline.
- std::unique_ptr<WorkingSet> ws(new WorkingSet());
- std::unique_ptr<PipelineProxyStage> proxy(
- new PipelineProxyStage(pipeline, innerExec, ws.get()));
+ auto ws = make_unique<WorkingSet>();
+ auto proxy = make_unique<PipelineProxyStage>(&_txn, pipeline, innerExec, ws.get());
Collection* collection = ctx.getCollection();
auto statusWithPlanExecutor = PlanExecutor::make(
&_txn, std::move(ws), std::move(proxy), collection, PlanExecutor::YIELD_MANUAL);
ASSERT_OK(statusWithPlanExecutor.getStatus());
- std::unique_ptr<PlanExecutor> outerExec = std::move(statusWithPlanExecutor.getValue());
+ unique_ptr<PlanExecutor> outerExec = std::move(statusWithPlanExecutor.getValue());
// Only the outer executor gets registered.
registerExec(outerExec.get());
diff --git a/src/mongo/dbtests/query_stage_and.cpp b/src/mongo/dbtests/query_stage_and.cpp
index 22fcdc5ad70..60d914c9dd9 100644
--- a/src/mongo/dbtests/query_stage_and.cpp
+++ b/src/mongo/dbtests/query_stage_and.cpp
@@ -55,6 +55,7 @@ namespace QueryStageAnd {
using std::set;
using std::shared_ptr;
using std::unique_ptr;
+using stdx::make_unique;
class QueryStageAndBase {
public:
@@ -180,7 +181,7 @@ public:
addIndex(BSON("bar" << 1));
WorkingSet ws;
- unique_ptr<AndHashStage> ah(new AndHashStage(&ws, coll));
+ auto ah = make_unique<AndHashStage>(&_txn, &ws, coll);
// Foo <= 20
IndexScanParams params;
@@ -286,7 +287,7 @@ public:
addIndex(BSON("baz" << 1));
WorkingSet ws;
- unique_ptr<AndHashStage> ah(new AndHashStage(&ws, coll));
+ auto ah = make_unique<AndHashStage>(&_txn, &ws, coll);
// Foo <= 20 (descending)
IndexScanParams params;
@@ -374,7 +375,7 @@ public:
addIndex(BSON("bar" << 1));
WorkingSet ws;
- unique_ptr<AndHashStage> ah(new AndHashStage(&ws, coll));
+ auto ah = make_unique<AndHashStage>(&_txn, &ws, coll);
// Foo <= 20
IndexScanParams params;
@@ -429,7 +430,7 @@ public:
// before hashed AND is done reading the first child (stage has to
// hold 21 keys in buffer for Foo <= 20).
WorkingSet ws;
- unique_ptr<AndHashStage> ah(new AndHashStage(&ws, coll, 20 * big.size()));
+ auto ah = make_unique<AndHashStage>(&_txn, &ws, coll, 20 * big.size());
// Foo <= 20
IndexScanParams params;
@@ -482,7 +483,7 @@ public:
// keys in last child's index are not buffered. There are 6 keys
// that satisfy the criteria Foo <= 20 and Bar >= 10 and 5 <= baz <= 15.
WorkingSet ws;
- unique_ptr<AndHashStage> ah(new AndHashStage(&ws, coll, 5 * big.size()));
+ auto ah = make_unique<AndHashStage>(&_txn, &ws, coll, 5 * big.size());
// Foo <= 20
IndexScanParams params;
@@ -530,7 +531,7 @@ public:
addIndex(BSON("baz" << 1));
WorkingSet ws;
- unique_ptr<AndHashStage> ah(new AndHashStage(&ws, coll));
+ auto ah = make_unique<AndHashStage>(&_txn, &ws, coll);
// Foo <= 20
IndexScanParams params;
@@ -597,7 +598,7 @@ public:
// before hashed AND is done reading the second child (stage has to
// hold 11 keys in buffer for Foo <= 20 and Bar >= 10).
WorkingSet ws;
- unique_ptr<AndHashStage> ah(new AndHashStage(&ws, coll, 10 * big.size()));
+ auto ah = make_unique<AndHashStage>(&_txn, &ws, coll, 10 * big.size());
// Foo <= 20
IndexScanParams params;
@@ -651,7 +652,7 @@ public:
addIndex(BSON("bar" << 1));
WorkingSet ws;
- unique_ptr<AndHashStage> ah(new AndHashStage(&ws, coll));
+ auto ah = make_unique<AndHashStage>(&_txn, &ws, coll);
// Foo <= 20
IndexScanParams params;
@@ -714,7 +715,7 @@ public:
addIndex(BSON("bar" << 1));
WorkingSet ws;
- unique_ptr<AndHashStage> ah(new AndHashStage(&ws, coll));
+ auto ah = make_unique<AndHashStage>(&_txn, &ws, coll);
// Foo >= 100
IndexScanParams params;
@@ -766,7 +767,7 @@ public:
addIndex(BSON("bar" << 1));
WorkingSet ws;
- unique_ptr<AndHashStage> ah(new AndHashStage(&ws, coll));
+ auto ah = make_unique<AndHashStage>(&_txn, &ws, coll);
// Foo <= 20
IndexScanParams params;
@@ -825,7 +826,7 @@ public:
addIndex(BSON("bar" << 1));
WorkingSet ws;
- unique_ptr<AndHashStage> ah(new AndHashStage(&ws, coll));
+ auto ah = make_unique<AndHashStage>(&_txn, &ws, coll);
// Foo <= 20
IndexScanParams params;
@@ -880,10 +881,9 @@ public:
// Child2: NEED_TIME, DEAD
{
WorkingSet ws;
- const std::unique_ptr<AndHashStage> andHashStage =
- stdx::make_unique<AndHashStage>(&ws, coll);
+ const auto andHashStage = make_unique<AndHashStage>(&_txn, &ws, coll);
- std::unique_ptr<QueuedDataStage> childStage1 = stdx::make_unique<QueuedDataStage>(&ws);
+ auto childStage1 = make_unique<QueuedDataStage>(&_txn, &ws);
{
WorkingSetID id = ws.allocate();
WorkingSetMember* wsm = ws.get(id);
@@ -893,7 +893,7 @@ public:
childStage1->pushBack(id);
}
- std::unique_ptr<QueuedDataStage> childStage2 = stdx::make_unique<QueuedDataStage>(&ws);
+ auto childStage2 = make_unique<QueuedDataStage>(&_txn, &ws);
childStage2->pushBack(PlanStage::NEED_TIME);
childStage2->pushBack(PlanStage::DEAD);
@@ -914,10 +914,9 @@ public:
// Child2: Data
{
WorkingSet ws;
- const std::unique_ptr<AndHashStage> andHashStage =
- stdx::make_unique<AndHashStage>(&ws, coll);
+ const auto andHashStage = make_unique<AndHashStage>(&_txn, &ws, coll);
- std::unique_ptr<QueuedDataStage> childStage1 = stdx::make_unique<QueuedDataStage>(&ws);
+ auto childStage1 = make_unique<QueuedDataStage>(&_txn, &ws);
{
WorkingSetID id = ws.allocate();
@@ -929,7 +928,7 @@ public:
}
childStage1->pushBack(PlanStage::DEAD);
- std::unique_ptr<QueuedDataStage> childStage2 = stdx::make_unique<QueuedDataStage>(&ws);
+ auto childStage2 = make_unique<QueuedDataStage>(&_txn, &ws);
{
WorkingSetID id = ws.allocate();
WorkingSetMember* wsm = ws.get(id);
@@ -956,10 +955,9 @@ public:
// Child2: Data, DEAD
{
WorkingSet ws;
- const std::unique_ptr<AndHashStage> andHashStage =
- stdx::make_unique<AndHashStage>(&ws, coll);
+ const auto andHashStage = make_unique<AndHashStage>(&_txn, &ws, coll);
- std::unique_ptr<QueuedDataStage> childStage1 = stdx::make_unique<QueuedDataStage>(&ws);
+ auto childStage1 = make_unique<QueuedDataStage>(&_txn, &ws);
{
WorkingSetID id = ws.allocate();
WorkingSetMember* wsm = ws.get(id);
@@ -969,7 +967,7 @@ public:
childStage1->pushBack(id);
}
- std::unique_ptr<QueuedDataStage> childStage2 = stdx::make_unique<QueuedDataStage>(&ws);
+ auto childStage2 = make_unique<QueuedDataStage>(&_txn, &ws);
{
WorkingSetID id = ws.allocate();
WorkingSetMember* wsm = ws.get(id);
@@ -1022,7 +1020,7 @@ public:
addIndex(BSON("bar" << 1));
WorkingSet ws;
- unique_ptr<AndSortedStage> ah(new AndSortedStage(&ws, coll));
+ auto ah = make_unique<AndSortedStage>(&_txn, &ws, coll);
// Scan over foo == 1
IndexScanParams params;
@@ -1155,7 +1153,7 @@ public:
addIndex(BSON("baz" << 1));
WorkingSet ws;
- unique_ptr<AndSortedStage> ah(new AndSortedStage(&ws, coll));
+ auto ah = make_unique<AndSortedStage>(&_txn, &ws, coll);
// Scan over foo == 1
IndexScanParams params;
@@ -1200,7 +1198,7 @@ public:
addIndex(BSON("bar" << 1));
WorkingSet ws;
- unique_ptr<AndSortedStage> ah(new AndSortedStage(&ws, coll));
+ auto ah = make_unique<AndSortedStage>(&_txn, &ws, coll);
// Foo == 7. Should be EOF.
IndexScanParams params;
@@ -1249,7 +1247,7 @@ public:
addIndex(BSON("bar" << 1));
WorkingSet ws;
- unique_ptr<AndSortedStage> ah(new AndSortedStage(&ws, coll));
+ auto ah = make_unique<AndSortedStage>(&_txn, &ws, coll);
// foo == 7.
IndexScanParams params;
@@ -1294,7 +1292,7 @@ public:
addIndex(BSON("bar" << 1));
WorkingSet ws;
- unique_ptr<AndHashStage> ah(new AndHashStage(&ws, coll));
+ auto ah = make_unique<AndHashStage>(&_txn, &ws, coll);
// Scan over foo == 1
IndexScanParams params;
@@ -1360,7 +1358,7 @@ public:
addIndex(BSON("bar" << 1));
WorkingSet ws;
- unique_ptr<AndSortedStage> as(new AndSortedStage(&ws, coll));
+ auto as = make_unique<AndSortedStage>(&_txn, &ws, coll);
// Scan over foo == 1
IndexScanParams params;
@@ -1414,7 +1412,7 @@ public:
addIndex(BSON("bar" << 1));
WorkingSet ws;
- unique_ptr<AndSortedStage> as(new AndSortedStage(&ws, coll));
+ auto as = make_unique<AndSortedStage>(&_txn, &ws, coll);
// Scan over foo == 1
IndexScanParams params;
diff --git a/src/mongo/dbtests/query_stage_cached_plan.cpp b/src/mongo/dbtests/query_stage_cached_plan.cpp
index 9191ea5cc21..b3c002333af 100644
--- a/src/mongo/dbtests/query_stage_cached_plan.cpp
+++ b/src/mongo/dbtests/query_stage_cached_plan.cpp
@@ -128,7 +128,7 @@ public:
fillOutPlannerParams(&_txn, collection, cq.get(), &plannerParams);
// Queued data stage will return a failure during the cached plan trial period.
- std::unique_ptr<QueuedDataStage> mockChild = stdx::make_unique<QueuedDataStage>(&_ws);
+ auto mockChild = stdx::make_unique<QueuedDataStage>(&_txn, &_ws);
mockChild->pushBack(PlanStage::FAILURE);
// High enough so that we shouldn't trigger a replan based on works.
@@ -196,7 +196,7 @@ public:
const size_t decisionWorks = 10;
const size_t mockWorks =
1U + static_cast<size_t>(internalQueryCacheEvictionRatio * decisionWorks);
- std::unique_ptr<QueuedDataStage> mockChild = stdx::make_unique<QueuedDataStage>(&_ws);
+ auto mockChild = stdx::make_unique<QueuedDataStage>(&_txn, &_ws);
for (size_t i = 0; i < mockWorks; i++) {
mockChild->pushBack(PlanStage::NEED_TIME);
}
diff --git a/src/mongo/dbtests/query_stage_delete.cpp b/src/mongo/dbtests/query_stage_delete.cpp
index 32e6092131d..85767ef2be4 100644
--- a/src/mongo/dbtests/query_stage_delete.cpp
+++ b/src/mongo/dbtests/query_stage_delete.cpp
@@ -48,6 +48,7 @@ namespace QueryStageDelete {
using std::unique_ptr;
using std::vector;
+using stdx::make_unique;
static const NamespaceString nss("unittests.QueryStageDelete");
@@ -191,7 +192,7 @@ public:
Collection* coll = ctx.getCollection();
const int targetDocIndex = 0;
const BSONObj query = BSON("foo" << BSON("$gte" << targetDocIndex));
- const unique_ptr<WorkingSet> ws(stdx::make_unique<WorkingSet>());
+ const auto ws = make_unique<WorkingSet>();
const unique_ptr<CanonicalQuery> cq(canonicalize(query));
// Get the RecordIds that would be returned by an in-order scan.
@@ -200,7 +201,7 @@ public:
// Configure a QueuedDataStage to pass the first object in the collection back in a
// LOC_AND_OBJ state.
- std::unique_ptr<QueuedDataStage> qds(stdx::make_unique<QueuedDataStage>(ws.get()));
+ auto qds = make_unique<QueuedDataStage>(&_txn, ws.get());
WorkingSetID id = ws->allocate();
WorkingSetMember* member = ws->get(id);
member->loc = locs[targetDocIndex];
@@ -214,8 +215,8 @@ public:
deleteParams.returnDeleted = true;
deleteParams.canonicalQuery = cq.get();
- const unique_ptr<DeleteStage> deleteStage(
- stdx::make_unique<DeleteStage>(&_txn, deleteParams, ws.get(), coll, qds.release()));
+ const auto deleteStage =
+ make_unique<DeleteStage>(&_txn, deleteParams, ws.get(), coll, qds.release());
const DeleteStats* stats = static_cast<const DeleteStats*>(deleteStage->getSpecificStats());
@@ -257,11 +258,11 @@ public:
OldClientWriteContext ctx(&_txn, nss.ns());
Collection* coll = ctx.getCollection();
const BSONObj query = BSONObj();
- const unique_ptr<WorkingSet> ws(stdx::make_unique<WorkingSet>());
+ const auto ws = make_unique<WorkingSet>();
const unique_ptr<CanonicalQuery> cq(canonicalize(query));
// Configure a QueuedDataStage to pass an OWNED_OBJ to the delete stage.
- unique_ptr<QueuedDataStage> qds(stdx::make_unique<QueuedDataStage>(ws.get()));
+ auto qds = make_unique<QueuedDataStage>(&_txn, ws.get());
{
WorkingSetID id = ws->allocate();
WorkingSetMember* member = ws->get(id);
@@ -276,8 +277,8 @@ public:
deleteParams.returnDeleted = true;
deleteParams.canonicalQuery = cq.get();
- const unique_ptr<DeleteStage> deleteStage(
- stdx::make_unique<DeleteStage>(&_txn, deleteParams, ws.get(), coll, qds.release()));
+ const auto deleteStage =
+ make_unique<DeleteStage>(&_txn, deleteParams, ws.get(), coll, qds.release());
const DeleteStats* stats = static_cast<const DeleteStats*>(deleteStage->getSpecificStats());
// Call work, passing the set up member to the delete stage.
diff --git a/src/mongo/dbtests/query_stage_fetch.cpp b/src/mongo/dbtests/query_stage_fetch.cpp
index f7a255ba482..fce4be75848 100644
--- a/src/mongo/dbtests/query_stage_fetch.cpp
+++ b/src/mongo/dbtests/query_stage_fetch.cpp
@@ -43,12 +43,14 @@
#include "mongo/db/matcher/expression_parser.h"
#include "mongo/db/operation_context_impl.h"
#include "mongo/dbtests/dbtests.h"
+#include "mongo/stdx/memory.h"
namespace QueryStageFetch {
+using std::set;
using std::shared_ptr;
using std::unique_ptr;
-using std::set;
+using stdx::make_unique;
class QueryStageFetchBase {
public:
@@ -107,7 +109,7 @@ public:
ASSERT_EQUALS(size_t(1), locs.size());
// Create a mock stage that returns the WSM.
- unique_ptr<QueuedDataStage> mockStage(new QueuedDataStage(&ws));
+ auto mockStage = make_unique<QueuedDataStage>(&_txn, &ws);
// Mock data.
{
@@ -173,7 +175,7 @@ public:
ASSERT_EQUALS(size_t(1), locs.size());
// Create a mock stage that returns the WSM.
- unique_ptr<QueuedDataStage> mockStage(new QueuedDataStage(&ws));
+ auto mockStage = make_unique<QueuedDataStage>(&_txn, &ws);
// Mock data.
{
diff --git a/src/mongo/dbtests/query_stage_keep.cpp b/src/mongo/dbtests/query_stage_keep.cpp
index 9d1e0e13789..a304a13ae2a 100644
--- a/src/mongo/dbtests/query_stage_keep.cpp
+++ b/src/mongo/dbtests/query_stage_keep.cpp
@@ -48,11 +48,14 @@
#include "mongo/util/fail_point.h"
#include "mongo/util/fail_point_registry.h"
#include "mongo/util/fail_point_service.h"
+#include "mongo/stdx/memory.h"
namespace QueryStageKeep {
-using std::shared_ptr;
using std::set;
+using std::shared_ptr;
+using std::unique_ptr;
+using stdx::make_unique;
class QueryStageKeepBase {
public:
@@ -142,7 +145,7 @@ public:
// Create a KeepMutations stage to merge in the 10 flagged objects.
// Takes ownership of 'cs'
MatchExpression* nullFilter = NULL;
- std::unique_ptr<KeepMutationsStage> keep(new KeepMutationsStage(nullFilter, &ws, cs));
+ auto keep = make_unique<KeepMutationsStage>(&_txn, nullFilter, &ws, cs);
for (size_t i = 0; i < 10; ++i) {
WorkingSetID id = getNextResult(keep.get());
@@ -190,8 +193,7 @@ public:
// Create a KeepMutationsStage with an EOF child, and flag 50 objects. We expect these
// objects to be returned by the KeepMutationsStage.
MatchExpression* nullFilter = NULL;
- std::unique_ptr<KeepMutationsStage> keep(
- new KeepMutationsStage(nullFilter, &ws, new EOFStage()));
+ auto keep = make_unique<KeepMutationsStage>(&_txn, nullFilter, &ws, new EOFStage(&_txn));
for (size_t i = 0; i < 50; ++i) {
WorkingSetID id = ws.allocate();
WorkingSetMember* member = ws.get(id);
diff --git a/src/mongo/dbtests/query_stage_limit_skip.cpp b/src/mongo/dbtests/query_stage_limit_skip.cpp
index 31b6cda5241..39c04796fea 100644
--- a/src/mongo/dbtests/query_stage_limit_skip.cpp
+++ b/src/mongo/dbtests/query_stage_limit_skip.cpp
@@ -39,6 +39,7 @@
#include "mongo/db/instance.h"
#include "mongo/db/json.h"
#include "mongo/dbtests/dbtests.h"
+#include "mongo/stdx/memory.h"
using namespace mongo;
@@ -47,12 +48,13 @@ namespace {
using std::max;
using std::min;
using std::unique_ptr;
+using stdx::make_unique;
static const int N = 50;
/* Populate a QueuedDataStage and return it. Caller owns it. */
QueuedDataStage* getMS(WorkingSet* ws) {
- unique_ptr<QueuedDataStage> ms(new QueuedDataStage(ws));
+ auto ms = make_unique<QueuedDataStage>(nullptr, ws);
// Put N ADVANCED results into the mock stage, and some other stalling results (YIELD/TIME).
for (int i = 0; i < N; ++i) {
@@ -92,10 +94,10 @@ public:
for (int i = 0; i < 2 * N; ++i) {
WorkingSet ws;
- unique_ptr<PlanStage> skip(new SkipStage(i, &ws, getMS(&ws)));
+ unique_ptr<PlanStage> skip = make_unique<SkipStage>(nullptr, i, &ws, getMS(&ws));
ASSERT_EQUALS(max(0, N - i), countResults(skip.get()));
- unique_ptr<PlanStage> limit(new LimitStage(i, &ws, getMS(&ws)));
+ unique_ptr<PlanStage> limit = make_unique<LimitStage>(nullptr, i, &ws, getMS(&ws));
ASSERT_EQUALS(min(N, i), countResults(limit.get()));
}
}
diff --git a/src/mongo/dbtests/query_stage_merge_sort.cpp b/src/mongo/dbtests/query_stage_merge_sort.cpp
index e2d8f41ed39..3ceb78b7870 100644
--- a/src/mongo/dbtests/query_stage_merge_sort.cpp
+++ b/src/mongo/dbtests/query_stage_merge_sort.cpp
@@ -140,7 +140,7 @@ public:
// Sort by c:1
MergeSortStageParams msparams;
msparams.pattern = BSON("c" << 1);
- MergeSortStage* ms = new MergeSortStage(msparams, ws.get(), coll);
+ MergeSortStage* ms = new MergeSortStage(&_txn, msparams, ws.get(), coll);
// a:1
IndexScanParams params;
@@ -210,7 +210,7 @@ public:
// Sort by c:1
MergeSortStageParams msparams;
msparams.pattern = BSON("c" << 1);
- MergeSortStage* ms = new MergeSortStage(msparams, ws.get(), coll);
+ MergeSortStage* ms = new MergeSortStage(&_txn, msparams, ws.get(), coll);
// a:1
IndexScanParams params;
@@ -279,7 +279,7 @@ public:
MergeSortStageParams msparams;
msparams.dedup = false;
msparams.pattern = BSON("c" << 1);
- MergeSortStage* ms = new MergeSortStage(msparams, ws.get(), coll);
+ MergeSortStage* ms = new MergeSortStage(&_txn, msparams, ws.get(), coll);
// a:1
IndexScanParams params;
@@ -350,7 +350,7 @@ public:
// Sort by c:-1
MergeSortStageParams msparams;
msparams.pattern = BSON("c" << -1);
- MergeSortStage* ms = new MergeSortStage(msparams, ws.get(), coll);
+ MergeSortStage* ms = new MergeSortStage(&_txn, msparams, ws.get(), coll);
// a:1
IndexScanParams params;
@@ -420,7 +420,7 @@ public:
// Sort by c:1
MergeSortStageParams msparams;
msparams.pattern = BSON("c" << 1);
- MergeSortStage* ms = new MergeSortStage(msparams, ws.get(), coll);
+ MergeSortStage* ms = new MergeSortStage(&_txn, msparams, ws.get(), coll);
// a:1
IndexScanParams params;
@@ -476,7 +476,7 @@ public:
// Sort by foo:1
MergeSortStageParams msparams;
msparams.pattern = BSON("foo" << 1);
- MergeSortStage* ms = new MergeSortStage(msparams, ws.get(), coll);
+ MergeSortStage* ms = new MergeSortStage(&_txn, msparams, ws.get(), coll);
IndexScanParams params;
params.bounds.isSimpleRange = true;
@@ -535,7 +535,7 @@ public:
// Sort by foo:1
MergeSortStageParams msparams;
msparams.pattern = BSON("foo" << 1);
- unique_ptr<MergeSortStage> ms(new MergeSortStage(msparams, &ws, coll));
+ auto ms = make_unique<MergeSortStage>(&_txn, msparams, &ws, coll);
IndexScanParams params;
params.bounds.isSimpleRange = true;
diff --git a/src/mongo/dbtests/query_stage_near.cpp b/src/mongo/dbtests/query_stage_near.cpp
index 7e89b0900c5..08e5cdb82d7 100644
--- a/src/mongo/dbtests/query_stage_near.cpp
+++ b/src/mongo/dbtests/query_stage_near.cpp
@@ -52,7 +52,7 @@ using stdx::make_unique;
class MockStage final : public PlanStage {
public:
MockStage(const vector<BSONObj>& data, WorkingSet* workingSet)
- : PlanStage("MOCK_STAGE"), _data(data), _pos(0), _workingSet(workingSet) {}
+ : PlanStage("MOCK_STAGE", nullptr), _data(data), _pos(0), _workingSet(workingSet) {}
StageState work(WorkingSetID* out) final {
++_commonStats.works;
diff --git a/src/mongo/dbtests/query_stage_sort.cpp b/src/mongo/dbtests/query_stage_sort.cpp
index d6f9ab81d1c..985ce898214 100644
--- a/src/mongo/dbtests/query_stage_sort.cpp
+++ b/src/mongo/dbtests/query_stage_sort.cpp
@@ -104,15 +104,15 @@ public:
*/
PlanExecutor* makePlanExecutorWithSortStage(Collection* coll) {
// Build the mock scan stage which feeds the data.
- unique_ptr<WorkingSet> ws(new WorkingSet());
- unique_ptr<QueuedDataStage> ms(new QueuedDataStage(ws.get()));
+ auto ws = make_unique<WorkingSet>();
+ auto ms = make_unique<QueuedDataStage>(&_txn, ws.get());
insertVarietyOfObjects(ws.get(), ms.get(), coll);
SortStageParams params;
params.collection = coll;
params.pattern = BSON("foo" << 1);
params.limit = limit();
- unique_ptr<SortStage> ss(new SortStage(params, ws.get(), ms.release()));
+ auto ss = make_unique<SortStage>(&_txn, params, ws.get(), ms.release());
// The PlanExecutor will be automatically registered on construction due to the auto
// yield policy, so it can receive invalidations when we remove documents later.
@@ -137,8 +137,8 @@ public:
* If limit is not zero, we limit the output of the sort stage to 'limit' results.
*/
void sortAndCheck(int direction, Collection* coll) {
- unique_ptr<WorkingSet> ws = make_unique<WorkingSet>();
- QueuedDataStage* ms = new QueuedDataStage(ws.get());
+ auto ws = make_unique<WorkingSet>();
+ QueuedDataStage* ms = new QueuedDataStage(&_txn, ws.get());
// Insert a mix of the various types of data.
insertVarietyOfObjects(ws.get(), ms, coll);
@@ -148,8 +148,8 @@ public:
params.pattern = BSON("foo" << direction);
params.limit = limit();
- unique_ptr<FetchStage> fetchStage = make_unique<FetchStage>(
- &_txn, ws.get(), new SortStage(params, ws.get(), ms), nullptr, coll);
+ auto fetchStage = make_unique<FetchStage>(
+ &_txn, ws.get(), new SortStage(&_txn, params, ws.get(), ms), nullptr, coll);
// Must fetch so we can look at the doc as a BSONObj.
auto statusWithPlanExecutor = PlanExecutor::make(
@@ -513,8 +513,8 @@ public:
wuow.commit();
}
- unique_ptr<WorkingSet> ws = make_unique<WorkingSet>();
- QueuedDataStage* ms = new QueuedDataStage(ws.get());
+ auto ws = make_unique<WorkingSet>();
+ QueuedDataStage* ms = new QueuedDataStage(&_txn, ws.get());
for (int i = 0; i < numObj(); ++i) {
{
@@ -539,8 +539,8 @@ public:
params.pattern = BSON("b" << -1 << "c" << 1 << "a" << 1);
params.limit = 0;
- unique_ptr<FetchStage> fetchStage = make_unique<FetchStage>(
- &_txn, ws.get(), new SortStage(params, ws.get(), ms), nullptr, coll);
+ auto fetchStage = make_unique<FetchStage>(
+ &_txn, ws.get(), new SortStage(&_txn, params, ws.get(), ms), nullptr, coll);
// We don't get results back since we're sorting some parallel arrays.
auto statusWithPlanExecutor = PlanExecutor::make(
&_txn, std::move(ws), std::move(fetchStage), coll, PlanExecutor::YIELD_MANUAL);
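
[Editor's note: alongside the OperationContext threading, the hunks above also replace "unique_ptr<T> p(new T(...))" with make_unique. The sketch below shows why: one expression, no raw new, no repeated type name. Mongo's stdx::make_unique is the pre-C++14 polyfill for this; the example uses std::make_unique (C++14) so it stays self-contained.]

#include <memory>

struct Widget {
    Widget(int a, int b) : _a(a), _b(b) {}
    int _a, _b;
};

int main() {
    // Before: spell the type twice and call new directly.
    std::unique_ptr<Widget> w1(new Widget(1, 2));

    // After: one expression, no raw new, and auto removes the
    // repetition -- the same shape as the make_unique changes above.
    auto w2 = std::make_unique<Widget>(1, 2);
    return 0;
}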
diff --git a/src/mongo/dbtests/query_stage_update.cpp b/src/mongo/dbtests/query_stage_update.cpp
index fa8370dd3ee..ecff448d363 100644
--- a/src/mongo/dbtests/query_stage_update.cpp
+++ b/src/mongo/dbtests/query_stage_update.cpp
@@ -55,6 +55,7 @@ namespace QueryStageUpdate {
using std::unique_ptr;
using std::vector;
+using stdx::make_unique;
static const NamespaceString nss("unittests.QueryStageUpdate");
@@ -207,11 +208,11 @@ public:
unique_ptr<CanonicalQuery> cq(canonicalize(query));
params.canonicalQuery = cq.get();
- unique_ptr<WorkingSet> ws(new WorkingSet());
- unique_ptr<EOFStage> eofStage(new EOFStage());
+ auto ws = make_unique<WorkingSet>();
+ auto eofStage = make_unique<EOFStage>(&_txn);
- unique_ptr<UpdateStage> updateStage(
- new UpdateStage(&_txn, params, ws.get(), collection, eofStage.release()));
+ auto updateStage =
+ make_unique<UpdateStage>(&_txn, params, ws.get(), collection, eofStage.release());
runUpdate(updateStage.get());
}
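
[Editor's note: the eofStage.release() call above illustrates the ownership convention these tests follow: the parent stage takes ownership of a raw child pointer, so the caller releases it from the unique_ptr at the call site. A minimal sketch of that hand-off, with hypothetical types:]

#include <memory>

struct Stage {
    virtual ~Stage() = default;
};

// The parent adopts a raw child pointer (the convention the
// UpdateStage constructor follows here) and wraps it for ownership.
struct Parent : Stage {
    explicit Parent(Stage* child) : _child(child) {}
    std::unique_ptr<Stage> _child;
};

int main() {
    auto child = std::make_unique<Stage>();
    // After release(), `child` no longer owns the stage; Parent does.
    auto parent = std::make_unique<Parent>(child.release());
    return 0;
}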
@@ -284,12 +285,11 @@ public:
unique_ptr<CanonicalQuery> cq(canonicalize(query));
updateParams.canonicalQuery = cq.get();
- unique_ptr<WorkingSet> ws(new WorkingSet());
- unique_ptr<CollectionScan> cs(
- new CollectionScan(&_txn, collScanParams, ws.get(), cq->root()));
+ auto ws = make_unique<WorkingSet>();
+ auto cs = make_unique<CollectionScan>(&_txn, collScanParams, ws.get(), cq->root());
- unique_ptr<UpdateStage> updateStage(
- new UpdateStage(&_txn, updateParams, ws.get(), coll, cs.release()));
+ auto updateStage =
+ make_unique<UpdateStage>(&_txn, updateParams, ws.get(), coll, cs.release());
const UpdateStats* stats =
static_cast<const UpdateStats*>(updateStage->getSpecificStats());
@@ -366,7 +366,7 @@ public:
UpdateDriver driver((UpdateDriver::Options()));
const int targetDocIndex = 0; // We'll be working with the first doc in the collection.
const BSONObj query = BSON("foo" << BSON("$gte" << targetDocIndex));
- const unique_ptr<WorkingSet> ws(stdx::make_unique<WorkingSet>());
+ const auto ws = make_unique<WorkingSet>();
const unique_ptr<CanonicalQuery> cq(canonicalize(query));
// Get the RecordIds that would be returned by an in-order scan.
@@ -385,7 +385,7 @@ public:
// Configure a QueuedDataStage to pass the first object in the collection back in a
// LOC_AND_OBJ state.
- std::unique_ptr<QueuedDataStage> qds(stdx::make_unique<QueuedDataStage>(ws.get()));
+ auto qds = make_unique<QueuedDataStage>(&_txn, ws.get());
WorkingSetID id = ws->allocate();
WorkingSetMember* member = ws->get(id);
member->loc = locs[targetDocIndex];
@@ -398,8 +398,8 @@ public:
UpdateStageParams updateParams(&request, &driver, opDebug);
updateParams.canonicalQuery = cq.get();
- const unique_ptr<UpdateStage> updateStage(
- stdx::make_unique<UpdateStage>(&_txn, updateParams, ws.get(), coll, qds.release()));
+ const auto updateStage =
+ make_unique<UpdateStage>(&_txn, updateParams, ws.get(), coll, qds.release());
// Should return advanced.
id = WorkingSet::INVALID_ID;
@@ -454,7 +454,7 @@ public:
UpdateDriver driver((UpdateDriver::Options()));
const int targetDocIndex = 10;
const BSONObj query = BSON("foo" << BSON("$gte" << targetDocIndex));
- const unique_ptr<WorkingSet> ws(stdx::make_unique<WorkingSet>());
+ const auto ws = make_unique<WorkingSet>();
const unique_ptr<CanonicalQuery> cq(canonicalize(query));
// Get the RecordIds that would be returned by an in-order scan.
@@ -473,7 +473,7 @@ public:
// Configure a QueuedDataStage to pass the target object in the collection back in a
// LOC_AND_OBJ state.
- std::unique_ptr<QueuedDataStage> qds(stdx::make_unique<QueuedDataStage>(ws.get()));
+ auto qds = make_unique<QueuedDataStage>(&_txn, ws.get());
WorkingSetID id = ws->allocate();
WorkingSetMember* member = ws->get(id);
member->loc = locs[targetDocIndex];
@@ -486,8 +486,8 @@ public:
UpdateStageParams updateParams(&request, &driver, opDebug);
updateParams.canonicalQuery = cq.get();
- unique_ptr<UpdateStage> updateStage(
- stdx::make_unique<UpdateStage>(&_txn, updateParams, ws.get(), coll, qds.release()));
+ auto updateStage =
+ make_unique<UpdateStage>(&_txn, updateParams, ws.get(), coll, qds.release());
// Should return advanced.
id = WorkingSet::INVALID_ID;
@@ -535,7 +535,7 @@ public:
UpdateRequest request(nss);
UpdateDriver driver((UpdateDriver::Options()));
const BSONObj query = BSONObj();
- const unique_ptr<WorkingSet> ws(stdx::make_unique<WorkingSet>());
+ const auto ws = make_unique<WorkingSet>();
const unique_ptr<CanonicalQuery> cq(canonicalize(query));
// Populate the request.
@@ -549,7 +549,7 @@ public:
ASSERT_OK(driver.parse(request.getUpdates(), request.isMulti()));
// Configure a QueuedDataStage to pass an OWNED_OBJ to the update stage.
- unique_ptr<QueuedDataStage> qds(stdx::make_unique<QueuedDataStage>(ws.get()));
+ auto qds = make_unique<QueuedDataStage>(&_txn, ws.get());
{
WorkingSetID id = ws->allocate();
WorkingSetMember* member = ws->get(id);
@@ -562,8 +562,8 @@ public:
UpdateStageParams updateParams(&request, &driver, opDebug);
updateParams.canonicalQuery = cq.get();
- const unique_ptr<UpdateStage> updateStage(
- stdx::make_unique<UpdateStage>(&_txn, updateParams, ws.get(), coll, qds.release()));
+ const auto updateStage =
+ make_unique<UpdateStage>(&_txn, updateParams, ws.get(), coll, qds.release());
const UpdateStats* stats = static_cast<const UpdateStats*>(updateStage->getSpecificStats());
// Call work, passing the set up member to the update stage.