author     Mathias Stearn <mathias@10gen.com>  2015-07-08 12:40:15 -0400
committer  Mathias Stearn <mathias@10gen.com>  2015-07-16 14:37:02 -0400
commit     c832bc753c29f91597b75fa02c0d9019c3c20b0f (patch)
tree       5324f665212988a354ff6ba254b239dc594d2c7d /src/mongo/db
parent     f64b6c596f9dcd1bae7011a3230b517386baa255 (diff)
download   mongo-c832bc753c29f91597b75fa02c0d9019c3c20b0f.tar.gz
SERVER-17364 Unify handling of child stages into PlanStage base class
This is prep for adding more methods that need to propagate to children.
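The pattern the diff converges on, sketched below with stand-in types so it compiles on its own: PlanStage owns its children as a vector of unique_ptr, walks them in saveState()/restoreState()/invalidate() while bumping the common stats, and lets each derived stage hook in through doSaveState()/doRestoreState()/doInvalidate() and reach a single child via child(). The names follow the diff below; the stand-in types (OperationContext, RecordId, CommonStats), the exact call ordering, and the example stages are simplifications for illustration, not the real plan_stage.h.

// Minimal, self-contained sketch (stand-in types, not the real MongoDB headers).
#include <cstddef>
#include <iostream>
#include <memory>
#include <string>
#include <vector>

struct OperationContext {};  // stand-in for mongo::OperationContext
struct RecordId {};          // stand-in for mongo::RecordId
enum InvalidationType { INVALIDATION_DELETION, INVALIDATION_MUTATION };

struct CommonStats {
    explicit CommonStats(const char* type) : stageTypeStr(type) {}
    std::string stageTypeStr;
    std::size_t yields = 0, unyields = 0, invalidates = 0;
};

class PlanStage {
public:
    using Children = std::vector<std::unique_ptr<PlanStage>>;

    explicit PlanStage(const char* typeName) : _commonStats(typeName) {}
    virtual ~PlanStage() {}

    // The base class now does the bookkeeping: bump the common stat, recurse
    // into every child, then give the derived stage a hook for its own state.
    void saveState() {
        ++_commonStats.yields;
        for (auto& child : _children)
            child->saveState();
        doSaveState();
    }

    void restoreState(OperationContext* opCtx) {
        ++_commonStats.unyields;
        for (auto& child : _children)
            child->restoreState(opCtx);
        doRestoreState(opCtx);
    }

    void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
        ++_commonStats.invalidates;
        for (auto& child : _children)
            child->invalidate(txn, dl, type);
        doInvalidate(txn, dl, type);
    }

    const Children& getChildren() const { return _children; }
    const CommonStats* getCommonStats() const { return &_commonStats; }

protected:
    // Per-stage hooks default to no-ops, so most stages override nothing.
    virtual void doSaveState() {}
    virtual void doRestoreState(OperationContext*) {}
    virtual void doInvalidate(OperationContext*, const RecordId&, InvalidationType) {}

    // Convenience accessor for single-child stages (FetchStage, DeleteStage, ...).
    const std::unique_ptr<PlanStage>& child() const { return _children.front(); }

    Children _children;        // owned here; no per-stage destructors or deletes needed
    CommonStats _commonStats;  // owned here as well
};

// A leaf stage overrides only the hooks it cares about.
class ExampleScanStage : public PlanStage {
public:
    ExampleScanStage() : PlanStage("EXAMPLE_SCAN") {}

protected:
    void doSaveState() override { std::cout << "scan saved its cursor\n"; }
};

// A wrapping stage registers its child once; ownership moves to the base class.
class ExampleFetchStage : public PlanStage {
public:
    explicit ExampleFetchStage(PlanStage* childStage) : PlanStage("EXAMPLE_FETCH") {
        _children.emplace_back(childStage);
    }
};

int main() {
    ExampleFetchStage root(new ExampleScanStage());
    root.saveState();  // the base class propagates the call to the scan child
    std::cout << "root yields: " << root.getCommonStats()->yields << "\n";
    return 0;
}

With that in place, each stage only passes its type name to the PlanStage constructor and registers children with _children.emplace_back(child); that is the change repeated file by file in the diff below.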
Diffstat (limited to 'src/mongo/db')
-rw-r--r--  src/mongo/db/exec/SConscript | 1
-rw-r--r--  src/mongo/db/exec/and_hash.cpp | 51
-rw-r--r--  src/mongo/db/exec/and_hash.h | 14
-rw-r--r--  src/mongo/db/exec/and_sorted.cpp | 49
-rw-r--r--  src/mongo/db/exec/and_sorted.h | 13
-rw-r--r--  src/mongo/db/exec/cached_plan.cpp | 59
-rw-r--r--  src/mongo/db/exec/cached_plan.h | 12
-rw-r--r--  src/mongo/db/exec/collection_scan.cpp | 27
-rw-r--r--  src/mongo/db/exec/collection_scan.h | 11
-rw-r--r--  src/mongo/db/exec/count.cpp | 56
-rw-r--r--  src/mongo/db/exec/count.h | 13
-rw-r--r--  src/mongo/db/exec/count_scan.cpp | 25
-rw-r--r--  src/mongo/db/exec/count_scan.h | 12
-rw-r--r--  src/mongo/db/exec/delete.cpp | 48
-rw-r--r--  src/mongo/db/exec/delete.h | 12
-rw-r--r--  src/mongo/db/exec/distinct_scan.cpp | 25
-rw-r--r--  src/mongo/db/exec/distinct_scan.h | 10
-rw-r--r--  src/mongo/db/exec/eof.cpp | 23
-rw-r--r--  src/mongo/db/exec/eof.h | 10
-rw-r--r--  src/mongo/db/exec/fetch.cpp | 39
-rw-r--r--  src/mongo/db/exec/fetch.h | 12
-rw-r--r--  src/mongo/db/exec/geo_near.cpp | 177
-rw-r--r--  src/mongo/db/exec/geo_near.h | 14
-rw-r--r--  src/mongo/db/exec/group.cpp | 39
-rw-r--r--  src/mongo/db/exec/group.h | 11
-rw-r--r--  src/mongo/db/exec/idhack.cpp | 31
-rw-r--r--  src/mongo/db/exec/idhack.h | 11
-rw-r--r--  src/mongo/db/exec/index_scan.cpp | 25
-rw-r--r--  src/mongo/db/exec/index_scan.h | 11
-rw-r--r--  src/mongo/db/exec/keep_mutations.cpp | 40
-rw-r--r--  src/mongo/db/exec/keep_mutations.h | 13
-rw-r--r--  src/mongo/db/exec/limit.cpp | 34
-rw-r--r--  src/mongo/db/exec/limit.h | 10
-rw-r--r--  src/mongo/db/exec/merge_sort.cpp | 44
-rw-r--r--  src/mongo/db/exec/merge_sort.h | 13
-rw-r--r--  src/mongo/db/exec/multi_iterator.cpp | 21
-rw-r--r--  src/mongo/db/exec/multi_iterator.h | 15
-rw-r--r--  src/mongo/db/exec/multi_plan.cpp | 59
-rw-r--r--  src/mongo/db/exec/multi_plan.h | 19
-rw-r--r--  src/mongo/db/exec/near.cpp | 91
-rw-r--r--  src/mongo/db/exec/near.h | 40
-rw-r--r--  src/mongo/db/exec/oplogstart.cpp | 33
-rw-r--r--  src/mongo/db/exec/oplogstart.h | 17
-rw-r--r--  src/mongo/db/exec/or.cpp | 41
-rw-r--r--  src/mongo/db/exec/or.h | 13
-rw-r--r--  src/mongo/db/exec/pipeline_proxy.cpp | 21
-rw-r--r--  src/mongo/db/exec/pipeline_proxy.h | 13
-rw-r--r--  src/mongo/db/exec/plan_stage.cpp | 64
-rw-r--r--  src/mongo/db/exec/plan_stage.h | 75
-rw-r--r--  src/mongo/db/exec/projection.cpp | 36
-rw-r--r--  src/mongo/db/exec/projection.h | 12
-rw-r--r--  src/mongo/db/exec/queued_data_stage.cpp | 22
-rw-r--r--  src/mongo/db/exec/queued_data_stage.h | 13
-rw-r--r--  src/mongo/db/exec/shard_filter.cpp | 37
-rw-r--r--  src/mongo/db/exec/shard_filter.h | 10
-rw-r--r--  src/mongo/db/exec/skip.cpp | 35
-rw-r--r--  src/mongo/db/exec/skip.h | 10
-rw-r--r--  src/mongo/db/exec/sort.cpp | 40
-rw-r--r--  src/mongo/db/exec/sort.h | 17
-rw-r--r--  src/mongo/db/exec/subplan.cpp | 79
-rw-r--r--  src/mongo/db/exec/subplan.h | 12
-rw-r--r--  src/mongo/db/exec/text.cpp | 37
-rw-r--r--  src/mongo/db/exec/text.h | 13
-rw-r--r--  src/mongo/db/exec/text_match.cpp | 36
-rw-r--r--  src/mongo/db/exec/text_match.h | 13
-rw-r--r--  src/mongo/db/exec/text_or.cpp | 41
-rw-r--r--  src/mongo/db/exec/text_or.h | 15
-rw-r--r--  src/mongo/db/exec/update.cpp | 43
-rw-r--r--  src/mongo/db/exec/update.h | 12
-rw-r--r--  src/mongo/db/query/explain.cpp | 8
-rw-r--r--  src/mongo/db/query/plan_executor.cpp | 4
-rw-r--r--  src/mongo/db/query/plan_ranker.h | 25
-rw-r--r--  src/mongo/db/s/migration_source_manager.cpp | 22
73 files changed, 531 insertions, 1578 deletions
diff --git a/src/mongo/db/exec/SConscript b/src/mongo/db/exec/SConscript
index 035817e61cc..282a49887d5 100644
--- a/src/mongo/db/exec/SConscript
+++ b/src/mongo/db/exec/SConscript
@@ -58,6 +58,7 @@ env.Library(
"oplogstart.cpp",
"or.cpp",
"pipeline_proxy.cpp",
+ "plan_stage.cpp",
"projection.cpp",
"projection_exec.cpp",
"queued_data_stage.cpp",
diff --git a/src/mongo/db/exec/and_hash.cpp b/src/mongo/db/exec/and_hash.cpp
index 9fe0111ebe8..1c21b4adf83 100644
--- a/src/mongo/db/exec/and_hash.cpp
+++ b/src/mongo/db/exec/and_hash.cpp
@@ -55,31 +55,25 @@ const size_t AndHashStage::kLookAheadWorks = 10;
const char* AndHashStage::kStageType = "AND_HASH";
AndHashStage::AndHashStage(WorkingSet* ws, const Collection* collection)
- : _collection(collection),
+ : PlanStage(kStageType),
+ _collection(collection),
_ws(ws),
_hashingChildren(true),
_currentChild(0),
- _commonStats(kStageType),
_memUsage(0),
_maxMemUsage(kDefaultMaxMemUsageBytes) {}
AndHashStage::AndHashStage(WorkingSet* ws, const Collection* collection, size_t maxMemUsage)
- : _collection(collection),
+ : PlanStage(kStageType),
+ _collection(collection),
_ws(ws),
_hashingChildren(true),
_currentChild(0),
- _commonStats(kStageType),
_memUsage(0),
_maxMemUsage(maxMemUsage) {}
-AndHashStage::~AndHashStage() {
- for (size_t i = 0; i < _children.size(); ++i) {
- delete _children[i];
- }
-}
-
void AndHashStage::addChild(PlanStage* child) {
- _children.push_back(child);
+ _children.emplace_back(child);
}
size_t AndHashStage::getMemUsage() const {
@@ -137,7 +131,7 @@ PlanStage::StageState AndHashStage::work(WorkingSetID* out) {
// a result. If it's EOF this whole stage will be EOF. If it produces a
// result we cache it for later.
for (size_t i = 0; i < _children.size(); ++i) {
- PlanStage* child = _children[i];
+ auto& child = _children[i];
for (size_t j = 0; j < kLookAheadWorks; ++j) {
StageState childStatus = child->work(&_lookAheadResults[i]);
@@ -428,33 +422,12 @@ PlanStage::StageState AndHashStage::hashOtherChildren(WorkingSetID* out) {
}
}
-void AndHashStage::saveState() {
- ++_commonStats.yields;
-
- for (size_t i = 0; i < _children.size(); ++i) {
- _children[i]->saveState();
- }
-}
-
-void AndHashStage::restoreState(OperationContext* opCtx) {
- ++_commonStats.unyields;
-
- for (size_t i = 0; i < _children.size(); ++i) {
- _children[i]->restoreState(opCtx);
- }
-}
-
-void AndHashStage::invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
- ++_commonStats.invalidates;
-
+void AndHashStage::doInvalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
+ // TODO remove this since calling isEOF is illegal inside of doInvalidate().
if (isEOF()) {
return;
}
- for (size_t i = 0; i < _children.size(); ++i) {
- _children[i]->invalidate(txn, dl, type);
- }
-
// Invalidation can happen to our warmup results. If that occurs just
// flag it and forget about it.
for (size_t i = 0; i < _lookAheadResults.size(); ++i) {
@@ -500,10 +473,6 @@ void AndHashStage::invalidate(OperationContext* txn, const RecordId& dl, Invalid
}
}
-vector<PlanStage*> AndHashStage::getChildren() const {
- return _children;
-}
-
unique_ptr<PlanStageStats> AndHashStage::getStats() {
_commonStats.isEOF = isEOF();
@@ -519,10 +488,6 @@ unique_ptr<PlanStageStats> AndHashStage::getStats() {
return ret;
}
-const CommonStats* AndHashStage::getCommonStats() const {
- return &_commonStats;
-}
-
const SpecificStats* AndHashStage::getSpecificStats() const {
return &_specificStats;
}
diff --git a/src/mongo/db/exec/and_hash.h b/src/mongo/db/exec/and_hash.h
index 974c940acda..b72a78fbeb8 100644
--- a/src/mongo/db/exec/and_hash.h
+++ b/src/mongo/db/exec/and_hash.h
@@ -58,8 +58,6 @@ public:
*/
AndHashStage(WorkingSet* ws, const Collection* collection, size_t maxMemUsage);
- virtual ~AndHashStage();
-
void addChild(PlanStage* child);
/**
@@ -71,11 +69,7 @@ public:
virtual StageState work(WorkingSetID* out);
virtual bool isEOF();
- virtual void saveState();
- virtual void restoreState(OperationContext* opCtx);
- virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
-
- virtual std::vector<PlanStage*> getChildren() const;
+ virtual void doInvalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
virtual StageType stageType() const {
return STAGE_AND_HASH;
@@ -83,8 +77,6 @@ public:
virtual std::unique_ptr<PlanStageStats> getStats();
- virtual const CommonStats* getCommonStats() const;
-
virtual const SpecificStats* getSpecificStats() const;
static const char* kStageType;
@@ -102,9 +94,6 @@ private:
// Not owned by us.
WorkingSet* _ws;
- // The stages we read from. Owned by us.
- std::vector<PlanStage*> _children;
-
// We want to see if any of our children are EOF immediately. This requires working them a
// few times to see if they hit EOF or if they produce a result. If they produce a result,
// we place that result here.
@@ -127,7 +116,6 @@ private:
size_t _currentChild;
// Stats
- CommonStats _commonStats;
AndHashStats _specificStats;
// The usage in bytes of all buffered data that we're holding.
diff --git a/src/mongo/db/exec/and_sorted.cpp b/src/mongo/db/exec/and_sorted.cpp
index 7186e694daa..873e545a2dd 100644
--- a/src/mongo/db/exec/and_sorted.cpp
+++ b/src/mongo/db/exec/and_sorted.cpp
@@ -45,21 +45,16 @@ using stdx::make_unique;
const char* AndSortedStage::kStageType = "AND_SORTED";
AndSortedStage::AndSortedStage(WorkingSet* ws, const Collection* collection)
- : _collection(collection),
+ : PlanStage(kStageType),
+ _collection(collection),
_ws(ws),
_targetNode(numeric_limits<size_t>::max()),
_targetId(WorkingSet::INVALID_ID),
- _isEOF(false),
- _commonStats(kStageType) {}
+ _isEOF(false) {}
-AndSortedStage::~AndSortedStage() {
- for (size_t i = 0; i < _children.size(); ++i) {
- delete _children[i];
- }
-}
void AndSortedStage::addChild(PlanStage* child) {
- _children.push_back(child);
+ _children.emplace_back(child);
}
bool AndSortedStage::isEOF() {
@@ -160,7 +155,7 @@ PlanStage::StageState AndSortedStage::moveTowardTargetLoc(WorkingSetID* out) {
// We have nodes that haven't hit _targetLoc yet.
size_t workingChildNumber = _workingTowardRep.front();
- PlanStage* next = _children[workingChildNumber];
+ auto& next = _children[workingChildNumber];
WorkingSetID id = WorkingSet::INVALID_ID;
StageState state = next->work(&id);
@@ -253,33 +248,15 @@ PlanStage::StageState AndSortedStage::moveTowardTargetLoc(WorkingSetID* out) {
}
}
-void AndSortedStage::saveState() {
- ++_commonStats.yields;
-
- for (size_t i = 0; i < _children.size(); ++i) {
- _children[i]->saveState();
- }
-}
-
-void AndSortedStage::restoreState(OperationContext* opCtx) {
- ++_commonStats.unyields;
-
- for (size_t i = 0; i < _children.size(); ++i) {
- _children[i]->restoreState(opCtx);
- }
-}
-
-void AndSortedStage::invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
- ++_commonStats.invalidates;
+void AndSortedStage::doInvalidate(OperationContext* txn,
+ const RecordId& dl,
+ InvalidationType type) {
+ // TODO remove this since calling isEOF is illegal inside of doInvalidate().
if (isEOF()) {
return;
}
- for (size_t i = 0; i < _children.size(); ++i) {
- _children[i]->invalidate(txn, dl, type);
- }
-
if (dl == _targetLoc) {
// We're in the middle of moving children forward until they hit _targetLoc, which is no
// longer a valid target. If it's a deletion we can't AND it with anything, if it's a
@@ -298,10 +275,6 @@ void AndSortedStage::invalidate(OperationContext* txn, const RecordId& dl, Inval
}
}
-vector<PlanStage*> AndSortedStage::getChildren() const {
- return _children;
-}
-
unique_ptr<PlanStageStats> AndSortedStage::getStats() {
_commonStats.isEOF = isEOF();
@@ -314,10 +287,6 @@ unique_ptr<PlanStageStats> AndSortedStage::getStats() {
return ret;
}
-const CommonStats* AndSortedStage::getCommonStats() const {
- return &_commonStats;
-}
-
const SpecificStats* AndSortedStage::getSpecificStats() const {
return &_specificStats;
}
diff --git a/src/mongo/db/exec/and_sorted.h b/src/mongo/db/exec/and_sorted.h
index cda1fa52a28..b742a1090e3 100644
--- a/src/mongo/db/exec/and_sorted.h
+++ b/src/mongo/db/exec/and_sorted.h
@@ -54,18 +54,13 @@ namespace mongo {
class AndSortedStage : public PlanStage {
public:
AndSortedStage(WorkingSet* ws, const Collection* collection);
- virtual ~AndSortedStage();
void addChild(PlanStage* child);
virtual StageState work(WorkingSetID* out);
virtual bool isEOF();
- virtual void saveState();
- virtual void restoreState(OperationContext* opCtx);
- virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
-
- virtual std::vector<PlanStage*> getChildren() const;
+ virtual void doInvalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
virtual StageType stageType() const {
return STAGE_AND_SORTED;
@@ -73,8 +68,6 @@ public:
virtual std::unique_ptr<PlanStageStats> getStats();
- virtual const CommonStats* getCommonStats() const;
-
virtual const SpecificStats* getSpecificStats() const;
static const char* kStageType;
@@ -93,9 +86,6 @@ private:
// Not owned by us.
WorkingSet* _ws;
- // Owned by us.
- std::vector<PlanStage*> _children;
-
// The current node we're AND-ing against.
size_t _targetNode;
RecordId _targetLoc;
@@ -110,7 +100,6 @@ private:
bool _isEOF;
// Stats
- CommonStats _commonStats;
AndSortedStats _specificStats;
};
diff --git a/src/mongo/db/exec/cached_plan.cpp b/src/mongo/db/exec/cached_plan.cpp
index 6888b309039..7ad6ef16a6e 100644
--- a/src/mongo/db/exec/cached_plan.cpp
+++ b/src/mongo/db/exec/cached_plan.cpp
@@ -60,15 +60,15 @@ CachedPlanStage::CachedPlanStage(OperationContext* txn,
const QueryPlannerParams& params,
size_t decisionWorks,
PlanStage* root)
- : _txn(txn),
+ : PlanStage(kStageType),
+ _txn(txn),
_collection(collection),
_ws(ws),
_canonicalQuery(cq),
_plannerParams(params),
- _decisionWorks(decisionWorks),
- _root(root),
- _commonStats(kStageType) {
+ _decisionWorks(decisionWorks) {
invariant(_collection);
+ _children.emplace_back(root);
}
Status CachedPlanStage::pickBestPlan(PlanYieldPolicy* yieldPolicy) {
@@ -93,7 +93,7 @@ Status CachedPlanStage::pickBestPlan(PlanYieldPolicy* yieldPolicy) {
}
WorkingSetID id = WorkingSet::INVALID_ID;
- PlanStage::StageState state = _root->work(&id);
+ PlanStage::StageState state = child()->work(&id);
if (PlanStage::ADVANCED == state) {
// Save result for later.
@@ -138,7 +138,7 @@ Status CachedPlanStage::pickBestPlan(PlanYieldPolicy* yieldPolicy) {
LOG(1) << "Execution of cached plan failed, falling back to replan."
<< " query: " << _canonicalQuery->toStringShort()
- << " planSummary: " << Explain::getPlanSummary(_root.get())
+ << " planSummary: " << Explain::getPlanSummary(child().get())
<< " status: " << statusObj;
const bool shouldCache = false;
@@ -149,7 +149,7 @@ Status CachedPlanStage::pickBestPlan(PlanYieldPolicy* yieldPolicy) {
LOG(1) << "Execution of cached plan failed: PlanStage died"
<< ", query: " << _canonicalQuery->toStringShort()
- << " planSummary: " << Explain::getPlanSummary(_root.get())
+ << " planSummary: " << Explain::getPlanSummary(child().get())
<< " status: " << statusObj;
return WorkingSetCommon::getMemberObjectStatus(statusObj);
@@ -164,7 +164,7 @@ Status CachedPlanStage::pickBestPlan(PlanYieldPolicy* yieldPolicy) {
<< " works, but was originally cached with only " << _decisionWorks
<< " works. Evicting cache entry and replanning query: "
<< _canonicalQuery->toStringShort()
- << " plan summary before replan: " << Explain::getPlanSummary(_root.get());
+ << " plan summary before replan: " << Explain::getPlanSummary(child().get());
const bool shouldCache = true;
return replan(yieldPolicy, shouldCache);
@@ -194,11 +194,10 @@ Status CachedPlanStage::tryYield(PlanYieldPolicy* yieldPolicy) {
}
Status CachedPlanStage::replan(PlanYieldPolicy* yieldPolicy, bool shouldCache) {
- // We're going to start over with a new plan. No need for only old buffered results.
+ // We're going to start over with a new plan. Clear out info from our old plan.
_results.clear();
-
- // Clear out the working set. We'll start with a fresh working set.
_ws->clear();
+ _children.clear();
// Use the query planning module to plan the whole query.
std::vector<QuerySolution*> rawSolutions;
@@ -230,15 +229,15 @@ Status CachedPlanStage::replan(PlanYieldPolicy* yieldPolicy, bool shouldCache) {
PlanStage* newRoot;
// Only one possible plan. Build the stages from the solution.
verify(StageBuilder::build(_txn, _collection, *solutions[0], _ws, &newRoot));
- _root.reset(newRoot);
+ _children.emplace_back(newRoot);
_replannedQs.reset(solutions.popAndReleaseBack());
return Status::OK();
}
// Many solutions. Create a MultiPlanStage to pick the best, update the cache,
// and so on. The working set will be shared by all candidate plans.
- _root.reset(new MultiPlanStage(_txn, _collection, _canonicalQuery, shouldCache));
- MultiPlanStage* multiPlanStage = static_cast<MultiPlanStage*>(_root.get());
+ _children.emplace_back(new MultiPlanStage(_txn, _collection, _canonicalQuery, shouldCache));
+ MultiPlanStage* multiPlanStage = static_cast<MultiPlanStage*>(child().get());
for (size_t ix = 0; ix < solutions.size(); ++ix) {
if (solutions[ix]->cacheData.get()) {
@@ -257,7 +256,7 @@ Status CachedPlanStage::replan(PlanYieldPolicy* yieldPolicy, bool shouldCache) {
}
bool CachedPlanStage::isEOF() {
- return _results.empty() && _root->isEOF();
+ return _results.empty() && child()->isEOF();
}
PlanStage::StageState CachedPlanStage::work(WorkingSetID* out) {
@@ -279,7 +278,7 @@ PlanStage::StageState CachedPlanStage::work(WorkingSetID* out) {
}
// Nothing left in trial period buffer.
- StageState childStatus = _root->work(out);
+ StageState childStatus = child()->work(out);
if (PlanStage::ADVANCED == childStatus) {
_commonStats.advanced++;
@@ -292,24 +291,14 @@ PlanStage::StageState CachedPlanStage::work(WorkingSetID* out) {
return childStatus;
}
-void CachedPlanStage::saveState() {
- _txn = NULL;
- ++_commonStats.yields;
- _root->saveState();
-}
-void CachedPlanStage::restoreState(OperationContext* opCtx) {
- invariant(_txn == NULL);
+void CachedPlanStage::doRestoreState(OperationContext* opCtx) {
_txn = opCtx;
-
- ++_commonStats.unyields;
- _root->restoreState(opCtx);
}
-void CachedPlanStage::invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
- _root->invalidate(txn, dl, type);
- ++_commonStats.invalidates;
-
+void CachedPlanStage::doInvalidate(OperationContext* txn,
+ const RecordId& dl,
+ InvalidationType type) {
for (std::list<WorkingSetID>::iterator it = _results.begin(); it != _results.end();) {
WorkingSetMember* member = _ws->get(*it);
if (member->hasLoc() && member->loc == dl) {
@@ -324,25 +313,17 @@ void CachedPlanStage::invalidate(OperationContext* txn, const RecordId& dl, Inva
}
}
-std::vector<PlanStage*> CachedPlanStage::getChildren() const {
- return {_root.get()};
-}
-
std::unique_ptr<PlanStageStats> CachedPlanStage::getStats() {
_commonStats.isEOF = isEOF();
std::unique_ptr<PlanStageStats> ret =
stdx::make_unique<PlanStageStats>(_commonStats, STAGE_CACHED_PLAN);
ret->specific = stdx::make_unique<CachedPlanStats>(_specificStats);
- ret->children.push_back(_root->getStats().release());
+ ret->children.push_back(child()->getStats().release());
return ret;
}
-const CommonStats* CachedPlanStage::getCommonStats() const {
- return &_commonStats;
-}
-
const SpecificStats* CachedPlanStage::getSpecificStats() const {
return &_specificStats;
}
diff --git a/src/mongo/db/exec/cached_plan.h b/src/mongo/db/exec/cached_plan.h
index 02f92350549..117e6aa5dfa 100644
--- a/src/mongo/db/exec/cached_plan.h
+++ b/src/mongo/db/exec/cached_plan.h
@@ -65,11 +65,8 @@ public:
virtual StageState work(WorkingSetID* out);
- virtual void saveState();
- virtual void restoreState(OperationContext* opCtx);
- virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
-
- virtual std::vector<PlanStage*> getChildren() const;
+ virtual void doRestoreState(OperationContext* opCtx);
+ virtual void doInvalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
virtual StageType stageType() const {
return STAGE_CACHED_PLAN;
@@ -77,8 +74,6 @@ public:
virtual std::unique_ptr<PlanStageStats> getStats();
- virtual const CommonStats* getCommonStats() const;
-
virtual const SpecificStats* getSpecificStats() const;
static const char* kStageType;
@@ -142,8 +137,6 @@ private:
// that solution is owned here.
std::unique_ptr<QuerySolution> _replannedQs;
- std::unique_ptr<PlanStage> _root;
-
// Any results produced during trial period execution are kept here.
std::list<WorkingSetID> _results;
@@ -154,7 +147,6 @@ private:
std::unique_ptr<RecordFetcher> _fetcher;
// Stats
- CommonStats _commonStats;
CachedPlanStats _specificStats;
};
diff --git a/src/mongo/db/exec/collection_scan.cpp b/src/mongo/db/exec/collection_scan.cpp
index d5a5d550bea..9eee74c4451 100644
--- a/src/mongo/db/exec/collection_scan.cpp
+++ b/src/mongo/db/exec/collection_scan.cpp
@@ -58,13 +58,13 @@ CollectionScan::CollectionScan(OperationContext* txn,
const CollectionScanParams& params,
WorkingSet* workingSet,
const MatchExpression* filter)
- : _txn(txn),
+ : PlanStage(kStageType),
+ _txn(txn),
_workingSet(workingSet),
_filter(filter),
_params(params),
_isDead(false),
- _wsidForFetch(_workingSet->allocate()),
- _commonStats(kStageType) {
+ _wsidForFetch(_workingSet->allocate()) {
// Explain reports the direction of the collection scan.
_specificStats.direction = params.direction;
}
@@ -191,9 +191,9 @@ bool CollectionScan::isEOF() {
return _commonStats.isEOF || _isDead;
}
-void CollectionScan::invalidate(OperationContext* txn, const RecordId& id, InvalidationType type) {
- ++_commonStats.invalidates;
-
+void CollectionScan::doInvalidate(OperationContext* txn,
+ const RecordId& id,
+ InvalidationType type) {
// We don't care about mutations since we apply any filters to the result when we (possibly)
// return it.
if (INVALIDATION_DELETION != type) {
@@ -214,18 +214,16 @@ void CollectionScan::invalidate(OperationContext* txn, const RecordId& id, Inval
}
}
-void CollectionScan::saveState() {
+void CollectionScan::doSaveState() {
_txn = NULL;
- ++_commonStats.yields;
if (_cursor) {
_cursor->savePositioned();
}
}
-void CollectionScan::restoreState(OperationContext* opCtx) {
+void CollectionScan::doRestoreState(OperationContext* opCtx) {
invariant(_txn == NULL);
_txn = opCtx;
- ++_commonStats.unyields;
if (_cursor) {
if (!_cursor->restore(opCtx)) {
warning() << "Could not restore RecordCursor for CollectionScan: " << opCtx->getNS();
@@ -234,11 +232,6 @@ void CollectionScan::restoreState(OperationContext* opCtx) {
}
}
-vector<PlanStage*> CollectionScan::getChildren() const {
- vector<PlanStage*> empty;
- return empty;
-}
-
unique_ptr<PlanStageStats> CollectionScan::getStats() {
// Add a BSON representation of the filter to the stats tree, if there is one.
if (NULL != _filter) {
@@ -252,10 +245,6 @@ unique_ptr<PlanStageStats> CollectionScan::getStats() {
return ret;
}
-const CommonStats* CollectionScan::getCommonStats() const {
- return &_commonStats;
-}
-
const SpecificStats* CollectionScan::getSpecificStats() const {
return &_specificStats;
}
diff --git a/src/mongo/db/exec/collection_scan.h b/src/mongo/db/exec/collection_scan.h
index 418a3b5b7e2..856af5e10ad 100644
--- a/src/mongo/db/exec/collection_scan.h
+++ b/src/mongo/db/exec/collection_scan.h
@@ -57,11 +57,9 @@ public:
virtual StageState work(WorkingSetID* out);
virtual bool isEOF();
- virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
- virtual void saveState();
- virtual void restoreState(OperationContext* opCtx);
-
- virtual std::vector<PlanStage*> getChildren() const;
+ virtual void doInvalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
+ virtual void doSaveState();
+ virtual void doRestoreState(OperationContext* opCtx);
virtual StageType stageType() const {
return STAGE_COLLSCAN;
@@ -69,8 +67,6 @@ public:
virtual std::unique_ptr<PlanStageStats> getStats();
- virtual const CommonStats* getCommonStats() const;
-
virtual const SpecificStats* getSpecificStats() const;
static const char* kStageType;
@@ -105,7 +101,6 @@ private:
const WorkingSetID _wsidForFetch;
// Stats
- CommonStats _commonStats;
CollectionScanStats _specificStats;
};
diff --git a/src/mongo/db/exec/count.cpp b/src/mongo/db/exec/count.cpp
index 55e080d8c14..bcc389a488b 100644
--- a/src/mongo/db/exec/count.cpp
+++ b/src/mongo/db/exec/count.cpp
@@ -49,15 +49,15 @@ CountStage::CountStage(OperationContext* txn,
const CountRequest& request,
WorkingSet* ws,
PlanStage* child)
- : _txn(txn),
+ : PlanStage(kStageType),
+ _txn(txn),
_collection(collection),
_request(request),
_leftToSkip(request.getSkip()),
- _ws(ws),
- _child(child),
- _commonStats(kStageType) {}
-
-CountStage::~CountStage() {}
+ _ws(ws) {
+ if (child)
+ _children.emplace_back(child);
+}
bool CountStage::isEOF() {
if (_specificStats.trivialCount) {
@@ -68,7 +68,7 @@ bool CountStage::isEOF() {
return true;
}
- return NULL != _child.get() && _child->isEOF();
+ return !_children.empty() && child()->isEOF();
}
void CountStage::trivialCount() {
@@ -119,9 +119,9 @@ PlanStage::StageState CountStage::work(WorkingSetID* out) {
// For non-trivial counts, we should always have a child stage from which we can retrieve
// results.
- invariant(_child.get());
+ invariant(child());
WorkingSetID id = WorkingSet::INVALID_ID;
- PlanStage::StageState state = _child->work(&id);
+ PlanStage::StageState state = child()->work(&id);
if (PlanStage::IS_EOF == state) {
_commonStats.isEOF = true;
@@ -163,52 +163,20 @@ PlanStage::StageState CountStage::work(WorkingSetID* out) {
return PlanStage::NEED_TIME;
}
-void CountStage::saveState() {
- _txn = NULL;
- ++_commonStats.yields;
- if (_child.get()) {
- _child->saveState();
- }
-}
-
-void CountStage::restoreState(OperationContext* opCtx) {
- invariant(_txn == NULL);
+void CountStage::doRestoreState(OperationContext* opCtx) {
_txn = opCtx;
- ++_commonStats.unyields;
- if (_child.get()) {
- _child->restoreState(opCtx);
- }
-}
-
-void CountStage::invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
- ++_commonStats.invalidates;
- if (_child.get()) {
- _child->invalidate(txn, dl, type);
- }
-}
-
-vector<PlanStage*> CountStage::getChildren() const {
- vector<PlanStage*> children;
- if (_child.get()) {
- children.push_back(_child.get());
- }
- return children;
}
unique_ptr<PlanStageStats> CountStage::getStats() {
_commonStats.isEOF = isEOF();
unique_ptr<PlanStageStats> ret = make_unique<PlanStageStats>(_commonStats, STAGE_COUNT);
ret->specific = make_unique<CountStats>(_specificStats);
- if (_child.get()) {
- ret->children.push_back(_child->getStats().release());
+ if (!_children.empty()) {
+ ret->children.push_back(child()->getStats().release());
}
return ret;
}
-const CommonStats* CountStage::getCommonStats() const {
- return &_commonStats;
-}
-
const SpecificStats* CountStage::getSpecificStats() const {
return &_specificStats;
}
diff --git a/src/mongo/db/exec/count.h b/src/mongo/db/exec/count.h
index 1dbc4dfa0f4..ccb4a4b364a 100644
--- a/src/mongo/db/exec/count.h
+++ b/src/mongo/db/exec/count.h
@@ -53,16 +53,10 @@ public:
WorkingSet* ws,
PlanStage* child);
- virtual ~CountStage();
-
virtual bool isEOF();
virtual StageState work(WorkingSetID* out);
- virtual void saveState();
- virtual void restoreState(OperationContext* opCtx);
- virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
-
- virtual std::vector<PlanStage*> getChildren() const;
+ virtual void doRestoreState(OperationContext* opCtx);
virtual StageType stageType() const {
return STAGE_COUNT;
@@ -70,8 +64,6 @@ public:
std::unique_ptr<PlanStageStats> getStats();
- virtual const CommonStats* getCommonStats() const;
-
virtual const SpecificStats* getSpecificStats() const;
static const char* kStageType;
@@ -98,9 +90,6 @@ private:
// by us.
WorkingSet* _ws;
- std::unique_ptr<PlanStage> _child;
-
- CommonStats _commonStats;
CountStats _specificStats;
};
diff --git a/src/mongo/db/exec/count_scan.cpp b/src/mongo/db/exec/count_scan.cpp
index ccd2b7339c3..e6a9f4b667c 100644
--- a/src/mongo/db/exec/count_scan.cpp
+++ b/src/mongo/db/exec/count_scan.cpp
@@ -43,13 +43,13 @@ using stdx::make_unique;
const char* CountScan::kStageType = "COUNT_SCAN";
CountScan::CountScan(OperationContext* txn, const CountScanParams& params, WorkingSet* workingSet)
- : _txn(txn),
+ : PlanStage(kStageType),
+ _txn(txn),
_workingSet(workingSet),
_descriptor(params.descriptor),
_iam(params.descriptor->getIndexCatalog()->getIndex(params.descriptor)),
_shouldDedup(params.descriptor->isMultikey(txn)),
- _params(params),
- _commonStats(kStageType) {
+ _params(params) {
_specificStats.keyPattern = _params.descriptor->keyPattern();
_specificStats.indexName = _params.descriptor->indexName();
_specificStats.isMultiKey = _params.descriptor->isMultikey(txn);
@@ -120,17 +120,15 @@ bool CountScan::isEOF() {
return _commonStats.isEOF;
}
-void CountScan::saveState() {
+void CountScan::doSaveState() {
_txn = NULL;
- ++_commonStats.yields;
if (_cursor)
_cursor->savePositioned();
}
-void CountScan::restoreState(OperationContext* opCtx) {
+void CountScan::doRestoreState(OperationContext* opCtx) {
invariant(_txn == NULL);
_txn = opCtx;
- ++_commonStats.unyields;
if (_cursor)
_cursor->restore(opCtx);
@@ -140,9 +138,7 @@ void CountScan::restoreState(OperationContext* opCtx) {
_shouldDedup = _descriptor->isMultikey(_txn);
}
-void CountScan::invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
- ++_commonStats.invalidates;
-
+void CountScan::doInvalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
// The only state we're responsible for holding is what RecordIds to drop. If a document
// mutates the underlying index cursor will deal with it.
if (INVALIDATION_MUTATION == type) {
@@ -157,11 +153,6 @@ void CountScan::invalidate(OperationContext* txn, const RecordId& dl, Invalidati
}
}
-vector<PlanStage*> CountScan::getChildren() const {
- vector<PlanStage*> empty;
- return empty;
-}
-
unique_ptr<PlanStageStats> CountScan::getStats() {
unique_ptr<PlanStageStats> ret = make_unique<PlanStageStats>(_commonStats, STAGE_COUNT_SCAN);
@@ -172,10 +163,6 @@ unique_ptr<PlanStageStats> CountScan::getStats() {
return ret;
}
-const CommonStats* CountScan::getCommonStats() const {
- return &_commonStats;
-}
-
const SpecificStats* CountScan::getSpecificStats() const {
return &_specificStats;
}
diff --git a/src/mongo/db/exec/count_scan.h b/src/mongo/db/exec/count_scan.h
index 8d7afdef1b6..ffb0035417d 100644
--- a/src/mongo/db/exec/count_scan.h
+++ b/src/mongo/db/exec/count_scan.h
@@ -67,15 +67,12 @@ struct CountScanParams {
class CountScan : public PlanStage {
public:
CountScan(OperationContext* txn, const CountScanParams& params, WorkingSet* workingSet);
- virtual ~CountScan() {}
virtual StageState work(WorkingSetID* out);
virtual bool isEOF();
- virtual void saveState();
- virtual void restoreState(OperationContext* opCtx);
- virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
-
- virtual std::vector<PlanStage*> getChildren() const;
+ virtual void doSaveState();
+ virtual void doRestoreState(OperationContext* opCtx);
+ virtual void doInvalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
virtual StageType stageType() const {
return STAGE_COUNT_SCAN;
@@ -83,8 +80,6 @@ public:
virtual std::unique_ptr<PlanStageStats> getStats();
- virtual const CommonStats* getCommonStats() const;
-
virtual const SpecificStats* getSpecificStats() const;
static const char* kStageType;
@@ -108,7 +103,6 @@ private:
CountScanParams _params;
- CommonStats _commonStats;
CountScanStats _specificStats;
};
diff --git a/src/mongo/db/exec/delete.cpp b/src/mongo/db/exec/delete.cpp
index e4a89d197c0..bdc78806521 100644
--- a/src/mongo/db/exec/delete.cpp
+++ b/src/mongo/db/exec/delete.cpp
@@ -58,16 +58,15 @@ DeleteStage::DeleteStage(OperationContext* txn,
WorkingSet* ws,
Collection* collection,
PlanStage* child)
- : _txn(txn),
+ : PlanStage(kStageType),
+ _txn(txn),
_params(params),
_ws(ws),
_collection(collection),
- _child(child),
_idRetrying(WorkingSet::INVALID_ID),
- _idReturning(WorkingSet::INVALID_ID),
- _commonStats(kStageType) {}
-
-DeleteStage::~DeleteStage() {}
+ _idReturning(WorkingSet::INVALID_ID) {
+ _children.emplace_back(child);
+}
bool DeleteStage::isEOF() {
if (!_collection) {
@@ -77,7 +76,7 @@ bool DeleteStage::isEOF() {
return true;
}
return _idRetrying == WorkingSet::INVALID_ID && _idReturning == WorkingSet::INVALID_ID &&
- _child->isEOF();
+ child()->isEOF();
}
PlanStage::StageState DeleteStage::work(WorkingSetID* out) {
@@ -110,7 +109,7 @@ PlanStage::StageState DeleteStage::work(WorkingSetID* out) {
WorkingSetID id;
StageState status;
if (_idRetrying == WorkingSet::INVALID_ID) {
- status = _child->work(&id);
+ status = child()->work(&id);
} else {
status = ADVANCED;
id = _idRetrying;
@@ -160,7 +159,7 @@ PlanStage::StageState DeleteStage::work(WorkingSetID* out) {
// saving/restoring state repeatedly?
try {
- _child->saveState();
+ child()->saveState();
if (supportsDocLocking()) {
// Doc-locking engines require this after saveState() since they don't use
// invalidations.
@@ -208,7 +207,7 @@ PlanStage::StageState DeleteStage::work(WorkingSetID* out) {
// transaction in which they are created, and a WriteUnitOfWork is a
// transaction, make sure to restore the state outside of the WritUnitOfWork.
try {
- _child->restoreState(_txn);
+ child()->restoreState(_txn);
} catch (const WriteConflictException& wce) {
// Note we don't need to retry anything in this case since the delete already
// was committed. However, we still need to return the deleted document
@@ -258,18 +257,8 @@ PlanStage::StageState DeleteStage::work(WorkingSetID* out) {
return status;
}
-void DeleteStage::saveState() {
- _txn = NULL;
- ++_commonStats.yields;
- _child->saveState();
-}
-
-void DeleteStage::restoreState(OperationContext* opCtx) {
- invariant(_txn == NULL);
+void DeleteStage::doRestoreState(OperationContext* opCtx) {
_txn = opCtx;
- ++_commonStats.unyields;
- _child->restoreState(opCtx);
-
const NamespaceString& ns(_collection->ns());
massert(28537,
str::stream() << "Demoted from primary while removing from " << ns.ns(),
@@ -277,29 +266,14 @@ void DeleteStage::restoreState(OperationContext* opCtx) {
repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(ns));
}
-void DeleteStage::invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
- ++_commonStats.invalidates;
- _child->invalidate(txn, dl, type);
-}
-
-vector<PlanStage*> DeleteStage::getChildren() const {
- vector<PlanStage*> children;
- children.push_back(_child.get());
- return children;
-}
-
unique_ptr<PlanStageStats> DeleteStage::getStats() {
_commonStats.isEOF = isEOF();
unique_ptr<PlanStageStats> ret = make_unique<PlanStageStats>(_commonStats, STAGE_DELETE);
ret->specific = make_unique<DeleteStats>(_specificStats);
- ret->children.push_back(_child->getStats().release());
+ ret->children.push_back(child()->getStats().release());
return ret;
}
-const CommonStats* DeleteStage::getCommonStats() const {
- return &_commonStats;
-}
-
const SpecificStats* DeleteStage::getSpecificStats() const {
return &_specificStats;
}
diff --git a/src/mongo/db/exec/delete.h b/src/mongo/db/exec/delete.h
index ab907557bc7..76f6afdb123 100644
--- a/src/mongo/db/exec/delete.h
+++ b/src/mongo/db/exec/delete.h
@@ -85,16 +85,11 @@ public:
WorkingSet* ws,
Collection* collection,
PlanStage* child);
- virtual ~DeleteStage();
virtual bool isEOF();
virtual StageState work(WorkingSetID* out);
- virtual void saveState();
- virtual void restoreState(OperationContext* opCtx);
- virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
-
- virtual std::vector<PlanStage*> getChildren() const;
+ virtual void doRestoreState(OperationContext* opCtx);
virtual StageType stageType() const {
return STAGE_DELETE;
@@ -102,8 +97,6 @@ public:
virtual std::unique_ptr<PlanStageStats> getStats();
- virtual const CommonStats* getCommonStats() const;
-
virtual const SpecificStats* getSpecificStats() const;
static const char* kStageType;
@@ -129,8 +122,6 @@ private:
// stage.
Collection* _collection;
- std::unique_ptr<PlanStage> _child;
-
// If not WorkingSet::INVALID_ID, we use this rather than asking our child what to do next.
WorkingSetID _idRetrying;
@@ -138,7 +129,6 @@ private:
WorkingSetID _idReturning;
// Stats
- CommonStats _commonStats;
DeleteStats _specificStats;
};
diff --git a/src/mongo/db/exec/distinct_scan.cpp b/src/mongo/db/exec/distinct_scan.cpp
index e189861dc86..d838177b456 100644
--- a/src/mongo/db/exec/distinct_scan.cpp
+++ b/src/mongo/db/exec/distinct_scan.cpp
@@ -48,13 +48,13 @@ const char* DistinctScan::kStageType = "DISTINCT_SCAN";
DistinctScan::DistinctScan(OperationContext* txn,
const DistinctParams& params,
WorkingSet* workingSet)
- : _txn(txn),
+ : PlanStage(kStageType),
+ _txn(txn),
_workingSet(workingSet),
_descriptor(params.descriptor),
_iam(params.descriptor->getIndexCatalog()->getIndex(params.descriptor)),
_params(params),
- _checker(&_params.bounds, _descriptor->keyPattern(), _params.direction),
- _commonStats(kStageType) {
+ _checker(&_params.bounds, _descriptor->keyPattern(), _params.direction) {
_specificStats.keyPattern = _params.descriptor->keyPattern();
_specificStats.indexName = _params.descriptor->indexName();
_specificStats.indexVersion = _params.descriptor->version();
@@ -128,43 +128,28 @@ bool DistinctScan::isEOF() {
return _commonStats.isEOF;
}
-void DistinctScan::saveState() {
+void DistinctScan::doSaveState() {
_txn = NULL;
- ++_commonStats.yields;
// We always seek, so we don't care where the cursor is.
if (_cursor)
_cursor->saveUnpositioned();
}
-void DistinctScan::restoreState(OperationContext* opCtx) {
+void DistinctScan::doRestoreState(OperationContext* opCtx) {
invariant(_txn == NULL);
_txn = opCtx;
- ++_commonStats.unyields;
if (_cursor)
_cursor->restore(opCtx);
}
-void DistinctScan::invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
- ++_commonStats.invalidates;
-}
-
-vector<PlanStage*> DistinctScan::getChildren() const {
- vector<PlanStage*> empty;
- return empty;
-}
-
unique_ptr<PlanStageStats> DistinctScan::getStats() {
unique_ptr<PlanStageStats> ret = make_unique<PlanStageStats>(_commonStats, STAGE_DISTINCT_SCAN);
ret->specific = make_unique<DistinctScanStats>(_specificStats);
return ret;
}
-const CommonStats* DistinctScan::getCommonStats() const {
- return &_commonStats;
-}
-
const SpecificStats* DistinctScan::getSpecificStats() const {
return &_specificStats;
}
diff --git a/src/mongo/db/exec/distinct_scan.h b/src/mongo/db/exec/distinct_scan.h
index 83b4da8c216..b39ab6e9acc 100644
--- a/src/mongo/db/exec/distinct_scan.h
+++ b/src/mongo/db/exec/distinct_scan.h
@@ -79,11 +79,8 @@ public:
virtual StageState work(WorkingSetID* out);
virtual bool isEOF();
- virtual void saveState();
- virtual void restoreState(OperationContext* opCtx);
- virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
-
- virtual std::vector<PlanStage*> getChildren() const;
+ virtual void doSaveState();
+ virtual void doRestoreState(OperationContext* opCtx);
virtual StageType stageType() const {
return STAGE_DISTINCT_SCAN;
@@ -91,8 +88,6 @@ public:
virtual std::unique_ptr<PlanStageStats> getStats();
- virtual const CommonStats* getCommonStats() const;
-
virtual const SpecificStats* getSpecificStats() const;
static const char* kStageType;
@@ -118,7 +113,6 @@ private:
IndexSeekPoint _seekPoint;
// Stats
- CommonStats _commonStats;
DistinctScanStats _specificStats;
};
diff --git a/src/mongo/db/exec/eof.cpp b/src/mongo/db/exec/eof.cpp
index a09eeaa9692..aebb31b0277 100644
--- a/src/mongo/db/exec/eof.cpp
+++ b/src/mongo/db/exec/eof.cpp
@@ -42,7 +42,7 @@ using stdx::make_unique;
// static
const char* EOFStage::kStageType = "EOF";
-EOFStage::EOFStage() : _commonStats(kStageType) {}
+EOFStage::EOFStage() : PlanStage(kStageType) {}
EOFStage::~EOFStage() {}
@@ -57,32 +57,11 @@ PlanStage::StageState EOFStage::work(WorkingSetID* out) {
return PlanStage::IS_EOF;
}
-void EOFStage::saveState() {
- ++_commonStats.yields;
-}
-
-void EOFStage::restoreState(OperationContext* opCtx) {
- ++_commonStats.unyields;
-}
-
-void EOFStage::invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
- ++_commonStats.invalidates;
-}
-
-vector<PlanStage*> EOFStage::getChildren() const {
- vector<PlanStage*> empty;
- return empty;
-}
-
unique_ptr<PlanStageStats> EOFStage::getStats() {
_commonStats.isEOF = isEOF();
return make_unique<PlanStageStats>(_commonStats, STAGE_EOF);
}
-const CommonStats* EOFStage::getCommonStats() const {
- return &_commonStats;
-}
-
const SpecificStats* EOFStage::getSpecificStats() const {
return nullptr;
}
diff --git a/src/mongo/db/exec/eof.h b/src/mongo/db/exec/eof.h
index c8825ed085f..30df7679ba1 100644
--- a/src/mongo/db/exec/eof.h
+++ b/src/mongo/db/exec/eof.h
@@ -45,11 +45,6 @@ public:
virtual bool isEOF();
virtual StageState work(WorkingSetID* out);
- virtual void saveState();
- virtual void restoreState(OperationContext* opCtx);
- virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
-
- virtual std::vector<PlanStage*> getChildren() const;
virtual StageType stageType() const {
return STAGE_EOF;
@@ -57,14 +52,9 @@ public:
std::unique_ptr<PlanStageStats> getStats();
- virtual const CommonStats* getCommonStats() const;
-
virtual const SpecificStats* getSpecificStats() const;
static const char* kStageType;
-
-private:
- CommonStats _commonStats;
};
} // namespace mongo
diff --git a/src/mongo/db/exec/fetch.cpp b/src/mongo/db/exec/fetch.cpp
index eae3b00a361..c31d9b3f45f 100644
--- a/src/mongo/db/exec/fetch.cpp
+++ b/src/mongo/db/exec/fetch.cpp
@@ -54,13 +54,14 @@ FetchStage::FetchStage(OperationContext* txn,
PlanStage* child,
const MatchExpression* filter,
const Collection* collection)
- : _txn(txn),
+ : PlanStage(kStageType),
+ _txn(txn),
_collection(collection),
_ws(ws),
- _child(child),
_filter(filter),
- _idRetrying(WorkingSet::INVALID_ID),
- _commonStats(kStageType) {}
+ _idRetrying(WorkingSet::INVALID_ID) {
+ _children.emplace_back(child);
+}
FetchStage::~FetchStage() {}
@@ -71,7 +72,7 @@ bool FetchStage::isEOF() {
return false;
}
- return _child->isEOF();
+ return child()->isEOF();
}
PlanStage::StageState FetchStage::work(WorkingSetID* out) {
@@ -88,7 +89,7 @@ PlanStage::StageState FetchStage::work(WorkingSetID* out) {
WorkingSetID id;
StageState status;
if (_idRetrying == WorkingSet::INVALID_ID) {
- status = _child->work(&id);
+ status = child()->work(&id);
} else {
status = ADVANCED;
id = _idRetrying;
@@ -158,28 +159,20 @@ PlanStage::StageState FetchStage::work(WorkingSetID* out) {
return status;
}
-void FetchStage::saveState() {
+void FetchStage::doSaveState() {
_txn = NULL;
- ++_commonStats.yields;
if (_cursor)
_cursor->saveUnpositioned();
- _child->saveState();
}
-void FetchStage::restoreState(OperationContext* opCtx) {
+void FetchStage::doRestoreState(OperationContext* opCtx) {
invariant(_txn == NULL);
_txn = opCtx;
- ++_commonStats.unyields;
if (_cursor)
_cursor->restore(opCtx);
- _child->restoreState(opCtx);
}
-void FetchStage::invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
- ++_commonStats.invalidates;
-
- _child->invalidate(txn, dl, type);
-
+void FetchStage::doInvalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
// It's possible that the loc getting invalidated is the one we're about to
// fetch. In this case we do a "forced fetch" and put the WSM in owned object state.
if (WorkingSet::INVALID_ID != _idRetrying) {
@@ -223,12 +216,6 @@ PlanStage::StageState FetchStage::returnIfMatches(WorkingSetMember* member,
}
}
-vector<PlanStage*> FetchStage::getChildren() const {
- vector<PlanStage*> children;
- children.push_back(_child.get());
- return children;
-}
-
unique_ptr<PlanStageStats> FetchStage::getStats() {
_commonStats.isEOF = isEOF();
@@ -241,14 +228,10 @@ unique_ptr<PlanStageStats> FetchStage::getStats() {
unique_ptr<PlanStageStats> ret = make_unique<PlanStageStats>(_commonStats, STAGE_FETCH);
ret->specific = make_unique<FetchStats>(_specificStats);
- ret->children.push_back(_child->getStats().release());
+ ret->children.push_back(child()->getStats().release());
return ret;
}
-const CommonStats* FetchStage::getCommonStats() const {
- return &_commonStats;
-}
-
const SpecificStats* FetchStage::getSpecificStats() const {
return &_specificStats;
}
diff --git a/src/mongo/db/exec/fetch.h b/src/mongo/db/exec/fetch.h
index 9a0ff81c021..edfd6c35a5f 100644
--- a/src/mongo/db/exec/fetch.h
+++ b/src/mongo/db/exec/fetch.h
@@ -60,11 +60,9 @@ public:
virtual bool isEOF();
virtual StageState work(WorkingSetID* out);
- virtual void saveState();
- virtual void restoreState(OperationContext* opCtx);
- virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
-
- virtual std::vector<PlanStage*> getChildren() const;
+ virtual void doSaveState();
+ virtual void doRestoreState(OperationContext* opCtx);
+ virtual void doInvalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
virtual StageType stageType() const {
return STAGE_FETCH;
@@ -72,8 +70,6 @@ public:
std::unique_ptr<PlanStageStats> getStats();
- virtual const CommonStats* getCommonStats() const;
-
virtual const SpecificStats* getSpecificStats() const;
static const char* kStageType;
@@ -95,7 +91,6 @@ private:
// _ws is not owned by us.
WorkingSet* _ws;
- std::unique_ptr<PlanStage> _child;
// The filter is not owned by us.
const MatchExpression* _filter;
@@ -104,7 +99,6 @@ private:
WorkingSetID _idRetrying;
// Stats
- CommonStats _commonStats;
FetchStats _specificStats;
};
diff --git a/src/mongo/db/exec/geo_near.cpp b/src/mongo/db/exec/geo_near.cpp
index 7eaeeac9f9a..db54e778da7 100644
--- a/src/mongo/db/exec/geo_near.cpp
+++ b/src/mongo/db/exec/geo_near.cpp
@@ -259,8 +259,10 @@ static R2Annulus twoDDistanceBounds(const GeoNearParams& nearParams,
class GeoNear2DStage::DensityEstimator {
public:
- DensityEstimator(const IndexDescriptor* twoDindex, const GeoNearParams* nearParams)
- : _twoDIndex(twoDindex), _nearParams(nearParams), _currentLevel(0) {
+ DensityEstimator(PlanStage::Children* children,
+ const IndexDescriptor* twoDindex,
+ const GeoNearParams* nearParams)
+ : _children(children), _twoDIndex(twoDindex), _nearParams(nearParams), _currentLevel(0) {
GeoHashConverter::Parameters hashParams;
Status status = GeoHashConverter::parseParameters(_twoDIndex->infoObj(), &hashParams);
// The index status should always be valid.
@@ -281,16 +283,13 @@ public:
WorkingSetID* out,
double* estimatedDistance);
- void saveState();
- void restoreState(OperationContext* txn);
- void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
-
private:
void buildIndexScan(OperationContext* txn, WorkingSet* workingSet, Collection* collection);
+ PlanStage::Children* _children; // Points to PlanStage::_children in the NearStage.
const IndexDescriptor* _twoDIndex; // Not owned here.
const GeoNearParams* _nearParams; // Not owned here.
- unique_ptr<IndexScan> _indexScan;
+ IndexScan* _indexScan = nullptr; // Owned in PlanStage::_children.
unique_ptr<GeoHashConverter> _converter;
GeoHash _centroidCell;
unsigned _currentLevel;
@@ -335,7 +334,9 @@ void GeoNear2DStage::DensityEstimator::buildIndexScan(OperationContext* txn,
// in the scan (i.e. $within)
IndexBoundsBuilder::intersectize(oil, &scanParams.bounds.fields[twoDFieldPosition]);
- _indexScan.reset(new IndexScan(txn, scanParams, workingSet, NULL));
+ invariant(!_indexScan);
+ _indexScan = new IndexScan(txn, scanParams, workingSet, NULL);
+ _children->emplace_back(_indexScan);
}
// Return IS_EOF is we find a document in it's ancestor cells and set estimated distance
@@ -359,7 +360,9 @@ PlanStage::StageState GeoNear2DStage::DensityEstimator::work(OperationContext* t
// Advance to the next level and search again.
_currentLevel--;
// Reset index scan for the next level.
- _indexScan.reset(NULL);
+ invariant(_children->back().get() == _indexScan);
+ _indexScan = nullptr;
+ _children->pop_back();
return PlanStage::NEED_TIME;
}
@@ -380,33 +383,12 @@ PlanStage::StageState GeoNear2DStage::DensityEstimator::work(OperationContext* t
return state;
}
-void GeoNear2DStage::DensityEstimator::saveState() {
- if (_indexScan) {
- _indexScan->saveState();
- }
-}
-
-void GeoNear2DStage::DensityEstimator::restoreState(OperationContext* txn) {
- if (_indexScan) {
- _indexScan->restoreState(txn);
- }
-}
-
-void GeoNear2DStage::DensityEstimator::invalidate(OperationContext* txn,
- const RecordId& dl,
- InvalidationType type) {
- if (_indexScan) {
- _indexScan->invalidate(txn, dl, type);
- }
-}
-
-
PlanStage::StageState GeoNear2DStage::initialize(OperationContext* txn,
WorkingSet* workingSet,
Collection* collection,
WorkingSetID* out) {
if (!_densityEstimator) {
- _densityEstimator.reset(new DensityEstimator(_twoDIndex, &_nearParams));
+ _densityEstimator.reset(new DensityEstimator(&_children, _twoDIndex, &_nearParams));
}
double estimatedDistance;
@@ -451,40 +433,16 @@ GeoNear2DStage::GeoNear2DStage(const GeoNearParams& nearParams,
WorkingSet* workingSet,
Collection* collection,
IndexDescriptor* twoDIndex)
- : NearStage(txn,
- workingSet,
- collection,
- new PlanStageStats(CommonStats(kTwoDIndexNearStage.c_str()), STAGE_GEO_NEAR_2D)),
+ : NearStage(txn, kTwoDIndexNearStage.c_str(), STAGE_GEO_NEAR_2D, workingSet, collection),
_nearParams(nearParams),
_twoDIndex(twoDIndex),
_fullBounds(twoDDistanceBounds(nearParams, twoDIndex)),
_currBounds(_fullBounds.center(), -1, _fullBounds.getInner()),
_boundsIncrement(0.0) {
- getNearStats()->keyPattern = twoDIndex->keyPattern();
- getNearStats()->indexName = twoDIndex->indexName();
+ _specificStats.keyPattern = twoDIndex->keyPattern();
+ _specificStats.indexName = twoDIndex->indexName();
}
-GeoNear2DStage::~GeoNear2DStage() {}
-
-void GeoNear2DStage::finishSaveState() {
- if (_densityEstimator) {
- _densityEstimator->saveState();
- }
-}
-
-void GeoNear2DStage::finishRestoreState(OperationContext* txn) {
- if (_densityEstimator) {
- _densityEstimator->restoreState(txn);
- }
-}
-
-void GeoNear2DStage::finishInvalidate(OperationContext* txn,
- const RecordId& dl,
- InvalidationType type) {
- if (_densityEstimator) {
- _densityEstimator->invalidate(txn, dl, type);
- }
-}
namespace {
@@ -665,10 +623,8 @@ StatusWith<NearStage::CoveredInterval*> //
// Setup the next interval
//
- const NearStats* stats = getNearStats();
-
- if (!stats->intervalStats.empty()) {
- const IntervalStats& lastIntervalStats = stats->intervalStats.back();
+ if (!_specificStats.intervalStats.empty()) {
+ const IntervalStats& lastIntervalStats = _specificStats.intervalStats.back();
// TODO: Generally we want small numbers of results fast, then larger numbers later
if (lastIntervalStats.numResultsBuffered < 300)
@@ -805,10 +761,13 @@ StatusWith<NearStage::CoveredInterval*> //
}
// FetchStage owns index scan
- FetchStage* fetcher(new FetchStageWithMatch(txn, workingSet, scan, docMatcher, collection));
+ _children.emplace_back(new FetchStageWithMatch(txn, workingSet, scan, docMatcher, collection));
- return StatusWith<CoveredInterval*>(new CoveredInterval(
- fetcher, true, nextBounds.getInner(), nextBounds.getOuter(), isLastInterval));
+ return StatusWith<CoveredInterval*>(new CoveredInterval(_children.back().get(),
+ true,
+ nextBounds.getInner(),
+ nextBounds.getOuter(),
+ isLastInterval));
}
StatusWith<double> GeoNear2DStage::computeDistance(WorkingSetMember* member) {
@@ -843,18 +802,14 @@ GeoNear2DSphereStage::GeoNear2DSphereStage(const GeoNearParams& nearParams,
WorkingSet* workingSet,
Collection* collection,
IndexDescriptor* s2Index)
- : NearStage(
- txn,
- workingSet,
- collection,
- new PlanStageStats(CommonStats(kS2IndexNearStage.c_str()), STAGE_GEO_NEAR_2DSPHERE)),
+ : NearStage(txn, kS2IndexNearStage.c_str(), STAGE_GEO_NEAR_2DSPHERE, workingSet, collection),
_nearParams(nearParams),
_s2Index(s2Index),
_fullBounds(geoNearDistanceBounds(*nearParams.nearQuery)),
_currBounds(_fullBounds.center(), -1, _fullBounds.getInner()),
_boundsIncrement(0.0) {
- getNearStats()->keyPattern = s2Index->keyPattern();
- getNearStats()->indexName = s2Index->indexName();
+ _specificStats.keyPattern = s2Index->keyPattern();
+ _specificStats.indexName = s2Index->indexName();
}
GeoNear2DSphereStage::~GeoNear2DSphereStage() {}
@@ -940,8 +895,10 @@ private:
// Estimate the density of data by search the nearest cells level by level around center.
class GeoNear2DSphereStage::DensityEstimator {
public:
- DensityEstimator(const IndexDescriptor* s2Index, const GeoNearParams* nearParams)
- : _s2Index(s2Index), _nearParams(nearParams), _currentLevel(0) {
+ DensityEstimator(PlanStage::Children* children,
+ const IndexDescriptor* s2Index,
+ const GeoNearParams* nearParams)
+ : _children(children), _s2Index(s2Index), _nearParams(nearParams), _currentLevel(0) {
S2IndexingParams params;
ExpressionParams::parse2dsphereParams(_s2Index->infoObj(), &params);
// Since cellId.AppendVertexNeighbors(level, output) requires level < cellId.level(),
@@ -959,17 +916,14 @@ public:
WorkingSetID* out,
double* estimatedDistance);
- void saveState();
- void restoreState(OperationContext* txn);
- void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
-
private:
void buildIndexScan(OperationContext* txn, WorkingSet* workingSet, Collection* collection);
+ PlanStage::Children* _children; // Points to PlanStage::_children in the NearStage.
const IndexDescriptor* _s2Index; // Not owned here.
const GeoNearParams* _nearParams; // Not owned here.
int _currentLevel;
- unique_ptr<IndexScan> _indexScan;
+ IndexScan* _indexScan = nullptr; // Owned in PlanStage::_children.
};
// Setup the index scan stage for neighbors at this level.
@@ -1016,7 +970,9 @@ void GeoNear2DSphereStage::DensityEstimator::buildIndexScan(OperationContext* tx
invariant(coveredIntervals->isValidFor(1));
// Index scan
- _indexScan.reset(new IndexScan(txn, scanParams, workingSet, NULL));
+ invariant(!_indexScan);
+ _indexScan = new IndexScan(txn, scanParams, workingSet, NULL);
+ _children->emplace_back(_indexScan);
}
PlanStage::StageState GeoNear2DSphereStage::DensityEstimator::work(OperationContext* txn,
@@ -1038,7 +994,9 @@ PlanStage::StageState GeoNear2DSphereStage::DensityEstimator::work(OperationCont
// Advance to the next level and search again.
_currentLevel--;
// Reset index scan for the next level.
- _indexScan.reset(NULL);
+ invariant(_children->back().get() == _indexScan);
+ _indexScan = nullptr;
+ _children->pop_back();
return PlanStage::NEED_TIME;
}
@@ -1059,33 +1017,13 @@ PlanStage::StageState GeoNear2DSphereStage::DensityEstimator::work(OperationCont
return state;
}
-void GeoNear2DSphereStage::DensityEstimator::saveState() {
- if (_indexScan) {
- _indexScan->saveState();
- }
-}
-
-void GeoNear2DSphereStage::DensityEstimator::restoreState(OperationContext* txn) {
- if (_indexScan) {
- _indexScan->restoreState(txn);
- }
-}
-
-void GeoNear2DSphereStage::DensityEstimator::invalidate(OperationContext* txn,
- const RecordId& dl,
- InvalidationType type) {
- if (_indexScan) {
- _indexScan->invalidate(txn, dl, type);
- }
-}
-
PlanStage::StageState GeoNear2DSphereStage::initialize(OperationContext* txn,
WorkingSet* workingSet,
Collection* collection,
WorkingSetID* out) {
if (!_densityEstimator) {
- _densityEstimator.reset(new DensityEstimator(_s2Index, &_nearParams));
+ _densityEstimator.reset(new DensityEstimator(&_children, _s2Index, &_nearParams));
}
double estimatedDistance;
@@ -1110,26 +1048,6 @@ PlanStage::StageState GeoNear2DSphereStage::initialize(OperationContext* txn,
return state;
}
-void GeoNear2DSphereStage::finishSaveState() {
- if (_densityEstimator) {
- _densityEstimator->saveState();
- }
-}
-
-void GeoNear2DSphereStage::finishRestoreState(OperationContext* txn) {
- if (_densityEstimator) {
- _densityEstimator->restoreState(txn);
- }
-}
-
-void GeoNear2DSphereStage::finishInvalidate(OperationContext* txn,
- const RecordId& dl,
- InvalidationType type) {
- if (_densityEstimator) {
- _densityEstimator->invalidate(txn, dl, type);
- }
-}
-
StatusWith<NearStage::CoveredInterval*> //
GeoNear2DSphereStage::nextInterval(OperationContext* txn,
WorkingSet* workingSet,
@@ -1143,10 +1061,8 @@ StatusWith<NearStage::CoveredInterval*> //
// Setup the next interval
//
- const NearStats* stats = getNearStats();
-
- if (!stats->intervalStats.empty()) {
- const IntervalStats& lastIntervalStats = stats->intervalStats.back();
+ if (!_specificStats.intervalStats.empty()) {
+ const IntervalStats& lastIntervalStats = _specificStats.intervalStats.back();
// TODO: Generally we want small numbers of results fast, then larger numbers later
if (lastIntervalStats.numResultsBuffered < 300)
@@ -1196,10 +1112,13 @@ StatusWith<NearStage::CoveredInterval*> //
IndexScan* scan = new IndexScanWithMatch(txn, scanParams, workingSet, keyMatcher);
// FetchStage owns index scan
- FetchStage* fetcher(new FetchStage(txn, workingSet, scan, _nearParams.filter, collection));
+ _children.emplace_back(new FetchStage(txn, workingSet, scan, _nearParams.filter, collection));
- return StatusWith<CoveredInterval*>(new CoveredInterval(
- fetcher, true, nextBounds.getInner(), nextBounds.getOuter(), isLastInterval));
+ return StatusWith<CoveredInterval*>(new CoveredInterval(_children.back().get(),
+ true,
+ nextBounds.getInner(),
+ nextBounds.getOuter(),
+ isLastInterval));
}
StatusWith<double> GeoNear2DSphereStage::computeDistance(WorkingSetMember* member) {
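A condensed sketch of the DensityEstimator's new bookkeeping, using only the members shown in the hunks above (the estimator now borrows the owning NearStage's _children instead of holding its own unique_ptr):

    // Registering the per-level IndexScan with the owning NearStage: ownership moves into
    // PlanStage::_children, so saveState/restoreState/invalidate reach the scan without the
    // forwarding methods deleted above.
    _indexScan = new IndexScan(txn, scanParams, workingSet, NULL);
    _children->emplace_back(_indexScan);

    // Tearing the scan down when the search advances to the next cell level.
    invariant(_children->back().get() == _indexScan);
    _indexScan = nullptr;
    _children->pop_back();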
diff --git a/src/mongo/db/exec/geo_near.h b/src/mongo/db/exec/geo_near.h
index f9295217992..06b25ad27b7 100644
--- a/src/mongo/db/exec/geo_near.h
+++ b/src/mongo/db/exec/geo_near.h
@@ -69,8 +69,6 @@ public:
Collection* collection,
IndexDescriptor* twoDIndex);
- virtual ~GeoNear2DStage();
-
protected:
virtual StatusWith<CoveredInterval*> nextInterval(OperationContext* txn,
WorkingSet* workingSet,
@@ -84,12 +82,6 @@ protected:
WorkingSetID* out);
private:
- virtual void finishSaveState();
-
- virtual void finishRestoreState(OperationContext* txn);
-
- virtual void finishInvalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
-
const GeoNearParams _nearParams;
// The 2D index we're searching over
@@ -135,12 +127,6 @@ protected:
WorkingSetID* out);
private:
- virtual void finishSaveState();
-
- virtual void finishRestoreState(OperationContext* txn);
-
- virtual void finishInvalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
-
const GeoNearParams _nearParams;
// The 2D index we're searching over
diff --git a/src/mongo/db/exec/group.cpp b/src/mongo/db/exec/group.cpp
index 8c945ef11d0..a3049a1f787 100644
--- a/src/mongo/db/exec/group.cpp
+++ b/src/mongo/db/exec/group.cpp
@@ -77,15 +77,16 @@ GroupStage::GroupStage(OperationContext* txn,
const GroupRequest& request,
WorkingSet* workingSet,
PlanStage* child)
- : _txn(txn),
+ : PlanStage(kStageType),
+ _txn(txn),
_request(request),
_ws(workingSet),
- _commonStats(kStageType),
_specificStats(),
- _child(child),
_groupState(GroupState_Initializing),
_reduceFunction(0),
- _keyFunction(0) {}
+ _keyFunction(0) {
+ _children.emplace_back(child);
+}
void GroupStage::initGroupScripting() {
// Initialize _scope.
@@ -196,7 +197,7 @@ PlanStage::StageState GroupStage::work(WorkingSetID* out) {
// Otherwise, read from our child.
invariant(_groupState == GroupState_ReadingFromChild);
WorkingSetID id = WorkingSet::INVALID_ID;
- StageState state = _child->work(&id);
+ StageState state = child()->work(&id);
if (PlanStage::NEED_TIME == state) {
++_commonStats.needTime;
@@ -256,42 +257,18 @@ bool GroupStage::isEOF() {
return _groupState == GroupState_Done;
}
-void GroupStage::saveState() {
- _txn = NULL;
- ++_commonStats.yields;
- _child->saveState();
-}
-
-void GroupStage::restoreState(OperationContext* opCtx) {
- invariant(_txn == NULL);
+void GroupStage::doRestoreState(OperationContext* opCtx) {
_txn = opCtx;
- ++_commonStats.unyields;
- _child->restoreState(opCtx);
-}
-
-void GroupStage::invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
- ++_commonStats.invalidates;
- _child->invalidate(txn, dl, type);
-}
-
-vector<PlanStage*> GroupStage::getChildren() const {
- vector<PlanStage*> children;
- children.push_back(_child.get());
- return children;
}
unique_ptr<PlanStageStats> GroupStage::getStats() {
_commonStats.isEOF = isEOF();
unique_ptr<PlanStageStats> ret = make_unique<PlanStageStats>(_commonStats, STAGE_GROUP);
ret->specific = make_unique<GroupStats>(_specificStats);
- ret->children.push_back(_child->getStats().release());
+ ret->children.push_back(child()->getStats().release());
return ret;
}
-const CommonStats* GroupStage::getCommonStats() const {
- return &_commonStats;
-}
-
const SpecificStats* GroupStage::getSpecificStats() const {
return &_specificStats;
}
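The same constructor pattern is applied to every one-child stage in this patch; a hedged sketch of its general shape (WrapStage is a made-up name, while kStageType, _children, and child() are the real PlanStage members from this diff):

    // One-child stage after the refactor: the base class owns the CommonStats and the child.
    WrapStage::WrapStage(WorkingSet* ws, PlanStage* child)
        : PlanStage(kStageType),        // CommonStats now lives in PlanStage, keyed by the stage name
          _ws(ws) {
        _children.emplace_back(child);  // replaces the old std::unique_ptr<PlanStage> _child member
    }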
diff --git a/src/mongo/db/exec/group.h b/src/mongo/db/exec/group.h
index 6b7c0fd229c..7f282fce25c 100644
--- a/src/mongo/db/exec/group.h
+++ b/src/mongo/db/exec/group.h
@@ -90,11 +90,7 @@ public:
virtual StageState work(WorkingSetID* out);
virtual bool isEOF();
- virtual void saveState();
- virtual void restoreState(OperationContext* opCtx);
- virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
-
- virtual std::vector<PlanStage*> getChildren() const;
+ virtual void doRestoreState(OperationContext* opCtx);
virtual StageType stageType() const {
return STAGE_GROUP;
@@ -102,8 +98,6 @@ public:
virtual std::unique_ptr<PlanStageStats> getStats();
- virtual const CommonStats* getCommonStats() const;
-
virtual const SpecificStats* getSpecificStats() const;
static const char* kStageType;
@@ -143,11 +137,8 @@ private:
// The WorkingSet we annotate with results. Not owned by us.
WorkingSet* _ws;
- CommonStats _commonStats;
GroupStats _specificStats;
- std::unique_ptr<PlanStage> _child;
-
// Current state for this stage.
GroupState _groupState;
diff --git a/src/mongo/db/exec/idhack.cpp b/src/mongo/db/exec/idhack.cpp
index 8e9e290b624..2d9a67cfa4f 100644
--- a/src/mongo/db/exec/idhack.cpp
+++ b/src/mongo/db/exec/idhack.cpp
@@ -54,13 +54,13 @@ IDHackStage::IDHackStage(OperationContext* txn,
const Collection* collection,
CanonicalQuery* query,
WorkingSet* ws)
- : _txn(txn),
+ : PlanStage(kStageType),
+ _txn(txn),
_collection(collection),
_workingSet(ws),
_key(query->getQueryObj()["_id"].wrap()),
_done(false),
- _idBeingPagedIn(WorkingSet::INVALID_ID),
- _commonStats(kStageType) {
+ _idBeingPagedIn(WorkingSet::INVALID_ID) {
if (NULL != query->getProj()) {
_addKeyMetadata = query->getProj()->wantIndexKey();
} else {
@@ -72,14 +72,14 @@ IDHackStage::IDHackStage(OperationContext* txn,
Collection* collection,
const BSONObj& key,
WorkingSet* ws)
- : _txn(txn),
+ : PlanStage(kStageType),
+ _txn(txn),
_collection(collection),
_workingSet(ws),
_key(key),
_done(false),
_addKeyMetadata(false),
- _idBeingPagedIn(WorkingSet::INVALID_ID),
- _commonStats(kStageType) {}
+ _idBeingPagedIn(WorkingSet::INVALID_ID) {}
IDHackStage::~IDHackStage() {}
@@ -199,24 +199,20 @@ PlanStage::StageState IDHackStage::advance(WorkingSetID id,
return PlanStage::ADVANCED;
}
-void IDHackStage::saveState() {
+void IDHackStage::doSaveState() {
_txn = NULL;
- ++_commonStats.yields;
if (_recordCursor)
_recordCursor->saveUnpositioned();
}
-void IDHackStage::restoreState(OperationContext* opCtx) {
+void IDHackStage::doRestoreState(OperationContext* opCtx) {
invariant(_txn == NULL);
_txn = opCtx;
- ++_commonStats.unyields;
if (_recordCursor)
_recordCursor->restore(opCtx);
}
-void IDHackStage::invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
- ++_commonStats.invalidates;
-
+void IDHackStage::doInvalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
// Since updates can't mutate the '_id' field, we can ignore mutation invalidations.
if (INVALIDATION_MUTATION == type) {
return;
@@ -241,11 +237,6 @@ bool IDHackStage::supportsQuery(const CanonicalQuery& query) {
!query.getParsed().isTailable();
}
-vector<PlanStage*> IDHackStage::getChildren() const {
- vector<PlanStage*> empty;
- return empty;
-}
-
unique_ptr<PlanStageStats> IDHackStage::getStats() {
_commonStats.isEOF = isEOF();
unique_ptr<PlanStageStats> ret = make_unique<PlanStageStats>(_commonStats, STAGE_IDHACK);
@@ -253,10 +244,6 @@ unique_ptr<PlanStageStats> IDHackStage::getStats() {
return ret;
}
-const CommonStats* IDHackStage::getCommonStats() const {
- return &_commonStats;
-}
-
const SpecificStats* IDHackStage::getSpecificStats() const {
return &_specificStats;
}
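For a leaf stage like this one, the counter bumps move into the base class, so the effective yield sequence is now (a sketch; "idhack" is just an illustrative pointer to an IDHackStage):

    idhack->saveState();    // PlanStage::saveState(): ++_commonStats.yields,
                            // recurse into _children (none for this leaf stage),
                            // then IDHackStage::doSaveState(): _txn = NULL and
                            // _recordCursor->saveUnpositioned() if a cursor is open.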
diff --git a/src/mongo/db/exec/idhack.h b/src/mongo/db/exec/idhack.h
index 5377d83b643..52446c064e2 100644
--- a/src/mongo/db/exec/idhack.h
+++ b/src/mongo/db/exec/idhack.h
@@ -58,25 +58,21 @@ public:
virtual bool isEOF();
virtual StageState work(WorkingSetID* out);
- virtual void saveState();
- virtual void restoreState(OperationContext* opCtx);
- virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
+ virtual void doSaveState();
+ virtual void doRestoreState(OperationContext* opCtx);
+ virtual void doInvalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
/**
* ID Hack has a very strict criteria for the queries it supports.
*/
static bool supportsQuery(const CanonicalQuery& query);
- virtual std::vector<PlanStage*> getChildren() const;
-
virtual StageType stageType() const {
return STAGE_IDHACK;
}
std::unique_ptr<PlanStageStats> getStats();
- virtual const CommonStats* getCommonStats() const;
-
virtual const SpecificStats* getSpecificStats() const;
static const char* kStageType;
@@ -115,7 +111,6 @@ private:
// the fetch request.
WorkingSetID _idBeingPagedIn;
- CommonStats _commonStats;
IDHackStats _specificStats;
};
diff --git a/src/mongo/db/exec/index_scan.cpp b/src/mongo/db/exec/index_scan.cpp
index e994404960a..34f93786b6f 100644
--- a/src/mongo/db/exec/index_scan.cpp
+++ b/src/mongo/db/exec/index_scan.cpp
@@ -62,7 +62,8 @@ IndexScan::IndexScan(OperationContext* txn,
const IndexScanParams& params,
WorkingSet* workingSet,
const MatchExpression* filter)
- : _txn(txn),
+ : PlanStage(kStageType),
+ _txn(txn),
_workingSet(workingSet),
_iam(params.descriptor->getIndexCatalog()->getIndex(params.descriptor)),
_keyPattern(params.descriptor->keyPattern().getOwned()),
@@ -71,7 +72,6 @@ IndexScan::IndexScan(OperationContext* txn,
_shouldDedup(true),
_forward(params.direction == 1),
_params(params),
- _commonStats(kStageType),
_endKeyInclusive(false) {
// We can't always access the descriptor in the call to getStats() so we pull
// any info we need for stats reporting out here.
@@ -233,14 +233,13 @@ bool IndexScan::isEOF() {
return _commonStats.isEOF;
}
-void IndexScan::saveState() {
+void IndexScan::doSaveState() {
if (!_txn) {
// We were already saved. Nothing to do.
return;
}
-
_txn = NULL;
- ++_commonStats.yields;
+
if (!_indexCursor)
return;
@@ -252,18 +251,14 @@ void IndexScan::saveState() {
_indexCursor->savePositioned();
}
-void IndexScan::restoreState(OperationContext* opCtx) {
+void IndexScan::doRestoreState(OperationContext* opCtx) {
invariant(_txn == NULL);
_txn = opCtx;
- ++_commonStats.unyields;
-
if (_indexCursor)
_indexCursor->restore(opCtx);
}
-void IndexScan::invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
- ++_commonStats.invalidates;
-
+void IndexScan::doInvalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
// The only state we're responsible for holding is what RecordIds to drop. If a document
// mutates the underlying index cursor will deal with it.
if (INVALIDATION_MUTATION == type) {
@@ -279,10 +274,6 @@ void IndexScan::invalidate(OperationContext* txn, const RecordId& dl, Invalidati
}
}
-std::vector<PlanStage*> IndexScan::getChildren() const {
- return {};
-}
-
std::unique_ptr<PlanStageStats> IndexScan::getStats() {
// WARNING: this could be called even if the collection was dropped. Do not access any
// catalog information here.
@@ -309,10 +300,6 @@ std::unique_ptr<PlanStageStats> IndexScan::getStats() {
return ret;
}
-const CommonStats* IndexScan::getCommonStats() const {
- return &_commonStats;
-}
-
const SpecificStats* IndexScan::getSpecificStats() const {
return &_specificStats;
}
diff --git a/src/mongo/db/exec/index_scan.h b/src/mongo/db/exec/index_scan.h
index fded244ff9e..d1a721588ac 100644
--- a/src/mongo/db/exec/index_scan.h
+++ b/src/mongo/db/exec/index_scan.h
@@ -99,11 +99,9 @@ public:
virtual StageState work(WorkingSetID* out);
virtual bool isEOF();
- virtual void saveState();
- virtual void restoreState(OperationContext* opCtx);
- virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
-
- virtual std::vector<PlanStage*> getChildren() const;
+ virtual void doSaveState();
+ virtual void doRestoreState(OperationContext* opCtx);
+ virtual void doInvalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
virtual StageType stageType() const {
return STAGE_IXSCAN;
@@ -111,8 +109,6 @@ public:
virtual std::unique_ptr<PlanStageStats> getStats();
- virtual const CommonStats* getCommonStats() const;
-
virtual const SpecificStats* getSpecificStats() const;
static const char* kStageType;
@@ -150,7 +146,6 @@ private:
const IndexScanParams _params;
// Stats
- CommonStats _commonStats;
IndexScanStats _specificStats;
//
diff --git a/src/mongo/db/exec/keep_mutations.cpp b/src/mongo/db/exec/keep_mutations.cpp
index 099052149ba..2770c2b25d3 100644
--- a/src/mongo/db/exec/keep_mutations.cpp
+++ b/src/mongo/db/exec/keep_mutations.cpp
@@ -44,12 +44,13 @@ const char* KeepMutationsStage::kStageType = "KEEP_MUTATIONS";
KeepMutationsStage::KeepMutationsStage(const MatchExpression* filter,
WorkingSet* ws,
PlanStage* child)
- : _workingSet(ws),
- _child(child),
+ : PlanStage(kStageType),
+ _workingSet(ws),
_filter(filter),
_doneReadingChild(false),
- _doneReturningFlagged(false),
- _commonStats(kStageType) {}
+ _doneReturningFlagged(false) {
+ _children.emplace_back(child);
+}
KeepMutationsStage::~KeepMutationsStage() {}
@@ -70,7 +71,7 @@ PlanStage::StageState KeepMutationsStage::work(WorkingSetID* out) {
// Stream child results until the child is all done.
if (!_doneReadingChild) {
- StageState status = _child->work(out);
+ StageState status = child()->work(out);
// Child is still returning results. Pass them through.
if (PlanStage::IS_EOF != status) {
@@ -119,41 +120,14 @@ PlanStage::StageState KeepMutationsStage::work(WorkingSetID* out) {
}
}
-void KeepMutationsStage::saveState() {
- ++_commonStats.yields;
- _child->saveState();
-}
-
-void KeepMutationsStage::restoreState(OperationContext* opCtx) {
- ++_commonStats.unyields;
- _child->restoreState(opCtx);
-}
-
-void KeepMutationsStage::invalidate(OperationContext* txn,
- const RecordId& dl,
- InvalidationType type) {
- ++_commonStats.invalidates;
- _child->invalidate(txn, dl, type);
-}
-
-vector<PlanStage*> KeepMutationsStage::getChildren() const {
- vector<PlanStage*> children;
- children.push_back(_child.get());
- return children;
-}
-
unique_ptr<PlanStageStats> KeepMutationsStage::getStats() {
_commonStats.isEOF = isEOF();
unique_ptr<PlanStageStats> ret =
make_unique<PlanStageStats>(_commonStats, STAGE_KEEP_MUTATIONS);
- ret->children.push_back(_child->getStats().release());
+ ret->children.push_back(child()->getStats().release());
return ret;
}
-const CommonStats* KeepMutationsStage::getCommonStats() const {
- return &_commonStats;
-}
-
const SpecificStats* KeepMutationsStage::getSpecificStats() const {
return NULL;
}
diff --git a/src/mongo/db/exec/keep_mutations.h b/src/mongo/db/exec/keep_mutations.h
index 8915d363971..d76ba32dac8 100644
--- a/src/mongo/db/exec/keep_mutations.h
+++ b/src/mongo/db/exec/keep_mutations.h
@@ -52,20 +52,12 @@ public:
virtual bool isEOF();
virtual StageState work(WorkingSetID* out);
- virtual void saveState();
- virtual void restoreState(OperationContext* opCtx);
- virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
-
- virtual std::vector<PlanStage*> getChildren() const;
-
virtual StageType stageType() const {
return STAGE_KEEP_MUTATIONS;
}
virtual std::unique_ptr<PlanStageStats> getStats();
- virtual const CommonStats* getCommonStats() const;
-
virtual const SpecificStats* getSpecificStats() const;
static const char* kStageType;
@@ -74,8 +66,6 @@ private:
// Not owned here.
WorkingSet* _workingSet;
- std::unique_ptr<PlanStage> _child;
-
// Not owned here. Should be the full query expression tree.
const MatchExpression* _filter;
@@ -86,9 +76,6 @@ private:
// stream.
bool _doneReturningFlagged;
- // Stats.
- CommonStats _commonStats;
-
// Our copy of the working set's flagged results.
std::vector<WorkingSetID> _flagged;
diff --git a/src/mongo/db/exec/limit.cpp b/src/mongo/db/exec/limit.cpp
index 06566149cd8..a2c1d73b399 100644
--- a/src/mongo/db/exec/limit.cpp
+++ b/src/mongo/db/exec/limit.cpp
@@ -43,14 +43,15 @@ using stdx::make_unique;
const char* LimitStage::kStageType = "LIMIT";
LimitStage::LimitStage(long long limit, WorkingSet* ws, PlanStage* child)
- : _ws(ws), _child(child), _numToReturn(limit), _commonStats(kStageType) {
+ : PlanStage(kStageType), _ws(ws), _numToReturn(limit) {
_specificStats.limit = _numToReturn;
+ _children.emplace_back(child);
}
LimitStage::~LimitStage() {}
bool LimitStage::isEOF() {
- return (0 == _numToReturn) || _child->isEOF();
+ return (0 == _numToReturn) || child()->isEOF();
}
PlanStage::StageState LimitStage::work(WorkingSetID* out) {
@@ -65,7 +66,7 @@ PlanStage::StageState LimitStage::work(WorkingSetID* out) {
}
WorkingSetID id = WorkingSet::INVALID_ID;
- StageState status = _child->work(&id);
+ StageState status = child()->work(&id);
if (PlanStage::ADVANCED == status) {
*out = id;
@@ -94,39 +95,14 @@ PlanStage::StageState LimitStage::work(WorkingSetID* out) {
return status;
}
-void LimitStage::saveState() {
- ++_commonStats.yields;
- _child->saveState();
-}
-
-void LimitStage::restoreState(OperationContext* opCtx) {
- ++_commonStats.unyields;
- _child->restoreState(opCtx);
-}
-
-void LimitStage::invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
- ++_commonStats.invalidates;
- _child->invalidate(txn, dl, type);
-}
-
-vector<PlanStage*> LimitStage::getChildren() const {
- vector<PlanStage*> children;
- children.push_back(_child.get());
- return children;
-}
-
unique_ptr<PlanStageStats> LimitStage::getStats() {
_commonStats.isEOF = isEOF();
unique_ptr<PlanStageStats> ret = make_unique<PlanStageStats>(_commonStats, STAGE_LIMIT);
ret->specific = make_unique<LimitStats>(_specificStats);
- ret->children.push_back(_child->getStats().release());
+ ret->children.push_back(child()->getStats().release());
return ret;
}
-const CommonStats* LimitStage::getCommonStats() const {
- return &_commonStats;
-}
-
const SpecificStats* LimitStage::getSpecificStats() const {
return &_specificStats;
}
diff --git a/src/mongo/db/exec/limit.h b/src/mongo/db/exec/limit.h
index 26894b7479f..33af499d5f8 100644
--- a/src/mongo/db/exec/limit.h
+++ b/src/mongo/db/exec/limit.h
@@ -50,33 +50,23 @@ public:
virtual bool isEOF();
virtual StageState work(WorkingSetID* out);
- virtual void saveState();
- virtual void restoreState(OperationContext* opCtx);
- virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
-
- virtual std::vector<PlanStage*> getChildren() const;
-
virtual StageType stageType() const {
return STAGE_LIMIT;
}
virtual std::unique_ptr<PlanStageStats> getStats();
- virtual const CommonStats* getCommonStats() const;
-
virtual const SpecificStats* getSpecificStats() const;
static const char* kStageType;
private:
WorkingSet* _ws;
- std::unique_ptr<PlanStage> _child;
// We only return this many results.
long long _numToReturn;
// Stats
- CommonStats _commonStats;
LimitStats _specificStats;
};
diff --git a/src/mongo/db/exec/merge_sort.cpp b/src/mongo/db/exec/merge_sort.cpp
index a710171975a..5bf7db4b9ea 100644
--- a/src/mongo/db/exec/merge_sort.cpp
+++ b/src/mongo/db/exec/merge_sort.cpp
@@ -48,21 +48,15 @@ const char* MergeSortStage::kStageType = "SORT_MERGE";
MergeSortStage::MergeSortStage(const MergeSortStageParams& params,
WorkingSet* ws,
const Collection* collection)
- : _collection(collection),
+ : PlanStage(kStageType),
+ _collection(collection),
_ws(ws),
_pattern(params.pattern),
_dedup(params.dedup),
- _merging(StageWithValueComparison(ws, params.pattern)),
- _commonStats(kStageType) {}
-
-MergeSortStage::~MergeSortStage() {
- for (size_t i = 0; i < _children.size(); ++i) {
- delete _children[i];
- }
-}
+ _merging(StageWithValueComparison(ws, params.pattern)) {}
void MergeSortStage::addChild(PlanStage* child) {
- _children.push_back(child);
+ _children.emplace_back(child);
// We have to call work(...) on every child before we can pick a min.
_noResultToMerge.push(child);
@@ -191,26 +185,10 @@ PlanStage::StageState MergeSortStage::work(WorkingSetID* out) {
return PlanStage::ADVANCED;
}
-void MergeSortStage::saveState() {
- ++_commonStats.yields;
- for (size_t i = 0; i < _children.size(); ++i) {
- _children[i]->saveState();
- }
-}
-
-void MergeSortStage::restoreState(OperationContext* opCtx) {
- ++_commonStats.unyields;
- for (size_t i = 0; i < _children.size(); ++i) {
- _children[i]->restoreState(opCtx);
- }
-}
-
-void MergeSortStage::invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
- ++_commonStats.invalidates;
- for (size_t i = 0; i < _children.size(); ++i) {
- _children[i]->invalidate(txn, dl, type);
- }
+void MergeSortStage::doInvalidate(OperationContext* txn,
+ const RecordId& dl,
+ InvalidationType type) {
// Go through our data and see if we're holding on to the invalidated loc.
for (list<StageWithValue>::iterator valueIt = _mergingData.begin();
valueIt != _mergingData.end();
@@ -265,10 +243,6 @@ bool MergeSortStage::StageWithValueComparison::operator()(const MergingRef& lhs,
return false;
}
-vector<PlanStage*> MergeSortStage::getChildren() const {
- return _children;
-}
-
unique_ptr<PlanStageStats> MergeSortStage::getStats() {
_commonStats.isEOF = isEOF();
@@ -282,10 +256,6 @@ unique_ptr<PlanStageStats> MergeSortStage::getStats() {
return ret;
}
-const CommonStats* MergeSortStage::getCommonStats() const {
- return &_commonStats;
-}
-
const SpecificStats* MergeSortStage::getSpecificStats() const {
return &_specificStats;
}
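Multi-child stages lose their hand-written destructors because addChild() now emplaces into PlanStage::_children; a rough usage sketch (the makeIndexScan* helpers are hypothetical):

    MergeSortStage* stage = new MergeSortStage(params, ws, collection);
    stage->addChild(makeIndexScanOnA());  // hypothetical helpers returning PlanStage*
    stage->addChild(makeIndexScanOnB());
    // Both children are owned by _children; saveState/restoreState/invalidate fan out to them
    // in the base class, so doInvalidate() above only has to scrub the stage's own merge buffer.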
diff --git a/src/mongo/db/exec/merge_sort.h b/src/mongo/db/exec/merge_sort.h
index 3a2700002a5..51114bbd0ca 100644
--- a/src/mongo/db/exec/merge_sort.h
+++ b/src/mongo/db/exec/merge_sort.h
@@ -58,18 +58,13 @@ public:
MergeSortStage(const MergeSortStageParams& params,
WorkingSet* ws,
const Collection* collection);
- virtual ~MergeSortStage();
void addChild(PlanStage* child);
virtual bool isEOF();
virtual StageState work(WorkingSetID* out);
- virtual void saveState();
- virtual void restoreState(OperationContext* opCtx);
- virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
-
- virtual std::vector<PlanStage*> getChildren() const;
+ virtual void doInvalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
virtual StageType stageType() const {
return STAGE_SORT_MERGE;
@@ -77,8 +72,6 @@ public:
std::unique_ptr<PlanStageStats> getStats();
- virtual const CommonStats* getCommonStats() const;
-
virtual const SpecificStats* getSpecificStats() const;
static const char* kStageType;
@@ -99,9 +92,6 @@ private:
// Which RecordIds have we seen?
unordered_set<RecordId, RecordId::Hasher> _seen;
- // Owned by us. All the children we're reading from.
- std::vector<PlanStage*> _children;
-
// In order to pick the next smallest value, we need each child work(...) until it produces
// a result. This is the queue of children that haven't given us a result yet.
std::queue<PlanStage*> _noResultToMerge;
@@ -149,7 +139,6 @@ private:
std::list<StageWithValue> _mergingData;
// Stats
- CommonStats _commonStats;
MergeSortStats _specificStats;
};
diff --git a/src/mongo/db/exec/multi_iterator.cpp b/src/mongo/db/exec/multi_iterator.cpp
index 84ba4e76578..f3de28c5e54 100644
--- a/src/mongo/db/exec/multi_iterator.cpp
+++ b/src/mongo/db/exec/multi_iterator.cpp
@@ -46,7 +46,11 @@ const char* MultiIteratorStage::kStageType = "MULTI_ITERATOR";
MultiIteratorStage::MultiIteratorStage(OperationContext* txn,
WorkingSet* ws,
Collection* collection)
- : _txn(txn), _collection(collection), _ws(ws), _wsidForFetch(_ws->allocate()) {}
+ : PlanStage(kStageType),
+ _txn(txn),
+ _collection(collection),
+ _ws(ws),
+ _wsidForFetch(_ws->allocate()) {}
void MultiIteratorStage::addIterator(unique_ptr<RecordCursor> it) {
_iterators.push_back(std::move(it));
@@ -102,14 +106,14 @@ void MultiIteratorStage::kill() {
_iterators.clear();
}
-void MultiIteratorStage::saveState() {
+void MultiIteratorStage::doSaveState() {
_txn = NULL;
for (size_t i = 0; i < _iterators.size(); i++) {
_iterators[i]->savePositioned();
}
}
-void MultiIteratorStage::restoreState(OperationContext* opCtx) {
+void MultiIteratorStage::doRestoreState(OperationContext* opCtx) {
invariant(_txn == NULL);
_txn = opCtx;
for (size_t i = 0; i < _iterators.size(); i++) {
@@ -119,9 +123,9 @@ void MultiIteratorStage::restoreState(OperationContext* opCtx) {
}
}
-void MultiIteratorStage::invalidate(OperationContext* txn,
- const RecordId& dl,
- InvalidationType type) {
+void MultiIteratorStage::doInvalidate(OperationContext* txn,
+ const RecordId& dl,
+ InvalidationType type) {
switch (type) {
case INVALIDATION_DELETION:
for (size_t i = 0; i < _iterators.size(); i++) {
@@ -134,11 +138,6 @@ void MultiIteratorStage::invalidate(OperationContext* txn,
}
}
-vector<PlanStage*> MultiIteratorStage::getChildren() const {
- vector<PlanStage*> empty;
- return empty;
-}
-
unique_ptr<PlanStageStats> MultiIteratorStage::getStats() {
unique_ptr<PlanStageStats> ret =
make_unique<PlanStageStats>(CommonStats(kStageType), STAGE_MULTI_ITERATOR);
diff --git a/src/mongo/db/exec/multi_iterator.h b/src/mongo/db/exec/multi_iterator.h
index 9fe8f5338c8..a0b16d57515 100644
--- a/src/mongo/db/exec/multi_iterator.h
+++ b/src/mongo/db/exec/multi_iterator.h
@@ -59,28 +59,19 @@ public:
void kill();
- virtual void saveState();
- virtual void restoreState(OperationContext* opCtx);
-
- virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
+ virtual void doSaveState();
+ virtual void doRestoreState(OperationContext* opCtx);
+ virtual void doInvalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
// Returns empty PlanStageStats object
virtual std::unique_ptr<PlanStageStats> getStats();
// Not used.
- virtual CommonStats* getCommonStats() const {
- return NULL;
- }
-
- // Not used.
virtual SpecificStats* getSpecificStats() const {
return NULL;
}
// Not used.
- virtual std::vector<PlanStage*> getChildren() const;
-
- // Not used.
virtual StageType stageType() const {
return STAGE_MULTI_ITERATOR;
}
diff --git a/src/mongo/db/exec/multi_plan.cpp b/src/mongo/db/exec/multi_plan.cpp
index 60906acc055..de58312a4b6 100644
--- a/src/mongo/db/exec/multi_plan.cpp
+++ b/src/mongo/db/exec/multi_plan.cpp
@@ -65,7 +65,8 @@ MultiPlanStage::MultiPlanStage(OperationContext* txn,
const Collection* collection,
CanonicalQuery* cq,
bool shouldCache)
- : _txn(txn),
+ : PlanStage(kStageType),
+ _txn(txn),
_collection(collection),
_shouldCache(shouldCache),
_query(cq),
@@ -73,20 +74,13 @@ MultiPlanStage::MultiPlanStage(OperationContext* txn,
_backupPlanIdx(kNoSuchPlan),
_failure(false),
_failureCount(0),
- _statusMemberId(WorkingSet::INVALID_ID),
- _commonStats(kStageType) {
+ _statusMemberId(WorkingSet::INVALID_ID) {
invariant(_collection);
}
-MultiPlanStage::~MultiPlanStage() {
- for (size_t ix = 0; ix < _candidates.size(); ++ix) {
- delete _candidates[ix].solution;
- delete _candidates[ix].root;
- }
-}
-
void MultiPlanStage::addPlan(QuerySolution* solution, PlanStage* root, WorkingSet* ws) {
_candidates.push_back(CandidatePlan(solution, root, ws));
+ _children.emplace_back(root);
}
bool MultiPlanStage::isEOF() {
@@ -255,7 +249,7 @@ Status MultiPlanStage::pickBestPlan(PlanYieldPolicy* yieldPolicy) {
CandidatePlan& bestCandidate = _candidates[_bestPlanIdx];
std::list<WorkingSetID>& alreadyProduced = bestCandidate.results;
- QuerySolution* bestSolution = bestCandidate.solution;
+ const auto& bestSolution = bestCandidate.solution;
LOG(5) << "Winning solution:\n" << bestSolution->toString() << endl;
LOG(2) << "Winning plan: " << Explain::getPlanSummary(bestCandidate.root);
@@ -283,7 +277,7 @@ Status MultiPlanStage::pickBestPlan(PlanYieldPolicy* yieldPolicy) {
for (size_t orderingIndex = 0; orderingIndex < candidateOrder.size(); ++orderingIndex) {
// index into candidates/ranking
size_t ix = candidateOrder[orderingIndex];
- solutions.push_back(_candidates[ix].solution);
+ solutions.push_back(_candidates[ix].solution.get());
}
// Check solution cache data. Do not add to cache if
@@ -394,20 +388,8 @@ bool MultiPlanStage::workAllPlans(size_t numResults, PlanYieldPolicy* yieldPolic
return !doneWorking;
}
-void MultiPlanStage::saveState() {
- _txn = NULL;
- for (size_t i = 0; i < _candidates.size(); ++i) {
- _candidates[i].root->saveState();
- }
-}
-
-void MultiPlanStage::restoreState(OperationContext* opCtx) {
- invariant(_txn == NULL);
+void MultiPlanStage::doRestoreState(OperationContext* opCtx) {
_txn = opCtx;
-
- for (size_t i = 0; i < _candidates.size(); ++i) {
- _candidates[i].root->restoreState(opCtx);
- }
}
namespace {
@@ -434,23 +416,22 @@ void invalidateHelper(OperationContext* txn,
}
}
-void MultiPlanStage::invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
+void MultiPlanStage::doInvalidate(OperationContext* txn,
+ const RecordId& dl,
+ InvalidationType type) {
if (_failure) {
return;
}
if (bestPlanChosen()) {
CandidatePlan& bestPlan = _candidates[_bestPlanIdx];
- bestPlan.root->invalidate(txn, dl, type);
invalidateHelper(txn, bestPlan.ws, dl, &bestPlan.results, _collection);
if (hasBackupPlan()) {
CandidatePlan& backupPlan = _candidates[_backupPlanIdx];
- backupPlan.root->invalidate(txn, dl, type);
invalidateHelper(txn, backupPlan.ws, dl, &backupPlan.results, _collection);
}
} else {
for (size_t ix = 0; ix < _candidates.size(); ++ix) {
- _candidates[ix].root->invalidate(txn, dl, type);
invalidateHelper(txn, _candidates[ix].ws, dl, &_candidates[ix].results, _collection);
}
}
@@ -472,21 +453,7 @@ QuerySolution* MultiPlanStage::bestSolution() {
if (_bestPlanIdx == kNoSuchPlan)
return NULL;
- return _candidates[_bestPlanIdx].solution;
-}
-
-vector<PlanStage*> MultiPlanStage::getChildren() const {
- vector<PlanStage*> children;
-
- if (bestPlanChosen()) {
- children.push_back(_candidates[_bestPlanIdx].root);
- } else {
- for (size_t i = 0; i < _candidates.size(); i++) {
- children.push_back(_candidates[i].root);
- }
- }
-
- return children;
+ return _candidates[_bestPlanIdx].solution.get();
}
unique_ptr<PlanStageStats> MultiPlanStage::getStats() {
@@ -501,10 +468,6 @@ unique_ptr<PlanStageStats> MultiPlanStage::getStats() {
return make_unique<PlanStageStats>(_commonStats, STAGE_MULTI_PLAN);
}
-const CommonStats* MultiPlanStage::getCommonStats() const {
- return &_commonStats;
-}
-
const SpecificStats* MultiPlanStage::getSpecificStats() const {
return &_specificStats;
}
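Ownership after addPlan(), as this hunk and the header comment in multi_plan.h below describe it (sketch; the multiPlan variable is illustrative):

    multiPlan->addPlan(solution, root, ws);
    // _candidates.back().root   raw pointer used for ranking, explain, and plan-cache decisions
    // _children.back()          unique_ptr that actually owns that same PlanStage subtree
    // The two vectors stay in one-to-one correspondence, which is why doInvalidate() above no
    // longer calls root->invalidate() per candidate: the base class already walks _children.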
diff --git a/src/mongo/db/exec/multi_plan.h b/src/mongo/db/exec/multi_plan.h
index 33948ef3f53..669197f648e 100644
--- a/src/mongo/db/exec/multi_plan.h
+++ b/src/mongo/db/exec/multi_plan.h
@@ -62,19 +62,13 @@ public:
CanonicalQuery* cq,
bool shouldCache = true);
- virtual ~MultiPlanStage();
-
virtual bool isEOF();
virtual StageState work(WorkingSetID* out);
- virtual void saveState();
-
- virtual void restoreState(OperationContext* opCtx);
-
- virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
+ virtual void doRestoreState(OperationContext* opCtx);
- virtual std::vector<PlanStage*> getChildren() const;
+ virtual void doInvalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
virtual StageType stageType() const {
return STAGE_MULTI_PLAN;
@@ -82,7 +76,6 @@ public:
virtual std::unique_ptr<PlanStageStats> getStats();
- virtual const CommonStats* getCommonStats() const;
virtual const SpecificStats* getSpecificStats() const;
@@ -187,9 +180,10 @@ private:
// not owned here
CanonicalQuery* _query;
- // Candidate plans. Each candidate includes a child PlanStage tree and QuerySolution which
- // are owned here. Ownership of all QuerySolutions is retained here, and will *not* be
- // tranferred to the PlanExecutor that wraps this stage.
+ // Candidate plans. Each candidate includes a child PlanStage tree and QuerySolution. Ownership
+ // of all QuerySolutions is retained here, and will *not* be transferred to the PlanExecutor that
+ // wraps this stage. Ownership of the PlanStages will be in PlanStage::_children which maps
+ // one-to-one with _candidates.
std::vector<CandidatePlan> _candidates;
// index into _candidates, of the winner of the plan competition
@@ -227,7 +221,6 @@ private:
std::unique_ptr<RecordFetcher> _fetcher;
// Stats
- CommonStats _commonStats;
MultiPlanStats _specificStats;
};
diff --git a/src/mongo/db/exec/near.cpp b/src/mongo/db/exec/near.cpp
index 2012ef4ff9f..a79ae985bd5 100644
--- a/src/mongo/db/exec/near.cpp
+++ b/src/mongo/db/exec/near.cpp
@@ -42,21 +42,17 @@ using std::vector;
using stdx::make_unique;
NearStage::NearStage(OperationContext* txn,
+ const char* typeName,
+ StageType type,
WorkingSet* workingSet,
- Collection* collection,
- PlanStageStats* stats)
- : _txn(txn),
+ Collection* collection)
+ : PlanStage(typeName),
+ _txn(txn),
_workingSet(workingSet),
_collection(collection),
_searchState(SearchState_Initializing),
- _stats(stats),
- _nextInterval(NULL) {
- // Ensure we have specific distance search stats unless a child class specified their
- // own distance stats subclass
- if (!_stats->specific) {
- _stats->specific.reset(new NearStats);
- }
-}
+ _stageType(type),
+ _nextInterval(NULL) {}
NearStage::~NearStage() {}
@@ -86,10 +82,10 @@ PlanStage::StageState NearStage::initNext(WorkingSetID* out) {
}
PlanStage::StageState NearStage::work(WorkingSetID* out) {
- ++_stats->common.works;
+ ++_commonStats.works;
// Adds the amount of time taken by work() to executionTimeMillis.
- ScopedTimer timer(&_stats->common.executionTimeMillis);
+ ScopedTimer timer(&_commonStats.executionTimeMillis);
WorkingSetID toReturn = WorkingSet::INVALID_ID;
Status error = Status::OK();
@@ -118,14 +114,14 @@ PlanStage::StageState NearStage::work(WorkingSetID* out) {
*out = WorkingSetCommon::allocateStatusMember(_workingSet, error);
} else if (PlanStage::ADVANCED == nextState) {
*out = toReturn;
- ++_stats->common.advanced;
+ ++_commonStats.advanced;
} else if (PlanStage::NEED_YIELD == nextState) {
*out = toReturn;
- ++_stats->common.needYield;
+ ++_commonStats.needYield;
} else if (PlanStage::NEED_TIME == nextState) {
- ++_stats->common.needTime;
+ ++_commonStats.needTime;
} else if (PlanStage::IS_EOF == nextState) {
- _stats->common.isEOF = true;
+ _commonStats.isEOF = true;
}
return nextState;
@@ -178,7 +174,7 @@ PlanStage::StageState NearStage::bufferNext(WorkingSetID* toReturn, Status* erro
PlanStage::StageState intervalState = _nextInterval->covering->work(&nextMemberID);
if (PlanStage::IS_EOF == intervalState) {
- getNearStats()->intervalStats.push_back(*_nextIntervalStats);
+ _specificStats.intervalStats.push_back(*_nextIntervalStats);
_nextIntervalStats.reset();
_nextInterval = NULL;
_searchState = SearchState_Advancing;
@@ -288,35 +284,11 @@ bool NearStage::isEOF() {
return SearchState_Finished == _searchState;
}
-void NearStage::saveState() {
- _txn = NULL;
- ++_stats->common.yields;
- for (size_t i = 0; i < _childrenIntervals.size(); i++) {
- _childrenIntervals[i]->covering->saveState();
- }
-
- // Subclass specific saving, e.g. saving the 2d or 2dsphere density estimator.
- finishSaveState();
-}
-
-void NearStage::restoreState(OperationContext* opCtx) {
- invariant(_txn == NULL);
+void NearStage::doRestoreState(OperationContext* opCtx) {
_txn = opCtx;
- ++_stats->common.unyields;
- for (size_t i = 0; i < _childrenIntervals.size(); i++) {
- _childrenIntervals[i]->covering->restoreState(opCtx);
- }
-
- // Subclass specific restoring, e.g. restoring the 2d or 2dsphere density estimator.
- finishRestoreState(opCtx);
}
-void NearStage::invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
- ++_stats->common.invalidates;
- for (size_t i = 0; i < _childrenIntervals.size(); i++) {
- _childrenIntervals[i]->covering->invalidate(txn, dl, type);
- }
-
+void NearStage::doInvalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
// If a result is in _resultBuffer and has a RecordId it will be in _nextIntervalSeen as
// well. It's safe to return the result w/o the RecordId, so just fetch the result.
unordered_map<RecordId, WorkingSetID, RecordId::Hasher>::iterator seenIt =
@@ -331,42 +303,23 @@ void NearStage::invalidate(OperationContext* txn, const RecordId& dl, Invalidati
// Don't keep it around in the seen map since there's no valid RecordId anymore
_nextIntervalSeen.erase(seenIt);
}
-
- // Subclass specific invalidation, e.g. passing the invalidation to the 2d or 2dsphere
- // density estimator.
- finishInvalidate(txn, dl, type);
-}
-
-vector<PlanStage*> NearStage::getChildren() const {
- vector<PlanStage*> children;
- for (size_t i = 0; i < _childrenIntervals.size(); i++) {
- children.push_back(_childrenIntervals[i]->covering.get());
- }
- return children;
}
unique_ptr<PlanStageStats> NearStage::getStats() {
- unique_ptr<PlanStageStats> statsClone(_stats->clone());
+ unique_ptr<PlanStageStats> ret = make_unique<PlanStageStats>(_commonStats, _stageType);
+ ret->specific.reset(_specificStats.clone());
for (size_t i = 0; i < _childrenIntervals.size(); ++i) {
- statsClone->children.push_back(_childrenIntervals[i]->covering->getStats().release());
+ ret->children.push_back(_childrenIntervals[i]->covering->getStats().release());
}
- return statsClone;
+ return ret;
}
StageType NearStage::stageType() const {
- return _stats->stageType;
-}
-
-const CommonStats* NearStage::getCommonStats() const {
- return &_stats->common;
+ return _stageType;
}
const SpecificStats* NearStage::getSpecificStats() const {
- return _stats->specific.get();
-}
-
-NearStats* NearStage::getNearStats() {
- return static_cast<NearStats*>(_stats->specific.get());
+ return &_specificStats;
}
} // namespace mongo
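With the stats held as plain members, the snapshot handed out by NearStage::getStats() is rebuilt on demand; a condensed sketch of the new shape, mirroring the hunk above:

    unique_ptr<PlanStageStats> ret = make_unique<PlanStageStats>(_commonStats, _stageType);
    ret->specific.reset(_specificStats.clone());   // NearStats copied only when stats are requested
    // Subclasses write adaptive-search data straight into the protected _specificStats member
    // (see intervalStats above), which is what lets the getNearStats() accessor be deleted.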
diff --git a/src/mongo/db/exec/near.h b/src/mongo/db/exec/near.h
index 310bae591bc..09a743ae147 100644
--- a/src/mongo/db/exec/near.h
+++ b/src/mongo/db/exec/near.h
@@ -82,32 +82,22 @@ public:
virtual bool isEOF();
virtual StageState work(WorkingSetID* out);
- virtual void saveState();
- virtual void restoreState(OperationContext* opCtx);
- virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
-
- virtual std::vector<PlanStage*> getChildren() const;
+ virtual void doRestoreState(OperationContext* opCtx);
+ virtual void doInvalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
virtual StageType stageType() const;
virtual std::unique_ptr<PlanStageStats> getStats();
- virtual const CommonStats* getCommonStats() const;
virtual const SpecificStats* getSpecificStats() const;
protected:
/**
* Subclasses of NearStage must provide basics + a stats object which gets owned here.
- * The stats object must have specific stats which are a subclass of NearStats, otherwise
- * it's generated automatically.
*/
NearStage(OperationContext* txn,
+ const char* typeName,
+ StageType type,
WorkingSet* workingSet,
- Collection* collection,
- PlanStageStats* stats);
-
- /**
- * Exposes NearStats for adaptive search, allows additional specific stats in subclasses.
- */
- NearStats* getNearStats();
+ Collection* collection);
//
// Methods implemented for specific search functionality
@@ -144,19 +134,10 @@ protected:
Collection* collection,
WorkingSetID* out) = 0;
-private:
- //
- // Save/restore/invalidate work specific to the search type.
- //
-
- virtual void finishSaveState() = 0;
-
- virtual void finishRestoreState(OperationContext* txn) = 0;
-
- virtual void finishInvalidate(OperationContext* txn,
- const RecordId& dl,
- InvalidationType type) = 0;
+ // Filled in by subclasses.
+ NearStats _specificStats;
+private:
//
// Generic methods for progressive search functionality
//
@@ -196,7 +177,7 @@ private:
std::priority_queue<SearchResult> _resultBuffer;
// Stats
- std::unique_ptr<PlanStageStats> _stats;
+ const StageType _stageType;
// The current stage from which this stage should buffer results
// Pointer to the last interval in _childrenIntervals. Owned by _childrenIntervals.
@@ -219,8 +200,7 @@ struct NearStage::CoveredInterval {
double maxDistance,
bool inclusiveMax);
- // Owned by NearStage
- std::unique_ptr<PlanStage> const covering;
+ PlanStage* const covering; // Owned in PlanStage::_children.
const bool dedupCovering;
const double minDistance;
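CoveredInterval::covering becomes a raw pointer whose owner is PlanStage::_children; the geo_near.cpp hunk earlier in this diff wires a new interval up like this (condensed):

    _children.emplace_back(new FetchStage(txn, workingSet, scan, _nearParams.filter, collection));
    return StatusWith<CoveredInterval*>(new CoveredInterval(_children.back().get(),  // not owned here
                                                            true,
                                                            nextBounds.getInner(),
                                                            nextBounds.getOuter(),
                                                            isLastInterval));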
diff --git a/src/mongo/db/exec/oplogstart.cpp b/src/mongo/db/exec/oplogstart.cpp
index e077d448067..33b41aed43d 100644
--- a/src/mongo/db/exec/oplogstart.cpp
+++ b/src/mongo/db/exec/oplogstart.cpp
@@ -47,7 +47,8 @@ OplogStart::OplogStart(OperationContext* txn,
const Collection* collection,
MatchExpression* filter,
WorkingSet* ws)
- : _txn(txn),
+ : PlanStage(kStageType),
+ _txn(txn),
_needInit(true),
_backwardsScanning(false),
_extentHopping(false),
@@ -56,15 +57,13 @@ OplogStart::OplogStart(OperationContext* txn,
_workingSet(ws),
_filter(filter) {}
-OplogStart::~OplogStart() {}
-
PlanStage::StageState OplogStart::work(WorkingSetID* out) {
// We do our (heavy) init in a work(), where work is expected.
if (_needInit) {
CollectionScanParams params;
params.collection = _collection;
params.direction = CollectionScanParams::BACKWARD;
- _cs.reset(new CollectionScan(_txn, params, _workingSet, NULL));
+ _children.emplace_back(new CollectionScan(_txn, params, _workingSet, NULL));
_needInit = false;
_backwardsScanning = true;
@@ -133,11 +132,11 @@ void OplogStart::switchToExtentHopping() {
_extentHopping = true;
// Toss the collection scan we were using.
- _cs.reset();
+ _children.clear();
}
PlanStage::StageState OplogStart::workBackwardsScan(WorkingSetID* out) {
- PlanStage::StageState state = _cs->work(out);
+ PlanStage::StageState state = child()->work(out);
// EOF. Just start from the beginning, which is where we've hit.
if (PlanStage::IS_EOF == state) {
@@ -167,7 +166,7 @@ bool OplogStart::isEOF() {
return _done;
}
-void OplogStart::invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
+void OplogStart::doInvalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
if (_needInit) {
return;
}
@@ -176,32 +175,21 @@ void OplogStart::invalidate(OperationContext* txn, const RecordId& dl, Invalidat
return;
}
- if (_cs) {
- _cs->invalidate(txn, dl, type);
- }
-
for (size_t i = 0; i < _subIterators.size(); i++) {
_subIterators[i]->invalidate(dl);
}
}
-void OplogStart::saveState() {
+void OplogStart::doSaveState() {
_txn = NULL;
- if (_cs) {
- _cs->saveState();
- }
-
for (size_t i = 0; i < _subIterators.size(); i++) {
_subIterators[i]->savePositioned();
}
}
-void OplogStart::restoreState(OperationContext* opCtx) {
+void OplogStart::doRestoreState(OperationContext* opCtx) {
invariant(_txn == NULL);
_txn = opCtx;
- if (_cs) {
- _cs->restoreState(opCtx);
- }
for (size_t i = 0; i < _subIterators.size(); i++) {
if (!_subIterators[i]->restore(opCtx)) {
@@ -219,11 +207,6 @@ unique_ptr<PlanStageStats> OplogStart::getStats() {
return ret;
}
-vector<PlanStage*> OplogStart::getChildren() const {
- vector<PlanStage*> empty;
- return empty;
-}
-
int OplogStart::_backwardsScanTime = 5;
} // namespace mongo
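OplogStart keeps its two-phase behaviour, but the backwards collection scan now lives in _children instead of the dedicated _cs member; a condensed sketch built from the hunks above:

    // Phase 1: lazily create the backwards scan as the stage's only child.
    _children.emplace_back(new CollectionScan(_txn, params, _workingSet, NULL));
    PlanStage::StageState state = child()->work(out);   // was _cs->work(out)

    // Phase 2: switching to extent hopping just drops the child; the unique_ptr deletes the scan.
    _children.clear();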
diff --git a/src/mongo/db/exec/oplogstart.h b/src/mongo/db/exec/oplogstart.h
index 98ec934ba61..d26d8d63b4e 100644
--- a/src/mongo/db/exec/oplogstart.h
+++ b/src/mongo/db/exec/oplogstart.h
@@ -65,26 +65,20 @@ public:
const Collection* collection,
MatchExpression* filter,
WorkingSet* ws);
- virtual ~OplogStart();
virtual StageState work(WorkingSetID* out);
virtual bool isEOF();
- virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
- virtual void saveState();
- virtual void restoreState(OperationContext* opCtx);
-
- virtual std::vector<PlanStage*> getChildren() const;
+ virtual void doInvalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
+ virtual void doSaveState();
+ virtual void doRestoreState(OperationContext* opCtx);
// Returns empty PlanStageStats object
virtual std::unique_ptr<PlanStageStats> getStats();
//
- // Exec stats -- do not call these for the oplog start stage.
+ // Exec stats -- do not call for the oplog start stage.
//
- virtual const CommonStats* getCommonStats() const {
- return NULL;
- }
virtual const SpecificStats* getSpecificStats() const {
return NULL;
@@ -117,9 +111,6 @@ private:
// transactional context for read locks. Not owned by us
OperationContext* _txn;
- // If we're backwards scanning we just punt to a collscan.
- std::unique_ptr<CollectionScan> _cs;
-
// This is only used for the extent hopping scan.
std::vector<std::unique_ptr<RecordCursor>> _subIterators;
diff --git a/src/mongo/db/exec/or.cpp b/src/mongo/db/exec/or.cpp
index b0902d683d3..f1ce36bdc5b 100644
--- a/src/mongo/db/exec/or.cpp
+++ b/src/mongo/db/exec/or.cpp
@@ -44,16 +44,10 @@ using stdx::make_unique;
const char* OrStage::kStageType = "OR";
OrStage::OrStage(WorkingSet* ws, bool dedup, const MatchExpression* filter)
- : _ws(ws), _filter(filter), _currentChild(0), _dedup(dedup), _commonStats(kStageType) {}
-
-OrStage::~OrStage() {
- for (size_t i = 0; i < _children.size(); ++i) {
- delete _children[i];
- }
-}
+ : PlanStage(kStageType), _ws(ws), _filter(filter), _currentChild(0), _dedup(dedup) {}
void OrStage::addChild(PlanStage* child) {
- _children.push_back(child);
+ _children.emplace_back(child);
}
bool OrStage::isEOF() {
@@ -138,31 +132,12 @@ PlanStage::StageState OrStage::work(WorkingSetID* out) {
return childStatus;
}
-void OrStage::saveState() {
- ++_commonStats.yields;
- for (size_t i = 0; i < _children.size(); ++i) {
- _children[i]->saveState();
- }
-}
-
-void OrStage::restoreState(OperationContext* opCtx) {
- ++_commonStats.unyields;
- for (size_t i = 0; i < _children.size(); ++i) {
- _children[i]->restoreState(opCtx);
- }
-}
-
-void OrStage::invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
- ++_commonStats.invalidates;
-
+void OrStage::doInvalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
+ // TODO remove this since calling isEOF is illegal inside of doInvalidate().
if (isEOF()) {
return;
}
- for (size_t i = 0; i < _children.size(); ++i) {
- _children[i]->invalidate(txn, dl, type);
- }
-
// If we see DL again it is not the same record as it once was so we still want to
// return it.
if (_dedup && INVALIDATION_DELETION == type) {
@@ -174,10 +149,6 @@ void OrStage::invalidate(OperationContext* txn, const RecordId& dl, Invalidation
}
}
-vector<PlanStage*> OrStage::getChildren() const {
- return _children;
-}
-
unique_ptr<PlanStageStats> OrStage::getStats() {
_commonStats.isEOF = isEOF();
@@ -197,10 +168,6 @@ unique_ptr<PlanStageStats> OrStage::getStats() {
return ret;
}
-const CommonStats* OrStage::getCommonStats() const {
- return &_commonStats;
-}
-
const SpecificStats* OrStage::getSpecificStats() const {
return &_specificStats;
}
diff --git a/src/mongo/db/exec/or.h b/src/mongo/db/exec/or.h
index e2bf02d491a..7a10a78ae7d 100644
--- a/src/mongo/db/exec/or.h
+++ b/src/mongo/db/exec/or.h
@@ -46,7 +46,6 @@ namespace mongo {
class OrStage : public PlanStage {
public:
OrStage(WorkingSet* ws, bool dedup, const MatchExpression* filter);
- virtual ~OrStage();
void addChild(PlanStage* child);
@@ -54,11 +53,7 @@ public:
virtual StageState work(WorkingSetID* out);
- virtual void saveState();
- virtual void restoreState(OperationContext* opCtx);
- virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
-
- virtual std::vector<PlanStage*> getChildren() const;
+ virtual void doInvalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
virtual StageType stageType() const {
return STAGE_OR;
@@ -66,8 +61,6 @@ public:
virtual std::unique_ptr<PlanStageStats> getStats();
- virtual const CommonStats* getCommonStats() const;
-
virtual const SpecificStats* getSpecificStats() const;
static const char* kStageType;
@@ -79,9 +72,6 @@ private:
// The filter is not owned by us.
const MatchExpression* _filter;
- // Owned by us.
- std::vector<PlanStage*> _children;
-
// Which of _children are we calling work(...) on now?
size_t _currentChild;
@@ -92,7 +82,6 @@ private:
unordered_set<RecordId, RecordId::Hasher> _seen;
// Stats
- CommonStats _commonStats;
OrStats _specificStats;
};
diff --git a/src/mongo/db/exec/pipeline_proxy.cpp b/src/mongo/db/exec/pipeline_proxy.cpp
index 3aed8d3b4fb..5c6fa17b251 100644
--- a/src/mongo/db/exec/pipeline_proxy.cpp
+++ b/src/mongo/db/exec/pipeline_proxy.cpp
@@ -48,9 +48,9 @@ const char* PipelineProxyStage::kStageType = "PIPELINE_PROXY";
PipelineProxyStage::PipelineProxyStage(intrusive_ptr<Pipeline> pipeline,
const std::shared_ptr<PlanExecutor>& child,
WorkingSet* ws)
- : _pipeline(pipeline),
- _includeMetaData(_pipeline->getContext()->inShard) // send metadata to merger
- ,
+ : PlanStage(kStageType),
+ _pipeline(pipeline),
+ _includeMetaData(_pipeline->getContext()->inShard), // send metadata to merger
_childExec(child),
_ws(ws) {}
@@ -91,20 +91,20 @@ bool PipelineProxyStage::isEOF() {
return true;
}
-void PipelineProxyStage::invalidate(OperationContext* txn,
- const RecordId& dl,
- InvalidationType type) {
+void PipelineProxyStage::doInvalidate(OperationContext* txn,
+ const RecordId& dl,
+ InvalidationType type) {
// propagate to child executor if still in use
if (std::shared_ptr<PlanExecutor> exec = _childExec.lock()) {
exec->invalidate(txn, dl, type);
}
}
-void PipelineProxyStage::saveState() {
+void PipelineProxyStage::doSaveState() {
_pipeline->getContext()->opCtx = NULL;
}
-void PipelineProxyStage::restoreState(OperationContext* opCtx) {
+void PipelineProxyStage::doRestoreState(OperationContext* opCtx) {
invariant(_pipeline->getContext()->opCtx == NULL);
_pipeline->getContext()->opCtx = opCtx;
}
@@ -113,11 +113,6 @@ void PipelineProxyStage::pushBack(const BSONObj& obj) {
_stash.push_back(obj);
}
-vector<PlanStage*> PipelineProxyStage::getChildren() const {
- vector<PlanStage*> empty;
- return empty;
-}
-
unique_ptr<PlanStageStats> PipelineProxyStage::getStats() {
unique_ptr<PlanStageStats> ret =
make_unique<PlanStageStats>(CommonStats(kStageType), STAGE_PIPELINE_PROXY);
diff --git a/src/mongo/db/exec/pipeline_proxy.h b/src/mongo/db/exec/pipeline_proxy.h
index f40674fff38..517cf6ef393 100644
--- a/src/mongo/db/exec/pipeline_proxy.h
+++ b/src/mongo/db/exec/pipeline_proxy.h
@@ -52,14 +52,14 @@ public:
virtual bool isEOF();
- virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
+ virtual void doInvalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
//
// Manage our OperationContext. We intentionally don't propagate to the child
// Runner as that is handled by DocumentSourceCursor as it needs to.
//
- virtual void saveState();
- virtual void restoreState(OperationContext* opCtx);
+ virtual void doSaveState();
+ virtual void doRestoreState(OperationContext* opCtx);
/**
* Make obj the next object returned by getNext().
@@ -75,10 +75,6 @@ public:
// Returns empty PlanStageStats object
virtual std::unique_ptr<PlanStageStats> getStats();
- // Not used.
- virtual CommonStats* getCommonStats() const {
- return NULL;
- }
// Not used.
virtual SpecificStats* getSpecificStats() const {
@@ -86,9 +82,6 @@ public:
}
// Not used.
- virtual std::vector<PlanStage*> getChildren() const;
-
- // Not used.
virtual StageType stageType() const {
return STAGE_PIPELINE_PROXY;
}
diff --git a/src/mongo/db/exec/plan_stage.cpp b/src/mongo/db/exec/plan_stage.cpp
new file mode 100644
index 00000000000..5c0f9c95472
--- /dev/null
+++ b/src/mongo/db/exec/plan_stage.cpp
@@ -0,0 +1,64 @@
+/**
+ * Copyright (C) 2015 MongoDB Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the GNU Affero General Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#define MONGO_LOG_DEFAULT_COMPONENT ::mongo::logger::LogComponent::kQuery
+
+#include "mongo/platform/basic.h"
+
+#include "mongo/db/exec/plan_stage.h"
+
+namespace mongo {
+
+void PlanStage::saveState() {
+ ++_commonStats.yields;
+ for (auto&& child : _children) {
+ child->saveState();
+ }
+
+ doSaveState();
+}
+
+void PlanStage::restoreState(OperationContext* opCtx) {
+ ++_commonStats.unyields;
+ for (auto&& child : _children) {
+ child->restoreState(opCtx);
+ }
+
+ doRestoreState(opCtx);
+}
+
+void PlanStage::invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
+ ++_commonStats.invalidates;
+ for (auto&& child : _children) {
+ child->invalidate(txn, dl, type);
+ }
+
+ doInvalidate(txn, dl, type);
+}
+
+} // namespace mongo
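The three public methods are now non-virtual template methods: count the event, recurse into _children, then call the stage-specific hook. A minimal self-contained model of the same shape (simplified stand-in types, not the real MongoDB classes):

    // Sketch only: a toy Stage hierarchy showing the propagate-then-hook pattern.
    #include <iostream>
    #include <memory>
    #include <vector>

    class Stage {
    public:
        virtual ~Stage() = default;

        // Public entry point: counts the event, recurses into children, then calls the hook.
        void saveState() {
            ++_yields;
            for (auto&& child : _children) {
                child->saveState();
            }
            doSaveState();  // stage-specific work happens after the children are saved
        }

    protected:
        virtual void doSaveState() {}  // stages override only the hooks they actually need

        std::vector<std::unique_ptr<Stage>> _children;
        long long _yields = 0;
    };

    class LeafStage : public Stage {
        void doSaveState() override {
            std::cout << "leaf saved\n";  // e.g. save a cursor position
        }
    };

    class WrapperStage : public Stage {
    public:
        WrapperStage() {
            _children.emplace_back(new LeafStage());
        }
        // No override needed: the base class already walks _children.
    };

    int main() {
        WrapperStage root;
        root.saveState();  // prints "leaf saved"; both stages record the yield
        return 0;
    }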
diff --git a/src/mongo/db/exec/plan_stage.h b/src/mongo/db/exec/plan_stage.h
index 289c99cc936..85884f47f9c 100644
--- a/src/mongo/db/exec/plan_stage.h
+++ b/src/mongo/db/exec/plan_stage.h
@@ -28,6 +28,9 @@
#pragma once
+#include <memory>
+#include <vector>
+
#include "mongo/db/exec/plan_stats.h"
#include "mongo/db/exec/working_set.h"
#include "mongo/db/invalidation_type.h"
@@ -101,8 +104,12 @@ class OperationContext;
*/
class PlanStage {
public:
+ PlanStage(const char* typeName) : _commonStats(typeName) {}
+
virtual ~PlanStage() {}
+ using Children = std::vector<std::unique_ptr<PlanStage>>;
+
/**
* All possible return values of work(...)
*/
@@ -199,24 +206,22 @@ public:
//
/**
- * Notifies the stage that all locks are about to be released. The stage must save any
- * state required to resume where it was before saveState was called.
+ * Notifies the stage that the underlying data source may change.
+ *
+ * It is illegal to call work() or isEOF() when a stage is in the "saved" state.
*
- * Stages must be able to handle multiple calls to saveState() in a row without a call to
- * restoreState() in between.
+ * Propagates to all children, then calls doSaveState().
*/
- virtual void saveState() = 0;
+ void saveState();
/**
- * Notifies the stage that any required locks have been reacquired. The stage must restore
- * any saved state and be ready to handle calls to work().
+ * Notifies the stage that underlying data is stable again and prepares for calls to work().
*
- * Can only be called after saveState.
+ * Can only be called while the stage is in the "saved" state.
*
- * If the stage needs an OperationContext during its execution, it may keep a handle to the
- * provided OperationContext (which is valid until the next call to saveState()).
+ * Propagates to all children, then calls doRestoreState().
*/
- virtual void restoreState(OperationContext* opCtx) = 0;
+ void restoreState(OperationContext* opCtx);
/**
* Notifies a stage that a RecordId is going to be deleted (or in-place updated) so that the
@@ -228,14 +233,18 @@ public:
* The provided OperationContext should be used if any work needs to be performed during the
* invalidate (as the state of the stage must be saved before any calls to invalidate, the
* stage's own OperationContext is inactive during the invalidate and should not be used).
+ *
+ * Propagates to all children, then calls doInvalidate().
*/
- virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) = 0;
+ void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
/**
* Retrieve a list of this stage's children. This stage keeps ownership of
* its children.
*/
- virtual std::vector<PlanStage*> getChildren() const = 0;
+ const Children& getChildren() const {
+ return _children;
+ }
/**
* What type of stage is this?
@@ -262,7 +271,9 @@ public:
* It must not exist past the stage. If you need the stats to outlive the stage,
* use the getStats(...) method above.
*/
- virtual const CommonStats* getCommonStats() const = 0;
+ const CommonStats* getCommonStats() const {
+ return &_commonStats;
+ }
/**
* Get stats specific to this stage. Some stages may not have specific stats, in which
@@ -273,6 +284,42 @@ public:
* use the getStats(...) method above.
*/
virtual const SpecificStats* getSpecificStats() const = 0;
+
+protected:
+ /**
+ * Saves any stage-specific state required to resume where it was if the underlying data
+ * changes.
+ *
+ * Stages must be able to handle multiple calls to doSaveState() in a row without a call to
+ * doRestoreState() in between.
+ */
+ virtual void doSaveState() {}
+
+ /**
+ * Restores any stage-specific saved state and prepares to handle calls to work().
+ *
+ * If the stage needs an OperationContext during its execution, it may keep a handle to the
+ * provided OperationContext (which is valid until the next call to saveState()).
+ */
+ virtual void doRestoreState(OperationContext* txn) {}
+
+ /**
+ * Does the stage-specific invalidation work.
+ */
+ virtual void doInvalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {}
+
+ /**
+ * Returns the only child.
+ *
+ * Convenience method for PlanStages that have exactly one child.
+ */
+ const std::unique_ptr<PlanStage>& child() const {
+ dassert(_children.size() == 1);
+ return _children.front();
+ }
+
+ Children _children;
+ CommonStats _commonStats;
};
} // namespace mongo
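
To illustrate the base-class contract established above, the following is a minimal sketch (not part of this commit) of a single-child stage written against it: the constructor forwards its type name to PlanStage, the child is handed to the base class via _children.emplace_back(), and only the stage-specific doInvalidate() hook is overridden, since saveState()/restoreState()/invalidate() propagation is now handled by PlanStage itself. FilterExampleStage, its stats handling, and the use of STAGE_UNKNOWN are illustrative assumptions, not code from this change.

#include "mongo/db/exec/plan_stage.h"
#include "mongo/db/exec/working_set.h"
#include "mongo/stdx/memory.h"

namespace mongo {

// Sketch only: a hypothetical single-child stage built on the refactored PlanStage.
class FilterExampleStage final : public PlanStage {
public:
    FilterExampleStage(WorkingSet* ws, PlanStage* childStage)
        : PlanStage("FILTER_EXAMPLE"), _ws(ws) {
        _children.emplace_back(childStage);  // base class owns the child and propagates to it
    }

    bool isEOF() final {
        return child()->isEOF();  // child() is the new single-child convenience accessor
    }

    StageState work(WorkingSetID* out) final {
        ++_commonStats.works;
        return child()->work(out);  // no hand-written save/restore/invalidate forwarding
    }

    StageType stageType() const final {
        return STAGE_UNKNOWN;
    }

    std::unique_ptr<PlanStageStats> getStats() final {
        _commonStats.isEOF = isEOF();
        auto ret = stdx::make_unique<PlanStageStats>(_commonStats, STAGE_UNKNOWN);
        ret->children.push_back(child()->getStats().release());
        return ret;
    }

    const SpecificStats* getSpecificStats() const final {
        return nullptr;  // this sketch keeps no stage-specific stats
    }

protected:
    void doInvalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) final {
        // Stage-specific invalidation only; PlanStage::invalidate() has already
        // bumped _commonStats.invalidates and notified the children.
    }

private:
    WorkingSet* _ws;  // not owned by us
};

}  // namespace mongo
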
diff --git a/src/mongo/db/exec/projection.cpp b/src/mongo/db/exec/projection.cpp
index 4a10248e6a9..a5698a266ab 100644
--- a/src/mongo/db/exec/projection.cpp
+++ b/src/mongo/db/exec/projection.cpp
@@ -55,7 +55,8 @@ const char* ProjectionStage::kStageType = "PROJECTION";
ProjectionStage::ProjectionStage(const ProjectionStageParams& params,
WorkingSet* ws,
PlanStage* child)
- : _ws(ws), _child(child), _commonStats(kStageType), _projImpl(params.projImpl) {
+ : PlanStage(kStageType), _ws(ws), _projImpl(params.projImpl) {
+ _children.emplace_back(child);
_projObj = params.projObj;
if (ProjectionStageParams::NO_FAST_PATH == _projImpl) {
@@ -189,10 +190,8 @@ Status ProjectionStage::transform(WorkingSetMember* member) {
return Status::OK();
}
-ProjectionStage::~ProjectionStage() {}
-
bool ProjectionStage::isEOF() {
- return _child->isEOF();
+ return child()->isEOF();
}
PlanStage::StageState ProjectionStage::work(WorkingSetID* out) {
@@ -202,7 +201,7 @@ PlanStage::StageState ProjectionStage::work(WorkingSetID* out) {
ScopedTimer timer(&_commonStats.executionTimeMillis);
WorkingSetID id = WorkingSet::INVALID_ID;
- StageState status = _child->work(&id);
+ StageState status = child()->work(&id);
// Note that we don't do the normal if isEOF() return EOF thing here. Our child might be a
// tailable cursor and isEOF() would be true even if it had more data...
@@ -239,27 +238,6 @@ PlanStage::StageState ProjectionStage::work(WorkingSetID* out) {
return status;
}
-void ProjectionStage::saveState() {
- ++_commonStats.yields;
- _child->saveState();
-}
-
-void ProjectionStage::restoreState(OperationContext* opCtx) {
- ++_commonStats.unyields;
- _child->restoreState(opCtx);
-}
-
-void ProjectionStage::invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
- ++_commonStats.invalidates;
- _child->invalidate(txn, dl, type);
-}
-
-vector<PlanStage*> ProjectionStage::getChildren() const {
- vector<PlanStage*> children;
- children.push_back(_child.get());
- return children;
-}
-
unique_ptr<PlanStageStats> ProjectionStage::getStats() {
_commonStats.isEOF = isEOF();
unique_ptr<PlanStageStats> ret = make_unique<PlanStageStats>(_commonStats, STAGE_PROJECTION);
@@ -268,14 +246,10 @@ unique_ptr<PlanStageStats> ProjectionStage::getStats() {
projStats->projObj = _projObj;
ret->specific = std::move(projStats);
- ret->children.push_back(_child->getStats().release());
+ ret->children.push_back(child()->getStats().release());
return ret;
}
-const CommonStats* ProjectionStage::getCommonStats() const {
- return &_commonStats;
-}
-
const SpecificStats* ProjectionStage::getSpecificStats() const {
return &_specificStats;
}
diff --git a/src/mongo/db/exec/projection.h b/src/mongo/db/exec/projection.h
index 1810f727ce6..7fa5f0fcc2d 100644
--- a/src/mongo/db/exec/projection.h
+++ b/src/mongo/db/exec/projection.h
@@ -77,25 +77,15 @@ class ProjectionStage : public PlanStage {
public:
ProjectionStage(const ProjectionStageParams& params, WorkingSet* ws, PlanStage* child);
- virtual ~ProjectionStage();
-
virtual bool isEOF();
virtual StageState work(WorkingSetID* out);
- virtual void saveState();
- virtual void restoreState(OperationContext* opCtx);
- virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
-
- virtual std::vector<PlanStage*> getChildren() const;
-
virtual StageType stageType() const {
return STAGE_PROJECTION;
}
std::unique_ptr<PlanStageStats> getStats();
- virtual const CommonStats* getCommonStats() const;
-
virtual const SpecificStats* getSpecificStats() const;
typedef unordered_set<StringData, StringData::Hasher> FieldSet;
@@ -126,10 +116,8 @@ private:
// _ws is not owned by us.
WorkingSet* _ws;
- std::unique_ptr<PlanStage> _child;
// Stats
- CommonStats _commonStats;
ProjectionStats _specificStats;
// Fast paths:
diff --git a/src/mongo/db/exec/queued_data_stage.cpp b/src/mongo/db/exec/queued_data_stage.cpp
index e51ae60c347..36edcab1893 100644
--- a/src/mongo/db/exec/queued_data_stage.cpp
+++ b/src/mongo/db/exec/queued_data_stage.cpp
@@ -40,7 +40,7 @@ using stdx::make_unique;
const char* QueuedDataStage::kStageType = "QUEUED_DATA";
-QueuedDataStage::QueuedDataStage(WorkingSet* ws) : _ws(ws), _commonStats(kStageType) {}
+QueuedDataStage::QueuedDataStage(WorkingSet* ws) : PlanStage(kStageType), _ws(ws) {}
PlanStage::StageState QueuedDataStage::work(WorkingSetID* out) {
++_commonStats.works;
@@ -70,18 +70,6 @@ bool QueuedDataStage::isEOF() {
return _results.empty();
}
-void QueuedDataStage::saveState() {
- ++_commonStats.yields;
-}
-
-void QueuedDataStage::restoreState(OperationContext* opCtx) {
- ++_commonStats.unyields;
-}
-
-void QueuedDataStage::invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
- ++_commonStats.invalidates;
-}
-
unique_ptr<PlanStageStats> QueuedDataStage::getStats() {
_commonStats.isEOF = isEOF();
unique_ptr<PlanStageStats> ret = make_unique<PlanStageStats>(_commonStats, STAGE_QUEUED_DATA);
@@ -89,9 +77,6 @@ unique_ptr<PlanStageStats> QueuedDataStage::getStats() {
return ret;
}
-const CommonStats* QueuedDataStage::getCommonStats() const {
- return &_commonStats;
-}
const SpecificStats* QueuedDataStage::getSpecificStats() const {
return &_specificStats;
@@ -109,9 +94,4 @@ void QueuedDataStage::pushBack(const WorkingSetID& id) {
_members.push(id);
}
-vector<PlanStage*> QueuedDataStage::getChildren() const {
- vector<PlanStage*> empty;
- return empty;
-}
-
} // namespace mongo
diff --git a/src/mongo/db/exec/queued_data_stage.h b/src/mongo/db/exec/queued_data_stage.h
index ec93eb1dfb3..cceaaf10457 100644
--- a/src/mongo/db/exec/queued_data_stage.h
+++ b/src/mongo/db/exec/queued_data_stage.h
@@ -48,21 +48,11 @@ class RecordId;
class QueuedDataStage : public PlanStage {
public:
QueuedDataStage(WorkingSet* ws);
- virtual ~QueuedDataStage() {}
virtual StageState work(WorkingSetID* out);
virtual bool isEOF();
- // These don't really mean anything here.
- // Some day we could count the # of calls to the yield functions to check that other stages
- // have correct yielding behavior.
- virtual void saveState();
- virtual void restoreState(OperationContext* opCtx);
- virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
-
- virtual std::vector<PlanStage*> getChildren() const;
-
virtual StageType stageType() const {
return STAGE_QUEUED_DATA;
}
@@ -73,8 +63,6 @@ public:
virtual std::unique_ptr<PlanStageStats> getStats();
- virtual const CommonStats* getCommonStats() const;
-
virtual const SpecificStats* getSpecificStats() const;
/**
@@ -110,7 +98,6 @@ private:
std::queue<WorkingSetID> _members;
// Stats
- CommonStats _commonStats;
MockStats _specificStats;
};
diff --git a/src/mongo/db/exec/shard_filter.cpp b/src/mongo/db/exec/shard_filter.cpp
index 26fe05144dd..1339e13bd3e 100644
--- a/src/mongo/db/exec/shard_filter.cpp
+++ b/src/mongo/db/exec/shard_filter.cpp
@@ -53,12 +53,14 @@ const char* ShardFilterStage::kStageType = "SHARDING_FILTER";
ShardFilterStage::ShardFilterStage(const shared_ptr<CollectionMetadata>& metadata,
WorkingSet* ws,
PlanStage* child)
- : _ws(ws), _child(child), _commonStats(kStageType), _metadata(metadata) {}
+ : PlanStage(kStageType), _ws(ws), _metadata(metadata) {
+ _children.emplace_back(child);
+}
ShardFilterStage::~ShardFilterStage() {}
bool ShardFilterStage::isEOF() {
- return _child->isEOF();
+ return child()->isEOF();
}
PlanStage::StageState ShardFilterStage::work(WorkingSetID* out) {
@@ -72,7 +74,7 @@ PlanStage::StageState ShardFilterStage::work(WorkingSetID* out) {
return PlanStage::IS_EOF;
}
- StageState status = _child->work(out);
+ StageState status = child()->work(out);
if (PlanStage::ADVANCED == status) {
// If we're sharded make sure that we don't return data that is not owned by us,
@@ -129,42 +131,15 @@ PlanStage::StageState ShardFilterStage::work(WorkingSetID* out) {
return status;
}
-void ShardFilterStage::saveState() {
- ++_commonStats.yields;
- _child->saveState();
-}
-
-void ShardFilterStage::restoreState(OperationContext* opCtx) {
- ++_commonStats.unyields;
- _child->restoreState(opCtx);
-}
-
-void ShardFilterStage::invalidate(OperationContext* txn,
- const RecordId& dl,
- InvalidationType type) {
- ++_commonStats.invalidates;
- _child->invalidate(txn, dl, type);
-}
-
-vector<PlanStage*> ShardFilterStage::getChildren() const {
- vector<PlanStage*> children;
- children.push_back(_child.get());
- return children;
-}
-
unique_ptr<PlanStageStats> ShardFilterStage::getStats() {
_commonStats.isEOF = isEOF();
unique_ptr<PlanStageStats> ret =
make_unique<PlanStageStats>(_commonStats, STAGE_SHARDING_FILTER);
- ret->children.push_back(_child->getStats().release());
+ ret->children.push_back(child()->getStats().release());
ret->specific = make_unique<ShardingFilterStats>(_specificStats);
return ret;
}
-const CommonStats* ShardFilterStage::getCommonStats() const {
- return &_commonStats;
-}
-
const SpecificStats* ShardFilterStage::getSpecificStats() const {
return &_specificStats;
}
diff --git a/src/mongo/db/exec/shard_filter.h b/src/mongo/db/exec/shard_filter.h
index a61eefc2f23..be3f98d526b 100644
--- a/src/mongo/db/exec/shard_filter.h
+++ b/src/mongo/db/exec/shard_filter.h
@@ -79,30 +79,20 @@ public:
virtual bool isEOF();
virtual StageState work(WorkingSetID* out);
- virtual void saveState();
- virtual void restoreState(OperationContext* opCtx);
- virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
-
- virtual std::vector<PlanStage*> getChildren() const;
-
virtual StageType stageType() const {
return STAGE_SHARDING_FILTER;
}
virtual std::unique_ptr<PlanStageStats> getStats();
- virtual const CommonStats* getCommonStats() const;
-
virtual const SpecificStats* getSpecificStats() const;
static const char* kStageType;
private:
WorkingSet* _ws;
- std::unique_ptr<PlanStage> _child;
// Stats
- CommonStats _commonStats;
ShardingFilterStats _specificStats;
// Note: it is important that this is the metadata from the time this stage is constructed.
diff --git a/src/mongo/db/exec/skip.cpp b/src/mongo/db/exec/skip.cpp
index b223b75f364..140c4a1bdb9 100644
--- a/src/mongo/db/exec/skip.cpp
+++ b/src/mongo/db/exec/skip.cpp
@@ -42,12 +42,14 @@ using stdx::make_unique;
const char* SkipStage::kStageType = "SKIP";
SkipStage::SkipStage(long long toSkip, WorkingSet* ws, PlanStage* child)
- : _ws(ws), _child(child), _toSkip(toSkip), _commonStats(kStageType) {}
+ : PlanStage(kStageType), _ws(ws), _toSkip(toSkip) {
+ _children.emplace_back(child);
+}
SkipStage::~SkipStage() {}
bool SkipStage::isEOF() {
- return _child->isEOF();
+ return child()->isEOF();
}
PlanStage::StageState SkipStage::work(WorkingSetID* out) {
@@ -57,7 +59,7 @@ PlanStage::StageState SkipStage::work(WorkingSetID* out) {
ScopedTimer timer(&_commonStats.executionTimeMillis);
WorkingSetID id = WorkingSet::INVALID_ID;
- StageState status = _child->work(&id);
+ StageState status = child()->work(&id);
if (PlanStage::ADVANCED == status) {
// If we're still skipping results...
@@ -95,40 +97,15 @@ PlanStage::StageState SkipStage::work(WorkingSetID* out) {
return status;
}
-void SkipStage::saveState() {
- ++_commonStats.yields;
- _child->saveState();
-}
-
-void SkipStage::restoreState(OperationContext* opCtx) {
- ++_commonStats.unyields;
- _child->restoreState(opCtx);
-}
-
-void SkipStage::invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
- ++_commonStats.invalidates;
- _child->invalidate(txn, dl, type);
-}
-
-vector<PlanStage*> SkipStage::getChildren() const {
- vector<PlanStage*> children;
- children.push_back(_child.get());
- return children;
-}
-
unique_ptr<PlanStageStats> SkipStage::getStats() {
_commonStats.isEOF = isEOF();
_specificStats.skip = _toSkip;
unique_ptr<PlanStageStats> ret = make_unique<PlanStageStats>(_commonStats, STAGE_SKIP);
ret->specific = make_unique<SkipStats>(_specificStats);
- ret->children.push_back(_child->getStats().release());
+ ret->children.push_back(child()->getStats().release());
return ret;
}
-const CommonStats* SkipStage::getCommonStats() const {
- return &_commonStats;
-}
-
const SpecificStats* SkipStage::getSpecificStats() const {
return &_specificStats;
}
diff --git a/src/mongo/db/exec/skip.h b/src/mongo/db/exec/skip.h
index c8f5105e3da..3d2b165abd8 100644
--- a/src/mongo/db/exec/skip.h
+++ b/src/mongo/db/exec/skip.h
@@ -49,33 +49,23 @@ public:
virtual bool isEOF();
virtual StageState work(WorkingSetID* out);
- virtual void saveState();
- virtual void restoreState(OperationContext* opCtx);
- virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
-
- virtual std::vector<PlanStage*> getChildren() const;
-
virtual StageType stageType() const {
return STAGE_SKIP;
}
virtual std::unique_ptr<PlanStageStats> getStats();
- virtual const CommonStats* getCommonStats() const;
-
virtual const SpecificStats* getSpecificStats() const;
static const char* kStageType;
private:
WorkingSet* _ws;
- std::unique_ptr<PlanStage> _child;
// We drop the first _toSkip results that we would have returned.
long long _toSkip;
// Stats
- CommonStats _commonStats;
SkipStats _specificStats;
};
diff --git a/src/mongo/db/exec/sort.cpp b/src/mongo/db/exec/sort.cpp
index 246462f953c..d7281e1b38e 100644
--- a/src/mongo/db/exec/sort.cpp
+++ b/src/mongo/db/exec/sort.cpp
@@ -279,23 +279,24 @@ bool SortStage::WorkingSetComparator::operator()(const SortableDataItem& lhs,
}
SortStage::SortStage(const SortStageParams& params, WorkingSet* ws, PlanStage* child)
- : _collection(params.collection),
+ : PlanStage(kStageType),
+ _collection(params.collection),
_ws(ws),
- _child(child),
_pattern(params.pattern),
_query(params.query),
_limit(params.limit),
_sorted(false),
_resultIterator(_data.end()),
- _commonStats(kStageType),
- _memUsage(0) {}
+ _memUsage(0) {
+ _children.emplace_back(child);
+}
SortStage::~SortStage() {}
bool SortStage::isEOF() {
// We're done when our child has no more results, we've sorted the child's results, and
// we've returned all sorted results.
- return _child->isEOF() && _sorted && (_data.end() == _resultIterator);
+ return child()->isEOF() && _sorted && (_data.end() == _resultIterator);
}
PlanStage::StageState SortStage::work(WorkingSetID* out) {
@@ -334,7 +335,7 @@ PlanStage::StageState SortStage::work(WorkingSetID* out) {
// Still reading in results to sort.
if (!_sorted) {
WorkingSetID id = WorkingSet::INVALID_ID;
- StageState code = _child->work(&id);
+ StageState code = child()->work(&id);
if (PlanStage::ADVANCED == code) {
// Add it into the map for quick invalidation if it has a valid RecordId.
@@ -414,20 +415,7 @@ PlanStage::StageState SortStage::work(WorkingSetID* out) {
return PlanStage::ADVANCED;
}
-void SortStage::saveState() {
- ++_commonStats.yields;
- _child->saveState();
-}
-
-void SortStage::restoreState(OperationContext* opCtx) {
- ++_commonStats.unyields;
- _child->restoreState(opCtx);
-}
-
-void SortStage::invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
- ++_commonStats.invalidates;
- _child->invalidate(txn, dl, type);
-
+void SortStage::doInvalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
// If we have a deletion, we can fetch and carry on.
// If we have a mutation, it's easier to fetch and use the previous document.
// So, no matter what, fetch and keep the doc in play.
@@ -451,12 +439,6 @@ void SortStage::invalidate(OperationContext* txn, const RecordId& dl, Invalidati
}
}
-vector<PlanStage*> SortStage::getChildren() const {
- vector<PlanStage*> children;
- children.push_back(_child.get());
- return children;
-}
-
unique_ptr<PlanStageStats> SortStage::getStats() {
_commonStats.isEOF = isEOF();
const size_t maxBytes = static_cast<size_t>(internalQueryExecMaxBlockingSortBytes);
@@ -467,14 +449,10 @@ unique_ptr<PlanStageStats> SortStage::getStats() {
unique_ptr<PlanStageStats> ret = make_unique<PlanStageStats>(_commonStats, STAGE_SORT);
ret->specific = make_unique<SortStats>(_specificStats);
- ret->children.push_back(_child->getStats().release());
+ ret->children.push_back(child()->getStats().release());
return ret;
}
-const CommonStats* SortStage::getCommonStats() const {
- return &_commonStats;
-}
-
const SpecificStats* SortStage::getSpecificStats() const {
return &_specificStats;
}
diff --git a/src/mongo/db/exec/sort.h b/src/mongo/db/exec/sort.h
index 289ab0772b0..39a32ba9c88 100644
--- a/src/mongo/db/exec/sort.h
+++ b/src/mongo/db/exec/sort.h
@@ -142,17 +142,12 @@ private:
class SortStage : public PlanStage {
public:
SortStage(const SortStageParams& params, WorkingSet* ws, PlanStage* child);
-
virtual ~SortStage();
virtual bool isEOF();
virtual StageState work(WorkingSetID* out);
- virtual void saveState();
- virtual void restoreState(OperationContext* opCtx);
- virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
-
- virtual std::vector<PlanStage*> getChildren() const;
+ virtual void doInvalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
virtual StageType stageType() const {
return STAGE_SORT;
@@ -160,8 +155,6 @@ public:
std::unique_ptr<PlanStageStats> getStats();
- virtual const CommonStats* getCommonStats() const;
-
virtual const SpecificStats* getSpecificStats() const;
static const char* kStageType;
@@ -177,9 +170,6 @@ private:
// Not owned by us.
WorkingSet* _ws;
- // Where we're reading data to sort from.
- std::unique_ptr<PlanStage> _child;
-
// The raw sort _pattern as expressed by the user
BSONObj _pattern;
@@ -260,11 +250,6 @@ private:
typedef unordered_map<RecordId, WorkingSetID, RecordId::Hasher> DataMap;
DataMap _wsidByDiskLoc;
- //
- // Stats
- //
-
- CommonStats _commonStats;
SortStats _specificStats;
// The usage in bytes of all buffered data that we're sorting.
diff --git a/src/mongo/db/exec/subplan.cpp b/src/mongo/db/exec/subplan.cpp
index b0a9717ba64..bb9b0abb14f 100644
--- a/src/mongo/db/exec/subplan.cpp
+++ b/src/mongo/db/exec/subplan.cpp
@@ -59,12 +59,12 @@ SubplanStage::SubplanStage(OperationContext* txn,
WorkingSet* ws,
const QueryPlannerParams& params,
CanonicalQuery* cq)
- : _txn(txn),
+ : PlanStage(kStageType),
+ _txn(txn),
_collection(collection),
_ws(ws),
_plannerParams(params),
- _query(cq),
- _commonStats(kStageType) {
+ _query(cq) {
invariant(_collection);
}
@@ -323,8 +323,7 @@ Status SubplanStage::choosePlanForSubqueries(PlanYieldPolicy* yieldPolicy) {
_ws->clear();
- _child.reset(new MultiPlanStage(_txn, _collection, branchResult->canonicalQuery.get()));
- MultiPlanStage* multiPlanStage = static_cast<MultiPlanStage*>(_child.get());
+ MultiPlanStage multiPlanStage(_txn, _collection, branchResult->canonicalQuery.get());
// Dump all the solutions into the MPS.
for (size_t ix = 0; ix < branchResult->solutions.size(); ++ix) {
@@ -333,22 +332,22 @@ Status SubplanStage::choosePlanForSubqueries(PlanYieldPolicy* yieldPolicy) {
_txn, _collection, *branchResult->solutions[ix], _ws, &nextPlanRoot));
// Takes ownership of solution with index 'ix' and 'nextPlanRoot'.
- multiPlanStage->addPlan(branchResult->solutions.releaseAt(ix), nextPlanRoot, _ws);
+ multiPlanStage.addPlan(branchResult->solutions.releaseAt(ix), nextPlanRoot, _ws);
}
- Status planSelectStat = multiPlanStage->pickBestPlan(yieldPolicy);
+ Status planSelectStat = multiPlanStage.pickBestPlan(yieldPolicy);
if (!planSelectStat.isOK()) {
return planSelectStat;
}
- if (!multiPlanStage->bestPlanChosen()) {
+ if (!multiPlanStage.bestPlanChosen()) {
mongoutils::str::stream ss;
ss << "Failed to pick best plan for subchild "
<< branchResult->canonicalQuery->toString();
return Status(ErrorCodes::BadValue, ss);
}
- QuerySolution* bestSoln = multiPlanStage->bestSolution();
+ QuerySolution* bestSoln = multiPlanStage.bestSolution();
// Check that we have good cache data. For example, we don't cache things
// for 2d indices.
@@ -410,7 +409,8 @@ Status SubplanStage::choosePlanForSubqueries(PlanYieldPolicy* yieldPolicy) {
_ws->clear();
PlanStage* root;
invariant(StageBuilder::build(_txn, _collection, *_compositeSolution.get(), _ws, &root));
- _child.reset(root);
+ invariant(_children.empty());
+ _children.emplace_back(root);
return Status::OK();
}
@@ -442,7 +442,8 @@ Status SubplanStage::choosePlanWholeQuery(PlanYieldPolicy* yieldPolicy) {
PlanStage* root;
// Only one possible plan. Run it. Build the stages from the solution.
verify(StageBuilder::build(_txn, _collection, *solutions[0], _ws, &root));
- _child.reset(root);
+ invariant(_children.empty());
+ _children.emplace_back(root);
// This SubplanStage takes ownership of the query solution.
_compositeSolution.reset(solutions.popAndReleaseBack());
@@ -451,8 +452,9 @@ Status SubplanStage::choosePlanWholeQuery(PlanYieldPolicy* yieldPolicy) {
} else {
// Many solutions. Create a MultiPlanStage to pick the best, update the cache,
// and so on. The working set will be shared by all candidate plans.
- _child.reset(new MultiPlanStage(_txn, _collection, _query));
- MultiPlanStage* multiPlanStage = static_cast<MultiPlanStage*>(_child.get());
+ invariant(_children.empty());
+ _children.emplace_back(new MultiPlanStage(_txn, _collection, _query));
+ MultiPlanStage* multiPlanStage = static_cast<MultiPlanStage*>(child().get());
for (size_t ix = 0; ix < solutions.size(); ++ix) {
if (solutions[ix]->cacheData.get()) {
@@ -500,8 +502,8 @@ Status SubplanStage::pickBestPlan(PlanYieldPolicy* yieldPolicy) {
bool SubplanStage::isEOF() {
// If we're running we best have a runner.
- invariant(_child.get());
- return _child->isEOF();
+ invariant(child());
+ return child()->isEOF();
}
PlanStage::StageState SubplanStage::work(WorkingSetID* out) {
@@ -514,8 +516,8 @@ PlanStage::StageState SubplanStage::work(WorkingSetID* out) {
return PlanStage::IS_EOF;
}
- invariant(_child.get());
- StageState state = _child->work(out);
+ invariant(child());
+ StageState state = child()->work(out);
if (PlanStage::NEED_TIME == state) {
++_commonStats.needTime;
@@ -528,49 +530,14 @@ PlanStage::StageState SubplanStage::work(WorkingSetID* out) {
return state;
}
-void SubplanStage::saveState() {
- _txn = NULL;
- ++_commonStats.yields;
-
- // We're ranking a sub-plan via an MPS or we're streaming results from this stage. Either
- // way, pass on the request.
- if (NULL != _child.get()) {
- _child->saveState();
- }
-}
-
-void SubplanStage::restoreState(OperationContext* opCtx) {
- invariant(_txn == NULL);
+void SubplanStage::doRestoreState(OperationContext* opCtx) {
_txn = opCtx;
- ++_commonStats.unyields;
-
- // We're ranking a sub-plan via an MPS or we're streaming results from this stage. Either
- // way, pass on the request.
- if (NULL != _child.get()) {
- _child->restoreState(opCtx);
- }
-}
-
-void SubplanStage::invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
- ++_commonStats.invalidates;
-
- if (NULL != _child.get()) {
- _child->invalidate(txn, dl, type);
- }
-}
-
-std::vector<PlanStage*> SubplanStage::getChildren() const {
- std::vector<PlanStage*> children;
- if (NULL != _child.get()) {
- children.push_back(_child.get());
- }
- return children;
}
unique_ptr<PlanStageStats> SubplanStage::getStats() {
_commonStats.isEOF = isEOF();
unique_ptr<PlanStageStats> ret = make_unique<PlanStageStats>(_commonStats, STAGE_SUBPLAN);
- ret->children.push_back(_child->getStats().release());
+ ret->children.push_back(child()->getStats().release());
return ret;
}
@@ -578,10 +545,6 @@ bool SubplanStage::branchPlannedFromCache(size_t i) const {
return NULL != _branchResults[i]->cachedSolution.get();
}
-const CommonStats* SubplanStage::getCommonStats() const {
- return &_commonStats;
-}
-
const SpecificStats* SubplanStage::getSpecificStats() const {
return NULL;
}
diff --git a/src/mongo/db/exec/subplan.h b/src/mongo/db/exec/subplan.h
index c2db2c34fee..d1a54db9d34 100644
--- a/src/mongo/db/exec/subplan.h
+++ b/src/mongo/db/exec/subplan.h
@@ -77,11 +77,7 @@ public:
virtual bool isEOF();
virtual StageState work(WorkingSetID* out);
- virtual void saveState();
- virtual void restoreState(OperationContext* opCtx);
- virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
-
- virtual std::vector<PlanStage*> getChildren() const;
+ virtual void doRestoreState(OperationContext* opCtx);
virtual StageType stageType() const {
return STAGE_SUBPLAN;
@@ -89,8 +85,6 @@ public:
std::unique_ptr<PlanStageStats> getStats();
- virtual const CommonStats* getCommonStats() const;
-
virtual const SpecificStats* getSpecificStats() const;
static const char* kStageType;
@@ -203,15 +197,11 @@ private:
// independently, that solution is owned here.
std::unique_ptr<QuerySolution> _compositeSolution;
- std::unique_ptr<PlanStage> _child;
-
// Holds a list of the results from planning each branch.
OwnedPointerVector<BranchPlanningResult> _branchResults;
// We need this to extract cache-friendly index data from the index assignments.
std::map<BSONObj, size_t> _indexMap;
-
- CommonStats _commonStats;
};
} // namespace mongo
diff --git a/src/mongo/db/exec/text.cpp b/src/mongo/db/exec/text.cpp
index 56192851bd9..b7cc0789286 100644
--- a/src/mongo/db/exec/text.cpp
+++ b/src/mongo/db/exec/text.cpp
@@ -60,16 +60,15 @@ TextStage::TextStage(OperationContext* txn,
const TextStageParams& params,
WorkingSet* ws,
const MatchExpression* filter)
- : _params(params), _textTreeRoot(buildTextTree(txn, ws, filter)), _commonStats(kStageType) {
+ : PlanStage(kStageType), _params(params) {
+ _children.emplace_back(buildTextTree(txn, ws, filter));
_specificStats.indexPrefix = _params.indexPrefix;
_specificStats.indexName = _params.index->indexName();
_specificStats.parsedTextQuery = _params.query.toBSON();
}
-TextStage::~TextStage() {}
-
bool TextStage::isEOF() {
- return _textTreeRoot->isEOF();
+ return child()->isEOF();
}
PlanStage::StageState TextStage::work(WorkingSetID* out) {
@@ -82,7 +81,7 @@ PlanStage::StageState TextStage::work(WorkingSetID* out) {
return PlanStage::IS_EOF;
}
- PlanStage::StageState stageState = _textTreeRoot->work(out);
+ PlanStage::StageState stageState = child()->work(out);
// Increment common stats counters that are specific to the return value of work().
switch (stageState) {
@@ -102,41 +101,15 @@ PlanStage::StageState TextStage::work(WorkingSetID* out) {
return stageState;
}
-void TextStage::saveState() {
- ++_commonStats.yields;
-
- _textTreeRoot->saveState();
-}
-
-void TextStage::restoreState(OperationContext* opCtx) {
- ++_commonStats.unyields;
-
- _textTreeRoot->restoreState(opCtx);
-}
-
-void TextStage::invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
- ++_commonStats.invalidates;
-
- _textTreeRoot->invalidate(txn, dl, type);
-}
-
-vector<PlanStage*> TextStage::getChildren() const {
- return {_textTreeRoot.get()};
-}
-
unique_ptr<PlanStageStats> TextStage::getStats() {
_commonStats.isEOF = isEOF();
unique_ptr<PlanStageStats> ret = make_unique<PlanStageStats>(_commonStats, STAGE_TEXT);
ret->specific = make_unique<TextStats>(_specificStats);
- ret->children.push_back(_textTreeRoot->getStats().release());
+ ret->children.push_back(child()->getStats().release());
return ret;
}
-const CommonStats* TextStage::getCommonStats() const {
- return &_commonStats;
-}
-
const SpecificStats* TextStage::getSpecificStats() const {
return &_specificStats;
}
diff --git a/src/mongo/db/exec/text.h b/src/mongo/db/exec/text.h
index 27b5bc62231..4d0d57f66d0 100644
--- a/src/mongo/db/exec/text.h
+++ b/src/mongo/db/exec/text.h
@@ -76,25 +76,16 @@ public:
WorkingSet* ws,
const MatchExpression* filter);
- ~TextStage() final;
StageState work(WorkingSetID* out) final;
bool isEOF() final;
- void saveState() final;
- void restoreState(OperationContext* opCtx) final;
- void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) final;
-
- vector<PlanStage*> getChildren() const;
-
StageType stageType() const final {
return STAGE_TEXT;
}
std::unique_ptr<PlanStageStats> getStats();
- const CommonStats* getCommonStats() const final;
-
const SpecificStats* getSpecificStats() const final;
static const char* kStageType;
@@ -110,11 +101,7 @@ private:
// Parameters of this text stage.
TextStageParams _params;
- // The root of the text query tree.
- unique_ptr<PlanStage> _textTreeRoot;
-
// Stats.
- CommonStats _commonStats;
TextStats _specificStats;
};
diff --git a/src/mongo/db/exec/text_match.cpp b/src/mongo/db/exec/text_match.cpp
index 8edb78b2a3e..bf92b38a57b 100644
--- a/src/mongo/db/exec/text_match.cpp
+++ b/src/mongo/db/exec/text_match.cpp
@@ -49,34 +49,14 @@ TextMatchStage::TextMatchStage(unique_ptr<PlanStage> child,
const FTSQuery& query,
const FTSSpec& spec,
WorkingSet* ws)
- : _ftsMatcher(query, spec), _ws(ws), _child(std::move(child)), _commonStats(kStageType) {}
+ : PlanStage(kStageType), _ftsMatcher(query, spec), _ws(ws) {
+ _children.emplace_back(std::move(child));
+}
TextMatchStage::~TextMatchStage() {}
bool TextMatchStage::isEOF() {
- return _child->isEOF();
-}
-
-void TextMatchStage::saveState() {
- ++_commonStats.yields;
-
- _child->saveState();
-}
-
-void TextMatchStage::restoreState(OperationContext* opCtx) {
- ++_commonStats.unyields;
-
- _child->restoreState(opCtx);
-}
-
-void TextMatchStage::invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
- ++_commonStats.invalidates;
-
- _child->invalidate(txn, dl, type);
-}
-
-vector<PlanStage*> TextMatchStage::getChildren() const {
- return {_child.get()};
+ return child()->isEOF();
}
std::unique_ptr<PlanStageStats> TextMatchStage::getStats() {
@@ -84,15 +64,11 @@ std::unique_ptr<PlanStageStats> TextMatchStage::getStats() {
unique_ptr<PlanStageStats> ret = make_unique<PlanStageStats>(_commonStats, STAGE_TEXT_MATCH);
ret->specific = make_unique<TextMatchStats>(_specificStats);
- ret->children.push_back(_child->getStats().release());
+ ret->children.push_back(child()->getStats().release());
return ret;
}
-const CommonStats* TextMatchStage::getCommonStats() const {
- return &_commonStats;
-}
-
const SpecificStats* TextMatchStage::getSpecificStats() const {
return &_specificStats;
}
@@ -108,7 +84,7 @@ PlanStage::StageState TextMatchStage::work(WorkingSetID* out) {
}
// Retrieve fetched document from child.
- StageState stageState = _child->work(out);
+ StageState stageState = child()->work(out);
if (stageState == PlanStage::ADVANCED) {
// We just successfully retrieved a fetched doc.
diff --git a/src/mongo/db/exec/text_match.h b/src/mongo/db/exec/text_match.h
index bc1cf6b528f..cacec80c4e6 100644
--- a/src/mongo/db/exec/text_match.h
+++ b/src/mongo/db/exec/text_match.h
@@ -69,20 +69,12 @@ public:
StageState work(WorkingSetID* out) final;
- void saveState() final;
- void restoreState(OperationContext* opCtx) final;
- void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) final;
-
- std::vector<PlanStage*> getChildren() const final;
-
StageType stageType() const final {
return STAGE_TEXT_MATCH;
}
std::unique_ptr<PlanStageStats> getStats() final;
- const CommonStats* getCommonStats() const final;
-
const SpecificStats* getSpecificStats() const final;
static const char* kStageType;
@@ -94,11 +86,6 @@ private:
// Not owned by us.
WorkingSet* _ws;
- // The child PlanStage that provides the RecordIDs and scores for text matching.
- unique_ptr<PlanStage> _child;
-
- // Stats
- CommonStats _commonStats;
TextMatchStats _specificStats;
};
} // namespace mongo
diff --git a/src/mongo/db/exec/text_or.cpp b/src/mongo/db/exec/text_or.cpp
index be600e3639d..d1e57871593 100644
--- a/src/mongo/db/exec/text_or.cpp
+++ b/src/mongo/db/exec/text_or.cpp
@@ -59,10 +59,10 @@ TextOrStage::TextOrStage(OperationContext* txn,
WorkingSet* ws,
const MatchExpression* filter,
IndexDescriptor* index)
- : _ftsSpec(ftsSpec),
+ : PlanStage(kStageType),
+ _ftsSpec(ftsSpec),
_ws(ws),
_scoreIterator(_scores.end()),
- _commonStats(kStageType),
_filter(filter),
_txn(txn),
_idRetrying(WorkingSet::INVALID_ID),
@@ -78,41 +78,22 @@ bool TextOrStage::isEOF() {
return _internalState == State::kDone;
}
-void TextOrStage::saveState() {
+void TextOrStage::doSaveState() {
_txn = NULL;
- ++_commonStats.yields;
-
- for (auto& child : _children) {
- child->saveState();
- }
-
if (_recordCursor) {
_recordCursor->saveUnpositioned();
}
}
-void TextOrStage::restoreState(OperationContext* opCtx) {
+void TextOrStage::doRestoreState(OperationContext* opCtx) {
invariant(_txn == NULL);
_txn = opCtx;
- ++_commonStats.unyields;
-
- for (auto& child : _children) {
- child->restoreState(opCtx);
- }
-
if (_recordCursor) {
invariant(_recordCursor->restore(opCtx));
}
}
-void TextOrStage::invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
- ++_commonStats.invalidates;
-
- // Propagate invalidate to children.
- for (auto& child : _children) {
- child->invalidate(txn, dl, type);
- }
-
+void TextOrStage::doInvalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
// Remove the RecordID from the ScoreMap.
ScoreMap::iterator scoreIt = _scores.find(dl);
if (scoreIt != _scores.end()) {
@@ -123,14 +104,6 @@ void TextOrStage::invalidate(OperationContext* txn, const RecordId& dl, Invalida
}
}
-vector<PlanStage*> TextOrStage::getChildren() const {
- std::vector<PlanStage*> vec;
- for (auto& child : _children) {
- vec.push_back(child.get());
- }
- return vec;
-}
-
std::unique_ptr<PlanStageStats> TextOrStage::getStats() {
_commonStats.isEOF = isEOF();
@@ -150,10 +123,6 @@ std::unique_ptr<PlanStageStats> TextOrStage::getStats() {
return ret;
}
-const CommonStats* TextOrStage::getCommonStats() const {
- return &_commonStats;
-}
-
const SpecificStats* TextOrStage::getSpecificStats() const {
return &_specificStats;
}
diff --git a/src/mongo/db/exec/text_or.h b/src/mongo/db/exec/text_or.h
index 4de8b5bade2..a51a91a3919 100644
--- a/src/mongo/db/exec/text_or.h
+++ b/src/mongo/db/exec/text_or.h
@@ -86,11 +86,9 @@ public:
StageState work(WorkingSetID* out) final;
- void saveState() final;
- void restoreState(OperationContext* opCtx) final;
- void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) final;
-
- std::vector<PlanStage*> getChildren() const final;
+ void doSaveState() final;
+ void doRestoreState(OperationContext* opCtx) final;
+ void doInvalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) final;
StageType stageType() const final {
return STAGE_TEXT_OR;
@@ -98,8 +96,6 @@ public:
std::unique_ptr<PlanStageStats> getStats() final;
- const CommonStats* getCommonStats() const final;
-
const SpecificStats* getSpecificStats() const final;
static const char* kStageType;
@@ -137,9 +133,6 @@ private:
// What state are we in? See the State enum above.
State _internalState = State::kInit;
- // Children owned by us.
- vector<unique_ptr<PlanStage>> _children;
-
// Which of _children are we calling work(...) on now?
size_t _currentChild = 0;
@@ -158,8 +151,6 @@ private:
ScoreMap _scores;
ScoreMap::const_iterator _scoreIterator;
- // Stats
- CommonStats _commonStats;
TextOrStats _specificStats;
// Members needed only for using the TextMatchableDocument.
diff --git a/src/mongo/db/exec/update.cpp b/src/mongo/db/exec/update.cpp
index 9a523c4756b..970137bc6e9 100644
--- a/src/mongo/db/exec/update.cpp
+++ b/src/mongo/db/exec/update.cpp
@@ -415,16 +415,16 @@ UpdateStage::UpdateStage(OperationContext* txn,
WorkingSet* ws,
Collection* collection,
PlanStage* child)
- : _txn(txn),
+ : PlanStage(kStageType),
+ _txn(txn),
_params(params),
_ws(ws),
_collection(collection),
- _child(child),
_idRetrying(WorkingSet::INVALID_ID),
_idReturning(WorkingSet::INVALID_ID),
- _commonStats(kStageType),
_updatedLocs(params.request->isMulti() ? new DiskLocSet() : NULL),
_doc(params.driver->getDocument()) {
+ _children.emplace_back(child);
// We are an update until we fall into the insert case.
params.driver->setContext(ModifierInterface::ExecInfo::UPDATE_CONTEXT);
@@ -715,7 +715,7 @@ bool UpdateStage::doneUpdating() {
// We're done updating if either the child has no more results to give us, or we've
// already gotten a result back and we're not a multi-update.
return _idRetrying == WorkingSet::INVALID_ID && _idReturning == WorkingSet::INVALID_ID &&
- (_child->isEOF() || (_specificStats.nMatched > 0 && !_params.request->isMulti()));
+ (child()->isEOF() || (_specificStats.nMatched > 0 && !_params.request->isMulti()));
}
bool UpdateStage::needInsert() {
@@ -795,7 +795,7 @@ PlanStage::StageState UpdateStage::work(WorkingSetID* out) {
WorkingSetID id;
StageState status;
if (_idRetrying == WorkingSet::INVALID_ID) {
- status = _child->work(&id);
+ status = child()->work(&id);
} else {
status = ADVANCED;
id = _idRetrying;
@@ -855,7 +855,7 @@ PlanStage::StageState UpdateStage::work(WorkingSetID* out) {
// Save state before making changes
try {
- _child->saveState();
+ child()->saveState();
if (supportsDocLocking()) {
// Doc-locking engines require this after saveState() since they don't use
// invalidations.
@@ -902,7 +902,7 @@ PlanStage::StageState UpdateStage::work(WorkingSetID* out) {
// As restoreState may restore (recreate) cursors, make sure to restore the
// state outside of the WriteUnitOfWork.
try {
- _child->restoreState(_txn);
+ child()->restoreState(_txn);
} catch (const WriteConflictException& wce) {
// Note we don't need to retry updating anything in this case since the update
// already was committed. However, we still need to return the updated document
@@ -958,12 +958,6 @@ PlanStage::StageState UpdateStage::work(WorkingSetID* out) {
return status;
}
-void UpdateStage::saveState() {
- _txn = NULL;
- ++_commonStats.yields;
- _child->saveState();
-}
-
Status UpdateStage::restoreUpdateState(OperationContext* opCtx) {
const UpdateRequest& request = *_params.request;
const NamespaceString& nsString(request.getNamespaceString());
@@ -994,39 +988,20 @@ Status UpdateStage::restoreUpdateState(OperationContext* opCtx) {
return Status::OK();
}
-void UpdateStage::restoreState(OperationContext* opCtx) {
- invariant(_txn == NULL);
+void UpdateStage::doRestoreState(OperationContext* opCtx) {
_txn = opCtx;
- ++_commonStats.unyields;
- // Restore our child.
- _child->restoreState(opCtx);
- // Restore self.
uassertStatusOK(restoreUpdateState(opCtx));
}
-void UpdateStage::invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
- ++_commonStats.invalidates;
- _child->invalidate(txn, dl, type);
-}
-
-vector<PlanStage*> UpdateStage::getChildren() const {
- vector<PlanStage*> children;
- children.push_back(_child.get());
- return children;
-}
unique_ptr<PlanStageStats> UpdateStage::getStats() {
_commonStats.isEOF = isEOF();
unique_ptr<PlanStageStats> ret = make_unique<PlanStageStats>(_commonStats, STAGE_UPDATE);
ret->specific = make_unique<UpdateStats>(_specificStats);
- ret->children.push_back(_child->getStats().release());
+ ret->children.push_back(child()->getStats().release());
return ret;
}
-const CommonStats* UpdateStage::getCommonStats() const {
- return &_commonStats;
-}
-
const SpecificStats* UpdateStage::getSpecificStats() const {
return &_specificStats;
}
diff --git a/src/mongo/db/exec/update.h b/src/mongo/db/exec/update.h
index b0dda4ffc4c..4d59dca50f2 100644
--- a/src/mongo/db/exec/update.h
+++ b/src/mongo/db/exec/update.h
@@ -84,11 +84,7 @@ public:
virtual bool isEOF();
virtual StageState work(WorkingSetID* out);
- virtual void saveState();
- virtual void restoreState(OperationContext* opCtx);
- virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
-
- virtual std::vector<PlanStage*> getChildren() const;
+ virtual void doRestoreState(OperationContext* opCtx);
virtual StageType stageType() const {
return STAGE_UPDATE;
@@ -96,8 +92,6 @@ public:
virtual std::unique_ptr<PlanStageStats> getStats();
- virtual const CommonStats* getCommonStats() const;
-
virtual const SpecificStats* getSpecificStats() const;
static const char* kStageType;
@@ -182,9 +176,6 @@ private:
// Not owned by us. May be NULL.
Collection* _collection;
- // Owned by us.
- std::unique_ptr<PlanStage> _child;
-
// If not WorkingSet::INVALID_ID, we use this rather than asking our child what to do next.
WorkingSetID _idRetrying;
@@ -192,7 +183,6 @@ private:
WorkingSetID _idReturning;
// Stats
- CommonStats _commonStats;
UpdateStats _specificStats;
// If the update was in-place, we may see it again. This only matters if we're doing
diff --git a/src/mongo/db/query/explain.cpp b/src/mongo/db/query/explain.cpp
index 28aa656fbe5..895c4a6fa88 100644
--- a/src/mongo/db/query/explain.cpp
+++ b/src/mongo/db/query/explain.cpp
@@ -66,9 +66,9 @@ void flattenStatsTree(const PlanStageStats* root, vector<const PlanStageStats*>*
*/
void flattenExecTree(const PlanStage* root, vector<const PlanStage*>* flattened) {
flattened->push_back(root);
- vector<PlanStage*> children = root->getChildren();
+ const auto& children = root->getChildren();
for (size_t i = 0; i < children.size(); ++i) {
- flattenExecTree(children[i], flattened);
+ flattenExecTree(children[i].get(), flattened);
}
}
@@ -82,9 +82,9 @@ MultiPlanStage* getMultiPlanStage(PlanStage* root) {
return mps;
}
- vector<PlanStage*> children = root->getChildren();
+ const auto& children = root->getChildren();
for (size_t i = 0; i < children.size(); i++) {
- MultiPlanStage* mps = getMultiPlanStage(children[i]);
+ MultiPlanStage* mps = getMultiPlanStage(children[i].get());
if (mps != NULL) {
return mps;
}
diff --git a/src/mongo/db/query/plan_executor.cpp b/src/mongo/db/query/plan_executor.cpp
index 3c216ba313e..dfc97724b6f 100644
--- a/src/mongo/db/query/plan_executor.cpp
+++ b/src/mongo/db/query/plan_executor.cpp
@@ -66,9 +66,9 @@ PlanStage* getStageByType(PlanStage* root, StageType type) {
return root;
}
- vector<PlanStage*> children = root->getChildren();
+ const auto& children = root->getChildren();
for (size_t i = 0; i < children.size(); i++) {
- PlanStage* result = getStageByType(children[i], type);
+ PlanStage* result = getStageByType(children[i].get(), type);
if (result) {
return result;
}
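
Because getChildren() now returns a const reference to the owning Children vector instead of materializing a std::vector<PlanStage*>, callers such as flattenExecTree(), getMultiPlanStage(), and getStageByType() above borrow raw pointers with .get() while the tree retains ownership. A minimal sketch of the same traversal pattern, assuming only the PlanStage interface shown in this change (countStages is a hypothetical helper, not part of this commit):

#include <cstddef>

#include "mongo/db/exec/plan_stage.h"

// Sketch only: pre-order count of stages in a plan tree using the new
// ownership-aware child list; borrows each child without taking ownership.
size_t countStages(const mongo::PlanStage* root) {
    size_t total = 1;  // count this node
    for (const auto& child : root->getChildren()) {
        total += countStages(child.get());
    }
    return total;
}
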
diff --git a/src/mongo/db/query/plan_ranker.h b/src/mongo/db/query/plan_ranker.h
index 653fb332f12..1ae03b083ee 100644
--- a/src/mongo/db/query/plan_ranker.h
+++ b/src/mongo/db/query/plan_ranker.h
@@ -29,6 +29,7 @@
#pragma once
#include <list>
+#include <memory>
#include <vector>
#include "mongo/base/owned_pointer_vector.h"
@@ -71,9 +72,27 @@ struct CandidatePlan {
CandidatePlan(QuerySolution* s, PlanStage* r, WorkingSet* w)
: solution(s), root(r), ws(w), failed(false) {}
- QuerySolution* solution;
- PlanStage* root;
- WorkingSet* ws;
+#if defined(_MSC_VER) && _MSC_VER < 1900 // MSVC++ <= 2013 can't generate default move operations
+ CandidatePlan(CandidatePlan&& other)
+ : solution(std::move(other.solution)),
+ root(std::move(other.root)),
+ ws(std::move(other.ws)),
+ results(std::move(other.results)),
+ failed(std::move(other.failed)) {}
+
+ CandidatePlan& operator=(CandidatePlan&& other) {
+ solution = std::move(other.solution);
+ root = std::move(other.root);
+ ws = std::move(other.ws);
+ results = std::move(other.results);
+ failed = std::move(other.failed);
+ return *this;
+ }
+#endif
+
+ std::unique_ptr<QuerySolution> solution;
+ PlanStage* root; // Not owned here.
+ WorkingSet* ws; // Not owned here.
// Any results produced during the plan's execution prior to ranking are retained here.
std::list<WorkingSetID> results;
diff --git a/src/mongo/db/s/migration_source_manager.cpp b/src/mongo/db/s/migration_source_manager.cpp
index 96b575419f4..0e9d14d5f22 100644
--- a/src/mongo/db/s/migration_source_manager.cpp
+++ b/src/mongo/db/s/migration_source_manager.cpp
@@ -69,9 +69,9 @@ Tee* migrateLog = RamLog::get("migrate");
class DeleteNotificationStage final : public PlanStage {
public:
DeleteNotificationStage(MigrationSourceManager* migrationSourceManager)
- : _migrationSourceManager(migrationSourceManager) {}
+ : PlanStage("NOTIFY_DELETE"), _migrationSourceManager(migrationSourceManager) {}
- void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) override {
+ void doInvalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) override {
if (type == INVALIDATION_DELETION) {
_migrationSourceManager->aboutToDelete(dl);
}
@@ -85,32 +85,14 @@ public:
MONGO_UNREACHABLE;
}
- virtual void kill() {}
-
- virtual void saveState() {
- MONGO_UNREACHABLE;
- }
-
- virtual void restoreState(OperationContext* opCtx) {
- MONGO_UNREACHABLE;
- }
-
virtual unique_ptr<PlanStageStats> getStats() {
MONGO_UNREACHABLE;
}
- virtual CommonStats* getCommonStats() const {
- MONGO_UNREACHABLE;
- }
-
virtual SpecificStats* getSpecificStats() const {
MONGO_UNREACHABLE;
}
- virtual std::vector<PlanStage*> getChildren() const {
- return {};
- }
-
virtual StageType stageType() const {
return STAGE_NOTIFY_DELETE;
}