diff options
author | David Storch <david.storch@mongodb.com> | 2019-09-04 15:32:56 +0000 |
---|---|---|
committer | evergreen <evergreen@mongodb.com> | 2019-09-04 15:32:56 +0000 |
commit | b13188206e74dbeb66c4b663d83ed1d1f97c286b (patch) | |
tree | 01bcfe63112a9a4981a622a5370493d7d089a7a5 /src/mongo/db/exec | |
parent | d9d50312ccdfcfb628d89f34c0dcda05c8f921bc (diff) | |
download | mongo-b13188206e74dbeb66c4b663d83ed1d1f97c286b.tar.gz |
SERVER-42852 Make PlanStage consistently hold children by unique_ptr.
Diffstat (limited to 'src/mongo/db/exec')
33 files changed, 210 insertions, 193 deletions
diff --git a/src/mongo/db/exec/and_hash.cpp b/src/mongo/db/exec/and_hash.cpp index 1b2e9983286..6c913377c4f 100644 --- a/src/mongo/db/exec/and_hash.cpp +++ b/src/mongo/db/exec/and_hash.cpp @@ -71,8 +71,8 @@ AndHashStage::AndHashStage(OperationContext* opCtx, WorkingSet* ws, size_t maxMe _memUsage(0), _maxMemUsage(maxMemUsage) {} -void AndHashStage::addChild(PlanStage* child) { - _children.emplace_back(child); +void AndHashStage::addChild(std::unique_ptr<PlanStage> child) { + _children.emplace_back(std::move(child)); } size_t AndHashStage::getMemUsage() const { diff --git a/src/mongo/db/exec/and_hash.h b/src/mongo/db/exec/and_hash.h index 2b8c326c3ce..3659504486d 100644 --- a/src/mongo/db/exec/and_hash.h +++ b/src/mongo/db/exec/and_hash.h @@ -55,7 +55,7 @@ public: */ AndHashStage(OperationContext* opCtx, WorkingSet* ws, size_t maxMemUsage); - void addChild(PlanStage* child); + void addChild(std::unique_ptr<PlanStage> child); /** * Returns memory usage. diff --git a/src/mongo/db/exec/and_sorted.cpp b/src/mongo/db/exec/and_sorted.cpp index 6c60553a76e..6cda1ad2bf1 100644 --- a/src/mongo/db/exec/and_sorted.cpp +++ b/src/mongo/db/exec/and_sorted.cpp @@ -53,8 +53,8 @@ AndSortedStage::AndSortedStage(OperationContext* opCtx, WorkingSet* ws) _isEOF(false) {} -void AndSortedStage::addChild(PlanStage* child) { - _children.emplace_back(child); +void AndSortedStage::addChild(std::unique_ptr<PlanStage> child) { + _children.emplace_back(std::move(child)); } bool AndSortedStage::isEOF() { diff --git a/src/mongo/db/exec/and_sorted.h b/src/mongo/db/exec/and_sorted.h index a01ebc70d3a..3d72d15c1f9 100644 --- a/src/mongo/db/exec/and_sorted.h +++ b/src/mongo/db/exec/and_sorted.h @@ -49,7 +49,7 @@ class AndSortedStage final : public PlanStage { public: AndSortedStage(OperationContext* opCtx, WorkingSet* ws); - void addChild(PlanStage* child); + void addChild(std::unique_ptr<PlanStage> child); StageState doWork(WorkingSetID* out) final; bool isEOF() final; diff --git 
a/src/mongo/db/exec/cached_plan.cpp b/src/mongo/db/exec/cached_plan.cpp index 0c46d328c8a..97e2c9dd5a9 100644 --- a/src/mongo/db/exec/cached_plan.cpp +++ b/src/mongo/db/exec/cached_plan.cpp @@ -63,13 +63,13 @@ CachedPlanStage::CachedPlanStage(OperationContext* opCtx, CanonicalQuery* cq, const QueryPlannerParams& params, size_t decisionWorks, - PlanStage* root) + std::unique_ptr<PlanStage> root) : RequiresAllIndicesStage(kStageType, opCtx, collection), _ws(ws), _canonicalQuery(cq), _plannerParams(params), _decisionWorks(decisionWorks) { - _children.emplace_back(root); + _children.emplace_back(std::move(root)); } Status CachedPlanStage::pickBestPlan(PlanYieldPolicy* yieldPolicy) { @@ -215,11 +215,10 @@ Status CachedPlanStage::replan(PlanYieldPolicy* yieldPolicy, bool shouldCache) { } if (1 == solutions.size()) { - PlanStage* newRoot; // Only one possible plan. Build the stages from the solution. - verify(StageBuilder::build( - getOpCtx(), collection(), *_canonicalQuery, *solutions[0], _ws, &newRoot)); - _children.emplace_back(newRoot); + auto newRoot = + StageBuilder::build(getOpCtx(), collection(), *_canonicalQuery, *solutions[0], _ws); + _children.emplace_back(std::move(newRoot)); _replannedQs = std::move(solutions.back()); solutions.pop_back(); @@ -244,12 +243,10 @@ Status CachedPlanStage::replan(PlanYieldPolicy* yieldPolicy, bool shouldCache) { solutions[ix]->cacheData->indexFilterApplied = _plannerParams.indexFiltersApplied; } - PlanStage* nextPlanRoot; - verify(StageBuilder::build( - getOpCtx(), collection(), *_canonicalQuery, *solutions[ix], _ws, &nextPlanRoot)); + auto nextPlanRoot = + StageBuilder::build(getOpCtx(), collection(), *_canonicalQuery, *solutions[ix], _ws); - // Takes ownership of 'nextPlanRoot'. - multiPlanStage->addPlan(std::move(solutions[ix]), nextPlanRoot, _ws); + multiPlanStage->addPlan(std::move(solutions[ix]), std::move(nextPlanRoot), _ws); } // Delegate to the MultiPlanStage's plan selection facility. 
diff --git a/src/mongo/db/exec/cached_plan.h b/src/mongo/db/exec/cached_plan.h index 45ca2ed166c..bc7b1bc50bb 100644 --- a/src/mongo/db/exec/cached_plan.h +++ b/src/mongo/db/exec/cached_plan.h @@ -62,7 +62,7 @@ public: CanonicalQuery* cq, const QueryPlannerParams& params, size_t decisionWorks, - PlanStage* root); + std::unique_ptr<PlanStage> root); bool isEOF() final; diff --git a/src/mongo/db/exec/ensure_sorted.cpp b/src/mongo/db/exec/ensure_sorted.cpp index 611835aaba6..e4c0acc052b 100644 --- a/src/mongo/db/exec/ensure_sorted.cpp +++ b/src/mongo/db/exec/ensure_sorted.cpp @@ -45,9 +45,9 @@ const char* EnsureSortedStage::kStageType = "ENSURE_SORTED"; EnsureSortedStage::EnsureSortedStage(OperationContext* opCtx, BSONObj pattern, WorkingSet* ws, - PlanStage* child) + std::unique_ptr<PlanStage> child) : PlanStage(kStageType, opCtx), _ws(ws) { - _children.emplace_back(child); + _children.emplace_back(std::move(child)); _pattern = FindCommon::transformSortSpec(pattern); } diff --git a/src/mongo/db/exec/ensure_sorted.h b/src/mongo/db/exec/ensure_sorted.h index 0f7e09dfa6d..3e3915b6a78 100644 --- a/src/mongo/db/exec/ensure_sorted.h +++ b/src/mongo/db/exec/ensure_sorted.h @@ -42,7 +42,10 @@ namespace mongo { */ class EnsureSortedStage final : public PlanStage { public: - EnsureSortedStage(OperationContext* opCtx, BSONObj pattern, WorkingSet* ws, PlanStage* child); + EnsureSortedStage(OperationContext* opCtx, + BSONObj pattern, + WorkingSet* ws, + std::unique_ptr<PlanStage> child); bool isEOF() final; StageState doWork(WorkingSetID* out) final; diff --git a/src/mongo/db/exec/fetch.cpp b/src/mongo/db/exec/fetch.cpp index 2e30f53e302..bb09866f983 100644 --- a/src/mongo/db/exec/fetch.cpp +++ b/src/mongo/db/exec/fetch.cpp @@ -51,14 +51,14 @@ const char* FetchStage::kStageType = "FETCH"; FetchStage::FetchStage(OperationContext* opCtx, WorkingSet* ws, - PlanStage* child, + std::unique_ptr<PlanStage> child, const MatchExpression* filter, const Collection* collection) : 
RequiresCollectionStage(kStageType, opCtx, collection), _ws(ws), _filter(filter), _idRetrying(WorkingSet::INVALID_ID) { - _children.emplace_back(child); + _children.emplace_back(std::move(child)); } FetchStage::~FetchStage() {} diff --git a/src/mongo/db/exec/fetch.h b/src/mongo/db/exec/fetch.h index cdfaad31083..074bd63b4d2 100644 --- a/src/mongo/db/exec/fetch.h +++ b/src/mongo/db/exec/fetch.h @@ -52,7 +52,7 @@ class FetchStage : public RequiresCollectionStage { public: FetchStage(OperationContext* opCtx, WorkingSet* ws, - PlanStage* child, + std::unique_ptr<PlanStage> child, const MatchExpression* filter, const Collection* collection); diff --git a/src/mongo/db/exec/geo_near.cpp b/src/mongo/db/exec/geo_near.cpp index d9e7749e5fc..ccf5575044f 100644 --- a/src/mongo/db/exec/geo_near.cpp +++ b/src/mongo/db/exec/geo_near.cpp @@ -40,7 +40,6 @@ #include "mongo/base/owned_pointer_vector.h" #include "mongo/db/bson/dotted_path_support.h" #include "mongo/db/exec/fetch.h" -#include "mongo/db/exec/index_scan.h" #include "mongo/db/geo/geoconstants.h" #include "mongo/db/geo/geoparser.h" #include "mongo/db/geo/hash.h" @@ -265,46 +264,24 @@ static R2Annulus twoDDistanceBounds(const GeoNearParams& nearParams, return fullBounds; } -class GeoNear2DStage::DensityEstimator { -public: - DensityEstimator(PlanStage::Children* children, - BSONObj infoObj, - const GeoNearParams* nearParams, - const R2Annulus& fullBounds) - : _children(children), _nearParams(nearParams), _fullBounds(fullBounds), _currentLevel(0) { - GeoHashConverter::Parameters hashParams; - Status status = GeoHashConverter::parseParameters(std::move(infoObj), &hashParams); - // The index status should always be valid. 
- invariant(status.isOK()); - - _converter.reset(new GeoHashConverter(hashParams)); - _centroidCell = _converter->hash(_nearParams->nearQuery->centroid->oldPoint); - - // Since appendVertexNeighbors(level, output) requires level < hash.getBits(), - // we have to start to find documents at most GeoHash::kMaxBits - 1. Thus the finest - // search area is 16 * finest cell area at GeoHash::kMaxBits. - _currentLevel = std::max(0, hashParams.bits - 1); - } +GeoNear2DStage::DensityEstimator::DensityEstimator(PlanStage::Children* children, + BSONObj infoObj, + const GeoNearParams* nearParams, + const R2Annulus& fullBounds) + : _children(children), _nearParams(nearParams), _fullBounds(fullBounds), _currentLevel(0) { + GeoHashConverter::Parameters hashParams; + Status status = GeoHashConverter::parseParameters(std::move(infoObj), &hashParams); + // The index status should always be valid. + invariant(status.isOK()); - PlanStage::StageState work(OperationContext* opCtx, - WorkingSet* workingSet, - const IndexDescriptor* twoDIndex, - WorkingSetID* out, - double* estimatedDistance); + _converter.reset(new GeoHashConverter(hashParams)); + _centroidCell = _converter->hash(_nearParams->nearQuery->centroid->oldPoint); -private: - void buildIndexScan(OperationContext* opCtx, - WorkingSet* workingSet, - const IndexDescriptor* twoDIndex); - - PlanStage::Children* _children; // Points to PlanStage::_children in the NearStage. - const GeoNearParams* _nearParams; // Not owned here. - const R2Annulus& _fullBounds; - IndexScan* _indexScan = nullptr; // Owned in PlanStage::_children. - unique_ptr<GeoHashConverter> _converter; - GeoHash _centroidCell; - unsigned _currentLevel; -}; + // Since appendVertexNeighbors(level, output) requires level < hash.getBits(), + // we have to start to find documents at most GeoHash::kMaxBits - 1. Thus the finest + // search area is 16 * finest cell area at GeoHash::kMaxBits. 
+ _currentLevel = std::max(0, hashParams.bits - 1); +} // Initialize the internal states void GeoNear2DStage::DensityEstimator::buildIndexScan(OperationContext* opCtx, @@ -546,10 +523,10 @@ class FetchStageWithMatch final : public FetchStage { public: FetchStageWithMatch(OperationContext* opCtx, WorkingSet* ws, - PlanStage* child, + std::unique_ptr<PlanStage> child, MatchExpression* filter, const Collection* collection) - : FetchStage(opCtx, ws, child, filter, collection), _matcher(filter) {} + : FetchStage(opCtx, ws, std::move(child), filter, collection), _matcher(filter) {} private: // Owns matcher @@ -722,7 +699,7 @@ GeoNear2DStage::nextInterval(OperationContext* opCtx, .transitional_ignore(); // 2D indexes support covered search over additional fields they contain - IndexScan* scan = new IndexScan(opCtx, scanParams, workingSet, _nearParams.filter); + auto scan = std::make_unique<IndexScan>(opCtx, scanParams, workingSet, _nearParams.filter); MatchExpression* docMatcher = nullptr; @@ -733,8 +710,8 @@ GeoNear2DStage::nextInterval(OperationContext* opCtx, } // FetchStage owns index scan - _children.emplace_back( - new FetchStageWithMatch(opCtx, workingSet, scan, docMatcher, collection)); + _children.emplace_back(std::make_unique<FetchStageWithMatch>( + opCtx, workingSet, std::move(scan), docMatcher, collection)); return StatusWith<CoveredInterval*>(new CoveredInterval( _children.back().get(), nextBounds.getInner(), nextBounds.getOuter(), isLastInterval)); @@ -831,44 +808,20 @@ S2Region* buildS2Region(const R2Annulus& sphereBounds) { } } // namespace -// Estimate the density of data by search the nearest cells level by level around center. 
-class GeoNear2DSphereStage::DensityEstimator { -public: - DensityEstimator(PlanStage::Children* children, - const GeoNearParams* nearParams, - const S2IndexingParams& indexParams, - const R2Annulus& fullBounds) - : _children(children), - _nearParams(nearParams), - _indexParams(indexParams), - _fullBounds(fullBounds), - _currentLevel(0) { - // cellId.AppendVertexNeighbors(level, output) requires level < finest, - // so we use the minimum of max_level - 1 and the user specified finest - int level = std::min(S2::kMaxCellLevel - 1, gInternalQueryS2GeoFinestLevel.load()); - _currentLevel = std::max(0, level); - } - - // Search for a document in neighbors at current level. - // Return IS_EOF is such document exists and set the estimated distance to the nearest doc. - PlanStage::StageState work(OperationContext* opCtx, - WorkingSet* workingSet, - const IndexDescriptor* s2Index, - WorkingSetID* out, - double* estimatedDistance); - -private: - void buildIndexScan(OperationContext* opCtx, - WorkingSet* workingSet, - const IndexDescriptor* s2Index); - - PlanStage::Children* _children; // Points to PlanStage::_children in the NearStage. - const GeoNearParams* _nearParams; // Not owned here. - const S2IndexingParams _indexParams; - const R2Annulus& _fullBounds; - int _currentLevel; - IndexScan* _indexScan = nullptr; // Owned in PlanStage::_children. 
-}; +GeoNear2DSphereStage::DensityEstimator::DensityEstimator(PlanStage::Children* children, + const GeoNearParams* nearParams, + const S2IndexingParams& indexParams, + const R2Annulus& fullBounds) + : _children(children), + _nearParams(nearParams), + _indexParams(indexParams), + _fullBounds(fullBounds), + _currentLevel(0) { + // cellId.AppendVertexNeighbors(level, output) requires level < finest, + // so we use the minimum of max_level - 1 and the user specified finest + int level = std::min(S2::kMaxCellLevel - 1, gInternalQueryS2GeoFinestLevel.load()); + _currentLevel = std::max(0, level); +} // Setup the index scan stage for neighbors at this level. void GeoNear2DSphereStage::DensityEstimator::buildIndexScan(OperationContext* opCtx, @@ -1077,10 +1030,11 @@ GeoNear2DSphereStage::nextInterval(OperationContext* opCtx, OrderedIntervalList* coveredIntervals = &scanParams.bounds.fields[s2FieldPosition]; ExpressionMapping::S2CellIdsToIntervalsWithParents(cover, _indexParams, coveredIntervals); - IndexScan* scan = new IndexScan(opCtx, scanParams, workingSet, nullptr); + auto scan = std::make_unique<IndexScan>(opCtx, scanParams, workingSet, nullptr); // FetchStage owns index scan - _children.emplace_back(new FetchStage(opCtx, workingSet, scan, _nearParams.filter, collection)); + _children.emplace_back(std::make_unique<FetchStage>( + opCtx, workingSet, std::move(scan), _nearParams.filter, collection)); return StatusWith<CoveredInterval*>(new CoveredInterval( _children.back().get(), nextBounds.getInner(), nextBounds.getOuter(), isLastInterval)); diff --git a/src/mongo/db/exec/geo_near.h b/src/mongo/db/exec/geo_near.h index 33295de7b39..ce23ccd6e38 100644 --- a/src/mongo/db/exec/geo_near.h +++ b/src/mongo/db/exec/geo_near.h @@ -29,6 +29,7 @@ #pragma once +#include "mongo/db/exec/index_scan.h" #include "mongo/db/exec/near.h" #include "mongo/db/exec/plan_stats.h" #include "mongo/db/exec/working_set.h" @@ -84,6 +85,33 @@ protected: WorkingSetID* out) final; private: + class 
DensityEstimator { + public: + DensityEstimator(PlanStage::Children* children, + BSONObj infoObj, + const GeoNearParams* nearParams, + const R2Annulus& fullBounds); + + PlanStage::StageState work(OperationContext* opCtx, + WorkingSet* workingSet, + const IndexDescriptor* twoDIndex, + WorkingSetID* out, + double* estimatedDistance); + + private: + void buildIndexScan(OperationContext* opCtx, + WorkingSet* workingSet, + const IndexDescriptor* twoDIndex); + + PlanStage::Children* _children; // Points to PlanStage::_children in the NearStage. + const GeoNearParams* _nearParams; // Not owned here. + const R2Annulus& _fullBounds; + IndexScan* _indexScan = nullptr; // Owned in PlanStage::_children. + std::unique_ptr<GeoHashConverter> _converter; + GeoHash _centroidCell; + unsigned _currentLevel; + }; + const GeoNearParams _nearParams; // The total search annulus @@ -98,7 +126,6 @@ private: // Keeps track of the region that has already been scanned R2CellUnion _scannedCells; - class DensityEstimator; std::unique_ptr<DensityEstimator> _densityEstimator; }; @@ -126,6 +153,35 @@ protected: WorkingSetID* out) final; private: + // Estimate the density of data by searching the nearest cells level by level around center. + class DensityEstimator { + public: + DensityEstimator(PlanStage::Children* children, + const GeoNearParams* nearParams, + const S2IndexingParams& indexParams, + const R2Annulus& fullBounds); + + // Search for a document in neighbors at current level. + // Return IS_EOF if such a document exists and set the estimated distance to the nearest doc. + PlanStage::StageState work(OperationContext* opCtx, + WorkingSet* workingSet, + const IndexDescriptor* s2Index, + WorkingSetID* out, + double* estimatedDistance); + + private: + void buildIndexScan(OperationContext* opCtx, + WorkingSet* workingSet, + const IndexDescriptor* s2Index); + + PlanStage::Children* _children; // Points to PlanStage::_children in the NearStage. + const GeoNearParams* _nearParams; // Not owned here. 
+ const S2IndexingParams _indexParams; + const R2Annulus& _fullBounds; + int _currentLevel; + IndexScan* _indexScan = nullptr; // Owned in PlanStage::_children. + }; + const GeoNearParams _nearParams; S2IndexingParams _indexParams; @@ -142,7 +198,6 @@ private: // Keeps track of the region that has already been scanned S2CellUnion _scannedCells; - class DensityEstimator; std::unique_ptr<DensityEstimator> _densityEstimator; }; diff --git a/src/mongo/db/exec/limit.cpp b/src/mongo/db/exec/limit.cpp index 605babeeb31..e800d614039 100644 --- a/src/mongo/db/exec/limit.cpp +++ b/src/mongo/db/exec/limit.cpp @@ -43,10 +43,13 @@ using std::vector; // static const char* LimitStage::kStageType = "LIMIT"; -LimitStage::LimitStage(OperationContext* opCtx, long long limit, WorkingSet* ws, PlanStage* child) +LimitStage::LimitStage(OperationContext* opCtx, + long long limit, + WorkingSet* ws, + std::unique_ptr<PlanStage> child) : PlanStage(kStageType, opCtx), _ws(ws), _numToReturn(limit) { _specificStats.limit = _numToReturn; - _children.emplace_back(child); + _children.emplace_back(std::move(child)); } LimitStage::~LimitStage() {} diff --git a/src/mongo/db/exec/limit.h b/src/mongo/db/exec/limit.h index 61f2e3d1476..f807838b540 100644 --- a/src/mongo/db/exec/limit.h +++ b/src/mongo/db/exec/limit.h @@ -45,7 +45,10 @@ namespace mongo { */ class LimitStage final : public PlanStage { public: - LimitStage(OperationContext* opCtx, long long limit, WorkingSet* ws, PlanStage* child); + LimitStage(OperationContext* opCtx, + long long limit, + WorkingSet* ws, + std::unique_ptr<PlanStage> child); ~LimitStage(); bool isEOF() final; diff --git a/src/mongo/db/exec/merge_sort.cpp b/src/mongo/db/exec/merge_sort.cpp index 8ab56f2b7ad..cc7d40b073e 100644 --- a/src/mongo/db/exec/merge_sort.cpp +++ b/src/mongo/db/exec/merge_sort.cpp @@ -57,11 +57,11 @@ MergeSortStage::MergeSortStage(OperationContext* opCtx, _dedup(params.dedup), _merging(StageWithValueComparison(ws, params.pattern, params.collator)) {} 
-void MergeSortStage::addChild(PlanStage* child) { - _children.emplace_back(child); +void MergeSortStage::addChild(std::unique_ptr<PlanStage> child) { + _children.emplace_back(std::move(child)); // We have to call work(...) on every child before we can pick a min. - _noResultToMerge.push(child); + _noResultToMerge.push(_children.back().get()); } bool MergeSortStage::isEOF() { diff --git a/src/mongo/db/exec/merge_sort.h b/src/mongo/db/exec/merge_sort.h index 5a25a3243cd..714f6e0c68a 100644 --- a/src/mongo/db/exec/merge_sort.h +++ b/src/mongo/db/exec/merge_sort.h @@ -59,7 +59,7 @@ class MergeSortStage final : public PlanStage { public: MergeSortStage(OperationContext* opCtx, const MergeSortStageParams& params, WorkingSet* ws); - void addChild(PlanStage* child); + void addChild(std::unique_ptr<PlanStage> child); bool isEOF() final; StageState doWork(WorkingSetID* out) final; diff --git a/src/mongo/db/exec/multi_plan.cpp b/src/mongo/db/exec/multi_plan.cpp index 929d22f756d..45c409c7ab3 100644 --- a/src/mongo/db/exec/multi_plan.cpp +++ b/src/mongo/db/exec/multi_plan.cpp @@ -75,10 +75,10 @@ MultiPlanStage::MultiPlanStage(OperationContext* opCtx, _statusMemberId(WorkingSet::INVALID_ID) {} void MultiPlanStage::addPlan(std::unique_ptr<QuerySolution> solution, - PlanStage* root, + std::unique_ptr<PlanStage> root, WorkingSet* ws) { - _candidates.push_back(CandidatePlan(std::move(solution), root, ws)); - _children.emplace_back(root); + _children.emplace_back(std::move(root)); + _candidates.push_back(CandidatePlan(std::move(solution), _children.back().get(), ws)); } bool MultiPlanStage::isEOF() { diff --git a/src/mongo/db/exec/multi_plan.h b/src/mongo/db/exec/multi_plan.h index d11797b5582..f70b2800e63 100644 --- a/src/mongo/db/exec/multi_plan.h +++ b/src/mongo/db/exec/multi_plan.h @@ -95,9 +95,11 @@ public: const SpecificStats* getSpecificStats() const final; /** - * Takes ownership of PlanStage. Does not take ownership of WorkingSet. 
+ * Adds a new candidate plan to be considered for selection by the MultiPlanStage trial period. */ - void addPlan(std::unique_ptr<QuerySolution> solution, PlanStage* root, WorkingSet* sharedWs); + void addPlan(std::unique_ptr<QuerySolution> solution, + std::unique_ptr<PlanStage> root, + WorkingSet* sharedWs); /** * Runs all plans added by addPlan, ranks them, and picks a best. diff --git a/src/mongo/db/exec/or.cpp b/src/mongo/db/exec/or.cpp index 8269b599f43..3800536b62c 100644 --- a/src/mongo/db/exec/or.cpp +++ b/src/mongo/db/exec/or.cpp @@ -47,8 +47,8 @@ const char* OrStage::kStageType = "OR"; OrStage::OrStage(OperationContext* opCtx, WorkingSet* ws, bool dedup, const MatchExpression* filter) : PlanStage(kStageType, opCtx), _ws(ws), _filter(filter), _currentChild(0), _dedup(dedup) {} -void OrStage::addChild(PlanStage* child) { - _children.emplace_back(child); +void OrStage::addChild(std::unique_ptr<PlanStage> child) { + _children.emplace_back(std::move(child)); } void OrStage::addChildren(Children childrenToAdd) { diff --git a/src/mongo/db/exec/or.h b/src/mongo/db/exec/or.h index dff62d17d51..8d2c043ee46 100644 --- a/src/mongo/db/exec/or.h +++ b/src/mongo/db/exec/or.h @@ -46,7 +46,7 @@ class OrStage final : public PlanStage { public: OrStage(OperationContext* opCtx, WorkingSet* ws, bool dedup, const MatchExpression* filter); - void addChild(PlanStage* child); + void addChild(std::unique_ptr<PlanStage> child); void addChildren(Children childrenToAdd); diff --git a/src/mongo/db/exec/shard_filter.cpp b/src/mongo/db/exec/shard_filter.cpp index ed25ef0ad8e..3ca72e989a0 100644 --- a/src/mongo/db/exec/shard_filter.cpp +++ b/src/mongo/db/exec/shard_filter.cpp @@ -53,9 +53,9 @@ const char* ShardFilterStage::kStageType = "SHARDING_FILTER"; ShardFilterStage::ShardFilterStage(OperationContext* opCtx, ScopedCollectionMetadata metadata, WorkingSet* ws, - PlanStage* child) + std::unique_ptr<PlanStage> child) : PlanStage(kStageType, opCtx), _ws(ws),
_shardFilterer(std::move(metadata)) { - _children.emplace_back(child); + _children.emplace_back(std::move(child)); } ShardFilterStage::~ShardFilterStage() {} diff --git a/src/mongo/db/exec/shard_filter.h b/src/mongo/db/exec/shard_filter.h index d8284ad4e6f..5fdb2a2a247 100644 --- a/src/mongo/db/exec/shard_filter.h +++ b/src/mongo/db/exec/shard_filter.h @@ -74,7 +74,7 @@ public: ShardFilterStage(OperationContext* opCtx, ScopedCollectionMetadata metadata, WorkingSet* ws, - PlanStage* child); + std::unique_ptr<PlanStage> child); ~ShardFilterStage(); bool isEOF() final; diff --git a/src/mongo/db/exec/skip.cpp b/src/mongo/db/exec/skip.cpp index a87a9dd745b..bc488c1b410 100644 --- a/src/mongo/db/exec/skip.cpp +++ b/src/mongo/db/exec/skip.cpp @@ -43,9 +43,12 @@ using std::vector; // static const char* SkipStage::kStageType = "SKIP"; -SkipStage::SkipStage(OperationContext* opCtx, long long toSkip, WorkingSet* ws, PlanStage* child) +SkipStage::SkipStage(OperationContext* opCtx, + long long toSkip, + WorkingSet* ws, + std::unique_ptr<PlanStage> child) : PlanStage(kStageType, opCtx), _ws(ws), _toSkip(toSkip) { - _children.emplace_back(child); + _children.emplace_back(std::move(child)); } SkipStage::~SkipStage() {} diff --git a/src/mongo/db/exec/skip.h b/src/mongo/db/exec/skip.h index c885f275f31..8751cb22471 100644 --- a/src/mongo/db/exec/skip.h +++ b/src/mongo/db/exec/skip.h @@ -44,7 +44,10 @@ namespace mongo { */ class SkipStage final : public PlanStage { public: - SkipStage(OperationContext* opCtx, long long toSkip, WorkingSet* ws, PlanStage* child); + SkipStage(OperationContext* opCtx, + long long toSkip, + WorkingSet* ws, + std::unique_ptr<PlanStage> child); ~SkipStage(); bool isEOF() final; diff --git a/src/mongo/db/exec/sort.cpp b/src/mongo/db/exec/sort.cpp index 13f1615ee11..1dfae92f7f6 100644 --- a/src/mongo/db/exec/sort.cpp +++ b/src/mongo/db/exec/sort.cpp @@ -69,7 +69,7 @@ bool SortStage::WorkingSetComparator::operator()(const SortableDataItem& lhs, 
SortStage::SortStage(OperationContext* opCtx, const SortStageParams& params, WorkingSet* ws, - PlanStage* child) + std::unique_ptr<PlanStage> child) : PlanStage(kStageType, opCtx), _ws(ws), _pattern(params.pattern), @@ -78,7 +78,7 @@ SortStage::SortStage(OperationContext* opCtx, _sorted(false), _resultIterator(_data.end()), _memUsage(0) { - _children.emplace_back(child); + _children.emplace_back(std::move(child)); BSONObj sortComparator = FindCommon::transformSortSpec(_pattern); _sortKeyComparator = std::make_unique<WorkingSetComparator>(sortComparator); diff --git a/src/mongo/db/exec/sort.h b/src/mongo/db/exec/sort.h index adadc9ad121..8fc5827fe13 100644 --- a/src/mongo/db/exec/sort.h +++ b/src/mongo/db/exec/sort.h @@ -72,7 +72,7 @@ public: SortStage(OperationContext* opCtx, const SortStageParams& params, WorkingSet* ws, - PlanStage* child); + std::unique_ptr<PlanStage> child); ~SortStage(); bool isEOF() final; diff --git a/src/mongo/db/exec/sort_key_generator.cpp b/src/mongo/db/exec/sort_key_generator.cpp index 98800394762..2439da67cb3 100644 --- a/src/mongo/db/exec/sort_key_generator.cpp +++ b/src/mongo/db/exec/sort_key_generator.cpp @@ -50,13 +50,13 @@ namespace mongo { const char* SortKeyGeneratorStage::kStageType = "SORT_KEY_GENERATOR"; SortKeyGeneratorStage::SortKeyGeneratorStage(const boost::intrusive_ptr<ExpressionContext>& pExpCtx, - PlanStage* child, + std::unique_ptr<PlanStage> child, WorkingSet* ws, const BSONObj& sortSpecObj) : PlanStage(kStageType, pExpCtx->opCtx), _ws(ws), _sortKeyGen({{sortSpecObj, pExpCtx}, pExpCtx->getCollator()}) { - _children.emplace_back(child); + _children.emplace_back(std::move(child)); } bool SortKeyGeneratorStage::isEOF() { diff --git a/src/mongo/db/exec/sort_key_generator.h b/src/mongo/db/exec/sort_key_generator.h index c7a9e2dfd4b..5732f2008f6 100644 --- a/src/mongo/db/exec/sort_key_generator.h +++ b/src/mongo/db/exec/sort_key_generator.h @@ -51,7 +51,7 @@ class WorkingSetMember; class SortKeyGeneratorStage final : 
public PlanStage { public: SortKeyGeneratorStage(const boost::intrusive_ptr<ExpressionContext>& pExpCtx, - PlanStage* child, + std::unique_ptr<PlanStage> child, WorkingSet* ws, const BSONObj& sortSpecObj); diff --git a/src/mongo/db/exec/sort_test.cpp b/src/mongo/db/exec/sort_test.cpp index 65971d6ac15..7ff0614d7eb 100644 --- a/src/mongo/db/exec/sort_test.cpp +++ b/src/mongo/db/exec/sort_test.cpp @@ -108,9 +108,9 @@ public: new ExpressionContext(getOpCtx(), collator)); auto sortKeyGen = std::make_unique<SortKeyGeneratorStage>( - pExpCtx, queuedDataStage.release(), &ws, params.pattern); + pExpCtx, std::move(queuedDataStage), &ws, params.pattern); - SortStage sort(getOpCtx(), params, &ws, sortKeyGen.release()); + SortStage sort(getOpCtx(), params, &ws, std::move(sortKeyGen)); WorkingSetID id = WorkingSet::INVALID_ID; PlanStage::StageState state = PlanStage::NEED_TIME; @@ -167,10 +167,10 @@ TEST_F(SortStageTest, SortEmptyWorkingSet) { // QueuedDataStage will be owned by SortStage. auto queuedDataStage = std::make_unique<QueuedDataStage>(getOpCtx(), &ws); - auto sortKeyGen = - std::make_unique<SortKeyGeneratorStage>(pExpCtx, queuedDataStage.release(), &ws, BSONObj()); + auto sortKeyGen = std::make_unique<SortKeyGeneratorStage>( + pExpCtx, std::move(queuedDataStage), &ws, BSONObj()); SortStageParams params; - SortStage sort(getOpCtx(), params, &ws, sortKeyGen.release()); + SortStage sort(getOpCtx(), params, &ws, std::move(sortKeyGen)); // Check initial EOF state. 
ASSERT_FALSE(sort.isEOF()); diff --git a/src/mongo/db/exec/stagedebug_cmd.cpp b/src/mongo/db/exec/stagedebug_cmd.cpp index 3baee889ab1..50bbb7e624b 100644 --- a/src/mongo/db/exec/stagedebug_cmd.cpp +++ b/src/mongo/db/exec/stagedebug_cmd.cpp @@ -170,13 +170,13 @@ public: std::vector<std::unique_ptr<MatchExpression>> exprs; unique_ptr<WorkingSet> ws(new WorkingSet()); - PlanStage* userRoot = parseQuery(opCtx, collection, planObj, ws.get(), &exprs); + std::unique_ptr<PlanStage> userRoot{ + parseQuery(opCtx, collection, planObj, ws.get(), &exprs)}; uassert(16911, "Couldn't parse plan from " + cmdObj.toString(), nullptr != userRoot); // Add a fetch at the top for the user so we can get obj back for sure. - // TODO: Do we want to do this for the user? I think so. unique_ptr<PlanStage> rootFetch = - std::make_unique<FetchStage>(opCtx, ws.get(), userRoot, nullptr, collection); + std::make_unique<FetchStage>(opCtx, ws.get(), std::move(userRoot), nullptr, collection); auto statusWithPlanExecutor = PlanExecutor::make( opCtx, std::move(ws), std::move(rootFetch), collection, PlanExecutor::YIELD_AUTO); @@ -307,12 +307,12 @@ public: BSONElement e = it.next(); uassert(16922, "node of AND isn't an obj?: " + e.toString(), e.isABSONObj()); - PlanStage* subNode = parseQuery(opCtx, collection, e.Obj(), workingSet, exprs); + std::unique_ptr<PlanStage> subNode{ + parseQuery(opCtx, collection, e.Obj(), workingSet, exprs)}; uassert(16923, "Can't parse sub-node of AND: " + e.Obj().toString(), nullptr != subNode); - // takes ownership - andStage->addChild(subNode); + andStage->addChild(std::move(subNode)); ++nodesAdded; } @@ -331,12 +331,12 @@ public: BSONElement e = it.next(); uassert(16925, "node of AND isn't an obj?: " + e.toString(), e.isABSONObj()); - PlanStage* subNode = parseQuery(opCtx, collection, e.Obj(), workingSet, exprs); + std::unique_ptr<PlanStage> subNode{ + parseQuery(opCtx, collection, e.Obj(), workingSet, exprs)}; uassert(16926, "Can't parse sub-node of AND: " + 
e.Obj().toString(), nullptr != subNode); - // takes ownership - andStage->addChild(subNode); + andStage->addChild(std::move(subNode)); ++nodesAdded; } @@ -355,23 +355,23 @@ public: if (!e.isABSONObj()) { return nullptr; } - PlanStage* subNode = parseQuery(opCtx, collection, e.Obj(), workingSet, exprs); + std::unique_ptr<PlanStage> subNode{ + parseQuery(opCtx, collection, e.Obj(), workingSet, exprs)}; uassert( 16936, "Can't parse sub-node of OR: " + e.Obj().toString(), nullptr != subNode); - // takes ownership - orStage->addChild(subNode); + orStage->addChild(std::move(subNode)); } return orStage.release(); } else if ("fetch" == nodeName) { uassert( 16929, "Node argument must be provided to fetch", nodeArgs["node"].isABSONObj()); - PlanStage* subNode = - parseQuery(opCtx, collection, nodeArgs["node"].Obj(), workingSet, exprs); + std::unique_ptr<PlanStage> subNode{ + parseQuery(opCtx, collection, nodeArgs["node"].Obj(), workingSet, exprs)}; uassert(28731, "Can't parse sub-node of FETCH: " + nodeArgs["node"].Obj().toString(), nullptr != subNode); - return new FetchStage(opCtx, workingSet, subNode, matcher, collection); + return new FetchStage(opCtx, workingSet, std::move(subNode), matcher, collection); } else if ("limit" == nodeName) { uassert(16937, "Limit stage doesn't have a filter (put it on the child)", @@ -379,24 +379,26 @@ public: uassert( 16930, "Node argument must be provided to limit", nodeArgs["node"].isABSONObj()); uassert(16931, "Num argument must be provided to limit", nodeArgs["num"].isNumber()); - PlanStage* subNode = - parseQuery(opCtx, collection, nodeArgs["node"].Obj(), workingSet, exprs); + std::unique_ptr<PlanStage> subNode{ + parseQuery(opCtx, collection, nodeArgs["node"].Obj(), workingSet, exprs)}; uassert(28732, "Can't parse sub-node of LIMIT: " + nodeArgs["node"].Obj().toString(), nullptr != subNode); - return new LimitStage(opCtx, nodeArgs["num"].numberInt(), workingSet, subNode); + return new LimitStage( + opCtx, nodeArgs["num"].numberInt(), 
workingSet, std ::move(subNode)); } else if ("skip" == nodeName) { uassert(16938, "Skip stage doesn't have a filter (put it on the child)", nullptr == matcher); uassert(16932, "Node argument must be provided to skip", nodeArgs["node"].isABSONObj()); uassert(16933, "Num argument must be provided to skip", nodeArgs["num"].isNumber()); - PlanStage* subNode = - parseQuery(opCtx, collection, nodeArgs["node"].Obj(), workingSet, exprs); + std::unique_ptr<PlanStage> subNode{ + parseQuery(opCtx, collection, nodeArgs["node"].Obj(), workingSet, exprs)}; uassert(28733, "Can't parse sub-node of SKIP: " + nodeArgs["node"].Obj().toString(), nullptr != subNode); - return new SkipStage(opCtx, nodeArgs["num"].numberInt(), workingSet, subNode); + return new SkipStage( + opCtx, nodeArgs["num"].numberInt(), workingSet, std::move(subNode)); } else if ("cscan" == nodeName) { CollectionScanParams params; @@ -429,12 +431,12 @@ public: BSONElement e = it.next(); uassert(16973, "node of mergeSort isn't an obj?: " + e.toString(), e.isABSONObj()); - PlanStage* subNode = parseQuery(opCtx, collection, e.Obj(), workingSet, exprs); + std::unique_ptr<PlanStage> subNode{ + parseQuery(opCtx, collection, e.Obj(), workingSet, exprs)}; uassert(16974, "Can't parse sub-node of mergeSort: " + e.Obj().toString(), nullptr != subNode); - // takes ownership - mergeStage->addChild(subNode); + mergeStage->addChild(std::move(subNode)); } return mergeStage.release(); } else if ("text" == nodeName) { diff --git a/src/mongo/db/exec/subplan.cpp b/src/mongo/db/exec/subplan.cpp index 97fb663f7c6..789fca69ce9 100644 --- a/src/mongo/db/exec/subplan.cpp +++ b/src/mongo/db/exec/subplan.cpp @@ -271,16 +271,14 @@ Status SubplanStage::choosePlanForSubqueries(PlanYieldPolicy* yieldPolicy) { // Dump all the solutions into the MPS. 
for (size_t ix = 0; ix < branchResult->solutions.size(); ++ix) { - PlanStage* nextPlanRoot; - invariant(StageBuilder::build(getOpCtx(), - collection(), - *branchResult->canonicalQuery, - *branchResult->solutions[ix], - _ws, - &nextPlanRoot)); - - // Takes ownership of 'nextPlanRoot'. - multiPlanStage->addPlan(std::move(branchResult->solutions[ix]), nextPlanRoot, _ws); + auto nextPlanRoot = StageBuilder::build(getOpCtx(), + collection(), + *branchResult->canonicalQuery, + *branchResult->solutions[ix], + _ws); + + multiPlanStage->addPlan( + std::move(branchResult->solutions[ix]), std::move(nextPlanRoot), _ws); } Status planSelectStat = multiPlanStage->pickBestPlan(yieldPolicy); @@ -340,7 +338,6 @@ Status SubplanStage::choosePlanForSubqueries(PlanYieldPolicy* yieldPolicy) { LOG(5) << "Subplanner: fully tagged tree is " << redact(solnRoot->toString()); - // Takes ownership of 'solnRoot' _compositeSolution = QueryPlannerAnalysis::analyzeDataAccess(*_query, _plannerParams, std::move(solnRoot)); @@ -355,11 +352,10 @@ Status SubplanStage::choosePlanForSubqueries(PlanYieldPolicy* yieldPolicy) { // Use the index tags from planning each branch to construct the composite solution, // and set that solution as our child stage. _ws->clear(); - PlanStage* root; - invariant(StageBuilder::build( - getOpCtx(), collection(), *_query, *_compositeSolution.get(), _ws, &root)); + auto root = + StageBuilder::build(getOpCtx(), collection(), *_query, *_compositeSolution.get(), _ws); invariant(_children.empty()); - _children.emplace_back(root); + _children.emplace_back(std::move(root)); return Status::OK(); } @@ -387,11 +383,10 @@ Status SubplanStage::choosePlanWholeQuery(PlanYieldPolicy* yieldPolicy) { } if (1 == solutions.size()) { - PlanStage* root; // Only one possible plan. Run it. Build the stages from the solution. 
- verify(StageBuilder::build(getOpCtx(), collection(), *_query, *solutions[0], _ws, &root)); + auto root = StageBuilder::build(getOpCtx(), collection(), *_query, *solutions[0], _ws); invariant(_children.empty()); - _children.emplace_back(root); + _children.emplace_back(std::move(root)); // This SubplanStage takes ownership of the query solution. _compositeSolution = std::move(solutions.back()); @@ -410,13 +405,10 @@ Status SubplanStage::choosePlanWholeQuery(PlanYieldPolicy* yieldPolicy) { solutions[ix]->cacheData->indexFilterApplied = _plannerParams.indexFiltersApplied; } - // version of StageBuild::build when WorkingSet is shared - PlanStage* nextPlanRoot; - verify(StageBuilder::build( - getOpCtx(), collection(), *_query, *solutions[ix], _ws, &nextPlanRoot)); + auto nextPlanRoot = + StageBuilder::build(getOpCtx(), collection(), *_query, *solutions[ix], _ws); - // Takes ownership of 'nextPlanRoot'. - multiPlanStage->addPlan(std::move(solutions[ix]), nextPlanRoot, _ws); + multiPlanStage->addPlan(std::move(solutions[ix]), std::move(nextPlanRoot), _ws); } // Delegate the the MultiPlanStage's plan selection facility. diff --git a/src/mongo/db/exec/text.cpp b/src/mongo/db/exec/text.cpp index 93054164ebb..9efb4915c61 100644 --- a/src/mongo/db/exec/text.cpp +++ b/src/mongo/db/exec/text.cpp @@ -140,7 +140,7 @@ unique_ptr<PlanStage> TextStage::buildTextTree(OperationContext* opCtx, // WorkingSetMember inputs have fetched data. 
const MatchExpression* emptyFilter = nullptr; auto fetchStage = std::make_unique<FetchStage>( - opCtx, ws, textSearcher.release(), emptyFilter, collection); + opCtx, ws, std::move(textSearcher), emptyFilter, collection); textMatchStage = std::make_unique<TextMatchStage>( opCtx, std::move(fetchStage), _params.query, _params.spec, ws); diff --git a/src/mongo/db/exec/trial_stage.cpp b/src/mongo/db/exec/trial_stage.cpp index c54c6e91dc9..1e5b36fb7c4 100644 --- a/src/mongo/db/exec/trial_stage.cpp +++ b/src/mongo/db/exec/trial_stage.cpp @@ -174,8 +174,8 @@ void TrialStage::_assessTrialAndBuildFinalPlan() { // final plan which UNIONs across the QueuedDataStage and the trial plan. std::unique_ptr<PlanStage> unionPlan = std::make_unique<OrStage>(getOpCtx(), _ws, false, nullptr); - static_cast<OrStage*>(unionPlan.get())->addChild(_queuedData.release()); - static_cast<OrStage*>(unionPlan.get())->addChild(_children.front().release()); + static_cast<OrStage*>(unionPlan.get())->addChild(std::move(_queuedData)); + static_cast<OrStage*>(unionPlan.get())->addChild(std::move(_children.front())); _replaceCurrentPlan(unionPlan); } |