summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorDavid Storch <david.storch@mongodb.com>2019-09-04 15:32:56 +0000
committerevergreen <evergreen@mongodb.com>2019-09-04 15:32:56 +0000
commitb13188206e74dbeb66c4b663d83ed1d1f97c286b (patch)
tree01bcfe63112a9a4981a622a5370493d7d089a7a5
parentd9d50312ccdfcfb628d89f34c0dcda05c8f921bc (diff)
downloadmongo-b13188206e74dbeb66c4b663d83ed1d1f97c286b.tar.gz
SERVER-42852 Make PlanStage consistently hold children by unique_ptr.
-rw-r--r--src/mongo/db/exec/and_hash.cpp4
-rw-r--r--src/mongo/db/exec/and_hash.h2
-rw-r--r--src/mongo/db/exec/and_sorted.cpp4
-rw-r--r--src/mongo/db/exec/and_sorted.h2
-rw-r--r--src/mongo/db/exec/cached_plan.cpp19
-rw-r--r--src/mongo/db/exec/cached_plan.h2
-rw-r--r--src/mongo/db/exec/ensure_sorted.cpp4
-rw-r--r--src/mongo/db/exec/ensure_sorted.h5
-rw-r--r--src/mongo/db/exec/fetch.cpp4
-rw-r--r--src/mongo/db/exec/fetch.h2
-rw-r--r--src/mongo/db/exec/geo_near.cpp122
-rw-r--r--src/mongo/db/exec/geo_near.h59
-rw-r--r--src/mongo/db/exec/limit.cpp7
-rw-r--r--src/mongo/db/exec/limit.h5
-rw-r--r--src/mongo/db/exec/merge_sort.cpp6
-rw-r--r--src/mongo/db/exec/merge_sort.h2
-rw-r--r--src/mongo/db/exec/multi_plan.cpp6
-rw-r--r--src/mongo/db/exec/multi_plan.h6
-rw-r--r--src/mongo/db/exec/or.cpp4
-rw-r--r--src/mongo/db/exec/or.h2
-rw-r--r--src/mongo/db/exec/shard_filter.cpp4
-rw-r--r--src/mongo/db/exec/shard_filter.h2
-rw-r--r--src/mongo/db/exec/skip.cpp7
-rw-r--r--src/mongo/db/exec/skip.h5
-rw-r--r--src/mongo/db/exec/sort.cpp4
-rw-r--r--src/mongo/db/exec/sort.h2
-rw-r--r--src/mongo/db/exec/sort_key_generator.cpp4
-rw-r--r--src/mongo/db/exec/sort_key_generator.h2
-rw-r--r--src/mongo/db/exec/sort_test.cpp10
-rw-r--r--src/mongo/db/exec/stagedebug_cmd.cpp50
-rw-r--r--src/mongo/db/exec/subplan.cpp40
-rw-r--r--src/mongo/db/exec/text.cpp2
-rw-r--r--src/mongo/db/exec/trial_stage.cpp4
-rw-r--r--src/mongo/db/pipeline/pipeline_d.cpp4
-rw-r--r--src/mongo/db/query/get_executor.cpp63
-rw-r--r--src/mongo/db/query/internal_plans.cpp2
-rw-r--r--src/mongo/db/query/stage_builder.cpp216
-rw-r--r--src/mongo/db/query/stage_builder.h22
-rw-r--r--src/mongo/dbtests/plan_ranking.cpp6
-rw-r--r--src/mongo/dbtests/query_plan_executor.cpp5
-rw-r--r--src/mongo/dbtests/query_stage_and.cpp110
-rw-r--r--src/mongo/dbtests/query_stage_cached_plan.cpp10
-rw-r--r--src/mongo/dbtests/query_stage_ensure_sorted.cpp10
-rw-r--r--src/mongo/dbtests/query_stage_fetch.cpp8
-rw-r--r--src/mongo/dbtests/query_stage_limit_skip.cpp4
-rw-r--r--src/mongo/dbtests/query_stage_merge_sort.cpp81
-rw-r--r--src/mongo/dbtests/query_stage_multiplan.cpp32
-rw-r--r--src/mongo/dbtests/query_stage_sort.cpp16
-rw-r--r--src/mongo/dbtests/query_stage_sort_key_generator.cpp4
49 files changed, 469 insertions, 527 deletions
diff --git a/src/mongo/db/exec/and_hash.cpp b/src/mongo/db/exec/and_hash.cpp
index 1b2e9983286..6c913377c4f 100644
--- a/src/mongo/db/exec/and_hash.cpp
+++ b/src/mongo/db/exec/and_hash.cpp
@@ -71,8 +71,8 @@ AndHashStage::AndHashStage(OperationContext* opCtx, WorkingSet* ws, size_t maxMe
_memUsage(0),
_maxMemUsage(maxMemUsage) {}
-void AndHashStage::addChild(PlanStage* child) {
- _children.emplace_back(child);
+void AndHashStage::addChild(std::unique_ptr<PlanStage> child) {
+ _children.emplace_back(std::move(child));
}
size_t AndHashStage::getMemUsage() const {
diff --git a/src/mongo/db/exec/and_hash.h b/src/mongo/db/exec/and_hash.h
index 2b8c326c3ce..3659504486d 100644
--- a/src/mongo/db/exec/and_hash.h
+++ b/src/mongo/db/exec/and_hash.h
@@ -55,7 +55,7 @@ public:
*/
AndHashStage(OperationContext* opCtx, WorkingSet* ws, size_t maxMemUsage);
- void addChild(PlanStage* child);
+ void addChild(std::unique_ptr<PlanStage> child);
/**
* Returns memory usage.
diff --git a/src/mongo/db/exec/and_sorted.cpp b/src/mongo/db/exec/and_sorted.cpp
index 6c60553a76e..6cda1ad2bf1 100644
--- a/src/mongo/db/exec/and_sorted.cpp
+++ b/src/mongo/db/exec/and_sorted.cpp
@@ -53,8 +53,8 @@ AndSortedStage::AndSortedStage(OperationContext* opCtx, WorkingSet* ws)
_isEOF(false) {}
-void AndSortedStage::addChild(PlanStage* child) {
- _children.emplace_back(child);
+void AndSortedStage::addChild(std::unique_ptr<PlanStage> child) {
+ _children.emplace_back(std::move(child));
}
bool AndSortedStage::isEOF() {
diff --git a/src/mongo/db/exec/and_sorted.h b/src/mongo/db/exec/and_sorted.h
index a01ebc70d3a..3d72d15c1f9 100644
--- a/src/mongo/db/exec/and_sorted.h
+++ b/src/mongo/db/exec/and_sorted.h
@@ -49,7 +49,7 @@ class AndSortedStage final : public PlanStage {
public:
AndSortedStage(OperationContext* opCtx, WorkingSet* ws);
- void addChild(PlanStage* child);
+ void addChild(std::unique_ptr<PlanStage> child);
StageState doWork(WorkingSetID* out) final;
bool isEOF() final;
diff --git a/src/mongo/db/exec/cached_plan.cpp b/src/mongo/db/exec/cached_plan.cpp
index 0c46d328c8a..97e2c9dd5a9 100644
--- a/src/mongo/db/exec/cached_plan.cpp
+++ b/src/mongo/db/exec/cached_plan.cpp
@@ -63,13 +63,13 @@ CachedPlanStage::CachedPlanStage(OperationContext* opCtx,
CanonicalQuery* cq,
const QueryPlannerParams& params,
size_t decisionWorks,
- PlanStage* root)
+ std::unique_ptr<PlanStage> root)
: RequiresAllIndicesStage(kStageType, opCtx, collection),
_ws(ws),
_canonicalQuery(cq),
_plannerParams(params),
_decisionWorks(decisionWorks) {
- _children.emplace_back(root);
+ _children.emplace_back(std::move(root));
}
Status CachedPlanStage::pickBestPlan(PlanYieldPolicy* yieldPolicy) {
@@ -215,11 +215,10 @@ Status CachedPlanStage::replan(PlanYieldPolicy* yieldPolicy, bool shouldCache) {
}
if (1 == solutions.size()) {
- PlanStage* newRoot;
// Only one possible plan. Build the stages from the solution.
- verify(StageBuilder::build(
- getOpCtx(), collection(), *_canonicalQuery, *solutions[0], _ws, &newRoot));
- _children.emplace_back(newRoot);
+ auto newRoot =
+ StageBuilder::build(getOpCtx(), collection(), *_canonicalQuery, *solutions[0], _ws);
+ _children.emplace_back(std::move(newRoot));
_replannedQs = std::move(solutions.back());
solutions.pop_back();
@@ -244,12 +243,10 @@ Status CachedPlanStage::replan(PlanYieldPolicy* yieldPolicy, bool shouldCache) {
solutions[ix]->cacheData->indexFilterApplied = _plannerParams.indexFiltersApplied;
}
- PlanStage* nextPlanRoot;
- verify(StageBuilder::build(
- getOpCtx(), collection(), *_canonicalQuery, *solutions[ix], _ws, &nextPlanRoot));
+ auto nextPlanRoot =
+ StageBuilder::build(getOpCtx(), collection(), *_canonicalQuery, *solutions[ix], _ws);
- // Takes ownership of 'nextPlanRoot'.
- multiPlanStage->addPlan(std::move(solutions[ix]), nextPlanRoot, _ws);
+ multiPlanStage->addPlan(std::move(solutions[ix]), std::move(nextPlanRoot), _ws);
}
// Delegate to the MultiPlanStage's plan selection facility.
diff --git a/src/mongo/db/exec/cached_plan.h b/src/mongo/db/exec/cached_plan.h
index 45ca2ed166c..bc7b1bc50bb 100644
--- a/src/mongo/db/exec/cached_plan.h
+++ b/src/mongo/db/exec/cached_plan.h
@@ -62,7 +62,7 @@ public:
CanonicalQuery* cq,
const QueryPlannerParams& params,
size_t decisionWorks,
- PlanStage* root);
+ std::unique_ptr<PlanStage> root);
bool isEOF() final;
diff --git a/src/mongo/db/exec/ensure_sorted.cpp b/src/mongo/db/exec/ensure_sorted.cpp
index 611835aaba6..e4c0acc052b 100644
--- a/src/mongo/db/exec/ensure_sorted.cpp
+++ b/src/mongo/db/exec/ensure_sorted.cpp
@@ -45,9 +45,9 @@ const char* EnsureSortedStage::kStageType = "ENSURE_SORTED";
EnsureSortedStage::EnsureSortedStage(OperationContext* opCtx,
BSONObj pattern,
WorkingSet* ws,
- PlanStage* child)
+ std::unique_ptr<PlanStage> child)
: PlanStage(kStageType, opCtx), _ws(ws) {
- _children.emplace_back(child);
+ _children.emplace_back(std::move(child));
_pattern = FindCommon::transformSortSpec(pattern);
}
diff --git a/src/mongo/db/exec/ensure_sorted.h b/src/mongo/db/exec/ensure_sorted.h
index 0f7e09dfa6d..3e3915b6a78 100644
--- a/src/mongo/db/exec/ensure_sorted.h
+++ b/src/mongo/db/exec/ensure_sorted.h
@@ -42,7 +42,10 @@ namespace mongo {
*/
class EnsureSortedStage final : public PlanStage {
public:
- EnsureSortedStage(OperationContext* opCtx, BSONObj pattern, WorkingSet* ws, PlanStage* child);
+ EnsureSortedStage(OperationContext* opCtx,
+ BSONObj pattern,
+ WorkingSet* ws,
+ std::unique_ptr<PlanStage> child);
bool isEOF() final;
StageState doWork(WorkingSetID* out) final;
diff --git a/src/mongo/db/exec/fetch.cpp b/src/mongo/db/exec/fetch.cpp
index 2e30f53e302..bb09866f983 100644
--- a/src/mongo/db/exec/fetch.cpp
+++ b/src/mongo/db/exec/fetch.cpp
@@ -51,14 +51,14 @@ const char* FetchStage::kStageType = "FETCH";
FetchStage::FetchStage(OperationContext* opCtx,
WorkingSet* ws,
- PlanStage* child,
+ std::unique_ptr<PlanStage> child,
const MatchExpression* filter,
const Collection* collection)
: RequiresCollectionStage(kStageType, opCtx, collection),
_ws(ws),
_filter(filter),
_idRetrying(WorkingSet::INVALID_ID) {
- _children.emplace_back(child);
+ _children.emplace_back(std::move(child));
}
FetchStage::~FetchStage() {}
diff --git a/src/mongo/db/exec/fetch.h b/src/mongo/db/exec/fetch.h
index cdfaad31083..074bd63b4d2 100644
--- a/src/mongo/db/exec/fetch.h
+++ b/src/mongo/db/exec/fetch.h
@@ -52,7 +52,7 @@ class FetchStage : public RequiresCollectionStage {
public:
FetchStage(OperationContext* opCtx,
WorkingSet* ws,
- PlanStage* child,
+ std::unique_ptr<PlanStage> child,
const MatchExpression* filter,
const Collection* collection);
diff --git a/src/mongo/db/exec/geo_near.cpp b/src/mongo/db/exec/geo_near.cpp
index d9e7749e5fc..ccf5575044f 100644
--- a/src/mongo/db/exec/geo_near.cpp
+++ b/src/mongo/db/exec/geo_near.cpp
@@ -40,7 +40,6 @@
#include "mongo/base/owned_pointer_vector.h"
#include "mongo/db/bson/dotted_path_support.h"
#include "mongo/db/exec/fetch.h"
-#include "mongo/db/exec/index_scan.h"
#include "mongo/db/geo/geoconstants.h"
#include "mongo/db/geo/geoparser.h"
#include "mongo/db/geo/hash.h"
@@ -265,46 +264,24 @@ static R2Annulus twoDDistanceBounds(const GeoNearParams& nearParams,
return fullBounds;
}
-class GeoNear2DStage::DensityEstimator {
-public:
- DensityEstimator(PlanStage::Children* children,
- BSONObj infoObj,
- const GeoNearParams* nearParams,
- const R2Annulus& fullBounds)
- : _children(children), _nearParams(nearParams), _fullBounds(fullBounds), _currentLevel(0) {
- GeoHashConverter::Parameters hashParams;
- Status status = GeoHashConverter::parseParameters(std::move(infoObj), &hashParams);
- // The index status should always be valid.
- invariant(status.isOK());
-
- _converter.reset(new GeoHashConverter(hashParams));
- _centroidCell = _converter->hash(_nearParams->nearQuery->centroid->oldPoint);
-
- // Since appendVertexNeighbors(level, output) requires level < hash.getBits(),
- // we have to start to find documents at most GeoHash::kMaxBits - 1. Thus the finest
- // search area is 16 * finest cell area at GeoHash::kMaxBits.
- _currentLevel = std::max(0, hashParams.bits - 1);
- }
+GeoNear2DStage::DensityEstimator::DensityEstimator(PlanStage::Children* children,
+ BSONObj infoObj,
+ const GeoNearParams* nearParams,
+ const R2Annulus& fullBounds)
+ : _children(children), _nearParams(nearParams), _fullBounds(fullBounds), _currentLevel(0) {
+ GeoHashConverter::Parameters hashParams;
+ Status status = GeoHashConverter::parseParameters(std::move(infoObj), &hashParams);
+ // The index status should always be valid.
+ invariant(status.isOK());
- PlanStage::StageState work(OperationContext* opCtx,
- WorkingSet* workingSet,
- const IndexDescriptor* twoDIndex,
- WorkingSetID* out,
- double* estimatedDistance);
+ _converter.reset(new GeoHashConverter(hashParams));
+ _centroidCell = _converter->hash(_nearParams->nearQuery->centroid->oldPoint);
-private:
- void buildIndexScan(OperationContext* opCtx,
- WorkingSet* workingSet,
- const IndexDescriptor* twoDIndex);
-
- PlanStage::Children* _children; // Points to PlanStage::_children in the NearStage.
- const GeoNearParams* _nearParams; // Not owned here.
- const R2Annulus& _fullBounds;
- IndexScan* _indexScan = nullptr; // Owned in PlanStage::_children.
- unique_ptr<GeoHashConverter> _converter;
- GeoHash _centroidCell;
- unsigned _currentLevel;
-};
+ // Since appendVertexNeighbors(level, output) requires level < hash.getBits(),
+ // we have to start to find documents at most GeoHash::kMaxBits - 1. Thus the finest
+ // search area is 16 * finest cell area at GeoHash::kMaxBits.
+ _currentLevel = std::max(0, hashParams.bits - 1);
+}
// Initialize the internal states
void GeoNear2DStage::DensityEstimator::buildIndexScan(OperationContext* opCtx,
@@ -546,10 +523,10 @@ class FetchStageWithMatch final : public FetchStage {
public:
FetchStageWithMatch(OperationContext* opCtx,
WorkingSet* ws,
- PlanStage* child,
+ std::unique_ptr<PlanStage> child,
MatchExpression* filter,
const Collection* collection)
- : FetchStage(opCtx, ws, child, filter, collection), _matcher(filter) {}
+ : FetchStage(opCtx, ws, std::move(child), filter, collection), _matcher(filter) {}
private:
// Owns matcher
@@ -722,7 +699,7 @@ GeoNear2DStage::nextInterval(OperationContext* opCtx,
.transitional_ignore();
// 2D indexes support covered search over additional fields they contain
- IndexScan* scan = new IndexScan(opCtx, scanParams, workingSet, _nearParams.filter);
+ auto scan = std::make_unique<IndexScan>(opCtx, scanParams, workingSet, _nearParams.filter);
MatchExpression* docMatcher = nullptr;
@@ -733,8 +710,8 @@ GeoNear2DStage::nextInterval(OperationContext* opCtx,
}
// FetchStage owns index scan
- _children.emplace_back(
- new FetchStageWithMatch(opCtx, workingSet, scan, docMatcher, collection));
+ _children.emplace_back(std::make_unique<FetchStageWithMatch>(
+ opCtx, workingSet, std::move(scan), docMatcher, collection));
return StatusWith<CoveredInterval*>(new CoveredInterval(
_children.back().get(), nextBounds.getInner(), nextBounds.getOuter(), isLastInterval));
@@ -831,44 +808,20 @@ S2Region* buildS2Region(const R2Annulus& sphereBounds) {
}
} // namespace
-// Estimate the density of data by search the nearest cells level by level around center.
-class GeoNear2DSphereStage::DensityEstimator {
-public:
- DensityEstimator(PlanStage::Children* children,
- const GeoNearParams* nearParams,
- const S2IndexingParams& indexParams,
- const R2Annulus& fullBounds)
- : _children(children),
- _nearParams(nearParams),
- _indexParams(indexParams),
- _fullBounds(fullBounds),
- _currentLevel(0) {
- // cellId.AppendVertexNeighbors(level, output) requires level < finest,
- // so we use the minimum of max_level - 1 and the user specified finest
- int level = std::min(S2::kMaxCellLevel - 1, gInternalQueryS2GeoFinestLevel.load());
- _currentLevel = std::max(0, level);
- }
-
- // Search for a document in neighbors at current level.
- // Return IS_EOF is such document exists and set the estimated distance to the nearest doc.
- PlanStage::StageState work(OperationContext* opCtx,
- WorkingSet* workingSet,
- const IndexDescriptor* s2Index,
- WorkingSetID* out,
- double* estimatedDistance);
-
-private:
- void buildIndexScan(OperationContext* opCtx,
- WorkingSet* workingSet,
- const IndexDescriptor* s2Index);
-
- PlanStage::Children* _children; // Points to PlanStage::_children in the NearStage.
- const GeoNearParams* _nearParams; // Not owned here.
- const S2IndexingParams _indexParams;
- const R2Annulus& _fullBounds;
- int _currentLevel;
- IndexScan* _indexScan = nullptr; // Owned in PlanStage::_children.
-};
+GeoNear2DSphereStage::DensityEstimator::DensityEstimator(PlanStage::Children* children,
+ const GeoNearParams* nearParams,
+ const S2IndexingParams& indexParams,
+ const R2Annulus& fullBounds)
+ : _children(children),
+ _nearParams(nearParams),
+ _indexParams(indexParams),
+ _fullBounds(fullBounds),
+ _currentLevel(0) {
+ // cellId.AppendVertexNeighbors(level, output) requires level < finest,
+ // so we use the minimum of max_level - 1 and the user specified finest
+ int level = std::min(S2::kMaxCellLevel - 1, gInternalQueryS2GeoFinestLevel.load());
+ _currentLevel = std::max(0, level);
+}
// Setup the index scan stage for neighbors at this level.
void GeoNear2DSphereStage::DensityEstimator::buildIndexScan(OperationContext* opCtx,
@@ -1077,10 +1030,11 @@ GeoNear2DSphereStage::nextInterval(OperationContext* opCtx,
OrderedIntervalList* coveredIntervals = &scanParams.bounds.fields[s2FieldPosition];
ExpressionMapping::S2CellIdsToIntervalsWithParents(cover, _indexParams, coveredIntervals);
- IndexScan* scan = new IndexScan(opCtx, scanParams, workingSet, nullptr);
+ auto scan = std::make_unique<IndexScan>(opCtx, scanParams, workingSet, nullptr);
// FetchStage owns index scan
- _children.emplace_back(new FetchStage(opCtx, workingSet, scan, _nearParams.filter, collection));
+ _children.emplace_back(std::make_unique<FetchStage>(
+ opCtx, workingSet, std::move(scan), _nearParams.filter, collection));
return StatusWith<CoveredInterval*>(new CoveredInterval(
_children.back().get(), nextBounds.getInner(), nextBounds.getOuter(), isLastInterval));
diff --git a/src/mongo/db/exec/geo_near.h b/src/mongo/db/exec/geo_near.h
index 33295de7b39..ce23ccd6e38 100644
--- a/src/mongo/db/exec/geo_near.h
+++ b/src/mongo/db/exec/geo_near.h
@@ -29,6 +29,7 @@
#pragma once
+#include "mongo/db/exec/index_scan.h"
#include "mongo/db/exec/near.h"
#include "mongo/db/exec/plan_stats.h"
#include "mongo/db/exec/working_set.h"
@@ -84,6 +85,33 @@ protected:
WorkingSetID* out) final;
private:
+ class DensityEstimator {
+ public:
+ DensityEstimator(PlanStage::Children* children,
+ BSONObj infoObj,
+ const GeoNearParams* nearParams,
+ const R2Annulus& fullBounds);
+
+ PlanStage::StageState work(OperationContext* opCtx,
+ WorkingSet* workingSet,
+ const IndexDescriptor* twoDIndex,
+ WorkingSetID* out,
+ double* estimatedDistance);
+
+ private:
+ void buildIndexScan(OperationContext* opCtx,
+ WorkingSet* workingSet,
+ const IndexDescriptor* twoDIndex);
+
+ PlanStage::Children* _children; // Points to PlanStage::_children in the NearStage.
+ const GeoNearParams* _nearParams; // Not owned here.
+ const R2Annulus& _fullBounds;
+ IndexScan* _indexScan = nullptr; // Owned in PlanStage::_children.
+ std::unique_ptr<GeoHashConverter> _converter;
+ GeoHash _centroidCell;
+ unsigned _currentLevel;
+ };
+
const GeoNearParams _nearParams;
// The total search annulus
@@ -98,7 +126,6 @@ private:
// Keeps track of the region that has already been scanned
R2CellUnion _scannedCells;
- class DensityEstimator;
std::unique_ptr<DensityEstimator> _densityEstimator;
};
@@ -126,6 +153,35 @@ protected:
WorkingSetID* out) final;
private:
+ // Estimate the density of data by searching the nearest cells level by level around center.
+ class DensityEstimator {
+ public:
+ DensityEstimator(PlanStage::Children* children,
+ const GeoNearParams* nearParams,
+ const S2IndexingParams& indexParams,
+ const R2Annulus& fullBounds);
+
+ // Search for a document in neighbors at current level.
+ // Return IS_EOF if such document exists and set the estimated distance to the nearest doc.
+ PlanStage::StageState work(OperationContext* opCtx,
+ WorkingSet* workingSet,
+ const IndexDescriptor* s2Index,
+ WorkingSetID* out,
+ double* estimatedDistance);
+
+ private:
+ void buildIndexScan(OperationContext* opCtx,
+ WorkingSet* workingSet,
+ const IndexDescriptor* s2Index);
+
+ PlanStage::Children* _children; // Points to PlanStage::_children in the NearStage.
+ const GeoNearParams* _nearParams; // Not owned here.
+ const S2IndexingParams _indexParams;
+ const R2Annulus& _fullBounds;
+ int _currentLevel;
+ IndexScan* _indexScan = nullptr; // Owned in PlanStage::_children.
+ };
+
const GeoNearParams _nearParams;
S2IndexingParams _indexParams;
@@ -142,7 +198,6 @@ private:
// Keeps track of the region that has already been scanned
S2CellUnion _scannedCells;
- class DensityEstimator;
std::unique_ptr<DensityEstimator> _densityEstimator;
};
diff --git a/src/mongo/db/exec/limit.cpp b/src/mongo/db/exec/limit.cpp
index 605babeeb31..e800d614039 100644
--- a/src/mongo/db/exec/limit.cpp
+++ b/src/mongo/db/exec/limit.cpp
@@ -43,10 +43,13 @@ using std::vector;
// static
const char* LimitStage::kStageType = "LIMIT";
-LimitStage::LimitStage(OperationContext* opCtx, long long limit, WorkingSet* ws, PlanStage* child)
+LimitStage::LimitStage(OperationContext* opCtx,
+ long long limit,
+ WorkingSet* ws,
+ std::unique_ptr<PlanStage> child)
: PlanStage(kStageType, opCtx), _ws(ws), _numToReturn(limit) {
_specificStats.limit = _numToReturn;
- _children.emplace_back(child);
+ _children.emplace_back(std::move(child));
}
LimitStage::~LimitStage() {}
diff --git a/src/mongo/db/exec/limit.h b/src/mongo/db/exec/limit.h
index 61f2e3d1476..f807838b540 100644
--- a/src/mongo/db/exec/limit.h
+++ b/src/mongo/db/exec/limit.h
@@ -45,7 +45,10 @@ namespace mongo {
*/
class LimitStage final : public PlanStage {
public:
- LimitStage(OperationContext* opCtx, long long limit, WorkingSet* ws, PlanStage* child);
+ LimitStage(OperationContext* opCtx,
+ long long limit,
+ WorkingSet* ws,
+ std::unique_ptr<PlanStage> child);
~LimitStage();
bool isEOF() final;
diff --git a/src/mongo/db/exec/merge_sort.cpp b/src/mongo/db/exec/merge_sort.cpp
index 8ab56f2b7ad..cc7d40b073e 100644
--- a/src/mongo/db/exec/merge_sort.cpp
+++ b/src/mongo/db/exec/merge_sort.cpp
@@ -57,11 +57,11 @@ MergeSortStage::MergeSortStage(OperationContext* opCtx,
_dedup(params.dedup),
_merging(StageWithValueComparison(ws, params.pattern, params.collator)) {}
-void MergeSortStage::addChild(PlanStage* child) {
- _children.emplace_back(child);
+void MergeSortStage::addChild(std::unique_ptr<PlanStage> child) {
+ _children.emplace_back(std::move(child));
// We have to call work(...) on every child before we can pick a min.
- _noResultToMerge.push(child);
+ _noResultToMerge.push(_children.back().get());
}
bool MergeSortStage::isEOF() {
diff --git a/src/mongo/db/exec/merge_sort.h b/src/mongo/db/exec/merge_sort.h
index 5a25a3243cd..714f6e0c68a 100644
--- a/src/mongo/db/exec/merge_sort.h
+++ b/src/mongo/db/exec/merge_sort.h
@@ -59,7 +59,7 @@ class MergeSortStage final : public PlanStage {
public:
MergeSortStage(OperationContext* opCtx, const MergeSortStageParams& params, WorkingSet* ws);
- void addChild(PlanStage* child);
+ void addChild(std::unique_ptr<PlanStage> child);
bool isEOF() final;
StageState doWork(WorkingSetID* out) final;
diff --git a/src/mongo/db/exec/multi_plan.cpp b/src/mongo/db/exec/multi_plan.cpp
index 929d22f756d..45c409c7ab3 100644
--- a/src/mongo/db/exec/multi_plan.cpp
+++ b/src/mongo/db/exec/multi_plan.cpp
@@ -75,10 +75,10 @@ MultiPlanStage::MultiPlanStage(OperationContext* opCtx,
_statusMemberId(WorkingSet::INVALID_ID) {}
void MultiPlanStage::addPlan(std::unique_ptr<QuerySolution> solution,
- PlanStage* root,
+ std::unique_ptr<PlanStage> root,
WorkingSet* ws) {
- _candidates.push_back(CandidatePlan(std::move(solution), root, ws));
- _children.emplace_back(root);
+ _children.emplace_back(std::move(root));
+ _candidates.push_back(CandidatePlan(std::move(solution), _children.back().get(), ws));
}
bool MultiPlanStage::isEOF() {
diff --git a/src/mongo/db/exec/multi_plan.h b/src/mongo/db/exec/multi_plan.h
index d11797b5582..f70b2800e63 100644
--- a/src/mongo/db/exec/multi_plan.h
+++ b/src/mongo/db/exec/multi_plan.h
@@ -95,9 +95,11 @@ public:
const SpecificStats* getSpecificStats() const final;
/**
- * Takes ownership of PlanStage. Does not take ownership of WorkingSet.
+ * Adds a new candidate plan to be considered for selection by the MultiPlanStage trial period.
*/
- void addPlan(std::unique_ptr<QuerySolution> solution, PlanStage* root, WorkingSet* sharedWs);
+ void addPlan(std::unique_ptr<QuerySolution> solution,
+ std::unique_ptr<PlanStage> root,
+ WorkingSet* sharedWs);
/**
* Runs all plans added by addPlan, ranks them, and picks a best.
diff --git a/src/mongo/db/exec/or.cpp b/src/mongo/db/exec/or.cpp
index 8269b599f43..3800536b62c 100644
--- a/src/mongo/db/exec/or.cpp
+++ b/src/mongo/db/exec/or.cpp
@@ -47,8 +47,8 @@ const char* OrStage::kStageType = "OR";
OrStage::OrStage(OperationContext* opCtx, WorkingSet* ws, bool dedup, const MatchExpression* filter)
: PlanStage(kStageType, opCtx), _ws(ws), _filter(filter), _currentChild(0), _dedup(dedup) {}
-void OrStage::addChild(PlanStage* child) {
- _children.emplace_back(child);
+void OrStage::addChild(std::unique_ptr<PlanStage> child) {
+ _children.emplace_back(std::move(child));
}
void OrStage::addChildren(Children childrenToAdd) {
diff --git a/src/mongo/db/exec/or.h b/src/mongo/db/exec/or.h
index dff62d17d51..8d2c043ee46 100644
--- a/src/mongo/db/exec/or.h
+++ b/src/mongo/db/exec/or.h
@@ -46,7 +46,7 @@ class OrStage final : public PlanStage {
public:
OrStage(OperationContext* opCtx, WorkingSet* ws, bool dedup, const MatchExpression* filter);
- void addChild(PlanStage* child);
+ void addChild(std::unique_ptr<PlanStage> child);
void addChildren(Children childrenToAdd);
diff --git a/src/mongo/db/exec/shard_filter.cpp b/src/mongo/db/exec/shard_filter.cpp
index ed25ef0ad8e..3ca72e989a0 100644
--- a/src/mongo/db/exec/shard_filter.cpp
+++ b/src/mongo/db/exec/shard_filter.cpp
@@ -53,9 +53,9 @@ const char* ShardFilterStage::kStageType = "SHARDING_FILTER";
ShardFilterStage::ShardFilterStage(OperationContext* opCtx,
ScopedCollectionMetadata metadata,
WorkingSet* ws,
- PlanStage* child)
+ std::unique_ptr<PlanStage> child)
: PlanStage(kStageType, opCtx), _ws(ws), _shardFilterer(std::move(metadata)) {
- _children.emplace_back(child);
+ _children.emplace_back(std::move(child));
}
ShardFilterStage::~ShardFilterStage() {}
diff --git a/src/mongo/db/exec/shard_filter.h b/src/mongo/db/exec/shard_filter.h
index d8284ad4e6f..5fdb2a2a247 100644
--- a/src/mongo/db/exec/shard_filter.h
+++ b/src/mongo/db/exec/shard_filter.h
@@ -74,7 +74,7 @@ public:
ShardFilterStage(OperationContext* opCtx,
ScopedCollectionMetadata metadata,
WorkingSet* ws,
- PlanStage* child);
+ std::unique_ptr<PlanStage> child);
~ShardFilterStage();
bool isEOF() final;
diff --git a/src/mongo/db/exec/skip.cpp b/src/mongo/db/exec/skip.cpp
index a87a9dd745b..bc488c1b410 100644
--- a/src/mongo/db/exec/skip.cpp
+++ b/src/mongo/db/exec/skip.cpp
@@ -43,9 +43,12 @@ using std::vector;
// static
const char* SkipStage::kStageType = "SKIP";
-SkipStage::SkipStage(OperationContext* opCtx, long long toSkip, WorkingSet* ws, PlanStage* child)
+SkipStage::SkipStage(OperationContext* opCtx,
+ long long toSkip,
+ WorkingSet* ws,
+ std::unique_ptr<PlanStage> child)
: PlanStage(kStageType, opCtx), _ws(ws), _toSkip(toSkip) {
- _children.emplace_back(child);
+ _children.emplace_back(std::move(child));
}
SkipStage::~SkipStage() {}
diff --git a/src/mongo/db/exec/skip.h b/src/mongo/db/exec/skip.h
index c885f275f31..8751cb22471 100644
--- a/src/mongo/db/exec/skip.h
+++ b/src/mongo/db/exec/skip.h
@@ -44,7 +44,10 @@ namespace mongo {
*/
class SkipStage final : public PlanStage {
public:
- SkipStage(OperationContext* opCtx, long long toSkip, WorkingSet* ws, PlanStage* child);
+ SkipStage(OperationContext* opCtx,
+ long long toSkip,
+ WorkingSet* ws,
+ std::unique_ptr<PlanStage> child);
~SkipStage();
bool isEOF() final;
diff --git a/src/mongo/db/exec/sort.cpp b/src/mongo/db/exec/sort.cpp
index 13f1615ee11..1dfae92f7f6 100644
--- a/src/mongo/db/exec/sort.cpp
+++ b/src/mongo/db/exec/sort.cpp
@@ -69,7 +69,7 @@ bool SortStage::WorkingSetComparator::operator()(const SortableDataItem& lhs,
SortStage::SortStage(OperationContext* opCtx,
const SortStageParams& params,
WorkingSet* ws,
- PlanStage* child)
+ std::unique_ptr<PlanStage> child)
: PlanStage(kStageType, opCtx),
_ws(ws),
_pattern(params.pattern),
@@ -78,7 +78,7 @@ SortStage::SortStage(OperationContext* opCtx,
_sorted(false),
_resultIterator(_data.end()),
_memUsage(0) {
- _children.emplace_back(child);
+ _children.emplace_back(std::move(child));
BSONObj sortComparator = FindCommon::transformSortSpec(_pattern);
_sortKeyComparator = std::make_unique<WorkingSetComparator>(sortComparator);
diff --git a/src/mongo/db/exec/sort.h b/src/mongo/db/exec/sort.h
index adadc9ad121..8fc5827fe13 100644
--- a/src/mongo/db/exec/sort.h
+++ b/src/mongo/db/exec/sort.h
@@ -72,7 +72,7 @@ public:
SortStage(OperationContext* opCtx,
const SortStageParams& params,
WorkingSet* ws,
- PlanStage* child);
+ std::unique_ptr<PlanStage> child);
~SortStage();
bool isEOF() final;
diff --git a/src/mongo/db/exec/sort_key_generator.cpp b/src/mongo/db/exec/sort_key_generator.cpp
index 98800394762..2439da67cb3 100644
--- a/src/mongo/db/exec/sort_key_generator.cpp
+++ b/src/mongo/db/exec/sort_key_generator.cpp
@@ -50,13 +50,13 @@ namespace mongo {
const char* SortKeyGeneratorStage::kStageType = "SORT_KEY_GENERATOR";
SortKeyGeneratorStage::SortKeyGeneratorStage(const boost::intrusive_ptr<ExpressionContext>& pExpCtx,
- PlanStage* child,
+ std::unique_ptr<PlanStage> child,
WorkingSet* ws,
const BSONObj& sortSpecObj)
: PlanStage(kStageType, pExpCtx->opCtx),
_ws(ws),
_sortKeyGen({{sortSpecObj, pExpCtx}, pExpCtx->getCollator()}) {
- _children.emplace_back(child);
+ _children.emplace_back(std::move(child));
}
bool SortKeyGeneratorStage::isEOF() {
diff --git a/src/mongo/db/exec/sort_key_generator.h b/src/mongo/db/exec/sort_key_generator.h
index c7a9e2dfd4b..5732f2008f6 100644
--- a/src/mongo/db/exec/sort_key_generator.h
+++ b/src/mongo/db/exec/sort_key_generator.h
@@ -51,7 +51,7 @@ class WorkingSetMember;
class SortKeyGeneratorStage final : public PlanStage {
public:
SortKeyGeneratorStage(const boost::intrusive_ptr<ExpressionContext>& pExpCtx,
- PlanStage* child,
+ std::unique_ptr<PlanStage> child,
WorkingSet* ws,
const BSONObj& sortSpecObj);
diff --git a/src/mongo/db/exec/sort_test.cpp b/src/mongo/db/exec/sort_test.cpp
index 65971d6ac15..7ff0614d7eb 100644
--- a/src/mongo/db/exec/sort_test.cpp
+++ b/src/mongo/db/exec/sort_test.cpp
@@ -108,9 +108,9 @@ public:
new ExpressionContext(getOpCtx(), collator));
auto sortKeyGen = std::make_unique<SortKeyGeneratorStage>(
- pExpCtx, queuedDataStage.release(), &ws, params.pattern);
+ pExpCtx, std::move(queuedDataStage), &ws, params.pattern);
- SortStage sort(getOpCtx(), params, &ws, sortKeyGen.release());
+ SortStage sort(getOpCtx(), params, &ws, std::move(sortKeyGen));
WorkingSetID id = WorkingSet::INVALID_ID;
PlanStage::StageState state = PlanStage::NEED_TIME;
@@ -167,10 +167,10 @@ TEST_F(SortStageTest, SortEmptyWorkingSet) {
// QueuedDataStage will be owned by SortStage.
auto queuedDataStage = std::make_unique<QueuedDataStage>(getOpCtx(), &ws);
- auto sortKeyGen =
- std::make_unique<SortKeyGeneratorStage>(pExpCtx, queuedDataStage.release(), &ws, BSONObj());
+ auto sortKeyGen = std::make_unique<SortKeyGeneratorStage>(
+ pExpCtx, std::move(queuedDataStage), &ws, BSONObj());
SortStageParams params;
- SortStage sort(getOpCtx(), params, &ws, sortKeyGen.release());
+ SortStage sort(getOpCtx(), params, &ws, std::move(sortKeyGen));
// Check initial EOF state.
ASSERT_FALSE(sort.isEOF());
diff --git a/src/mongo/db/exec/stagedebug_cmd.cpp b/src/mongo/db/exec/stagedebug_cmd.cpp
index 3baee889ab1..50bbb7e624b 100644
--- a/src/mongo/db/exec/stagedebug_cmd.cpp
+++ b/src/mongo/db/exec/stagedebug_cmd.cpp
@@ -170,13 +170,13 @@ public:
std::vector<std::unique_ptr<MatchExpression>> exprs;
unique_ptr<WorkingSet> ws(new WorkingSet());
- PlanStage* userRoot = parseQuery(opCtx, collection, planObj, ws.get(), &exprs);
+ std::unique_ptr<PlanStage> userRoot{
+ parseQuery(opCtx, collection, planObj, ws.get(), &exprs)};
uassert(16911, "Couldn't parse plan from " + cmdObj.toString(), nullptr != userRoot);
// Add a fetch at the top for the user so we can get obj back for sure.
- // TODO: Do we want to do this for the user? I think so.
unique_ptr<PlanStage> rootFetch =
- std::make_unique<FetchStage>(opCtx, ws.get(), userRoot, nullptr, collection);
+ std::make_unique<FetchStage>(opCtx, ws.get(), std::move(userRoot), nullptr, collection);
auto statusWithPlanExecutor = PlanExecutor::make(
opCtx, std::move(ws), std::move(rootFetch), collection, PlanExecutor::YIELD_AUTO);
@@ -307,12 +307,12 @@ public:
BSONElement e = it.next();
uassert(16922, "node of AND isn't an obj?: " + e.toString(), e.isABSONObj());
- PlanStage* subNode = parseQuery(opCtx, collection, e.Obj(), workingSet, exprs);
+ std::unique_ptr<PlanStage> subNode{
+ parseQuery(opCtx, collection, e.Obj(), workingSet, exprs)};
uassert(16923,
"Can't parse sub-node of AND: " + e.Obj().toString(),
nullptr != subNode);
- // takes ownership
- andStage->addChild(subNode);
+ andStage->addChild(std::move(subNode));
++nodesAdded;
}
@@ -331,12 +331,12 @@ public:
BSONElement e = it.next();
uassert(16925, "node of AND isn't an obj?: " + e.toString(), e.isABSONObj());
- PlanStage* subNode = parseQuery(opCtx, collection, e.Obj(), workingSet, exprs);
+ std::unique_ptr<PlanStage> subNode{
+ parseQuery(opCtx, collection, e.Obj(), workingSet, exprs)};
uassert(16926,
"Can't parse sub-node of AND: " + e.Obj().toString(),
nullptr != subNode);
- // takes ownership
- andStage->addChild(subNode);
+ andStage->addChild(std::move(subNode));
++nodesAdded;
}
@@ -355,23 +355,23 @@ public:
if (!e.isABSONObj()) {
return nullptr;
}
- PlanStage* subNode = parseQuery(opCtx, collection, e.Obj(), workingSet, exprs);
+ std::unique_ptr<PlanStage> subNode{
+ parseQuery(opCtx, collection, e.Obj(), workingSet, exprs)};
uassert(
16936, "Can't parse sub-node of OR: " + e.Obj().toString(), nullptr != subNode);
- // takes ownership
- orStage->addChild(subNode);
+ orStage->addChild(std::move(subNode));
}
return orStage.release();
} else if ("fetch" == nodeName) {
uassert(
16929, "Node argument must be provided to fetch", nodeArgs["node"].isABSONObj());
- PlanStage* subNode =
- parseQuery(opCtx, collection, nodeArgs["node"].Obj(), workingSet, exprs);
+ std::unique_ptr<PlanStage> subNode{
+ parseQuery(opCtx, collection, nodeArgs["node"].Obj(), workingSet, exprs)};
uassert(28731,
"Can't parse sub-node of FETCH: " + nodeArgs["node"].Obj().toString(),
nullptr != subNode);
- return new FetchStage(opCtx, workingSet, subNode, matcher, collection);
+ return new FetchStage(opCtx, workingSet, std::move(subNode), matcher, collection);
} else if ("limit" == nodeName) {
uassert(16937,
"Limit stage doesn't have a filter (put it on the child)",
@@ -379,24 +379,26 @@ public:
uassert(
16930, "Node argument must be provided to limit", nodeArgs["node"].isABSONObj());
uassert(16931, "Num argument must be provided to limit", nodeArgs["num"].isNumber());
- PlanStage* subNode =
- parseQuery(opCtx, collection, nodeArgs["node"].Obj(), workingSet, exprs);
+ std::unique_ptr<PlanStage> subNode{
+ parseQuery(opCtx, collection, nodeArgs["node"].Obj(), workingSet, exprs)};
uassert(28732,
"Can't parse sub-node of LIMIT: " + nodeArgs["node"].Obj().toString(),
nullptr != subNode);
- return new LimitStage(opCtx, nodeArgs["num"].numberInt(), workingSet, subNode);
+ return new LimitStage(
+ opCtx, nodeArgs["num"].numberInt(), workingSet, std::move(subNode));
} else if ("skip" == nodeName) {
uassert(16938,
"Skip stage doesn't have a filter (put it on the child)",
nullptr == matcher);
uassert(16932, "Node argument must be provided to skip", nodeArgs["node"].isABSONObj());
uassert(16933, "Num argument must be provided to skip", nodeArgs["num"].isNumber());
- PlanStage* subNode =
- parseQuery(opCtx, collection, nodeArgs["node"].Obj(), workingSet, exprs);
+ std::unique_ptr<PlanStage> subNode{
+ parseQuery(opCtx, collection, nodeArgs["node"].Obj(), workingSet, exprs)};
uassert(28733,
"Can't parse sub-node of SKIP: " + nodeArgs["node"].Obj().toString(),
nullptr != subNode);
- return new SkipStage(opCtx, nodeArgs["num"].numberInt(), workingSet, subNode);
+ return new SkipStage(
+ opCtx, nodeArgs["num"].numberInt(), workingSet, std::move(subNode));
} else if ("cscan" == nodeName) {
CollectionScanParams params;
@@ -429,12 +431,12 @@ public:
BSONElement e = it.next();
uassert(16973, "node of mergeSort isn't an obj?: " + e.toString(), e.isABSONObj());
- PlanStage* subNode = parseQuery(opCtx, collection, e.Obj(), workingSet, exprs);
+ std::unique_ptr<PlanStage> subNode{
+ parseQuery(opCtx, collection, e.Obj(), workingSet, exprs)};
uassert(16974,
"Can't parse sub-node of mergeSort: " + e.Obj().toString(),
nullptr != subNode);
- // takes ownership
- mergeStage->addChild(subNode);
+ mergeStage->addChild(std::move(subNode));
}
return mergeStage.release();
} else if ("text" == nodeName) {
diff --git a/src/mongo/db/exec/subplan.cpp b/src/mongo/db/exec/subplan.cpp
index 97fb663f7c6..789fca69ce9 100644
--- a/src/mongo/db/exec/subplan.cpp
+++ b/src/mongo/db/exec/subplan.cpp
@@ -271,16 +271,14 @@ Status SubplanStage::choosePlanForSubqueries(PlanYieldPolicy* yieldPolicy) {
// Dump all the solutions into the MPS.
for (size_t ix = 0; ix < branchResult->solutions.size(); ++ix) {
- PlanStage* nextPlanRoot;
- invariant(StageBuilder::build(getOpCtx(),
- collection(),
- *branchResult->canonicalQuery,
- *branchResult->solutions[ix],
- _ws,
- &nextPlanRoot));
-
- // Takes ownership of 'nextPlanRoot'.
- multiPlanStage->addPlan(std::move(branchResult->solutions[ix]), nextPlanRoot, _ws);
+ auto nextPlanRoot = StageBuilder::build(getOpCtx(),
+ collection(),
+ *branchResult->canonicalQuery,
+ *branchResult->solutions[ix],
+ _ws);
+
+ multiPlanStage->addPlan(
+ std::move(branchResult->solutions[ix]), std::move(nextPlanRoot), _ws);
}
Status planSelectStat = multiPlanStage->pickBestPlan(yieldPolicy);
@@ -340,7 +338,6 @@ Status SubplanStage::choosePlanForSubqueries(PlanYieldPolicy* yieldPolicy) {
LOG(5) << "Subplanner: fully tagged tree is " << redact(solnRoot->toString());
- // Takes ownership of 'solnRoot'
_compositeSolution =
QueryPlannerAnalysis::analyzeDataAccess(*_query, _plannerParams, std::move(solnRoot));
@@ -355,11 +352,10 @@ Status SubplanStage::choosePlanForSubqueries(PlanYieldPolicy* yieldPolicy) {
// Use the index tags from planning each branch to construct the composite solution,
// and set that solution as our child stage.
_ws->clear();
- PlanStage* root;
- invariant(StageBuilder::build(
- getOpCtx(), collection(), *_query, *_compositeSolution.get(), _ws, &root));
+ auto root =
+ StageBuilder::build(getOpCtx(), collection(), *_query, *_compositeSolution.get(), _ws);
invariant(_children.empty());
- _children.emplace_back(root);
+ _children.emplace_back(std::move(root));
return Status::OK();
}
@@ -387,11 +383,10 @@ Status SubplanStage::choosePlanWholeQuery(PlanYieldPolicy* yieldPolicy) {
}
if (1 == solutions.size()) {
- PlanStage* root;
// Only one possible plan. Run it. Build the stages from the solution.
- verify(StageBuilder::build(getOpCtx(), collection(), *_query, *solutions[0], _ws, &root));
+ auto root = StageBuilder::build(getOpCtx(), collection(), *_query, *solutions[0], _ws);
invariant(_children.empty());
- _children.emplace_back(root);
+ _children.emplace_back(std::move(root));
// This SubplanStage takes ownership of the query solution.
_compositeSolution = std::move(solutions.back());
@@ -410,13 +405,10 @@ Status SubplanStage::choosePlanWholeQuery(PlanYieldPolicy* yieldPolicy) {
solutions[ix]->cacheData->indexFilterApplied = _plannerParams.indexFiltersApplied;
}
- // version of StageBuild::build when WorkingSet is shared
- PlanStage* nextPlanRoot;
- verify(StageBuilder::build(
- getOpCtx(), collection(), *_query, *solutions[ix], _ws, &nextPlanRoot));
+ auto nextPlanRoot =
+ StageBuilder::build(getOpCtx(), collection(), *_query, *solutions[ix], _ws);
- // Takes ownership of 'nextPlanRoot'.
- multiPlanStage->addPlan(std::move(solutions[ix]), nextPlanRoot, _ws);
+ multiPlanStage->addPlan(std::move(solutions[ix]), std::move(nextPlanRoot), _ws);
}
// Delegate the the MultiPlanStage's plan selection facility.
diff --git a/src/mongo/db/exec/text.cpp b/src/mongo/db/exec/text.cpp
index 93054164ebb..9efb4915c61 100644
--- a/src/mongo/db/exec/text.cpp
+++ b/src/mongo/db/exec/text.cpp
@@ -140,7 +140,7 @@ unique_ptr<PlanStage> TextStage::buildTextTree(OperationContext* opCtx,
// WorkingSetMember inputs have fetched data.
const MatchExpression* emptyFilter = nullptr;
auto fetchStage = std::make_unique<FetchStage>(
- opCtx, ws, textSearcher.release(), emptyFilter, collection);
+ opCtx, ws, std::move(textSearcher), emptyFilter, collection);
textMatchStage = std::make_unique<TextMatchStage>(
opCtx, std::move(fetchStage), _params.query, _params.spec, ws);
diff --git a/src/mongo/db/exec/trial_stage.cpp b/src/mongo/db/exec/trial_stage.cpp
index c54c6e91dc9..1e5b36fb7c4 100644
--- a/src/mongo/db/exec/trial_stage.cpp
+++ b/src/mongo/db/exec/trial_stage.cpp
@@ -174,8 +174,8 @@ void TrialStage::_assessTrialAndBuildFinalPlan() {
// final plan which UNIONs across the QueuedDataStage and the trial plan.
std::unique_ptr<PlanStage> unionPlan =
std::make_unique<OrStage>(getOpCtx(), _ws, false, nullptr);
- static_cast<OrStage*>(unionPlan.get())->addChild(_queuedData.release());
- static_cast<OrStage*>(unionPlan.get())->addChild(_children.front().release());
+ static_cast<OrStage*>(unionPlan.get())->addChild(std::move(_queuedData));
+ static_cast<OrStage*>(unionPlan.get())->addChild(std::move(_children.front()));
_replaceCurrentPlan(unionPlan);
}
diff --git a/src/mongo/db/pipeline/pipeline_d.cpp b/src/mongo/db/pipeline/pipeline_d.cpp
index e235c28f6ca..12c95222fe3 100644
--- a/src/mongo/db/pipeline/pipeline_d.cpp
+++ b/src/mongo/db/pipeline/pipeline_d.cpp
@@ -146,12 +146,12 @@ StatusWith<unique_ptr<PlanExecutor, PlanExecutor::Deleter>> createRandomCursorEx
sampleSize / (numRecords * kMaxSampleRatioForRandCursor), kMaxSampleRatioForRandCursor);
// The trial plan is SHARDING_FILTER-MULTI_ITERATOR.
auto randomCursorPlan =
- std::make_unique<ShardFilterStage>(opCtx, shardMetadata, ws.get(), root.release());
+ std::make_unique<ShardFilterStage>(opCtx, shardMetadata, ws.get(), std::move(root));
// The backup plan is SHARDING_FILTER-COLLSCAN.
std::unique_ptr<PlanStage> collScanPlan = std::make_unique<CollectionScan>(
opCtx, coll, CollectionScanParams{}, ws.get(), nullptr);
collScanPlan = std::make_unique<ShardFilterStage>(
- opCtx, shardMetadata, ws.get(), collScanPlan.release());
+ opCtx, shardMetadata, ws.get(), std::move(collScanPlan));
// Place a TRIAL stage at the root of the plan tree, and pass it the trial and backup plans.
root = std::make_unique<TrialStage>(opCtx,
ws.get(),
diff --git a/src/mongo/db/query/get_executor.cpp b/src/mongo/db/query/get_executor.cpp
index d36775885ce..4b039fa7aaf 100644
--- a/src/mongo/db/query/get_executor.cpp
+++ b/src/mongo/db/query/get_executor.cpp
@@ -385,7 +385,7 @@ StatusWith<PrepareExecutionResult> prepareExecution(OperationContext* opCtx,
CollectionShardingState::get(opCtx, canonicalQuery->nss())
->getOrphansFilter(opCtx, collection),
ws,
- root.release());
+ std::move(root));
}
// There might be a projection. The idhack stage will always fetch the full
@@ -397,7 +397,7 @@ StatusWith<PrepareExecutionResult> prepareExecution(OperationContext* opCtx,
if (canonicalQuery->getProj()->wantSortKey()) {
root = std::make_unique<SortKeyGeneratorStage>(
canonicalQuery->getExpCtx(),
- root.release(),
+ std::move(root),
ws,
canonicalQuery->getQueryRequest().getSort());
}
@@ -456,23 +456,23 @@ StatusWith<PrepareExecutionResult> prepareExecution(OperationContext* opCtx,
LOG(2) << "Using fast count: " << redact(canonicalQuery->toStringShort());
}
- PlanStage* rawRoot;
- verify(StageBuilder::build(
- opCtx, collection, *canonicalQuery, *querySolution, ws, &rawRoot));
+ auto root =
+ StageBuilder::build(opCtx, collection, *canonicalQuery, *querySolution, ws);
// Add a CachedPlanStage on top of the previous root.
//
// 'decisionWorks' is used to determine whether the existing cache entry should
// be evicted, and the query replanned.
- root = std::make_unique<CachedPlanStage>(opCtx,
- collection,
- ws,
- canonicalQuery.get(),
- plannerParams,
- cs->decisionWorks,
- rawRoot);
- return PrepareExecutionResult(
- std::move(canonicalQuery), std::move(querySolution), std::move(root));
+ auto cachedPlanStage = std::make_unique<CachedPlanStage>(opCtx,
+ collection,
+ ws,
+ canonicalQuery.get(),
+ plannerParams,
+ cs->decisionWorks,
+ std::move(root));
+ return PrepareExecutionResult(std::move(canonicalQuery),
+ std::move(querySolution),
+ std::move(cachedPlanStage));
}
}
}
@@ -507,10 +507,8 @@ StatusWith<PrepareExecutionResult> prepareExecution(OperationContext* opCtx,
for (size_t i = 0; i < solutions.size(); ++i) {
if (turnIxscanIntoCount(solutions[i].get())) {
// We're not going to cache anything that's fast count.
- PlanStage* rawRoot;
- verify(StageBuilder::build(
- opCtx, collection, *canonicalQuery, *solutions[i], ws, &rawRoot));
- root.reset(rawRoot);
+ auto root =
+ StageBuilder::build(opCtx, collection, *canonicalQuery, *solutions[i], ws);
LOG(2) << "Using fast count: " << redact(canonicalQuery->toStringShort())
<< ", planSummary: " << Explain::getPlanSummary(root.get());
@@ -523,10 +521,7 @@ StatusWith<PrepareExecutionResult> prepareExecution(OperationContext* opCtx,
if (1 == solutions.size()) {
// Only one possible plan. Run it. Build the stages from the solution.
- PlanStage* rawRoot;
- verify(
- StageBuilder::build(opCtx, collection, *canonicalQuery, *solutions[0], ws, &rawRoot));
- root.reset(rawRoot);
+ auto root = StageBuilder::build(opCtx, collection, *canonicalQuery, *solutions[0], ws);
LOG(2) << "Only one plan is available; it will be run but will not be cached. "
<< redact(canonicalQuery->toStringShort())
@@ -545,13 +540,11 @@ StatusWith<PrepareExecutionResult> prepareExecution(OperationContext* opCtx,
solutions[ix]->cacheData->indexFilterApplied = plannerParams.indexFiltersApplied;
}
- // version of StageBuild::build when WorkingSet is shared
- PlanStage* nextPlanRoot;
- verify(StageBuilder::build(
- opCtx, collection, *canonicalQuery, *solutions[ix], ws, &nextPlanRoot));
+ auto nextPlanRoot =
+ StageBuilder::build(opCtx, collection, *canonicalQuery, *solutions[ix], ws);
// Takes ownership of 'nextPlanRoot'.
- multiPlanStage->addPlan(std::move(solutions[ix]), nextPlanRoot, ws);
+ multiPlanStage->addPlan(std::move(solutions[ix]), std::move(nextPlanRoot), ws);
}
root = std::move(multiPlanStage);
@@ -1450,10 +1443,8 @@ StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorForS
invariant(soln);
unique_ptr<WorkingSet> ws = std::make_unique<WorkingSet>();
- PlanStage* rawRoot;
- verify(StageBuilder::build(
- opCtx, collection, *parsedDistinct->getQuery(), *soln, ws.get(), &rawRoot));
- unique_ptr<PlanStage> root(rawRoot);
+ auto root =
+ StageBuilder::build(opCtx, collection, *parsedDistinct->getQuery(), *soln, ws.get());
LOG(2) << "Using fast distinct: " << redact(parsedDistinct->getQuery()->toStringShort())
<< ", planSummary: " << Explain::getPlanSummary(root.get());
@@ -1493,14 +1484,8 @@ getExecutorDistinctFromIndexSolutions(OperationContext* opCtx,
// Build and return the SSR over solutions[i].
unique_ptr<WorkingSet> ws = std::make_unique<WorkingSet>();
unique_ptr<QuerySolution> currentSolution = std::move(solutions[i]);
- PlanStage* rawRoot;
- verify(StageBuilder::build(opCtx,
- collection,
- *parsedDistinct->getQuery(),
- *currentSolution,
- ws.get(),
- &rawRoot));
- unique_ptr<PlanStage> root(rawRoot);
+ auto root = StageBuilder::build(
+ opCtx, collection, *parsedDistinct->getQuery(), *currentSolution, ws.get());
LOG(2) << "Using fast distinct: " << redact(parsedDistinct->getQuery()->toStringShort())
<< ", planSummary: " << Explain::getPlanSummary(root.get());
diff --git a/src/mongo/db/query/internal_plans.cpp b/src/mongo/db/query/internal_plans.cpp
index 4e65301e867..860cf5a4ebd 100644
--- a/src/mongo/db/query/internal_plans.cpp
+++ b/src/mongo/db/query/internal_plans.cpp
@@ -217,7 +217,7 @@ std::unique_ptr<PlanStage> InternalPlanner::_indexScan(OperationContext* opCtx,
std::make_unique<IndexScan>(opCtx, std::move(params), ws, nullptr);
if (InternalPlanner::IXSCAN_FETCH & options) {
- root = std::make_unique<FetchStage>(opCtx, ws, root.release(), nullptr, collection);
+ root = std::make_unique<FetchStage>(opCtx, ws, std::move(root), nullptr, collection);
}
return root;
diff --git a/src/mongo/db/query/stage_builder.cpp b/src/mongo/db/query/stage_builder.cpp
index 508690ae278..48cc9e39027 100644
--- a/src/mongo/db/query/stage_builder.cpp
+++ b/src/mongo/db/query/stage_builder.cpp
@@ -65,14 +65,14 @@
namespace mongo {
-using std::unique_ptr;
-
-PlanStage* buildStages(OperationContext* opCtx,
- const Collection* collection,
- const CanonicalQuery& cq,
- const QuerySolution& qsol,
- const QuerySolutionNode* root,
- WorkingSet* ws) {
+// Returns a non-null pointer to the root of a plan tree. Fails an invariant (MONGO_UNREACHABLE)
+// if the PlanStage tree could not be constructed for the given QuerySolutionNode type.
+std::unique_ptr<PlanStage> buildStages(OperationContext* opCtx,
+ const Collection* collection,
+ const CanonicalQuery& cq,
+ const QuerySolution& qsol,
+ const QuerySolutionNode* root,
+ WorkingSet* ws) {
switch (root->getType()) {
case STAGE_COLLSCAN: {
const CollectionScanNode* csn = static_cast<const CollectionScanNode*>(root);
@@ -85,16 +85,13 @@ PlanStage* buildStages(OperationContext* opCtx,
params.minTs = csn->minTs;
params.maxTs = csn->maxTs;
params.stopApplyingFilterAfterFirstMatch = csn->stopApplyingFilterAfterFirstMatch;
- return new CollectionScan(opCtx, collection, params, ws, csn->filter.get());
+ return std::make_unique<CollectionScan>(
+ opCtx, collection, params, ws, csn->filter.get());
}
case STAGE_IXSCAN: {
const IndexScanNode* ixn = static_cast<const IndexScanNode*>(root);
- if (nullptr == collection) {
- warning() << "Can't ixscan null namespace";
- return nullptr;
- }
-
+ invariant(collection);
auto descriptor = collection->getIndexCatalog()->findIndexByName(
opCtx, ixn->index.identifier.catalogName);
invariant(descriptor,
@@ -113,124 +110,87 @@ PlanStage* buildStages(OperationContext* opCtx,
params.direction = ixn->direction;
params.addKeyMetadata = ixn->addKeyMetadata;
params.shouldDedup = ixn->shouldDedup;
- return new IndexScan(opCtx, std::move(params), ws, ixn->filter.get());
+ return std::make_unique<IndexScan>(opCtx, std::move(params), ws, ixn->filter.get());
}
case STAGE_FETCH: {
const FetchNode* fn = static_cast<const FetchNode*>(root);
- PlanStage* childStage = buildStages(opCtx, collection, cq, qsol, fn->children[0], ws);
- if (nullptr == childStage) {
- return nullptr;
- }
- return new FetchStage(opCtx, ws, childStage, fn->filter.get(), collection);
+ auto childStage = buildStages(opCtx, collection, cq, qsol, fn->children[0], ws);
+ return std::make_unique<FetchStage>(
+ opCtx, ws, std::move(childStage), fn->filter.get(), collection);
}
case STAGE_SORT: {
const SortNode* sn = static_cast<const SortNode*>(root);
- PlanStage* childStage = buildStages(opCtx, collection, cq, qsol, sn->children[0], ws);
- if (nullptr == childStage) {
- return nullptr;
- }
+ auto childStage = buildStages(opCtx, collection, cq, qsol, sn->children[0], ws);
SortStageParams params;
params.pattern = sn->pattern;
params.limit = sn->limit;
params.allowDiskUse = sn->allowDiskUse;
- return new SortStage(opCtx, params, ws, childStage);
+ return std::make_unique<SortStage>(opCtx, params, ws, std::move(childStage));
}
case STAGE_SORT_KEY_GENERATOR: {
const SortKeyGeneratorNode* keyGenNode = static_cast<const SortKeyGeneratorNode*>(root);
- PlanStage* childStage =
- buildStages(opCtx, collection, cq, qsol, keyGenNode->children[0], ws);
- if (nullptr == childStage) {
- return nullptr;
- }
- return new SortKeyGeneratorStage(cq.getExpCtx(), childStage, ws, keyGenNode->sortSpec);
+ auto childStage = buildStages(opCtx, collection, cq, qsol, keyGenNode->children[0], ws);
+ return std::make_unique<SortKeyGeneratorStage>(
+ cq.getExpCtx(), std::move(childStage), ws, keyGenNode->sortSpec);
}
case STAGE_PROJECTION_DEFAULT: {
auto pn = static_cast<const ProjectionNodeDefault*>(root);
- unique_ptr<PlanStage> childStage{
- buildStages(opCtx, collection, cq, qsol, pn->children[0], ws)};
- if (nullptr == childStage) {
- return nullptr;
- }
- return new ProjectionStageDefault(opCtx,
- pn->projection,
- ws,
- std::move(childStage),
- pn->fullExpression,
- cq.getCollator());
+ auto childStage = buildStages(opCtx, collection, cq, qsol, pn->children[0], ws);
+ return std::make_unique<ProjectionStageDefault>(opCtx,
+ pn->projection,
+ ws,
+ std::move(childStage),
+ pn->fullExpression,
+ cq.getCollator());
}
case STAGE_PROJECTION_COVERED: {
auto pn = static_cast<const ProjectionNodeCovered*>(root);
- unique_ptr<PlanStage> childStage{
- buildStages(opCtx, collection, cq, qsol, pn->children[0], ws)};
- if (nullptr == childStage) {
- return nullptr;
- }
- return new ProjectionStageCovered(
+ auto childStage = buildStages(opCtx, collection, cq, qsol, pn->children[0], ws);
+ return std::make_unique<ProjectionStageCovered>(
opCtx, pn->projection, ws, std::move(childStage), pn->coveredKeyObj);
}
case STAGE_PROJECTION_SIMPLE: {
auto pn = static_cast<const ProjectionNodeSimple*>(root);
- unique_ptr<PlanStage> childStage{
- buildStages(opCtx, collection, cq, qsol, pn->children[0], ws)};
- if (nullptr == childStage) {
- return nullptr;
- }
- return new ProjectionStageSimple(opCtx, pn->projection, ws, std::move(childStage));
+ auto childStage = buildStages(opCtx, collection, cq, qsol, pn->children[0], ws);
+ return std::make_unique<ProjectionStageSimple>(
+ opCtx, pn->projection, ws, std::move(childStage));
}
case STAGE_LIMIT: {
const LimitNode* ln = static_cast<const LimitNode*>(root);
- PlanStage* childStage = buildStages(opCtx, collection, cq, qsol, ln->children[0], ws);
- if (nullptr == childStage) {
- return nullptr;
- }
- return new LimitStage(opCtx, ln->limit, ws, childStage);
+ auto childStage = buildStages(opCtx, collection, cq, qsol, ln->children[0], ws);
+ return std::make_unique<LimitStage>(opCtx, ln->limit, ws, std::move(childStage));
}
case STAGE_SKIP: {
const SkipNode* sn = static_cast<const SkipNode*>(root);
- PlanStage* childStage = buildStages(opCtx, collection, cq, qsol, sn->children[0], ws);
- if (nullptr == childStage) {
- return nullptr;
- }
- return new SkipStage(opCtx, sn->skip, ws, childStage);
+ auto childStage = buildStages(opCtx, collection, cq, qsol, sn->children[0], ws);
+ return std::make_unique<SkipStage>(opCtx, sn->skip, ws, std::move(childStage));
}
case STAGE_AND_HASH: {
const AndHashNode* ahn = static_cast<const AndHashNode*>(root);
auto ret = std::make_unique<AndHashStage>(opCtx, ws);
for (size_t i = 0; i < ahn->children.size(); ++i) {
- PlanStage* childStage =
- buildStages(opCtx, collection, cq, qsol, ahn->children[i], ws);
- if (nullptr == childStage) {
- return nullptr;
- }
- ret->addChild(childStage);
+ auto childStage = buildStages(opCtx, collection, cq, qsol, ahn->children[i], ws);
+ ret->addChild(std::move(childStage));
}
- return ret.release();
+ return ret;
}
case STAGE_OR: {
const OrNode* orn = static_cast<const OrNode*>(root);
auto ret = std::make_unique<OrStage>(opCtx, ws, orn->dedup, orn->filter.get());
for (size_t i = 0; i < orn->children.size(); ++i) {
- PlanStage* childStage =
- buildStages(opCtx, collection, cq, qsol, orn->children[i], ws);
- if (nullptr == childStage) {
- return nullptr;
- }
- ret->addChild(childStage);
+ auto childStage = buildStages(opCtx, collection, cq, qsol, orn->children[i], ws);
+ ret->addChild(std::move(childStage));
}
- return ret.release();
+ return ret;
}
case STAGE_AND_SORTED: {
const AndSortedNode* asn = static_cast<const AndSortedNode*>(root);
auto ret = std::make_unique<AndSortedStage>(opCtx, ws);
for (size_t i = 0; i < asn->children.size(); ++i) {
- PlanStage* childStage =
- buildStages(opCtx, collection, cq, qsol, asn->children[i], ws);
- if (nullptr == childStage) {
- return nullptr;
- }
- ret->addChild(childStage);
+ auto childStage = buildStages(opCtx, collection, cq, qsol, asn->children[i], ws);
+ ret->addChild(std::move(childStage));
}
- return ret.release();
+ return ret;
}
case STAGE_SORT_MERGE: {
const MergeSortNode* msn = static_cast<const MergeSortNode*>(root);
@@ -240,14 +200,10 @@ PlanStage* buildStages(OperationContext* opCtx,
params.collator = cq.getCollator();
auto ret = std::make_unique<MergeSortStage>(opCtx, params, ws);
for (size_t i = 0; i < msn->children.size(); ++i) {
- PlanStage* childStage =
- buildStages(opCtx, collection, cq, qsol, msn->children[i], ws);
- if (nullptr == childStage) {
- return nullptr;
- }
- ret->addChild(childStage);
+ auto childStage = buildStages(opCtx, collection, cq, qsol, msn->children[i], ws);
+ ret->addChild(std::move(childStage));
}
- return ret.release();
+ return ret;
}
case STAGE_GEO_NEAR_2D: {
const GeoNear2DNode* node = static_cast<const GeoNear2DNode*>(root);
@@ -259,13 +215,12 @@ PlanStage* buildStages(OperationContext* opCtx,
params.addPointMeta = node->addPointMeta;
params.addDistMeta = node->addDistMeta;
+ invariant(collection);
const IndexDescriptor* twoDIndex = collection->getIndexCatalog()->findIndexByName(
opCtx, node->index.identifier.catalogName);
invariant(twoDIndex);
- GeoNear2DStage* nearStage = new GeoNear2DStage(params, opCtx, ws, twoDIndex);
-
- return nearStage;
+ return std::make_unique<GeoNear2DStage>(params, opCtx, ws, twoDIndex);
}
case STAGE_GEO_NEAR_2DSPHERE: {
const GeoNear2DSphereNode* node = static_cast<const GeoNear2DSphereNode*>(root);
@@ -277,14 +232,16 @@ PlanStage* buildStages(OperationContext* opCtx,
params.addPointMeta = node->addPointMeta;
params.addDistMeta = node->addDistMeta;
+ invariant(collection);
const IndexDescriptor* s2Index = collection->getIndexCatalog()->findIndexByName(
opCtx, node->index.identifier.catalogName);
invariant(s2Index);
- return new GeoNear2DSphereStage(params, opCtx, ws, s2Index);
+ return std::make_unique<GeoNear2DSphereStage>(params, opCtx, ws, s2Index);
}
case STAGE_TEXT: {
const TextNode* node = static_cast<const TextNode*>(root);
+ invariant(collection);
const IndexDescriptor* desc = collection->getIndexCatalog()->findIndexByName(
opCtx, node->index.identifier.catalogName);
invariant(desc);
@@ -296,33 +253,24 @@ PlanStage* buildStages(OperationContext* opCtx,
params.index = desc;
params.indexPrefix = node->indexPrefix;
// We assume here that node->ftsQuery is an FTSQueryImpl, not an FTSQueryNoop. In
- // practice,
- // this means that it is illegal to use the StageBuilder on a QuerySolution created by
- // planning a query that contains "no-op" expressions. TODO: make StageBuilder::build()
- // fail in this case (this improvement is being tracked by SERVER-21510).
+ // practice, this means that it is illegal to use the StageBuilder on a QuerySolution
+ // created by planning a query that contains "no-op" expressions.
params.query = static_cast<FTSQueryImpl&>(*node->ftsQuery);
params.wantTextScore = (cq.getProj() && cq.getProj()->wantTextScore());
- return new TextStage(opCtx, params, ws, node->filter.get());
+ return std::make_unique<TextStage>(opCtx, params, ws, node->filter.get());
}
case STAGE_SHARDING_FILTER: {
const ShardingFilterNode* fn = static_cast<const ShardingFilterNode*>(root);
- PlanStage* childStage = buildStages(opCtx, collection, cq, qsol, fn->children[0], ws);
- if (nullptr == childStage) {
- return nullptr;
- }
+ auto childStage = buildStages(opCtx, collection, cq, qsol, fn->children[0], ws);
auto css = CollectionShardingState::get(opCtx, collection->ns());
- return new ShardFilterStage(
- opCtx, css->getOrphansFilter(opCtx, collection), ws, childStage);
+ return std::make_unique<ShardFilterStage>(
+ opCtx, css->getOrphansFilter(opCtx, collection), ws, std::move(childStage));
}
case STAGE_DISTINCT_SCAN: {
const DistinctNode* dn = static_cast<const DistinctNode*>(root);
- if (nullptr == collection) {
- warning() << "Can't distinct-scan null namespace";
- return nullptr;
- }
-
+ invariant(collection);
auto descriptor = collection->getIndexCatalog()->findIndexByName(
opCtx, dn->index.identifier.catalogName);
invariant(descriptor);
@@ -338,16 +286,12 @@ PlanStage* buildStages(OperationContext* opCtx,
params.scanDirection = dn->direction;
params.bounds = dn->bounds;
params.fieldNo = dn->fieldNo;
- return new DistinctScan(opCtx, std::move(params), ws);
+ return std::make_unique<DistinctScan>(opCtx, std::move(params), ws);
}
case STAGE_COUNT_SCAN: {
const CountScanNode* csn = static_cast<const CountScanNode*>(root);
- if (nullptr == collection) {
- warning() << "Can't fast-count null namespace (collection null)";
- return nullptr;
- }
-
+ invariant(collection);
auto descriptor = collection->getIndexCatalog()->findIndexByName(
opCtx, csn->index.identifier.catalogName);
invariant(descriptor);
@@ -364,15 +308,13 @@ PlanStage* buildStages(OperationContext* opCtx,
params.startKeyInclusive = csn->startKeyInclusive;
params.endKey = csn->endKey;
params.endKeyInclusive = csn->endKeyInclusive;
- return new CountScan(opCtx, std::move(params), ws);
+ return std::make_unique<CountScan>(opCtx, std::move(params), ws);
}
case STAGE_ENSURE_SORTED: {
const EnsureSortedNode* esn = static_cast<const EnsureSortedNode*>(root);
- PlanStage* childStage = buildStages(opCtx, collection, cq, qsol, esn->children[0], ws);
- if (nullptr == childStage) {
- return nullptr;
- }
- return new EnsureSortedStage(opCtx, esn->pattern, ws, childStage);
+ auto childStage = buildStages(opCtx, collection, cq, qsol, esn->children[0], ws);
+ return std::make_unique<EnsureSortedStage>(
+ opCtx, esn->pattern, ws, std::move(childStage));
}
case STAGE_CACHED_PLAN:
case STAGE_CHANGE_STREAM_PROXY:
@@ -397,30 +339,26 @@ PlanStage* buildStages(OperationContext* opCtx,
warning() << "Can't build exec tree for node " << nodeStr << endl;
}
}
- return nullptr;
+
+ MONGO_UNREACHABLE;
}
-// static (this one is used for Cached and MultiPlanStage)
-bool StageBuilder::build(OperationContext* opCtx,
- const Collection* collection,
- const CanonicalQuery& cq,
- const QuerySolution& solution,
- WorkingSet* wsIn,
- PlanStage** rootOut) {
+std::unique_ptr<PlanStage> StageBuilder::build(OperationContext* opCtx,
+ const Collection* collection,
+ const CanonicalQuery& cq,
+ const QuerySolution& solution,
+ WorkingSet* wsIn) {
// Only QuerySolutions derived from queries parsed with context, or QuerySolutions derived from
// queries that disallow extensions, can be properly executed. If the query does not have
// $text/$where context (and $text/$where are allowed), then no attempt should be made to
// execute the query.
invariant(!cq.canHaveNoopMatchNodes());
- if (nullptr == wsIn || nullptr == rootOut) {
- return false;
- }
+ invariant(wsIn);
+ invariant(solution.root);
+
QuerySolutionNode* solutionNode = solution.root.get();
- if (nullptr == solutionNode) {
- return false;
- }
- return nullptr != (*rootOut = buildStages(opCtx, collection, cq, solution, solutionNode, wsIn));
+ return buildStages(opCtx, collection, cq, solution, solutionNode, wsIn);
}
} // namespace mongo
diff --git a/src/mongo/db/query/stage_builder.h b/src/mongo/db/query/stage_builder.h
index 9ffceef53cf..a0437832822 100644
--- a/src/mongo/db/query/stage_builder.h
+++ b/src/mongo/db/query/stage_builder.h
@@ -43,21 +43,17 @@ class OperationContext;
class StageBuilder {
public:
/**
- * Turns 'solution' into an executable tree of PlanStage(s).
+ * Turns 'solution' into an executable tree of PlanStage(s). Returns a pointer to the root of
+ * the plan stage tree.
*
- * 'cq' must be the CanonicalQuery from which 'solution' is derived.
- *
- * Returns true if the PlanStage tree was built successfully. The root of the tree is in
- * *rootOut and the WorkingSet that the tree uses is in wsIn.
- *
- * Returns false otherwise. *rootOut and *wsOut are invalid.
+ * 'cq' must be the CanonicalQuery from which 'solution' is derived. Illegal to call if 'wsIn'
+ * is nullptr, or if 'solution.root' is nullptr.
*/
- static bool build(OperationContext* opCtx,
- const Collection* collection,
- const CanonicalQuery& cq,
- const QuerySolution& solution,
- WorkingSet* wsIn,
- PlanStage** rootOut);
+ static std::unique_ptr<PlanStage> build(OperationContext* opCtx,
+ const Collection* collection,
+ const CanonicalQuery& cq,
+ const QuerySolution& solution,
+ WorkingSet* wsIn);
};
} // namespace mongo
diff --git a/src/mongo/dbtests/plan_ranking.cpp b/src/mongo/dbtests/plan_ranking.cpp
index dddeb46e162..b4c9d67380e 100644
--- a/src/mongo/dbtests/plan_ranking.cpp
+++ b/src/mongo/dbtests/plan_ranking.cpp
@@ -130,10 +130,8 @@ public:
unique_ptr<WorkingSet> ws(new WorkingSet());
// Put each solution from the planner into the MPR.
for (size_t i = 0; i < solutions.size(); ++i) {
- PlanStage* root;
- ASSERT(StageBuilder::build(&_opCtx, collection, *cq, *solutions[i], ws.get(), &root));
- // Takes ownership of 'root'.
- _mps->addPlan(std::move(solutions[i]), root, ws.get());
+ auto root = StageBuilder::build(&_opCtx, collection, *cq, *solutions[i], ws.get());
+ _mps->addPlan(std::move(solutions[i]), std::move(root), ws.get());
}
// This is what sets a backup plan, should we test for it.
PlanYieldPolicy yieldPolicy(PlanExecutor::NO_YIELD,
diff --git a/src/mongo/dbtests/query_plan_executor.cpp b/src/mongo/dbtests/query_plan_executor.cpp
index 4d645862ffc..82f6251867a 100644
--- a/src/mongo/dbtests/query_plan_executor.cpp
+++ b/src/mongo/dbtests/query_plan_executor.cpp
@@ -151,8 +151,9 @@ public:
const Collection* coll = db->getCollection(&_opCtx, nss);
unique_ptr<WorkingSet> ws(new WorkingSet());
- IndexScan* ix = new IndexScan(&_opCtx, ixparams, ws.get(), nullptr);
- unique_ptr<PlanStage> root(new FetchStage(&_opCtx, ws.get(), ix, nullptr, coll));
+ auto ixscan = std::make_unique<IndexScan>(&_opCtx, ixparams, ws.get(), nullptr);
+ unique_ptr<PlanStage> root =
+ std::make_unique<FetchStage>(&_opCtx, ws.get(), std::move(ixscan), nullptr, coll);
auto qr = std::make_unique<QueryRequest>(nss);
auto statusWithCQ = CanonicalQuery::canonicalize(&_opCtx, std::move(qr));
diff --git a/src/mongo/dbtests/query_stage_and.cpp b/src/mongo/dbtests/query_stage_and.cpp
index d48063a2903..a4059b60115 100644
--- a/src/mongo/dbtests/query_stage_and.cpp
+++ b/src/mongo/dbtests/query_stage_and.cpp
@@ -205,12 +205,12 @@ public:
auto params = makeIndexScanParams(&_opCtx, getIndex(BSON("foo" << 1), coll));
params.bounds.startKey = BSON("" << 20);
params.direction = -1;
- ah->addChild(new IndexScan(&_opCtx, params, &ws, nullptr));
+ ah->addChild(std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr));
// Bar >= 10.
params = makeIndexScanParams(&_opCtx, getIndex(BSON("bar" << 1), coll));
params.bounds.startKey = BSON("" << 10);
- ah->addChild(new IndexScan(&_opCtx, params, &ws, nullptr));
+ ah->addChild(std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr));
// 'ah' reads the first child into its hash table: foo=20, foo=19, ..., foo=0
// in that order. Read half of them.
@@ -292,13 +292,13 @@ public:
auto params = makeIndexScanParams(&_opCtx, getIndex(BSON("foo" << 1), coll));
params.bounds.startKey = BSON("" << 20);
params.direction = -1;
- ah->addChild(new IndexScan(&_opCtx, params, &ws, nullptr));
+ ah->addChild(std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr));
// Bar <= 19 (descending).
params = makeIndexScanParams(&_opCtx, getIndex(BSON("bar" << 1), coll));
params.bounds.startKey = BSON("" << 19);
params.direction = -1;
- ah->addChild(new IndexScan(&_opCtx, params, &ws, nullptr));
+ ah->addChild(std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr));
// First call to work reads the first result from the children. The first result for the
// first scan over foo is {foo: 20, bar: 20, baz: 20}. The first result for the second scan
@@ -372,13 +372,13 @@ public:
auto params = makeIndexScanParams(&_opCtx, getIndex(BSON("foo" << 1), coll));
params.bounds.startKey = BSON("" << 20);
params.direction = -1;
- ah->addChild(new IndexScan(&_opCtx, params, &ws, nullptr));
+ ah->addChild(std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr));
// Bar >= 10
params = makeIndexScanParams(&_opCtx, getIndex(BSON("bar" << 1), coll));
params.bounds.startKey = BSON("" << 10);
params.direction = -1;
- ah->addChild(new IndexScan(&_opCtx, params, &ws, nullptr));
+ ah->addChild(std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr));
// foo == bar == baz, and foo<=20, bar>=10, so our values are:
// foo == 10, 11, 12, 13, 14, 15. 16, 17, 18, 19, 20
@@ -421,13 +421,13 @@ public:
auto params = makeIndexScanParams(&_opCtx, getIndex(BSON("foo" << 1 << "big" << 1), coll));
params.bounds.startKey = BSON("" << 20 << "" << big);
params.direction = -1;
- ah->addChild(new IndexScan(&_opCtx, params, &ws, nullptr));
+ ah->addChild(std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr));
// Bar >= 10
params = makeIndexScanParams(&_opCtx, getIndex(BSON("bar" << 1), coll));
params.bounds.startKey = BSON("" << 10);
params.direction = -1;
- ah->addChild(new IndexScan(&_opCtx, params, &ws, nullptr));
+ ah->addChild(std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr));
// Stage execution should fail.
ASSERT_EQUALS(-1, countResults(ah.get()));
@@ -468,13 +468,13 @@ public:
auto params = makeIndexScanParams(&_opCtx, getIndex(BSON("foo" << 1), coll));
params.bounds.startKey = BSON("" << 20);
params.direction = -1;
- ah->addChild(new IndexScan(&_opCtx, params, &ws, nullptr));
+ ah->addChild(std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr));
// Bar >= 10
params = makeIndexScanParams(&_opCtx, getIndex(BSON("bar" << 1 << "big" << 1), coll));
params.bounds.startKey = BSON("" << 10 << "" << big);
params.direction = -1;
- ah->addChild(new IndexScan(&_opCtx, params, &ws, nullptr));
+ ah->addChild(std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr));
// foo == bar == baz, and foo<=20, bar>=10, so our values are:
// foo == 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20.
@@ -510,18 +510,18 @@ public:
auto params = makeIndexScanParams(&_opCtx, getIndex(BSON("foo" << 1), coll));
params.bounds.startKey = BSON("" << 20);
params.direction = -1;
- ah->addChild(new IndexScan(&_opCtx, params, &ws, nullptr));
+ ah->addChild(std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr));
// Bar >= 10
params = makeIndexScanParams(&_opCtx, getIndex(BSON("bar" << 1), coll));
params.bounds.startKey = BSON("" << 10);
- ah->addChild(new IndexScan(&_opCtx, params, &ws, nullptr));
+ ah->addChild(std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr));
// 5 <= baz <= 15
params = makeIndexScanParams(&_opCtx, getIndex(BSON("baz" << 1), coll));
params.bounds.startKey = BSON("" << 5);
params.bounds.endKey = BSON("" << 15);
- ah->addChild(new IndexScan(&_opCtx, params, &ws, nullptr));
+ ah->addChild(std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr));
// foo == bar == baz, and foo<=20, bar>=10, 5<=baz<=15, so our values are:
// foo == 10, 11, 12, 13, 14, 15.
@@ -568,18 +568,18 @@ public:
auto params = makeIndexScanParams(&_opCtx, getIndex(BSON("foo" << 1), coll));
params.bounds.startKey = BSON("" << 20);
params.direction = -1;
- ah->addChild(new IndexScan(&_opCtx, params, &ws, nullptr));
+ ah->addChild(std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr));
// Bar >= 10
params = makeIndexScanParams(&_opCtx, getIndex(BSON("bar" << 1 << "big" << 1), coll));
params.bounds.startKey = BSON("" << 10 << "" << big);
- ah->addChild(new IndexScan(&_opCtx, params, &ws, nullptr));
+ ah->addChild(std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr));
// 5 <= baz <= 15
params = makeIndexScanParams(&_opCtx, getIndex(BSON("baz" << 1), coll));
params.bounds.startKey = BSON("" << 5);
params.bounds.endKey = BSON("" << 15);
- ah->addChild(new IndexScan(&_opCtx, params, &ws, nullptr));
+ ah->addChild(std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr));
// Stage execution should fail.
ASSERT_EQUALS(-1, countResults(ah.get()));
@@ -613,13 +613,13 @@ public:
auto params = makeIndexScanParams(&_opCtx, getIndex(BSON("foo" << 1), coll));
params.bounds.startKey = BSON("" << 20);
params.direction = -1;
- ah->addChild(new IndexScan(&_opCtx, params, &ws, nullptr));
+ ah->addChild(std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr));
// Bar == 5. Index scan should be eof.
params = makeIndexScanParams(&_opCtx, getIndex(BSON("bar" << 1), coll));
params.bounds.startKey = BSON("" << 5);
params.bounds.endKey = BSON("" << 5);
- ah->addChild(new IndexScan(&_opCtx, params, &ws, nullptr));
+ ah->addChild(std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr));
int count = 0;
int works = 0;
@@ -669,7 +669,7 @@ public:
// Foo >= 100
auto params = makeIndexScanParams(&_opCtx, getIndex(BSON("foo" << 1), coll));
params.bounds.startKey = BSON("" << 100);
- ah->addChild(new IndexScan(&_opCtx, params, &ws, nullptr));
+ ah->addChild(std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr));
// Bar <= 100
params = makeIndexScanParams(&_opCtx, getIndex(BSON("bar" << 1), coll));
@@ -681,7 +681,7 @@ public:
<< "");
params.bounds.boundInclusion = BoundInclusion::kIncludeStartKeyOnly;
params.direction = -1;
- ah->addChild(new IndexScan(&_opCtx, params, &ws, nullptr));
+ ah->addChild(std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr));
ASSERT_EQUALS(0, countResults(ah.get()));
}
@@ -717,17 +717,18 @@ public:
auto params = makeIndexScanParams(&_opCtx, getIndex(BSON("foo" << 1), coll));
params.bounds.startKey = BSON("" << 20);
params.direction = -1;
- IndexScan* firstScan = new IndexScan(&_opCtx, params, &ws, nullptr);
+ auto firstScan = std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr);
// First child of the AND_HASH stage is a Fetch. The NULL in the
// constructor means there is no filter.
- FetchStage* fetch = new FetchStage(&_opCtx, &ws, firstScan, nullptr, coll);
- ah->addChild(fetch);
+ auto fetch =
+ std::make_unique<FetchStage>(&_opCtx, &ws, std::move(firstScan), nullptr, coll);
+ ah->addChild(std::move(fetch));
// Bar >= 10
params = makeIndexScanParams(&_opCtx, getIndex(BSON("bar" << 1), coll));
params.bounds.startKey = BSON("" << 10);
- ah->addChild(new IndexScan(&_opCtx, params, &ws, nullptr));
+ ah->addChild(std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr));
// Check that the AndHash stage returns docs {foo: 10, bar: 10}
// through {foo: 20, bar: 20}.
@@ -769,17 +770,18 @@ public:
auto params = makeIndexScanParams(&_opCtx, getIndex(BSON("foo" << 1), coll));
params.bounds.startKey = BSON("" << 20);
params.direction = -1;
- ah->addChild(new IndexScan(&_opCtx, params, &ws, nullptr));
+ ah->addChild(std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr));
// Bar >= 10
params = makeIndexScanParams(&_opCtx, getIndex(BSON("bar" << 1), coll));
params.bounds.startKey = BSON("" << 10);
- IndexScan* secondScan = new IndexScan(&_opCtx, params, &ws, nullptr);
+ auto secondScan = std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr);
// Second child of the AND_HASH stage is a Fetch. The NULL in the
// constructor means there is no filter.
- FetchStage* fetch = new FetchStage(&_opCtx, &ws, secondScan, nullptr, coll);
- ah->addChild(fetch);
+ auto fetch =
+ std::make_unique<FetchStage>(&_opCtx, &ws, std::move(secondScan), nullptr, coll);
+ ah->addChild(std::move(fetch));
// Check that the AndHash stage returns docs {foo: 10, bar: 10}
// through {foo: 20, bar: 20}.
@@ -827,8 +829,8 @@ public:
childStage2->pushBack(PlanStage::NEED_TIME);
childStage2->pushBack(PlanStage::FAILURE);
- andHashStage->addChild(childStage1.release());
- andHashStage->addChild(childStage2.release());
+ andHashStage->addChild(std::move(childStage1));
+ andHashStage->addChild(std::move(childStage2));
WorkingSetID id = WorkingSet::INVALID_ID;
PlanStage::StageState state = PlanStage::NEED_TIME;
@@ -868,8 +870,8 @@ public:
childStage2->pushBack(id);
}
- andHashStage->addChild(childStage1.release());
- andHashStage->addChild(childStage2.release());
+ andHashStage->addChild(std::move(childStage1));
+ andHashStage->addChild(std::move(childStage2));
WorkingSetID id = WorkingSet::INVALID_ID;
PlanStage::StageState state = PlanStage::NEED_TIME;
@@ -908,8 +910,8 @@ public:
}
childStage2->pushBack(PlanStage::FAILURE);
- andHashStage->addChild(childStage1.release());
- andHashStage->addChild(childStage2.release());
+ andHashStage->addChild(std::move(childStage1));
+ andHashStage->addChild(std::move(childStage2));
WorkingSetID id = WorkingSet::INVALID_ID;
PlanStage::StageState state = PlanStage::NEED_TIME;
@@ -955,13 +957,13 @@ public:
auto params = makeIndexScanParams(&_opCtx, getIndex(BSON("foo" << 1), coll));
params.bounds.startKey = BSON("" << 1);
params.bounds.endKey = BSON("" << 1);
- ah->addChild(new IndexScan(&_opCtx, params, &ws, nullptr));
+ ah->addChild(std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr));
// Scan over bar == 1.
params = makeIndexScanParams(&_opCtx, getIndex(BSON("bar" << 1), coll));
params.bounds.startKey = BSON("" << 1);
params.bounds.endKey = BSON("" << 1);
- ah->addChild(new IndexScan(&_opCtx, params, &ws, nullptr));
+ ah->addChild(std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr));
// Get the set of RecordIds in our collection to use later.
set<RecordId> data;
@@ -1072,19 +1074,19 @@ public:
auto params = makeIndexScanParams(&_opCtx, getIndex(BSON("foo" << 1), coll));
params.bounds.startKey = BSON("" << 1);
params.bounds.endKey = BSON("" << 1);
- ah->addChild(new IndexScan(&_opCtx, params, &ws, nullptr));
+ ah->addChild(std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr));
// bar == 1
params = makeIndexScanParams(&_opCtx, getIndex(BSON("bar" << 1), coll));
params.bounds.startKey = BSON("" << 1);
params.bounds.endKey = BSON("" << 1);
- ah->addChild(new IndexScan(&_opCtx, params, &ws, nullptr));
+ ah->addChild(std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr));
// baz == 1
params = makeIndexScanParams(&_opCtx, getIndex(BSON("baz" << 1), coll));
params.bounds.startKey = BSON("" << 1);
params.bounds.endKey = BSON("" << 1);
- ah->addChild(new IndexScan(&_opCtx, params, &ws, nullptr));
+ ah->addChild(std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr));
ASSERT_EQUALS(50, countResults(ah.get()));
}
@@ -1117,13 +1119,13 @@ public:
auto params = makeIndexScanParams(&_opCtx, getIndex(BSON("foo" << 1), coll));
params.bounds.startKey = BSON("" << 7);
params.bounds.endKey = BSON("" << 7);
- ah->addChild(new IndexScan(&_opCtx, params, &ws, nullptr));
+ ah->addChild(std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr));
// Bar == 20, not EOF.
params = makeIndexScanParams(&_opCtx, getIndex(BSON("bar" << 1), coll));
params.bounds.startKey = BSON("" << 20);
params.bounds.endKey = BSON("" << 20);
- ah->addChild(new IndexScan(&_opCtx, params, &ws, nullptr));
+ ah->addChild(std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr));
ASSERT_EQUALS(0, countResults(ah.get()));
}
@@ -1160,13 +1162,13 @@ public:
auto params = makeIndexScanParams(&_opCtx, getIndex(BSON("foo" << 1), coll));
params.bounds.startKey = BSON("" << 7);
params.bounds.endKey = BSON("" << 7);
- ah->addChild(new IndexScan(&_opCtx, params, &ws, nullptr));
+ ah->addChild(std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr));
// bar == 20.
params = makeIndexScanParams(&_opCtx, getIndex(BSON("bar" << 1), coll));
params.bounds.startKey = BSON("" << 20);
params.bounds.endKey = BSON("" << 20);
- ah->addChild(new IndexScan(&_opCtx, params, &ws, nullptr));
+ ah->addChild(std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr));
ASSERT_EQUALS(0, countResults(ah.get()));
}
@@ -1199,13 +1201,13 @@ public:
auto params = makeIndexScanParams(&_opCtx, getIndex(BSON("foo" << 1), coll));
params.bounds.startKey = BSON("" << 1);
params.bounds.endKey = BSON("" << 1);
- ah->addChild(new IndexScan(&_opCtx, params, &ws, nullptr));
+ ah->addChild(std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr));
// Intersect with 7 <= bar < 10000
params = makeIndexScanParams(&_opCtx, getIndex(BSON("bar" << 1), coll));
params.bounds.startKey = BSON("" << 7);
params.bounds.endKey = BSON("" << 10000);
- ah->addChild(new IndexScan(&_opCtx, params, &ws, nullptr));
+ ah->addChild(std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr));
WorkingSetID lastId = WorkingSet::INVALID_ID;
@@ -1261,18 +1263,19 @@ public:
auto params = makeIndexScanParams(&_opCtx, getIndex(BSON("foo" << 1), coll));
params.bounds.startKey = BSON("" << 1);
params.bounds.endKey = BSON("" << 1);
- IndexScan* firstScan = new IndexScan(&_opCtx, params, &ws, nullptr);
+ auto firstScan = std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr);
// First child of the AND_SORTED stage is a Fetch. The NULL in the
// constructor means there is no filter.
- FetchStage* fetch = new FetchStage(&_opCtx, &ws, firstScan, nullptr, coll);
- as->addChild(fetch);
+ auto fetch =
+ std::make_unique<FetchStage>(&_opCtx, &ws, std::move(firstScan), nullptr, coll);
+ as->addChild(std::move(fetch));
// bar == 1
params = makeIndexScanParams(&_opCtx, getIndex(BSON("bar" << 1), coll));
params.bounds.startKey = BSON("" << 1);
params.bounds.endKey = BSON("" << 1);
- as->addChild(new IndexScan(&_opCtx, params, &ws, nullptr));
+ as->addChild(std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr));
for (int i = 0; i < 50; i++) {
BSONObj obj = getNext(as.get(), &ws);
@@ -1313,18 +1316,19 @@ public:
auto params = makeIndexScanParams(&_opCtx, getIndex(BSON("foo" << 1), coll));
params.bounds.startKey = BSON("" << 1);
params.bounds.endKey = BSON("" << 1);
- as->addChild(new IndexScan(&_opCtx, params, &ws, nullptr));
+ as->addChild(std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr));
// bar == 1
params = makeIndexScanParams(&_opCtx, getIndex(BSON("bar" << 1), coll));
params.bounds.startKey = BSON("" << 1);
params.bounds.endKey = BSON("" << 1);
- IndexScan* secondScan = new IndexScan(&_opCtx, params, &ws, nullptr);
+ auto secondScan = std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr);
// Second child of the AND_SORTED stage is a Fetch. The NULL in the
// constructor means there is no filter.
- FetchStage* fetch = new FetchStage(&_opCtx, &ws, secondScan, nullptr, coll);
- as->addChild(fetch);
+ auto fetch =
+ std::make_unique<FetchStage>(&_opCtx, &ws, std::move(secondScan), nullptr, coll);
+ as->addChild(std::move(fetch));
for (int i = 0; i < 50; i++) {
BSONObj obj = getNext(as.get(), &ws);
diff --git a/src/mongo/dbtests/query_stage_cached_plan.cpp b/src/mongo/dbtests/query_stage_cached_plan.cpp
index bfb99241e81..fd3b508fc37 100644
--- a/src/mongo/dbtests/query_stage_cached_plan.cpp
+++ b/src/mongo/dbtests/query_stage_cached_plan.cpp
@@ -158,7 +158,7 @@ public:
}
CachedPlanStage cachedPlanStage(
- &_opCtx, collection, &_ws, cq, plannerParams, decisionWorks, mockChild.release());
+ &_opCtx, collection, &_ws, cq, plannerParams, decisionWorks, std::move(mockChild));
// This should succeed after triggering a replan.
PlanYieldPolicy yieldPolicy(PlanExecutor::NO_YIELD,
@@ -205,7 +205,7 @@ TEST_F(QueryStageCachedPlan, QueryStageCachedPlanFailure) {
// High enough so that we shouldn't trigger a replan based on works.
const size_t decisionWorks = 50;
CachedPlanStage cachedPlanStage(
- &_opCtx, collection, &_ws, cq.get(), plannerParams, decisionWorks, mockChild.release());
+ &_opCtx, collection, &_ws, cq.get(), plannerParams, decisionWorks, std::move(mockChild));
// This should succeed after triggering a replan.
PlanYieldPolicy yieldPolicy(PlanExecutor::NO_YIELD,
@@ -255,7 +255,7 @@ TEST_F(QueryStageCachedPlan, QueryStageCachedPlanHitMaxWorks) {
}
CachedPlanStage cachedPlanStage(
- &_opCtx, collection, &_ws, cq.get(), plannerParams, decisionWorks, mockChild.release());
+ &_opCtx, collection, &_ws, cq.get(), plannerParams, decisionWorks, std::move(mockChild));
// This should succeed after triggering a replan.
PlanYieldPolicy yieldPolicy(PlanExecutor::NO_YIELD,
@@ -459,7 +459,7 @@ TEST_F(QueryStageCachedPlan, ThrowsOnYieldRecoveryWhenIndexIsDroppedBeforePlanSe
cq.get(),
plannerParams,
decisionWorks,
- new QueuedDataStage(&_opCtx, &_ws));
+ std::make_unique<QueuedDataStage>(&_opCtx, &_ws));
// Drop an index while the CachedPlanStage is in a saved state. Restoring should fail, since we
// may still need the dropped index for plan selection.
@@ -501,7 +501,7 @@ TEST_F(QueryStageCachedPlan, DoesNotThrowOnYieldRecoveryWhenIndexIsDroppedAferPl
cq.get(),
plannerParams,
decisionWorks,
- new QueuedDataStage(&_opCtx, &_ws));
+ std::make_unique<QueuedDataStage>(&_opCtx, &_ws));
PlanYieldPolicy yieldPolicy(PlanExecutor::YIELD_MANUAL,
_opCtx.getServiceContext()->getFastClockSource());
diff --git a/src/mongo/dbtests/query_stage_ensure_sorted.cpp b/src/mongo/dbtests/query_stage_ensure_sorted.cpp
index f5d9c8b4c50..a3a81a84660 100644
--- a/src/mongo/dbtests/query_stage_ensure_sorted.cpp
+++ b/src/mongo/dbtests/query_stage_ensure_sorted.cpp
@@ -86,8 +86,8 @@ public:
// Initialization.
BSONObj pattern = fromjson(patternStr);
auto sortKeyGen = std::make_unique<SortKeyGeneratorStage>(
- pExpCtx, queuedDataStage.release(), &ws, pattern);
- EnsureSortedStage ess(opCtx.get(), pattern, &ws, sortKeyGen.release());
+ pExpCtx, std::move(queuedDataStage), &ws, pattern);
+ EnsureSortedStage ess(opCtx.get(), pattern, &ws, std::move(sortKeyGen));
WorkingSetID id = WorkingSet::INVALID_ID;
PlanStage::StageState state = PlanStage::NEED_TIME;
@@ -125,9 +125,9 @@ TEST_F(QueryStageEnsureSortedTest, EnsureSortedEmptyWorkingSet) {
WorkingSet ws;
auto queuedDataStage = std::make_unique<QueuedDataStage>(opCtx.get(), &ws);
- auto sortKeyGen =
- std::make_unique<SortKeyGeneratorStage>(pExpCtx, queuedDataStage.release(), &ws, BSONObj());
- EnsureSortedStage ess(opCtx.get(), BSONObj(), &ws, sortKeyGen.release());
+ auto sortKeyGen = std::make_unique<SortKeyGeneratorStage>(
+ pExpCtx, std::move(queuedDataStage), &ws, BSONObj());
+ EnsureSortedStage ess(opCtx.get(), BSONObj(), &ws, std::move(sortKeyGen));
WorkingSetID id = WorkingSet::INVALID_ID;
PlanStage::StageState state = PlanStage::NEED_TIME;
diff --git a/src/mongo/dbtests/query_stage_fetch.cpp b/src/mongo/dbtests/query_stage_fetch.cpp
index a03c7a64e8e..8e6b0930e90 100644
--- a/src/mongo/dbtests/query_stage_fetch.cpp
+++ b/src/mongo/dbtests/query_stage_fetch.cpp
@@ -138,8 +138,8 @@ public:
mockStage->pushBack(id);
}
- unique_ptr<FetchStage> fetchStage(
- new FetchStage(&_opCtx, &ws, mockStage.release(), nullptr, coll));
+ auto fetchStage =
+ std::make_unique<FetchStage>(&_opCtx, &ws, std::move(mockStage), nullptr, coll);
WorkingSetID id = WorkingSet::INVALID_ID;
PlanStage::StageState state;
@@ -207,8 +207,8 @@ public:
unique_ptr<MatchExpression> filterExpr = std::move(statusWithMatcher.getValue());
// Matcher requires that foo==6 but we only have data with foo==5.
- unique_ptr<FetchStage> fetchStage(
- new FetchStage(&_opCtx, &ws, mockStage.release(), filterExpr.get(), coll));
+ auto fetchStage = std::make_unique<FetchStage>(
+ &_opCtx, &ws, std::move(mockStage), filterExpr.get(), coll);
// First call should return a fetch request as it's not in memory.
WorkingSetID id = WorkingSet::INVALID_ID;
diff --git a/src/mongo/dbtests/query_stage_limit_skip.cpp b/src/mongo/dbtests/query_stage_limit_skip.cpp
index 1ef07023e7c..69dd1d5b6da 100644
--- a/src/mongo/dbtests/query_stage_limit_skip.cpp
+++ b/src/mongo/dbtests/query_stage_limit_skip.cpp
@@ -56,7 +56,7 @@ using std::unique_ptr;
static const int N = 50;
/* Populate a QueuedDataStage and return it. Caller owns it. */
-QueuedDataStage* getMS(OperationContext* opCtx, WorkingSet* ws) {
+std::unique_ptr<QueuedDataStage> getMS(OperationContext* opCtx, WorkingSet* ws) {
auto ms = std::make_unique<QueuedDataStage>(opCtx, ws);
// Put N ADVANCED results into the mock stage, and some other stalling results (YIELD/TIME).
@@ -72,7 +72,7 @@ QueuedDataStage* getMS(OperationContext* opCtx, WorkingSet* ws) {
ms->pushBack(PlanStage::NEED_TIME);
}
- return ms.release();
+ return ms;
}
int countResults(PlanStage* stage) {
diff --git a/src/mongo/dbtests/query_stage_merge_sort.cpp b/src/mongo/dbtests/query_stage_merge_sort.cpp
index 445a6891ed9..14a0d2019d8 100644
--- a/src/mongo/dbtests/query_stage_merge_sort.cpp
+++ b/src/mongo/dbtests/query_stage_merge_sort.cpp
@@ -169,18 +169,18 @@ public:
// Sort by c:1
MergeSortStageParams msparams;
msparams.pattern = BSON("c" << 1);
- MergeSortStage* ms = new MergeSortStage(&_opCtx, msparams, ws.get());
+ auto ms = std::make_unique<MergeSortStage>(&_opCtx, msparams, ws.get());
// a:1
auto params = makeIndexScanParams(&_opCtx, getIndex(firstIndex, coll));
- ms->addChild(new IndexScan(&_opCtx, params, ws.get(), nullptr));
+ ms->addChild(std::make_unique<IndexScan>(&_opCtx, params, ws.get(), nullptr));
// b:1
params = makeIndexScanParams(&_opCtx, getIndex(secondIndex, coll));
- ms->addChild(new IndexScan(&_opCtx, params, ws.get(), nullptr));
+ ms->addChild(std::make_unique<IndexScan>(&_opCtx, params, ws.get(), nullptr));
unique_ptr<FetchStage> fetchStage =
- make_unique<FetchStage>(&_opCtx, ws.get(), ms, nullptr, coll);
+ make_unique<FetchStage>(&_opCtx, ws.get(), std::move(ms), nullptr, coll);
// Must fetch if we want to easily pull out an obj.
auto statusWithPlanExecutor = PlanExecutor::make(
&_opCtx, std::move(ws), std::move(fetchStage), coll, PlanExecutor::NO_YIELD);
@@ -234,17 +234,17 @@ public:
// Sort by c:1
MergeSortStageParams msparams;
msparams.pattern = BSON("c" << 1);
- MergeSortStage* ms = new MergeSortStage(&_opCtx, msparams, ws.get());
+ auto ms = std::make_unique<MergeSortStage>(&_opCtx, msparams, ws.get());
// a:1
auto params = makeIndexScanParams(&_opCtx, getIndex(firstIndex, coll));
- ms->addChild(new IndexScan(&_opCtx, params, ws.get(), nullptr));
+ ms->addChild(std::make_unique<IndexScan>(&_opCtx, params, ws.get(), nullptr));
// b:1
params = makeIndexScanParams(&_opCtx, getIndex(secondIndex, coll));
- ms->addChild(new IndexScan(&_opCtx, params, ws.get(), nullptr));
+ ms->addChild(std::make_unique<IndexScan>(&_opCtx, params, ws.get(), nullptr));
unique_ptr<FetchStage> fetchStage =
- make_unique<FetchStage>(&_opCtx, ws.get(), ms, nullptr, coll);
+ make_unique<FetchStage>(&_opCtx, ws.get(), std::move(ms), nullptr, coll);
auto statusWithPlanExecutor = PlanExecutor::make(
&_opCtx, std::move(ws), std::move(fetchStage), coll, PlanExecutor::NO_YIELD);
@@ -298,17 +298,17 @@ public:
MergeSortStageParams msparams;
msparams.dedup = false;
msparams.pattern = BSON("c" << 1);
- MergeSortStage* ms = new MergeSortStage(&_opCtx, msparams, ws.get());
+ auto ms = std::make_unique<MergeSortStage>(&_opCtx, msparams, ws.get());
// a:1
auto params = makeIndexScanParams(&_opCtx, getIndex(firstIndex, coll));
- ms->addChild(new IndexScan(&_opCtx, params, ws.get(), nullptr));
+ ms->addChild(std::make_unique<IndexScan>(&_opCtx, params, ws.get(), nullptr));
// b:1
params = makeIndexScanParams(&_opCtx, getIndex(secondIndex, coll));
- ms->addChild(new IndexScan(&_opCtx, params, ws.get(), nullptr));
+ ms->addChild(std::make_unique<IndexScan>(&_opCtx, params, ws.get(), nullptr));
unique_ptr<FetchStage> fetchStage =
- make_unique<FetchStage>(&_opCtx, ws.get(), ms, nullptr, coll);
+ make_unique<FetchStage>(&_opCtx, ws.get(), std::move(ms), nullptr, coll);
auto statusWithPlanExecutor = PlanExecutor::make(
&_opCtx, std::move(ws), std::move(fetchStage), coll, PlanExecutor::NO_YIELD);
@@ -364,21 +364,21 @@ public:
// Sort by c:-1
MergeSortStageParams msparams;
msparams.pattern = BSON("c" << -1);
- MergeSortStage* ms = new MergeSortStage(&_opCtx, msparams, ws.get());
+ auto ms = std::make_unique<MergeSortStage>(&_opCtx, msparams, ws.get());
// a:1
auto params = makeIndexScanParams(&_opCtx, getIndex(firstIndex, coll));
params.bounds.startKey = objWithMaxKey(1);
params.bounds.endKey = objWithMinKey(1);
- ms->addChild(new IndexScan(&_opCtx, params, ws.get(), nullptr));
+ ms->addChild(std::make_unique<IndexScan>(&_opCtx, params, ws.get(), nullptr));
// b:1
params = makeIndexScanParams(&_opCtx, getIndex(secondIndex, coll));
params.bounds.startKey = objWithMaxKey(1);
params.bounds.endKey = objWithMinKey(1);
- ms->addChild(new IndexScan(&_opCtx, params, ws.get(), nullptr));
+ ms->addChild(std::make_unique<IndexScan>(&_opCtx, params, ws.get(), nullptr));
unique_ptr<FetchStage> fetchStage =
- make_unique<FetchStage>(&_opCtx, ws.get(), ms, nullptr, coll);
+ make_unique<FetchStage>(&_opCtx, ws.get(), std::move(ms), nullptr, coll);
auto statusWithPlanExecutor = PlanExecutor::make(
&_opCtx, std::move(ws), std::move(fetchStage), coll, PlanExecutor::NO_YIELD);
@@ -432,19 +432,19 @@ public:
// Sort by c:1
MergeSortStageParams msparams;
msparams.pattern = BSON("c" << 1);
- MergeSortStage* ms = new MergeSortStage(&_opCtx, msparams, ws.get());
+ auto ms = std::make_unique<MergeSortStage>(&_opCtx, msparams, ws.get());
// a:1
auto params = makeIndexScanParams(&_opCtx, getIndex(firstIndex, coll));
- ms->addChild(new IndexScan(&_opCtx, params, ws.get(), nullptr));
+ ms->addChild(std::make_unique<IndexScan>(&_opCtx, params, ws.get(), nullptr));
// b:51 (EOF)
params = makeIndexScanParams(&_opCtx, getIndex(secondIndex, coll));
params.bounds.startKey = BSON("" << 51 << "" << MinKey);
params.bounds.endKey = BSON("" << 51 << "" << MaxKey);
- ms->addChild(new IndexScan(&_opCtx, params, ws.get(), nullptr));
+ ms->addChild(std::make_unique<IndexScan>(&_opCtx, params, ws.get(), nullptr));
unique_ptr<FetchStage> fetchStage =
- make_unique<FetchStage>(&_opCtx, ws.get(), ms, nullptr, coll);
+ make_unique<FetchStage>(&_opCtx, ws.get(), std::move(ms), nullptr, coll);
auto statusWithPlanExecutor = PlanExecutor::make(
&_opCtx, std::move(ws), std::move(fetchStage), coll, PlanExecutor::NO_YIELD);
@@ -482,7 +482,7 @@ public:
// Sort by foo:1
MergeSortStageParams msparams;
msparams.pattern = BSON("foo" << 1);
- MergeSortStage* ms = new MergeSortStage(&_opCtx, msparams, ws.get());
+ auto ms = std::make_unique<MergeSortStage>(&_opCtx, msparams, ws.get());
int numIndices = 20;
for (int i = 0; i < numIndices; ++i) {
@@ -493,10 +493,10 @@ public:
BSONObj indexSpec = BSON(index << 1 << "foo" << 1);
addIndex(indexSpec);
auto params = makeIndexScanParams(&_opCtx, getIndex(indexSpec, coll));
- ms->addChild(new IndexScan(&_opCtx, params, ws.get(), nullptr));
+ ms->addChild(std::make_unique<IndexScan>(&_opCtx, params, ws.get(), nullptr));
}
unique_ptr<FetchStage> fetchStage =
- make_unique<FetchStage>(&_opCtx, ws.get(), ms, nullptr, coll);
+ make_unique<FetchStage>(&_opCtx, ws.get(), std::move(ms), nullptr, coll);
auto statusWithPlanExecutor = PlanExecutor::make(
&_opCtx, std::move(ws), std::move(fetchStage), coll, PlanExecutor::NO_YIELD);
@@ -547,7 +547,7 @@ public:
BSONObj indexSpec = BSON(index << 1 << "foo" << 1);
addIndex(indexSpec);
auto params = makeIndexScanParams(&_opCtx, getIndex(indexSpec, coll));
- ms->addChild(new IndexScan(&_opCtx, params, &ws, nullptr));
+ ms->addChild(std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr));
}
set<RecordId> recordIds;
@@ -670,8 +670,12 @@ public:
params.bounds.startKey = BSON("" << 5);
params.bounds.endKey = BSON("" << 10);
auto fetchStage = std::make_unique<FetchStage>(
- &_opCtx, &ws, new IndexScan(&_opCtx, params, &ws, nullptr), nullptr, coll);
- ms->addChild(fetchStage.release());
+ &_opCtx,
+ &ws,
+ std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr),
+ nullptr,
+ coll);
+ ms->addChild(std::move(fetchStage));
}
// Second child scans [4, 10].
@@ -680,8 +684,12 @@ public:
params.bounds.startKey = BSON("" << 4);
params.bounds.endKey = BSON("" << 10);
auto fetchStage = std::make_unique<FetchStage>(
- &_opCtx, &ws, new IndexScan(&_opCtx, params, &ws, nullptr), nullptr, coll);
- ms->addChild(fetchStage.release());
+ &_opCtx,
+ &ws,
+ std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr),
+ nullptr,
+ coll);
+ ms->addChild(std::move(fetchStage));
}
// First doc should be {a: 4}.
@@ -761,18 +769,17 @@ public:
MergeSortStageParams msparams;
msparams.pattern = BSON("c" << 1 << "d" << 1);
msparams.collator = nullptr;
- MergeSortStage* ms = new MergeSortStage(&_opCtx, msparams, ws.get());
+ auto ms = std::make_unique<MergeSortStage>(&_opCtx, msparams, ws.get());
// a:1
auto params = makeIndexScanParams(&_opCtx, getIndex(firstIndex, coll));
- ms->addChild(new IndexScan(&_opCtx, params, ws.get(), nullptr));
+ ms->addChild(std::make_unique<IndexScan>(&_opCtx, params, ws.get(), nullptr));
// b:1
params = makeIndexScanParams(&_opCtx, getIndex(secondIndex, coll));
- ms->addChild(new IndexScan(&_opCtx, params, ws.get(), nullptr));
+ ms->addChild(std::make_unique<IndexScan>(&_opCtx, params, ws.get(), nullptr));
- unique_ptr<FetchStage> fetchStage =
- make_unique<FetchStage>(&_opCtx, ws.get(), ms, nullptr, coll);
+ auto fetchStage = make_unique<FetchStage>(&_opCtx, ws.get(), std::move(ms), nullptr, coll);
// Must fetch if we want to easily pull out an obj.
auto statusWithPlanExecutor = PlanExecutor::make(
&_opCtx, std::move(ws), std::move(fetchStage), coll, PlanExecutor::NO_YIELD);
@@ -829,18 +836,18 @@ public:
msparams.pattern = BSON("c" << 1 << "d" << 1);
CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kReverseString);
msparams.collator = &collator;
- MergeSortStage* ms = new MergeSortStage(&_opCtx, msparams, ws.get());
+ auto ms = std::make_unique<MergeSortStage>(&_opCtx, msparams, ws.get());
// a:1
auto params = makeIndexScanParams(&_opCtx, getIndex(firstIndex, coll));
- ms->addChild(new IndexScan(&_opCtx, params, ws.get(), nullptr));
+ ms->addChild(std::make_unique<IndexScan>(&_opCtx, params, ws.get(), nullptr));
// b:1
params = makeIndexScanParams(&_opCtx, getIndex(secondIndex, coll));
- ms->addChild(new IndexScan(&_opCtx, params, ws.get(), nullptr));
+ ms->addChild(std::make_unique<IndexScan>(&_opCtx, params, ws.get(), nullptr));
unique_ptr<FetchStage> fetchStage =
- make_unique<FetchStage>(&_opCtx, ws.get(), ms, nullptr, coll);
+ make_unique<FetchStage>(&_opCtx, ws.get(), std::move(ms), nullptr, coll);
// Must fetch if we want to easily pull out an obj.
auto statusWithPlanExecutor = PlanExecutor::make(
&_opCtx, std::move(ws), std::move(fetchStage), coll, PlanExecutor::NO_YIELD);
diff --git a/src/mongo/dbtests/query_stage_multiplan.cpp b/src/mongo/dbtests/query_stage_multiplan.cpp
index 20d82757c3e..c08ca0abad0 100644
--- a/src/mongo/dbtests/query_stage_multiplan.cpp
+++ b/src/mongo/dbtests/query_stage_multiplan.cpp
@@ -148,10 +148,8 @@ unique_ptr<PlanStage> getIxScanPlan(OperationContext* opCtx,
ixparams.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys;
ixparams.direction = 1;
- IndexScan* ix = new IndexScan(opCtx, ixparams, sharedWs, nullptr);
- unique_ptr<PlanStage> root(new FetchStage(opCtx, sharedWs, ix, nullptr, coll));
-
- return root;
+ auto ixscan = std::make_unique<IndexScan>(opCtx, ixparams, sharedWs, nullptr);
+ return std::make_unique<FetchStage>(opCtx, sharedWs, std::move(ixscan), nullptr, coll);
}
unique_ptr<MatchExpression> makeMatchExpressionFromFilter(OperationContext* opCtx,
@@ -197,8 +195,8 @@ std::unique_ptr<MultiPlanStage> runMultiPlanner(OperationContext* opCtx,
auto cq = makeCanonicalQuery(opCtx, nss, BSON("foo" << desiredFooValue));
unique_ptr<MultiPlanStage> mps = std::make_unique<MultiPlanStage>(opCtx, coll, cq.get());
- mps->addPlan(createQuerySolution(), ixScanRoot.release(), sharedWs.get());
- mps->addPlan(createQuerySolution(), collScanRoot.release(), sharedWs.get());
+ mps->addPlan(createQuerySolution(), std::move(ixScanRoot), sharedWs.get());
+ mps->addPlan(createQuerySolution(), std::move(collScanRoot), sharedWs.get());
// Plan 0 aka the first plan aka the index scan should be the best.
PlanYieldPolicy yieldPolicy(PlanExecutor::NO_YIELD,
@@ -245,8 +243,8 @@ TEST_F(QueryStageMultiPlanTest, MPSCollectionScanVsHighlySelectiveIXScan) {
unique_ptr<MultiPlanStage> mps =
std::make_unique<MultiPlanStage>(_opCtx.get(), ctx.getCollection(), cq.get());
- mps->addPlan(createQuerySolution(), ixScanRoot.release(), sharedWs.get());
- mps->addPlan(createQuerySolution(), collScanRoot.release(), sharedWs.get());
+ mps->addPlan(createQuerySolution(), std::move(ixScanRoot), sharedWs.get());
+ mps->addPlan(createQuerySolution(), std::move(collScanRoot), sharedWs.get());
// Plan 0 aka the first plan aka the index scan should be the best.
PlanYieldPolicy yieldPolicy(PlanExecutor::NO_YIELD, _clock);
@@ -403,10 +401,8 @@ TEST_F(QueryStageMultiPlanTest, MPSBackupPlan) {
unique_ptr<WorkingSet> ws(new WorkingSet());
// Put each solution from the planner into the MPR.
for (size_t i = 0; i < solutions.size(); ++i) {
- PlanStage* root;
- ASSERT(StageBuilder::build(_opCtx.get(), collection, *cq, *solutions[i], ws.get(), &root));
- // Takes ownership of 'root'.
- mps->addPlan(std::move(solutions[i]), root, ws.get());
+ auto root = StageBuilder::build(_opCtx.get(), collection, *cq, *solutions[i], ws.get());
+ mps->addPlan(std::move(solutions[i]), std::move(root), ws.get());
}
// This sets a backup plan.
@@ -492,8 +488,8 @@ TEST_F(QueryStageMultiPlanTest, MPSExplainAllPlans) {
std::make_unique<MultiPlanStage>(_opCtx.get(), ctx.getCollection(), cq.get());
// Put each plan into the MultiPlanStage. Takes ownership of 'firstPlan' and 'secondPlan'.
- mps->addPlan(std::make_unique<QuerySolution>(), firstPlan.release(), ws.get());
- mps->addPlan(std::make_unique<QuerySolution>(), secondPlan.release(), ws.get());
+ mps->addPlan(std::make_unique<QuerySolution>(), std::move(firstPlan), ws.get());
+ mps->addPlan(std::make_unique<QuerySolution>(), std::move(secondPlan), ws.get());
// Making a PlanExecutor chooses the best plan.
auto exec = uassertStatusOK(PlanExecutor::make(
@@ -595,8 +591,8 @@ TEST_F(QueryStageMultiPlanTest, ShouldReportErrorIfExceedsTimeLimitDuringPlannin
ctx.getCollection(),
canonicalQuery.get(),
MultiPlanStage::CachingMode::NeverCache);
- multiPlanStage.addPlan(createQuerySolution(), ixScanRoot.release(), sharedWs.get());
- multiPlanStage.addPlan(createQuerySolution(), collScanRoot.release(), sharedWs.get());
+ multiPlanStage.addPlan(createQuerySolution(), std::move(ixScanRoot), sharedWs.get());
+ multiPlanStage.addPlan(createQuerySolution(), std::move(collScanRoot), sharedWs.get());
AlwaysTimeOutYieldPolicy alwaysTimeOutPolicy(serviceContext()->getFastClockSource());
ASSERT_EQ(ErrorCodes::ExceededTimeLimit, multiPlanStage.pickBestPlan(&alwaysTimeOutPolicy));
@@ -635,8 +631,8 @@ TEST_F(QueryStageMultiPlanTest, ShouldReportErrorIfKilledDuringPlanning) {
ctx.getCollection(),
canonicalQuery.get(),
MultiPlanStage::CachingMode::NeverCache);
- multiPlanStage.addPlan(createQuerySolution(), ixScanRoot.release(), sharedWs.get());
- multiPlanStage.addPlan(createQuerySolution(), collScanRoot.release(), sharedWs.get());
+ multiPlanStage.addPlan(createQuerySolution(), std::move(ixScanRoot), sharedWs.get());
+ multiPlanStage.addPlan(createQuerySolution(), std::move(collScanRoot), sharedWs.get());
AlwaysPlanKilledYieldPolicy alwaysPlanKilledYieldPolicy(serviceContext()->getFastClockSource());
ASSERT_EQ(ErrorCodes::QueryPlanKilled,
diff --git a/src/mongo/dbtests/query_stage_sort.cpp b/src/mongo/dbtests/query_stage_sort.cpp
index dcf7687ded7..27561076276 100644
--- a/src/mongo/dbtests/query_stage_sort.cpp
+++ b/src/mongo/dbtests/query_stage_sort.cpp
@@ -121,9 +121,9 @@ public:
params.limit = limit();
auto keyGenStage = std::make_unique<SortKeyGeneratorStage>(
- _pExpCtx, queuedDataStage.release(), ws.get(), params.pattern);
+ _pExpCtx, std::move(queuedDataStage), ws.get(), params.pattern);
- auto ss = std::make_unique<SortStage>(&_opCtx, params, ws.get(), keyGenStage.release());
+ auto ss = std::make_unique<SortStage>(&_opCtx, params, ws.get(), std::move(keyGenStage));
// The PlanExecutor will be automatically registered on construction due to the auto
// yield policy, so it can receive invalidations when we remove documents later.
@@ -159,13 +159,13 @@ public:
params.limit = limit();
auto keyGenStage = std::make_unique<SortKeyGeneratorStage>(
- _pExpCtx, queuedDataStage.release(), ws.get(), params.pattern);
+ _pExpCtx, std::move(queuedDataStage), ws.get(), params.pattern);
auto sortStage =
- std::make_unique<SortStage>(&_opCtx, params, ws.get(), keyGenStage.release());
+ std::make_unique<SortStage>(&_opCtx, params, ws.get(), std::move(keyGenStage));
auto fetchStage =
- std::make_unique<FetchStage>(&_opCtx, ws.get(), sortStage.release(), nullptr, coll);
+ std::make_unique<FetchStage>(&_opCtx, ws.get(), std::move(sortStage), nullptr, coll);
// Must fetch so we can look at the doc as a BSONObj.
auto statusWithPlanExecutor = PlanExecutor::make(
@@ -561,13 +561,13 @@ public:
params.pattern = BSON("b" << -1 << "c" << 1 << "a" << 1);
auto keyGenStage = std::make_unique<SortKeyGeneratorStage>(
- _pExpCtx, queuedDataStage.release(), ws.get(), params.pattern);
+ _pExpCtx, std::move(queuedDataStage), ws.get(), params.pattern);
auto sortStage =
- std::make_unique<SortStage>(&_opCtx, params, ws.get(), keyGenStage.release());
+ std::make_unique<SortStage>(&_opCtx, params, ws.get(), std::move(keyGenStage));
auto fetchStage =
- std::make_unique<FetchStage>(&_opCtx, ws.get(), sortStage.release(), nullptr, coll);
+ std::make_unique<FetchStage>(&_opCtx, ws.get(), std::move(sortStage), nullptr, coll);
// We don't get results back since we're sorting some parallel arrays.
auto statusWithPlanExecutor = PlanExecutor::make(
diff --git a/src/mongo/dbtests/query_stage_sort_key_generator.cpp b/src/mongo/dbtests/query_stage_sort_key_generator.cpp
index 18185250ff9..51fd476078c 100644
--- a/src/mongo/dbtests/query_stage_sort_key_generator.cpp
+++ b/src/mongo/dbtests/query_stage_sort_key_generator.cpp
@@ -78,7 +78,7 @@ BSONObj extractSortKey(const char* sortSpec, const char* doc, const CollatorInte
BSONObj sortPattern = fromjson(sortSpec);
SortKeyGeneratorStage sortKeyGen{
- pExpCtx, mockStage.release(), &workingSet, std::move(sortPattern)};
+ pExpCtx, std::move(mockStage), &workingSet, std::move(sortPattern)};
return extractKeyFromKeyGenStage(&sortKeyGen, &workingSet);
}
@@ -107,7 +107,7 @@ BSONObj extractSortKeyCovered(const char* sortSpec,
BSONObj sortPattern = fromjson(sortSpec);
SortKeyGeneratorStage sortKeyGen{
- pExpCtx, mockStage.release(), &workingSet, std::move(sortPattern)};
+ pExpCtx, std::move(mockStage), &workingSet, std::move(sortPattern)};
return extractKeyFromKeyGenStage(&sortKeyGen, &workingSet);
}