-rw-r--r--  src/mongo/db/commands/index_filter_commands_test.cpp | 3
-rw-r--r--  src/mongo/db/commands/plan_cache_commands_test.cpp | 3
-rw-r--r--  src/mongo/db/exec/2dnear.cpp | 7
-rw-r--r--  src/mongo/db/exec/2dnear.h | 2
-rw-r--r--  src/mongo/db/exec/and_hash.cpp | 12
-rw-r--r--  src/mongo/db/exec/and_hash.h | 2
-rw-r--r--  src/mongo/db/exec/and_sorted.cpp | 13
-rw-r--r--  src/mongo/db/exec/and_sorted.h | 2
-rw-r--r--  src/mongo/db/exec/cached_plan.cpp | 6
-rw-r--r--  src/mongo/db/exec/cached_plan.h | 2
-rw-r--r--  src/mongo/db/exec/collection_scan.cpp | 14
-rw-r--r--  src/mongo/db/exec/collection_scan.h | 3
-rw-r--r--  src/mongo/db/exec/count.cpp | 6
-rw-r--r--  src/mongo/db/exec/count.h | 4
-rw-r--r--  src/mongo/db/exec/distinct_scan.cpp | 6
-rw-r--r--  src/mongo/db/exec/distinct_scan.h | 2
-rw-r--r--  src/mongo/db/exec/fetch.cpp | 23
-rw-r--r--  src/mongo/db/exec/fetch.h | 2
-rw-r--r--  src/mongo/db/exec/index_scan.cpp | 31
-rw-r--r--  src/mongo/db/exec/index_scan.h | 2
-rw-r--r--  src/mongo/db/exec/keep_mutations.cpp | 6
-rw-r--r--  src/mongo/db/exec/keep_mutations.h | 2
-rw-r--r--  src/mongo/db/exec/limit.cpp | 7
-rw-r--r--  src/mongo/db/exec/limit.h | 3
-rw-r--r--  src/mongo/db/exec/merge_sort.cpp | 8
-rw-r--r--  src/mongo/db/exec/merge_sort.h | 2
-rw-r--r--  src/mongo/db/exec/multi_plan.cpp | 96
-rw-r--r--  src/mongo/db/exec/multi_plan.h | 36
-rw-r--r--  src/mongo/db/exec/or.cpp | 12
-rw-r--r--  src/mongo/db/exec/or.h | 2
-rw-r--r--  src/mongo/db/exec/plan_stats.h | 67
-rw-r--r--  src/mongo/db/exec/projection.cpp | 6
-rw-r--r--  src/mongo/db/exec/projection.h | 3
-rw-r--r--  src/mongo/db/exec/s2near.cpp | 9
-rw-r--r--  src/mongo/db/exec/s2near.h | 2
-rw-r--r--  src/mongo/db/exec/shard_filter.cpp | 5
-rw-r--r--  src/mongo/db/exec/shard_filter.h | 2
-rw-r--r--  src/mongo/db/exec/skip.cpp | 7
-rw-r--r--  src/mongo/db/exec/skip.h | 3
-rw-r--r--  src/mongo/db/exec/sort.cpp | 6
-rw-r--r--  src/mongo/db/exec/sort.h | 2
-rw-r--r--  src/mongo/db/exec/text.cpp | 15
-rw-r--r--  src/mongo/db/exec/text.h | 2
-rw-r--r--  src/mongo/db/matcher/expression.cpp | 8
-rw-r--r--  src/mongo/db/matcher/expression.h | 6
-rw-r--r--  src/mongo/db/matcher/expression_array.cpp | 40
-rw-r--r--  src/mongo/db/matcher/expression_array.h | 8
-rw-r--r--  src/mongo/db/matcher/expression_geo.cpp | 8
-rw-r--r--  src/mongo/db/matcher/expression_geo.h | 4
-rw-r--r--  src/mongo/db/matcher/expression_leaf.cpp | 49
-rw-r--r--  src/mongo/db/matcher/expression_leaf.h | 14
-rw-r--r--  src/mongo/db/matcher/expression_text.cpp | 4
-rw-r--r--  src/mongo/db/matcher/expression_text.h | 2
-rw-r--r--  src/mongo/db/matcher/expression_tree.cpp | 33
-rw-r--r--  src/mongo/db/matcher/expression_tree.h | 10
-rw-r--r--  src/mongo/db/matcher/expression_where.cpp | 6
-rw-r--r--  src/mongo/db/matcher/expression_where_noop.cpp | 6
-rw-r--r--  src/mongo/db/query/SConscript | 1
-rw-r--r--  src/mongo/db/query/explain.cpp | 356
-rw-r--r--  src/mongo/db/query/explain.h | 155
-rw-r--r--  src/mongo/db/query/get_runner.cpp | 3
-rw-r--r--  src/mongo/db/query/index_bounds.cpp | 21
-rw-r--r--  src/mongo/db/query/index_bounds.h | 19
-rw-r--r--  src/mongo/db/query/new_find.cpp | 51
-rw-r--r--  src/mongo/db/query/plan_cache_test.cpp | 3
-rw-r--r--  src/mongo/db/query/query_planner.cpp | 3
-rw-r--r--  src/mongo/db/query/query_planner.h | 3
67 files changed, 1211 insertions, 50 deletions
diff --git a/src/mongo/db/commands/index_filter_commands_test.cpp b/src/mongo/db/commands/index_filter_commands_test.cpp
index d80d51d7d82..c3a5fc1d6a5 100644
--- a/src/mongo/db/commands/index_filter_commands_test.cpp
+++ b/src/mongo/db/commands/index_filter_commands_test.cpp
@@ -94,7 +94,8 @@ namespace {
PlanRankingDecision* createDecision(size_t numPlans) {
auto_ptr<PlanRankingDecision> why(new PlanRankingDecision());
for (size_t i = 0; i < numPlans; ++i) {
- auto_ptr<PlanStageStats> stats(new PlanStageStats(CommonStats(), STAGE_COLLSCAN));
+ CommonStats common("COLLSCAN");
+ auto_ptr<PlanStageStats> stats(new PlanStageStats(common, STAGE_COLLSCAN));
stats->specific.reset(new CollectionScanStats());
why->stats.mutableVector().push_back(stats.release());
why->scores.push_back(0U);
diff --git a/src/mongo/db/commands/plan_cache_commands_test.cpp b/src/mongo/db/commands/plan_cache_commands_test.cpp
index 45088250fd9..5a6775cdb74 100644
--- a/src/mongo/db/commands/plan_cache_commands_test.cpp
+++ b/src/mongo/db/commands/plan_cache_commands_test.cpp
@@ -104,7 +104,8 @@ namespace {
PlanRankingDecision* createDecision(size_t numPlans) {
auto_ptr<PlanRankingDecision> why(new PlanRankingDecision());
for (size_t i = 0; i < numPlans; ++i) {
- auto_ptr<PlanStageStats> stats(new PlanStageStats(CommonStats(), STAGE_COLLSCAN));
+ CommonStats common("COLLSCAN");
+ auto_ptr<PlanStageStats> stats(new PlanStageStats(common, STAGE_COLLSCAN));
stats->specific.reset(new CollectionScanStats());
why->stats.mutableVector().push_back(stats.release());
why->scores.push_back(0U);
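
The two test fixes above follow from a change to CommonStats made in plan_stats.h later in this diff: its default constructor becomes private, so a stats object must be labeled with a stage type string at construction time. A minimal sketch of the new pattern (the label normally comes from a stage's static kStageType member rather than a literal):

    // Sketch only; mirrors the hunks above.
    CommonStats common("COLLSCAN");                    // was: CommonStats()
    auto_ptr<PlanStageStats> stats(new PlanStageStats(common, STAGE_COLLSCAN));
    stats->specific.reset(new CollectionScanStats());
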
diff --git a/src/mongo/db/exec/2dnear.cpp b/src/mongo/db/exec/2dnear.cpp
index dda74d064e3..236d062858f 100644
--- a/src/mongo/db/exec/2dnear.cpp
+++ b/src/mongo/db/exec/2dnear.cpp
@@ -37,7 +37,11 @@
namespace mongo {
- TwoDNear::TwoDNear(const TwoDNearParams& params, WorkingSet* ws) {
+ // static
+ const char* TwoDNear::kStageType = "GEO_NEAR_2D";
+
+ TwoDNear::TwoDNear(const TwoDNearParams& params, WorkingSet* ws)
+ : _commonStats(kStageType) {
_params = params;
_workingSet = ws;
_initted = false;
@@ -149,6 +153,7 @@ namespace mongo {
PlanStageStats* TwoDNear::getStats() {
_commonStats.isEOF = isEOF();
+ _specificStats.keyPattern = _params.indexKeyPattern;
auto_ptr<PlanStageStats> ret(new PlanStageStats(_commonStats, STAGE_GEO_NEAR_2D));
ret->specific.reset(new TwoDNearStats(_specificStats));
return ret.release();
diff --git a/src/mongo/db/exec/2dnear.h b/src/mongo/db/exec/2dnear.h
index 65d07908a73..9ac7eae3a7a 100644
--- a/src/mongo/db/exec/2dnear.h
+++ b/src/mongo/db/exec/2dnear.h
@@ -76,6 +76,8 @@ namespace mongo {
virtual PlanStageStats* getStats();
+ static const char* kStageType;
+
private:
WorkingSet* _workingSet;
diff --git a/src/mongo/db/exec/and_hash.cpp b/src/mongo/db/exec/and_hash.cpp
index 2185b12e3c5..7569ff36703 100644
--- a/src/mongo/db/exec/and_hash.cpp
+++ b/src/mongo/db/exec/and_hash.cpp
@@ -48,6 +48,9 @@ namespace mongo {
const size_t AndHashStage::kLookAheadWorks = 10;
+ // static
+ const char* AndHashStage::kStageType = "AND_HASH";
+
AndHashStage::AndHashStage(WorkingSet* ws,
const MatchExpression* filter,
const Collection* collection)
@@ -56,6 +59,7 @@ namespace mongo {
_filter(filter),
_hashingChildren(true),
_currentChild(0),
+ _commonStats(kStageType),
_memUsage(0),
_maxMemUsage(kDefaultMaxMemUsageBytes) {}
@@ -68,6 +72,7 @@ namespace mongo {
_filter(filter),
_hashingChildren(true),
_currentChild(0),
+ _commonStats(kStageType),
_memUsage(0),
_maxMemUsage(maxMemUsage) {}
@@ -501,6 +506,13 @@ namespace mongo {
_specificStats.memLimit = _maxMemUsage;
_specificStats.memUsage = _memUsage;
+ // Add a BSON representation of the filter to the stats tree, if there is one.
+ if (NULL != _filter) {
+ BSONObjBuilder bob;
+ _filter->toBSON(&bob);
+ _commonStats.filter = bob.obj();
+ }
+
auto_ptr<PlanStageStats> ret(new PlanStageStats(_commonStats, STAGE_AND_HASH));
ret->specific.reset(new AndHashStats(_specificStats));
for (size_t i = 0; i < _children.size(); ++i) {
diff --git a/src/mongo/db/exec/and_hash.h b/src/mongo/db/exec/and_hash.h
index 957bf21785c..d72edc19fa4 100644
--- a/src/mongo/db/exec/and_hash.h
+++ b/src/mongo/db/exec/and_hash.h
@@ -81,6 +81,8 @@ namespace mongo {
virtual PlanStageStats* getStats();
+ static const char* kStageType;
+
private:
static const size_t kLookAheadWorks;
diff --git a/src/mongo/db/exec/and_sorted.cpp b/src/mongo/db/exec/and_sorted.cpp
index 9ba1bc52584..71979872a4a 100644
--- a/src/mongo/db/exec/and_sorted.cpp
+++ b/src/mongo/db/exec/and_sorted.cpp
@@ -35,6 +35,9 @@
namespace mongo {
+ // static
+ const char* AndSortedStage::kStageType = "AND_SORTED";
+
AndSortedStage::AndSortedStage(WorkingSet* ws,
const MatchExpression* filter,
const Collection* collection)
@@ -42,7 +45,8 @@ namespace mongo {
_ws(ws),
_filter(filter),
_targetNode(numeric_limits<size_t>::max()),
- _targetId(WorkingSet::INVALID_ID), _isEOF(false) { }
+ _targetId(WorkingSet::INVALID_ID), _isEOF(false),
+ _commonStats(kStageType) { }
AndSortedStage::~AndSortedStage() {
for (size_t i = 0; i < _children.size(); ++i) { delete _children[i]; }
@@ -296,6 +300,13 @@ namespace mongo {
PlanStageStats* AndSortedStage::getStats() {
_commonStats.isEOF = isEOF();
+ // Add a BSON representation of the filter to the stats tree, if there is one.
+ if (NULL != _filter) {
+ BSONObjBuilder bob;
+ _filter->toBSON(&bob);
+ _commonStats.filter = bob.obj();
+ }
+
auto_ptr<PlanStageStats> ret(new PlanStageStats(_commonStats, STAGE_AND_SORTED));
ret->specific.reset(new AndSortedStats(_specificStats));
for (size_t i = 0; i < _children.size(); ++i) {
diff --git a/src/mongo/db/exec/and_sorted.h b/src/mongo/db/exec/and_sorted.h
index 667638b684e..afcf4de5c49 100644
--- a/src/mongo/db/exec/and_sorted.h
+++ b/src/mongo/db/exec/and_sorted.h
@@ -67,6 +67,8 @@ namespace mongo {
virtual PlanStageStats* getStats();
+ static const char* kStageType;
+
private:
// Find a node to AND against.
PlanStage::StageState getTargetLoc(WorkingSetID* out);
diff --git a/src/mongo/db/exec/cached_plan.cpp b/src/mongo/db/exec/cached_plan.cpp
index a18b2132890..874e52fd526 100644
--- a/src/mongo/db/exec/cached_plan.cpp
+++ b/src/mongo/db/exec/cached_plan.cpp
@@ -40,6 +40,9 @@
namespace mongo {
+ // static
+ const char* CachedPlanStage::kStageType = "CACHED_PLAN";
+
CachedPlanStage::CachedPlanStage(const Collection* collection,
CanonicalQuery* cq,
PlanStage* mainChild,
@@ -50,7 +53,8 @@ namespace mongo {
_backupChildPlan(backupChild),
_usingBackupChild(false),
_alreadyProduced(false),
- _updatedCache(false) { }
+ _updatedCache(false),
+ _commonStats(kStageType) {}
CachedPlanStage::~CachedPlanStage() {
// We may have produced all necessary results without hitting EOF.
diff --git a/src/mongo/db/exec/cached_plan.h b/src/mongo/db/exec/cached_plan.h
index 7bdd16f4f6a..37962fbd456 100644
--- a/src/mongo/db/exec/cached_plan.h
+++ b/src/mongo/db/exec/cached_plan.h
@@ -62,6 +62,8 @@ namespace mongo {
virtual PlanStageStats* getStats();
+ static const char* kStageType;
+
private:
PlanStage* getActiveChild() const;
void updateCache();
diff --git a/src/mongo/db/exec/collection_scan.cpp b/src/mongo/db/exec/collection_scan.cpp
index e1ed35481e3..47541cf7239 100644
--- a/src/mongo/db/exec/collection_scan.cpp
+++ b/src/mongo/db/exec/collection_scan.cpp
@@ -39,13 +39,17 @@
namespace mongo {
+ // static
+ const char* CollectionScan::kStageType = "COLLSCAN";
+
CollectionScan::CollectionScan(const CollectionScanParams& params,
WorkingSet* workingSet,
const MatchExpression* filter)
: _workingSet(workingSet),
_filter(filter),
_params(params),
- _nsDropped(false) { }
+ _nsDropped(false),
+ _commonStats(kStageType) { }
PlanStage::StageState CollectionScan::work(WorkingSetID* out) {
++_commonStats.works;
@@ -152,6 +156,14 @@ namespace mongo {
PlanStageStats* CollectionScan::getStats() {
_commonStats.isEOF = isEOF();
+
+ // Add a BSON representation of the filter to the stats tree, if there is one.
+ if (NULL != _filter) {
+ BSONObjBuilder bob;
+ _filter->toBSON(&bob);
+ _commonStats.filter = bob.obj();
+ }
+
auto_ptr<PlanStageStats> ret(new PlanStageStats(_commonStats, STAGE_COLLSCAN));
ret->specific.reset(new CollectionScanStats(_specificStats));
return ret.release();
diff --git a/src/mongo/db/exec/collection_scan.h b/src/mongo/db/exec/collection_scan.h
index 2b00c5ce3e7..a6cd87dc68d 100644
--- a/src/mongo/db/exec/collection_scan.h
+++ b/src/mongo/db/exec/collection_scan.h
@@ -58,6 +58,9 @@ namespace mongo {
virtual void recoverFromYield();
virtual PlanStageStats* getStats();
+
+ static const char* kStageType;
+
private:
/**
* Returns true if the record 'loc' references is in memory, false otherwise.
diff --git a/src/mongo/db/exec/count.cpp b/src/mongo/db/exec/count.cpp
index 29abe83d7b2..afe6e32e606 100644
--- a/src/mongo/db/exec/count.cpp
+++ b/src/mongo/db/exec/count.cpp
@@ -33,6 +33,9 @@
namespace mongo {
+ // static
+ const char* Count::kStageType = "COUNT";
+
Count::Count(const CountParams& params, WorkingSet* workingSet)
: _workingSet(workingSet),
_descriptor(params.descriptor),
@@ -40,7 +43,8 @@ namespace mongo {
_btreeCursor(NULL),
_params(params),
_hitEnd(false),
- _shouldDedup(params.descriptor->isMultikey()) { }
+ _shouldDedup(params.descriptor->isMultikey()),
+ _commonStats(kStageType) { }
void Count::initIndexCursor() {
CursorOptions cursorOptions;
diff --git a/src/mongo/db/exec/count.h b/src/mongo/db/exec/count.h
index 31e2ad8ba96..51054dfe4dd 100644
--- a/src/mongo/db/exec/count.h
+++ b/src/mongo/db/exec/count.h
@@ -78,6 +78,8 @@ namespace mongo {
virtual PlanStageStats* getStats();
+ static const char* kStageType;
+
private:
/**
* Initialize the underlying IndexCursor
@@ -110,6 +112,8 @@ namespace mongo {
bool _hitEnd;
bool _shouldDedup;
+
+ CommonStats _commonStats;
};
} // namespace mongo
diff --git a/src/mongo/db/exec/distinct_scan.cpp b/src/mongo/db/exec/distinct_scan.cpp
index f88134c2a5a..765e52d9ae7 100644
--- a/src/mongo/db/exec/distinct_scan.cpp
+++ b/src/mongo/db/exec/distinct_scan.cpp
@@ -36,13 +36,17 @@
namespace mongo {
+ // static
+ const char* DistinctScan::kStageType = "DISTINCT";
+
DistinctScan::DistinctScan(const DistinctParams& params, WorkingSet* workingSet)
: _workingSet(workingSet),
_descriptor(params.descriptor),
_iam(params.descriptor->getIndexCatalog()->getIndex(params.descriptor)),
_btreeCursor(NULL),
_hitEnd(false),
- _params(params) { }
+ _params(params),
+ _commonStats(kStageType) { }
void DistinctScan::initIndexCursor() {
// Create an IndexCursor over the btree we're distinct-ing over.
diff --git a/src/mongo/db/exec/distinct_scan.h b/src/mongo/db/exec/distinct_scan.h
index 381a77f99d7..c3b1e110dd8 100644
--- a/src/mongo/db/exec/distinct_scan.h
+++ b/src/mongo/db/exec/distinct_scan.h
@@ -90,6 +90,8 @@ namespace mongo {
virtual PlanStageStats* getStats();
+ static const char* kStageType;
+
private:
/**
* Initialize the underlying IndexCursor
diff --git a/src/mongo/db/exec/fetch.cpp b/src/mongo/db/exec/fetch.cpp
index 8077dcacdb6..c8c0ff105dd 100644
--- a/src/mongo/db/exec/fetch.cpp
+++ b/src/mongo/db/exec/fetch.cpp
@@ -37,14 +37,18 @@
namespace mongo {
- FetchStage::FetchStage(WorkingSet* ws,
- PlanStage* child,
- const MatchExpression* filter,
+ // static
+ const char* FetchStage::kStageType = "FETCH";
+
+ FetchStage::FetchStage(WorkingSet* ws,
+ PlanStage* child,
+ const MatchExpression* filter,
const Collection* collection)
: _collection(collection),
- _ws(ws),
- _child(child),
- _filter(filter) { }
+ _ws(ws),
+ _child(child),
+ _filter(filter),
+ _commonStats(kStageType) { }
FetchStage::~FetchStage() { }
@@ -143,6 +147,13 @@ namespace mongo {
PlanStageStats* FetchStage::getStats() {
_commonStats.isEOF = isEOF();
+ // Add a BSON representation of the filter to the stats tree, if there is one.
+ if (NULL != _filter) {
+ BSONObjBuilder bob;
+ _filter->toBSON(&bob);
+ _commonStats.filter = bob.obj();
+ }
+
auto_ptr<PlanStageStats> ret(new PlanStageStats(_commonStats, STAGE_FETCH));
ret->specific.reset(new FetchStats(_specificStats));
ret->children.push_back(_child->getStats());
diff --git a/src/mongo/db/exec/fetch.h b/src/mongo/db/exec/fetch.h
index 668a51f961b..870e67a9148 100644
--- a/src/mongo/db/exec/fetch.h
+++ b/src/mongo/db/exec/fetch.h
@@ -61,6 +61,8 @@ namespace mongo {
PlanStageStats* getStats();
+ static const char* kStageType;
+
private:
/**
diff --git a/src/mongo/db/exec/index_scan.cpp b/src/mongo/db/exec/index_scan.cpp
index fb0fa30ed45..c03d5940d72 100644
--- a/src/mongo/db/exec/index_scan.cpp
+++ b/src/mongo/db/exec/index_scan.cpp
@@ -33,6 +33,7 @@
#include "mongo/db/index/index_access_method.h"
#include "mongo/db/index/index_cursor.h"
#include "mongo/db/index/index_descriptor.h"
+#include "mongo/db/query/explain.h"
namespace {
@@ -47,6 +48,9 @@ namespace {
namespace mongo {
+ // static
+ const char* IndexScan::kStageType = "IXSCAN";
+
IndexScan::IndexScan(const IndexScanParams& params, WorkingSet* workingSet,
const MatchExpression* filter)
: _workingSet(workingSet),
@@ -55,13 +59,14 @@ namespace mongo {
_shouldDedup(true),
_yieldMovedCursor(false),
_params(params),
- _btreeCursor(NULL) { }
-
- void IndexScan::initIndexScan() {
- // Perform the possibly heavy-duty initialization of the underlying index cursor.
+ _btreeCursor(NULL),
+ _commonStats(kStageType) {
_iam = _params.descriptor->getIndexCatalog()->getIndex(_params.descriptor);
_keyPattern = _params.descriptor->keyPattern().getOwned();
+ }
+ void IndexScan::initIndexScan() {
+ // Perform the possibly heavy-duty initialization of the underlying index cursor.
if (_params.doNotDedup) {
_shouldDedup = false;
}
@@ -338,10 +343,26 @@ namespace mongo {
// catalog information here.
_commonStats.isEOF = isEOF();
+ // Add a BSON representation of the filter to the stats tree, if there is one.
+ if (NULL != _filter) {
+ BSONObjBuilder bob;
+ _filter->toBSON(&bob);
+ _commonStats.filter = bob.obj();
+ }
+
// These specific stats fields never change.
if (_specificStats.indexType.empty()) {
_specificStats.indexType = "BtreeCursor"; // TODO amName;
- _specificStats.indexBounds = _params.bounds.toBSON();
+
+ // TODO this can be simplified once the new explain format is
+ // the default. Probably won't need to include explain.h here either.
+ if (enableNewExplain) {
+ _specificStats.indexBounds = _params.bounds.toBSON();
+ }
+ else {
+ _specificStats.indexBounds = _params.bounds.toLegacyBSON();
+ }
+
_specificStats.indexBoundsVerbose = _params.bounds.toString();
_specificStats.direction = _params.direction;
_specificStats.keyPattern = _keyPattern;
diff --git a/src/mongo/db/exec/index_scan.h b/src/mongo/db/exec/index_scan.h
index 5a029e05990..7ad2ef316b2 100644
--- a/src/mongo/db/exec/index_scan.h
+++ b/src/mongo/db/exec/index_scan.h
@@ -93,6 +93,8 @@ namespace mongo {
virtual PlanStageStats* getStats();
+ static const char* kStageType;
+
private:
/**
* Initialize the underlying IndexCursor, grab information from the catalog for stats.
diff --git a/src/mongo/db/exec/keep_mutations.cpp b/src/mongo/db/exec/keep_mutations.cpp
index da6852d8ca2..b52b7c0540b 100644
--- a/src/mongo/db/exec/keep_mutations.cpp
+++ b/src/mongo/db/exec/keep_mutations.cpp
@@ -31,6 +31,9 @@
namespace mongo {
+ // static
+ const char* KeepMutationsStage::kStageType = "KEEP_MUTATIONS";
+
KeepMutationsStage::KeepMutationsStage(const MatchExpression* filter,
WorkingSet* ws,
PlanStage* child)
@@ -38,7 +41,8 @@ namespace mongo {
_child(child),
_filter(filter),
_doneReadingChild(false),
- _doneReturningFlagged(false) { }
+ _doneReturningFlagged(false),
+ _commonStats(kStageType) { }
KeepMutationsStage::~KeepMutationsStage() { }
diff --git a/src/mongo/db/exec/keep_mutations.h b/src/mongo/db/exec/keep_mutations.h
index 66fd1516814..02bc8da319a 100644
--- a/src/mongo/db/exec/keep_mutations.h
+++ b/src/mongo/db/exec/keep_mutations.h
@@ -57,6 +57,8 @@ namespace mongo {
virtual PlanStageStats* getStats();
+ static const char* kStageType;
+
private:
// Not owned here.
WorkingSet* _workingSet;
diff --git a/src/mongo/db/exec/limit.cpp b/src/mongo/db/exec/limit.cpp
index 94cc040edc7..21f6ade2107 100644
--- a/src/mongo/db/exec/limit.cpp
+++ b/src/mongo/db/exec/limit.cpp
@@ -32,8 +32,11 @@
namespace mongo {
+ // static
+ const char* LimitStage::kStageType = "LIMIT";
+
LimitStage::LimitStage(int limit, WorkingSet* ws, PlanStage* child)
- : _ws(ws), _child(child), _numToReturn(limit) { }
+ : _ws(ws), _child(child), _numToReturn(limit), _commonStats(kStageType) { }
LimitStage::~LimitStage() { }
@@ -94,7 +97,9 @@ namespace mongo {
PlanStageStats* LimitStage::getStats() {
_commonStats.isEOF = isEOF();
+ _specificStats.limit = _numToReturn;
auto_ptr<PlanStageStats> ret(new PlanStageStats(_commonStats, STAGE_LIMIT));
+ ret->specific.reset(new LimitStats(_specificStats));
ret->children.push_back(_child->getStats());
return ret.release();
}
diff --git a/src/mongo/db/exec/limit.h b/src/mongo/db/exec/limit.h
index 8742dcbb0bd..5b5293823c2 100644
--- a/src/mongo/db/exec/limit.h
+++ b/src/mongo/db/exec/limit.h
@@ -55,6 +55,8 @@ namespace mongo {
virtual PlanStageStats* getStats();
+ static const char* kStageType;
+
private:
WorkingSet* _ws;
scoped_ptr<PlanStage> _child;
@@ -64,6 +66,7 @@ namespace mongo {
// Stats
CommonStats _commonStats;
+ LimitStats _specificStats;
};
} // namespace mongo
diff --git a/src/mongo/db/exec/merge_sort.cpp b/src/mongo/db/exec/merge_sort.cpp
index 527d4d508b8..e35d0570423 100644
--- a/src/mongo/db/exec/merge_sort.cpp
+++ b/src/mongo/db/exec/merge_sort.cpp
@@ -34,6 +34,9 @@
namespace mongo {
+ // static
+ const char* MergeSortStage::kStageType = "SORT_MERGE";
+
MergeSortStage::MergeSortStage(const MergeSortStageParams& params,
WorkingSet* ws,
const Collection* collection)
@@ -41,7 +44,8 @@ namespace mongo {
_ws(ws),
_pattern(params.pattern),
_dedup(params.dedup),
- _merging(StageWithValueComparison(ws, params.pattern)) { }
+ _merging(StageWithValueComparison(ws, params.pattern)),
+ _commonStats(kStageType) { }
MergeSortStage::~MergeSortStage() {
for (size_t i = 0; i < _children.size(); ++i) { delete _children[i]; }
@@ -245,6 +249,8 @@ namespace mongo {
PlanStageStats* MergeSortStage::getStats() {
_commonStats.isEOF = isEOF();
+ _specificStats.sortPattern = _pattern;
+
auto_ptr<PlanStageStats> ret(new PlanStageStats(_commonStats, STAGE_SORT_MERGE));
ret->specific.reset(new MergeSortStats(_specificStats));
for (size_t i = 0; i < _children.size(); ++i) {
diff --git a/src/mongo/db/exec/merge_sort.h b/src/mongo/db/exec/merge_sort.h
index 22adfd21fdf..3eda2add8ff 100644
--- a/src/mongo/db/exec/merge_sort.h
+++ b/src/mongo/db/exec/merge_sort.h
@@ -71,6 +71,8 @@ namespace mongo {
PlanStageStats* getStats();
+ static const char* kStageType;
+
private:
// Not owned by us.
const Collection* _collection;
diff --git a/src/mongo/db/exec/multi_plan.cpp b/src/mongo/db/exec/multi_plan.cpp
index 6263aeba836..2268c8ed58b 100644
--- a/src/mongo/db/exec/multi_plan.cpp
+++ b/src/mongo/db/exec/multi_plan.cpp
@@ -42,6 +42,10 @@
#include "mongo/db/query/qlog.h"
namespace mongo {
+
+ // static
+ const char* MultiPlanStage::kStageType = "MULTI_PLAN";
+
MultiPlanStage::MultiPlanStage(const Collection* collection, CanonicalQuery* cq)
: _collection(collection),
_query(cq),
@@ -49,7 +53,8 @@ namespace mongo {
_backupPlanIdx(kNoSuchPlan),
_failure(false),
_failureCount(0),
- _statusMemberId(WorkingSet::INVALID_ID) { }
+ _statusMemberId(WorkingSet::INVALID_ID),
+ _commonStats(kStageType) { }
MultiPlanStage::~MultiPlanStage() {
if (bestPlanChosen()) {
@@ -65,6 +70,9 @@ namespace mongo {
delete _candidates[_backupPlanIdx].solution;
delete _candidates[_backupPlanIdx].root;
}
+
+ // Clean up the losing candidates.
+ clearCandidates();
}
else {
for (size_t ix = 0; ix < _candidates.size(); ++ix) {
@@ -243,18 +251,12 @@ namespace mongo {
_collection->infoCache()->getPlanCache()->add(*_query, solutions, ranking.release());
}
}
+ }
- // Clear out the candidate plans, leaving only stats as we're all done w/them.
- // Traverse candidate plans in order or score
- for (size_t orderingIndex = 0;
- orderingIndex < candidateOrder.size(); ++orderingIndex) {
- // index into candidates/ranking
- int ix = candidateOrder[orderingIndex];
-
- if (ix == _bestPlanIdx) { continue; }
- if (ix == _backupPlanIdx) { continue; }
-
- delete _candidates[ix].solution;
+ vector<PlanStageStats*>* MultiPlanStage::generateCandidateStats() {
+ for (size_t ix = 0; ix < _candidates.size(); ix++) {
+ if (ix == (size_t)_bestPlanIdx) { continue; }
+ if (ix == (size_t)_backupPlanIdx) { continue; }
// Remember the stats for the candidate plan because we always show it on an
// explain. (The {verbose:false} in explain() is client-side trick; we always
@@ -263,7 +265,20 @@ namespace mongo {
if (stats) {
_candidateStats.push_back(stats);
}
+ }
+
+ return &_candidateStats;
+ }
+
+ void MultiPlanStage::clearCandidates() {
+ // Clear out the candidate plans, leaving only stats as we're all done w/them.
+ // Traverse candidate plans in order of score
+ for (size_t ix = 0; ix < _candidates.size(); ix++) {
+ if (ix == (size_t)_bestPlanIdx) { continue; }
+ if (ix == (size_t)_backupPlanIdx) { continue; }
+
delete _candidates[ix].root;
+ delete _candidates[ix].solution;
}
}
@@ -315,6 +330,63 @@ namespace mongo {
return !doneWorking;
}
+ Status MultiPlanStage::executeWinningPlan() {
+ invariant(_bestPlanIdx != kNoSuchPlan);
+ PlanStage* winner = _candidates[_bestPlanIdx].root;
+ WorkingSet* ws = _candidates[_bestPlanIdx].ws;
+
+ bool doneWorking = false;
+
+ while (!doneWorking) {
+ WorkingSetID id = WorkingSet::INVALID_ID;
+ PlanStage::StageState state = winner->work(&id);
+
+ if (PlanStage::IS_EOF == state || PlanStage::DEAD == state) {
+ doneWorking = true;
+ }
+ else if (PlanStage::FAILURE == state) {
+ // Propagate error.
+ BSONObj errObj;
+ WorkingSetCommon::getStatusMemberObject(*ws, id, &errObj);
+ return Status(ErrorCodes::BadValue, WorkingSetCommon::toStatusString(errObj));
+ }
+ }
+
+ return Status::OK();
+ }
+
+ Status MultiPlanStage::executeAllPlans() {
+ // Boolean vector keeping track of which plans are done.
+ vector<bool> planDone(_candidates.size(), false);
+
+ // Number of plans that are done.
+ size_t doneCount = 0;
+
+ while (doneCount < _candidates.size()) {
+ for (size_t i = 0; i < _candidates.size(); i++) {
+ if (planDone[i]) {
+ continue;
+ }
+
+ WorkingSetID id = WorkingSet::INVALID_ID;
+ PlanStage::StageState state = _candidates[i].root->work(&id);
+
+ if (PlanStage::IS_EOF == state || PlanStage::DEAD == state) {
+ doneCount++;
+ planDone[i] = true;
+ }
+ else if (PlanStage::FAILURE == state) {
+ // Propagate error.
+ BSONObj errObj;
+ WorkingSetCommon::getStatusMemberObject(*_candidates[i].ws, id, &errObj);
+ return Status(ErrorCodes::BadValue, WorkingSetCommon::toStatusString(errObj));
+ }
+ }
+ }
+
+ return Status::OK();
+ }
+
void MultiPlanStage::prepareToYield() {
if (_failure) return;
diff --git a/src/mongo/db/exec/multi_plan.h b/src/mongo/db/exec/multi_plan.h
index 359f3381198..fe25dcddfde 100644
--- a/src/mongo/db/exec/multi_plan.h
+++ b/src/mongo/db/exec/multi_plan.h
@@ -53,6 +53,11 @@ namespace mongo {
virtual ~MultiPlanStage();
+ /**
+ * Helper used by the destructor to delete losing candidate plans.
+ */
+ void clearCandidates();
+
virtual bool isEOF();
virtual StageState work(WorkingSetID* out);
@@ -69,7 +74,7 @@ namespace mongo {
void addPlan(QuerySolution* solution, PlanStage* root, WorkingSet* sharedWs);
/**
- * Runs all plans added by addPlan, ranks them, and picks a best. Deletes all loser plans.
+ * Runs all plans added by addPlan, ranks them, and picks a best.
* All further calls to getNext(...) will return results from the best plan.
*/
void pickBestPlan();
@@ -90,6 +95,35 @@ namespace mongo {
*/
bool hasBackupPlan() const;
+ /**
+ * Gathers execution stats for all losing plans.
+ */
+ vector<PlanStageStats*>* generateCandidateStats();
+
+ //
+ // Used by explain.
+ //
+
+ /**
+ * Runs the winning plan until it hits EOF or returns DEAD, throwing out the results.
+ * Execution stats are gathered in the process.
+ *
+ * You can call this after calling pickBestPlan(...). It expects that a winning plan
+ * has already been selected.
+ */
+ Status executeWinningPlan();
+
+ /**
+ * Runs the candidate plans until each has either hit EOF or returned DEAD. Results
+ * from the plans are thrown out, but execution stats are gathered.
+ *
+ * You can call this after calling pickBestPlan(...). It expects that a winning plan
+ * has already been selected.
+ */
+ Status executeAllPlans();
+
+ static const char* kStageType;
+
private:
//
// Have all our candidate plans do something.
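
The new hooks above exist for the explain machinery introduced at the end of this commit (explain.cpp, truncated below). A rough, hypothetical driver showing the intended call order, using only methods declared in this header; the function name and output field are made up, and the real wiring in explain.cpp may differ:

    // Hypothetical sketch; assumes plans were already added via addPlan().
    Status explainMultiPlan(MultiPlanStage* mps, bool verbose, BSONObjBuilder* out) {
        mps->pickBestPlan();                                      // rank, keep losers around
        Status execStatus = verbose ? mps->executeAllPlans()      // stats for every candidate
                                    : mps->executeWinningPlan();  // or just the winner
        if (!execStatus.isOK()) {
            return execStatus;
        }
        // Harvest losing-plan stats while the stage is still alive: the destructor
        // now calls clearCandidates(), which deletes the losing roots and solutions.
        vector<PlanStageStats*>* loserStats = mps->generateCandidateStats();
        out->appendNumber("numCandidatePlans", static_cast<long long>(loserStats->size() + 1));
        return Status::OK();
    }
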
diff --git a/src/mongo/db/exec/or.cpp b/src/mongo/db/exec/or.cpp
index c56e00f6731..9266c6fec10 100644
--- a/src/mongo/db/exec/or.cpp
+++ b/src/mongo/db/exec/or.cpp
@@ -33,8 +33,11 @@
namespace mongo {
+ // static
+ const char* OrStage::kStageType = "OR";
+
OrStage::OrStage(WorkingSet* ws, bool dedup, const MatchExpression* filter)
- : _ws(ws), _filter(filter), _currentChild(0), _dedup(dedup) { }
+ : _ws(ws), _filter(filter), _currentChild(0), _dedup(dedup), _commonStats(kStageType) { }
OrStage::~OrStage() {
for (size_t i = 0; i < _children.size(); ++i) {
@@ -168,6 +171,13 @@ namespace mongo {
PlanStageStats* OrStage::getStats() {
_commonStats.isEOF = isEOF();
+ // Add a BSON representation of the filter to the stats tree, if there is one.
+ if (NULL != _filter) {
+ BSONObjBuilder bob;
+ _filter->toBSON(&bob);
+ _commonStats.filter = bob.obj();
+ }
+
auto_ptr<PlanStageStats> ret(new PlanStageStats(_commonStats, STAGE_OR));
ret->specific.reset(new OrStats(_specificStats));
for (size_t i = 0; i < _children.size(); ++i) {
diff --git a/src/mongo/db/exec/or.h b/src/mongo/db/exec/or.h
index 9382fbae7f8..3e51bbcd94e 100644
--- a/src/mongo/db/exec/or.h
+++ b/src/mongo/db/exec/or.h
@@ -60,6 +60,8 @@ namespace mongo {
virtual PlanStageStats* getStats();
+ static const char* kStageType;
+
private:
// Not owned by us.
WorkingSet* _ws;
diff --git a/src/mongo/db/exec/plan_stats.h b/src/mongo/db/exec/plan_stats.h
index fa326b536d2..dab069ed6c2 100644
--- a/src/mongo/db/exec/plan_stats.h
+++ b/src/mongo/db/exec/plan_stats.h
@@ -55,13 +55,17 @@ namespace mongo {
// Every stage has CommonStats.
struct CommonStats {
- CommonStats() : works(0),
+ CommonStats(const char* type)
+ : stageTypeStr(type),
+ works(0),
yields(0),
unyields(0),
invalidates(0),
advanced(0),
needTime(0),
isEOF(false) { }
+ // String giving the type of the stage. Not owned.
+ const char* stageTypeStr;
// Count calls into the stage.
size_t works;
@@ -73,6 +77,10 @@ namespace mongo {
size_t advanced;
size_t needTime;
+ // BSON representation of a MatchExpression affixed to this node. If there
+ // is no filter affixed, then 'filter' should be an empty BSONObj.
+ BSONObj filter;
+
// TODO: have some way of tracking WSM sizes (or really any series of #s). We can measure
// the size of our inputs and the size of our outputs. We can do a lot with the WS here.
@@ -80,6 +88,9 @@ namespace mongo {
// the user, eg. time_t totalTimeSpent;
bool isEOF;
+ private:
+ // Default constructor is illegal.
+ CommonStats();
};
// The universal container for a stage's stats.
@@ -292,6 +303,17 @@ namespace mongo {
};
+ struct LimitStats : public SpecificStats {
+ LimitStats() : limit(0) { }
+
+ virtual SpecificStats* clone() const {
+ LimitStats* specific = new LimitStats(*this);
+ return specific;
+ }
+
+ size_t limit;
+ };
+
struct MultiPlanStats : public SpecificStats {
MultiPlanStats() { }
@@ -322,6 +344,18 @@ namespace mongo {
std::vector<size_t> matchTested;
};
+ struct ProjectionStats : public SpecificStats {
+ ProjectionStats() { }
+
+ virtual SpecificStats* clone() const {
+ ProjectionStats* specific = new ProjectionStats(*this);
+ return specific;
+ }
+
+ // Object specifying the projection transformation to apply.
+ BSONObj projObj;
+ };
+
struct SortStats : public SpecificStats {
SortStats() : forcedFetches(0), memUsage(0), memLimit(0) { }
@@ -340,6 +374,12 @@ namespace mongo {
// What's our memory limit?
size_t memLimit;
+
+ // The number of results to return from the sort.
+ size_t limit;
+
+ // The pattern according to which we are sorting.
+ BSONObj sortPattern;
};
struct MergeSortStats : public SpecificStats {
@@ -359,6 +399,9 @@ namespace mongo {
// How many records were we forced to fetch as the result of an invalidation?
size_t forcedFetches;
+
+ // The pattern according to which we are sorting.
+ BSONObj sortPattern;
};
struct ShardingFilterStats : public SpecificStats {
@@ -372,6 +415,17 @@ namespace mongo {
size_t chunkSkips;
};
+ struct SkipStats : public SpecificStats {
+ SkipStats() : skip(0) { }
+
+ virtual SpecificStats* clone() const {
+ SkipStats* specific = new SkipStats(*this);
+ return specific;
+ }
+
+ size_t skip;
+ };
+
struct TwoDStats : public SpecificStats {
TwoDStats() { }
@@ -394,6 +448,8 @@ namespace mongo {
// Geo hashes generated by GeoBrowse::fillStack.
// Raw data for explain index bounds.
std::vector<GeoHash> expPrefixes;
+
+ BSONObj keyPattern;
};
struct TwoDNearStats : public SpecificStats {
@@ -406,9 +462,11 @@ namespace mongo {
size_t objectsLoaded;
- // Since 2d's near does all its work in one go we can't divine the real nscanned from
- // anything else.
+ // Since 2d's near does all its work in one go we can't divine the real number of
+ // keys scanned from anything else.
size_t nscanned;
+
+ BSONObj keyPattern;
};
struct TextStats : public SpecificStats {
@@ -425,6 +483,9 @@ namespace mongo {
// Human-readable form of the FTSQuery associated with the text stage.
BSONObj parsedTextQuery;
+
+ // Index keys that precede the "text" index key.
+ BSONObj indexPrefix;
};
} // namespace mongo
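
Because every CommonStats now carries a human-readable stage label and the serialized filter, an explain implementation can walk a stats tree directly instead of mapping StageType enums to strings. A rough sketch, with the caveat that the 'common' and 'children' member names of PlanStageStats are assumed here from how the getStats() hunks above populate the tree:

    // Illustrative only; member names are assumptions, not confirmed by this diff.
    void appendStatsTree(const PlanStageStats& stats, BSONObjBuilder* bob) {
        bob->append("stage", stats.common.stageTypeStr);
        bob->appendNumber("works", static_cast<long long>(stats.common.works));
        bob->appendNumber("advanced", static_cast<long long>(stats.common.advanced));
        if (!stats.common.filter.isEmpty()) {
            bob->append("filter", stats.common.filter);
        }
        BSONArrayBuilder childrenBob(bob->subarrayStart("children"));
        for (size_t i = 0; i < stats.children.size(); ++i) {
            BSONObjBuilder childBob(childrenBob.subobjStart());
            appendStatsTree(*stats.children[i], &childBob);
            childBob.doneFast();
        }
        childrenBob.doneFast();
    }
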
diff --git a/src/mongo/db/exec/projection.cpp b/src/mongo/db/exec/projection.cpp
index 4251ada524b..60c0aa221f3 100644
--- a/src/mongo/db/exec/projection.cpp
+++ b/src/mongo/db/exec/projection.cpp
@@ -39,11 +39,15 @@ namespace mongo {
static const char* kIdField = "_id";
+ // static
+ const char* ProjectionStage::kStageType = "PROJECTION";
+
ProjectionStage::ProjectionStage(const ProjectionStageParams& params,
WorkingSet* ws,
PlanStage* child)
: _ws(ws),
_child(child),
+ _commonStats(kStageType),
_projImpl(params.projImpl) {
if (ProjectionStageParams::NO_FAST_PATH == _projImpl) {
@@ -245,7 +249,9 @@ namespace mongo {
PlanStageStats* ProjectionStage::getStats() {
_commonStats.isEOF = isEOF();
+ _specificStats.projObj = _projObj;
auto_ptr<PlanStageStats> ret(new PlanStageStats(_commonStats, STAGE_PROJECTION));
+ ret->specific.reset(new ProjectionStats(_specificStats));
ret->children.push_back(_child->getStats());
return ret.release();
}
diff --git a/src/mongo/db/exec/projection.h b/src/mongo/db/exec/projection.h
index 5284cfaac02..4b66e0dc8af 100644
--- a/src/mongo/db/exec/projection.h
+++ b/src/mongo/db/exec/projection.h
@@ -109,6 +109,8 @@ namespace mongo {
const FieldSet& includedFields,
BSONObjBuilder& bob);
+ static const char* kStageType;
+
private:
Status transform(WorkingSetMember* member);
@@ -120,6 +122,7 @@ namespace mongo {
// Stats
CommonStats _commonStats;
+ ProjectionStats _specificStats;
// Fast paths:
ProjectionStageParams::ProjectionImplementation _projImpl;
diff --git a/src/mongo/db/exec/s2near.cpp b/src/mongo/db/exec/s2near.cpp
index ed1113823a5..faaeddaca22 100644
--- a/src/mongo/db/exec/s2near.cpp
+++ b/src/mongo/db/exec/s2near.cpp
@@ -40,7 +40,11 @@
namespace mongo {
- S2NearStage::S2NearStage(const S2NearParams& params, WorkingSet* ws) {
+ // static
+ const char* S2NearStage::kStageType = "GEO_NEAR_2DSPHERE";
+
+ S2NearStage::S2NearStage(const S2NearParams& params, WorkingSet* ws)
+ : _commonStats(kStageType) {
_initted = false;
_params = params;
_ws = ws;
@@ -209,6 +213,9 @@ namespace mongo {
virtual void debugString( StringBuilder& debug, int level = 0 ) const {
}
+ virtual void toBSON(BSONObjBuilder* out) const {
+ }
+
virtual bool equivalent( const MatchExpression* other ) const {
return false;
}
diff --git a/src/mongo/db/exec/s2near.h b/src/mongo/db/exec/s2near.h
index add9cf6ef29..5286d4b4302 100644
--- a/src/mongo/db/exec/s2near.h
+++ b/src/mongo/db/exec/s2near.h
@@ -76,6 +76,8 @@ namespace mongo {
PlanStageStats* getStats();
+ static const char* kStageType;
+
private:
void init();
StageState addResultToQueue(WorkingSetID* out);
diff --git a/src/mongo/db/exec/shard_filter.cpp b/src/mongo/db/exec/shard_filter.cpp
index 72b4bb6887b..784afb0fad3 100644
--- a/src/mongo/db/exec/shard_filter.cpp
+++ b/src/mongo/db/exec/shard_filter.cpp
@@ -32,10 +32,13 @@
namespace mongo {
+ // static
+ const char* ShardFilterStage::kStageType = "SHARDING_FILTER";
+
ShardFilterStage::ShardFilterStage(const CollectionMetadataPtr& metadata,
WorkingSet* ws,
PlanStage* child)
- : _ws(ws), _child(child), _metadata(metadata) { }
+ : _ws(ws), _child(child), _commonStats(kStageType), _metadata(metadata) { }
ShardFilterStage::~ShardFilterStage() { }
diff --git a/src/mongo/db/exec/shard_filter.h b/src/mongo/db/exec/shard_filter.h
index 6c01c1e1691..ce1a4ac64de 100644
--- a/src/mongo/db/exec/shard_filter.h
+++ b/src/mongo/db/exec/shard_filter.h
@@ -86,6 +86,8 @@ namespace mongo {
virtual PlanStageStats* getStats();
+ static const char* kStageType;
+
private:
WorkingSet* _ws;
scoped_ptr<PlanStage> _child;
diff --git a/src/mongo/db/exec/skip.cpp b/src/mongo/db/exec/skip.cpp
index eb6ff6f3d1f..90450169d6d 100644
--- a/src/mongo/db/exec/skip.cpp
+++ b/src/mongo/db/exec/skip.cpp
@@ -32,8 +32,11 @@
namespace mongo {
+ // static
+ const char* SkipStage::kStageType = "SKIP";
+
SkipStage::SkipStage(int toSkip, WorkingSet* ws, PlanStage* child)
- : _ws(ws), _child(child), _toSkip(toSkip) { }
+ : _ws(ws), _child(child), _toSkip(toSkip), _commonStats(kStageType) { }
SkipStage::~SkipStage() { }
@@ -98,7 +101,9 @@ namespace mongo {
PlanStageStats* SkipStage::getStats() {
_commonStats.isEOF = isEOF();
+ _specificStats.skip = _toSkip;
auto_ptr<PlanStageStats> ret(new PlanStageStats(_commonStats, STAGE_SKIP));
+ ret->specific.reset(new SkipStats(_specificStats));
ret->children.push_back(_child->getStats());
return ret.release();
}
diff --git a/src/mongo/db/exec/skip.h b/src/mongo/db/exec/skip.h
index 9c907ac8bb0..48366a71616 100644
--- a/src/mongo/db/exec/skip.h
+++ b/src/mongo/db/exec/skip.h
@@ -54,6 +54,8 @@ namespace mongo {
virtual PlanStageStats* getStats();
+ static const char* kStageType;
+
private:
WorkingSet* _ws;
scoped_ptr<PlanStage> _child;
@@ -63,6 +65,7 @@ namespace mongo {
// Stats
CommonStats _commonStats;
+ SkipStats _specificStats;
};
} // namespace mongo
diff --git a/src/mongo/db/exec/sort.cpp b/src/mongo/db/exec/sort.cpp
index ff2e6f94e1d..b30a198a1c1 100644
--- a/src/mongo/db/exec/sort.cpp
+++ b/src/mongo/db/exec/sort.cpp
@@ -45,6 +45,9 @@ namespace mongo {
const size_t kMaxBytes = 32 * 1024 * 1024;
+ // static
+ const char* SortStage::kStageType = "SORT";
+
SortStageKeyGenerator::SortStageKeyGenerator(const Collection* collection,
const BSONObj& sortSpec,
const BSONObj& queryObj) {
@@ -286,6 +289,7 @@ namespace mongo {
_limit(params.limit),
_sorted(false),
_resultIterator(_data.end()),
+ _commonStats(kStageType),
_memUsage(0) {
}
@@ -449,6 +453,8 @@ namespace mongo {
_commonStats.isEOF = isEOF();
_specificStats.memLimit = kMaxBytes;
_specificStats.memUsage = _memUsage;
+ _specificStats.limit = _limit;
+ _specificStats.sortPattern = _pattern.getOwned();
auto_ptr<PlanStageStats> ret(new PlanStageStats(_commonStats, STAGE_SORT));
ret->specific.reset(new SortStats(_specificStats));
diff --git a/src/mongo/db/exec/sort.h b/src/mongo/db/exec/sort.h
index f0ead740dc2..ac4ae63b3f1 100644
--- a/src/mongo/db/exec/sort.h
+++ b/src/mongo/db/exec/sort.h
@@ -155,6 +155,8 @@ namespace mongo {
PlanStageStats* getStats();
+ static const char* kStageType;
+
private:
//
diff --git a/src/mongo/db/exec/text.cpp b/src/mongo/db/exec/text.cpp
index 190ec4c19e6..170a800eed9 100644
--- a/src/mongo/db/exec/text.cpp
+++ b/src/mongo/db/exec/text.cpp
@@ -38,6 +38,9 @@
namespace mongo {
+ // static
+ const char* TextStage::kStageType = "TEXT";
+
TextStage::TextStage(const TextStageParams& params,
WorkingSet* ws,
const MatchExpression* filter)
@@ -45,9 +48,9 @@ namespace mongo {
_ftsMatcher(params.query, params.spec),
_ws(ws),
_filter(filter),
+ _commonStats(kStageType),
_internalState(INIT_SCANS),
_currentIndexScanner(0) {
-
_scoreIterator = _scores.end();
}
@@ -133,6 +136,16 @@ namespace mongo {
PlanStageStats* TextStage::getStats() {
_commonStats.isEOF = isEOF();
+
+ // Add a BSON representation of the filter to the stats tree, if there is one.
+ if (NULL != _filter) {
+ BSONObjBuilder bob;
+ _filter->toBSON(&bob);
+ _commonStats.filter = bob.obj();
+ }
+
+ _specificStats.indexPrefix = _params.indexPrefix;
+
auto_ptr<PlanStageStats> ret(new PlanStageStats(_commonStats, STAGE_TEXT));
ret->specific.reset(new TextStats(_specificStats));
return ret.release();
diff --git a/src/mongo/db/exec/text.h b/src/mongo/db/exec/text.h
index 29c1210deab..17f28766ae0 100644
--- a/src/mongo/db/exec/text.h
+++ b/src/mongo/db/exec/text.h
@@ -106,6 +106,8 @@ namespace mongo {
PlanStageStats* getStats();
+ static const char* kStageType;
+
private:
/**
* Initializes sub-scanners.
diff --git a/src/mongo/db/matcher/expression.cpp b/src/mongo/db/matcher/expression.cpp
index 35e32c447a1..46c1546ef5a 100644
--- a/src/mongo/db/matcher/expression.cpp
+++ b/src/mongo/db/matcher/expression.cpp
@@ -62,11 +62,19 @@ namespace mongo {
debug << "$atomic\n";
}
+ void AtomicMatchExpression::toBSON(BSONObjBuilder* out) const {
+ out->append("$isolated", 1);
+ }
+
void FalseMatchExpression::debugString( StringBuilder& debug, int level ) const {
_debugAddSpace( debug, level );
debug << "$false\n";
}
+ void FalseMatchExpression::toBSON(BSONObjBuilder* out) const {
+ out->append("$false", 1);
+ }
+
}
diff --git a/src/mongo/db/matcher/expression.h b/src/mongo/db/matcher/expression.h
index 1970b4bfd6f..1b9635221af 100644
--- a/src/mongo/db/matcher/expression.h
+++ b/src/mongo/db/matcher/expression.h
@@ -35,6 +35,7 @@
#include "mongo/base/disallow_copying.h"
#include "mongo/base/status.h"
#include "mongo/bson/bsonobj.h"
+#include "mongo/bson/bsonobjbuilder.h"
#include "mongo/db/matcher/matchable.h"
#include "mongo/db/matcher/match_details.h"
@@ -192,6 +193,7 @@ namespace mongo {
//
virtual std::string toString() const;
virtual void debugString( StringBuilder& debug, int level = 0 ) const = 0;
+ virtual void toBSON(BSONObjBuilder* out) const = 0;
protected:
void _debugAddSpace( StringBuilder& debug, int level ) const;
@@ -223,6 +225,8 @@ namespace mongo {
virtual void debugString( StringBuilder& debug, int level = 0 ) const;
+ virtual void toBSON(BSONObjBuilder* out) const;
+
virtual bool equivalent( const MatchExpression* other ) const {
return other->matchType() == ATOMIC;
}
@@ -246,6 +250,8 @@ namespace mongo {
virtual void debugString( StringBuilder& debug, int level = 0 ) const;
+ virtual void toBSON(BSONObjBuilder* out) const;
+
virtual bool equivalent( const MatchExpression* other ) const {
return other->matchType() == ALWAYS_FALSE;
}
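
Declaring toBSON() pure virtual obliges every concrete MatchExpression subclass to provide an implementation, which is why the s2near.cpp hunk above adds an empty override for its internal helper expression. A stub like the following sketch satisfies the new contract when an expression has no meaningful query-language form:

    // Minimal stub, mirroring the empty override added in s2near.cpp above.
    virtual void toBSON(BSONObjBuilder* out) const {
        // Leave 'out' untouched (or append a best-effort representation) when the
        // expression has no sensible user-facing equivalent.
    }
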
diff --git a/src/mongo/db/matcher/expression_array.cpp b/src/mongo/db/matcher/expression_array.cpp
index 9ba1c956f9a..e70175e967a 100644
--- a/src/mongo/db/matcher/expression_array.cpp
+++ b/src/mongo/db/matcher/expression_array.cpp
@@ -127,6 +127,17 @@ namespace mongo {
_sub->debugString( debug, level + 1 );
}
+ void ElemMatchObjectMatchExpression::toBSON(BSONObjBuilder* out) const {
+ BSONObjBuilder subBob;
+ _sub->toBSON(&subBob);
+ if (path().empty()) {
+ out->append("$elemMatch", subBob.obj());
+ }
+ else {
+ out->append(path(), BSON("$elemMatch" << subBob.obj()));
+ }
+ }
+
// -------
@@ -190,6 +201,19 @@ namespace mongo {
}
}
+ void ElemMatchValueMatchExpression::toBSON(BSONObjBuilder* out) const {
+ BSONObjBuilder emBob;
+ for ( unsigned i = 0; i < _subs.size(); i++ ) {
+ _subs[i]->toBSON(&emBob);
+ }
+ if (path().empty()) {
+ out->append("$elemMatch", emBob.obj());
+ }
+ else {
+ out->append(path(), BSON("$elemMatch" << emBob.obj()));
+ }
+ }
+
// ------
@@ -254,7 +278,19 @@ namespace mongo {
for ( size_t i = 0; i < _list.size(); i++ ) {
_list[i]->debugString( debug, level + 1);
}
+ }
+
+ void AllElemMatchOp::toBSON(BSONObjBuilder* out) const {
+ BSONObjBuilder allBob(out->subobjStart(path()));
+
+ BSONArrayBuilder arrBob(allBob.subarrayStart("$all"));
+ for (unsigned i = 0; i < _list.size(); i++) {
+ BSONObjBuilder childBob(arrBob.subobjStart());
+ _list[i]->toBSON(&childBob);
+ }
+ arrBob.doneFast();
+ allBob.doneFast();
}
bool AllElemMatchOp::equivalent( const MatchExpression* other ) const {
@@ -300,6 +336,10 @@ namespace mongo {
}
}
+ void SizeMatchExpression::toBSON(BSONObjBuilder* out) const {
+ out->append(path(), BSON("$size" << _size));
+ }
+
bool SizeMatchExpression::equivalent( const MatchExpression* other ) const {
if ( matchType() != other->matchType() )
return false;
diff --git a/src/mongo/db/matcher/expression_array.h b/src/mongo/db/matcher/expression_array.h
index 06f9283bd57..872f3d82b46 100644
--- a/src/mongo/db/matcher/expression_array.h
+++ b/src/mongo/db/matcher/expression_array.h
@@ -86,6 +86,8 @@ namespace mongo {
virtual void debugString( StringBuilder& debug, int level ) const;
+ virtual void toBSON(BSONObjBuilder* out) const;
+
virtual size_t numChildren() const { return 1; }
virtual MatchExpression* getChild( size_t i ) const { return _sub.get(); }
@@ -119,6 +121,8 @@ namespace mongo {
virtual void debugString( StringBuilder& debug, int level ) const;
+ virtual void toBSON(BSONObjBuilder* out) const;
+
virtual size_t numChildren() const { return _subs.size(); }
virtual MatchExpression* getChild( size_t i ) const { return _subs[i]; }
@@ -147,6 +151,8 @@ namespace mongo {
virtual void debugString( StringBuilder& debug, int level ) const;
+ virtual void toBSON(BSONObjBuilder* out) const;
+
virtual bool equivalent( const MatchExpression* other ) const;
int getData() const { return _size; }
@@ -188,6 +194,8 @@ namespace mongo {
virtual void debugString( StringBuilder& debug, int level ) const;
+ virtual void toBSON(BSONObjBuilder* out) const;
+
virtual bool equivalent( const MatchExpression* other ) const;
virtual size_t numChildren() const { return _list.size(); }
diff --git a/src/mongo/db/matcher/expression_geo.cpp b/src/mongo/db/matcher/expression_geo.cpp
index c701d0916dc..1702c26740f 100644
--- a/src/mongo/db/matcher/expression_geo.cpp
+++ b/src/mongo/db/matcher/expression_geo.cpp
@@ -72,6 +72,10 @@ namespace mongo {
debug << "\n";
}
+ void GeoMatchExpression::toBSON(BSONObjBuilder* out) const {
+ out->appendElements(_rawObj);
+ }
+
bool GeoMatchExpression::equivalent( const MatchExpression* other ) const {
if ( matchType() != other->matchType() )
return false;
@@ -125,6 +129,10 @@ namespace mongo {
debug << "\n";
}
+ void GeoNearMatchExpression::toBSON(BSONObjBuilder* out) const {
+ out->appendElements(_rawObj);
+ }
+
bool GeoNearMatchExpression::equivalent( const MatchExpression* other ) const {
if ( matchType() != other->matchType() )
return false;
diff --git a/src/mongo/db/matcher/expression_geo.h b/src/mongo/db/matcher/expression_geo.h
index 3a3121a728a..a227dea159b 100644
--- a/src/mongo/db/matcher/expression_geo.h
+++ b/src/mongo/db/matcher/expression_geo.h
@@ -51,6 +51,8 @@ namespace mongo {
virtual void debugString( StringBuilder& debug, int level = 0 ) const;
+ virtual void toBSON(BSONObjBuilder* out) const;
+
virtual bool equivalent( const MatchExpression* other ) const;
virtual LeafMatchExpression* shallowClone() const;
@@ -76,6 +78,8 @@ namespace mongo {
virtual void debugString( StringBuilder& debug, int level = 0 ) const;
+ virtual void toBSON(BSONObjBuilder* out) const;
+
virtual bool equivalent( const MatchExpression* other ) const;
virtual LeafMatchExpression* shallowClone() const;
diff --git a/src/mongo/db/matcher/expression_leaf.cpp b/src/mongo/db/matcher/expression_leaf.cpp
index d1f62f7c18c..dc2c3d10d6e 100644
--- a/src/mongo/db/matcher/expression_leaf.cpp
+++ b/src/mongo/db/matcher/expression_leaf.cpp
@@ -184,6 +184,20 @@ namespace mongo {
debug << "\n";
}
+ void ComparisonMatchExpression::toBSON(BSONObjBuilder* out) const {
+ string opString = "";
+ switch ( matchType() ) {
+ case LT: opString = "$lt"; break;
+ case LTE: opString = "$lte"; break;
+ case EQ: opString = "$eq"; break;
+ case GT: opString = "$gt"; break;
+ case GTE: opString = "$gte"; break;
+ default: opString = " UNKNOWN - should be impossible"; break;
+ }
+
+ out->append(path(), BSON(opString << _rhs));
+ }
+
// ---------------
// TODO: move
@@ -264,6 +278,10 @@ namespace mongo {
debug << "\n";
}
+ void RegexMatchExpression::toBSON(BSONObjBuilder* out) const {
+ out->appendRegex(path(), _regex, _flags);
+ }
+
void RegexMatchExpression::shortDebugString( StringBuilder& debug ) const {
debug << "/" << _regex << "/" << _flags;
}
@@ -295,6 +313,10 @@ namespace mongo {
debug << "\n";
}
+ void ModMatchExpression::toBSON(BSONObjBuilder* out) const {
+ out->append(path(), BSON("$mod" << BSON_ARRAY(_divisor << _remainder)));
+ }
+
bool ModMatchExpression::equivalent( const MatchExpression* other ) const {
if ( matchType() != other->matchType() )
return false;
@@ -328,6 +350,10 @@ namespace mongo {
debug << "\n";
}
+ void ExistsMatchExpression::toBSON(BSONObjBuilder* out) const {
+ out->append(path(), BSON("$exists" << true));
+ }
+
bool ExistsMatchExpression::equivalent( const MatchExpression* other ) const {
if ( matchType() != other->matchType() )
return false;
@@ -392,6 +418,9 @@ namespace mongo {
debug << "\n";
}
+ void TypeMatchExpression::toBSON(BSONObjBuilder* out) const {
+ out->append(path(), BSON("$type" << _type));
+ }
bool TypeMatchExpression::equivalent( const MatchExpression* other ) const {
if ( matchType() != other->matchType() )
@@ -474,6 +503,19 @@ namespace mongo {
debug << "]";
}
+ void ArrayFilterEntries::toBSON(BSONArrayBuilder* out) const {
+ for (BSONElementSet::const_iterator it = _equalities.begin();
+ it != _equalities.end(); ++it) {
+ out->append(*it);
+ }
+ for (size_t i = 0; i < _regexes.size(); ++i) {
+ BSONObjBuilder regexBob;
+ _regexes[i]->toBSON(&regexBob);
+ out->append(regexBob.obj().firstElement());
+ }
+ out->doneFast();
+ }
+
// -----------
Status InMatchExpression::init( const StringData& path ) {
@@ -525,6 +567,13 @@ namespace mongo {
debug << "\n";
}
+ void InMatchExpression::toBSON(BSONObjBuilder* out) const {
+ BSONObjBuilder inBob(out->subobjStart(path()));
+ BSONArrayBuilder arrBob(inBob.subarrayStart("$in"));
+ _arrayEntries.toBSON(&arrBob);
+ inBob.doneFast();
+ }
+
bool InMatchExpression::equivalent( const MatchExpression* other ) const {
if ( matchType() != other->matchType() )
return false;
diff --git a/src/mongo/db/matcher/expression_leaf.h b/src/mongo/db/matcher/expression_leaf.h
index 8a843dd7703..a6ca073f068 100644
--- a/src/mongo/db/matcher/expression_leaf.h
+++ b/src/mongo/db/matcher/expression_leaf.h
@@ -92,6 +92,8 @@ namespace mongo {
virtual void debugString( StringBuilder& debug, int level = 0 ) const;
+ virtual void toBSON(BSONObjBuilder* out) const;
+
virtual bool equivalent( const MatchExpression* other ) const;
const BSONElement& getData() const { return _rhs; }
@@ -204,6 +206,8 @@ namespace mongo {
virtual void debugString( StringBuilder& debug, int level ) const;
+ virtual void toBSON(BSONObjBuilder* out) const;
+
void shortDebugString( StringBuilder& debug ) const;
virtual bool equivalent( const MatchExpression* other ) const;
@@ -236,6 +240,8 @@ namespace mongo {
virtual void debugString( StringBuilder& debug, int level ) const;
+ virtual void toBSON(BSONObjBuilder* out) const;
+
virtual bool equivalent( const MatchExpression* other ) const;
int getDivisor() const { return _divisor; }
@@ -265,6 +271,8 @@ namespace mongo {
virtual void debugString( StringBuilder& debug, int level ) const;
+ virtual void toBSON(BSONObjBuilder* out) const;
+
virtual bool equivalent( const MatchExpression* other ) const;
};
@@ -300,6 +308,8 @@ namespace mongo {
void debugString( StringBuilder& debug ) const;
+ void toBSON(BSONArrayBuilder* out) const;
+
private:
bool _hasNull; // if _equalities has a jstNULL element in it
bool _hasEmptyArray;
@@ -323,6 +333,8 @@ namespace mongo {
virtual void debugString( StringBuilder& debug, int level ) const;
+ virtual void toBSON(BSONObjBuilder* out) const;
+
virtual bool equivalent( const MatchExpression* other ) const;
void copyTo( InMatchExpression* toFillIn ) const;
@@ -363,6 +375,8 @@ namespace mongo {
virtual void debugString( StringBuilder& debug, int level ) const;
+ virtual void toBSON(BSONObjBuilder* out) const;
+
virtual bool equivalent( const MatchExpression* other ) const;
/**
diff --git a/src/mongo/db/matcher/expression_text.cpp b/src/mongo/db/matcher/expression_text.cpp
index f81c3dc1fa3..899ef0362ad 100644
--- a/src/mongo/db/matcher/expression_text.cpp
+++ b/src/mongo/db/matcher/expression_text.cpp
@@ -59,6 +59,10 @@ namespace mongo {
debug << "\n";
}
+ void TextMatchExpression::toBSON(BSONObjBuilder* out) const {
+ out->append("$text", BSON("$search" << _query << "$language" << _language));
+ }
+
bool TextMatchExpression::equivalent( const MatchExpression* other ) const {
if ( matchType() != other->matchType() ) {
return false;
diff --git a/src/mongo/db/matcher/expression_text.h b/src/mongo/db/matcher/expression_text.h
index 4fc603d9c61..6ebea78ba3d 100644
--- a/src/mongo/db/matcher/expression_text.h
+++ b/src/mongo/db/matcher/expression_text.h
@@ -47,6 +47,8 @@ namespace mongo {
virtual void debugString( StringBuilder& debug, int level = 0 ) const;
+ virtual void toBSON(BSONObjBuilder* out) const;
+
virtual bool equivalent( const MatchExpression* other ) const;
virtual LeafMatchExpression* shallowClone() const;
diff --git a/src/mongo/db/matcher/expression_tree.cpp b/src/mongo/db/matcher/expression_tree.cpp
index 6b7304b024a..95e272c1cb3 100644
--- a/src/mongo/db/matcher/expression_tree.cpp
+++ b/src/mongo/db/matcher/expression_tree.cpp
@@ -30,9 +30,11 @@
#include "mongo/db/matcher/expression_tree.h"
-#include "mongo/bson/bsonobjiterator.h"
#include "mongo/bson/bsonobj.h"
#include "mongo/bson/bsonmisc.h"
+#include "mongo/bson/bsonobjbuilder.h"
+#include "mongo/bson/bsonobjiterator.h"
+#include "mongo/bson/bson-inl.h"
#include "mongo/util/log.h"
namespace mongo {
@@ -54,6 +56,14 @@ namespace mongo {
_expressions[i]->debugString( debug, level + 1 );
}
+ void ListOfMatchExpression::_listToBSON(BSONArrayBuilder* out) const {
+ for ( unsigned i = 0; i < _expressions.size(); i++ ) {
+ BSONObjBuilder childBob(out->subobjStart());
+ _expressions[i]->toBSON(&childBob);
+ }
+ out->doneFast();
+ }
+
bool ListOfMatchExpression::equivalent( const MatchExpression* other ) const {
if ( matchType() != other->matchType() )
return false;
@@ -100,6 +110,11 @@ namespace mongo {
_debugList( debug, level );
}
+ void AndMatchExpression::toBSON(BSONObjBuilder* out) const {
+ BSONArrayBuilder arrBob(out->subarrayStart("$and"));
+ _listToBSON(&arrBob);
+ }
+
// -----
bool OrMatchExpression::matches( const MatchableDocument* doc, MatchDetails* details ) const {
@@ -127,6 +142,11 @@ namespace mongo {
_debugList( debug, level );
}
+ void OrMatchExpression::toBSON(BSONObjBuilder* out) const {
+ BSONArrayBuilder arrBob(out->subarrayStart("$or"));
+ _listToBSON(&arrBob);
+ }
+
// ----
bool NorMatchExpression::matches( const MatchableDocument* doc, MatchDetails* details ) const {
@@ -153,6 +173,11 @@ namespace mongo {
_debugList( debug, level );
}
+ void NorMatchExpression::toBSON(BSONObjBuilder* out) const {
+ BSONArrayBuilder arrBob(out->subarrayStart("$nor"));
+ _listToBSON(&arrBob);
+ }
+
// -------
void NotMatchExpression::debugString( StringBuilder& debug, int level ) const {
@@ -161,6 +186,12 @@ namespace mongo {
_exp->debugString( debug, level + 1 );
}
+ void NotMatchExpression::toBSON(BSONObjBuilder* out) const {
+ BSONObjBuilder childBob(out->subobjStart("$not"));
+ _exp->toBSON(&childBob);
+ childBob.doneFast();
+ }
+
bool NotMatchExpression::equivalent( const MatchExpression* other ) const {
if ( matchType() != other->matchType() )
return false;
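
Combined with the leaf implementations earlier in this diff, the tree serializers above turn a parsed predicate back into query-shaped BSON, which is what ends up in CommonStats::filter. An illustrative example; 'rootExpression' is a placeholder for any parsed MatchExpression, and the output shape is inferred from the toBSON() bodies in this commit rather than copied from real explain output:

    BSONObjBuilder bob;
    rootExpression->toBSON(&bob);   // same pattern the stage getStats() hunks use
    BSONObj serialized = bob.obj();

    // For an AndMatchExpression over { age: { $gte: 21 } } and { tags: { $in: [ 1, 2 ] } },
    // the implementations above yield roughly:
    //   { "$and" : [ { "age" : { "$gte" : 21 } }, { "tags" : { "$in" : [ 1, 2 ] } } ] }
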
diff --git a/src/mongo/db/matcher/expression_tree.h b/src/mongo/db/matcher/expression_tree.h
index 9105b0d2b79..b0caebd1e71 100644
--- a/src/mongo/db/matcher/expression_tree.h
+++ b/src/mongo/db/matcher/expression_tree.h
@@ -67,6 +67,8 @@ namespace mongo {
protected:
void _debugList( StringBuilder& debug, int level ) const;
+ void _listToBSON(BSONArrayBuilder* out) const;
+
private:
std::vector< MatchExpression* > _expressions;
};
@@ -91,6 +93,8 @@ namespace mongo {
}
virtual void debugString( StringBuilder& debug, int level = 0 ) const;
+
+ virtual void toBSON(BSONObjBuilder* out) const;
};
class OrMatchExpression : public ListOfMatchExpression {
@@ -113,6 +117,8 @@ namespace mongo {
}
virtual void debugString( StringBuilder& debug, int level = 0 ) const;
+
+ virtual void toBSON(BSONObjBuilder* out) const;
};
class NorMatchExpression : public ListOfMatchExpression {
@@ -135,6 +141,8 @@ namespace mongo {
}
virtual void debugString( StringBuilder& debug, int level = 0 ) const;
+
+ virtual void toBSON(BSONObjBuilder* out) const;
};
class NotMatchExpression : public MatchExpression {
@@ -169,6 +177,8 @@ namespace mongo {
virtual void debugString( StringBuilder& debug, int level = 0 ) const;
+ virtual void toBSON(BSONObjBuilder* out) const;
+
bool equivalent( const MatchExpression* other ) const;
virtual size_t numChildren() const { return 1; }
diff --git a/src/mongo/db/matcher/expression_where.cpp b/src/mongo/db/matcher/expression_where.cpp
index 2bc99116d22..909c648063c 100644
--- a/src/mongo/db/matcher/expression_where.cpp
+++ b/src/mongo/db/matcher/expression_where.cpp
@@ -65,6 +65,8 @@ namespace mongo {
virtual void debugString( StringBuilder& debug, int level = 0 ) const;
+ virtual void toBSON(BSONObjBuilder* out) const;
+
virtual bool equivalent( const MatchExpression* other ) const ;
virtual void resetTag() { setTag(NULL); }
@@ -143,6 +145,10 @@ namespace mongo {
debug << "scope: " << _userScope << "\n";
}
+ void WhereMatchExpression::toBSON(BSONObjBuilder* out) const {
+ out->append("$where", _code);
+ }
+
bool WhereMatchExpression::equivalent( const MatchExpression* other ) const {
if ( matchType() != other->matchType() )
return false;
diff --git a/src/mongo/db/matcher/expression_where_noop.cpp b/src/mongo/db/matcher/expression_where_noop.cpp
index 10aee8ff457..f88a65d93f6 100644
--- a/src/mongo/db/matcher/expression_where_noop.cpp
+++ b/src/mongo/db/matcher/expression_where_noop.cpp
@@ -67,6 +67,8 @@ namespace mongo {
virtual void debugString( StringBuilder& debug, int level = 0 ) const;
+ virtual void toBSON(BSONObjBuilder* out) const;
+
virtual bool equivalent( const MatchExpression* other ) const ;
virtual void resetTag() { setTag(NULL); }
@@ -92,6 +94,10 @@ namespace mongo {
debug << "code: " << _code << "\n";
}
+ void WhereNoOpMatchExpression::toBSON(BSONObjBuilder* out) const {
+ out->append("$where", _code);
+ }
+
bool WhereNoOpMatchExpression::equivalent( const MatchExpression* other ) const {
if ( matchType() != other->matchType() )
return false;
diff --git a/src/mongo/db/query/SConscript b/src/mongo/db/query/SConscript
index bf7eb904379..e48861a11f5 100644
--- a/src/mongo/db/query/SConscript
+++ b/src/mongo/db/query/SConscript
@@ -32,6 +32,7 @@ env.Library(
target='query',
source=[
"eof_runner.cpp",
+ "explain.cpp",
"explain_plan.cpp",
"get_runner.cpp",
"idhack_runner.cpp",
diff --git a/src/mongo/db/query/explain.cpp b/src/mongo/db/query/explain.cpp
new file mode 100644
index 00000000000..7477c1a0285
--- /dev/null
+++ b/src/mongo/db/query/explain.cpp
@@ -0,0 +1,356 @@
+/**
+ * Copyright (C) 2013-2014 MongoDB Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the GNU Affero General Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#include "mongo/platform/basic.h"
+
+#include "mongo/db/query/explain.h"
+
+#include "mongo/db/exec/multi_plan.h"
+#include "mongo/db/query/get_runner.h"
+#include "mongo/db/query/plan_executor.h"
+#include "mongo/db/query/query_planner.h"
+#include "mongo/db/query/stage_builder.h"
+#include "mongo/db/exec/working_set_common.h"
+#include "mongo/db/server_options.h"
+#include "mongo/db/server_parameters.h"
+#include "mongo/util/mongoutils/str.h"
+#include "mongo/util/processinfo.h"
+#include "mongo/util/version.h"
+
+namespace mongo {
+
+ using mongoutils::str::stream;
+
+ MONGO_EXPORT_SERVER_PARAMETER(enableNewExplain, bool, false);
+
+ // static
+ void Explain::explainTree(const PlanStageStats& stats,
+ Explain::Verbosity verbosity,
+ BSONObjBuilder* bob) {
+ invariant(bob);
+
+ // Stage name.
+ bob->append("stage", stats.common.stageTypeStr);
+
+ if (!stats.common.filter.isEmpty()) {
+ bob->append("filter", stats.common.filter);
+ }
+
+ // Stage-specific stats
+ if (STAGE_IXSCAN == stats.stageType) {
+ IndexScanStats* spec = static_cast<IndexScanStats*>(stats.specific.get());
+ bob->append("keyPattern", spec->keyPattern);
+ bob->appendBool("isMultiKey", spec->isMultiKey);
+ bob->append("indexBounds", spec->indexBounds);
+ }
+ else if (STAGE_GEO_NEAR_2D == stats.stageType) {
+ TwoDNearStats* spec = static_cast<TwoDNearStats*>(stats.specific.get());
+ bob->append("keyPattern", spec->keyPattern);
+
+ // TODO these things are execution stats
+ /*bob->appendNumber("keysExamined", spec->nscanned);
+ bob->appendNumber("objectsLoaded", spec->objectsLoaded);*/
+ }
+ else if (STAGE_TEXT == stats.stageType) {
+ TextStats* spec = static_cast<TextStats*>(stats.specific.get());
+ bob->append("indexPrefix", spec->indexPrefix);
+ bob->append("parsedTextQuery", spec->parsedTextQuery);
+
+ // TODO these things are execution stats
+ /*bob->appendNumber("keysExamined", spec->keysExamined);
+ bob->appendNumber("fetches", spec->fetches);*/
+ }
+ else if (STAGE_SORT == stats.stageType) {
+ SortStats* spec = static_cast<SortStats*>(stats.specific.get());
+ bob->append("sortPattern", spec->sortPattern);
+ if (spec->limit > 0) {
+ bob->appendNumber("limitAmount", spec->limit);
+ }
+ bob->appendNumber("memUsage", spec->memUsage);
+ }
+ else if (STAGE_SORT_MERGE == stats.stageType) {
+ MergeSortStats* spec = static_cast<MergeSortStats*>(stats.specific.get());
+ bob->append("sortPattern", spec->sortPattern);
+ }
+ else if (STAGE_PROJECTION == stats.stageType) {
+ ProjectionStats* spec = static_cast<ProjectionStats*>(stats.specific.get());
+ bob->append("transformBy", spec->projObj);
+ }
+ else if (STAGE_SKIP == stats.stageType) {
+ SkipStats* spec = static_cast<SkipStats*>(stats.specific.get());
+ bob->appendNumber("skipAmount", spec->skip);
+ }
+ else if (STAGE_LIMIT == stats.stageType) {
+ LimitStats* spec = static_cast<LimitStats*>(stats.specific.get());
+ bob->appendNumber("limitAmount", spec->limit);
+ }
+
+ // We're done if there are no children.
+ if (stats.children.empty()) {
+ return;
+ }
+
+ // If there's just one child (a common scenario), avoid making an array. This makes
+ // the output more readable by saving a level of nesting. Name the field 'inputStage'
+ // rather than 'inputStages'.
+ if (1 == stats.children.size()) {
+ BSONObjBuilder childBob;
+ explainTree(*stats.children[0], verbosity, &childBob);
+ bob->append("inputStage", childBob.obj());
+ return;
+ }
+
+ // There is more than one child. Recursively call explainTree(...) on each
+ // of them and add them to the 'inputStages' array.
+
+ BSONArrayBuilder childrenBob(bob->subarrayStart("inputStages"));
+ for (size_t i = 0; i < stats.children.size(); ++i) {
+ BSONObjBuilder childBob(childrenBob.subobjStart());
+ explainTree(*stats.children[i], verbosity, &childBob);
+ }
+ childrenBob.doneFast();
+ }
+
+ // static
+ void Explain::generatePlannerInfo(CanonicalQuery* query,
+ PlanStageStats* winnerStats,
+ vector<PlanStageStats*>& rejectedStats,
+ BSONObjBuilder* out) {
+ BSONObjBuilder plannerBob(out->subobjStart("queryPlanner"));
+
+ plannerBob.append("plannerVersion", QueryPlanner::kPlannerVersion);
+
+ BSONObjBuilder parsedQueryBob(plannerBob.subobjStart("parsedQuery"));
+ query->root()->toBSON(&parsedQueryBob);
+ parsedQueryBob.doneFast();
+
+ BSONObjBuilder winningPlanBob(plannerBob.subobjStart("winningPlan"));
+ explainTree(*winnerStats, Explain::QUERY_PLANNER, &winningPlanBob);
+ winningPlanBob.doneFast();
+
+ // Generate array of rejected plans.
+ BSONArrayBuilder allPlansBob(plannerBob.subarrayStart("rejectedPlans"));
+ for (size_t i = 0; i < rejectedStats.size(); i++) {
+ BSONObjBuilder childBob(allPlansBob.subobjStart());
+ explainTree(*rejectedStats.at(i), Explain::QUERY_PLANNER, &childBob);
+ }
+ allPlansBob.doneFast();
+
+ plannerBob.doneFast();
+ }
+
+ // static
+ void Explain::generateServerInfo(BSONObjBuilder* out) {
+ BSONObjBuilder serverBob(out->subobjStart("serverInfo"));
+ serverBob.append("host", getHostNameCached());
+ serverBob.appendNumber("port", serverGlobalParams.port);
+ serverBob.append("version", versionString);
+ serverBob.append("gitVersion", gitVersion());
+
+ ProcessInfo p;
+ BSONObjBuilder bOs;
+ bOs.append("type", p.getOsType());
+ bOs.append("name", p.getOsName());
+ bOs.append("version", p.getOsVersion());
+ serverBob.append(StringData("os"), bOs.obj());
+
+ serverBob.doneFast();
+ }
+
+ // static
+ Status Explain::explainSinglePlan(Collection* collection,
+ CanonicalQuery* rawCanonicalQuery,
+ QuerySolution* solution,
+ Explain::Verbosity verbosity,
+ BSONObjBuilder* out) {
+ // Only one possible plan. Build the stages from the solution.
+ WorkingSet* ws = new WorkingSet();
+ PlanStage* root;
+ verify(StageBuilder::build(collection, *solution, ws, &root));
+
+ // Wrap the exec stages in a plan executor. Takes ownership of 'ws' and 'root'.
+ scoped_ptr<PlanExecutor> exec(new PlanExecutor(ws, root, collection));
+
+ // If we need execution stats, then we should run the plan.
+ if (verbosity > Explain::QUERY_PLANNER) {
+ Runner::RunnerState state;
+ BSONObj obj;
+ while (Runner::RUNNER_ADVANCED == (state = exec->getNext(&obj, NULL)));
+
+ if (Runner::RUNNER_ERROR == state) {
+ return Status(ErrorCodes::BadValue,
+ "Exec error: " + WorkingSetCommon::toStatusString(obj));
+ }
+ }
+
+ scoped_ptr<PlanStageStats> stats(exec->getStats());
+
+ // Actually generate the explain results.
+
+ if (verbosity >= Explain::QUERY_PLANNER) {
+ vector<PlanStageStats*> rejected;
+ generatePlannerInfo(rawCanonicalQuery, stats.get(), rejected, out);
+ generateServerInfo(out);
+ }
+
+ if (verbosity >= Explain::EXEC_STATS) {
+ // TODO: generate executionStats section
+ }
+
+ if (verbosity >= Explain::EXEC_ALL_PLANS) {
+ // TODO: generate rejected plans execution stats
+ }
+
+ return Status::OK();
+ }
+
+ // static
+ Status Explain::explainMultiPlan(Collection* collection,
+ CanonicalQuery* rawCanonicalQuery,
+ vector<QuerySolution*>& solutions,
+ Explain::Verbosity verbosity,
+ BSONObjBuilder* out) {
+ scoped_ptr<WorkingSet> sharedWorkingSet(new WorkingSet());
+
+ scoped_ptr<MultiPlanStage> multiPlanStage(
+ new MultiPlanStage(collection, rawCanonicalQuery));
+
+ for (size_t ix = 0; ix < solutions.size(); ++ix) {
+ // Version of StageBuilder::build() used when the WorkingSet is shared.
+ PlanStage* nextPlanRoot;
+ verify(StageBuilder::build(collection, *solutions[ix],
+ sharedWorkingSet.get(), &nextPlanRoot));
+
+ // Takes ownership of the solution and the root PlanStage, but not the working set.
+ multiPlanStage->addPlan(solutions[ix], nextPlanRoot, sharedWorkingSet.get());
+ }
+
+ // Run the plan / do the plan selection as required by the requested verbosity.
+ multiPlanStage->pickBestPlan();
+ if (Explain::EXEC_STATS == verbosity) {
+ Status execStatus = multiPlanStage->executeWinningPlan();
+ if (!execStatus.isOK()) {
+ return execStatus;
+ }
+ }
+ else if (Explain::EXEC_ALL_PLANS == verbosity) {
+ Status execStatus = multiPlanStage->executeAllPlans();
+ if (!execStatus.isOK()) {
+ return execStatus;
+ }
+ }
+
+ // Get stats for the winning plan.
+ scoped_ptr<PlanStageStats> stats(multiPlanStage->getStats());
+
+ // Actually generate the explain results.
+
+ if (verbosity >= Explain::QUERY_PLANNER) {
+ vector<PlanStageStats*>* rejected = multiPlanStage->generateCandidateStats();
+ generatePlannerInfo(rawCanonicalQuery, stats.get(), *rejected, out);
+ generateServerInfo(out);
+ }
+
+ if (verbosity >= Explain::EXEC_STATS) {
+ // TODO: generate executionStats section
+ }
+
+ if (verbosity >= Explain::EXEC_ALL_PLANS) {
+ // TODO: generate rejected plans execution stats
+ }
+
+ return Status::OK();
+ }
+
+ // static
+ void Explain::explainEmptyColl(CanonicalQuery* rawCanonicalQuery,
+ BSONObjBuilder* out) {
+ BSONObjBuilder plannerBob(out->subobjStart("queryPlanner"));
+
+ plannerBob.append("plannerVersion", QueryPlanner::kPlannerVersion);
+
+ BSONObjBuilder parsedQueryBob(plannerBob.subobjStart("parsedQuery"));
+ rawCanonicalQuery->root()->toBSON(&parsedQueryBob);
+ parsedQueryBob.doneFast();
+
+ plannerBob.appendBool("emptyCollection", true);
+
+ plannerBob.append("winningPlan", "EOF");
+
+ // Empty array of rejected plans.
+ BSONArrayBuilder allPlansBob(plannerBob.subarrayStart("rejectedPlans"));
+ allPlansBob.doneFast();
+
+ plannerBob.doneFast();
+
+ generateServerInfo(out);
+ }
+
+ // static
+ Status Explain::explain(Collection* collection,
+ CanonicalQuery* rawCanonicalQuery,
+ size_t plannerOptions,
+ Explain::Verbosity verbosity,
+ BSONObjBuilder* out) {
+ invariant(rawCanonicalQuery);
+ auto_ptr<CanonicalQuery> canonicalQuery(rawCanonicalQuery);
+
+ if (NULL == collection) {
+ explainEmptyColl(rawCanonicalQuery, out);
+ return Status::OK();
+ }
+
+ QueryPlannerParams plannerParams;
+ plannerParams.options = plannerOptions;
+ fillOutPlannerParams(collection, rawCanonicalQuery, &plannerParams);
+
+ vector<QuerySolution*> solutions;
+ Status status = QueryPlanner::plan(*canonicalQuery, plannerParams, &solutions);
+ if (!status.isOK()) {
+ return Status(ErrorCodes::BadValue,
+ "error processing explain: " + canonicalQuery->toString() +
+ " planner returned error: " + status.reason());
+ }
+
+ // We cannot figure out how to answer the query. Perhaps it requires an index
+ // we do not have?
+ if (0 == solutions.size()) {
+ stream ss;
+ ss << "error processing explain: " << canonicalQuery->toString()
+ << " No query solutions";
+ return Status(ErrorCodes::BadValue, ss);
+ }
+ else if (1 == solutions.size()) {
+ return explainSinglePlan(collection, rawCanonicalQuery, solutions[0], verbosity, out);
+ }
+ else {
+ return explainMultiPlan(collection, rawCanonicalQuery, solutions, verbosity, out);
+ }
+ }
+
+} // namespace mongo
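To make the shape of explainTree()'s output concrete, here is a hedged sketch of a caller building a tiny two-stage stats tree (FETCH over IXSCAN) by hand and rendering it. The constructors follow the ones used in plan_cache_test.cpp below; the field values shown are illustrative, not part of this patch:

    // Sketch only; in practice the stats tree comes from PlanStage::getStats().
    CommonStats scanCommon("IXSCAN");
    PlanStageStats* scanStats = new PlanStageStats(scanCommon, STAGE_IXSCAN);
    IndexScanStats* ixSpecific = new IndexScanStats();
    ixSpecific->keyPattern = BSON("a" << 1);
    scanStats->specific.reset(ixSpecific);

    CommonStats fetchCommon("FETCH");
    PlanStageStats fetchStats(fetchCommon, STAGE_FETCH);
    fetchStats.children.push_back(scanStats);  // fetchStats now owns scanStats

    BSONObjBuilder bob;
    Explain::explainTree(fetchStats, Explain::QUERY_PLANNER, &bob);
    // bob.obj() is roughly:
    //   { stage: "FETCH",
    //     inputStage: { stage: "IXSCAN", keyPattern: { a: 1 },
    //                   isMultiKey: false, indexBounds: {} } }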
diff --git a/src/mongo/db/query/explain.h b/src/mongo/db/query/explain.h
new file mode 100644
index 00000000000..675109bc294
--- /dev/null
+++ b/src/mongo/db/query/explain.h
@@ -0,0 +1,155 @@
+/**
+ * Copyright (C) 2013-2014 MongoDB Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the GNU Affero General Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#pragma once
+
+#include "mongo/db/exec/plan_stats.h"
+#include "mongo/db/query/canonical_query.h"
+#include "mongo/db/query/query_planner_params.h"
+#include "mongo/db/query/query_solution.h"
+
+namespace mongo {
+
+ class Collection;
+
+ // Temporarily hide the new explain implementation behind a setParameter.
+ // TODO: take this out, and make the new implementation the default.
+ extern bool enableNewExplain;
+
+ /**
+ * Namespace for the collection of static methods used to generate explain information.
+ */
+ class Explain {
+ public:
+ /**
+ * The various supported verbosity levels for explain. The order is
+ * significant: the enum values are assigned in order of increasing verbosity.
+ */
+ enum Verbosity {
+ // At all verbosities greater than or equal to QUERY_PLANNER, we display information
+ // about the plan selected and alternate rejected plans. Does not include any execution-
+ // related info. String alias is "queryPlanner".
+ QUERY_PLANNER = 0,
+
+ // At all verbosities greater than or equal to EXEC_STATS, we display a section of
+ // output containing both overall execution stats, and stats per stage in the
+ // execution tree. String alias is "execStats".
+ EXEC_STATS = 1,
+
+ // At this highest verbosity level, we generate the execution stats for each rejected
+ // plan as well as the winning plan. String alias is "allPlansExecution".
+ EXEC_ALL_PLANS = 2
+ };
+
+ /**
+ * Adds the 'queryPlanner' explain section to the BSON object being built
+ * by 'out'.
+ *
+ * @param query -- the query part of the operation being explained.
+ * @param winnerStats -- the stats tree for the winning plan.
+ * @param rejectedStats -- an array of stats trees, one per rejected plan
+ */
+ static void generatePlannerInfo(CanonicalQuery* query,
+ PlanStageStats* winnerStats,
+ vector<PlanStageStats*>& rejectedStats,
+ BSONObjBuilder* out);
+
+ /**
+ * Adds the 'serverInfo' explain section to the BSON object being built
+ * by 'out'.
+ */
+ static void generateServerInfo(BSONObjBuilder* out);
+
+ /**
+ * Converts the stats tree 'stats' into a corresponding BSON object containing
+ * explain information.
+ *
+ * Explain info is added to 'bob' according to the verbosity level passed in
+ * 'verbosity'.
+ */
+ static void explainTree(const PlanStageStats& stats,
+ Explain::Verbosity verbosity,
+ BSONObjBuilder* bob);
+
+ /**
+ * Add explain info to 'out' at verbosity 'verbosity' in the case that there is
+ * only one query solution available.
+ *
+ * The query 'rawCanonicalQuery' has one viable query solution 'solution' in the
+ * collection 'collection'.
+ *
+ * May use a PlanExecutor to run the solution in order to produce exec stats.
+ */
+ static Status explainSinglePlan(Collection* collection,
+ CanonicalQuery* rawCanonicalQuery,
+ QuerySolution* solution,
+ Explain::Verbosity verbosity,
+ BSONObjBuilder* out);
+
+ /**
+ * Add explain info to 'out' at verbosity 'verbosity' in the case that there are
+ * multiple query solutions available.
+ *
+ * The query 'rawCanonicalQuery' has the corresponding query solutions in 'solutions'.
+ *
+ * Uses a MultiPlan stage to choose the best plan, and to run the winning plan or the
+ * rejected plans as required by the verbosity level.
+ */
+ static Status explainMultiPlan(Collection* collection,
+ CanonicalQuery* rawCanonicalQuery,
+ vector<QuerySolution*>& solutions,
+ Explain::Verbosity verbosity,
+ BSONObjBuilder* out);
+
+ /**
+ * The format of the explain output is special if the collection is empty or
+ * does not exist.
+ *
+ * Assuming this is the case, adds the explain info for query
+ * 'rawCanonicalQuery' to 'out'.
+ */
+ static void explainEmptyColl(CanonicalQuery* rawCanonicalQuery,
+ BSONObjBuilder* out);
+
+ /**
+ * Top-level explain entry point for a query. Plans 'rawCanonicalQuery' in collection
+ * 'collection' using the planner parameters in 'plannerOptions'.
+ *
+ * The resulting explain BSON is added to 'out'. The level of detail in the output is
+ * controlled by 'verbosity'.
+ *
+ * If necessary, run the query in order to generate execution stats (but throw out
+ * the results of the query).
+ */
+ static Status explain(Collection* collection,
+ CanonicalQuery* rawCanonicalQuery,
+ size_t plannerOptions,
+ Explain::Verbosity verbosity,
+ BSONObjBuilder* out);
+ };
+
+} // namespace mongo
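A sketch of how a caller might drive the new entry point end to end. This is illustrative only: the collection lookup and surrounding error handling are assumed to exist in the caller, and the namespace and query are hypothetical.

    CanonicalQuery* rawCq;
    Status canonStatus = CanonicalQuery::canonicalize("test.coll", BSON("a" << 1), &rawCq);
    if (!canonStatus.isOK()) {
        return canonStatus;
    }

    // Explain::explain() takes ownership of 'rawCq' (see the auto_ptr in explain.cpp).
    BSONObjBuilder explainBob;
    Status explainStatus = Explain::explain(collection, rawCq,
                                            QueryPlannerParams::DEFAULT,
                                            Explain::QUERY_PLANNER,
                                            &explainBob);
    if (!explainStatus.isOK()) {
        return explainStatus;
    }

    // The result carries a 'queryPlanner' section plus 'serverInfo'; higher
    // verbosities will add execution stats once the TODOs above are filled in.
    BSONObj explainObj = explainBob.obj();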
diff --git a/src/mongo/db/query/get_runner.cpp b/src/mongo/db/query/get_runner.cpp
index 8436dff3164..b95c431caf0 100644
--- a/src/mongo/db/query/get_runner.cpp
+++ b/src/mongo/db/query/get_runner.cpp
@@ -397,7 +397,10 @@ namespace mongo {
// Owns none of the arguments
multiPlanStage->addPlan(solutions[ix], nextPlanRoot, sharedWorkingSet);
}
+
multiPlanStage->pickBestPlan();
+ multiPlanStage->generateCandidateStats();
+
*out = new SingleSolutionRunner(collection,
canonicalQuery.release(),
multiPlanStage->bestSolution(),
diff --git a/src/mongo/db/query/index_bounds.cpp b/src/mongo/db/query/index_bounds.cpp
index f90fa9b6ca0..446a3dc4e02 100644
--- a/src/mongo/db/query/index_bounds.cpp
+++ b/src/mongo/db/query/index_bounds.cpp
@@ -184,7 +184,7 @@ namespace mongo {
return ss;
}
- BSONObj IndexBounds::toBSON() const {
+ BSONObj IndexBounds::toLegacyBSON() const {
BSONObjBuilder builder;
if (isSimpleRange) {
// TODO
@@ -238,6 +238,25 @@ namespace mongo {
return builder.obj();
}
+ BSONObj IndexBounds::toBSON() const {
+ BSONObjBuilder bob;
+ vector<OrderedIntervalList>::const_iterator itField;
+ for (itField = fields.begin(); itField != fields.end(); ++itField) {
+ BSONArrayBuilder fieldBuilder(bob.subarrayStart(itField->name));
+
+ vector<Interval>::const_iterator itInterval;
+ for (itInterval = itField->intervals.begin()
+ ; itInterval != itField->intervals.end()
+ ; ++itInterval) {
+ fieldBuilder.append(itInterval->toString());
+ }
+
+ fieldBuilder.doneFast();
+ }
+
+ return bob.obj();
+ }
+
//
// Validity checking for bounds
//
diff --git a/src/mongo/db/query/index_bounds.h b/src/mongo/db/query/index_bounds.h
index b5236d2d4e4..514070b9db4 100644
--- a/src/mongo/db/query/index_bounds.h
+++ b/src/mongo/db/query/index_bounds.h
@@ -93,6 +93,25 @@ namespace mongo {
size_t getNumIntervals(size_t i) const;
Interval getInterval(size_t i, size_t j) const;
std::string toString() const;
+
+ /**
+ * Legacy BSON format for explain. Each field maps to an array of [start, end] arrays.
+ *
+ * TODO remove this function once the new explain format is on by default.
+ *
+ * Ex.
+ * {a: [ [1, 1], [3, 10] ], b: [ [Infinity, 10] ] }
+ */
+ BSONObj toLegacyBSON() const;
+
+ /**
+ * BSON format for explain. The format is an array of strings for each field.
+ * Each string represents an interval. The strings use "[" and "]" if the interval
+ * bounds are inclusive, and "(" / ")" if exclusive.
+ *
+ * Ex.
+ * {a: ["[1, 1]", "(3, 10)"], b: ["[Infinity, 10)"] }
+ */
BSONObj toBSON() const;
// TODO: we use this for max/min scan. Consider migrating that.
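As a quick comparison of the two serializations, a sketch with a single hand-built field bound; the Interval constructor taking (BSONObj, startInclusive, endInclusive) is the one declared in interval.h, and the outputs shown are approximate:

    OrderedIntervalList oil;
    oil.name = "a";
    oil.intervals.push_back(Interval(BSON("" << 1 << "" << 5), true, false));  // [1, 5)

    IndexBounds bounds;
    bounds.fields.push_back(oil);

    BSONObj legacy = bounds.toLegacyBSON();  // roughly { a: [ [ 1, 5 ] ] }
    BSONObj pretty = bounds.toBSON();        // roughly { a: [ "[1, 5)" ] }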
diff --git a/src/mongo/db/query/new_find.cpp b/src/mongo/db/query/new_find.cpp
index efdaf0e7c2a..d1c81173d2e 100644
--- a/src/mongo/db/query/new_find.cpp
+++ b/src/mongo/db/query/new_find.cpp
@@ -35,6 +35,7 @@
#include "mongo/db/exec/oplogstart.h"
#include "mongo/db/exec/working_set_common.h"
#include "mongo/db/keypattern.h"
+#include "mongo/db/query/explain.h"
#include "mongo/db/query/find_constants.h"
#include "mongo/db/query/get_runner.h"
#include "mongo/db/query/internal_plans.h"
@@ -479,6 +480,52 @@ namespace mongo {
// We use this a lot below.
const LiteParsedQuery& pq = cq->getParsed();
+ // Set this outside the loop. We will need it both within the loop and when deciding
+ // whether to fill in explain information.
+ const bool isExplain = pq.isExplain();
+
+ // New-style explains get diverted through a separate path which calls back into the
+ // query planner and query execution mechanisms.
+ //
+ // TODO temporary until find() becomes a real command.
+ if (isExplain && enableNewExplain) {
+ size_t options = QueryPlannerParams::DEFAULT;
+ if (shardingState.needCollectionMetadata(pq.ns())) {
+ options |= QueryPlannerParams::INCLUDE_SHARD_FILTER;
+ }
+
+ BufBuilder bb;
+ bb.skip(sizeof(QueryResult));
+
+ BSONObjBuilder explainBob;
+ Status explainStatus = Explain::explain(collection, cq, options,
+ Explain::QUERY_PLANNER, &explainBob);
+ if (!explainStatus.isOK()) {
+ uasserted(17510, "Explain error: " + explainStatus.reason());
+ }
+
+ // Add the resulting object to the return buffer.
+ BSONObj explainObj = explainBob.obj();
+ bb.appendBuf((void*)explainObj.objdata(), explainObj.objsize());
+
+ curop.debug().iscommand = true;
+ // TODO: Does this get overwritten/do we really need to set this twice?
+ curop.debug().query = q.query;
+
+ // Set query result fields.
+ QueryResult* qr = reinterpret_cast<QueryResult*>(bb.buf());
+ bb.decouple();
+ qr->setResultFlagsToOk();
+ qr->len = bb.len();
+ curop.debug().responseLength = bb.len();
+ qr->setOperation(opReply);
+ qr->cursorId = 0;
+ qr->startingFrom = 0;
+ qr->nReturned = 1;
+ result.setData(qr, true);
+ return "";
+ }
+
// We'll now try to get the query runner that will execute this query for us. There
// are a few cases in which we know upfront which runner we should get and, therefore,
// we shortcut the selection process here.
@@ -562,10 +609,6 @@ namespace mongo {
Runner::RunnerState state;
// uint64_t numMisplacedDocs = 0;
- // set this outside loop. we will need to use this both within loop and when deciding
- // to fill in explain information
- const bool isExplain = pq.isExplain();
-
// Have we retrieved info about which plan the runner will
// use to execute the query yet?
bool gotPlanInfo = false;
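Note that the diverted path added above is only taken when the enableNewExplain server parameter (declared in explain.cpp, default false) has been switched on. Based on the defaults of MONGO_EXPORT_SERVER_PARAMETER, that should be possible both at startup (--setParameter enableNewExplain=true) and at runtime via the setParameter command, while the old explain behavior remains the default.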
diff --git a/src/mongo/db/query/plan_cache_test.cpp b/src/mongo/db/query/plan_cache_test.cpp
index 7a1d1cd03af..be7c098c955 100644
--- a/src/mongo/db/query/plan_cache_test.cpp
+++ b/src/mongo/db/query/plan_cache_test.cpp
@@ -182,7 +182,8 @@ namespace {
PlanRankingDecision* createDecision(size_t numPlans) {
auto_ptr<PlanRankingDecision> why(new PlanRankingDecision());
for (size_t i = 0; i < numPlans; ++i) {
- auto_ptr<PlanStageStats> stats(new PlanStageStats(CommonStats(), STAGE_COLLSCAN));
+ CommonStats common("COLLSCAN");
+ auto_ptr<PlanStageStats> stats(new PlanStageStats(common, STAGE_COLLSCAN));
stats->specific.reset(new CollectionScanStats());
why->stats.mutableVector().push_back(stats.release());
why->scores.push_back(0U);
diff --git a/src/mongo/db/query/query_planner.cpp b/src/mongo/db/query/query_planner.cpp
index 1e16eb47815..98e2e723702 100644
--- a/src/mongo/db/query/query_planner.cpp
+++ b/src/mongo/db/query/query_planner.cpp
@@ -168,6 +168,9 @@ namespace mongo {
return !sortIt.more();
}
+ // static
+ const int QueryPlanner::kPlannerVersion = 1;
+
Status QueryPlanner::cacheDataFromTaggedTree(const MatchExpression* const taggedTree,
const vector<IndexEntry>& relevantIndices,
PlanCacheIndexTree** out) {
diff --git a/src/mongo/db/query/query_planner.h b/src/mongo/db/query/query_planner.h
index 151c7278728..b212b49694e 100644
--- a/src/mongo/db/query/query_planner.h
+++ b/src/mongo/db/query/query_planner.h
@@ -43,6 +43,9 @@ namespace mongo {
*/
class QueryPlanner {
public:
+ // Identifies the version of the query planner module. Reported in explain.
+ static const int kPlannerVersion;
+
/**
* Outputs a series of possible solutions for the provided 'query' into 'out'. Uses the
* indices and other data in 'params' to plan with.