author    | Benety Goh <benety@mongodb.com> | 2014-01-15 21:06:20 -0500
committer | Benety Goh <benety@mongodb.com> | 2014-01-21 18:19:28 -0500
commit    | 7ca37c59bb1527ed613425a7575574c1c574df4b (patch)
tree      | 067ad8692ba3f9106de06a4238edd45b0b682f9e /src
parent    | 06f7f3ca68c95277b1c11f469f01e61cbeb3cc76 (diff)
download  | mongo-7ca37c59bb1527ed613425a7575574c1c574df4b.tar.gz
SERVER-12372 added plan stats for all solutions and feedback to planCacheListPlans result
Diffstat (limited to 'src')
Mode | File | Lines changed
---|---|---
-rw-r--r-- | src/mongo/db/commands/hint_commands.cpp | 37
-rw-r--r-- | src/mongo/db/commands/hint_commands_test.cpp | 40
-rw-r--r-- | src/mongo/db/commands/plan_cache_commands.cpp | 64
-rw-r--r-- | src/mongo/db/commands/plan_cache_commands_test.cpp | 31
-rw-r--r-- | src/mongo/db/exec/plan_stats.h | 92
-rw-r--r-- | src/mongo/db/query/explain_plan.cpp | 103
-rw-r--r-- | src/mongo/db/query/explain_plan.h | 2
-rw-r--r-- | src/mongo/db/query/multi_plan_runner.cpp | 25
-rw-r--r-- | src/mongo/db/query/plan_cache.cpp | 99
-rw-r--r-- | src/mongo/db/query/plan_cache.h | 45
-rw-r--r-- | src/mongo/db/query/plan_cache_test.cpp | 29
-rw-r--r-- | src/mongo/db/query/plan_ranker.cpp | 66
-rw-r--r-- | src/mongo/db/query/plan_ranker.h | 45
-rw-r--r-- | src/mongo/db/query/query_settings.cpp | 10
14 files changed, 468 insertions, 220 deletions
diff --git a/src/mongo/db/commands/hint_commands.cpp b/src/mongo/db/commands/hint_commands.cpp index 3f0fdbcdb33..8d597fdd8c2 100644 --- a/src/mongo/db/commands/hint_commands.cpp +++ b/src/mongo/db/commands/hint_commands.cpp @@ -30,6 +30,7 @@ #include <sstream> #include "mongo/base/init.h" +#include "mongo/base/owned_pointer_vector.h" #include "mongo/base/status.h" #include "mongo/db/client.h" #include "mongo/db/catalog/database.h" @@ -46,30 +47,6 @@ namespace { using namespace mongo; /** - * Releases memory for container of pointers. - * XXX: move elsewhere when something similar is available in util libraries. - */ - template <typename T> - class ContainerPointersDeleter { - public: - ContainerPointersDeleter(T* container) : _container(container) { - invariant(_container); - } - - ~ContainerPointersDeleter() { - for (typename T::const_iterator i = _container->begin(); - i != _container->end(); ++i) { - invariant(*i); - delete *i; - } - _container->clear(); - } - private: - MONGO_DISALLOW_COPYING(ContainerPointersDeleter); - T* _container; - }; - - /** * Utility function to extract error code and message from status * and append to BSON results. */ @@ -224,9 +201,8 @@ namespace mongo { // } // } BSONArrayBuilder hintsBuilder(bob->subarrayStart("hints")); - vector<AllowedIndexEntry*> entries = querySettings.getAllAllowedIndices(); - // Frees resources in entries on destruction. - ContainerPointersDeleter<vector<AllowedIndexEntry*> > deleter(&entries); + OwnedPointerVector<AllowedIndexEntry> entries; + entries.mutableVector() = querySettings.getAllAllowedIndices(); for (vector<AllowedIndexEntry*>::const_iterator i = entries.begin(); i != entries.end(); ++i) { AllowedIndexEntry* entry = *i; @@ -302,9 +278,8 @@ namespace mongo { // Get entries from query settings. We need to remove corresponding entries from the plan // cache shortly. - vector<AllowedIndexEntry*> entries = querySettings->getAllAllowedIndices(); - // Frees resources in entries on destruction. - ContainerPointersDeleter<vector<AllowedIndexEntry*> > deleter(&entries); + OwnedPointerVector<AllowedIndexEntry> entries; + entries.mutableVector() = querySettings->getAllAllowedIndices(); // OK to proceed with clearing entire cache. querySettings->clearAllowedIndices(); @@ -385,7 +360,7 @@ namespace mongo { if (obj.isEmpty()) { return Status(ErrorCodes::BadValue, "index specification cannot be empty"); } - indexes.push_back(obj.copy()); + indexes.push_back(obj.getOwned()); } CanonicalQuery* cqRaw; diff --git a/src/mongo/db/commands/hint_commands_test.cpp b/src/mongo/db/commands/hint_commands_test.cpp index 33d911c25dc..f322ab482a9 100644 --- a/src/mongo/db/commands/hint_commands_test.cpp +++ b/src/mongo/db/commands/hint_commands_test.cpp @@ -82,13 +82,28 @@ namespace { ASSERT_EQUALS(indexesElt.type(), mongo::Array); // All fields OK. Append to vector. - hints.push_back(obj.copy()); + hints.push_back(obj.getOwned()); } return hints; } /** + * Utility function to create a PlanRankingDecision + */ + PlanRankingDecision* createDecision(size_t numPlans) { + auto_ptr<PlanRankingDecision> why(new PlanRankingDecision()); + for (size_t i = 0; i < numPlans; ++i) { + auto_ptr<PlanStageStats> stats(new PlanStageStats(CommonStats(), STAGE_COLLSCAN)); + stats->specific.reset(new CollectionScanStats()); + why->stats.mutableVector().push_back(stats.release()); + why->scores.push_back(0U); + why->candidateOrder.push_back(i); + } + return why.release(); + } + + /** * Injects an entry into plan cache for query shape. 
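The hint_commands.cpp hunk above replaces the hand-rolled ContainerPointersDeleter with OwnedPointerVector. A minimal sketch of the resulting ownership pattern, assuming the MongoDB headers already pulled in by that file; the wrapper function name is invented for illustration:

```cpp
#include "mongo/base/owned_pointer_vector.h"  // as added by the hunk above

// Hypothetical wrapper, not part of the commit; it only illustrates the pattern.
void appendAllowedIndices(QuerySettings& querySettings, BSONArrayBuilder* hintsBuilder) {
    // Take ownership of the raw pointers returned by getAllAllowedIndices().
    // OwnedPointerVector deletes every element in its destructor, which is what
    // the removed ContainerPointersDeleter helper used to do by hand.
    OwnedPointerVector<AllowedIndexEntry> entries;
    entries.mutableVector() = querySettings.getAllAllowedIndices();

    for (std::vector<AllowedIndexEntry*>::const_iterator i = entries.begin();
         i != entries.end(); ++i) {
        AllowedIndexEntry* entry = *i;
        // ... append entry->query, entry->sort, entry->projection to hintsBuilder ...
    }
}   // 'entries' frees each AllowedIndexEntry here.
```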
*/ void addQueryShapeToPlanCache(PlanCache* planCache, const char* queryStr, const char* sortStr, @@ -107,7 +122,7 @@ namespace { qs.cacheData->tree.reset(new PlanCacheIndexTree()); std::vector<QuerySolution*> solns; solns.push_back(&qs); - ASSERT_OK(planCache->add(*cq, solns, new PlanRankingDecision())); + ASSERT_OK(planCache->add(*cq, solns, createDecision(1U))); } /** @@ -124,19 +139,26 @@ namespace { ASSERT_OK(CanonicalQuery::canonicalize(ns, queryObj, sortObj, projectionObj, &cqRaw)); scoped_ptr<CanonicalQuery> cq(cqRaw); - // Retrieve cached solutions from plan cache. - vector<CachedSolution*> solutions = planCache.getAllSolutions(); + // Retrieve cache entries from plan cache. + vector<PlanCacheEntry*> entries = planCache.getAllEntries(); // Search keys. bool found = false; - for (vector<CachedSolution*>::const_iterator i = solutions.begin(); i != solutions.end(); i++) { - CachedSolution* cs = *i; - const PlanCacheKey& currentKey = cs->key; + for (vector<PlanCacheEntry*>::const_iterator i = entries.begin(); i != entries.end(); i++) { + PlanCacheEntry* entry = *i; + + // Canonicalizing query shape in cache entry to get cache key. + // Alternatively, we could add key to PlanCacheEntry but that would be used in one place only. + ASSERT_OK(CanonicalQuery::canonicalize(ns, entry->query, entry->sort, + entry->projection, &cqRaw)); + scoped_ptr<CanonicalQuery> currentQuery(cqRaw); + + const PlanCacheKey& currentKey = currentQuery->getPlanCacheKey(); if (currentKey == cq->getPlanCacheKey()) { found = true; } - // Release resources for cached solution after extracting key. - delete cs; + // Release resources for cache entry after extracting key. + delete entry; } return found; } diff --git a/src/mongo/db/commands/plan_cache_commands.cpp b/src/mongo/db/commands/plan_cache_commands.cpp index 2e6f6c9ea06..1d4d93c620c 100644 --- a/src/mongo/db/commands/plan_cache_commands.cpp +++ b/src/mongo/db/commands/plan_cache_commands.cpp @@ -37,6 +37,8 @@ #include "mongo/db/auth/authorization_session.h" #include "mongo/db/commands/plan_cache_commands.h" #include "mongo/db/catalog/collection.h" +#include "mongo/db/query/explain_plan.h" +#include "mongo/db/query/plan_ranker.h" namespace { @@ -220,21 +222,21 @@ namespace mongo { invariant(bob); // Fetch all cached solutions from plan cache. - vector<CachedSolution*> solutions = planCache.getAllSolutions(); + vector<PlanCacheEntry*> solutions = planCache.getAllEntries(); BSONArrayBuilder arrayBuilder(bob->subarrayStart("shapes")); - for (vector<CachedSolution*>::const_iterator i = solutions.begin(); i != solutions.end(); i++) { - CachedSolution* cs = *i; - invariant(cs); + for (vector<PlanCacheEntry*>::const_iterator i = solutions.begin(); i != solutions.end(); i++) { + PlanCacheEntry* entry = *i; + invariant(entry); BSONObjBuilder shapeBuilder(arrayBuilder.subobjStart()); - shapeBuilder.append("query", cs->query); - shapeBuilder.append("sort", cs->sort); - shapeBuilder.append("projection", cs->projection); + shapeBuilder.append("query", entry->query); + shapeBuilder.append("sort", entry->sort); + shapeBuilder.append("projection", entry->projection); shapeBuilder.doneFast(); // Release resources for cached solution after extracting query shape. 
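The reworked planCacheContains() above no longer reads a key off a CachedSolution; it re-canonicalizes each entry's stored shape to recover the cache key. A condensed sketch of that lookup, assuming the same test headers (the helper name here is hypothetical):

```cpp
// Condensed from the planCacheContains() helper above; 'ns' is the test namespace.
bool cacheContainsShape(const PlanCache& planCache, const CanonicalQuery& cq, const char* ns) {
    std::vector<PlanCacheEntry*> entries = planCache.getAllEntries();
    bool found = false;
    for (std::vector<PlanCacheEntry*>::const_iterator i = entries.begin();
         i != entries.end(); ++i) {
        PlanCacheEntry* entry = *i;
        // Re-canonicalize the shape stored in the entry to recover its plan cache key.
        CanonicalQuery* rawCq;
        if (CanonicalQuery::canonicalize(ns, entry->query, entry->sort,
                                         entry->projection, &rawCq).isOK()) {
            boost::scoped_ptr<CanonicalQuery> entryCq(rawCq);
            if (entryCq->getPlanCacheKey() == cq.getPlanCacheKey()) {
                found = true;
            }
        }
        delete entry;  // getAllEntries() returns clones that the caller owns.
    }
    return found;
}
```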
- delete cs; + delete entry; } arrayBuilder.doneFast(); @@ -325,15 +327,17 @@ namespace mongo { } scoped_ptr<CanonicalQuery> cq(cqRaw); - CachedSolution* crRaw; - Status result = planCache.get(*cq, &crRaw); + PlanCacheEntry* entryRaw; + Status result = planCache.getEntry(*cq, &entryRaw); if (!result.isOK()) { return result; } - scoped_ptr<CachedSolution> cr(crRaw); + scoped_ptr<PlanCacheEntry> entry(entryRaw); BSONArrayBuilder plansBuilder(bob->subarrayStart("plans")); - size_t numPlans = cr->plannerData.size(); + size_t numPlans = entry->plannerData.size(); + invariant(numPlans == entry->decision->stats.size()); + invariant(numPlans == entry->decision->scores.size()); for (size_t i = 0; i < numPlans; ++i) { BSONObjBuilder planBob(plansBuilder.subobjStart()); @@ -341,16 +345,40 @@ namespace mongo { // Currently, simple string representationg of // SolutionCacheData. Need to revisit format when we // need to parse user-provided plan details for planCacheAddPlan. - SolutionCacheData* scd = cr->plannerData[i]; + SolutionCacheData* scd = entry->plannerData[i]; BSONObjBuilder detailsBob(planBob.subobjStart("details")); detailsBob.append("solution", scd->toString()); detailsBob.doneFast(); - // XXX: Fix these field values once we have fleshed out cache entries. - // reason should contain initial plan stats and score from ranking process. - // feedback should contain execution stats from running the query to completion. - planBob.append("reason", BSONObj()); - planBob.append("feedback", BSONObj()); + // reason is comprised of score and initial stats provided by + // multi plan runner. + BSONObjBuilder reasonBob(planBob.subobjStart("reason")); + reasonBob.append("score", entry->decision->scores[i]); + BSONObjBuilder statsBob(reasonBob.subobjStart("stats")); + PlanStageStats* stats = entry->decision->stats.vector()[i]; + if (stats) { + statsToBSON(*stats, &statsBob); + } + statsBob.doneFast(); + reasonBob.doneFast(); + + // BSON object for 'feedback' field is created from query executions + // and shows number of executions since this cached solution was + // created as well as score data (average and standard deviation). + BSONObjBuilder feedbackBob(planBob.subobjStart("feedback")); + if (i == 0U) { + feedbackBob.append("nfeedback", int(entry->feedback.size())); + feedbackBob.append("averageScore", entry->averageScore.get_value_or(0)); + feedbackBob.append("stdDevScore",entry->stddevScore.get_value_or(0)); + BSONArrayBuilder scoresBob(feedbackBob.subarrayStart("scores")); + for (size_t i = 0; i < entry->feedback.size(); ++i) { + BSONObjBuilder scoreBob(scoresBob.subobjStart()); + scoreBob.append("score", entry->feedback[i]->score); + } + scoresBob.doneFast(); + } + feedbackBob.doneFast(); + planBob.append("hint", scd->adminHintApplied); } plansBuilder.doneFast(); diff --git a/src/mongo/db/commands/plan_cache_commands_test.cpp b/src/mongo/db/commands/plan_cache_commands_test.cpp index 0b9c591e00e..3f6d286e7ac 100644 --- a/src/mongo/db/commands/plan_cache_commands_test.cpp +++ b/src/mongo/db/commands/plan_cache_commands_test.cpp @@ -84,7 +84,7 @@ namespace { ASSERT_TRUE(projectionElt.isABSONObj()); // All fields OK. Append to vector. 
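The planCacheListPlans changes above attach a real "reason" (score plus initial stats from the multi plan runner) and "feedback" (execution history) to every cached plan. A sketch of reading back one element of the resulting "plans" array; the field names come straight from the builder code in that hunk, while the reader helper itself is invented:

```cpp
// Hypothetical reader for one element of the "plans" array built above.
void inspectCachedPlan(const BSONObj& planObj) {
    BSONObj details = planObj["details"].Obj();     // { solution: <SolutionCacheData::toString()> }

    // 'reason': the score and initial stats recorded when the plan was ranked.
    BSONObj reason = planObj["reason"].Obj();
    double score = reason["score"].numberDouble();
    BSONObj stats = reason["stats"].Obj();          // statsToBSON() output for this candidate

    // 'feedback': execution history; populated only for the winning (first) plan.
    BSONObj feedback = planObj["feedback"].Obj();
    if (feedback.hasField("nfeedback")) {
        int runs = feedback["nfeedback"].numberInt();
        double avg = feedback["averageScore"].numberDouble();
        double stddev = feedback["stdDevScore"].numberDouble();
        // feedback["scores"] is an array of { score: <double> } documents.
    }

    bool hinted = planObj["hint"].trueValue();      // scd->adminHintApplied
    // (Values are read here only to name the fields; a real caller would format them.)
}
```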
- shapes.push_back(obj.copy()); + shapes.push_back(obj.getOwned()); } return shapes; } @@ -98,6 +98,21 @@ namespace { return scd.release(); } + /** + * Utility function to create a PlanRankingDecision + */ + PlanRankingDecision* createDecision(size_t numPlans) { + auto_ptr<PlanRankingDecision> why(new PlanRankingDecision()); + for (size_t i = 0; i < numPlans; ++i) { + auto_ptr<PlanStageStats> stats(new PlanStageStats(CommonStats(), STAGE_COLLSCAN)); + stats->specific.reset(new CollectionScanStats()); + why->stats.mutableVector().push_back(stats.release()); + why->scores.push_back(0U); + why->candidateOrder.push_back(i); + } + return why.release(); + } + TEST(PlanCacheCommandsTest, planCacheListQueryShapesEmpty) { PlanCache empty; vector<BSONObj> shapes = getShapes(empty); @@ -116,7 +131,7 @@ namespace { qs.cacheData.reset(createSolutionCacheData()); std::vector<QuerySolution*> solns; solns.push_back(&qs); - planCache.add(*cq, solns, new PlanRankingDecision()); + planCache.add(*cq, solns, createDecision(1U)); vector<BSONObj> shapes = getShapes(planCache); ASSERT_EQUALS(shapes.size(), 1U); @@ -141,7 +156,7 @@ namespace { qs.cacheData.reset(createSolutionCacheData()); std::vector<QuerySolution*> solns; solns.push_back(&qs); - planCache.add(*cq, solns, new PlanRankingDecision()); + planCache.add(*cq, solns, createDecision(1U)); ASSERT_EQUALS(getShapes(planCache).size(), 1U); // Clear cache and confirm number of keys afterwards. @@ -223,8 +238,8 @@ namespace { qs.cacheData.reset(createSolutionCacheData()); std::vector<QuerySolution*> solns; solns.push_back(&qs); - planCache.add(*cqA, solns, new PlanRankingDecision()); - planCache.add(*cqB, solns, new PlanRankingDecision()); + planCache.add(*cqA, solns, createDecision(1U)); + planCache.add(*cqB, solns, createDecision(1U)); // Check keys in cache before dropping {b: 1} vector<BSONObj> shapesBefore = getShapes(planCache); @@ -278,7 +293,7 @@ namespace { BSONElement feedbackElt = obj.getField("feedback"); ASSERT_TRUE(feedbackElt.isABSONObj()); - return obj.copy(); + return obj.getOwned(); } /** @@ -332,7 +347,7 @@ namespace { qs.cacheData.reset(createSolutionCacheData()); std::vector<QuerySolution*> solns; solns.push_back(&qs); - planCache.add(*cq, solns, new PlanRankingDecision()); + planCache.add(*cq, solns, createDecision(1U)); vector<BSONObj> plans = getPlans(planCache, cq->getQueryObj(), cq->getParsed().getSort(), cq->getParsed().getProj()); @@ -353,7 +368,7 @@ namespace { std::vector<QuerySolution*> solns; solns.push_back(&qs); solns.push_back(&qs); - planCache.add(*cq, solns, new PlanRankingDecision()); + planCache.add(*cq, solns, createDecision(2U)); vector<BSONObj> plans = getPlans(planCache, cq->getQueryObj(), cq->getParsed().getSort(), cq->getParsed().getProj()); diff --git a/src/mongo/db/exec/plan_stats.h b/src/mongo/db/exec/plan_stats.h index 2061a456a99..ac693b17ad1 100644 --- a/src/mongo/db/exec/plan_stats.h +++ b/src/mongo/db/exec/plan_stats.h @@ -40,7 +40,17 @@ namespace mongo { - struct SpecificStats; + /** + * The interface all specific-to-stage stats provide. + */ + struct SpecificStats { + virtual ~SpecificStats() { } + + /** + * Make a deep copy. + */ + virtual SpecificStats* clone() const = 0; + }; // Every stage has CommonStats. struct CommonStats { @@ -85,6 +95,21 @@ namespace mongo { } } + /** + * Make a deep copy. 
+ */ + PlanStageStats* clone() const { + PlanStageStats* stats = new PlanStageStats(common, stageType); + if (specific.get()) { + stats->specific.reset(specific->clone()); + } + for (size_t i = 0; i < children.size(); ++i) { + invariant(children[i]); + stats->children.push_back(children[i]->clone()); + } + return stats; + } + // See query/stage_type.h StageType stageType; @@ -101,19 +126,17 @@ namespace mongo { MONGO_DISALLOW_COPYING(PlanStageStats); }; - /** - * The interface all specific-to-stage stats provide. - */ - struct SpecificStats { - virtual ~SpecificStats() { } - }; - struct AndHashStats : public SpecificStats { AndHashStats() : flaggedButPassed(0), flaggedInProgress(0) { } virtual ~AndHashStats() { } + virtual SpecificStats* clone() const { + AndHashStats* specific = new AndHashStats(*this); + return specific; + } + // Invalidation counters. // How many results had the AND fully evaluated but were invalidated? size_t flaggedButPassed; @@ -136,6 +159,11 @@ namespace mongo { virtual ~AndSortedStats() { } + virtual SpecificStats* clone() const { + AndSortedStats* specific = new AndSortedStats(*this); + return specific; + } + // How many results from each child did not pass the AND? std::vector<size_t> failedAnd; @@ -149,6 +177,11 @@ namespace mongo { struct CollectionScanStats : public SpecificStats { CollectionScanStats() : docsTested(0) { } + virtual SpecificStats* clone() const { + CollectionScanStats* specific = new CollectionScanStats(*this); + return specific; + } + // How many documents did we check against our filter? size_t docsTested; }; @@ -160,6 +193,11 @@ namespace mongo { virtual ~FetchStats() { } + virtual SpecificStats* clone() const { + FetchStats* specific = new FetchStats(*this); + return specific; + } + // Have we seen anything that already had an object? size_t alreadyHasObj; @@ -185,6 +223,14 @@ namespace mongo { virtual ~IndexScanStats() { } + virtual SpecificStats* clone() const { + IndexScanStats* specific = new IndexScanStats(*this); + // BSON objects have to be explicitly copied. + specific->keyPattern = keyPattern.getOwned(); + specific->indexBounds = indexBounds.getOwned(); + return specific; + } + // Index type being used. std::string indexType; @@ -226,6 +272,11 @@ namespace mongo { virtual ~OrStats() { } + virtual SpecificStats* clone() const { + OrStats* specific = new OrStats(*this); + return specific; + } + size_t dupsTested; size_t dupsDropped; @@ -241,6 +292,11 @@ namespace mongo { virtual ~SortStats() { } + virtual SpecificStats* clone() const { + SortStats* specific = new SortStats(*this); + return specific; + } + // How many records were we forced to fetch as the result of an invalidation? 
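Every stats struct in plan_stats.h now implements SpecificStats::clone() the same way: copy-construct the concrete type, then deep-copy anything that needs it (as IndexScanStats does with getOwned() for its BSONObj members). A sketch of what a new stats struct would add under this convention; FooStats is not part of the commit:

```cpp
// Hypothetical stage-stats struct, shown only to illustrate the clone() convention.
struct FooStats : public SpecificStats {
    FooStats() : itemsInspected(0) { }

    virtual SpecificStats* clone() const {
        FooStats* specific = new FooStats(*this);  // member-wise copy covers the counters
        specific->summary = summary.getOwned();    // BSON members must be copied explicitly
        return specific;
    }

    size_t itemsInspected;
    BSONObj summary;
};
```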
size_t forcedFetches; }; @@ -252,6 +308,11 @@ namespace mongo { virtual ~MergeSortStats() { } + virtual SpecificStats* clone() const { + MergeSortStats* specific = new MergeSortStats(*this); + return specific; + } + size_t dupsTested; size_t dupsDropped; @@ -262,12 +323,22 @@ namespace mongo { struct ShardingFilterStats : public SpecificStats { ShardingFilterStats() : chunkSkips(0) { } + virtual SpecificStats* clone() const { + ShardingFilterStats* specific = new ShardingFilterStats(*this); + return specific; + } + size_t chunkSkips; }; struct TwoDNearStats : public SpecificStats { TwoDNearStats() : objectsLoaded(0), nscanned(0) { } + virtual SpecificStats* clone() const { + TwoDNearStats* specific = new TwoDNearStats(*this); + return specific; + } + size_t objectsLoaded; // Since 2d's near does all its work in one go we can't divine the real nscanned from @@ -278,6 +349,11 @@ namespace mongo { struct TextStats : public SpecificStats { TextStats() : keysExamined(0), fetches(0) { } + virtual SpecificStats* clone() const { + TextStats* specific = new TextStats(*this); + return specific; + } + size_t keysExamined; size_t fetches; diff --git a/src/mongo/db/query/explain_plan.cpp b/src/mongo/db/query/explain_plan.cpp index bc7311f3f41..c9472146120 100644 --- a/src/mongo/db/query/explain_plan.cpp +++ b/src/mongo/db/query/explain_plan.cpp @@ -115,7 +115,9 @@ namespace mongo { if (fullDetails) { res->setNYields(stats.common.yields); - res->stats = statsToBSON(stats); + BSONObjBuilder bob; + statsToBSON(stats, &bob); + res->stats = bob.obj(); } *explain = res.release(); @@ -273,7 +275,9 @@ namespace mongo { // TODO: if we can get this from the runner, we can kill "detailed mode" if (fullDetails) { res->setNYields(root->common.yields); - res->stats = statsToBSON(*root); + BSONObjBuilder bob; + statsToBSON(*root, &bob); + res->stats = bob.obj(); } *explain = res.release(); @@ -339,103 +343,102 @@ namespace mongo { } } - BSONObj statsToBSON(const PlanStageStats& stats) { - BSONObjBuilder bob; + void statsToBSON(const PlanStageStats& stats, BSONObjBuilder* bob) { + invariant(bob); // Common details. 
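Since statsToBSON() now fills a caller-supplied builder instead of returning a BSONObj, call sites follow the small pattern below, mirroring the updated explain_plan.cpp code; the wrapper name is made up:

```cpp
// Hypothetical wrapper showing the new call-site pattern for
// void statsToBSON(const PlanStageStats& stats, BSONObjBuilder* bob).
BSONObj statsTreeToObj(const PlanStageStats& stats) {
    BSONObjBuilder bob;
    statsToBSON(stats, &bob);
    return bob.obj();  // equivalent of the old "return statsToBSON(stats)"
}
```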
- bob.append("type", stageTypeString(stats.stageType)); - bob.appendNumber("works", stats.common.works); - bob.appendNumber("yields", stats.common.yields); - bob.appendNumber("unyields", stats.common.unyields); - bob.appendNumber("invalidates", stats.common.invalidates); - bob.appendNumber("advanced", stats.common.advanced); - bob.appendNumber("needTime", stats.common.needTime); - bob.appendNumber("needFetch", stats.common.needFetch); - bob.appendNumber("isEOF", stats.common.isEOF); + bob->append("type", stageTypeString(stats.stageType)); + bob->appendNumber("works", stats.common.works); + bob->appendNumber("yields", stats.common.yields); + bob->appendNumber("unyields", stats.common.unyields); + bob->appendNumber("invalidates", stats.common.invalidates); + bob->appendNumber("advanced", stats.common.advanced); + bob->appendNumber("needTime", stats.common.needTime); + bob->appendNumber("needFetch", stats.common.needFetch); + bob->appendNumber("isEOF", stats.common.isEOF); // Stage-specific stats if (STAGE_AND_HASH == stats.stageType) { AndHashStats* spec = static_cast<AndHashStats*>(stats.specific.get()); - bob.appendNumber("flaggedButPassed", spec->flaggedButPassed); - bob.appendNumber("flaggedInProgress", spec->flaggedInProgress); + bob->appendNumber("flaggedButPassed", spec->flaggedButPassed); + bob->appendNumber("flaggedInProgress", spec->flaggedInProgress); for (size_t i = 0; i < spec->mapAfterChild.size(); ++i) { - bob.appendNumber(string(stream() << "mapAfterChild_" << i), spec->mapAfterChild[i]); + bob->appendNumber(string(stream() << "mapAfterChild_" << i), spec->mapAfterChild[i]); } } else if (STAGE_AND_SORTED == stats.stageType) { AndSortedStats* spec = static_cast<AndSortedStats*>(stats.specific.get()); - bob.appendNumber("flagged", spec->flagged); - bob.appendNumber("matchTested", spec->matchTested); + bob->appendNumber("flagged", spec->flagged); + bob->appendNumber("matchTested", spec->matchTested); for (size_t i = 0; i < spec->failedAnd.size(); ++i) { - bob.appendNumber(string(stream() << "failedAnd_" << i), spec->failedAnd[i]); + bob->appendNumber(string(stream() << "failedAnd_" << i), spec->failedAnd[i]); } } else if (STAGE_COLLSCAN == stats.stageType) { CollectionScanStats* spec = static_cast<CollectionScanStats*>(stats.specific.get()); - bob.appendNumber("docsTested", spec->docsTested); + bob->appendNumber("docsTested", spec->docsTested); } else if (STAGE_FETCH == stats.stageType) { FetchStats* spec = static_cast<FetchStats*>(stats.specific.get()); - bob.appendNumber("alreadyHasObj", spec->alreadyHasObj); - bob.appendNumber("forcedFetches", spec->forcedFetches); - bob.appendNumber("matchTested", spec->matchTested); + bob->appendNumber("alreadyHasObj", spec->alreadyHasObj); + bob->appendNumber("forcedFetches", spec->forcedFetches); + bob->appendNumber("matchTested", spec->matchTested); } else if (STAGE_GEO_NEAR_2D == stats.stageType) { TwoDNearStats* spec = static_cast<TwoDNearStats*>(stats.specific.get()); - bob.appendNumber("objectsLoaded", spec->objectsLoaded); - bob.appendNumber("nscanned", spec->nscanned); + bob->appendNumber("objectsLoaded", spec->objectsLoaded); + bob->appendNumber("nscanned", spec->nscanned); } else if (STAGE_IXSCAN == stats.stageType) { IndexScanStats* spec = static_cast<IndexScanStats*>(stats.specific.get()); // XXX: how much do we really want here? runtime stats vs. tree structure (soln // tostring). 
- bob.append("keyPattern", spec->keyPattern.toString()); - bob.append("bounds", spec->indexBounds); - bob.appendNumber("isMultiKey", spec->isMultiKey); + bob->append("keyPattern", spec->keyPattern.toString()); + bob->append("bounds", spec->indexBounds); + bob->appendNumber("isMultiKey", spec->isMultiKey); - bob.appendNumber("yieldMovedCursor", spec->yieldMovedCursor); - bob.appendNumber("dupsTested", spec->dupsTested); - bob.appendNumber("dupsDropped", spec->dupsDropped); - bob.appendNumber("seenInvalidated", spec->seenInvalidated); - bob.appendNumber("matchTested", spec->matchTested); - bob.appendNumber("keysExamined", spec->keysExamined); + bob->appendNumber("yieldMovedCursor", spec->yieldMovedCursor); + bob->appendNumber("dupsTested", spec->dupsTested); + bob->appendNumber("dupsDropped", spec->dupsDropped); + bob->appendNumber("seenInvalidated", spec->seenInvalidated); + bob->appendNumber("matchTested", spec->matchTested); + bob->appendNumber("keysExamined", spec->keysExamined); } else if (STAGE_OR == stats.stageType) { OrStats* spec = static_cast<OrStats*>(stats.specific.get()); - bob.appendNumber("dupsTested", spec->dupsTested); - bob.appendNumber("dupsDropped", spec->dupsDropped); - bob.appendNumber("locsForgotten", spec->locsForgotten); + bob->appendNumber("dupsTested", spec->dupsTested); + bob->appendNumber("dupsDropped", spec->dupsDropped); + bob->appendNumber("locsForgotten", spec->locsForgotten); for (size_t i = 0 ; i < spec->matchTested.size(); ++i) { - bob.appendNumber(string(stream() << "matchTested_" << i), spec->matchTested[i]); + bob->appendNumber(string(stream() << "matchTested_" << i), spec->matchTested[i]); } } else if (STAGE_SHARDING_FILTER == stats.stageType) { ShardingFilterStats* spec = static_cast<ShardingFilterStats*>(stats.specific.get()); - bob.appendNumber("chunkSkips", spec->chunkSkips); + bob->appendNumber("chunkSkips", spec->chunkSkips); } else if (STAGE_SORT == stats.stageType) { SortStats* spec = static_cast<SortStats*>(stats.specific.get()); - bob.appendNumber("forcedFetches", spec->forcedFetches); + bob->appendNumber("forcedFetches", spec->forcedFetches); } else if (STAGE_SORT_MERGE == stats.stageType) { MergeSortStats* spec = static_cast<MergeSortStats*>(stats.specific.get()); - bob.appendNumber("dupsTested", spec->dupsTested); - bob.appendNumber("dupsDropped", spec->dupsDropped); - bob.appendNumber("forcedFetches", spec->forcedFetches); + bob->appendNumber("dupsTested", spec->dupsTested); + bob->appendNumber("dupsDropped", spec->dupsDropped); + bob->appendNumber("forcedFetches", spec->forcedFetches); } else if (STAGE_TEXT == stats.stageType) { TextStats* spec = static_cast<TextStats*>(stats.specific.get()); - bob.appendNumber("keysExamined", spec->keysExamined); - bob.appendNumber("fetches", spec->fetches); + bob->appendNumber("keysExamined", spec->keysExamined); + bob->appendNumber("fetches", spec->fetches); } - BSONArrayBuilder childBob(bob.subarrayStart("children")); + BSONArrayBuilder childrenBob(bob->subarrayStart("children")); for (size_t i = 0; i < stats.children.size(); ++i) { - childBob.append(statsToBSON(*stats.children[i])); + BSONObjBuilder childBob(childrenBob.subobjStart()); + statsToBSON(*stats.children[i], &childBob); } - - childBob.done(); - return bob.obj(); + childrenBob.doneFast(); } } // namespace mongo diff --git a/src/mongo/db/query/explain_plan.h b/src/mongo/db/query/explain_plan.h index 7dba04d3e9d..e73bc44adec 100644 --- a/src/mongo/db/query/explain_plan.h +++ b/src/mongo/db/query/explain_plan.h @@ -52,6 +52,6 @@ namespace 
mongo { */ Status explainPlan(const PlanStageStats& stats, TypeExplain** explain, bool fullDetails); - BSONObj statsToBSON(const PlanStageStats& stats); + void statsToBSON(const PlanStageStats& stats, BSONObjBuilder* bob); } // namespace mongo diff --git a/src/mongo/db/query/multi_plan_runner.cpp b/src/mongo/db/query/multi_plan_runner.cpp index c5d5caa8fc6..5a74050e948 100644 --- a/src/mongo/db/query/multi_plan_runner.cpp +++ b/src/mongo/db/query/multi_plan_runner.cpp @@ -28,6 +28,7 @@ #include "mongo/db/query/multi_plan_runner.h" +#include <memory> #include "mongo/db/client.h" #include "mongo/db/catalog/database.h" #include "mongo/db/diskloc.h" @@ -319,9 +320,15 @@ namespace mongo { if (_failure || _killed) { return false; } - auto_ptr<PlanRankingDecision> ranking(new PlanRankingDecision()); + // After picking best plan, ranking will own plan stats from + // candidate solutions (winner and losers). + std::auto_ptr<PlanRankingDecision> ranking(new PlanRankingDecision); size_t bestChild = PlanRanker::pickBestPlan(_candidates, ranking.get()); + // Copy candidate order. We will need this to sort candidate stats for explain + // after transferring ownership of 'ranking' to plan cache. + std::vector<size_t> candidateOrder = ranking->candidateOrder; + // Run the best plan. Store it. _bestPlan.reset(new PlanExecutor(_candidates[bestChild].ws, _candidates[bestChild].root)); @@ -357,9 +364,12 @@ namespace mongo { // Create list of candidate solutions for the cache with // the best solution at the front. std::vector<QuerySolution*> solutions; - solutions.push_back(_bestSolution.get()); - for (size_t i = 0; i < _candidates.size(); ++i) { - if (i == bestChild) { continue; } + + // Generate solutions and ranking decisions sorted by score. + for (size_t orderingIndex = 0; + orderingIndex < candidateOrder.size(); ++orderingIndex) { + // index into candidates/ranking + size_t i = candidateOrder[orderingIndex]; solutions.push_back(_candidates[i].solution); } @@ -382,7 +392,12 @@ namespace mongo { } // Clear out the candidate plans, leaving only stats as we're all done w/them. - for (size_t i = 0; i < _candidates.size(); ++i) { + // Traverse candidate plans in order or score + for (size_t orderingIndex = 0; + orderingIndex < candidateOrder.size(); ++orderingIndex) { + // index into candidates/ranking + size_t i = candidateOrder[orderingIndex]; + if (i == bestChild) { continue; } if (i == backupChild) { continue; } diff --git a/src/mongo/db/query/plan_cache.cpp b/src/mongo/db/query/plan_cache.cpp index 978b7603a17..340f3456cb9 100644 --- a/src/mongo/db/query/plan_cache.cpp +++ b/src/mongo/db/query/plan_cache.cpp @@ -32,6 +32,7 @@ #include <math.h> #include <memory> #include "boost/thread/locks.hpp" +#include "mongo/base/owned_pointer_vector.h" #include "mongo/db/query/plan_ranker.h" #include "mongo/db/query/query_solution.h" #include "mongo/db/query/qlog.h" @@ -84,9 +85,9 @@ namespace mongo { : plannerData(entry.plannerData.size()), backupSoln(entry.backupSoln), key(key), - query(entry.query.copy()), - sort(entry.sort.copy()), - projection(entry.projection.copy()) { + query(entry.query.getOwned()), + sort(entry.sort.getOwned()), + projection(entry.projection.getOwned()) { // CachedSolution should not having any references into // cache entry. All relevant data should be cloned/copied. 
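The multi_plan_runner.cpp hunk above stops special-casing the winner and instead walks ranking->candidateOrder, so the cached solutions come out sorted by score. A condensed sketch of that traversal ('_candidates' and 'ranking' are the member and local from that hunk):

```cpp
// Condensed from the traversal added in multi_plan_runner.cpp.
// candidateOrder[0] indexes the winning candidate, candidateOrder[1] the runner-up, etc.
std::vector<size_t> candidateOrder = ranking->candidateOrder;  // copied before the plan cache
                                                               // takes ownership of 'ranking'
std::vector<QuerySolution*> solutions;
for (size_t orderingIndex = 0; orderingIndex < candidateOrder.size(); ++orderingIndex) {
    size_t i = candidateOrder[orderingIndex];        // index into _candidates
    solutions.push_back(_candidates[i].solution);    // best solution ends up at the front
}
```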
for (size_t i = 0; i < entry.plannerData.size(); ++i) { @@ -108,19 +109,20 @@ namespace mongo { // PlanCacheEntry::PlanCacheEntry(const std::vector<QuerySolution*>& solutions, - PlanRankingDecision* d) - : plannerData(solutions.size()) { + PlanRankingDecision* why) + : plannerData(solutions.size()), + decision(why) { + invariant(why); + // The caller of this constructor is responsible for ensuring // that the QuerySolution 's' has valid cacheData. If there's no // data to cache you shouldn't be trying to construct a PlanCacheEntry. // Copy the solution's cache data into the plan cache entry. for (size_t i = 0; i < solutions.size(); ++i) { - verify(solutions[i]->cacheData.get()); + invariant(solutions[i]->cacheData.get()); plannerData[i] = solutions[i]->cacheData->clone(); } - - decision.reset(d); } PlanCacheEntry::~PlanCacheEntry() { @@ -132,6 +134,34 @@ namespace mongo { } } + PlanCacheEntry* PlanCacheEntry::clone() const { + OwnedPointerVector<QuerySolution> solutions; + for (size_t i = 0; i < plannerData.size(); ++i) { + QuerySolution* qs = new QuerySolution(); + qs->cacheData.reset(plannerData[i]->clone()); + solutions.mutableVector().push_back(qs); + } + PlanCacheEntry* entry = new PlanCacheEntry(solutions.vector(), decision->clone()); + + entry->backupSoln = backupSoln; + + // Copy query shape. + entry->query = query.getOwned(); + entry->sort = sort.getOwned(); + entry->projection = projection.getOwned(); + + // Copy performance stats. + for (size_t i = 0; i < feedback.size(); ++i) { + PlanCacheEntryFeedback* fb = new PlanCacheEntryFeedback(); + fb->stats.reset(feedback[i]->stats->clone()); + fb->score = feedback[i]->score; + entry->feedback.push_back(fb); + } + entry->averageScore = averageScore; + entry->stddevScore = stddevScore; + return entry; + } + string PlanCacheEntry::toString() const { mongoutils::str::stream ss; ss << "(query: " << query.toString() @@ -247,17 +277,32 @@ namespace mongo { Status PlanCache::add(const CanonicalQuery& query, const std::vector<QuerySolution*>& solns, PlanRankingDecision* why) { - verify(why); + invariant(why); if (solns.empty()) { return Status(ErrorCodes::BadValue, "no solutions provided"); } + if (why->stats.size() != solns.size()) { + return Status(ErrorCodes::BadValue, + "number of stats in decision must match solutions"); + } + + if (why->scores.size() != solns.size()) { + return Status(ErrorCodes::BadValue, + "number of scores in decision must match solutions"); + } + + if (why->candidateOrder.size() != solns.size()) { + return Status(ErrorCodes::BadValue, + "candidate ordering entries in decision must match solutions"); + } + PlanCacheEntry* entry = new PlanCacheEntry(solns, why); const LiteParsedQuery& pq = query.getParsed(); - entry->query = pq.getFilter().copy(); - entry->sort = pq.getSort().copy(); - entry->projection = pq.getProj().copy(); + entry->query = pq.getFilter().getOwned(); + entry->sort = pq.getSort().getOwned(); + entry->projection = pq.getProj().getOwned(); // If the winning solution uses a blocking sort, then try and // find a fallback solution that has no blocking sort. @@ -328,7 +373,7 @@ namespace mongo { // If the score has gotten more than a standard deviation lower than // its initial value, we should uncache the entry. 
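PlanCache::add() now insists that the decision it receives is sized to the solutions vector: one stats tree, one score, and one candidateOrder entry per plan, which is exactly what the createDecision() test helper produces. A condensed sketch of that contract as the tests exercise it (qsA, qsB, cq and planCache are the usual test fixtures):

```cpp
// Condensed from the updated tests: the decision must describe every solution.
std::vector<QuerySolution*> solns;
solns.push_back(&qsA);
solns.push_back(&qsB);
ASSERT_OK(planCache.add(*cq, solns, createDecision(2U)));  // two plans -> createDecision(2U)
// A mismatched decision (e.g. createDecision(1U) here) is rejected with BadValue.
```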
- double initialScore = entry->decision->score; + double initialScore = entry->decision->scores[0]; if ((initialScore - mean) > (PlanCacheEntry::kStdDevThreshold * stddev)) { return true; } @@ -398,18 +443,34 @@ namespace mongo { _writeOperations.store(0); } - std::vector<CachedSolution*> PlanCache::getAllSolutions() const { + Status PlanCache::getEntry(const CanonicalQuery& query, PlanCacheEntry** entryOut) const { + const PlanCacheKey& key = query.getPlanCacheKey(); + verify(entryOut); + + boost::lock_guard<boost::mutex> cacheLock(_cacheMutex); + typedef unordered_map<PlanCacheKey, PlanCacheEntry*>::const_iterator ConstIterator; + ConstIterator i = _cache.find(key); + if (i == _cache.end()) { + return Status(ErrorCodes::BadValue, "no such key in cache"); + } + PlanCacheEntry* entry = i->second; + verify(entry); + + *entryOut = entry->clone(); + + return Status::OK(); + } + + std::vector<PlanCacheEntry*> PlanCache::getAllEntries() const { boost::lock_guard<boost::mutex> cacheLock(_cacheMutex); - std::vector<CachedSolution*> solutions; + std::vector<PlanCacheEntry*> entries; typedef unordered_map<PlanCacheKey, PlanCacheEntry*>::const_iterator ConstIterator; for (ConstIterator i = _cache.begin(); i != _cache.end(); i++) { - const PlanCacheKey& key = i->first; PlanCacheEntry* entry = i->second; - CachedSolution* cs = new CachedSolution(key, *entry); - solutions.push_back(cs); + entries.push_back(entry->clone()); } - return solutions; + return entries; } size_t PlanCache::size() const { diff --git a/src/mongo/db/query/plan_cache.h b/src/mongo/db/query/plan_cache.h index 9438f5fa88b..09a28f1fe1d 100644 --- a/src/mongo/db/query/plan_cache.h +++ b/src/mongo/db/query/plan_cache.h @@ -205,25 +205,28 @@ namespace mongo { }; /** - * Used internally by the cache to track entries and their performance over time. + * Used by the cache to track entries and their performance over time. + * Also used by the plan cache commands to display plan cache state. */ class PlanCacheEntry { private: MONGO_DISALLOW_COPYING(PlanCacheEntry); public: - // TODO: Do we want to store more information about the query here? - /** * Create a new PlanCacheEntry. * Grabs any planner-specific data required from the solutions. * Takes ownership of the PlanRankingDecision that placed the plan in the cache. - * XXX: what else should this take? */ PlanCacheEntry(const std::vector<QuerySolution*>& solutions, - PlanRankingDecision* d); + PlanRankingDecision* why); ~PlanCacheEntry(); + /** + * Make a deep copy. + */ + PlanCacheEntry* clone() const; + // For debugging. std::string toString() const; @@ -251,8 +254,8 @@ namespace mongo { // Performance stats // - // Why the best solution was picked. - // TODO: Do we want to store other information like the other plans considered? + // Information that went into picking the winning plan and also why + // the other plans lost. boost::scoped_ptr<PlanRankingDecision> decision; // Annotations from cached runs. The CachedSolutionRunner provides these stats about its @@ -299,16 +302,6 @@ namespace mongo { */ static bool shouldCacheQuery(const CanonicalQuery& query); - /** - * Generates a key for a normalized (for caching) canonical query - * from the match expression and sort order. - * This is an expensive operation because it clones and sorts - * the expression tree in order to generate a string from - * the normalized expression tree. The string generation is also - * potentially expensive. 
- */ - static PlanCacheKey getPlanCacheKey(const CanonicalQuery& query); - PlanCache() { } ~PlanCache(); @@ -369,11 +362,23 @@ namespace mongo { void clear(); /** - * Returns a vector of all cached solutions. + * Returns a copy of a cache entry. + * Used by planCacheListPlans to display plan details. + * + * If there is no entry in the cache for the 'query', returns an error Status. + * + * If there is an entry in the cache, populates 'entryOut' and returns Status::OK(). Caller + * owns '*entryOut'. + */ + Status getEntry(const CanonicalQuery& cq, PlanCacheEntry** entryOut) const; + + /** + * Returns a vector of all cache entries. * Caller owns the result vector and is responsible for cleaning up - * the cached solutions. + * the cache entry copies. + * Used by planCacheListQueryShapes and hint_commands_test.cpp. */ - std::vector<CachedSolution*> getAllSolutions() const; + std::vector<PlanCacheEntry*> getAllEntries() const; /** * Returns number of entries in cache. diff --git a/src/mongo/db/query/plan_cache_test.cpp b/src/mongo/db/query/plan_cache_test.cpp index 4d27a0bf0bb..7dd272276e6 100644 --- a/src/mongo/db/query/plan_cache_test.cpp +++ b/src/mongo/db/query/plan_cache_test.cpp @@ -144,15 +144,18 @@ namespace { }; /** - * Clean up query solutions vector. + * Utility function to create a PlanRankingDecision */ - void deleteQuerySolutions(std::vector<QuerySolution*>* solns) { - for (std::vector<QuerySolution*>::const_iterator i = solns->begin(); - i != solns->end(); ++i) { - QuerySolution* qs = *i; - delete qs; + PlanRankingDecision* createDecision(size_t numPlans) { + auto_ptr<PlanRankingDecision> why(new PlanRankingDecision()); + for (size_t i = 0; i < numPlans; ++i) { + auto_ptr<PlanStageStats> stats(new PlanStageStats(CommonStats(), STAGE_COLLSCAN)); + stats->specific.reset(new CollectionScanStats()); + why->stats.mutableVector().push_back(stats.release()); + why->scores.push_back(0U); + why->candidateOrder.push_back(i); } - solns->clear(); + return why.release(); } /** @@ -248,7 +251,7 @@ namespace { PlanCache planCache; auto_ptr<CanonicalQuery> cq(canonicalize("{a: 1}")); std::vector<QuerySolution*> solns; - ASSERT_NOT_OK(planCache.add(*cq, solns, new PlanRankingDecision())); + ASSERT_NOT_OK(planCache.add(*cq, solns, createDecision(1U))); } TEST(PlanCacheTest, AddValidSolution) { @@ -259,7 +262,7 @@ namespace { qs.cacheData->tree.reset(new PlanCacheIndexTree()); std::vector<QuerySolution*> solns; solns.push_back(&qs); - ASSERT_OK(planCache.add(*cq, solns, new PlanRankingDecision())); + ASSERT_OK(planCache.add(*cq, solns, createDecision(1U))); ASSERT_EQUALS(planCache.size(), 1U); } @@ -271,7 +274,7 @@ namespace { qs.cacheData->tree.reset(new PlanCacheIndexTree()); std::vector<QuerySolution*> solns; solns.push_back(&qs); - ASSERT_OK(planCache.add(*cq, solns, new PlanRankingDecision())); + ASSERT_OK(planCache.add(*cq, solns, createDecision(1U))); ASSERT_EQUALS(planCache.size(), 1U); // First (PlanCache::kPlanCacheMaxWriteOperations - 1) notifications should have @@ -292,13 +295,13 @@ namespace { // Add cache entry again. // After clearing and adding a new entry, the next write operation should not // clear the cache. 
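The new PlanCache::getEntry() declared above returns a deep copy that the caller owns, which is how planCacheListPlans can format an entry without touching the live cache. A minimal sketch of that contract; the surrounding function is hypothetical:

```cpp
// Hypothetical caller illustrating the ownership contract of PlanCache::getEntry().
Status listPlansForShape(const PlanCache& planCache, const CanonicalQuery& cq) {
    PlanCacheEntry* entryRaw;
    Status result = planCache.getEntry(cq, &entryRaw);
    if (!result.isOK()) {
        return result;  // e.g. no such key in cache
    }
    boost::scoped_ptr<PlanCacheEntry> entry(entryRaw);  // caller owns (and frees) the clone
    // entry->plannerData, entry->decision and entry->feedback are deep copies,
    // detached from the cached entry itself.
    return Status::OK();
}
```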
- ASSERT_OK(planCache.add(*cq, solns, new PlanRankingDecision())); + ASSERT_OK(planCache.add(*cq, solns, createDecision(1U))); for (int i = 0; i < (PlanCache::kPlanCacheMaxWriteOperations - 1); ++i) { planCache.notifyOfWriteOp(); } ASSERT_EQUALS(planCache.size(), 1U); planCache.clear(); - ASSERT_OK(planCache.add(*cq, solns, new PlanRankingDecision())); + ASSERT_OK(planCache.add(*cq, solns, createDecision(1U))); // Notification after clearing will not flush cache. planCache.notifyOfWriteOp(); ASSERT_EQUALS(planCache.size(), 1U); @@ -464,7 +467,7 @@ namespace { qs.cacheData.reset(soln.cacheData->clone()); std::vector<QuerySolution*> solutions; solutions.push_back(&qs); - PlanCacheEntry entry(solutions, new PlanRankingDecision()); + PlanCacheEntry entry(solutions, createDecision(1U)); CachedSolution cachedSoln(ck, entry); QuerySolution *out, *backupOut; diff --git a/src/mongo/db/query/plan_ranker.cpp b/src/mongo/db/query/plan_ranker.cpp index d55f46cb1ea..b07dd92ffcc 100644 --- a/src/mongo/db/query/plan_ranker.cpp +++ b/src/mongo/db/query/plan_ranker.cpp @@ -26,7 +26,9 @@ * it in the license file. */ +#include <algorithm> #include <vector> +#include <utility> #include "mongo/db/query/plan_ranker.h" @@ -35,49 +37,71 @@ #include "mongo/db/query/query_solution.h" #include "mongo/db/query/qlog.h" +namespace { + + /** + * Comparator for (scores, candidateIndex) in pickBestPlan(). + */ + bool scoreComparator(const std::pair<double, size_t>& lhs, + const std::pair<double, size_t>& rhs) { + // Just compare score in lhs.first and rhs.first; + // Ignore candidate array index in lhs.second and rhs.second. + return lhs.first > rhs.first; + } + +} // namespace + namespace mongo { + using std::vector; + // static size_t PlanRanker::pickBestPlan(const vector<CandidatePlan>& candidates, PlanRankingDecision* why) { + invariant(!candidates.empty()); + invariant(why); + // Each plan will have a stat tree. vector<PlanStageStats*> statTrees; // Get stat trees from each plan. + // Copy stats trees instead of transferring ownership + // because multi plan runner will need its own stats + // trees for explain. for (size_t i = 0; i < candidates.size(); ++i) { statTrees.push_back(candidates[i].root->getStats()); } + // Holds (score, candidateInndex). + // Used to derive scores and candidate ordering. + vector<std::pair<double, size_t> > scoresAndCandidateindices; + // Compute score for each tree. Record the best. - double maxScore = 0; - size_t bestChild = numeric_limits<size_t>::max(); for (size_t i = 0; i < statTrees.size(); ++i) { QLOG() << "scoring plan " << i << ":\n" << candidates[i].solution->toString(); double score = scoreTree(statTrees[i]); QLOG() << "score = " << score << endl; - why->score = score; - if (score > maxScore) { - maxScore = score; - bestChild = i; - } - } - - // Make sure we got something. - verify(numeric_limits<size_t>::max() != bestChild); - - if (NULL != why) { - // Record the stats of the winner. - why->statsOfWinner = statTrees[bestChild]; + scoresAndCandidateindices.push_back(std::make_pair(score, i)); } - // Clean up stats of losers. - for (size_t i = 0; i < statTrees.size(); ++i) { - // If why is null we're not saving the bestChild's stats and we can delete it. - if (i != bestChild || NULL == why) { - delete statTrees[i]; - } + // Sort (scores, candidateIndex). Get best child and populate candidate ordering. 
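PlanRanker::pickBestPlan() now derives both the winner and the full ordering by sorting (score, candidateIndex) pairs with the scoreComparator shown above. The same idiom reduced to self-contained standard C++ (rankByScore is an invented name):

```cpp
#include <algorithm>
#include <cstddef>
#include <utility>
#include <vector>

// Higher score sorts earlier; the candidate index in 'second' is ignored.
static bool scoreComparator(const std::pair<double, size_t>& lhs,
                            const std::pair<double, size_t>& rhs) {
    return lhs.first > rhs.first;
}

// Returns candidate indices ordered by descending score, winner first.
std::vector<size_t> rankByScore(const std::vector<double>& scores) {
    std::vector<std::pair<double, size_t> > scoresAndIndices;
    for (size_t i = 0; i < scores.size(); ++i) {
        scoresAndIndices.push_back(std::make_pair(scores[i], i));
    }
    // stable_sort keeps the original submission order for tied scores.
    std::stable_sort(scoresAndIndices.begin(), scoresAndIndices.end(), scoreComparator);

    std::vector<size_t> candidateOrder;
    for (size_t i = 0; i < scoresAndIndices.size(); ++i) {
        candidateOrder.push_back(scoresAndIndices[i].second);
    }
    return candidateOrder;   // candidateOrder[0] is the index of the best-scoring plan
}
```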
+ std::stable_sort(scoresAndCandidateindices.begin(), scoresAndCandidateindices.end(), + scoreComparator); + + // Update results in 'why' + // Stats and scores in 'why' are sorted in descending order by score. + why->stats.clear(); + why->scores.clear(); + why->candidateOrder.clear(); + for (size_t i = 0; i < scoresAndCandidateindices.size(); ++i) { + double score = scoresAndCandidateindices[i].first; + size_t candidateIndex = scoresAndCandidateindices[i].second; + why->stats.mutableVector().push_back(statTrees[candidateIndex]); + why->scores.push_back(score); + why->candidateOrder.push_back(candidateIndex); } + size_t bestChild = scoresAndCandidateindices[0].second; return bestChild; } diff --git a/src/mongo/db/query/plan_ranker.h b/src/mongo/db/query/plan_ranker.h index bb3bcee3bdb..17e19490911 100644 --- a/src/mongo/db/query/plan_ranker.h +++ b/src/mongo/db/query/plan_ranker.h @@ -31,6 +31,7 @@ #include <list> #include <vector> +#include "mongo/base/owned_pointer_vector.h" #include "mongo/db/exec/plan_stage.h" #include "mongo/db/exec/plan_stats.h" #include "mongo/db/exec/working_set.h" @@ -48,7 +49,9 @@ namespace mongo { public: /** * Returns index in 'candidates' of which plan is best. - * If 'why' is not NULL, populates it with information relevant to why that plan was picked. + * Populates 'why' with information relevant to how each plan fared in the ranking process. + * Caller owns pointers in 'why'. + * 'candidateOrder' holds indices into candidates ordered by score (winner in first element). */ static size_t pickBestPlan(const vector<CandidatePlan>& candidates, PlanRankingDecision* why); @@ -86,18 +89,36 @@ namespace mongo { * and used by the CachedPlanRunner to compare expected performance with actual. */ struct PlanRankingDecision { - PlanRankingDecision() : statsOfWinner(NULL), score(0), onlyOneSolution(false) { } - + /** + * Make a deep copy. + */ + PlanRankingDecision* clone() const { + PlanRankingDecision* decision = new PlanRankingDecision(); + for (size_t i = 0; i < stats.size(); ++i) { + PlanStageStats* s = stats.vector()[i]; + invariant(s); + decision->stats.mutableVector().push_back(s->clone()); + } + decision->scores = scores; + decision->candidateOrder = candidateOrder; + return decision; + } + + // Stats of all plans sorted in descending order by score. // Owned by us. - PlanStageStats* statsOfWinner; - - // The "goodness" score corresponging to 'statsOfWinner'. - double score; - - bool onlyOneSolution; - - // TODO: We can place anything we want here. What's useful to the cache? What's useful to - // planning and optimization? + OwnedPointerVector<PlanStageStats> stats; + + // The "goodness" score corresponding to 'stats'. + // Sorted in descending order. + std::vector<double> scores; + + // Ordering of original plans in descending of score. + // Filled in by PlanRanker::pickBestPlan(candidates, ...) + // so that candidates[candidateOrder[0]] refers to the best plan + // with corresponding cores[0] and stats[0]. Runner-up would be + // candidates[candidateOrder[1]] followed by + // candidates[candidateOrder[2]], ... 
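After pickBestPlan() fills it in, the three containers in PlanRankingDecision are parallel and sorted by descending score. A short sketch of how a caller lines them up ('candidates' is the vector handed to pickBestPlan()):

```cpp
// Sketch of the parallel-array layout documented above.
PlanRankingDecision why;
size_t best = PlanRanker::pickBestPlan(candidates, &why);
// best == why.candidateOrder[0]; index 0 of every container describes the winner.
const CandidatePlan& winner = candidates[why.candidateOrder[0]];
double winnerScore = why.scores[0];                          // highest score
const PlanStageStats* winnerStats = why.stats.vector()[0];   // its stats tree
// The runner-up is candidates[why.candidateOrder[1]] with why.scores[1], and so on.
```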
+ std::vector<size_t> candidateOrder; }; } // namespace mongo diff --git a/src/mongo/db/query/query_settings.cpp b/src/mongo/db/query/query_settings.cpp index 35ab6de5694..8de33cf4db8 100644 --- a/src/mongo/db/query/query_settings.cpp +++ b/src/mongo/db/query/query_settings.cpp @@ -42,7 +42,7 @@ namespace mongo { for (std::vector<BSONObj>::const_iterator i = indexKeyPatterns.begin(); i != indexKeyPatterns.end(); ++i) { const BSONObj& indexKeyPattern = *i; - this->indexKeyPatterns.push_back(indexKeyPattern.copy()); + this->indexKeyPatterns.push_back(indexKeyPattern.getOwned()); } } @@ -55,13 +55,13 @@ namespace mongo { AllowedIndexEntry::AllowedIndexEntry(const BSONObj& query, const BSONObj& sort, const BSONObj& projection, const std::vector<BSONObj>& indexKeyPatterns) - : query(query.copy()), - sort(sort.copy()), - projection(projection.copy()) { + : query(query.getOwned()), + sort(sort.getOwned()), + projection(projection.getOwned()) { for (std::vector<BSONObj>::const_iterator i = indexKeyPatterns.begin(); i != indexKeyPatterns.end(); ++i) { const BSONObj& indexKeyPattern = *i; - this->indexKeyPatterns.push_back(indexKeyPattern.copy()); + this->indexKeyPatterns.push_back(indexKeyPattern.getOwned()); } } |
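Finally, the commit consistently swaps BSONObj::copy() for getOwned() when stashing query shapes and key patterns. As I read BSONObj's ownership API, getOwned() returns an object backed by owned memory and copies only when the source is not already owned, whereas copy() always allocates; either way the stored object no longer points into the caller's transient buffer. A small hedged sketch (the class is invented):

```cpp
// Hypothetical holder illustrating why getOwned() is used for long-lived members.
class ShapeHolder {
public:
    explicit ShapeHolder(const BSONObj& query)
        : _query(query.getOwned()) { }  // safe to keep after the caller's buffer goes away;
                                        // skips the extra allocation copy() would always make
private:
    BSONObj _query;
};
```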