summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorArun Banala <arun.banala@10gen.com>2019-10-02 15:27:39 +0000
committerevergreen <evergreen@mongodb.com>2019-10-02 15:27:39 +0000
commitbda366f0b0e432ca143bc41da54d8732bd8d03c0 (patch)
tree43b09749ba831ad5f26bf0f8af636cfcb24e2fb4
parenta9674251a5f0fc9785d787f3f7922312c37d4a04 (diff)
downloadmongo-bda366f0b0e432ca143bc41da54d8732bd8d03c0.tar.gz
SERVER-40382 Add a serverStatus metric to report plan cache memory consumption (tags: r4.0.13-rc0, r4.0.13)
(cherry picked from commit 5105fa2377d3e86b2011691d5acbd8c531113929) (cherry picked from commit 5eeba7b2c9dda32a37c2c16ca14edd9a9099a996)
-rw-r--r--jstests/noPassthrough/plan_cache_metrics.js138
-rw-r--r--src/mongo/db/commands/index_filter_commands_test.cpp4
-rw-r--r--src/mongo/db/commands/plan_cache_commands.cpp5
-rw-r--r--src/mongo/db/commands/plan_cache_commands_test.cpp4
-rw-r--r--src/mongo/db/exec/multi_plan.cpp2
-rw-r--r--src/mongo/db/exec/plan_stats.h157
-rw-r--r--src/mongo/db/field_ref.h33
-rw-r--r--src/mongo/db/query/SConscript1
-rw-r--r--src/mongo/db/query/index_entry.h20
-rw-r--r--src/mongo/db/query/plan_cache.cpp136
-rw-r--r--src/mongo/db/query/plan_cache.h86
-rw-r--r--src/mongo/db/query/plan_cache_test.cpp202
-rw-r--r--src/mongo/db/query/plan_ranker.h13
-rw-r--r--src/mongo/util/SConscript20
-rw-r--r--src/mongo/util/container_size_helper.h73
-rw-r--r--src/mongo/util/container_size_helper_test.cpp78
16 files changed, 889 insertions, 83 deletions
diff --git a/jstests/noPassthrough/plan_cache_metrics.js b/jstests/noPassthrough/plan_cache_metrics.js
new file mode 100644
index 00000000000..f2a528f36a8
--- /dev/null
+++ b/jstests/noPassthrough/plan_cache_metrics.js
@@ -0,0 +1,138 @@
+/**
+ * Test that the plan cache memory estimate increases and decreases correctly as plans are added to
+ * and cleared from the cache.
+ */
+(function() {
+ "use strict";
+ const conn = MongoRunner.runMongod({});
+ const db = conn.getDB('test');
+ const coll1 = db.query_metrics1;
+ const coll2 = db.query_metrics2;
+ coll1.drop();
+ coll2.drop();
+
+ const queryObj = {a: {$gte: 99}, b: -1};
+ const projectionObj = {_id: 0, b: 1};
+ const sortObj = {c: -1};
+
+ function getPlanCacheSize() {
+ const serverStatus = assert.commandWorked(db.serverStatus());
+ return serverStatus.metrics.query.planCacheTotalSizeEstimateBytes;
+ }
+
+ function assertCacheLength(coll, length) {
+ assert.eq(coll.getPlanCache().listQueryShapes().length, length);
+ }
+
+ function verifyPlanCacheSizeIncrease(coll) {
+ // Add data and indices.
+ for (let i = 0; i < 100; i++) {
+ assert.commandWorked(coll.insert({a: i, b: -1, c: 1}));
+ }
+ assert.commandWorked(coll.createIndex({a: 1}));
+ assert.commandWorked(coll.createIndex({b: 1}));
+
+ let prevCacheSize = getPlanCacheSize();
+ // Populate plan cache.
+ assert.eq(1,
+ coll.find(queryObj, projectionObj).sort(sortObj).itcount(),
+ 'unexpected document count');
+
+ // Verify that the plan cache entry exists.
+ assertCacheLength(coll, 1);
+
+ // Verify that the plan cache size increased.
+ assert.gt(getPlanCacheSize(), prevCacheSize);
+ prevCacheSize = getPlanCacheSize();
+
+ // Verify that the total plan cache memory consumption estimate increases when 'projection'
+ // plan cache entry is added.
+ assert.eq(1, coll.find(queryObj, projectionObj).itcount(), 'unexpected document count');
+ assert.gt(getPlanCacheSize(), prevCacheSize);
+
+ // Verify that the total plan cache memory consumption estimate increases when 'sort' plan
+ // cache entry is added.
+ prevCacheSize = getPlanCacheSize();
+ assert.eq(1, coll.find(queryObj).sort(sortObj).itcount(), 'unexpected document count');
+ assert.gt(getPlanCacheSize(), prevCacheSize);
+
+ // Verify that the total plan cache memory consumption estimate increases when 'query' plan
+ // cache entry is added.
+ prevCacheSize = getPlanCacheSize();
+ assert.eq(1, coll.find(queryObj).itcount(), 'unexpected document count');
+ assert.gt(getPlanCacheSize(), prevCacheSize);
+ assertCacheLength(coll, 4);
+ }
+
+ function verifyPlanCacheSizeDecrease(coll) {
+ let prevCacheSize = getPlanCacheSize();
+ assertCacheLength(coll, 4);
+
+ // Verify that the total plan cache memory consumption estimate decreases when 'projection'
+ // plan cache entry is cleared.
+ const planCache = coll.getPlanCache();
+ planCache.clearPlansByQuery(queryObj, projectionObj);
+ assertCacheLength(coll, 3);
+ assert.lt(getPlanCacheSize(), prevCacheSize);
+
+ // Verify that the total plan cache memory consumption estimate decreases when 'sort' plan
+ // cache entry is cleared.
+ prevCacheSize = getPlanCacheSize();
+ planCache.clearPlansByQuery(queryObj, undefined, sortObj);
+ assertCacheLength(coll, 2);
+ assert.lt(getPlanCacheSize(), prevCacheSize);
+
+ // Verify that the total plan cache memory consumption estimate decreases when all the
+ // entries for a collection are cleared.
+ prevCacheSize = getPlanCacheSize();
+ planCache.clear();
+ assertCacheLength(coll, 0);
+ assert.lt(getPlanCacheSize(), prevCacheSize);
+ }
+
+ const originalPlanCacheSize = getPlanCacheSize();
+
+ // Verify that the cache size is zero when the database is started.
+ assert.eq(originalPlanCacheSize, 0);
+
+ // Test plan cache size estimates using multiple collections.
+
+    // Verify that the cache size increases when entries are added.
+ verifyPlanCacheSizeIncrease(coll1);
+
+ // Verify that the cache size increases in the presence of cache from another collection.
+ verifyPlanCacheSizeIncrease(coll2);
+
+ // Verify that the cache size decreases as plans are removed from either collection.
+ verifyPlanCacheSizeDecrease(coll2);
+ verifyPlanCacheSizeDecrease(coll1);
+
+    // Verify that cache size gets reset to original size after clearing all the cache entries.
+ assert.eq(getPlanCacheSize(), originalPlanCacheSize);
+
+ // Test by dropping collection.
+
+ let coll = db.query_metrics_drop_coll;
+ coll.drop();
+
+ // Populate cache entries.
+ verifyPlanCacheSizeIncrease(coll);
+
+ // Verify that cache size gets reset to original size after dropping the collection.
+ coll.drop();
+ assert.eq(getPlanCacheSize(), originalPlanCacheSize);
+
+ // Test by dropping indexes.
+
+ coll = db.query_metrics_drop_indexes;
+ coll.drop();
+
+ // Populate cache entries.
+ verifyPlanCacheSizeIncrease(coll);
+
+ // Verify that cache size gets reset to original size after dropping indexes.
+ assert.commandWorked(coll.dropIndexes());
+ assert.eq(getPlanCacheSize(), originalPlanCacheSize);
+
+ MongoRunner.stopMongod(conn);
+})();
diff --git a/src/mongo/db/commands/index_filter_commands_test.cpp b/src/mongo/db/commands/index_filter_commands_test.cpp
index 00c4ff6775a..910d404efec 100644
--- a/src/mongo/db/commands/index_filter_commands_test.cpp
+++ b/src/mongo/db/commands/index_filter_commands_test.cpp
@@ -105,7 +105,7 @@ vector<BSONObj> getFilters(const QuerySettings& querySettings) {
/**
* Utility function to create a PlanRankingDecision
*/
-PlanRankingDecision* createDecision(size_t numPlans) {
+std::unique_ptr<PlanRankingDecision> createDecision(size_t numPlans) {
unique_ptr<PlanRankingDecision> why(new PlanRankingDecision());
for (size_t i = 0; i < numPlans; ++i) {
CommonStats common("COLLSCAN");
@@ -115,7 +115,7 @@ PlanRankingDecision* createDecision(size_t numPlans) {
why->scores.push_back(0U);
why->candidateOrder.push_back(i);
}
- return why.release();
+ return why;
}
/**
diff --git a/src/mongo/db/commands/plan_cache_commands.cpp b/src/mongo/db/commands/plan_cache_commands.cpp
index 0a40549acbf..b48c498916d 100644
--- a/src/mongo/db/commands/plan_cache_commands.cpp
+++ b/src/mongo/db/commands/plan_cache_commands.cpp
@@ -407,9 +407,8 @@ Status PlanCacheListPlans::list(OperationContext* opCtx,
// Create the plan details field. Currently, this is a simple string representation of
// SolutionCacheData.
- SolutionCacheData* scd = entry->plannerData[i];
BSONObjBuilder detailsBob(planBob.subobjStart("details"));
- detailsBob.append("solution", scd->toString());
+ detailsBob.append("solution", entry->plannerData[i]->toString());
detailsBob.doneFast();
// reason is comprised of score and initial stats provided by
@@ -437,7 +436,7 @@ Status PlanCacheListPlans::list(OperationContext* opCtx,
}
feedbackBob.doneFast();
- planBob.append("filterSet", scd->indexFilterApplied);
+ planBob.append("filterSet", entry->plannerData[i]->indexFilterApplied);
}
plansBuilder.doneFast();
diff --git a/src/mongo/db/commands/plan_cache_commands_test.cpp b/src/mongo/db/commands/plan_cache_commands_test.cpp
index 174b64abbb9..09bb15e1abe 100644
--- a/src/mongo/db/commands/plan_cache_commands_test.cpp
+++ b/src/mongo/db/commands/plan_cache_commands_test.cpp
@@ -114,7 +114,7 @@ SolutionCacheData* createSolutionCacheData() {
/**
* Utility function to create a PlanRankingDecision
*/
-PlanRankingDecision* createDecision(size_t numPlans) {
+std::unique_ptr<PlanRankingDecision> createDecision(size_t numPlans) {
unique_ptr<PlanRankingDecision> why(new PlanRankingDecision());
for (size_t i = 0; i < numPlans; ++i) {
CommonStats common("COLLSCAN");
@@ -124,7 +124,7 @@ PlanRankingDecision* createDecision(size_t numPlans) {
why->scores.push_back(0U);
why->candidateOrder.push_back(i);
}
- return why.release();
+ return why;
}
TEST(PlanCacheCommandsTest, planCacheListQueryShapesEmpty) {
diff --git a/src/mongo/db/exec/multi_plan.cpp b/src/mongo/db/exec/multi_plan.cpp
index c7cb07c7f5b..5d7bd1a26bc 100644
--- a/src/mongo/db/exec/multi_plan.cpp
+++ b/src/mongo/db/exec/multi_plan.cpp
@@ -330,7 +330,7 @@ Status MultiPlanStage::pickBestPlan(PlanYieldPolicy* yieldPolicy) {
->getPlanCache()
->add(*_query,
solutions,
- ranking.release(),
+ std::move(ranking),
getOpCtx()->getServiceContext()->getPreciseClockSource()->now())
.transitional_ignore();
}
diff --git a/src/mongo/db/exec/plan_stats.h b/src/mongo/db/exec/plan_stats.h
index b6f81b5b901..13e74cc3d9b 100644
--- a/src/mongo/db/exec/plan_stats.h
+++ b/src/mongo/db/exec/plan_stats.h
@@ -39,6 +39,7 @@
#include "mongo/db/index/multikey_paths.h"
#include "mongo/db/jsobj.h"
#include "mongo/db/query/stage_types.h"
+#include "mongo/util/container_size_helper.h"
#include "mongo/util/time_support.h"
namespace mongo {
@@ -53,6 +54,8 @@ struct SpecificStats {
* Make a deep copy.
*/
virtual SpecificStats* clone() const = 0;
+
+ virtual uint64_t estimateObjectSizeInBytes() const = 0;
};
// Every stage has CommonStats.
@@ -68,6 +71,10 @@ struct CommonStats {
needYield(0),
executionTimeMillis(0),
isEOF(false) {}
+
+ uint64_t estimateObjectSizeInBytes() const {
+ return filter.objsize() + sizeof(*this);
+ }
// String giving the type of the stage. Not owned.
const char* stageTypeStr;
@@ -123,6 +130,20 @@ struct PlanStageStats {
return stats;
}
+ uint64_t estimateObjectSizeInBytes() const {
+ return // Add size of each element in 'children' vector.
+ container_size_helper::estimateObjectSizeInBytes(
+ children,
+ [](const auto& child) { return child->estimateObjectSizeInBytes(); },
+ true) +
+            // Exclude the static size of the 'common' object since it is already included in
+            // 'sizeof(*this)' below.
+ (common.estimateObjectSizeInBytes() - sizeof(common)) +
+ // Add 'specific' object size if exists.
+ (specific ? specific->estimateObjectSizeInBytes() : 0) +
+ // Add size of the object.
+ sizeof(*this);
+ }
+
// See query/stage_type.h
StageType stageType;
@@ -154,6 +175,10 @@ struct AndHashStats : public SpecificStats {
// How many results were mid-AND but got flagged?
size_t flaggedInProgress;
+ uint64_t estimateObjectSizeInBytes() const {
+ return container_size_helper::estimateObjectSizeInBytes(mapAfterChild) + sizeof(*this);
+ }
+
// How many entries are in the map after each child?
// child 'i' produced children[i].common.advanced RecordIds, of which mapAfterChild[i] were
// intersections.
@@ -177,6 +202,10 @@ struct AndSortedStats : public SpecificStats {
return specific;
}
+ uint64_t estimateObjectSizeInBytes() const {
+ return container_size_helper::estimateObjectSizeInBytes(failedAnd) + sizeof(*this);
+ }
+
// How many results from each child did not pass the AND?
std::vector<size_t> failedAnd;
@@ -191,6 +220,10 @@ struct CachedPlanStats : public SpecificStats {
return new CachedPlanStats(*this);
}
+ uint64_t estimateObjectSizeInBytes() const {
+ return sizeof(*this);
+ }
+
bool replanned;
};
@@ -202,6 +235,11 @@ struct CollectionScanStats : public SpecificStats {
return specific;
}
+ uint64_t estimateObjectSizeInBytes() const {
+ return sizeof(*this);
+ }
+
+
// How many documents did we check against our filter?
size_t docsTested;
@@ -223,6 +261,10 @@ struct CountStats : public SpecificStats {
return specific;
}
+ uint64_t estimateObjectSizeInBytes() const {
+ return sizeof(*this);
+ }
+
// The result of the count.
long long nCounted;
@@ -252,6 +294,18 @@ struct CountScanStats : public SpecificStats {
return specific;
}
+ uint64_t estimateObjectSizeInBytes() const {
+ return container_size_helper::estimateObjectSizeInBytes(
+ multiKeyPaths,
+ [](const auto& keyPath) {
+ // Calculate the size of each std::set in 'multiKeyPaths'.
+ return container_size_helper::estimateObjectSizeInBytes(keyPath);
+ },
+ true) +
+ keyPattern.objsize() + collation.objsize() + startKey.objsize() + endKey.objsize() +
+ indexName.capacity() + sizeof(*this);
+ }
+
std::string indexName;
BSONObj keyPattern;
@@ -294,6 +348,10 @@ struct DeleteStats : public SpecificStats {
// Invalidated documents can be force-fetched, causing the now invalid RecordId to
// be thrown out. The delete stage skips over any results which do not have a RecordId.
size_t nInvalidateSkips;
+
+ uint64_t estimateObjectSizeInBytes() const {
+ return sizeof(*this);
+ }
};
struct DistinctScanStats : public SpecificStats {
@@ -306,6 +364,18 @@ struct DistinctScanStats : public SpecificStats {
return specific;
}
+ uint64_t estimateObjectSizeInBytes() const {
+ return container_size_helper::estimateObjectSizeInBytes(
+ multiKeyPaths,
+ [](const auto& keyPath) {
+ // Calculate the size of each std::set in 'multiKeyPaths'.
+ return container_size_helper::estimateObjectSizeInBytes(keyPath);
+ },
+ true) +
+ keyPattern.objsize() + collation.objsize() + indexBounds.objsize() +
+ indexName.capacity() + sizeof(*this);
+ }
+
// How many keys did we look at while distinct-ing?
size_t keysExamined = 0;
@@ -342,6 +412,10 @@ struct EnsureSortedStats : public SpecificStats {
return specific;
}
+ uint64_t estimateObjectSizeInBytes() const {
+ return sizeof(*this);
+ }
+
// The number of out-of-order results that were dropped.
long long nDropped;
};
@@ -354,6 +428,10 @@ struct FetchStats : public SpecificStats {
return specific;
}
+ uint64_t estimateObjectSizeInBytes() const {
+ return sizeof(*this);
+ }
+
// Have we seen anything that already had an object?
size_t alreadyHasObj;
@@ -372,6 +450,10 @@ struct GroupStats : public SpecificStats {
return specific;
}
+ uint64_t estimateObjectSizeInBytes() const {
+ return sizeof(*this);
+ }
+
// The total number of groups.
size_t nGroups;
};
@@ -384,6 +466,10 @@ struct IDHackStats : public SpecificStats {
return specific;
}
+ uint64_t estimateObjectSizeInBytes() const {
+ return indexName.capacity() + sizeof(*this);
+ }
+
std::string indexName;
// Number of entries retrieved from the index while executing the idhack.
@@ -416,6 +502,18 @@ struct IndexScanStats : public SpecificStats {
return specific;
}
+ uint64_t estimateObjectSizeInBytes() const {
+ return container_size_helper::estimateObjectSizeInBytes(
+ multiKeyPaths,
+ [](const auto& keyPath) {
+ // Calculate the size of each std::set in 'multiKeyPaths'.
+ return container_size_helper::estimateObjectSizeInBytes(keyPath);
+ },
+ true) +
+ keyPattern.objsize() + collation.objsize() + indexBounds.objsize() +
+ indexName.capacity() + indexType.capacity() + sizeof(*this);
+ }
+
// Index type being used.
std::string indexType;
@@ -468,6 +566,10 @@ struct LimitStats : public SpecificStats {
return specific;
}
+ uint64_t estimateObjectSizeInBytes() const {
+ return sizeof(*this);
+ }
+
size_t limit;
};
@@ -477,6 +579,10 @@ struct MockStats : public SpecificStats {
SpecificStats* clone() const final {
return new MockStats(*this);
}
+
+ uint64_t estimateObjectSizeInBytes() const {
+ return sizeof(*this);
+ }
};
struct MultiPlanStats : public SpecificStats {
@@ -485,6 +591,10 @@ struct MultiPlanStats : public SpecificStats {
SpecificStats* clone() const final {
return new MultiPlanStats(*this);
}
+
+ uint64_t estimateObjectSizeInBytes() const {
+ return sizeof(*this);
+ }
};
struct OrStats : public SpecificStats {
@@ -500,6 +610,10 @@ struct OrStats : public SpecificStats {
// How many calls to invalidate(...) actually removed a RecordId from our deduping map?
size_t recordIdsForgotten;
+
+ uint64_t estimateObjectSizeInBytes() const {
+ return sizeof(*this);
+ }
};
struct ProjectionStats : public SpecificStats {
@@ -510,6 +624,10 @@ struct ProjectionStats : public SpecificStats {
return specific;
}
+ uint64_t estimateObjectSizeInBytes() const {
+ return projObj.objsize() + sizeof(*this);
+ }
+
// Object specifying the projection transformation to apply.
BSONObj projObj;
};
@@ -525,6 +643,10 @@ struct SortStats : public SpecificStats {
// How many records were we forced to fetch as the result of an invalidation?
size_t forcedFetches;
+ uint64_t estimateObjectSizeInBytes() const {
+ return sortPattern.objsize() + sizeof(*this);
+ }
+
// What's our current memory usage?
size_t memUsage;
@@ -552,6 +674,10 @@ struct MergeSortStats : public SpecificStats {
// How many records were we forced to fetch as the result of an invalidation?
size_t forcedFetches;
+ uint64_t estimateObjectSizeInBytes() const {
+ return sortPattern.objsize() + sizeof(*this);
+ }
+
// The pattern according to which we are sorting.
BSONObj sortPattern;
};
@@ -564,6 +690,10 @@ struct ShardingFilterStats : public SpecificStats {
return specific;
}
+ uint64_t estimateObjectSizeInBytes() const {
+ return sizeof(*this);
+ }
+
size_t chunkSkips;
};
@@ -575,6 +705,10 @@ struct SkipStats : public SpecificStats {
return specific;
}
+ uint64_t estimateObjectSizeInBytes() const {
+ return sizeof(*this);
+ }
+
size_t skip;
};
@@ -599,6 +733,11 @@ struct NearStats : public SpecificStats {
return new NearStats(*this);
}
+ uint64_t estimateObjectSizeInBytes() const {
+ return container_size_helper::estimateObjectSizeInBytes(intervalStats) +
+ keyPattern.objsize() + indexName.capacity() + sizeof(*this);
+ }
+
std::vector<IntervalStats> intervalStats;
std::string indexName;
// btree index version, not geo index version
@@ -619,6 +758,10 @@ struct UpdateStats : public SpecificStats {
return new UpdateStats(*this);
}
+ uint64_t estimateObjectSizeInBytes() const {
+ return objInserted.objsize() + sizeof(*this);
+ }
+
// The number of documents which match the query part of the update.
size_t nMatched;
@@ -653,6 +796,11 @@ struct TextStats : public SpecificStats {
return specific;
}
+ uint64_t estimateObjectSizeInBytes() const {
+ return parsedTextQuery.objsize() + indexPrefix.objsize() + indexName.capacity() +
+ sizeof(*this);
+ }
+
std::string indexName;
// Human-readable form of the FTSQuery associated with the text stage.
@@ -672,6 +820,10 @@ struct TextMatchStats : public SpecificStats {
return specific;
}
+ uint64_t estimateObjectSizeInBytes() const {
+ return sizeof(*this);
+ }
+
size_t docsRejected;
};
@@ -683,7 +835,10 @@ struct TextOrStats : public SpecificStats {
return specific;
}
+ uint64_t estimateObjectSizeInBytes() const {
+ return sizeof(*this);
+ }
+
size_t fetches;
};
-
} // namespace mongo
diff --git a/src/mongo/db/field_ref.h b/src/mongo/db/field_ref.h
index c1a50aaa969..39c985ec623 100644
--- a/src/mongo/db/field_ref.h
+++ b/src/mongo/db/field_ref.h
@@ -37,6 +37,7 @@
#include "mongo/base/disallow_copying.h"
#include "mongo/base/string_data.h"
+#include "mongo/util/container_size_helper.h"
namespace mongo {
@@ -59,8 +60,8 @@ public:
explicit FieldRef(StringData path);
/**
- * Field parts accessed through getPart() calls no longer would be valid, after the
- * destructor ran.
+ * Field parts accessed through getPart() calls no longer would be valid, after the destructor
+ * ran.
*/
~FieldRef() {}
@@ -70,8 +71,8 @@ public:
void parse(StringData dottedField);
/**
- * Sets the 'i-th' field part to point to 'part'. Assumes i < size(). Behavior is
- * undefined otherwise.
+ * Sets the 'i-th' field part to point to 'part'. Assumes i < size(). Behavior is undefined
+ * otherwise.
*/
void setPart(size_t i, StringData part);
@@ -81,8 +82,8 @@ public:
void appendPart(StringData part);
/**
- * Removes the last part from the path, decreasing its size by 1. Has no effect on a
- * FieldRef with size 0.
+ * Removes the last part from the path, decreasing its size by 1. Has no effect on a FieldRef
+ * with size 0.
*/
void removeLastPart();
@@ -109,8 +110,8 @@ public:
StringData dottedField(size_t offsetFromStart = 0) const;
/**
- * Returns a StringData of parts of the dotted field from startPart to endPart in its
- * current state (i.e., some parts may have been replaced since the parse() call).
+ * Returns a StringData of parts of the dotted field from startPart to endPart in its current
+ * state (i.e., some parts may have been replaced since the parse() call).
*/
StringData dottedSubstring(size_t startPart, size_t endPart) const;
@@ -120,8 +121,8 @@ public:
bool equalsDottedField(StringData other) const;
/**
- * Return 0 if 'this' is equal to 'other' lexicographically, -1 if is it less than or
- * +1 if it is greater than.
+ * Return 0 if 'this' is equal to 'other' lexicographically, -1 if is it less than or +1 if it
+ * is greater than.
*/
int compare(const FieldRef& other) const;
@@ -145,6 +146,18 @@ public:
return numParts() == 0;
}
+ uint64_t estimateObjectSizeInBytes() const {
+ return // Add size of each element in '_replacements' vector.
+ container_size_helper::estimateObjectSizeInBytes(
+ _replacements, [](const std::string& s) { return s.capacity(); }, true) +
+ // Add size of each element in '_variable' vector.
+ container_size_helper::estimateObjectSizeInBytes(_variable) +
+ // Add runtime size of '_dotted' string.
+ _dotted.capacity() +
+ // Add size of the object.
+ sizeof(*this);
+ }
+
private:
// Dotted fields are most often not longer than four parts. We use a mixed structure
// here that will not require any extra memory allocation when that is the case. And
diff --git a/src/mongo/db/query/SConscript b/src/mongo/db/query/SConscript
index 486dda916b9..f4a37ab200d 100644
--- a/src/mongo/db/query/SConscript
+++ b/src/mongo/db/query/SConscript
@@ -35,6 +35,7 @@ env.Library(
LIBDEPS=[
"$BUILD_DIR/mongo/base",
"$BUILD_DIR/mongo/db/bson/dotted_path_support",
+ '$BUILD_DIR/mongo/db/commands/server_status_core',
"$BUILD_DIR/mongo/db/index/expression_params",
"$BUILD_DIR/mongo/db/index_names",
"$BUILD_DIR/mongo/db/matcher/expressions",
diff --git a/src/mongo/db/query/index_entry.h b/src/mongo/db/query/index_entry.h
index 5f6786640b9..18978d53f70 100644
--- a/src/mongo/db/query/index_entry.h
+++ b/src/mongo/db/query/index_entry.h
@@ -35,6 +35,7 @@
#include "mongo/db/index/multikey_paths.h"
#include "mongo/db/index_names.h"
#include "mongo/db/jsobj.h"
+#include "mongo/util/container_size_helper.h"
#include "mongo/util/mongoutils/str.h"
namespace mongo {
@@ -112,6 +113,25 @@ struct IndexEntry {
std::string toString() const;
+ uint64_t estimateObjectSizeInBytes() const {
+
+ return // For each element in 'multikeyPaths' add the 'length of the vector * size of the
+ // vector element'.
+ container_size_helper::estimateObjectSizeInBytes(
+ multikeyPaths,
+ [](const auto& keyPath) {
+ // Calculate the size of each std::set in 'multiKeyPaths'.
+ return container_size_helper::estimateObjectSizeInBytes(keyPath);
+ },
+ true) +
+ // Add the runtime BSONObj size of 'keyPattern' and capacity of 'name'.
+ keyPattern.objsize() + name.capacity() +
+ // The BSON size of the 'infoObj' is purposefully excluded since its ownership is shared
+ // with the index catalog.
+ // Add size of the object.
+ sizeof(*this);
+ }
+
BSONObj keyPattern;
bool multikey;
diff --git a/src/mongo/db/query/plan_cache.cpp b/src/mongo/db/query/plan_cache.cpp
index 29f8d603976..9be8931f52d 100644
--- a/src/mongo/db/query/plan_cache.cpp
+++ b/src/mongo/db/query/plan_cache.cpp
@@ -43,6 +43,7 @@
#include "mongo/base/owned_pointer_vector.h"
#include "mongo/client/dbclientinterface.h" // For QueryOption_foobar
+#include "mongo/db/commands/server_status_metric.h"
#include "mongo/db/matcher/expression_array.h"
#include "mongo/db/matcher/expression_geo.h"
#include "mongo/db/query/collation/collator_interface.h"
@@ -56,8 +57,13 @@
#include "mongo/util/transitional_tools_do_not_use/vector_spooling.h"
namespace mongo {
+
+Counter64 PlanCacheEntry::planCacheTotalSizeEstimateBytes;
namespace {
+ServerStatusMetricField<Counter64> totalPlanCacheSizeEstimateBytesMetric(
+ "query.planCacheTotalSizeEstimateBytes", &PlanCacheEntry::planCacheTotalSizeEstimateBytes);
+
// Delimiters for cache key encoding.
const char kEncodeChildrenBegin = '[';
const char kEncodeChildrenEnd = ']';
@@ -436,47 +442,89 @@ CachedSolution::~CachedSolution() {
// PlanCacheEntry
//
-PlanCacheEntry::PlanCacheEntry(const std::vector<QuerySolution*>& solutions,
- PlanRankingDecision* why)
- : plannerData(solutions.size()), decision(why) {
- invariant(why);
+std::unique_ptr<PlanCacheEntry> PlanCacheEntry::create(
+ const std::vector<QuerySolution*>& solutions,
+ std::unique_ptr<const PlanRankingDecision> decision,
+ const CanonicalQuery& query,
+ Date_t timeOfCreation) {
+ invariant(decision);
// The caller of this constructor is responsible for ensuring
// that the QuerySolution 's' has valid cacheData. If there's no
// data to cache you shouldn't be trying to construct a PlanCacheEntry.
// Copy the solution's cache data into the plan cache entry.
+ std::vector<std::unique_ptr<const SolutionCacheData>> solutionCacheData(solutions.size());
for (size_t i = 0; i < solutions.size(); ++i) {
invariant(solutions[i]->cacheData.get());
- plannerData[i] = solutions[i]->cacheData->clone();
+ solutionCacheData[i] =
+ std::unique_ptr<const SolutionCacheData>(solutions[i]->cacheData->clone());
}
+
+ const QueryRequest& qr = query.getQueryRequest();
+ BSONObjBuilder projBuilder;
+ for (auto elem : qr.getProj()) {
+ if (elem.fieldName()[0] == '$') {
+ continue;
+ }
+ projBuilder.append(elem);
+ }
+
+ return std::unique_ptr<PlanCacheEntry>(new PlanCacheEntry(
+ std::move(solutionCacheData),
+ qr.getFilter(),
+ qr.getSort(),
+ projBuilder.obj(),
+ query.getCollator() ? query.getCollator()->getSpec().toBSON() : BSONObj(),
+ timeOfCreation,
+ std::move(decision),
+ {}));
+}
+
+PlanCacheEntry::PlanCacheEntry(std::vector<std::unique_ptr<const SolutionCacheData>> plannerData,
+ const BSONObj& query,
+ const BSONObj& sort,
+ const BSONObj& projection,
+ const BSONObj& collation,
+ const Date_t timeOfCreation,
+ std::unique_ptr<const PlanRankingDecision> decision,
+ std::vector<PlanCacheEntryFeedback*> feedback)
+ : plannerData(std::move(plannerData)),
+ query(query),
+ sort(sort),
+ projection(projection),
+ collation(collation),
+ timeOfCreation(timeOfCreation),
+ decision(std::move(decision)),
+ feedback(std::move(feedback)),
+ _entireObjectSize(_estimateObjectSizeInBytes()) {
+ // Account for the object in the global metric for estimating the server's total plan cache
+ // memory consumption.
+ planCacheTotalSizeEstimateBytes.increment(_entireObjectSize);
}
PlanCacheEntry::~PlanCacheEntry() {
for (size_t i = 0; i < feedback.size(); ++i) {
delete feedback[i];
}
- for (size_t i = 0; i < plannerData.size(); ++i) {
- delete plannerData[i];
- }
+ planCacheTotalSizeEstimateBytes.decrement(_entireObjectSize);
}
PlanCacheEntry* PlanCacheEntry::clone() const {
- std::vector<std::unique_ptr<QuerySolution>> solutions;
+ std::vector<std::unique_ptr<const SolutionCacheData>> solutionCacheData(plannerData.size());
for (size_t i = 0; i < plannerData.size(); ++i) {
- auto qs = stdx::make_unique<QuerySolution>();
- qs->cacheData.reset(plannerData[i]->clone());
- solutions.push_back(std::move(qs));
- }
- PlanCacheEntry* entry = new PlanCacheEntry(
- transitional_tools_do_not_use::unspool_vector(solutions), decision->clone());
-
- // Copy query shape.
- entry->query = query.getOwned();
- entry->sort = sort.getOwned();
- entry->projection = projection.getOwned();
- entry->collation = collation.getOwned();
- entry->timeOfCreation = timeOfCreation;
+ invariant(plannerData[i]);
+ solutionCacheData[i] = std::unique_ptr<const SolutionCacheData>(plannerData[i]->clone());
+ }
+ auto decisionPtr = std::unique_ptr<PlanRankingDecision>(decision->clone());
+ PlanCacheEntry* entry = new PlanCacheEntry(std::move(solutionCacheData),
+ query,
+ sort,
+ projection,
+ collation,
+ timeOfCreation,
+ std::move(decisionPtr),
+ {});
// Copy performance stats.
for (size_t i = 0; i < feedback.size(); ++i) {
@@ -488,6 +536,25 @@ PlanCacheEntry* PlanCacheEntry::clone() const {
return entry;
}
+uint64_t PlanCacheEntry::_estimateObjectSizeInBytes() const {
+ return // Add the size of each entry in 'plannerData' vector.
+ container_size_helper::estimateObjectSizeInBytes(
+ plannerData,
+ [](const auto& cacheData) { return cacheData->estimateObjectSizeInBytes(); },
+ true) +
+ // Add the size of each entry in 'feedback' vector.
+ container_size_helper::estimateObjectSizeInBytes(
+ feedback,
+ [](const auto& feedbackEntry) { return feedbackEntry->estimateObjectSizeInBytes(); },
+ true) +
+ // Add the entire size of 'decision' object.
+ (decision ? decision->estimateObjectSizeInBytes() : 0) +
+ // Add the size of all the owned BSON objects.
+ query.objsize() + sort.objsize() + projection.objsize() + collation.objsize() +
+ // Add size of the object.
+ sizeof(*this);
+}
+
std::string PlanCacheEntry::toString() const {
return str::stream() << "(query: " << query.toString() << ";sort: " << sort.toString()
<< ";projection: " << projection.toString()
@@ -760,7 +827,7 @@ void PlanCache::encodeKeyForProj(const BSONObj& projObj, StringBuilder* keyBuild
Status PlanCache::add(const CanonicalQuery& query,
const std::vector<QuerySolution*>& solns,
- PlanRankingDecision* why,
+ std::unique_ptr<PlanRankingDecision> why,
Date_t now) {
invariant(why);
@@ -781,29 +848,10 @@ Status PlanCache::add(const CanonicalQuery& query,
"candidate ordering entries in decision must match solutions");
}
- PlanCacheEntry* entry = new PlanCacheEntry(solns, why);
- const QueryRequest& qr = query.getQueryRequest();
- entry->query = qr.getFilter().getOwned();
- entry->sort = qr.getSort().getOwned();
- if (query.getCollator()) {
- entry->collation = query.getCollator()->getSpec().toBSON();
- }
- entry->timeOfCreation = now;
-
-
- // Strip projections on $-prefixed fields, as these are added by internal callers of the query
- // system and are not considered part of the user projection.
- BSONObjBuilder projBuilder;
- for (auto elem : qr.getProj()) {
- if (elem.fieldName()[0] == '$') {
- continue;
- }
- projBuilder.append(elem);
- }
- entry->projection = projBuilder.obj();
+ auto entry(PlanCacheEntry::create(solns, std::move(why), query, now));
stdx::lock_guard<stdx::mutex> cacheLock(_cacheMutex);
- std::unique_ptr<PlanCacheEntry> evictedEntry = _cache.add(computeKey(query), entry);
+ std::unique_ptr<PlanCacheEntry> evictedEntry = _cache.add(computeKey(query), entry.release());
if (NULL != evictedEntry.get()) {
LOG(1) << _ns << ": plan cache maximum size exceeded - "
diff --git a/src/mongo/db/query/plan_cache.h b/src/mongo/db/query/plan_cache.h
index 7c38e94ccbb..e1e529d198f 100644
--- a/src/mongo/db/query/plan_cache.h
+++ b/src/mongo/db/query/plan_cache.h
@@ -33,6 +33,7 @@
#include <boost/optional/optional.hpp>
#include <set>
+#include "mongo/base/counter.h"
#include "mongo/db/exec/plan_stats.h"
#include "mongo/db/query/canonical_query.h"
#include "mongo/db/query/index_tag.h"
@@ -41,6 +42,7 @@
#include "mongo/db/query/query_planner_params.h"
#include "mongo/platform/atomic_word.h"
#include "mongo/stdx/mutex.h"
+#include "mongo/util/container_size_helper.h"
namespace mongo {
@@ -62,6 +64,10 @@ struct PlanCacheEntryFeedback {
// The "goodness" score produced by the plan ranker
// corresponding to 'stats'.
double score;
+
+ uint64_t estimateObjectSizeInBytes() const {
+ return stats->estimateObjectSizeInBytes() + sizeof(*this);
+ }
};
// TODO: Replace with opaque type.
@@ -96,6 +102,14 @@ struct PlanCacheIndexTree {
*/
struct OrPushdown {
std::string indexName;
+ uint64_t estimateObjectSizeInBytes() const {
+ return // Add size of each element in 'route' vector.
+ container_size_helper::estimateObjectSizeInBytes(route) +
+ // Add size of the 'indexName' string.
+ indexName.size() +
+ // Add size of the object.
+ sizeof(*this);
+ }
size_t position;
bool canCombineBounds;
std::deque<size_t> route;
@@ -126,6 +140,22 @@ struct PlanCacheIndexTree {
*/
std::string toString(int indents = 0) const;
+ uint64_t estimateObjectSizeInBytes() const {
+ return // Recursively add size of each element in 'children' vector.
+ container_size_helper::estimateObjectSizeInBytes(
+ children,
+ [](const auto& child) { return child->estimateObjectSizeInBytes(); },
+ true) +
+ // Add size of each element in 'orPushdowns' vector.
+ container_size_helper::estimateObjectSizeInBytes(
+ orPushdowns,
+ [](const auto& orPushdown) { return orPushdown.estimateObjectSizeInBytes(); },
+ false) +
+ // Add size of 'entry' if present.
+ (entry ? entry->estimateObjectSizeInBytes() : 0) +
+ // Add size of the object.
+ sizeof(*this);
+ }
// Children owned here.
std::vector<PlanCacheIndexTree*> children;
@@ -161,6 +191,10 @@ struct SolutionCacheData {
// For debugging.
std::string toString() const;
+ uint64_t estimateObjectSizeInBytes() const {
+ return (tree ? tree->estimateObjectSizeInBytes() : 0) + sizeof(*this);
+ }
+
// Owned here. If 'wholeIXSoln' is false, then 'tree'
// can be used to tag an isomorphic match expression. If 'wholeIXSoln'
// is true, then 'tree' is used to store the relevant IndexEntry.
@@ -237,9 +271,12 @@ public:
/**
* Create a new PlanCacheEntry.
* Grabs any planner-specific data required from the solutions.
- * Takes ownership of the PlanRankingDecision that placed the plan in the cache.
*/
- PlanCacheEntry(const std::vector<QuerySolution*>& solutions, PlanRankingDecision* why);
+ static std::unique_ptr<PlanCacheEntry> create(
+ const std::vector<QuerySolution*>& solutions,
+ std::unique_ptr<const PlanRankingDecision> decision,
+ const CanonicalQuery& query,
+ Date_t timeOfCreation);
~PlanCacheEntry();
@@ -258,30 +295,53 @@ public:
// Data provided to the planner to allow it to recreate the solutions this entry
// represents. Each SolutionCacheData is fully owned here, so in order to return
// it from the cache a deep copy is made and returned inside CachedSolution.
- std::vector<SolutionCacheData*> plannerData;
+ const std::vector<std::unique_ptr<const SolutionCacheData>> plannerData;
// TODO: Do we really want to just hold a copy of the CanonicalQuery? For now we just
// extract the data we need.
//
// Used by the plan cache commands to display an example query
// of the appropriate shape.
- BSONObj query;
- BSONObj sort;
- BSONObj projection;
- BSONObj collation;
- Date_t timeOfCreation;
+ const BSONObj query;
+ const BSONObj sort;
+ const BSONObj projection;
+ const BSONObj collation;
+ const Date_t timeOfCreation;
//
// Performance stats
//
- // Information that went into picking the winning plan and also why
- // the other plans lost.
- std::unique_ptr<PlanRankingDecision> decision;
+ // Information that went into picking the winning plan and also why the other plans lost.
+ const std::unique_ptr<const PlanRankingDecision> decision;
// Annotations from cached runs. The CachedPlanStage provides these stats about its
// runs when they complete.
std::vector<PlanCacheEntryFeedback*> feedback;
+
+ /**
+ * Tracks the approximate cumulative size of the plan cache entries across all the collections.
+ */
+ static Counter64 planCacheTotalSizeEstimateBytes;
+
+private:
+ /**
+ * All arguments constructor.
+ */
+ PlanCacheEntry(std::vector<std::unique_ptr<const SolutionCacheData>> plannerData,
+ const BSONObj& query,
+ const BSONObj& sort,
+ const BSONObj& projection,
+ const BSONObj& collation,
+ Date_t timeOfCreation,
+ std::unique_ptr<const PlanRankingDecision> decision,
+ std::vector<PlanCacheEntryFeedback*> feedback);
+
+ uint64_t _estimateObjectSizeInBytes() const;
+
+ // The total runtime size of the current object in bytes. This is the deep size, obtained by
+ // recursively following references to all owned objects.
+ const uint64_t _entireObjectSize;
};
/**
@@ -326,7 +386,7 @@ public:
*/
Status add(const CanonicalQuery& query,
const std::vector<QuerySolution*>& solns,
- PlanRankingDecision* why,
+ std::unique_ptr<PlanRankingDecision> why,
Date_t now);
/**
@@ -380,7 +440,7 @@ public:
/**
* Returns a copy of a cache entry.
* Used by planCacheListPlans to display plan details.
- *
+ *
* If there is no entry in the cache for the 'query', returns an error Status.
*
* If there is an entry in the cache, populates 'entryOut' and returns Status::OK(). Caller
diff --git a/src/mongo/db/query/plan_cache_test.cpp b/src/mongo/db/query/plan_cache_test.cpp
index c79341deaba..ab8cc1421fd 100644
--- a/src/mongo/db/query/plan_cache_test.cpp
+++ b/src/mongo/db/query/plan_cache_test.cpp
@@ -86,8 +86,8 @@ unique_ptr<CanonicalQuery> canonicalize(const BSONObj& queryObj) {
return std::move(statusWithCQ.getValue());
}
-unique_ptr<CanonicalQuery> canonicalize(const char* queryStr) {
- BSONObj queryObj = fromjson(queryStr);
+unique_ptr<CanonicalQuery> canonicalize(StringData queryStr) {
+ BSONObj queryObj = fromjson(queryStr.toString());
return canonicalize(queryObj);
}
@@ -247,7 +247,7 @@ struct GenerateQuerySolution {
/**
* Utility function to create a PlanRankingDecision
*/
-PlanRankingDecision* createDecision(size_t numPlans) {
+std::unique_ptr<PlanRankingDecision> createDecision(size_t numPlans) {
unique_ptr<PlanRankingDecision> why(new PlanRankingDecision());
for (size_t i = 0; i < numPlans; ++i) {
CommonStats common("COLLSCAN");
@@ -257,7 +257,7 @@ PlanRankingDecision* createDecision(size_t numPlans) {
why->scores.push_back(0U);
why->candidateOrder.push_back(i);
}
- return why.release();
+ return why;
}
/**
@@ -294,6 +294,13 @@ void assertShouldNotCacheQuery(const char* queryStr) {
assertShouldNotCacheQuery(*cq);
}
+std::unique_ptr<QuerySolution> getQuerySolutionForCaching() {
+ std::unique_ptr<QuerySolution> qs = std::make_unique<QuerySolution>();
+ qs->cacheData = std::make_unique<SolutionCacheData>();
+ qs->cacheData->tree = std::make_unique<PlanCacheIndexTree>();
+ return qs;
+}
+
/**
* Cacheable queries
* These queries will be added to the cache with run-time statistics
@@ -435,7 +442,7 @@ TEST(PlanCacheTest, AddEmptySolutions) {
std::vector<QuerySolution*> solns;
unique_ptr<PlanRankingDecision> decision(createDecision(1U));
QueryTestServiceContext serviceContext;
- ASSERT_NOT_OK(planCache.add(*cq, solns, decision.get(), Date_t{}));
+ ASSERT_NOT_OK(planCache.add(*cq, solns, std::move(decision), Date_t{}));
}
TEST(PlanCacheTest, AddValidSolution) {
@@ -695,8 +702,9 @@ protected:
qs.cacheData.reset(soln.cacheData->clone());
std::vector<QuerySolution*> solutions;
solutions.push_back(&qs);
- PlanCacheEntry entry(solutions, createDecision(1U));
- CachedSolution cachedSoln(ck, entry);
+
+ auto entry = PlanCacheEntry::create(solutions, createDecision(1U), *scopedCq, Date_t());
+ CachedSolution cachedSoln(ck, *entry);
auto statusWithQs = QueryPlanner::planFromCache(*scopedCq, params, cachedSoln);
ASSERT_OK(statusWithQs.getStatus());
@@ -1759,4 +1767,184 @@ TEST(PlanCacheTest, ComputeNeArrayInOrStagePreserversOrder) {
}));
}
+TEST(PlanCacheTest, PlanCacheSizeWithCRUDOperations) {
+ PlanCache planCache;
+ unique_ptr<CanonicalQuery> cq(canonicalize("{a: 1, b: 1}"));
+ auto qs = getQuerySolutionForCaching();
+ std::vector<QuerySolution*> solns = {qs.get()};
+ long long previousSize, originalSize = PlanCacheEntry::planCacheTotalSizeEstimateBytes.get();
+
+ // Verify that the plan cache size increases after adding new entry to cache.
+ previousSize = PlanCacheEntry::planCacheTotalSizeEstimateBytes.get();
+ ASSERT_OK(planCache.add(*cq, solns, createDecision(1U), Date_t{}));
+ ASSERT_GT(PlanCacheEntry::planCacheTotalSizeEstimateBytes.get(), previousSize);
+
+ // Verify that trying to set the same entry won't change the plan cache size.
+ previousSize = PlanCacheEntry::planCacheTotalSizeEstimateBytes.get();
+ ASSERT_OK(planCache.add(*cq, solns, createDecision(1U), Date_t{}));
+ ASSERT_EQ(PlanCacheEntry::planCacheTotalSizeEstimateBytes.get(), previousSize);
+
+ // Verify that the plan cache size increases after updating the same entry with more solutions.
+ solns.push_back(qs.get());
+ ASSERT_OK(planCache.add(*cq, solns, createDecision(2U), Date_t{}));
+ ASSERT_GT(PlanCacheEntry::planCacheTotalSizeEstimateBytes.get(), previousSize);
+
+ // Verify that the plan cache size decreases after updating the same entry with fewer solutions.
+ solns.erase(solns.end() - 1);
+ previousSize = PlanCacheEntry::planCacheTotalSizeEstimateBytes.get();
+ ASSERT_OK(planCache.add(*cq, solns, createDecision(1U), Date_t{}));
+ ASSERT_LT(PlanCacheEntry::planCacheTotalSizeEstimateBytes.get(), previousSize);
+ ASSERT_GT(PlanCacheEntry::planCacheTotalSizeEstimateBytes.get(), originalSize);
+
+ // Verify that adding multiple entries will keep increasing the cache size.
+ long long sizeWithOneEntry = PlanCacheEntry::planCacheTotalSizeEstimateBytes.get();
+ std::string queryString = "{a: 1, c: 1}";
+ for (int i = 0; i < 5; ++i) {
+ // Update the field name in the query string so that plan cache creates a new entry.
+ queryString[1] = 'b' + i;
+ unique_ptr<CanonicalQuery> query(canonicalize(queryString));
+ previousSize = PlanCacheEntry::planCacheTotalSizeEstimateBytes.get();
+ ASSERT_OK(planCache.add(*query, solns, createDecision(1U), Date_t{}));
+ ASSERT_GT(PlanCacheEntry::planCacheTotalSizeEstimateBytes.get(), previousSize);
+ }
+
+ // Verify that removing multiple entries will keep decreasing the cache size.
+ for (int i = 0; i < 5; ++i) {
+ // Update the field name in the query to match the previously created plan cache entry key.
+ queryString[1] = 'b' + i;
+ unique_ptr<CanonicalQuery> query(canonicalize(queryString));
+ previousSize = PlanCacheEntry::planCacheTotalSizeEstimateBytes.get();
+ ASSERT_OK(planCache.remove(*query));
+ ASSERT_LT(PlanCacheEntry::planCacheTotalSizeEstimateBytes.get(), previousSize);
+ }
+ // Verify that size is reset to the size when there is only one entry.
+ ASSERT_EQ(PlanCacheEntry::planCacheTotalSizeEstimateBytes.get(), sizeWithOneEntry);
+
+ // Verify that trying to remove a non-existing key won't change the plan cache size.
+ previousSize = PlanCacheEntry::planCacheTotalSizeEstimateBytes.get();
+ unique_ptr<CanonicalQuery> newQuery(canonicalize("{a: 1}"));
+ ASSERT_NOT_OK(planCache.remove(*newQuery));
+ ASSERT_EQ(PlanCacheEntry::planCacheTotalSizeEstimateBytes.get(), previousSize);
+
+ // Verify that the plan cache size goes back to original size when the entry is removed.
+ ASSERT_OK(planCache.remove(*cq));
+ ASSERT_EQ(planCache.size(), 0U);
+ ASSERT_EQ(PlanCacheEntry::planCacheTotalSizeEstimateBytes.get(), originalSize);
+}
+
+TEST(PlanCacheTest, PlanCacheSizeWithEviction) {
+ const size_t kCacheSize = 5;
+ internalQueryCacheSize.store(kCacheSize);
+ PlanCache planCache;
+ unique_ptr<CanonicalQuery> cq(canonicalize("{a: 1, b: 1}"));
+ auto qs = getQuerySolutionForCaching();
+ std::vector<QuerySolution*> solns = {qs.get(), qs.get()};
+ long long originalSize = PlanCacheEntry::planCacheTotalSizeEstimateBytes.get();
+ long long previousSize = PlanCacheEntry::planCacheTotalSizeEstimateBytes.get();
+
+ // Add entries until plan cache is full and verify that the size keeps increasing.
+ std::string queryString = "{a: 1, c: 1}";
+ for (size_t i = 0; i < kCacheSize; ++i) {
+ // Update the field name in the query string so that plan cache creates a new entry.
+ queryString[1]++;
+ unique_ptr<CanonicalQuery> query(canonicalize(queryString));
+ previousSize = PlanCacheEntry::planCacheTotalSizeEstimateBytes.get();
+ ASSERT_OK(planCache.add(*query, solns, createDecision(2U), Date_t{}));
+ ASSERT_GT(PlanCacheEntry::planCacheTotalSizeEstimateBytes.get(), previousSize);
+ }
+
+ // Verify that adding entry of same size as evicted entry wouldn't change the plan cache size.
+ queryString = "{k: 1, c: 1}";
+ cq = unique_ptr<CanonicalQuery>(canonicalize(queryString));
+ previousSize = PlanCacheEntry::planCacheTotalSizeEstimateBytes.get();
+ ASSERT_EQ(planCache.size(), kCacheSize);
+ ASSERT_OK(planCache.add(*cq, solns, createDecision(2U), Date_t{}));
+ ASSERT_EQ(planCache.size(), kCacheSize);
+ ASSERT_EQ(PlanCacheEntry::planCacheTotalSizeEstimateBytes.get(), previousSize);
+
+ // Verify that adding entry with query bigger than the evicted entry's key should change the
+ // plan cache size.
+ queryString = "{k: 1, c: 1, extraField: 1}";
+ unique_ptr<CanonicalQuery> queryBiggerKey(canonicalize(queryString));
+ previousSize = PlanCacheEntry::planCacheTotalSizeEstimateBytes.get();
+ ASSERT_OK(planCache.add(*queryBiggerKey, solns, createDecision(2U), Date_t{}));
+ ASSERT_GT(PlanCacheEntry::planCacheTotalSizeEstimateBytes.get(), previousSize);
+
+ // Verify that adding entry with query solutions larger than the evicted entry's query solutions
+ // should increase the plan cache size.
+ queryString = "{l: 1, c: 1}";
+ cq = unique_ptr<CanonicalQuery>(canonicalize(queryString));
+ previousSize = PlanCacheEntry::planCacheTotalSizeEstimateBytes.get();
+ solns.push_back(qs.get());
+ ASSERT_OK(planCache.add(*cq, solns, createDecision(3U), Date_t{}));
+ ASSERT_GT(PlanCacheEntry::planCacheTotalSizeEstimateBytes.get(), previousSize);
+
+ // Verify that adding entry with query solutions smaller than the evicted entry's query
+ // solutions should decrease the plan cache size.
+ queryString = "{m: 1, c: 1}";
+ cq = unique_ptr<CanonicalQuery>(canonicalize(queryString));
+ previousSize = PlanCacheEntry::planCacheTotalSizeEstimateBytes.get();
+ solns = {qs.get()};
+ ASSERT_OK(planCache.add(*cq, solns, createDecision(1U), Date_t{}));
+ ASSERT_LT(PlanCacheEntry::planCacheTotalSizeEstimateBytes.get(), previousSize);
+
+ // clear() should reset the size.
+ planCache.clear();
+ ASSERT_EQ(PlanCacheEntry::planCacheTotalSizeEstimateBytes.get(), originalSize);
+}
+
+TEST(PlanCacheTest, PlanCacheSizeWithMultiplePlanCaches) {
+ PlanCache planCache1;
+ PlanCache planCache2;
+ unique_ptr<CanonicalQuery> cq(canonicalize("{a: 1, b: 1}"));
+ auto qs = getQuerySolutionForCaching();
+ std::vector<QuerySolution*> solns = {qs.get()};
+ long long previousSize, originalSize = PlanCacheEntry::planCacheTotalSizeEstimateBytes.get();
+
+ // Verify that adding entries to both plan caches will keep increasing the cache size.
+ std::string queryString = "{a: 1, c: 1}";
+ for (int i = 0; i < 5; ++i) {
+ // Update the field name in the query string so that plan cache creates a new entry.
+ queryString[1] = 'b' + i;
+ unique_ptr<CanonicalQuery> query(canonicalize(queryString));
+ previousSize = PlanCacheEntry::planCacheTotalSizeEstimateBytes.get();
+ ASSERT_OK(planCache1.add(*query, solns, createDecision(1U), Date_t{}));
+ ASSERT_GT(PlanCacheEntry::planCacheTotalSizeEstimateBytes.get(), previousSize);
+
+ previousSize = PlanCacheEntry::planCacheTotalSizeEstimateBytes.get();
+ ASSERT_OK(planCache2.add(*query, solns, createDecision(1U), Date_t{}));
+ ASSERT_GT(PlanCacheEntry::planCacheTotalSizeEstimateBytes.get(), previousSize);
+ }
+
+ // Verify that removing entries from one plan cache will keep decreasing the cache size.
+ for (int i = 0; i < 5; ++i) {
+ // Update the field name in the query to match the previously created plan cache entry key.
+ queryString[1] = 'b' + i;
+ unique_ptr<CanonicalQuery> query(canonicalize(queryString));
+ previousSize = PlanCacheEntry::planCacheTotalSizeEstimateBytes.get();
+ ASSERT_OK(planCache1.remove(*query));
+ ASSERT_LT(PlanCacheEntry::planCacheTotalSizeEstimateBytes.get(), previousSize);
+ }
+
+ // Verify for scoped PlanCache object.
+ long long sizeBeforeScopedPlanCache = PlanCacheEntry::planCacheTotalSizeEstimateBytes.get();
+ {
+ PlanCache planCache;
+ previousSize = PlanCacheEntry::planCacheTotalSizeEstimateBytes.get();
+ ASSERT_OK(planCache.add(*cq, solns, createDecision(1U), Date_t{}));
+ ASSERT_GT(PlanCacheEntry::planCacheTotalSizeEstimateBytes.get(), previousSize);
+ }
+
+ // Verify that size is reset to 'sizeBeforeScopedPlanCache' after the destructor of 'planCache'
+ // is called.
+ ASSERT_EQ(PlanCacheEntry::planCacheTotalSizeEstimateBytes.get(), sizeBeforeScopedPlanCache);
+
+ // Clear 'planCache2' to remove all entries.
+ previousSize = PlanCacheEntry::planCacheTotalSizeEstimateBytes.get();
+ planCache2.clear();
+ ASSERT_LT(PlanCacheEntry::planCacheTotalSizeEstimateBytes.get(), previousSize);
+
+ // Verify that size is reset to the original size after removing all entries.
+ ASSERT_EQ(PlanCacheEntry::planCacheTotalSizeEstimateBytes.get(), originalSize);
+}
} // namespace
diff --git a/src/mongo/db/query/plan_ranker.h b/src/mongo/db/query/plan_ranker.h
index 698f92d0cb9..217c21f8744 100644
--- a/src/mongo/db/query/plan_ranker.h
+++ b/src/mongo/db/query/plan_ranker.h
@@ -39,6 +39,7 @@
#include "mongo/db/exec/plan_stats.h"
#include "mongo/db/exec/working_set.h"
#include "mongo/db/query/query_solution.h"
+#include "mongo/util/container_size_helper.h"
namespace mongo {
@@ -106,6 +107,18 @@ struct PlanRankingDecision {
return decision;
}
+ uint64_t estimateObjectSizeInBytes() const {
+ return // Add size of each element in 'stats' vector.
+ container_size_helper::estimateObjectSizeInBytes(
+ stats, [](const auto& stat) { return stat->estimateObjectSizeInBytes(); }, true) +
+ // Add size of each element in 'candidateOrder' vector.
+ container_size_helper::estimateObjectSizeInBytes(candidateOrder) +
+ // Add size of each element in 'scores' vector.
+ container_size_helper::estimateObjectSizeInBytes(scores) +
+ // Add size of the object.
+ sizeof(*this);
+ }
+
// Stats of all plans sorted in descending order by score.
// Owned by us.
std::vector<std::unique_ptr<PlanStageStats>> stats;
diff --git a/src/mongo/util/SConscript b/src/mongo/util/SConscript
index 547f379eacc..2456eecf5da 100644
--- a/src/mongo/util/SConscript
+++ b/src/mongo/util/SConscript
@@ -631,6 +631,26 @@ env.CppUnitTest(
]
)
+env.Library(
+ target='container_size_helper',
+ source=[
+ 'container_size_helper.cpp',
+ ],
+ LIBDEPS=[
+ '$BUILD_DIR/mongo/base',
+ ],
+)
+
+env.CppUnitTest(
+ target='container_size_helper_test',
+ source=[
+ 'container_size_helper_test.cpp',
+ ],
+ LIBDEPS=[
+ '$BUILD_DIR/mongo/base',
+ ]
+)
+
env.CppUnitTest(
target='producer_consumer_queue_test',
source=[
diff --git a/src/mongo/util/container_size_helper.h b/src/mongo/util/container_size_helper.h
new file mode 100644
index 00000000000..406e3607d1c
--- /dev/null
+++ b/src/mongo/util/container_size_helper.h
@@ -0,0 +1,73 @@
+/**
+ * Copyright (C) 2019-present MongoDB, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the Server Side Public License, version 1,
+ * as published by MongoDB, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * Server Side Public License for more details.
+ *
+ * You should have received a copy of the Server Side Public License
+ * along with this program. If not, see
+ * <http://www.mongodb.com/licensing/server-side-public-license>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the Server Side Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#pragma once
+
+#include <vector>
+
+namespace mongo {
+namespace container_size_helper {
+/**
+ * Returns the estimate of the number of bytes consumed by the vector, based on the current capacity
+ * of the vector and the size of objects of type T. Does not incorporate the size of any owned
+ * objects that are pointed to by T. Also, does not incorporate sizeof(vector).
+ */
+template <class T>
+uint64_t estimateObjectSizeInBytes(const std::vector<T>& vector) {
+ return static_cast<uint64_t>(vector.capacity()) * sizeof(T);
+}
+
+/**
+ * Returns the estimate of the number of bytes consumed by the container, based on the current
+ * capacity of the container and the size of objects of type 'T::value_type'. Does not incorporate
+ * the size of any owned objects that are pointed to by 'T::value_type'. Also, does not incorporate
+ * sizeof(container).
+ */
+template <class T>
+uint64_t estimateObjectSizeInBytes(const T& container) {
+ return static_cast<uint64_t>(container.size()) * sizeof(typename T::value_type);
+}
+
+/**
+ * Returns the estimate by recursively calculating the memory owned by each element of the
+ * container. The 'function' should calculate the overall size of each individual element of the
+ * 'container'.
+ * When 'includeShallowSize' is true, adds the size of each container element.
+ */
+template <class T, class Function>
+uint64_t estimateObjectSizeInBytes(const T& container, Function function, bool includeShallowSize) {
+ uint64_t result = 0;
+ for (const auto& element : container) {
+ result += function(element);
+ }
+ result += includeShallowSize ? estimateObjectSizeInBytes(container) : 0;
+ return result;
+}
+} // namespace container_size_helper
+} // namespace mongo
diff --git a/src/mongo/util/container_size_helper_test.cpp b/src/mongo/util/container_size_helper_test.cpp
new file mode 100644
index 00000000000..51b2f2a48d3
--- /dev/null
+++ b/src/mongo/util/container_size_helper_test.cpp
@@ -0,0 +1,78 @@
+/**
+ * Copyright (C) 2019-present MongoDB, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the Server Side Public License, version 1,
+ * as published by MongoDB, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * Server Side Public License for more details.
+ *
+ * You should have received a copy of the Server Side Public License
+ * along with this program. If not, see
+ * <http://www.mongodb.com/licensing/server-side-public-license>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the Server Side Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#include "mongo/platform/basic.h"
+
+#include <vector>
+
+#include "mongo/unittest/unittest.h"
+#include "mongo/util/container_size_helper.h"
+
+namespace {
+
+struct Mock {
+ int size;
+};
+
+TEST(ContainerSizeHelper, TestEstimateObjectSizeInBytes) {
+ std::vector<Mock> vect = {{1}, {2}, {3}, {4}};
+
+ // Sum of 'size' of each element '1 + 2 + 3 + 4'.
+ uint64_t expectedSize = 10;
+
+ // When 'includeShallowSize' is false, should return only the sum of sizes calculated by the
+ // 'function'.
+ ASSERT_EQ(mongo::container_size_helper::estimateObjectSizeInBytes(
+ vect, [](const auto& obj) { return obj.size; }, false),
+ expectedSize);
+
+ // When 'includeShallowSize' is true, should add size of 'Mock' object.
+ ASSERT_EQ(mongo::container_size_helper::estimateObjectSizeInBytes(
+ vect, [](const auto& obj) { return obj.size; }, true),
+ expectedSize + sizeof(Mock) * vect.capacity());
+}
+
+TEST(ContainerSizeHelper, TestEstimateObjectSizeInBytesWithPointers) {
+ Mock obj1 = {2};
+ Mock obj2 = {1};
+ std::vector<Mock*> vect = {&obj1, &obj1, &obj2};
+
+ // Sum of 'size' of each element '2 + 2 + 1'.
+ uint64_t expectedSize = 5;
+
+ // Reserve extra space for the vector.
+ vect.reserve(10);
+ ASSERT_EQ(static_cast<size_t>(10), vect.capacity());
+
+ // When 'includeShallowSize' is true, should add size of 'Mock*' pointer.
+ ASSERT_EQ(mongo::container_size_helper::estimateObjectSizeInBytes(
+ vect, [](const auto& obj) { return obj->size; }, true),
+ expectedSize + sizeof(Mock*) * vect.capacity());
+}
+} // namespace