author     Alexander Ignatyev <alexander.ignatyev@mongodb.com>    2021-09-17 17:08:11 +0000
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>       2021-09-17 18:03:55 +0000
commit     d22a8b46abe0bcdbcbe1c84b0e22f6b2c9dc1d87 (patch)
tree       dc77dbb35fabeb889137424fe394e3debb4db35c /src/mongo/db
parent     ed888257db60e851557a2e2bf6c602e39f7dc849 (diff)
download   mongo-d22a8b46abe0bcdbcbe1c84b0e22f6b2c9dc1d87.tar.gz
SERVER-59683 Extract BudgetEstimator logic from LRU cache
Diffstat (limited to 'src/mongo/db')
-rw-r--r--  src/mongo/db/commands/index_filter_commands_test.cpp   12
-rw-r--r--  src/mongo/db/commands/plan_cache_commands_test.cpp      8
-rw-r--r--  src/mongo/db/query/canonical_query_encoder_test.cpp     2
-rw-r--r--  src/mongo/db/query/classic_plan_cache.h                  9
-rw-r--r--  src/mongo/db/query/collection_query_info.cpp            12
-rw-r--r--  src/mongo/db/query/collection_query_info.h               1
-rw-r--r--  src/mongo/db/query/get_executor_test.cpp                 2
-rw-r--r--  src/mongo/db/query/lru_key_value.h                      81
-rw-r--r--  src/mongo/db/query/lru_key_value_test.cpp               29
-rw-r--r--  src/mongo/db/query/plan_cache.h                         60
-rw-r--r--  src/mongo/db/query/plan_cache_test.cpp                  60
-rw-r--r--  src/mongo/db/query/sbe_plan_cache.cpp                    4
-rw-r--r--  src/mongo/db/query/sbe_plan_cache.h                     11
13 files changed, 184 insertions(+), 107 deletions(-)
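
Before the per-file hunks, the core idea of the change can be summarized with a small, self-contained sketch. It mirrors the LRUBudgetTracker added to lru_key_value.h below, but the names, the estimator, and the budget value here are purely illustrative, not the production implementation:

#include <cassert>
#include <cstddef>

// Illustrative budget tracker: sums Estimator(entry) over the cached entries and
// reports when that sum exceeds a fixed budget, so an LRU store knows to evict.
template <typename V, typename Estimator>
class BudgetTrackerSketch {
public:
    explicit BudgetTrackerSketch(std::size_t maxBudget) : _max(maxBudget) {}

    void onAdd(const V& v) {
        _current += _estimator(v);
    }

    void onRemove(const V& v) {
        const std::size_t cost = _estimator(v);
        assert(_current >= cost);  // the real code tasserts on budget underflow
        _current -= cost;
    }

    void onClear() {
        _current = 0;
    }

    // True once the summed cost exceeds the budget; the store evicts LRU entries until false.
    bool isOverBudget() const {
        return _current > _max;
    }

private:
    const std::size_t _max;
    std::size_t _current = 0;
    Estimator _estimator;
};

// Counting estimator: every entry costs 1, so the budget is simply "max number of entries".
struct CountingEstimator {
    std::size_t operator()(const int&) const {
        return 1;
    }
};

int main() {
    BudgetTrackerSketch<int, CountingEstimator> tracker(2);
    int a = 1, b = 2, c = 3;
    tracker.onAdd(a);
    tracker.onAdd(b);
    assert(!tracker.isOverBudget());
    tracker.onAdd(c);
    assert(tracker.isOverBudget());  // three entries exceed a budget of two
    tracker.onRemove(a);             // an LRU store would evict its least recently used entry here
    assert(!tracker.isOverBudget());
    return 0;
}
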
diff --git a/src/mongo/db/commands/index_filter_commands_test.cpp b/src/mongo/db/commands/index_filter_commands_test.cpp
index 5f9e768033e..2c3af6d7aae 100644
--- a/src/mongo/db/commands/index_filter_commands_test.cpp
+++ b/src/mongo/db/commands/index_filter_commands_test.cpp
@@ -214,7 +214,7 @@ TEST(IndexFilterCommandsTest, ListFiltersEmpty) {
TEST(IndexFilterCommandsTest, ClearFiltersInvalidParameter) {
QuerySettings empty;
- PlanCache planCache;
+ PlanCache planCache(5000);
OperationContextNoop opCtx;
// If present, query has to be an object.
@@ -239,7 +239,7 @@ TEST(IndexFilterCommandsTest, ClearFiltersInvalidParameter) {
TEST(IndexFilterCommandsTest, ClearNonexistentHint) {
QuerySettings querySettings;
- PlanCache planCache;
+ PlanCache planCache(5000);
OperationContextNoop opCtx;
ASSERT_OK(SetFilter::set(&opCtx,
@@ -264,7 +264,7 @@ TEST(IndexFilterCommandsTest, ClearNonexistentHint) {
TEST(IndexFilterCommandsTest, SetFilterInvalidParameter) {
QuerySettings empty;
- PlanCache planCache;
+ PlanCache planCache(5000);
OperationContextNoop opCtx;
ASSERT_NOT_OK(SetFilter::set(&opCtx, &empty, &planCache, nss.ns(), fromjson("{}")));
@@ -324,7 +324,7 @@ TEST(IndexFilterCommandsTest, SetFilterInvalidParameter) {
TEST(IndexFilterCommandsTest, SetAndClearFilters) {
QuerySettings querySettings;
- PlanCache planCache;
+ PlanCache planCache(5000);
QueryTestServiceContext serviceContext;
auto opCtx = serviceContext.makeOperationContext();
@@ -432,7 +432,7 @@ TEST(IndexFilterCommandsTest, SetAndClearFiltersCollation) {
QuerySettings querySettings;
// Create a plan cache. Add an index so that indexability is included in the plan cache keys.
- PlanCache planCache;
+ PlanCache planCache(5000);
const auto keyPattern = fromjson("{a: 1}");
planCache.notifyOfIndexUpdates(
{CoreIndexInfo(keyPattern,
@@ -506,7 +506,7 @@ TEST(IndexFilterCommandsTest, SetAndClearFiltersCollation) {
TEST(IndexFilterCommandsTest, SetFilterAcceptsIndexNames) {
CollatorInterfaceMock reverseCollator(CollatorInterfaceMock::MockType::kReverseString);
- PlanCache planCache;
+ PlanCache planCache(5000);
const auto keyPattern = fromjson("{a: 1}");
CoreIndexInfo collatedIndex(keyPattern,
IndexNames::nameToType(IndexNames::findPluginName(keyPattern)),
diff --git a/src/mongo/db/commands/plan_cache_commands_test.cpp b/src/mongo/db/commands/plan_cache_commands_test.cpp
index 3764382a414..707a7329969 100644
--- a/src/mongo/db/commands/plan_cache_commands_test.cpp
+++ b/src/mongo/db/commands/plan_cache_commands_test.cpp
@@ -85,7 +85,7 @@ TEST(PlanCacheCommandsTest, CannotCanonicalizeWhenSortObjectIsMalformed) {
}
TEST(PlanCacheCommandsTest, CanCanonicalizeWithValidQuery) {
- PlanCache planCache;
+ PlanCache planCache(5000);
QueryTestServiceContext serviceContext;
auto opCtx = serviceContext.makeOperationContext();
@@ -103,7 +103,7 @@ TEST(PlanCacheCommandsTest, CanCanonicalizeWithValidQuery) {
}
TEST(PlanCacheCommandsTest, SortQueryResultsInDifferentPlanCacheKeyFromUnsorted) {
- PlanCache planCache;
+ PlanCache planCache(5000);
QueryTestServiceContext serviceContext;
auto opCtx = serviceContext.makeOperationContext();
@@ -122,7 +122,7 @@ TEST(PlanCacheCommandsTest, SortQueryResultsInDifferentPlanCacheKeyFromUnsorted)
// Regression test for SERVER-17158.
TEST(PlanCacheCommandsTest, SortsAreProperlyDelimitedInPlanCacheKey) {
- PlanCache planCache;
+ PlanCache planCache(5000);
QueryTestServiceContext serviceContext;
auto opCtx = serviceContext.makeOperationContext();
@@ -140,7 +140,7 @@ TEST(PlanCacheCommandsTest, SortsAreProperlyDelimitedInPlanCacheKey) {
}
TEST(PlanCacheCommandsTest, ProjectQueryResultsInDifferentPlanCacheKeyFromUnprojected) {
- PlanCache planCache;
+ PlanCache planCache(5000);
QueryTestServiceContext serviceContext;
auto opCtx = serviceContext.makeOperationContext();
diff --git a/src/mongo/db/query/canonical_query_encoder_test.cpp b/src/mongo/db/query/canonical_query_encoder_test.cpp
index cf6f0b7a613..d925fc227b7 100644
--- a/src/mongo/db/query/canonical_query_encoder_test.cpp
+++ b/src/mongo/db/query/canonical_query_encoder_test.cpp
@@ -212,7 +212,7 @@ TEST(CanonicalQueryEncoderTest, ComputeKeyEscaped) {
// Cache keys for $geoWithin queries with legacy and GeoJSON coordinates should
// not be the same.
TEST(CanonicalQueryEncoderTest, ComputeKeyGeoWithin) {
- PlanCache planCache;
+ PlanCache planCache(5000);
// Legacy coordinates.
unique_ptr<CanonicalQuery> cqLegacy(
diff --git a/src/mongo/db/query/classic_plan_cache.h b/src/mongo/db/query/classic_plan_cache.h
index b1a21aaad38..93770fcb139 100644
--- a/src/mongo/db/query/classic_plan_cache.h
+++ b/src/mongo/db/query/classic_plan_cache.h
@@ -263,6 +263,13 @@ using PlanCacheEntry = PlanCacheEntryBase<SolutionCacheData>;
using CachedSolution = CachedPlanHolder<SolutionCacheData>;
-using PlanCache = PlanCacheBase<PlanCacheKey, SolutionCacheData, PlanCacheKeyHasher>;
+struct BudgetEstimator {
+ size_t operator()(const PlanCacheEntry&) {
+ return 1;
+ }
+};
+
+using PlanCache =
+ PlanCacheBase<PlanCacheKey, SolutionCacheData, BudgetEstimator, PlanCacheKeyHasher>;
} // namespace mongo
diff --git a/src/mongo/db/query/collection_query_info.cpp b/src/mongo/db/query/collection_query_info.cpp
index 2489bad469a..4abf7b82ecb 100644
--- a/src/mongo/db/query/collection_query_info.cpp
+++ b/src/mongo/db/query/collection_query_info.cpp
@@ -79,14 +79,18 @@ CoreIndexInfo indexInfoFromIndexCatalogEntry(const IndexCatalogEntry& ice) {
} // namespace
-CollectionQueryInfo::CollectionQueryInfo()
- : _keysComputed(false), _planCache(std::make_shared<PlanCache>()) {}
+CollectionQueryInfo::CollectionQueryInfo() : _keysComputed(false), _planCache(makePlanCache()) {}
const UpdateIndexData& CollectionQueryInfo::getIndexKeys(OperationContext* opCtx) const {
invariant(_keysComputed);
return _indexedPaths;
}
+std::shared_ptr<PlanCache> CollectionQueryInfo::makePlanCache() {
+ return std::make_shared<PlanCache>(
+ PlanCache::BudgetTracker(internalQueryCacheMaxEntriesPerCollection.load()));
+}
+
void CollectionQueryInfo::computeIndexKeys(OperationContext* opCtx, const CollectionPtr& coll) {
_indexedPaths.clear();
@@ -193,7 +197,7 @@ void CollectionQueryInfo::clearQueryCache(OperationContext* opCtx, const Collect
"Clearing plan cache - collection info cache reinstantiated",
"namespace"_attr = coll->ns());
- _planCache = std::make_shared<PlanCache>();
+ _planCache = makePlanCache();
updatePlanCacheIndexEntries(opCtx, coll);
}
}
@@ -241,7 +245,7 @@ void CollectionQueryInfo::init(OperationContext* opCtx, const CollectionPtr& col
}
void CollectionQueryInfo::rebuildIndexData(OperationContext* opCtx, const CollectionPtr& coll) {
- _planCache = std::make_shared<PlanCache>();
+ _planCache = makePlanCache();
_keysComputed = false;
computeIndexKeys(opCtx, coll);
diff --git a/src/mongo/db/query/collection_query_info.h b/src/mongo/db/query/collection_query_info.h
index 00cedd19158..ac7438b3803 100644
--- a/src/mongo/db/query/collection_query_info.h
+++ b/src/mongo/db/query/collection_query_info.h
@@ -99,6 +99,7 @@ public:
const PlanSummaryStats& summaryStats) const;
private:
+ static std::shared_ptr<PlanCache> makePlanCache();
void computeIndexKeys(OperationContext* opCtx, const CollectionPtr& coll);
void updatePlanCacheIndexEntries(OperationContext* opCtx, const CollectionPtr& coll);
diff --git a/src/mongo/db/query/get_executor_test.cpp b/src/mongo/db/query/get_executor_test.cpp
index 09691cf775a..5d9594bf627 100644
--- a/src/mongo/db/query/get_executor_test.cpp
+++ b/src/mongo/db/query/get_executor_test.cpp
@@ -104,7 +104,7 @@ void testAllowedIndices(std::vector<IndexEntry> indexes,
BSONObjSet keyPatterns,
stdx::unordered_set<std::string> indexNames,
stdx::unordered_set<std::string> expectedFilteredNames) {
- PlanCache planCache;
+ PlanCache planCache(5000);
QuerySettings querySettings;
// getAllowedIndices should return false when query shape is not yet in query settings.
diff --git a/src/mongo/db/query/lru_key_value.h b/src/mongo/db/query/lru_key_value.h
index 0440e621244..abb5cf475fd 100644
--- a/src/mongo/db/query/lru_key_value.h
+++ b/src/mongo/db/query/lru_key_value.h
@@ -29,6 +29,7 @@
#pragma once
+#include <fmt/format.h>
#include <list>
#include <memory>
@@ -39,9 +40,52 @@
namespace mongo {
/**
+ * This class tracks the size of the entries stored in 'LRUKeyValue'.
+ * The size can be understood as the number of entries, the amount of memory they occupy,
+ * or any other value defined by the template parameter 'Estimator'.
+ * The 'Estimator' must be deterministic and always return the same value for the same entry.
+ */
+template <typename V, typename Estimator>
+class LRUBudgetTracker {
+public:
+ LRUBudgetTracker(size_t maxBudget) : _max(maxBudget), _current(0) {}
+
+ void onAdd(const V& v) {
+ _current += _estimator(v);
+ }
+
+ void onRemove(const V& v) {
+ using namespace fmt::literals;
+ size_t budget = _estimator(v);
+ tassert(5968300,
+ "LRU budget underflow: current={}, budget={} "_format(_current, budget),
+ _current >= budget);
+ _current -= budget;
+ }
+
+ void onClear() {
+ _current = 0;
+ }
+
+ // Returns true if the cache runs over budget.
+ bool isOverBudget() const {
+ return _current > _max;
+ }
+
+ size_t currentBudget() const {
+ return _current;
+ }
+
+private:
+ const size_t _max;
+ size_t _current;
+ Estimator _estimator;
+};
+
+/**
* A key-value store structure with a least recently used (LRU) replacement
- * policy. The number of entries allowed in the kv-store is set as a constant
- * upon construction.
+ * policy. The total size allowed in the kv-store is controlled by the 'LRUBudgetTracker'
+ * passed to the constructor.
*
* Caveat:
* This kv-store is NOT thread safe! The client to this utility is responsible
@@ -57,10 +101,12 @@ namespace mongo {
* TODO: We could move this into the util/ directory and do any cleanup necessary to make it
* fully general.
*/
-template <class K, class V, class KeyHasher = std::hash<K>>
+template <class K, class V, class BudgetEstimator, class KeyHasher = std::hash<K>>
class LRUKeyValue {
public:
- LRUKeyValue(size_t maxSize) : _maxSize(maxSize), _currentSize(0){};
+ using BudgetTracker = LRUBudgetTracker<V, BudgetEstimator>;
+
+ LRUKeyValue(BudgetTracker&& bt) : _budgetTracker{std::move(bt)} {}
~LRUKeyValue() {
clear();
@@ -95,26 +141,26 @@ public:
KVMapConstIt i = _kvMap.find(key);
if (i != _kvMap.end()) {
KVListIt found = i->second;
+ _budgetTracker.onRemove(*found->second);
delete found->second;
_kvMap.erase(i);
_kvList.erase(found);
- _currentSize--;
}
_kvList.push_front(std::make_pair(key, entry));
_kvMap[key] = _kvList.begin();
- _currentSize++;
+ _budgetTracker.onAdd(*entry);
// If the store has grown beyond its allowed size,
- // evict the least recently used entry.
- if (_currentSize > _maxSize) {
+ // evict the least recently used entries.
+ while (_budgetTracker.isOverBudget()) {
+ invariant(!_kvList.empty());
V* evictedEntry = _kvList.back().second;
invariant(evictedEntry);
+ _budgetTracker.onRemove(*evictedEntry);
_kvMap.erase(_kvList.back().first);
_kvList.pop_back();
- _currentSize--;
- invariant(_currentSize == _maxSize);
// Pass ownership of evicted entry to caller.
// If caller chooses to ignore this unique_ptr,
@@ -163,10 +209,10 @@ public:
return Status(ErrorCodes::NoSuchKey, "no such key in LRU key-value store");
}
KVListIt found = i->second;
+ _budgetTracker.onRemove(*i->second->second);
delete found->second;
_kvMap.erase(i);
_kvList.erase(found);
- _currentSize--;
return Status::OK();
}
@@ -177,9 +223,10 @@ public:
for (KVListIt i = _kvList.begin(); i != _kvList.end(); i++) {
delete i->second;
}
+
+ _budgetTracker.onClear();
_kvList.clear();
_kvMap.clear();
- _currentSize = 0;
}
/**
@@ -190,10 +237,10 @@ public:
}
/**
- * Returns the number of entries currently in the kv-store.
+ * Returns the size (current budget) of the kv-store.
*/
size_t size() const {
- return _currentSize;
+ return _budgetTracker.currentBudget();
}
/**
@@ -210,11 +257,7 @@ public:
}
private:
- // The maximum allowable number of entries in the kv-store.
- const size_t _maxSize;
-
- // The number of entries currently in the kv-store.
- size_t _currentSize;
+ BudgetTracker _budgetTracker;
// (K, V*) pairs are stored in this std::list. They are sorted in order
// of use, where the front is the most recently used and the back is the
diff --git a/src/mongo/db/query/lru_key_value_test.cpp b/src/mongo/db/query/lru_key_value_test.cpp
index 9f15aad6c9a..2cdcd28ff38 100644
--- a/src/mongo/db/query/lru_key_value_test.cpp
+++ b/src/mongo/db/query/lru_key_value_test.cpp
@@ -37,10 +37,19 @@ using namespace mongo;
namespace {
//
-// Convenience functions
+// Convenience types and functions.
//
-void assertInKVStore(LRUKeyValue<int, int>& cache, int key, int value) {
+struct BudgetEstimator {
+ size_t operator()(int) {
+ return 1;
+ }
+};
+
+using TestKeyValue = LRUKeyValue<int, int, BudgetEstimator>;
+using BudgetTracker = LRUBudgetTracker<int, BudgetEstimator>;
+
+void assertInKVStore(TestKeyValue& cache, int key, int value) {
int* cachedValue = nullptr;
ASSERT_TRUE(cache.hasKey(key));
Status s = cache.get(key, &cachedValue);
@@ -48,7 +57,7 @@ void assertInKVStore(LRUKeyValue<int, int>& cache, int key, int value) {
ASSERT_EQUALS(*cachedValue, value);
}
-void assertNotInKVStore(LRUKeyValue<int, int>& cache, int key) {
+void assertNotInKVStore(TestKeyValue& cache, int key) {
int* cachedValue = nullptr;
ASSERT_FALSE(cache.hasKey(key));
Status s = cache.get(key, &cachedValue);
@@ -59,7 +68,7 @@ void assertNotInKVStore(LRUKeyValue<int, int>& cache, int key) {
* Test that we can add an entry and get it back out.
*/
TEST(LRUKeyValueTest, BasicAddGet) {
- LRUKeyValue<int, int> cache(100);
+ TestKeyValue cache{BudgetTracker(100)};
cache.add(1, new int(2));
assertInKVStore(cache, 1, 2);
}
@@ -69,7 +78,7 @@ TEST(LRUKeyValueTest, BasicAddGet) {
* that at the very least we don't blow up.
*/
TEST(LRUKeyValueTest, SizeZeroCache) {
- LRUKeyValue<int, int> cache(0);
+ TestKeyValue cache{BudgetTracker(0)};
cache.add(1, new int(2));
assertNotInKVStore(cache, 1);
}
@@ -79,7 +88,7 @@ TEST(LRUKeyValueTest, SizeZeroCache) {
* a kv-store of size 1.
*/
TEST(LRUKeyValueTest, SizeOneCache) {
- LRUKeyValue<int, int> cache(1);
+ TestKeyValue cache{BudgetTracker(1)};
cache.add(0, new int(0));
assertInKVStore(cache, 0, 0);
@@ -96,7 +105,7 @@ TEST(LRUKeyValueTest, SizeOneCache) {
*/
TEST(LRUKeyValueTest, EvictionTest) {
int maxSize = 10;
- LRUKeyValue<int, int> cache(maxSize);
+ TestKeyValue cache{BudgetTracker(maxSize)};
for (int i = 0; i < maxSize; ++i) {
std::unique_ptr<int> evicted = cache.add(i, new int(i));
ASSERT(nullptr == evicted.get());
@@ -136,7 +145,7 @@ TEST(LRUKeyValueTest, EvictionTest) {
*/
TEST(LRUKeyValueTest, PromotionTest) {
int maxSize = 10;
- LRUKeyValue<int, int> cache(maxSize);
+ TestKeyValue cache{BudgetTracker(maxSize)};
for (int i = 0; i < maxSize; ++i) {
std::unique_ptr<int> evicted = cache.add(i, new int(i));
ASSERT(nullptr == evicted.get());
@@ -169,7 +178,7 @@ TEST(LRUKeyValueTest, PromotionTest) {
* in the kv-store deletes the existing entry.
*/
TEST(LRUKeyValueTest, ReplaceKeyTest) {
- LRUKeyValue<int, int> cache(10);
+ TestKeyValue cache{BudgetTracker(10)};
cache.add(4, new int(4));
assertInKVStore(cache, 4, 4);
cache.add(4, new int(5));
@@ -180,7 +189,7 @@ TEST(LRUKeyValueTest, ReplaceKeyTest) {
* Test iteration over the kv-store.
*/
TEST(LRUKeyValueTest, IterationTest) {
- LRUKeyValue<int, int> cache(2);
+ TestKeyValue cache{BudgetTracker(2)};
cache.add(1, new int(1));
cache.add(2, new int(2));
diff --git a/src/mongo/db/query/plan_cache.h b/src/mongo/db/query/plan_cache.h
index 86882c46c30..8fcc4522c50 100644
--- a/src/mongo/db/query/plan_cache.h
+++ b/src/mongo/db/query/plan_cache.h
@@ -379,13 +379,19 @@ private:
* mapping, the cache contains information on why that mapping was made and statistics on the
* cache entry's actual performance on subsequent runs.
*/
-template <class KeyType, class CachedPlanType, class KeyHasher = std::hash<KeyType>>
+template <class KeyType,
+ class CachedPlanType,
+ class BudgetEstimator,
+ class KeyHasher = std::hash<KeyType>>
class PlanCacheBase {
private:
PlanCacheBase(const PlanCacheBase&) = delete;
PlanCacheBase& operator=(const PlanCacheBase&) = delete;
public:
+ using Entry = PlanCacheEntryBase<CachedPlanType>;
+ using BudgetTracker = LRUBudgetTracker<Entry, BudgetEstimator>;
+
// We have three states for a cache entry to be in. Rather than just 'present' or 'not
// present', we use a notion of 'inactive entries' as a way of remembering how performant our
// original solution to the query was. This information is useful to prevent much slower
@@ -462,12 +468,9 @@ public:
return true;
}
- /**
- * If omitted, namespace set to empty string.
- */
- PlanCacheBase() : PlanCacheBase(internalQueryCacheMaxEntriesPerCollection.load()) {}
+ PlanCacheBase(size_t size) : PlanCacheBase(BudgetTracker(size)) {}
- PlanCacheBase(size_t size) : _cache(size) {}
+ PlanCacheBase(BudgetTracker&& budgetTracker) : _cache{std::move(budgetTracker)} {}
~PlanCacheBase() = default;
@@ -536,7 +539,7 @@ public:
planCacheKey = key.planCacheKeyHash();
queryHash = key.queryHash();
} else {
- PlanCacheEntryBase<CachedPlanType>* oldEntry = nullptr;
+ Entry* oldEntry = nullptr;
Status cacheStatus = _cache.get(key, &oldEntry);
invariant(cacheStatus.isOK() || cacheStatus == ErrorCodes::NoSuchKey);
if (oldEntry) {
@@ -561,15 +564,15 @@ public:
isNewEntryActive = newState.shouldBeActive;
}
- auto newEntry(PlanCacheEntryBase<CachedPlanType>::create(solns,
- std::move(why),
- query,
- std::move(cachedPlan),
- queryHash,
- planCacheKey,
- now,
- isNewEntryActive,
- newWorks));
+ auto newEntry(Entry::create(solns,
+ std::move(why),
+ query,
+ std::move(cachedPlan),
+ queryHash,
+ planCacheKey,
+ now,
+ isNewEntryActive,
+ newWorks));
auto evictedEntry = _cache.add(key, newEntry.release());
@@ -593,7 +596,7 @@ public:
KeyType key = computeKey(query);
stdx::lock_guard<Latch> cacheLock(_cacheMutex);
- PlanCacheEntryBase<CachedPlanType>* entry = nullptr;
+ Entry* entry = nullptr;
Status cacheStatus = _cache.get(key, &entry);
if (!cacheStatus.isOK()) {
invariant(cacheStatus == ErrorCodes::NoSuchKey);
@@ -624,7 +627,7 @@ public:
*/
GetResult get(const KeyType& key) const {
stdx::lock_guard<Latch> cacheLock(_cacheMutex);
- PlanCacheEntryBase<CachedPlanType>* entry = nullptr;
+ Entry* entry = nullptr;
Status cacheStatus = _cache.get(key, &entry);
if (!cacheStatus.isOK()) {
invariant(cacheStatus == ErrorCodes::NoSuchKey);
@@ -694,39 +697,38 @@ public:
*
* If there is no entry in the cache for the 'query', returns an error Status.
*/
- StatusWith<std::unique_ptr<PlanCacheEntryBase<CachedPlanType>>> getEntry(
- const CanonicalQuery& cq) const {
+ StatusWith<std::unique_ptr<Entry>> getEntry(const CanonicalQuery& cq) const {
KeyType key = computeKey(cq);
stdx::lock_guard<Latch> cacheLock(_cacheMutex);
- PlanCacheEntryBase<CachedPlanType>* entry;
+ Entry* entry;
Status cacheStatus = _cache.get(key, &entry);
if (!cacheStatus.isOK()) {
return cacheStatus;
}
invariant(entry);
- return std::unique_ptr<PlanCacheEntryBase<CachedPlanType>>(entry->clone());
+ return std::unique_ptr<Entry>(entry->clone());
}
/**
* Returns a vector of all cache entries.
* Used by planCacheListQueryShapes and index_filter_commands_test.cpp.
*/
- std::vector<std::unique_ptr<PlanCacheEntryBase<CachedPlanType>>> getAllEntries() const {
+ std::vector<std::unique_ptr<Entry>> getAllEntries() const {
stdx::lock_guard<Latch> cacheLock(_cacheMutex);
- std::vector<std::unique_ptr<PlanCacheEntryBase<CachedPlanType>>> entries;
+ std::vector<std::unique_ptr<Entry>> entries;
for (auto&& cacheEntry : _cache) {
auto entry = cacheEntry.second;
- entries.push_back(std::unique_ptr<PlanCacheEntryBase<CachedPlanType>>(entry->clone()));
+ entries.push_back(std::unique_ptr<Entry>(entry->clone()));
}
return entries;
}
/**
- * Returns number of entries in cache. Includes inactive entries.
+ * Returns the size of the cache.
* Used for testing.
*/
size_t size() const {
@@ -749,7 +751,7 @@ public:
* 'serializationFunc'. Returns a vector of all serialized entries which match 'filterFunc'.
*/
std::vector<BSONObj> getMatchingStats(
- const std::function<BSONObj(const PlanCacheEntryBase<CachedPlanType>&)>& serializationFunc,
+ const std::function<BSONObj(const Entry&)>& serializationFunc,
const std::function<bool(const BSONObj&)>& filterFunc) const {
std::vector<BSONObj> results;
stdx::lock_guard<Latch> cacheLock(_cacheMutex);
@@ -780,7 +782,7 @@ private:
NewEntryState getNewEntryState(const CanonicalQuery& query,
uint32_t queryHash,
uint32_t planCacheKey,
- PlanCacheEntryBase<CachedPlanType>* oldEntry,
+ Entry* oldEntry,
size_t newWorks,
double growthCoefficient) {
NewEntryState res;
@@ -851,7 +853,7 @@ private:
return res;
}
- LRUKeyValue<KeyType, PlanCacheEntryBase<CachedPlanType>, KeyHasher> _cache;
+ LRUKeyValue<KeyType, PlanCacheEntryBase<CachedPlanType>, BudgetEstimator, KeyHasher> _cache;
// Protects _cache.
mutable Mutex _cacheMutex = MONGO_MAKE_LATCH("PlanCache::_cacheMutex");
diff --git a/src/mongo/db/query/plan_cache_test.cpp b/src/mongo/db/query/plan_cache_test.cpp
index 2e1152bc0d8..1931f797320 100644
--- a/src/mongo/db/query/plan_cache_test.cpp
+++ b/src/mongo/db/query/plan_cache_test.cpp
@@ -523,7 +523,7 @@ TEST(PlanCacheTest, ShouldNotCacheQueryExplain) {
// Adding an empty vector of query solutions should fail.
TEST(PlanCacheTest, AddEmptySolutions) {
- PlanCache planCache;
+ PlanCache planCache(5000);
unique_ptr<CanonicalQuery> cq(canonicalize("{a: 1}"));
std::vector<QuerySolution*> solns;
unique_ptr<plan_ranker::PlanRankingDecision> decision(createDecision(1U));
@@ -545,7 +545,7 @@ TEST(PlanCacheTest, InactiveEntriesDisabled) {
internalQueryCacheDisableInactiveEntries.store(true);
ON_BLOCK_EXIT([] { internalQueryCacheDisableInactiveEntries.store(false); });
- PlanCache planCache;
+ PlanCache planCache(5000);
unique_ptr<CanonicalQuery> cq(canonicalize("{a: 1}"));
auto qs = getQuerySolutionForCaching();
std::vector<QuerySolution*> solns = {qs.get()};
@@ -605,7 +605,7 @@ TEST(PlanCacheTest, PlanCacheLRUPolicyRemovesInactiveEntries) {
}
TEST(PlanCacheTest, PlanCacheRemoveDeletesInactiveEntries) {
- PlanCache planCache;
+ PlanCache planCache(5000);
unique_ptr<CanonicalQuery> cq(canonicalize("{a: 1}"));
auto qs = getQuerySolutionForCaching();
std::vector<QuerySolution*> solns = {qs.get()};
@@ -624,7 +624,7 @@ TEST(PlanCacheTest, PlanCacheRemoveDeletesInactiveEntries) {
}
TEST(PlanCacheTest, PlanCacheFlushDeletesInactiveEntries) {
- PlanCache planCache;
+ PlanCache planCache(5000);
unique_ptr<CanonicalQuery> cq(canonicalize("{a: 1}"));
auto qs = getQuerySolutionForCaching();
std::vector<QuerySolution*> solns = {qs.get()};
@@ -643,7 +643,7 @@ TEST(PlanCacheTest, PlanCacheFlushDeletesInactiveEntries) {
}
TEST(PlanCacheTest, AddActiveCacheEntry) {
- PlanCache planCache;
+ PlanCache planCache(5000);
unique_ptr<CanonicalQuery> cq(canonicalize("{a: 1}"));
auto qs = getQuerySolutionForCaching();
std::vector<QuerySolution*> solns = {qs.get()};
@@ -669,7 +669,7 @@ TEST(PlanCacheTest, AddActiveCacheEntry) {
}
TEST(PlanCacheTest, WorksValueIncreases) {
- PlanCache planCache;
+ PlanCache planCache(5000);
unique_ptr<CanonicalQuery> cq(canonicalize("{a: 1}"));
auto qs = getQuerySolutionForCaching();
std::vector<QuerySolution*> solns = {qs.get()};
@@ -732,7 +732,7 @@ TEST(PlanCacheTest, WorksValueIncreasesByAtLeastOne) {
// Will use a very small growth coefficient.
const double kWorksCoeff = 1.10;
- PlanCache planCache;
+ PlanCache planCache(5000);
unique_ptr<CanonicalQuery> cq(canonicalize("{a: 1}"));
auto qs = getQuerySolutionForCaching();
std::vector<QuerySolution*> solns = {qs.get()};
@@ -767,7 +767,7 @@ TEST(PlanCacheTest, WorksValueIncreasesByAtLeastOne) {
}
TEST(PlanCacheTest, SetIsNoopWhenNewEntryIsWorse) {
- PlanCache planCache;
+ PlanCache planCache(5000);
unique_ptr<CanonicalQuery> cq(canonicalize("{a: 1}"));
auto qs = getQuerySolutionForCaching();
std::vector<QuerySolution*> solns = {qs.get()};
@@ -800,7 +800,7 @@ TEST(PlanCacheTest, SetIsNoopWhenNewEntryIsWorse) {
}
TEST(PlanCacheTest, SetOverwritesWhenNewEntryIsBetter) {
- PlanCache planCache;
+ PlanCache planCache(5000);
unique_ptr<CanonicalQuery> cq(canonicalize("{a: 1}"));
auto qs = getQuerySolutionForCaching();
std::vector<QuerySolution*> solns = {qs.get()};
@@ -832,7 +832,7 @@ TEST(PlanCacheTest, SetOverwritesWhenNewEntryIsBetter) {
}
TEST(PlanCacheTest, DeactivateCacheEntry) {
- PlanCache planCache;
+ PlanCache planCache(5000);
unique_ptr<CanonicalQuery> cq(canonicalize("{a: 1}"));
auto qs = getQuerySolutionForCaching();
std::vector<QuerySolution*> solns = {qs.get()};
@@ -864,7 +864,7 @@ TEST(PlanCacheTest, DeactivateCacheEntry) {
}
TEST(PlanCacheTest, GetMatchingStatsMatchesAndSerializesCorrectly) {
- PlanCache planCache;
+ PlanCache planCache(5000);
// Create a cache entry with 5 works.
{
@@ -1871,7 +1871,7 @@ TEST_F(CachePlanSelectionTest, ContainedOrAndIntersection) {
// When a sparse index is present, computeKey() should generate different keys depending on
// whether or not the predicates in the given query can use the index.
TEST(PlanCacheTest, ComputeKeySparseIndex) {
- PlanCache planCache;
+ PlanCache planCache(5000);
const auto keyPattern = BSON("a" << 1);
planCache.notifyOfIndexUpdates(
{CoreIndexInfo(keyPattern,
@@ -1904,7 +1904,7 @@ TEST(PlanCacheTest, ComputeKeyPartialIndex) {
BSONObj filterObj = BSON("f" << BSON("$gt" << 0));
unique_ptr<MatchExpression> filterExpr(parseMatchExpression(filterObj));
- PlanCache planCache;
+ PlanCache planCache(5000);
const auto keyPattern = BSON("a" << 1);
planCache.notifyOfIndexUpdates(
{CoreIndexInfo(keyPattern,
@@ -1929,7 +1929,7 @@ TEST(PlanCacheTest, ComputeKeyPartialIndex) {
TEST(PlanCacheTest, ComputeKeyCollationIndex) {
CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kReverseString);
- PlanCache planCache;
+ PlanCache planCache(5000);
const auto keyPattern = BSON("a" << 1);
planCache.notifyOfIndexUpdates(
{CoreIndexInfo(keyPattern,
@@ -1996,11 +1996,11 @@ TEST(PlanCacheTest, ComputeKeyCollationIndex) {
TEST(PlanCacheTest, ComputeKeyWildcardIndex) {
auto entryProjUpdatePair = makeWildcardUpdate(BSON("a.$**" << 1));
- PlanCache planCache;
+ PlanCache planCache(5000);
planCache.notifyOfIndexUpdates({entryProjUpdatePair.first});
// Used to check that two queries have the same shape when no indexes are present.
- PlanCache planCacheWithNoIndexes;
+ PlanCache planCacheWithNoIndexes(5000);
// Compatible with index.
unique_ptr<CanonicalQuery> usesPathWithScalar(canonicalize("{a: 'abcdef'}"));
@@ -2061,7 +2061,7 @@ TEST(PlanCacheTest, ComputeKeyWildcardIndex) {
TEST(PlanCacheTest, ComputeKeyWildcardIndexDiscriminatesEqualityToEmptyObj) {
auto entryProjUpdatePair = makeWildcardUpdate(BSON("a.$**" << 1));
- PlanCache planCache;
+ PlanCache planCache(5000);
planCache.notifyOfIndexUpdates({entryProjUpdatePair.first});
// Equality to empty obj and equality to non-empty obj have different plan cache keys.
@@ -2089,7 +2089,7 @@ TEST(PlanCacheTest, ComputeKeyWildcardDiscriminatesCorrectlyBasedOnPartialFilter
auto indexInfo = std::move(entryProjUpdatePair.first);
indexInfo.filterExpr = filterExpr.get();
- PlanCache planCache;
+ PlanCache planCache(5000);
planCache.notifyOfIndexUpdates({indexInfo});
// Test that queries on field 'x' are discriminated based on their relationship with the partial
@@ -2159,7 +2159,7 @@ TEST(PlanCacheTest, ComputeKeyWildcardDiscriminatesCorrectlyWithPartialFilterAnd
auto indexInfo = std::move(entryProjUpdatePair.first);
indexInfo.filterExpr = filterExpr.get();
- PlanCache planCache;
+ PlanCache planCache(5000);
planCache.notifyOfIndexUpdates({indexInfo});
{
@@ -2187,7 +2187,7 @@ TEST(PlanCacheTest, ComputeKeyWildcardDiscriminatesCorrectlyWithPartialFilterOnN
auto indexInfo = std::move(entryProjUpdatePair.first);
indexInfo.filterExpr = filterExpr.get();
- PlanCache planCache;
+ PlanCache planCache(5000);
planCache.notifyOfIndexUpdates({indexInfo});
{
@@ -2213,7 +2213,7 @@ TEST(PlanCacheTest, ComputeKeyDiscriminatesCorrectlyWithPartialFilterAndWildcard
auto indexInfo = std::move(entryProjUpdatePair.first);
indexInfo.filterExpr = filterExpr.get();
- PlanCache planCache;
+ PlanCache planCache(5000);
planCache.notifyOfIndexUpdates({indexInfo});
{
@@ -2240,7 +2240,7 @@ TEST(PlanCacheTest, ComputeKeyDiscriminatesCorrectlyWithPartialFilterAndWildcard
}
TEST(PlanCacheTest, StableKeyDoesNotChangeAcrossIndexCreation) {
- PlanCache planCache;
+ PlanCache planCache(5000);
unique_ptr<CanonicalQuery> cq(canonicalize("{a: 0}}"));
const PlanCacheKey preIndexKey = planCache.computeKey(*cq);
const auto preIndexStableKey = preIndexKey.getStableKey();
@@ -2262,7 +2262,7 @@ TEST(PlanCacheTest, StableKeyDoesNotChangeAcrossIndexCreation) {
}
TEST(PlanCacheTest, ComputeKeyNotEqualsArray) {
- PlanCache planCache;
+ PlanCache planCache(5000);
unique_ptr<CanonicalQuery> cqNeArray(canonicalize("{a: {$ne: [1]}}"));
unique_ptr<CanonicalQuery> cqNeScalar(canonicalize("{a: {$ne: 123}}"));
@@ -2296,7 +2296,7 @@ TEST(PlanCacheTest, ComputeKeyNotEqualsArray) {
}
TEST(PlanCacheTest, ComputeKeyNinArray) {
- PlanCache planCache;
+ PlanCache planCache(5000);
unique_ptr<CanonicalQuery> cqNinArray(canonicalize("{a: {$nin: [123, [1]]}}"));
unique_ptr<CanonicalQuery> cqNinScalar(canonicalize("{a: {$nin: [123, 456]}}"));
@@ -2337,7 +2337,7 @@ TEST(PlanCacheTest, ComputeKeyNinArray) {
// ambiguous. This would make it possible for two queries with different shapes (and different
// plans) to get the same plan cache key. We test that this does not happen for a simple example.
TEST(PlanCacheTest, PlanCacheKeyCollision) {
- PlanCache planCache;
+ PlanCache planCache(5000);
unique_ptr<CanonicalQuery> cqNeA(canonicalize("{$or: [{a: {$ne: 5}}, {a: {$ne: [12]}}]}"));
unique_ptr<CanonicalQuery> cqNeB(canonicalize("{$or: [{a: {$ne: [12]}}, {a: {$ne: 5}}]}"));
@@ -2362,7 +2362,7 @@ TEST(PlanCacheTest, PlanCacheKeyCollision) {
}
TEST(PlanCacheTest, PlanCacheSizeWithCRUDOperations) {
- PlanCache planCache;
+ PlanCache planCache(5000);
unique_ptr<CanonicalQuery> cq(canonicalize("{a: 1, b: 1}"));
auto qs = getQuerySolutionForCaching();
std::vector<QuerySolution*> solns = {qs.get()};
@@ -2490,8 +2490,8 @@ TEST(PlanCacheTest, PlanCacheSizeWithEviction) {
}
TEST(PlanCacheTest, PlanCacheSizeWithMultiplePlanCaches) {
- PlanCache planCache1;
- PlanCache planCache2;
+ PlanCache planCache1(5000);
+ PlanCache planCache2(5000);
unique_ptr<CanonicalQuery> cq(canonicalize("{a: 1, b: 1}"));
auto qs = getQuerySolutionForCaching();
std::vector<QuerySolution*> solns = {qs.get()};
@@ -2527,7 +2527,7 @@ TEST(PlanCacheTest, PlanCacheSizeWithMultiplePlanCaches) {
// Verify for scoped PlanCache object.
long long sizeBeforeScopedPlanCache = PlanCacheEntry::planCacheTotalSizeEstimateBytes.get();
{
- PlanCache planCache;
+ PlanCache planCache(5000);
previousSize = PlanCacheEntry::planCacheTotalSizeEstimateBytes.get();
ASSERT_OK(planCache.set(*cq, qs->cacheData->clone(), solns, createDecision(1U), Date_t{}));
ASSERT_GT(PlanCacheEntry::planCacheTotalSizeEstimateBytes.get(), previousSize);
@@ -2557,7 +2557,7 @@ TEST(PlanCacheTest, DifferentQueryEngines) {
return pc.computeKey(*cq);
};
- PlanCache planCache;
+ PlanCache planCache(5000);
const auto keyPattern = BSON("a" << 1);
// Create a normal btree index. It will have a discriminator.
diff --git a/src/mongo/db/query/sbe_plan_cache.cpp b/src/mongo/db/query/sbe_plan_cache.cpp
index bc6fe100d0f..1c2f04998bf 100644
--- a/src/mongo/db/query/sbe_plan_cache.cpp
+++ b/src/mongo/db/query/sbe_plan_cache.cpp
@@ -39,9 +39,11 @@ const auto sbePlanCacheDecoration =
ServiceContext::ConstructorActionRegisterer planCacheRegisterer{
"PlanCacheRegisterer", [](ServiceContext* serviceCtx) {
+ // Max memory size in bytes of the PlanCache.
+ constexpr size_t kQueryCacheMaxSizeInBytes = 100 * 1024 * 1024;
if (feature_flags::gFeatureFlagSbePlanCache.isEnabledAndIgnoreFCV()) {
auto& globalPlanCache = sbePlanCacheDecoration(serviceCtx);
- globalPlanCache = std::make_unique<sbe::PlanCache>();
+ globalPlanCache = std::make_unique<sbe::PlanCache>(kQueryCacheMaxSizeInBytes);
}
}};
diff --git a/src/mongo/db/query/sbe_plan_cache.h b/src/mongo/db/query/sbe_plan_cache.h
index eb546fbc006..4c9c248242c 100644
--- a/src/mongo/db/query/sbe_plan_cache.h
+++ b/src/mongo/db/query/sbe_plan_cache.h
@@ -96,7 +96,16 @@ struct CachedSbePlan {
stage_builder::PlanStageData planStageData;
};
-using PlanCache = PlanCacheBase<sbe::PlanCacheKey, CachedSbePlan, sbe::PlanCacheKeyHasher>;
+using PlanCacheEntry = PlanCacheEntryBase<CachedSbePlan>;
+
+struct BudgetEstimator {
+ size_t operator()(const PlanCacheEntry& entry) {
+ return entry.estimatedEntrySizeBytes;
+ }
+};
+
+using PlanCache =
+ PlanCacheBase<sbe::PlanCacheKey, CachedSbePlan, BudgetEstimator, sbe::PlanCacheKeyHasher>;
/**
* A helper method to get the global SBE plan cache decorated in 'serviceCtx'.
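
As a closing note: the two estimators defined in this change plug into the same budget tracker but yield very different budgets. Below is a hedged, standalone sketch of that contrast; the entry type and the printed costs are hypothetical, while the real estimators operate on PlanCacheEntry objects in classic_plan_cache.h and sbe_plan_cache.h.

#include <cstddef>
#include <iostream>

// Hypothetical stand-in for a plan cache entry; only the size field matters here.
struct FakeEntry {
    std::size_t estimatedEntrySizeBytes;
};

// Classic plan cache style: each entry costs 1, so the budget caps the number of entries
// (internalQueryCacheMaxEntriesPerCollection in the diff above).
struct CountEstimator {
    std::size_t operator()(const FakeEntry&) const {
        return 1;
    }
};

// SBE plan cache style: each entry costs its estimated size, so the budget caps memory
// (100 MB in sbe_plan_cache.cpp above).
struct ByteEstimator {
    std::size_t operator()(const FakeEntry& e) const {
        return e.estimatedEntrySizeBytes;
    }
};

int main() {
    const FakeEntry entry{64 * 1024 * 1024};  // a hypothetical 64 MB cached plan
    std::cout << "count-based cost: " << CountEstimator{}(entry) << "\n";  // 1
    std::cout << "byte-based cost:  " << ByteEstimator{}(entry) << "\n";   // 67108864
    // Under a 100 MB byte budget, a second entry like this would push the cache over budget
    // and trigger LRU eviction; under an entry-count budget it would only count as one entry.
    return 0;
}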