author | Rui Liu <rui.liu@mongodb.com> | 2022-06-22 09:52:36 +0000 |
---|---|---|
committer | Evergreen Agent <no-reply@evergreen.mongodb.com> | 2022-06-22 10:42:32 +0000 |
commit | 2c2d85e73d0620d779544ce67218db171c154e8b (patch) | |
tree | c6172fde4fb0500c2254ceaf9cd92ac14c8b128d /src | |
parent | ad2b9ae23ad19f871f2033dc338c96ad9aa8d161 (diff) | |
download | mongo-2c2d85e73d0620d779544ce67218db171c154e8b.tar.gz |
SERVER-64432 Integrate SBE plan cache for $lookup
Diffstat (limited to 'src')
73 files changed, 765 insertions, 341 deletions
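Note on the shape of the change: once a $lookup is pushed down into SBE, a single cached plan reads the main collection plus one or more foreign collections, so the SBE plan cache key has to capture the state (UUID and version) of each of them. The sketch below is a minimal, self-contained model of such a key, using assumed stand-in types; it is not the real `sbe::PlanCacheKey`, and only the `getMainCollectionState()`/`uuid`/`version` names echo accessors visible in the diff.

```cpp
// Illustrative sketch only: simplified stand-ins, not MongoDB's real classes.
#include <cstdint>
#include <string>
#include <vector>

struct CollectionStateSketch {
    std::string uuid;   // which collection the plan was built against
    uint64_t version;   // invalidator counter; compared against the collection's
                        // plan-cache invalidator version in the diff below

    bool operator==(const CollectionStateSketch& other) const {
        return uuid == other.uuid && version == other.version;
    }
};

struct SbePlanCacheKeySketch {
    std::string queryShape;                                    // encoded find + pushed-down pipeline
    CollectionStateSketch mainCollection;                      // collection the query runs against
    std::vector<CollectionStateSketch> secondaryCollections;   // $lookup foreign collections

    const CollectionStateSketch& getMainCollectionState() const {
        return mainCollection;
    }

    bool operator==(const SbePlanCacheKeySketch& other) const {
        return queryShape == other.queryShape && mainCollection == other.mainCollection &&
            secondaryCollections == other.secondaryCollections;
    }
};
```

With a key shaped like this, the former `key.getCollectionUuid()` call sites in the diff naturally become `key.getMainCollectionState().uuid`.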
diff --git a/src/mongo/db/catalog/SConscript b/src/mongo/db/catalog/SConscript
index a02d9350e93..2cae97495a4 100644
--- a/src/mongo/db/catalog/SConscript
+++ b/src/mongo/db/catalog/SConscript
@@ -537,6 +537,7 @@ env.Library(
             '$BUILD_DIR/mongo/db/query/query_plan_cache',
             '$BUILD_DIR/mongo/db/query/query_planner',
             '$BUILD_DIR/mongo/db/update_index_data',
+            'collection',
         ],
         LIBDEPS_PRIVATE=[
             '$BUILD_DIR/mongo/base',
diff --git a/src/mongo/db/commands/index_filter_commands.cpp b/src/mongo/db/commands/index_filter_commands.cpp
index f040bd9eea3..5deb5ecd339 100644
--- a/src/mongo/db/commands/index_filter_commands.cpp
+++ b/src/mongo/db/commands/index_filter_commands.cpp
@@ -99,7 +99,7 @@ void removePlanCacheEntriesByIndexFilterKeys(const stdx::unordered_set<uint32_t>
                                              sbe::PlanCache* planCache) {
     planCache->removeIf([&](const sbe::PlanCacheKey& key, const sbe::PlanCacheEntry& entry) {
         return indexFilterKeys.contains(entry.indexFilterKey) &&
-            key.getCollectionUuid() == collectionUuid;
+            key.getMainCollectionState().uuid == collectionUuid;
     });
 }
 }  // namespace
diff --git a/src/mongo/db/commands/plan_cache_commands_test.cpp b/src/mongo/db/commands/plan_cache_commands_test.cpp
index 4ef229c36f4..3495ee127d6 100644
--- a/src/mongo/db/commands/plan_cache_commands_test.cpp
+++ b/src/mongo/db/commands/plan_cache_commands_test.cpp
@@ -40,7 +40,7 @@ namespace {
 
 static const NamespaceString nss{"test.collection"_sd};
 
-PlanCacheKey makeKey(const CanonicalQuery& cq) {
+PlanCacheKey makeClassicKey(const CanonicalQuery& cq) {
     CollectionMock coll(nss);
     return plan_cache_key_factory::make<PlanCacheKey>(cq, &coll);
 }
@@ -106,7 +106,7 @@ TEST(PlanCacheCommandsTest, CanCanonicalizeWithValidQuery) {
         plan_cache_commands::canonicalize(opCtx.get(), nss.ns(), fromjson("{query: {b: 3, a: 4}}"));
     ASSERT_OK(statusWithCQ.getStatus());
     std::unique_ptr<CanonicalQuery> equivQuery = std::move(statusWithCQ.getValue());
-    ASSERT_EQUALS(makeKey(*query), makeKey(*equivQuery));
+    ASSERT_EQUALS(makeClassicKey(*query), makeClassicKey(*equivQuery));
 }
 
 TEST(PlanCacheCommandsTest, SortQueryResultsInDifferentPlanCacheKeyFromUnsorted) {
@@ -124,7 +124,7 @@ TEST(PlanCacheCommandsTest, SortQueryResultsInDifferentPlanCacheKeyFromUnsorted)
         opCtx.get(), nss.ns(), fromjson("{query: {a: 1, b: 1}, sort: {a: 1, b: 1}}"));
     ASSERT_OK(statusWithCQ.getStatus());
     std::unique_ptr<CanonicalQuery> sortQuery = std::move(statusWithCQ.getValue());
-    ASSERT_NOT_EQUALS(makeKey(*query), makeKey(*sortQuery));
+    ASSERT_NOT_EQUALS(makeClassicKey(*query), makeClassicKey(*sortQuery));
 }
 
 // Regression test for SERVER-17158.
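The rename from makeKey to makeClassicKey in the tests above reflects that two key types now coexist: the classic per-collection PlanCacheKey, still built from the query and a single collection, and the process-wide sbe::PlanCacheKey, which identifies its main collection through getMainCollectionState(). The index-filter hunk above clears SBE entries with removeIf; below is a self-contained toy model of that removal pattern, with assumed simplified types (ToyKey, ToyEntry, ToyPlanCache) standing in for the real classes.

```cpp
// Toy model of the removeIf pattern used in removePlanCacheEntriesByIndexFilterKeys();
// not the real sbe::PlanCache implementation.
#include <cstdint>
#include <functional>
#include <iterator>
#include <map>
#include <string>
#include <tuple>
#include <utility>

struct ToyKey {
    std::string mainCollectionUuid;  // stand-in for key.getMainCollectionState().uuid
    std::string queryShape;

    bool operator<(const ToyKey& o) const {
        return std::tie(mainCollectionUuid, queryShape) <
            std::tie(o.mainCollectionUuid, o.queryShape);
    }
};

struct ToyEntry {
    uint32_t indexFilterKey = 0;  // stand-in for entry.indexFilterKey
};

class ToyPlanCache {
public:
    void add(ToyKey key, ToyEntry entry) {
        _entries[std::move(key)] = entry;
    }

    // Drop every entry for which the predicate returns true, as the hunk above
    // does on the SBE plan cache.
    void removeIf(const std::function<bool(const ToyKey&, const ToyEntry&)>& pred) {
        for (auto it = _entries.begin(); it != _entries.end();) {
            it = pred(it->first, it->second) ? _entries.erase(it) : std::next(it);
        }
    }

private:
    std::map<ToyKey, ToyEntry> _entries;
};

// Mirrors the call site above: clear entries whose index filter was removed and
// whose plan was built against the given main collection.
void removeForCollection(ToyPlanCache& cache, const std::string& uuid, uint32_t filterKey) {
    cache.removeIf([&](const ToyKey& key, const ToyEntry& entry) {
        return entry.indexFilterKey == filterKey && key.mainCollectionUuid == uuid;
    });
}
```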
@@ -143,7 +143,7 @@ TEST(PlanCacheCommandsTest, SortsAreProperlyDelimitedInPlanCacheKey) {
         opCtx.get(), nss.ns(), fromjson("{query: {a: 1, b: 1}, sort: {aab: 1}}"));
     ASSERT_OK(statusWithCQ.getStatus());
     std::unique_ptr<CanonicalQuery> sortQuery2 = std::move(statusWithCQ.getValue());
-    ASSERT_NOT_EQUALS(makeKey(*sortQuery1), makeKey(*sortQuery2));
+    ASSERT_NOT_EQUALS(makeClassicKey(*sortQuery1), makeClassicKey(*sortQuery2));
 }
 
 TEST(PlanCacheCommandsTest, ProjectQueryResultsInDifferentPlanCacheKeyFromUnprojected) {
@@ -160,7 +160,7 @@ TEST(PlanCacheCommandsTest, ProjectQueryResultsInDifferentPlanCacheKeyFromUnproj
         opCtx.get(), nss.ns(), fromjson("{query: {a: 1, b: 1}, projection: {_id: 0, a: 1}}"));
     ASSERT_OK(statusWithCQ.getStatus());
     std::unique_ptr<CanonicalQuery> projectionQuery = std::move(statusWithCQ.getValue());
-    ASSERT_NOT_EQUALS(makeKey(*query), makeKey(*projectionQuery));
+    ASSERT_NOT_EQUALS(makeClassicKey(*query), makeClassicKey(*projectionQuery));
 }
 
 }  // namespace
diff --git a/src/mongo/db/commands/run_aggregate.cpp b/src/mongo/db/commands/run_aggregate.cpp
index 6230b6da99d..42053578913 100644
--- a/src/mongo/db/commands/run_aggregate.cpp
+++ b/src/mongo/db/commands/run_aggregate.cpp
@@ -1031,7 +1031,7 @@ Status runAggregate(OperationContext* opCtx,
         // yet.
         invariant(ctx);
         Explain::explainStages(explainExecutor,
-                               ctx->getCollection(),
+                               collections,
                                *(expCtx->explain),
                                BSON("optimizedPipeline" << true),
                                cmdObj,
diff --git a/src/mongo/db/db_raii.cpp b/src/mongo/db/db_raii.cpp
index ddc53c40db1..688577f8e28 100644
--- a/src/mongo/db/db_raii.cpp
+++ b/src/mongo/db/db_raii.cpp
@@ -804,6 +804,14 @@ const CollectionPtr& AutoGetCollectionForReadMaybeLockFree::getCollection() cons
     }
 }
 
+bool AutoGetCollectionForReadMaybeLockFree::isAnySecondaryNamespaceAViewOrSharded() const {
+    if (_autoGet) {
+        return _autoGet->isAnySecondaryNamespaceAViewOrSharded();
+    } else {
+        return _autoGetLockFree->isAnySecondaryNamespaceAViewOrSharded();
+    }
+}
+
 template <typename AutoGetCollectionForReadType>
 AutoGetCollectionForReadCommandBase<AutoGetCollectionForReadType>::
     AutoGetCollectionForReadCommandBase(
diff --git a/src/mongo/db/db_raii.h b/src/mongo/db/db_raii.h
index 117a9eba220..63bdf8c621d 100644
--- a/src/mongo/db/db_raii.h
+++ b/src/mongo/db/db_raii.h
@@ -311,6 +311,7 @@ public:
     const CollectionPtr& getCollection() const;
     const ViewDefinition* getView() const;
     const NamespaceString& getNss() const;
+    bool isAnySecondaryNamespaceAViewOrSharded() const;
 
 private:
     boost::optional<AutoGetCollectionForRead> _autoGet;
diff --git a/src/mongo/db/exec/multi_plan.cpp b/src/mongo/db/exec/multi_plan.cpp
index 1db8860dc2e..0dbb0c4a405 100644
--- a/src/mongo/db/exec/multi_plan.cpp
+++ b/src/mongo/db/exec/multi_plan.cpp
@@ -46,6 +46,7 @@
 #include "mongo/db/query/classic_plan_cache.h"
 #include "mongo/db/query/collection_query_info.h"
 #include "mongo/db/query/explain.h"
+#include "mongo/db/query/multiple_collection_accessor.h"
 #include "mongo/db/query/plan_cache_key_factory.h"
 #include "mongo/db/query/plan_ranker.h"
 #include "mongo/db/query/plan_ranker_util.h"
@@ -280,8 +281,12 @@ Status MultiPlanStage::pickBestPlan(PlanYieldPolicy* yieldPolicy) {
         }
     }
 
-    plan_cache_util::updatePlanCache(
-        expCtx()->opCtx, collection(), _cachingMode, *_query, std::move(ranking), _candidates);
+    plan_cache_util::updatePlanCache(expCtx()->opCtx,
+                                     MultipleCollectionAccessor(collection()),
+                                     _cachingMode,
+                                     *_query,
+                                     std::move(ranking),
+                                     _candidates);
 
     return Status::OK();
 }
diff --git a/src/mongo/db/exec/plan_cache_util.cpp b/src/mongo/db/exec/plan_cache_util.cpp
index 85d5c823849..a3fc5ff19d1 100644
--- a/src/mongo/db/exec/plan_cache_util.cpp
+++ b/src/mongo/db/exec/plan_cache_util.cpp
@@ -74,17 +74,17 @@ void logNotCachingNoData(std::string&& solution) {
 }  // namespace log_detail
 
 void updatePlanCache(OperationContext* opCtx,
-                     const CollectionPtr& collection,
+                     const MultipleCollectionAccessor& collections,
                      const CanonicalQuery& query,
                      const QuerySolution& solution,
                      const sbe::PlanStage& root,
                      const stage_builder::PlanStageData& data) {
-    // TODO SERVER-61507: Integration between lowering parts of aggregation pipeline into the find
-    // subsystem and the new SBE cache isn't implemented yet. Remove cq->pipeline().empty() check
-    // once it's implemented.
-    if (shouldCacheQuery(query) && collection && query.pipeline().empty() &&
+    // TODO SERVER-61507: Remove canUseSbePlanCache check once $group pushdown is
+    // integrated with SBE plan cache.
+    if (shouldCacheQuery(query) && collections.getMainCollection() &&
+        canonical_query_encoder::canUseSbePlanCache(query) &&
         feature_flags::gFeatureFlagSbePlanCache.isEnabledAndIgnoreFCV()) {
-        auto key = plan_cache_key_factory::make<sbe::PlanCacheKey>(query, collection);
+        auto key = plan_cache_key_factory::make(query, collections);
         auto plan = std::make_unique<sbe::CachedSbePlan>(root.clone(), data);
         plan->indexFilterApplied = solution.indexFilterApplied;
         sbe::getPlanCache(opCtx).setPinned(
diff --git a/src/mongo/db/exec/plan_cache_util.h b/src/mongo/db/exec/plan_cache_util.h
index 630458cbcd4..2fb16d8be89 100644
--- a/src/mongo/db/exec/plan_cache_util.h
+++ b/src/mongo/db/exec/plan_cache_util.h
@@ -32,6 +32,7 @@
 #include "mongo/db/exec/plan_stats.h"
 #include "mongo/db/query/canonical_query.h"
 #include "mongo/db/query/collection_query_info.h"
+#include "mongo/db/query/multiple_collection_accessor.h"
 #include "mongo/db/query/plan_cache_debug_info.h"
 #include "mongo/db/query/plan_cache_key_factory.h"
 #include "mongo/db/query/plan_explainer_factory.h"
@@ -98,7 +99,7 @@ plan_cache_debug_info::DebugInfoSBE buildDebugInfo(const QuerySolution* solution
 template <typename PlanStageType, typename ResultType, typename Data>
 void updatePlanCache(
     OperationContext* opCtx,
-    const CollectionPtr& collection,
+    const MultipleCollectionAccessor& collections,
     PlanCachingMode cachingMode,
     const CanonicalQuery& query,
     std::unique_ptr<plan_ranker::PlanRankingDecision> ranking,
@@ -183,6 +184,7 @@ void updatePlanCache(
             callbacks{query, buildDebugInfoFn};
         winningPlan.solution->cacheData->indexFilterApplied =
             winningPlan.solution->indexFilterApplied;
+        auto& collection = collections.getMainCollection();
         uassertStatusOK(CollectionQueryInfo::get(collection)
                             .getPlanCache()
                             ->set(plan_cache_key_factory::make<PlanCacheKey>(query, collection),
@@ -195,10 +197,10 @@ void updatePlanCache(
 
         if (winningPlan.solution->cacheData != nullptr) {
             if constexpr (std::is_same_v<PlanStageType, std::unique_ptr<sbe::PlanStage>>) {
-                // TODO SERVER-61507: Integration between lowering parts of aggregation pipeline
-                // into the find subsystem and the new SBE cache isn't implemented yet.
+                // TODO SERVER-61507: Remove canUseSbePlanCache check once $group pushdown
+                // is integrated with SBE plan cache.
if (feature_flags::gFeatureFlagSbePlanCache.isEnabledAndIgnoreFCV() && - query.pipeline().empty()) { + canonical_query_encoder::canUseSbePlanCache(query)) { tassert(6142201, "The winning CandidatePlan should contain the original plan", winningPlan.clonedPlan); @@ -215,16 +217,16 @@ void updatePlanCache( plan_cache_debug_info::DebugInfoSBE> callbacks{query, buildDebugInfoFn}; uassertStatusOK(sbe::getPlanCache(opCtx).set( - plan_cache_key_factory::make<sbe::PlanCacheKey>(query, collection), + plan_cache_key_factory::make(query, collections), std::move(cachedPlan), *rankingDecision, opCtx->getServiceContext()->getPreciseClockSource()->now(), &callbacks, boost::none /* worksGrowthCoefficient */)); } else { - // TODO(SERVER-61507, SERVER-64882): Fall back to use the classic plan cache. - // Remove this branch after "gFeatureFlagSbePlanCache" is removed and lowering - // parts of pipeline is integrated with SBE cache. + // TODO(SERVER-64882, SERVER-61507): Fall back to use the classic plan cache. + // Remove this branch after "gFeatureFlagSbePlanCache" is removed and $group + // pushdown is integrated with SBE plan cache. cacheClassicPlan(); } } else { @@ -245,7 +247,7 @@ void updatePlanCache( * the cache, the plan immediately becomes "active". */ void updatePlanCache(OperationContext* opCtx, - const CollectionPtr& collection, + const MultipleCollectionAccessor& collections, const CanonicalQuery& query, const QuerySolution& solution, const sbe::PlanStage& root, diff --git a/src/mongo/db/exec/sbe/stages/branch.cpp b/src/mongo/db/exec/sbe/stages/branch.cpp index bec12b12ee2..adbbd533273 100644 --- a/src/mongo/db/exec/sbe/stages/branch.cpp +++ b/src/mongo/db/exec/sbe/stages/branch.cpp @@ -42,8 +42,9 @@ BranchStage::BranchStage(std::unique_ptr<PlanStage> inputThen, value::SlotVector inputThenVals, value::SlotVector inputElseVals, value::SlotVector outputVals, - PlanNodeId planNodeId) - : PlanStage("branch"_sd, planNodeId), + PlanNodeId planNodeId, + bool participateInTrialRunTracking) + : PlanStage("branch"_sd, planNodeId, participateInTrialRunTracking), _filter(std::move(filter)), _inputThenVals(std::move(inputThenVals)), _inputElseVals(std::move(inputElseVals)), @@ -61,7 +62,8 @@ std::unique_ptr<PlanStage> BranchStage::clone() const { _inputThenVals, _inputElseVals, _outputVals, - _commonStats.nodeId); + _commonStats.nodeId, + _participateInTrialRunTracking); } void BranchStage::prepare(CompileCtx& ctx) { diff --git a/src/mongo/db/exec/sbe/stages/branch.h b/src/mongo/db/exec/sbe/stages/branch.h index 67b5af8a517..df813e762a4 100644 --- a/src/mongo/db/exec/sbe/stages/branch.h +++ b/src/mongo/db/exec/sbe/stages/branch.h @@ -52,7 +52,8 @@ public: value::SlotVector inputThenVals, value::SlotVector inputElseVals, value::SlotVector outputVals, - PlanNodeId planNodeId); + PlanNodeId planNodeId, + bool participateInTrialRunTracking = true); std::unique_ptr<PlanStage> clone() const final; diff --git a/src/mongo/db/exec/sbe/stages/bson_scan.cpp b/src/mongo/db/exec/sbe/stages/bson_scan.cpp index c340071ba0e..3a4c3b50512 100644 --- a/src/mongo/db/exec/sbe/stages/bson_scan.cpp +++ b/src/mongo/db/exec/sbe/stages/bson_scan.cpp @@ -42,8 +42,9 @@ BSONScanStage::BSONScanStage(const char* bsonBegin, boost::optional<value::SlotId> recordSlot, std::vector<std::string> fields, value::SlotVector vars, - PlanNodeId planNodeId) - : PlanStage("bsonscan"_sd, planNodeId), + PlanNodeId planNodeId, + bool participateInTrialRunTracking) + : PlanStage("bsonscan"_sd, planNodeId, participateInTrialRunTracking), 
_bsonBegin(bsonBegin), _bsonEnd(bsonEnd), _recordSlot(recordSlot), @@ -52,8 +53,13 @@ BSONScanStage::BSONScanStage(const char* bsonBegin, _bsonCurrent(bsonBegin) {} std::unique_ptr<PlanStage> BSONScanStage::clone() const { - return std::make_unique<BSONScanStage>( - _bsonBegin, _bsonEnd, _recordSlot, _fields, _vars, _commonStats.nodeId); + return std::make_unique<BSONScanStage>(_bsonBegin, + _bsonEnd, + _recordSlot, + _fields, + _vars, + _commonStats.nodeId, + _participateInTrialRunTracking); } void BSONScanStage::prepare(CompileCtx& ctx) { diff --git a/src/mongo/db/exec/sbe/stages/bson_scan.h b/src/mongo/db/exec/sbe/stages/bson_scan.h index 7804bcd4149..79238f695a2 100644 --- a/src/mongo/db/exec/sbe/stages/bson_scan.h +++ b/src/mongo/db/exec/sbe/stages/bson_scan.h @@ -51,7 +51,8 @@ public: boost::optional<value::SlotId> recordSlot, std::vector<std::string> fields, value::SlotVector vars, - PlanNodeId planNodeId); + PlanNodeId planNodeId, + bool participateInTrialRunTracking = true); std::unique_ptr<PlanStage> clone() const final; diff --git a/src/mongo/db/exec/sbe/stages/check_bounds.cpp b/src/mongo/db/exec/sbe/stages/check_bounds.cpp index 483e9f50260..bc62b089005 100644 --- a/src/mongo/db/exec/sbe/stages/check_bounds.cpp +++ b/src/mongo/db/exec/sbe/stages/check_bounds.cpp @@ -39,8 +39,9 @@ CheckBoundsStage::CheckBoundsStage(std::unique_ptr<PlanStage> input, value::SlotId inKeySlot, value::SlotId inRecordIdSlot, value::SlotId outSlot, - PlanNodeId planNodeId) - : PlanStage{"chkbounds"_sd, planNodeId}, + PlanNodeId planNodeId, + bool participateInTrialRunTracking) + : PlanStage{"chkbounds"_sd, planNodeId, participateInTrialRunTracking}, _params{std::move(params)}, _inKeySlot{inKeySlot}, _inRecordIdSlot{inRecordIdSlot}, @@ -49,8 +50,13 @@ CheckBoundsStage::CheckBoundsStage(std::unique_ptr<PlanStage> input, } std::unique_ptr<PlanStage> CheckBoundsStage::clone() const { - return std::make_unique<CheckBoundsStage>( - _children[0]->clone(), _params, _inKeySlot, _inRecordIdSlot, _outSlot, _commonStats.nodeId); + return std::make_unique<CheckBoundsStage>(_children[0]->clone(), + _params, + _inKeySlot, + _inRecordIdSlot, + _outSlot, + _commonStats.nodeId, + _participateInTrialRunTracking); } void CheckBoundsStage::prepare(CompileCtx& ctx) { diff --git a/src/mongo/db/exec/sbe/stages/check_bounds.h b/src/mongo/db/exec/sbe/stages/check_bounds.h index 29f52faa523..dbdf87938f7 100644 --- a/src/mongo/db/exec/sbe/stages/check_bounds.h +++ b/src/mongo/db/exec/sbe/stages/check_bounds.h @@ -76,7 +76,8 @@ public: value::SlotId inKeySlot, value::SlotId inRecordIdSlot, value::SlotId outSlot, - PlanNodeId planNodeId); + PlanNodeId planNodeId, + bool participateInTrialRunTracking = true); std::unique_ptr<PlanStage> clone() const final; diff --git a/src/mongo/db/exec/sbe/stages/co_scan.cpp b/src/mongo/db/exec/sbe/stages/co_scan.cpp index 73e89a5e87e..9666d03cf01 100644 --- a/src/mongo/db/exec/sbe/stages/co_scan.cpp +++ b/src/mongo/db/exec/sbe/stages/co_scan.cpp @@ -34,11 +34,14 @@ #include "mongo/db/exec/sbe/expressions/expression.h" namespace mongo::sbe { -CoScanStage::CoScanStage(PlanNodeId planNodeId, PlanYieldPolicy* yieldPolicy) - : PlanStage("coscan"_sd, yieldPolicy, planNodeId) {} +CoScanStage::CoScanStage(PlanNodeId planNodeId, + PlanYieldPolicy* yieldPolicy, + bool participateInTrialRunTracking) + : PlanStage("coscan"_sd, yieldPolicy, planNodeId, participateInTrialRunTracking) {} std::unique_ptr<PlanStage> CoScanStage::clone() const { - return std::make_unique<CoScanStage>(_commonStats.nodeId); + return 
std::make_unique<CoScanStage>( + _commonStats.nodeId, _yieldPolicy, _participateInTrialRunTracking); } void CoScanStage::prepare(CompileCtx& ctx) {} value::SlotAccessor* CoScanStage::getAccessor(CompileCtx& ctx, value::SlotId slot) { diff --git a/src/mongo/db/exec/sbe/stages/co_scan.h b/src/mongo/db/exec/sbe/stages/co_scan.h index 4625b636a14..1f8c8d5404d 100644 --- a/src/mongo/db/exec/sbe/stages/co_scan.h +++ b/src/mongo/db/exec/sbe/stages/co_scan.h @@ -42,7 +42,9 @@ namespace mongo::sbe { */ class CoScanStage final : public PlanStage { public: - explicit CoScanStage(PlanNodeId, PlanYieldPolicy* yieldPolicy = nullptr); + explicit CoScanStage(PlanNodeId, + PlanYieldPolicy* yieldPolicy = nullptr, + bool participateInTrialRunTracking = true); std::unique_ptr<PlanStage> clone() const final; diff --git a/src/mongo/db/exec/sbe/stages/column_scan.cpp b/src/mongo/db/exec/sbe/stages/column_scan.cpp index 8058307a916..24f769fa2c7 100644 --- a/src/mongo/db/exec/sbe/stages/column_scan.cpp +++ b/src/mongo/db/exec/sbe/stages/column_scan.cpp @@ -59,8 +59,9 @@ ColumnScanStage::ColumnScanStage(UUID collectionUuid, std::vector<std::unique_ptr<EExpression>> pathExprs, value::SlotId rowStoreSlot, PlanYieldPolicy* yieldPolicy, - PlanNodeId nodeId) - : PlanStage("columnscan"_sd, yieldPolicy, nodeId), + PlanNodeId nodeId, + bool participateInTrialRunTracking) + : PlanStage("columnscan"_sd, yieldPolicy, nodeId, participateInTrialRunTracking), _collUuid(collectionUuid), _columnIndexName(columnIndexName), _fieldSlots(std::move(fieldSlots)), @@ -89,7 +90,8 @@ std::unique_ptr<PlanStage> ColumnScanStage::clone() const { std::move(pathExprs), _rowStoreSlot, _yieldPolicy, - _commonStats.nodeId); + _commonStats.nodeId, + _participateInTrialRunTracking); } void ColumnScanStage::prepare(CompileCtx& ctx) { diff --git a/src/mongo/db/exec/sbe/stages/column_scan.h b/src/mongo/db/exec/sbe/stages/column_scan.h index d00d4641171..1efeef25bca 100644 --- a/src/mongo/db/exec/sbe/stages/column_scan.h +++ b/src/mongo/db/exec/sbe/stages/column_scan.h @@ -53,7 +53,8 @@ public: std::vector<std::unique_ptr<EExpression>> pathExprs, value::SlotId internalSlot, PlanYieldPolicy* yieldPolicy, - PlanNodeId nodeId); + PlanNodeId planNodeId, + bool participateInTrialRunTracking = true); std::unique_ptr<PlanStage> clone() const final; diff --git a/src/mongo/db/exec/sbe/stages/exchange.cpp b/src/mongo/db/exec/sbe/stages/exchange.cpp index 8cd7b065559..fdbb6531913 100644 --- a/src/mongo/db/exec/sbe/stages/exchange.cpp +++ b/src/mongo/db/exec/sbe/stages/exchange.cpp @@ -171,8 +171,9 @@ ExchangeConsumer::ExchangeConsumer(std::unique_ptr<PlanStage> input, ExchangePolicy policy, std::unique_ptr<EExpression> partition, std::unique_ptr<EExpression> orderLess, - PlanNodeId planNodeId) - : PlanStage("exchange"_sd, planNodeId) { + PlanNodeId planNodeId, + bool participateInTrialRunTracking) + : PlanStage("exchange"_sd, planNodeId, participateInTrialRunTracking) { _children.emplace_back(std::move(input)); _state = std::make_shared<ExchangeState>( numOfProducers, std::move(fields), policy, std::move(partition), std::move(orderLess)); @@ -186,13 +187,16 @@ ExchangeConsumer::ExchangeConsumer(std::unique_ptr<PlanStage> input, uassert(5922202, "partition expression must not be present", !_state->partitionExpr()); } } -ExchangeConsumer::ExchangeConsumer(std::shared_ptr<ExchangeState> state, PlanNodeId planNodeId) - : PlanStage("exchange"_sd, planNodeId), _state(state) { +ExchangeConsumer::ExchangeConsumer(std::shared_ptr<ExchangeState> state, + PlanNodeId 
planNodeId, + bool participateInTrialRunTracking) + : PlanStage("exchange"_sd, planNodeId, participateInTrialRunTracking), _state(state) { _tid = _state->addConsumer(this); _orderPreserving = _state->isOrderPreserving(); } std::unique_ptr<PlanStage> ExchangeConsumer::clone() const { - return std::make_unique<ExchangeConsumer>(_state, _commonStats.nodeId); + return std::make_unique<ExchangeConsumer>( + _state, _commonStats.nodeId, _participateInTrialRunTracking); } void ExchangeConsumer::prepare(CompileCtx& ctx) { for (size_t idx = 0; idx < _state->fields().size(); ++idx) { @@ -486,8 +490,9 @@ void ExchangeProducer::closePipes() { ExchangeProducer::ExchangeProducer(std::unique_ptr<PlanStage> input, std::shared_ptr<ExchangeState> state, - PlanNodeId planNodeId) - : PlanStage("exchangep"_sd, planNodeId), _state(state) { + PlanNodeId planNodeId, + bool participateInTrialRunTracking) + : PlanStage("exchangep"_sd, planNodeId, participateInTrialRunTracking), _state(state) { _children.emplace_back(std::move(input)); _tid = _state->addProducer(this); diff --git a/src/mongo/db/exec/sbe/stages/exchange.h b/src/mongo/db/exec/sbe/stages/exchange.h index b94b4968f66..15928cd50fb 100644 --- a/src/mongo/db/exec/sbe/stages/exchange.h +++ b/src/mongo/db/exec/sbe/stages/exchange.h @@ -261,9 +261,12 @@ public: ExchangePolicy policy, std::unique_ptr<EExpression> partition, std::unique_ptr<EExpression> orderLess, - PlanNodeId planNodeId); + PlanNodeId planNodeId, + bool participateInTrialRunTracking = true); - ExchangeConsumer(std::shared_ptr<ExchangeState> state, PlanNodeId planNodeId); + ExchangeConsumer(std::shared_ptr<ExchangeState> state, + PlanNodeId planNodeId, + bool participateInTrialRunTracking = true); std::unique_ptr<PlanStage> clone() const final; @@ -311,7 +314,8 @@ class ExchangeProducer final : public PlanStage { public: ExchangeProducer(std::unique_ptr<PlanStage> input, std::shared_ptr<ExchangeState> state, - PlanNodeId planNodeId); + PlanNodeId planNodeId, + bool participateInTrialRunTracking = true); static void start(OperationContext* opCtx, CompileCtx& ctx, diff --git a/src/mongo/db/exec/sbe/stages/filter.h b/src/mongo/db/exec/sbe/stages/filter.h index 2120be1c062..059dd1c7ab4 100644 --- a/src/mongo/db/exec/sbe/stages/filter.h +++ b/src/mongo/db/exec/sbe/stages/filter.h @@ -58,16 +58,21 @@ class FilterStage final : public PlanStage { public: FilterStage(std::unique_ptr<PlanStage> input, std::unique_ptr<EExpression> filter, - PlanNodeId planNodeId) - : PlanStage(IsConst ? "cfilter"_sd : (IsEof ? "efilter" : "filter"_sd), planNodeId), + PlanNodeId planNodeId, + bool participateInTrialRunTracking = true) + : PlanStage(IsConst ? "cfilter"_sd : (IsEof ? 
"efilter" : "filter"_sd), + planNodeId, + participateInTrialRunTracking), _filter(std::move(filter)) { static_assert(!IsEof || !IsConst); _children.emplace_back(std::move(input)); } std::unique_ptr<PlanStage> clone() const final { - return std::make_unique<FilterStage<IsConst, IsEof>>( - _children[0]->clone(), _filter->clone(), _commonStats.nodeId); + return std::make_unique<FilterStage<IsConst, IsEof>>(_children[0]->clone(), + _filter->clone(), + _commonStats.nodeId, + _participateInTrialRunTracking); } void prepare(CompileCtx& ctx) final { diff --git a/src/mongo/db/exec/sbe/stages/hash_agg.cpp b/src/mongo/db/exec/sbe/stages/hash_agg.cpp index 1dcbc500ec8..f930d4b5e95 100644 --- a/src/mongo/db/exec/sbe/stages/hash_agg.cpp +++ b/src/mongo/db/exec/sbe/stages/hash_agg.cpp @@ -47,8 +47,9 @@ HashAggStage::HashAggStage(std::unique_ptr<PlanStage> input, bool optimizedClose, boost::optional<value::SlotId> collatorSlot, bool allowDiskUse, - PlanNodeId planNodeId) - : PlanStage("group"_sd, planNodeId), + PlanNodeId planNodeId, + bool participateInTrialRunTracking) + : PlanStage("group"_sd, planNodeId, participateInTrialRunTracking), _gbs(std::move(gbs)), _aggs(std::move(aggs)), _collatorSlot(collatorSlot), @@ -74,7 +75,8 @@ std::unique_ptr<PlanStage> HashAggStage::clone() const { _optimizedClose, _collatorSlot, _allowDiskUse, - _commonStats.nodeId); + _commonStats.nodeId, + _participateInTrialRunTracking); } void HashAggStage::doSaveState(bool relinquishCursor) { diff --git a/src/mongo/db/exec/sbe/stages/hash_agg.h b/src/mongo/db/exec/sbe/stages/hash_agg.h index 19fbca9d1c7..d200c4b9c3d 100644 --- a/src/mongo/db/exec/sbe/stages/hash_agg.h +++ b/src/mongo/db/exec/sbe/stages/hash_agg.h @@ -75,7 +75,8 @@ public: bool optimizedClose, boost::optional<value::SlotId> collatorSlot, bool allowDiskUse, - PlanNodeId planNodeId); + PlanNodeId planNodeId, + bool participateInTrialRunTracking = true); std::unique_ptr<PlanStage> clone() const final; diff --git a/src/mongo/db/exec/sbe/stages/hash_join.cpp b/src/mongo/db/exec/sbe/stages/hash_join.cpp index 86675029c0e..bad53262acb 100644 --- a/src/mongo/db/exec/sbe/stages/hash_join.cpp +++ b/src/mongo/db/exec/sbe/stages/hash_join.cpp @@ -44,8 +44,9 @@ HashJoinStage::HashJoinStage(std::unique_ptr<PlanStage> outer, value::SlotVector innerCond, value::SlotVector innerProjects, boost::optional<value::SlotId> collatorSlot, - PlanNodeId planNodeId) - : PlanStage("hj"_sd, planNodeId), + PlanNodeId planNodeId, + bool participateInTrialRunTracking) + : PlanStage("hj"_sd, planNodeId, participateInTrialRunTracking), _outerCond(std::move(outerCond)), _outerProjects(std::move(outerProjects)), _innerCond(std::move(innerCond)), @@ -68,7 +69,8 @@ std::unique_ptr<PlanStage> HashJoinStage::clone() const { _innerCond, _innerProjects, _collatorSlot, - _commonStats.nodeId); + _commonStats.nodeId, + _participateInTrialRunTracking); } void HashJoinStage::prepare(CompileCtx& ctx) { diff --git a/src/mongo/db/exec/sbe/stages/hash_join.h b/src/mongo/db/exec/sbe/stages/hash_join.h index ed4781116d9..a3997074db0 100644 --- a/src/mongo/db/exec/sbe/stages/hash_join.h +++ b/src/mongo/db/exec/sbe/stages/hash_join.h @@ -66,7 +66,8 @@ public: value::SlotVector innerCond, value::SlotVector innerProjects, boost::optional<value::SlotId> collatorSlot, - PlanNodeId planNodeId); + PlanNodeId planNodeId, + bool participateInTrialRunTracking = true); std::unique_ptr<PlanStage> clone() const final; diff --git a/src/mongo/db/exec/sbe/stages/hash_lookup.cpp b/src/mongo/db/exec/sbe/stages/hash_lookup.cpp index 
a65f2f8bd89..16e61d68630 100644 --- a/src/mongo/db/exec/sbe/stages/hash_lookup.cpp +++ b/src/mongo/db/exec/sbe/stages/hash_lookup.cpp @@ -47,8 +47,9 @@ HashLookupStage::HashLookupStage(std::unique_ptr<PlanStage> outer, value::SlotVector innerProjects, value::SlotMap<std::unique_ptr<EExpression>> innerAggs, boost::optional<value::SlotId> collatorSlot, - PlanNodeId planNodeId) - : PlanStage("hash_lookup"_sd, planNodeId), + PlanNodeId planNodeId, + bool participateInTrialRunTracking) + : PlanStage("hash_lookup"_sd, planNodeId, participateInTrialRunTracking), _outerCond(outerCond), _innerCond(innerCond), _innerProjects(innerProjects), @@ -72,7 +73,8 @@ std::unique_ptr<PlanStage> HashLookupStage::clone() const { _innerProjects, std::move(innerAggs), _collatorSlot, - _commonStats.nodeId); + _commonStats.nodeId, + _participateInTrialRunTracking); } void HashLookupStage::prepare(CompileCtx& ctx) { diff --git a/src/mongo/db/exec/sbe/stages/hash_lookup.h b/src/mongo/db/exec/sbe/stages/hash_lookup.h index 2e3f0b34816..611c5603606 100644 --- a/src/mongo/db/exec/sbe/stages/hash_lookup.h +++ b/src/mongo/db/exec/sbe/stages/hash_lookup.h @@ -86,7 +86,8 @@ public: value::SlotVector innerProjects, value::SlotMap<std::unique_ptr<EExpression>> innerAggs, boost::optional<value::SlotId> collatorSlot, - PlanNodeId planNodeId); + PlanNodeId planNodeId, + bool participateInTrialRunTracking = true); std::unique_ptr<PlanStage> clone() const final; diff --git a/src/mongo/db/exec/sbe/stages/ix_scan.cpp b/src/mongo/db/exec/sbe/stages/ix_scan.cpp index bfad6d9a2ae..2029ac4d356 100644 --- a/src/mongo/db/exec/sbe/stages/ix_scan.cpp +++ b/src/mongo/db/exec/sbe/stages/ix_scan.cpp @@ -50,8 +50,12 @@ IndexScanStage::IndexScanStage(UUID collUuid, boost::optional<value::SlotId> seekKeySlotLow, boost::optional<value::SlotId> seekKeySlotHigh, PlanYieldPolicy* yieldPolicy, - PlanNodeId nodeId) - : PlanStage(seekKeySlotLow ? "ixseek"_sd : "ixscan"_sd, yieldPolicy, nodeId), + PlanNodeId nodeId, + bool participateInTrialRunTracking) + : PlanStage(seekKeySlotLow ? "ixseek"_sd : "ixscan"_sd, + yieldPolicy, + nodeId, + participateInTrialRunTracking), _collUuid(collUuid), _indexName(indexName), _forward(forward), @@ -81,7 +85,8 @@ std::unique_ptr<PlanStage> IndexScanStage::clone() const { _seekKeySlotLow, _seekKeySlotHigh, _yieldPolicy, - _commonStats.nodeId); + _commonStats.nodeId, + _participateInTrialRunTracking); } void IndexScanStage::prepare(CompileCtx& ctx) { diff --git a/src/mongo/db/exec/sbe/stages/ix_scan.h b/src/mongo/db/exec/sbe/stages/ix_scan.h index ce00ef17128..da61cb544ec 100644 --- a/src/mongo/db/exec/sbe/stages/ix_scan.h +++ b/src/mongo/db/exec/sbe/stages/ix_scan.h @@ -83,7 +83,8 @@ public: boost::optional<value::SlotId> seekKeySlotLow, boost::optional<value::SlotId> seekKeySlotHigh, PlanYieldPolicy* yieldPolicy, - PlanNodeId nodeId); + PlanNodeId planNodeId, + bool participateInTrialRunTracking = true); std::unique_ptr<PlanStage> clone() const final; diff --git a/src/mongo/db/exec/sbe/stages/limit_skip.cpp b/src/mongo/db/exec/sbe/stages/limit_skip.cpp index 359355582ac..8343f56ca96 100644 --- a/src/mongo/db/exec/sbe/stages/limit_skip.cpp +++ b/src/mongo/db/exec/sbe/stages/limit_skip.cpp @@ -37,8 +37,9 @@ namespace mongo::sbe { LimitSkipStage::LimitSkipStage(std::unique_ptr<PlanStage> input, boost::optional<long long> limit, boost::optional<long long> skip, - PlanNodeId planNodeId) - : PlanStage(!skip ? 
"limit"_sd : "limitskip"_sd, planNodeId), + PlanNodeId planNodeId, + bool participateInTrialRunTracking) + : PlanStage(!skip ? "limit"_sd : "limitskip"_sd, planNodeId, participateInTrialRunTracking), _limit(limit), _skip(skip), _current(0), @@ -51,7 +52,7 @@ LimitSkipStage::LimitSkipStage(std::unique_ptr<PlanStage> input, std::unique_ptr<PlanStage> LimitSkipStage::clone() const { return std::make_unique<LimitSkipStage>( - _children[0]->clone(), _limit, _skip, _commonStats.nodeId); + _children[0]->clone(), _limit, _skip, _commonStats.nodeId, _participateInTrialRunTracking); } void LimitSkipStage::prepare(CompileCtx& ctx) { diff --git a/src/mongo/db/exec/sbe/stages/limit_skip.h b/src/mongo/db/exec/sbe/stages/limit_skip.h index f0f62b34239..7fc366a2174 100644 --- a/src/mongo/db/exec/sbe/stages/limit_skip.h +++ b/src/mongo/db/exec/sbe/stages/limit_skip.h @@ -50,7 +50,8 @@ public: LimitSkipStage(std::unique_ptr<PlanStage> input, boost::optional<long long> limit, boost::optional<long long> skip, - PlanNodeId planNodeId); + PlanNodeId planNodeId, + bool participateInTrialRunTracking = true); std::unique_ptr<PlanStage> clone() const final; diff --git a/src/mongo/db/exec/sbe/stages/loop_join.cpp b/src/mongo/db/exec/sbe/stages/loop_join.cpp index 6c49f2e700a..3df5e179a09 100644 --- a/src/mongo/db/exec/sbe/stages/loop_join.cpp +++ b/src/mongo/db/exec/sbe/stages/loop_join.cpp @@ -41,7 +41,8 @@ LoopJoinStage::LoopJoinStage(std::unique_ptr<PlanStage> outer, value::SlotVector outerProjects, value::SlotVector outerCorrelated, std::unique_ptr<EExpression> predicate, - PlanNodeId nodeId) + PlanNodeId nodeId, + bool participateInTrialRunTracking) : LoopJoinStage(std::move(outer), std::move(inner), std::move(outerProjects), @@ -49,7 +50,8 @@ LoopJoinStage::LoopJoinStage(std::unique_ptr<PlanStage> outer, value::SlotVector{}, std::move(predicate), JoinType::Inner, - nodeId) {} + nodeId, + participateInTrialRunTracking) {} LoopJoinStage::LoopJoinStage(std::unique_ptr<PlanStage> outer, std::unique_ptr<PlanStage> inner, @@ -58,8 +60,9 @@ LoopJoinStage::LoopJoinStage(std::unique_ptr<PlanStage> outer, value::SlotVector innerProjects, std::unique_ptr<EExpression> predicate, JoinType joinType, - PlanNodeId nodeId) - : PlanStage("nlj"_sd, nodeId), + PlanNodeId nodeId, + bool participateInTrialRunTracking) + : PlanStage("nlj"_sd, nodeId, participateInTrialRunTracking), _outerProjects(std::move(outerProjects)), _outerCorrelated(std::move(outerCorrelated)), _innerProjects(std::move(innerProjects)), @@ -80,7 +83,8 @@ std::unique_ptr<PlanStage> LoopJoinStage::clone() const { _innerProjects, _predicate ? 
_predicate->clone() : nullptr, _joinType, - _commonStats.nodeId); + _commonStats.nodeId, + _participateInTrialRunTracking); } void LoopJoinStage::prepare(CompileCtx& ctx) { diff --git a/src/mongo/db/exec/sbe/stages/loop_join.h b/src/mongo/db/exec/sbe/stages/loop_join.h index 076655bca4c..c69010071fd 100644 --- a/src/mongo/db/exec/sbe/stages/loop_join.h +++ b/src/mongo/db/exec/sbe/stages/loop_join.h @@ -63,7 +63,8 @@ public: value::SlotVector outerProjects, value::SlotVector outerCorrelated, std::unique_ptr<EExpression> predicate, - PlanNodeId nodeId); + PlanNodeId planNodeId, + bool participateInTrialRunTracking = true); LoopJoinStage(std::unique_ptr<PlanStage> outer, std::unique_ptr<PlanStage> inner, @@ -72,7 +73,8 @@ public: value::SlotVector innerProjects, std::unique_ptr<EExpression> predicate, JoinType joinType, - PlanNodeId nodeId); + PlanNodeId planNodeId, + bool participateInTrialRunTracking = true); std::unique_ptr<PlanStage> clone() const final; diff --git a/src/mongo/db/exec/sbe/stages/makeobj.cpp b/src/mongo/db/exec/sbe/stages/makeobj.cpp index cefd83d0035..0c84fde3083 100644 --- a/src/mongo/db/exec/sbe/stages/makeobj.cpp +++ b/src/mongo/db/exec/sbe/stages/makeobj.cpp @@ -46,8 +46,11 @@ MakeObjStageBase<O>::MakeObjStageBase(std::unique_ptr<PlanStage> input, value::SlotVector projectVars, bool forceNewObject, bool returnOldObject, - PlanNodeId planNodeId) - : PlanStage(O == MakeObjOutputType::object ? "mkobj"_sd : "mkbson"_sd, planNodeId), + PlanNodeId planNodeId, + bool participateInTrialRunTracking) + : PlanStage(O == MakeObjOutputType::object ? "mkobj"_sd : "mkbson"_sd, + planNodeId, + participateInTrialRunTracking), _objSlot(objSlot), _rootSlot(rootSlot), _fieldBehavior(fieldBehavior), @@ -95,7 +98,8 @@ std::unique_ptr<PlanStage> MakeObjStageBase<O>::clone() const { _projectVars, _forceNewObject, _returnOldObject, - _commonStats.nodeId); + _commonStats.nodeId, + _participateInTrialRunTracking); } template <MakeObjOutputType O> diff --git a/src/mongo/db/exec/sbe/stages/makeobj.h b/src/mongo/db/exec/sbe/stages/makeobj.h index 1f2dc183d97..3034470b95a 100644 --- a/src/mongo/db/exec/sbe/stages/makeobj.h +++ b/src/mongo/db/exec/sbe/stages/makeobj.h @@ -87,7 +87,8 @@ public: value::SlotVector projectVars, bool forceNewObject, bool returnOldObject, - PlanNodeId planNodeId); + PlanNodeId planNodeId, + bool participateInTrialRunTracking = true); /** * A convenience constructor that takes a set instead of a vector for 'fields' and diff --git a/src/mongo/db/exec/sbe/stages/merge_join.cpp b/src/mongo/db/exec/sbe/stages/merge_join.cpp index 170227e0575..d6f03af7502 100644 --- a/src/mongo/db/exec/sbe/stages/merge_join.cpp +++ b/src/mongo/db/exec/sbe/stages/merge_join.cpp @@ -76,8 +76,9 @@ MergeJoinStage::MergeJoinStage(std::unique_ptr<PlanStage> outer, value::SlotVector innerKeys, value::SlotVector innerProjects, std::vector<value::SortDirection> sortDirs, - PlanNodeId planNodeId) - : PlanStage("mj"_sd, planNodeId), + PlanNodeId planNodeId, + bool participateInTrialRunTracking) + : PlanStage("mj"_sd, planNodeId, participateInTrialRunTracking), _outerKeys(std::move(outerKeys)), _outerProjects(std::move(outerProjects)), _innerKeys(std::move(innerKeys)), @@ -104,7 +105,8 @@ std::unique_ptr<PlanStage> MergeJoinStage::clone() const { _innerKeys, _innerProjects, _dirs, - _commonStats.nodeId); + _commonStats.nodeId, + _participateInTrialRunTracking); } void MergeJoinStage::prepare(CompileCtx& ctx) { diff --git a/src/mongo/db/exec/sbe/stages/merge_join.h 
b/src/mongo/db/exec/sbe/stages/merge_join.h index b0f61cd677c..ff94784ac0d 100644 --- a/src/mongo/db/exec/sbe/stages/merge_join.h +++ b/src/mongo/db/exec/sbe/stages/merge_join.h @@ -62,7 +62,8 @@ public: value::SlotVector innerKeys, value::SlotVector innerProjects, std::vector<value::SortDirection> sortDirs, - PlanNodeId planNodeId); + PlanNodeId planNodeId, + bool participateInTrialRunTracking = true); std::unique_ptr<PlanStage> clone() const final; diff --git a/src/mongo/db/exec/sbe/stages/project.cpp b/src/mongo/db/exec/sbe/stages/project.cpp index 736110bc83a..c534c5c8cdc 100644 --- a/src/mongo/db/exec/sbe/stages/project.cpp +++ b/src/mongo/db/exec/sbe/stages/project.cpp @@ -37,8 +37,10 @@ namespace mongo { namespace sbe { ProjectStage::ProjectStage(std::unique_ptr<PlanStage> input, value::SlotMap<std::unique_ptr<EExpression>> projects, - PlanNodeId nodeId) - : PlanStage("project"_sd, nodeId), _projects(std::move(projects)) { + PlanNodeId nodeId, + bool participateInTrialRunTracking) + : PlanStage("project"_sd, nodeId, participateInTrialRunTracking), + _projects(std::move(projects)) { _children.emplace_back(std::move(input)); } @@ -47,8 +49,10 @@ std::unique_ptr<PlanStage> ProjectStage::clone() const { for (auto& [k, v] : _projects) { projects.emplace(k, v->clone()); } - return std::make_unique<ProjectStage>( - _children[0]->clone(), std::move(projects), _commonStats.nodeId); + return std::make_unique<ProjectStage>(_children[0]->clone(), + std::move(projects), + _commonStats.nodeId, + _participateInTrialRunTracking); } void ProjectStage::prepare(CompileCtx& ctx) { diff --git a/src/mongo/db/exec/sbe/stages/project.h b/src/mongo/db/exec/sbe/stages/project.h index 1754dd7d2a9..bf4e169c8c9 100644 --- a/src/mongo/db/exec/sbe/stages/project.h +++ b/src/mongo/db/exec/sbe/stages/project.h @@ -47,7 +47,8 @@ class ProjectStage final : public PlanStage { public: ProjectStage(std::unique_ptr<PlanStage> input, value::SlotMap<std::unique_ptr<EExpression>> projects, - PlanNodeId nodeId); + PlanNodeId planNodeId, + bool participateInTrialRunTracking = true); std::unique_ptr<PlanStage> clone() const final; diff --git a/src/mongo/db/exec/sbe/stages/scan.cpp b/src/mongo/db/exec/sbe/stages/scan.cpp index 678d3f84ef9..fbbc3a9ae0d 100644 --- a/src/mongo/db/exec/sbe/stages/scan.cpp +++ b/src/mongo/db/exec/sbe/stages/scan.cpp @@ -56,8 +56,10 @@ ScanStage::ScanStage(UUID collectionUuid, PlanYieldPolicy* yieldPolicy, PlanNodeId nodeId, ScanCallbacks scanCallbacks, - bool useRandomCursor) - : PlanStage(seekKeySlot ? "seek"_sd : "scan"_sd, yieldPolicy, nodeId), + bool useRandomCursor, + bool participateInTrialRunTracking) + : PlanStage( + seekKeySlot ? 
"seek"_sd : "scan"_sd, yieldPolicy, nodeId, participateInTrialRunTracking), _collUuid(collectionUuid), _recordSlot(recordSlot), _recordIdSlot(recordIdSlot), @@ -98,7 +100,9 @@ std::unique_ptr<PlanStage> ScanStage::clone() const { _forward, _yieldPolicy, _commonStats.nodeId, - _scanCallbacks); + _scanCallbacks, + _useRandomCursor, + _participateInTrialRunTracking); } void ScanStage::prepare(CompileCtx& ctx) { @@ -592,8 +596,9 @@ ParallelScanStage::ParallelScanStage(UUID collectionUuid, value::SlotVector vars, PlanYieldPolicy* yieldPolicy, PlanNodeId nodeId, - ScanCallbacks callbacks) - : PlanStage("pscan"_sd, yieldPolicy, nodeId), + ScanCallbacks callbacks, + bool participateInTrialRunTracking) + : PlanStage("pscan"_sd, yieldPolicy, nodeId, participateInTrialRunTracking), _collUuid(collectionUuid), _recordSlot(recordSlot), _recordIdSlot(recordIdSlot), @@ -621,8 +626,9 @@ ParallelScanStage::ParallelScanStage(const std::shared_ptr<ParallelState>& state value::SlotVector vars, PlanYieldPolicy* yieldPolicy, PlanNodeId nodeId, - ScanCallbacks callbacks) - : PlanStage("pscan"_sd, yieldPolicy, nodeId), + ScanCallbacks callbacks, + bool participateInTrialRunTracking) + : PlanStage("pscan"_sd, yieldPolicy, nodeId, participateInTrialRunTracking), _collUuid(collectionUuid), _recordSlot(recordSlot), _recordIdSlot(recordIdSlot), @@ -650,7 +656,8 @@ std::unique_ptr<PlanStage> ParallelScanStage::clone() const { _vars, _yieldPolicy, _commonStats.nodeId, - _scanCallbacks); + _scanCallbacks, + _participateInTrialRunTracking); } void ParallelScanStage::prepare(CompileCtx& ctx) { diff --git a/src/mongo/db/exec/sbe/stages/scan.h b/src/mongo/db/exec/sbe/stages/scan.h index 37462ac5e14..ed138f6302e 100644 --- a/src/mongo/db/exec/sbe/stages/scan.h +++ b/src/mongo/db/exec/sbe/stages/scan.h @@ -108,7 +108,8 @@ public: PlanYieldPolicy* yieldPolicy, PlanNodeId nodeId, ScanCallbacks scanCallbacks, - bool useRandomCursor = false); + bool useRandomCursor = false, + bool participateInTrialRunTracking = true); std::unique_ptr<PlanStage> clone() const final; @@ -227,7 +228,8 @@ public: value::SlotVector vars, PlanYieldPolicy* yieldPolicy, PlanNodeId nodeId, - ScanCallbacks callbacks); + ScanCallbacks callbacks, + bool participateInTrialRunTracking = true); ParallelScanStage(const std::shared_ptr<ParallelState>& state, const UUID& collectionUuid, @@ -241,7 +243,8 @@ public: value::SlotVector vars, PlanYieldPolicy* yieldPolicy, PlanNodeId nodeId, - ScanCallbacks callbacks); + ScanCallbacks callbacks, + bool participateInTrialRunTracking = true); std::unique_ptr<PlanStage> clone() const final; diff --git a/src/mongo/db/exec/sbe/stages/sort.cpp b/src/mongo/db/exec/sbe/stages/sort.cpp index 5acf73afe8d..0968b0bea68 100644 --- a/src/mongo/db/exec/sbe/stages/sort.cpp +++ b/src/mongo/db/exec/sbe/stages/sort.cpp @@ -55,8 +55,9 @@ SortStage::SortStage(std::unique_ptr<PlanStage> input, size_t limit, size_t memoryLimit, bool allowDiskUse, - PlanNodeId planNodeId) - : PlanStage("sort"_sd, planNodeId), + PlanNodeId planNodeId, + bool participateInTrialRunTracking) + : PlanStage("sort"_sd, planNodeId, participateInTrialRunTracking), _obs(std::move(obs)), _dirs(std::move(dirs)), _vals(std::move(vals)), @@ -80,7 +81,8 @@ std::unique_ptr<PlanStage> SortStage::clone() const { _specificStats.limit, _specificStats.maxMemoryUsageBytes, _allowDiskUse, - _commonStats.nodeId); + _commonStats.nodeId, + _participateInTrialRunTracking); } void SortStage::prepare(CompileCtx& ctx) { diff --git a/src/mongo/db/exec/sbe/stages/sort.h 
b/src/mongo/db/exec/sbe/stages/sort.h index 2bfc9e1d9fb..dda9716b75b 100644 --- a/src/mongo/db/exec/sbe/stages/sort.h +++ b/src/mongo/db/exec/sbe/stages/sort.h @@ -70,7 +70,8 @@ public: size_t limit, size_t memoryLimit, bool allowDiskUse, - PlanNodeId planNodeId); + PlanNodeId planNodeId, + bool participateInTrialRunTracking = true); ~SortStage(); diff --git a/src/mongo/db/exec/sbe/stages/sorted_merge.cpp b/src/mongo/db/exec/sbe/stages/sorted_merge.cpp index f0a648f38ad..39cee407a00 100644 --- a/src/mongo/db/exec/sbe/stages/sorted_merge.cpp +++ b/src/mongo/db/exec/sbe/stages/sorted_merge.cpp @@ -41,8 +41,9 @@ SortedMergeStage::SortedMergeStage(PlanStage::Vector inputStages, std::vector<value::SortDirection> dirs, std::vector<value::SlotVector> inputVals, value::SlotVector outputVals, - PlanNodeId planNodeId) - : PlanStage("smerge"_sd, planNodeId), + PlanNodeId planNodeId, + bool participateInTrialRunTracking) + : PlanStage("smerge"_sd, planNodeId, participateInTrialRunTracking), _inputKeys(std::move(inputKeys)), _dirs(std::move(dirs)), _inputVals(std::move(inputVals)), @@ -69,8 +70,13 @@ std::unique_ptr<PlanStage> SortedMergeStage::clone() const { for (auto& child : _children) { inputStages.emplace_back(child->clone()); } - return std::make_unique<SortedMergeStage>( - std::move(inputStages), _inputKeys, _dirs, _inputVals, _outputVals, _commonStats.nodeId); + return std::make_unique<SortedMergeStage>(std::move(inputStages), + _inputKeys, + _dirs, + _inputVals, + _outputVals, + _commonStats.nodeId, + _participateInTrialRunTracking); } void SortedMergeStage::prepare(CompileCtx& ctx) { diff --git a/src/mongo/db/exec/sbe/stages/sorted_merge.h b/src/mongo/db/exec/sbe/stages/sorted_merge.h index 3b87e4c8849..436ddfce080 100644 --- a/src/mongo/db/exec/sbe/stages/sorted_merge.h +++ b/src/mongo/db/exec/sbe/stages/sorted_merge.h @@ -61,7 +61,8 @@ public: // Each element of 'inputVals' must be the same size as 'outputVals'. 
std::vector<value::SlotVector> inputVals, value::SlotVector outputVals, - PlanNodeId planNodeId); + PlanNodeId planNodeId, + bool participateInTrialRunTracking = true); std::unique_ptr<PlanStage> clone() const final; diff --git a/src/mongo/db/exec/sbe/stages/spool.cpp b/src/mongo/db/exec/sbe/stages/spool.cpp index 4550f569b09..47ca744962c 100644 --- a/src/mongo/db/exec/sbe/stages/spool.cpp +++ b/src/mongo/db/exec/sbe/stages/spool.cpp @@ -35,14 +35,20 @@ namespace mongo::sbe { SpoolEagerProducerStage::SpoolEagerProducerStage(std::unique_ptr<PlanStage> input, SpoolId spoolId, value::SlotVector vals, - PlanNodeId planNodeId) - : PlanStage{"espool"_sd, planNodeId}, _spoolId{spoolId}, _vals{std::move(vals)} { + PlanNodeId planNodeId, + bool participateInTrialRunTracking) + : PlanStage{"espool"_sd, planNodeId, participateInTrialRunTracking}, + _spoolId{spoolId}, + _vals{std::move(vals)} { _children.emplace_back(std::move(input)); } std::unique_ptr<PlanStage> SpoolEagerProducerStage::clone() const { - return std::make_unique<SpoolEagerProducerStage>( - _children[0]->clone(), _spoolId, _vals, _commonStats.nodeId); + return std::make_unique<SpoolEagerProducerStage>(_children[0]->clone(), + _spoolId, + _vals, + _commonStats.nodeId, + _participateInTrialRunTracking); } void SpoolEagerProducerStage::prepare(CompileCtx& ctx) { @@ -171,8 +177,9 @@ SpoolLazyProducerStage::SpoolLazyProducerStage(std::unique_ptr<PlanStage> input, SpoolId spoolId, value::SlotVector vals, std::unique_ptr<EExpression> predicate, - PlanNodeId planNodeId) - : PlanStage{"lspool"_sd, planNodeId}, + PlanNodeId planNodeId, + bool participateInTrialRunTracking) + : PlanStage{"lspool"_sd, planNodeId, participateInTrialRunTracking}, _spoolId{spoolId}, _vals{std::move(vals)}, _predicate{std::move(predicate)} { @@ -180,8 +187,12 @@ SpoolLazyProducerStage::SpoolLazyProducerStage(std::unique_ptr<PlanStage> input, } std::unique_ptr<PlanStage> SpoolLazyProducerStage::clone() const { - return std::make_unique<SpoolLazyProducerStage>( - _children[0]->clone(), _spoolId, _vals, _predicate->clone(), _commonStats.nodeId); + return std::make_unique<SpoolLazyProducerStage>(_children[0]->clone(), + _spoolId, + _vals, + _predicate->clone(), + _commonStats.nodeId, + _participateInTrialRunTracking); } void SpoolLazyProducerStage::prepare(CompileCtx& ctx) { diff --git a/src/mongo/db/exec/sbe/stages/spool.h b/src/mongo/db/exec/sbe/stages/spool.h index a2dd6f81657..09a453e0e0e 100644 --- a/src/mongo/db/exec/sbe/stages/spool.h +++ b/src/mongo/db/exec/sbe/stages/spool.h @@ -56,7 +56,8 @@ public: SpoolEagerProducerStage(std::unique_ptr<PlanStage> input, SpoolId spoolId, value::SlotVector vals, - PlanNodeId planNodeId); + PlanNodeId planNodeId, + bool participateInTrialRunTracking = true); std::unique_ptr<PlanStage> clone() const final; @@ -109,7 +110,8 @@ public: SpoolId spoolId, value::SlotVector vals, std::unique_ptr<EExpression> predicate, - PlanNodeId planNodeId); + PlanNodeId planNodeId, + bool participateInTrialRunTracking = true); std::unique_ptr<PlanStage> clone() const final; @@ -165,13 +167,17 @@ private: template <bool IsStack> class SpoolConsumerStage final : public PlanStage { public: - SpoolConsumerStage(SpoolId spoolId, value::SlotVector vals, PlanNodeId planNodeId) - : PlanStage{IsStack ? "sspool"_sd : "cspool"_sd, planNodeId}, + SpoolConsumerStage(SpoolId spoolId, + value::SlotVector vals, + PlanNodeId planNodeId, + bool participateInTrialRunTracking = true) + : PlanStage{IsStack ? 
"sspool"_sd : "cspool"_sd, planNodeId, participateInTrialRunTracking}, _spoolId{spoolId}, _vals{std::move(vals)} {} std::unique_ptr<PlanStage> clone() const { - return std::make_unique<SpoolConsumerStage<IsStack>>(_spoolId, _vals, _commonStats.nodeId); + return std::make_unique<SpoolConsumerStage<IsStack>>( + _spoolId, _vals, _commonStats.nodeId, _participateInTrialRunTracking); } void prepare(CompileCtx& ctx) { diff --git a/src/mongo/db/exec/sbe/stages/stages.h b/src/mongo/db/exec/sbe/stages/stages.h index 59f6746a005..02dd6ae62fb 100644 --- a/src/mongo/db/exec/sbe/stages/stages.h +++ b/src/mongo/db/exec/sbe/stages/stages.h @@ -254,7 +254,9 @@ protected: template <typename T> class CanTrackStats { public: - CanTrackStats(StringData stageType, PlanNodeId nodeId) : _commonStats(stageType, nodeId) {} + CanTrackStats(StringData stageType, PlanNodeId nodeId, bool participateInTrialRunTracking) + : _commonStats(stageType, nodeId), + _participateInTrialRunTracking(participateInTrialRunTracking) {} /** * Returns a tree of stats. If the stage has any children it must propagate the request for @@ -414,6 +416,12 @@ protected: CommonStats _commonStats; + // Flag which determines whether this node and its children can participate in trial run + // tracking. A stage and its children are not eligible for trial run tracking when they are + // planned deterministically (that is, the amount of work they perform is independent of + // other parts of the tree which are multiplanned). + bool _participateInTrialRunTracking{true}; + private: /** * In general, accessors can be accessed only after getNext returns a row. It is most definitely @@ -422,14 +430,6 @@ private: * that feature is retired we can then simply revisit all stages and simplify them. */ bool _slotsAccessible{false}; - - /** - * Flag which determines whether this node and its children can participate in trial run - * tracking. A stage and its children are not eligible for trial run tracking when they are - * planned deterministically (that is, the amount of work they perform is independent of - * other parts of the tree which are multiplanned). 
- */ - bool _participateInTrialRunTracking{true}; }; /** @@ -496,10 +496,15 @@ class PlanStage : public CanSwitchOperationContext<PlanStage>, public: using Vector = absl::InlinedVector<std::unique_ptr<PlanStage>, 2>; - PlanStage(StringData stageType, PlanYieldPolicy* yieldPolicy, PlanNodeId nodeId) - : CanTrackStats{stageType, nodeId}, CanInterrupt{yieldPolicy} {} + PlanStage(StringData stageType, + PlanYieldPolicy* yieldPolicy, + PlanNodeId nodeId, + bool participateInTrialRunTracking) + : CanTrackStats{stageType, nodeId, participateInTrialRunTracking}, + CanInterrupt{yieldPolicy} {} - PlanStage(StringData stageType, PlanNodeId nodeId) : PlanStage(stageType, nullptr, nodeId) {} + PlanStage(StringData stageType, PlanNodeId nodeId, bool participateInTrialRunTracking) + : PlanStage(stageType, nullptr, nodeId, participateInTrialRunTracking) {} virtual ~PlanStage() = default; diff --git a/src/mongo/db/exec/sbe/stages/traverse.cpp b/src/mongo/db/exec/sbe/stages/traverse.cpp index d1e0a040b3e..654a1a160fa 100644 --- a/src/mongo/db/exec/sbe/stages/traverse.cpp +++ b/src/mongo/db/exec/sbe/stages/traverse.cpp @@ -42,8 +42,9 @@ TraverseStage::TraverseStage(std::unique_ptr<PlanStage> outer, std::unique_ptr<EExpression> foldExpr, std::unique_ptr<EExpression> finalExpr, PlanNodeId planNodeId, - boost::optional<size_t> nestedArraysDepth) - : PlanStage("traverse"_sd, planNodeId), + boost::optional<size_t> nestedArraysDepth, + bool participateInTrialRunTracking) + : PlanStage("traverse"_sd, planNodeId, participateInTrialRunTracking), _inField(inField), _outField(outField), _outFieldInner(outFieldInner), @@ -69,7 +70,8 @@ std::unique_ptr<PlanStage> TraverseStage::clone() const { _fold ? _fold->clone() : nullptr, _final ? _final->clone() : nullptr, _commonStats.nodeId, - _nestedArraysDepth); + _nestedArraysDepth, + _participateInTrialRunTracking); } void TraverseStage::prepare(CompileCtx& ctx) { diff --git a/src/mongo/db/exec/sbe/stages/traverse.h b/src/mongo/db/exec/sbe/stages/traverse.h index 2b3fee33a47..09e5dc3dfcf 100644 --- a/src/mongo/db/exec/sbe/stages/traverse.h +++ b/src/mongo/db/exec/sbe/stages/traverse.h @@ -74,7 +74,8 @@ public: std::unique_ptr<EExpression> foldExpr, std::unique_ptr<EExpression> finalExpr, PlanNodeId planNodeId, - boost::optional<size_t> nestedArraysDepth); + boost::optional<size_t> nestedArraysDepth, + bool participateInTrialRunTracking = true); std::unique_ptr<PlanStage> clone() const final; diff --git a/src/mongo/db/exec/sbe/stages/union.cpp b/src/mongo/db/exec/sbe/stages/union.cpp index a661e6c579f..2fd6d0b4fc5 100644 --- a/src/mongo/db/exec/sbe/stages/union.cpp +++ b/src/mongo/db/exec/sbe/stages/union.cpp @@ -38,8 +38,9 @@ namespace mongo::sbe { UnionStage::UnionStage(PlanStage::Vector inputStages, std::vector<value::SlotVector> inputVals, value::SlotVector outputVals, - PlanNodeId planNodeId) - : PlanStage("union"_sd, planNodeId), + PlanNodeId planNodeId, + bool participateInTrialRunTracking) + : PlanStage("union"_sd, planNodeId, participateInTrialRunTracking), _inputVals{std::move(inputVals)}, _outputVals{std::move(outputVals)} { _children = std::move(inputStages); @@ -57,8 +58,11 @@ std::unique_ptr<PlanStage> UnionStage::clone() const { for (auto& child : _children) { inputStages.emplace_back(child->clone()); } - return std::make_unique<UnionStage>( - std::move(inputStages), _inputVals, _outputVals, _commonStats.nodeId); + return std::make_unique<UnionStage>(std::move(inputStages), + _inputVals, + _outputVals, + _commonStats.nodeId, + _participateInTrialRunTracking); } 
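Most of the SBE stage edits in this commit follow one mechanical pattern: each constructor gains a trailing participateInTrialRunTracking flag (defaulting to true), the common base class stores it (now as a protected member, per the stages.h hunk above, so subclasses can read it), and every clone() forwards it so a cloned plan preserves the setting for subtrees that are planned deterministically. Below is a condensed, self-contained sketch of that pattern; the toy class names are assumptions, not the real sbe::PlanStage hierarchy.

```cpp
// Simplified model of the constructor/clone() plumbing repeated across the SBE stages.
#include <memory>
#include <string>
#include <utility>

class ToyStage {
public:
    ToyStage(std::string stageType, int nodeId, bool participateInTrialRunTracking)
        : _stageType(std::move(stageType)),
          _nodeId(nodeId),
          _participateInTrialRunTracking(participateInTrialRunTracking) {}

    virtual ~ToyStage() = default;
    virtual std::unique_ptr<ToyStage> clone() const = 0;

protected:
    std::string _stageType;
    int _nodeId;
    // When false, this stage and its children are skipped by trial-run tracking
    // because the amount of work they perform does not depend on the multiplanned
    // parts of the tree (mirroring the comment moved in stages.h above).
    bool _participateInTrialRunTracking{true};
};

class ToyLimitStage final : public ToyStage {
public:
    ToyLimitStage(long long limit, int nodeId, bool participateInTrialRunTracking = true)
        : ToyStage("limit", nodeId, participateInTrialRunTracking), _limit(limit) {}

    // clone() must forward the flag; otherwise a cloned or cached plan would
    // silently re-enable tracking, which is what the per-stage clone() edits prevent.
    std::unique_ptr<ToyStage> clone() const override {
        return std::make_unique<ToyLimitStage>(_limit, _nodeId, _participateInTrialRunTracking);
    }

private:
    long long _limit;
};
```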
void UnionStage::prepare(CompileCtx& ctx) { diff --git a/src/mongo/db/exec/sbe/stages/union.h b/src/mongo/db/exec/sbe/stages/union.h index 2ec0ec73df9..b21d5e6caf5 100644 --- a/src/mongo/db/exec/sbe/stages/union.h +++ b/src/mongo/db/exec/sbe/stages/union.h @@ -53,7 +53,8 @@ public: UnionStage(PlanStage::Vector inputStages, std::vector<value::SlotVector> inputVals, value::SlotVector outputVals, - PlanNodeId planNodeId); + PlanNodeId planNodeId, + bool participateInTrialRunTracking = true); std::unique_ptr<PlanStage> clone() const final; diff --git a/src/mongo/db/exec/sbe/stages/unique.cpp b/src/mongo/db/exec/sbe/stages/unique.cpp index 355927ff912..c88fa9ab43e 100644 --- a/src/mongo/db/exec/sbe/stages/unique.cpp +++ b/src/mongo/db/exec/sbe/stages/unique.cpp @@ -37,13 +37,15 @@ namespace mongo { namespace sbe { UniqueStage::UniqueStage(std::unique_ptr<PlanStage> input, value::SlotVector keys, - PlanNodeId planNodeId) - : PlanStage("unique"_sd, planNodeId), _keySlots(keys) { + PlanNodeId planNodeId, + bool participateInTrialRunTracking) + : PlanStage("unique"_sd, planNodeId, participateInTrialRunTracking), _keySlots(keys) { _children.emplace_back(std::move(input)); } std::unique_ptr<PlanStage> UniqueStage::clone() const { - return std::make_unique<UniqueStage>(_children[0]->clone(), _keySlots, _commonStats.nodeId); + return std::make_unique<UniqueStage>( + _children[0]->clone(), _keySlots, _commonStats.nodeId, _participateInTrialRunTracking); } void UniqueStage::prepare(CompileCtx& ctx) { diff --git a/src/mongo/db/exec/sbe/stages/unique.h b/src/mongo/db/exec/sbe/stages/unique.h index 1165743a0cc..c344cd09d24 100644 --- a/src/mongo/db/exec/sbe/stages/unique.h +++ b/src/mongo/db/exec/sbe/stages/unique.h @@ -53,7 +53,10 @@ namespace mongo::sbe { */ class UniqueStage final : public PlanStage { public: - UniqueStage(std::unique_ptr<PlanStage> input, value::SlotVector keys, PlanNodeId planNodeId); + UniqueStage(std::unique_ptr<PlanStage> input, + value::SlotVector keys, + PlanNodeId planNodeId, + bool participateInTrialRunTracking = true); std::unique_ptr<PlanStage> clone() const final; diff --git a/src/mongo/db/exec/sbe/stages/unwind.cpp b/src/mongo/db/exec/sbe/stages/unwind.cpp index b4c5e225adc..7ad10eecb23 100644 --- a/src/mongo/db/exec/sbe/stages/unwind.cpp +++ b/src/mongo/db/exec/sbe/stages/unwind.cpp @@ -40,8 +40,9 @@ UnwindStage::UnwindStage(std::unique_ptr<PlanStage> input, value::SlotId outField, value::SlotId outIndex, bool preserveNullAndEmptyArrays, - PlanNodeId planNodeId) - : PlanStage("unwind"_sd, planNodeId), + PlanNodeId planNodeId, + bool participateInTrialRunTracking) + : PlanStage("unwind"_sd, planNodeId, participateInTrialRunTracking), _inField(inField), _outField(outField), _outIndex(outIndex), @@ -59,7 +60,8 @@ std::unique_ptr<PlanStage> UnwindStage::clone() const { _outField, _outIndex, _preserveNullAndEmptyArrays, - _commonStats.nodeId); + _commonStats.nodeId, + _participateInTrialRunTracking); } void UnwindStage::prepare(CompileCtx& ctx) { diff --git a/src/mongo/db/exec/sbe/stages/unwind.h b/src/mongo/db/exec/sbe/stages/unwind.h index 049fee4a069..57b28d9c1cf 100644 --- a/src/mongo/db/exec/sbe/stages/unwind.h +++ b/src/mongo/db/exec/sbe/stages/unwind.h @@ -52,7 +52,8 @@ public: value::SlotId outField, value::SlotId outIndex, bool preserveNullAndEmptyArrays, - PlanNodeId planNodeId); + PlanNodeId planNodeId, + bool participateInTrialRunTracking = true); std::unique_ptr<PlanStage> clone() const final; diff --git a/src/mongo/db/pipeline/document_source_cursor.cpp 
b/src/mongo/db/pipeline/document_source_cursor.cpp index b98af917d99..c992288a0e4 100644 --- a/src/mongo/db/pipeline/document_source_cursor.cpp +++ b/src/mongo/db/pipeline/document_source_cursor.cpp @@ -33,6 +33,7 @@ #include "mongo/db/pipeline/document_source_cursor.h" #include "mongo/db/catalog/collection.h" +#include "mongo/db/db_raii.h" #include "mongo/db/exec/document_value/document.h" #include "mongo/db/exec/working_set_common.h" #include "mongo/db/query/collection_query_info.h" @@ -225,15 +226,20 @@ Value DocumentSourceCursor::serialize(boost::optional<ExplainOptions::Verbosity> { auto opCtx = pExpCtx->opCtx; - auto lockMode = getLockModeForQuery(opCtx, _exec->nss()); - AutoGetDb dbLock(opCtx, _exec->nss().db(), lockMode); - Lock::CollectionLock collLock(opCtx, _exec->nss(), lockMode); - auto collection = dbLock.getDb() - ? CollectionCatalog::get(opCtx)->lookupCollectionByNamespace(opCtx, _exec->nss()) - : nullptr; + auto secondaryNssList = _exec->getSecondaryNamespaces(); + AutoGetCollectionForReadMaybeLockFree readLock(opCtx, + _exec->nss(), + AutoGetCollectionViewMode::kViewsForbidden, + Date_t::max(), + secondaryNssList); + MultipleCollectionAccessor collections(opCtx, + &readLock.getCollection(), + readLock.getNss(), + readLock.isAnySecondaryNamespaceAViewOrSharded(), + secondaryNssList); Explain::explainStages(_exec.get(), - collection, + collections, verbosity.get(), _execStatus, _winningPlanTrialStats, diff --git a/src/mongo/db/pipeline/process_interface/common_mongod_process_interface.cpp b/src/mongo/db/pipeline/process_interface/common_mongod_process_interface.cpp index e0ac0211ef2..2fecb18bebe 100644 --- a/src/mongo/db/pipeline/process_interface/common_mongod_process_interface.cpp +++ b/src/mongo/db/pipeline/process_interface/common_mongod_process_interface.cpp @@ -559,7 +559,8 @@ std::vector<BSONObj> CommonMongodProcessInterface::getMatchingPlanCacheEntryStat collVersion = collQueryInfo.getPlanCacheInvalidatorVersion()]( const sbe::PlanCacheKey& key) { // Only fetch plan cache entries with keys matching given UUID and collectionVersion. - return uuid == key.getCollectionUuid() && collVersion == key.getCollectionVersion(); + return uuid == key.getMainCollectionState().uuid && + collVersion == key.getMainCollectionState().version; }; auto planCacheEntriesSBE = diff --git a/src/mongo/db/query/canonical_query.cpp b/src/mongo/db/query/canonical_query.cpp index c4d2de8fcb0..6449c5241fc 100644 --- a/src/mongo/db/query/canonical_query.cpp +++ b/src/mongo/db/query/canonical_query.cpp @@ -538,10 +538,11 @@ std::string CanonicalQuery::toStringShort() const { } CanonicalQuery::QueryShapeString CanonicalQuery::encodeKey() const { - // TODO SERVER-61507: remove '_pipeline.empty()' check. Canonical queries with pushed down - // $group/$lookup stages are not SBE-compatible until SERVER-61507 is complete. + // TODO SERVER-61507: remove 'canUseSbePlanCache' check. Canonical queries with pushed + // down $group stages are not compatible with the SBE plan cache until SERVER-61507 is complete. return (feature_flags::gFeatureFlagSbePlanCache.isEnabledAndIgnoreFCV() && - !_forceClassicEngine && _sbeCompatible && _pipeline.empty()) + !_forceClassicEngine && _sbeCompatible && + canonical_query_encoder::canUseSbePlanCache(*this)) ? 
canonical_query_encoder::encodeSBE(*this) : canonical_query_encoder::encode(*this); } diff --git a/src/mongo/db/query/canonical_query_encoder.cpp b/src/mongo/db/query/canonical_query_encoder.cpp index 2013c8a635e..11b1a99479a 100644 --- a/src/mongo/db/query/canonical_query_encoder.cpp +++ b/src/mongo/db/query/canonical_query_encoder.cpp @@ -40,6 +40,7 @@ #include "mongo/db/matcher/expression_text_noop.h" #include "mongo/db/matcher/expression_where.h" #include "mongo/db/matcher/expression_where_noop.h" +#include "mongo/db/pipeline/document_source_lookup.h" #include "mongo/db/query/analyze_regex.h" #include "mongo/db/query/projection.h" #include "mongo/db/query/query_feature_flags_gen.h" @@ -86,6 +87,7 @@ const char kEncodeProjectionRequirementSeparator = '-'; const char kEncodeRegexFlagsSeparator = '/'; const char kEncodeSortSection = '~'; const char kEncodeEngineSection = '@'; +const char kEncodePipelineSection = '^'; // These special bytes are used in the encoding of auto-parameterized match expressions in the SBE // plan cache key. @@ -135,6 +137,7 @@ void encodeUserString(StringData s, BuilderType* builder) { case kEncodeEngineSection: case kEncodeParamMarker: case kEncodeConstantLiteralMarker: + case kEncodePipelineSection: case '\\': if constexpr (hasAppendChar<BuilderType>) { builder->appendChar('\\'); @@ -431,6 +434,26 @@ void encodeCollation(const CollatorInterface* collation, StringBuilder* keyBuild // not be stable between versions. } +void encodePipeline(const std::vector<std::unique_ptr<InnerPipelineStageInterface>>& pipeline, + BufBuilder* bufBuilder) { + bufBuilder->appendChar(kEncodePipelineSection); + for (auto& stage : pipeline) { + std::vector<Value> serializedArray; + if (auto lookupStage = dynamic_cast<DocumentSourceLookUp*>(stage->documentSource())) { + lookupStage->serializeToArray(serializedArray, boost::none); + tassert(6443201, + "$lookup stage isn't serialized to a single bson object", + serializedArray.size() == 1 && serializedArray[0].getType() == Object); + const auto bson = serializedArray[0].getDocument().toBson(); + bufBuilder->appendBuf(bson.objdata(), bson.objsize()); + } else { + tasserted(6443200, + str::stream() << "Pipeline stage cannot be encoded in plan cache key: " + << stage->documentSource()->getSourceName()); + } + } +} + template <class RegexIterator> void encodeRegexFlagsForMatch(RegexIterator first, RegexIterator last, StringBuilder* keyBuilder) { // We sort the flags, so that queries with the same regex flags in different orders will have @@ -1085,6 +1108,8 @@ std::string encodeSBE(const CanonicalQuery& cq) { encodeFindCommandRequest(cq.getFindCommandRequest(), &bufBuilder); + encodePipeline(cq.pipeline(), &bufBuilder); + return base64::encode(StringData(bufBuilder.buf(), bufBuilder.len())); } @@ -1106,5 +1131,14 @@ CanonicalQuery::IndexFilterKey encodeForIndexFilters(const CanonicalQuery& cq) { uint32_t computeHash(StringData key) { return SimpleStringDataComparator::kInstance.hash(key); } + +bool canUseSbePlanCache(const CanonicalQuery& cq) { + for (auto& stage : cq.pipeline()) { + if (StringData{stage->documentSource()->getSourceName()} != "$lookup") { + return false; + } + } + return true; +} } // namespace canonical_query_encoder } // namespace mongo diff --git a/src/mongo/db/query/canonical_query_encoder.h b/src/mongo/db/query/canonical_query_encoder.h index 3164ddbec67..4bfbb68c2f2 100644 --- a/src/mongo/db/query/canonical_query_encoder.h +++ b/src/mongo/db/query/canonical_query_encoder.h @@ -68,5 +68,11 @@ 
CanonicalQuery::IndexFilterKey encodeForIndexFilters(const CanonicalQuery& cq); * Returns a hash of the given key (produced from either a QueryShapeString or a PlanCacheKey). */ uint32_t computeHash(StringData key); + +/** + * Returns whether a plan generated from this query can be stored in the SBE plan cache. + */ +bool canUseSbePlanCache(const CanonicalQuery& cq); + } // namespace canonical_query_encoder } // namespace mongo diff --git a/src/mongo/db/query/canonical_query_encoder_test.cpp b/src/mongo/db/query/canonical_query_encoder_test.cpp index 486b4f2d14f..3394e048be8 100644 --- a/src/mongo/db/query/canonical_query_encoder_test.cpp +++ b/src/mongo/db/query/canonical_query_encoder_test.cpp @@ -29,10 +29,11 @@ #include "mongo/db/query/canonical_query_encoder.h" -#include "mongo/db/catalog/collection_mock.h" #include "mongo/db/jsobj.h" #include "mongo/db/json.h" +#include "mongo/db/pipeline/document_source.h" #include "mongo/db/pipeline/expression_context_for_test.h" +#include "mongo/db/pipeline/inner_pipeline_stage_impl.h" #include "mongo/db/query/canonical_query.h" #include "mongo/db/query/plan_cache_key_factory.h" #include "mongo/db/query/query_test_service_context.h" @@ -46,10 +47,17 @@ namespace { using std::unique_ptr; static const NamespaceString nss("testdb.testcoll"); +static const NamespaceString foreignNss("testdb.foreigncoll"); -PlanCacheKey makeKey(const CanonicalQuery& cq) { - CollectionMock coll(nss); - return plan_cache_key_factory::make<PlanCacheKey>(cq, &coll); +std::vector<std::unique_ptr<InnerPipelineStageInterface>> parsePipeline( + const boost::intrusive_ptr<ExpressionContext> expCtx, const std::vector<BSONObj>& rawPipeline) { + auto pipeline = Pipeline::parse(rawPipeline, expCtx); + + std::vector<std::unique_ptr<InnerPipelineStageInterface>> stages; + for (auto&& source : pipeline->getSources()) { + stages.emplace_back(std::make_unique<InnerPipelineStageImpl>(source)); + } + return stages; } /** @@ -59,7 +67,8 @@ unique_ptr<CanonicalQuery> canonicalize(BSONObj query, BSONObj sort, BSONObj proj, BSONObj collation, - std::unique_ptr<FindCommandRequest> findCommand = nullptr) { + std::unique_ptr<FindCommandRequest> findCommand = nullptr, + std::vector<BSONObj> pipelineObj = {}) { QueryTestServiceContext serviceContext; auto opCtx = serviceContext.makeOperationContext(); @@ -70,14 +79,26 @@ unique_ptr<CanonicalQuery> canonicalize(BSONObj query, findCommand->setSort(sort.getOwned()); findCommand->setProjection(proj.getOwned()); findCommand->setCollation(collation.getOwned()); - const boost::intrusive_ptr<ExpressionContext> expCtx; + + const auto expCtx = make_intrusive<ExpressionContextForTest>(opCtx.get(), nss); + expCtx->addResolvedNamespaces({foreignNss}); + if (!findCommand->getCollation().isEmpty()) { + auto statusWithCollator = CollatorFactoryInterface::get(opCtx->getServiceContext()) + ->makeFromBSON(findCommand->getCollation()); + ASSERT_OK(statusWithCollator.getStatus()); + expCtx->setCollator(std::move(statusWithCollator.getValue())); + } + auto pipeline = parsePipeline(expCtx, pipelineObj); + auto statusWithCQ = CanonicalQuery::canonicalize(opCtx.get(), std::move(findCommand), false, expCtx, ExtensionsCallbackNoop(), - MatchExpressionParser::kAllowAllSpecialFeatures); + MatchExpressionParser::kAllowAllSpecialFeatures, + ProjectionPolicies::findProjectionPolicies(), + std::move(pipeline)); ASSERT_OK(statusWithCQ.getStatus()); return std::move(statusWithCQ.getValue()); } @@ -115,13 +136,14 @@ void testComputeSBEKey(BSONObj query, BSONObj sort, BSONObj proj, 
std::string expectedStr, - std::unique_ptr<FindCommandRequest> findCommand = nullptr) { + std::unique_ptr<FindCommandRequest> findCommand = nullptr, + std::vector<BSONObj> pipelineObj = {}) { BSONObj collation; unique_ptr<CanonicalQuery> cq( - canonicalize(query, sort, proj, collation, std::move(findCommand))); + canonicalize(query, sort, proj, collation, std::move(findCommand), std::move(pipelineObj))); cq->setSbeCompatible(true); - auto key = makeKey(*cq); - ASSERT_EQUALS(key.toString(), expectedStr); + const auto key = canonical_query_encoder::encodeSBE(*cq); + ASSERT_EQUALS(key, expectedStr); } void testComputeKey(const char* queryStr, @@ -135,12 +157,14 @@ void testComputeSBEKey(const char* queryStr, const char* sortStr, const char* projStr, std::string expectedStr, - std::unique_ptr<FindCommandRequest> findCommand = nullptr) { + std::unique_ptr<FindCommandRequest> findCommand = nullptr, + std::vector<BSONObj> pipelineObj = {}) { testComputeSBEKey(fromjson(queryStr), fromjson(sortStr), fromjson(projStr), expectedStr, - std::move(findCommand)); + std::move(findCommand), + std::move(pipelineObj)); } TEST(CanonicalQueryEncoderTest, ComputeKey) { @@ -262,8 +286,6 @@ TEST(CanonicalQueryEncoderTest, ComputeKeyEscaped) { // Cache keys for $geoWithin queries with legacy and GeoJSON coordinates should // not be the same. TEST(CanonicalQueryEncoderTest, ComputeKeyGeoWithin) { - PlanCache planCache(5000); - // Legacy coordinates. unique_ptr<CanonicalQuery> cqLegacy( canonicalize("{a: {$geoWithin: " @@ -273,7 +295,8 @@ TEST(CanonicalQueryEncoderTest, ComputeKeyGeoWithin) { canonicalize("{a: {$geoWithin: " "{$geometry: {type: 'Polygon', coordinates: " "[[[0, 0], [0, 90], [90, 0], [0, 0]]]}}}}")); - ASSERT_NOT_EQUALS(makeKey(*cqLegacy), makeKey(*cqNew)); + ASSERT_NOT_EQUALS(canonical_query_encoder::encode(*cqLegacy), + canonical_query_encoder::encode(*cqNew)); } // GEO_NEAR cache keys should include information on geometry and CRS in addition @@ -395,85 +418,87 @@ TEST(CanonicalQueryEncoderTest, ComputeKeySBE) { // SBE must be enabled in order to generate SBE plan cache keys. RAIIServerParameterControllerForTest controllerSBE("internalQueryForceClassicEngine", false); - // TODO SERVER-61314: Remove when featureFlagSbePlanCache is removed. 
RAIIServerParameterControllerForTest controllerSBEPlanCache("featureFlagSbePlanCache", true); - testComputeSBEKey("{}", "{}", "{}", "YW4ABQAAAAAAAAAAAAAAAG5ubm4FAAAAAAUAAAAABQAAAAA="); + testComputeSBEKey("{}", "{}", "{}", "YW4ABQAAAAAAAAAAAAAAAG5ubm4FAAAAAAUAAAAABQAAAABe"); testComputeSBEKey( "{$or: [{a: 1}, {b: 2}]}", "{}", "{}", - "b3IAW2VxAGE/AAAAACxlcQBiPwEAAABdBQAAAAAAAAAAAAAAAG5ubm4FAAAAAAUAAAAABQAAAAA="); + "b3IAW2VxAGE/AAAAACxlcQBiPwEAAABdBQAAAAAAAAAAAAAAAG5ubm4FAAAAAAUAAAAABQAAAABe"); testComputeSBEKey( - "{a: 1}", "{}", "{}", "ZXEAYT8AAAAABQAAAAAAAAAAAAAAAG5ubm4FAAAAAAUAAAAABQAAAAA="); + "{a: 1}", "{}", "{}", "ZXEAYT8AAAAABQAAAAAAAAAAAAAAAG5ubm4FAAAAAAUAAAAABQAAAABe"); testComputeSBEKey( - "{b: 1}", "{}", "{}", "ZXEAYj8AAAAABQAAAAAAAAAAAAAAAG5ubm4FAAAAAAUAAAAABQAAAAA="); + "{b: 1}", "{}", "{}", "ZXEAYj8AAAAABQAAAAAAAAAAAAAAAG5ubm4FAAAAAAUAAAAABQAAAABe"); testComputeSBEKey( "{a: 1, b: 1, c: 1}", "{}", "{}", - "YW4AW2VxAGE/AAAAACxlcQBiPwEAAAAsZXEAYz8CAAAAXQUAAAAAAAAAAAAAAABubm5uBQAAAAAFAAAAAAUAAAAA"); + "YW4AW2VxAGE/" + "AAAAACxlcQBiPwEAAAAsZXEAYz8CAAAAXQUAAAAAAAAAAAAAAABubm5uBQAAAAAFAAAAAAUAAAAAXg=="); // With sort - testComputeSBEKey("{}", "{a: 1}", "{}", "YW4ABQAAAAB+YWEAAAAAAAAAAG5ubm4FAAAAAAUAAAAABQAAAAA="); + testComputeSBEKey("{}", "{a: 1}", "{}", "YW4ABQAAAAB+YWEAAAAAAAAAAG5ubm4FAAAAAAUAAAAABQAAAABe"); testComputeSBEKey( - "{}", "{a: -1}", "{}", "YW4ABQAAAAB+ZGEAAAAAAAAAAG5ubm4FAAAAAAUAAAAABQAAAAA="); + "{}", "{a: -1}", "{}", "YW4ABQAAAAB+ZGEAAAAAAAAAAG5ubm4FAAAAAAUAAAAABQAAAABe"); testComputeSBEKey( - "{a: 1}", "{a: 1}", "{}", "ZXEAYT8AAAAABQAAAAB+YWEAAAAAAAAAAG5ubm4FAAAAAAUAAAAABQAAAAA="); + "{a: 1}", "{a: 1}", "{}", "ZXEAYT8AAAAABQAAAAB+YWEAAAAAAAAAAG5ubm4FAAAAAAUAAAAABQAAAABe"); // With projection testComputeSBEKey("{a: 1}", "{a: 1}", "{a: 1}", - "ZXEAYT8AAAAADAAAABBhAAEAAAAAfmFhAAAAAAAAAABubm5uBQAAAAAFAAAAAAUAAAAA"); - testComputeSBEKey( - "{}", "{a: 1}", "{a: 1}", "YW4ADAAAABBhAAEAAAAAfmFhAAAAAAAAAABubm5uBQAAAAAFAAAAAAUAAAAA"); + "ZXEAYT8AAAAADAAAABBhAAEAAAAAfmFhAAAAAAAAAABubm5uBQAAAAAFAAAAAAUAAAAAXg=="); + testComputeSBEKey("{}", + "{a: 1}", + "{a: 1}", + "YW4ADAAAABBhAAEAAAAAfmFhAAAAAAAAAABubm5uBQAAAAAFAAAAAAUAAAAAXg=="); testComputeSBEKey("{}", "{a: 1}", "{a: 1, b: [{$const: 1}]}", "YW4AKAAAABBhAAEAAAAEYgAZAAAAAzAAEQAAABAkY29uc3QAAQAAAAAAAH5hYQAAAAAAAAAAbm5u" - "bgUAAAAABQAAAAAFAAAAAA=="); + "bgUAAAAABQAAAAAFAAAAAF4="); testComputeSBEKey( - "{}", "{}", "{a: 1}", "YW4ADAAAABBhAAEAAAAAAAAAAAAAAABubm5uBQAAAAAFAAAAAAUAAAAA"); + "{}", "{}", "{a: 1}", "YW4ADAAAABBhAAEAAAAAAAAAAAAAAABubm5uBQAAAAAFAAAAAAUAAAAAXg=="); testComputeSBEKey( - "{}", "{}", "{a: true}", "YW4ACQAAAAhhAAEAAAAAAAAAAABubm5uBQAAAAAFAAAAAAUAAAAA"); + "{}", "{}", "{a: true}", "YW4ACQAAAAhhAAEAAAAAAAAAAABubm5uBQAAAAAFAAAAAAUAAAAAXg=="); testComputeSBEKey( - "{}", "{}", "{a: false}", "YW4ACQAAAAhhAAAAAAAAAAAAAABubm5uBQAAAAAFAAAAAAUAAAAA"); + "{}", "{}", "{a: false}", "YW4ACQAAAAhhAAAAAAAAAAAAAABubm5uBQAAAAAFAAAAAAUAAAAAXg=="); // With FindCommandRequest auto findCommand = std::make_unique<FindCommandRequest>(nss); testComputeSBEKey("{a: 1}", "{a: 1}", "{}", - "ZXEAYT8AAAAABQAAAAB+YWEAAAAAAAAAAG5ubm4FAAAAAAUAAAAABQAAAAA=", + "ZXEAYT8AAAAABQAAAAB+YWEAAAAAAAAAAG5ubm4FAAAAAAUAAAAABQAAAABe", std::move(findCommand)); findCommand = std::make_unique<FindCommandRequest>(nss); findCommand->setAllowDiskUse(true); testComputeSBEKey("{a: 1}", "{a: 1}", "{}", - "ZXEAYT8AAAAABQAAAAB+YWEAAAAAAAAAAHRubm4FAAAAAAUAAAAABQAAAAA=", + "ZXEAYT8AAAAABQAAAAB+YWEAAAAAAAAAAHRubm4FAAAAAAUAAAAABQAAAABe", std::move(findCommand)); 
findCommand = std::make_unique<FindCommandRequest>(nss); findCommand->setAllowDiskUse(false); testComputeSBEKey("{a: 1}", "{a: 1}", "{}", - "ZXEAYT8AAAAABQAAAAB+YWEAAAAAAAAAAGZubm4FAAAAAAUAAAAABQAAAAA=", + "ZXEAYT8AAAAABQAAAAB+YWEAAAAAAAAAAGZubm4FAAAAAAUAAAAABQAAAABe", std::move(findCommand)); findCommand = std::make_unique<FindCommandRequest>(nss); findCommand->setReturnKey(true); testComputeSBEKey("{a: 1}", "{a: 1}", "{}", - "ZXEAYT8AAAAABQAAAAB+YWEAAAAAAAAAAG50bm4FAAAAAAUAAAAABQAAAAA=", + "ZXEAYT8AAAAABQAAAAB+YWEAAAAAAAAAAG50bm4FAAAAAAUAAAAABQAAAABe", std::move(findCommand)); findCommand = std::make_unique<FindCommandRequest>(nss); findCommand->setRequestResumeToken(false); testComputeSBEKey("{a: 1}", "{a: 1}", "{}", - "ZXEAYT8AAAAABQAAAAB+YWEAAAAAAAAAAG5uZm4FAAAAAAUAAAAABQAAAAA=", + "ZXEAYT8AAAAABQAAAAB+YWEAAAAAAAAAAG5uZm4FAAAAAAUAAAAABQAAAABe", std::move(findCommand)); findCommand = std::make_unique<FindCommandRequest>(nss); @@ -481,7 +506,7 @@ TEST(CanonicalQueryEncoderTest, ComputeKeySBE) { testComputeSBEKey("{a: 1}", "{a: 1}", "{}", - "ZXEAYT8AAAAABQAAAAB+YWEKAAAAAAAAAAAAAABubm5uBQAAAAAFAAAAAAUAAAAA", + "ZXEAYT8AAAAABQAAAAB+YWEKAAAAAAAAAAAAAABubm5uBQAAAAAFAAAAAAUAAAAAXg==", std::move(findCommand)); findCommand = std::make_unique<FindCommandRequest>(nss); @@ -489,7 +514,7 @@ TEST(CanonicalQueryEncoderTest, ComputeKeySBE) { testComputeSBEKey("{a: 1}", "{a: 1}", "{}", - "ZXEAYT8AAAAABQAAAAB+YWEAAAAACgAAAAAAAABubm5uBQAAAAAFAAAAAAUAAAAA", + "ZXEAYT8AAAAABQAAAAB+YWEAAAAACgAAAAAAAABubm5uBQAAAAAFAAAAAAUAAAAAXg==", std::move(findCommand)); findCommand = std::make_unique<FindCommandRequest>(nss); @@ -497,14 +522,14 @@ TEST(CanonicalQueryEncoderTest, ComputeKeySBE) { testComputeSBEKey("{a: 1}", "{a: 1}", "{}", - "ZXEAYT8AAAAABQAAAAB+YWEAAAAAAAAAAG5ubm4FAAAAAAwAAAAQYQABAAAAAAUAAAAA", + "ZXEAYT8AAAAABQAAAAB+YWEAAAAAAAAAAG5ubm4FAAAAAAwAAAAQYQABAAAAAAUAAAAAXg==", std::move(findCommand)); findCommand = std::make_unique<FindCommandRequest>(nss); findCommand->setMax(mongo::fromjson("{ a : 1 }")); testComputeSBEKey("{a: 1}", "{a: 1}", "{}", - "ZXEAYT8AAAAABQAAAAB+YWEAAAAAAAAAAG5ubm4FAAAAAAUAAAAADAAAABBhAAEAAAAA", + "ZXEAYT8AAAAABQAAAAB+YWEAAAAAAAAAAG5ubm4FAAAAAAUAAAAADAAAABBhAAEAAAAAXg==", std::move(findCommand)); findCommand = std::make_unique<FindCommandRequest>(nss); findCommand->setRequestResumeToken(true); @@ -515,9 +540,74 @@ TEST(CanonicalQueryEncoderTest, ComputeKeySBE) { "{a: 1}", "{}", "{}", - "ZXEAYT8AAAAABQAAAAAAAAAAAAAAAG5udG4YAAAAEiRyZWNvcmRJZAABAAAAAAAAAAAFAAAAAAUAAAAA", + "ZXEAYT8AAAAABQAAAAAAAAAAAAAAAG5udG4YAAAAEiRyZWNvcmRJZAABAAAAAAAAAAAFAAAAAAUAAAAAXg==", std::move(findCommand)); } +TEST(CanonicalQueryEncoderTest, ComputeKeySBEWithPipeline) { + // SBE must be enabled in order to generate SBE plan cache keys. + RAIIServerParameterControllerForTest controllerSBE("internalQueryForceClassicEngine", false); + + RAIIServerParameterControllerForTest controllerSBEPlanCache("featureFlagSbePlanCache", true); + + auto getLookupBson = [](StringData localField, StringData foreignField, StringData asField) { + return BSON("$lookup" << BSON("from" << foreignNss.coll() << "localField" << localField + << "foreignField" << foreignField << "as" << asField)); + }; + + // No pipeline stage. + testComputeSBEKey("{a: 1}", + "{}", + "{}", + "ZXEAYT8AAAAABQAAAAAAAAAAAAAAAG5ubm4FAAAAAAUAAAAABQAAAABe", + nullptr, + {}); + + // Different $lookup stage options. 
+ testComputeSBEKey( + "{a: 1}", + "{}", + "{}", + "ZXEAYT8AAAAABQAAAAAAAAAAAAAAAG5ubm4FAAAAAAUAAAAABQAAAABeWgAAAAMkbG9va3VwAEwAAAACZnJvbQAMAA" + "AAZm9yZWlnbmNvbGwAAmFzAAMAAABhcwACbG9jYWxGaWVsZAACAAAAYQACZm9yZWlnbkZpZWxkAAIAAABiAAAA", + nullptr, + {getLookupBson("a", "b", "as")}); + testComputeSBEKey("{a: 1}", + "{}", + "{}", + "ZXEAYT8AAAAABQAAAAAAAAAAAAAAAG5ubm4FAAAAAAUAAAAABQAAAABeWwAAAAMkbG9va3VwAE0A" + "AAACZnJvbQAMAAAAZm9yZWlnbmNvbGwAAmFzAAMAAABhcwACbG9jYWxGaWVsZAADAAAAYTEAAmZv" + "cmVpZ25GaWVsZAACAAAAYgAAAA==", + nullptr, + {getLookupBson("a1", "b", "as")}); + testComputeSBEKey("{a: 1}", + "{}", + "{}", + "ZXEAYT8AAAAABQAAAAAAAAAAAAAAAG5ubm4FAAAAAAUAAAAABQAAAABeWwAAAAMkbG9va3VwAE0A" + "AAACZnJvbQAMAAAAZm9yZWlnbmNvbGwAAmFzAAMAAABhcwACbG9jYWxGaWVsZAACAAAAYQACZm9y" + "ZWlnbkZpZWxkAAMAAABiMQAAAA==", + nullptr, + {getLookupBson("a", "b1", "as")}); + testComputeSBEKey("{a: 1}", + "{}", + "{}", + "ZXEAYT8AAAAABQAAAAAAAAAAAAAAAG5ubm4FAAAAAAUAAAAABQAAAABeWwAAAAMkbG9va3VwAE0A" + "AAACZnJvbQAMAAAAZm9yZWlnbmNvbGwAAmFzAAQAAABhczEAAmxvY2FsRmllbGQAAgAAAGEAAmZv" + "cmVpZ25GaWVsZAACAAAAYgAAAA==", + nullptr, + {getLookupBson("a", "b", "as1")}); + + // Multiple $lookup stages. + testComputeSBEKey("{a: 1}", + "{}", + "{}", + "ZXEAYT8AAAAABQAAAAAAAAAAAAAAAG5ubm4FAAAAAAUAAAAABQAAAABeWgAAAAMkbG9va3VwAEwA" + "AAACZnJvbQAMAAAAZm9yZWlnbmNvbGwAAmFzAAMAAABhcwACbG9jYWxGaWVsZAACAAAAYQACZm9y" + "ZWlnbkZpZWxkAAIAAABiAAAAXQAAAAMkbG9va3VwAE8AAAACZnJvbQAMAAAAZm9yZWlnbmNvbGwA" + "AmFzAAQAAABhczEAAmxvY2FsRmllbGQAAwAAAGExAAJmb3JlaWduRmllbGQAAwAAAGIxAAAA", + nullptr, + {getLookupBson("a", "b", "as"), getLookupBson("a1", "b1", "as1")}); +} + } // namespace } // namespace mongo diff --git a/src/mongo/db/query/explain.cpp b/src/mongo/db/query/explain.cpp index 414badb8332..568c3da9fe0 100644 --- a/src/mongo/db/query/explain.cpp +++ b/src/mongo/db/query/explain.cpp @@ -48,6 +48,7 @@ #include "mongo/db/query/collection_query_info.h" #include "mongo/db/query/explain_common.h" #include "mongo/db/query/get_executor.h" +#include "mongo/db/query/multiple_collection_accessor.h" #include "mongo/db/query/plan_cache_key_factory.h" #include "mongo/db/query/plan_executor.h" #include "mongo/db/query/plan_executor_impl.h" @@ -79,7 +80,7 @@ namespace { * - 'out' is a builder for the explain output. */ void generatePlannerInfo(PlanExecutor* exec, - const CollectionPtr& collection, + const MultipleCollectionAccessor& collections, BSONObj extraInfo, BSONObjBuilder* out) { BSONObjBuilder plannerBob(out->subobjStart("queryPlanner")); @@ -91,22 +92,23 @@ void generatePlannerInfo(PlanExecutor* exec, bool indexFilterSet = false; boost::optional<uint32_t> queryHash; boost::optional<uint32_t> planCacheKeyHash; - if (collection && exec->getCanonicalQuery()) { + const auto& mainCollection = collections.getMainCollection(); + if (mainCollection && exec->getCanonicalQuery()) { const QuerySettings* querySettings = - QuerySettingsDecoration::get(collection->getSharedDecorations()); + QuerySettingsDecoration::get(mainCollection->getSharedDecorations()); if (exec->getCanonicalQuery()->isSbeCompatible() && feature_flags::gFeatureFlagSbePlanCache.isEnabledAndIgnoreFCV() && !exec->getCanonicalQuery()->getForceClassicEngine() && - // TODO(SERVER-61507): Remove pipeline check once lowered pipelines are integrated with - // SBE plan cache. 
- exec->getCanonicalQuery()->pipeline().empty()) { - const auto planCacheKeyInfo = plan_cache_key_factory::make<sbe::PlanCacheKey>( - *exec->getCanonicalQuery(), collection); + // TODO SERVER-61507: remove canUseSbePlanCache check when $group pushdown is + // integrated with SBE plan cache. + canonical_query_encoder::canUseSbePlanCache(*exec->getCanonicalQuery())) { + const auto planCacheKeyInfo = + plan_cache_key_factory::make(*exec->getCanonicalQuery(), collections); planCacheKeyHash = planCacheKeyInfo.planCacheKeyHash(); queryHash = planCacheKeyInfo.queryHash(); } else { - const auto planCacheKeyInfo = - plan_cache_key_factory::make<PlanCacheKey>(*exec->getCanonicalQuery(), collection); + const auto planCacheKeyInfo = plan_cache_key_factory::make<PlanCacheKey>( + *exec->getCanonicalQuery(), mainCollection); planCacheKeyHash = planCacheKeyInfo.planCacheKeyHash(); queryHash = planCacheKeyInfo.queryHash(); } @@ -310,7 +312,7 @@ void appendBasicPlanCacheEntryInfoToBSON(const EntryType& entry, BSONObjBuilder* } // namespace void Explain::explainStages(PlanExecutor* exec, - const CollectionPtr& collection, + const MultipleCollectionAccessor& collections, ExplainOptions::Verbosity verbosity, Status executePlanStatus, boost::optional<PlanExplainer::PlanStatsDetails> winningPlanTrialStats, @@ -325,7 +327,7 @@ void Explain::explainStages(PlanExecutor* exec, out->appendElements(explainVersionToBson(explainer.getVersion())); if (verbosity >= ExplainOptions::Verbosity::kQueryPlanner) { - generatePlannerInfo(exec, collection, extraInfo, out); + generatePlannerInfo(exec, collections, extraInfo, out); } if (verbosity >= ExplainOptions::Verbosity::kExecStats) { @@ -364,7 +366,7 @@ void Explain::explainPipeline(PlanExecutor* exec, } void Explain::explainStages(PlanExecutor* exec, - const CollectionPtr& collection, + const MultipleCollectionAccessor& collections, ExplainOptions::Verbosity verbosity, BSONObj extraInfo, const BSONObj& command, @@ -372,9 +374,10 @@ void Explain::explainStages(PlanExecutor* exec, auto&& explainer = exec->getPlanExplainer(); auto winningPlanTrialStats = explainer.getWinningPlanTrialStats(); Status executePlanStatus = Status::OK(); - const CollectionPtr* collectionPtr = &collection; + const MultipleCollectionAccessor* collectionsPtr = &collections; // If we need execution stats, then run the plan in order to gather the stats. + const MultipleCollectionAccessor emptyCollections; if (verbosity >= ExplainOptions::Verbosity::kExecStats) { try { executePlan(exec); @@ -386,12 +389,12 @@ void Explain::explainStages(PlanExecutor* exec, // then the collection may no longer be valid. We conservatively set our collection pointer // to null in case it is invalid. 
if (!executePlanStatus.isOK() && executePlanStatus != ErrorCodes::NoQueryExecutionPlans) { - collectionPtr = &CollectionPtr::null; + collectionsPtr = &emptyCollections; } } explainStages(exec, - *collectionPtr, + *collectionsPtr, verbosity, executePlanStatus, winningPlanTrialStats, @@ -403,6 +406,15 @@ void Explain::explainStages(PlanExecutor* exec, explain_common::generateServerParameters(out); } +void Explain::explainStages(PlanExecutor* exec, + const CollectionPtr& collection, + ExplainOptions::Verbosity verbosity, + BSONObj extraInfo, + const BSONObj& command, + BSONObjBuilder* out) { + explainStages(exec, MultipleCollectionAccessor(collection), verbosity, extraInfo, command, out); +} + void Explain::planCacheEntryToBSON(const PlanCacheEntry& entry, BSONObjBuilder* out) { out->append("version", "1"); diff --git a/src/mongo/db/query/explain.h b/src/mongo/db/query/explain.h index d41dd3a1725..1dcabdeb7e3 100644 --- a/src/mongo/db/query/explain.h +++ b/src/mongo/db/query/explain.h @@ -39,6 +39,7 @@ namespace mongo { class Collection; class CollectionPtr; +class MultipleCollectionAccessor; class OperationContext; class PlanExecutorPipeline; struct PlanSummaryStats; @@ -77,15 +78,26 @@ public: BSONObj extraInfo, const BSONObj& command, BSONObjBuilder* out); + + /** + * Similar to the above function, but takes in multiple collections instead to support + * aggregation that involves multiple collections (e.g. $lookup). + */ + static void explainStages(PlanExecutor* exec, + const MultipleCollectionAccessor& collections, + ExplainOptions::Verbosity verbosity, + BSONObj extraInfo, + const BSONObj& command, + BSONObjBuilder* out); + /** * Adds "queryPlanner" and "executionStats" (if requested in verbosity) fields to 'out'. Unlike * the other overload of explainStages() above, this one does not add the "serverInfo" section. * * - 'exec' is the stage tree for the operation being explained. - * - 'collection' is the relevant collection. During this call it may be required to execute the - * plan to collect statistics. If the PlanExecutor uses 'kLockExternally' lock policy, the - * caller should hold at least an IS lock on the collection the that the query runs on, even if - * 'collection' parameter is nullptr. + * - 'collections' are the relevant main and secondary collections (e.g. for $lookup). If the + * PlanExecutor uses 'kLockExternally' lock policy, the caller should hold the necessary db_raii + * object on the involved collections. * - 'verbosity' is the verbosity level of the explain. * - 'extraInfo' specifies additional information to include into the output. * - 'executePlanStatus' is the status returned after executing the query (Status::OK if the @@ -97,7 +109,7 @@ public: */ static void explainStages( PlanExecutor* exec, - const CollectionPtr& collection, + const MultipleCollectionAccessor& collections, ExplainOptions::Verbosity verbosity, Status executePlanStatus, boost::optional<PlanExplainer::PlanStatsDetails> winningPlanTrialStats, diff --git a/src/mongo/db/query/get_executor.cpp b/src/mongo/db/query/get_executor.cpp index 5c22beab210..6c77f43ae1a 100644 --- a/src/mongo/db/query/get_executor.cpp +++ b/src/mongo/db/query/get_executor.cpp @@ -647,7 +647,7 @@ public: _cq->setCollator(mainColl->getDefaultCollator()->clone()); } - auto planCacheKey = plan_cache_key_factory::make<KeyType>(*_cq, mainColl); + auto planCacheKey = buildPlanCacheKey(); // Fill in some opDebug information, unless it has already been filled by an outer pipeline. 
OpDebug& opDebug = CurOp::get(_opCtx)->debug(); if (!opDebug.queryHash) { @@ -743,6 +743,11 @@ protected: virtual PlanStageType buildExecutableTree(const QuerySolution& solution) const = 0; /** + * Constructs the plan cache key. + */ + virtual KeyType buildPlanCacheKey() const = 0; + + /** * Either constructs a PlanStage tree from a cached plan (if exists in the plan cache), or * constructs a "id hack" PlanStage tree. Returns nullptr if no cached plan or id hack plan can * be constructed. @@ -879,6 +884,10 @@ protected: return result; } + PlanCacheKey buildPlanCacheKey() const { + return plan_cache_key_factory::make<PlanCacheKey>(*_cq, _collection); + } + std::unique_ptr<ClassicPrepareExecutionResult> buildCachedPlan( const PlanCacheKey& planCacheKey) final { initializePlannerParamsIfNeeded(); @@ -1083,13 +1092,17 @@ protected: return result; } + sbe::PlanCacheKey buildPlanCacheKey() const { + return plan_cache_key_factory::make(*_cq, _collections); + } + std::unique_ptr<SlotBasedPrepareExecutionResult> buildCachedPlan( const sbe::PlanCacheKey& planCacheKey) final { if (shouldCacheQuery(*_cq)) { - // TODO SERVER-61507: remove _cq->pipeline().empty() check when $group pushdown is + // TODO SERVER-61507: remove canUseSbePlanCache check when $group pushdown is // integrated with SBE plan cache. if (!feature_flags::gFeatureFlagSbePlanCache.isEnabledAndIgnoreFCV() || - !_cq->pipeline().empty()) { + !canonical_query_encoder::canUseSbePlanCache(*_cq)) { // If the feature flag is off, we first try to build an "id hack" plan because the // id hack plans are not cached in the classic cache. We then fall back to use the // classic plan cache. @@ -1346,18 +1359,19 @@ StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getSlotBasedExe // No need for runtime planning, just use the constructed plan stage tree. invariant(solutions.size() == 1); invariant(roots.size() == 1); - if (!cq->pipeline().empty()) { - // Need to extend the solution with the agg pipeline and rebuild the execution tree. - solutions[0] = QueryPlanner::extendWithAggPipeline( - *cq, - std::move(solutions[0]), - fillOutSecondaryCollectionsInformation(opCtx, collections, cq.get())); - roots[0] = helper.buildExecutableTree(*(solutions[0])); - } auto&& [root, data] = roots[0]; + if (!planningResult->recoveredPinnedCacheEntry()) { - plan_cache_util::updatePlanCache( - opCtx, collections.getMainCollection(), *cq, *solutions[0], *root, data); + if (!cq->pipeline().empty()) { + // Need to extend the solution with the agg pipeline and rebuild the execution tree. + solutions[0] = QueryPlanner::extendWithAggPipeline( + *cq, + std::move(solutions[0]), + fillOutSecondaryCollectionsInformation(opCtx, collections, cq.get())); + roots[0] = helper.buildExecutableTree(*(solutions[0])); + } + + plan_cache_util::updatePlanCache(opCtx, collections, *cq, *solutions[0], *root, data); } // Prepare the SBE tree for execution. 
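Illustrative sketch (not part of the patch): the get_executor.cpp hunks above replace the shared plan_cache_key_factory::make<KeyType>() call with a virtual buildPlanCacheKey(), so the classic helper keys on the main collection alone while the SBE helper keys on every collection a pushed-down $lookup touches. The C++ below uses invented names (ClassicKeySketch, SbeKeySketch, PrepareExecutionHelperSketch) to show that template-method shape; it is not MongoDB's actual class hierarchy.

#include <cstdint>
#include <string>
#include <vector>

// Stand-ins for PlanCacheKey and sbe::PlanCacheKey.
struct ClassicKeySketch {
    std::string shape;
};
struct SbeKeySketch {
    std::string shape;
    std::vector<std::uint64_t> collectionStateHashes;
};

template <typename KeyType>
class PrepareExecutionHelperSketch {
public:
    virtual ~PrepareExecutionHelperSketch() = default;

    void prepare() {
        // The shared path no longer decides how the key is built; each helper does.
        KeyType planCacheKey = buildPlanCacheKey();
        lookupInCache(planCacheKey);
    }

protected:
    virtual KeyType buildPlanCacheKey() const = 0;
    virtual void lookupInCache(const KeyType& key) = 0;
};

class ClassicHelperSketch final : public PrepareExecutionHelperSketch<ClassicKeySketch> {
protected:
    ClassicKeySketch buildPlanCacheKey() const override {
        // Classic cache: the key depends on the query shape and the main collection only.
        return ClassicKeySketch{"<encoded query shape>"};
    }
    void lookupInCache(const ClassicKeySketch&) override {}
};

class SbeHelperSketch final : public PrepareExecutionHelperSketch<SbeKeySketch> {
protected:
    SbeKeySketch buildPlanCacheKey() const override {
        // SBE cache: the key also folds in per-collection state (uuid, version, newest
        // visible index timestamp) for the main and every secondary collection.
        return SbeKeySketch{"<encoded query shape>", {/* main */ 0xA1u, /* foreign */ 0xB2u}};
    }
    void lookupInCache(const SbeKeySketch&) override {}
};

In the real change the classic override forwards to plan_cache_key_factory::make<PlanCacheKey>(*_cq, _collection) and the SBE override to the new MultipleCollectionAccessor overload of plan_cache_key_factory::make().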
diff --git a/src/mongo/db/query/plan_cache_key_factory.cpp b/src/mongo/db/query/plan_cache_key_factory.cpp index 6b154b29105..b330fa5ccd6 100644 --- a/src/mongo/db/query/plan_cache_key_factory.cpp +++ b/src/mongo/db/query/plan_cache_key_factory.cpp @@ -89,12 +89,6 @@ PlanCacheKeyInfo makePlanCacheKeyInfo(const CanonicalQuery& query, return PlanCacheKeyInfo(shapeString, indexabilityKeyBuilder.str()); } -PlanCacheKey make(const CanonicalQuery& query, - const CollectionPtr& collection, - PlanCacheKeyTag<PlanCacheKey>) { - return {makePlanCacheKeyInfo(query, collection)}; -} - namespace { /** * Returns the highest index commit timestamp associated with an index on 'collection' that is @@ -129,24 +123,62 @@ boost::optional<Timestamp> computeNewestVisibleIndexTimestamp(OperationContext* return currentNewestVisible.isNull() ? boost::optional<Timestamp>{} : currentNewestVisible; } + +sbe::PlanCacheKeyCollectionState computeCollectionState(OperationContext* opCtx, + const CollectionPtr& collection, + bool isSecondaryColl) { + boost::optional<sbe::PlanCacheKeyShardingEpoch> keyShardingEpoch; + // We don't version secondary collections in the current shard versioning protocol. Also, since + // currently we only push down $lookup to SBE when secondary collections (and main collection) + // are unsharded, it's OK to not encode the sharding information here. + if (!isSecondaryColl) { + const auto shardVersion{ + OperationShardingState::get(opCtx).getShardVersion(collection->ns())}; + if (shardVersion) { + keyShardingEpoch = + sbe::PlanCacheKeyShardingEpoch{shardVersion->epoch(), shardVersion->getTimestamp()}; + } + } + return {collection->uuid(), + CollectionQueryInfo::get(collection).getPlanCacheInvalidatorVersion(), + plan_cache_detail::computeNewestVisibleIndexTimestamp(opCtx, collection), + keyShardingEpoch}; +} } // namespace +PlanCacheKey make(const CanonicalQuery& query, + const CollectionPtr& collection, + PlanCacheKeyTag<PlanCacheKey> tag) { + return {plan_cache_detail::makePlanCacheKeyInfo(query, collection)}; +} + sbe::PlanCacheKey make(const CanonicalQuery& query, const CollectionPtr& collection, - PlanCacheKeyTag<sbe::PlanCacheKey>) { - OperationContext* opCtx = query.getOpCtx(); - auto collectionVersion = CollectionQueryInfo::get(collection).getPlanCacheInvalidatorVersion(); - const auto shardVersion{OperationShardingState::get(opCtx).getShardVersion(collection->ns())}; - const auto keyShardingEpoch = shardVersion - ? 
boost::make_optional( - sbe::PlanCacheKeyShardingEpoch{shardVersion->epoch(), shardVersion->getTimestamp()}) - : boost::none; - - return {makePlanCacheKeyInfo(query, collection), - collection->uuid(), - collectionVersion, - computeNewestVisibleIndexTimestamp(opCtx, collection), - keyShardingEpoch}; + PlanCacheKeyTag<sbe::PlanCacheKey> tag) { + return plan_cache_key_factory::make(query, MultipleCollectionAccessor(collection)); } } // namespace plan_cache_detail + +namespace plan_cache_key_factory { +sbe::PlanCacheKey make(const CanonicalQuery& query, const MultipleCollectionAccessor& collections) { + OperationContext* opCtx = query.getOpCtx(); + auto mainCollectionState = plan_cache_detail::computeCollectionState( + opCtx, collections.getMainCollection(), false /* isSecondaryColl */); + std::vector<sbe::PlanCacheKeyCollectionState> secondaryCollectionStates; + secondaryCollectionStates.reserve(collections.getSecondaryCollections().size()); + // We always use the collection order saved in MultipleCollectionAccessor to populate the plan + // cache key, which is ordered by the secondary collection namespaces. + for (auto& [_, collection] : collections.getSecondaryCollections()) { + if (collection) { + secondaryCollectionStates.emplace_back(plan_cache_detail::computeCollectionState( + opCtx, collection, true /* isSecondaryColl */)); + } + } + + return {plan_cache_detail::makePlanCacheKeyInfo(query, collections.getMainCollection()), + std::move(mainCollectionState), + std::move(secondaryCollectionStates)}; +} +} // namespace plan_cache_key_factory + } // namespace mongo diff --git a/src/mongo/db/query/plan_cache_key_factory.h b/src/mongo/db/query/plan_cache_key_factory.h index 8d811793211..663297093c7 100644 --- a/src/mongo/db/query/plan_cache_key_factory.h +++ b/src/mongo/db/query/plan_cache_key_factory.h @@ -52,14 +52,14 @@ template <typename KeyType> struct PlanCacheKeyTag {}; /** - * Creates a key for the classic plan cache from the canonical query and collection instances. + * Creates a key for the classic plan cache from the canonical query and a single collection. */ PlanCacheKey make(const CanonicalQuery& query, const CollectionPtr& collection, PlanCacheKeyTag<PlanCacheKey> tag); /** - * Creates a key for the SBE plan cache from the canonical query and collection instances. + * Similar to above, but for the SBE plan cache key. */ sbe::PlanCacheKey make(const CanonicalQuery& query, const CollectionPtr& collection, @@ -77,5 +77,12 @@ template <typename Key> Key make(const CanonicalQuery& query, const CollectionPtr& collection) { return plan_cache_detail::make(query, collection, plan_cache_detail::PlanCacheKeyTag<Key>{}); } + +/** + * Similar to above, a factory helper to make a SBE plan cache key, but used for agg queries that + * might involve multiple collections. + */ +sbe::PlanCacheKey make(const CanonicalQuery& query, const MultipleCollectionAccessor& collections); + } // namespace plan_cache_key_factory } // namespace mongo diff --git a/src/mongo/db/query/sbe_cached_solution_planner.cpp b/src/mongo/db/query/sbe_cached_solution_planner.cpp index 5f1b8f008d6..0ecd5ba50f5 100644 --- a/src/mongo/db/query/sbe_cached_solution_planner.cpp +++ b/src/mongo/db/query/sbe_cached_solution_planner.cpp @@ -53,10 +53,17 @@ CandidatePlans CachedSolutionPlanner::plan( // If the cached plan is accepted we'd like to keep the results from the trials even if there // are parts of agg pipelines being lowered into SBE, so we run the trial with the extended - // plan. 
This works because TrialRunTracker, attached to HashAgg stage, tracks as "results" the - // results of its child stage. Thus, we can use the number of reads the plan was cached with - // during multiplanning even though multiplanning ran trials of pre-extended plans. - if (!_cq.pipeline().empty()) { + // plan. This works because TrialRunTracker, attached to HashAgg stage in $group queries, tracks + // as "results" the results of its child stage. For $lookup queries, the TrialRunTracker will + // only track the number of reads from the local side. Thus, we can use the number of reads the + // plan was cached with during multiplanning even though multiplanning ran trials of + // pre-extended plans. + // + // TODO SERVER-61507: Remove canUseSbePlanCache check once $group pushdown is integrated with + // SBE plan cache. + if (!_cq.pipeline().empty() && + !(feature_flags::gFeatureFlagSbePlanCache.isEnabledAndIgnoreFCV() && + canonical_query_encoder::canUseSbePlanCache(_cq))) { _yieldPolicy->clearRegisteredPlans(); auto secondaryCollectionsInfo = fillOutSecondaryCollectionsInformation(_opCtx, _collections, &_cq); @@ -184,7 +191,7 @@ CandidatePlans CachedSolutionPlanner::replan(bool shouldCache, std::string reaso cache->deactivate(plan_cache_key_factory::make<mongo::PlanCacheKey>(_cq, mainColl)); if (feature_flags::gFeatureFlagSbePlanCache.isEnabledAndIgnoreFCV()) { auto&& sbePlanCache = sbe::getPlanCache(_opCtx); - sbePlanCache.deactivate(plan_cache_key_factory::make<sbe::PlanCacheKey>(_cq, mainColl)); + sbePlanCache.deactivate(plan_cache_key_factory::make(_cq, _collections)); } } diff --git a/src/mongo/db/query/sbe_multi_planner.cpp b/src/mongo/db/query/sbe_multi_planner.cpp index b9966e74683..c4ba4f7efad 100644 --- a/src/mongo/db/query/sbe_multi_planner.cpp +++ b/src/mongo/db/query/sbe_multi_planner.cpp @@ -130,13 +130,13 @@ CandidatePlans MultiPlanner::finalizeExecutionPlans( winner.root->open(false); } - // Writes a cache entry for the winning plan to the plan cache if possible. - plan_cache_util::updatePlanCache(_opCtx, - _collections.getMainCollection(), - _cachingMode, - _cq, - std::move(decision), - candidates); + // If there is a pushed down pipeline that cannot use SBE plan cache, then write a cache entry + // before extending the pipeline. + // TODO SERVER-61507: Remove this block once $group pushdown is integrated with SBE plan cache. + if (!canonical_query_encoder::canUseSbePlanCache(_cq)) { + plan_cache_util::updatePlanCache( + _opCtx, _collections, _cachingMode, _cq, std::move(decision), candidates); + } // Extend the winning candidate with the agg pipeline and rebuild the execution tree. Because // the trial was done with find-only part of the query, we cannot reuse the results. The @@ -152,10 +152,16 @@ CandidatePlans MultiPlanner::finalizeExecutionPlans( // The winner might have been replanned. So, pass through the replanning reason to the new // plan. data.replanReason = std::move(winner.data.replanReason); + + // We need to clone the plan here for the plan cache to use. The clone will be stored in the + // cache prior to preparation, whereas the original copy of the tree will be prepared and + // used to execute this query. 
+ auto clonedPlan = std::make_pair(rootStage->clone(), stage_builder::PlanStageData(data)); stage_builder::prepareSlotBasedExecutableTree( _opCtx, rootStage.get(), &data, _cq, _collections, _yieldPolicy); candidates[winnerIdx] = sbe::plan_ranker::CandidatePlan{ std::move(solution), std::move(rootStage), std::move(data)}; + candidates[winnerIdx].clonedPlan.emplace(std::move(clonedPlan)); candidates[winnerIdx].root->open(false); if (_cq.getExplain()) { @@ -173,6 +179,16 @@ CandidatePlans MultiPlanner::finalizeExecutionPlans( } } + // If pipeline can use SBE plan cache or there is no pushed down pipeline, then write a cache + // entry after extending the pipeline. + // TODO SERVER-61507: Remove canUseSbePlanCache check once $group pushdown is + // integrated with SBE plan cache. + if (canonical_query_encoder::canUseSbePlanCache(_cq)) { + // Writes a cache entry for the winning plan to the plan cache if possible. + plan_cache_util::updatePlanCache( + _opCtx, _collections, _cachingMode, _cq, std::move(decision), candidates); + } + return {std::move(candidates), winnerIdx}; } } // namespace mongo::sbe diff --git a/src/mongo/db/query/sbe_plan_cache.cpp b/src/mongo/db/query/sbe_plan_cache.cpp index 0d7a90e9ed5..bbd6db6418a 100644 --- a/src/mongo/db/query/sbe_plan_cache.cpp +++ b/src/mongo/db/query/sbe_plan_cache.cpp @@ -160,8 +160,17 @@ void clearPlanCacheEntriesWith(ServiceContext* serviceCtx, sbe::getPlanCache(serviceCtx) .removeIf([&collectionUuid, collectionVersion](const PlanCacheKey& key, const sbe::PlanCacheEntry& entry) { - return key.getCollectionVersion() == collectionVersion && - key.getCollectionUuid() == collectionUuid; + if (key.getMainCollectionState().version == collectionVersion && + key.getMainCollectionState().uuid == collectionUuid) { + return true; + } + for (auto& collectionState : key.getSecondaryCollectionStates()) { + if (collectionState.version == collectionVersion && + collectionState.uuid == collectionUuid) { + return true; + } + } + return false; }); LOGV2_DEBUG(6006600, diff --git a/src/mongo/db/query/sbe_plan_cache.h b/src/mongo/db/query/sbe_plan_cache.h index 6e7853fa817..b33488ade0f 100644 --- a/src/mongo/db/query/sbe_plan_cache.h +++ b/src/mongo/db/query/sbe_plan_cache.h @@ -56,35 +56,91 @@ struct PlanCacheKeyShardingEpoch { Timestamp ts; }; +struct PlanCacheKeyCollectionState { + bool operator==(const PlanCacheKeyCollectionState& other) const { + return other.uuid == uuid && other.version == version && + other.newestVisibleIndexTimestamp == newestVisibleIndexTimestamp && + other.shardVersion == shardVersion; + } + + size_t hashCode() const { + size_t hash = UUID::Hash{}(uuid); + boost::hash_combine(hash, version); + if (newestVisibleIndexTimestamp) { + boost::hash_combine(hash, newestVisibleIndexTimestamp->asULL()); + } + if (shardVersion) { + shardVersion->epoch.hash_combine(hash); + boost::hash_combine(hash, shardVersion->ts.asULL()); + } + return hash; + } + + UUID uuid; + + // There is a special collection versioning scheme associated with the SBE plan cache. Whenever + // an action against a collection is made which should invalidate the plan cache entries for the + // collection -- in particular index builds and drops -- the version number is incremented. + // Readers specify the version number that they are reading at so that they only pick up cache + // entries with the right set of indexes. 
+ // + // We also clean up all cache entries for a particular (collectionUuid, versionNumber) pair when + // all readers seeing this version of the collection have drained. + size_t version; + + // The '_collectionVersion' is not currently sufficient in order to ensure that the indexes + // visible to the reader are consistent with the indexes present in the cache entry. The reason + // is that all readers see the latest copy-on-write version of the 'Collection' object, even + // though they are allowed to read at an older timestamp, potentially at a time before an index + // build completed. + // + // To solve this problem, we incorporate the timestamp of the newest index visible to the reader + // into the plan cache key. This ensures that the set of indexes visible to the reader match + // those present in the plan cache entry, preventing a situation where the plan cache entry + // reflects a newer version of the index catalog than the one visible to the reader. + // + // In the future, this could instead be solved with point-in-time catalog lookups. + boost::optional<Timestamp> newestVisibleIndexTimestamp; + + // Ensures that a cached SBE plan cannot be reused if the collection has since become sharded or + // changed its shard key. The cached plan may no longer be valid after sharding or shard key + // refining since the structure of the plan depends on whether the collection is sharded, and if + // sharded depends on the shard key. + const boost::optional<PlanCacheKeyShardingEpoch> shardVersion; +}; + /** * Represents the "key" used in the PlanCache mapping from query shape -> query plan. */ class PlanCacheKey { public: PlanCacheKey(PlanCacheKeyInfo&& info, - UUID collectionUuid, - size_t collectionVersion, - boost::optional<Timestamp> newestVisibleIndexTimestamp, - boost::optional<PlanCacheKeyShardingEpoch> shardVersion) + PlanCacheKeyCollectionState mainCollectionState, + std::vector<PlanCacheKeyCollectionState> secondaryCollectionStates) : _info{std::move(info)}, - _collectionUuid{collectionUuid}, - _collectionVersion{collectionVersion}, - _newestVisibleIndexTimestamp{newestVisibleIndexTimestamp}, - _shardVersion{shardVersion} {} + _mainCollectionState{std::move(mainCollectionState)}, + _secondaryCollectionStates{std::move(secondaryCollectionStates)} { + // For secondary collections, we don't encode shard version in the key since we don't shard + // version these collections. This is OK because we only push down $lookup queries to SBE + // when involved collections are unsharded. 
+ for (const auto& collState : _secondaryCollectionStates) { + tassert(6443202, + "Secondary collections should not encode shard version in plan cache key", + collState.shardVersion == boost::none); + } + } - const UUID& getCollectionUuid() const { - return _collectionUuid; + const PlanCacheKeyCollectionState& getMainCollectionState() const { + return _mainCollectionState; } - size_t getCollectionVersion() const { - return _collectionVersion; + const std::vector<PlanCacheKeyCollectionState>& getSecondaryCollectionStates() const { + return _secondaryCollectionStates; } bool operator==(const PlanCacheKey& other) const { - return other._collectionVersion == _collectionVersion && - other._collectionUuid == _collectionUuid && - other._newestVisibleIndexTimestamp == _newestVisibleIndexTimestamp && - other._info == _info && other._shardVersion == _shardVersion; + return other._info == _info && other._mainCollectionState == _mainCollectionState && + other._secondaryCollectionStates == _secondaryCollectionStates; } bool operator!=(const PlanCacheKey& other) const { @@ -97,14 +153,9 @@ public: uint32_t planCacheKeyHash() const { size_t hash = _info.planCacheKeyHash(); - boost::hash_combine(hash, UUID::Hash{}(_collectionUuid)); - boost::hash_combine(hash, _collectionVersion); - if (_newestVisibleIndexTimestamp) { - boost::hash_combine(hash, _newestVisibleIndexTimestamp->asULL()); - } - if (_shardVersion) { - _shardVersion->epoch.hash_combine(hash); - boost::hash_combine(hash, _shardVersion->ts.asULL()); + boost::hash_combine(hash, _mainCollectionState.hashCode()); + for (auto& collectionState : _secondaryCollectionStates) { + boost::hash_combine(hash, collectionState.hashCode()); } return hash; } @@ -117,37 +168,12 @@ private: // Contains the actual encoding of the query shape as well as the index discriminators. const PlanCacheKeyInfo _info; - const UUID _collectionUuid; - - // There is a special collection versioning scheme associated with the SBE plan cache. Whenever - // an action against a collection is made which should invalidate the plan cache entries for the - // collection -- in particular index builds and drops -- the version number is incremented. - // Readers specify the version number that they are reading at so that they only pick up cache - // entries with the right set of indexes. - // - // We also clean up all cache entries for a particular (collectionUuid, versionNumber) pair when - // all readers seeing this version of the collection have drained. - const size_t _collectionVersion; - - // The '_collectionVersion' is not currently sufficient in order to ensure that the indexes - // visible to the reader are consistent with the indexes present in the cache entry. The reason - // is that all readers see the latest copy-on-write version of the 'Collection' object, even - // though they are allowed to read at an older timestamp, potentially at a time before an index - // build completed. - // - // To solve this problem, we incorporate the timestamp of the newest index visible to the reader - // into the plan cache key. This ensures that the set of indexes visible to the reader match - // those present in the plan cache entry, preventing a situation where the plan cache entry - // reflects a newer version of the index catalog than the one visible to the reader. - // - // In the future, this could instead be solved with point-in-time catalog lookups. 
- const boost::optional<Timestamp> _newestVisibleIndexTimestamp; + const PlanCacheKeyCollectionState _mainCollectionState; - // Ensures that a cached SBE plan cannot be reused if the collection has since become sharded or - // changed its shard key. The cached plan may no longer be valid after sharding or shard key - // refining since the structure of the plan depends on whether the collection is sharded, and if - // sharded depends on the shard key. - const boost::optional<PlanCacheKeyShardingEpoch> _shardVersion; + // To make sure the plan cache key matches, the secondary collection states need to be passed + // in a defined order. Currently, we use the collection order stored in + // MultipleCollectionAccessor, which is ordered by the collection namespaces. + const std::vector<PlanCacheKeyCollectionState> _secondaryCollectionStates; }; class PlanCacheKeyHasher { diff --git a/src/mongo/db/query/sbe_sub_planner.cpp b/src/mongo/db/query/sbe_sub_planner.cpp index e5e714ad3aa..c6ce37cb434 100644 --- a/src/mongo/db/query/sbe_sub_planner.cpp +++ b/src/mongo/db/query/sbe_sub_planner.cpp @@ -116,8 +116,9 @@ CandidatePlans SubPlanner::plan( // TODO SERVER-61507: do it unconditionally when $group pushdown is integrated with the SBE plan // cache. - if (_cq.pipeline().empty()) { - plan_cache_util::updatePlanCache(_opCtx, mainColl, _cq, *compositeSolution, *root, data); + if (canonical_query_encoder::canUseSbePlanCache(_cq)) { + plan_cache_util::updatePlanCache( + _opCtx, _collections, _cq, *compositeSolution, *root, data); } return {makeVector(plan_ranker::CandidatePlan{ |
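Illustrative sketch (not part of the patch): the sbe_plan_cache.h hunks above replace the key's flat uuid/version/timestamp members with a PlanCacheKeyCollectionState for the main collection plus an ordered vector of states for secondary ($lookup foreign) collections, and clearPlanCacheEntriesWith() now matches an entry if either the main or any secondary state matches. The self-contained C++ below models that idea with invented types; the hashCombine helper stands in for boost::hash_combine, and plain integers stand in for UUID and Timestamp.

#include <cstddef>
#include <cstdint>
#include <functional>
#include <optional>
#include <vector>

inline void hashCombine(std::size_t& seed, std::size_t value) {
    // Same mixing idea as boost::hash_combine.
    seed ^= value + 0x9e3779b9 + (seed << 6) + (seed >> 2);
}

struct CollectionStateSketch {
    std::uint64_t uuid;                                      // stand-in for the collection UUID
    std::size_t version;                                     // plan cache invalidator version
    std::optional<std::uint64_t> newestVisibleIndexTimestamp;

    bool operator==(const CollectionStateSketch& other) const {
        return uuid == other.uuid && version == other.version &&
            newestVisibleIndexTimestamp == other.newestVisibleIndexTimestamp;
    }

    std::size_t hashCode() const {
        std::size_t hash = std::hash<std::uint64_t>{}(uuid);
        hashCombine(hash, version);
        if (newestVisibleIndexTimestamp) {
            hashCombine(hash, static_cast<std::size_t>(*newestVisibleIndexTimestamp));
        }
        return hash;
    }
};

struct SbePlanCacheKeySketch {
    std::size_t queryShapeHash;                  // from the encoded query shape + discriminators
    CollectionStateSketch mainCollectionState;
    // Kept in a fixed order (the patch orders them by namespace) so hashing and equality
    // are deterministic for a given query shape.
    std::vector<CollectionStateSketch> secondaryCollectionStates;

    std::size_t planCacheKeyHash() const {
        std::size_t hash = queryShapeHash;
        hashCombine(hash, mainCollectionState.hashCode());
        for (const auto& state : secondaryCollectionStates) {
            hashCombine(hash, state.hashCode());
        }
        return hash;
    }

    // Mirrors the updated clearPlanCacheEntriesWith(): an entry is invalidated when the
    // (uuid, version) pair matches the main collection or any secondary collection.
    bool matchesCollection(std::uint64_t uuid, std::size_t version) const {
        if (mainCollectionState.uuid == uuid && mainCollectionState.version == version) {
            return true;
        }
        for (const auto& state : secondaryCollectionStates) {
            if (state.uuid == uuid && state.version == version) {
                return true;
            }
        }
        return false;
    }
};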