author     Mihai Andrei <mihai.andrei@mongodb.com>           2023-02-07 00:58:25 +0000
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>  2023-02-07 02:32:02 +0000
commit     b185d04364c298e701943a1e800c4036ce8df6e7 (patch)
tree       6a91575c0096863e38b1b0fd12d1bfd90d55ac7f /src/mongo/db
parent     967a6745ba487edaff558e36fdc572c6ca066fea (diff)
download   mongo-b185d04364c298e701943a1e800c4036ce8df6e7.tar.gz
SERVER-71798 Expand the set of queries eligible for SBE in the 6.3 release
Diffstat (limited to 'src/mongo/db')
-rw-r--r--  src/mongo/db/commands/external_data_source_commands_test.cpp | 118
-rw-r--r--  src/mongo/db/commands/index_filter_commands.cpp | 14
-rw-r--r--  src/mongo/db/commands/plan_cache_clear_command.cpp | 19
-rw-r--r--  src/mongo/db/exec/plan_cache_util.cpp | 3
-rw-r--r--  src/mongo/db/exec/plan_cache_util.h | 54
-rw-r--r--  src/mongo/db/exec/sbe/expressions/expression.cpp | 16
-rw-r--r--  src/mongo/db/exec/sbe/values/value.cpp | 3
-rw-r--r--  src/mongo/db/exec/sbe/values/value.h | 13
-rw-r--r--  src/mongo/db/exec/sbe/values/value_printer.cpp | 6
-rw-r--r--  src/mongo/db/exec/sbe/vm/vm.cpp | 42
-rw-r--r--  src/mongo/db/exec/sbe/vm/vm.h | 6
-rw-r--r--  src/mongo/db/exec/sbe/vm/vm_printer.cpp | 36
-rw-r--r--  src/mongo/db/pipeline/process_interface/common_mongod_process_interface.cpp | 31
-rw-r--r--  src/mongo/db/query/canonical_query.cpp | 9
-rw-r--r--  src/mongo/db/query/canonical_query_encoder.cpp | 4
-rw-r--r--  src/mongo/db/query/canonical_query_encoder_test.cpp | 5
-rw-r--r--  src/mongo/db/query/canonical_query_test.cpp | 2
-rw-r--r--  src/mongo/db/query/classic_plan_cache.cpp | 3
-rw-r--r--  src/mongo/db/query/explain.cpp | 1
-rw-r--r--  src/mongo/db/query/get_executor.cpp | 176
-rw-r--r--  src/mongo/db/query/projection.h | 7
-rw-r--r--  src/mongo/db/query/query_feature_flags.idl | 3
-rw-r--r--  src/mongo/db/query/query_utils.cpp | 22
-rw-r--r--  src/mongo/db/query/query_utils.h | 6
-rw-r--r--  src/mongo/db/query/sbe_cached_solution_planner.cpp | 60
-rw-r--r--  src/mongo/db/query/sbe_plan_cache.cpp | 116
-rw-r--r--  src/mongo/db/query/sbe_stage_builder.cpp | 14
-rw-r--r--  src/mongo/db/query/sbe_stage_builder_filter.cpp | 43
-rw-r--r--  src/mongo/db/s/collection_sharding_runtime.cpp | 86
29 files changed, 282 insertions, 636 deletions
diff --git a/src/mongo/db/commands/external_data_source_commands_test.cpp b/src/mongo/db/commands/external_data_source_commands_test.cpp
index 217773fa023..8c662446995 100644
--- a/src/mongo/db/commands/external_data_source_commands_test.cpp
+++ b/src/mongo/db/commands/external_data_source_commands_test.cpp
@@ -455,124 +455,6 @@ TEST_F(ExternalDataSourceCommandsTest, SimpleMatchAggRequest) {
verifyExplainAggCommand(client, aggCmdObj);
}
-TEST_F(ExternalDataSourceCommandsTest, ScanOverRandomInvalidDataAggRequest) {
- const auto nDocs = _random.nextInt32(100) + 1;
- std::vector<BSONObj> srcDocs = generateRandomSimpleDocs(nDocs);
- PipeWaiter pw;
-
- stdx::thread producer([&] {
- NamedPipeOutput pipeWriter("EDSCTest_ScanOverRandomInvalidDataAggRequestPipe");
- pw.notify();
- const size_t failPoint = _random.nextInt32(nDocs);
- pipeWriter.open();
- for (size_t i = 0; i < srcDocs.size(); ++i) {
- if (i == failPoint) {
- // Intentionally pushes invalid data at the fail point so that an error happens at
- // the reader-side
- pipeWriter.write(srcDocs[i].objdata(), srcDocs[i].objsize() / 2);
- } else {
- pipeWriter.write(srcDocs[i].objdata(), srcDocs[i].objsize());
- }
- }
- pipeWriter.close();
- });
- ON_BLOCK_EXIT([&] { producer.join(); });
-
- // Gives some time to the producer so that it can initialize a named pipe.
- pw.wait();
-
- DBDirectClient client(_opCtx);
- auto aggCmdObj = fromjson(R"(
-{
- aggregate: "coll",
- pipeline: [{$match: {a: {$lt: 5}}}],
- cursor: {},
- $_externalDataSources: [{
- collName: "coll",
- dataSources: [{url: "file://EDSCTest_ScanOverRandomInvalidDataAggRequestPipe", storageType: "pipe", fileType: "bson"}]
- }]
-}
- )");
-
- BSONObj res;
- ASSERT_FALSE(client.runCommand(kDatabaseName, aggCmdObj.getOwned(), res));
- ASSERT_EQ(res["ok"].Number(), 0.0);
- // The fail point is randomly chosen and different error codes are expected, depending on the
- // chosen fail point.
- ASSERT_NE(ErrorCodes::Error(res["code"].Int()), ErrorCodes::OK);
-
- // The second explain request. This verifies that virtual collections are cleaned up after
- // the aggregation request fails.
- verifyExplainAggCommand(client, aggCmdObj);
-}
-
-TEST_F(ExternalDataSourceCommandsTest, ScanOverRandomInvalidDataAtSecondBatchAggRequest) {
- // This 'nDocs' causes a cursor to be created for a simple scan aggregate command.
- const auto nDocs = _random.nextInt32(100) + 102; // 201 >= nDocs >= 102
- std::vector<BSONObj> srcDocs = generateRandomSimpleDocs(nDocs);
- PipeWaiter pw;
-
- stdx::thread producer([&] {
- NamedPipeOutput pipeWriter("EDSCTest_ScanOverRandomInvalidDataAtSecondBatchAggRequestPipe");
- pw.notify();
- // The fail point occurs at the second batch.
- const size_t failPoint = 101 + _random.nextInt32(nDocs - 101); // 200 >= failPoint >= 101
- pipeWriter.open();
- for (size_t i = 0; i < srcDocs.size(); ++i) {
- if (i == failPoint) {
- // Intentionally pushes invalid data at the fail point so that an error happens at
- // the reader-side
- pipeWriter.write(srcDocs[i].objdata(), srcDocs[i].objsize() / 2);
- } else {
- pipeWriter.write(srcDocs[i].objdata(), srcDocs[i].objsize());
- }
- }
- pipeWriter.close();
- });
- ON_BLOCK_EXIT([&] { producer.join(); });
-
- // Gives some time to the producer so that it can initialize a named pipe.
- pw.wait();
-
- DBDirectClient client(_opCtx);
- auto aggCmdObj = fromjson(R"(
-{
- aggregate: "coll",
- pipeline: [],
- cursor: {},
- $_externalDataSources: [{
- collName: "coll",
- dataSources: [{url: "file://EDSCTest_ScanOverRandomInvalidDataAtSecondBatchAggRequestPipe", storageType: "pipe", fileType: "bson"}]
- }]
-}
- )");
-
- auto swAggReq = aggregation_request_helper::parseFromBSONForTests(kDatabaseName, aggCmdObj);
- ASSERT_OK(swAggReq.getStatus());
- auto swCursor = DBClientCursor::fromAggregationRequest(
- &client, swAggReq.getValue(), /*secondaryOk*/ false, /*useExhaust*/ false);
- ASSERT_OK(swCursor.getStatus());
-
- auto cursor = std::move(swCursor.getValue());
- int resCnt = 0;
- bool errorOccurred = false;
- try {
- while (cursor->more()) {
- auto doc = cursor->next();
- ASSERT_BSONOBJ_EQ(doc, srcDocs[resCnt]);
- ++resCnt;
- }
- } catch (const DBException& ex) {
- errorOccurred = true;
- ASSERT_NE(ex.code(), ErrorCodes::OK);
- }
- ASSERT_TRUE(errorOccurred);
-
- // The second explain request. This verifies that virtual collections are cleaned up after
- // the getMore request for the aggregation results fails.
- verifyExplainAggCommand(client, aggCmdObj);
-}
-
TEST_F(ExternalDataSourceCommandsTest, KillCursorAfterAggRequest) {
// This 'nDocs' causes a cursor to be created for a simple scan aggregate command.
const auto nDocs = _random.nextInt32(100) + 102;
diff --git a/src/mongo/db/commands/index_filter_commands.cpp b/src/mongo/db/commands/index_filter_commands.cpp
index 423014c0c04..79e9b8a18da 100644
--- a/src/mongo/db/commands/index_filter_commands.cpp
+++ b/src/mongo/db/commands/index_filter_commands.cpp
@@ -203,12 +203,9 @@ Status ClearFilters::runIndexFilterCommand(OperationContext* opCtx,
invariant(querySettings);
PlanCache* planCacheClassic = CollectionQueryInfo::get(collection).getPlanCache();
- sbe::PlanCache* planCacheSBE = nullptr;
invariant(planCacheClassic);
-
- if (feature_flags::gFeatureFlagSbeFull.isEnabledAndIgnoreFCV()) {
- planCacheSBE = &sbe::getPlanCache(opCtx);
- }
+ sbe::PlanCache* planCacheSBE = &sbe::getPlanCache(opCtx);
+ invariant(planCacheSBE);
return clear(opCtx, collection, cmdObj, querySettings, planCacheClassic, planCacheSBE);
}
@@ -327,12 +324,9 @@ Status SetFilter::runIndexFilterCommand(OperationContext* opCtx,
invariant(querySettings);
PlanCache* planCacheClassic = CollectionQueryInfo::get(collection).getPlanCache();
- sbe::PlanCache* planCacheSBE = nullptr;
invariant(planCacheClassic);
-
- if (feature_flags::gFeatureFlagSbeFull.isEnabledAndIgnoreFCV()) {
- planCacheSBE = &sbe::getPlanCache(opCtx);
- }
+ sbe::PlanCache* planCacheSBE = &sbe::getPlanCache(opCtx);
+ invariant(planCacheSBE);
return set(opCtx, collection, cmdObj, querySettings, planCacheClassic, planCacheSBE);
}
diff --git a/src/mongo/db/commands/plan_cache_clear_command.cpp b/src/mongo/db/commands/plan_cache_clear_command.cpp
index 88b3c9ef07d..9e640a5863e 100644
--- a/src/mongo/db/commands/plan_cache_clear_command.cpp
+++ b/src/mongo/db/commands/plan_cache_clear_command.cpp
@@ -90,11 +90,8 @@ Status clear(OperationContext* opCtx,
canonical_query_encoder::encodeForPlanCacheCommand(*cq))};
plan_cache_commands::removePlanCacheEntriesByPlanCacheCommandKeys(planCacheCommandKeys,
planCache);
-
- if (feature_flags::gFeatureFlagSbeFull.isEnabledAndIgnoreFCV()) {
- plan_cache_commands::removePlanCacheEntriesByPlanCacheCommandKeys(
- planCacheCommandKeys, collection->uuid(), &sbe::getPlanCache(opCtx));
- }
+ plan_cache_commands::removePlanCacheEntriesByPlanCacheCommandKeys(
+ planCacheCommandKeys, collection->uuid(), &sbe::getPlanCache(opCtx));
return Status::OK();
}
@@ -109,13 +106,11 @@ Status clear(OperationContext* opCtx,
planCache->clear();
- if (feature_flags::gFeatureFlagSbeFull.isEnabledAndIgnoreFCV()) {
- auto version = CollectionQueryInfo::get(collection).getPlanCacheInvalidatorVersion();
- sbe::clearPlanCacheEntriesWith(opCtx->getServiceContext(),
- collection->uuid(),
- version,
- false /*matchSecondaryCollections*/);
- }
+ auto version = CollectionQueryInfo::get(collection).getPlanCacheInvalidatorVersion();
+ sbe::clearPlanCacheEntriesWith(opCtx->getServiceContext(),
+ collection->uuid(),
+ version,
+ false /*matchSecondaryCollections*/);
LOGV2_DEBUG(
23908, 1, "{namespace}: Cleared plan cache", "Cleared plan cache", "namespace"_attr = ns);
diff --git a/src/mongo/db/exec/plan_cache_util.cpp b/src/mongo/db/exec/plan_cache_util.cpp
index 265eb1e70e7..f223afc79f9 100644
--- a/src/mongo/db/exec/plan_cache_util.cpp
+++ b/src/mongo/db/exec/plan_cache_util.cpp
@@ -76,8 +76,7 @@ void updatePlanCache(OperationContext* opCtx,
const stage_builder::PlanStageData& data) {
// TODO SERVER-67576: re-enable caching of "explode for sort" plans in the SBE cache.
if (shouldCacheQuery(query) && collections.getMainCollection() &&
- !solution.hasExplodedForSort &&
- feature_flags::gFeatureFlagSbeFull.isEnabledAndIgnoreFCV()) {
+ !solution.hasExplodedForSort) {
auto key = plan_cache_key_factory::make(query, collections);
auto plan = std::make_unique<sbe::CachedSbePlan>(root.clone(), data);
plan->indexFilterApplied = solution.indexFilterApplied;
diff --git a/src/mongo/db/exec/plan_cache_util.h b/src/mongo/db/exec/plan_cache_util.h
index b233338882f..05b8e3d6a36 100644
--- a/src/mongo/db/exec/plan_cache_util.h
+++ b/src/mongo/db/exec/plan_cache_util.h
@@ -200,35 +200,31 @@ void updatePlanCache(
if (winningPlan.solution->cacheData != nullptr) {
if constexpr (std::is_same_v<PlanStageType, std::unique_ptr<sbe::PlanStage>>) {
- if (feature_flags::gFeatureFlagSbeFull.isEnabledAndIgnoreFCV()) {
- tassert(6142201,
- "The winning CandidatePlan should contain the original plan",
- winningPlan.clonedPlan);
- // Clone the winning SBE plan and its auxiliary data.
- auto cachedPlan = std::make_unique<sbe::CachedSbePlan>(
- std::move(winningPlan.clonedPlan->first),
- std::move(winningPlan.clonedPlan->second));
- cachedPlan->indexFilterApplied = winningPlan.solution->indexFilterApplied;
-
- auto buildDebugInfoFn = [soln = winningPlan.solution.get()]()
- -> plan_cache_debug_info::DebugInfoSBE { return buildDebugInfo(soln); };
- PlanCacheCallbacksImpl<sbe::PlanCacheKey,
- sbe::CachedSbePlan,
- plan_cache_debug_info::DebugInfoSBE>
- callbacks{query, buildDebugInfoFn};
- uassertStatusOK(sbe::getPlanCache(opCtx).set(
- plan_cache_key_factory::make(query, collections),
- std::move(cachedPlan),
- *rankingDecision,
- opCtx->getServiceContext()->getPreciseClockSource()->now(),
- &callbacks,
- boost::none /* worksGrowthCoefficient */));
- } else {
- // Fall back to use the classic plan cache.
- //
- // TODO SERVER-64882: Remove this branch after "gFeatureFlagSbeFull" is removed.
- cacheClassicPlan();
- }
+ tassert(6142201,
+ "The winning CandidatePlan should contain the original plan",
+ winningPlan.clonedPlan);
+
+ // Clone the winning SBE plan and its auxiliary data.
+ auto cachedPlan =
+ std::make_unique<sbe::CachedSbePlan>(std::move(winningPlan.clonedPlan->first),
+ std::move(winningPlan.clonedPlan->second));
+ cachedPlan->indexFilterApplied = winningPlan.solution->indexFilterApplied;
+
+ auto buildDebugInfoFn =
+ [soln = winningPlan.solution.get()]() -> plan_cache_debug_info::DebugInfoSBE {
+ return buildDebugInfo(soln);
+ };
+ PlanCacheCallbacksImpl<sbe::PlanCacheKey,
+ sbe::CachedSbePlan,
+ plan_cache_debug_info::DebugInfoSBE>
+ callbacks{query, buildDebugInfoFn};
+ uassertStatusOK(sbe::getPlanCache(opCtx).set(
+ plan_cache_key_factory::make(query, collections),
+ std::move(cachedPlan),
+ *rankingDecision,
+ opCtx->getServiceContext()->getPreciseClockSource()->now(),
+ &callbacks,
+ boost::none /* worksGrowthCoefficient */));
} else {
static_assert(std::is_same_v<PlanStageType, PlanStage*>);
cacheClassicPlan();
diff --git a/src/mongo/db/exec/sbe/expressions/expression.cpp b/src/mongo/db/exec/sbe/expressions/expression.cpp
index 1c44cdb8585..120164e76fd 100644
--- a/src/mongo/db/exec/sbe/expressions/expression.cpp
+++ b/src/mongo/db/exec/sbe/expressions/expression.cpp
@@ -933,21 +933,6 @@ vm::CodeFragment generateTraverseCellTypes(CompileCtx& ctx,
return generatorLegacy<&vm::CodeFragment::appendTraverseCellTypes>(ctx, nodes, false);
}
-vm::CodeFragment generateClassicMatcher(CompileCtx& ctx, const EExpression::Vector& nodes, bool) {
- tassert(6681400,
- "First argument to applyClassicMatcher must be constant",
- nodes[0]->as<EConstant>());
- auto [matcherTag, matcherVal] = nodes[0]->as<EConstant>()->getConstant();
- tassert(6681409,
- "First argument to applyClassicMatcher must be a classic matcher",
- matcherTag == value::TypeTags::classicMatchExpresion);
-
- vm::CodeFragment code;
- code.append(nodes[1]->compileDirect(ctx));
- code.appendApplyClassicMatcher(value::getClassicMatchExpressionView(matcherVal));
- return code;
-}
-
/**
* The map of functions that resolve directly to instructions.
*/
@@ -986,7 +971,6 @@ static stdx::unordered_map<std::string, InstrFn> kInstrFunctions = {
{"isMinKey", InstrFn{1, generator<1, &vm::CodeFragment::appendIsMinKey>, false}},
{"isMaxKey", InstrFn{1, generator<1, &vm::CodeFragment::appendIsMaxKey>, false}},
{"isTimestamp", InstrFn{1, generator<1, &vm::CodeFragment::appendIsTimestamp>, false}},
- {"applyClassicMatcher", InstrFn{2, generateClassicMatcher, false}},
};
} // namespace
diff --git a/src/mongo/db/exec/sbe/values/value.cpp b/src/mongo/db/exec/sbe/values/value.cpp
index e21bc694784..a24883a7be9 100644
--- a/src/mongo/db/exec/sbe/values/value.cpp
+++ b/src/mongo/db/exec/sbe/values/value.cpp
@@ -348,9 +348,6 @@ void releaseValueDeep(TypeTags tag, Value val) noexcept {
case TypeTags::indexBounds:
delete getIndexBoundsView(val);
break;
- case TypeTags::classicMatchExpresion:
- delete getClassicMatchExpressionView(val);
- break;
default:
break;
}
diff --git a/src/mongo/db/exec/sbe/values/value.h b/src/mongo/db/exec/sbe/values/value.h
index 0459118ddf1..88cf3d3b011 100644
--- a/src/mongo/db/exec/sbe/values/value.h
+++ b/src/mongo/db/exec/sbe/values/value.h
@@ -191,9 +191,6 @@ enum class TypeTags : uint8_t {
// Pointer to a IndexBounds object.
indexBounds,
-
- // Pointer to a classic engine match expression.
- classicMatchExpresion,
};
inline constexpr bool isNumber(TypeTags tag) noexcept {
@@ -1261,10 +1258,6 @@ inline IndexBounds* getIndexBoundsView(Value val) noexcept {
return reinterpret_cast<IndexBounds*>(val);
}
-inline MatchExpression* getClassicMatchExpressionView(Value val) noexcept {
- return reinterpret_cast<MatchExpression*>(val);
-}
-
inline sbe::value::CsiCell* getCsiCellView(Value val) noexcept {
return reinterpret_cast<sbe::value::CsiCell*>(val);
}
@@ -1479,12 +1472,6 @@ inline std::pair<TypeTags, Value> copyValue(TypeTags tag, Value val) {
return makeCopyCollator(*getCollatorView(val));
case TypeTags::indexBounds:
return makeCopyIndexBounds(*getIndexBoundsView(val));
- case TypeTags::classicMatchExpresion:
- // Beware: "shallow cloning" a match expression does not copy the underlying BSON. The
- // original BSON must remain alive for both the original MatchExpression and the clone.
- return {TypeTags::classicMatchExpresion,
- bitcastFrom<const MatchExpression*>(
- getClassicMatchExpressionView(val)->shallowClone().release())};
default:
break;
}
diff --git a/src/mongo/db/exec/sbe/values/value_printer.cpp b/src/mongo/db/exec/sbe/values/value_printer.cpp
index 4cd70a9196b..2405f698f3f 100644
--- a/src/mongo/db/exec/sbe/values/value_printer.cpp
+++ b/src/mongo/db/exec/sbe/values/value_printer.cpp
@@ -163,9 +163,6 @@ void ValuePrinter<T>::writeTagToStream(TypeTags tag) {
case TypeTags::indexBounds:
stream << "indexBounds";
break;
- case TypeTags::classicMatchExpresion:
- stream << "classicMatchExpression";
- break;
case TypeTags::csiCell:
stream << "csiCell";
break;
@@ -539,9 +536,6 @@ void ValuePrinter<T>::writeValueToStream(TypeTags tag, Value val, size_t depth)
getIndexBoundsView(val)->toString(true /* hasNonSimpleCollation */));
stream << ")";
break;
- case TypeTags::classicMatchExpresion:
- stream << "ClassicMatcher(" << getClassicMatchExpressionView(val)->toString() << ")";
- break;
case TypeTags::csiCell:
stream << "CsiCell(" << getCsiCellView(val) << ")";
break;
diff --git a/src/mongo/db/exec/sbe/vm/vm.cpp b/src/mongo/db/exec/sbe/vm/vm.cpp
index 3a8e48daf0a..35dab757964 100644
--- a/src/mongo/db/exec/sbe/vm/vm.cpp
+++ b/src/mongo/db/exec/sbe/vm/vm.cpp
@@ -166,7 +166,6 @@ int Instruction::stackOffset[Instruction::Tags::lastInstruction] = {
-1, // fail
- 0, // applyClassicMatcher
0, // dateTruncImm
};
@@ -482,18 +481,6 @@ void CodeFragment::appendNumericConvert(value::TypeTags targetTag) {
adjustStackSimple(i);
}
-void CodeFragment::appendApplyClassicMatcher(const MatchExpression* matcher) {
- Instruction i;
- i.tag = Instruction::applyClassicMatcher;
-
- auto offset = allocateSpace(sizeof(Instruction) + sizeof(matcher));
-
- offset += writeToMemory(offset, i);
- offset += writeToMemory(offset, matcher);
-
- adjustStackSimple(i);
-}
-
void CodeFragment::appendSub(Instruction::Parameter lhs, Instruction::Parameter rhs) {
appendSimpleInstruction(Instruction::sub, lhs, rhs);
}
@@ -5750,28 +5737,6 @@ MONGO_COMPILER_NORETURN void ByteCode::runFailInstruction() {
uasserted(code, message);
}
-
-void ByteCode::runClassicMatcher(const MatchExpression* matcher) {
- auto [ownedObj, tagObj, valObj] = getFromStack(0);
-
- BSONObj bsonObjForMatching;
- if (tagObj == value::TypeTags::Object) {
- BSONObjBuilder builder;
- sbe::bson::convertToBsonObj(builder, sbe::value::getObjectView(valObj));
- bsonObjForMatching = builder.obj();
- } else if (tagObj == value::TypeTags::bsonObject) {
- auto bson = value::getRawPointerView(valObj);
- bsonObjForMatching = BSONObj(bson);
- } else {
- MONGO_UNREACHABLE_TASSERT(6681402);
- }
-
- bool res = matcher->matchesBSON(bsonObjForMatching);
- if (ownedObj) {
- value::releaseValue(tagObj, valObj);
- }
- topStack(false, value::TypeTags::Boolean, value::bitcastFrom<bool>(res));
-}
template <typename T>
void ByteCode::runTagCheck(const uint8_t*& pcPointer, T&& predicate) {
auto [popParam, offsetParam] = decodeParam(pcPointer);
@@ -6782,13 +6747,6 @@ void ByteCode::runInternal(const CodeFragment* code, int64_t position) {
runFailInstruction();
break;
}
- case Instruction::applyClassicMatcher: {
- const auto* matcher = readFromMemory<const MatchExpression*>(pcPointer);
- pcPointer += sizeof(matcher);
-
- runClassicMatcher(matcher);
- break;
- }
case Instruction::dateTruncImm: {
auto unit = readFromMemory<TimeUnit>(pcPointer);
pcPointer += sizeof(unit);
diff --git a/src/mongo/db/exec/sbe/vm/vm.h b/src/mongo/db/exec/sbe/vm/vm.h
index 3fc5c173df4..87de6331f31 100644
--- a/src/mongo/db/exec/sbe/vm/vm.h
+++ b/src/mongo/db/exec/sbe/vm/vm.h
@@ -349,8 +349,6 @@ struct Instruction {
fail,
- applyClassicMatcher, // Instruction which calls into the classic engine MatchExpression.
-
dateTruncImm,
lastInstruction // this is just a marker used to calculate number of instructions
@@ -577,8 +575,6 @@ struct Instruction {
return "allocStack";
case fail:
return "fail";
- case applyClassicMatcher:
- return "applyClassicMatcher";
case dateTruncImm:
return "dateTruncImm";
default:
@@ -890,7 +886,6 @@ public:
void appendAllocStack(uint32_t size);
void appendFail();
void appendNumericConvert(value::TypeTags targetTag);
- void appendApplyClassicMatcher(const MatchExpression*);
// For printing from an interactive debugger.
std::string toString() const;
@@ -994,7 +989,6 @@ private:
void runLambdaInternal(const CodeFragment* code, int64_t position);
MONGO_COMPILER_NORETURN void runFailInstruction();
- void runClassicMatcher(const MatchExpression* matcher);
template <typename T>
void runTagCheck(const uint8_t*& pcPointer, T&& predicate);
diff --git a/src/mongo/db/exec/sbe/vm/vm_printer.cpp b/src/mongo/db/exec/sbe/vm/vm_printer.cpp
index 8ac6fe532c2..85c59dc9957 100644
--- a/src/mongo/db/exec/sbe/vm/vm_printer.cpp
+++ b/src/mongo/db/exec/sbe/vm/vm_printer.cpp
@@ -49,10 +49,6 @@ struct CodeFragmentFormatter<CodeFragmentPrinter::PrintFormat::Debug> {
return SlotAccessorFmt{accessor};
}
- auto matchExpression(const MatchExpression* matcher) {
- return MatchExpressionFmt{matcher};
- }
-
struct PcPointerFmt {
const uint8_t* pcPointer;
};
@@ -60,10 +56,6 @@ struct CodeFragmentFormatter<CodeFragmentPrinter::PrintFormat::Debug> {
struct SlotAccessorFmt {
value::SlotAccessor* accessor;
};
-
- struct MatchExpressionFmt {
- const MatchExpression* matcher;
- };
};
template <typename charT, typename traits>
@@ -80,13 +72,6 @@ std::basic_ostream<charT, traits>& operator<<(
return os << static_cast<const void*>(a.accessor);
}
-template <typename charT, typename traits>
-std::basic_ostream<charT, traits>& operator<<(
- std::basic_ostream<charT, traits>& os,
- const CodeFragmentFormatter<CodeFragmentPrinter::PrintFormat::Debug>::MatchExpressionFmt& a) {
- return os << static_cast<const void*>(a.matcher);
-}
-
template <>
struct CodeFragmentFormatter<CodeFragmentPrinter::PrintFormat::Stable> {
CodeFragmentFormatter(const CodeFragment& code) : code(code) {}
@@ -99,10 +84,6 @@ struct CodeFragmentFormatter<CodeFragmentPrinter::PrintFormat::Stable> {
return SlotAccessorFmt{accessor};
}
- auto matchExpression(const MatchExpression* matcher) {
- return MatchExpressionFmt{matcher};
- }
-
struct PcPointerFmt {
const uint8_t* pcPointer;
const uint8_t* pcBegin;
@@ -112,10 +93,6 @@ struct CodeFragmentFormatter<CodeFragmentPrinter::PrintFormat::Stable> {
value::SlotAccessor* accessor;
};
- struct MatchExpressionFmt {
- const MatchExpression* matcher;
- };
-
const CodeFragment& code;
};
@@ -137,13 +114,6 @@ std::basic_ostream<charT, traits>& operator<<(
return os << "<accessor>";
}
-template <typename charT, typename traits>
-std::basic_ostream<charT, traits>& operator<<(
- std::basic_ostream<charT, traits>& os,
- const CodeFragmentFormatter<CodeFragmentPrinter::PrintFormat::Stable>::MatchExpressionFmt& a) {
- return os << "<matchExpression>";
-}
-
template <typename Formatter>
class CodeFragmentPrinterImpl {
public:
@@ -328,12 +298,6 @@ public:
os << "accessor: " << _formatter.slotAccessor(accessor);
break;
}
- case Instruction::applyClassicMatcher: {
- const auto* matcher = readFromMemory<const MatchExpression*>(pcPointer);
- pcPointer += sizeof(matcher);
- os << "matcher: " << _formatter.matchExpression(matcher);
- break;
- }
case Instruction::numConvert: {
auto tag = readFromMemory<value::TypeTags>(pcPointer);
pcPointer += sizeof(tag);
diff --git a/src/mongo/db/pipeline/process_interface/common_mongod_process_interface.cpp b/src/mongo/db/pipeline/process_interface/common_mongod_process_interface.cpp
index 1ce2581a1e7..471727946c4 100644
--- a/src/mongo/db/pipeline/process_interface/common_mongod_process_interface.cpp
+++ b/src/mongo/db/pipeline/process_interface/common_mongod_process_interface.cpp
@@ -569,22 +569,21 @@ std::vector<BSONObj> CommonMongodProcessInterface::getMatchingPlanCacheEntryStat
auto planCacheEntries =
planCache->getMatchingStats({} /* cacheKeyFilterFunc */, serializer, predicate);
- if (feature_flags::gFeatureFlagSbeFull.isEnabledAndIgnoreFCV()) {
- // Retrieve plan cache entries from the SBE plan cache.
- const auto cacheKeyFilter = [uuid = collection->uuid(),
- collVersion = collQueryInfo.getPlanCacheInvalidatorVersion()](
- const sbe::PlanCacheKey& key) {
- // Only fetch plan cache entries with keys matching given UUID and collectionVersion.
- return uuid == key.getMainCollectionState().uuid &&
- collVersion == key.getMainCollectionState().version;
- };
-
- auto planCacheEntriesSBE =
- sbe::getPlanCache(opCtx).getMatchingStats(cacheKeyFilter, serializer, predicate);
-
- planCacheEntries.insert(
- planCacheEntries.end(), planCacheEntriesSBE.begin(), planCacheEntriesSBE.end());
- }
+ // Retrieve plan cache entries from the SBE plan cache.
+ const auto cacheKeyFilter = [uuid = collection->uuid(),
+ collVersion = collQueryInfo.getPlanCacheInvalidatorVersion()](
+ const sbe::PlanCacheKey& key) {
+ // Only fetch plan cache entries with keys matching given UUID and collectionVersion.
+ return uuid == key.getMainCollectionState().uuid &&
+ collVersion == key.getMainCollectionState().version;
+ };
+
+ auto planCacheEntriesSBE =
+ sbe::getPlanCache(opCtx).getMatchingStats(cacheKeyFilter, serializer, predicate);
+
+ planCacheEntries.insert(
+ planCacheEntries.end(), planCacheEntriesSBE.begin(), planCacheEntriesSBE.end());
+
return planCacheEntries;
}
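
The hunk above unconditionally merges SBE plan cache entries into the plan cache stats output, keeping only entries whose key matches the collection's UUID and plan-cache invalidator version. Below is a minimal standalone sketch of that filter-and-append pattern; the Key/Entry types and the collUuid/collVersion values are hypothetical stand-ins for sbe::PlanCacheKey and the real getMatchingStats() machinery.

    // Hypothetical sketch: filter SBE entries by (uuid, version), then append to classic stats.
    #include <algorithm>
    #include <cstdint>
    #include <iostream>
    #include <iterator>
    #include <string>
    #include <vector>

    struct Key {
        std::string uuid;  // main collection UUID
        uint64_t version;  // plan cache invalidator version
    };
    struct Entry {
        Key key;
        std::string stats;  // placeholder for a serialized cache entry
    };

    int main() {
        const std::string collUuid = "c0ffee";
        const uint64_t collVersion = 3;

        std::vector<Entry> classicEntries{{{"c0ffee", 3}, "classic-plan"}};
        std::vector<Entry> sbeEntries{{{"c0ffee", 3}, "sbe-plan"}, {{"deadbf", 2}, "stale-entry"}};

        // Keep only SBE entries whose key matches the given UUID and collection version...
        std::vector<Entry> matching;
        std::copy_if(sbeEntries.begin(), sbeEntries.end(), std::back_inserter(matching),
                     [&](const Entry& e) {
                         return e.key.uuid == collUuid && e.key.version == collVersion;
                     });

        // ...then append them to the classic entries, as the patch does above.
        classicEntries.insert(classicEntries.end(), matching.begin(), matching.end());
        std::cout << classicEntries.size() << " entries\n";  // prints "2 entries"
    }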
diff --git a/src/mongo/db/query/canonical_query.cpp b/src/mongo/db/query/canonical_query.cpp
index 516e33fe15a..45fc3ef9c41 100644
--- a/src/mongo/db/query/canonical_query.cpp
+++ b/src/mongo/db/query/canonical_query.cpp
@@ -208,8 +208,7 @@ Status CanonicalQuery::init(OperationContext* opCtx,
_root = MatchExpression::normalize(std::move(root));
// If caching is disabled, do not perform any autoparameterization.
- if (!internalQueryDisablePlanCache.load() &&
- feature_flags::gFeatureFlagSbeFull.isEnabledAndIgnoreFCV()) {
+ if (!internalQueryDisablePlanCache.load()) {
const bool hasNoTextNodes =
!QueryPlannerCommon::hasNode(_root.get(), MatchExpression::TEXT);
if (hasNoTextNodes) {
@@ -548,10 +547,8 @@ std::string CanonicalQuery::toStringShort() const {
}
CanonicalQuery::QueryShapeString CanonicalQuery::encodeKey() const {
- return (feature_flags::gFeatureFlagSbeFull.isEnabledAndIgnoreFCV() && !_forceClassicEngine &&
- _sbeCompatible)
- ? canonical_query_encoder::encodeSBE(*this)
- : canonical_query_encoder::encode(*this);
+ return (!_forceClassicEngine && _sbeCompatible) ? canonical_query_encoder::encodeSBE(*this)
+ : canonical_query_encoder::encode(*this);
}
CanonicalQuery::QueryShapeString CanonicalQuery::encodeKeyForPlanCacheCommand() const {
diff --git a/src/mongo/db/query/canonical_query_encoder.cpp b/src/mongo/db/query/canonical_query_encoder.cpp
index 93036c4feda..4313b6efc2b 100644
--- a/src/mongo/db/query/canonical_query_encoder.cpp
+++ b/src/mongo/db/query/canonical_query_encoder.cpp
@@ -44,7 +44,6 @@
#include "mongo/db/pipeline/document_source_lookup.h"
#include "mongo/db/query/analyze_regex.h"
#include "mongo/db/query/projection.h"
-#include "mongo/db/query/query_feature_flags_gen.h"
#include "mongo/db/query/query_knobs_gen.h"
#include "mongo/db/query/tree_walker.h"
#include "mongo/logv2/log.h"
@@ -1089,9 +1088,6 @@ void encodeKeyForAutoParameterizedMatchSBE(MatchExpression* matchExpr, BufBuilde
} // namespace
std::string encodeSBE(const CanonicalQuery& cq) {
- tassert(6512900,
- "using the SBE plan cache key encoding requires SBE to be fully enabled",
- feature_flags::gFeatureFlagSbeFull.isEnabledAndIgnoreFCV());
tassert(6142104,
"attempting to encode SBE plan cache key for SBE-incompatible query",
cq.isSbeCompatible());
diff --git a/src/mongo/db/query/canonical_query_encoder_test.cpp b/src/mongo/db/query/canonical_query_encoder_test.cpp
index 4987272cf80..12593f56490 100644
--- a/src/mongo/db/query/canonical_query_encoder_test.cpp
+++ b/src/mongo/db/query/canonical_query_encoder_test.cpp
@@ -427,9 +427,6 @@ TEST(CanonicalQueryEncoderTest, ComputeKeySBE) {
// SBE must be enabled in order to generate SBE plan cache keys.
RAIIServerParameterControllerForTest controllerSBE("internalQueryFrameworkControl",
"trySbeEngine");
-
- RAIIServerParameterControllerForTest controllerSBEPlanCache("featureFlagSbeFull", true);
-
testComputeSBEKey(gctx, "{}", "{}", "{}");
testComputeSBEKey(gctx, "{$or: [{a: 1}, {b: 2}]}", "{}", "{}");
testComputeSBEKey(gctx, "{a: 1}", "{}", "{}");
@@ -502,7 +499,6 @@ TEST(CanonicalQueryEncoderTest, ComputeKeySBEWithPipeline) {
RAIIServerParameterControllerForTest controllerSBE("internalQueryFrameworkControl",
"trySbeEngine");
- RAIIServerParameterControllerForTest controllerSBEPlanCache("featureFlagSbeFull", true);
auto getLookupBson = [](StringData localField, StringData foreignField, StringData asField) {
return BSON("$lookup" << BSON("from" << foreignNss.coll() << "localField" << localField
@@ -532,7 +528,6 @@ TEST(CanonicalQueryEncoderTest, ComputeKeySBEWithReadConcern) {
// SBE must be enabled in order to generate SBE plan cache keys.
RAIIServerParameterControllerForTest controllerSBE("internalQueryFrameworkControl",
"trySbeEngine");
- RAIIServerParameterControllerForTest controllerSBEPlanCache("featureFlagSbeFull", true);
// Find command without read concern.
auto findCommand = std::make_unique<FindCommandRequest>(nss);
diff --git a/src/mongo/db/query/canonical_query_test.cpp b/src/mongo/db/query/canonical_query_test.cpp
index a15a3b918b0..2fb5614fd16 100644
--- a/src/mongo/db/query/canonical_query_test.cpp
+++ b/src/mongo/db/query/canonical_query_test.cpp
@@ -456,7 +456,6 @@ TEST(CanonicalQueryTest, InvalidSortOrdersFailToCanonicalize) {
}
TEST(CanonicalQueryTest, DoNotParameterizeTextExpressions) {
- RAIIServerParameterControllerForTest controllerSBEPlanCache("featureFlagSbeFull", true);
auto cq =
canonicalize("{$text: {$search: \"Hello World!\"}}",
MatchExpressionParser::kDefaultSpecialFeatures | MatchExpressionParser::kText);
@@ -464,7 +463,6 @@ TEST(CanonicalQueryTest, DoNotParameterizeTextExpressions) {
}
TEST(CanonicalQueryTest, DoParameterizeRegularExpressions) {
- RAIIServerParameterControllerForTest controllerSBEPlanCache("featureFlagSbeFull", true);
auto cq = canonicalize("{a: 1, b: {$lt: 5}}");
ASSERT_TRUE(cq->isParameterized());
}
diff --git a/src/mongo/db/query/classic_plan_cache.cpp b/src/mongo/db/query/classic_plan_cache.cpp
index 7789d894cb5..41874a76d8a 100644
--- a/src/mongo/db/query/classic_plan_cache.cpp
+++ b/src/mongo/db/query/classic_plan_cache.cpp
@@ -130,8 +130,7 @@ bool shouldCacheQuery(const CanonicalQuery& query) {
const MatchExpression* expr = query.root();
if (!query.getSortPattern() && expr->matchType() == MatchExpression::AND &&
- expr->numChildren() == 0 &&
- !(feature_flags::gFeatureFlagSbeFull.isEnabledAndIgnoreFCV() && query.isSbeCompatible())) {
+ expr->numChildren() == 0 && !query.isSbeCompatible()) {
return false;
}
diff --git a/src/mongo/db/query/explain.cpp b/src/mongo/db/query/explain.cpp
index 545ec8553f4..b9b69eac18d 100644
--- a/src/mongo/db/query/explain.cpp
+++ b/src/mongo/db/query/explain.cpp
@@ -98,7 +98,6 @@ void generatePlannerInfo(PlanExecutor* exec,
const QuerySettings* querySettings =
QuerySettingsDecoration::get(mainCollection->getSharedDecorations());
if (exec->getCanonicalQuery()->isSbeCompatible() &&
- feature_flags::gFeatureFlagSbeFull.isEnabledAndIgnoreFCV() &&
!exec->getCanonicalQuery()->getForceClassicEngine()) {
const auto planCacheKeyInfo =
plan_cache_key_factory::make(*exec->getCanonicalQuery(), collections);
diff --git a/src/mongo/db/query/get_executor.cpp b/src/mongo/db/query/get_executor.cpp
index b3a28aef907..acc8c2e528f 100644
--- a/src/mongo/db/query/get_executor.cpp
+++ b/src/mongo/db/query/get_executor.cpp
@@ -666,7 +666,7 @@ public:
_fromPlanCache = val;
}
- bool isRecoveredFromPlanCache() {
+ bool isRecoveredFromPlanCache() const {
return _fromPlanCache;
}
@@ -1148,64 +1148,25 @@ protected:
std::unique_ptr<SlotBasedPrepareExecutionResult> buildCachedPlan(
const sbe::PlanCacheKey& planCacheKey) final {
if (shouldCacheQuery(*_cq)) {
- if (!feature_flags::gFeatureFlagSbeFull.isEnabledAndIgnoreFCV()) {
- return buildCachedPlanFromClassicCache();
- } else {
- getResult()->planCacheInfo().planCacheKey = planCacheKey.planCacheKeyHash();
-
- auto&& planCache = sbe::getPlanCache(_opCtx);
- auto cacheEntry = planCache.getCacheEntryIfActive(planCacheKey);
- if (!cacheEntry) {
- return nullptr;
- }
+ getResult()->planCacheInfo().planCacheKey = planCacheKey.planCacheKeyHash();
- auto&& cachedPlan = std::move(cacheEntry->cachedPlan);
- auto root = std::move(cachedPlan->root);
- auto stageData = std::move(cachedPlan->planStageData);
- stageData.debugInfo = cacheEntry->debugInfo;
-
- auto result = releaseResult();
- result->setDecisionWorks(cacheEntry->decisionWorks);
- result->setRecoveredPinnedCacheEntry(cacheEntry->isPinned());
- result->emplace(std::make_pair(std::move(root), std::move(stageData)));
- result->setRecoveredFromPlanCache(true);
- return result;
+ auto&& planCache = sbe::getPlanCache(_opCtx);
+ auto cacheEntry = planCache.getCacheEntryIfActive(planCacheKey);
+ if (!cacheEntry) {
+ return nullptr;
}
- }
- return nullptr;
- }
-
- // A temporary function to allow recovering SBE plans from the classic plan cache. When the
- // feature flag for "SBE full" is disabled, we are still able to use the classic plan cache for
- // queries that execute in SBE.
- //
- // TODO SERVER-64882: Remove this function when "featureFlagSbeFull" is removed.
- std::unique_ptr<SlotBasedPrepareExecutionResult> buildCachedPlanFromClassicCache() {
- const auto& mainColl = getMainCollection();
- auto planCacheKey = plan_cache_key_factory::make<PlanCacheKey>(*_cq, mainColl);
- getResult()->planCacheInfo().planCacheKey = planCacheKey.planCacheKeyHash();
-
- // Try to look up a cached solution for the query.
- if (auto cs = CollectionQueryInfo::get(mainColl).getPlanCache()->getCacheEntryIfActive(
- planCacheKey)) {
- initializePlannerParamsIfNeeded();
- // We have a CachedSolution. Have the planner turn it into a QuerySolution.
- auto statusWithQs = QueryPlanner::planFromCache(*_cq, _plannerParams, *cs);
-
- if (statusWithQs.isOK()) {
- auto querySolution = std::move(statusWithQs.getValue());
- if (_cq->isCountLike() && turnIxscanIntoCount(querySolution.get())) {
- LOGV2_DEBUG(
- 20923, 2, "Using fast count", "query"_attr = redact(_cq->toStringShort()));
- }
+ auto&& cachedPlan = std::move(cacheEntry->cachedPlan);
+ auto root = std::move(cachedPlan->root);
+ auto stageData = std::move(cachedPlan->planStageData);
+ stageData.debugInfo = cacheEntry->debugInfo;
- auto result = releaseResult();
- addSolutionToResult(result.get(), std::move(querySolution));
- result->setDecisionWorks(cs->decisionWorks);
- result->setRecoveredFromPlanCache(true);
- return result;
- }
+ auto result = releaseResult();
+ result->setDecisionWorks(cacheEntry->decisionWorks);
+ result->setRecoveredPinnedCacheEntry(cacheEntry->isPinned());
+ result->emplace(std::make_pair(std::move(root), std::move(stageData)));
+ result->setRecoveredFromPlanCache(true);
+ return result;
}
return nullptr;
@@ -1429,56 +1390,52 @@ StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getSlotBasedExe
}
/**
- * Checks if the result of query planning is SBE compatible.
+ * Checks if the result of query planning is SBE compatible. In this function, 'sbeFull' indicates
+ * whether the full set of features supported by SBE is enabled, while 'canUseRegularSbe' indicates
+ * whether the query is compatible with the subset of SBE enabled by default.
*/
bool shouldPlanningResultUseSbe(bool sbeFull,
+ bool canUseRegularSbe,
bool columnIndexPresent,
- bool aggSpecificStagesPushedDown,
const SlotBasedPrepareExecutionResult& planningResult) {
+ // If we have an entry in the SBE plan cache, then we can use SBE.
+ if (planningResult.isRecoveredFromPlanCache()) {
+ return true;
+ }
+
// For now this function assumes one of these is true. If all are false, we should not use
// SBE.
tassert(6164401,
- "Expected sbeFull, or a CSI present, or agg specific stages pushed down",
- sbeFull || columnIndexPresent || aggSpecificStagesPushedDown);
+ "Expected sbeFull, or a regular SBE compatiable query, or a CSI present",
+ sbeFull || canUseRegularSbe || columnIndexPresent);
const auto& solutions = planningResult.solutions();
if (solutions.empty()) {
// Query needs subplanning (plans are generated later, we don't have access yet).
invariant(planningResult.needsSubplanning());
- // TODO: SERVER-71798 if the below conditions are not met, a column index will not be used
- // even if it could be.
- return sbeFull || aggSpecificStagesPushedDown;
+ // Use SBE for rooted $or queries if SBE is fully enabled or the query is SBE compatible to
+ // begin with.
+ return sbeFull || canUseRegularSbe;
}
// Check that the query solution is SBE compatible.
const bool allStagesCompatible =
std::all_of(solutions.begin(), solutions.end(), [](const auto& solution) {
- return solution->root() ==
- nullptr /* we won't have a query solution if we pulled it from the cache */
- || isQueryPlanSbeCompatible(solution.get());
+ // We must have a solution, otherwise we would have early exited.
+ invariant(solution->root());
+ return isQueryPlanSbeCompatible(solution.get());
});
if (!allStagesCompatible) {
return false;
}
- if (sbeFull || aggSpecificStagesPushedDown) {
+ if (sbeFull || canUseRegularSbe) {
return true;
}
- // If no pipeline is pushed down and SBE full is off, the only other case we'll use SBE for
- // is when a column index plan was constructed.
- tassert(6164400, "Expected CSI to be present", columnIndexPresent);
-
- // The only time a query solution is not available is when the plan comes from the SBE plan
- // cache. The plan cache is gated by sbeFull, which was already checked earlier. So, at this
- // point we're guaranteed sbeFull is off, and this further implies that the returned plan(s)
- // did not come from the cache.
- tassert(6164402,
- "Did not expect a plan from the plan cache",
- !sbeFull && solutions.front()->root());
-
+ // Return true if we have a column scan plan, and false otherwise.
return solutions.size() == 1 &&
solutions.front()->root()->hasNode(StageType::STAGE_COLUMN_SCAN);
}
@@ -1518,6 +1475,38 @@ bool maybeQueryIsColumnScanEligible(OperationContext* opCtx,
}
/**
+ * Function which returns true if 'cq' uses features that are currently supported in SBE without
+ * 'featureFlagSbeFull' being set; false otherwise.
+ */
+bool shouldUseRegularSbe(const CanonicalQuery& cq) {
+ const auto* proj = cq.getProj();
+
+ // Disallow projections which use expressions.
+ if (proj && proj->hasExpressions()) {
+ return false;
+ }
+
+ // Disallow projections which have dotted paths.
+ if (proj && proj->hasDottedPaths()) {
+ return false;
+ }
+
+ // Disallow filters which feature $expr.
+ if (cq.countNodes(cq.root(), MatchExpression::MatchType::EXPRESSION) > 0) {
+ return false;
+ }
+
+ const auto& sortPattern = cq.getSortPattern();
+
+ // Disallow sorts which have a common prefix.
+ if (sortPattern && sortPatternHasPartsWithCommonPrefix(*sortPattern)) {
+ return false;
+ }
+
+ return true;
+}
+
+/**
* Attempts to create a slot-based executor for the query, if the query plan is eligible for SBE
* execution. This function has three possible return values:
*
@@ -1543,18 +1532,20 @@ attemptToGetSlotBasedExecutor(
}
const bool sbeFull = feature_flags::gFeatureFlagSbeFull.isEnabledAndIgnoreFCV();
- const bool aggSpecificStagesPushedDown = !canonicalQuery->pipeline().empty();
+ const bool canUseRegularSbe = shouldUseRegularSbe(*canonicalQuery);
- // Attempt to use SBE if we find any $group/$lookup stages eligible for execution in SBE, if the
- // query may be eligible for column scan, or if SBE is fully enabled. Otherwise, fallback to the
- // classic engine right away.
- if (aggSpecificStagesPushedDown || sbeFull ||
+ // Attempt to use SBE if the query may be eligible for column scan, if the currently supported
+ // subset of SBE is being used, or if SBE is fully enabled. Otherwise, fallback to the classic
+ // engine right away.
+ if (sbeFull || canUseRegularSbe ||
maybeQueryIsColumnScanEligible(opCtx, collections, canonicalQuery.get())) {
+ // Create the SBE prepare execution helper and initialize the params for the planner. Our
+ // decision about using SBE will depend on whether there is a column index present.
+
auto sbeYieldPolicy = makeSbeYieldPolicy(
opCtx, yieldPolicy, &collections.getMainCollection(), canonicalQuery->nss());
SlotBasedPrepareExecutionHelper helper{
opCtx, collections, canonicalQuery.get(), sbeYieldPolicy.get(), plannerParams.options};
-
auto planningResultWithStatus = helper.prepare();
if (!planningResultWithStatus.isOK()) {
return planningResultWithStatus.getStatus();
@@ -1563,10 +1554,8 @@ attemptToGetSlotBasedExecutor(
const bool csiPresent =
helper.plannerParams() && !helper.plannerParams()->columnStoreIndexes.empty();
- if (shouldPlanningResultUseSbe(sbeFull,
- csiPresent,
- aggSpecificStagesPushedDown,
- *planningResultWithStatus.getValue())) {
+ if (shouldPlanningResultUseSbe(
+ sbeFull, canUseRegularSbe, csiPresent, *planningResultWithStatus.getValue())) {
if (extractAndAttachPipelineStages) {
// We know now that we will use SBE, so we need to remove the pushed-down stages
// from the original pipeline object.
@@ -1585,15 +1574,12 @@ attemptToGetSlotBasedExecutor(
return statusWithExecutor.getStatus();
}
}
- // Query plan was not SBE compatible - reset any fields that may have been modified, and
- // fall back to classic engine.
- canonicalQuery->setPipeline({});
-
- // Fall through to below.
}
// Either we did not meet the criteria for attempting SBE, or we attempted query planning and
- // determined that SBE should not be used.
+ // determined that SBE should not be used. Reset any fields that may have been modified, and
+ // fall back to classic engine.
+ canonicalQuery->setPipeline({});
canonicalQuery->setSbeCompatible(false);
return std::move(canonicalQuery);
}
@@ -1646,6 +1632,8 @@ StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutor(
std::move(stdx::get<std::unique_ptr<CanonicalQuery>>(maybeExecutor));
}
}
+ // Ensure that 'sbeCompatible' is set accordingly.
+ canonicalQuery->setSbeCompatible(false);
return getClassicExecutor(
opCtx, mainColl, std::move(canonicalQuery), yieldPolicy, plannerParams);
}();
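
Taken together, the get_executor.cpp changes above make the initial engine choice hinge on three inputs: featureFlagSbeFull, shouldUseRegularSbe() (the default-enabled SBE subset), and possible column scan eligibility. Only if at least one holds is SBE planning attempted, after which shouldPlanningResultUseSbe() makes the final call on the candidate plans. A minimal sketch of that initial gate follows; the struct fields are hypothetical stand-ins for the real checks.

    // Hypothetical sketch of the engine-selection gate described above.
    #include <iostream>

    struct EngineInputs {
        bool sbeFull;             // featureFlagSbeFull is enabled
        bool canUseRegularSbe;    // query uses only the default-enabled SBE subset
        bool columnScanEligible;  // a column store index might be applicable
    };

    const char* chooseEngine(const EngineInputs& in) {
        if (in.sbeFull || in.canUseRegularSbe || in.columnScanEligible) {
            return "attempt SBE planning";
        }
        return "classic engine";
    }

    int main() {
        std::cout << chooseEngine({false, true, false}) << '\n';   // attempt SBE planning
        std::cout << chooseEngine({false, false, false}) << '\n';  // classic engine
    }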
diff --git a/src/mongo/db/query/projection.h b/src/mongo/db/query/projection.h
index c093083c068..f920a778e9a 100644
--- a/src/mongo/db/query/projection.h
+++ b/src/mongo/db/query/projection.h
@@ -118,6 +118,13 @@ public:
*/
bool isFieldRetainedExactly(StringData path) const;
+
+ /**
+ * Returns true if this projection has any dotted paths; false otherwise.
+ */
+ bool hasDottedPaths() const {
+ return _deps.hasDottedPath;
+ }
/**
* A projection is considered "simple" if it operates only on top-level fields,
* has no positional projection or expressions, and doesn't require metadata.
diff --git a/src/mongo/db/query/query_feature_flags.idl b/src/mongo/db/query/query_feature_flags.idl
index f8b7335a1a4..579b2dd7977 100644
--- a/src/mongo/db/query/query_feature_flags.idl
+++ b/src/mongo/db/query/query_feature_flags.idl
@@ -77,8 +77,7 @@ feature_flags:
default: false
featureFlagSbeFull:
- description: "Feature flag for enabling full SBE support. Enables SBE for a much larger class
- of queries, including NLJ $lookup plans. Also enables the SBE plan cache."
+ description: "Feature flag for enabling SBE for a much larger class of queries than what is exposed by default"
cpp_varname: gFeatureFlagSbeFull
default: false
diff --git a/src/mongo/db/query/query_utils.cpp b/src/mongo/db/query/query_utils.cpp
index 917817c739e..71dd2acded6 100644
--- a/src/mongo/db/query/query_utils.cpp
+++ b/src/mongo/db/query/query_utils.cpp
@@ -34,6 +34,21 @@
namespace mongo {
+bool sortPatternHasPartsWithCommonPrefix(const SortPattern& sortPattern) {
+ StringDataSet prefixSet;
+ for (const auto& part : sortPattern) {
+ // Ignore any $meta sorts that may be present.
+ if (!part.fieldPath) {
+ continue;
+ }
+ auto [_, inserted] = prefixSet.insert(part.fieldPath->getFieldName(0));
+ if (!inserted) {
+ return true;
+ }
+ }
+ return false;
+}
+
bool isIdHackEligibleQuery(const CollectionPtr& collection, const CanonicalQuery& query) {
const auto& findCommand = query.getFindCommandRequest();
return !findCommand.getShowRecordId() && findCommand.getHint().isEmpty() &&
@@ -66,10 +81,11 @@ bool isQuerySbeCompatible(const CollectionPtr* collection, const CanonicalQuery*
const bool isQueryNotAgainstClusteredCollection =
!(collection->get() && collection->get()->isClustered());
- const bool doesNotRequireMatchDetails =
- !cq->getProj() || !cq->getProj()->requiresMatchDetails();
+ const auto* proj = cq->getProj();
+
+ const bool doesNotRequireMatchDetails = !proj || !proj->requiresMatchDetails();
- const bool doesNotHaveElemMatchProject = !cq->getProj() || !cq->getProj()->containsElemMatch();
+ const bool doesNotHaveElemMatchProject = !proj || !proj->containsElemMatch();
const bool isNotInnerSideOfLookup = !(expCtx && expCtx->inLookup);
diff --git a/src/mongo/db/query/query_utils.h b/src/mongo/db/query/query_utils.h
index 97165860da1..55a5e069ad3 100644
--- a/src/mongo/db/query/query_utils.h
+++ b/src/mongo/db/query/query_utils.h
@@ -34,6 +34,12 @@
namespace mongo {
/**
+ * Returns 'true' if 'sortPattern' contains any sort pattern parts that share a common prefix, false
+ * otherwise.
+ */
+bool sortPatternHasPartsWithCommonPrefix(const SortPattern& sortPattern);
+
+/**
* Returns 'true' if 'query' on the given 'collection' can be answered using a special IDHACK plan.
*/
bool isIdHackEligibleQuery(const CollectionPtr& collection, const CanonicalQuery& query);
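
The new sortPatternHasPartsWithCommonPrefix() helper treats two sort components as sharing a prefix when their first path components are equal, so a sort such as {"a.b": 1, "a.c": -1} shares the prefix "a" and keeps the query out of the default SBE subset, while {a: 1, b: 1} does not. Below is a standalone sketch of the same check, using plain dotted-path strings in place of SortPattern (the real helper also skips $meta parts, which carry no field path).

    // Standalone sketch of the common-prefix check, with dotted-path strings instead of SortPattern.
    #include <iostream>
    #include <set>
    #include <string>
    #include <vector>

    bool hasPartsWithCommonPrefix(const std::vector<std::string>& sortPaths) {
        std::set<std::string> prefixes;
        for (const auto& path : sortPaths) {
            // The prefix is the first dotted component, e.g. "a" for "a.b".
            const std::string prefix = path.substr(0, path.find('.'));
            if (!prefixes.insert(prefix).second) {
                return true;  // this prefix was already seen
            }
        }
        return false;
    }

    int main() {
        std::cout << hasPartsWithCommonPrefix({"a.b", "a.c"}) << '\n';  // 1: both start with "a"
        std::cout << hasPartsWithCommonPrefix({"a", "b"}) << '\n';      // 0: distinct prefixes
    }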
diff --git a/src/mongo/db/query/sbe_cached_solution_planner.cpp b/src/mongo/db/query/sbe_cached_solution_planner.cpp
index 5927bf5722c..5fbe8be2ec3 100644
--- a/src/mongo/db/query/sbe_cached_solution_planner.cpp
+++ b/src/mongo/db/query/sbe_cached_solution_planner.cpp
@@ -52,45 +52,24 @@ CandidatePlans CachedSolutionPlanner::plan(
std::vector<std::unique_ptr<QuerySolution>> solutions,
std::vector<std::pair<std::unique_ptr<PlanStage>, stage_builder::PlanStageData>> roots) {
if (!_cq.pipeline().empty()) {
- // When "featureFlagSbeFull" is enabled we use the SBE plan cache. If the plan cache is
- // enabled we'd like to check if there is any foreign collection in the hash_lookup stage
- // that is no longer eligible for it. In this case we invalidate the cache and immediately
- // replan without ever running a trial period.
+ // We'd like to check if there is any foreign collection in the hash_lookup stage that is no
+ // longer eligible for using a hash_lookup plan. In this case we invalidate the cache and
+ // immediately replan without ever running a trial period.
auto secondaryCollectionsInfo =
fillOutSecondaryCollectionsInformation(_opCtx, _collections, &_cq);
- if (feature_flags::gFeatureFlagSbeFull.isEnabledAndIgnoreFCV()) {
- for (const auto& foreignCollection : roots[0].second.foreignHashJoinCollections) {
- const auto collectionInfo = secondaryCollectionsInfo.find(foreignCollection);
- tassert(6693500,
- "Foreign collection must be present in the collections info",
- collectionInfo != secondaryCollectionsInfo.end());
- tassert(6693501, "Foreign collection must exist", collectionInfo->second.exists);
+ for (const auto& foreignCollection : roots[0].second.foreignHashJoinCollections) {
+ const auto collectionInfo = secondaryCollectionsInfo.find(foreignCollection);
+ tassert(6693500,
+ "Foreign collection must be present in the collections info",
+ collectionInfo != secondaryCollectionsInfo.end());
+ tassert(6693501, "Foreign collection must exist", collectionInfo->second.exists);
- if (!QueryPlannerAnalysis::isEligibleForHashJoin(collectionInfo->second)) {
- return replan(/* shouldCache */ true,
- str::stream() << "Foreign collection " << foreignCollection
- << " is not eligible for hash join anymore");
- }
+ if (!QueryPlannerAnalysis::isEligibleForHashJoin(collectionInfo->second)) {
+ return replan(/* shouldCache */ true,
+ str::stream() << "Foreign collection " << foreignCollection
+ << " is not eligible for hash join anymore");
}
- } else {
- // The SBE plan cache is not enabled. If the cached plan is accepted we'd like to keep
- // the results from the trials even if there are parts of agg pipelines being lowered
- // into SBE, so we run the trial with the extended plan. This works because
- // TrialRunTracker, attached to HashAgg stage in $group queries, tracks as "results" the
- // results of its child stage. For $lookup queries, the TrialRunTracker will only track
- // the number of reads from the local side. Thus, we can use the number of reads the
- // plan was cached with during multiplanning even though multiplanning ran trials of
- // pre-extended plans.
- //
- // The SBE plan cache stores the entire plan, including the part for any agg pipeline
- // pushed down to SBE. Therefore, this logic is only necessary when "featureFlagSbeFull"
- // is disabled.
- _yieldPolicy->clearRegisteredPlans();
- solutions[0] = QueryPlanner::extendWithAggPipeline(
- _cq, std::move(solutions[0]), secondaryCollectionsInfo);
- roots[0] = stage_builder::buildSlotBasedExecutableTree(
- _opCtx, _collections, _cq, *solutions[0], _yieldPolicy);
}
}
// If the '_decisionReads' is not present then we do not run a trial period, keeping the current
@@ -227,18 +206,9 @@ CandidatePlans CachedSolutionPlanner::replan(bool shouldCache, std::string reaso
_yieldPolicy->clearRegisteredPlans();
if (shouldCache) {
- const auto& mainColl = _collections.getMainCollection();
// Deactivate the current cache entry.
- //
- // TODO SERVER-64882: We currently deactivate cache entries in both the classic and SBE plan
- // caches. Once we always use the SBE plan cache for queries eligible for SBE, this code can
- // be simplified to only deactivate the entry in the SBE plan cache.
- auto cache = CollectionQueryInfo::get(mainColl).getPlanCache();
- cache->deactivate(plan_cache_key_factory::make<mongo::PlanCacheKey>(_cq, mainColl));
- if (feature_flags::gFeatureFlagSbeFull.isEnabledAndIgnoreFCV()) {
- auto&& sbePlanCache = sbe::getPlanCache(_opCtx);
- sbePlanCache.deactivate(plan_cache_key_factory::make(_cq, _collections));
- }
+ auto&& sbePlanCache = sbe::getPlanCache(_opCtx);
+ sbePlanCache.deactivate(plan_cache_key_factory::make(_cq, _collections));
}
auto buildExecutableTree = [&](const QuerySolution& sol) {
diff --git a/src/mongo/db/query/sbe_plan_cache.cpp b/src/mongo/db/query/sbe_plan_cache.cpp
index 1498fa28932..2129554a2d9 100644
--- a/src/mongo/db/query/sbe_plan_cache.cpp
+++ b/src/mongo/db/query/sbe_plan_cache.cpp
@@ -48,27 +48,23 @@ const auto sbePlanCacheDecoration =
class PlanCacheOnParamChangeUpdaterImpl final : public plan_cache_util::OnParamChangeUpdater {
public:
void updateCacheSize(ServiceContext* serviceCtx, memory_util::MemorySize memSize) final {
- if (feature_flags::gFeatureFlagSbeFull.isEnabledAndIgnoreFCV()) {
- auto newSizeBytes = memory_util::getRequestedMemSizeInBytes(memSize);
- auto cappedCacheSize = memory_util::capMemorySize(newSizeBytes /*requestedSizeBytes*/,
- 500 /*maximumSizeGB*/,
- 25 /*percentTotalSystemMemory*/);
- if (cappedCacheSize < newSizeBytes) {
- LOGV2_DEBUG(6007001,
- 1,
- "The plan cache size has been capped",
- "cappedSize"_attr = cappedCacheSize);
- }
- auto& globalPlanCache = sbePlanCacheDecoration(serviceCtx);
- globalPlanCache->reset(cappedCacheSize);
+ auto newSizeBytes = memory_util::getRequestedMemSizeInBytes(memSize);
+ auto cappedCacheSize = memory_util::capMemorySize(newSizeBytes /*requestedSizeBytes*/,
+ 500 /*maximumSizeGB*/,
+ 25 /*percentTotalSystemMemory*/);
+ if (cappedCacheSize < newSizeBytes) {
+ LOGV2_DEBUG(6007001,
+ 1,
+ "The plan cache size has been capped",
+ "cappedSize"_attr = cappedCacheSize);
}
+ auto& globalPlanCache = sbePlanCacheDecoration(serviceCtx);
+ globalPlanCache->reset(cappedCacheSize);
}
void clearCache(ServiceContext* serviceCtx) final {
- if (feature_flags::gFeatureFlagSbeFull.isEnabledAndIgnoreFCV()) {
- auto& globalPlanCache = sbePlanCacheDecoration(serviceCtx);
- globalPlanCache->clear();
- }
+ auto& globalPlanCache = sbePlanCacheDecoration(serviceCtx);
+ globalPlanCache->clear();
}
};
@@ -77,38 +73,29 @@ ServiceContext::ConstructorActionRegisterer planCacheRegisterer{
plan_cache_util::sbePlanCacheOnParamChangeUpdater(serviceCtx) =
std::make_unique<PlanCacheOnParamChangeUpdaterImpl>();
- if (feature_flags::gFeatureFlagSbeFull.isEnabledAndIgnoreFCV()) {
- auto status = memory_util::MemorySize::parse(planCacheSize.get());
- uassertStatusOK(status);
- auto size = memory_util::getRequestedMemSizeInBytes(status.getValue());
- auto cappedCacheSize = memory_util::capMemorySize(size /*requestedSizeBytes*/,
- 500 /*maximumSizeGB*/,
- 25 /*percentTotalSystemMemory*/);
- if (cappedCacheSize < size) {
- LOGV2_DEBUG(6007000,
- 1,
- "The plan cache size has been capped",
- "cappedSize"_attr = cappedCacheSize);
- }
- auto& globalPlanCache = sbePlanCacheDecoration(serviceCtx);
- globalPlanCache =
- std::make_unique<sbe::PlanCache>(cappedCacheSize, ProcessInfo::getNumCores());
+ auto status = memory_util::MemorySize::parse(planCacheSize.get());
+ uassertStatusOK(status);
+ auto size = memory_util::getRequestedMemSizeInBytes(status.getValue());
+ auto cappedCacheSize = memory_util::capMemorySize(
+ size /*requestedSizeBytes*/, 500 /*maximumSizeGB*/, 25 /*percentTotalSystemMemory*/);
+ if (cappedCacheSize < size) {
+ LOGV2_DEBUG(6007000,
+ 1,
+ "The plan cache size has been capped",
+ "cappedSize"_attr = cappedCacheSize);
}
+ auto& globalPlanCache = sbePlanCacheDecoration(serviceCtx);
+ globalPlanCache =
+ std::make_unique<sbe::PlanCache>(cappedCacheSize, ProcessInfo::getNumCores());
}};
} // namespace
sbe::PlanCache& getPlanCache(ServiceContext* serviceCtx) {
- uassert(5933402,
- "Cannot getPlanCache() if 'featureFlagSbeFull' is disabled",
- feature_flags::gFeatureFlagSbeFull.isEnabledAndIgnoreFCV());
return *sbePlanCacheDecoration(serviceCtx);
}
sbe::PlanCache& getPlanCache(OperationContext* opCtx) {
- uassert(5933401,
- "Cannot getPlanCache() if 'featureFlagSbeFull' is disabled",
- feature_flags::gFeatureFlagSbeFull.isEnabledAndIgnoreFCV());
tassert(5933400, "Cannot get the global SBE plan cache by a nullptr", opCtx);
return getPlanCache(opCtx->getServiceContext());
}
@@ -117,32 +104,29 @@ void clearPlanCacheEntriesWith(ServiceContext* serviceCtx,
UUID collectionUuid,
size_t collectionVersion,
bool matchSecondaryCollections) {
- if (feature_flags::gFeatureFlagSbeFull.isEnabledAndIgnoreFCV()) {
- auto removed =
- sbe::getPlanCache(serviceCtx)
- .removeIf([&collectionUuid, collectionVersion, matchSecondaryCollections](
- const PlanCacheKey& key, const sbe::PlanCacheEntry& entry) {
- if (key.getMainCollectionState().version == collectionVersion &&
- key.getMainCollectionState().uuid == collectionUuid) {
- return true;
- }
- if (matchSecondaryCollections) {
- for (auto& collectionState : key.getSecondaryCollectionStates()) {
- if (collectionState.version == collectionVersion &&
- collectionState.uuid == collectionUuid) {
- return true;
- }
- }
- }
- return false;
- });
-
- LOGV2_DEBUG(6006600,
- 1,
- "Clearing SBE Plan Cache",
- "collectionUuid"_attr = collectionUuid,
- "collectionVersion"_attr = collectionVersion,
- "removedEntries"_attr = removed);
- }
+ auto removed = sbe::getPlanCache(serviceCtx)
+ .removeIf([&collectionUuid, collectionVersion, matchSecondaryCollections](
+ const PlanCacheKey& key, const sbe::PlanCacheEntry& entry) {
+ if (key.getMainCollectionState().version == collectionVersion &&
+ key.getMainCollectionState().uuid == collectionUuid) {
+ return true;
+ }
+ if (matchSecondaryCollections) {
+ for (auto& collectionState : key.getSecondaryCollectionStates()) {
+ if (collectionState.version == collectionVersion &&
+ collectionState.uuid == collectionUuid) {
+ return true;
+ }
+ }
+ }
+ return false;
+ });
+
+ LOGV2_DEBUG(6006600,
+ 1,
+ "Clearing SBE Plan Cache",
+ "collectionUuid"_attr = collectionUuid,
+ "collectionVersion"_attr = collectionVersion,
+ "removedEntries"_attr = removed);
}
} // namespace mongo::sbe
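
With the feature-flag guard gone, the SBE plan cache is always constructed and sized at startup. Judging from the capMemorySize() arguments shown above (500 maximumSizeGB, 25 percentTotalSystemMemory), the requested planCacheSize appears to be capped at the smaller of 500 GB and 25% of total system memory; the sketch below works through that arithmetic under that assumption.

    // Sketch of the capping arithmetic, assuming cap = min(requested, 500 GB, 25% of system RAM).
    #include <algorithm>
    #include <cstdint>
    #include <iostream>

    uint64_t capCacheSizeBytes(uint64_t requestedBytes, uint64_t systemMemoryBytes) {
        const uint64_t maxAbsolute = 500ull * 1024 * 1024 * 1024;  // 500 GB
        const uint64_t maxRelative = systemMemoryBytes / 4;        // 25% of system memory
        return std::min(requestedBytes, std::min(maxAbsolute, maxRelative));
    }

    int main() {
        const uint64_t GB = 1024ull * 1024 * 1024;
        // On a 64 GB host, a 32 GB request is capped to 16 GB (25% of RAM).
        std::cout << capCacheSizeBytes(32 * GB, 64 * GB) / GB << " GB\n";
    }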
diff --git a/src/mongo/db/query/sbe_stage_builder.cpp b/src/mongo/db/query/sbe_stage_builder.cpp
index 9c354a469ae..8fe87acf72b 100644
--- a/src/mongo/db/query/sbe_stage_builder.cpp
+++ b/src/mongo/db/query/sbe_stage_builder.cpp
@@ -60,6 +60,8 @@
#include "mongo/db/query/bind_input_params.h"
#include "mongo/db/query/expression_walker.h"
#include "mongo/db/query/index_bounds_builder.h"
+#include "mongo/db/query/optimizer/rewrites/const_eval.h"
+#include "mongo/db/query/query_utils.h"
#include "mongo/db/query/sbe_stage_builder_abt_helpers.h"
#include "mongo/db/query/sbe_stage_builder_accumulator.h"
#include "mongo/db/query/sbe_stage_builder_coll_scan.h"
@@ -1215,19 +1217,13 @@ std::pair<std::unique_ptr<sbe::PlanStage>, PlanStageSlots> SlotBasedStageBuilder
return buildSortCovered(root, reqs);
}
- StringDataSet prefixSet;
- bool hasPartsWithCommonPrefix = false;
+ // getExecutor() should never call into buildSlotBasedExecutableTree() when the query
+ // contains $meta, so this assertion should always be true.
for (const auto& part : sortPattern) {
- // getExecutor() should never call into buildSlotBasedExecutableTree() when the query
- // contains $meta, so this assertion should always be true.
tassert(5037002, "Sort with $meta is not supported in SBE", part.fieldPath);
-
- if (!hasPartsWithCommonPrefix) {
- auto [_, prefixWasNotPresent] = prefixSet.insert(part.fieldPath->getFieldName(0));
- hasPartsWithCommonPrefix = !prefixWasNotPresent;
- }
}
+ const bool hasPartsWithCommonPrefix = sortPatternHasPartsWithCommonPrefix(sortPattern);
auto fields = reqs.getFields();
if (!hasPartsWithCommonPrefix) {
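
The removed inline loop above is the logic that the new sortPatternHasPartsWithCommonPrefix() helper (included via query_utils.h) factors out: two sort parts share a common prefix when their dotted paths begin with the same top-level field, e.g. {"a.b": 1, "a.c": -1}. A hypothetical standalone version of that check, operating on pre-extracted top-level field names rather than the real SortPattern type:

#include <set>
#include <string>
#include <vector>

// Returns true if any two sort parts begin with the same top-level field name.
bool hasPartsWithCommonPrefix(const std::vector<std::string>& topLevelFields) {
    std::set<std::string> seen;
    for (const auto& field : topLevelFields) {
        if (!seen.insert(field).second) {
            return true;  // this leading field already appeared in an earlier sort part
        }
    }
    return false;
}

For example, a sort on {"a": 1, "b": -1} yields {"a", "b"} and returns false, while {"a.b": 1, "a.c": 1} yields {"a", "a"} and returns true, which is the condition the builder branches on right after.
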
diff --git a/src/mongo/db/query/sbe_stage_builder_filter.cpp b/src/mongo/db/query/sbe_stage_builder_filter.cpp
index 57210c4b0c8..ffc9f38260b 100644
--- a/src/mongo/db/query/sbe_stage_builder_filter.cpp
+++ b/src/mongo/db/query/sbe_stage_builder_filter.cpp
@@ -1194,37 +1194,6 @@ public:
private:
MatchExpressionVisitorContext* _context;
};
-
-EvalExpr applyClassicMatcher(const MatchExpression* root,
- EvalExpr inputExpr,
- StageBuilderState& state) {
- return makeFunction("applyClassicMatcher",
- makeConstant(sbe::value::TypeTags::classicMatchExpresion,
- sbe::value::bitcastFrom<const MatchExpression*>(
- root->shallowClone().release())),
- inputExpr.extractExpr(state));
-}
-
-EvalExpr applyClassicMatcherOverIndexScan(const MatchExpression* root,
- const PlanStageSlots* slots,
- const std::vector<std::string>& keyFields) {
- BSONObjBuilder keyPatternBuilder;
- auto keySlots = sbe::makeSV();
- for (const auto& field : keyFields) {
- keyPatternBuilder.append(field, 1);
- keySlots.emplace_back(
- slots->get(std::make_pair(PlanStageSlots::kField, StringData(field))));
- }
-
- auto keyPatternTree = buildKeyPatternTree(keyPatternBuilder.obj(), keySlots);
- auto mkObjExpr = buildNewObjExpr(keyPatternTree.get());
-
- return makeFunction("applyClassicMatcher",
- makeConstant(sbe::value::TypeTags::classicMatchExpresion,
- sbe::value::bitcastFrom<const MatchExpression*>(
- root->shallowClone().release())),
- std::move(mkObjExpr));
-}
} // namespace
EvalExpr generateFilter(StageBuilderState& state,
@@ -1239,18 +1208,6 @@ EvalExpr generateFilter(StageBuilderState& state,
return EvalExpr{};
}
- // We only use the classic matcher path (aka "franken matcher") when SBE is not fully enabled.
- // Fully enabling SBE turns on the SBE plan cache, and embedding the classic matcher into the
- // query execution tree is not compatible with the plan cache's use of auto-parameterization.
- // This is because when embedding the classic matcher all of the constants used in the filter
- // are in the MatchExpression itself rather than in slots.
- if (!feature_flags::gFeatureFlagSbeFull.isEnabledAndIgnoreFCV()) {
- tassert(7097207, "Expected input slot to be defined", rootSlot || isFilterOverIxscan);
-
- return isFilterOverIxscan ? applyClassicMatcherOverIndexScan(root, slots, keyFields)
- : applyClassicMatcher(root, toEvalExpr(rootSlot), state);
- }
-
MatchExpressionVisitorContext context{state, rootSlot, root, slots, isFilterOverIxscan};
MatchExpressionPreVisitor preVisitor{&context};
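
The deleted helpers and the removed comment above record why the classic-matcher fallback (the "franken matcher") cannot coexist with the always-on SBE plan cache: an auto-parameterized, cacheable plan must read its comparison constants from externally bound parameters, whereas an embedded MatchExpression bakes those constants into the filter itself, so a cached plan built that way could only ever answer one literal query. A toy illustration of that distinction; none of these types are SBE or MatchExpression APIs:

#include <cstddef>
#include <cstdint>
#include <string>
#include <unordered_map>
#include <vector>

// Cache-friendly shape: the constant lives in a bindings vector keyed by paramId,
// so one compiled plan can be reused for {a: 5}, {a: 7}, and so on.
struct ParameterizedEqFilter {
    std::string path;     // e.g. "a"
    std::size_t paramId;  // index of the bound comparison value
};

using Document = std::unordered_map<std::string, std::int64_t>;
using Bindings = std::vector<std::int64_t>;

bool matches(const ParameterizedEqFilter& filter, const Document& doc, const Bindings& params) {
    auto it = doc.find(filter.path);
    return it != doc.end() && it->second == params[filter.paramId];
}

// Cache-hostile shape: the constant is embedded in the filter object itself, which is
// effectively what wrapping a cloned MatchExpression into the plan did.
struct EmbeddedEqFilter {
    std::string path;
    std::int64_t constant;
};
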
diff --git a/src/mongo/db/s/collection_sharding_runtime.cpp b/src/mongo/db/s/collection_sharding_runtime.cpp
index bc13cdd914c..be3c4ff4b4a 100644
--- a/src/mongo/db/s/collection_sharding_runtime.cpp
+++ b/src/mongo/db/s/collection_sharding_runtime.cpp
@@ -662,52 +662,48 @@ void CollectionShardingRuntime::_cleanupBeforeInstallingNewCollectionMetadata(
return;
}
- if (feature_flags::gFeatureFlagSbeFull.isEnabledAndIgnoreFCV()) {
- const auto oldUUID = _metadataManager->getCollectionUuid();
- const auto oldShardVersion = _metadataManager->getActiveShardVersion();
- ExecutorFuture<void>{Grid::get(opCtx)->getExecutorPool()->getFixedExecutor()}
- .then([svcCtx{opCtx->getServiceContext()}, oldUUID, oldShardVersion] {
- ThreadClient tc{"CleanUpShardedMetadata", svcCtx};
- {
- stdx::lock_guard<Client> lk{*tc.get()};
- tc->setSystemOperationKillableByStepdown(lk);
- }
- auto uniqueOpCtx{tc->makeOperationContext()};
- auto opCtx{uniqueOpCtx.get()};
-
- try {
- auto& planCache = sbe::getPlanCache(opCtx);
- planCache.removeIf([&](const sbe::PlanCacheKey& key,
- const sbe::PlanCacheEntry& entry) -> bool {
- const auto matchingCollState =
- [&](const sbe::PlanCacheKeyCollectionState& entryCollState) {
- return entryCollState.uuid == oldUUID &&
- entryCollState.shardVersion &&
- entryCollState.shardVersion->epoch == oldShardVersion.epoch() &&
- entryCollState.shardVersion->ts ==
- oldShardVersion.getTimestamp();
- };
-
- // Check whether the main collection of this plan is the one being removed
- if (matchingCollState(key.getMainCollectionState()))
+ const auto oldUUID = _metadataManager->getCollectionUuid();
+ const auto oldShardVersion = _metadataManager->getActiveShardVersion();
+ ExecutorFuture<void>{Grid::get(opCtx)->getExecutorPool()->getFixedExecutor()}
+ .then([svcCtx{opCtx->getServiceContext()}, oldUUID, oldShardVersion] {
+ ThreadClient tc{"CleanUpShardedMetadata", svcCtx};
+ {
+ stdx::lock_guard<Client> lk{*tc.get()};
+ tc->setSystemOperationKillableByStepdown(lk);
+ }
+ auto uniqueOpCtx{tc->makeOperationContext()};
+ auto opCtx{uniqueOpCtx.get()};
+
+ try {
+ auto& planCache = sbe::getPlanCache(opCtx);
+ planCache.removeIf([&](const sbe::PlanCacheKey& key,
+ const sbe::PlanCacheEntry& entry) -> bool {
+ const auto matchingCollState =
+ [&](const sbe::PlanCacheKeyCollectionState& entryCollState) {
+ return entryCollState.uuid == oldUUID && entryCollState.shardVersion &&
+ entryCollState.shardVersion->epoch == oldShardVersion.epoch() &&
+ entryCollState.shardVersion->ts == oldShardVersion.getTimestamp();
+ };
+
+ // Check whether the main collection of this plan is the one being removed
+ if (matchingCollState(key.getMainCollectionState()))
+ return true;
+
+ // Check whether a secondary collection is the one being removed
+ for (const auto& secCollState : key.getSecondaryCollectionStates()) {
+ if (matchingCollState(secCollState))
return true;
-
- // Check whether a secondary collection is the one being removed
- for (const auto& secCollState : key.getSecondaryCollectionStates()) {
- if (matchingCollState(secCollState))
- return true;
- }
-
- return false;
- });
- } catch (const DBException& ex) {
- LOGV2(6549200,
- "Interrupted deferred clean up of sharded metadata",
- "error"_attr = redact(ex));
- }
- })
- .getAsync([](auto) {});
- }
+ }
+
+ return false;
+ });
+ } catch (const DBException& ex) {
+ LOGV2(6549200,
+ "Interrupted deferred clean up of sharded metadata",
+ "error"_attr = redact(ex));
+ }
+ })
+ .getAsync([](auto) {});
}
void CollectionShardingRuntime::_checkCritSecForIndexMetadata(OperationContext* opCtx) const {
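
The hunk above keeps the deferred plan cache sweep but drops the featureFlagSbeFull guard, so the sweep is no longer gated on the feature flag. The pattern is fire-and-forget: schedule the sweep on a background executor, log (and swallow) any interruption, and discard the future so installing new collection metadata never waits on cache cleanup. Below is a rough stand-in using a detached std::thread instead of the real ExecutorFuture/ThreadClient machinery; scheduleDeferredCleanup is an invented helper.

#include <exception>
#include <functional>
#include <iostream>
#include <thread>

// Hypothetical equivalent of scheduling work on the fixed executor and then calling
// .getAsync([](auto) {}) to detach from its result, as the production code above does.
void scheduleDeferredCleanup(std::function<void()> sweepPlanCache) {
    std::thread([sweep = std::move(sweepPlanCache)] {
        try {
            sweep();  // e.g. the removeIf() sweep over the SBE plan cache
        } catch (const std::exception& ex) {
            std::cerr << "Interrupted deferred clean up of sharded metadata: " << ex.what()
                      << '\n';
        }
    }).detach();  // nothing waits on the result
}
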