Diffstat (limited to 'src/mongo/db/query')
-rw-r--r--  src/mongo/db/query/collection_query_info.cpp       |   6
-rw-r--r--  src/mongo/db/query/find.cpp                         |  59
-rw-r--r--  src/mongo/db/query/find_common.cpp                  |   6
-rw-r--r--  src/mongo/db/query/get_executor.cpp                 |  96
-rw-r--r--  src/mongo/db/query/index_bounds_builder.cpp         |  19
-rw-r--r--  src/mongo/db/query/plan_cache.cpp                   |  98
-rw-r--r--  src/mongo/db/query/plan_enumerator.cpp              |   8
-rw-r--r--  src/mongo/db/query/plan_executor_impl.cpp           |   6
-rw-r--r--  src/mongo/db/query/plan_ranker.cpp                  |  45
-rw-r--r--  src/mongo/db/query/planner_access.cpp               |   9
-rw-r--r--  src/mongo/db/query/planner_analysis.cpp             |  18
-rw-r--r--  src/mongo/db/query/planner_ixselect.cpp             |  18
-rw-r--r--  src/mongo/db/query/planner_wildcard_helpers.cpp     |  12
-rw-r--r--  src/mongo/db/query/query_planner.cpp                | 112
-rw-r--r--  src/mongo/db/query/query_planner_test_fixture.cpp   |   3
15 files changed, 375 insertions(+), 140 deletions(-)
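
Every hunk in this change applies the same mechanical conversion: a legacy stream-style log call (log(), LOG(n), warning(), error()) is replaced with the corresponding structured logv2 macro (LOGV2, LOGV2_DEBUG, LOGV2_WARNING, LOGV2_ERROR). A minimal sketch of the pattern, using the getMore example from the find.cpp hunk below (the log ID 20912, level 5, and numResults variable are taken from that hunk; this is an illustration, not an additional change):

    // Before: severity and message are built up with stream operators.
    LOG(5) << "getMore returned " << numResults << " results";

    // After: the call site carries a globally unique log ID, an explicit
    // debug level, a message template with named placeholders, and one
    // argument per placeholder bound with the "_attr" user-defined literal.
    LOGV2_DEBUG(20912,
                5,
                "getMore returned {numResults} results",
                "numResults"_attr = numResults);

Error- and warning-level call sites follow the same shape via LOGV2_ERROR and LOGV2_WARNING, which omit the debug-level argument, as the hunks below show.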
diff --git a/src/mongo/db/query/collection_query_info.cpp b/src/mongo/db/query/collection_query_info.cpp
index 9d18d8175d9..2ff45ae8fe5 100644
--- a/src/mongo/db/query/collection_query_info.cpp
+++ b/src/mongo/db/query/collection_query_info.cpp
@@ -48,6 +48,7 @@
#include "mongo/db/query/plan_cache.h"
#include "mongo/db/query/planner_ixselect.h"
#include "mongo/db/service_context.h"
+#include "mongo/logv2/log.h"
#include "mongo/util/clock_source.h"
#include "mongo/util/log.h"
@@ -178,7 +179,10 @@ void CollectionQueryInfo::notifyOfQuery(OperationContext* opCtx,
void CollectionQueryInfo::clearQueryCache() {
const Collection* coll = get.owner(this);
- LOG(1) << coll->ns() << ": clearing plan cache - collection info cache reset";
+ LOGV2_DEBUG(20907,
+ 1,
+ "{coll_ns}: clearing plan cache - collection info cache reset",
+ "coll_ns"_attr = coll->ns());
if (nullptr != _planCache.get()) {
_planCache->clear();
}
diff --git a/src/mongo/db/query/find.cpp b/src/mongo/db/query/find.cpp
index a87df84a6df..197475a9173 100644
--- a/src/mongo/db/query/find.cpp
+++ b/src/mongo/db/query/find.cpp
@@ -63,6 +63,7 @@
#include "mongo/db/stats/top.h"
#include "mongo/db/storage/storage_options.h"
#include "mongo/db/views/view_catalog.h"
+#include "mongo/logv2/log.h"
#include "mongo/s/chunk_version.h"
#include "mongo/s/stale_exception.h"
#include "mongo/util/fail_point.h"
@@ -203,8 +204,10 @@ void generateBatch(int ntoreturn,
switch (*state) {
// Log an error message and then perform the cleanup.
case PlanExecutor::FAILURE: {
- error() << "getMore executor error, stats: "
- << redact(Explain::getWinningPlanStats(exec));
+ LOGV2_ERROR(20918,
+ "getMore executor error, stats: {Explain_getWinningPlanStats_exec}",
+ "Explain_getWinningPlanStats_exec"_attr =
+ redact(Explain::getWinningPlanStats(exec)));
// We should always have a valid status object by this point.
auto status = WorkingSetCommon::getMemberObjectStatus(doc);
invariant(!status.isOK());
@@ -244,7 +247,7 @@ Message getMore(OperationContext* opCtx,
bool* isCursorAuthorized) {
invariant(ntoreturn >= 0);
- LOG(5) << "Running getMore, cursorid: " << cursorid;
+ LOGV2_DEBUG(20909, 5, "Running getMore, cursorid: {cursorid}", "cursorid"_attr = cursorid);
CurOp& curOp = *CurOp::get(opCtx);
curOp.ensureStarted();
@@ -525,8 +528,11 @@ Message getMore(OperationContext* opCtx,
cursorid = 0;
curOp.debug().cursorExhausted = true;
- LOG(5) << "getMore NOT saving client cursor, ended with state "
- << PlanExecutor::statestr(state);
+ LOGV2_DEBUG(
+ 20910,
+ 5,
+ "getMore NOT saving client cursor, ended with state {PlanExecutor_statestr_state}",
+ "PlanExecutor_statestr_state"_attr = PlanExecutor::statestr(state));
} else {
cursorFreer.dismiss();
// Continue caching the ClientCursor.
@@ -534,7 +540,10 @@ Message getMore(OperationContext* opCtx,
cursorPin->incNBatches();
exec->saveState();
exec->detachFromOperationContext();
- LOG(5) << "getMore saving client cursor ended with state " << PlanExecutor::statestr(state);
+ LOGV2_DEBUG(20911,
+ 5,
+ "getMore saving client cursor ended with state {PlanExecutor_statestr_state}",
+ "PlanExecutor_statestr_state"_attr = PlanExecutor::statestr(state));
// Set 'exhaust' if the client requested exhaust and the cursor is not exhausted.
*exhaust = opCtx->isExhaust();
@@ -567,7 +576,8 @@ Message getMore(OperationContext* opCtx,
qr.setCursorId(cursorid);
qr.setStartingFrom(startingResult);
qr.setNReturned(numResults);
- LOG(5) << "getMore returned " << numResults << " results\n";
+ LOGV2_DEBUG(
+ 20912, 5, "getMore returned {numResults} results\n", "numResults"_attr = numResults);
return Message(bb.release());
}
@@ -605,8 +615,9 @@ bool runQuery(OperationContext* opCtx,
"Can't canonicalize query");
invariant(cq.get());
- LOG(5) << "Running query:\n" << redact(cq->toString());
- LOG(2) << "Running query: " << redact(cq->toStringShort());
+ LOGV2_DEBUG(20913, 5, "Running query:\n{cq}", "cq"_attr = redact(cq->toString()));
+ LOGV2_DEBUG(
+ 20914, 2, "Running query: {cq_Short}", "cq_Short"_attr = redact(cq->toStringShort()));
// Parse, canonicalize, plan, transcribe, and get a plan executor.
AutoGetCollectionForReadCommand ctx(opCtx, nss, AutoGetCollection::ViewMode::kViewsForbidden);
@@ -704,17 +715,25 @@ bool runQuery(OperationContext* opCtx,
++numResults;
if (FindCommon::enoughForFirstBatch(qr, numResults)) {
- LOG(5) << "Enough for first batch, wantMore=" << qr.wantMore()
- << " ntoreturn=" << qr.getNToReturn().value_or(0)
- << " numResults=" << numResults;
+ LOGV2_DEBUG(20915,
+ 5,
+ "Enough for first batch, wantMore={qr_wantMore} "
+ "ntoreturn={qr_getNToReturn_value_or_0} numResults={numResults}",
+ "qr_wantMore"_attr = qr.wantMore(),
+ "qr_getNToReturn_value_or_0"_attr = qr.getNToReturn().value_or(0),
+ "numResults"_attr = numResults);
break;
}
}
// Caller expects exceptions thrown in certain cases.
if (PlanExecutor::FAILURE == state) {
- error() << "Plan executor error during find: " << PlanExecutor::statestr(state)
- << ", stats: " << redact(Explain::getWinningPlanStats(exec.get()));
+ LOGV2_ERROR(20919,
+ "Plan executor error during find: {PlanExecutor_statestr_state}, stats: "
+ "{Explain_getWinningPlanStats_exec_get}",
+ "PlanExecutor_statestr_state"_attr = PlanExecutor::statestr(state),
+ "Explain_getWinningPlanStats_exec_get"_attr =
+ redact(Explain::getWinningPlanStats(exec.get())));
uassertStatusOKWithContext(WorkingSetCommon::getMemberObjectStatus(doc),
"Executor error during OP_QUERY find");
MONGO_UNREACHABLE;
@@ -746,8 +765,11 @@ bool runQuery(OperationContext* opCtx,
});
ccId = pinnedCursor.getCursor()->cursorid();
- LOG(5) << "caching executor with cursorid " << ccId << " after returning " << numResults
- << " results";
+ LOGV2_DEBUG(20916,
+ 5,
+ "caching executor with cursorid {ccId} after returning {numResults} results",
+ "ccId"_attr = ccId,
+ "numResults"_attr = numResults);
// Set curOp.debug().exhaust if the client requested exhaust and the cursor is not
// exhausted.
@@ -768,7 +790,10 @@ bool runQuery(OperationContext* opCtx,
endQueryOp(opCtx, collection, *pinnedCursor.getCursor()->getExecutor(), numResults, ccId);
} else {
- LOG(5) << "Not caching executor but returning " << numResults << " results.";
+ LOGV2_DEBUG(20917,
+ 5,
+ "Not caching executor but returning {numResults} results.",
+ "numResults"_attr = numResults);
endQueryOp(opCtx, collection, *exec, numResults, ccId);
}
diff --git a/src/mongo/db/query/find_common.cpp b/src/mongo/db/query/find_common.cpp
index e236faccfb7..6b8a3a97b30 100644
--- a/src/mongo/db/query/find_common.cpp
+++ b/src/mongo/db/query/find_common.cpp
@@ -38,6 +38,7 @@
#include "mongo/db/curop_failpoint_helpers.h"
#include "mongo/db/query/canonical_query.h"
#include "mongo/db/query/query_request.h"
+#include "mongo/logv2/log.h"
#include "mongo/util/assert_util.h"
#include "mongo/util/log.h"
@@ -79,8 +80,9 @@ bool FindCommon::haveSpaceForNext(const BSONObj& nextDoc, long long numDocs, int
void FindCommon::waitInFindBeforeMakingBatch(OperationContext* opCtx, const CanonicalQuery& cq) {
auto whileWaitingFunc = [&, hasLogged = false]() mutable {
if (!std::exchange(hasLogged, true)) {
- log() << "Waiting in find before making batch for query - "
- << redact(cq.toStringShort());
+ LOGV2(20908,
+ "Waiting in find before making batch for query - {cq_Short}",
+ "cq_Short"_attr = redact(cq.toStringShort()));
}
};
diff --git a/src/mongo/db/query/get_executor.cpp b/src/mongo/db/query/get_executor.cpp
index 255b814f97b..4738f0fa6be 100644
--- a/src/mongo/db/query/get_executor.cpp
+++ b/src/mongo/db/query/get_executor.cpp
@@ -87,6 +87,7 @@
#include "mongo/db/server_options.h"
#include "mongo/db/service_context.h"
#include "mongo/db/storage/storage_options.h"
+#include "mongo/logv2/log.h"
#include "mongo/scripting/engine.h"
#include "mongo/util/log.h"
#include "mongo/util/str.h"
@@ -186,9 +187,14 @@ IndexEntry indexEntryFromIndexCatalogEntry(OperationContext* opCtx,
multikeyPathSet = accessMethod->getMultikeyPathSet(opCtx, &mkAccessStats);
}
- LOG(2) << "Multikey path metadata range index scan stats: { index: "
- << desc->indexName() << ", numSeeks: " << mkAccessStats.keysExamined
- << ", keysExamined: " << mkAccessStats.keysExamined << "}";
+ LOGV2_DEBUG(20920,
+ 2,
+ "Multikey path metadata range index scan stats: {{ index: "
+ "{desc_indexName}, numSeeks: {mkAccessStats_keysExamined}, keysExamined: "
+ "{mkAccessStats_keysExamined2}}}",
+ "desc_indexName"_attr = desc->indexName(),
+ "mkAccessStats_keysExamined"_attr = mkAccessStats.keysExamined,
+ "mkAccessStats_keysExamined2"_attr = mkAccessStats.keysExamined);
}
}
@@ -358,8 +364,11 @@ StatusWith<PrepareExecutionResult> prepareExecution(OperationContext* opCtx,
// This can happen as we're called by internal clients as well.
if (nullptr == collection) {
const string& ns = canonicalQuery->ns();
- LOG(2) << "Collection " << ns << " does not exist."
- << " Using EOF plan: " << redact(canonicalQuery->toStringShort());
+ LOGV2_DEBUG(20921,
+ 2,
+ "Collection {ns} does not exist. Using EOF plan: {canonicalQuery_Short}",
+ "ns"_attr = ns,
+ "canonicalQuery_Short"_attr = redact(canonicalQuery->toStringShort()));
root = std::make_unique<EOFStage>(opCtx);
return PrepareExecutionResult(std::move(canonicalQuery), nullptr, std::move(root));
}
@@ -380,7 +389,10 @@ StatusWith<PrepareExecutionResult> prepareExecution(OperationContext* opCtx,
// If we have an _id index we can use an idhack plan.
if (descriptor && IDHackStage::supportsQuery(collection, *canonicalQuery)) {
- LOG(2) << "Using idhack: " << redact(canonicalQuery->toStringShort());
+ LOGV2_DEBUG(20922,
+ 2,
+ "Using idhack: {canonicalQuery_Short}",
+ "canonicalQuery_Short"_attr = redact(canonicalQuery->toStringShort()));
root = std::make_unique<IDHackStage>(opCtx, canonicalQuery.get(), ws, descriptor);
@@ -473,7 +485,11 @@ StatusWith<PrepareExecutionResult> prepareExecution(OperationContext* opCtx,
auto querySolution = std::move(statusWithQs.getValue());
if ((plannerParams.options & QueryPlannerParams::IS_COUNT) &&
turnIxscanIntoCount(querySolution.get())) {
- LOG(2) << "Using fast count: " << redact(canonicalQuery->toStringShort());
+ LOGV2_DEBUG(20923,
+ 2,
+ "Using fast count: {canonicalQuery_Short}",
+ "canonicalQuery_Short"_attr =
+ redact(canonicalQuery->toStringShort()));
}
auto root =
@@ -499,7 +515,10 @@ StatusWith<PrepareExecutionResult> prepareExecution(OperationContext* opCtx,
if (internalQueryPlanOrChildrenIndependently.load() &&
SubplanStage::canUseSubplanning(*canonicalQuery)) {
- LOG(2) << "Running query as sub-queries: " << redact(canonicalQuery->toStringShort());
+ LOGV2_DEBUG(20924,
+ 2,
+ "Running query as sub-queries: {canonicalQuery_Short}",
+ "canonicalQuery_Short"_attr = redact(canonicalQuery->toStringShort()));
root = std::make_unique<SubplanStage>(
opCtx, collection, ws, plannerParams, canonicalQuery.get());
@@ -524,8 +543,13 @@ StatusWith<PrepareExecutionResult> prepareExecution(OperationContext* opCtx,
auto root =
StageBuilder::build(opCtx, collection, *canonicalQuery, *solutions[i], ws);
- LOG(2) << "Using fast count: " << redact(canonicalQuery->toStringShort())
- << ", planSummary: " << Explain::getPlanSummary(root.get());
+ LOGV2_DEBUG(20925,
+ 2,
+ "Using fast count: {canonicalQuery_Short}, planSummary: "
+ "{Explain_getPlanSummary_root_get}",
+ "canonicalQuery_Short"_attr = redact(canonicalQuery->toStringShort()),
+ "Explain_getPlanSummary_root_get"_attr =
+ Explain::getPlanSummary(root.get()));
return PrepareExecutionResult(
std::move(canonicalQuery), std::move(solutions[i]), std::move(root));
@@ -537,9 +561,12 @@ StatusWith<PrepareExecutionResult> prepareExecution(OperationContext* opCtx,
// Only one possible plan. Run it. Build the stages from the solution.
auto root = StageBuilder::build(opCtx, collection, *canonicalQuery, *solutions[0], ws);
- LOG(2) << "Only one plan is available; it will be run but will not be cached. "
- << redact(canonicalQuery->toStringShort())
- << ", planSummary: " << Explain::getPlanSummary(root.get());
+ LOGV2_DEBUG(20926,
+ 2,
+ "Only one plan is available; it will be run but will not be cached. "
+ "{canonicalQuery_Short}, planSummary: {Explain_getPlanSummary_root_get}",
+ "canonicalQuery_Short"_attr = redact(canonicalQuery->toStringShort()),
+ "Explain_getPlanSummary_root_get"_attr = Explain::getPlanSummary(root.get()));
return PrepareExecutionResult(
std::move(canonicalQuery), std::move(solutions[0]), std::move(root));
@@ -731,8 +758,11 @@ StatusWith<unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorDelete(
if (!collection) {
// Treat collections that do not exist as empty collections. Return a PlanExecutor which
// contains an EOF stage.
- LOG(2) << "Collection " << nss.ns() << " does not exist."
- << " Using EOF stage: " << redact(request->getQuery());
+ LOGV2_DEBUG(20927,
+ 2,
+ "Collection {nss_ns} does not exist. Using EOF stage: {request_getQuery}",
+ "nss_ns"_attr = nss.ns(),
+ "request_getQuery"_attr = redact(request->getQuery()));
return PlanExecutor::make(
opCtx, std::move(ws), std::make_unique<EOFStage>(opCtx), nullptr, policy, nss);
}
@@ -759,7 +789,10 @@ StatusWith<unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorDelete(
if (descriptor && CanonicalQuery::isSimpleIdQuery(unparsedQuery) &&
request->getProj().isEmpty() && hasCollectionDefaultCollation) {
- LOG(2) << "Using idhack: " << redact(unparsedQuery);
+ LOGV2_DEBUG(20928,
+ 2,
+ "Using idhack: {unparsedQuery}",
+ "unparsedQuery"_attr = redact(unparsedQuery));
auto idHackStage = std::make_unique<IDHackStage>(
opCtx, unparsedQuery["_id"].wrap(), ws.get(), descriptor);
@@ -880,8 +913,11 @@ StatusWith<unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorUpdate(
// we are an explain. If the collection doesn't exist, we're not an explain, and the upsert flag
// is true, we expect the caller to have created the collection already.
if (!collection) {
- LOG(2) << "Collection " << nss.ns() << " does not exist."
- << " Using EOF stage: " << redact(request->getQuery());
+ LOGV2_DEBUG(20929,
+ 2,
+ "Collection {nss_ns} does not exist. Using EOF stage: {request_getQuery}",
+ "nss_ns"_attr = nss.ns(),
+ "request_getQuery"_attr = redact(request->getQuery()));
return PlanExecutor::make(
opCtx, std::move(ws), std::make_unique<EOFStage>(opCtx), nullptr, policy, nss);
}
@@ -906,7 +942,10 @@ StatusWith<unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorUpdate(
if (descriptor && CanonicalQuery::isSimpleIdQuery(unparsedQuery) &&
request->getProj().isEmpty() && hasCollectionDefaultCollation) {
- LOG(2) << "Using idhack: " << redact(unparsedQuery);
+ LOGV2_DEBUG(20930,
+ 2,
+ "Using idhack: {unparsedQuery}",
+ "unparsedQuery"_attr = redact(unparsedQuery));
// Working set 'ws' is discarded. InternalPlanner::updateWithIdHack() makes its own
// WorkingSet.
@@ -1507,8 +1546,13 @@ StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorForS
auto root =
StageBuilder::build(opCtx, collection, *parsedDistinct->getQuery(), *soln, ws.get());
- LOG(2) << "Using fast distinct: " << redact(parsedDistinct->getQuery()->toStringShort())
- << ", planSummary: " << Explain::getPlanSummary(root.get());
+ LOGV2_DEBUG(20931,
+ 2,
+ "Using fast distinct: {parsedDistinct_getQuery_Short}, planSummary: "
+ "{Explain_getPlanSummary_root_get}",
+ "parsedDistinct_getQuery_Short"_attr =
+ redact(parsedDistinct->getQuery()->toStringShort()),
+ "Explain_getPlanSummary_root_get"_attr = Explain::getPlanSummary(root.get()));
return PlanExecutor::make(parsedDistinct->releaseQuery(),
std::move(ws),
@@ -1548,8 +1592,14 @@ getExecutorDistinctFromIndexSolutions(OperationContext* opCtx,
auto root = StageBuilder::build(
opCtx, collection, *parsedDistinct->getQuery(), *currentSolution, ws.get());
- LOG(2) << "Using fast distinct: " << redact(parsedDistinct->getQuery()->toStringShort())
- << ", planSummary: " << Explain::getPlanSummary(root.get());
+ LOGV2_DEBUG(20932,
+ 2,
+ "Using fast distinct: {parsedDistinct_getQuery_Short}, planSummary: "
+ "{Explain_getPlanSummary_root_get}",
+ "parsedDistinct_getQuery_Short"_attr =
+ redact(parsedDistinct->getQuery()->toStringShort()),
+ "Explain_getPlanSummary_root_get"_attr =
+ Explain::getPlanSummary(root.get()));
return PlanExecutor::make(parsedDistinct->releaseQuery(),
std::move(ws),
diff --git a/src/mongo/db/query/index_bounds_builder.cpp b/src/mongo/db/query/index_bounds_builder.cpp
index 1e3a5bf9d47..9bb746228e1 100644
--- a/src/mongo/db/query/index_bounds_builder.cpp
+++ b/src/mongo/db/query/index_bounds_builder.cpp
@@ -50,6 +50,7 @@
#include "mongo/db/query/planner_ixselect.h"
#include "mongo/db/query/planner_wildcard_helpers.h"
#include "mongo/db/query/query_knobs_gen.h"
+#include "mongo/logv2/log.h"
#include "mongo/util/log.h"
#include "mongo/util/str.h"
#include "third_party/s2/s2cell.h"
@@ -887,13 +888,15 @@ void IndexBoundsBuilder::_translatePredicate(const MatchExpression* expr,
*tightnessOut = IndexBoundsBuilder::INEXACT_FETCH;
} else {
- warning() << "Planner error trying to build geo bounds for " << elt.toString()
- << " index element.";
+ LOGV2_WARNING(20934,
+ "Planner error trying to build geo bounds for {elt} index element.",
+ "elt"_attr = elt.toString());
verify(0);
}
} else {
- warning() << "Planner error, trying to build bounds for expression: "
- << redact(expr->debugString());
+ LOGV2_WARNING(20935,
+ "Planner error, trying to build bounds for expression: {expr_debugString}",
+ "expr_debugString"_attr = redact(expr->debugString()));
verify(0);
}
}
@@ -1198,9 +1201,11 @@ void IndexBoundsBuilder::alignBounds(IndexBounds* bounds, const BSONObj& kp, int
}
if (!bounds->isValidFor(kp, scanDir)) {
- log() << "INVALID BOUNDS: " << redact(bounds->toString()) << endl
- << "kp = " << redact(kp) << endl
- << "scanDir = " << scanDir;
+ LOGV2(20933,
+ "INVALID BOUNDS: {bounds}\nkp = {kp}\nscanDir = {scanDir}",
+ "bounds"_attr = redact(bounds->toString()),
+ "kp"_attr = redact(kp),
+ "scanDir"_attr = scanDir);
MONGO_UNREACHABLE;
}
}
diff --git a/src/mongo/db/query/plan_cache.cpp b/src/mongo/db/query/plan_cache.cpp
index 9d03e0b1012..5e25131d41f 100644
--- a/src/mongo/db/query/plan_cache.cpp
+++ b/src/mongo/db/query/plan_cache.cpp
@@ -51,6 +51,7 @@
#include "mongo/db/query/planner_ixselect.h"
#include "mongo/db/query/query_knobs_gen.h"
#include "mongo/db/query/query_solution.h"
+#include "mongo/logv2/log.h"
#include "mongo/util/assert_util.h"
#include "mongo/util/hex.h"
#include "mongo/util/log.h"
@@ -438,8 +439,10 @@ std::unique_ptr<CachedSolution> PlanCache::getCacheEntryIfActive(const PlanCache
PlanCache::GetResult res = get(key);
if (res.state == PlanCache::CacheEntryState::kPresentInactive) {
- LOG(2) << "Not using cached entry for " << redact(res.cachedSolution->toString())
- << " since it is inactive";
+ LOGV2_DEBUG(20936,
+ 2,
+ "Not using cached entry for {res_cachedSolution} since it is inactive",
+ "res_cachedSolution"_attr = redact(res.cachedSolution->toString()));
return nullptr;
}
@@ -460,9 +463,17 @@ PlanCache::NewEntryState PlanCache::getNewEntryState(const CanonicalQuery& query
double growthCoefficient) {
NewEntryState res;
if (!oldEntry) {
- LOG(1) << "Creating inactive cache entry for query shape " << redact(query.toStringShort())
- << " queryHash " << unsignedIntToFixedLengthHex(queryHash) << " planCacheKey "
- << unsignedIntToFixedLengthHex(planCacheKey) << " with works value " << newWorks;
+ LOGV2_DEBUG(20937,
+ 1,
+ "Creating inactive cache entry for query shape {query_Short} queryHash "
+ "{unsignedIntToFixedLengthHex_queryHash} planCacheKey "
+ "{unsignedIntToFixedLengthHex_planCacheKey} with works value {newWorks}",
+ "query_Short"_attr = redact(query.toStringShort()),
+ "unsignedIntToFixedLengthHex_queryHash"_attr =
+ unsignedIntToFixedLengthHex(queryHash),
+ "unsignedIntToFixedLengthHex_planCacheKey"_attr =
+ unsignedIntToFixedLengthHex(planCacheKey),
+ "newWorks"_attr = newWorks);
res.shouldBeCreated = true;
res.shouldBeActive = false;
return res;
@@ -472,18 +483,35 @@ PlanCache::NewEntryState PlanCache::getNewEntryState(const CanonicalQuery& query
// The new plan did better than the currently stored active plan. This case may
// occur if many MultiPlanners are run simultaneously.
- LOG(1) << "Replacing active cache entry for query " << redact(query.toStringShort())
- << " queryHash " << unsignedIntToFixedLengthHex(queryHash) << " planCacheKey "
- << unsignedIntToFixedLengthHex(planCacheKey) << " with works " << oldEntry->works
- << " with a plan with works " << newWorks;
+ LOGV2_DEBUG(20938,
+ 1,
+ "Replacing active cache entry for query {query_Short} queryHash "
+ "{unsignedIntToFixedLengthHex_queryHash} planCacheKey "
+ "{unsignedIntToFixedLengthHex_planCacheKey} with works {oldEntry_works} with a "
+ "plan with works {newWorks}",
+ "query_Short"_attr = redact(query.toStringShort()),
+ "unsignedIntToFixedLengthHex_queryHash"_attr =
+ unsignedIntToFixedLengthHex(queryHash),
+ "unsignedIntToFixedLengthHex_planCacheKey"_attr =
+ unsignedIntToFixedLengthHex(planCacheKey),
+ "oldEntry_works"_attr = oldEntry->works,
+ "newWorks"_attr = newWorks);
res.shouldBeCreated = true;
res.shouldBeActive = true;
} else if (oldEntry->isActive) {
- LOG(1) << "Attempt to write to the planCache for query " << redact(query.toStringShort())
- << " queryHash " << unsignedIntToFixedLengthHex(queryHash) << " planCacheKey "
- << unsignedIntToFixedLengthHex(planCacheKey) << " with a plan with works "
- << newWorks << " is a noop, since there's already a plan with works value "
- << oldEntry->works;
+ LOGV2_DEBUG(20939,
+ 1,
+ "Attempt to write to the planCache for query {query_Short} queryHash "
+ "{unsignedIntToFixedLengthHex_queryHash} planCacheKey "
+ "{unsignedIntToFixedLengthHex_planCacheKey} with a plan with works {newWorks} "
+ "is a noop, since there's already a plan with works value {oldEntry_works}",
+ "query_Short"_attr = redact(query.toStringShort()),
+ "unsignedIntToFixedLengthHex_queryHash"_attr =
+ unsignedIntToFixedLengthHex(queryHash),
+ "unsignedIntToFixedLengthHex_planCacheKey"_attr =
+ unsignedIntToFixedLengthHex(planCacheKey),
+ "newWorks"_attr = newWorks,
+ "oldEntry_works"_attr = oldEntry->works);
// There is already an active cache entry with a higher works value.
// We do nothing.
res.shouldBeCreated = false;
@@ -499,11 +527,18 @@ PlanCache::NewEntryState PlanCache::getNewEntryState(const CanonicalQuery& query
const double increasedWorks = std::max(
oldEntry->works + 1u, static_cast<size_t>(oldEntry->works * growthCoefficient));
- LOG(1) << "Increasing work value associated with cache entry for query "
- << redact(query.toStringShort()) << " queryHash "
- << unsignedIntToFixedLengthHex(queryHash) << " planCacheKey "
- << unsignedIntToFixedLengthHex(planCacheKey) << " from " << oldEntry->works << " to "
- << increasedWorks;
+ LOGV2_DEBUG(
+ 20940,
+ 1,
+ "Increasing work value associated with cache entry for query {query_Short} queryHash "
+ "{unsignedIntToFixedLengthHex_queryHash} planCacheKey "
+ "{unsignedIntToFixedLengthHex_planCacheKey} from {oldEntry_works} to {increasedWorks}",
+ "query_Short"_attr = redact(query.toStringShort()),
+ "unsignedIntToFixedLengthHex_queryHash"_attr = unsignedIntToFixedLengthHex(queryHash),
+ "unsignedIntToFixedLengthHex_planCacheKey"_attr =
+ unsignedIntToFixedLengthHex(planCacheKey),
+ "oldEntry_works"_attr = oldEntry->works,
+ "increasedWorks"_attr = increasedWorks);
oldEntry->works = increasedWorks;
// Don't create a new entry.
@@ -512,10 +547,19 @@ PlanCache::NewEntryState PlanCache::getNewEntryState(const CanonicalQuery& query
// This plan performed just as well or better than we expected, based on the
// inactive entry's works. We use this as an indicator that it's safe to
// cache (as an active entry) the plan this query used for the future.
- LOG(1) << "Inactive cache entry for query " << redact(query.toStringShort())
- << " queryHash " << unsignedIntToFixedLengthHex(queryHash) << " planCacheKey "
- << unsignedIntToFixedLengthHex(planCacheKey) << " with works " << oldEntry->works
- << " is being promoted to active entry with works value " << newWorks;
+ LOGV2_DEBUG(20941,
+ 1,
+ "Inactive cache entry for query {query_Short} queryHash "
+ "{unsignedIntToFixedLengthHex_queryHash} planCacheKey "
+ "{unsignedIntToFixedLengthHex_planCacheKey} with works {oldEntry_works} is "
+ "being promoted to active entry with works value {newWorks}",
+ "query_Short"_attr = redact(query.toStringShort()),
+ "unsignedIntToFixedLengthHex_queryHash"_attr =
+ unsignedIntToFixedLengthHex(queryHash),
+ "unsignedIntToFixedLengthHex_planCacheKey"_attr =
+ unsignedIntToFixedLengthHex(planCacheKey),
+ "oldEntry_works"_attr = oldEntry->works,
+ "newWorks"_attr = newWorks);
// We'll replace the old inactive entry with an active entry.
res.shouldBeCreated = true;
res.shouldBeActive = true;
@@ -594,8 +638,12 @@ Status PlanCache::set(const CanonicalQuery& query,
std::unique_ptr<PlanCacheEntry> evictedEntry = _cache.add(key, newEntry.release());
if (nullptr != evictedEntry.get()) {
- LOG(1) << query.nss() << ": plan cache maximum size exceeded - "
- << "removed least recently used entry " << redact(evictedEntry->toString());
+ LOGV2_DEBUG(20942,
+ 1,
+ "{query_nss}: plan cache maximum size exceeded - removed least recently used "
+ "entry {evictedEntry}",
+ "query_nss"_attr = query.nss(),
+ "evictedEntry"_attr = redact(evictedEntry->toString()));
}
return Status::OK();
diff --git a/src/mongo/db/query/plan_enumerator.cpp b/src/mongo/db/query/plan_enumerator.cpp
index f213e98c6c9..733b17056eb 100644
--- a/src/mongo/db/query/plan_enumerator.cpp
+++ b/src/mongo/db/query/plan_enumerator.cpp
@@ -35,6 +35,7 @@
#include "mongo/db/query/index_tag.h"
#include "mongo/db/query/indexability.h"
+#include "mongo/logv2/log.h"
#include "mongo/util/log.h"
#include "mongo/util/string_map.h"
@@ -340,7 +341,7 @@ PlanEnumerator::MemoID PlanEnumerator::memoIDForNode(MatchExpression* node) {
stdx::unordered_map<MatchExpression*, MemoID>::iterator it = _nodeToId.find(node);
if (_nodeToId.end() == it) {
- error() << "Trying to look up memo entry for node, none found.";
+ LOGV2_ERROR(20945, "Trying to look up memo entry for node, none found.");
MONGO_UNREACHABLE;
}
@@ -359,7 +360,8 @@ unique_ptr<MatchExpression> PlanEnumerator::getNext() {
tagForSort(tree.get());
_root->resetTag();
- LOG(5) << "Enumerator: memo just before moving:" << endl << dumpMemo();
+ LOGV2_DEBUG(
+ 20943, 5, "Enumerator: memo just before moving:\n{dumpMemo}", "dumpMemo"_attr = dumpMemo());
_done = nextMemo(memoIDForNode(_root));
return tree;
}
@@ -1568,7 +1570,7 @@ void PlanEnumerator::compound(const vector<MatchExpression*>& tryCompound,
//
void PlanEnumerator::tagMemo(size_t id) {
- LOG(5) << "Tagging memoID " << id;
+ LOGV2_DEBUG(20944, 5, "Tagging memoID {id}", "id"_attr = id);
NodeAssignment* assign = _memo[id];
verify(nullptr != assign);
diff --git a/src/mongo/db/query/plan_executor_impl.cpp b/src/mongo/db/query/plan_executor_impl.cpp
index c4e1bb83c7d..fae86c1e8ce 100644
--- a/src/mongo/db/query/plan_executor_impl.cpp
+++ b/src/mongo/db/query/plan_executor_impl.cpp
@@ -56,6 +56,7 @@
#include "mongo/db/query/plan_yield_policy.h"
#include "mongo/db/repl/replication_coordinator.h"
#include "mongo/db/service_context.h"
+#include "mongo/logv2/log.h"
#include "mongo/util/fail_point.h"
#include "mongo/util/log.h"
#include "mongo/util/scopeguard.h"
@@ -635,8 +636,9 @@ PlanExecutor::ExecState PlanExecutorImpl::_getNextImpl(Snapshotted<Document>* ob
}
return true;
}))) {
- log() << "PlanExecutor - planExecutorHangBeforeShouldWaitForInserts fail point "
- "enabled. Blocking until fail point is disabled.";
+ LOGV2(20946,
+ "PlanExecutor - planExecutorHangBeforeShouldWaitForInserts fail point "
+ "enabled. Blocking until fail point is disabled.");
planExecutorHangBeforeShouldWaitForInserts.pauseWhileSet();
}
if (!_shouldWaitForInserts()) {
diff --git a/src/mongo/db/query/plan_ranker.cpp b/src/mongo/db/query/plan_ranker.cpp
index 7b998c6d583..2c5abd7cad6 100644
--- a/src/mongo/db/query/plan_ranker.cpp
+++ b/src/mongo/db/query/plan_ranker.cpp
@@ -44,6 +44,7 @@
#include "mongo/db/query/query_knobs_gen.h"
#include "mongo/db/query/query_solution.h"
#include "mongo/db/server_options.h"
+#include "mongo/logv2/log.h"
#include "mongo/util/log.h"
namespace {
@@ -93,25 +94,42 @@ StatusWith<std::unique_ptr<PlanRankingDecision>> PlanRanker::pickBestPlan(
// Compute score for each tree. Record the best.
for (size_t i = 0; i < statTrees.size(); ++i) {
if (!candidates[i].failed) {
- LOG(5) << "Scoring plan " << i << ":" << endl
- << redact(candidates[i].solution->toString()) << "Stats:\n"
- << redact(Explain::statsToBSON(*statTrees[i])
- .jsonString(ExtendedRelaxedV2_0_0, true));
- LOG(2) << "Scoring query plan: " << Explain::getPlanSummary(candidates[i].root)
- << " planHitEOF=" << statTrees[i]->common.isEOF;
+ LOGV2_DEBUG(
+ 20956,
+ 5,
+ "Scoring plan "
+ "{i}:\n{candidates_i_solution}Stats:\n{Explain_statsToBSON_statTrees_i_jsonString_"
+ "ExtendedRelaxedV2_0_0_true}",
+ "i"_attr = i,
+ "candidates_i_solution"_attr = redact(candidates[i].solution->toString()),
+ "Explain_statsToBSON_statTrees_i_jsonString_ExtendedRelaxedV2_0_0_true"_attr =
+ redact(Explain::statsToBSON(*statTrees[i])
+ .jsonString(ExtendedRelaxedV2_0_0, true)));
+ LOGV2_DEBUG(20957,
+ 2,
+ "Scoring query plan: {Explain_getPlanSummary_candidates_i_root} "
+ "planHitEOF={statTrees_i_common_isEOF}",
+ "Explain_getPlanSummary_candidates_i_root"_attr =
+ Explain::getPlanSummary(candidates[i].root),
+ "statTrees_i_common_isEOF"_attr = statTrees[i]->common.isEOF);
double score = scoreTree(statTrees[i].get());
- LOG(5) << "score = " << score;
+ LOGV2_DEBUG(20958, 5, "score = {score}", "score"_attr = score);
if (statTrees[i]->common.isEOF) {
- LOG(5) << "Adding +" << eofBonus << " EOF bonus to score.";
+ LOGV2_DEBUG(
+ 20959, 5, "Adding +{eofBonus} EOF bonus to score.", "eofBonus"_attr = eofBonus);
score += 1;
}
scoresAndCandidateindices.push_back(std::make_pair(score, i));
} else {
failed.push_back(i);
- LOG(2) << "Not scording plan: " << Explain::getPlanSummary(candidates[i].root)
- << " because the plan failed.";
+ LOGV2_DEBUG(20960,
+ 2,
+ "Not scording plan: {Explain_getPlanSummary_candidates_i_root} because the "
+ "plan failed.",
+ "Explain_getPlanSummary_candidates_i_root"_attr =
+ Explain::getPlanSummary(candidates[i].root));
}
}
@@ -265,14 +283,17 @@ double PlanRanker::scoreTree(const PlanStageStats* stats) {
<< str::convertDoubleToString(noSortBonus) << " noSortBonus + "
<< str::convertDoubleToString(noIxisectBonus)
<< " noIxisectBonus = " << str::convertDoubleToString(tieBreakers) << ")";
- LOG(2) << sb.str();
+ LOGV2_DEBUG(20961, 2, "{sb_str}", "sb_str"_attr = sb.str());
if (internalQueryForceIntersectionPlans.load()) {
if (hasStage(STAGE_AND_HASH, stats) || hasStage(STAGE_AND_SORTED, stats)) {
// The boost should be >2.001 to make absolutely sure the ixisect plan will win due
// to the combination of 1) productivity, 2) eof bonus, and 3) no ixisect bonus.
score += 3;
- LOG(5) << "Score boosted to " << score << " due to intersection forcing.";
+ LOGV2_DEBUG(20962,
+ 5,
+ "Score boosted to {score} due to intersection forcing.",
+ "score"_attr = score);
}
}
diff --git a/src/mongo/db/query/planner_access.cpp b/src/mongo/db/query/planner_access.cpp
index 7a09019424c..9510cc85481 100644
--- a/src/mongo/db/query/planner_access.cpp
+++ b/src/mongo/db/query/planner_access.cpp
@@ -50,6 +50,7 @@
#include "mongo/db/query/query_knobs_gen.h"
#include "mongo/db/query/query_planner.h"
#include "mongo/db/query/query_planner_common.h"
+#include "mongo/logv2/log.h"
#include "mongo/util/log.h"
#include "mongo/util/transitional_tools_do_not_use/vector_spooling.h"
@@ -1112,8 +1113,10 @@ std::unique_ptr<QuerySolutionNode> QueryPlannerAccess::buildIndexedAnd(
} else {
// We can't use sort-based intersection, and hash-based intersection is disabled.
// Clean up the index scans and bail out by returning NULL.
- LOG(5) << "Can't build index intersection solution: "
- << "AND_SORTED is not possible and AND_HASH is disabled.";
+ LOGV2_DEBUG(20947,
+ 5,
+ "Can't build index intersection solution: AND_SORTED is not possible and "
+ "AND_HASH is disabled.");
return nullptr;
}
}
@@ -1180,7 +1183,7 @@ std::unique_ptr<QuerySolutionNode> QueryPlannerAccess::buildIndexedOr(
// when any of our children lack index tags. If a node lacks an index tag it cannot
// be answered via an index.
if (!inArrayOperator && 0 != root->numChildren()) {
- warning() << "planner OR error, non-indexed child of OR.";
+ LOGV2_WARNING(20948, "planner OR error, non-indexed child of OR.");
// We won't enumerate an OR without indices for each child, so this isn't an issue, even
// if we have an AND with an OR child -- we won't get here unless the OR is fully
// indexed.
diff --git a/src/mongo/db/query/planner_analysis.cpp b/src/mongo/db/query/planner_analysis.cpp
index 0dcde5c00d7..942d27d7512 100644
--- a/src/mongo/db/query/planner_analysis.cpp
+++ b/src/mongo/db/query/planner_analysis.cpp
@@ -42,6 +42,7 @@
#include "mongo/db/matcher/expression_geo.h"
#include "mongo/db/query/query_planner.h"
#include "mongo/db/query/query_planner_common.h"
+#include "mongo/logv2/log.h"
#include "mongo/util/log.h"
namespace mongo {
@@ -344,7 +345,10 @@ std::unique_ptr<QuerySolutionNode> addSortKeyGeneratorStageIfNeeded(
std::unique_ptr<ProjectionNode> analyzeProjection(const CanonicalQuery& query,
std::unique_ptr<QuerySolutionNode> solnRoot,
const bool hasSortStage) {
- LOG(5) << "PROJECTION: Current plan is:\n" << redact(solnRoot->toString());
+ LOGV2_DEBUG(20949,
+ 5,
+ "PROJECTION: Current plan is:\n{solnRoot}",
+ "solnRoot"_attr = redact(solnRoot->toString()));
// If the projection requires the entire document we add a fetch stage if not present. Otherwise
// we add a fetch stage if we are not covered.
@@ -644,8 +648,11 @@ bool QueryPlannerAnalysis::explodeForSort(const CanonicalQuery& query,
// Too many ixscans spoil the performance.
if (totalNumScans > (size_t)internalQueryMaxScansToExplode.load()) {
- LOG(5) << "Could expand ixscans to pull out sort order but resulting scan count"
- << "(" << totalNumScans << ") is too high.";
+ LOGV2_DEBUG(20950,
+ 5,
+ "Could expand ixscans to pull out sort order but resulting scan "
+ "count({totalNumScans}) is too high.",
+ "totalNumScans"_attr = totalNumScans);
return false;
}
@@ -705,7 +712,10 @@ QuerySolutionNode* QueryPlannerAnalysis::analyzeSort(const CanonicalQuery& query
BSONObj reverseSort = QueryPlannerCommon::reverseSortObj(sortObj);
if (sorts.end() != sorts.find(reverseSort)) {
QueryPlannerCommon::reverseScans(solnRoot);
- LOG(5) << "Reversing ixscan to provide sort. Result: " << redact(solnRoot->toString());
+ LOGV2_DEBUG(20951,
+ 5,
+ "Reversing ixscan to provide sort. Result: {solnRoot}",
+ "solnRoot"_attr = redact(solnRoot->toString()));
return solnRoot;
}
diff --git a/src/mongo/db/query/planner_ixselect.cpp b/src/mongo/db/query/planner_ixselect.cpp
index b97433015f5..27ad04ac703 100644
--- a/src/mongo/db/query/planner_ixselect.cpp
+++ b/src/mongo/db/query/planner_ixselect.cpp
@@ -47,6 +47,7 @@
#include "mongo/db/query/indexability.h"
#include "mongo/db/query/planner_wildcard_helpers.h"
#include "mongo/db/query/query_planner_common.h"
+#include "mongo/logv2/log.h"
#include "mongo/util/log.h"
namespace mongo {
@@ -287,15 +288,20 @@ std::vector<IndexEntry> QueryPlannerIXSelect::findIndexesByHint(
auto hintName = firstHintElt.valueStringData();
for (auto&& entry : allIndices) {
if (entry.identifier.catalogName == hintName) {
- LOG(5) << "Hint by name specified, restricting indices to "
- << entry.keyPattern.toString();
+ LOGV2_DEBUG(20952,
+ 5,
+ "Hint by name specified, restricting indices to {entry_keyPattern}",
+ "entry_keyPattern"_attr = entry.keyPattern.toString());
out.push_back(entry);
}
}
} else {
for (auto&& entry : allIndices) {
if (SimpleBSONObjComparator::kInstance.evaluate(entry.keyPattern == hintedIndex)) {
- LOG(5) << "Hint specified, restricting indices to " << hintedIndex.toString();
+ LOGV2_DEBUG(20953,
+ 5,
+ "Hint specified, restricting indices to {hintedIndex}",
+ "hintedIndex"_attr = hintedIndex.toString());
out.push_back(entry);
}
}
@@ -579,8 +585,10 @@ bool QueryPlannerIXSelect::_compatible(const BSONElement& keyPatternElt,
} else if (IndexNames::GEO_HAYSTACK == indexedFieldType) {
return false;
} else {
- warning() << "Unknown indexing for node " << node->debugString() << " and field "
- << keyPatternElt.toString();
+ LOGV2_WARNING(20954,
+ "Unknown indexing for node {node_debugString} and field {keyPatternElt}",
+ "node_debugString"_attr = node->debugString(),
+ "keyPatternElt"_attr = keyPatternElt.toString());
verify(0);
}
}
diff --git a/src/mongo/db/query/planner_wildcard_helpers.cpp b/src/mongo/db/query/planner_wildcard_helpers.cpp
index 4f1743d7316..c04c33e02d0 100644
--- a/src/mongo/db/query/planner_wildcard_helpers.cpp
+++ b/src/mongo/db/query/planner_wildcard_helpers.cpp
@@ -39,6 +39,7 @@
#include "mongo/db/exec/projection_executor_utils.h"
#include "mongo/db/index/wildcard_key_generator.h"
#include "mongo/db/query/index_bounds.h"
+#include "mongo/logv2/log.h"
#include "mongo/util/log.h"
namespace mongo {
@@ -245,9 +246,14 @@ bool validateNumericPathComponents(const MultikeyPaths& multikeyPaths,
// all paths with and without array indices. Because this is O(2^n), we decline to answer
// queries that traverse more than 8 levels of array indices.
if (arrayIndices.size() > kWildcardMaxArrayIndexTraversalDepth) {
- LOG(2) << "Declining to answer query on field '" << queryPath.dottedField()
- << "' with $** index, as it traverses through more than "
- << kWildcardMaxArrayIndexTraversalDepth << " nested array indices.";
+ LOGV2_DEBUG(20955,
+ 2,
+ "Declining to answer query on field '{queryPath_dottedField}' with $** index, "
+ "as it traverses through more than {kWildcardMaxArrayIndexTraversalDepth} "
+ "nested array indices.",
+ "queryPath_dottedField"_attr = queryPath.dottedField(),
+ "kWildcardMaxArrayIndexTraversalDepth"_attr =
+ kWildcardMaxArrayIndexTraversalDepth);
return false;
}
diff --git a/src/mongo/db/query/query_planner.cpp b/src/mongo/db/query/query_planner.cpp
index d95ff7dd03f..966fb61275e 100644
--- a/src/mongo/db/query/query_planner.cpp
+++ b/src/mongo/db/query/query_planner.cpp
@@ -54,6 +54,7 @@
#include "mongo/db/query/planner_ixselect.h"
#include "mongo/db/query/query_planner_common.h"
#include "mongo/db/query/query_solution.h"
+#include "mongo/logv2/log.h"
#include "mongo/util/log.h"
namespace mongo {
@@ -475,10 +476,12 @@ StatusWith<std::unique_ptr<QuerySolution>> QueryPlanner::planFromCache(
// Create a copy of the expression tree. We use cachedSoln to annotate this with indices.
unique_ptr<MatchExpression> clone = query.root()->shallowClone();
- LOG(5) << "Tagging the match expression according to cache data: " << endl
- << "Filter:" << endl
- << redact(clone->debugString()) << "Cache data:" << endl
- << redact(winnerCacheData.toString());
+ LOGV2_DEBUG(20963,
+ 5,
+ "Tagging the match expression according to cache data: "
+ "\nFilter:\n{clone_debugString}Cache data:\n{winnerCacheData}",
+ "clone_debugString"_attr = redact(clone->debugString()),
+ "winnerCacheData"_attr = redact(winnerCacheData.toString()));
stdx::unordered_set<string> fields;
QueryPlannerIXSelect::getFields(query.root(), &fields);
@@ -492,7 +495,11 @@ StatusWith<std::unique_ptr<QuerySolution>> QueryPlanner::planFromCache(
const auto insertionRes = indexMap.insert(std::make_pair(ie.identifier, i));
// Be sure the key was not already in the map.
invariant(insertionRes.second);
- LOG(5) << "Index " << i << ": " << ie.identifier;
+ LOGV2_DEBUG(20964,
+ 5,
+ "Index {i}: {ie_identifier}",
+ "i"_attr = i,
+ "ie_identifier"_attr = ie.identifier);
}
Status s = tagAccordingToCache(clone.get(), winnerCacheData.tree.get(), indexMap);
@@ -503,7 +510,10 @@ StatusWith<std::unique_ptr<QuerySolution>> QueryPlanner::planFromCache(
// The MatchExpression tree is in canonical order. We must order the nodes for access planning.
prepareForAccessPlanning(clone.get());
- LOG(5) << "Tagged tree:" << endl << redact(clone->debugString());
+ LOGV2_DEBUG(20965,
+ 5,
+ "Tagged tree:\n{clone_debugString}",
+ "clone_debugString"_attr = redact(clone->debugString()));
// Use the cached index assignments to build solnRoot.
std::unique_ptr<QuerySolutionNode> solnRoot(QueryPlannerAccess::buildIndexedDataAccess(
@@ -522,21 +532,30 @@ StatusWith<std::unique_ptr<QuerySolution>> QueryPlanner::planFromCache(
<< "Failed to analyze plan from cache. Query: " << query.toStringShort());
}
- LOG(5) << "Planner: solution constructed from the cache:\n" << redact(soln->toString());
+ LOGV2_DEBUG(20966,
+ 5,
+ "Planner: solution constructed from the cache:\n{soln}",
+ "soln"_attr = redact(soln->toString()));
return {std::move(soln)};
}
// static
StatusWith<std::vector<std::unique_ptr<QuerySolution>>> QueryPlanner::plan(
const CanonicalQuery& query, const QueryPlannerParams& params) {
- LOG(5) << "Beginning planning..." << endl
- << "=============================" << endl
- << "Options = " << optionString(params.options) << endl
- << "Canonical query:" << endl
- << redact(query.toString()) << "=============================";
+ LOGV2_DEBUG(
+ 20967,
+ 5,
+ "Beginning planning...\n=============================\nOptions = "
+ "{optionString_params_options}\nCanonical query:\n{query}=============================",
+ "optionString_params_options"_attr = optionString(params.options),
+ "query"_attr = redact(query.toString()));
for (size_t i = 0; i < params.indices.size(); ++i) {
- LOG(5) << "Index " << i << " is " << params.indices[i].toString();
+ LOGV2_DEBUG(20968,
+ 5,
+ "Index {i} is {params_indices_i}",
+ "i"_attr = i,
+ "params_indices_i"_attr = params.indices[i].toString());
}
const bool canTableScan = !(params.options & QueryPlannerParams::NO_TABLE_SCAN);
@@ -572,7 +591,7 @@ StatusWith<std::vector<std::unique_ptr<QuerySolution>>> QueryPlanner::plan(
BSONElement naturalHint = dps::extractElementAtPath(hintObj, "$natural");
if (naturalHint) {
- LOG(5) << "Forcing a table scan due to hinted $natural";
+ LOGV2_DEBUG(20969, 5, "Forcing a table scan due to hinted $natural");
if (!canTableScan) {
return Status(ErrorCodes::NoQueryExecutionPlans,
"hint $natural is not allowed, because 'notablescan' is enabled");
@@ -632,7 +651,7 @@ StatusWith<std::vector<std::unique_ptr<QuerySolution>>> QueryPlanner::plan(
stdx::unordered_set<string> fields;
QueryPlannerIXSelect::getFields(query.root(), &fields);
for (auto&& field : fields) {
- LOG(5) << "Predicate over field '" << field << "'";
+ LOGV2_DEBUG(20970, 5, "Predicate over field '{field}'", "field"_attr = field);
}
fullIndexList = QueryPlannerIXSelect::expandIndexes(fields, std::move(fullIndexList));
@@ -706,7 +725,11 @@ StatusWith<std::vector<std::unique_ptr<QuerySolution>>> QueryPlanner::plan(
}
for (size_t i = 0; i < relevantIndices.size(); ++i) {
- LOG(2) << "Relevant index " << i << " is " << relevantIndices[i].toString();
+ LOGV2_DEBUG(20971,
+ 2,
+ "Relevant index {i} is {relevantIndices_i}",
+ "i"_attr = i,
+ "relevantIndices_i"_attr = relevantIndices[i].toString());
}
// Figure out how useful each index is to each predicate.
@@ -728,7 +751,10 @@ StatusWith<std::vector<std::unique_ptr<QuerySolution>>> QueryPlanner::plan(
}
// query.root() is now annotated with RelevantTag(s).
- LOG(5) << "Rated tree:" << endl << redact(query.root()->debugString());
+ LOGV2_DEBUG(20972,
+ 5,
+ "Rated tree:\n{query_root_debugString}",
+ "query_root_debugString"_attr = redact(query.root()->debugString()));
// If there is a GEO_NEAR it must have an index it can use directly.
const MatchExpression* gnNode = nullptr;
@@ -736,14 +762,17 @@ StatusWith<std::vector<std::unique_ptr<QuerySolution>>> QueryPlanner::plan(
// No index for GEO_NEAR? No query.
RelevantTag* tag = static_cast<RelevantTag*>(gnNode->getTag());
if (!tag || (0 == tag->first.size() && 0 == tag->notFirst.size())) {
- LOG(5) << "Unable to find index for $geoNear query.";
+ LOGV2_DEBUG(20973, 5, "Unable to find index for $geoNear query.");
// Don't leave tags on query tree.
query.root()->resetTag();
return Status(ErrorCodes::NoQueryExecutionPlans,
"unable to find index for $geoNear query");
}
- LOG(5) << "Rated tree after geonear processing:" << redact(query.root()->debugString());
+ LOGV2_DEBUG(20974,
+ 5,
+ "Rated tree after geonear processing:{query_root_debugString}",
+ "query_root_debugString"_attr = redact(query.root()->debugString()));
}
// Likewise, if there is a TEXT it must have an index it can use directly.
@@ -780,7 +809,10 @@ StatusWith<std::vector<std::unique_ptr<QuerySolution>>> QueryPlanner::plan(
// assigned to it.
invariant(1 == tag->first.size() + tag->notFirst.size());
- LOG(5) << "Rated tree after text processing:" << redact(query.root()->debugString());
+ LOGV2_DEBUG(20975,
+ 5,
+ "Rated tree after text processing:{query_root_debugString}",
+ "query_root_debugString"_attr = redact(query.root()->debugString()));
}
std::vector<std::unique_ptr<QuerySolution>> out;
@@ -798,8 +830,10 @@ StatusWith<std::vector<std::unique_ptr<QuerySolution>>> QueryPlanner::plan(
unique_ptr<MatchExpression> nextTaggedTree;
while ((nextTaggedTree = isp.getNext()) && (out.size() < params.maxIndexedSolutions)) {
- LOG(5) << "About to build solntree from tagged tree:" << endl
- << redact(nextTaggedTree->debugString());
+ LOGV2_DEBUG(20976,
+ 5,
+ "About to build solntree from tagged tree:\n{nextTaggedTree_debugString}",
+ "nextTaggedTree_debugString"_attr = redact(nextTaggedTree->debugString()));
// Store the plan cache index tree before calling prepareForAccessingPlanning(), so that
// the PlanCacheIndexTree has the same sort as the MatchExpression used to generate the
@@ -808,8 +842,11 @@ StatusWith<std::vector<std::unique_ptr<QuerySolution>>> QueryPlanner::plan(
std::unique_ptr<PlanCacheIndexTree> cacheData;
auto statusWithCacheData = cacheDataFromTaggedTree(clone.get(), relevantIndices);
if (!statusWithCacheData.isOK()) {
- LOG(5) << "Query is not cachable: "
- << redact(statusWithCacheData.getStatus().reason());
+ LOGV2_DEBUG(20977,
+ 5,
+ "Query is not cachable: {statusWithCacheData_getStatus_reason}",
+ "statusWithCacheData_getStatus_reason"_attr =
+ redact(statusWithCacheData.getStatus().reason()));
} else {
cacheData = std::move(statusWithCacheData.getValue());
}
@@ -828,7 +865,10 @@ StatusWith<std::vector<std::unique_ptr<QuerySolution>>> QueryPlanner::plan(
auto soln = QueryPlannerAnalysis::analyzeDataAccess(query, params, std::move(solnRoot));
if (soln) {
- LOG(5) << "Planner: adding solution:" << endl << redact(soln->toString());
+ LOGV2_DEBUG(20978,
+ 5,
+ "Planner: adding solution:\n{soln}",
+ "soln"_attr = redact(soln->toString()));
if (statusWithCacheData.isOK()) {
SolutionCacheData* scd = new SolutionCacheData();
scd->tree = std::move(cacheData);
@@ -842,7 +882,8 @@ StatusWith<std::vector<std::unique_ptr<QuerySolution>>> QueryPlanner::plan(
// Don't leave tags on query tree.
query.root()->resetTag();
- LOG(5) << "Planner: outputted " << out.size() << " indexed solutions.";
+ LOGV2_DEBUG(
+ 20979, 5, "Planner: outputted {out_size} indexed solutions.", "out_size"_attr = out.size());
// Produce legible error message for failed OR planning with a TEXT child.
// TODO: support collection scan for non-TEXT children of OR.
@@ -877,7 +918,7 @@ StatusWith<std::vector<std::unique_ptr<QuerySolution>>> QueryPlanner::plan(
return Status(ErrorCodes::NoQueryExecutionPlans,
"Failed to build whole-index solution for $hint");
}
- LOG(5) << "Planner: outputting soln that uses hinted index as scan.";
+ LOGV2_DEBUG(20980, 5, "Planner: outputting soln that uses hinted index as scan.");
std::vector<std::unique_ptr<QuerySolution>> out;
out.push_back(std::move(soln));
return {std::move(out)};
@@ -938,7 +979,8 @@ StatusWith<std::vector<std::unique_ptr<QuerySolution>>> QueryPlanner::plan(
const BSONObj kp = QueryPlannerAnalysis::getSortPattern(index.keyPattern);
if (providesSort(query, kp)) {
- LOG(5) << "Planner: outputting soln that uses index to provide sort.";
+ LOGV2_DEBUG(
+ 20981, 5, "Planner: outputting soln that uses index to provide sort.");
auto soln = buildWholeIXSoln(fullIndexList[i], query, params);
if (soln) {
PlanCacheIndexTree* indexTree = new PlanCacheIndexTree();
@@ -953,8 +995,10 @@ StatusWith<std::vector<std::unique_ptr<QuerySolution>>> QueryPlanner::plan(
}
}
if (providesSort(query, QueryPlannerCommon::reverseSortObj(kp))) {
- LOG(5) << "Planner: outputting soln that uses (reverse) index "
- << "to provide sort.";
+ LOGV2_DEBUG(
+ 20982,
+ 5,
+ "Planner: outputting soln that uses (reverse) index to provide sort.");
auto soln = buildWholeIXSoln(fullIndexList[i], query, params, -1);
if (soln) {
PlanCacheIndexTree* indexTree = new PlanCacheIndexTree();
@@ -988,7 +1032,8 @@ StatusWith<std::vector<std::unique_ptr<QuerySolution>>> QueryPlanner::plan(
QueryPlannerParams paramsForCoveredIxScan;
auto soln = buildWholeIXSoln(index, query, paramsForCoveredIxScan);
if (soln && !soln->root->fetched()) {
- LOG(5) << "Planner: outputting soln that uses index to provide projection.";
+ LOGV2_DEBUG(
+ 20983, 5, "Planner: outputting soln that uses index to provide projection.");
PlanCacheIndexTree* indexTree = new PlanCacheIndexTree();
indexTree->setIndexEntry(index);
@@ -1030,7 +1075,10 @@ StatusWith<std::vector<std::unique_ptr<QuerySolution>>> QueryPlanner::plan(
"Failed to build collection scan soln");
}
if (collscan) {
- LOG(5) << "Planner: outputting a collscan:" << endl << redact(collscan->toString());
+ LOGV2_DEBUG(20984,
+ 5,
+ "Planner: outputting a collscan:\n{collscan}",
+ "collscan"_attr = redact(collscan->toString()));
SolutionCacheData* scd = new SolutionCacheData();
scd->solnType = SolutionCacheData::COLLSCAN_SOLN;
collscan->cacheData.reset(scd);
diff --git a/src/mongo/db/query/query_planner_test_fixture.cpp b/src/mongo/db/query/query_planner_test_fixture.cpp
index db4ae678f87..c7c9bd342c6 100644
--- a/src/mongo/db/query/query_planner_test_fixture.cpp
+++ b/src/mongo/db/query/query_planner_test_fixture.cpp
@@ -42,6 +42,7 @@
#include "mongo/db/query/query_knobs_gen.h"
#include "mongo/db/query/query_planner.h"
#include "mongo/db/query/query_planner_test_lib.h"
+#include "mongo/logv2/log.h"
#include "mongo/util/log.h"
#include "mongo/util/transitional_tools_do_not_use/vector_spooling.h"
@@ -478,7 +479,7 @@ size_t QueryPlannerTest::getNumSolutions() const {
void QueryPlannerTest::dumpSolutions() const {
str::stream ost;
dumpSolutions(ost);
- log() << std::string(ost);
+ LOGV2(20985, "{std_string_ost}", "std_string_ost"_attr = std::string(ost));
}
void QueryPlannerTest::dumpSolutions(str::stream& ost) const {