-rw-r--r--  src/mongo/db/exec/cached_plan.cpp                        | 41
-rw-r--r--  src/mongo/db/exec/geo_near.cpp                           |  8
-rw-r--r--  src/mongo/db/exec/inclusion_projection_executor_test.cpp |  5
-rw-r--r--  src/mongo/db/exec/multi_plan.cpp                         | 15
-rw-r--r--  src/mongo/db/exec/projection.cpp                         |  3
-rw-r--r--  src/mongo/db/exec/projection_executor_builder_test.cpp   |  5
-rw-r--r--  src/mongo/db/exec/shard_filter.cpp                       |  9
-rw-r--r--  src/mongo/db/exec/stagedebug_cmd.cpp                     |  3
-rw-r--r--  src/mongo/db/exec/subplan.cpp                            |  3
-rw-r--r--  src/mongo/db/exec/trial_stage.cpp                        |  3
-rw-r--r--  src/mongo/db/exec/working_set_common.cpp                 |  4
-rw-r--r--  src/mongo/db/matcher/expression_geo.cpp                  |  5
-rw-r--r--  src/mongo/db/matcher/rewrite_expr.cpp                    |  9
-rw-r--r--  src/mongo/db/query/canonical_query_encoder.cpp           |  2
-rw-r--r--  src/mongo/db/query/classic_stage_builder.cpp             |  5
-rw-r--r--  src/mongo/db/query/collection_query_info.cpp             |  1
-rw-r--r--  src/mongo/db/query/find.cpp                              | 53
-rw-r--r--  src/mongo/db/query/find_common.cpp                       |  1
-rw-r--r--  src/mongo/db/query/get_executor.cpp                      | 35
-rw-r--r--  src/mongo/db/query/index_bounds_builder.cpp              |  5
-rw-r--r--  src/mongo/db/query/plan_cache.cpp                        | 16
-rw-r--r--  src/mongo/db/query/plan_enumerator.cpp                   |  8
-rw-r--r--  src/mongo/db/query/plan_ranker.cpp                       | 25
-rw-r--r--  src/mongo/db/query/planner_analysis.cpp                  |  9
-rw-r--r--  src/mongo/db/query/planner_ixselect.cpp                  |  3
-rw-r--r--  src/mongo/db/query/planner_wildcard_helpers.cpp          |  2
-rw-r--r--  src/mongo/db/query/query_planner.cpp                     | 74
-rw-r--r--  src/mongo/db/query/query_planner_test_fixture.cpp        |  2
-rw-r--r--  src/mongo/db/query/query_planner_test_lib.cpp            |  5
-rw-r--r--  src/mongo/db/query/sbe_cached_solution_planner.cpp       | 37
-rw-r--r--  src/mongo/db/query/sbe_multi_planner.cpp                 |  2
-rw-r--r--  src/mongo/db/query/sbe_stage_builder_filter.cpp          |  3
-rw-r--r--  src/mongo/db/views/durable_view_catalog.cpp              |  2
-rw-r--r--  src/mongo/db/views/view_catalog.cpp                      |  7
-rw-r--r--  src/mongo/s/query/cluster_aggregation_planner.cpp        |  1
-rw-r--r--  src/mongo/s/query/cluster_cursor_manager.cpp             |  6
-rw-r--r--  src/mongo/s/query/cluster_find.cpp                       |  4
-rw-r--r--  src/mongo/s/query/establish_cursors.cpp                  | 24
38 files changed, 116 insertions(+), 329 deletions(-)
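
Every hunk below makes the same kind of change: logv2 call sites that carried a format string with inline {placeholder} substitutions (sometimes alongside a second, constant message string) are reduced to a single constant message plus named attributes. As an illustration only, not part of this patch, here is a minimal sketch of the call shape these files converge on, assuming the mongo logv2 headers; the helper function and the log IDs below are made-up placeholders, and the messages and attribute names are borrowed from the hunks that follow.

#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery

#include "mongo/logv2/log.h"

namespace mongo {
// Hypothetical helper for illustration; IDs 99998/99999 are placeholders,
// not real allocated log IDs.
void logGetMoreExample(long long cursorId, int numResults) {
    // Old style removed by this patch: a message containing "{numResults}"
    // inline, optionally followed by a separate constant message string.
    // New style: one constant message; all variable data is passed as
    // named "_attr" arguments and emitted as structured fields.
    LOGV2_DEBUG(99999, 5, "getMore returned results", "numResults"_attr = numResults);
    LOGV2(99998, "Killing cursor as part of killing session(s)", "cursorId"_attr = cursorId);
}
}  // namespace mongo

The same shape applies to the LOGV2_WARNING, LOGV2_ERROR, and LOGV2_ERROR_OPTIONS call sites touched below.
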
diff --git a/src/mongo/db/exec/cached_plan.cpp b/src/mongo/db/exec/cached_plan.cpp
index a281fb744e5..f39253281de 100644
--- a/src/mongo/db/exec/cached_plan.cpp
+++ b/src/mongo/db/exec/cached_plan.cpp
@@ -113,9 +113,7 @@ Status CachedPlanStage::pickBestPlan(PlanYieldPolicy* yieldPolicy) {
// replanning.
LOGV2_DEBUG(20579,
1,
- "Execution of cached plan failed, falling back to replan. query: "
- "{query} planSummary: {planSummary} status: {status}",
- "Execution of cached plan failed, failling back to replan",
+ "Execution of cached plan failed, falling back to replan",
"query"_attr = redact(_canonicalQuery->toStringShort()),
"planSummary"_attr = Explain::getPlanSummary(child().get()),
"status"_attr = redact(ex.toStatus()));
@@ -161,16 +159,13 @@ Status CachedPlanStage::pickBestPlan(PlanYieldPolicy* yieldPolicy) {
// If we're here, the trial period took more than 'maxWorksBeforeReplan' work cycles. This
// plan is taking too long, so we replan from scratch.
- LOGV2_DEBUG(
- 20580,
- 1,
- "Execution of cached plan required {maxWorksBeforeReplan} works, but was originally cached "
- "with only {decisionWorks} works. Evicting cache entry and replanning query: "
- "{canonicalQuery_Short} plan summary before replan: {Explain_getPlanSummary_child_get}",
- "maxWorksBeforeReplan"_attr = maxWorksBeforeReplan,
- "decisionWorks"_attr = _decisionWorks,
- "canonicalQuery_Short"_attr = redact(_canonicalQuery->toStringShort()),
- "Explain_getPlanSummary_child_get"_attr = Explain::getPlanSummary(child().get()));
+ LOGV2_DEBUG(20580,
+ 1,
+ "Evicting cache entry and replanning query",
+ "maxWorksBeforeReplan"_attr = maxWorksBeforeReplan,
+ "decisionWorks"_attr = _decisionWorks,
+ "query"_attr = redact(_canonicalQuery->toStringShort()),
+ "planSummary"_attr = Explain::getPlanSummary(child().get()));
const bool shouldCache = true;
return replan(
@@ -233,12 +228,10 @@ Status CachedPlanStage::replan(PlanYieldPolicy* yieldPolicy, bool shouldCache, s
LOGV2_DEBUG(
20581,
1,
- "Replanning of query resulted in single query solution, which will not be cached. "
- "{canonicalQuery_Short} plan summary after replan: {Explain_getPlanSummary_child_get} "
- "previous cache entry evicted: {shouldCache_yes_no}",
- "canonicalQuery_Short"_attr = redact(_canonicalQuery->toStringShort()),
- "Explain_getPlanSummary_child_get"_attr = Explain::getPlanSummary(child().get()),
- "shouldCache_yes_no"_attr = (shouldCache ? "yes" : "no"));
+ "Replanning of query resulted in single query solution, which will not be cached.",
+ "query"_attr = redact(_canonicalQuery->toStringShort()),
+ "planSummary"_attr = Explain::getPlanSummary(child().get()),
+ "shouldCache"_attr = (shouldCache ? "yes" : "no"));
return Status::OK();
}
@@ -268,12 +261,10 @@ Status CachedPlanStage::replan(PlanYieldPolicy* yieldPolicy, bool shouldCache, s
LOGV2_DEBUG(20582,
1,
- "Replanning {canonicalQuery_Short} resulted in plan with summary: "
- "{Explain_getPlanSummary_child_get}, which {shouldCache_has_has_not} been written "
- "to the cache",
- "canonicalQuery_Short"_attr = redact(_canonicalQuery->toStringShort()),
- "Explain_getPlanSummary_child_get"_attr = Explain::getPlanSummary(child().get()),
- "shouldCache_has_has_not"_attr = (shouldCache ? "has" : "has not"));
+ "Query plan after replanning and its cache status",
+ "query"_attr = redact(_canonicalQuery->toStringShort()),
+ "planSummary"_attr = Explain::getPlanSummary(child().get()),
+ "shouldCache"_attr = (shouldCache ? "yes" : "no"));
return Status::OK();
}
diff --git a/src/mongo/db/exec/geo_near.cpp b/src/mongo/db/exec/geo_near.cpp
index 76290e52fe2..e55f085a85f 100644
--- a/src/mongo/db/exec/geo_near.cpp
+++ b/src/mongo/db/exec/geo_near.cpp
@@ -126,14 +126,14 @@ static void extractGeometries(const BSONObj& doc,
geometries->push_back(std::move(stored));
} else {
LOGV2_WARNING(23760,
- "geoNear stage read non-geometry element {nextEl} in array {el}",
- "nextEl"_attr = redact(nextEl),
- "el"_attr = redact(el));
+ "geoNear stage read non-geometry element in array",
+ "nextElement"_attr = redact(nextEl),
+ "element"_attr = redact(el));
}
}
} else {
LOGV2_WARNING(
- 23761, "geoNear stage read non-geometry element {el}", "el"_attr = redact(el));
+ 23761, "geoNear stage read non-geometry element", "element"_attr = redact(el));
}
}
}
diff --git a/src/mongo/db/exec/inclusion_projection_executor_test.cpp b/src/mongo/db/exec/inclusion_projection_executor_test.cpp
index dd66aa31122..2176652c437 100644
--- a/src/mongo/db/exec/inclusion_projection_executor_test.cpp
+++ b/src/mongo/db/exec/inclusion_projection_executor_test.cpp
@@ -83,10 +83,9 @@ public:
}
} catch (...) {
LOGV2(20587,
- "exception while testing with allowFastPath={allowFastPath} and "
- "allowFallBackToDefault={AllowFallBackToDefault}",
+ "Exception while testing",
"allowFastPath"_attr = _allowFastPath,
- "AllowFallBackToDefault"_attr = AllowFallBackToDefault);
+ "allowFallBackToDefault"_attr = AllowFallBackToDefault);
throw;
}
}
diff --git a/src/mongo/db/exec/multi_plan.cpp b/src/mongo/db/exec/multi_plan.cpp
index 2ac307edfac..ab89c47642c 100644
--- a/src/mongo/db/exec/multi_plan.cpp
+++ b/src/mongo/db/exec/multi_plan.cpp
@@ -197,22 +197,17 @@ Status MultiPlanStage::pickBestPlan(PlanYieldPolicy* yieldPolicy) {
const auto& alreadyProduced = bestCandidate.results;
const auto& bestSolution = bestCandidate.solution;
- LOGV2_DEBUG(20590,
- 5,
- "Winning solution:\n{bestSolution}",
- "bestSolution"_attr = redact(bestSolution->toString()));
- LOGV2_DEBUG(20591,
- 2,
- "Winning plan: {Explain_getPlanSummary_bestCandidate_root}",
- "Explain_getPlanSummary_bestCandidate_root"_attr =
- Explain::getPlanSummary(bestCandidate.root));
+ LOGV2_DEBUG(
+ 20590, 5, "Winning solution", "bestSolution"_attr = redact(bestSolution->toString()));
+ LOGV2_DEBUG(
+ 20591, 2, "Winning plan", "planSummary"_attr = Explain::getPlanSummary(bestCandidate.root));
_backupPlanIdx = kNoSuchPlan;
if (bestSolution->hasBlockingStage && (0 == alreadyProduced.size())) {
LOGV2_DEBUG(20592, 5, "Winner has blocking stage, looking for backup plan...");
for (auto&& ix : ranking->candidateOrder) {
if (!_candidates[ix].solution->hasBlockingStage) {
- LOGV2_DEBUG(20593, 5, "Candidate {ix} is backup child", "ix"_attr = ix);
+ LOGV2_DEBUG(20593, 5, "Backup child", "ix"_attr = ix);
_backupPlanIdx = ix;
break;
}
diff --git a/src/mongo/db/exec/projection.cpp b/src/mongo/db/exec/projection.cpp
index 4b205739032..5f8a1bee2b0 100644
--- a/src/mongo/db/exec/projection.cpp
+++ b/src/mongo/db/exec/projection.cpp
@@ -27,8 +27,6 @@
* it in the license file.
*/
-#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery
-
#include "mongo/db/exec/projection.h"
#include <boost/optional.hpp>
@@ -42,7 +40,6 @@
#include "mongo/db/jsobj.h"
#include "mongo/db/matcher/expression.h"
#include "mongo/db/record_id.h"
-#include "mongo/logv2/log.h"
#include "mongo/util/str.h"
namespace mongo {
diff --git a/src/mongo/db/exec/projection_executor_builder_test.cpp b/src/mongo/db/exec/projection_executor_builder_test.cpp
index 5fa2668a6f5..a29f09c34af 100644
--- a/src/mongo/db/exec/projection_executor_builder_test.cpp
+++ b/src/mongo/db/exec/projection_executor_builder_test.cpp
@@ -65,10 +65,9 @@ public:
base->run();
} catch (...) {
LOGV2(20597,
- "exception while testing with allowFastPath={allowFastPath} and "
- "allowFallBackToDefault={AllowFallBackToDefault}",
+ "Exception while testing",
"allowFastPath"_attr = _allowFastPath,
- "AllowFallBackToDefault"_attr = AllowFallBackToDefault);
+ "allowFallBackToDefault"_attr = AllowFallBackToDefault);
throw;
}
}
diff --git a/src/mongo/db/exec/shard_filter.cpp b/src/mongo/db/exec/shard_filter.cpp
index 519031cab1e..19c8ad4cbb2 100644
--- a/src/mongo/db/exec/shard_filter.cpp
+++ b/src/mongo/db/exec/shard_filter.cpp
@@ -89,11 +89,10 @@ PlanStage::StageState ShardFilterStage::doWork(WorkingSetID* out) {
// possible unless manually inserting data into a shard
LOGV2_WARNING(
23787,
- "no shard key found in document {member_doc_value_toBson} for shard key "
- "pattern {shardFilterer_getKeyPattern}, document may have been inserted "
- "manually into shard",
- "member_doc_value_toBson"_attr = redact(member->doc.value().toBson()),
- "shardFilterer_getKeyPattern"_attr = _shardFilterer.getKeyPattern());
+ "No shard key found in document, it may have been inserted manually "
+ "into shard",
+ "document"_attr = redact(member->doc.value().toBson()),
+ "keyPattern"_attr = _shardFilterer.getKeyPattern());
} else {
invariant(res == ShardFilterer::DocumentBelongsResult::kDoesNotBelong);
}
diff --git a/src/mongo/db/exec/stagedebug_cmd.cpp b/src/mongo/db/exec/stagedebug_cmd.cpp
index 7966f34aa2d..e07930d549a 100644
--- a/src/mongo/db/exec/stagedebug_cmd.cpp
+++ b/src/mongo/db/exec/stagedebug_cmd.cpp
@@ -27,8 +27,6 @@
* it in the license file.
*/
-#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery
-
#include "mongo/platform/basic.h"
#include <memory>
@@ -63,7 +61,6 @@
#include "mongo/db/matcher/extensions_callback_real.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/query/plan_executor_factory.h"
-#include "mongo/logv2/log.h"
namespace mongo {
diff --git a/src/mongo/db/exec/subplan.cpp b/src/mongo/db/exec/subplan.cpp
index b8abd0d620c..667a0cecfec 100644
--- a/src/mongo/db/exec/subplan.cpp
+++ b/src/mongo/db/exec/subplan.cpp
@@ -27,8 +27,6 @@
* it in the license file.
*/
-#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery
-
#include "mongo/platform/basic.h"
#include "mongo/db/exec/subplan.h"
@@ -47,7 +45,6 @@
#include "mongo/db/query/planner_analysis.h"
#include "mongo/db/query/query_planner_common.h"
#include "mongo/db/query/stage_builder_util.h"
-#include "mongo/logv2/log.h"
#include "mongo/util/scopeguard.h"
#include "mongo/util/transitional_tools_do_not_use/vector_spooling.h"
diff --git a/src/mongo/db/exec/trial_stage.cpp b/src/mongo/db/exec/trial_stage.cpp
index 0d21af2ac6b..a6d59112d16 100644
--- a/src/mongo/db/exec/trial_stage.cpp
+++ b/src/mongo/db/exec/trial_stage.cpp
@@ -27,8 +27,6 @@
* it in the license file.
*/
-#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery
-
#include "mongo/platform/basic.h"
#include "mongo/db/exec/trial_stage.h"
@@ -40,7 +38,6 @@
#include "mongo/db/exec/or.h"
#include "mongo/db/exec/queued_data_stage.h"
#include "mongo/db/exec/working_set_common.h"
-#include "mongo/logv2/log.h"
namespace mongo {
diff --git a/src/mongo/db/exec/working_set_common.cpp b/src/mongo/db/exec/working_set_common.cpp
index ef57aef5043..718f6bd4e1e 100644
--- a/src/mongo/db/exec/working_set_common.cpp
+++ b/src/mongo/db/exec/working_set_common.cpp
@@ -99,10 +99,6 @@ bool WorkingSetCommon::fetch(OperationContext* opCtx,
LOGV2_ERROR_OPTIONS(
4615603,
{logv2::UserAssertAfterLog(ErrorCodes::DataCorruptionDetected)},
- "Erroneous index key found in {namespace} with reference to non-existent "
- "record id {recordId}: {indexKeyData}. Consider dropping and then re-creating "
- "the index with key pattern {indexKeyPattern} and then running the validate "
- "command on the collection.",
"Erroneous index key found with reference to non-existent record id. Consider "
"dropping and then re-creating the index and then running the validate command "
"on the collection.",
diff --git a/src/mongo/db/matcher/expression_geo.cpp b/src/mongo/db/matcher/expression_geo.cpp
index 721dbe7553f..2129662560e 100644
--- a/src/mongo/db/matcher/expression_geo.cpp
+++ b/src/mongo/db/matcher/expression_geo.cpp
@@ -79,10 +79,7 @@ Status GeoExpression::parseQuery(const BSONObj& obj) {
BSONElement elt = geoIt.next();
if (elt.fieldNameStringData() == "$uniqueDocs") {
// Deprecated "$uniqueDocs" field
- LOGV2_WARNING(23847,
- "Deprecated $uniqueDocs option: {query}",
- "Deprecated $uniqueDocs option",
- "query"_attr = redact(obj));
+ LOGV2_WARNING(23847, "Deprecated $uniqueDocs option", "query"_attr = redact(obj));
} else {
// The element must be a geo specifier. "$box", "$center", "$geometry", etc.
geoContainer.reset(new GeometryContainer());
diff --git a/src/mongo/db/matcher/rewrite_expr.cpp b/src/mongo/db/matcher/rewrite_expr.cpp
index ef2cb036cc2..b5b12278d17 100644
--- a/src/mongo/db/matcher/rewrite_expr.cpp
+++ b/src/mongo/db/matcher/rewrite_expr.cpp
@@ -44,11 +44,8 @@ using CmpOp = ExpressionCompare::CmpOp;
RewriteExpr::RewriteResult RewriteExpr::rewrite(const boost::intrusive_ptr<Expression>& expression,
const CollatorInterface* collator) {
- LOGV2_DEBUG(20725,
- 5,
- "Expression prior to rewrite: {expression}",
- "Expression prior to rewrite",
- "expression"_attr = expression->serialize(false));
+ LOGV2_DEBUG(
+ 20725, 5, "Expression prior to rewrite", "expression"_attr = expression->serialize(false));
RewriteExpr rewriteExpr(collator);
std::unique_ptr<MatchExpression> matchExpression;
@@ -57,13 +54,11 @@ RewriteExpr::RewriteResult RewriteExpr::rewrite(const boost::intrusive_ptr<Expre
matchExpression = std::move(matchTree);
LOGV2_DEBUG(20726,
5,
- "Post-rewrite MatchExpression: {expression}",
"Post-rewrite MatchExpression",
"expression"_attr = matchExpression->debugString());
matchExpression = MatchExpression::optimize(std::move(matchExpression));
LOGV2_DEBUG(20727,
5,
- "Post-rewrite/post-optimized MatchExpression: {expression}",
"Post-rewrite/post-optimized MatchExpression",
"expression"_attr = matchExpression->debugString());
}
diff --git a/src/mongo/db/query/canonical_query_encoder.cpp b/src/mongo/db/query/canonical_query_encoder.cpp
index ce30e6f1353..fede266acc1 100644
--- a/src/mongo/db/query/canonical_query_encoder.cpp
+++ b/src/mongo/db/query/canonical_query_encoder.cpp
@@ -269,7 +269,6 @@ void encodeGeoMatchExpression(const GeoMatchExpression* tree, StringBuilder* key
*keyBuilder << "ss";
} else {
LOGV2_ERROR(23849,
- "Unknown CRS type {crsType} in geometry of type {geometryType}",
"Unknown CRS type in geometry",
"crsType"_attr = (int)geoQuery.getGeometry().getNativeCRS(),
"geometryType"_attr = geoQuery.getGeometry().getDebugType());
@@ -302,7 +301,6 @@ void encodeGeoNearMatchExpression(const GeoNearMatchExpression* tree, StringBuil
break;
case UNSET:
LOGV2_ERROR(23850,
- "Unknown CRS type {crsType} in point geometry for near query",
"Unknown CRS type in point geometry for near query",
"crsType"_attr = (int)nearQuery.centroid->crs);
MONGO_UNREACHABLE;
diff --git a/src/mongo/db/query/classic_stage_builder.cpp b/src/mongo/db/query/classic_stage_builder.cpp
index bf904550f9c..ad10da0c88c 100644
--- a/src/mongo/db/query/classic_stage_builder.cpp
+++ b/src/mongo/db/query/classic_stage_builder.cpp
@@ -365,10 +365,7 @@ std::unique_ptr<PlanStage> ClassicStageBuilder::build(const QuerySolutionNode* r
case STAGE_TRIAL:
case STAGE_UNKNOWN:
case STAGE_UPDATE: {
- LOGV2_WARNING(4615604,
- "Can't build exec tree for node {node}",
- "Can't build exec tree for node",
- "node"_attr = *root);
+ LOGV2_WARNING(4615604, "Can't build exec tree for node", "node"_attr = *root);
}
}
diff --git a/src/mongo/db/query/collection_query_info.cpp b/src/mongo/db/query/collection_query_info.cpp
index 43d67831ecf..aaef235f02b 100644
--- a/src/mongo/db/query/collection_query_info.cpp
+++ b/src/mongo/db/query/collection_query_info.cpp
@@ -184,7 +184,6 @@ void CollectionQueryInfo::notifyOfQuery(OperationContext* opCtx,
void CollectionQueryInfo::clearQueryCache(const Collection* coll) {
LOGV2_DEBUG(20907,
1,
- "{namespace}: clearing plan cache - collection info cache reset",
"Clearing plan cache - collection info cache reset",
"namespace"_attr = coll->ns());
if (nullptr != _planCache.get()) {
diff --git a/src/mongo/db/query/find.cpp b/src/mongo/db/query/find.cpp
index 90eddfe4cd1..d16460eb3b9 100644
--- a/src/mongo/db/query/find.cpp
+++ b/src/mongo/db/query/find.cpp
@@ -180,10 +180,7 @@ void generateBatch(int ntoreturn,
(*numResults)++;
}
} catch (DBException& exception) {
- LOGV2_ERROR(20918,
- "getMore executor error, stats: {stats}",
- "getMore executor error",
- "stats"_attr = redact(exec->getStats()));
+ LOGV2_ERROR(20918, "getMore executor error", "stats"_attr = redact(exec->getStats()));
exception.addContext("Executor error during OP_GET_MORE");
throw;
}
@@ -216,11 +213,7 @@ Message getMore(OperationContext* opCtx,
bool* isCursorAuthorized) {
invariant(ntoreturn >= 0);
- LOGV2_DEBUG(20909,
- 5,
- "Running getMore, cursorid: {cursorId}",
- "Running getMore",
- "cursorId"_attr = cursorid);
+ LOGV2_DEBUG(20909, 5, "Running getMore", "cursorId"_attr = cursorid);
CurOp& curOp = *CurOp::get(opCtx);
curOp.ensureStarted();
@@ -512,7 +505,6 @@ Message getMore(OperationContext* opCtx,
LOGV2_DEBUG(20910,
5,
- "getMore NOT saving client cursor, ended with state {planExecutorState}",
"getMore NOT saving client cursor",
"planExecutorState"_attr = PlanExecutor::statestr(state));
} else {
@@ -524,7 +516,6 @@ Message getMore(OperationContext* opCtx,
exec->detachFromOperationContext();
LOGV2_DEBUG(20911,
5,
- "getMore saving client cursor, ended with state {planExecutorState}",
"getMore saving client cursor",
"planExecutorState"_attr = PlanExecutor::statestr(state));
@@ -559,11 +550,7 @@ Message getMore(OperationContext* opCtx,
qr.setCursorId(cursorid);
qr.setStartingFrom(startingResult);
qr.setNReturned(numResults);
- LOGV2_DEBUG(20912,
- 5,
- "getMore returned {numResults} results",
- "getMore returned results",
- "numResults"_attr = numResults);
+ LOGV2_DEBUG(20912, 5, "getMore returned results", "numResults"_attr = numResults);
return Message(bb.release());
}
@@ -601,16 +588,8 @@ bool runQuery(OperationContext* opCtx,
"Can't canonicalize query");
invariant(cq.get());
- LOGV2_DEBUG(20913,
- 5,
- "Running query:\n{query}",
- "Running query",
- "query"_attr = redact(cq->toString()));
- LOGV2_DEBUG(20914,
- 2,
- "Running query: {query}",
- "Running query",
- "query"_attr = redact(cq->toStringShort()));
+ LOGV2_DEBUG(20913, 5, "Running query", "query"_attr = redact(cq->toString()));
+ LOGV2_DEBUG(20914, 2, "Running query", "query"_attr = redact(cq->toStringShort()));
// Parse, canonicalize, plan, transcribe, and get a plan executor.
AutoGetCollectionForReadCommand ctx(opCtx, nss, AutoGetCollectionViewMode::kViewsForbidden);
@@ -708,8 +687,6 @@ bool runQuery(OperationContext* opCtx,
if (FindCommon::enoughForFirstBatch(qr, numResults)) {
LOGV2_DEBUG(20915,
5,
- "Enough for first batch, wantMore={wantMore} ntoreturn={numToReturn} "
- "numResults={numResults}",
"Enough for first batch",
"wantMore"_attr = qr.wantMore(),
"numToReturn"_attr = qr.getNToReturn().value_or(0),
@@ -719,7 +696,6 @@ bool runQuery(OperationContext* opCtx,
}
} catch (DBException& exception) {
LOGV2_ERROR(20919,
- "Plan executor error during find: {error}, stats: {stats}",
"Plan executor error during find",
"error"_attr = redact(exception.toStatus()),
"stats"_attr = redact(exec->getStats()));
@@ -751,13 +727,11 @@ bool runQuery(OperationContext* opCtx,
{Privilege(ResourcePattern::forExactNamespace(nss), ActionType::find)}});
ccId = pinnedCursor.getCursor()->cursorid();
- LOGV2_DEBUG(
- 20916,
- 5,
- "Caching executor with cursorid {cursorId} after returning {numResults} results",
- "Caching executor after returning results",
- "cursorId"_attr = ccId,
- "numResults"_attr = numResults);
+ LOGV2_DEBUG(20916,
+ 5,
+ "Caching executor after returning results",
+ "cursorId"_attr = ccId,
+ "numResults"_attr = numResults);
// Set curOp.debug().exhaust if the client requested exhaust and the cursor is not
// exhausted.
@@ -778,11 +752,8 @@ bool runQuery(OperationContext* opCtx,
endQueryOp(opCtx, collection, *pinnedCursor.getCursor()->getExecutor(), numResults, ccId);
} else {
- LOGV2_DEBUG(20917,
- 5,
- "Not caching executor but returning {numResults} results",
- "Not caching executor but returning results",
- "numResults"_attr = numResults);
+ LOGV2_DEBUG(
+ 20917, 5, "Not caching executor but returning results", "numResults"_attr = numResults);
endQueryOp(opCtx, collection, *exec, numResults, ccId);
}
diff --git a/src/mongo/db/query/find_common.cpp b/src/mongo/db/query/find_common.cpp
index 9d27341a806..20629f417c9 100644
--- a/src/mongo/db/query/find_common.cpp
+++ b/src/mongo/db/query/find_common.cpp
@@ -82,7 +82,6 @@ void FindCommon::waitInFindBeforeMakingBatch(OperationContext* opCtx, const Cano
auto whileWaitingFunc = [&, hasLogged = false]() mutable {
if (!std::exchange(hasLogged, true)) {
LOGV2(20908,
- "Waiting in find before making batch for query - {query}",
"Waiting in find before making batch for query",
"query"_attr = redact(cq.toStringShort()));
}
diff --git a/src/mongo/db/query/get_executor.cpp b/src/mongo/db/query/get_executor.cpp
index 4572fedf7a5..b5850ebca81 100644
--- a/src/mongo/db/query/get_executor.cpp
+++ b/src/mongo/db/query/get_executor.cpp
@@ -217,8 +217,6 @@ IndexEntry indexEntryFromIndexCatalogEntry(OperationContext* opCtx,
LOGV2_DEBUG(20920,
2,
- "Multikey path metadata range index scan stats: {{ index: {index}, "
- "numSeeks: {numSeeks}, keysExamined: {keysExamined}}}",
"Multikey path metadata range index scan stats",
"index"_attr = desc->indexName(),
"numSeeks"_attr = mkAccessStats.keysExamined,
@@ -528,7 +526,6 @@ public:
if (nullptr == _collection) {
LOGV2_DEBUG(20921,
2,
- "Collection {namespace} does not exist. Using EOF plan: {query}",
"Collection does not exist. Using EOF plan",
"namespace"_attr = _cq->ns(),
"canonicalQuery"_attr = redact(_cq->toStringShort()));
@@ -591,7 +588,6 @@ public:
turnIxscanIntoCount(querySolution.get())) {
LOGV2_DEBUG(20923,
2,
- "Using fast count: {query}",
"Using fast count",
"query"_attr = redact(_cq->toStringShort()));
}
@@ -607,7 +603,6 @@ public:
SubplanStage::canUseSubplanning(*_cq)) {
LOGV2_DEBUG(20924,
2,
- "Running query as sub-queries: {query}",
"Running query as sub-queries",
"query"_attr = redact(_cq->toStringShort()));
return buildSubPlan(plannerParams);
@@ -633,7 +628,6 @@ public:
LOGV2_DEBUG(20925,
2,
- "Using fast count: {query}, planSummary: {planSummary}",
"Using fast count",
"query"_attr = redact(_cq->toStringShort()),
"planSummary"_attr = result->getPlanSummary());
@@ -648,14 +642,11 @@ public:
auto root = buildExecutableTree(*solutions[0]);
result->emplace(std::move(root), std::move(solutions[0]));
- LOGV2_DEBUG(
- 20926,
- 2,
- "Only one plan is available; it will be run but will not be cached. {query}, "
- "planSummary: {planSummary}",
- "Only one plan is available; it will be run but will not be cached",
- "query"_attr = redact(_cq->toStringShort()),
- "planSummary"_attr = result->getPlanSummary());
+ LOGV2_DEBUG(20926,
+ 2,
+ "Only one plan is available; it will be run but will not be cached",
+ "query"_attr = redact(_cq->toStringShort()),
+ "planSummary"_attr = result->getPlanSummary());
return std::move(result);
}
@@ -1237,7 +1228,6 @@ StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorDele
// contains an EOF stage.
LOGV2_DEBUG(20927,
2,
- "Collection {namespace} does not exist. Using EOF stage: {query}",
"Collection does not exist. Using EOF stage",
"namespace"_attr = nss.ns(),
"query"_attr = redact(request->getQuery()));
@@ -1270,11 +1260,7 @@ StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorDele
if (descriptor && CanonicalQuery::isSimpleIdQuery(unparsedQuery) &&
request->getProj().isEmpty() && hasCollectionDefaultCollation) {
- LOGV2_DEBUG(20928,
- 2,
- "Using idhack: {query}",
- "Using idhack",
- "query"_attr = redact(unparsedQuery));
+ LOGV2_DEBUG(20928, 2, "Using idhack", "query"_attr = redact(unparsedQuery));
auto idHackStage = std::make_unique<IDHackStage>(
expCtx.get(), unparsedQuery["_id"].wrap(), ws.get(), collection, descriptor);
@@ -1410,7 +1396,6 @@ StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorUpda
if (!collection) {
LOGV2_DEBUG(20929,
2,
- "Collection {namespace} does not exist. Using EOF stage: {query}",
"Collection does not exist. Using EOF stage",
"namespace"_attr = nss.ns(),
"query"_attr = redact(request->getQuery()));
@@ -1438,11 +1423,7 @@ StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorUpda
if (descriptor && CanonicalQuery::isSimpleIdQuery(unparsedQuery) &&
request->getProj().isEmpty() && hasCollectionDefaultCollation) {
- LOGV2_DEBUG(20930,
- 2,
- "Using idhack: {query}",
- "Using idhack",
- "query"_attr = redact(unparsedQuery));
+ LOGV2_DEBUG(20930, 2, "Using idhack", "query"_attr = redact(unparsedQuery));
// Working set 'ws' is discarded. InternalPlanner::updateWithIdHack() makes its own
// WorkingSet.
@@ -2056,7 +2037,6 @@ StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorForS
LOGV2_DEBUG(20931,
2,
- "Using fast distinct: {query}, planSummary: {planSummary}",
"Using fast distinct",
"query"_attr = redact(parsedDistinct->getQuery()->toStringShort()),
"planSummary"_attr = Explain::getPlanSummary(root.get()));
@@ -2101,7 +2081,6 @@ getExecutorDistinctFromIndexSolutions(OperationContext* opCtx,
LOGV2_DEBUG(20932,
2,
- "Using fast distinct: {query}, planSummary: {planSummary}",
"Using fast distinct",
"query"_attr = redact(parsedDistinct->getQuery()->toStringShort()),
"planSummary"_attr = Explain::getPlanSummary(root.get()));
diff --git a/src/mongo/db/query/index_bounds_builder.cpp b/src/mongo/db/query/index_bounds_builder.cpp
index faed197b1c6..dc826b1ebdb 100644
--- a/src/mongo/db/query/index_bounds_builder.cpp
+++ b/src/mongo/db/query/index_bounds_builder.cpp
@@ -888,14 +888,12 @@ void IndexBoundsBuilder::_translatePredicate(const MatchExpression* expr,
*tightnessOut = IndexBoundsBuilder::INEXACT_FETCH;
} else {
LOGV2_WARNING(20934,
- "Planner error trying to build geo bounds for {element} index element",
"Planner error trying to build geo bounds for an index element",
"element"_attr = elt.toString());
verify(0);
}
} else {
LOGV2_WARNING(20935,
- "Planner error, trying to build bounds for expression: {expression}",
"Planner error while trying to build bounds for expression",
"expression"_attr = redact(expr->debugString()));
verify(0);
@@ -1203,8 +1201,7 @@ void IndexBoundsBuilder::alignBounds(IndexBounds* bounds, const BSONObj& kp, int
if (!bounds->isValidFor(kp, scanDir)) {
LOGV2(20933,
- "INVALID BOUNDS: {bounds}\nkp = {keyPattern}\nscanDir = {scanDirection}",
- "INVALID BOUNDS",
+ "Invalid bounds",
"bounds"_attr = redact(bounds->toString()),
"keyPattern"_attr = redact(kp),
"scanDirection"_attr = scanDir);
diff --git a/src/mongo/db/query/plan_cache.cpp b/src/mongo/db/query/plan_cache.cpp
index 800f42c9f51..4c61cac98cd 100644
--- a/src/mongo/db/query/plan_cache.cpp
+++ b/src/mongo/db/query/plan_cache.cpp
@@ -435,7 +435,6 @@ std::unique_ptr<CachedSolution> PlanCache::getCacheEntryIfActive(const PlanCache
if (res.state == PlanCache::CacheEntryState::kPresentInactive) {
LOGV2_DEBUG(20936,
2,
- "Not using cached entry for {cachedSolution} since it is inactive",
"Not using cached entry since it is inactive",
"cachedSolution"_attr = redact(res.cachedSolution->toString()));
return nullptr;
@@ -460,8 +459,6 @@ PlanCache::NewEntryState PlanCache::getNewEntryState(const CanonicalQuery& query
if (!oldEntry) {
LOGV2_DEBUG(20937,
1,
- "Creating inactive cache entry for query shape {query} queryHash {queryHash} "
- "planCacheKey {planCacheKey} with works value {newWorks}",
"Creating inactive cache entry for query",
"query"_attr = redact(query.toStringShort()),
"queryHash"_attr = unsignedIntToFixedLengthHex(queryHash),
@@ -478,9 +475,6 @@ PlanCache::NewEntryState PlanCache::getNewEntryState(const CanonicalQuery& query
LOGV2_DEBUG(20938,
1,
- "Replacing active cache entry for query {query} queryHash {queryHash} "
- "planCacheKey {planCacheKey} with works {oldWorks} with a plan with "
- "works {newWorks}",
"Replacing active cache entry for query",
"query"_attr = redact(query.toStringShort()),
"queryHash"_attr = unsignedIntToFixedLengthHex(queryHash),
@@ -492,9 +486,6 @@ PlanCache::NewEntryState PlanCache::getNewEntryState(const CanonicalQuery& query
} else if (oldEntry->isActive) {
LOGV2_DEBUG(20939,
1,
- "Attempt to write to the planCache for query {query} queryHash {queryHash} "
- "planCacheKey {planCacheKey} with a plan with works {newWorks} is a noop, "
- "since there's already a plan with works value {oldWorks}",
"Attempt to write to the planCache resulted in a noop, since there's already "
"an active cache entry with a lower works value",
"query"_attr = redact(query.toStringShort()),
@@ -519,8 +510,6 @@ PlanCache::NewEntryState PlanCache::getNewEntryState(const CanonicalQuery& query
LOGV2_DEBUG(20940,
1,
- "Increasing work value associated with cache entry for query {query} queryHash "
- "{queryHash} planCacheKey {planCacheKey} from {oldWorks} to {increasedWorks}",
"Increasing work value associated with cache entry",
"query"_attr = redact(query.toStringShort()),
"queryHash"_attr = unsignedIntToFixedLengthHex(queryHash),
@@ -537,9 +526,6 @@ PlanCache::NewEntryState PlanCache::getNewEntryState(const CanonicalQuery& query
// cache (as an active entry) the plan this query used for the future.
LOGV2_DEBUG(20941,
1,
- "Inactive cache entry for query {query} queryHash {queryHash} planCacheKey "
- "{planCacheKey} with works {oldWorks} is being promoted to active entry with "
- "works value {newWorks}",
"Inactive cache entry for query is being promoted to active entry",
"query"_attr = redact(query.toStringShort()),
"queryHash"_attr = unsignedIntToFixedLengthHex(queryHash),
@@ -634,8 +620,6 @@ Status PlanCache::set(const CanonicalQuery& query,
if (nullptr != evictedEntry.get()) {
LOGV2_DEBUG(20942,
1,
- "{namespace}: plan cache maximum size exceeded - removed least recently used "
- "entry {evictedEntry}",
"Plan cache maximum size exceeded - removed least recently used entry",
"namespace"_attr = query.nss(),
"evictedEntry"_attr = redact(evictedEntry->toString()));
diff --git a/src/mongo/db/query/plan_enumerator.cpp b/src/mongo/db/query/plan_enumerator.cpp
index b31f45904a4..2eb9ac010d1 100644
--- a/src/mongo/db/query/plan_enumerator.cpp
+++ b/src/mongo/db/query/plan_enumerator.cpp
@@ -359,11 +359,7 @@ unique_ptr<MatchExpression> PlanEnumerator::getNext() {
tagForSort(tree.get());
_root->resetTag();
- LOGV2_DEBUG(20943,
- 5,
- "Enumerator: memo just before moving:\n{memo}",
- "Enumerator: memo just before moving",
- "memo"_attr = dumpMemo());
+ LOGV2_DEBUG(20943, 5, "Enumerator: memo just before moving", "memo"_attr = dumpMemo());
_done = nextMemo(memoIDForNode(_root));
return tree;
}
@@ -1572,7 +1568,7 @@ void PlanEnumerator::compound(const vector<MatchExpression*>& tryCompound,
//
void PlanEnumerator::tagMemo(size_t id) {
- LOGV2_DEBUG(20944, 5, "Tagging memoID {id}", "Tagging memoID", "id"_attr = id);
+ LOGV2_DEBUG(20944, 5, "Tagging memoID", "id"_attr = id);
NodeAssignment* assign = _memo[id];
verify(nullptr != assign);
diff --git a/src/mongo/db/query/plan_ranker.cpp b/src/mongo/db/query/plan_ranker.cpp
index d5d05132455..1c79ec5845b 100644
--- a/src/mongo/db/query/plan_ranker.cpp
+++ b/src/mongo/db/query/plan_ranker.cpp
@@ -45,7 +45,7 @@ void logScoreFormula(std::function<std::string()> formula,
double noSortBonus,
double noIxisectBonus,
double tieBreakers) {
- LOGV2_DEBUG(20961, 2, "{sb_str}", "sb_str"_attr = [&]() {
+ LOGV2_DEBUG(20961, 2, "Score formula", "formula"_attr = [&]() {
StringBuilder sb;
sb << "score(" << str::convertDoubleToString(score) << ") = baseScore("
<< str::convertDoubleToString(baseScore) << ")"
@@ -60,11 +60,7 @@ void logScoreFormula(std::function<std::string()> formula,
}
void logScoreBoost(double score) {
- LOGV2_DEBUG(20962,
- 5,
- "Score boosted to {newScore} due to intersection forcing",
- "Score boosted due to intersection forcing",
- "newScore"_attr = score);
+ LOGV2_DEBUG(20962, 5, "Score boosted due to intersection forcing", "newScore"_attr = score);
}
void logScoringPlan(std::function<std::string()> solution,
@@ -74,37 +70,28 @@ void logScoringPlan(std::function<std::string()> solution,
bool isEOF) {
LOGV2_DEBUG(20956,
5,
- "Scoring plan {planIndex}:\n{querySolution}Stats:\n{stats}",
"Scoring plan",
"planIndex"_attr = planIndex,
"querySolution"_attr = redact(solution()),
"stats"_attr = redact(explain()));
LOGV2_DEBUG(20957,
2,
- "Scoring query plan: {planSummary} planHitEOF={planHitEOF}",
"Scoring query plan",
"planSummary"_attr = planSummary(),
"planHitEOF"_attr = isEOF);
}
void logScore(double score) {
- LOGV2_DEBUG(20958, 5, "Basic plan score: {score}", "Basic plan score", "score"_attr = score);
+ LOGV2_DEBUG(20958, 5, "Basic plan score", "score"_attr = score);
}
void logEOFBonus(double eofBonus) {
- LOGV2_DEBUG(20959,
- 5,
- "Adding +{eofBonus} EOF bonus to score",
- "Adding EOF bonus to score",
- "eofBonus"_attr = eofBonus);
+ LOGV2_DEBUG(20959, 5, "Adding EOF bonus to score", "eofBonus"_attr = eofBonus);
}
void logFailedPlan(std::function<std::string()> planSummary) {
- LOGV2_DEBUG(20960,
- 2,
- "Not scoring plan: {planSummary} because the plan failed",
- "Not scoring a plan because the plan failed",
- "planSummary"_attr = planSummary());
+ LOGV2_DEBUG(
+ 20960, 2, "Not scoring a plan because the plan failed", "planSummary"_attr = planSummary());
}
} // namespace log_detail
diff --git a/src/mongo/db/query/planner_analysis.cpp b/src/mongo/db/query/planner_analysis.cpp
index f5dd21a114b..7f446b8c757 100644
--- a/src/mongo/db/query/planner_analysis.cpp
+++ b/src/mongo/db/query/planner_analysis.cpp
@@ -386,11 +386,7 @@ std::unique_ptr<QuerySolutionNode> addSortKeyGeneratorStageIfNeeded(
std::unique_ptr<ProjectionNode> analyzeProjection(const CanonicalQuery& query,
std::unique_ptr<QuerySolutionNode> solnRoot,
const bool hasSortStage) {
- LOGV2_DEBUG(20949,
- 5,
- "PROJECTION: Current plan is:\n{plan}",
- "PROJECTION: Current plan",
- "plan"_attr = redact(solnRoot->toString()));
+ LOGV2_DEBUG(20949, 5, "PROJECTION: Current plan", "plan"_attr = redact(solnRoot->toString()));
// If the projection requires the entire document we add a fetch stage if not present. Otherwise
// we add a fetch stage if we are not covered.
@@ -707,8 +703,6 @@ bool QueryPlannerAnalysis::explodeForSort(const CanonicalQuery& query,
LOGV2_DEBUG(
20950,
5,
- "Could expand ixscans to pull out sort order but resulting scan count({numScans}) is "
- "too high",
"Could expand ixscans to pull out sort order but resulting scan count is too high",
"numScans"_attr = totalNumScans);
return false;
@@ -768,7 +762,6 @@ QuerySolutionNode* QueryPlannerAnalysis::analyzeSort(const CanonicalQuery& query
QueryPlannerCommon::reverseScans(solnRoot);
LOGV2_DEBUG(20951,
5,
- "Reversing ixscan to provide sort. Result: {newPlan}",
"Reversing ixscan to provide sort",
"newPlan"_attr = redact(solnRoot->toString()));
return solnRoot;
diff --git a/src/mongo/db/query/planner_ixselect.cpp b/src/mongo/db/query/planner_ixselect.cpp
index 13a9d59b702..642074fea09 100644
--- a/src/mongo/db/query/planner_ixselect.cpp
+++ b/src/mongo/db/query/planner_ixselect.cpp
@@ -286,7 +286,6 @@ std::vector<IndexEntry> QueryPlannerIXSelect::findIndexesByHint(
if (entry.identifier.catalogName == hintName) {
LOGV2_DEBUG(20952,
5,
- "Hint by name specified, restricting indices to {keyPattern}",
"Hint by name specified, restricting indices",
"name"_attr = entry.identifier.catalogName,
"keyPattern"_attr = entry.keyPattern);
@@ -298,7 +297,6 @@ std::vector<IndexEntry> QueryPlannerIXSelect::findIndexesByHint(
if (SimpleBSONObjComparator::kInstance.evaluate(entry.keyPattern == hintedIndex)) {
LOGV2_DEBUG(20953,
5,
- "Hint specified, restricting indices to {keyPattern}",
"Hint specified, restricting indices",
"name"_attr = entry.identifier.catalogName,
"keyPattern"_attr = entry.keyPattern);
@@ -586,7 +584,6 @@ bool QueryPlannerIXSelect::_compatible(const BSONElement& keyPatternElt,
return false;
} else {
LOGV2_WARNING(20954,
- "Unknown indexing for node {node} and field {field}",
"Unknown indexing for given node and field",
"node"_attr = node->debugString(),
"field"_attr = keyPatternElt.toString());
diff --git a/src/mongo/db/query/planner_wildcard_helpers.cpp b/src/mongo/db/query/planner_wildcard_helpers.cpp
index 7893b6158f5..837180ff85b 100644
--- a/src/mongo/db/query/planner_wildcard_helpers.cpp
+++ b/src/mongo/db/query/planner_wildcard_helpers.cpp
@@ -247,8 +247,6 @@ bool validateNumericPathComponents(const MultikeyPaths& multikeyPaths,
if (arrayIndices.size() > kWildcardMaxArrayIndexTraversalDepth) {
LOGV2_DEBUG(20955,
2,
- "Declining to answer query on field '{field}' with $** index, as it traverses "
- "through more than {maxNestedArrayIndices} nested array indices",
"Declining to answer query on a field with $** index, as it traverses through "
"more than the maximum permitted depth of nested array indices",
"field"_attr = queryPath.dottedField(),
diff --git a/src/mongo/db/query/query_planner.cpp b/src/mongo/db/query/query_planner.cpp
index f97bb74f065..dc8bc35ef64 100644
--- a/src/mongo/db/query/query_planner.cpp
+++ b/src/mongo/db/query/query_planner.cpp
@@ -518,8 +518,6 @@ StatusWith<std::unique_ptr<QuerySolution>> QueryPlanner::planFromCache(
LOGV2_DEBUG(20963,
5,
- "Tagging the match expression according to cache data: \nFilter:\n{filter}Cache "
- "data:\n{cacheData}",
"Tagging the match expression according to cache data",
"filter"_attr = redact(clone->debugString()),
"cacheData"_attr = redact(winnerCacheData.toString()));
@@ -538,7 +536,6 @@ StatusWith<std::unique_ptr<QuerySolution>> QueryPlanner::planFromCache(
invariant(insertionRes.second);
LOGV2_DEBUG(20964,
5,
- "Index {indexNumber}: {id}",
"Index mapping: number and identifier",
"indexNumber"_attr = i,
"id"_attr = ie.identifier);
@@ -552,11 +549,7 @@ StatusWith<std::unique_ptr<QuerySolution>> QueryPlanner::planFromCache(
// The MatchExpression tree is in canonical order. We must order the nodes for access planning.
prepareForAccessPlanning(clone.get());
- LOGV2_DEBUG(20965,
- 5,
- "Tagged tree:\n{tree}",
- "Tagged tree",
- "tree"_attr = redact(clone->debugString()));
+ LOGV2_DEBUG(20965, 5, "Tagged tree", "tree"_attr = redact(clone->debugString()));
// Use the cached index assignments to build solnRoot.
std::unique_ptr<QuerySolutionNode> solnRoot(QueryPlannerAccess::buildIndexedDataAccess(
@@ -577,7 +570,6 @@ StatusWith<std::unique_ptr<QuerySolution>> QueryPlanner::planFromCache(
LOGV2_DEBUG(20966,
5,
- "Planner: solution constructed from the cache:\n{solution}",
"Planner: solution constructed from the cache",
"solution"_attr = redact(soln->toString()));
return {std::move(soln)};
@@ -588,8 +580,6 @@ StatusWith<std::vector<std::unique_ptr<QuerySolution>>> QueryPlanner::plan(
const CanonicalQuery& query, const QueryPlannerParams& params) {
LOGV2_DEBUG(20967,
5,
- "Beginning planning...\n=============================\nOptions = "
- "{options}\nCanonical query:\n{query}=============================",
"Beginning planning",
"options"_attr = optionString(params.options),
"query"_attr = redact(query.toString()));
@@ -597,7 +587,6 @@ StatusWith<std::vector<std::unique_ptr<QuerySolution>>> QueryPlanner::plan(
for (size_t i = 0; i < params.indices.size(); ++i) {
LOGV2_DEBUG(20968,
5,
- "Index {indexNumber} is {value}",
"Index number and details",
"indexNumber"_attr = i,
"index"_attr = params.indices[i].toString());
@@ -690,11 +679,7 @@ StatusWith<std::vector<std::unique_ptr<QuerySolution>>> QueryPlanner::plan(
stdx::unordered_set<string> fields;
QueryPlannerIXSelect::getFields(query.root(), &fields);
for (auto&& field : fields) {
- LOGV2_DEBUG(20970,
- 5,
- "Predicate over field '{field}'",
- "Predicate over field",
- "field"_attr = field);
+ LOGV2_DEBUG(20970, 5, "Predicate over field", "field"_attr = field);
}
fullIndexList = QueryPlannerIXSelect::expandIndexes(fields, std::move(fullIndexList));
@@ -770,7 +755,6 @@ StatusWith<std::vector<std::unique_ptr<QuerySolution>>> QueryPlanner::plan(
for (size_t i = 0; i < relevantIndices.size(); ++i) {
LOGV2_DEBUG(20971,
2,
- "Relevant index {indexNumber} is {value}",
"Relevant index",
"indexNumber"_attr = i,
"index"_attr = relevantIndices[i].toString());
@@ -795,11 +779,7 @@ StatusWith<std::vector<std::unique_ptr<QuerySolution>>> QueryPlanner::plan(
}
// query.root() is now annotated with RelevantTag(s).
- LOGV2_DEBUG(20972,
- 5,
- "Rated tree:\n{tree}",
- "Rated tree",
- "tree"_attr = redact(query.root()->debugString()));
+ LOGV2_DEBUG(20972, 5, "Rated tree", "tree"_attr = redact(query.root()->debugString()));
// If there is a GEO_NEAR it must have an index it can use directly.
const MatchExpression* gnNode = nullptr;
@@ -816,7 +796,6 @@ StatusWith<std::vector<std::unique_ptr<QuerySolution>>> QueryPlanner::plan(
LOGV2_DEBUG(20974,
5,
- "Rated tree after geonear processing:{tree}",
"Rated tree after geonear processing",
"tree"_attr = redact(query.root()->debugString()));
}
@@ -857,7 +836,6 @@ StatusWith<std::vector<std::unique_ptr<QuerySolution>>> QueryPlanner::plan(
LOGV2_DEBUG(20975,
5,
- "Rated tree after text processing:{tree}",
"Rated tree after text processing",
"tree"_attr = redact(query.root()->debugString()));
}
@@ -879,7 +857,6 @@ StatusWith<std::vector<std::unique_ptr<QuerySolution>>> QueryPlanner::plan(
while ((nextTaggedTree = isp.getNext()) && (out.size() < params.maxIndexedSolutions)) {
LOGV2_DEBUG(20976,
5,
- "About to build solntree from tagged tree:\n{tree}",
"About to build solntree from tagged tree",
"tree"_attr = redact(nextTaggedTree->debugString()));
@@ -892,7 +869,6 @@ StatusWith<std::vector<std::unique_ptr<QuerySolution>>> QueryPlanner::plan(
if (!statusWithCacheData.isOK()) {
LOGV2_DEBUG(20977,
5,
- "Query is not cachable: {reason}",
"Query is not cachable",
"reason"_attr = redact(statusWithCacheData.getStatus().reason()));
} else {
@@ -915,7 +891,6 @@ StatusWith<std::vector<std::unique_ptr<QuerySolution>>> QueryPlanner::plan(
if (soln) {
LOGV2_DEBUG(20978,
5,
- "Planner: adding solution:\n{solution}",
"Planner: adding solution",
"solution"_attr = redact(soln->toString()));
if (statusWithCacheData.isOK()) {
@@ -931,11 +906,7 @@ StatusWith<std::vector<std::unique_ptr<QuerySolution>>> QueryPlanner::plan(
// Don't leave tags on query tree.
query.root()->resetTag();
- LOGV2_DEBUG(20979,
- 5,
- "Planner: outputted {numSolutions} indexed solutions",
- "Planner: outputted indexed solutions",
- "numSolutions"_attr = out.size());
+ LOGV2_DEBUG(20979, 5, "Planner: outputted indexed solutions", "numSolutions"_attr = out.size());
// Produce legible error message for failed OR planning with a TEXT child.
// TODO: support collection scan for non-TEXT children of OR.
@@ -1129,7 +1100,6 @@ StatusWith<std::vector<std::unique_ptr<QuerySolution>>> QueryPlanner::plan(
if (collscan) {
LOGV2_DEBUG(20984,
5,
- "Planner: outputting a collection scan:\n{collectionScan}",
"Planner: outputting a collection scan",
"collectionScan"_attr = redact(collscan->toString()));
SolutionCacheData* scd = new SolutionCacheData();
@@ -1158,7 +1128,11 @@ StatusWith<QueryPlanner::SubqueriesPlanningResult> QueryPlanner::planSubqueries(
const auto insertionRes = planningResult.indexMap.insert(std::make_pair(ie.identifier, i));
// Be sure the key was not already in the map.
invariant(insertionRes.second);
- LOGV2_DEBUG(20598, 5, "Subplanner: index {i} is {ie}", "i"_attr = i, "ie"_attr = ie);
+ LOGV2_DEBUG(20598,
+ 5,
+ "Subplanner: index number and entry",
+ "indexNumber"_attr = i,
+ "indexEntry"_attr = ie);
}
for (size_t i = 0; i < planningResult.orExpression->numChildren(); ++i) {
@@ -1187,12 +1161,11 @@ StatusWith<QueryPlanner::SubqueriesPlanningResult> QueryPlanner::planSubqueries(
auto planCacheKey = planCache->computeKey(*branchResult->canonicalQuery);
if (auto cachedSol = planCache->getCacheEntryIfActive(planCacheKey)) {
// We have a CachedSolution. Store it for later.
- LOGV2_DEBUG(
- 20599,
- 5,
- "Subplanner: cached plan found for child {i} of {orExpression_numChildren}",
- "i"_attr = i,
- "orExpression_numChildren"_attr = planningResult.orExpression->numChildren());
+ LOGV2_DEBUG(20599,
+ 5,
+ "Subplanner: cached plan found",
+ "childIndex"_attr = i,
+ "numChildren"_attr = planningResult.orExpression->numChildren());
branchResult->cachedSolution = std::move(cachedSol);
}
@@ -1202,10 +1175,9 @@ StatusWith<QueryPlanner::SubqueriesPlanningResult> QueryPlanner::planSubqueries(
// No CachedSolution found. We'll have to plan from scratch.
LOGV2_DEBUG(20600,
5,
- "Subplanner: planning child {i} of {orExpression_numChildren}",
- "i"_attr = i,
- "orExpression_numChildren"_attr =
- planningResult.orExpression->numChildren());
+ "Subplanner: planning child",
+ "childIndex"_attr = i,
+ "numChildren"_attr = planningResult.orExpression->numChildren());
// We don't set NO_TABLE_SCAN because peeking at the cache data will keep us from
// considering any plan that's a collscan.
@@ -1221,8 +1193,8 @@ StatusWith<QueryPlanner::SubqueriesPlanningResult> QueryPlanner::planSubqueries(
LOGV2_DEBUG(20601,
5,
- "Subplanner: got {branchResult_solutions_size} solutions",
- "branchResult_solutions_size"_attr = branchResult->solutions.size());
+ "Subplanner: number of solutions",
+ "numSolutions"_attr = branchResult->solutions.size());
}
}
@@ -1313,10 +1285,8 @@ StatusWith<std::unique_ptr<QuerySolution>> QueryPlanner::choosePlanForSubqueries
return Status(ErrorCodes::NoQueryExecutionPlans, ss);
}
- LOGV2_DEBUG(20602,
- 5,
- "Subplanner: fully tagged tree is {solnRoot}",
- "solnRoot"_attr = redact(solnRoot->toString()));
+ LOGV2_DEBUG(
+ 20602, 5, "Subplanner: fully tagged tree", "solnRoot"_attr = redact(solnRoot->toString()));
auto compositeSolution =
QueryPlannerAnalysis::analyzeDataAccess(query, params, std::move(solnRoot));
@@ -1329,7 +1299,7 @@ StatusWith<std::unique_ptr<QuerySolution>> QueryPlanner::choosePlanForSubqueries
LOGV2_DEBUG(20603,
5,
- "Subplanner: Composite solution is {compositeSolution}",
+ "Subplanner: Composite solution",
"compositeSolution"_attr = redact(compositeSolution->toString()));
return std::move(compositeSolution);
diff --git a/src/mongo/db/query/query_planner_test_fixture.cpp b/src/mongo/db/query/query_planner_test_fixture.cpp
index 67a1afe5f2e..25189abcc81 100644
--- a/src/mongo/db/query/query_planner_test_fixture.cpp
+++ b/src/mongo/db/query/query_planner_test_fixture.cpp
@@ -477,7 +477,7 @@ size_t QueryPlannerTest::getNumSolutions() const {
void QueryPlannerTest::dumpSolutions() const {
str::stream ost;
dumpSolutions(ost);
- LOGV2(20985, "{value}", "Solutions", "value"_attr = std::string(ost));
+ LOGV2(20985, "Solutions", "value"_attr = std::string(ost));
}
void QueryPlannerTest::dumpSolutions(str::stream& ost) const {
diff --git a/src/mongo/db/query/query_planner_test_lib.cpp b/src/mongo/db/query/query_planner_test_lib.cpp
index b0422e35bdc..63f94f14293 100644
--- a/src/mongo/db/query/query_planner_test_lib.cpp
+++ b/src/mongo/db/query/query_planner_test_lib.cpp
@@ -137,10 +137,7 @@ bool bsonObjFieldsAreInSet(BSONObj obj, const std::set<std::string>& allowedFiel
while (i.more()) {
BSONElement child = i.next();
if (!allowedFields.count(child.fieldName())) {
- LOGV2_ERROR(23932,
- "Did not expect to find {field}",
- "Unexpected field",
- "field"_attr = child.fieldName());
+ LOGV2_ERROR(23932, "Unexpected field", "field"_attr = child.fieldName());
return false;
}
}
diff --git a/src/mongo/db/query/sbe_cached_solution_planner.cpp b/src/mongo/db/query/sbe_cached_solution_planner.cpp
index bf8a6613463..abc3027ce35 100644
--- a/src/mongo/db/query/sbe_cached_solution_planner.cpp
+++ b/src/mongo/db/query/sbe_cached_solution_planner.cpp
@@ -57,11 +57,9 @@ plan_ranker::CandidatePlan CachedSolutionPlanner::plan(
// entry, nor cache the result of replanning.
LOGV2_DEBUG(2057901,
1,
- "Execution of cached plan failed, falling back to replan. query: "
- "{canonicalQuery_Short} planSummary: {Explain_getPlanSummary_child_get} ",
- "canonicalQuery_Short"_attr = redact(_cq.toStringShort()),
- "Explain_getPlanSummary_child_get"_attr =
- Explain::getPlanSummary(candidate.root.get()));
+ "Execution of cached plan failed, falling back to replan",
+ "query"_attr = redact(_cq.toStringShort()),
+ "planSummary"_attr = Explain::getPlanSummary(candidate.root.get()));
return replan(false);
}
@@ -78,13 +76,12 @@ plan_ranker::CandidatePlan CachedSolutionPlanner::plan(
LOGV2_DEBUG(
2058001,
1,
- "Execution of cached plan required {maxReadsBeforeReplan} works, but was originally cached "
- "with only {decisionReads} works. Evicting cache entry and replanning query: "
- "{canonicalQuery_Short} plan summary before replan: {Explain_getPlanSummary_child_get}",
+ "Evicting cache entry for a query and replanning it since the number of required works "
+ "mismatch the number of cached works",
"maxReadsBeforeReplan"_attr = numReads,
"decisionReads"_attr = _decisionReads,
- "canonicalQuery_Short"_attr = redact(_cq.toStringShort()),
- "Explain_getPlanSummary_child_get"_attr = Explain::getPlanSummary(candidate.root.get()));
+ "query"_attr = redact(_cq.toStringShort()),
+ "planSummary"_attr = Explain::getPlanSummary(candidate.root.get()));
return replan(true);
}
@@ -120,12 +117,10 @@ plan_ranker::CandidatePlan CachedSolutionPlanner::replan(bool shouldCache) const
LOGV2_DEBUG(
2058101,
1,
- "Replanning of query resulted in single query solution, which will not be cached. "
- "{canonicalQuery_Short} plan summary after replan: {Explain_getPlanSummary_child_get} "
- "previous cache entry evicted: {shouldCache_yes_no}",
- "canonicalQuery_Short"_attr = redact(_cq.toStringShort()),
- "Explain_getPlanSummary_child_get"_attr = Explain::getPlanSummary(root.get()),
- "shouldCache_yes_no"_attr = (shouldCache ? "yes" : "no"));
+ "Replanning of query resulted in a single query solution, which will not be cached. ",
+ "query"_attr = redact(_cq.toStringShort()),
+ "planSummary"_attr = Explain::getPlanSummary(root.get()),
+ "shouldCache"_attr = (shouldCache ? "yes" : "no"));
return {std::move(solutions[0]), std::move(root), std::move(data)};
}
@@ -147,12 +142,10 @@ plan_ranker::CandidatePlan CachedSolutionPlanner::replan(bool shouldCache) const
auto plan = multiPlanner.plan(std::move(solutions), std::move(roots));
LOGV2_DEBUG(2058201,
1,
- "Replanning {canonicalQuery_Short} resulted in plan with summary: "
- "{Explain_getPlanSummary_child_get}, which {shouldCache_has_has_not} been written "
- "to the cache",
- "canonicalQuery_Short"_attr = redact(_cq.toStringShort()),
- "Explain_getPlanSummary_child_get"_attr = Explain::getPlanSummary(plan.root.get()),
- "shouldCache_has_has_not"_attr = (shouldCache ? "has" : "has not"));
+ "Query plan after replanning and its cache status",
+ "query"_attr = redact(_cq.toStringShort()),
+ "planSummary"_attr = Explain::getPlanSummary(plan.root.get()),
+ "shouldCache"_attr = (shouldCache ? "yes" : "no"));
return plan;
}
} // namespace mongo::sbe
diff --git a/src/mongo/db/query/sbe_multi_planner.cpp b/src/mongo/db/query/sbe_multi_planner.cpp
index e6d4aedb39f..e07cc01f2e7 100644
--- a/src/mongo/db/query/sbe_multi_planner.cpp
+++ b/src/mongo/db/query/sbe_multi_planner.cpp
@@ -66,7 +66,7 @@ plan_ranker::CandidatePlan MultiPlanner::finalizeExecutionPlans(
LOGV2_DEBUG(4822876,
2,
"Winning plan",
- "planSumamry"_attr = Explain::getPlanSummary(winner.root.get()));
+ "planSummary"_attr = Explain::getPlanSummary(winner.root.get()));
// Close all candidate plans but the winner.
for (size_t ix = 1; ix < decision->candidateOrder.size(); ++ix) {
diff --git a/src/mongo/db/query/sbe_stage_builder_filter.cpp b/src/mongo/db/query/sbe_stage_builder_filter.cpp
index ec303984939..2fe8bb53b6d 100644
--- a/src/mongo/db/query/sbe_stage_builder_filter.cpp
+++ b/src/mongo/db/query/sbe_stage_builder_filter.cpp
@@ -27,8 +27,6 @@
* it in the license file.
*/
-#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery
-
#include "mongo/platform/basic.h"
#include "mongo/db/query/sbe_stage_builder_filter.h"
@@ -71,7 +69,6 @@
#include "mongo/db/matcher/schema/expression_internal_schema_unique_items.h"
#include "mongo/db/matcher/schema/expression_internal_schema_xor.h"
#include "mongo/db/query/sbe_stage_builder_expression.h"
-#include "mongo/logv2/log.h"
#include "mongo/util/str.h"
namespace mongo::stage_builder {
diff --git a/src/mongo/db/views/durable_view_catalog.cpp b/src/mongo/db/views/durable_view_catalog.cpp
index a9b221b3e64..806e6e8d87e 100644
--- a/src/mongo/db/views/durable_view_catalog.cpp
+++ b/src/mongo/db/views/durable_view_catalog.cpp
@@ -197,7 +197,6 @@ void DurableViewCatalogImpl::upsert(OperationContext* opCtx,
if (!id.isValid() || !systemViews->findDoc(opCtx, id, &oldView)) {
LOGV2_DEBUG(22544,
2,
- "Insert view {view} into {viewCatalog}",
"Insert view to system views catalog",
"view"_attr = view,
"viewCatalog"_attr = _db->getSystemViewsName());
@@ -232,7 +231,6 @@ void DurableViewCatalogImpl::remove(OperationContext* opCtx, const NamespaceStri
LOGV2_DEBUG(22545,
2,
- "Remove view {view} from {viewCatalog}",
"Remove view from system views catalog",
"view"_attr = name,
"viewCatalog"_attr = _db->getSystemViewsName());
diff --git a/src/mongo/db/views/view_catalog.cpp b/src/mongo/db/views/view_catalog.cpp
index 9a3918feb3d..e5e67c7a73e 100644
--- a/src/mongo/db/views/view_catalog.cpp
+++ b/src/mongo/db/views/view_catalog.cpp
@@ -93,11 +93,7 @@ Status ViewCatalog::reload(OperationContext* opCtx, ViewCatalogLookupBehavior lo
Status ViewCatalog::_reload(WithLock,
OperationContext* opCtx,
ViewCatalogLookupBehavior lookupBehavior) {
- LOGV2_DEBUG(22546,
- 1,
- "Reloading view catalog for database {db}",
- "Reloading view catalog for database",
- "db"_attr = _durable->getName());
+ LOGV2_DEBUG(22546, 1, "Reloading view catalog for database", "db"_attr = _durable->getName());
_viewMap.clear();
_valid = false;
@@ -141,7 +137,6 @@ Status ViewCatalog::_reload(WithLock,
} catch (const DBException& ex) {
auto status = ex.toStatus();
LOGV2(22547,
- "Could not load view catalog for database {db}: {error}",
"Could not load view catalog for database",
"db"_attr = _durable->getName(),
"error"_attr = status);
diff --git a/src/mongo/s/query/cluster_aggregation_planner.cpp b/src/mongo/s/query/cluster_aggregation_planner.cpp
index 6632dee755a..3f589ba2df5 100644
--- a/src/mongo/s/query/cluster_aggregation_planner.cpp
+++ b/src/mongo/s/query/cluster_aggregation_planner.cpp
@@ -215,7 +215,6 @@ Status dispatchMergingPipeline(const boost::intrusive_ptr<ExpressionContext>& ex
LOGV2_DEBUG(22835,
1,
- "Dispatching merge pipeline {command} to designated shard",
"Dispatching merge pipeline to designated shard",
"command"_attr = redact(mergeCmdObj));
diff --git a/src/mongo/s/query/cluster_cursor_manager.cpp b/src/mongo/s/query/cluster_cursor_manager.cpp
index 73ae523f978..892f1ab9d37 100644
--- a/src/mongo/s/query/cluster_cursor_manager.cpp
+++ b/src/mongo/s/query/cluster_cursor_manager.cpp
@@ -416,7 +416,6 @@ std::size_t ClusterCursorManager::killMortalCursorsInactiveSince(OperationContex
if (res) {
LOGV2(22837,
- "Cursor id {cursorId} timed out, idle since {idleSince}",
"Cursor timed out",
"cursorId"_attr = cursorId,
"idleSince"_attr = entry.getLastActive().toString());
@@ -625,10 +624,7 @@ std::pair<Status, int> ClusterCursorManager::killCursorsWithMatchingSessions(
return;
}
uassertStatusOK(mgr.killCursor(opCtx, *cursorNss, id));
- LOGV2(22838,
- "Killing cursor: {cursorId} as part of killing session(s)",
- "Killing cursor as part of killing session(s)",
- "cursorId"_attr = id);
+ LOGV2(22838, "Killing cursor as part of killing session(s)", "cursorId"_attr = id);
};
auto bySessionCursorKiller = makeKillCursorsBySessionAdaptor(opCtx, matcher, std::move(eraser));
diff --git a/src/mongo/s/query/cluster_find.cpp b/src/mongo/s/query/cluster_find.cpp
index a74b8817294..8dabfd0bf63 100644
--- a/src/mongo/s/query/cluster_find.cpp
+++ b/src/mongo/s/query/cluster_find.cpp
@@ -528,8 +528,6 @@ CursorId ClusterFind::runQuery(OperationContext* opCtx,
LOGV2_DEBUG(22839,
1,
- "Received error status for query {query} on attempt {attemptNumber} of "
- "{maxRetries}: {error}",
"Received error status for query",
"query"_attr = redact(query.toStringShort()),
"attemptNumber"_attr = retries,
@@ -571,8 +569,6 @@ CursorId ClusterFind::runQuery(OperationContext* opCtx,
LOGV2_DEBUG(22840,
1,
- "Received error status for query {query} on attempt {attemptNumber} of "
- "{maxRetries}: {error}",
"Received error status for query",
"query"_attr = redact(query.toStringShort()),
"attemptNumber"_attr = retries,
diff --git a/src/mongo/s/query/establish_cursors.cpp b/src/mongo/s/query/establish_cursors.cpp
index aefed711bd9..80bb399191e 100644
--- a/src/mongo/s/query/establish_cursors.cpp
+++ b/src/mongo/s/query/establish_cursors.cpp
@@ -75,7 +75,6 @@ void killOpOnShards(std::shared_ptr<executor::TaskExecutor> executor,
if (!args.response.isOK()) {
LOGV2_DEBUG(4625504,
2,
- "killOperations for {remoteHost} failed with {error}",
"killOperations failed",
"remoteHost"_attr = host.toString(),
"error"_attr = args.response);
@@ -84,11 +83,8 @@ void killOpOnShards(std::shared_ptr<executor::TaskExecutor> executor,
}));
}
} catch (const AssertionException& ex) {
- LOGV2_DEBUG(4625503,
- 2,
- "Failed to cleanup remote operations: {error}",
- "Failed to cleanup remote operations",
- "error"_attr = ex.toStatus());
+ LOGV2_DEBUG(
+ 4625503, 2, "Failed to cleanup remote operations", "error"_attr = ex.toStatus());
}
}
@@ -117,14 +113,12 @@ std::vector<RemoteCursor> establishCursors(OperationContext* opCtx,
requests.emplace_back(remote.first, requestWithOpKey.obj());
}
- LOGV2_DEBUG(
- 4625502,
- 3,
- "Establishing cursors on {opId} for {numRemotes} remotes with operation key {opKey}",
- "Establishing cursors on remotes",
- "opId"_attr = opCtx->getOpID(),
- "numRemotes"_attr = remotes.size(),
- "opKey"_attr = opKey);
+ LOGV2_DEBUG(4625502,
+ 3,
+ "Establishing cursors on remotes",
+ "opId"_attr = opCtx->getOpID(),
+ "numRemotes"_attr = remotes.size(),
+ "opKey"_attr = opKey);
// Send the requests
MultiStatementTransactionRequestsSender ars(
@@ -224,7 +218,6 @@ std::vector<RemoteCursor> establishCursors(OperationContext* opCtx,
}
LOGV2(4625501,
- "ARS failed with {error}, attempting to clean up {nRemotes} remote operations",
"ARS failed. Attempting to clean up remote operations",
"error"_attr = ex.toStatus(),
"nRemotes"_attr = remotesToClean.size());
@@ -243,7 +236,6 @@ std::vector<RemoteCursor> establishCursors(OperationContext* opCtx,
const executor::TaskExecutor::CallbackArgs& args) {
if (!args.status.isOK()) {
LOGV2_WARNING(48038,
- "Failed to schedule remote cursor cleanup: {error}",
"Failed to schedule remote cursor cleanup",
"error"_attr = args.status);
return;