author | Mindaugas Malinauskas <mindaugas.malinauskas@mongodb.com> | 2020-06-04 13:08:27 +0300
committer | Evergreen Agent <no-reply@evergreen.mongodb.com> | 2020-06-09 09:33:12 +0000
commit | 6bb53b0cd61385eb357fe6b3d9bfad0b4f23f0fd (patch)
tree | 66fed91ddbc868ffd14abd71a92969254c8bdd02
parent | 816ba22112096f783ecbfd1f76b386942d5d9ee7 (diff)
download | mongo-6bb53b0cd61385eb357fe6b3d9bfad0b4f23f0fd.tar.gz
SERVER-48353 logv2 cleanup for db/query, s/query, db/views, db/matcher

(cherry picked from commit 210053393a311b942835445134794d1bb38268a2)
30 files changed, 418 insertions(+), 311 deletions(-)
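The diff below applies one mechanical transformation across all 30 files: each LOGV2 call gets a stable, human-readable attribute name in place of the auto-generated one, plus a second, placeholder-free message string that log tooling can match on independently of the attributes; messages are also normalized to start with a capital letter and drop trailing periods. A minimal before/after sketch of the call shape, taken from the rewrite_expr.cpp hunk (it assumes the usual logv2 setup in the translation unit, i.e. `#include "mongo/logv2/log.h"` and the log-component #define):

```cpp
// Before: the attribute name was auto-derived from the C++ expression,
// leaking implementation syntax into the structured log output.
LOGV2_DEBUG(20725,
            5,
            "Expression prior to rewrite: {expression_serialize_false}",
            "expression_serialize_false"_attr = expression->serialize(false));

// After: a readable placeholder name plus a constant, format-free message,
// so JSON logs carry {"msg": "Expression prior to rewrite",
//                     "attr": {"expression": ...}}.
LOGV2_DEBUG(20725,
            5,
            "Expression prior to rewrite: {expression}",
            "Expression prior to rewrite",
            "expression"_attr = expression->serialize(false));
```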
diff --git a/jstests/noPassthrough/log_and_profile_query_hash.js b/jstests/noPassthrough/log_and_profile_query_hash.js index 1a8192f1770..e1d6564dfe8 100644 --- a/jstests/noPassthrough/log_and_profile_query_hash.js +++ b/jstests/noPassthrough/log_and_profile_query_hash.js @@ -156,7 +156,7 @@ const log = assert.commandWorked(testDB.adminCommand({getLog: "global"})).log; // 'planCacheKey' and 'queryHash'. Confirm only one line does this. const creationLogList = log.filter( logLine => ( - logLine.indexOf("Creating inactive cache entry for query shape") != -1 && + logLine.indexOf("Creating inactive cache entry for query") != -1 && (!isJsonLog(conn) ? (logLine.indexOf("planCacheKey " + String(onCreationHashes.planCacheKey)) != -1 && logLine.indexOf("queryHash " + String(onCreationHashes.queryHash)) != -1) diff --git a/src/mongo/db/matcher/expression_geo.cpp b/src/mongo/db/matcher/expression_geo.cpp index 92785dd0d79..d16aa413242 100644 --- a/src/mongo/db/matcher/expression_geo.cpp +++ b/src/mongo/db/matcher/expression_geo.cpp @@ -79,7 +79,10 @@ Status GeoExpression::parseQuery(const BSONObj& obj) { BSONElement elt = geoIt.next(); if (elt.fieldNameStringData() == "$uniqueDocs") { // Deprecated "$uniqueDocs" field - LOGV2_WARNING(23847, "deprecated $uniqueDocs option: {obj}", "obj"_attr = redact(obj)); + LOGV2_WARNING(23847, + "Deprecated $uniqueDocs option: {query}", + "Deprecated $uniqueDocs option", + "query"_attr = redact(obj)); } else { // The element must be a geo specifier. "$box", "$center", "$geometry", etc. geoContainer.reset(new GeometryContainer()); @@ -194,7 +197,7 @@ bool GeoNearExpression::parseLegacyQuery(const BSONObj& obj) { maxDistance = e.Number(); uassert(16896, "$maxDistance must be non-negative", maxDistance >= 0.0); } else if (fieldName == "$uniqueDocs") { - LOGV2_WARNING(23848, "ignoring deprecated option $uniqueDocs"); + LOGV2_WARNING(23848, "Ignoring deprecated option $uniqueDocs"); } else { // In a query document, $near queries can have no non-geo sibling parameters. 
uasserted(34413, diff --git a/src/mongo/db/matcher/rewrite_expr.cpp b/src/mongo/db/matcher/rewrite_expr.cpp index 6afd2afab52..bda2d8eed24 100644 --- a/src/mongo/db/matcher/rewrite_expr.cpp +++ b/src/mongo/db/matcher/rewrite_expr.cpp @@ -46,8 +46,9 @@ RewriteExpr::RewriteResult RewriteExpr::rewrite(const boost::intrusive_ptr<Expre const CollatorInterface* collator) { LOGV2_DEBUG(20725, 5, - "Expression prior to rewrite: {expression_serialize_false}", - "expression_serialize_false"_attr = expression->serialize(false)); + "Expression prior to rewrite: {expression}", + "Expression prior to rewrite", + "expression"_attr = expression->serialize(false)); RewriteExpr rewriteExpr(collator); std::unique_ptr<MatchExpression> matchExpression; @@ -56,13 +57,15 @@ RewriteExpr::RewriteResult RewriteExpr::rewrite(const boost::intrusive_ptr<Expre matchExpression = std::move(matchTree); LOGV2_DEBUG(20726, 5, - "Post-rewrite MatchExpression: {matchExpression_debugString}", - "matchExpression_debugString"_attr = matchExpression->debugString()); + "Post-rewrite MatchExpression: {expression}", + "Post-rewrite MatchExpression", + "expression"_attr = matchExpression->debugString()); matchExpression = MatchExpression::optimize(std::move(matchExpression)); LOGV2_DEBUG(20727, 5, - "Post-rewrite/post-optimized MatchExpression: {matchExpression_debugString}", - "matchExpression_debugString"_attr = matchExpression->debugString()); + "Post-rewrite/post-optimized MatchExpression: {expression}", + "Post-rewrite/post-optimized MatchExpression", + "expression"_attr = matchExpression->debugString()); } return {std::move(matchExpression), std::move(rewriteExpr._matchExprElemStorage)}; diff --git a/src/mongo/db/matcher/schema/json_schema_parser.cpp b/src/mongo/db/matcher/schema/json_schema_parser.cpp index 82a101fb005..df3c683d493 100644 --- a/src/mongo/db/matcher/schema/json_schema_parser.cpp +++ b/src/mongo/db/matcher/schema/json_schema_parser.cpp @@ -1673,17 +1673,17 @@ StatusWithMatchExpression JSONSchemaParser::parse( bool ignoreUnknownKeywords) { LOGV2_DEBUG(20728, 5, - "Parsing JSON Schema: {schema_jsonString_JsonStringFormat_LegacyStrict}", - "schema_jsonString_JsonStringFormat_LegacyStrict"_attr = - schema.jsonString(JsonStringFormat::LegacyStrict)); + "Parsing JSON Schema: {schema}", + "Parsing JSON Schema", + "schema"_attr = schema.jsonString(JsonStringFormat::LegacyStrict)); try { auto translation = _parse(expCtx, ""_sd, schema, allowedFeatures, ignoreUnknownKeywords); if (shouldLog(logv2::LogSeverity::Debug(5)) && translation.isOK()) { LOGV2_DEBUG(20729, 5, - "Translated schema match expression: {translation_getValue_debugString}", - "translation_getValue_debugString"_attr = - translation.getValue()->debugString()); + "Translated schema match expression: {expression}", + "Translated schema match expression", + "expression"_attr = translation.getValue()->debugString()); } return translation; } catch (const DBException& ex) { diff --git a/src/mongo/db/ops/update_result.cpp b/src/mongo/db/ops/update_result.cpp index 5b17018659b..a47fd3067ae 100644 --- a/src/mongo/db/ops/update_result.cpp +++ b/src/mongo/db/ops/update_result.cpp @@ -53,7 +53,16 @@ UpdateResult::UpdateResult(bool existing_, if (!existing && numMatched == 0 && !id.eoo()) { upserted = id.wrap(kUpsertedFieldName); } - LOGV2_DEBUG(20885, 4, "UpdateResult -- {}", ""_attr = redact(toString())); + LOGV2_DEBUG(20885, + 4, + "UpdateResult -- upserted: {upserted} modifiers: {modifiers} existing: {existing} " + "numDocsModified: {numModified} numMatched: 
{numMatched}", + "UpdateResult", + "numMatched"_attr = numMatched, + "numModified"_attr = numDocsModified, + "upserted"_attr = redact(upserted), + "modifiers"_attr = modifiers, + "existing"_attr = existing); } std::string UpdateResult::toString() const { diff --git a/src/mongo/db/ops/write_ops_exec.cpp b/src/mongo/db/ops/write_ops_exec.cpp index 92c5b9f4cae..21cd93cbf01 100644 --- a/src/mongo/db/ops/write_ops_exec.cpp +++ b/src/mongo/db/ops/write_ops_exec.cpp @@ -129,13 +129,12 @@ void finishCurOp(OperationContext* opCtx, CurOp* curOp) { curOp->getReadWriteType()); if (!curOp->debug().errInfo.isOK()) { - LOGV2_DEBUG( - 20886, - 3, - "Caught Assertion in {logicalOpToString_curOp_getLogicalOp}: {curOp_debug_errInfo}", - "logicalOpToString_curOp_getLogicalOp"_attr = - redact(logicalOpToString(curOp->getLogicalOp())), - "curOp_debug_errInfo"_attr = curOp->debug().errInfo.toString()); + LOGV2_DEBUG(20886, + 3, + "Caught Assertion in finishCurOp. Op: {operation}, error: {error}", + "Caught Assertion in finishCurOp", + "operation"_attr = redact(logicalOpToString(curOp->getLogicalOp())), + "error"_attr = curOp->debug().errInfo.toString()); } // Mark the op as complete, and log it if appropriate. Returns a boolean indicating whether @@ -153,7 +152,10 @@ void finishCurOp(OperationContext* opCtx, CurOp* curOp) { // We need to ignore all errors here. We don't want a successful op to fail because of a // failure to record stats. We also don't want to replace the error reported for an op that // is failing. - LOGV2(20887, "Ignoring error from finishCurOp: {ex}", "ex"_attr = redact(ex)); + LOGV2(20887, + "Ignoring error from finishCurOp: {error}", + "Ignoring error from finishCurOp", + "error"_attr = redact(ex)); } } @@ -176,9 +178,9 @@ public: replClientInfo().setLastOpToSystemLastOpTimeIgnoringInterrupt(_opCtx); LOGV2_DEBUG(20888, 5, - "Set last op to system time: {replClientInfo_getLastOp_getTimestamp}", - "replClientInfo_getLastOp_getTimestamp"_attr = - replClientInfo().getLastOp().getTimestamp()); + "Set last op to system time: {timestamp}", + "Set last op to system time", + "timestamp"_attr = replClientInfo().getLastOp().getTimestamp()); } } @@ -398,10 +400,11 @@ bool insertBatchAndHandleErrors(OperationContext* opCtx, "hangDuringBatchInsert", [&wholeOp]() { LOGV2(20889, - "batch insert - hangDuringBatchInsert fail point enabled for namespace " - "{wholeOp_getNamespace}. Blocking " - "until fail point is disabled.", - "wholeOp_getNamespace"_attr = wholeOp.getNamespace()); + "Batch insert - hangDuringBatchInsert fail point enabled for namespace " + "{namespace}. Blocking until fail point is disabled", + "Batch insert - hangDuringBatchInsert fail point enabled for a namespace. " + "Blocking until fail point is disabled", + "namespace"_attr = wholeOp.getNamespace()); }, true, // Check for interrupt periodically. wholeOp.getNamespace()); @@ -653,10 +656,11 @@ static SingleWriteResult performSingleUpdateOp(OperationContext* opCtx, "hangDuringBatchUpdate", [&ns]() { LOGV2(20890, - "batch update - hangDuringBatchUpdate fail point enabled for nss {ns}. Blocking " - "until " - "fail point is disabled.", - "ns"_attr = ns); + "Batch update - hangDuringBatchUpdate fail point enabled for namespace " + "{namespace}. Blocking until fail point is disabled", + "Batch update - hangDuringBatchUpdate fail point enabled for a namespace. 
" + "Blocking until fail point is disabled", + "namespace"_attr = ns); }, false /*checkForInterrupt*/, ns); @@ -906,8 +910,8 @@ static SingleWriteResult performSingleDeleteOp(OperationContext* opCtx, "hangDuringBatchRemove", []() { LOGV2(20891, - "batch remove - hangDuringBatchRemove fail point enabled. Blocking " - "until fail point is disabled."); + "Batch remove - hangDuringBatchRemove fail point enabled. Blocking until fail " + "point is disabled"); }, true // Check for interrupt periodically. ); diff --git a/src/mongo/db/query/canonical_query_encoder.cpp b/src/mongo/db/query/canonical_query_encoder.cpp index 4b1f3a17953..ce30e6f1353 100644 --- a/src/mongo/db/query/canonical_query_encoder.cpp +++ b/src/mongo/db/query/canonical_query_encoder.cpp @@ -269,12 +269,10 @@ void encodeGeoMatchExpression(const GeoMatchExpression* tree, StringBuilder* key *keyBuilder << "ss"; } else { LOGV2_ERROR(23849, - "unknown CRS type {int_geoQuery_getGeometry_getNativeCRS} in geometry of type " - "{geoQuery_getGeometry_getDebugType}", - "int_geoQuery_getGeometry_getNativeCRS"_attr = - (int)geoQuery.getGeometry().getNativeCRS(), - "geoQuery_getGeometry_getDebugType"_attr = - geoQuery.getGeometry().getDebugType()); + "Unknown CRS type {crsType} in geometry of type {geometryType}", + "Unknown CRS type in geometry", + "crsType"_attr = (int)geoQuery.getGeometry().getNativeCRS(), + "geometryType"_attr = geoQuery.getGeometry().getDebugType()); MONGO_UNREACHABLE; } } @@ -303,10 +301,10 @@ void encodeGeoNearMatchExpression(const GeoNearMatchExpression* tree, StringBuil *keyBuilder << "ss"; break; case UNSET: - LOGV2_ERROR( - 23850, - "unknown CRS type {int_nearQuery_centroid_crs} in point geometry for near query", - "int_nearQuery_centroid_crs"_attr = (int)nearQuery.centroid->crs); + LOGV2_ERROR(23850, + "Unknown CRS type {crsType} in point geometry for near query", + "Unknown CRS type in point geometry for near query", + "crsType"_attr = (int)nearQuery.centroid->crs); MONGO_UNREACHABLE; break; } diff --git a/src/mongo/db/query/collection_query_info.cpp b/src/mongo/db/query/collection_query_info.cpp index 68770d6cc3e..646f30eafad 100644 --- a/src/mongo/db/query/collection_query_info.cpp +++ b/src/mongo/db/query/collection_query_info.cpp @@ -181,8 +181,9 @@ void CollectionQueryInfo::clearQueryCache() { const Collection* coll = get.owner(this); LOGV2_DEBUG(20907, 1, - "{coll_ns}: clearing plan cache - collection info cache reset", - "coll_ns"_attr = coll->ns()); + "{namespace}: clearing plan cache - collection info cache reset", + "Clearing plan cache - collection info cache reset", + "namespace"_attr = coll->ns()); if (nullptr != _planCache.get()) { _planCache->clear(); } diff --git a/src/mongo/db/query/find.cpp b/src/mongo/db/query/find.cpp index 87a85b628d4..808fa941319 100644 --- a/src/mongo/db/query/find.cpp +++ b/src/mongo/db/query/find.cpp @@ -204,9 +204,9 @@ void generateBatch(int ntoreturn, // Log an error message and then perform the cleanup. case PlanExecutor::FAILURE: { LOGV2_ERROR(20918, - "getMore executor error, stats: {Explain_getWinningPlanStats_exec}", - "Explain_getWinningPlanStats_exec"_attr = - redact(Explain::getWinningPlanStats(exec))); + "getMore executor error, stats: {stats}", + "getMore executor error", + "stats"_attr = redact(Explain::getWinningPlanStats(exec))); // We should always have a valid status object by this point. 
auto status = WorkingSetCommon::getMemberObjectStatus(doc); invariant(!status.isOK()); @@ -246,7 +246,11 @@ Message getMore(OperationContext* opCtx, bool* isCursorAuthorized) { invariant(ntoreturn >= 0); - LOGV2_DEBUG(20909, 5, "Running getMore, cursorid: {cursorid}", "cursorid"_attr = cursorid); + LOGV2_DEBUG(20909, + 5, + "Running getMore, cursorid: {cursorId}", + "Running getMore", + "cursorId"_attr = cursorid); CurOp& curOp = *CurOp::get(opCtx); curOp.ensureStarted(); @@ -543,11 +547,11 @@ Message getMore(OperationContext* opCtx, cursorid = 0; curOp.debug().cursorExhausted = true; - LOGV2_DEBUG( - 20910, - 5, - "getMore NOT saving client cursor, ended with state {PlanExecutor_statestr_state}", - "PlanExecutor_statestr_state"_attr = PlanExecutor::statestr(state)); + LOGV2_DEBUG(20910, + 5, + "getMore NOT saving client cursor, ended with state {planExecutorState}", + "getMore NOT saving client cursor", + "planExecutorState"_attr = PlanExecutor::statestr(state)); } else { cursorFreer.dismiss(); // Continue caching the ClientCursor. @@ -557,8 +561,9 @@ Message getMore(OperationContext* opCtx, exec->detachFromOperationContext(); LOGV2_DEBUG(20911, 5, - "getMore saving client cursor ended with state {PlanExecutor_statestr_state}", - "PlanExecutor_statestr_state"_attr = PlanExecutor::statestr(state)); + "getMore saving client cursor, ended with state {planExecutorState}", + "getMore saving client cursor", + "planExecutorState"_attr = PlanExecutor::statestr(state)); // Set 'exhaust' if the client requested exhaust and the cursor is not exhausted. *exhaust = opCtx->isExhaust(); @@ -591,8 +596,11 @@ Message getMore(OperationContext* opCtx, qr.setCursorId(cursorid); qr.setStartingFrom(startingResult); qr.setNReturned(numResults); - LOGV2_DEBUG( - 20912, 5, "getMore returned {numResults} results\n", "numResults"_attr = numResults); + LOGV2_DEBUG(20912, + 5, + "getMore returned {numResults} results", + "getMore returned results", + "numResults"_attr = numResults); return Message(bb.release()); } @@ -630,9 +638,16 @@ bool runQuery(OperationContext* opCtx, "Can't canonicalize query"); invariant(cq.get()); - LOGV2_DEBUG(20913, 5, "Running query:\n{cq}", "cq"_attr = redact(cq->toString())); - LOGV2_DEBUG( - 20914, 2, "Running query: {cq_Short}", "cq_Short"_attr = redact(cq->toStringShort())); + LOGV2_DEBUG(20913, + 5, + "Running query:\n{query}", + "Running query", + "query"_attr = redact(cq->toString())); + LOGV2_DEBUG(20914, + 2, + "Running query: {query}", + "Running query", + "query"_attr = redact(cq->toStringShort())); // Parse, canonicalize, plan, transcribe, and get a plan executor. 
AutoGetCollectionForReadCommand ctx(opCtx, nss, AutoGetCollection::ViewMode::kViewsForbidden); @@ -732,10 +747,11 @@ bool runQuery(OperationContext* opCtx, if (FindCommon::enoughForFirstBatch(qr, numResults)) { LOGV2_DEBUG(20915, 5, - "Enough for first batch, wantMore={qr_wantMore} " - "ntoreturn={qr_getNToReturn_value_or_0} numResults={numResults}", - "qr_wantMore"_attr = qr.wantMore(), - "qr_getNToReturn_value_or_0"_attr = qr.getNToReturn().value_or(0), + "Enough for first batch, wantMore={wantMore} ntoreturn={numToReturn} " + "numResults={numResults}", + "Enough for first batch", + "wantMore"_attr = qr.wantMore(), + "numToReturn"_attr = qr.getNToReturn().value_or(0), "numResults"_attr = numResults); break; } @@ -780,11 +796,13 @@ bool runQuery(OperationContext* opCtx, }); ccId = pinnedCursor.getCursor()->cursorid(); - LOGV2_DEBUG(20916, - 5, - "caching executor with cursorid {ccId} after returning {numResults} results", - "ccId"_attr = ccId, - "numResults"_attr = numResults); + LOGV2_DEBUG( + 20916, + 5, + "Caching executor with cursorid {cursorId} after returning {numResults} results", + "Caching executor after returning results", + "cursorId"_attr = ccId, + "numResults"_attr = numResults); // Set curOp.debug().exhaust if the client requested exhaust and the cursor is not // exhausted. @@ -807,7 +825,8 @@ bool runQuery(OperationContext* opCtx, } else { LOGV2_DEBUG(20917, 5, - "Not caching executor but returning {numResults} results.", + "Not caching executor but returning {numResults} results", + "Not caching executor but returning results", "numResults"_attr = numResults); endQueryOp(opCtx, collection, *exec, numResults, ccId); } diff --git a/src/mongo/db/query/find_common.cpp b/src/mongo/db/query/find_common.cpp index db39e0fcb98..9d27341a806 100644 --- a/src/mongo/db/query/find_common.cpp +++ b/src/mongo/db/query/find_common.cpp @@ -82,8 +82,9 @@ void FindCommon::waitInFindBeforeMakingBatch(OperationContext* opCtx, const Cano auto whileWaitingFunc = [&, hasLogged = false]() mutable { if (!std::exchange(hasLogged, true)) { LOGV2(20908, - "Waiting in find before making batch for query - {cq_Short}", - "cq_Short"_attr = redact(cq.toStringShort())); + "Waiting in find before making batch for query - {query}", + "Waiting in find before making batch for query", + "query"_attr = redact(cq.toStringShort())); } }; diff --git a/src/mongo/db/query/get_executor.cpp b/src/mongo/db/query/get_executor.cpp index 0f0a496b8ec..9db484cf32f 100644 --- a/src/mongo/db/query/get_executor.cpp +++ b/src/mongo/db/query/get_executor.cpp @@ -201,12 +201,12 @@ IndexEntry indexEntryFromIndexCatalogEntry(OperationContext* opCtx, LOGV2_DEBUG(20920, 2, - "Multikey path metadata range index scan stats: {{ index: " - "{desc_indexName}, numSeeks: {mkAccessStats_keysExamined}, keysExamined: " - "{mkAccessStats_keysExamined2}}}", - "desc_indexName"_attr = desc->indexName(), - "mkAccessStats_keysExamined"_attr = mkAccessStats.keysExamined, - "mkAccessStats_keysExamined2"_attr = mkAccessStats.keysExamined); + "Multikey path metadata range index scan stats: {{ index: {index}, " + "numSeeks: {numSeeks}, keysExamined: {keysExamined}}}", + "Multikey path metadata range index scan stats", + "index"_attr = desc->indexName(), + "numSeeks"_attr = mkAccessStats.keysExamined, + "keysExamined"_attr = mkAccessStats.keysExamined); } } @@ -382,9 +382,10 @@ StatusWith<PrepareExecutionResult> prepareExecution(OperationContext* opCtx, const string& ns = canonicalQuery->ns(); LOGV2_DEBUG(20921, 2, - "Collection {ns} does not exist. 
Using EOF plan: {canonicalQuery_Short}", - "ns"_attr = ns, - "canonicalQuery_Short"_attr = redact(canonicalQuery->toStringShort())); + "Collection {namespace} does not exist. Using EOF plan: {query}", + "Collection does not exist. Using EOF plan", + "namespace"_attr = ns, + "query"_attr = redact(canonicalQuery->toStringShort())); root = std::make_unique<EOFStage>(canonicalQuery->getExpCtx().get()); return PrepareExecutionResult(std::move(canonicalQuery), nullptr, std::move(root)); } @@ -504,9 +505,9 @@ StatusWith<PrepareExecutionResult> prepareExecution(OperationContext* opCtx, turnIxscanIntoCount(querySolution.get())) { LOGV2_DEBUG(20923, 2, - "Using fast count: {canonicalQuery_Short}", - "canonicalQuery_Short"_attr = - redact(canonicalQuery->toStringShort())); + "Using fast count: {query}", + "Using fast count", + "query"_attr = redact(canonicalQuery->toStringShort())); } auto root = @@ -535,8 +536,9 @@ StatusWith<PrepareExecutionResult> prepareExecution(OperationContext* opCtx, SubplanStage::canUseSubplanning(*canonicalQuery)) { LOGV2_DEBUG(20924, 2, - "Running query as sub-queries: {canonicalQuery_Short}", - "canonicalQuery_Short"_attr = redact(canonicalQuery->toStringShort())); + "Running query as sub-queries: {query}", + "Running query as sub-queries", + "query"_attr = redact(canonicalQuery->toStringShort())); root = std::make_unique<SubplanStage>( canonicalQuery->getExpCtx().get(), collection, ws, plannerParams, canonicalQuery.get()); @@ -563,11 +565,10 @@ StatusWith<PrepareExecutionResult> prepareExecution(OperationContext* opCtx, LOGV2_DEBUG(20925, 2, - "Using fast count: {canonicalQuery_Short}, planSummary: " - "{Explain_getPlanSummary_root_get}", - "canonicalQuery_Short"_attr = redact(canonicalQuery->toStringShort()), - "Explain_getPlanSummary_root_get"_attr = - Explain::getPlanSummary(root.get())); + "Using fast count: {query}, planSummary: {planSummary}", + "Using fast count", + "query"_attr = redact(canonicalQuery->toStringShort()), + "planSummary"_attr = Explain::getPlanSummary(root.get())); return PrepareExecutionResult( std::move(canonicalQuery), std::move(solutions[i]), std::move(root)); @@ -581,10 +582,11 @@ StatusWith<PrepareExecutionResult> prepareExecution(OperationContext* opCtx, LOGV2_DEBUG(20926, 2, - "Only one plan is available; it will be run but will not be cached. " - "{canonicalQuery_Short}, planSummary: {Explain_getPlanSummary_root_get}", - "canonicalQuery_Short"_attr = redact(canonicalQuery->toStringShort()), - "Explain_getPlanSummary_root_get"_attr = Explain::getPlanSummary(root.get())); + "Only one plan is available; it will be run but will not be cached. {query}, " + "planSummary: {planSummary}", + "Only one plan is available; it will be run but will not be cached", + "query"_attr = redact(canonicalQuery->toStringShort()), + "planSummary"_attr = Explain::getPlanSummary(root.get())); return PrepareExecutionResult( std::move(canonicalQuery), std::move(solutions[0]), std::move(root)); @@ -779,9 +781,10 @@ StatusWith<unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorDelete( // contains an EOF stage. LOGV2_DEBUG(20927, 2, - "Collection {nss_ns} does not exist. Using EOF stage: {request_getQuery}", - "nss_ns"_attr = nss.ns(), - "request_getQuery"_attr = redact(request->getQuery())); + "Collection {namespace} does not exist. Using EOF stage: {query}", + "Collection does not exist. 
Using EOF stage", + "namespace"_attr = nss.ns(), + "query"_attr = redact(request->getQuery())); return PlanExecutor::make( opCtx, std::move(ws), std::make_unique<EOFStage>(expCtx.get()), nullptr, policy, nss); } @@ -813,8 +816,9 @@ StatusWith<unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorDelete( request->getProj().isEmpty() && hasCollectionDefaultCollation) { LOGV2_DEBUG(20928, 2, - "Using idhack: {unparsedQuery}", - "unparsedQuery"_attr = redact(unparsedQuery)); + "Using idhack: {query}", + "Using idhack", + "query"_attr = redact(unparsedQuery)); auto idHackStage = std::make_unique<IDHackStage>( expCtx.get(), unparsedQuery["_id"].wrap(), ws.get(), descriptor); @@ -945,9 +949,10 @@ StatusWith<unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorUpdate( if (!collection) { LOGV2_DEBUG(20929, 2, - "Collection {nss_ns} does not exist. Using EOF stage: {request_getQuery}", - "nss_ns"_attr = nss.ns(), - "request_getQuery"_attr = redact(request->getQuery())); + "Collection {namespace} does not exist. Using EOF stage: {query}", + "Collection does not exist. Using EOF stage", + "namespace"_attr = nss.ns(), + "query"_attr = redact(request->getQuery())); return PlanExecutor::make( opCtx, std::move(ws), std::make_unique<EOFStage>(expCtx.get()), nullptr, policy, nss); } @@ -974,8 +979,9 @@ StatusWith<unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorUpdate( request->getProj().isEmpty() && hasCollectionDefaultCollation) { LOGV2_DEBUG(20930, 2, - "Using idhack: {unparsedQuery}", - "unparsedQuery"_attr = redact(unparsedQuery)); + "Using idhack: {query}", + "Using idhack", + "query"_attr = redact(unparsedQuery)); // Working set 'ws' is discarded. InternalPlanner::updateWithIdHack() makes its own // WorkingSet. @@ -1585,11 +1591,10 @@ StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorForS LOGV2_DEBUG(20931, 2, - "Using fast distinct: {parsedDistinct_getQuery_Short}, planSummary: " - "{Explain_getPlanSummary_root_get}", - "parsedDistinct_getQuery_Short"_attr = - redact(parsedDistinct->getQuery()->toStringShort()), - "Explain_getPlanSummary_root_get"_attr = Explain::getPlanSummary(root.get())); + "Using fast distinct: {query}, planSummary: {planSummary}", + "Using fast distinct", + "query"_attr = redact(parsedDistinct->getQuery()->toStringShort()), + "planSummary"_attr = Explain::getPlanSummary(root.get())); return PlanExecutor::make(parsedDistinct->releaseQuery(), std::move(ws), @@ -1631,12 +1636,10 @@ getExecutorDistinctFromIndexSolutions(OperationContext* opCtx, LOGV2_DEBUG(20932, 2, - "Using fast distinct: {parsedDistinct_getQuery_Short}, planSummary: " - "{Explain_getPlanSummary_root_get}", - "parsedDistinct_getQuery_Short"_attr = - redact(parsedDistinct->getQuery()->toStringShort()), - "Explain_getPlanSummary_root_get"_attr = - Explain::getPlanSummary(root.get())); + "Using fast distinct: {query}, planSummary: {planSummary}", + "Using fast distinct", + "query"_attr = redact(parsedDistinct->getQuery()->toStringShort()), + "planSummary"_attr = Explain::getPlanSummary(root.get())); return PlanExecutor::make(parsedDistinct->releaseQuery(), std::move(ws), diff --git a/src/mongo/db/query/index_bounds_builder.cpp b/src/mongo/db/query/index_bounds_builder.cpp index cc856d9c092..1f8dcae97e9 100644 --- a/src/mongo/db/query/index_bounds_builder.cpp +++ b/src/mongo/db/query/index_bounds_builder.cpp @@ -888,14 +888,16 @@ void IndexBoundsBuilder::_translatePredicate(const MatchExpression* expr, *tightnessOut = IndexBoundsBuilder::INEXACT_FETCH; } else { 
LOGV2_WARNING(20934, - "Planner error trying to build geo bounds for {elt} index element.", - "elt"_attr = elt.toString()); + "Planner error trying to build geo bounds for {element} index element", + "Planner error trying to build geo bounds for an index element", + "element"_attr = elt.toString()); verify(0); } } else { LOGV2_WARNING(20935, - "Planner error, trying to build bounds for expression: {expr_debugString}", - "expr_debugString"_attr = redact(expr->debugString())); + "Planner error, trying to build bounds for expression: {expression}", + "Planner error while trying to build bounds for expression", + "expression"_attr = redact(expr->debugString())); verify(0); } } @@ -1201,10 +1203,11 @@ void IndexBoundsBuilder::alignBounds(IndexBounds* bounds, const BSONObj& kp, int if (!bounds->isValidFor(kp, scanDir)) { LOGV2(20933, - "INVALID BOUNDS: {bounds}\nkp = {kp}\nscanDir = {scanDir}", + "INVALID BOUNDS: {bounds}\nkp = {keyPattern}\nscanDir = {scanDirection}", + "INVALID BOUNDS", "bounds"_attr = redact(bounds->toString()), - "kp"_attr = redact(kp), - "scanDir"_attr = scanDir); + "keyPattern"_attr = redact(kp), + "scanDirection"_attr = scanDir); MONGO_UNREACHABLE; } } diff --git a/src/mongo/db/query/plan_cache.cpp b/src/mongo/db/query/plan_cache.cpp index f0c7e0384cd..6b3c15bc4f8 100644 --- a/src/mongo/db/query/plan_cache.cpp +++ b/src/mongo/db/query/plan_cache.cpp @@ -440,8 +440,9 @@ std::unique_ptr<CachedSolution> PlanCache::getCacheEntryIfActive(const PlanCache if (res.state == PlanCache::CacheEntryState::kPresentInactive) { LOGV2_DEBUG(20936, 2, - "Not using cached entry for {res_cachedSolution} since it is inactive", - "res_cachedSolution"_attr = redact(res.cachedSolution->toString())); + "Not using cached entry for {cachedSolution} since it is inactive", + "Not using cached entry since it is inactive", + "cachedSolution"_attr = redact(res.cachedSolution->toString())); return nullptr; } @@ -464,9 +465,10 @@ PlanCache::NewEntryState PlanCache::getNewEntryState(const CanonicalQuery& query if (!oldEntry) { LOGV2_DEBUG(20937, 1, - "Creating inactive cache entry for query shape {query_Short} queryHash " - "{queryHash} planCacheKey {planCacheKey} with works value {newWorks}", - "query_Short"_attr = redact(query.toStringShort()), + "Creating inactive cache entry for query shape {query} queryHash {queryHash} " + "planCacheKey {planCacheKey} with works value {newWorks}", + "Creating inactive cache entry for query", + "query"_attr = redact(query.toStringShort()), "queryHash"_attr = unsignedIntToFixedLengthHex(queryHash), "planCacheKey"_attr = unsignedIntToFixedLengthHex(planCacheKey), "newWorks"_attr = newWorks); @@ -481,28 +483,31 @@ PlanCache::NewEntryState PlanCache::getNewEntryState(const CanonicalQuery& query LOGV2_DEBUG(20938, 1, - "Replacing active cache entry for query {query_Short} queryHash {queryHash} " - "planCacheKey {planCacheKey} with works {oldEntry_works} with a plan with " + "Replacing active cache entry for query {query} queryHash {queryHash} " + "planCacheKey {planCacheKey} with works {oldWorks} with a plan with " "works {newWorks}", - "query_Short"_attr = redact(query.toStringShort()), + "Replacing active cache entry for query", + "query"_attr = redact(query.toStringShort()), "queryHash"_attr = unsignedIntToFixedLengthHex(queryHash), "planCacheKey"_attr = unsignedIntToFixedLengthHex(planCacheKey), - "oldEntry_works"_attr = oldEntry->works, + "oldWorks"_attr = oldEntry->works, "newWorks"_attr = newWorks); res.shouldBeCreated = true; res.shouldBeActive = true; } else if 
(oldEntry->isActive) { LOGV2_DEBUG(20939, 1, - "Attempt to write to the planCache for query {query_Short} queryHash " - "{queryHash} planCacheKey {planCacheKey} with a plan with works {newWorks} is " - "a noop, since there's already a plan with works value {oldEntry_works}", - "query_Short"_attr = redact(query.toStringShort()), + "Attempt to write to the planCache for query {query} queryHash {queryHash} " + "planCacheKey {planCacheKey} with a plan with works {newWorks} is a noop, " + "since there's already a plan with works value {oldWorks}", + "Attempt to write to the planCache resulted in a noop, since there's already " + "an active cache entry with a lower works value", + "query"_attr = redact(query.toStringShort()), "queryHash"_attr = unsignedIntToFixedLengthHex(queryHash), "planCacheKey"_attr = unsignedIntToFixedLengthHex(planCacheKey), "newWorks"_attr = newWorks, - "oldEntry_works"_attr = oldEntry->works); - // There is already an active cache entry with a higher works value. + "oldWorks"_attr = oldEntry->works); + // There is already an active cache entry with a lower works value. // We do nothing. res.shouldBeCreated = false; } else if (newWorks > oldEntry->works) { @@ -517,16 +522,16 @@ PlanCache::NewEntryState PlanCache::getNewEntryState(const CanonicalQuery& query const double increasedWorks = std::max( oldEntry->works + 1u, static_cast<size_t>(oldEntry->works * growthCoefficient)); - LOGV2_DEBUG( - 20940, - 1, - "Increasing work value associated with cache entry for query {query_Short} queryHash " - "{queryHash} planCacheKey {planCacheKey} from {oldEntry_works} to {increasedWorks}", - "query_Short"_attr = redact(query.toStringShort()), - "queryHash"_attr = unsignedIntToFixedLengthHex(queryHash), - "planCacheKey"_attr = unsignedIntToFixedLengthHex(planCacheKey), - "oldEntry_works"_attr = oldEntry->works, - "increasedWorks"_attr = increasedWorks); + LOGV2_DEBUG(20940, + 1, + "Increasing work value associated with cache entry for query {query} queryHash " + "{queryHash} planCacheKey {planCacheKey} from {oldWorks} to {increasedWorks}", + "Increasing work value associated with cache entry", + "query"_attr = redact(query.toStringShort()), + "queryHash"_attr = unsignedIntToFixedLengthHex(queryHash), + "planCacheKey"_attr = unsignedIntToFixedLengthHex(planCacheKey), + "oldWorks"_attr = oldEntry->works, + "increasedWorks"_attr = increasedWorks); oldEntry->works = increasedWorks; // Don't create a new entry. @@ -537,13 +542,14 @@ PlanCache::NewEntryState PlanCache::getNewEntryState(const CanonicalQuery& query // cache (as an active entry) the plan this query used for the future. LOGV2_DEBUG(20941, 1, - "Inactive cache entry for query {query_Short} queryHash {queryHash} " - "planCacheKey {planCacheKey} with works {oldEntry_works} is being promoted to " - "active entry with works value {newWorks}", - "query_Short"_attr = redact(query.toStringShort()), + "Inactive cache entry for query {query} queryHash {queryHash} planCacheKey " + "{planCacheKey} with works {oldWorks} is being promoted to active entry with " + "works value {newWorks}", + "Inactive cache entry for query is being promoted to active entry", + "query"_attr = redact(query.toStringShort()), "queryHash"_attr = unsignedIntToFixedLengthHex(queryHash), "planCacheKey"_attr = unsignedIntToFixedLengthHex(planCacheKey), - "oldEntry_works"_attr = oldEntry->works, + "oldWorks"_attr = oldEntry->works, "newWorks"_attr = newWorks); // We'll replace the old inactive entry with an active entry. 
res.shouldBeCreated = true; @@ -625,9 +631,10 @@ Status PlanCache::set(const CanonicalQuery& query, if (nullptr != evictedEntry.get()) { LOGV2_DEBUG(20942, 1, - "{query_nss}: plan cache maximum size exceeded - removed least recently used " + "{namespace}: plan cache maximum size exceeded - removed least recently used " "entry {evictedEntry}", - "query_nss"_attr = query.nss(), + "Plan cache maximum size exceeded - removed least recently used entry", + "namespace"_attr = query.nss(), "evictedEntry"_attr = redact(evictedEntry->toString())); } diff --git a/src/mongo/db/query/plan_enumerator.cpp b/src/mongo/db/query/plan_enumerator.cpp index 52036f2d76b..87a6263c44f 100644 --- a/src/mongo/db/query/plan_enumerator.cpp +++ b/src/mongo/db/query/plan_enumerator.cpp @@ -340,7 +340,7 @@ PlanEnumerator::MemoID PlanEnumerator::memoIDForNode(MatchExpression* node) { stdx::unordered_map<MatchExpression*, MemoID>::iterator it = _nodeToId.find(node); if (_nodeToId.end() == it) { - LOGV2_ERROR(20945, "Trying to look up memo entry for node, none found."); + LOGV2_ERROR(20945, "Trying to look up memo entry for node, none found"); MONGO_UNREACHABLE; } @@ -359,8 +359,11 @@ unique_ptr<MatchExpression> PlanEnumerator::getNext() { tagForSort(tree.get()); _root->resetTag(); - LOGV2_DEBUG( - 20943, 5, "Enumerator: memo just before moving:\n{dumpMemo}", "dumpMemo"_attr = dumpMemo()); + LOGV2_DEBUG(20943, + 5, + "Enumerator: memo just before moving:\n{memo}", + "Enumerator: memo just before moving", + "memo"_attr = dumpMemo()); _done = nextMemo(memoIDForNode(_root)); return tree; } @@ -1569,7 +1572,7 @@ void PlanEnumerator::compound(const vector<MatchExpression*>& tryCompound, // void PlanEnumerator::tagMemo(size_t id) { - LOGV2_DEBUG(20944, 5, "Tagging memoID {id}", "id"_attr = id); + LOGV2_DEBUG(20944, 5, "Tagging memoID {id}", "Tagging memoID", "id"_attr = id); NodeAssignment* assign = _memo[id]; verify(nullptr != assign); diff --git a/src/mongo/db/query/plan_executor_impl.cpp b/src/mongo/db/query/plan_executor_impl.cpp index e01f4110f4d..5abe8bdd239 100644 --- a/src/mongo/db/query/plan_executor_impl.cpp +++ b/src/mongo/db/query/plan_executor_impl.cpp @@ -502,8 +502,8 @@ PlanExecutor::ExecState PlanExecutorImpl::_waitForInserts(CappedInsertNotifierDa notifierData->notifier->waitUntil(notifierData->lastEOFVersion, deadline); if (MONGO_unlikely(planExecutorHangWhileYieldedInWaitForInserts.shouldFail())) { LOGV2(4452903, - "PlanExecutor - planExecutorHangWhileYieldedInWaitForInserts fail point " - "enabled. Blocking until fail point is disabled"); + "PlanExecutor - planExecutorHangWhileYieldedInWaitForInserts fail point enabled. " + "Blocking until fail point is disabled"); planExecutorHangWhileYieldedInWaitForInserts.pauseWhileSet(); } }); @@ -654,7 +654,7 @@ PlanExecutor::ExecState PlanExecutorImpl::_getNextImpl(Snapshotted<Document>* ob }))) { LOGV2(20946, "PlanExecutor - planExecutorHangBeforeShouldWaitForInserts fail point " - "enabled. Blocking until fail point is disabled."); + "enabled. 
Blocking until fail point is disabled"); planExecutorHangBeforeShouldWaitForInserts.pauseWhileSet(); } if (!_shouldWaitForInserts()) { diff --git a/src/mongo/db/query/plan_ranker.cpp b/src/mongo/db/query/plan_ranker.cpp index 232419bbc99..e81612cf7e7 100644 --- a/src/mongo/db/query/plan_ranker.cpp +++ b/src/mongo/db/query/plan_ranker.cpp @@ -96,27 +96,28 @@ StatusWith<std::unique_ptr<PlanRankingDecision>> PlanRanker::pickBestPlan( LOGV2_DEBUG( 20956, 5, - "Scoring plan " - "{i}:\n{candidates_i_solution}Stats:\n{Explain_statsToBSON_statTrees_i_jsonString_" - "ExtendedRelaxedV2_0_0_true}", - "i"_attr = i, - "candidates_i_solution"_attr = redact(candidates[i].solution->toString()), - "Explain_statsToBSON_statTrees_i_jsonString_ExtendedRelaxedV2_0_0_true"_attr = - redact(Explain::statsToBSON(*statTrees[i]) - .jsonString(ExtendedRelaxedV2_0_0, true))); + "Scoring plan {planIndex}:\n{querySolution}Stats:\n{stats}", + "Scoring plan", + "planIndex"_attr = i, + "querySolution"_attr = redact(candidates[i].solution->toString()), + "stats"_attr = redact( + Explain::statsToBSON(*statTrees[i]).jsonString(ExtendedRelaxedV2_0_0, true))); LOGV2_DEBUG(20957, 2, - "Scoring query plan: {Explain_getPlanSummary_candidates_i_root} " - "planHitEOF={statTrees_i_common_isEOF}", - "Explain_getPlanSummary_candidates_i_root"_attr = - Explain::getPlanSummary(candidates[i].root), - "statTrees_i_common_isEOF"_attr = statTrees[i]->common.isEOF); + "Scoring query plan: {planSummary} planHitEOF={planHitEOF}", + "Scoring query plan", + "planSummary"_attr = Explain::getPlanSummary(candidates[i].root), + "planHitEOF"_attr = statTrees[i]->common.isEOF); double score = scoreTree(statTrees[i].get()); - LOGV2_DEBUG(20958, 5, "score = {score}", "score"_attr = score); + LOGV2_DEBUG( + 20958, 5, "Basic plan score: {score}", "Basic plan score", "score"_attr = score); if (statTrees[i]->common.isEOF) { - LOGV2_DEBUG( - 20959, 5, "Adding +{eofBonus} EOF bonus to score.", "eofBonus"_attr = eofBonus); + LOGV2_DEBUG(20959, + 5, + "Adding +{eofBonus} EOF bonus to score", + "Adding EOF bonus to score", + "eofBonus"_attr = eofBonus); score += 1; } @@ -125,10 +126,9 @@ StatusWith<std::unique_ptr<PlanRankingDecision>> PlanRanker::pickBestPlan( failed.push_back(i); LOGV2_DEBUG(20960, 2, - "Not scording plan: {Explain_getPlanSummary_candidates_i_root} because the " - "plan failed.", - "Explain_getPlanSummary_candidates_i_root"_attr = - Explain::getPlanSummary(candidates[i].root)); + "Not scoring plan: {planSummary} because the plan failed", + "Not scoring a plan because the plan failed", + "planSummary"_attr = Explain::getPlanSummary(candidates[i].root)); } } @@ -273,16 +273,22 @@ double PlanRanker::scoreTree(const PlanStageStats* stats) { double tieBreakers = noFetchBonus + noSortBonus + noIxisectBonus; double score = baseScore + productivity + tieBreakers; - StringBuilder sb; - sb << "score(" << str::convertDoubleToString(score) << ") = baseScore(" - << str::convertDoubleToString(baseScore) << ")" - << " + productivity((" << stats->common.advanced << " advanced)/(" << stats->common.works - << " works) = " << str::convertDoubleToString(productivity) << ")" - << " + tieBreakers(" << str::convertDoubleToString(noFetchBonus) << " noFetchBonus + " - << str::convertDoubleToString(noSortBonus) << " noSortBonus + " - << str::convertDoubleToString(noIxisectBonus) - << " noIxisectBonus = " << str::convertDoubleToString(tieBreakers) << ")"; - LOGV2_DEBUG(20961, 2, "{sb_str}", "sb_str"_attr = sb.str()); + if (shouldLog(logv2::LogSeverity::Debug(2))) { + 
StringBuilder sb; + sb << "baseScore(" << str::convertDoubleToString(baseScore) << ")" + << " + productivity((" << stats->common.advanced << " advanced)/(" << stats->common.works + << " works) = " << str::convertDoubleToString(productivity) << ")" + << " + tieBreakers(" << str::convertDoubleToString(noFetchBonus) << " noFetchBonus + " + << str::convertDoubleToString(noSortBonus) << " noSortBonus + " + << str::convertDoubleToString(noIxisectBonus) + << " noIxisectBonus = " << str::convertDoubleToString(tieBreakers) << ")"; + LOGV2_DEBUG(20961, + 2, + "score({score}) = {calculation}", + "Plan score calculation", + "score"_attr = score, + "calculation"_attr = sb.str()); + } if (internalQueryForceIntersectionPlans.load()) { if (hasStage(STAGE_AND_HASH, stats) || hasStage(STAGE_AND_SORTED, stats)) { @@ -291,8 +297,9 @@ double PlanRanker::scoreTree(const PlanStageStats* stats) { score += 3; LOGV2_DEBUG(20962, 5, - "Score boosted to {score} due to intersection forcing.", - "score"_attr = score); + "Score boosted to {newScore} due to intersection forcing", + "Score boosted due to intersection forcing", + "newScore"_attr = score); } } diff --git a/src/mongo/db/query/planner_access.cpp b/src/mongo/db/query/planner_access.cpp index acacf1921ba..2519fd05b0d 100644 --- a/src/mongo/db/query/planner_access.cpp +++ b/src/mongo/db/query/planner_access.cpp @@ -1115,7 +1115,7 @@ std::unique_ptr<QuerySolutionNode> QueryPlannerAccess::buildIndexedAnd( LOGV2_DEBUG(20947, 5, "Can't build index intersection solution: AND_SORTED is not possible and " - "AND_HASH is disabled."); + "AND_HASH is disabled"); return nullptr; } } @@ -1182,7 +1182,7 @@ std::unique_ptr<QuerySolutionNode> QueryPlannerAccess::buildIndexedOr( // when any of our children lack index tags. If a node lacks an index tag it cannot // be answered via an index. if (!inArrayOperator && 0 != root->numChildren()) { - LOGV2_WARNING(20948, "planner OR error, non-indexed child of OR."); + LOGV2_WARNING(20948, "Planner OR error, non-indexed child of OR"); // We won't enumerate an OR without indices for each child, so this isn't an issue, even // if we have an AND with an OR child -- we won't get here unless the OR is fully // indexed. diff --git a/src/mongo/db/query/planner_analysis.cpp b/src/mongo/db/query/planner_analysis.cpp index 84789a9898f..130a3e13cf9 100644 --- a/src/mongo/db/query/planner_analysis.cpp +++ b/src/mongo/db/query/planner_analysis.cpp @@ -346,8 +346,9 @@ std::unique_ptr<ProjectionNode> analyzeProjection(const CanonicalQuery& query, const bool hasSortStage) { LOGV2_DEBUG(20949, 5, - "PROJECTION: Current plan is:\n{solnRoot}", - "solnRoot"_attr = redact(solnRoot->toString())); + "PROJECTION: Current plan is:\n{plan}", + "PROJECTION: Current plan", + "plan"_attr = redact(solnRoot->toString())); // If the projection requires the entire document we add a fetch stage if not present. Otherwise // we add a fetch stage if we are not covered. @@ -647,11 +648,13 @@ bool QueryPlannerAnalysis::explodeForSort(const CanonicalQuery& query, // Too many ixscans spoil the performance. 
if (totalNumScans > (size_t)internalQueryMaxScansToExplode.load()) { - LOGV2_DEBUG(20950, - 5, - "Could expand ixscans to pull out sort order but resulting scan " - "count({totalNumScans}) is too high.", - "totalNumScans"_attr = totalNumScans); + LOGV2_DEBUG( + 20950, + 5, + "Could expand ixscans to pull out sort order but resulting scan count({numScans}) is " + "too high", + "Could expand ixscans to pull out sort order but resulting scan count is too high", + "numScans"_attr = totalNumScans); return false; } @@ -712,8 +715,9 @@ QuerySolutionNode* QueryPlannerAnalysis::analyzeSort(const CanonicalQuery& query QueryPlannerCommon::reverseScans(solnRoot); LOGV2_DEBUG(20951, 5, - "Reversing ixscan to provide sort. Result: {solnRoot}", - "solnRoot"_attr = redact(solnRoot->toString())); + "Reversing ixscan to provide sort. Result: {newPlan}", + "Reversing ixscan to provide sort", + "newPlan"_attr = redact(solnRoot->toString())); return solnRoot; } diff --git a/src/mongo/db/query/planner_ixselect.cpp b/src/mongo/db/query/planner_ixselect.cpp index e228153bb72..054e6182c9a 100644 --- a/src/mongo/db/query/planner_ixselect.cpp +++ b/src/mongo/db/query/planner_ixselect.cpp @@ -289,8 +289,10 @@ std::vector<IndexEntry> QueryPlannerIXSelect::findIndexesByHint( if (entry.identifier.catalogName == hintName) { LOGV2_DEBUG(20952, 5, - "Hint by name specified, restricting indices to {entry_keyPattern}", - "entry_keyPattern"_attr = entry.keyPattern.toString()); + "Hint by name specified, restricting indices to {keyPattern}", + "Hint by name specified, restricting indices", + "name"_attr = entry.identifier.catalogName, + "keyPattern"_attr = entry.keyPattern); out.push_back(entry); } } @@ -299,8 +301,10 @@ std::vector<IndexEntry> QueryPlannerIXSelect::findIndexesByHint( if (SimpleBSONObjComparator::kInstance.evaluate(entry.keyPattern == hintedIndex)) { LOGV2_DEBUG(20953, 5, - "Hint specified, restricting indices to {hintedIndex}", - "hintedIndex"_attr = hintedIndex.toString()); + "Hint specified, restricting indices to {keyPattern}", + "Hint specified, restricting indices", + "name"_attr = entry.identifier.catalogName, + "keyPattern"_attr = entry.keyPattern); out.push_back(entry); } } @@ -585,9 +589,10 @@ bool QueryPlannerIXSelect::_compatible(const BSONElement& keyPatternElt, return false; } else { LOGV2_WARNING(20954, - "Unknown indexing for node {node_debugString} and field {keyPatternElt}", - "node_debugString"_attr = node->debugString(), - "keyPatternElt"_attr = keyPatternElt.toString()); + "Unknown indexing for node {node} and field {field}", + "Unknown indexing for given node and field", + "node"_attr = node->debugString(), + "field"_attr = keyPatternElt.toString()); verify(0); } } diff --git a/src/mongo/db/query/planner_wildcard_helpers.cpp b/src/mongo/db/query/planner_wildcard_helpers.cpp index 5535d1bf8da..f09c16495b1 100644 --- a/src/mongo/db/query/planner_wildcard_helpers.cpp +++ b/src/mongo/db/query/planner_wildcard_helpers.cpp @@ -247,12 +247,12 @@ bool validateNumericPathComponents(const MultikeyPaths& multikeyPaths, if (arrayIndices.size() > kWildcardMaxArrayIndexTraversalDepth) { LOGV2_DEBUG(20955, 2, - "Declining to answer query on field '{queryPath_dottedField}' with $** index, " - "as it traverses through more than {kWildcardMaxArrayIndexTraversalDepth} " - "nested array indices.", - "queryPath_dottedField"_attr = queryPath.dottedField(), - "kWildcardMaxArrayIndexTraversalDepth"_attr = - kWildcardMaxArrayIndexTraversalDepth); + "Declining to answer query on field '{field}' with $** index, as 
it traverses " + "through more than {maxNestedArrayIndices} nested array indices", + "Declining to answer query on a field with $** index, as it traverses through " + "more than the maximum permitted depth of nested array indices", + "field"_attr = queryPath.dottedField(), + "maxNestedArrayIndices"_attr = kWildcardMaxArrayIndexTraversalDepth); return false; } diff --git a/src/mongo/db/query/query_planner.cpp b/src/mongo/db/query/query_planner.cpp index 29413613cc6..bd75a303166 100644 --- a/src/mongo/db/query/query_planner.cpp +++ b/src/mongo/db/query/query_planner.cpp @@ -477,10 +477,11 @@ StatusWith<std::unique_ptr<QuerySolution>> QueryPlanner::planFromCache( LOGV2_DEBUG(20963, 5, - "Tagging the match expression according to cache data: " - "\nFilter:\n{clone_debugString}Cache data:\n{winnerCacheData}", - "clone_debugString"_attr = redact(clone->debugString()), - "winnerCacheData"_attr = redact(winnerCacheData.toString())); + "Tagging the match expression according to cache data: \nFilter:\n{filter}Cache " + "data:\n{cacheData}", + "Tagging the match expression according to cache data", + "filter"_attr = redact(clone->debugString()), + "cacheData"_attr = redact(winnerCacheData.toString())); stdx::unordered_set<string> fields; QueryPlannerIXSelect::getFields(query.root(), &fields); @@ -496,9 +497,10 @@ StatusWith<std::unique_ptr<QuerySolution>> QueryPlanner::planFromCache( invariant(insertionRes.second); LOGV2_DEBUG(20964, 5, - "Index {i}: {ie_identifier}", - "i"_attr = i, - "ie_identifier"_attr = ie.identifier); + "Index {indexNumber}: {id}", + "Index mapping: number and identifier", + "indexNumber"_attr = i, + "id"_attr = ie.identifier); } Status s = tagAccordingToCache(clone.get(), winnerCacheData.tree.get(), indexMap); @@ -511,8 +513,9 @@ StatusWith<std::unique_ptr<QuerySolution>> QueryPlanner::planFromCache( LOGV2_DEBUG(20965, 5, - "Tagged tree:\n{clone_debugString}", - "clone_debugString"_attr = redact(clone->debugString())); + "Tagged tree:\n{tree}", + "Tagged tree", + "tree"_attr = redact(clone->debugString())); // Use the cached index assignments to build solnRoot. 
std::unique_ptr<QuerySolutionNode> solnRoot(QueryPlannerAccess::buildIndexedDataAccess( @@ -533,28 +536,30 @@ StatusWith<std::unique_ptr<QuerySolution>> QueryPlanner::planFromCache( LOGV2_DEBUG(20966, 5, - "Planner: solution constructed from the cache:\n{soln}", - "soln"_attr = redact(soln->toString())); + "Planner: solution constructed from the cache:\n{solution}", + "Planner: solution constructed from the cache", + "solution"_attr = redact(soln->toString())); return {std::move(soln)}; } // static StatusWith<std::vector<std::unique_ptr<QuerySolution>>> QueryPlanner::plan( const CanonicalQuery& query, const QueryPlannerParams& params) { - LOGV2_DEBUG( - 20967, - 5, - "Beginning planning...\n=============================\nOptions = " - "{optionString_params_options}\nCanonical query:\n{query}=============================", - "optionString_params_options"_attr = optionString(params.options), - "query"_attr = redact(query.toString())); + LOGV2_DEBUG(20967, + 5, + "Beginning planning...\n=============================\nOptions = " + "{options}\nCanonical query:\n{query}=============================", + "Beginning planning", + "options"_attr = optionString(params.options), + "query"_attr = redact(query.toString())); for (size_t i = 0; i < params.indices.size(); ++i) { LOGV2_DEBUG(20968, 5, - "Index {i} is {params_indices_i}", - "i"_attr = i, - "params_indices_i"_attr = params.indices[i].toString()); + "Index {indexNumber} is {value}", + "Index number and details", + "indexNumber"_attr = i, + "index"_attr = params.indices[i].toString()); } const bool canTableScan = !(params.options & QueryPlannerParams::NO_TABLE_SCAN); @@ -644,7 +649,11 @@ StatusWith<std::vector<std::unique_ptr<QuerySolution>>> QueryPlanner::plan( stdx::unordered_set<string> fields; QueryPlannerIXSelect::getFields(query.root(), &fields); for (auto&& field : fields) { - LOGV2_DEBUG(20970, 5, "Predicate over field '{field}'", "field"_attr = field); + LOGV2_DEBUG(20970, + 5, + "Predicate over field '{field}'", + "Predicate over field", + "field"_attr = field); } fullIndexList = QueryPlannerIXSelect::expandIndexes(fields, std::move(fullIndexList)); @@ -720,9 +729,10 @@ StatusWith<std::vector<std::unique_ptr<QuerySolution>>> QueryPlanner::plan( for (size_t i = 0; i < relevantIndices.size(); ++i) { LOGV2_DEBUG(20971, 2, - "Relevant index {i} is {relevantIndices_i}", - "i"_attr = i, - "relevantIndices_i"_attr = relevantIndices[i].toString()); + "Relevant index {indexNumber} is {value}", + "Relevant index", + "indexNumber"_attr = i, + "index"_attr = relevantIndices[i].toString()); } // Figure out how useful each index is to each predicate. @@ -746,8 +756,9 @@ StatusWith<std::vector<std::unique_ptr<QuerySolution>>> QueryPlanner::plan( // query.root() is now annotated with RelevantTag(s). LOGV2_DEBUG(20972, 5, - "Rated tree:\n{query_root_debugString}", - "query_root_debugString"_attr = redact(query.root()->debugString())); + "Rated tree:\n{tree}", + "Rated tree", + "tree"_attr = redact(query.root()->debugString())); // If there is a GEO_NEAR it must have an index it can use directly. const MatchExpression* gnNode = nullptr; @@ -755,7 +766,7 @@ StatusWith<std::vector<std::unique_ptr<QuerySolution>>> QueryPlanner::plan( // No index for GEO_NEAR? No query. 
RelevantTag* tag = static_cast<RelevantTag*>(gnNode->getTag()); if (!tag || (0 == tag->first.size() && 0 == tag->notFirst.size())) { - LOGV2_DEBUG(20973, 5, "Unable to find index for $geoNear query."); + LOGV2_DEBUG(20973, 5, "Unable to find index for $geoNear query"); // Don't leave tags on query tree. query.root()->resetTag(); return Status(ErrorCodes::NoQueryExecutionPlans, @@ -764,8 +775,9 @@ StatusWith<std::vector<std::unique_ptr<QuerySolution>>> QueryPlanner::plan( LOGV2_DEBUG(20974, 5, - "Rated tree after geonear processing:{query_root_debugString}", - "query_root_debugString"_attr = redact(query.root()->debugString())); + "Rated tree after geonear processing:{tree}", + "Rated tree after geonear processing", + "tree"_attr = redact(query.root()->debugString())); } // Likewise, if there is a TEXT it must have an index it can use directly. @@ -804,8 +816,9 @@ StatusWith<std::vector<std::unique_ptr<QuerySolution>>> QueryPlanner::plan( LOGV2_DEBUG(20975, 5, - "Rated tree after text processing:{query_root_debugString}", - "query_root_debugString"_attr = redact(query.root()->debugString())); + "Rated tree after text processing:{tree}", + "Rated tree after text processing", + "tree"_attr = redact(query.root()->debugString())); } std::vector<std::unique_ptr<QuerySolution>> out; @@ -825,8 +838,9 @@ StatusWith<std::vector<std::unique_ptr<QuerySolution>>> QueryPlanner::plan( while ((nextTaggedTree = isp.getNext()) && (out.size() < params.maxIndexedSolutions)) { LOGV2_DEBUG(20976, 5, - "About to build solntree from tagged tree:\n{nextTaggedTree_debugString}", - "nextTaggedTree_debugString"_attr = redact(nextTaggedTree->debugString())); + "About to build solntree from tagged tree:\n{tree}", + "About to build solntree from tagged tree", + "tree"_attr = redact(nextTaggedTree->debugString())); // Store the plan cache index tree before calling prepareForAccessingPlanning(), so that // the PlanCacheIndexTree has the same sort as the MatchExpression used to generate the @@ -837,9 +851,9 @@ StatusWith<std::vector<std::unique_ptr<QuerySolution>>> QueryPlanner::plan( if (!statusWithCacheData.isOK()) { LOGV2_DEBUG(20977, 5, - "Query is not cachable: {statusWithCacheData_getStatus_reason}", - "statusWithCacheData_getStatus_reason"_attr = - redact(statusWithCacheData.getStatus().reason())); + "Query is not cachable: {reason}", + "Query is not cachable", + "reason"_attr = redact(statusWithCacheData.getStatus().reason())); } else { cacheData = std::move(statusWithCacheData.getValue()); } @@ -860,8 +874,9 @@ StatusWith<std::vector<std::unique_ptr<QuerySolution>>> QueryPlanner::plan( if (soln) { LOGV2_DEBUG(20978, 5, - "Planner: adding solution:\n{soln}", - "soln"_attr = redact(soln->toString())); + "Planner: adding solution:\n{solution}", + "Planner: adding solution", + "solution"_attr = redact(soln->toString())); if (statusWithCacheData.isOK()) { SolutionCacheData* scd = new SolutionCacheData(); scd->tree = std::move(cacheData); @@ -875,8 +890,11 @@ StatusWith<std::vector<std::unique_ptr<QuerySolution>>> QueryPlanner::plan( // Don't leave tags on query tree. query.root()->resetTag(); - LOGV2_DEBUG( - 20979, 5, "Planner: outputted {out_size} indexed solutions.", "out_size"_attr = out.size()); + LOGV2_DEBUG(20979, + 5, + "Planner: outputted {numSolutions} indexed solutions", + "Planner: outputted indexed solutions", + "numSolutions"_attr = out.size()); // Produce legible error message for failed OR planning with a TEXT child. // TODO: support collection scan for non-TEXT children of OR. 
@@ -911,7 +929,7 @@ StatusWith<std::vector<std::unique_ptr<QuerySolution>>> QueryPlanner::plan(
             return Status(ErrorCodes::NoQueryExecutionPlans,
                           "Failed to build whole-index solution for $hint");
         }
-        LOGV2_DEBUG(20980, 5, "Planner: outputting soln that uses hinted index as scan.");
+        LOGV2_DEBUG(20980, 5, "Planner: outputting soln that uses hinted index as scan");
         std::vector<std::unique_ptr<QuerySolution>> out;
         out.push_back(std::move(soln));
         return {std::move(out)};
@@ -973,7 +991,7 @@ StatusWith<std::vector<std::unique_ptr<QuerySolution>>> QueryPlanner::plan(
             const BSONObj kp = QueryPlannerAnalysis::getSortPattern(index.keyPattern);
             if (providesSort(query, kp)) {
                 LOGV2_DEBUG(
-                    20981, 5, "Planner: outputting soln that uses index to provide sort.");
+                    20981, 5, "Planner: outputting soln that uses index to provide sort");
                 auto soln = buildWholeIXSoln(fullIndexList[i], query, params);
                 if (soln) {
                     PlanCacheIndexTree* indexTree = new PlanCacheIndexTree();
@@ -991,7 +1009,7 @@ StatusWith<std::vector<std::unique_ptr<QuerySolution>>> QueryPlanner::plan(
                 LOGV2_DEBUG(
                     20982,
                     5,
-                    "Planner: outputting soln that uses (reverse) index to provide sort.");
+                    "Planner: outputting soln that uses (reverse) index to provide sort");
                 auto soln = buildWholeIXSoln(fullIndexList[i], query, params, -1);
                 if (soln) {
                     PlanCacheIndexTree* indexTree = new PlanCacheIndexTree();
@@ -1026,7 +1044,7 @@ StatusWith<std::vector<std::unique_ptr<QuerySolution>>> QueryPlanner::plan(
         auto soln = buildWholeIXSoln(index, query, paramsForCoveredIxScan);
         if (soln && !soln->root->fetched()) {
             LOGV2_DEBUG(
-                20983, 5, "Planner: outputting soln that uses index to provide projection.");
+                20983, 5, "Planner: outputting soln that uses index to provide projection");
             PlanCacheIndexTree* indexTree = new PlanCacheIndexTree();
             indexTree->setIndexEntry(index);
 
@@ -1070,8 +1088,9 @@ StatusWith<std::vector<std::unique_ptr<QuerySolution>>> QueryPlanner::plan(
     if (collscan) {
         LOGV2_DEBUG(20984,
                     5,
-                    "Planner: outputting a collscan:\n{collscan}",
-                    "collscan"_attr = redact(collscan->toString()));
+                    "Planner: outputting a collection scan:\n{collectionScan}",
+                    "Planner: outputting a collection scan",
+                    "collectionScan"_attr = redact(collscan->toString()));
         SolutionCacheData* scd = new SolutionCacheData();
         scd->solnType = SolutionCacheData::COLLSCAN_SOLN;
         collscan->cacheData.reset(scd);
diff --git a/src/mongo/db/query/query_planner_test_fixture.cpp b/src/mongo/db/query/query_planner_test_fixture.cpp
index 891ec8224e6..b43e45d1c48 100644
--- a/src/mongo/db/query/query_planner_test_fixture.cpp
+++ b/src/mongo/db/query/query_planner_test_fixture.cpp
@@ -477,7 +477,7 @@ size_t QueryPlannerTest::getNumSolutions() const {
 void QueryPlannerTest::dumpSolutions() const {
     str::stream ost;
     dumpSolutions(ost);
-    LOGV2(20985, "{std_string_ost}", "std_string_ost"_attr = std::string(ost));
+    LOGV2(20985, "{value}", "Solutions", "value"_attr = std::string(ost));
 }
 
 void QueryPlannerTest::dumpSolutions(str::stream& ost) const {
diff --git a/src/mongo/db/query/query_planner_test_lib.cpp b/src/mongo/db/query/query_planner_test_lib.cpp
index 6082fc79e61..39609566056 100644
--- a/src/mongo/db/query/query_planner_test_lib.cpp
+++ b/src/mongo/db/query/query_planner_test_lib.cpp
@@ -138,8 +138,9 @@ bool bsonObjFieldsAreInSet(BSONObj obj, const std::set<std::string>& allowedFiel
         BSONElement child = i.next();
         if (!allowedFields.count(child.fieldName())) {
             LOGV2_ERROR(23932,
-                        "Did not expect to find {child_fieldName}",
-                        "child_fieldName"_attr = child.fieldName());
+                        "Did not expect to find {field}",
+                        "Unexpected field",
+                        "field"_attr = child.fieldName());
             return false;
         }
     }
diff --git a/src/mongo/db/query/stage_builder.cpp b/src/mongo/db/query/stage_builder.cpp
index 01005defd04..ebf833f8f4e 100644
--- a/src/mongo/db/query/stage_builder.cpp
+++ b/src/mongo/db/query/stage_builder.cpp
@@ -368,7 +368,10 @@ std::unique_ptr<PlanStage> buildStages(OperationContext* opCtx,
         case STAGE_TRIAL:
         case STAGE_UNKNOWN:
         case STAGE_UPDATE: {
-            LOGV2_WARNING(4615604, "Can't build exec tree for node {node}", "node"_attr = *root);
+            LOGV2_WARNING(4615604,
+                          "Can't build exec tree for node {node}",
+                          "Can't build exec tree for node",
+                          "node"_attr = *root);
         }
     }
 
diff --git a/src/mongo/db/views/durable_view_catalog.cpp b/src/mongo/db/views/durable_view_catalog.cpp
index 56cc506dd42..fec912e569e 100644
--- a/src/mongo/db/views/durable_view_catalog.cpp
+++ b/src/mongo/db/views/durable_view_catalog.cpp
@@ -197,9 +197,10 @@ void DurableViewCatalogImpl::upsert(OperationContext* opCtx,
     if (!id.isValid() || !systemViews->findDoc(opCtx, id, &oldView)) {
         LOGV2_DEBUG(22544,
                     2,
-                    "insert view {view} into {db_getSystemViewsName}",
+                    "Insert view {view} into {viewCatalog}",
+                    "Insert view to system views catalog",
                     "view"_attr = view,
-                    "db_getSystemViewsName"_attr = _db->getSystemViewsName());
+                    "viewCatalog"_attr = _db->getSystemViewsName());
         uassertStatusOK(
             systemViews->insertDocument(opCtx, InsertStatement(view), &CurOp::get(opCtx)->debug()));
     } else {
@@ -231,9 +232,10 @@ void DurableViewCatalogImpl::remove(OperationContext* opCtx, const NamespaceStri
 
     LOGV2_DEBUG(22545,
                 2,
-                "remove view {name} from {db_getSystemViewsName}",
-                "name"_attr = name,
-                "db_getSystemViewsName"_attr = _db->getSystemViewsName());
+                "Remove view {view} from {viewCatalog}",
+                "Remove view from system views catalog",
+                "view"_attr = name,
+                "viewCatalog"_attr = _db->getSystemViewsName());
     systemViews->deleteDocument(opCtx, kUninitializedStmtId, id, &CurOp::get(opCtx)->debug());
 }
 }  // namespace mongo
diff --git a/src/mongo/db/views/view_catalog.cpp b/src/mongo/db/views/view_catalog.cpp
index ac084322f6e..3d3da8088b9 100644
--- a/src/mongo/db/views/view_catalog.cpp
+++ b/src/mongo/db/views/view_catalog.cpp
@@ -96,8 +96,9 @@ Status ViewCatalog::_reload(WithLock,
                             ViewCatalogLookupBehavior lookupBehavior) {
     LOGV2_DEBUG(22546,
                 1,
-                "reloading view catalog for database {durable_getName}",
-                "durable_getName"_attr = _durable->getName());
+                "Reloading view catalog for database {db}",
+                "Reloading view catalog for database",
+                "db"_attr = _durable->getName());
 
     _viewMap.clear();
     _valid = false;
@@ -141,9 +142,9 @@ Status ViewCatalog::_reload(WithLock,
     } catch (const DBException& ex) {
         auto status = ex.toStatus();
         LOGV2(22547,
-              "could not load view catalog for database {dbName}: {error}",
-              "could not load view catalog for database",
-              "dbName"_attr = _durable->getName(),
+              "Could not load view catalog for database {db}: {error}",
+              "Could not load view catalog for database",
+              "db"_attr = _durable->getName(),
               "error"_attr = status);
         return status;
     }
diff --git a/src/mongo/s/query/cluster_aggregation_planner.cpp b/src/mongo/s/query/cluster_aggregation_planner.cpp
index ade6903488f..a0cc6eac44b 100644
--- a/src/mongo/s/query/cluster_aggregation_planner.cpp
+++ b/src/mongo/s/query/cluster_aggregation_planner.cpp
@@ -87,7 +87,7 @@ AsyncRequestsSender::Response establishMergingShardCursor(OperationContext* opCt
                                                           const BSONObj mergeCmdObj,
                                                           const ShardId& mergingShardId) {
     if (MONGO_unlikely(shardedAggregateFailToEstablishMergingShardCursor.shouldFail())) {
-        LOGV2(22834, "shardedAggregateFailToEstablishMergingShardCursor fail point enabled.");
+        LOGV2(22834, "shardedAggregateFailToEstablishMergingShardCursor fail point enabled");
         uasserted(ErrorCodes::FailPointEnabled,
                   "Asserting on establishing merging shard cursor due to failpoint.");
     }
@@ -216,8 +216,9 @@ Status dispatchMergingPipeline(const boost::intrusive_ptr<ExpressionContext>& ex
 
     LOGV2_DEBUG(22835,
                 1,
-                "Dispatching merge pipeline {mergeCmdObj} to designated shard",
-                "mergeCmdObj"_attr = redact(mergeCmdObj));
+                "Dispatching merge pipeline {command} to designated shard",
+                "Dispatching merge pipeline to designated shard",
+                "command"_attr = redact(mergeCmdObj));
 
     // Dispatch $mergeCursors to the chosen shard, store the resulting cursor, and return.
     auto mergeResponse =
@@ -364,7 +365,7 @@ DispatchShardPipelineResults dispatchExchangeConsumerPipeline(
     auto opCtx = expCtx->opCtx;
 
     if (MONGO_unlikely(shardedAggregateFailToDispatchExchangeConsumerPipeline.shouldFail())) {
-        LOGV2(22836, "shardedAggregateFailToDispatchExchangeConsumerPipeline fail point enabled.");
+        LOGV2(22836, "shardedAggregateFailToDispatchExchangeConsumerPipeline fail point enabled");
         uasserted(ErrorCodes::FailPointEnabled,
                   "Asserting on exhange consumer pipeline dispatch due to failpoint.");
     }
diff --git a/src/mongo/s/query/cluster_cursor_manager.cpp b/src/mongo/s/query/cluster_cursor_manager.cpp
index b35bb578323..b3bb3038eed 100644
--- a/src/mongo/s/query/cluster_cursor_manager.cpp
+++ b/src/mongo/s/query/cluster_cursor_manager.cpp
@@ -401,9 +401,10 @@ std::size_t ClusterCursorManager::killMortalCursorsInactiveSince(OperationContex
 
         if (res) {
             LOGV2(22837,
-                  "Cursor id {cursorId} timed out, idle since {entry_getLastActive}",
+                  "Cursor id {cursorId} timed out, idle since {idleSince}",
+                  "Cursor timed out",
                   "cursorId"_attr = cursorId,
-                  "entry_getLastActive"_attr = entry.getLastActive().toString());
+                  "idleSince"_attr = entry.getLastActive().toString());
         }
 
         return res;
@@ -588,7 +589,10 @@ std::pair<Status, int> ClusterCursorManager::killCursorsWithMatchingSessions(
             return;
         }
         uassertStatusOK(mgr.killCursor(opCtx, *cursorNss, id));
-        LOGV2(22838, "killing cursor: {id} as part of killing session(s)", "id"_attr = id);
+        LOGV2(22838,
+              "Killing cursor: {cursorId} as part of killing session(s)",
+              "Killing cursor as part of killing session(s)",
+              "cursorId"_attr = id);
     };
 
     auto bySessionCursorKiller = makeKillCursorsBySessionAdaptor(opCtx, matcher, std::move(eraser));
diff --git a/src/mongo/s/query/cluster_find.cpp b/src/mongo/s/query/cluster_find.cpp
index 96d9461d50f..bdde52a7833 100644
--- a/src/mongo/s/query/cluster_find.cpp
+++ b/src/mongo/s/query/cluster_find.cpp
@@ -521,12 +521,13 @@ CursorId ClusterFind::runQuery(OperationContext* opCtx,
 
             LOGV2_DEBUG(22839,
                         1,
-                        "Received error status for query {query_Short} on attempt {retries} of "
-                        "{kMaxRetries}: {ex}",
-                        "query_Short"_attr = redact(query.toStringShort()),
-                        "retries"_attr = retries,
-                        "kMaxRetries"_attr = kMaxRetries,
-                        "ex"_attr = redact(ex));
+                        "Received error status for query {query} on attempt {attemptNumber} of "
+                        "{maxRetries}: {error}",
+                        "Received error status for query",
+                        "query"_attr = redact(query.toStringShort()),
+                        "attemptNumber"_attr = retries,
+                        "maxRetries"_attr = kMaxRetries,
+                        "error"_attr = redact(ex));
 
             Grid::get(opCtx)->catalogCache()->onStaleDatabaseVersion(ex->getDb(),
                                                                      ex->getVersionReceived());
@@ -562,12 +563,13 @@ CursorId ClusterFind::runQuery(OperationContext* opCtx,
 
             LOGV2_DEBUG(22840,
                         1,
-                        "Received error status for query {query_Short} on attempt {retries} of "
-                        "{kMaxRetries}: {ex}",
-                        "query_Short"_attr = redact(query.toStringShort()),
-                        "retries"_attr = retries,
-                        "kMaxRetries"_attr = kMaxRetries,
-                        "ex"_attr = redact(ex));
+                        "Received error status for query {query} on attempt {attemptNumber} of "
+                        "{maxRetries}: {error}",
+                        "Received error status for query",
+                        "query"_attr = redact(query.toStringShort()),
+                        "attemptNumber"_attr = retries,
+                        "maxRetries"_attr = kMaxRetries,
+                        "error"_attr = redact(ex));
 
             if (ex.code() != ErrorCodes::ShardInvalidatedForTargeting) {
                 if (auto staleInfo = ex.extraInfo<StaleConfigInfo>()) {
diff --git a/src/mongo/s/query/establish_cursors.cpp b/src/mongo/s/query/establish_cursors.cpp
index 1798f765073..18e78048193 100644
--- a/src/mongo/s/query/establish_cursors.cpp
+++ b/src/mongo/s/query/establish_cursors.cpp
@@ -75,8 +75,9 @@ void killOpOnShards(std::shared_ptr<executor::TaskExecutor> executor,
             if (!args.response.isOK()) {
                 LOGV2_DEBUG(4625504,
                             2,
-                            "killOperations for {remote} failed with {error}",
-                            "remote"_attr = host.toString(),
+                            "killOperations for {remoteHost} failed with {error}",
+                            "killOperations failed",
+                            "remoteHost"_attr = host.toString(),
                             "error"_attr = args.response);
                 return;
             }
@@ -86,6 +87,7 @@ void killOpOnShards(std::shared_ptr<executor::TaskExecutor> executor,
         LOGV2_DEBUG(4625503,
                     2,
                     "Failed to cleanup remote operations: {error}",
+                    "Failed to cleanup remote operations",
                     "error"_attr = ex.toStatus());
     }
 }
@@ -115,12 +117,14 @@ std::vector<RemoteCursor> establishCursors(OperationContext* opCtx,
         requests.emplace_back(remote.first, requestWithOpKey.obj());
     }
 
-    LOGV2_DEBUG(4625502,
-                3,
-                "Establishing cursors on {opId} for {nRemotes} remotes with operation key {opKey}",
-                "opId"_attr = opCtx->getOpID(),
-                "nRemotes"_attr = remotes.size(),
-                "opKey"_attr = opKey);
+    LOGV2_DEBUG(
+        4625502,
+        3,
+        "Establishing cursors on {opId} for {numRemotes} remotes with operation key {opKey}",
+        "Establishing cursors on remotes",
+        "opId"_attr = opCtx->getOpID(),
+        "numRemotes"_attr = remotes.size(),
+        "opKey"_attr = opKey);
 
     // Send the requests
     MultiStatementTransactionRequestsSender ars(
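
Taken together, the hunks above apply one mechanical pattern: each logv2 call keeps its stable numeric ID and severity, loses the trailing punctuation in its format string, gains a constant human-readable message for the structured (JSON) log output, and has its attributes renamed from auto-derived identifiers to short camelCase names. Below is a minimal before/after sketch of that call shape, modeled on the view_catalog.cpp hunk for log ID 22546; the wrapper function, its StringData parameter, and the kDefault component choice are hypothetical illustrations, not part of this commit.

#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault

#include "mongo/logv2/log.h"

namespace mongo {

// Hypothetical helper; only the LOGV2_DEBUG arguments mirror the diff above.
void logViewCatalogReload(StringData dbName) {
    // Old shape (removed by this commit): a single lowercase format string
    // whose attribute name was auto-derived from the C++ expression:
    //
    //     LOGV2_DEBUG(22546,
    //                 1,
    //                 "reloading view catalog for database {durable_getName}",
    //                 "durable_getName"_attr = dbName);
    //
    // New shape: sentence-cased format string with no trailing period, a
    // constant message string for the structured log output, and a short
    // camelCase attribute name.
    LOGV2_DEBUG(22546,
                1,
                "Reloading view catalog for database {db}",
                "Reloading view catalog for database",
                "db"_attr = dbName);
}

}  // namespace mongo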