author | Sara Golemon <sara.golemon@mongodb.com> | 2020-05-11 19:47:56 +0000
committer | Evergreen Agent <no-reply@evergreen.mongodb.com> | 2020-05-14 03:00:44 +0000
commit | cb6fe33a7616148bc4590ccdb0fcbc65b5c7035e (patch)
tree | 3d7fa193f98430730bbf43cac1dc9239a7d09ee2
parent | abf419c46ca4df9b6f3948e450dfa7d8f8ac0a87 (diff)
download | mongo-cb6fe33a7616148bc4590ccdb0fcbc65b5c7035e.tar.gz
SERVER-48084 Lint log lines in mongo/db/commands
(cherry picked from commit 2e46c85d9e90da3bcedd2463a7f6ffb8af0e1c24)
47 files changed, 213 insertions, 203 deletions
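For context, the lint pass below moves LOGV2 call sites in mongo/db/commands toward the structured-logging convention: message templates are capitalized and drop trailing punctuation, a short constant message is added for structured output, and attribute names describe the logged field ("query", "error", "namespace") instead of echoing the C++ expression. A minimal sketch of the before/after shape, mirroring the index_filter_commands.cpp hunk in this diff; `cq` comes from the surrounding command code, so this is a fragment illustrating the convention rather than a standalone program:

```cpp
#include "mongo/logv2/log.h"

// Before: lower-case ad-hoc text, attribute name echoing the C++ expression.
LOGV2(20479,
      "Removed index filter on {cq_Short}",
      "cq_Short"_attr = redact(cq->toStringShort()));

// After: capitalized template without trailing punctuation, an added short
// message for structured output, and a descriptive attribute name.
LOGV2(20479,
      "Removed index filter on {query}",
      "Removed index filter on query",
      "query"_attr = redact(cq->toStringShort()));
```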
diff --git a/jstests/noPassthrough/dropdatabase_respect_maxtimems.js b/jstests/noPassthrough/dropdatabase_respect_maxtimems.js index 63ca53cf888..e3553351f7a 100644 --- a/jstests/noPassthrough/dropdatabase_respect_maxtimems.js +++ b/jstests/noPassthrough/dropdatabase_respect_maxtimems.js @@ -48,7 +48,7 @@ const dropDB = rst.getPrimary().getDB("drop"); ErrorCodes.Interrupted); }, rst.getPrimary().port); - checkLog.contains(dropDB.getMongo(), "test only command sleep invoked"); + checkLog.contains(dropDB.getMongo(), "Test-only command 'sleep' invoked"); // dropDatabase now gets unblocked by the failpoint but will immediately // get blocked by acquiring the database lock for dropping the database. diff --git a/jstests/ssl/x509_client.js b/jstests/ssl/x509_client.js index cd077165b4b..7fe351f25d6 100644 --- a/jstests/ssl/x509_client.js +++ b/jstests/ssl/x509_client.js @@ -80,8 +80,8 @@ function authAndTest(mongo) { function checkAuthSuccess(element, index, array) { const logJson = JSON.parse(element); - return logJson.id === 20429 && logJson.attr.principalName === CLIENT_USER && - logJson.attr.DB === "$external" && + return logJson.id === 20429 && logJson.attr.user === CLIENT_USER && + logJson.attr.db === "$external" && /(?:\d{1,3}\.){3}\d{1,3}:\d+/.test(logJson.attr.client); } assert(log.some(checkAuthSuccess)); diff --git a/src/mongo/db/commands/apply_ops_cmd.cpp b/src/mongo/db/commands/apply_ops_cmd.cpp index c25fcfbb322..c6b60e3166d 100644 --- a/src/mongo/db/commands/apply_ops_cmd.cpp +++ b/src/mongo/db/commands/apply_ops_cmd.cpp @@ -27,8 +27,6 @@ * it in the license file. */ -#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand - #include "mongo/platform/basic.h" #include <vector> diff --git a/src/mongo/db/commands/authentication_commands.cpp b/src/mongo/db/commands/authentication_commands.cpp index 826d936c965..1030b67f336 100644 --- a/src/mongo/db/commands/authentication_commands.cpp +++ b/src/mongo/db/commands/authentication_commands.cpp @@ -258,9 +258,10 @@ bool CmdAuthenticate::run(OperationContext* opCtx, if (!serverGlobalParams.quiet.load()) { mutablebson::Document cmdToLog(cmdObj, mutablebson::Document::kInPlaceDisabled); LOGV2(20427, - " authenticate db: {dbname} {cmdToLog}", - "dbname"_attr = dbname, - "cmdToLog"_attr = cmdToLog); + "Authenticate db: {db} {command}", + "Authenticate", + "db"_attr = dbname, + "command"_attr = cmdToLog); } std::string mechanism = cmdObj.getStringField("mechanism"); if (mechanism.empty()) { @@ -295,11 +296,12 @@ bool CmdAuthenticate::run(OperationContext* opCtx, auto const client = opCtx->getClient(); LOGV2(20428, "Failed to authenticate {user} from client {client} with mechanism " - "{mechanism}: {status}", + "{mechanism}: {error}", + "Failed to authenticate", "user"_attr = user, "client"_attr = client->getRemote(), "mechanism"_attr = mechanism, - "status"_attr = status); + "error"_attr = status); } sleepmillis(saslGlobalParams.authFailedDelay.load()); if (status.code() == ErrorCodes::AuthenticationFailed) { @@ -314,10 +316,10 @@ bool CmdAuthenticate::run(OperationContext* opCtx, if (!serverGlobalParams.quiet.load()) { LOGV2(20429, - "Successfully authenticated as principal {principalName} on {DB} from client " - "{client}", - "principalName"_attr = user.getUser(), - "DB"_attr = user.getDB(), + "Successfully authenticated as principal {user} on {db} from client {client}", + "Successfully authenticated", + "user"_attr = user.getUser(), + "db"_attr = user.getDB(), "client"_attr = opCtx->getClient()->session()->remote()); } 
diff --git a/src/mongo/db/commands/compact.cpp b/src/mongo/db/commands/compact.cpp index c635c39fe05..006d7387a77 100644 --- a/src/mongo/db/commands/compact.cpp +++ b/src/mongo/db/commands/compact.cpp @@ -27,8 +27,6 @@ * it in the license file. */ -#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand - #include <string> #include <vector> diff --git a/src/mongo/db/commands/count_cmd.cpp b/src/mongo/db/commands/count_cmd.cpp index 72a6c40aeaf..2add45d4acd 100644 --- a/src/mongo/db/commands/count_cmd.cpp +++ b/src/mongo/db/commands/count_cmd.cpp @@ -27,8 +27,6 @@ * it in the license file. */ -#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand - #include "mongo/platform/basic.h" #include "mongo/db/auth/authorization_session.h" diff --git a/src/mongo/db/commands/create_indexes.cpp b/src/mongo/db/commands/create_indexes.cpp index 8d18ba59ed1..7864d9918b7 100644 --- a/src/mongo/db/commands/create_indexes.cpp +++ b/src/mongo/db/commands/create_indexes.cpp @@ -683,16 +683,16 @@ bool runCreateIndexesWithCoordinator(OperationContext* opCtx, // considered an error and the command should return success. if (ErrorCodes::NamespaceNotFound == ex.code()) { LOGV2(20448, - "Index build failed: collection dropped: ", + "Index build failed: collection dropped", "buildUUID"_attr = buildUUID, "ns"_attr = ns, "collectionUUID"_attr = *collectionUUID, - "ex"_attr = ex); + "exception"_attr = ex); return true; } // All other errors should be forwarded to the caller with index build information included. - LOGV2(20449, "Index build failed", "buildUUID"_attr = buildUUID, "ex"_attr = ex.toStatus()); + LOGV2(20449, "Index build failed", "buildUUID"_attr = buildUUID, "error"_attr = ex); ex.addContext(str::stream() << "Index build failed: " << buildUUID << ": Collection " << ns << " ( " << *collectionUUID << " )"); @@ -767,12 +767,16 @@ public: if (shouldLogMessageOnAlreadyBuildingError) { auto bsonElem = cmdObj.getField(kIndexesFieldName); LOGV2(20450, - "Received a request to create indexes: '{bsonElem}', but found that at " - "least one of the indexes is already being built, '{ex_toStatus}'. This " - "request will wait for the pre-existing index build to finish " - "before proceeding.", - "bsonElem"_attr = bsonElem, - "ex_toStatus"_attr = ex.toStatus()); + "Received a request to create indexes: '{indexesFieldName}', but found " + "that at least one of the indexes is already being built, '{error}'. " + "This request will wait for the pre-existing index build to finish " + "before proceeding", + "Received a request to create indexes, " + "but found that at least one of the indexes is already being built." + "This request will wait for the pre-existing index build to finish " + "before proceeding", + "indexesFieldName"_attr = bsonElem, + "error"_attr = ex); shouldLogMessageOnAlreadyBuildingError = false; } // Unset the response fields so we do not write duplicate fields. 
diff --git a/src/mongo/db/commands/current_op_common.h b/src/mongo/db/commands/current_op_common.h index a8bb73db178..9d03ba274b3 100644 --- a/src/mongo/db/commands/current_op_common.h +++ b/src/mongo/db/commands/current_op_common.h @@ -29,8 +29,6 @@ #pragma once -#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand - #include "mongo/db/commands.h" #include "mongo/db/operation_context.h" #include "mongo/db/pipeline/aggregation_request.h" diff --git a/src/mongo/db/commands/dbcommands.cpp b/src/mongo/db/commands/dbcommands.cpp index 89b9aeb9ff9..67f4666eeaf 100644 --- a/src/mongo/db/commands/dbcommands.cpp +++ b/src/mongo/db/commands/dbcommands.cpp @@ -317,11 +317,10 @@ public: const NamespaceString ns = cmd.getNamespace(); if (cmd.getAutoIndexId()) { - const char* deprecationWarning = - "the autoIndexId option is deprecated and will be removed in a future release"; - LOGV2_WARNING( - 23800, "{deprecationWarning}", "deprecationWarning"_attr = deprecationWarning); - result.append("note", deprecationWarning); +#define DEPR_23800 "The autoIndexId option is deprecated and will be removed in a future release" + LOGV2_WARNING(23800, DEPR_23800); + result.append("note", DEPR_23800); +#undef DEPR_23800 } // Ensure that the 'size' field is present if 'capped' is set to true. @@ -558,7 +557,10 @@ public: } if (PlanExecutor::FAILURE == state) { - LOGV2_WARNING(23801, "Internal error while reading {ns}", "ns"_attr = ns); + LOGV2_WARNING(23801, + "Internal error while reading {namespace}", + "Internal error while reading", + "namespace"_attr = ns); uassertStatusOK(WorkingSetCommon::getMemberObjectStatus(obj).withContext( "Executor error while reading during dataSize command")); } diff --git a/src/mongo/db/commands/dbcommands_d.cpp b/src/mongo/db/commands/dbcommands_d.cpp index 4343fb163b0..c6c1eccb48d 100644 --- a/src/mongo/db/commands/dbcommands_d.cpp +++ b/src/mongo/db/commands/dbcommands_d.cpp @@ -280,8 +280,11 @@ public: if (partialOk) { break; // skipped chunk is probably on another shard } - LOGV2( - 20452, "should have chunk: {n} have:{myn}", "n"_attr = n, "myn"_attr = myn); + LOGV2(20452, + "Should have chunk: {expected} have: {observed}", + "Unexpected chunk", + "expected"_attr = n, + "observed"_attr = myn); dumpChunks(opCtx, nss.ns(), query, sort); uassert(10040, "chunks out of order", n == myn); } @@ -319,7 +322,7 @@ public: LOGV2_DEBUG( 20453, 1, - "chunk metadata changed during filemd5, will retarget and continue"); + "Chunk metadata changed during filemd5, will retarget and continue"); break; } @@ -354,7 +357,7 @@ public: q.sort(sort); unique_ptr<DBClientCursor> c = client.query(NamespaceString(ns), q); while (c->more()) { - LOGV2(20454, "{c_nextSafe}", "c_nextSafe"_attr = c->nextSafe()); + LOGV2(20454, "Chunk: {chunk}", "Dumping chunks", "chunk"_attr = c->nextSafe()); } } diff --git a/src/mongo/db/commands/dbhash.cpp b/src/mongo/db/commands/dbhash.cpp index 0a7bcd5ac0d..fe734f964fe 100644 --- a/src/mongo/db/commands/dbhash.cpp +++ b/src/mongo/db/commands/dbhash.cpp @@ -374,7 +374,10 @@ private: exec = InternalPlanner::collectionScan( opCtx, nss.ns(), collection, PlanExecutor::NO_YIELD); } else { - LOGV2(20455, "can't find _id index for: {nss}", "nss"_attr = nss); + LOGV2(20455, + "Can't find _id index for namespace: {namespace}", + "Can't find _id index for namespace", + "namespace"_attr = nss); return "no _id _index"; } @@ -390,7 +393,8 @@ private: n++; } if (PlanExecutor::IS_EOF != state) { - LOGV2_WARNING(20456, "error while hashing, db dropped? 
ns={nss}", "nss"_attr = nss); + LOGV2_WARNING( + 20456, "Error while hashing, db possibly dropped", "namespace"_attr = nss); uasserted(34371, "Plan executor error while running dbHash command: " + WorkingSetCommon::toStatusString(c)); diff --git a/src/mongo/db/commands/distinct.cpp b/src/mongo/db/commands/distinct.cpp index 0d3c2318bff..68411e1b6f2 100644 --- a/src/mongo/db/commands/distinct.cpp +++ b/src/mongo/db/commands/distinct.cpp @@ -276,14 +276,14 @@ public: // We should always have a valid status member object at this point. auto status = WorkingSetCommon::getMemberObjectStatus(obj); invariant(!status.isOK()); - LOGV2_WARNING( - 23797, - "Plan executor error during distinct command: {PlanExecutor_statestr_state}, " - "status: {status}, stats: {Explain_getWinningPlanStats_executor_getValue_get}", - "PlanExecutor_statestr_state"_attr = redact(PlanExecutor::statestr(state)), - "status"_attr = status, - "Explain_getWinningPlanStats_executor_getValue_get"_attr = - redact(Explain::getWinningPlanStats(executor.getValue().get()))); + LOGV2_WARNING(23797, + "Plan executor error during distinct command: {state}, status: {error}, " + "stats: {stats}", + "Plan executor error during distinct command", + "state"_attr = redact(PlanExecutor::statestr(state)), + "error"_attr = status, + "stats"_attr = + redact(Explain::getWinningPlanStats(executor.getValue().get()))); uassertStatusOK(status.withContext("Executor error during distinct command")); } diff --git a/src/mongo/db/commands/drop_connections_command.cpp b/src/mongo/db/commands/drop_connections_command.cpp index 3a97b0aeb27..7d6dccfeefd 100644 --- a/src/mongo/db/commands/drop_connections_command.cpp +++ b/src/mongo/db/commands/drop_connections_command.cpp @@ -27,8 +27,6 @@ * it in the license file. */ -#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand - #include "mongo/platform/basic.h" #include "mongo/db/auth/authorization_session.h" diff --git a/src/mongo/db/commands/drop_indexes.cpp b/src/mongo/db/commands/drop_indexes.cpp index f89b61fc726..0d3f7927711 100644 --- a/src/mongo/db/commands/drop_indexes.cpp +++ b/src/mongo/db/commands/drop_indexes.cpp @@ -127,7 +127,7 @@ public: const NamespaceString toReIndexNss = CommandHelpers::parseNsCollectionRequired(dbname, jsobj); - LOGV2(20457, "CMD: reIndex {toReIndexNss}", "toReIndexNss"_attr = toReIndexNss); + LOGV2(20457, "CMD: reIndex {namespace}", "CMD reIndex", "namespace"_attr = toReIndexNss); AutoGetCollection autoColl(opCtx, toReIndexNss, MODE_X); Collection* collection = autoColl.getCollection(); @@ -218,7 +218,7 @@ public: }); if (MONGO_unlikely(reIndexCrashAfterDrop.shouldFail())) { - LOGV2(20458, "exiting because 'reIndexCrashAfterDrop' fail point was set"); + LOGV2(20458, "Exiting because 'reIndexCrashAfterDrop' fail point was set"); quickExit(EXIT_ABRUPT); } diff --git a/src/mongo/db/commands/fail_point_cmd.cpp b/src/mongo/db/commands/fail_point_cmd.cpp index 28d887b1f47..47ce52bf3f1 100644 --- a/src/mongo/db/commands/fail_point_cmd.cpp +++ b/src/mongo/db/commands/fail_point_cmd.cpp @@ -27,8 +27,6 @@ * it in the license file. 
*/ -#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand - #include <vector> #include "mongo/base/init.h" diff --git a/src/mongo/db/commands/feature_compatibility_version.cpp b/src/mongo/db/commands/feature_compatibility_version.cpp index 00612f4b20b..66e645f4a69 100644 --- a/src/mongo/db/commands/feature_compatibility_version.cpp +++ b/src/mongo/db/commands/feature_compatibility_version.cpp @@ -154,15 +154,17 @@ void FeatureCompatibilityVersion::onInsertOrUpdate(OperationContext* opCtx, cons // To avoid extra log messages when the targetVersion is set/unset, only log when the version // changes. - bool isDifferent = serverGlobalParams.featureCompatibility.isVersionInitialized() - ? serverGlobalParams.featureCompatibility.getVersion() != newVersion - : true; + logv2::DynamicAttributes attrs; + bool isDifferent = true; + if (serverGlobalParams.featureCompatibility.isVersionInitialized()) { + const auto currentVersion = serverGlobalParams.featureCompatibility.getVersion(); + attrs.add("currentVersion", FeatureCompatibilityVersionParser::toString(currentVersion)); + isDifferent = currentVersion != newVersion; + } + if (isDifferent) { - LOGV2( - 20459, - "setting featureCompatibilityVersion to {FeatureCompatibilityVersionParser_newVersion}", - "FeatureCompatibilityVersionParser_newVersion"_attr = - FeatureCompatibilityVersionParser::toString(newVersion)); + attrs.add("newVersion", FeatureCompatibilityVersionParser::toString(newVersion)); + LOGV2(20459, "Setting featureCompatibilityVersion", attrs); } opCtx->recoveryUnit()->onCommit( @@ -208,9 +210,9 @@ void FeatureCompatibilityVersion::_setVersion( if (newVersion != ServerGlobalParams::FeatureCompatibility::Version::kFullyUpgradedTo44) { if (MONGO_unlikely(hangBeforeAbortingRunningTransactionsOnFCVDowngrade.shouldFail())) { LOGV2(20460, - "featureCompatibilityVersion - " - "hangBeforeAbortingRunningTransactionsOnFCVDowngrade fail point enabled. " - "Blocking until fail point is disabled."); + "FeatureCompatibilityVersion - " + "hangBeforeAbortingRunningTransactionsOnFCVDowngrade fail point enabled, " + "blocking until fail point is disabled"); hangBeforeAbortingRunningTransactionsOnFCVDowngrade.pauseWhileSet(); } // Abort all open transactions when downgrading the featureCompatibilityVersion. diff --git a/src/mongo/db/commands/find_and_modify.cpp b/src/mongo/db/commands/find_and_modify.cpp index 153adac284b..e63017d8395 100644 --- a/src/mongo/db/commands/find_and_modify.cpp +++ b/src/mongo/db/commands/find_and_modify.cpp @@ -98,13 +98,13 @@ boost::optional<BSONObj> advanceExecutor(OperationContext* opCtx, // We should always have a valid status member object at this point. 
auto status = WorkingSetCommon::getMemberObjectStatus(value); invariant(!status.isOK()); - LOGV2_WARNING(23802, - "Plan executor error during findAndModify: {PlanExecutor_statestr_state}, " - "status: {status}, stats: {Explain_getWinningPlanStats_exec}", - "PlanExecutor_statestr_state"_attr = PlanExecutor::statestr(state), - "status"_attr = status, - "Explain_getWinningPlanStats_exec"_attr = - redact(Explain::getWinningPlanStats(exec))); + LOGV2_WARNING( + 23802, + "Plan executor error during findAndModify: {state}, status: {error}, stats: {stats}", + "Plan executor error during findAndModify", + "state"_attr = PlanExecutor::statestr(state), + "error"_attr = status, + "stats"_attr = redact(Explain::getWinningPlanStats(exec))); uassertStatusOKWithContext(status, "Plan executor error during findAndModify"); MONGO_UNREACHABLE; diff --git a/src/mongo/db/commands/find_cmd.cpp b/src/mongo/db/commands/find_cmd.cpp index 8c944fc515e..6a410c0c1d3 100644 --- a/src/mongo/db/commands/find_cmd.cpp +++ b/src/mongo/db/commands/find_cmd.cpp @@ -537,14 +537,13 @@ public: // We should always have a valid status member object at this point. auto status = WorkingSetCommon::getMemberObjectStatus(doc); invariant(!status.isOK()); - LOGV2_WARNING( - 23798, - "Plan executor error during find command: {PlanExecutor_statestr_state}, " - "status: {status}, stats: {Explain_getWinningPlanStats_exec_get}", - "PlanExecutor_statestr_state"_attr = PlanExecutor::statestr(state), - "status"_attr = status, - "Explain_getWinningPlanStats_exec_get"_attr = - redact(Explain::getWinningPlanStats(exec.get()))); + LOGV2_WARNING(23798, + "Plan executor error during find command: {state}, status: {error}, " + "stats: {stats}", + "Plan executor error during find command", + "state"_attr = PlanExecutor::statestr(state), + "error"_attr = status, + "stats"_attr = redact(Explain::getWinningPlanStats(exec.get()))); uassertStatusOK(status.withContext("Executor error during find command")); } diff --git a/src/mongo/db/commands/fsync.cpp b/src/mongo/db/commands/fsync.cpp index 4af2fe9559f..4721170e89f 100644 --- a/src/mongo/db/commands/fsync.cpp +++ b/src/mongo/db/commands/fsync.cpp @@ -136,7 +136,7 @@ public: } const bool lock = cmdObj["lock"].trueValue(); - LOGV2(20461, "CMD fsync: lock:{lock}", "lock"_attr = lock); + LOGV2(20461, "CMD fsync: lock:{lock}", "CMD fsync", "lock"_attr = lock); // fsync + lock is sometimes used to block writes out of the system and does not care if // the `BackupCursorService::fsyncLock` call succeeds. @@ -183,17 +183,18 @@ public: if (!status.isOK()) { releaseLock(); LOGV2_WARNING(20468, - "fsyncLock failed. Lock count reset to 0. Status: {status}", - "status"_attr = status); + "fsyncLock failed. Lock count reset to 0. Status: {error}", + "error"_attr = status); uassertStatusOK(status); } } - LOGV2(20462, "mongod is locked and no writes are allowed. db.fsyncUnlock() to unlock"); - LOGV2(20463, "Lock count is {getLockCount}", "getLockCount"_attr = getLockCount()); - LOGV2(20464, - " For more info see {FSyncCommand_url}", - "FSyncCommand_url"_attr = FSyncCommand::url()); + LOGV2(20462, + "mongod is locked and no writes are allowed. 
db.fsyncUnlock() to unlock, " + "lock count is {lockCount}, for more info see {seeAlso}", + "mongod is locked and no writes are allowed", + "lockCount"_attr = getLockCount(), + "seeAlso"_attr = FSyncCommand::url()); result.append("info", "now locked against writes, use db.fsyncUnlock() to unlock"); result.append("lockCount", getLockCount()); result.append("seeAlso", FSyncCommand::url()); @@ -361,7 +362,7 @@ void FSyncLockThread::run() { registerShutdownTask([&] { stdx::unique_lock<Latch> stateLock(fsyncCmd.lockStateMutex); if (fsyncCmd.getLockCount_inLock() > 0) { - LOGV2_WARNING(20469, "Interrupting fsync because the server is shutting down."); + LOGV2_WARNING(20469, "Interrupting fsync because the server is shutting down"); while (fsyncCmd.getLockCount_inLock()) { // Relies on the lock to be released in 'releaseLock_inLock()' when the // release brings the lock count to 0. @@ -375,7 +376,10 @@ void FSyncLockThread::run() { try { storageEngine->flushAllFiles(&opCtx, /*callerHoldsReadLock*/ true); } catch (const std::exception& e) { - LOGV2_ERROR(20472, "error doing flushAll: {e_what}", "e_what"_attr = e.what()); + LOGV2_ERROR(20472, + "Error doing flushAll: {error}", + "Error doing flushAll", + "error"_attr = e.what()); fsyncCmd.threadStatus = Status(ErrorCodes::CommandFailed, e.what()); fsyncCmd.acquireFsyncLockSyncCV.notify_one(); return; @@ -401,12 +405,16 @@ void FSyncLockThread::run() { }); } catch (const DBException& e) { if (_allowFsyncFailure) { - LOGV2_WARNING(20470, - "Locking despite storage engine being unable to begin backup : {e}", - "e"_attr = e.toString()); + LOGV2_WARNING( + 20470, + "Locking despite storage engine being unable to begin backup: {error}", + "Locking despite storage engine being unable to begin backup", + "error"_attr = e); } else { - LOGV2_ERROR( - 20473, "storage engine unable to begin backup : {e}", "e"_attr = e.toString()); + LOGV2_ERROR(20473, + "Storage engine unable to begin backup: {error}", + "Storage engine unable to begin backup", + "error"_attr = e); fsyncCmd.threadStatus = e.toStatus(); fsyncCmd.acquireFsyncLockSyncCV.notify_one(); return; @@ -433,7 +441,10 @@ void FSyncLockThread::run() { } } catch (const std::exception& e) { - LOGV2_FATAL(40350, "FSyncLockThread exception: {e_what}", "e_what"_attr = e.what()); + LOGV2_FATAL(40350, + "FSyncLockThread exception: {error}", + "FSyncLockThread exception", + "error"_attr = e.what()); } } diff --git a/src/mongo/db/commands/generic.cpp b/src/mongo/db/commands/generic.cpp index 2595c0e2ca2..0296b180f05 100644 --- a/src/mongo/db/commands/generic.cpp +++ b/src/mongo/db/commands/generic.cpp @@ -27,8 +27,6 @@ * it in the license file. 
*/ -#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand - #include "mongo/platform/basic.h" #include "mongo/bson/util/bson_extract.h" diff --git a/src/mongo/db/commands/get_last_error.cpp b/src/mongo/db/commands/get_last_error.cpp index f8df8317b55..6c44a68e520 100644 --- a/src/mongo/db/commands/get_last_error.cpp +++ b/src/mongo/db/commands/get_last_error.cpp @@ -275,10 +275,10 @@ public: if (electionId != repl::ReplicationCoordinator::get(opCtx)->getElectionId()) { LOGV2_DEBUG(20476, 3, - "oid passed in is {electionId}, but our id is " - "{repl_ReplicationCoordinator_get_opCtx_getElectionId}", - "electionId"_attr = electionId, - "repl_ReplicationCoordinator_get_opCtx_getElectionId"_attr = + "OID passed in is {passedOID}, but our id is {ourOID}", + "OID mismatch during election", + "passedOID"_attr = electionId, + "ourOID"_attr = repl::ReplicationCoordinator::get(opCtx)->getElectionId()); errmsg = "election occurred after write"; result.append("code", ErrorCodes::WriteConcernFailed); diff --git a/src/mongo/db/commands/getmore_cmd.cpp b/src/mongo/db/commands/getmore_cmd.cpp index 58623cd173a..181c9511806 100644 --- a/src/mongo/db/commands/getmore_cmd.cpp +++ b/src/mongo/db/commands/getmore_cmd.cpp @@ -328,14 +328,13 @@ public: auto status = WorkingSetCommon::getMemberObjectStatus(doc); invariant(!status.isOK()); // Log an error message and then perform the cleanup. - LOGV2_WARNING(20478, - "GetMore command executor error: {PlanExecutor_statestr_state}, " - "status: {status}, stats: {Explain_getWinningPlanStats_exec}", - "PlanExecutor_statestr_state"_attr = - PlanExecutor::statestr(*state), - "status"_attr = status, - "Explain_getWinningPlanStats_exec"_attr = - redact(Explain::getWinningPlanStats(exec))); + LOGV2_WARNING( + 20478, + "getMore command executor error: {state}, status: {error}, stats: {stats}", + "getMore command executor error", + "state"_attr = PlanExecutor::statestr(*state), + "error"_attr = status, + "stats"_attr = redact(Explain::getWinningPlanStats(exec))); nextBatch->abandon(); return status; @@ -420,7 +419,7 @@ public: if (MONGO_unlikely(GetMoreHangBeforeReadLock.shouldFail())) { LOGV2(20477, "GetMoreHangBeforeReadLock fail point enabled. Blocking until fail " - "point is disabled."); + "point is disabled"); GetMoreHangBeforeReadLock.pauseWhileSet(opCtx); } diff --git a/src/mongo/db/commands/http_client.cpp b/src/mongo/db/commands/http_client.cpp index 7f55cecf15f..1bdc6116fd9 100644 --- a/src/mongo/db/commands/http_client.cpp +++ b/src/mongo/db/commands/http_client.cpp @@ -27,8 +27,6 @@ * it in the license file. 
*/ -#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kNetwork - #include "mongo/platform/basic.h" #include "mongo/base/init.h" @@ -38,7 +36,6 @@ #include "mongo/db/commands/http_client_gen.h" #include "mongo/db/commands/test_commands_enabled.h" #include "mongo/db/operation_context.h" -#include "mongo/logv2/log.h" #include "mongo/util/net/hostandport.h" #include "mongo/util/net/http_client.h" diff --git a/src/mongo/db/commands/index_filter_commands.cpp b/src/mongo/db/commands/index_filter_commands.cpp index f30317364ce..7c6cada4787 100644 --- a/src/mongo/db/commands/index_filter_commands.cpp +++ b/src/mongo/db/commands/index_filter_commands.cpp @@ -265,8 +265,9 @@ Status ClearFilters::clear(OperationContext* opCtx, planCache->remove(*cq).transitional_ignore(); LOGV2(20479, - "Removed index filter on {cq_Short}", - "cq_Short"_attr = redact(cq->toStringShort())); + "Removed index filter on {query}", + "Removed index filter on query", + "query"_attr = redact(cq->toStringShort())); return Status::OK(); } @@ -322,7 +323,10 @@ Status ClearFilters::clear(OperationContext* opCtx, planCache->remove(*cq).transitional_ignore(); } - LOGV2(20480, "Removed all index filters for collection: {ns}", "ns"_attr = ns); + LOGV2(20480, + "Removed all index filters for collection: {namespace}", + "Removed all index filters for collection", + "namespace"_attr = ns); return Status::OK(); } @@ -400,9 +404,10 @@ Status SetFilter::set(OperationContext* opCtx, planCache->remove(*cq).transitional_ignore(); LOGV2(20481, - "Index filter set on {cq_Short} {indexesElt}", - "cq_Short"_attr = redact(cq->toStringShort()), - "indexesElt"_attr = indexesElt); + "Index filter set on {query} {indexes}", + "Index filter set on query", + "query"_attr = redact(cq->toStringShort()), + "indexes"_attr = indexesElt); return Status::OK(); } diff --git a/src/mongo/db/commands/kill_all_sessions_by_pattern_command.cpp b/src/mongo/db/commands/kill_all_sessions_by_pattern_command.cpp index 93e0037e7a7..c7687db6782 100644 --- a/src/mongo/db/commands/kill_all_sessions_by_pattern_command.cpp +++ b/src/mongo/db/commands/kill_all_sessions_by_pattern_command.cpp @@ -27,8 +27,6 @@ * it in the license file. */ -#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand - #include "mongo/platform/basic.h" #include "mongo/base/init.h" diff --git a/src/mongo/db/commands/kill_all_sessions_command.cpp b/src/mongo/db/commands/kill_all_sessions_command.cpp index 81ac726c642..27c201e6b1b 100644 --- a/src/mongo/db/commands/kill_all_sessions_command.cpp +++ b/src/mongo/db/commands/kill_all_sessions_command.cpp @@ -27,8 +27,6 @@ * it in the license file. */ -#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand - #include "mongo/platform/basic.h" #include "mongo/base/init.h" diff --git a/src/mongo/db/commands/kill_op.cpp b/src/mongo/db/commands/kill_op.cpp index 3e5aad9883b..0f792e21f09 100644 --- a/src/mongo/db/commands/kill_op.cpp +++ b/src/mongo/db/commands/kill_op.cpp @@ -58,7 +58,7 @@ public: // Used by tests to check if auth checks passed. result.append("info", "attempting to kill op"); - LOGV2(20482, "going to kill op: {opId}", "opId"_attr = opId); + LOGV2(20482, "Going to kill op: {opId}", "Going to kill op", "opId"_attr = opId); KillOpCmdBase::killLocalOperation(opCtx, opId); // killOp always reports success once past the auth check. 
diff --git a/src/mongo/db/commands/kill_sessions_command.cpp b/src/mongo/db/commands/kill_sessions_command.cpp index debba8b956c..41e3596938f 100644 --- a/src/mongo/db/commands/kill_sessions_command.cpp +++ b/src/mongo/db/commands/kill_sessions_command.cpp @@ -27,8 +27,6 @@ * it in the license file. */ -#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand - #include "mongo/platform/basic.h" #include "mongo/base/init.h" diff --git a/src/mongo/db/commands/killoperations_cmd.cpp b/src/mongo/db/commands/killoperations_cmd.cpp index fd01fd1b4b5..0d5128f11f0 100644 --- a/src/mongo/db/commands/killoperations_cmd.cpp +++ b/src/mongo/db/commands/killoperations_cmd.cpp @@ -43,9 +43,8 @@ public: auto status = cursorManager->killCursor(opCtx, cursorId, true /* shouldAudit */); if (!status.isOK()) { - LOGV2(4664803, - "Failed to kill the cursor ", - "status"_attr = redact(status.toString())); + LOGV2( + 4664803, "Failed to kill the cursor", "error"_attr = redact(status.toString())); } else { LOGV2(4664804, "Killed cursor", "cursorId"_attr = cursorId); } diff --git a/src/mongo/db/commands/parameters.cpp b/src/mongo/db/commands/parameters.cpp index 2bd6327d7e7..d6ade3f7a18 100644 --- a/src/mongo/db/commands/parameters.cpp +++ b/src/mongo/db/commands/parameters.cpp @@ -360,23 +360,26 @@ public: uassertStatusOK(foundParameter->second->set(parameter)); } catch (const DBException& ex) { LOGV2(20496, - "error setting parameter {parameterName} to {newValue} errMsg: {ex}", + "Error setting parameter {parameterName} to {newValue} errMsg: {error}", + "Error setting parameter to new value", "parameterName"_attr = parameterName, "newValue"_attr = redact(parameter.toString(false)), - "ex"_attr = redact(ex)); + "error"_attr = redact(ex)); throw; } if (oldValue) { LOGV2(23435, - "successfully set parameter {parameterName} to {newValue} (was " + "Successfully set parameter {parameterName} to {newValue} (was " "{oldValue})", + "Successfully set parameter to new value", "parameterName"_attr = parameterName, "newValue"_attr = redact(parameter.toString(false)), "oldValue"_attr = redact(oldValue.toString(false))); } else { LOGV2(23436, - "successfully set parameter {parameterName} to {newValue}", + "Successfully set parameter {parameterName} to {newValue}", + "Successfully set parameter to new value", "parameterName"_attr = parameterName, "newValue"_attr = redact(parameter.toString(false))); } diff --git a/src/mongo/db/commands/parse_log_component_settings.cpp b/src/mongo/db/commands/parse_log_component_settings.cpp index ce55af6cc79..5c741369904 100644 --- a/src/mongo/db/commands/parse_log_component_settings.cpp +++ b/src/mongo/db/commands/parse_log_component_settings.cpp @@ -27,8 +27,6 @@ * it in the license file. 
*/ -#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault - #include "mongo/platform/basic.h" #include "mongo/db/commands/parse_log_component_settings.h" diff --git a/src/mongo/db/commands/plan_cache_clear_command.cpp b/src/mongo/db/commands/plan_cache_clear_command.cpp index e03f39a46cf..490af16b497 100644 --- a/src/mongo/db/commands/plan_cache_clear_command.cpp +++ b/src/mongo/db/commands/plan_cache_clear_command.cpp @@ -81,28 +81,27 @@ Status clear(OperationContext* opCtx, invariant(result.code() == ErrorCodes::NoSuchKey); LOGV2_DEBUG(23906, 1, - "{ns}: query shape doesn't exist in PlanCache - {cq_getQueryObj}(sort: " - "{cq_getQueryRequest_getSort}; projection: {cq_getQueryRequest_getProj}; " - "collation: {cq_getQueryRequest_getCollation})", - "ns"_attr = ns, - "cq_getQueryObj"_attr = redact(cq->getQueryObj()), - "cq_getQueryRequest_getSort"_attr = cq->getQueryRequest().getSort(), - "cq_getQueryRequest_getProj"_attr = cq->getQueryRequest().getProj(), - "cq_getQueryRequest_getCollation"_attr = - cq->getQueryRequest().getCollation()); + "{namespace}: Query shape doesn't exist in PlanCache - {query}" + "(sort: {sort}; projection: {projection}; collation: {collation})", + "Query shape doesn't exist in PlanCache", + "namespace"_attr = ns, + "query"_attr = redact(cq->getQueryObj()), + "sort"_attr = cq->getQueryRequest().getSort(), + "projection"_attr = cq->getQueryRequest().getProj(), + "collation"_attr = cq->getQueryRequest().getCollation()); return Status::OK(); } LOGV2_DEBUG(23907, 1, - "{ns}: removed plan cache entry - {cq_getQueryObj}(sort: " - "{cq_getQueryRequest_getSort}; projection: {cq_getQueryRequest_getProj}; " - "collation: {cq_getQueryRequest_getCollation})", - "ns"_attr = ns, - "cq_getQueryObj"_attr = redact(cq->getQueryObj()), - "cq_getQueryRequest_getSort"_attr = cq->getQueryRequest().getSort(), - "cq_getQueryRequest_getProj"_attr = cq->getQueryRequest().getProj(), - "cq_getQueryRequest_getCollation"_attr = cq->getQueryRequest().getCollation()); + "{namespace}: Removed plan cache entry - {query}" + "(sort: {sort}; projection: {projection}; collation: {collation})", + "Removed plan cache entry", + "namespace"_attr = ns, + "query"_attr = redact(cq->getQueryObj()), + "sort"_attr = cq->getQueryRequest().getSort(), + "projection"_attr = cq->getQueryRequest().getProj(), + "collation"_attr = cq->getQueryRequest().getCollation()); return Status::OK(); } @@ -117,7 +116,8 @@ Status clear(OperationContext* opCtx, planCache->clear(); - LOGV2_DEBUG(23908, 1, "{ns}: cleared plan cache", "ns"_attr = ns); + LOGV2_DEBUG( + 23908, 1, "{namespace}: Cleared plan cache", "Cleared plan cache", "namespace"_attr = ns); return Status::OK(); } diff --git a/src/mongo/db/commands/resize_oplog.cpp b/src/mongo/db/commands/resize_oplog.cpp index 83e54384a30..b7d88507f66 100644 --- a/src/mongo/db/commands/resize_oplog.cpp +++ b/src/mongo/db/commands/resize_oplog.cpp @@ -109,7 +109,7 @@ public: wunit.commit(); LOGV2(20497, - "replSetResizeOplog success.", + "replSetResizeOplog success", "size"_attr = DurableCatalog::get(opCtx) ->getCollectionOptions(opCtx, coll->getCatalogId()) .cappedSize, diff --git a/src/mongo/db/commands/run_aggregate.cpp b/src/mongo/db/commands/run_aggregate.cpp index f01e9ac533d..362864fe3db 100644 --- a/src/mongo/db/commands/run_aggregate.cpp +++ b/src/mongo/db/commands/run_aggregate.cpp @@ -201,13 +201,13 @@ bool handleCursorCommand(OperationContext* opCtx, // We should always have a valid status member object at this point. 
auto status = WorkingSetCommon::getMemberObjectStatus(nextDoc); invariant(!status.isOK()); - LOGV2_WARNING(23799, - "Aggregate command executor error: {PlanExecutor_statestr_state}, " - "status: {status}, stats: {Explain_getWinningPlanStats_exec}", - "PlanExecutor_statestr_state"_attr = PlanExecutor::statestr(state), - "status"_attr = status, - "Explain_getWinningPlanStats_exec"_attr = - redact(Explain::getWinningPlanStats(exec))); + LOGV2_WARNING( + 23799, + "Aggregate command executor error: {state}, status: {error}, stats: {stats}", + "Aggregate command executor error", + "state"_attr = PlanExecutor::statestr(state), + "error"_attr = status, + "stats"_attr = redact(Explain::getWinningPlanStats(exec))); uassertStatusOK(status.withContext("PlanExecutor error during aggregation")); } diff --git a/src/mongo/db/commands/rwc_defaults_commands.cpp b/src/mongo/db/commands/rwc_defaults_commands.cpp index 528157077a3..5398c4dec99 100644 --- a/src/mongo/db/commands/rwc_defaults_commands.cpp +++ b/src/mongo/db/commands/rwc_defaults_commands.cpp @@ -125,8 +125,9 @@ public: updatePersistedDefaultRWConcernDocument(opCtx, newDefaults); LOGV2(20498, - "successfully set RWC defaults to {newDefaults}", - "newDefaults"_attr = newDefaults.toBSON()); + "Successfully set RWC defaults to {value}", + "Successfully set RWC defaults", + "value"_attr = newDefaults); // Refresh to populate the cache with the latest defaults. rwcDefaults.refreshIfNecessary(opCtx); diff --git a/src/mongo/db/commands/server_status.cpp b/src/mongo/db/commands/server_status.cpp index d4cf9d14f49..4fc7aeba20a 100644 --- a/src/mongo/db/commands/server_status.cpp +++ b/src/mongo/db/commands/server_status.cpp @@ -145,7 +145,10 @@ public: timeBuilder.appendNumber("at end", durationCount<Milliseconds>(runElapsed)); if (runElapsed > Milliseconds(1000)) { BSONObj t = timeBuilder.obj(); - LOGV2(20499, "serverStatus was very slow: {t}", "t"_attr = t); + LOGV2(20499, + "serverStatus was very slow: {timeStats}", + "serverStatus was very slow", + "timeStats"_attr = t); bool include_timing = true; const auto& elem = cmdObj[kTimingSection]; diff --git a/src/mongo/db/commands/server_status_servers.cpp b/src/mongo/db/commands/server_status_servers.cpp index 18db05de53f..c208ae96e36 100644 --- a/src/mongo/db/commands/server_status_servers.cpp +++ b/src/mongo/db/commands/server_status_servers.cpp @@ -27,8 +27,6 @@ * it in the license file. */ -#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand - #include "mongo/platform/basic.h" #include "mongo/config.h" diff --git a/src/mongo/db/commands/set_index_commit_quorum_command.cpp b/src/mongo/db/commands/set_index_commit_quorum_command.cpp index bb37d8f2e54..35a0c45122b 100644 --- a/src/mongo/db/commands/set_index_commit_quorum_command.cpp +++ b/src/mongo/db/commands/set_index_commit_quorum_command.cpp @@ -27,8 +27,6 @@ * it in the license file. 
*/ -#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand - #include "mongo/platform/basic.h" #include <iostream> diff --git a/src/mongo/db/commands/sleep_command.cpp b/src/mongo/db/commands/sleep_command.cpp index 8e62d09d2b4..625dab44b37 100644 --- a/src/mongo/db/commands/sleep_command.cpp +++ b/src/mongo/db/commands/sleep_command.cpp @@ -118,7 +118,7 @@ public: const std::string& ns, const BSONObj& cmdObj, BSONObjBuilder& result) { - LOGV2(20504, "test only command sleep invoked"); + LOGV2(20504, "Test-only command 'sleep' invoked"); long long msToSleep = 0; if (cmdObj["secs"] || cmdObj["seconds"] || cmdObj["millis"]) { diff --git a/src/mongo/db/commands/snapshot_management.cpp b/src/mongo/db/commands/snapshot_management.cpp index acf3f6e876e..be2e432c243 100644 --- a/src/mongo/db/commands/snapshot_management.cpp +++ b/src/mongo/db/commands/snapshot_management.cpp @@ -27,8 +27,6 @@ * it in the license file. */ -#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kStorage - #include "mongo/platform/basic.h" #include "mongo/base/init.h" diff --git a/src/mongo/db/commands/test_commands.cpp b/src/mongo/db/commands/test_commands.cpp index 00e71efa209..e478a6bca44 100644 --- a/src/mongo/db/commands/test_commands.cpp +++ b/src/mongo/db/commands/test_commands.cpp @@ -81,8 +81,9 @@ public: BSONObjBuilder& result) { const NamespaceString nss(CommandHelpers::parseNsCollectionRequired(dbname, cmdObj)); LOGV2(20505, - "test only command godinsert invoked coll:{nss_coll}", - "nss_coll"_attr = nss.coll()); + "Test-only command 'godinsert' invoked coll:{collection}", + "Test-only command 'godinsert' invoked", + "collection"_attr = nss.coll()); BSONObj obj = cmdObj["obj"].embeddedObjectUserCheck(); Lock::DBLock lk(opCtx, dbname, MODE_X); diff --git a/src/mongo/db/commands/txn_cmds.cpp b/src/mongo/db/commands/txn_cmds.cpp index 9f179dc1c17..29e76b6ffce 100644 --- a/src/mongo/db/commands/txn_cmds.cpp +++ b/src/mongo/db/commands/txn_cmds.cpp @@ -98,9 +98,10 @@ public: LOGV2_DEBUG(20507, 3, "Received commitTransaction for transaction with txnNumber " - "{opCtx_getTxnNumber} on session {opCtx_getLogicalSessionId}", - "opCtx_getTxnNumber"_attr = opCtx->getTxnNumber(), - "opCtx_getLogicalSessionId"_attr = opCtx->getLogicalSessionId()->toBSON()); + "{txnNumber} on session {sessionId}", + "Received commitTransaction", + "txnNumber"_attr = opCtx->getTxnNumber(), + "sessionId"_attr = opCtx->getLogicalSessionId()->toBSON()); // commitTransaction is retryable. 
if (txnParticipant.transactionIsCommitted()) { @@ -205,10 +206,11 @@ public: LOGV2_DEBUG(20508, 3, - "Received abortTransaction for transaction with txnNumber {opCtx_getTxnNumber} " - "on session {opCtx_getLogicalSessionId}", - "opCtx_getTxnNumber"_attr = opCtx->getTxnNumber(), - "opCtx_getLogicalSessionId"_attr = opCtx->getLogicalSessionId()->toBSON()); + "Received abortTransaction for transaction with txnNumber {txnNumber} " + "on session {sessionId}", + "Received abortTransaction", + "txnNumber"_attr = opCtx->getTxnNumber(), + "sessionId"_attr = opCtx->getLogicalSessionId()->toBSON()); uassert(ErrorCodes::NoSuchTransaction, "Transaction isn't in progress", diff --git a/src/mongo/db/commands/user_management_commands.cpp b/src/mongo/db/commands/user_management_commands.cpp index b16a84b8999..ab52778e48c 100644 --- a/src/mongo/db/commands/user_management_commands.cpp +++ b/src/mongo/db/commands/user_management_commands.cpp @@ -570,7 +570,7 @@ public: } if (_authzManager->getCacheGeneration() == _cacheGeneration) { - LOGV2_DEBUG(20509, 1, "User management command did not invalidate the user cache."); + LOGV2_DEBUG(20509, 1, "User management command did not invalidate the user cache"); _authzManager->invalidateUserCache(_opCtx); } } @@ -2493,9 +2493,10 @@ public: // Match the behavior of mongorestore to continue on failure LOGV2_WARNING( 20510, - "Could not update user {userName} in _mergeAuthzCollections command: {status}", - "userName"_attr = userName, - "status"_attr = redact(status)); + "Could not update user {user} in _mergeAuthzCollections command: {error}", + "Could not update user during _mergeAuthzCollections command", + "user"_attr = userName, + "error"_attr = redact(status)); } } else { auditCreateOrUpdateUser(userObj, true); @@ -2504,9 +2505,10 @@ public: // Match the behavior of mongorestore to continue on failure LOGV2_WARNING( 20511, - "Could not insert user {userName} in _mergeAuthzCollections command: {status}", - "userName"_attr = userName, - "status"_attr = redact(status)); + "Could not insert user {user} in _mergeAuthzCollections command: {error}", + "Could not insert user during _mergeAuthzCollections command", + "user"_attr = userName, + "error"_attr = redact(status)); } } usersToDrop->erase(userName); @@ -2537,9 +2539,10 @@ public: // Match the behavior of mongorestore to continue on failure LOGV2_WARNING( 20512, - "Could not update role {roleName} in _mergeAuthzCollections command: {status}", - "roleName"_attr = roleName, - "status"_attr = redact(status)); + "Could not update role {role} in _mergeAuthzCollections command: {error}", + "Could not update role during _mergeAuthzCollections command", + "role"_attr = roleName, + "error"_attr = redact(status)); } } else { auditCreateOrUpdateRole(roleObj, true); @@ -2548,9 +2551,10 @@ public: // Match the behavior of mongorestore to continue on failure LOGV2_WARNING( 20513, - "Could not insert role {roleName} in _mergeAuthzCollections command: {status}", - "roleName"_attr = roleName, - "status"_attr = redact(status)); + "Could not insert role {role} in _mergeAuthzCollections command: {error}", + "Could not insert role during _mergeAuthzCollections command", + "role"_attr = roleName, + "error"_attr = redact(status)); } } rolesToDrop->erase(roleName); diff --git a/src/mongo/db/commands/user_management_commands_common.cpp b/src/mongo/db/commands/user_management_commands_common.cpp index 6fb7c160a2f..c2be6896ced 100644 --- a/src/mongo/db/commands/user_management_commands_common.cpp +++ 
b/src/mongo/db/commands/user_management_commands_common.cpp @@ -27,8 +27,6 @@ * it in the license file. */ -#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kAccessControl - #include "mongo/platform/basic.h" #include "mongo/db/commands/user_management_commands_common.h" diff --git a/src/mongo/db/commands/validate.cpp b/src/mongo/db/commands/validate.cpp index a5134c7b929..badb47a4852 100644 --- a/src/mongo/db/commands/validate.cpp +++ b/src/mongo/db/commands/validate.cpp @@ -129,7 +129,7 @@ public: if (background && storageGlobalParams.engine == "ephemeralForTest") { LOGV2(4775400, "ephemeralForTest does not support background validation, switching to " - "foreground validation."); + "foreground validation"); background = false; } diff --git a/src/mongo/db/commands/vote_commit_index_build_command.cpp b/src/mongo/db/commands/vote_commit_index_build_command.cpp index cdf50c95cb2..d9b2bfbde44 100644 --- a/src/mongo/db/commands/vote_commit_index_build_command.cpp +++ b/src/mongo/db/commands/vote_commit_index_build_command.cpp @@ -76,9 +76,10 @@ public: const auto& cmd = request(); LOGV2_DEBUG(3856208, 1, - "Received voteCommitIndexBuild request for index build: {builduuid}, from " - "host: {host} ", - "builduuid"_attr = cmd.getCommandParameter(), + "Received voteCommitIndexBuild request for index build: {buildUUID}, " + "from host: {host}", + "Received voteCommitIndexBuild request", + "buildUUID"_attr = cmd.getCommandParameter(), "host"_attr = cmd.getHostAndPort().toString()); auto voteStatus = IndexBuildsCoordinator::get(opCtx)->voteCommitIndexBuild( opCtx, cmd.getCommandParameter(), cmd.getHostAndPort()); diff --git a/src/mongo/db/commands/whats_my_sni_command.cpp b/src/mongo/db/commands/whats_my_sni_command.cpp index 5976904cb6f..c70d8b82a13 100644 --- a/src/mongo/db/commands/whats_my_sni_command.cpp +++ b/src/mongo/db/commands/whats_my_sni_command.cpp @@ -27,8 +27,6 @@ * it in the license file. */ -#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand - #include "mongo/db/commands.h" namespace mongo { |