Diffstat (limited to 'src/mongo/db/commands')
-rw-r--r--  src/mongo/db/commands/authentication_commands.cpp | 28
-rw-r--r--  src/mongo/db/commands/create_indexes.cpp | 68
-rw-r--r--  src/mongo/db/commands/dbcheck.cpp | 3
-rw-r--r--  src/mongo/db/commands/dbcommands_d.cpp | 11
-rw-r--r--  src/mongo/db/commands/dbhash.cpp | 5
-rw-r--r--  src/mongo/db/commands/drop_indexes.cpp | 5
-rw-r--r--  src/mongo/db/commands/feature_compatibility_version.cpp | 15
-rw-r--r--  src/mongo/db/commands/fsync.cpp | 43
-rw-r--r--  src/mongo/db/commands/generic_servers.cpp | 3
-rw-r--r--  src/mongo/db/commands/get_last_error.cpp | 10
-rw-r--r--  src/mongo/db/commands/getmore_cmd.cpp | 17
-rw-r--r--  src/mongo/db/commands/index_filter_commands.cpp | 12
-rw-r--r--  src/mongo/db/commands/kill_op.cpp | 3
-rw-r--r--  src/mongo/db/commands/mr.cpp | 51
-rw-r--r--  src/mongo/db/commands/oplog_note.cpp | 3
-rw-r--r--  src/mongo/db/commands/parameters.cpp | 8
-rw-r--r--  src/mongo/db/commands/resize_oplog.cpp | 3
-rw-r--r--  src/mongo/db/commands/rwc_defaults_commands.cpp | 5
-rw-r--r--  src/mongo/db/commands/server_status.cpp | 3
-rw-r--r--  src/mongo/db/commands/set_feature_compatibility_version_command.cpp | 9
-rw-r--r--  src/mongo/db/commands/sleep_command.cpp | 3
-rw-r--r--  src/mongo/db/commands/test_commands.cpp | 5
-rw-r--r--  src/mongo/db/commands/traffic_recording_cmds.cpp | 8
-rw-r--r--  src/mongo/db/commands/txn_cmds.cpp | 17
-rw-r--r--  src/mongo/db/commands/user_management_commands.cpp | 31
-rw-r--r--  src/mongo/db/commands/validate.cpp | 8
26 files changed, 258 insertions, 119 deletions
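The diffstat above covers a mechanical conversion of the legacy stream-based logging calls (log(), warning(), error(), severe(), LOG(n)) to the structured logv2 API: each touched file gains an include of "mongo/logv2/log.h", each call site gets a stable numeric ID, the streamed message becomes a template with {name} placeholders, and each streamed value is bound as a named attribute via the "_attr" user-defined literal. A minimal before/after sketch of the pattern, taken from the create_indexes.cpp hunks below:

    // Before: values are streamed into the message text.
    log() << "Registering index build: " << buildUUID;

    // After: stable ID, message template, and a named attribute whose
    // name is derived from the streamed expression.
    LOGV2(20438, "Registering index build: {buildUUID}", "buildUUID"_attr = buildUUID);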
diff --git a/src/mongo/db/commands/authentication_commands.cpp b/src/mongo/db/commands/authentication_commands.cpp
index d51d9815d08..9ce82d61531 100644
--- a/src/mongo/db/commands/authentication_commands.cpp
+++ b/src/mongo/db/commands/authentication_commands.cpp
@@ -54,6 +54,7 @@
#include "mongo/db/commands/test_commands_enabled.h"
#include "mongo/db/operation_context.h"
#include "mongo/db/stats/counters.h"
+#include "mongo/logv2/log.h"
#include "mongo/platform/random.h"
#include "mongo/rpc/metadata/client_metadata.h"
#include "mongo/rpc/metadata/client_metadata_ismaster.h"
@@ -116,8 +117,9 @@ Status _authenticateX509(OperationContext* opCtx, const UserName& user, const BS
if (!clientMetadata->getApplicationName().empty() ||
(driverName != "MongoDB Internal Client" &&
driverName != "NetworkInterfaceTL")) {
- warning() << "Client isn't a mongod or mongos, but is connecting with a "
- "certificate with cluster membership";
+ LOGV2_WARNING(20430,
+ "Client isn't a mongod or mongos, but is connecting with a "
+ "certificate with cluster membership");
}
}
@@ -256,7 +258,10 @@ bool CmdAuthenticate::run(OperationContext* opCtx,
CommandHelpers::handleMarkKillOnClientDisconnect(opCtx);
if (!serverGlobalParams.quiet.load()) {
mutablebson::Document cmdToLog(cmdObj, mutablebson::Document::kInPlaceDisabled);
- log() << " authenticate db: " << dbname << " " << cmdToLog;
+ LOGV2(20427,
+ " authenticate db: {dbname} {cmdToLog}",
+ "dbname"_attr = dbname,
+ "cmdToLog"_attr = cmdToLog);
}
std::string mechanism = cmdObj.getStringField("mechanism");
if (mechanism.empty()) {
@@ -285,8 +290,13 @@ bool CmdAuthenticate::run(OperationContext* opCtx,
if (!status.isOK()) {
if (!serverGlobalParams.quiet.load()) {
auto const client = opCtx->getClient();
- log() << "Failed to authenticate " << user << " from client " << client->getRemote()
- << " with mechanism " << mechanism << ": " << status;
+ LOGV2(20428,
+ "Failed to authenticate {user} from client {client_getRemote} with mechanism "
+ "{mechanism}: {status}",
+ "user"_attr = user,
+ "client_getRemote"_attr = client->getRemote(),
+ "mechanism"_attr = mechanism,
+ "status"_attr = status);
}
sleepmillis(saslGlobalParams.authFailedDelay.load());
if (status.code() == ErrorCodes::AuthenticationFailed) {
@@ -300,8 +310,12 @@ bool CmdAuthenticate::run(OperationContext* opCtx,
}
if (!serverGlobalParams.quiet.load()) {
- log() << "Successfully authenticated as principal " << user.getUser() << " on "
- << user.getDB() << " from client " << opCtx->getClient()->session()->remote();
+ LOGV2(20429,
+ "Successfully authenticated as principal {user_getUser} on {user_getDB} from client "
+ "{opCtx_getClient_session_remote}",
+ "user_getUser"_attr = user.getUser(),
+ "user_getDB"_attr = user.getDB(),
+ "opCtx_getClient_session_remote"_attr = opCtx->getClient()->session()->remote());
}
result.append("dbname", user.getDB());
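Note the attribute-naming convention when the streamed operand is an expression rather than a plain variable: the expression is flattened into an identifier (client->getRemote() becomes client_getRemote, user.getDB() becomes user_getDB). Abridged from the hunk above:

    // Expression operands are flattened into attribute names:
    log() << "Failed to authenticate " << user << " from client " << client->getRemote();
    // becomes
    LOGV2(20428,
          "Failed to authenticate {user} from client {client_getRemote}",
          "user"_attr = user,
          "client_getRemote"_attr = client->getRemote());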
diff --git a/src/mongo/db/commands/create_indexes.cpp b/src/mongo/db/commands/create_indexes.cpp
index ba8eb767daa..3ea9f94aea4 100644
--- a/src/mongo/db/commands/create_indexes.cpp
+++ b/src/mongo/db/commands/create_indexes.cpp
@@ -61,6 +61,7 @@
#include "mongo/db/s/database_sharding_state.h"
#include "mongo/db/server_options.h"
#include "mongo/db/views/view_catalog.h"
+#include "mongo/logv2/log.h"
#include "mongo/platform/compiler.h"
#include "mongo/s/shard_key_pattern.h"
#include "mongo/util/log.h"
@@ -413,8 +414,9 @@ BSONObj runCreateIndexesOnNewCollection(OperationContext* opCtx,
if (MONGO_unlikely(hangBeforeCreateIndexesCollectionCreate.shouldFail())) {
// Simulate a scenario where a conflicting collection creation occurs
// mid-index build.
- log() << "Hanging create collection due to failpoint "
- "'hangBeforeCreateIndexesCollectionCreate'";
+ LOGV2(20437,
+ "Hanging create collection due to failpoint "
+ "'hangBeforeCreateIndexesCollectionCreate'");
hangBeforeCreateIndexesCollectionCreate.pauseWhileSet();
}
@@ -557,7 +559,7 @@ bool runCreateIndexesWithCoordinator(OperationContext* opCtx,
auto protocol = IndexBuildsCoordinator::supportsTwoPhaseIndexBuild()
? IndexBuildProtocol::kTwoPhase
: IndexBuildProtocol::kSinglePhase;
- log() << "Registering index build: " << buildUUID;
+ LOGV2(20438, "Registering index build: {buildUUID}", "buildUUID"_attr = buildUUID);
ReplIndexBuildState::IndexCatalogStats stats;
IndexBuildsCoordinator::IndexBuildOptions indexBuildOptions = {commitQuorum};
@@ -568,17 +570,24 @@ bool runCreateIndexesWithCoordinator(OperationContext* opCtx,
auto deadline = opCtx->getDeadline();
// Date_t::max() means no deadline.
if (deadline == Date_t::max()) {
- log() << "Waiting for index build to complete: " << buildUUID;
+ LOGV2(20439,
+ "Waiting for index build to complete: {buildUUID}",
+ "buildUUID"_attr = buildUUID);
} else {
- log() << "Waiting for index build to complete: " << buildUUID
- << " (deadline: " << deadline << ")";
+ LOGV2(20440,
+ "Waiting for index build to complete: {buildUUID} (deadline: {deadline})",
+ "buildUUID"_attr = buildUUID,
+ "deadline"_attr = deadline);
}
// Throws on error.
try {
stats = buildIndexFuture.get(opCtx);
} catch (const ExceptionForCat<ErrorCategory::Interruption>& interruptionEx) {
- log() << "Index build interrupted: " << buildUUID << ": " << interruptionEx;
+ LOGV2(20441,
+ "Index build interrupted: {buildUUID}: {interruptionEx}",
+ "buildUUID"_attr = buildUUID,
+ "interruptionEx"_attr = interruptionEx);
hangBeforeIndexBuildAbortOnInterrupt.pauseWhileSet();
@@ -588,7 +597,9 @@ bool runCreateIndexesWithCoordinator(OperationContext* opCtx,
// background and will complete when this node receives a commitIndexBuild oplog
// entry from the new primary.
if (ErrorCodes::InterruptedDueToReplStateChange == interruptionEx.code()) {
- log() << "Index build continuing in background: " << buildUUID;
+ LOGV2(20442,
+ "Index build continuing in background: {buildUUID}",
+ "buildUUID"_attr = buildUUID);
throw;
}
@@ -621,18 +632,22 @@ bool runCreateIndexesWithCoordinator(OperationContext* opCtx,
Timestamp(),
str::stream() << "Index build interrupted: " << buildUUID << ": "
<< interruptionEx.toString());
- log() << "Index build aborted: " << buildUUID;
+ LOGV2(20443, "Index build aborted: {buildUUID}", "buildUUID"_attr = buildUUID);
throw;
} catch (const ExceptionForCat<ErrorCategory::NotMasterError>& ex) {
- log() << "Index build interrupted due to change in replication state: " << buildUUID
- << ": " << ex;
+ LOGV2(20444,
+ "Index build interrupted due to change in replication state: {buildUUID}: {ex}",
+ "buildUUID"_attr = buildUUID,
+ "ex"_attr = ex);
// The index build will continue to run in the background and will complete when this
// node receives a commitIndexBuild oplog entry from the new primary.
if (IndexBuildProtocol::kTwoPhase == protocol) {
- log() << "Index build continuing in background: " << buildUUID;
+ LOGV2(20445,
+ "Index build continuing in background: {buildUUID}",
+ "buildUUID"_attr = buildUUID);
throw;
}
@@ -644,24 +659,32 @@ bool runCreateIndexesWithCoordinator(OperationContext* opCtx,
Timestamp(),
str::stream() << "Index build interrupted due to change in replication state: "
<< buildUUID << ": " << ex.toString());
- log() << "Index build aborted due to NotMaster error: " << buildUUID;
+ LOGV2(20446,
+ "Index build aborted due to NotMaster error: {buildUUID}",
+ "buildUUID"_attr = buildUUID);
throw;
}
- log() << "Index build completed: " << buildUUID;
+ LOGV2(20447, "Index build completed: {buildUUID}", "buildUUID"_attr = buildUUID);
} catch (DBException& ex) {
// If the collection is dropped after the initial checks in this function (before the
// AutoStatsTracker is created), the IndexBuildsCoordinator (either startIndexBuild() or
the task running the index build) may return NamespaceNotFound. This is not
// considered an error and the command should return success.
if (ErrorCodes::NamespaceNotFound == ex.code()) {
- log() << "Index build failed: " << buildUUID << ": collection dropped: " << ns;
+ LOGV2(20448,
+ "Index build failed: {buildUUID}: collection dropped: {ns}",
+ "buildUUID"_attr = buildUUID,
+ "ns"_attr = ns);
return true;
}
// All other errors should be forwarded to the caller with index build information included.
- log() << "Index build failed: " << buildUUID << ": " << ex.toStatus();
+ LOGV2(20449,
+ "Index build failed: {buildUUID}: {ex_toStatus}",
+ "buildUUID"_attr = buildUUID,
+ "ex_toStatus"_attr = ex.toStatus());
ex.addContext(str::stream() << "Index build failed: " << buildUUID << ": Collection " << ns
<< " ( " << *collectionUUID << " )");
@@ -734,12 +757,13 @@ public:
}
if (shouldLogMessageOnAlreadyBuildingError) {
auto bsonElem = cmdObj.getField(kIndexesFieldName);
- log()
- << "Received a request to create indexes: '" << bsonElem
- << "', but found that at least one of the indexes is already being built, '"
- << ex.toStatus()
- << "'. This request will wait for the pre-existing index build to finish "
- "before proceeding.";
+ LOGV2(20450,
+ "Received a request to create indexes: '{bsonElem}', but found that at "
+ "least one of the indexes is already being built, '{ex_toStatus}'. This "
+ "request will wait for the pre-existing index build to finish "
+ "before proceeding.",
+ "bsonElem"_attr = bsonElem,
+ "ex_toStatus"_attr = ex.toStatus());
shouldLogMessageOnAlreadyBuildingError = false;
}
// Unset the response fields so we do not write duplicate fields.
diff --git a/src/mongo/db/commands/dbcheck.cpp b/src/mongo/db/commands/dbcheck.cpp
index c933dd6ad34..42cfdf10cd6 100644
--- a/src/mongo/db/commands/dbcheck.cpp
+++ b/src/mongo/db/commands/dbcheck.cpp
@@ -47,6 +47,7 @@
#include "mongo/db/repl/optime.h"
#include "mongo/util/background.h"
+#include "mongo/logv2/log.h"
#include "mongo/util/log.h"
namespace mongo {
@@ -210,7 +211,7 @@ protected:
}
if (_done) {
- log() << "dbCheck terminated due to stepdown";
+ LOGV2(20451, "dbCheck terminated due to stepdown");
return;
}
}
diff --git a/src/mongo/db/commands/dbcommands_d.cpp b/src/mongo/db/commands/dbcommands_d.cpp
index aa82cdef7d3..8044d6abaef 100644
--- a/src/mongo/db/commands/dbcommands_d.cpp
+++ b/src/mongo/db/commands/dbcommands_d.cpp
@@ -86,6 +86,7 @@
#include "mongo/db/repl/replication_coordinator.h"
#include "mongo/db/stats/storage_stats.h"
#include "mongo/db/write_concern.h"
+#include "mongo/logv2/log.h"
#include "mongo/s/stale_exception.h"
#include "mongo/scripting/engine.h"
#include "mongo/util/fail_point.h"
@@ -280,7 +281,8 @@ public:
if (partialOk) {
break; // skipped chunk is probably on another shard
}
- log() << "should have chunk: " << n << " have:" << myn;
+ LOGV2(
+ 20452, "should have chunk: {n} have:{myn}", "n"_attr = n, "myn"_attr = myn);
dumpChunks(opCtx, nss.ns(), query, sort);
uassert(10040, "chunks out of order", n == myn);
}
@@ -315,7 +317,10 @@ public:
// RELOCKED
ctx.reset(new AutoGetCollectionForReadCommand(opCtx, nss));
} catch (const StaleConfigException&) {
- LOG(1) << "chunk metadata changed during filemd5, will retarget and continue";
+ LOGV2_DEBUG(
+ 20453,
+ 1,
+ "chunk metadata changed during filemd5, will retarget and continue");
break;
}
@@ -350,7 +355,7 @@ public:
q.sort(sort);
unique_ptr<DBClientCursor> c = client.query(NamespaceString(ns), q);
while (c->more()) {
- log() << c->nextSafe();
+ LOGV2(20454, "{c_nextSafe}", "c_nextSafe"_attr = c->nextSafe());
}
}
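dbcommands_d.cpp also shows the debug-verbosity mapping: LOG(n) becomes LOGV2_DEBUG, with the former verbosity n passed as an explicit argument after the log ID, as in the filemd5 retargeting hunk above:

    // Debug logging keeps its verbosity level as an explicit argument:
    LOG(1) << "chunk metadata changed during filemd5, will retarget and continue";
    // becomes
    LOGV2_DEBUG(20453, 1, "chunk metadata changed during filemd5, will retarget and continue");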
diff --git a/src/mongo/db/commands/dbhash.cpp b/src/mongo/db/commands/dbhash.cpp
index da181689560..7da824f9a10 100644
--- a/src/mongo/db/commands/dbhash.cpp
+++ b/src/mongo/db/commands/dbhash.cpp
@@ -50,6 +50,7 @@
#include "mongo/db/repl/replication_coordinator.h"
#include "mongo/db/storage/storage_engine.h"
#include "mongo/db/transaction_participant.h"
+#include "mongo/logv2/log.h"
#include "mongo/platform/mutex.h"
#include "mongo/util/log.h"
#include "mongo/util/md5.hpp"
@@ -370,7 +371,7 @@ private:
exec = InternalPlanner::collectionScan(
opCtx, nss.ns(), collection, PlanExecutor::NO_YIELD);
} else {
- log() << "can't find _id index for: " << nss;
+ LOGV2(20455, "can't find _id index for: {nss}", "nss"_attr = nss);
return "no _id _index";
}
@@ -386,7 +387,7 @@ private:
n++;
}
if (PlanExecutor::IS_EOF != state) {
- warning() << "error while hashing, db dropped? ns=" << nss;
+ LOGV2_WARNING(20456, "error while hashing, db dropped? ns={nss}", "nss"_attr = nss);
uasserted(34371,
"Plan executor error while running dbHash command: " +
WorkingSetCommon::toStatusString(c));
diff --git a/src/mongo/db/commands/drop_indexes.cpp b/src/mongo/db/commands/drop_indexes.cpp
index 033023ec710..58fee79a890 100644
--- a/src/mongo/db/commands/drop_indexes.cpp
+++ b/src/mongo/db/commands/drop_indexes.cpp
@@ -53,6 +53,7 @@
#include "mongo/db/service_context.h"
#include "mongo/db/storage/durable_catalog.h"
#include "mongo/db/views/view_catalog.h"
+#include "mongo/logv2/log.h"
#include "mongo/util/log.h"
#include "mongo/util/quick_exit.h"
@@ -126,7 +127,7 @@ public:
const NamespaceString toReIndexNss =
CommandHelpers::parseNsCollectionRequired(dbname, jsobj);
- LOG(0) << "CMD: reIndex " << toReIndexNss;
+ LOGV2(20457, "CMD: reIndex {toReIndexNss}", "toReIndexNss"_attr = toReIndexNss);
AutoGetCollection autoColl(opCtx, toReIndexNss, MODE_X);
Collection* collection = autoColl.getCollection();
@@ -219,7 +220,7 @@ public:
}
if (MONGO_unlikely(reIndexCrashAfterDrop.shouldFail())) {
- log() << "exiting because 'reIndexCrashAfterDrop' fail point was set";
+ LOGV2(20458, "exiting because 'reIndexCrashAfterDrop' fail point was set");
quickExit(EXIT_ABRUPT);
}
diff --git a/src/mongo/db/commands/feature_compatibility_version.cpp b/src/mongo/db/commands/feature_compatibility_version.cpp
index c98457c1f86..0e9509623b6 100644
--- a/src/mongo/db/commands/feature_compatibility_version.cpp
+++ b/src/mongo/db/commands/feature_compatibility_version.cpp
@@ -52,6 +52,7 @@
#include "mongo/db/wire_version.h"
#include "mongo/db/write_concern_options.h"
#include "mongo/executor/egress_tag_closer_manager.h"
+#include "mongo/logv2/log.h"
#include "mongo/rpc/get_status_from_command_result.h"
#include "mongo/s/catalog_cache.h"
#include "mongo/s/grid.h"
@@ -158,8 +159,11 @@ void FeatureCompatibilityVersion::onInsertOrUpdate(OperationContext* opCtx, cons
? serverGlobalParams.featureCompatibility.getVersion() != newVersion
: true;
if (isDifferent) {
- log() << "setting featureCompatibilityVersion to "
- << FeatureCompatibilityVersionParser::toString(newVersion);
+ LOGV2(
+ 20459,
+ "setting featureCompatibilityVersion to {FeatureCompatibilityVersionParser_newVersion}",
+ "FeatureCompatibilityVersionParser_newVersion"_attr =
+ FeatureCompatibilityVersionParser::toString(newVersion));
}
// Remove term field of config document on downgrade.
@@ -190,9 +194,10 @@ void FeatureCompatibilityVersion::onInsertOrUpdate(OperationContext* opCtx, cons
if (newVersion != ServerGlobalParams::FeatureCompatibility::Version::kFullyUpgradedTo44) {
if (MONGO_unlikely(hangBeforeAbortingRunningTransactionsOnFCVDowngrade.shouldFail())) {
- log() << "featureCompatibilityVersion - "
- "hangBeforeAbortingRunningTransactionsOnFCVDowngrade fail point enabled. "
- "Blocking until fail point is disabled.";
+ LOGV2(20460,
+ "featureCompatibilityVersion - "
+ "hangBeforeAbortingRunningTransactionsOnFCVDowngrade fail point enabled. "
+ "Blocking until fail point is disabled.");
hangBeforeAbortingRunningTransactionsOnFCVDowngrade.pauseWhileSet();
}
// Abort all open transactions when downgrading the featureCompatibilityVersion.
diff --git a/src/mongo/db/commands/fsync.cpp b/src/mongo/db/commands/fsync.cpp
index 45ade956647..becd1c00ba6 100644
--- a/src/mongo/db/commands/fsync.cpp
+++ b/src/mongo/db/commands/fsync.cpp
@@ -49,6 +49,7 @@
#include "mongo/db/service_context.h"
#include "mongo/db/storage/backup_cursor_hooks.h"
#include "mongo/db/storage/storage_engine.h"
+#include "mongo/logv2/log.h"
#include "mongo/stdx/condition_variable.h"
#include "mongo/util/assert_util.h"
#include "mongo/util/background.h"
@@ -134,7 +135,7 @@ public:
}
const bool lock = cmdObj["lock"].trueValue();
- log() << "CMD fsync: lock:" << lock;
+ LOGV2(20461, "CMD fsync: lock:{lock}", "lock"_attr = lock);
// fsync + lock is sometimes used to block writes out of the system and does not care if
// the `BackupCursorService::fsyncLock` call succeeds.
@@ -180,14 +181,18 @@ public:
if (!status.isOK()) {
releaseLock();
- warning() << "fsyncLock failed. Lock count reset to 0. Status: " << status;
+ LOGV2_WARNING(20468,
+ "fsyncLock failed. Lock count reset to 0. Status: {status}",
+ "status"_attr = status);
uassertStatusOK(status);
}
}
- log() << "mongod is locked and no writes are allowed. db.fsyncUnlock() to unlock";
- log() << "Lock count is " << getLockCount();
- log() << " For more info see " << FSyncCommand::url();
+ LOGV2(20462, "mongod is locked and no writes are allowed. db.fsyncUnlock() to unlock");
+ LOGV2(20463, "Lock count is {getLockCount}", "getLockCount"_attr = getLockCount());
+ LOGV2(20464,
+ " For more info see {FSyncCommand_url}",
+ "FSyncCommand_url"_attr = FSyncCommand::url());
result.append("info", "now locked against writes, use db.fsyncUnlock() to unlock");
result.append("lockCount", getLockCount());
result.append("seeAlso", FSyncCommand::url());
@@ -296,7 +301,7 @@ public:
const BSONObj& cmdObj,
std::string& errmsg,
BSONObjBuilder& result) override {
- log() << "command: unlock requested";
+ LOGV2(20465, "command: unlock requested");
Lock::ExclusiveLock lk(opCtx->lockState(), commandMutex);
@@ -316,11 +321,13 @@ public:
// If we're still locked then lock count is not zero.
invariant(lockCount > 0);
lockCount = fsyncCmd.getLockCount_inLock();
- log() << "fsyncUnlock completed. Lock count is now " << lockCount;
+ LOGV2(20466,
+ "fsyncUnlock completed. Lock count is now {lockCount}",
+ "lockCount"_attr = lockCount);
} else {
invariant(fsyncCmd.getLockCount() == 0);
lockCount = 0;
- log() << "fsyncUnlock completed. mongod is now unlocked and free to accept writes";
+ LOGV2(20467, "fsyncUnlock completed. mongod is now unlocked and free to accept writes");
}
result.append("info", str::stream() << "fsyncUnlock completed");
@@ -357,7 +364,7 @@ void FSyncLockThread::run() {
registerShutdownTask([&] {
stdx::unique_lock<Latch> stateLock(fsyncCmd.lockStateMutex);
if (fsyncCmd.getLockCount_inLock() > 0) {
- warning() << "Interrupting fsync because the server is shutting down.";
+ LOGV2_WARNING(20469, "Interrupting fsync because the server is shutting down.");
while (fsyncCmd.getLockCount_inLock()) {
// Relies on the lock to be released in 'releaseLock_inLock()' when the
// release brings the lock count to 0.
@@ -371,7 +378,7 @@ void FSyncLockThread::run() {
try {
storageEngine->flushAllFiles(&opCtx, /*callerHoldsReadLock*/ true);
} catch (const std::exception& e) {
- error() << "error doing flushAll: " << e.what();
+ LOGV2_ERROR(20472, "error doing flushAll: {e_what}", "e_what"_attr = e.what());
fsyncCmd.threadStatus = Status(ErrorCodes::CommandFailed, e.what());
fsyncCmd.acquireFsyncLockSyncCV.notify_one();
return;
@@ -397,10 +404,12 @@ void FSyncLockThread::run() {
});
} catch (const DBException& e) {
if (_allowFsyncFailure) {
- warning() << "Locking despite storage engine being unable to begin backup : "
- << e.toString();
+ LOGV2_WARNING(20470,
+ "Locking despite storage engine being unable to begin backup : {e}",
+ "e"_attr = e.toString());
} else {
- error() << "storage engine unable to begin backup : " << e.toString();
+ LOGV2_ERROR(
+ 20473, "storage engine unable to begin backup : {e}", "e"_attr = e.toString());
fsyncCmd.threadStatus = e.toStatus();
fsyncCmd.acquireFsyncLockSyncCV.notify_one();
return;
@@ -411,8 +420,10 @@ void FSyncLockThread::run() {
fsyncCmd.acquireFsyncLockSyncCV.notify_one();
while (fsyncCmd.getLockCount_inLock() > 0) {
- warning() << "WARNING: instance is locked, blocking all writes. The fsync command has "
- "finished execution, remember to unlock the instance using fsyncUnlock().";
+ LOGV2_WARNING(
+ 20471,
+ "WARNING: instance is locked, blocking all writes. The fsync command has "
+ "finished execution, remember to unlock the instance using fsyncUnlock().");
fsyncCmd.releaseFsyncLockSyncCV.wait_for(lk, Seconds(60).toSystemDuration());
}
@@ -425,7 +436,7 @@ void FSyncLockThread::run() {
}
} catch (const std::exception& e) {
- severe() << "FSyncLockThread exception: " << e.what();
+ LOGV2_FATAL(20474, "FSyncLockThread exception: {e_what}", "e_what"_attr = e.what());
fassertFailed(40350);
}
}
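fsync.cpp exercises nearly the full severity range, so the one-for-one mapping between the legacy helpers and the logv2 macro family is easiest to read off here:

    // Severity mapping used throughout this commit:
    //   log()     -> LOGV2(id, ...)
    //   LOG(n)    -> LOGV2_DEBUG(id, n, ...)
    //   warning() -> LOGV2_WARNING(id, ...)
    //   error()   -> LOGV2_ERROR(id, ...)
    //   severe()  -> LOGV2_FATAL(id, ...)
    LOGV2_ERROR(20472, "error doing flushAll: {e_what}", "e_what"_attr = e.what());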
diff --git a/src/mongo/db/commands/generic_servers.cpp b/src/mongo/db/commands/generic_servers.cpp
index b398e6d59f2..51bfd5ab8b4 100644
--- a/src/mongo/db/commands/generic_servers.cpp
+++ b/src/mongo/db/commands/generic_servers.cpp
@@ -37,6 +37,7 @@
#include "mongo/db/commands/shutdown.h"
#include "mongo/db/commands/test_commands_enabled.h"
#include "mongo/db/log_process_details.h"
+#include "mongo/logv2/log.h"
#include "mongo/logv2/ramlog.h"
#include "mongo/scripting/engine.h"
#include "mongo/util/exit.h"
@@ -354,7 +355,7 @@ void CmdShutdown::shutdownHelper(const BSONObj& cmdObj) {
::abort();
});
- log() << "terminating, shutdown command received " << cmdObj;
+ LOGV2(20475, "terminating, shutdown command received {cmdObj}", "cmdObj"_attr = cmdObj);
#if defined(_WIN32)
// Signal the ServiceMain thread to shutdown.
diff --git a/src/mongo/db/commands/get_last_error.cpp b/src/mongo/db/commands/get_last_error.cpp
index 1b674dd6ef7..531952af58e 100644
--- a/src/mongo/db/commands/get_last_error.cpp
+++ b/src/mongo/db/commands/get_last_error.cpp
@@ -41,6 +41,7 @@
#include "mongo/db/repl/repl_client_info.h"
#include "mongo/db/repl/replication_coordinator.h"
#include "mongo/db/write_concern.h"
+#include "mongo/logv2/log.h"
#include "mongo/util/log.h"
namespace mongo {
@@ -263,8 +264,13 @@ public:
}
} else {
if (electionId != repl::ReplicationCoordinator::get(opCtx)->getElectionId()) {
- LOG(3) << "oid passed in is " << electionId << ", but our id is "
- << repl::ReplicationCoordinator::get(opCtx)->getElectionId();
+ LOGV2_DEBUG(20476,
+ 3,
+ "oid passed in is {electionId}, but our id is "
+ "{repl_ReplicationCoordinator_get_opCtx_getElectionId}",
+ "electionId"_attr = electionId,
+ "repl_ReplicationCoordinator_get_opCtx_getElectionId"_attr =
+ repl::ReplicationCoordinator::get(opCtx)->getElectionId());
errmsg = "election occurred after write";
result.append("code", ErrorCodes::WriteConcernFailed);
result.append("codeName",
diff --git a/src/mongo/db/commands/getmore_cmd.cpp b/src/mongo/db/commands/getmore_cmd.cpp
index 1795337753d..91b6804f691 100644
--- a/src/mongo/db/commands/getmore_cmd.cpp
+++ b/src/mongo/db/commands/getmore_cmd.cpp
@@ -57,6 +57,7 @@
#include "mongo/db/service_context.h"
#include "mongo/db/stats/counters.h"
#include "mongo/db/stats/top.h"
+#include "mongo/logv2/log.h"
#include "mongo/s/chunk_version.h"
#include "mongo/util/fail_point.h"
#include "mongo/util/log.h"
@@ -328,9 +329,14 @@ public:
auto status = WorkingSetCommon::getMemberObjectStatus(doc);
invariant(!status.isOK());
// Log an error message and then perform the cleanup.
- warning() << "GetMore command executor error: "
- << PlanExecutor::statestr(*state) << ", status: " << status
- << ", stats: " << redact(Explain::getWinningPlanStats(exec));
+ LOGV2_WARNING(20478,
+ "GetMore command executor error: {PlanExecutor_statestr_state}, "
+ "status: {status}, stats: {Explain_getWinningPlanStats_exec}",
+ "PlanExecutor_statestr_state"_attr =
+ PlanExecutor::statestr(*state),
+ "status"_attr = status,
+ "Explain_getWinningPlanStats_exec"_attr =
+ redact(Explain::getWinningPlanStats(exec)));
nextBatch->abandon();
return status;
@@ -413,8 +419,9 @@ public:
ClientCursorParams::LockPolicy::kLockExternally);
if (MONGO_unlikely(GetMoreHangBeforeReadLock.shouldFail())) {
- log() << "GetMoreHangBeforeReadLock fail point enabled. Blocking until fail "
- "point is disabled.";
+ LOGV2(20477,
+ "GetMoreHangBeforeReadLock fail point enabled. Blocking until fail "
+ "point is disabled.");
GetMoreHangBeforeReadLock.pauseWhileSet(opCtx);
}
diff --git a/src/mongo/db/commands/index_filter_commands.cpp b/src/mongo/db/commands/index_filter_commands.cpp
index 59dd614002a..e1348bb8eda 100644
--- a/src/mongo/db/commands/index_filter_commands.cpp
+++ b/src/mongo/db/commands/index_filter_commands.cpp
@@ -50,6 +50,7 @@
#include "mongo/db/matcher/extensions_callback_real.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/query/collection_query_info.h"
+#include "mongo/logv2/log.h"
#include "mongo/stdx/unordered_set.h"
#include "mongo/util/log.h"
@@ -264,7 +265,9 @@ Status ClearFilters::clear(OperationContext* opCtx,
// Remove entry from plan cache
planCache->remove(*cq).transitional_ignore();
- LOG(0) << "Removed index filter on " << redact(cq->toStringShort());
+ LOGV2(20479,
+ "Removed index filter on {cq_Short}",
+ "cq_Short"_attr = redact(cq->toStringShort()));
return Status::OK();
}
@@ -320,7 +323,7 @@ Status ClearFilters::clear(OperationContext* opCtx,
planCache->remove(*cq).transitional_ignore();
}
- LOG(0) << "Removed all index filters for collection: " << ns;
+ LOGV2(20480, "Removed all index filters for collection: {ns}", "ns"_attr = ns);
return Status::OK();
}
@@ -397,7 +400,10 @@ Status SetFilter::set(OperationContext* opCtx,
// Remove entry from plan cache.
planCache->remove(*cq).transitional_ignore();
- LOG(0) << "Index filter set on " << redact(cq->toStringShort()) << " " << indexesElt;
+ LOGV2(20481,
+ "Index filter set on {cq_Short} {indexesElt}",
+ "cq_Short"_attr = redact(cq->toStringShort()),
+ "indexesElt"_attr = indexesElt);
return Status::OK();
}
diff --git a/src/mongo/db/commands/kill_op.cpp b/src/mongo/db/commands/kill_op.cpp
index 5a9b639427e..fa088731393 100644
--- a/src/mongo/db/commands/kill_op.cpp
+++ b/src/mongo/db/commands/kill_op.cpp
@@ -43,6 +43,7 @@
#include "mongo/db/commands/kill_op_cmd_base.h"
#include "mongo/db/operation_context.h"
#include "mongo/db/service_context.h"
+#include "mongo/logv2/log.h"
#include "mongo/util/log.h"
#include "mongo/util/str.h"
@@ -58,7 +59,7 @@ public:
// Used by tests to check if auth checks passed.
result.append("info", "attempting to kill op");
- log() << "going to kill op: " << opId;
+ LOGV2(20482, "going to kill op: {opId}", "opId"_attr = opId);
KillOpCmdBase::killLocalOperation(opCtx, opId);
// killOp always reports success once past the auth check.
diff --git a/src/mongo/db/commands/mr.cpp b/src/mongo/db/commands/mr.cpp
index a9a8582f136..05f3d15e2ea 100644
--- a/src/mongo/db/commands/mr.cpp
+++ b/src/mongo/db/commands/mr.cpp
@@ -65,6 +65,7 @@
#include "mongo/db/server_options.h"
#include "mongo/db/service_context.h"
#include "mongo/db/storage/durable_catalog.h"
+#include "mongo/logv2/log.h"
#include "mongo/platform/mutex.h"
#include "mongo/s/catalog_cache.h"
#include "mongo/s/client/parallel.h"
@@ -467,7 +468,7 @@ Config::Config(const string& _dbname, const BSONObj& cmdObj) {
// DEPRECATED
if (auto mapParamsElem = cmdObj["mapparams"]) {
if (mapParamsDeprecationSampler.tick()) {
- warning() << "The mapparams option to MapReduce is deprecated.";
+ LOGV2_WARNING(20493, "The mapparams option to MapReduce is deprecated.");
}
if (mapParamsElem.type() == Array) {
mapParams = mapParamsElem.embeddedObjectUserCheck().getOwned();
@@ -929,11 +930,12 @@ State::~State() {
_config.tempNamespace,
_useIncremental ? _config.incLong : NamespaceString());
} catch (...) {
- error() << "Unable to drop temporary collection created by mapReduce: "
- << _config.tempNamespace
- << ". This collection will be removed automatically "
- "the next time the server starts up. "
- << exceptionToStatus();
+ LOGV2_ERROR(20494,
+ "Unable to drop temporary collection created by mapReduce: "
+ "{config_tempNamespace}. This collection will be removed automatically "
+ "the next time the server starts up. {exceptionToStatus}",
+ "config_tempNamespace"_attr = _config.tempNamespace,
+ "exceptionToStatus"_attr = exceptionToStatus());
}
}
if (_scope && !_scope->isKillPending() && _scope->getError().empty()) {
@@ -944,7 +946,7 @@ State::~State() {
_scope->invoke(cleanup, nullptr, nullptr, 0, true);
} catch (const DBException&) {
// not important because properties will be reset if scope is reused
- LOG(1) << "MapReduce terminated during state destruction";
+ LOGV2_DEBUG(20483, 1, "MapReduce terminated during state destruction");
}
}
}
@@ -1086,7 +1088,7 @@ void State::switchMode(bool jsMode) {
}
void State::bailFromJS() {
- LOG(1) << "M/R: Switching from JS mode to mixed mode";
+ LOGV2_DEBUG(20484, 1, "M/R: Switching from JS mode to mixed mode");
// reduce and reemit into c++
switchMode(false);
@@ -1363,9 +1365,14 @@ void State::reduceAndSpillInMemoryStateIfNeeded() {
// reduce now to lower mem usage
Timer t;
_scope->invoke(_reduceAll, nullptr, nullptr, 0, true);
- LOG(3) << " MR - did reduceAll: keys=" << keyCt << " dups=" << dupCt
- << " newKeys=" << _scope->getNumberInt("_keyCt") << " time=" << t.millis()
- << "ms";
+ LOGV2_DEBUG(20485,
+ 3,
+ " MR - did reduceAll: keys={keyCt} dups={dupCt} "
+ "newKeys={scope_getNumberInt_keyCt} time={t_millis}ms",
+ "keyCt"_attr = keyCt,
+ "dupCt"_attr = dupCt,
+ "scope_getNumberInt_keyCt"_attr = _scope->getNumberInt("_keyCt"),
+ "t_millis"_attr = t.millis());
return;
}
}
@@ -1378,13 +1385,19 @@ void State::reduceAndSpillInMemoryStateIfNeeded() {
long oldSize = _size;
Timer t;
reduceInMemory();
- LOG(3) << " MR - did reduceInMemory: size=" << oldSize << " dups=" << _dupCount
- << " newSize=" << _size << " time=" << t.millis() << "ms";
+ LOGV2_DEBUG(20486,
+ 3,
+ " MR - did reduceInMemory: size={oldSize} dups={dupCount} newSize={size} "
+ "time={t_millis}ms",
+ "oldSize"_attr = oldSize,
+ "dupCount"_attr = _dupCount,
+ "size"_attr = _size,
+ "t_millis"_attr = t.millis());
// if size is still high, or values are not reducing well, dump
if (_onDisk && (_size > _config.maxInMemSize || _size > oldSize / 2)) {
dumpToInc();
- LOG(3) << " MR - dumping to db";
+ LOGV2_DEBUG(20487, 3, " MR - dumping to db");
}
}
}
@@ -1411,7 +1424,7 @@ bool runMapReduce(OperationContext* opCtx,
const Config config(dbname, cmd);
- LOG(1) << "mr ns: " << config.nss;
+ LOGV2_DEBUG(20488, 1, "mr ns: {config_nss}", "config_nss"_attr = config.nss);
uassert(16149, "cannot run map reduce without the js engine", getGlobalScriptEngine());
@@ -1638,19 +1651,19 @@ bool runMapReduce(OperationContext* opCtx,
invariant(e.extraInfo<StaleConfigInfo>()->getShardId());
}
- log() << "mr detected stale config, should retry" << redact(e);
+ LOGV2(20489, "mr detected stale config, should retry{e}", "e"_attr = redact(e));
throw;
}
// TODO: The error handling code for queries is v. fragile,
// *requires* rethrow AssertionExceptions - should probably fix.
catch (AssertionException& e) {
- log() << "mr failed, removing collection" << redact(e);
+ LOGV2(20490, "mr failed, removing collection{e}", "e"_attr = redact(e));
throw;
} catch (std::exception& e) {
- log() << "mr failed, removing collection" << causedBy(e);
+ LOGV2(20491, "mr failed, removing collection{causedBy_e}", "causedBy_e"_attr = causedBy(e));
throw;
} catch (...) {
- log() << "mr failed for unknown reason, removing collection";
+ LOGV2(20492, "mr failed for unknown reason, removing collection");
throw;
}
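Redaction carries over unchanged in mr.cpp: redact() wraps the attribute value instead of a stream operand, so quiet-mode behavior is preserved (the template also keeps the original's missing space before the value):

    // redact() moves from the stream operand to the attribute value:
    log() << "mr detected stale config, should retry" << redact(e);
    // becomes
    LOGV2(20489, "mr detected stale config, should retry{e}", "e"_attr = redact(e));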
diff --git a/src/mongo/db/commands/oplog_note.cpp b/src/mongo/db/commands/oplog_note.cpp
index 2899d5b602f..b25debc548b 100644
--- a/src/mongo/db/commands/oplog_note.cpp
+++ b/src/mongo/db/commands/oplog_note.cpp
@@ -48,6 +48,7 @@
#include "mongo/db/repl/oplog.h"
#include "mongo/db/repl/replication_coordinator.h"
#include "mongo/db/service_context.h"
+#include "mongo/logv2/log.h"
#include "mongo/util/log.h"
namespace mongo {
@@ -60,7 +61,7 @@ Status _performNoopWrite(OperationContext* opCtx, BSONObj msgObj, StringData not
opCtx, MODE_IX, Date_t::now() + Milliseconds(1), Lock::InterruptBehavior::kLeaveUnlocked);
if (!lock.isLocked()) {
- LOG(1) << "Global lock is not available skipping noopWrite";
+ LOGV2_DEBUG(20495, 1, "Global lock is not available skipping noopWrite");
return {ErrorCodes::LockFailed, "Global lock is not available"};
}
diff --git a/src/mongo/db/commands/parameters.cpp b/src/mongo/db/commands/parameters.cpp
index a189e6f3426..e474b7f6353 100644
--- a/src/mongo/db/commands/parameters.cpp
+++ b/src/mongo/db/commands/parameters.cpp
@@ -44,6 +44,7 @@
#include "mongo/db/storage/storage_options.h"
#include "mongo/logger/logger.h"
#include "mongo/logger/parse_log_component_settings.h"
+#include "mongo/logv2/log.h"
#include "mongo/util/log.h"
#include "mongo/util/str.h"
@@ -358,8 +359,11 @@ public:
try {
uassertStatusOK(foundParameter->second->set(parameter));
} catch (const DBException& ex) {
- log() << "error setting parameter " << parameterName << " to "
- << redact(parameter.toString(false)) << " errMsg: " << redact(ex);
+ LOGV2(20496,
+ "error setting parameter {parameterName} to {parameter_false} errMsg: {ex}",
+ "parameterName"_attr = parameterName,
+ "parameter_false"_attr = redact(parameter.toString(false)),
+ "ex"_attr = redact(ex));
throw;
}
diff --git a/src/mongo/db/commands/resize_oplog.cpp b/src/mongo/db/commands/resize_oplog.cpp
index 31c2153a8da..f5c124d2451 100644
--- a/src/mongo/db/commands/resize_oplog.cpp
+++ b/src/mongo/db/commands/resize_oplog.cpp
@@ -41,6 +41,7 @@
#include "mongo/db/jsobj.h"
#include "mongo/db/operation_context.h"
#include "mongo/db/storage/durable_catalog.h"
+#include "mongo/logv2/log.h"
#include "mongo/util/log.h"
namespace mongo {
@@ -107,7 +108,7 @@ public:
uassertStatusOK(status);
DurableCatalog::get(opCtx)->updateCappedSize(opCtx, coll->getCatalogId(), size);
wunit.commit();
- LOG(0) << "replSetResizeOplog success, currentSize:" << size;
+ LOGV2(20497, "replSetResizeOplog success, currentSize:{size}", "size"_attr = size);
return true;
});
}
diff --git a/src/mongo/db/commands/rwc_defaults_commands.cpp b/src/mongo/db/commands/rwc_defaults_commands.cpp
index b4ada0db3ec..2352c1650d7 100644
--- a/src/mongo/db/commands/rwc_defaults_commands.cpp
+++ b/src/mongo/db/commands/rwc_defaults_commands.cpp
@@ -41,6 +41,7 @@
#include "mongo/db/repl/read_concern_args.h"
#include "mongo/db/repl/replication_coordinator.h"
#include "mongo/db/rw_concern_default_gen.h"
+#include "mongo/logv2/log.h"
#include "mongo/rpc/get_status_from_command_result.h"
#include "mongo/util/log.h"
@@ -114,7 +115,9 @@ public:
opCtx, request().getDefaultReadConcern(), request().getDefaultWriteConcern());
updatePersistedDefaultRWConcernDocument(opCtx, newDefaults);
- log() << "successfully set RWC defaults to " << newDefaults.toBSON();
+ LOGV2(20498,
+ "successfully set RWC defaults to {newDefaults}",
+ "newDefaults"_attr = newDefaults.toBSON());
// Refresh to populate the cache with the latest defaults.
rwcDefaults.refreshIfNecessary(opCtx);
diff --git a/src/mongo/db/commands/server_status.cpp b/src/mongo/db/commands/server_status.cpp
index 685332111b3..b7469ceb8fd 100644
--- a/src/mongo/db/commands/server_status.cpp
+++ b/src/mongo/db/commands/server_status.cpp
@@ -37,6 +37,7 @@
#include "mongo/db/commands/server_status_internal.h"
#include "mongo/db/service_context.h"
#include "mongo/db/stats/counters.h"
+#include "mongo/logv2/log.h"
#include "mongo/util/log.h"
#include "mongo/util/net/http_client.h"
#include "mongo/util/net/socket_utils.h"
@@ -145,7 +146,7 @@ public:
timeBuilder.appendNumber("at end", durationCount<Milliseconds>(runElapsed));
if (runElapsed > Milliseconds(1000)) {
BSONObj t = timeBuilder.obj();
- log() << "serverStatus was very slow: " << t;
+ LOGV2(20499, "serverStatus was very slow: {t}", "t"_attr = t);
bool include_timing = true;
const auto& elem = cmdObj[kTimingSection];
diff --git a/src/mongo/db/commands/set_feature_compatibility_version_command.cpp b/src/mongo/db/commands/set_feature_compatibility_version_command.cpp
index 9ca817e1d66..6dc12d4af3f 100644
--- a/src/mongo/db/commands/set_feature_compatibility_version_command.cpp
+++ b/src/mongo/db/commands/set_feature_compatibility_version_command.cpp
@@ -54,6 +54,7 @@
#include "mongo/db/s/migration_util.h"
#include "mongo/db/s/sharding_state.h"
#include "mongo/db/server_options.h"
+#include "mongo/logv2/log.h"
#include "mongo/rpc/get_status_from_command_result.h"
#include "mongo/s/catalog/type_collection.h"
#include "mongo/s/database_version_helpers.h"
@@ -213,7 +214,7 @@ public:
if (serverGlobalParams.clusterRole == ClusterRole::ShardServer) {
const auto shardingState = ShardingState::get(opCtx);
if (shardingState->enabled()) {
- LOG(0) << "Upgrade: submitting orphaned ranges for cleanup";
+ LOGV2(20500, "Upgrade: submitting orphaned ranges for cleanup");
migrationutil::submitOrphanRangesForCleanup(opCtx);
}
@@ -236,7 +237,7 @@ public:
<< requestedVersion)))));
if (MONGO_unlikely(pauseBeforeUpgradingConfigMetadata.shouldFail())) {
- log() << "Hit pauseBeforeUpgradingConfigMetadata";
+ LOGV2(20501, "Hit pauseBeforeUpgradingConfigMetadata");
pauseBeforeUpgradingConfigMetadata.pauseWhileSet(opCtx);
}
ShardingCatalogManager::get(opCtx)->upgradeOrDowngradeChunksAndTags(
@@ -308,7 +309,7 @@ public:
repl::ReplicationCoordinator::modeReplSet;
if (serverGlobalParams.clusterRole == ClusterRole::ShardServer) {
- LOG(0) << "Downgrade: dropping config.rangeDeletions collection";
+ LOGV2(20502, "Downgrade: dropping config.rangeDeletions collection");
migrationutil::dropRangeDeletionsCollection(opCtx);
// The primary shard sharding a collection will write the initial chunks for a
@@ -334,7 +335,7 @@ public:
<< requestedVersion)))));
if (MONGO_unlikely(pauseBeforeDowngradingConfigMetadata.shouldFail())) {
- log() << "Hit pauseBeforeDowngradingConfigMetadata";
+ LOGV2(20503, "Hit pauseBeforeDowngradingConfigMetadata");
pauseBeforeDowngradingConfigMetadata.pauseWhileSet(opCtx);
}
ShardingCatalogManager::get(opCtx)->upgradeOrDowngradeChunksAndTags(
diff --git a/src/mongo/db/commands/sleep_command.cpp b/src/mongo/db/commands/sleep_command.cpp
index 21407950cd8..c7c74a1c01f 100644
--- a/src/mongo/db/commands/sleep_command.cpp
+++ b/src/mongo/db/commands/sleep_command.cpp
@@ -35,6 +35,7 @@
#include "mongo/db/commands/test_commands_enabled.h"
#include "mongo/db/concurrency/d_concurrency.h"
#include "mongo/db/concurrency/lock_manager_defs.h"
+#include "mongo/logv2/log.h"
#include "mongo/util/log.h"
namespace mongo {
@@ -118,7 +119,7 @@ public:
const std::string& ns,
const BSONObj& cmdObj,
BSONObjBuilder& result) {
- log() << "test only command sleep invoked";
+ LOGV2(20504, "test only command sleep invoked");
long long msToSleep = 0;
if (cmdObj["secs"] || cmdObj["seconds"] || cmdObj["millis"]) {
diff --git a/src/mongo/db/commands/test_commands.cpp b/src/mongo/db/commands/test_commands.cpp
index 81b7332b31a..bf5f1101fcc 100644
--- a/src/mongo/db/commands/test_commands.cpp
+++ b/src/mongo/db/commands/test_commands.cpp
@@ -45,6 +45,7 @@
#include "mongo/db/op_observer.h"
#include "mongo/db/query/internal_plans.h"
#include "mongo/db/service_context.h"
+#include "mongo/logv2/log.h"
#include "mongo/util/log.h"
namespace mongo {
@@ -80,7 +81,9 @@ public:
string& errmsg,
BSONObjBuilder& result) {
const NamespaceString nss(CommandHelpers::parseNsCollectionRequired(dbname, cmdObj));
- log() << "test only command godinsert invoked coll:" << nss.coll();
+ LOGV2(20505,
+ "test only command godinsert invoked coll:{nss_coll}",
+ "nss_coll"_attr = nss.coll());
BSONObj obj = cmdObj["obj"].embeddedObjectUserCheck();
Lock::DBLock lk(opCtx, dbname, MODE_X);
diff --git a/src/mongo/db/commands/traffic_recording_cmds.cpp b/src/mongo/db/commands/traffic_recording_cmds.cpp
index 7fd2894c9f6..9cb02bcf45a 100644
--- a/src/mongo/db/commands/traffic_recording_cmds.cpp
+++ b/src/mongo/db/commands/traffic_recording_cmds.cpp
@@ -38,6 +38,7 @@
#include "mongo/db/commands.h"
#include "mongo/db/traffic_recorder.h"
#include "mongo/db/traffic_recorder_gen.h"
+#include "mongo/logv2/log.h"
#include "mongo/util/log.h"
namespace mongo {
@@ -53,9 +54,10 @@ public:
void typedRun(OperationContext* opCtx) {
TrafficRecorder::get(opCtx->getServiceContext()).start(request());
- log() << "** Warning: The recording file contains unencrypted user traffic."
- << " We recommend that you limit retention of this file and "
- << "store it on an encrypted filesystem volume.";
+ LOGV2(20506,
+ "** Warning: The recording file contains unencrypted user traffic. We recommend "
+ "that you limit retention of this file and store it on an encrypted filesystem "
+ "volume.");
}
private:
diff --git a/src/mongo/db/commands/txn_cmds.cpp b/src/mongo/db/commands/txn_cmds.cpp
index 9f9e7afa02e..91dfc7fa8dd 100644
--- a/src/mongo/db/commands/txn_cmds.cpp
+++ b/src/mongo/db/commands/txn_cmds.cpp
@@ -44,6 +44,7 @@
#include "mongo/db/service_context.h"
#include "mongo/db/transaction_participant.h"
#include "mongo/db/transaction_validation.h"
+#include "mongo/logv2/log.h"
#include "mongo/util/log.h"
namespace mongo {
@@ -95,8 +96,12 @@ public:
"commitTransaction must be run within a transaction",
txnParticipant);
- LOG(3) << "Received commitTransaction for transaction with txnNumber "
- << opCtx->getTxnNumber() << " on session " << opCtx->getLogicalSessionId()->toBSON();
+ LOGV2_DEBUG(20507,
+ 3,
+ "Received commitTransaction for transaction with txnNumber "
+ "{opCtx_getTxnNumber} on session {opCtx_getLogicalSessionId}",
+ "opCtx_getTxnNumber"_attr = opCtx->getTxnNumber(),
+ "opCtx_getLogicalSessionId"_attr = opCtx->getLogicalSessionId()->toBSON());
// commitTransaction is retryable.
if (txnParticipant.transactionIsCommitted()) {
@@ -199,8 +204,12 @@ public:
"abortTransaction must be run within a transaction",
txnParticipant);
- LOG(3) << "Received abortTransaction for transaction with txnNumber "
- << opCtx->getTxnNumber() << " on session " << opCtx->getLogicalSessionId()->toBSON();
+ LOGV2_DEBUG(20508,
+ 3,
+ "Received abortTransaction for transaction with txnNumber {opCtx_getTxnNumber} "
+ "on session {opCtx_getLogicalSessionId}",
+ "opCtx_getTxnNumber"_attr = opCtx->getTxnNumber(),
+ "opCtx_getLogicalSessionId"_attr = opCtx->getLogicalSessionId()->toBSON());
uassert(ErrorCodes::NoSuchTransaction,
"Transaction isn't in progress",
diff --git a/src/mongo/db/commands/user_management_commands.cpp b/src/mongo/db/commands/user_management_commands.cpp
index 90bec146f42..2ccf5d6d97f 100644
--- a/src/mongo/db/commands/user_management_commands.cpp
+++ b/src/mongo/db/commands/user_management_commands.cpp
@@ -67,6 +67,7 @@
#include "mongo/db/ops/write_ops.h"
#include "mongo/db/query/cursor_response.h"
#include "mongo/db/service_context.h"
+#include "mongo/logv2/log.h"
#include "mongo/platform/mutex.h"
#include "mongo/rpc/get_status_from_command_result.h"
#include "mongo/s/write_ops/batched_command_response.h"
@@ -579,7 +580,7 @@ public:
}
if (_authzManager->getCacheGeneration() == _cacheGeneration) {
- LOG(1) << "User management command did not invalidate the user cache.";
+ LOGV2_DEBUG(20509, 1, "User management command did not invalidate the user cache.");
_authzManager->invalidateUserCache(_opCtx);
}
}
@@ -2494,16 +2495,22 @@ public:
Status status = updatePrivilegeDocument(opCtx, userName, userObj);
if (!status.isOK()) {
// Match the behavior of mongorestore to continue on failure
- warning() << "Could not update user " << userName
- << " in _mergeAuthzCollections command: " << redact(status);
+ LOGV2_WARNING(
+ 20510,
+ "Could not update user {userName} in _mergeAuthzCollections command: {status}",
+ "userName"_attr = userName,
+ "status"_attr = redact(status));
}
} else {
auditCreateOrUpdateUser(userObj, true);
Status status = insertPrivilegeDocument(opCtx, userObj);
if (!status.isOK()) {
// Match the behavior of mongorestore to continue on failure
- warning() << "Could not insert user " << userName
- << " in _mergeAuthzCollections command: " << redact(status);
+ LOGV2_WARNING(
+ 20511,
+ "Could not insert user {userName} in _mergeAuthzCollections command: {status}",
+ "userName"_attr = userName,
+ "status"_attr = redact(status));
}
}
usersToDrop->erase(userName);
@@ -2532,16 +2539,22 @@ public:
Status status = updateRoleDocument(opCtx, roleName, roleObj);
if (!status.isOK()) {
// Match the behavior of mongorestore to continue on failure
- warning() << "Could not update role " << roleName
- << " in _mergeAuthzCollections command: " << redact(status);
+ LOGV2_WARNING(
+ 20512,
+ "Could not update role {roleName} in _mergeAuthzCollections command: {status}",
+ "roleName"_attr = roleName,
+ "status"_attr = redact(status));
}
} else {
auditCreateOrUpdateRole(roleObj, true);
Status status = insertRoleDocument(opCtx, roleObj);
if (!status.isOK()) {
// Match the behavior of mongorestore to continue on failure
- warning() << "Could not insert role " << roleName
- << " in _mergeAuthzCollections command: " << redact(status);
+ LOGV2_WARNING(
+ 20513,
+ "Could not insert role {roleName} in _mergeAuthzCollections command: {status}",
+ "roleName"_attr = roleName,
+ "status"_attr = redact(status));
}
}
rolesToDrop->erase(roleName);
diff --git a/src/mongo/db/commands/validate.cpp b/src/mongo/db/commands/validate.cpp
index 3cdf9353749..f6b32d28eab 100644
--- a/src/mongo/db/commands/validate.cpp
+++ b/src/mongo/db/commands/validate.cpp
@@ -38,6 +38,7 @@
#include "mongo/db/query/internal_plans.h"
#include "mongo/db/storage/durable_catalog.h"
#include "mongo/db/storage/record_store.h"
+#include "mongo/logv2/log.h"
#include "mongo/util/fail_point.h"
#include "mongo/util/log.h"
#include "mongo/util/scopeguard.h"
@@ -142,8 +143,11 @@ public:
}
if (!serverGlobalParams.quiet.load()) {
- LOG(0) << "CMD: validate " << nss.ns() << (background ? ", background:true" : "")
- << (fullValidate ? ", full:true" : "");
+ LOGV2(20514,
+ "CMD: validate {nss_ns}{background_background_true}{fullValidate_full_true}",
+ "nss_ns"_attr = nss.ns(),
+ "background_background_true"_attr = (background ? ", background:true" : ""),
+ "fullValidate_full_true"_attr = (fullValidate ? ", full:true" : ""));
}
// Only one validation per collection can be in progress, the rest wait.
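Finally, validate.cpp shows how conditional message fragments survive the conversion: each ternary becomes an attribute value spliced into the template through an adjacent placeholder, as in the hunk above:

    // Conditional fragments become attribute values:
    LOG(0) << "CMD: validate " << nss.ns() << (background ? ", background:true" : "");
    // becomes (abridged)
    LOGV2(20514,
          "CMD: validate {nss_ns}{background_background_true}",
          "nss_ns"_attr = nss.ns(),
          "background_background_true"_attr = (background ? ", background:true" : ""));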