author     Gabriel Russell <gabriel.russell@mongodb.com>  2020-02-20 12:23:12 -0500
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>  2020-02-20 21:25:37 +0000
commit     c7f41ef10eefdd8abb5a671a0046f55af310705c (patch)
tree       af697623e733b153d3d190f8f1fc90fccb3f54c2
parent     11bfeda8152267fa95bc63b4e93723845737217f (diff)
download   mongo-c7f41ef10eefdd8abb5a671a0046f55af310705c.tar.gz
SERVER-45869 more automatically converted structured
-rw-r--r--  src/mongo/base/secure_allocator.cpp                                     |  37
-rw-r--r--  src/mongo/base/status.cpp                                               |   8
-rw-r--r--  src/mongo/client/async_client.cpp                                       |   5
-rw-r--r--  src/mongo/client/server_ping_monitor.cpp                                |  37
-rw-r--r--  src/mongo/db/auth/authz_manager_external_state_local.cpp                |  63
-rw-r--r--  src/mongo/db/auth/user_document_parser.cpp                              |  29
-rw-r--r--  src/mongo/db/commands/dbcommands.cpp                                    |   6
-rw-r--r--  src/mongo/db/commands/distinct.cpp                                      |  13
-rw-r--r--  src/mongo/db/commands/find_and_modify.cpp                               |  11
-rw-r--r--  src/mongo/db/commands/find_cmd.cpp                                      |  12
-rw-r--r--  src/mongo/db/commands/map_reduce_out_options.cpp                        |   3
-rw-r--r--  src/mongo/db/commands/mr_common.cpp                                     |   3
-rw-r--r--  src/mongo/db/commands/run_aggregate.cpp                                 |  11
-rw-r--r--  src/mongo/db/exec/geo_near.cpp                                          |  10
-rw-r--r--  src/mongo/db/exec/projection.cpp                                        |   5
-rw-r--r--  src/mongo/db/exec/shard_filter.cpp                                      |  12
-rw-r--r--  src/mongo/db/exec/stagedebug_cmd.cpp                                    |   8
-rw-r--r--  src/mongo/db/ftdc/ftdc_system_stats_windows.cpp                         |   6
-rw-r--r--  src/mongo/db/index/expression_keys_private.cpp                          |   8
-rw-r--r--  src/mongo/db/index/s2_access_method.cpp                                 |   5
-rw-r--r--  src/mongo/db/pipeline/document_source_geo_near.cpp                      |   3
-rw-r--r--  src/mongo/db/repl/check_quorum_for_config_change.cpp                    |  21
-rw-r--r--  src/mongo/db/repl/session_update_tracker.cpp                            |  15
-rw-r--r--  src/mongo/db/s/move_chunk_command.cpp                                   |   5
-rw-r--r--  src/mongo/db/s/move_primary_command.cpp                                 |   5
-rw-r--r--  src/mongo/db/s/move_timing_helper.cpp                                   |   7
-rw-r--r--  src/mongo/db/s/range_deletion_util.cpp                                  | 103
-rw-r--r--  src/mongo/db/s/shard_server_op_observer.cpp                             |  11
-rw-r--r--  src/mongo/db/s/split_chunk.cpp                                          |   7
-rw-r--r--  src/mongo/db/service_context.cpp                                        |   8
-rw-r--r--  src/mongo/db/snapshot_window_util.cpp                                   |  39
-rw-r--r--  src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store.cpp |  13
-rw-r--r--  src/mongo/db/storage/remove_saver.cpp                                   |  39
-rw-r--r--  src/mongo/db/storage/storage_repair_observer.cpp                        |  14
-rw-r--r--  src/mongo/db/storage/wiredtiger/wiredtiger_cursor.cpp                   |   3
-rw-r--r--  src/mongo/executor/network_interface_thread_pool.cpp                    |   5
-rw-r--r--  src/mongo/idl/server_parameter.cpp                                      |  26
-rw-r--r--  src/mongo/platform/random.cpp                                           |  31
-rw-r--r--  src/mongo/s/chunk_manager.cpp                                           |   6
-rw-r--r--  src/mongo/s/sharding_initialization.cpp                                 |   8
-rw-r--r--  src/mongo/scripting/mozjs/valuereader.cpp                               |   3
-rw-r--r--  src/mongo/util/fail_point.cpp                                           |  20
-rw-r--r--  src/mongo/util/net/private/ssl_expiration.cpp                           |  16
-rw-r--r--  src/mongo/util/net/ssl_parameters.cpp                                   |   7
-rw-r--r--  src/mongo/util/processinfo_windows.cpp                                  |  43
-rw-r--r--  src/mongo/util/signal_handlers_synchronous.cpp                          |  32
-rw-r--r--  src/mongo/watchdog/watchdog_mongod.cpp                                  |   9
47 files changed, 549 insertions, 242 deletions
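
Every hunk in this commit applies the same mechanical conversion: legacy stream-style logging calls (warning(), error(), severe(), LOG(n)) become structured logv2 macros that take a unique numeric log ID, a format string, and named attributes. The sketch below is illustrative only, using the ID and attribute from the first secure_allocator.cpp hunk; the wrapper function and the default-component define are assumptions for the example, not part of this commit.

    // Illustrative sketch of the logv2 conversion pattern (not part of the diff).
    // Assumes the MongoDB source tree; logv2/log.h expects a default log
    // component to be defined before it is included.
    #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault

    #include "mongo/logv2/log.h"
    #include "mongo/util/errno_util.h"

    namespace mongo {

    // Hypothetical helper, used only to frame the before/after comparison.
    void reportLookupPrivilegeFailure() {
        auto str = errnoWithPrefix("Failed to LookupPrivilegeValue");

        // Before: message and values were streamed together.
        //   warning() << str;
        //
        // After: each call site gets a unique ID, the message becomes a format
        // string, and every value is passed as a named "_attr" argument.
        LOGV2_WARNING(23704, "{str}", "str"_attr = str);

        // Debug-level logging carries the verbosity level as a second argument:
        //   LOG(1) << "Can't ping " << hostAndPort;
        // becomes
        //   LOGV2_DEBUG(23728, 1, "Can't ping {hostAndPort}",
        //               "hostAndPort"_attr = hostAndPort);
    }

    }  // namespace mongo

The hunks that follow show the same substitution applied across each file, with severe() mapped to LOGV2_FATAL, error() to LOGV2_ERROR, and warning() to LOGV2_WARNING.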
diff --git a/src/mongo/base/secure_allocator.cpp b/src/mongo/base/secure_allocator.cpp
index d342f0e1bbf..62dbe147e00 100644
--- a/src/mongo/base/secure_allocator.cpp
+++ b/src/mongo/base/secure_allocator.cpp
@@ -44,6 +44,7 @@
#endif
#include "mongo/base/init.h"
+#include "mongo/logv2/log.h"
#include "mongo/platform/mutex.h"
#include "mongo/stdx/unordered_map.h"
#include "mongo/util/assert_util.h"
@@ -74,7 +75,7 @@ void EnablePrivilege(const wchar_t* name) {
LUID luid;
if (!LookupPrivilegeValueW(nullptr, name, &luid)) {
auto str = errnoWithPrefix("Failed to LookupPrivilegeValue");
- warning() << str;
+ LOGV2_WARNING(23704, "{str}", "str"_attr = str);
return;
}
@@ -82,7 +83,7 @@ void EnablePrivilege(const wchar_t* name) {
HANDLE accessToken;
if (!OpenProcessToken(GetCurrentProcess(), TOKEN_ADJUST_PRIVILEGES, &accessToken)) {
auto str = errnoWithPrefix("Failed to OpenProcessToken");
- warning() << str;
+ LOGV2_WARNING(23705, "{str}", "str"_attr = str);
return;
}
@@ -97,12 +98,13 @@ void EnablePrivilege(const wchar_t* name) {
if (!AdjustTokenPrivileges(
accessToken, false, &privileges, sizeof(privileges), nullptr, nullptr)) {
auto str = errnoWithPrefix("Failed to AdjustTokenPrivileges");
- warning() << str;
+ LOGV2_WARNING(23706, "{str}", "str"_attr = str);
}
if (GetLastError() == ERROR_NOT_ALL_ASSIGNED) {
- warning() << "Failed to adjust token privilege for privilege '" << toUtf8String(name)
- << "'";
+ LOGV2_WARNING(23707,
+ "Failed to adjust token privilege for privilege '{toUtf8String_name}'",
+ "toUtf8String_name"_attr = toUtf8String(name));
}
}
@@ -132,7 +134,7 @@ void growWorkingSize(std::size_t bytes) {
if (!GetProcessWorkingSetSize(GetCurrentProcess(), &minWorkingSetSize, &maxWorkingSetSize)) {
auto str = errnoWithPrefix("Failed to GetProcessWorkingSetSize");
- severe() << str;
+ LOGV2_FATAL(23708, "{str}", "str"_attr = str);
fassertFailed(40285);
}
@@ -147,7 +149,7 @@ void growWorkingSize(std::size_t bytes) {
QUOTA_LIMITS_HARDWS_MIN_ENABLE |
QUOTA_LIMITS_HARDWS_MAX_DISABLE)) {
auto str = errnoWithPrefix("Failed to SetProcessWorkingSetSizeEx");
- severe() << str;
+ LOGV2_FATAL(23709, "{str}", "str"_attr = str);
fassertFailed(40286);
}
}
@@ -167,7 +169,7 @@ void* systemAllocate(std::size_t bytes) {
if (!ptr) {
auto str = errnoWithPrefix("Failed to VirtualAlloc");
- severe() << str;
+ LOGV2_FATAL(23710, "{str}", "str"_attr = str);
fassertFailed(28835);
}
@@ -184,7 +186,7 @@ void* systemAllocate(std::size_t bytes) {
}
auto str = errnoWithPrefix("Failed to VirtualLock");
- severe() << str;
+ LOGV2_FATAL(23711, "{str}", "str"_attr = str);
fassertFailed(28828);
}
@@ -194,7 +196,7 @@ void* systemAllocate(std::size_t bytes) {
void systemDeallocate(void* ptr, std::size_t bytes) {
if (VirtualUnlock(ptr, bytes) == 0) {
auto str = errnoWithPrefix("Failed to VirtualUnlock");
- severe() << str;
+ LOGV2_FATAL(23712, "{str}", "str"_attr = str);
fassertFailed(28829);
}
@@ -202,7 +204,7 @@ void systemDeallocate(void* ptr, std::size_t bytes) {
// (that's how the api works).
if (VirtualFree(ptr, 0, MEM_RELEASE) == 0) {
auto str = errnoWithPrefix("Failed to VirtualFree");
- severe() << str;
+ LOGV2_FATAL(23713, "{str}", "str"_attr = str);
fassertFailed(28830);
}
}
@@ -242,7 +244,7 @@ void* systemAllocate(std::size_t bytes) {
if (!ptr) {
auto str = errnoWithPrefix("Failed to mmap");
- severe() << str;
+ LOGV2_FATAL(23714, "{str}", "str"_attr = str);
fassertFailed(28831);
}
@@ -250,7 +252,7 @@ void* systemAllocate(std::size_t bytes) {
auto str = errnoWithPrefix(
"Failed to mlock: Cannot allocate locked memory. For more details see: "
"https://dochub.mongodb.org/core/cannot-allocate-locked-memory");
- severe() << str;
+ LOGV2_FATAL(23715, "{str}", "str"_attr = str);
fassertFailed(28832);
}
@@ -270,12 +272,17 @@ void systemDeallocate(void* ptr, std::size_t bytes) {
#endif
if (munlock(ptr, bytes) != 0) {
- severe() << errnoWithPrefix("Failed to munlock");
+ LOGV2_FATAL(23716,
+ "{errnoWithPrefix_Failed_to_munlock}",
+ "errnoWithPrefix_Failed_to_munlock"_attr =
+ errnoWithPrefix("Failed to munlock"));
fassertFailed(28833);
}
if (munmap(ptr, bytes) != 0) {
- severe() << errnoWithPrefix("Failed to munmap");
+ LOGV2_FATAL(23717,
+ "{errnoWithPrefix_Failed_to_munmap}",
+ "errnoWithPrefix_Failed_to_munmap"_attr = errnoWithPrefix("Failed to munmap"));
fassertFailed(28834);
}
}
diff --git a/src/mongo/base/status.cpp b/src/mongo/base/status.cpp
index a644dba5cfa..589574efc69 100644
--- a/src/mongo/base/status.cpp
+++ b/src/mongo/base/status.cpp
@@ -31,6 +31,7 @@
#include "mongo/base/status.h"
#include "mongo/db/jsobj.h"
+#include "mongo/logv2/log.h"
#include "mongo/util/log.h"
#include "mongo/util/str.h"
@@ -57,7 +58,7 @@ Status::ErrorInfo* Status::ErrorInfo::create(ErrorCodes::Error code,
// have extra info.
if (kDebugBuild) {
// Make it easier to find this issue by fatally failing in debug builds.
- severe() << "Code " << code << " is supposed to have extra info";
+ LOGV2_FATAL(23805, "Code {code} is supposed to have extra info", "code"_attr = code);
fassertFailed(40680);
}
@@ -126,8 +127,9 @@ StringBuilderImpl<Allocator>& operator<<(StringBuilderImpl<Allocator>& sb, const
// This really shouldn't happen but it would be really annoying if it broke error
// logging in production.
if (kDebugBuild) {
- severe() << "Error serializing extra info for " << status.code()
- << " in Status::toString()";
+ LOGV2_FATAL(23806,
+ "Error serializing extra info for {status_code} in Status::toString()",
+ "status_code"_attr = status.code());
std::terminate();
}
}
diff --git a/src/mongo/client/async_client.cpp b/src/mongo/client/async_client.cpp
index 1aceecbfd94..82a0eeff20c 100644
--- a/src/mongo/client/async_client.cpp
+++ b/src/mongo/client/async_client.cpp
@@ -44,6 +44,7 @@
#include "mongo/db/server_options.h"
#include "mongo/db/wire_version.h"
#include "mongo/executor/egress_tag_closer_manager.h"
+#include "mongo/logv2/log.h"
#include "mongo/rpc/factory.h"
#include "mongo/rpc/get_status_from_command_result.h"
#include "mongo/rpc/legacy_request_builder.h"
@@ -111,7 +112,9 @@ void AsyncDBClient::_parseIsMasterResponse(BSONObj request,
auto validateStatus =
rpc::validateWireVersion(WireSpec::instance().outgoing, protocolSet.version);
if (!validateStatus.isOK()) {
- warning() << "remote host has incompatible wire version: " << validateStatus;
+ LOGV2_WARNING(23741,
+ "remote host has incompatible wire version: {validateStatus}",
+ "validateStatus"_attr = validateStatus);
uasserted(validateStatus.code(),
str::stream() << "remote host has incompatible wire version: "
<< validateStatus.reason());
diff --git a/src/mongo/client/server_ping_monitor.cpp b/src/mongo/client/server_ping_monitor.cpp
index 47c2a8e52c4..6896a7a2aab 100644
--- a/src/mongo/client/server_ping_monitor.cpp
+++ b/src/mongo/client/server_ping_monitor.cpp
@@ -36,6 +36,7 @@
#include "mongo/executor/network_interface_factory.h"
#include "mongo/executor/network_interface_thread_pool.h"
#include "mongo/executor/thread_pool_task_executor.h"
+#include "mongo/logv2/log.h"
#include "mongo/rpc/metadata/egress_metadata_hook_list.h"
#include "mongo/util/log.h"
@@ -98,13 +99,19 @@ void SingleServerPingMonitor::_scheduleServerPing() {
}
if (ErrorCodes::isShutdownError(schedulePingHandle.getStatus().code())) {
- LOG(1) << "Can't schedule ping for " << _hostAndPort << ". Executor shutdown in progress";
+ LOGV2_DEBUG(23727,
+ 1,
+ "Can't schedule ping for {hostAndPort}. Executor shutdown in progress",
+ "hostAndPort"_attr = _hostAndPort);
return;
}
if (!schedulePingHandle.isOK()) {
- severe() << "Can't continue scheduling pings to " << _hostAndPort << " due to "
- << redact(schedulePingHandle.getStatus());
+ LOGV2_FATAL(23732,
+ "Can't continue scheduling pings to {hostAndPort} due to "
+ "{schedulePingHandle_getStatus}",
+ "hostAndPort"_attr = _hostAndPort,
+ "schedulePingHandle_getStatus"_attr = redact(schedulePingHandle.getStatus()));
fassertFailed(31434);
}
@@ -149,13 +156,18 @@ void SingleServerPingMonitor::_doServerPing() {
});
if (ErrorCodes::isShutdownError(remotePingHandle.getStatus().code())) {
- LOG(1) << "Can't ping " << _hostAndPort << ". Executor shutdown in progress";
+ LOGV2_DEBUG(23728,
+ 1,
+ "Can't ping {hostAndPort}. Executor shutdown in progress",
+ "hostAndPort"_attr = _hostAndPort);
return;
}
if (!remotePingHandle.isOK()) {
- severe() << "Can't continue pinging " << _hostAndPort << " due to "
- << redact(remotePingHandle.getStatus());
+ LOGV2_FATAL(23733,
+ "Can't continue pinging {hostAndPort} due to {remotePingHandle_getStatus}",
+ "hostAndPort"_attr = _hostAndPort,
+ "remotePingHandle_getStatus"_attr = redact(remotePingHandle.getStatus()));
fassertFailed(31435);
}
@@ -228,21 +240,26 @@ void ServerPingMonitor::onServerHandshakeCompleteEvent(const sdam::ServerAddress
std::make_shared<SingleServerPingMonitor>(address, _rttListener, _pingFrequency, _executor);
_serverPingMonitorMap[address] = newSingleMonitor;
newSingleMonitor->init();
- LOG(1) << "ServerPingMonitor is now monitoring " << address;
+ LOGV2_DEBUG(
+ 23729, 1, "ServerPingMonitor is now monitoring {address}", "address"_attr = address);
}
void ServerPingMonitor::onServerClosedEvent(const sdam::ServerAddress& address, OID topologyId) {
stdx::lock_guard lk(_mutex);
if (_isShutdown) {
- LOG(1) << "ServerPingMonitor is in shutdown and will stop monitoring " << address
- << " if it has not already done so.";
+ LOGV2_DEBUG(23730,
+ 1,
+ "ServerPingMonitor is in shutdown and will stop monitoring {address} if it has "
+ "not already done so.",
+ "address"_attr = address);
return;
}
auto it = _serverPingMonitorMap.find(address);
invariant(it != _serverPingMonitorMap.end());
it->second->drop();
_serverPingMonitorMap.erase(it);
- LOG(1) << "ServerPingMonitor stopped monitoring " << address;
+ LOGV2_DEBUG(
+ 23731, 1, "ServerPingMonitor stopped monitoring {address}", "address"_attr = address);
}
diff --git a/src/mongo/db/auth/authz_manager_external_state_local.cpp b/src/mongo/db/auth/authz_manager_external_state_local.cpp
index 923acc650fa..289d2d4dec7 100644
--- a/src/mongo/db/auth/authz_manager_external_state_local.cpp
+++ b/src/mongo/db/auth/authz_manager_external_state_local.cpp
@@ -43,6 +43,7 @@
#include "mongo/db/auth/user_document_parser.h"
#include "mongo/db/operation_context.h"
#include "mongo/db/server_options.h"
+#include "mongo/logv2/log.h"
#include "mongo/util/log.h"
#include "mongo/util/net/ssl_types.h"
#include "mongo/util/str.h"
@@ -55,13 +56,16 @@ Status AuthzManagerExternalStateLocal::initialize(OperationContext* opCtx) {
Status status = _initializeRoleGraph(opCtx);
if (!status.isOK()) {
if (status == ErrorCodes::GraphContainsCycle) {
- error() << "Cycle detected in admin.system.roles; role inheritance disabled. "
- "Remove the listed cycle and any others to re-enable role inheritance. "
- << redact(status);
+ LOGV2_ERROR(
+ 23750,
+ "Cycle detected in admin.system.roles; role inheritance disabled. "
+ "Remove the listed cycle and any others to re-enable role inheritance. {status}",
+ "status"_attr = redact(status));
} else {
- error() << "Could not generate role graph from admin.system.roles; "
- "only system roles available: "
- << redact(status);
+ LOGV2_ERROR(23751,
+ "Could not generate role graph from admin.system.roles; "
+ "only system roles available: {status}",
+ "status"_attr = redact(status));
}
}
@@ -466,9 +470,11 @@ namespace {
void addRoleFromDocumentOrWarn(RoleGraph* roleGraph, const BSONObj& doc) {
Status status = roleGraph->addRoleFromDocument(doc);
if (!status.isOK()) {
- warning() << "Skipping invalid admin.system.roles document while calculating privileges"
- " for user-defined roles: "
- << redact(status) << "; document " << redact(doc);
+ LOGV2_WARNING(23747,
+ "Skipping invalid admin.system.roles document while calculating privileges"
+ " for user-defined roles: {status}; document {doc}",
+ "status"_attr = redact(status),
+ "doc"_attr = redact(doc));
}
}
@@ -495,9 +501,10 @@ Status AuthzManagerExternalStateLocal::_initializeRoleGraph(OperationContext* op
RoleGraphState newState;
if (status == ErrorCodes::GraphContainsCycle) {
- error() << "Inconsistent role graph during authorization manager initialization. Only "
- "direct privileges available. "
- << redact(status);
+ LOGV2_ERROR(23752,
+ "Inconsistent role graph during authorization manager initialization. Only "
+ "direct privileges available. {status}",
+ "status"_attr = redact(status));
newState = roleGraphStateHasCycle;
status = Status::OK();
} else if (status.isOK()) {
@@ -574,21 +581,30 @@ private:
if (_o2) {
oplogEntryBuilder << "o2" << *_o2;
}
- error() << "Unsupported modification to roles collection in oplog; "
- "restart this process to reenable user-defined roles; "
- << redact(status) << "; Oplog entry: " << redact(oplogEntryBuilder.done());
+ LOGV2_ERROR(23753,
+ "Unsupported modification to roles collection in oplog; "
+ "restart this process to reenable user-defined roles; {status}; Oplog "
+ "entry: {oplogEntryBuilder_done}",
+ "status"_attr = redact(status),
+ "oplogEntryBuilder_done"_attr = redact(oplogEntryBuilder.done()));
// If a setParameter is enabled, this condition is fatal.
fassert(51152, !roleGraphInvalidationIsFatal);
} else if (!status.isOK()) {
- warning() << "Skipping bad update to roles collection in oplog. " << redact(status)
- << " Oplog entry: " << redact(_op);
+ LOGV2_WARNING(
+ 23748,
+ "Skipping bad update to roles collection in oplog. {status} Oplog entry: {op}",
+ "status"_attr = redact(status),
+ "op"_attr = redact(_op));
}
status = _externalState->_roleGraph.recomputePrivilegeData();
if (status == ErrorCodes::GraphContainsCycle) {
_externalState->_roleGraphState = _externalState->roleGraphStateHasCycle;
- error() << "Inconsistent role graph during authorization manager initialization. "
- "Only direct privileges available. "
- << redact(status) << " after applying oplog entry " << redact(_op);
+ LOGV2_ERROR(
+ 23754,
+ "Inconsistent role graph during authorization manager initialization. "
+ "Only direct privileges available. {status} after applying oplog entry {op}",
+ "status"_attr = redact(status),
+ "op"_attr = redact(_op));
} else {
fassert(17183, status);
_externalState->_roleGraphState = _externalState->roleGraphStateConsistent;
@@ -612,9 +628,10 @@ private:
: extractUserNameFromIdString(_o["_id"].str());
if (!userName.isOK()) {
- warning() << "Invalidating user cache based on user being updated failed, will "
- "invalidate the entire cache instead: "
- << userName.getStatus();
+ LOGV2_WARNING(23749,
+ "Invalidating user cache based on user being updated failed, will "
+ "invalidate the entire cache instead: {userName_getStatus}",
+ "userName_getStatus"_attr = userName.getStatus());
_authzManager->invalidateUserCache(_opCtx);
return;
}
diff --git a/src/mongo/db/auth/user_document_parser.cpp b/src/mongo/db/auth/user_document_parser.cpp
index 8eb6dc7a94b..3b9548f93dc 100644
--- a/src/mongo/db/auth/user_document_parser.cpp
+++ b/src/mongo/db/auth/user_document_parser.cpp
@@ -41,6 +41,7 @@
#include "mongo/db/auth/user.h"
#include "mongo/db/jsobj.h"
#include "mongo/db/namespace_string.h"
+#include "mongo/logv2/log.h"
#include "mongo/util/log.h"
#include "mongo/util/str.h"
@@ -456,30 +457,42 @@ Status V2UserDocumentParser::initializeUserPrivilegesFromUserDocument(const BSON
std::string errmsg;
for (BSONObjIterator it(privilegesElement.Obj()); it.more(); it.next()) {
if ((*it).type() != Object) {
- warning() << "Wrong type of element in inheritedPrivileges array for "
- << user->getName() << ": " << *it;
+ LOGV2_WARNING(
+ 23743,
+ "Wrong type of element in inheritedPrivileges array for {user_getName}: {it}",
+ "user_getName"_attr = user->getName(),
+ "it"_attr = *it);
continue;
}
Privilege privilege;
ParsedPrivilege pp;
if (!pp.parseBSON((*it).Obj(), &errmsg)) {
- warning() << "Could not parse privilege element in user document for "
- << user->getName() << ": " << errmsg;
+ LOGV2_WARNING(
+ 23744,
+ "Could not parse privilege element in user document for {user_getName}: {errmsg}",
+ "user_getName"_attr = user->getName(),
+ "errmsg"_attr = errmsg);
continue;
}
std::vector<std::string> unrecognizedActions;
Status status =
ParsedPrivilege::parsedPrivilegeToPrivilege(pp, &privilege, &unrecognizedActions);
if (!status.isOK()) {
- warning() << "Could not parse privilege element in user document for "
- << user->getName() << causedBy(status);
+ LOGV2_WARNING(23745,
+ "Could not parse privilege element in user document for "
+ "{user_getName}{causedBy_status}",
+ "user_getName"_attr = user->getName(),
+ "causedBy_status"_attr = causedBy(status));
continue;
}
if (unrecognizedActions.size()) {
std::string unrecognizedActionsString;
str::joinStringDelim(unrecognizedActions, &unrecognizedActionsString, ',');
- warning() << "Encountered unrecognized actions \" " << unrecognizedActionsString
- << "\" while parsing user document for " << user->getName();
+ LOGV2_WARNING(23746,
+ "Encountered unrecognized actions \" {unrecognizedActionsString}\" while "
+ "parsing user document for {user_getName}",
+ "unrecognizedActionsString"_attr = unrecognizedActionsString,
+ "user_getName"_attr = user->getName());
}
privileges.push_back(privilege);
}
diff --git a/src/mongo/db/commands/dbcommands.cpp b/src/mongo/db/commands/dbcommands.cpp
index 5b880adfdd9..ce4087e9026 100644
--- a/src/mongo/db/commands/dbcommands.cpp
+++ b/src/mongo/db/commands/dbcommands.cpp
@@ -87,6 +87,7 @@
#include "mongo/db/stats/storage_stats.h"
#include "mongo/db/storage/storage_engine_init.h"
#include "mongo/db/write_concern.h"
+#include "mongo/logv2/log.h"
#include "mongo/scripting/engine.h"
#include "mongo/util/fail_point.h"
#include "mongo/util/log.h"
@@ -319,7 +320,8 @@ public:
if (cmd.getAutoIndexId()) {
const char* deprecationWarning =
"the autoIndexId option is deprecated and will be removed in a future release";
- warning() << deprecationWarning;
+ LOGV2_WARNING(
+ 23800, "{deprecationWarning}", "deprecationWarning"_attr = deprecationWarning);
result.append("note", deprecationWarning);
}
@@ -535,7 +537,7 @@ public:
}
if (PlanExecutor::FAILURE == state) {
- warning() << "Internal error while reading " << ns;
+ LOGV2_WARNING(23801, "Internal error while reading {ns}", "ns"_attr = ns);
uassertStatusOK(WorkingSetCommon::getMemberObjectStatus(obj).withContext(
"Executor error while reading during dataSize command"));
}
diff --git a/src/mongo/db/commands/distinct.cpp b/src/mongo/db/commands/distinct.cpp
index d04cb68ebbc..0c2d046b9cb 100644
--- a/src/mongo/db/commands/distinct.cpp
+++ b/src/mongo/db/commands/distinct.cpp
@@ -57,6 +57,7 @@
#include "mongo/db/query/view_response_formatter.h"
#include "mongo/db/s/collection_sharding_state.h"
#include "mongo/db/views/resolved_view.h"
+#include "mongo/logv2/log.h"
#include "mongo/util/log.h"
namespace mongo {
@@ -271,10 +272,14 @@ public:
// We should always have a valid status member object at this point.
auto status = WorkingSetCommon::getMemberObjectStatus(obj);
invariant(!status.isOK());
- warning() << "Plan executor error during distinct command: "
- << redact(PlanExecutor::statestr(state)) << ", status: " << status
- << ", stats: "
- << redact(Explain::getWinningPlanStats(executor.getValue().get()));
+ LOGV2_WARNING(
+ 23797,
+ "Plan executor error during distinct command: {PlanExecutor_statestr_state}, "
+ "status: {status}, stats: {Explain_getWinningPlanStats_executor_getValue_get}",
+ "PlanExecutor_statestr_state"_attr = redact(PlanExecutor::statestr(state)),
+ "status"_attr = status,
+ "Explain_getWinningPlanStats_executor_getValue_get"_attr =
+ redact(Explain::getWinningPlanStats(executor.getValue().get())));
uassertStatusOK(status.withContext("Executor error during distinct command"));
}
diff --git a/src/mongo/db/commands/find_and_modify.cpp b/src/mongo/db/commands/find_and_modify.cpp
index e18e0dcadf0..80dd15884ea 100644
--- a/src/mongo/db/commands/find_and_modify.cpp
+++ b/src/mongo/db/commands/find_and_modify.cpp
@@ -70,6 +70,7 @@
#include "mongo/db/stats/top.h"
#include "mongo/db/transaction_participant.h"
#include "mongo/db/write_concern.h"
+#include "mongo/logv2/log.h"
#include "mongo/util/log.h"
#include "mongo/util/scopeguard.h"
@@ -97,9 +98,13 @@ boost::optional<BSONObj> advanceExecutor(OperationContext* opCtx,
// We should always have a valid status member object at this point.
auto status = WorkingSetCommon::getMemberObjectStatus(value);
invariant(!status.isOK());
- warning() << "Plan executor error during findAndModify: " << PlanExecutor::statestr(state)
- << ", status: " << status
- << ", stats: " << redact(Explain::getWinningPlanStats(exec));
+ LOGV2_WARNING(23802,
+ "Plan executor error during findAndModify: {PlanExecutor_statestr_state}, "
+ "status: {status}, stats: {Explain_getWinningPlanStats_exec}",
+ "PlanExecutor_statestr_state"_attr = PlanExecutor::statestr(state),
+ "status"_attr = status,
+ "Explain_getWinningPlanStats_exec"_attr =
+ redact(Explain::getWinningPlanStats(exec)));
uassertStatusOKWithContext(status, "Plan executor error during findAndModify");
MONGO_UNREACHABLE;
diff --git a/src/mongo/db/commands/find_cmd.cpp b/src/mongo/db/commands/find_cmd.cpp
index abeb4641446..eec207cd98f 100644
--- a/src/mongo/db/commands/find_cmd.cpp
+++ b/src/mongo/db/commands/find_cmd.cpp
@@ -54,6 +54,7 @@
#include "mongo/db/stats/server_read_concern_metrics.h"
#include "mongo/db/storage/storage_engine.h"
#include "mongo/db/transaction_participant.h"
+#include "mongo/logv2/log.h"
#include "mongo/rpc/get_status_from_command_result.h"
#include "mongo/util/log.h"
@@ -535,9 +536,14 @@ public:
// We should always have a valid status member object at this point.
auto status = WorkingSetCommon::getMemberObjectStatus(doc);
invariant(!status.isOK());
- warning() << "Plan executor error during find command: "
- << PlanExecutor::statestr(state) << ", status: " << status
- << ", stats: " << redact(Explain::getWinningPlanStats(exec.get()));
+ LOGV2_WARNING(
+ 23798,
+ "Plan executor error during find command: {PlanExecutor_statestr_state}, "
+ "status: {status}, stats: {Explain_getWinningPlanStats_exec_get}",
+ "PlanExecutor_statestr_state"_attr = PlanExecutor::statestr(state),
+ "status"_attr = status,
+ "Explain_getWinningPlanStats_exec_get"_attr =
+ redact(Explain::getWinningPlanStats(exec.get())));
uassertStatusOK(status.withContext("Executor error during find command"));
}
diff --git a/src/mongo/db/commands/map_reduce_out_options.cpp b/src/mongo/db/commands/map_reduce_out_options.cpp
index 2aa96605de8..d8ab3fcfe55 100644
--- a/src/mongo/db/commands/map_reduce_out_options.cpp
+++ b/src/mongo/db/commands/map_reduce_out_options.cpp
@@ -34,6 +34,7 @@
#include "mongo/db/commands/map_reduce_out_options.h"
#include "mongo/db/namespace_string.h"
+#include "mongo/logv2/log.h"
#include "mongo/util/log.h"
namespace mongo {
@@ -68,7 +69,7 @@ MapReduceOutOptions MapReduceOutOptions::parseFromBSON(const BSONElement& elemen
uassert(
ErrorCodes::BadValue, "sharded field value must be true", sharded.boolean());
if (shardedDeprecationSampler.tick()) {
- warning() << "The out.sharded option in MapReduce is deprecated";
+ LOGV2_WARNING(23703, "The out.sharded option in MapReduce is deprecated");
}
return true;
} else {
diff --git a/src/mongo/db/commands/mr_common.cpp b/src/mongo/db/commands/mr_common.cpp
index 4943ef16307..c6ee2483487 100644
--- a/src/mongo/db/commands/mr_common.cpp
+++ b/src/mongo/db/commands/mr_common.cpp
@@ -55,6 +55,7 @@
#include "mongo/db/pipeline/expression_function.h"
#include "mongo/db/pipeline/expression_js_emit.h"
#include "mongo/db/query/util/make_data_structure.h"
+#include "mongo/logv2/log.h"
#include "mongo/util/intrusive_counter.h"
#include "mongo/util/log.h"
#include "mongo/util/str.h"
@@ -310,7 +311,7 @@ OutputOptions parseOutputOptions(const std::string& dbname, const BSONObj& cmdOb
(outputOptions.outType == OutputType::Reduce ||
outputOptions.outType == OutputType::Merge));
} else if (nonAtomicDeprecationSampler.tick()) {
- warning() << "Setting out.nonAtomic to false in MapReduce is deprecated.";
+ LOGV2_WARNING(23796, "Setting out.nonAtomic to false in MapReduce is deprecated.");
}
}
} else {
diff --git a/src/mongo/db/commands/run_aggregate.cpp b/src/mongo/db/commands/run_aggregate.cpp
index 8a306ca2014..a61456df9c8 100644
--- a/src/mongo/db/commands/run_aggregate.cpp
+++ b/src/mongo/db/commands/run_aggregate.cpp
@@ -71,6 +71,7 @@
#include "mongo/db/storage/storage_options.h"
#include "mongo/db/views/view.h"
#include "mongo/db/views/view_catalog.h"
+#include "mongo/logv2/log.h"
#include "mongo/util/log.h"
#include "mongo/util/scopeguard.h"
#include "mongo/util/string_map.h"
@@ -201,9 +202,13 @@ bool handleCursorCommand(OperationContext* opCtx,
// We should always have a valid status member object at this point.
auto status = WorkingSetCommon::getMemberObjectStatus(nextDoc);
invariant(!status.isOK());
- warning() << "Aggregate command executor error: " << PlanExecutor::statestr(state)
- << ", status: " << status
- << ", stats: " << redact(Explain::getWinningPlanStats(exec));
+ LOGV2_WARNING(23799,
+ "Aggregate command executor error: {PlanExecutor_statestr_state}, "
+ "status: {status}, stats: {Explain_getWinningPlanStats_exec}",
+ "PlanExecutor_statestr_state"_attr = PlanExecutor::statestr(state),
+ "status"_attr = status,
+ "Explain_getWinningPlanStats_exec"_attr =
+ redact(Explain::getWinningPlanStats(exec)));
uassertStatusOK(status.withContext("PlanExecutor error during aggregation"));
}
diff --git a/src/mongo/db/exec/geo_near.cpp b/src/mongo/db/exec/geo_near.cpp
index 204b840f24e..0ec4d890707 100644
--- a/src/mongo/db/exec/geo_near.cpp
+++ b/src/mongo/db/exec/geo_near.cpp
@@ -31,6 +31,7 @@
#include "mongo/db/exec/geo_near.h"
+#include "mongo/logv2/log.h"
#include <memory>
#include <vector>
@@ -125,12 +126,15 @@ static void extractGeometries(const BSONObj& doc,
// Valid geometry element
geometries->push_back(std::move(stored));
} else {
- warning() << "geoNear stage read non-geometry element " << redact(nextEl)
- << " in array " << redact(el);
+ LOGV2_WARNING(23760,
+ "geoNear stage read non-geometry element {nextEl} in array {el}",
+ "nextEl"_attr = redact(nextEl),
+ "el"_attr = redact(el));
}
}
} else {
- warning() << "geoNear stage read non-geometry element " << redact(el);
+ LOGV2_WARNING(
+ 23761, "geoNear stage read non-geometry element {el}", "el"_attr = redact(el));
}
}
}
diff --git a/src/mongo/db/exec/projection.cpp b/src/mongo/db/exec/projection.cpp
index 4fc1ad9d9f1..ecf20579300 100644
--- a/src/mongo/db/exec/projection.cpp
+++ b/src/mongo/db/exec/projection.cpp
@@ -42,6 +42,7 @@
#include "mongo/db/jsobj.h"
#include "mongo/db/matcher/expression.h"
#include "mongo/db/record_id.h"
+#include "mongo/logv2/log.h"
#include "mongo/util/log.h"
#include "mongo/util/str.h"
@@ -141,7 +142,9 @@ PlanStage::StageState ProjectionStage::doWork(WorkingSetID* out) {
// Punt to our specific projection impl.
Status projStatus = transform(member);
if (!projStatus.isOK()) {
- warning() << "Couldn't execute projection, status = " << redact(projStatus);
+ LOGV2_WARNING(23827,
+ "Couldn't execute projection, status = {projStatus}",
+ "projStatus"_attr = redact(projStatus));
*out = WorkingSetCommon::allocateStatusMember(&_ws, projStatus);
return PlanStage::FAILURE;
}
diff --git a/src/mongo/db/exec/shard_filter.cpp b/src/mongo/db/exec/shard_filter.cpp
index fe2435a54fb..ed28c9f890c 100644
--- a/src/mongo/db/exec/shard_filter.cpp
+++ b/src/mongo/db/exec/shard_filter.cpp
@@ -38,6 +38,7 @@
#include "mongo/db/exec/filter.h"
#include "mongo/db/exec/scoped_timer.h"
#include "mongo/db/exec/working_set_common.h"
+#include "mongo/logv2/log.h"
#include "mongo/s/shard_key_pattern.h"
#include "mongo/util/log.h"
@@ -87,10 +88,13 @@ PlanStage::StageState ShardFilterStage::doWork(WorkingSetID* out) {
// Skip this working set member with a warning - no shard key should not be
// possible unless manually inserting data into a shard
- warning() << "no shard key found in document "
- << redact(member->doc.value().toBson()) << " for shard key pattern "
- << _shardFilterer.getKeyPattern() << ", "
- << "document may have been inserted manually into shard";
+ LOGV2_WARNING(
+ 23787,
+ "no shard key found in document {member_doc_value_toBson} for shard key "
+ "pattern {shardFilterer_getKeyPattern}, document may have been inserted "
+ "manually into shard",
+ "member_doc_value_toBson"_attr = redact(member->doc.value().toBson()),
+ "shardFilterer_getKeyPattern"_attr = _shardFilterer.getKeyPattern());
} else {
invariant(res == ShardFilterer::DocumentBelongsResult::kDoesNotBelong);
}
diff --git a/src/mongo/db/exec/stagedebug_cmd.cpp b/src/mongo/db/exec/stagedebug_cmd.cpp
index dfa5052b1d3..5867bbb0b96 100644
--- a/src/mongo/db/exec/stagedebug_cmd.cpp
+++ b/src/mongo/db/exec/stagedebug_cmd.cpp
@@ -63,6 +63,7 @@
#include "mongo/db/matcher/extensions_callback_real.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/query/plan_executor.h"
+#include "mongo/logv2/log.h"
#include "mongo/util/log.h"
namespace mongo {
@@ -195,8 +196,11 @@ public:
resultBuilder.done();
if (PlanExecutor::FAILURE == state) {
- error() << "Plan executor error during StageDebug command: FAILURE, stats: "
- << redact(Explain::getWinningPlanStats(exec.get()));
+ LOGV2_ERROR(23795,
+ "Plan executor error during StageDebug command: FAILURE, stats: "
+ "{Explain_getWinningPlanStats_exec_get}",
+ "Explain_getWinningPlanStats_exec_get"_attr =
+ redact(Explain::getWinningPlanStats(exec.get())));
uassertStatusOK(WorkingSetCommon::getMemberObjectStatus(obj).withContext(
"Executor error during StageDebug command"));
diff --git a/src/mongo/db/ftdc/ftdc_system_stats_windows.cpp b/src/mongo/db/ftdc/ftdc_system_stats_windows.cpp
index 92045f16e4d..cf2ac6a4e61 100644
--- a/src/mongo/db/ftdc/ftdc_system_stats_windows.cpp
+++ b/src/mongo/db/ftdc/ftdc_system_stats_windows.cpp
@@ -42,6 +42,7 @@
#include "mongo/bson/bsonobjbuilder.h"
#include "mongo/db/ftdc/collector.h"
#include "mongo/db/ftdc/controller.h"
+#include "mongo/logv2/log.h"
#include "mongo/util/log.h"
#include "mongo/util/perfctr_collect.h"
@@ -144,8 +145,9 @@ void installSystemMetricsCollector(FTDCController* controller) {
auto swCollector = createCollector();
if (!swCollector.getStatus().isOK()) {
- warning() << "Failed to initialize Performance Counters for FTDC: "
- << swCollector.getStatus();
+ LOGV2_WARNING(23718,
+ "Failed to initialize Performance Counters for FTDC: {swCollector_getStatus}",
+ "swCollector_getStatus"_attr = swCollector.getStatus());
return;
}
diff --git a/src/mongo/db/index/expression_keys_private.cpp b/src/mongo/db/index/expression_keys_private.cpp
index cf2f81a7020..a9ef9f1ba30 100644
--- a/src/mongo/db/index/expression_keys_private.cpp
+++ b/src/mongo/db/index/expression_keys_private.cpp
@@ -46,6 +46,7 @@
#include "mongo/db/index/s2_common.h"
#include "mongo/db/index_names.h"
#include "mongo/db/query/collation/collation_index_key.h"
+#include "mongo/logv2/log.h"
#include "mongo/util/assert_util.h"
#include "mongo/util/log.h"
#include "mongo/util/str.h"
@@ -635,8 +636,11 @@ void ExpressionKeysPrivate::getS2Keys(const BSONObj& obj,
}
if (keysToAdd.size() > params.maxKeysPerInsert) {
- warning() << "Insert of geo object generated a high number of keys."
- << " num keys: " << keysToAdd.size() << " obj inserted: " << redact(obj);
+ LOGV2_WARNING(23755,
+ "Insert of geo object generated a high number of keys. num keys: "
+ "{keysToAdd_size} obj inserted: {obj}",
+ "keysToAdd_size"_attr = keysToAdd.size(),
+ "obj"_attr = redact(obj));
}
invariant(keys->empty());
diff --git a/src/mongo/db/index/s2_access_method.cpp b/src/mongo/db/index/s2_access_method.cpp
index da5bc634ee9..8db6683b567 100644
--- a/src/mongo/db/index/s2_access_method.cpp
+++ b/src/mongo/db/index/s2_access_method.cpp
@@ -41,6 +41,7 @@
#include "mongo/db/index/expression_params.h"
#include "mongo/db/index_names.h"
#include "mongo/db/jsobj.h"
+#include "mongo/logv2/log.h"
#include "mongo/util/log.h"
namespace mongo {
@@ -77,7 +78,9 @@ S2AccessMethod::S2AccessMethod(IndexCatalogEntry* btreeState,
geoFields >= 1);
if (descriptor->isSparse()) {
- warning() << "Sparse option ignored for index spec " << descriptor->keyPattern().toString();
+ LOGV2_WARNING(23742,
+ "Sparse option ignored for index spec {descriptor_keyPattern}",
+ "descriptor_keyPattern"_attr = descriptor->keyPattern().toString());
}
}
diff --git a/src/mongo/db/pipeline/document_source_geo_near.cpp b/src/mongo/db/pipeline/document_source_geo_near.cpp
index 5081865eeba..9726ad37caf 100644
--- a/src/mongo/db/pipeline/document_source_geo_near.cpp
+++ b/src/mongo/db/pipeline/document_source_geo_near.cpp
@@ -36,6 +36,7 @@
#include "mongo/db/exec/document_value/document.h"
#include "mongo/db/pipeline/document_source_sort.h"
#include "mongo/db/pipeline/lite_parsed_document_source.h"
+#include "mongo/logv2/log.h"
#include "mongo/util/log.h"
namespace mongo {
@@ -171,7 +172,7 @@ void DocumentSourceGeoNear::parseOptions(BSONObj options) {
}
if (options.hasField("uniqueDocs"))
- warning() << "ignoring deprecated uniqueDocs option in $geoNear aggregation stage";
+ LOGV2_WARNING(23758, "ignoring deprecated uniqueDocs option in $geoNear aggregation stage");
if (auto keyElt = options[kKeyFieldName]) {
uassert(ErrorCodes::TypeMismatch,
diff --git a/src/mongo/db/repl/check_quorum_for_config_change.cpp b/src/mongo/db/repl/check_quorum_for_config_change.cpp
index 3faf2b20fa5..01f4e6c5e55 100644
--- a/src/mongo/db/repl/check_quorum_for_config_change.cpp
+++ b/src/mongo/db/repl/check_quorum_for_config_change.cpp
@@ -40,6 +40,7 @@
#include "mongo/db/repl/scatter_gather_algorithm.h"
#include "mongo/db/repl/scatter_gather_runner.h"
#include "mongo/db/server_options.h"
+#include "mongo/logv2/log.h"
#include "mongo/rpc/metadata/repl_set_metadata.h"
#include "mongo/util/log.h"
#include "mongo/util/str.h"
@@ -187,8 +188,10 @@ void QuorumChecker::_tabulateHeartbeatResponse(const RemoteCommandRequest& reque
const executor::RemoteCommandResponse& response) {
++_numResponses;
if (!response.isOK()) {
- warning() << "Failed to complete heartbeat request to " << request.target << "; "
- << response.status;
+ LOGV2_WARNING(23722,
+ "Failed to complete heartbeat request to {request_target}; {response_status}",
+ "request_target"_attr = request.target,
+ "response_status"_attr = response.status);
_badResponses.push_back(std::make_pair(request.target, response.status));
return;
}
@@ -201,13 +204,17 @@ void QuorumChecker::_tabulateHeartbeatResponse(const RemoteCommandRequest& reque
std::string message = str::stream()
<< "Our set name did not match that of " << request.target.toString();
_vetoStatus = Status(ErrorCodes::NewReplicaSetConfigurationIncompatible, message);
- warning() << message;
+ LOGV2_WARNING(23723, "{message}", "message"_attr = message);
return;
}
if (!hbStatus.isOK() && hbStatus != ErrorCodes::InvalidReplicaSetConfig) {
- warning() << "Got error (" << hbStatus << ") response on heartbeat request to "
- << request.target << "; " << hbResp;
+ LOGV2_WARNING(
+ 23724,
+ "Got error ({hbStatus}) response on heartbeat request to {request_target}; {hbResp}",
+ "hbStatus"_attr = hbStatus,
+ "request_target"_attr = request.target,
+ "hbResp"_attr = hbResp);
_badResponses.push_back(std::make_pair(request.target, hbStatus));
return;
}
@@ -219,7 +226,7 @@ void QuorumChecker::_tabulateHeartbeatResponse(const RemoteCommandRequest& reque
<< " is no larger than the version on " << request.target.toString()
<< ", which is " << hbResp.getConfigVersion();
_vetoStatus = Status(ErrorCodes::NewReplicaSetConfigurationIncompatible, message);
- warning() << message;
+ LOGV2_WARNING(23725, "{message}", "message"_attr = message);
return;
}
}
@@ -234,7 +241,7 @@ void QuorumChecker::_tabulateHeartbeatResponse(const RemoteCommandRequest& reque
<< " did not match that of " << request.target.toString() << ", which is "
<< replMetadata.getValue().getReplicaSetId();
_vetoStatus = Status(ErrorCodes::NewReplicaSetConfigurationIncompatible, message);
- warning() << message;
+ LOGV2_WARNING(23726, "{message}", "message"_attr = message);
}
}
diff --git a/src/mongo/db/repl/session_update_tracker.cpp b/src/mongo/db/repl/session_update_tracker.cpp
index c539e608670..f3e381995e2 100644
--- a/src/mongo/db/repl/session_update_tracker.cpp
+++ b/src/mongo/db/repl/session_update_tracker.cpp
@@ -39,6 +39,7 @@
#include "mongo/db/session.h"
#include "mongo/db/session_txn_record_gen.h"
#include "mongo/db/transaction_participant_gen.h"
+#include "mongo/logv2/log.h"
#include "mongo/util/assert_util.h"
#include "mongo/util/log.h"
@@ -191,10 +192,16 @@ void SessionUpdateTracker::_updateSessionInfo(const OplogEntry& entry) {
return;
}
- severe() << "Entry for session " << lsid->toBSON() << " has txnNumber "
- << *sessionInfo.getTxnNumber() << " < " << *existingSessionInfo.getTxnNumber();
- severe() << "New oplog entry: " << redact(entry.toString());
- severe() << "Existing oplog entry: " << redact(iter->second.toString());
+ LOGV2_FATAL(23792,
+ "Entry for session {lsid} has txnNumber {sessionInfo_getTxnNumber} < "
+ "{existingSessionInfo_getTxnNumber}",
+ "lsid"_attr = lsid->toBSON(),
+ "sessionInfo_getTxnNumber"_attr = *sessionInfo.getTxnNumber(),
+ "existingSessionInfo_getTxnNumber"_attr = *existingSessionInfo.getTxnNumber());
+ LOGV2_FATAL(23793, "New oplog entry: {entry}", "entry"_attr = redact(entry.toString()));
+ LOGV2_FATAL(23794,
+ "Existing oplog entry: {iter_second}",
+ "iter_second"_attr = redact(iter->second.toString()));
fassertFailedNoTrace(50843);
}
diff --git a/src/mongo/db/s/move_chunk_command.cpp b/src/mongo/db/s/move_chunk_command.cpp
index 645930188ac..23713ad2c55 100644
--- a/src/mongo/db/s/move_chunk_command.cpp
+++ b/src/mongo/db/s/move_chunk_command.cpp
@@ -43,6 +43,7 @@
#include "mongo/db/s/move_timing_helper.h"
#include "mongo/db/s/sharding_state.h"
#include "mongo/db/s/sharding_statistics.h"
+#include "mongo/logv2/log.h"
#include "mongo/s/client/shard_registry.h"
#include "mongo/s/grid.h"
#include "mongo/s/request_types/migration_secondary_throttle_options.h"
@@ -61,7 +62,9 @@ namespace {
*/
void uassertStatusOKWithWarning(const Status& status) {
if (!status.isOK()) {
- warning() << "Chunk move failed" << causedBy(redact(status));
+ LOGV2_WARNING(23777,
+ "Chunk move failed{causedBy_status}",
+ "causedBy_status"_attr = causedBy(redact(status)));
uassertStatusOK(status);
}
}
diff --git a/src/mongo/db/s/move_primary_command.cpp b/src/mongo/db/s/move_primary_command.cpp
index 58b9deec87d..0d29b270f28 100644
--- a/src/mongo/db/s/move_primary_command.cpp
+++ b/src/mongo/db/s/move_primary_command.cpp
@@ -36,6 +36,7 @@
#include "mongo/db/s/active_move_primaries_registry.h"
#include "mongo/db/s/move_primary_source_manager.h"
#include "mongo/db/s/sharding_state.h"
+#include "mongo/logv2/log.h"
#include "mongo/s/grid.h"
#include "mongo/s/request_types/move_primary_gen.h"
#include "mongo/util/log.h"
@@ -49,7 +50,9 @@ namespace {
*/
void uassertStatusOKWithWarning(const Status& status) {
if (!status.isOK()) {
- warning() << "movePrimary failed" << causedBy(redact(status));
+ LOGV2_WARNING(23762,
+ "movePrimary failed{causedBy_status}",
+ "causedBy_status"_attr = causedBy(redact(status)));
uassertStatusOK(status);
}
}
diff --git a/src/mongo/db/s/move_timing_helper.cpp b/src/mongo/db/s/move_timing_helper.cpp
index e0bdce57620..7090cf6e009 100644
--- a/src/mongo/db/s/move_timing_helper.cpp
+++ b/src/mongo/db/s/move_timing_helper.cpp
@@ -36,6 +36,7 @@
#include "mongo/db/client.h"
#include "mongo/db/curop.h"
#include "mongo/db/s/sharding_logging.h"
+#include "mongo/logv2/log.h"
#include "mongo/s/grid.h"
#include "mongo/util/log.h"
@@ -90,8 +91,10 @@ MoveTimingHelper::~MoveTimingHelper() {
_b.obj(),
ShardingCatalogClient::kMajorityWriteConcern);
} catch (const std::exception& e) {
- warning() << "couldn't record timing for moveChunk '" << _where
- << "': " << redact(e.what());
+ LOGV2_WARNING(23759,
+ "couldn't record timing for moveChunk '{where}': {e_what}",
+ "where"_attr = _where,
+ "e_what"_attr = redact(e.what()));
}
}
diff --git a/src/mongo/db/s/range_deletion_util.cpp b/src/mongo/db/s/range_deletion_util.cpp
index c5a65400279..b9a979279de 100644
--- a/src/mongo/db/s/range_deletion_util.cpp
+++ b/src/mongo/db/s/range_deletion_util.cpp
@@ -60,6 +60,7 @@
#include "mongo/db/storage/remove_saver.h"
#include "mongo/db/write_concern.h"
#include "mongo/executor/task_executor.h"
+#include "mongo/logv2/log.h"
#include "mongo/util/future_util.h"
#include "mongo/util/log.h"
@@ -85,15 +86,25 @@ bool collectionUuidHasChanged(const NamespaceString& nss,
UUID expectedCollectionUuid) {
if (!currentCollection) {
- LOG(1) << "Abandoning range deletion task for " << nss.ns() << " with UUID "
- << expectedCollectionUuid << " because the collection has been dropped";
+ LOGV2_DEBUG(23763,
+ 1,
+ "Abandoning range deletion task for {nss_ns} with UUID "
+ "{expectedCollectionUuid} because the collection has been dropped",
+ "nss_ns"_attr = nss.ns(),
+ "expectedCollectionUuid"_attr = expectedCollectionUuid);
return true;
}
if (currentCollection->uuid() != expectedCollectionUuid) {
- LOG(1) << "Abandoning range deletion task for " << nss.ns() << " with UUID "
- << expectedCollectionUuid << " because UUID of " << nss.ns()
- << "has changed (current is " << currentCollection->uuid() << ")";
+ LOGV2_DEBUG(
+ 23764,
+ 1,
+ "Abandoning range deletion task for {nss_ns} with UUID {expectedCollectionUuid} "
+ "because UUID of {nss_ns2}has changed (current is {currentCollection_uuid})",
+ "nss_ns"_attr = nss.ns(),
+ "expectedCollectionUuid"_attr = expectedCollectionUuid,
+ "nss_ns2"_attr = nss.ns(),
+ "currentCollection_uuid"_attr = currentCollection->uuid());
return true;
}
@@ -123,7 +134,7 @@ StatusWith<int> deleteNextBatch(OperationContext* opCtx,
if (!idx) {
std::string msg = str::stream()
<< "Unable to find shard key index for " << keyPattern.toString() << " in " << nss.ns();
- LOG(0) << msg;
+ LOGV2(23765, "{msg}", "msg"_attr = msg);
return {ErrorCodes::InternalError, msg};
}
@@ -136,7 +147,12 @@ StatusWith<int> deleteNextBatch(OperationContext* opCtx,
const auto min = extend(range.getMin());
const auto max = extend(range.getMax());
- LOG(1) << "begin removal of " << min << " to " << max << " in " << nss.ns();
+ LOGV2_DEBUG(23766,
+ 1,
+ "begin removal of {min} to {max} in {nss_ns}",
+ "min"_attr = min,
+ "max"_attr = max,
+ "nss_ns"_attr = nss.ns());
const auto indexName = idx->indexName();
const IndexDescriptor* descriptor =
@@ -144,7 +160,7 @@ StatusWith<int> deleteNextBatch(OperationContext* opCtx,
if (!descriptor) {
std::string msg = str::stream()
<< "shard key index with name " << indexName << " on '" << nss.ns() << "' was dropped";
- LOG(0) << msg;
+ LOGV2(23767, "{msg}", "msg"_attr = msg);
return {ErrorCodes::InternalError, msg};
}
@@ -169,7 +185,7 @@ StatusWith<int> deleteNextBatch(OperationContext* opCtx,
InternalPlanner::FORWARD);
if (MONGO_unlikely(hangBeforeDoingDeletion.shouldFail())) {
- LOG(0) << "Hit hangBeforeDoingDeletion failpoint";
+ LOGV2(23768, "Hit hangBeforeDoingDeletion failpoint");
hangBeforeDoingDeletion.pauseWhileSet(opCtx);
}
@@ -194,9 +210,16 @@ StatusWith<int> deleteNextBatch(OperationContext* opCtx,
}
if (state == PlanExecutor::FAILURE) {
- warning() << PlanExecutor::statestr(state) << " - cursor error while trying to delete "
- << redact(min) << " to " << redact(max) << " in " << nss
- << ": FAILURE, stats: " << Explain::getWinningPlanStats(exec.get());
+ LOGV2_WARNING(
+ 23776,
+ "{PlanExecutor_statestr_state} - cursor error while trying to delete {min} to "
+ "{max} in {nss}: FAILURE, stats: {Explain_getWinningPlanStats_exec_get}",
+ "PlanExecutor_statestr_state"_attr = PlanExecutor::statestr(state),
+ "min"_attr = redact(min),
+ "max"_attr = redact(max),
+ "nss"_attr = nss,
+ "Explain_getWinningPlanStats_exec_get"_attr =
+ Explain::getWinningPlanStats(exec.get()));
break;
}
@@ -247,9 +270,13 @@ ExecutorFuture<void> deleteRangeInBatches(const std::shared_ptr<executor::TaskEx
auto numDeleted = uassertStatusOK(deleteNextBatch(
opCtx, collection, keyPattern, range, numDocsToRemovePerBatch));
- LOG(0) << "Deleted " << numDeleted << " documents in pass in namespace "
- << nss.ns() << " with UUID " << collectionUuid << " for range "
- << range.toString();
+ LOGV2(23769,
+ "Deleted {numDeleted} documents in pass in namespace {nss_ns} with UUID "
+ "{collectionUuid} for range {range}",
+ "numDeleted"_attr = numDeleted,
+ "nss_ns"_attr = nss.ns(),
+ "collectionUuid"_attr = collectionUuid,
+ "range"_attr = range.toString());
return numDeleted;
});
@@ -299,8 +326,12 @@ void removePersistentRangeDeletionTask(const NamespaceString& nss,
<< collectionUuid << RangeDeletionTask::kRangeFieldName
<< range.toBSON()));
} catch (const DBException& e) {
- LOG(0) << "Failed to delete range deletion task for range " << range
- << " in collection " << nss << causedBy(e.what());
+ LOGV2(23770,
+ "Failed to delete range deletion task for range {range} in collection "
+ "{nss}{causedBy_e_what}",
+ "range"_attr = range,
+ "nss"_attr = nss,
+ "causedBy_e_what"_attr = causedBy(e.what()));
}
});
}
@@ -314,8 +345,12 @@ ExecutorFuture<void> waitForDeletionsToMajorityReplicate(
repl::ReplClientInfo::forClient(opCtx->getClient()).setLastOpToSystemLastOpTime(opCtx);
auto clientOpTime = repl::ReplClientInfo::forClient(opCtx->getClient()).getLastOp();
- LOG(0) << "Waiting for majority replication of local deletions in namespace " << nss.ns()
- << " with UUID " << collectionUuid << " for range " << redact(range.toString());
+ LOGV2(23771,
+ "Waiting for majority replication of local deletions in namespace {nss_ns} with UUID "
+ "{collectionUuid} for range {range}",
+ "nss_ns"_attr = nss.ns(),
+ "collectionUuid"_attr = collectionUuid,
+ "range"_attr = redact(range.toString()));
// Asynchronously wait for majority write concern.
return WaitForMajorityService::get(opCtx->getServiceContext())
@@ -350,9 +385,12 @@ SharedSemiFuture<void> removeDocumentsInRange(
executor->now() + delayForActiveQueriesOnSecondariesToComplete);
})
.then([=]() mutable {
- LOG(0) << "Beginning deletion of any documents in " << nss.ns() << " range "
- << redact(range.toString()) << " with numDocsToRemovePerBatch "
- << numDocsToRemovePerBatch;
+ LOGV2(23772,
+ "Beginning deletion of any documents in {nss_ns} range {range} with "
+ "numDocsToRemovePerBatch {numDocsToRemovePerBatch}",
+ "nss_ns"_attr = nss.ns(),
+ "range"_attr = redact(range.toString()),
+ "numDocsToRemovePerBatch"_attr = numDocsToRemovePerBatch);
notifySecondariesThatDeletionIsOccurring(nss, collectionUuid, range);
@@ -372,11 +410,16 @@ SharedSemiFuture<void> removeDocumentsInRange(
})
.onCompletion([=](Status s) {
if (s.isOK()) {
- LOG(0) << "Completed deletion of documents in " << nss.ns() << " range "
- << redact(range.toString());
+ LOGV2(23773,
+ "Completed deletion of documents in {nss_ns} range {range}",
+ "nss_ns"_attr = nss.ns(),
+ "range"_attr = redact(range.toString()));
} else {
- LOG(0) << "Failed to delete of documents in " << nss.ns() << " range "
- << redact(range.toString()) << causedBy(redact(s));
+ LOGV2(23774,
+ "Failed to delete of documents in {nss_ns} range {range}{causedBy_s}",
+ "nss_ns"_attr = nss.ns(),
+ "range"_attr = redact(range.toString()),
+ "causedBy_s"_attr = causedBy(redact(s)));
}
if (s.isOK() ||
@@ -384,8 +427,12 @@ SharedSemiFuture<void> removeDocumentsInRange(
ErrorCodes::RangeDeletionAbandonedBecauseCollectionWithUUIDDoesNotExist) {
removePersistentRangeDeletionTask(nss, collectionUuid, range);
- LOG(1) << "Completed removal of persistent range deletion task for " << nss.ns()
- << " range " << redact(range.toString());
+ LOGV2_DEBUG(23775,
+ 1,
+ "Completed removal of persistent range deletion task for {nss_ns} "
+ "range {range}",
+ "nss_ns"_attr = nss.ns(),
+ "range"_attr = redact(range.toString()));
}
// Propagate any errors to callers waiting on the result.
diff --git a/src/mongo/db/s/shard_server_op_observer.cpp b/src/mongo/db/s/shard_server_op_observer.cpp
index 3ad62a62953..caf4b68b561 100644
--- a/src/mongo/db/s/shard_server_op_observer.cpp
+++ b/src/mongo/db/s/shard_server_op_observer.cpp
@@ -48,6 +48,7 @@
#include "mongo/db/s/sharding_initialization_mongod.h"
#include "mongo/db/s/sharding_state.h"
#include "mongo/db/s/type_shard_identity.h"
+#include "mongo/logv2/log.h"
#include "mongo/s/balancer_configuration.h"
#include "mongo/s/catalog/type_shard_collection.h"
#include "mongo/s/catalog/type_shard_database.h"
@@ -434,8 +435,9 @@ void ShardServerOpObserver::onDelete(OperationContext* opCtx,
uasserted(40070,
"cannot delete shardIdentity document while in --shardsvr mode");
} else {
- warning() << "Shard identity document rolled back. Will shut down after "
- "finishing rollback.";
+ LOGV2_WARNING(23779,
+ "Shard identity document rolled back. Will shut down after "
+ "finishing rollback.");
ShardIdentityRollbackNotifier::get(opCtx)->recordThatRollbackHappened();
}
}
@@ -455,8 +457,9 @@ repl::OpTime ShardServerOpObserver::onDropCollection(OperationContext* opCtx,
// Can't confirm whether there was a ShardIdentity document or not yet, so assume there was
// one and shut down the process to clear the in-memory sharding state
- warning() << "admin.system.version collection rolled back. Will shut down after finishing "
- "rollback";
+ LOGV2_WARNING(23780,
+ "admin.system.version collection rolled back. Will shut down after finishing "
+ "rollback");
ShardIdentityRollbackNotifier::get(opCtx)->recordThatRollbackHappened();
}
diff --git a/src/mongo/db/s/split_chunk.cpp b/src/mongo/db/s/split_chunk.cpp
index 5ecdbd5f04d..ee29084c850 100644
--- a/src/mongo/db/s/split_chunk.cpp
+++ b/src/mongo/db/s/split_chunk.cpp
@@ -47,6 +47,7 @@
#include "mongo/db/s/collection_sharding_state.h"
#include "mongo/db/s/shard_filtering_metadata_refresh.h"
#include "mongo/db/s/sharding_state.h"
+#include "mongo/logv2/log.h"
#include "mongo/s/catalog/type_chunk.h"
#include "mongo/s/client/shard_registry.h"
#include "mongo/s/grid.h"
@@ -213,8 +214,10 @@ StatusWith<boost::optional<ChunkRange>> splitChunk(OperationContext* opCtx,
Collection* const collection = autoColl.getCollection();
if (!collection) {
- warning() << "will not perform top-chunk checking since " << nss.toString()
- << " does not exist after splitting";
+ LOGV2_WARNING(
+ 23778,
+ "will not perform top-chunk checking since {nss} does not exist after splitting",
+ "nss"_attr = nss.toString());
return boost::optional<ChunkRange>(boost::none);
}
diff --git a/src/mongo/db/service_context.cpp b/src/mongo/db/service_context.cpp
index 760efa6c4e8..4f83efcff15 100644
--- a/src/mongo/db/service_context.cpp
+++ b/src/mongo/db/service_context.cpp
@@ -44,6 +44,7 @@
#include "mongo/db/op_observer.h"
#include "mongo/db/operation_context.h"
#include "mongo/db/storage/recovery_unit_noop.h"
+#include "mongo/logv2/log.h"
#include "mongo/transport/service_entry_point.h"
#include "mongo/transport/session.h"
#include "mongo/transport/transport_layer.h"
@@ -110,8 +111,11 @@ ServiceContext::ServiceContext()
ServiceContext::~ServiceContext() {
stdx::lock_guard<Latch> lk(_mutex);
for (const auto& client : _clients) {
- severe() << "Client " << client->desc() << " still exists while destroying ServiceContext@"
- << reinterpret_cast<uint64_t>(this);
+ LOGV2_FATAL(23828,
+ "Client {client_desc} still exists while destroying "
+ "ServiceContext@{reinterpret_cast_uint64_t_this}",
+ "client_desc"_attr = client->desc(),
+ "reinterpret_cast_uint64_t_this"_attr = reinterpret_cast<uint64_t>(this));
}
invariant(_clients.empty());
}
diff --git a/src/mongo/db/snapshot_window_util.cpp b/src/mongo/db/snapshot_window_util.cpp
index eaf8f9f45cc..fb9e7f4f9b2 100644
--- a/src/mongo/db/snapshot_window_util.cpp
+++ b/src/mongo/db/snapshot_window_util.cpp
@@ -38,6 +38,7 @@
#include "mongo/db/service_context.h"
#include "mongo/db/snapshot_window_options.h"
#include "mongo/db/storage/storage_engine.h"
+#include "mongo/logv2/log.h"
#include "mongo/platform/mutex.h"
#include "mongo/util/concurrency/with_lock.h"
#include "mongo/util/fail_point.h"
@@ -111,28 +112,34 @@ void increaseTargetSnapshotWindowSize(OperationContext* opCtx) {
StorageEngine* engine = opCtx->getServiceContext()->getStorageEngine();
if (engine && engine->isCacheUnderPressure(opCtx)) {
invariant(!engine->isEphemeral() || getTestCommandsEnabled());
- warning() << "Attempted to increase the time window of available snapshots for "
- "point-in-time operations (readConcern level 'snapshot' or transactions), but "
- "the storage engine cache pressure, per the cachePressureThreshold setting of "
- "'"
- << snapshotWindowParams.cachePressureThreshold.load()
- << "', is too high to allow it to increase. If this happens frequently, consider "
- "either increasing the cache pressure threshold or increasing the memory "
- "available to the storage engine cache, in order to improve the success rate "
- "or speed of point-in-time requests.";
+ LOGV2_WARNING(
+ 23788,
+ "Attempted to increase the time window of available snapshots for "
+ "point-in-time operations (readConcern level 'snapshot' or transactions), but "
+ "the storage engine cache pressure, per the cachePressureThreshold setting of "
+ "'{snapshotWindowParams_cachePressureThreshold_load}', is too high to allow it to "
+ "increase. If this happens frequently, consider "
+ "either increasing the cache pressure threshold or increasing the memory "
+ "available to the storage engine cache, in order to improve the success rate "
+ "or speed of point-in-time requests.",
+ "snapshotWindowParams_cachePressureThreshold_load"_attr =
+ snapshotWindowParams.cachePressureThreshold.load());
_decreaseTargetSnapshotWindowSize(lock, opCtx);
return;
}
if (snapshotWindowParams.targetSnapshotHistoryWindowInSeconds.load() ==
snapshotWindowParams.maxTargetSnapshotHistoryWindowInSeconds.load()) {
- warning() << "Attempted to increase the time window of available snapshots for "
- "point-in-time operations (readConcern level 'snapshot' or transactions), but "
- "maxTargetSnapshotHistoryWindowInSeconds has already been reached. If this "
- "happens frequently, consider increasing the "
- "maxTargetSnapshotHistoryWindowInSeconds setting value, which is currently "
- "set to '"
- << snapshotWindowParams.maxTargetSnapshotHistoryWindowInSeconds.load() << "'.";
+ LOGV2_WARNING(
+ 23789,
+ "Attempted to increase the time window of available snapshots for "
+ "point-in-time operations (readConcern level 'snapshot' or transactions), but "
+ "maxTargetSnapshotHistoryWindowInSeconds has already been reached. If this "
+ "happens frequently, consider increasing the "
+ "maxTargetSnapshotHistoryWindowInSeconds setting value, which is currently "
+ "set to '{snapshotWindowParams_maxTargetSnapshotHistoryWindowInSeconds_load}'.",
+ "snapshotWindowParams_maxTargetSnapshotHistoryWindowInSeconds_load"_attr =
+ snapshotWindowParams.maxTargetSnapshotHistoryWindowInSeconds.load());
return;
}
diff --git a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store.cpp b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store.cpp
index 69e553375e0..6760f71a89c 100644
--- a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store.cpp
+++ b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store.cpp
@@ -38,6 +38,7 @@
#include "mongo/db/operation_context.h"
#include "mongo/db/storage/oplog_hack.h"
#include "mongo/db/storage/recovery_unit.h"
+#include "mongo/logv2/log.h"
#include "mongo/util/log.h"
#include "mongo/util/str.h"
#include "mongo/util/unowned_ptr.h"
@@ -305,8 +306,10 @@ const EphemeralForTestRecordStore::EphemeralForTestRecord* EphemeralForTestRecor
WithLock, const RecordId& loc) const {
Records::const_iterator it = _data->records.find(loc);
if (it == _data->records.end()) {
- error() << "EphemeralForTestRecordStore::recordFor cannot find record for " << ns() << ":"
- << loc;
+ LOGV2_ERROR(23720,
+ "EphemeralForTestRecordStore::recordFor cannot find record for {ns}:{loc}",
+ "ns"_attr = ns(),
+ "loc"_attr = loc);
}
invariant(it != _data->records.end());
return &it->second;
@@ -316,8 +319,10 @@ EphemeralForTestRecordStore::EphemeralForTestRecord* EphemeralForTestRecordStore
WithLock, const RecordId& loc) {
Records::iterator it = _data->records.find(loc);
if (it == _data->records.end()) {
- error() << "EphemeralForTestRecordStore::recordFor cannot find record for " << ns() << ":"
- << loc;
+ LOGV2_ERROR(23721,
+ "EphemeralForTestRecordStore::recordFor cannot find record for {ns}:{loc}",
+ "ns"_attr = ns(),
+ "loc"_attr = loc);
}
invariant(it != _data->records.end());
return &it->second;
diff --git a/src/mongo/db/storage/remove_saver.cpp b/src/mongo/db/storage/remove_saver.cpp
index 7012f671c37..65d30c49518 100644
--- a/src/mongo/db/storage/remove_saver.cpp
+++ b/src/mongo/db/storage/remove_saver.cpp
@@ -39,6 +39,7 @@
#include "mongo/db/service_context.h"
#include "mongo/db/storage/encryption_hooks.h"
#include "mongo/db/storage/storage_options.h"
+#include "mongo/logv2/log.h"
#include "mongo/util/errno_util.h"
#include "mongo/util/log.h"
@@ -84,30 +85,39 @@ RemoveSaver::~RemoveSaver() {
size_t resultLen;
Status status = _protector->finalize(protectedBuffer.get(), protectedSizeMax, &resultLen);
if (!status.isOK()) {
- severe() << "Unable to finalize DataProtector while closing RemoveSaver: "
- << redact(status);
+ LOGV2_FATAL(23736,
+ "Unable to finalize DataProtector while closing RemoveSaver: {status}",
+ "status"_attr = redact(status));
fassertFailed(34350);
}
_out->write(reinterpret_cast<const char*>(protectedBuffer.get()), resultLen);
if (_out->fail()) {
- severe() << "Couldn't write finalized DataProtector data to: " << _file.string()
- << " for remove saving: " << redact(errnoWithDescription());
+ LOGV2_FATAL(23737,
+ "Couldn't write finalized DataProtector data to: {file_string} for remove "
+ "saving: {errnoWithDescription}",
+ "file_string"_attr = _file.string(),
+ "errnoWithDescription"_attr = redact(errnoWithDescription()));
fassertFailed(34351);
}
protectedBuffer.reset(new uint8_t[protectedSizeMax]);
status = _protector->finalizeTag(protectedBuffer.get(), protectedSizeMax, &resultLen);
if (!status.isOK()) {
- severe() << "Unable to get finalizeTag from DataProtector while closing RemoveSaver: "
- << redact(status);
+ LOGV2_FATAL(
+ 23738,
+ "Unable to get finalizeTag from DataProtector while closing RemoveSaver: {status}",
+ "status"_attr = redact(status));
fassertFailed(34352);
}
if (resultLen != _protector->getNumberOfBytesReservedForTag()) {
- severe() << "Attempted to write tag of size " << resultLen
- << " when DataProtector only reserved "
- << _protector->getNumberOfBytesReservedForTag() << " bytes";
+ LOGV2_FATAL(23739,
+ "Attempted to write tag of size {resultLen} when DataProtector only "
+ "reserved {protector_getNumberOfBytesReservedForTag} bytes",
+ "resultLen"_attr = resultLen,
+ "protector_getNumberOfBytesReservedForTag"_attr =
+ _protector->getNumberOfBytesReservedForTag());
fassertFailed(34353);
}
@@ -115,8 +125,11 @@ RemoveSaver::~RemoveSaver() {
_out->write(reinterpret_cast<const char*>(protectedBuffer.get()), resultLen);
if (_out->fail()) {
- severe() << "Couldn't write finalizeTag from DataProtector to: " << _file.string()
- << " for remove saving: " << redact(errnoWithDescription());
+ LOGV2_FATAL(23740,
+ "Couldn't write finalizeTag from DataProtector to: {file_string} for "
+ "remove saving: {errnoWithDescription}",
+ "file_string"_attr = _file.string(),
+ "errnoWithDescription"_attr = redact(errnoWithDescription()));
fassertFailed(34354);
}
}
@@ -133,7 +146,7 @@ Status RemoveSaver::goingToDelete(const BSONObj& o) {
if (_out->fail()) {
string msg = str::stream() << "couldn't create file: " << _file.string()
<< " for remove saving: " << redact(errnoWithDescription());
- error() << msg;
+ LOGV2_ERROR(23734, "{msg}", "msg"_attr = msg);
_out.reset();
_out = nullptr;
return Status(ErrorCodes::FileNotOpen, msg);
@@ -168,7 +181,7 @@ Status RemoveSaver::goingToDelete(const BSONObj& o) {
if (_out->fail()) {
string msg = str::stream() << "couldn't write document to file: " << _file.string()
<< " for remove saving: " << redact(errnoWithDescription());
- error() << msg;
+ LOGV2_ERROR(23735, "{msg}", "msg"_attr = msg);
return Status(ErrorCodes::OperationFailed, msg);
}
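
When the message text is already assembled (here via str::stream, because the same string also feeds the returned Status), the conversion passes the whole string as a single attribute. A sketch under those assumptions, with a hypothetical function name and ID:

#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault

#include <string>

#include "mongo/base/status.h"
#include "mongo/logv2/log.h"
#include "mongo/util/str.h"

namespace mongo {

// Hypothetical helper mirroring the RemoveSaver::goingToDelete error path above.
Status logAndReturnFileError(const std::string& path, const std::string& reason) {
    std::string msg = str::stream() << "couldn't create file: " << path
                                    << " for remove saving: " << reason;
    LOGV2_ERROR(99991, "{msg}", "msg"_attr = msg);  // hypothetical ID
    return Status(ErrorCodes::FileNotOpen, msg);
}

}  // namespace mongo
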
diff --git a/src/mongo/db/storage/storage_repair_observer.cpp b/src/mongo/db/storage/storage_repair_observer.cpp
index 8aab8e82979..c5506aac4a4 100644
--- a/src/mongo/db/storage/storage_repair_observer.cpp
+++ b/src/mongo/db/storage/storage_repair_observer.cpp
@@ -49,6 +49,7 @@
#include "mongo/db/service_context.h"
#include "mongo/db/storage/storage_file_util.h"
#include "mongo/db/storage/storage_options.h"
+#include "mongo/logv2/log.h"
#include "mongo/util/file.h"
#include "mongo/util/log.h"
@@ -123,8 +124,11 @@ void StorageRepairObserver::_touchRepairIncompleteFile() {
boost::filesystem::ofstream fileStream(_repairIncompleteFilePath);
fileStream << "This file indicates that a repair operation is in progress or incomplete.";
if (fileStream.fail()) {
- severe() << "Failed to write to file " << _repairIncompleteFilePath.string() << ": "
- << errnoWithDescription();
+ LOGV2_FATAL(
+ 23756,
+ "Failed to write to file {repairIncompleteFilePath_string}: {errnoWithDescription}",
+ "repairIncompleteFilePath_string"_attr = _repairIncompleteFilePath.string(),
+ "errnoWithDescription"_attr = errnoWithDescription());
fassertFailedNoTrace(50920);
}
fileStream.close();
@@ -138,8 +142,10 @@ void StorageRepairObserver::_removeRepairIncompleteFile() {
boost::filesystem::remove(_repairIncompleteFilePath, ec);
if (ec) {
- severe() << "Failed to remove file " << _repairIncompleteFilePath.string() << ": "
- << ec.message();
+ LOGV2_FATAL(23757,
+ "Failed to remove file {repairIncompleteFilePath_string}: {ec_message}",
+ "repairIncompleteFilePath_string"_attr = _repairIncompleteFilePath.string(),
+ "ec_message"_attr = ec.message());
fassertFailedNoTrace(50921);
}
fassertNoTrace(50927, fsyncParentDirectory(_repairIncompleteFilePath));
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_cursor.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_cursor.cpp
index ba3899d9a6e..1008ad1b460 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_cursor.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_cursor.cpp
@@ -35,6 +35,7 @@
#include "mongo/db/storage/recovery_unit.h"
#include "mongo/db/storage/wiredtiger/wiredtiger_util.h"
+#include "mongo/logv2/log.h"
#include "mongo/util/log.h"
namespace mongo {
@@ -81,7 +82,7 @@ WiredTigerCursor::WiredTigerCursor(const std::string& uri,
// A WiredTiger table will not be available in the latest checkpoint if the checkpoint
// thread hasn't ran after the initial WiredTiger table was created.
if (!_isCheckpoint) {
- error() << ex;
+ LOGV2_ERROR(23719, "{ex}", "ex"_attr = ex);
fassertFailedNoTrace(50883);
}
throw;
diff --git a/src/mongo/executor/network_interface_thread_pool.cpp b/src/mongo/executor/network_interface_thread_pool.cpp
index f40a298aea1..e275c5fdff7 100644
--- a/src/mongo/executor/network_interface_thread_pool.cpp
+++ b/src/mongo/executor/network_interface_thread_pool.cpp
@@ -34,6 +34,7 @@
#include "mongo/executor/network_interface_thread_pool.h"
#include "mongo/executor/network_interface.h"
+#include "mongo/logv2/log.h"
#include "mongo/util/destructor_guard.h"
#include "mongo/util/log.h"
#include "mongo/util/scopeguard.h"
@@ -65,7 +66,7 @@ void NetworkInterfaceThreadPool::_dtorImpl() {
void NetworkInterfaceThreadPool::startup() {
stdx::unique_lock<Latch> lk(_mutex);
if (_started) {
- severe() << "Attempting to start pool, but it has already started";
+ LOGV2_FATAL(23790, "Attempting to start pool, but it has already started");
fassertFailed(34358);
}
_started = true;
@@ -87,7 +88,7 @@ void NetworkInterfaceThreadPool::join() {
stdx::unique_lock<Latch> lk(_mutex);
if (_joining) {
- severe() << "Attempted to join pool more than once";
+ LOGV2_FATAL(23791, "Attempted to join pool more than once");
fassertFailed(34357);
}
diff --git a/src/mongo/idl/server_parameter.cpp b/src/mongo/idl/server_parameter.cpp
index 3f9d2c50d3c..14b7afb3456 100644
--- a/src/mongo/idl/server_parameter.cpp
+++ b/src/mongo/idl/server_parameter.cpp
@@ -31,6 +31,7 @@
#include "mongo/idl/server_parameter.h"
+#include "mongo/logv2/log.h"
#include "mongo/util/log.h"
namespace mongo {
@@ -82,7 +83,9 @@ ServerParameterSet* ServerParameterSet::getGlobal() {
void ServerParameterSet::add(ServerParameter* sp) {
ServerParameter*& x = _map[sp->name()];
if (x) {
- severe() << "'" << x->name() << "' already exists in the server parameter set.";
+ LOGV2_FATAL(23784,
+ "'{x_name}' already exists in the server parameter set.",
+ "x_name"_attr = x->name());
abort();
}
x = sp;
@@ -133,24 +136,33 @@ void IDLServerParameterDeprecatedAlias::append(OperationContext* opCtx,
BSONObjBuilder& b,
const std::string& fieldName) {
std::call_once(_warnOnce, [&] {
- warning() << "Use of deprecated server parameter '" << name() << "', please use '"
- << _sp->name() << "' instead.";
+ LOGV2_WARNING(
+ 23781,
+ "Use of deprecated server parameter '{name}', please use '{sp_name}' instead.",
+ "name"_attr = name(),
+ "sp_name"_attr = _sp->name());
});
_sp->append(opCtx, b, fieldName);
}
Status IDLServerParameterDeprecatedAlias::set(const BSONElement& newValueElement) {
std::call_once(_warnOnce, [&] {
- warning() << "Use of deprecated server parameter '" << name() << "', please use '"
- << _sp->name() << "' instead.";
+ LOGV2_WARNING(
+ 23782,
+ "Use of deprecated server parameter '{name}', please use '{sp_name}' instead.",
+ "name"_attr = name(),
+ "sp_name"_attr = _sp->name());
});
return _sp->set(newValueElement);
}
Status IDLServerParameterDeprecatedAlias::setFromString(const std::string& str) {
std::call_once(_warnOnce, [&] {
- warning() << "Use of deprecated server parameter '" << name() << "', please use '"
- << _sp->name() << "' instead.";
+ LOGV2_WARNING(
+ 23783,
+ "Use of deprecated server parameter '{name}', please use '{sp_name}' instead.",
+ "name"_attr = name(),
+ "sp_name"_attr = _sp->name());
});
return _sp->setFromString(str);
}
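
The deprecated-alias warnings above keep their std::call_once guard; only the logging call inside the lambda changes. A sketch of that once-only warning shape, with hypothetical names and ID:

#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault

#include <mutex>
#include <string>

#include "mongo/logv2/log.h"

namespace mongo {

// Hypothetical free-standing version of the deprecation warning pattern above.
void warnDeprecatedParameterOnce(std::once_flag& warnOnce,
                                 const std::string& oldName,
                                 const std::string& newName) {
    std::call_once(warnOnce, [&] {
        LOGV2_WARNING(99992,  // hypothetical ID
                      "Use of deprecated server parameter '{name}', please use '{sp_name}' instead.",
                      "name"_attr = oldName,
                      "sp_name"_attr = newName);
    });
}

}  // namespace mongo
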
diff --git a/src/mongo/platform/random.cpp b/src/mongo/platform/random.cpp
index 9ed392adf1c..fafb8f9d8ea 100644
--- a/src/mongo/platform/random.cpp
+++ b/src/mongo/platform/random.cpp
@@ -51,6 +51,7 @@
#include <memory>
#include <random>
+#include "mongo/logv2/log.h"
#include "mongo/util/assert_util.h"
#include "mongo/util/log.h"
@@ -95,9 +96,10 @@ public:
auto ntstatus = ::BCryptOpenAlgorithmProvider(
&_algHandle, BCRYPT_RNG_ALGORITHM, MS_PRIMITIVE_PROVIDER, 0);
if (ntstatus != STATUS_SUCCESS) {
- error() << "Failed to open crypto algorithm provider while creating secure random "
- "object; NTSTATUS: "
- << ntstatus;
+ LOGV2_ERROR(23822,
+ "Failed to open crypto algorithm provider while creating secure random "
+ "object; NTSTATUS: {ntstatus}",
+ "ntstatus"_attr = ntstatus);
fassertFailed(28815);
}
}
@@ -105,17 +107,20 @@ public:
~Source() {
auto ntstatus = ::BCryptCloseAlgorithmProvider(_algHandle, 0);
if (ntstatus != STATUS_SUCCESS) {
- warning() << "Failed to close crypto algorithm provider destroying secure random "
- "object; NTSTATUS: "
- << ntstatus;
+ LOGV2_WARNING(23821,
+ "Failed to close crypto algorithm provider destroying secure random "
+ "object; NTSTATUS: {ntstatus}",
+ "ntstatus"_attr = ntstatus);
}
}
size_t refill(uint8_t* buf, size_t n) {
auto ntstatus = ::BCryptGenRandom(_algHandle, reinterpret_cast<PUCHAR>(buf), n, 0);
if (ntstatus != STATUS_SUCCESS) {
- error() << "Failed to generate random number from secure random object; NTSTATUS: "
- << ntstatus;
+ LOGV2_ERROR(
+ 23823,
+ "Failed to generate random number from secure random object; NTSTATUS: {ntstatus}",
+ "ntstatus"_attr = ntstatus);
fassertFailed(28814);
}
return n;
@@ -138,7 +143,10 @@ public:
continue;
} else {
auto errSave = errno;
- error() << "SecureRandom: read `" << kFn << "`: " << strerror(errSave);
+ LOGV2_ERROR(23824,
+ "SecureRandom: read `{kFn}`: {strerror_errSave}",
+ "kFn"_attr = kFn,
+ "strerror_errSave"_attr = strerror(errSave));
fassertFailed(28840);
}
}
@@ -160,7 +168,10 @@ private:
continue;
} else {
auto errSave = errno;
- error() << "SecureRandom: open `" << kFn << "`: " << strerror(errSave);
+ LOGV2_ERROR(23825,
+ "SecureRandom: open `{kFn}`: {strerror_errSave}",
+ "kFn"_attr = kFn,
+ "strerror_errSave"_attr = strerror(errSave));
fassertFailed(28839);
}
}
diff --git a/src/mongo/s/chunk_manager.cpp b/src/mongo/s/chunk_manager.cpp
index 6b3973846fb..a8af892ff28 100644
--- a/src/mongo/s/chunk_manager.cpp
+++ b/src/mongo/s/chunk_manager.cpp
@@ -41,6 +41,7 @@
#include "mongo/db/query/query_planner.h"
#include "mongo/db/query/query_planner_common.h"
#include "mongo/db/storage/key_string.h"
+#include "mongo/logv2/log.h"
#include "mongo/s/chunk_writes_tracker.h"
#include "mongo/s/shard_invalidated_for_targeting_exception.h"
#include "mongo/util/log.h"
@@ -350,8 +351,9 @@ IndexBounds ChunkManager::collapseQuerySolution(const QuerySolutionNode* node) {
// children.size() > 1, assert it's OR / SORT_MERGE.
if (node->getType() != STAGE_OR && node->getType() != STAGE_SORT_MERGE) {
// Unexpected node. We should never reach here.
- error() << "could not generate index bounds on query solution tree: "
- << redact(node->toString());
+ LOGV2_ERROR(23833,
+ "could not generate index bounds on query solution tree: {node}",
+ "node"_attr = redact(node->toString()));
dassert(false); // We'd like to know this error in testing.
// Bail out with all shards in production, since this isn't a fatal error.
diff --git a/src/mongo/s/sharding_initialization.cpp b/src/mongo/s/sharding_initialization.cpp
index 313f170fcd2..04bcabf11b5 100644
--- a/src/mongo/s/sharding_initialization.cpp
+++ b/src/mongo/s/sharding_initialization.cpp
@@ -54,6 +54,7 @@
#include "mongo/executor/task_executor.h"
#include "mongo/executor/task_executor_pool.h"
#include "mongo/executor/thread_pool_task_executor.h"
+#include "mongo/logv2/log.h"
#include "mongo/rpc/metadata/config_server_metadata.h"
#include "mongo/rpc/metadata/metadata_hook.h"
#include "mongo/s/balancer_configuration.h"
@@ -255,9 +256,10 @@ Status waitForShardRegistryReload(OperationContext* opCtx) {
continue;
} catch (const DBException& ex) {
Status status = ex.toStatus();
- warning()
- << "Error initializing sharding state, sleeping for 2 seconds and trying again"
- << causedBy(status);
+ LOGV2_WARNING(23834,
+ "Error initializing sharding state, sleeping for 2 seconds and trying "
+ "again{causedBy_status}",
+ "causedBy_status"_attr = causedBy(status));
sleepFor(kRetryInterval);
continue;
}
diff --git a/src/mongo/scripting/mozjs/valuereader.cpp b/src/mongo/scripting/mozjs/valuereader.cpp
index aad93721a01..5d321473d70 100644
--- a/src/mongo/scripting/mozjs/valuereader.cpp
+++ b/src/mongo/scripting/mozjs/valuereader.cpp
@@ -39,6 +39,7 @@
#include <js/Date.h>
#include "mongo/base/error_codes.h"
+#include "mongo/logv2/log.h"
#include "mongo/platform/decimal128.h"
#include "mongo/scripting/mozjs/implscope.h"
#include "mongo/scripting/mozjs/objectwrapper.h"
@@ -79,7 +80,7 @@ void ValueReader::fromBSONElement(const BSONElement& elem, const BSONObj& parent
scope->getProto<CodeInfo>().newInstance(args, _value);
} else {
if (!elem.codeWScopeObject().isEmpty())
- warning() << "CodeWScope doesn't transfer to db.eval";
+ LOGV2_WARNING(23826, "CodeWScope doesn't transfer to db.eval");
scope->newFunction(StringData(elem.codeWScopeCode(), elem.codeWScopeCodeLen() - 1),
_value);
}
diff --git a/src/mongo/util/fail_point.cpp b/src/mongo/util/fail_point.cpp
index e6ecad41b89..26a02782f09 100644
--- a/src/mongo/util/fail_point.cpp
+++ b/src/mongo/util/fail_point.cpp
@@ -41,6 +41,7 @@
#include "mongo/base/init.h"
#include "mongo/bson/json.h"
#include "mongo/bson/util/bson_extract.h"
+#include "mongo/logv2/log.h"
#include "mongo/platform/random.h"
#include "mongo/stdx/thread.h"
#include "mongo/util/assert_util.h"
@@ -174,7 +175,9 @@ FailPoint::RetCode FailPoint::_slowShouldFailOpenBlockWithoutIncrementingTimesEn
return slowOff;
}
default:
- error() << "FailPoint Mode not supported: " << static_cast<int>(_mode);
+ LOGV2_ERROR(23832,
+ "FailPoint Mode not supported: {static_cast_int_mode}",
+ "static_cast_int_mode"_attr = static_cast<int>(_mode));
fassertFailed(16444);
}
}
@@ -302,7 +305,10 @@ auto setGlobalFailPoint(const std::string& failPointName, const BSONObj& cmdObj)
if (failPoint == nullptr)
uasserted(ErrorCodes::FailPointSetFailed, failPointName + " not found");
auto timesEntered = failPoint->setMode(uassertStatusOK(FailPoint::parseBSON(cmdObj)));
- warning() << "failpoint: " << failPointName << " set to: " << failPoint->toBSON();
+ LOGV2_WARNING(23829,
+ "failpoint: {failPointName} set to: {failPoint}",
+ "failPointName"_attr = failPointName,
+ "failPoint"_attr = failPoint->toBSON());
return timesEntered;
}
@@ -316,12 +322,18 @@ FailPointEnableBlock::FailPointEnableBlock(std::string failPointName, BSONObj da
_initialTimesEntered = _failPoint->setMode(FailPoint::alwaysOn, 0, std::move(data));
- warning() << "failpoint: " << _failPointName << " set to: " << _failPoint->toBSON();
+ LOGV2_WARNING(23830,
+ "failpoint: {failPointName} set to: {failPoint}",
+ "failPointName"_attr = _failPointName,
+ "failPoint"_attr = _failPoint->toBSON());
}
FailPointEnableBlock::~FailPointEnableBlock() {
_failPoint->setMode(FailPoint::off);
- warning() << "failpoint: " << _failPointName << " set to: " << _failPoint->toBSON();
+ LOGV2_WARNING(23831,
+ "failpoint: {failPointName} set to: {failPoint}",
+ "failPointName"_attr = _failPointName,
+ "failPoint"_attr = _failPoint->toBSON());
}
FailPointRegistry::FailPointRegistry() : _frozen(false) {}
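
As the fail point conversions above show, attribute values are not limited to strings and integers; a whole BSONObj can be attached as one structured attribute. A sketch with a hypothetical helper and ID:

#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault

#include <string>

#include "mongo/bson/bsonobj.h"
#include "mongo/logv2/log.h"

namespace mongo {

// Hypothetical helper mirroring the fail point state-change warnings above.
void warnFailPointChanged(const std::string& failPointName, const BSONObj& state) {
    LOGV2_WARNING(99993,  // hypothetical ID
                  "failpoint: {failPointName} set to: {failPoint}",
                  "failPointName"_attr = failPointName,
                  "failPoint"_attr = state);
}

}  // namespace mongo
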
diff --git a/src/mongo/util/net/private/ssl_expiration.cpp b/src/mongo/util/net/private/ssl_expiration.cpp
index 3ef6236d2c8..1e00f07b0d0 100644
--- a/src/mongo/util/net/private/ssl_expiration.cpp
+++ b/src/mongo/util/net/private/ssl_expiration.cpp
@@ -33,6 +33,7 @@
#include <string>
+#include "mongo/logv2/log.h"
#include "mongo/util/log.h"
#include "mongo/util/time_support.h"
@@ -58,8 +59,10 @@ void CertificateExpirationMonitor::taskDoWork() {
if (_certExpiration <= now) {
// The certificate has expired.
- warning() << "Server certificate is now invalid. It expired on "
- << dateToISOStringUTC(_certExpiration);
+ LOGV2_WARNING(
+ 23785,
+ "Server certificate is now invalid. It expired on {dateToISOStringUTC_certExpiration}",
+ "dateToISOStringUTC_certExpiration"_attr = dateToISOStringUTC(_certExpiration));
return;
}
@@ -67,8 +70,13 @@ void CertificateExpirationMonitor::taskDoWork() {
if (remainingValidDuration <= 30 * oneDay) {
// The certificate will expire in the next 30 days.
- warning() << "Server certificate will expire on " << dateToISOStringUTC(_certExpiration)
- << " in " << durationCount<Hours>(remainingValidDuration) / 24 << " days.";
+ LOGV2_WARNING(23786,
+ "Server certificate will expire on {dateToISOStringUTC_certExpiration} in "
+ "{durationCount_Hours_remainingValidDuration_24} days.",
+ "dateToISOStringUTC_certExpiration"_attr =
+ dateToISOStringUTC(_certExpiration),
+ "durationCount_Hours_remainingValidDuration_24"_attr =
+ durationCount<Hours>(remainingValidDuration) / 24);
}
}
diff --git a/src/mongo/util/net/ssl_parameters.cpp b/src/mongo/util/net/ssl_parameters.cpp
index 66d5fdc5721..92790983c70 100644
--- a/src/mongo/util/net/ssl_parameters.cpp
+++ b/src/mongo/util/net/ssl_parameters.cpp
@@ -37,6 +37,7 @@
#include "mongo/config.h"
#include "mongo/db/auth/sasl_command_constants.h"
#include "mongo/db/server_options.h"
+#include "mongo/logv2/log.h"
#include "mongo/util/log.h"
#include "mongo/util/net/ssl_options.h"
#include "mongo/util/net/ssl_parameters_gen.h"
@@ -111,7 +112,8 @@ void SSLModeServerParameter::append(OperationContext*,
BSONObjBuilder& builder,
const std::string& fieldName) {
std::call_once(warnForSSLMode, [] {
- warning() << "Use of deprecated server parameter 'sslMode', please use 'tlsMode' instead.";
+ LOGV2_WARNING(
+ 23803, "Use of deprecated server parameter 'sslMode', please use 'tlsMode' instead.");
});
builder.append(fieldName, SSLParams::sslModeFormat(sslGlobalParams.sslMode.load()));
@@ -127,7 +129,8 @@ void TLSModeServerParameter::append(OperationContext*,
Status SSLModeServerParameter::setFromString(const std::string& strMode) {
std::call_once(warnForSSLMode, [] {
- warning() << "Use of deprecated server parameter 'sslMode', please use 'tlsMode' instead.";
+ LOGV2_WARNING(
+ 23804, "Use of deprecated server parameter 'sslMode', please use 'tlsMode' instead.");
});
auto swNewMode = checkTLSModeTransition(
diff --git a/src/mongo/util/processinfo_windows.cpp b/src/mongo/util/processinfo_windows.cpp
index c8bdb2b633a..dd73eb49f81 100644
--- a/src/mongo/util/processinfo_windows.cpp
+++ b/src/mongo/util/processinfo_windows.cpp
@@ -37,6 +37,7 @@
#include <iostream>
#include <psapi.h>
+#include "mongo/logv2/log.h"
#include "mongo/util/log.h"
#include "mongo/util/processinfo.h"
@@ -72,7 +73,9 @@ int ProcessInfo::getVirtualMemorySize() {
BOOL status = GlobalMemoryStatusEx(&mse);
if (!status) {
DWORD gle = GetLastError();
- error() << "GlobalMemoryStatusEx failed with " << errnoWithDescription(gle);
+ LOGV2_ERROR(23812,
+ "GlobalMemoryStatusEx failed with {errnoWithDescription_gle}",
+ "errnoWithDescription_gle"_attr = errnoWithDescription(gle));
fassert(28621, status);
}
@@ -86,7 +89,9 @@ int ProcessInfo::getResidentSize() {
BOOL status = GetProcessMemoryInfo(GetCurrentProcess(), &pmc, sizeof(pmc));
if (!status) {
DWORD gle = GetLastError();
- error() << "GetProcessMemoryInfo failed with " << errnoWithDescription(gle);
+ LOGV2_ERROR(23813,
+ "GetProcessMemoryInfo failed with {errnoWithDescription_gle}",
+ "errnoWithDescription_gle"_attr = errnoWithDescription(gle));
fassert(28622, status);
}
@@ -99,7 +104,9 @@ double ProcessInfo::getSystemMemoryPressurePercentage() {
BOOL status = GlobalMemoryStatusEx(&mse);
if (!status) {
DWORD gle = GetLastError();
- error() << "GlobalMemoryStatusEx failed with " << errnoWithDescription(gle);
+ LOGV2_ERROR(23814,
+ "GlobalMemoryStatusEx failed with {errnoWithDescription_gle}",
+ "errnoWithDescription_gle"_attr = errnoWithDescription(gle));
fassert(28623, status);
}
@@ -155,16 +162,22 @@ bool getFileVersion(const char* filePath, DWORD& fileVersionMS, DWORD& fileVersi
DWORD verSize = GetFileVersionInfoSizeA(filePath, NULL);
if (verSize == 0) {
DWORD gle = GetLastError();
- warning() << "GetFileVersionInfoSizeA on " << filePath << " failed with "
- << errnoWithDescription(gle);
+ LOGV2_WARNING(
+ 23807,
+ "GetFileVersionInfoSizeA on {filePath} failed with {errnoWithDescription_gle}",
+ "filePath"_attr = filePath,
+ "errnoWithDescription_gle"_attr = errnoWithDescription(gle));
return false;
}
std::unique_ptr<char[]> verData(new char[verSize]);
if (GetFileVersionInfoA(filePath, NULL, verSize, verData.get()) == 0) {
DWORD gle = GetLastError();
- warning() << "GetFileVersionInfoSizeA on " << filePath << " failed with "
- << errnoWithDescription(gle);
+ LOGV2_WARNING(
+ 23808,
+ "GetFileVersionInfoSizeA on {filePath} failed with {errnoWithDescription_gle}",
+ "filePath"_attr = filePath,
+ "errnoWithDescription_gle"_attr = errnoWithDescription(gle));
return false;
}
@@ -172,13 +185,17 @@ bool getFileVersion(const char* filePath, DWORD& fileVersionMS, DWORD& fileVersi
VS_FIXEDFILEINFO* verInfo;
if (VerQueryValueA(verData.get(), "\\", (LPVOID*)&verInfo, &size) == 0) {
DWORD gle = GetLastError();
- warning() << "VerQueryValueA on " << filePath << " failed with "
- << errnoWithDescription(gle);
+ LOGV2_WARNING(23809,
+ "VerQueryValueA on {filePath} failed with {errnoWithDescription_gle}",
+ "filePath"_attr = filePath,
+ "errnoWithDescription_gle"_attr = errnoWithDescription(gle));
return false;
}
if (size != sizeof(VS_FIXEDFILEINFO)) {
- warning() << "VerQueryValueA on " << filePath << " returned structure with unexpected size";
+ LOGV2_WARNING(23810,
+ "VerQueryValueA on {filePath} returned structure with unexpected size",
+ "filePath"_attr = filePath);
return false;
}
@@ -310,8 +327,10 @@ bool ProcessInfo::checkNumaEnabled() {
new BYTE[returnLength]));
} else {
DWORD gle = GetLastError();
- warning() << "GetLogicalProcessorInformation failed with "
- << errnoWithDescription(gle);
+ LOGV2_WARNING(
+ 23811,
+ "GetLogicalProcessorInformation failed with {errnoWithDescription_gle}",
+ "errnoWithDescription_gle"_attr = errnoWithDescription(gle));
return false;
}
}
diff --git a/src/mongo/util/signal_handlers_synchronous.cpp b/src/mongo/util/signal_handlers_synchronous.cpp
index 8f7fa0347e0..27c12f76616 100644
--- a/src/mongo/util/signal_handlers_synchronous.cpp
+++ b/src/mongo/util/signal_handlers_synchronous.cpp
@@ -46,6 +46,7 @@
#include "mongo/base/string_data.h"
#include "mongo/logger/log_domain.h"
#include "mongo/logger/logger.h"
+#include "mongo/logv2/log.h"
#include "mongo/platform/compiler.h"
#include "mongo/stdx/exception.h"
#include "mongo/stdx/thread.h"
@@ -251,17 +252,23 @@ void myInvalidParameterHandler(const wchar_t* expression,
const wchar_t* file,
unsigned int line,
uintptr_t pReserved) {
- severe() << "Invalid parameter detected in function " << toUtf8String(function)
- << " File: " << toUtf8String(file) << " Line: " << line;
- severe() << "Expression: " << toUtf8String(expression);
- severe() << "immediate exit due to invalid parameter";
+ LOGV2_FATAL(23815,
+ "Invalid parameter detected in function {toUtf8String_function} File: "
+ "{toUtf8String_file} Line: {line}",
+ "toUtf8String_function"_attr = toUtf8String(function),
+ "toUtf8String_file"_attr = toUtf8String(file),
+ "line"_attr = line);
+ LOGV2_FATAL(23816,
+ "Expression: {toUtf8String_expression}",
+ "toUtf8String_expression"_attr = toUtf8String(expression));
+ LOGV2_FATAL(23817, "immediate exit due to invalid parameter");
abruptQuit(SIGABRT);
}
void myPureCallHandler() {
- severe() << "Pure call handler invoked";
- severe() << "immediate exit due to invalid pure call";
+ LOGV2_FATAL(23818, "Pure call handler invoked");
+ LOGV2_FATAL(23819, "immediate exit due to invalid pure call");
abruptQuit(SIGABRT);
}
@@ -330,10 +337,15 @@ void setupSynchronousSignalHandlers() {
}
if (sigaction(spec.signal, &sa, nullptr) != 0) {
int savedErr = errno;
- severe() << format(
- FMT_STRING("Failed to install signal handler for signal {} with sigaction: {}"),
- spec.signal,
- strerror(savedErr));
+ LOGV2_FATAL(
+ 23820,
+ "{format_FMT_STRING_Failed_to_install_signal_handler_for_signal_with_sigaction_"
+ "spec_signal_strerror_savedErr}",
+ "format_FMT_STRING_Failed_to_install_signal_handler_for_signal_with_sigaction_spec_signal_strerror_savedErr"_attr =
+ format(FMT_STRING(
+ "Failed to install signal handler for signal {} with sigaction: {}"),
+ spec.signal,
+ strerror(savedErr)));
fassertFailed(31334);
}
}
diff --git a/src/mongo/watchdog/watchdog_mongod.cpp b/src/mongo/watchdog/watchdog_mongod.cpp
index e01f693662f..f473b1fedea 100644
--- a/src/mongo/watchdog/watchdog_mongod.cpp
+++ b/src/mongo/watchdog/watchdog_mongod.cpp
@@ -43,6 +43,7 @@
#include "mongo/db/server_options.h"
#include "mongo/db/service_context.h"
#include "mongo/db/storage/storage_options.h"
+#include "mongo/logv2/log.h"
#include "mongo/unittest/unittest.h"
#include "mongo/util/clock_source.h"
#include "mongo/util/clock_source_mock.h"
@@ -162,9 +163,11 @@ void startWatchdog() {
checks.push_back(std::move(journalCheck));
} else {
- warning()
- << "Watchdog is skipping check for journal directory since it does not exist: '"
- << journalDirectory.generic_string() << "'";
+ LOGV2_WARNING(23835,
+ "Watchdog is skipping check for journal directory since it does not "
+ "exist: '{journalDirectory_generic_string}'",
+ "journalDirectory_generic_string"_attr =
+ journalDirectory.generic_string());
}
}