summaryrefslogtreecommitdiff
path: root/src
diff options
context:
space:
mode:
authorGabriel Russell <gabriel.russell@mongodb.com>2020-05-21 12:06:47 -0400
committerEvergreen Agent <no-reply@evergreen.mongodb.com>2020-06-02 22:09:56 +0000
commit451dcf99afb7a67e9a5b849e846391402b5928c9 (patch)
tree9ca45d512d6489e84a0e334cda3fe9303c6bf6ad /src
parent3bef557cab5c17de9374f602c743b87808af590e (diff)
downloadmongo-451dcf99afb7a67e9a5b849e846391402b5928c9.tar.gz
SERVER-47075 Clean up some log lines
* all logs under mongo/transport and mongo/util/net were reviewed and many were cleaned up * all "status"_attr were reviewed and most changed to "error"_attr
Diffstat (limited to 'src')
-rw-r--r--src/mongo/db/auth/authorization_manager_impl.cpp2
-rw-r--r--src/mongo/db/auth/authorization_session_impl.cpp2
-rw-r--r--src/mongo/db/catalog/database_impl.cpp29
-rw-r--r--src/mongo/db/catalog/index_catalog_impl.cpp2
-rw-r--r--src/mongo/db/catalog/rename_collection.cpp4
-rw-r--r--src/mongo/db/concurrency/deferred_writer.cpp2
-rw-r--r--src/mongo/db/cursor_manager.cpp4
-rw-r--r--src/mongo/db/free_mon/free_mon_processor.cpp38
-rw-r--r--src/mongo/db/index/index_access_method.cpp8
-rw-r--r--src/mongo/db/index_builds_coordinator.cpp7
-rw-r--r--src/mongo/db/read_concern_mongod.cpp6
-rw-r--r--src/mongo/db/repair_database_and_check_version.cpp2
-rw-r--r--src/mongo/db/s/transaction_coordinator_util.cpp4
-rw-r--r--src/mongo/db/storage/remove_saver.cpp2
-rw-r--r--src/mongo/db/storage/storage_engine_lock_file_windows.cpp2
-rw-r--r--src/mongo/db/views/view_catalog.cpp7
-rw-r--r--src/mongo/s/commands/cluster_killoperations_cmd.cpp2
-rw-r--r--src/mongo/s/query/establish_cursors.cpp12
-rw-r--r--src/mongo/scripting/mozjs/cursor_handle.cpp5
-rw-r--r--src/mongo/scripting/mozjs/session.cpp5
-rw-r--r--src/mongo/transport/baton_asio_linux.h17
-rw-r--r--src/mongo/transport/ismaster_metrics.cpp3
-rw-r--r--src/mongo/transport/message_compressor_manager.cpp38
-rw-r--r--src/mongo/transport/message_compressor_snappy.cpp2
-rw-r--r--src/mongo/transport/message_compressor_zlib.cpp2
-rw-r--r--src/mongo/transport/message_compressor_zstd.cpp2
-rw-r--r--src/mongo/transport/service_entry_point_impl.cpp29
-rw-r--r--src/mongo/transport/service_entry_point_utils.cpp15
-rw-r--r--src/mongo/transport/service_executor_adaptive.cpp52
-rw-r--r--src/mongo/transport/service_executor_adaptive_test.cpp5
-rw-r--r--src/mongo/transport/service_executor_reserved.cpp17
-rw-r--r--src/mongo/transport/service_executor_test.cpp5
-rw-r--r--src/mongo/transport/service_state_machine.cpp45
-rw-r--r--src/mongo/transport/session_asio.h24
-rw-r--r--src/mongo/transport/transport_layer_asio.cpp70
-rw-r--r--src/mongo/transport/transport_layer_asio_integration_test.cpp7
-rw-r--r--src/mongo/transport/transport_layer_egress_init.cpp2
-rw-r--r--src/mongo/util/net/hostname_canonicalization.cpp31
-rw-r--r--src/mongo/util/net/http_client_curl.cpp2
-rw-r--r--src/mongo/util/net/http_client_winhttp.cpp2
-rw-r--r--src/mongo/util/net/ssl_manager_openssl.cpp4
-rw-r--r--src/mongo/util/net/ssl_options.cpp2
-rw-r--r--src/mongo/util/processinfo_freebsd.cpp28
-rw-r--r--src/mongo/util/processinfo_openbsd.cpp26
44 files changed, 328 insertions, 247 deletions
diff --git a/src/mongo/db/auth/authorization_manager_impl.cpp b/src/mongo/db/auth/authorization_manager_impl.cpp
index 5f6e9ab1380..7e161867017 100644
--- a/src/mongo/db/auth/authorization_manager_impl.cpp
+++ b/src/mongo/db/auth/authorization_manager_impl.cpp
@@ -581,7 +581,7 @@ void AuthorizationManagerImpl::_pinnedUsersThreadRoutine() noexcept try {
LOGV2_WARNING(20239,
"Unable to fetch pinned user",
"user"_attr = userName.toString(),
- "status"_attr = status);
+ "error"_attr = status);
} else {
LOGV2_DEBUG(20233, 2, "Pinned user not found", "user"_attr = userName);
}
diff --git a/src/mongo/db/auth/authorization_session_impl.cpp b/src/mongo/db/auth/authorization_session_impl.cpp
index 6689dcb8458..73ec76620b1 100644
--- a/src/mongo/db/auth/authorization_session_impl.cpp
+++ b/src/mongo/db/auth/authorization_session_impl.cpp
@@ -811,7 +811,7 @@ void AuthorizationSessionImpl::_refreshUserInfoAsNeeded(OperationContext* opCtx)
// out-of-date privilege data.
LOGV2_WARNING(20247,
"Could not fetch updated user privilege information for {user}; "
- "continuing to use old information. Reason is {status}",
+ "continuing to use old information. Reason is {error}",
"Could not fetch updated user privilege information, continuing "
"to use old information",
"user"_attr = name,
diff --git a/src/mongo/db/catalog/database_impl.cpp b/src/mongo/db/catalog/database_impl.cpp
index def4c60d9c3..e9f3dec0468 100644
--- a/src/mongo/db/catalog/database_impl.cpp
+++ b/src/mongo/db/catalog/database_impl.cpp
@@ -183,8 +183,8 @@ void DatabaseImpl::clearTmpCollections(OperationContext* opCtx) const {
Status status = dropCollection(opCtx, collection->ns(), {});
if (!status.isOK()) {
LOGV2_WARNING(20327,
- "could not drop temp collection '{collection_ns}': {status}",
- "Could not drop temp collection",
+ "could not drop temp collection '{namespace}': {error}",
+ "could not drop temp collection",
"namespace"_attr = collection->ns(),
"error"_attr = redact(status));
}
@@ -192,8 +192,8 @@ void DatabaseImpl::clearTmpCollections(OperationContext* opCtx) const {
} catch (const WriteConflictException&) {
LOGV2_WARNING(
20328,
- "could not drop temp collection '{collection_ns}' due to WriteConflictException",
- "Could not drop temp collection due to WriteConflictException",
+ "could not drop temp collection '{namespace}' due to WriteConflictException",
+ "could not drop temp collection due to WriteConflictException",
"namespace"_attr = collection->ns());
opCtx->recoveryUnit()->abandonSnapshot();
}
@@ -325,7 +325,7 @@ Status DatabaseImpl::dropCollectionEvenIfSystem(OperationContext* opCtx,
repl::OpTime dropOpTime) const {
invariant(opCtx->lockState()->isCollectionLockedForMode(nss, MODE_X));
- LOGV2_DEBUG(20313, 1, "dropCollection: {nss}", "nss"_attr = nss);
+ LOGV2_DEBUG(20313, 1, "dropCollection: {namespace}", "dropCollection", "namespace"_attr = nss);
// A valid 'dropOpTime' is not allowed when writes are replicated.
if (!dropOpTime.isNull() && opCtx->writesAreReplicated()) {
@@ -379,7 +379,8 @@ Status DatabaseImpl::dropCollectionEvenIfSystem(OperationContext* opCtx,
auto commitTimestamp = opCtx->recoveryUnit()->getCommitTimestamp();
LOGV2(20314,
- "dropCollection: {nss} ({uuid}) - storage engine will take ownership of drop-pending "
+ "dropCollection: {namespace} ({uuid}) - storage engine will take ownership of "
+ "drop-pending "
"collection with optime {dropOpTime} and commit timestamp {commitTimestamp}",
"dropCollection: storage engine will take ownership of drop-pending "
"collection",
@@ -450,12 +451,14 @@ void DatabaseImpl::_dropCollectionIndexes(OperationContext* opCtx,
const NamespaceString& nss,
Collection* collection) const {
invariant(_name == nss.db());
- LOGV2_DEBUG(20316, 1, "dropCollection: {nss} - dropAllIndexes start", "nss"_attr = nss);
+ LOGV2_DEBUG(
+ 20316, 1, "dropCollection: {namespace} - dropAllIndexes start", "namespace"_attr = nss);
collection->getIndexCatalog()->dropAllIndexes(opCtx, true);
invariant(DurableCatalog::get(opCtx)->getTotalIndexCount(opCtx, collection->getCatalogId()) ==
0);
- LOGV2_DEBUG(20317, 1, "dropCollection: {nss} - dropAllIndexes done", "nss"_attr = nss);
+ LOGV2_DEBUG(
+ 20317, 1, "dropCollection: {namespace} - dropAllIndexes done", "namespace"_attr = nss);
}
Status DatabaseImpl::_finishDropCollection(OperationContext* opCtx,
@@ -463,7 +466,7 @@ Status DatabaseImpl::_finishDropCollection(OperationContext* opCtx,
Collection* collection) const {
UUID uuid = collection->uuid();
LOGV2(20318,
- "Finishing collection drop for {nss} ({uuid}).",
+ "Finishing collection drop for {namespace} ({uuid}).",
"Finishing collection drop",
"namespace"_attr = nss,
"uuid"_attr = uuid);
@@ -636,7 +639,7 @@ Collection* DatabaseImpl::createCollection(OperationContext* opCtx,
audit::logCreateCollection(&cc(), nss.ns());
LOGV2(20320,
- "createCollection: {nss} with {generatedUUID_generated_provided} UUID: "
+ "createCollection: {namespace} with {generatedUUID_generated_provided} UUID: "
"{optionsWithUUID_uuid_get} and options: {options}",
"createCollection",
"namespace"_attr = nss,
@@ -768,7 +771,7 @@ void DatabaseImpl::checkForIdIndexesAndDropPendingCollections(OperationContext*
if (nss.isDropPendingNamespace()) {
auto dropOpTime = fassert(40459, nss.getDropPendingNamespaceOpTime());
LOGV2(20321,
- "Found drop-pending namespace {nss} with drop optime {dropOpTime}",
+ "Found drop-pending namespace {namespace} with drop optime {dropOpTime}",
"Found drop-pending namespace",
"namespace"_attr = nss,
"dropOpTime"_attr = dropOpTime);
@@ -803,8 +806,8 @@ Status DatabaseImpl::userCreateNS(OperationContext* opCtx,
const BSONObj& idIndex) const {
LOGV2_DEBUG(20324,
1,
- "create collection {nss} {collectionOptions}",
- "nss"_attr = nss,
+ "create collection {namespace} {collectionOptions}",
+ "namespace"_attr = nss,
"collectionOptions"_attr = collectionOptions.toBSON());
if (!NamespaceString::validCollectionComponent(nss.ns()))
return Status(ErrorCodes::InvalidNamespace, str::stream() << "invalid ns: " << nss);
diff --git a/src/mongo/db/catalog/index_catalog_impl.cpp b/src/mongo/db/catalog/index_catalog_impl.cpp
index 32e2df9000b..7ab4638e63e 100644
--- a/src/mongo/db/catalog/index_catalog_impl.cpp
+++ b/src/mongo/db/catalog/index_catalog_impl.cpp
@@ -1550,7 +1550,7 @@ void IndexCatalogImpl::_unindexKeys(OperationContext* opCtx,
if (!status.isOK()) {
LOGV2(20362,
- "Couldn't unindex record {obj} from collection {collection_ns}. Status: {status}",
+ "Couldn't unindex record {obj} from collection {namespace}: {error}",
"Couldn't unindex record",
"record"_attr = redact(obj),
"namespace"_attr = _collection->ns(),
diff --git a/src/mongo/db/catalog/rename_collection.cpp b/src/mongo/db/catalog/rename_collection.cpp
index bd95a4d50e1..69ceb35a556 100644
--- a/src/mongo/db/catalog/rename_collection.cpp
+++ b/src/mongo/db/catalog/rename_collection.cpp
@@ -593,12 +593,12 @@ Status renameBetweenDBs(OperationContext* opCtx,
// the rename operation has already failed for another reason.
LOGV2(20399,
"Unable to drop temporary collection {tmpName} while renaming from {source} to "
- "{target}: {status}",
+ "{target}: {error}",
"Unable to drop temporary collection while renaming",
"tempCollection"_attr = tmpName,
"source"_attr = source,
"target"_attr = target,
- "reason"_attr = status);
+ "error"_attr = status);
}
});
diff --git a/src/mongo/db/concurrency/deferred_writer.cpp b/src/mongo/db/concurrency/deferred_writer.cpp
index e04e43f0c1a..f533a22dfe9 100644
--- a/src/mongo/db/concurrency/deferred_writer.cpp
+++ b/src/mongo/db/concurrency/deferred_writer.cpp
@@ -48,7 +48,7 @@ auto kLogInterval = stdx::chrono::minutes(1);
void DeferredWriter::_logFailure(const Status& status) {
if (TimePoint::clock::now() - _lastLogged > kLogInterval) {
LOGV2(20516,
- "Unable to write to collection {nss}: {status}",
+ "Unable to write to collection {namespace}: {error}",
"Unable to write to collection",
"namespace"_attr = _nss.toString(),
"error"_attr = status);
diff --git a/src/mongo/db/cursor_manager.cpp b/src/mongo/db/cursor_manager.cpp
index a02412fabe4..322516ecc8d 100644
--- a/src/mongo/db/cursor_manager.cpp
+++ b/src/mongo/db/cursor_manager.cpp
@@ -221,10 +221,10 @@ void CursorManager::unpin(OperationContext* opCtx,
// will see the reason the cursor was killed when asking for the next batch.
if (interruptStatus == ErrorCodes::Interrupted || interruptStatus == ErrorCodes::CursorKilled) {
LOGV2(20530,
- "removing cursor {cursor_cursorid} after completing batch: {status}",
+ "removing cursor {cursor_cursorid} after completing batch: {error}",
"Removing cursor after completing batch",
"cursorId"_attr = cursor->cursorid(),
- "status"_attr = interruptStatus);
+ "error"_attr = interruptStatus);
return deregisterAndDestroyCursor(std::move(partition), opCtx, std::move(cursor));
} else if (!interruptStatus.isOK()) {
cursor->markAsKilled(interruptStatus);
diff --git a/src/mongo/db/free_mon/free_mon_processor.cpp b/src/mongo/db/free_mon/free_mon_processor.cpp
index ddf5110b27c..ac2d9355635 100644
--- a/src/mongo/db/free_mon/free_mon_processor.cpp
+++ b/src/mongo/db/free_mon/free_mon_processor.cpp
@@ -608,8 +608,10 @@ void FreeMonProcessor::doAsyncRegisterComplete(
Status s = validateRegistrationResponse(resp);
if (!s.isOK()) {
- LOGV2_WARNING(
- 20620, "Free Monitoring registration halted due to {status}", "status"_attr = s);
+ LOGV2_WARNING(20620,
+ "Free Monitoring registration halted due to {error}",
+ "Free Monitoring registration halted due to error",
+ "error"_attr = s);
// Disable on any error
_state->setState(StorageStateEnum::disabled);
@@ -682,13 +684,12 @@ void FreeMonProcessor::doAsyncRegisterFail(
return;
}
- LOGV2_DEBUG(
- 20616,
- 1,
- "Free Monitoring Registration Failed with status '{status}', retrying in {interval}",
- "Free Monitoring Registration Failed",
- "status"_attr = msg->getPayload(),
- "interval"_attr = _registrationRetry->getNextDuration());
+ LOGV2_DEBUG(20616,
+ 1,
+ "Free Monitoring Registration Failed with status '{error}', retrying in {interval}",
+ "Free Monitoring Registration Failed, will retry after interval",
+ "error"_attr = msg->getPayload(),
+ "interval"_attr = _registrationRetry->getNextDuration());
// Enqueue a register retry
enqueue(FreeMonRegisterCommandMessage::createWithDeadline(
@@ -795,8 +796,10 @@ void FreeMonProcessor::doAsyncMetricsComplete(
Status s = validateMetricsResponse(resp);
if (!s.isOK()) {
- LOGV2_WARNING(
- 20622, "Free Monitoring metrics uploading halted due to {status}", "status"_attr = s);
+ LOGV2_WARNING(20622,
+ "Free Monitoring metrics uploading halted due to {error}",
+ "Free Monitoring metrics uploading halted due to error",
+ "error"_attr = s);
// Disable free monitoring on validation errors
_state->setState(StorageStateEnum::disabled);
@@ -873,13 +876,12 @@ void FreeMonProcessor::doAsyncMetricsFail(
return;
}
- LOGV2_DEBUG(
- 20618,
- 1,
- "Free Monitoring Metrics upload failed with status {status}, retrying in {interval}",
- "Free Monitoring Metrics upload failed",
- "status"_attr = msg->getPayload(),
- "interval"_attr = _metricsRetry->getNextDuration());
+ LOGV2_DEBUG(20618,
+ 1,
+ "Free Monitoring Metrics upload failed with status {error}, retrying in {interval}",
+ "Free Monitoring Metrics upload failed, will retry after interval",
+ "error"_attr = msg->getPayload(),
+ "interval"_attr = _metricsRetry->getNextDuration());
// Enqueue next metrics upload
enqueue(FreeMonMessage::createWithDeadline(FreeMonMessageType::MetricsSend,
diff --git a/src/mongo/db/index/index_access_method.cpp b/src/mongo/db/index/index_access_method.cpp
index 631437cb140..d73fae36e8a 100644
--- a/src/mongo/db/index/index_access_method.cpp
+++ b/src/mongo/db/index/index_access_method.cpp
@@ -530,8 +530,8 @@ Status AbstractIndexAccessMethod::BulkBuilderImpl::insert(OperationContext* opCt
LOGV2_DEBUG(20684,
1,
"Recording suppressed key generation error to retry later: "
- "{status} on {loc}: {obj}",
- "status"_attr = status,
+ "{error} on {loc}: {obj}",
+ "error"_attr = status,
"loc"_attr = loc,
"obj"_attr = redact(obj));
interceptor->getSkippedRecordTracker()->record(opCtx, loc);
@@ -701,8 +701,8 @@ IndexAccessMethod::OnSuppressedErrorFn IndexAccessMethod::kNoopOnSuppressedError
LOGV2_DEBUG(
20686,
1,
- "Suppressed key generation error: {status} when getting index keys for {loc}: {obj}",
- "status"_attr = redact(status),
+ "Suppressed key generation error: {error} when getting index keys for {loc}: {obj}",
+ "error"_attr = redact(status),
"loc"_attr = loc,
"obj"_attr = redact(obj));
};
diff --git a/src/mongo/db/index_builds_coordinator.cpp b/src/mongo/db/index_builds_coordinator.cpp
index 2a4ecccb69a..637f276653f 100644
--- a/src/mongo/db/index_builds_coordinator.cpp
+++ b/src/mongo/db/index_builds_coordinator.cpp
@@ -1810,8 +1810,11 @@ IndexBuildsCoordinator::PostSetupAction IndexBuildsCoordinator::_setUpIndexBuild
((status == ErrorCodes::IndexOptionsConflict ||
status == ErrorCodes::IndexKeySpecsConflict) &&
options.indexConstraints == IndexBuildsManager::IndexConstraints::kRelax)) {
- LOGV2_DEBUG(
- 20662, 1, "Ignoring indexing error: {status}", "status"_attr = redact(status));
+ LOGV2_DEBUG(20662,
+ 1,
+ "Ignoring indexing error: {error}",
+ "Ignoring indexing error",
+ "error"_attr = redact(status));
return PostSetupAction::kCompleteIndexBuildEarly;
}
diff --git a/src/mongo/db/read_concern_mongod.cpp b/src/mongo/db/read_concern_mongod.cpp
index 012d6c41e35..3663b9cdcc5 100644
--- a/src/mongo/db/read_concern_mongod.cpp
+++ b/src/mongo/db/read_concern_mongod.cpp
@@ -216,9 +216,9 @@ Status makeNoopWriteIfNeeded(OperationContext* opCtx, LogicalTime clusterTime) {
if (!status.isOK()) {
LOGV2_DEBUG(20989,
1,
- "Reached clusterTime {lastAppliedOpTime} but failed noop write due to {status}",
+ "Reached clusterTime {lastAppliedOpTime} but failed noop write due to {error}",
"lastAppliedOpTime"_attr = lastAppliedOpTime.toString(),
- "status"_attr = status.toString());
+ "error"_attr = status.toString());
}
return Status::OK();
}
@@ -347,7 +347,7 @@ Status waitForReadConcernImpl(OperationContext* opCtx,
auto status = makeNoopWriteIfNeeded(opCtx, *targetClusterTime);
if (!status.isOK()) {
LOGV2(20990,
- "Failed noop write at clusterTime: {targetClusterTime} due to {status}",
+ "Failed noop write at clusterTime: {targetClusterTime} due to {error}",
"Failed noop write",
"targetClusterTime"_attr = targetClusterTime,
"error"_attr = status);
diff --git a/src/mongo/db/repair_database_and_check_version.cpp b/src/mongo/db/repair_database_and_check_version.cpp
index fb7a10d7556..8d959ee2bd1 100644
--- a/src/mongo/db/repair_database_and_check_version.cpp
+++ b/src/mongo/db/repair_database_and_check_version.cpp
@@ -237,7 +237,7 @@ Status ensureCollectionProperties(OperationContext* opCtx,
auto status = buildMissingIdIndex(opCtx, coll);
if (!status.isOK()) {
LOGV2_ERROR(21021,
- "could not build an _id index on collection {coll_ns}: {status}",
+ "could not build an _id index on collection {coll_ns}: {error}",
"Could not build an _id index on collection",
logAttrs(*coll),
"error"_attr = status);
diff --git a/src/mongo/db/s/transaction_coordinator_util.cpp b/src/mongo/db/s/transaction_coordinator_util.cpp
index ec5f858cb5c..e682b5c56d1 100644
--- a/src/mongo/db/s/transaction_coordinator_util.cpp
+++ b/src/mongo/db/s/transaction_coordinator_util.cpp
@@ -677,11 +677,11 @@ Future<PrepareResponse> sendPrepareToShard(ServiceContext* service,
LOGV2_DEBUG(22479,
3,
"{sessionId}:{txnNumber} Coordinator shard received "
- "{status} from shard {shardId} for {command}",
+ "{error} from shard {shardId} for {command}",
"Coordinator shard received response from shard",
"sessionId"_attr = lsid.getId(),
"txnNumber"_attr = txnNumber,
- "status"_attr = status,
+ "error"_attr = status,
"shardId"_attr = shardId,
"command"_attr = commandObj);
diff --git a/src/mongo/db/storage/remove_saver.cpp b/src/mongo/db/storage/remove_saver.cpp
index 9ed21bbe234..6652068c6b7 100644
--- a/src/mongo/db/storage/remove_saver.cpp
+++ b/src/mongo/db/storage/remove_saver.cpp
@@ -85,7 +85,7 @@ RemoveSaver::~RemoveSaver() {
Status status = _protector->finalize(protectedBuffer.get(), protectedSizeMax, &resultLen);
if (!status.isOK()) {
LOGV2_FATAL(34350,
- "Unable to finalize DataProtector while closing RemoveSaver: {status}",
+ "Unable to finalize DataProtector while closing RemoveSaver: {error}",
"Unable to finalize DataProtector while closing RemoveSaver",
"error"_attr = redact(status));
}
diff --git a/src/mongo/db/storage/storage_engine_lock_file_windows.cpp b/src/mongo/db/storage/storage_engine_lock_file_windows.cpp
index 85d0e0c303c..0c45fed0a70 100644
--- a/src/mongo/db/storage/storage_engine_lock_file_windows.cpp
+++ b/src/mongo/db/storage/storage_engine_lock_file_windows.cpp
@@ -190,7 +190,7 @@ void StorageEngineLockFile::clearPidAndUnlock() {
Status status = _truncateFile(_lockFileHandle->_handle);
if (!status.isOK()) {
LOGV2(22282,
- "couldn't remove fs lock {status}",
+ "couldn't remove fs lock: {error}",
"Couldn't remove fs lock",
"error"_attr = status);
}
diff --git a/src/mongo/db/views/view_catalog.cpp b/src/mongo/db/views/view_catalog.cpp
index 922186ac219..9d4895076ba 100644
--- a/src/mongo/db/views/view_catalog.cpp
+++ b/src/mongo/db/views/view_catalog.cpp
@@ -141,9 +141,10 @@ Status ViewCatalog::_reload(WithLock,
} catch (const DBException& ex) {
auto status = ex.toStatus();
LOGV2(22547,
- "could not load view catalog for database {durable_getName}: {status}",
- "durable_getName"_attr = _durable->getName(),
- "status"_attr = status);
+ "could not load view catalog for database {dbName}: {error}",
+ "could not load view catalog for database",
+ "dbName"_attr = _durable->getName(),
+ "error"_attr = status);
return status;
}
diff --git a/src/mongo/s/commands/cluster_killoperations_cmd.cpp b/src/mongo/s/commands/cluster_killoperations_cmd.cpp
index 2457958d351..d636fc58adc 100644
--- a/src/mongo/s/commands/cluster_killoperations_cmd.cpp
+++ b/src/mongo/s/commands/cluster_killoperations_cmd.cpp
@@ -52,7 +52,7 @@ public:
if (!status.isOK()) {
LOGV2(4664806,
"Failed to kill the cursor ",
- "status"_attr = redact(status.toString()));
+ "error"_attr = redact(status.toString()));
} else {
LOGV2(4664807, "Killed cursor", "cursorId"_attr = cursorId);
}
diff --git a/src/mongo/s/query/establish_cursors.cpp b/src/mongo/s/query/establish_cursors.cpp
index c80171bdb03..c75478ed3ce 100644
--- a/src/mongo/s/query/establish_cursors.cpp
+++ b/src/mongo/s/query/establish_cursors.cpp
@@ -75,7 +75,7 @@ void killOpOnShards(std::shared_ptr<executor::TaskExecutor> executor,
if (!args.response.isOK()) {
LOGV2_DEBUG(4625504,
2,
- "killOperations for {remote} failed with {status}",
+ "killOperations for {remote} failed with {error}",
"remote"_attr = host.toString(),
"error"_attr = args.response);
return;
@@ -220,8 +220,9 @@ std::vector<RemoteCursor> establishCursors(OperationContext* opCtx,
}
LOGV2(4625501,
- "ARS failed with {status}, attempting to clean up {nRemotes} remote operations",
- "status"_attr = ex.toStatus(),
+ "ARS failed with {error}, attempting to clean up {nRemotes} remote operations",
+ "ARS failed. Attempting to clean up remote operations",
+ "error"_attr = ex.toStatus(),
"nRemotes"_attr = remotesToClean.size());
// Check whether we have any remote operations to kill.
@@ -240,8 +241,9 @@ std::vector<RemoteCursor> establishCursors(OperationContext* opCtx,
invariant(0);
uasserted(ErrorCodes::CallbackCanceled, "YOOO");
LOGV2_WARNING(48038,
- "Failed to schedule remote cursor cleanup: {status}",
- "status"_attr = args.status);
+ "Failed to schedule remote cursor cleanup: {error}",
+ "Failed to schedule remote cursor cleanup",
+ "error"_attr = args.status);
return;
}
killOpOnShards(
diff --git a/src/mongo/scripting/mozjs/cursor_handle.cpp b/src/mongo/scripting/mozjs/cursor_handle.cpp
index be97580db07..80be58b5370 100644
--- a/src/mongo/scripting/mozjs/cursor_handle.cpp
+++ b/src/mongo/scripting/mozjs/cursor_handle.cpp
@@ -78,9 +78,10 @@ void CursorHandleInfo::finalize(js::FreeOp* fop, JSObject* obj) {
try {
LOGV2_INFO(22782,
- "Failed to kill cursor {cursorId} due to {status}",
+ "Failed to kill cursor {cursorId} due to {error}",
+ "Failed to kill cursor",
"cursorId"_attr = cursorId,
- "status"_attr = status);
+ "error"_attr = status);
} catch (...) {
// This is here in case logging fails.
}
diff --git a/src/mongo/scripting/mozjs/session.cpp b/src/mongo/scripting/mozjs/session.cpp
index 16d2906edf2..c7202314799 100644
--- a/src/mongo/scripting/mozjs/session.cpp
+++ b/src/mongo/scripting/mozjs/session.cpp
@@ -153,9 +153,10 @@ void SessionInfo::finalize(js::FreeOp* fop, JSObject* obj) {
try {
LOGV2_INFO(22791,
- "Failed to end session {lsid} due to {status}",
+ "Failed to end session {lsid} due to {error}",
+ "Failed to end session",
"lsid"_attr = lsid,
- "status"_attr = status);
+ "error"_attr = status);
} catch (...) {
// This is here in case logging fails.
}
diff --git a/src/mongo/transport/baton_asio_linux.h b/src/mongo/transport/baton_asio_linux.h
index 57762c9af4b..becc205cd2b 100644
--- a/src/mongo/transport/baton_asio_linux.h
+++ b/src/mongo/transport/baton_asio_linux.h
@@ -87,12 +87,13 @@ class TransportLayerASIO::BatonASIO : public NetworkingBaton {
struct EventFDHolder {
EventFDHolder() : fd(::eventfd(0, EFD_CLOEXEC)) {
if (fd < 0) {
- auto e = errno;
+ auto savedErrno = errno;
std::string reason = str::stream()
- << "error in creating eventfd: " << errnoWithDescription(e);
+ << "error in creating eventfd: " << errnoWithDescription(savedErrno);
- auto code = (e == EMFILE || e == ENFILE) ? ErrorCodes::TooManyFilesOpen
- : ErrorCodes::UnknownError;
+ auto code = (savedErrno == EMFILE || savedErrno == ENFILE)
+ ? ErrorCodes::TooManyFilesOpen
+ : ErrorCodes::UnknownError;
uasserted(code, reason);
}
@@ -348,14 +349,16 @@ public:
rval = ::poll(_pollSet.data(),
_pollSet.size(),
deadline ? Milliseconds(*deadline - now).count() : -1);
+ auto savedErrno = errno;
lk.lock();
_inPoll = false;
// If poll failed, it better be in EINTR
- if (rval < 0 && errno != EINTR) {
+ if (rval < 0 && savedErrno != EINTR) {
LOGV2_FATAL(50834,
- "error in poll: {errnoWithDescription_errno}",
- "errnoWithDescription_errno"_attr = errnoWithDescription(errno));
+ "error in poll: {error}",
+ "error in poll",
+ "error"_attr = errnoWithDescription(savedErrno));
}
}
diff --git a/src/mongo/transport/ismaster_metrics.cpp b/src/mongo/transport/ismaster_metrics.cpp
index 98e772ebfb7..0a68a46c4aa 100644
--- a/src/mongo/transport/ismaster_metrics.cpp
+++ b/src/mongo/transport/ismaster_metrics.cpp
@@ -26,7 +26,6 @@
* exception statement from all source files in the program, then also delete
* it in the license file.
*/
-#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kNetwork
#include "mongo/transport/ismaster_metrics.h"
namespace mongo {
@@ -94,4 +93,4 @@ void InExhaustIsMaster::setInExhaustIsMaster(bool inExhaustIsMaster) {
_inExhaustIsMaster = inExhaustIsMaster;
}
-} // namespace mongo \ No newline at end of file
+} // namespace mongo
diff --git a/src/mongo/transport/message_compressor_manager.cpp b/src/mongo/transport/message_compressor_manager.cpp
index 1341a4323f0..7eba1b38fc6 100644
--- a/src/mongo/transport/message_compressor_manager.cpp
+++ b/src/mongo/transport/message_compressor_manager.cpp
@@ -97,8 +97,9 @@ StatusWith<Message> MessageCompressorManager::compressMessage(
LOGV2_DEBUG(22925,
3,
- "Compressing message with {compressor_getName}",
- "compressor_getName"_attr = compressor->getName());
+ "Compressing message with {compressor}",
+ "Compressing message",
+ "compressor"_attr = compressor->getName());
auto inputHeader = msg.header();
size_t bufferSize = compressor->getMaxCompressedSize(msg.dataSize()) +
@@ -112,6 +113,8 @@ StatusWith<Message> MessageCompressorManager::compressMessage(
3,
"Compressed message would be larger than {MaxMessageSizeBytes}, returning "
"original uncompressed message",
+ "Compressed message would be larger than maximum allowed, returning original "
+ "uncompressed message",
"MaxMessageSizeBytes"_attr = MaxMessageSizeBytes);
return {msg};
}
@@ -160,8 +163,9 @@ StatusWith<Message> MessageCompressorManager::decompressMessage(const Message& m
LOGV2_DEBUG(22927,
3,
- "Decompressing message with {compressor_getName}",
- "compressor_getName"_attr = compressor->getName());
+ "Decompressing message with {compressor}",
+ "Decompressing message",
+ "compressor"_attr = compressor->getName());
if (compressionHeader.uncompressedSize < 0) {
return {ErrorCodes::BadValue, "Decompressed message would be negative in size"};
@@ -211,7 +215,11 @@ void MessageCompressorManager::clientBegin(BSONObjBuilder* output) {
BSONArrayBuilder sub(output->subarrayStart("compression"));
for (const auto e : _registry->getCompressorNames()) {
- LOGV2_DEBUG(22929, 3, "Offering {e} compressor to server", "e"_attr = e);
+ LOGV2_DEBUG(22929,
+ 3,
+ "Offering {compressor} compressor to server",
+ "Offering compressor to server",
+ "compressor"_attr = e);
sub.append(e);
}
sub.doneFast();
@@ -239,8 +247,11 @@ void MessageCompressorManager::clientFinish(const BSONObj& input) {
for (const auto& e : elem.Obj()) {
auto algoName = e.checkAndGetStringData();
auto ret = _registry->getCompressor(algoName);
- LOGV2_DEBUG(
- 22933, 3, "Adding compressor {ret_getName}", "ret_getName"_attr = ret->getName());
+ LOGV2_DEBUG(22933,
+ 3,
+ "Adding compressor {compressor}",
+ "Adding compressor",
+ "compressor"_attr = ret->getName());
_negotiated.push_back(ret);
}
}
@@ -285,11 +296,18 @@ void MessageCompressorManager::serverNegotiate(const BSONObj& input, BSONObjBuil
// If the MessageCompressorRegistry knows about a compressor with that name, then it is
// valid and we add it to our list of negotiated compressors.
if ((cur = _registry->getCompressor(curName))) {
- LOGV2_DEBUG(
- 22937, 3, "{cur_getName} is supported", "cur_getName"_attr = cur->getName());
+ LOGV2_DEBUG(22937,
+ 3,
+ "{compressor} is supported",
+ "supported compressor",
+ "compressor"_attr = cur->getName());
_negotiated.push_back(cur);
} else { // Otherwise the compressor is not supported and we skip over it.
- LOGV2_DEBUG(22938, 3, "{curName} is not supported", "curName"_attr = curName);
+ LOGV2_DEBUG(22938,
+ 3,
+ "{compressor} is not supported",
+ "compressor is not supported",
+ "compressor"_attr = curName);
}
}
diff --git a/src/mongo/transport/message_compressor_snappy.cpp b/src/mongo/transport/message_compressor_snappy.cpp
index 5b5c7852306..56384ce8854 100644
--- a/src/mongo/transport/message_compressor_snappy.cpp
+++ b/src/mongo/transport/message_compressor_snappy.cpp
@@ -27,8 +27,6 @@
* it in the license file.
*/
-#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kNetwork
-
#include "mongo/platform/basic.h"
#include <memory>
diff --git a/src/mongo/transport/message_compressor_zlib.cpp b/src/mongo/transport/message_compressor_zlib.cpp
index a5eb13a3389..599a2f5e40f 100644
--- a/src/mongo/transport/message_compressor_zlib.cpp
+++ b/src/mongo/transport/message_compressor_zlib.cpp
@@ -27,8 +27,6 @@
* it in the license file.
*/
-#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kNetwork
-
#include "mongo/platform/basic.h"
#include <memory>
diff --git a/src/mongo/transport/message_compressor_zstd.cpp b/src/mongo/transport/message_compressor_zstd.cpp
index 4699f5748ff..79ce2a5b0f1 100644
--- a/src/mongo/transport/message_compressor_zstd.cpp
+++ b/src/mongo/transport/message_compressor_zstd.cpp
@@ -27,8 +27,6 @@
* it in the license file.
*/
-#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kNetwork
-
#include "mongo/platform/basic.h"
#include <memory>
diff --git a/src/mongo/transport/service_entry_point_impl.cpp b/src/mongo/transport/service_entry_point_impl.cpp
index db8cec48516..7c7d39cf6e9 100644
--- a/src/mongo/transport/service_entry_point_impl.cpp
+++ b/src/mongo/transport/service_entry_point_impl.cpp
@@ -94,10 +94,11 @@ ServiceEntryPointImpl::ServiceEntryPointImpl(ServiceContext* svcCtx) : _svcCtx(s
LOGV2_DEBUG(22940,
1,
- "fd limit hard:{limit_rlim_max} soft:{limit_rlim_cur} max conn: {max}",
- "limit_rlim_max"_attr = limit.rlim_max,
- "limit_rlim_cur"_attr = limit.rlim_cur,
- "max"_attr = max);
+ "fd limit hard:{hard} soft:{soft} max conn: {conn}",
+ "file descriptor and connection resource limits",
+ "hard"_attr = limit.rlim_max,
+ "soft"_attr = limit.rlim_cur,
+ "conn"_attr = max);
return std::min(max, serverGlobalParams.maxConns);
#endif
@@ -107,8 +108,9 @@ ServiceEntryPointImpl::ServiceEntryPointImpl(ServiceContext* svcCtx) : _svcCtx(s
if (supportedMax < serverGlobalParams.maxConns &&
serverGlobalParams.maxConns != DEFAULT_MAX_CONN) {
LOGV2(22941,
- " --maxConns too high, can only handle {supportedMax}",
- "supportedMax"_attr = supportedMax);
+ " --maxConns too high, can only handle {limit}",
+ " --maxConns too high",
+ "limit"_attr = supportedMax);
}
_maxNumConnections = supportedMax;
@@ -237,8 +239,9 @@ bool ServiceEntryPointImpl::shutdown(Milliseconds timeout) {
while (timeSpent < timeout &&
!_shutdownCondition.wait_for(lk, checkInterval.toSystemDuration(), noWorkersLeft)) {
LOGV2(22945,
- "shutdown: still waiting on {numOpenSessions} active workers to drain... ",
- "numOpenSessions"_attr = numOpenSessions());
+ "shutdown: still waiting on {workers} active workers to drain... ",
+ "shutdown: still waiting on active workers to drain... ",
+ "workers"_attr = numOpenSessions());
timeSpent += checkInterval;
}
@@ -246,10 +249,12 @@ bool ServiceEntryPointImpl::shutdown(Milliseconds timeout) {
if (result) {
LOGV2(22946, "shutdown: no running workers found...");
} else {
- LOGV2(22947,
- "shutdown: exhausted grace period for{numOpenSessions} active workers to "
- "drain; continuing with shutdown... ",
- "numOpenSessions"_attr = numOpenSessions());
+ LOGV2(
+ 22947,
+ "shutdown: exhausted grace period for {workers} active workers to "
+ "drain; continuing with shutdown...",
+ "shutdown: exhausted grace period active workers to drain; continuing with shutdown...",
+ "workers"_attr = numOpenSessions());
}
return result;
}
diff --git a/src/mongo/transport/service_entry_point_utils.cpp b/src/mongo/transport/service_entry_point_utils.cpp
index f1955f31715..bc2f4722592 100644
--- a/src/mongo/transport/service_entry_point_utils.cpp
+++ b/src/mongo/transport/service_entry_point_utils.cpp
@@ -85,12 +85,16 @@ Status launchServiceWorkerThread(std::function<void()> task) {
int failed = pthread_attr_setstacksize(&attrs, stackSizeToSet);
if (failed) {
const auto ewd = errnoWithDescription(failed);
- LOGV2_WARNING(22949, "pthread_attr_setstacksize failed: {ewd}", "ewd"_attr = ewd);
+ LOGV2_WARNING(22949,
+ "pthread_attr_setstacksize failed: {error}",
+ "pthread_attr_setstacksize failed",
+ "error"_attr = ewd);
}
} else if (limits.rlim_cur < 1024 * 1024) {
LOGV2_WARNING(22950,
- "Stack size set to {limits_rlim_cur_1024}KB. We suggest 1MB",
- "limits_rlim_cur_1024"_attr = (limits.rlim_cur / 1024));
+ "Stack size set to {stackSizeKiB}KiB. We suggest 1024KiB",
+ "Stack size not set to suggested 1024KiB",
+ "stackSizeKiB"_attr = (limits.rlim_cur / 1024));
}
// Wrap the user-specified `task` so it runs with an installed `sigaltstack`.
@@ -109,8 +113,9 @@ Status launchServiceWorkerThread(std::function<void()> task) {
if (failed) {
LOGV2(22948,
- "pthread_create failed: {errnoWithDescription_failed}",
- "errnoWithDescription_failed"_attr = errnoWithDescription(failed));
+ "pthread_create failed: {errno}",
+ "pthread_create failed",
+ "error"_attr = errnoWithDescription(failed));
throw std::system_error(
std::make_error_code(std::errc::resource_unavailable_try_again));
}
diff --git a/src/mongo/transport/service_executor_adaptive.cpp b/src/mongo/transport/service_executor_adaptive.cpp
index 3fa82fcb293..156f1e3d74a 100644
--- a/src/mongo/transport/service_executor_adaptive.cpp
+++ b/src/mongo/transport/service_executor_adaptive.cpp
@@ -79,9 +79,11 @@ struct ServerParameterOptions : public ServiceExecutorAdaptive::Options {
value = ProcessInfo::getNumAvailableCores() / 2;
value = std::max(value, 2);
adaptiveServiceExecutorReservedThreads.store(value);
- LOGV2(22951,
- "No thread count configured for executor. Using number of cores / 2: {value}",
- "value"_attr = value);
+ LOGV2(
+ 22951,
+ "No thread count configured for executor. Using number of cores / 2: {threadCount}",
+ "No thread count configured for executor. Using number of cores / 2",
+ "threadCount"_attr = value);
}
return value;
}
@@ -103,11 +105,13 @@ struct ServerParameterOptions : public ServiceExecutorAdaptive::Options {
Microseconds value{adaptiveServiceExecutorMaxQueueLatencyMicros.load()};
if (value < minTimerResolution) {
LOGV2(22952,
- "Target MaxQueueLatencyMicros ({value}) is less than minimum timer resolution of "
- "OS ({minTimerResolution}). Using {minTimerResolution2}",
- "value"_attr = value,
- "minTimerResolution"_attr = minTimerResolution,
- "minTimerResolution2"_attr = minTimerResolution);
+ "Target MaxQueueLatencyMicros ({targetMaxQueLatencyMicros}) is less than minimum "
+ "timer resolution of "
+ "OS ({OSMinTimerResolution}). Using {OSMinTimerResolution}",
+ "Target MaxQueueLatencyMicros is less than the OS minimum timer resolution. "
+ "Using the OS minimum",
+ "targetMaxQueLatencyMicros"_attr = value,
+ "OSMinTimerResolution"_attr = minTimerResolution);
value = duration_cast<Microseconds>(minTimerResolution) + Microseconds{1};
adaptiveServiceExecutorMaxQueueLatencyMicros.store(value.count());
}
@@ -355,8 +359,10 @@ void ServiceExecutorAdaptive::_controllerThreadRoutine() {
stuckThreadTimeout /= 2;
stuckThreadTimeout = std::max(Milliseconds{10}, stuckThreadTimeout);
LOGV2(22953,
- "Detected blocked worker threads, starting new thread to unblock service "
+ "Detected blocked worker threads. starting new thread to unblock service "
"executor. Stuck thread timeout now: {stuckThreadTimeout}",
+ "Detected blocked worker threads. starting new thread to unblock service "
+ "executor",
"stuckThreadTimeout"_attr = stuckThreadTimeout);
_startWorkerThread(ThreadCreationReason::kStuckDetection);
@@ -373,6 +379,7 @@ void ServiceExecutorAdaptive::_controllerThreadRoutine() {
LOGV2_DEBUG(22954,
1,
"Increasing stuck thread timeout to {newStuckThreadTimeout}",
+ "Increasing stuck thread timeout",
"newStuckThreadTimeout"_attr = newStuckThreadTimeout);
stuckThreadTimeout = newStuckThreadTimeout;
}
@@ -381,10 +388,9 @@ void ServiceExecutorAdaptive::_controllerThreadRoutine() {
auto threadsRunning = _threadsRunning.load();
if (threadsRunning < _config->reservedThreads()) {
LOGV2(22955,
- "Starting {config_reservedThreads_threadsRunning} to replenish reserved worker "
- "threads",
- "config_reservedThreads_threadsRunning"_attr =
- _config->reservedThreads() - threadsRunning);
+ "Starting {numThreads} to replenish reserved worker",
+ "Starting threads to replenish reserved worker",
+ "numThreads"_attr = _config->reservedThreads() - threadsRunning);
while (_threadsRunning.load() < _config->reservedThreads()) {
_startWorkerThread(ThreadCreationReason::kReserveMinimum);
}
@@ -454,8 +460,9 @@ void ServiceExecutorAdaptive::_startWorkerThread(ThreadCreationReason reason) {
if (!launchResult.isOK()) {
LOGV2_WARNING(22959,
- "Failed to launch new worker thread: {launchResult}",
- "launchResult"_attr = launchResult);
+ "Failed to launch new worker thread: {error}",
+ "Failed to launch new worker thread",
+ "error"_attr = launchResult);
lk.lock();
_threadsPending.subtractAndFetch(1);
_threadsRunning.subtractAndFetch(1);
@@ -541,7 +548,10 @@ void ServiceExecutorAdaptive::_workerThreadRoutine(
setThreadName(threadName);
}
- LOGV2(22957, "Started new database worker thread {threadId}", "threadId"_attr = threadId);
+ LOGV2(22957,
+ "Started new database worker thread {id}",
+ "Started new database worker thread",
+ "id"_attr = threadId);
bool guardThreadsRunning = true;
const auto guard = makeGuard([this, &guardThreadsRunning, state] {
@@ -619,10 +629,12 @@ void ServiceExecutorAdaptive::_workerThreadRoutine(
!_threadsRunning.compareAndSwap(&runningThreads, runningThreads - 1));
if (terminateThread) {
LOGV2(22958,
- "Thread was only executing tasks {pctExecuting}% over the last {runTime}. "
- "Exiting thread.",
- "pctExecuting"_attr = pctExecuting,
- "runTime"_attr = runTime);
+ "Thread exiting because utiliaztion {utilizationPct}% was under the idle "
+ "threshold in the runtime window {runtimeWindow}",
+ "Thread exiting because utiliaztion was under the idle threshold in the runtime "
+ "window",
+ "unilizationPercent"_attr = pctExecuting,
+ "runtimeWindow"_attr = runTime);
// Because we've already modified _threadsRunning, make sure the thread guard also
// doesn't do it.
diff --git a/src/mongo/transport/service_executor_adaptive_test.cpp b/src/mongo/transport/service_executor_adaptive_test.cpp
index 711e9cd1a5e..b47a73089a1 100644
--- a/src/mongo/transport/service_executor_adaptive_test.cpp
+++ b/src/mongo/transport/service_executor_adaptive_test.cpp
@@ -227,7 +227,10 @@ TEST_F(ServiceExecutorAdaptiveFixture, TestStuckThreads) {
waitFor.store(6);
auto tasks = waitFor.load() / 2;
- LOGV2(22968, "Scheduling {tasks} blocked tasks", "tasks"_attr = tasks);
+ LOGV2(22968,
+ "Scheduling {tasks} blocked tasks",
+ "Scheduling blocked tasks",
+ "tasks"_attr = tasks);
for (auto i = 0; i < tasks; i++) {
ASSERT_OK(exec->schedule(blockedTask,
ServiceExecutor::kEmptyFlags,
diff --git a/src/mongo/transport/service_executor_reserved.cpp b/src/mongo/transport/service_executor_reserved.cpp
index 1107a61a99f..112721e95a2 100644
--- a/src/mongo/transport/service_executor_reserved.cpp
+++ b/src/mongo/transport/service_executor_reserved.cpp
@@ -78,7 +78,10 @@ Status ServiceExecutorReserved::start() {
}
Status ServiceExecutorReserved::_startWorker() {
- LOGV2(22978, "Starting new worker thread for {name} service executor", "name"_attr = _name);
+ LOGV2(22978,
+ "Starting new worker thread for {name} service executor",
+ "Starting new worker thread for service executor",
+ "name"_attr = _name);
return launchServiceWorkerThread([this] {
stdx::unique_lock<Latch> lk(_mutex);
_numRunningWorkerThreads.addAndFetch(1);
@@ -116,8 +119,9 @@ Status ServiceExecutorReserved::_startWorker() {
auto threadStartStatus = _startWorker();
if (!threadStartStatus.isOK()) {
LOGV2_WARNING(22981,
- "Could not start new reserve worker thread: {threadStartStatus}",
- "threadStartStatus"_attr = threadStartStatus);
+ "Could not start new reserve worker thread: {error}",
+ "Could not start new reserve worker thread",
+ "error"_attr = threadStartStatus);
}
}
@@ -136,8 +140,11 @@ Status ServiceExecutorReserved::_startWorker() {
}
}
- LOGV2_DEBUG(
- 22979, 3, "Exiting worker thread in {name} service executor", "name"_attr = _name);
+ LOGV2_DEBUG(22979,
+ 3,
+ "Exiting worker thread in {name} service executor",
+ "Exiting worker thread in service executor",
+ "name"_attr = _name);
});
}
diff --git a/src/mongo/transport/service_executor_test.cpp b/src/mongo/transport/service_executor_test.cpp
index 7a8cf91a96a..684efc24070 100644
--- a/src/mongo/transport/service_executor_test.cpp
+++ b/src/mongo/transport/service_executor_test.cpp
@@ -103,8 +103,9 @@ public:
_ioContext.run_for(time.toSystemDuration());
} catch (...) {
LOGV2_FATAL(50476,
- "Uncaught exception in reactor: {exceptionToStatus}",
- "exceptionToStatus"_attr = exceptionToStatus());
+ "Uncaught exception in reactor: {error}",
+ "Uncaught exception in reactor",
+ "error"_attr = exceptionToStatus());
}
}
diff --git a/src/mongo/transport/service_state_machine.cpp b/src/mongo/transport/service_state_machine.cpp
index 3f1ad8a05bd..31019cda4cb 100644
--- a/src/mongo/transport/service_state_machine.cpp
+++ b/src/mongo/transport/service_state_machine.cpp
@@ -383,9 +383,10 @@ void ServiceStateMachine::_sourceCallback(Status status) {
LOGV2_DEBUG(
22986,
2,
- "Session from {remote} encountered a network error during SourceMessage: {status}",
+ "Session from {remote} encountered a network error during SourceMessage: {error}",
+ "Session from remote encountered a network error during SourceMessage",
"remote"_attr = remote,
- "status"_attr = status);
+ "error"_attr = status);
_state.store(State::EndSession);
} else if (status == TransportLayer::TicketSessionClosedStatus) {
// Our session may have been closed internally.
@@ -396,11 +397,12 @@ void ServiceStateMachine::_sourceCallback(Status status) {
_state.store(State::EndSession);
} else {
LOGV2(22988,
- "Error receiving request from client: {status}. Ending connection from {remote} "
- "(connection id: {session_id})",
- "status"_attr = status,
+ "Error receiving request from client: {error}. Ending connection from {remote} "
+ "(connection id: {connnection_id})",
+ "Error receiving request from client. Ending connection from remote",
+ "error"_attr = status,
"remote"_attr = remote,
- "session_id"_attr = _session()->id());
+ "connection_id"_attr = _session()->id());
_state.store(State::EndSession);
}
@@ -423,11 +425,12 @@ void ServiceStateMachine::_sinkCallback(Status status) {
// scheduleNext() to unwind the stack and do the next step.
if (!status.isOK()) {
LOGV2(22989,
- "Error sending response to client: {status}. Ending connection from {session_remote} "
- "(connection id: {session_id})",
- "status"_attr = status,
- "session_remote"_attr = _session()->remote(),
- "session_id"_attr = _session()->id());
+ "Error sending response to client: {error}. Ending connection from {remote} "
+ "(connection id: {connection_id})",
+ "Error sending response to client. Ending connection from remote",
+ "error"_attr = status,
+ "remote"_attr = _session()->remote(),
+ "connection_id"_attr = _session()->id());
_state.store(State::EndSession);
return _runNextInGuard(std::move(guard));
} else if (_inExhaust) {
@@ -571,8 +574,9 @@ void ServiceStateMachine::_runNextInGuard(ThreadGuard guard) {
return;
} catch (const DBException& e) {
LOGV2(22990,
- "DBException handling request, closing client connection: {e}",
- "e"_attr = redact(e));
+ "DBException handling request, closing client connection: {error}",
+ "DBException handling request, closing client connection",
+ "error"_attr = redact(e));
}
// No need to catch std::exception, as std::terminate will be called when the exception bubbles
// to the top of the stack
@@ -637,8 +641,9 @@ void ServiceStateMachine::terminateIfTagsDontMatch(transport::Session::TagMask t
// set, then skip the termination check.
if ((sessionTags & tags) || (sessionTags & transport::Session::kPending)) {
LOGV2(22991,
- "Skip closing connection for connection # {session_id}",
- "session_id"_attr = _session()->id());
+ "Skip closing connection for connection # {connection_id}",
+ "Skip closing connection for connection",
+ "connection_id"_attr = _session()->id());
return;
}
@@ -658,8 +663,9 @@ void ServiceStateMachine::_terminateAndLogIfError(Status status) {
if (!status.isOK()) {
LOGV2_WARNING_OPTIONS(22993,
{logv2::LogComponent::kExecutor},
- "Terminating session due to error: {status}",
- "status"_attr = status);
+ "Terminating session due to error: {error}",
+ "Terminating session due to error",
+ "error"_attr = status);
terminate();
}
}
@@ -680,8 +686,9 @@ void ServiceStateMachine::_cleanupExhaustResources() noexcept try {
}
} catch (const DBException& e) {
LOGV2(22992,
- "Error cleaning up resources for exhaust requests: {e_toStatus}",
- "e_toStatus"_attr = e.toStatus());
+ "Error cleaning up resources for exhaust requests: {error}",
+ "Error cleaning up resources for exhaust requests",
+ "error"_attr = e.toStatus());
}
void ServiceStateMachine::_cleanupSession(ThreadGuard guard) {
diff --git a/src/mongo/transport/session_asio.h b/src/mongo/transport/session_asio.h
index 7e9ee62122b..bf19ccc31da 100644
--- a/src/mongo/transport/session_asio.h
+++ b/src/mongo/transport/session_asio.h
@@ -146,8 +146,9 @@ public:
getSocket().shutdown(GenericSocket::shutdown_both, ec);
if ((ec) && (ec != asio::error::not_connected)) {
LOGV2_ERROR(23841,
- "Error shutting down socket: {ec_message}",
- "ec_message"_attr = ec.message());
+ "Error shutting down socket: {error}",
+ "Error shutting down socket",
+ "error"_attr = ec.message());
}
}
}
@@ -188,6 +189,7 @@ public:
LOGV2_DEBUG(4615608,
3,
"Cancelling outstanding I/O operations on connection to {remote}",
+ "Cancelling outstanding I/O operations on connection to remote",
"remote"_attr = _remote);
if (baton && baton->networking() && baton->networking()->cancelSession(*this)) {
// If we have a baton, it was for networking, and it owned our session, then we're done.
@@ -212,8 +214,9 @@ public:
if (!swPollEvents.isOK()) {
if (swPollEvents != ErrorCodes::NetworkTimeout) {
LOGV2_WARNING(4615609,
- "Failed to poll socket for connectivity check: {reason}",
- "reason"_attr = swPollEvents.getStatus());
+ "Failed to poll socket for connectivity check: {error}",
+ "Failed to poll socket for connectivity check",
+ "error"_attr = swPollEvents.getStatus());
return false;
}
return true;
@@ -227,8 +230,9 @@ public:
return true;
} else if (size == -1) {
LOGV2_WARNING(4615610,
- "Failed to check socket connectivity: {errDesc}",
- "errDesc"_attr = errnoWithDescription(errno));
+ "Failed to check socket connectivity: {error}",
+ "Failed to check socket connectivity",
+ "error"_attr = errnoWithDescription(errno));
}
// If size == 0 then we got disconnected and we should return false.
}
@@ -394,6 +398,7 @@ private:
const auto str = sb.str();
LOGV2(4615638,
"recv(): message msgLen {msgLen} is invalid. Min: {min} Max: {max}",
+ "recv(): message mstLen is invalid.",
"msgLen"_attr = msgLen,
"min"_attr = kHeaderSize,
"max"_attr = MaxMessageSizeBytes);
@@ -706,9 +711,10 @@ private:
if (!sslGlobalParams.disableNonSSLConnectionLogging &&
_tl->_sslMode() == SSLParams::SSLMode_preferSSL) {
LOGV2(23838,
- "SSL mode is set to 'preferred' and connection {id} to {remote} is not using "
- "SSL.",
- "id"_attr = id(),
+ "SSL mode is set to 'preferred' and connection {connectionId} to {remote} is "
+ "not using SSL.",
+ "SSL mode is set to 'preferred' and connection to remote is not using SSL.",
+ "connectionId"_attr = id(),
"remote"_attr = remote());
}
return Future<bool>::makeReady(false);
diff --git a/src/mongo/transport/transport_layer_asio.cpp b/src/mongo/transport/transport_layer_asio.cpp
index 6d2401a4c8b..377659c5d8e 100644
--- a/src/mongo/transport/transport_layer_asio.cpp
+++ b/src/mongo/transport/transport_layer_asio.cpp
@@ -140,7 +140,11 @@ private:
armTimer();
return _timer->async_wait(UseFuture{}).tapError([timer = _timer](const Status& status) {
if (status != ErrorCodes::CallbackCanceled) {
- LOGV2_DEBUG(23011, 2, "Timer received error: {status}", "status"_attr = status);
+ LOGV2_DEBUG(23011,
+ 2,
+ "Timer received error: {error}",
+ "Timer received error",
+ "error"_attr = status);
}
});
@@ -634,10 +638,10 @@ Future<SessionHandle> TransportLayerASIO::asyncConnect(HostAndPort peer,
Date_t timeAfter = Date_t::now();
if (timeAfter - timeBefore > kSlowOperationThreshold) {
LOGV2_WARNING(23019,
- "DNS resolution while connecting to {connector_peer} took "
- "{timeAfter_timeBefore}",
- "connector_peer"_attr = connector->peer,
- "timeAfter_timeBefore"_attr = timeAfter - timeBefore);
+ "DNS resolution while connecting to {peer} took {duration}",
+ "DNS resolution while connecting to peer was slow",
+ "peer"_attr = connector->peer,
+ "duration"_attr = timeAfter - timeBefore);
networkCounter.incrementNumSlowDNSOperations();
}
@@ -894,8 +898,9 @@ Status TransportLayerASIO::setup() {
resolver.resolve(HostAndPort(ip, _listenerPort), _listenerOptions.enableIPv6);
if (!swAddrs.isOK()) {
LOGV2_WARNING(23021,
- "Found no addresses for {swAddrs_getStatus}",
- "swAddrs_getStatus"_attr = swAddrs.getStatus());
+ "Found no addresses for {peer}",
+ "Found no addresses for peer",
+ "peer"_attr = swAddrs.getStatus());
continue;
}
auto& addrs = swAddrs.getValue();
@@ -906,11 +911,11 @@ Status TransportLayerASIO::setup() {
#ifndef _WIN32
if (addr.family() == AF_UNIX) {
if (::unlink(addr.toString().c_str()) == -1 && errno != ENOENT) {
- LOGV2_ERROR(
- 23024,
- "Failed to unlink socket file {addr_c_str} {errnoWithDescription_errno}",
- "addr_c_str"_attr = addr.toString().c_str(),
- "errnoWithDescription_errno"_attr = errnoWithDescription(errno));
+ LOGV2_ERROR(23024,
+ "Failed to unlink socket file {path} {error}",
+ "Failed to unlink socket file",
+ "path"_attr = addr.toString().c_str(),
+ "error"_attr = errnoWithDescription(errno));
fassertFailedNoTrace(40486);
}
}
@@ -930,8 +935,9 @@ Status TransportLayerASIO::setup() {
if (errno == EAFNOSUPPORT && _listenerOptions.enableIPv6 && addr.family() == AF_INET6 &&
addr.toString() == bindAllIPv6Addr) {
LOGV2_WARNING(4206501,
- "Failed to bind to {bind_addr} as the platform does not support ipv6",
- "bind_addr"_attr = addr.toString());
+ "Failed to bind to address as the platform does not support ipv6",
+ "Failed to bind to {address} as the platform does not support ipv6",
+ "address"_attr = addr.toString());
continue;
}
@@ -967,9 +973,10 @@ Status TransportLayerASIO::setup() {
if (addr.family() == AF_UNIX) {
if (::chmod(addr.toString().c_str(), serverGlobalParams.unixSocketPermissions) == -1) {
LOGV2_ERROR(23026,
- "Failed to chmod socket file {addr_c_str} {errnoWithDescription_errno}",
- "addr_c_str"_attr = addr.toString().c_str(),
- "errnoWithDescription_errno"_attr = errnoWithDescription(errno));
+ "Failed to chmod socket file {path} {error}",
+ "Failed to chmod socket file",
+ "path"_attr = addr.toString().c_str(),
+ "error"_attr = errnoWithDescription(errno));
fassertFailedNoTrace(40487);
}
}
@@ -1045,9 +1052,10 @@ void TransportLayerASIO::_runListener() noexcept {
acceptor.second.listen(serverGlobalParams.listenBacklog, ec);
if (ec) {
LOGV2_FATAL(31339,
- "Error listening for new connections on {acceptor_first}: {ec_message}",
- "acceptor_first"_attr = acceptor.first,
- "ec_message"_attr = ec.message());
+ "Error listening for new connections on {listenAddress}: {error}",
+ "Error listening for new connections on listen address",
+ "listenAddrs"_attr = acceptor.first,
+ "error"_attr = ec.message());
}
_acceptConnection(acceptor.second);
@@ -1082,13 +1090,15 @@ void TransportLayerASIO::_runListener() noexcept {
auto& addr = acceptor.first;
if (addr.getType() == AF_UNIX && !addr.isAnonymousUNIXSocket()) {
auto path = addr.getAddr();
- LOGV2(23017, "removing socket file: {path}", "path"_attr = path);
+ LOGV2(
+ 23017, "removing socket file: {path}", "removing socket file", "path"_attr = path);
if (::unlink(path.c_str()) != 0) {
const auto ewd = errnoWithDescription();
LOGV2_WARNING(23022,
- "Unable to remove UNIX socket {path}: {ewd}",
+ "Unable to remove UNIX socket {path}: {error}",
+ "Unable to remove UNIX socket",
"path"_attr = path,
- "ewd"_attr = ewd);
+ "error"_attr = ewd);
}
}
}
@@ -1162,11 +1172,10 @@ void TransportLayerASIO::_acceptConnection(GenericAcceptor& acceptor) {
if (ec) {
LOGV2(23018,
- "Error accepting new connection on "
- "{endpointToHostAndPort_acceptor_local_endpoint}: {ec_message}",
- "endpointToHostAndPort_acceptor_local_endpoint"_attr =
- endpointToHostAndPort(acceptor.local_endpoint()),
- "ec_message"_attr = ec.message());
+ "Error accepting new connection on {localEndpoint}: {error}",
+ "Error accepting new connection on local endpoint",
+ "localEndpoint"_attr = endpointToHostAndPort(acceptor.local_endpoint()),
+ "error"_attr = ec.message());
_acceptConnection(acceptor);
return;
}
@@ -1185,7 +1194,10 @@ void TransportLayerASIO::_acceptConnection(GenericAcceptor& acceptor) {
new ASIOSession(this, std::move(peerSocket), true));
_sep->startSession(std::move(session));
} catch (const DBException& e) {
- LOGV2_WARNING(23023, "Error accepting new connection {e}", "e"_attr = e);
+ LOGV2_WARNING(23023,
+ "Error accepting new connection: {error}",
+ "Error accepting new connection",
+ "error"_attr = e);
}
_acceptConnection(acceptor);
diff --git a/src/mongo/transport/transport_layer_asio_integration_test.cpp b/src/mongo/transport/transport_layer_asio_integration_test.cpp
index fe3dc333687..2b8fea1f8f5 100644
--- a/src/mongo/transport/transport_layer_asio_integration_test.cpp
+++ b/src/mongo/transport/transport_layer_asio_integration_test.cpp
@@ -59,7 +59,7 @@ TEST(TransportLayerASIO, HTTPRequestGetsHTTPError) {
asio::ip::tcp::resolver resolver(ioContext);
asio::ip::tcp::socket socket(ioContext);
- LOGV2(23028, "Connecting to {server}", "server"_attr = server);
+ LOGV2(23028, "Connecting to {server}", "Connecting to server", "server"_attr = server);
auto resolverIt = resolver.resolve(server.host(), std::to_string(server.port()));
asio::connect(socket, resolverIt);
@@ -78,7 +78,10 @@ TEST(TransportLayerASIO, HTTPRequestGetsHTTPError) {
auto size = asio::read(socket, asio::buffer(httpRespBuf.data(), httpRespBuf.size()), ec);
StringData httpResp(httpRespBuf.data(), size);
- LOGV2(23031, "Received response: \"{httpResp}\"", "httpResp"_attr = httpResp);
+ LOGV2(23031,
+ "Received http response: {response}",
+ "Received http response",
+ "response"_attr = httpResp);
ASSERT_TRUE(httpResp.startsWith("HTTP/1.0 200 OK"));
// Why oh why can't ASIO unify their error codes
diff --git a/src/mongo/transport/transport_layer_egress_init.cpp b/src/mongo/transport/transport_layer_egress_init.cpp
index 7c8cf66cbb3..4c201829c7c 100644
--- a/src/mongo/transport/transport_layer_egress_init.cpp
+++ b/src/mongo/transport/transport_layer_egress_init.cpp
@@ -27,8 +27,6 @@
* it in the license file.
*/
-#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault
-
#include "mongo/platform/basic.h"
#include "mongo/base/init.h"
diff --git a/src/mongo/util/net/hostname_canonicalization.cpp b/src/mongo/util/net/hostname_canonicalization.cpp
index 3f5b939cb18..b329d0f6149 100644
--- a/src/mongo/util/net/hostname_canonicalization.cpp
+++ b/src/mongo/util/net/hostname_canonicalization.cpp
@@ -92,10 +92,10 @@ std::vector<std::string> getHostFQDNs(std::string hostName, HostnameCanonicaliza
if ((err = shim_getaddrinfo(nativeHostName.c_str(), nullptr, &hints, &info)) != 0) {
LOGV2_DEBUG(23170,
3,
- "Failed to obtain address information for hostname {hostName}: "
- "{getAddrInfoStrError_err}",
+ "Failed to obtain address information for host {hostName}: {error}",
+ "Failed to obtain address information for host",
"hostName"_attr = hostName,
- "getAddrInfoStrError_err"_attr = getAddrInfoStrError(err));
+ "error"_attr = getAddrInfoStrError(err));
return results;
}
const auto guard = makeGuard(shim_freeaddrinfo);
@@ -105,20 +105,14 @@ std::vector<std::string> getHostFQDNs(std::string hostName, HostnameCanonicaliza
return results;
}
- bool encounteredErrors = false;
- std::stringstream getNameInfoErrors;
- getNameInfoErrors << "Failed to obtain name info for: [ ";
+ std::vector<std::string> getNameInfoErrors;
for (shim_addrinfo* p = info; p; p = p->ai_next) {
+ std::stringstream getNameInfoError;
shim_char host[NI_MAXHOST] = {};
if ((err = shim_getnameinfo(
p->ai_addr, p->ai_addrlen, host, sizeof(host), nullptr, 0, NI_NAMEREQD)) == 0) {
results.emplace_back(shim_fromNativeString(host));
} else {
- if (encounteredErrors) {
- getNameInfoErrors << ", ";
- }
- encounteredErrors = true;
-
// Format the addrinfo structure we have into a string for reporting
char ip_str[INET6_ADDRSTRLEN];
struct sockaddr* addr = p->ai_addr;
@@ -132,23 +126,24 @@ std::vector<std::string> getHostFQDNs(std::string hostName, HostnameCanonicaliza
sin_addr = reinterpret_cast<void*>(&addr_in6->sin6_addr);
}
- getNameInfoErrors << "(";
if (sin_addr) {
invariant(inet_ntop(p->ai_family, sin_addr, ip_str, sizeof(ip_str)) != nullptr);
- getNameInfoErrors << ip_str;
+ getNameInfoError << ip_str;
} else {
- getNameInfoErrors << "Unknown address family: " << p->ai_family;
+ getNameInfoError << "Unknown address family: " << p->ai_family;
}
- getNameInfoErrors << ", \"" << getAddrInfoStrError(err) << "\")";
+ getNameInfoError << ": \"" << getAddrInfoStrError(err);
}
+ getNameInfoErrors.push_back(getNameInfoError.str());
}
- if (encounteredErrors) {
+ if (!getNameInfoErrors.empty()) {
LOGV2_DEBUG(23171,
3,
- "{getNameInfoErrors_str} ]",
- "getNameInfoErrors_str"_attr = getNameInfoErrors.str());
+ "Failed to obtain name info: {errors}",
+ "Failed to obtain name info",
+ "errors"_attr = getNameInfoErrors);
}
// Deduplicate the results list
diff --git a/src/mongo/util/net/http_client_curl.cpp b/src/mongo/util/net/http_client_curl.cpp
index c3002d4f062..ca8263595d9 100644
--- a/src/mongo/util/net/http_client_curl.cpp
+++ b/src/mongo/util/net/http_client_curl.cpp
@@ -27,8 +27,6 @@
* it in the license file.
*/
-#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kNetwork
-
#include "mongo/platform/basic.h"
#include <cstddef>
diff --git a/src/mongo/util/net/http_client_winhttp.cpp b/src/mongo/util/net/http_client_winhttp.cpp
index 45e3bcbd205..2368f2826c0 100644
--- a/src/mongo/util/net/http_client_winhttp.cpp
+++ b/src/mongo/util/net/http_client_winhttp.cpp
@@ -34,8 +34,6 @@
#error This file assumes a UNICODE WIN32 build
#endif
-#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kNetwork
-
#include "mongo/platform/basic.h"
#include <string>
diff --git a/src/mongo/util/net/ssl_manager_openssl.cpp b/src/mongo/util/net/ssl_manager_openssl.cpp
index ae9e68a1c23..829311cb692 100644
--- a/src/mongo/util/net/ssl_manager_openssl.cpp
+++ b/src/mongo/util/net/ssl_manager_openssl.cpp
@@ -1393,9 +1393,9 @@ int SSLManagerOpenSSL::password_cb(char* buf, int num, int rwflag, void* userdat
auto swPassword = pwFetcher->fetchPassword();
if (!swPassword.isOK()) {
LOGV2_ERROR(23239,
- "Unable to fetch password: {status}",
+ "Unable to fetch password: {error}",
"Unable to fetch password",
- "status"_attr = swPassword.getStatus());
+ "error"_attr = swPassword.getStatus());
return -1;
}
StringData password = std::move(swPassword.getValue());
diff --git a/src/mongo/util/net/ssl_options.cpp b/src/mongo/util/net/ssl_options.cpp
index 08e135b0cb6..607d4a6a9e8 100644
--- a/src/mongo/util/net/ssl_options.cpp
+++ b/src/mongo/util/net/ssl_options.cpp
@@ -27,8 +27,6 @@
* it in the license file.
*/
-#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kControl
-
#include "mongo/platform/basic.h"
#include "mongo/util/net/ssl_options.h"
diff --git a/src/mongo/util/processinfo_freebsd.cpp b/src/mongo/util/processinfo_freebsd.cpp
index 5a9d80fb417..d7f1d4127ca 100644
--- a/src/mongo/util/processinfo_freebsd.cpp
+++ b/src/mongo/util/processinfo_freebsd.cpp
@@ -131,16 +131,18 @@ void ProcessInfo::SystemInfo::collectSystemInfo() {
int status = getSysctlByNameWithDefault("kern.version", std::string("unknown"), &osVersion);
if (status != 0)
LOGV2(23332,
- "Unable to collect OS Version. (errno: {status} msg: {strerror_status})",
- "status"_attr = status,
- "strerror_status"_attr = strerror(status));
+ "Unable to collect OS Version. (errno: {errno} msg: {msg})",
+ "Unable to collect OS Version.",
+ "errno"_attr = status,
+ "msg"_attr = strerror(status));
status = getSysctlByNameWithDefault("hw.machine_arch", std::string("unknown"), &cpuArch);
if (status != 0)
LOGV2(23333,
- "Unable to collect Machine Architecture. (errno: {status} msg: {strerror_status})",
- "status"_attr = status,
- "strerror_status"_attr = strerror(status));
+ "Unable to collect Machine Architecture. (errno: {errno} msg: {msg})",
+ "Unable to collect Machine Architecture.",
+ "errno"_attr = status,
+ "msg"_attr = strerror(status));
addrSize = cpuArch.find("64") != std::string::npos ? 64 : 32;
uintptr_t numBuffer;
@@ -150,17 +152,19 @@ void ProcessInfo::SystemInfo::collectSystemInfo() {
memLimit = memSize;
if (status != 0)
LOGV2(23334,
- "Unable to collect Physical Memory. (errno: {status} msg: {strerror_status})",
- "status"_attr = status,
- "strerror_status"_attr = strerror(status));
+ "Unable to collect Physical Memory. (errno: {errno} msg: {msg})",
+ "Unable to collect Physical Memory.",
+ "errno"_attr = status,
+ "msg"_attr = strerror(status));
status = getSysctlByNameWithDefault("hw.ncpu", defaultNum, &numBuffer);
numCores = numBuffer;
if (status != 0)
LOGV2(23335,
- "Unable to collect Number of CPUs. (errno: {status} msg: {strerror_status})",
- "status"_attr = status,
- "strerror_status"_attr = strerror(status));
+ "Unable to collect Number of CPUs. (errno: {errno} msg: {msg})",
+ "Unable to collect Number of CPUs.",
+ "errno"_attr = status,
+ "msg"_attr = strerror(status));
pageSize = static_cast<unsigned long long>(sysconf(_SC_PAGESIZE));
diff --git a/src/mongo/util/processinfo_openbsd.cpp b/src/mongo/util/processinfo_openbsd.cpp
index e0c5eb55ab3..5736504ce9c 100644
--- a/src/mongo/util/processinfo_openbsd.cpp
+++ b/src/mongo/util/processinfo_openbsd.cpp
@@ -144,18 +144,20 @@ void ProcessInfo::SystemInfo::collectSystemInfo() {
int status = getSysctlByIDWithDefault(mib, 2, std::string("unknown"), &osVersion);
if (status != 0)
LOGV2(23345,
- "Unable to collect OS Version. (errno: {status} msg: {strerror_status})",
- "status"_attr = status,
- "strerror_status"_attr = strerror(status));
+ "Unable to collect OS Version. (errno: {errno} msg: {msg})",
+ "Unable to collect OS Version.",
+ "errno"_attr = status,
+ "msg"_attr = strerror(status));
mib[0] = CTL_HW;
mib[1] = HW_MACHINE;
status = getSysctlByIDWithDefault(mib, 2, std::string("unknown"), &cpuArch);
if (status != 0)
LOGV2(23346,
- "Unable to collect Machine Architecture. (errno: {status} msg: {strerror_status})",
- "status"_attr = status,
- "strerror_status"_attr = strerror(status));
+ "Unable to collect Machine Architecture. (errno: {errno} msg: {msg})",
+ "Unable to collect Machine Architecture.",
+ "errno"_attr = status,
+ "msg"_attr = strerror(status));
addrSize = cpuArch.find("64") != std::string::npos ? 64 : 32;
uintptr_t numBuffer;
@@ -167,9 +169,9 @@ void ProcessInfo::SystemInfo::collectSystemInfo() {
memLimit = memSize;
if (status != 0)
LOGV2(23347,
- "Unable to collect Physical Memory. (errno: {status} msg: {strerror_status})",
- "status"_attr = status,
- "strerror_status"_attr = strerror(status));
+ "Unable to collect Physical Memory. (errno: {errno} msg: {msg})",
+ "errno"_attr = status,
+ "msg"_attr = strerror(status));
mib[0] = CTL_HW;
mib[1] = HW_NCPU;
@@ -177,9 +179,9 @@ void ProcessInfo::SystemInfo::collectSystemInfo() {
numCores = numBuffer;
if (status != 0)
LOGV2(23348,
- "Unable to collect Number of CPUs. (errno: {status} msg: {strerror_status})",
- "status"_attr = status,
- "strerror_status"_attr = strerror(status));
+ "Unable to collect Number of CPUs. (errno: {errno} msg: {msg})",
+ "errno"_attr = status,
+ "msg"_attr = strerror(status));
pageSize = static_cast<unsigned long long>(sysconf(_SC_PAGESIZE));