author     samanca <amirsaman.memaripour@mongodb.com>          2020-04-01 13:52:59 -0400
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>    2020-04-01 20:05:43 +0000
commit     777f54aab1b1fd9b2db1f416a28755b31e1a4662 (patch)
tree       7b8d39123c69804ca913b600ee5f59b70e5670ba /src/mongo
parent     9d92dcf68cd671686d6d9d02bc079dae2fa12631 (diff)
download   mongo-777f54aab1b1fd9b2db1f416a28755b31e1a4662.tar.gz
SERVER-47077 Clean up log lines in 'mongo/db'
Cleaning up log lines to adhere to the LOGV2 style guide.
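
The hunks below all apply the same LOGV2 style-guide pattern: a severity-specific macro, a stable numeric log ID, a brace-style format string whose placeholders are short camelCase names rather than C++ expressions, a separate constant message for structured log consumers, named "_attr" arguments, and no trailing period. A minimal before/after sketch of that conversion (the ID 12345 and the names filePath/logPath are illustrative placeholders, not lines from this commit):

    // Before: placeholder named after the C++ expression it interpolates,
    // no constant message, trailing period, default severity.
    LOGV2(12345,
          "failed to open {logPath_generic_string}.",
          "logPath_generic_string"_attr = logPath.generic_string());

    // After: severity macro, short camelCase placeholder, separate constant
    // message, no trailing period.
    LOGV2_WARNING(12345,
                  "Failed to open log file {filePath}",
                  "Failed to open log file",
                  "filePath"_attr = logPath.generic_string());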
Diffstat (limited to 'src/mongo')
-rw-r--r--  src/mongo/db/db.cpp                               172
-rw-r--r--  src/mongo/db/server_options_server_helpers.cpp      5
-rw-r--r--  src/mongo/db/service_entry_point_common.cpp        180
-rw-r--r--  src/mongo/db/service_entry_point_mongod.cpp         22
-rw-r--r--  src/mongo/db/startup_warnings_common.cpp            18
-rw-r--r--  src/mongo/db/startup_warnings_mongod.cpp             73
6 files changed, 253 insertions, 217 deletions
diff --git a/src/mongo/db/db.cpp b/src/mongo/db/db.cpp
index e7d20abf4ba..83bfffdbd18 100644
--- a/src/mongo/db/db.cpp
+++ b/src/mongo/db/db.cpp
@@ -310,10 +310,11 @@ ExitCode _initAndListen(ServiceContext* serviceContext, int listenPort) {
ProcessId pid = ProcessId::getCurrent();
const bool is32bit = sizeof(int*) == 4;
LOGV2(4615611,
+ "MongoDB starting : pid={pid} port={port} dbpath={dbPath} {architecture} host={host}",
"MongoDB starting",
"pid"_attr = pid.toNative(),
"port"_attr = serverGlobalParams.port,
- "dbpath"_attr = boost::filesystem::path(storageGlobalParams.dbpath).generic_string(),
+ "dbPath"_attr = boost::filesystem::path(storageGlobalParams.dbpath).generic_string(),
"architecture"_attr = (is32bit ? "32-bit" : "64-bit"),
"host"_attr = getHostNameCached());
}
@@ -378,10 +379,11 @@ ExitCode _initAndListen(ServiceContext* serviceContext, int listenPort) {
// Warn if field name matches non-active registered storage engine.
if (isRegisteredStorageEngine(serviceContext, e.fieldName())) {
LOGV2_WARNING(20566,
- "Detected configuration for non-active storage engine {e_fieldName} "
- "when current storage engine is {storageGlobalParams_engine}",
- "e_fieldName"_attr = e.fieldName(),
- "storageGlobalParams_engine"_attr = storageGlobalParams.engine);
+ "Detected configuration for non-active storage engine {fieldName} "
+ "when current storage engine is {storageEngine}",
+ "Detected configuration for non-active storage engine",
+ "fieldName"_attr = e.fieldName(),
+ "storageEngine"_attr = storageGlobalParams.engine);
}
}
}
@@ -389,20 +391,22 @@ ExitCode _initAndListen(ServiceContext* serviceContext, int listenPort) {
// Disallow running a storage engine that doesn't support capped collections with --profile
if (!serviceContext->getStorageEngine()->supportsCappedCollections() &&
serverGlobalParams.defaultProfile != 0) {
- LOGV2(20534,
- "Running {storageGlobalParams_engine} with profiling is not supported. Make sure you "
- "are not using --profile.",
- "storageGlobalParams_engine"_attr = storageGlobalParams.engine);
+ LOGV2_ERROR(20534,
+ "Running {storageEngine} with profiling is not supported. Make sure you "
+ "are not using --profile",
+ "Running the selected storage engine with profiling is not supported",
+ "storageEngine"_attr = storageGlobalParams.engine);
exitCleanly(EXIT_BADOPTIONS);
}
// Disallow running WiredTiger with --nojournal in a replica set
if (storageGlobalParams.engine == "wiredTiger" && !storageGlobalParams.dur &&
replSettings.usingReplSets()) {
- LOGV2(20535,
- "Running wiredTiger without journaling in a replica set is not supported. Make sure "
- "you are not using --nojournal and that storage.journal.enabled is not set to "
- "'false'.");
+ LOGV2_ERROR(
+ 20535,
+ "Running wiredTiger without journaling in a replica set is not supported. Make sure "
+ "you are not using --nojournal and that storage.journal.enabled is not set to "
+ "'false'");
exitCleanly(EXIT_BADOPTIONS);
}
@@ -448,8 +452,9 @@ ExitCode _initAndListen(ServiceContext* serviceContext, int listenPort) {
LOGV2_FATAL_OPTIONS(
20573,
logv2::LogOptions(logv2::LogComponent::kControl, logv2::FatalMode::kContinue),
- "** IMPORTANT: {error_toStatus_reason}",
- "error_toStatus_reason"_attr = error.toStatus().reason());
+ "** IMPORTANT: {error}",
+ "Wrong mongod version",
+ "error"_attr = error.toStatus().reason());
exitCleanly(EXIT_NEED_DOWNGRADE);
}
@@ -471,11 +476,11 @@ ExitCode _initAndListen(ServiceContext* serviceContext, int listenPort) {
}
if (gFlowControlEnabled.load()) {
- LOGV2(20536, "Flow Control is enabled on this deployment.");
+ LOGV2(20536, "Flow Control is enabled on this deployment");
}
if (storageGlobalParams.upgrade) {
- LOGV2(20537, "finished checking dbs");
+ LOGV2(20537, "Finished checking dbs");
exitCleanly(EXIT_CLEAN);
}
@@ -491,7 +496,10 @@ ExitCode _initAndListen(ServiceContext* serviceContext, int listenPort) {
if (globalAuthzManager->shouldValidateAuthSchemaOnStartup()) {
Status status = verifySystemIndexes(startupOpCtx.get());
if (!status.isOK()) {
- LOGV2(20538, "{status}", "status"_attr = redact(status));
+ LOGV2_WARNING(20538,
+ "Unable to verify system indexes: {error}",
+ "Unable to verify system indexes",
+ "error"_attr = redact(status));
if (status == ErrorCodes::AuthSchemaIncompatible) {
exitCleanly(EXIT_NEED_UPGRADE);
} else if (status == ErrorCodes::NotMaster) {
@@ -507,27 +515,28 @@ ExitCode _initAndListen(ServiceContext* serviceContext, int listenPort) {
status =
globalAuthzManager->getAuthorizationVersion(startupOpCtx.get(), &foundSchemaVersion);
if (!status.isOK()) {
- LOGV2(20539,
- "Auth schema version is incompatible: User and role management commands require "
- "auth data to have at least schema version "
- "{AuthorizationManager_schemaVersion26Final} but startup could not verify schema "
- "version: {status}",
- "AuthorizationManager_schemaVersion26Final"_attr =
- AuthorizationManager::schemaVersion26Final,
- "status"_attr = status);
+ LOGV2_ERROR(
+ 20539,
+ "Auth schema version is incompatible: User and role management commands require "
+ "auth data to have at least schema version {minSchemaVersion} but startup could "
+ "not verify schema version: {error}",
+ "Failed to verify auth schema version",
+ "minSchemaVersion"_attr = AuthorizationManager::schemaVersion26Final,
+ "error"_attr = status);
LOGV2(20540,
"To manually repair the 'authSchema' document in the admin.system.version "
"collection, start up with --setParameter "
- "startupAuthSchemaValidation=false to disable validation.");
+ "startupAuthSchemaValidation=false to disable validation");
exitCleanly(EXIT_NEED_UPGRADE);
}
if (foundSchemaVersion <= AuthorizationManager::schemaVersion26Final) {
- LOGV2(20541,
- "This server is using MONGODB-CR, an authentication mechanism which has been "
- "removed from MongoDB 4.0. In order to upgrade the auth schema, first downgrade "
- "MongoDB binaries to version 3.6 and then run the authSchemaUpgrade command. See "
- "http://dochub.mongodb.org/core/3.0-upgrade-to-scram-sha-1");
+ LOGV2_ERROR(
+ 20541,
+ "This server is using MONGODB-CR, an authentication mechanism which has been "
+ "removed from MongoDB 4.0. In order to upgrade the auth schema, first downgrade "
+ "MongoDB binaries to version 3.6 and then run the authSchemaUpgrade command. See "
+ "http://dochub.mongodb.org/core/3.0-upgrade-to-scram-sha-1");
exitCleanly(EXIT_NEED_UPGRADE);
}
} else if (globalAuthzManager->isAuthEnabled()) {
@@ -536,15 +545,14 @@ ExitCode _initAndListen(ServiceContext* serviceContext, int listenPort) {
} else {
// If authSchemaValidation is disabled and server is running without auth,
// warn the user and continue startup without authSchema metadata checks.
- LOGV2_OPTIONS(20542, {logv2::LogTag::kStartupWarnings}, "");
- LOGV2_OPTIONS(20543,
- {logv2::LogTag::kStartupWarnings},
- "** WARNING: Startup auth schema validation checks are disabled for the "
- "database.");
- LOGV2_OPTIONS(20544,
- {logv2::LogTag::kStartupWarnings},
- "** This mode should only be used to manually repair corrupted auth "
- "data.");
+ LOGV2_WARNING_OPTIONS(
+ 20543,
+ {logv2::LogTag::kStartupWarnings},
+ "** WARNING: Startup auth schema validation checks are disabled for the database");
+ LOGV2_WARNING_OPTIONS(
+ 20544,
+ {logv2::LogTag::kStartupWarnings},
+ "** This mode should only be used to manually repair corrupted auth data");
}
WaitForMajorityService::get(serviceContext).setUp(serviceContext);
@@ -649,34 +657,31 @@ ExitCode _initAndListen(ServiceContext* serviceContext, int listenPort) {
replCoord->startup(startupOpCtx.get());
if (getReplSetMemberInStandaloneMode(serviceContext)) {
- LOGV2_OPTIONS(20546, {logv2::LogTag::kStartupWarnings}, "");
- LOGV2_OPTIONS(20547,
- {logv2::LogTag::kStartupWarnings},
- "** WARNING: mongod started without --replSet yet document(s) are "
- "present in {NamespaceString_kSystemReplSetNamespace}.",
- "NamespaceString_kSystemReplSetNamespace"_attr =
- NamespaceString::kSystemReplSetNamespace);
- LOGV2_OPTIONS(
- 20548,
+ LOGV2_WARNING_OPTIONS(
+ 20547,
{logv2::LogTag::kStartupWarnings},
- "** Database contents may appear inconsistent with the oplog and may "
- "appear to not contain");
- LOGV2_OPTIONS(
- 20549,
+ "** WARNING: mongod started without --replSet yet document(s) are present in "
+ "{namespace}",
+ "Document(s) exist in replSet namespace, but started without --replSet",
+ "namespace"_attr = NamespaceString::kSystemReplSetNamespace);
+ LOGV2_WARNING_OPTIONS(20548,
+ {logv2::LogTag::kStartupWarnings},
+ "** Database contents may appear inconsistent with the "
+ "oplog and may appear to not contain");
+ LOGV2_WARNING_OPTIONS(20549,
+ {logv2::LogTag::kStartupWarnings},
+ "** writes that were visible when this node was "
+ "running as part of a replica set");
+ LOGV2_WARNING_OPTIONS(20550,
+ {logv2::LogTag::kStartupWarnings},
+ "** Restart with --replSet unless you are doing "
+ "maintenance and no other clients are connected");
+ LOGV2_WARNING_OPTIONS(
+ 20551,
{logv2::LogTag::kStartupWarnings},
- "** writes that were visible when this node was running as part of a "
- "replica set.");
- LOGV2_OPTIONS(
- 20550,
- {logv2::LogTag::kStartupWarnings},
- "** Restart with --replSet unless you are doing maintenance and no "
- "other clients are connected.");
- LOGV2_OPTIONS(20551,
- {logv2::LogTag::kStartupWarnings},
- "** The TTL collection monitor will not start because of this.");
- LOGV2(20552, "** ");
- LOGV2(20553, " For more info see http://dochub.mongodb.org/core/ttlcollections");
- LOGV2_OPTIONS(20554, {logv2::LogTag::kStartupWarnings}, "");
+ "** The TTL collection monitor will not start because of this");
+ LOGV2(20553,
+ "** For more info see http://dochub.mongodb.org/core/ttlcollections");
} else {
startTTLBackgroundJob(serviceContext);
}
@@ -767,7 +772,7 @@ ExitCode _initAndListen(ServiceContext* serviceContext, int listenPort) {
#endif
if (MONGO_unlikely(shutdownAtStartup.shouldFail())) {
- LOGV2(20556, "starting clean exit via failpoint");
+ LOGV2(20556, "Starting clean exit via failpoint");
exitCleanly(EXIT_CLEAN);
}
@@ -779,18 +784,25 @@ ExitCode initAndListen(ServiceContext* service, int listenPort) {
try {
return _initAndListen(service, listenPort);
} catch (DBException& e) {
- LOGV2(20557, "exception in initAndListen: {e}, terminating", "e"_attr = e.toString());
+ LOGV2_ERROR(20557,
+ "Exception in initAndListen: {error}, terminating",
+ "DBException in initAndListen, terminating",
+ "error"_attr = e.toString());
return EXIT_UNCAUGHT;
} catch (std::exception& e) {
- LOGV2(20558,
- "exception in initAndListen std::exception: {e_what}, terminating",
- "e_what"_attr = e.what());
+ LOGV2_ERROR(20558,
+ "Exception in initAndListen std::exception: {error}, terminating",
+ "std::exception in initAndListen, terminating",
+ "error"_attr = e.what());
return EXIT_UNCAUGHT;
} catch (int& n) {
- LOGV2(20559, "exception in initAndListen int: {n}, terminating", "n"_attr = n);
+ LOGV2_ERROR(20559,
+ "Exception in initAndListen int: {reason}, terminating",
+ "Exception in initAndListen, terminating",
+ "reason"_attr = n);
return EXIT_UNCAUGHT;
} catch (...) {
- LOGV2(20560, "exception in initAndListen, terminating");
+ LOGV2_ERROR(20560, "Exception in initAndListen, terminating");
return EXIT_UNCAUGHT;
}
}
@@ -1057,10 +1069,10 @@ void shutdownTask(const ShutdownTaskArgs& shutdownArgs) {
} catch (const ExceptionFor<ErrorCodes::NotMaster>&) {
// ignore not master errors
} catch (const DBException& e) {
- LOGV2(20561,
- "Error stepping down in non-command initiated shutdown path {error}",
- "Error stepping down in non-command initiated shutdown path",
- "error"_attr = e);
+ LOGV2_WARNING(20561,
+ "Error stepping down in non-command initiated shutdown path: {error}",
+ "Error stepping down in non-command initiated shutdown path",
+ "error"_attr = e);
}
// Even if the replCoordinator failed to step down, ensure we still shut down the
@@ -1087,7 +1099,7 @@ void shutdownTask(const ShutdownTaskArgs& shutdownArgs) {
if (auto tl = serviceContext->getTransportLayer()) {
LOGV2_OPTIONS(20562,
{logComponentV1toV2(LogComponent::kNetwork)},
- "shutdown: going to close listening sockets...");
+ "Shutdown: going to close listening sockets");
tl->shutdown();
}
@@ -1229,7 +1241,7 @@ void shutdownTask(const ShutdownTaskArgs& shutdownArgs) {
// the memory and makes leak sanitizer happy.
ScriptEngine::dropScopeCache();
- LOGV2_OPTIONS(20565, {logComponentV1toV2(LogComponent::kControl)}, "now exiting");
+ LOGV2_OPTIONS(20565, {logComponentV1toV2(LogComponent::kControl)}, "Now exiting");
audit::logShutdown(client);
diff --git a/src/mongo/db/server_options_server_helpers.cpp b/src/mongo/db/server_options_server_helpers.cpp
index 0d237dfe7bd..fdde1a49c1b 100644
--- a/src/mongo/db/server_options_server_helpers.cpp
+++ b/src/mongo/db/server_options_server_helpers.cpp
@@ -121,7 +121,10 @@ void printCommandLineOpts(std::ostream* os) {
tojson(serverGlobalParams.parsedOpts, ExtendedRelaxedV2_0_0, true))
<< std::endl;
} else {
- LOGV2(21951, "Options set by command line", "options"_attr = serverGlobalParams.parsedOpts);
+ LOGV2(21951,
+ "Options set by command line: {options}",
+ "Options set by command line",
+ "options"_attr = serverGlobalParams.parsedOpts);
}
}
diff --git a/src/mongo/db/service_entry_point_common.cpp b/src/mongo/db/service_entry_point_common.cpp
index f4497a8ccdf..7c1008d0623 100644
--- a/src/mongo/db/service_entry_point_common.cpp
+++ b/src/mongo/db/service_entry_point_common.cpp
@@ -137,23 +137,26 @@ void generateLegacyQueryErrorResponse(const AssertionException& exception,
if (queryMessage.query.valid(BSONVersion::kLatest))
LOGV2_OPTIONS(51777,
{logv2::LogComponent::kQuery},
- "assertion {exception} ns: {ns} query: {query}",
- "exception"_attr = exception,
- "ns"_attr = queryMessage.ns,
+ "Assertion {error} ns: {namespace} query: {query}",
+ "Assertion for valid query",
+ "error"_attr = exception,
+ "namespace"_attr = queryMessage.ns,
"query"_attr = redact(queryMessage.query));
else
LOGV2_OPTIONS(51778,
{logv2::LogComponent::kQuery},
- "assertion {exception} ns: {ns} query object is corrupt",
- "exception"_attr = exception,
- "ns"_attr = queryMessage.ns);
+ "Assertion {error} ns: {namespace} query object is corrupt",
+ "Assertion for query with corrupted object",
+ "error"_attr = exception,
+ "namespace"_attr = queryMessage.ns);
if (queryMessage.ntoskip || queryMessage.ntoreturn) {
LOGV2_OPTIONS(21952,
{logComponentV1toV2(LogComponent::kQuery)},
- " ntoskip:{queryMessage_ntoskip} ntoreturn:{queryMessage_ntoreturn}",
- "queryMessage_ntoskip"_attr = queryMessage.ntoskip,
- "queryMessage_ntoreturn"_attr = queryMessage.ntoreturn);
+ "Query's nToSkip = {nToSkip} and nToReturn = {nToReturn}",
+ "Assertion for query with nToSkip and/or nToReturn",
+ "nToSkip"_attr = queryMessage.ntoskip,
+ "nToReturn"_attr = queryMessage.ntoreturn);
}
BSONObjBuilder err;
@@ -170,9 +173,10 @@ void generateLegacyQueryErrorResponse(const AssertionException& exception,
if (isStaleConfig) {
LOGV2_OPTIONS(21953,
{logComponentV1toV2(LogComponent::kQuery)},
- "stale version detected during query over {queryMessage_ns} : {errObj}",
- "queryMessage_ns"_attr = queryMessage.ns,
- "errObj"_attr = errObj);
+ "Stale version detected during query over {namespace}: {error}",
+ "Detected stale version while querying namespace",
+ "namespace"_attr = queryMessage.ns,
+ "error"_attr = errObj);
}
BufBuilder bb;
@@ -286,8 +290,8 @@ StatusWith<repl::ReadConcernArgs> _extractReadConcern(OperationContext* opCtx,
serverGlobalParams.clusterRole == ClusterRole::ConfigServer) {
if (!readConcernArgs.isSpecified()) {
// TODO: Disabled until after SERVER-44539, to avoid log spam.
- // LOGV2(21954, "Missing readConcern on {invocation_definition_getName}",
- // "invocation_definition_getName"_attr = invocation->definition()->getName());
+ // LOGV2(21954, "Missing readConcern on {command}", "Missing readConcern "
+ // "for command", "command"_attr = invocation->definition()->getName());
}
}
}
@@ -306,11 +310,11 @@ StatusWith<repl::ReadConcernArgs> _extractReadConcern(OperationContext* opCtx,
readConcernArgs = std::move(*rcDefault);
LOGV2_DEBUG(21955,
2,
- "Applying default readConcern on {invocation_definition_getName} "
- "of {rcDefault}",
- "invocation_definition_getName"_attr =
- invocation->definition()->getName(),
- "rcDefault"_attr = *rcDefault);
+ "Applying default readConcern on {command} of {readConcernDefault} "
+ "on {command}",
+ "Applying default readConcern on command",
+ "readConcernDefault"_attr = *rcDefault,
+ "command"_attr = invocation->definition()->getName());
// Update the readConcernSupport, since the default RC was applied.
readConcernSupport =
invocation->supportsReadConcern(readConcernArgs.getLevel());
@@ -530,15 +534,14 @@ void _abortUnpreparedOrStashPreparedTransaction(
txnParticipant->abortTransaction(opCtx);
} catch (...) {
// It is illegal for this to throw so we catch and log this here for diagnosability.
- LOGV2_FATAL_CONTINUE(
- 21974,
- "Caught exception during transaction "
- "{opCtx_getTxnNumber}{isPrepared_stash_abort}{opCtx_getLogicalSessionId}: "
- "{exceptionToStatus}",
- "opCtx_getTxnNumber"_attr = opCtx->getTxnNumber(),
- "isPrepared_stash_abort"_attr = (isPrepared ? " stash " : " abort "),
- "opCtx_getLogicalSessionId"_attr = opCtx->getLogicalSessionId()->toBSON(),
- "exceptionToStatus"_attr = exceptionToStatus());
+ LOGV2_FATAL_CONTINUE(21974,
+ "Caught exception during transaction {txnNumber} {operation} "
+ "{logicalSessionId}: {error}",
+ "Unable to stash/abort transaction",
+ "operation"_attr = (isPrepared ? "stash" : "abort"),
+ "txnNumber"_attr = opCtx->getTxnNumber(),
+ "logicalSessionId"_attr = opCtx->getLogicalSessionId()->toBSON(),
+ "error"_attr = exceptionToStatus());
std::terminate();
}
}
@@ -758,8 +761,8 @@ bool runCommandImpl(OperationContext* opCtx,
serverGlobalParams.clusterRole == ClusterRole::ConfigServer) {
if (!request.body.hasField(WriteConcernOptions::kWriteConcernField)) {
// TODO: Disabled until after SERVER-44539, to avoid log spam.
- // LOGV2(21959, "Missing writeConcern on {command_getName}",
- // "command_getName"_attr = command->getName());
+ // LOGV2(21959, "Missing writeConcern on {command}", "Missing "
+ // "writeConcern on command", "command"_attr = command->getName());
}
}
}
@@ -812,6 +815,7 @@ bool runCommandImpl(OperationContext* opCtx,
LOGV2(21960,
"failWithErrorCodeInRunCommand enabled - failing command with error "
"code: {errorCode}",
+ "failWithErrorCodeInRunCommand enabled, failing command",
"errorCode"_attr = errorCode);
BSONObjBuilder errorBuilder;
errorBuilder.append("ok", 0.0);
@@ -1062,8 +1066,9 @@ void execCommandDatabase(OperationContext* opCtx,
if (command->adminOnly()) {
LOGV2_DEBUG(21961,
2,
- "command: {request_getCommandName}",
- "request_getCommandName"_attr = request.getCommandName());
+ "Admin only command: {command}",
+ "Admin only command",
+ "command"_attr = request.getCommandName());
}
if (command->maintenanceMode()) {
@@ -1168,7 +1173,8 @@ void execCommandDatabase(OperationContext* opCtx,
LOGV2_DEBUG_OPTIONS(4615605,
1,
{logv2::LogComponent::kTracking},
- "{trackingMetadata}",
+ "Command metadata: {trackingMetadata}",
+ "Command metadata",
"trackingMetadata"_attr = rpc::TrackingMetadata::get(opCtx));
rpc::TrackingMetadata::get(opCtx).setIsLogged(true);
}
@@ -1243,17 +1249,16 @@ void execCommandDatabase(OperationContext* opCtx,
}
appendClusterAndOperationTime(opCtx, &extraFieldsBuilder, &metadataBob, startOperationTime);
- LOGV2_DEBUG(
- 21962,
- 1,
- "assertion while executing command '{request_getCommandName}' on database "
- "'{request_getDatabase}' with arguments "
- "'{ServiceEntryPointCommon_getRedactedCopyForLogging_command_request_body}': {e}",
- "request_getCommandName"_attr = request.getCommandName(),
- "request_getDatabase"_attr = request.getDatabase(),
- "ServiceEntryPointCommon_getRedactedCopyForLogging_command_request_body"_attr =
- redact(ServiceEntryPointCommon::getRedactedCopyForLogging(command, request.body)),
- "e"_attr = redact(e.toString()));
+ LOGV2_DEBUG(21962,
+ 1,
+ "Assertion while executing command '{command}' on database '{db}' with "
+ "arguments '{commandArgs}': {error}",
+ "Assertion while executing command",
+ "command"_attr = request.getCommandName(),
+ "db"_attr = request.getDatabase(),
+ "commandArgs"_attr = redact(
+ ServiceEntryPointCommon::getRedactedCopyForLogging(command, request.body)),
+ "error"_attr = redact(e.toString()));
generateErrorResponse(opCtx, replyBuilder, e, metadataBob.obj(), extraFieldsBuilder.obj());
}
@@ -1299,8 +1304,11 @@ DbResponse receivedCommands(OperationContext* opCtx,
// Otherwise, reply with the parse error. This is useful for cases where parsing fails
// due to user-supplied input, such as the document too deep error. Since we failed
// during parsing, we can't log anything about the command.
- LOGV2_DEBUG(
- 21963, 1, "assertion while parsing command: {ex}", "ex"_attr = ex.toString());
+ LOGV2_DEBUG(21963,
+ 1,
+ "Assertion while parsing command: {error}",
+ "Assertion while parsing command",
+ "error"_attr = ex.toString());
generateErrorResponse(
opCtx, replyBuilder.get(), ex, metadataBob.obj(), extraFieldsBuilder.obj());
@@ -1320,18 +1328,21 @@ DbResponse receivedCommands(OperationContext* opCtx,
globalCommandRegistry()->incrementUnknownCommands();
std::string msg = str::stream()
<< "no such command: '" << request.getCommandName() << "'";
- LOGV2_DEBUG(21964, 2, "{msg}", "msg"_attr = msg);
+ LOGV2_DEBUG(21964,
+ 2,
+ "No such command: {command}",
+ "Command not found in registry",
+ "command"_attr = request.getCommandName());
uasserted(ErrorCodes::CommandNotFound, str::stream() << msg);
}
- LOGV2_DEBUG(
- 21965,
- 2,
- "run command {request_getDatabase}.$cmd "
- "{ServiceEntryPointCommon_getRedactedCopyForLogging_c_request_body}",
- "request_getDatabase"_attr = request.getDatabase(),
- "ServiceEntryPointCommon_getRedactedCopyForLogging_c_request_body"_attr =
- redact(ServiceEntryPointCommon::getRedactedCopyForLogging(c, request.body)));
+ LOGV2_DEBUG(21965,
+ 2,
+ "Run command {db}.$cmd {commandArgs}",
+ "About to run the command",
+ "db"_attr = request.getDatabase(),
+ "commandArgs"_attr = redact(
+ ServiceEntryPointCommon::getRedactedCopyForLogging(c, request.body)));
{
// Try to set this as early as possible, as soon as we have figured out the command.
@@ -1359,11 +1370,11 @@ DbResponse receivedCommands(OperationContext* opCtx,
LOGV2_DEBUG(21966,
1,
- "assertion while executing command '{request_getCommandName}' on database "
- "'{request_getDatabase}': {ex}",
- "request_getCommandName"_attr = request.getCommandName(),
- "request_getDatabase"_attr = request.getDatabase(),
- "ex"_attr = ex.toString());
+ "Assertion while executing command '{command}' on database '{db}': {error}",
+ "Assertion while executing command",
+ "command"_attr = request.getCommandName(),
+ "db"_attr = request.getDatabase(),
+ "error"_attr = ex.toString());
generateErrorResponse(
opCtx, replyBuilder.get(), ex, metadataBob.obj(), extraFieldsBuilder.obj());
@@ -1438,12 +1449,24 @@ void receivedKillCursors(OperationContext* opCtx, const Message& m) {
DbMessage dbmessage(m);
int n = dbmessage.pullInt();
- if (n > 2000) {
- if (n < 30000)
- LOGV2_WARNING(4615606, "receivedKillCursors, n={n}", "n"_attr = n);
- else
- LOGV2_ERROR(4615607, "receivedKillCursors, n={n}", "n"_attr = n);
- uassert(51250, "must kill fewer than 30000 cursors", n < 30000);
+ static constexpr int kSoftKillLimit = 2000;
+ static constexpr int kHardKillLimit = 29999;
+
+ if (n > kHardKillLimit) {
+ LOGV2_ERROR(4615607,
+ "Received killCursors, n={numCursors}",
+ "Received killCursors, exceeded kHardKillLimit",
+ "numCursors"_attr = n,
+ "kHardKillLimit"_attr = kHardKillLimit);
+ uasserted(51250, "Must kill fewer than {} cursors"_format(kHardKillLimit));
+ }
+
+ if (n > kSoftKillLimit) {
+ LOGV2_WARNING(4615606,
+ "Received killCursors, n={numCursors}",
+ "Received killCursors, exceeded kSoftKillLimit",
+ "numCursors"_attr = n,
+ "kSoftKillLimit"_attr = kSoftKillLimit);
}
uassert(31289, str::stream() << "must kill at least 1 cursor, n=" << n, n >= 1);
@@ -1457,9 +1480,10 @@ void receivedKillCursors(OperationContext* opCtx, const Message& m) {
if (shouldLog(logv2::LogSeverity::Debug(1)) || found != n) {
LOGV2_DEBUG(21967,
logSeverityV1toV2(found == n ? 1 : 0).toInt(),
- "killcursors: found {found} of {n}",
+ "killCursors: found {found} of {numCursors}",
+ "killCursors found fewer cursors to kill than requested",
"found"_attr = found,
- "n"_attr = n);
+ "numCursors"_attr = n);
}
}
@@ -1675,8 +1699,9 @@ DbResponse ServiceEntryPointCommon::handleRequest(OperationContext* opCtx,
receivedKillCursors(opCtx, m);
} else if (op != dbInsert && op != dbUpdate && op != dbDelete) {
LOGV2(21968,
- " operation isn't supported: {static_cast_int_op}",
- "static_cast_int_op"_attr = static_cast<int>(op));
+ "Operation isn't supported: {operation}",
+ "Operation is not supported",
+ "operation"_attr = static_cast<int>(op));
currentOp.done();
forceLog = true;
} else {
@@ -1704,9 +1729,10 @@ DbResponse ServiceEntryPointCommon::handleRequest(OperationContext* opCtx,
LastError::get(c).setLastError(ue.code(), ue.reason());
LOGV2_DEBUG(21969,
3,
- " Caught Assertion in {networkOpToString_op}, continuing {ue}",
- "networkOpToString_op"_attr = networkOpToString(op),
- "ue"_attr = redact(ue));
+ "Caught Assertion in {networkOp}, continuing: {error}",
+ "Assertion in fire-and-forget operation",
+ "networkOp"_attr = networkOpToString(op),
+ "error"_attr = redact(ue));
debug.errInfo = ue.toStatus();
}
// A NotMaster error can be set either within receivedInsert/receivedUpdate/receivedDelete
@@ -1736,15 +1762,15 @@ DbResponse ServiceEntryPointCommon::handleRequest(OperationContext* opCtx,
if (currentOp.shouldDBProfile(shouldSample)) {
// Performance profiling is on
if (opCtx->lockState()->isReadLocked()) {
- LOGV2_DEBUG(21970, 1, "note: not profiling because recursive read lock");
+ LOGV2_DEBUG(21970, 1, "Note: not profiling because of recursive read lock");
} else if (c.isInDirectClient()) {
- LOGV2_DEBUG(21971, 1, "note: not profiling because we are in DBDirectClient");
+ LOGV2_DEBUG(21971, 1, "Note: not profiling because we are in DBDirectClient");
} else if (behaviors.lockedForWriting()) {
// TODO SERVER-26825: Fix race condition where fsyncLock is acquired post
// lockedForWriting() call but prior to profile collection lock acquisition.
- LOGV2_DEBUG(21972, 1, "note: not profiling because doing fsync+lock");
+ LOGV2_DEBUG(21972, 1, "Note: not profiling because doing fsync+lock");
} else if (storageGlobalParams.readOnly) {
- LOGV2_DEBUG(21973, 1, "note: not profiling because server is read-only");
+ LOGV2_DEBUG(21973, 1, "Note: not profiling because server is read-only");
} else {
invariant(!opCtx->lockState()->inAWriteUnitOfWork());
profile(opCtx, op);
diff --git a/src/mongo/db/service_entry_point_mongod.cpp b/src/mongo/db/service_entry_point_mongod.cpp
index 4c3cfc4738e..6cb373d91b4 100644
--- a/src/mongo/db/service_entry_point_mongod.cpp
+++ b/src/mongo/db/service_entry_point_mongod.cpp
@@ -81,18 +81,16 @@ public:
if (ErrorCodes::isExceededTimeLimitError(rcStatus.code())) {
const int debugLevel =
serverGlobalParams.clusterRole == ClusterRole::ConfigServer ? 0 : 2;
- LOGV2_DEBUG(
- 21975,
- logSeverityV1toV2(debugLevel).toInt(),
- "Command on database {request_getDatabase} timed out waiting for read concern "
- "to be satisfied. Command: "
- "{ServiceEntryPointCommon_getRedactedCopyForLogging_invocation_definition_"
- "request_body}. Info: {rcStatus}",
- "request_getDatabase"_attr = request.getDatabase(),
- "ServiceEntryPointCommon_getRedactedCopyForLogging_invocation_definition_request_body"_attr =
- redact(ServiceEntryPointCommon::getRedactedCopyForLogging(
- invocation->definition(), request.body)),
- "rcStatus"_attr = redact(rcStatus));
+ LOGV2_DEBUG(21975,
+ logSeverityV1toV2(debugLevel).toInt(),
+ "Command on database {db} timed out waiting for read concern to be "
+ "satisfied. Command: {command}. Info: {error}",
+ "Command timed out waiting for read concern to be satisfied",
+ "db"_attr = request.getDatabase(),
+ "command"_attr =
+ redact(ServiceEntryPointCommon::getRedactedCopyForLogging(
+ invocation->definition(), request.body)),
+ "error"_attr = redact(rcStatus));
}
uassertStatusOK(rcStatus);
diff --git a/src/mongo/db/startup_warnings_common.cpp b/src/mongo/db/startup_warnings_common.cpp
index 074ec691aea..8d59e6b1517 100644
--- a/src/mongo/db/startup_warnings_common.cpp
+++ b/src/mongo/db/startup_warnings_common.cpp
@@ -58,7 +58,7 @@ void logCommonStartupWarnings(const ServerGlobalParams& serverParams) {
LOGV2_WARNING_OPTIONS(
22117,
{logv2::LogTag::kStartupWarnings},
- "This is a development version of MongoDB. Not recommended for production.",
+ "This is a development version of MongoDB. Not recommended for production",
"version"_attr = vii.version());
}
}
@@ -67,7 +67,7 @@ void logCommonStartupWarnings(const ServerGlobalParams& serverParams) {
LOGV2_WARNING_OPTIONS(22120,
{logv2::LogTag::kStartupWarnings},
"Access control is not enabled for the database. Read and write "
- "access to data and configuration is unrestricted.");
+ "access to data and configuration is unrestricted");
}
const bool is32bit = sizeof(int*) == 4;
@@ -82,7 +82,7 @@ void logCommonStartupWarnings(const ServerGlobalParams& serverParams) {
22124,
{logv2::LogTag::kStartupWarnings},
"While invalid X509 certificates may be used to connect to this server, they will not "
- "be considered permissible for authentication.");
+ "be considered permissible for authentication");
}
if (sslGlobalParams.sslAllowInvalidHostnames) {
@@ -90,7 +90,7 @@ void logCommonStartupWarnings(const ServerGlobalParams& serverParams) {
22128,
{logv2::LogTag::kStartupWarnings},
"This server will not perform X.509 hostname validation. This may allow your server to "
- "make or accept connections to untrusted parties.");
+ "make or accept connections to untrusted parties");
}
#endif
@@ -107,11 +107,11 @@ void logCommonStartupWarnings(const ServerGlobalParams& serverParams) {
LOGV2_WARNING(22132,
"No client certificate validation can be performed since no CA file has been "
"provided and no sslCertificateSelector has been specified. Please specify "
- "an sslCAFile parameter.");
+ "an sslCAFile parameter");
#else
LOGV2_WARNING(22133,
"No client certificate validation can be performed since no CA file has been "
- "provided. Please specify an sslCAFile parameter.");
+ "provided. Please specify an sslCAFile parameter");
#endif
}
@@ -123,7 +123,7 @@ void logCommonStartupWarnings(const ServerGlobalParams& serverParams) {
LOGV2_OPTIONS(22135,
{logv2::LogTag::kStartupWarnings},
"This is a 32-bit MongoDB binary running on a 64-bit operating system. "
- "Switch to a 64-bit build of MongoDB to support larger databases.");
+ "Switch to a 64-bit build of MongoDB to support larger databases");
}
#endif
@@ -132,7 +132,7 @@ void logCommonStartupWarnings(const ServerGlobalParams& serverParams) {
LOGV2_WARNING_OPTIONS(
22138,
{logv2::LogTag::kStartupWarnings},
- "You are running this process as the root user, which is not recommended.");
+ "You are running this process as the root user, which is not recommended");
}
#endif
@@ -144,7 +144,7 @@ void logCommonStartupWarnings(const ServerGlobalParams& serverParams) {
"server. Start the server with --bind_ip <address> to specify which IP addresses it "
"should serve responses from, or with --bind_ip_all to bind to all interfaces. If this "
"behavior is desired, start the server with --bind_ip 127.0.0.1 to disable this "
- "warning.");
+ "warning");
}
if (auth::hasMultipleInternalAuthKeys()) {
diff --git a/src/mongo/db/startup_warnings_mongod.cpp b/src/mongo/db/startup_warnings_mongod.cpp
index e6d1777c05f..6ea8cee188d 100644
--- a/src/mongo/db/startup_warnings_mongod.cpp
+++ b/src/mongo/db/startup_warnings_mongod.cpp
@@ -142,28 +142,21 @@ void logMongodStartupWarnings(const StorageGlobalParams& storageParams,
LOGV2_WARNING_OPTIONS(
22152,
{logv2::LogTag::kStartupWarnings},
- "This is a 32 bit MongoDB binary.32 bit builds are limited to less than 2GB "
+ "This is a 32 bit MongoDB binary. 32 bit builds are limited to less than 2GB "
"of data (or less with --journal). See http://dochub.mongodb.org/core/32bit");
if (!storageParams.dur) {
LOGV2_WARNING_OPTIONS(22154,
{logv2::LogTag::kStartupWarnings},
- "Journaling defaults to off for 32 bit and is currently off.");
+ "Journaling defaults to off for 32 bit and is currently off");
}
}
- if (!ProcessInfo::blockCheckSupported()) {
- LOGV2_WARNING_OPTIONS(
- 22157,
- {logv2::LogTag::kStartupWarnings},
- "Your operating system version does not support the method that MongoDB uses to detect "
- "impending page faults. This may result in slower performance for certain use cases.");
- }
#ifdef __linux__
if (boost::filesystem::exists("/proc/vz") && !boost::filesystem::exists("/proc/bc")) {
LOGV2_OPTIONS(22161,
{logv2::LogTag::kStartupWarnings},
"You are running in OpenVZ which can cause issues on versions of RHEL older "
- "than RHEL6.");
+ "than RHEL6");
}
bool hasMultipleNumaNodes = false;
@@ -196,7 +189,7 @@ void logMongodStartupWarnings(const StorageGlobalParams& storageParams,
if (f.fail()) {
LOGV2_WARNING_OPTIONS(22200,
{logv2::LogTag::kStartupWarnings},
- "failed to read from /proc/self/numa_maps",
+ "Failed to read from /proc/self/numa_maps",
"error"_attr = errnoWithDescription());
} else {
// skip over pointer
@@ -204,7 +197,8 @@ void logMongodStartupWarnings(const StorageGlobalParams& storageParams,
if ((where == std::string::npos) || (++where == line.size())) {
LOGV2_WARNING_OPTIONS(22165,
{logv2::LogTag::kStartupWarnings},
- "cannot parse numa_maps at line",
+ "Cannot parse numa_maps at line: {line}",
+ "Cannot parse numa_maps",
"line"_attr = line);
}
// if the text following the space doesn't begin with 'interleave', then
@@ -260,14 +254,14 @@ void logMongodStartupWarnings(const StorageGlobalParams& storageParams,
LOGV2_WARNING_OPTIONS(22178,
{logv2::LogTag::kStartupWarnings},
TRANSPARENT_HUGE_PAGES_DIR
- "/enabled is 'always'. We suggest setting it to 'never'.");
+ "/enabled is 'always'. We suggest setting it to 'never'");
}
} else if (transparentHugePagesEnabledResult.getStatus().code() !=
ErrorCodes::NonExistentPath) {
LOGV2_WARNING_OPTIONS(22202,
{logv2::LogTag::kStartupWarnings},
"Failed to read " TRANSPARENT_HUGE_PAGES_DIR "/enabled",
- "reason"_attr =
+ "error"_attr =
transparentHugePagesEnabledResult.getStatus().reason());
}
@@ -279,14 +273,13 @@ void logMongodStartupWarnings(const StorageGlobalParams& storageParams,
LOGV2_WARNING_OPTIONS(22181,
{logv2::LogTag::kStartupWarnings},
TRANSPARENT_HUGE_PAGES_DIR
- "/defrag is 'always'. We suggest setting it to 'never'.");
+ "/defrag is 'always'. We suggest setting it to 'never'");
}
} else if (transparentHugePagesDefragResult.getStatus().code() != ErrorCodes::NonExistentPath) {
LOGV2_WARNING_OPTIONS(22204,
{logv2::LogTag::kStartupWarnings},
"Failed to read " TRANSPARENT_HUGE_PAGES_DIR "/defrag",
- "reason"_attr =
- transparentHugePagesDefragResult.getStatus().reason());
+ "error"_attr = transparentHugePagesDefragResult.getStatus().reason());
}
#endif // __linux__
@@ -297,16 +290,21 @@ void logMongodStartupWarnings(const StorageGlobalParams& storageParams,
if (!getrlimit(RLIMIT_NOFILE, &rlnofile)) {
if (rlnofile.rlim_cur < minNumFiles) {
- LOGV2_WARNING_OPTIONS(22184,
- {logv2::LogTag::kStartupWarnings},
- "soft rlimits too low.",
- "currentValue"_attr = rlnofile.rlim_cur,
- "recommendedMinimum"_attr = minNumFiles);
+ LOGV2_WARNING_OPTIONS(
+ 22184,
+ {logv2::LogTag::kStartupWarnings},
+ "Soft rlimits is {currentValue}, recommended minimum is {recommendedMinimum}",
+ "Soft rlimits too low",
+ "currentValue"_attr = rlnofile.rlim_cur,
+ "recommendedMinimum"_attr = minNumFiles);
}
} else {
const auto errmsg = errnoWithDescription();
- LOGV2_WARNING_OPTIONS(
- 22186, {logv2::LogTag::kStartupWarnings}, "getrlimit failed", "error"_attr = errmsg);
+ LOGV2_WARNING_OPTIONS(22186,
+ {logv2::LogTag::kStartupWarnings},
+ "getrlimit failed: {error}",
+ "getrlimit failed",
+ "error"_attr = errmsg);
}
// Solaris does not have RLIMIT_MEMLOCK, these are exposed via getrctl(2) instead
@@ -318,23 +316,22 @@ void logMongodStartupWarnings(const StorageGlobalParams& storageParams,
if (!getrlimit(RLIMIT_MEMLOCK, &rlmemlock)) {
if ((rlmemlock.rlim_cur / ProcessInfo::getPageSize()) < minLockedPages) {
- LOGV2_OPTIONS(22187, {logv2::LogTag::kStartupWarnings}, "");
- LOGV2_OPTIONS(
+ LOGV2_WARNING_OPTIONS(
22188,
{logv2::LogTag::kStartupWarnings},
- "** WARNING: soft rlimits too low. The locked memory size is {rlmemlock_rlim_cur} "
- "bytes, it should be at least {minLockedPages_ProcessInfo_getPageSize} bytes",
- "rlmemlock_rlim_cur"_attr = rlmemlock.rlim_cur,
- "minLockedPages_ProcessInfo_getPageSize"_attr =
- minLockedPages * ProcessInfo::getPageSize());
+ "** WARNING: Soft rlimits too low. The locked memory size is {lockedMemoryBytes} "
+ "bytes, it should be at least {minLockedMemoryBytes} bytes",
+ "Soft rlimits too low",
+ "lockedMemoryBytes"_attr = rlmemlock.rlim_cur,
+ "minLockedMemoryBytes"_attr = minLockedPages * ProcessInfo::getPageSize());
}
} else {
const auto errmsg = errnoWithDescription();
- LOGV2_OPTIONS(22189, {logv2::LogTag::kStartupWarnings}, "");
- LOGV2_OPTIONS(22190,
- {logv2::LogTag::kStartupWarnings},
- "** WARNING: getrlimit failed. {errmsg}",
- "errmsg"_attr = errmsg);
+ LOGV2_WARNING_OPTIONS(22190,
+ {logv2::LogTag::kStartupWarnings},
+ "** WARNING: getrlimit failed: {error}",
+ "getrlimit failed",
+ "error"_attr = errmsg);
}
#endif
#endif
@@ -347,7 +344,7 @@ void logMongodStartupWarnings(const StorageGlobalParams& storageParams,
{logv2::LogTag::kStartupWarnings},
"You are running on a NUMA machine. We suggest disabling NUMA in the "
"machine BIOS by enabling interleaving to avoid performance "
- "problems. See your BIOS documentation for more information.");
+ "problems. See your BIOS documentation for more information");
}
#endif // #ifdef _WIN32
@@ -356,7 +353,7 @@ void logMongodStartupWarnings(const StorageGlobalParams& storageParams,
LOGV2_OPTIONS(
22197,
{logv2::LogTag::kStartupWarnings},
- "The ephemeralForTest storage engine is for testing only. Do not use in production.");
+ "The ephemeralForTest storage engine is for testing only. Do not use in production");
}
}
} // namespace mongo