summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorHenrik Edin <henrik.edin@mongodb.com>2020-02-20 16:54:57 -0500
committerEvergreen Agent <no-reply@evergreen.mongodb.com>2020-02-23 18:43:31 +0000
commite8f1d29de757d41f7d082d5ca1e667d1b8938741 (patch)
tree40a89fd7ef888cca91c413f4b5ecf981afcbdd99
parent68dfffe822f1888f221b265dc7974e9f98ea383c (diff)
downloadmongo-e8f1d29de757d41f7d082d5ca1e667d1b8938741.tar.gz
SERVER-46156 Deprecate LogstreamBuilder and more manual log conversions
* Remove setPlainConsoleLogger() * Update errorcodes linter to work with custom user defined log macros rename src/mongo/embedded/{embedded_log_appender.h => embedded_log_backend.h} (61%)
-rwxr-xr-xbuildscripts/errorcodes.py3
-rw-r--r--src/mongo/db/catalog/index_catalog.cpp13
-rw-r--r--src/mongo/db/catalog/index_catalog.h2
-rw-r--r--src/mongo/db/catalog/multi_index_block.cpp26
-rw-r--r--src/mongo/db/catalog/multi_index_block.h3
-rw-r--r--src/mongo/db/commands/kill_operations_command.cpp2
-rw-r--r--src/mongo/db/curop.cpp9
-rw-r--r--src/mongo/db/curop.h3
-rw-r--r--src/mongo/db/mongod_options.cpp14
-rw-r--r--src/mongo/db/operation_key_manager.cpp10
-rw-r--r--src/mongo/db/ops/write_ops_exec.cpp3
-rw-r--r--src/mongo/db/record_id.h4
-rw-r--r--src/mongo/db/repl/member_id.h8
-rw-r--r--src/mongo/db/repl/oplog_applier_impl.cpp3
-rw-r--r--src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp203
-rw-r--r--src/mongo/db/repl/topology_coordinator.cpp35
-rw-r--r--src/mongo/db/s/transaction_coordinator_structures.cpp13
-rw-r--r--src/mongo/db/s/transaction_coordinator_structures.h5
-rw-r--r--src/mongo/db/service_entry_point_common.cpp2
-rw-r--r--src/mongo/db/storage/storage_engine_impl.cpp18
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp46
-rw-r--r--src/mongo/db/transaction_participant.cpp2
-rw-r--r--src/mongo/embedded/embedded.cpp13
-rw-r--r--src/mongo/embedded/embedded_log_backend.h (renamed from src/mongo/embedded/embedded_log_appender.h)52
-rw-r--r--src/mongo/embedded/mongo_embedded/mongo_embedded.cpp59
-rw-r--r--src/mongo/logger/log_severity_limiter.h4
-rw-r--r--src/mongo/logger/logstream_builder.cpp27
-rw-r--r--src/mongo/logger/logstream_builder.h70
-rw-r--r--src/mongo/logv2/log_domain_global.cpp16
-rw-r--r--src/mongo/logv2/log_domain_global.h2
-rw-r--r--src/mongo/logv2/log_format.h2
-rw-r--r--src/mongo/logv2/log_options.h2
-rw-r--r--src/mongo/s/async_requests_sender.cpp18
-rw-r--r--src/mongo/s/balancer_configuration.cpp11
-rw-r--r--src/mongo/s/service_entry_point_mongos.cpp4
-rw-r--r--src/mongo/s/transaction_router.cpp7
-rw-r--r--src/mongo/s/version_mongos.cpp7
-rw-r--r--src/mongo/scripting/mozjs/global.cpp27
-rw-r--r--src/mongo/shell/dbshell.cpp44
-rw-r--r--src/mongo/shell/shell_options.cpp10
-rw-r--r--src/mongo/shell/shell_utils.cpp20
-rw-r--r--src/mongo/shell/shell_utils_launcher.cpp7
-rw-r--r--src/mongo/transport/session_asio.h6
-rw-r--r--src/mongo/unittest/unittest.cpp10
-rw-r--r--src/mongo/unittest/unittest.h4
-rw-r--r--src/mongo/util/destructor_guard.h34
-rw-r--r--src/mongo/util/log.cpp7
-rw-r--r--src/mongo/util/log.h112
-rw-r--r--src/mongo/util/log_with_sampling.h4
-rw-r--r--src/mongo/util/log_with_sampling_test.cpp42
-rw-r--r--src/third_party/s2/base/logging_mongo.cc8
51 files changed, 606 insertions, 450 deletions
diff --git a/buildscripts/errorcodes.py b/buildscripts/errorcodes.py
index 1c100dfc985..de3a8175bf7 100755
--- a/buildscripts/errorcodes.py
+++ b/buildscripts/errorcodes.py
@@ -47,8 +47,7 @@ def parse_source_files(callback):
re.compile(r"(?:DB|Assertion)Exception\s*[({]\s*(\d+)", re.MULTILINE),
re.compile(r"fassert(?:Failed)?(?:WithStatus)?(?:NoTrace)?(?:StatusOK)?\s*\(\s*(\d+)",
re.MULTILINE),
- re.compile(r"LOGV2(?:_INFO|_WARNING|_ERROR|_FATAL|_DEBUG)?(?:_OPTIONS)?\s*\(\s*(\d+)",
- re.MULTILINE),
+ re.compile(r"LOGV2(?:\w*)?\s*\(\s*(\d+)", re.MULTILINE),
re.compile(r"ErrorCodes::Error\s*[({]\s*(\d+)", re.MULTILINE)
]
diff --git a/src/mongo/db/catalog/index_catalog.cpp b/src/mongo/db/catalog/index_catalog.cpp
index 682a359885b..60594402b55 100644
--- a/src/mongo/db/catalog/index_catalog.cpp
+++ b/src/mongo/db/catalog/index_catalog.cpp
@@ -99,4 +99,17 @@ const IndexCatalogEntry* AllIndexesIterator::_advance() {
++_iterator;
return entry;
}
+
+StringData toString(IndexBuildMethod method) {
+ switch (method) {
+ case IndexBuildMethod::kHybrid:
+ return "Hybrid"_sd;
+ case IndexBuildMethod::kBackground:
+ return "Background"_sd;
+ case IndexBuildMethod::kForeground:
+ return "Foreground"_sd;
+ }
+
+ MONGO_UNREACHABLE;
+}
} // namespace mongo
diff --git a/src/mongo/db/catalog/index_catalog.h b/src/mongo/db/catalog/index_catalog.h
index 9194874f386..b140a2f3405 100644
--- a/src/mongo/db/catalog/index_catalog.h
+++ b/src/mongo/db/catalog/index_catalog.h
@@ -77,6 +77,8 @@ enum class IndexBuildMethod {
kForeground,
};
+StringData toString(IndexBuildMethod method);
+
enum class CreateIndexEntryFlags : int {
kNone = 0x0,
/**
diff --git a/src/mongo/db/catalog/multi_index_block.cpp b/src/mongo/db/catalog/multi_index_block.cpp
index 0caa2d31609..3ea267de165 100644
--- a/src/mongo/db/catalog/multi_index_block.cpp
+++ b/src/mongo/db/catalog/multi_index_block.cpp
@@ -921,34 +921,22 @@ void MultiIndexBlock::_setStateToAbortedIfNotCommitted(StringData reason) {
_abortReason = reason.toString();
}
-std::ostream& operator<<(std::ostream& os, const MultiIndexBlock::State& state) {
+StringData toString(MultiIndexBlock::State state) {
switch (state) {
case MultiIndexBlock::State::kUninitialized:
- return os << "Uninitialized";
+ return "Uninitialized"_sd;
case MultiIndexBlock::State::kRunning:
- return os << "Running";
+ return "Running"_sd;
case MultiIndexBlock::State::kCommitted:
- return os << "Committed";
+ return "Committed"_sd;
case MultiIndexBlock::State::kAborted:
- return os << "Aborted";
+ return "Aborted"_sd;
}
MONGO_UNREACHABLE;
}
-logger::LogstreamBuilder& operator<<(logger::LogstreamBuilder& out,
- const IndexBuildMethod& method) {
- switch (method) {
- case IndexBuildMethod::kHybrid:
- out.stream() << "Hybrid";
- break;
- case IndexBuildMethod::kBackground:
- out.stream() << "Background";
- break;
- case IndexBuildMethod::kForeground:
- out.stream() << "Foreground";
- break;
- }
- return out;
+std::ostream& operator<<(std::ostream& os, const MultiIndexBlock::State& state) {
+ return os << toString(state);
}
} // namespace mongo
diff --git a/src/mongo/db/catalog/multi_index_block.h b/src/mongo/db/catalog/multi_index_block.h
index 23294c8709e..c6a2058589b 100644
--- a/src/mongo/db/catalog/multi_index_block.h
+++ b/src/mongo/db/catalog/multi_index_block.h
@@ -325,6 +325,7 @@ public:
* For testing only. Callers should not have to query the state of the MultiIndexBlock directly.
*/
enum class State { kUninitialized, kRunning, kCommitted, kAborted };
+ StringData toString(State state);
State getState_forTest() const;
private:
@@ -387,6 +388,4 @@ private:
// For unit tests that need to check MultiIndexBlock states.
// The ASSERT_*() macros use this function to print the value of 'state' when the predicate fails.
std::ostream& operator<<(std::ostream& os, const MultiIndexBlock::State& state);
-
-logger::LogstreamBuilder& operator<<(logger::LogstreamBuilder& out, const IndexBuildMethod& method);
} // namespace mongo
diff --git a/src/mongo/db/commands/kill_operations_command.cpp b/src/mongo/db/commands/kill_operations_command.cpp
index 3ddc052a51a..037eeb554e5 100644
--- a/src/mongo/db/commands/kill_operations_command.cpp
+++ b/src/mongo/db/commands/kill_operations_command.cpp
@@ -60,7 +60,7 @@ public:
void typedRun(OperationContext* opCtx) {
auto opKiller = OperationKiller(opCtx->getClient());
for (auto& opKey : request().getOperationKeys()) {
- LOGV2(4615601,
+ LOGV2(46156011,
"Attempting to kill operation with OperationKey '{operationKey}'",
"operationKey"_attr = opKey);
opKiller.killOperation(OperationKey(opKey));
diff --git a/src/mongo/db/curop.cpp b/src/mongo/db/curop.cpp
index 7a4dcec9f1e..7b12045121b 100644
--- a/src/mongo/db/curop.cpp
+++ b/src/mongo/db/curop.cpp
@@ -422,7 +422,7 @@ void CurOp::raiseDbProfileLevel(int dbProfileLevel) {
}
bool CurOp::completeAndLogOperation(OperationContext* opCtx,
- logger::LogComponent component,
+ logv2::LogComponent component,
boost::optional<size_t> responseLength,
boost::optional<long long> slowMsOverride,
bool forceLog) {
@@ -472,13 +472,13 @@ bool CurOp::completeAndLogOperation(OperationContext* opCtx,
_debug.storageStats = opCtx->recoveryUnit()->getOperationStatistics();
} else {
LOGV2_WARNING_OPTIONS(20525,
- {logComponentV1toV2(component)},
+ {component},
"Unable to gather storage statistics for a slow "
"operation due to lock aquire timeout");
}
} catch (const ExceptionForCat<ErrorCategory::Interruption>&) {
LOGV2_WARNING_OPTIONS(20526,
- {logComponentV1toV2(component)},
+ {component},
"Unable to gather storage statistics for a slow "
"operation due to interrupt");
}
@@ -495,7 +495,8 @@ bool CurOp::completeAndLogOperation(OperationContext* opCtx,
LOGV2(51803, "slow query", attr);
// TODO SERVER-46219: Log also with old log system to not break unit tests
- log(component) << _debug.report(opCtx, (lockerInfo ? &lockerInfo->stats : nullptr));
+ log(logComponentV2toV1(component))
+ << _debug.report(opCtx, (lockerInfo ? &lockerInfo->stats : nullptr));
}
// Return 'true' if this operation should also be added to the profiler.
diff --git a/src/mongo/db/curop.h b/src/mongo/db/curop.h
index f563cb05ed8..41a5e5da50a 100644
--- a/src/mongo/db/curop.h
+++ b/src/mongo/db/curop.h
@@ -37,6 +37,7 @@
#include "mongo/db/server_options.h"
#include "mongo/db/write_concern_options.h"
#include "mongo/logv2/attribute_storage.h"
+#include "mongo/logv2/log_component.h"
#include "mongo/platform/atomic_word.h"
#include "mongo/util/diagnostic_info.h"
#include "mongo/util/progress_meter.h"
@@ -331,7 +332,7 @@ public:
* operation should also be profiled.
*/
bool completeAndLogOperation(OperationContext* opCtx,
- logger::LogComponent logComponent,
+ logv2::LogComponent logComponent,
boost::optional<size_t> responseLength = boost::none,
boost::optional<long long> slowMsOverride = boost::none,
bool forceLog = false);
diff --git a/src/mongo/db/mongod_options.cpp b/src/mongo/db/mongod_options.cpp
index 9d0030df49e..11c769ca95c 100644
--- a/src/mongo/db/mongod_options.cpp
+++ b/src/mongo/db/mongod_options.cpp
@@ -54,6 +54,8 @@
#include "mongo/db/server_options_nongeneral_gen.h"
#include "mongo/db/server_options_server_helpers.h"
#include "mongo/logv2/log.h"
+#include "mongo/logv2/log_domain_global.h"
+#include "mongo/logv2/log_manager.h"
#include "mongo/util/log.h"
#include "mongo/util/net/ssl_options.h"
#include "mongo/util/options_parser/startup_options.h"
@@ -125,15 +127,23 @@ bool handlePreValidationMongodOptions(const moe::Environment& params,
printMongodHelp(moe::startupOptions);
return false;
}
+
+ auto setPlainLogFormat = []() {
+ auto& globalDomain = logv2::LogManager::global().getGlobalDomainInternal();
+ logv2::LogDomainGlobal::ConfigurationOptions config = globalDomain.config();
+ config.format = logv2::LogFormat::kPlain;
+ invariant(globalDomain.configure(config).isOK());
+ };
+
if (params.count("version") && params["version"].as<bool>() == true) {
- setPlainConsoleLogger();
+ setPlainLogFormat();
auto&& vii = VersionInfoInterface::instance();
LOGV2(20876, "{mongodVersion_vii}", "mongodVersion_vii"_attr = mongodVersion(vii));
vii.logBuildInfo();
return false;
}
if (params.count("sysinfo") && params["sysinfo"].as<bool>() == true) {
- setPlainConsoleLogger();
+ setPlainLogFormat();
sysRuntimeInfo();
return false;
}
diff --git a/src/mongo/db/operation_key_manager.cpp b/src/mongo/db/operation_key_manager.cpp
index 2f545083098..51a1e41ac31 100644
--- a/src/mongo/db/operation_key_manager.cpp
+++ b/src/mongo/db/operation_key_manager.cpp
@@ -27,7 +27,7 @@
* it in the license file.
*/
-#define MONGO_LOG_DEFAULT_COMPONENT ::mongo::logger::LogComponent::kCommand
+#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand
#include "mongo/platform/basic.h"
@@ -36,8 +36,8 @@
#include <fmt/format.h>
#include "mongo/base/error_codes.h"
+#include "mongo/logv2/log.h"
#include "mongo/util/assert_util.h"
-#include "mongo/util/log.h"
namespace mongo {
namespace {
@@ -57,7 +57,11 @@ OperationKeyManager& OperationKeyManager::get(ServiceContext* serviceContext) {
void OperationKeyManager::add(const OperationKey& key, OperationId id) {
using namespace fmt::literals;
- LOG(2) << "Mapping OperationKey {} to OperationId {}"_format(key.toString(), id);
+ LOGV2_DEBUG(4615636,
+ 2,
+ "Mapping OperationKey {operationKey} to OperationId {operationId}",
+ "operationKey"_attr = key.toString(),
+ "operationId"_attr = id);
stdx::lock_guard lk(_mutex);
auto result = _idByOperationKey.emplace(key, id).second;
diff --git a/src/mongo/db/ops/write_ops_exec.cpp b/src/mongo/db/ops/write_ops_exec.cpp
index 8b39b8aa921..f30b1bd979d 100644
--- a/src/mongo/db/ops/write_ops_exec.cpp
+++ b/src/mongo/db/ops/write_ops_exec.cpp
@@ -28,6 +28,7 @@
*/
#define MONGO_LOG_DEFAULT_COMPONENT ::mongo::logger::LogComponent::kWrite
+#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kWrite
#include "mongo/platform/basic.h"
@@ -141,7 +142,7 @@ void finishCurOp(OperationContext* opCtx, CurOp* curOp) {
// Mark the op as complete, and log it if appropriate. Returns a boolean indicating whether
// this op should be sampled for profiling.
const bool shouldSample =
- curOp->completeAndLogOperation(opCtx, MONGO_LOG_DEFAULT_COMPONENT);
+ curOp->completeAndLogOperation(opCtx, MONGO_LOGV2_DEFAULT_COMPONENT);
if (curOp->shouldDBProfile(shouldSample)) {
// Stash the current transaction so that writes to the profile collection are not
diff --git a/src/mongo/db/record_id.h b/src/mongo/db/record_id.h
index dfc75fce7be..42ab3062033 100644
--- a/src/mongo/db/record_id.h
+++ b/src/mongo/db/record_id.h
@@ -202,8 +202,10 @@ inline std::ostream& operator<<(std::ostream& stream, const boost::optional<Reco
return stream << "RecordId(" << (id ? id.get().repr() : 0) << ')';
}
-inline logger::LogstreamBuilder& operator<<(logger::LogstreamBuilder& stream, const RecordId& id) {
+inline logger::LogstreamBuilderDeprecated& operator<<(logger::LogstreamBuilderDeprecated& stream,
+ const RecordId& id) {
stream.stream() << id;
return stream;
}
+
} // namespace mongo
diff --git a/src/mongo/db/repl/member_id.h b/src/mongo/db/repl/member_id.h
index 9b759d0ef90..4f2db2b342c 100644
--- a/src/mongo/db/repl/member_id.h
+++ b/src/mongo/db/repl/member_id.h
@@ -31,7 +31,8 @@
#include <iostream>
-#include "mongo/logger/logstream_builder.h"
+#include "mongo/bson/util/builder.h"
+#include "mongo/util/str.h"
namespace mongo {
namespace repl {
@@ -91,11 +92,6 @@ public:
return stream << id.toString();
}
- friend logger::LogstreamBuilder& operator<<(logger::LogstreamBuilder& stream,
- const MemberId& id) {
- return stream << id.toString();
- }
-
private:
// The default value for an unset memberId.
static const int kUninitializedMemberId = -1;
diff --git a/src/mongo/db/repl/oplog_applier_impl.cpp b/src/mongo/db/repl/oplog_applier_impl.cpp
index 769f5350921..e196704966f 100644
--- a/src/mongo/db/repl/oplog_applier_impl.cpp
+++ b/src/mongo/db/repl/oplog_applier_impl.cpp
@@ -28,6 +28,7 @@
*/
#define MONGO_LOG_DEFAULT_COMPONENT ::mongo::logger::LogComponent::kReplication
+#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kReplication
#include "mongo/db/repl/oplog_applier_impl.h"
@@ -109,7 +110,7 @@ Status finishAndLogApply(OperationContext* opCtx,
auto opDuration = durationCount<Milliseconds>(applyEndTime - applyStartTime);
if (shouldLogSlowOpWithSampling(opCtx,
- MONGO_LOG_DEFAULT_COMPONENT,
+ MONGO_LOGV2_DEFAULT_COMPONENT,
Milliseconds(opDuration),
Milliseconds(serverGlobalParams.slowMS))
.first) {
diff --git a/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp b/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp
index 3b928f80df0..4a8c1ce7721 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp
@@ -27,11 +27,13 @@
* it in the license file.
*/
-#define MONGO_LOG_DEFAULT_COMPONENT ::mongo::logger::LogComponent::kReplication
-#define LOG_FOR_ELECTION(level) \
- MONGO_LOG_COMPONENT(level, ::mongo::logger::LogComponent::kReplicationElection)
-#define LOG_FOR_HEARTBEATS(level) \
- MONGO_LOG_COMPONENT(level, ::mongo::logger::LogComponent::kReplicationHeartbeats)
+#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kReplication
+#define LOGV2_FOR_ELECTION(ID, DLEVEL, MESSAGE, ...) \
+ LOGV2_DEBUG_OPTIONS( \
+ ID, DLEVEL, {logv2::LogComponent::kReplicationElection}, MESSAGE, ##__VA_ARGS__)
+#define LOGV2_FOR_HEARTBEATS(ID, DLEVEL, MESSAGE, ...) \
+ LOGV2_DEBUG_OPTIONS( \
+ ID, DLEVEL, {logv2::LogComponent::kReplicationHeartbeats}, MESSAGE, ##__VA_ARGS__)
#include "mongo/platform/basic.h"
@@ -60,7 +62,6 @@
#include "mongo/rpc/metadata/repl_set_metadata.h"
#include "mongo/util/assert_util.h"
#include "mongo/util/fail_point.h"
-#include "mongo/util/log.h"
#include "mongo/util/str.h"
#include "mongo/util/time_support.h"
@@ -117,16 +118,23 @@ void ReplicationCoordinatorImpl::_doMemberHeartbeat(executor::TaskExecutor::Call
return _handleHeartbeatResponse(cbData, targetIndex);
};
- LOG_FOR_HEARTBEATS(2) << "Sending heartbeat (requestId: " << request.id << ") to " << target
- << ", " << heartbeatObj;
+ LOGV2_FOR_HEARTBEATS(4615670,
+ 2,
+ "Sending heartbeat (requestId: {request_id}) to {target} {heartbeatObj}",
+ "request_id"_attr = request.id,
+ "target"_attr = target,
+ "heartbeatObj"_attr = heartbeatObj);
_trackHeartbeatHandle_inlock(_replExecutor->scheduleRemoteCommand(request, callback));
}
void ReplicationCoordinatorImpl::_scheduleHeartbeatToTarget_inlock(const HostAndPort& target,
int targetIndex,
Date_t when) {
- LOG_FOR_HEARTBEATS(2) << "Scheduling heartbeat to " << target << " at "
- << dateToISOStringUTC(when);
+ LOGV2_FOR_HEARTBEATS(4615618,
+ 2,
+ "Scheduling heartbeat to {target} at {dateToISOStringUTC_when}",
+ "target"_attr = target,
+ "dateToISOStringUTC_when"_attr = dateToISOStringUTC(when));
_trackHeartbeatHandle_inlock(_replExecutor->scheduleWorkAt(
when, [=](const executor::TaskExecutor::CallbackArgs& cbData) {
_doMemberHeartbeat(cbData, target, targetIndex);
@@ -146,8 +154,12 @@ void ReplicationCoordinatorImpl::_handleHeartbeatResponse(
const HostAndPort& target = cbData.request.target;
if (responseStatus == ErrorCodes::CallbackCanceled) {
- LOG_FOR_HEARTBEATS(2) << "Received response to heartbeat (requestId: " << cbData.request.id
- << ") from " << target << " but the heartbeat was cancelled.";
+ LOGV2_FOR_HEARTBEATS(4615619,
+ 2,
+ "Received response to heartbeat (requestId: {cbData_request_id}) from "
+ "{target} but the heartbeat was cancelled.",
+ "cbData_request_id"_attr = cbData.request.id,
+ "target"_attr = target);
return;
}
@@ -159,8 +171,13 @@ void ReplicationCoordinatorImpl::_handleHeartbeatResponse(
StatusWith<rpc::ReplSetMetadata> replMetadata =
rpc::ReplSetMetadata::readFromMetadata(cbData.response.data);
- LOG_FOR_HEARTBEATS(2) << "Received response to heartbeat (requestId: " << cbData.request.id
- << ") from " << target << ", " << resp;
+ LOGV2_FOR_HEARTBEATS(
+ 4615620,
+ 2,
+ "Received response to heartbeat (requestId: {cbData_request_id}) from {target}, {resp}",
+ "cbData_request_id"_attr = cbData.request.id,
+ "target"_attr = target,
+ "resp"_attr = resp);
// Reject heartbeat responses (and metadata) from nodes with mismatched replica set IDs.
// It is problematic to perform this check in the heartbeat reconfiguring logic because it
@@ -209,12 +226,18 @@ void ReplicationCoordinatorImpl::_handleHeartbeatResponse(
// Postpone election timeout if we have a successful heartbeat response from the primary.
if (hbResponse.hasState() && hbResponse.getState().primary() &&
hbResponse.getTerm() == _topCoord->getTerm()) {
- LOG_FOR_ELECTION(4) << "Postponing election timeout due to heartbeat from primary";
+ LOGV2_FOR_ELECTION(
+ 4615659, 4, "Postponing election timeout due to heartbeat from primary");
_cancelAndRescheduleElectionTimeout_inlock();
}
} else {
- LOG_FOR_HEARTBEATS(2) << "Error in heartbeat (requestId: " << cbData.request.id << ") to "
- << target << ", response status: " << responseStatus;
+ LOGV2_FOR_HEARTBEATS(4615621,
+ 2,
+ "Error in heartbeat (requestId: {cbData_request_id}) to {target}, "
+ "response status: {responseStatus}",
+ "cbData_request_id"_attr = cbData.request.id,
+ "target"_attr = target,
+ "responseStatus"_attr = responseStatus);
hbStatusResponse = StatusWith<ReplSetHeartbeatResponse>(responseStatus);
}
@@ -292,7 +315,10 @@ stdx::unique_lock<Latch> ReplicationCoordinatorImpl::_handleHeartbeatResponseAct
Milliseconds priorityTakeoverDelay = _rsConfig.getPriorityTakeoverDelay(_selfIndex);
Milliseconds randomOffset = _getRandomizedElectionOffset_inlock();
_priorityTakeoverWhen = _replExecutor->now() + priorityTakeoverDelay + randomOffset;
- LOG_FOR_ELECTION(0) << "Scheduling priority takeover at " << _priorityTakeoverWhen;
+ LOGV2_FOR_ELECTION(4615601,
+ 0,
+ "Scheduling priority takeover at {priorityTakeoverWhen}",
+ "priorityTakeoverWhen"_attr = _priorityTakeoverWhen);
_priorityTakeoverCbh = _scheduleWorkAt(
_priorityTakeoverWhen, [=](const mongo::executor::TaskExecutor::CallbackArgs&) {
_startElectSelfIfEligibleV1(StartElectionReasonEnum::kPriorityTakeover);
@@ -305,7 +331,10 @@ stdx::unique_lock<Latch> ReplicationCoordinatorImpl::_handleHeartbeatResponseAct
if (!_catchupTakeoverCbh.isValid() && !_priorityTakeoverCbh.isValid()) {
Milliseconds catchupTakeoverDelay = _rsConfig.getCatchUpTakeoverDelay();
_catchupTakeoverWhen = _replExecutor->now() + catchupTakeoverDelay;
- LOG_FOR_ELECTION(0) << "Scheduling catchup takeover at " << _catchupTakeoverWhen;
+ LOGV2_FOR_ELECTION(4615648,
+ 0,
+ "Scheduling catchup takeover at {catchupTakeoverWhen}",
+ "catchupTakeoverWhen"_attr = _catchupTakeoverWhen);
_catchupTakeoverCbh = _scheduleWorkAt(
_catchupTakeoverWhen, [=](const mongo::executor::TaskExecutor::CallbackArgs&) {
_startElectSelfIfEligibleV1(StartElectionReasonEnum::kCatchupTakeover);
@@ -456,15 +485,21 @@ void ReplicationCoordinatorImpl::_scheduleHeartbeatReconfig_inlock(const ReplSet
switch (_rsConfigState) {
case kConfigUninitialized:
case kConfigSteady:
- LOG_FOR_HEARTBEATS(1) << "Received new config via heartbeat with "
- << newConfig.getConfigVersionAndTerm();
+ LOGV2_FOR_HEARTBEATS(
+ 4615622,
+ 1,
+ "Received new config via heartbeat with {newConfig_getConfigVersionAndTerm}",
+ "newConfig_getConfigVersionAndTerm"_attr = newConfig.getConfigVersionAndTerm());
break;
case kConfigInitiating:
case kConfigReconfiguring:
case kConfigHBReconfiguring:
- LOG_FOR_HEARTBEATS(1) << "Ignoring new configuration with "
- << newConfig.getConfigVersionAndTerm()
- << " because already in the midst of a configuration process.";
+ LOGV2_FOR_HEARTBEATS(
+ 4615623,
+ 1,
+ "Ignoring new configuration with {newConfig_getConfigVersionAndTerm} because "
+ "already in the midst of a configuration process.",
+ "newConfig_getConfigVersionAndTerm"_attr = newConfig.getConfigVersionAndTerm());
return;
case kConfigPreStart:
case kConfigStartingUp:
@@ -479,9 +514,12 @@ void ReplicationCoordinatorImpl::_scheduleHeartbeatReconfig_inlock(const ReplSet
invariant(!_rsConfig.isInitialized() ||
_rsConfig.getConfigVersionAndTerm() < newConfig.getConfigVersionAndTerm());
if (auto electionFinishedEvent = _cancelElectionIfNeeded_inlock()) {
- LOG_FOR_HEARTBEATS(2) << "Rescheduling heartbeat reconfig to config with "
- << newConfig.getConfigVersionAndTerm()
- << " to be processed after election is cancelled.";
+ LOGV2_FOR_HEARTBEATS(
+ 4615624,
+ 2,
+ "Rescheduling heartbeat reconfig to config with {newConfig_getConfigVersionAndTerm} to "
+ "be processed after election is cancelled.",
+ "newConfig_getConfigVersionAndTerm"_attr = newConfig.getConfigVersionAndTerm());
_replExecutor
->onEvent(electionFinishedEvent,
@@ -519,8 +557,10 @@ void ReplicationCoordinatorImpl::_heartbeatReconfigStore(
// transitioning into the RS_REMOVED state. See SERVER-15740.
if (!_rsConfig.isInitialized()) {
invariant(_rsConfigState == kConfigHBReconfiguring);
- LOG_FOR_HEARTBEATS(1) << "Ignoring new configuration in heartbeat response because we "
- "are uninitialized and not a member of the new configuration";
+ LOGV2_FOR_HEARTBEATS(4615625,
+ 1,
+ "Ignoring new configuration in heartbeat response because we "
+ "are uninitialized and not a member of the new configuration");
_setConfigState_inlock(kConfigUninitialized);
return;
}
@@ -533,8 +573,12 @@ void ReplicationCoordinatorImpl::_heartbeatReconfigStore(
"it is invalid: {myIndex_getStatus}",
"myIndex_getStatus"_attr = myIndex.getStatus());
} else {
- LOG_FOR_HEARTBEATS(2) << "Config with " << newConfig.getConfigVersionAndTerm()
- << " validated for reconfig; persisting to disk.";
+ LOGV2_FOR_HEARTBEATS(4615626,
+ 2,
+ "Config with {newConfig_getConfigVersionAndTerm} validated for "
+ "reconfig; persisting to disk.",
+ "newConfig_getConfigVersionAndTerm"_attr =
+ newConfig.getConfigVersionAndTerm());
auto opCtx = cc().makeOperationContext();
auto status = _externalState->storeLocalConfigDocument(opCtx.get(), newConfig.toBSON());
@@ -571,8 +615,12 @@ void ReplicationCoordinatorImpl::_heartbeatReconfigStore(
shouldStartDataReplication = true;
}
- LOG_FOR_HEARTBEATS(2) << "New configuration with " << newConfig.getConfigVersionAndTerm()
- << " persisted to local storage; installing new config in memory";
+ LOGV2_FOR_HEARTBEATS(4615627,
+ 2,
+ "New configuration with {newConfig_getConfigVersionAndTerm} persisted "
+ "to local storage; installing new config in memory",
+ "newConfig_getConfigVersionAndTerm"_attr =
+ newConfig.getConfigVersionAndTerm());
}
_heartbeatReconfigFinish(cbd, newConfig, myIndex);
@@ -595,8 +643,10 @@ void ReplicationCoordinatorImpl::_heartbeatReconfigFinish(
}
if (MONGO_unlikely(blockHeartbeatReconfigFinish.shouldFail())) {
- LOG_FOR_HEARTBEATS(0) << "blockHeartbeatReconfigFinish fail point enabled. Rescheduling "
- "_heartbeatReconfigFinish until fail point is disabled.";
+ LOGV2_FOR_HEARTBEATS(4615628,
+ 0,
+ "blockHeartbeatReconfigFinish fail point enabled. Rescheduling "
+ "_heartbeatReconfigFinish until fail point is disabled.");
_replExecutor
->scheduleWorkAt(_replExecutor->now() + Milliseconds{10},
[=](const executor::TaskExecutor::CallbackArgs& cbData) {
@@ -613,9 +663,12 @@ void ReplicationCoordinatorImpl::_heartbeatReconfigFinish(
{
stdx::lock_guard<Latch> lk(_mutex);
if (auto electionFinishedEvent = _cancelElectionIfNeeded_inlock()) {
- LOG_FOR_HEARTBEATS(0)
- << "Waiting for election to complete before finishing reconfig to config with "
- << newConfig.getConfigVersionAndTerm();
+ LOGV2_FOR_HEARTBEATS(4615629,
+ 0,
+ "Waiting for election to complete before finishing reconfig to "
+ "config with {newConfig_getConfigVersionAndTerm}",
+ "newConfig_getConfigVersionAndTerm"_attr =
+ newConfig.getConfigVersionAndTerm());
// Wait for the election to complete and the node's Role to be set to follower.
_replExecutor
->onEvent(electionFinishedEvent,
@@ -728,7 +781,7 @@ void ReplicationCoordinatorImpl::_untrackHeartbeatHandle_inlock(
}
void ReplicationCoordinatorImpl::_cancelHeartbeats_inlock() {
- LOG_FOR_HEARTBEATS(2) << "Cancelling all heartbeats.";
+ LOGV2_FOR_HEARTBEATS(4615630, 2, "Cancelling all heartbeats.");
for (const auto& handle : _heartbeatHandles) {
_replExecutor->cancel(handle);
@@ -874,8 +927,10 @@ void ReplicationCoordinatorImpl::_cancelAndRescheduleElectionTimeout_inlock() {
logThrottleTime = now;
}
if (wasActive) {
- LOG_FOR_ELECTION(cancelAndRescheduleLogLevel)
- << "Canceling election timeout callback at " << _handleElectionTimeoutWhen;
+ LOGV2_FOR_ELECTION(4615649,
+ logSeverityV1toV2(cancelAndRescheduleLogLevel).toInt(),
+ "Canceling election timeout callback at {handleElectionTimeoutWhen}",
+ "handleElectionTimeoutWhen"_attr = _handleElectionTimeoutWhen);
_replExecutor->cancel(_handleElectionTimeoutCbh);
_handleElectionTimeoutCbh = CallbackHandle();
_handleElectionTimeoutWhen = Date_t();
@@ -889,10 +944,13 @@ void ReplicationCoordinatorImpl::_cancelAndRescheduleElectionTimeout_inlock() {
invariant(when > now);
if (wasActive) {
// The log level here is 4 once per second, otherwise 5.
- LOG_FOR_ELECTION(cancelAndRescheduleLogLevel)
- << "Rescheduling election timeout callback at " << when;
+ LOGV2_FOR_ELECTION(4615650,
+ logSeverityV1toV2(cancelAndRescheduleLogLevel).toInt(),
+ "Rescheduling election timeout callback at {when}",
+ "when"_attr = when);
} else {
- LOG_FOR_ELECTION(4) << "Scheduling election timeout callback at " << when;
+ LOGV2_FOR_ELECTION(
+ 4615651, 4, "Scheduling election timeout callback at {when}", "when"_attr = when);
}
_handleElectionTimeoutWhen = when;
_handleElectionTimeoutCbh =
@@ -927,7 +985,7 @@ void ReplicationCoordinatorImpl::_startElectSelfIfEligibleV1(WithLock,
_cancelPriorityTakeover_inlock();
_cancelAndRescheduleElectionTimeout_inlock();
if (_inShutdown) {
- LOG_FOR_ELECTION(0) << "Not starting an election, since we are shutting down";
+ LOGV2_FOR_ELECTION(4615654, 0, "Not starting an election, since we are shutting down");
return;
}
}
@@ -936,27 +994,40 @@ void ReplicationCoordinatorImpl::_startElectSelfIfEligibleV1(WithLock,
if (!status.isOK()) {
switch (reason) {
case StartElectionReasonEnum::kElectionTimeout:
- LOG_FOR_ELECTION(0)
- << "Not starting an election, since we are not electable due to: "
- << status.reason();
+ LOGV2_FOR_ELECTION(
+ 4615655,
+ 0,
+ "Not starting an election, since we are not electable due to: {status_reason}",
+ "status_reason"_attr = status.reason());
break;
case StartElectionReasonEnum::kPriorityTakeover:
- LOG_FOR_ELECTION(0) << "Not starting an election for a priority takeover, "
- << "since we are not electable due to: " << status.reason();
+ LOGV2_FOR_ELECTION(4615656,
+ 0,
+ "Not starting an election for a priority takeover, since we are "
+ "not electable due to: {status_reason}",
+ "status_reason"_attr = status.reason());
break;
case StartElectionReasonEnum::kStepUpRequest:
case StartElectionReasonEnum::kStepUpRequestSkipDryRun:
- LOG_FOR_ELECTION(0) << "Not starting an election for a replSetStepUp request, "
- << "since we are not electable due to: " << status.reason();
+ LOGV2_FOR_ELECTION(4615657,
+ 0,
+ "Not starting an election for a replSetStepUp request, since we "
+ "are not electable due to: {status_reason}",
+ "status_reason"_attr = status.reason());
break;
case StartElectionReasonEnum::kCatchupTakeover:
- LOG_FOR_ELECTION(0) << "Not starting an election for a catchup takeover, "
- << "since we are not electable due to: " << status.reason();
+ LOGV2_FOR_ELECTION(4615658,
+ 0,
+ "Not starting an election for a catchup takeover, since we are "
+ "not electable due to: {status_reason}",
+ "status_reason"_attr = status.reason());
break;
case StartElectionReasonEnum::kSingleNodePromptElection:
- LOG_FOR_ELECTION(0)
- << "Not starting an election for a single node replica set prompt election, "
- << "since we are not electable due to: " << status.reason();
+ LOGV2_FOR_ELECTION(4615653,
+ 0,
+ "Not starting an election for a single node replica set prompt "
+ "election, since we are not electable due to: {status_reason}",
+ "status_reason"_attr = status.reason());
break;
default:
MONGO_UNREACHABLE;
@@ -966,22 +1037,26 @@ void ReplicationCoordinatorImpl::_startElectSelfIfEligibleV1(WithLock,
switch (reason) {
case StartElectionReasonEnum::kElectionTimeout:
- LOG_FOR_ELECTION(0) << "Starting an election, since we've seen no PRIMARY in the past "
- << _rsConfig.getElectionTimeoutPeriod();
+ LOGV2_FOR_ELECTION(4615652,
+ 0,
+ "Starting an election, since we've seen no PRIMARY in the past "
+ "{rsConfig_getElectionTimeoutPeriod}",
+ "rsConfig_getElectionTimeoutPeriod"_attr =
+ _rsConfig.getElectionTimeoutPeriod());
break;
case StartElectionReasonEnum::kPriorityTakeover:
- LOG_FOR_ELECTION(0) << "Starting an election for a priority takeover";
+ LOGV2_FOR_ELECTION(4615660, 0, "Starting an election for a priority takeover");
break;
case StartElectionReasonEnum::kStepUpRequest:
case StartElectionReasonEnum::kStepUpRequestSkipDryRun:
- LOG_FOR_ELECTION(0) << "Starting an election due to step up request";
+ LOGV2_FOR_ELECTION(4615661, 0, "Starting an election due to step up request");
break;
case StartElectionReasonEnum::kCatchupTakeover:
- LOG_FOR_ELECTION(0) << "Starting an election for a catchup takeover";
+ LOGV2_FOR_ELECTION(4615662, 0, "Starting an election for a catchup takeover");
break;
case StartElectionReasonEnum::kSingleNodePromptElection:
- LOG_FOR_ELECTION(0)
- << "Starting an election due to single node replica set prompt election";
+ LOGV2_FOR_ELECTION(
+ 4615663, 0, "Starting an election due to single node replica set prompt election");
break;
default:
MONGO_UNREACHABLE;
diff --git a/src/mongo/db/repl/topology_coordinator.cpp b/src/mongo/db/repl/topology_coordinator.cpp
index 33d061f930f..c48c7df9595 100644
--- a/src/mongo/db/repl/topology_coordinator.cpp
+++ b/src/mongo/db/repl/topology_coordinator.cpp
@@ -306,12 +306,16 @@ HostAndPort TopologyCoordinator::chooseNewSyncSource(Date_t now,
return _syncSource;
} else if (_memberData.at(_currentPrimaryIndex).getLastAppliedOpTime() <
lastOpTimeFetched) {
- LOG(1) << "Cannot select a sync source because chaining is not allowed and the primary "
- "is behind me. Last oplog optime of primary {}: {}, my last fetched oplog "
- "optime: {}"_format(
- getCurrentPrimaryMember()->getHostAndPort(),
- _memberData.at(_currentPrimaryIndex).getLastAppliedOpTime().toBSON(),
- lastOpTimeFetched.toBSON());
+ LOGV2_DEBUG(
+ 4615639,
+ 1,
+ "Cannot select a sync source because chaining is not allowed and the primary "
+ "is behind me. Last oplog optime of primary {primary}: {primaryOpTime}, my "
+ "last fetched oplog "
+ "optime: {lastFetchedOpTime}",
+ "primary"_attr = getCurrentPrimaryMember()->getHostAndPort(),
+ "primaryOpTime"_attr = _memberData.at(_currentPrimaryIndex).getLastAppliedOpTime(),
+ "lastFetchedOpTime"_attr = lastOpTimeFetched);
_syncSource = HostAndPort();
return _syncSource;
} else {
@@ -837,13 +841,18 @@ HeartbeatResponseAction TopologyCoordinator::processHeartbeatResponse(
} else {
LOGV2_DEBUG(21804, 2, "Config from heartbeat response was same as ours.");
}
- if (shouldLog(MongoLogDefaultComponent_component,
- ::mongo::LogstreamBuilder::severityCast(2))) {
- LogstreamBuilder lsb = log();
- if (_rsConfig.isInitialized()) {
- lsb << "Current config: " << _rsConfig.toBSON() << "; ";
- }
- lsb << "Config in heartbeat: " << newConfig.toBSON();
+ if (_rsConfig.isInitialized()) {
+ LOGV2_DEBUG(
+ 4615641,
+ 2,
+ "Current config: {currentConfig}; Config in heartbeat: {heartbeatConfig}",
+ "currentConfig"_attr = _rsConfig.toBSON(),
+ "heartbeatConfig"_attr = newConfig.toBSON());
+ } else {
+ LOGV2_DEBUG(4615647,
+ 2,
+ "Config in heartbeat: {heartbeatConfig}",
+ "heartbeatConfig"_attr = newConfig.toBSON());
}
}
}
diff --git a/src/mongo/db/s/transaction_coordinator_structures.cpp b/src/mongo/db/s/transaction_coordinator_structures.cpp
index 65e9a840f99..dd45fa789e6 100644
--- a/src/mongo/db/s/transaction_coordinator_structures.cpp
+++ b/src/mongo/db/s/transaction_coordinator_structures.cpp
@@ -64,14 +64,13 @@ StringData writeCommitDecisionEnumProperty(CommitDecision decision) {
MONGO_UNREACHABLE;
}
-logger::LogstreamBuilder& operator<<(logger::LogstreamBuilder& stream,
- const CommitDecision& decision) {
- // clang-format off
- switch (decision) {
- case txn::CommitDecision::kCommit: return stream << "kCommit";
- case txn::CommitDecision::kAbort: return stream << "kAbort";
+StringData toString(PrepareVote prepareVote) {
+ switch (prepareVote) {
+ case txn::PrepareVote::kCommit:
+ return "kCommit"_sd;
+ case txn::PrepareVote::kAbort:
+ return "kAbort"_sd;
};
- // clang-format on
MONGO_UNREACHABLE;
}
diff --git a/src/mongo/db/s/transaction_coordinator_structures.h b/src/mongo/db/s/transaction_coordinator_structures.h
index 13462d2a273..bdfda79faa7 100644
--- a/src/mongo/db/s/transaction_coordinator_structures.h
+++ b/src/mongo/db/s/transaction_coordinator_structures.h
@@ -47,6 +47,8 @@ enum class PrepareVote {
kAbort,
};
+StringData toString(PrepareVote prepareVote);
+
using CommitDecision = PrepareVote;
/**
@@ -55,9 +57,6 @@ using CommitDecision = PrepareVote;
CommitDecision readCommitDecisionEnumProperty(StringData decision);
StringData writeCommitDecisionEnumProperty(CommitDecision decision);
-logger::LogstreamBuilder& operator<<(logger::LogstreamBuilder& stream,
- const CommitDecision& decision);
-
/**
* Optional serializer/deserializer for the generic server 'Status' type.
*/
diff --git a/src/mongo/db/service_entry_point_common.cpp b/src/mongo/db/service_entry_point_common.cpp
index 20576ba4bd2..168b8c6f099 100644
--- a/src/mongo/db/service_entry_point_common.cpp
+++ b/src/mongo/db/service_entry_point_common.cpp
@@ -1669,7 +1669,7 @@ DbResponse ServiceEntryPointCommon::handleRequest(OperationContext* opCtx,
// Mark the op as complete, and log it if appropriate. Returns a boolean indicating whether
// this op should be sampled for profiling.
const bool shouldSample = currentOp.completeAndLogOperation(
- opCtx, MONGO_LOG_DEFAULT_COMPONENT, dbresponse.response.size(), slowMsOverride, forceLog);
+ opCtx, MONGO_LOGV2_DEFAULT_COMPONENT, dbresponse.response.size(), slowMsOverride, forceLog);
Top::get(opCtx->getServiceContext())
.incrementGlobalLatencyStats(
diff --git a/src/mongo/db/storage/storage_engine_impl.cpp b/src/mongo/db/storage/storage_engine_impl.cpp
index eaf4cc7a8ea..cf1260a2a14 100644
--- a/src/mongo/db/storage/storage_engine_impl.cpp
+++ b/src/mongo/db/storage/storage_engine_impl.cpp
@@ -28,8 +28,9 @@
*/
#define MONGO_LOG_DEFAULT_COMPONENT ::mongo::logger::LogComponent::kStorage
-#define LOG_FOR_RECOVERY(level) \
- MONGO_LOG_COMPONENT(level, ::mongo::logger::LogComponent::kStorageRecovery)
+#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kStorage
+#define LOGV2_FOR_RECOVERY(ID, DLEVEL, MESSAGE, ...) \
+ LOGV2_DEBUG_OPTIONS(ID, DLEVEL, {logv2::LogComponent::kStorageRecovery}, MESSAGE, ##__VA_ARGS__)
#include "mongo/db/storage/storage_engine_impl.h"
@@ -123,7 +124,7 @@ void StorageEngineImpl::loadCatalog(OperationContext* opCtx) {
_catalogRecordStore = _engine->getGroupedRecordStore(
opCtx, catalogInfo, catalogInfo, CollectionOptions(), KVPrefix::kNotPrefixed);
if (shouldLog(::mongo::logger::LogComponent::kStorageRecovery, kCatalogLogLevel)) {
- LOG_FOR_RECOVERY(kCatalogLogLevel) << "loadCatalog:";
+ LOGV2_FOR_RECOVERY(4615631, kCatalogLogLevel.toInt(), "loadCatalog:");
_dumpCatalog(opCtx);
}
@@ -280,7 +281,7 @@ void StorageEngineImpl::_initCollection(OperationContext* opCtx,
void StorageEngineImpl::closeCatalog(OperationContext* opCtx) {
dassert(opCtx->lockState()->isLocked());
if (shouldLog(::mongo::logger::LogComponent::kStorageRecovery, kCatalogLogLevel)) {
- LOG_FOR_RECOVERY(kCatalogLogLevel) << "loadCatalog:";
+ LOGV2_FOR_RECOVERY(4615632, kCatalogLogLevel.toInt(), "loadCatalog:");
_dumpCatalog(opCtx);
}
CollectionCatalog::get(opCtx).deregisterAllCollections();
@@ -354,7 +355,7 @@ StatusWith<StorageEngine::ReconcileResult> StorageEngineImpl::reconcileCatalogAn
engineIdents.erase(catalogInfo);
}
- LOG_FOR_RECOVERY(2) << "Reconciling collection and index idents.";
+ LOGV2_FOR_RECOVERY(4615633, 2, "Reconciling collection and index idents.");
std::set<std::string> catalogIdents;
{
std::vector<std::string> vec = _catalog->getAllIdents(opCtx);
@@ -907,8 +908,11 @@ void StorageEngineImpl::_dumpCatalog(OperationContext* opCtx) {
while (rec) {
// This should only be called by a parent that's done an appropriate `shouldLog` check. Do
// not duplicate the log level policy.
- LOG_FOR_RECOVERY(kCatalogLogLevel)
- << "\tId: " << rec->id << " Value: " << rec->data.toBson();
+ LOGV2_FOR_RECOVERY(4615634,
+ kCatalogLogLevel.toInt(),
+ "Id: {rec_id} Value: {rec_data_toBson}",
+ "rec_id"_attr = rec->id,
+ "rec_data_toBson"_attr = rec->data.toBson());
auto valueBson = rec->data.toBson();
if (valueBson.hasField("md")) {
std::string ns = valueBson.getField("md").Obj().getField("ns").String();
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp
index fd2f138d518..4fe859c5527 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp
@@ -59,14 +59,14 @@
#define TRACING_ENABLED 0
-#define TRACE_CURSOR(ID, NAME, ...) \
+#define LOGV2_TRACE_CURSOR(ID, NAME, ...) \
if (TRACING_ENABLED) \
LOGV2(ID, \
"WT index ({index}) " #NAME, \
"index"_attr = reinterpret_cast<uint64_t>(&_idx), \
##__VA_ARGS__)
-#define TRACE_INDEX(ID, NAME, ...) \
+#define LOGV2_TRACE_INDEX(ID, NAME, ...) \
if (TRACING_ENABLED) \
LOGV2(ID, \
"WT index ({index}) " #NAME, \
@@ -254,7 +254,7 @@ Status WiredTigerIndex::insert(OperationContext* opCtx,
dassert(opCtx->lockState()->isWriteLocked());
dassert(KeyString::decodeRecordIdAtEnd(keyString.getBuffer(), keyString.getSize()).isValid());
- TRACE_INDEX(20093, "KeyString: {keyString}", "keyString"_attr = keyString);
+ LOGV2_TRACE_INDEX(20093, "KeyString: {keyString}", "keyString"_attr = keyString);
WiredTigerCursor curwrap(_uri, _tableId, false, opCtx);
curwrap.assertInActiveTxn();
@@ -311,7 +311,7 @@ void WiredTigerIndex::fullValidate(OperationContext* opCtx,
auto cursor = newCursor(opCtx);
long long count = 0;
- TRACE_INDEX(20094, "fullValidate");
+ LOGV2_TRACE_INDEX(20094, "fullValidate");
const auto requestedInfo = TRACING_ENABLED ? Cursor::kKeyAndLoc : Cursor::kJustExistance;
@@ -324,7 +324,7 @@ void WiredTigerIndex::fullValidate(OperationContext* opCtx,
);
for (auto kv = cursor->seek(keyStringForSeek, requestedInfo); kv; kv = cursor->next()) {
- TRACE_INDEX(20095, "fullValidate {kv}", "kv"_attr = kv);
+ LOGV2_TRACE_INDEX(20095, "fullValidate {kv}", "kv"_attr = kv);
count++;
}
if (numKeysOut) {
@@ -831,10 +831,10 @@ public:
}
void setEndPosition(const BSONObj& key, bool inclusive) override {
- TRACE_CURSOR(20098,
- "setEndPosition inclusive: {inclusive} {key}",
- "inclusive"_attr = inclusive,
- "key"_attr = key);
+ LOGV2_TRACE_CURSOR(20098,
+ "setEndPosition inclusive: {inclusive} {key}",
+ "inclusive"_attr = inclusive,
+ "key"_attr = key);
if (key.isEmpty()) {
// This means scan to end of index.
_endPosition.reset();
@@ -953,9 +953,9 @@ public:
// Unique indexes can have both kinds of KeyStrings, ie with or without the record id.
// Restore for unique indexes gets handled separately in it's own implementation.
_lastMoveSkippedKey = !seekWTCursor(_key.getValueCopy());
- TRACE_CURSOR(20099,
- "restore _lastMoveSkippedKey: {lastMoveSkippedKey}",
- "lastMoveSkippedKey"_attr = _lastMoveSkippedKey);
+ LOGV2_TRACE_CURSOR(20099,
+ "restore _lastMoveSkippedKey: {lastMoveSkippedKey}",
+ "lastMoveSkippedKey"_attr = _lastMoveSkippedKey);
}
}
@@ -1028,7 +1028,7 @@ protected:
bson =
KeyString::toBson(_key.getBuffer(), _key.getSize(), _idx.getOrdering(), _typeBits);
- TRACE_CURSOR(20000, "returning {bson} {id}", "bson"_attr = bson, "id"_attr = _id);
+ LOGV2_TRACE_CURSOR(20000, "returning {bson} {id}", "bson"_attr = bson, "id"_attr = _id);
}
return {{std::move(bson), _id}};
@@ -1084,13 +1084,13 @@ protected:
int ret = wiredTigerPrepareConflictRetry(_opCtx, [&] { return c->search_near(c, &cmp); });
if (ret == WT_NOTFOUND) {
_cursorAtEof = true;
- TRACE_CURSOR(20088, "not found");
+ LOGV2_TRACE_CURSOR(20088, "not found");
return false;
}
invariantWTOK(ret);
_cursorAtEof = false;
- TRACE_CURSOR(20089, "cmp: {cmp}", "cmp"_attr = cmp);
+ LOGV2_TRACE_CURSOR(20089, "cmp: {cmp}", "cmp"_attr = cmp);
if (cmp == 0) {
// Found it!
@@ -1192,16 +1192,16 @@ protected:
keyWithRecordId.appendRecordId(_id);
keyWithRecordId.setTypeBits(_typeBits);
- TRACE_CURSOR(20090,
- "returning {keyWithRecordId} {id}",
- "keyWithRecordId"_attr = keyWithRecordId,
- "id"_attr = _id);
+ LOGV2_TRACE_CURSOR(20090,
+ "returning {keyWithRecordId} {id}",
+ "keyWithRecordId"_attr = keyWithRecordId,
+ "id"_attr = _id);
return KeyStringEntry(keyWithRecordId.getValueCopy(), _id);
}
_key.setTypeBits(_typeBits);
- TRACE_CURSOR(20091, "returning {key} {id}", "key"_attr = _key, "id"_attr = _id);
+ LOGV2_TRACE_CURSOR(20091, "returning {key} {id}", "key"_attr = _key, "id"_attr = _id);
return KeyStringEntry(_key.getValueCopy(), _id);
}
@@ -1246,7 +1246,7 @@ public:
// Must not throw WriteConflictException, throwing a WriteConflictException will retry the
// operation effectively skipping over this key.
void updateIdAndTypeBits() override {
- TRACE_INDEX(
+ LOGV2_TRACE_INDEX(
20096, "Unique Index KeyString: [{keyString}]", "keyString"_attr = _key.toString());
// After a rolling upgrade an index can have keys from both timestamp unsafe (old) and
@@ -1303,7 +1303,7 @@ public:
// the keys with size greater than or equal to that of the _key.
if (item.size >= keySize && std::memcmp(_key.getBuffer(), item.data, keySize) == 0) {
_lastMoveSkippedKey = false;
- TRACE_CURSOR(20092, "restore _lastMoveSkippedKey changed to false.");
+ LOGV2_TRACE_CURSOR(20092, "restore _lastMoveSkippedKey changed to false.");
}
}
}
@@ -1541,7 +1541,7 @@ Status WiredTigerIndexUnique::_insertTimestampSafe(OperationContext* opCtx,
WT_CURSOR* c,
const KeyString::Value& keyString,
bool dupsAllowed) {
- TRACE_INDEX(
+ LOGV2_TRACE_INDEX(
20097, "Timestamp safe unique idx KeyString: {keyString}", "keyString"_attr = keyString);
int ret;
diff --git a/src/mongo/db/transaction_participant.cpp b/src/mongo/db/transaction_participant.cpp
index efb68e1defc..fed63e4851c 100644
--- a/src/mongo/db/transaction_participant.cpp
+++ b/src/mongo/db/transaction_participant.cpp
@@ -2000,7 +2000,7 @@ void TransactionParticipant::Participant::_logSlowTransaction(
tickSource, tickSource->getTicks()));
if (shouldLogSlowOpWithSampling(opCtx,
- logger::LogComponent::kTransaction,
+ logv2::LogComponent::kTransaction,
opDuration,
Milliseconds(serverGlobalParams.slowMS))
.first) {
diff --git a/src/mongo/embedded/embedded.cpp b/src/mongo/embedded/embedded.cpp
index 95ff681e5cb..d9d99b71285 100644
--- a/src/mongo/embedded/embedded.cpp
+++ b/src/mongo/embedded/embedded.cpp
@@ -218,12 +218,15 @@ ServiceContext* initialize(const char* yaml_config) {
{
ProcessId pid = ProcessId::getCurrent();
- LogstreamBuilder l = log(LogComponent::kControl);
- l << "MongoDB starting : pid=" << pid << " port=" << serverGlobalParams.port
- << " dbpath=" << storageGlobalParams.dbpath;
-
const bool is32bit = sizeof(int*) == 4;
- l << (is32bit ? " 32" : " 64") << "-bit" << endl;
+ LOGV2_OPTIONS(4615667,
+ {logv2::LogComponent::kControl},
+ "MongoDB starting",
+ "pid"_attr = pid.toNative(),
+ "port"_attr = serverGlobalParams.port,
+ "dbpath"_attr =
+ boost::filesystem::path(storageGlobalParams.dbpath).generic_string(),
+ "architecture"_attr = (is32bit ? "32-bit" : "64-bit"));
}
if (kDebugBuild)
diff --git a/src/mongo/embedded/embedded_log_appender.h b/src/mongo/embedded/embedded_log_backend.h
index cee788ac504..e5a50e4ac19 100644
--- a/src/mongo/embedded/embedded_log_appender.h
+++ b/src/mongo/embedded/embedded_log_backend.h
@@ -30,47 +30,47 @@
#pragma once
#include <functional>
-#include <sstream>
+
+#include <boost/log/attributes/attribute_value_set.hpp>
+#include <boost/log/core/record_view.hpp>
+#include <boost/log/sinks.hpp>
+#include <boost/log/utility/formatting_ostream.hpp>
#include "mongo/base/status.h"
-#include "mongo/logger/appender.h"
-#include "mongo/logger/encoder.h"
+#include "mongo/logv2/attributes.h"
+#include "mongo/logv2/log_component.h"
+#include "mongo/logv2/log_severity.h"
namespace mongo {
namespace embedded {
/**
- * Appender for writing to callbacks registered with the embedded C API.
+ * Sink backend for writing to callbacks registered with the embedded C API.
*/
-template <typename Event>
-class EmbeddedLogAppender : public logger::Appender<Event> {
- EmbeddedLogAppender(EmbeddedLogAppender const&) = delete;
- EmbeddedLogAppender& operator=(EmbeddedLogAppender const&) = delete;
-
+class EmbeddedLogBackend
+ : public boost::log::sinks::
+ basic_formatted_sink_backend<char, boost::log::sinks::synchronized_feeding> {
public:
- typedef logger::Encoder<Event> EventEncoder;
-
- explicit EmbeddedLogAppender(
+ EmbeddedLogBackend(
std::function<void(void*, const char*, const char*, const char*, int)> callback,
- void* callbackUserData,
- std::unique_ptr<EventEncoder> encoder)
- : _encoder(std::move(encoder)),
- _callback(std::move(callback)),
- _callbackUserData(callbackUserData) {}
+ void* callbackUserData)
+ : _callback(std::move(callback)), _callbackUserData(callbackUserData) {}
+
+ void consume(boost::log::record_view const& rec, string_type const& formatted_string) {
+ using boost::log::extract;
+
+ auto severity = extract<logv2::LogSeverity>(logv2::attributes::severity(), rec).get();
+ auto component = extract<logv2::LogComponent>(logv2::attributes::component(), rec).get();
+ auto context = extract<StringData>(logv2::attributes::threadName(), rec).get();
- Status append(const Event& event) final {
- std::stringstream output;
- _encoder->encode(event, output);
_callback(_callbackUserData,
- output.str().c_str(),
- event.getComponent().getShortName().c_str(),
- event.getContextName().toString().c_str(),
- event.getSeverity().toInt());
- return Status::OK();
+ formatted_string.c_str(),
+ component.getShortName().c_str(),
+ context.toString().c_str(),
+ severity.toInt());
}
private:
- std::unique_ptr<EventEncoder> _encoder;
std::function<void(void*, const char*, const char*, const char*, int)> _callback;
void* const _callbackUserData;
};
diff --git a/src/mongo/embedded/mongo_embedded/mongo_embedded.cpp b/src/mongo/embedded/mongo_embedded/mongo_embedded.cpp
index 1004122469b..d5ac9ce4bb8 100644
--- a/src/mongo/embedded/mongo_embedded/mongo_embedded.cpp
+++ b/src/mongo/embedded/mongo_embedded/mongo_embedded.cpp
@@ -30,6 +30,7 @@
#include "mongo_embedded/mongo_embedded.h"
+#include <boost/log/core.hpp>
#include <cstring>
#include <exception>
#include <thread>
@@ -40,9 +41,11 @@
#include "mongo/db/client.h"
#include "mongo/db/service_context.h"
#include "mongo/embedded/embedded.h"
-#include "mongo/embedded/embedded_log_appender.h"
-#include "mongo/logger/logger.h"
-#include "mongo/logger/message_event_utf8_encoder.h"
+#include "mongo/embedded/embedded_log_backend.h"
+#include "mongo/logv2/component_settings_filter.h"
+#include "mongo/logv2/log_domain_global.h"
+#include "mongo/logv2/log_manager.h"
+#include "mongo/logv2/plain_formatter.h"
#include "mongo/rpc/message.h"
#include "mongo/stdx/unordered_map.h"
#include "mongo/transport/service_entry_point.h"
@@ -107,10 +110,9 @@ struct mongo_embedded_v1_lib {
~mongo_embedded_v1_lib() {
invariant(this->databaseCount.load() == 0);
- if (this->logCallbackHandle) {
- using mongo::logger::globalLogDomain;
- globalLogDomain()->detachAppender(this->logCallbackHandle);
- this->logCallbackHandle.reset();
+ if (this->logSink) {
+ boost::log::core::get()->remove_sink(this->logSink);
+ this->logSink.reset();
}
}
@@ -121,7 +123,8 @@ struct mongo_embedded_v1_lib {
mongo::AtomicWord<int> databaseCount;
- mongo::logger::ComponentMessageLogDomain::AppenderHandle logCallbackHandle;
+ boost::shared_ptr<boost::log::sinks::synchronous_sink<mongo::embedded::EmbeddedLogBackend>>
+ logSink;
std::unique_ptr<mongo_embedded_v1_instance> onlyDB;
};
@@ -204,13 +207,16 @@ std::unique_ptr<mongo_embedded_v1_lib> library;
void registerLogCallback(mongo_embedded_v1_lib* const lib,
const mongo_embedded_v1_log_callback logCallback,
void* const logUserData) {
- using logger::globalLogDomain;
- using logger::MessageEventEphemeral;
- using logger::MessageEventUnadornedEncoder;
-
- lib->logCallbackHandle = globalLogDomain()->attachAppender(
- std::make_unique<embedded::EmbeddedLogAppender<MessageEventEphemeral>>(
- logCallback, logUserData, std::make_unique<MessageEventUnadornedEncoder>()));
+ auto backend = boost::make_shared<embedded::EmbeddedLogBackend>(logCallback, logUserData);
+ auto sink =
+ boost::make_shared<boost::log::sinks::synchronous_sink<embedded::EmbeddedLogBackend>>(
+ std::move(backend));
+ sink->set_filter(
+ logv2::ComponentSettingsFilter(logv2::LogManager::global().getGlobalDomain(),
+ logv2::LogManager::global().getGlobalSettings()));
+ sink->set_formatter(logv2::PlainFormatter());
+ boost::log::core::get()->add_sink(sink);
+ lib->logSink = sink;
}
mongo_embedded_v1_lib* capi_lib_init(mongo_embedded_v1_init_params const* params) try {
@@ -224,15 +230,21 @@ mongo_embedded_v1_lib* capi_lib_init(mongo_embedded_v1_init_params const* params
// TODO(adam.martin): Fold all of this log initialization into the ctor of lib.
if (params) {
- using logger::globalLogManager;
// The standard console log appender may or may not be installed here, depending if this is
// the first time we initialize the library or not. Make sure we handle both cases.
+ using namespace logv2;
+ auto& globalDomain = LogManager::global().getGlobalDomainInternal();
+ LogDomainGlobal::ConfigurationOptions config = globalDomain.config();
if (params->log_flags & MONGO_EMBEDDED_V1_LOG_STDOUT) {
- if (!globalLogManager()->isDefaultConsoleAppenderAttached())
- globalLogManager()->reattachDefaultConsoleAppender();
+ if (!config.consoleEnabled) {
+ config.consoleEnabled = true;
+ invariant(globalDomain.configure(config).isOK());
+ }
} else {
- if (globalLogManager()->isDefaultConsoleAppenderAttached())
- globalLogManager()->detachDefaultConsoleAppender();
+ if (config.consoleEnabled) {
+ config.consoleEnabled = false;
+ invariant(globalDomain.configure(config).isOK());
+ }
}
if ((params->log_flags & MONGO_EMBEDDED_V1_LOG_CALLBACK) && params->log_callback) {
@@ -247,9 +259,10 @@ mongo_embedded_v1_lib* capi_lib_init(mongo_embedded_v1_init_params const* params
// Make sure that no actual logger is attached if library cannot be initialized. Also prevent
// exception leaking failures here.
[]() noexcept {
- using logger::globalLogManager;
- if (globalLogManager()->isDefaultConsoleAppenderAttached())
- globalLogManager()->detachDefaultConsoleAppender();
+ using namespace logv2;
+ LogDomainGlobal::ConfigurationOptions config;
+ config.makeDisabled();
+ LogManager::global().getGlobalDomainInternal().configure(config).ignore();
}
();
throw;
diff --git a/src/mongo/logger/log_severity_limiter.h b/src/mongo/logger/log_severity_limiter.h
index 027393c2adf..912d1fc15fb 100644
--- a/src/mongo/logger/log_severity_limiter.h
+++ b/src/mongo/logger/log_severity_limiter.h
@@ -65,8 +65,8 @@ public:
*/
explicit LogSeverityLimiter(Milliseconds period, int limitedDLevel, int normalDLevel)
: _period{period},
- _limitedLogSeverity{LogstreamBuilder::severityCast(limitedDLevel)},
- _normalLogSeverity{LogstreamBuilder::severityCast(normalDLevel)} {
+ _limitedLogSeverity{LogstreamBuilderDeprecated::severityCast(limitedDLevel)},
+ _normalLogSeverity{LogstreamBuilderDeprecated::severityCast(normalDLevel)} {
// This widget has no point if limited isn't a lower verbosity than normal
invariant(limitedDLevel < normalDLevel);
}
diff --git a/src/mongo/logger/logstream_builder.cpp b/src/mongo/logger/logstream_builder.cpp
index c07c29c3fe5..5dd324b9be2 100644
--- a/src/mongo/logger/logstream_builder.cpp
+++ b/src/mongo/logger/logstream_builder.cpp
@@ -51,7 +51,7 @@ namespace {
/// flag is true, LogstreamBuilder instances must not use the cache.
bool isThreadOstreamCacheInitialized = false;
-MONGO_INITIALIZER(LogstreamBuilder)(InitializerContext*) {
+MONGO_INITIALIZER(LogstreamBuilderDeprecated)(InitializerContext*) {
isThreadOstreamCacheInitialized = true;
return Status::OK();
}
@@ -71,16 +71,17 @@ struct ThreadOstreamCacheFinalizer {
namespace logger {
-LogstreamBuilder::LogstreamBuilder(MessageLogDomain* domain,
- StringData contextName,
- LogSeverity severity)
- : LogstreamBuilder(domain, contextName, std::move(severity), LogComponent::kDefault) {}
+LogstreamBuilderDeprecated::LogstreamBuilderDeprecated(MessageLogDomain* domain,
+ StringData contextName,
+ LogSeverity severity)
+ : LogstreamBuilderDeprecated(domain, contextName, std::move(severity), LogComponent::kDefault) {
+}
-LogstreamBuilder::LogstreamBuilder(MessageLogDomain* domain,
- StringData contextName,
- LogSeverity severity,
- LogComponent component,
- bool shouldCache)
+LogstreamBuilderDeprecated::LogstreamBuilderDeprecated(MessageLogDomain* domain,
+ StringData contextName,
+ LogSeverity severity,
+ LogComponent component,
+ bool shouldCache)
: _domain(domain),
_contextName(contextName.toString()),
_severity(std::move(severity)),
@@ -88,7 +89,7 @@ LogstreamBuilder::LogstreamBuilder(MessageLogDomain* domain,
_tee(nullptr),
_shouldCache(shouldCache) {}
-LogstreamBuilder::~LogstreamBuilder() {
+LogstreamBuilderDeprecated::~LogstreamBuilderDeprecated() {
if (_os) {
if (!_baseMessage.empty())
_baseMessage.push_back(' ');
@@ -113,13 +114,13 @@ LogstreamBuilder::~LogstreamBuilder() {
}
}
-void LogstreamBuilder::operator<<(Tee* tee) {
+void LogstreamBuilderDeprecated::operator<<(Tee* tee) {
makeStream(); // Adding a Tee counts for purposes of deciding to make a log message.
// TODO: dassert(!_tee);
_tee = tee;
}
-void LogstreamBuilder::makeStream() {
+void LogstreamBuilderDeprecated::makeStream() {
if (!_os) {
if (_shouldCache && isThreadOstreamCacheInitialized && threadOstreamCache) {
_os = std::move(threadOstreamCache);
diff --git a/src/mongo/logger/logstream_builder.h b/src/mongo/logger/logstream_builder.h
index ba5cb2ef689..6dd76dee86d 100644
--- a/src/mongo/logger/logstream_builder.h
+++ b/src/mongo/logger/logstream_builder.h
@@ -50,7 +50,7 @@ class Tee;
/**
* Stream-ish object used to build and append log messages.
*/
-class LogstreamBuilder {
+class LogstreamBuilderDeprecated {
public:
static LogSeverity severityCast(int ll) {
return LogSeverity::cast(ll);
@@ -65,7 +65,9 @@ public:
* "contextName" is a short name of the thread or other context.
* "severity" is the logging severity of the message.
*/
- LogstreamBuilder(MessageLogDomain* domain, StringData contextName, LogSeverity severity);
+ LogstreamBuilderDeprecated(MessageLogDomain* domain,
+ StringData contextName,
+ LogSeverity severity);
/**
* Construct a LogstreamBuilder that writes to "domain" on destruction.
@@ -79,31 +81,31 @@ public:
* logs messages. Set "shouldCache" to false to create a new ostream
* for each instance of this class rather than cacheing.
*/
- LogstreamBuilder(MessageLogDomain* domain,
- StringData contextName,
- LogSeverity severity,
- LogComponent component,
- bool shouldCache = true);
+ LogstreamBuilderDeprecated(MessageLogDomain* domain,
+ StringData contextName,
+ LogSeverity severity,
+ LogComponent component,
+ bool shouldCache = true);
- LogstreamBuilder(LogstreamBuilder&& other) = default;
- LogstreamBuilder& operator=(LogstreamBuilder&& other) = default;
+ LogstreamBuilderDeprecated(LogstreamBuilderDeprecated&& other) = default;
+ LogstreamBuilderDeprecated& operator=(LogstreamBuilderDeprecated&& other) = default;
/**
* Destroys a LogstreamBuilder(). If anything was written to it via stream() or operator<<,
* constructs a MessageLogDomain::Event and appends it to the associated domain.
*/
- ~LogstreamBuilder();
+ ~LogstreamBuilderDeprecated();
/**
* Sets an optional prefix for the message.
*/
- LogstreamBuilder& setBaseMessage(const std::string& baseMessage) {
+ LogstreamBuilderDeprecated& setBaseMessage(const std::string& baseMessage) {
_baseMessage = baseMessage;
return *this;
}
- LogstreamBuilder& setIsTruncatable(bool isTruncatable) {
+ LogstreamBuilderDeprecated& setIsTruncatable(bool isTruncatable) {
_isTruncatable = isTruncatable;
return *this;
}
@@ -114,100 +116,100 @@ public:
return *_os;
}
- LogstreamBuilder& operator<<(const char* x) {
+ LogstreamBuilderDeprecated& operator<<(const char* x) {
stream() << x;
return *this;
}
- LogstreamBuilder& operator<<(const std::string& x) {
+ LogstreamBuilderDeprecated& operator<<(const std::string& x) {
stream() << x;
return *this;
}
- LogstreamBuilder& operator<<(StringData x) {
+ LogstreamBuilderDeprecated& operator<<(StringData x) {
stream() << x;
return *this;
}
- LogstreamBuilder& operator<<(char* x) {
+ LogstreamBuilderDeprecated& operator<<(char* x) {
stream() << x;
return *this;
}
- LogstreamBuilder& operator<<(char x) {
+ LogstreamBuilderDeprecated& operator<<(char x) {
stream() << x;
return *this;
}
- LogstreamBuilder& operator<<(int x) {
+ LogstreamBuilderDeprecated& operator<<(int x) {
stream() << x;
return *this;
}
- LogstreamBuilder& operator<<(ExitCode x) {
+ LogstreamBuilderDeprecated& operator<<(ExitCode x) {
stream() << x;
return *this;
}
- LogstreamBuilder& operator<<(long x) {
+ LogstreamBuilderDeprecated& operator<<(long x) {
stream() << x;
return *this;
}
- LogstreamBuilder& operator<<(unsigned long x) {
+ LogstreamBuilderDeprecated& operator<<(unsigned long x) {
stream() << x;
return *this;
}
- LogstreamBuilder& operator<<(unsigned x) {
+ LogstreamBuilderDeprecated& operator<<(unsigned x) {
stream() << x;
return *this;
}
- LogstreamBuilder& operator<<(unsigned short x) {
+ LogstreamBuilderDeprecated& operator<<(unsigned short x) {
stream() << x;
return *this;
}
- LogstreamBuilder& operator<<(double x) {
+ LogstreamBuilderDeprecated& operator<<(double x) {
stream() << x;
return *this;
}
- LogstreamBuilder& operator<<(long long x) {
+ LogstreamBuilderDeprecated& operator<<(long long x) {
stream() << x;
return *this;
}
- LogstreamBuilder& operator<<(unsigned long long x) {
+ LogstreamBuilderDeprecated& operator<<(unsigned long long x) {
stream() << x;
return *this;
}
- LogstreamBuilder& operator<<(bool x) {
+ LogstreamBuilderDeprecated& operator<<(bool x) {
stream() << x;
return *this;
}
template <typename Period>
- LogstreamBuilder& operator<<(const Duration<Period>& d) {
+ LogstreamBuilderDeprecated& operator<<(const Duration<Period>& d) {
stream() << d;
return *this;
}
- LogstreamBuilder& operator<<(BSONType t) {
+ LogstreamBuilderDeprecated& operator<<(BSONType t) {
stream() << typeName(t);
return *this;
}
- LogstreamBuilder& operator<<(ErrorCodes::Error ec) {
+ LogstreamBuilderDeprecated& operator<<(ErrorCodes::Error ec) {
stream() << ErrorCodes::errorString(ec);
return *this;
}
template <typename T>
- LogstreamBuilder& operator<<(const T& x) {
+ LogstreamBuilderDeprecated& operator<<(const T& x) {
stream() << x.toString();
return *this;
}
- LogstreamBuilder& operator<<(std::ostream& (*manip)(std::ostream&)) {
+ LogstreamBuilderDeprecated& operator<<(std::ostream& (*manip)(std::ostream&)) {
stream() << manip;
return *this;
}
- LogstreamBuilder& operator<<(std::ios_base& (*manip)(std::ios_base&)) {
+ LogstreamBuilderDeprecated& operator<<(std::ios_base& (*manip)(std::ios_base&)) {
stream() << manip;
return *this;
}
template <typename OptionalType>
- LogstreamBuilder& operator<<(const boost::optional<OptionalType>& optional) {
+ LogstreamBuilderDeprecated& operator<<(const boost::optional<OptionalType>& optional) {
if (optional) {
(*this << *optional);
} else {
diff --git a/src/mongo/logv2/log_domain_global.cpp b/src/mongo/logv2/log_domain_global.cpp
index 58e900b840f..7b8e117fa02 100644
--- a/src/mongo/logv2/log_domain_global.cpp
+++ b/src/mongo/logv2/log_domain_global.cpp
@@ -66,8 +66,11 @@ struct LogDomainGlobal::Impl {
Status configure(LogDomainGlobal::ConfigurationOptions const& options);
Status rotate(bool rename, StringData renameSuffix);
+ const ConfigurationOptions& config() const;
+
LogDomainGlobal& _parent;
LogComponentSettings _settings;
+ ConfigurationOptions _config;
boost::shared_ptr<boost::log::sinks::unlocked_sink<ConsoleBackend>> _consoleSink;
boost::shared_ptr<boost::log::sinks::unlocked_sink<RotatableFileBackend>> _rotatableFileSink;
#ifndef _WIN32
@@ -182,6 +185,9 @@ Status LogDomainGlobal::Impl::configure(LogDomainGlobal::ConfigurationOptions co
setFormatters(
[&] { return TextFormatter(options.maxAttributeSizeKB, options.timestampFormat); });
break;
+ case LogFormat::kPlain:
+ setFormatters([&] { return PlainFormatter(options.maxAttributeSizeKB); });
+ break;
#if !defined(MONGO_CONFIG_TEXT_LOG_DEFAULT)
case LogFormat::kDefault:
#endif
@@ -191,9 +197,15 @@ Status LogDomainGlobal::Impl::configure(LogDomainGlobal::ConfigurationOptions co
break;
}
+ _config = options;
+
return Status::OK();
}
+const LogDomainGlobal::ConfigurationOptions& LogDomainGlobal::Impl::config() const {
+ return _config;
+}
+
Status LogDomainGlobal::Impl::rotate(bool rename, StringData renameSuffix) {
if (_rotatableFileSink) {
auto backend = _rotatableFileSink->locked_backend()->lockedBackend<0>();
@@ -219,6 +231,10 @@ Status LogDomainGlobal::configure(LogDomainGlobal::ConfigurationOptions const& o
return _impl->configure(options);
}
+const LogDomainGlobal::ConfigurationOptions& LogDomainGlobal::config() const {
+ return _impl->config();
+}
+
Status LogDomainGlobal::rotate(bool rename, StringData renameSuffix) {
return _impl->rotate(rename, renameSuffix);
}
diff --git a/src/mongo/logv2/log_domain_global.h b/src/mongo/logv2/log_domain_global.h
index d3acb55f30f..c19c2a62b9d 100644
--- a/src/mongo/logv2/log_domain_global.h
+++ b/src/mongo/logv2/log_domain_global.h
@@ -63,6 +63,8 @@ public:
Status configure(ConfigurationOptions const& options);
Status rotate(bool rename, StringData renameSuffix);
+ const ConfigurationOptions& config() const;
+
LogComponentSettings& settings();
private:
diff --git a/src/mongo/logv2/log_format.h b/src/mongo/logv2/log_format.h
index 12e42fab859..b1a57287d96 100644
--- a/src/mongo/logv2/log_format.h
+++ b/src/mongo/logv2/log_format.h
@@ -31,7 +31,7 @@
namespace mongo {
namespace logv2 {
-enum class LogFormat { kDefault, kText, kJson };
+enum class LogFormat { kDefault, kText, kJson, kPlain };
enum class LogTimestampFormat { kISO8601UTC, kISO8601Local };
} // namespace logv2
} // namespace mongo
diff --git a/src/mongo/logv2/log_options.h b/src/mongo/logv2/log_options.h
index cc7601baddb..fdd41307b43 100644
--- a/src/mongo/logv2/log_options.h
+++ b/src/mongo/logv2/log_options.h
@@ -54,6 +54,8 @@ public:
LogOptions(LogTruncation truncation) : _truncation(truncation) {}
+ LogOptions(LogTag tags, LogTruncation truncation) : _tags(tags), _truncation(truncation) {}
+
LogOptions(LogComponent component, LogDomain* domain, LogTag tags)
: _domain(domain), _tags(tags), _component(component) {}
diff --git a/src/mongo/s/async_requests_sender.cpp b/src/mongo/s/async_requests_sender.cpp
index 133f9333183..de8004ad182 100644
--- a/src/mongo/s/async_requests_sender.cpp
+++ b/src/mongo/s/async_requests_sender.cpp
@@ -27,7 +27,7 @@
* it in the license file.
*/
-#define MONGO_LOG_DEFAULT_COMPONENT ::mongo::logger::LogComponent::kQuery
+#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery
#include "mongo/platform/basic.h"
@@ -38,6 +38,7 @@
#include "mongo/client/remote_command_targeter.h"
#include "mongo/executor/remote_command_request.h"
+#include "mongo/logv2/log.h"
#include "mongo/rpc/get_status_from_command_result.h"
#include "mongo/s/client/shard_registry.h"
#include "mongo/s/grid.h"
@@ -45,7 +46,6 @@
#include "mongo/transport/baton.h"
#include "mongo/transport/transport_layer.h"
#include "mongo/util/assert_util.h"
-#include "mongo/util/log.h"
#include "mongo/util/scopeguard.h"
using namespace fmt::literals;
@@ -247,13 +247,13 @@ auto AsyncRequestsSender::RemoteData::handleResponse(RemoteCommandOnAnyCallbackA
if (!_ars->_stopRetrying && shard->isRetriableError(status.code(), _ars->_retryPolicy) &&
_retryCount < kMaxNumFailedHostRetryAttempts) {
- LOG(1) << "Command to remote " << _shardId
- << (failedTargets.empty()
- ? " "
- : (failedTargets.size() > 1 ? " for hosts " : " at host "))
- << "{}"_format(fmt::join(failedTargets, ", "))
- << "failed with retriable error and will be retried "
- << causedBy(redact(status));
+ LOGV2_DEBUG(4615637,
+ 1,
+ "Command to remote {shardId} for hosts {hosts} failed with retriable error "
+ "and will be retried. Caused by {causedBy}",
+ "shardId"_attr = _shardId,
+ "hosts"_attr = failedTargets,
+ "causedBy"_attr = redact(status));
++_retryCount;
_shardHostAndPort.reset();
diff --git a/src/mongo/s/balancer_configuration.cpp b/src/mongo/s/balancer_configuration.cpp
index ffce7776003..733b45c90e3 100644
--- a/src/mongo/s/balancer_configuration.cpp
+++ b/src/mongo/s/balancer_configuration.cpp
@@ -398,9 +398,14 @@ bool BalancerSettingsType::isTimeInBalancingWindow(const boost::posix_time::ptim
return true;
}
- LOG(1).stream() << "inBalancingWindow: "
- << " now: " << now << " startTime: " << *_activeWindowStart
- << " stopTime: " << *_activeWindowStop;
+ auto timeToString = [](const boost::posix_time::ptime& time) {
+ std::ostringstream ss;
+ ss << time;
+ return ss.str();
+ };
+ LOG(1) << "inBalancingWindow: "
+ << " now: " << timeToString(now) << " startTime: " << timeToString(*_activeWindowStart)
+ << " stopTime: " << timeToString(*_activeWindowStop);
if (*_activeWindowStop > *_activeWindowStart) {
if ((now >= *_activeWindowStart) && (now <= *_activeWindowStop)) {
diff --git a/src/mongo/s/service_entry_point_mongos.cpp b/src/mongo/s/service_entry_point_mongos.cpp
index 84fe595b841..18a7e391c27 100644
--- a/src/mongo/s/service_entry_point_mongos.cpp
+++ b/src/mongo/s/service_entry_point_mongos.cpp
@@ -96,7 +96,7 @@ DbResponse ServiceEntryPointMongos::handleRequest(OperationContext* opCtx, const
// Mark the op as complete, populate the response length, and log it if appropriate.
CurOp::get(opCtx)->completeAndLogOperation(
- opCtx, logger::LogComponent::kCommand, dbResponse.response.size());
+ opCtx, logv2::LogComponent::kCommand, dbResponse.response.size());
return dbResponse;
}
@@ -179,7 +179,7 @@ DbResponse ServiceEntryPointMongos::handleRequest(OperationContext* opCtx, const
// Mark the op as complete, populate the response length, and log it if appropriate.
CurOp::get(opCtx)->completeAndLogOperation(
- opCtx, logger::LogComponent::kCommand, dbResponse.response.size());
+ opCtx, logv2::LogComponent::kCommand, dbResponse.response.size());
return dbResponse;
}
diff --git a/src/mongo/s/transaction_router.cpp b/src/mongo/s/transaction_router.cpp
index 441e7727dbd..bf228420fd5 100644
--- a/src/mongo/s/transaction_router.cpp
+++ b/src/mongo/s/transaction_router.cpp
@@ -28,6 +28,7 @@
*/
#define MONGO_LOG_DEFAULT_COMPONENT ::mongo::logger::LogComponent::kTransaction
+#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTransaction
#include "mongo/platform/basic.h"
@@ -1580,8 +1581,10 @@ void TransactionRouter::Router::_endTransactionTrackingIfNecessary(
const auto opDuration =
duration_cast<Milliseconds>(timingStats.getDuration(tickSource, curTicks));
- if (shouldLogSlowOpWithSampling(
- opCtx, MONGO_LOG_DEFAULT_COMPONENT, opDuration, Milliseconds(serverGlobalParams.slowMS))
+ if (shouldLogSlowOpWithSampling(opCtx,
+ MONGO_LOGV2_DEFAULT_COMPONENT,
+ opDuration,
+ Milliseconds(serverGlobalParams.slowMS))
.first) {
_logSlowTransaction(opCtx, terminationCause);
}
diff --git a/src/mongo/s/version_mongos.cpp b/src/mongo/s/version_mongos.cpp
index df35212494b..75f23049c67 100644
--- a/src/mongo/s/version_mongos.cpp
+++ b/src/mongo/s/version_mongos.cpp
@@ -36,6 +36,8 @@
#include "mongo/db/log_process_details.h"
#include "mongo/db/server_options.h"
#include "mongo/logv2/log.h"
+#include "mongo/logv2/log_domain_global.h"
+#include "mongo/logv2/log_manager.h"
#include "mongo/platform/process_id.h"
#include "mongo/util/debug_util.h"
#include "mongo/util/log.h"
@@ -47,7 +49,10 @@ void printShardingVersionInfo(bool isForVersionReportingOnly) {
auto&& vii = VersionInfoInterface::instance();
if (isForVersionReportingOnly) {
- setPlainConsoleLogger();
+ auto& globalDomain = logv2::LogManager::global().getGlobalDomainInternal();
+ logv2::LogDomainGlobal::ConfigurationOptions config = globalDomain.config();
+ config.format = logv2::LogFormat::kPlain;
+ invariant(globalDomain.configure(config).isOK());
LOGV2(22900, "{mongosVersion_vii}", "mongosVersion_vii"_attr = mongosVersion(vii));
vii.logBuildInfo();
} else {
diff --git a/src/mongo/scripting/mozjs/global.cpp b/src/mongo/scripting/mozjs/global.cpp
index 391b1117c86..6092c560966 100644
--- a/src/mongo/scripting/mozjs/global.cpp
+++ b/src/mongo/scripting/mozjs/global.cpp
@@ -27,6 +27,8 @@
* it in the license file.
*/
+#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault
+
#include "mongo/platform/basic.h"
#include "mongo/scripting/mozjs/global.h"
@@ -34,8 +36,7 @@
#include <js/Conversions.h>
#include "mongo/base/init.h"
-#include "mongo/logger/logger.h"
-#include "mongo/logger/logstream_builder.h"
+#include "mongo/logv2/log.h"
#include "mongo/scripting/engine.h"
#include "mongo/scripting/mozjs/implscope.h"
#include "mongo/scripting/mozjs/jsstringwrapper.h"
@@ -59,17 +60,8 @@ const JSFunctionSpec GlobalInfo::freeFunctions[7] = {
const char* const GlobalInfo::className = "Global";
-namespace {
-
-logger::MessageLogDomain* plainShellOutputDomain;
-
-} // namespace
-
void GlobalInfo::Functions::print::call(JSContext* cx, JS::CallArgs args) {
- logger::LogstreamBuilder builder(
- plainShellOutputDomain, getThreadName(), logger::LogSeverity::Log());
- builder.setIsTruncatable(false);
- std::ostream& ss = builder.stream();
+ std::ostringstream ss;
bool first = true;
for (size_t i = 0; i < args.length(); i++) {
@@ -87,9 +79,13 @@ void GlobalInfo::Functions::print::call(JSContext* cx, JS::CallArgs args) {
JSStringWrapper jsstr(cx, JS::ToString(cx, args.get(i)));
ss << jsstr.toStringData();
}
- ss << std::endl;
args.rval().setUndefined();
+
+ LOGV2_OPTIONS(4615635,
+ logv2::LogOptions(logv2::LogTag::kPlainShell, logv2::LogTruncation::Disabled),
+ "{message}",
+ "message"_attr = ss.str());
}
void GlobalInfo::Functions::version::call(JSContext* cx, JS::CallArgs args) {
@@ -127,10 +123,5 @@ void GlobalInfo::Functions::sleep::call(JSContext* cx, JS::CallArgs args) {
args.rval().setUndefined();
}
-MONGO_INITIALIZER(PlainShellOutputDomain)(InitializerContext*) {
- plainShellOutputDomain = logger::globalLogManager()->getNamedDomain("plainShellOutput");
- return Status::OK();
-}
-
} // namespace mozjs
} // namespace mongo
diff --git a/src/mongo/shell/dbshell.cpp b/src/mongo/shell/dbshell.cpp
index d96c030b86b..89616e477f5 100644
--- a/src/mongo/shell/dbshell.cpp
+++ b/src/mongo/shell/dbshell.cpp
@@ -279,7 +279,7 @@ void completionHook(const char* text, linenoiseCompletions* lc) {
void shellHistoryInit() {
Status res = linenoiseHistoryLoad(shell_utils::getHistoryFilePath().string().c_str());
if (!res.isOK()) {
- error() << "Error loading history file: " << res;
+ std::cout << "Error loading history file: " << res << std::endl;
}
linenoiseSetCompletionCallback(completionHook);
}
@@ -287,7 +287,7 @@ void shellHistoryInit() {
void shellHistoryDone() {
Status res = linenoiseHistorySave(shell_utils::getHistoryFilePath().string().c_str());
if (!res.isOK()) {
- error() << "Error saving history file: " << res;
+ std::cout << "Error saving history file: " << res << std::endl;
}
linenoiseHistoryFree();
}
@@ -922,7 +922,7 @@ int _main(int argc, char* argv[], char** envp) {
if (!shellGlobalParams.script.empty()) {
mongo::shell_utils::MongoProgramScope s;
if (!scope->exec(shellGlobalParams.script, "(shell eval)", false, true, false)) {
- error() << "exiting with code " << static_cast<int>(kEvalError);
+ std::cout << "exiting with code " << static_cast<int>(kEvalError) << std::endl;
return kEvalError;
}
scope->exec("shellPrintHelper( __lastres__ );", "(shell2 eval)", true, true, false);
@@ -935,8 +935,8 @@ int _main(int argc, char* argv[], char** envp) {
std::cout << "loading file: " << shellGlobalParams.files[i] << std::endl;
if (!scope->execFile(shellGlobalParams.files[i], false, true)) {
- severe() << "failed to load: " << shellGlobalParams.files[i];
- error() << "exiting with code " << static_cast<int>(kInputFileError);
+ std::cout << "failed to load: " << shellGlobalParams.files[i] << std::endl;
+ std::cout << "exiting with code " << static_cast<int>(kInputFileError) << std::endl;
return kInputFileError;
}
@@ -950,9 +950,10 @@ int _main(int argc, char* argv[], char** envp) {
std::cout << std::endl;
if (mongo::shell_utils::KillMongoProgramInstances() != EXIT_SUCCESS) {
- severe() << "one more more child processes exited with an error during "
- << shellGlobalParams.files[i];
- error() << "exiting with code " << static_cast<int>(kProcessTerminationError);
+ std::cout << "one or more child processes exited with an error during "
+ << shellGlobalParams.files[i] << std::endl;
+ std::cout << "exiting with code " << static_cast<int>(kProcessTerminationError)
+ << std::endl;
return kProcessTerminationError;
}
@@ -965,10 +966,12 @@ int _main(int argc, char* argv[], char** envp) {
failIfUnterminatedProcesses = shellMainScope->getBoolean("__returnValue");
if (failIfUnterminatedProcesses) {
- severe() << "exiting with a failure due to unterminated processes, "
- "a call to MongoRunner.stopMongod(), ReplSetTest#stopSet(), or "
- "ShardingTest#stop() may be missing from the test";
- error() << "exiting with code " << static_cast<int>(kUnterminatedProcess);
+ std::cout << "exiting with a failure due to unterminated processes, "
+ "a call to MongoRunner.stopMongod(), ReplSetTest#stopSet(), or "
+ "ShardingTest#stop() may be missing from the test"
+ << std::endl;
+ std::cout << "exiting with code " << static_cast<int>(kUnterminatedProcess)
+ << std::endl;
return kUnterminatedProcess;
}
}
@@ -996,9 +999,12 @@ int _main(int argc, char* argv[], char** envp) {
if (!rcLocation.empty() && ::mongo::shell_utils::fileExists(rcLocation)) {
hasMongoRC = true;
if (!scope->execFile(rcLocation, false, true)) {
- severe() << "The \".mongorc.js\" file located in your home folder could not be "
- "executed";
- error() << "exiting with code " << static_cast<int>(kMongorcError);
+ std::cout
+ << "The \".mongorc.js\" file located in your home folder could not be "
+ "executed"
+ << std::endl;
+ std::cout << "exiting with code " << static_cast<int>(kMongorcError)
+ << std::endl;
return kMongorcError;
}
}
@@ -1186,8 +1192,8 @@ int wmain(int argc, wchar_t* argvW[], wchar_t* envpW[]) {
WindowsCommandLine wcl(argc, argvW, envpW);
returnCode = _main(argc, wcl.argv(), wcl.envp());
} catch (mongo::DBException& e) {
- severe() << "exception: " << e.what();
- error() << "exiting with code " << static_cast<int>(kDBException);
+ std::cout << "exception: " << e.what() << std::endl;
+ std::cout << "exiting with code " << static_cast<int>(kDBException) << std::endl;
returnCode = kDBException;
}
quickExit(returnCode);
@@ -1198,8 +1204,8 @@ int main(int argc, char* argv[], char** envp) {
try {
returnCode = _main(argc, argv, envp);
} catch (mongo::DBException& e) {
- severe() << "exception: " << e.what();
- error() << "exiting with code " << static_cast<int>(kDBException);
+ std::cout << "exception: " << e.what() << std::endl;
+ std::cout << "exiting with code " << static_cast<int>(kDBException) << std::endl;
returnCode = kDBException;
}
quickExit(returnCode);
diff --git a/src/mongo/shell/shell_options.cpp b/src/mongo/shell/shell_options.cpp
index fed35a6d07b..ee89ed985d4 100644
--- a/src/mongo/shell/shell_options.cpp
+++ b/src/mongo/shell/shell_options.cpp
@@ -89,15 +89,9 @@ bool handlePreValidationMongoShellOptions(const moe::Environment& params,
const std::vector<std::string>& args) {
auto&& vii = VersionInfoInterface::instance();
if (params.count("version") || params.count("help")) {
- setPlainConsoleLogger();
- LOGV2(22808,
- "{mongoShellVersion_vii}",
- "mongoShellVersion_vii"_attr = mongoShellVersion(vii));
+ std::cout << mongoShellVersion(vii) << std::endl;
if (params.count("help")) {
- LOGV2(22809,
- "{getMongoShellHelp_args_0_moe_startupOptions}",
- "getMongoShellHelp_args_0_moe_startupOptions"_attr =
- getMongoShellHelp(args[0], moe::startupOptions));
+ std::cout << getMongoShellHelp(args[0], moe::startupOptions) << std::endl;
} else {
vii.logBuildInfo();
}
diff --git a/src/mongo/shell/shell_utils.cpp b/src/mongo/shell/shell_utils.cpp
index b2b593d1772..28ebda1afaa 100644
--- a/src/mongo/shell/shell_utils.cpp
+++ b/src/mongo/shell/shell_utils.cpp
@@ -582,24 +582,20 @@ void ConnectionRegistry::killOperationsOnAllConnections(bool withPrompt) const {
if (auto elem = op["client"]) {
// mongod currentOp client
if (elem.type() != String) {
- LOGV2_WARNING(23916,
- "Ignoring operation {op_opid_false}; expected 'client' field in "
- "currentOp response to have type "
- "string, but found {typeName_elem_type}",
- "op_opid_false"_attr = op["opid"].toString(false),
- "typeName_elem_type"_attr = typeName(elem.type()));
+ std::cout << "Ignoring operation " << op["opid"].toString(false)
+ << "; expected 'client' field in currentOp response to have type "
+ "string, but found "
+ << typeName(elem.type()) << std::endl;
continue;
}
client = elem.str();
} else if (auto elem = op["client_s"]) {
// mongos currentOp client
if (elem.type() != String) {
- LOGV2_WARNING(23917,
- "Ignoring operation {op_opid_false}; expected 'client_s' field "
- "in currentOp response to have type "
- "string, but found {typeName_elem_type}",
- "op_opid_false"_attr = op["opid"].toString(false),
- "typeName_elem_type"_attr = typeName(elem.type()));
+ std::cout << "Ignoring operation " << op["opid"].toString(false)
+ << "; expected 'client_s' field in currentOp response to have type "
+ "string, but found "
+ << typeName(elem.type()) << std::endl;
continue;
}
client = elem.str();
diff --git a/src/mongo/shell/shell_utils_launcher.cpp b/src/mongo/shell/shell_utils_launcher.cpp
index d1cdd3e36be..a1c76870e36 100644
--- a/src/mongo/shell/shell_utils_launcher.cpp
+++ b/src/mongo/shell/shell_utils_launcher.cpp
@@ -247,12 +247,11 @@ void ProgramOutputMultiplexer::appendLine(int port,
sink << name << pid << "| " << line << endl;
}
};
- auto plainShellOutputDomain = logger::globalLogManager()->getNamedDomain("plainShellOutput");
- logger::LogstreamBuilder builder(
- plainShellOutputDomain, getThreadName(), logger::LogSeverity::Log());
+ std::ostringstream ss;
sinkProgramOutput(_buffer);
- sinkProgramOutput(builder);
+ sinkProgramOutput(ss);
+ LOGV2_OPTIONS(4615640, {logv2::LogTag::kPlainShell}, "{message}", "message"_attr = ss.str());
}
string ProgramOutputMultiplexer::str() const {
diff --git a/src/mongo/transport/session_asio.h b/src/mongo/transport/session_asio.h
index 6efbd104b0a..bf65b37262e 100644
--- a/src/mongo/transport/session_asio.h
+++ b/src/mongo/transport/session_asio.h
@@ -387,7 +387,11 @@ private:
sb << "recv(): message msgLen " << msgLen << " is invalid. "
<< "Min " << kHeaderSize << " Max: " << MaxMessageSizeBytes;
const auto str = sb.str();
- LOGV2(23837, "{str}", "str"_attr = str);
+ LOGV2(4615638,
+ "recv(): message msgLen {msgLen} is invalid. Min: {min} Max: {max}",
+ "msgLen"_attr = msgLen,
+ "min"_attr = kHeaderSize,
+ "max"_attr = MaxMessageSizeBytes);
return Future<Message>::makeReady(Status(ErrorCodes::ProtocolError, str));
}
diff --git a/src/mongo/unittest/unittest.cpp b/src/mongo/unittest/unittest.cpp
index 816c97761f4..5bd0ce6aa61 100644
--- a/src/mongo/unittest/unittest.cpp
+++ b/src/mongo/unittest/unittest.cpp
@@ -83,12 +83,14 @@ auto& suitesMap() {
} // namespace
-logger::LogstreamBuilder log() {
- return LogstreamBuilder(unittestOutput(), getThreadName(), logger::LogSeverity::Log());
+logger::LogstreamBuilderDeprecated log() {
+ return LogstreamBuilderDeprecated(
+ unittestOutput(), getThreadName(), logger::LogSeverity::Log());
}
-logger::LogstreamBuilder warning() {
- return LogstreamBuilder(unittestOutput(), getThreadName(), logger::LogSeverity::Warning());
+logger::LogstreamBuilderDeprecated warning() {
+ return LogstreamBuilderDeprecated(
+ unittestOutput(), getThreadName(), logger::LogSeverity::Warning());
}
void setupTestLogger() {
diff --git a/src/mongo/unittest/unittest.h b/src/mongo/unittest/unittest.h
index 30ff41a6971..f3582d319a0 100644
--- a/src/mongo/unittest/unittest.h
+++ b/src/mongo/unittest/unittest.h
@@ -338,8 +338,8 @@ void setupTestLogger();
* Gets a LogstreamBuilder for logging to the unittest log domain, which may have
* different target from the global log domain.
*/
-logger::LogstreamBuilder log();
-logger::LogstreamBuilder warning();
+logger::LogstreamBuilderDeprecated log();
+logger::LogstreamBuilderDeprecated warning();
/**
* Representation of a collection of tests.
diff --git a/src/mongo/util/destructor_guard.h b/src/mongo/util/destructor_guard.h
index 6b69d3c856d..9dbf9f661e2 100644
--- a/src/mongo/util/destructor_guard.h
+++ b/src/mongo/util/destructor_guard.h
@@ -29,24 +29,24 @@
#pragma once
-#include "mongo/logger/log_severity.h"
-#include "mongo/logger/logger.h"
-#include "mongo/logger/logstream_builder.h"
+#include "mongo/logv2/log_detail.h"
#include "mongo/util/assert_util.h"
#define DESTRUCTOR_GUARD MONGO_DESTRUCTOR_GUARD
-#define MONGO_DESTRUCTOR_GUARD(expression) \
- try { \
- expression; \
- } catch (const std::exception& e) { \
- ::mongo::logger::LogstreamBuilder(::mongo::logger::globalLogDomain(), \
- ::mongo::getThreadName(), \
- ::mongo::logger::LogSeverity::Log()) \
- << "caught exception (" << e.what() << ") in destructor (" << __FUNCTION__ << ")" \
- << std::endl; \
- } catch (...) { \
- ::mongo::logger::LogstreamBuilder(::mongo::logger::globalLogDomain(), \
- ::mongo::getThreadName(), \
- ::mongo::logger::LogSeverity::Log()) \
- << "caught unknown exception in destructor (" << __FUNCTION__ << ")" << std::endl; \
+#define MONGO_DESTRUCTOR_GUARD(expression) \
+ try { \
+ expression; \
+ } catch (const std::exception& e) { \
+ logv2::detail::doLog(4615600, \
+ logv2::LogSeverity::Log(), \
+ {logv2::LogComponent::kDefault}, \
+ "caught exception in destructor", \
+ "exception"_attr = e.what(), \
+ "function"_attr = __FUNCTION__); \
+ } catch (...) { \
+ logv2::detail::doLog(4615601, \
+ logv2::LogSeverity::Log(), \
+ {logv2::LogComponent::kDefault}, \
+ "caught unknown exception in destructor", \
+ "function"_attr = __FUNCTION__); \
}
diff --git a/src/mongo/util/log.cpp b/src/mongo/util/log.cpp
index d20e5ab3d46..ee72ac577a7 100644
--- a/src/mongo/util/log.cpp
+++ b/src/mongo/util/log.cpp
@@ -123,13 +123,6 @@ void logContext(const char* errmsg) {
printStackTrace(log().setIsTruncatable(false).stream());
}
-void setPlainConsoleLogger() {
- logger::globalLogManager()->getGlobalDomain()->clearAppenders();
- logger::globalLogManager()->getGlobalDomain()->attachAppender(
- std::make_unique<logger::ConsoleAppender<logger::MessageEventEphemeral>>(
- std::make_unique<logger::MessageEventUnadornedEncoder>()));
-}
-
Tee* const startupWarningsLog = RamLog::get("startupWarnings"); // intentionally leaked
} // namespace mongo
diff --git a/src/mongo/util/log.h b/src/mongo/util/log.h
index cfe42dd8695..d31fa1eefa8 100644
--- a/src/mongo/util/log.h
+++ b/src/mongo/util/log.h
@@ -88,62 +88,62 @@ const ::mongo::logger::LogComponent MongoLogDefaultComponent_component =
"Please see http://www.mongodb.org/about/contributors/reference/server-logging-rules/ "
#endif // MONGO_LOG_DEFAULT_COMPONENT
-using logger::LogstreamBuilder;
+using logger::LogstreamBuilderDeprecated;
using logger::Tee;
/**
* Returns a LogstreamBuilder for logging a message with LogSeverity::Severe().
*/
-inline LogstreamBuilder severe() {
- return LogstreamBuilder(logger::globalLogDomain(),
- getThreadName(),
- logger::LogSeverity::Severe(),
- MongoLogDefaultComponent_component);
+inline LogstreamBuilderDeprecated severe() {
+ return LogstreamBuilderDeprecated(logger::globalLogDomain(),
+ getThreadName(),
+ logger::LogSeverity::Severe(),
+ MongoLogDefaultComponent_component);
}
-inline LogstreamBuilder severe(logger::LogComponent component) {
- return LogstreamBuilder(
+inline LogstreamBuilderDeprecated severe(logger::LogComponent component) {
+ return LogstreamBuilderDeprecated(
logger::globalLogDomain(), getThreadName(), logger::LogSeverity::Severe(), component);
}
/**
* Returns a LogstreamBuilder for logging a message with LogSeverity::Error().
*/
-inline LogstreamBuilder error() {
- return LogstreamBuilder(logger::globalLogDomain(),
- getThreadName(),
- logger::LogSeverity::Error(),
- MongoLogDefaultComponent_component);
+inline LogstreamBuilderDeprecated error() {
+ return LogstreamBuilderDeprecated(logger::globalLogDomain(),
+ getThreadName(),
+ logger::LogSeverity::Error(),
+ MongoLogDefaultComponent_component);
}
-inline LogstreamBuilder error(logger::LogComponent component) {
- return LogstreamBuilder(
+inline LogstreamBuilderDeprecated error(logger::LogComponent component) {
+ return LogstreamBuilderDeprecated(
logger::globalLogDomain(), getThreadName(), logger::LogSeverity::Error(), component);
}
/**
* Returns a LogstreamBuilder for logging a message with LogSeverity::Warning().
*/
-inline LogstreamBuilder warning() {
- return LogstreamBuilder(logger::globalLogDomain(),
- getThreadName(),
- logger::LogSeverity::Warning(),
- MongoLogDefaultComponent_component);
+inline LogstreamBuilderDeprecated warning() {
+ return LogstreamBuilderDeprecated(logger::globalLogDomain(),
+ getThreadName(),
+ logger::LogSeverity::Warning(),
+ MongoLogDefaultComponent_component);
}
-inline LogstreamBuilder warning(logger::LogComponent component) {
- return LogstreamBuilder(
+inline LogstreamBuilderDeprecated warning(logger::LogComponent component) {
+ return LogstreamBuilderDeprecated(
logger::globalLogDomain(), getThreadName(), logger::LogSeverity::Warning(), component);
}
/**
* Returns a LogstreamBuilder for logging a message with LogSeverity::Log().
*/
-inline LogstreamBuilder log() {
- return LogstreamBuilder(logger::globalLogDomain(),
- getThreadName(),
- logger::LogSeverity::Log(),
- MongoLogDefaultComponent_component);
+inline LogstreamBuilderDeprecated log() {
+ return LogstreamBuilderDeprecated(logger::globalLogDomain(),
+ getThreadName(),
+ logger::LogSeverity::Log(),
+ MongoLogDefaultComponent_component);
}
/**
@@ -154,21 +154,21 @@ inline LogstreamBuilder log() {
*
* Once SERVER-29377 is completed, this overload can be removed.
*/
-inline LogstreamBuilder logNoCache() {
- return LogstreamBuilder(logger::globalLogDomain(),
- getThreadName(),
- logger::LogSeverity::Log(),
- MongoLogDefaultComponent_component,
- false);
+inline LogstreamBuilderDeprecated logNoCache() {
+ return LogstreamBuilderDeprecated(logger::globalLogDomain(),
+ getThreadName(),
+ logger::LogSeverity::Log(),
+ MongoLogDefaultComponent_component,
+ false);
}
-inline LogstreamBuilder log(logger::LogComponent component) {
- return LogstreamBuilder(
+inline LogstreamBuilderDeprecated log(logger::LogComponent component) {
+ return LogstreamBuilderDeprecated(
logger::globalLogDomain(), getThreadName(), logger::LogSeverity::Log(), component);
}
-inline LogstreamBuilder log(logger::LogComponent::Value componentValue) {
- return LogstreamBuilder(
+inline LogstreamBuilderDeprecated log(logger::LogComponent::Value componentValue) {
+ return LogstreamBuilderDeprecated(
logger::globalLogDomain(), getThreadName(), logger::LogSeverity::Log(), componentValue);
}
@@ -183,24 +183,27 @@ inline bool shouldLog(logger::LogSeverity severity) {
}
// MONGO_LOG uses log component from MongoLogDefaultComponent from current or global namespace.
-#define MONGO_LOG(DLEVEL) \
- if (!::mongo::shouldLog(MongoLogDefaultComponent_component, \
- ::mongo::LogstreamBuilder::severityCast(DLEVEL))) { \
- } else \
- ::mongo::logger::LogstreamBuilder(::mongo::logger::globalLogDomain(), \
- ::mongo::getThreadName(), \
- ::mongo::LogstreamBuilder::severityCast(DLEVEL), \
- MongoLogDefaultComponent_component)
+#define MONGO_LOG(DLEVEL) \
+ if (!::mongo::shouldLog(MongoLogDefaultComponent_component, \
+ ::mongo::LogstreamBuilderDeprecated::severityCast(DLEVEL))) { \
+ } else \
+ ::mongo::logger::LogstreamBuilderDeprecated( \
+ ::mongo::logger::globalLogDomain(), \
+ ::mongo::getThreadName(), \
+ ::mongo::LogstreamBuilderDeprecated::severityCast(DLEVEL), \
+ MongoLogDefaultComponent_component)
#define LOG MONGO_LOG
-#define MONGO_LOG_COMPONENT(DLEVEL, COMPONENT1) \
- if (!::mongo::shouldLog((COMPONENT1), ::mongo::LogstreamBuilder::severityCast(DLEVEL))) { \
- } else \
- ::mongo::logger::LogstreamBuilder(::mongo::logger::globalLogDomain(), \
- ::mongo::getThreadName(), \
- ::mongo::LogstreamBuilder::severityCast(DLEVEL), \
- (COMPONENT1))
+#define MONGO_LOG_COMPONENT(DLEVEL, COMPONENT1) \
+ if (!::mongo::shouldLog((COMPONENT1), \
+ ::mongo::LogstreamBuilderDeprecated::severityCast(DLEVEL))) { \
+ } else \
+ ::mongo::logger::LogstreamBuilderDeprecated( \
+ ::mongo::logger::globalLogDomain(), \
+ ::mongo::getThreadName(), \
+ ::mongo::LogstreamBuilderDeprecated::severityCast(DLEVEL), \
+ (COMPONENT1))
/**
@@ -224,11 +227,6 @@ extern Tee* const startupWarningsLog; // Things put here get reported in MMS
*/
void logContext(const char* msg = nullptr);
-/**
- * Turns the global log manager into a plain console logger (no adornments).
- */
-void setPlainConsoleLogger();
-
} // namespace mongo
#endif // MONGO_UTIL_LOG_H_
diff --git a/src/mongo/util/log_with_sampling.h b/src/mongo/util/log_with_sampling.h
index 129bb0c3252..8a17ccd7dcd 100644
--- a/src/mongo/util/log_with_sampling.h
+++ b/src/mongo/util/log_with_sampling.h
@@ -42,12 +42,12 @@ namespace mongo {
* is if we should sample this operation for profiling.
*/
inline std::pair<bool, bool> shouldLogSlowOpWithSampling(OperationContext* opCtx,
- logger::LogComponent logComponent,
+ logv2::LogComponent logComponent,
Milliseconds opDuration,
Milliseconds slowMS) {
// Log the operation if log message verbosity for operation component is >= 1.
const bool componentHasTargetLogVerbosity =
- shouldLog(logComponent, logger::LogSeverity::Debug(1));
+ shouldLog(logComponentV2toV1(logComponent), logger::LogSeverity::Debug(1));
const auto client = opCtx->getClient();
const bool shouldSample =
diff --git a/src/mongo/util/log_with_sampling_test.cpp b/src/mongo/util/log_with_sampling_test.cpp
index f0dc97be046..b47d37f87a4 100644
--- a/src/mongo/util/log_with_sampling_test.cpp
+++ b/src/mongo/util/log_with_sampling_test.cpp
@@ -60,8 +60,11 @@ TEST(LogWithSamplingTest, ShouldLogCorrectlyWhenSampleRateIsSet) {
setMinimumLoggedSeverity(MONGO_LOG_DEFAULT_COMPONENT, logger::LogSeverity::Info());
bool shouldLogSlowOp, shouldSample;
- std::tie(shouldLogSlowOp, shouldSample) = shouldLogSlowOpWithSampling(
- opCtx.get(), MONGO_LOG_DEFAULT_COMPONENT, slowOpDurationMS, slowOpThresholdMS);
+ std::tie(shouldLogSlowOp, shouldSample) =
+ shouldLogSlowOpWithSampling(opCtx.get(),
+ logComponentV1toV2(MONGO_LOG_DEFAULT_COMPONENT),
+ slowOpDurationMS,
+ slowOpThresholdMS);
// Verify that shouldLogSlowOp is true when the sampleRate is 1.
ASSERT_TRUE(shouldLogSlowOp);
@@ -72,8 +75,11 @@ TEST(LogWithSamplingTest, ShouldLogCorrectlyWhenSampleRateIsSet) {
// Set sample rate to never profile a slow operation.
serverGlobalParams.sampleRate = 0;
- std::tie(shouldLogSlowOp, shouldSample) = shouldLogSlowOpWithSampling(
- opCtx.get(), MONGO_LOG_DEFAULT_COMPONENT, slowOpDurationMS, slowOpThresholdMS);
+ std::tie(shouldLogSlowOp, shouldSample) =
+ shouldLogSlowOpWithSampling(opCtx.get(),
+ logComponentV1toV2(MONGO_LOG_DEFAULT_COMPONENT),
+ slowOpDurationMS,
+ slowOpThresholdMS);
// Verify that shouldLogSlowOp is false when the sampleRate is 0.
ASSERT_FALSE(shouldLogSlowOp);
@@ -102,8 +108,11 @@ TEST(LogWithSamplingTest, ShouldAlwaysLogsWithVerbosityLevelDebug) {
setMinimumLoggedSeverity(MONGO_LOG_DEFAULT_COMPONENT, logger::LogSeverity::Debug(1));
bool shouldLogSlowOp, shouldSample;
- std::tie(shouldLogSlowOp, shouldSample) = shouldLogSlowOpWithSampling(
- opCtx.get(), MONGO_LOG_DEFAULT_COMPONENT, slowOpDurationMS, slowOpThresholdMS);
+ std::tie(shouldLogSlowOp, shouldSample) =
+ shouldLogSlowOpWithSampling(opCtx.get(),
+ logComponentV1toV2(MONGO_LOG_DEFAULT_COMPONENT),
+ slowOpDurationMS,
+ slowOpThresholdMS);
// Verify that shouldLogSlowOp is true when the op is slow.
ASSERT_TRUE(shouldLogSlowOp);
@@ -114,8 +123,11 @@ TEST(LogWithSamplingTest, ShouldAlwaysLogsWithVerbosityLevelDebug) {
// Set sample rate to never profile a slow operation.
serverGlobalParams.sampleRate = 0;
- std::tie(shouldLogSlowOp, shouldSample) = shouldLogSlowOpWithSampling(
- opCtx.get(), MONGO_LOG_DEFAULT_COMPONENT, slowOpDurationMS, slowOpThresholdMS);
+ std::tie(shouldLogSlowOp, shouldSample) =
+ shouldLogSlowOpWithSampling(opCtx.get(),
+ logComponentV1toV2(MONGO_LOG_DEFAULT_COMPONENT),
+ slowOpDurationMS,
+ slowOpThresholdMS);
// Verify that we should still log even when the sampleRate is 0.
ASSERT_TRUE(shouldLogSlowOp);
@@ -145,8 +157,11 @@ TEST(LogWithSamplingTest, ShouldNotLogFastOp) {
setMinimumLoggedSeverity(MONGO_LOG_DEFAULT_COMPONENT, logger::LogSeverity::Info());
bool shouldLogSlowOp, shouldSample;
- std::tie(shouldLogSlowOp, shouldSample) = shouldLogSlowOpWithSampling(
- opCtx.get(), MONGO_LOG_DEFAULT_COMPONENT, fastOpDurationMS, slowOpThresholdMS);
+ std::tie(shouldLogSlowOp, shouldSample) =
+ shouldLogSlowOpWithSampling(opCtx.get(),
+ logComponentV1toV2(MONGO_LOG_DEFAULT_COMPONENT),
+ fastOpDurationMS,
+ slowOpThresholdMS);
// Verify that shouldLogSlowOp is false when the op is fast.
ASSERT_FALSE(shouldLogSlowOp);
@@ -157,8 +172,11 @@ TEST(LogWithSamplingTest, ShouldNotLogFastOp) {
// Set sample rate to never profile a slow operation.
serverGlobalParams.sampleRate = 0;
- std::tie(shouldLogSlowOp, shouldSample) = shouldLogSlowOpWithSampling(
- opCtx.get(), MONGO_LOG_DEFAULT_COMPONENT, fastOpDurationMS, slowOpThresholdMS);
+ std::tie(shouldLogSlowOp, shouldSample) =
+ shouldLogSlowOpWithSampling(opCtx.get(),
+ logComponentV1toV2(MONGO_LOG_DEFAULT_COMPONENT),
+ fastOpDurationMS,
+ slowOpThresholdMS);
// Verify that we should still not log when the sampleRate is 0.
ASSERT_FALSE(shouldLogSlowOp);
diff --git a/src/third_party/s2/base/logging_mongo.cc b/src/third_party/s2/base/logging_mongo.cc
index c9f73754dde..863d7e5b58d 100644
--- a/src/third_party/s2/base/logging_mongo.cc
+++ b/src/third_party/s2/base/logging_mongo.cc
@@ -58,17 +58,17 @@ public:
std::ostream& stream() override { return _lsb.stream(); }
private:
int _v;
- ml::LogstreamBuilder _lsb;
+ ml::LogstreamBuilderDeprecated _lsb;
};
class SeverityLogSink : public s2_env::LogMessageSink {
public:
// Fatal message will deconstruct it before abort to flush final message.
- explicit SeverityLogSink(s2_env::LogMessage::Severity severity, ml::LogstreamBuilder builder)
+ explicit SeverityLogSink(s2_env::LogMessage::Severity severity, ml::LogstreamBuilderDeprecated builder)
: _severity(severity),
_lsb(std::move(builder)) {}
- SeverityLogSink(s2_env::LogMessage::Severity severity, ml::LogstreamBuilder builder,
+ SeverityLogSink(s2_env::LogMessage::Severity severity, ml::LogstreamBuilderDeprecated builder,
const char* file, int line)
: _severity(severity),
_lsb(std::move(builder)) {
@@ -86,7 +86,7 @@ public:
std::ostream& stream() override { return _lsb->stream(); }
private:
s2_env::LogMessage::Severity _severity;
- boost::optional<ml::LogstreamBuilder> _lsb;
+ boost::optional<ml::LogstreamBuilderDeprecated> _lsb;
};
template <typename...A>