author     Haley Connelly <haley.connelly@mongodb.com>    2020-04-27 17:05:22 -0400
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>    2020-05-14 22:20:30 +0000
commit     a1a0f08128baa0c625c45d4e16122a4f0badd400 (patch)
tree       c9cdefd00377c58a4a2b2a3ff7b2b9f0686c1b2a /src
parent     de6f6e847295d5e4cb86934662b64c8584c65325 (diff)
download   mongo-a1a0f08128baa0c625c45d4e16122a4f0badd400.tar.gz
SERVER-47074 Clean up log lines in mongo/client/sdam and RSM-related files
(cherry picked from commit 694b92620fe581f57ed78ca9c4c59ece4d5b9a74)
Diffstat (limited to 'src')
-rw-r--r--  src/mongo/client/remote_command_retry_scheduler_test.cpp            |   4
-rw-r--r--  src/mongo/client/remote_command_targeter_rs.cpp                      |  15
-rw-r--r--  src/mongo/client/replica_set_change_notifier.cpp                     |  14
-rw-r--r--  src/mongo/client/replica_set_monitor_manager.cpp                     |   1
-rw-r--r--  src/mongo/client/scanning_replica_set_monitor.cpp                    | 150
-rw-r--r--  src/mongo/client/scanning_replica_set_monitor_scan_test.cpp          |  10
-rw-r--r--  src/mongo/client/scanning_replica_set_monitor_test_concurrent.cpp    |  14
-rw-r--r--  src/mongo/client/sdam/json_test_arg_parser.cpp                       |   8
-rw-r--r--  src/mongo/client/sdam/sdam_json_test_runner.cpp                      |  65
-rw-r--r--  src/mongo/client/sdam/server_description.cpp                         |   3
-rw-r--r--  src/mongo/client/sdam/server_selection_json_test_runner.cpp          |  44
-rw-r--r--  src/mongo/client/sdam/topology_description.cpp                       |   5
-rw-r--r--  src/mongo/client/sdam/topology_manager.cpp                           |   5
-rw-r--r--  src/mongo/client/sdam/topology_state_machine.cpp                     |  48
-rw-r--r--  src/mongo/client/sdam/topology_state_machine.h                       |   2
-rw-r--r--  src/mongo/client/server_is_master_monitor.cpp                        |  59
-rw-r--r--  src/mongo/client/server_is_master_monitor_test.cpp                   |  22
-rw-r--r--  src/mongo/client/server_ping_monitor.cpp                             |  41
-rw-r--r--  src/mongo/client/server_ping_monitor_test.cpp                        |  10
-rw-r--r--  src/mongo/client/streamable_replica_set_monitor.cpp                  |  34
-rw-r--r--  src/mongo/client/streamable_replica_set_monitor_query_processor.cpp  |   3
21 files changed, 273 insertions, 284 deletions
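
Nearly every hunk below applies the same LOGV2 cleanup: the old calls packed everything into one format string whose attribute names were auto-derived from the C++ expressions (for example "rcba_response_status" or "set_consecutiveFailedScans"), while the new calls pass the format string, a stable substitution-free message suitable for log aggregation, and short deliberate attribute names such as "replicaSet", "host", and "error". A minimal before/after sketch of that pattern, using the LOGV2 macro exactly as it appears in these files (illustrative only; it compiles only inside the mongo tree against mongo/logv2/log.h):

    // Before: one format string; attribute names derived from the expressions.
    LOGV2(24078,
          "Marking host {host} as failed{causedBy_status}",
          "host"_attr = host,
          "causedBy_status"_attr = causedBy(redact(status)));

    // After: format string, stable message, and concise named attributes.
    LOGV2(24078,
          "Marking host {host} as failed: {error}",
          "Marking host as failed",
          "host"_attr = host,
          "error"_attr = redact(status));
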
diff --git a/src/mongo/client/remote_command_retry_scheduler_test.cpp b/src/mongo/client/remote_command_retry_scheduler_test.cpp
index 611fb00a4f6..e6fddddb96e 100644
--- a/src/mongo/client/remote_command_retry_scheduler_test.cpp
+++ b/src/mongo/client/remote_command_retry_scheduler_test.cpp
@@ -558,9 +558,7 @@ TEST_F(RemoteCommandRetrySchedulerTest,
request,
[&result,
sharedCallbackData](const executor::TaskExecutor::RemoteCommandCallbackArgs& rcba) {
- LOGV2(20156,
- "setting result to {rcba_response_status}",
- "rcba_response_status"_attr = rcba.response.status);
+ LOGV2(20156, "Setting result", "result"_attr = rcba.response.status);
result = rcba.response.status;
},
std::move(policy));
diff --git a/src/mongo/client/remote_command_targeter_rs.cpp b/src/mongo/client/remote_command_targeter_rs.cpp
index 504c7493b9a..7c0cb78b485 100644
--- a/src/mongo/client/remote_command_targeter_rs.cpp
+++ b/src/mongo/client/remote_command_targeter_rs.cpp
@@ -52,15 +52,12 @@ RemoteCommandTargeterRS::RemoteCommandTargeterRS(const std::string& rsName,
std::set<HostAndPort> seedServers(seedHosts.begin(), seedHosts.end());
_rsMonitor = ReplicaSetMonitor::createIfNeeded(rsName, seedServers);
- LOGV2_DEBUG(
- 20157,
- 1,
- "Started targeter for "
- "{ConnectionString_forReplicaSet_rsName_std_vector_HostAndPort_seedServers_begin_"
- "seedServers_end}",
- "ConnectionString_forReplicaSet_rsName_std_vector_HostAndPort_seedServers_begin_seedServers_end"_attr =
- ConnectionString::forReplicaSet(
- rsName, std::vector<HostAndPort>(seedServers.begin(), seedServers.end())));
+ LOGV2_DEBUG(20157,
+ 1,
+ "Started targeter for {connectionString}",
+ "Started targeter",
+ "connectionString"_attr = ConnectionString::forReplicaSet(
+ rsName, std::vector<HostAndPort>(seedServers.begin(), seedServers.end())));
}
ConnectionString RemoteCommandTargeterRS::connectionString() {
diff --git a/src/mongo/client/replica_set_change_notifier.cpp b/src/mongo/client/replica_set_change_notifier.cpp
index 71b03961b86..25269cc8148 100644
--- a/src/mongo/client/replica_set_change_notifier.cpp
+++ b/src/mongo/client/replica_set_change_notifier.cpp
@@ -47,7 +47,11 @@ void ReplicaSetChangeNotifier::_addListener(std::shared_ptr<Listener> listener)
}
void ReplicaSetChangeNotifier::onFoundSet(const std::string& name) noexcept {
- LOGV2_DEBUG(20158, 2, "Signaling found set {name}", "name"_attr = name);
+ LOGV2_DEBUG(20158,
+ 2,
+ "Signaling found set {replicaSet}",
+ "Signaling found set",
+ "replicaSet"_attr = name);
stdx::unique_lock<Latch> lk(_mutex);
@@ -67,6 +71,7 @@ void ReplicaSetChangeNotifier::onPossibleSet(ConnectionString connectionString)
LOGV2_DEBUG(20159,
2,
"Signaling possible set {connectionString}",
+ "Signaling possible set",
"connectionString"_attr = connectionString);
const auto& name = connectionString.getSetName();
@@ -99,6 +104,7 @@ void ReplicaSetChangeNotifier::onConfirmedSet(ConnectionString connectionString,
LOGV2_DEBUG(20160,
2,
"Signaling confirmed set {connectionString} with primary {primary}",
+ "Signaling confirmed set with primary",
"connectionString"_attr = connectionString,
"primary"_attr = primary);
@@ -127,7 +133,11 @@ void ReplicaSetChangeNotifier::onConfirmedSet(ConnectionString connectionString,
}
void ReplicaSetChangeNotifier::onDroppedSet(const std::string& name) noexcept {
- LOGV2_DEBUG(20161, 2, "Signaling dropped set {name}", "name"_attr = name);
+ LOGV2_DEBUG(20161,
+ 2,
+ "Signaling dropped set {replicaSet}",
+ "Signaling dropped set",
+ "replicaSet"_attr = name);
stdx::unique_lock<Latch> lk(_mutex);
diff --git a/src/mongo/client/replica_set_monitor_manager.cpp b/src/mongo/client/replica_set_monitor_manager.cpp
index 4d382ef15a4..c9ca681a2da 100644
--- a/src/mongo/client/replica_set_monitor_manager.cpp
+++ b/src/mongo/client/replica_set_monitor_manager.cpp
@@ -236,6 +236,7 @@ void ReplicaSetMonitorManager::removeMonitor(StringData setName) {
_monitors.erase(it);
LOGV2(20187,
"Removed ReplicaSetMonitor for replica set {replicaSet}",
+ "Removed ReplicaSetMonitor for replica set",
"replicaSet"_attr = setName);
}
}
diff --git a/src/mongo/client/scanning_replica_set_monitor.cpp b/src/mongo/client/scanning_replica_set_monitor.cpp
index a34fbf5d483..62d6def67a8 100644
--- a/src/mongo/client/scanning_replica_set_monitor.cpp
+++ b/src/mongo/client/scanning_replica_set_monitor.cpp
@@ -232,8 +232,9 @@ void ScanningReplicaSetMonitor::SetState::rescheduleRefresh(SchedulingStrategy s
if (isDropped) { // already removed so no need to refresh
LOGV2_DEBUG(24070,
1,
- "Stopping refresh for replica set {name} because it's removed",
- "name"_attr = name);
+ "Stopping refresh for replica set {replicaSet} because it's removed",
+ "Stopping refresh for replica set because it's removed",
+ "replicaSet"_attr = name);
return;
}
@@ -261,6 +262,7 @@ void ScanningReplicaSetMonitor::SetState::rescheduleRefresh(SchedulingStrategy s
LOGV2_DEBUG(24071,
1,
"Next replica set scan scheduled for {nextScanTime}",
+ "Next replica set scan scheduled",
"nextScanTime"_attr = nextScanTime);
auto swHandle = scheduleWorkAt(nextScanTime, [this](const CallbackArgs& cbArgs) {
if (cbArgs.myHandle != refresherHandle)
@@ -279,16 +281,18 @@ void ScanningReplicaSetMonitor::SetState::rescheduleRefresh(SchedulingStrategy s
if (ErrorCodes::isShutdownError(swHandle.getStatus().code())) {
LOGV2_DEBUG(24072,
1,
- "Cant schedule refresh for {name}. Executor shutdown in progress",
- "name"_attr = name);
+ "Can't schedule refresh for {replicaSet}. Executor shutdown in progress",
+ "Can't schedule refresh for replica set. Executor shutdown in progress",
+ "replicaSet"_attr = name);
return;
}
if (!swHandle.isOK()) {
LOGV2_FATAL(40140,
- "Can't continue refresh for replica set {name} due to {swHandle_getStatus}",
- "name"_attr = name,
- "swHandle_getStatus"_attr = redact(swHandle.getStatus()));
+ "Can't continue refresh for replica set {replicaSet}: {error}",
+ "Can't continue refresh for replica set",
+ "error"_attr = redact(swHandle.getStatus()),
+ "replicaSet"_attr = name);
}
refresherHandle = std::move(swHandle.getValue());
@@ -531,11 +535,11 @@ void Refresher::scheduleNetworkRequests() {
}
if (!swHandle.isOK()) {
- LOGV2_FATAL(
- 31176,
- "Can't continue scan for replica set {set_name} due to {swHandle_getStatus}",
- "set_name"_attr = _set->name,
- "swHandle_getStatus"_attr = redact(swHandle.getStatus()));
+ LOGV2_FATAL(31176,
+ "Can't continue scan for replica set {replicaSet}: {error}",
+ "Can't continue scan for replica set",
+ "error"_attr = redact(swHandle.getStatus()),
+ "replicaSet"_attr = _set->name);
}
node->scheduledIsMasterHandle = uassertStatusOK(std::move(swHandle));
@@ -635,8 +639,10 @@ Refresher::NextStep Refresher::getNextStep() {
if (_scan->hostsToScan.empty()) {
// We've tried all hosts we can, so nothing more to do in this round.
if (!_scan->foundUpMaster) {
- LOGV2_WARNING(
- 24089, "Unable to reach primary for set {set_name}", "set_name"_attr = _set->name);
+ LOGV2_WARNING(24089,
+ "Unable to reach primary for replica set {replicaSet}",
+ "Unable to reach primary for replica set",
+ "replicaSet"_attr = _set->name);
// Since we've talked to everyone we could but still didn't find a primary, we
// do the best we can, and assume all unconfirmedReplies are actually from nodes
@@ -670,11 +676,13 @@ Refresher::NextStep Refresher::getNextStep() {
auto nScans = _set->consecutiveFailedScans++;
if (nScans <= 10 || nScans % 10 == 0) {
LOGV2(24073,
- "Cannot reach any nodes for set {set_name}. Please check network "
+ "Cannot reach any nodes for replica set {replicaSet}. Please check network "
"connectivity and the status of the set. This has happened for "
- "{set_consecutiveFailedScans} checks in a row.",
- "set_name"_attr = _set->name,
- "set_consecutiveFailedScans"_attr = _set->consecutiveFailedScans);
+ "{numConsecutiveFailedScans} checks in a row.",
+ "Cannot reach any nodes for replica set. Please check network connectivity "
+ "and the status of the set. A number of consecutive scan checks have failed",
+ "replicaSet"_attr = _set->name,
+ "numConsecutiveFailedScans"_attr = _set->consecutiveFailedScans);
}
}
@@ -684,9 +692,10 @@ Refresher::NextStep Refresher::getNextStep() {
LOGV2_DEBUG(24074,
1,
- "Refreshing replica set {set_name} took {scan_timer_millis}ms",
- "set_name"_attr = _set->name,
- "scan_timer_millis"_attr = _scan->timer.millis());
+ "Refreshing replica set {replicaSet} took {duration}",
+ "Replica set refreshed",
+ "duration"_attr = Milliseconds(_scan->timer.millis()),
+ "replicaSet"_attr = _set->name);
return NextStep(NextStep::DONE);
}
@@ -724,13 +733,16 @@ void Refresher::receivedIsMaster(const HostAndPort& from,
_scan->possibleNodes.insert(reply.members.begin(), reply.members.end());
}
} else {
- LOGV2_ERROR(24091,
- "replset name mismatch: expected \"{set_name}\", but remote node {from} "
- "has replset name \"{reply_setName}\", ismaster: {replyObj}",
- "set_name"_attr = _set->name,
- "from"_attr = from,
- "reply_setName"_attr = reply.setName,
- "replyObj"_attr = replyObj);
+ LOGV2_ERROR(
+ 24091,
+ "replset name mismatch: expected {expectedSetName}, but remote node "
+ "{fromRemoteNode} has replset name {receivedSetName}, isMaster: {isMasterReply}",
+ "replset name mismatch. The expected set name does not match the set name "
+ "received from the remote node",
+ "expectedSetName"_attr = _set->name,
+ "fromRemoteNode"_attr = from,
+ "receivedSetName"_attr = reply.setName,
+ "isMasterReply"_attr = replyObj);
}
failedHost(from,
@@ -859,10 +871,11 @@ Status Refresher::receivedIsMasterFromMaster(const HostAndPort& from, const IsMa
!std::equal(_set->nodes.begin(), _set->nodes.end(), reply.members.begin(), hostsEqual)) {
LOGV2_DEBUG(24075,
2,
- "Adjusting nodes in our view of replica set {set_name} based on master reply: "
- "{reply_raw}",
- "set_name"_attr = _set->name,
- "reply_raw"_attr = redact(reply.raw));
+ "Adjusting nodes in our view of replica set {replicaSet} based on isMaster "
+ "reply: {isMasterReply}",
+ "Adjusting nodes in our view of the replica set based on isMaster reply",
+ "replicaSet"_attr = _set->name,
+ "isMasterReply"_attr = redact(reply.raw));
// remove non-members from _set->nodes
_set->nodes.erase(
@@ -901,9 +914,10 @@ Status Refresher::receivedIsMasterFromMaster(const HostAndPort& from, const IsMa
// LogLevel can be pretty low, since replica set reconfiguration should be pretty rare
// and we want to record our changes
LOGV2(24076,
- "Confirmed replica set for {set_name} is {set_seedConnStr}",
- "set_name"_attr = _set->name,
- "set_seedConnStr"_attr = _set->seedConnStr);
+ "Confirmed replica set for {replicaSet} is {connectionString}",
+ "Confirmed replica set",
+ "replicaSet"_attr = _set->name,
+ "connectionString"_attr = _set->seedConnStr);
_set->notifier->onConfirmedSet(_set->seedConnStr, reply.host, reply.passives);
}
@@ -975,9 +989,10 @@ void IsMasterReply::parse(const BSONObj& obj) {
} catch (const std::exception& e) {
ok = false;
LOGV2(24077,
- "exception while parsing isMaster reply: {e_what} {obj}",
- "e_what"_attr = e.what(),
- "obj"_attr = obj);
+ "Exception while parsing isMaster reply: {error} {isMasterReply}",
+ "Exception while parsing isMaster reply",
+ "error"_attr = e.what(),
+ "isMasterReply"_attr = obj);
}
}
@@ -986,9 +1001,10 @@ Node::Node(const HostAndPort& host) : host(host), latencyMicros(unknownLatency)
void Node::markFailed(const Status& status) {
if (isUp) {
LOGV2(24078,
- "Marking host {host} as failed{causedBy_status}",
+ "Marking host {host} as failed: {error}",
+ "Marking host as failed",
"host"_attr = host,
- "causedBy_status"_attr = causedBy(redact(status)));
+ "error"_attr = redact(status));
isUp = false;
}
@@ -998,15 +1014,16 @@ void Node::markFailed(const Status& status) {
bool Node::matches(const ReadPreference pref) const {
if (!isUp) {
- LOGV2_DEBUG(24079, 3, "Host {host} is not up", "host"_attr = host);
+ LOGV2_DEBUG(24079, 3, "Host {host} is not up", "Host is not up", "host"_attr = host);
return false;
}
LOGV2_DEBUG(24080,
3,
- "Host {host} is {isMaster_primary_not_primary}",
+ "Host {host} is primary? {isPrimary}",
+ "Host is primary?",
"host"_attr = host,
- "isMaster_primary_not_primary"_attr = (isMaster ? "primary" : "not primary"));
+ "isPrimary"_attr = isMaster);
if (pref == ReadPreference::PrimaryOnly) {
return isMaster;
}
@@ -1035,9 +1052,10 @@ void Node::update(const IsMasterReply& reply) {
LOGV2_DEBUG(24081,
3,
- "Updating host {host} based on ismaster reply: {reply_raw}",
+ "Updating host {host} based on isMaster reply: {isMasterReply}",
+ "Updating host based on isMaster reply",
"host"_attr = host,
- "reply_raw"_attr = reply.raw);
+ "isMasterReply"_attr = reply.raw);
// Nodes that are hidden or neither master or secondary are considered down since we can't
// send any operations to them.
@@ -1062,16 +1080,18 @@ void Node::update(const IsMasterReply& reply) {
LOGV2_DEBUG(24082,
3,
- "Updating {host} lastWriteDate to {reply_lastWriteDate}",
+ "Updating {host} lastWriteDate to {lastWriteDate}",
+ "Updating host lastWriteDate",
"host"_attr = host,
- "reply_lastWriteDate"_attr = reply.lastWriteDate);
+ "lastWriteDate"_attr = reply.lastWriteDate);
lastWriteDate = reply.lastWriteDate;
LOGV2_DEBUG(24083,
3,
- "Updating {host} opTime to {reply_opTime}",
+ "Updating {host} opTime to {opTime}",
+ "Updating host opTime",
"host"_attr = host,
- "reply_opTime"_attr = reply.opTime);
+ "opTime"_attr = reply.opTime);
opTime = reply.opTime;
lastWriteDateUpdateTime = Date_t::now();
}
@@ -1091,8 +1111,9 @@ SetState::SetState(const MongoURI& uri,
if (name.empty())
LOGV2_WARNING(24090,
- "Replica set name empty, first node: {seedNodes_begin}",
- "seedNodes_begin"_attr = *(seedNodes.begin()));
+ "Replica set name empty, first node: {firstNode}",
+ "Replica set name empty, adding first node",
+ "firstNode"_attr = *(seedNodes.begin()));
// This adds the seed hosts to nodes, but they aren't usable for anything except seeding a
// scan until we start a scan and either find a master or contact all hosts without finding
@@ -1297,9 +1318,10 @@ Node* SetState::findOrCreateNode(const HostAndPort& host) {
if (it == nodes.end() || it->host != host) {
LOGV2_DEBUG(24084,
2,
- "Adding node {host} to our view of replica set {name}",
+ "Adding node {host} to our view of replica set {replicaSet}",
+ "Adding node to our view of the replica set",
"host"_attr = host,
- "name"_attr = name);
+ "replicaSet"_attr = name);
it = nodes.insert(it, Node(host));
}
return &(*it);
@@ -1310,10 +1332,12 @@ void SetState::updateNodeIfInNodes(const IsMasterReply& reply) {
if (!node) {
LOGV2_DEBUG(24085,
2,
- "Skipping application of ismaster reply from {reply_host} since it isn't a "
- "confirmed member of set {name}",
- "reply_host"_attr = reply.host,
- "name"_attr = name);
+ "Skipping application of isMaster reply from {host} since it isn't a "
+ "confirmed member of set {replicaSet}",
+ "Skipping application of isMaster reply from host since it isn't a confirmed "
+ "member of the replica set",
+ "host"_attr = reply.host,
+ "replicaSet"_attr = name);
return;
}
@@ -1359,12 +1383,12 @@ void SetState::notify() {
it->promise.emplaceValue(std::move(match));
waiters.erase(it++);
} else if (it->deadline <= cachedNow) {
- LOGV2_DEBUG(
- 24086,
- 1,
- "Unable to statisfy read preference {it_criteria} by deadline {it_deadline}",
- "it_criteria"_attr = it->criteria,
- "it_deadline"_attr = it->deadline);
+ LOGV2_DEBUG(24086,
+ 1,
+                          "Unable to satisfy read preference {criteria} by deadline {deadline}",
+                          "Unable to satisfy read preference by deadline",
+ "criteria"_attr = it->criteria,
+ "deadline"_attr = it->deadline);
it->promise.setError(makeUnsatisfedReadPrefError(it->criteria));
waiters.erase(it++);
} else if (shouldQuickFail) {
diff --git a/src/mongo/client/scanning_replica_set_monitor_scan_test.cpp b/src/mongo/client/scanning_replica_set_monitor_scan_test.cpp
index 32212f58eba..f680a7d3976 100644
--- a/src/mongo/client/scanning_replica_set_monitor_scan_test.cpp
+++ b/src/mongo/client/scanning_replica_set_monitor_scan_test.cpp
@@ -1416,15 +1416,15 @@ TEST_F(MinOpTimeTest, MinOpTimeIgnored) {
class Listener : public ReplicaSetChangeNotifier::Listener {
public:
void logEvent(StringData name, const Key& key) {
- LOGV2(20190, "{name}: {key}", "name"_attr = name, "key"_attr = key);
+ LOGV2(20190, "Replica set change listener event", "name"_attr = name, "key"_attr = key);
}
void logEvent(StringData name, const State& state) {
LOGV2(20191,
- "{name}: ({state_generation}) {state_connStr} | {state_primary}",
+ "Replica set change listener event",
"name"_attr = name,
- "state_generation"_attr = state.generation,
- "state_connStr"_attr = state.connStr,
- "state_primary"_attr = state.primary);
+ "stateGeneration"_attr = state.generation,
+ "stateConnStr"_attr = state.connStr,
+ "statePrimary"_attr = state.primary);
}
void onFoundSet(const Key& key) noexcept override {
diff --git a/src/mongo/client/scanning_replica_set_monitor_test_concurrent.cpp b/src/mongo/client/scanning_replica_set_monitor_test_concurrent.cpp
index 5b2cb6ae73b..21370178ed5 100644
--- a/src/mongo/client/scanning_replica_set_monitor_test_concurrent.cpp
+++ b/src/mongo/client/scanning_replica_set_monitor_test_concurrent.cpp
@@ -104,17 +104,17 @@ protected:
_numChecks[request.target]++;
LOGV2_DEBUG(20192,
2,
- "at {elapsed} got mock net operation {request}",
+ "Got mock network operation",
"elapsed"_attr = elapsed(),
"request"_attr = request.toString());
const auto node = replSet.getNode(request.target.toString());
if (node->isRunning()) {
const auto opmsg = OpMsgRequest::fromDBAndBody(request.dbname, request.cmdObj);
const auto reply = node->runCommand(request.id, opmsg)->getCommandReply();
- LOGV2_DEBUG(20193, 2, "replying {reply}", "reply"_attr = reply);
+ LOGV2_DEBUG(20193, 2, "Replying", "reply"_attr = reply);
net->scheduleSuccessfulResponse(noi, RemoteCommandResponse(reply, Milliseconds(0)));
} else {
- LOGV2_DEBUG(20194, 2, "black hole");
+ LOGV2_DEBUG(20194, 2, "Black hole");
net->blackHole(noi);
}
net->runReadyNetworkOperations();
@@ -128,11 +128,11 @@ protected:
// Operations can happen inline with advanceTime(), so log before and after the call.
LOGV2_DEBUG(20195,
3,
- "Advancing time from {elapsed} to {elapsed_d}",
- "elapsed"_attr = elapsed(),
- "elapsed_d"_attr = (elapsed() + d));
+ "Advancing time",
+ "elapsedStart"_attr = elapsed(),
+ "elapsedEnd"_attr = (elapsed() + d));
net->advanceTime(net->now() + d);
- LOGV2_DEBUG(20196, 3, "Advanced to {elapsed}", "elapsed"_attr = elapsed());
+ LOGV2_DEBUG(20196, 3, "Advanced time", "elapsed"_attr = elapsed());
}
int getNumChecks(HostAndPort host) {
diff --git a/src/mongo/client/sdam/json_test_arg_parser.cpp b/src/mongo/client/sdam/json_test_arg_parser.cpp
index a7138b183ac..51f995c133a 100644
--- a/src/mongo/client/sdam/json_test_arg_parser.cpp
+++ b/src/mongo/client/sdam/json_test_arg_parser.cpp
@@ -86,12 +86,10 @@ ArgParser::ArgParser(int argc, char* argv[]) {
}
void ArgParser::LogParams() const {
- LOGV2(20199, "Verbosity: {verbose}", "verbose"_attr = _verbose);
- LOGV2(20200, "Source Directory: {sourceDirectory}", "sourceDirectory"_attr = _sourceDirectory);
+ LOGV2(20199, "Verbosity", "verbose"_attr = _verbose);
+ LOGV2(20200, "Source directory", "directory"_attr = _sourceDirectory);
if (_testFilters.size()) {
- LOGV2(20201,
- "Filters: {boost_join_testFilters}",
- "boost_join_testFilters"_attr = boost::join(_testFilters, ", "));
+ LOGV2(20201, "Test filters", "filters"_attr = boost::join(_testFilters, ", "));
}
}
diff --git a/src/mongo/client/sdam/sdam_json_test_runner.cpp b/src/mongo/client/sdam/sdam_json_test_runner.cpp
index 115c97c745d..21d7e5d5770 100644
--- a/src/mongo/client/sdam/sdam_json_test_runner.cpp
+++ b/src/mongo/client/sdam/sdam_json_test_runner.cpp
@@ -72,13 +72,6 @@ using namespace mongo::sdam;
namespace mongo::sdam {
-std::string emphasize(const std::string text) {
- std::stringstream output;
- const auto border = std::string(3, '#');
- output << border << " " << text << " " << border << std::endl;
- return output.str();
-}
-
/**
* This class is responsible for parsing and executing a single 'phase' of the json test
*/
@@ -120,17 +113,16 @@ public:
auto descriptionStr =
(response.getResponse()) ? response.getResponse()->toString() : "[ Network Error ]";
LOGV2(20202,
- "Sending server description: {response_getServer} : {descriptionStr}",
- "response_getServer"_attr = response.getServer(),
- "descriptionStr"_attr = descriptionStr);
+ "Sending server description",
+ "server"_attr = response.getServer(),
+ "description"_attr = descriptionStr);
topology.onServerDescription(response);
}
LOGV2(20203,
- "TopologyDescription after Phase {phaseNum}: {topology_getTopologyDescription}",
- "phaseNum"_attr = _phaseNum,
- "topology_getTopologyDescription"_attr =
- topology.getTopologyDescription()->toString());
+ "TopologyDescription after phase",
+ "phaseNumber"_attr = _phaseNum,
+ "topologyDescription"_attr = topology.getTopologyDescription()->toString());
validateServers(
&testResult, topology.getTopologyDescription(), _topologyOutcome["servers"].Obj());
@@ -465,16 +457,11 @@ public:
TestCaseResult result{{}, _testFilePath, _testName};
for (const auto& testPhase : _testPhases) {
- LOGV2(20204,
- "{emphasize_Phase_std_to_string_testPhase_getPhaseNum}",
- "emphasize_Phase_std_to_string_testPhase_getPhaseNum"_attr =
- emphasize("Phase " + std::to_string(testPhase.getPhaseNum())));
+ LOGV2(20204, "### Phase Number ###", "phase"_attr = testPhase.getPhaseNum());
auto phaseResult = testPhase.execute(topology);
result.phaseResults.push_back(phaseResult);
if (!result.Success()) {
- LOGV2(20205,
- "Phase {phaseResult_phaseNumber} failed.",
- "phaseResult_phaseNumber"_attr = phaseResult.phaseNumber);
+ LOGV2(20205, "Phase failed", "phase"_attr = phaseResult.phaseNumber);
break;
}
}
@@ -489,11 +476,7 @@ public:
private:
void parseTest(fs::path testFilePath) {
_testFilePath = testFilePath.string();
- LOGV2(20206, "");
- LOGV2(20207,
- "{emphasize_Parsing_testFilePath_string}",
- "emphasize_Parsing_testFilePath_string"_attr =
- emphasize("Parsing " + testFilePath.string()));
+ LOGV2(20207, "### Parsing Test File ###", "testFilePath"_attr = testFilePath.string());
{
std::ifstream testFile(_testFilePath);
std::ostringstream json;
@@ -556,10 +539,7 @@ public:
for (auto jsonTest : testFiles) {
auto testCase = JsonTestCase(jsonTest);
try {
- LOGV2(20208,
- "{emphasize_Executing_testCase_Name}",
- "emphasize_Executing_testCase_Name"_attr =
- emphasize("Executing " + testCase.Name()));
+ LOGV2(20208, "### Executing Test Case ###", "test"_attr = testCase.Name());
results.push_back(testCase.execute());
} catch (const DBException& ex) {
std::stringstream error;
@@ -584,9 +564,7 @@ public:
results.begin(), results.end(), [](const JsonTestCase::TestCaseResult& result) {
return !result.Success();
})) {
- LOGV2(20209,
- "{emphasize_Failed_Test_Results}",
- "emphasize_Failed_Test_Results"_attr = emphasize("Failed Test Results"));
+ LOGV2(20209, "### Failed Test Results ###");
}
for (const auto result : results) {
@@ -596,20 +574,17 @@ public:
if (result.Success()) {
++numSuccess;
} else {
- LOGV2(
- 20210, "{emphasize_testName}", "emphasize_testName"_attr = emphasize(testName));
- LOGV2(20211, "error in file: {file}", "file"_attr = file);
+ LOGV2(20210, "### Test Name ###", "name"_attr = testName);
+ LOGV2(20211, "Error in file", "file"_attr = file);
++numFailed;
for (auto phaseResult : phaseResults) {
- LOGV2(20212,
- "Phase {phaseResult_phaseNumber}: ",
- "phaseResult_phaseNumber"_attr = phaseResult.phaseNumber);
+ LOGV2(20212, "Phase", "phaseNumber"_attr = phaseResult.phaseNumber);
if (!phaseResult.Success()) {
for (auto error : phaseResult.errorDescriptions) {
LOGV2(20213,
- "\t{error_first}: {error_second}",
- "error_first"_attr = error.first,
- "error_second"_attr = error.second);
+ "Errors",
+ "errorFirst"_attr = error.first,
+ "errorSecond"_attr = error.second);
}
}
}
@@ -617,7 +592,7 @@ public:
}
}
LOGV2(20215,
- "{numTestCases} test cases; {numSuccess} success; {numFailed} failed.",
+ "Test cases summary",
"numTestCases"_attr = numTestCases,
"numSuccess"_attr = numSuccess,
"numFailed"_attr = numFailed);
@@ -657,8 +632,8 @@ private:
} else {
LOGV2_DEBUG(20216,
2,
- "'{filePath_string}' skipped due to filter configuration.",
- "filePath_string"_attr = filePath.string());
+ "Test skipped due to filter configuration",
+ "filePath"_attr = filePath.string());
}
}
diff --git a/src/mongo/client/sdam/server_description.cpp b/src/mongo/client/sdam/server_description.cpp
index c65cdbc2ec0..08500bbe2db 100644
--- a/src/mongo/client/sdam/server_description.cpp
+++ b/src/mongo/client/sdam/server_description.cpp
@@ -213,7 +213,8 @@ void ServerDescription::parseTypeFromIsMaster(const BSONObj isMaster) {
t = ServerType::kRSGhost;
} else {
LOGV2_ERROR(23931,
- "unknown server type from successful ismaster reply: {isMaster}",
+ "Unknown server type from successful isMaster reply: {isMaster}",
+ "Unknown server type from successful isMaster reply",
"isMaster"_attr = isMaster.toString());
t = ServerType::kUnknown;
}
diff --git a/src/mongo/client/sdam/server_selection_json_test_runner.cpp b/src/mongo/client/sdam/server_selection_json_test_runner.cpp
index b2ce37d45a5..e4764c2db2a 100644
--- a/src/mongo/client/sdam/server_selection_json_test_runner.cpp
+++ b/src/mongo/client/sdam/server_selection_json_test_runner.cpp
@@ -73,13 +73,6 @@ using namespace mongo::sdam;
namespace mongo::sdam {
-std::string emphasize(const std::string text) {
- std::stringstream output;
- const auto border = std::string(3, '#');
- output << border << " " << text << " " << border << std::endl;
- return output.str();
-}
-
std::ostream& operator<<(std::ostream& os, const std::vector<HostAndPort>& input) {
for (auto const& i : input) {
os << i.toString() << " ";
@@ -119,8 +112,7 @@ public:
~JsonRttTestCase() = default;
TestCaseResult execute() {
- LOGV2(
- 4333500, "{testFilePath}", "testFilePath"_attr = emphasize("Running " + _testFilePath));
+ LOGV2(4333500, "### Running Test ###", "testFilePath"_attr = _testFilePath);
ServerDescriptionPtr updatedServerDescription;
if (_serverDescription) {
@@ -141,7 +133,7 @@ public:
validateNewAvgRtt(&result, updatedServerDescription);
if (!result.Success()) {
- LOGV2(4333501, "Test {testFilePath} failed.", "testFilePath"_attr = _testFilePath);
+ LOGV2(4333501, "Test failed", "testFilePath"_attr = _testFilePath);
}
return result;
@@ -154,10 +146,7 @@ public:
private:
void parseTest(fs::path testFilePath) {
_testFilePath = testFilePath.string();
- LOGV2(4333502, "");
- LOGV2(4333503,
- "{testFilePath}",
- "testFilePath"_attr = emphasize("Parsing " + testFilePath.string()));
+ LOGV2(4333503, "### Parsing Test ###", "testFilePath"_attr = testFilePath.string());
{
std::ifstream testFile(_testFilePath);
std::ostringstream json;
@@ -222,8 +211,7 @@ public:
~JsonServerSelectionTestCase() = default;
TestCaseResult execute() {
- LOGV2(
- 4333504, "{testFilePath}", "testFilePath"_attr = emphasize("Running " + _testFilePath));
+ LOGV2(4333504, "### Running Test ###", "testFilePath"_attr = _testFilePath);
SdamServerSelector serverSelector(
sdam::ServerSelectionConfiguration::defaultConfiguration());
@@ -233,7 +221,7 @@ public:
validateServersInLatencyWindow(&result, selectedServers);
if (!result.Success()) {
- LOGV2(4333505, "Test {testFilePath} failed.", "testFilePath"_attr = _testFilePath);
+ LOGV2(4333505, "Test failed", "testFilePath"_attr = _testFilePath);
}
return result;
@@ -246,10 +234,7 @@ public:
private:
void parseTest(fs::path testFilePath) {
_testFilePath = testFilePath.string();
- LOGV2(4333506, "");
- LOGV2(4333507,
- "{testFilePath}",
- "testFilePath"_attr = emphasize("Parsing " + testFilePath.string()));
+ LOGV2(4333507, "### Parsing Test ###", "testFilePath"_attr = testFilePath.string());
{
std::ifstream testFile(_testFilePath);
std::ostringstream json;
@@ -400,9 +385,8 @@ public:
}();
try {
- LOGV2(4333508,
- "{testFilePath}",
- "testFilePath"_attr = emphasize("Executing " + testCase->FilePath()));
+ LOGV2(
+ 4333508, "### Executing Test ###", "testFilePath"_attr = testCase->FilePath());
results.push_back(testCase->execute());
} catch (const DBException& ex) {
std::stringstream error;
@@ -425,9 +409,7 @@ public:
results.begin(), results.end(), [](const JsonTestCase::TestCaseResult& result) {
return !result.Success();
})) {
- LOGV2(4333509,
- "{Failed_Test_Results}",
- "Failed_Test_Results"_attr = emphasize("Failed Test Results"));
+ LOGV2(4333509, "### Failed Test Results ###");
}
for (const auto result : results) {
@@ -435,14 +417,12 @@ public:
if (result.Success()) {
++numSuccess;
} else {
- LOGV2(4333510, "{testFilePath}", "testFilePath"_attr = emphasize(file));
- LOGV2(4333511, "error in file: {file}", "file"_attr = file);
+ LOGV2(4333510, "### Failed Test File ###", "testFilePath"_attr = file);
++numFailed;
- LOGV2(4333512, "");
}
}
LOGV2(4333513,
- "{numTestCases} test cases; {numSuccess} success; {numFailed} failed.",
+ "Results summary",
"numTestCases"_attr = numTestCases,
"numSuccess"_attr = numSuccess,
"numFailed"_attr = numFailed);
@@ -482,7 +462,7 @@ private:
} else {
LOGV2_DEBUG(4333514,
2,
- "'{filePath}' skipped due to filter configuration.",
+ "Test skipped due to filter configuration",
"filePath"_attr = filePath.string());
}
}
diff --git a/src/mongo/client/sdam/topology_description.cpp b/src/mongo/client/sdam/topology_description.cpp
index fdd8f36191d..6f688367055 100644
--- a/src/mongo/client/sdam/topology_description.cpp
+++ b/src/mongo/client/sdam/topology_description.cpp
@@ -111,11 +111,6 @@ const boost::optional<ServerDescriptionPtr> TopologyDescription::findServerByAdd
boost::optional<ServerDescriptionPtr> TopologyDescription::installServerDescription(
const ServerDescriptionPtr& newServerDescription) {
- LOGV2_DEBUG(4333202,
- 2,
- "install server description {description}",
- "description"_attr = newServerDescription->toString());
-
boost::optional<ServerDescriptionPtr> previousDescription;
if (getType() == TopologyType::kSingle) {
// For Single, there is always one ServerDescription in TopologyDescription.servers;
diff --git a/src/mongo/client/sdam/topology_manager.cpp b/src/mongo/client/sdam/topology_manager.cpp
index 702978d4530..1c55285a7f0 100644
--- a/src/mongo/client/sdam/topology_manager.cpp
+++ b/src/mongo/client/sdam/topology_manager.cpp
@@ -90,6 +90,8 @@ bool TopologyManager::onServerDescription(const IsMasterOutcome& isMasterOutcome
23930,
"Ignoring this isMaster response because our topologyVersion: {lastTopologyVersion} is "
"fresher than the provided topologyVersion: {newTopologyVersion}",
+ "Ignoring this isMaster response because our last topologyVersion is fresher than the "
+ "new topologyVersion provided",
"lastTopologyVersion"_attr = lastTopologyVersion->toBSON(),
"newTopologyVersion"_attr = newTopologyVersion->toBSON());
return false;
@@ -140,7 +142,8 @@ void TopologyManager::onServerRTTUpdated(HostAndPort hostAndPort, IsMasterRTT rt
// otherwise, the server was removed from the topology. Nothing to do.
LOGV2(4333201,
"Not updating RTT. Server {server} does not exist in {replicaSet}",
- "host"_attr = hostAndPort,
+ "Not updating RTT. The server does not exist in the replica set",
+ "server"_attr = hostAndPort,
"replicaSet"_attr = getTopologyDescription()->getSetName());
}
diff --git a/src/mongo/client/sdam/topology_state_machine.cpp b/src/mongo/client/sdam/topology_state_machine.cpp
index cbdb90be7b1..9cf9c22bd9f 100644
--- a/src/mongo/client/sdam/topology_state_machine.cpp
+++ b/src/mongo/client/sdam/topology_state_machine.cpp
@@ -36,6 +36,10 @@
#include "mongo/logv2/log.h"
namespace mongo::sdam {
+namespace {
+static constexpr auto kLogLevel = 2;
+} // namespace
+
TopologyStateMachine::TopologyStateMachine(const SdamConfiguration& config) : _config(config) {
initTransitionTable();
}
@@ -149,11 +153,12 @@ void mongo::sdam::TopologyStateMachine::initTransitionTable() {
void TopologyStateMachine::onServerDescription(TopologyDescription& topologyDescription,
const ServerDescriptionPtr& serverDescription) {
if (!topologyDescription.containsServerAddress(serverDescription->getAddress())) {
- LOGV2(20219,
- "{kLogPrefix}ignoring ismaster reply from server that is not in the topology: "
- "{serverDescription_getAddress}",
- "kLogPrefix"_attr = kLogPrefix,
- "serverDescription_getAddress"_attr = serverDescription->getAddress());
+ LOGV2_DEBUG(20219,
+ kLogLevel,
+ "Ignoring isMaster reply from server that is not in the topology: "
+ "{serverAddress}",
+ "Ignoring isMaster reply from server that is not in the topology",
+ "serverAddress"_attr = serverDescription->getAddress());
return;
}
@@ -367,59 +372,36 @@ void TopologyStateMachine::setTopologyTypeAndUpdateRSWithoutPrimary(
void TopologyStateMachine::removeServerDescription(TopologyDescription& topologyDescription,
const HostAndPort serverAddress) {
topologyDescription.removeServerDescription(serverAddress);
- LOGV2(20220,
- "{kLogPrefix}server '{serverAddress}' was removed from the topology.",
- "kLogPrefix"_attr = kLogPrefix,
- "serverAddress"_attr = serverAddress);
+ LOGV2_DEBUG(20220,
+ kLogLevel,
+ "Server '{serverAddress}' was removed from the topology",
+ "Server was removed from the topology",
+ "serverAddress"_attr = serverAddress);
}
void TopologyStateMachine::modifyTopologyType(TopologyDescription& topologyDescription,
TopologyType topologyType) {
topologyDescription._type = topologyType;
- LOGV2(20221,
- "{kLogPrefix}the topology type was set to {topologyType}",
- "kLogPrefix"_attr = kLogPrefix,
- "topologyType"_attr = toString(topologyType));
}
void TopologyStateMachine::modifySetName(TopologyDescription& topologyDescription,
const boost::optional<std::string>& setName) {
topologyDescription._setName = setName;
- LOGV2(20222,
- "{kLogPrefix}the topology setName was set to {setName_setName_null}",
- "kLogPrefix"_attr = kLogPrefix,
- "setName_setName_null"_attr = ((setName) ? *setName : "[null]"));
}
void TopologyStateMachine::installServerDescription(TopologyDescription& topologyDescription,
ServerDescriptionPtr newServerDescription,
bool newServer) {
topologyDescription.installServerDescription(newServerDescription);
- LOGV2_DEBUG(20223,
- 1,
- "{kLogPrefix}{newServer_installed_new_updated_existing} server description: "
- "{newServerDescription}",
- "kLogPrefix"_attr = kLogPrefix,
- "newServer_installed_new_updated_existing"_attr =
- ((newServer) ? "installed new" : "updated existing"),
- "newServerDescription"_attr = newServerDescription->toString());
}
void TopologyStateMachine::modifyMaxElectionId(TopologyDescription& topologyDescription,
const OID& newMaxElectionId) {
topologyDescription._maxElectionId = newMaxElectionId;
- LOGV2(20224,
- "{kLogPrefix}topology max election id set to {newMaxElectionId}",
- "kLogPrefix"_attr = kLogPrefix,
- "newMaxElectionId"_attr = newMaxElectionId);
}
void TopologyStateMachine::modifyMaxSetVersion(TopologyDescription& topologyDescription,
int& newMaxSetVersion) {
topologyDescription._maxSetVersion = newMaxSetVersion;
- LOGV2(20225,
- "{kLogPrefix}topology max set version set to {newMaxSetVersion}",
- "kLogPrefix"_attr = kLogPrefix,
- "newMaxSetVersion"_attr = newMaxSetVersion);
}
} // namespace mongo::sdam
diff --git a/src/mongo/client/sdam/topology_state_machine.h b/src/mongo/client/sdam/topology_state_machine.h
index a0559ec238f..c2cdebabd77 100644
--- a/src/mongo/client/sdam/topology_state_machine.h
+++ b/src/mongo/client/sdam/topology_state_machine.h
@@ -98,8 +98,6 @@ private:
StateTransitionTable _stt;
SdamConfiguration _config;
-
- static inline auto kLogPrefix = "sdam : ";
};
using TopologyStateMachinePtr = std::unique_ptr<TopologyStateMachine>;
} // namespace mongo::sdam
diff --git a/src/mongo/client/server_is_master_monitor.cpp b/src/mongo/client/server_is_master_monitor.cpp
index c522b9a185e..4f517ddb73e 100644
--- a/src/mongo/client/server_is_master_monitor.cpp
+++ b/src/mongo/client/server_is_master_monitor.cpp
@@ -83,6 +83,7 @@ SingleServerIsMasterMonitor::SingleServerIsMasterMonitor(
LOGV2_DEBUG(4333217,
kLogLevel + 1,
"RSM {replicaSet} monitoring {host}",
+ "RSM monitoring host",
"host"_attr = host,
"replicaSet"_attr = _setUri.getSetName());
}
@@ -108,6 +109,7 @@ void SingleServerIsMasterMonitor::requestImmediateCheck() {
4333227,
kLogLevel,
"RSM {replicaSet} monitoring {host} in expedited mode until we detect a primary.",
+ "RSM monitoring host in expedited mode until we detect a primary",
"host"_attr = _host,
"replicaSet"_attr = _setUri.getSetName());
@@ -119,11 +121,13 @@ void SingleServerIsMasterMonitor::requestImmediateCheck() {
const auto expeditedRefreshPeriod = _currentRefreshPeriod(lock, false);
if (_isMasterOutstanding) {
- LOGV2_DEBUG(4333216,
- kLogLevel + 2,
- "RSM {replicaSet} immediate isMaster check requested, but there "
- "is already an outstanding request.",
- "replicaSet"_attr = _setUri.getSetName());
+ LOGV2_DEBUG(
+ 4333216,
+ kLogLevel + 2,
+ "RSM {replicaSet} immediate isMaster check requested, but there "
+ "is already an outstanding request",
+ "RSM immediate isMaster check requested, but there is already an outstanding request",
+ "replicaSet"_attr = _setUri.getSetName());
return;
}
@@ -171,7 +175,7 @@ void SingleServerIsMasterMonitor::_rescheduleNextIsMaster(WithLock lock, Millise
"Rescheduling the next replica set monitoring request",
"replicaSet"_attr = _setUri.getSetName(),
"host"_attr = _host,
- "duration"_attr = delay);
+ "delay"_attr = delay);
_cancelOutstandingRequest(lock);
_scheduleNextIsMaster(lock, delay);
}
@@ -254,8 +258,9 @@ SingleServerIsMasterMonitor::_scheduleStreamableIsMaster() {
self->_isMasterOutstanding = false;
LOGV2_DEBUG(4495400,
kLogLevel,
- "RSM {replicaSet} not processing response: {status}",
- "status"_attr = result.response.status,
+ "RSM {replicaSet} not processing response: {error}",
+ "RSM not processing response",
+ "error"_attr = result.response.status,
"replicaSet"_attr = self->_setUri.getSetName());
return;
}
@@ -311,8 +316,9 @@ StatusWith<TaskExecutor::CallbackHandle> SingleServerIsMasterMonitor::_scheduleS
if (self->_isShutdown) {
LOGV2_DEBUG(4333219,
kLogLevel,
- "RSM {replicaSet} not processing response: {status}",
- "status"_attr = result.response.status,
+ "RSM {replicaSet} not processing response: {error}",
+ "RSM not processing response",
+ "error"_attr = result.response.status,
"replicaSet"_attr = self->_setUri.getSetName());
return;
}
@@ -357,6 +363,7 @@ void SingleServerIsMasterMonitor::shutdown() {
LOGV2_DEBUG(4333220,
kLogLevel + 1,
"RSM {replicaSet} Closing host {host}",
+ "RSM closing host",
"host"_attr = _host,
"replicaSet"_attr = _setUri.getSetName());
@@ -367,6 +374,7 @@ void SingleServerIsMasterMonitor::shutdown() {
LOGV2_DEBUG(4333229,
kLogLevel + 1,
"RSM {replicaSet} Done Closing host {host}",
+ "RSM done closing host",
"host"_attr = _host,
"replicaSet"_attr = _setUri.getSetName());
}
@@ -386,22 +394,25 @@ void SingleServerIsMasterMonitor::_cancelOutstandingRequest(WithLock) {
void SingleServerIsMasterMonitor::_onIsMasterSuccess(const BSONObj bson) {
LOGV2_DEBUG(4333221,
kLogLevel + 1,
- "RSM {replicaSet} received successful isMaster for server {host}: {bson}",
+ "RSM {replicaSet} received successful isMaster for server {host}: {isMasterReply}",
+ "RSM received successful isMaster",
"host"_attr = _host,
"replicaSet"_attr = _setUri.getSetName(),
- "bson"_attr = bson.toString());
+ "isMasterReply"_attr = bson.toString());
_eventListener->onServerHeartbeatSucceededEvent(_host, bson);
}
void SingleServerIsMasterMonitor::_onIsMasterFailure(const Status& status, const BSONObj bson) {
- LOGV2_DEBUG(4333222,
- kLogLevel,
- "RSM {replicaSet} received failed isMaster for server {host}: {status}: {bson}",
- "host"_attr = _host,
- "status"_attr = status.toString(),
- "replicaSet"_attr = _setUri.getSetName(),
- "bson"_attr = bson.toString());
+ LOGV2_DEBUG(
+ 4333222,
+ kLogLevel,
+ "RSM {replicaSet} received failed isMaster for server {host}: {error}: {isMasterReply}",
+ "RSM received failed isMaster",
+ "host"_attr = _host,
+ "error"_attr = status.toString(),
+ "replicaSet"_attr = _setUri.getSetName(),
+ "isMasterReply"_attr = bson.toString());
_eventListener->onServerHeartbeatFailureEvent(status, _host, bson);
}
@@ -448,9 +459,10 @@ ServerIsMasterMonitor::ServerIsMasterMonitor(
_setUri(setUri) {
LOGV2_DEBUG(4333223,
kLogLevel,
- "RSM {replicaSet} monitoring {size} members.",
+ "RSM {replicaSet} monitoring {nReplicaSetMembers} members.",
+ "RSM now monitoring replica set",
"replicaSet"_attr = _setUri.getSetName(),
- "size"_attr = initialTopologyDescription->getServers().size());
+ "nReplicaSetMembers"_attr = initialTopologyDescription->getServers().size());
onTopologyDescriptionChangedEvent(nullptr, initialTopologyDescription);
}
@@ -488,9 +500,10 @@ void ServerIsMasterMonitor::onTopologyDescriptionChangedEvent(
singleMonitor->shutdown();
LOGV2_DEBUG(4333225,
kLogLevel,
- "RSM {replicaSet} host {addr} was removed from the topology.",
+ "RSM {replicaSet} host {host} was removed from the topology",
+ "RSM host was removed from the topology",
"replicaSet"_attr = _setUri.getSetName(),
- "addr"_attr = serverAddress);
+ "host"_attr = serverAddress);
it = _singleMonitors.erase(it, std::next(it));
} else {
++it;
diff --git a/src/mongo/client/server_is_master_monitor_test.cpp b/src/mongo/client/server_is_master_monitor_test.cpp
index 006f57fd0e1..485a38f845e 100644
--- a/src/mongo/client/server_is_master_monitor_test.cpp
+++ b/src/mongo/client/server_is_master_monitor_test.cpp
@@ -177,7 +177,7 @@ protected:
}
LOGV2(457991,
- "got mock network operation",
+ "Got mock network operation",
"elapsed"_attr = elapsed(),
"request"_attr = request.toString());
@@ -198,10 +198,10 @@ protected:
LOGV2_DEBUG(457992,
1,
"Advancing time",
- "from_time_elapsed"_attr = elapsed(),
- "to_time_elapsed"_attr = (elapsed() + d));
+                          "elapsedStart"_attr = elapsed(),
+ "elapsedEnd"_attr = (elapsed() + d));
_net->advanceTime(_net->now() + d);
- LOGV2_DEBUG(457993, 1, "Advanced time", "time_elapsed"_attr = elapsed());
+ LOGV2_DEBUG(457993, 1, "Advanced time", "timeElapsed"_attr = elapsed());
}
/**
@@ -238,20 +238,6 @@ protected:
void checkNoActivityBefore(Milliseconds deadline,
boost::optional<HostAndPort> hostAndPort = boost::none) {
while (elapsed() < deadline) {
- if (hasReadyRequests()) {
- {
- InNetworkGuard guard(_net);
- _net->runReadyNetworkOperations();
- auto noi = _net->getNextReadyRequest();
- auto request = noi->getRequest();
- LOGV2_DEBUG(4579931,
- 0,
- "mynameisrae about to fail because was activity",
- "request"_attr = request.toString(),
- "elapsed"_attr = elapsed());
- }
- ASSERT_FALSE(true);
- }
ASSERT_FALSE(hasReadyRequests());
if (hostAndPort) {
ASSERT_FALSE(_topologyListener->hasIsMasterResponse(hostAndPort.get()));
diff --git a/src/mongo/client/server_ping_monitor.cpp b/src/mongo/client/server_ping_monitor.cpp
index 3de9aa34fe2..478dd037670 100644
--- a/src/mongo/client/server_ping_monitor.cpp
+++ b/src/mongo/client/server_ping_monitor.cpp
@@ -108,17 +108,20 @@ void SingleServerPingMonitor::_scheduleServerPing() {
if (ErrorCodes::isShutdownError(schedulePingHandle.getStatus().code())) {
LOGV2_DEBUG(23727,
kLogLevel,
- "Can't schedule ping for {hostAndPort}. Executor shutdown in progress",
- "hostAndPort"_attr = _hostAndPort);
+ "Can't schedule ping for {host}. Executor shutdown in progress",
+ "Can't schedule ping for host. Executor shutdown in progress",
+ "host"_attr = _hostAndPort,
+ "replicaSet"_attr = _setUri.getSetName());
return;
}
if (!schedulePingHandle.isOK()) {
LOGV2_FATAL(23732,
- "Can't continue scheduling pings to {hostAndPort} due to "
- "{schedulePingHandle_getStatus}",
- "hostAndPort"_attr = _hostAndPort,
- "schedulePingHandle_getStatus"_attr = redact(schedulePingHandle.getStatus()));
+ "Can't continue scheduling pings to {host} due to {error}",
+ "Can't continue scheduling pings to host",
+ "host"_attr = _hostAndPort,
+ "error"_attr = redact(schedulePingHandle.getStatus()),
+ "replicaSet"_attr = _setUri.getSetName());
fassertFailed(31434);
}
@@ -174,16 +177,20 @@ void SingleServerPingMonitor::_doServerPing() {
if (ErrorCodes::isShutdownError(remotePingHandle.getStatus().code())) {
LOGV2_DEBUG(23728,
kLogLevel,
- "Can't ping {hostAndPort}. Executor shutdown in progress",
- "hostAndPort"_attr = _hostAndPort);
+ "Can't ping {host}. Executor shutdown in progress",
+ "Can't ping host. Executor shutdown in progress",
+ "host"_attr = _hostAndPort,
+ "replicaSet"_attr = _setUri.getSetName());
return;
}
if (!remotePingHandle.isOK()) {
LOGV2_FATAL(23733,
- "Can't continue pinging {hostAndPort} due to {remotePingHandle_getStatus}",
- "hostAndPort"_attr = _hostAndPort,
- "remotePingHandle_getStatus"_attr = redact(remotePingHandle.getStatus()));
+ "Can't continue pinging {host} due to {error}",
+ "Can't continue pinging host",
+ "host"_attr = _hostAndPort,
+ "error"_attr = redact(remotePingHandle.getStatus()),
+ "replicaSet"_attr = _setUri.getSetName());
fassertFailed(31435);
}
@@ -236,8 +243,10 @@ void ServerPingMonitor::onServerHandshakeCompleteEvent(sdam::IsMasterRTT duratio
if (_serverPingMonitorMap.find(address) != _serverPingMonitorMap.end()) {
LOGV2_DEBUG(466811,
kLogLevel + 1,
- "ServerPingMonitor already monitoring {address}",
- "address"_attr = address);
+ "ServerPingMonitor already monitoring {host}",
+ "ServerPingMonitor already monitoring host",
+ "host"_attr = address,
+ "replicaSet"_attr = _setUri.getSetName());
return;
}
auto newSingleMonitor = std::make_shared<SingleServerPingMonitor>(
@@ -246,8 +255,10 @@ void ServerPingMonitor::onServerHandshakeCompleteEvent(sdam::IsMasterRTT duratio
newSingleMonitor->init();
LOGV2_DEBUG(23729,
kLogLevel,
- "ServerPingMonitor is now monitoring {address}",
- "address"_attr = address);
+ "ServerPingMonitor is now monitoring {host}",
+ "ServerPingMonitor is now monitoring host",
+ "host"_attr = address,
+ "replicaSet"_attr = _setUri.getSetName());
}
void ServerPingMonitor::onTopologyDescriptionChangedEvent(
diff --git a/src/mongo/client/server_ping_monitor_test.cpp b/src/mongo/client/server_ping_monitor_test.cpp
index f665e61e9f1..6851f672cc9 100644
--- a/src/mongo/client/server_ping_monitor_test.cpp
+++ b/src/mongo/client/server_ping_monitor_test.cpp
@@ -115,7 +115,7 @@ protected:
executor::TaskExecutorTest::assertRemoteCommandNameEquals("ping", request);
ASSERT_EQ(request.target, hostAndPort);
LOGV2(23925,
- "at {elapsed} got mock network operation {request}",
+ "Got mock network operation",
"elapsed"_attr = elapsed(),
"request"_attr = request.toString());
@@ -138,11 +138,11 @@ protected:
// Operations can happen inline with advanceTime(), so log before and after the call.
LOGV2_DEBUG(23926,
1,
- "Advancing time from {elapsed} to {elapsed_d}",
- "elapsed"_attr = elapsed(),
- "elapsed_d"_attr = (elapsed() + d));
+ "About to advance time",
+ "elapsedStart"_attr = elapsed(),
+ "elapsedEnd"_attr = (elapsed() + d));
_net->advanceTime(_net->now() + d);
- LOGV2_DEBUG(23927, 1, "Advanced to {elapsed}", "elapsed"_attr = elapsed());
+ LOGV2_DEBUG(23927, 1, "Advanced time", "elapsed"_attr = elapsed());
}
/**
diff --git a/src/mongo/client/streamable_replica_set_monitor.cpp b/src/mongo/client/streamable_replica_set_monitor.cpp
index 68ff59d5cb5..9f44c38f5df 100644
--- a/src/mongo/client/streamable_replica_set_monitor.cpp
+++ b/src/mongo/client/streamable_replica_set_monitor.cpp
@@ -181,7 +181,11 @@ ReplicaSetMonitorPtr StreamableReplicaSetMonitor::make(
void StreamableReplicaSetMonitor::init() {
stdx::lock_guard lock(_mutex);
- LOGV2_DEBUG(4333206, kLowerLogLevel, "Starting Replica Set Monitor {uri}", "uri"_attr = _uri);
+ LOGV2_DEBUG(4333206,
+ kLowerLogLevel,
+ "Starting Replica Set Monitor {uri}",
+ "Starting Replica Set Monitor",
+ "uri"_attr = _uri);
_eventsPublisher = std::make_shared<sdam::TopologyEventsPublisher>(_executor);
_topologyManager = std::make_unique<TopologyManager>(
@@ -216,13 +220,19 @@ void StreamableReplicaSetMonitor::drop() {
lock, Status{ErrorCodes::ShutdownInProgress, "the ReplicaSetMonitor is shutting down"});
}
- LOGV2(4333209, "Closing Replica Set Monitor {replicaSet}", "replicaSet"_attr = getName());
+ LOGV2(4333209,
+ "Closing Replica Set Monitor {replicaSet}",
+ "Closing Replica Set Monitor",
+ "replicaSet"_attr = getName());
_queryProcessor->shutdown();
_pingMonitor->shutdown();
_isMasterMonitor->shutdown();
ReplicaSetMonitorManager::get()->getNotifier().onDroppedSet(getName());
- LOGV2(4333210, "Done closing Replica Set Monitor {replicaSet}", "replicaSet"_attr = getName());
+ LOGV2(4333210,
+ "Done closing Replica Set Monitor {replicaSet}",
+ "Done closing Replica Set Monitor",
+ "replicaSet"_attr = getName());
}
SemiFuture<HostAndPort> StreamableReplicaSetMonitor::getHostOrRefresh(
@@ -265,6 +275,7 @@ SemiFuture<std::vector<HostAndPort>> StreamableReplicaSetMonitor::getHostsOrRefr
LOGV2_DEBUG(4333212,
kLowerLogLevel,
"RSM {replicaSet} start async getHosts with {readPref}",
+ "RSM start async getHosts",
"replicaSet"_attr = getName(),
"readPref"_attr = readPrefToStringWithMinOpTime(criteria));
@@ -317,7 +328,8 @@ SemiFuture<std::vector<HostAndPort>> StreamableReplicaSetMonitor::_enqueueOutsta
query->promise.setError(errorStatus);
query->done = true;
LOGV2_INFO(4333208,
- "RSM {replicaSet} host selection timeout: {status}",
+ "RSM {replicaSet} host selection timeout: {error}",
+ "RSM host selection timeout",
"replicaSet"_attr = getName(),
"error"_attr = errorStatus.toString());
};
@@ -325,7 +337,8 @@ SemiFuture<std::vector<HostAndPort>> StreamableReplicaSetMonitor::_enqueueOutsta
if (!swDeadlineHandle.isOK()) {
LOGV2_INFO(4333207,
- "RSM {replicaSet} error scheduling deadline handler: {status}",
+ "RSM {replicaSet} error scheduling deadline handler: {error}",
+ "RSM error scheduling deadline handler",
"replicaSet"_attr = getName(),
"error"_attr = swDeadlineHandle.getStatus());
return SemiFuture<HostAndPortList>::makeReady(swDeadlineHandle.getStatus());
@@ -570,9 +583,11 @@ void StreamableReplicaSetMonitor::onTopologyDescriptionChangedEvent(
// Notify external components if there are membership changes in the topology.
if (_hasMembershipChange(previousDescription, newDescription)) {
LOGV2(4333213,
- "RSM {replicaSet} Topology Change: {topologyDescription}",
+ "RSM {replicaSet} Topology Change: {newTopologyDescription}",
+ "RSM Topology Change",
"replicaSet"_attr = getName(),
- "topologyDescription"_attr = newDescription->toString());
+ "newTopologyDescription"_attr = newDescription->toString(),
+ "previousTopologyDescription"_attr = previousDescription->toString());
auto maybePrimary = newDescription->getPrimary();
if (maybePrimary) {
@@ -717,10 +732,11 @@ void StreamableReplicaSetMonitor::_processOutstanding(
const auto latency = _executor->now() - query->start;
LOGV2_DEBUG(433214,
kLowerLogLevel,
- "RSM {replicaSet} finished async getHosts: {readPref} ({latency})",
+ "RSM {replicaSet} finished async getHosts: {readPref} ({duration})",
+ "RSM finished async getHosts",
"replicaSet"_attr = getName(),
"readPref"_attr = readPrefToStringWithMinOpTime(query->criteria),
- "latency"_attr = latency.toString());
+ "duration"_attr = Milliseconds(latency));
shouldRemove = true;
}
}
diff --git a/src/mongo/client/streamable_replica_set_monitor_query_processor.cpp b/src/mongo/client/streamable_replica_set_monitor_query_processor.cpp
index 3cd44a59f08..e6e60d4920b 100644
--- a/src/mongo/client/streamable_replica_set_monitor_query_processor.cpp
+++ b/src/mongo/client/streamable_replica_set_monitor_query_processor.cpp
@@ -56,7 +56,8 @@ void StreamableReplicaSetMonitor::StreamableReplicaSetMonitorQueryProcessor::
if (!replicaSetMonitor) {
LOGV2_DEBUG(4333215,
kLogLevel,
- "could not find rsm instance {replicaSet} for query processing.",
+ "Could not find rsm instance {replicaSet} for query processing",
+ "Could not find rsm instance for query processing",
"replicaSet"_attr = *setName);
return;
}