author     Ben Caimano <ben.caimano@10gen.com>    2020-04-17 15:57:36 -0400
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>    2020-04-22 16:03:37 +0000
commit     62b0831c4bd1d2af48aa19622cb62eb73fe7a47d (patch)
tree       2f2d217828352e435b743356709a1581174ebb0c /src
parent     a6424fafa179805e192dc354cae85fdf0359f95e (diff)
SERVER-47073 Clean up log lines in mongo/client and mongo/executor
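
The recurring change in this commit: each LOGV2 call gains a second, placeholder-free message string suitable for log aggregation, and attribute names auto-generated from C++ expressions are replaced with short descriptive ones. A minimal before/after sketch of the pattern, mirroring the connpool.cpp hunk below (the LOGV2 macro and the "_attr" literal come from mongo/logv2/log.h):

    // Before: one format string; the attribute name leaks the C++ expression.
    LOGV2(24128,
          "scoped connection to {conn_getServerAddress} not being returned to the pool",
          "conn_getServerAddress"_attr = _conn->getServerAddress());

    // After: a format string with a descriptive placeholder, a stable message
    // without placeholders, and a readable attribute name.
    LOGV2(24128,
          "Scoped connection to {connString} not being returned to the pool",
          "Scoped connection not being returned to the pool",
          "connString"_attr = _conn->getServerAddress());
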
Diffstat (limited to 'src')
-rw-r--r--  src/mongo/client/async_client.cpp                                    |   5
-rw-r--r--  src/mongo/client/authenticate.cpp                                    |   4
-rw-r--r--  src/mongo/client/connection_string_connect.cpp                       |  17
-rw-r--r--  src/mongo/client/connpool.cpp                                        |  90
-rw-r--r--  src/mongo/client/dbclient_base.cpp                                   |  10
-rw-r--r--  src/mongo/client/dbclient_connection.cpp                             |  58
-rw-r--r--  src/mongo/client/dbclient_cursor.cpp                                 |   5
-rw-r--r--  src/mongo/client/dbclient_rs.cpp                                     | 171
-rw-r--r--  src/mongo/client/mongo_uri_test.cpp                                  |  28
-rw-r--r--  src/mongo/client/replica_set_monitor_manager.cpp                     |   7
-rw-r--r--  src/mongo/client/sdam/topology_manager.cpp                           |   4
-rw-r--r--  src/mongo/client/server_is_master_monitor.cpp                        |  57
-rw-r--r--  src/mongo/client/streamable_replica_set_monitor.cpp                  |  28
-rw-r--r--  src/mongo/client/streamable_replica_set_monitor_error_handler.cpp   |   2
-rw-r--r--  src/mongo/client/streamable_replica_set_monitor_query_processor.cpp |   4
-rw-r--r--  src/mongo/executor/connection_pool.cpp                               | 132
-rw-r--r--  src/mongo/executor/connection_pool_tl.cpp                            |   7
-rw-r--r--  src/mongo/executor/network_interface_integration_fixture.cpp        |   7
-rw-r--r--  src/mongo/executor/network_interface_mock.cpp                        |  30
-rw-r--r--  src/mongo/executor/network_interface_mock.h                          |   5
-rw-r--r--  src/mongo/executor/network_interface_perf_test.cpp                   |   5
-rw-r--r--  src/mongo/executor/network_interface_tl.cpp                          |  27
-rw-r--r--  src/mongo/executor/network_interface_tl.h                            |   1
-rw-r--r--  src/mongo/executor/task_executor_test_common.cpp                     |   5
-rw-r--r--  src/mongo/executor/thread_pool_task_executor.cpp                     |  34
25 files changed, 400 insertions, 343 deletions
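
A second pattern in the connpool.cpp hunk: timeouts held as double seconds are wrapped in a typed duration before logging, so the structured output carries units. A short sketch under the same assumptions (Milliseconds from mongo/util/duration.h; makeDuration is the helper added in that hunk):

    auto makeDuration(double secs) {
        return Milliseconds(static_cast<Milliseconds::rep>(1000 * secs));
    }

    // A _socketTimeoutSecs of 5.0 is logged as the duration "5000 ms"
    // rather than the bare number 5.
    LOGV2(24124,
          "Dropping all pooled connections to {connString} (with timeout of {socketTimeout})",
          "Dropping all pooled connections to a host",
          "connString"_attr = _hostName,
          "socketTimeout"_attr = makeDuration(_socketTimeoutSecs));
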
diff --git a/src/mongo/client/async_client.cpp b/src/mongo/client/async_client.cpp
index f25830de073..2f65ec84a73 100644
--- a/src/mongo/client/async_client.cpp
+++ b/src/mongo/client/async_client.cpp
@@ -112,8 +112,9 @@ void AsyncDBClient::_parseIsMasterResponse(BSONObj request,
rpc::validateWireVersion(WireSpec::instance().outgoing, protocolSet.version);
if (!validateStatus.isOK()) {
LOGV2_WARNING(23741,
- "remote host has incompatible wire version: {validateStatus}",
- "validateStatus"_attr = validateStatus);
+ "Remote host has incompatible wire version: {error}",
+ "Remote host has incompatible wire version",
+ "error"_attr = validateStatus);
uasserted(validateStatus.code(),
str::stream() << "remote host has incompatible wire version: "
<< validateStatus.reason());
diff --git a/src/mongo/client/authenticate.cpp b/src/mongo/client/authenticate.cpp
index 8d4a2dc05ab..b073821bcc9 100644
--- a/src/mongo/client/authenticate.cpp
+++ b/src/mongo/client/authenticate.cpp
@@ -151,8 +151,8 @@ Future<void> authenticateClient(const BSONObj& params,
if (serverGlobalParams.transitionToAuth && !ErrorCodes::isNetworkError(status)) {
// If auth failed in transitionToAuth, just pretend it succeeded.
LOGV2(20108,
- "Failed to authenticate in transitionToAuth, falling back to no "
- "authentication.");
+ "Failed to authenticate in transitionToAuth, "
+ "falling back to no authentication");
return Status::OK();
}
diff --git a/src/mongo/client/connection_string_connect.cpp b/src/mongo/client/connection_string_connect.cpp
index 935888d3e4a..408e018f4b8 100644
--- a/src/mongo/client/connection_string_connect.cpp
+++ b/src/mongo/client/connection_string_connect.cpp
@@ -61,12 +61,15 @@ std::unique_ptr<DBClientBase> ConnectionString::connect(StringData applicationNa
auto c = std::make_unique<DBClientConnection>(true, 0, newURI);
c->setSoTimeout(socketTimeout);
- LOGV2_DEBUG(
- 20109, 1, "creating new connection to:{server}", "server"_attr = server);
+ LOGV2_DEBUG(20109,
+ 1,
+ "Creating new connection to: {hostAndPort}",
+ "Creating new connection",
+ "hostAndPort"_attr = server);
if (!c->connect(server, applicationName, errmsg)) {
continue;
}
- LOGV2_DEBUG(20110, 1, "connected connection!");
+ LOGV2_DEBUG(20110, 1, "Connected connection!");
return std::move(c);
}
return nullptr;
@@ -98,10 +101,10 @@ std::unique_ptr<DBClientBase> ConnectionString::connect(StringData applicationNa
auto replacementConn = _connectHook->connect(*this, errmsg, socketTimeout);
LOGV2(20111,
- "replacing connection to {this} with "
- "{replacementConn_replacementConn_getServerAddress_empty}",
- "this"_attr = this->toString(),
- "replacementConn_replacementConn_getServerAddress_empty"_attr =
+ "Replacing connection to {oldConnString} with {newConnString}",
+ "Replacing connection string",
+ "oldConnString"_attr = this->toString(),
+ "newConnString"_attr =
(replacementConn ? replacementConn->getServerAddress() : "(empty)"));
return replacementConn;
diff --git a/src/mongo/client/connpool.cpp b/src/mongo/client/connpool.cpp
index bdfae49529c..941ca71fdd7 100644
--- a/src/mongo/client/connpool.cpp
+++ b/src/mongo/client/connpool.cpp
@@ -62,6 +62,10 @@ namespace mongo {
namespace {
const int kDefaultIdleTimeout = std::numeric_limits<int>::max();
const int kDefaultMaxInUse = std::numeric_limits<int>::max();
+
+auto makeDuration(double secs) {
+ return Milliseconds(static_cast<Milliseconds::rep>(1000 * secs));
+}
} // namespace
using std::endl;
@@ -91,10 +95,11 @@ PoolForHost::~PoolForHost() {
void PoolForHost::clear() {
if (!_parentDestroyed) {
LOGV2(24124,
- "Dropping all pooled connections to {hostName}(with timeout of {socketTimeoutSecs} "
- "seconds)",
- "hostName"_attr = _hostName,
- "socketTimeoutSecs"_attr = _socketTimeoutSecs);
+ "Dropping all pooled connections to {connString} "
+ "(with timeout of {timeoutSecs} seconds)",
+ "Dropping all pooled connections to a host",
+ "connString"_attr = _hostName,
+ "socketTimeout"_attr = makeDuration(_socketTimeoutSecs));
}
_pool = decltype(_pool){};
@@ -114,23 +119,24 @@ auto PoolForHost::done(DBConnectionPool* pool, DBClientBase* c) -> ConnectionHea
bool isBroken = c->getSockCreationMicroSec() < _minValidCreationTimeMicroSec;
if (isFailed || isBroken) {
_badConns++;
- LOGV2(
- 24125,
- "Ending connection to host {hostName}(with timeout of {socketTimeoutSecs} seconds) due "
- "to bad connection status; {openConnections} connections to that host remain open",
- "hostName"_attr = _hostName,
- "socketTimeoutSecs"_attr = _socketTimeoutSecs,
- "openConnections"_attr = openConnections());
+ LOGV2(24125,
+ "Ending connection to {connString} (with timeout of {socketTimeout}) "
+ "due to bad connection status; {numOpenConns} connections to that host remain open",
+ "Ending connection to a host due to a bad connection status",
+ "connString"_attr = _hostName,
+ "socketTimeout"_attr = makeDuration(_socketTimeoutSecs),
+ "numOpenConns"_attr = openConnections());
return ConnectionHealth::kFailed;
} else if (_maxPoolSize >= 0 && static_cast<int>(_pool.size()) >= _maxPoolSize) {
// We have a pool size that we need to enforce
LOGV2(24126,
- "Ending idle connection to host {hostName}(with timeout of {socketTimeoutSecs} "
- "seconds) because the pool meets constraints; {openConnections} connections to that "
- "host remain open",
- "hostName"_attr = _hostName,
- "socketTimeoutSecs"_attr = _socketTimeoutSecs,
- "openConnections"_attr = openConnections());
+ "Ending idle connection to {connString} (with timeout of {socketTimeout}) "
+ "because its pool meets constraints; "
+ "{numOpenConns} connections to that host remain open",
+ "Ending idle connection to a host because its pool mees constraints",
+ "connString"_attr = _hostName,
+ "socketTimeout"_attr = makeDuration(_socketTimeoutSecs),
+ "numOpenConns"_attr = openConnections());
return ConnectionHealth::kTooMany;
}
@@ -144,11 +150,13 @@ void PoolForHost::reportBadConnectionAt(uint64_t microSec) {
microSec > _minValidCreationTimeMicroSec) {
_minValidCreationTimeMicroSec = microSec;
LOGV2(24127,
- "Detected bad connection created at {minValidCreationTimeMicroSec} microSec, "
- "clearing pool for {hostName} of {openConnections} connections",
- "minValidCreationTimeMicroSec"_attr = _minValidCreationTimeMicroSec,
- "hostName"_attr = _hostName,
- "openConnections"_attr = openConnections());
+ "Detected bad connection created at {currentTime}, "
+ "clearing pool for {connString} of {numOpenConns} connections",
+ "Detected bad connection, clearing pool for host",
+ "currentTime"_attr =
+ Microseconds(static_cast<Microseconds::rep>(_minValidCreationTimeMicroSec)),
+ "connString"_attr = _hostName,
+ "numOpenConns"_attr = openConnections());
clear();
}
}
@@ -280,9 +288,10 @@ public:
if (p.openConnections() >= _this->_maxInUse) {
LOGV2(20112,
- "Too many in-use connections; waiting until there are fewer than "
- "{this_maxInUse}",
- "this_maxInUse"_attr = _this->_maxInUse);
+ "Too many in-use connections; "
+ "waiting until there are fewer than {maxInUseConns}",
+ "Too many in-use connections; waiting until there are fewer than maximum",
+ "maxInUseConns"_attr = _this->_maxInUse);
p.waitForFreeConnection(timeout, lk);
} else {
// Drop the lock here, so we can connect without holding it.
@@ -367,12 +376,13 @@ DBClientBase* DBConnectionPool::_finishCreate(const string& ident,
}
LOGV2(20113,
- "Successfully connected to {ident} ({openConnections_ident_socketTimeout} connections "
- "now open to {ident2} with a {socketTimeout} second timeout)",
- "ident"_attr = ident,
- "openConnections_ident_socketTimeout"_attr = openConnections(ident, socketTimeout),
- "ident2"_attr = ident,
- "socketTimeout"_attr = socketTimeout);
+ "Successfully connected to {connString} "
+ "({numOpenConns} connections now open to that host "
+ "with a {socketTimeoutSecs} second timeout)",
+ "Successfully connected to host",
+ "connString"_attr = ident,
+ "numOpenConns"_attr = openConnections(ident, socketTimeout),
+ "socketTimeoutSecs"_attr = socketTimeout);
return conn;
}
@@ -490,7 +500,11 @@ void DBConnectionPool::flush() {
void DBConnectionPool::clear() {
stdx::lock_guard<Latch> L(_mutex);
- LOGV2_DEBUG(20114, 2, "Removing connections on all pools owned by {name}", "name"_attr = _name);
+ LOGV2_DEBUG(20114,
+ 2,
+ "Removing connections on all pools owned by {poolName}",
+ "Removing all connectionns associated with this set of pools",
+ "poolName"_attr = _name);
for (PoolMap::iterator iter = _pools.begin(); iter != _pools.end(); ++iter) {
iter->second.clear();
}
@@ -498,8 +512,11 @@ void DBConnectionPool::clear() {
void DBConnectionPool::removeHost(const string& host) {
stdx::lock_guard<Latch> L(_mutex);
- LOGV2_DEBUG(
- 20115, 2, "Removing connections from all pools for host: {host}", "host"_attr = host);
+ LOGV2_DEBUG(20115,
+ 2,
+ "Removing connections from all pools to {connString}",
+ "Removing connections from all pools to a host",
+ "connString"_attr = host);
for (PoolMap::iterator i = _pools.begin(); i != _pools.end(); ++i) {
const string& poolHost = i->first.ident;
if (!serverNameCompare()(host, poolHost) && !serverNameCompare()(poolHost, host)) {
@@ -701,8 +718,9 @@ ScopedDbConnection::~ScopedDbConnection() {
} else {
/* see done() comments above for why we log this line */
LOGV2(24128,
- "scoped connection to {conn_getServerAddress} not being returned to the pool",
- "conn_getServerAddress"_attr = _conn->getServerAddress());
+ "Scoped connection to {connString} not being returned to the pool",
+ "Scoped connection not being returned to the pool",
+ "connString"_attr = _conn->getServerAddress());
kill();
}
}
diff --git a/src/mongo/client/dbclient_base.cpp b/src/mongo/client/dbclient_base.cpp
index 8405348b843..dc7dd7cf310 100644
--- a/src/mongo/client/dbclient_base.cpp
+++ b/src/mongo/client/dbclient_base.cpp
@@ -501,11 +501,12 @@ Status DBClientBase::authenticateInternalUser() {
return status;
}
- if (serverGlobalParams.quiet.load()) {
+ if (!serverGlobalParams.quiet.load()) {
LOGV2(20117,
- "can't authenticate to {} as internal user, error: {status_reason}",
- ""_attr = toString(),
- "status_reason"_attr = status.reason());
+ "Can't authenticate to {connString} as internal user, error: {error}",
+ "Can't authenticate as internal user",
+ "connString"_attr = toString(),
+ "error"_attr = status);
}
return status;
@@ -999,6 +1000,7 @@ void DBClientBase::dropIndex(const string& ns,
LOGV2_DEBUG(20118,
logSeverityV1toV2(_logLevel).toInt(),
"dropIndex failed: {info}",
+ "dropIndex failed",
"info"_attr = info);
uassert(10007, "dropIndex failed", 0);
}
diff --git a/src/mongo/client/dbclient_connection.cpp b/src/mongo/client/dbclient_connection.cpp
index a5a6f3005ce..6f0940faf49 100644
--- a/src/mongo/client/dbclient_connection.cpp
+++ b/src/mongo/client/dbclient_connection.cpp
@@ -342,8 +342,9 @@ Status DBClientConnection::connect(const HostAndPort& serverAddress, StringData
rpc::validateWireVersion(WireSpec::instance().outgoing, swProtocolSet.getValue().version);
if (!validateStatus.isOK()) {
LOGV2_WARNING(20126,
- "remote host has incompatible wire version: {validateStatus}",
- "validateStatus"_attr = validateStatus);
+ "Remote host has incompatible wire version: {error}",
+ "Remote host has incompatible wire version",
+ "error"_attr = validateStatus);
return validateStatus;
}
@@ -426,7 +427,11 @@ Status DBClientConnection::connectSocketOnly(const HostAndPort& serverAddress) {
_lastConnectivityCheck = Date_t::now();
_session->setTimeout(_socketTimeout);
_session->setTags(_tagMask);
- LOGV2_DEBUG(20119, 1, "connected to server {}", ""_attr = toString());
+ LOGV2_DEBUG(20119,
+ 1,
+ "Connected to host {connString}",
+ "Connected to host",
+ "connString"_attr = toString());
return Status::OK();
}
@@ -555,17 +560,19 @@ void DBClientConnection::_checkConnection() {
LOGV2_DEBUG(20120,
logSeverityV1toV2(_logLevel).toInt(),
- "trying reconnect to {}",
- ""_attr = toString());
+ "Trying to reconnect to {connString}",
+ "Trying to reconnnect",
+ "connString"_attr = toString());
string errmsg;
auto connectStatus = connect(_serverAddress, _applicationName);
if (!connectStatus.isOK()) {
_markFailed(kSetFlag);
LOGV2_DEBUG(20121,
logSeverityV1toV2(_logLevel).toInt(),
- "reconnect {} failed {errmsg}",
- ""_attr = toString(),
- "errmsg"_attr = errmsg);
+ "Reconnect attempt to {connString} failed: {reason}",
+ "Reconnect attempt failed",
+ "connString"_attr = toString(),
+ "error"_attr = errmsg);
if (connectStatus == ErrorCodes::IncompatibleCatalogManager) {
uassertStatusOK(connectStatus); // Will always throw
} else {
@@ -573,8 +580,11 @@ void DBClientConnection::_checkConnection() {
}
}
- LOGV2_DEBUG(
- 20122, logSeverityV1toV2(_logLevel).toInt(), "reconnect {} ok", ""_attr = toString());
+ LOGV2_DEBUG(20122,
+ logSeverityV1toV2(_logLevel).toInt(),
+ "Reconnected to {connString}",
+ "Reconnected",
+ "connString"_attr = toString());
if (_internalAuthOnReconnect) {
uassertStatusOK(authenticateInternalUser());
} else {
@@ -584,14 +594,11 @@ void DBClientConnection::_checkConnection() {
} catch (ExceptionFor<ErrorCodes::AuthenticationFailed>& ex) {
LOGV2_DEBUG(20123,
logSeverityV1toV2(_logLevel).toInt(),
- "reconnect: auth failed "
- "{kv_second_auth_getSaslCommandUserDBFieldName}{kv_second_auth_"
- "getSaslCommandUserFieldName} {ex_what}",
- "kv_second_auth_getSaslCommandUserDBFieldName"_attr =
- kv.second[auth::getSaslCommandUserDBFieldName()],
- "kv_second_auth_getSaslCommandUserFieldName"_attr =
- kv.second[auth::getSaslCommandUserFieldName()],
- "ex_what"_attr = ex.what());
+ "Reconnect: auth failed for on {db} using {user}: {reason}",
+ "Reconnect: auth failed",
+ "db"_attr = kv.second[auth::getSaslCommandUserDBFieldName()],
+ "user"_attr = kv.second[auth::getSaslCommandUserFieldName()],
+ "error"_attr = ex.what());
}
}
}
@@ -747,9 +754,10 @@ bool DBClientConnection::call(Message& toSend,
auto sinkStatus = _session->sinkMessage(swm.getValue());
if (!sinkStatus.isOK()) {
LOGV2(20124,
- "DBClientConnection failed to send message to {getServerAddress} - {sinkStatus}",
- "getServerAddress"_attr = getServerAddress(),
- "sinkStatus"_attr = redact(sinkStatus));
+ "DBClientConnection failed to send message to {connString}: {error}",
+ "DBClientConnection failed to send message",
+ "connString"_attr = getServerAddress(),
+ "error"_attr = redact(sinkStatus));
return maybeThrow(sinkStatus);
}
@@ -758,10 +766,10 @@ bool DBClientConnection::call(Message& toSend,
response = std::move(swm.getValue());
} else {
LOGV2(20125,
- "DBClientConnection failed to receive message from {getServerAddress} - "
- "{swm_getStatus}",
- "getServerAddress"_attr = getServerAddress(),
- "swm_getStatus"_attr = redact(swm.getStatus()));
+ "DBClientConnection failed to receive message from {connString}: {error}",
+ "DBClientConnection failed to receive message",
+ "connString"_attr = getServerAddress(),
+ "error"_attr = redact(swm.getStatus()));
return maybeThrow(swm.getStatus());
}
diff --git a/src/mongo/client/dbclient_cursor.cpp b/src/mongo/client/dbclient_cursor.cpp
index 60034c05514..c3296fe49ad 100644
--- a/src/mongo/client/dbclient_cursor.cpp
+++ b/src/mongo/client/dbclient_cursor.cpp
@@ -242,8 +242,9 @@ bool DBClientCursor::initLazyFinish(bool& retry) {
if (!recvStatus.isOK() || reply.empty()) {
if (!recvStatus.isOK())
LOGV2(20129,
- "DBClientCursor::init lazy say() failed: {recvStatus}",
- "recvStatus"_attr = redact(recvStatus));
+ "DBClientCursor::init lazy say() failed: {error}",
+ "DBClientCursor::init lazy say() failed",
+ "error"_attr = redact(recvStatus));
if (reply.empty())
LOGV2(20130, "DBClientCursor::init message from say() was empty");
diff --git a/src/mongo/client/dbclient_rs.cpp b/src/mongo/client/dbclient_rs.cpp
index d7744d2c841..e37232c3750 100644
--- a/src/mongo/client/dbclient_rs.cpp
+++ b/src/mongo/client/dbclient_rs.cpp
@@ -159,9 +159,11 @@ ReplicaSetMonitorPtr DBClientReplicaSet::_getMonitor() {
string DBClientReplicaSet::getServerAddress() const {
if (!_rsm) {
LOGV2_WARNING(20147,
- "Trying to get server address for DBClientReplicaSet, but no "
- "ReplicaSetMonitor exists for {setName}",
- "setName"_attr = _setName);
+ "Trying to get server address for DBClientReplicaSet, "
+ "but no ReplicaSetMonitor exists for {replicaSet}",
+ "Trying to get server address for DBClientReplicaSet, "
+ "but no ReplicaSetMonitor exists",
+ "replicaSet"_attr = _setName);
return str::stream() << _setName << "/";
}
return _rsm->getServerAddress();
@@ -360,9 +362,10 @@ void DBClientReplicaSet::_authConnection(DBClientConnection* conn) {
auto status = conn->authenticateInternalUser();
if (!status.isOK()) {
LOGV2_WARNING(20148,
- "cached auth failed for set {setName}: {status}",
- "setName"_attr = _setName,
- "status"_attr = status);
+ "Cached auth failed for set {replicaSet}: {error}",
+ "Cached auth failed",
+ "replicaSet"_attr = _setName,
+ "error"_attr = status);
}
return;
}
@@ -372,14 +375,11 @@ void DBClientReplicaSet::_authConnection(DBClientConnection* conn) {
conn->auth(i->second);
} catch (const AssertionException&) {
LOGV2_WARNING(20149,
- "cached auth failed for set: {setName} db: "
- "{i_second_saslCommandUserDBFieldName_str} user: "
- "{i_second_saslCommandUserFieldName_str}",
- "setName"_attr = _setName,
- "i_second_saslCommandUserDBFieldName_str"_attr =
- i->second[saslCommandUserDBFieldName].str(),
- "i_second_saslCommandUserFieldName_str"_attr =
- i->second[saslCommandUserFieldName].str());
+ "Cached auth failed for set: {replicaSet} db: {db} user: {user}",
+ "Cached auth failed",
+ "replicaSet"_attr = _setName,
+ "db"_attr = i->second[saslCommandUserDBFieldName].str(),
+ "user"_attr = i->second[saslCommandUserFieldName].str());
}
}
}
@@ -392,10 +392,11 @@ void DBClientReplicaSet::logoutAll(DBClientConnection* conn) {
conn->logout(i->first, response);
} catch (const AssertionException& ex) {
LOGV2_WARNING(20150,
- "Failed to logout: {conn_getServerAddress} on db: {i_first}{causedBy_ex}",
- "conn_getServerAddress"_attr = conn->getServerAddress(),
- "i_first"_attr = i->first,
- "causedBy_ex"_attr = causedBy(redact(ex)));
+ "Failed to logout: {connString} on db: {db} with error: {error}",
+ "Failed to logout",
+ "connString"_attr = conn->getServerAddress(),
+ "db"_attr = i->first,
+ "error"_attr = redact(ex));
}
}
}
@@ -431,8 +432,9 @@ Status DBClientReplicaSet::_runAuthLoop(Authenticate authCb) {
LOGV2_DEBUG(20132,
3,
- "dbclient_rs authentication of {getMonitor_getName}",
- "getMonitor_getName"_attr = _getMonitor()->getName());
+ "dbclient_rs attempting authentication of {replicaSet}",
+ "dbclient_rs attempting authentication",
+ "replicaSet"_attr = _getMonitor()->getName());
// NOTE that we retry MAX_RETRY + 1 times, since we're always primary preferred we don't
// fallback to the primary.
@@ -565,15 +567,15 @@ unique_ptr<DBClientCursor> DBClientReplicaSet::query(const NamespaceStringOrUUID
if (_isSecondaryQuery(ns, query.obj, *readPref)) {
LOGV2_DEBUG(20133,
3,
- "dbclient_rs query using secondary or tagged node selection in "
- "{getMonitor_getName}, read pref is {readPref} (primary : "
- "{master_get_nullptr_master_getServerAddress_not_cached}, lastTagged : "
- "{lastSlaveOkConn_get_nullptr_lastSlaveOkConn_getServerAddress_not_cached})",
- "getMonitor_getName"_attr = _getMonitor()->getName(),
+ "dbclient_rs query using secondary or tagged node selection in {replicaSet}, "
+ "read pref is {readPref} "
+ "(primary : {primary}, lastTagged : {lastTagged})",
+ "dbclient_rs query using secondary or tagged node selection",
+ "replicaSet"_attr = _getMonitor()->getName(),
"readPref"_attr = readPref->toString(),
- "master_get_nullptr_master_getServerAddress_not_cached"_attr =
+ "primary"_attr =
(_master.get() != nullptr ? _master->getServerAddress() : "[not cached]"),
- "lastSlaveOkConn_get_nullptr_lastSlaveOkConn_getServerAddress_not_cached"_attr =
+ "lastTagged"_attr =
(_lastSlaveOkConn.get() != nullptr ? _lastSlaveOkConn->getServerAddress()
: "[not cached]"));
@@ -616,8 +618,9 @@ unique_ptr<DBClientCursor> DBClientReplicaSet::query(const NamespaceStringOrUUID
LOGV2_DEBUG(20134,
3,
- "dbclient_rs query to primary node in {getMonitor_getName}",
- "getMonitor_getName"_attr = _getMonitor()->getName());
+ "dbclient_rs query to primary node in {replicaSet}",
+ "dbclient_rs query to primary node",
+ "replicaSet"_attr = _getMonitor()->getName());
return checkMaster()->query(nsOrUuid,
query,
@@ -638,15 +641,15 @@ BSONObj DBClientReplicaSet::findOne(const string& ns,
if (_isSecondaryQuery(ns, query.obj, *readPref)) {
LOGV2_DEBUG(20135,
3,
- "dbclient_rs findOne using secondary or tagged node selection in "
- "{getMonitor_getName}, read pref is {readPref} (primary : "
- "{master_get_nullptr_master_getServerAddress_not_cached}, lastTagged : "
- "{lastSlaveOkConn_get_nullptr_lastSlaveOkConn_getServerAddress_not_cached})",
- "getMonitor_getName"_attr = _getMonitor()->getName(),
+ "dbclient_rs findOne using secondary or tagged node selection in {replicaSet}, "
+ "read pref is {readPref} "
+ "(primary : {primary}, lastTagged : {lastTagged})",
+ "dbclient_rs findOne using secondary or tagged node selection",
+ "replicaSet"_attr = _getMonitor()->getName(),
"readPref"_attr = readPref->toString(),
- "master_get_nullptr_master_getServerAddress_not_cached"_attr =
+ "primary"_attr =
(_master.get() != nullptr ? _master->getServerAddress() : "[not cached]"),
- "lastSlaveOkConn_get_nullptr_lastSlaveOkConn_getServerAddress_not_cached"_attr =
+ "secondaryHostNamme"_attr =
(_lastSlaveOkConn.get() != nullptr ? _lastSlaveOkConn->getServerAddress()
: "[not cached]"));
@@ -680,8 +683,9 @@ BSONObj DBClientReplicaSet::findOne(const string& ns,
LOGV2_DEBUG(20136,
3,
- "dbclient_rs findOne to primary node in {getMonitor_getName}",
- "getMonitor_getName"_attr = _getMonitor()->getName());
+ "dbclient_rs findOne to primary node in {replicaSet}",
+ "dbclient_rs findOne to primary node",
+ "replicaSet"_attr = _getMonitor()->getName());
return checkMaster()->findOne(ns, query, fieldsToReturn, queryOptions, readConcernObj);
}
@@ -743,8 +747,9 @@ DBClientConnection* DBClientReplicaSet::selectNodeUsingTags(
if (checkLastHost(readPref.get())) {
LOGV2_DEBUG(20137,
3,
- "dbclient_rs selecting compatible last used node {lastSlaveOkHost}",
- "lastSlaveOkHost"_attr = _lastSlaveOkHost);
+ "dbclient_rs selecting compatible last used node {lastTagged}",
+ "dbclient_rs selecting compatible last used node",
+ "lastTagged"_attr = _lastSlaveOkHost);
return _lastSlaveOkConn.get();
}
@@ -755,9 +760,9 @@ DBClientConnection* DBClientReplicaSet::selectNodeUsingTags(
if (!selectedNodeStatus.isOK()) {
LOGV2_DEBUG(20138,
3,
- "dbclient_rs no compatible node found{causedBy_selectedNodeStatus_getStatus}",
- "causedBy_selectedNodeStatus_getStatus"_attr =
- causedBy(redact(selectedNodeStatus.getStatus())));
+ "dbclient_rs no compatible node found: {error}",
+ "dbclient_rs no compatible node found",
+ "error"_attr = redact(selectedNodeStatus.getStatus()));
return nullptr;
}
@@ -779,8 +784,9 @@ DBClientConnection* DBClientReplicaSet::selectNodeUsingTags(
LOGV2_DEBUG(20139,
3,
- "dbclient_rs selecting primary node {selectedNode}",
- "selectedNode"_attr = selectedNode);
+ "dbclient_rs selecting primary node {connString}",
+ "dbclient_rs selecting primary node",
+ "connString"_attr = selectedNode);
_lastSlaveOkConn = _master;
@@ -818,8 +824,9 @@ DBClientConnection* DBClientReplicaSet::selectNodeUsingTags(
LOGV2_DEBUG(20140,
3,
- "dbclient_rs selecting node {lastSlaveOkHost}",
- "lastSlaveOkHost"_attr = _lastSlaveOkHost);
+ "dbclient_rs selecting node {connString}",
+ "dbclient_rs selecting node",
+ "connString"_attr = _lastSlaveOkHost);
return _lastSlaveOkConn.get();
}
@@ -837,20 +844,19 @@ void DBClientReplicaSet::say(Message& toSend, bool isRetry, string* actualServer
shared_ptr<ReadPreferenceSetting> readPref(_extractReadPref(qm.query, qm.queryOptions));
if (_isSecondaryQuery(qm.ns, qm.query, *readPref)) {
- LOGV2_DEBUG(
- 20141,
- 3,
- "dbclient_rs say using secondary or tagged node selection in {getMonitor_getName}, "
- "read pref is {readPref} (primary : "
- "{master_get_nullptr_master_getServerAddress_not_cached}, lastTagged : "
- "{lastSlaveOkConn_get_nullptr_lastSlaveOkConn_getServerAddress_not_cached})",
- "getMonitor_getName"_attr = _getMonitor()->getName(),
- "readPref"_attr = readPref->toString(),
- "master_get_nullptr_master_getServerAddress_not_cached"_attr =
- (_master.get() != nullptr ? _master->getServerAddress() : "[not cached]"),
- "lastSlaveOkConn_get_nullptr_lastSlaveOkConn_getServerAddress_not_cached"_attr =
- (_lastSlaveOkConn.get() != nullptr ? _lastSlaveOkConn->getServerAddress()
- : "[not cached]"));
+ LOGV2_DEBUG(20141,
+ 3,
+ "dbclient_rs say using secondary or tagged node selection in {replicaSet}, "
+ "read pref is {readPref} "
+ "(primary : {primary}, lastTagged : {lastTagged})",
+ "dbclient_rs say using secondary or tagged node selection",
+ "replicaSet"_attr = _getMonitor()->getName(),
+ "readPref"_attr = readPref->toString(),
+ "primary"_attr = (_master.get() != nullptr ? _master->getServerAddress()
+ : "[not cached]"),
+ "lastTagged"_attr = (_lastSlaveOkConn.get() != nullptr
+ ? _lastSlaveOkConn->getServerAddress()
+ : "[not cached]"));
string lastNodeErrMsg;
@@ -897,8 +903,9 @@ void DBClientReplicaSet::say(Message& toSend, bool isRetry, string* actualServer
LOGV2_DEBUG(20142,
3,
- "dbclient_rs say to primary node in {getMonitor_getName}",
- "getMonitor_getName"_attr = _getMonitor()->getName());
+ "dbclient_rs say to primary node in {replicaSet}",
+ "dbclient_rs say to primary node",
+ "replicaSet"_attr = _getMonitor()->getName());
DBClientConnection* master = checkMaster();
if (actualServer)
@@ -921,9 +928,10 @@ Status DBClientReplicaSet::recv(Message& m, int lastRequestId) {
return _lazyState._lastClient->recv(m, lastRequestId);
} catch (DBException& e) {
LOGV2(20143,
- "could not receive data from {lazyState_lastClient}{causedBy_e}",
- "lazyState_lastClient"_attr = _lazyState._lastClient->toString(),
- "causedBy_e"_attr = causedBy(redact(e)));
+ "Could not receive data from {connString}: {error}",
+ "Could not receive data",
+ "connString"_attr = _lazyState._lastClient->toString(),
+ "error"_attr = redact(e));
return e.toStatus();
}
}
@@ -974,10 +982,11 @@ void DBClientReplicaSet::checkResponse(const std::vector<BSONObj>& batch,
isntMaster();
} else {
LOGV2_WARNING(20151,
- "passed {dataObj} but last rs client {lazyState_lastClient} is not "
- "master or secondary",
+ "Data {dataObj} is invalid because last rs client {connString} is "
+ "not master or secondary",
+ "Data is invalid because last rs client is not master or secondary",
"dataObj"_attr = redact(dataObj),
- "lazyState_lastClient"_attr = _lazyState._lastClient->toString());
+ "connString"_attr = _lazyState._lastClient->toString());
}
if (_lazyState._retries < static_cast<int>(MAX_RETRY)) {
@@ -985,8 +994,9 @@ void DBClientReplicaSet::checkResponse(const std::vector<BSONObj>& batch,
*retry = true;
} else {
LOGV2(20144,
- "too many retries ({lazyState_retries}), could not get data from replica set",
- "lazyState_retries"_attr = _lazyState._retries);
+ "Too many retries ({numRetries}), could not get data from replica set",
+ "Too many retries, could not get data from replica set",
+ "numRetries"_attr = _lazyState._retries);
}
}
} else if (_lazyState._lastOp == dbQuery) {
@@ -1085,15 +1095,15 @@ bool DBClientReplicaSet::call(Message& toSend,
LOGV2_DEBUG(
20145,
3,
- "dbclient_rs call using secondary or tagged node selection in "
- "{getMonitor_getName}, read pref is {readPref} (primary : "
- "{master_get_nullptr_master_getServerAddress_not_cached}, lastTagged : "
- "{lastSlaveOkConn_get_nullptr_lastSlaveOkConn_getServerAddress_not_cached})",
- "getMonitor_getName"_attr = _getMonitor()->getName(),
+ "dbclient_rs call using secondary or tagged node selection in {replicaSet}, "
+ "read pref is {readPref} "
+ "(primary : {primary}, lastTagged : {lastTagged})",
+ "dbclient_rs call using secondary or tagged node selection",
+ "replicaSet"_attr = _getMonitor()->getName(),
"readPref"_attr = readPref->toString(),
- "master_get_nullptr_master_getServerAddress_not_cached"_attr =
+ "primary"_attr =
(_master.get() != nullptr ? _master->getServerAddress() : "[not cached]"),
- "lastSlaveOkConn_get_nullptr_lastSlaveOkConn_getServerAddress_not_cached"_attr =
+ "lastTagged"_attr =
(_lastSlaveOkConn.get() != nullptr ? _lastSlaveOkConn->getServerAddress()
: "[not cached]"));
@@ -1127,8 +1137,9 @@ bool DBClientReplicaSet::call(Message& toSend,
LOGV2_DEBUG(20146,
3,
- "dbclient_rs call to primary node in {getMonitor_getName}",
- "getMonitor_getName"_attr = _getMonitor()->getName());
+ "dbclient_rs call to primary node in {replicaSet}",
+ "dbclient_rs call to primary node",
+ "replicaSet"_attr = _getMonitor()->getName());
DBClientConnection* m = checkMaster();
if (actualServer)
diff --git a/src/mongo/client/mongo_uri_test.cpp b/src/mongo/client/mongo_uri_test.cpp
index bada38c3df5..d53ca990602 100644
--- a/src/mongo/client/mongo_uri_test.cpp
+++ b/src/mongo/client/mongo_uri_test.cpp
@@ -87,16 +87,15 @@ void compareOptions(size_t lineNumber,
for (std::size_t i = 0; i < std::min(options.size(), expectedOptions.size()); ++i) {
if (options[i] != expectedOptions[i]) {
- LOGV2(
- 20152,
- "Option: \"tolower({options_i_first_original})={options_i_second}\" doesn't equal: "
- "\"tolower({expectedOptions_i_first_original})={expectedOptions_i_second}\" data "
- "on line: {lineNumber}",
- "options_i_first_original"_attr = options[i].first.original(),
- "options_i_second"_attr = options[i].second,
- "expectedOptions_i_first_original"_attr = expectedOptions[i].first.original(),
- "expectedOptions_i_second"_attr = expectedOptions[i].second,
- "lineNumber"_attr = lineNumber);
+ LOGV2(20152,
+ "Option: \"tolower({key})={value}\" doesn't equal: "
+ "\"tolower({expectedKey})={expectedValue}\" data on line: {lineNumber}",
+ "Option key-value pair doesn't equal expected pair",
+ "key"_attr = options[i].first.original(),
+ "value"_attr = options[i].second,
+ "expectedKey"_attr = expectedOptions[i].first.original(),
+ "expectedValue"_attr = expectedOptions[i].second,
+ "lineNumber"_attr = lineNumber);
std::cerr << "Failing URI: \"" << uri << "\""
<< " data on line: " << lineNumber << std::endl;
ASSERT(false);
@@ -602,7 +601,7 @@ std::string returnStringFromElementOrNull(BSONElement element) {
// Helper method to take a valid test case, parse() it, and assure the output is correct
void testValidURIFormat(URITestCase testCase) {
- LOGV2(20153, "Testing URI: {testCase_URI}", "testCase_URI"_attr = testCase.URI);
+ LOGV2(20153, "Testing URI: {mongoUri}", "Testing URI", "mongoUri"_attr = testCase.URI);
std::string errMsg;
const auto cs_status = MongoURI::parse(testCase.URI);
ASSERT_OK(cs_status);
@@ -630,7 +629,7 @@ TEST(MongoURI, InvalidURIs) {
for (size_t i = 0; i != numCases; ++i) {
const InvalidURITestCase testCase = invalidCases[i];
- LOGV2(20154, "Testing URI: {testCase_URI}", "testCase_URI"_attr = testCase.URI);
+ LOGV2(20154, "Testing URI: {mongoUri}", "Testing URI", "mongoUri"_attr = testCase.URI);
auto cs_status = MongoURI::parse(testCase.URI);
ASSERT_NOT_OK(cs_status);
if (testCase.code) {
@@ -712,7 +711,10 @@ TEST(MongoURI, specTests) {
if (!valid) {
// This uri string is invalid --> parse the uri and ensure it fails
const InvalidURITestCase testCase = InvalidURITestCase{uri};
- LOGV2(20155, "Testing URI: {testCase_URI}", "testCase_URI"_attr = testCase.URI);
+ LOGV2(20155,
+ "Testing URI: {mongoUri}",
+ "Testing URI",
+ "mongoUri"_attr = testCase.URI);
auto cs_status = MongoURI::parse(testCase.URI);
ASSERT_NOT_OK(cs_status);
} else {
diff --git a/src/mongo/client/replica_set_monitor_manager.cpp b/src/mongo/client/replica_set_monitor_manager.cpp
index 2391037fba9..828fa9b6c8f 100644
--- a/src/mongo/client/replica_set_monitor_manager.cpp
+++ b/src/mongo/client/replica_set_monitor_manager.cpp
@@ -100,7 +100,7 @@ Status ReplicaSetMonitorManagerNetworkConnectionHook::validateHost(
LOGV2_ERROR(4712101,
"An error occurred publishing a ReplicaSetMonitor handshake event",
"error"_attr = exception.toStatus(),
- "setName"_attr = monitor->getName(),
+ "replicaSet"_attr = monitor->getName(),
"handshakeStatus"_attr = isMasterReply.status);
return exception.toStatus();
}
@@ -235,8 +235,9 @@ void ReplicaSetMonitorManager::removeMonitor(StringData setName) {
monitor->drop();
}
_monitors.erase(it);
- LOGV2(
- 20187, "Removed ReplicaSetMonitor for replica set {setName}", "setName"_attr = setName);
+ LOGV2(20187,
+ "Removed ReplicaSetMonitor for replica set {replicaSet}",
+ "replicaSet"_attr = setName);
}
}
diff --git a/src/mongo/client/sdam/topology_manager.cpp b/src/mongo/client/sdam/topology_manager.cpp
index 74f8de94d27..389b5a8b1a9 100644
--- a/src/mongo/client/sdam/topology_manager.cpp
+++ b/src/mongo/client/sdam/topology_manager.cpp
@@ -147,9 +147,9 @@ void TopologyManager::onServerRTTUpdated(ServerAddress hostAndPort, IsMasterRTT
}
// otherwise, the server was removed from the topology. Nothing to do.
LOGV2(4333201,
- "Not updating RTT. Server {server} does not exist in {setName}",
+ "Not updating RTT. Server {server} does not exist in {replicaSet}",
"host"_attr = hostAndPort,
- "setName"_attr = getTopologyDescription()->getSetName());
+ "replicaSet"_attr = getTopologyDescription()->getSetName());
}
void TopologyManager::_publishTopologyDescriptionChanged(
diff --git a/src/mongo/client/server_is_master_monitor.cpp b/src/mongo/client/server_is_master_monitor.cpp
index eb211c3af97..eab24eddb7e 100644
--- a/src/mongo/client/server_is_master_monitor.cpp
+++ b/src/mongo/client/server_is_master_monitor.cpp
@@ -78,9 +78,9 @@ SingleServerIsMasterMonitor::SingleServerIsMasterMonitor(
_setUri(setUri) {
LOGV2_DEBUG(4333217,
kLogLevel + 1,
- "RSM {setName} monitoring {host}",
+ "RSM {replicaSet} monitoring {host}",
"host"_attr = host,
- "setName"_attr = _setUri.getSetName());
+ "replicaSet"_attr = _setUri.getSetName());
}
void SingleServerIsMasterMonitor::init() {
@@ -100,11 +100,12 @@ void SingleServerIsMasterMonitor::requestImmediateCheck() {
if (!_isExpedited) {
// save some log lines.
- LOGV2_DEBUG(4333227,
- kLogLevel,
- "RSM {setName} monitoring {host} in expedited mode until we detect a primary.",
- "host"_attr = _host,
- "setName"_attr = _setUri.getSetName());
+ LOGV2_DEBUG(
+ 4333227,
+ kLogLevel,
+ "RSM {replicaSet} monitoring {host} in expedited mode until we detect a primary.",
+ "host"_attr = _host,
+ "replicaSet"_attr = _setUri.getSetName());
// This will change the _currentRefreshPeriod to the shorter expedited duration.
_isExpedited = true;
@@ -116,9 +117,9 @@ void SingleServerIsMasterMonitor::requestImmediateCheck() {
if (_isMasterOutstanding) {
LOGV2_DEBUG(4333216,
kLogLevel + 2,
- "RSM {setName} immediate isMaster check requested, but there "
+ "RSM {replicaSet} immediate isMaster check requested, but there "
"is already an outstanding request.",
- "setName"_attr = _setUri.getSetName());
+ "replicaSet"_attr = _setUri.getSetName());
return;
}
@@ -164,7 +165,7 @@ void SingleServerIsMasterMonitor::_rescheduleNextIsMaster(WithLock lock, Millise
LOGV2_DEBUG(4333218,
kLogLevel,
"Rescheduling the next replica set monitoring request",
- "setName"_attr = _setUri.getSetName(),
+ "replicaSet"_attr = _setUri.getSetName(),
"host"_attr = _host,
"duration"_attr = delay);
_cancelOutstandingRequest(lock);
@@ -249,9 +250,9 @@ SingleServerIsMasterMonitor::_scheduleStreamableIsMaster() {
self->_isMasterOutstanding = false;
LOGV2_DEBUG(4495400,
kLogLevel,
- "RSM {setName} not processing response: {status}",
+ "RSM {replicaSet} not processing response: {status}",
"status"_attr = result.response.status,
- "setName"_attr = self->_setUri.getSetName());
+ "replicaSet"_attr = self->_setUri.getSetName());
return;
}
@@ -306,9 +307,9 @@ StatusWith<TaskExecutor::CallbackHandle> SingleServerIsMasterMonitor::_scheduleS
if (self->_isShutdown) {
LOGV2_DEBUG(4333219,
kLogLevel,
- "RSM {setName} not processing response: {status}",
+ "RSM {replicaSet} not processing response: {status}",
"status"_attr = result.response.status,
- "setName"_attr = self->_setUri.getSetName());
+ "replicaSet"_attr = self->_setUri.getSetName());
return;
}
@@ -351,9 +352,9 @@ void SingleServerIsMasterMonitor::shutdown() {
LOGV2_DEBUG(4333220,
kLogLevel + 1,
- "RSM {setName} Closing host {host}",
+ "RSM {replicaSet} Closing host {host}",
"host"_attr = _host,
- "setName"_attr = _setUri.getSetName());
+ "replicaSet"_attr = _setUri.getSetName());
_cancelOutstandingRequest(lock);
@@ -361,9 +362,9 @@ void SingleServerIsMasterMonitor::shutdown() {
LOGV2_DEBUG(4333229,
kLogLevel + 1,
- "RSM {setName} Done Closing host {host}",
+ "RSM {replicaSet} Done Closing host {host}",
"host"_attr = _host,
- "setName"_attr = _setUri.getSetName());
+ "replicaSet"_attr = _setUri.getSetName());
}
void SingleServerIsMasterMonitor::_cancelOutstandingRequest(WithLock) {
@@ -381,9 +382,9 @@ void SingleServerIsMasterMonitor::_cancelOutstandingRequest(WithLock) {
void SingleServerIsMasterMonitor::_onIsMasterSuccess(const BSONObj bson) {
LOGV2_DEBUG(4333221,
kLogLevel + 1,
- "RSM {setName} received successful isMaster for server {host}: {bson}",
+ "RSM {replicaSet} received successful isMaster for server {host}: {bson}",
"host"_attr = _host,
- "setName"_attr = _setUri.getSetName(),
+ "replicaSet"_attr = _setUri.getSetName(),
"bson"_attr = bson.toString());
_eventListener->onServerHeartbeatSucceededEvent(_host, bson);
@@ -392,10 +393,10 @@ void SingleServerIsMasterMonitor::_onIsMasterSuccess(const BSONObj bson) {
void SingleServerIsMasterMonitor::_onIsMasterFailure(const Status& status, const BSONObj bson) {
LOGV2_DEBUG(4333222,
kLogLevel,
- "RSM {setName} received failed isMaster for server {host}: {status}: {bson}",
+ "RSM {replicaSet} received failed isMaster for server {host}: {status}: {bson}",
"host"_attr = _host,
"status"_attr = status.toString(),
- "setName"_attr = _setUri.getSetName(),
+ "replicaSet"_attr = _setUri.getSetName(),
"bson"_attr = bson.toString());
_eventListener->onServerHeartbeatFailureEvent(status, _host, bson);
@@ -443,8 +444,8 @@ ServerIsMasterMonitor::ServerIsMasterMonitor(
_setUri(setUri) {
LOGV2_DEBUG(4333223,
kLogLevel,
- "RSM {setName} monitoring {size} members.",
- "setName"_attr = _setUri.getSetName(),
+ "RSM {replicaSet} monitoring {size} members.",
+ "replicaSet"_attr = _setUri.getSetName(),
"size"_attr = initialTopologyDescription->getServers().size());
onTopologyDescriptionChangedEvent(
initialTopologyDescription->getId(), nullptr, initialTopologyDescription);
@@ -486,8 +487,8 @@ void ServerIsMasterMonitor::onTopologyDescriptionChangedEvent(
singleMonitor->shutdown();
LOGV2_DEBUG(4333225,
kLogLevel,
- "RSM {setName} host {addr} was removed from the topology.",
- "setName"_attr = _setUri.getSetName(),
+ "RSM {replicaSet} host {addr} was removed from the topology.",
+ "replicaSet"_attr = _setUri.getSetName(),
"addr"_attr = serverAddress);
it = _singleMonitors.erase(it, ++it);
} else {
@@ -503,8 +504,8 @@ void ServerIsMasterMonitor::onTopologyDescriptionChangedEvent(
if (isMissing) {
LOGV2_DEBUG(4333226,
kLogLevel,
- "RSM {setName} {addr} was added to the topology.",
- "setName"_attr = _setUri.getSetName(),
+ "RSM {replicaSet} {addr} was added to the topology.",
+ "replicaSet"_attr = _setUri.getSetName(),
"addr"_attr = serverAddress);
_singleMonitors[serverAddress] = std::make_shared<SingleServerIsMasterMonitor>(
_setUri,
diff --git a/src/mongo/client/streamable_replica_set_monitor.cpp b/src/mongo/client/streamable_replica_set_monitor.cpp
index fdbf78aaa44..ed2c75090d5 100644
--- a/src/mongo/client/streamable_replica_set_monitor.cpp
+++ b/src/mongo/client/streamable_replica_set_monitor.cpp
@@ -209,13 +209,13 @@ void StreamableReplicaSetMonitor::drop() {
lock, Status{ErrorCodes::ShutdownInProgress, "the ReplicaSetMonitor is shutting down"});
}
- LOGV2(4333209, "Closing Replica Set Monitor {setName}", "setName"_attr = getName());
+ LOGV2(4333209, "Closing Replica Set Monitor {replicaSet}", "replicaSet"_attr = getName());
_queryProcessor->shutdown();
_pingMonitor->shutdown();
_isMasterMonitor->shutdown();
ReplicaSetMonitorManager::get()->getNotifier().onDroppedSet(getName());
- LOGV2(4333210, "Done closing Replica Set Monitor {setName}", "setName"_attr = getName());
+ LOGV2(4333210, "Done closing Replica Set Monitor {replicaSet}", "replicaSet"_attr = getName());
}
SemiFuture<HostAndPort> StreamableReplicaSetMonitor::getHostOrRefresh(
@@ -257,8 +257,8 @@ SemiFuture<std::vector<HostAndPort>> StreamableReplicaSetMonitor::getHostsOrRefr
_isMasterMonitor->requestImmediateCheck();
LOGV2_DEBUG(4333212,
kLowerLogLevel,
- "RSM {setName} start async getHosts with {readPref}",
- "setName"_attr = getName(),
+ "RSM {replicaSet} start async getHosts with {readPref}",
+ "replicaSet"_attr = getName(),
"readPref"_attr = readPrefToStringWithMinOpTime(criteria));
// fail fast on timeout
@@ -310,16 +310,16 @@ SemiFuture<std::vector<HostAndPort>> StreamableReplicaSetMonitor::_enqueueOutsta
query->promise.setError(errorStatus);
query->done = true;
LOGV2_INFO(4333208,
- "RSM {setName} host selection timeout: {status}",
- "setName"_attr = getName(),
+ "RSM {replicaSet} host selection timeout: {status}",
+ "replicaSet"_attr = getName(),
"error"_attr = errorStatus.toString());
};
auto swDeadlineHandle = _executor->scheduleWorkAt(query->deadline, deadlineCb);
if (!swDeadlineHandle.isOK()) {
LOGV2_INFO(4333207,
- "RSM {setName} error scheduling deadline handler: {status}",
- "setName"_attr = getName(),
+ "RSM {replicaSet} error scheduling deadline handler: {status}",
+ "replicaSet"_attr = getName(),
"error"_attr = swDeadlineHandle.getStatus());
return SemiFuture<HostAndPortList>::makeReady(swDeadlineHandle.getStatus());
}
@@ -579,8 +579,8 @@ void StreamableReplicaSetMonitor::onTopologyDescriptionChangedEvent(
// Notify external components if there are membership changes in the topology.
if (_hasMembershipChange(previousDescription, newDescription)) {
LOGV2(4333213,
- "RSM {setName} Topology Change: {topologyDescription}",
- "setName"_attr = getName(),
+ "RSM {replicaSet} Topology Change: {topologyDescription}",
+ "replicaSet"_attr = getName(),
"topologyDescription"_attr = newDescription->toString());
auto maybePrimary = newDescription->getPrimary();
@@ -604,7 +604,7 @@ void StreamableReplicaSetMonitor::onTopologyDescriptionChangedEvent(
kLowerLogLevel,
"Skip publishing unconfirmed replica set members since there are "
"no primaries or secondaries in the new topology",
- "setName"_attr = getName());
+ "replicaSet"_attr = getName());
return;
}
@@ -647,7 +647,7 @@ void StreamableReplicaSetMonitor::onServerPingSucceededEvent(sdam::IsMasterRTT d
kLowerLogLevel,
"ReplicaSetMonitor ping success",
"host"_attr = hostAndPort,
- "setName"_attr = getName(),
+ "replicaSet"_attr = getName(),
"duration"_attr = durationMS);
_topologyManager->onServerRTTUpdated(hostAndPort, durationMS);
}
@@ -726,8 +726,8 @@ void StreamableReplicaSetMonitor::_processOutstanding(
const auto latency = _executor->now() - query->start;
LOGV2_DEBUG(433214,
kLowerLogLevel,
- "RSM {setName} finished async getHosts: {readPref} ({latency})",
- "setName"_attr = getName(),
+ "RSM {replicaSet} finished async getHosts: {readPref} ({latency})",
+ "replicaSet"_attr = getName(),
"readPref"_attr = readPrefToStringWithMinOpTime(query->criteria),
"latency"_attr = latency.toString());
shouldRemove = true;
diff --git a/src/mongo/client/streamable_replica_set_monitor_error_handler.cpp b/src/mongo/client/streamable_replica_set_monitor_error_handler.cpp
index be19b1e64ec..7f04cc2ee10 100644
--- a/src/mongo/client/streamable_replica_set_monitor_error_handler.cpp
+++ b/src/mongo/client/streamable_replica_set_monitor_error_handler.cpp
@@ -46,7 +46,7 @@ SdamErrorHandler::ErrorActions SdamErrorHandler::computeErrorActions(const HostA
LOGV2(4712102,
"Host failed in replica set",
- "setName"_attr = _setName,
+ "replicaSet"_attr = _setName,
"host"_attr = host,
"error"_attr = status,
"action"_attr = result);
diff --git a/src/mongo/client/streamable_replica_set_monitor_query_processor.cpp b/src/mongo/client/streamable_replica_set_monitor_query_processor.cpp
index bab19ae76fd..4e7a2034396 100644
--- a/src/mongo/client/streamable_replica_set_monitor_query_processor.cpp
+++ b/src/mongo/client/streamable_replica_set_monitor_query_processor.cpp
@@ -57,8 +57,8 @@ void StreamableReplicaSetMonitor::StreamableReplicaSetMonitorQueryProcessor::
if (!replicaSetMonitor) {
LOGV2_DEBUG(4333215,
kLogLevel,
- "could not find rsm instance {setName} for query processing.",
- "setName"_attr = *setName);
+ "could not find rsm instance {replicaSet} for query processing.",
+ "replicaSet"_attr = *setName);
return;
}
replicaSetMonitor->_processOutstanding(newDescription);
diff --git a/src/mongo/executor/connection_pool.cpp b/src/mongo/executor/connection_pool.cpp
index 1594e57bdef..3c4618555ca 100644
--- a/src/mongo/executor/connection_pool.cpp
+++ b/src/mongo/executor/connection_pool.cpp
@@ -120,9 +120,10 @@ void ConnectionPool::ControllerInterface::init(ConnectionPool* pool) {
LOGV2_DEBUG(22558,
2,
- "Controller for {pool_name} is {name}",
- "pool_name"_attr = pool->_name,
- "name"_attr = name());
+ "Initializing connection pool controller for {pool} as a {controller}",
+ "Initializing connection pool controller",
+ "pool"_attr = pool->_name,
+ "controller"_attr = name());
_pool = pool;
}
@@ -218,7 +219,7 @@ auto ConnectionPool::makeLimitController() noexcept -> std::shared_ptr<Controlle
*/
class ConnectionPool::SpecificPool final
: public std::enable_shared_from_this<ConnectionPool::SpecificPool> {
- static constexpr int kDiagnosticLogLevel = 4;
+ static constexpr auto kDiagnosticLogLevel = 4;
public:
/**
@@ -643,11 +644,11 @@ Future<ConnectionPool::ConnectionHandle> ConnectionPool::SpecificPool::getConnec
auto conn = tryGetConnection();
if (conn) {
- LOGV2_DEBUG(
- 22559,
- logSeverityV1toV2(kDiagnosticLogLevel).toInt(),
- "Requesting new connection to {hostAndPort}--using existing idle connection",
- "hostAndPort"_attr = _hostAndPort);
+ LOGV2_DEBUG(22559,
+ kDiagnosticLogLevel,
+ "Using existing idle connection to {hostAndPort}",
+ "Using existing idle connection",
+ "hostAndPort"_attr = _hostAndPort);
return Future<ConnectionPool::ConnectionHandle>::makeReady(std::move(conn));
}
}
@@ -657,8 +658,9 @@ Future<ConnectionPool::ConnectionHandle> ConnectionPool::SpecificPool::getConnec
timeout = pendingTimeout;
}
LOGV2_DEBUG(22560,
- logSeverityV1toV2(kDiagnosticLogLevel).toInt(),
+ kDiagnosticLogLevel,
"Requesting new connection to {hostAndPort} with timeout {timeout}",
+ "Requesting new connection",
"hostAndPort"_attr = _hostAndPort,
"timeout"_attr = timeout);
@@ -693,8 +695,9 @@ ConnectionPool::ConnectionHandle ConnectionPool::SpecificPool::tryGetConnection(
if (!conn->isHealthy()) {
LOGV2(22561,
- "dropping unhealthy pooled connection to {conn_getHostAndPort}",
- "conn_getHostAndPort"_attr = conn->getHostAndPort());
+ "Dropping unhealthy pooled connection to {hostAndPort}",
+ "Dropping unhealthy pooled connection",
+ "hostAndPort"_attr = conn->getHostAndPort());
// Drop the bad connection via scoped destruction and retry
continue;
@@ -728,22 +731,25 @@ void ConnectionPool::SpecificPool::finishRefresh(ConnectionInterface* connPtr, S
// to our internal one.
if (status.code() == ErrorCodes::NetworkInterfaceExceededTimeLimit) {
LOGV2_DEBUG(22562,
- logSeverityV1toV2(kDiagnosticLogLevel).toInt(),
- "Pending connection to host {hostAndPort} did not complete within the "
- "connection timeout, retrying with a new connection;{openConnections} "
- "connections to that host remain open",
+ kDiagnosticLogLevel,
+ "Pending connection to host {hostAndPort} did not complete within the timeout, "
+ "retrying with a new connection; "
+ "{numOpenConns} connections to that host remain open",
+ "Pending connection did not complete within the timeout, "
+ "retrying with a new connection",
"hostAndPort"_attr = _hostAndPort,
- "openConnections"_attr = openConnections());
+ "numOpenConns"_attr = openConnections());
return;
}
// Pass a failure on through
if (!status.isOK()) {
LOGV2_DEBUG(22563,
- logSeverityV1toV2(kDiagnosticLogLevel).toInt(),
- "Connection failed to {hostAndPort} due to {status}",
+ kDiagnosticLogLevel,
+ "Connection failed to {hostAndPort} due to {error}",
+ "Connection failed",
"hostAndPort"_attr = _hostAndPort,
- "status"_attr = redact(status));
+ "error"_attr = redact(status));
processFailure(status);
return;
}
@@ -751,15 +757,17 @@ void ConnectionPool::SpecificPool::finishRefresh(ConnectionInterface* connPtr, S
// If the host and port were dropped, let this lapse and spawn new connections
if (!conn || conn->getGeneration() != _generation) {
LOGV2_DEBUG(22564,
- logSeverityV1toV2(kDiagnosticLogLevel).toInt(),
+ kDiagnosticLogLevel,
"Dropping late refreshed connection to {hostAndPort}",
+ "Dropping late refreshed connection",
"hostAndPort"_attr = _hostAndPort);
return;
}
LOGV2_DEBUG(22565,
- logSeverityV1toV2(kDiagnosticLogLevel).toInt(),
+ kDiagnosticLogLevel,
"Finishing connection refresh for {hostAndPort}",
+ "Finishing connection refresh",
"hostAndPort"_attr = _hostAndPort);
// If the connection refreshed successfully, throw it back in the ready pool
@@ -787,11 +795,12 @@ void ConnectionPool::SpecificPool::returnConnection(ConnectionInterface* connPtr
if (auto status = conn->getStatus(); !status.isOK()) {
// TODO: alert via some callback if the host is bad
LOGV2(22566,
- "Ending connection to host {hostAndPort} due to bad connection status: {status}; "
- "{openConnections} connections to that host remain open",
+ "Ending connection to host {hostAndPort} due to bad connection status: {error}; "
+ "{numOpenConns} connections to that host remain open",
+ "Ending connection due to bad connection status",
"hostAndPort"_attr = _hostAndPort,
- "status"_attr = redact(status),
- "openConnections"_attr = openConnections());
+ "error"_attr = redact(status),
+ "numOpenConns"_attr = openConnections());
return;
}
@@ -805,17 +814,19 @@ void ConnectionPool::SpecificPool::returnConnection(ConnectionInterface* connPtr
// If we already have minConnections, just let the connection lapse
LOGV2(22567,
"Ending idle connection to host {hostAndPort} because the pool meets "
- "constraints; {openConnections} connections to that host remain open",
+ "constraints; {numOpenConns} connections to that host remain open",
+ "Ending idle connection because the pool meets constraints",
"hostAndPort"_attr = _hostAndPort,
- "openConnections"_attr = openConnections());
+ "numOpenConns"_attr = openConnections());
return;
}
_processingPool[connPtr] = std::move(conn);
LOGV2_DEBUG(22568,
- logSeverityV1toV2(kDiagnosticLogLevel).toInt(),
+ kDiagnosticLogLevel,
"Refreshing connection to {hostAndPort}",
+ "Refreshing connection",
"hostAndPort"_attr = _hostAndPort);
connPtr->refresh(_parent->_controller->pendingTimeout(),
guardCallback([this](auto conn, auto status) {
@@ -827,8 +838,9 @@ void ConnectionPool::SpecificPool::returnConnection(ConnectionInterface* connPtr
// If it's fine as it is, just put it in the ready queue
LOGV2_DEBUG(22569,
- logSeverityV1toV2(kDiagnosticLogLevel).toInt(),
+ kDiagnosticLogLevel,
"Returning ready connection to {hostAndPort}",
+ "Returning ready connection",
"hostAndPort"_attr = _hostAndPort);
addToReady(std::move(conn));
@@ -847,8 +859,9 @@ void ConnectionPool::SpecificPool::addToReady(OwnedConnection conn) {
// returnConnection
auto returnConnectionFunc = guardCallback([this, connPtr]() {
LOGV2_DEBUG(22570,
- logSeverityV1toV2(kDiagnosticLogLevel).toInt(),
+ kDiagnosticLogLevel,
"Triggered refresh timeout for {hostAndPort}",
+ "Triggered refresh timeout",
"hostAndPort"_attr = _hostAndPort);
auto conn = takeFromPool(_readyPool, connPtr);
@@ -876,8 +889,11 @@ void ConnectionPool::SpecificPool::triggerShutdown(const Status& status) {
return;
}
- LOGV2_DEBUG(
- 22571, 2, "Delisting connection pool for {hostAndPort}", "hostAndPort"_attr = _hostAndPort);
+ LOGV2_DEBUG(22571,
+ 2,
+ "Delisting connection pool for {hostAndPort}",
+ "Delistinng connection pool",
+ "hostAndPort"_attr = _hostAndPort);
// Make sure the pool lifetime lasts until the end of this function,
// it could be only in the map of pools
@@ -900,9 +916,10 @@ void ConnectionPool::SpecificPool::processFailure(const Status& status) {
auto severity = MONGO_GET_LIMITED_SEVERITY(_hostAndPort, Seconds{1}, 0, 2);
LOGV2_DEBUG(22572,
logSeverityV1toV2(severity).toInt(),
- "Dropping all pooled connections to {hostAndPort} due to {status}",
+ "Dropping all pooled connections to {hostAndPort} due to {error}",
+ "Dropping all pooled connections",
"hostAndPort"_attr = _hostAndPort,
- "status"_attr = redact(status));
+ "error"_attr = redact(status));
}
// When a connection enters the ready pool, its timer is set to eventually refresh the
@@ -931,8 +948,9 @@ void ConnectionPool::SpecificPool::processFailure(const Status& status) {
}
LOGV2_DEBUG(22573,
- logSeverityV1toV2(kDiagnosticLogLevel).toInt(),
+ kDiagnosticLogLevel,
"Failing requests to {hostAndPort}",
+ "Failing requests",
"hostAndPort"_attr = _hostAndPort);
_requests.clear();
}
@@ -972,19 +990,21 @@ void ConnectionPool::SpecificPool::spawnConnections() {
if (_health.isFailed) {
LOGV2_DEBUG(22574,
- logSeverityV1toV2(kDiagnosticLogLevel).toInt(),
- "Pool for {hostAndPort} has failed recently. Postponing any attempts to spawn "
- "connections",
+ kDiagnosticLogLevel,
+ "Pool for {hostAndPort} has failed recently, "
+ "postponing any attempts to spawn connections",
+ "Pool has failed recently, postponing any attempts to spawn connections",
"hostAndPort"_attr = _hostAndPort);
return;
}
auto controls = _parent->_controller->getControls(_id);
LOGV2_DEBUG(22575,
- logSeverityV1toV2(kDiagnosticLogLevel).toInt(),
- "Comparing connection state for {hostAndPort} to Controls: {controls}",
+ kDiagnosticLogLevel,
+ "Comparing connection state for {hostAndPort} to controls: {poolControls}",
+ "Comparing connection state to controls",
"hostAndPort"_attr = _hostAndPort,
- "controls"_attr = controls);
+ "poolControls"_attr = controls);
auto pendingConnections = refreshingConnections();
if (pendingConnections >= controls.maxPendingConnections) {
@@ -1000,14 +1020,16 @@ void ConnectionPool::SpecificPool::spawnConnections() {
LOGV2_DEBUG(22576,
logSeverityV1toV2(severity).toInt(),
"Connecting to {hostAndPort}",
+ "Connecting",
"hostAndPort"_attr = _hostAndPort);
auto allowance = std::min(controls.targetConnections - totalConnections,
controls.maxPendingConnections - pendingConnections);
LOGV2_DEBUG(22577,
- logSeverityV1toV2(kDiagnosticLogLevel).toInt(),
- "Spawning {allowance} connections to {hostAndPort}",
- "allowance"_attr = allowance,
+ kDiagnosticLogLevel,
+ "Spawning {connAllowance} connections to {hostAndPort}",
+ "Spawning connections",
+ "connAllowance"_attr = allowance,
"hostAndPort"_attr = _hostAndPort);
for (decltype(allowance) i = 0; i < allowance; ++i) {
OwnedConnection handle;
@@ -1016,8 +1038,8 @@ void ConnectionPool::SpecificPool::spawnConnections() {
handle = _parent->_factory->makeConnection(_hostAndPort, _sslMode, _generation);
} catch (std::system_error& e) {
LOGV2_FATAL(40336,
- "Failed to construct a new connection object: {e_what}",
- "e_what"_attr = e.what());
+ "Failed to construct a new connection object: {reason}",
+ "reason"_attr = e.what());
}
_processingPool[handle.get()] = handle;
@@ -1136,10 +1158,11 @@ void ConnectionPool::SpecificPool::updateController() {
inUseConnections(),
};
LOGV2_DEBUG(22578,
- logSeverityV1toV2(kDiagnosticLogLevel).toInt(),
- "Updating controller for {hostAndPort} with State: {state}",
+ kDiagnosticLogLevel,
+ "Updating pool controller for {hostAndPort} with state: {poolState}",
+ "Updating pool controller",
"hostAndPort"_attr = _hostAndPort,
- "state"_attr = state);
+ "poolState"_attr = state);
auto hostGroup = controller.updateHost(_id, std::move(state));
// If we can shutdown, then do so
@@ -1157,8 +1180,8 @@ void ConnectionPool::SpecificPool::updateController() {
// connections in use or requests outstanding unless its parent ConnectionPool is
// also shutting down.
LOGV2_WARNING(4293001,
- "Controller requested shutdown but connections still in use. "
- "Connection pool will stay active.",
+ "Controller requested shutdown but connections still in use, "
+ "connection pool will stay active",
"hostAndPort"_attr = pool->_hostAndPort);
continue;
}
@@ -1195,8 +1218,9 @@ void ConnectionPool::SpecificPool::updateState() {
if (_health.isShutdown) {
// If we're in shutdown, there is nothing to update. Our clients are all gone.
LOGV2_DEBUG(22579,
- logSeverityV1toV2(kDiagnosticLogLevel).toInt(),
- "{hostAndPort} is dead",
+ kDiagnosticLogLevel,
+ "Pool {hostAndPort} is dead",
+ "Pool is dead",
"hostAndPort"_attr = _hostAndPort);
return;
}
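
Every hunk above applies the same logv2 transformation: the v1 severity shim (logSeverityV1toV2(...).toInt()) is dropped in favor of passing the debug level directly, the formatted message with {placeholders} gains a placeholder-free twin for structured output, and attribute names are aligned with their placeholders. A minimal sketch of the resulting call shape, assuming a translation unit that defines its default log component first; the log id 9999901, the kNetwork component choice, and the logSpawn() helper are illustrative assumptions, not part of this patch:

    // Sketch only: the id, component, and helper name are invented for
    // illustration; the call shape mirrors the hunks above.
    #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kNetwork

    #include "mongo/logv2/log.h"

    #include <string>

    namespace mongo {
    namespace {
    constexpr int kDiagnosticLogLevel = 4;  // debug level, passed straight through
    }  // namespace

    void logSpawn(const std::string& host, size_t allowance) {
        LOGV2_DEBUG(9999901,
                    kDiagnosticLogLevel,
                    "Spawning {connAllowance} connections to {hostAndPort}",  // formatted form
                    "Spawning connections",                                   // structured form
                    "connAllowance"_attr = allowance,
                    "hostAndPort"_attr = host);
    }
    }  // namespace mongo

Note that only constant levels lose the shim: logs whose severity is computed at runtime, such as the MONGO_GET_LIMITED_SEVERITY results feeding ids 22572 and 22576 above, still go through logSeverityV1toV2(severity).toInt().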
diff --git a/src/mongo/executor/connection_pool_tl.cpp b/src/mongo/executor/connection_pool_tl.cpp
index 17d8d9c852e..a7145cb38e1 100644
--- a/src/mongo/executor/connection_pool_tl.cpp
+++ b/src/mongo/executor/connection_pool_tl.cpp
@@ -308,9 +308,10 @@ void TLConnection::setup(Milliseconds timeout, SetupCallback cb) {
} else {
LOGV2_DEBUG(22584,
2,
- "Failed to connect to {peer} - {status}",
- "peer"_attr = _peer,
- "status"_attr = redact(status));
+ "Failed to connect to {hostAndPort} - {error}",
+ "Failed to connect",
+ "hostAndPort"_attr = _peer,
+ "error"_attr = redact(status));
handler->promise.setError(status);
}
});
diff --git a/src/mongo/executor/network_interface_integration_fixture.cpp b/src/mongo/executor/network_interface_integration_fixture.cpp
index ae22469902a..69d37612aa9 100644
--- a/src/mongo/executor/network_interface_integration_fixture.cpp
+++ b/src/mongo/executor/network_interface_integration_fixture.cpp
@@ -159,9 +159,12 @@ RemoteCommandResponse NetworkInterfaceIntegrationFixture::runCommandSync(
auto deferred = runCommand(makeCallbackHandle(), request);
auto& res = deferred.get();
if (res.isOK()) {
- LOGV2(22586, "got command result: {res}", "res"_attr = res.toString());
+ LOGV2(22586,
+ "Got command result: {response}",
+ "Got command result",
+ "response"_attr = res.toString());
} else {
- LOGV2(22587, "command failed: {res_status}", "res_status"_attr = res.status);
+ LOGV2(22587, "Command failed: {error}", "Command failed", "error"_attr = res.status);
}
return res;
}
diff --git a/src/mongo/executor/network_interface_mock.cpp b/src/mongo/executor/network_interface_mock.cpp
index 451f786042b..1b7e97b0ee8 100644
--- a/src/mongo/executor/network_interface_mock.cpp
+++ b/src/mongo/executor/network_interface_mock.cpp
@@ -64,28 +64,6 @@ NetworkInterfaceMock::~NetworkInterfaceMock() {
invariant(_blackHoled.empty());
}
-void NetworkInterfaceMock::logQueues() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
- const std::vector<std::pair<std::string, const NetworkOperationList*>> queues{
- {"unscheduled", &_unscheduled},
- {"scheduled", &_scheduled},
- {"processing", &_processing},
- {"blackholes", &_blackHoled}};
-
- for (auto&& queue : queues) {
- if (queue.second->empty()) {
- continue;
- }
-
- LOGV2(22588, "**** queue: {queue_first} ****", "queue_first"_attr = queue.first);
- for (auto&& item : *queue.second) {
- LOGV2(22589,
- "\t\t {item_getDiagnosticString}",
- "item_getDiagnosticString"_attr = item.getDiagnosticString());
- }
- }
-}
-
std::string NetworkInterfaceMock::getDiagnosticString() {
stdx::unique_lock<stdx::mutex> lk(_mutex);
return str::stream() << "NetworkInterfaceMock -- waitingToRunMask:" << _waitingToRunMask
@@ -249,10 +227,10 @@ void NetworkInterfaceMock::shutdown() {
_waitingToRunMask |= kExecutorThread; // Prevents network thread from scheduling.
lk.unlock();
for (NetworkOperationIterator iter = todo.begin(); iter != todo.end(); ++iter) {
- LOGV2_WARNING(
- 22590,
- "Mock network interface shutting down with outstanding request: {iter_getRequest}",
- "iter_getRequest"_attr = iter->getRequest());
+ LOGV2_WARNING(22590,
+ "Mock network interface shutting down with outstanding request: {request}",
+ "Mock network interface shutting down with outstanding request",
+ "request"_attr = iter->getRequest());
iter->setResponse(
now, {ErrorCodes::ShutdownInProgress, "Shutting down mock network", Milliseconds(0)});
iter->finishResponse();
diff --git a/src/mongo/executor/network_interface_mock.h b/src/mongo/executor/network_interface_mock.h
index bb16bd397ec..8781101329d 100644
--- a/src/mongo/executor/network_interface_mock.h
+++ b/src/mongo/executor/network_interface_mock.h
@@ -82,11 +82,6 @@ public:
NetworkInterfaceMock();
virtual ~NetworkInterfaceMock();
- /**
- * Logs the contents of the queues for diagnostics.
- */
- void logQueues();
-
////////////////////////////////////////////////////////////////////////////////
//
// NetworkInterface methods
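
logQueues() is deleted outright rather than converted to logv2; the queue contents it printed remain reachable through getDiagnosticString(), which the mock keeps. A hedged sketch of how a test could log that state instead, assuming a caller in a logv2-enabled translation unit; the helper name and log id are invented here:

    // Illustrative only: dumpMockState() and the id 9999902 are assumptions;
    // getDiagnosticString() is the accessor the mock retains above.
    void dumpMockState(mongo::executor::NetworkInterfaceMock* net) {
        LOGV2(9999902,
              "Mock network interface state",
              "state"_attr = net->getDiagnosticString());
    }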
diff --git a/src/mongo/executor/network_interface_perf_test.cpp b/src/mongo/executor/network_interface_perf_test.cpp
index 8000293277b..d8ece73bdd2 100644
--- a/src/mongo/executor/network_interface_perf_test.cpp
+++ b/src/mongo/executor/network_interface_perf_test.cpp
@@ -107,7 +107,10 @@ TEST(NetworkInterfaceASIO, SerialPerf) {
int duration = timeNetworkTestMillis(numOperations, &netAsio);
int result = numOperations * 1000 / duration;
- LOGV2(22591, "THROUGHPUT asio ping ops/s: {result}", "result"_attr = result);
+ LOGV2(22591,
+ "THROUGHPUT asio ping ops/s: {throughput}",
+ "THROUGHPUT asio ping ops/s",
+ "throughput"_attr = result);
}
} // namespace
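
The reported figure is plain integer arithmetic: numOperations * 1000 / duration turns a millisecond timing into operations per second, so 10,000 operations completing in 2,500 ms report 10000 * 1000 / 2500 = 4,000 ops/s. Since the division truncates and a sub-millisecond run would divide by zero, the computation implicitly assumes the benchmark runs long enough for duration to reach at least 1 ms.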
diff --git a/src/mongo/executor/network_interface_tl.cpp b/src/mongo/executor/network_interface_tl.cpp
index d0aaa694407..df9b6ae43e4 100644
--- a/src/mongo/executor/network_interface_tl.cpp
+++ b/src/mongo/executor/network_interface_tl.cpp
@@ -350,7 +350,12 @@ void NetworkInterfaceTL::CommandStateBase::setTimer() {
<< ", deadline was " << deadline.toString()
<< ", op was " << redact(requestOnAny.toString());
- LOGV2_DEBUG(22595, 2, "", "message"_attr = message);
+ LOGV2_DEBUG(22595,
+ 2,
+ "Request timed out",
+ "requestId"_attr = requestOnAny.id,
+ "deadline"_attr = deadline,
+ "request"_attr = requestOnAny);
fulfillFinalPromise(Status(ErrorCodes::NetworkInterfaceExceededTimeLimit, message));
});
}
@@ -437,10 +442,8 @@ Status NetworkInterfaceTL::startCommand(const TaskExecutor::CallbackHandle& cbHa
return kNetworkInterfaceShutdownInProgress;
}
- LOGV2_DEBUG(22596,
- logSeverityV1toV2(kDiagnosticLogLevel).toInt(),
- "startCommand",
- "request"_attr = redact(request.toString()));
+ LOGV2_DEBUG(
+ 22596, kDiagnosticLogLevel, "startCommand", "request"_attr = redact(request.toString()));
if (_metadataHook) {
BSONObjBuilder newMetadata(std::move(request.metadata));
@@ -699,7 +702,7 @@ void NetworkInterfaceTL::RequestManager::killOperationsForPendingRequests() {
"Sending remote _killOperations request to cancel command",
"operationKey"_attr = cmdState.lock()->operationKey,
"target"_attr = requestState->request->target,
- "request_id"_attr = requestState->request->id);
+ "requestId"_attr = requestState->request->id);
auto status = requestState->interface()->_killOperation(requestState);
if (!status.isOK()) {
@@ -768,9 +771,9 @@ void NetworkInterfaceTL::RequestManager::trySend(
LOGV2_DEBUG(4647200,
2,
- "Setup hedge request",
- "request_id"_attr = cmdStatePtr->requestOnAny.id,
- "request"_attr = redact(request.toString()),
+ "Set maxTimeMS for request",
+ "maxTime"_attr = Milliseconds(maxTimeMS),
+ "requestId"_attr = cmdStatePtr->requestOnAny.id,
"target"_attr = cmdStatePtr->requestOnAny.target[idx]);
if (cmdStatePtr->interface->_svcCtx) {
@@ -986,10 +989,8 @@ Status NetworkInterfaceTL::startExhaustCommand(const TaskExecutor::CallbackHandl
return {ErrorCodes::ShutdownInProgress, "NetworkInterface shutdown in progress"};
}
- LOGV2_DEBUG(23909,
- logSeverityV1toV2(kDiagnosticLogLevel).toInt(),
- "startCommand",
- "request"_attr = redact(request.toString()));
+ LOGV2_DEBUG(
+ 23909, kDiagnosticLogLevel, "startCommand", "request"_attr = redact(request.toString()));
if (_metadataHook) {
BSONObjBuilder newMetadata(std::move(request.metadata));
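
The timeout log above changes shape, not just wording: the preassembled message string now feeds only the Status passed to fulfillFinalPromise, while the log call carries the request id, deadline, and request as separate attributes. In logv2's structured JSON output those values land under the attr field, alongside the standard t, s, c, id, ctx, and msg fields, so a consumer can filter on id 22595 or attr.requestId instead of regex-matching a flat "Request ... timed out" string.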
diff --git a/src/mongo/executor/network_interface_tl.h b/src/mongo/executor/network_interface_tl.h
index 4fa5f271b9c..622ba9a0de7 100644
--- a/src/mongo/executor/network_interface_tl.h
+++ b/src/mongo/executor/network_interface_tl.h
@@ -35,6 +35,7 @@
#include "mongo/db/service_context.h"
#include "mongo/executor/connection_pool.h"
#include "mongo/executor/network_interface.h"
+#include "mongo/logv2/log_severity.h"
#include "mongo/platform/mutex.h"
#include "mongo/rpc/metadata/metadata_hook.h"
#include "mongo/stdx/thread.h"
diff --git a/src/mongo/executor/task_executor_test_common.cpp b/src/mongo/executor/task_executor_test_common.cpp
index 21c7dfe19d4..2cede709253 100644
--- a/src/mongo/executor/task_executor_test_common.cpp
+++ b/src/mongo/executor/task_executor_test_common.cpp
@@ -86,8 +86,9 @@ public:
auto& entry = executorTestCaseRegistry()[name];
if (entry) {
LOGV2_FATAL(28713,
- "Multiple attempts to register ExecutorTest named {name}",
- "name"_attr = name);
+ "Multiple attempts to register ExecutorTest named {executor}",
+ "Multiple attempts to register ExecutorTest",
+ "executor"_attr = name);
}
entry = std::move(makeTest);
}
diff --git a/src/mongo/executor/thread_pool_task_executor.cpp b/src/mongo/executor/thread_pool_task_executor.cpp
index 0ca34258a0e..722830d6584 100644
--- a/src/mongo/executor/thread_pool_task_executor.cpp
+++ b/src/mongo/executor/thread_pool_task_executor.cpp
@@ -442,8 +442,9 @@ StatusWith<TaskExecutor::CallbackHandle> ThreadPoolTaskExecutor::scheduleRemoteC
const auto cbState = _networkInProgressQueue.back();
LOGV2_DEBUG(22607,
3,
- "Scheduling remote command request: {scheduledRequest}",
- "scheduledRequest"_attr = redact(scheduledRequest.toString()));
+ "Scheduling remote command request: {request}",
+ "Scheduling remote command request",
+ "request"_attr = redact(scheduledRequest.toString()));
lk.unlock();
auto commandStatus = _net->startCommand(
@@ -458,12 +459,12 @@ StatusWith<TaskExecutor::CallbackHandle> ThreadPoolTaskExecutor::scheduleRemoteC
if (_inShutdown_inlock()) {
return;
}
- LOGV2_DEBUG(
- 22608,
- 3,
- "Received remote response: {response_isOK_response_response_status_toString}",
- "response_isOK_response_response_status_toString"_attr =
- redact(response.isOK() ? response.toString() : response.status.toString()));
+ LOGV2_DEBUG(22608,
+ 3,
+ "Received remote response: {response}",
+ "Received remote response",
+ "response"_attr = redact(response.isOK() ? response.toString()
+ : response.status.toString()));
swap(cbState->callback, newCb);
scheduleIntoPool_inlock(&_networkInProgressQueue, cbState->iter, std::move(lk));
},
@@ -687,8 +688,9 @@ StatusWith<TaskExecutor::CallbackHandle> ThreadPoolTaskExecutor::scheduleExhaust
lk.unlock();
LOGV2_DEBUG(4495133,
3,
- "Scheduling exhaust remote command request: {scheduledRequest}",
- "scheduledRequest"_attr = redact(scheduledRequest.toString()));
+ "Scheduling exhaust remote command request: {request}",
+ "Scheduling exhaust remote command request",
+ "request"_attr = redact(scheduledRequest.toString()));
auto commandStatus = _net->startExhaustCommand(
swCbHandle.getValue(),
@@ -696,12 +698,12 @@ StatusWith<TaskExecutor::CallbackHandle> ThreadPoolTaskExecutor::scheduleExhaust
[this, scheduledRequest, cbState, cb, baton](const ResponseOnAnyStatus& response) {
using std::swap;
- LOGV2_DEBUG(
- 4495134,
- 3,
- "Received remote response: {response_isOK_response_response_status_toString}",
- "response_isOK_response_response_status_toString"_attr =
- redact(response.isOK() ? response.toString() : response.status.toString()));
+ LOGV2_DEBUG(4495134,
+ 3,
+ "Received remote response: {response}",
+ "Received remote response",
+ "response"_attr = redact(response.isOK() ? response.toString()
+ : response.status.toString()));
// The cbState remains in the '_networkInProgressQueue' for the entirety of the
// request's lifetime and is added to and removed from the '_poolInProgressQueue' each