author    Marcos José Grillo Ramírez <marcos.grillo@mongodb.com>  2020-03-17 16:29:21 +0100
committer Evergreen Agent <no-reply@evergreen.mongodb.com>        2020-03-17 15:42:50 +0000
commit    47b40d87be09c4fd220d8b8dba769aa39a2ba010 (patch)
tree      9eab45c501765f18a1ef2ea729612303ef767033 /src/mongo
parent    ce6a53e5dd0ea28b3ed71f647d4013cb9a9ad43e (diff)
download  mongo-47b40d87be09c4fd220d8b8dba769aa39a2ba010.tar.gz
Revert "SERVER-46799 Update sharding log lines to adhere to LOGV2 style guide Files checked: * src/mongo/s/commands/cluster_write_cmd.cpp * src/mongo/s/commands/strategy.cpp * src/mongo/db/kill_sessions_local.cpp * src/mongo/db/logical_session_cache_impl.cpp * src/mongo/db/session_catalog.cpp * src/mongo/db/session_catalog_mongod.cpp"
This reverts commit e4c445f8ea52a710fef4375f538083f958879dd7.
Diffstat (limited to 'src/mongo')
-rw-r--r--  src/mongo/db/kill_sessions_local.cpp         | 68
-rw-r--r--  src/mongo/db/logical_session_cache_impl.cpp  | 50
-rw-r--r--  src/mongo/db/session_catalog.cpp             |  5
-rw-r--r--  src/mongo/db/session_catalog_mongod.cpp      | 18
-rw-r--r--  src/mongo/s/commands/strategy.cpp            | 97
5 files changed, 110 insertions, 128 deletions
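
Every hunk below toggles between the same two LOGV2 call shapes, so it helps to see them side by side before reading the diff. The following is a minimal sketch, not part of the commit: it assumes the server tree's logv2 headers, and the helper function and its parameters are hypothetical stand-ins for the values used in the first hunk.

    #include <cstdint>

    // In the server tree, MONGO_LOGV2_DEFAULT_COMPONENT must be defined
    // before this include; elided here.
    #include "mongo/logv2/log.h"

    namespace mongo {
    // Hypothetical helper; 'sessionUuid' and 'txnNumber' stand in for
    // session.getSessionId().getId() and txnParticipant.getActiveTxnNumber().
    void logAbortExample(const UUID& sessionUuid, std::int64_t txnNumber) {
        // Style-guide shape (added by SERVER-46799, removed by this revert):
        // a format string with {placeholders}, a second stable short message
        // for structured consumers, and curated attribute names.
        LOGV2(20707,
              "Aborting transaction with session id {sessionId} and txnNumber {txnNumber} "
              "because it has been running for longer than 'transactionLifetimeLimitSeconds'",
              "Aborting transaction because it has been running for longer than "
              "'transactionLifetimeLimitSeconds'",
              "sessionId"_attr = sessionUuid,
              "txnNumber"_attr = txnNumber);

        // Pre-style-guide shape (restored by this revert): a single format
        // string whose attribute names are mechanically derived from the C++
        // expressions, e.g. txnParticipant.getActiveTxnNumber() becomes
        // {txnParticipant_getActiveTxnNumber}.
        LOGV2(20707,
              "Aborting transaction with txnNumber {txnParticipant_getActiveTxnNumber} on "
              "session {session_getSessionId_getId} because it has been running for "
              "longer than 'transactionLifetimeLimitSeconds'",
              "txnParticipant_getActiveTxnNumber"_attr = txnNumber,
              "session_getSessionId_getId"_attr = sessionUuid);
    }
    }  // namespace mongo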
diff --git a/src/mongo/db/kill_sessions_local.cpp b/src/mongo/db/kill_sessions_local.cpp
index ac92dafaa2c..2da3f3ea5cd 100644
--- a/src/mongo/db/kill_sessions_local.cpp
+++ b/src/mongo/db/kill_sessions_local.cpp
@@ -132,14 +132,14 @@ void killAllExpiredTransactions(OperationContext* opCtx) {
// was active and the session kill aborted it. We still want to log
// that as aborted due to transactionLifetimeLimitSessions.
if (txnParticipant.transactionIsInProgress() || txnParticipant.transactionIsAborted()) {
- LOGV2(
- 20707,
- "Aborting transaction with session id {sessionId} and txnNumber {txnNumber} "
- "because it has been running for longer than 'transactionLifetimeLimitSeconds'",
- "Aborting transaction because it has been running for longer than "
- "'transactionLifetimeLimitSeconds'",
- "sessionId"_attr = session.getSessionId().getId(),
- "txnNumber"_attr = txnParticipant.getActiveTxnNumber());
+ LOGV2(20707,
+ "Aborting transaction with txnNumber {txnParticipant_getActiveTxnNumber} on "
+ "session {session_getSessionId_getId} because it has been running for longer "
+ "than "
+ "'transactionLifetimeLimitSeconds'",
+ "txnParticipant_getActiveTxnNumber"_attr =
+ txnParticipant.getActiveTxnNumber(),
+ "session_getSessionId_getId"_attr = session.getSessionId().getId());
if (txnParticipant.transactionIsInProgress()) {
txnParticipant.abortTransaction(opCtx);
}
@@ -192,30 +192,34 @@ void yieldLocksForPreparedTransactions(OperationContext* opCtx) {
// to yield their locks.
SessionKiller::Matcher matcherAllSessions(
KillAllSessionsByPatternSet{makeKillAllSessionsByPattern(newOpCtx.get())});
- killSessionsAction(newOpCtx.get(),
- matcherAllSessions,
- [](const ObservableSession& session) {
- return TransactionParticipant::get(session).transactionIsPrepared();
- },
- [](OperationContext* killerOpCtx, const SessionToKill& session) {
- auto txnParticipant = TransactionParticipant::get(session);
- // Yield locks for prepared transactions. When scanning and killing
- // operations, all prepared transactions are included in the list. Even
- // though new sessions may be created after the scan, none of them can
- // become prepared during stepdown, since the RSTL has been enqueued,
- // preventing any new writes.
- if (txnParticipant.transactionIsPrepared()) {
- LOGV2_DEBUG(20708,
- 3,
- "Yielding locks of prepared transaction. SessionId: "
- "{sessionId} TxnNumber: {txnNumber}",
- "Yielding locks of prepared transaction",
- "sessionId"_attr = session.getSessionId().getId(),
- "txnNumber"_attr = txnParticipant.getActiveTxnNumber());
- txnParticipant.refreshLocksForPreparedTransaction(killerOpCtx, true);
- }
- },
- ErrorCodes::InterruptedDueToReplStateChange);
+ killSessionsAction(
+ newOpCtx.get(),
+ matcherAllSessions,
+ [](const ObservableSession& session) {
+ return TransactionParticipant::get(session).transactionIsPrepared();
+ },
+ [](OperationContext* killerOpCtx, const SessionToKill& session) {
+ auto txnParticipant = TransactionParticipant::get(session);
+ // Yield locks for prepared transactions.
+ // When scanning and killing operations, all prepared transactions are
+ // included in the
+ // list. Even though new sessions may be created after the scan, none of
+ // them can become
+ // prepared during stepdown, since the RSTL has been enqueued, preventing
+ // any new
+ // writes.
+ if (txnParticipant.transactionIsPrepared()) {
+ LOGV2_DEBUG(
+ 20708,
+ 3,
+ "Yielding locks of prepared transaction. SessionId: "
+ "{session_getSessionId_getId} TxnNumber: {txnParticipant_getActiveTxnNumber}",
+ "session_getSessionId_getId"_attr = session.getSessionId().getId(),
+ "txnParticipant_getActiveTxnNumber"_attr = txnParticipant.getActiveTxnNumber());
+ txnParticipant.refreshLocksForPreparedTransaction(killerOpCtx, true);
+ }
+ },
+ ErrorCodes::InterruptedDueToReplStateChange);
}
} // namespace mongo
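
The killSessionsAction call reflowed above is easier to follow with its argument roles spelled out. A condensed sketch of the same call, with the roles annotated (an in-tree fragment, not standalone code; behavior per the hunk above):

    killSessionsAction(
        newOpCtx.get(),
        matcherAllSessions,  // which sessions are eligible (here: all of them)
        [](const ObservableSession& session) {
            // Filter: act only on sessions holding a prepared transaction.
            return TransactionParticipant::get(session).transactionIsPrepared();
        },
        [](OperationContext* killerOpCtx, const SessionToKill& session) {
            // Action: yield (not release) the prepared transaction's locks so
            // they can be restored on step-up.
            auto txnParticipant = TransactionParticipant::get(session);
            if (txnParticipant.transactionIsPrepared()) {
                txnParticipant.refreshLocksForPreparedTransaction(killerOpCtx, true);
            }
        },
        ErrorCodes::InterruptedDueToReplStateChange);  // reason used to interrupt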
diff --git a/src/mongo/db/logical_session_cache_impl.cpp b/src/mongo/db/logical_session_cache_impl.cpp
index d112ac42dda..f3c162006b2 100644
--- a/src/mongo/db/logical_session_cache_impl.cpp
+++ b/src/mongo/db/logical_session_cache_impl.cpp
@@ -124,22 +124,18 @@ size_t LogicalSessionCacheImpl::size() {
void LogicalSessionCacheImpl::_periodicRefresh(Client* client) {
try {
_refresh(client);
- } catch (const DBException& ex) {
- LOGV2(
- 20710,
- "Failed to refresh session cache: {error}, will try again at the next refresh interval",
- "Failed to refresh session cache, will try again at the next refresh interval",
- "error"_attr = redact(ex));
+ } catch (...) {
+ LOGV2(20710,
+ "Failed to refresh session cache: {exceptionToStatus}, will try again at the next "
+ "refresh interval",
+ "exceptionToStatus"_attr = exceptionToStatus());
}
}
void LogicalSessionCacheImpl::_periodicReap(Client* client) {
auto res = _reap(client);
if (!res.isOK()) {
- LOGV2(20711,
- "Failed to reap transaction table: {error}",
- "Failed to reap transaction table",
- "error"_attr = redact(res));
+ LOGV2(20711, "Failed to reap transaction table: {res}", "res"_attr = res);
}
return;
@@ -176,18 +172,17 @@ Status LogicalSessionCacheImpl::_reap(Client* client) {
try {
_sessionsColl->checkSessionsCollectionExists(opCtx);
} catch (const DBException& ex) {
+ StringData notSetUpWarning =
+ "Sessions collection is not set up; "
+ "waiting until next sessions reap interval";
if (ex.code() != ErrorCodes::NamespaceNotFound ||
ex.code() != ErrorCodes::NamespaceNotSharded) {
- LOGV2(
- 20712,
- "Sessions collection is not set up: {error}; waiting until next sessions reap "
- "interval",
- "Sessions collection is not set up; waiting until next sessions reap interval",
- "error"_attr = redact(ex));
+ LOGV2(20712,
+ "{notSetUpWarning}: {ex_reason}",
+ "notSetUpWarning"_attr = notSetUpWarning,
+ "ex_reason"_attr = ex.reason());
} else {
- LOGV2(20713,
- "Sessions collection is not set up because the namespace is either not found"
- "or not sharded; waiting until next sessions reap interval");
+ LOGV2(20713, "{notSetUpWarning}", "notSetUpWarning"_attr = notSetUpWarning);
}
return Status::OK();
}
@@ -254,11 +249,10 @@ void LogicalSessionCacheImpl::_refresh(Client* client) {
try {
_sessionsColl->setupSessionsCollection(opCtx);
} catch (const DBException& ex) {
- LOGV2(
- 20714,
- "Failed to refresh session cache, will try again at the next refresh interval {error}",
- "Failed to refresh session cache, will try again at the next refresh interval",
- "error"_attr = redact(ex));
+ LOGV2(20714,
+ "Failed to refresh session cache, will try again at the next refresh "
+ "interval{causedBy_ex}",
+ "causedBy_ex"_attr = causedBy(redact(ex)));
return;
}
@@ -390,12 +384,8 @@ Status LogicalSessionCacheImpl::_addToCacheIfNotFull(WithLock, LogicalSessionRec
MONGO_GET_LIMITED_SEVERITY(ErrorCodes::TooManyLogicalSessions, Seconds{1}, 0, 2);
LOGV2_DEBUG(20715,
logSeverityV1toV2(severity).toInt(),
- "Unable to add session {sessionId} into the cache, too many active sessions: "
- "{sessionCount}, maximum: {maxSessions}",
- "Unable to add session into the cache, too many active sessions",
- "sessionId"_attr = record.getId(),
- "sessionCount"_attr = _activeSessions.size(),
- "maxSessions"_attr = maxSessions);
+ "{status}",
+ "status"_attr = status.toString());
return status;
}
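
The error-handling swap in this file changes more than message text. A sketch of the two catch blocks side by side, with the difference annotated (fragments from the first hunk above; redact() and exceptionToStatus() are the in-tree helpers the diff itself uses):

    try {
        _refresh(client);
    } catch (const DBException& ex) {
        // Removed form: typed catch, and redact(ex) scrubs potentially
        // sensitive detail from the logged error.
        LOGV2(20710,
              "Failed to refresh session cache, will try again at the next refresh interval",
              "error"_attr = redact(ex));
    }

    try {
        _refresh(client);
    } catch (...) {
        // Restored form: catch-all converted to a Status; note the reason
        // string is logged without redaction.
        LOGV2(20710,
              "Failed to refresh session cache: {exceptionToStatus}, will try again at the "
              "next refresh interval",
              "exceptionToStatus"_attr = exceptionToStatus());
    }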
diff --git a/src/mongo/db/session_catalog.cpp b/src/mongo/db/session_catalog.cpp
index 5c897dbd00a..d86b93e0f01 100644
--- a/src/mongo/db/session_catalog.cpp
+++ b/src/mongo/db/session_catalog.cpp
@@ -155,9 +155,8 @@ void SessionCatalog::scanSessions(const SessionKiller::Matcher& matcher,
LOGV2_DEBUG(21976,
2,
- "Scanning {sessionCount} sessions",
- "Scanning sessions",
- "sessionCount"_attr = _sessions.size());
+ "Beginning scanSessions. Scanning {sessions_size} sessions.",
+ "sessions_size"_attr = _sessions.size());
for (auto it = _sessions.begin(); it != _sessions.end(); ++it) {
if (matcher.match(it->first)) {
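
LOGV2_DEBUG takes a verbosity level as its second argument; the line above emits only when the relevant component's log level is at least 2. A minimal fragment showing the call shape (values mirror the hunk):

    LOGV2_DEBUG(21976,  // stable log id
                2,      // required debug verbosity
                "Beginning scanSessions. Scanning {sessions_size} sessions.",
                "sessions_size"_attr = _sessions.size());

Such lines only appear after raising verbosity, e.g. with db.setLogLevel(2) from the shell.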
diff --git a/src/mongo/db/session_catalog_mongod.cpp b/src/mongo/db/session_catalog_mongod.cpp
index a44d7262b3f..8c5c1a4ddec 100644
--- a/src/mongo/db/session_catalog_mongod.cpp
+++ b/src/mongo/db/session_catalog_mongod.cpp
@@ -207,10 +207,10 @@ void abortInProgressTransactions(OperationContext* opCtx) {
auto txnParticipant = TransactionParticipant::get(opCtx);
LOGV2_DEBUG(21978,
3,
- "Aborting transaction sessionId: {sessionId} txnNumber {txnNumber}",
- "Aborting transaction",
- "sessionId"_attr = txnRecord.getSessionId().toBSON(),
- "txnNumber"_attr = txnRecord.getTxnNum());
+ "Aborting transaction sessionId: {txnRecord_getSessionId} txnNumber "
+ "{txnRecord_getTxnNum}",
+ "txnRecord_getSessionId"_attr = txnRecord.getSessionId().toBSON(),
+ "txnRecord_getTxnNum"_attr = txnRecord.getTxnNum());
txnParticipant.abortTransaction(opCtx);
}
}
@@ -254,11 +254,11 @@ void MongoDSessionCatalog::onStepUp(OperationContext* opCtx) {
auto txnParticipant = TransactionParticipant::get(newOpCtx.get());
LOGV2_DEBUG(21979,
3,
- "Restoring locks of prepared transaction. SessionId: {sessionId} "
- "TxnNumber: {txnNumber}",
- "Restoring locks of prepared transaction",
- "sessionId"_attr = sessionId.getId(),
- "txnNumber"_attr = txnParticipant.getActiveTxnNumber());
+ "Restoring locks of prepared transaction. SessionId: {sessionId_getId} "
+ "TxnNumber: {txnParticipant_getActiveTxnNumber}",
+ "sessionId_getId"_attr = sessionId.getId(),
+ "txnParticipant_getActiveTxnNumber"_attr =
+ txnParticipant.getActiveTxnNumber());
txnParticipant.refreshLocksForPreparedTransaction(newOpCtx.get(), false);
}
}
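
Taken together, the two hunks in this file describe the step-up policy for transaction state: unprepared in-progress transactions are aborted, while prepared transactions survive and only reacquire their locks. A condensed sketch (fragments from the hunks above; the surrounding loops are elided):

    // abortInProgressTransactions: anything still merely in progress is aborted.
    LOGV2_DEBUG(21978, 3,
                "Aborting transaction sessionId: {txnRecord_getSessionId} txnNumber "
                "{txnRecord_getTxnNum}",
                "txnRecord_getSessionId"_attr = txnRecord.getSessionId().toBSON(),
                "txnRecord_getTxnNum"_attr = txnRecord.getTxnNum());
    txnParticipant.abortTransaction(opCtx);

    // onStepUp: prepared transactions instead get their locks restored; the
    // final 'false' distinguishes restore-on-step-up from yield-on-step-down
    // (which passes 'true', as in kill_sessions_local.cpp above).
    LOGV2_DEBUG(21979, 3,
                "Restoring locks of prepared transaction. SessionId: {sessionId_getId} "
                "TxnNumber: {txnParticipant_getActiveTxnNumber}",
                "sessionId_getId"_attr = sessionId.getId(),
                "txnParticipant_getActiveTxnNumber"_attr =
                    txnParticipant.getActiveTxnNumber());
    txnParticipant.refreshLocksForPreparedTransaction(newOpCtx.get(), false);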
diff --git a/src/mongo/s/commands/strategy.cpp b/src/mongo/s/commands/strategy.cpp
index befba35ecd2..3ffecbf076f 100644
--- a/src/mongo/s/commands/strategy.cpp
+++ b/src/mongo/s/commands/strategy.cpp
@@ -145,18 +145,16 @@ void appendRequiredFieldsToResponse(OperationContext* opCtx, BSONObjBuilder* res
if (operationTime != LogicalTime::kUninitialized) {
LOGV2_DEBUG(22764,
5,
- "Appending operationTime: {operationTime}",
- "Appending operationTime",
- "operationTime"_attr = operationTime.asTimestamp());
+ "Appending operationTime: {operationTime_asTimestamp}",
+ "operationTime_asTimestamp"_attr = operationTime.asTimestamp());
responseBuilder->append(kOperationTime, operationTime.asTimestamp());
} else if (now != LogicalTime::kUninitialized) {
// If we don't know the actual operation time, use the cluster time instead. This is
// safe but not optimal because we can always return a later operation time than actual.
LOGV2_DEBUG(22765,
5,
- "Appending clusterTime as operationTime {clusterTime}",
- "Appending clusterTime as operationTime",
- "clusterTime"_attr = now.asTimestamp());
+ "Appending clusterTime as operationTime {now_asTimestamp}",
+ "now_asTimestamp"_attr = now.asTimestamp());
responseBuilder->append(kOperationTime, now.asTimestamp());
}
@@ -466,12 +464,12 @@ void runCommand(OperationContext* opCtx,
.getDefaultWriteConcern(opCtx)) {
wc = *wcDefault;
customDefaultWriteConcernWasApplied = true;
- LOGV2_DEBUG(22766,
- 2,
- "Applying default writeConcern on {command} of {writeConcern}",
- "Applying default writeConcern on command",
- "command"_attr = request.getCommandName(),
- "writeConcern"_attr = *wcDefault);
+ LOGV2_DEBUG(
+ 22766,
+ 2,
+ "Applying default writeConcern on {request_getCommandName} of {wcDefault}",
+ "request_getCommandName"_attr = request.getCommandName(),
+ "wcDefault"_attr = wcDefault->toBSON());
}
}
@@ -533,10 +531,11 @@ void runCommand(OperationContext* opCtx,
customDefaultReadConcernWasApplied = true;
LOGV2_DEBUG(22767,
2,
- "Applying default readConcern on {command} of {readConcern}",
- "Applying default readConcern on command",
- "command"_attr = invocation->definition()->getName(),
- "readConcern"_attr = *rcDefault);
+ "Applying default readConcern on {invocation_definition_getName} "
+ "of {rcDefault}",
+ "invocation_definition_getName"_attr =
+ invocation->definition()->getName(),
+ "rcDefault"_attr = *rcDefault);
// Update the readConcernSupport, since the default RC was applied.
readConcernSupport =
invocation->supportsReadConcern(readConcernArgs.getLevel());
@@ -892,12 +891,11 @@ DbResponse Strategy::queryOp(OperationContext* opCtx, const NamespaceString& nss
LOGV2_DEBUG(22768,
3,
- "Query: {namespace} {query} ntoreturn: {ntoreturn} options: {queryOptions}",
- "Query",
- "namespace"_attr = q.ns,
- "query"_attr = redact(q.query),
- "ntoreturn"_attr = q.ntoreturn,
- "queryOptions"_attr = q.queryOptions);
+ "query: {q_ns} {q_query} ntoreturn: {q_ntoreturn} options: {q_queryOptions}",
+ "q_ns"_attr = q.ns,
+ "q_query"_attr = redact(q.query),
+ "q_ntoreturn"_attr = q.ntoreturn,
+ "q_queryOptions"_attr = q.queryOptions);
if (q.queryOptions & QueryOption_Exhaust) {
uasserted(18526,
@@ -997,9 +995,8 @@ DbResponse Strategy::clientCommand(OperationContext* opCtx, const Message& m) {
LOGV2_DEBUG(22769,
1,
- "Exception thrown while parsing command {error}",
- "Exception thrown while parsing command",
- "error"_attr = redact(ex));
+ "Exception thrown while parsing command {causedBy_ex}",
+ "causedBy_ex"_attr = causedBy(redact(ex)));
throw;
}
}();
@@ -1017,26 +1014,23 @@ DbResponse Strategy::clientCommand(OperationContext* opCtx, const Message& m) {
try {
LOGV2_DEBUG(22770,
3,
- "Command begin db: {db} msg id: {headerId}",
- "Command begin",
+ "Command begin db: {db} msg id: {m_header_getId}",
"db"_attr = db,
- "headerId"_attr = m.header().getId());
+ "m_header_getId"_attr = m.header().getId());
runCommand(opCtx, request, m.operation(), reply.get(), &errorBuilder);
LOGV2_DEBUG(22771,
3,
- "Command end db: {db} msg id: {headerId}",
- "Command end",
+ "Command end db: {db} msg id: {m_header_getId}",
"db"_attr = db,
- "headerId"_attr = m.header().getId());
+ "m_header_getId"_attr = m.header().getId());
} catch (const DBException& ex) {
- LOGV2_DEBUG(
- 22772,
- 1,
- "Exception thrown while processing command on {db} msg id: {headerId} {error}",
- "Exception thrown while processing command",
- "db"_attr = db,
- "headerId"_attr = m.header().getId(),
- "error"_attr = redact(ex));
+ LOGV2_DEBUG(22772,
+ 1,
+ "Exception thrown while processing command on {db} msg id: "
+ "{m_header_getId}{causedBy_ex}",
+ "db"_attr = db,
+ "m_header_getId"_attr = m.header().getId(),
+ "causedBy_ex"_attr = causedBy(redact(ex)));
// Record the exception in CurOp.
CurOp::get(opCtx)->debug().errInfo = ex.toStatus();
@@ -1175,8 +1169,7 @@ void Strategy::killCursors(OperationContext* opCtx, DbMessage* dbm) {
if (!nss) {
LOGV2_DEBUG(22773,
3,
- "Can't find cursor to kill, no namespace found. Cursor id: {cursorId}",
- "Can't find cursor to kill, no namespace found",
+ "Can't find cursor to kill. Cursor id: {cursorId}.",
"cursorId"_attr = cursorId);
continue;
}
@@ -1191,30 +1184,26 @@ void Strategy::killCursors(OperationContext* opCtx, DbMessage* dbm) {
LOGV2_DEBUG(
22774,
3,
- "Not authorized to kill cursor. Namespace: '{namespace}', cursor id: {cursorId}",
- "Not authorized to kill cursor",
- "namespace"_attr = *nss,
+ "Not authorized to kill cursor. Namespace: '{nss}', cursor id: {cursorId}.",
+ "nss"_attr = *nss,
"cursorId"_attr = cursorId);
continue;
}
Status killCursorStatus = manager->killCursor(opCtx, *nss, cursorId);
if (!killCursorStatus.isOK()) {
- LOGV2_DEBUG(
- 22775,
- 3,
- "Can't find cursor to kill. Namespace: '{namespace}', cursor id: {cursorId}",
- "Can't find cursor to kill",
- "namespace"_attr = *nss,
- "cursorId"_attr = cursorId);
+ LOGV2_DEBUG(22775,
+ 3,
+ "Can't find cursor to kill. Namespace: '{nss}', cursor id: {cursorId}.",
+ "nss"_attr = *nss,
+ "cursorId"_attr = cursorId);
continue;
}
LOGV2_DEBUG(22776,
3,
- "Killed cursor. Namespace: '{namespace}', cursor id: {cursorId}",
- "Killed cursor",
- "namespace"_attr = *nss,
+ "Killed cursor. Namespace: '{nss}', cursor id: {cursorId}.",
+ "nss"_attr = *nss,
"cursorId"_attr = cursorId);
}
}
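
Several restored lines in this file splice the exception into the format string with no separating space, as in "msg id: {m_header_getId}{causedBy_ex}". That reads oddly but appears deliberate: causedBy() returns a string roughly of the form " :: caused by :: <reason>", carrying its own separator. A fragment showing the pattern (from the clientCommand hunk above; the tail of the catch block is elided):

    } catch (const DBException& ex) {
        // causedBy(redact(ex)) expands to roughly " :: caused by :: <reason>",
        // which is why the format string has no space before {causedBy_ex}.
        LOGV2_DEBUG(22772, 1,
                    "Exception thrown while processing command on {db} msg id: "
                    "{m_header_getId}{causedBy_ex}",
                    "db"_attr = db,
                    "m_header_getId"_attr = m.header().getId(),
                    "causedBy_ex"_attr = causedBy(redact(ex)));
        // Record the exception in CurOp as well, so it surfaces in currentOp
        // and the slow-query log.
        CurOp::get(opCtx)->debug().errInfo = ex.toStatus();
        // (error response handling continues)
    }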