-rw-r--r--   jstests/libs/logv2_helpers.js                        |  26
-rw-r--r--   jstests/noPassthrough/client_metadata_log.js         |  23
-rw-r--r--   jstests/noPassthrough/client_metadata_slowlog.js     |  29
-rw-r--r--   jstests/noPassthrough/client_metadata_slowlog_rs.js  |  31
-rw-r--r--   src/mongo/db/curop.cpp                               | 185
-rw-r--r--   src/mongo/db/curop.h                                 |   7
-rw-r--r--   src/mongo/db/repl/oplog_applier_impl.cpp             |  33
-rw-r--r--   src/mongo/db/s/transaction_coordinator.cpp           |  79
-rw-r--r--   src/mongo/db/transaction_participant.cpp             |  92
-rw-r--r--   src/mongo/db/transaction_participant.h               |   7
-rw-r--r--   src/mongo/logv2/attribute_storage.h                  |  47
-rw-r--r--   src/mongo/logv2/json_formatter.cpp                   |  14
-rw-r--r--   src/mongo/logv2/log_test_v2.cpp                      |   8
-rw-r--r--   src/mongo/logv2/plain_formatter.cpp                  |   8
-rw-r--r--   src/mongo/rpc/metadata/client_metadata.cpp           |   9
-rw-r--r--   src/mongo/s/transaction_router.cpp                   |  88
16 files changed, 615 insertions(+), 71 deletions(-)
diff --git a/jstests/libs/logv2_helpers.js b/jstests/libs/logv2_helpers.js
new file mode 100644
index 00000000000..0247e6e83d8
--- /dev/null
+++ b/jstests/libs/logv2_helpers.js
@@ -0,0 +1,26 @@
+// logv2_helper.js
+
+function isJsonLogNoConn() {
+    if (typeof TestData !== 'undefined' && typeof TestData.logFormat !== 'undefined') {
+        return TestData["logFormat"] == "json";
+    }
+
+    return false;
+}
+
+function isJsonLog(conn) {
+    if (typeof TestData !== 'undefined' && typeof TestData.logFormat !== 'undefined') {
+        return TestData["logFormat"] == "json";
+    }
+
+    const opts = assert.commandWorked(conn.getDB("admin").runCommand({"getCmdLineOpts": 1}));
+
+    print(tojson(opts));
+
+    const parsed = opts["parsed"];
+    if (parsed.hasOwnProperty("systemLog") && parsed["systemLog"].hasOwnProperty("logFormat")) {
+        return parsed["systemLog"]["logFormat"] == "json";
+    }
+
+    return false;
+}
diff --git a/jstests/noPassthrough/client_metadata_log.js b/jstests/noPassthrough/client_metadata_log.js
index d2662d8905d..3a999436021 100644
--- a/jstests/noPassthrough/client_metadata_log.js
+++ b/jstests/noPassthrough/client_metadata_log.js
@@ -2,6 +2,8 @@
  * Test that verifies client metadata is logged into log file on new connections.
  * @tags: [requires_sharding]
  */
+load("jstests/libs/logv2_helpers.js");
+
 (function() {
 'use strict';
 
@@ -12,13 +14,20 @@ let checkLog = function(conn) {
     print(`Checking ${conn.fullOptions.logFile} for client metadata message`);
     let log = cat(conn.fullOptions.logFile);
 
-    assert(
-        /received client metadata from .*: { application: { name: ".*" }, driver: { name: ".*", version: ".*" }, os: { type: ".*", name: ".*", architecture: ".*", version: ".*" } }/
-            .test(log),
-        "'received client metadata' log line missing in log file!\n" +
-            "Log file contents: " + conn.fullOptions.logFile +
-            "\n************************************************************\n" + log +
-            "\n************************************************************");
+    let predicate = null;
+    if (isJsonLog(conn)) {
+        predicate =
+            /received client metadata from .*:.*"doc":{"application":{"name":".*"},"driver":{"name":".*","version":".*"},"os":{"type":".*","name":".*","architecture":".*","version":".*"}}/;
+    } else {
+        predicate =
+            /received client metadata from .*: {"application":{"name":".*"},"driver":{"name":".*","version":".*"},"os":{"type":".*","name":".*","architecture":".*","version":".*"}}/;
+    }
+
+    assert(predicate.test(log),
+           "'received client metadata' log line missing in log file!\n" +
+               "Log file contents: " + conn.fullOptions.logFile +
+               "\n************************************************************\n" + log +
+               "\n************************************************************");
 };
 
 // Test MongoD
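A note on why the JSON-mode predicate anchors on a nested "doc" object: with logv2, each `_attr` name becomes a key inside the record's attribute section, so the metadata document that client_metadata.cpp (later in this diff) passes as `"doc"_attr` is emitted as a JSON subobject named `doc`. The sketch below is illustrative only; the log ID is arbitrary and the envelope shown in the comment is an assumption about the JSON framing, not captured server output.

```cpp
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault

#include "mongo/bson/bsonobjbuilder.h"
#include "mongo/logv2/log.h"

namespace mongo {

// Hypothetical example: log a small metadata-like document under a "doc" attribute.
void logDocAttributeExample() {
    BSONObjBuilder b;
    BSONObjBuilder app(b.subobjStart("application"));
    app.append("name", "MongoDB Shell");
    app.doneFast();

    // In JSON log mode the attribute section would contain roughly:
    //   "attr":{"doc":{"application":{"name":"MongoDB Shell"}}}
    // which is the shape the updated test regex anchors on ("doc":{"application":{...}}).
    LOGV2(99990, "client metadata example {doc}", "doc"_attr = b.obj());
}

}  // namespace mongo
```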
diff --git a/jstests/noPassthrough/client_metadata_slowlog.js b/jstests/noPassthrough/client_metadata_slowlog.js
index 5ce55a163ac..adbbc64127f 100644
--- a/jstests/noPassthrough/client_metadata_slowlog.js
+++ b/jstests/noPassthrough/client_metadata_slowlog.js
@@ -1,10 +1,8 @@
 /**
  * Test that verifies client metadata is logged as part of slow query logging in MongoD.
- * TODO: Handle JSON logs. See SERVER-45140
- * @tags: [
- *     requires_text_logs,
- * ]
  */
+load("jstests/libs/logv2_helpers.js");
+
 (function() {
 'use strict';
 
@@ -25,13 +23,22 @@ assert.eq(count, 1, "expected 1 document");
 print(`Checking ${conn.fullOptions.logFile} for client metadata message`);
 let log = cat(conn.fullOptions.logFile);
 
-assert(
-    /COMMAND .* command test.foo appName: "MongoDB Shell" command: count { count: "foo", query: { \$where: function\(\)/
-        .test(log),
-    "'slow query' log line missing in mongod log file!\n" +
-        "Log file contents: " + conn.fullOptions.logFile +
-        "\n************************************************************\n" + log +
-        "\n************************************************************");
+let predicate = null;
+if (isJsonLog(conn)) {
+    predicate =
+        /slow query.*test.foo.*"appName":"MongoDB Shell".*"command":{"count":"foo","query":{"\$where":{"\$code":"function\(\)/;
+} else {
+    predicate =
+        /COMMAND .* command test.foo appName: "MongoDB Shell" command: count { count: "foo", query: { \$where: function\(\)/;
+}
+
+// Dump the log line by line to avoid log truncation
+for (var a of log.split("\n")) {
+    print("LOG_FILE_ENTRY: " + a);
+}
+assert(predicate.test(log),
+       "'slow query' log line missing in mongod log file!\n" +
+           "Log file contents: " + conn.fullOptions.logFile);
 
 MongoRunner.stopMongod(conn);
 })();
diff --git a/jstests/noPassthrough/client_metadata_slowlog_rs.js b/jstests/noPassthrough/client_metadata_slowlog_rs.js
index 7d124f462c3..feed4dde999 100644
--- a/jstests/noPassthrough/client_metadata_slowlog_rs.js
+++ b/jstests/noPassthrough/client_metadata_slowlog_rs.js
@@ -2,21 +2,36 @@
  * Test that verifies client metadata is logged as part of slow query logging in MongoD in a replica
  * set.
  * TODO: Handle JSON logs. See SERVER-45140
- * @tags: [requires_replication, requires_text_logs]
+ * @tags: [requires_replication]
  */
+load("jstests/libs/logv2_helpers.js");
+
 (function() {
 'use strict';
 
 let checkLog = function(conn) {
     print(`Checking ${conn.fullOptions.logFile} for client metadata message`);
     let log = cat(conn.fullOptions.logFile);
-    assert(
-        /COMMAND .* command test.foo appName: "MongoDB Shell" command: find { find: "foo", filter: { \$where: function\(\)/
-            .test(log),
-        "'slow query' log line missing in mongod log file!\n" +
-            "Log file contents: " + conn.fullOptions.logFile +
-            "\n************************************************************\n" + log +
-            "\n************************************************************");
+
+    let predicate = null;
+    if (isJsonLog(conn)) {
+        predicate =
+            /slow query.*test.foo.*"appName":"MongoDB Shell".*"command":{"find":"foo","filter":{"\$where":{"\$code":"function\(\)/;
+    } else {
+        predicate =
+            /COMMAND .* command test.foo appName: "MongoDB Shell" command: find { find: "foo", filter: { \$where: function\(\)/;
+    }
+
+    // Dump the log line by line to avoid log truncation
+    for (var a of log.split("\n")) {
+        print("LOG_FILE_ENTRY: " + a);
+    }
+
+    assert(predicate.test(log),
+           "'slow query' log line missing in mongod log file!\n" +
+               "Log file contents: " + conn.fullOptions.logFile +
+               "\n************************************************************\n" + log +
+               "\n************************************************************");
 };
 
 let options = {
diff --git a/src/mongo/db/curop.cpp b/src/mongo/db/curop.cpp
index 96ded269c07..6842bec55fa 100644
--- a/src/mongo/db/curop.cpp
+++ b/src/mongo/db/curop.cpp
@@ -490,7 +490,13 @@ bool CurOp::completeAndLogOperation(OperationContext* opCtx,
             _debug.prepareConflictDurationMillis =
                 duration_cast<Milliseconds>(prepareConflictDurationMicros);
 
-        log(component) << _debug.report(opCtx, (lockerInfo ? &lockerInfo->stats : nullptr));
+        if (serverGlobalParams.logFormat == logv2::LogFormat::kJson) {
+            logv2::DynamicAttributes attr;
+            _debug.report(opCtx, (lockerInfo ? &lockerInfo->stats : nullptr), &attr);
+            LOGV2(51803, "slow query", attr);
+        } else {
+            log(component) << _debug.report(opCtx, (lockerInfo ? &lockerInfo->stats : nullptr));
+        }
     }
 
     // Return 'true' if this operation should also be added to the profiler.
@@ -821,6 +827,170 @@ string OpDebug::report(OperationContext* opCtx, const SingleThreadedLockStats* l
     return s.str();
 }
 
+#define OPDEBUG_TOATTR_HELP(x) \
+    if (x >= 0)                \
+        pAttrs->add(#x, x)
+#define OPDEBUG_TOATTR_HELP_BOOL(x) \
+    if (x)                          \
+        pAttrs->add(#x, x)
+#define OPDEBUG_TOATTR_HELP_ATOMIC(x, y) \
+    if (auto __y = y.load(); __y > 0)    \
+        pAttrs->add(x, __y)
+#define OPDEBUG_TOATTR_HELP_OPTIONAL(x, y) \
+    if (y)                                 \
+        pAttrs->add(x, *y)
+
+void OpDebug::report(OperationContext* opCtx,
+                     const SingleThreadedLockStats* lockStats,
+                     logv2::DynamicAttributes* pAttrs) const {
+    Client* client = opCtx->getClient();
+    auto& curop = *CurOp::get(opCtx);
+    auto flowControlStats = opCtx->lockState()->getFlowControlStats();
+
+    if (iscommand) {
+        pAttrs->add("type", "command");
+    } else {
+        pAttrs->add("type", networkOpToString(networkOp));
+    }
+
+    pAttrs->addDeepCopy("ns", curop.getNS());
+
+    const auto& clientMetadata = ClientMetadataIsMasterState::get(client).getClientMetadata();
+    if (clientMetadata) {
+        StringData appName = clientMetadata.get().getApplicationName();
+        if (!appName.empty()) {
+            pAttrs->add("appName", appName);
+        }
+    }
+
+    auto query = appendCommentField(opCtx, curop.opDescription());
+    if (!query.isEmpty()) {
+        if (iscommand) {
+            const Command* curCommand = curop.getCommand();
+            if (curCommand) {
+                mutablebson::Document cmdToLog(query, mutablebson::Document::kInPlaceDisabled);
+                curCommand->snipForLogging(&cmdToLog);
+                pAttrs->add("command", redact(cmdToLog.getObject()));
+            } else {
+                // Should not happen but we need to handle curCommand == NULL gracefully.
+                // We don't know what the request payload is intended to be, so it might be
+                // sensitive, and we don't know how to redact it properly without a 'Command*'.
+                // So we just don't log it at all.
+                pAttrs->add("command", "unrecognized");
+            }
+        } else {
+            pAttrs->add("command", redact(query));
+        }
+    }
+
+    auto originatingCommand = curop.originatingCommand();
+    if (!originatingCommand.isEmpty()) {
+        pAttrs->add("originatingCommand", redact(originatingCommand));
+    }
+
+    if (!curop.getPlanSummary().empty()) {
+        pAttrs->addDeepCopy("planSummary", curop.getPlanSummary().toString());
+    }
+
+    if (prepareConflictDurationMillis > Milliseconds::zero()) {
+        pAttrs->add("prepareConflictDuration", prepareConflictDurationMillis);
+    }
+
+    if (dataThroughputLastSecond) {
+        pAttrs->add("dataThroughputLastSecondMBperSec", *dataThroughputLastSecond);
+    }
+
+    if (dataThroughputAverage) {
+        pAttrs->add("dataThroughputAverageMBPerSec", *dataThroughputAverage);
+    }
+
+ pAttrs->add("command", "unrecognized"); + } + } else { + pAttrs->add("command", redact(query)); + } + } + + auto originatingCommand = curop.originatingCommand(); + if (!originatingCommand.isEmpty()) { + pAttrs->add("originatingCommand", redact(originatingCommand)); + } + + if (!curop.getPlanSummary().empty()) { + pAttrs->addDeepCopy("planSummary", curop.getPlanSummary().toString()); + } + + if (prepareConflictDurationMillis > Milliseconds::zero()) { + pAttrs->add("prepareConflictDuration", prepareConflictDurationMillis); + } + + if (dataThroughputLastSecond) { + pAttrs->add("dataThroughputLastSecondMBperSec", *dataThroughputLastSecond); + } + + if (dataThroughputAverage) { + pAttrs->add("dataThroughputAverageMBPerSec", *dataThroughputAverage); + } + + OPDEBUG_TOATTR_HELP(nShards); + OPDEBUG_TOATTR_HELP(cursorid); + if (mongotCursorId) { + pAttrs->add("mongot", makeMongotDebugStatsObject()); + } + OPDEBUG_TOATTR_HELP(ntoreturn); + OPDEBUG_TOATTR_HELP(ntoskip); + OPDEBUG_TOATTR_HELP_BOOL(exhaust); + + OPDEBUG_TOATTR_HELP_OPTIONAL("keysExamined", additiveMetrics.keysExamined); + OPDEBUG_TOATTR_HELP_OPTIONAL("docsExamined", additiveMetrics.docsExamined); + OPDEBUG_TOATTR_HELP_BOOL(hasSortStage); + OPDEBUG_TOATTR_HELP_BOOL(usedDisk); + OPDEBUG_TOATTR_HELP_BOOL(fromMultiPlanner); + if (replanReason) { + bool replanned = true; + OPDEBUG_TOATTR_HELP_BOOL(replanned); + pAttrs->add("replanReason", redact(*replanReason)); + } + OPDEBUG_TOATTR_HELP_OPTIONAL("nMatched", additiveMetrics.nMatched); + OPDEBUG_TOATTR_HELP_OPTIONAL("nModified", additiveMetrics.nModified); + OPDEBUG_TOATTR_HELP_OPTIONAL("ninserted", additiveMetrics.ninserted); + OPDEBUG_TOATTR_HELP_OPTIONAL("ndeleted", additiveMetrics.ndeleted); + OPDEBUG_TOATTR_HELP_BOOL(upsert); + OPDEBUG_TOATTR_HELP_BOOL(cursorExhausted); + + OPDEBUG_TOATTR_HELP_OPTIONAL("keysInserted", additiveMetrics.keysInserted); + OPDEBUG_TOATTR_HELP_OPTIONAL("keysDeleted", additiveMetrics.keysDeleted); + OPDEBUG_TOATTR_HELP_ATOMIC("prepareReadConflicts", additiveMetrics.prepareReadConflicts); + OPDEBUG_TOATTR_HELP_ATOMIC("writeConflicts", additiveMetrics.writeConflicts); + + pAttrs->add("numYields", curop.numYields()); + OPDEBUG_TOATTR_HELP(nreturned); + + if (queryHash) { + pAttrs->addDeepCopy("queryHash", unsignedIntToFixedLengthHex(*queryHash)); + invariant(planCacheKey); + pAttrs->addDeepCopy("planCacheKey", unsignedIntToFixedLengthHex(*planCacheKey)); + } + + if (!errInfo.isOK()) { + pAttrs->add("ok", 0); + if (!errInfo.reason().empty()) { + pAttrs->add("errMsg", redact(errInfo.reason())); + } + pAttrs->add("errName", errInfo.code()); + pAttrs->add("errCode", static_cast<int>(errInfo.code())); + } + + if (responseLength > 0) { + pAttrs->add("reslen", responseLength); + } + + if (lockStats) { + BSONObjBuilder locks; + lockStats->report(&locks); + pAttrs->add("locks", locks.obj()); + } + + BSONObj flowControlObj = makeFlowControlObject(flowControlStats); + if (flowControlObj.nFields() > 0) { + pAttrs->add("flowControl", flowControlObj); + } + + { + const auto& readConcern = repl::ReadConcernArgs::get(opCtx); + if (readConcern.isSpecified()) { + pAttrs->add("readConcern", readConcern.toBSONInner()); + } + } + + if (writeConcern && !writeConcern->usedDefault) { + pAttrs->add("writeConcern", writeConcern->toBSON()); + } + + if (storageStats) { + pAttrs->add("storage", storageStats->toBSON()); + } + + if (iscommand) { + pAttrs->add("protocol", getProtoString(networkOp)); + } + + pAttrs->add("durationMillis", (executionTimeMicros / 1000)); +} + + #define 
diff --git a/src/mongo/db/curop.h b/src/mongo/db/curop.h
index ba7532fe86d..f563cb05ed8 100644
--- a/src/mongo/db/curop.h
+++ b/src/mongo/db/curop.h
@@ -36,6 +36,7 @@
 #include "mongo/db/operation_context.h"
 #include "mongo/db/server_options.h"
 #include "mongo/db/write_concern_options.h"
+#include "mongo/logv2/attribute_storage.h"
 #include "mongo/platform/atomic_word.h"
 #include "mongo/util/diagnostic_info.h"
 #include "mongo/util/progress_meter.h"
@@ -117,6 +118,8 @@ public:
      */
    std::string report() const;
 
+    void report(logv2::DynamicAttributes* pAttrs) const;
+
    boost::optional<long long> keysExamined;
    boost::optional<long long> docsExamined;
 
@@ -146,6 +149,10 @@ public:
 
    std::string report(OperationContext* opCtx, const SingleThreadedLockStats* lockStats) const;
 
+    void report(OperationContext* opCtx,
+                const SingleThreadedLockStats* lockStats,
+                logv2::DynamicAttributes* pAttrs) const;
+
    /**
     * Appends information about the current operation to "builder"
     *
diff --git a/src/mongo/db/repl/oplog_applier_impl.cpp b/src/mongo/db/repl/oplog_applier_impl.cpp
index e54bc128219..e801962d0df 100644
--- a/src/mongo/db/repl/oplog_applier_impl.cpp
+++ b/src/mongo/db/repl/oplog_applier_impl.cpp
@@ -113,20 +113,35 @@ Status finishAndLogApply(OperationContext* opCtx,
                                         Milliseconds(opDuration),
                                         Milliseconds(serverGlobalParams.slowMS))
                 .first) {
+            if (serverGlobalParams.logFormat == logv2::LogFormat::kJson) {
-            StringBuilder s;
-            s << "applied op: ";
+                logv2::DynamicAttributes attrs;
 
-            if (entryOrGroupedInserts.getOp().getOpType() == OpTypeEnum::kCommand) {
-                s << "command ";
+                auto redacted = redact(entryOrGroupedInserts.toBSON());
+                if (entryOrGroupedInserts.getOp().getOpType() == OpTypeEnum::kCommand) {
+                    attrs.add("command", redacted);
+                } else {
+                    attrs.add("CRUD", redacted);
+                }
+
+                attrs.add("duration", opDuration);
+
+                LOGV2(51801, "applied op", attrs);
             } else {
-                s << "CRUD ";
-            }
+                StringBuilder s;
+                s << "applied op: ";
 
-            s << redact(entryOrGroupedInserts.toBSON());
-            s << ", took " << opDuration << "ms";
+                if (entryOrGroupedInserts.getOp().getOpType() == OpTypeEnum::kCommand) {
+                    s << "command ";
+                } else {
+                    s << "CRUD ";
+                }
+
+                s << redact(entryOrGroupedInserts.toBSON());
+                s << ", took " << opDuration << "ms";
 
-            LOGV2(21228, "{s_str}", "s_str"_attr = s.str());
+                LOGV2(21228, "{s_str}", "s_str"_attr = s.str());
+            }
         }
     }
     return finalStatus;
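The curop, oplog-application, and transaction changes in this diff all follow the same pattern: when the server is in JSON log mode, build a logv2::DynamicAttributes, add fields conditionally, and pass the whole set to LOGV2 as the last argument; otherwise fall back to the legacy text form. Below is a rough stand-alone sketch of that pattern; the function, attribute names, and log IDs are invented for illustration, and the set of includes is approximate.

```cpp
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault

#include "mongo/base/string_data.h"
#include "mongo/db/server_options.h"
#include "mongo/logv2/attribute_storage.h"
#include "mongo/logv2/log.h"
#include "mongo/util/duration.h"

namespace mongo {

// Hypothetical example: emit a structured line in JSON mode, a text line otherwise.
void logSlowThingExample(StringData ns, Milliseconds elapsed, bool fromCache) {
    if (serverGlobalParams.logFormat == logv2::LogFormat::kJson) {
        logv2::DynamicAttributes attrs;
        attrs.addDeepCopy("ns", ns.toString());  // deep copy: the temporary string would dangle
        if (fromCache) {
            attrs.add("fromCache", fromCache);   // optional fields are added only when relevant
        }
        attrs.add("duration", elapsed);          // rendered as "durationMillis" by the formatter
        LOGV2(99991, "slow thing", attrs);       // 99991 is an arbitrary example ID
    } else {
        LOGV2(99992,
              "slow thing ns: {ns} took {duration}ms",
              "ns"_attr = ns,
              "duration"_attr = elapsed.count());
    }
}

}  // namespace mongo
```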
diff --git a/src/mongo/db/s/transaction_coordinator.cpp b/src/mongo/db/s/transaction_coordinator.cpp
index f890ee3ef5a..f3a6696db93 100644
--- a/src/mongo/db/s/transaction_coordinator.cpp
+++ b/src/mongo/db/s/transaction_coordinator.cpp
@@ -422,9 +422,82 @@ void TransactionCoordinator::_done(Status status) {
 
 void TransactionCoordinator::_logSlowTwoPhaseCommit(
     const txn::CoordinatorCommitDecision& decision) {
-    LOGV2(22448,
-          "{twoPhaseCommitInfoForLog_decision}",
-          "twoPhaseCommitInfoForLog_decision"_attr = _twoPhaseCommitInfoForLog(decision));
+    if (serverGlobalParams.logFormat == logv2::LogFormat::kJson) {
+        logv2::DynamicAttributes attrs;
+
+        BSONObjBuilder parametersBuilder;
+
+        BSONObjBuilder lsidBuilder(parametersBuilder.subobjStart("lsid"));
+        _lsid.serialize(&lsidBuilder);
+        lsidBuilder.doneFast();
+
+        parametersBuilder.append("txnNumber", _txnNumber);
+
+        attrs.add("parameters", parametersBuilder.obj());
+
+        std::string decisionTemp;
+        switch (decision.getDecision()) {
+            case txn::CommitDecision::kCommit:
+                attrs.add("terminationCause", "committed");
+                decisionTemp = decision.getCommitTimestamp()->toString();
+                attrs.add("commitTimestamp", decisionTemp);
+                break;
+            case txn::CommitDecision::kAbort:
+                attrs.add("terminationCause", "aborted");
+                attrs.add("terminationDetails", *decision.getAbortStatus());
+                break;
+            default:
+                MONGO_UNREACHABLE;
+        };
+
+        attrs.add("numParticipants", _participants->size());
+
+        auto tickSource = _serviceContext->getTickSource();
+        auto curTick = tickSource->getTicks();
+        const auto& singleTransactionCoordinatorStats =
+            _transactionCoordinatorMetricsObserver->getSingleTransactionCoordinatorStats();
+
+        BSONObjBuilder stepDurations;
+        stepDurations.append(
+            "writingParticipantListMicros",
+            durationCount<Microseconds>(
+                singleTransactionCoordinatorStats.getWritingParticipantListDuration(tickSource,
+                                                                                    curTick)));
+        stepDurations.append(
+            "waitingForVotesMicros",
+            durationCount<Microseconds>(
+                singleTransactionCoordinatorStats.getWaitingForVotesDuration(tickSource, curTick)));
+        stepDurations.append(
+            "writingDecisionMicros",
+            durationCount<Microseconds>(
+                singleTransactionCoordinatorStats.getWritingDecisionDuration(tickSource, curTick)));
+        stepDurations.append(
+            "waitingForDecisionAcksMicros",
+            durationCount<Microseconds>(
+                singleTransactionCoordinatorStats.getWaitingForDecisionAcksDuration(tickSource,
+                                                                                    curTick)));
+        stepDurations.append(
+            "deletingCoordinatorDocMicros",
+            durationCount<Microseconds>(
+                singleTransactionCoordinatorStats.getDeletingCoordinatorDocDuration(tickSource,
+                                                                                    curTick)));
+        attrs.add("stepDurations", stepDurations.obj());
+
+        // Total duration of the commit coordination. Logged at the end of the line for consistency
+        // with slow command logging. Note that this is reported in milliseconds while the step
+        // durations are reported in microseconds.
+        attrs.add(
+            "duration",
+            duration_cast<Milliseconds>(
+                singleTransactionCoordinatorStats.getTwoPhaseCommitDuration(tickSource, curTick)));
+
+        LOGV2(51804, "two-phase commit", attrs);
+
+    } else {
+        LOGV2(22448,
+              "{twoPhaseCommitInfoForLog_decision}",
+              "twoPhaseCommitInfoForLog_decision"_attr = _twoPhaseCommitInfoForLog(decision));
+    }
 }
 
 std::string TransactionCoordinator::_twoPhaseCommitInfoForLog(
diff --git a/src/mongo/db/transaction_participant.cpp b/src/mongo/db/transaction_participant.cpp
index 3cea8461baa..26162528a7c 100644
--- a/src/mongo/db/transaction_participant.cpp
+++ b/src/mongo/db/transaction_participant.cpp
@@ -1901,6 +1901,77 @@ std::string TransactionParticipant::Participant::_transactionInfoForLog(
     return s.str();
 }
 
+
+void TransactionParticipant::Participant::_transactionInfoForLog(
+    OperationContext* opCtx,
+    const SingleThreadedLockStats* lockStats,
+    TerminationCause terminationCause,
+    repl::ReadConcernArgs readConcernArgs,
+    logv2::DynamicAttributes* pAttrs) const {
+    invariant(lockStats);
+
+    // User specified transaction parameters.
+    BSONObjBuilder parametersBuilder;
+
+    BSONObjBuilder lsidBuilder(parametersBuilder.subobjStart("lsid"));
+    _sessionId().serialize(&lsidBuilder);
+    lsidBuilder.doneFast();
+
+    parametersBuilder.append("txnNumber", o().activeTxnNumber);
+    parametersBuilder.append("autocommit", p().autoCommit ? *p().autoCommit : true);
+    readConcernArgs.appendInfo(&parametersBuilder);
+
+    pAttrs->add("parameters", parametersBuilder.obj());
+
+    const auto& singleTransactionStats = o().transactionMetricsObserver.getSingleTransactionStats();
+
+    pAttrs->addDeepCopy("readTimestamp", singleTransactionStats.getReadTimestamp().toString());
+
+    singleTransactionStats.getOpDebug()->additiveMetrics.report(pAttrs);
+
+    std::string terminationCauseString =
+        terminationCause == TerminationCause::kCommitted ? "committed" : "aborted";
+    pAttrs->add("terminationCause", terminationCauseString);
+
+    auto tickSource = opCtx->getServiceContext()->getTickSource();
+    auto curTick = tickSource->getTicks();
+
+    pAttrs->add("timeActive",
+                durationCount<Microseconds>(
+                    singleTransactionStats.getTimeActiveMicros(tickSource, curTick)));
+    pAttrs->add("timeInactive",
+                durationCount<Microseconds>(
+                    singleTransactionStats.getTimeInactiveMicros(tickSource, curTick)));
+
+    // Number of yields is always 0 in multi-document transactions, but it is included mainly to
+    // match the format with other slow operation logging messages.
+    pAttrs->add("numYields", 0);
+    // Aggregate lock statistics.
+
+    BSONObjBuilder locks;
+    lockStats->report(&locks);
+    pAttrs->add("locks", locks.obj());
+
+    if (singleTransactionStats.getOpDebug()->storageStats)
+        pAttrs->add("storage", singleTransactionStats.getOpDebug()->storageStats->toBSON());
+
+    // It is possible for a slow transaction to have aborted in the prepared state if an
+    // exception was thrown before prepareTransaction succeeds.
+    const auto totalPreparedDuration = durationCount<Microseconds>(
+        singleTransactionStats.getPreparedDuration(tickSource, curTick));
+    const bool txnWasPrepared = totalPreparedDuration > 0;
+    pAttrs->add("wasPrepared", txnWasPrepared);
+    if (txnWasPrepared) {
+        pAttrs->add("totalPreparedDuration", totalPreparedDuration);
+        pAttrs->add("prepareOpTime", o().prepareOpTime);
+    }
+
+    // Total duration of the transaction.
+    pAttrs->add(
+        "duration",
+        duration_cast<Milliseconds>(singleTransactionStats.getDuration(tickSource, curTick)));
+}
+
 void TransactionParticipant::Participant::_logSlowTransaction(
     OperationContext* opCtx,
     const SingleThreadedLockStats* lockStats,
@@ -1918,13 +1989,20 @@ void TransactionParticipant::Participant::_logSlowTransaction(
                                opDuration,
                                Milliseconds(serverGlobalParams.slowMS))
             .first) {
-        LOGV2_OPTIONS(
-            22523,
-            {logComponentV1toV2(logger::LogComponent::kTransaction)},
-            "transaction "
-            "{transactionInfoForLog_opCtx_lockStats_terminationCause_readConcernArgs}",
-            "transactionInfoForLog_opCtx_lockStats_terminationCause_readConcernArgs"_attr =
-                _transactionInfoForLog(opCtx, lockStats, terminationCause, readConcernArgs));
+        if (serverGlobalParams.logFormat == logv2::LogFormat::kJson) {
+            logv2::DynamicAttributes attr;
+            _transactionInfoForLog(opCtx, lockStats, terminationCause, readConcernArgs, &attr);
+            LOGV2_OPTIONS(51802, {logv2::LogComponent::kTransaction}, "transaction", attr);
+        } else {
+            LOGV2_OPTIONS(
+                22523,
+                {logComponentV1toV2(logger::LogComponent::kTransaction)},
+                "transaction "
+                "{transactionInfoForLog_opCtx_lockStats_terminationCause_readConcernArgs}",
+                "transactionInfoForLog_opCtx_lockStats_terminationCause_readConcernArgs"_attr =
+                    _transactionInfoForLog(
+                        opCtx, lockStats, terminationCause, readConcernArgs));
+        }
     }
 }
diff --git a/src/mongo/db/transaction_participant.h b/src/mongo/db/transaction_participant.h
index 1317807cca6..8bcf69a1f4b 100644
--- a/src/mongo/db/transaction_participant.h
+++ b/src/mongo/db/transaction_participant.h
@@ -52,6 +52,7 @@
 #include "mongo/db/storage/storage_engine.h"
 #include "mongo/db/transaction_metrics_observer.h"
 #include "mongo/idl/mutable_observer_registry.h"
+#include "mongo/logv2/attribute_storage.h"
 #include "mongo/stdx/unordered_map.h"
 #include "mongo/util/assert_util.h"
 #include "mongo/util/concurrency/with_lock.h"
@@ -699,6 +700,12 @@ public:
                                         TerminationCause terminationCause,
                                         repl::ReadConcernArgs readConcernArgs) const;
 
+    void _transactionInfoForLog(OperationContext* opCtx,
+                                const SingleThreadedLockStats* lockStats,
+                                TerminationCause terminationCause,
+                                repl::ReadConcernArgs readConcernArgs,
+                                logv2::DynamicAttributes* pAttrs) const;
+
     // Bumps up the transaction number of this transaction and perform the necessary cleanup.
     void _setNewTxnNumber(OperationContext* opCtx, const TxnNumber& txnNumber);
 
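The new `_transactionInfoForLog` overload above mirrors the `OpDebug::report` change: instead of returning a formatted string, it fills a caller-provided `logv2::DynamicAttributes`. Because added attributes may reference data owned by the object being reported on, the caller builds the attribute set and hands it to LOGV2 immediately, in the same scope. Below is a hypothetical sketch of that split; the class, member names, and log ID are made up.

```cpp
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault

#include <string>

#include "mongo/logv2/attribute_storage.h"
#include "mongo/logv2/log.h"

namespace mongo {

// Hypothetical stats holder mirroring the report()/report(DynamicAttributes*) split used above.
class ExampleStats {
public:
    // Legacy text form.
    std::string report() const {
        return "opCount: " + std::to_string(_opCount);
    }

    // Structured form: the caller owns the DynamicAttributes and must keep this object alive
    // until the LOGV2 call consumes it, because added attributes may reference our members.
    void report(logv2::DynamicAttributes* pAttrs) const {
        pAttrs->add("opCount", _opCount);
        pAttrs->addDeepCopy("label", _label + "-suffix");  // temporary string: deep copy required
    }

private:
    long long _opCount = 7;
    std::string _label = "example";
};

void logExample(const ExampleStats& stats) {
    logv2::DynamicAttributes attrs;
    stats.report(&attrs);
    LOGV2(99993, "example stats", attrs);  // logged in the same scope, while 'stats' is alive
}

}  // namespace mongo
```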
diff --git a/src/mongo/logv2/attribute_storage.h b/src/mongo/logv2/attribute_storage.h
index b1d01c9c9de..9b5b6b91f7e 100644
--- a/src/mongo/logv2/attribute_storage.h
+++ b/src/mongo/logv2/attribute_storage.h
@@ -217,11 +217,11 @@ inline StringData mapValue(char* value) {
 inline StringData mapValue(const char* value) {
     return value;
 }
-inline const BSONObj* mapValue(BSONObj const& value) {
-    return &value;
+inline const BSONObj mapValue(BSONObj const& value) {
+    return value;
 }
-inline const BSONArray* mapValue(BSONArray const& value) {
-    return &value;
+inline const BSONArray mapValue(BSONArray const& value) {
+    return value;
 }
 inline CustomAttributeValue mapValue(BSONElement const& val) {
     CustomAttributeValue custom;
@@ -296,6 +296,7 @@ CustomAttributeValue mapValue(const T& val) {
             builder.append(fieldName, val);
         };
     }
+
     if constexpr (HasBSONSerialize<T>::value) {
         custom.BSONSerialize = [&val](BSONObjBuilder& builder) { val.serialize(&builder); };
     } else if constexpr (HasToBSON<T>::value) {
@@ -309,6 +310,7 @@ CustomAttributeValue mapValue(const T& val) {
             builder.appendElements(toBSON(val));
         };
     }
+
     if constexpr (HasStringSerialize<T>::value) {
         custom.stringSerialize = [&val](fmt::memory_buffer& buffer) { val.serialize(buffer); };
     } else if constexpr (HasToString<T>::value) {
@@ -551,8 +553,8 @@ public:
                      Minutes,
                      Hours,
                      Days,
-                     const BSONObj*,
-                     const BSONArray*,
+                     BSONObj,
+                     BSONArray,
                      CustomAttributeValue>
         value;
 };
@@ -588,18 +590,35 @@ public:
     // Do not allow rvalue references and temporary objects to avoid lifetime problem issues
     template <size_t N,
               typename T,
-              std::enable_if_t<std::is_arithmetic_v<T> || std::is_pointer_v<T> || std::is_enum_v<T>,
+              std::enable_if_t<std::is_arithmetic_v<T> || std::is_pointer_v<T> ||
+                                   std::is_enum_v<T> || detail::IsDuration<T>::value,
                                int> = 0>
     void add(const char (&name)[N], T value) {
         _attributes.emplace_back(StringData(name, N - 1), value);
     }
 
-    template <size_t N, typename T, std::enable_if_t<std::is_class_v<T>, int> = 0>
+    template <size_t N>
+    void add(const char (&name)[N], BSONObj value) {
+        BSONObj owned = value.getOwned();
+        _attributes.emplace_back(StringData(name, N - 1), owned);
+    }
+
+    template <size_t N>
+    void add(const char (&name)[N], BSONArray value) {
+        BSONArray owned = static_cast<BSONArray>(value.getOwned());
+        _attributes.emplace_back(StringData(name, N - 1), owned);
+    }
+
+    template <size_t N,
+              typename T,
+              std::enable_if_t<std::is_class_v<T> && !detail::IsDuration<T>::value, int> = 0>
     void add(const char (&name)[N], const T& value) {
         _attributes.emplace_back(StringData(name, N - 1), value);
    }
 
-    template <size_t N, typename T, std::enable_if_t<std::is_class_v<T>, int> = 0>
+    template <size_t N,
+              typename T,
+              std::enable_if_t<std::is_class_v<T> && !detail::IsDuration<T>::value, int> = 0>
    void add(const char (&name)[N], T&& value) = delete;
 
    template <size_t N>
@@ -607,6 +626,13 @@ public:
        _attributes.emplace_back(StringData(name, N - 1), value);
    }
 
+    // Deep copies the string instead of taking it by reference
+    template <size_t N>
+    void addDeepCopy(const char (&name)[N], std::string value) {
+        _copiedStrings.push_front(std::move(value));
+        add(name, StringData(_copiedStrings.front()));
+    }
+
    // Does not have the protections of add() above. Be careful about lifetime of value!
    template <size_t N, typename T>
    void addUnsafe(const char (&name)[N], const T& value) {
@@ -619,6 +645,9 @@ private:
    friend class mongo::logv2::TypeErasedAttributeStorage;
 
    boost::container::small_vector<detail::NamedAttribute, constants::kNumStaticAttrs> _attributes;
+
+    // Linked list of deep copies to std::string that we can take address-of.
+    std::forward_list<std::string> _copiedStrings;
 };
 
 // Wrapper around internal pointer of AttributeStorage so it does not need any template parameters
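The attribute_storage.h changes above are about lifetime: `add()` stores class-type values by reference and rejects rvalues, so BSONObj/BSONArray are now copied into the storage (via `getOwned()`), and `addDeepCopy()` exists for strings that would otherwise be dangling temporaries. A short sketch of what is and is not safe under these rules; the names and log ID are invented, and the commented-out line is the case the deleted rvalue overload rejects.

```cpp
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault

#include <string>

#include "mongo/bson/bsonobjbuilder.h"
#include "mongo/logv2/attribute_storage.h"
#include "mongo/logv2/log.h"

namespace mongo {

// Illustration of the lifetime rules for dynamic attributes.
void attributeLifetimeExample(long long txnNumber, const std::string& ns) {
    logv2::DynamicAttributes attrs;

    // BSONObj attributes are stored as owned copies (getOwned() inside add()), so a locally
    // built object is safe even though 'builder' is gone by the time the formatter runs.
    BSONObjBuilder builder;
    builder.append("txnNumber", txnNumber);
    attrs.add("parameters", builder.obj());

    // Other class-type attributes are still held by reference and rvalues are rejected at
    // compile time:
    //   attrs.add("ns", ns + ".suffix");      // would not compile (temporary std::string)
    attrs.addDeepCopy("ns", ns + ".suffix");   // OK: addDeepCopy() stores its own copy

    LOGV2(99994, "attribute lifetime example", attrs);
}

}  // namespace mongo
```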
diff --git a/src/mongo/logv2/json_formatter.cpp b/src/mongo/logv2/json_formatter.cpp
index 528f30ec807..61c73d9c0a1 100644
--- a/src/mongo/logv2/json_formatter.cpp
+++ b/src/mongo/logv2/json_formatter.cpp
@@ -99,20 +99,20 @@ struct JSONValueExtractor {
         }
     }
 
-    void operator()(StringData name, const BSONObj* val) {
+    void operator()(StringData name, const BSONObj& val) {
         // This is a JSON subobject, no quotes needed
         storeUnquoted(name);
-        BSONObj truncated = val->jsonStringBuffer(
+        BSONObj truncated = val.jsonStringBuffer(
             JsonStringFormat::ExtendedRelaxedV2_0_0, 0, false, _buffer, _attributeMaxSize);
-        addTruncationReport(name, truncated, val->objsize());
+        addTruncationReport(name, truncated, val.objsize());
     }
 
-    void operator()(StringData name, const BSONArray* val) {
+    void operator()(StringData name, const BSONArray& val) {
         // This is a JSON subobject, no quotes needed
         storeUnquoted(name);
-        BSONObj truncated = val->jsonStringBuffer(
+        BSONObj truncated = val.jsonStringBuffer(
             JsonStringFormat::ExtendedRelaxedV2_0_0, 0, true, _buffer, _attributeMaxSize);
-        addTruncationReport(name, truncated, val->objsize());
+        addTruncationReport(name, truncated, val.objsize());
     }
 
     void operator()(StringData name, StringData value) {
@@ -121,6 +121,8 @@ struct JSONValueExtractor {
 
     template <typename Period>
     void operator()(StringData name, const Duration<Period>& value) {
+        // A suffix is automatically prepended
+        dassert(!name.endsWith(value.mongoUnitSuffix()));
         fmt::format_to(
             _buffer, R"({}"{}{}":{})", _separator, name, value.mongoUnitSuffix(), value.count());
         _separator = ","_sd;
diff --git a/src/mongo/logv2/log_test_v2.cpp b/src/mongo/logv2/log_test_v2.cpp
index 4b852b6c816..7bfb68ba601 100644
--- a/src/mongo/logv2/log_test_v2.cpp
+++ b/src/mongo/logv2/log_test_v2.cpp
@@ -624,6 +624,10 @@ TEST_F(LogTestV2, TextFormat) {
     ASSERT(lines.back().rfind(toString(t3)) != std::string::npos);
 }
 
+std::string hello() {
+    return "hello";
+}
+
 TEST_F(LogTestV2, JsonBsonFormat) {
     using namespace constants;
 
@@ -809,6 +813,8 @@ TEST_F(LogTestV2, JsonBsonFormat) {
         attrs.addUnsafe("unsafe but ok", 1);
         BSONObj bsonObj;
         attrs.add("bson", bsonObj);
+        attrs.add("millis", Milliseconds(1));
+        attrs.addDeepCopy("stdstr", hello());
         LOGV2(20083, "message", attrs);
         auto validateDynamic = [](const BSONObj& obj) {
             const BSONObj& attrObj = obj.getField(kAttributesFieldName).Obj();
@@ -819,6 +825,8 @@ TEST_F(LogTestV2, JsonBsonFormat) {
                  "enum"_sd,
                  "custom"_sd,
                  "bson"_sd,
+                 "millisMillis"_sd,
+                 "stdstr"_sd,
                  "unsafe but ok"_sd}) {
                 ASSERT(attrObj.hasField(f));
             }
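The Duration handling above is also why the test now expects a field called `millisMillis`: the JSON formatter appends the Duration type's unit suffix to the attribute name, and the new dassert guards against passing a name that already ends with that suffix. A small illustrative sketch follows; the attribute names and log ID are invented.

```cpp
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault

#include "mongo/logv2/attribute_storage.h"
#include "mongo/logv2/log.h"
#include "mongo/util/duration.h"

namespace mongo {

void durationAttributeExample() {
    logv2::DynamicAttributes attrs;

    // Rendered by the JSON formatter as "durationMillis":250; the "Millis" suffix comes from
    // Milliseconds::mongoUnitSuffix(), it is not part of the attribute name we pass in.
    attrs.add("duration", Milliseconds(250));

    // Rendered as "timeActiveMicros":1200 for the same reason.
    attrs.add("timeActive", Microseconds(1200));

    // Passing a name that already ends in the unit suffix (e.g. "durationMillis") would produce
    // "durationMillisMillis" and trip the new dassert in JSONValueExtractor in debug builds.

    LOGV2(99995, "duration attribute example", attrs);
}

}  // namespace mongo
```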
diff --git a/src/mongo/logv2/plain_formatter.cpp b/src/mongo/logv2/plain_formatter.cpp
index c0d3ccdf797..234854e0d06 100644
--- a/src/mongo/logv2/plain_formatter.cpp
+++ b/src/mongo/logv2/plain_formatter.cpp
@@ -61,13 +61,13 @@ struct TextValueExtractor {
         operator()(name, StringData(_storage.back()));
     }
 
-    void operator()(StringData name, const BSONObj* val) {
-        _storage.push_back(val->jsonString(JsonStringFormat::ExtendedRelaxedV2_0_0));
+    void operator()(StringData name, const BSONObj& val) {
+        _storage.push_back(val.jsonString(JsonStringFormat::ExtendedRelaxedV2_0_0));
         operator()(name, StringData(_storage.back()));
     }
 
-    void operator()(StringData name, const BSONArray* val) {
-        _storage.push_back(val->jsonString(JsonStringFormat::ExtendedRelaxedV2_0_0, 0, true));
+    void operator()(StringData name, const BSONArray& val) {
+        _storage.push_back(val.jsonString(JsonStringFormat::ExtendedRelaxedV2_0_0, 0, true));
         operator()(name, StringData(_storage.back()));
     }
 
diff --git a/src/mongo/rpc/metadata/client_metadata.cpp b/src/mongo/rpc/metadata/client_metadata.cpp
index 9b51a4bc750..455df2abee1 100644
--- a/src/mongo/rpc/metadata/client_metadata.cpp
+++ b/src/mongo/rpc/metadata/client_metadata.cpp
@@ -41,6 +41,7 @@
 #include "mongo/bson/bsonobj.h"
 #include "mongo/bson/bsonobjbuilder.h"
 #include "mongo/db/operation_context.h"
+#include "mongo/logv2/log.h"
 #include "mongo/s/is_mongos.h"
 #include "mongo/util/log.h"
 #include "mongo/util/processinfo.h"
@@ -435,8 +436,12 @@ const BSONObj& ClientMetadata::getDocument() const {
 void ClientMetadata::logClientMetadata(Client* client) const {
     invariant(!getDocument().isEmpty());
 
-    log() << "received client metadata from " << client->getRemote().toString() << " "
-          << client->desc() << ": " << getDocument();
+    LOGV2(51800,
+          "received client metadata from {remote} {client}: {doc}",
+          "client metadata",
+          "remote"_attr = client->getRemote(),
+          "client"_attr = client->desc(),
+          "doc"_attr = getDocument());
 }
 
 StringData ClientMetadata::fieldName() {
+ attrs.add("numParticipants", o().participants.size()); + } + + if (o().commitType == CommitType::kTwoPhaseCommit) { + dassert(o().coordinatorId); + attrs.add("coordinator", *o().coordinatorId); + } + + auto tickSource = opCtx->getServiceContext()->getTickSource(); + auto curTicks = tickSource->getTicks(); + + if (terminationCause == TerminationCause::kCommitted) { + attrs.add("terminationCause", "committed"); + + dassert(o().metricsTracker->commitHasStarted()); + dassert(o().commitType != CommitType::kNotInitiated); + dassert(o().abortCause.empty()); + } else { + attrs.add("terminationCause", "aborted"); + + dassert(!o().abortCause.empty()); + attrs.add("abortCause", o().abortCause); + } + + const auto& timingStats = o().metricsTracker->getTimingStats(); + + std::string commitTypeTemp; + if (o().metricsTracker->commitHasStarted()) { + dassert(o().commitType != CommitType::kNotInitiated); + commitTypeTemp = commitTypeToString(o().commitType); + attrs.add("commitType", commitTypeTemp); + + attrs.add( + "commitDuration", + durationCount<Microseconds>(timingStats.getCommitDuration(tickSource, curTicks))); + } + + attrs.add( + "timeActive", + durationCount<Microseconds>(timingStats.getTimeActiveMicros(tickSource, curTicks))); + + attrs.add( + "timeInactive", + durationCount<Microseconds>(timingStats.getTimeInactiveMicros(tickSource, curTicks))); + + // Total duration of the transaction. Logged at the end of the line for consistency with + // slow command logging. + attrs.add("duration", + duration_cast<Milliseconds>(timingStats.getDuration(tickSource, curTicks))); + + LOGV2(51805, "transaction", attrs); + + } else { + LOGV2(22899, + "transaction {transactionInfoForLog_opCtx_terminationCause}", + "transactionInfoForLog_opCtx_terminationCause"_attr = + _transactionInfoForLog(opCtx, terminationCause)); + } } std::string TransactionRouter::Router::_transactionInfoForLog( |