author     Eric Milkie <milkie@10gen.com>  2020-04-13 14:29:48 -0400
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>  2020-04-22 12:54:28 +0000
commit     56b894215fa182dd7668ef7e7c6f52e6da6a536e (patch)
tree       4b1d37df17ac58ab033a1ab8a435930c8e58246b
parent     40801001754b6bdc15bd2f59eae523c59b6ff055 (diff)
download   mongo-56b894215fa182dd7668ef7e7c6f52e6da6a536e.tar.gz
SERVER-47397 enhanced logging for Execution owned files (part 1)
-rw-r--r--  jstests/core/txns/speculative_snapshot_includes_all_writes.js  4
-rw-r--r--  jstests/noPassthrough/currentop_secondary_slow_op.js  4
-rw-r--r--  jstests/replsets/create_drop_database_different_casing.js  3
-rw-r--r--  jstests/replsets/drop_databases_two_phase.js  8
-rw-r--r--  src/mongo/db/catalog/catalog_control.cpp  10
-rw-r--r--  src/mongo/db/catalog/collection_catalog.cpp  21
-rw-r--r--  src/mongo/db/catalog/collection_impl.cpp  17
-rw-r--r--  src/mongo/db/catalog/collection_validation.cpp  45
-rw-r--r--  src/mongo/db/catalog/create_collection.cpp  16
-rw-r--r--  src/mongo/db/catalog/database_impl.cpp  65
-rw-r--r--  src/mongo/db/catalog/drop_collection.cpp  5
-rw-r--r--  src/mongo/db/catalog/drop_database.cpp  39
-rw-r--r--  src/mongo/db/catalog/drop_indexes.cpp  12
-rw-r--r--  src/mongo/db/catalog/index_build_block.cpp  5
-rw-r--r--  src/mongo/db/catalog/index_builds_manager.cpp  16
-rw-r--r--  src/mongo/db/catalog/index_catalog_entry_impl.cpp  5
-rw-r--r--  src/mongo/db/catalog/index_catalog_impl.cpp  97
-rw-r--r--  src/mongo/db/catalog/index_key_validate.cpp  1
-rw-r--r--  src/mongo/db/catalog/index_timestamp_helper.cpp  8
-rw-r--r--  src/mongo/db/concurrency/lock_state.cpp  6
20 files changed, 199 insertions, 188 deletions
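
Every hunk below applies the same LOGV2 migration: each log call keeps (or drops) its legacy format-string message, gains a second, constant message string, and renames its attributes to uniform keys such as "namespace", "db", "uuid", and "error". Tests then match the stable numeric log ID (e.g. checkLog.containsJson(conn, 20289)) instead of regex-matching rendered text. A minimal before/after sketch of the pattern; the ID 12345 and the nss variable are illustrative placeholders, not code from this commit:

    // Before: the user-facing text doubles as the format string, so the
    // rendered message varies with every namespace and can only be found
    // by regex-matching the formatted log line.
    LOGV2(12345, "dropping collection: {nss}", "nss"_attr = nss);

    // After: an optional legacy format string, then a constant message,
    // then attributes with uniform names; structured-log consumers key
    // off the numeric ID and attribute names alone.
    LOGV2(12345,
          "dropping collection: {nss}",  // legacy format string (sometimes dropped)
          "dropping collection",         // stable message
          "namespace"_attr = nss);

The same shape applies to the LOGV2_DEBUG, LOGV2_WARNING, LOGV2_ERROR, and LOGV2_OPTIONS variants seen in the hunks below.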
diff --git a/jstests/core/txns/speculative_snapshot_includes_all_writes.js b/jstests/core/txns/speculative_snapshot_includes_all_writes.js
index 052d2abc554..47b34fb28d4 100644
--- a/jstests/core/txns/speculative_snapshot_includes_all_writes.js
+++ b/jstests/core/txns/speculative_snapshot_includes_all_writes.js
@@ -62,9 +62,7 @@ const joinHungWrite = startParallelShell(() => {
});
if (isJsonLogNoConn()) {
- checkLog.contains(db.getMongo(),
- new RegExp("hangAfterCollectionInserts fail point enabled for.*\"ns\":\"" +
- testColl2.getFullName() + "\""));
+ checkLog.containsJson(db.getMongo(), 20289);
} else {
checkLog.contains(
db.getMongo(),
diff --git a/jstests/noPassthrough/currentop_secondary_slow_op.js b/jstests/noPassthrough/currentop_secondary_slow_op.js
index 419153105aa..df52875191d 100644
--- a/jstests/noPassthrough/currentop_secondary_slow_op.js
+++ b/jstests/noPassthrough/currentop_secondary_slow_op.js
@@ -40,9 +40,7 @@ assert.commandWorked(secondaryDB.adminCommand({
try {
assert.commandWorked(coll.insert({_id: 'b'}));
- checkLog.contains(
- secondary,
- new RegExp(`hangAfterCollectionInserts fail point enabled for.*${coll.getFullName()}`));
+ checkLog.containsJson(secondary, 20289);
jsTestLog('Running currentOp() with slow operation logging.');
// Lower slowms to make currentOp() log slow operation while the secondary is processing the
diff --git a/jstests/replsets/create_drop_database_different_casing.js b/jstests/replsets/create_drop_database_different_casing.js
index 708acf8832f..f839c84526a 100644
--- a/jstests/replsets/create_drop_database_different_casing.js
+++ b/jstests/replsets/create_drop_database_different_casing.js
@@ -48,8 +48,7 @@ rst.awaitReplication();
failPoint.off();
if (isJsonLog(primary)) {
- checkLog.contains(primary,
- `dropDatabase {dbName} - finished","attr":{"dbName":"${dbNameUpper}"}}`);
+ checkLog.contains(primary, `dropDatabase - finished","attr":{"db":"${dbNameUpper}"}}`);
} else {
checkLog.contains(primary, "dropDatabase " + dbNameUpper + " - finished");
}
diff --git a/jstests/replsets/drop_databases_two_phase.js b/jstests/replsets/drop_databases_two_phase.js
index 28a58bbd1be..fbac7c0a721 100644
--- a/jstests/replsets/drop_databases_two_phase.js
+++ b/jstests/replsets/drop_databases_two_phase.js
@@ -154,11 +154,9 @@ var exitCode = dropDatabaseProcess();
let db = primary.getDB(dbNameToDrop);
if (isJsonLog(db.getMongo())) {
checkLog.contains(db.getMongo(),
- `dropping collection: {nss}","attr":{"dbName":"${dbNameToDrop}","nss":"${
- dbNameToDrop}.${collNameToDrop}"`);
- checkLog.contains(
- db.getMongo(),
- 'dropped {numCollections} collection(s)","attr":{"dbName":"dbToDrop","numCollections":1}');
+ `dropDatabase - dropping collection","attr":{"db":"${
+ dbNameToDrop}","namespace":"${dbNameToDrop}.${collNameToDrop}"`);
+ checkLog.contains(db.getMongo(), 'dropDatabase - finished","attr":{"db":"dbToDrop"');
} else {
checkLog.contains(db.getMongo(), "dropping collection: " + dbNameToDrop + "." + collNameToDrop);
checkLog.contains(db.getMongo(), "dropped 1 collection(s)");
diff --git a/src/mongo/db/catalog/catalog_control.cpp b/src/mongo/db/catalog/catalog_control.cpp
index 0af48451cd4..7e476d46594 100644
--- a/src/mongo/db/catalog/catalog_control.cpp
+++ b/src/mongo/db/catalog/catalog_control.cpp
@@ -75,8 +75,7 @@ MinVisibleTimestampMap closeCatalog(OperationContext* opCtx) {
if (uuid && minVisible) {
LOGV2_DEBUG(20269,
1,
- "closeCatalog: preserving min visible timestamp. Collection: {coll_ns} "
- "UUID: {uuid} TS: {minVisible}",
+ "closeCatalog: preserving min visible timestamp.",
"coll_ns"_attr = coll->ns(),
"uuid"_attr = uuid,
"minVisible"_attr = minVisible);
@@ -157,8 +156,9 @@ void openCatalog(OperationContext* opCtx, const MinVisibleTimestampMap& minVisib
for (const auto& indexName : entry.second.first) {
LOGV2(20275,
"openCatalog: rebuilding index: collection: {collNss}, index: {indexName}",
- "collNss"_attr = collNss.toString(),
- "indexName"_attr = indexName);
+ "openCatalog: rebuilding index",
+ "namespace"_attr = collNss.toString(),
+ "index"_attr = indexName);
}
std::vector<BSONObj> indexSpecs = entry.second.second;
@@ -178,7 +178,7 @@ void openCatalog(OperationContext* opCtx, const MinVisibleTimestampMap& minVisib
std::vector<std::string> databasesToOpen = storageEngine->listDatabases();
for (auto&& dbName : databasesToOpen) {
LOGV2_FOR_RECOVERY(
- 23992, 1, "openCatalog: dbholder reopening database {dbName}", "dbName"_attr = dbName);
+ 23992, 1, "openCatalog: dbholder reopening database", "db"_attr = dbName);
auto db = databaseHolder->openDb(opCtx, dbName);
invariant(db, str::stream() << "failed to reopen database " << dbName);
for (auto&& collNss :
diff --git a/src/mongo/db/catalog/collection_catalog.cpp b/src/mongo/db/catalog/collection_catalog.cpp
index bdb5018016f..3bb5a914512 100644
--- a/src/mongo/db/catalog/collection_catalog.cpp
+++ b/src/mongo/db/catalog/collection_catalog.cpp
@@ -429,15 +429,17 @@ void CollectionCatalog::registerCollection(CollectionUUID uuid, std::unique_ptr<
if (_collections.find(ns) != _collections.end()) {
LOGV2(20279,
"Conflicted creating a collection. ns: {coll_ns} ({coll_uuid}).",
- "coll_ns"_attr = (*coll)->ns(),
- "coll_uuid"_attr = (*coll)->uuid());
+ "Conflicted creating a collection.",
+ "namespace"_attr = (*coll)->ns(),
+ "uuid"_attr = (*coll)->uuid());
throw WriteConflictException();
}
LOGV2_DEBUG(20280,
1,
"Registering collection {ns} with UUID {uuid}",
- "ns"_attr = ns,
+ "Registering collection",
+ "namespace"_attr = ns,
"uuid"_attr = uuid);
auto dbName = ns.db().toString();
@@ -468,11 +470,7 @@ std::unique_ptr<Collection> CollectionCatalog::deregisterCollection(CollectionUU
auto dbName = ns.db().toString();
auto dbIdPair = std::make_pair(dbName, uuid);
- LOGV2_DEBUG(20281,
- 1,
- "Deregistering collection {ns} with UUID {uuid}",
- "ns"_attr = ns,
- "uuid"_attr = uuid);
+ LOGV2_DEBUG(20281, 1, "Deregistering collection", "namespace"_attr = ns, "uuid"_attr = uuid);
// Make sure collection object exists.
invariant(_collections.find(ns) != _collections.end());
@@ -507,11 +505,8 @@ void CollectionCatalog::deregisterAllCollections() {
auto dbName = ns.db().toString();
auto dbIdPair = std::make_pair(dbName, uuid);
- LOGV2_DEBUG(20283,
- 1,
- "Deregistering collection {ns} with UUID {uuid}",
- "ns"_attr = ns,
- "uuid"_attr = uuid);
+ LOGV2_DEBUG(
+ 20283, 1, "Deregistering collection", "namespace"_attr = ns, "uuid"_attr = uuid);
entry.second.reset();
}
diff --git a/src/mongo/db/catalog/collection_impl.cpp b/src/mongo/db/catalog/collection_impl.cpp
index ee23e41748a..95ab580df9e 100644
--- a/src/mongo/db/catalog/collection_impl.cpp
+++ b/src/mongo/db/catalog/collection_impl.cpp
@@ -145,7 +145,8 @@ std::unique_ptr<CollatorInterface> parseCollation(OperationContext* opCtx,
LOGV2(20288,
"Collection {nss} has a default collation which is incompatible with this version: "
"{collationSpec}",
- "nss"_attr = nss,
+ "Collection has a default collation incompatible with this version",
+ "namespace"_attr = nss,
"collationSpec"_attr = collationSpec);
fassertFailedNoTrace(40144);
}
@@ -294,7 +295,7 @@ void CollectionImpl::init(OperationContext* opCtx) {
{logv2::LogTag::kStartupWarnings},
"Collection {ns} has malformed validator: {validatorStatus}",
"Collection has malformed validator",
- "ns"_attr = _ns,
+ "namespace"_attr = _ns,
"validatorStatus"_attr = _validator.getStatus());
}
_validationAction = uassertStatusOK(_parseValidationAction(collectionOptions.validationAction));
@@ -378,7 +379,8 @@ Status CollectionImpl::checkValidation(OperationContext* opCtx, const BSONObj& d
if (_validationAction == ValidationAction::WARN) {
LOGV2_WARNING(20294,
"Document would fail validation collection: {ns} doc: {document}",
- "ns"_attr = ns(),
+ "Document would fail validation",
+ "namespace"_attr = ns(),
"document"_attr = redact(document));
return Status::OK();
}
@@ -503,7 +505,7 @@ Status CollectionImpl::insertDocuments(OperationContext* opCtx,
whenFirst += firstIdElem.str();
}
LOGV2(20289,
- "hangAfterCollectionInserts fail point enabled for {ns}{whenFirst}. Blocking "
+ "hangAfterCollectionInserts fail point enabled. Blocking "
"until fail point is disabled.",
"ns"_attr = _ns,
"whenFirst"_attr = whenFirst);
@@ -559,7 +561,7 @@ Status CollectionImpl::insertDocumentForBulkLoader(OperationContext* opCtx,
if (MONGO_unlikely(failAfterBulkLoadDocInsert.shouldFail())) {
LOGV2(20290,
- "Failpoint failAfterBulkLoadDocInsert enabled for {ns_ns}. Throwing "
+ "Failpoint failAfterBulkLoadDocInsert enabled. Throwing "
"WriteConflictException.",
"ns_ns"_attr = _ns.ns());
throw WriteConflictException();
@@ -685,7 +687,10 @@ void CollectionImpl::deleteDocument(OperationContext* opCtx,
bool noWarn,
Collection::StoreDeletedDoc storeDeletedDoc) {
if (isCapped()) {
- LOGV2(20291, "failing remove on a capped ns {ns}", "ns"_attr = _ns);
+ LOGV2(20291,
+ "failing remove on a capped ns {ns}",
+ "failing remove on a capped ns",
+ "namespace"_attr = _ns);
uasserted(10089, "cannot remove from a capped collection");
return;
}
diff --git a/src/mongo/db/catalog/collection_validation.cpp b/src/mongo/db/catalog/collection_validation.cpp
index 3f78cc0800f..61ab1779913 100644
--- a/src/mongo/db/catalog/collection_validation.cpp
+++ b/src/mongo/db/catalog/collection_validation.cpp
@@ -94,10 +94,9 @@ std::map<std::string, int64_t> _validateIndexesInternalStructure(
LOGV2_OPTIONS(20295,
{logComponentV1toV2(LogComponent::kIndex)},
- "validating the internal structure of index {descriptor_indexName} on "
- "collection {descriptor_parentNS}",
- "descriptor_indexName"_attr = descriptor->indexName(),
- "descriptor_parentNS"_attr = descriptor->parentNS());
+ "validating internal structure",
+ "index"_attr = descriptor->indexName(),
+ "namespace"_attr = descriptor->parentNS());
ValidateResults& curIndexResults = (*indexNsResultsMap)[descriptor->indexName()];
@@ -134,10 +133,9 @@ void _validateIndexes(OperationContext* opCtx,
LOGV2_OPTIONS(20296,
{logComponentV1toV2(LogComponent::kIndex)},
- "validating index consistency {descriptor_indexName} on collection "
- "{descriptor_parentNS}",
- "descriptor_indexName"_attr = descriptor->indexName(),
- "descriptor_parentNS"_attr = descriptor->parentNS());
+ "validating index consistency",
+ "index"_attr = descriptor->indexName(),
+ "namespace"_attr = descriptor->parentNS());
ValidateResults& curIndexResults = (*indexNsResultsMap)[descriptor->indexName()];
int64_t numTraversedKeys;
@@ -226,8 +224,8 @@ void _gatherIndexEntryErrors(OperationContext* opCtx,
LOGV2_OPTIONS(20300,
{logComponentV1toV2(LogComponent::kIndex)},
- "Traversing through the index entries for index {descriptor_indexName}.",
- "descriptor_indexName"_attr = descriptor->indexName());
+ "Traversing through the index entries",
+ "index"_attr = descriptor->indexName());
indexValidator->traverseIndex(opCtx,
index.get(),
@@ -316,12 +314,11 @@ void _reportInvalidResults(OperationContext* opCtx,
const string uuidString) {
_reportValidationResults(
opCtx, validateState, indexNsResultsMap, keysPerIndex, results, output);
- LOGV2_OPTIONS(
- 20302,
- {logComponentV1toV2(LogComponent::kIndex)},
- "Validation complete for collection {validateState_nss}{uuidString}. Corruption found.",
- "validateState_nss"_attr = validateState->nss(),
- "uuidString"_attr = uuidString);
+ LOGV2_OPTIONS(20302,
+ {logComponentV1toV2(LogComponent::kIndex)},
+ "Validation complete -- Corruption found.",
+ "namespace"_attr = validateState->nss(),
+ "uuid"_attr = uuidString);
}
template <typename T>
@@ -496,9 +493,9 @@ Status validate(OperationContext* opCtx,
// Validate the record store.
LOGV2_OPTIONS(20303,
{logComponentV1toV2(LogComponent::kIndex)},
- "validating collection {validateState_nss}{uuidString}",
- "validateState_nss"_attr = validateState.nss(),
- "uuidString"_attr = uuidString);
+ "validating collection",
+ "namespace"_attr = validateState.nss(),
+ "uuid"_attr = uuidString);
IndexConsistency indexConsistency(opCtx, &validateState);
ValidateAdaptor indexValidator(&indexConsistency, &validateState, &indexNsResultsMap);
@@ -549,9 +546,9 @@ Status validate(OperationContext* opCtx,
if (indexConsistency.haveEntryMismatch()) {
LOGV2_OPTIONS(20305,
{logComponentV1toV2(LogComponent::kIndex)},
- "Index inconsistencies were detected on collection {validateState_nss}. "
+ "Index inconsistencies were detected. "
"Starting the second phase of index validation to gather concise errors.",
- "validateState_nss"_attr = validateState.nss());
+ "namespace"_attr = validateState.nss());
_gatherIndexEntryErrors(opCtx,
&validateState,
&indexConsistency,
@@ -592,10 +589,10 @@ Status validate(OperationContext* opCtx,
LOGV2_OPTIONS(20306,
{logComponentV1toV2(LogComponent::kIndex)},
- "Validation complete for collection {validateState_nss}{uuidString}. No "
+ "Validation complete for collection. No "
"corruption found.",
- "validateState_nss"_attr = validateState.nss(),
- "uuidString"_attr = uuidString);
+ "namespace"_attr = validateState.nss(),
+ "uuid"_attr = uuidString);
output->append("ns", validateState.nss().ns());
} catch (ExceptionFor<ErrorCodes::CursorNotFound>&) {
diff --git a/src/mongo/db/catalog/create_collection.cpp b/src/mongo/db/catalog/create_collection.cpp
index 4728ca234bd..61f566d1603 100644
--- a/src/mongo/db/catalog/create_collection.cpp
+++ b/src/mongo/db/catalog/create_collection.cpp
@@ -274,9 +274,11 @@ Status createCollectionForApplyOps(OperationContext* opCtx,
LOGV2(20308,
"CMD: create {newCollName} - existing collection with conflicting UUID "
"{uuid} is in a drop-pending state: {currentName}",
- "newCollName"_attr = newCollName,
- "uuid"_attr = uuid,
- "currentName"_attr = *currentName);
+ "CMD: create -- existing collection with conflicting UUID "
+ "is in a drop-pending state",
+ "newCollection"_attr = newCollName,
+ "conflictingUuid"_attr = uuid,
+ "existingCollection"_attr = *currentName);
return Result(Status(ErrorCodes::NamespaceExists,
str::stream()
<< "existing collection " << currentName->toString()
@@ -321,9 +323,11 @@ Status createCollectionForApplyOps(OperationContext* opCtx,
LOGV2(20309,
"CMD: create {newCollName} - renaming existing collection with "
"conflicting UUID {uuid} to temporary collection {tmpName}",
- "newCollName"_attr = newCollName,
- "uuid"_attr = uuid,
- "tmpName"_attr = tmpName);
+ "CMD: create -- renaming existing collection with "
+ "conflicting UUID to temporary collection",
+ "newCollection"_attr = newCollName,
+ "conflictingUuid"_attr = uuid,
+ "tempName"_attr = tmpName);
Status status = db->renameCollection(opCtx, newCollName, tmpName, stayTemp);
if (!status.isOK())
return Result(status);
diff --git a/src/mongo/db/catalog/database_impl.cpp b/src/mongo/db/catalog/database_impl.cpp
index 08472f8d1e7..996359c48c7 100644
--- a/src/mongo/db/catalog/database_impl.cpp
+++ b/src/mongo/db/catalog/database_impl.cpp
@@ -144,7 +144,10 @@ void DatabaseImpl::init(OperationContext* const opCtx) const {
Status status = validateDBName(_name);
if (!status.isOK()) {
- LOGV2_WARNING(20325, "tried to open invalid db: {name}", "name"_attr = _name);
+ LOGV2_WARNING(20325,
+ "tried to open invalid db: {name}",
+ "tried to open invalid db",
+ "db"_attr = _name);
uasserted(10028, status.toString());
}
@@ -167,10 +170,10 @@ void DatabaseImpl::init(OperationContext* const opCtx) const {
if (!reloadStatus.isOK()) {
LOGV2_WARNING_OPTIONS(20326,
{logv2::LogTag::kStartupWarnings},
- "Unable to parse views: {reloadStatus}; remove any invalid views "
- "from the {viewsName} collection to restore server functionality.",
- "reloadStatus"_attr = redact(reloadStatus),
- "viewsName"_attr = _viewsName);
+ "Unable to parse views; remove any invalid views "
+ "from the collection to restore server functionality.",
+ "error"_attr = redact(reloadStatus),
+ "namespace"_attr = _viewsName);
}
}
@@ -184,15 +187,17 @@ void DatabaseImpl::clearTmpCollections(OperationContext* opCtx) const {
if (!status.isOK()) {
LOGV2_WARNING(20327,
"could not drop temp collection '{collection_ns}': {status}",
- "collection_ns"_attr = collection->ns(),
- "status"_attr = redact(status));
+ "could not drop temp collection",
+ "namespace"_attr = collection->ns(),
+ "error"_attr = redact(status));
}
wuow.commit();
} catch (const WriteConflictException&) {
LOGV2_WARNING(
20328,
"could not drop temp collection '{collection_ns}' due to WriteConflictException",
- "collection_ns"_attr = collection->ns());
+ "could not drop temp collection due to WriteConflictException",
+ "namespace"_attr = collection->ns());
opCtx->recoveryUnit()->abandonSnapshot();
}
return true;
@@ -304,8 +309,9 @@ void DatabaseImpl::getStats(OperationContext* opCtx, BSONObjBuilder* output, dou
output->appendNumber("fsTotalSize", -1);
LOGV2(20312,
"Failed to query filesystem disk stats (code: {ec_value}): {ec_message}",
- "ec_value"_attr = ec.value(),
- "ec_message"_attr = ec.message());
+ "Failed to query filesystem disk stats",
+ "errorCode"_attr = ec.value(),
+ "errorMessage"_attr = ec.message());
}
}
}
@@ -411,7 +417,9 @@ Status DatabaseImpl::dropCollectionEvenIfSystem(OperationContext* opCtx,
LOGV2(20314,
"dropCollection: {nss} ({uuid}) - storage engine will take ownership of drop-pending "
"collection with optime {dropOpTime} and commit timestamp {commitTimestamp}",
- "nss"_attr = nss,
+ "dropCollection: storage engine will take ownership of drop-pending "
+ "collection",
+ "namespace"_attr = nss,
"uuid"_attr = uuid,
"dropOpTime"_attr = dropOpTime,
"commitTimestamp"_attr = commitTimestamp);
@@ -456,9 +464,10 @@ Status DatabaseImpl::dropCollectionEvenIfSystem(OperationContext* opCtx,
LOGV2(20315,
"dropCollection: {nss} ({uuid}) - renaming to drop-pending collection: {dpns} with drop "
"optime {dropOpTime}",
- "nss"_attr = nss,
+ "dropCollection: renaming to drop-pending collection",
+ "namespace"_attr = nss,
"uuid"_attr = uuid,
- "dpns"_attr = dpns,
+ "dropPendingName"_attr = dpns,
"dropOpTime"_attr = dropOpTime);
{
Lock::CollectionLock collLk(opCtx, dpns, MODE_X);
@@ -490,7 +499,8 @@ Status DatabaseImpl::_finishDropCollection(OperationContext* opCtx,
UUID uuid = collection->uuid();
LOGV2(20318,
"Finishing collection drop for {nss} ({uuid}).",
- "nss"_attr = nss,
+ "Finishing collection drop",
+ "namespace"_attr = nss,
"uuid"_attr = uuid);
auto status = DurableCatalog::get(opCtx)->dropCollection(opCtx, collection->getCatalogId());
@@ -546,9 +556,10 @@ Status DatabaseImpl::renameCollection(OperationContext* opCtx,
LOGV2(20319,
"renameCollection: renaming collection {collToRename_uuid} from {fromNss} to {toNss}",
- "collToRename_uuid"_attr = collToRename->uuid(),
- "fromNss"_attr = fromNss,
- "toNss"_attr = toNss);
+ "renameCollection: renaming collection",
+ "uuid"_attr = collToRename->uuid(),
+ "fromName"_attr = fromNss,
+ "toName"_attr = toNss);
Top::get(opCtx->getServiceContext()).collectionDropped(fromNss);
@@ -662,8 +673,8 @@ Collection* DatabaseImpl::createCollection(OperationContext* opCtx,
if (!canAcceptWrites) {
LOGV2_ERROR_OPTIONS(20329,
{logv2::UserAssertAfterLog(ErrorCodes::InvalidOptions)},
- "Attempted to create a new collection {nss} without a UUID",
- "nss"_attr = nss);
+ "Attempted to create a new collection without a UUID",
+ "namespace"_attr = nss);
} else {
optionsWithUUID.uuid.emplace(CollectionUUID::gen());
generatedUUID = true;
@@ -696,9 +707,10 @@ Collection* DatabaseImpl::createCollection(OperationContext* opCtx,
LOGV2(20320,
"createCollection: {nss} with {generatedUUID_generated_provided} UUID: "
"{optionsWithUUID_uuid_get} and options: {options}",
- "nss"_attr = nss,
- "generatedUUID_generated_provided"_attr = (generatedUUID ? "generated" : "provided"),
- "optionsWithUUID_uuid_get"_attr = optionsWithUUID.uuid.get(),
+ "createCollection",
+ "namespace"_attr = nss,
+ "uuidDisposition"_attr = (generatedUUID ? "generated" : "provided"),
+ "uuid"_attr = optionsWithUUID.uuid.get(),
"options"_attr = options.toBSON());
// Create Collection object
@@ -826,7 +838,8 @@ void DatabaseImpl::checkForIdIndexesAndDropPendingCollections(OperationContext*
auto dropOpTime = fassert(40459, nss.getDropPendingNamespaceOpTime());
LOGV2(20321,
"Found drop-pending namespace {nss} with drop optime {dropOpTime}",
- "nss"_attr = nss,
+ "Found drop-pending namespace",
+ "namespace"_attr = nss,
"dropOpTime"_attr = dropOpTime);
repl::DropPendingCollectionReaper::get(opCtx)->addDropPendingNamespace(
opCtx, dropOpTime, nss);
@@ -844,12 +857,12 @@ void DatabaseImpl::checkForIdIndexesAndDropPendingCollections(OperationContext*
LOGV2_OPTIONS(20322,
{logv2::LogTag::kStartupWarnings},
- "WARNING: the collection '{nss}' lacks a unique index on _id. This index is "
+ "WARNING: a collection lacks a unique index on _id. This index is "
"needed for replication to function properly",
- "nss"_attr = nss);
+ "namespace"_attr = nss);
LOGV2_OPTIONS(20323,
{logv2::LogTag::kStartupWarnings},
- "\t To fix this, you need to create a unique index on _id. See "
+ "To fix this, you need to create a unique index on _id. See "
"http://dochub.mongodb.org/core/build-replica-set-indexes");
}
}
diff --git a/src/mongo/db/catalog/drop_collection.cpp b/src/mongo/db/catalog/drop_collection.cpp
index 6df38942003..bd049c11ef2 100644
--- a/src/mongo/db/catalog/drop_collection.cpp
+++ b/src/mongo/db/catalog/drop_collection.cpp
@@ -276,7 +276,8 @@ Status dropCollection(OperationContext* opCtx,
BSONObjBuilder& result,
DropCollectionSystemCollectionMode systemCollectionMode) {
if (!serverGlobalParams.quiet.load()) {
- LOGV2(518070, "CMD: drop {collectionName}", "collectionName"_attr = collectionName);
+ LOGV2(
+ 518070, "CMD: drop {collectionName}", "CMD: drop", "collection"_attr = collectionName);
}
if (MONGO_unlikely(hangDropCollectionBeforeLockAcquisition.shouldFail())) {
@@ -324,7 +325,7 @@ Status dropCollectionForApplyOps(OperationContext* opCtx,
const repl::OpTime& dropOpTime,
DropCollectionSystemCollectionMode systemCollectionMode) {
if (!serverGlobalParams.quiet.load()) {
- LOGV2(20332, "CMD: drop {collectionName}", "collectionName"_attr = collectionName);
+ LOGV2(20332, "CMD: drop {collectionName}", "CMD: drop", "collection"_attr = collectionName);
}
if (MONGO_unlikely(hangDropCollectionBeforeLockAcquisition.shouldFail())) {
diff --git a/src/mongo/db/catalog/drop_database.cpp b/src/mongo/db/catalog/drop_database.cpp
index 8a50570b765..22d154bc6ad 100644
--- a/src/mongo/db/catalog/drop_database.cpp
+++ b/src/mongo/db/catalog/drop_database.cpp
@@ -118,9 +118,10 @@ void _finishDropDatabase(OperationContext* opCtx,
LOGV2(20335,
"dropDatabase {dbName} - dropped {numCollections} collection(s)",
- "dbName"_attr = dbName,
- "numCollections"_attr = numCollections);
- LOGV2(20336, "dropDatabase {dbName} - finished", "dbName"_attr = dbName);
+ "dropDatabase",
+ "db"_attr = dbName,
+ "numCollectionsDropped"_attr = numCollections);
+ LOGV2(20336, "dropDatabase {dbName} - finished", "dropDatabase - finished", "db"_attr = dbName);
}
Status _dropDatabase(OperationContext* opCtx, const std::string& dbName, bool abortIndexBuilds) {
@@ -164,7 +165,10 @@ Status _dropDatabase(OperationContext* opCtx, const std::string& dbName, bool ab
<< "The database is currently being dropped. Database: " << dbName);
}
- LOGV2(20337, "dropDatabase {dbName} - starting", "dbName"_attr = dbName);
+ LOGV2(20337,
+ "dropDatabase {dbName} - starting",
+ "dropDatabase - starting",
+ "db"_attr = dbName);
db->setDropPending(opCtx, true);
// If Database::dropCollectionEventIfSystem() fails, we should reset the drop-pending state
@@ -231,15 +235,17 @@ Status _dropDatabase(OperationContext* opCtx, const std::string& dbName, bool ab
LOGV2(20338,
"dropDatabase {dbName} - dropping collection: {nss}",
- "dbName"_attr = dbName,
- "nss"_attr = nss);
+ "dropDatabase - dropping collection",
+ "db"_attr = dbName,
+ "namespace"_attr = nss);
if (nss.isDropPendingNamespace() && replCoord->isReplEnabled() &&
opCtx->writesAreReplicated()) {
LOGV2(20339,
"dropDatabase {dbName} - found drop-pending collection: {nss}",
- "dbName"_attr = dbName,
- "nss"_attr = nss);
+ "dropDatabase - found drop-pending collection",
+ "db"_attr = dbName,
+ "namespace"_attr = nss);
latestDropPendingOpTime = std::max(
latestDropPendingOpTime, uassertStatusOK(nss.getDropPendingNamespaceOpTime()));
continue;
@@ -341,7 +347,8 @@ Status _dropDatabase(OperationContext* opCtx, const std::string& dbName, bool ab
"dropDatabase {dbName} waiting for {awaitOpTime} to be replicated at "
"{dropDatabaseWriteConcern}. Dropping {numCollectionsToDrop} collection(s), with "
"last collection drop at {latestDropPendingOpTime}",
- "dbName"_attr = dbName,
+ "dropDatabase waiting for replication and dropping collections",
+ "db"_attr = dbName,
"awaitOpTime"_attr = awaitOpTime,
"dropDatabaseWriteConcern"_attr = dropDatabaseWriteConcern.toBSON(),
"numCollectionsToDrop"_attr = numCollectionsToDrop,
@@ -354,9 +361,10 @@ Status _dropDatabase(OperationContext* opCtx, const std::string& dbName, bool ab
LOGV2(20341,
"dropDatabase {dbName} waiting for {awaitOpTime} to be replicated at "
"{userWriteConcern}",
- "dbName"_attr = dbName,
+ "dropDatabase waiting for replication",
+ "db"_attr = dbName,
"awaitOpTime"_attr = awaitOpTime,
- "userWriteConcern"_attr = userWriteConcern.toBSON());
+ "writeConcern"_attr = userWriteConcern.toBSON());
result = replCoord->awaitReplication(opCtx, awaitOpTime, userWriteConcern);
}
@@ -371,10 +379,11 @@ Status _dropDatabase(OperationContext* opCtx, const std::string& dbName, bool ab
LOGV2(20342,
"dropDatabase {dbName} - successfully dropped {numCollectionsToDrop} collection(s) "
"(most recent drop optime: {awaitOpTime}) after {result_duration}. dropping database",
- "dbName"_attr = dbName,
- "numCollectionsToDrop"_attr = numCollectionsToDrop,
- "awaitOpTime"_attr = awaitOpTime,
- "result_duration"_attr = result.duration);
+ "dropDatabase - successfully dropped collections",
+ "db"_attr = dbName,
+ "numCollectionsDropped"_attr = numCollectionsToDrop,
+ "mostRecentDropOpTime"_attr = awaitOpTime,
+ "duration"_attr = result.duration);
}
if (MONGO_unlikely(dropDatabaseHangAfterAllCollectionsDrop.shouldFail())) {
diff --git a/src/mongo/db/catalog/drop_indexes.cpp b/src/mongo/db/catalog/drop_indexes.cpp
index a3bf4802de9..1310fba59d7 100644
--- a/src/mongo/db/catalog/drop_indexes.cpp
+++ b/src/mongo/db/catalog/drop_indexes.cpp
@@ -293,9 +293,9 @@ Status dropIndexes(OperationContext* opCtx,
if (!serverGlobalParams.quiet.load()) {
LOGV2(51806,
- "CMD: dropIndexes {nss}: {cmdObj_kIndexFieldName_false}",
- "nss"_attr = nss,
- "cmdObj_kIndexFieldName_false"_attr = cmdObj[kIndexFieldName].toString(false));
+ "CMD: dropIndexes",
+ "namespace"_attr = nss,
+ "indexes"_attr = cmdObj[kIndexFieldName].toString(false));
}
result->appendNumber("nIndexesWas", collection->getIndexCatalog()->numIndexesTotal(opCtx));
@@ -453,9 +453,9 @@ Status dropIndexesForApplyOps(OperationContext* opCtx,
if (!serverGlobalParams.quiet.load()) {
LOGV2(20344,
- "CMD: dropIndexes {nss}: {cmdObj_kIndexFieldName_false}",
- "nss"_attr = nss,
- "cmdObj_kIndexFieldName_false"_attr = cmdObj[kIndexFieldName].toString(false));
+ "CMD: dropIndexes",
+ "namespace"_attr = nss,
+ "indexes"_attr = cmdObj[kIndexFieldName].toString(false));
}
BackgroundOperation::assertNoBgOpInProgForNs(nss);
diff --git a/src/mongo/db/catalog/index_build_block.cpp b/src/mongo/db/catalog/index_build_block.cpp
index 60a4f4ba045..1f7a44abfb2 100644
--- a/src/mongo/db/catalog/index_build_block.cpp
+++ b/src/mongo/db/catalog/index_build_block.cpp
@@ -171,8 +171,9 @@ void IndexBuildBlock::success(OperationContext* opCtx, Collection* collection) {
LOGV2(20345,
"index build: done building index {indexName} on ns {nss}",
- "indexName"_attr = _indexName,
- "nss"_attr = _nss);
+ "index build: done building",
+ "namespace"_attr = _nss,
+ "index"_attr = _indexName);
collection->indexBuildSuccess(opCtx, _indexCatalogEntry);
auto svcCtx = opCtx->getClient()->getServiceContext();
diff --git a/src/mongo/db/catalog/index_builds_manager.cpp b/src/mongo/db/catalog/index_builds_manager.cpp
index dbb307c98af..61a147b481c 100644
--- a/src/mongo/db/catalog/index_builds_manager.cpp
+++ b/src/mongo/db/catalog/index_builds_manager.cpp
@@ -116,10 +116,11 @@ Status IndexBuildsManager::setUpIndexBuild(OperationContext* opCtx,
LOGV2(
20346,
"Index build initialized: {buildUUID}: {nss} ({collection_uuid} ): indexes: {indexes_size}",
- "buildUUID"_attr = buildUUID,
- "nss"_attr = nss,
- "collection_uuid"_attr = collection->uuid(),
- "indexes_size"_attr = indexes.size());
+ "Index build initialized",
+ "indexBuildUUID"_attr = buildUUID,
+ "namespace"_attr = nss,
+ "collectionUuid"_attr = collection->uuid(),
+ "numIndexes"_attr = indexes.size());
return Status::OK();
}
@@ -166,13 +167,15 @@ StatusWith<std::pair<long long, long long>> IndexBuildsManager::startBuildingInd
if (repair == RepairData::kNo) {
LOGV2_FATAL(31396,
"Invalid BSON detected at {id}: {validStatus}",
+ "Invalid BSON detected",
"id"_attr = id,
- "validStatus"_attr = redact(validStatus));
+ "error"_attr = redact(validStatus));
}
LOGV2_WARNING(20348,
"Invalid BSON detected at {id}: {validStatus}. Deleting.",
+ "Invalid BSON detected; deleting.",
"id"_attr = id,
- "validStatus"_attr = redact(validStatus));
+ "error"_attr = redact(validStatus));
rs->deleteRecord(opCtx, id);
} else {
numRecords++;
@@ -287,6 +290,7 @@ bool IndexBuildsManager::abortIndexBuildWithoutCleanup(OperationContext* opCtx,
LOGV2(20347,
"Index build aborted without cleanup: {buildUUID}: {reason}",
+ "Index build aborted without cleanup",
"buildUUID"_attr = buildUUID,
"reason"_attr = reason);
diff --git a/src/mongo/db/catalog/index_catalog_entry_impl.cpp b/src/mongo/db/catalog/index_catalog_entry_impl.cpp
index 44630f8772a..832d3d2767d 100644
--- a/src/mongo/db/catalog/index_catalog_entry_impl.cpp
+++ b/src/mongo/db/catalog/index_catalog_entry_impl.cpp
@@ -288,9 +288,8 @@ Status IndexCatalogEntryImpl::_setMultikeyInMultiDocumentTransaction(
auto status = opCtx->recoveryUnit()->setTimestamp(writeTs);
if (status.code() == ErrorCodes::BadValue) {
LOGV2(20352,
- "Temporarily could not timestamp the multikey catalog write, retrying. "
- "{status_reason}",
- "status_reason"_attr = status.reason());
+ "Temporarily could not timestamp the multikey catalog write, retrying.",
+ "reason"_attr = status.reason());
throw WriteConflictException();
}
fassert(31164, status);
diff --git a/src/mongo/db/catalog/index_catalog_impl.cpp b/src/mongo/db/catalog/index_catalog_impl.cpp
index 5d72de0f91a..c1e444128ff 100644
--- a/src/mongo/db/catalog/index_catalog_impl.cpp
+++ b/src/mongo/db/catalog/index_catalog_impl.cpp
@@ -246,35 +246,29 @@ void IndexCatalogImpl::_logInternalState(OperationContext* opCtx,
bool haveIdIndex) {
invariant(opCtx->lockState()->isCollectionLockedForMode(_collection->ns(), MODE_X));
- LOGV2_ERROR(
- 20365,
- "Internal Index Catalog state: numIndexesTotal(): {numIndexesTotal_opCtx} "
- "numSystemIndexesEntries: {numIndexesInCollectionCatalogEntry} _readyIndexes.size(): "
- "{readyIndexes_size} _buildingIndexes.size(): {buildingIndexes_size} indexNamesToDrop: "
- "{indexNamesToDrop_size} haveIdIndex: {haveIdIndex}",
- "numIndexesTotal_opCtx"_attr = numIndexesTotal(opCtx),
- "numIndexesInCollectionCatalogEntry"_attr = numIndexesInCollectionCatalogEntry,
- "readyIndexes_size"_attr = _readyIndexes.size(),
- "buildingIndexes_size"_attr = _buildingIndexes.size(),
- "indexNamesToDrop_size"_attr = indexNamesToDrop.size(),
- "haveIdIndex"_attr = haveIdIndex);
+ LOGV2_ERROR(20365,
+ "Internal Index Catalog state",
+ "numIndexesTotal"_attr = numIndexesTotal(opCtx),
+ "numIndexesInCollectionCatalogEntry"_attr = numIndexesInCollectionCatalogEntry,
+ "readyIndexes_size"_attr = _readyIndexes.size(),
+ "buildingIndexes_size"_attr = _buildingIndexes.size(),
+ "indexNamesToDrop_size"_attr = indexNamesToDrop.size(),
+ "haveIdIndex"_attr = haveIdIndex);
// Report the ready indexes.
- LOGV2_ERROR(20366, "Ready indexes:");
for (const auto& entry : _readyIndexes) {
const IndexDescriptor* desc = entry->descriptor();
LOGV2_ERROR(20367,
- "Index '{desc_indexName}' with specification: {desc_infoObj}",
+ "readyIndex",
"desc_indexName"_attr = desc->indexName(),
"desc_infoObj"_attr = redact(desc->infoObj()));
}
// Report the in-progress indexes.
- LOGV2_ERROR(20368, "In-progress indexes:");
for (const auto& entry : _buildingIndexes) {
const IndexDescriptor* desc = entry->descriptor();
LOGV2_ERROR(20369,
- "Index '{desc_indexName}' with specification: {desc_infoObj}",
+ "inprogIndex",
"desc_indexName"_attr = desc->indexName(),
"desc_infoObj"_attr = redact(desc->infoObj()));
}
@@ -287,36 +281,28 @@ void IndexCatalogImpl::_logInternalState(OperationContext* opCtx,
durableCatalog->getAllIndexes(opCtx, _collection->getCatalogId(), &allIndexes);
durableCatalog->getReadyIndexes(opCtx, _collection->getCatalogId(), &readyIndexes);
- LOGV2_ERROR(20371, "All indexes:");
for (const auto& index : allIndexes) {
LOGV2_ERROR(20372,
- "Index '{index}' with specification: "
- "{durableCatalog_getIndexSpec_opCtx_collection_getCatalogId_index}",
+ "allIndexes",
"index"_attr = index,
- "durableCatalog_getIndexSpec_opCtx_collection_getCatalogId_index"_attr = redact(
+ "spec"_attr = redact(
durableCatalog->getIndexSpec(opCtx, _collection->getCatalogId(), index)));
}
- LOGV2_ERROR(20373, "Ready indexes:");
for (const auto& index : readyIndexes) {
LOGV2_ERROR(20374,
- "Index '{index}' with specification: "
- "{durableCatalog_getIndexSpec_opCtx_collection_getCatalogId_index}",
+ "readyIndexes",
"index"_attr = index,
- "durableCatalog_getIndexSpec_opCtx_collection_getCatalogId_index"_attr = redact(
+ "spec"_attr = redact(
durableCatalog->getIndexSpec(opCtx, _collection->getCatalogId(), index)));
}
- LOGV2_ERROR(20375, "Index names to drop:");
for (const auto& indexNameToDrop : indexNamesToDrop) {
- LOGV2_ERROR(
- 20376,
- "Index '{indexNameToDrop}' with specification: "
- "{durableCatalog_getIndexSpec_opCtx_collection_getCatalogId_indexNameToDrop}",
- "indexNameToDrop"_attr = indexNameToDrop,
- "durableCatalog_getIndexSpec_opCtx_collection_getCatalogId_indexNameToDrop"_attr =
- redact(durableCatalog->getIndexSpec(
- opCtx, _collection->getCatalogId(), indexNameToDrop)));
+ LOGV2_ERROR(20376,
+ "indexNamesToDrop",
+ "index"_attr = indexNameToDrop,
+ "spec"_attr = redact(durableCatalog->getIndexSpec(
+ opCtx, _collection->getCatalogId(), indexNameToDrop)));
}
}
@@ -422,11 +408,10 @@ IndexCatalogEntry* IndexCatalogImpl::createIndexEntry(OperationContext* opCtx,
Status status = _isSpecOk(opCtx, descriptor->infoObj());
if (!status.isOK()) {
LOGV2_FATAL_NOTRACE(28782,
- "Found an invalid index {descriptor_infoObj} on the {collection_ns} "
- "collection: {status}",
- "descriptor_infoObj"_attr = descriptor->infoObj(),
- "collection_ns"_attr = _collection->ns(),
- "status"_attr = redact(status));
+ "Found an invalid index",
+ "descriptor"_attr = descriptor->infoObj(),
+ "namespace"_attr = _collection->ns(),
+ "error"_attr = redact(status));
}
auto engine = opCtx->getServiceContext()->getStorageEngine();
@@ -874,7 +859,11 @@ Status IndexCatalogImpl::_doesSpecConflictWithExisting(OperationContext* opCtx,
if (numIndexesTotal(opCtx) >= kMaxNumIndexesAllowed) {
string s = str::stream() << "add index fails, too many indexes for " << _collection->ns()
<< " key:" << key;
- LOGV2(20354, "{s}", "s"_attr = s);
+ LOGV2(20354,
+ "Exceeded maximum number of indexes",
+ "namespace"_attr = _collection->ns(),
+ "key"_attr = key,
+ "maxNumIndexes"_attr = kMaxNumIndexesAllowed);
return Status(ErrorCodes::CannotCreateIndex, s);
}
@@ -1090,9 +1079,10 @@ void IndexCatalogImpl::deleteIndexFromDisk(OperationContext* opCtx, const string
LOGV2_WARNING(
20364,
"couldn't drop index {indexName} on collection: {collection_ns} because of {status}",
- "indexName"_attr = indexName,
- "collection_ns"_attr = _collection->ns(),
- "status"_attr = redact(status));
+ "couldn't drop index",
+ "index"_attr = indexName,
+ "namespace"_attr = _collection->ns(),
+ "reason"_attr = redact(status));
}
}
@@ -1148,7 +1138,7 @@ int IndexCatalogImpl::numIndexesTotal(OperationContext* opCtx) const {
}
// Ignore the write conflict for read transactions; we will eventually roll back this
// transaction anyway.
- LOGV2(20356, " Skipping dassert check due to: {ex}", "ex"_attr = ex);
+ LOGV2(20356, "Skipping dassert check", "reason"_attr = ex);
}
}
@@ -1169,19 +1159,15 @@ int IndexCatalogImpl::numIndexesReady(OperationContext* opCtx) const {
// There is a potential inconsistency where the index information in the collection catalog
// entry and the index catalog differ. Log as much information as possible here.
if (itIndexes.size() != completedIndexes.size()) {
- LOGV2(20357, "index catalog reports: ");
for (const IndexDescriptor* i : itIndexes) {
- LOGV2(20358, " index: {i}", "i"_attr = *i);
+ LOGV2(20358, "index catalog reports", "index"_attr = *i);
}
- LOGV2(20359, "collection catalog reports: ");
for (auto const& i : completedIndexes) {
- LOGV2(20360, " index: {i}", "i"_attr = i);
+ LOGV2(20360, "collection catalog reports", "index"_attr = i);
}
- LOGV2(20361,
- "collection uuid: {collection_uuid}",
- "collection_uuid"_attr = _collection->uuid());
+ LOGV2(20361, "uuid", "collection_uuid"_attr = _collection->uuid());
invariant(itIndexes.size() == completedIndexes.size(),
"The number of ready indexes reported in the collection metadata catalog did "
@@ -1563,9 +1549,10 @@ void IndexCatalogImpl::_unindexKeys(OperationContext* opCtx,
if (!status.isOK()) {
LOGV2(20362,
"Couldn't unindex record {obj} from collection {collection_ns}. Status: {status}",
- "obj"_attr = redact(obj),
- "collection_ns"_attr = _collection->ns(),
- "status"_attr = redact(status));
+ "Couldn't unindex record",
+ "record"_attr = redact(obj),
+ "namespace"_attr = _collection->ns(),
+ "reason"_attr = redact(status));
}
if (keysDeletedOut) {
@@ -1703,9 +1690,7 @@ Status IndexCatalogImpl::compactIndexes(OperationContext* opCtx) {
"entry_descriptor"_attr = *(entry->descriptor()));
Status status = entry->accessMethod()->compact(opCtx);
if (!status.isOK()) {
- LOGV2_ERROR(20377,
- "failed to compact index: {entry_descriptor}",
- "entry_descriptor"_attr = *(entry->descriptor()));
+ LOGV2_ERROR(20377, "failed to compact index", "index"_attr = *(entry->descriptor()));
return status;
}
}
diff --git a/src/mongo/db/catalog/index_key_validate.cpp b/src/mongo/db/catalog/index_key_validate.cpp
index 66f91fdd655..e6ec964bf25 100644
--- a/src/mongo/db/catalog/index_key_validate.cpp
+++ b/src/mongo/db/catalog/index_key_validate.cpp
@@ -245,6 +245,7 @@ BSONObj removeUnknownFields(const BSONObj& indexSpec) {
} else {
LOGV2_WARNING(23878,
"Removing field '{fieldName}' from index spec: {indexSpec}",
+ "Removing unknown field from index spec",
"fieldName"_attr = redact(fieldName),
"indexSpec"_attr = redact(indexSpec));
}
diff --git a/src/mongo/db/catalog/index_timestamp_helper.cpp b/src/mongo/db/catalog/index_timestamp_helper.cpp
index 2e9f2cc7074..e553b721ee2 100644
--- a/src/mongo/db/catalog/index_timestamp_helper.cpp
+++ b/src/mongo/db/catalog/index_timestamp_helper.cpp
@@ -110,8 +110,8 @@ void IndexTimestampHelper::setGhostCommitTimestampForWrite(OperationContext* opC
auto status = setGhostTimestamp(opCtx, commitTimestamp);
if (status.code() == ErrorCodes::BadValue) {
LOGV2(20379,
- "Temporarily could not apply ghost commit timestamp. {status_reason}",
- "status_reason"_attr = status.reason());
+ "Temporarily could not apply ghost commit timestamp.",
+ "reason"_attr = status.reason());
throw WriteConflictException();
}
LOGV2_DEBUG(20380,
@@ -174,8 +174,8 @@ bool IndexTimestampHelper::setGhostCommitTimestampForCatalogWrite(OperationConte
setGhostTimestamp(opCtx, LogicalClock::get(opCtx)->getClusterTime().asTimestamp());
if (status.code() == ErrorCodes::BadValue) {
LOGV2(20381,
- "Temporarily could not timestamp the index build commit, retrying. {status_reason}",
- "status_reason"_attr = status.reason());
+ "Temporarily could not timestamp the index build commit, retrying.",
+ "reason"_attr = status.reason());
throw WriteConflictException();
}
fassert(50701, status);
diff --git a/src/mongo/db/concurrency/lock_state.cpp b/src/mongo/db/concurrency/lock_state.cpp
index 72232be04e4..13b0dfd55eb 100644
--- a/src/mongo/db/concurrency/lock_state.cpp
+++ b/src/mongo/db/concurrency/lock_state.cpp
@@ -215,7 +215,11 @@ void LockerImpl::dump() const {
for (auto it = _requests.begin(); !it.finished(); it.next())
entries.push_back({it.key(), it->status, it->mode});
}
- LOGV2(20523, "Locker id {id} status: {requests}", "id"_attr = _id, "requests"_attr = entries);
+ LOGV2(20523,
+ "Locker id {id} status: {requests}",
+ "Locker status",
+ "id"_attr = _id,
+ "requests"_attr = entries);
}