author    Henrik Edin <henrik.edin@mongodb.com>  2020-05-26 13:45:40 -0400
committer Evergreen Agent <no-reply@evergreen.mongodb.com>  2020-06-05 17:27:44 +0000
commit    779f9e866b798cc8b6de7ebe286b516d26f3d84b (patch)
tree      4045e019e9d39cbe6a68f6f34aad53b45a6d0ac4
parent    959584ed0ea88a74fe49458f9018a97b10b23cc0 (diff)
download  mongo-779f9e866b798cc8b6de7ebe286b516d26f3d84b.tar.gz
SERVER-48350 Log cleanup in db/catalog, db/index and db/storage/kv
(cherry picked from commit d7e1821e22d21cc20cd3f6930811f3915193a45d)
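
This is a mechanical logging cleanup: LOGV2 statements lose their trailing periods, gain a short stable message string alongside the legacy {placeholder} format string, and have their attribute names normalized ("ns"/"nss" becomes "namespace", "status"/"reason" becomes "error", and so on). A condensed before/after sketch of the pattern, adapted from the collection_impl.cpp hunk below (illustrative only, not part of the patch):

    // Before: ad-hoc attribute name, NamespaceString flattened to a string,
    // trailing period in the message.
    LOGV2(20290,
          "Failpoint failAfterBulkLoadDocInsert enabled. Throwing "
          "WriteConflictException.",
          "ns_ns"_attr = _ns.ns());

    // After: no trailing punctuation, the standardized "namespace" attribute,
    // and the NamespaceString passed through so it serializes structurally.
    LOGV2(20290,
          "Failpoint failAfterBulkLoadDocInsert enabled. Throwing "
          "WriteConflictException",
          "namespace"_attr = _ns);
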
-rw-r--r--  jstests/multiVersion/genericSetFCVUsage/upgrade_downgrade_while_creating_collection.js | 3
-rw-r--r--  jstests/noPassthrough/commit_quorum_does_not_hang_with_initial_sync.js | 2
-rw-r--r--  jstests/noPassthrough/drop_database_aborts_in_progress_index_builds.js | 2
-rw-r--r--  jstests/noPassthrough/dropdatabase_respect_maxtimems.js | 2
-rw-r--r--  jstests/noPassthrough/index_stepdown_after_init.js | 4
-rw-r--r--  jstests/noPassthrough/step_down_during_drop_database.js | 2
-rw-r--r--  jstests/replsets/create_drop_database_different_casing.js | 2
-rw-r--r--  jstests/replsets/drop_databases_two_phase.js | 2
-rw-r--r--  jstests/replsets/rollback_drop_database.js | 2
-rw-r--r--  src/mongo/db/catalog/coll_mod.cpp | 2
-rw-r--r--  src/mongo/db/catalog/collection_catalog.cpp | 2
-rw-r--r--  src/mongo/db/catalog/collection_compact.cpp | 15
-rw-r--r--  src/mongo/db/catalog/collection_impl.cpp | 11
-rw-r--r--  src/mongo/db/catalog/collection_validation.cpp | 20
-rw-r--r--  src/mongo/db/catalog/create_collection.cpp | 12
-rw-r--r--  src/mongo/db/catalog/database_impl.cpp | 35
-rw-r--r--  src/mongo/db/catalog/drop_collection.cpp | 5
-rw-r--r--  src/mongo/db/catalog/drop_database.cpp | 11
-rw-r--r--  src/mongo/db/catalog/index_builds_manager.cpp | 2
-rw-r--r--  src/mongo/db/catalog/index_catalog_entry_impl.cpp | 2
-rw-r--r--  src/mongo/db/catalog/index_catalog_impl.cpp | 22
-rw-r--r--  src/mongo/db/catalog/index_timestamp_helper.cpp | 2
-rw-r--r--  src/mongo/db/catalog/multi_index_block.cpp | 37
-rw-r--r--  src/mongo/db/catalog/rename_collection.cpp | 8
-rw-r--r--  src/mongo/db/catalog/rename_collection_test.cpp | 2
-rw-r--r--  src/mongo/db/catalog/validate_adaptor.cpp | 26
-rw-r--r--  src/mongo/db/index/expression_keys_private.cpp | 7
-rw-r--r--  src/mongo/db/index/index_access_method.cpp | 22
-rw-r--r--  src/mongo/db/index/index_access_method_factory_impl.cpp | 5
-rw-r--r--  src/mongo/db/index/index_build_interceptor.cpp | 17
-rw-r--r--  src/mongo/db/index/s2_access_method.cpp | 3
-rw-r--r--  src/mongo/db/storage/kv/kv_drop_pending_ident_reaper.cpp | 36
32 files changed, 177 insertions(+), 148 deletions(-)
diff --git a/jstests/multiVersion/genericSetFCVUsage/upgrade_downgrade_while_creating_collection.js b/jstests/multiVersion/genericSetFCVUsage/upgrade_downgrade_while_creating_collection.js
index b7bcf47541a..4a7f2e3c318 100644
--- a/jstests/multiVersion/genericSetFCVUsage/upgrade_downgrade_while_creating_collection.js
+++ b/jstests/multiVersion/genericSetFCVUsage/upgrade_downgrade_while_creating_collection.js
@@ -37,7 +37,8 @@ for (let versions of [{from: lastStableFCV, to: latestFCV}, {from: latestFCV, to
}, primary.port);
assert.soon(function() {
- return rawMongoProgramOutput().match(/createCollection.*test.mycoll/);
+ return rawMongoProgramOutput().match(
+ /\"id\":20320.*test.mycoll/); // Create Collection log
});
awaitUpgradeFCV = startParallelShell(
diff --git a/jstests/noPassthrough/commit_quorum_does_not_hang_with_initial_sync.js b/jstests/noPassthrough/commit_quorum_does_not_hang_with_initial_sync.js
index ec7f58d0aef..78dc6792e01 100644
--- a/jstests/noPassthrough/commit_quorum_does_not_hang_with_initial_sync.js
+++ b/jstests/noPassthrough/commit_quorum_does_not_hang_with_initial_sync.js
@@ -104,7 +104,7 @@ checkLog.containsJson(secondary, 21184);
function checkForIndexes(indexes) {
for (let i = 0; i < indexes.length; i++) {
checkLog.containsJson(secondary, 20384, {
- "descriptor": function(obj) {
+ "properties": function(obj) {
return obj.name === indexes[i];
}
});
diff --git a/jstests/noPassthrough/drop_database_aborts_in_progress_index_builds.js b/jstests/noPassthrough/drop_database_aborts_in_progress_index_builds.js
index d42e87e2eda..61311989176 100644
--- a/jstests/noPassthrough/drop_database_aborts_in_progress_index_builds.js
+++ b/jstests/noPassthrough/drop_database_aborts_in_progress_index_builds.js
@@ -68,7 +68,7 @@ try {
checkLog.contains(
testDB.getMongo(),
- "dropDatabase - fail point dropDatabaseHangAfterWaitingForIndexBuilds enabled.");
+ "dropDatabase - fail point dropDatabaseHangAfterWaitingForIndexBuilds enabled");
// Cannot create a collection on the database while it is drop pending.
assert.commandFailedWithCode(testDB.createCollection("third"), ErrorCodes.DatabaseDropPending);
diff --git a/jstests/noPassthrough/dropdatabase_respect_maxtimems.js b/jstests/noPassthrough/dropdatabase_respect_maxtimems.js
index e3553351f7a..d27c42ef1b9 100644
--- a/jstests/noPassthrough/dropdatabase_respect_maxtimems.js
+++ b/jstests/noPassthrough/dropdatabase_respect_maxtimems.js
@@ -38,7 +38,7 @@ const dropDB = rst.getPrimary().getDB("drop");
checkLog.contains(
dropDB.getMongo(),
- "dropDatabase - fail point dropDatabaseHangAfterAllCollectionsDrop enabled. Blocking until fail point is disabled.");
+ "dropDatabase - fail point dropDatabaseHangAfterAllCollectionsDrop enabled. Blocking until fail point is disabled");
let sleepCommand = startParallelShell(() => {
// Make dropDatabase timeout.
diff --git a/jstests/noPassthrough/index_stepdown_after_init.js b/jstests/noPassthrough/index_stepdown_after_init.js
index ddc9cfd69c5..9590f41d3ed 100644
--- a/jstests/noPassthrough/index_stepdown_after_init.js
+++ b/jstests/noPassthrough/index_stepdown_after_init.js
@@ -37,8 +37,8 @@ const createIdx = IndexBuildTest.startIndexBuild(primary, coll.getFullName(), {a
if (isJsonLog(primary)) {
checkLog.containsJson(primary, 20384, {
- ns: coll.getFullName(),
- descriptor: (desc) => {
+ namespace: coll.getFullName(),
+ properties: (desc) => {
return desc.name === 'a_1';
},
});
diff --git a/jstests/noPassthrough/step_down_during_drop_database.js b/jstests/noPassthrough/step_down_during_drop_database.js
index 40b566de806..f627684ce91 100644
--- a/jstests/noPassthrough/step_down_during_drop_database.js
+++ b/jstests/noPassthrough/step_down_during_drop_database.js
@@ -38,7 +38,7 @@ const awaitShell = startParallelShell(() => {
// Ensure the dropDatabase command has begun before stepping down.
checkLog.contains(primary,
"dropDatabase - fail point dropDatabaseHangAfterAllCollectionsDrop " +
- "enabled. Blocking until fail point is disabled.");
+ "enabled. Blocking until fail point is disabled");
assert.commandWorked(testDB.adminCommand({replSetStepDown: 60, force: true}));
replSet.waitForState(primary, ReplSetTest.State.SECONDARY);
diff --git a/jstests/replsets/create_drop_database_different_casing.js b/jstests/replsets/create_drop_database_different_casing.js
index f839c84526a..9b0e201cad1 100644
--- a/jstests/replsets/create_drop_database_different_casing.js
+++ b/jstests/replsets/create_drop_database_different_casing.js
@@ -48,7 +48,7 @@ rst.awaitReplication();
failPoint.off();
if (isJsonLog(primary)) {
- checkLog.contains(primary, `dropDatabase - finished","attr":{"db":"${dbNameUpper}"}}`);
+ checkLog.containsJson(primary, 20336, {"db": dbNameUpper});
} else {
checkLog.contains(primary, "dropDatabase " + dbNameUpper + " - finished");
}
diff --git a/jstests/replsets/drop_databases_two_phase.js b/jstests/replsets/drop_databases_two_phase.js
index fbac7c0a721..91c75da7900 100644
--- a/jstests/replsets/drop_databases_two_phase.js
+++ b/jstests/replsets/drop_databases_two_phase.js
@@ -156,7 +156,7 @@ if (isJsonLog(db.getMongo())) {
checkLog.contains(db.getMongo(),
`dropDatabase - dropping collection","attr":{"db":"${
dbNameToDrop}","namespace":"${dbNameToDrop}.${collNameToDrop}"`);
- checkLog.contains(db.getMongo(), 'dropDatabase - finished","attr":{"db":"dbToDrop"');
+ checkLog.containsJson(db.getMongo(), 20336, {"db": "dbToDrop"});
} else {
checkLog.contains(db.getMongo(), "dropping collection: " + dbNameToDrop + "." + collNameToDrop);
checkLog.contains(db.getMongo(), "dropped 1 collection(s)");
diff --git a/jstests/replsets/rollback_drop_database.js b/jstests/replsets/rollback_drop_database.js
index e4783266296..01d3e3c0659 100644
--- a/jstests/replsets/rollback_drop_database.js
+++ b/jstests/replsets/rollback_drop_database.js
@@ -40,7 +40,7 @@ let waitForDropDatabaseToFinish = startParallelShell(dropDatabaseFn, rollbackNod
// Ensure that we've hit the failpoint before moving on.
checkLog.contains(rollbackNode,
- "dropDatabase - fail point dropDatabaseHangBeforeInMemoryDrop enabled.");
+ "dropDatabase - fail point dropDatabaseHangBeforeInMemoryDrop enabled");
// Wait for the secondary to finish dropping the collection (the last replicated entry).
// We use the default 10-minute timeout for this.
diff --git a/src/mongo/db/catalog/coll_mod.cpp b/src/mongo/db/catalog/coll_mod.cpp
index 999c7e8ffa9..3c8eb66c9ea 100644
--- a/src/mongo/db/catalog/coll_mod.cpp
+++ b/src/mongo/db/catalog/coll_mod.cpp
@@ -466,7 +466,7 @@ Status _collModInternal(OperationContext* opCtx,
oldExpireSecs, newExpireSecs, oldHidden, newHidden, result));
if (MONGO_unlikely(assertAfterIndexUpdate.shouldFail())) {
- LOGV2(20307, "collMod - assertAfterIndexUpdate fail point enabled.");
+ LOGV2(20307, "collMod - assertAfterIndexUpdate fail point enabled");
uasserted(50970, "trigger rollback after the index update");
}
}
diff --git a/src/mongo/db/catalog/collection_catalog.cpp b/src/mongo/db/catalog/collection_catalog.cpp
index 8b1de565c47..0596bad93eb 100644
--- a/src/mongo/db/catalog/collection_catalog.cpp
+++ b/src/mongo/db/catalog/collection_catalog.cpp
@@ -429,7 +429,7 @@ void CollectionCatalog::registerCollection(CollectionUUID uuid, std::unique_ptr<
if (_collections.find(ns) != _collections.end()) {
LOGV2(20279,
"Conflicted creating a collection. ns: {coll_ns} ({coll_uuid}).",
- "Conflicted creating a collection.",
+ "Conflicted creating a collection",
"namespace"_attr = (*coll)->ns(),
"uuid"_attr = (*coll)->uuid());
throw WriteConflictException();
diff --git a/src/mongo/db/catalog/collection_compact.cpp b/src/mongo/db/catalog/collection_compact.cpp
index 5b9fa7a4878..5ff9cbc01b7 100644
--- a/src/mongo/db/catalog/collection_compact.cpp
+++ b/src/mongo/db/catalog/collection_compact.cpp
@@ -105,8 +105,9 @@ StatusWith<int64_t> compactCollection(OperationContext* opCtx,
LOGV2_OPTIONS(20284,
{LogComponent::kCommand},
- "compact {collectionNss} begin",
- "collectionNss"_attr = collectionNss);
+ "compact {namespace} begin",
+ "Compact begin",
+ "namespace"_attr = collectionNss);
auto oldTotalSize = recordStore->storageSize(opCtx) + collection->getIndexSize(opCtx);
auto indexCatalog = collection->getIndexCatalog();
@@ -122,11 +123,11 @@ StatusWith<int64_t> compactCollection(OperationContext* opCtx,
auto totalSizeDiff =
oldTotalSize - recordStore->storageSize(opCtx) - collection->getIndexSize(opCtx);
- LOGV2(20285,
- "compact {collectionNss} bytes freed: {totalSizeDiff}",
- "collectionNss"_attr = collectionNss,
- "totalSizeDiff"_attr = totalSizeDiff);
- LOGV2(20286, "compact {collectionNss} end", "collectionNss"_attr = collectionNss);
+ LOGV2(20286,
+ "compact {namespace} end, bytes freed: {freedBytes}",
+ "Compact end"
+ "namespace"_attr = collectionNss,
+ "freedBytes"_attr = totalSizeDiff);
return totalSizeDiff;
}
diff --git a/src/mongo/db/catalog/collection_impl.cpp b/src/mongo/db/catalog/collection_impl.cpp
index e00a555c43b..c145bd1bc8a 100644
--- a/src/mongo/db/catalog/collection_impl.cpp
+++ b/src/mongo/db/catalog/collection_impl.cpp
@@ -115,7 +115,10 @@ Status checkFailCollectionInsertsFailPoint(const NamespaceString& ns, const BSON
const std::string msg = str::stream()
<< "Failpoint (failCollectionInserts) has been enabled (" << data
<< "), so rejecting insert (first doc): " << firstDoc;
- LOGV2(20287, "{msg}", "msg"_attr = msg);
+ LOGV2(20287,
+ "Failpoint (failCollectionInserts) has been enabled, so rejecting insert",
+ "data"_attr = data,
+ "document"_attr = firstDoc);
s = {ErrorCodes::FailPointEnabled, msg};
},
[&](const BSONObj& data) {
@@ -526,7 +529,7 @@ Status CollectionImpl::insertDocuments(OperationContext* opCtx,
LOGV2(20289,
"hangAfterCollectionInserts fail point enabled. Blocking "
"until fail point is disabled.",
- "ns"_attr = _ns,
+ "namespace"_attr = _ns,
"whenFirst"_attr = whenFirst);
hangAfterCollectionInserts.pauseWhileSet(opCtx);
},
@@ -581,8 +584,8 @@ Status CollectionImpl::insertDocumentForBulkLoader(OperationContext* opCtx,
if (MONGO_unlikely(failAfterBulkLoadDocInsert.shouldFail())) {
LOGV2(20290,
"Failpoint failAfterBulkLoadDocInsert enabled. Throwing "
- "WriteConflictException.",
- "ns_ns"_attr = _ns.ns());
+ "WriteConflictException",
+ "namespace"_attr = _ns);
throw WriteConflictException();
}
diff --git a/src/mongo/db/catalog/collection_validation.cpp b/src/mongo/db/catalog/collection_validation.cpp
index ed762ab4fdd..c7289a9c028 100644
--- a/src/mongo/db/catalog/collection_validation.cpp
+++ b/src/mongo/db/catalog/collection_validation.cpp
@@ -94,7 +94,7 @@ std::map<std::string, int64_t> _validateIndexesInternalStructure(
LOGV2_OPTIONS(20295,
{LogComponent::kIndex},
- "validating internal structure",
+ "Validating internal structure",
"index"_attr = descriptor->indexName(),
"namespace"_attr = descriptor->parentNS());
@@ -133,7 +133,7 @@ void _validateIndexes(OperationContext* opCtx,
LOGV2_OPTIONS(20296,
{LogComponent::kIndex},
- "validating index consistency",
+ "Validating index consistency",
"index"_attr = descriptor->indexName(),
"namespace"_attr = descriptor->parentNS());
@@ -193,7 +193,7 @@ void _gatherIndexEntryErrors(OperationContext* opCtx,
indexConsistency->setSecondPhase();
LOGV2_OPTIONS(
- 20297, {LogComponent::kIndex}, "Starting to traverse through all the document key sets.");
+ 20297, {LogComponent::kIndex}, "Starting to traverse through all the document key sets");
// During the second phase of validation, iterate through each documents key set and only record
// the keys that were inconsistent during the first phase of validation.
@@ -204,8 +204,8 @@ void _gatherIndexEntryErrors(OperationContext* opCtx,
}
LOGV2_OPTIONS(
- 20298, {LogComponent::kIndex}, "Finished traversing through all the document key sets.");
- LOGV2_OPTIONS(20299, {LogComponent::kIndex}, "Starting to traverse through all the indexes.");
+ 20298, {LogComponent::kIndex}, "Finished traversing through all the document key sets");
+ LOGV2_OPTIONS(20299, {LogComponent::kIndex}, "Starting to traverse through all the indexes");
// Iterate through all the indexes in the collection and only record the index entry keys that
// had inconsistencies during the first phase.
@@ -225,7 +225,7 @@ void _gatherIndexEntryErrors(OperationContext* opCtx,
/*ValidateResults=*/nullptr);
}
- LOGV2_OPTIONS(20301, {LogComponent::kIndex}, "Finished traversing through all the indexes.");
+ LOGV2_OPTIONS(20301, {LogComponent::kIndex}, "Finished traversing through all the indexes");
indexConsistency->addIndexEntryErrors(indexNsResultsMap, result);
}
@@ -308,7 +308,7 @@ void _reportInvalidResults(OperationContext* opCtx,
opCtx, validateState, indexNsResultsMap, keysPerIndex, results, output);
LOGV2_OPTIONS(20302,
{LogComponent::kIndex},
- "Validation complete -- Corruption found.",
+ "Validation complete -- Corruption found",
"namespace"_attr = validateState->nss(),
"uuid"_attr = uuidString);
}
@@ -509,7 +509,7 @@ Status validate(OperationContext* opCtx,
if (MONGO_unlikely(pauseCollectionValidationWithLock.shouldFail())) {
_validationIsPausedForTest.store(true);
- LOGV2(20304, "Failpoint 'pauseCollectionValidationWithLock' activated.");
+ LOGV2(20304, "Failpoint 'pauseCollectionValidationWithLock' activated");
pauseCollectionValidationWithLock.pauseWhileSet();
_validationIsPausedForTest.store(false);
}
@@ -538,7 +538,7 @@ Status validate(OperationContext* opCtx,
LOGV2_OPTIONS(20305,
{LogComponent::kIndex},
"Index inconsistencies were detected. "
- "Starting the second phase of index validation to gather concise errors.",
+ "Starting the second phase of index validation to gather concise errors",
"namespace"_attr = validateState.nss());
_gatherIndexEntryErrors(opCtx,
&validateState,
@@ -581,7 +581,7 @@ Status validate(OperationContext* opCtx,
LOGV2_OPTIONS(20306,
{LogComponent::kIndex},
"Validation complete for collection. No "
- "corruption found.",
+ "corruption found",
"namespace"_attr = validateState.nss(),
"uuid"_attr = uuidString);
diff --git a/src/mongo/db/catalog/create_collection.cpp b/src/mongo/db/catalog/create_collection.cpp
index f19d74c40ed..14370f4d8f0 100644
--- a/src/mongo/db/catalog/create_collection.cpp
+++ b/src/mongo/db/catalog/create_collection.cpp
@@ -272,12 +272,12 @@ Status createCollectionForApplyOps(OperationContext* opCtx,
if (currentName && currentName->isDropPendingNamespace()) {
LOGV2(20308,
- "CMD: create {newCollName} - existing collection with conflicting UUID "
- "{uuid} is in a drop-pending state: {currentName}",
+ "CMD: create {newCollection} - existing collection with conflicting UUID "
+ "{conflictingUUID} is in a drop-pending state: {existingCollection}",
"CMD: create -- existing collection with conflicting UUID "
"is in a drop-pending state",
"newCollection"_attr = newCollName,
- "conflictingUuid"_attr = uuid,
+ "conflictingUUID"_attr = uuid,
"existingCollection"_attr = *currentName);
return Result(Status(ErrorCodes::NamespaceExists,
str::stream()
@@ -321,12 +321,12 @@ Status createCollectionForApplyOps(OperationContext* opCtx,
// It is ok to log this because this doesn't happen very frequently.
LOGV2(20309,
- "CMD: create {newCollName} - renaming existing collection with "
- "conflicting UUID {uuid} to temporary collection {tmpName}",
+ "CMD: create {newCollection} - renaming existing collection with "
+ "conflicting UUID {conflictingUUID} to temporary collection {tempName}",
"CMD: create -- renaming existing collection with "
"conflicting UUID to temporary collection",
"newCollection"_attr = newCollName,
- "conflictingUuid"_attr = uuid,
+ "conflictingUUID"_attr = uuid,
"tempName"_attr = tmpName);
Status status = db->renameCollection(opCtx, newCollName, tmpName, stayTemp);
if (!status.isOK())
diff --git a/src/mongo/db/catalog/database_impl.cpp b/src/mongo/db/catalog/database_impl.cpp
index b7767888006..b813dc56a1c 100644
--- a/src/mongo/db/catalog/database_impl.cpp
+++ b/src/mongo/db/catalog/database_impl.cpp
@@ -146,7 +146,7 @@ void DatabaseImpl::init(OperationContext* const opCtx) const {
if (!status.isOK()) {
LOGV2_WARNING(20325,
"tried to open invalid db: {name}",
- "tried to open invalid db",
+ "Tried to open invalid db",
"db"_attr = _name);
uasserted(10028, status.toString());
}
@@ -171,7 +171,7 @@ void DatabaseImpl::init(OperationContext* const opCtx) const {
LOGV2_WARNING_OPTIONS(20326,
{logv2::LogTag::kStartupWarnings},
"Unable to parse views; remove any invalid views "
- "from the collection to restore server functionality.",
+ "from the collection to restore server functionality",
"error"_attr = redact(reloadStatus),
"namespace"_attr = _viewsName);
}
@@ -187,7 +187,7 @@ void DatabaseImpl::clearTmpCollections(OperationContext* opCtx) const {
if (!status.isOK()) {
LOGV2_WARNING(20327,
"could not drop temp collection '{namespace}': {error}",
- "could not drop temp collection",
+ "Could not drop temp collection",
"namespace"_attr = collection->ns(),
"error"_attr = redact(status));
}
@@ -196,7 +196,7 @@ void DatabaseImpl::clearTmpCollections(OperationContext* opCtx) const {
LOGV2_WARNING(
20328,
"could not drop temp collection '{namespace}' due to WriteConflictException",
- "could not drop temp collection due to WriteConflictException",
+ "Could not drop temp collection due to WriteConflictException",
"namespace"_attr = collection->ns());
opCtx->recoveryUnit()->abandonSnapshot();
}
@@ -310,8 +310,8 @@ void DatabaseImpl::getStats(OperationContext* opCtx, BSONObjBuilder* output, dou
LOGV2(20312,
"Failed to query filesystem disk stats (code: {ec_value}): {ec_message}",
"Failed to query filesystem disk stats",
- "errorCode"_attr = ec.value(),
- "errorMessage"_attr = ec.message());
+ "error"_attr = ec.message(),
+ "errorCode"_attr = ec.value());
}
}
}
@@ -463,7 +463,8 @@ Status DatabaseImpl::dropCollectionEvenIfSystem(OperationContext* opCtx,
auto dpns = nss.makeDropPendingNamespace(dropOpTime);
const bool stayTemp = true;
LOGV2(20315,
- "dropCollection: {nss} ({uuid}) - renaming to drop-pending collection: {dpns} with drop "
+ "dropCollection: {namespace} ({uuid}) - renaming to drop-pending collection: "
+ "{dropPendingName} with drop "
"optime {dropOpTime}",
"dropCollection: renaming to drop-pending collection",
"namespace"_attr = nss,
@@ -561,7 +562,7 @@ Status DatabaseImpl::renameCollection(OperationContext* opCtx,
LOGV2(20319,
"renameCollection: renaming collection {collToRename_uuid} from {fromNss} to {toNss}",
- "renameCollection: renaming collection",
+ "renameCollection",
"uuid"_attr = collToRename->uuid(),
"fromName"_attr = fromNss,
"toName"_attr = toNss);
@@ -726,7 +727,7 @@ Collection* DatabaseImpl::createCollection(OperationContext* opCtx,
"namespace"_attr = nss,
"uuidDisposition"_attr = (generatedUUID ? "generated" : "provided"),
"uuid"_attr = optionsWithUUID.uuid.get(),
- "options"_attr = options.toBSON());
+ "options"_attr = options);
// Create Collection object
auto storageEngine = opCtx->getServiceContext()->getStorageEngine();
@@ -870,15 +871,13 @@ void DatabaseImpl::checkForIdIndexesAndDropPendingCollections(OperationContext*
if (coll->getIndexCatalog()->findIdIndex(opCtx))
continue;
- LOGV2_OPTIONS(20322,
- {logv2::LogTag::kStartupWarnings},
- "WARNING: a collection lacks a unique index on _id. This index is "
- "needed for replication to function properly",
- "namespace"_attr = nss);
- LOGV2_OPTIONS(20323,
- {logv2::LogTag::kStartupWarnings},
- "To fix this, you need to create a unique index on _id. See "
- "http://dochub.mongodb.org/core/build-replica-set-indexes");
+ LOGV2_OPTIONS(
+ 20322,
+ {logv2::LogTag::kStartupWarnings},
+ "Collection lacks a unique index on _id. This index is "
+ "needed for replication to function properly. To fix this, you need to create a unique "
+ "index on _id. See http://dochub.mongodb.org/core/build-replica-set-indexes",
+ "namespace"_attr = nss);
}
}
diff --git a/src/mongo/db/catalog/drop_collection.cpp b/src/mongo/db/catalog/drop_collection.cpp
index 4e27032c58c..46bbc0cba7f 100644
--- a/src/mongo/db/catalog/drop_collection.cpp
+++ b/src/mongo/db/catalog/drop_collection.cpp
@@ -276,8 +276,7 @@ Status dropCollection(OperationContext* opCtx,
BSONObjBuilder& result,
DropCollectionSystemCollectionMode systemCollectionMode) {
if (!serverGlobalParams.quiet.load()) {
- LOGV2(
- 518070, "CMD: drop {collectionName}", "CMD: drop", "collection"_attr = collectionName);
+ LOGV2(518070, "CMD: drop {namespace}", "CMD: drop", "namespace"_attr = collectionName);
}
if (MONGO_unlikely(hangDropCollectionBeforeLockAcquisition.shouldFail())) {
@@ -325,7 +324,7 @@ Status dropCollectionForApplyOps(OperationContext* opCtx,
const repl::OpTime& dropOpTime,
DropCollectionSystemCollectionMode systemCollectionMode) {
if (!serverGlobalParams.quiet.load()) {
- LOGV2(20332, "CMD: drop {collectionName}", "CMD: drop", "collection"_attr = collectionName);
+ LOGV2(20332, "CMD: drop {namespace}", "CMD: drop", "namespace"_attr = collectionName);
}
if (MONGO_unlikely(hangDropCollectionBeforeLockAcquisition.shouldFail())) {
diff --git a/src/mongo/db/catalog/drop_database.cpp b/src/mongo/db/catalog/drop_database.cpp
index f762d6784e3..37e5510a823 100644
--- a/src/mongo/db/catalog/drop_database.cpp
+++ b/src/mongo/db/catalog/drop_database.cpp
@@ -108,7 +108,7 @@ void _finishDropDatabase(OperationContext* opCtx,
});
if (MONGO_unlikely(dropDatabaseHangBeforeInMemoryDrop.shouldFail())) {
- LOGV2(20334, "dropDatabase - fail point dropDatabaseHangBeforeInMemoryDrop enabled.");
+ LOGV2(20334, "dropDatabase - fail point dropDatabaseHangBeforeInMemoryDrop enabled");
dropDatabaseHangBeforeInMemoryDrop.pauseWhileSet();
}
@@ -116,12 +116,11 @@ void _finishDropDatabase(OperationContext* opCtx,
databaseHolder->dropDb(opCtx, db);
dropPendingGuard.dismiss();
- LOGV2(20335,
- "dropDatabase {dbName} - dropped {numCollections} collection(s)",
+ LOGV2(20336,
+ "dropDatabase {dbName} - finished, dropped {numCollections} collection(s)",
"dropDatabase",
"db"_attr = dbName,
"numCollectionsDropped"_attr = numCollections);
- LOGV2(20336, "dropDatabase {dbName} - finished", "dropDatabase - finished", "db"_attr = dbName);
}
Status _dropDatabase(OperationContext* opCtx, const std::string& dbName, bool abortIndexBuilds) {
@@ -202,7 +201,7 @@ Status _dropDatabase(OperationContext* opCtx, const std::string& dbName, bool ab
if (MONGO_unlikely(dropDatabaseHangAfterWaitingForIndexBuilds.shouldFail())) {
LOGV2(4612300,
"dropDatabase - fail point dropDatabaseHangAfterWaitingForIndexBuilds "
- "enabled.");
+ "enabled");
dropDatabaseHangAfterWaitingForIndexBuilds.pauseWhileSet();
}
@@ -389,7 +388,7 @@ Status _dropDatabase(OperationContext* opCtx, const std::string& dbName, bool ab
if (MONGO_unlikely(dropDatabaseHangAfterAllCollectionsDrop.shouldFail())) {
LOGV2(20343,
"dropDatabase - fail point dropDatabaseHangAfterAllCollectionsDrop enabled. "
- "Blocking until fail point is disabled. ");
+ "Blocking until fail point is disabled");
dropDatabaseHangAfterAllCollectionsDrop.pauseWhileSet();
}
diff --git a/src/mongo/db/catalog/index_builds_manager.cpp b/src/mongo/db/catalog/index_builds_manager.cpp
index 5eae0e3c1ea..9edcbbb2834 100644
--- a/src/mongo/db/catalog/index_builds_manager.cpp
+++ b/src/mongo/db/catalog/index_builds_manager.cpp
@@ -182,7 +182,7 @@ StatusWith<std::pair<long long, long long>> IndexBuildsManager::startBuildingInd
}
LOGV2_WARNING(20348,
"Invalid BSON detected at {id}: {validStatus}. Deleting.",
- "Invalid BSON detected; deleting.",
+ "Invalid BSON detected; deleting",
"id"_attr = id,
"error"_attr = redact(validStatus));
rs->deleteRecord(opCtx, id);
diff --git a/src/mongo/db/catalog/index_catalog_entry_impl.cpp b/src/mongo/db/catalog/index_catalog_entry_impl.cpp
index a9926cdeb37..dca74a0730e 100644
--- a/src/mongo/db/catalog/index_catalog_entry_impl.cpp
+++ b/src/mongo/db/catalog/index_catalog_entry_impl.cpp
@@ -290,7 +290,7 @@ Status IndexCatalogEntryImpl::_setMultikeyInMultiDocumentTransaction(
opCtx->recoveryUnit()->setTimestamp(recoveryPrepareOpTime.getTimestamp());
if (status.code() == ErrorCodes::BadValue) {
LOGV2(20352,
- "Temporarily could not timestamp the multikey catalog write, retrying.",
+ "Temporarily could not timestamp the multikey catalog write, retrying",
"reason"_attr = status.reason());
throw WriteConflictException();
}
diff --git a/src/mongo/db/catalog/index_catalog_impl.cpp b/src/mongo/db/catalog/index_catalog_impl.cpp
index c67c1338ecd..edc51615d6b 100644
--- a/src/mongo/db/catalog/index_catalog_impl.cpp
+++ b/src/mongo/db/catalog/index_catalog_impl.cpp
@@ -1073,13 +1073,12 @@ void IndexCatalogImpl::deleteIndexFromDisk(OperationContext* opCtx, const string
* This is ok, as we may be partially through index creation.
*/
} else if (!status.isOK()) {
- LOGV2_WARNING(
- 20364,
- "couldn't drop index {indexName} on collection: {collection_ns} because of {status}",
- "couldn't drop index",
- "index"_attr = indexName,
- "namespace"_attr = _collection->ns(),
- "reason"_attr = redact(status));
+ LOGV2_WARNING(20364,
+ "couldn't drop index {index} on collection: {namespace} because of {error}",
+ "Couldn't drop index",
+ "index"_attr = indexName,
+ "namespace"_attr = _collection->ns(),
+ "error"_attr = redact(status));
}
}
@@ -1164,7 +1163,7 @@ int IndexCatalogImpl::numIndexesReady(OperationContext* opCtx) const {
LOGV2(20360, "collection catalog reports", "index"_attr = i);
}
- LOGV2(20361, "uuid", "collection_uuid"_attr = _collection->uuid());
+ LOGV2(20361, "uuid", "uuid"_attr = _collection->uuid());
invariant(itIndexes.size() == completedIndexes.size(),
"The number of ready indexes reported in the collection metadata catalog did "
@@ -1546,7 +1545,7 @@ void IndexCatalogImpl::_unindexKeys(OperationContext* opCtx,
"Couldn't unindex record",
"record"_attr = redact(obj),
"namespace"_attr = _collection->ns(),
- "reason"_attr = redact(status));
+ "error"_attr = redact(status));
}
if (keysDeletedOut) {
@@ -1682,7 +1681,10 @@ Status IndexCatalogImpl::compactIndexes(OperationContext* opCtx) {
"entry_descriptor"_attr = *(entry->descriptor()));
Status status = entry->accessMethod()->compact(opCtx);
if (!status.isOK()) {
- LOGV2_ERROR(20377, "failed to compact index", "index"_attr = *(entry->descriptor()));
+ LOGV2_ERROR(20377,
+ "Failed to compact index",
+ "index"_attr = *(entry->descriptor()),
+ "error"_attr = redact(status));
return status;
}
}
diff --git a/src/mongo/db/catalog/index_timestamp_helper.cpp b/src/mongo/db/catalog/index_timestamp_helper.cpp
index 85b1854356a..48a7585ae69 100644
--- a/src/mongo/db/catalog/index_timestamp_helper.cpp
+++ b/src/mongo/db/catalog/index_timestamp_helper.cpp
@@ -174,7 +174,7 @@ bool IndexTimestampHelper::setGhostCommitTimestampForCatalogWrite(OperationConte
setGhostTimestamp(opCtx, LogicalClock::get(opCtx)->getClusterTime().asTimestamp());
if (status.code() == ErrorCodes::BadValue) {
LOGV2(20381,
- "Temporarily could not timestamp the index build commit, retrying.",
+ "Temporarily could not timestamp the index build commit, retrying",
"reason"_attr = status.reason());
throw WriteConflictException();
}
diff --git a/src/mongo/db/catalog/multi_index_block.cpp b/src/mongo/db/catalog/multi_index_block.cpp
index 00b2df0f5b4..a288db41248 100644
--- a/src/mongo/db/catalog/multi_index_block.cpp
+++ b/src/mongo/db/catalog/multi_index_block.cpp
@@ -114,7 +114,7 @@ void MultiIndexBlock::abortIndexBuild(OperationContext* opCtx,
// Simply get a timestamp to write with here; we can't write to the oplog.
repl::UnreplicatedWritesBlock uwb(opCtx);
if (!IndexTimestampHelper::setGhostCommitTimestampForCatalogWrite(opCtx, nss)) {
- LOGV2(20382, "Did not timestamp index abort write.");
+ LOGV2(20382, "Did not timestamp index abort write");
}
}
@@ -130,14 +130,16 @@ void MultiIndexBlock::abortIndexBuild(OperationContext* opCtx,
continue;
LOGV2_ERROR(20393,
"Caught exception while cleaning up partially built indexes: {e}",
- "e"_attr = redact(e));
+ "Caught exception while cleaning up partially built indexes",
+ "error"_attr = redact(e));
} catch (const std::exception& e) {
LOGV2_ERROR(20394,
"Caught exception while cleaning up partially built indexes: {e_what}",
- "e_what"_attr = e.what());
+ "Caught exception while cleaning up partially built indexes",
+ "error"_attr = e.what());
} catch (...) {
LOGV2_ERROR(20395,
- "Caught unknown exception while cleaning up partially built indexes.");
+ "Caught unknown exception while cleaning up partially built indexes");
}
fassertFailed(18644);
}
@@ -209,7 +211,9 @@ StatusWith<std::vector<BSONObj>> MultiIndexBlock::init(OperationContext* opCtx,
if (info["background"].isBoolean() && !info["background"].Bool()) {
LOGV2(
20383,
- "ignoring obselete {{ background: false }} index build option because all "
+ "ignoring obsolete {{ background: false }} index build option because all "
+ "indexes are built in the background with the hybrid method",
+ "Ignoring obsolete { background: false } index build option because all "
"indexes are built in the background with the hybrid method");
}
continue;
@@ -287,18 +291,21 @@ StatusWith<std::vector<BSONObj>> MultiIndexBlock::init(OperationContext* opCtx,
index.options.dupsAllowed = true;
index.options.fromIndexBuilder = true;
- LOGV2(20384,
- "index build: starting on {ns} properties: {descriptor} using method: {method}",
- "ns"_attr = ns,
- "descriptor"_attr = *descriptor,
- "method"_attr = _method);
+ logv2::DynamicAttributes attrs;
+ attrs.add("namespace", ns);
+ attrs.add("properties", *descriptor);
+ attrs.add("method", _method);
if (index.bulk)
- LOGV2(20385,
- "build may temporarily use up to "
- "{eachIndexBuildMaxMemoryUsageBytes_1024_1024} megabytes of RAM",
- "eachIndexBuildMaxMemoryUsageBytes_1024_1024"_attr =
+ attrs.add("maxTemporaryMemoryUsageMB",
eachIndexBuildMaxMemoryUsageBytes / 1024 / 1024);
+ LOGV2(20384,
+ "index build: starting on {namespace} properties: {properties} using method: "
+ "{method}",
+ "index build: starting",
+ attrs);
+
+
index.filterExpression = index.block->getEntry()->getFilterExpression();
// TODO SERVER-14888 Suppress this in cases we don't want to audit.
@@ -505,7 +512,7 @@ Status MultiIndexBlock::insertAllDocumentsInCollection(OperationContext* opCtx,
if (MONGO_unlikely(leaveIndexBuildUnfinishedForShutdown.shouldFail())) {
LOGV2(20389,
"Index build interrupted due to 'leaveIndexBuildUnfinishedForShutdown' failpoint. "
- "Mimicking shutdown error code.");
+ "Mimicking shutdown error code");
return Status(
ErrorCodes::InterruptedAtShutdown,
"background index build interrupted due to failpoint. returning a shutdown error.");
diff --git a/src/mongo/db/catalog/rename_collection.cpp b/src/mongo/db/catalog/rename_collection.cpp
index 9d17fd71793..a0a3fa3ec4e 100644
--- a/src/mongo/db/catalog/rename_collection.cpp
+++ b/src/mongo/db/catalog/rename_collection.cpp
@@ -260,10 +260,12 @@ Status renameCollectionAndDropTarget(OperationContext* opCtx,
if (!renameOpTime.isNull()) {
LOGV2_FATAL(
40616,
- "renameCollection: {source} to {target} (with dropTarget=true) - unexpected "
+ "renameCollection: {from} to {to} (with dropTarget=true) - unexpected "
"renameCollection oplog entry written to the oplog with optime {renameOpTime}",
- "source"_attr = source,
- "target"_attr = target,
+ "renameCollection (with dropTarget=true): unexpected renameCollection oplog "
+ "entry written to the oplog",
+ "from"_attr = source,
+ "to"_attr = target,
"renameOpTime"_attr = renameOpTime);
}
renameOpTime = renameOpTimeFromApplyOps;
diff --git a/src/mongo/db/catalog/rename_collection_test.cpp b/src/mongo/db/catalog/rename_collection_test.cpp
index acbf3415f1c..a5ee4749d45 100644
--- a/src/mongo/db/catalog/rename_collection_test.cpp
+++ b/src/mongo/db/catalog/rename_collection_test.cpp
@@ -843,7 +843,7 @@ TEST_F(RenameCollectionTest,
DEATH_TEST_F(RenameCollectionTest,
RenameCollectionForApplyOpsTriggersFatalAssertionIfLogOpReturnsValidOpTime,
- "unexpected renameCollection oplog entry written to the oplog with optime") {
+ "unexpected renameCollection oplog entry written to the oplog") {
repl::UnreplicatedWritesBlock uwb(_opCtx.get());
ASSERT_FALSE(_opCtx->writesAreReplicated());
diff --git a/src/mongo/db/catalog/validate_adaptor.cpp b/src/mongo/db/catalog/validate_adaptor.cpp
index 99a8c0c068b..b8f502325a3 100644
--- a/src/mongo/db/catalog/validate_adaptor.cpp
+++ b/src/mongo/db/catalog/validate_adaptor.cpp
@@ -333,20 +333,26 @@ void ValidateAdaptor::traverseRecordStore(OperationContext* opCtx,
// validatedSize = dataSize is not a general requirement as some storage engines may use
// padding, but we still require that they return the unpadded record data.
if (!status.isOK() || validatedSize != static_cast<size_t>(dataSize)) {
- str::stream ss;
if (!status.isOK() && validatedSize != static_cast<size_t>(dataSize)) {
- ss << "Reasons: (1) " << status << "; (2) Validated size of " << validatedSize
- << " bytes does not equal the record size of " << dataSize << " bytes";
+ LOGV2(4835000,
+ "Document corruption details - Multiple causes for document validation "
+ "failure; error status and size mismatch",
+ "recordId"_attr = record->id,
+ "validatedBytes"_attr = validatedSize,
+ "recordBytes"_attr = dataSize,
+ "error"_attr = status);
} else if (!status.isOK()) {
- ss << "Reason: " << status;
+ LOGV2(4835001,
+ "Document corruption details - Document validation failed with error",
+ "recordId"_attr = record->id,
+ "error"_attr = status);
} else {
- ss << "Reason: Validated size of " << validatedSize
- << " bytes does not equal the record size of " << dataSize << " bytes";
+ LOGV2(4835002,
+ "Document corruption details - Document validation failure; size mismatch",
+ "recordId"_attr = record->id,
+ "validatedBytes"_attr = validatedSize,
+ "recordBytes"_attr = dataSize);
}
- LOGV2(20404,
- "Document corruption details",
- "recordId"_attr = record->id,
- "reasons"_attr = std::string(ss));
// Only log once
if (results->valid) {
diff --git a/src/mongo/db/index/expression_keys_private.cpp b/src/mongo/db/index/expression_keys_private.cpp
index 82ea49463b8..b95be9e0a96 100644
--- a/src/mongo/db/index/expression_keys_private.cpp
+++ b/src/mongo/db/index/expression_keys_private.cpp
@@ -637,9 +637,10 @@ void ExpressionKeysPrivate::getS2Keys(const BSONObj& obj,
if (keysToAdd.size() > params.maxKeysPerInsert) {
LOGV2_WARNING(23755,
"Insert of geo object generated a high number of keys. num keys: "
- "{keysToAdd_size} obj inserted: {obj}",
- "keysToAdd_size"_attr = keysToAdd.size(),
- "obj"_attr = redact(obj));
+ "{numKeys} obj inserted: {obj}",
+ "Insert of geo object generated a large number of keys",
+ "obj"_attr = redact(obj),
+ "numKeys"_attr = keysToAdd.size());
}
invariant(keys->empty());
diff --git a/src/mongo/db/index/index_access_method.cpp b/src/mongo/db/index/index_access_method.cpp
index 09fe53bbb96..fa4924ed2ca 100644
--- a/src/mongo/db/index/index_access_method.cpp
+++ b/src/mongo/db/index/index_access_method.cpp
@@ -204,16 +204,15 @@ void AbstractIndexAccessMethod::removeOneKey(OperationContext* opCtx,
try {
_newInterface->unindex(opCtx, keyString, dupsAllowed);
} catch (AssertionException& e) {
- LOGV2(20682,
- "Assertion failure: _unindex failed on: {descriptor_parentNS} for index: "
- "{descriptor_indexName}",
- "descriptor_parentNS"_attr = _descriptor->parentNS(),
- "descriptor_indexName"_attr = _descriptor->indexName());
LOGV2(20683,
- "Assertion failure: _unindex failed: {e} KeyString:{keyString} dl:{loc}",
- "e"_attr = redact(e),
+ "Assertion failure: _unindex failed on: {descriptorParentNamespace} for index: "
+ "{descriptorIndexName}. {error} KeyString:{keyString} dl:{recordId}",
+ "Assertion failure: _unindex failed",
+ "error"_attr = redact(e),
"keyString"_attr = keyString,
- "loc"_attr = loc);
+ "recordId"_attr = loc,
+ "descriptorParentNamespace"_attr = _descriptor->parentNS(),
+ "descriptorIndexName"_attr = _descriptor->indexName());
printStackTrace();
}
}
@@ -606,7 +605,7 @@ Status AbstractIndexAccessMethod::commitBulk(OperationContext* opCtx,
std::unique_ptr<BulkBuilder::Sorter::Iterator> it(bulk->done());
- static const char* message = "Index Build: inserting keys from external sorter into index";
+ static constexpr char message[] = "Index Build: inserting keys from external sorter into index";
ProgressMeterHolder pm;
{
stdx::unique_lock<Client> lk(*opCtx->getClient());
@@ -688,8 +687,9 @@ Status AbstractIndexAccessMethod::commitBulk(OperationContext* opCtx,
LOGV2(20685,
"index build: inserted {bulk_getKeysInserted} keys from external sorter into index in "
"{timer_seconds} seconds",
- "bulk_getKeysInserted"_attr = bulk->getKeysInserted(),
- "timer_seconds"_attr = timer.seconds());
+ message,
+ "keysInserted"_attr = bulk->getKeysInserted(),
+ "duration"_attr = Milliseconds(Seconds(timer.seconds())));
WriteUnitOfWork wunit(opCtx);
builder->commit(true);
diff --git a/src/mongo/db/index/index_access_method_factory_impl.cpp b/src/mongo/db/index/index_access_method_factory_impl.cpp
index 9ed6150b3b5..b2f95134513 100644
--- a/src/mongo/db/index/index_access_method_factory_impl.cpp
+++ b/src/mongo/db/index/index_access_method_factory_impl.cpp
@@ -63,8 +63,9 @@ std::unique_ptr<IndexAccessMethod> IndexAccessMethodFactoryImpl::make(
else if (IndexNames::WILDCARD == type)
return std::make_unique<WildcardAccessMethod>(entry, std::move(sortedDataInterface));
LOGV2(20688,
- "Can't find index for keyPattern {desc_keyPattern}",
- "desc_keyPattern"_attr = desc->keyPattern());
+ "Can't find index for keyPattern {keyPattern}",
+ "Can't find index for keyPattern",
+ "keyPattern"_attr = desc->keyPattern());
fassertFailed(31021);
}
diff --git a/src/mongo/db/index/index_build_interceptor.cpp b/src/mongo/db/index/index_build_interceptor.cpp
index ce7607c5623..cfa537f6345 100644
--- a/src/mongo/db/index/index_build_interceptor.cpp
+++ b/src/mongo/db/index/index_build_interceptor.cpp
@@ -356,13 +356,16 @@ bool IndexBuildInterceptor::areAllWritesApplied(OperationContext* opCtx) const {
if (!record) {
auto writesRecorded = _sideWritesCounter->load();
if (writesRecorded != _numApplied) {
- const std::string message = str::stream()
- << "The number of side writes recorded does not match the number "
- "applied, despite the table appearing empty. Writes recorded: "
- << writesRecorded << ", applied: " << _numApplied;
-
- dassert(writesRecorded == _numApplied, message);
- LOGV2_WARNING(20692, "{message}", "message"_attr = message);
+ dassert(writesRecorded == _numApplied,
+ (str::stream()
+ << "The number of side writes recorded does not match the number "
+ "applied, despite the table appearing empty. Writes recorded: "
+ << writesRecorded << ", applied: " << _numApplied));
+ LOGV2_WARNING(20692,
+ "The number of side writes recorded does not match the number applied, "
+ "despite the table appearing empty",
+ "writesRecorded"_attr = writesRecorded,
+ "applied"_attr = _numApplied);
}
return true;
}
diff --git a/src/mongo/db/index/s2_access_method.cpp b/src/mongo/db/index/s2_access_method.cpp
index 123cb8ec9c6..f42c3887f1f 100644
--- a/src/mongo/db/index/s2_access_method.cpp
+++ b/src/mongo/db/index/s2_access_method.cpp
@@ -79,7 +79,8 @@ S2AccessMethod::S2AccessMethod(IndexCatalogEntry* btreeState,
if (descriptor->isSparse()) {
LOGV2_WARNING(23742,
"Sparse option ignored for index spec {descriptor_keyPattern}",
- "descriptor_keyPattern"_attr = descriptor->keyPattern().toString());
+ "Sparse option ignored for index spec",
+ "indexSpec"_attr = descriptor->keyPattern());
}
}
diff --git a/src/mongo/db/storage/kv/kv_drop_pending_ident_reaper.cpp b/src/mongo/db/storage/kv/kv_drop_pending_ident_reaper.cpp
index 0844b31e1fd..891ed8fd8d2 100644
--- a/src/mongo/db/storage/kv/kv_drop_pending_ident_reaper.cpp
+++ b/src/mongo/db/storage/kv/kv_drop_pending_ident_reaper.cpp
@@ -57,12 +57,14 @@ void KVDropPendingIdentReaper::addDropPendingIdent(const Timestamp& dropTimestam
info.ident = ident.toString();
_dropPendingIdents.insert(std::make_pair(dropTimestamp, info));
} else {
- LOGV2_FATAL_NOTRACE(51023,
- "Failed to add drop-pending ident {ident} ({nss}) with drop timestamp "
- "{dropTimestamp}: duplicate timestamp and ident pair.",
- "ident"_attr = ident,
- "nss"_attr = nss,
- "dropTimestamp"_attr = dropTimestamp);
+ LOGV2_FATAL_NOTRACE(
+ 51023,
+ "Failed to add drop-pending ident {ident} ({namespace}) with drop timestamp "
+ "{dropTimestamp}: duplicate timestamp and ident pair.",
+ "Failed to add drop-pending ident, duplicate timestamp and ident pair",
+ "ident"_attr = ident,
+ "namespace"_attr = nss,
+ "dropTimestamp"_attr = dropTimestamp);
}
}
@@ -111,23 +113,25 @@ void KVDropPendingIdentReaper::dropIdentsOlderThan(OperationContext* opCtx, cons
const auto& identInfo = timestampAndIdentInfo.second;
const auto& nss = identInfo.nss;
const auto& ident = identInfo.ident;
- LOGV2(
- 22237,
- "Completing drop for ident {ident} (ns: {nss}) with drop timestamp {dropTimestamp}",
- "ident"_attr = ident,
- "nss"_attr = nss,
- "dropTimestamp"_attr = dropTimestamp);
+ LOGV2(22237,
+ "Completing drop for ident {ident} (ns: {namespace}) with drop timestamp "
+ "{dropTimestamp}",
+ "Completing drop for ident",
+ "ident"_attr = ident,
+ "namespace"_attr = nss,
+ "dropTimestamp"_attr = dropTimestamp);
WriteUnitOfWork wuow(opCtx);
auto status = _engine->dropIdent(opCtx, opCtx->recoveryUnit(), ident);
if (!status.isOK()) {
LOGV2_FATAL_NOTRACE(
51022,
- "Failed to remove drop-pending ident {ident}(ns: {nss}) with drop "
- "timestamp {dropTimestamp}: {status}",
+ "Failed to remove drop-pending ident {ident}(ns: {namespace}) with drop "
+ "timestamp {dropTimestamp}: {error}",
+ "Failed to remove drop-pending ident",
"ident"_attr = ident,
- "nss"_attr = nss,
+ "namespace"_attr = nss,
"dropTimestamp"_attr = dropTimestamp,
- "status"_attr = status);
+ "error"_attr = status);
}
wuow.commit();
}