author     jannaerin <golden.janna@gmail.com>                2020-05-28 12:58:17 -0400
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>  2020-06-01 18:58:46 +0000
commit     5c1cb9b2908b4d1622b44dd1020c76b3e563610d (patch)
tree       60424408af8bcf18a028baaf50b4444bfabbf4fc /src
parent     912d0ead151bc3a5cac261f07c8453a35a73ad10 (diff)
download   mongo-5c1cb9b2908b4d1622b44dd1020c76b3e563610d.tar.gz
SERVER-48361 Additional logv2 message cleanup for sharding
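
For context, the call shape this cleanup converges on: a unique numeric log ID, a
format string whose {placeholders} match the attribute names exactly, a short
constant message that structured-log consumers can key on, and the named
attributes themselves. Below is a minimal sketch of that shape, modeled on the
calls in the diff; the helper function, the log ID 99999, and the surrounding
file context are illustrative assumptions, not part of this commit.

    // Sketch only: assumes compilation inside the mongo source tree.
    // The default component must be defined before including the logv2 header.
    #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding

    #include "mongo/bson/bsonobj.h"
    #include "mongo/db/namespace_string.h"
    #include "mongo/logv2/log.h"

    namespace mongo {

    // Hypothetical helper illustrating the target call shape.
    void logBeginRangeRemoval(const NamespaceString& nss, const BSONObj& min, const BSONObj& max) {
        LOGV2_DEBUG(99999,  // unique log ID (hypothetical; real IDs are assigned per call site)
                    1,      // debug verbosity level
                    "Begin removal of {min} to {max} in {namespace}",  // format string
                    "Begin removal of range",                          // constant message
                    "min"_attr = min,
                    "max"_attr = max,
                    "namespace"_attr = nss.ns());
    }

    }  // namespace mongo

The ad-hoc attribute names in the removed lines below (nss_ns, causedBy_status,
currentCollection_uuid) are exactly what this commit replaces with the
standardized namespace, error, and UUID names.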
Diffstat (limited to 'src')
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp |   1
-rw-r--r--  src/mongo/db/s/move_primary_command.cpp                              |   5
-rw-r--r--  src/mongo/db/s/range_deletion_util.cpp                               | 104
3 files changed, 63 insertions(+), 47 deletions(-)
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
index 1b30456d4de..f5638556d7a 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
@@ -1093,6 +1093,7 @@ void ShardingCatalogManager::ensureChunkVersionIsGreaterThan(OperationContext* o
         23887,
         "ensureChunkVersionIsGreaterThan bumped the version of the chunk with minKey {minKey}, "
         "maxKey {maxKey}, and epoch {epoch}. Chunk is now {newChunk}",
+        "ensureChunkVersionIsGreaterThan bumped the chunk version",
         "minKey"_attr = minKey,
         "maxKey"_attr = maxKey,
         "epoch"_attr = version.epoch(),
diff --git a/src/mongo/db/s/move_primary_command.cpp b/src/mongo/db/s/move_primary_command.cpp
index 4e4b06e664f..a946d64b33f 100644
--- a/src/mongo/db/s/move_primary_command.cpp
+++ b/src/mongo/db/s/move_primary_command.cpp
@@ -50,8 +50,9 @@ namespace {
 void uassertStatusOKWithWarning(const Status& status) {
     if (!status.isOK()) {
         LOGV2_WARNING(23762,
-                      "movePrimary failed{causedBy_status}",
-                      "causedBy_status"_attr = causedBy(redact(status)));
+                      "movePrimary failed: {error}",
+                      "movePrimary failed",
+                      "error"_attr = redact(status));
         uassertStatusOK(status);
     }
 }
diff --git a/src/mongo/db/s/range_deletion_util.cpp b/src/mongo/db/s/range_deletion_util.cpp
index 6149818a332..f7a83bbeb89 100644
--- a/src/mongo/db/s/range_deletion_util.cpp
+++ b/src/mongo/db/s/range_deletion_util.cpp
@@ -89,9 +89,10 @@ bool collectionUuidHasChanged(const NamespaceString& nss,
     if (!currentCollection) {
         LOGV2_DEBUG(23763,
                     1,
-                    "Abandoning range deletion task for {nss_ns} with UUID "
+                    "Abandoning range deletion task for {namespace} with UUID "
                     "{expectedCollectionUuid} because the collection has been dropped",
-                    "nss_ns"_attr = nss.ns(),
+                    "Abandoning range deletion task because the collection has been dropped",
+                    "namespace"_attr = nss.ns(),
                     "expectedCollectionUuid"_attr = expectedCollectionUuid);
         return true;
     }
@@ -100,12 +101,12 @@ bool collectionUuidHasChanged(const NamespaceString& nss,
         LOGV2_DEBUG(
             23764,
             1,
-            "Abandoning range deletion task for {nss_ns} with UUID {expectedCollectionUuid} "
-            "because UUID of {nss_ns2}has changed (current is {currentCollection_uuid})",
-            "nss_ns"_attr = nss.ns(),
-            "expectedCollectionUuid"_attr = expectedCollectionUuid,
-            "nss_ns2"_attr = nss.ns(),
-            "currentCollection_uuid"_attr = currentCollection->uuid());
+            "Abandoning range deletion task for {namespace} with UUID {expectedCollectionUUID} "
+            "because UUID of {namespace} has changed (current is {currentCollectionUUID})",
+            "Abandoning range deletion task because UUID has changed",
+            "namespace"_attr = nss.ns(),
+            "expectedCollectionUUID"_attr = expectedCollectionUuid,
+            "currentCollectionUUID"_attr = currentCollection->uuid());
         return true;
     }
@@ -133,10 +134,12 @@ StatusWith<int> deleteNextBatch(OperationContext* opCtx,
     auto catalog = collection->getIndexCatalog();
     const IndexDescriptor* idx = catalog->findShardKeyPrefixedIndex(opCtx, keyPattern, false);
     if (!idx) {
-        std::string msg = str::stream()
-            << "Unable to find shard key index for " << keyPattern.toString() << " in " << nss.ns();
-        LOGV2(23765, "{msg}", "msg"_attr = msg);
-        return {ErrorCodes::InternalError, msg};
+        LOGV2_ERROR_OPTIONS(23765,
+                            {logv2::UserAssertAfterLog(ErrorCodes::InternalError)},
+                            "Unable to find shard key index for {keyPattern} in {namespace}",
+                            "Unable to find shard key index",
+                            "keyPattern"_attr = keyPattern.toString(),
+                            "namespace"_attr = nss.ns());
     }
 
     // Extend bounds to match the index we found
@@ -150,19 +153,22 @@ StatusWith<int> deleteNextBatch(OperationContext* opCtx,
     LOGV2_DEBUG(23766,
                 1,
-                "begin removal of {min} to {max} in {nss_ns}",
+                "Begin removal of {min} to {max} in {namespace}",
+                "Begin removal of range",
                 "min"_attr = min,
                 "max"_attr = max,
-                "nss_ns"_attr = nss.ns());
+                "namespace"_attr = nss.ns());
 
     const auto indexName = idx->indexName();
     const IndexDescriptor* descriptor =
         collection->getIndexCatalog()->findIndexByName(opCtx, indexName);
     if (!descriptor) {
-        std::string msg = str::stream()
-            << "shard key index with name " << indexName << " on '" << nss.ns() << "' was dropped";
-        LOGV2(23767, "{msg}", "msg"_attr = msg);
-        return {ErrorCodes::InternalError, msg};
+        LOGV2_ERROR_OPTIONS(23767,
+                            {logv2::UserAssertAfterLog(ErrorCodes::InternalError)},
+                            "Shard key index with name {indexName} on {namespace} was dropped",
+                            "Shard key index was dropped",
+                            "indexName"_attr = indexName,
+                            "namespace"_attr = nss.ns());
     }
 
     auto deleteStageParams = std::make_unique<DeleteStageParams>();
@@ -210,11 +216,11 @@ StatusWith<int> deleteNextBatch(OperationContext* opCtx,
         } catch (...) {
             LOGV2_WARNING(
                 23776,
-                "cursor error while trying to delete {min} to {max} in {nss}, stats: {stats}",
-                "cursor error while trying to delete range",
+                "Cursor error while trying to delete {min} to {max} in {namespace}, stats: {stats}",
+                "Cursor error while trying to delete range",
                 "min"_attr = redact(min),
                 "max"_attr = redact(max),
-                "nss"_attr = nss,
+                "namespace"_attr = nss,
                 "stats"_attr = Explain::getWinningPlanStats(exec.get()));
             break;
         }
@@ -311,14 +317,16 @@ ExecutorFuture<void> deleteRangeInBatches(const std::shared_ptr<executor::TaskEx
                auto numDeleted = uassertStatusOK(deleteNextBatch(
                    opCtx, collection, keyPattern, range, numDocsToRemovePerBatch));
 
-               LOGV2_DEBUG(23769,
-                           2,
-                           "Deleted {numDeleted} documents in pass in namespace {nss_ns} with "
-                           "UUID {collectionUuid} for range {range}",
-                           "numDeleted"_attr = numDeleted,
-                           "nss_ns"_attr = nss.ns(),
-                           "collectionUuid"_attr = collectionUuid,
-                           "range"_attr = range.toString());
+               LOGV2_DEBUG(
+                   23769,
+                   2,
+                   "Deleted {numDeleted} documents in pass in namespace {namespace} with "
+                   "UUID {collectionUUID} for range {range}",
+                   "Deleted documents in pass",
+                   "numDeleted"_attr = numDeleted,
+                   "namespace"_attr = nss.ns(),
+                   "collectionUUID"_attr = collectionUuid,
+                   "range"_attr = range.toString());
 
                return numDeleted;
            });
@@ -377,10 +385,11 @@ ExecutorFuture<void> waitForDeletionsToMajorityReplicate(
         LOGV2_DEBUG(23771,
                     2,
-                    "Waiting for majority replication of local deletions in namespace {nss_ns} "
-                    "with UUID {collectionUuid} for range {range}",
-                    "nss_ns"_attr = nss.ns(),
-                    "collectionUuid"_attr = collectionUuid,
+                    "Waiting for majority replication of local deletions in namespace {namespace} "
+                    "with UUID {collectionUUID} for range {range}",
+                    "Waiting for majority replication of local deletions",
+                    "namespace"_attr = nss.ns(),
+                    "collectionUUID"_attr = collectionUuid,
                     "range"_attr = redact(range.toString()));
 
         // Asynchronously wait for majority write concern.
@@ -419,9 +428,10 @@ SharedSemiFuture<void> removeDocumentsInRange(
         .then([=]() mutable {
             LOGV2_DEBUG(23772,
                         2,
-                        "Beginning deletion of any documents in {nss_ns} range {range} with "
+                        "Beginning deletion of any documents in {namespace} range {range} with "
                         "numDocsToRemovePerBatch {numDocsToRemovePerBatch}",
-                        "nss_ns"_attr = nss.ns(),
+                        "Beginning deletion of documents",
+                        "namespace"_attr = nss.ns(),
                         "range"_attr = redact(range.toString()),
                         "numDocsToRemovePerBatch"_attr = numDocsToRemovePerBatch);
@@ -457,15 +467,17 @@ SharedSemiFuture<void> removeDocumentsInRange(
             if (s.isOK()) {
                 LOGV2_DEBUG(23773,
                             2,
-                            "Completed deletion of documents in {nss_ns} range {range}",
-                            "nss_ns"_attr = nss.ns(),
+                            "Completed deletion of documents in {namespace} range {range}",
+                            "Completed deletion of documents",
+                            "namespace"_attr = nss.ns(),
                             "range"_attr = redact(range.toString()));
             } else {
                 LOGV2(23774,
-                      "Failed to delete of documents in {nss_ns} range {range}{causedBy_s}",
-                      "nss_ns"_attr = nss.ns(),
+                      "Failed to delete documents in {namespace} range {range} due to {error}",
+                      "Failed to delete documents",
+                      "namespace"_attr = nss.ns(),
                       "range"_attr = redact(range.toString()),
-                      "causedBy_s"_attr = causedBy(redact(s)));
+                      "error"_attr = redact(s));
             }
 
             if (s.code() == ErrorCodes::RangeDeletionAbandonedBecauseTaskDocumentDoesNotExist) {
@@ -485,19 +497,21 @@ SharedSemiFuture<void> removeDocumentsInRange(
             } catch (const DBException& e) {
                 LOGV2(23770,
                       "Failed to delete range deletion task for range {range} in collection "
-                      "{nss}{causedBy_e_what}",
+                      "{namespace} due to {error}",
+                      "Failed to delete range deletion task",
                       "range"_attr = range,
-                      "nss"_attr = nss,
-                      "causedBy_e_what"_attr = causedBy(e.what()));
+                      "namespace"_attr = nss,
+                      "error"_attr = e.what());
 
                 return e.toStatus();
             }
 
             LOGV2_DEBUG(23775,
                         1,
-                        "Completed removal of persistent range deletion task for {nss_ns} "
+                        "Completed removal of persistent range deletion task for {namespace} "
                         "range {range}",
-                        "nss_ns"_attr = nss.ns(),
+                        "Completed removal of persistent range deletion task",
+                        "namespace"_attr = nss.ns(),
                         "range"_attr = redact(range.toString()));
 
             // Propagate any errors to callers waiting on the result.
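
The most structural change above is in the error paths of range_deletion_util.cpp:
instead of formatting a std::string, logging it as an opaque {msg}, and returning
a Status, each path becomes a single LOGV2_ERROR_OPTIONS call with
logv2::UserAssertAfterLog, which writes the structured log line and then uasserts
with the given error code. A hedged before/after sketch, with the names taken
from the diff (the side-by-side framing itself is editorial, not from the commit):

    // Before (shape of the removed lines): the log line and the returned
    // error are built separately and can drift apart.
    if (!idx) {
        std::string msg = str::stream() << "Unable to find shard key index for "
                                        << keyPattern.toString() << " in " << nss.ns();
        LOGV2(23765, "{msg}", "msg"_attr = msg);
        return {ErrorCodes::InternalError, msg};
    }

    // After (shape of the added lines): one call logs the structured
    // attributes and then uasserts, so the exception carries the same
    // information that was logged.
    if (!idx) {
        LOGV2_ERROR_OPTIONS(23765,
                            {logv2::UserAssertAfterLog(ErrorCodes::InternalError)},
                            "Unable to find shard key index for {keyPattern} in {namespace}",
                            "Unable to find shard key index",
                            "keyPattern"_attr = keyPattern.toString(),
                            "namespace"_attr = nss.ns());
    }

One control-flow consequence: deleteNextBatch still declares StatusWith<int>,
but these paths now surface to callers as a thrown exception rather than a
returned error Status.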