author     jannaerin <golden.janna@gmail.com>                2020-05-28 12:58:17 -0400
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>  2020-06-02 16:08:40 +0000
commit     e0e94b840bb1f9b30f745adf261623cf34038e7f (patch)
tree       2cadae71726d63083d4567573d73fce17692f9e6
parent     f6298740638a8144ceda84ad7bf433f46ca41523 (diff)
download   mongo-e0e94b840bb1f9b30f745adf261623cf34038e7f.tar.gz
SERVER-48361 Additional logv2 message cleanup for sharding
(cherry picked from commit 5c1cb9b2908b4d1622b44dd1020c76b3e563610d)
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp |   1
-rw-r--r--  src/mongo/db/s/move_primary_command.cpp                              |   5
-rw-r--r--  src/mongo/db/s/range_deletion_util.cpp                               | 118
-rw-r--r--  src/mongo/s/write_ops/batch_write_exec.cpp                           |   5
4 files changed, 73 insertions, 56 deletions
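
Across all four files the cleanup follows the same shape: each LOGV2 call keeps its numeric ID and format string, gains a stable human-readable message as the next argument, and replaces auto-generated attribute names (nss_ns, causedBy_status) with descriptive ones (namespace, error). A minimal sketch of the before/after, assuming the 4.4-era MongoDB tree and the logv2 macros shown in this diff; the free function and header set below are illustrative, not code from the commit:

// Illustrative sketch only -- not part of this commit.
// logv2 expects the default component to be defined before including log.h.
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding

#include "mongo/bson/bsonobj.h"
#include "mongo/db/namespace_string.h"
#include "mongo/logv2/log.h"

namespace mongo {

void logBeginRemoval(const NamespaceString& nss, const BSONObj& min, const BSONObj& max) {
    // Old style: attribute names were derived from C++ expressions, so tokens
    // like "nss_ns" leaked into the structured log output:
    //   LOGV2_DEBUG(23766, 1, "begin removal of {min} to {max} in {nss_ns}",
    //               "min"_attr = min, "max"_attr = max, "nss_ns"_attr = nss.ns());

    // New style: format string, a short stable message, and clean attribute names.
    LOGV2_DEBUG(23766,
                1,
                "Begin removal of {min} to {max} in {namespace}",
                "Begin removal of range",
                "min"_attr = min,
                "max"_attr = max,
                "namespace"_attr = nss.ns());
}

}  // namespace mongo
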
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
index 6908d834888..5f54e6e7781 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
@@ -1183,6 +1183,7 @@ void ShardingCatalogManager::ensureChunkVersionIsGreaterThan(OperationContext* o
23887,
"ensureChunkVersionIsGreaterThan bumped the version of the chunk with minKey {minKey}, "
"maxKey {maxKey}, and epoch {epoch}. Chunk is now {newChunk}",
+ "ensureChunkVersionIsGreaterThan bumped the the chunk version",
"minKey"_attr = minKey,
"maxKey"_attr = maxKey,
"epoch"_attr = version.epoch(),
diff --git a/src/mongo/db/s/move_primary_command.cpp b/src/mongo/db/s/move_primary_command.cpp
index 4e4b06e664f..a946d64b33f 100644
--- a/src/mongo/db/s/move_primary_command.cpp
+++ b/src/mongo/db/s/move_primary_command.cpp
@@ -50,8 +50,9 @@ namespace {
void uassertStatusOKWithWarning(const Status& status) {
if (!status.isOK()) {
LOGV2_WARNING(23762,
- "movePrimary failed{causedBy_status}",
- "causedBy_status"_attr = causedBy(redact(status)));
+ "movePrimary failed: {error}",
+ "movePrimary failed",
+ "error"_attr = redact(status));
uassertStatusOK(status);
}
}
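
The hunk above is representative of how causedBy-style concatenation is handled throughout the patch: instead of folding causedBy(redact(status)) into the message text, the redacted Status becomes its own queryable "error" attribute. A hedged sketch of the same idea, using the macros from this diff; the helper name is illustrative and the redaction.h include path is an assumption:

// Illustrative sketch only -- not part of this commit.
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding

#include "mongo/base/status.h"
#include "mongo/logv2/log.h"
#include "mongo/logv2/redaction.h"  // redact(); header path assumed
#include "mongo/util/assert_util.h"

namespace mongo {

// Log a warning for a failed status, then rethrow it via uassert, mirroring
// uassertStatusOKWithWarning() in move_primary_command.cpp.
void warnAndRethrow(const Status& status) {
    if (!status.isOK()) {
        LOGV2_WARNING(23762,
                      "movePrimary failed: {error}",
                      "movePrimary failed",
                      "error"_attr = redact(status));
        uassertStatusOK(status);
    }
}

}  // namespace mongo
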
diff --git a/src/mongo/db/s/range_deletion_util.cpp b/src/mongo/db/s/range_deletion_util.cpp
index bf3adab6da8..ffc9235475f 100644
--- a/src/mongo/db/s/range_deletion_util.cpp
+++ b/src/mongo/db/s/range_deletion_util.cpp
@@ -89,9 +89,10 @@ bool collectionUuidHasChanged(const NamespaceString& nss,
if (!currentCollection) {
LOGV2_DEBUG(23763,
1,
- "Abandoning range deletion task for {nss_ns} with UUID "
+ "Abandoning range deletion task for {namespace} with UUID "
"{expectedCollectionUuid} because the collection has been dropped",
- "nss_ns"_attr = nss.ns(),
+ "Abandoning range deletion task for because the collection has been dropped",
+ "namespace"_attr = nss.ns(),
"expectedCollectionUuid"_attr = expectedCollectionUuid);
return true;
}
@@ -100,12 +101,12 @@ bool collectionUuidHasChanged(const NamespaceString& nss,
LOGV2_DEBUG(
23764,
1,
- "Abandoning range deletion task for {nss_ns} with UUID {expectedCollectionUuid} "
- "because UUID of {nss_ns2}has changed (current is {currentCollection_uuid})",
- "nss_ns"_attr = nss.ns(),
- "expectedCollectionUuid"_attr = expectedCollectionUuid,
- "nss_ns2"_attr = nss.ns(),
- "currentCollection_uuid"_attr = currentCollection->uuid());
+ "Abandoning range deletion task for {namespace} with UUID {expectedCollectionUUID} "
+ "because UUID of {namespace} has changed (current is {currentCollectionUUID})",
+ "Abandoning range deletion task because UUID has changed",
+ "namespace"_attr = nss.ns(),
+ "expectedCollectionUUID"_attr = expectedCollectionUuid,
+ "currentCollectionUUID"_attr = currentCollection->uuid());
return true;
}
@@ -133,10 +134,12 @@ StatusWith<int> deleteNextBatch(OperationContext* opCtx,
auto catalog = collection->getIndexCatalog();
const IndexDescriptor* idx = catalog->findShardKeyPrefixedIndex(opCtx, keyPattern, false);
if (!idx) {
- std::string msg = str::stream()
- << "Unable to find shard key index for " << keyPattern.toString() << " in " << nss.ns();
- LOGV2(23765, "{msg}", "msg"_attr = msg);
- return {ErrorCodes::InternalError, msg};
+ LOGV2_ERROR_OPTIONS(23765,
+ {logv2::UserAssertAfterLog(ErrorCodes::InternalError)},
+ "Unable to find shard key index for {keyPattern} in {namespace}",
+ "Unable to find shard key index",
+ "keyPattern"_attr = keyPattern.toString(),
+ "namespace"_attr = nss.ns());
}
// Extend bounds to match the index we found
@@ -150,19 +153,22 @@ StatusWith<int> deleteNextBatch(OperationContext* opCtx,
LOGV2_DEBUG(23766,
1,
- "begin removal of {min} to {max} in {nss_ns}",
+ "Begin removal of {min} to {max} in {namespace}",
+ "Begin removal of range",
"min"_attr = min,
"max"_attr = max,
- "nss_ns"_attr = nss.ns());
+ "namespace"_attr = nss.ns());
const auto indexName = idx->indexName();
const IndexDescriptor* descriptor =
collection->getIndexCatalog()->findIndexByName(opCtx, indexName);
if (!descriptor) {
- std::string msg = str::stream()
- << "shard key index with name " << indexName << " on '" << nss.ns() << "' was dropped";
- LOGV2(23767, "{msg}", "msg"_attr = msg);
- return {ErrorCodes::InternalError, msg};
+ LOGV2_ERROR_OPTIONS(23767,
+ {logv2::UserAssertAfterLog(ErrorCodes::InternalError)},
+ "Shard key index with name {indexName} on {namespace} was dropped",
+ "Shard key index was dropped",
+ "indexName"_attr = indexName,
+ "namespace"_attr = nss.ns());
}
auto deleteStageParams = std::make_unique<DeleteStageParams>();
@@ -211,16 +217,16 @@ StatusWith<int> deleteNextBatch(OperationContext* opCtx,
}
if (state == PlanExecutor::FAILURE) {
- LOGV2_WARNING(
- 23776,
- "{PlanExecutor_statestr_state} - cursor error while trying to delete {min} to "
- "{max} in {nss}: FAILURE, stats: {Explain_getWinningPlanStats_exec_get}",
- "PlanExecutor_statestr_state"_attr = PlanExecutor::statestr(state),
- "min"_attr = redact(min),
- "max"_attr = redact(max),
- "nss"_attr = nss,
- "Explain_getWinningPlanStats_exec_get"_attr =
- Explain::getWinningPlanStats(exec.get()));
+ LOGV2_WARNING(23776,
+ "{planExecutorState} - cursor error while trying to delete {min} to "
+ "{max} in {namespace}: FAILURE, stats: {explainGetWinningPlanStats}",
+ "Cursor error while trying to delete range",
+ "planExecutorState"_attr = PlanExecutor::statestr(state),
+ "min"_attr = redact(min),
+ "max"_attr = redact(max),
+ "namespace"_attr = nss,
+ "explainGetWinningPlanStats"_attr =
+ Explain::getWinningPlanStats(exec.get()));
break;
}
@@ -312,14 +318,16 @@ ExecutorFuture<void> deleteRangeInBatches(const std::shared_ptr<executor::TaskEx
auto numDeleted = uassertStatusOK(deleteNextBatch(
opCtx, collection, keyPattern, range, numDocsToRemovePerBatch));
- LOGV2_DEBUG(23769,
- 2,
- "Deleted {numDeleted} documents in pass in namespace {nss_ns} with "
- "UUID {collectionUuid} for range {range}",
- "numDeleted"_attr = numDeleted,
- "nss_ns"_attr = nss.ns(),
- "collectionUuid"_attr = collectionUuid,
- "range"_attr = range.toString());
+ LOGV2_DEBUG(
+ 23769,
+ 2,
+ "Deleted {numDeleted} documents in pass in namespace {namespace} with "
+ "UUID {collectionUUID} for range {range}",
+ "Deleted documents in pass",
+ "numDeleted"_attr = numDeleted,
+ "namespace"_attr = nss.ns(),
+ "collectionUUID"_attr = collectionUuid,
+ "range"_attr = range.toString());
return numDeleted;
});
@@ -379,10 +387,11 @@ ExecutorFuture<void> waitForDeletionsToMajorityReplicate(
LOGV2_DEBUG(23771,
2,
- "Waiting for majority replication of local deletions in namespace {nss_ns} "
- "with UUID {collectionUuid} for range {range}",
- "nss_ns"_attr = nss.ns(),
- "collectionUuid"_attr = collectionUuid,
+ "Waiting for majority replication of local deletions in namespace {namespace} "
+ "with UUID {collectionUUID} for range {range}",
+ "Waiting for majority replication of local deletions",
+ "namespace"_attr = nss.ns(),
+ "collectionUUID"_attr = collectionUuid,
"range"_attr = redact(range.toString()));
// Asynchronously wait for majority write concern.
@@ -421,9 +430,10 @@ SharedSemiFuture<void> removeDocumentsInRange(
.then([=]() mutable {
LOGV2_DEBUG(23772,
2,
- "Beginning deletion of any documents in {nss_ns} range {range} with "
+ "Beginning deletion of any documents in {namespace} range {range} with "
"numDocsToRemovePerBatch {numDocsToRemovePerBatch}",
- "nss_ns"_attr = nss.ns(),
+ "Beginning deletion of documents",
+ "namespace"_attr = nss.ns(),
"range"_attr = redact(range.toString()),
"numDocsToRemovePerBatch"_attr = numDocsToRemovePerBatch);
@@ -459,15 +469,17 @@ SharedSemiFuture<void> removeDocumentsInRange(
if (s.isOK()) {
LOGV2_DEBUG(23773,
2,
- "Completed deletion of documents in {nss_ns} range {range}",
- "nss_ns"_attr = nss.ns(),
+ "Completed deletion of documents in {namespace} range {range}",
+ "Completed deletion of documents",
+ "namespace"_attr = nss.ns(),
"range"_attr = redact(range.toString()));
} else {
LOGV2(23774,
- "Failed to delete of documents in {nss_ns} range {range}{causedBy_s}",
- "nss_ns"_attr = nss.ns(),
+ "Failed to delete documents in {namespace} range {range} due to {error}",
+ "Failed to delete documents",
+ "namespace"_attr = nss.ns(),
"range"_attr = redact(range.toString()),
- "causedBy_s"_attr = causedBy(redact(s)));
+ "error"_attr = redact(s));
}
if (s.code() == ErrorCodes::RangeDeletionAbandonedBecauseTaskDocumentDoesNotExist) {
@@ -487,19 +499,21 @@ SharedSemiFuture<void> removeDocumentsInRange(
} catch (const DBException& e) {
LOGV2(23770,
"Failed to delete range deletion task for range {range} in collection "
- "{nss}{causedBy_e_what}",
+ "{namespace} due to {error}",
+ "Failed to delete range deletion task",
"range"_attr = range,
- "nss"_attr = nss,
- "causedBy_e_what"_attr = causedBy(e.what()));
+ "namespace"_attr = nss,
+ "error"_attr = e.what());
return e.toStatus();
}
LOGV2_DEBUG(23775,
1,
- "Completed removal of persistent range deletion task for {nss_ns} "
+ "Completed removal of persistent range deletion task for {namespace} "
"range {range}",
- "nss_ns"_attr = nss.ns(),
+ "Completed removal of persistent range deletion task",
+ "namespace"_attr = nss.ns(),
"range"_attr = redact(range.toString()));
// Propagate any errors to callers waiting on the result.
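
The deleteNextBatch() hunks above also retire the "build a str::stream message, log it, return an error Status" sequence in favor of LOGV2_ERROR_OPTIONS with logv2::UserAssertAfterLog, which uasserts with the given error code once the structured message has been written. A hedged sketch of that pattern; the function and parameters are illustrative, while the macro, option type, and log ID are taken from the hunk:

// Illustrative sketch only -- not part of this commit.
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding

#include "mongo/base/error_codes.h"
#include "mongo/bson/bsonobj.h"
#include "mongo/db/namespace_string.h"
#include "mongo/logv2/log.h"

namespace mongo {

// Fail the calling operation when no shard key index was found: the macro logs
// the structured error and then throws, replacing the former explicit
// `return {ErrorCodes::InternalError, msg}`.
void ensureShardKeyIndexFound(bool foundIndex,
                              const BSONObj& keyPattern,
                              const NamespaceString& nss) {
    if (!foundIndex) {
        LOGV2_ERROR_OPTIONS(23765,
                            {logv2::UserAssertAfterLog(ErrorCodes::InternalError)},
                            "Unable to find shard key index for {keyPattern} in {namespace}",
                            "Unable to find shard key index",
                            "keyPattern"_attr = keyPattern.toString(),
                            "namespace"_attr = nss.ns());
    }
}

}  // namespace mongo
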
diff --git a/src/mongo/s/write_ops/batch_write_exec.cpp b/src/mongo/s/write_ops/batch_write_exec.cpp
index 5db04e91b20..04344d1d0ef 100644
--- a/src/mongo/s/write_ops/batch_write_exec.cpp
+++ b/src/mongo/s/write_ops/batch_write_exec.cpp
@@ -437,8 +437,9 @@ void BatchWriteExec::executeBatch(OperationContext* opCtx,
// It's okay if we can't refresh, we'll just record errors for the ops if
// needed.
LOGV2_WARNING(22911,
- "could not refresh targeter{causedBy_refreshStatus_reason}",
- "causedBy_refreshStatus_reason"_attr = causedBy(refreshStatus.reason()));
+ "Could not refresh targeter due to {error}",
+ "Could not refresh targeter",
+ "error"_attr = redact(refreshStatus.reason()));
}
//