author     Blake Oler <blake.oler@mongodb.com>              2020-03-26 15:52:00 -0400
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>  2020-03-31 00:07:40 +0000
commit     a5a4ec4324381c04597bc8d8fd05aa2f1994786b (patch)
tree       04e1977e062866b83c432b55c9bb7d6c43ec94e9 /src/mongo/db/s/migration_util.cpp
parent     b74a8143ac2160f8687de2a222ce66af85a092de (diff)
SERVER-46799 Update sharding log lines to adhere to LOGV2 style guide
Diffstat (limited to 'src/mongo/db/s/migration_util.cpp')
-rw-r--r--  src/mongo/db/s/migration_util.cpp  |  129
1 files changed, 68 insertions, 61 deletions
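
The diff applies one pattern throughout the file, following the LOGV2 style guide: the message becomes a short constant phrase, values move out of the message and into descriptively named attributes, and values that may contain user data (command objects, BSON documents, error details) are wrapped in redact(). A minimal before/after sketch of that pattern follows; the log ID 12345 and the task/nss variables are hypothetical, and it assumes the logv2 setup migration_util.cpp already uses.

// Sketch only, not part of this commit. Hypothetical log ID and variables;
// assumes the "mongo/logv2/log.h" setup that migration_util.cpp already uses.

// Old style: the message embeds {placeholders} that duplicate the attributes.
LOGV2(12345,
      "Processing task {task} for {nss}",
      "task"_attr = task.toBSON(),
      "nss"_attr = nss);

// New style: constant message, descriptive attribute names, and redact()
// around values that may contain user data.
LOGV2(12345,
      "Processing task",
      "task"_attr = redact(task.toBSON()),
      "namespace"_attr = nss);
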
diff --git a/src/mongo/db/s/migration_util.cpp b/src/mongo/db/s/migration_util.cpp
index a057dacd3e0..c545860f503 100644
--- a/src/mongo/db/s/migration_util.cpp
+++ b/src/mongo/db/s/migration_util.cpp
@@ -112,7 +112,7 @@ void sendToRecipient(OperationContext* opCtx,
uassertStatusOK(Grid::get(opCtx)->shardRegistry()->getShard(opCtx, recipientId));
auto cmdBSON = cmd.toBSON(passthroughFields);
- LOGV2_DEBUG(22023, 1, "Sending request {cmd} to recipient.", "cmd"_attr = cmdBSON);
+ LOGV2_DEBUG(22023, 1, "Sending request to recipient", "commandToSend"_attr = redact(cmdBSON));
auto response = recipientShard->runCommandWithFixedRetryAttempts(
opCtx,
@@ -196,11 +196,10 @@ void retryIdempotentWorkAsPrimaryUntilSuccessOrStepdown(
} catch (DBException& ex) {
if (attempt % kLogRetryAttemptThreshold == 1) {
LOGV2_WARNING(23937,
- "retrying {taskDescription} after {attempt} failed attempts, last "
- "seen error: {ex}",
- "taskDescription"_attr = taskDescription,
+ "Retrying task after failed attempt",
+ "taskDescription"_attr = redact(taskDescription),
"attempt"_attr = attempt,
- "ex"_attr = ex);
+ "error"_attr = redact(ex));
}
}
}
@@ -261,19 +260,17 @@ ExecutorFuture<void> submitRangeDeletionTask(OperationContext* opCtx,
// does not match the UUID of the deletion task, force a filtering metadata refresh
// once, because this node may have just stepped up and therefore may have a stale
// cache.
- LOGV2(
- 22024,
- "Filtering metadata for namespace in deletion task "
- "{deletionTask} {collectionStatus}, forcing a refresh of {deletionTask_getNss}",
- "deletionTask"_attr = deletionTask.toBSON(),
- "collectionStatus"_attr =
- (css->getCurrentMetadataIfKnown()
- ? (css->getCollectionDescription().isSharded()
- ? "has UUID that does not match UUID of the deletion task"
- : "is unsharded")
- : "is not known"),
- "deletionTask_getNss"_attr = deletionTask.getNss(),
- "migrationId"_attr = deletionTask.getId());
+ LOGV2(22024,
+ "Filtering metadata for this range deletion task; forcing refresh",
+ "deletionTask"_attr = redact(deletionTask.toBSON()),
+ "error"_attr = (css->getCurrentMetadataIfKnown()
+ ? (css->getCollectionDescription().isSharded()
+ ? "Collection has UUID that does not match UUID "
+ "of the deletion task"
+ : "Collection is unsharded")
+ : "Collection's sharding state is not known"),
+ "namespace"_attr = deletionTask.getNss(),
+ "migrationId"_attr = deletionTask.getId());
// TODO (SERVER-46075): Add an asynchronous version of
// forceShardFilteringMetadataRefresh to avoid blocking on the network in the
@@ -294,8 +291,8 @@ ExecutorFuture<void> submitRangeDeletionTask(OperationContext* opCtx,
css->getCollectionDescription().uuidMatches(deletionTask.getCollectionUuid()));
LOGV2(22026,
- "Submitting range deletion task {deletionTask}",
- "deletionTask"_attr = deletionTask.toBSON(),
+ "Submitting range deletion task",
+ "deletionTask"_attr = redact(deletionTask.toBSON()),
"migrationId"_attr = deletionTask.getId());
const auto whenToClean = deletionTask.getWhenToClean() == CleanWhenEnum::kNow
@@ -314,10 +311,9 @@ ExecutorFuture<void> submitRangeDeletionTask(OperationContext* opCtx,
auto opCtx = uniqueOpCtx.get();
LOGV2(22027,
- "Failed to submit range deletion task "
- "{deletionTask}{causedBy_status}",
- "deletionTask"_attr = deletionTask.toBSON(),
- "causedBy_status"_attr = causedBy(status),
+ "Failed to submit range deletion task",
+ "deletionTask"_attr = redact(deletionTask.toBSON()),
+ "error"_attr = redact(status),
"migrationId"_attr = deletionTask.getId());
if (status == ErrorCodes::RangeDeletionAbandonedBecauseCollectionWithUUIDDoesNotExist) {
@@ -360,8 +356,8 @@ void resubmitRangeDeletionsOnStepUp(ServiceContext* serviceContext) {
.getAsync([](const Status& status) {
if (!status.isOK()) {
LOGV2(45739,
- "Error while submitting pending deletions: {status}",
- "status"_attr = status);
+ "Error while submitting pending range deletions",
+ "error"_attr = redact(status));
}
});
}
@@ -383,8 +379,8 @@ void forEachOrphanRange(OperationContext* opCtx, const NamespaceString& nss, Cal
if (!collDesc.isSharded()) {
LOGV2(22029,
- "Upgrade: skipping orphaned range enumeration for {nss}, collection is not sharded",
- "nss"_attr = nss);
+ "Upgrade: Skipping orphaned range enumeration because the collection is not sharded",
+ "namespace"_attr = nss);
return;
}
@@ -395,9 +391,9 @@ void forEachOrphanRange(OperationContext* opCtx, const NamespaceString& nss, Cal
if (!range) {
LOGV2_DEBUG(22030,
2,
- "Upgrade: Completed orphaned range enumeration for {nss} starting from "
- "{startingKey}, no orphan ranges remain",
- "nss"_attr = nss.toString(),
+ "Upgrade: Completed orphanged range enumeration; no orphaned ranges "
+ "remain",
+ "namespace"_attr = nss.toString(),
"startingKey"_attr = redact(startingKey));
return;
@@ -418,8 +414,8 @@ void submitOrphanRanges(OperationContext* opCtx, const NamespaceString& nss, con
LOGV2_DEBUG(22031,
2,
- "Upgrade: Cleaning up existing orphans for {nss} : {uuid}",
- "nss"_attr = nss,
+ "Upgrade: Cleaning up existing orphans",
+ "namespace"_attr = nss,
"uuid"_attr = uuid);
std::vector<RangeDeletionTask> deletions;
@@ -439,17 +435,17 @@ void submitOrphanRanges(OperationContext* opCtx, const NamespaceString& nss, con
for (const auto& task : deletions) {
LOGV2_DEBUG(22032,
2,
- "Upgrade: Submitting range for cleanup: {task_getRange} from {nss}",
- "task_getRange"_attr = task.getRange(),
- "nss"_attr = nss);
+ "Upgrade: Submitting chunk range for cleanup",
+ "range"_attr = redact(task.getRange().toString()),
+ "namespace"_attr = nss);
store.add(opCtx, task);
}
} catch (ExceptionFor<ErrorCodes::NamespaceNotFound>& e) {
LOGV2(22033,
- "Upgrade: Failed to cleanup orphans for {nss} because the namespace was not found: "
- "{e_what}, the collection must have been dropped",
- "nss"_attr = nss,
- "e_what"_attr = e.what());
+ "Upgrade: Failed to clean up orphans because the namespace was not found; the "
+ "collection must have been dropped",
+ "namespace"_attr = nss,
+ "error"_attr = redact(e.what()));
}
}
@@ -464,7 +460,10 @@ void submitOrphanRangesForCleanup(OperationContext* opCtx) {
for (auto collIt = catalog.begin(dbName); collIt != catalog.end(); ++collIt) {
auto uuid = collIt.uuid().get();
auto nss = catalog.lookupNSSByUUID(opCtx, uuid).get();
- LOGV2_DEBUG(22034, 2, "Upgrade: processing collection: {nss}", "nss"_attr = nss);
+ LOGV2_DEBUG(22034,
+ 2,
+ "Upgrade: Processing collection for orphaned range cleanup",
+ "namespace"_attr = nss);
submitOrphanRanges(opCtx, nss, uuid);
}
@@ -784,7 +783,7 @@ void resumeMigrationCoordinationsOnStepUp(OperationContext* opCtx) {
const auto lastOpTime = replClientInfo.getLastOp();
LOGV2_DEBUG(22038,
2,
- "Waiting for OpTime {lastOpTime} to become majority committed",
+ "Waiting for this OpTime to become majority committed",
"lastOpTime"_attr = lastOpTime);
return WaitForMajorityService::get(serviceContext).waitUntilMajority(lastOpTime);
})
@@ -806,7 +805,10 @@ void resumeMigrationCoordinationsOnStepUp(OperationContext* opCtx) {
opCtx,
query,
[&opCtx, &migrationRecoveryCount](const MigrationCoordinatorDocument& doc) {
- LOGV2_DEBUG(22039, 2, "Recovering migration {doc}", "doc"_attr = doc.toBSON());
+ LOGV2_DEBUG(22039,
+ 2,
+ "Recovering migration using a given migration coordinator document",
+ "migrationCoordinatorDoc"_attr = redact(doc.toBSON()));
migrationRecoveryCount++;
@@ -840,22 +842,27 @@ void resumeMigrationCoordinationsOnStepUp(OperationContext* opCtx) {
if (!refreshedMetadata || !(*refreshedMetadata)->isSharded() ||
!(*refreshedMetadata)->uuidMatches(doc.getCollectionUuid())) {
- LOGV2(
- 22040,
- "Even after forced refresh, filtering metadata for namespace in "
- "migration coordinator doc "
- "{doc}{refreshedMetadata_refreshedMetadata_isSharded_is_not_known_has_"
- "UUID_"
- "that_does_not_match_the_collection_UUID_in_the_coordinator_doc}. "
- "Deleting "
- "the range deletion tasks on the donor and recipient as "
- "well as the migration coordinator document on this node.",
- "doc"_attr = doc.toBSON(),
- "refreshedMetadata_refreshedMetadata_isSharded_is_not_known_has_UUID_that_does_not_match_the_collection_UUID_in_the_coordinator_doc"_attr =
- (!refreshedMetadata || !(*refreshedMetadata)->isSharded()
- ? "is not known"
- : "has UUID that does not match the collection UUID in the "
- "coordinator doc"));
+ if (!refreshedMetadata || !(*refreshedMetadata)->isSharded()) {
+ LOGV2(
+ 22040,
+ "Even after forced refresh, filtering metadata for this namespace "
+ "is not known. Deleting the range deletion tasks on the donor "
+ "and recipient as well as the migration coordinator document on "
+ "this node",
+ "migrationCoordinatorDocument"_attr = redact(doc.toBSON()));
+ } else {
+ LOGV2(
+ 46712004,
+ "Even after forced refresh, the filtering metadata has a UUID that "
+ "does not match the collection UUID in the migration coordinator "
+ "document. Deleting the range deletion tasks on the donor and "
+ "recipient as well as the migration coordinator document on this "
+ "node",
+ "migrationCoordinatorDocument"_attr = redact(doc.toBSON()),
+ "refreshedMetadataUUID"_attr =
+ (*refreshedMetadata)->getChunkManager()->getUUID(),
+ "coordinatorDocumentUUID"_attr = doc.getCollectionUuid());
+ }
// TODO (SERVER-45707): Test that range deletion tasks are eventually
// deleted even if the collection is dropped before migration coordination
@@ -884,8 +891,8 @@ void resumeMigrationCoordinationsOnStepUp(OperationContext* opCtx) {
.getAsync([migrationBlockingGuard](const Status& status) {
if (!status.isOK()) {
LOGV2(22041,
- "Failed to resume coordinating migrations on stepup {causedBy_status}",
- "causedBy_status"_attr = causedBy(status));
+ "Failed to resume coordinator migrations on stepup",
+ "error"_attr = redact(status));
}
});
}
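
A note on the hunk that replaces log ID 22040: the style guide gives every distinct message string its own stable log ID, so the old single call, whose message text was chosen by a ternary, is split into two LOGV2 calls (22040 and the new 46712004), each with a constant message. A minimal sketch of that splitting pattern, using hypothetical IDs, condition, and variables:

// Sketch only: one conditional-message log becomes two constant-message logs,
// each with its own unique ID. IDs, condition, and variables are hypothetical.
if (!metadataIsKnown) {
    LOGV2(11111,
          "Filtering metadata is not known even after forced refresh",
          "migrationCoordinatorDocument"_attr = redact(doc.toBSON()));
} else {
    LOGV2(11112,
          "Filtering metadata UUID does not match the coordinator document UUID",
          "migrationCoordinatorDocument"_attr = redact(doc.toBSON()));
}
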