author     Blake Oler <blake.oler@mongodb.com>              2020-03-26 15:52:00 -0400
committer  Evergreen Agent <no-reply@evergreen.mongodb.com> 2020-03-31 00:07:40 +0000
commit     a5a4ec4324381c04597bc8d8fd05aa2f1994786b (patch)
tree       04e1977e062866b83c432b55c9bb7d6c43ec94e9
parent     b74a8143ac2160f8687de2a222ce66af85a092de (diff)
download   mongo-a5a4ec4324381c04597bc8d8fd05aa2f1994786b.tar.gz
SERVER-46799 Update sharding log lines to adhere to LOGV2 style guide
-rw-r--r--  src/mongo/db/s/metadata_manager.cpp                                67
-rw-r--r--  src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp            44
-rw-r--r--  src/mongo/db/s/migration_destination_manager.cpp                   97
-rw-r--r--  src/mongo/db/s/migration_destination_manager_legacy_commands.cpp   10
-rw-r--r--  src/mongo/db/s/migration_source_manager.cpp                        45
-rw-r--r--  src/mongo/db/s/migration_util.cpp                                 129
-rw-r--r--  src/mongo/db/s/move_primary_source_manager.cpp                     26
-rw-r--r--  src/mongo/db/s/periodic_balancer_config_refresher.cpp               5
-rw-r--r--  src/mongo/db/s/scoped_operation_completion_sharding_actions.cpp    12
-rw-r--r--  src/mongo/db/s/set_shard_version_command.cpp                       35
10 files changed, 278 insertions, 192 deletions
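
Every hunk below applies the same LOGV2 style-guide conversion: auto-generated attribute names such as nss_ns and causedBy_status become conventional ones such as namespace and error, each line gains a second, placeholder-free message, and user-provided values are wrapped in redact(). A minimal sketch (not part of the patch) of the target call shape; the log ID 12345 and the helper function are hypothetical, while the macro form matches the hunks below:

#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding

#include "mongo/logv2/log.h"

namespace mongo {
void logCloneProgress(const NamespaceString& nss, long long docsCloned) {
    // Hypothetical ID; real LOGV2 IDs must be globally unique across the codebase.
    LOGV2(12345,
          "Cloned {docsCloned} documents so far for {namespace}",  // format string: every
                                                                   // {placeholder} must match an
                                                                   // "_attr" name exactly
          "Cloned documents for namespace",                        // plain message, no placeholders
          "namespace"_attr = nss.ns(),
          "docsCloned"_attr = docsCloned);
}
}  // namespace mongo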
diff --git a/src/mongo/db/s/metadata_manager.cpp b/src/mongo/db/s/metadata_manager.cpp
index 73736707a67..ff8b1032197 100644
--- a/src/mongo/db/s/metadata_manager.cpp
+++ b/src/mongo/db/s/metadata_manager.cpp
@@ -225,19 +225,25 @@ void MetadataManager::setFilteringMetadata(CollectionMetadata remoteMetadata) {
activeMetadata.getCollVersion() >= remoteMetadata.getCollVersion()) {
LOGV2_DEBUG(21984,
1,
- "Ignoring update of active metadata {activeMetadata_Basic} with an older "
- "{remoteMetadata_Basic}",
- "activeMetadata_Basic"_attr = activeMetadata.toStringBasic(),
- "remoteMetadata_Basic"_attr = remoteMetadata.toStringBasic());
+ "Ignoring incoming metadata update {activeMetadata} for {namespace} because "
+ "the active (current) metadata {remoteMetadata} has the same or a newer "
+ "collection version",
+ "Ignoring incoming metadata update for this namespace because the active "
+ "(current) metadata has the same or a newer collection version",
+ "namespace"_attr = _nss.ns(),
+ "activeMetadata"_attr = activeMetadata.toStringBasic(),
+ "remoteMetadata"_attr = remoteMetadata.toStringBasic());
return;
}
LOGV2(21985,
- "Updating metadata for collection {nss_ns} from {activeMetadata_Basic} to "
- "{remoteMetadata_Basic} due to version change",
- "nss_ns"_attr = _nss.ns(),
- "activeMetadata_Basic"_attr = activeMetadata.toStringBasic(),
- "remoteMetadata_Basic"_attr = remoteMetadata.toStringBasic());
+ "Updating metadata {activeMetadata} for {namespace} because the remote metadata "
+ "{remoteMetadata} has a newer collection version",
+ "Updating metadata for this namespace because the remote metadata has a newer "
+ "collection version",
+ "namespace"_attr = _nss.ns(),
+ "activeMetadata"_attr = activeMetadata.toStringBasic(),
+ "remoteMetadata"_attr = remoteMetadata.toStringBasic());
// Resolve any receiving chunks, which might have completed by now
for (auto it = _receivingChunks.begin(); it != _receivingChunks.end();) {
@@ -252,10 +258,11 @@ void MetadataManager::setFilteringMetadata(CollectionMetadata remoteMetadata) {
// deem it successfully received
LOGV2_DEBUG(21986,
2,
- "Verified chunk {receivingRange} for collection {nss_ns} has been migrated to "
- "this shard earlier",
- "receivingRange"_attr = redact(receivingRange.toString()),
- "nss_ns"_attr = _nss.ns());
+ "Chunk {range} for {namespace} has already been migrated to this "
+ "shard",
+ "The incoming chunk migration for this shard has already been completed",
+ "range"_attr = redact(receivingRange.toString()),
+ "namespace"_attr = _nss.ns());
_receivingChunks.erase(it);
it = _receivingChunks.begin();
@@ -361,9 +368,11 @@ SharedSemiFuture<void> MetadataManager::beginReceive(ChunkRange const& range) {
LOGV2_OPTIONS(21987,
{logv2::LogComponent::kShardingMigration},
- "Scheduling deletion of any documents in {nss_ns} range {range} before migrating "
- "in a chunk covering the range",
- "nss_ns"_attr = _nss.ns(),
+ "Scheduling deletion of any documents in {namespace} range {range} before "
+ "migrating in a chunk covering the range",
+ "Scheduling deletion of any documents in the collection's specified range "
+ "before migrating chunks into said range",
+ "namespace"_attr = _nss.ns(),
"range"_attr = redact(range.toString()));
return _submitRangeForDeletion(lg,
@@ -379,12 +388,15 @@ void MetadataManager::forgetReceive(ChunkRange const& range) {
// This is potentially a partially received chunk, which needs to be cleaned up. We know none
// of these documents are in use, so they can go straight to the deletion queue.
- LOGV2_OPTIONS(21988,
- {logv2::LogComponent::kShardingMigration},
- "Abandoning in-migration of {nss_ns} range {range}; scheduling deletion of any "
- "documents already copied",
- "nss_ns"_attr = _nss.ns(),
- "range"_attr = range);
+ LOGV2_OPTIONS(
+ 21988,
+ {logv2::LogComponent::kShardingMigration},
+ "Abandoning incoming migration for {namespace} range {range}; scheduling deletion of any "
+ "documents already copied",
+ "Abandoning migration for the collection's specified range; scheduling deletion of any "
+ "documents already copied",
+ "namespace"_attr = _nss.ns(),
+ "range"_attr = redact(range.toString()));
invariant(!_overlapsInUseChunk(lg, range));
@@ -422,9 +434,11 @@ SharedSemiFuture<void> MetadataManager::cleanUpRange(ChunkRange const& range,
if (overlapMetadata) {
LOGV2_OPTIONS(21989,
{logv2::LogComponent::kShardingMigration},
- "Deletion of {nss_ns} range {range} will be scheduled after all possibly "
+ "Deletion of {namespace} range {range} will be scheduled after all possibly "
"dependent queries finish",
- "nss_ns"_attr = _nss.ns(),
+ "Deletion of the collection's specified range will be scheduled after all "
+ "possibly dependent queries finish",
+ "namespace"_attr = _nss.ns(),
"range"_attr = redact(range.toString()));
++overlapMetadata->numContingentRangeDeletionTasks;
// Schedule the range for deletion once the overlapping metadata object is destroyed
@@ -439,8 +453,9 @@ SharedSemiFuture<void> MetadataManager::cleanUpRange(ChunkRange const& range,
// No running queries can depend on this range, so queue it for deletion immediately.
LOGV2_OPTIONS(21990,
{logv2::LogComponent::kShardingMigration},
- "Scheduling deletion of {nss_ns} range {range}",
- "nss_ns"_attr = _nss.ns(),
+ "Scheduling deletion of {namespace} range {range}",
+ "Scheduling deletion of the collection's specified range",
+ "namespace"_attr = _nss.ns(),
"range"_attr = redact(range.toString()));
return _submitRangeForDeletion(lg,
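
Migration-related lines in this file route to a non-default log component through LOGV2_OPTIONS and scrub user data with redact(). A sketch of that call shape, mirroring the hunks above; the ID is hypothetical, and _nss and range are the members used in this file:

LOGV2_OPTIONS(12346,  // hypothetical ID
              {logv2::LogComponent::kShardingMigration},               // non-default component
              "Scheduling deletion of {namespace} range {range}",      // format string
              "Scheduling deletion of the collection's specified range",  // plain message
              "namespace"_attr = _nss.ns(),
              "range"_attr = redact(range.toString()));                // redact user-provided data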
diff --git a/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp b/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
index 00a63ba4f49..4da32466823 100644
--- a/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
+++ b/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
@@ -182,7 +182,8 @@ void LogTransactionOperationsForShardingHandler::commit(boost::optional<Timestam
auto idElement = documentKey["_id"];
if (idElement.eoo()) {
LOGV2_WARNING(21994,
- "Received a document with no id, ignoring: {documentKey}",
+ "Received a document without an _id field, ignoring: {documentKey}",
+ "Received a document without an _id and will ignore that document",
"documentKey"_attr = redact(documentKey));
continue;
}
@@ -474,8 +475,9 @@ void MigrationChunkClonerSourceLegacy::cancelClone(OperationContext* opCtx) {
.getStatus();
if (!status.isOK()) {
LOGV2(21991,
- "Failed to cancel migration {causedBy_status}",
- "causedBy_status"_attr = causedBy(redact(status)));
+ "Failed to cancel migration: {error}",
+ "Failed to cancel migration",
+ "error"_attr = redact(status));
}
}
// Intentional fall through
@@ -499,8 +501,10 @@ void MigrationChunkClonerSourceLegacy::onInsertOp(OperationContext* opCtx,
BSONElement idElement = insertedDoc["_id"];
if (idElement.eoo()) {
LOGV2_WARNING(21995,
- "logInsertOp got a document with no _id field, ignoring inserted document: "
- "{insertedDoc}",
+ "logInsertOp received a document without an _id field, ignoring inserted "
+ "document: {insertedDoc}",
+ "logInsertOp received a document without an _id field and will ignore that "
+ "document",
"insertedDoc"_attr = redact(insertedDoc));
return;
}
@@ -533,7 +537,9 @@ void MigrationChunkClonerSourceLegacy::onUpdateOp(OperationContext* opCtx,
if (idElement.eoo()) {
LOGV2_WARNING(
21996,
- "logUpdateOp got a document with no _id field, ignoring updatedDoc: {postImageDoc}",
+ "logUpdateOp received a document without an _id field, ignoring the updated document: "
+ "{postImageDoc}",
+ "logUpdateOp received a document without an _id field and will ignore that document",
"postImageDoc"_attr = redact(postImageDoc));
return;
}
@@ -573,7 +579,9 @@ void MigrationChunkClonerSourceLegacy::onDeleteOp(OperationContext* opCtx,
if (idElement.eoo()) {
LOGV2_WARNING(
21997,
- "logDeleteOp got a document with no _id field, ignoring deleted doc: {deletedDocId}",
+ "logDeleteOp received a document without an _id field, ignoring deleted doc: "
+ "{deletedDocId}",
+ "logDeleteOp received a document without an _id field and will ignore that document",
"deletedDocId"_attr = redact(deletedDocId));
return;
}
@@ -1080,18 +1088,20 @@ Status MigrationChunkClonerSourceLegacy::_checkRecipientCloningStatus(OperationC
if (_forceJumbo && _jumboChunkCloneState) {
LOGV2(21992,
- "moveChunk data transfer progress: {res} mem used: {memoryUsed} documents cloned "
- "so far: {jumboChunkCloneState_docsCloned}",
- "res"_attr = redact(res),
- "memoryUsed"_attr = _memoryUsed,
- "jumboChunkCloneState_docsCloned"_attr = _jumboChunkCloneState->docsCloned);
+ "moveChunk data transfer progress: {response} mem used: {memoryUsedBytes} "
+ "documents cloned so far: {docsCloned}",
+ "moveChunk data transfer progress",
+ "response"_attr = redact(res),
+ "memoryUsedBytes"_attr = _memoryUsed,
+ "docsCloned"_attr = _jumboChunkCloneState->docsCloned);
} else {
LOGV2(21993,
- "moveChunk data transfer progress: {res} mem used: {memoryUsed} documents "
- "remaining to clone: {cloneLocsRemaining}",
- "res"_attr = redact(res),
- "memoryUsed"_attr = _memoryUsed,
- "cloneLocsRemaining"_attr = cloneLocsRemaining);
+ "moveChunk data transfer progress: {response} mem used: {memoryUsedBytes} "
+ "documents remaining to clone: {docsRemainingToClone}",
+ "moveChunk data transfer progress",
+ "response"_attr = redact(res),
+ "memoryUsedBytes"_attr = _memoryUsed,
+ "docsRemainingToClone"_attr = cloneLocsRemaining);
}
if (res["state"].String() == "steady") {
diff --git a/src/mongo/db/s/migration_destination_manager.cpp b/src/mongo/db/s/migration_destination_manager.cpp
index ffa5515393f..8e38066c477 100644
--- a/src/mongo/db/s/migration_destination_manager.cpp
+++ b/src/mongo/db/s/migration_destination_manager.cpp
@@ -241,7 +241,10 @@ void MigrationDestinationManager::setState(State newState) {
}
void MigrationDestinationManager::_setStateFail(StringData msg) {
- LOGV2(21998, "{msg}", "msg"_attr = msg);
+ LOGV2(21998,
+ "Error during migration: {error}",
+ "Error during migration",
+ "error"_attr = redact(msg));
{
stdx::lock_guard<Latch> sl(_mutex);
_errmsg = msg.toString();
@@ -253,7 +256,10 @@ void MigrationDestinationManager::_setStateFail(StringData msg) {
}
void MigrationDestinationManager::_setStateFailWarn(StringData msg) {
- LOGV2_WARNING(22010, "{msg}", "msg"_attr = msg);
+ LOGV2_WARNING(22010,
+ "Error during migration: {error}",
+ "Error during migration",
+ "error"_attr = redact(msg));
{
stdx::lock_guard<Latch> sl(_mutex);
_errmsg = msg.toString();
@@ -433,8 +439,9 @@ repl::OpTime MigrationDestinationManager::cloneDocumentsFromDonor(
stdx::lock_guard<Client> lk(*opCtx->getClient());
opCtx->getServiceContext()->killOperation(lk, opCtx, ErrorCodes::Error(51008));
LOGV2(21999,
- "Batch insertion failed {causedBy_exceptionToStatus}",
- "causedBy_exceptionToStatus"_attr = causedBy(redact(exceptionToStatus())));
+ "Batch insertion failed: {error}",
+ "Batch insertion failed",
+ "error"_attr = redact(exceptionToStatus()));
}
}};
@@ -817,13 +824,14 @@ void MigrationDestinationManager::_migrateDriver(OperationContext* outerOpCtx) {
invariant(!_max.isEmpty());
LOGV2(22000,
- "Starting receiving end of migration of chunk {min} -> {max} for collection {nss_ns} "
- "from {fromShard} at epoch {epoch} with session id {sessionId}",
- "min"_attr = redact(_min),
- "max"_attr = redact(_max),
- "nss_ns"_attr = _nss.ns(),
+ "Starting receiving end of migration of chunk {chunkMin} -> {chunkMax} for collection "
+ "{namespace} from {fromShard} at epoch {epoch} with session id {sessionId}",
+ "Starting receiving end of chunk migration",
+ "chunkMin"_attr = redact(_min),
+ "chunkMax"_attr = redact(_max),
+ "namespace"_attr = _nss.ns(),
"fromShard"_attr = _fromShard,
- "epoch"_attr = _epoch.toString(),
+ "epoch"_attr = _epoch,
"sessionId"_attr = *_sessionId,
"migrationId"_attr = _enableResumableRangeDeleter ? _migrationId->toBSON() : BSONObj());
@@ -834,7 +842,7 @@ void MigrationDestinationManager::_migrateDriver(OperationContext* outerOpCtx) {
if (initialState == ABORT) {
LOGV2_ERROR(22013,
- "Migration abort requested before it started",
+ "Migration abort requested before the migration started",
"migrationId"_attr =
_enableResumableRangeDeleter ? _migrationId->toBSON() : BSONObj());
return;
@@ -857,9 +865,11 @@ void MigrationDestinationManager::_migrateDriver(OperationContext* outerOpCtx) {
while (migrationutil::checkForConflictingDeletions(
outerOpCtx, range, donorCollectionOptionsAndIndexes.uuid)) {
LOGV2(22001,
- "Migration paused because range overlaps with a "
- "range that is scheduled for deletion: collection: {nss_ns} range: {range}",
- "nss_ns"_attr = _nss.ns(),
+ "Migration paused because the requested range {range} for {namespace} "
+ "overlaps with a range already scheduled for deletion",
+ "Migration paused because the requested range overlaps with a range already "
+ "scheduled for deletion",
+ "namespace"_attr = _nss.ns(),
"range"_attr = redact(range.toString()),
"migrationId"_attr =
_enableResumableRangeDeleter ? _migrationId->toBSON() : BSONObj());
@@ -1005,8 +1015,7 @@ void MigrationDestinationManager::_migrateDriver(OperationContext* outerOpCtx) {
_writeConcern);
if (replStatus.status.code() == ErrorCodes::WriteConcernFailed) {
LOGV2_WARNING(22011,
- "secondaryThrottle on, but doc insert timed out; "
- "continuing",
+ "secondaryThrottle on, but doc insert timed out; continuing",
"migrationId"_attr = _enableResumableRangeDeleter
? _migrationId->toBSON()
: BSONObj());
@@ -1292,18 +1301,25 @@ bool MigrationDestinationManager::_applyMigrateOp(OperationContext* opCtx,
autoColl.getDb(),
updatedDoc,
&localDoc)) {
- const std::string errMsg = str::stream()
- << "cannot migrate chunk, local document " << redact(localDoc)
- << " has same _id as reloaded remote document " << redact(updatedDoc);
- LOGV2_WARNING(22012,
- "{errMsg}",
- "errMsg"_attr = errMsg,
- "migrationId"_attr = _enableResumableRangeDeleter
- ? _migrationId->toBSON()
- : BSONObj());
+ const auto migrationId =
+ _enableResumableRangeDeleter ? _migrationId->toBSON() : BSONObj();
+
+ LOGV2_WARNING(
+ 22012,
+ "Cannot migrate chunk because the local document {localDoc} has the same _id "
+ "as the reloaded remote document {remoteDoc}",
+ "Cannot migrate chunk because the local document has the same _id as the "
+ "reloaded remote document",
+ "localDoc"_attr = redact(localDoc),
+ "remoteDoc"_attr = redact(updatedDoc),
+ "migrationId"_attr = migrationId);
// Exception will abort migration cleanly
- uasserted(16977, errMsg);
+ uasserted(16977,
+ str::stream() << "Cannot migrate chunk because the local document "
+ << redact(localDoc)
+ << " has the same _id as the reloaded remote document "
+ << redact(updatedDoc) << "; migrationId: " << migrationId);
}
// We are in write lock here, so sure we aren't killing
@@ -1326,12 +1342,14 @@ bool MigrationDestinationManager::_flushPendingWrites(OperationContext* opCtx,
static Occasionally sampler;
if (sampler.tick()) {
LOGV2(22007,
- "migrate commit waiting for a majority of slaves for '{nss_ns}' {min} -> {max} "
- "waiting for: {op}",
- "nss_ns"_attr = _nss.ns(),
- "min"_attr = redact(_min),
- "max"_attr = redact(_max),
- "op"_attr = op,
+ "Migration commit waiting for majority replication for {namespace}, "
+ "{chunkMin} -> {chunkMax}; waiting to reach this operation: {lastOpApplied}",
+ "Migration commit waiting for majority replication; waiting until the last "
+ "operation applied has been replicated",
+ "namespace"_attr = _nss.ns(),
+ "chunkMin"_attr = redact(_min),
+ "chunkMax"_attr = redact(_max),
+ "lastOpApplied"_attr = op,
"migrationId"_attr =
_enableResumableRangeDeleter ? _migrationId->toBSON() : BSONObj());
}
@@ -1339,10 +1357,11 @@ bool MigrationDestinationManager::_flushPendingWrites(OperationContext* opCtx,
}
LOGV2(22008,
- "migrate commit succeeded flushing to secondaries for '{nss_ns}' {min} -> {max}",
- "nss_ns"_attr = _nss.ns(),
- "min"_attr = redact(_min),
- "max"_attr = redact(_max),
+ "Migration commit succeeded flushing to secondaries for {namespace}, {min} -> {max}",
+ "Migration commit succeeded flushing to secondaries",
+ "namespace"_attr = _nss.ns(),
+ "chunkMin"_attr = redact(_min),
+ "chunkMax"_attr = redact(_max),
"migrationId"_attr = _enableResumableRangeDeleter ? _migrationId->toBSON() : BSONObj());
return true;
@@ -1395,9 +1414,11 @@ void MigrationDestinationManager::_forgetPending(OperationContext* opCtx, ChunkR
if (!optMetadata || !(*optMetadata)->isSharded() ||
(_collUuid && !(*optMetadata)->uuidMatches(*_collUuid))) {
LOGV2(22009,
- "No need to forget pending chunk {range} because the uuid for {nss_ns} changed",
+ "No need to forget pending chunk {range} because the UUID for {namespace} changed",
+ "No need to forget pending chunk for the requested range, because the UUID for the "
+ "namespace changed",
"range"_attr = redact(range.toString()),
- "nss_ns"_attr = _nss.ns());
+ "namespace"_attr = _nss.ns());
return;
}
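
This file also exercises the severity and verbosity variants of the macro: LOGV2_DEBUG takes a numeric debug level after the ID, while the WARNING and ERROR forms take the same arguments as LOGV2. A sketch under those assumptions (IDs hypothetical, nss and status assumed in scope):

LOGV2_DEBUG(12347,
            2,  // debug verbosity level; emitted only at log level 2 or higher
            "Waiting for majority replication for {namespace}",
            "Waiting for majority replication",
            "namespace"_attr = nss.ns());

LOGV2_WARNING(12348,
              "Insert timed out: {error}",
              "Insert timed out",
              "error"_attr = redact(status));  // Status values are redacted like documents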
diff --git a/src/mongo/db/s/migration_destination_manager_legacy_commands.cpp b/src/mongo/db/s/migration_destination_manager_legacy_commands.cpp
index 4144291e7cd..ff883868618 100644
--- a/src/mongo/db/s/migration_destination_manager_legacy_commands.cpp
+++ b/src/mongo/db/s/migration_destination_manager_legacy_commands.cpp
@@ -202,7 +202,10 @@ public:
Status const status = mdm->startCommit(sessionId);
mdm->report(result, opCtx, false);
if (!status.isOK()) {
- LOGV2(22014, "{status_reason}", "status_reason"_attr = status.reason());
+ LOGV2(22014,
+ "_recvChunkCommit failed: {error}",
+ "_recvChunkCommit failed",
+ "error"_attr = redact(status));
uassertStatusOK(status);
}
return true;
@@ -250,7 +253,10 @@ public:
Status const status = mdm->abort(migrationSessionIdStatus.getValue());
mdm->report(result, opCtx, false);
if (!status.isOK()) {
- LOGV2(22015, "{status_reason}", "status_reason"_attr = status.reason());
+ LOGV2(22015,
+ "_recvChunkAbort failed: {error}",
+ "_recvChunkAbort failed",
+ "error"_attr = redact(status));
uassertStatusOK(status);
}
} else if (migrationSessionIdStatus == ErrorCodes::NoSuchKey) {
diff --git a/src/mongo/db/s/migration_source_manager.cpp b/src/mongo/db/s/migration_source_manager.cpp
index 60b16377c98..68eeade0ac6 100644
--- a/src/mongo/db/s/migration_source_manager.cpp
+++ b/src/mongo/db/s/migration_source_manager.cpp
@@ -153,10 +153,11 @@ MigrationSourceManager::MigrationSourceManager(OperationContext* opCtx,
_args.getFromShardId() != _args.getToShardId());
LOGV2(22016,
- "Starting chunk migration {args} with expected collection version epoch "
- "{args_getVersionEpoch}",
- "args"_attr = redact(_args.toString()),
- "args_getVersionEpoch"_attr = _args.getVersionEpoch(),
+ "Starting chunk migration donation {requestParameters} with expected collection epoch "
+ "{collectionEpoch}",
+ "Starting chunk migration donation",
+ "requestParameters"_attr = redact(_args.toString()),
+ "collectionEpoch"_attr = _args.getVersionEpoch(),
"resumableRangeDeleterEnabled"_attr = _enableResumableRangeDeleter);
// Force refresh of the metadata to ensure we have the latest
@@ -501,11 +502,11 @@ Status MigrationSourceManager::commitChunkMetadataOnConfig() {
}
// Migration succeeded
- LOGV2(
- 22018,
- "Migration succeeded and updated collection version to {refreshedMetadata_getCollVersion}",
- "refreshedMetadata_getCollVersion"_attr = refreshedMetadata->getCollVersion(),
- "migrationId"_attr = getMigrationIdBSON(_coordinator.get()));
+ LOGV2(22018,
+ "Migration succeeded and updated collection version to {updatedCollectionVersion}",
+ "Migration succeeded and updated collection version",
+ "updatedCollectionVersion"_attr = refreshedMetadata->getCollVersion(),
+ "migrationId"_attr = getMigrationIdBSON(_coordinator.get()));
if (_enableResumableRangeDeleter) {
_coordinator->setMigrationDecision(
@@ -550,8 +551,10 @@ Status MigrationSourceManager::commitChunkMetadataOnConfig() {
if (_enableResumableRangeDeleter) {
if (_args.getWaitForDelete()) {
LOGV2(22019,
- "Waiting for cleanup of {getNss_ns} range {range}",
- "getNss_ns"_attr = getNss().ns(),
+ "Waiting for migration cleanup after chunk commit for the namespace {namespace} "
+ "and range {range}",
+ "Waiting for migration cleanup after chunk commit",
+ "namespace"_attr = getNss().ns(),
"range"_attr = redact(range.toString()),
"migrationId"_attr = _coordinator->getMigrationId());
@@ -574,8 +577,10 @@ Status MigrationSourceManager::commitChunkMetadataOnConfig() {
if (_args.getWaitForDelete()) {
LOGV2(22020,
- "Waiting for cleanup of {getNss_ns} range {range}",
- "getNss_ns"_attr = getNss().ns(),
+ "Waiting for migration cleanup after chunk commit for the namespace {namespace} "
+ "and range {range}",
+ "Waiting for migration cleanup after chunk commit",
+ "namespace"_attr = getNss().ns(),
"range"_attr = redact(range.toString()));
auto deleteStatus = cleanupCompleteFuture.getNoThrow(_opCtx);
@@ -593,8 +598,10 @@ Status MigrationSourceManager::commitChunkMetadataOnConfig() {
orphanedRangeCleanUpErrMsg + redact(cleanupCompleteFuture.getNoThrow(_opCtx))};
} else {
LOGV2(22021,
- "Leaving cleanup of {getNss_ns} range {range} to complete in background",
- "getNss_ns"_attr = getNss().ns(),
+ "Leaving migration cleanup after chunk commit to complete in background; "
+ "namespace: {namespace}, range: {range}",
+ "Leaving migration cleanup after chunk commit to complete in background",
+ "namespace"_attr = getNss().ns(),
"range"_attr = redact(range.toString()));
}
}
@@ -619,9 +626,11 @@ void MigrationSourceManager::cleanupOnError() {
_cleanup();
} catch (const DBException& ex) {
LOGV2_WARNING(22022,
- "Failed to clean up migration: {args}due to: {ex}",
- "args"_attr = redact(_args.toString()),
- "ex"_attr = redact(ex),
+ "Failed to clean up migration with request parameters "
+ "{chunkMigrationRequestParameters} due to: {error}",
+ "Failed to clean up migration",
+ "chunkMigrationRequestParameters"_attr = redact(_args.toString()),
+ "error"_attr = redact(ex),
"migrationId"_attr = getMigrationIdBSON(_coordinator.get()));
}
}
diff --git a/src/mongo/db/s/migration_util.cpp b/src/mongo/db/s/migration_util.cpp
index a057dacd3e0..c545860f503 100644
--- a/src/mongo/db/s/migration_util.cpp
+++ b/src/mongo/db/s/migration_util.cpp
@@ -112,7 +112,7 @@ void sendToRecipient(OperationContext* opCtx,
uassertStatusOK(Grid::get(opCtx)->shardRegistry()->getShard(opCtx, recipientId));
auto cmdBSON = cmd.toBSON(passthroughFields);
- LOGV2_DEBUG(22023, 1, "Sending request {cmd} to recipient.", "cmd"_attr = cmdBSON);
+ LOGV2_DEBUG(22023, 1, "Sending request to recipient", "commandToSend"_attr = redact(cmdBSON));
auto response = recipientShard->runCommandWithFixedRetryAttempts(
opCtx,
@@ -196,11 +196,10 @@ void retryIdempotentWorkAsPrimaryUntilSuccessOrStepdown(
} catch (DBException& ex) {
if (attempt % kLogRetryAttemptThreshold == 1) {
LOGV2_WARNING(23937,
- "retrying {taskDescription} after {attempt} failed attempts, last "
- "seen error: {ex}",
- "taskDescription"_attr = taskDescription,
+ "Retrying task after failed attempt",
+ "taskDescription"_attr = redact(taskDescription),
"attempt"_attr = attempt,
- "ex"_attr = ex);
+ "error"_attr = redact(ex));
}
}
}
@@ -261,19 +260,17 @@ ExecutorFuture<void> submitRangeDeletionTask(OperationContext* opCtx,
// does not match the UUID of the deletion task, force a filtering metadata refresh
// once, because this node may have just stepped up and therefore may have a stale
// cache.
- LOGV2(
- 22024,
- "Filtering metadata for namespace in deletion task "
- "{deletionTask} {collectionStatus}, forcing a refresh of {deletionTask_getNss}",
- "deletionTask"_attr = deletionTask.toBSON(),
- "collectionStatus"_attr =
- (css->getCurrentMetadataIfKnown()
- ? (css->getCollectionDescription().isSharded()
- ? "has UUID that does not match UUID of the deletion task"
- : "is unsharded")
- : "is not known"),
- "deletionTask_getNss"_attr = deletionTask.getNss(),
- "migrationId"_attr = deletionTask.getId());
+ LOGV2(22024,
+ "Filtering metadata for this range deletion task; forcing refresh",
+ "deletionTask"_attr = redact(deletionTask.toBSON()),
+ "error"_attr = (css->getCurrentMetadataIfKnown()
+ ? (css->getCollectionDescription().isSharded()
+ ? "Collection has UUID that does not match UUID "
+ "of the deletion task"
+ : "Collection is unsharded")
+ : "Collection's sharding state is not known"),
+ "namespace"_attr = deletionTask.getNss(),
+ "migrationId"_attr = deletionTask.getId());
// TODO (SERVER-46075): Add an asynchronous version of
// forceShardFilteringMetadataRefresh to avoid blocking on the network in the
@@ -294,8 +291,8 @@ ExecutorFuture<void> submitRangeDeletionTask(OperationContext* opCtx,
css->getCollectionDescription().uuidMatches(deletionTask.getCollectionUuid()));
LOGV2(22026,
- "Submitting range deletion task {deletionTask}",
- "deletionTask"_attr = deletionTask.toBSON(),
+ "Submitting range deletion task",
+ "deletionTask"_attr = redact(deletionTask.toBSON()),
"migrationId"_attr = deletionTask.getId());
const auto whenToClean = deletionTask.getWhenToClean() == CleanWhenEnum::kNow
@@ -314,10 +311,9 @@ ExecutorFuture<void> submitRangeDeletionTask(OperationContext* opCtx,
auto opCtx = uniqueOpCtx.get();
LOGV2(22027,
- "Failed to submit range deletion task "
- "{deletionTask}{causedBy_status}",
- "deletionTask"_attr = deletionTask.toBSON(),
- "causedBy_status"_attr = causedBy(status),
+ "Failed to submit range deletion task",
+ "deletionTask"_attr = redact(deletionTask.toBSON()),
+ "error"_attr = redact(status),
"migrationId"_attr = deletionTask.getId());
if (status == ErrorCodes::RangeDeletionAbandonedBecauseCollectionWithUUIDDoesNotExist) {
@@ -360,8 +356,8 @@ void resubmitRangeDeletionsOnStepUp(ServiceContext* serviceContext) {
.getAsync([](const Status& status) {
if (!status.isOK()) {
LOGV2(45739,
- "Error while submitting pending deletions: {status}",
- "status"_attr = status);
+ "Error while submitting pending range deletions",
+ "error"_attr = redact(status));
}
});
}
@@ -383,8 +379,8 @@ void forEachOrphanRange(OperationContext* opCtx, const NamespaceString& nss, Cal
if (!collDesc.isSharded()) {
LOGV2(22029,
- "Upgrade: skipping orphaned range enumeration for {nss}, collection is not sharded",
- "nss"_attr = nss);
+ "Upgrade: Skipping orphaned range enumeration because the collection is not sharded",
+ "namespace"_attr = nss);
return;
}
@@ -395,9 +391,9 @@ void forEachOrphanRange(OperationContext* opCtx, const NamespaceString& nss, Cal
if (!range) {
LOGV2_DEBUG(22030,
2,
- "Upgrade: Completed orphaned range enumeration for {nss} starting from "
- "{startingKey}, no orphan ranges remain",
- "nss"_attr = nss.toString(),
+ "Upgrade: Completed orphanged range enumeration; no orphaned ranges "
+ "remain",
+ "namespace"_attr = nss.toString(),
"startingKey"_attr = redact(startingKey));
return;
@@ -418,8 +414,8 @@ void submitOrphanRanges(OperationContext* opCtx, const NamespaceString& nss, con
LOGV2_DEBUG(22031,
2,
- "Upgrade: Cleaning up existing orphans for {nss} : {uuid}",
- "nss"_attr = nss,
+ "Upgrade: Cleaning up existing orphans",
+ "namespace"_attr = nss,
"uuid"_attr = uuid);
std::vector<RangeDeletionTask> deletions;
@@ -439,17 +435,17 @@ void submitOrphanRanges(OperationContext* opCtx, const NamespaceString& nss, con
for (const auto& task : deletions) {
LOGV2_DEBUG(22032,
2,
- "Upgrade: Submitting range for cleanup: {task_getRange} from {nss}",
- "task_getRange"_attr = task.getRange(),
- "nss"_attr = nss);
+ "Upgrade: Submitting chunk range for cleanup",
+ "range"_attr = redact(task.getRange().toString()),
+ "namespace"_attr = nss);
store.add(opCtx, task);
}
} catch (ExceptionFor<ErrorCodes::NamespaceNotFound>& e) {
LOGV2(22033,
- "Upgrade: Failed to cleanup orphans for {nss} because the namespace was not found: "
- "{e_what}, the collection must have been dropped",
- "nss"_attr = nss,
- "e_what"_attr = e.what());
+ "Upgrade: Failed to clean up orphans because the namespace was not found; the "
+ "collection must have been dropped",
+ "namespace"_attr = nss,
+ "error"_attr = redact(e.what()));
}
}
@@ -464,7 +460,10 @@ void submitOrphanRangesForCleanup(OperationContext* opCtx) {
for (auto collIt = catalog.begin(dbName); collIt != catalog.end(); ++collIt) {
auto uuid = collIt.uuid().get();
auto nss = catalog.lookupNSSByUUID(opCtx, uuid).get();
- LOGV2_DEBUG(22034, 2, "Upgrade: processing collection: {nss}", "nss"_attr = nss);
+ LOGV2_DEBUG(22034,
+ 2,
+ "Upgrade: Processing collection for orphaned range cleanup",
+ "namespace"_attr = nss);
submitOrphanRanges(opCtx, nss, uuid);
}
@@ -784,7 +783,7 @@ void resumeMigrationCoordinationsOnStepUp(OperationContext* opCtx) {
const auto lastOpTime = replClientInfo.getLastOp();
LOGV2_DEBUG(22038,
2,
- "Waiting for OpTime {lastOpTime} to become majority committed",
+ "Waiting for this OpTime to become majority committed",
"lastOpTime"_attr = lastOpTime);
return WaitForMajorityService::get(serviceContext).waitUntilMajority(lastOpTime);
})
@@ -806,7 +805,10 @@ void resumeMigrationCoordinationsOnStepUp(OperationContext* opCtx) {
opCtx,
query,
[&opCtx, &migrationRecoveryCount](const MigrationCoordinatorDocument& doc) {
- LOGV2_DEBUG(22039, 2, "Recovering migration {doc}", "doc"_attr = doc.toBSON());
+ LOGV2_DEBUG(22039,
+ 2,
+ "Recovering migration using a given migration coordinator document",
+ "migrationCoordinatorDoc"_attr = redact(doc.toBSON()));
migrationRecoveryCount++;
@@ -840,22 +842,27 @@ void resumeMigrationCoordinationsOnStepUp(OperationContext* opCtx) {
if (!refreshedMetadata || !(*refreshedMetadata)->isSharded() ||
!(*refreshedMetadata)->uuidMatches(doc.getCollectionUuid())) {
- LOGV2(
- 22040,
- "Even after forced refresh, filtering metadata for namespace in "
- "migration coordinator doc "
- "{doc}{refreshedMetadata_refreshedMetadata_isSharded_is_not_known_has_"
- "UUID_"
- "that_does_not_match_the_collection_UUID_in_the_coordinator_doc}. "
- "Deleting "
- "the range deletion tasks on the donor and recipient as "
- "well as the migration coordinator document on this node.",
- "doc"_attr = doc.toBSON(),
- "refreshedMetadata_refreshedMetadata_isSharded_is_not_known_has_UUID_that_does_not_match_the_collection_UUID_in_the_coordinator_doc"_attr =
- (!refreshedMetadata || !(*refreshedMetadata)->isSharded()
- ? "is not known"
- : "has UUID that does not match the collection UUID in the "
- "coordinator doc"));
+ if (!refreshedMetadata || !(*refreshedMetadata)->isSharded()) {
+ LOGV2(
+ 22040,
+ "Even after forced refresh, filtering metadata for this namespace "
+ "is not known. Deleting the range deletion tasks on the donor "
+ "and recipient as well as the migration coordinator document on "
+ "this node",
+ "migrationCoordinatorDocument"_attr = redact(doc.toBSON()));
+ } else {
+ LOGV2(
+ 46712004,
+ "Even after forced refresh, the filtering metadata has a UUID that "
+ "does not match the collection UUID in the migration coordinator "
+ "document. Deleting the range deletion tasks on the donor and "
+ "recipient as well as the migration coordinator document on this "
+ "node",
+ "migrationCoordinatorDocument"_attr = redact(doc.toBSON()),
+ "refreshedMetadataUUID"_attr =
+ (*refreshedMetadata)->getChunkManager()->getUUID(),
+ "coordinatorDocumentUUID"_attr = doc.getCollectionUuid());
+ }
// TODO (SERVER-45707): Test that range deletion tasks are eventually
// deleted even if the collection is dropped before migration coordination
@@ -884,8 +891,8 @@ void resumeMigrationCoordinationsOnStepUp(OperationContext* opCtx) {
.getAsync([migrationBlockingGuard](const Status& status) {
if (!status.isOK()) {
LOGV2(22041,
- "Failed to resume coordinating migrations on stepup {causedBy_status}",
- "causedBy_status"_attr = causedBy(status));
+ "Failed to resume coordinator migrations on stepup",
+ "error"_attr = redact(status));
}
});
}
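
The recurring cleanup in this file replaces causedBy() text-splicing with a named, redacted attribute. A before/after sketch of that single transformation (ID hypothetical):

// Before: the status text is spliced into the message and the attribute
// name is auto-generated from the expression.
LOGV2(12350,
      "Failed to submit task{causedBy_status}",
      "causedBy_status"_attr = causedBy(status));

// After: a conventional attribute name, a placeholder-free second message,
// and redact() applied to the status.
LOGV2(12350,
      "Failed to submit task: {error}",
      "Failed to submit task",
      "error"_attr = redact(status));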
diff --git a/src/mongo/db/s/move_primary_source_manager.cpp b/src/mongo/db/s/move_primary_source_manager.cpp
index d80f0428afb..869808b39e3 100644
--- a/src/mongo/db/s/move_primary_source_manager.cpp
+++ b/src/mongo/db/s/move_primary_source_manager.cpp
@@ -75,8 +75,9 @@ Status MovePrimarySourceManager::clone(OperationContext* opCtx) {
auto scopedGuard = makeGuard([&] { cleanupOnError(opCtx); });
LOGV2(22042,
- "Moving {dbname} primary from: {fromShard} to: {toShard}",
- "dbname"_attr = _dbname,
+ "Moving {db} primary from: {fromShard} to: {toShard}",
+ "Moving primary for database",
+ "db"_attr = _dbname,
"fromShard"_attr = _fromShard,
"toShard"_attr = _toShard);
@@ -231,8 +232,10 @@ Status MovePrimarySourceManager::commitOnConfig(OperationContext* opCtx) {
// done
LOGV2(22044,
"Error occurred while committing the movePrimary. Performing a majority write "
- "against the config server to obtain its latest optime{causedBy_commitStatus}",
- "causedBy_commitStatus"_attr = causedBy(redact(commitStatus)));
+ "against the config server to obtain its latest optime: {error}",
+ "Error occurred while committing the movePrimary. Performing a majority write "
+ "against the config server to obtain its latest optime",
+ "error"_attr = redact(commitStatus));
Status validateStatus = ShardingLogging::get(opCtx)->logChangeChecked(
opCtx,
@@ -324,9 +327,10 @@ Status MovePrimarySourceManager::cleanStaleData(OperationContext* opCtx) {
Status dropStatus = getStatusFromCommandResult(dropCollResult);
if (!dropStatus.isOK()) {
LOGV2(22045,
- "failed to drop cloned collection {coll}{causedBy_dropStatus}",
- "coll"_attr = coll,
- "causedBy_dropStatus"_attr = causedBy(redact(dropStatus)));
+ "Failed to drop cloned collection {namespace} in movePrimary: {error}",
+ "Failed to drop cloned collection in movePrimary",
+ "namespace"_attr = coll,
+ "error"_attr = redact(dropStatus));
}
}
@@ -353,9 +357,11 @@ void MovePrimarySourceManager::cleanupOnError(OperationContext* opCtx) {
BSONObjBuilder requestArgsBSON;
_requestArgs.serialize(&requestArgsBSON);
LOGV2_WARNING(22046,
- "Failed to clean up movePrimary: {requestArgsBSON_obj}due to: {ex}",
- "requestArgsBSON_obj"_attr = redact(requestArgsBSON.obj()),
- "ex"_attr = redact(ex));
+ "Failed to clean up movePrimary with request parameters {request} due to: "
+ "{error}",
+ "Failed to clean up movePrimary",
+ "request"_attr = redact(requestArgsBSON.obj()),
+ "error"_attr = redact(ex));
}
}
diff --git a/src/mongo/db/s/periodic_balancer_config_refresher.cpp b/src/mongo/db/s/periodic_balancer_config_refresher.cpp
index f6ea4a373be..8a8bfb0a7a7 100644
--- a/src/mongo/db/s/periodic_balancer_config_refresher.cpp
+++ b/src/mongo/db/s/periodic_balancer_config_refresher.cpp
@@ -61,8 +61,9 @@ PeriodicJobAnchor launchBalancerConfigRefresher(ServiceContext* serviceContext)
Status status = balancerConfig->refreshAndCheck(opCtx.get());
if (!status.isOK()) {
LOGV2(22048,
- "Failed to refresh balancer configuration{causedBy_status}",
- "causedBy_status"_attr = causedBy(status));
+ "Failed to refresh balancer configuration: {error}",
+ "Failed to refresh balancer configuration",
+ "error"_attr = redact(status));
}
},
Seconds(30));
diff --git a/src/mongo/db/s/scoped_operation_completion_sharding_actions.cpp b/src/mongo/db/s/scoped_operation_completion_sharding_actions.cpp
index aaa09ecbefb..c49f26c39fb 100644
--- a/src/mongo/db/s/scoped_operation_completion_sharding_actions.cpp
+++ b/src/mongo/db/s/scoped_operation_completion_sharding_actions.cpp
@@ -88,8 +88,10 @@ ScopedOperationCompletionShardingActions::~ScopedOperationCompletionShardingActi
_opCtx, staleInfo->getNss(), staleInfo->getVersionReceived());
if (!handleMismatchStatus.isOK())
LOGV2(22053,
- "Failed to handle stale version exception{causedBy_handleMismatchStatus}",
- "causedBy_handleMismatchStatus"_attr = causedBy(redact(handleMismatchStatus)));
+ "Failed to handle stale version exception as part of the current operation: "
+ "{error}",
+ "Failed to handle stale version exception as part of the current operation",
+ "error"_attr = redact(handleMismatchStatus));
} else if (auto staleInfo = status->extraInfo<StaleDbRoutingVersion>()) {
auto handleMismatchStatus = onDbVersionMismatchNoExcept(_opCtx,
staleInfo->getDb(),
@@ -97,8 +99,10 @@ ScopedOperationCompletionShardingActions::~ScopedOperationCompletionShardingActi
staleInfo->getVersionWanted());
if (!handleMismatchStatus.isOK())
LOGV2(22054,
- "Failed to handle database version exception{causedBy_handleMismatchStatus}",
- "causedBy_handleMismatchStatus"_attr = causedBy(redact(handleMismatchStatus)));
+ "Failed to handle database version exception as part of the current operation: "
+ "{error}",
+ "Failed to database version exception as part of the current operation",
+ "error"_attr = redact(handleMismatchStatus));
}
}
diff --git a/src/mongo/db/s/set_shard_version_command.cpp b/src/mongo/db/s/set_shard_version_command.cpp
index 3d1b4b50004..0771558dafc 100644
--- a/src/mongo/db/s/set_shard_version_command.cpp
+++ b/src/mongo/db/s/set_shard_version_command.cpp
@@ -364,14 +364,16 @@ public:
if (!status.isOK()) {
// The reload itself was interrupted or confused here
-
- errmsg = str::stream()
- << "could not refresh metadata for " << nss.ns()
- << " with requested shard version " << requestedVersion.toString()
- << ", stored shard version is " << currVersion.toString()
- << causedBy(redact(status));
-
- LOGV2_WARNING(22058, "{errmsg}", "errmsg"_attr = errmsg);
+ LOGV2_WARNING(
+ 22058,
+ "Could not refresh metadata for the namespace {namespace} with the requested "
+ "shard version {requestedShardVersion}; the current shard version is "
+ "{currentShardVersion}: {error}",
+ "Could not refresh metadata",
+ "namespace"_attr = nss.ns(),
+ "requestedShardVersion"_attr = requestedVersion,
+ "currentShardVersion"_attr = currVersion,
+ "error"_attr = redact(status));
result.append("ns", nss.ns());
result.append("code", status.code());
@@ -383,14 +385,19 @@ public:
} else if (!requestedVersion.isWriteCompatibleWith(currVersion)) {
// We reloaded a version that doesn't match the version mongos was trying to
// set.
- errmsg = str::stream() << "requested shard version differs from"
- << " config shard version for " << nss.ns()
- << ", requested version is " << requestedVersion.toString()
- << " but found version " << currVersion.toString();
-
static Occasionally sampler;
if (sampler.tick()) {
- LOGV2_WARNING(22059, "{errmsg}", "errmsg"_attr = errmsg);
+ LOGV2_WARNING(
+ 22059,
+ "Requested shard version differs from the authoritative (current) shard "
+ "version for the namespace {namespace}; the requested version is "
+ "{requestedShardVersion}, but the current version is "
+ "{currentShardVersion}",
+ "Requested shard version differs from the authoritative (current) shard "
+ "version for this namespace",
+ "namespace"_attr = nss.ns(),
+ "requestedShardVersion"_attr = requestedVersion,
+ "currentShardVersion"_attr = currVersion);
}
// WARNING: the exact fields below are important for compatibility with mongos