Diffstat (limited to 'src/mongo/db/s/migration_destination_manager.cpp')
-rw-r--r--  src/mongo/db/s/migration_destination_manager.cpp  71
1 file changed, 47 insertions(+), 24 deletions(-)
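
Every hunk below applies the same mechanical conversion from the legacy text logger (mongo/util/log.h) to structured logv2 logging (mongo/logv2/log.h). As an orientation aid, the pattern is sketched here using a call site taken from the diff itself rather than new code:

    // Legacy: the message is assembled by streaming values into a text
    // stream, so the resulting log line is free-form text.
    log() << "Batch insertion failed " << causedBy(redact(exceptionToStatus()));

    // logv2: the first argument is a numeric log ID, the second is a message
    // template, and each "name"_attr = value pair is a named attribute that
    // is substituted into the template and also emitted as structured data.
    LOGV2(21999,
          "Batch insertion failed {causedBy_exceptionToStatus}",
          "causedBy_exceptionToStatus"_attr = causedBy(redact(exceptionToStatus())));

The same mapping covers the severity variants seen below: warning() becomes LOGV2_WARNING and error() becomes LOGV2_ERROR, each likewise taking an ID, a message template, and attributes. The numeric IDs introduced by this change (21998-22013) are intended to be unique across the code base so that individual log lines can be identified independently of their wording.
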
diff --git a/src/mongo/db/s/migration_destination_manager.cpp b/src/mongo/db/s/migration_destination_manager.cpp
index a11bb49b8a4..54f4a01e87b 100644
--- a/src/mongo/db/s/migration_destination_manager.cpp
+++ b/src/mongo/db/s/migration_destination_manager.cpp
@@ -63,6 +63,7 @@
#include "mongo/db/session_catalog_mongod.h"
#include "mongo/db/storage/remove_saver.h"
#include "mongo/db/transaction_participant.h"
+#include "mongo/logv2/log.h"
#include "mongo/s/catalog/type_chunk.h"
#include "mongo/s/client/shard_registry.h"
#include "mongo/s/grid.h"
@@ -240,7 +241,7 @@ void MigrationDestinationManager::setState(State newState) {
}
void MigrationDestinationManager::_setStateFail(StringData msg) {
- log() << msg;
+ LOGV2(21998, "{msg}", "msg"_attr = msg);
{
stdx::lock_guard<Latch> sl(_mutex);
_errmsg = msg.toString();
@@ -252,7 +253,7 @@ void MigrationDestinationManager::_setStateFail(StringData msg) {
}
void MigrationDestinationManager::_setStateFailWarn(StringData msg) {
- warning() << msg;
+ LOGV2_WARNING(22010, "{msg}", "msg"_attr = msg);
{
stdx::lock_guard<Latch> sl(_mutex);
_errmsg = msg.toString();
@@ -436,7 +437,9 @@ repl::OpTime MigrationDestinationManager::cloneDocumentsFromDonor(
} catch (...) {
stdx::lock_guard<Client> lk(*opCtx->getClient());
opCtx->getServiceContext()->killOperation(lk, opCtx, ErrorCodes::Error(51008));
- log() << "Batch insertion failed " << causedBy(redact(exceptionToStatus()));
+ LOGV2(21999,
+ "Batch insertion failed {causedBy_exceptionToStatus}",
+ "causedBy_exceptionToStatus"_attr = causedBy(redact(exceptionToStatus())));
}
}};
auto inserterThreadJoinGuard = makeGuard([&] {
@@ -813,9 +816,15 @@ void MigrationDestinationManager::_migrateDriver(OperationContext* outerOpCtx) {
invariant(!_min.isEmpty());
invariant(!_max.isEmpty());
- log() << "Starting receiving end of migration of chunk " << redact(_min) << " -> "
- << redact(_max) << " for collection " << _nss.ns() << " from " << _fromShard
- << " at epoch " << _epoch.toString() << " with session id " << *_sessionId;
+ LOGV2(22000,
+ "Starting receiving end of migration of chunk {min} -> {max} for collection {nss_ns} "
+ "from {fromShard} at epoch {epoch} with session id {sessionId}",
+ "min"_attr = redact(_min),
+ "max"_attr = redact(_max),
+ "nss_ns"_attr = _nss.ns(),
+ "fromShard"_attr = _fromShard,
+ "epoch"_attr = _epoch.toString(),
+ "sessionId"_attr = *_sessionId);
MoveTimingHelper timing(
outerOpCtx, "to", _nss.ns(), _min, _max, 6 /* steps */, &_errmsg, ShardId(), ShardId());
@@ -823,7 +832,7 @@ void MigrationDestinationManager::_migrateDriver(OperationContext* outerOpCtx) {
const auto initialState = getState();
if (initialState == ABORT) {
- error() << "Migration abort requested before it started";
+ LOGV2_ERROR(22013, "Migration abort requested before it started");
return;
}
@@ -843,9 +852,11 @@ void MigrationDestinationManager::_migrateDriver(OperationContext* outerOpCtx) {
if (_enableResumableRangeDeleter) {
while (migrationutil::checkForConflictingDeletions(
outerOpCtx, range, donorCollectionOptionsAndIndexes.uuid)) {
- LOG(0) << "Migration paused because range overlaps with a "
- "range that is scheduled for deletion: collection: "
- << _nss.ns() << " range: " << redact(range.toString());
+ LOGV2(22001,
+ "Migration paused because range overlaps with a "
+ "range that is scheduled for deletion: collection: {nss_ns} range: {range}",
+ "nss_ns"_attr = _nss.ns(),
+ "range"_attr = redact(range.toString()));
auto status = CollectionShardingRuntime::waitForClean(
outerOpCtx, _nss, donorCollectionOptionsAndIndexes.uuid, range);
@@ -984,8 +995,9 @@ void MigrationDestinationManager::_migrateDriver(OperationContext* outerOpCtx) {
repl::ReplClientInfo::forClient(opCtx->getClient()).getLastOp(),
_writeConcern);
if (replStatus.status.code() == ErrorCodes::WriteConcernFailed) {
- warning() << "secondaryThrottle on, but doc insert timed out; "
- "continuing";
+ LOGV2_WARNING(22011,
+ "secondaryThrottle on, but doc insert timed out; "
+ "continuing");
} else {
uassertStatusOK(replStatus.status);
}
@@ -1059,7 +1071,8 @@ void MigrationDestinationManager::_migrateDriver(OperationContext* outerOpCtx) {
opCtx->checkForInterrupt();
if (getState() == ABORT) {
- log() << "Migration aborted while waiting for replication at catch up stage";
+ LOGV2(22002,
+ "Migration aborted while waiting for replication at catch up stage");
return;
}
@@ -1067,7 +1080,7 @@ void MigrationDestinationManager::_migrateDriver(OperationContext* outerOpCtx) {
break;
if (i > 100) {
- log() << "secondaries having hard time keeping up with migrate";
+ LOGV2(22003, "secondaries having hard time keeping up with migrate");
}
sleepmillis(20);
@@ -1087,14 +1100,14 @@ void MigrationDestinationManager::_migrateDriver(OperationContext* outerOpCtx) {
// Pause to wait for replication. This will prevent us from going into critical section
// until we're ready.
- log() << "Waiting for replication to catch up before entering critical section";
+ LOGV2(22004, "Waiting for replication to catch up before entering critical section");
auto awaitReplicationResult = repl::ReplicationCoordinator::get(opCtx)->awaitReplication(
opCtx, lastOpApplied, _writeConcern);
uassertStatusOKWithContext(awaitReplicationResult.status,
awaitReplicationResult.status.codeString());
- log() << "Chunk data replicated successfully.";
+ LOGV2(22005, "Chunk data replicated successfully.");
}
{
@@ -1131,7 +1144,7 @@ void MigrationDestinationManager::_migrateDriver(OperationContext* outerOpCtx) {
}
if (getState() == ABORT) {
- log() << "Migration aborted while transferring mods";
+ LOGV2(22006, "Migration aborted while transferring mods");
return;
}
@@ -1254,7 +1267,7 @@ bool MigrationDestinationManager::_applyMigrateOp(OperationContext* opCtx,
const std::string errMsg = str::stream()
<< "cannot migrate chunk, local document " << redact(localDoc)
<< " has same _id as reloaded remote document " << redact(updatedDoc);
- warning() << errMsg;
+ LOGV2_WARNING(22012, "{errMsg}", "errMsg"_attr = errMsg);
// Exception will abort migration cleanly
uasserted(16977, errMsg);
@@ -1277,14 +1290,22 @@ bool MigrationDestinationManager::_flushPendingWrites(OperationContext* opCtx,
repl::OpTime op(lastOpApplied);
static Occasionally sampler;
if (sampler.tick()) {
- log() << "migrate commit waiting for a majority of slaves for '" << _nss.ns() << "' "
- << redact(_min) << " -> " << redact(_max) << " waiting for: " << op;
+ LOGV2(22007,
+ "migrate commit waiting for a majority of slaves for '{nss_ns}' {min} -> {max} "
+ "waiting for: {op}",
+ "nss_ns"_attr = _nss.ns(),
+ "min"_attr = redact(_min),
+ "max"_attr = redact(_max),
+ "op"_attr = op);
}
return false;
}
- log() << "migrate commit succeeded flushing to secondaries for '" << _nss.ns() << "' "
- << redact(_min) << " -> " << redact(_max);
+ LOGV2(22008,
+ "migrate commit succeeded flushing to secondaries for '{nss_ns}' {min} -> {max}",
+ "nss_ns"_attr = _nss.ns(),
+ "min"_attr = redact(_min),
+ "max"_attr = redact(_max));
return true;
}
@@ -1330,8 +1351,10 @@ void MigrationDestinationManager::_forgetPending(OperationContext* opCtx, ChunkR
// checking this here is that in the future we shouldn't have this problem.
if (!optMetadata || !(*optMetadata)->isSharded() ||
(*optMetadata)->getCollVersion().epoch() != _epoch) {
- LOG(0) << "No need to forget pending chunk " << redact(range.toString())
- << " because the epoch for " << _nss.ns() << " changed";
+ LOGV2(22009,
+ "No need to forget pending chunk {range} because the epoch for {nss_ns} changed",
+ "range"_attr = redact(range.toString()),
+ "nss_ns"_attr = _nss.ns());
return;
}