Diffstat (limited to 'src/mongo/db/repl/initial_syncer.cpp')
 src/mongo/db/repl/initial_syncer.cpp | 222
 1 file changed, 149 insertions(+), 73 deletions(-)
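This diff converts the initial syncer's legacy stream-based logging to the structured logv2 API: each call site gets a unique, stable log ID, and named attributes replace operator<< concatenation. A minimal sketch of the conversion pattern, assuming the file defines its log component before including the logv2 header; the ID 99999 and the 'attempt' variable below are illustrative, not taken from this diff:

#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kReplication
#include "mongo/logv2/log.h"

// Before: free-form stream logging with no stable identifier.
//     log() << "Starting initial sync (attempt " << (attempt + 1) << ")";

// After: a unique log ID, a format string with named placeholders, and one
// "_attr"-suffixed argument per placeholder.
LOGV2(99999,
      "Starting initial sync (attempt {attempt})",
      "attempt"_attr = (attempt + 1));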
diff --git a/src/mongo/db/repl/initial_syncer.cpp b/src/mongo/db/repl/initial_syncer.cpp
index 13f2f099ded..2d33f06f66e 100644
--- a/src/mongo/db/repl/initial_syncer.cpp
+++ b/src/mongo/db/repl/initial_syncer.cpp
@@ -63,6 +63,7 @@
#include "mongo/db/session_txn_record_gen.h"
#include "mongo/executor/task_executor.h"
#include "mongo/executor/thread_pool_task_executor.h"
+#include "mongo/logv2/log.h"
#include "mongo/rpc/metadata/repl_set_metadata.h"
#include "mongo/util/assert_util.h"
#include "mongo/util/destructor_guard.h"
@@ -174,13 +175,13 @@ void pauseAtInitialSyncFuzzerSyncronizationPoints(std::string msg) {
// Set and unset by the InitialSyncTest fixture to cause initial sync to pause so that the
// Initial Sync Fuzzer can run commands on the sync source.
if (MONGO_unlikely(initialSyncFuzzerSynchronizationPoint1.shouldFail())) {
- log() << msg;
- log() << "initialSyncFuzzerSynchronizationPoint1 fail point enabled.";
+ LOGV2(21158, "{msg}", "msg"_attr = msg);
+ LOGV2(21159, "initialSyncFuzzerSynchronizationPoint1 fail point enabled.");
initialSyncFuzzerSynchronizationPoint1.pauseWhileSet();
}
if (MONGO_unlikely(initialSyncFuzzerSynchronizationPoint2.shouldFail())) {
- log() << "initialSyncFuzzerSynchronizationPoint2 fail point enabled.";
+ LOGV2(21160, "initialSyncFuzzerSynchronizationPoint2 fail point enabled.");
initialSyncFuzzerSynchronizationPoint2.pauseWhileSet();
}
}
@@ -446,7 +447,7 @@ BSONObj InitialSyncer::_getInitialSyncProgress_inlock() const {
}
return bob.obj();
} catch (const DBException& e) {
- log() << "Error creating initial sync progress object: " << e.toString();
+ LOGV2(21161, "Error creating initial sync progress object: {e}", "e"_attr = e.toString());
}
BSONObjBuilder bob;
_appendInitialSyncProgressMinimal_inlock(&bob);
@@ -495,7 +496,7 @@ void InitialSyncer::_setUp_inlock(OperationContext* opCtx, std::uint32_t initial
_storage->setInitialDataTimestamp(serviceCtx, Timestamp::kAllowUnstableCheckpointsSentinel);
_storage->setStableTimestamp(serviceCtx, Timestamp::min());
- LOG(1) << "Creating oplogBuffer.";
+ LOGV2_DEBUG(21162, 1, "Creating oplogBuffer.");
_oplogBuffer = _dataReplicatorExternalState->makeInitialSyncOplogBuffer(opCtx);
_oplogBuffer->startup(opCtx);
@@ -542,8 +543,11 @@ void InitialSyncer::_tearDown_inlock(OperationContext* opCtx,
invariant(currentLastAppliedOpTime == lastAppliedOpTime);
}
- log() << "initial sync done; took "
- << duration_cast<Seconds>(_stats.initialSyncEnd - _stats.initialSyncStart) << ".";
+ LOGV2(21163,
+ "initial sync done; took "
+ "{duration_cast_Seconds_stats_initialSyncEnd_stats_initialSyncStart}.",
+ "duration_cast_Seconds_stats_initialSyncEnd_stats_initialSyncStart"_attr =
+ duration_cast<Seconds>(_stats.initialSyncEnd - _stats.initialSyncStart));
initialSyncCompletes.increment();
}
@@ -560,8 +564,10 @@ void InitialSyncer::_startInitialSyncAttemptCallback(
return;
}
- log() << "Starting initial sync (attempt " << (initialSyncAttempt + 1) << " of "
- << initialSyncMaxAttempts << ")";
+ LOGV2(21164,
+ "Starting initial sync (attempt {initialSyncAttempt_1} of {initialSyncMaxAttempts})",
+ "initialSyncAttempt_1"_attr = (initialSyncAttempt + 1),
+ "initialSyncMaxAttempts"_attr = initialSyncMaxAttempts);
// This completion guard invokes _finishInitialSyncAttempt on destruction.
auto cancelRemainingWorkInLock = [this]() { _cancelRemainingWork_inlock(); };
@@ -577,15 +583,18 @@ void InitialSyncer::_startInitialSyncAttemptCallback(
_oplogApplier = {};
- LOG(2) << "Resetting sync source so a new one can be chosen for this initial sync attempt.";
+ LOGV2_DEBUG(21165,
+ 2,
+ "Resetting sync source so a new one can be chosen for this initial sync attempt.");
_syncSource = HostAndPort();
- LOG(2) << "Resetting all optimes before starting this initial sync attempt.";
+ LOGV2_DEBUG(21166, 2, "Resetting all optimes before starting this initial sync attempt.");
_opts.resetOptimes();
_lastApplied = {OpTime(), Date_t()};
_lastFetched = {};
- LOG(2) << "Resetting the oldest timestamp before starting this initial sync attempt.";
+ LOGV2_DEBUG(
+ 21167, 2, "Resetting the oldest timestamp before starting this initial sync attempt.");
auto storageEngine = getGlobalServiceContext()->getStorageEngine();
if (storageEngine) {
// Set the oldestTimestamp to one because WiredTiger does not allow us to set it to zero
@@ -595,9 +604,11 @@ void InitialSyncer::_startInitialSyncAttemptCallback(
storageEngine->setOldestTimestamp(kTimestampOne);
}
- LOG(2) << "Resetting feature compatibility version to last-stable. If the sync source is in "
- "latest feature compatibility version, we will find out when we clone the "
- "server configuration collection (admin.system.version).";
+ LOGV2_DEBUG(21168,
+ 2,
+ "Resetting feature compatibility version to last-stable. If the sync source is in "
+ "latest feature compatibility version, we will find out when we clone the "
+ "server configuration collection (admin.system.version).");
serverGlobalParams.featureCompatibility.reset();
// Clear the oplog buffer.
@@ -656,9 +667,17 @@ void InitialSyncer::_chooseSyncSourceCallback(
}
auto when = _exec->now() + _opts.syncSourceRetryWait;
- LOG(1) << "Error getting sync source: '" << syncSource.getStatus() << "', trying again in "
- << _opts.syncSourceRetryWait << " at " << when.toString() << ". Attempt "
- << (chooseSyncSourceAttempt + 1) << " of " << numInitialSyncConnectAttempts.load();
+ LOGV2_DEBUG(21169,
+ 1,
+ "Error getting sync source: '{syncSource_getStatus}', trying again in "
+ "{opts_syncSourceRetryWait} at {when}. Attempt {chooseSyncSourceAttempt_1} of "
+ "{numInitialSyncConnectAttempts_load}",
+ "syncSource_getStatus"_attr = syncSource.getStatus(),
+ "opts_syncSourceRetryWait"_attr = _opts.syncSourceRetryWait,
+ "when"_attr = when.toString(),
+ "chooseSyncSourceAttempt_1"_attr = (chooseSyncSourceAttempt + 1),
+ "numInitialSyncConnectAttempts_load"_attr =
+ numInitialSyncConnectAttempts.load());
auto status = _scheduleWorkAtAndSaveHandle_inlock(
when,
[=](const executor::TaskExecutor::CallbackArgs& args) {
@@ -678,8 +697,9 @@ void InitialSyncer::_chooseSyncSourceCallback(
if (MONGO_unlikely(initialSyncHangBeforeCreatingOplog.shouldFail())) {
// This log output is used in js tests so please leave it.
- log() << "initial sync - initialSyncHangBeforeCreatingOplog fail point "
- "enabled. Blocking until fail point is disabled.";
+ LOGV2(21170,
+ "initial sync - initialSyncHangBeforeCreatingOplog fail point "
+ "enabled. Blocking until fail point is disabled.");
lock.unlock();
while (MONGO_unlikely(initialSyncHangBeforeCreatingOplog.shouldFail()) &&
!_isShuttingDown()) {
@@ -714,8 +734,11 @@ void InitialSyncer::_chooseSyncSourceCallback(
Status InitialSyncer::_truncateOplogAndDropReplicatedDatabases() {
// truncate oplog; drop user databases.
- LOG(1) << "About to truncate the oplog, if it exists, ns:" << _opts.localOplogNS
- << ", and drop all user databases (so that we can clone them).";
+ LOGV2_DEBUG(21171,
+ 1,
+ "About to truncate the oplog, if it exists, ns:{opts_localOplogNS}, and drop all "
+ "user databases (so that we can clone them).",
+ "opts_localOplogNS"_attr = _opts.localOplogNS);
auto opCtx = makeOpCtx();
@@ -723,13 +746,21 @@ Status InitialSyncer::_truncateOplogAndDropReplicatedDatabases() {
UnreplicatedWritesBlock unreplicatedWritesBlock(opCtx.get());
// 1.) Truncate the oplog.
- LOG(2) << "Truncating the existing oplog: " << _opts.localOplogNS;
+ LOGV2_DEBUG(21172,
+ 2,
+ "Truncating the existing oplog: {opts_localOplogNS}",
+ "opts_localOplogNS"_attr = _opts.localOplogNS);
Timer timer;
auto status = _storage->truncateCollection(opCtx.get(), _opts.localOplogNS);
- log() << "Initial syncer oplog truncation finished in: " << timer.millis() << "ms";
+ LOGV2(21173,
+ "Initial syncer oplog truncation finished in: {timer_millis}ms",
+ "timer_millis"_attr = timer.millis());
if (!status.isOK()) {
// 1a.) Create the oplog.
- LOG(2) << "Creating the oplog: " << _opts.localOplogNS;
+ LOGV2_DEBUG(21174,
+ 2,
+ "Creating the oplog: {opts_localOplogNS}",
+ "opts_localOplogNS"_attr = _opts.localOplogNS);
status = _storage->createOplog(opCtx.get(), _opts.localOplogNS);
if (!status.isOK()) {
return status;
@@ -737,7 +768,7 @@ Status InitialSyncer::_truncateOplogAndDropReplicatedDatabases() {
}
// 2.) Drop user databases.
- LOG(2) << "Dropping user databases";
+ LOGV2_DEBUG(21175, 2, "Dropping user databases");
return _storage->dropReplicatedDatabases(opCtx.get());
}
@@ -895,7 +926,7 @@ void InitialSyncer::_getBeginFetchingOpTimeCallback(
pauseAtInitialSyncFuzzerSyncronizationPoints(logMsg);
if (MONGO_unlikely(initialSyncHangAfterGettingBeginFetchingTimestamp.shouldFail())) {
- log() << "initialSyncHangAfterGettingBeginFetchingTimestamp fail point enabled.";
+ LOGV2(21176, "initialSyncHangAfterGettingBeginFetchingTimestamp fail point enabled.");
initialSyncHangAfterGettingBeginFetchingTimestamp.pauseWhileSet();
}
@@ -1064,10 +1095,16 @@ void InitialSyncer::_fcvFetcherCallback(const StatusWith<Fetcher::QueryResponse>
<< _initialSyncState->beginFetchingTimestamp.toBSON());
invariant(!result.getValue().documents.empty());
- LOG(2) << "Setting begin applying timestamp to " << _initialSyncState->beginApplyingTimestamp
- << " using last oplog entry: " << redact(result.getValue().documents.front())
- << ", ns: " << _opts.localOplogNS << " and the begin fetching timestamp to "
- << _initialSyncState->beginFetchingTimestamp;
+ LOGV2_DEBUG(
+ 21177,
+ 2,
+ "Setting begin applying timestamp to {initialSyncState_beginApplyingTimestamp} using last "
+ "oplog entry: {result_getValue_documents_front}, ns: {opts_localOplogNS} and the begin "
+ "fetching timestamp to {initialSyncState_beginFetchingTimestamp}",
+ "initialSyncState_beginApplyingTimestamp"_attr = _initialSyncState->beginApplyingTimestamp,
+ "result_getValue_documents_front"_attr = redact(result.getValue().documents.front()),
+ "opts_localOplogNS"_attr = _opts.localOplogNS,
+ "initialSyncState_beginFetchingTimestamp"_attr = _initialSyncState->beginFetchingTimestamp);
const auto configResult = _dataReplicatorExternalState->getCurrentConfig();
status = configResult.getStatus();
@@ -1098,7 +1135,10 @@ void InitialSyncer::_fcvFetcherCallback(const StatusWith<Fetcher::QueryResponse>
initialSyncOplogFetcherBatchSize,
OplogFetcher::StartingPoint::kEnqueueFirstDoc);
- LOG(2) << "Starting OplogFetcher: " << _oplogFetcher->toString();
+ LOGV2_DEBUG(21178,
+ 2,
+ "Starting OplogFetcher: {oplogFetcher}",
+ "oplogFetcher"_attr = _oplogFetcher->toString());
// _startupComponent_inlock is shutdown-aware.
status = _startupComponent_inlock(_oplogFetcher);
@@ -1113,8 +1153,9 @@ void InitialSyncer::_fcvFetcherCallback(const StatusWith<Fetcher::QueryResponse>
// This could have been done with a scheduleWorkAt but this is used only by JS tests where
// we run with multiple threads so it's fine to spin on this thread.
// This log output is used in js tests so please leave it.
- log() << "initial sync - initialSyncHangBeforeCopyingDatabases fail point "
- "enabled. Blocking until fail point is disabled.";
+ LOGV2(21179,
+ "initial sync - initialSyncHangBeforeCopyingDatabases fail point "
+ "enabled. Blocking until fail point is disabled.");
while (MONGO_unlikely(initialSyncHangBeforeCopyingDatabases.shouldFail()) &&
!_isShuttingDown()) {
mongo::sleepsecs(1);
@@ -1122,7 +1163,11 @@ void InitialSyncer::_fcvFetcherCallback(const StatusWith<Fetcher::QueryResponse>
lock.lock();
}
- LOG(2) << "Starting AllDatabaseCloner: " << _initialSyncState->allDatabaseCloner->toString();
+ LOGV2_DEBUG(21180,
+ 2,
+ "Starting AllDatabaseCloner: {initialSyncState_allDatabaseCloner}",
+ "initialSyncState_allDatabaseCloner"_attr =
+ _initialSyncState->allDatabaseCloner->toString());
auto [startClonerFuture, startCloner] =
_initialSyncState->allDatabaseCloner->runOnExecutorEvent(_clonerExec);
@@ -1166,8 +1211,11 @@ void InitialSyncer::_fcvFetcherCallback(const StatusWith<Fetcher::QueryResponse>
void InitialSyncer::_oplogFetcherCallback(const Status& oplogFetcherFinishStatus,
std::shared_ptr<OnCompletionGuard> onCompletionGuard) {
stdx::lock_guard<Latch> lock(_mutex);
- log() << "Finished fetching oplog during initial sync: " << redact(oplogFetcherFinishStatus)
- << ". Last fetched optime: " << _lastFetched.toString();
+ LOGV2(21181,
+ "Finished fetching oplog during initial sync: {oplogFetcherFinishStatus}. Last fetched "
+ "optime: {lastFetched}",
+ "oplogFetcherFinishStatus"_attr = redact(oplogFetcherFinishStatus),
+ "lastFetched"_attr = _lastFetched.toString());
auto status = _checkForShutdownAndConvertStatus_inlock(
oplogFetcherFinishStatus, "error fetching oplog during initial sync");
@@ -1182,8 +1230,9 @@ void InitialSyncer::_oplogFetcherCallback(const Status& oplogFetcherFinishStatus
// an OK status is when the 'stopReplProducer' fail point is enabled, which causes the
// OplogFetcher to ignore the current sync source response and return early.
if (status.isOK()) {
- log() << "Finished fetching oplog fetching early. Last fetched optime: "
- << _lastFetched.toString();
+ LOGV2(21182,
+ "Finished oplog fetching early. Last fetched optime: {lastFetched}",
+ "lastFetched"_attr = _lastFetched.toString());
return;
}
@@ -1199,16 +1248,18 @@ void InitialSyncer::_oplogFetcherCallback(const Status& oplogFetcherFinishStatus
void InitialSyncer::_allDatabaseClonerCallback(
const Status& databaseClonerFinishStatus,
std::shared_ptr<OnCompletionGuard> onCompletionGuard) {
- log() << "Finished cloning data: " << redact(databaseClonerFinishStatus)
- << ". Beginning oplog replay.";
+ LOGV2(21183,
+ "Finished cloning data: {databaseClonerFinishStatus}. Beginning oplog replay.",
+ "databaseClonerFinishStatus"_attr = redact(databaseClonerFinishStatus));
_client->shutdownAndDisallowReconnect();
if (MONGO_unlikely(initialSyncHangAfterDataCloning.shouldFail())) {
// This could have been done with a scheduleWorkAt but this is used only by JS tests where
// we run with multiple threads so it's fine to spin on this thread.
// This log output is used in js tests so please leave it.
- log() << "initial sync - initialSyncHangAfterDataCloning fail point "
- "enabled. Blocking until fail point is disabled.";
+ LOGV2(21184,
+ "initial sync - initialSyncHangAfterDataCloning fail point "
+ "enabled. Blocking until fail point is disabled.");
while (MONGO_unlikely(initialSyncHangAfterDataCloning.shouldFail()) && !_isShuttingDown()) {
mongo::sleepsecs(1);
}
@@ -1309,7 +1360,10 @@ void InitialSyncer::_lastOplogEntryFetcherCallbackForStopTimestamp(
const auto& documents = result.getValue().documents;
invariant(!documents.empty());
const BSONObj oplogSeedDoc = documents.front();
- LOG(2) << "Inserting oplog seed document: " << oplogSeedDoc;
+ LOGV2_DEBUG(21185,
+ 2,
+ "Inserting oplog seed document: {oplogSeedDoc}",
+ "oplogSeedDoc"_attr = oplogSeedDoc);
auto opCtx = makeOpCtx();
// StorageInterface::insertDocument() has to be called outside the lock because we may
@@ -1333,8 +1387,9 @@ void InitialSyncer::_lastOplogEntryFetcherCallbackForStopTimestamp(
stdx::lock_guard<Latch> lock(_mutex);
_lastApplied = resultOpTimeAndWallTime;
- log() << "No need to apply operations. (currently at "
- << _initialSyncState->stopTimestamp.toBSON() << ")";
+ LOGV2(21186,
+ "No need to apply operations. (currently at {initialSyncState_stopTimestamp})",
+ "initialSyncState_stopTimestamp"_attr = _initialSyncState->stopTimestamp.toBSON());
// This sets the error in 'onCompletionGuard' and shuts down the OplogFetcher on error.
_scheduleRollbackCheckerCheckForRollback_inlock(lock, onCompletionGuard);
@@ -1353,7 +1408,9 @@ void InitialSyncer::_getNextApplierBatchCallback(
auto batchResult = _getNextApplierBatch_inlock();
if (!batchResult.isOK()) {
- warning() << "Failure creating next apply batch: " << redact(batchResult.getStatus());
+ LOGV2_WARNING(21196,
+ "Failure creating next apply batch: {batchResult_getStatus}",
+ "batchResult_getStatus"_attr = redact(batchResult.getStatus()));
onCompletionGuard->setResultAndCancelRemainingWork_inlock(lock, batchResult.getStatus());
return;
}
@@ -1364,8 +1421,9 @@ void InitialSyncer::_getNextApplierBatchCallback(
pauseAtInitialSyncFuzzerSyncronizationPoints(logMsg);
if (MONGO_unlikely(failInitialSyncBeforeApplyingBatch.shouldFail())) {
- log() << "initial sync - failInitialSyncBeforeApplyingBatch fail point enabled. Pausing"
- << "until fail point is disabled, then will fail initial sync.";
+ LOGV2(21187,
+ "initial sync - failInitialSyncBeforeApplyingBatch fail point enabled. Pausing "
+ "until fail point is disabled, then will fail initial sync.");
failInitialSyncBeforeApplyingBatch.pauseWhileSet();
status = Status(ErrorCodes::CallbackCanceled,
"failInitialSyncBeforeApplyingBatch fail point enabled");
@@ -1409,7 +1467,7 @@ void InitialSyncer::_getNextApplierBatchCallback(
"in the oplog buffer. Aborting this initial sync attempt. Last applied: "
<< _lastApplied.opTime.toString() << ". Last fetched: " << _lastFetched.toString()
<< ". Number of operations applied: " << _initialSyncState->appliedOps;
- log() << msg;
+ LOGV2(21188, "{msg}", "msg"_attr = msg);
status = Status(ErrorCodes::RemoteResultsUnavailable, msg);
onCompletionGuard->setResultAndCancelRemainingWork_inlock(lock, status);
return;
@@ -1441,12 +1499,13 @@ void InitialSyncer::_multiApplierCallback(const Status& multiApplierStatus,
// Set to cause initial sync to fassert instead of restart if applying a batch fails, so that
// tests can be robust to network errors but not oplog idempotency errors.
if (MONGO_unlikely(initialSyncFassertIfApplyingBatchFails.shouldFail())) {
- log() << "initialSyncFassertIfApplyingBatchFails fail point enabled.";
+ LOGV2(21189, "initialSyncFassertIfApplyingBatchFails fail point enabled.");
fassert(31210, status);
}
if (!status.isOK()) {
- error() << "Failed to apply batch due to '" << redact(status) << "'";
+ LOGV2_ERROR(
+ 21199, "Failed to apply batch due to '{status}'", "status"_attr = redact(status));
onCompletionGuard->setResultAndCancelRemainingWork_inlock(lock, status);
return;
}
@@ -1475,7 +1534,10 @@ void InitialSyncer::_rollbackCheckerCheckForRollbackCallback(
auto status = _checkForShutdownAndConvertStatus_inlock(result.getStatus(),
"error while getting last rollback ID");
if (_shouldRetryError(lock, status)) {
- LOG(1) << "Retrying rollback checker because of network error " << status;
+ LOGV2_DEBUG(21190,
+ 1,
+ "Retrying rollback checker because of network error {status}",
+ "status"_attr = status);
_scheduleRollbackCheckerCheckForRollback_inlock(lock, onCompletionGuard);
return;
}
@@ -1519,17 +1581,20 @@ void InitialSyncer::_finishInitialSyncAttempt(const StatusWith<OpTimeAndWallTime
auto scheduleResult = _exec->scheduleWork(
[=](const mongo::executor::TaskExecutor::CallbackArgs&) { _finishCallback(result); });
if (!scheduleResult.isOK()) {
- warning() << "Unable to schedule initial syncer completion task due to "
- << redact(scheduleResult.getStatus())
- << ". Running callback on current thread.";
+ LOGV2_WARNING(21197,
+ "Unable to schedule initial syncer completion task due to "
+ "{scheduleResult_getStatus}. Running callback on current thread.",
+ "scheduleResult_getStatus"_attr = redact(scheduleResult.getStatus()));
_finishCallback(result);
}
});
- log() << "Initial sync attempt finishing up.";
+ LOGV2(21191, "Initial sync attempt finishing up.");
stdx::lock_guard<Latch> lock(_mutex);
- log() << "Initial Sync Attempt Statistics: " << redact(_getInitialSyncProgress_inlock());
+ LOGV2(21192,
+ "Initial Sync Attempt Statistics: {getInitialSyncProgress_inlock}",
+ "getInitialSyncProgress_inlock"_attr = redact(_getInitialSyncProgress_inlock()));
auto runTime = _initialSyncState ? _initialSyncState->timer.millis() : 0;
int rollBackId = -1;
@@ -1552,7 +1617,7 @@ void InitialSyncer::_finishInitialSyncAttempt(const StatusWith<OpTimeAndWallTime
totalTimeUnreachableMillis});
if (MONGO_unlikely(failAndHangInitialSync.shouldFail())) {
- log() << "failAndHangInitialSync fail point enabled.";
+ LOGV2(21193, "failAndHangInitialSync fail point enabled.");
failAndHangInitialSync.pauseWhileSet();
result = Status(ErrorCodes::InternalError, "failAndHangInitialSync fail point enabled");
}
@@ -1570,15 +1635,19 @@ void InitialSyncer::_finishInitialSyncAttempt(const StatusWith<OpTimeAndWallTime
// startup.
initialSyncFailedAttempts.increment();
- error() << "Initial sync attempt failed -- attempts left: "
- << (_stats.maxFailedInitialSyncAttempts - _stats.failedInitialSyncAttempts)
- << " cause: " << redact(result.getStatus());
+ LOGV2_ERROR(21200,
+ "Initial sync attempt failed -- attempts left: "
+ "{stats_maxFailedInitialSyncAttempts_stats_failedInitialSyncAttempts} cause: "
+ "{result_getStatus}",
+ "stats_maxFailedInitialSyncAttempts_stats_failedInitialSyncAttempts"_attr =
+ (_stats.maxFailedInitialSyncAttempts - _stats.failedInitialSyncAttempts),
+ "result_getStatus"_attr = redact(result.getStatus()));
// Check if need to do more retries.
if (_stats.failedInitialSyncAttempts >= _stats.maxFailedInitialSyncAttempts) {
const std::string err =
"The maximum number of retries have been exhausted for initial sync.";
- severe() << err;
+ LOGV2_FATAL(21202, "{err}", "err"_attr = err);
initialSyncFailures.increment();
@@ -1626,8 +1695,9 @@ void InitialSyncer::_finishCallback(StatusWith<OpTimeAndWallTime> lastApplied) {
if (MONGO_unlikely(initialSyncHangBeforeFinish.shouldFail())) {
// This log output is used in js tests so please leave it.
- log() << "initial sync - initialSyncHangBeforeFinish fail point "
- "enabled. Blocking until fail point is disabled.";
+ LOGV2(21194,
+ "initial sync - initialSyncHangBeforeFinish fail point "
+ "enabled. Blocking until fail point is disabled.");
while (MONGO_unlikely(initialSyncHangBeforeFinish.shouldFail()) && !_isShuttingDown()) {
mongo::sleepsecs(1);
}
@@ -1637,8 +1707,9 @@ void InitialSyncer::_finishCallback(StatusWith<OpTimeAndWallTime> lastApplied) {
try {
onCompletion(lastApplied);
} catch (...) {
- warning() << "initial syncer finish callback threw exception: "
- << redact(exceptionToStatus());
+ LOGV2_WARNING(21198,
+ "initial syncer finish callback threw exception: {exceptionToStatus}",
+ "exceptionToStatus"_attr = redact(exceptionToStatus()));
}
// Destroy the remaining reference to the completion callback before we transition the state to
@@ -1704,7 +1775,7 @@ void InitialSyncer::_checkApplierProgressAndScheduleGetNextApplierBatch_inlock(
<< "Possible rollback on sync source " << _syncSource.toString() << ". Currently at "
<< _initialSyncState->stopTimestamp.toBSON() << ". Started at "
<< _initialSyncState->beginApplyingTimestamp.toBSON();
- error() << msg;
+ LOGV2_ERROR(21201, "{msg}", "msg"_attr = msg);
onCompletionGuard->setResultAndCancelRemainingWork_inlock(
lock, Status(ErrorCodes::OplogOutOfOrder, msg));
return;
@@ -1713,11 +1784,16 @@ void InitialSyncer::_checkApplierProgressAndScheduleGetNextApplierBatch_inlock(
if (_lastApplied.opTime.isNull()) {
// Check if any ops occurred while cloning or any ops need to be fetched.
invariant(_initialSyncState->beginFetchingTimestamp < _initialSyncState->stopTimestamp);
- log() << "Writing to the oplog and applying operations until "
- << _initialSyncState->stopTimestamp.toBSON()
- << " before initial sync can complete. (started fetching at "
- << _initialSyncState->beginFetchingTimestamp.toBSON() << " and applying at "
- << _initialSyncState->beginApplyingTimestamp.toBSON() << ")";
+ LOGV2(21195,
+ "Writing to the oplog and applying operations until {initialSyncState_stopTimestamp} "
+ "before initial sync can complete. (started fetching at "
+ "{initialSyncState_beginFetchingTimestamp} and applying at "
+ "{initialSyncState_beginApplyingTimestamp})",
+ "initialSyncState_stopTimestamp"_attr = _initialSyncState->stopTimestamp.toBSON(),
+ "initialSyncState_beginFetchingTimestamp"_attr =
+ _initialSyncState->beginFetchingTimestamp.toBSON(),
+ "initialSyncState_beginApplyingTimestamp"_attr =
+ _initialSyncState->beginApplyingTimestamp.toBSON());
// Fall through to scheduling _getNextApplierBatchCallback().
} else if (_lastApplied.opTime.getTimestamp() >= _initialSyncState->stopTimestamp) {
// Check for rollback if we have applied far enough to be consistent.
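For reference, the severity mapping applied consistently throughout this diff: debug-level LOG(n) calls keep their verbosity as the second argument, and each legacy severity helper maps to the logv2 macro of the same level. The log IDs below are illustrative, not taken from this diff:

LOGV2_DEBUG(99901, 2, "debug message");   // was: LOG(2) << "debug message";
LOGV2_WARNING(99902, "warning message");  // was: warning() << ...;
LOGV2_ERROR(99903, "error message");      // was: error() << ...;
LOGV2_FATAL(99904, "fatal message");      // was: severe() << ...;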