Diffstat (limited to 'src/mongo/db/repl')
-rw-r--r--  src/mongo/db/repl/bgsync.cpp                                    14
-rw-r--r--  src/mongo/db/repl/drop_pending_collection_reaper.cpp            14
-rw-r--r--  src/mongo/db/repl/initial_syncer.cpp                             3
-rw-r--r--  src/mongo/db/repl/oplog.cpp                                      3
-rw-r--r--  src/mongo/db/repl/oplog_applier.cpp                              3
-rw-r--r--  src/mongo/db/repl/oplog_applier_impl.cpp                        56
-rw-r--r--  src/mongo/db/repl/oplog_batcher.cpp                             10
-rw-r--r--  src/mongo/db/repl/replication_coordinator_impl.cpp              57
-rw-r--r--  src/mongo/db/repl/replication_coordinator_impl_elect_v1.cpp      3
-rw-r--r--  src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp     3
-rw-r--r--  src/mongo/db/repl/replication_recovery.cpp                      119
-rw-r--r--  src/mongo/db/repl/rollback_impl.cpp                              49
-rw-r--r--  src/mongo/db/repl/rs_rollback.cpp                               161
-rw-r--r--  src/mongo/db/repl/session_update_tracker.cpp                     22
-rw-r--r--  src/mongo/db/repl/topology_coordinator.cpp                       9
-rw-r--r--  src/mongo/db/repl/topology_coordinator_v1_test.cpp               2
16 files changed, 254 insertions, 274 deletions
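Taken together, the hunks below apply one mechanical pattern: the separate fassertFailed* call that used to follow each fatal log is folded into the LOGV2_FATAL macro family, with the fassert error code becoming the log ID. LOGV2_FATAL and LOGV2_FATAL_NOTRACE now terminate on their own (with and without a stack trace, respectively), while LOGV2_FATAL_CONTINUE is used where control must still reach an explicit fassert, throw, or return after logging. The C++ sketch below is condensed from the bgsync.cpp and oplog_applier.cpp hunks for illustration only; attribute lists and message templates are abbreviated.

    // Before: fatal log followed by a separate fassert with its own error code.
    LOGV2_FATAL(21127, "Sync producer error", "error"_attr = redact(e2.what()));
    fassertFailed(28546);

    // After: the fassert code becomes the log ID; the macro itself terminates.
    LOGV2_FATAL(28546, "Sync producer error", "error"_attr = redact(e2.what()));

    // Before: fatal log followed by fassertFailedNoTrace.
    LOGV2_FATAL(21227, "Turn off rsSyncApplyStop before attempting clean shutdown");
    fassertFailedNoTrace(40304);

    // After: the _NOTRACE variant keeps the no-stack-trace termination behavior.
    LOGV2_FATAL_NOTRACE(40304, "Turn off rsSyncApplyStop before attempting clean shutdown");

    // Where termination is handled separately (an explicit fassert, throw, or
    // return follows), the call site switches to LOGV2_FATAL_CONTINUE, which
    // logs at fatal severity but does not terminate by itself.
    LOGV2_FATAL_CONTINUE(21128, "Rollback failed with unrecoverable error", "error"_attr = status);
    fassertFailedWithStatusNoTrace(50666, status);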
diff --git a/src/mongo/db/repl/bgsync.cpp b/src/mongo/db/repl/bgsync.cpp
index fe6e56e5fff..0d25c62ca9f 100644
--- a/src/mongo/db/repl/bgsync.cpp
+++ b/src/mongo/db/repl/bgsync.cpp
@@ -216,11 +216,10 @@ void BackgroundSync::_run() {
sleepmillis(100); // sleep a bit to keep from hammering this thread with temp. errors.
} catch (const std::exception& e2) {
// redact(std::exception&) doesn't work
- LOGV2_FATAL(21127,
+ LOGV2_FATAL(28546,
"sync producer exception: {error}",
"Sync producer error",
"error"_attr = redact(e2.what()));
- fassertFailed(28546);
}
}
// No need to reset optimes here because we are shutting down.
@@ -784,10 +783,10 @@ void BackgroundSync::_runRollbackViaRecoverToCheckpoint(
if (status.isOK()) {
LOGV2(21105, "Rollback successful");
} else if (status == ErrorCodes::UnrecoverableRollbackError) {
- LOGV2_FATAL(21128,
- "Rollback failed with unrecoverable error: {error}",
- "Rollback failed with unrecoverable error",
- "error"_attr = status);
+ LOGV2_FATAL_CONTINUE(21128,
+ "Rollback failed with unrecoverable error: {error}",
+ "Rollback failed with unrecoverable error",
+ "error"_attr = status);
fassertFailedWithStatusNoTrace(50666, status);
} else {
LOGV2_WARNING(21124,
@@ -907,12 +906,11 @@ OpTime BackgroundSync::_readLastAppliedOpTime(OperationContext* opCtx) {
} catch (const ExceptionForCat<ErrorCategory::ShutdownError>&) {
throw;
} catch (const DBException& ex) {
- LOGV2_FATAL(21129,
+ LOGV2_FATAL(18904,
"Problem reading {namespace}: {error}",
"Problem reading from namespace",
"namespace"_attr = NamespaceString::kRsOplogNamespace.ns(),
"error"_attr = redact(ex));
- fassertFailed(18904);
}
OplogEntry parsedEntry(oplogEntry);
diff --git a/src/mongo/db/repl/drop_pending_collection_reaper.cpp b/src/mongo/db/repl/drop_pending_collection_reaper.cpp
index 53feefcee92..5b610eca52c 100644
--- a/src/mongo/db/repl/drop_pending_collection_reaper.cpp
+++ b/src/mongo/db/repl/drop_pending_collection_reaper.cpp
@@ -89,13 +89,13 @@ void DropPendingCollectionReaper::addDropPendingNamespace(
};
if (std::find_if(lowerBound, upperBound, matcher) != upperBound) {
- LOGV2_FATAL(21156,
- "Failed to add drop-pending collection {dropPendingNamespace} with drop optime "
- "{dropOpTime}: duplicate optime and namespace pair.",
- "Failed to add drop-pending collection: duplicate optime and namespace pair",
- "dropPendingNamespace"_attr = dropPendingNamespace,
- "dropOpTime"_attr = dropOpTime);
- fassertFailedNoTrace(40448);
+ LOGV2_FATAL_NOTRACE(
+ 40448,
+ "Failed to add drop-pending collection {dropPendingNamespace} with drop optime "
+ "{dropOpTime}: duplicate optime and namespace pair.",
+ "Failed to add drop-pending collection: duplicate optime and namespace pair",
+ "dropPendingNamespace"_attr = dropPendingNamespace,
+ "dropOpTime"_attr = dropOpTime);
}
_dropPendingNamespaces.insert(std::make_pair(dropOpTime, dropPendingNamespace));
diff --git a/src/mongo/db/repl/initial_syncer.cpp b/src/mongo/db/repl/initial_syncer.cpp
index c48e3108a44..f45de57cc67 100644
--- a/src/mongo/db/repl/initial_syncer.cpp
+++ b/src/mongo/db/repl/initial_syncer.cpp
@@ -1702,7 +1702,8 @@ void InitialSyncer::_finishInitialSyncAttempt(const StatusWith<OpTimeAndWallTime
// Check if need to do more retries.
if (_stats.failedInitialSyncAttempts >= _stats.maxFailedInitialSyncAttempts) {
- LOGV2_FATAL(21202, "The maximum number of retries have been exhausted for initial sync");
+ LOGV2_FATAL_CONTINUE(21202,
+ "The maximum number of retries have been exhausted for initial sync");
initialSyncFailures.increment();
diff --git a/src/mongo/db/repl/oplog.cpp b/src/mongo/db/repl/oplog.cpp
index 1def8eb05e6..eb9b8953415 100644
--- a/src/mongo/db/repl/oplog.cpp
+++ b/src/mongo/db/repl/oplog.cpp
@@ -272,11 +272,10 @@ void _logOpsInner(OperationContext* opCtx,
Status result = oplogCollection->insertDocumentsForOplog(opCtx, records, timestamps);
if (!result.isOK()) {
- LOGV2_FATAL(21263,
+ LOGV2_FATAL(17322,
"write to oplog failed: {error}",
"Write to oplog failed",
"error"_attr = result.toString());
- fassertFailed(17322);
}
// Set replCoord last optime only after we're sure the WUOW didn't abort and roll back.
diff --git a/src/mongo/db/repl/oplog_applier.cpp b/src/mongo/db/repl/oplog_applier.cpp
index b24f3fd0636..23322f12665 100644
--- a/src/mongo/db/repl/oplog_applier.cpp
+++ b/src/mongo/db/repl/oplog_applier.cpp
@@ -75,8 +75,7 @@ Future<void> OplogApplier::startup() {
void OplogApplier::shutdown() {
// Shutdown will hang if this failpoint is enabled.
if (globalFailPointRegistry().find("rsSyncApplyStop")->shouldFail()) {
- LOGV2_FATAL(21227, "Turn off rsSyncApplyStop before attempting clean shutdown");
- fassertFailedNoTrace(40304);
+ LOGV2_FATAL_NOTRACE(40304, "Turn off rsSyncApplyStop before attempting clean shutdown");
}
stdx::lock_guard<Latch> lock(_mutex);
diff --git a/src/mongo/db/repl/oplog_applier_impl.cpp b/src/mongo/db/repl/oplog_applier_impl.cpp
index a7fe3e6d7ed..71c74ba725c 100644
--- a/src/mongo/db/repl/oplog_applier_impl.cpp
+++ b/src/mongo/db/repl/oplog_applier_impl.cpp
@@ -618,7 +618,7 @@ StatusWith<OpTime> OplogApplierImpl::_applyOplogBatch(OperationContext* opCtx,
invariant(_replCoord);
if (_replCoord->getApplierState() == ReplicationCoordinator::ApplierState::Stopped) {
- LOGV2_FATAL(21234, "Attempting to replicate ops while primary");
+ LOGV2_FATAL_CONTINUE(21234, "Attempting to replicate ops while primary");
return {ErrorCodes::CannotApplyOplogWhilePrimary,
"attempting to replicate ops while primary"};
}
@@ -708,19 +708,19 @@ StatusWith<OpTime> OplogApplierImpl::_applyOplogBatch(OperationContext* opCtx,
for (auto it = statusVector.cbegin(); it != statusVector.cend(); ++it) {
const auto& status = *it;
if (!status.isOK()) {
- LOGV2_FATAL(21235,
- "Failed to apply batch of operations. Number of operations in "
- "batch: {numOperationsInBatch}. First operation: {firstOperation}. "
- "Last operation: "
- "{lastOperation}. Oplog application failed in writer thread "
- "{failedWriterThread}: {error}",
- "Failed to apply batch of operations",
- "numOperationsInBatch"_attr = ops.size(),
- "firstOperation"_attr = redact(ops.front().toBSON()),
- "lastOperation"_attr = redact(ops.back().toBSON()),
- "failedWriterThread"_attr =
- std::distance(statusVector.cbegin(), it),
- "error"_attr = redact(status));
+ LOGV2_FATAL_CONTINUE(
+ 21235,
+ "Failed to apply batch of operations. Number of operations in "
+ "batch: {numOperationsInBatch}. First operation: {firstOperation}. "
+ "Last operation: "
+ "{lastOperation}. Oplog application failed in writer thread "
+ "{failedWriterThread}: {error}",
+ "Failed to apply batch of operations",
+ "numOperationsInBatch"_attr = ops.size(),
+ "firstOperation"_attr = redact(ops.front().toBSON()),
+ "lastOperation"_attr = redact(ops.back().toBSON()),
+ "failedWriterThread"_attr = std::distance(statusVector.cbegin(), it),
+ "error"_attr = redact(status));
return status;
}
}
@@ -741,10 +741,10 @@ StatusWith<OpTime> OplogApplierImpl::_applyOplogBatch(OperationContext* opCtx,
"point is disabled");
while (MONGO_unlikely(pauseBatchApplicationBeforeCompletion.shouldFail())) {
if (inShutdown()) {
- LOGV2_FATAL(21236,
- "Turn off pauseBatchApplicationBeforeCompletion before attempting "
- "clean shutdown");
- fassertFailedNoTrace(50798);
+ LOGV2_FATAL_NOTRACE(
+ 50798,
+ "Turn off pauseBatchApplicationBeforeCompletion before attempting "
+ "clean shutdown");
}
sleepmillis(100);
}
@@ -1067,11 +1067,11 @@ Status OplogApplierImpl::applyOplogBatchPerWorker(OperationContext* opCtx,
continue;
}
- LOGV2_FATAL(21237,
- "Error applying operation ({oplogEntry}): {error}",
- "Error applying operation",
- "oplogEntry"_attr = redact(entry.toBSON()),
- "error"_attr = causedBy(redact(status)));
+ LOGV2_FATAL_CONTINUE(21237,
+ "Error applying operation ({oplogEntry}): {error}",
+ "Error applying operation",
+ "oplogEntry"_attr = redact(entry.toBSON()),
+ "error"_attr = causedBy(redact(status)));
return status;
}
} catch (const DBException& e) {
@@ -1082,11 +1082,11 @@ Status OplogApplierImpl::applyOplogBatchPerWorker(OperationContext* opCtx,
continue;
}
- LOGV2_FATAL(21238,
- "writer worker caught exception: {error} on: {oplogEntry}",
- "Writer worker caught exception",
- "error"_attr = redact(e),
- "oplogEntry"_attr = redact(entry.toBSON()));
+ LOGV2_FATAL_CONTINUE(21238,
+ "writer worker caught exception: {error} on: {oplogEntry}",
+ "Writer worker caught exception",
+ "error"_attr = redact(e),
+ "oplogEntry"_attr = redact(entry.toBSON()));
return e.toStatus();
}
}
diff --git a/src/mongo/db/repl/oplog_batcher.cpp b/src/mongo/db/repl/oplog_batcher.cpp
index 7e393915d58..cfb3d58c863 100644
--- a/src/mongo/db/repl/oplog_batcher.cpp
+++ b/src/mongo/db/repl/oplog_batcher.cpp
@@ -180,11 +180,11 @@ StatusWith<std::vector<OplogEntry>> OplogBatcher::getNextApplierBatch(
// Check for oplog version change.
if (entry.getVersion() != OplogEntry::kOplogVersion) {
static constexpr char message[] = "Unexpected oplog version";
- LOGV2_FATAL(21240,
- message,
- "expectedVersion"_attr = OplogEntry::kOplogVersion,
- "foundVersion"_attr = entry.getVersion(),
- "oplogEntry"_attr = redact(entry.toBSON()));
+ LOGV2_FATAL_CONTINUE(21240,
+ message,
+ "expectedVersion"_attr = OplogEntry::kOplogVersion,
+ "foundVersion"_attr = entry.getVersion(),
+ "oplogEntry"_attr = redact(entry.toBSON()));
return {ErrorCodes::BadValue,
str::stream() << message << ", expected oplog version "
<< OplogEntry::kOplogVersion << ", found version "
diff --git a/src/mongo/db/repl/replication_coordinator_impl.cpp b/src/mongo/db/repl/replication_coordinator_impl.cpp
index f0eb982f285..f14a2e8301d 100644
--- a/src/mongo/db/repl/replication_coordinator_impl.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl.cpp
@@ -445,11 +445,10 @@ bool ReplicationCoordinatorImpl::_startLoadLocalConfig(OperationContext* opCtx)
StatusWith<LastVote> lastVote = _externalState->loadLocalLastVoteDocument(opCtx);
if (!lastVote.isOK()) {
- LOGV2_FATAL(21429,
- "Error loading local voted for document at startup; {error}",
- "Error loading local voted for document at startup",
- "error"_attr = lastVote.getStatus());
- fassertFailedNoTrace(40367);
+ LOGV2_FATAL_NOTRACE(40367,
+ "Error loading local voted for document at startup; {error}",
+ "Error loading local voted for document at startup",
+ "error"_attr = lastVote.getStatus());
}
if (lastVote.getValue().getTerm() == OpTime::kInitialTerm) {
// This log line is checked in unit tests.
@@ -468,11 +467,10 @@ bool ReplicationCoordinatorImpl::_startLoadLocalConfig(OperationContext* opCtx)
auto initializingStatus = _replicationProcess->initializeRollbackID(opCtx);
fassert(40424, initializingStatus);
} else {
- LOGV2_FATAL(21430,
- "Error loading local Rollback ID document at startup; {error}",
- "Error loading local Rollback ID document at startup",
- "error"_attr = status);
- fassertFailedNoTrace(40428);
+ LOGV2_FATAL_NOTRACE(40428,
+ "Error loading local Rollback ID document at startup; {error}",
+ "Error loading local Rollback ID document at startup",
+ "error"_attr = status);
}
}
@@ -488,27 +486,26 @@ bool ReplicationCoordinatorImpl::_startLoadLocalConfig(OperationContext* opCtx)
status = localConfig.initialize(cfg.getValue());
if (!status.isOK()) {
if (status.code() == ErrorCodes::RepairedReplicaSetNode) {
- LOGV2_FATAL(
- 21431,
+ LOGV2_FATAL_NOTRACE(
+ 50923,
"This instance has been repaired and may contain modified replicated data that "
"would not match other replica set members. To see your repaired data, start "
"mongod without the --replSet option. When you are finished recovering your "
"data and would like to perform a complete re-sync, please refer to the "
"documentation here: "
"https://docs.mongodb.com/manual/tutorial/resync-replica-set-member/");
- fassertFailedNoTrace(50923);
}
- LOGV2_ERROR(21414,
- "Locally stored replica set configuration does not parse; See "
- "http://www.mongodb.org/dochub/core/recover-replica-set-from-invalid-config "
- "for information on how to recover from this. Got \"{error}\" while parsing "
- "{config}",
- "Locally stored replica set configuration does not parse; See "
- "hhttp://www.mongodb.org/dochub/core/recover-replica-set-from-invalid-config "
- "for information on how to recover from this",
- "error"_attr = status,
- "config"_attr = cfg.getValue());
- fassertFailedNoTrace(28545);
+ LOGV2_FATAL_NOTRACE(
+ 28545,
+ "Locally stored replica set configuration does not parse; See "
+ "http://www.mongodb.org/dochub/core/recover-replica-set-from-invalid-config "
+ "for information on how to recover from this. Got \"{error}\" while parsing "
+ "{config}",
+ "Locally stored replica set configuration does not parse; See "
+ "hhttp://www.mongodb.org/dochub/core/recover-replica-set-from-invalid-config "
+ "for information on how to recover from this",
+ "error"_attr = status,
+ "config"_attr = cfg.getValue());
}
// Read the last op from the oplog after cleaning up any partially applied batches.
@@ -1162,9 +1159,9 @@ void ReplicationCoordinatorImpl::signalDrainComplete(OperationContext* opCtx,
// occurred after the node became primary and so the concurrent reconfig has updated
// the term appropriately.
if (reconfigStatus != ErrorCodes::ConfigurationInProgress) {
- LOGV2_FATAL(4508101,
- "Reconfig on stepup failed for unknown reasons",
- "error"_attr = reconfigStatus);
+ LOGV2_FATAL_CONTINUE(4508101,
+ "Reconfig on stepup failed for unknown reasons",
+ "error"_attr = reconfigStatus);
fassertFailedWithStatus(31477, reconfigStatus);
}
}
@@ -3245,11 +3242,10 @@ Status ReplicationCoordinatorImpl::doReplSetReconfig(OperationContext* opCtx,
"Cannot run replSetReconfig because the node is currently updating "
"its configuration");
default:
- LOGV2_FATAL(21432,
+ LOGV2_FATAL(18914,
"Unexpected _rsConfigState {_rsConfigState}",
"Unexpected _rsConfigState",
"_rsConfigState"_attr = int(_rsConfigState));
- fassertFailed(18914);
}
invariant(_rsConfig.isInitialized());
@@ -3854,11 +3850,10 @@ void ReplicationCoordinatorImpl::_performPostMemberStateUpdateAction(
_startElectSelfV1(StartElectionReasonEnum::kElectionTimeout);
break;
default:
- LOGV2_FATAL(21433,
+ LOGV2_FATAL(26010,
"Unknown post member state update action {action}",
"Unknown post member state update action",
"action"_attr = static_cast<int>(action));
- fassertFailed(26010);
}
}
diff --git a/src/mongo/db/repl/replication_coordinator_impl_elect_v1.cpp b/src/mongo/db/repl/replication_coordinator_impl_elect_v1.cpp
index a598e513995..847879422f9 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_elect_v1.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_elect_v1.cpp
@@ -110,12 +110,11 @@ void ReplicationCoordinatorImpl::_startElectSelfV1_inlock(StartElectionReasonEnu
_topCoord->processLoseElection();
return;
default:
- LOGV2_FATAL(21452,
+ LOGV2_FATAL(28641,
"Entered replica set election code while in illegal config state "
"{rsConfigState}",
"Entered replica set election code while in illegal config state",
"rsConfigState"_attr = int(_rsConfigState));
- fassertFailed(28641);
}
auto finishedEvent = _makeEvent();
diff --git a/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp b/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp
index a3312ce0731..1b056553be9 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp
@@ -520,12 +520,11 @@ void ReplicationCoordinatorImpl::_scheduleHeartbeatReconfig_inlock(const ReplSet
case kConfigPreStart:
case kConfigStartingUp:
case kConfigReplicationDisabled:
- LOGV2_FATAL(21491,
+ LOGV2_FATAL(18807,
"Reconfiguration request occurred while _rsConfigState == "
"{_rsConfigState}; aborting.",
"Aborting reconfiguration request",
"_rsConfigState"_attr = int(_rsConfigState));
- fassertFailed(18807);
}
_setConfigState_inlock(kConfigHBReconfiguring);
invariant(!_rsConfig.isInitialized() ||
diff --git a/src/mongo/db/repl/replication_recovery.cpp b/src/mongo/db/repl/replication_recovery.cpp
index 968ec117a0f..2275e0b5d76 100644
--- a/src/mongo/db/repl/replication_recovery.cpp
+++ b/src/mongo/db/repl/replication_recovery.cpp
@@ -157,21 +157,20 @@ public:
attrs.add("oplogApplicationEndPoint", _oplogApplicationEndPoint->toBSON());
}
- LOGV2_FATAL(
- 21559, "Couldn't find any entries in the oplog, which should be impossible", attrs);
- fassertFailedNoTrace(40293);
+ LOGV2_FATAL_NOTRACE(
+ 40293, "Couldn't find any entries in the oplog, which should be impossible", attrs);
}
auto firstTimestampFound =
fassert(40291, OpTime::parseFromOplogEntry(_cursor->nextSafe())).getTimestamp();
if (firstTimestampFound != _oplogApplicationStartPoint) {
- LOGV2_FATAL(21560,
- "Oplog entry at {oplogApplicationStartPoint} is missing; actual entry "
- "found is {firstTimestampFound}",
- "Oplog entry at oplogApplicationStartPoint is missing",
- "oplogApplicationStartPoint"_attr = _oplogApplicationStartPoint.toBSON(),
- "firstTimestampFound"_attr = firstTimestampFound.toBSON());
- fassertFailedNoTrace(40292);
+ LOGV2_FATAL_NOTRACE(
+ 40292,
+ "Oplog entry at {oplogApplicationStartPoint} is missing; actual entry "
+ "found is {firstTimestampFound}",
+ "Oplog entry at oplogApplicationStartPoint is missing",
+ "oplogApplicationStartPoint"_attr = _oplogApplicationStartPoint.toBSON(),
+ "firstTimestampFound"_attr = firstTimestampFound.toBSON());
}
}
@@ -237,10 +236,10 @@ private:
boost::optional<Timestamp> recoverFromOplogPrecursor(OperationContext* opCtx,
StorageInterface* storageInterface) {
if (!storageInterface->supportsRecoveryTimestamp(opCtx->getServiceContext())) {
- LOGV2_FATAL(21561,
- "Cannot recover from the oplog with a storage engine that does not support "
- "recover to stable timestamp");
- fassertFailedNoTrace(50805);
+ LOGV2_FATAL_NOTRACE(
+ 50805,
+ "Cannot recover from the oplog with a storage engine that does not support "
+ "recover to stable timestamp");
}
// A non-existent recoveryTS means the checkpoint is unstable. If the recoveryTS exists but
@@ -248,9 +247,8 @@ boost::optional<Timestamp> recoverFromOplogPrecursor(OperationContext* opCtx,
// happen.
auto recoveryTS = storageInterface->getRecoveryTimestamp(opCtx->getServiceContext());
if (recoveryTS && recoveryTS->isNull()) {
- LOGV2_FATAL(21562,
- "Cannot recover from the oplog with stable checkpoint at null timestamp");
- fassertFailedNoTrace(50806);
+ LOGV2_FATAL_NOTRACE(
+ 50806, "Cannot recover from the oplog with stable checkpoint at null timestamp");
}
return recoveryTS;
@@ -267,50 +265,48 @@ void ReplicationRecoveryImpl::_assertNoRecoveryNeededOnUnstableCheckpoint(Operat
invariant(!_storageInterface->getRecoveryTimestamp(opCtx->getServiceContext()));
if (_consistencyMarkers->getInitialSyncFlag(opCtx)) {
- LOGV2_FATAL(21563, "Unexpected recovery needed, initial sync flag set");
- fassertFailedNoTrace(31362);
+ LOGV2_FATAL_NOTRACE(31362, "Unexpected recovery needed, initial sync flag set");
}
const auto truncateAfterPoint = _consistencyMarkers->getOplogTruncateAfterPoint(opCtx);
if (!truncateAfterPoint.isNull()) {
- LOGV2_FATAL(21564,
- "Unexpected recovery needed, oplog requires truncation. Truncate after point: "
- "{oplogTruncateAfterPoint}",
- "Unexpected recovery needed, oplog requires truncation",
- "oplogTruncateAfterPoint"_attr = truncateAfterPoint.toString());
- fassertFailedNoTrace(31363);
+ LOGV2_FATAL_NOTRACE(
+ 31363,
+ "Unexpected recovery needed, oplog requires truncation. Truncate after point: "
+ "{oplogTruncateAfterPoint}",
+ "Unexpected recovery needed, oplog requires truncation",
+ "oplogTruncateAfterPoint"_attr = truncateAfterPoint.toString());
}
auto topOfOplogSW = _getTopOfOplog(opCtx);
if (!topOfOplogSW.isOK()) {
- LOGV2_FATAL(21565,
- "Recovery not possible, no oplog found: {error}",
- "Recovery not possible, no oplog found",
- "error"_attr = topOfOplogSW.getStatus());
- fassertFailedNoTrace(31364);
+ LOGV2_FATAL_NOTRACE(31364,
+ "Recovery not possible, no oplog found: {error}",
+ "Recovery not possible, no oplog found",
+ "error"_attr = topOfOplogSW.getStatus());
}
const auto topOfOplog = topOfOplogSW.getValue();
const auto appliedThrough = _consistencyMarkers->getAppliedThrough(opCtx);
if (!appliedThrough.isNull() && appliedThrough != topOfOplog) {
- LOGV2_FATAL(21566,
- "Unexpected recovery needed, appliedThrough is not at top of oplog, indicating "
- "oplog has not been fully applied. appliedThrough: {appliedThrough}",
- "Unexpected recovery needed, appliedThrough is not at top of oplog, indicating "
- "oplog has not been fully applied",
- "appliedThrough"_attr = appliedThrough.toString());
- fassertFailedNoTrace(31365);
+ LOGV2_FATAL_NOTRACE(
+ 31365,
+ "Unexpected recovery needed, appliedThrough is not at top of oplog, indicating "
+ "oplog has not been fully applied. appliedThrough: {appliedThrough}",
+ "Unexpected recovery needed, appliedThrough is not at top of oplog, indicating "
+ "oplog has not been fully applied",
+ "appliedThrough"_attr = appliedThrough.toString());
}
const auto minValid = _consistencyMarkers->getMinValid(opCtx);
if (minValid > topOfOplog) {
- LOGV2_FATAL(21567,
- "Unexpected recovery needed, top of oplog is not consistent. topOfOplog: "
- "{topOfOplog}, minValid: {minValid}",
- "Unexpected recovery needed, top of oplog is not consistent",
- "topOfOplog"_attr = topOfOplog,
- "minValid"_attr = minValid);
- fassertFailedNoTrace(31366);
+ LOGV2_FATAL_NOTRACE(
+ 31366,
+ "Unexpected recovery needed, top of oplog is not consistent. topOfOplog: "
+ "{topOfOplog}, minValid: {minValid}",
+ "Unexpected recovery needed, top of oplog is not consistent",
+ "topOfOplog"_attr = topOfOplog,
+ "minValid"_attr = minValid);
}
}
@@ -337,9 +333,8 @@ void ReplicationRecoveryImpl::recoverFromOplogAsStandalone(OperationContext* opC
"Not doing any oplog recovery since there is an unstable checkpoint that is up "
"to date");
} else {
- LOGV2_FATAL(21568,
- "Cannot use 'recoverFromOplogAsStandalone' without a stable checkpoint");
- fassertFailedNoTrace(31229);
+ LOGV2_FATAL_NOTRACE(
+ 31229, "Cannot use 'recoverFromOplogAsStandalone' without a stable checkpoint");
}
}
@@ -359,8 +354,8 @@ void ReplicationRecoveryImpl::recoverFromOplogUpTo(OperationContext* opCtx, Time
auto recoveryTS = recoverFromOplogPrecursor(opCtx, _storageInterface);
if (!recoveryTS) {
- LOGV2_FATAL(21569, "Cannot use 'recoverToOplogTimestamp' without a stable checkpoint");
- fassertFailedNoTrace(31399);
+ LOGV2_FATAL_NOTRACE(31399,
+ "Cannot use 'recoverToOplogTimestamp' without a stable checkpoint");
}
// This may take an IS lock on the oplog collection.
@@ -462,10 +457,10 @@ void ReplicationRecoveryImpl::recoverFromOplog(OperationContext* opCtx,
_recoverFromUnstableCheckpoint(opCtx, appliedThrough, topOfOplog);
}
} catch (...) {
- LOGV2_FATAL(21570,
- "Caught exception during replication recovery: {error}",
- "Caught exception during replication recovery",
- "error"_attr = exceptionToStatus());
+ LOGV2_FATAL_CONTINUE(21570,
+ "Caught exception during replication recovery: {error}",
+ "Caught exception during replication recovery",
+ "error"_attr = exceptionToStatus());
std::terminate();
}
@@ -572,13 +567,12 @@ void ReplicationRecoveryImpl::_applyToEndOfOplog(OperationContext* opCtx,
"No oplog entries to apply for recovery. Start point is at the top of the oplog");
return; // We've applied all the valid oplog we have.
} else if (oplogApplicationStartPoint > topOfOplog) {
- LOGV2_FATAL(
- 21571,
+ LOGV2_FATAL_NOTRACE(
+ 40313,
"Applied op {oplogApplicationStartPoint} not found. Top of oplog is {topOfOplog}.",
"Applied op oplogApplicationStartPoint not found",
"oplogApplicationStartPoint"_attr = oplogApplicationStartPoint.toBSON(),
"topOfOplog"_attr = topOfOplog.toBSON());
- fassertFailedNoTrace(40313);
}
Timestamp appliedUpTo = _applyOplogOperations(opCtx, oplogApplicationStartPoint, topOfOplog);
@@ -679,13 +673,12 @@ void ReplicationRecoveryImpl::_truncateOplogTo(OperationContext* opCtx,
_storageInterface->findOplogEntryLessThanOrEqualToTimestamp(
opCtx, oplogCollection, truncateAfterTimestamp);
if (!truncateAfterOplogEntryBSON) {
- LOGV2_FATAL(21572,
- "Reached end of oplog looking for an oplog entry lte to "
- "{oplogTruncateAfterPoint} but did not find one",
- "Reached end of oplog looking for an oplog entry lte to "
- "oplogTruncateAfterPoint but did not find one",
- "oplogTruncateAfterPoint"_attr = truncateAfterTimestamp.toBSON());
- fassertFailedNoTrace(40296);
+ LOGV2_FATAL_NOTRACE(40296,
+ "Reached end of oplog looking for an oplog entry lte to "
+ "{oplogTruncateAfterPoint} but did not find one",
+ "Reached end of oplog looking for an oplog entry lte to "
+ "oplogTruncateAfterPoint but did not find one",
+ "oplogTruncateAfterPoint"_attr = truncateAfterTimestamp.toBSON());
}
// Parse the response.
diff --git a/src/mongo/db/repl/rollback_impl.cpp b/src/mongo/db/repl/rollback_impl.cpp
index 88ecd1e849a..53c91047487 100644
--- a/src/mongo/db/repl/rollback_impl.cpp
+++ b/src/mongo/db/repl/rollback_impl.cpp
@@ -1025,13 +1025,12 @@ StatusWith<RollBackLocalOperations::RollbackCommonPoint> RollbackImpl::_findComm
if (commonPointOpTime.getTimestamp() < *stableTimestamp) {
// This is an fassert rather than an invariant, since it can happen if the server was
// recently upgraded to enableMajorityReadConcern=true.
- LOGV2_FATAL(21644,
- "Common point must be at least stable timestamp, common point: "
- "{commonPoint}, stable timestamp: {stableTimestamp}",
- "Common point must be at least stable timestamp",
- "commonPoint"_attr = commonPointOpTime.getTimestamp(),
- "stableTimestamp"_attr = *stableTimestamp);
- fassertFailedNoTrace(51121);
+ LOGV2_FATAL_NOTRACE(51121,
+ "Common point must be at least stable timestamp, common point: "
+ "{commonPoint}, stable timestamp: {stableTimestamp}",
+ "Common point must be at least stable timestamp",
+ "commonPoint"_attr = commonPointOpTime.getTimestamp(),
+ "stableTimestamp"_attr = *stableTimestamp);
}
return commonPointSW.getValue();
@@ -1100,14 +1099,15 @@ boost::optional<BSONObj> RollbackImpl::_findDocumentById(OperationContext* opCtx
} else if (document.getStatus().code() == ErrorCodes::NoSuchKey) {
return boost::none;
} else {
- LOGV2_FATAL(21645,
- "Rollback failed to read document with {id} in namespace {namespace} with uuid "
- "{uuid}{error}",
- "Rollback failed to read document",
- "id"_attr = redact(id),
- "namespace"_attr = nss.ns(),
- "uuid"_attr = uuid.toString(),
- "error"_attr = causedBy(document.getStatus()));
+ LOGV2_FATAL_CONTINUE(
+ 21645,
+ "Rollback failed to read document with {id} in namespace {namespace} with uuid "
+ "{uuid}{error}",
+ "Rollback failed to read document",
+ "id"_attr = redact(id),
+ "namespace"_attr = nss.ns(),
+ "uuid"_attr = uuid.toString(),
+ "error"_attr = causedBy(document.getStatus()));
fassert(50751, document.getStatus());
}
@@ -1219,16 +1219,15 @@ void RollbackImpl::_transitionFromRollbackToSecondary(OperationContext* opCtx) {
auto status = _replicationCoordinator->setFollowerMode(MemberState::RS_SECONDARY);
if (!status.isOK()) {
- LOGV2_FATAL(21646,
- "Failed to transition into {targetState}; expected to be in "
- "state {expectedState}; found self in "
- "{actualState} {error}",
- "Failed to perform replica set state transition",
- "targetState"_attr = MemberState(MemberState::RS_SECONDARY),
- "expectedState"_attr = MemberState(MemberState::RS_ROLLBACK),
- "actualState"_attr = _replicationCoordinator->getMemberState(),
- "error"_attr = causedBy(status));
- fassertFailedNoTrace(40408);
+ LOGV2_FATAL_NOTRACE(40408,
+ "Failed to transition into {targetState}; expected to be in "
+ "state {expectedState}; found self in "
+ "{actualState} {error}",
+ "Failed to perform replica set state transition",
+ "targetState"_attr = MemberState(MemberState::RS_SECONDARY),
+ "expectedState"_attr = MemberState(MemberState::RS_ROLLBACK),
+ "actualState"_attr = _replicationCoordinator->getMemberState(),
+ "error"_attr = causedBy(status));
}
}
diff --git a/src/mongo/db/repl/rs_rollback.cpp b/src/mongo/db/repl/rs_rollback.cpp
index 323afa75b64..f62d8d39998 100644
--- a/src/mongo/db/repl/rs_rollback.cpp
+++ b/src/mongo/db/repl/rs_rollback.cpp
@@ -393,11 +393,11 @@ Status rollback_internal::updateFixUpInfoFromLocalOplogEntry(OperationContext* o
string indexName;
auto status = bsonExtractStringField(obj, "index", &indexName);
if (!status.isOK()) {
- LOGV2_FATAL(21731,
- "Missing index name in dropIndexes operation on rollback, "
- "document: {oplogEntry}",
- "Missing index name in dropIndexes operation on rollback",
- "oplogEntry"_attr = redact(oplogEntry.toBSON()));
+ LOGV2_FATAL_CONTINUE(21731,
+ "Missing index name in dropIndexes operation on rollback, "
+ "document: {oplogEntry}",
+ "Missing index name in dropIndexes operation on rollback",
+ "oplogEntry"_attr = redact(oplogEntry.toBSON()));
throw RSFatalException(
"Missing index name in dropIndexes operation on rollback.");
}
@@ -433,11 +433,12 @@ Status rollback_internal::updateFixUpInfoFromLocalOplogEntry(OperationContext* o
string indexName;
auto status = bsonExtractStringField(obj, "name", &indexName);
if (!status.isOK()) {
- LOGV2_FATAL(21732,
- "Missing index name in createIndexes operation on rollback, "
- "document: {oplogEntry}",
- "Missing index name in createIndexes operation on rollback",
- "oplogEntry"_attr = redact(oplogEntry.toBSON()));
+ LOGV2_FATAL_CONTINUE(
+ 21732,
+ "Missing index name in createIndexes operation on rollback, "
+ "document: {oplogEntry}",
+ "Missing index name in createIndexes operation on rollback",
+ "oplogEntry"_attr = redact(oplogEntry.toBSON()));
throw RSFatalException(
"Missing index name in createIndexes operation on rollback.");
}
@@ -689,7 +690,7 @@ Status rollback_internal::updateFixUpInfoFromLocalOplogEntry(OperationContext* o
}
// Some collMod fields cannot be rolled back, such as the index field.
static constexpr char message[] = "Cannot roll back a collMod command";
- LOGV2_FATAL(21733, message, "oplogEntry"_attr = redact(obj));
+ LOGV2_FATAL_CONTINUE(21733, message, "oplogEntry"_attr = redact(obj));
throw RSFatalException(message);
}
return Status::OK();
@@ -725,7 +726,8 @@ Status rollback_internal::updateFixUpInfoFromLocalOplogEntry(OperationContext* o
if (operations.type() != Array) {
static constexpr char message[] =
"Expected applyOps argument to be an array";
- LOGV2_FATAL(21734, message, "operations"_attr = redact(operations));
+ LOGV2_FATAL_CONTINUE(
+ 21734, message, "operations"_attr = redact(operations));
return Status(ErrorCodes::UnrecoverableRollbackError,
str::stream() << message << "; found " << redact(operations));
}
@@ -733,7 +735,8 @@ Status rollback_internal::updateFixUpInfoFromLocalOplogEntry(OperationContext* o
if (subopElement.type() != Object) {
static constexpr char message[] =
"Expected applyOps operations to be of Object type";
- LOGV2_FATAL(21735, message, "operation"_attr = redact(subopElement));
+ LOGV2_FATAL_CONTINUE(
+ 21735, message, "operation"_attr = redact(subopElement));
return Status(ErrorCodes::UnrecoverableRollbackError,
str::stream()
<< message << ", but found " << redact(subopElement));
@@ -768,10 +771,10 @@ Status rollback_internal::updateFixUpInfoFromLocalOplogEntry(OperationContext* o
}
default: {
static constexpr char message[] = "Can't roll back this command yet";
- LOGV2_FATAL(21736,
- message,
- "commandName"_attr = first.fieldName(),
- "command"_attr = redact(obj));
+ LOGV2_FATAL_CONTINUE(21736,
+ message,
+ "commandName"_attr = first.fieldName(),
+ "command"_attr = redact(obj));
throw RSFatalException(str::stream()
<< message << ": cmdname = " << first.fieldName());
}
@@ -785,10 +788,10 @@ Status rollback_internal::updateFixUpInfoFromLocalOplogEntry(OperationContext* o
doc._id = oplogEntry.getIdElement();
if (doc._id.eoo()) {
static constexpr char message[] = "Cannot roll back op with no _id";
- LOGV2_FATAL(21737,
- message,
- "namespace"_attr = nss.ns(),
- "oplogEntry"_attr = redact(oplogEntry.toBSON()));
+ LOGV2_FATAL_CONTINUE(21737,
+ message,
+ "namespace"_attr = nss.ns(),
+ "oplogEntry"_attr = redact(oplogEntry.toBSON()));
throw RSFatalException(str::stream() << message << ". ns: " << nss.ns());
}
fixUpInfo.docsToRefetch.insert(doc);
@@ -872,7 +875,7 @@ void dropIndex(OperationContext* opCtx,
if (entry->isReady(opCtx)) {
auto status = indexCatalog->dropIndex(opCtx, indexDescriptor);
if (!status.isOK()) {
- LOGV2_FATAL(21738,
+ LOGV2_ERROR(21738,
"Rollback failed to drop index {indexName} in {namespace}: {error}",
"Rollback failed to drop index",
"indexName"_attr = indexName,
@@ -882,7 +885,7 @@ void dropIndex(OperationContext* opCtx,
} else {
auto status = indexCatalog->dropUnfinishedIndex(opCtx, indexDescriptor);
if (!status.isOK()) {
- LOGV2_FATAL(
+ LOGV2_ERROR(
21739,
"Rollback failed to drop unfinished index {indexName} in {namespace}: {error}",
"Rollback failed to drop unfinished index",
@@ -1034,7 +1037,7 @@ void dropCollection(OperationContext* opCtx,
while (PlanExecutor::ADVANCED == (execState = exec->getNext(&curObj, nullptr))) {
auto status = removeSaver.goingToDelete(curObj);
if (!status.isOK()) {
- LOGV2_FATAL(
+ LOGV2_FATAL_CONTINUE(
21740,
"Rolling back createCollection on {namespace} failed to write document to "
"remove saver file: {error}",
@@ -1057,20 +1060,22 @@ void dropCollection(OperationContext* opCtx,
if (execState == PlanExecutor::FAILURE &&
WorkingSetCommon::isValidStatusMemberObject(curObj)) {
Status errorStatus = WorkingSetCommon::getMemberObjectStatus(curObj);
- LOGV2_FATAL(21741,
- "Rolling back createCollection on {namespace} failed with {error}. A "
- "full resync is necessary.",
- "Rolling back createCollection failed. A full resync is necessary",
- "namespace"_attr = nss,
- "error"_attr = redact(errorStatus));
+ LOGV2_FATAL_CONTINUE(
+ 21741,
+ "Rolling back createCollection on {namespace} failed with {error}. A "
+ "full resync is necessary.",
+ "Rolling back createCollection failed. A full resync is necessary",
+ "namespace"_attr = nss,
+ "error"_attr = redact(errorStatus));
throw RSFatalException(
"Rolling back createCollection failed. A full resync is necessary.");
} else {
- LOGV2_FATAL(21742,
- "Rolling back createCollection on {namespace} failed. A full resync is "
- "necessary.",
- "Rolling back createCollection failed. A full resync is necessary",
- "namespace"_attr = nss);
+ LOGV2_FATAL_CONTINUE(
+ 21742,
+ "Rolling back createCollection on {namespace} failed. A full resync is "
+ "necessary.",
+ "Rolling back createCollection failed. A full resync is necessary",
+ "namespace"_attr = nss);
throw RSFatalException(
"Rolling back createCollection failed. A full resync is necessary.");
}
@@ -1105,7 +1110,7 @@ void renameOutOfTheWay(OperationContext* opCtx, RenameCollectionInfo info, Datab
// namespace.
auto tmpNameResult = db->makeUniqueCollectionNamespace(opCtx, "rollback.tmp%%%%%");
if (!tmpNameResult.isOK()) {
- LOGV2_FATAL(
+ LOGV2_FATAL_CONTINUE(
21743,
"Unable to generate temporary namespace to rename collection {renameTo} "
"out of the way. {error}",
@@ -1135,11 +1140,12 @@ void renameOutOfTheWay(OperationContext* opCtx, RenameCollectionInfo info, Datab
auto renameStatus = renameCollectionForRollback(opCtx, tempNss, uuid);
if (!renameStatus.isOK()) {
- LOGV2_FATAL(21744,
- "Unable to rename collection {renameTo} out of the way to {tempNamespace}",
- "Unable to rename renameTo collection out of the way to a temporary namespace",
- "renameTo"_attr = info.renameTo,
- "tempNamespace"_attr = tempNss);
+ LOGV2_FATAL_CONTINUE(
+ 21744,
+ "Unable to rename collection {renameTo} out of the way to {tempNamespace}",
+ "Unable to rename renameTo collection out of the way to a temporary namespace",
+ "renameTo"_attr = info.renameTo,
+ "tempNamespace"_attr = tempNss);
throw RSFatalException("Unable to rename collection out of the way");
}
}
@@ -1177,22 +1183,23 @@ void rollbackRenameCollection(OperationContext* opCtx, UUID uuid, RenameCollecti
status = renameCollectionForRollback(opCtx, info.renameTo, uuid);
if (!status.isOK()) {
- LOGV2_FATAL(21745,
- "Rename collection failed to roll back twice. We were unable to rename "
- "collection {renameFrom} to {renameTo}. {error}",
- "Rename collection failed to roll back twice",
- "renameFrom"_attr = info.renameFrom,
- "renameTo"_attr = info.renameTo,
- "error"_attr = status.toString());
+ LOGV2_FATAL_CONTINUE(
+ 21745,
+ "Rename collection failed to roll back twice. We were unable to rename "
+ "collection {renameFrom} to {renameTo}. {error}",
+ "Rename collection failed to roll back twice",
+ "renameFrom"_attr = info.renameFrom,
+ "renameTo"_attr = info.renameTo,
+ "error"_attr = status.toString());
throw RSFatalException(
"Rename collection failed to roll back twice. We were unable to rename "
"the collection.");
}
} else if (!status.isOK()) {
- LOGV2_FATAL(21746,
- "Unable to roll back renameCollection command: {error}",
- "Unable to roll back renameCollection command",
- "error"_attr = status.toString());
+ LOGV2_FATAL_CONTINUE(21746,
+ "Unable to roll back renameCollection command: {error}",
+ "Unable to roll back renameCollection command",
+ "error"_attr = status.toString());
throw RSFatalException("Unable to rollback renameCollection command");
}
@@ -1754,7 +1761,7 @@ void rollback_internal::syncFixUp(OperationContext* opCtx,
if (found) {
auto status = removeSaver->goingToDelete(obj);
if (!status.isOK()) {
- LOGV2_FATAL(
+ LOGV2_FATAL_CONTINUE(
21747,
"Rollback cannot write document in namespace {namespace} to "
"archive file: {error}",
@@ -1994,11 +2001,10 @@ void rollback_internal::syncFixUp(OperationContext* opCtx,
Status status = AuthorizationManager::get(opCtx->getServiceContext())->initialize(opCtx);
if (!status.isOK()) {
- LOGV2_FATAL(21748,
- "Failed to reinitialize auth data after rollback: {error}",
- "Failed to reinitialize auth data after rollback",
- "error"_attr = redact(status));
- fassertFailedNoTrace(40496);
+ LOGV2_FATAL_NOTRACE(40496,
+ "Failed to reinitialize auth data after rollback: {error}",
+ "Failed to reinitialize auth data after rollback",
+ "error"_attr = redact(status));
}
// If necessary, clear the memory of existing sessions.
@@ -2121,11 +2127,10 @@ void rollback(OperationContext* opCtx,
// WARNING: these statuses sometimes have location codes which are lost with uassertStatusOK
// so we need to check here first.
if (ErrorCodes::UnrecoverableRollbackError == status.code()) {
- LOGV2_FATAL(21749,
- "Unable to complete rollback. A full resync may be needed: {error}",
- "Unable to complete rollback. A full resync may be needed",
- "error"_attr = redact(status));
- fassertFailedNoTrace(40507);
+ LOGV2_FATAL_NOTRACE(40507,
+ "Unable to complete rollback. A full resync may be needed: {error}",
+ "Unable to complete rollback. A full resync may be needed",
+ "error"_attr = redact(status));
}
// In other cases, we log the message contained in the error status and retry later.
@@ -2149,10 +2154,9 @@ void rollback(OperationContext* opCtx,
// will be unable to successfully perform any more rollback attempts. The knowledge of these
// stopped index builds gets lost after the first attempt.
if (stoppedIndexBuilds.size()) {
- LOGV2_FATAL(4655801,
- "Index builds stopped prior to rollback cannot be restarted by "
- "subsequent rollback attempts");
- fassertFailedNoTrace(4655800);
+ LOGV2_FATAL_NOTRACE(4655800,
+ "Index builds stopped prior to rollback cannot be restarted by "
+ "subsequent rollback attempts");
}
// Sleep a bit to allow upstream node to coalesce, if that was the cause of the failure. If
@@ -2175,22 +2179,21 @@ void rollback(OperationContext* opCtx,
// then we must shut down to clear the in-memory ShardingState associated with the
// shardIdentity document.
if (ShardIdentityRollbackNotifier::get(opCtx)->didRollbackHappen()) {
- LOGV2_FATAL(21750,
- "shardIdentity document rollback detected. Shutting down to clear "
- "in-memory sharding state. Restarting this process should safely return it "
- "to a healthy state");
- fassertFailedNoTrace(40498);
+ LOGV2_FATAL_NOTRACE(
+ 40498,
+ "shardIdentity document rollback detected. Shutting down to clear "
+ "in-memory sharding state. Restarting this process should safely return it "
+ "to a healthy state");
}
auto status = replCoord->setFollowerMode(MemberState::RS_RECOVERING);
if (!status.isOK()) {
- LOGV2_FATAL(21751,
- "Failed to perform replica set state transition",
- "targetState"_attr = MemberState(MemberState::RS_RECOVERING),
- "expectedState"_attr = MemberState(MemberState::RS_ROLLBACK),
- "actualState"_attr = replCoord->getMemberState(),
- "error"_attr = status);
- fassertFailedNoTrace(40499);
+ LOGV2_FATAL_NOTRACE(40499,
+ "Failed to perform replica set state transition",
+ "targetState"_attr = MemberState(MemberState::RS_RECOVERING),
+ "expectedState"_attr = MemberState(MemberState::RS_ROLLBACK),
+ "actualState"_attr = replCoord->getMemberState(),
+ "error"_attr = status);
}
}
diff --git a/src/mongo/db/repl/session_update_tracker.cpp b/src/mongo/db/repl/session_update_tracker.cpp
index edf8654a0af..77fc9d2197f 100644
--- a/src/mongo/db/repl/session_update_tracker.cpp
+++ b/src/mongo/db/repl/session_update_tracker.cpp
@@ -191,18 +191,16 @@ void SessionUpdateTracker::_updateSessionInfo(const OplogEntry& entry) {
return;
}
- LOGV2_FATAL(23792,
- "Entry for session {lsid} has txnNumber {sessionInfo_getTxnNumber} < "
- "{existingSessionInfo_getTxnNumber}",
- "lsid"_attr = lsid->toBSON(),
- "sessionInfo_getTxnNumber"_attr = *sessionInfo.getTxnNumber(),
- "existingSessionInfo_getTxnNumber"_attr = *existingSessionInfo.getTxnNumber());
- LOGV2_FATAL(23793, "New oplog entry: {entry}", "entry"_attr = redact(entry.toString()));
- LOGV2_FATAL(23794,
- "Existing oplog entry: {iter_second}",
- "iter_second"_attr = redact(iter->second.toString()));
-
- fassertFailedNoTrace(50843);
+ LOGV2_FATAL_NOTRACE(50843,
+ "Entry for session {lsid} has txnNumber {sessionInfo_getTxnNumber} < "
+ "{existingSessionInfo_getTxnNumber}. New oplog entry: {newEntry}, Existing "
+ "oplog entry: {existingEntry}",
+ "lsid"_attr = lsid->toBSON(),
+ "sessionInfo_getTxnNumber"_attr = *sessionInfo.getTxnNumber(),
+ "existingSessionInfo_getTxnNumber"_attr =
+ *existingSessionInfo.getTxnNumber(),
+ "newEntry"_attr = redact(entry.toString()),
+ "existingEntry"_attr = redact(iter->second.toString()));
}
std::vector<OplogEntry> SessionUpdateTracker::_flush(const OplogEntry& entry) {
diff --git a/src/mongo/db/repl/topology_coordinator.cpp b/src/mongo/db/repl/topology_coordinator.cpp
index f801a283c9d..c901b7098f8 100644
--- a/src/mongo/db/repl/topology_coordinator.cpp
+++ b/src/mongo/db/repl/topology_coordinator.cpp
@@ -217,9 +217,8 @@ HostAndPort TopologyCoordinator::chooseNewSyncSource(Date_t now,
!_rsConfig.isChainingAllowed())) {
if (readPreference == ReadPreference::SecondaryOnly) {
LOGV2_FATAL(
- 3873102,
+ 3873103,
"Sync source read preference 'secondaryOnly' with chaining disabled is not valid.");
- fassertFailed(3873103);
}
_syncSource = _choosePrimaryAsSyncSource(now, lastOpTimeFetched);
if (_syncSource.empty()) {
@@ -444,12 +443,11 @@ boost::optional<HostAndPort> TopologyCoordinator::_chooseSyncSourceInitialStep(D
const auto& data = sfp.getData();
const auto hostAndPortElem = data["hostAndPort"];
if (!hostAndPortElem) {
- LOGV2_FATAL(21839,
+ LOGV2_FATAL(50835,
"'forceSyncSoureCandidate' parameter set with invalid host and port: "
"{failpointData}",
"'forceSyncSoureCandidate' parameter set with invalid host and port",
"failpointData"_attr = data);
- fassertFailed(50835);
}
const auto hostAndPort = HostAndPort(hostAndPortElem.checkAndGetStringData());
@@ -2368,11 +2366,10 @@ std::string TopologyCoordinator::_getUnelectableReasonString(const UnelectableRe
ss << "node is not a member of a valid replica set configuration";
}
if (!hasWrittenToStream) {
- LOGV2_FATAL(21842,
+ LOGV2_FATAL(26011,
"Invalid UnelectableReasonMask value 0x{value}",
"Invalid UnelectableReasonMask value",
"value"_attr = integerToHex(ur));
- fassertFailed(26011);
}
ss << " (mask 0x" << integerToHex(ur) << ")";
return ss;
diff --git a/src/mongo/db/repl/topology_coordinator_v1_test.cpp b/src/mongo/db/repl/topology_coordinator_v1_test.cpp
index 6d7c0813eea..7363a4e1385 100644
--- a/src/mongo/db/repl/topology_coordinator_v1_test.cpp
+++ b/src/mongo/db/repl/topology_coordinator_v1_test.cpp
@@ -797,7 +797,7 @@ TEST_F(TopoCoordTest, ChooseOnlyPrimaryAsSyncSourceWhenChainingIsDisallowed) {
ASSERT(getTopoCoord().getSyncSourceAddress().empty());
}
-DEATH_TEST_F(TopoCoordTest, SecondaryOnlyAssertsWhenChainingNotAllowed, "3873102") {
+DEATH_TEST_F(TopoCoordTest, SecondaryOnlyAssertsWhenChainingNotAllowed, "3873103") {
updateConfig(BSON("_id"
<< "rs0"
<< "version" << 1 << "settings" << BSON("chainingAllowed" << false)