author    | Henrik Edin <henrik.edin@mongodb.com> | 2020-03-23 10:04:42 -0400
committer | Evergreen Agent <no-reply@evergreen.mongodb.com> | 2020-03-24 20:22:44 +0000
commit    | edb8778350326d2b33f056b1b5f0b25a4b5b444a (patch)
tree      | 15afcaa7a707be0872b764cd054aee55d855ba92 /src/mongo/db
parent    | 51b338ad41653a8188adcc67b682ea12bbe63b4d (diff)
download  | mongo-edb8778350326d2b33f056b1b5f0b25a4b5b444a.tar.gz
SERVER-47040 LOGV2_FATAL also fasserts
Added LOGV2_FATAL_NOTRACE and LOGV2_FATAL_CONTINUE for the cases that need different behavior.
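As the call sites updated below illustrate, the three macros now differ only in what happens after the fatal log line is emitted. A minimal summary (ids and messages taken verbatim from hunks in this diff):

    // LOGV2_FATAL now fasserts with the given id after logging, so the
    // separate fassertFailed(40350) call becomes redundant:
    LOGV2_FATAL(40350, "FSyncLockThread exception: {e_what}", "e_what"_attr = e.what());

    // LOGV2_FATAL_NOTRACE fasserts without printing a stack trace, replacing
    // the old LOGV2_FATAL(...) + fassertFailedNoTrace(40304) pair:
    LOGV2_FATAL_NOTRACE(40304, "Turn off rsSyncApplyStop before attempting clean shutdown");

    // LOGV2_FATAL_CONTINUE only logs at fatal severity; the caller remains
    // responsible for terminating (return, quickExit, std::terminate, ...):
    LOGV2_FATAL_CONTINUE(21234, "Attempting to replicate ops while primary");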
Diffstat (limited to 'src/mongo/db')
40 files changed, 398 insertions, 439 deletions
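Call sites that must both target a specific log component and keep running after the log line now pass an explicit fatal mode through LOGV2_FATAL_OPTIONS, as in the db.cpp hunks below (pattern copied from this diff; the surrounding control flow is the caller's):

    LOGV2_FATAL_OPTIONS(
        20574,
        logv2::LogOptions(logv2::LogComponent::kControl, logv2::FatalMode::kContinue),
        "Error during global initialization: {error}",
        "error"_attr = status);
    quickExit(EXIT_FAILURE);  // termination stays under the caller's control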
diff --git a/src/mongo/db/catalog/database_impl.cpp b/src/mongo/db/catalog/database_impl.cpp
index c7fa24487fd..5ea7746c8ce 100644
--- a/src/mongo/db/catalog/database_impl.cpp
+++ b/src/mongo/db/catalog/database_impl.cpp
@@ -642,10 +642,10 @@ Collection* DatabaseImpl::createCollection(OperationContext* opCtx,
     bool generatedUUID = false;
     if (!optionsWithUUID.uuid) {
         if (!canAcceptWrites) {
-            std::string msg = str::stream()
-                << "Attempted to create a new collection " << nss << " without a UUID";
-            LOGV2_FATAL(20329, "{msg}", "msg"_attr = msg);
-            uasserted(ErrorCodes::InvalidOptions, msg);
+            LOGV2_ERROR_OPTIONS(20329,
+                                {logv2::UserAssertAfterLog(ErrorCodes::InvalidOptions)},
+                                "Attempted to create a new collection {nss} without a UUID",
+                                "nss"_attr = nss);
         } else {
             optionsWithUUID.uuid.emplace(CollectionUUID::gen());
             generatedUUID = true;
diff --git a/src/mongo/db/catalog/index_builds_manager.cpp b/src/mongo/db/catalog/index_builds_manager.cpp
index 303b89a1e17..1388bf5b4f8 100644
--- a/src/mongo/db/catalog/index_builds_manager.cpp
+++ b/src/mongo/db/catalog/index_builds_manager.cpp
@@ -163,11 +163,10 @@ StatusWith<std::pair<long long, long long>> IndexBuildsManager::startBuildingInd
         auto validStatus = validateBSON(data.data(), data.size(), BSONVersion::kLatest);
         if (!validStatus.isOK()) {
             if (repair == RepairData::kNo) {
-                LOGV2_FATAL(20349,
+                LOGV2_FATAL(31396,
                             "Invalid BSON detected at {id}: {validStatus}",
                             "id"_attr = id,
                             "validStatus"_attr = redact(validStatus));
-                fassertFailed(31396);
             }
             LOGV2_WARNING(20348,
                           "Invalid BSON detected at {id}: {validStatus}. Deleting.",
diff --git a/src/mongo/db/catalog/index_catalog_impl.cpp b/src/mongo/db/catalog/index_catalog_impl.cpp
index 336f392a83a..977fb228cad 100644
--- a/src/mongo/db/catalog/index_catalog_impl.cpp
+++ b/src/mongo/db/catalog/index_catalog_impl.cpp
@@ -387,13 +387,12 @@ IndexCatalogEntry* IndexCatalogImpl::createIndexEntry(OperationContext* opCtx,
                                                       CreateIndexEntryFlags flags) {
     Status status = _isSpecOk(opCtx, descriptor->infoObj());
     if (!status.isOK()) {
-        LOGV2_FATAL(20378,
-                    "Found an invalid index {descriptor_infoObj} on the {collection_ns} "
-                    "collection: {status}",
-                    "descriptor_infoObj"_attr = descriptor->infoObj(),
-                    "collection_ns"_attr = _collection->ns(),
-                    "status"_attr = redact(status));
-        fassertFailedNoTrace(28782);
+        LOGV2_FATAL_NOTRACE(28782,
+                            "Found an invalid index {descriptor_infoObj} on the {collection_ns} "
+                            "collection: {status}",
+                            "descriptor_infoObj"_attr = descriptor->infoObj(),
+                            "collection_ns"_attr = _collection->ns(),
+                            "status"_attr = redact(status));
     }

     auto engine = opCtx->getServiceContext()->getStorageEngine();
diff --git a/src/mongo/db/catalog/rename_collection.cpp b/src/mongo/db/catalog/rename_collection.cpp
index 6002748f7cf..fbd3ea020f3 100644
--- a/src/mongo/db/catalog/rename_collection.cpp
+++ b/src/mongo/db/catalog/rename_collection.cpp
@@ -254,13 +254,12 @@ Status renameCollectionAndDropTarget(OperationContext* opCtx,
             // replicated writes are not enabled.
             if (!renameOpTime.isNull()) {
                 LOGV2_FATAL(
-                    20403,
+                    40616,
                     "renameCollection: {source} to {target} (with dropTarget=true) - unexpected "
                     "renameCollection oplog entry written to the oplog with optime {renameOpTime}",
                     "source"_attr = source,
                     "target"_attr = target,
                     "renameOpTime"_attr = renameOpTime);
-                fassertFailed(40616);
             }
             renameOpTime = renameOpTimeFromApplyOps;
         }
diff --git a/src/mongo/db/commands/fsync.cpp b/src/mongo/db/commands/fsync.cpp
index 29cbe97a7fa..5c348f7031e 100644
--- a/src/mongo/db/commands/fsync.cpp
+++ b/src/mongo/db/commands/fsync.cpp
@@ -433,8 +433,7 @@ void FSyncLockThread::run() {
             }
         }
     } catch (const std::exception& e) {
-        LOGV2_FATAL(20474, "FSyncLockThread exception: {e_what}", "e_what"_attr = e.what());
-        fassertFailed(40350);
+        LOGV2_FATAL(40350, "FSyncLockThread exception: {e_what}", "e_what"_attr = e.what());
     }
 }
diff --git a/src/mongo/db/db.cpp b/src/mongo/db/db.cpp
index 3768b09dc68..e7d20abf4ba 100644
--- a/src/mongo/db/db.cpp
+++ b/src/mongo/db/db.cpp
@@ -445,10 +445,11 @@ ExitCode _initAndListen(ServiceContext* serviceContext, int listenPort) {
     try {
         nonLocalDatabases = repairDatabasesAndCheckVersion(startupOpCtx.get());
     } catch (const ExceptionFor<ErrorCodes::MustDowngrade>& error) {
-        LOGV2_FATAL_OPTIONS(20573,
-                            {logComponentV1toV2(LogComponent::kControl)},
-                            "** IMPORTANT: {error_toStatus_reason}",
-                            "error_toStatus_reason"_attr = error.toStatus().reason());
+        LOGV2_FATAL_OPTIONS(
+            20573,
+            logv2::LogOptions(logv2::LogComponent::kControl, logv2::FatalMode::kContinue),
+            "** IMPORTANT: {error_toStatus_reason}",
+            "error_toStatus_reason"_attr = error.toStatus().reason());
         exitCleanly(EXIT_NEED_DOWNGRADE);
     }
@@ -1248,11 +1249,12 @@ int mongoDbMain(int argc, char* argv[], char** envp) {
     Status status = mongo::runGlobalInitializers(argc, argv, envp);
     if (!status.isOK()) {
-        LOGV2_FATAL_OPTIONS(20574,
-                            {logComponentV1toV2(LogComponent::kControl)},
-                            "Error during global initialization: {error}",
-                            "Error during global initialization",
-                            "error"_attr = status);
+        LOGV2_FATAL_OPTIONS(
+            20574,
+            logv2::LogOptions(logv2::LogComponent::kControl, logv2::FatalMode::kContinue),
+            "Error during global initialization: {error}",
+            "Error during global initialization",
+            "error"_attr = status);
         quickExit(EXIT_FAILURE);
     }
@@ -1265,11 +1267,12 @@ int mongoDbMain(int argc, char* argv[], char** envp) {
             return serviceContext;
         } catch (...) {
             auto cause = exceptionToStatus();
-            LOGV2_FATAL_OPTIONS(20575,
-                                {logComponentV1toV2(LogComponent::kControl)},
-                                "Error creating service context: {error}",
-                                "Error creating service context",
-                                "error"_attr = redact(cause));
+            LOGV2_FATAL_OPTIONS(
+                20575,
+                logv2::LogOptions(logv2::LogComponent::kControl, logv2::FatalMode::kContinue),
+                "Error creating service context: {error}",
+                "Error creating service context",
+                "error"_attr = redact(cause));
             quickExit(EXIT_FAILURE);
         }
     }();
diff --git a/src/mongo/db/index/index_access_method.cpp b/src/mongo/db/index/index_access_method.cpp
index 54b572545f9..67483da9ec7 100644
--- a/src/mongo/db/index/index_access_method.cpp
+++ b/src/mongo/db/index/index_access_method.cpp
@@ -633,12 +633,12 @@ Status AbstractIndexAccessMethod::commitBulk(OperationContext* opCtx,
         if (kDebugBuild || _descriptor->unique()) {
             cmpData = data.first.compareWithoutRecordId(previousKey);
             if (cmpData < 0) {
-                LOGV2_FATAL(20687,
-                            "expected the next key{data_first} to be greater than or equal to the "
-                            "previous key{previousKey}",
-                            "data_first"_attr = data.first.toString(),
-                            "previousKey"_attr = previousKey.toString());
-                fassertFailedNoTrace(31171);
+                LOGV2_FATAL_NOTRACE(
+                    31171,
+                    "expected the next key{data_first} to be greater than or equal to the "
+                    "previous key{previousKey}",
+                    "data_first"_attr = data.first.toString(),
+                    "previousKey"_attr = previousKey.toString());
             }
         }
diff --git a/src/mongo/db/mongod_options.cpp b/src/mongo/db/mongod_options.cpp
index 78a8befe022..5087f08864a 100644
--- a/src/mongo/db/mongod_options.cpp
+++ b/src/mongo/db/mongod_options.cpp
@@ -137,7 +137,7 @@ bool handlePreValidationMongodOptions(const moe::Environment& params,
     }
     if (params.count("master") || params.count("slave")) {
-        LOGV2_FATAL(20881, "Master/slave replication is no longer supported");
+        LOGV2_FATAL_CONTINUE(20881, "Master/slave replication is no longer supported");
         return false;
     }
diff --git a/src/mongo/db/repair_database.cpp b/src/mongo/db/repair_database.cpp
index dcb7bb8b31f..1ea89de9c86 100644
--- a/src/mongo/db/repair_database.cpp
+++ b/src/mongo/db/repair_database.cpp
@@ -210,10 +210,10 @@ Status repairDatabase(OperationContext* opCtx, StorageEngine* engine, const std:
     auto status = repairCollections(opCtx, engine, dbName);
     if (!status.isOK()) {
-        LOGV2_FATAL(21030,
-                    "Failed to repair database {dbName}: {status_reason}",
-                    "dbName"_attr = dbName,
-                    "status_reason"_attr = status.reason());
+        LOGV2_FATAL_CONTINUE(21030,
+                             "Failed to repair database {dbName}: {status_reason}",
+                             "dbName"_attr = dbName,
+                             "status_reason"_attr = status.reason());
     }

     try {
@@ -239,8 +239,8 @@ Status repairDatabase(OperationContext* opCtx, StorageEngine* engine, const std:
         // have a UUID.
         throw;
     } catch (...) {
-        LOGV2_FATAL(21031,
-                    "Unexpected exception encountered while reopening database after repair.");
+        LOGV2_FATAL_CONTINUE(
+            21031, "Unexpected exception encountered while reopening database after repair.");
         std::terminate();  // Logs additional info about the specific error.
     }
diff --git a/src/mongo/db/repair_database_and_check_version.cpp b/src/mongo/db/repair_database_and_check_version.cpp
index 66437a78b4f..20e9ec50172 100644
--- a/src/mongo/db/repair_database_and_check_version.cpp
+++ b/src/mongo/db/repair_database_and_check_version.cpp
@@ -267,11 +267,10 @@ void checkForCappedOplog(OperationContext* opCtx, Database* db) {
     Collection* oplogCollection =
         CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, oplogNss);
     if (oplogCollection && !oplogCollection->isCapped()) {
-        LOGV2_FATAL(21022,
-                    "The oplog collection {oplogNss} is not capped; a capped oplog is a "
-                    "requirement for replication to function.",
-                    "oplogNss"_attr = oplogNss);
-        fassertFailedNoTrace(40115);
+        LOGV2_FATAL_NOTRACE(40115,
+                            "The oplog collection {oplogNss} is not capped; a capped oplog is a "
+                            "requirement for replication to function.",
+                            "oplogNss"_attr = oplogNss);
     }
 }
@@ -531,13 +530,12 @@ bool repairDatabasesAndCheckVersion(OperationContext* opCtx) {
             // current version of mongod with --repair and then proceed with normal startup.
             status = {ErrorCodes::MustUpgrade, status.reason()};
         }
-        LOGV2_FATAL(21023,
-                    "Unable to start mongod due to an incompatibility with the data files and"
-                    " this version of mongod: {status}",
-                    "status"_attr = redact(status));
-        LOGV2_FATAL(21024,
-                    "Please consult our documentation when trying to downgrade to a previous"
-                    " major release");
+        LOGV2_FATAL_CONTINUE(
+            21023,
+            "Unable to start mongod due to an incompatibility with the data files and"
+            " this version of mongod: {status}. Please consult our documentation when trying "
+            "to downgrade to a previous major release",
+            "status"_attr = redact(status));
         quickExit(EXIT_NEED_UPGRADE);
         MONGO_UNREACHABLE;
     }
@@ -642,11 +640,9 @@ bool repairDatabasesAndCheckVersion(OperationContext* opCtx) {
     // Fail to start up if there is no featureCompatibilityVersion document and there are non-local
     // databases present and we do not need to start up via initial sync.
     if (!fcvDocumentExists && nonLocalDatabases && !needInitialSync) {
-        LOGV2_FATAL(
-            21025,
-            "Unable to start up mongod due to missing featureCompatibilityVersion document.");
-        LOGV2_FATAL(21026, "Please run with --repair to restore the document.");
-        fassertFailedNoTrace(40652);
+        LOGV2_FATAL_NOTRACE(40652,
+                            "Unable to start up mongod due to missing featureCompatibilityVersion "
+                            "document. Please run with --repair to restore the document.");
     }

     LOGV2_DEBUG(21017, 1, "done repairDatabases");
diff --git a/src/mongo/db/repl/bgsync.cpp b/src/mongo/db/repl/bgsync.cpp
index fe6e56e5fff..0d25c62ca9f 100644
--- a/src/mongo/db/repl/bgsync.cpp
+++ b/src/mongo/db/repl/bgsync.cpp
@@ -216,11 +216,10 @@ void BackgroundSync::_run() {
             sleepmillis(100);  // sleep a bit to keep from hammering this thread with temp. errors.
         } catch (const std::exception& e2) {
             // redact(std::exception&) doesn't work
-            LOGV2_FATAL(21127,
+            LOGV2_FATAL(28546,
                         "sync producer exception: {error}",
                         "Sync producer error",
                         "error"_attr = redact(e2.what()));
-            fassertFailed(28546);
         }
     }
     // No need to reset optimes here because we are shutting down.
@@ -784,10 +783,10 @@ void BackgroundSync::_runRollbackViaRecoverToCheckpoint(
     if (status.isOK()) {
         LOGV2(21105, "Rollback successful");
     } else if (status == ErrorCodes::UnrecoverableRollbackError) {
-        LOGV2_FATAL(21128,
-                    "Rollback failed with unrecoverable error: {error}",
-                    "Rollback failed with unrecoverable error",
-                    "error"_attr = status);
+        LOGV2_FATAL_CONTINUE(21128,
+                             "Rollback failed with unrecoverable error: {error}",
+                             "Rollback failed with unrecoverable error",
+                             "error"_attr = status);
         fassertFailedWithStatusNoTrace(50666, status);
     } else {
         LOGV2_WARNING(21124,
@@ -907,12 +906,11 @@ OpTime BackgroundSync::_readLastAppliedOpTime(OperationContext* opCtx) {
     } catch (const ExceptionForCat<ErrorCategory::ShutdownError>&) {
         throw;
     } catch (const DBException& ex) {
-        LOGV2_FATAL(21129,
+        LOGV2_FATAL(18904,
                     "Problem reading {namespace}: {error}",
                     "Problem reading from namespace",
                     "namespace"_attr = NamespaceString::kRsOplogNamespace.ns(),
                     "error"_attr = redact(ex));
-        fassertFailed(18904);
     }

     OplogEntry parsedEntry(oplogEntry);
diff --git a/src/mongo/db/repl/drop_pending_collection_reaper.cpp b/src/mongo/db/repl/drop_pending_collection_reaper.cpp
index 53feefcee92..5b610eca52c 100644
--- a/src/mongo/db/repl/drop_pending_collection_reaper.cpp
+++ b/src/mongo/db/repl/drop_pending_collection_reaper.cpp
@@ -89,13 +89,13 @@ void DropPendingCollectionReaper::addDropPendingNamespace(
     };
     if (std::find_if(lowerBound, upperBound, matcher) != upperBound) {
-        LOGV2_FATAL(21156,
-                    "Failed to add drop-pending collection {dropPendingNamespace} with drop optime "
-                    "{dropOpTime}: duplicate optime and namespace pair.",
-                    "Failed to add drop-pending collection: duplicate optime and namespace pair",
-                    "dropPendingNamespace"_attr = dropPendingNamespace,
-                    "dropOpTime"_attr = dropOpTime);
-        fassertFailedNoTrace(40448);
+        LOGV2_FATAL_NOTRACE(
+            40448,
+            "Failed to add drop-pending collection {dropPendingNamespace} with drop optime "
+            "{dropOpTime}: duplicate optime and namespace pair.",
+            "Failed to add drop-pending collection: duplicate optime and namespace pair",
+            "dropPendingNamespace"_attr = dropPendingNamespace,
+            "dropOpTime"_attr = dropOpTime);
     }

     _dropPendingNamespaces.insert(std::make_pair(dropOpTime, dropPendingNamespace));
diff --git a/src/mongo/db/repl/initial_syncer.cpp b/src/mongo/db/repl/initial_syncer.cpp
index c48e3108a44..f45de57cc67 100644
--- a/src/mongo/db/repl/initial_syncer.cpp
+++ b/src/mongo/db/repl/initial_syncer.cpp
@@ -1702,7 +1702,8 @@ void InitialSyncer::_finishInitialSyncAttempt(const StatusWith<OpTimeAndWallTime
     // Check if need to do more retries.
     if (_stats.failedInitialSyncAttempts >= _stats.maxFailedInitialSyncAttempts) {
-        LOGV2_FATAL(21202, "The maximum number of retries have been exhausted for initial sync");
+        LOGV2_FATAL_CONTINUE(21202,
+                             "The maximum number of retries have been exhausted for initial sync");

         initialSyncFailures.increment();
diff --git a/src/mongo/db/repl/oplog.cpp b/src/mongo/db/repl/oplog.cpp
index 1def8eb05e6..eb9b8953415 100644
--- a/src/mongo/db/repl/oplog.cpp
+++ b/src/mongo/db/repl/oplog.cpp
@@ -272,11 +272,10 @@ void _logOpsInner(OperationContext* opCtx,
     Status result = oplogCollection->insertDocumentsForOplog(opCtx, records, timestamps);
     if (!result.isOK()) {
-        LOGV2_FATAL(21263,
+        LOGV2_FATAL(17322,
                     "write to oplog failed: {error}",
                     "Write to oplog failed",
                     "error"_attr = result.toString());
-        fassertFailed(17322);
     }

     // Set replCoord last optime only after we're sure the WUOW didn't abort and roll back.
diff --git a/src/mongo/db/repl/oplog_applier.cpp b/src/mongo/db/repl/oplog_applier.cpp
index b24f3fd0636..23322f12665 100644
--- a/src/mongo/db/repl/oplog_applier.cpp
+++ b/src/mongo/db/repl/oplog_applier.cpp
@@ -75,8 +75,7 @@ Future<void> OplogApplier::startup() {
 void OplogApplier::shutdown() {
     // Shutdown will hang if this failpoint is enabled.
     if (globalFailPointRegistry().find("rsSyncApplyStop")->shouldFail()) {
-        LOGV2_FATAL(21227, "Turn off rsSyncApplyStop before attempting clean shutdown");
-        fassertFailedNoTrace(40304);
+        LOGV2_FATAL_NOTRACE(40304, "Turn off rsSyncApplyStop before attempting clean shutdown");
     }

     stdx::lock_guard<Latch> lock(_mutex);
diff --git a/src/mongo/db/repl/oplog_applier_impl.cpp b/src/mongo/db/repl/oplog_applier_impl.cpp
index a7fe3e6d7ed..71c74ba725c 100644
--- a/src/mongo/db/repl/oplog_applier_impl.cpp
+++ b/src/mongo/db/repl/oplog_applier_impl.cpp
@@ -618,7 +618,7 @@ StatusWith<OpTime> OplogApplierImpl::_applyOplogBatch(OperationContext* opCtx,
     invariant(_replCoord);
     if (_replCoord->getApplierState() == ReplicationCoordinator::ApplierState::Stopped) {
-        LOGV2_FATAL(21234, "Attempting to replicate ops while primary");
+        LOGV2_FATAL_CONTINUE(21234, "Attempting to replicate ops while primary");
         return {ErrorCodes::CannotApplyOplogWhilePrimary,
                 "attempting to replicate ops while primary"};
     }
@@ -708,19 +708,19 @@ StatusWith<OpTime> OplogApplierImpl::_applyOplogBatch(OperationContext* opCtx,
         for (auto it = statusVector.cbegin(); it != statusVector.cend(); ++it) {
             const auto& status = *it;
             if (!status.isOK()) {
-                LOGV2_FATAL(21235,
-                            "Failed to apply batch of operations. Number of operations in "
-                            "batch: {numOperationsInBatch}. First operation: {firstOperation}. "
-                            "Last operation: "
-                            "{lastOperation}. Oplog application failed in writer thread "
-                            "{failedWriterThread}: {error}",
-                            "Failed to apply batch of operations",
-                            "numOperationsInBatch"_attr = ops.size(),
-                            "firstOperation"_attr = redact(ops.front().toBSON()),
-                            "lastOperation"_attr = redact(ops.back().toBSON()),
-                            "failedWriterThread"_attr =
-                                std::distance(statusVector.cbegin(), it),
-                            "error"_attr = redact(status));
+                LOGV2_FATAL_CONTINUE(
+                    21235,
+                    "Failed to apply batch of operations. Number of operations in "
+                    "batch: {numOperationsInBatch}. First operation: {firstOperation}. "
+                    "Last operation: "
+                    "{lastOperation}. Oplog application failed in writer thread "
+                    "{failedWriterThread}: {error}",
+                    "Failed to apply batch of operations",
+                    "numOperationsInBatch"_attr = ops.size(),
+                    "firstOperation"_attr = redact(ops.front().toBSON()),
+                    "lastOperation"_attr = redact(ops.back().toBSON()),
+                    "failedWriterThread"_attr = std::distance(statusVector.cbegin(), it),
+                    "error"_attr = redact(status));
                 return status;
             }
         }
@@ -741,10 +741,10 @@ StatusWith<OpTime> OplogApplierImpl::_applyOplogBatch(OperationContext* opCtx,
                   "point is disabled");
         while (MONGO_unlikely(pauseBatchApplicationBeforeCompletion.shouldFail())) {
             if (inShutdown()) {
-                LOGV2_FATAL(21236,
-                            "Turn off pauseBatchApplicationBeforeCompletion before attempting "
-                            "clean shutdown");
-                fassertFailedNoTrace(50798);
+                LOGV2_FATAL_NOTRACE(
+                    50798,
+                    "Turn off pauseBatchApplicationBeforeCompletion before attempting "
+                    "clean shutdown");
             }
             sleepmillis(100);
         }
@@ -1067,11 +1067,11 @@ Status OplogApplierImpl::applyOplogBatchPerWorker(OperationContext* opCtx,
                     continue;
                 }
-                LOGV2_FATAL(21237,
-                            "Error applying operation ({oplogEntry}): {error}",
-                            "Error applying operation",
-                            "oplogEntry"_attr = redact(entry.toBSON()),
-                            "error"_attr = causedBy(redact(status)));
+                LOGV2_FATAL_CONTINUE(21237,
+                                     "Error applying operation ({oplogEntry}): {error}",
+                                     "Error applying operation",
+                                     "oplogEntry"_attr = redact(entry.toBSON()),
+                                     "error"_attr = causedBy(redact(status)));
                 return status;
             }
         } catch (const DBException& e) {
@@ -1082,11 +1082,11 @@ Status OplogApplierImpl::applyOplogBatchPerWorker(OperationContext* opCtx,
                 continue;
             }
-            LOGV2_FATAL(21238,
-                        "writer worker caught exception: {error} on: {oplogEntry}",
-                        "Writer worker caught exception",
-                        "error"_attr = redact(e),
-                        "oplogEntry"_attr = redact(entry.toBSON()));
+            LOGV2_FATAL_CONTINUE(21238,
+                                 "writer worker caught exception: {error} on: {oplogEntry}",
+                                 "Writer worker caught exception",
+                                 "error"_attr = redact(e),
+                                 "oplogEntry"_attr = redact(entry.toBSON()));
             return e.toStatus();
         }
     }
diff --git a/src/mongo/db/repl/oplog_batcher.cpp b/src/mongo/db/repl/oplog_batcher.cpp
index 7e393915d58..cfb3d58c863 100644
--- a/src/mongo/db/repl/oplog_batcher.cpp
+++ b/src/mongo/db/repl/oplog_batcher.cpp
@@ -180,11 +180,11 @@ StatusWith<std::vector<OplogEntry>> OplogBatcher::getNextApplierBatch(
         // Check for oplog version change.
         if (entry.getVersion() != OplogEntry::kOplogVersion) {
             static constexpr char message[] = "Unexpected oplog version";
-            LOGV2_FATAL(21240,
-                        message,
-                        "expectedVersion"_attr = OplogEntry::kOplogVersion,
-                        "foundVersion"_attr = entry.getVersion(),
-                        "oplogEntry"_attr = redact(entry.toBSON()));
+            LOGV2_FATAL_CONTINUE(21240,
+                                 message,
+                                 "expectedVersion"_attr = OplogEntry::kOplogVersion,
+                                 "foundVersion"_attr = entry.getVersion(),
+                                 "oplogEntry"_attr = redact(entry.toBSON()));
             return {ErrorCodes::BadValue,
                     str::stream() << message << ", expected oplog version "
                                   << OplogEntry::kOplogVersion << ", found version "
diff --git a/src/mongo/db/repl/replication_coordinator_impl.cpp b/src/mongo/db/repl/replication_coordinator_impl.cpp
index f0eb982f285..f14a2e8301d 100644
--- a/src/mongo/db/repl/replication_coordinator_impl.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl.cpp
@@ -445,11 +445,10 @@ bool ReplicationCoordinatorImpl::_startLoadLocalConfig(OperationContext* opCtx)
     StatusWith<LastVote> lastVote = _externalState->loadLocalLastVoteDocument(opCtx);
     if (!lastVote.isOK()) {
-        LOGV2_FATAL(21429,
-                    "Error loading local voted for document at startup; {error}",
-                    "Error loading local voted for document at startup",
-                    "error"_attr = lastVote.getStatus());
-        fassertFailedNoTrace(40367);
+        LOGV2_FATAL_NOTRACE(40367,
+                            "Error loading local voted for document at startup; {error}",
+                            "Error loading local voted for document at startup",
+                            "error"_attr = lastVote.getStatus());
     }
     if (lastVote.getValue().getTerm() == OpTime::kInitialTerm) {
         // This log line is checked in unit tests.
@@ -468,11 +467,10 @@ bool ReplicationCoordinatorImpl::_startLoadLocalConfig(OperationContext* opCtx)
         auto initializingStatus = _replicationProcess->initializeRollbackID(opCtx);
         fassert(40424, initializingStatus);
     } else {
-        LOGV2_FATAL(21430,
-                    "Error loading local Rollback ID document at startup; {error}",
-                    "Error loading local Rollback ID document at startup",
-                    "error"_attr = status);
-        fassertFailedNoTrace(40428);
+        LOGV2_FATAL_NOTRACE(40428,
+                            "Error loading local Rollback ID document at startup; {error}",
+                            "Error loading local Rollback ID document at startup",
+                            "error"_attr = status);
     }
 }
@@ -488,27 +486,26 @@ bool ReplicationCoordinatorImpl::_startLoadLocalConfig(OperationContext* opCtx)
     status = localConfig.initialize(cfg.getValue());
     if (!status.isOK()) {
         if (status.code() == ErrorCodes::RepairedReplicaSetNode) {
-            LOGV2_FATAL(
-                21431,
+            LOGV2_FATAL_NOTRACE(
+                50923,
                 "This instance has been repaired and may contain modified replicated data that "
                 "would not match other replica set members. To see your repaired data, start "
                 "mongod without the --replSet option. When you are finished recovering your "
                 "data and would like to perform a complete re-sync, please refer to the "
                 "documentation here: "
                 "https://docs.mongodb.com/manual/tutorial/resync-replica-set-member/");
-            fassertFailedNoTrace(50923);
         }
-        LOGV2_ERROR(21414,
-                    "Locally stored replica set configuration does not parse; See "
-                    "http://www.mongodb.org/dochub/core/recover-replica-set-from-invalid-config "
-                    "for information on how to recover from this. Got \"{error}\" while parsing "
-                    "{config}",
-                    "Locally stored replica set configuration does not parse; See "
-                    "hhttp://www.mongodb.org/dochub/core/recover-replica-set-from-invalid-config "
-                    "for information on how to recover from this",
-                    "error"_attr = status,
-                    "config"_attr = cfg.getValue());
-        fassertFailedNoTrace(28545);
+        LOGV2_FATAL_NOTRACE(
+            28545,
+            "Locally stored replica set configuration does not parse; See "
+            "http://www.mongodb.org/dochub/core/recover-replica-set-from-invalid-config "
+            "for information on how to recover from this. Got \"{error}\" while parsing "
+            "{config}",
+            "Locally stored replica set configuration does not parse; See "
+            "hhttp://www.mongodb.org/dochub/core/recover-replica-set-from-invalid-config "
+            "for information on how to recover from this",
+            "error"_attr = status,
+            "config"_attr = cfg.getValue());
     }

     // Read the last op from the oplog after cleaning up any partially applied batches.
@@ -1162,9 +1159,9 @@ void ReplicationCoordinatorImpl::signalDrainComplete(OperationContext* opCtx,
         // occurred after the node became primary and so the concurrent reconfig has updated
         // the term appropriately.
         if (reconfigStatus != ErrorCodes::ConfigurationInProgress) {
-            LOGV2_FATAL(4508101,
-                        "Reconfig on stepup failed for unknown reasons",
-                        "error"_attr = reconfigStatus);
+            LOGV2_FATAL_CONTINUE(4508101,
+                                 "Reconfig on stepup failed for unknown reasons",
+                                 "error"_attr = reconfigStatus);
             fassertFailedWithStatus(31477, reconfigStatus);
         }
     }
@@ -3245,11 +3242,10 @@ Status ReplicationCoordinatorImpl::doReplSetReconfig(OperationContext* opCtx,
                           "Cannot run replSetReconfig because the node is currently updating "
                           "its configuration");
         default:
-            LOGV2_FATAL(21432,
+            LOGV2_FATAL(18914,
                         "Unexpected _rsConfigState {_rsConfigState}",
                         "Unexpected _rsConfigState",
                         "_rsConfigState"_attr = int(_rsConfigState));
-            fassertFailed(18914);
     }

     invariant(_rsConfig.isInitialized());
@@ -3854,11 +3850,10 @@ void ReplicationCoordinatorImpl::_performPostMemberStateUpdateAction(
             _startElectSelfV1(StartElectionReasonEnum::kElectionTimeout);
             break;
         default:
-            LOGV2_FATAL(21433,
+            LOGV2_FATAL(26010,
                         "Unknown post member state update action {action}",
                         "Unknown post member state update action",
                         "action"_attr = static_cast<int>(action));
-            fassertFailed(26010);
     }
 }
diff --git a/src/mongo/db/repl/replication_coordinator_impl_elect_v1.cpp b/src/mongo/db/repl/replication_coordinator_impl_elect_v1.cpp
index a598e513995..847879422f9 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_elect_v1.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_elect_v1.cpp
@@ -110,12 +110,11 @@ void ReplicationCoordinatorImpl::_startElectSelfV1_inlock(StartElectionReasonEnu
             _topCoord->processLoseElection();
             return;
         default:
-            LOGV2_FATAL(21452,
+            LOGV2_FATAL(28641,
                         "Entered replica set election code while in illegal config state "
                         "{rsConfigState}",
                         "Entered replica set election code while in illegal config state",
                         "rsConfigState"_attr = int(_rsConfigState));
-            fassertFailed(28641);
     }

     auto finishedEvent = _makeEvent();
diff --git a/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp b/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp
index a3312ce0731..1b056553be9 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp
@@ -520,12 +520,11 @@ void ReplicationCoordinatorImpl::_scheduleHeartbeatReconfig_inlock(const ReplSet
         case kConfigPreStart:
         case kConfigStartingUp:
         case kConfigReplicationDisabled:
-            LOGV2_FATAL(21491,
+            LOGV2_FATAL(18807,
                         "Reconfiguration request occurred while _rsConfigState == "
                         "{_rsConfigState}; aborting.",
                         "Aborting reconfiguration request",
                         "_rsConfigState"_attr = int(_rsConfigState));
-            fassertFailed(18807);
     }
     _setConfigState_inlock(kConfigHBReconfiguring);
     invariant(!_rsConfig.isInitialized() ||
diff --git a/src/mongo/db/repl/replication_recovery.cpp b/src/mongo/db/repl/replication_recovery.cpp
index 968ec117a0f..2275e0b5d76 100644
--- a/src/mongo/db/repl/replication_recovery.cpp
+++ b/src/mongo/db/repl/replication_recovery.cpp
@@ -157,21 +157,20 @@ public:
             attrs.add("oplogApplicationEndPoint", _oplogApplicationEndPoint->toBSON());
         }
-        LOGV2_FATAL(
-            21559, "Couldn't find any entries in the oplog, which should be impossible", attrs);
-        fassertFailedNoTrace(40293);
+        LOGV2_FATAL_NOTRACE(
+            40293, "Couldn't find any entries in the oplog, which should be impossible", attrs);
     }

     auto firstTimestampFound =
         fassert(40291, OpTime::parseFromOplogEntry(_cursor->nextSafe())).getTimestamp();
     if (firstTimestampFound != _oplogApplicationStartPoint) {
-        LOGV2_FATAL(21560,
-                    "Oplog entry at {oplogApplicationStartPoint} is missing; actual entry "
-                    "found is {firstTimestampFound}",
-                    "Oplog entry at oplogApplicationStartPoint is missing",
-                    "oplogApplicationStartPoint"_attr = _oplogApplicationStartPoint.toBSON(),
-                    "firstTimestampFound"_attr = firstTimestampFound.toBSON());
-        fassertFailedNoTrace(40292);
+        LOGV2_FATAL_NOTRACE(
+            40292,
+            "Oplog entry at {oplogApplicationStartPoint} is missing; actual entry "
+            "found is {firstTimestampFound}",
+            "Oplog entry at oplogApplicationStartPoint is missing",
+            "oplogApplicationStartPoint"_attr = _oplogApplicationStartPoint.toBSON(),
+            "firstTimestampFound"_attr = firstTimestampFound.toBSON());
     }
 }
@@ -237,10 +236,10 @@ private:
 boost::optional<Timestamp> recoverFromOplogPrecursor(OperationContext* opCtx,
                                                      StorageInterface* storageInterface) {
     if (!storageInterface->supportsRecoveryTimestamp(opCtx->getServiceContext())) {
-        LOGV2_FATAL(21561,
-                    "Cannot recover from the oplog with a storage engine that does not support "
-                    "recover to stable timestamp");
-        fassertFailedNoTrace(50805);
+        LOGV2_FATAL_NOTRACE(
+            50805,
+            "Cannot recover from the oplog with a storage engine that does not support "
+            "recover to stable timestamp");
     }

     // A non-existent recoveryTS means the checkpoint is unstable. If the recoveryTS exists but
@@ -248,9 +247,8 @@
     // happen.
     auto recoveryTS = storageInterface->getRecoveryTimestamp(opCtx->getServiceContext());
     if (recoveryTS && recoveryTS->isNull()) {
-        LOGV2_FATAL(21562,
-                    "Cannot recover from the oplog with stable checkpoint at null timestamp");
-        fassertFailedNoTrace(50806);
+        LOGV2_FATAL_NOTRACE(
+            50806, "Cannot recover from the oplog with stable checkpoint at null timestamp");
     }

     return recoveryTS;
@@ -267,50 +265,48 @@ void ReplicationRecoveryImpl::_assertNoRecoveryNeededOnUnstableCheckpoint(Operat
     invariant(!_storageInterface->getRecoveryTimestamp(opCtx->getServiceContext()));

     if (_consistencyMarkers->getInitialSyncFlag(opCtx)) {
-        LOGV2_FATAL(21563, "Unexpected recovery needed, initial sync flag set");
-        fassertFailedNoTrace(31362);
+        LOGV2_FATAL_NOTRACE(31362, "Unexpected recovery needed, initial sync flag set");
     }

     const auto truncateAfterPoint = _consistencyMarkers->getOplogTruncateAfterPoint(opCtx);
     if (!truncateAfterPoint.isNull()) {
-        LOGV2_FATAL(21564,
-                    "Unexpected recovery needed, oplog requires truncation. Truncate after point: "
-                    "{oplogTruncateAfterPoint}",
-                    "Unexpected recovery needed, oplog requires truncation",
-                    "oplogTruncateAfterPoint"_attr = truncateAfterPoint.toString());
-        fassertFailedNoTrace(31363);
+        LOGV2_FATAL_NOTRACE(
+            31363,
+            "Unexpected recovery needed, oplog requires truncation. Truncate after point: "
+            "{oplogTruncateAfterPoint}",
+            "Unexpected recovery needed, oplog requires truncation",
+            "oplogTruncateAfterPoint"_attr = truncateAfterPoint.toString());
     }

     auto topOfOplogSW = _getTopOfOplog(opCtx);
     if (!topOfOplogSW.isOK()) {
-        LOGV2_FATAL(21565,
-                    "Recovery not possible, no oplog found: {error}",
-                    "Recovery not possible, no oplog found",
-                    "error"_attr = topOfOplogSW.getStatus());
-        fassertFailedNoTrace(31364);
+        LOGV2_FATAL_NOTRACE(31364,
+                            "Recovery not possible, no oplog found: {error}",
+                            "Recovery not possible, no oplog found",
+                            "error"_attr = topOfOplogSW.getStatus());
     }
     const auto topOfOplog = topOfOplogSW.getValue();

     const auto appliedThrough = _consistencyMarkers->getAppliedThrough(opCtx);
     if (!appliedThrough.isNull() && appliedThrough != topOfOplog) {
-        LOGV2_FATAL(21566,
-                    "Unexpected recovery needed, appliedThrough is not at top of oplog, indicating "
-                    "oplog has not been fully applied. appliedThrough: {appliedThrough}",
-                    "Unexpected recovery needed, appliedThrough is not at top of oplog, indicating "
-                    "oplog has not been fully applied",
-                    "appliedThrough"_attr = appliedThrough.toString());
-        fassertFailedNoTrace(31365);
+        LOGV2_FATAL_NOTRACE(
+            31365,
+            "Unexpected recovery needed, appliedThrough is not at top of oplog, indicating "
+            "oplog has not been fully applied. appliedThrough: {appliedThrough}",
+            "Unexpected recovery needed, appliedThrough is not at top of oplog, indicating "
+            "oplog has not been fully applied",
+            "appliedThrough"_attr = appliedThrough.toString());
     }

     const auto minValid = _consistencyMarkers->getMinValid(opCtx);
     if (minValid > topOfOplog) {
-        LOGV2_FATAL(21567,
-                    "Unexpected recovery needed, top of oplog is not consistent. topOfOplog: "
-                    "{topOfOplog}, minValid: {minValid}",
-                    "Unexpected recovery needed, top of oplog is not consistent",
-                    "topOfOplog"_attr = topOfOplog,
-                    "minValid"_attr = minValid);
-        fassertFailedNoTrace(31366);
+        LOGV2_FATAL_NOTRACE(
+            31366,
+            "Unexpected recovery needed, top of oplog is not consistent. topOfOplog: "
+            "{topOfOplog}, minValid: {minValid}",
+            "Unexpected recovery needed, top of oplog is not consistent",
+            "topOfOplog"_attr = topOfOplog,
+            "minValid"_attr = minValid);
     }
 }
@@ -337,9 +333,8 @@ void ReplicationRecoveryImpl::recoverFromOplogAsStandalone(OperationContext* opC
                 "Not doing any oplog recovery since there is an unstable checkpoint that is up "
                 "to date");
     } else {
-        LOGV2_FATAL(21568,
-                    "Cannot use 'recoverFromOplogAsStandalone' without a stable checkpoint");
-        fassertFailedNoTrace(31229);
+        LOGV2_FATAL_NOTRACE(
+            31229, "Cannot use 'recoverFromOplogAsStandalone' without a stable checkpoint");
     }
 }
@@ -359,8 +354,8 @@ void ReplicationRecoveryImpl::recoverFromOplogUpTo(OperationContext* opCtx, Time
     auto recoveryTS = recoverFromOplogPrecursor(opCtx, _storageInterface);
     if (!recoveryTS) {
-        LOGV2_FATAL(21569, "Cannot use 'recoverToOplogTimestamp' without a stable checkpoint");
-        fassertFailedNoTrace(31399);
+        LOGV2_FATAL_NOTRACE(31399,
+                            "Cannot use 'recoverToOplogTimestamp' without a stable checkpoint");
     }

     // This may take an IS lock on the oplog collection.
@@ -462,10 +457,10 @@ void ReplicationRecoveryImpl::recoverFromOplog(OperationContext* opCtx,
             _recoverFromUnstableCheckpoint(opCtx, appliedThrough, topOfOplog);
         }
     } catch (...) {
-        LOGV2_FATAL(21570,
-                    "Caught exception during replication recovery: {error}",
-                    "Caught exception during replication recovery",
-                    "error"_attr = exceptionToStatus());
+        LOGV2_FATAL_CONTINUE(21570,
+                             "Caught exception during replication recovery: {error}",
+                             "Caught exception during replication recovery",
+                             "error"_attr = exceptionToStatus());
         std::terminate();
     }
@@ -572,13 +567,12 @@ void ReplicationRecoveryImpl::_applyToEndOfOplog(OperationContext* opCtx,
             "No oplog entries to apply for recovery. Start point is at the top of the oplog");
         return;  // We've applied all the valid oplog we have.
     } else if (oplogApplicationStartPoint > topOfOplog) {
-        LOGV2_FATAL(
-            21571,
+        LOGV2_FATAL_NOTRACE(
+            40313,
             "Applied op {oplogApplicationStartPoint} not found. Top of oplog is {topOfOplog}.",
             "Applied op oplogApplicationStartPoint not found",
             "oplogApplicationStartPoint"_attr = oplogApplicationStartPoint.toBSON(),
             "topOfOplog"_attr = topOfOplog.toBSON());
-        fassertFailedNoTrace(40313);
     }

     Timestamp appliedUpTo = _applyOplogOperations(opCtx, oplogApplicationStartPoint, topOfOplog);
@@ -679,13 +673,12 @@ void ReplicationRecoveryImpl::_truncateOplogTo(OperationContext* opCtx,
         _storageInterface->findOplogEntryLessThanOrEqualToTimestamp(
             opCtx, oplogCollection, truncateAfterTimestamp);
     if (!truncateAfterOplogEntryBSON) {
-        LOGV2_FATAL(21572,
-                    "Reached end of oplog looking for an oplog entry lte to "
-                    "{oplogTruncateAfterPoint} but did not find one",
-                    "Reached end of oplog looking for an oplog entry lte to "
-                    "oplogTruncateAfterPoint but did not find one",
-                    "oplogTruncateAfterPoint"_attr = truncateAfterTimestamp.toBSON());
-        fassertFailedNoTrace(40296);
+        LOGV2_FATAL_NOTRACE(40296,
+                            "Reached end of oplog looking for an oplog entry lte to "
+                            "{oplogTruncateAfterPoint} but did not find one",
+                            "Reached end of oplog looking for an oplog entry lte to "
+                            "oplogTruncateAfterPoint but did not find one",
+                            "oplogTruncateAfterPoint"_attr = truncateAfterTimestamp.toBSON());
     }

     // Parse the response.
diff --git a/src/mongo/db/repl/rollback_impl.cpp b/src/mongo/db/repl/rollback_impl.cpp
index 88ecd1e849a..53c91047487 100644
--- a/src/mongo/db/repl/rollback_impl.cpp
+++ b/src/mongo/db/repl/rollback_impl.cpp
@@ -1025,13 +1025,12 @@ StatusWith<RollBackLocalOperations::RollbackCommonPoint> RollbackImpl::_findComm
         if (commonPointOpTime.getTimestamp() < *stableTimestamp) {
             // This is an fassert rather than an invariant, since it can happen if the server was
             // recently upgraded to enableMajorityReadConcern=true.
-            LOGV2_FATAL(21644,
-                        "Common point must be at least stable timestamp, common point: "
-                        "{commonPoint}, stable timestamp: {stableTimestamp}",
-                        "Common point must be at least stable timestamp",
-                        "commonPoint"_attr = commonPointOpTime.getTimestamp(),
-                        "stableTimestamp"_attr = *stableTimestamp);
-            fassertFailedNoTrace(51121);
+            LOGV2_FATAL_NOTRACE(51121,
+                                "Common point must be at least stable timestamp, common point: "
+                                "{commonPoint}, stable timestamp: {stableTimestamp}",
+                                "Common point must be at least stable timestamp",
+                                "commonPoint"_attr = commonPointOpTime.getTimestamp(),
+                                "stableTimestamp"_attr = *stableTimestamp);
         }

         return commonPointSW.getValue();
@@ -1100,14 +1099,15 @@ boost::optional<BSONObj> RollbackImpl::_findDocumentById(OperationContext* opCtx
     } else if (document.getStatus().code() == ErrorCodes::NoSuchKey) {
         return boost::none;
     } else {
-        LOGV2_FATAL(21645,
-                    "Rollback failed to read document with {id} in namespace {namespace} with uuid "
-                    "{uuid}{error}",
-                    "Rollback failed to read document",
-                    "id"_attr = redact(id),
-                    "namespace"_attr = nss.ns(),
-                    "uuid"_attr = uuid.toString(),
-                    "error"_attr = causedBy(document.getStatus()));
+        LOGV2_FATAL_CONTINUE(
+            21645,
+            "Rollback failed to read document with {id} in namespace {namespace} with uuid "
+            "{uuid}{error}",
+            "Rollback failed to read document",
+            "id"_attr = redact(id),
+            "namespace"_attr = nss.ns(),
+            "uuid"_attr = uuid.toString(),
+            "error"_attr = causedBy(document.getStatus()));
         fassert(50751, document.getStatus());
     }
@@ -1219,16 +1219,15 @@ void RollbackImpl::_transitionFromRollbackToSecondary(OperationContext* opCtx) {
     auto status = _replicationCoordinator->setFollowerMode(MemberState::RS_SECONDARY);
     if (!status.isOK()) {
-        LOGV2_FATAL(21646,
-                    "Failed to transition into {targetState}; expected to be in "
-                    "state {expectedState}; found self in "
-                    "{actualState} {error}",
-                    "Failed to perform replica set state transition",
-                    "targetState"_attr = MemberState(MemberState::RS_SECONDARY),
-                    "expectedState"_attr = MemberState(MemberState::RS_ROLLBACK),
-                    "actualState"_attr = _replicationCoordinator->getMemberState(),
-                    "error"_attr = causedBy(status));
-        fassertFailedNoTrace(40408);
+        LOGV2_FATAL_NOTRACE(40408,
+                            "Failed to transition into {targetState}; expected to be in "
+                            "state {expectedState}; found self in "
+                            "{actualState} {error}",
+                            "Failed to perform replica set state transition",
+                            "targetState"_attr = MemberState(MemberState::RS_SECONDARY),
+                            "expectedState"_attr = MemberState(MemberState::RS_ROLLBACK),
+                            "actualState"_attr = _replicationCoordinator->getMemberState(),
+                            "error"_attr = causedBy(status));
     }
 }
diff --git a/src/mongo/db/repl/rs_rollback.cpp b/src/mongo/db/repl/rs_rollback.cpp
index 323afa75b64..f62d8d39998 100644
--- a/src/mongo/db/repl/rs_rollback.cpp
+++ b/src/mongo/db/repl/rs_rollback.cpp
@@ -393,11 +393,11 @@ Status rollback_internal::updateFixUpInfoFromLocalOplogEntry(OperationContext* o
                 string indexName;
                 auto status = bsonExtractStringField(obj, "index", &indexName);
                 if (!status.isOK()) {
-                    LOGV2_FATAL(21731,
-                                "Missing index name in dropIndexes operation on rollback, "
-                                "document: {oplogEntry}",
-                                "Missing index name in dropIndexes operation on rollback",
-                                "oplogEntry"_attr = redact(oplogEntry.toBSON()));
+                    LOGV2_FATAL_CONTINUE(21731,
+                                         "Missing index name in dropIndexes operation on rollback, "
+                                         "document: {oplogEntry}",
+                                         "Missing index name in dropIndexes operation on rollback",
+                                         "oplogEntry"_attr = redact(oplogEntry.toBSON()));
                     throw RSFatalException(
                         "Missing index name in dropIndexes operation on rollback.");
                 }
@@ -433,11 +433,12 @@ Status rollback_internal::updateFixUpInfoFromLocalOplogEntry(OperationContext* o
                 string indexName;
                 auto status = bsonExtractStringField(obj, "name", &indexName);
                 if (!status.isOK()) {
-                    LOGV2_FATAL(21732,
-                                "Missing index name in createIndexes operation on rollback, "
-                                "document: {oplogEntry}",
-                                "Missing index name in createIndexes operation on rollback",
-                                "oplogEntry"_attr = redact(oplogEntry.toBSON()));
+                    LOGV2_FATAL_CONTINUE(
+                        21732,
+                        "Missing index name in createIndexes operation on rollback, "
+                        "document: {oplogEntry}",
+                        "Missing index name in createIndexes operation on rollback",
+                        "oplogEntry"_attr = redact(oplogEntry.toBSON()));
                     throw RSFatalException(
                         "Missing index name in createIndexes operation on rollback.");
                 }
@@ -689,7 +690,7 @@ Status rollback_internal::updateFixUpInfoFromLocalOplogEntry(OperationContext* o
                 }
                 // Some collMod fields cannot be rolled back, such as the index field.
                 static constexpr char message[] = "Cannot roll back a collMod command";
-                LOGV2_FATAL(21733, message, "oplogEntry"_attr = redact(obj));
+                LOGV2_FATAL_CONTINUE(21733, message, "oplogEntry"_attr = redact(obj));
                 throw RSFatalException(message);
             }
             return Status::OK();
@@ -725,7 +726,8 @@ Status rollback_internal::updateFixUpInfoFromLocalOplogEntry(OperationContext* o
                 if (operations.type() != Array) {
                     static constexpr char message[] = "Expected applyOps argument to be an array";
-                    LOGV2_FATAL(21734, message, "operations"_attr = redact(operations));
+                    LOGV2_FATAL_CONTINUE(
+                        21734, message, "operations"_attr = redact(operations));
                     return Status(ErrorCodes::UnrecoverableRollbackError,
                                   str::stream() << message << "; found " << redact(operations));
                 }
@@ -733,7 +735,8 @@ Status rollback_internal::updateFixUpInfoFromLocalOplogEntry(OperationContext* o
                     if (subopElement.type() != Object) {
                         static constexpr char message[] =
                             "Expected applyOps operations to be of Object type";
-                        LOGV2_FATAL(21735, message, "operation"_attr = redact(subopElement));
+                        LOGV2_FATAL_CONTINUE(
+                            21735, message, "operation"_attr = redact(subopElement));
                         return Status(ErrorCodes::UnrecoverableRollbackError,
                                       str::stream()
                                           << message << ", but found " << redact(subopElement));
@@ -768,10 +771,10 @@ Status rollback_internal::updateFixUpInfoFromLocalOplogEntry(OperationContext* o
             }
             default: {
                 static constexpr char message[] = "Can't roll back this command yet";
-                LOGV2_FATAL(21736,
-                            message,
-                            "commandName"_attr = first.fieldName(),
-                            "command"_attr = redact(obj));
+                LOGV2_FATAL_CONTINUE(21736,
+                                     message,
+                                     "commandName"_attr = first.fieldName(),
+                                     "command"_attr = redact(obj));
                 throw RSFatalException(str::stream()
                                        << message << ": cmdname = " << first.fieldName());
             }
@@ -785,10 +788,10 @@ Status rollback_internal::updateFixUpInfoFromLocalOplogEntry(OperationContext* o
         doc._id = oplogEntry.getIdElement();
         if (doc._id.eoo()) {
             static constexpr char message[] = "Cannot roll back op with no _id";
-            LOGV2_FATAL(21737,
-                        message,
-                        "namespace"_attr = nss.ns(),
-                        "oplogEntry"_attr = redact(oplogEntry.toBSON()));
+            LOGV2_FATAL_CONTINUE(21737,
+                                 message,
+                                 "namespace"_attr = nss.ns(),
+                                 "oplogEntry"_attr = redact(oplogEntry.toBSON()));
             throw RSFatalException(str::stream() << message << ". ns: " << nss.ns());
         }
         fixUpInfo.docsToRefetch.insert(doc);
@@ -872,7 +875,7 @@ void dropIndex(OperationContext* opCtx,
     if (entry->isReady(opCtx)) {
         auto status = indexCatalog->dropIndex(opCtx, indexDescriptor);
         if (!status.isOK()) {
-            LOGV2_FATAL(21738,
+            LOGV2_ERROR(21738,
                         "Rollback failed to drop index {indexName} in {namespace}: {error}",
                         "Rollback failed to drop index",
                         "indexName"_attr = indexName,
@@ -882,7 +885,7 @@ void dropIndex(OperationContext* opCtx,
     } else {
         auto status = indexCatalog->dropUnfinishedIndex(opCtx, indexDescriptor);
         if (!status.isOK()) {
-            LOGV2_FATAL(
+            LOGV2_ERROR(
                 21739,
                 "Rollback failed to drop unfinished index {indexName} in {namespace}: {error}",
                 "Rollback failed to drop unfinished index",
@@ -1034,7 +1037,7 @@ void dropCollection(OperationContext* opCtx,
         while (PlanExecutor::ADVANCED == (execState = exec->getNext(&curObj, nullptr))) {
             auto status = removeSaver.goingToDelete(curObj);
             if (!status.isOK()) {
-                LOGV2_FATAL(
+                LOGV2_FATAL_CONTINUE(
                     21740,
                     "Rolling back createCollection on {namespace} failed to write document to "
                     "remove saver file: {error}",
@@ -1057,20 +1060,22 @@ void dropCollection(OperationContext* opCtx,
         if (execState == PlanExecutor::FAILURE &&
             WorkingSetCommon::isValidStatusMemberObject(curObj)) {
             Status errorStatus = WorkingSetCommon::getMemberObjectStatus(curObj);
-            LOGV2_FATAL(21741,
-                        "Rolling back createCollection on {namespace} failed with {error}. A "
-                        "full resync is necessary.",
-                        "Rolling back createCollection failed. A full resync is necessary",
-                        "namespace"_attr = nss,
-                        "error"_attr = redact(errorStatus));
+            LOGV2_FATAL_CONTINUE(
+                21741,
+                "Rolling back createCollection on {namespace} failed with {error}. A "
+                "full resync is necessary.",
+                "Rolling back createCollection failed. A full resync is necessary",
+                "namespace"_attr = nss,
+                "error"_attr = redact(errorStatus));
             throw RSFatalException(
                 "Rolling back createCollection failed. A full resync is necessary.");
         } else {
-            LOGV2_FATAL(21742,
-                        "Rolling back createCollection on {namespace} failed. A full resync is "
-                        "necessary.",
-                        "Rolling back createCollection failed. A full resync is necessary",
-                        "namespace"_attr = nss);
+            LOGV2_FATAL_CONTINUE(
+                21742,
+                "Rolling back createCollection on {namespace} failed. A full resync is "
+                "necessary.",
+                "Rolling back createCollection failed. A full resync is necessary",
+                "namespace"_attr = nss);
             throw RSFatalException(
                 "Rolling back createCollection failed. A full resync is necessary.");
         }
@@ -1105,7 +1110,7 @@ void renameOutOfTheWay(OperationContext* opCtx, RenameCollectionInfo info, Datab
     // namespace.
     auto tmpNameResult = db->makeUniqueCollectionNamespace(opCtx, "rollback.tmp%%%%%");
     if (!tmpNameResult.isOK()) {
-        LOGV2_FATAL(
+        LOGV2_FATAL_CONTINUE(
             21743,
             "Unable to generate temporary namespace to rename collection {renameTo} "
             "out of the way. {error}",
@@ -1135,11 +1140,12 @@ void renameOutOfTheWay(OperationContext* opCtx, RenameCollectionInfo info, Datab
     auto renameStatus = renameCollectionForRollback(opCtx, tempNss, uuid);
     if (!renameStatus.isOK()) {
-        LOGV2_FATAL(21744,
-                    "Unable to rename collection {renameTo} out of the way to {tempNamespace}",
-                    "Unable to rename renameTo collection out of the way to a temporary namespace",
-                    "renameTo"_attr = info.renameTo,
-                    "tempNamespace"_attr = tempNss);
+        LOGV2_FATAL_CONTINUE(
+            21744,
+            "Unable to rename collection {renameTo} out of the way to {tempNamespace}",
+            "Unable to rename renameTo collection out of the way to a temporary namespace",
+            "renameTo"_attr = info.renameTo,
+            "tempNamespace"_attr = tempNss);
         throw RSFatalException("Unable to rename collection out of the way");
     }
 }
@@ -1177,22 +1183,23 @@ void rollbackRenameCollection(OperationContext* opCtx, UUID uuid, RenameCollecti
         status = renameCollectionForRollback(opCtx, info.renameTo, uuid);
         if (!status.isOK()) {
-            LOGV2_FATAL(21745,
-                        "Rename collection failed to roll back twice. We were unable to rename "
-                        "collection {renameFrom} to {renameTo}. {error}",
-                        "Rename collection failed to roll back twice",
-                        "renameFrom"_attr = info.renameFrom,
-                        "renameTo"_attr = info.renameTo,
-                        "error"_attr = status.toString());
+            LOGV2_FATAL_CONTINUE(
+                21745,
+                "Rename collection failed to roll back twice. We were unable to rename "
+                "collection {renameFrom} to {renameTo}. {error}",
+                "Rename collection failed to roll back twice",
+                "renameFrom"_attr = info.renameFrom,
+                "renameTo"_attr = info.renameTo,
+                "error"_attr = status.toString());
             throw RSFatalException(
                 "Rename collection failed to roll back twice. We were unable to rename "
                 "the collection.");
         }
     } else if (!status.isOK()) {
-        LOGV2_FATAL(21746,
-                    "Unable to roll back renameCollection command: {error}",
-                    "Unable to roll back renameCollection command",
-                    "error"_attr = status.toString());
+        LOGV2_FATAL_CONTINUE(21746,
+                             "Unable to roll back renameCollection command: {error}",
+                             "Unable to roll back renameCollection command",
+                             "error"_attr = status.toString());
         throw RSFatalException("Unable to rollback renameCollection command");
     }
@@ -1754,7 +1761,7 @@ void rollback_internal::syncFixUp(OperationContext* opCtx,
                     if (found) {
                         auto status = removeSaver->goingToDelete(obj);
                         if (!status.isOK()) {
-                            LOGV2_FATAL(
+                            LOGV2_FATAL_CONTINUE(
                                 21747,
                                 "Rollback cannot write document in namespace {namespace} to "
                                 "archive file: {error}",
@@ -1994,11 +2001,10 @@ void rollback_internal::syncFixUp(OperationContext* opCtx,
     Status status = AuthorizationManager::get(opCtx->getServiceContext())->initialize(opCtx);
     if (!status.isOK()) {
-        LOGV2_FATAL(21748,
-                    "Failed to reinitialize auth data after rollback: {error}",
-                    "Failed to reinitialize auth data after rollback",
-                    "error"_attr = redact(status));
-        fassertFailedNoTrace(40496);
+        LOGV2_FATAL_NOTRACE(40496,
+                            "Failed to reinitialize auth data after rollback: {error}",
+                            "Failed to reinitialize auth data after rollback",
+                            "error"_attr = redact(status));
     }

     // If necessary, clear the memory of existing sessions.
@@ -2121,11 +2127,10 @@ void rollback(OperationContext* opCtx,
     // WARNING: these statuses sometimes have location codes which are lost with uassertStatusOK
     // so we need to check here first.
     if (ErrorCodes::UnrecoverableRollbackError == status.code()) {
-        LOGV2_FATAL(21749,
-                    "Unable to complete rollback. A full resync may be needed: {error}",
-                    "Unable to complete rollback. A full resync may be needed",
-                    "error"_attr = redact(status));
-        fassertFailedNoTrace(40507);
+        LOGV2_FATAL_NOTRACE(40507,
+                            "Unable to complete rollback. A full resync may be needed: {error}",
+                            "Unable to complete rollback. A full resync may be needed",
+                            "error"_attr = redact(status));
     }

     // In other cases, we log the message contained in the error status and retry later.
@@ -2149,10 +2154,9 @@ void rollback(OperationContext* opCtx,
     // will be unable to successfully perform any more rollback attempts. The knowledge of these
     // stopped index builds gets lost after the first attempt.
     if (stoppedIndexBuilds.size()) {
-        LOGV2_FATAL(4655801,
-                    "Index builds stopped prior to rollback cannot be restarted by "
-                    "subsequent rollback attempts");
-        fassertFailedNoTrace(4655800);
+        LOGV2_FATAL_NOTRACE(4655800,
+                            "Index builds stopped prior to rollback cannot be restarted by "
+                            "subsequent rollback attempts");
     }

     // Sleep a bit to allow upstream node to coalesce, if that was the cause of the failure. If
@@ -2175,22 +2179,21 @@ void rollback(OperationContext* opCtx,
     // then we must shut down to clear the in-memory ShardingState associated with the
     // shardIdentity document.
     if (ShardIdentityRollbackNotifier::get(opCtx)->didRollbackHappen()) {
-        LOGV2_FATAL(21750,
-                    "shardIdentity document rollback detected. Shutting down to clear "
-                    "in-memory sharding state. Restarting this process should safely return it "
-                    "to a healthy state");
-        fassertFailedNoTrace(40498);
+        LOGV2_FATAL_NOTRACE(
+            40498,
+            "shardIdentity document rollback detected. Shutting down to clear "
+            "in-memory sharding state. Restarting this process should safely return it "
+            "to a healthy state");
     }

     auto status = replCoord->setFollowerMode(MemberState::RS_RECOVERING);
     if (!status.isOK()) {
-        LOGV2_FATAL(21751,
-                    "Failed to perform replica set state transition",
-                    "targetState"_attr = MemberState(MemberState::RS_RECOVERING),
-                    "expectedState"_attr = MemberState(MemberState::RS_ROLLBACK),
-                    "actualState"_attr = replCoord->getMemberState(),
-                    "error"_attr = status);
-        fassertFailedNoTrace(40499);
+        LOGV2_FATAL_NOTRACE(40499,
+                            "Failed to perform replica set state transition",
+                            "targetState"_attr = MemberState(MemberState::RS_RECOVERING),
+                            "expectedState"_attr = MemberState(MemberState::RS_ROLLBACK),
+                            "actualState"_attr = replCoord->getMemberState(),
+                            "error"_attr = status);
     }
 }
diff --git a/src/mongo/db/repl/session_update_tracker.cpp b/src/mongo/db/repl/session_update_tracker.cpp
index edf8654a0af..77fc9d2197f 100644
--- a/src/mongo/db/repl/session_update_tracker.cpp
+++ b/src/mongo/db/repl/session_update_tracker.cpp
@@ -191,18 +191,16 @@ void SessionUpdateTracker::_updateSessionInfo(const OplogEntry& entry) {
         return;
     }

-    LOGV2_FATAL(23792,
-                "Entry for session {lsid} has txnNumber {sessionInfo_getTxnNumber} < "
-                "{existingSessionInfo_getTxnNumber}",
-                "lsid"_attr = lsid->toBSON(),
-                "sessionInfo_getTxnNumber"_attr = *sessionInfo.getTxnNumber(),
-                "existingSessionInfo_getTxnNumber"_attr = *existingSessionInfo.getTxnNumber());
-    LOGV2_FATAL(23793, "New oplog entry: {entry}", "entry"_attr = redact(entry.toString()));
-    LOGV2_FATAL(23794,
-                "Existing oplog entry: {iter_second}",
-                "iter_second"_attr = redact(iter->second.toString()));
-
-    fassertFailedNoTrace(50843);
+    LOGV2_FATAL_NOTRACE(50843,
+                        "Entry for session {lsid} has txnNumber {sessionInfo_getTxnNumber} < "
+                        "{existingSessionInfo_getTxnNumber}. New oplog entry: {newEntry}, Existing "
+                        "oplog entry: {existingEntry}",
+                        "lsid"_attr = lsid->toBSON(),
+                        "sessionInfo_getTxnNumber"_attr = *sessionInfo.getTxnNumber(),
+                        "existingSessionInfo_getTxnNumber"_attr =
+                            *existingSessionInfo.getTxnNumber(),
+                        "newEntry"_attr = redact(entry.toString()),
+                        "existingEntry"_attr = redact(iter->second.toString()));
 }

 std::vector<OplogEntry> SessionUpdateTracker::_flush(const OplogEntry& entry) {
diff --git a/src/mongo/db/repl/topology_coordinator.cpp b/src/mongo/db/repl/topology_coordinator.cpp
index f801a283c9d..c901b7098f8 100644
--- a/src/mongo/db/repl/topology_coordinator.cpp
+++ b/src/mongo/db/repl/topology_coordinator.cpp
@@ -217,9 +217,8 @@ HostAndPort TopologyCoordinator::chooseNewSyncSource(Date_t now,
          !_rsConfig.isChainingAllowed())) {
         if (readPreference == ReadPreference::SecondaryOnly) {
             LOGV2_FATAL(
-                3873102,
+                3873103,
                 "Sync source read preference 'secondaryOnly' with chaining disabled is not valid.");
-            fassertFailed(3873103);
         }
         _syncSource = _choosePrimaryAsSyncSource(now, lastOpTimeFetched);
         if (_syncSource.empty()) {
@@ -444,12 +443,11 @@ boost::optional<HostAndPort> TopologyCoordinator::_chooseSyncSourceInitialStep(D
         const auto& data = sfp.getData();
         const auto hostAndPortElem = data["hostAndPort"];
         if (!hostAndPortElem) {
-            LOGV2_FATAL(21839,
+            LOGV2_FATAL(50835,
                         "'forceSyncSoureCandidate' parameter set with invalid host and port: "
                         "{failpointData}",
                         "'forceSyncSoureCandidate' parameter set with invalid host and port",
                         "failpointData"_attr = data);
-            fassertFailed(50835);
         }
         const auto hostAndPort = HostAndPort(hostAndPortElem.checkAndGetStringData());
@@ -2368,11 +2366,10 @@ std::string TopologyCoordinator::_getUnelectableReasonString(const UnelectableRe
         ss << "node is not a member of a valid replica set configuration";
     }
     if (!hasWrittenToStream) {
-        LOGV2_FATAL(21842,
+        LOGV2_FATAL(26011,
                     "Invalid UnelectableReasonMask value 0x{value}",
                     "Invalid UnelectableReasonMask value",
                     "value"_attr = integerToHex(ur));
-        fassertFailed(26011);
     }
     ss << " (mask 0x" << integerToHex(ur) << ")";
     return ss;
diff --git a/src/mongo/db/repl/topology_coordinator_v1_test.cpp b/src/mongo/db/repl/topology_coordinator_v1_test.cpp
index 6d7c0813eea..7363a4e1385 100644
--- a/src/mongo/db/repl/topology_coordinator_v1_test.cpp
+++ b/src/mongo/db/repl/topology_coordinator_v1_test.cpp
@@ -797,7 +797,7 @@ TEST_F(TopoCoordTest, ChooseOnlyPrimaryAsSyncSourceWhenChainingIsDisallowed) {
     ASSERT(getTopoCoord().getSyncSourceAddress().empty());
 }

-DEATH_TEST_F(TopoCoordTest, SecondaryOnlyAssertsWhenChainingNotAllowed, "3873102") {
+DEATH_TEST_F(TopoCoordTest, SecondaryOnlyAssertsWhenChainingNotAllowed, "3873103") {
     updateConfig(BSON("_id"
                       << "rs0"
                       << "version" << 1 << "settings" << BSON("chainingAllowed" << false)
diff --git a/src/mongo/db/service_context.cpp b/src/mongo/db/service_context.cpp
index 708edcbfd2f..9e261ba5b0e 100644
--- a/src/mongo/db/service_context.cpp
+++ b/src/mongo/db/service_context.cpp
@@ -110,7 +110,7 @@ ServiceContext::ServiceContext()
 ServiceContext::~ServiceContext() {
     stdx::lock_guard<Latch> lk(_mutex);
     for (const auto& client : _clients) {
-        LOGV2_FATAL(23828,
+        LOGV2_ERROR(23828,
                     "Client {client_desc} still exists while destroying "
                     "ServiceContext@{reinterpret_cast_uint64_t_this}",
                     "client_desc"_attr = client->desc(),
diff --git a/src/mongo/db/service_entry_point_common.cpp b/src/mongo/db/service_entry_point_common.cpp
index cc745c0aa38..7681eedaf6e 100644
--- a/src/mongo/db/service_entry_point_common.cpp
+++ b/src/mongo/db/service_entry_point_common.cpp
@@ -529,14 +529,15 @@ void _abortUnpreparedOrStashPreparedTransaction(
         txnParticipant->abortTransaction(opCtx);
     } catch (...) {
         // It is illegal for this to throw so we catch and log this here for diagnosability.
-        LOGV2_FATAL(21974,
-                    "Caught exception during transaction "
-                    "{opCtx_getTxnNumber}{isPrepared_stash_abort}{opCtx_getLogicalSessionId}: "
-                    "{exceptionToStatus}",
-                    "opCtx_getTxnNumber"_attr = opCtx->getTxnNumber(),
-                    "isPrepared_stash_abort"_attr = (isPrepared ? " stash " : " abort "),
-                    "opCtx_getLogicalSessionId"_attr = opCtx->getLogicalSessionId()->toBSON(),
-                    "exceptionToStatus"_attr = exceptionToStatus());
+        LOGV2_FATAL_CONTINUE(
+            21974,
+            "Caught exception during transaction "
+            "{opCtx_getTxnNumber}{isPrepared_stash_abort}{opCtx_getLogicalSessionId}: "
+            "{exceptionToStatus}",
+            "opCtx_getTxnNumber"_attr = opCtx->getTxnNumber(),
+            "isPrepared_stash_abort"_attr = (isPrepared ? " stash " : " abort "),
+            "opCtx_getLogicalSessionId"_attr = opCtx->getLogicalSessionId()->toBSON(),
+            "exceptionToStatus"_attr = exceptionToStatus());
         std::terminate();
     }
 }
diff --git a/src/mongo/db/storage/kv/kv_drop_pending_ident_reaper.cpp b/src/mongo/db/storage/kv/kv_drop_pending_ident_reaper.cpp
index 08d8e2ec8a6..dd4d1dadd21 100644
--- a/src/mongo/db/storage/kv/kv_drop_pending_ident_reaper.cpp
+++ b/src/mongo/db/storage/kv/kv_drop_pending_ident_reaper.cpp
@@ -57,13 +57,12 @@ void KVDropPendingIdentReaper::addDropPendingIdent(const Timestamp& dropTimestam
         info.ident = ident.toString();
         _dropPendingIdents.insert(std::make_pair(dropTimestamp, info));
     } else {
-        LOGV2_FATAL(22238,
-                    "Failed to add drop-pending ident {ident} ({nss}) with drop timestamp "
-                    "{dropTimestamp}: duplicate timestamp and ident pair.",
-                    "ident"_attr = ident,
-                    "nss"_attr = nss,
-                    "dropTimestamp"_attr = dropTimestamp);
-        fassertFailedNoTrace(51023);
+        LOGV2_FATAL_NOTRACE(51023,
+                            "Failed to add drop-pending ident {ident} ({nss}) with drop timestamp "
+                            "{dropTimestamp}: duplicate timestamp and ident pair.",
+                            "ident"_attr = ident,
+                            "nss"_attr = nss,
+                            "dropTimestamp"_attr = dropTimestamp);
     }
 }
@@ -121,14 +120,14 @@ void KVDropPendingIdentReaper::dropIdentsOlderThan(OperationContext* opCtx, cons
             WriteUnitOfWork wuow(opCtx);
             auto status = _engine->dropIdent(opCtx, opCtx->recoveryUnit(), ident);
             if (!status.isOK()) {
-                LOGV2_FATAL(22239,
-                            "Failed to remove drop-pending ident {ident}(ns: {nss}) with drop "
-                            "timestamp {dropTimestamp}: {status}",
-                            "ident"_attr = ident,
-                            "nss"_attr = nss,
-                            "dropTimestamp"_attr = dropTimestamp,
-                            "status"_attr = status);
-                fassertFailedNoTrace(51022);
+                LOGV2_FATAL_NOTRACE(
+                    51022,
+                    "Failed to remove drop-pending ident {ident}(ns: {nss}) with drop "
+                    "timestamp {dropTimestamp}: {status}",
+                    "ident"_attr = ident,
+                    "nss"_attr = nss,
+                    "dropTimestamp"_attr = dropTimestamp,
+                    "status"_attr = status);
             }
             wuow.commit();
         }
diff --git a/src/mongo/db/storage/oplog_cap_maintainer_thread.cpp b/src/mongo/db/storage/oplog_cap_maintainer_thread.cpp
index 79160f8abbb..cb6c20ff19e 100644
--- a/src/mongo/db/storage/oplog_cap_maintainer_thread.cpp
+++ b/src/mongo/db/storage/oplog_cap_maintainer_thread.cpp
@@ -75,8 +75,8 @@ bool OplogCapMaintainerThread::_deleteExcessDocuments() {
     } catch (const ExceptionForCat<ErrorCategory::Interruption>&) {
         return false;
     } catch (const std::exception& e) {
-        LOGV2_FATAL(22243, "error in OplogCapMaintainerThread: {e_what}", "e_what"_attr = e.what());
-        fassertFailedNoTrace(!"error in OplogCapMaintainerThread");
+        LOGV2_FATAL_NOTRACE(
+            22243, "error in OplogCapMaintainerThread: {e_what}", "e_what"_attr = e.what());
     } catch (...) {
         fassertFailedNoTrace(!"unknown error in OplogCapMaintainerThread");
     }
diff --git a/src/mongo/db/storage/remove_saver.cpp b/src/mongo/db/storage/remove_saver.cpp
index 73001e7d5d9..df113d1f0f8 100644
--- a/src/mongo/db/storage/remove_saver.cpp
+++ b/src/mongo/db/storage/remove_saver.cpp
@@ -84,52 +84,47 @@ RemoveSaver::~RemoveSaver() {
         size_t resultLen;
         Status status = _protector->finalize(protectedBuffer.get(), protectedSizeMax, &resultLen);
         if (!status.isOK()) {
-            LOGV2_FATAL(23736,
+            LOGV2_FATAL(34350,
                         "Unable to finalize DataProtector while closing RemoveSaver: {status}",
                         "status"_attr = redact(status));
-            fassertFailed(34350);
         }

         _out->write(reinterpret_cast<const char*>(protectedBuffer.get()), resultLen);
         if (_out->fail()) {
-            LOGV2_FATAL(23737,
+            LOGV2_FATAL(34351,
                         "Couldn't write finalized DataProtector data to: {file_string} for remove "
                         "saving: {errnoWithDescription}",
                         "file_string"_attr = _file.string(),
                         "errnoWithDescription"_attr = redact(errnoWithDescription()));
-            fassertFailed(34351);
         }

         protectedBuffer.reset(new uint8_t[protectedSizeMax]);
         status = _protector->finalizeTag(protectedBuffer.get(), protectedSizeMax, &resultLen);
         if (!status.isOK()) {
             LOGV2_FATAL(
-                23738,
+                34352,
                 "Unable to get finalizeTag from DataProtector while closing RemoveSaver: {status}",
                 "status"_attr = redact(status));
-            fassertFailed(34352);
         }

         if (resultLen != _protector->getNumberOfBytesReservedForTag()) {
-            LOGV2_FATAL(23739,
+            LOGV2_FATAL(34353,
                         "Attempted to write tag of size {resultLen} when DataProtector only "
                         "reserved {protector_getNumberOfBytesReservedForTag} bytes",
                         "resultLen"_attr = resultLen,
                         "protector_getNumberOfBytesReservedForTag"_attr =
                             _protector->getNumberOfBytesReservedForTag());
-            fassertFailed(34353);
         }

         _out->seekp(0);
         _out->write(reinterpret_cast<const char*>(protectedBuffer.get()), resultLen);
         if (_out->fail()) {
-            LOGV2_FATAL(23740,
+            LOGV2_FATAL(34354,
                         "Couldn't write finalizeTag from DataProtector to: {file_string} for "
                         "remove saving: {errnoWithDescription}",
                         "file_string"_attr = _file.string(),
                         "errnoWithDescription"_attr = redact(errnoWithDescription()));
-            fassertFailed(34354);
         }
     }
 }
diff --git a/src/mongo/db/storage/storage_engine_init.cpp b/src/mongo/db/storage/storage_engine_init.cpp
index c9040efc0ee..37174e0dc92 100644
--- a/src/mongo/db/storage/storage_engine_init.cpp
+++ b/src/mongo/db/storage/storage_engine_init.cpp
@@ -79,12 +79,11 @@ void initializeStorageEngine(ServiceContext* service, const StorageEngineInitFla
     if (storageGlobalParams.repair) {
         repairObserver->onRepairStarted();
     } else if (repairObserver->isIncomplete()) {
-        LOGV2_FATAL(
-            22272,
+        LOGV2_FATAL_NOTRACE(
+            50922,
             "An incomplete repair has been detected! This is likely because a repair "
             "operation unexpectedly failed before completing.
MongoDB will not start up " "again without --repair."); - fassertFailedNoTrace(50922); } } @@ -209,10 +208,9 @@ void createLockFile(ServiceContext* service) { if (wasUnclean) { if (storageGlobalParams.readOnly) { - LOGV2_FATAL(22273, - "Attempted to open dbpath in readOnly mode, but the server was " - "previously not shut down cleanly."); - fassertFailedNoTrace(34416); + LOGV2_FATAL_NOTRACE(34416, + "Attempted to open dbpath in readOnly mode, but the server was " + "previously not shut down cleanly."); } LOGV2_WARNING(22271, "Detected unclean shutdown - Lock file is not empty.", diff --git a/src/mongo/db/storage/storage_repair_observer.cpp b/src/mongo/db/storage/storage_repair_observer.cpp index 82a8d6f401d..399dd31ce51 100644 --- a/src/mongo/db/storage/storage_repair_observer.cpp +++ b/src/mongo/db/storage/storage_repair_observer.cpp @@ -123,12 +123,11 @@ void StorageRepairObserver::_touchRepairIncompleteFile() { boost::filesystem::ofstream fileStream(_repairIncompleteFilePath); fileStream << "This file indicates that a repair operation is in progress or incomplete."; if (fileStream.fail()) { - LOGV2_FATAL( - 23756, + LOGV2_FATAL_NOTRACE( + 50920, "Failed to write to file {repairIncompleteFilePath_string}: {errnoWithDescription}", "repairIncompleteFilePath_string"_attr = _repairIncompleteFilePath.string(), "errnoWithDescription"_attr = errnoWithDescription()); - fassertFailedNoTrace(50920); } fileStream.close(); @@ -141,11 +140,11 @@ void StorageRepairObserver::_removeRepairIncompleteFile() { boost::filesystem::remove(_repairIncompleteFilePath, ec); if (ec) { - LOGV2_FATAL(23757, - "Failed to remove file {repairIncompleteFilePath_string}: {ec_message}", - "repairIncompleteFilePath_string"_attr = _repairIncompleteFilePath.string(), - "ec_message"_attr = ec.message()); - fassertFailedNoTrace(50921); + LOGV2_FATAL_NOTRACE(50921, + "Failed to remove file {repairIncompleteFilePath_string}: {ec_message}", + "repairIncompleteFilePath_string"_attr = + _repairIncompleteFilePath.string(), + "ec_message"_attr = ec.message()); } fassertNoTrace(50927, fsyncParentDirectory(_repairIncompleteFilePath)); } diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp index b77b695ee53..e38da42c83e 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp @@ -1331,14 +1331,13 @@ private: _typeBits.resetFromBuffer(&br); if (!br.atEof()) { - LOGV2_FATAL(51795, + LOGV2_FATAL(28608, "Unique index cursor seeing multiple records for key {key} in index " "{index} ({uri}) belonging to collection {collection}", "key"_attr = redact(curr(kWantKey)->key), "index"_attr = _idx.indexName(), "uri"_attr = _idx.uri(), "collection"_attr = _idx.collectionNamespace()); - fassertFailed(28608); } } }; diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp index 9a5c5d4a504..714e35a356e 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp @@ -865,17 +865,16 @@ void WiredTigerKVEngine::_openWiredTiger(const std::string& path, const std::str LOGV2_WARNING(22348, "WiredTiger metadata corruption detected"); if (!_inRepairMode) { - LOGV2_FATAL(22362, "{kWTRepairMsg}", "kWTRepairMsg"_attr = kWTRepairMsg); - fassertFailedNoTrace(50944); + LOGV2_FATAL_NOTRACE(50944, "{kWTRepairMsg}", "kWTRepairMsg"_attr = kWTRepairMsg); } } - LOGV2_FATAL(22363, - "Reason: 
{wtRCToStatus_ret_reason}", - "wtRCToStatus_ret_reason"_attr = wtRCToStatus(ret).reason()); - if (!_inRepairMode) { - fassertFailedNoTrace(28595); - } + logv2::FatalMode assertMode = + _inRepairMode ? logv2::FatalMode::kContinue : logv2::FatalMode::kAssertNoTrace; + LOGV2_FATAL_OPTIONS(28595, + {assertMode}, + "Reason: {wtRCToStatus_ret_reason}", + "wtRCToStatus_ret_reason"_attr = wtRCToStatus(ret).reason()); // Always attempt to salvage metadata regardless of error code when in repair mode. @@ -888,11 +887,10 @@ void WiredTigerKVEngine::_openWiredTiger(const std::string& path, const std::str return; } - LOGV2_FATAL(22364, - "{Failed_to_salvage_WiredTiger_metadata_wtRCToStatus_ret_reason}", - "Failed_to_salvage_WiredTiger_metadata_wtRCToStatus_ret_reason"_attr = - "Failed to salvage WiredTiger metadata: " + wtRCToStatus(ret).reason()); - fassertFailedNoTrace(50947); + LOGV2_FATAL_NOTRACE(50947, + "{Failed_to_salvage_WiredTiger_metadata_wtRCToStatus_ret_reason}", + "Failed_to_salvage_WiredTiger_metadata_wtRCToStatus_ret_reason"_attr = + "Failed to salvage WiredTiger metadata: " + wtRCToStatus(ret).reason()); } void WiredTigerKVEngine::cleanShutdown() { @@ -1961,8 +1959,7 @@ bool WiredTigerKVEngine::_canRecoverToStableTimestamp() const { StatusWith<Timestamp> WiredTigerKVEngine::recoverToStableTimestamp(OperationContext* opCtx) { if (!supportsRecoverToStableTimestamp()) { - LOGV2_FATAL(22365, "WiredTiger is configured to not support recover to a stable timestamp"); - fassertFailed(50665); + LOGV2_FATAL(50665, "WiredTiger is configured to not support recover to a stable timestamp"); } if (!_canRecoverToStableTimestamp()) { @@ -2040,9 +2037,8 @@ Timestamp WiredTigerKVEngine::getOldestOpenReadTimestamp() const { boost::optional<Timestamp> WiredTigerKVEngine::getRecoveryTimestamp() const { if (!supportsRecoveryTimestamp()) { - LOGV2_FATAL(22366, + LOGV2_FATAL(50745, "WiredTiger is configured to not support providing a recovery timestamp"); - fassertFailed(50745); } if (_recoveryTimestamp.isNull()) { diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.cpp index 8a0dde07498..52eae581d8b 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.cpp +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.cpp @@ -283,10 +283,9 @@ void WiredTigerRecoveryUnit::assertInActiveTxn() const { if (_isActive()) { return; } - LOGV2_FATAL(22417, + LOGV2_FATAL(28575, "Recovery unit is not active. Current state: {getState}", "getState"_attr = toString(_getState())); - fassertFailed(28575); } boost::optional<int64_t> WiredTigerRecoveryUnit::getOplogVisibilityTs() { diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_util.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_util.cpp index 4dc2b0cc079..35d890b941f 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_util.cpp +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_util.cpp @@ -626,7 +626,7 @@ Status WiredTigerUtil::setTableLogging(WT_SESSION* session, const std::string& u "on"_attr = on); int ret = session->alter(session, uri.c_str(), setting.c_str()); if (ret) { - LOGV2_FATAL(22437, + LOGV2_FATAL(50756, "Failed to update log setting. Uri: {uri} Enable? 
{on} Ret: {ret} MD: " "{existingMetadata} Msg: {session_strerror_session_ret}", "uri"_attr = uri, @@ -634,7 +634,6 @@ Status WiredTigerUtil::setTableLogging(WT_SESSION* session, const std::string& u "ret"_attr = ret, "existingMetadata"_attr = redact(existingMetadata), "session_strerror_session_ret"_attr = session->strerror(session, ret)); - fassertFailed(50756); } return Status::OK(); diff --git a/src/mongo/db/system_index.cpp b/src/mongo/db/system_index.cpp index ad31f6bd2bc..a5af67be4db 100644 --- a/src/mongo/db/system_index.cpp +++ b/src/mongo/db/system_index.cpp @@ -117,10 +117,10 @@ void generateSystemIndexForExistingCollection(OperationContext* opCtx, IndexBuildsCoordinator::get(opCtx)->createIndexes( opCtx, collectionUUID, {indexSpec}, indexConstraints, fromMigrate); } catch (const DBException& e) { - LOGV2_FATAL(22490, - "Failed to regenerate index for {ns}. Exception: {e_what}", - "ns"_attr = ns, - "e_what"_attr = e.what()); + LOGV2_FATAL_CONTINUE(22490, + "Failed to regenerate index for {ns}. Exception: {e_what}", + "ns"_attr = ns, + "e_what"_attr = e.what()); throw; } } diff --git a/src/mongo/db/transaction_participant.cpp b/src/mongo/db/transaction_participant.cpp index 936c89a7b52..03d585b55a1 100644 --- a/src/mongo/db/transaction_participant.cpp +++ b/src/mongo/db/transaction_participant.cpp @@ -101,7 +101,7 @@ void fassertOnRepeatedExecution(const LogicalSessionId& lsid, const repl::OpTime& firstOpTime, const repl::OpTime& secondOpTime) { LOGV2_FATAL( - 22524, + 40526, "Statement id {stmtId} from transaction [ {lsid}:{txnNumber} ] was committed once " "with opTime {firstCommitOpTime} and a second time with opTime {secondCommitOpTime}. This " "indicates possible data corruption or server bug and the process will be " @@ -113,7 +113,6 @@ void fassertOnRepeatedExecution(const LogicalSessionId& lsid, "txnNumber"_attr = txnNumber, "firstCommitOpTime"_attr = firstOpTime, "secondCommitOpTime"_attr = secondOpTime); - fassertFailed(40526); } struct ActiveTransactionHistory { @@ -1105,13 +1104,13 @@ Timestamp TransactionParticipant::Participant::prepareTransaction( } catch (...) { // It is illegal for aborting a prepared transaction to fail for any reason, so we crash // instead. - LOGV2_FATAL(22525, - "Caught exception during abort of prepared transaction " - "{txnNumber} on {lsid}: {error}", - "Caught exception during abort of prepared transaction", - "txnNumber"_attr = opCtx->getTxnNumber(), - "lsid"_attr = _sessionId().toBSON(), - "error"_attr = exceptionToStatus()); + LOGV2_FATAL_CONTINUE(22525, + "Caught exception during abort of prepared transaction " + "{txnNumber} on {lsid}: {error}", + "Caught exception during abort of prepared transaction", + "txnNumber"_attr = opCtx->getTxnNumber(), + "lsid"_attr = _sessionId().toBSON(), + "error"_attr = exceptionToStatus()); std::terminate(); } }); @@ -1431,13 +1430,13 @@ void TransactionParticipant::Participant::commitPreparedTransaction( } catch (...) { // It is illegal for committing a prepared transaction to fail for any reason, other than an // invalid command, so we crash instead. 
-            LOGV2_FATAL(22526,
-                        "Caught exception during commit of prepared transaction {txnNumber} "
-                        "on {lsid}: {error}",
-                        "Caught exception during commit of prepared transaction",
-                        "txnNumber"_attr = opCtx->getTxnNumber(),
-                        "lsid"_attr = _sessionId().toBSON(),
-                        "error"_attr = exceptionToStatus());
+            LOGV2_FATAL_CONTINUE(22526,
+                                 "Caught exception during commit of prepared transaction {txnNumber} "
+                                 "on {lsid}: {error}",
+                                 "Caught exception during commit of prepared transaction",
+                                 "txnNumber"_attr = opCtx->getTxnNumber(),
+                                 "lsid"_attr = _sessionId().toBSON(),
+                                 "error"_attr = exceptionToStatus());
             std::terminate();
         }
     } else {
@@ -1563,14 +1562,15 @@ void TransactionParticipant::Participant::_abortActiveTransaction(
         } catch (...) {
             // It is illegal for aborting a transaction that must write an abort oplog entry to fail
             // after aborting the storage transaction, so we crash instead.
-            LOGV2_FATAL(22527,
-                        "Caught exception during abort of transaction that must write abort oplog "
-                        "entry {txnNumber} on {lsid}: {error}",
-                        "Caught exception during abort of transaction that must write abort oplog "
-                        "entry",
-                        "txnNumber"_attr = opCtx->getTxnNumber(),
-                        "lsid"_attr = _sessionId().toBSON(),
-                        "error"_attr = exceptionToStatus());
+            LOGV2_FATAL_CONTINUE(
+                22527,
+                "Caught exception during abort of transaction that must write abort oplog "
+                "entry {txnNumber} on {lsid}: {error}",
+                "Caught exception during abort of transaction that must write abort oplog "
+                "entry",
+                "txnNumber"_attr = opCtx->getTxnNumber(),
+                "lsid"_attr = _sessionId().toBSON(),
+                "error"_attr = exceptionToStatus());
             std::terminate();
         }
     } else {
diff --git a/src/mongo/db/write_concern.cpp b/src/mongo/db/write_concern.cpp
index 05570734e5e..f7fadb139cc 100644
--- a/src/mongo/db/write_concern.cpp
+++ b/src/mongo/db/write_concern.cpp
@@ -300,9 +300,8 @@ Status waitForWriteConcern(OperationContext* opCtx,
     try {
         switch (writeConcernWithPopulatedSyncMode.syncMode) {
             case WriteConcernOptions::SyncMode::UNSET:
-                LOGV2_FATAL(22550,
+                LOGV2_FATAL(34410,
                             "Attempting to wait on a WriteConcern with an unset sync option");
-                fassertFailed(34410);
            case WriteConcernOptions::SyncMode::NONE:
                break;
            case WriteConcernOptions::SyncMode::FSYNC: {
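Taken together, the hunks above apply one mechanical migration: the old two-step idiom of a LOGV2_FATAL log call followed by a separate fassertFailed()/fassertFailedNoTrace() call collapses into a single macro whose log id is the former fassert code. The following is a minimal illustrative sketch, not part of the patch: the function, its parameters, doCriticalWork(), and the component definition are invented for the example, while the macros, the _attr syntax, and the ids mirror the usage in the hunks above.

// Illustrative sketch only; fatalLoggingSketch() and doCriticalWork() are
// hypothetical. Files using logv2 define their component before the include.
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault

#include <exception>

#include "mongo/logv2/log.h"
#include "mongo/util/assert_util.h"

namespace mongo {

// Stand-in for work that must not throw.
void doCriticalWork() {}

void fatalLoggingSketch(const Status& status, bool corrupt, bool inRepairMode) {
    // LOGV2_FATAL: logs at fatal severity and then fasserts with a stack
    // trace; the id (here the former fassertFailed() code 50665) now serves
    // as both log id and assertion code, so the separate fassert call is gone.
    if (!status.isOK()) {
        LOGV2_FATAL(50665, "Unrecoverable error: {error}", "error"_attr = status);
    }

    // LOGV2_FATAL_NOTRACE: same, but fasserts without printing a stack trace;
    // the replacement for the LOGV2_FATAL + fassertFailedNoTrace() pairs.
    if (corrupt && !inRepairMode) {
        LOGV2_FATAL_NOTRACE(50944, "Metadata corruption detected");
    }

    // LOGV2_FATAL_CONTINUE: logs at fatal severity but returns, leaving the
    // caller responsible for terminating or rethrowing, as in the catch
    // blocks of service_entry_point_common.cpp and transaction_participant.cpp.
    try {
        doCriticalWork();
    } catch (...) {
        LOGV2_FATAL_CONTINUE(
            21974, "Caught exception: {error}", "error"_attr = exceptionToStatus());
        std::terminate();
    }

    // LOGV2_FATAL_OPTIONS: selects the behavior at runtime via logv2::FatalMode,
    // as in the wiredtiger_kv_engine.cpp hunk above.
    logv2::FatalMode mode =
        inRepairMode ? logv2::FatalMode::kContinue : logv2::FatalMode::kAssertNoTrace;
    LOGV2_FATAL_OPTIONS(28595, {mode}, "Failed to open storage metadata");
}

}  // namespace mongo

Folding the fassert into the macro keeps the log id and the assertion code from drifting apart, which is also why the DEATH_TEST_F above now matches on the single id 3873103.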