author     Henrik Edin <henrik.edin@mongodb.com>              2020-03-23 10:04:42 -0400
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>   2020-03-24 20:22:44 +0000
commit     edb8778350326d2b33f056b1b5f0b25a4b5b444a (patch)
tree       15afcaa7a707be0872b764cd054aee55d855ba92 /src
parent     51b338ad41653a8188adcc67b682ea12bbe63b4d (diff)
download   mongo-edb8778350326d2b33f056b1b5f0b25a4b5b444a.tar.gz
SERVER-47040 LOGV2_FATAL also fasserts
Added LOGV2_FATAL_NOTRACE and LOGV2_FATAL_CONTINUE for call sites that need different behavior: NOTRACE fasserts without a stack trace, and CONTINUE logs at fatal severity without terminating.
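
For reference, a minimal sketch of the call-site change; every macro, ID, and message here is taken from the hunks below rather than new API:

    // Before this commit: log at fatal severity, then terminate with a separately numbered fassert.
    LOGV2_FATAL(23708, "{str}", "str"_attr = str);
    fassertFailed(40285);

    // After this commit: LOGV2_FATAL fasserts on its own and takes the fassert ID directly.
    LOGV2_FATAL(40285, "{str}", "str"_attr = str);

    // Variants for call sites that need different behavior:
    // - fassert without printing a stack trace (replaces LOGV2_FATAL + fassertFailedNoTrace):
    LOGV2_FATAL_NOTRACE(40304, "Turn off rsSyncApplyStop before attempting clean shutdown");
    // - log at fatal severity but keep running, leaving the caller to return, throw, or exit:
    LOGV2_FATAL_CONTINUE(21234, "Attempting to replicate ops while primary");

Call sites that also need to set a log component use LOGV2_FATAL_OPTIONS with logv2::LogOptions(..., logv2::FatalMode::kContinue), as in the db.cpp hunk below.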
Diffstat (limited to 'src')
-rw-r--r--  src/mongo/base/secure_allocator.cpp | 24
-rw-r--r--  src/mongo/base/status.cpp | 10
-rw-r--r--  src/mongo/bson/bsonobj.cpp | 6
-rw-r--r--  src/mongo/client/scanning_replica_set_monitor.cpp | 6
-rw-r--r--  src/mongo/client/server_ping_monitor.cpp | 6
-rw-r--r--  src/mongo/db/catalog/database_impl.cpp | 8
-rw-r--r--  src/mongo/db/catalog/index_builds_manager.cpp | 3
-rw-r--r--  src/mongo/db/catalog/index_catalog_impl.cpp | 13
-rw-r--r--  src/mongo/db/catalog/rename_collection.cpp | 3
-rw-r--r--  src/mongo/db/commands/fsync.cpp | 3
-rw-r--r--  src/mongo/db/db.cpp | 31
-rw-r--r--  src/mongo/db/index/index_access_method.cpp | 12
-rw-r--r--  src/mongo/db/mongod_options.cpp | 2
-rw-r--r--  src/mongo/db/repair_database.cpp | 12
-rw-r--r--  src/mongo/db/repair_database_and_check_version.cpp | 30
-rw-r--r--  src/mongo/db/repl/bgsync.cpp | 14
-rw-r--r--  src/mongo/db/repl/drop_pending_collection_reaper.cpp | 14
-rw-r--r--  src/mongo/db/repl/initial_syncer.cpp | 3
-rw-r--r--  src/mongo/db/repl/oplog.cpp | 3
-rw-r--r--  src/mongo/db/repl/oplog_applier.cpp | 3
-rw-r--r--  src/mongo/db/repl/oplog_applier_impl.cpp | 56
-rw-r--r--  src/mongo/db/repl/oplog_batcher.cpp | 10
-rw-r--r--  src/mongo/db/repl/replication_coordinator_impl.cpp | 57
-rw-r--r--  src/mongo/db/repl/replication_coordinator_impl_elect_v1.cpp | 3
-rw-r--r--  src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp | 3
-rw-r--r--  src/mongo/db/repl/replication_recovery.cpp | 119
-rw-r--r--  src/mongo/db/repl/rollback_impl.cpp | 49
-rw-r--r--  src/mongo/db/repl/rs_rollback.cpp | 161
-rw-r--r--  src/mongo/db/repl/session_update_tracker.cpp | 22
-rw-r--r--  src/mongo/db/repl/topology_coordinator.cpp | 9
-rw-r--r--  src/mongo/db/repl/topology_coordinator_v1_test.cpp | 2
-rw-r--r--  src/mongo/db/service_context.cpp | 2
-rw-r--r--  src/mongo/db/service_entry_point_common.cpp | 17
-rw-r--r--  src/mongo/db/storage/kv/kv_drop_pending_ident_reaper.cpp | 29
-rw-r--r--  src/mongo/db/storage/oplog_cap_maintainer_thread.cpp | 4
-rw-r--r--  src/mongo/db/storage/remove_saver.cpp | 15
-rw-r--r--  src/mongo/db/storage/storage_engine_init.cpp | 12
-rw-r--r--  src/mongo/db/storage/storage_repair_observer.cpp | 15
-rw-r--r--  src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp | 3
-rw-r--r--  src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp | 30
-rw-r--r--  src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.cpp | 3
-rw-r--r--  src/mongo/db/storage/wiredtiger/wiredtiger_util.cpp | 3
-rw-r--r--  src/mongo/db/system_index.cpp | 8
-rw-r--r--  src/mongo/db/transaction_participant.cpp | 48
-rw-r--r--  src/mongo/db/write_concern.cpp | 3
-rw-r--r--  src/mongo/embedded/embedded.cpp | 13
-rw-r--r--  src/mongo/executor/connection_pool.cpp | 6
-rw-r--r--  src/mongo/executor/network_interface_thread_pool.cpp | 6
-rw-r--r--  src/mongo/executor/task_executor_test_common.cpp | 3
-rw-r--r--  src/mongo/idl/server_parameter.cpp | 6
-rw-r--r--  src/mongo/logger/logv2_appender.h | 3
-rw-r--r--  src/mongo/logv2/README.md | 6
-rw-r--r--  src/mongo/logv2/log.h | 62
-rw-r--r--  src/mongo/logv2/log_options.h | 23
-rw-r--r--  src/mongo/rpc/op_msg.cpp | 3
-rw-r--r--  src/mongo/s/client/shard_registry.cpp | 6
-rw-r--r--  src/mongo/s/server.cpp | 22
-rw-r--r--  src/mongo/s/sharding_task_executor.cpp | 20
-rw-r--r--  src/mongo/shell/bench.cpp | 6
-rw-r--r--  src/mongo/transport/baton_asio_linux.h | 3
-rw-r--r--  src/mongo/transport/service_executor_test.cpp | 3
-rw-r--r--  src/mongo/transport/transport_layer_asio.cpp | 3
-rw-r--r--  src/mongo/unittest/death_test.cpp | 2
-rw-r--r--  src/mongo/util/assert_util.cpp | 126
-rw-r--r--  src/mongo/util/concurrency/thread_pool.cpp | 38
-rw-r--r--  src/mongo/util/concurrency/thread_pool_test_common.cpp | 6
-rw-r--r--  src/mongo/util/concurrency/ticketholder.cpp | 3
-rw-r--r--  src/mongo/util/exception_filter_win32.cpp | 21
-rw-r--r--  src/mongo/util/net/ssl_manager_openssl.cpp | 3
-rw-r--r--  src/mongo/util/net/ssl_manager_windows.cpp | 6
-rw-r--r--  src/mongo/util/signal_handlers.cpp | 9
-rw-r--r--  src/mongo/util/signal_handlers_synchronous.cpp | 25
-rw-r--r--  src/mongo/util/stacktrace_threads.cpp | 3
-rw-r--r--  src/mongo/util/version.cpp | 3
-rw-r--r--  src/mongo/util/version_impl.cpp | 3
-rw-r--r--  src/mongo/watchdog/watchdog.cpp | 125
76 files changed, 718 insertions, 738 deletions
diff --git a/src/mongo/base/secure_allocator.cpp b/src/mongo/base/secure_allocator.cpp
index 75e673db02d..423557622f5 100644
--- a/src/mongo/base/secure_allocator.cpp
+++ b/src/mongo/base/secure_allocator.cpp
@@ -133,8 +133,7 @@ void growWorkingSize(std::size_t bytes) {
if (!GetProcessWorkingSetSize(GetCurrentProcess(), &minWorkingSetSize, &maxWorkingSetSize)) {
auto str = errnoWithPrefix("Failed to GetProcessWorkingSetSize");
- LOGV2_FATAL(23708, "{str}", "str"_attr = str);
- fassertFailed(40285);
+ LOGV2_FATAL(40285, "{str}", "str"_attr = str);
}
// Since allocation request is aligned to page size, we can just add it to the current working
@@ -148,8 +147,7 @@ void growWorkingSize(std::size_t bytes) {
QUOTA_LIMITS_HARDWS_MIN_ENABLE |
QUOTA_LIMITS_HARDWS_MAX_DISABLE)) {
auto str = errnoWithPrefix("Failed to SetProcessWorkingSetSizeEx");
- LOGV2_FATAL(23709, "{str}", "str"_attr = str);
- fassertFailed(40286);
+ LOGV2_FATAL(40286, "{str}", "str"_attr = str);
}
}
@@ -168,8 +166,7 @@ void* systemAllocate(std::size_t bytes) {
if (!ptr) {
auto str = errnoWithPrefix("Failed to VirtualAlloc");
- LOGV2_FATAL(23710, "{str}", "str"_attr = str);
- fassertFailed(28835);
+ LOGV2_FATAL(28835, "{str}", "str"_attr = str);
}
if (VirtualLock(ptr, bytes) == 0) {
@@ -185,8 +182,7 @@ void* systemAllocate(std::size_t bytes) {
}
auto str = errnoWithPrefix("Failed to VirtualLock");
- LOGV2_FATAL(23711, "{str}", "str"_attr = str);
- fassertFailed(28828);
+ LOGV2_FATAL(28828, "{str}", "str"_attr = str);
}
return ptr;
@@ -195,16 +191,14 @@ void* systemAllocate(std::size_t bytes) {
void systemDeallocate(void* ptr, std::size_t bytes) {
if (VirtualUnlock(ptr, bytes) == 0) {
auto str = errnoWithPrefix("Failed to VirtualUnlock");
- LOGV2_FATAL(23712, "{str}", "str"_attr = str);
- fassertFailed(28829);
+ LOGV2_FATAL(28829, "{str}", "str"_attr = str);
}
// VirtualFree needs to take 0 as the size parameter for MEM_RELEASE
// (that's how the api works).
if (VirtualFree(ptr, 0, MEM_RELEASE) == 0) {
auto str = errnoWithPrefix("Failed to VirtualFree");
- LOGV2_FATAL(23713, "{str}", "str"_attr = str);
- fassertFailed(28830);
+ LOGV2_FATAL(28830, "{str}", "str"_attr = str);
}
}
@@ -271,18 +265,16 @@ void systemDeallocate(void* ptr, std::size_t bytes) {
#endif
if (munlock(ptr, bytes) != 0) {
- LOGV2_FATAL(23716,
+ LOGV2_FATAL(28833,
"{errnoWithPrefix_Failed_to_munlock}",
"errnoWithPrefix_Failed_to_munlock"_attr =
errnoWithPrefix("Failed to munlock"));
- fassertFailed(28833);
}
if (munmap(ptr, bytes) != 0) {
- LOGV2_FATAL(23717,
+ LOGV2_FATAL(28834,
"{errnoWithPrefix_Failed_to_munmap}",
"errnoWithPrefix_Failed_to_munmap"_attr = errnoWithPrefix("Failed to munmap"));
- fassertFailed(28834);
}
}
diff --git a/src/mongo/base/status.cpp b/src/mongo/base/status.cpp
index 80176c2ba57..499e185b442 100644
--- a/src/mongo/base/status.cpp
+++ b/src/mongo/base/status.cpp
@@ -57,8 +57,7 @@ Status::ErrorInfo* Status::ErrorInfo::create(ErrorCodes::Error code,
// have extra info.
if (kDebugBuild) {
// Make it easier to find this issue by fatally failing in debug builds.
- LOGV2_FATAL(23805, "Code {code} is supposed to have extra info", "code"_attr = code);
- fassertFailed(40680);
+ LOGV2_FATAL(40680, "Code {code} is supposed to have extra info", "code"_attr = code);
}
// In release builds, replace the error code. This maintains the invariant that all Statuses
@@ -126,9 +125,10 @@ StringBuilderImpl<Allocator>& operator<<(StringBuilderImpl<Allocator>& sb, const
// This really shouldn't happen but it would be really annoying if it broke error
// logging in production.
if (kDebugBuild) {
- LOGV2_FATAL(23806,
- "Error serializing extra info for {status_code} in Status::toString()",
- "status_code"_attr = status.code());
+ LOGV2_FATAL_CONTINUE(
+ 23806,
+ "Error serializing extra info for {status_code} in Status::toString()",
+ "status_code"_attr = status.code());
std::terminate();
}
}
diff --git a/src/mongo/bson/bsonobj.cpp b/src/mongo/bson/bsonobj.cpp
index 4cccfa89d8e..f120900e866 100644
--- a/src/mongo/bson/bsonobj.cpp
+++ b/src/mongo/bson/bsonobj.cpp
@@ -120,11 +120,10 @@ BSONObj BSONObj::copy() const {
// undefined behavior.
if (int sizeAfter = objsize(); sizeAfter != size) {
LOGV2_FATAL(
- 20103,
+ 31323,
"BSONObj::copy() - size {sizeAfter} differs from previously observed size {size}",
"sizeAfter"_attr = sizeAfter,
"size"_attr = size);
- fassertFailed(31323);
}
memcpy(storage.get(), objdata(), size);
return BSONObj(std::move(storage));
@@ -168,11 +167,10 @@ void BSONObj::_validateUnownedSize(int size) const {
// the size to ever be invalid. This means that the unowned memory we are reading has
// changed, and we must exit immediately to avoid further undefined behavior.
if (!isOwned() && (size < kMinBSONLength || size > BufferMaxSize)) {
- LOGV2_FATAL(51772,
+ LOGV2_FATAL(31322,
"BSONObj::_validateUnownedSize() - size {size} of unowned BSONObj is invalid "
"and differs from previously validated size.",
"size"_attr = size);
- fassertFailed(31322);
}
}
diff --git a/src/mongo/client/scanning_replica_set_monitor.cpp b/src/mongo/client/scanning_replica_set_monitor.cpp
index 74bfbb7bd59..5e400d6b009 100644
--- a/src/mongo/client/scanning_replica_set_monitor.cpp
+++ b/src/mongo/client/scanning_replica_set_monitor.cpp
@@ -285,11 +285,10 @@ void ScanningReplicaSetMonitor::SetState::rescheduleRefresh(SchedulingStrategy s
}
if (!swHandle.isOK()) {
- LOGV2_FATAL(24092,
+ LOGV2_FATAL(40140,
"Can't continue refresh for replica set {name} due to {swHandle_getStatus}",
"name"_attr = name,
"swHandle_getStatus"_attr = redact(swHandle.getStatus()));
- fassertFailed(40140);
}
refresherHandle = std::move(swHandle.getValue());
@@ -521,11 +520,10 @@ void Refresher::scheduleNetworkRequests() {
if (!swHandle.isOK()) {
LOGV2_FATAL(
- 24093,
+ 31176,
"Can't continue scan for replica set {set_name} due to {swHandle_getStatus}",
"set_name"_attr = _set->name,
"swHandle_getStatus"_attr = redact(swHandle.getStatus()));
- fassertFailed(31176);
}
node->scheduledIsMasterHandle = uassertStatusOK(std::move(swHandle));
diff --git a/src/mongo/client/server_ping_monitor.cpp b/src/mongo/client/server_ping_monitor.cpp
index df988789ba0..3cb7f793bf2 100644
--- a/src/mongo/client/server_ping_monitor.cpp
+++ b/src/mongo/client/server_ping_monitor.cpp
@@ -106,12 +106,11 @@ void SingleServerPingMonitor::_scheduleServerPing() {
}
if (!schedulePingHandle.isOK()) {
- LOGV2_FATAL(23732,
+ LOGV2_FATAL(31434,
"Can't continue scheduling pings to {hostAndPort} due to "
"{schedulePingHandle_getStatus}",
"hostAndPort"_attr = _hostAndPort,
"schedulePingHandle_getStatus"_attr = redact(schedulePingHandle.getStatus()));
- fassertFailed(31434);
}
_pingHandle = std::move(schedulePingHandle.getValue());
@@ -162,11 +161,10 @@ void SingleServerPingMonitor::_doServerPing() {
}
if (!remotePingHandle.isOK()) {
- LOGV2_FATAL(23733,
+ LOGV2_FATAL(31435,
"Can't continue pinging {hostAndPort} due to {remotePingHandle_getStatus}",
"hostAndPort"_attr = _hostAndPort,
"remotePingHandle_getStatus"_attr = redact(remotePingHandle.getStatus()));
- fassertFailed(31435);
}
// Update the _pingHandle so the ping can be canceled if the SingleServerPingMonitor gets
diff --git a/src/mongo/db/catalog/database_impl.cpp b/src/mongo/db/catalog/database_impl.cpp
index c7fa24487fd..5ea7746c8ce 100644
--- a/src/mongo/db/catalog/database_impl.cpp
+++ b/src/mongo/db/catalog/database_impl.cpp
@@ -642,10 +642,10 @@ Collection* DatabaseImpl::createCollection(OperationContext* opCtx,
bool generatedUUID = false;
if (!optionsWithUUID.uuid) {
if (!canAcceptWrites) {
- std::string msg = str::stream()
- << "Attempted to create a new collection " << nss << " without a UUID";
- LOGV2_FATAL(20329, "{msg}", "msg"_attr = msg);
- uasserted(ErrorCodes::InvalidOptions, msg);
+ LOGV2_ERROR_OPTIONS(20329,
+ {logv2::UserAssertAfterLog(ErrorCodes::InvalidOptions)},
+ "Attempted to create a new collection {nss} without a UUID",
+ "nss"_attr = nss);
} else {
optionsWithUUID.uuid.emplace(CollectionUUID::gen());
generatedUUID = true;
diff --git a/src/mongo/db/catalog/index_builds_manager.cpp b/src/mongo/db/catalog/index_builds_manager.cpp
index 303b89a1e17..1388bf5b4f8 100644
--- a/src/mongo/db/catalog/index_builds_manager.cpp
+++ b/src/mongo/db/catalog/index_builds_manager.cpp
@@ -163,11 +163,10 @@ StatusWith<std::pair<long long, long long>> IndexBuildsManager::startBuildingInd
auto validStatus = validateBSON(data.data(), data.size(), BSONVersion::kLatest);
if (!validStatus.isOK()) {
if (repair == RepairData::kNo) {
- LOGV2_FATAL(20349,
+ LOGV2_FATAL(31396,
"Invalid BSON detected at {id}: {validStatus}",
"id"_attr = id,
"validStatus"_attr = redact(validStatus));
- fassertFailed(31396);
}
LOGV2_WARNING(20348,
"Invalid BSON detected at {id}: {validStatus}. Deleting.",
diff --git a/src/mongo/db/catalog/index_catalog_impl.cpp b/src/mongo/db/catalog/index_catalog_impl.cpp
index 336f392a83a..977fb228cad 100644
--- a/src/mongo/db/catalog/index_catalog_impl.cpp
+++ b/src/mongo/db/catalog/index_catalog_impl.cpp
@@ -387,13 +387,12 @@ IndexCatalogEntry* IndexCatalogImpl::createIndexEntry(OperationContext* opCtx,
CreateIndexEntryFlags flags) {
Status status = _isSpecOk(opCtx, descriptor->infoObj());
if (!status.isOK()) {
- LOGV2_FATAL(20378,
- "Found an invalid index {descriptor_infoObj} on the {collection_ns} "
- "collection: {status}",
- "descriptor_infoObj"_attr = descriptor->infoObj(),
- "collection_ns"_attr = _collection->ns(),
- "status"_attr = redact(status));
- fassertFailedNoTrace(28782);
+ LOGV2_FATAL_NOTRACE(28782,
+ "Found an invalid index {descriptor_infoObj} on the {collection_ns} "
+ "collection: {status}",
+ "descriptor_infoObj"_attr = descriptor->infoObj(),
+ "collection_ns"_attr = _collection->ns(),
+ "status"_attr = redact(status));
}
auto engine = opCtx->getServiceContext()->getStorageEngine();
diff --git a/src/mongo/db/catalog/rename_collection.cpp b/src/mongo/db/catalog/rename_collection.cpp
index 6002748f7cf..fbd3ea020f3 100644
--- a/src/mongo/db/catalog/rename_collection.cpp
+++ b/src/mongo/db/catalog/rename_collection.cpp
@@ -254,13 +254,12 @@ Status renameCollectionAndDropTarget(OperationContext* opCtx,
// replicated writes are not enabled.
if (!renameOpTime.isNull()) {
LOGV2_FATAL(
- 20403,
+ 40616,
"renameCollection: {source} to {target} (with dropTarget=true) - unexpected "
"renameCollection oplog entry written to the oplog with optime {renameOpTime}",
"source"_attr = source,
"target"_attr = target,
"renameOpTime"_attr = renameOpTime);
- fassertFailed(40616);
}
renameOpTime = renameOpTimeFromApplyOps;
}
diff --git a/src/mongo/db/commands/fsync.cpp b/src/mongo/db/commands/fsync.cpp
index 29cbe97a7fa..5c348f7031e 100644
--- a/src/mongo/db/commands/fsync.cpp
+++ b/src/mongo/db/commands/fsync.cpp
@@ -433,8 +433,7 @@ void FSyncLockThread::run() {
}
} catch (const std::exception& e) {
- LOGV2_FATAL(20474, "FSyncLockThread exception: {e_what}", "e_what"_attr = e.what());
- fassertFailed(40350);
+ LOGV2_FATAL(40350, "FSyncLockThread exception: {e_what}", "e_what"_attr = e.what());
}
}
diff --git a/src/mongo/db/db.cpp b/src/mongo/db/db.cpp
index 3768b09dc68..e7d20abf4ba 100644
--- a/src/mongo/db/db.cpp
+++ b/src/mongo/db/db.cpp
@@ -445,10 +445,11 @@ ExitCode _initAndListen(ServiceContext* serviceContext, int listenPort) {
try {
nonLocalDatabases = repairDatabasesAndCheckVersion(startupOpCtx.get());
} catch (const ExceptionFor<ErrorCodes::MustDowngrade>& error) {
- LOGV2_FATAL_OPTIONS(20573,
- {logComponentV1toV2(LogComponent::kControl)},
- "** IMPORTANT: {error_toStatus_reason}",
- "error_toStatus_reason"_attr = error.toStatus().reason());
+ LOGV2_FATAL_OPTIONS(
+ 20573,
+ logv2::LogOptions(logv2::LogComponent::kControl, logv2::FatalMode::kContinue),
+ "** IMPORTANT: {error_toStatus_reason}",
+ "error_toStatus_reason"_attr = error.toStatus().reason());
exitCleanly(EXIT_NEED_DOWNGRADE);
}
@@ -1248,11 +1249,12 @@ int mongoDbMain(int argc, char* argv[], char** envp) {
Status status = mongo::runGlobalInitializers(argc, argv, envp);
if (!status.isOK()) {
- LOGV2_FATAL_OPTIONS(20574,
- {logComponentV1toV2(LogComponent::kControl)},
- "Error during global initialization: {error}",
- "Error during global initialization",
- "error"_attr = status);
+ LOGV2_FATAL_OPTIONS(
+ 20574,
+ logv2::LogOptions(logv2::LogComponent::kControl, logv2::FatalMode::kContinue),
+ "Error during global initialization: {error}",
+ "Error during global initialization",
+ "error"_attr = status);
quickExit(EXIT_FAILURE);
}
@@ -1265,11 +1267,12 @@ int mongoDbMain(int argc, char* argv[], char** envp) {
return serviceContext;
} catch (...) {
auto cause = exceptionToStatus();
- LOGV2_FATAL_OPTIONS(20575,
- {logComponentV1toV2(LogComponent::kControl)},
- "Error creating service context: {error}",
- "Error creating service context",
- "error"_attr = redact(cause));
+ LOGV2_FATAL_OPTIONS(
+ 20575,
+ logv2::LogOptions(logv2::LogComponent::kControl, logv2::FatalMode::kContinue),
+ "Error creating service context: {error}",
+ "Error creating service context",
+ "error"_attr = redact(cause));
quickExit(EXIT_FAILURE);
}
}();
diff --git a/src/mongo/db/index/index_access_method.cpp b/src/mongo/db/index/index_access_method.cpp
index 54b572545f9..67483da9ec7 100644
--- a/src/mongo/db/index/index_access_method.cpp
+++ b/src/mongo/db/index/index_access_method.cpp
@@ -633,12 +633,12 @@ Status AbstractIndexAccessMethod::commitBulk(OperationContext* opCtx,
if (kDebugBuild || _descriptor->unique()) {
cmpData = data.first.compareWithoutRecordId(previousKey);
if (cmpData < 0) {
- LOGV2_FATAL(20687,
- "expected the next key{data_first} to be greater than or equal to the "
- "previous key{previousKey}",
- "data_first"_attr = data.first.toString(),
- "previousKey"_attr = previousKey.toString());
- fassertFailedNoTrace(31171);
+ LOGV2_FATAL_NOTRACE(
+ 31171,
+ "expected the next key{data_first} to be greater than or equal to the "
+ "previous key{previousKey}",
+ "data_first"_attr = data.first.toString(),
+ "previousKey"_attr = previousKey.toString());
}
}
diff --git a/src/mongo/db/mongod_options.cpp b/src/mongo/db/mongod_options.cpp
index 78a8befe022..5087f08864a 100644
--- a/src/mongo/db/mongod_options.cpp
+++ b/src/mongo/db/mongod_options.cpp
@@ -137,7 +137,7 @@ bool handlePreValidationMongodOptions(const moe::Environment& params,
}
if (params.count("master") || params.count("slave")) {
- LOGV2_FATAL(20881, "Master/slave replication is no longer supported");
+ LOGV2_FATAL_CONTINUE(20881, "Master/slave replication is no longer supported");
return false;
}
diff --git a/src/mongo/db/repair_database.cpp b/src/mongo/db/repair_database.cpp
index dcb7bb8b31f..1ea89de9c86 100644
--- a/src/mongo/db/repair_database.cpp
+++ b/src/mongo/db/repair_database.cpp
@@ -210,10 +210,10 @@ Status repairDatabase(OperationContext* opCtx, StorageEngine* engine, const std:
auto status = repairCollections(opCtx, engine, dbName);
if (!status.isOK()) {
- LOGV2_FATAL(21030,
- "Failed to repair database {dbName}: {status_reason}",
- "dbName"_attr = dbName,
- "status_reason"_attr = status.reason());
+ LOGV2_FATAL_CONTINUE(21030,
+ "Failed to repair database {dbName}: {status_reason}",
+ "dbName"_attr = dbName,
+ "status_reason"_attr = status.reason());
}
try {
@@ -239,8 +239,8 @@ Status repairDatabase(OperationContext* opCtx, StorageEngine* engine, const std:
// have a UUID.
throw;
} catch (...) {
- LOGV2_FATAL(21031,
- "Unexpected exception encountered while reopening database after repair.");
+ LOGV2_FATAL_CONTINUE(
+ 21031, "Unexpected exception encountered while reopening database after repair.");
std::terminate(); // Logs additional info about the specific error.
}
diff --git a/src/mongo/db/repair_database_and_check_version.cpp b/src/mongo/db/repair_database_and_check_version.cpp
index 66437a78b4f..20e9ec50172 100644
--- a/src/mongo/db/repair_database_and_check_version.cpp
+++ b/src/mongo/db/repair_database_and_check_version.cpp
@@ -267,11 +267,10 @@ void checkForCappedOplog(OperationContext* opCtx, Database* db) {
Collection* oplogCollection =
CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, oplogNss);
if (oplogCollection && !oplogCollection->isCapped()) {
- LOGV2_FATAL(21022,
- "The oplog collection {oplogNss} is not capped; a capped oplog is a "
- "requirement for replication to function.",
- "oplogNss"_attr = oplogNss);
- fassertFailedNoTrace(40115);
+ LOGV2_FATAL_NOTRACE(40115,
+ "The oplog collection {oplogNss} is not capped; a capped oplog is a "
+ "requirement for replication to function.",
+ "oplogNss"_attr = oplogNss);
}
}
@@ -531,13 +530,12 @@ bool repairDatabasesAndCheckVersion(OperationContext* opCtx) {
// current version of mongod with --repair and then proceed with normal startup.
status = {ErrorCodes::MustUpgrade, status.reason()};
}
- LOGV2_FATAL(21023,
- "Unable to start mongod due to an incompatibility with the data files and"
- " this version of mongod: {status}",
- "status"_attr = redact(status));
- LOGV2_FATAL(21024,
- "Please consult our documentation when trying to downgrade to a previous"
- " major release");
+ LOGV2_FATAL_CONTINUE(
+ 21023,
+ "Unable to start mongod due to an incompatibility with the data files and"
+ " this version of mongod: {status}. Please consult our documentation when trying "
+ "to downgrade to a previous major release",
+ "status"_attr = redact(status));
quickExit(EXIT_NEED_UPGRADE);
MONGO_UNREACHABLE;
}
@@ -642,11 +640,9 @@ bool repairDatabasesAndCheckVersion(OperationContext* opCtx) {
// Fail to start up if there is no featureCompatibilityVersion document and there are non-local
// databases present and we do not need to start up via initial sync.
if (!fcvDocumentExists && nonLocalDatabases && !needInitialSync) {
- LOGV2_FATAL(
- 21025,
- "Unable to start up mongod due to missing featureCompatibilityVersion document.");
- LOGV2_FATAL(21026, "Please run with --repair to restore the document.");
- fassertFailedNoTrace(40652);
+ LOGV2_FATAL_NOTRACE(40652,
+ "Unable to start up mongod due to missing featureCompatibilityVersion "
+ "document. Please run with --repair to restore the document.");
}
LOGV2_DEBUG(21017, 1, "done repairDatabases");
diff --git a/src/mongo/db/repl/bgsync.cpp b/src/mongo/db/repl/bgsync.cpp
index fe6e56e5fff..0d25c62ca9f 100644
--- a/src/mongo/db/repl/bgsync.cpp
+++ b/src/mongo/db/repl/bgsync.cpp
@@ -216,11 +216,10 @@ void BackgroundSync::_run() {
sleepmillis(100); // sleep a bit to keep from hammering this thread with temp. errors.
} catch (const std::exception& e2) {
// redact(std::exception&) doesn't work
- LOGV2_FATAL(21127,
+ LOGV2_FATAL(28546,
"sync producer exception: {error}",
"Sync producer error",
"error"_attr = redact(e2.what()));
- fassertFailed(28546);
}
}
// No need to reset optimes here because we are shutting down.
@@ -784,10 +783,10 @@ void BackgroundSync::_runRollbackViaRecoverToCheckpoint(
if (status.isOK()) {
LOGV2(21105, "Rollback successful");
} else if (status == ErrorCodes::UnrecoverableRollbackError) {
- LOGV2_FATAL(21128,
- "Rollback failed with unrecoverable error: {error}",
- "Rollback failed with unrecoverable error",
- "error"_attr = status);
+ LOGV2_FATAL_CONTINUE(21128,
+ "Rollback failed with unrecoverable error: {error}",
+ "Rollback failed with unrecoverable error",
+ "error"_attr = status);
fassertFailedWithStatusNoTrace(50666, status);
} else {
LOGV2_WARNING(21124,
@@ -907,12 +906,11 @@ OpTime BackgroundSync::_readLastAppliedOpTime(OperationContext* opCtx) {
} catch (const ExceptionForCat<ErrorCategory::ShutdownError>&) {
throw;
} catch (const DBException& ex) {
- LOGV2_FATAL(21129,
+ LOGV2_FATAL(18904,
"Problem reading {namespace}: {error}",
"Problem reading from namespace",
"namespace"_attr = NamespaceString::kRsOplogNamespace.ns(),
"error"_attr = redact(ex));
- fassertFailed(18904);
}
OplogEntry parsedEntry(oplogEntry);
diff --git a/src/mongo/db/repl/drop_pending_collection_reaper.cpp b/src/mongo/db/repl/drop_pending_collection_reaper.cpp
index 53feefcee92..5b610eca52c 100644
--- a/src/mongo/db/repl/drop_pending_collection_reaper.cpp
+++ b/src/mongo/db/repl/drop_pending_collection_reaper.cpp
@@ -89,13 +89,13 @@ void DropPendingCollectionReaper::addDropPendingNamespace(
};
if (std::find_if(lowerBound, upperBound, matcher) != upperBound) {
- LOGV2_FATAL(21156,
- "Failed to add drop-pending collection {dropPendingNamespace} with drop optime "
- "{dropOpTime}: duplicate optime and namespace pair.",
- "Failed to add drop-pending collection: duplicate optime and namespace pair",
- "dropPendingNamespace"_attr = dropPendingNamespace,
- "dropOpTime"_attr = dropOpTime);
- fassertFailedNoTrace(40448);
+ LOGV2_FATAL_NOTRACE(
+ 40448,
+ "Failed to add drop-pending collection {dropPendingNamespace} with drop optime "
+ "{dropOpTime}: duplicate optime and namespace pair.",
+ "Failed to add drop-pending collection: duplicate optime and namespace pair",
+ "dropPendingNamespace"_attr = dropPendingNamespace,
+ "dropOpTime"_attr = dropOpTime);
}
_dropPendingNamespaces.insert(std::make_pair(dropOpTime, dropPendingNamespace));
diff --git a/src/mongo/db/repl/initial_syncer.cpp b/src/mongo/db/repl/initial_syncer.cpp
index c48e3108a44..f45de57cc67 100644
--- a/src/mongo/db/repl/initial_syncer.cpp
+++ b/src/mongo/db/repl/initial_syncer.cpp
@@ -1702,7 +1702,8 @@ void InitialSyncer::_finishInitialSyncAttempt(const StatusWith<OpTimeAndWallTime
// Check if need to do more retries.
if (_stats.failedInitialSyncAttempts >= _stats.maxFailedInitialSyncAttempts) {
- LOGV2_FATAL(21202, "The maximum number of retries have been exhausted for initial sync");
+ LOGV2_FATAL_CONTINUE(21202,
+ "The maximum number of retries have been exhausted for initial sync");
initialSyncFailures.increment();
diff --git a/src/mongo/db/repl/oplog.cpp b/src/mongo/db/repl/oplog.cpp
index 1def8eb05e6..eb9b8953415 100644
--- a/src/mongo/db/repl/oplog.cpp
+++ b/src/mongo/db/repl/oplog.cpp
@@ -272,11 +272,10 @@ void _logOpsInner(OperationContext* opCtx,
Status result = oplogCollection->insertDocumentsForOplog(opCtx, records, timestamps);
if (!result.isOK()) {
- LOGV2_FATAL(21263,
+ LOGV2_FATAL(17322,
"write to oplog failed: {error}",
"Write to oplog failed",
"error"_attr = result.toString());
- fassertFailed(17322);
}
// Set replCoord last optime only after we're sure the WUOW didn't abort and roll back.
diff --git a/src/mongo/db/repl/oplog_applier.cpp b/src/mongo/db/repl/oplog_applier.cpp
index b24f3fd0636..23322f12665 100644
--- a/src/mongo/db/repl/oplog_applier.cpp
+++ b/src/mongo/db/repl/oplog_applier.cpp
@@ -75,8 +75,7 @@ Future<void> OplogApplier::startup() {
void OplogApplier::shutdown() {
// Shutdown will hang if this failpoint is enabled.
if (globalFailPointRegistry().find("rsSyncApplyStop")->shouldFail()) {
- LOGV2_FATAL(21227, "Turn off rsSyncApplyStop before attempting clean shutdown");
- fassertFailedNoTrace(40304);
+ LOGV2_FATAL_NOTRACE(40304, "Turn off rsSyncApplyStop before attempting clean shutdown");
}
stdx::lock_guard<Latch> lock(_mutex);
diff --git a/src/mongo/db/repl/oplog_applier_impl.cpp b/src/mongo/db/repl/oplog_applier_impl.cpp
index a7fe3e6d7ed..71c74ba725c 100644
--- a/src/mongo/db/repl/oplog_applier_impl.cpp
+++ b/src/mongo/db/repl/oplog_applier_impl.cpp
@@ -618,7 +618,7 @@ StatusWith<OpTime> OplogApplierImpl::_applyOplogBatch(OperationContext* opCtx,
invariant(_replCoord);
if (_replCoord->getApplierState() == ReplicationCoordinator::ApplierState::Stopped) {
- LOGV2_FATAL(21234, "Attempting to replicate ops while primary");
+ LOGV2_FATAL_CONTINUE(21234, "Attempting to replicate ops while primary");
return {ErrorCodes::CannotApplyOplogWhilePrimary,
"attempting to replicate ops while primary"};
}
@@ -708,19 +708,19 @@ StatusWith<OpTime> OplogApplierImpl::_applyOplogBatch(OperationContext* opCtx,
for (auto it = statusVector.cbegin(); it != statusVector.cend(); ++it) {
const auto& status = *it;
if (!status.isOK()) {
- LOGV2_FATAL(21235,
- "Failed to apply batch of operations. Number of operations in "
- "batch: {numOperationsInBatch}. First operation: {firstOperation}. "
- "Last operation: "
- "{lastOperation}. Oplog application failed in writer thread "
- "{failedWriterThread}: {error}",
- "Failed to apply batch of operations",
- "numOperationsInBatch"_attr = ops.size(),
- "firstOperation"_attr = redact(ops.front().toBSON()),
- "lastOperation"_attr = redact(ops.back().toBSON()),
- "failedWriterThread"_attr =
- std::distance(statusVector.cbegin(), it),
- "error"_attr = redact(status));
+ LOGV2_FATAL_CONTINUE(
+ 21235,
+ "Failed to apply batch of operations. Number of operations in "
+ "batch: {numOperationsInBatch}. First operation: {firstOperation}. "
+ "Last operation: "
+ "{lastOperation}. Oplog application failed in writer thread "
+ "{failedWriterThread}: {error}",
+ "Failed to apply batch of operations",
+ "numOperationsInBatch"_attr = ops.size(),
+ "firstOperation"_attr = redact(ops.front().toBSON()),
+ "lastOperation"_attr = redact(ops.back().toBSON()),
+ "failedWriterThread"_attr = std::distance(statusVector.cbegin(), it),
+ "error"_attr = redact(status));
return status;
}
}
@@ -741,10 +741,10 @@ StatusWith<OpTime> OplogApplierImpl::_applyOplogBatch(OperationContext* opCtx,
"point is disabled");
while (MONGO_unlikely(pauseBatchApplicationBeforeCompletion.shouldFail())) {
if (inShutdown()) {
- LOGV2_FATAL(21236,
- "Turn off pauseBatchApplicationBeforeCompletion before attempting "
- "clean shutdown");
- fassertFailedNoTrace(50798);
+ LOGV2_FATAL_NOTRACE(
+ 50798,
+ "Turn off pauseBatchApplicationBeforeCompletion before attempting "
+ "clean shutdown");
}
sleepmillis(100);
}
@@ -1067,11 +1067,11 @@ Status OplogApplierImpl::applyOplogBatchPerWorker(OperationContext* opCtx,
continue;
}
- LOGV2_FATAL(21237,
- "Error applying operation ({oplogEntry}): {error}",
- "Error applying operation",
- "oplogEntry"_attr = redact(entry.toBSON()),
- "error"_attr = causedBy(redact(status)));
+ LOGV2_FATAL_CONTINUE(21237,
+ "Error applying operation ({oplogEntry}): {error}",
+ "Error applying operation",
+ "oplogEntry"_attr = redact(entry.toBSON()),
+ "error"_attr = causedBy(redact(status)));
return status;
}
} catch (const DBException& e) {
@@ -1082,11 +1082,11 @@ Status OplogApplierImpl::applyOplogBatchPerWorker(OperationContext* opCtx,
continue;
}
- LOGV2_FATAL(21238,
- "writer worker caught exception: {error} on: {oplogEntry}",
- "Writer worker caught exception",
- "error"_attr = redact(e),
- "oplogEntry"_attr = redact(entry.toBSON()));
+ LOGV2_FATAL_CONTINUE(21238,
+ "writer worker caught exception: {error} on: {oplogEntry}",
+ "Writer worker caught exception",
+ "error"_attr = redact(e),
+ "oplogEntry"_attr = redact(entry.toBSON()));
return e.toStatus();
}
}
diff --git a/src/mongo/db/repl/oplog_batcher.cpp b/src/mongo/db/repl/oplog_batcher.cpp
index 7e393915d58..cfb3d58c863 100644
--- a/src/mongo/db/repl/oplog_batcher.cpp
+++ b/src/mongo/db/repl/oplog_batcher.cpp
@@ -180,11 +180,11 @@ StatusWith<std::vector<OplogEntry>> OplogBatcher::getNextApplierBatch(
// Check for oplog version change.
if (entry.getVersion() != OplogEntry::kOplogVersion) {
static constexpr char message[] = "Unexpected oplog version";
- LOGV2_FATAL(21240,
- message,
- "expectedVersion"_attr = OplogEntry::kOplogVersion,
- "foundVersion"_attr = entry.getVersion(),
- "oplogEntry"_attr = redact(entry.toBSON()));
+ LOGV2_FATAL_CONTINUE(21240,
+ message,
+ "expectedVersion"_attr = OplogEntry::kOplogVersion,
+ "foundVersion"_attr = entry.getVersion(),
+ "oplogEntry"_attr = redact(entry.toBSON()));
return {ErrorCodes::BadValue,
str::stream() << message << ", expected oplog version "
<< OplogEntry::kOplogVersion << ", found version "
diff --git a/src/mongo/db/repl/replication_coordinator_impl.cpp b/src/mongo/db/repl/replication_coordinator_impl.cpp
index f0eb982f285..f14a2e8301d 100644
--- a/src/mongo/db/repl/replication_coordinator_impl.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl.cpp
@@ -445,11 +445,10 @@ bool ReplicationCoordinatorImpl::_startLoadLocalConfig(OperationContext* opCtx)
StatusWith<LastVote> lastVote = _externalState->loadLocalLastVoteDocument(opCtx);
if (!lastVote.isOK()) {
- LOGV2_FATAL(21429,
- "Error loading local voted for document at startup; {error}",
- "Error loading local voted for document at startup",
- "error"_attr = lastVote.getStatus());
- fassertFailedNoTrace(40367);
+ LOGV2_FATAL_NOTRACE(40367,
+ "Error loading local voted for document at startup; {error}",
+ "Error loading local voted for document at startup",
+ "error"_attr = lastVote.getStatus());
}
if (lastVote.getValue().getTerm() == OpTime::kInitialTerm) {
// This log line is checked in unit tests.
@@ -468,11 +467,10 @@ bool ReplicationCoordinatorImpl::_startLoadLocalConfig(OperationContext* opCtx)
auto initializingStatus = _replicationProcess->initializeRollbackID(opCtx);
fassert(40424, initializingStatus);
} else {
- LOGV2_FATAL(21430,
- "Error loading local Rollback ID document at startup; {error}",
- "Error loading local Rollback ID document at startup",
- "error"_attr = status);
- fassertFailedNoTrace(40428);
+ LOGV2_FATAL_NOTRACE(40428,
+ "Error loading local Rollback ID document at startup; {error}",
+ "Error loading local Rollback ID document at startup",
+ "error"_attr = status);
}
}
@@ -488,27 +486,26 @@ bool ReplicationCoordinatorImpl::_startLoadLocalConfig(OperationContext* opCtx)
status = localConfig.initialize(cfg.getValue());
if (!status.isOK()) {
if (status.code() == ErrorCodes::RepairedReplicaSetNode) {
- LOGV2_FATAL(
- 21431,
+ LOGV2_FATAL_NOTRACE(
+ 50923,
"This instance has been repaired and may contain modified replicated data that "
"would not match other replica set members. To see your repaired data, start "
"mongod without the --replSet option. When you are finished recovering your "
"data and would like to perform a complete re-sync, please refer to the "
"documentation here: "
"https://docs.mongodb.com/manual/tutorial/resync-replica-set-member/");
- fassertFailedNoTrace(50923);
}
- LOGV2_ERROR(21414,
- "Locally stored replica set configuration does not parse; See "
- "http://www.mongodb.org/dochub/core/recover-replica-set-from-invalid-config "
- "for information on how to recover from this. Got \"{error}\" while parsing "
- "{config}",
- "Locally stored replica set configuration does not parse; See "
- "hhttp://www.mongodb.org/dochub/core/recover-replica-set-from-invalid-config "
- "for information on how to recover from this",
- "error"_attr = status,
- "config"_attr = cfg.getValue());
- fassertFailedNoTrace(28545);
+ LOGV2_FATAL_NOTRACE(
+ 28545,
+ "Locally stored replica set configuration does not parse; See "
+ "http://www.mongodb.org/dochub/core/recover-replica-set-from-invalid-config "
+ "for information on how to recover from this. Got \"{error}\" while parsing "
+ "{config}",
+ "Locally stored replica set configuration does not parse; See "
+ "hhttp://www.mongodb.org/dochub/core/recover-replica-set-from-invalid-config "
+ "for information on how to recover from this",
+ "error"_attr = status,
+ "config"_attr = cfg.getValue());
}
// Read the last op from the oplog after cleaning up any partially applied batches.
@@ -1162,9 +1159,9 @@ void ReplicationCoordinatorImpl::signalDrainComplete(OperationContext* opCtx,
// occurred after the node became primary and so the concurrent reconfig has updated
// the term appropriately.
if (reconfigStatus != ErrorCodes::ConfigurationInProgress) {
- LOGV2_FATAL(4508101,
- "Reconfig on stepup failed for unknown reasons",
- "error"_attr = reconfigStatus);
+ LOGV2_FATAL_CONTINUE(4508101,
+ "Reconfig on stepup failed for unknown reasons",
+ "error"_attr = reconfigStatus);
fassertFailedWithStatus(31477, reconfigStatus);
}
}
@@ -3245,11 +3242,10 @@ Status ReplicationCoordinatorImpl::doReplSetReconfig(OperationContext* opCtx,
"Cannot run replSetReconfig because the node is currently updating "
"its configuration");
default:
- LOGV2_FATAL(21432,
+ LOGV2_FATAL(18914,
"Unexpected _rsConfigState {_rsConfigState}",
"Unexpected _rsConfigState",
"_rsConfigState"_attr = int(_rsConfigState));
- fassertFailed(18914);
}
invariant(_rsConfig.isInitialized());
@@ -3854,11 +3850,10 @@ void ReplicationCoordinatorImpl::_performPostMemberStateUpdateAction(
_startElectSelfV1(StartElectionReasonEnum::kElectionTimeout);
break;
default:
- LOGV2_FATAL(21433,
+ LOGV2_FATAL(26010,
"Unknown post member state update action {action}",
"Unknown post member state update action",
"action"_attr = static_cast<int>(action));
- fassertFailed(26010);
}
}
diff --git a/src/mongo/db/repl/replication_coordinator_impl_elect_v1.cpp b/src/mongo/db/repl/replication_coordinator_impl_elect_v1.cpp
index a598e513995..847879422f9 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_elect_v1.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_elect_v1.cpp
@@ -110,12 +110,11 @@ void ReplicationCoordinatorImpl::_startElectSelfV1_inlock(StartElectionReasonEnu
_topCoord->processLoseElection();
return;
default:
- LOGV2_FATAL(21452,
+ LOGV2_FATAL(28641,
"Entered replica set election code while in illegal config state "
"{rsConfigState}",
"Entered replica set election code while in illegal config state",
"rsConfigState"_attr = int(_rsConfigState));
- fassertFailed(28641);
}
auto finishedEvent = _makeEvent();
diff --git a/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp b/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp
index a3312ce0731..1b056553be9 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp
@@ -520,12 +520,11 @@ void ReplicationCoordinatorImpl::_scheduleHeartbeatReconfig_inlock(const ReplSet
case kConfigPreStart:
case kConfigStartingUp:
case kConfigReplicationDisabled:
- LOGV2_FATAL(21491,
+ LOGV2_FATAL(18807,
"Reconfiguration request occurred while _rsConfigState == "
"{_rsConfigState}; aborting.",
"Aborting reconfiguration request",
"_rsConfigState"_attr = int(_rsConfigState));
- fassertFailed(18807);
}
_setConfigState_inlock(kConfigHBReconfiguring);
invariant(!_rsConfig.isInitialized() ||
diff --git a/src/mongo/db/repl/replication_recovery.cpp b/src/mongo/db/repl/replication_recovery.cpp
index 968ec117a0f..2275e0b5d76 100644
--- a/src/mongo/db/repl/replication_recovery.cpp
+++ b/src/mongo/db/repl/replication_recovery.cpp
@@ -157,21 +157,20 @@ public:
attrs.add("oplogApplicationEndPoint", _oplogApplicationEndPoint->toBSON());
}
- LOGV2_FATAL(
- 21559, "Couldn't find any entries in the oplog, which should be impossible", attrs);
- fassertFailedNoTrace(40293);
+ LOGV2_FATAL_NOTRACE(
+ 40293, "Couldn't find any entries in the oplog, which should be impossible", attrs);
}
auto firstTimestampFound =
fassert(40291, OpTime::parseFromOplogEntry(_cursor->nextSafe())).getTimestamp();
if (firstTimestampFound != _oplogApplicationStartPoint) {
- LOGV2_FATAL(21560,
- "Oplog entry at {oplogApplicationStartPoint} is missing; actual entry "
- "found is {firstTimestampFound}",
- "Oplog entry at oplogApplicationStartPoint is missing",
- "oplogApplicationStartPoint"_attr = _oplogApplicationStartPoint.toBSON(),
- "firstTimestampFound"_attr = firstTimestampFound.toBSON());
- fassertFailedNoTrace(40292);
+ LOGV2_FATAL_NOTRACE(
+ 40292,
+ "Oplog entry at {oplogApplicationStartPoint} is missing; actual entry "
+ "found is {firstTimestampFound}",
+ "Oplog entry at oplogApplicationStartPoint is missing",
+ "oplogApplicationStartPoint"_attr = _oplogApplicationStartPoint.toBSON(),
+ "firstTimestampFound"_attr = firstTimestampFound.toBSON());
}
}
@@ -237,10 +236,10 @@ private:
boost::optional<Timestamp> recoverFromOplogPrecursor(OperationContext* opCtx,
StorageInterface* storageInterface) {
if (!storageInterface->supportsRecoveryTimestamp(opCtx->getServiceContext())) {
- LOGV2_FATAL(21561,
- "Cannot recover from the oplog with a storage engine that does not support "
- "recover to stable timestamp");
- fassertFailedNoTrace(50805);
+ LOGV2_FATAL_NOTRACE(
+ 50805,
+ "Cannot recover from the oplog with a storage engine that does not support "
+ "recover to stable timestamp");
}
// A non-existent recoveryTS means the checkpoint is unstable. If the recoveryTS exists but
@@ -248,9 +247,8 @@ boost::optional<Timestamp> recoverFromOplogPrecursor(OperationContext* opCtx,
// happen.
auto recoveryTS = storageInterface->getRecoveryTimestamp(opCtx->getServiceContext());
if (recoveryTS && recoveryTS->isNull()) {
- LOGV2_FATAL(21562,
- "Cannot recover from the oplog with stable checkpoint at null timestamp");
- fassertFailedNoTrace(50806);
+ LOGV2_FATAL_NOTRACE(
+ 50806, "Cannot recover from the oplog with stable checkpoint at null timestamp");
}
return recoveryTS;
@@ -267,50 +265,48 @@ void ReplicationRecoveryImpl::_assertNoRecoveryNeededOnUnstableCheckpoint(Operat
invariant(!_storageInterface->getRecoveryTimestamp(opCtx->getServiceContext()));
if (_consistencyMarkers->getInitialSyncFlag(opCtx)) {
- LOGV2_FATAL(21563, "Unexpected recovery needed, initial sync flag set");
- fassertFailedNoTrace(31362);
+ LOGV2_FATAL_NOTRACE(31362, "Unexpected recovery needed, initial sync flag set");
}
const auto truncateAfterPoint = _consistencyMarkers->getOplogTruncateAfterPoint(opCtx);
if (!truncateAfterPoint.isNull()) {
- LOGV2_FATAL(21564,
- "Unexpected recovery needed, oplog requires truncation. Truncate after point: "
- "{oplogTruncateAfterPoint}",
- "Unexpected recovery needed, oplog requires truncation",
- "oplogTruncateAfterPoint"_attr = truncateAfterPoint.toString());
- fassertFailedNoTrace(31363);
+ LOGV2_FATAL_NOTRACE(
+ 31363,
+ "Unexpected recovery needed, oplog requires truncation. Truncate after point: "
+ "{oplogTruncateAfterPoint}",
+ "Unexpected recovery needed, oplog requires truncation",
+ "oplogTruncateAfterPoint"_attr = truncateAfterPoint.toString());
}
auto topOfOplogSW = _getTopOfOplog(opCtx);
if (!topOfOplogSW.isOK()) {
- LOGV2_FATAL(21565,
- "Recovery not possible, no oplog found: {error}",
- "Recovery not possible, no oplog found",
- "error"_attr = topOfOplogSW.getStatus());
- fassertFailedNoTrace(31364);
+ LOGV2_FATAL_NOTRACE(31364,
+ "Recovery not possible, no oplog found: {error}",
+ "Recovery not possible, no oplog found",
+ "error"_attr = topOfOplogSW.getStatus());
}
const auto topOfOplog = topOfOplogSW.getValue();
const auto appliedThrough = _consistencyMarkers->getAppliedThrough(opCtx);
if (!appliedThrough.isNull() && appliedThrough != topOfOplog) {
- LOGV2_FATAL(21566,
- "Unexpected recovery needed, appliedThrough is not at top of oplog, indicating "
- "oplog has not been fully applied. appliedThrough: {appliedThrough}",
- "Unexpected recovery needed, appliedThrough is not at top of oplog, indicating "
- "oplog has not been fully applied",
- "appliedThrough"_attr = appliedThrough.toString());
- fassertFailedNoTrace(31365);
+ LOGV2_FATAL_NOTRACE(
+ 31365,
+ "Unexpected recovery needed, appliedThrough is not at top of oplog, indicating "
+ "oplog has not been fully applied. appliedThrough: {appliedThrough}",
+ "Unexpected recovery needed, appliedThrough is not at top of oplog, indicating "
+ "oplog has not been fully applied",
+ "appliedThrough"_attr = appliedThrough.toString());
}
const auto minValid = _consistencyMarkers->getMinValid(opCtx);
if (minValid > topOfOplog) {
- LOGV2_FATAL(21567,
- "Unexpected recovery needed, top of oplog is not consistent. topOfOplog: "
- "{topOfOplog}, minValid: {minValid}",
- "Unexpected recovery needed, top of oplog is not consistent",
- "topOfOplog"_attr = topOfOplog,
- "minValid"_attr = minValid);
- fassertFailedNoTrace(31366);
+ LOGV2_FATAL_NOTRACE(
+ 31366,
+ "Unexpected recovery needed, top of oplog is not consistent. topOfOplog: "
+ "{topOfOplog}, minValid: {minValid}",
+ "Unexpected recovery needed, top of oplog is not consistent",
+ "topOfOplog"_attr = topOfOplog,
+ "minValid"_attr = minValid);
}
}
@@ -337,9 +333,8 @@ void ReplicationRecoveryImpl::recoverFromOplogAsStandalone(OperationContext* opC
"Not doing any oplog recovery since there is an unstable checkpoint that is up "
"to date");
} else {
- LOGV2_FATAL(21568,
- "Cannot use 'recoverFromOplogAsStandalone' without a stable checkpoint");
- fassertFailedNoTrace(31229);
+ LOGV2_FATAL_NOTRACE(
+ 31229, "Cannot use 'recoverFromOplogAsStandalone' without a stable checkpoint");
}
}
@@ -359,8 +354,8 @@ void ReplicationRecoveryImpl::recoverFromOplogUpTo(OperationContext* opCtx, Time
auto recoveryTS = recoverFromOplogPrecursor(opCtx, _storageInterface);
if (!recoveryTS) {
- LOGV2_FATAL(21569, "Cannot use 'recoverToOplogTimestamp' without a stable checkpoint");
- fassertFailedNoTrace(31399);
+ LOGV2_FATAL_NOTRACE(31399,
+ "Cannot use 'recoverToOplogTimestamp' without a stable checkpoint");
}
// This may take an IS lock on the oplog collection.
@@ -462,10 +457,10 @@ void ReplicationRecoveryImpl::recoverFromOplog(OperationContext* opCtx,
_recoverFromUnstableCheckpoint(opCtx, appliedThrough, topOfOplog);
}
} catch (...) {
- LOGV2_FATAL(21570,
- "Caught exception during replication recovery: {error}",
- "Caught exception during replication recovery",
- "error"_attr = exceptionToStatus());
+ LOGV2_FATAL_CONTINUE(21570,
+ "Caught exception during replication recovery: {error}",
+ "Caught exception during replication recovery",
+ "error"_attr = exceptionToStatus());
std::terminate();
}
@@ -572,13 +567,12 @@ void ReplicationRecoveryImpl::_applyToEndOfOplog(OperationContext* opCtx,
"No oplog entries to apply for recovery. Start point is at the top of the oplog");
return; // We've applied all the valid oplog we have.
} else if (oplogApplicationStartPoint > topOfOplog) {
- LOGV2_FATAL(
- 21571,
+ LOGV2_FATAL_NOTRACE(
+ 40313,
"Applied op {oplogApplicationStartPoint} not found. Top of oplog is {topOfOplog}.",
"Applied op oplogApplicationStartPoint not found",
"oplogApplicationStartPoint"_attr = oplogApplicationStartPoint.toBSON(),
"topOfOplog"_attr = topOfOplog.toBSON());
- fassertFailedNoTrace(40313);
}
Timestamp appliedUpTo = _applyOplogOperations(opCtx, oplogApplicationStartPoint, topOfOplog);
@@ -679,13 +673,12 @@ void ReplicationRecoveryImpl::_truncateOplogTo(OperationContext* opCtx,
_storageInterface->findOplogEntryLessThanOrEqualToTimestamp(
opCtx, oplogCollection, truncateAfterTimestamp);
if (!truncateAfterOplogEntryBSON) {
- LOGV2_FATAL(21572,
- "Reached end of oplog looking for an oplog entry lte to "
- "{oplogTruncateAfterPoint} but did not find one",
- "Reached end of oplog looking for an oplog entry lte to "
- "oplogTruncateAfterPoint but did not find one",
- "oplogTruncateAfterPoint"_attr = truncateAfterTimestamp.toBSON());
- fassertFailedNoTrace(40296);
+ LOGV2_FATAL_NOTRACE(40296,
+ "Reached end of oplog looking for an oplog entry lte to "
+ "{oplogTruncateAfterPoint} but did not find one",
+ "Reached end of oplog looking for an oplog entry lte to "
+ "oplogTruncateAfterPoint but did not find one",
+ "oplogTruncateAfterPoint"_attr = truncateAfterTimestamp.toBSON());
}
// Parse the response.
diff --git a/src/mongo/db/repl/rollback_impl.cpp b/src/mongo/db/repl/rollback_impl.cpp
index 88ecd1e849a..53c91047487 100644
--- a/src/mongo/db/repl/rollback_impl.cpp
+++ b/src/mongo/db/repl/rollback_impl.cpp
@@ -1025,13 +1025,12 @@ StatusWith<RollBackLocalOperations::RollbackCommonPoint> RollbackImpl::_findComm
if (commonPointOpTime.getTimestamp() < *stableTimestamp) {
// This is an fassert rather than an invariant, since it can happen if the server was
// recently upgraded to enableMajorityReadConcern=true.
- LOGV2_FATAL(21644,
- "Common point must be at least stable timestamp, common point: "
- "{commonPoint}, stable timestamp: {stableTimestamp}",
- "Common point must be at least stable timestamp",
- "commonPoint"_attr = commonPointOpTime.getTimestamp(),
- "stableTimestamp"_attr = *stableTimestamp);
- fassertFailedNoTrace(51121);
+ LOGV2_FATAL_NOTRACE(51121,
+ "Common point must be at least stable timestamp, common point: "
+ "{commonPoint}, stable timestamp: {stableTimestamp}",
+ "Common point must be at least stable timestamp",
+ "commonPoint"_attr = commonPointOpTime.getTimestamp(),
+ "stableTimestamp"_attr = *stableTimestamp);
}
return commonPointSW.getValue();
@@ -1100,14 +1099,15 @@ boost::optional<BSONObj> RollbackImpl::_findDocumentById(OperationContext* opCtx
} else if (document.getStatus().code() == ErrorCodes::NoSuchKey) {
return boost::none;
} else {
- LOGV2_FATAL(21645,
- "Rollback failed to read document with {id} in namespace {namespace} with uuid "
- "{uuid}{error}",
- "Rollback failed to read document",
- "id"_attr = redact(id),
- "namespace"_attr = nss.ns(),
- "uuid"_attr = uuid.toString(),
- "error"_attr = causedBy(document.getStatus()));
+ LOGV2_FATAL_CONTINUE(
+ 21645,
+ "Rollback failed to read document with {id} in namespace {namespace} with uuid "
+ "{uuid}{error}",
+ "Rollback failed to read document",
+ "id"_attr = redact(id),
+ "namespace"_attr = nss.ns(),
+ "uuid"_attr = uuid.toString(),
+ "error"_attr = causedBy(document.getStatus()));
fassert(50751, document.getStatus());
}
@@ -1219,16 +1219,15 @@ void RollbackImpl::_transitionFromRollbackToSecondary(OperationContext* opCtx) {
auto status = _replicationCoordinator->setFollowerMode(MemberState::RS_SECONDARY);
if (!status.isOK()) {
- LOGV2_FATAL(21646,
- "Failed to transition into {targetState}; expected to be in "
- "state {expectedState}; found self in "
- "{actualState} {error}",
- "Failed to perform replica set state transition",
- "targetState"_attr = MemberState(MemberState::RS_SECONDARY),
- "expectedState"_attr = MemberState(MemberState::RS_ROLLBACK),
- "actualState"_attr = _replicationCoordinator->getMemberState(),
- "error"_attr = causedBy(status));
- fassertFailedNoTrace(40408);
+ LOGV2_FATAL_NOTRACE(40408,
+ "Failed to transition into {targetState}; expected to be in "
+ "state {expectedState}; found self in "
+ "{actualState} {error}",
+ "Failed to perform replica set state transition",
+ "targetState"_attr = MemberState(MemberState::RS_SECONDARY),
+ "expectedState"_attr = MemberState(MemberState::RS_ROLLBACK),
+ "actualState"_attr = _replicationCoordinator->getMemberState(),
+ "error"_attr = causedBy(status));
}
}
diff --git a/src/mongo/db/repl/rs_rollback.cpp b/src/mongo/db/repl/rs_rollback.cpp
index 323afa75b64..f62d8d39998 100644
--- a/src/mongo/db/repl/rs_rollback.cpp
+++ b/src/mongo/db/repl/rs_rollback.cpp
@@ -393,11 +393,11 @@ Status rollback_internal::updateFixUpInfoFromLocalOplogEntry(OperationContext* o
string indexName;
auto status = bsonExtractStringField(obj, "index", &indexName);
if (!status.isOK()) {
- LOGV2_FATAL(21731,
- "Missing index name in dropIndexes operation on rollback, "
- "document: {oplogEntry}",
- "Missing index name in dropIndexes operation on rollback",
- "oplogEntry"_attr = redact(oplogEntry.toBSON()));
+ LOGV2_FATAL_CONTINUE(21731,
+ "Missing index name in dropIndexes operation on rollback, "
+ "document: {oplogEntry}",
+ "Missing index name in dropIndexes operation on rollback",
+ "oplogEntry"_attr = redact(oplogEntry.toBSON()));
throw RSFatalException(
"Missing index name in dropIndexes operation on rollback.");
}
@@ -433,11 +433,12 @@ Status rollback_internal::updateFixUpInfoFromLocalOplogEntry(OperationContext* o
string indexName;
auto status = bsonExtractStringField(obj, "name", &indexName);
if (!status.isOK()) {
- LOGV2_FATAL(21732,
- "Missing index name in createIndexes operation on rollback, "
- "document: {oplogEntry}",
- "Missing index name in createIndexes operation on rollback",
- "oplogEntry"_attr = redact(oplogEntry.toBSON()));
+ LOGV2_FATAL_CONTINUE(
+ 21732,
+ "Missing index name in createIndexes operation on rollback, "
+ "document: {oplogEntry}",
+ "Missing index name in createIndexes operation on rollback",
+ "oplogEntry"_attr = redact(oplogEntry.toBSON()));
throw RSFatalException(
"Missing index name in createIndexes operation on rollback.");
}
@@ -689,7 +690,7 @@ Status rollback_internal::updateFixUpInfoFromLocalOplogEntry(OperationContext* o
}
// Some collMod fields cannot be rolled back, such as the index field.
static constexpr char message[] = "Cannot roll back a collMod command";
- LOGV2_FATAL(21733, message, "oplogEntry"_attr = redact(obj));
+ LOGV2_FATAL_CONTINUE(21733, message, "oplogEntry"_attr = redact(obj));
throw RSFatalException(message);
}
return Status::OK();
@@ -725,7 +726,8 @@ Status rollback_internal::updateFixUpInfoFromLocalOplogEntry(OperationContext* o
if (operations.type() != Array) {
static constexpr char message[] =
"Expected applyOps argument to be an array";
- LOGV2_FATAL(21734, message, "operations"_attr = redact(operations));
+ LOGV2_FATAL_CONTINUE(
+ 21734, message, "operations"_attr = redact(operations));
return Status(ErrorCodes::UnrecoverableRollbackError,
str::stream() << message << "; found " << redact(operations));
}
@@ -733,7 +735,8 @@ Status rollback_internal::updateFixUpInfoFromLocalOplogEntry(OperationContext* o
if (subopElement.type() != Object) {
static constexpr char message[] =
"Expected applyOps operations to be of Object type";
- LOGV2_FATAL(21735, message, "operation"_attr = redact(subopElement));
+ LOGV2_FATAL_CONTINUE(
+ 21735, message, "operation"_attr = redact(subopElement));
return Status(ErrorCodes::UnrecoverableRollbackError,
str::stream()
<< message << ", but found " << redact(subopElement));
@@ -768,10 +771,10 @@ Status rollback_internal::updateFixUpInfoFromLocalOplogEntry(OperationContext* o
}
default: {
static constexpr char message[] = "Can't roll back this command yet";
- LOGV2_FATAL(21736,
- message,
- "commandName"_attr = first.fieldName(),
- "command"_attr = redact(obj));
+ LOGV2_FATAL_CONTINUE(21736,
+ message,
+ "commandName"_attr = first.fieldName(),
+ "command"_attr = redact(obj));
throw RSFatalException(str::stream()
<< message << ": cmdname = " << first.fieldName());
}
@@ -785,10 +788,10 @@ Status rollback_internal::updateFixUpInfoFromLocalOplogEntry(OperationContext* o
doc._id = oplogEntry.getIdElement();
if (doc._id.eoo()) {
static constexpr char message[] = "Cannot roll back op with no _id";
- LOGV2_FATAL(21737,
- message,
- "namespace"_attr = nss.ns(),
- "oplogEntry"_attr = redact(oplogEntry.toBSON()));
+ LOGV2_FATAL_CONTINUE(21737,
+ message,
+ "namespace"_attr = nss.ns(),
+ "oplogEntry"_attr = redact(oplogEntry.toBSON()));
throw RSFatalException(str::stream() << message << ". ns: " << nss.ns());
}
fixUpInfo.docsToRefetch.insert(doc);
@@ -872,7 +875,7 @@ void dropIndex(OperationContext* opCtx,
if (entry->isReady(opCtx)) {
auto status = indexCatalog->dropIndex(opCtx, indexDescriptor);
if (!status.isOK()) {
- LOGV2_FATAL(21738,
+ LOGV2_ERROR(21738,
"Rollback failed to drop index {indexName} in {namespace}: {error}",
"Rollback failed to drop index",
"indexName"_attr = indexName,
@@ -882,7 +885,7 @@ void dropIndex(OperationContext* opCtx,
} else {
auto status = indexCatalog->dropUnfinishedIndex(opCtx, indexDescriptor);
if (!status.isOK()) {
- LOGV2_FATAL(
+ LOGV2_ERROR(
21739,
"Rollback failed to drop unfinished index {indexName} in {namespace}: {error}",
"Rollback failed to drop unfinished index",
@@ -1034,7 +1037,7 @@ void dropCollection(OperationContext* opCtx,
while (PlanExecutor::ADVANCED == (execState = exec->getNext(&curObj, nullptr))) {
auto status = removeSaver.goingToDelete(curObj);
if (!status.isOK()) {
- LOGV2_FATAL(
+ LOGV2_FATAL_CONTINUE(
21740,
"Rolling back createCollection on {namespace} failed to write document to "
"remove saver file: {error}",
@@ -1057,20 +1060,22 @@ void dropCollection(OperationContext* opCtx,
if (execState == PlanExecutor::FAILURE &&
WorkingSetCommon::isValidStatusMemberObject(curObj)) {
Status errorStatus = WorkingSetCommon::getMemberObjectStatus(curObj);
- LOGV2_FATAL(21741,
- "Rolling back createCollection on {namespace} failed with {error}. A "
- "full resync is necessary.",
- "Rolling back createCollection failed. A full resync is necessary",
- "namespace"_attr = nss,
- "error"_attr = redact(errorStatus));
+ LOGV2_FATAL_CONTINUE(
+ 21741,
+ "Rolling back createCollection on {namespace} failed with {error}. A "
+ "full resync is necessary.",
+ "Rolling back createCollection failed. A full resync is necessary",
+ "namespace"_attr = nss,
+ "error"_attr = redact(errorStatus));
throw RSFatalException(
"Rolling back createCollection failed. A full resync is necessary.");
} else {
- LOGV2_FATAL(21742,
- "Rolling back createCollection on {namespace} failed. A full resync is "
- "necessary.",
- "Rolling back createCollection failed. A full resync is necessary",
- "namespace"_attr = nss);
+ LOGV2_FATAL_CONTINUE(
+ 21742,
+ "Rolling back createCollection on {namespace} failed. A full resync is "
+ "necessary.",
+ "Rolling back createCollection failed. A full resync is necessary",
+ "namespace"_attr = nss);
throw RSFatalException(
"Rolling back createCollection failed. A full resync is necessary.");
}
@@ -1105,7 +1110,7 @@ void renameOutOfTheWay(OperationContext* opCtx, RenameCollectionInfo info, Datab
// namespace.
auto tmpNameResult = db->makeUniqueCollectionNamespace(opCtx, "rollback.tmp%%%%%");
if (!tmpNameResult.isOK()) {
- LOGV2_FATAL(
+ LOGV2_FATAL_CONTINUE(
21743,
"Unable to generate temporary namespace to rename collection {renameTo} "
"out of the way. {error}",
@@ -1135,11 +1140,12 @@ void renameOutOfTheWay(OperationContext* opCtx, RenameCollectionInfo info, Datab
auto renameStatus = renameCollectionForRollback(opCtx, tempNss, uuid);
if (!renameStatus.isOK()) {
- LOGV2_FATAL(21744,
- "Unable to rename collection {renameTo} out of the way to {tempNamespace}",
- "Unable to rename renameTo collection out of the way to a temporary namespace",
- "renameTo"_attr = info.renameTo,
- "tempNamespace"_attr = tempNss);
+ LOGV2_FATAL_CONTINUE(
+ 21744,
+ "Unable to rename collection {renameTo} out of the way to {tempNamespace}",
+ "Unable to rename renameTo collection out of the way to a temporary namespace",
+ "renameTo"_attr = info.renameTo,
+ "tempNamespace"_attr = tempNss);
throw RSFatalException("Unable to rename collection out of the way");
}
}
@@ -1177,22 +1183,23 @@ void rollbackRenameCollection(OperationContext* opCtx, UUID uuid, RenameCollecti
status = renameCollectionForRollback(opCtx, info.renameTo, uuid);
if (!status.isOK()) {
- LOGV2_FATAL(21745,
- "Rename collection failed to roll back twice. We were unable to rename "
- "collection {renameFrom} to {renameTo}. {error}",
- "Rename collection failed to roll back twice",
- "renameFrom"_attr = info.renameFrom,
- "renameTo"_attr = info.renameTo,
- "error"_attr = status.toString());
+ LOGV2_FATAL_CONTINUE(
+ 21745,
+ "Rename collection failed to roll back twice. We were unable to rename "
+ "collection {renameFrom} to {renameTo}. {error}",
+ "Rename collection failed to roll back twice",
+ "renameFrom"_attr = info.renameFrom,
+ "renameTo"_attr = info.renameTo,
+ "error"_attr = status.toString());
throw RSFatalException(
"Rename collection failed to roll back twice. We were unable to rename "
"the collection.");
}
} else if (!status.isOK()) {
- LOGV2_FATAL(21746,
- "Unable to roll back renameCollection command: {error}",
- "Unable to roll back renameCollection command",
- "error"_attr = status.toString());
+ LOGV2_FATAL_CONTINUE(21746,
+ "Unable to roll back renameCollection command: {error}",
+ "Unable to roll back renameCollection command",
+ "error"_attr = status.toString());
throw RSFatalException("Unable to rollback renameCollection command");
}
@@ -1754,7 +1761,7 @@ void rollback_internal::syncFixUp(OperationContext* opCtx,
if (found) {
auto status = removeSaver->goingToDelete(obj);
if (!status.isOK()) {
- LOGV2_FATAL(
+ LOGV2_FATAL_CONTINUE(
21747,
"Rollback cannot write document in namespace {namespace} to "
"archive file: {error}",
@@ -1994,11 +2001,10 @@ void rollback_internal::syncFixUp(OperationContext* opCtx,
Status status = AuthorizationManager::get(opCtx->getServiceContext())->initialize(opCtx);
if (!status.isOK()) {
- LOGV2_FATAL(21748,
- "Failed to reinitialize auth data after rollback: {error}",
- "Failed to reinitialize auth data after rollback",
- "error"_attr = redact(status));
- fassertFailedNoTrace(40496);
+ LOGV2_FATAL_NOTRACE(40496,
+ "Failed to reinitialize auth data after rollback: {error}",
+ "Failed to reinitialize auth data after rollback",
+ "error"_attr = redact(status));
}
// If necessary, clear the memory of existing sessions.
@@ -2121,11 +2127,10 @@ void rollback(OperationContext* opCtx,
// WARNING: these statuses sometimes have location codes which are lost with uassertStatusOK
// so we need to check here first.
if (ErrorCodes::UnrecoverableRollbackError == status.code()) {
- LOGV2_FATAL(21749,
- "Unable to complete rollback. A full resync may be needed: {error}",
- "Unable to complete rollback. A full resync may be needed",
- "error"_attr = redact(status));
- fassertFailedNoTrace(40507);
+ LOGV2_FATAL_NOTRACE(40507,
+ "Unable to complete rollback. A full resync may be needed: {error}",
+ "Unable to complete rollback. A full resync may be needed",
+ "error"_attr = redact(status));
}
// In other cases, we log the message contained in the error status and retry later.
@@ -2149,10 +2154,9 @@ void rollback(OperationContext* opCtx,
// will be unable to successfully perform any more rollback attempts. The knowledge of these
// stopped index builds gets lost after the first attempt.
if (stoppedIndexBuilds.size()) {
- LOGV2_FATAL(4655801,
- "Index builds stopped prior to rollback cannot be restarted by "
- "subsequent rollback attempts");
- fassertFailedNoTrace(4655800);
+ LOGV2_FATAL_NOTRACE(4655800,
+ "Index builds stopped prior to rollback cannot be restarted by "
+ "subsequent rollback attempts");
}
// Sleep a bit to allow upstream node to coalesce, if that was the cause of the failure. If
@@ -2175,22 +2179,21 @@ void rollback(OperationContext* opCtx,
// then we must shut down to clear the in-memory ShardingState associated with the
// shardIdentity document.
if (ShardIdentityRollbackNotifier::get(opCtx)->didRollbackHappen()) {
- LOGV2_FATAL(21750,
- "shardIdentity document rollback detected. Shutting down to clear "
- "in-memory sharding state. Restarting this process should safely return it "
- "to a healthy state");
- fassertFailedNoTrace(40498);
+ LOGV2_FATAL_NOTRACE(
+ 40498,
+ "shardIdentity document rollback detected. Shutting down to clear "
+ "in-memory sharding state. Restarting this process should safely return it "
+ "to a healthy state");
}
auto status = replCoord->setFollowerMode(MemberState::RS_RECOVERING);
if (!status.isOK()) {
- LOGV2_FATAL(21751,
- "Failed to perform replica set state transition",
- "targetState"_attr = MemberState(MemberState::RS_RECOVERING),
- "expectedState"_attr = MemberState(MemberState::RS_ROLLBACK),
- "actualState"_attr = replCoord->getMemberState(),
- "error"_attr = status);
- fassertFailedNoTrace(40499);
+ LOGV2_FATAL_NOTRACE(40499,
+ "Failed to perform replica set state transition",
+ "targetState"_attr = MemberState(MemberState::RS_RECOVERING),
+ "expectedState"_attr = MemberState(MemberState::RS_ROLLBACK),
+ "actualState"_attr = replCoord->getMemberState(),
+ "error"_attr = status);
}
}
diff --git a/src/mongo/db/repl/session_update_tracker.cpp b/src/mongo/db/repl/session_update_tracker.cpp
index edf8654a0af..77fc9d2197f 100644
--- a/src/mongo/db/repl/session_update_tracker.cpp
+++ b/src/mongo/db/repl/session_update_tracker.cpp
@@ -191,18 +191,16 @@ void SessionUpdateTracker::_updateSessionInfo(const OplogEntry& entry) {
return;
}
- LOGV2_FATAL(23792,
- "Entry for session {lsid} has txnNumber {sessionInfo_getTxnNumber} < "
- "{existingSessionInfo_getTxnNumber}",
- "lsid"_attr = lsid->toBSON(),
- "sessionInfo_getTxnNumber"_attr = *sessionInfo.getTxnNumber(),
- "existingSessionInfo_getTxnNumber"_attr = *existingSessionInfo.getTxnNumber());
- LOGV2_FATAL(23793, "New oplog entry: {entry}", "entry"_attr = redact(entry.toString()));
- LOGV2_FATAL(23794,
- "Existing oplog entry: {iter_second}",
- "iter_second"_attr = redact(iter->second.toString()));
-
- fassertFailedNoTrace(50843);
+ LOGV2_FATAL_NOTRACE(50843,
+ "Entry for session {lsid} has txnNumber {sessionInfo_getTxnNumber} < "
+ "{existingSessionInfo_getTxnNumber}. New oplog entry: {newEntry}, Existing "
+ "oplog entry: {existingEntry}",
+ "lsid"_attr = lsid->toBSON(),
+ "sessionInfo_getTxnNumber"_attr = *sessionInfo.getTxnNumber(),
+ "existingSessionInfo_getTxnNumber"_attr =
+ *existingSessionInfo.getTxnNumber(),
+ "newEntry"_attr = redact(entry.toString()),
+ "existingEntry"_attr = redact(iter->second.toString()));
}
std::vector<OplogEntry> SessionUpdateTracker::_flush(const OplogEntry& entry) {
diff --git a/src/mongo/db/repl/topology_coordinator.cpp b/src/mongo/db/repl/topology_coordinator.cpp
index f801a283c9d..c901b7098f8 100644
--- a/src/mongo/db/repl/topology_coordinator.cpp
+++ b/src/mongo/db/repl/topology_coordinator.cpp
@@ -217,9 +217,8 @@ HostAndPort TopologyCoordinator::chooseNewSyncSource(Date_t now,
!_rsConfig.isChainingAllowed())) {
if (readPreference == ReadPreference::SecondaryOnly) {
LOGV2_FATAL(
- 3873102,
+ 3873103,
"Sync source read preference 'secondaryOnly' with chaining disabled is not valid.");
- fassertFailed(3873103);
}
_syncSource = _choosePrimaryAsSyncSource(now, lastOpTimeFetched);
if (_syncSource.empty()) {
@@ -444,12 +443,11 @@ boost::optional<HostAndPort> TopologyCoordinator::_chooseSyncSourceInitialStep(D
const auto& data = sfp.getData();
const auto hostAndPortElem = data["hostAndPort"];
if (!hostAndPortElem) {
- LOGV2_FATAL(21839,
+ LOGV2_FATAL(50835,
"'forceSyncSoureCandidate' parameter set with invalid host and port: "
"{failpointData}",
"'forceSyncSoureCandidate' parameter set with invalid host and port",
"failpointData"_attr = data);
- fassertFailed(50835);
}
const auto hostAndPort = HostAndPort(hostAndPortElem.checkAndGetStringData());
@@ -2368,11 +2366,10 @@ std::string TopologyCoordinator::_getUnelectableReasonString(const UnelectableRe
ss << "node is not a member of a valid replica set configuration";
}
if (!hasWrittenToStream) {
- LOGV2_FATAL(21842,
+ LOGV2_FATAL(26011,
"Invalid UnelectableReasonMask value 0x{value}",
"Invalid UnelectableReasonMask value",
"value"_attr = integerToHex(ur));
- fassertFailed(26011);
}
ss << " (mask 0x" << integerToHex(ur) << ")";
return ss;
diff --git a/src/mongo/db/repl/topology_coordinator_v1_test.cpp b/src/mongo/db/repl/topology_coordinator_v1_test.cpp
index 6d7c0813eea..7363a4e1385 100644
--- a/src/mongo/db/repl/topology_coordinator_v1_test.cpp
+++ b/src/mongo/db/repl/topology_coordinator_v1_test.cpp
@@ -797,7 +797,7 @@ TEST_F(TopoCoordTest, ChooseOnlyPrimaryAsSyncSourceWhenChainingIsDisallowed) {
ASSERT(getTopoCoord().getSyncSourceAddress().empty());
}
-DEATH_TEST_F(TopoCoordTest, SecondaryOnlyAssertsWhenChainingNotAllowed, "3873102") {
+DEATH_TEST_F(TopoCoordTest, SecondaryOnlyAssertsWhenChainingNotAllowed, "3873103") {
updateConfig(BSON("_id"
<< "rs0"
<< "version" << 1 << "settings" << BSON("chainingAllowed" << false)
diff --git a/src/mongo/db/service_context.cpp b/src/mongo/db/service_context.cpp
index 708edcbfd2f..9e261ba5b0e 100644
--- a/src/mongo/db/service_context.cpp
+++ b/src/mongo/db/service_context.cpp
@@ -110,7 +110,7 @@ ServiceContext::ServiceContext()
ServiceContext::~ServiceContext() {
stdx::lock_guard<Latch> lk(_mutex);
for (const auto& client : _clients) {
- LOGV2_FATAL(23828,
+ LOGV2_ERROR(23828,
"Client {client_desc} still exists while destroying "
"ServiceContext@{reinterpret_cast_uint64_t_this}",
"client_desc"_attr = client->desc(),
diff --git a/src/mongo/db/service_entry_point_common.cpp b/src/mongo/db/service_entry_point_common.cpp
index cc745c0aa38..7681eedaf6e 100644
--- a/src/mongo/db/service_entry_point_common.cpp
+++ b/src/mongo/db/service_entry_point_common.cpp
@@ -529,14 +529,15 @@ void _abortUnpreparedOrStashPreparedTransaction(
txnParticipant->abortTransaction(opCtx);
} catch (...) {
// It is illegal for this to throw so we catch and log this here for diagnosability.
- LOGV2_FATAL(21974,
- "Caught exception during transaction "
- "{opCtx_getTxnNumber}{isPrepared_stash_abort}{opCtx_getLogicalSessionId}: "
- "{exceptionToStatus}",
- "opCtx_getTxnNumber"_attr = opCtx->getTxnNumber(),
- "isPrepared_stash_abort"_attr = (isPrepared ? " stash " : " abort "),
- "opCtx_getLogicalSessionId"_attr = opCtx->getLogicalSessionId()->toBSON(),
- "exceptionToStatus"_attr = exceptionToStatus());
+ LOGV2_FATAL_CONTINUE(
+ 21974,
+ "Caught exception during transaction "
+ "{opCtx_getTxnNumber}{isPrepared_stash_abort}{opCtx_getLogicalSessionId}: "
+ "{exceptionToStatus}",
+ "opCtx_getTxnNumber"_attr = opCtx->getTxnNumber(),
+ "isPrepared_stash_abort"_attr = (isPrepared ? " stash " : " abort "),
+ "opCtx_getLogicalSessionId"_attr = opCtx->getLogicalSessionId()->toBSON(),
+ "exceptionToStatus"_attr = exceptionToStatus());
std::terminate();
}
}
diff --git a/src/mongo/db/storage/kv/kv_drop_pending_ident_reaper.cpp b/src/mongo/db/storage/kv/kv_drop_pending_ident_reaper.cpp
index 08d8e2ec8a6..dd4d1dadd21 100644
--- a/src/mongo/db/storage/kv/kv_drop_pending_ident_reaper.cpp
+++ b/src/mongo/db/storage/kv/kv_drop_pending_ident_reaper.cpp
@@ -57,13 +57,12 @@ void KVDropPendingIdentReaper::addDropPendingIdent(const Timestamp& dropTimestam
info.ident = ident.toString();
_dropPendingIdents.insert(std::make_pair(dropTimestamp, info));
} else {
- LOGV2_FATAL(22238,
- "Failed to add drop-pending ident {ident} ({nss}) with drop timestamp "
- "{dropTimestamp}: duplicate timestamp and ident pair.",
- "ident"_attr = ident,
- "nss"_attr = nss,
- "dropTimestamp"_attr = dropTimestamp);
- fassertFailedNoTrace(51023);
+ LOGV2_FATAL_NOTRACE(51023,
+ "Failed to add drop-pending ident {ident} ({nss}) with drop timestamp "
+ "{dropTimestamp}: duplicate timestamp and ident pair.",
+ "ident"_attr = ident,
+ "nss"_attr = nss,
+ "dropTimestamp"_attr = dropTimestamp);
}
}
@@ -121,14 +120,14 @@ void KVDropPendingIdentReaper::dropIdentsOlderThan(OperationContext* opCtx, cons
WriteUnitOfWork wuow(opCtx);
auto status = _engine->dropIdent(opCtx, opCtx->recoveryUnit(), ident);
if (!status.isOK()) {
- LOGV2_FATAL(22239,
- "Failed to remove drop-pending ident {ident}(ns: {nss}) with drop "
- "timestamp {dropTimestamp}: {status}",
- "ident"_attr = ident,
- "nss"_attr = nss,
- "dropTimestamp"_attr = dropTimestamp,
- "status"_attr = status);
- fassertFailedNoTrace(51022);
+ LOGV2_FATAL_NOTRACE(
+ 51022,
+ "Failed to remove drop-pending ident {ident}(ns: {nss}) with drop "
+ "timestamp {dropTimestamp}: {status}",
+ "ident"_attr = ident,
+ "nss"_attr = nss,
+ "dropTimestamp"_attr = dropTimestamp,
+ "status"_attr = status);
}
wuow.commit();
}
diff --git a/src/mongo/db/storage/oplog_cap_maintainer_thread.cpp b/src/mongo/db/storage/oplog_cap_maintainer_thread.cpp
index 79160f8abbb..cb6c20ff19e 100644
--- a/src/mongo/db/storage/oplog_cap_maintainer_thread.cpp
+++ b/src/mongo/db/storage/oplog_cap_maintainer_thread.cpp
@@ -75,8 +75,8 @@ bool OplogCapMaintainerThread::_deleteExcessDocuments() {
} catch (const ExceptionForCat<ErrorCategory::Interruption>&) {
return false;
} catch (const std::exception& e) {
- LOGV2_FATAL(22243, "error in OplogCapMaintainerThread: {e_what}", "e_what"_attr = e.what());
- fassertFailedNoTrace(!"error in OplogCapMaintainerThread");
+ LOGV2_FATAL_NOTRACE(
+ 22243, "error in OplogCapMaintainerThread: {e_what}", "e_what"_attr = e.what());
} catch (...) {
fassertFailedNoTrace(!"unknown error in OplogCapMaintainerThread");
}
diff --git a/src/mongo/db/storage/remove_saver.cpp b/src/mongo/db/storage/remove_saver.cpp
index 73001e7d5d9..df113d1f0f8 100644
--- a/src/mongo/db/storage/remove_saver.cpp
+++ b/src/mongo/db/storage/remove_saver.cpp
@@ -84,52 +84,47 @@ RemoveSaver::~RemoveSaver() {
size_t resultLen;
Status status = _protector->finalize(protectedBuffer.get(), protectedSizeMax, &resultLen);
if (!status.isOK()) {
- LOGV2_FATAL(23736,
+ LOGV2_FATAL(34350,
"Unable to finalize DataProtector while closing RemoveSaver: {status}",
"status"_attr = redact(status));
- fassertFailed(34350);
}
_out->write(reinterpret_cast<const char*>(protectedBuffer.get()), resultLen);
if (_out->fail()) {
- LOGV2_FATAL(23737,
+ LOGV2_FATAL(34351,
"Couldn't write finalized DataProtector data to: {file_string} for remove "
"saving: {errnoWithDescription}",
"file_string"_attr = _file.string(),
"errnoWithDescription"_attr = redact(errnoWithDescription()));
- fassertFailed(34351);
}
protectedBuffer.reset(new uint8_t[protectedSizeMax]);
status = _protector->finalizeTag(protectedBuffer.get(), protectedSizeMax, &resultLen);
if (!status.isOK()) {
LOGV2_FATAL(
- 23738,
+ 34352,
"Unable to get finalizeTag from DataProtector while closing RemoveSaver: {status}",
"status"_attr = redact(status));
- fassertFailed(34352);
}
if (resultLen != _protector->getNumberOfBytesReservedForTag()) {
- LOGV2_FATAL(23739,
+ LOGV2_FATAL(34353,
"Attempted to write tag of size {resultLen} when DataProtector only "
"reserved {protector_getNumberOfBytesReservedForTag} bytes",
"resultLen"_attr = resultLen,
"protector_getNumberOfBytesReservedForTag"_attr =
_protector->getNumberOfBytesReservedForTag());
- fassertFailed(34353);
}
_out->seekp(0);
_out->write(reinterpret_cast<const char*>(protectedBuffer.get()), resultLen);
if (_out->fail()) {
- LOGV2_FATAL(23740,
+ LOGV2_FATAL(34354,
"Couldn't write finalizeTag from DataProtector to: {file_string} for "
"remove saving: {errnoWithDescription}",
"file_string"_attr = _file.string(),
"errnoWithDescription"_attr = redact(errnoWithDescription()));
- fassertFailed(34354);
}
}
}
diff --git a/src/mongo/db/storage/storage_engine_init.cpp b/src/mongo/db/storage/storage_engine_init.cpp
index c9040efc0ee..37174e0dc92 100644
--- a/src/mongo/db/storage/storage_engine_init.cpp
+++ b/src/mongo/db/storage/storage_engine_init.cpp
@@ -79,12 +79,11 @@ void initializeStorageEngine(ServiceContext* service, const StorageEngineInitFla
if (storageGlobalParams.repair) {
repairObserver->onRepairStarted();
} else if (repairObserver->isIncomplete()) {
- LOGV2_FATAL(
- 22272,
+ LOGV2_FATAL_NOTRACE(
+ 50922,
"An incomplete repair has been detected! This is likely because a repair "
"operation unexpectedly failed before completing. MongoDB will not start up "
"again without --repair.");
- fassertFailedNoTrace(50922);
}
}
@@ -209,10 +208,9 @@ void createLockFile(ServiceContext* service) {
if (wasUnclean) {
if (storageGlobalParams.readOnly) {
- LOGV2_FATAL(22273,
- "Attempted to open dbpath in readOnly mode, but the server was "
- "previously not shut down cleanly.");
- fassertFailedNoTrace(34416);
+ LOGV2_FATAL_NOTRACE(34416,
+ "Attempted to open dbpath in readOnly mode, but the server was "
+ "previously not shut down cleanly.");
}
LOGV2_WARNING(22271,
"Detected unclean shutdown - Lock file is not empty.",
diff --git a/src/mongo/db/storage/storage_repair_observer.cpp b/src/mongo/db/storage/storage_repair_observer.cpp
index 82a8d6f401d..399dd31ce51 100644
--- a/src/mongo/db/storage/storage_repair_observer.cpp
+++ b/src/mongo/db/storage/storage_repair_observer.cpp
@@ -123,12 +123,11 @@ void StorageRepairObserver::_touchRepairIncompleteFile() {
boost::filesystem::ofstream fileStream(_repairIncompleteFilePath);
fileStream << "This file indicates that a repair operation is in progress or incomplete.";
if (fileStream.fail()) {
- LOGV2_FATAL(
- 23756,
+ LOGV2_FATAL_NOTRACE(
+ 50920,
"Failed to write to file {repairIncompleteFilePath_string}: {errnoWithDescription}",
"repairIncompleteFilePath_string"_attr = _repairIncompleteFilePath.string(),
"errnoWithDescription"_attr = errnoWithDescription());
- fassertFailedNoTrace(50920);
}
fileStream.close();
@@ -141,11 +140,11 @@ void StorageRepairObserver::_removeRepairIncompleteFile() {
boost::filesystem::remove(_repairIncompleteFilePath, ec);
if (ec) {
- LOGV2_FATAL(23757,
- "Failed to remove file {repairIncompleteFilePath_string}: {ec_message}",
- "repairIncompleteFilePath_string"_attr = _repairIncompleteFilePath.string(),
- "ec_message"_attr = ec.message());
- fassertFailedNoTrace(50921);
+ LOGV2_FATAL_NOTRACE(50921,
+ "Failed to remove file {repairIncompleteFilePath_string}: {ec_message}",
+ "repairIncompleteFilePath_string"_attr =
+ _repairIncompleteFilePath.string(),
+ "ec_message"_attr = ec.message());
}
fassertNoTrace(50927, fsyncParentDirectory(_repairIncompleteFilePath));
}
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp
index b77b695ee53..e38da42c83e 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp
@@ -1331,14 +1331,13 @@ private:
_typeBits.resetFromBuffer(&br);
if (!br.atEof()) {
- LOGV2_FATAL(51795,
+ LOGV2_FATAL(28608,
"Unique index cursor seeing multiple records for key {key} in index "
"{index} ({uri}) belonging to collection {collection}",
"key"_attr = redact(curr(kWantKey)->key),
"index"_attr = _idx.indexName(),
"uri"_attr = _idx.uri(),
"collection"_attr = _idx.collectionNamespace());
- fassertFailed(28608);
}
}
};
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp
index 9a5c5d4a504..714e35a356e 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp
@@ -865,17 +865,16 @@ void WiredTigerKVEngine::_openWiredTiger(const std::string& path, const std::str
LOGV2_WARNING(22348, "WiredTiger metadata corruption detected");
if (!_inRepairMode) {
- LOGV2_FATAL(22362, "{kWTRepairMsg}", "kWTRepairMsg"_attr = kWTRepairMsg);
- fassertFailedNoTrace(50944);
+ LOGV2_FATAL_NOTRACE(50944, "{kWTRepairMsg}", "kWTRepairMsg"_attr = kWTRepairMsg);
}
}
- LOGV2_FATAL(22363,
- "Reason: {wtRCToStatus_ret_reason}",
- "wtRCToStatus_ret_reason"_attr = wtRCToStatus(ret).reason());
- if (!_inRepairMode) {
- fassertFailedNoTrace(28595);
- }
+ logv2::FatalMode assertMode =
+ _inRepairMode ? logv2::FatalMode::kContinue : logv2::FatalMode::kAssertNoTrace;
+ LOGV2_FATAL_OPTIONS(28595,
+ {assertMode},
+ "Reason: {wtRCToStatus_ret_reason}",
+ "wtRCToStatus_ret_reason"_attr = wtRCToStatus(ret).reason());
// Always attempt to salvage metadata regardless of error code when in repair mode.
@@ -888,11 +887,10 @@ void WiredTigerKVEngine::_openWiredTiger(const std::string& path, const std::str
return;
}
- LOGV2_FATAL(22364,
- "{Failed_to_salvage_WiredTiger_metadata_wtRCToStatus_ret_reason}",
- "Failed_to_salvage_WiredTiger_metadata_wtRCToStatus_ret_reason"_attr =
- "Failed to salvage WiredTiger metadata: " + wtRCToStatus(ret).reason());
- fassertFailedNoTrace(50947);
+ LOGV2_FATAL_NOTRACE(50947,
+ "{Failed_to_salvage_WiredTiger_metadata_wtRCToStatus_ret_reason}",
+ "Failed_to_salvage_WiredTiger_metadata_wtRCToStatus_ret_reason"_attr =
+ "Failed to salvage WiredTiger metadata: " + wtRCToStatus(ret).reason());
}
void WiredTigerKVEngine::cleanShutdown() {
@@ -1961,8 +1959,7 @@ bool WiredTigerKVEngine::_canRecoverToStableTimestamp() const {
StatusWith<Timestamp> WiredTigerKVEngine::recoverToStableTimestamp(OperationContext* opCtx) {
if (!supportsRecoverToStableTimestamp()) {
- LOGV2_FATAL(22365, "WiredTiger is configured to not support recover to a stable timestamp");
- fassertFailed(50665);
+ LOGV2_FATAL(50665, "WiredTiger is configured to not support recover to a stable timestamp");
}
if (!_canRecoverToStableTimestamp()) {
@@ -2040,9 +2037,8 @@ Timestamp WiredTigerKVEngine::getOldestOpenReadTimestamp() const {
boost::optional<Timestamp> WiredTigerKVEngine::getRecoveryTimestamp() const {
if (!supportsRecoveryTimestamp()) {
- LOGV2_FATAL(22366,
+ LOGV2_FATAL(50745,
"WiredTiger is configured to not support providing a recovery timestamp");
- fassertFailed(50745);
}
if (_recoveryTimestamp.isNull()) {
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.cpp
index 8a0dde07498..52eae581d8b 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.cpp
@@ -283,10 +283,9 @@ void WiredTigerRecoveryUnit::assertInActiveTxn() const {
if (_isActive()) {
return;
}
- LOGV2_FATAL(22417,
+ LOGV2_FATAL(28575,
"Recovery unit is not active. Current state: {getState}",
"getState"_attr = toString(_getState()));
- fassertFailed(28575);
}
boost::optional<int64_t> WiredTigerRecoveryUnit::getOplogVisibilityTs() {
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_util.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_util.cpp
index 4dc2b0cc079..35d890b941f 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_util.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_util.cpp
@@ -626,7 +626,7 @@ Status WiredTigerUtil::setTableLogging(WT_SESSION* session, const std::string& u
"on"_attr = on);
int ret = session->alter(session, uri.c_str(), setting.c_str());
if (ret) {
- LOGV2_FATAL(22437,
+ LOGV2_FATAL(50756,
"Failed to update log setting. Uri: {uri} Enable? {on} Ret: {ret} MD: "
"{existingMetadata} Msg: {session_strerror_session_ret}",
"uri"_attr = uri,
@@ -634,7 +634,6 @@ Status WiredTigerUtil::setTableLogging(WT_SESSION* session, const std::string& u
"ret"_attr = ret,
"existingMetadata"_attr = redact(existingMetadata),
"session_strerror_session_ret"_attr = session->strerror(session, ret));
- fassertFailed(50756);
}
return Status::OK();
diff --git a/src/mongo/db/system_index.cpp b/src/mongo/db/system_index.cpp
index ad31f6bd2bc..a5af67be4db 100644
--- a/src/mongo/db/system_index.cpp
+++ b/src/mongo/db/system_index.cpp
@@ -117,10 +117,10 @@ void generateSystemIndexForExistingCollection(OperationContext* opCtx,
IndexBuildsCoordinator::get(opCtx)->createIndexes(
opCtx, collectionUUID, {indexSpec}, indexConstraints, fromMigrate);
} catch (const DBException& e) {
- LOGV2_FATAL(22490,
- "Failed to regenerate index for {ns}. Exception: {e_what}",
- "ns"_attr = ns,
- "e_what"_attr = e.what());
+ LOGV2_FATAL_CONTINUE(22490,
+ "Failed to regenerate index for {ns}. Exception: {e_what}",
+ "ns"_attr = ns,
+ "e_what"_attr = e.what());
throw;
}
}
diff --git a/src/mongo/db/transaction_participant.cpp b/src/mongo/db/transaction_participant.cpp
index 936c89a7b52..03d585b55a1 100644
--- a/src/mongo/db/transaction_participant.cpp
+++ b/src/mongo/db/transaction_participant.cpp
@@ -101,7 +101,7 @@ void fassertOnRepeatedExecution(const LogicalSessionId& lsid,
const repl::OpTime& firstOpTime,
const repl::OpTime& secondOpTime) {
LOGV2_FATAL(
- 22524,
+ 40526,
"Statement id {stmtId} from transaction [ {lsid}:{txnNumber} ] was committed once "
"with opTime {firstCommitOpTime} and a second time with opTime {secondCommitOpTime}. This "
"indicates possible data corruption or server bug and the process will be "
@@ -113,7 +113,6 @@ void fassertOnRepeatedExecution(const LogicalSessionId& lsid,
"txnNumber"_attr = txnNumber,
"firstCommitOpTime"_attr = firstOpTime,
"secondCommitOpTime"_attr = secondOpTime);
- fassertFailed(40526);
}
struct ActiveTransactionHistory {
@@ -1105,13 +1104,13 @@ Timestamp TransactionParticipant::Participant::prepareTransaction(
} catch (...) {
// It is illegal for aborting a prepared transaction to fail for any reason, so we crash
// instead.
- LOGV2_FATAL(22525,
- "Caught exception during abort of prepared transaction "
- "{txnNumber} on {lsid}: {error}",
- "Caught exception during abort of prepared transaction",
- "txnNumber"_attr = opCtx->getTxnNumber(),
- "lsid"_attr = _sessionId().toBSON(),
- "error"_attr = exceptionToStatus());
+ LOGV2_FATAL_CONTINUE(22525,
+ "Caught exception during abort of prepared transaction "
+ "{txnNumber} on {lsid}: {error}",
+ "Caught exception during abort of prepared transaction",
+ "txnNumber"_attr = opCtx->getTxnNumber(),
+ "lsid"_attr = _sessionId().toBSON(),
+ "error"_attr = exceptionToStatus());
std::terminate();
}
});
@@ -1431,13 +1430,13 @@ void TransactionParticipant::Participant::commitPreparedTransaction(
} catch (...) {
// It is illegal for committing a prepared transaction to fail for any reason, other than an
// invalid command, so we crash instead.
- LOGV2_FATAL(22526,
- "Caught exception during commit of prepared transaction {txnNumber} "
- "on {lsid}: {error}",
- "Caught exception during commit of prepared transaction",
- "txnNumber"_attr = opCtx->getTxnNumber(),
- "lsid"_attr = _sessionId().toBSON(),
- "error"_attr = exceptionToStatus());
+ LOGV2_FATAL_CONTINUE(22526,
+ "Caught exception during commit of prepared transaction {txnNumber} "
+ "on {lsid}: {error}",
+ "Caught exception during commit of prepared transaction",
+ "txnNumber"_attr = opCtx->getTxnNumber(),
+ "lsid"_attr = _sessionId().toBSON(),
+ "error"_attr = exceptionToStatus());
std::terminate();
}
}
@@ -1563,14 +1562,15 @@ void TransactionParticipant::Participant::_abortActiveTransaction(
} catch (...) {
// It is illegal for aborting a transaction that must write an abort oplog entry to fail
// after aborting the storage transaction, so we crash instead.
- LOGV2_FATAL(22527,
- "Caught exception during abort of transaction that must write abort oplog "
- "entry {txnNumber} on {lsid}: {error}",
- "Caught exception during abort of transaction that must write abort oplog "
- "entry",
- "txnNumber"_attr = opCtx->getTxnNumber(),
- "lsid"_attr = _sessionId().toBSON(),
- "error"_attr = exceptionToStatus());
+ LOGV2_FATAL_CONTINUE(
+ 22527,
+ "Caught exception during abort of transaction that must write abort oplog "
+ "entry {txnNumber} on {lsid}: {error}",
+ "Caught exception during abort of transaction that must write abort oplog "
+ "entry",
+ "txnNumber"_attr = opCtx->getTxnNumber(),
+ "lsid"_attr = _sessionId().toBSON(),
+ "error"_attr = exceptionToStatus());
std::terminate();
}
} else {
diff --git a/src/mongo/db/write_concern.cpp b/src/mongo/db/write_concern.cpp
index 05570734e5e..f7fadb139cc 100644
--- a/src/mongo/db/write_concern.cpp
+++ b/src/mongo/db/write_concern.cpp
@@ -300,9 +300,8 @@ Status waitForWriteConcern(OperationContext* opCtx,
try {
switch (writeConcernWithPopulatedSyncMode.syncMode) {
case WriteConcernOptions::SyncMode::UNSET:
- LOGV2_FATAL(22550,
+ LOGV2_FATAL(34410,
"Attempting to wait on a WriteConcern with an unset sync option");
- fassertFailed(34410);
case WriteConcernOptions::SyncMode::NONE:
break;
case WriteConcernOptions::SyncMode::FSYNC: {
diff --git a/src/mongo/embedded/embedded.cpp b/src/mongo/embedded/embedded.cpp
index ca065dd9469..2e9407e0692 100644
--- a/src/mongo/embedded/embedded.cpp
+++ b/src/mongo/embedded/embedded.cpp
@@ -27,7 +27,7 @@
* it in the license file.
*/
-#define MONGO_LOG_DEFAULT_COMPONENT ::mongo::logger::LogComponent::kStorage
+#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kStorage
#include "mongo/platform/basic.h"
@@ -66,8 +66,8 @@
#include "mongo/embedded/read_write_concern_defaults_cache_lookup_embedded.h"
#include "mongo/embedded/replication_coordinator_embedded.h"
#include "mongo/embedded/service_entry_point_embedded.h"
-#include "mongo/logger/log_component.h"
#include "mongo/logv2/log.h"
+#include "mongo/logv2/log_component.h"
#include "mongo/scripting/dbdirectclient_factory.h"
#include "mongo/util/background.h"
#include "mongo/util/exit.h"
@@ -146,7 +146,7 @@ GlobalInitializerRegisterer filterAllowedIndexFieldNamesEmbeddedInitializer(
{"FilterAllowedIndexFieldNames"});
} // namespace
-using logger::LogComponent;
+using logv2::LogComponent;
using std::endl;
void shutdown(ServiceContext* srvContext) {
@@ -183,7 +183,7 @@ void shutdown(ServiceContext* srvContext) {
}
setGlobalServiceContext(nullptr);
- LOGV2_OPTIONS(22551, {logComponentV1toV2(LogComponent::kControl)}, "now exiting");
+ LOGV2_OPTIONS(22551, {LogComponent::kControl}, "now exiting");
}
@@ -230,8 +230,7 @@ ServiceContext* initialize(const char* yaml_config) {
}
if (kDebugBuild)
- LOGV2_OPTIONS(
- 22552, {logComponentV1toV2(LogComponent::kControl)}, "DEBUG build (which is slower)");
+ LOGV2_OPTIONS(22552, {LogComponent::kControl}, "DEBUG build (which is slower)");
// The periodic runner is required by the storage engine to be running beforehand.
auto periodicRunner = std::make_unique<PeriodicRunnerEmbedded>(
@@ -295,7 +294,7 @@ ServiceContext* initialize(const char* yaml_config) {
repairDatabasesAndCheckVersion(startupOpCtx.get());
} catch (const ExceptionFor<ErrorCodes::MustDowngrade>& error) {
LOGV2_FATAL_OPTIONS(22555,
- {logComponentV1toV2(LogComponent::kControl)},
+ logv2::LogOptions(LogComponent::kControl, logv2::FatalMode::kContinue),
"** IMPORTANT: {error_toStatus_reason}",
"error_toStatus_reason"_attr = error.toStatus().reason());
quickExit(EXIT_NEED_DOWNGRADE);
diff --git a/src/mongo/executor/connection_pool.cpp b/src/mongo/executor/connection_pool.cpp
index 0cc26800c65..2b4492fbba4 100644
--- a/src/mongo/executor/connection_pool.cpp
+++ b/src/mongo/executor/connection_pool.cpp
@@ -340,8 +340,7 @@ public:
void fassertSSLModeIs(transport::ConnectSSLMode desired) const {
if (desired != _sslMode) {
- LOGV2_FATAL(22580, "Mixing ssl modes for a single host is not supported");
- fassertFailedNoTrace(51043);
+ LOGV2_FATAL_NOTRACE(51043, "Mixing ssl modes for a single host is not supported");
}
}
@@ -1016,10 +1015,9 @@ void ConnectionPool::SpecificPool::spawnConnections() {
// make a new connection and put it in processing
handle = _parent->_factory->makeConnection(_hostAndPort, _sslMode, _generation);
} catch (std::system_error& e) {
- LOGV2_FATAL(22581,
+ LOGV2_FATAL(40336,
"Failed to construct a new connection object: {e_what}",
"e_what"_attr = e.what());
- fassertFailed(40336);
}
_processingPool[handle.get()] = handle;
diff --git a/src/mongo/executor/network_interface_thread_pool.cpp b/src/mongo/executor/network_interface_thread_pool.cpp
index 65abdf0abc2..84fb7663ff5 100644
--- a/src/mongo/executor/network_interface_thread_pool.cpp
+++ b/src/mongo/executor/network_interface_thread_pool.cpp
@@ -65,8 +65,7 @@ void NetworkInterfaceThreadPool::_dtorImpl() {
void NetworkInterfaceThreadPool::startup() {
stdx::unique_lock<Latch> lk(_mutex);
if (_started) {
- LOGV2_FATAL(23790, "Attempting to start pool, but it has already started");
- fassertFailed(34358);
+ LOGV2_FATAL(34358, "Attempting to start pool, but it has already started");
}
_started = true;
@@ -87,8 +86,7 @@ void NetworkInterfaceThreadPool::join() {
stdx::unique_lock<Latch> lk(_mutex);
if (_joining) {
- LOGV2_FATAL(23791, "Attempted to join pool more than once");
- fassertFailed(34357);
+ LOGV2_FATAL(34357, "Attempted to join pool more than once");
}
_joining = true;
diff --git a/src/mongo/executor/task_executor_test_common.cpp b/src/mongo/executor/task_executor_test_common.cpp
index 74966b97c3c..24b65c0952b 100644
--- a/src/mongo/executor/task_executor_test_common.cpp
+++ b/src/mongo/executor/task_executor_test_common.cpp
@@ -85,10 +85,9 @@ public:
CetRegistrationAgent(const std::string& name, ExecutorTestCaseFactory makeTest) {
auto& entry = executorTestCaseRegistry()[name];
if (entry) {
- LOGV2_FATAL(23924,
+ LOGV2_FATAL(28713,
"Multiple attempts to register ExecutorTest named {name}",
"name"_attr = name);
- fassertFailed(28713);
}
entry = std::move(makeTest);
}
diff --git a/src/mongo/idl/server_parameter.cpp b/src/mongo/idl/server_parameter.cpp
index 021f9bfdbdf..59954005c79 100644
--- a/src/mongo/idl/server_parameter.cpp
+++ b/src/mongo/idl/server_parameter.cpp
@@ -82,9 +82,9 @@ ServerParameterSet* ServerParameterSet::getGlobal() {
void ServerParameterSet::add(ServerParameter* sp) {
ServerParameter*& x = _map[sp->name()];
if (x) {
- LOGV2_FATAL(23784,
- "'{x_name}' already exists in the server parameter set.",
- "x_name"_attr = x->name());
+ LOGV2_FATAL_CONTINUE(23784,
+ "'{x_name}' already exists in the server parameter set.",
+ "x_name"_attr = x->name());
abort();
}
x = sp;
diff --git a/src/mongo/logger/logv2_appender.h b/src/mongo/logger/logv2_appender.h
index 4e8735f677f..966e907a439 100644
--- a/src/mongo/logger/logv2_appender.h
+++ b/src/mongo/logger/logv2_appender.h
@@ -113,7 +113,8 @@ public:
static_cast<std::underlying_type_t<logv2::LogTag::Value>>(logTagValue) |
static_cast<std::underlying_type_t<logv2::LogTag::Value>>(_tag))},
event.isTruncatable() ? logv2::LogTruncation::Enabled
- : logv2::LogTruncation::Disabled},
+ : logv2::LogTruncation::Disabled,
+ logv2::FatalMode::kContinue},
"{}",
"message"_attr = message);
diff --git a/src/mongo/logv2/README.md b/src/mongo/logv2/README.md
index 2b09f4f6c0c..970521a1e5f 100644
--- a/src/mongo/logv2/README.md
+++ b/src/mongo/logv2/README.md
@@ -158,6 +158,8 @@ LOGV2_OPTIONS(1003, {LogComponent::kCommand}, "Log event to specified component"
* `LOGV2_WARNING`
* `LOGV2_ERROR`
* `LOGV2_FATAL`
+* `LOGV2_FATAL_NOTRACE`
+* `LOGV2_FATAL_CONTINUE`
There is also variations that take `LogOptions` if needed:
@@ -165,7 +167,9 @@ There is also variations that take `LogOptions` if needed:
* `LOGV2_ERROR_OPTIONS`
* `LOGV2_FATAL_OPTIONS`
-Fatal level log statements perform `fassert` after logging, using the provided ID as assert id.
+Fatal level log statements using `LOGV2_FATAL` perform `fassert` after logging, using the provided ID as the assert id. `LOGV2_FATAL_NOTRACE` performs `fassertNoTrace`, and `LOGV2_FATAL_CONTINUE` does not `fassert` at all, allowing execution to continue. `LOGV2_FATAL_CONTINUE` is meant to be used when a fatal error has occurred but a different way of halting execution is desired, such as `std::terminate` or `fassertFailedWithStatus`.
+
+`LOGV2_FATAL_OPTIONS` performs `fassert` by default, like `LOGV2_FATAL`, but this can be changed by setting the `FatalMode` on the `LogOptions`.
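+
+A minimal sketch of how the fatal variants might be used (the IDs and surrounding code here are illustrative only):
+
+```cpp
+// Logs at fatal severity, then performs fassertNoTrace with the provided ID (no stack trace).
+LOGV2_FATAL_NOTRACE(1004, "Data corruption detected, shutting down");
+
+// Logs at fatal severity but continues executing, so the caller can halt in its own way.
+LOGV2_FATAL_CONTINUE(1005, "Caught exception during shutdown");
+std::terminate();
+
+// Logs at fatal severity with explicit LogOptions; the FatalMode selects the assert behavior.
+LOGV2_FATAL_OPTIONS(1006,
+                    logv2::LogOptions(logv2::FatalMode::kContinue),
+                    "Fatal error with continued execution");
+```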
Debug-level logging is slightly different where an additional parameter (as integer) required to indicate the desired debug level:
diff --git a/src/mongo/logv2/log.h b/src/mongo/logv2/log.h
index 5c4a2c1ab7d..7aec7e523be 100644
--- a/src/mongo/logv2/log.h
+++ b/src/mongo/logv2/log.h
@@ -79,6 +79,11 @@ const ::mongo::logv2::LogComponent MongoLogV2DefaultComponent_component =
namespace mongo {
+#define MAKE_OPTIONS_ARG2(ARG0, ARG1) \
+ ::mongo::logv2::LogOptions { \
+ ARG0, ARG1 \
+ }
+
#define LOGV2_IMPL(ID, SEVERITY, OPTIONS, MESSAGE, ...) \
::mongo::logv2::detail::doLog(ID, SEVERITY, OPTIONS, FMT_STRING(MESSAGE), ##__VA_ARGS__)
@@ -142,21 +147,54 @@ namespace mongo {
MESSAGE, \
##__VA_ARGS__)
-#define LOGV2_FATAL(ID, MESSAGE, ...) \
- LOGV2_IMPL(ID, \
- ::mongo::logv2::LogSeverity::Severe(), \
- ::mongo::logv2::LogOptions{MongoLogV2DefaultComponent_component}, \
- MESSAGE, \
- ##__VA_ARGS__)
+#define LOGV2_FATAL(ID, MESSAGE, ...) \
+ do { \
+ LOGV2_IMPL(ID, \
+ ::mongo::logv2::LogSeverity::Severe(), \
+ ::mongo::logv2::LogOptions{MongoLogV2DefaultComponent_component}, \
+ MESSAGE, \
+ ##__VA_ARGS__); \
+ fassertFailed(ID); \
+ } while (false)
-#define LOGV2_FATAL_OPTIONS(ID, OPTIONS, MESSAGE, ...) \
- LOGV2_IMPL(ID, \
- ::mongo::logv2::LogSeverity::Severe(), \
- ::mongo::logv2::LogOptions::ensureValidComponent( \
- OPTIONS, MongoLogV2DefaultComponent_component), \
- MESSAGE, \
+#define LOGV2_FATAL_NOTRACE(ID, MESSAGE, ...) \
+ do { \
+ LOGV2_IMPL(ID, \
+ ::mongo::logv2::LogSeverity::Severe(), \
+ MAKE_OPTIONS_ARG2(MongoLogV2DefaultComponent_component, \
+ ::mongo::logv2::FatalMode::kAssertNoTrace), \
+ MESSAGE, \
+ ##__VA_ARGS__); \
+ fassertFailedNoTrace(ID); \
+ } while (false)
+
+#define LOGV2_FATAL_CONTINUE(ID, MESSAGE, ...) \
+ LOGV2_IMPL(ID, \
+ ::mongo::logv2::LogSeverity::Severe(), \
+ MAKE_OPTIONS_ARG2(MongoLogV2DefaultComponent_component, \
+ ::mongo::logv2::FatalMode::kContinue), \
+ MESSAGE, \
##__VA_ARGS__)
+#define LOGV2_FATAL_OPTIONS(ID, OPTIONS, MESSAGE, ...) \
+ do { \
+ auto optionsMacroLocal_ = ::mongo::logv2::LogOptions::ensureValidComponent( \
+ OPTIONS, MongoLogV2DefaultComponent_component); \
+ LOGV2_IMPL(ID, \
+ ::mongo::logv2::LogSeverity::Severe(), \
+ optionsMacroLocal_, \
+ MESSAGE, \
+ ##__VA_ARGS__); \
+ switch (optionsMacroLocal_.fatalMode()) { \
+ case ::mongo::logv2::FatalMode::kAssert: \
+ fassertFailed(ID); \
+ case ::mongo::logv2::FatalMode::kAssertNoTrace: \
+ fassertFailedNoTrace(ID); \
+ case ::mongo::logv2::FatalMode::kContinue: \
+ break; \
+ }; \
+ } while (false)
+
#define LOGV2_DEBUG_OPTIONS(ID, DLEVEL, OPTIONS, MESSAGE, ...) \
do { \
auto severityMacroLocal_ = ::mongo::logv2::LogSeverity::Debug(DLEVEL); \
diff --git a/src/mongo/logv2/log_options.h b/src/mongo/logv2/log_options.h
index d03a7bf274b..cdf07b03ea5 100644
--- a/src/mongo/logv2/log_options.h
+++ b/src/mongo/logv2/log_options.h
@@ -44,6 +44,8 @@ public:
ErrorCodes::Error errorCode;
};
+enum class FatalMode { kAssert, kAssertNoTrace, kContinue };
+
class LogOptions {
public:
static LogOptions ensureValidComponent(LogOptions options, LogComponent component) {
@@ -55,6 +57,8 @@ public:
LogOptions(LogComponent component) : _component(component) {}
+ LogOptions(LogComponent component, FatalMode mode) : _component(component), _fatalMode(mode) {}
+
LogOptions(LogDomain* domain) : _domain(domain) {}
LogOptions(LogTag tags) : _tags(tags) {}
@@ -64,13 +68,23 @@ public:
LogOptions(UserAssertAfterLog uassertAfterLog)
: _userAssertErrorCode(uassertAfterLog.errorCode) {}
+ LogOptions(FatalMode mode) : _fatalMode(mode) {}
+
LogOptions(LogTag tags, LogTruncation truncation) : _tags(tags), _truncation(truncation) {}
LogOptions(LogComponent component, LogDomain* domain, LogTag tags)
: _domain(domain), _tags(tags), _component(component) {}
- LogOptions(LogComponent component, LogDomain* domain, LogTag tags, LogTruncation truncation)
- : _domain(domain), _tags(tags), _component(component), _truncation(truncation) {}
+ LogOptions(LogComponent component,
+ LogDomain* domain,
+ LogTag tags,
+ LogTruncation truncation,
+ FatalMode fatalMode)
+ : _domain(domain),
+ _tags(tags),
+ _component(component),
+ _truncation(truncation),
+ _fatalMode(fatalMode) {}
LogComponent component() const {
return _component;
@@ -92,12 +106,17 @@ public:
return _userAssertErrorCode;
}
+ FatalMode fatalMode() const {
+ return _fatalMode;
+ }
+
private:
LogDomain* _domain = &LogManager::global().getGlobalDomain();
LogTag _tags;
LogComponent _component = LogComponent::kAutomaticDetermination;
LogTruncation _truncation = constants::kDefaultTruncation;
ErrorCodes::Error _userAssertErrorCode = ErrorCodes::OK;
+ FatalMode _fatalMode = FatalMode::kAssert;
};
} // namespace logv2
diff --git a/src/mongo/rpc/op_msg.cpp b/src/mongo/rpc/op_msg.cpp
index 0ca4f491014..d29b5ac2578 100644
--- a/src/mongo/rpc/op_msg.cpp
+++ b/src/mongo/rpc/op_msg.cpp
@@ -297,12 +297,11 @@ Message OpMsgBuilder::finish() {
std::set<StringData> seenFields;
for (auto elem : resumeBody().asTempObj()) {
if (!(seenFields.insert(elem.fieldNameStringData()).second)) {
- LOGV2_FATAL(22633,
+ LOGV2_FATAL(40474,
"OP_MSG with duplicate field '{elem_fieldNameStringData}' : "
"{resumeBody_asTempObj}",
"elem_fieldNameStringData"_attr = elem.fieldNameStringData(),
"resumeBody_asTempObj"_attr = redact(resumeBody().asTempObj()));
- fassert(40474, false);
}
}
}
diff --git a/src/mongo/s/client/shard_registry.cpp b/src/mongo/s/client/shard_registry.cpp
index 9bfa02546e9..532f926c734 100644
--- a/src/mongo/s/client/shard_registry.cpp
+++ b/src/mongo/s/client/shard_registry.cpp
@@ -240,11 +240,10 @@ void ShardRegistry::startup(OperationContext* opCtx) {
}
if (!status.isOK()) {
- LOGV2_FATAL(22737,
+ LOGV2_FATAL(40252,
"Error scheduling shard registry reload caused by {error}",
"Error scheduling shard registry reload",
"error"_attr = redact(status.getStatus()));
- fassertFailed(40252);
}
}
@@ -285,11 +284,10 @@ void ShardRegistry::_internalReload(const CallbackArgs& cbArgs) {
}
if (!status.isOK()) {
- LOGV2_FATAL(22738,
+ LOGV2_FATAL(40253,
"Error scheduling shard registry reload caused by {error}",
"Error scheduling shard registry reload",
"error"_attr = redact(status.getStatus()));
- fassertFailed(40253);
}
}
diff --git a/src/mongo/s/server.cpp b/src/mongo/s/server.cpp
index 04600263475..281d7eecccf 100644
--- a/src/mongo/s/server.cpp
+++ b/src/mongo/s/server.cpp
@@ -818,11 +818,12 @@ ExitCode mongoSMain(int argc, char* argv[], char** envp) {
Status status = runGlobalInitializers(argc, argv, envp);
if (!status.isOK()) {
- LOGV2_FATAL_OPTIONS(22865,
- {logComponentV1toV2(LogComponent::kDefault)},
- "Error during global initialization: {error}",
- "Error during global initialization",
- "error"_attr = status);
+ LOGV2_FATAL_OPTIONS(
+ 22865,
+ logv2::LogOptions(logv2::LogComponent::kDefault, logv2::FatalMode::kContinue),
+ "Error during global initialization: {error}",
+ "Error during global initialization",
+ "error"_attr = status);
return EXIT_ABRUPT;
}
@@ -830,11 +831,12 @@ ExitCode mongoSMain(int argc, char* argv[], char** envp) {
setGlobalServiceContext(ServiceContext::make());
} catch (...) {
auto cause = exceptionToStatus();
- LOGV2_FATAL_OPTIONS(22866,
- {logComponentV1toV2(LogComponent::kDefault)},
- "Error creating service context: {error}",
- "Error creating service context",
- "error"_attr = redact(cause));
+ LOGV2_FATAL_OPTIONS(
+ 22866,
+ logv2::LogOptions(logv2::LogComponent::kDefault, logv2::FatalMode::kContinue),
+ "Error creating service context: {error}",
+ "Error creating service context",
+ "error"_attr = redact(cause));
return EXIT_ABRUPT;
}
diff --git a/src/mongo/s/sharding_task_executor.cpp b/src/mongo/s/sharding_task_executor.cpp
index aeb21e740db..704c4b45166 100644
--- a/src/mongo/s/sharding_task_executor.cpp
+++ b/src/mongo/s/sharding_task_executor.cpp
@@ -191,15 +191,15 @@ StatusWith<TaskExecutor::CallbackHandle> ShardingTaskExecutor::scheduleRemoteCom
}
if (isMongos() && args.response.status == ErrorCodes::IncompatibleWithUpgradedServer) {
- LOGV2_FATAL(22873,
- "This mongos server must be upgraded. It is attempting to communicate "
- "with "
- "an upgraded cluster with which it is incompatible. Error: "
- "'{args_response_status}' Crashing in order to bring attention to the "
- "incompatibility, rather "
- "than erroring endlessly.",
- "args_response_status"_attr = args.response.status.toString());
- fassertNoTrace(50710, false);
+ LOGV2_FATAL_NOTRACE(
+ 50710,
+ "This mongos server must be upgraded. It is attempting to communicate "
+ "with "
+ "an upgraded cluster with which it is incompatible. Error: "
+ "'{args_response_status}' Crashing in order to bring attention to the "
+ "incompatibility, rather "
+ "than erroring endlessly.",
+ "args_response_status"_attr = args.response.status.toString());
}
if (shard) {
@@ -238,7 +238,7 @@ StatusWith<TaskExecutor::CallbackHandle> ShardingTaskExecutor::scheduleRemoteCom
auto shardConn = ConnectionString::parse(target.toString());
if (!shardConn.isOK()) {
- LOGV2_FATAL(22874,
+ LOGV2_ERROR(22874,
"got bad host string in saveGLEStats: {target}",
"target"_attr = target);
}
diff --git a/src/mongo/shell/bench.cpp b/src/mongo/shell/bench.cpp
index 68967a7c05c..733bbd8c566 100644
--- a/src/mongo/shell/bench.cpp
+++ b/src/mongo/shell/bench.cpp
@@ -841,9 +841,9 @@ BenchRunWorker::~BenchRunWorker() {
// before returning from BenchRunWorker's destructor.
_thread.join();
} catch (...) {
- LOGV2_FATAL(22807,
- "caught exception in destructor: {exceptionToStatus}",
- "exceptionToStatus"_attr = exceptionToStatus());
+ LOGV2_FATAL_CONTINUE(22807,
+ "caught exception in destructor: {exceptionToStatus}",
+ "exceptionToStatus"_attr = exceptionToStatus());
std::terminate();
}
}
diff --git a/src/mongo/transport/baton_asio_linux.h b/src/mongo/transport/baton_asio_linux.h
index 6c102553599..5e8064c2805 100644
--- a/src/mongo/transport/baton_asio_linux.h
+++ b/src/mongo/transport/baton_asio_linux.h
@@ -328,10 +328,9 @@ public:
// If poll failed, it better be in EINTR
if (rval < 0 && errno != EINTR) {
- LOGV2_FATAL(23921,
+ LOGV2_FATAL(50834,
"error in poll: {errnoWithDescription_errno}",
"errnoWithDescription_errno"_attr = errnoWithDescription(errno));
- fassertFailed(50834);
}
}
diff --git a/src/mongo/transport/service_executor_test.cpp b/src/mongo/transport/service_executor_test.cpp
index 3284ce1304a..b33d10f30a4 100644
--- a/src/mongo/transport/service_executor_test.cpp
+++ b/src/mongo/transport/service_executor_test.cpp
@@ -102,10 +102,9 @@ public:
try {
_ioContext.run_for(time.toSystemDuration());
} catch (...) {
- LOGV2_FATAL(22985,
+ LOGV2_FATAL(50476,
"Uncaught exception in reactor: {exceptionToStatus}",
"exceptionToStatus"_attr = exceptionToStatus());
- fassertFailed(50476);
}
}
diff --git a/src/mongo/transport/transport_layer_asio.cpp b/src/mongo/transport/transport_layer_asio.cpp
index be8f40ec06a..daa753adabf 100644
--- a/src/mongo/transport/transport_layer_asio.cpp
+++ b/src/mongo/transport/transport_layer_asio.cpp
@@ -1044,11 +1044,10 @@ void TransportLayerASIO::_runListener() noexcept {
asio::error_code ec;
acceptor.second.listen(serverGlobalParams.listenBacklog, ec);
if (ec) {
- LOGV2_FATAL(23027,
+ LOGV2_FATAL(31339,
"Error listening for new connections on {acceptor_first}: {ec_message}",
"acceptor_first"_attr = acceptor.first,
"ec_message"_attr = ec.message());
- fassertFailed(31339);
}
_acceptConnection(acceptor.second);
diff --git a/src/mongo/unittest/death_test.cpp b/src/mongo/unittest/death_test.cpp
index 962a82649cf..5a41fbb41c0 100644
--- a/src/mongo/unittest/death_test.cpp
+++ b/src/mongo/unittest/death_test.cpp
@@ -54,7 +54,7 @@
do { \
if (-1 == (EXPR)) { \
const int err = errno; \
- LOGV2_FATAL(24138, \
+ LOGV2_ERROR(24138, \
"{expr} failed: {errno}", \
"expr"_attr = #EXPR, \
"errno"_attr = errnoWithDescription(err)); \
diff --git a/src/mongo/util/assert_util.cpp b/src/mongo/util/assert_util.cpp
index fcae3206449..c9dab10006a 100644
--- a/src/mongo/util/assert_util.cpp
+++ b/src/mongo/util/assert_util.cpp
@@ -96,7 +96,8 @@ MONGO_COMPILER_NOINLINE void verifyFailed(const char* expr, const char* file, un
breakpoint();
#if defined(MONGO_CONFIG_DEBUG_BUILD)
// this is so we notice in buildbot
- LOGV2_FATAL(23078, "\n\n***aborting after verify() failure as this is a debug/test build\n\n");
+ LOGV2_FATAL_CONTINUE(
+ 23078, "\n\n***aborting after verify() failure as this is a debug/test build\n\n");
std::abort();
#endif
error_details::throwExceptionForStatus(Status(ErrorCodes::UnknownError, temp.str()));
@@ -105,13 +106,13 @@ MONGO_COMPILER_NOINLINE void verifyFailed(const char* expr, const char* file, un
MONGO_COMPILER_NOINLINE void invariantFailed(const char* expr,
const char* file,
unsigned line) noexcept {
- LOGV2_FATAL(23079,
- "Invariant failure {expr} {file} {line}",
- "expr"_attr = expr,
- "file"_attr = file,
- "line"_attr = line);
+ LOGV2_FATAL_CONTINUE(23079,
+ "Invariant failure {expr} {file} {line}",
+ "expr"_attr = expr,
+ "file"_attr = file,
+ "line"_attr = line);
breakpoint();
- LOGV2_FATAL(23080, "\n\n***aborting after invariant() failure\n\n");
+ LOGV2_FATAL_CONTINUE(23080, "\n\n***aborting after invariant() failure\n\n");
std::abort();
}
@@ -119,14 +120,14 @@ MONGO_COMPILER_NOINLINE void invariantFailedWithMsg(const char* expr,
const std::string& msg,
const char* file,
unsigned line) noexcept {
- LOGV2_FATAL(23081,
- "Invariant failure {expr} {msg} {file} {line}",
- "expr"_attr = expr,
- "msg"_attr = msg,
- "file"_attr = file,
- "line"_attr = line);
+ LOGV2_FATAL_CONTINUE(23081,
+ "Invariant failure {expr} {msg} {file} {line}",
+ "expr"_attr = expr,
+ "msg"_attr = msg,
+ "file"_attr = file,
+ "line"_attr = line);
breakpoint();
- LOGV2_FATAL(23082, "\n\n***aborting after invariant() failure\n\n");
+ LOGV2_FATAL_CONTINUE(23082, "\n\n***aborting after invariant() failure\n\n");
std::abort();
}
@@ -134,14 +135,14 @@ MONGO_COMPILER_NOINLINE void invariantOKFailed(const char* expr,
const Status& status,
const char* file,
unsigned line) noexcept {
- LOGV2_FATAL(23083,
- "Invariant failure: {expr} resulted in status {status} at {file} {line}",
- "expr"_attr = expr,
- "status"_attr = redact(status),
- "file"_attr = file,
- "line"_attr = line);
+ LOGV2_FATAL_CONTINUE(23083,
+ "Invariant failure: {expr} resulted in status {status} at {file} {line}",
+ "expr"_attr = expr,
+ "status"_attr = redact(status),
+ "file"_attr = file,
+ "line"_attr = line);
breakpoint();
- LOGV2_FATAL(23084, "\n\n***aborting after invariant() failure\n\n");
+ LOGV2_FATAL_CONTINUE(23084, "\n\n***aborting after invariant() failure\n\n");
std::abort();
}
@@ -150,54 +151,55 @@ MONGO_COMPILER_NOINLINE void invariantOKFailedWithMsg(const char* expr,
const std::string& msg,
const char* file,
unsigned line) noexcept {
- LOGV2_FATAL(23085,
- "Invariant failure: {expr} {msg} resulted in status {status} at {file} {line}",
- "expr"_attr = expr,
- "msg"_attr = msg,
- "status"_attr = redact(status),
- "file"_attr = file,
- "line"_attr = line);
+ LOGV2_FATAL_CONTINUE(
+ 23085,
+ "Invariant failure: {expr} {msg} resulted in status {status} at {file} {line}",
+ "expr"_attr = expr,
+ "msg"_attr = msg,
+ "status"_attr = redact(status),
+ "file"_attr = file,
+ "line"_attr = line);
breakpoint();
- LOGV2_FATAL(23086, "\n\n***aborting after invariant() failure\n\n");
+ LOGV2_FATAL_CONTINUE(23086, "\n\n***aborting after invariant() failure\n\n");
std::abort();
}
MONGO_COMPILER_NOINLINE void invariantStatusOKFailed(const Status& status,
const char* file,
unsigned line) noexcept {
- LOGV2_FATAL(23087,
- "Invariant failure {status} at {file} {line}",
- "status"_attr = redact(status),
- "file"_attr = file,
- "line"_attr = line);
+ LOGV2_FATAL_CONTINUE(23087,
+ "Invariant failure {status} at {file} {line}",
+ "status"_attr = redact(status),
+ "file"_attr = file,
+ "line"_attr = line);
breakpoint();
- LOGV2_FATAL(23088, "\n\n***aborting after invariant() failure\n\n");
+ LOGV2_FATAL_CONTINUE(23088, "\n\n***aborting after invariant() failure\n\n");
std::abort();
}
MONGO_COMPILER_NOINLINE void fassertFailedWithLocation(int msgid,
const char* file,
unsigned line) noexcept {
- LOGV2_FATAL(23089,
- "Fatal Assertion {msgid} at {file} {line}",
- "msgid"_attr = msgid,
- "file"_attr = file,
- "line"_attr = line);
+ LOGV2_FATAL_CONTINUE(23089,
+ "Fatal Assertion {msgid} at {file} {line}",
+ "msgid"_attr = msgid,
+ "file"_attr = file,
+ "line"_attr = line);
breakpoint();
- LOGV2_FATAL(23090, "\n\n***aborting after fassert() failure\n\n");
+ LOGV2_FATAL_CONTINUE(23090, "\n\n***aborting after fassert() failure\n\n");
std::abort();
}
MONGO_COMPILER_NOINLINE void fassertFailedNoTraceWithLocation(int msgid,
const char* file,
unsigned line) noexcept {
- LOGV2_FATAL(23091,
- "Fatal Assertion {msgid} at {file} {line}",
- "msgid"_attr = msgid,
- "file"_attr = file,
- "line"_attr = line);
+ LOGV2_FATAL_CONTINUE(23091,
+ "Fatal Assertion {msgid} at {file} {line}",
+ "msgid"_attr = msgid,
+ "file"_attr = file,
+ "line"_attr = line);
breakpoint();
- LOGV2_FATAL(23092, "\n\n***aborting after fassert() failure\n\n");
+ LOGV2_FATAL_CONTINUE(23092, "\n\n***aborting after fassert() failure\n\n");
quickExit(EXIT_ABRUPT);
}
@@ -205,14 +207,14 @@ MONGO_COMPILER_NORETURN void fassertFailedWithStatusWithLocation(int msgid,
const Status& status,
const char* file,
unsigned line) noexcept {
- LOGV2_FATAL(23093,
- "Fatal assertion {msgid} {status} at {file} {line}",
- "msgid"_attr = msgid,
- "status"_attr = redact(status),
- "file"_attr = file,
- "line"_attr = line);
+ LOGV2_FATAL_CONTINUE(23093,
+ "Fatal assertion {msgid} {status} at {file} {line}",
+ "msgid"_attr = msgid,
+ "status"_attr = redact(status),
+ "file"_attr = file,
+ "line"_attr = line);
breakpoint();
- LOGV2_FATAL(23094, "\n\n***aborting after fassert() failure\n\n");
+ LOGV2_FATAL_CONTINUE(23094, "\n\n***aborting after fassert() failure\n\n");
std::abort();
}
@@ -220,14 +222,14 @@ MONGO_COMPILER_NORETURN void fassertFailedWithStatusNoTraceWithLocation(int msgi
const Status& status,
const char* file,
unsigned line) noexcept {
- LOGV2_FATAL(23095,
- "Fatal assertion {msgid} {status} at {file} {line}",
- "msgid"_attr = msgid,
- "status"_attr = redact(status),
- "file"_attr = file,
- "line"_attr = line);
+ LOGV2_FATAL_CONTINUE(23095,
+ "Fatal assertion {msgid} {status} at {file} {line}",
+ "msgid"_attr = msgid,
+ "status"_attr = redact(status),
+ "file"_attr = file,
+ "line"_attr = line);
breakpoint();
- LOGV2_FATAL(23096, "\n\n***aborting after fassert() failure\n\n");
+ LOGV2_FATAL_CONTINUE(23096, "\n\n***aborting after fassert() failure\n\n");
quickExit(EXIT_ABRUPT);
}
@@ -317,7 +319,7 @@ Status exceptionToStatus() noexcept {
<< boost::diagnostic_information(ex));
} catch (...) {
- LOGV2_FATAL(23097, "Caught unknown exception in exceptionToStatus()");
+ LOGV2_FATAL_CONTINUE(23097, "Caught unknown exception in exceptionToStatus()");
std::terminate();
}
}
diff --git a/src/mongo/util/concurrency/thread_pool.cpp b/src/mongo/util/concurrency/thread_pool.cpp
index 47051d2fb92..e6b38e4b90f 100644
--- a/src/mongo/util/concurrency/thread_pool.cpp
+++ b/src/mongo/util/concurrency/thread_pool.cpp
@@ -63,22 +63,20 @@ ThreadPool::Options cleanUpOptions(ThreadPool::Options&& options) {
options.threadNamePrefix = str::stream() << options.poolName << '-';
}
if (options.maxThreads < 1) {
- LOGV2_FATAL(23114,
+ LOGV2_FATAL(28702,
"Tried to create pool {options_poolName} with a maximum of "
"{options_maxThreads} but the maximum must be at least 1",
"options_poolName"_attr = options.poolName,
"options_maxThreads"_attr = options.maxThreads);
- fassertFailed(28702);
}
if (options.minThreads > options.maxThreads) {
LOGV2_FATAL(
- 23115,
+ 28686,
"Tried to create pool {options_poolName} with a minimum of {options_minThreads} which "
"is more than the configured maximum of {options_maxThreads}",
"options_poolName"_attr = options.poolName,
"options_minThreads"_attr = options.minThreads,
"options_maxThreads"_attr = options.maxThreads);
- fassertFailed(28686);
}
return {std::move(options)};
}
@@ -95,8 +93,7 @@ ThreadPool::~ThreadPool() {
}
if (shutdownComplete != _state) {
- LOGV2_FATAL(23116, "Failed to shutdown pool during destruction");
- fassertFailed(28704);
+ LOGV2_FATAL(28704, "Failed to shutdown pool during destruction");
}
invariant(_threads.empty());
invariant(_pendingTasks.empty());
@@ -105,10 +102,9 @@ ThreadPool::~ThreadPool() {
void ThreadPool::startup() {
stdx::lock_guard<Latch> lk(_mutex);
if (_state != preStart) {
- LOGV2_FATAL(23117,
+ LOGV2_FATAL(28698,
"Attempting to start pool {options_poolName}, but it has already started",
"options_poolName"_attr = _options.poolName);
- fassertFailed(28698);
}
_setState_inlock(running);
invariant(_threads.empty());
@@ -164,10 +160,9 @@ void ThreadPool::_join_inlock(stdx::unique_lock<Latch>* lk) {
return true;
case joining:
case shutdownComplete:
- LOGV2_FATAL(23118,
+ LOGV2_FATAL(28700,
"Attempted to join pool {options_poolName} more than once",
"options_poolName"_attr = _options.poolName);
- fassertFailed(28700);
}
MONGO_UNREACHABLE;
});
@@ -349,13 +344,13 @@ void ThreadPool::_consumeTasks() {
--_numIdleThreads;
if (_state != running) {
- LOGV2_FATAL(23119,
- "State of pool {options_poolName} is {static_cast_int32_t_state}, but expected "
- "{static_cast_int32_t_running}",
- "options_poolName"_attr = _options.poolName,
- "static_cast_int32_t_state"_attr = static_cast<int32_t>(_state),
- "static_cast_int32_t_running"_attr = static_cast<int32_t>(running));
- fassertFailedNoTrace(28701);
+ LOGV2_FATAL_NOTRACE(
+ 28701,
+ "State of pool {options_poolName} is {static_cast_int32_t_state}, but expected "
+ "{static_cast_int32_t_running}",
+ "options_poolName"_attr = _options.poolName,
+ "static_cast_int32_t_state"_attr = static_cast<int32_t>(_state),
+ "static_cast_int32_t_running"_attr = static_cast<int32_t>(running));
}
// This thread is ending because it was idle for too long. Find self in _threads, remove self
@@ -373,11 +368,10 @@ void ThreadPool::_consumeTasks() {
std::ostringstream threadId;
threadId << stdx::this_thread::get_id();
- LOGV2_FATAL(4615600,
- "Could not find this thread, with id {threadId} in pool {pool}",
- "threadId"_attr = threadId.str(),
- "pool"_attr = _options.poolName);
- fassertFailedNoTrace(28703);
+ LOGV2_FATAL_NOTRACE(28703,
+ "Could not find this thread, with id {threadId} in pool {pool}",
+                            "Could not find this thread with id {threadId} in pool {pool}",
+ "pool"_attr = _options.poolName);
}
void ThreadPool::_doOneTask(stdx::unique_lock<Latch>* lk) noexcept {
diff --git a/src/mongo/util/concurrency/thread_pool_test_common.cpp b/src/mongo/util/concurrency/thread_pool_test_common.cpp
index dcbc875e5cb..4be2d415a77 100644
--- a/src/mongo/util/concurrency/thread_pool_test_common.cpp
+++ b/src/mongo/util/concurrency/thread_pool_test_common.cpp
@@ -78,10 +78,9 @@ public:
TptRegistrationAgent(const std::string& name, ThreadPoolTestCaseFactory makeTest) {
auto& entry = threadPoolTestCaseRegistry()[name];
if (entry) {
- LOGV2_FATAL(23922,
+ LOGV2_FATAL(34355,
"Multiple attempts to register ThreadPoolTest named {name}",
"name"_attr = name);
- fassertFailed(34355);
}
entry = std::move(makeTest);
}
@@ -96,10 +95,9 @@ public:
TptDeathRegistrationAgent(const std::string& name, ThreadPoolTestCaseFactory makeTest) {
auto& entry = threadPoolTestCaseRegistry()[name];
if (entry) {
- LOGV2_FATAL(23923,
+ LOGV2_FATAL(34356,
"Multiple attempts to register ThreadPoolDeathTest named {name}",
"name"_attr = name);
- fassertFailed(34356);
}
entry = [makeTest](ThreadPoolFactory makeThreadPool) {
return std::make_unique<::mongo::unittest::DeathTest<T>>(std::move(makeThreadPool));
diff --git a/src/mongo/util/concurrency/ticketholder.cpp b/src/mongo/util/concurrency/ticketholder.cpp
index d087b215da4..869b895d2d7 100644
--- a/src/mongo/util/concurrency/ticketholder.cpp
+++ b/src/mongo/util/concurrency/ticketholder.cpp
@@ -47,10 +47,9 @@ namespace {
* Accepts an errno code, prints its error message, and exits.
*/
void failWithErrno(int err) {
- LOGV2_FATAL(23121,
+ LOGV2_FATAL(28604,
"error in Ticketholder: {errnoWithDescription_err}",
"errnoWithDescription_err"_attr = errnoWithDescription(err));
- fassertFailed(28604);
}
/*
diff --git a/src/mongo/util/exception_filter_win32.cpp b/src/mongo/util/exception_filter_win32.cpp
index 26efcd8819e..fd0bbb3055c 100644
--- a/src/mongo/util/exception_filter_win32.cpp
+++ b/src/mongo/util/exception_filter_win32.cpp
@@ -144,10 +144,11 @@ LONG WINAPI exceptionFilter(struct _EXCEPTION_POINTERS* excPointers) {
sizeof(addressString),
"0x%p",
excPointers->ExceptionRecord->ExceptionAddress);
- LOGV2_FATAL(23134,
- "*** unhandled exception {exceptionString} at {addressString}, terminating",
- "exceptionString"_attr = exceptionString,
- "addressString"_attr = addressString);
+ LOGV2_FATAL_CONTINUE(
+ 23134,
+ "*** unhandled exception {exceptionString} at {addressString}, terminating",
+ "exceptionString"_attr = exceptionString,
+ "addressString"_attr = addressString);
if (excPointers->ExceptionRecord->ExceptionCode == EXCEPTION_ACCESS_VIOLATION) {
ULONG acType = excPointers->ExceptionRecord->ExceptionInformation[0];
const char* acTypeString;
@@ -169,13 +170,13 @@ LONG WINAPI exceptionFilter(struct _EXCEPTION_POINTERS* excPointers) {
sizeof(addressString),
" 0x%llx",
excPointers->ExceptionRecord->ExceptionInformation[1]);
- LOGV2_FATAL(23135,
- "*** access violation was a {acTypeString}{addressString}",
- "acTypeString"_attr = acTypeString,
- "addressString"_attr = addressString);
+ LOGV2_FATAL_CONTINUE(23135,
+ "*** access violation was a {acTypeString}{addressString}",
+ "acTypeString"_attr = acTypeString,
+ "addressString"_attr = addressString);
}
- LOGV2_FATAL(23136, "*** stack trace for unhandled exception:");
+ LOGV2_FATAL_CONTINUE(23136, "*** stack trace for unhandled exception:");
// Create a copy of context record because printWindowsStackTrace will mutate it.
CONTEXT contextCopy(*(excPointers->ContextRecord));
@@ -186,7 +187,7 @@ LONG WINAPI exceptionFilter(struct _EXCEPTION_POINTERS* excPointers) {
// Don't go through normal shutdown procedure. It may make things worse.
// Do not go through _exit or ExitProcess(), terminate immediately
- LOGV2_FATAL(23137, "*** immediate exit due to unhandled exception");
+ LOGV2_FATAL_CONTINUE(23137, "*** immediate exit due to unhandled exception");
TerminateProcess(GetCurrentProcess(), EXIT_ABRUPT);
// We won't reach here
diff --git a/src/mongo/util/net/ssl_manager_openssl.cpp b/src/mongo/util/net/ssl_manager_openssl.cpp
index ebc43113fc0..5d7ec9831ab 100644
--- a/src/mongo/util/net/ssl_manager_openssl.cpp
+++ b/src/mongo/util/net/ssl_manager_openssl.cpp
@@ -2008,8 +2008,7 @@ bool SSLManagerOpenSSL::_parseAndValidateCertificate(const std::string& keyFile,
}
if ((notBeforeMillis > Date_t::now()) || (Date_t::now() > notAfterMillis)) {
- LOGV2_FATAL(23875, "The provided SSL certificate is expired or not yet valid.");
- fassertFailedNoTrace(28652);
+ LOGV2_FATAL_NOTRACE(28652, "The provided SSL certificate is expired or not yet valid.");
}
*serverCertificateExpirationDate = notAfterMillis;
diff --git a/src/mongo/util/net/ssl_manager_windows.cpp b/src/mongo/util/net/ssl_manager_windows.cpp
index a907c1da7bf..2dc29893f0c 100644
--- a/src/mongo/util/net/ssl_manager_windows.cpp
+++ b/src/mongo/util/net/ssl_manager_windows.cpp
@@ -402,8 +402,7 @@ SSLManagerWindows::SSLManagerWindows(const SSLParams& params, bool isServer)
BOOLEAN enabled = FALSE;
BCryptGetFipsAlgorithmMode(&enabled);
if (!enabled) {
- LOGV2_FATAL(23281, "FIPS modes is not enabled on the operating system.");
- fassertFailedNoTrace(50744);
+            LOGV2_FATAL_NOTRACE(50744, "FIPS mode is not enabled on the operating system.");
}
}
@@ -1598,8 +1597,7 @@ Status SSLManagerWindows::_validateCertificate(PCCERT_CONTEXT cert,
if ((FiletimeToULL(cert->pCertInfo->NotBefore) > currentTimeLong) ||
(currentTimeLong > FiletimeToULL(cert->pCertInfo->NotAfter))) {
- LOGV2_FATAL(23284, "The provided SSL certificate is expired or not yet valid.");
- fassertFailedNoTrace(50755);
+ LOGV2_FATAL_NOTRACE(50755, "The provided SSL certificate is expired or not yet valid.");
}
*serverCertificateExpirationDate =
diff --git a/src/mongo/util/signal_handlers.cpp b/src/mongo/util/signal_handlers.cpp
index 90b8107c13f..7634e0bd473 100644
--- a/src/mongo/util/signal_handlers.cpp
+++ b/src/mongo/util/signal_handlers.cpp
@@ -179,9 +179,9 @@ bool waitForSignal(const sigset_t& sigset, SignalWaitResult* result) {
if (result->sig == -1) {
if (errsv == EINTR)
continue;
- LOGV2_FATAL(23385,
- "sigwaitinfo failed with error:{strerror_errsv}",
- "strerror_errsv"_attr = strerror(errsv));
+ LOGV2_FATAL_CONTINUE(23385,
+                                 "sigwaitinfo failed with error: {strerror_errsv}",
+ "strerror_errsv"_attr = strerror(errsv));
return false;
}
return true;
@@ -279,10 +279,9 @@ void signalProcessingThread(LogFileStatus rotate) {
errno = 0;
if (int r = pthread_sigmask(SIG_SETMASK, &waitSignals, nullptr); r != 0) {
int errsv = errno;
- LOGV2_FATAL(23386,
+ LOGV2_FATAL(31377,
"pthread_sigmask failed with error:{strerror_errsv}",
"strerror_errsv"_attr = strerror(errsv));
- fassertFailed(31377);
}
#if defined(MONGO_STACKTRACE_CAN_DUMP_ALL_THREADS)
diff --git a/src/mongo/util/signal_handlers_synchronous.cpp b/src/mongo/util/signal_handlers_synchronous.cpp
index ac0df71c6ac..8b54c9ff21e 100644
--- a/src/mongo/util/signal_handlers_synchronous.cpp
+++ b/src/mongo/util/signal_handlers_synchronous.cpp
@@ -251,23 +251,21 @@ void myInvalidParameterHandler(const wchar_t* expression,
const wchar_t* file,
unsigned int line,
uintptr_t pReserved) {
- LOGV2_FATAL(23815,
- "Invalid parameter detected in function {toUtf8String_function} File: "
- "{toUtf8String_file} Line: {line}",
- "toUtf8String_function"_attr = toUtf8String(function),
- "toUtf8String_file"_attr = toUtf8String(file),
- "line"_attr = line);
- LOGV2_FATAL(23816,
- "Expression: {toUtf8String_expression}",
- "toUtf8String_expression"_attr = toUtf8String(expression));
- LOGV2_FATAL(23817, "immediate exit due to invalid parameter");
+ LOGV2_FATAL_CONTINUE(
+ 23815,
+ "Invalid parameter detected in function {function} File: "
+ "{file} Line: {line} Expression: {expression}. Immediate exit due to invalid parameter",
+ "function"_attr = toUtf8String(function),
+ "file"_attr = toUtf8String(file),
+ "line"_attr = line,
+ "expression"_attr = toUtf8String(expression));
abruptQuit(SIGABRT);
}
void myPureCallHandler() {
- LOGV2_FATAL(23818, "Pure call handler invoked");
- LOGV2_FATAL(23819, "immediate exit due to invalid pure call");
+ LOGV2_FATAL_CONTINUE(23818,
+ "Pure call handler invoked. Immediate exit due to invalid pure call");
abruptQuit(SIGABRT);
}
@@ -337,7 +335,7 @@ void setupSynchronousSignalHandlers() {
if (sigaction(spec.signal, &sa, nullptr) != 0) {
int savedErr = errno;
LOGV2_FATAL(
- 23820,
+ 31334,
"{format_FMT_STRING_Failed_to_install_signal_handler_for_signal_with_sigaction_"
"spec_signal_strerror_savedErr}",
"format_FMT_STRING_Failed_to_install_signal_handler_for_signal_with_sigaction_spec_signal_strerror_savedErr"_attr =
@@ -345,7 +343,6 @@ void setupSynchronousSignalHandlers() {
"Failed to install signal handler for signal {} with sigaction: {}"),
spec.signal,
strerror(savedErr)));
- fassertFailed(31334);
}
}
setupSIGTRAPforDebugger();
diff --git a/src/mongo/util/stacktrace_threads.cpp b/src/mongo/util/stacktrace_threads.cpp
index 7c36e68f592..d2d8e38539e 100644
--- a/src/mongo/util/stacktrace_threads.cpp
+++ b/src/mongo/util/stacktrace_threads.cpp
@@ -644,13 +644,12 @@ void initialize(int signal) {
if (sigaction(signal, &sa, nullptr) != 0) {
int savedErr = errno;
LOGV2_FATAL(
- 23397,
+ 31376,
"{format_FMT_STRING_Failed_to_install_sigaction_for_signal_signal_strerror_savedErr}",
"format_FMT_STRING_Failed_to_install_sigaction_for_signal_signal_strerror_savedErr"_attr =
format(FMT_STRING("Failed to install sigaction for signal {} ({})"),
signal,
strerror(savedErr)));
- fassertFailed(31376);
}
}
diff --git a/src/mongo/util/version.cpp b/src/mongo/util/version.cpp
index 79b6fb3432f..e90b1d4d4a1 100644
--- a/src/mongo/util/version.cpp
+++ b/src/mongo/util/version.cpp
@@ -124,8 +124,7 @@ const VersionInfoInterface& VersionInfoInterface::instance(NotEnabledAction acti
return fallbackVersionInfo;
}
- LOGV2_FATAL(23405, "Terminating because valid version info has not been configured");
- fassertFailed(40278);
+ LOGV2_FATAL(40278, "Terminating because valid version info has not been configured");
}
std::string VersionInfoInterface::makeVersionString(StringData binaryName) const {
diff --git a/src/mongo/util/version_impl.cpp b/src/mongo/util/version_impl.cpp
index 9273da65b47..f20bc23f006 100644
--- a/src/mongo/util/version_impl.cpp
+++ b/src/mongo/util/version_impl.cpp
@@ -88,8 +88,7 @@ public:
#error This targeted Windows version is not supported
#endif // NTDDI_VERSION
#else
- LOGV2_FATAL(23868, "VersionInfoInterface::targetMinOS is only available for Windows");
- fassertFailed(40277);
+ LOGV2_FATAL(40277, "VersionInfoInterface::targetMinOS is only available for Windows");
#endif
}
diff --git a/src/mongo/watchdog/watchdog.cpp b/src/mongo/watchdog/watchdog.cpp
index 401388c0d3d..3a98041bae0 100644
--- a/src/mongo/watchdog/watchdog.cpp
+++ b/src/mongo/watchdog/watchdog.cpp
@@ -160,9 +160,10 @@ void WatchdogPeriodicThread::doLoop() {
} catch (const DBException& e) {
// The only bad status is when we are in shutdown
if (!opCtx->getServiceContext()->getKillAllOperations()) {
- LOGV2_FATAL(23415,
- "Watchdog was interrupted, shutting down, reason: {e_toStatus}",
- "e_toStatus"_attr = e.toStatus());
+ LOGV2_FATAL_CONTINUE(
+ 23415,
+ "Watchdog was interrupted, shutting down, reason: {e_toStatus}",
+ "e_toStatus"_attr = e.toStatus());
exitCleanly(ExitCode::EXIT_ABRUPT);
}
@@ -351,18 +352,18 @@ void checkFile(OperationContext* opCtx, const boost::filesystem::path& file) {
NULL);
if (hFile == INVALID_HANDLE_VALUE) {
std::uint32_t gle = ::GetLastError();
- LOGV2_FATAL(
- 23416,
- "CreateFile failed for '{file_generic_string}' with error: {errnoWithDescription_gle}",
- "file_generic_string"_attr = file.generic_string(),
- "errnoWithDescription_gle"_attr = errnoWithDescription(gle));
+ LOGV2_FATAL_CONTINUE(23416,
+ "CreateFile failed for '{file_generic_string}' with error: "
+ "{errnoWithDescription_gle}",
+ "file_generic_string"_attr = file.generic_string(),
+ "errnoWithDescription_gle"_attr = errnoWithDescription(gle));
fassertNoTrace(4074, gle == 0);
}
DWORD bytesWrittenTotal;
if (!WriteFile(hFile, nowStr.c_str(), nowStr.size(), &bytesWrittenTotal, NULL)) {
std::uint32_t gle = ::GetLastError();
- LOGV2_FATAL(
+ LOGV2_FATAL_CONTINUE(
23417,
"WriteFile failed for '{file_generic_string}' with error: {errnoWithDescription_gle}",
"file_generic_string"_attr = file.generic_string(),
@@ -381,22 +382,22 @@ void checkFile(OperationContext* opCtx, const boost::filesystem::path& file) {
if (!FlushFileBuffers(hFile)) {
std::uint32_t gle = ::GetLastError();
- LOGV2_FATAL(23418,
- "FlushFileBuffers failed for '{file_generic_string}' with error: "
- "{errnoWithDescription_gle}",
- "file_generic_string"_attr = file.generic_string(),
- "errnoWithDescription_gle"_attr = errnoWithDescription(gle));
+ LOGV2_FATAL_CONTINUE(23418,
+ "FlushFileBuffers failed for '{file_generic_string}' with error: "
+ "{errnoWithDescription_gle}",
+ "file_generic_string"_attr = file.generic_string(),
+ "errnoWithDescription_gle"_attr = errnoWithDescription(gle));
fassertNoTrace(4076, gle == 0);
}
DWORD newOffset = SetFilePointer(hFile, 0, 0, FILE_BEGIN);
if (newOffset != 0) {
std::uint32_t gle = ::GetLastError();
- LOGV2_FATAL(23419,
- "SetFilePointer failed for '{file_generic_string}' with error: "
- "{errnoWithDescription_gle}",
- "file_generic_string"_attr = file.generic_string(),
- "errnoWithDescription_gle"_attr = errnoWithDescription(gle));
+ LOGV2_FATAL_CONTINUE(23419,
+ "SetFilePointer failed for '{file_generic_string}' with error: "
+ "{errnoWithDescription_gle}",
+ "file_generic_string"_attr = file.generic_string(),
+ "errnoWithDescription_gle"_attr = errnoWithDescription(gle));
fassertNoTrace(4077, gle == 0);
}
@@ -404,42 +405,41 @@ void checkFile(OperationContext* opCtx, const boost::filesystem::path& file) {
auto readBuffer = std::make_unique<char[]>(nowStr.size());
if (!ReadFile(hFile, readBuffer.get(), nowStr.size(), &bytesRead, NULL)) {
std::uint32_t gle = ::GetLastError();
- LOGV2_FATAL(23420,
- "ReadFile failed for '{file_generic_string}' with error: "
- "{errnoWithDescription_gle}",
- "file_generic_string"_attr = file.generic_string(),
- "errnoWithDescription_gle"_attr = errnoWithDescription(gle));
+ LOGV2_FATAL_CONTINUE(23420,
+ "ReadFile failed for '{file_generic_string}' with error: "
+ "{errnoWithDescription_gle}",
+ "file_generic_string"_attr = file.generic_string(),
+ "errnoWithDescription_gle"_attr = errnoWithDescription(gle));
fassertNoTrace(4078, gle == 0);
}
if (bytesRead != bytesWrittenTotal) {
- LOGV2_FATAL(23421,
- "Read wrong number of bytes for '{file_generic_string}' expected "
- "{bytesWrittenTotal} bytes but read {bytesRead} bytes",
- "file_generic_string"_attr = file.generic_string(),
- "bytesWrittenTotal"_attr = bytesWrittenTotal,
- "bytesRead"_attr = bytesRead);
- fassertNoTrace(50724, false);
+ LOGV2_FATAL_NOTRACE(50724,
+ "Read wrong number of bytes for '{file_generic_string}' expected "
+ "{bytesWrittenTotal} bytes but read {bytesRead} bytes",
+ "file_generic_string"_attr = file.generic_string(),
+ "bytesWrittenTotal"_attr = bytesWrittenTotal,
+ "bytesRead"_attr = bytesRead);
}
if (memcmp(nowStr.c_str(), readBuffer.get(), nowStr.size()) != 0) {
- LOGV2_FATAL(23422,
- "Read wrong string from file '{file_generic_string}{nowStr_size} bytes (in "
- "hex) '{toHexLower_nowStr_c_str_nowStr_size}' but read bytes "
- "'{toHexLower_readBuffer_get_bytesRead}'",
- "file_generic_string"_attr = file.generic_string(),
- "nowStr_size"_attr = nowStr.size(),
- "toHexLower_nowStr_c_str_nowStr_size"_attr =
- toHexLower(nowStr.c_str(), nowStr.size()),
- "toHexLower_readBuffer_get_bytesRead"_attr =
- toHexLower(readBuffer.get(), bytesRead));
- fassertNoTrace(50717, false);
+ LOGV2_FATAL_NOTRACE(
+ 50717,
+                "Read wrong string from file '{file_generic_string}' expected {nowStr_size} bytes (in "
+ "hex) '{toHexLower_nowStr_c_str_nowStr_size}' but read bytes "
+ "'{toHexLower_readBuffer_get_bytesRead}'",
+ "file_generic_string"_attr = file.generic_string(),
+ "nowStr_size"_attr = nowStr.size(),
+ "toHexLower_nowStr_c_str_nowStr_size"_attr =
+ toHexLower(nowStr.c_str(), nowStr.size()),
+ "toHexLower_readBuffer_get_bytesRead"_attr =
+ toHexLower(readBuffer.get(), bytesRead));
}
}
if (!CloseHandle(hFile)) {
std::uint32_t gle = ::GetLastError();
- LOGV2_FATAL(
+ LOGV2_FATAL_CONTINUE(
23423,
"CloseHandle failed for '{file_generic_string}' with error: {errnoWithDescription_gle}",
"file_generic_string"_attr = file.generic_string(),
@@ -468,7 +468,7 @@ void checkFile(OperationContext* opCtx, const boost::filesystem::path& file) {
int fd = open(file.generic_string().c_str(), O_RDWR | O_CREAT, S_IRUSR | S_IWUSR);
if (fd == -1) {
auto err = errno;
- LOGV2_FATAL(
+ LOGV2_FATAL_CONTINUE(
23424,
"open failed for '{file_generic_string}' with error: {errnoWithDescription_err}",
"file_generic_string"_attr = file.generic_string(),
@@ -486,7 +486,7 @@ void checkFile(OperationContext* opCtx, const boost::filesystem::path& file) {
continue;
}
- LOGV2_FATAL(
+ LOGV2_FATAL_CONTINUE(
23425,
"write failed for '{file_generic_string}' with error: {errnoWithDescription_err}",
"file_generic_string"_attr = file.generic_string(),
@@ -509,7 +509,7 @@ void checkFile(OperationContext* opCtx, const boost::filesystem::path& file) {
if (fsync(fd)) {
auto err = errno;
- LOGV2_FATAL(
+ LOGV2_FATAL_CONTINUE(
23426,
"fsync failed for '{file_generic_string}' with error: {errnoWithDescription_err}",
"file_generic_string"_attr = file.generic_string(),
@@ -528,17 +528,17 @@ void checkFile(OperationContext* opCtx, const boost::filesystem::path& file) {
continue;
}
- LOGV2_FATAL(
+ LOGV2_FATAL_CONTINUE(
23427,
"read failed for '{file_generic_string}' with error: {errnoWithDescription_err}",
"file_generic_string"_attr = file.generic_string(),
"errnoWithDescription_err"_attr = errnoWithDescription(err));
fassertNoTrace(4083, err == 0);
} else if (bytesReadInRead == 0) {
- LOGV2_FATAL(23428,
- "read failed for '{file_generic_string}' with unexpected end of file",
- "file_generic_string"_attr = file.generic_string());
- fassertNoTrace(50719, false);
+ LOGV2_FATAL_NOTRACE(
+ 50719,
+ "read failed for '{file_generic_string}' with unexpected end of file",
+ "file_generic_string"_attr = file.generic_string());
}
// Warn if the read was incomplete
@@ -555,22 +555,21 @@ void checkFile(OperationContext* opCtx, const boost::filesystem::path& file) {
}
if (memcmp(nowStr.c_str(), readBuffer.get(), nowStr.size()) != 0) {
- LOGV2_FATAL(23429,
- "Read wrong string from file '{file_generic_string}' expected {nowStr_size} "
- "bytes (in hex) '{toHexLower_nowStr_c_str_nowStr_size}' but read bytes "
- "'{toHexLower_readBuffer_get_bytesReadTotal}'",
- "file_generic_string"_attr = file.generic_string(),
- "nowStr_size"_attr = nowStr.size(),
- "toHexLower_nowStr_c_str_nowStr_size"_attr =
- toHexLower(nowStr.c_str(), nowStr.size()),
- "toHexLower_readBuffer_get_bytesReadTotal"_attr =
- toHexLower(readBuffer.get(), bytesReadTotal));
- fassertNoTrace(50718, false);
+ LOGV2_FATAL_NOTRACE(
+ 50718,
+ "Read wrong string from file '{file_generic_string}' expected {nowStr_size} "
+ "bytes (in hex) '{toHexLower_nowStr_c_str_nowStr_size}' but read bytes "
+ "'{toHexLower_readBuffer_get_bytesReadTotal}'",
+ "file_generic_string"_attr = file.generic_string(),
+ "nowStr_size"_attr = nowStr.size(),
+ "toHexLower_nowStr_c_str_nowStr_size"_attr = toHexLower(nowStr.c_str(), nowStr.size()),
+ "toHexLower_readBuffer_get_bytesReadTotal"_attr =
+ toHexLower(readBuffer.get(), bytesReadTotal));
}
if (close(fd)) {
auto err = errno;
- LOGV2_FATAL(
+ LOGV2_FATAL_CONTINUE(
23430,
"close failed for '{file_generic_string}' with error: {errnoWithDescription_err}",
"file_generic_string"_attr = file.generic_string(),