author     Amirsaman Memaripour <amirsaman.memaripour@mongodb.com>  2020-06-29 23:08:09 +0000
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>         2020-07-01 16:22:45 +0000
commit     d4e7ea57599b44353b5393afedee8ae5670837b3 (patch)
tree       2c0717c768faa292590bd6018d7db7038129a23e /src/mongo/db
parent     5ffbd8f8322651b4953f29da0cde9e31eab039d4 (diff)
download   mongo-d4e7ea57599b44353b5393afedee8ae5670837b3.tar.gz
SERVER-49151 Fix malformed LOGV2 and assertion IDs
Diffstat (limited to 'src/mongo/db')
-rw-r--r--  src/mongo/db/auth/sasl_mechanism_registry.cpp               |  2
-rw-r--r--  src/mongo/db/catalog/index_catalog_entry_impl.cpp           |  2
-rw-r--r--  src/mongo/db/catalog/index_consistency.cpp                  |  4
-rw-r--r--  src/mongo/db/catalog/validate_adaptor.cpp                   |  2
-rw-r--r--  src/mongo/db/commands/killoperations_common.h               |  2
-rw-r--r--  src/mongo/db/query/sbe_stage_builder_index_scan.cpp         |  4
-rw-r--r--  src/mongo/db/repl/repl_set_config.cpp                       |  2
-rw-r--r--  src/mongo/db/repl/replication_consistency_markers_impl.cpp  |  2
-rw-r--r--  src/mongo/db/repl/rollback_impl_test.cpp                    |  2
-rw-r--r--  src/mongo/db/repl/rollback_test_fixture.h                   |  2
-rw-r--r--  src/mongo/db/repl/storage_interface_impl.cpp                |  2
-rw-r--r--  src/mongo/db/s/collection_sharding_runtime.cpp              |  2
-rw-r--r--  src/mongo/db/s/migration_util.cpp                           | 16
-rw-r--r--  src/mongo/db/startup_warnings_common.cpp                    |  8
-rw-r--r--  src/mongo/db/storage/control/journal_flusher.cpp            |  4
-rw-r--r--  src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp    | 22
-rw-r--r--  src/mongo/db/ttl.cpp                                        |  4
17 files changed, 41 insertions, 41 deletions
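
Every ID touched below is an eight-digit value trimmed to the seven-digit form used by the surrounding LOGV2 and fassert call sites; the seven-digit convention is inferred from the replacements themselves rather than from any rule quoted in this change. A minimal standalone sketch of the kind of scan that would flag such IDs follows. It is hypothetical (not the project's own lint tooling) and only handles single-line call shapes, so IDs wrapped onto a continuation line, as in a few LOGV2_DEBUG and fassert calls in this diff, would slip past it.

// naive_id_scan.cpp -- flag LOGV2/fassert IDs with more than seven digits.
// Hypothetical standalone sketch, not MongoDB's lint tooling. Build and run:
//   c++ -std=c++17 naive_id_scan.cpp -o naive_id_scan && ./naive_id_scan < some_file.cpp
#include <iostream>
#include <regex>
#include <string>

int main() {
    // Matches the single-line call shapes seen in this diff, e.g.
    // LOGV2(4742901, ...), LOGV2_DEBUG(4718705, ...), fassert(4671203, ...).
    const std::regex callSite(R"((LOGV2[A-Z_]*|fassert)\s*\(\s*(\d+))");
    std::string line;
    for (long lineNo = 1; std::getline(std::cin, line); ++lineNo) {
        for (std::sregex_iterator it(line.begin(), line.end(), callSite), end; it != end; ++it) {
            const std::string id = (*it)[2];
            if (id.size() > 7) {  // e.g. 47429001 where 4742901 was intended
                std::cout << "line " << lineNo << ": suspicious ID " << id
                          << " in " << (*it)[1] << "(...)" << '\n';
            }
        }
    }
}
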
diff --git a/src/mongo/db/auth/sasl_mechanism_registry.cpp b/src/mongo/db/auth/sasl_mechanism_registry.cpp
index b2c47eed923..667fa7a811e 100644
--- a/src/mongo/db/auth/sasl_mechanism_registry.cpp
+++ b/src/mongo/db/auth/sasl_mechanism_registry.cpp
@@ -189,7 +189,7 @@ ServiceContext::ConstructorActionRegisterer SASLServerMechanismRegistryValidatio
for (const auto& mech : saslGlobalParams.authenticationMechanisms) {
auto it = std::find(supportedMechanisms.cbegin(), supportedMechanisms.cend(), mech);
if (it == supportedMechanisms.end()) {
- LOGV2_ERROR(47429001,
+ LOGV2_ERROR(4742901,
"SASL Mechanism '{mechanism}' is not supported",
"Unsupported SASL mechanism",
"mechanism"_attr = mech);
diff --git a/src/mongo/db/catalog/index_catalog_entry_impl.cpp b/src/mongo/db/catalog/index_catalog_entry_impl.cpp
index 5bf30200176..4b02e7b26e5 100644
--- a/src/mongo/db/catalog/index_catalog_entry_impl.cpp
+++ b/src/mongo/db/catalog/index_catalog_entry_impl.cpp
@@ -353,7 +353,7 @@ void IndexCatalogEntryImpl::_catalogSetMultikey(OperationContext* opCtx,
}
}
if (indexMetadataHasChanged && _queryInfo) {
- LOGV2_DEBUG(47187005,
+ LOGV2_DEBUG(4718705,
1,
"Index set to multi key, clearing query plan cache",
"namespace"_attr = collection->ns(),
diff --git a/src/mongo/db/catalog/index_consistency.cpp b/src/mongo/db/catalog/index_consistency.cpp
index d910669bf29..07289adf3a9 100644
--- a/src/mongo/db/catalog/index_consistency.cpp
+++ b/src/mongo/db/catalog/index_consistency.cpp
@@ -213,7 +213,7 @@ void IndexConsistency::addDocKey(OperationContext* opCtx,
indexInfo->numRecords++;
if (MONGO_unlikely(_validateState->extraLoggingForTest())) {
- LOGV2(46666002, "[validate](record) {hash_num}", "hash_num"_attr = hash);
+ LOGV2(4666602, "[validate](record) {hash_num}", "hash_num"_attr = hash);
const BSONObj& keyPatternBson = indexInfo->keyPattern;
auto keyStringBson = KeyString::toBsonSafe(
ks.getBuffer(), ks.getSize(), indexInfo->ord, ks.getTypeBits());
@@ -257,7 +257,7 @@ void IndexConsistency::addIndexKey(const KeyString::Value& ks,
indexInfo->numKeys++;
if (MONGO_unlikely(_validateState->extraLoggingForTest())) {
- LOGV2(46666003, "[validate](index) {hash_num}", "hash_num"_attr = hash);
+ LOGV2(4666603, "[validate](index) {hash_num}", "hash_num"_attr = hash);
const BSONObj& keyPatternBson = indexInfo->keyPattern;
auto keyStringBson = KeyString::toBsonSafe(
ks.getBuffer(), ks.getSize(), indexInfo->ord, ks.getTypeBits());
diff --git a/src/mongo/db/catalog/validate_adaptor.cpp b/src/mongo/db/catalog/validate_adaptor.cpp
index c4c2779ea88..145ffbcb982 100644
--- a/src/mongo/db/catalog/validate_adaptor.cpp
+++ b/src/mongo/db/catalog/validate_adaptor.cpp
@@ -75,7 +75,7 @@ Status ValidateAdaptor::validateRecord(OperationContext* opCtx,
}
if (MONGO_unlikely(_validateState->extraLoggingForTest())) {
- LOGV2(46666001, "[validate]", "recordId"_attr = recordId, "recordData"_attr = recordBson);
+ LOGV2(4666601, "[validate]", "recordId"_attr = recordId, "recordData"_attr = recordBson);
}
const Status status = validateBSON(recordBson.objdata(), recordBson.objsize());
diff --git a/src/mongo/db/commands/killoperations_common.h b/src/mongo/db/commands/killoperations_common.h
index faf0f7bea5c..6d18fe8e5a7 100644
--- a/src/mongo/db/commands/killoperations_common.h
+++ b/src/mongo/db/commands/killoperations_common.h
@@ -64,7 +64,7 @@ public:
auto opKeys = Base::request().getOperationKeys();
for (auto& opKey : opKeys) {
- LOGV2(46156011, "Attempting to kill operation", "operationKey"_attr = opKey);
+ LOGV2(4615602, "Attempting to kill operation", "operationKey"_attr = opKey);
opKiller.killOperation(OperationKey(opKey));
}
Derived::killCursors(opCtx, opKeys);
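
Most of the fixes simply drop the extra digit (47429001 becomes 4742901), but this call site is renumbered as well: 46156011 becomes 4615602 rather than 4615601, presumably because the plain truncation would have collided with an ID already in use and these IDs are expected to stay unique. A companion sketch to the scan above, again hypothetical rather than the project's actual tooling, shows how such collisions could be spotted while renumbering:

// duplicate_id_scan.cpp -- report LOGV2/fassert IDs that appear more than once.
// Hypothetical companion sketch; pipe the concatenated sources through stdin.
#include <iostream>
#include <regex>
#include <string>
#include <unordered_map>

int main() {
    const std::regex callSite(R"((?:LOGV2[A-Z_]*|fassert)\s*\(\s*(\d+))");
    std::unordered_map<std::string, int> uses;
    std::string line;
    while (std::getline(std::cin, line)) {
        for (std::sregex_iterator it(line.begin(), line.end(), callSite), end; it != end; ++it) {
            ++uses[(*it)[1].str()];  // count every ID we see
        }
    }
    for (const auto& [id, count] : uses) {
        if (count > 1) {
            std::cout << "ID " << id << " appears " << count << " times\n";
        }
    }
}
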
diff --git a/src/mongo/db/query/sbe_stage_builder_index_scan.cpp b/src/mongo/db/query/sbe_stage_builder_index_scan.cpp
index 3b7085e9e6a..d1d7959710b 100644
--- a/src/mongo/db/query/sbe_stage_builder_index_scan.cpp
+++ b/src/mongo/db/query/sbe_stage_builder_index_scan.cpp
@@ -212,11 +212,11 @@ makeIntervalsFromIndexBounds(const IndexBounds& bounds,
}();
LOGV2_DEBUG(
- 47429005, 5, "Number of generated interval(s) for ixscan", "num"_attr = intervals.size());
+ 4742905, 5, "Number of generated interval(s) for ixscan", "num"_attr = intervals.size());
std::vector<std::pair<std::unique_ptr<KeyString::Value>, std::unique_ptr<KeyString::Value>>>
result;
for (auto&& [lowKey, highKey] : intervals) {
- LOGV2_DEBUG(47429006,
+ LOGV2_DEBUG(4742906,
5,
"Generated interval [lowKey, highKey]",
"lowKey"_attr = lowKey,
diff --git a/src/mongo/db/repl/repl_set_config.cpp b/src/mongo/db/repl/repl_set_config.cpp
index c768cedd634..114fd84df18 100644
--- a/src/mongo/db/repl/repl_set_config.cpp
+++ b/src/mongo/db/repl/repl_set_config.cpp
@@ -538,7 +538,7 @@ void ReplSetConfig::_addInternalWriteConcernModes() {
} else if (status != ErrorCodes::NoSuchKey) {
// NoSuchKey means we have no $voter-tagged nodes in this config;
// other errors are unexpected.
- fassert(46712003, status);
+ fassert(4671203, status);
}
// $stepDownCheck: one electable node plus ourselves
diff --git a/src/mongo/db/repl/replication_consistency_markers_impl.cpp b/src/mongo/db/repl/replication_consistency_markers_impl.cpp
index df7fd8b19e8..a4d24555bf9 100644
--- a/src/mongo/db/repl/replication_consistency_markers_impl.cpp
+++ b/src/mongo/db/repl/replication_consistency_markers_impl.cpp
@@ -515,7 +515,7 @@ ReplicationConsistencyMarkersImpl::refreshOplogTruncateAfterPointIfPrimary(
invariant(truncateOplogEntryBSON, "Found no oplog entry lte " + truncateTimestamp.toString());
return fassert(
- 44555001,
+ 4455501,
OpTimeAndWallTime::parseOpTimeAndWallTimeFromOplogEntry(truncateOplogEntryBSON.get()));
}
diff --git a/src/mongo/db/repl/rollback_impl_test.cpp b/src/mongo/db/repl/rollback_impl_test.cpp
index 379fed5007f..29f3fc25984 100644
--- a/src/mongo/db/repl/rollback_impl_test.cpp
+++ b/src/mongo/db/repl/rollback_impl_test.cpp
@@ -665,7 +665,7 @@ TEST_F(RollbackImplTest, RollbackCallsRecoverToStableTimestamp) {
DEATH_TEST_REGEX_F(RollbackImplTest,
RollbackFassertsIfRecoverToStableTimestampFails,
- "Fatal assertion.*45847000") {
+ "Fatal assertion.*4584700") {
auto op = makeOpAndRecordId(1);
_remoteOplog->setOperations({op});
ASSERT_OK(_insertOplogEntry(op.first));
diff --git a/src/mongo/db/repl/rollback_test_fixture.h b/src/mongo/db/repl/rollback_test_fixture.h
index c421d17fa23..4754f7dcf26 100644
--- a/src/mongo/db/repl/rollback_test_fixture.h
+++ b/src/mongo/db/repl/rollback_test_fixture.h
@@ -158,7 +158,7 @@ public:
Timestamp recoverToStableTimestamp(OperationContext* opCtx) override {
stdx::lock_guard<Latch> lock(_mutex);
if (_recoverToTimestampStatus) {
- fassert(45847000, _recoverToTimestampStatus.get());
+ fassert(4584700, _recoverToTimestampStatus.get());
}
_currTimestamp = _stableTimestamp;
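
The fassert ID changed here, 4584700, is the same value embedded in the DEATH_TEST_REGEX_F pattern "Fatal assertion.*4584700" in rollback_impl_test.cpp above, which is why the fixture and the test change in lockstep. A small standalone illustration, with the wording of the fatal-assertion line assumed rather than taken from the server's output, shows how the pattern tracks the ID:

// regex_sync_check.cpp -- why the DEATH_TEST pattern and the fassert ID move together.
// The fatal-assertion wording below is assumed, not copied from the server.
#include <cassert>
#include <regex>
#include <string>

int main() {
    const std::string simulatedLog =
        "Fatal assertion 4584700 UnknownError at rollback_test_fixture.h";
    assert(std::regex_search(simulatedLog, std::regex("Fatal assertion.*4584700")));    // new ID matches
    assert(!std::regex_search(simulatedLog, std::regex("Fatal assertion.*45847000")));  // stale ID does not
}
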
diff --git a/src/mongo/db/repl/storage_interface_impl.cpp b/src/mongo/db/repl/storage_interface_impl.cpp
index 88546bd626d..47c0a5a7190 100644
--- a/src/mongo/db/repl/storage_interface_impl.cpp
+++ b/src/mongo/db/repl/storage_interface_impl.cpp
@@ -1119,7 +1119,7 @@ boost::optional<BSONObj> StorageInterfaceImpl::findOplogEntryLessThanOrEqualToTi
// This will log a message about the conflict initially and then every 5 seconds, with
// the current rather arbitrary settings.
if (retries % 10 == 0) {
- LOGV2(47959000,
+ LOGV2(4795900,
"Reading the oplog collection conflicts with a validate cmd. Continuing to "
"retry.",
"retries"_attr = retries);
diff --git a/src/mongo/db/s/collection_sharding_runtime.cpp b/src/mongo/db/s/collection_sharding_runtime.cpp
index 438898ce31f..dd1dacb099b 100644
--- a/src/mongo/db/s/collection_sharding_runtime.cpp
+++ b/src/mongo/db/s/collection_sharding_runtime.cpp
@@ -205,7 +205,7 @@ void CollectionShardingRuntime::setFilteringMetadata(OperationContext* opCtx,
void CollectionShardingRuntime::clearFilteringMetadata() {
stdx::lock_guard lk(_metadataManagerLock);
if (!isNamespaceAlwaysUnsharded(_nss)) {
- LOGV2_DEBUG(47985030,
+ LOGV2_DEBUG(4798530,
1,
"Clearing metadata for collection {namespace}",
"Clearing collection metadata",
diff --git a/src/mongo/db/s/migration_util.cpp b/src/mongo/db/s/migration_util.cpp
index 2fc106918cb..9a2a891f39d 100644
--- a/src/mongo/db/s/migration_util.cpp
+++ b/src/mongo/db/s/migration_util.cpp
@@ -787,7 +787,7 @@ void ensureChunkVersionIsGreaterThan(OperationContext* opCtx,
}
void resumeMigrationCoordinationsOnStepUp(OperationContext* opCtx) {
- LOGV2_DEBUG(47985010, 2, "Starting migration coordinator stepup recovery");
+ LOGV2_DEBUG(4798510, 2, "Starting migration coordinator stepup recovery");
unsigned long long unfinishedMigrationsCount = 0;
PersistentTaskStore<MigrationCoordinatorDocument> store(
@@ -797,7 +797,7 @@ void resumeMigrationCoordinationsOnStepUp(OperationContext* opCtx) {
query,
[&opCtx, &unfinishedMigrationsCount](const MigrationCoordinatorDocument& doc) {
unfinishedMigrationsCount++;
- LOGV2_DEBUG(47985011,
+ LOGV2_DEBUG(4798511,
3,
"Found unfinished migration on step-up",
"migrationCoordinatorDoc"_attr = redact(doc.toBSON()),
@@ -826,7 +826,7 @@ void resumeMigrationCoordinationsOnStepUp(OperationContext* opCtx) {
opCtx.get(), nss, boost::none /* shardVersionReceived */);
})
.onError([](const Status& status) {
- LOGV2_WARNING(47985012,
+ LOGV2_WARNING(4798512,
"Error on deferred shardVersion recovery execution",
"error"_attr = redact(status));
})
@@ -837,14 +837,14 @@ void resumeMigrationCoordinationsOnStepUp(OperationContext* opCtx) {
ShardingStatistics::get(opCtx).unfinishedMigrationFromPreviousPrimary.store(
unfinishedMigrationsCount);
- LOGV2_DEBUG(47985013,
+ LOGV2_DEBUG(4798513,
2,
"Finished migration coordinator stepup recovery",
"unfinishedMigrationsCount"_attr = unfinishedMigrationsCount);
}
void recoverMigrationCoordinations(OperationContext* opCtx, NamespaceString nss) {
- LOGV2_DEBUG(47985001, 2, "Starting migration recovery", "namespace"_attr = nss);
+ LOGV2_DEBUG(4798501, 2, "Starting migration recovery", "namespace"_attr = nss);
unsigned migrationRecoveryCount = 0;
PersistentTaskStore<MigrationCoordinatorDocument> store(
@@ -853,7 +853,7 @@ void recoverMigrationCoordinations(OperationContext* opCtx, NamespaceString nss)
opCtx,
QUERY(MigrationCoordinatorDocument::kNssFieldName << nss.toString()),
[&opCtx, &migrationRecoveryCount](const MigrationCoordinatorDocument& doc) {
- LOGV2_DEBUG(47985002,
+ LOGV2_DEBUG(4798502,
2,
"Recovering migration",
"migrationCoordinatorDocument"_attr = redact(doc.toBSON()));
@@ -912,7 +912,7 @@ void recoverMigrationCoordinations(OperationContext* opCtx, NamespaceString nss)
if (!currentMetadata.isSharded() ||
!currentMetadata.uuidMatches(doc.getCollectionUuid())) {
if (!currentMetadata.isSharded()) {
- LOGV2(47985003,
+ LOGV2(4798503,
"During migration recovery the collection was discovered to have been "
"dropped."
"Deleting the range deletion tasks on the donor and the recipient "
@@ -920,7 +920,7 @@ void recoverMigrationCoordinations(OperationContext* opCtx, NamespaceString nss)
"migrationCoordinatorDocument"_attr = redact(doc.toBSON()));
} else {
// UUID don't match
- LOGV2(47985004,
+ LOGV2(4798504,
"During migration recovery the collection was discovered to have been "
"dropped and recreated. Collection has a UUID that "
"does not match the one in the migration coordinator "
diff --git a/src/mongo/db/startup_warnings_common.cpp b/src/mongo/db/startup_warnings_common.cpp
index 3951a9e17d6..aa7dafc5cf8 100644
--- a/src/mongo/db/startup_warnings_common.cpp
+++ b/src/mongo/db/startup_warnings_common.cpp
@@ -51,7 +51,7 @@ bool CheckPrivilegeEnabled(const wchar_t* name) {
LUID luid;
if (!LookupPrivilegeValueW(nullptr, name, &luid)) {
auto str = errnoWithPrefix("Failed to LookupPrivilegeValue");
- LOGV2_WARNING(47187001, "{str}", "str"_attr = str);
+ LOGV2_WARNING(4718701, "{str}", "str"_attr = str);
return false;
}
@@ -59,7 +59,7 @@ bool CheckPrivilegeEnabled(const wchar_t* name) {
HANDLE accessToken;
if (!OpenProcessToken(GetCurrentProcess(), TOKEN_QUERY, &accessToken)) {
auto str = errnoWithPrefix("Failed to OpenProcessToken");
- LOGV2_WARNING(47187002, "{str}", "str"_attr = str);
+ LOGV2_WARNING(4718702, "{str}", "str"_attr = str);
return false;
}
@@ -75,7 +75,7 @@ bool CheckPrivilegeEnabled(const wchar_t* name) {
if (!PrivilegeCheck(accessToken, &privileges, &ret)) {
auto str = errnoWithPrefix("Failed to PrivilegeCheck");
- LOGV2_WARNING(47187003, "{str}", "str"_attr = str);
+ LOGV2_WARNING(4718703, "{str}", "str"_attr = str);
return false;
}
@@ -168,7 +168,7 @@ void logCommonStartupWarnings(const ServerGlobalParams& serverParams) {
#ifdef _WIN32
if (!CheckPrivilegeEnabled(SE_INC_WORKING_SET_NAME)) {
LOGV2_OPTIONS(
- 47187004,
+ 4718704,
{logv2::LogTag::kStartupWarnings},
"SeIncreaseWorkingSetPrivilege privilege is not granted to the process. Secure memory "
"allocation for SCRAM and/or Encrypted Storage Engine may fail.");
diff --git a/src/mongo/db/storage/control/journal_flusher.cpp b/src/mongo/db/storage/control/journal_flusher.cpp
index 1252ac9a4ea..c5e8629b055 100644
--- a/src/mongo/db/storage/control/journal_flusher.cpp
+++ b/src/mongo/db/storage/control/journal_flusher.cpp
@@ -75,7 +75,7 @@ void JournalFlusher::set(ServiceContext* serviceCtx, std::unique_ptr<JournalFlus
void JournalFlusher::run() {
ThreadClient tc(name(), getGlobalServiceContext());
- LOGV2_DEBUG(45847001, 1, "starting {name} thread", "name"_attr = name());
+ LOGV2_DEBUG(4584701, 1, "starting {name} thread", "name"_attr = name());
// Initialize the thread's opCtx.
_uniqueCtx.emplace(tc->makeOperationContext());
@@ -131,7 +131,7 @@ void JournalFlusher::run() {
_flushJournalNow = false;
if (_shuttingDown) {
- LOGV2_DEBUG(45847002, 1, "stopping {name} thread", "name"_attr = name());
+ LOGV2_DEBUG(4584702, 1, "stopping {name} thread", "name"_attr = name());
_nextSharedPromise->setError(
Status(ErrorCodes::ShutdownInProgress, "The storage catalog is being closed."));
stdx::lock_guard<Latch> lk(_opCtxMutex);
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp
index afba93255b2..3fa21ec4f8d 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp
@@ -647,7 +647,7 @@ WiredTigerKVEngine::WiredTigerKVEngine(const std::string& canonicalName,
"config"_attr = config);
int ret = wiredtiger_open(
path.c_str(), _eventHandler.getWtEventHandler(), config.c_str(), &_conn);
- LOGV2(47959011, "Recovery complete", "duration"_attr = Date_t::now() - start);
+ LOGV2(4795911, "Recovery complete", "duration"_attr = Date_t::now() - start);
if (ret == EINVAL) {
fassertFailedNoTrace(28717);
} else if (ret != 0) {
@@ -656,7 +656,7 @@ WiredTigerKVEngine::WiredTigerKVEngine(const std::string& canonicalName,
}
start = Date_t::now();
invariantWTOK(_conn->close(_conn, nullptr));
- LOGV2(47959010,
+ LOGV2(4795910,
"WiredTiger closed. Removing journal files",
"duration"_attr = Date_t::now() - start);
// After successful recovery, remove the journal directory.
@@ -672,7 +672,7 @@ WiredTigerKVEngine::WiredTigerKVEngine(const std::string& canonicalName,
"duration"_attr = Date_t::now() - start);
throw;
}
- LOGV2(47959008, "Journal files removed", "duration"_attr = Date_t::now() - start);
+ LOGV2(4795908, "Journal files removed", "duration"_attr = Date_t::now() - start);
}
// This setting overrides the earlier setting because it is later in the config string.
ss << ",log=(enabled=false),";
@@ -682,7 +682,7 @@ WiredTigerKVEngine::WiredTigerKVEngine(const std::string& canonicalName,
LOGV2(22315, "Opening WiredTiger", "config"_attr = config);
auto startTime = Date_t::now();
_openWiredTiger(path, config);
- LOGV2(47959006, "WiredTiger opened", "duration"_attr = Date_t::now() - startTime);
+ LOGV2(4795906, "WiredTiger opened", "duration"_attr = Date_t::now() - startTime);
_eventHandler.setStartupSuccessful();
_wtOpenConfig = config;
@@ -808,7 +808,7 @@ void WiredTigerKVEngine::_openWiredTiger(const std::string& path, const std::str
StorageRepairObserver::get(getGlobalServiceContext())->onRepairDone(nullptr);
}
LOGV2_FATAL_NOTRACE(
- 46712005,
+ 4671205,
"This version of MongoDB is too recent to start up on the existing data files. "
"Try MongoDB 4.2 or earlier.");
}
@@ -928,23 +928,23 @@ void WiredTigerKVEngine::cleanShutdown() {
"Closing WiredTiger in preparation for reconfiguring",
"closeConfig"_attr = closeConfig);
invariantWTOK(_conn->close(_conn, closeConfig.c_str()));
- LOGV2(47959005, "WiredTiger closed", "duration"_attr = Date_t::now() - startTime);
+ LOGV2(4795905, "WiredTiger closed", "duration"_attr = Date_t::now() - startTime);
startTime = Date_t::now();
invariantWTOK(wiredtiger_open(
_path.c_str(), _eventHandler.getWtEventHandler(), _wtOpenConfig.c_str(), &_conn));
- LOGV2(47959004, "WiredTiger re-opened", "duration"_attr = Date_t::now() - startTime);
+ LOGV2(4795904, "WiredTiger re-opened", "duration"_attr = Date_t::now() - startTime);
startTime = Date_t::now();
LOGV2(22325, "Reconfiguring", "newConfig"_attr = _fileVersion.getDowngradeString());
invariantWTOK(_conn->reconfigure(_conn, _fileVersion.getDowngradeString().c_str()));
- LOGV2(47959003, "Reconfigure complete", "duration"_attr = Date_t::now() - startTime);
+ LOGV2(4795903, "Reconfigure complete", "duration"_attr = Date_t::now() - startTime);
}
auto startTime = Date_t::now();
- LOGV2(47959002, "Closing WiredTiger", "closeConfig"_attr = closeConfig);
+ LOGV2(4795902, "Closing WiredTiger", "closeConfig"_attr = closeConfig);
invariantWTOK(_conn->close(_conn, closeConfig.c_str()));
- LOGV2(47959001, "WiredTiger closed", "duration"_attr = Date_t::now() - startTime);
+ LOGV2(4795901, "WiredTiger closed", "duration"_attr = Date_t::now() - startTime);
_conn = nullptr;
}
@@ -1589,7 +1589,7 @@ Status WiredTigerKVEngine::recoverOrphanedIdent(OperationContext* opCtx,
WT_SESSION* session = sessionWrapper.getSession();
status =
wtRCToStatus(session->salvage(session, _uri(ident).c_str(), nullptr), "Salvage failed: ");
- LOGV2(47959007, "Salvage complete", "duration"_attr = Date_t::now() - start);
+ LOGV2(4795907, "Salvage complete", "duration"_attr = Date_t::now() - start);
if (status.isOK()) {
return {ErrorCodes::DataModifiedByRepair,
str::stream() << "Salvaged data for ident " << ident};
diff --git a/src/mongo/db/ttl.cpp b/src/mongo/db/ttl.cpp
index 9eda88a8abe..e224b6876e3 100644
--- a/src/mongo/db/ttl.cpp
+++ b/src/mongo/db/ttl.cpp
@@ -155,14 +155,14 @@ public:
* Signals the thread to quit and then waits until it does.
*/
void shutdown() {
- LOGV2(36841000, "Shutting down TTL collection monitor thread");
+ LOGV2(3684100, "Shutting down TTL collection monitor thread");
{
stdx::lock_guard<Latch> lk(_stateMutex);
_shuttingDown = true;
_shuttingDownCV.notify_one();
}
wait();
- LOGV2(36841001, "Finished shutting down TTL collection monitor thread");
+ LOGV2(3684101, "Finished shutting down TTL collection monitor thread");
}
private: