author     mathisbessamdb <mathis.bessa@mongodb.com>          2023-03-28 14:20:34 +0000
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>   2023-03-28 17:09:46 +0000
commit     19578207f09b56f17d7603e7b982f2b88e0a9bc5 (patch)
tree       e8bc35f417708d8e14103c8d75b36cf18b87e00b
parent     058896012d3dc2cffeeb07cbae69d2eab8c278a1 (diff)
download   mongo-19578207f09b56f17d7603e7b982f2b88e0a9bc5.tar.gz
SERVER-74485 Always include tenant in db name attributes in log lines
-rw-r--r--  buildscripts/resmokeconfig/suites/native_tenant_data_isolation_with_dollar_tenant_jscore_passthrough.yml  |   3
-rw-r--r--  src/mongo/db/auth/authorization_manager_impl.cpp                  |   2
-rw-r--r--  src/mongo/db/catalog/catalog_control.cpp                          |   3
-rw-r--r--  src/mongo/db/catalog/collection_catalog.cpp                       |   2
-rw-r--r--  src/mongo/db/catalog/database_holder_impl.cpp                     |   2
-rw-r--r--  src/mongo/db/catalog/database_impl.cpp                            |   8
-rw-r--r--  src/mongo/db/catalog/drop_database.cpp                            |  20
-rw-r--r--  src/mongo/db/catalog/views_for_database.cpp                       |   5
-rw-r--r--  src/mongo/db/commands/authentication_commands.cpp                 |   2
-rw-r--r--  src/mongo/db/commands/kill_op_cmd_base.cpp                        |   2
-rw-r--r--  src/mongo/db/commands/profile_common.cpp                          |   2
-rw-r--r--  src/mongo/db/database_name.h                                      |  10
-rw-r--r--  src/mongo/db/database_name_test.cpp                               |  21
-rw-r--r--  src/mongo/db/index_builds_coordinator.cpp                         |  12
-rw-r--r--  src/mongo/db/index_builds_coordinator_mongod.cpp                  |   2
-rw-r--r--  src/mongo/db/keys_collection_client_direct.cpp                    |   2
-rw-r--r--  src/mongo/db/repair.cpp                                           |   4
-rw-r--r--  src/mongo/db/repl/all_database_cloner.cpp                         |   2
-rw-r--r--  src/mongo/db/repl/apply_ops.cpp                                   |   6
-rw-r--r--  src/mongo/db/repl/oplog.cpp                                       |   8
-rw-r--r--  src/mongo/db/repl/replication_coordinator_external_state_impl.cpp |   2
-rw-r--r--  src/mongo/db/repl/rollback_impl.cpp                               |   4
-rw-r--r--  src/mongo/db/repl/tenant_migration_access_blocker_util.cpp        |   2
-rw-r--r--  src/mongo/db/s/config/configsvr_run_restore_command.cpp           |   8
-rw-r--r--  src/mongo/db/s/database_sharding_state.cpp                        |   4
-rw-r--r--  src/mongo/db/s/move_primary_coordinator.cpp                       |  14
-rw-r--r--  src/mongo/db/s/move_primary_coordinator_no_resilient.cpp          |   6
-rw-r--r--  src/mongo/db/s/range_deleter_service.cpp                          |   2
-rw-r--r--  src/mongo/db/s/shard_filtering_metadata_refresh.cpp               |  12
-rw-r--r--  src/mongo/db/s/shardsvr_drop_database_participant_command.cpp     |   2
-rw-r--r--  src/mongo/db/startup_recovery.cpp                                 |   2
-rw-r--r--  src/mongo/db/storage/storage_util.cpp                             |   4
-rw-r--r--  src/mongo/db/ttl.cpp                                              |   2
33 files changed, 102 insertions, 80 deletions
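
The pattern repeated across these files is mechanical: hand-written db name attributes ("db"_attr = dbName, "database"_attr = dbname, and variants) are replaced by the shared logAttrs(dbName) helper, so tenant-aware rendering of database names happens in exactly one place. As a before/after illustration, here is the authorization_manager_impl.cpp change from the first hunk below (LOGV2_DEBUG and logAttrs are logv2 names taken from this diff; the snippet is an excerpt, not standalone code):

    // Before: attribute key and string form chosen ad hoc at each call
    // site; the tenantId is dropped from the logged name.
    LOGV2_DEBUG(20236, 2, "Invalidating all users from database", "database"_attr = dbname);

    // After: the shared helper always emits a "db" attribute rendered
    // through toStringWithTenantId(), so the tenant is included.
    LOGV2_DEBUG(20236, 2, "Invalidating all users from database", logAttrs(dbname));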
diff --git a/buildscripts/resmokeconfig/suites/native_tenant_data_isolation_with_dollar_tenant_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/native_tenant_data_isolation_with_dollar_tenant_jscore_passthrough.yml
index f11a99af5b8..13cc52a24cb 100644
--- a/buildscripts/resmokeconfig/suites/native_tenant_data_isolation_with_dollar_tenant_jscore_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/native_tenant_data_isolation_with_dollar_tenant_jscore_passthrough.yml
@@ -82,6 +82,9 @@ selector:
# TODO SERVER-73023 The tenantId is not attached to the namespace provided to failcommand
# failpoint
- jstests/core/failcommand_failpoint.js
+ # This test looks for the presence of a log line that contains a db name. Injecting a tenantId in
+ # the requests causes the test to fail due to a mismatch.
+ - jstests/core/api/apitest_db_profile_level.js
executor:
archive:
diff --git a/src/mongo/db/auth/authorization_manager_impl.cpp b/src/mongo/db/auth/authorization_manager_impl.cpp
index 48d464814e6..5af116369c9 100644
--- a/src/mongo/db/auth/authorization_manager_impl.cpp
+++ b/src/mongo/db/auth/authorization_manager_impl.cpp
@@ -496,7 +496,7 @@ void AuthorizationManagerImpl::invalidateUserByName(OperationContext* opCtx,
void AuthorizationManagerImpl::invalidateUsersFromDB(OperationContext* opCtx,
const DatabaseName& dbname) {
- LOGV2_DEBUG(20236, 2, "Invalidating all users from database", "database"_attr = dbname);
+ LOGV2_DEBUG(20236, 2, "Invalidating all users from database", logAttrs(dbname));
_updateCacheGeneration();
_authSchemaVersionCache.invalidateAll();
_userCache.invalidateKeyIf([&](const UserRequest& userRequest) {
diff --git a/src/mongo/db/catalog/catalog_control.cpp b/src/mongo/db/catalog/catalog_control.cpp
index 4b9de30f076..23817a12d0e 100644
--- a/src/mongo/db/catalog/catalog_control.cpp
+++ b/src/mongo/db/catalog/catalog_control.cpp
@@ -71,8 +71,7 @@ void reopenAllDatabasesAndReloadCollectionCatalog(OperationContext* opCtx,
auto databaseHolder = DatabaseHolder::get(opCtx);
std::vector<DatabaseName> databasesToOpen = storageEngine->listDatabases();
for (auto&& dbName : databasesToOpen) {
- LOGV2_FOR_RECOVERY(
- 23992, 1, "openCatalog: dbholder reopening database", "db"_attr = dbName);
+ LOGV2_FOR_RECOVERY(23992, 1, "openCatalog: dbholder reopening database", logAttrs(dbName));
auto db = databaseHolder->openDb(opCtx, dbName);
invariant(db, str::stream() << "failed to reopen database " << dbName.toString());
for (auto&& collNss : catalogWriter.value()->getAllCollectionNamesFromDb(opCtx, dbName)) {
diff --git a/src/mongo/db/catalog/collection_catalog.cpp b/src/mongo/db/catalog/collection_catalog.cpp
index faa32873155..008296e9041 100644
--- a/src/mongo/db/catalog/collection_catalog.cpp
+++ b/src/mongo/db/catalog/collection_catalog.cpp
@@ -780,7 +780,7 @@ void CollectionCatalog::reloadViews(OperationContext* opCtx, const DatabaseName&
return;
}
- LOGV2_DEBUG(22546, 1, "Reloading view catalog for database", "db"_attr = dbName.toString());
+ LOGV2_DEBUG(22546, 1, "Reloading view catalog for database", logAttrs(dbName));
ViewsForDatabase viewsForDb;
auto status = viewsForDb.reload(opCtx, CollectionPtr(_lookupSystemViews(opCtx, dbName)));
diff --git a/src/mongo/db/catalog/database_holder_impl.cpp b/src/mongo/db/catalog/database_holder_impl.cpp
index 84cd79d584f..fccd39a60a0 100644
--- a/src/mongo/db/catalog/database_holder_impl.cpp
+++ b/src/mongo/db/catalog/database_holder_impl.cpp
@@ -243,7 +243,7 @@ void DatabaseHolderImpl::close(OperationContext* opCtx, const DatabaseName& dbNa
}
auto db = it->second;
- LOGV2_DEBUG(20311, 2, "DatabaseHolder::close", "db"_attr = dbName);
+ LOGV2_DEBUG(20311, 2, "DatabaseHolder::close", logAttrs(dbName));
CollectionCatalog::write(
opCtx, [&](CollectionCatalog& catalog) { catalog.onCloseDatabase(opCtx, dbName); });
diff --git a/src/mongo/db/catalog/database_impl.cpp b/src/mongo/db/catalog/database_impl.cpp
index b650ba2c7f6..9b30c7bb968 100644
--- a/src/mongo/db/catalog/database_impl.cpp
+++ b/src/mongo/db/catalog/database_impl.cpp
@@ -162,10 +162,8 @@ void DatabaseImpl::init(OperationContext* const opCtx) {
Status status = validateDBName(_name.db());
if (!status.isOK()) {
- LOGV2_WARNING(20325,
- "tried to open invalid db: {name}",
- "Tried to open invalid db",
- "db"_attr = _name);
+ LOGV2_WARNING(
+ 20325, "tried to open invalid db: {name}", "Tried to open invalid db", logAttrs(_name));
uasserted(10028, status.toString());
}
@@ -245,7 +243,7 @@ void DatabaseImpl::init(OperationContext* const opCtx) {
} catch (const ExceptionFor<ErrorCodes::InvalidViewDefinition>& e) {
LOGV2_WARNING(6260805,
"Failed to access the view catalog during restore",
- "db"_attr = _name,
+ logAttrs(_name),
"reason"_attr = e.reason());
}
}
diff --git a/src/mongo/db/catalog/drop_database.cpp b/src/mongo/db/catalog/drop_database.cpp
index 8840ee75782..2cea3539f3c 100644
--- a/src/mongo/db/catalog/drop_database.cpp
+++ b/src/mongo/db/catalog/drop_database.cpp
@@ -122,7 +122,7 @@ void _finishDropDatabase(OperationContext* opCtx,
LOGV2(20336,
"dropDatabase {dbName} - finished, dropped {numCollections} collection(s)",
"dropDatabase",
- "db"_attr = dbName,
+ logAttrs(dbName),
"numCollectionsDropped"_attr = numCollections);
});
}
@@ -172,10 +172,8 @@ Status _dropDatabase(OperationContext* opCtx, const DatabaseName& dbName, bool a
<< "The database is currently being dropped. Database: " << dbName);
}
- LOGV2(20337,
- "dropDatabase {dbName} - starting",
- "dropDatabase - starting",
- "db"_attr = dbName);
+ LOGV2(
+ 20337, "dropDatabase {dbName} - starting", "dropDatabase - starting", logAttrs(dbName));
db->setDropPending(opCtx, true);
// If Database::dropCollectionEventIfSystem() fails, we should reset the drop-pending state
@@ -245,7 +243,7 @@ Status _dropDatabase(OperationContext* opCtx, const DatabaseName& dbName, bool a
LOGV2(7193700,
"dropDatabase {dbName} - dropping collection: {nss}",
"dropDatabase - dropping collection",
- "db"_attr = dbName,
+ logAttrs(dbName),
"namespace"_attr = nss);
writeConflictRetry(opCtx, "dropDatabase_views_collection", nss.ns(), [&] {
@@ -272,7 +270,7 @@ Status _dropDatabase(OperationContext* opCtx, const DatabaseName& dbName, bool a
LOGV2(20338,
"dropDatabase {dbName} - dropping collection: {nss}",
"dropDatabase - dropping collection",
- "db"_attr = dbName,
+ logAttrs(dbName),
"namespace"_attr = nss);
if (nss.isDropPendingNamespace() && replCoord->isReplEnabled() &&
@@ -280,7 +278,7 @@ Status _dropDatabase(OperationContext* opCtx, const DatabaseName& dbName, bool a
LOGV2(20339,
"dropDatabase {dbName} - found drop-pending collection: {nss}",
"dropDatabase - found drop-pending collection",
- "db"_attr = dbName,
+ logAttrs(dbName),
"namespace"_attr = nss);
latestDropPendingOpTime = std::max(
latestDropPendingOpTime, uassertStatusOK(nss.getDropPendingNamespaceOpTime()));
@@ -373,7 +371,7 @@ Status _dropDatabase(OperationContext* opCtx, const DatabaseName& dbName, bool a
"{dropDatabaseWriteConcern}. Dropping {numCollectionsToDrop} collection(s), with "
"last collection drop at {latestDropPendingOpTime}",
"dropDatabase waiting for replication and dropping collections",
- "db"_attr = dbName,
+ logAttrs(dbName),
"awaitOpTime"_attr = awaitOpTime,
"dropDatabaseWriteConcern"_attr = dropDatabaseWriteConcern.toBSON(),
"numCollectionsToDrop"_attr = numCollectionsToDrop,
@@ -387,7 +385,7 @@ Status _dropDatabase(OperationContext* opCtx, const DatabaseName& dbName, bool a
"dropDatabase {dbName} waiting for {awaitOpTime} to be replicated at "
"{userWriteConcern}",
"dropDatabase waiting for replication",
- "db"_attr = dbName,
+ logAttrs(dbName),
"awaitOpTime"_attr = awaitOpTime,
"writeConcern"_attr = userWriteConcern.toBSON());
result = replCoord->awaitReplication(opCtx, awaitOpTime, userWriteConcern);
@@ -405,7 +403,7 @@ Status _dropDatabase(OperationContext* opCtx, const DatabaseName& dbName, bool a
"dropDatabase {dbName} - successfully dropped {numCollectionsToDrop} collection(s) "
"(most recent drop optime: {awaitOpTime}) after {result_duration}. dropping database",
"dropDatabase - successfully dropped collections",
- "db"_attr = dbName,
+ logAttrs(dbName),
"numCollectionsDropped"_attr = numCollectionsToDrop,
"mostRecentDropOpTime"_attr = awaitOpTime,
"duration"_attr = result.duration);
diff --git a/src/mongo/db/catalog/views_for_database.cpp b/src/mongo/db/catalog/views_for_database.cpp
index 07706e1f0d8..ac17cf47838 100644
--- a/src/mongo/db/catalog/views_for_database.cpp
+++ b/src/mongo/db/catalog/views_for_database.cpp
@@ -123,7 +123,7 @@ Status ViewsForDatabase::reload(OperationContext* opCtx, const CollectionPtr& sy
!status.isOK()) {
LOGV2(22547,
"Could not load view catalog for database",
- "db"_attr = systemViews->ns().dbName(),
+ logAttrs(systemViews->ns().dbName()),
"error"_attr = status);
return status;
@@ -169,8 +169,7 @@ Status ViewsForDatabase::insert(OperationContext* opCtx,
}
if (auto status = _upsertIntoMap(opCtx, std::move(view)); !status.isOK()) {
- LOGV2(
- 5387000, "Could not insert view", "db"_attr = viewName.dbName(), "error"_attr = status);
+ LOGV2(5387000, "Could not insert view", logAttrs(viewName.dbName()), "error"_attr = status);
return status;
}
diff --git a/src/mongo/db/commands/authentication_commands.cpp b/src/mongo/db/commands/authentication_commands.cpp
index 1c8d96e1dd3..c8fdf9e9be7 100644
--- a/src/mongo/db/commands/authentication_commands.cpp
+++ b/src/mongo/db/commands/authentication_commands.cpp
@@ -283,7 +283,7 @@ AuthenticateReply authCommand(OperationContext* opCtx,
"client"_attr = client->getRemote(),
"mechanism"_attr = mechanism,
"user"_attr = user,
- "db"_attr = dbname);
+ logAttrs(dbname));
}
session->metrics()->restart();
diff --git a/src/mongo/db/commands/kill_op_cmd_base.cpp b/src/mongo/db/commands/kill_op_cmd_base.cpp
index 49e277ae925..ebae3fcbc0a 100644
--- a/src/mongo/db/commands/kill_op_cmd_base.cpp
+++ b/src/mongo/db/commands/kill_op_cmd_base.cpp
@@ -69,7 +69,7 @@ void KillOpCmdBase::reportSuccessfulCompletion(OperationContext* opCtx,
}
}
- attr.add("db", dbName.db());
+ attr.add("db", dbName);
attr.add("command", cmdObj);
LOGV2(558700, "Successful killOp", attr);
diff --git a/src/mongo/db/commands/profile_common.cpp b/src/mongo/db/commands/profile_common.cpp
index ba546e62067..94875fcbd70 100644
--- a/src/mongo/db/commands/profile_common.cpp
+++ b/src/mongo/db/commands/profile_common.cpp
@@ -133,7 +133,7 @@ bool ProfileCmdBase::run(OperationContext* opCtx,
newState.append("filter"_sd, newSettings.filter->serialize());
}
attrs.add("to", newState.obj());
- attrs.add("db", dbName.db());
+ attrs.add("db", dbName);
LOGV2(48742, "Profiler settings changed", attrs);
}
diff --git a/src/mongo/db/database_name.h b/src/mongo/db/database_name.h
index bc1457f192e..e0821c83bd2 100644
--- a/src/mongo/db/database_name.h
+++ b/src/mongo/db/database_name.h
@@ -153,6 +153,14 @@ public:
return _dbString;
}
+ /**
+ * Method to be used only when logging a DatabaseName in a log message.
+ * It is called anytime a DatabaseName is logged by logAttrs or otherwise.
+ */
+ friend std::string toStringForLogging(const DatabaseName& dbName) {
+ return dbName.toStringWithTenantId();
+ }
+
bool equalCaseInsensitive(const DatabaseName& other) const {
return (_tenantId == other._tenantId) && boost::iequals(toString(), other.toString());
}
@@ -198,7 +206,7 @@ public:
}
friend auto logAttrs(const DatabaseName& obj) {
- return "databaseName"_attr = obj;
+ return "db"_attr = obj;
}
private:
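
The database_name.h hunk above is the core of the change: logAttrs() now names its attribute "db" (matching what the hand-written call sites used), and the new toStringForLogging() friend routes rendering through toStringWithTenantId(). Both are friend functions that the logging layer finds by argument-dependent lookup. The following is a minimal self-contained sketch of that mechanism, under stated assumptions: Attr is a hypothetical stand-in for mongo's attribute machinery, and tenantId is modeled as a plain string rather than boost::optional<TenantId>:

    #include <iostream>
    #include <string>

    // Hypothetical stand-in for a logv2 attribute (name/value pair).
    struct Attr {
        std::string name;
        std::string value;
    };

    struct DatabaseName {
        std::string tenantId;  // empty when no tenant is set
        std::string db;

        std::string toString() const { return db; }
        std::string toStringWithTenantId() const {
            return tenantId.empty() ? db : tenantId + "_" + db;
        }

        // Customization point: how this type renders inside a log line.
        friend std::string toStringForLogging(const DatabaseName& dbName) {
            return dbName.toStringWithTenantId();
        }

        // Shared attribute builder, found via ADL: every call site gets
        // the same "db" key and tenant-aware rendering.
        friend Attr logAttrs(const DatabaseName& obj) {
            return Attr{"db", toStringForLogging(obj)};
        }
    };

    int main() {
        const DatabaseName dbName{"tenantA", "myLongDbName"};
        const Attr attr = logAttrs(dbName);
        std::cout << attr.name << ": " << attr.value << '\n';  // db: tenantA_myLongDbName
    }

With both helpers defined as friends, a call site using logAttrs() cannot drift to an inconsistent attribute name or a tenant-less string form, and even a hand-written "database"_attr is rendered through toStringForLogging(), as the new unit test below verifies.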
diff --git a/src/mongo/db/database_name_test.cpp b/src/mongo/db/database_name_test.cpp
index b9068667242..b03eff51de4 100644
--- a/src/mongo/db/database_name_test.cpp
+++ b/src/mongo/db/database_name_test.cpp
@@ -30,9 +30,12 @@
#include "mongo/db/database_name.h"
#include "mongo/db/server_feature_flags_gen.h"
#include "mongo/idl/server_parameter_test_util.h"
+#include "mongo/logv2/log.h"
#include "mongo/unittest/death_test.h"
#include "mongo/unittest/unittest.h"
+#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault
+
namespace mongo {
namespace {
@@ -151,5 +154,23 @@ TEST(DatabaseNameTest, VerifyCompareFunction) {
ASSERT(dbn3a != dbn1a);
ASSERT(dbn1a != dbn2a);
}
+
+TEST(DatabaseNameTest, CheckDatabaseNameLogAttrs) {
+ TenantId tenantId(OID::gen());
+ DatabaseName dbWithTenant(tenantId, "myLongDbName");
+ startCapturingLogMessages();
+ LOGV2(7448500, "Msg db:", logAttrs(dbWithTenant));
+
+ ASSERT_EQUALS(1,
+ countBSONFormatLogLinesIsSubset(
+ BSON("attr" << BSON("db" << dbWithTenant.toStringWithTenantId()))));
+
+ LOGV2(7448501, "Msg database:", "database"_attr = dbWithTenant);
+ ASSERT_EQUALS(1,
+ countBSONFormatLogLinesIsSubset(
+ BSON("attr" << BSON("database" << dbWithTenant.toStringWithTenantId()))));
+ stopCapturingLogMessages();
+}
+
} // namespace
} // namespace mongo
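
The new test pins down both halves of the behavior: logAttrs(dbWithTenant) must surface the name under the "db" key with the tenant included, and a hand-written "database"_attr must pick up the same tenant-aware rendering via toStringForLogging(). Matching with countBSONFormatLogLinesIsSubset keeps the assertions independent of whatever other fields appear on the captured log lines. With toStringWithTenantId() the logged value should look like "<tenantId>_myLongDbName" (the tenant OID string, then the db name), though the exact separator is an implementation detail of DatabaseName.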
diff --git a/src/mongo/db/index_builds_coordinator.cpp b/src/mongo/db/index_builds_coordinator.cpp
index b52c93ea088..8e98a2afc26 100644
--- a/src/mongo/db/index_builds_coordinator.cpp
+++ b/src/mongo/db/index_builds_coordinator.cpp
@@ -213,7 +213,7 @@ void removeIndexBuildEntryAfterCommitOrAbort(OperationContext* opCtx,
"Failed to remove index build from system collection",
"buildUUID"_attr = replState.buildUUID,
"collectionUUID"_attr = replState.collectionUUID,
- "db"_attr = replState.dbName,
+ logAttrs(replState.dbName),
"indexNames"_attr = replState.indexNames,
"indexSpecs"_attr = replState.indexSpecs,
"error"_attr = status);
@@ -861,7 +861,7 @@ void IndexBuildsCoordinator::_abortTenantIndexBuilds(
LOGV2(4886204,
"Index build: failed to abort index build for tenant migration",
"buildUUID"_attr = replState->buildUUID,
- "db"_attr = replState->dbName,
+ logAttrs(replState->dbName),
"collectionUUID"_attr = replState->collectionUUID,
"buildAction"_attr = indexBuildActionStr);
buildsWaitingToFinish.push_back(replState);
@@ -871,7 +871,7 @@ void IndexBuildsCoordinator::_abortTenantIndexBuilds(
LOGV2(6221600,
"Waiting on the index build to unregister before continuing the tenant migration.",
"buildUUID"_attr = replState->buildUUID,
- "db"_attr = replState->dbName,
+ logAttrs(replState->dbName),
"collectionUUID"_attr = replState->collectionUUID,
"buildAction"_attr = indexBuildActionStr);
awaitNoIndexBuildInProgressForCollection(
@@ -988,7 +988,7 @@ void IndexBuildsCoordinator::abortUserIndexBuildsForUserWriteBlocking(OperationC
"Index build: failed to abort index build for write blocking, will wait for "
"completion instead",
"buildUUID"_attr = replState->buildUUID,
- "db"_attr = replState->dbName,
+ logAttrs(replState->dbName),
"collectionUUID"_attr = replState->collectionUUID);
buildsWaitingToFinish.push_back(replState);
}
@@ -1001,7 +1001,7 @@ void IndexBuildsCoordinator::abortUserIndexBuildsForUserWriteBlocking(OperationC
LOGV2(6511602,
"Waiting on index build to finish for user write blocking",
"buildUUID"_attr = replState->buildUUID,
- "db"_attr = replState->dbName,
+ logAttrs(replState->dbName),
"collectionUUID"_attr = replState->collectionUUID);
awaitNoIndexBuildInProgressForCollection(
opCtx, replState->collectionUUID, replState->protocol);
@@ -3231,7 +3231,7 @@ IndexBuildsCoordinator::CommitResult IndexBuildsCoordinator::_insertKeysFromSide
"Index build failed while not primary",
"buildUUID"_attr = replState->buildUUID,
"collectionUUID"_attr = replState->collectionUUID,
- "db"_attr = replState->dbName,
+ logAttrs(replState->dbName),
"error"_attr = status);
}
diff --git a/src/mongo/db/index_builds_coordinator_mongod.cpp b/src/mongo/db/index_builds_coordinator_mongod.cpp
index 10804d19e31..4b3d1b3897f 100644
--- a/src/mongo/db/index_builds_coordinator_mongod.cpp
+++ b/src/mongo/db/index_builds_coordinator_mongod.cpp
@@ -686,7 +686,7 @@ void IndexBuildsCoordinatorMongod::_signalPrimaryForAbortAndWaitForExternalAbort
LOGV2(7419402,
"Index build: signaling primary to abort index build",
"buildUUID"_attr = replState->buildUUID,
- "db"_attr = replState->dbName,
+ logAttrs(replState->dbName),
"collectionUUID"_attr = replState->collectionUUID,
"reason"_attr = abortStatus);
replState->requestAbortFromPrimary(abortStatus);
diff --git a/src/mongo/db/keys_collection_client_direct.cpp b/src/mongo/db/keys_collection_client_direct.cpp
index ebe5b667a42..5d20db41bbe 100644
--- a/src/mongo/db/keys_collection_client_direct.cpp
+++ b/src/mongo/db/keys_collection_client_direct.cpp
@@ -190,7 +190,7 @@ Status KeysCollectionClientDirect::_insert(OperationContext* opCtx,
2,
"Batch write command to {nss_db}failed with retriable error and will be "
"retried{causedBy_writeStatus}",
- "nss_db"_attr = nss.db(),
+ logAttrs(nss.dbName()),
"causedBy_writeStatus"_attr = causedBy(redact(writeStatus)));
continue;
}
diff --git a/src/mongo/db/repair.cpp b/src/mongo/db/repair.cpp
index 46b22c064e8..68336b8816e 100644
--- a/src/mongo/db/repair.cpp
+++ b/src/mongo/db/repair.cpp
@@ -146,7 +146,7 @@ Status repairDatabase(OperationContext* opCtx, StorageEngine* engine, const Data
invariant(opCtx->lockState()->isW());
invariant(dbName.db().find('.') == std::string::npos);
- LOGV2(21029, "repairDatabase", "db"_attr = dbName);
+ LOGV2(21029, "repairDatabase", logAttrs(dbName));
opCtx->checkForInterrupt();
@@ -163,7 +163,7 @@ Status repairDatabase(OperationContext* opCtx, StorageEngine* engine, const Data
LOGV2_FATAL_CONTINUE(21030,
"Failed to repair database {dbName}: {status_reason}",
"Failed to repair database",
- "db"_attr = dbName,
+ logAttrs(dbName),
"error"_attr = status);
}
diff --git a/src/mongo/db/repl/all_database_cloner.cpp b/src/mongo/db/repl/all_database_cloner.cpp
index 7bfa4fd4a43..bd3049d565d 100644
--- a/src/mongo/db/repl/all_database_cloner.cpp
+++ b/src/mongo/db/repl/all_database_cloner.cpp
@@ -251,7 +251,7 @@ void AllDatabaseCloner::postStage() {
1,
"Skipping the recording of initial sync data size metrics due "
"to failure in the 'dbStats' command",
- "db"_attr = dbName,
+ logAttrs(dbName),
"status"_attr = status);
}
}
diff --git a/src/mongo/db/repl/apply_ops.cpp b/src/mongo/db/repl/apply_ops.cpp
index c5326d53be0..55cbc1a317d 100644
--- a/src/mongo/db/repl/apply_ops.cpp
+++ b/src/mongo/db/repl/apply_ops.cpp
@@ -256,11 +256,7 @@ Status applyOps(OperationContext* opCtx,
return Status(ErrorCodes::NotWritablePrimary,
str::stream() << "Not primary while applying ops to database " << dbName);
- LOGV2_DEBUG(5854600,
- 2,
- "applyOps command",
- "dbName"_attr = redact(dbName.toStringWithTenantId()),
- "cmd"_attr = redact(applyOpCmd));
+ LOGV2_DEBUG(5854600, 2, "applyOps command", logAttrs(dbName), "cmd"_attr = redact(applyOpCmd));
auto hasDropDatabase = std::any_of(
info.getOperations().begin(), info.getOperations().end(), [](const BSONObj& op) {
diff --git a/src/mongo/db/repl/oplog.cpp b/src/mongo/db/repl/oplog.cpp
index 6a02782960e..190d263c91a 100644
--- a/src/mongo/db/repl/oplog.cpp
+++ b/src/mongo/db/repl/oplog.cpp
@@ -2265,7 +2265,7 @@ Status applyCommand_inlock(OperationContext* opCtx,
1,
"Conflicting DDL operation encountered during initial sync; "
"aborting index build and retrying",
- "db"_attr = nss.db());
+ logAttrs(nss.dbName()));
break;
}
case ErrorCodes::BackgroundOperationInProgressForNamespace: {
@@ -2308,7 +2308,7 @@ Status applyCommand_inlock(OperationContext* opCtx,
"application",
"Failed command during oplog application",
"command"_attr = redact(o),
- "db"_attr = nss.db(),
+ logAttrs(nss.dbName()),
"error"_attr = status);
return status;
}
@@ -2317,7 +2317,7 @@ Status applyCommand_inlock(OperationContext* opCtx,
status.code() != ErrorCodes::IndexNotFound) {
LOGV2_WARNING(2170000,
"Acceptable error during oplog application",
- "db"_attr = nss.db(),
+ logAttrs(nss.dbName()),
"error"_attr = status,
"oplogEntry"_attr = redact(op->toBSONForLogging()));
opCounters->gotAcceptableErrorInCommand();
@@ -2325,7 +2325,7 @@ Status applyCommand_inlock(OperationContext* opCtx,
LOGV2_DEBUG(51776,
1,
"Acceptable error during oplog application",
- "db"_attr = nss.db(),
+ logAttrs(nss.dbName()),
"error"_attr = status,
"oplogEntry"_attr = redact(op->toBSONForLogging()));
}
diff --git a/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp b/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
index e94a2ccf42c..7342f419fb7 100644
--- a/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
+++ b/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
@@ -1133,7 +1133,7 @@ void ReplicationCoordinatorExternalStateImpl::_dropAllTempCollections(OperationC
2,
"Removing temporary collections from {db}",
"Removing temporary collections",
- "db"_attr = dbName);
+ logAttrs(dbName));
Lock::DBLock dbLock(opCtx, dbName, MODE_IX);
clearTempCollections(opCtx, dbName);
}
diff --git a/src/mongo/db/repl/rollback_impl.cpp b/src/mongo/db/repl/rollback_impl.cpp
index 5111c012e86..c5319c7a747 100644
--- a/src/mongo/db/repl/rollback_impl.cpp
+++ b/src/mongo/db/repl/rollback_impl.cpp
@@ -372,7 +372,7 @@ void RollbackImpl::_stopAndWaitForIndexBuilds(OperationContext* opCtx) {
// complete.
std::vector<DatabaseName> dbNames(dbs.begin(), dbs.end());
LOGV2(21595, "Waiting for all background operations to complete before starting rollback");
- for (auto dbName : dbNames) {
+ for (const auto& dbName : dbNames) {
auto numInProg = IndexBuildsCoordinator::get(opCtx)->numInProgForDb(dbName);
if (numInProg > 0) {
LOGV2_DEBUG(21596,
@@ -381,7 +381,7 @@ void RollbackImpl::_stopAndWaitForIndexBuilds(OperationContext* opCtx) {
"background operations to complete on database '{db}'",
"Waiting for background operations to complete",
"numBackgroundOperationsInProgress"_attr = numInProg,
- "db"_attr = dbName);
+ logAttrs(dbName));
IndexBuildsCoordinator::get(opCtx)->awaitNoBgOpInProgForDb(opCtx, dbName);
}
}
diff --git a/src/mongo/db/repl/tenant_migration_access_blocker_util.cpp b/src/mongo/db/repl/tenant_migration_access_blocker_util.cpp
index e2aecee1e9d..4e49f5a53bd 100644
--- a/src/mongo/db/repl/tenant_migration_access_blocker_util.cpp
+++ b/src/mongo/db/repl/tenant_migration_access_blocker_util.cpp
@@ -499,7 +499,7 @@ Status checkIfCanBuildIndex(OperationContext* opCtx, const DatabaseName& dbName)
if (MONGO_unlikely(haveCheckedIfIndexBuildableDuringTenantMigration.shouldFail())) {
LOGV2(5835300,
"haveCheckedIfIndexBuildableDuringTenantMigration failpoint enabled",
- "db"_attr = dbName,
+ logAttrs(dbName),
"status"_attr = status);
}
diff --git a/src/mongo/db/s/config/configsvr_run_restore_command.cpp b/src/mongo/db/s/config/configsvr_run_restore_command.cpp
index c2668be6689..26e334e5200 100644
--- a/src/mongo/db/s/config/configsvr_run_restore_command.cpp
+++ b/src/mongo/db/s/config/configsvr_run_restore_command.cpp
@@ -243,7 +243,7 @@ public:
"Found document",
"doc"_attr = doc,
"shouldRestore"_attr = shouldRestore,
- "db"_attr = coll->ns().db().toString(),
+ logAttrs(coll->ns().dbName()),
"docNss"_attr = docNss);
if (shouldRestore == ShouldRestoreDocument::kYes ||
@@ -255,7 +255,7 @@ public:
LOGV2_DEBUG(6938702,
1,
"Deleting collection that was not restored",
- "db"_attr = coll->ns().db().toString(),
+ logAttrs(coll->ns().dbName()),
"uuid"_attr = coll->uuid(),
"_id"_attr = doc.getField("_id"));
NamespaceStringOrUUID nssOrUUID(coll->ns().db().toString(), coll->uuid());
@@ -304,7 +304,7 @@ public:
"Found document",
"doc"_attr = doc,
"shouldRestore"_attr = shouldRestore,
- "db"_attr = coll->ns().db().toString(),
+ logAttrs(coll->ns().dbName()),
"dbNss"_attr = dbNss.toString());
if (shouldRestore) {
@@ -316,7 +316,7 @@ public:
LOGV2_DEBUG(6938703,
1,
"Deleting database that was not restored",
- "db"_attr = coll->ns().db().toString(),
+ logAttrs(coll->ns().dbName()),
"uuid"_attr = coll->uuid(),
"_id"_attr = doc.getField("_id"));
NamespaceStringOrUUID nssOrUUID(coll->ns().db().toString(), coll->uuid());
diff --git a/src/mongo/db/s/database_sharding_state.cpp b/src/mongo/db/s/database_sharding_state.cpp
index 2f312869758..f23f5553ed0 100644
--- a/src/mongo/db/s/database_sharding_state.cpp
+++ b/src/mongo/db/s/database_sharding_state.cpp
@@ -224,7 +224,7 @@ void DatabaseShardingState::setDbInfo(OperationContext* opCtx, const DatabaseTyp
LOGV2(7286900,
"Setting this node's cached database info",
- "db"_attr = _dbName,
+ logAttrs(_dbName),
"dbVersion"_attr = dbInfo.getVersion());
_dbInfo.emplace(dbInfo);
}
@@ -236,7 +236,7 @@ void DatabaseShardingState::clearDbInfo(OperationContext* opCtx, bool cancelOngo
_cancelDbMetadataRefresh();
}
- LOGV2(7286901, "Clearing this node's cached database info", "db"_attr = _dbName);
+ LOGV2(7286901, "Clearing this node's cached database info", logAttrs(_dbName));
_dbInfo = boost::none;
}
diff --git a/src/mongo/db/s/move_primary_coordinator.cpp b/src/mongo/db/s/move_primary_coordinator.cpp
index 82a7bb5ef6f..5f34c64cbfe 100644
--- a/src/mongo/db/s/move_primary_coordinator.cpp
+++ b/src/mongo/db/s/move_primary_coordinator.cpp
@@ -111,7 +111,7 @@ ExecutorFuture<void> MovePrimaryCoordinator::_runImpl(
if (toShardId == ShardingState::get(opCtx)->shardId()) {
LOGV2(7120200,
"Database already on requested primary shard",
- "db"_attr = _dbName,
+ logAttrs(_dbName),
"to"_attr = toShardId);
return ExecutorFuture<void>(**executor);
@@ -170,7 +170,7 @@ ExecutorFuture<void> MovePrimaryCoordinator::runMovePrimaryWorkflow(
LOGV2(7120201,
"Running movePrimary operation",
- "db"_attr = _dbName,
+ logAttrs(_dbName),
"to"_attr = toShardId);
logChange(opCtx, "start");
@@ -281,7 +281,7 @@ ExecutorFuture<void> MovePrimaryCoordinator::runMovePrimaryWorkflow(
LOGV2(7120206,
"Completed movePrimary operation",
- "db"_attr = _dbName,
+ logAttrs(_dbName),
"to"_attr = _doc.getToShardId());
logChange(opCtx, "end");
@@ -296,7 +296,7 @@ ExecutorFuture<void> MovePrimaryCoordinator::runMovePrimaryWorkflow(
LOGV2_DEBUG(7392900,
1,
"Triggering movePrimary cleanup",
- "db"_attr = _dbName,
+ logAttrs(_dbName),
"to"_attr = _doc.getToShardId(),
"phase"_attr = serializePhase(failedPhase),
"error"_attr = redact(status));
@@ -334,7 +334,7 @@ ExecutorFuture<void> MovePrimaryCoordinator::_cleanupOnAbort(
} catch (const ExceptionFor<ErrorCodes::ShardNotFound>&) {
LOGV2_INFO(7392901,
"Failed to remove orphaned data on recipient as it has been removed",
- "db"_attr = _dbName,
+ logAttrs(_dbName),
"to"_attr = toShardId);
}
}
@@ -347,13 +347,13 @@ ExecutorFuture<void> MovePrimaryCoordinator::_cleanupOnAbort(
} catch (const ExceptionFor<ErrorCodes::ShardNotFound>&) {
LOGV2_INFO(7392902,
"Failed to exit critical section on recipient as it has been removed",
- "db"_attr = _dbName,
+ logAttrs(_dbName),
"to"_attr = toShardId);
}
LOGV2_ERROR(7392903,
"Failed movePrimary operation",
- "db"_attr = _dbName,
+ logAttrs(_dbName),
"to"_attr = toShardId,
"phase"_attr = serializePhase(failedPhase),
"error"_attr = redact(status));
diff --git a/src/mongo/db/s/move_primary_coordinator_no_resilient.cpp b/src/mongo/db/s/move_primary_coordinator_no_resilient.cpp
index 420b96e549d..c633278e655 100644
--- a/src/mongo/db/s/move_primary_coordinator_no_resilient.cpp
+++ b/src/mongo/db/s/move_primary_coordinator_no_resilient.cpp
@@ -82,7 +82,7 @@ ExecutorFuture<void> MovePrimaryCoordinatorNoResilient::_runImpl(
// same name.
shardRegistry->reload(opCtx);
- const auto& dbName = nss().db();
+ const auto& dbName = nss().dbName().db();
const auto& toShard =
uassertStatusOK(shardRegistry->getShard(opCtx, _doc.getToShardId()));
@@ -90,7 +90,7 @@ ExecutorFuture<void> MovePrimaryCoordinatorNoResilient::_runImpl(
if (selfShardId == toShard->getId()) {
LOGV2(5275803,
"Database already on the requested primary shard",
- "db"_attr = dbName,
+ logAttrs(nss().dbName()),
"shardId"_attr = _doc.getToShardId());
// The database primary is already the `to` shard
return;
@@ -114,7 +114,7 @@ ExecutorFuture<void> MovePrimaryCoordinatorNoResilient::_runImpl(
.onError([this, anchor = shared_from_this()](const Status& status) {
LOGV2_ERROR(5275804,
"Error running move primary",
- "database"_attr = nss().db(),
+ "database"_attr = nss().dbName(),
"to"_attr = _doc.getToShardId(),
"error"_attr = redact(status));
diff --git a/src/mongo/db/s/range_deleter_service.cpp b/src/mongo/db/s/range_deleter_service.cpp
index 9bd3b75e45f..d1ef5039484 100644
--- a/src/mongo/db/s/range_deleter_service.cpp
+++ b/src/mongo/db/s/range_deleter_service.cpp
@@ -162,7 +162,7 @@ void RangeDeleterService::ReadyRangeDeletionsProcessor::_runRangeDeletions() {
}
auto task = _queue.front();
- const auto dbName = task.getNss().db();
+ const auto dbName = task.getNss().dbName();
const auto collectionUuid = task.getCollectionUuid();
const auto range = task.getRange();
const auto optKeyPattern = task.getKeyPattern();
diff --git a/src/mongo/db/s/shard_filtering_metadata_refresh.cpp b/src/mongo/db/s/shard_filtering_metadata_refresh.cpp
index 7f447e2c773..ac0f949a4a5 100644
--- a/src/mongo/db/s/shard_filtering_metadata_refresh.cpp
+++ b/src/mongo/db/s/shard_filtering_metadata_refresh.cpp
@@ -75,7 +75,7 @@ bool joinDbVersionOperation(OperationContext* opCtx,
LOGV2_DEBUG(6697201,
2,
"Waiting for exit from the critical section",
- "db"_attr = (**scopedDss)->getDbName(),
+ logAttrs((**scopedDss)->getDbName()),
"reason"_attr = (**scopedDss)->getCriticalSectionReason());
scopedDss->reset();
@@ -89,7 +89,7 @@ bool joinDbVersionOperation(OperationContext* opCtx,
LOGV2_DEBUG(6697202,
2,
"Waiting for completion of another database metadata refresh",
- "db"_attr = (**scopedDss)->getDbName());
+ logAttrs((**scopedDss)->getDbName()));
scopedDss->reset();
dbLock->reset();
@@ -142,7 +142,7 @@ Status refreshDbMetadata(OperationContext* opCtx,
LOGV2_DEBUG(7079300,
2,
"Skip setting cached database metadata as there are no updates",
- "db"_attr = dbName,
+ logAttrs(dbName),
"cachedDbVersion"_attr = *cachedDbVersion,
"refreshedDbVersion"_attr = swDbMetadata.getValue()->getVersion());
@@ -186,7 +186,7 @@ SharedSemiFuture<void> recoverRefreshDbVersion(OperationContext* opCtx,
// Forward `users` and `roles` attributes from the original request.
forwardableOpMetadata.setOn(opCtx);
- LOGV2_DEBUG(6697203, 2, "Started database metadata refresh", "db"_attr = dbName);
+ LOGV2_DEBUG(6697203, 2, "Started database metadata refresh", logAttrs(dbName));
return refreshDbMetadata(opCtx, dbName, cancellationToken);
})
@@ -196,11 +196,11 @@ SharedSemiFuture<void> recoverRefreshDbVersion(OperationContext* opCtx,
!cancellationToken.isCanceled());
if (status.isOK() || status == ErrorCodes::NamespaceNotFound) {
- LOGV2(6697204, "Refreshed database metadata", "db"_attr = dbName);
+ LOGV2(6697204, "Refreshed database metadata", logAttrs(dbName));
} else {
LOGV2_ERROR(6697205,
"Failed database metadata refresh",
- "db"_attr = dbName,
+ logAttrs(dbName),
"error"_attr = redact(status));
}
})
diff --git a/src/mongo/db/s/shardsvr_drop_database_participant_command.cpp b/src/mongo/db/s/shardsvr_drop_database_participant_command.cpp
index 305dc99d822..e1c5978c9e4 100644
--- a/src/mongo/db/s/shardsvr_drop_database_participant_command.cpp
+++ b/src/mongo/db/s/shardsvr_drop_database_participant_command.cpp
@@ -82,7 +82,7 @@ public:
1,
"Received a ShardsvrDropDatabaseParticipant but did not find the "
"database locally",
- "database"_attr = dbName);
+ logAttrs(dbName));
}
}
diff --git a/src/mongo/db/startup_recovery.cpp b/src/mongo/db/startup_recovery.cpp
index c43a0a077ac..3f23afdc5ec 100644
--- a/src/mongo/db/startup_recovery.cpp
+++ b/src/mongo/db/startup_recovery.cpp
@@ -266,7 +266,7 @@ void openDatabases(OperationContext* opCtx, const StorageEngine* storageEngine,
auto databaseHolder = DatabaseHolder::get(opCtx);
auto dbNames = storageEngine->listDatabases();
for (const auto& dbName : dbNames) {
- LOGV2_DEBUG(21010, 1, " Opening database: {dbName}", "dbName"_attr = dbName);
+ LOGV2_DEBUG(21010, 1, " Opening database: {dbName}", logAttrs(dbName));
auto db = databaseHolder->openDb(opCtx, dbName);
invariant(db);
onDatabase(db->name());
diff --git a/src/mongo/db/storage/storage_util.cpp b/src/mongo/db/storage/storage_util.cpp
index cbc393aaf05..032ebff02c6 100644
--- a/src/mongo/db/storage/storage_util.cpp
+++ b/src/mongo/db/storage/storage_util.cpp
@@ -68,7 +68,7 @@ auto removeEmptyDirectory =
boost::filesystem::remove(storageEngine->getFilesystemPathForDb(dbName), ec);
if (!ec) {
- LOGV2(4888200, "Removed empty database directory", "db"_attr = dbName.toString());
+ LOGV2(4888200, "Removed empty database directory", logAttrs(dbName));
} else if (collectionCatalog->begin(nullptr, dbName) == collectionCatalog->end(nullptr)) {
// It is possible for a new collection to be created in the database between when we
// check whether the database is empty and actually attempting to remove the directory.
@@ -79,7 +79,7 @@ auto removeEmptyDirectory =
LOGV2_DEBUG(4888201,
1,
"Failed to remove database directory",
- "db"_attr = dbName.toString(),
+ logAttrs(dbName),
"error"_attr = ec.message());
}
};
diff --git a/src/mongo/db/ttl.cpp b/src/mongo/db/ttl.cpp
index 844894407ce..62e90b5e1bf 100644
--- a/src/mongo/db/ttl.cpp
+++ b/src/mongo/db/ttl.cpp
@@ -579,7 +579,7 @@ bool TTLMonitor::_doTTLIndexDelete(OperationContext* opCtx,
1,
"Postpone TTL of DB because of active tenant migration",
"tenantMigrationAccessBlocker"_attr = mtab->getDebugInfo().jsonString(),
- "database"_attr = coll.getDb()->name().toString());
+ logAttrs(coll.getDb()->name()));
return false;
}