summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authormathisbessamdb <mathis.bessa@mongodb.com>2023-05-16 19:16:55 +0000
committerEvergreen Agent <no-reply@evergreen.mongodb.com>2023-05-17 00:07:45 +0000
commitf686154c7dcab57316dba532f06d41201fba6b82 (patch)
treeccc75de1cce3a082db63575eab66f4c7483f1bbb
parentf8937c0ae6a1b6afd3d0e4e4dae2afca53cf7390 (diff)
downloadmongo-f686154c7dcab57316dba532f06d41201fba6b82.tar.gz
SERVER-76900 ResourceConsumption::shouldCollectMetricsForDatabase to pass a DatabaseName
-rw-r--r--src/mongo/db/index_builds_coordinator_mongod.cpp4
-rw-r--r--src/mongo/db/service_entry_point_common.cpp5
-rw-r--r--src/mongo/db/stats/resource_consumption_metrics.cpp11
-rw-r--r--src/mongo/db/stats/resource_consumption_metrics.h24
-rw-r--r--src/mongo/db/stats/resource_consumption_metrics_test.cpp215
-rw-r--r--src/mongo/db/ttl.cpp2
6 files changed, 212 insertions, 49 deletions
diff --git a/src/mongo/db/index_builds_coordinator_mongod.cpp b/src/mongo/db/index_builds_coordinator_mongod.cpp
index 5a5773b1877..082045fd17d 100644
--- a/src/mongo/db/index_builds_coordinator_mongod.cpp
+++ b/src/mongo/db/index_builds_coordinator_mongod.cpp
@@ -467,9 +467,9 @@ IndexBuildsCoordinatorMongod::_startIndexBuild(OperationContext* opCtx,
// Start collecting metrics for the index build. The metrics for this operation will only be
// aggregated globally if the node commits or aborts while it is primary.
auto& metricsCollector = ResourceConsumption::MetricsCollector::get(opCtx.get());
- if (ResourceConsumption::shouldCollectMetricsForDatabase(dbName.toStringWithTenantId()) &&
+ if (ResourceConsumption::shouldCollectMetricsForDatabase(dbName) &&
ResourceConsumption::isMetricsCollectionEnabled()) {
- metricsCollector.beginScopedCollecting(opCtx.get(), dbName.toStringWithTenantId());
+ metricsCollector.beginScopedCollecting(opCtx.get(), dbName);
}
// Index builds should never take the PBWM lock, even on a primary. This allows the
diff --git a/src/mongo/db/service_entry_point_common.cpp b/src/mongo/db/service_entry_point_common.cpp
index 11a859255fb..f39f599eeb7 100644
--- a/src/mongo/db/service_entry_point_common.cpp
+++ b/src/mongo/db/service_entry_point_common.cpp
@@ -1510,7 +1510,10 @@ void ExecCommandDatabase::_initiateCommand() {
// Connections from mongod or mongos clients (i.e. initial sync, mirrored reads, etc.) should
// not contribute to resource consumption metrics.
const bool collect = command->collectsResourceConsumptionMetrics() && !_isInternalClient();
- _scopedMetrics.emplace(opCtx, dbname, collect);
+ _scopedMetrics.emplace(
+ opCtx,
+ DatabaseNameUtil::deserialize(request.getValidatedTenantId(), request.getDatabase()),
+ collect);
const auto allowTransactionsOnConfigDatabase =
(serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer) ||
diff --git a/src/mongo/db/stats/resource_consumption_metrics.cpp b/src/mongo/db/stats/resource_consumption_metrics.cpp
index 98a804b0694..e4a4bdb4c2a 100644
--- a/src/mongo/db/stats/resource_consumption_metrics.cpp
+++ b/src/mongo/db/stats/resource_consumption_metrics.cpp
@@ -328,7 +328,7 @@ void ResourceConsumption::MetricsCollector::incrementOneIdxEntryWritten(StringDa
}
void ResourceConsumption::MetricsCollector::beginScopedCollecting(OperationContext* opCtx,
- const std::string& dbName) {
+ const DatabaseName& dbName) {
invariant(!isInScope());
_dbName = dbName;
_collecting = ScopedCollectionState::kInScopeCollecting;
@@ -366,7 +366,7 @@ void ResourceConsumption::MetricsCollector::incrementOneCursorSeek(StringData ur
}
ResourceConsumption::ScopedMetricsCollector::ScopedMetricsCollector(OperationContext* opCtx,
- const std::string& dbName,
+ const DatabaseName& dbName,
bool commandCollectsMetrics)
: _opCtx(opCtx) {
@@ -415,9 +415,9 @@ ResourceConsumption& ResourceConsumption::get(OperationContext* opCtx) {
}
void ResourceConsumption::merge(OperationContext* opCtx,
- const std::string& dbName,
+ const DatabaseName& dbName,
const OperationMetrics& metrics) {
- invariant(!dbName.empty());
+ invariant(!dbName.isEmpty());
LOGV2_DEBUG(7527700,
1,
@@ -446,8 +446,9 @@ void ResourceConsumption::merge(OperationContext* opCtx,
}
// Add all metrics into the globally-aggregated metrics.
+ const auto& dbNameStr = dbName.toStringWithTenantId();
stdx::lock_guard<Mutex> lk(_mutex);
- _dbMetrics[dbName] += newMetrics;
+ _dbMetrics[dbNameStr] += newMetrics;
_cpuTime += newMetrics.cpuNanos;
}
diff --git a/src/mongo/db/stats/resource_consumption_metrics.h b/src/mongo/db/stats/resource_consumption_metrics.h
index 6c5bf853c44..9e88a96dd5e 100644
--- a/src/mongo/db/stats/resource_consumption_metrics.h
+++ b/src/mongo/db/stats/resource_consumption_metrics.h
@@ -284,7 +284,7 @@ public:
* When called, resource consumption metrics should be recorded for this operation. Clears
* any metrics from previous collection periods.
*/
- void beginScopedCollecting(OperationContext* opCtx, const std::string& dbName);
+ void beginScopedCollecting(OperationContext* opCtx, const DatabaseName& dbName);
/**
* When called, sets state that a ScopedMetricsCollector is in scope, but is not recording
@@ -319,7 +319,7 @@ public:
return _hasCollectedMetrics;
}
- const std::string& getDbName() const {
+ const DatabaseName& getDbName() const {
return _dbName;
}
@@ -328,12 +328,12 @@ public:
* Metrics due to the Collector stopping without being associated with any database yet.
*/
OperationMetrics& getMetrics() {
- invariant(!_dbName.empty(), "observing Metrics before a dbName has been set");
+ invariant(!_dbName.isEmpty(), "observing Metrics before a dbName has been set");
return _metrics;
}
const OperationMetrics& getMetrics() const {
- invariant(!_dbName.empty(), "observing Metrics before a dbName has been set");
+ invariant(!_dbName.isEmpty(), "observing Metrics before a dbName has been set");
return _metrics;
}
@@ -437,7 +437,7 @@ public:
};
ScopedCollectionState _collecting = ScopedCollectionState::kInactive;
bool _hasCollectedMetrics = false;
- std::string _dbName;
+ DatabaseName _dbName;
OperationMetrics _metrics;
bool _paused = false;
};
@@ -450,9 +450,9 @@ public:
class ScopedMetricsCollector {
public:
ScopedMetricsCollector(OperationContext* opCtx,
- const std::string& dbName,
+ const DatabaseName& dbName,
bool commandCollectsMetrics);
- ScopedMetricsCollector(OperationContext* opCtx, const std::string& dbName)
+ ScopedMetricsCollector(OperationContext* opCtx, const DatabaseName& dbName)
: ScopedMetricsCollector(opCtx, dbName, true) {}
~ScopedMetricsCollector();
@@ -495,9 +495,9 @@ public:
/**
* Returns whether the database's metrics should be collected.
*/
- static bool shouldCollectMetricsForDatabase(StringData dbName) {
- if (dbName == DatabaseName::kAdmin.db() || dbName == DatabaseName::kConfig.db() ||
- dbName == DatabaseName::kLocal.db()) {
+ static bool shouldCollectMetricsForDatabase(const DatabaseName& dbName) {
+ if (dbName == DatabaseName::kAdmin || dbName == DatabaseName::kConfig ||
+ dbName == DatabaseName::kLocal) {
return false;
}
return true;
@@ -526,7 +526,9 @@ public:
*
* The database name must not be an empty string.
*/
- void merge(OperationContext* opCtx, const std::string& dbName, const OperationMetrics& metrics);
+ void merge(OperationContext* opCtx,
+ const DatabaseName& dbName,
+ const OperationMetrics& metrics);
/**
* Returns a copy of the per-database metrics map.
diff --git a/src/mongo/db/stats/resource_consumption_metrics_test.cpp b/src/mongo/db/stats/resource_consumption_metrics_test.cpp
index 938415b2a30..87d004276fc 100644
--- a/src/mongo/db/stats/resource_consumption_metrics_test.cpp
+++ b/src/mongo/db/stats/resource_consumption_metrics_test.cpp
@@ -74,7 +74,8 @@ TEST_F(ResourceConsumptionMetricsTest, Merge) {
auto& operationMetrics = ResourceConsumption::MetricsCollector::get(_opCtx.get());
- operationMetrics.beginScopedCollecting(_opCtx.get(), "db1");
+ operationMetrics.beginScopedCollecting(
+ _opCtx.get(), DatabaseName::createDatabaseName_forTest(boost::none, "db1"));
globalResourceConsumption.merge(
_opCtx.get(), operationMetrics.getDbName(), operationMetrics.getMetrics());
globalResourceConsumption.merge(
@@ -87,7 +88,8 @@ TEST_F(ResourceConsumptionMetricsTest, Merge) {
ASSERT_EQ(dbMetrics.count("db3"), 0);
operationMetrics.endScopedCollecting();
- operationMetrics.beginScopedCollecting(_opCtx.get(), "db2");
+ operationMetrics.beginScopedCollecting(
+ _opCtx.get(), DatabaseName::createDatabaseName_forTest(boost::none, "db2"));
globalResourceConsumption.merge(
_opCtx.get(), operationMetrics.getDbName(), operationMetrics.getMetrics());
globalResourceConsumption.merge(
@@ -106,7 +108,10 @@ TEST_F(ResourceConsumptionMetricsTest, ScopedMetricsCollector) {
// Collect
{
const bool collectMetrics = true;
- ResourceConsumption::ScopedMetricsCollector scope(_opCtx.get(), "db1", collectMetrics);
+ ResourceConsumption::ScopedMetricsCollector scope(
+ _opCtx.get(),
+ DatabaseName::createDatabaseName_forTest(boost::none, "db1"),
+ collectMetrics);
ASSERT_TRUE(operationMetrics.isCollecting());
}
@@ -118,7 +123,10 @@ TEST_F(ResourceConsumptionMetricsTest, ScopedMetricsCollector) {
// Don't collect
{
const bool collectMetrics = false;
- ResourceConsumption::ScopedMetricsCollector scope(_opCtx.get(), "db1", collectMetrics);
+ ResourceConsumption::ScopedMetricsCollector scope(
+ _opCtx.get(),
+ DatabaseName::createDatabaseName_forTest(boost::none, "db1"),
+ collectMetrics);
ASSERT_FALSE(operationMetrics.isCollecting());
}
@@ -128,13 +136,19 @@ TEST_F(ResourceConsumptionMetricsTest, ScopedMetricsCollector) {
ASSERT_EQ(metricsCopy.count("db1"), 0);
// Collect
- { ResourceConsumption::ScopedMetricsCollector scope(_opCtx.get(), "db1"); }
+ {
+ ResourceConsumption::ScopedMetricsCollector scope(
+ _opCtx.get(), DatabaseName::createDatabaseName_forTest(boost::none, "db1"));
+ }
metricsCopy = globalResourceConsumption.getDbMetrics();
ASSERT_EQ(metricsCopy.count("db1"), 1);
// Collect on a different database
- { ResourceConsumption::ScopedMetricsCollector scope(_opCtx.get(), "db2"); }
+ {
+ ResourceConsumption::ScopedMetricsCollector scope(
+ _opCtx.get(), DatabaseName::createDatabaseName_forTest(boost::none, "db2"));
+ }
metricsCopy = globalResourceConsumption.getDbMetrics();
ASSERT_EQ(metricsCopy.count("db1"), 1);
@@ -156,16 +170,21 @@ TEST_F(ResourceConsumptionMetricsTest, NestedScopedMetricsCollector) {
// Collect, nesting does not override that behavior or change the collection database.
{
- ResourceConsumption::ScopedMetricsCollector scope(_opCtx.get(), "db1");
+ ResourceConsumption::ScopedMetricsCollector scope(
+ _opCtx.get(), DatabaseName::createDatabaseName_forTest(boost::none, "db1"));
ASSERT(operationMetrics.hasCollectedMetrics());
{
const bool collectMetrics = false;
- ResourceConsumption::ScopedMetricsCollector scope(_opCtx.get(), "db2", collectMetrics);
+ ResourceConsumption::ScopedMetricsCollector scope(
+ _opCtx.get(),
+ DatabaseName::createDatabaseName_forTest(boost::none, "db2"),
+ collectMetrics);
ASSERT_TRUE(operationMetrics.isCollecting());
{
- ResourceConsumption::ScopedMetricsCollector scope(_opCtx.get(), "db3");
+ ResourceConsumption::ScopedMetricsCollector scope(
+ _opCtx.get(), DatabaseName::createDatabaseName_forTest(boost::none, "db3"));
ASSERT_TRUE(operationMetrics.isCollecting());
}
}
@@ -181,17 +200,23 @@ TEST_F(ResourceConsumptionMetricsTest, NestedScopedMetricsCollector) {
// Don't collect, nesting does not override that behavior.
{
const bool collectMetrics = false;
- ResourceConsumption::ScopedMetricsCollector scope(_opCtx.get(), "db2", collectMetrics);
+ ResourceConsumption::ScopedMetricsCollector scope(
+ _opCtx.get(),
+ DatabaseName::createDatabaseName_forTest(boost::none, "db2"),
+ collectMetrics);
ASSERT_FALSE(operationMetrics.hasCollectedMetrics());
{
- ResourceConsumption::ScopedMetricsCollector scope(_opCtx.get(), "db3");
+ ResourceConsumption::ScopedMetricsCollector scope(
+ _opCtx.get(), DatabaseName::createDatabaseName_forTest(boost::none, "db3"));
ASSERT_FALSE(operationMetrics.isCollecting());
{
ResourceConsumption::ScopedMetricsCollector scope(
- _opCtx.get(), "db4", collectMetrics);
+ _opCtx.get(),
+ DatabaseName::createDatabaseName_forTest(boost::none, "db4"),
+ collectMetrics);
ASSERT_FALSE(operationMetrics.isCollecting());
}
}
@@ -225,7 +250,8 @@ TEST_F(ResourceConsumptionMetricsTest, IncrementReadMetrics) {
auto& operationMetrics = ResourceConsumption::MetricsCollector::get(_opCtx.get());
{
- ResourceConsumption::ScopedMetricsCollector scope(_opCtx.get(), "db1");
+ ResourceConsumption::ScopedMetricsCollector scope(
+ _opCtx.get(), DatabaseName::createDatabaseName_forTest(boost::none, "db1"));
operationMetrics.incrementOneDocRead("", 2);
operationMetrics.incrementOneIdxEntryRead("", 8);
@@ -252,7 +278,8 @@ TEST_F(ResourceConsumptionMetricsTest, IncrementReadMetrics) {
reset(operationMetrics);
{
- ResourceConsumption::ScopedMetricsCollector scope(_opCtx.get(), "db1");
+ ResourceConsumption::ScopedMetricsCollector scope(
+ _opCtx.get(), DatabaseName::createDatabaseName_forTest(boost::none, "db1"));
operationMetrics.incrementOneDocRead("", 32);
operationMetrics.incrementOneIdxEntryRead("", 128);
@@ -282,7 +309,8 @@ TEST_F(ResourceConsumptionMetricsTest, IncrementReadMetricsSecondary) {
->setFollowerMode(repl::MemberState::RS_SECONDARY));
{
- ResourceConsumption::ScopedMetricsCollector scope(_opCtx.get(), "db1");
+ ResourceConsumption::ScopedMetricsCollector scope(
+ _opCtx.get(), DatabaseName::createDatabaseName_forTest(boost::none, "db1"));
operationMetrics.incrementOneDocRead("", 2);
operationMetrics.incrementOneIdxEntryRead("", 8);
@@ -307,7 +335,8 @@ TEST_F(ResourceConsumptionMetricsTest, IncrementReadMetricsSecondary) {
reset(operationMetrics);
{
- ResourceConsumption::ScopedMetricsCollector scope(_opCtx.get(), "db1");
+ ResourceConsumption::ScopedMetricsCollector scope(
+ _opCtx.get(), DatabaseName::createDatabaseName_forTest(boost::none, "db1"));
operationMetrics.incrementOneDocRead("", 32);
operationMetrics.incrementOneIdxEntryRead("", 128);
@@ -336,7 +365,8 @@ TEST_F(ResourceConsumptionMetricsTest, IncrementReadMetricsAcrossStates) {
// Start collecting metrics in the primary state, then change to secondary. Metrics should be
// attributed to the secondary state, since that is the state where the operation completed.
{
- ResourceConsumption::ScopedMetricsCollector scope(_opCtx.get(), "db1");
+ ResourceConsumption::ScopedMetricsCollector scope(
+ _opCtx.get(), DatabaseName::createDatabaseName_forTest(boost::none, "db1"));
operationMetrics.incrementOneDocRead("", 2);
operationMetrics.incrementOneIdxEntryRead("", 8);
@@ -380,7 +410,8 @@ TEST_F(ResourceConsumptionMetricsTest, IncrementReadMetricsAcrossStates) {
// Start collecting metrics in the secondary state, then change to primary. Metrics should be
// attributed to the primary state only.
{
- ResourceConsumption::ScopedMetricsCollector scope(_opCtx.get(), "db1");
+ ResourceConsumption::ScopedMetricsCollector scope(
+ _opCtx.get(), DatabaseName::createDatabaseName_forTest(boost::none, "db1"));
operationMetrics.incrementOneDocRead("", 2);
operationMetrics.incrementOneIdxEntryRead("", 8);
@@ -429,7 +460,8 @@ TEST_F(ResourceConsumptionMetricsTest, DocumentUnitsRead) {
int expectedUnits = 0;
{
- ResourceConsumption::ScopedMetricsCollector scope(_opCtx.get(), "db1");
+ ResourceConsumption::ScopedMetricsCollector scope(
+ _opCtx.get(), DatabaseName::createDatabaseName_forTest(boost::none, "db1"));
// Each of these should be counted as 1 document unit (unit size = 128).
operationMetrics.incrementOneDocRead("", 2);
@@ -464,7 +496,8 @@ TEST_F(ResourceConsumptionMetricsTest, DocumentUnitsWritten) {
int expectedUnits = 0;
{
- ResourceConsumption::ScopedMetricsCollector scope(_opCtx.get(), "db1");
+ ResourceConsumption::ScopedMetricsCollector scope(
+ _opCtx.get(), DatabaseName::createDatabaseName_forTest(boost::none, "db1"));
// Each of these should be counted as 1 document unit (unit size = 128).
operationMetrics.incrementOneDocWritten("", 2);
@@ -498,7 +531,8 @@ TEST_F(ResourceConsumptionMetricsTest, TotalUnitsWritten) {
int expectedUnits = 0;
{
- ResourceConsumption::ScopedMetricsCollector scope(_opCtx.get(), "db1");
+ ResourceConsumption::ScopedMetricsCollector scope(
+ _opCtx.get(), DatabaseName::createDatabaseName_forTest(boost::none, "db1"));
// Each of these should be counted as 1 total unit (unit size = 128).
operationMetrics.incrementOneDocWritten("", 2);
@@ -560,7 +594,8 @@ TEST_F(ResourceConsumptionMetricsTest, IdxEntryUnitsRead) {
int expectedUnits = 0;
{
- ResourceConsumption::ScopedMetricsCollector scope(_opCtx.get(), "db1");
+ ResourceConsumption::ScopedMetricsCollector scope(
+ _opCtx.get(), DatabaseName::createDatabaseName_forTest(boost::none, "db1"));
gIndexEntryUnitSizeBytes = 16;
@@ -609,7 +644,8 @@ TEST_F(ResourceConsumptionMetricsTest, IdxEntryUnitsWritten) {
int expectedUnits = 0;
{
- ResourceConsumption::ScopedMetricsCollector scope(_opCtx.get(), "db1");
+ ResourceConsumption::ScopedMetricsCollector scope(
+ _opCtx.get(), DatabaseName::createDatabaseName_forTest(boost::none, "db1"));
gIndexEntryUnitSizeBytes = 16;
@@ -674,7 +710,8 @@ TEST_F(ResourceConsumptionMetricsTest, CpuNanos) {
{
// Ensure that the CPU timer increases relative to a single operation.
- ResourceConsumption::ScopedMetricsCollector scope(_opCtx.get(), "db1");
+ ResourceConsumption::ScopedMetricsCollector scope(
+ _opCtx.get(), DatabaseName::createDatabaseName_forTest(boost::none, "db1"));
auto lastNanos = operationMetrics.getMetrics().cpuTimer->getElapsed();
spinFor(Milliseconds(1));
ASSERT_GT(operationMetrics.getMetrics().cpuTimer->getElapsed(), lastNanos);
@@ -690,7 +727,8 @@ TEST_F(ResourceConsumptionMetricsTest, CpuNanos) {
ASSERT_EQ(dbMetrics["db1"].cpuNanos, nanos);
{
- ResourceConsumption::ScopedMetricsCollector scope(_opCtx.get(), "db1");
+ ResourceConsumption::ScopedMetricsCollector scope(
+ _opCtx.get(), DatabaseName::createDatabaseName_forTest(boost::none, "db1"));
spinFor(Milliseconds(1));
}
@@ -716,7 +754,8 @@ TEST_F(ResourceConsumptionMetricsTest, CursorSeeks) {
int expectedSeeks = 0;
{
- ResourceConsumption::ScopedMetricsCollector scope(_opCtx.get(), "db1");
+ ResourceConsumption::ScopedMetricsCollector scope(
+ _opCtx.get(), DatabaseName::createDatabaseName_forTest(boost::none, "db1"));
operationMetrics.incrementOneCursorSeek("");
operationMetrics.incrementOneCursorSeek("");
operationMetrics.incrementOneCursorSeek("");
@@ -733,7 +772,8 @@ TEST_F(ResourceConsumptionMetricsTest, PauseMetricsCollectorBlock) {
auto& operationMetrics = ResourceConsumption::MetricsCollector::get(_opCtx.get());
{
- ResourceConsumption::ScopedMetricsCollector scope(_opCtx.get(), "db1");
+ ResourceConsumption::ScopedMetricsCollector scope(
+ _opCtx.get(), DatabaseName::createDatabaseName_forTest(boost::none, "db1"));
{
// Metrics increase within this scope should not be applied.
ResourceConsumption::PauseMetricsCollectorBlock pauseMetricsCollection(_opCtx.get());
@@ -772,7 +812,8 @@ TEST_F(ResourceConsumptionMetricsTest, ResetMetricsBetweenCollection) {
auto& operationMetrics = ResourceConsumption::MetricsCollector::get(_opCtx.get());
{
- ResourceConsumption::ScopedMetricsCollector scope(_opCtx.get(), "db1");
+ ResourceConsumption::ScopedMetricsCollector scope(
+ _opCtx.get(), DatabaseName::createDatabaseName_forTest(boost::none, "db1"));
operationMetrics.incrementOneDocRead("", 2);
operationMetrics.incrementOneIdxEntryRead("", 4);
@@ -795,7 +836,8 @@ TEST_F(ResourceConsumptionMetricsTest, ResetMetricsBetweenCollection) {
// We expect this metrics collection to wipe out the metrics from the previous one.
{
- ResourceConsumption::ScopedMetricsCollector scope(_opCtx.get(), "db2");
+ ResourceConsumption::ScopedMetricsCollector scope(
+ _opCtx.get(), DatabaseName::createDatabaseName_forTest(boost::none, "db2"));
operationMetrics.incrementOneDocRead("", 64);
operationMetrics.incrementOneIdxEntryRead("", 128);
operationMetrics.incrementKeysSorted(256);
@@ -828,4 +870,119 @@ TEST_F(ResourceConsumptionMetricsTest, ResetMetricsBetweenCollection) {
ASSERT_EQ(metricsCopy["db2"].primaryReadMetrics.cursorSeeks, 1);
}
+TEST_F(ResourceConsumptionMetricsTest, MetricsWithTenantId) {
+ auto& globalResourceConsumption = ResourceConsumption::get(getServiceContext());
+ auto& operationMetrics = ResourceConsumption::MetricsCollector::get(_opCtx.get());
+ const TenantId tenantId = TenantId(OID::gen());
+
+ std::string dbName1Str = str::stream() << tenantId.toString() << "_db1";
+ operationMetrics.beginScopedCollecting(
+ _opCtx.get(), DatabaseName::createDatabaseName_forTest(tenantId, "db1"));
+ globalResourceConsumption.merge(
+ _opCtx.get(), operationMetrics.getDbName(), operationMetrics.getMetrics());
+ globalResourceConsumption.merge(
+ _opCtx.get(), operationMetrics.getDbName(), operationMetrics.getMetrics());
+
+ auto dbMetrics = globalResourceConsumption.getDbMetrics();
+ ASSERT_EQ(dbMetrics.count(dbName1Str), 1);
+ ASSERT_EQ(dbMetrics.count("db2"), 0);
+ operationMetrics.endScopedCollecting();
+
+ std::string dbName2Str = str::stream() << tenantId.toString() << "_db2";
+ operationMetrics.beginScopedCollecting(
+ _opCtx.get(), DatabaseName::createDatabaseName_forTest(tenantId, "db2"));
+ globalResourceConsumption.merge(
+ _opCtx.get(), operationMetrics.getDbName(), operationMetrics.getMetrics());
+ globalResourceConsumption.merge(
+ _opCtx.get(), operationMetrics.getDbName(), operationMetrics.getMetrics());
+
+ dbMetrics = globalResourceConsumption.getDbMetrics();
+ ASSERT_EQ(dbMetrics.count(dbName1Str), 1);
+ ASSERT_EQ(dbMetrics.count(dbName2Str), 1);
+ operationMetrics.endScopedCollecting();
+
+ // Same '_db2' but different tenant.
+ const TenantId otherTenantId = TenantId(OID::gen());
+ dbMetrics = globalResourceConsumption.getDbMetrics();
+
+ std::string otherDbName2Str = str::stream() << otherTenantId.toString() << "_db2";
+ operationMetrics.beginScopedCollecting(
+ _opCtx.get(), DatabaseName::createDatabaseName_forTest(otherTenantId, "db2"));
+ globalResourceConsumption.merge(
+ _opCtx.get(), operationMetrics.getDbName(), operationMetrics.getMetrics());
+ globalResourceConsumption.merge(
+ _opCtx.get(), operationMetrics.getDbName(), operationMetrics.getMetrics());
+
+ dbMetrics = globalResourceConsumption.getDbMetrics();
+ ASSERT_EQ(dbMetrics.count(dbName1Str), 1);
+ ASSERT_EQ(dbMetrics.count(dbName2Str), 1);
+ ASSERT_EQ(dbMetrics.count(otherDbName2Str), 1);
+ operationMetrics.endScopedCollecting();
+}
+
+TEST_F(ResourceConsumptionMetricsTest, MergeWithTenantId) {
+ auto& globalResourceConsumption = ResourceConsumption::get(getServiceContext());
+ auto& operationMetrics = ResourceConsumption::MetricsCollector::get(_opCtx.get());
+ const TenantId tenantId = TenantId(OID::gen());
+ const TenantId otherTenantId = TenantId(OID::gen());
+
+ {
+ ResourceConsumption::ScopedMetricsCollector scope(
+ _opCtx.get(), DatabaseName::createDatabaseName_forTest(tenantId, "db1"));
+
+ operationMetrics.incrementOneDocRead("", 2);
+ operationMetrics.incrementOneIdxEntryRead("", 4);
+ operationMetrics.incrementKeysSorted(8);
+ operationMetrics.incrementSorterSpills(16);
+ operationMetrics.incrementDocUnitsReturned("", makeDocUnits(32));
+ operationMetrics.incrementOneCursorSeek("");
+ }
+
+ std::string dbName1Str = str::stream() << tenantId.toString() << "_db1";
+ auto metricsCopy = globalResourceConsumption.getAndClearDbMetrics();
+ ASSERT_EQ(metricsCopy[dbName1Str].primaryReadMetrics.docsRead.bytes(), 2);
+ ASSERT_EQ(metricsCopy[dbName1Str].primaryReadMetrics.docsRead.units(), 1);
+ ASSERT_EQ(metricsCopy[dbName1Str].primaryReadMetrics.idxEntriesRead.bytes(), 4);
+ ASSERT_EQ(metricsCopy[dbName1Str].primaryReadMetrics.idxEntriesRead.units(), 1);
+ ASSERT_EQ(metricsCopy[dbName1Str].primaryReadMetrics.keysSorted, 8);
+ ASSERT_EQ(metricsCopy[dbName1Str].primaryReadMetrics.sorterSpills, 16);
+ ASSERT_EQ(metricsCopy[dbName1Str].primaryReadMetrics.docsReturned.bytes(), 32);
+ ASSERT_EQ(metricsCopy[dbName1Str].primaryReadMetrics.docsReturned.units(), 1);
+ ASSERT_EQ(metricsCopy[dbName1Str].primaryReadMetrics.cursorSeeks, 1);
+
+ {
+ ResourceConsumption::ScopedMetricsCollector scope(
+ _opCtx.get(), DatabaseName::createDatabaseName_forTest(otherTenantId, "db1"));
+
+ operationMetrics.incrementOneDocRead("", 2);
+ operationMetrics.incrementOneIdxEntryRead("", 4);
+ operationMetrics.incrementKeysSorted(8);
+ operationMetrics.incrementSorterSpills(16);
+ operationMetrics.incrementDocUnitsReturned("", makeDocUnits(32));
+ operationMetrics.incrementOneCursorSeek("");
+ }
+
+ metricsCopy = globalResourceConsumption.getAndClearDbMetrics();
+ ASSERT_EQ(metricsCopy[dbName1Str].primaryReadMetrics.docsRead.bytes(), 0);
+ ASSERT_EQ(metricsCopy[dbName1Str].primaryReadMetrics.docsRead.units(), 0);
+ ASSERT_EQ(metricsCopy[dbName1Str].primaryReadMetrics.idxEntriesRead.bytes(), 0);
+ ASSERT_EQ(metricsCopy[dbName1Str].primaryReadMetrics.idxEntriesRead.units(), 0);
+ ASSERT_EQ(metricsCopy[dbName1Str].primaryReadMetrics.keysSorted, 0);
+ ASSERT_EQ(metricsCopy[dbName1Str].primaryReadMetrics.sorterSpills, 0);
+ ASSERT_EQ(metricsCopy[dbName1Str].primaryReadMetrics.docsReturned.bytes(), 0);
+ ASSERT_EQ(metricsCopy[dbName1Str].primaryReadMetrics.docsReturned.units(), 0);
+ ASSERT_EQ(metricsCopy[dbName1Str].primaryReadMetrics.cursorSeeks, 0);
+
+ std::string otherDbName1Str = str::stream() << otherTenantId.toString() << "_db1";
+ ASSERT_EQ(metricsCopy[otherDbName1Str].primaryReadMetrics.docsRead.bytes(), 2);
+ ASSERT_EQ(metricsCopy[otherDbName1Str].primaryReadMetrics.docsRead.units(), 1);
+ ASSERT_EQ(metricsCopy[otherDbName1Str].primaryReadMetrics.idxEntriesRead.bytes(), 4);
+ ASSERT_EQ(metricsCopy[otherDbName1Str].primaryReadMetrics.idxEntriesRead.units(), 1);
+ ASSERT_EQ(metricsCopy[otherDbName1Str].primaryReadMetrics.keysSorted, 8);
+ ASSERT_EQ(metricsCopy[otherDbName1Str].primaryReadMetrics.sorterSpills, 16);
+ ASSERT_EQ(metricsCopy[otherDbName1Str].primaryReadMetrics.docsReturned.bytes(), 32);
+ ASSERT_EQ(metricsCopy[otherDbName1Str].primaryReadMetrics.docsReturned.units(), 1);
+ ASSERT_EQ(metricsCopy[otherDbName1Str].primaryReadMetrics.cursorSeeks, 1);
+}
+
} // namespace mongo
diff --git a/src/mongo/db/ttl.cpp b/src/mongo/db/ttl.cpp
index b2a94b47ad1..918d9962500 100644
--- a/src/mongo/db/ttl.cpp
+++ b/src/mongo/db/ttl.cpp
@@ -589,7 +589,7 @@ bool TTLMonitor::_doTTLIndexDelete(OperationContext* opCtx,
return false;
}
- ResourceConsumption::ScopedMetricsCollector scopedMetrics(opCtx, nss->db().toString());
+ ResourceConsumption::ScopedMetricsCollector scopedMetrics(opCtx, nss->dbName());
if (info.isClustered()) {
return _deleteExpiredWithCollscan(opCtx, ttlCollectionCache, coll);