Diffstat (limited to 'src/mongo/db/stats/resource_consumption_metrics_test.cpp')
-rw-r--r--  src/mongo/db/stats/resource_consumption_metrics_test.cpp | 215
1 file changed, 186 insertions(+), 29 deletions(-)
diff --git a/src/mongo/db/stats/resource_consumption_metrics_test.cpp b/src/mongo/db/stats/resource_consumption_metrics_test.cpp
index 938415b2a30..87d004276fc 100644
--- a/src/mongo/db/stats/resource_consumption_metrics_test.cpp
+++ b/src/mongo/db/stats/resource_consumption_metrics_test.cpp
@@ -74,7 +74,8 @@ TEST_F(ResourceConsumptionMetricsTest, Merge) {
auto& operationMetrics = ResourceConsumption::MetricsCollector::get(_opCtx.get());
- operationMetrics.beginScopedCollecting(_opCtx.get(), "db1");
+ operationMetrics.beginScopedCollecting(
+ _opCtx.get(), DatabaseName::createDatabaseName_forTest(boost::none, "db1"));
globalResourceConsumption.merge(
_opCtx.get(), operationMetrics.getDbName(), operationMetrics.getMetrics());
globalResourceConsumption.merge(
@@ -87,7 +88,8 @@ TEST_F(ResourceConsumptionMetricsTest, Merge) {
ASSERT_EQ(dbMetrics.count("db3"), 0);
operationMetrics.endScopedCollecting();
- operationMetrics.beginScopedCollecting(_opCtx.get(), "db2");
+ operationMetrics.beginScopedCollecting(
+ _opCtx.get(), DatabaseName::createDatabaseName_forTest(boost::none, "db2"));
globalResourceConsumption.merge(
_opCtx.get(), operationMetrics.getDbName(), operationMetrics.getMetrics());
globalResourceConsumption.merge(
@@ -106,7 +108,10 @@ TEST_F(ResourceConsumptionMetricsTest, ScopedMetricsCollector) {
// Collect
{
const bool collectMetrics = true;
- ResourceConsumption::ScopedMetricsCollector scope(_opCtx.get(), "db1", collectMetrics);
+ ResourceConsumption::ScopedMetricsCollector scope(
+ _opCtx.get(),
+ DatabaseName::createDatabaseName_forTest(boost::none, "db1"),
+ collectMetrics);
ASSERT_TRUE(operationMetrics.isCollecting());
}
@@ -118,7 +123,10 @@ TEST_F(ResourceConsumptionMetricsTest, ScopedMetricsCollector) {
// Don't collect
{
const bool collectMetrics = false;
- ResourceConsumption::ScopedMetricsCollector scope(_opCtx.get(), "db1", collectMetrics);
+ ResourceConsumption::ScopedMetricsCollector scope(
+ _opCtx.get(),
+ DatabaseName::createDatabaseName_forTest(boost::none, "db1"),
+ collectMetrics);
ASSERT_FALSE(operationMetrics.isCollecting());
}
@@ -128,13 +136,19 @@ TEST_F(ResourceConsumptionMetricsTest, ScopedMetricsCollector) {
ASSERT_EQ(metricsCopy.count("db1"), 0);
// Collect
- { ResourceConsumption::ScopedMetricsCollector scope(_opCtx.get(), "db1"); }
+ {
+ ResourceConsumption::ScopedMetricsCollector scope(
+ _opCtx.get(), DatabaseName::createDatabaseName_forTest(boost::none, "db1"));
+ }
metricsCopy = globalResourceConsumption.getDbMetrics();
ASSERT_EQ(metricsCopy.count("db1"), 1);
// Collect on a different database
- { ResourceConsumption::ScopedMetricsCollector scope(_opCtx.get(), "db2"); }
+ {
+ ResourceConsumption::ScopedMetricsCollector scope(
+ _opCtx.get(), DatabaseName::createDatabaseName_forTest(boost::none, "db2"));
+ }
metricsCopy = globalResourceConsumption.getDbMetrics();
ASSERT_EQ(metricsCopy.count("db1"), 1);
@@ -156,16 +170,21 @@ TEST_F(ResourceConsumptionMetricsTest, NestedScopedMetricsCollector) {
// Collect, nesting does not override that behavior or change the collection database.
{
- ResourceConsumption::ScopedMetricsCollector scope(_opCtx.get(), "db1");
+ ResourceConsumption::ScopedMetricsCollector scope(
+ _opCtx.get(), DatabaseName::createDatabaseName_forTest(boost::none, "db1"));
ASSERT(operationMetrics.hasCollectedMetrics());
{
const bool collectMetrics = false;
- ResourceConsumption::ScopedMetricsCollector scope(_opCtx.get(), "db2", collectMetrics);
+ ResourceConsumption::ScopedMetricsCollector scope(
+ _opCtx.get(),
+ DatabaseName::createDatabaseName_forTest(boost::none, "db2"),
+ collectMetrics);
ASSERT_TRUE(operationMetrics.isCollecting());
{
- ResourceConsumption::ScopedMetricsCollector scope(_opCtx.get(), "db3");
+ ResourceConsumption::ScopedMetricsCollector scope(
+ _opCtx.get(), DatabaseName::createDatabaseName_forTest(boost::none, "db3"));
ASSERT_TRUE(operationMetrics.isCollecting());
}
}
@@ -181,17 +200,23 @@ TEST_F(ResourceConsumptionMetricsTest, NestedScopedMetricsCollector) {
// Don't collect, nesting does not override that behavior.
{
const bool collectMetrics = false;
- ResourceConsumption::ScopedMetricsCollector scope(_opCtx.get(), "db2", collectMetrics);
+ ResourceConsumption::ScopedMetricsCollector scope(
+ _opCtx.get(),
+ DatabaseName::createDatabaseName_forTest(boost::none, "db2"),
+ collectMetrics);
ASSERT_FALSE(operationMetrics.hasCollectedMetrics());
{
- ResourceConsumption::ScopedMetricsCollector scope(_opCtx.get(), "db3");
+ ResourceConsumption::ScopedMetricsCollector scope(
+ _opCtx.get(), DatabaseName::createDatabaseName_forTest(boost::none, "db3"));
ASSERT_FALSE(operationMetrics.isCollecting());
{
ResourceConsumption::ScopedMetricsCollector scope(
- _opCtx.get(), "db4", collectMetrics);
+ _opCtx.get(),
+ DatabaseName::createDatabaseName_forTest(boost::none, "db4"),
+ collectMetrics);
ASSERT_FALSE(operationMetrics.isCollecting());
}
}
@@ -225,7 +250,8 @@ TEST_F(ResourceConsumptionMetricsTest, IncrementReadMetrics) {
auto& operationMetrics = ResourceConsumption::MetricsCollector::get(_opCtx.get());
{
- ResourceConsumption::ScopedMetricsCollector scope(_opCtx.get(), "db1");
+ ResourceConsumption::ScopedMetricsCollector scope(
+ _opCtx.get(), DatabaseName::createDatabaseName_forTest(boost::none, "db1"));
operationMetrics.incrementOneDocRead("", 2);
operationMetrics.incrementOneIdxEntryRead("", 8);
@@ -252,7 +278,8 @@ TEST_F(ResourceConsumptionMetricsTest, IncrementReadMetrics) {
reset(operationMetrics);
{
- ResourceConsumption::ScopedMetricsCollector scope(_opCtx.get(), "db1");
+ ResourceConsumption::ScopedMetricsCollector scope(
+ _opCtx.get(), DatabaseName::createDatabaseName_forTest(boost::none, "db1"));
operationMetrics.incrementOneDocRead("", 32);
operationMetrics.incrementOneIdxEntryRead("", 128);
@@ -282,7 +309,8 @@ TEST_F(ResourceConsumptionMetricsTest, IncrementReadMetricsSecondary) {
->setFollowerMode(repl::MemberState::RS_SECONDARY));
{
- ResourceConsumption::ScopedMetricsCollector scope(_opCtx.get(), "db1");
+ ResourceConsumption::ScopedMetricsCollector scope(
+ _opCtx.get(), DatabaseName::createDatabaseName_forTest(boost::none, "db1"));
operationMetrics.incrementOneDocRead("", 2);
operationMetrics.incrementOneIdxEntryRead("", 8);
@@ -307,7 +335,8 @@ TEST_F(ResourceConsumptionMetricsTest, IncrementReadMetricsSecondary) {
reset(operationMetrics);
{
- ResourceConsumption::ScopedMetricsCollector scope(_opCtx.get(), "db1");
+ ResourceConsumption::ScopedMetricsCollector scope(
+ _opCtx.get(), DatabaseName::createDatabaseName_forTest(boost::none, "db1"));
operationMetrics.incrementOneDocRead("", 32);
operationMetrics.incrementOneIdxEntryRead("", 128);
@@ -336,7 +365,8 @@ TEST_F(ResourceConsumptionMetricsTest, IncrementReadMetricsAcrossStates) {
// Start collecting metrics in the primary state, then change to secondary. Metrics should be
// attributed to the secondary state, since that is the state where the operation completed.
{
- ResourceConsumption::ScopedMetricsCollector scope(_opCtx.get(), "db1");
+ ResourceConsumption::ScopedMetricsCollector scope(
+ _opCtx.get(), DatabaseName::createDatabaseName_forTest(boost::none, "db1"));
operationMetrics.incrementOneDocRead("", 2);
operationMetrics.incrementOneIdxEntryRead("", 8);
@@ -380,7 +410,8 @@ TEST_F(ResourceConsumptionMetricsTest, IncrementReadMetricsAcrossStates) {
// Start collecting metrics in the secondary state, then change to primary. Metrics should be
// attributed to the primary state only.
{
- ResourceConsumption::ScopedMetricsCollector scope(_opCtx.get(), "db1");
+ ResourceConsumption::ScopedMetricsCollector scope(
+ _opCtx.get(), DatabaseName::createDatabaseName_forTest(boost::none, "db1"));
operationMetrics.incrementOneDocRead("", 2);
operationMetrics.incrementOneIdxEntryRead("", 8);
@@ -429,7 +460,8 @@ TEST_F(ResourceConsumptionMetricsTest, DocumentUnitsRead) {
int expectedUnits = 0;
{
- ResourceConsumption::ScopedMetricsCollector scope(_opCtx.get(), "db1");
+ ResourceConsumption::ScopedMetricsCollector scope(
+ _opCtx.get(), DatabaseName::createDatabaseName_forTest(boost::none, "db1"));
// Each of these should be counted as 1 document unit (unit size = 128).
operationMetrics.incrementOneDocRead("", 2);
@@ -464,7 +496,8 @@ TEST_F(ResourceConsumptionMetricsTest, DocumentUnitsWritten) {
int expectedUnits = 0;
{
- ResourceConsumption::ScopedMetricsCollector scope(_opCtx.get(), "db1");
+ ResourceConsumption::ScopedMetricsCollector scope(
+ _opCtx.get(), DatabaseName::createDatabaseName_forTest(boost::none, "db1"));
// Each of these should be counted as 1 document unit (unit size = 128).
operationMetrics.incrementOneDocWritten("", 2);
@@ -498,7 +531,8 @@ TEST_F(ResourceConsumptionMetricsTest, TotalUnitsWritten) {
int expectedUnits = 0;
{
- ResourceConsumption::ScopedMetricsCollector scope(_opCtx.get(), "db1");
+ ResourceConsumption::ScopedMetricsCollector scope(
+ _opCtx.get(), DatabaseName::createDatabaseName_forTest(boost::none, "db1"));
// Each of these should be counted as 1 total unit (unit size = 128).
operationMetrics.incrementOneDocWritten("", 2);
@@ -560,7 +594,8 @@ TEST_F(ResourceConsumptionMetricsTest, IdxEntryUnitsRead) {
int expectedUnits = 0;
{
- ResourceConsumption::ScopedMetricsCollector scope(_opCtx.get(), "db1");
+ ResourceConsumption::ScopedMetricsCollector scope(
+ _opCtx.get(), DatabaseName::createDatabaseName_forTest(boost::none, "db1"));
gIndexEntryUnitSizeBytes = 16;
@@ -609,7 +644,8 @@ TEST_F(ResourceConsumptionMetricsTest, IdxEntryUnitsWritten) {
int expectedUnits = 0;
{
- ResourceConsumption::ScopedMetricsCollector scope(_opCtx.get(), "db1");
+ ResourceConsumption::ScopedMetricsCollector scope(
+ _opCtx.get(), DatabaseName::createDatabaseName_forTest(boost::none, "db1"));
gIndexEntryUnitSizeBytes = 16;
@@ -674,7 +710,8 @@ TEST_F(ResourceConsumptionMetricsTest, CpuNanos) {
{
// Ensure that the CPU timer increases relative to a single operation.
- ResourceConsumption::ScopedMetricsCollector scope(_opCtx.get(), "db1");
+ ResourceConsumption::ScopedMetricsCollector scope(
+ _opCtx.get(), DatabaseName::createDatabaseName_forTest(boost::none, "db1"));
auto lastNanos = operationMetrics.getMetrics().cpuTimer->getElapsed();
spinFor(Milliseconds(1));
ASSERT_GT(operationMetrics.getMetrics().cpuTimer->getElapsed(), lastNanos);
@@ -690,7 +727,8 @@ TEST_F(ResourceConsumptionMetricsTest, CpuNanos) {
ASSERT_EQ(dbMetrics["db1"].cpuNanos, nanos);
{
- ResourceConsumption::ScopedMetricsCollector scope(_opCtx.get(), "db1");
+ ResourceConsumption::ScopedMetricsCollector scope(
+ _opCtx.get(), DatabaseName::createDatabaseName_forTest(boost::none, "db1"));
spinFor(Milliseconds(1));
}
@@ -716,7 +754,8 @@ TEST_F(ResourceConsumptionMetricsTest, CursorSeeks) {
int expectedSeeks = 0;
{
- ResourceConsumption::ScopedMetricsCollector scope(_opCtx.get(), "db1");
+ ResourceConsumption::ScopedMetricsCollector scope(
+ _opCtx.get(), DatabaseName::createDatabaseName_forTest(boost::none, "db1"));
operationMetrics.incrementOneCursorSeek("");
operationMetrics.incrementOneCursorSeek("");
operationMetrics.incrementOneCursorSeek("");
@@ -733,7 +772,8 @@ TEST_F(ResourceConsumptionMetricsTest, PauseMetricsCollectorBlock) {
auto& operationMetrics = ResourceConsumption::MetricsCollector::get(_opCtx.get());
{
- ResourceConsumption::ScopedMetricsCollector scope(_opCtx.get(), "db1");
+ ResourceConsumption::ScopedMetricsCollector scope(
+ _opCtx.get(), DatabaseName::createDatabaseName_forTest(boost::none, "db1"));
{
// Metrics increase within this scope should not be applied.
ResourceConsumption::PauseMetricsCollectorBlock pauseMetricsCollection(_opCtx.get());
@@ -772,7 +812,8 @@ TEST_F(ResourceConsumptionMetricsTest, ResetMetricsBetweenCollection) {
auto& operationMetrics = ResourceConsumption::MetricsCollector::get(_opCtx.get());
{
- ResourceConsumption::ScopedMetricsCollector scope(_opCtx.get(), "db1");
+ ResourceConsumption::ScopedMetricsCollector scope(
+ _opCtx.get(), DatabaseName::createDatabaseName_forTest(boost::none, "db1"));
operationMetrics.incrementOneDocRead("", 2);
operationMetrics.incrementOneIdxEntryRead("", 4);
@@ -795,7 +836,8 @@ TEST_F(ResourceConsumptionMetricsTest, ResetMetricsBetweenCollection) {
// We expect this metrics collection to wipe out the metrics from the previous one.
{
- ResourceConsumption::ScopedMetricsCollector scope(_opCtx.get(), "db2");
+ ResourceConsumption::ScopedMetricsCollector scope(
+ _opCtx.get(), DatabaseName::createDatabaseName_forTest(boost::none, "db2"));
operationMetrics.incrementOneDocRead("", 64);
operationMetrics.incrementOneIdxEntryRead("", 128);
operationMetrics.incrementKeysSorted(256);
@@ -828,4 +870,119 @@ TEST_F(ResourceConsumptionMetricsTest, ResetMetricsBetweenCollection) {
ASSERT_EQ(metricsCopy["db2"].primaryReadMetrics.cursorSeeks, 1);
}
+TEST_F(ResourceConsumptionMetricsTest, MetricsWithTenantId) {
+ auto& globalResourceConsumption = ResourceConsumption::get(getServiceContext());
+ auto& operationMetrics = ResourceConsumption::MetricsCollector::get(_opCtx.get());
+ const TenantId tenantId = TenantId(OID::gen());
+
+ std::string dbName1Str = str::stream() << tenantId.toString() << "_db1";
+ operationMetrics.beginScopedCollecting(
+ _opCtx.get(), DatabaseName::createDatabaseName_forTest(tenantId, "db1"));
+ globalResourceConsumption.merge(
+ _opCtx.get(), operationMetrics.getDbName(), operationMetrics.getMetrics());
+ globalResourceConsumption.merge(
+ _opCtx.get(), operationMetrics.getDbName(), operationMetrics.getMetrics());
+
+ auto dbMetrics = globalResourceConsumption.getDbMetrics();
+ ASSERT_EQ(dbMetrics.count(dbName1Str), 1);
+ ASSERT_EQ(dbMetrics.count("db2"), 0);
+ operationMetrics.endScopedCollecting();
+
+ std::string dbName2Str = str::stream() << tenantId.toString() << "_db2";
+ operationMetrics.beginScopedCollecting(
+ _opCtx.get(), DatabaseName::createDatabaseName_forTest(tenantId, "db2"));
+ globalResourceConsumption.merge(
+ _opCtx.get(), operationMetrics.getDbName(), operationMetrics.getMetrics());
+ globalResourceConsumption.merge(
+ _opCtx.get(), operationMetrics.getDbName(), operationMetrics.getMetrics());
+
+ dbMetrics = globalResourceConsumption.getDbMetrics();
+ ASSERT_EQ(dbMetrics.count(dbName1Str), 1);
+ ASSERT_EQ(dbMetrics.count(dbName2Str), 1);
+ operationMetrics.endScopedCollecting();
+
+ // Same '_db2' but different tenant.
+ const TenantId otherTenantId = TenantId(OID::gen());
+ dbMetrics = globalResourceConsumption.getDbMetrics();
+
+ std::string otherDbName2Str = str::stream() << otherTenantId.toString() << "_db2";
+ operationMetrics.beginScopedCollecting(
+ _opCtx.get(), DatabaseName::createDatabaseName_forTest(otherTenantId, "db2"));
+ globalResourceConsumption.merge(
+ _opCtx.get(), operationMetrics.getDbName(), operationMetrics.getMetrics());
+ globalResourceConsumption.merge(
+ _opCtx.get(), operationMetrics.getDbName(), operationMetrics.getMetrics());
+
+ dbMetrics = globalResourceConsumption.getDbMetrics();
+ ASSERT_EQ(dbMetrics.count(dbName1Str), 1);
+ ASSERT_EQ(dbMetrics.count(dbName2Str), 1);
+ ASSERT_EQ(dbMetrics.count(otherDbName2Str), 1);
+ operationMetrics.endScopedCollecting();
+}
+
+TEST_F(ResourceConsumptionMetricsTest, MergeWithTenantId) {
+ auto& globalResourceConsumption = ResourceConsumption::get(getServiceContext());
+ auto& operationMetrics = ResourceConsumption::MetricsCollector::get(_opCtx.get());
+ const TenantId tenantId = TenantId(OID::gen());
+ const TenantId otherTenantId = TenantId(OID::gen());
+
+ {
+ ResourceConsumption::ScopedMetricsCollector scope(
+ _opCtx.get(), DatabaseName::createDatabaseName_forTest(tenantId, "db1"));
+
+ operationMetrics.incrementOneDocRead("", 2);
+ operationMetrics.incrementOneIdxEntryRead("", 4);
+ operationMetrics.incrementKeysSorted(8);
+ operationMetrics.incrementSorterSpills(16);
+ operationMetrics.incrementDocUnitsReturned("", makeDocUnits(32));
+ operationMetrics.incrementOneCursorSeek("");
+ }
+
+ std::string dbName1Str = str::stream() << tenantId.toString() << "_db1";
+ auto metricsCopy = globalResourceConsumption.getAndClearDbMetrics();
+ ASSERT_EQ(metricsCopy[dbName1Str].primaryReadMetrics.docsRead.bytes(), 2);
+ ASSERT_EQ(metricsCopy[dbName1Str].primaryReadMetrics.docsRead.units(), 1);
+ ASSERT_EQ(metricsCopy[dbName1Str].primaryReadMetrics.idxEntriesRead.bytes(), 4);
+ ASSERT_EQ(metricsCopy[dbName1Str].primaryReadMetrics.idxEntriesRead.units(), 1);
+ ASSERT_EQ(metricsCopy[dbName1Str].primaryReadMetrics.keysSorted, 8);
+ ASSERT_EQ(metricsCopy[dbName1Str].primaryReadMetrics.sorterSpills, 16);
+ ASSERT_EQ(metricsCopy[dbName1Str].primaryReadMetrics.docsReturned.bytes(), 32);
+ ASSERT_EQ(metricsCopy[dbName1Str].primaryReadMetrics.docsReturned.units(), 1);
+ ASSERT_EQ(metricsCopy[dbName1Str].primaryReadMetrics.cursorSeeks, 1);
+
+ {
+ ResourceConsumption::ScopedMetricsCollector scope(
+ _opCtx.get(), DatabaseName::createDatabaseName_forTest(otherTenantId, "db1"));
+
+ operationMetrics.incrementOneDocRead("", 2);
+ operationMetrics.incrementOneIdxEntryRead("", 4);
+ operationMetrics.incrementKeysSorted(8);
+ operationMetrics.incrementSorterSpills(16);
+ operationMetrics.incrementDocUnitsReturned("", makeDocUnits(32));
+ operationMetrics.incrementOneCursorSeek("");
+ }
+
+ metricsCopy = globalResourceConsumption.getAndClearDbMetrics();
+ ASSERT_EQ(metricsCopy[dbName1Str].primaryReadMetrics.docsRead.bytes(), 0);
+ ASSERT_EQ(metricsCopy[dbName1Str].primaryReadMetrics.docsRead.units(), 0);
+ ASSERT_EQ(metricsCopy[dbName1Str].primaryReadMetrics.idxEntriesRead.bytes(), 0);
+ ASSERT_EQ(metricsCopy[dbName1Str].primaryReadMetrics.idxEntriesRead.units(), 0);
+ ASSERT_EQ(metricsCopy[dbName1Str].primaryReadMetrics.keysSorted, 0);
+ ASSERT_EQ(metricsCopy[dbName1Str].primaryReadMetrics.sorterSpills, 0);
+ ASSERT_EQ(metricsCopy[dbName1Str].primaryReadMetrics.docsReturned.bytes(), 0);
+ ASSERT_EQ(metricsCopy[dbName1Str].primaryReadMetrics.docsReturned.units(), 0);
+ ASSERT_EQ(metricsCopy[dbName1Str].primaryReadMetrics.cursorSeeks, 0);
+
+ std::string otherDbName1Str = str::stream() << otherTenantId.toString() << "_db1";
+ ASSERT_EQ(metricsCopy[otherDbName1Str].primaryReadMetrics.docsRead.bytes(), 2);
+ ASSERT_EQ(metricsCopy[otherDbName1Str].primaryReadMetrics.docsRead.units(), 1);
+ ASSERT_EQ(metricsCopy[otherDbName1Str].primaryReadMetrics.idxEntriesRead.bytes(), 4);
+ ASSERT_EQ(metricsCopy[otherDbName1Str].primaryReadMetrics.idxEntriesRead.units(), 1);
+ ASSERT_EQ(metricsCopy[otherDbName1Str].primaryReadMetrics.keysSorted, 8);
+ ASSERT_EQ(metricsCopy[otherDbName1Str].primaryReadMetrics.sorterSpills, 16);
+ ASSERT_EQ(metricsCopy[otherDbName1Str].primaryReadMetrics.docsReturned.bytes(), 32);
+ ASSERT_EQ(metricsCopy[otherDbName1Str].primaryReadMetrics.docsReturned.units(), 1);
+ ASSERT_EQ(metricsCopy[otherDbName1Str].primaryReadMetrics.cursorSeeks, 1);
+}
+
} // namespace mongo