summaryrefslogtreecommitdiff
path: root/src/mongo/db/stats/resource_consumption_metrics_test.cpp
diff options
context:
space:
mode:
author Louis Williams <louis.williams@mongodb.com> 2020-10-16 10:57:32 -0400
committer Evergreen Agent <no-reply@evergreen.mongodb.com> 2020-10-20 14:54:46 +0000
commit d0ff449f4904ec101a1c8dd8da9199509e21980d (patch)
tree 8f532852f877679a7025f9d8bdcf63a98a9d5ec6 /src/mongo/db/stats/resource_consumption_metrics_test.cpp
parent 63c4374e19a21d0ff9cf0ab926b7b99474f194cf (diff)
download mongo-d0ff449f4904ec101a1c8dd8da9199509e21980d.tar.gz
SERVER-51024 Collect document units read and written per operation
Diffstat (limited to 'src/mongo/db/stats/resource_consumption_metrics_test.cpp')
-rw-r--r-- src/mongo/db/stats/resource_consumption_metrics_test.cpp | 93
1 file changed, 79 insertions(+), 14 deletions(-)
diff --git a/src/mongo/db/stats/resource_consumption_metrics_test.cpp b/src/mongo/db/stats/resource_consumption_metrics_test.cpp
index 2f88f0072cc..8f34be4f2b3 100644
--- a/src/mongo/db/stats/resource_consumption_metrics_test.cpp
+++ b/src/mongo/db/stats/resource_consumption_metrics_test.cpp
@@ -47,7 +47,7 @@ ServerParameter* getServerParameter(const std::string& name) {
ASSERT(sp);
return sp;
}
-
+} // namespace
class ResourceConsumptionMetricsTest : public ServiceContextTest {
public:
@@ -55,6 +55,7 @@ public:
_opCtx = makeOperationContext();
ASSERT_OK(getServerParameter("measureOperationResourceConsumption")->setFromString("true"));
gAggregateOperationResourceConsumptionMetrics = true;
+ gDocumentUnitSizeBytes = 128;
auto svcCtx = getServiceContext();
auto replCoord = std::make_unique<repl::ReplicationCoordinatorMock>(svcCtx);
@@ -215,8 +216,7 @@ TEST_F(ResourceConsumptionMetricsTest, IncrementReadMetrics) {
{
ResourceConsumption::ScopedMetricsCollector scope(_opCtx.get(), "db1");
- operationMetrics.incrementDocBytesRead(_opCtx.get(), 2);
- operationMetrics.incrementDocUnitsRead(_opCtx.get(), 4);
+ operationMetrics.incrementOneDocRead(_opCtx.get(), 2);
operationMetrics.incrementIdxEntriesRead(_opCtx.get(), 8);
operationMetrics.incrementKeysSorted(_opCtx.get(), 16);
}
@@ -225,7 +225,7 @@ TEST_F(ResourceConsumptionMetricsTest, IncrementReadMetrics) {
auto metricsCopy = globalResourceConsumption.getMetrics();
ASSERT_EQ(metricsCopy["db1"].primaryMetrics.docBytesRead, 2);
- ASSERT_EQ(metricsCopy["db1"].primaryMetrics.docUnitsRead, 4);
+ ASSERT_EQ(metricsCopy["db1"].primaryMetrics.docUnitsRead, 1);
ASSERT_EQ(metricsCopy["db1"].primaryMetrics.idxEntriesRead, 8);
ASSERT_EQ(metricsCopy["db1"].primaryMetrics.keysSorted, 16);
@@ -235,15 +235,14 @@ TEST_F(ResourceConsumptionMetricsTest, IncrementReadMetrics) {
{
ResourceConsumption::ScopedMetricsCollector scope(_opCtx.get(), "db1");
- operationMetrics.incrementDocBytesRead(_opCtx.get(), 32);
- operationMetrics.incrementDocUnitsRead(_opCtx.get(), 64);
+ operationMetrics.incrementOneDocRead(_opCtx.get(), 32);
operationMetrics.incrementIdxEntriesRead(_opCtx.get(), 128);
operationMetrics.incrementKeysSorted(_opCtx.get(), 256);
}
metricsCopy = globalResourceConsumption.getMetrics();
ASSERT_EQ(metricsCopy["db1"].primaryMetrics.docBytesRead, 2 + 32);
- ASSERT_EQ(metricsCopy["db1"].primaryMetrics.docUnitsRead, 4 + 64);
+ ASSERT_EQ(metricsCopy["db1"].primaryMetrics.docUnitsRead, 2);
ASSERT_EQ(metricsCopy["db1"].primaryMetrics.idxEntriesRead, 8 + 128);
ASSERT_EQ(metricsCopy["db1"].primaryMetrics.keysSorted, 16 + 256);
}
@@ -258,15 +257,14 @@ TEST_F(ResourceConsumptionMetricsTest, IncrementReadMetricsSecondary) {
{
ResourceConsumption::ScopedMetricsCollector scope(_opCtx.get(), "db1");
- operationMetrics.incrementDocBytesRead(_opCtx.get(), 2);
- operationMetrics.incrementDocUnitsRead(_opCtx.get(), 4);
+ operationMetrics.incrementOneDocRead(_opCtx.get(), 2);
operationMetrics.incrementIdxEntriesRead(_opCtx.get(), 8);
operationMetrics.incrementKeysSorted(_opCtx.get(), 16);
}
auto metricsCopy = globalResourceConsumption.getMetrics();
ASSERT_EQ(metricsCopy["db1"].secondaryMetrics.docBytesRead, 2);
- ASSERT_EQ(metricsCopy["db1"].secondaryMetrics.docUnitsRead, 4);
+ ASSERT_EQ(metricsCopy["db1"].secondaryMetrics.docUnitsRead, 1);
ASSERT_EQ(metricsCopy["db1"].secondaryMetrics.idxEntriesRead, 8);
ASSERT_EQ(metricsCopy["db1"].secondaryMetrics.keysSorted, 16);
@@ -276,18 +274,85 @@ TEST_F(ResourceConsumptionMetricsTest, IncrementReadMetricsSecondary) {
{
ResourceConsumption::ScopedMetricsCollector scope(_opCtx.get(), "db1");
- operationMetrics.incrementDocBytesRead(_opCtx.get(), 32);
- operationMetrics.incrementDocUnitsRead(_opCtx.get(), 64);
+ operationMetrics.incrementOneDocRead(_opCtx.get(), 32);
operationMetrics.incrementIdxEntriesRead(_opCtx.get(), 128);
operationMetrics.incrementKeysSorted(_opCtx.get(), 256);
}
metricsCopy = globalResourceConsumption.getMetrics();
ASSERT_EQ(metricsCopy["db1"].secondaryMetrics.docBytesRead, 2 + 32);
- ASSERT_EQ(metricsCopy["db1"].secondaryMetrics.docUnitsRead, 4 + 64);
+ ASSERT_EQ(metricsCopy["db1"].secondaryMetrics.docUnitsRead, 2);
ASSERT_EQ(metricsCopy["db1"].secondaryMetrics.idxEntriesRead, 8 + 128);
ASSERT_EQ(metricsCopy["db1"].secondaryMetrics.keysSorted, 16 + 256);
}
-} // namespace
+TEST_F(ResourceConsumptionMetricsTest, DocumentUnitsRead) {
+ auto& globalResourceConsumption = ResourceConsumption::get(getServiceContext());
+ auto& operationMetrics = ResourceConsumption::MetricsCollector::get(_opCtx.get());
+
+ int expectedBytes = 0;
+ int expectedUnits = 0;
+
+ {
+ ResourceConsumption::ScopedMetricsCollector scope(_opCtx.get(), "db1");
+
+ // Each of these should be counted as 1 document unit (unit size = 128).
+ operationMetrics.incrementOneDocRead(_opCtx.get(), 2);
+ operationMetrics.incrementOneDocRead(_opCtx.get(), 4);
+ operationMetrics.incrementOneDocRead(_opCtx.get(), 8);
+ operationMetrics.incrementOneDocRead(_opCtx.get(), 16);
+ operationMetrics.incrementOneDocRead(_opCtx.get(), 32);
+ operationMetrics.incrementOneDocRead(_opCtx.get(), 64);
+ operationMetrics.incrementOneDocRead(_opCtx.get(), 128);
+ expectedBytes += 2 + 4 + 8 + 16 + 32 + 64 + 128;
+ expectedUnits += 7;
+
+ // Each of these should be counted as 2 document units (unit size = 128).
+ operationMetrics.incrementOneDocRead(_opCtx.get(), 129);
+ operationMetrics.incrementOneDocRead(_opCtx.get(), 200);
+ operationMetrics.incrementOneDocRead(_opCtx.get(), 255);
+ operationMetrics.incrementOneDocRead(_opCtx.get(), 256);
+ expectedBytes += 129 + 200 + 255 + 256;
+ expectedUnits += 8;
+ }
+
+ auto metricsCopy = globalResourceConsumption.getMetrics();
+ ASSERT_EQ(metricsCopy["db1"].primaryMetrics.docBytesRead, expectedBytes);
+ ASSERT_EQ(metricsCopy["db1"].primaryMetrics.docUnitsRead, expectedUnits);
+}
+
+TEST_F(ResourceConsumptionMetricsTest, DocumentUnitsWritten) {
+ auto& globalResourceConsumption = ResourceConsumption::get(getServiceContext());
+ auto& operationMetrics = ResourceConsumption::MetricsCollector::get(_opCtx.get());
+
+ int expectedBytes = 0;
+ int expectedUnits = 0;
+
+ {
+ ResourceConsumption::ScopedMetricsCollector scope(_opCtx.get(), "db1");
+
+ // Each of these should be counted as 1 document unit (unit size = 128).
+ operationMetrics.incrementOneDocWritten(2);
+ operationMetrics.incrementOneDocWritten(4);
+ operationMetrics.incrementOneDocWritten(8);
+ operationMetrics.incrementOneDocWritten(16);
+ operationMetrics.incrementOneDocWritten(32);
+ operationMetrics.incrementOneDocWritten(64);
+ operationMetrics.incrementOneDocWritten(128);
+ expectedBytes += 2 + 4 + 8 + 16 + 32 + 64 + 128;
+ expectedUnits += 7;
+
+ // Each of these should be counted as 2 document units (unit size = 128).
+ operationMetrics.incrementOneDocWritten(129);
+ operationMetrics.incrementOneDocWritten(200);
+ operationMetrics.incrementOneDocWritten(255);
+ operationMetrics.incrementOneDocWritten(256);
+ expectedBytes += 129 + 200 + 255 + 256;
+ expectedUnits += 8;
+ }
+
+ auto metricsCopy = globalResourceConsumption.getMetrics();
+ ASSERT_EQ(metricsCopy["db1"].docBytesWritten, expectedBytes);
+ ASSERT_EQ(metricsCopy["db1"].docUnitsWritten, expectedUnits);
+}
} // namespace mongo