summary refs log tree commit diff
path: root/src/mongo
diff options
context:
space:
mode:
Diffstat (limited to 'src/mongo')
-rw-r--r--  src/mongo/db/stats/operation_resource_consumption.idl   12
-rw-r--r--  src/mongo/db/stats/resource_consumption_metrics.cpp     31
-rw-r--r--  src/mongo/db/stats/resource_consumption_metrics.h       19
-rw-r--r--  src/mongo/db/stats/resource_consumption_metrics_test.cpp 93
-rw-r--r--  src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp 24
5 files changed, 129 insertions, 50 deletions
diff --git a/src/mongo/db/stats/operation_resource_consumption.idl b/src/mongo/db/stats/operation_resource_consumption.idl
index c6c95891493..63e71cee4f8 100644
--- a/src/mongo/db/stats/operation_resource_consumption.idl
+++ b/src/mongo/db/stats/operation_resource_consumption.idl
@@ -36,7 +36,7 @@ feature_flags:
default: false
server_parameters:
- aggregateOperationResourceConsumptionMetrics:
+ aggregateOperationResourceConsumptionMetrics:
description: "When true, globally aggregates per-operation resource consumption metrics.
Requires measureOperationResourceConsumption to also be set"
set_at:
@@ -44,3 +44,13 @@ server_parameters:
cpp_varname: gAggregateOperationResourceConsumptionMetrics
cpp_vartype: bool
default: false
+
+ documentUnitSizeBytes:
+ description: "The size of a document unit in bytes for resource consumption metrics collection"
+ set_at:
+ - startup
+ cpp_varname: gDocumentUnitSizeBytes
+ cpp_vartype: int32_t
+ default: 128
+ validator:
+ gte: 1 \ No newline at end of file
diff --git a/src/mongo/db/stats/resource_consumption_metrics.cpp b/src/mongo/db/stats/resource_consumption_metrics.cpp
index 5d15e8cd68e..91f3a6d980b 100644
--- a/src/mongo/db/stats/resource_consumption_metrics.cpp
+++ b/src/mongo/db/stats/resource_consumption_metrics.cpp
@@ -29,6 +29,8 @@
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kControl
+#include <cmath>
+
#include "mongo/db/stats/resource_consumption_metrics.h"
#include "mongo/db/repl/replication_coordinator.h"
@@ -160,16 +162,15 @@ void ResourceConsumption::MetricsCollector::_updateReadMetrics(OperationContext*
});
}
-void ResourceConsumption::MetricsCollector::incrementDocBytesRead(OperationContext* opCtx,
- size_t docBytesRead) {
- _updateReadMetrics(opCtx,
- [&](ReadMetrics& readMetrics) { readMetrics.docBytesRead += docBytesRead; });
-}
-void ResourceConsumption::MetricsCollector::incrementDocUnitsRead(OperationContext* opCtx,
- size_t docUnitsRead) {
- _updateReadMetrics(opCtx,
- [&](ReadMetrics& readMetrics) { readMetrics.docUnitsRead += docUnitsRead; });
+void ResourceConsumption::MetricsCollector::incrementOneDocRead(OperationContext* opCtx,
+ size_t docBytesRead) {
+ _updateReadMetrics(opCtx, [&](ReadMetrics& readMetrics) {
+ size_t docUnits = std::ceil(docBytesRead / static_cast<float>(gDocumentUnitSizeBytes));
+ readMetrics.docBytesRead += docBytesRead;
+ readMetrics.docUnitsRead += docUnits;
+ });
}
+
void ResourceConsumption::MetricsCollector::incrementIdxEntriesRead(OperationContext* opCtx,
size_t idxEntriesRead) {
_updateReadMetrics(
@@ -181,12 +182,12 @@ void ResourceConsumption::MetricsCollector::incrementKeysSorted(OperationContext
[&](ReadMetrics& readMetrics) { readMetrics.keysSorted += keysSorted; });
}
-void ResourceConsumption::MetricsCollector::incrementDocBytesWritten(size_t bytesWritten) {
- _doIfCollecting([&] { _metrics.docBytesWritten += bytesWritten; });
-}
-
-void ResourceConsumption::MetricsCollector::incrementDocUnitsWritten(size_t unitsWritten) {
- _doIfCollecting([&] { _metrics.docUnitsWritten += unitsWritten; });
+void ResourceConsumption::MetricsCollector::incrementOneDocWritten(size_t bytesWritten) {
+ _doIfCollecting([&] {
+ size_t docUnits = std::ceil(bytesWritten / static_cast<float>(gDocumentUnitSizeBytes));
+ _metrics.docBytesWritten += bytesWritten;
+ _metrics.docUnitsWritten += docUnits;
+ });
}
void ResourceConsumption::MetricsCollector::incrementCpuMillis(size_t cpuMillis) {
diff --git a/src/mongo/db/stats/resource_consumption_metrics.h b/src/mongo/db/stats/resource_consumption_metrics.h
index f665588e8be..aa2221b6dea 100644
--- a/src/mongo/db/stats/resource_consumption_metrics.h
+++ b/src/mongo/db/stats/resource_consumption_metrics.h
@@ -215,21 +215,22 @@ public:
}
/**
- * These setters are replication-state aware and increment the desired metrics based on the
- * current replication state. This is a no-op when metrics collection is disabled on this
- * operation.
+ * This should be called once per document read with the number of bytes read for that
+ * document. This is replication-state aware and increments the metric based on the current
+ * replication state. This is a no-op when metrics collection is disabled on this operation.
*/
- void incrementDocBytesRead(OperationContext* opCtx, size_t docBytesRead);
- void incrementDocUnitsRead(OperationContext* opCtx, size_t docUnitsRead);
+ void incrementOneDocRead(OperationContext* opCtx, size_t docBytesRead);
+
void incrementIdxEntriesRead(OperationContext* opCtx, size_t idxEntriesRead);
void incrementKeysSorted(OperationContext* opCtx, size_t keysSorted);
/**
- * These setters increment the desired metrics independent of replication state, and only
- * when metrics collection is enabled for this operation.
+ * This should be called once per document written with the number of bytes written for that
+ * document. This increments the metric independent of replication state, and only when
+ * metrics collection is enabled for this operation.
*/
- void incrementDocBytesWritten(size_t docBytesWritten);
- void incrementDocUnitsWritten(size_t docUnitsWitten);
+ void incrementOneDocWritten(size_t docBytesWritten);
+
void incrementCpuMillis(size_t cpuMillis);
void incrementDocUnitsReturned(size_t docUnitsReturned);
diff --git a/src/mongo/db/stats/resource_consumption_metrics_test.cpp b/src/mongo/db/stats/resource_consumption_metrics_test.cpp
index 2f88f0072cc..8f34be4f2b3 100644
--- a/src/mongo/db/stats/resource_consumption_metrics_test.cpp
+++ b/src/mongo/db/stats/resource_consumption_metrics_test.cpp
@@ -47,7 +47,7 @@ ServerParameter* getServerParameter(const std::string& name) {
ASSERT(sp);
return sp;
}
-
+} // namespace
class ResourceConsumptionMetricsTest : public ServiceContextTest {
public:
@@ -55,6 +55,7 @@ public:
_opCtx = makeOperationContext();
ASSERT_OK(getServerParameter("measureOperationResourceConsumption")->setFromString("true"));
gAggregateOperationResourceConsumptionMetrics = true;
+ gDocumentUnitSizeBytes = 128;
auto svcCtx = getServiceContext();
auto replCoord = std::make_unique<repl::ReplicationCoordinatorMock>(svcCtx);
@@ -215,8 +216,7 @@ TEST_F(ResourceConsumptionMetricsTest, IncrementReadMetrics) {
{
ResourceConsumption::ScopedMetricsCollector scope(_opCtx.get(), "db1");
- operationMetrics.incrementDocBytesRead(_opCtx.get(), 2);
- operationMetrics.incrementDocUnitsRead(_opCtx.get(), 4);
+ operationMetrics.incrementOneDocRead(_opCtx.get(), 2);
operationMetrics.incrementIdxEntriesRead(_opCtx.get(), 8);
operationMetrics.incrementKeysSorted(_opCtx.get(), 16);
}
@@ -225,7 +225,7 @@ TEST_F(ResourceConsumptionMetricsTest, IncrementReadMetrics) {
auto metricsCopy = globalResourceConsumption.getMetrics();
ASSERT_EQ(metricsCopy["db1"].primaryMetrics.docBytesRead, 2);
- ASSERT_EQ(metricsCopy["db1"].primaryMetrics.docUnitsRead, 4);
+ ASSERT_EQ(metricsCopy["db1"].primaryMetrics.docUnitsRead, 1);
ASSERT_EQ(metricsCopy["db1"].primaryMetrics.idxEntriesRead, 8);
ASSERT_EQ(metricsCopy["db1"].primaryMetrics.keysSorted, 16);
@@ -235,15 +235,14 @@ TEST_F(ResourceConsumptionMetricsTest, IncrementReadMetrics) {
{
ResourceConsumption::ScopedMetricsCollector scope(_opCtx.get(), "db1");
- operationMetrics.incrementDocBytesRead(_opCtx.get(), 32);
- operationMetrics.incrementDocUnitsRead(_opCtx.get(), 64);
+ operationMetrics.incrementOneDocRead(_opCtx.get(), 32);
operationMetrics.incrementIdxEntriesRead(_opCtx.get(), 128);
operationMetrics.incrementKeysSorted(_opCtx.get(), 256);
}
metricsCopy = globalResourceConsumption.getMetrics();
ASSERT_EQ(metricsCopy["db1"].primaryMetrics.docBytesRead, 2 + 32);
- ASSERT_EQ(metricsCopy["db1"].primaryMetrics.docUnitsRead, 4 + 64);
+ ASSERT_EQ(metricsCopy["db1"].primaryMetrics.docUnitsRead, 2);
ASSERT_EQ(metricsCopy["db1"].primaryMetrics.idxEntriesRead, 8 + 128);
ASSERT_EQ(metricsCopy["db1"].primaryMetrics.keysSorted, 16 + 256);
}
@@ -258,15 +257,14 @@ TEST_F(ResourceConsumptionMetricsTest, IncrementReadMetricsSecondary) {
{
ResourceConsumption::ScopedMetricsCollector scope(_opCtx.get(), "db1");
- operationMetrics.incrementDocBytesRead(_opCtx.get(), 2);
- operationMetrics.incrementDocUnitsRead(_opCtx.get(), 4);
+ operationMetrics.incrementOneDocRead(_opCtx.get(), 2);
operationMetrics.incrementIdxEntriesRead(_opCtx.get(), 8);
operationMetrics.incrementKeysSorted(_opCtx.get(), 16);
}
auto metricsCopy = globalResourceConsumption.getMetrics();
ASSERT_EQ(metricsCopy["db1"].secondaryMetrics.docBytesRead, 2);
- ASSERT_EQ(metricsCopy["db1"].secondaryMetrics.docUnitsRead, 4);
+ ASSERT_EQ(metricsCopy["db1"].secondaryMetrics.docUnitsRead, 1);
ASSERT_EQ(metricsCopy["db1"].secondaryMetrics.idxEntriesRead, 8);
ASSERT_EQ(metricsCopy["db1"].secondaryMetrics.keysSorted, 16);
@@ -276,18 +274,85 @@ TEST_F(ResourceConsumptionMetricsTest, IncrementReadMetricsSecondary) {
{
ResourceConsumption::ScopedMetricsCollector scope(_opCtx.get(), "db1");
- operationMetrics.incrementDocBytesRead(_opCtx.get(), 32);
- operationMetrics.incrementDocUnitsRead(_opCtx.get(), 64);
+ operationMetrics.incrementOneDocRead(_opCtx.get(), 32);
operationMetrics.incrementIdxEntriesRead(_opCtx.get(), 128);
operationMetrics.incrementKeysSorted(_opCtx.get(), 256);
}
metricsCopy = globalResourceConsumption.getMetrics();
ASSERT_EQ(metricsCopy["db1"].secondaryMetrics.docBytesRead, 2 + 32);
- ASSERT_EQ(metricsCopy["db1"].secondaryMetrics.docUnitsRead, 4 + 64);
+ ASSERT_EQ(metricsCopy["db1"].secondaryMetrics.docUnitsRead, 2);
ASSERT_EQ(metricsCopy["db1"].secondaryMetrics.idxEntriesRead, 8 + 128);
ASSERT_EQ(metricsCopy["db1"].secondaryMetrics.keysSorted, 16 + 256);
}
-} // namespace
+TEST_F(ResourceConsumptionMetricsTest, DocumentUnitsRead) {
+ auto& globalResourceConsumption = ResourceConsumption::get(getServiceContext());
+ auto& operationMetrics = ResourceConsumption::MetricsCollector::get(_opCtx.get());
+
+ int expectedBytes = 0;
+ int expectedUnits = 0;
+
+ {
+ ResourceConsumption::ScopedMetricsCollector scope(_opCtx.get(), "db1");
+
+ // Each of these should be counted as 1 document unit (unit size = 128).
+ operationMetrics.incrementOneDocRead(_opCtx.get(), 2);
+ operationMetrics.incrementOneDocRead(_opCtx.get(), 4);
+ operationMetrics.incrementOneDocRead(_opCtx.get(), 8);
+ operationMetrics.incrementOneDocRead(_opCtx.get(), 16);
+ operationMetrics.incrementOneDocRead(_opCtx.get(), 32);
+ operationMetrics.incrementOneDocRead(_opCtx.get(), 64);
+ operationMetrics.incrementOneDocRead(_opCtx.get(), 128);
+ expectedBytes += 2 + 4 + 8 + 16 + 32 + 64 + 128;
+ expectedUnits += 7;
+
+ // Each of these should be counted as 2 document units (unit size = 128).
+ operationMetrics.incrementOneDocRead(_opCtx.get(), 129);
+ operationMetrics.incrementOneDocRead(_opCtx.get(), 200);
+ operationMetrics.incrementOneDocRead(_opCtx.get(), 255);
+ operationMetrics.incrementOneDocRead(_opCtx.get(), 256);
+ expectedBytes += 129 + 200 + 255 + 256;
+ expectedUnits += 8;
+ }
+
+ auto metricsCopy = globalResourceConsumption.getMetrics();
+ ASSERT_EQ(metricsCopy["db1"].primaryMetrics.docBytesRead, expectedBytes);
+ ASSERT_EQ(metricsCopy["db1"].primaryMetrics.docUnitsRead, expectedUnits);
+}
+
+TEST_F(ResourceConsumptionMetricsTest, DocumentUnitsWritten) {
+ auto& globalResourceConsumption = ResourceConsumption::get(getServiceContext());
+ auto& operationMetrics = ResourceConsumption::MetricsCollector::get(_opCtx.get());
+
+ int expectedBytes = 0;
+ int expectedUnits = 0;
+
+ {
+ ResourceConsumption::ScopedMetricsCollector scope(_opCtx.get(), "db1");
+
+ // Each of these should be counted as 1 document unit (unit size = 128).
+ operationMetrics.incrementOneDocWritten(2);
+ operationMetrics.incrementOneDocWritten(4);
+ operationMetrics.incrementOneDocWritten(8);
+ operationMetrics.incrementOneDocWritten(16);
+ operationMetrics.incrementOneDocWritten(32);
+ operationMetrics.incrementOneDocWritten(64);
+ operationMetrics.incrementOneDocWritten(128);
+ expectedBytes += 2 + 4 + 8 + 16 + 32 + 64 + 128;
+ expectedUnits += 7;
+
+ // Each of these should be counted as 2 document units (unit size = 128).
+ operationMetrics.incrementOneDocWritten(129);
+ operationMetrics.incrementOneDocWritten(200);
+ operationMetrics.incrementOneDocWritten(255);
+ operationMetrics.incrementOneDocWritten(256);
+ expectedBytes += 129 + 200 + 255 + 256;
+ expectedUnits += 8;
+ }
+
+ auto metricsCopy = globalResourceConsumption.getMetrics();
+ ASSERT_EQ(metricsCopy["db1"].docBytesWritten, expectedBytes);
+ ASSERT_EQ(metricsCopy["db1"].docUnitsWritten, expectedUnits);
+}
} // namespace mongo
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp
index e0d9b0750e3..5504d5d846d 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp
@@ -671,7 +671,7 @@ public:
invariantWTOK(_cursor->get_value(_cursor, &value));
auto& metricsCollector = ResourceConsumption::MetricsCollector::get(_opCtx);
- metricsCollector.incrementDocBytesRead(_opCtx, value.size);
+ metricsCollector.incrementOneDocRead(_opCtx, value.size);
return {{id, {static_cast<const char*>(value.data), static_cast<int>(value.size)}}};
}
@@ -1033,7 +1033,7 @@ bool WiredTigerRecordStore::findRecord(OperationContext* opCtx,
*out = _getData(curwrap);
auto& metricsCollector = ResourceConsumption::MetricsCollector::get(opCtx);
- metricsCollector.incrementDocBytesRead(opCtx, out->size());
+ metricsCollector.incrementOneDocRead(opCtx, out->size());
return true;
}
@@ -1066,7 +1066,7 @@ void WiredTigerRecordStore::deleteRecord(OperationContext* opCtx, const RecordId
invariantWTOK(ret);
auto& metricsCollector = ResourceConsumption::MetricsCollector::get(opCtx);
- metricsCollector.incrementDocBytesWritten(old_length);
+ metricsCollector.incrementOneDocWritten(old_length);
_changeNumRecords(opCtx, -1);
_increaseDataSize(opCtx, -old_length);
@@ -1509,14 +1509,16 @@ Status WiredTigerRecordStore::_insertRecords(OperationContext* opCtx,
int ret = WT_OP_CHECK(wiredTigerCursorInsert(opCtx, c));
if (ret)
return wtRCToStatus(ret, "WiredTigerRecordStore::insertRecord");
+
+ // Increment metrics for each insert separately, as opposed to outside of the loop. The API
+ // requires that each record be accounted for separately.
+ auto& metricsCollector = ResourceConsumption::MetricsCollector::get(opCtx);
+ metricsCollector.incrementOneDocWritten(value.size);
}
_changeNumRecords(opCtx, nRecords);
_increaseDataSize(opCtx, totalLength);
- auto& metricsCollector = ResourceConsumption::MetricsCollector::get(opCtx);
- metricsCollector.incrementDocBytesWritten(totalLength);
-
if (_oplogStones) {
_oplogStones->updateCurrentStoneAfterInsertOnCommit(
opCtx, totalLength, highestIdRecord, nRecords);
@@ -1654,7 +1656,7 @@ Status WiredTigerRecordStore::updateRecord(OperationContext* opCtx,
// are inserting (data.size).
modifiedDataSize += entries[i].size + entries[i].data.size;
};
- metricsCollector.incrementDocBytesWritten(modifiedDataSize);
+ metricsCollector.incrementOneDocWritten(modifiedDataSize);
WT_ITEM new_value;
dassert(nentries == 0 ||
@@ -1669,7 +1671,7 @@ Status WiredTigerRecordStore::updateRecord(OperationContext* opCtx,
if (!skip_update) {
c->set_value(c, value.Get());
ret = WT_OP_CHECK(wiredTigerCursorInsert(opCtx, c));
- metricsCollector.incrementDocBytesWritten(value.size);
+ metricsCollector.incrementOneDocWritten(value.size);
}
invariantWTOK(ret);
@@ -1721,7 +1723,7 @@ StatusWith<RecordData> WiredTigerRecordStore::updateWithDamages(
invariantWTOK(WT_OP_CHECK(wiredTigerCursorModify(opCtx, c, entries.data(), nentries)));
auto& metricsCollector = ResourceConsumption::MetricsCollector::get(opCtx);
- metricsCollector.incrementDocBytesWritten(modifiedDataSize);
+ metricsCollector.incrementOneDocWritten(modifiedDataSize);
WT_ITEM value;
invariantWTOK(c->get_value(c, &value));
@@ -2231,7 +2233,7 @@ boost::optional<Record> WiredTigerRecordStoreCursorBase::next() {
invariantWTOK(c->get_value(c, &value));
auto& metricsCollector = ResourceConsumption::MetricsCollector::get(_opCtx);
- metricsCollector.incrementDocBytesRead(_opCtx, value.size);
+ metricsCollector.incrementOneDocRead(_opCtx, value.size);
_lastReturnedId = id;
return {{id, {static_cast<const char*>(value.data), static_cast<int>(value.size)}}};
@@ -2265,7 +2267,7 @@ boost::optional<Record> WiredTigerRecordStoreCursorBase::seekExact(const RecordI
invariantWTOK(c->get_value(c, &value));
auto& metricsCollector = ResourceConsumption::MetricsCollector::get(_opCtx);
- metricsCollector.incrementDocBytesRead(_opCtx, value.size);
+ metricsCollector.incrementOneDocRead(_opCtx, value.size);
_lastReturnedId = id;
_eof = false;