diff options
author | Louis Williams <louis.williams@mongodb.com> | 2020-12-07 14:56:57 -0500 |
---|---|---|
committer | Evergreen Agent <no-reply@evergreen.mongodb.com> | 2020-12-08 01:43:05 +0000 |
commit | 58febe4996944263d331c3f8deb8cefd10ace9a6 (patch) | |
tree | 079f31c9e8a81b97dd7702b4a974ba4155ee5682 /src/mongo/db/stats | |
parent | d92d9ef00751254aeec402374ba359911c3d85af (diff) | |
download | mongo-58febe4996944263d331c3f8deb8cefd10ace9a6.tar.gz |
SERVER-51030 Collect document units returned in command responses
Diffstat (limited to 'src/mongo/db/stats')
-rw-r--r-- | src/mongo/db/stats/resource_consumption_metrics.cpp | 78 | ||||
-rw-r--r-- | src/mongo/db/stats/resource_consumption_metrics.h | 125 | ||||
-rw-r--r-- | src/mongo/db/stats/resource_consumption_metrics_test.cpp | 128 |
3 files changed, 200 insertions, 131 deletions
diff --git a/src/mongo/db/stats/resource_consumption_metrics.cpp b/src/mongo/db/stats/resource_consumption_metrics.cpp index bf689584759..88d4d4cfeaf 100644 --- a/src/mongo/db/stats/resource_consumption_metrics.cpp +++ b/src/mongo/db/stats/resource_consumption_metrics.cpp @@ -116,22 +116,35 @@ ResourceConsumption::MetricsCollector& ResourceConsumption::MetricsCollector::ge return getMetricsCollector(opCtx); } +void ResourceConsumption::UnitCounter::observeOne(size_t datumBytes) { + _units += std::ceil(datumBytes / static_cast<float>(unitSize())); + _bytes += datumBytes; +} + +int ResourceConsumption::DocumentUnitCounter::unitSize() const { + return gDocumentUnitSizeBytes; +} + +int ResourceConsumption::IdxEntryUnitCounter::unitSize() const { + return gIndexEntryUnitSizeBytes; +} + void ResourceConsumption::ReadMetrics::toBson(BSONObjBuilder* builder) const { - builder->appendNumber(kDocBytesRead, docBytesRead); - builder->appendNumber(kDocUnitsRead, docUnitsRead); - builder->appendNumber(kIdxEntryBytesRead, idxEntryBytesRead); - builder->appendNumber(kIdxEntryUnitsRead, idxEntryUnitsRead); + builder->appendNumber(kDocBytesRead, docsRead.bytes()); + builder->appendNumber(kDocUnitsRead, docsRead.units()); + builder->appendNumber(kIdxEntryBytesRead, idxEntriesRead.bytes()); + builder->appendNumber(kIdxEntryUnitsRead, idxEntriesRead.units()); builder->appendNumber(kKeysSorted, keysSorted); builder->appendNumber(kSorterSpills, sorterSpills); - builder->appendNumber(kDocUnitsReturned, docUnitsReturned); + builder->appendNumber(kDocUnitsReturned, docsReturned.units()); builder->appendNumber(kCursorSeeks, cursorSeeks); } void ResourceConsumption::WriteMetrics::toBson(BSONObjBuilder* builder) const { - builder->appendNumber(kDocBytesWritten, docBytesWritten); - builder->appendNumber(kDocUnitsWritten, docUnitsWritten); - builder->appendNumber(kIdxEntryBytesWritten, idxEntryBytesWritten); - builder->appendNumber(kIdxEntryUnitsWritten, idxEntryUnitsWritten); + 
builder->appendNumber(kDocBytesWritten, docsWritten.bytes()); + builder->appendNumber(kDocUnitsWritten, docsWritten.units()); + builder->appendNumber(kIdxEntryBytesWritten, idxEntriesWritten.bytes()); + builder->appendNumber(kIdxEntryUnitsWritten, idxEntriesWritten.units()); } void ResourceConsumption::AggregatedMetrics::toBson(BSONObjBuilder* builder) const { @@ -160,22 +173,22 @@ void ResourceConsumption::OperationMetrics::toBson(BSONObjBuilder* builder) cons } void ResourceConsumption::OperationMetrics::toBsonNonZeroFields(BSONObjBuilder* builder) const { - appendNonZeroMetric(builder, kDocBytesRead, readMetrics.docBytesRead); - appendNonZeroMetric(builder, kDocUnitsRead, readMetrics.docUnitsRead); - appendNonZeroMetric(builder, kIdxEntryBytesRead, readMetrics.idxEntryBytesRead); - appendNonZeroMetric(builder, kIdxEntryUnitsRead, readMetrics.idxEntryUnitsRead); + appendNonZeroMetric(builder, kDocBytesRead, readMetrics.docsRead.bytes()); + appendNonZeroMetric(builder, kDocUnitsRead, readMetrics.docsRead.units()); + appendNonZeroMetric(builder, kIdxEntryBytesRead, readMetrics.idxEntriesRead.bytes()); + appendNonZeroMetric(builder, kIdxEntryUnitsRead, readMetrics.idxEntriesRead.units()); appendNonZeroMetric(builder, kKeysSorted, readMetrics.keysSorted); appendNonZeroMetric(builder, kSorterSpills, readMetrics.sorterSpills); - appendNonZeroMetric(builder, kDocUnitsReturned, readMetrics.docUnitsReturned); + appendNonZeroMetric(builder, kDocUnitsReturned, readMetrics.docsReturned.units()); appendNonZeroMetric(builder, kCursorSeeks, readMetrics.cursorSeeks); if (cpuTimer) { appendNonZeroMetric(builder, kCpuNanos, durationCount<Nanoseconds>(cpuTimer->getElapsed())); } - appendNonZeroMetric(builder, kDocBytesWritten, writeMetrics.docBytesWritten); - appendNonZeroMetric(builder, kDocUnitsWritten, writeMetrics.docUnitsWritten); - appendNonZeroMetric(builder, kIdxEntryBytesWritten, writeMetrics.idxEntryBytesWritten); - appendNonZeroMetric(builder, kIdxEntryUnitsWritten, 
writeMetrics.idxEntryUnitsWritten); + appendNonZeroMetric(builder, kDocBytesWritten, writeMetrics.docsWritten.bytes()); + appendNonZeroMetric(builder, kDocUnitsWritten, writeMetrics.docsWritten.units()); + appendNonZeroMetric(builder, kIdxEntryBytesWritten, writeMetrics.idxEntriesWritten.bytes()); + appendNonZeroMetric(builder, kIdxEntryUnitsWritten, writeMetrics.idxEntriesWritten.units()); } template <typename Func> @@ -187,19 +200,11 @@ inline void ResourceConsumption::MetricsCollector::_doIfCollecting(Func&& func) } void ResourceConsumption::MetricsCollector::incrementOneDocRead(size_t docBytesRead) { - _doIfCollecting([&]() { - size_t docUnits = std::ceil(docBytesRead / static_cast<float>(gDocumentUnitSizeBytes)); - _metrics.readMetrics.docBytesRead += docBytesRead; - _metrics.readMetrics.docUnitsRead += docUnits; - }); + _doIfCollecting([&]() { _metrics.readMetrics.docsRead.observeOne(docBytesRead); }); } void ResourceConsumption::MetricsCollector::incrementOneIdxEntryRead(size_t bytesRead) { - _doIfCollecting([&]() { - size_t units = std::ceil(bytesRead / static_cast<float>(gIndexEntryUnitSizeBytes)); - _metrics.readMetrics.idxEntryBytesRead += bytesRead; - _metrics.readMetrics.idxEntryUnitsRead += units; - }); + _doIfCollecting([&]() { _metrics.readMetrics.idxEntriesRead.observeOne(bytesRead); }); } void ResourceConsumption::MetricsCollector::incrementKeysSorted(size_t keysSorted) { @@ -210,24 +215,17 @@ void ResourceConsumption::MetricsCollector::incrementSorterSpills(size_t spills) _doIfCollecting([&]() { _metrics.readMetrics.sorterSpills += spills; }); } -void ResourceConsumption::MetricsCollector::incrementDocUnitsReturned(size_t returned) { - _doIfCollecting([&]() { _metrics.readMetrics.docUnitsReturned += returned; }); +void ResourceConsumption::MetricsCollector::incrementDocUnitsReturned( + DocumentUnitCounter docUnits) { + _doIfCollecting([&]() { _metrics.readMetrics.docsReturned += docUnits; }); } void 
ResourceConsumption::MetricsCollector::incrementOneDocWritten(size_t bytesWritten) { - _doIfCollecting([&] { - size_t docUnits = std::ceil(bytesWritten / static_cast<float>(gDocumentUnitSizeBytes)); - _metrics.writeMetrics.docBytesWritten += bytesWritten; - _metrics.writeMetrics.docUnitsWritten += docUnits; - }); + _doIfCollecting([&] { _metrics.writeMetrics.docsWritten.observeOne(bytesWritten); }); } void ResourceConsumption::MetricsCollector::incrementOneIdxEntryWritten(size_t bytesWritten) { - _doIfCollecting([&] { - size_t idxUnits = std::ceil(bytesWritten / static_cast<float>(gIndexEntryUnitSizeBytes)); - _metrics.writeMetrics.idxEntryBytesWritten += bytesWritten; - _metrics.writeMetrics.idxEntryUnitsWritten += idxUnits; - }); + _doIfCollecting([&] { _metrics.writeMetrics.idxEntriesWritten.observeOne(bytesWritten); }); } void ResourceConsumption::MetricsCollector::beginScopedCollecting(OperationContext* opCtx, diff --git a/src/mongo/db/stats/resource_consumption_metrics.h b/src/mongo/db/stats/resource_consumption_metrics.h index 3e392b7a8f1..25282410324 100644 --- a/src/mongo/db/stats/resource_consumption_metrics.h +++ b/src/mongo/db/stats/resource_consumption_metrics.h @@ -50,17 +50,80 @@ public: static ResourceConsumption& get(OperationContext* opCtx); static ResourceConsumption& get(ServiceContext* svcCtx); + /** + * UnitCounter observes individual input datums and then calculates the total number of bytes + * and whole number units observed. + */ + class UnitCounter { + public: + UnitCounter() = default; + + void add(const UnitCounter& other) { + _bytes += other._bytes; + _units += other._units; + } + + UnitCounter& operator+=(const UnitCounter& other) { + add(other); + return *this; + } + + long long bytes() const { + return _bytes; + } + long long units() const { + return _units; + } + + /** + * Call once per input datum with its size in bytes. 
+ * + * This function calculates the number of units observed based on the implementation-specific + * unitSize(). The function uses the following formula to calculate the number of units per + * datum: + * + * units = ceil (datum bytes / unit size in bytes) + * + * This achieves the goal of counting small datums as at least one unit while ensuring + * larger units are accounted proportionately. This can result in overstating smaller datums + * when the unit size is large. This is desired behavior, and the extent to which small + * datums are overstated is tunable by the unit size of the implementor. + */ + void observeOne(size_t datumBytes); + + protected: + /** + * Returns the implementation-specific unit size. + */ + virtual int unitSize() const = 0; + + long long _bytes = 0; + long long _units = 0; + }; + + /** DocumentUnitCounter records the number of document units observed. */ + class DocumentUnitCounter : public UnitCounter { + private: + int unitSize() const final; + }; + + /** IdxEntryUnitCounter records the number of index entry units observed. */ + class IdxEntryUnitCounter : public UnitCounter { + private: + int unitSize() const final; + }; + /** ReadMetrics maintains metrics for read operations. 
*/ class ReadMetrics { public: + ReadMetrics() = default; + void add(const ReadMetrics& other) { - docBytesRead += other.docBytesRead; - docUnitsRead += other.docUnitsRead; - idxEntryBytesRead += other.idxEntryBytesRead; - idxEntryUnitsRead += other.idxEntryUnitsRead; + docsRead += other.docsRead; + idxEntriesRead += other.idxEntriesRead; + docsReturned += other.docsReturned; keysSorted += other.keysSorted; sorterSpills += other.sorterSpills; - docUnitsReturned += other.docUnitsReturned; cursorSeeks += other.cursorSeeks; } @@ -74,20 +137,17 @@ public: */ void toBson(BSONObjBuilder* builder) const; - // Number of document bytes read - long long docBytesRead = 0; // Number of document units read - long long docUnitsRead = 0; - // Number of index entry bytes read - long long idxEntryBytesRead = 0; - // Number of index entries units read - long long idxEntryUnitsRead = 0; + DocumentUnitCounter docsRead; + // Number of index entry units read + IdxEntryUnitCounter idxEntriesRead; + // Number of document units returned by a query + DocumentUnitCounter docsReturned; + // Number of keys sorted for query operations long long keysSorted = 0; // Number of individual spills of data to disk by the sorter long long sorterSpills = 0; - // Number of document units returned by a query - long long docUnitsReturned = 0; // Number of cursor seeks long long cursorSeeks = 0; }; @@ -96,10 +156,8 @@ public: class WriteMetrics { public: void add(const WriteMetrics& other) { - docBytesWritten += other.docBytesWritten; - docUnitsWritten += other.docUnitsWritten; - idxEntryBytesWritten += other.idxEntryBytesWritten; - idxEntryUnitsWritten += other.idxEntryUnitsWritten; + docsWritten += other.docsWritten; + idxEntriesWritten += other.idxEntriesWritten; } WriteMetrics& operator+=(const WriteMetrics& other) { @@ -112,14 +170,10 @@ public: */ void toBson(BSONObjBuilder* builder) const; - // Number of document bytes written - long long docBytesWritten = 0; - // Number of document units written - 
long long docUnitsWritten = 0; - // Number of index entry bytes written - long long idxEntryBytesWritten = 0; - // Number of index entry units written - long long idxEntryUnitsWritten = 0; + // Number of documents written + DocumentUnitCounter docsWritten; + // Number of index entries written + IdxEntryUnitCounter idxEntriesWritten; }; /** @@ -191,11 +245,6 @@ public: public: MetricsCollector() = default; - // Delete copy constructors to prevent callers from accidentally copying when this is - // decorated on the OperationContext by reference. - MetricsCollector(const MetricsCollector&) = delete; - MetricsCollector operator=(const MetricsCollector&) = delete; - static MetricsCollector& get(OperationContext* opCtx); /** @@ -256,9 +305,7 @@ public: void reset() { invariant(!isInScope()); - _metrics = {}; - _dbName = {}; - _hasCollectedMetrics = false; + *this = {}; } /** @@ -285,7 +332,10 @@ public: */ void incrementSorterSpills(size_t spills); - void incrementDocUnitsReturned(size_t docUnitsReturned); + /** + * Increments the number of document units returned in the command response. + */ + void incrementDocUnitsReturned(DocumentUnitCounter docUnitsReturned); /** * This should be called once per document written with the number of bytes written for that @@ -310,6 +360,11 @@ public: void incrementOneCursorSeek(); private: + // Privatize copy constructors to prevent callers from accidentally copying when this is + // decorated on the OperationContext by reference. + MetricsCollector(const MetricsCollector&) = default; + MetricsCollector& operator=(const MetricsCollector&) = default; + /** * Helper function that calls the Func when this collector is currently collecting metrics. 
*/ diff --git a/src/mongo/db/stats/resource_consumption_metrics_test.cpp b/src/mongo/db/stats/resource_consumption_metrics_test.cpp index a6ed735dff8..bcb1b5335c5 100644 --- a/src/mongo/db/stats/resource_consumption_metrics_test.cpp +++ b/src/mongo/db/stats/resource_consumption_metrics_test.cpp @@ -214,6 +214,14 @@ TEST_F(ResourceConsumptionMetricsTest, NestedScopedMetricsCollector) { ASSERT_EQ(metricsCopy.count("db2"), 0); } +namespace { +ResourceConsumption::DocumentUnitCounter makeDocUnits(size_t bytes) { + ResourceConsumption::DocumentUnitCounter docUnitsReturned; + docUnitsReturned.observeOne(bytes); + return docUnitsReturned; +} +} // namespace + TEST_F(ResourceConsumptionMetricsTest, IncrementReadMetrics) { auto& globalResourceConsumption = ResourceConsumption::get(getServiceContext()); auto& operationMetrics = ResourceConsumption::MetricsCollector::get(_opCtx.get()); @@ -225,20 +233,21 @@ TEST_F(ResourceConsumptionMetricsTest, IncrementReadMetrics) { operationMetrics.incrementOneIdxEntryRead(8); operationMetrics.incrementKeysSorted(16); operationMetrics.incrementSorterSpills(32); - operationMetrics.incrementDocUnitsReturned(64); + operationMetrics.incrementDocUnitsReturned(makeDocUnits(64)); operationMetrics.incrementOneCursorSeek(); } ASSERT(operationMetrics.hasCollectedMetrics()); auto metricsCopy = globalResourceConsumption.getDbMetrics(); - ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.docBytesRead, 2); - ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.docUnitsRead, 1); - ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.idxEntryBytesRead, 8); - ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.idxEntryUnitsRead, 1); + ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.docsRead.bytes(), 2); + ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.docsRead.units(), 1); + ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.idxEntriesRead.bytes(), 8); + ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.idxEntriesRead.units(), 1); 
ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.keysSorted, 16); ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.sorterSpills, 32); - ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.docUnitsReturned, 64); + ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.docsReturned.bytes(), 64); + ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.docsReturned.units(), 1); ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.cursorSeeks, 1); // Clear metrics so we do not double-count. @@ -251,18 +260,19 @@ TEST_F(ResourceConsumptionMetricsTest, IncrementReadMetrics) { operationMetrics.incrementOneIdxEntryRead(128); operationMetrics.incrementKeysSorted(256); operationMetrics.incrementSorterSpills(512); - operationMetrics.incrementDocUnitsReturned(1024); + operationMetrics.incrementDocUnitsReturned(makeDocUnits(1024)); operationMetrics.incrementOneCursorSeek(); } metricsCopy = globalResourceConsumption.getDbMetrics(); - ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.docBytesRead, 2 + 32); - ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.docUnitsRead, 2); - ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.idxEntryBytesRead, 8 + 128); - ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.idxEntryUnitsRead, 1 + 8); + ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.docsRead.bytes(), 2 + 32); + ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.docsRead.units(), 2); + ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.idxEntriesRead.bytes(), 8 + 128); + ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.idxEntriesRead.units(), 1 + 8); ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.keysSorted, 16 + 256); ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.sorterSpills, 32 + 512); - ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.docUnitsReturned, 64 + 1024); + ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.docsReturned.bytes(), 64 + 1024); + ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.docsReturned.units(), 1 + 8); ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.cursorSeeks, 1 + 1); } @@ -280,18 +290,19 @@ 
TEST_F(ResourceConsumptionMetricsTest, IncrementReadMetricsSecondary) { operationMetrics.incrementOneIdxEntryRead(8); operationMetrics.incrementKeysSorted(16); operationMetrics.incrementSorterSpills(32); - operationMetrics.incrementDocUnitsReturned(64); + operationMetrics.incrementDocUnitsReturned(makeDocUnits(64)); operationMetrics.incrementOneCursorSeek(); } auto metricsCopy = globalResourceConsumption.getDbMetrics(); - ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.docBytesRead, 2); - ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.docUnitsRead, 1); - ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.idxEntryBytesRead, 8); - ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.idxEntryUnitsRead, 1); + ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.docsRead.bytes(), 2); + ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.docsRead.units(), 1); + ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.idxEntriesRead.bytes(), 8); + ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.idxEntriesRead.units(), 1); ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.keysSorted, 16); ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.sorterSpills, 32); - ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.docUnitsReturned, 64); + ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.docsReturned.bytes(), 64); + ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.docsReturned.units(), 1); ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.cursorSeeks, 1); // Clear metrics so we do not double-count. 
@@ -304,18 +315,19 @@ TEST_F(ResourceConsumptionMetricsTest, IncrementReadMetricsSecondary) { operationMetrics.incrementOneIdxEntryRead(128); operationMetrics.incrementKeysSorted(256); operationMetrics.incrementSorterSpills(512); - operationMetrics.incrementDocUnitsReturned(1024); + operationMetrics.incrementDocUnitsReturned(makeDocUnits(1024)); operationMetrics.incrementOneCursorSeek(); } metricsCopy = globalResourceConsumption.getDbMetrics(); - ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.docBytesRead, 2 + 32); - ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.docUnitsRead, 2); - ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.idxEntryBytesRead, 8 + 128); - ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.idxEntryUnitsRead, 1 + 8); + ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.docsRead.bytes(), 2 + 32); + ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.docsRead.units(), 2); + ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.idxEntriesRead.bytes(), 8 + 128); + ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.idxEntriesRead.units(), 1 + 8); ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.keysSorted, 16 + 256); ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.sorterSpills, 32 + 512); - ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.docUnitsReturned, 64 + 1024); + ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.docsReturned.bytes(), 64 + 1024); + ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.docsReturned.units(), 1 + 8); ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.cursorSeeks, 1 + 1); } @@ -332,7 +344,7 @@ TEST_F(ResourceConsumptionMetricsTest, IncrementReadMetricsAcrossStates) { operationMetrics.incrementOneIdxEntryRead(8); operationMetrics.incrementKeysSorted(16); operationMetrics.incrementSorterSpills(32); - operationMetrics.incrementDocUnitsReturned(64); + operationMetrics.incrementDocUnitsReturned(makeDocUnits(64)); operationMetrics.incrementOneCursorSeek(); ASSERT_OK(repl::ReplicationCoordinator::get(_opCtx.get()) @@ 
-342,25 +354,27 @@ TEST_F(ResourceConsumptionMetricsTest, IncrementReadMetricsAcrossStates) { operationMetrics.incrementOneIdxEntryRead(128); operationMetrics.incrementKeysSorted(256); operationMetrics.incrementSorterSpills(512); - operationMetrics.incrementDocUnitsReturned(1024); + operationMetrics.incrementDocUnitsReturned(makeDocUnits(1024)); operationMetrics.incrementOneCursorSeek(); } auto metricsCopy = globalResourceConsumption.getAndClearDbMetrics(); - ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.docBytesRead, 0); - ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.docUnitsRead, 0); - ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.idxEntryBytesRead, 0); - ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.idxEntryUnitsRead, 0); + ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.docsRead.bytes(), 0); + ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.docsRead.units(), 0); + ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.idxEntriesRead.bytes(), 0); + ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.idxEntriesRead.units(), 0); ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.keysSorted, 0); - ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.docUnitsReturned, 0); + ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.docsReturned.bytes(), 0); + ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.docsReturned.units(), 0); ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.cursorSeeks, 0); - ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.docBytesRead, 2 + 32); - ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.docUnitsRead, 2); - ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.idxEntryBytesRead, 8 + 128); - ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.idxEntryUnitsRead, 1 + 8); + ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.docsRead.bytes(), 2 + 32); + ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.docsRead.units(), 2); + ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.idxEntriesRead.bytes(), 8 + 128); + 
ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.idxEntriesRead.units(), 1 + 8); ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.keysSorted, 16 + 256); ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.sorterSpills, 32 + 512); - ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.docUnitsReturned, 64 + 1024); + ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.docsReturned.bytes(), 64 + 1024); + ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.docsReturned.units(), 1 + 8); ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.cursorSeeks, 1 + 1); operationMetrics.reset(); @@ -374,7 +388,7 @@ TEST_F(ResourceConsumptionMetricsTest, IncrementReadMetricsAcrossStates) { operationMetrics.incrementOneIdxEntryRead(8); operationMetrics.incrementKeysSorted(16); operationMetrics.incrementSorterSpills(32); - operationMetrics.incrementDocUnitsReturned(64); + operationMetrics.incrementDocUnitsReturned(makeDocUnits(64)); operationMetrics.incrementOneCursorSeek(); ASSERT_OK(repl::ReplicationCoordinator::get(_opCtx.get()) @@ -384,26 +398,28 @@ TEST_F(ResourceConsumptionMetricsTest, IncrementReadMetricsAcrossStates) { operationMetrics.incrementOneIdxEntryRead(128); operationMetrics.incrementKeysSorted(256); operationMetrics.incrementSorterSpills(512); - operationMetrics.incrementDocUnitsReturned(1024); + operationMetrics.incrementDocUnitsReturned(makeDocUnits(1024)); operationMetrics.incrementOneCursorSeek(); } metricsCopy = globalResourceConsumption.getAndClearDbMetrics(); - ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.docBytesRead, 2 + 32); - ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.docUnitsRead, 2); - ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.idxEntryBytesRead, 8 + 128); - ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.idxEntryUnitsRead, 1 + 8); + ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.docsRead.bytes(), 2 + 32); + ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.docsRead.units(), 2); + ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.idxEntriesRead.bytes(), 
8 + 128); + ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.idxEntriesRead.units(), 1 + 8); ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.keysSorted, 16 + 256); ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.sorterSpills, 32 + 512); - ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.docUnitsReturned, 64 + 1024); + ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.docsReturned.bytes(), 64 + 1024); + ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.docsReturned.units(), 1 + 8); ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.cursorSeeks, 1 + 1); - ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.docBytesRead, 0); - ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.docUnitsRead, 0); - ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.idxEntryBytesRead, 0); - ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.idxEntryUnitsRead, 0); + ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.docsRead.bytes(), 0); + ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.docsRead.units(), 0); + ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.idxEntriesRead.bytes(), 0); + ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.idxEntriesRead.units(), 0); ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.keysSorted, 0); ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.sorterSpills, 0); - ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.docUnitsReturned, 0); + ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.docsReturned.bytes(), 0); + ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.docsReturned.units(), 0); ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.cursorSeeks, 0); } @@ -438,8 +454,8 @@ TEST_F(ResourceConsumptionMetricsTest, DocumentUnitsRead) { } auto metricsCopy = globalResourceConsumption.getDbMetrics(); - ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.docBytesRead, expectedBytes); - ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.docUnitsRead, expectedUnits); + ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.docsRead.bytes(), expectedBytes); + 
ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.docsRead.units(), expectedUnits); } TEST_F(ResourceConsumptionMetricsTest, DocumentUnitsWritten) { @@ -473,8 +489,8 @@ TEST_F(ResourceConsumptionMetricsTest, DocumentUnitsWritten) { } auto metricsCopy = globalResourceConsumption.getDbMetrics(); - ASSERT_EQ(metricsCopy["db1"].writeMetrics.docBytesWritten, expectedBytes); - ASSERT_EQ(metricsCopy["db1"].writeMetrics.docUnitsWritten, expectedUnits); + ASSERT_EQ(metricsCopy["db1"].writeMetrics.docsWritten.bytes(), expectedBytes); + ASSERT_EQ(metricsCopy["db1"].writeMetrics.docsWritten.units(), expectedUnits); } TEST_F(ResourceConsumptionMetricsTest, IdxEntryUnitsRead) { @@ -522,8 +538,8 @@ TEST_F(ResourceConsumptionMetricsTest, IdxEntryUnitsRead) { } auto metricsCopy = globalResourceConsumption.getDbMetrics(); - ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.idxEntryBytesRead, expectedBytes); - ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.idxEntryUnitsRead, expectedUnits); + ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.idxEntriesRead.bytes(), expectedBytes); + ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.idxEntriesRead.units(), expectedUnits); } TEST_F(ResourceConsumptionMetricsTest, IdxEntryUnitsWritten) { @@ -571,8 +587,8 @@ TEST_F(ResourceConsumptionMetricsTest, IdxEntryUnitsWritten) { } auto metricsCopy = globalResourceConsumption.getDbMetrics(); - ASSERT_EQ(metricsCopy["db1"].writeMetrics.idxEntryBytesWritten, expectedBytes); - ASSERT_EQ(metricsCopy["db1"].writeMetrics.idxEntryUnitsWritten, expectedUnits); + ASSERT_EQ(metricsCopy["db1"].writeMetrics.idxEntriesWritten.bytes(), expectedBytes); + ASSERT_EQ(metricsCopy["db1"].writeMetrics.idxEntriesWritten.units(), expectedUnits); } TEST_F(ResourceConsumptionMetricsTest, CpuNanos) { |