author     Louis Williams <louis.williams@mongodb.com>  2020-12-07 14:56:57 -0500
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>  2020-12-08 01:43:05 +0000
commit     58febe4996944263d331c3f8deb8cefd10ace9a6
tree       079f31c9e8a81b97dd7702b4a974ba4155ee5682
parent     d92d9ef00751254aeec402374ba359911c3d85af
SERVER-51030 Collect document units returned in command responses
 jstests/noPassthrough/profile_operation_metrics.js        |  77
 src/mongo/db/commands/find_and_modify.cpp                 |  17
 src/mongo/db/commands/find_cmd.cpp                        |   8
 src/mongo/db/commands/getmore_cmd.cpp                     |  14
 src/mongo/db/commands/run_aggregate.cpp                   |   6
 src/mongo/db/query/find.cpp                               |  20
 src/mongo/db/stats/resource_consumption_metrics.cpp       |  78
 src/mongo/db/stats/resource_consumption_metrics.h         | 125
 src/mongo/db/stats/resource_consumption_metrics_test.cpp  | 128
 9 files changed, 337 insertions(+), 136 deletions(-)
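
The pattern this change applies is the same in every read path touched below: accumulate a ResourceConsumption::DocumentUnitCounter while building the response batch, then fold it into the operation's MetricsCollector once the response is known to go back to the client. A minimal sketch of that calling convention, using only the API added by this patch (the helper name recordReturnedBatch is illustrative, not part of the change):

    #include <vector>

    #include "mongo/bson/bsonobj.h"
    #include "mongo/db/operation_context.h"
    #include "mongo/db/stats/resource_consumption_metrics.h"

    namespace mongo {
    // Illustrative helper mirroring the pattern introduced below in
    // find_cmd.cpp, getmore_cmd.cpp, run_aggregate.cpp, and find.cpp.
    void recordReturnedBatch(OperationContext* opCtx, const std::vector<BSONObj>& batch) {
        ResourceConsumption::DocumentUnitCounter docUnitsReturned;
        for (const auto& obj : batch) {
            // One observation per returned document, sized in bytes.
            docUnitsReturned.observeOne(obj.objsize());
        }
        // Incremented only once the response has been built; this is a
        // no-op unless the operation is currently collecting metrics.
        auto& metricsCollector = ResourceConsumption::MetricsCollector::get(opCtx);
        metricsCollector.incrementDocUnitsReturned(docUnitsReturned);
    }
    }  // namespace mongo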
diff --git a/jstests/noPassthrough/profile_operation_metrics.js b/jstests/noPassthrough/profile_operation_metrics.js
index 91e1fa2e04b..f6b6bc68dd0 100644
--- a/jstests/noPassthrough/profile_operation_metrics.js
+++ b/jstests/noPassthrough/profile_operation_metrics.js
@@ -121,6 +121,7 @@ const operations = [
assert.gt(profileDoc.cursorSeeks, 0);
assert.eq(profileDoc.keysSorted, 0);
assert.eq(profileDoc.sorterSpills, 0);
+ assert.eq(profileDoc.docUnitsReturned, 0);
}
},
{
@@ -143,6 +144,7 @@ const operations = [
assert.eq(profileDoc.idxEntryUnitsWritten, 0);
assert.eq(profileDoc.keysSorted, 0);
assert.eq(profileDoc.sorterSpills, 0);
+ assert.eq(profileDoc.docUnitsReturned, 0);
}
},
{
@@ -164,6 +166,7 @@ const operations = [
assert.eq(profileDoc.idxEntryUnitsWritten, 2);
assert.eq(profileDoc.keysSorted, 0);
assert.eq(profileDoc.sorterSpills, 0);
+ assert.eq(profileDoc.docUnitsReturned, 0);
}
},
{
@@ -196,6 +199,7 @@ const operations = [
assert.eq(profileDoc.idxEntryUnitsWritten, 0);
assert.eq(profileDoc.keysSorted, 0);
assert.eq(profileDoc.sorterSpills, 0);
+ assert.eq(profileDoc.docUnitsReturned, 1);
}
},
{
@@ -217,6 +221,7 @@ const operations = [
assert.eq(profileDoc.idxEntryUnitsWritten, 0);
assert.eq(profileDoc.keysSorted, 0);
assert.eq(profileDoc.sorterSpills, 0);
+ assert.eq(profileDoc.docUnitsReturned, 1);
}
},
{
@@ -238,6 +243,7 @@ const operations = [
assert.eq(profileDoc.idxEntryUnitsWritten, 0);
assert.eq(profileDoc.keysSorted, 0);
assert.eq(profileDoc.sorterSpills, 0);
+ assert.eq(profileDoc.docUnitsReturned, 1);
}
},
{
@@ -259,10 +265,11 @@ const operations = [
assert.eq(profileDoc.idxEntryUnitsWritten, 0);
assert.eq(profileDoc.keysSorted, 0);
assert.eq(profileDoc.sorterSpills, 0);
+ assert.eq(profileDoc.docUnitsReturned, 0);
}
},
{
- name: 'findAndModify',
+ name: 'findAndModifyUpdate',
command: (db) => {
assert(db[collName].findAndModify({query: {_id: 1}, update: {$set: {a: 1}}}));
},
@@ -290,6 +297,7 @@ const operations = [
assert.eq(profileDoc.idxEntryUnitsWritten, 2);
assert.eq(profileDoc.keysSorted, 0);
assert.eq(profileDoc.sorterSpills, 0);
+ assert.eq(profileDoc.docUnitsReturned, 1);
}
},
{
@@ -321,6 +329,7 @@ const operations = [
assert.eq(profileDoc.idxEntryUnitsWritten, 2);
assert.eq(profileDoc.keysSorted, 0);
assert.eq(profileDoc.sorterSpills, 0);
+ assert.eq(profileDoc.docUnitsReturned, 0);
}
},
{
@@ -339,6 +348,7 @@ const operations = [
assert.eq(profileDoc.docBytesWritten, 0);
assert.eq(profileDoc.idxEntryBytesWritten, 0);
assert.eq(profileDoc.idxEntryUnitsWritten, 0);
+ assert.eq(profileDoc.docUnitsReturned, 0);
}
},
{
@@ -360,6 +370,7 @@ const operations = [
assert.eq(profileDoc.idxEntryUnitsWritten, 0);
assert.eq(profileDoc.keysSorted, 0);
assert.eq(profileDoc.sorterSpills, 0);
+ assert.eq(profileDoc.docUnitsReturned, 0);
}
},
// Clear the profile collection so we can easily identify new operations with similar filters as
@@ -381,6 +392,7 @@ const operations = [
assert.eq(profileDoc.docBytesWritten, 0);
assert.eq(profileDoc.idxEntryBytesWritten, 0);
assert.eq(profileDoc.idxEntryUnitsWritten, 0);
+ assert.eq(profileDoc.docUnitsReturned, 0);
}
},
{
@@ -401,6 +413,7 @@ const operations = [
assert.eq(profileDoc.idxEntryUnitsWritten, 0);
assert.eq(profileDoc.keysSorted, 0);
assert.eq(profileDoc.sorterSpills, 0);
+ assert.eq(profileDoc.docUnitsReturned, 0);
}
},
{
@@ -422,6 +435,7 @@ const operations = [
assert.eq(profileDoc.idxEntryUnitsWritten, 0);
assert.eq(profileDoc.keysSorted, 0);
assert.eq(profileDoc.sorterSpills, 0);
+ assert.eq(profileDoc.docUnitsReturned, 0);
}
},
resetProfileColl,
@@ -455,6 +469,38 @@ const operations = [
assert.eq(profileDoc.idxEntryUnitsWritten, 0);
assert.eq(profileDoc.keysSorted, 0);
assert.eq(profileDoc.sorterSpills, 0);
+ assert.eq(profileDoc.docUnitsReturned, 1);
+ }
+ },
+ resetProfileColl,
+ {
+ name: 'findAndModifyRemove',
+ command: (db) => {
+ assert.commandWorked(db[collName].insert({_id: 3, a: 0}));
+ assert(db[collName].findAndModify({query: {_id: 3}, remove: true}));
+ },
+ profileFilter: {op: 'command', 'command.findandmodify': collName},
+ profileAssert: (db, profileDoc) => {
+ // Should read exactly as many bytes as are in the document. Debug builds may perform extra
+ // reads of the _mdb_catalog.
+ if (!isDebugBuild(db)) {
+ assert.eq(profileDoc.docBytesRead, 29);
+ assert.eq(profileDoc.docUnitsRead, 1);
+ assert.eq(profileDoc.cursorSeeks, 3);
+ } else {
+ assert.gte(profileDoc.docBytesRead, 29);
+ assert.gte(profileDoc.docUnitsRead, 1);
+ assert.gte(profileDoc.cursorSeeks, 3);
+ }
+ assert.eq(profileDoc.idxEntryBytesRead, 3);
+ assert.eq(profileDoc.idxEntryUnitsRead, 1);
+ assert.eq(profileDoc.docBytesWritten, 29);
+ assert.eq(profileDoc.docUnitsWritten, 1);
+ assert.eq(profileDoc.idxEntryBytesWritten, 3);
+ assert.eq(profileDoc.idxEntryUnitsWritten, 1);
+ assert.eq(profileDoc.keysSorted, 0);
+ assert.eq(profileDoc.sorterSpills, 0);
+ assert.eq(profileDoc.docUnitsReturned, 1);
}
},
{
@@ -482,6 +528,7 @@ const operations = [
assert.eq(profileDoc.idxEntryUnitsWritten, 1);
assert.eq(profileDoc.keysSorted, 0);
assert.eq(profileDoc.sorterSpills, 0);
+ assert.eq(profileDoc.docUnitsReturned, 0);
}
},
{
@@ -507,6 +554,7 @@ const operations = [
assert.eq(profileDoc.docUnitsWritten, 1);
assert.eq(profileDoc.idxEntryBytesWritten, 3);
assert.eq(profileDoc.idxEntryUnitsWritten, 1);
+ assert.eq(profileDoc.docUnitsReturned, 0);
}
},
{
@@ -528,6 +576,7 @@ const operations = [
assert.eq(profileDoc.idxEntryUnitsWritten, 0);
assert.eq(profileDoc.keysSorted, 0);
assert.eq(profileDoc.sorterSpills, 0);
+ assert.eq(profileDoc.docUnitsReturned, 0);
}
},
resetProfileColl,
@@ -555,6 +604,7 @@ const operations = [
assert.eq(profileDoc.idxEntryUnitsWritten, 0);
assert.eq(profileDoc.keysSorted, 0);
assert.eq(profileDoc.sorterSpills, 0);
+ assert.eq(profileDoc.docUnitsReturned, 5);
}
},
resetProfileColl,
@@ -578,6 +628,7 @@ const operations = [
assert.eq(profileDoc.idxEntryUnitsWritten, 0);
assert.eq(profileDoc.keysSorted, 150);
assert.eq(profileDoc.sorterSpills, 0);
+ assert.eq(profileDoc.docUnitsReturned, 10);
}
},
{
@@ -602,6 +653,7 @@ const operations = [
assert.eq(profileDoc.idxEntryUnitsWritten, 0);
assert.eq(profileDoc.keysSorted, 0);
assert.eq(profileDoc.sorterSpills, 0);
+ assert.eq(profileDoc.docUnitsReturned, 0);
}
},
resetProfileColl,
@@ -626,6 +678,7 @@ const operations = [
assert.eq(profileDoc.idxEntryUnitsWritten, 2);
assert.eq(profileDoc.keysSorted, 0);
assert.eq(profileDoc.sorterSpills, 0);
+ assert.eq(profileDoc.docUnitsReturned, 0);
}
},
resetProfileColl,
@@ -654,6 +707,7 @@ const operations = [
assert.eq(profileDoc.idxEntryUnitsWritten, 1);
assert.eq(profileDoc.keysSorted, 0);
assert.eq(profileDoc.sorterSpills, 0);
+ assert.eq(profileDoc.docUnitsReturned, 0);
}
},
{
@@ -691,6 +745,7 @@ const operations = [
assert.eq(profileDoc.idxEntryUnitsWritten, 2);
assert.eq(profileDoc.keysSorted, 0);
assert.eq(profileDoc.sorterSpills, 0);
+ assert.eq(profileDoc.docUnitsReturned, 0);
}
},
{
@@ -734,6 +789,7 @@ const operations = [
assert.eq(profileDoc.idxEntryUnitsWritten, 2);
assert.eq(profileDoc.keysSorted, 0);
assert.eq(profileDoc.sorterSpills, 0);
+ assert.eq(profileDoc.docUnitsReturned, 0);
}
},
{
@@ -770,6 +826,7 @@ const operations = [
assert.eq(profileDoc.idxEntryUnitsWritten, 0);
assert.eq(profileDoc.keysSorted, 0);
assert.eq(profileDoc.sorterSpills, 0);
+ assert.eq(profileDoc.docUnitsReturned, 0);
}
},
resetProfileColl,
@@ -795,6 +852,7 @@ const operations = [
assert.eq(profileDoc.idxEntryUnitsWritten, 1);
assert.eq(profileDoc.keysSorted, 0);
assert.eq(profileDoc.sorterSpills, 0);
+ assert.eq(profileDoc.docUnitsReturned, 0);
}
},
{
@@ -817,6 +875,7 @@ const operations = [
assert.eq(profileDoc.idxEntryUnitsWritten, 0);
assert.eq(profileDoc.keysSorted, 0);
assert.eq(profileDoc.sorterSpills, 0);
+ assert.eq(profileDoc.docUnitsReturned, 1);
}
},
resetProfileColl,
@@ -849,6 +908,7 @@ const operations = [
assert.eq(profileDoc.idxEntryUnitsWritten, 0);
assert.eq(profileDoc.keysSorted, 0);
assert.eq(profileDoc.sorterSpills, 0);
+ assert.eq(profileDoc.docUnitsReturned, 1);
}
},
{
@@ -875,6 +935,7 @@ const operations = [
assert.eq(profileDoc.docUnitsWritten, 1);
assert.eq(profileDoc.idxEntryBytesWritten, 0);
assert.eq(profileDoc.idxEntryUnitsWritten, 0);
+ assert.eq(profileDoc.docUnitsReturned, 0);
}
},
{
@@ -903,6 +964,7 @@ const operations = [
assert.eq(profileDoc.idxEntryUnitsWritten, 1);
assert.eq(profileDoc.keysSorted, 0);
assert.eq(profileDoc.sorterSpills, 0);
+ assert.eq(profileDoc.docUnitsReturned, 0);
}
},
resetProfileColl,
@@ -930,6 +992,7 @@ const operations = [
assert.eq(profileDoc.idxEntryUnitsWritten, 100);
assert.eq(profileDoc.keysSorted, 0);
assert.eq(profileDoc.sorterSpills, 0);
+ assert.eq(profileDoc.docUnitsReturned, 0);
}
},
{
@@ -953,6 +1016,7 @@ const operations = [
assert.eq(profileDoc.idxEntryUnitsWritten, 0);
assert.eq(profileDoc.keysSorted, 100);
assert.eq(profileDoc.sorterSpills, 0);
+ assert.eq(profileDoc.docUnitsReturned, 100);
},
},
resetProfileColl,
@@ -976,6 +1040,7 @@ const operations = [
assert.eq(profileDoc.idxEntryUnitsWritten, 0);
assert.eq(profileDoc.keysSorted, 100);
assert.eq(profileDoc.sorterSpills, 0);
+ assert.eq(profileDoc.docUnitsReturned, 100);
},
},
{
@@ -997,6 +1062,7 @@ const operations = [
assert.eq(profileDoc.idxEntryUnitsWritten, 0);
assert.eq(profileDoc.keysSorted, 100);
assert.eq(profileDoc.sorterSpills, 0);
+ assert.eq(profileDoc.docUnitsReturned, 100);
},
},
resetProfileColl,
@@ -1019,6 +1085,7 @@ const operations = [
assert.eq(profileDoc.idxEntryUnitsWritten, 0);
assert.eq(profileDoc.keysSorted, 100);
assert.eq(profileDoc.sorterSpills, 0);
+ assert.eq(profileDoc.docUnitsReturned, 1);
},
},
resetProfileColl,
@@ -1041,6 +1108,7 @@ const operations = [
assert.eq(profileDoc.idxEntryUnitsWritten, 0);
assert.eq(profileDoc.keysSorted, 100);
assert.eq(profileDoc.sorterSpills, 0);
+ assert.eq(profileDoc.docUnitsReturned, 5);
},
},
resetProfileColl,
@@ -1075,6 +1143,7 @@ const operations = [
assert.eq(profileDoc.idxEntryUnitsWritten, 0);
assert.eq(profileDoc.keysSorted, 100);
assert.eq(profileDoc.sorterSpills, 101);
+ assert.eq(profileDoc.docUnitsReturned, 100);
},
},
resetProfileColl,
@@ -1109,6 +1178,7 @@ const operations = [
assert.eq(profileDoc.keysSorted, 0);
assert.eq(profileDoc.sorterSpills, 0);
}
+ assert.eq(profileDoc.docUnitsReturned, 10);
},
},
resetProfileColl,
@@ -1132,6 +1202,7 @@ const operations = [
assert.eq(profileDoc.idxEntryUnitsWritten, 0);
assert.eq(profileDoc.keysSorted, 100);
assert.eq(profileDoc.sorterSpills, 0);
+ assert.eq(profileDoc.docUnitsReturned, 10);
},
},
{
@@ -1153,6 +1224,7 @@ const operations = [
assert.eq(profileDoc.docUnitsWritten, 1);
assert.eq(profileDoc.idxEntryBytesWritten, 2);
assert.eq(profileDoc.idxEntryUnitsWritten, 1);
+ assert.eq(profileDoc.docUnitsReturned, 0);
}
},
resetProfileColl,
@@ -1177,6 +1249,7 @@ const operations = [
assert.eq(profileDoc.docUnitsWritten, 9);
assert.eq(profileDoc.idxEntryBytesWritten, 27);
assert.eq(profileDoc.idxEntryUnitsWritten, 9);
+ assert.eq(profileDoc.docUnitsReturned, 0);
}
},
resetProfileColl,
@@ -1207,6 +1280,7 @@ const operations = [
assert.eq(profileDoc.docUnitsWritten, 2);
assert.eq(profileDoc.idxEntryBytesWritten, 5);
assert.eq(profileDoc.idxEntryUnitsWritten, 2);
+ assert.eq(profileDoc.docUnitsReturned, 0);
}
},
resetProfileColl,
@@ -1243,6 +1317,7 @@ const operations = [
assert.eq(profileDoc.docUnitsWritten, 18);
assert.eq(profileDoc.idxEntryBytesWritten, 54);
assert.eq(profileDoc.idxEntryUnitsWritten, 18);
+ assert.eq(profileDoc.docUnitsReturned, 0);
}
}
];
diff --git a/src/mongo/db/commands/find_and_modify.cpp b/src/mongo/db/commands/find_and_modify.cpp
index fbfe5d10fd3..8cd6756b3a9 100644
--- a/src/mongo/db/commands/find_and_modify.cpp
+++ b/src/mongo/db/commands/find_and_modify.cpp
@@ -69,6 +69,7 @@
#include "mongo/db/repl/replication_coordinator.h"
#include "mongo/db/retryable_writes_stats.h"
#include "mongo/db/s/collection_sharding_state.h"
+#include "mongo/db/stats/resource_consumption_metrics.h"
#include "mongo/db/stats/top.h"
#include "mongo/db/storage/duplicate_key_error_info.h"
#include "mongo/db/transaction_participant.h"
@@ -553,6 +554,14 @@ public:
}
recordStatsForTopCommand(opCtx);
+ if (docFound) {
+ ResourceConsumption::DocumentUnitCounter docUnitsReturned;
+ docUnitsReturned.observeOne(docFound->objsize());
+
+ auto& metricsCollector = ResourceConsumption::MetricsCollector::get(opCtx);
+ metricsCollector.incrementDocUnitsReturned(docUnitsReturned);
+ }
+
appendCommandResponse(exec.get(), request.getRemove().value_or(false), docFound, &result);
return true;
@@ -640,6 +649,14 @@ public:
}
recordStatsForTopCommand(opCtx);
+ if (docFound) {
+ ResourceConsumption::DocumentUnitCounter docUnitsReturned;
+ docUnitsReturned.observeOne(docFound->objsize());
+
+ auto& metricsCollector = ResourceConsumption::MetricsCollector::get(opCtx);
+ metricsCollector.incrementDocUnitsReturned(docUnitsReturned);
+ }
+
appendCommandResponse(exec.get(), request.getRemove().value_or(false), docFound, &result);
return true;
diff --git a/src/mongo/db/commands/find_cmd.cpp b/src/mongo/db/commands/find_cmd.cpp
index 9d4b45e3925..2e65c35d43c 100644
--- a/src/mongo/db/commands/find_cmd.cpp
+++ b/src/mongo/db/commands/find_cmd.cpp
@@ -51,6 +51,7 @@
#include "mongo/db/repl/replication_coordinator.h"
#include "mongo/db/service_context.h"
#include "mongo/db/stats/counters.h"
+#include "mongo/db/stats/resource_consumption_metrics.h"
#include "mongo/db/stats/server_read_concern_metrics.h"
#include "mongo/db/storage/storage_engine.h"
#include "mongo/db/transaction_participant.h"
@@ -465,6 +466,7 @@ public:
PlanExecutor::ExecState state = PlanExecutor::ADVANCED;
std::uint64_t numResults = 0;
bool stashedResult = false;
+ ResourceConsumption::DocumentUnitCounter docUnitsReturned;
try {
while (!FindCommon::enoughForFirstBatch(originalQR, numResults) &&
@@ -483,6 +485,7 @@ public:
// Add result to output buffer.
firstBatch.append(obj);
numResults++;
+ docUnitsReturned.observeOne(obj.objsize());
}
} catch (DBException& exception) {
firstBatch.abandon();
@@ -549,6 +552,11 @@ public:
// Generate the response object to send to the client.
firstBatch.done(cursorId, nss.ns());
+
+ // Increment this metric once we have generated a response and we know it will return
+ // documents.
+ auto& metricsCollector = ResourceConsumption::MetricsCollector::get(opCtx);
+ metricsCollector.incrementDocUnitsReturned(docUnitsReturned);
}
void appendMirrorableRequest(BSONObjBuilder* bob) const override {
diff --git a/src/mongo/db/commands/getmore_cmd.cpp b/src/mongo/db/commands/getmore_cmd.cpp
index 053082b86a9..8fbf454ab26 100644
--- a/src/mongo/db/commands/getmore_cmd.cpp
+++ b/src/mongo/db/commands/getmore_cmd.cpp
@@ -56,6 +56,7 @@
#include "mongo/db/repl/speculative_majority_read_info.h"
#include "mongo/db/service_context.h"
#include "mongo/db/stats/counters.h"
+#include "mongo/db/stats/resource_consumption_metrics.h"
#include "mongo/db/stats/top.h"
#include "mongo/logv2/log.h"
#include "mongo/s/chunk_version.h"
@@ -303,7 +304,8 @@ public:
const GetMoreRequest& request,
const bool isTailable,
CursorResponseBuilder* nextBatch,
- std::uint64_t* numResults) {
+ std::uint64_t* numResults,
+ ResourceConsumption::DocumentUnitCounter* docUnitsReturned) {
PlanExecutor* exec = cursor->getExecutor();
// If an awaitData getMore is killed during this process due to our max time expiring at
@@ -328,6 +330,7 @@ public:
nextBatch->setPostBatchResumeToken(exec->getPostBatchResumeToken());
nextBatch->append(obj);
(*numResults)++;
+ docUnitsReturned->observeOne(obj.objsize());
}
} catch (const ExceptionFor<ErrorCodes::CloseChangeStream>&) {
// This exception indicates that we should close the cursor without reporting an
@@ -561,6 +564,7 @@ public:
CursorResponseBuilder nextBatch(reply, options);
BSONObj obj;
std::uint64_t numResults = 0;
+ ResourceConsumption::DocumentUnitCounter docUnitsReturned;
// We report keysExamined and docsExamined to OpDebug for a given getMore operation. To
// obtain these values we need to take a diff of the pre-execution and post-execution
@@ -610,7 +614,8 @@ public:
_request,
cursorPin->isTailable(),
&nextBatch,
- &numResults);
+ &numResults,
+ &docUnitsReturned);
PlanSummaryStats postExecutionStats;
exec->getPlanExplainer().getSummaryStats(&postExecutionStats);
@@ -653,6 +658,11 @@ public:
nextBatch.done(respondWithId, _request.nss.ns());
+ // Increment this metric once we have generated a response and we know it will return
+ // documents.
+ auto& metricsCollector = ResourceConsumption::MetricsCollector::get(opCtx);
+ metricsCollector.incrementDocUnitsReturned(docUnitsReturned);
+
// Ensure log and profiler include the number of results returned in this getMore's
// response batch.
curOp->debug().nreturned = numResults;
diff --git a/src/mongo/db/commands/run_aggregate.cpp b/src/mongo/db/commands/run_aggregate.cpp
index cf85065e5c7..f3dfa8e4f44 100644
--- a/src/mongo/db/commands/run_aggregate.cpp
+++ b/src/mongo/db/commands/run_aggregate.cpp
@@ -71,6 +71,7 @@
#include "mongo/db/s/operation_sharding_state.h"
#include "mongo/db/s/sharding_state.h"
#include "mongo/db/service_context.h"
+#include "mongo/db/stats/resource_consumption_metrics.h"
#include "mongo/db/storage/storage_options.h"
#include "mongo/db/views/view.h"
#include "mongo/db/views/view_catalog.h"
@@ -167,6 +168,7 @@ bool handleCursorCommand(OperationContext* opCtx,
invariant(cursor);
auto exec = cursor->getExecutor();
invariant(exec);
+ ResourceConsumption::DocumentUnitCounter docUnitsReturned;
bool stashedResult = false;
// We are careful to avoid ever calling 'getNext()' on the PlanExecutor when the batchSize is
@@ -221,6 +223,7 @@ bool handleCursorCommand(OperationContext* opCtx,
// If this executor produces a postBatchResumeToken, add it to the cursor response.
responseBuilder.setPostBatchResumeToken(exec->getPostBatchResumeToken());
responseBuilder.append(nextDoc);
+ docUnitsReturned.observeOne(nextDoc.objsize());
}
if (cursor) {
@@ -248,6 +251,9 @@ bool handleCursorCommand(OperationContext* opCtx,
const CursorId cursorId = cursor ? cursor->cursorid() : 0LL;
responseBuilder.done(cursorId, nsForCursor.ns());
+ auto& metricsCollector = ResourceConsumption::MetricsCollector::get(opCtx);
+ metricsCollector.incrementDocUnitsReturned(docUnitsReturned);
+
return static_cast<bool>(cursor);
}
diff --git a/src/mongo/db/query/find.cpp b/src/mongo/db/query/find.cpp
index bb6115b03df..e399de60858 100644
--- a/src/mongo/db/query/find.cpp
+++ b/src/mongo/db/query/find.cpp
@@ -162,6 +162,7 @@ void generateBatch(int ntoreturn,
ClientCursor* cursor,
BufBuilder* bb,
std::uint64_t* numResults,
+ ResourceConsumption::DocumentUnitCounter* docUnitsReturned,
PlanExecutor::ExecState* state) {
PlanExecutor* exec = cursor->getExecutor();
@@ -181,6 +182,8 @@ void generateBatch(int ntoreturn,
// Count the result.
(*numResults)++;
+
+ docUnitsReturned->observeOne(obj.objsize());
}
} catch (DBException& exception) {
auto&& explainer = exec->getPlanExplainer();
@@ -302,6 +305,7 @@ Message getMore(OperationContext* opCtx,
std::uint64_t numResults = 0;
int startingResult = 0;
+ ResourceConsumption::DocumentUnitCounter docUnitsReturned;
const int initialBufSize =
512 + sizeof(QueryResult::Value) + FindCommon::kMaxBytesToReturnToClientAtOnce;
@@ -456,7 +460,7 @@ Message getMore(OperationContext* opCtx,
nullptr);
}
- generateBatch(ntoreturn, cursorPin.getCursor(), &bb, &numResults, &state);
+ generateBatch(ntoreturn, cursorPin.getCursor(), &bb, &numResults, &docUnitsReturned, &state);
// If this is an await data cursor, and we hit EOF without generating any results, then we block
// waiting for new data to arrive.
@@ -480,7 +484,8 @@ Message getMore(OperationContext* opCtx,
// We woke up because either the timed_wait expired, or there was more data. Either way,
// attempt to generate another batch of results.
- generateBatch(ntoreturn, cursorPin.getCursor(), &bb, &numResults, &state);
+ generateBatch(
+ ntoreturn, cursorPin.getCursor(), &bb, &numResults, &docUnitsReturned, &state);
}
PlanSummaryStats postExecutionStats;
@@ -553,6 +558,10 @@ Message getMore(OperationContext* opCtx,
dropAndReaquireReadLock);
}
+ // Increment this metric once the command succeeds and we know it will return documents.
+ auto& metricsCollector = ResourceConsumption::MetricsCollector::get(opCtx);
+ metricsCollector.incrementDocUnitsReturned(docUnitsReturned);
+
QueryResult::View qr = bb.buf();
qr.msgdata().setLen(bb.len());
qr.msgdata().setOperation(opReply);
@@ -678,6 +687,7 @@ bool runQuery(OperationContext* opCtx,
// How many results have we obtained from the executor?
int numResults = 0;
+ ResourceConsumption::DocumentUnitCounter docUnitsReturned;
BSONObj obj;
PlanExecutor::ExecState state;
@@ -702,6 +712,8 @@ bool runQuery(OperationContext* opCtx,
// Count the result.
++numResults;
+ docUnitsReturned.observeOne(obj.objsize());
+
if (FindCommon::enoughForFirstBatch(qr, numResults)) {
LOGV2_DEBUG(20915,
5,
@@ -781,6 +793,10 @@ bool runQuery(OperationContext* opCtx,
endQueryOp(opCtx, collection.getCollection(), *exec, numResults, ccId);
}
+ // Increment this metric once it has succeeded and we know it will return documents.
+ auto& metricsCollector = ResourceConsumption::MetricsCollector::get(opCtx);
+ metricsCollector.incrementDocUnitsReturned(docUnitsReturned);
+
// Fill out the output buffer's header.
QueryResult::View queryResultView = bb.buf();
queryResultView.setCursorId(ccId);
diff --git a/src/mongo/db/stats/resource_consumption_metrics.cpp b/src/mongo/db/stats/resource_consumption_metrics.cpp
index bf689584759..88d4d4cfeaf 100644
--- a/src/mongo/db/stats/resource_consumption_metrics.cpp
+++ b/src/mongo/db/stats/resource_consumption_metrics.cpp
@@ -116,22 +116,35 @@ ResourceConsumption::MetricsCollector& ResourceConsumption::MetricsCollector::ge
return getMetricsCollector(opCtx);
}
+void ResourceConsumption::UnitCounter::observeOne(size_t datumBytes) {
+ _units += std::ceil(datumBytes / static_cast<float>(unitSize()));
+ _bytes += datumBytes;
+}
+
+int ResourceConsumption::DocumentUnitCounter::unitSize() const {
+ return gDocumentUnitSizeBytes;
+}
+
+int ResourceConsumption::IdxEntryUnitCounter::unitSize() const {
+ return gIndexEntryUnitSizeBytes;
+}
+
void ResourceConsumption::ReadMetrics::toBson(BSONObjBuilder* builder) const {
- builder->appendNumber(kDocBytesRead, docBytesRead);
- builder->appendNumber(kDocUnitsRead, docUnitsRead);
- builder->appendNumber(kIdxEntryBytesRead, idxEntryBytesRead);
- builder->appendNumber(kIdxEntryUnitsRead, idxEntryUnitsRead);
+ builder->appendNumber(kDocBytesRead, docsRead.bytes());
+ builder->appendNumber(kDocUnitsRead, docsRead.units());
+ builder->appendNumber(kIdxEntryBytesRead, idxEntriesRead.bytes());
+ builder->appendNumber(kIdxEntryUnitsRead, idxEntriesRead.units());
builder->appendNumber(kKeysSorted, keysSorted);
builder->appendNumber(kSorterSpills, sorterSpills);
- builder->appendNumber(kDocUnitsReturned, docUnitsReturned);
+ builder->appendNumber(kDocUnitsReturned, docsReturned.units());
builder->appendNumber(kCursorSeeks, cursorSeeks);
}
void ResourceConsumption::WriteMetrics::toBson(BSONObjBuilder* builder) const {
- builder->appendNumber(kDocBytesWritten, docBytesWritten);
- builder->appendNumber(kDocUnitsWritten, docUnitsWritten);
- builder->appendNumber(kIdxEntryBytesWritten, idxEntryBytesWritten);
- builder->appendNumber(kIdxEntryUnitsWritten, idxEntryUnitsWritten);
+ builder->appendNumber(kDocBytesWritten, docsWritten.bytes());
+ builder->appendNumber(kDocUnitsWritten, docsWritten.units());
+ builder->appendNumber(kIdxEntryBytesWritten, idxEntriesWritten.bytes());
+ builder->appendNumber(kIdxEntryUnitsWritten, idxEntriesWritten.units());
}
void ResourceConsumption::AggregatedMetrics::toBson(BSONObjBuilder* builder) const {
@@ -160,22 +173,22 @@ void ResourceConsumption::OperationMetrics::toBson(BSONObjBuilder* builder) cons
}
void ResourceConsumption::OperationMetrics::toBsonNonZeroFields(BSONObjBuilder* builder) const {
- appendNonZeroMetric(builder, kDocBytesRead, readMetrics.docBytesRead);
- appendNonZeroMetric(builder, kDocUnitsRead, readMetrics.docUnitsRead);
- appendNonZeroMetric(builder, kIdxEntryBytesRead, readMetrics.idxEntryBytesRead);
- appendNonZeroMetric(builder, kIdxEntryUnitsRead, readMetrics.idxEntryUnitsRead);
+ appendNonZeroMetric(builder, kDocBytesRead, readMetrics.docsRead.bytes());
+ appendNonZeroMetric(builder, kDocUnitsRead, readMetrics.docsRead.units());
+ appendNonZeroMetric(builder, kIdxEntryBytesRead, readMetrics.idxEntriesRead.bytes());
+ appendNonZeroMetric(builder, kIdxEntryUnitsRead, readMetrics.idxEntriesRead.units());
appendNonZeroMetric(builder, kKeysSorted, readMetrics.keysSorted);
appendNonZeroMetric(builder, kSorterSpills, readMetrics.sorterSpills);
- appendNonZeroMetric(builder, kDocUnitsReturned, readMetrics.docUnitsReturned);
+ appendNonZeroMetric(builder, kDocUnitsReturned, readMetrics.docsReturned.units());
appendNonZeroMetric(builder, kCursorSeeks, readMetrics.cursorSeeks);
if (cpuTimer) {
appendNonZeroMetric(builder, kCpuNanos, durationCount<Nanoseconds>(cpuTimer->getElapsed()));
}
- appendNonZeroMetric(builder, kDocBytesWritten, writeMetrics.docBytesWritten);
- appendNonZeroMetric(builder, kDocUnitsWritten, writeMetrics.docUnitsWritten);
- appendNonZeroMetric(builder, kIdxEntryBytesWritten, writeMetrics.idxEntryBytesWritten);
- appendNonZeroMetric(builder, kIdxEntryUnitsWritten, writeMetrics.idxEntryUnitsWritten);
+ appendNonZeroMetric(builder, kDocBytesWritten, writeMetrics.docsWritten.bytes());
+ appendNonZeroMetric(builder, kDocUnitsWritten, writeMetrics.docsWritten.units());
+ appendNonZeroMetric(builder, kIdxEntryBytesWritten, writeMetrics.idxEntriesWritten.bytes());
+ appendNonZeroMetric(builder, kIdxEntryUnitsWritten, writeMetrics.idxEntriesWritten.units());
}
template <typename Func>
@@ -187,19 +200,11 @@ inline void ResourceConsumption::MetricsCollector::_doIfCollecting(Func&& func)
}
void ResourceConsumption::MetricsCollector::incrementOneDocRead(size_t docBytesRead) {
- _doIfCollecting([&]() {
- size_t docUnits = std::ceil(docBytesRead / static_cast<float>(gDocumentUnitSizeBytes));
- _metrics.readMetrics.docBytesRead += docBytesRead;
- _metrics.readMetrics.docUnitsRead += docUnits;
- });
+ _doIfCollecting([&]() { _metrics.readMetrics.docsRead.observeOne(docBytesRead); });
}
void ResourceConsumption::MetricsCollector::incrementOneIdxEntryRead(size_t bytesRead) {
- _doIfCollecting([&]() {
- size_t units = std::ceil(bytesRead / static_cast<float>(gIndexEntryUnitSizeBytes));
- _metrics.readMetrics.idxEntryBytesRead += bytesRead;
- _metrics.readMetrics.idxEntryUnitsRead += units;
- });
+ _doIfCollecting([&]() { _metrics.readMetrics.idxEntriesRead.observeOne(bytesRead); });
}
void ResourceConsumption::MetricsCollector::incrementKeysSorted(size_t keysSorted) {
@@ -210,24 +215,17 @@ void ResourceConsumption::MetricsCollector::incrementSorterSpills(size_t spills)
_doIfCollecting([&]() { _metrics.readMetrics.sorterSpills += spills; });
}
-void ResourceConsumption::MetricsCollector::incrementDocUnitsReturned(size_t returned) {
- _doIfCollecting([&]() { _metrics.readMetrics.docUnitsReturned += returned; });
+void ResourceConsumption::MetricsCollector::incrementDocUnitsReturned(
+ DocumentUnitCounter docUnits) {
+ _doIfCollecting([&]() { _metrics.readMetrics.docsReturned += docUnits; });
}
void ResourceConsumption::MetricsCollector::incrementOneDocWritten(size_t bytesWritten) {
- _doIfCollecting([&] {
- size_t docUnits = std::ceil(bytesWritten / static_cast<float>(gDocumentUnitSizeBytes));
- _metrics.writeMetrics.docBytesWritten += bytesWritten;
- _metrics.writeMetrics.docUnitsWritten += docUnits;
- });
+ _doIfCollecting([&] { _metrics.writeMetrics.docsWritten.observeOne(bytesWritten); });
}
void ResourceConsumption::MetricsCollector::incrementOneIdxEntryWritten(size_t bytesWritten) {
- _doIfCollecting([&] {
- size_t idxUnits = std::ceil(bytesWritten / static_cast<float>(gIndexEntryUnitSizeBytes));
- _metrics.writeMetrics.idxEntryBytesWritten += bytesWritten;
- _metrics.writeMetrics.idxEntryUnitsWritten += idxUnits;
- });
+ _doIfCollecting([&] { _metrics.writeMetrics.idxEntriesWritten.observeOne(bytesWritten); });
}
void ResourceConsumption::MetricsCollector::beginScopedCollecting(OperationContext* opCtx,
diff --git a/src/mongo/db/stats/resource_consumption_metrics.h b/src/mongo/db/stats/resource_consumption_metrics.h
index 3e392b7a8f1..25282410324 100644
--- a/src/mongo/db/stats/resource_consumption_metrics.h
+++ b/src/mongo/db/stats/resource_consumption_metrics.h
@@ -50,17 +50,80 @@ public:
static ResourceConsumption& get(OperationContext* opCtx);
static ResourceConsumption& get(ServiceContext* svcCtx);
+ /**
+ * UnitCounter observes individual input datums and then calculates the total number of bytes
+ * and whole number units observed.
+ */
+ class UnitCounter {
+ public:
+ UnitCounter() = default;
+
+ void add(const UnitCounter& other) {
+ _bytes += other._bytes;
+ _units += other._units;
+ }
+
+ UnitCounter& operator+=(const UnitCounter& other) {
+ add(other);
+ return *this;
+ }
+
+ long long bytes() const {
+ return _bytes;
+ }
+ long long units() const {
+ return _units;
+ }
+
+ /**
+ * Call once per input datum with its size in bytes.
+ *
+ * This function calculates the number of units observed based on the implementation-specific
+ * unitSize(). The function uses the following formula to calculate the number of units per
+ * datum:
+ *
+ * units = ceil (datum bytes / unit size in bytes)
+ *
+ * This achieves the goal of counting small datums as at least one unit while ensuring
+ * larger datums are accounted for proportionately. This can result in overstating smaller datums
+ * when the unit size is large. This is desired behavior, and the extent to which small
+ * datums are overstated is tunable by the unit size of the implementor.
+ */
+ void observeOne(size_t datumBytes);
+
+ protected:
+ /**
+ * Returns the implementation-specific unit size.
+ */
+ virtual int unitSize() const = 0;
+
+ long long _bytes = 0;
+ long long _units = 0;
+ };
+
+ /** DocumentUnitCounter records the number of document units observed. */
+ class DocumentUnitCounter : public UnitCounter {
+ private:
+ int unitSize() const final;
+ };
+
+ /** IdxEntryUnitCounter records the number of index entry units observed. */
+ class IdxEntryUnitCounter : public UnitCounter {
+ private:
+ int unitSize() const final;
+ };
+
/** ReadMetrics maintains metrics for read operations. */
class ReadMetrics {
public:
+ ReadMetrics() = default;
+
void add(const ReadMetrics& other) {
- docBytesRead += other.docBytesRead;
- docUnitsRead += other.docUnitsRead;
- idxEntryBytesRead += other.idxEntryBytesRead;
- idxEntryUnitsRead += other.idxEntryUnitsRead;
+ docsRead += other.docsRead;
+ idxEntriesRead += other.idxEntriesRead;
+ docsReturned += other.docsReturned;
keysSorted += other.keysSorted;
sorterSpills += other.sorterSpills;
- docUnitsReturned += other.docUnitsReturned;
cursorSeeks += other.cursorSeeks;
}
@@ -74,20 +137,17 @@ public:
*/
void toBson(BSONObjBuilder* builder) const;
- // Number of document bytes read
- long long docBytesRead = 0;
// Number of document units read
- long long docUnitsRead = 0;
- // Number of index entry bytes read
- long long idxEntryBytesRead = 0;
- // Number of index entries units read
- long long idxEntryUnitsRead = 0;
+ DocumentUnitCounter docsRead;
+ // Number of index entry units read
+ IdxEntryUnitCounter idxEntriesRead;
+ // Number of document units returned by a query
+ DocumentUnitCounter docsReturned;
+
// Number of keys sorted for query operations
long long keysSorted = 0;
// Number of individual spills of data to disk by the sorter
long long sorterSpills = 0;
- // Number of document units returned by a query
- long long docUnitsReturned = 0;
// Number of cursor seeks
long long cursorSeeks = 0;
};
@@ -96,10 +156,8 @@ public:
class WriteMetrics {
public:
void add(const WriteMetrics& other) {
- docBytesWritten += other.docBytesWritten;
- docUnitsWritten += other.docUnitsWritten;
- idxEntryBytesWritten += other.idxEntryBytesWritten;
- idxEntryUnitsWritten += other.idxEntryUnitsWritten;
+ docsWritten += other.docsWritten;
+ idxEntriesWritten += other.idxEntriesWritten;
}
WriteMetrics& operator+=(const WriteMetrics& other) {
@@ -112,14 +170,10 @@ public:
*/
void toBson(BSONObjBuilder* builder) const;
- // Number of document bytes written
- long long docBytesWritten = 0;
- // Number of document units written
- long long docUnitsWritten = 0;
- // Number of index entry bytes written
- long long idxEntryBytesWritten = 0;
- // Number of index entry units written
- long long idxEntryUnitsWritten = 0;
+ // Number of documents written
+ DocumentUnitCounter docsWritten;
+ // Number of index entries written
+ IdxEntryUnitCounter idxEntriesWritten;
};
/**
@@ -191,11 +245,6 @@ public:
public:
MetricsCollector() = default;
- // Delete copy constructors to prevent callers from accidentally copying when this is
- // decorated on the OperationContext by reference.
- MetricsCollector(const MetricsCollector&) = delete;
- MetricsCollector operator=(const MetricsCollector&) = delete;
-
static MetricsCollector& get(OperationContext* opCtx);
/**
@@ -256,9 +305,7 @@ public:
void reset() {
invariant(!isInScope());
- _metrics = {};
- _dbName = {};
- _hasCollectedMetrics = false;
+ *this = {};
}
/**
@@ -285,7 +332,10 @@ public:
*/
void incrementSorterSpills(size_t spills);
- void incrementDocUnitsReturned(size_t docUnitsReturned);
+ /**
+ * Increments the number of document units returned in the command response.
+ */
+ void incrementDocUnitsReturned(DocumentUnitCounter docUnitsReturned);
/**
* This should be called once per document written with the number of bytes written for that
@@ -310,6 +360,11 @@ public:
void incrementOneCursorSeek();
private:
+ // Keep the copy constructor and copy assignment private to prevent callers from
+ // accidentally copying when this is decorated on the OperationContext by reference.
+ MetricsCollector(const MetricsCollector&) = default;
+ MetricsCollector& operator=(const MetricsCollector&) = default;
+
/**
* Helper function that calls the Func when this collector is currently collecting metrics.
*/
diff --git a/src/mongo/db/stats/resource_consumption_metrics_test.cpp b/src/mongo/db/stats/resource_consumption_metrics_test.cpp
index a6ed735dff8..bcb1b5335c5 100644
--- a/src/mongo/db/stats/resource_consumption_metrics_test.cpp
+++ b/src/mongo/db/stats/resource_consumption_metrics_test.cpp
@@ -214,6 +214,14 @@ TEST_F(ResourceConsumptionMetricsTest, NestedScopedMetricsCollector) {
ASSERT_EQ(metricsCopy.count("db2"), 0);
}
+namespace {
+ResourceConsumption::DocumentUnitCounter makeDocUnits(size_t bytes) {
+ ResourceConsumption::DocumentUnitCounter docUnitsReturned;
+ docUnitsReturned.observeOne(bytes);
+ return docUnitsReturned;
+}
+} // namespace
+
TEST_F(ResourceConsumptionMetricsTest, IncrementReadMetrics) {
auto& globalResourceConsumption = ResourceConsumption::get(getServiceContext());
auto& operationMetrics = ResourceConsumption::MetricsCollector::get(_opCtx.get());
@@ -225,20 +233,21 @@ TEST_F(ResourceConsumptionMetricsTest, IncrementReadMetrics) {
operationMetrics.incrementOneIdxEntryRead(8);
operationMetrics.incrementKeysSorted(16);
operationMetrics.incrementSorterSpills(32);
- operationMetrics.incrementDocUnitsReturned(64);
+ operationMetrics.incrementDocUnitsReturned(makeDocUnits(64));
operationMetrics.incrementOneCursorSeek();
}
ASSERT(operationMetrics.hasCollectedMetrics());
auto metricsCopy = globalResourceConsumption.getDbMetrics();
- ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.docBytesRead, 2);
- ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.docUnitsRead, 1);
- ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.idxEntryBytesRead, 8);
- ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.idxEntryUnitsRead, 1);
+ ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.docsRead.bytes(), 2);
+ ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.docsRead.units(), 1);
+ ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.idxEntriesRead.bytes(), 8);
+ ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.idxEntriesRead.units(), 1);
ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.keysSorted, 16);
ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.sorterSpills, 32);
- ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.docUnitsReturned, 64);
+ ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.docsReturned.bytes(), 64);
+ ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.docsReturned.units(), 1);
ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.cursorSeeks, 1);
// Clear metrics so we do not double-count.
@@ -251,18 +260,19 @@ TEST_F(ResourceConsumptionMetricsTest, IncrementReadMetrics) {
operationMetrics.incrementOneIdxEntryRead(128);
operationMetrics.incrementKeysSorted(256);
operationMetrics.incrementSorterSpills(512);
- operationMetrics.incrementDocUnitsReturned(1024);
+ operationMetrics.incrementDocUnitsReturned(makeDocUnits(1024));
operationMetrics.incrementOneCursorSeek();
}
metricsCopy = globalResourceConsumption.getDbMetrics();
- ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.docBytesRead, 2 + 32);
- ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.docUnitsRead, 2);
- ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.idxEntryBytesRead, 8 + 128);
- ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.idxEntryUnitsRead, 1 + 8);
+ ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.docsRead.bytes(), 2 + 32);
+ ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.docsRead.units(), 2);
+ ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.idxEntriesRead.bytes(), 8 + 128);
+ ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.idxEntriesRead.units(), 1 + 8);
ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.keysSorted, 16 + 256);
ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.sorterSpills, 32 + 512);
- ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.docUnitsReturned, 64 + 1024);
+ ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.docsReturned.bytes(), 64 + 1024);
+ ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.docsReturned.units(), 1 + 8);
ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.cursorSeeks, 1 + 1);
}
@@ -280,18 +290,19 @@ TEST_F(ResourceConsumptionMetricsTest, IncrementReadMetricsSecondary) {
operationMetrics.incrementOneIdxEntryRead(8);
operationMetrics.incrementKeysSorted(16);
operationMetrics.incrementSorterSpills(32);
- operationMetrics.incrementDocUnitsReturned(64);
+ operationMetrics.incrementDocUnitsReturned(makeDocUnits(64));
operationMetrics.incrementOneCursorSeek();
}
auto metricsCopy = globalResourceConsumption.getDbMetrics();
- ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.docBytesRead, 2);
- ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.docUnitsRead, 1);
- ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.idxEntryBytesRead, 8);
- ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.idxEntryUnitsRead, 1);
+ ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.docsRead.bytes(), 2);
+ ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.docsRead.units(), 1);
+ ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.idxEntriesRead.bytes(), 8);
+ ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.idxEntriesRead.units(), 1);
ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.keysSorted, 16);
ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.sorterSpills, 32);
- ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.docUnitsReturned, 64);
+ ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.docsReturned.bytes(), 64);
+ ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.docsReturned.units(), 1);
ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.cursorSeeks, 1);
// Clear metrics so we do not double-count.
@@ -304,18 +315,19 @@ TEST_F(ResourceConsumptionMetricsTest, IncrementReadMetricsSecondary) {
operationMetrics.incrementOneIdxEntryRead(128);
operationMetrics.incrementKeysSorted(256);
operationMetrics.incrementSorterSpills(512);
- operationMetrics.incrementDocUnitsReturned(1024);
+ operationMetrics.incrementDocUnitsReturned(makeDocUnits(1024));
operationMetrics.incrementOneCursorSeek();
}
metricsCopy = globalResourceConsumption.getDbMetrics();
- ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.docBytesRead, 2 + 32);
- ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.docUnitsRead, 2);
- ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.idxEntryBytesRead, 8 + 128);
- ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.idxEntryUnitsRead, 1 + 8);
+ ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.docsRead.bytes(), 2 + 32);
+ ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.docsRead.units(), 2);
+ ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.idxEntriesRead.bytes(), 8 + 128);
+ ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.idxEntriesRead.units(), 1 + 8);
ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.keysSorted, 16 + 256);
ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.sorterSpills, 32 + 512);
- ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.docUnitsReturned, 64 + 1024);
+ ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.docsReturned.bytes(), 64 + 1024);
+ ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.docsReturned.units(), 1 + 8);
ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.cursorSeeks, 1 + 1);
}
@@ -332,7 +344,7 @@ TEST_F(ResourceConsumptionMetricsTest, IncrementReadMetricsAcrossStates) {
operationMetrics.incrementOneIdxEntryRead(8);
operationMetrics.incrementKeysSorted(16);
operationMetrics.incrementSorterSpills(32);
- operationMetrics.incrementDocUnitsReturned(64);
+ operationMetrics.incrementDocUnitsReturned(makeDocUnits(64));
operationMetrics.incrementOneCursorSeek();
ASSERT_OK(repl::ReplicationCoordinator::get(_opCtx.get())
@@ -342,25 +354,27 @@ TEST_F(ResourceConsumptionMetricsTest, IncrementReadMetricsAcrossStates) {
operationMetrics.incrementOneIdxEntryRead(128);
operationMetrics.incrementKeysSorted(256);
operationMetrics.incrementSorterSpills(512);
- operationMetrics.incrementDocUnitsReturned(1024);
+ operationMetrics.incrementDocUnitsReturned(makeDocUnits(1024));
operationMetrics.incrementOneCursorSeek();
}
auto metricsCopy = globalResourceConsumption.getAndClearDbMetrics();
- ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.docBytesRead, 0);
- ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.docUnitsRead, 0);
- ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.idxEntryBytesRead, 0);
- ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.idxEntryUnitsRead, 0);
+ ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.docsRead.bytes(), 0);
+ ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.docsRead.units(), 0);
+ ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.idxEntriesRead.bytes(), 0);
+ ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.idxEntriesRead.units(), 0);
ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.keysSorted, 0);
- ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.docUnitsReturned, 0);
+ ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.docsReturned.bytes(), 0);
+ ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.docsReturned.units(), 0);
ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.cursorSeeks, 0);
- ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.docBytesRead, 2 + 32);
- ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.docUnitsRead, 2);
- ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.idxEntryBytesRead, 8 + 128);
- ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.idxEntryUnitsRead, 1 + 8);
+ ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.docsRead.bytes(), 2 + 32);
+ ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.docsRead.units(), 2);
+ ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.idxEntriesRead.bytes(), 8 + 128);
+ ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.idxEntriesRead.units(), 1 + 8);
ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.keysSorted, 16 + 256);
ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.sorterSpills, 32 + 512);
- ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.docUnitsReturned, 64 + 1024);
+ ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.docsReturned.bytes(), 64 + 1024);
+ ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.docsReturned.units(), 1 + 8);
ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.cursorSeeks, 1 + 1);
operationMetrics.reset();
@@ -374,7 +388,7 @@ TEST_F(ResourceConsumptionMetricsTest, IncrementReadMetricsAcrossStates) {
operationMetrics.incrementOneIdxEntryRead(8);
operationMetrics.incrementKeysSorted(16);
operationMetrics.incrementSorterSpills(32);
- operationMetrics.incrementDocUnitsReturned(64);
+ operationMetrics.incrementDocUnitsReturned(makeDocUnits(64));
operationMetrics.incrementOneCursorSeek();
ASSERT_OK(repl::ReplicationCoordinator::get(_opCtx.get())
@@ -384,26 +398,28 @@ TEST_F(ResourceConsumptionMetricsTest, IncrementReadMetricsAcrossStates) {
operationMetrics.incrementOneIdxEntryRead(128);
operationMetrics.incrementKeysSorted(256);
operationMetrics.incrementSorterSpills(512);
- operationMetrics.incrementDocUnitsReturned(1024);
+ operationMetrics.incrementDocUnitsReturned(makeDocUnits(1024));
operationMetrics.incrementOneCursorSeek();
}
metricsCopy = globalResourceConsumption.getAndClearDbMetrics();
- ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.docBytesRead, 2 + 32);
- ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.docUnitsRead, 2);
- ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.idxEntryBytesRead, 8 + 128);
- ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.idxEntryUnitsRead, 1 + 8);
+ ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.docsRead.bytes(), 2 + 32);
+ ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.docsRead.units(), 2);
+ ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.idxEntriesRead.bytes(), 8 + 128);
+ ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.idxEntriesRead.units(), 1 + 8);
ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.keysSorted, 16 + 256);
ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.sorterSpills, 32 + 512);
- ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.docUnitsReturned, 64 + 1024);
+ ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.docsReturned.bytes(), 64 + 1024);
+ ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.docsReturned.units(), 1 + 8);
ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.cursorSeeks, 1 + 1);
- ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.docBytesRead, 0);
- ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.docUnitsRead, 0);
- ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.idxEntryBytesRead, 0);
- ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.idxEntryUnitsRead, 0);
+ ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.docsRead.bytes(), 0);
+ ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.docsRead.units(), 0);
+ ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.idxEntriesRead.bytes(), 0);
+ ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.idxEntriesRead.units(), 0);
ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.keysSorted, 0);
ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.sorterSpills, 0);
- ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.docUnitsReturned, 0);
+ ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.docsReturned.bytes(), 0);
+ ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.docsReturned.units(), 0);
ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.cursorSeeks, 0);
}
@@ -438,8 +454,8 @@ TEST_F(ResourceConsumptionMetricsTest, DocumentUnitsRead) {
}
auto metricsCopy = globalResourceConsumption.getDbMetrics();
- ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.docBytesRead, expectedBytes);
- ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.docUnitsRead, expectedUnits);
+ ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.docsRead.bytes(), expectedBytes);
+ ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.docsRead.units(), expectedUnits);
}
TEST_F(ResourceConsumptionMetricsTest, DocumentUnitsWritten) {
@@ -473,8 +489,8 @@ TEST_F(ResourceConsumptionMetricsTest, DocumentUnitsWritten) {
}
auto metricsCopy = globalResourceConsumption.getDbMetrics();
- ASSERT_EQ(metricsCopy["db1"].writeMetrics.docBytesWritten, expectedBytes);
- ASSERT_EQ(metricsCopy["db1"].writeMetrics.docUnitsWritten, expectedUnits);
+ ASSERT_EQ(metricsCopy["db1"].writeMetrics.docsWritten.bytes(), expectedBytes);
+ ASSERT_EQ(metricsCopy["db1"].writeMetrics.docsWritten.units(), expectedUnits);
}
TEST_F(ResourceConsumptionMetricsTest, IdxEntryUnitsRead) {
@@ -522,8 +538,8 @@ TEST_F(ResourceConsumptionMetricsTest, IdxEntryUnitsRead) {
}
auto metricsCopy = globalResourceConsumption.getDbMetrics();
- ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.idxEntryBytesRead, expectedBytes);
- ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.idxEntryUnitsRead, expectedUnits);
+ ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.idxEntriesRead.bytes(), expectedBytes);
+ ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.idxEntriesRead.units(), expectedUnits);
}
TEST_F(ResourceConsumptionMetricsTest, IdxEntryUnitsWritten) {
@@ -571,8 +587,8 @@ TEST_F(ResourceConsumptionMetricsTest, IdxEntryUnitsWritten) {
}
auto metricsCopy = globalResourceConsumption.getDbMetrics();
- ASSERT_EQ(metricsCopy["db1"].writeMetrics.idxEntryBytesWritten, expectedBytes);
- ASSERT_EQ(metricsCopy["db1"].writeMetrics.idxEntryUnitsWritten, expectedUnits);
+ ASSERT_EQ(metricsCopy["db1"].writeMetrics.idxEntriesWritten.bytes(), expectedBytes);
+ ASSERT_EQ(metricsCopy["db1"].writeMetrics.idxEntriesWritten.units(), expectedUnits);
}
TEST_F(ResourceConsumptionMetricsTest, CpuNanos) {