author     Louis Williams <louis.williams@mongodb.com>    2020-10-21 16:19:01 -0400
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>    2020-10-21 23:26:39 +0000
commit     4ca58f1161c74f6f0573399a06e70ed3ff56b145 (patch)
tree       9bc7457e7dbd1416790e441f161ddddb4b98f899
parent     8958149de0de63f7be2b654e4520ced3dbaa91a5 (diff)
download   mongo-4ca58f1161c74f6f0573399a06e70ed3ff56b145.tar.gz
SERVER-51686 Collect index entry units read and written per operation
-rw-r--r--  jstests/core/profile_operation_metrics.js                 168
-rw-r--r--  jstests/noPassthrough/aggregate_operation_metrics.js       36
-rw-r--r--  src/mongo/db/stats/operation_resource_consumption.idl      12
-rw-r--r--  src/mongo/db/stats/resource_consumption_metrics.cpp        60
-rw-r--r--  src/mongo/db/stats/resource_consumption_metrics.h          38
-rw-r--r--  src/mongo/db/stats/resource_consumption_metrics_test.cpp  127
-rw-r--r--  src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp       41
7 files changed, 400 insertions(+), 82 deletions(-)
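
For orientation before the diff itself: this commit replaces the single idxEntriesRead counter with byte and unit counters for index entries read and written. As the resource_consumption_metrics.cpp changes below show, a unit is an entry's byte size divided by the new indexEntryUnitSizeBytes server parameter (default 16), rounded up. A minimal sketch of that rounding rule in mongo-shell JavaScript (the idxEntryUnits helper is illustrative, not part of the commit):

    // Mirrors std::ceil(bytesRead / float(gIndexEntryUnitSizeBytes)) below.
    const idxEntryUnits = (bytes, unitSize = 16) => Math.ceil(bytes / unitSize);
    assert.eq(idxEntryUnits(3), 1);   // a 3-byte key still costs one unit
    assert.eq(idxEntryUnits(16), 1);  // exactly one unit
    assert.eq(idxEntryUnits(17), 2);  // one byte over spills into a second unit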
diff --git a/jstests/core/profile_operation_metrics.js b/jstests/core/profile_operation_metrics.js
index 85e2a0f691e..c4a2441a257 100644
--- a/jstests/core/profile_operation_metrics.js
+++ b/jstests/core/profile_operation_metrics.js
@@ -39,13 +39,16 @@ const assertMetricsExist = (profilerEntry) => {
assert.gte(metrics.docBytesRead, 0);
assert.gte(metrics.docUnitsRead, 0);
- assert.gte(metrics.idxEntriesRead, 0);
+ assert.gte(metrics.idxEntryBytesRead, 0);
+ assert.gte(metrics.idxEntryUnitsRead, 0);
assert.gte(metrics.keysSorted, 0);
+ assert.gte(metrics.docUnitsReturned, 0);
assert.gte(metrics.cpuMillis, 0);
assert.gte(metrics.docBytesWritten, 0);
assert.gte(metrics.docUnitsWritten, 0);
- assert.gte(metrics.docUnitsReturned, 0);
+ assert.gte(metrics.idxEntryBytesWritten, 0);
+ assert.gte(metrics.idxEntryUnitsWritten, 0);
};
const runInLegacyQueryMode = (db, func) => {
@@ -90,9 +93,12 @@ const operations = [
// test run, so only assert this is non-zero.
assert.gt(profileDoc.docBytesRead, 0);
assert.gt(profileDoc.docUnitsRead, 0);
- assert.eq(profileDoc.idxEntriesRead, 0);
+ assert.eq(profileDoc.idxEntryBytesRead, 0);
+ assert.eq(profileDoc.idxEntryUnitsRead, 0);
assert.gt(profileDoc.docBytesWritten, 0);
assert.gt(profileDoc.docUnitsWritten, 0);
+ assert.eq(profileDoc.idxEntryBytesWritten, 0);
+ assert.eq(profileDoc.idxEntryUnitsWritten, 0);
}
},
{
@@ -108,9 +114,12 @@ const operations = [
// metrics for index builds.
assert.gt(profileDoc.docBytesRead, 0);
assert.gt(profileDoc.docUnitsRead, 0);
- assert.eq(profileDoc.idxEntriesRead, 0);
+ assert.eq(profileDoc.idxEntryBytesRead, 0);
+ assert.eq(profileDoc.idxEntryUnitsRead, 0);
assert.gt(profileDoc.docBytesWritten, 0);
assert.gt(profileDoc.docUnitsWritten, 0);
+ assert.eq(profileDoc.idxEntryBytesWritten, 0);
+ assert.eq(profileDoc.idxEntryUnitsWritten, 0);
}
},
{
@@ -123,7 +132,8 @@ const operations = [
// Insert should not perform any reads.
assert.eq(profileDoc.docBytesRead, 0);
assert.eq(profileDoc.docUnitsRead, 0);
- assert.eq(profileDoc.idxEntriesRead, 0);
+ assert.eq(profileDoc.idxEntryBytesRead, 0);
+ assert.eq(profileDoc.idxEntryUnitsRead, 0);
if (isReplSet) {
// Ensure writes to the oplog are counted. Some oplog fields like UUID are
// randomized between runs, but the types are fixed-length, so we can make strong
@@ -137,6 +147,8 @@ const operations = [
assert.eq(profileDoc.docBytesWritten, 29);
assert.eq(profileDoc.docUnitsWritten, 1);
}
+ assert.eq(profileDoc.idxEntryBytesWritten, 7);
+ assert.eq(profileDoc.idxEntryUnitsWritten, 2);
}
},
{
@@ -160,9 +172,12 @@ const operations = [
// Should read exactly as many bytes as are in the document.
assert.eq(profileDoc.docBytesRead, 29);
assert.eq(profileDoc.docUnitsRead, 1);
- assert.eq(profileDoc.idxEntriesRead, 1);
+ assert.eq(profileDoc.idxEntryBytesRead, 3);
+ assert.eq(profileDoc.idxEntryUnitsRead, 1);
assert.eq(profileDoc.docBytesWritten, 0);
assert.eq(profileDoc.docUnitsWritten, 0);
+ assert.eq(profileDoc.idxEntryBytesWritten, 0);
+ assert.eq(profileDoc.idxEntryUnitsWritten, 0);
}
},
{
@@ -175,9 +190,12 @@ const operations = [
// Should read exactly as many bytes as are in the document.
assert.eq(profileDoc.docBytesRead, 29);
assert.eq(profileDoc.docUnitsRead, 1);
- assert.eq(profileDoc.idxEntriesRead, 0);
+ assert.eq(profileDoc.idxEntryBytesRead, 0);
+ assert.eq(profileDoc.idxEntryUnitsRead, 0);
assert.eq(profileDoc.docBytesWritten, 0);
assert.eq(profileDoc.docUnitsWritten, 0);
+ assert.eq(profileDoc.idxEntryBytesWritten, 0);
+ assert.eq(profileDoc.idxEntryUnitsWritten, 0);
}
},
{
@@ -190,9 +208,12 @@ const operations = [
// Should read exactly as many bytes as are in the document.
assert.eq(profileDoc.docBytesRead, 29);
assert.eq(profileDoc.docUnitsRead, 1);
- assert.eq(profileDoc.idxEntriesRead, 0);
+ assert.eq(profileDoc.idxEntryBytesRead, 0);
+ assert.eq(profileDoc.idxEntryUnitsRead, 0);
assert.eq(profileDoc.docBytesWritten, 0);
assert.eq(profileDoc.docUnitsWritten, 0);
+ assert.eq(profileDoc.idxEntryBytesWritten, 0);
+ assert.eq(profileDoc.idxEntryUnitsWritten, 0);
}
},
{
@@ -205,9 +226,12 @@ const operations = [
// Does not read from the collection.
assert.eq(profileDoc.docBytesRead, 0);
assert.eq(profileDoc.docUnitsRead, 0);
- assert.eq(profileDoc.idxEntriesRead, 1);
+ assert.eq(profileDoc.idxEntryBytesRead, 3);
+ assert.eq(profileDoc.idxEntryUnitsRead, 1);
assert.eq(profileDoc.docBytesWritten, 0);
assert.eq(profileDoc.docUnitsWritten, 0);
+ assert.eq(profileDoc.idxEntryBytesWritten, 0);
+ assert.eq(profileDoc.idxEntryUnitsWritten, 0);
}
},
{
@@ -226,7 +250,8 @@ const operations = [
assert.gte(profileDoc.docBytesRead, 29);
assert.gte(profileDoc.docUnitsRead, 1);
}
- assert.eq(profileDoc.idxEntriesRead, 1);
+ assert.eq(profileDoc.idxEntryBytesRead, 3);
+ assert.eq(profileDoc.idxEntryUnitsRead, 1);
if (isReplSet) {
// Ensure writes to the oplog are counted.
assert.eq(profileDoc.docBytesWritten, 224);
@@ -237,6 +262,9 @@ const operations = [
assert.eq(profileDoc.docBytesWritten, 29);
assert.eq(profileDoc.docUnitsWritten, 1);
}
+ // Deletes one index entry and writes another.
+ assert.eq(profileDoc.idxEntryBytesWritten, 9);
+ assert.eq(profileDoc.idxEntryUnitsWritten, 2);
}
},
{
@@ -255,7 +283,8 @@ const operations = [
assert.gte(profileDoc.docUnitsRead, 1);
assert.gte(profileDoc.docBytesRead, 29);
}
- assert.eq(profileDoc.idxEntriesRead, 1);
+ assert.eq(profileDoc.idxEntryBytesRead, 3);
+ assert.eq(profileDoc.idxEntryUnitsRead, 1);
if (isReplSet) {
// Ensure writes to the oplog are counted.
assert.eq(profileDoc.docBytesWritten, 224);
@@ -266,6 +295,9 @@ const operations = [
assert.eq(profileDoc.docBytesWritten, 29);
assert.eq(profileDoc.docUnitsWritten, 1);
}
+ // Deletes one index entry and writes another.
+ assert.eq(profileDoc.idxEntryBytesWritten, 10);
+ assert.eq(profileDoc.idxEntryUnitsWritten, 2);
}
},
{
@@ -278,8 +310,11 @@ const operations = [
// Reads from the fast-count, not the collection.
assert.eq(profileDoc.docBytesRead, 0);
assert.eq(profileDoc.docUnitsRead, 0);
- assert.eq(profileDoc.idxEntriesRead, 0);
+ assert.eq(profileDoc.idxEntryBytesRead, 0);
+ assert.eq(profileDoc.idxEntryUnitsRead, 0);
assert.eq(profileDoc.docBytesWritten, 0);
+ assert.eq(profileDoc.idxEntryBytesWritten, 0);
+ assert.eq(profileDoc.idxEntryUnitsWritten, 0);
}
},
{
@@ -292,9 +327,12 @@ const operations = [
// Should not read from the collection.
assert.eq(profileDoc.docBytesRead, 0);
assert.eq(profileDoc.docUnitsRead, 0);
- assert.eq(profileDoc.idxEntriesRead, 0);
+ assert.eq(profileDoc.idxEntryBytesRead, 0);
+ assert.eq(profileDoc.idxEntryUnitsRead, 0);
assert.eq(profileDoc.docBytesWritten, 0);
assert.eq(profileDoc.docUnitsWritten, 0);
+ assert.eq(profileDoc.idxEntryBytesWritten, 0);
+ assert.eq(profileDoc.idxEntryUnitsWritten, 0);
}
},
// Clear the profile collection so we can easily identify new operations with similar filters as
@@ -310,8 +348,11 @@ const operations = [
// Should read from the collection.
assert.gt(profileDoc.docBytesRead, 0);
assert.gt(profileDoc.docUnitsRead, 0);
- assert.eq(profileDoc.idxEntriesRead, 0);
+ assert.eq(profileDoc.idxEntryBytesRead, 0);
+ assert.eq(profileDoc.idxEntryUnitsRead, 0);
assert.eq(profileDoc.docBytesWritten, 0);
+ assert.eq(profileDoc.idxEntryBytesWritten, 0);
+ assert.eq(profileDoc.idxEntryUnitsWritten, 0);
}
},
{
@@ -324,9 +365,12 @@ const operations = [
// This reads from the collection catalog.
assert.gt(profileDoc.docBytesRead, 0);
assert.gt(profileDoc.docUnitsRead, 0);
- assert.eq(profileDoc.idxEntriesRead, 0);
+ assert.eq(profileDoc.idxEntryBytesRead, 0);
+ assert.eq(profileDoc.idxEntryUnitsRead, 0);
assert.eq(profileDoc.docBytesWritten, 0);
assert.eq(profileDoc.docUnitsWritten, 0);
+ assert.eq(profileDoc.idxEntryBytesWritten, 0);
+ assert.eq(profileDoc.idxEntryUnitsWritten, 0);
}
},
{
@@ -339,9 +383,12 @@ const operations = [
// This reads from the collection catalog.
assert.gt(profileDoc.docBytesRead, 0);
assert.gt(profileDoc.docUnitsRead, 0);
- assert.eq(profileDoc.idxEntriesRead, 0);
+ assert.eq(profileDoc.idxEntryBytesRead, 0);
+ assert.eq(profileDoc.idxEntryUnitsRead, 0);
assert.gt(profileDoc.docBytesWritten, 0);
assert.gt(profileDoc.docUnitsWritten, 0);
+ assert.eq(profileDoc.idxEntryBytesWritten, 0);
+ assert.eq(profileDoc.idxEntryUnitsWritten, 0);
}
},
resetProfileColl,
@@ -365,9 +412,12 @@ const operations = [
assert.gte(profileDoc.docBytesRead, 29);
assert.gte(profileDoc.docUnitsRead, 1);
}
- assert.eq(profileDoc.idxEntriesRead, 0);
+ assert.eq(profileDoc.idxEntryBytesRead, 0);
+ assert.eq(profileDoc.idxEntryUnitsRead, 0);
assert.eq(profileDoc.docBytesWritten, 0);
assert.eq(profileDoc.docUnitsWritten, 0);
+ assert.eq(profileDoc.idxEntryBytesWritten, 0);
+ assert.eq(profileDoc.idxEntryUnitsWritten, 0);
}
},
{
@@ -386,7 +436,8 @@ const operations = [
assert.gte(profileDoc.docBytesRead, 58);
assert.gte(profileDoc.docUnitsRead, 2);
}
- assert.eq(profileDoc.idxEntriesRead, 1);
+ assert.eq(profileDoc.idxEntryBytesRead, 3);
+ assert.eq(profileDoc.idxEntryUnitsRead, 1);
if (isReplSet) {
// Ensure writes to the oplog are counted.
assert.eq(profileDoc.docBytesWritten, 177);
@@ -396,6 +447,8 @@ const operations = [
assert.eq(profileDoc.docBytesWritten, 29);
assert.eq(profileDoc.docUnitsWritten, 1);
}
+ assert.eq(profileDoc.idxEntryBytesWritten, 3);
+ assert.eq(profileDoc.idxEntryUnitsWritten, 1);
}
},
{
@@ -414,7 +467,8 @@ const operations = [
assert.gte(profileDoc.docBytesRead, 58);
assert.gte(profileDoc.docUnitsRead, 2);
}
- assert.eq(profileDoc.idxEntriesRead, 0);
+ assert.eq(profileDoc.idxEntryBytesRead, 0);
+ assert.eq(profileDoc.idxEntryUnitsRead, 0);
if (isReplSet) {
// Ensure writes to the oplog are counted.
assert.eq(profileDoc.docBytesWritten, 177);
@@ -424,6 +478,8 @@ const operations = [
assert.eq(profileDoc.docBytesWritten, 29);
assert.eq(profileDoc.docUnitsWritten, 1);
}
+ assert.eq(profileDoc.idxEntryBytesWritten, 3);
+ assert.eq(profileDoc.idxEntryUnitsWritten, 1);
}
},
{
@@ -436,9 +492,12 @@ const operations = [
// Reads from the collection catalog.
assert.gt(profileDoc.docBytesRead, 0);
assert.gt(profileDoc.docUnitsRead, 0);
- assert.eq(profileDoc.idxEntriesRead, 0);
+ assert.eq(profileDoc.idxEntryBytesRead, 0);
+ assert.eq(profileDoc.idxEntryUnitsRead, 0);
assert.gt(profileDoc.docBytesWritten, 0);
assert.gt(profileDoc.docUnitsWritten, 0);
+ assert.eq(profileDoc.idxEntryBytesWritten, 0);
+ assert.eq(profileDoc.idxEntryUnitsWritten, 0);
}
},
resetProfileColl,
@@ -457,9 +516,12 @@ const operations = [
// The exact amount of data read is not easily calculable.
assert.gt(profileDoc.docBytesRead, 0);
assert.gt(profileDoc.docUnitsRead, 0);
- assert.eq(profileDoc.idxEntriesRead, 0);
+ assert.eq(profileDoc.idxEntryBytesRead, 0);
+ assert.eq(profileDoc.idxEntryUnitsRead, 0);
assert.eq(profileDoc.docBytesWritten, 0);
assert.eq(profileDoc.docUnitsWritten, 0);
+ assert.eq(profileDoc.idxEntryBytesWritten, 0);
+ assert.eq(profileDoc.idxEntryUnitsWritten, 0);
}
},
{
@@ -475,9 +537,12 @@ const operations = [
// metrics for index builds.
assert.gt(profileDoc.docBytesRead, 0);
assert.gt(profileDoc.docUnitsRead, 0);
- assert.eq(profileDoc.idxEntriesRead, 0);
+ assert.eq(profileDoc.idxEntryBytesRead, 0);
+ assert.eq(profileDoc.idxEntryUnitsRead, 0);
assert.eq(profileDoc.docBytesWritten, 0);
assert.eq(profileDoc.docUnitsWritten, 0);
+ assert.eq(profileDoc.idxEntryBytesWritten, 0);
+ assert.eq(profileDoc.idxEntryUnitsWritten, 0);
}
},
resetProfileColl,
@@ -491,7 +556,9 @@ const operations = [
// Insert should not perform any reads.
assert.eq(profileDoc.docBytesRead, 0);
assert.eq(profileDoc.docUnitsRead, 0);
- assert.eq(profileDoc.idxEntriesRead, 1);
+ // Reads the index entry for 'a' to determine uniqueness.
+ assert.eq(profileDoc.idxEntryBytesRead, 6);
+ assert.eq(profileDoc.idxEntryUnitsRead, 1);
if (isReplSet) {
// Ensure writes to the oplog are counted. Some oplog fields like UUID are
// randomized between runs, but the types are fixed-length, so we can make strong
@@ -503,6 +570,9 @@ const operations = [
assert.eq(profileDoc.docBytesWritten, 29);
assert.eq(profileDoc.docUnitsWritten, 1);
}
+ // Deletes one entry and writes another.
+ assert.eq(profileDoc.idxEntryBytesWritten, 10);
+ assert.eq(profileDoc.idxEntryUnitsWritten, 2);
}
},
resetProfileColl,
@@ -519,12 +589,15 @@ const operations = [
assert.eq(profileDoc.docBytesRead, 0);
assert.eq(profileDoc.docUnitsRead, 0);
// Inserting into a unique index requires reading one key.
- assert.eq(profileDoc.idxEntriesRead, 1);
+ assert.eq(profileDoc.idxEntryBytesRead, 4);
+ assert.eq(profileDoc.idxEntryUnitsRead, 1);
// Despite failing to insert keys into the unique index, the operation first succeeded
in writing to the collection. Even though the operation was rolled back, this counts
// towards metrics.
assert.eq(profileDoc.docBytesWritten, 29);
assert.eq(profileDoc.docUnitsWritten, 1);
+ assert.eq(profileDoc.idxEntryBytesWritten, 4);
+ assert.eq(profileDoc.idxEntryUnitsWritten, 1);
}
},
{
@@ -544,7 +617,8 @@ const operations = [
assert.gte(profileDoc.docUnitsRead, 1);
}
// Reads index entries on '_id' for the lookup and 'a' to ensure uniqueness.
- assert.eq(profileDoc.idxEntriesRead, 2);
+ assert.eq(profileDoc.idxEntryBytesRead, 9);
+ assert.eq(profileDoc.idxEntryUnitsRead, 2);
if (isReplSet) {
// Ensure writes to the oplog are counted.
assert.eq(profileDoc.docBytesWritten, 224);
@@ -555,6 +629,9 @@ const operations = [
assert.eq(profileDoc.docBytesWritten, 29);
assert.eq(profileDoc.docUnitsWritten, 1);
}
+ // Removes one entry and inserts another.
+ assert.eq(profileDoc.idxEntryBytesWritten, 11);
+ assert.eq(profileDoc.idxEntryUnitsWritten, 2);
}
},
{
@@ -580,7 +657,8 @@ const operations = [
assert.gte(profileDoc.docUnitsRead, 9);
}
// Reads index entries on '_id' for the lookup and 'a' to ensure uniqueness.
- assert.eq(profileDoc.idxEntriesRead, 2);
+ assert.eq(profileDoc.idxEntryBytesRead, 10);
+ assert.eq(profileDoc.idxEntryUnitsRead, 2);
if (isReplSet) {
// When WT_MODIFY is used on a replicated collection, in addition to writing fewer
// bytes per the comment about WT_MODIFY above, ensure it also inserts into the
@@ -591,6 +669,9 @@ const operations = [
assert.eq(profileDoc.docBytesWritten, 1061);
assert.eq(profileDoc.docUnitsWritten, 9);
}
+ // Removes one entry and inserts another.
+ assert.eq(profileDoc.idxEntryBytesWritten, 10);
+ assert.eq(profileDoc.idxEntryUnitsWritten, 2);
}
},
{
@@ -615,7 +696,8 @@ const operations = [
assert.gte(profileDoc.docBytesRead, 29);
assert.gte(profileDoc.docUnitsRead, 1);
}
- assert.eq(profileDoc.idxEntriesRead, 1);
+ assert.eq(profileDoc.idxEntryBytesRead, 4);
+ assert.eq(profileDoc.idxEntryUnitsRead, 1);
if (isReplSet) {
// When WT_MODIFY is used on a replicated collection, in addition to writing fewer
// bytes per the comment about WT_MODIFY above, ensure it also inserts into the
@@ -628,6 +710,8 @@ const operations = [
assert.eq(profileDoc.docBytesWritten, 16);
assert.eq(profileDoc.docUnitsWritten, 1);
}
+ assert.eq(profileDoc.idxEntryBytesWritten, 0);
+ assert.eq(profileDoc.idxEntryUnitsWritten, 0);
}
},
resetProfileColl,
@@ -644,7 +728,8 @@ const operations = [
// Insert should not perform any reads.
assert.eq(profileDoc.docBytesRead, 0);
assert.eq(profileDoc.docUnitsRead, 0);
- assert.eq(profileDoc.idxEntriesRead, 0);
+ assert.eq(profileDoc.idxEntryBytesRead, 0);
+ assert.eq(profileDoc.idxEntryUnitsRead, 0);
if (isReplSet) {
assert.eq(profileDoc.docBytesWritten, 188);
assert.eq(profileDoc.docUnitsWritten, 3);
@@ -652,6 +737,8 @@ const operations = [
assert.eq(profileDoc.docBytesWritten, 29);
assert.eq(profileDoc.docUnitsWritten, 1);
}
+ assert.eq(profileDoc.idxEntryBytesWritten, 3);
+ assert.eq(profileDoc.idxEntryUnitsWritten, 1);
}
},
{
@@ -665,9 +752,12 @@ const operations = [
profileAssert: (profileDoc) => {
assert.eq(profileDoc.docBytesRead, 29);
assert.eq(profileDoc.docUnitsRead, 1);
- assert.eq(profileDoc.idxEntriesRead, 1);
+ assert.eq(profileDoc.idxEntryBytesRead, 3);
+ assert.eq(profileDoc.idxEntryUnitsRead, 1);
assert.eq(profileDoc.docBytesWritten, 0);
assert.eq(profileDoc.docUnitsWritten, 0);
+ assert.eq(profileDoc.idxEntryBytesWritten, 0);
+ assert.eq(profileDoc.idxEntryUnitsWritten, 0);
}
},
resetProfileColl,
@@ -691,9 +781,12 @@ const operations = [
profileAssert: (profileDoc) => {
assert.eq(profileDoc.docBytesRead, 18);
assert.eq(profileDoc.docUnitsRead, 1);
- assert.eq(profileDoc.idxEntriesRead, 0);
+ assert.eq(profileDoc.idxEntryBytesRead, 0);
+ assert.eq(profileDoc.idxEntryUnitsRead, 0);
assert.eq(profileDoc.docBytesWritten, 0);
assert.eq(profileDoc.docUnitsWritten, 0);
+ assert.eq(profileDoc.idxEntryBytesWritten, 0);
+ assert.eq(profileDoc.idxEntryUnitsWritten, 0);
}
},
{
@@ -712,7 +805,8 @@ const operations = [
assert.gte(profileDoc.docBytesRead, 29);
assert.gte(profileDoc.docUnitsRead, 1);
}
- assert.eq(profileDoc.idxEntriesRead, 1);
+ assert.eq(profileDoc.idxEntryBytesRead, 3);
+ assert.eq(profileDoc.idxEntryUnitsRead, 1);
if (isReplSet) {
assert.eq(profileDoc.docBytesWritten, 211);
assert.eq(profileDoc.docUnitsWritten, 3);
@@ -720,6 +814,8 @@ const operations = [
assert.eq(profileDoc.docBytesWritten, 16);
assert.eq(profileDoc.docUnitsWritten, 1);
}
+ assert.eq(profileDoc.idxEntryBytesWritten, 0);
+ assert.eq(profileDoc.idxEntryUnitsWritten, 0);
}
},
{
@@ -740,7 +836,8 @@ const operations = [
assert.gte(profileDoc.docBytesRead, 58);
assert.gte(profileDoc.docUnitsRead, 2);
}
- assert.eq(profileDoc.idxEntriesRead, 1);
+ assert.eq(profileDoc.idxEntryBytesRead, 3);
+ assert.eq(profileDoc.idxEntryUnitsRead, 1);
if (isReplSet) {
assert.eq(profileDoc.docBytesWritten, 177);
assert.eq(profileDoc.docUnitsWritten, 3);
@@ -748,6 +845,8 @@ const operations = [
assert.eq(profileDoc.docBytesWritten, 29);
assert.eq(profileDoc.docUnitsWritten, 1);
}
+ assert.eq(profileDoc.idxEntryBytesWritten, 3);
+ assert.eq(profileDoc.idxEntryUnitsWritten, 1);
}
},
resetProfileColl,
@@ -765,7 +864,8 @@ const operations = [
profileAssert: (profileDoc) => {
assert.eq(profileDoc.docBytesRead, 0);
assert.eq(profileDoc.docUnitsRead, 0);
- assert.eq(profileDoc.idxEntriesRead, 0);
+ assert.eq(profileDoc.idxEntryBytesRead, 0);
+ assert.eq(profileDoc.idxEntryUnitsRead, 0);
if (isReplSet) {
assert.eq(profileDoc.docBytesWritten, 18800);
// Each inserted document counts for 1 document unit plus 2 document units for its
@@ -775,6 +875,8 @@ const operations = [
assert.eq(profileDoc.docBytesWritten, 2900);
assert.eq(profileDoc.docUnitsWritten, 100);
}
+ assert.eq(profileDoc.idxEntryBytesWritten, 299);
+ assert.eq(profileDoc.idxEntryUnitsWritten, 100);
}
},
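
A hedged sketch of how the test above observes these counters end to end: enable profiling, run a write, and inspect the newest profiler entry. The operationMetrics field name is an assumption (assertMetricsExist above reads a metrics object whose defining line falls outside the hunk), and the server must be started with resource consumption metrics collection enabled.

    const testDB = db.getSiblingDB("metrics_sketch");  // illustrative db name
    assert.commandWorked(testDB.setProfilingLevel(2));
    assert.commandWorked(testDB.coll.insert({_id: 1}));
    const entry = testDB.system.profile.find({op: "insert"}).sort({ts: -1}).next();
    const m = entry.operationMetrics;       // assumed field name
    assert.gte(m.idxEntryBytesWritten, 0);  // the _id index write is counted
    assert.gte(m.idxEntryUnitsWritten, 0);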
diff --git a/jstests/noPassthrough/aggregate_operation_metrics.js b/jstests/noPassthrough/aggregate_operation_metrics.js
index c03950986ad..560c6b2b608 100644
--- a/jstests/noPassthrough/aggregate_operation_metrics.js
+++ b/jstests/noPassthrough/aggregate_operation_metrics.js
@@ -20,20 +20,28 @@ rst.startSet();
rst.initiate();
let assertMetricsExist = function(metrics) {
- assert.neq(metrics, undefined);
- let primaryMetrics = metrics.primaryMetrics;
- let secondaryMetrics = metrics.secondaryMetrics;
- [primaryMetrics, secondaryMetrics].forEach((readMetrics) => {
- assert.gte(readMetrics.docBytesRead, 0);
- assert.gte(readMetrics.docUnitsRead, 0);
- assert.gte(readMetrics.idxEntriesRead, 0);
- assert.gte(readMetrics.keysSorted, 0);
- });
-
- assert.gte(metrics.cpuMillis, 0);
- assert.gte(metrics.docBytesWritten, 0);
- assert.gte(metrics.docUnitsWritten, 0);
- assert.gte(metrics.docUnitsReturned, 0);
+ try {
+ assert.neq(metrics, undefined);
+ let primaryMetrics = metrics.primaryMetrics;
+ let secondaryMetrics = metrics.secondaryMetrics;
+ [primaryMetrics, secondaryMetrics].forEach((readMetrics) => {
+ assert.gte(readMetrics.docBytesRead, 0);
+ assert.gte(readMetrics.docUnitsRead, 0);
+ assert.gte(readMetrics.idxEntryBytesRead, 0);
+ assert.gte(readMetrics.idxEntryUnitsRead, 0);
+ assert.gte(readMetrics.keysSorted, 0);
+ assert.gte(readMetrics.docUnitsReturned, 0);
+ });
+
+ assert.gte(metrics.cpuMillis, 0);
+ assert.gte(metrics.docBytesWritten, 0);
+ assert.gte(metrics.docUnitsWritten, 0);
+ assert.gte(metrics.idxEntryBytesWritten, 0);
+ assert.gte(metrics.idxEntryUnitsWritten, 0);
+ } catch (e) {
+ print("caught exception while checking metrics output: " + tojson(metrics));
+ throw e;
+ }
};
let getDBMetrics = (adminDB) => {
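
The getDBMetrics helper whose first line closes the hunk above is not shown in full; a plausible sketch, assuming the $operationMetrics aggregation stage on the admin database emits one document per database with a db field naming it:

    let getDBMetrics = (adminDB) => {
        const cursor = adminDB.aggregate([{$operationMetrics: {}}]);
        let allMetrics = {};
        while (cursor.hasNext()) {
            const doc = cursor.next();
            allMetrics[doc.db] = doc;  // keyed by the assumed 'db' field
        }
        return allMetrics;
    };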
diff --git a/src/mongo/db/stats/operation_resource_consumption.idl b/src/mongo/db/stats/operation_resource_consumption.idl
index 63e71cee4f8..e19746fde36 100644
--- a/src/mongo/db/stats/operation_resource_consumption.idl
+++ b/src/mongo/db/stats/operation_resource_consumption.idl
@@ -53,4 +53,14 @@ server_parameters:
cpp_vartype: int32_t
default: 128
validator:
- gte: 1
\ No newline at end of file
+ gte: 1
+
+ indexEntryUnitSizeBytes:
+ description: "The size of an index entry unit for resource consumption metrics collection"
+ set_at:
+ - startup
+ cpp_varname: gIndexEntryUnitSizeBytes
+ cpp_vartype: int32_t
+ default: 16
+ validator:
+ gte: 1
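
Since set_at lists only startup, indexEntryUnitSizeBytes cannot be changed on a running server. A minimal jstest-style sketch of overriding and verifying it (the value 32 is illustrative):

    const conn = MongoRunner.runMongod({setParameter: {indexEntryUnitSizeBytes: 32}});
    const res = assert.commandWorked(conn.getDB("admin").runCommand(
        {getParameter: 1, indexEntryUnitSizeBytes: 1}));
    assert.eq(res.indexEntryUnitSizeBytes, 32);
    MongoRunner.stopMongod(conn);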
diff --git a/src/mongo/db/stats/resource_consumption_metrics.cpp b/src/mongo/db/stats/resource_consumption_metrics.cpp
index 91f3a6d980b..362ec248703 100644
--- a/src/mongo/db/stats/resource_consumption_metrics.cpp
+++ b/src/mongo/db/stats/resource_consumption_metrics.cpp
@@ -48,11 +48,14 @@ static const char kPrimaryMetrics[] = "primaryMetrics";
static const char kSecondaryMetrics[] = "secondaryMetrics";
static const char kDocBytesRead[] = "docBytesRead";
static const char kDocUnitsRead[] = "docUnitsRead";
-static const char kIdxEntriesRead[] = "idxEntriesRead";
+static const char kIdxEntryBytesRead[] = "idxEntryBytesRead";
+static const char kIdxEntryUnitsRead[] = "idxEntryUnitsRead";
static const char kKeysSorted[] = "keysSorted";
static const char kCpuMillis[] = "cpuMillis";
static const char kDocBytesWritten[] = "docBytesWritten";
static const char kDocUnitsWritten[] = "docUnitsWritten";
+static const char kIdxEntryBytesWritten[] = "idxEntryBytesWritten";
+static const char kIdxEntryUnitsWritten[] = "idxEntryUnitsWritten";
static const char kDocUnitsReturned[] = "docUnitsReturned";
inline void appendNonZeroMetric(BSONObjBuilder* builder, const char* name, long long value) {
@@ -90,8 +93,10 @@ void ResourceConsumption::Metrics::toBson(BSONObjBuilder* builder) const {
BSONObjBuilder primaryBuilder = builder->subobjStart(kPrimaryMetrics);
primaryBuilder.appendNumber(kDocBytesRead, primaryMetrics.docBytesRead);
primaryBuilder.appendNumber(kDocUnitsRead, primaryMetrics.docUnitsRead);
- primaryBuilder.appendNumber(kIdxEntriesRead, primaryMetrics.idxEntriesRead);
+ primaryBuilder.appendNumber(kIdxEntryBytesRead, primaryMetrics.idxEntryBytesRead);
+ primaryBuilder.appendNumber(kIdxEntryUnitsRead, primaryMetrics.idxEntryUnitsRead);
primaryBuilder.appendNumber(kKeysSorted, primaryMetrics.keysSorted);
+ primaryBuilder.appendNumber(kDocUnitsReturned, primaryMetrics.docUnitsReturned);
primaryBuilder.done();
}
@@ -99,15 +104,18 @@ void ResourceConsumption::Metrics::toBson(BSONObjBuilder* builder) const {
BSONObjBuilder secondaryBuilder = builder->subobjStart(kSecondaryMetrics);
secondaryBuilder.appendNumber(kDocBytesRead, secondaryMetrics.docBytesRead);
secondaryBuilder.appendNumber(kDocUnitsRead, secondaryMetrics.docUnitsRead);
- secondaryBuilder.appendNumber(kIdxEntriesRead, secondaryMetrics.idxEntriesRead);
+ secondaryBuilder.appendNumber(kIdxEntryBytesRead, secondaryMetrics.idxEntryBytesRead);
+ secondaryBuilder.appendNumber(kIdxEntryUnitsRead, secondaryMetrics.idxEntryUnitsRead);
secondaryBuilder.appendNumber(kKeysSorted, secondaryMetrics.keysSorted);
+ secondaryBuilder.appendNumber(kDocUnitsReturned, secondaryMetrics.docUnitsReturned);
secondaryBuilder.done();
}
builder->appendNumber(kCpuMillis, cpuMillis);
builder->appendNumber(kDocBytesWritten, docBytesWritten);
builder->appendNumber(kDocUnitsWritten, docUnitsWritten);
- builder->appendNumber(kDocUnitsReturned, docUnitsReturned);
+ builder->appendNumber(kIdxEntryBytesWritten, idxEntryBytesWritten);
+ builder->appendNumber(kIdxEntryUnitsWritten, idxEntryUnitsWritten);
}
void ResourceConsumption::Metrics::toFlatBsonAllFields(BSONObjBuilder* builder) const {
@@ -115,13 +123,16 @@ void ResourceConsumption::Metrics::toFlatBsonAllFields(BSONObjBuilder* builder)
auto readMetrics = primaryMetrics + secondaryMetrics;
builder->appendNumber(kDocBytesRead, readMetrics.docBytesRead);
builder->appendNumber(kDocUnitsRead, readMetrics.docUnitsRead);
- builder->appendNumber(kIdxEntriesRead, readMetrics.idxEntriesRead);
+ builder->appendNumber(kIdxEntryBytesRead, readMetrics.idxEntryBytesRead);
+ builder->appendNumber(kIdxEntryUnitsRead, readMetrics.idxEntryUnitsRead);
builder->appendNumber(kKeysSorted, readMetrics.keysSorted);
+ builder->appendNumber(kDocUnitsReturned, readMetrics.docUnitsReturned);
builder->appendNumber(kCpuMillis, cpuMillis);
builder->appendNumber(kDocBytesWritten, docBytesWritten);
builder->appendNumber(kDocUnitsWritten, docUnitsWritten);
- builder->appendNumber(kDocUnitsReturned, docUnitsReturned);
+ builder->appendNumber(kIdxEntryBytesWritten, idxEntryBytesWritten);
+ builder->appendNumber(kIdxEntryUnitsWritten, idxEntryUnitsWritten);
}
void ResourceConsumption::Metrics::toFlatBsonNonZeroFields(BSONObjBuilder* builder) const {
@@ -129,13 +140,16 @@ void ResourceConsumption::Metrics::toFlatBsonNonZeroFields(BSONObjBuilder* build
auto readMetrics = primaryMetrics + secondaryMetrics;
appendNonZeroMetric(builder, kDocBytesRead, readMetrics.docBytesRead);
appendNonZeroMetric(builder, kDocUnitsRead, readMetrics.docUnitsRead);
- appendNonZeroMetric(builder, kIdxEntriesRead, readMetrics.idxEntriesRead);
+ appendNonZeroMetric(builder, kIdxEntryBytesRead, readMetrics.idxEntryBytesRead);
+ appendNonZeroMetric(builder, kIdxEntryUnitsRead, readMetrics.idxEntryUnitsRead);
appendNonZeroMetric(builder, kKeysSorted, readMetrics.keysSorted);
+ appendNonZeroMetric(builder, kDocUnitsReturned, readMetrics.docUnitsReturned);
appendNonZeroMetric(builder, kCpuMillis, cpuMillis);
appendNonZeroMetric(builder, kDocBytesWritten, docBytesWritten);
appendNonZeroMetric(builder, kDocUnitsWritten, docUnitsWritten);
- appendNonZeroMetric(builder, kDocUnitsReturned, docUnitsReturned);
+ appendNonZeroMetric(builder, kIdxEntryBytesWritten, idxEntryBytesWritten);
+ appendNonZeroMetric(builder, kIdxEntryUnitsWritten, idxEntryUnitsWritten);
}
template <typename Func>
@@ -171,17 +185,27 @@ void ResourceConsumption::MetricsCollector::incrementOneDocRead(OperationContext
});
}
-void ResourceConsumption::MetricsCollector::incrementIdxEntriesRead(OperationContext* opCtx,
- size_t idxEntriesRead) {
- _updateReadMetrics(
- opCtx, [&](ReadMetrics& readMetrics) { readMetrics.idxEntriesRead += idxEntriesRead; });
+void ResourceConsumption::MetricsCollector::incrementOneIdxEntryRead(OperationContext* opCtx,
+ size_t bytesRead) {
+ _updateReadMetrics(opCtx, [&](ReadMetrics& readMetrics) {
+ size_t units = std::ceil(bytesRead / static_cast<float>(gIndexEntryUnitSizeBytes));
+ readMetrics.idxEntryBytesRead += bytesRead;
+ readMetrics.idxEntryUnitsRead += units;
+ });
}
+
void ResourceConsumption::MetricsCollector::incrementKeysSorted(OperationContext* opCtx,
size_t keysSorted) {
_updateReadMetrics(opCtx,
[&](ReadMetrics& readMetrics) { readMetrics.keysSorted += keysSorted; });
}
+void ResourceConsumption::MetricsCollector::incrementDocUnitsReturned(OperationContext* opCtx,
+ size_t returned) {
+ _updateReadMetrics(opCtx,
+ [&](ReadMetrics& readMetrics) { readMetrics.docUnitsReturned += returned; });
+}
+
void ResourceConsumption::MetricsCollector::incrementOneDocWritten(size_t bytesWritten) {
_doIfCollecting([&] {
size_t docUnits = std::ceil(bytesWritten / static_cast<float>(gDocumentUnitSizeBytes));
@@ -190,12 +214,16 @@ void ResourceConsumption::MetricsCollector::incrementOneDocWritten(size_t bytesW
});
}
-void ResourceConsumption::MetricsCollector::incrementCpuMillis(size_t cpuMillis) {
- _doIfCollecting([&] { _metrics.cpuMillis += cpuMillis; });
+void ResourceConsumption::MetricsCollector::incrementOneIdxEntryWritten(size_t bytesWritten) {
+ _doIfCollecting([&] {
+ size_t idxUnits = std::ceil(bytesWritten / static_cast<float>(gIndexEntryUnitSizeBytes));
+ _metrics.idxEntryBytesWritten += bytesWritten;
+ _metrics.idxEntryUnitsWritten += idxUnits;
+ });
}
-void ResourceConsumption::MetricsCollector::incrementDocUnitsReturned(size_t returned) {
- _doIfCollecting([&] { _metrics.docUnitsReturned += returned; });
+void ResourceConsumption::MetricsCollector::incrementCpuMillis(size_t cpuMillis) {
+ _doIfCollecting([&] { _metrics.cpuMillis += cpuMillis; });
}
ResourceConsumption::ScopedMetricsCollector::ScopedMetricsCollector(OperationContext* opCtx,
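
Note that incrementOneIdxEntryRead and incrementOneIdxEntryWritten above round up per entry, so many small entries cost more units than the same byte total in a single entry. A quick illustration of that consequence:

    const units = (bytes) => Math.ceil(bytes / 16);  // default unit size
    // Four 8-byte entries cost 4 units; one 32-byte entry costs only 2.
    assert.eq([8, 8, 8, 8].map(units).reduce((a, b) => a + b, 0), 4);
    assert.eq(units(32), 2);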
diff --git a/src/mongo/db/stats/resource_consumption_metrics.h b/src/mongo/db/stats/resource_consumption_metrics.h
index aa2221b6dea..2968619ee1c 100644
--- a/src/mongo/db/stats/resource_consumption_metrics.h
+++ b/src/mongo/db/stats/resource_consumption_metrics.h
@@ -53,8 +53,10 @@ public:
void add(const ReadMetrics& other) {
docBytesRead += other.docBytesRead;
docUnitsRead += other.docUnitsRead;
- idxEntriesRead += other.idxEntriesRead;
+ idxEntryBytesRead += other.idxEntryBytesRead;
+ idxEntryUnitsRead += other.idxEntryUnitsRead;
keysSorted += other.keysSorted;
+ docUnitsReturned += other.docUnitsReturned;
}
ReadMetrics operator+(const ReadMetrics& other) const {
@@ -72,10 +74,14 @@ public:
long long docBytesRead;
// Number of document units read
long long docUnitsRead;
- // Number of index entries read
- long long idxEntriesRead;
+ // Number of index entry bytes read
+ long long idxEntryBytesRead;
+ // Number of index entry units read
+ long long idxEntryUnitsRead;
// Number of keys sorted for query operations
long long keysSorted;
+ // Number of document units returned by a query
+ long long docUnitsReturned;
};
/**
@@ -92,7 +98,8 @@ public:
cpuMillis += other.cpuMillis;
docBytesWritten += other.docBytesWritten;
docUnitsWritten += other.docUnitsWritten;
- docUnitsReturned += other.docUnitsReturned;
+ idxEntryBytesWritten += other.idxEntryBytesWritten;
+ idxEntryUnitsWritten += other.idxEntryUnitsWritten;
};
Metrics& operator+=(const Metrics& other) {
@@ -110,8 +117,10 @@ public:
long long docBytesWritten;
// Number of document units written
long long docUnitsWritten;
- // Number of document units returned by a query.
- long long docUnitsReturned;
+ // Number of index entry bytes written
+ long long idxEntryBytesWritten;
+ // Number of index entry units written
+ long long idxEntryUnitsWritten;
/**
* Reports all metrics on a BSONObjectBuilder. The generated object has nested fields to
@@ -221,8 +230,15 @@ public:
*/
void incrementOneDocRead(OperationContext* opCtx, size_t docBytesRead);
- void incrementIdxEntriesRead(OperationContext* opCtx, size_t idxEntriesRead);
+ /**
+ * This should be called once per index entry read with the number of bytes read for that
+ * entry. This is replication-state aware and increments the metric based on the current
+ * replication state. This is a no-op when metrics collection is disabled on this operation.
+ */
+ void incrementOneIdxEntryRead(OperationContext* opCtx, size_t idxEntryBytesRead);
+
void incrementKeysSorted(OperationContext* opCtx, size_t keysSorted);
+ void incrementDocUnitsReturned(OperationContext* opCtx, size_t docUnitsReturned);
/**
* This should be called once per document written with the number of bytes written for that
@@ -231,8 +247,14 @@ public:
*/
void incrementOneDocWritten(size_t docBytesWritten);
+ /**
+ * This should be called once per index entry written with the number of bytes written for
+ * that entry. This increments the metric independent of replication state, and only when
+ * metrics collection is enabled for this operation.
+ */
+ void incrementOneIdxEntryWritten(size_t idxEntryBytesWritten);
+
void incrementCpuMillis(size_t cpuMillis);
- void incrementDocUnitsReturned(size_t docUnitsReturned);
private:
/**
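
For orientation, ReadMetrics::add above is what lets the primary and secondary read counters be summed for the flat reports in the earlier .cpp hunks; an illustrative shell-JS equivalent of that merge, with the field list taken from the header:

    const addReadMetrics = (a, b) => ({
        docBytesRead: a.docBytesRead + b.docBytesRead,
        docUnitsRead: a.docUnitsRead + b.docUnitsRead,
        idxEntryBytesRead: a.idxEntryBytesRead + b.idxEntryBytesRead,
        idxEntryUnitsRead: a.idxEntryUnitsRead + b.idxEntryUnitsRead,
        keysSorted: a.keysSorted + b.keysSorted,
        docUnitsReturned: a.docUnitsReturned + b.docUnitsReturned,
    });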
diff --git a/src/mongo/db/stats/resource_consumption_metrics_test.cpp b/src/mongo/db/stats/resource_consumption_metrics_test.cpp
index 8f34be4f2b3..79f23103a29 100644
--- a/src/mongo/db/stats/resource_consumption_metrics_test.cpp
+++ b/src/mongo/db/stats/resource_consumption_metrics_test.cpp
@@ -56,6 +56,7 @@ public:
ASSERT_OK(getServerParameter("measureOperationResourceConsumption")->setFromString("true"));
gAggregateOperationResourceConsumptionMetrics = true;
gDocumentUnitSizeBytes = 128;
+ gIndexEntryUnitSizeBytes = 16;
auto svcCtx = getServiceContext();
auto replCoord = std::make_unique<repl::ReplicationCoordinatorMock>(svcCtx);
@@ -217,8 +218,9 @@ TEST_F(ResourceConsumptionMetricsTest, IncrementReadMetrics) {
ResourceConsumption::ScopedMetricsCollector scope(_opCtx.get(), "db1");
operationMetrics.incrementOneDocRead(_opCtx.get(), 2);
- operationMetrics.incrementIdxEntriesRead(_opCtx.get(), 8);
+ operationMetrics.incrementOneIdxEntryRead(_opCtx.get(), 8);
operationMetrics.incrementKeysSorted(_opCtx.get(), 16);
+ operationMetrics.incrementDocUnitsReturned(_opCtx.get(), 32);
}
ASSERT(operationMetrics.hasCollectedMetrics());
@@ -226,8 +228,10 @@ TEST_F(ResourceConsumptionMetricsTest, IncrementReadMetrics) {
auto metricsCopy = globalResourceConsumption.getMetrics();
ASSERT_EQ(metricsCopy["db1"].primaryMetrics.docBytesRead, 2);
ASSERT_EQ(metricsCopy["db1"].primaryMetrics.docUnitsRead, 1);
- ASSERT_EQ(metricsCopy["db1"].primaryMetrics.idxEntriesRead, 8);
+ ASSERT_EQ(metricsCopy["db1"].primaryMetrics.idxEntryBytesRead, 8);
+ ASSERT_EQ(metricsCopy["db1"].primaryMetrics.idxEntryUnitsRead, 1);
ASSERT_EQ(metricsCopy["db1"].primaryMetrics.keysSorted, 16);
+ ASSERT_EQ(metricsCopy["db1"].primaryMetrics.docUnitsReturned, 32);
// Clear metrics so we do not double-count.
operationMetrics.reset();
@@ -236,15 +240,18 @@ TEST_F(ResourceConsumptionMetricsTest, IncrementReadMetrics) {
ResourceConsumption::ScopedMetricsCollector scope(_opCtx.get(), "db1");
operationMetrics.incrementOneDocRead(_opCtx.get(), 32);
- operationMetrics.incrementIdxEntriesRead(_opCtx.get(), 128);
+ operationMetrics.incrementOneIdxEntryRead(_opCtx.get(), 128);
operationMetrics.incrementKeysSorted(_opCtx.get(), 256);
+ operationMetrics.incrementDocUnitsReturned(_opCtx.get(), 512);
}
metricsCopy = globalResourceConsumption.getMetrics();
ASSERT_EQ(metricsCopy["db1"].primaryMetrics.docBytesRead, 2 + 32);
ASSERT_EQ(metricsCopy["db1"].primaryMetrics.docUnitsRead, 2);
- ASSERT_EQ(metricsCopy["db1"].primaryMetrics.idxEntriesRead, 8 + 128);
+ ASSERT_EQ(metricsCopy["db1"].primaryMetrics.idxEntryBytesRead, 8 + 128);
+ ASSERT_EQ(metricsCopy["db1"].primaryMetrics.idxEntryUnitsRead, 1 + 8);
ASSERT_EQ(metricsCopy["db1"].primaryMetrics.keysSorted, 16 + 256);
+ ASSERT_EQ(metricsCopy["db1"].primaryMetrics.docUnitsReturned, 32 + 512);
}
TEST_F(ResourceConsumptionMetricsTest, IncrementReadMetricsSecondary) {
@@ -258,15 +265,18 @@ TEST_F(ResourceConsumptionMetricsTest, IncrementReadMetricsSecondary) {
ResourceConsumption::ScopedMetricsCollector scope(_opCtx.get(), "db1");
operationMetrics.incrementOneDocRead(_opCtx.get(), 2);
- operationMetrics.incrementIdxEntriesRead(_opCtx.get(), 8);
+ operationMetrics.incrementOneIdxEntryRead(_opCtx.get(), 8);
operationMetrics.incrementKeysSorted(_opCtx.get(), 16);
+ operationMetrics.incrementDocUnitsReturned(_opCtx.get(), 32);
}
auto metricsCopy = globalResourceConsumption.getMetrics();
ASSERT_EQ(metricsCopy["db1"].secondaryMetrics.docBytesRead, 2);
ASSERT_EQ(metricsCopy["db1"].secondaryMetrics.docUnitsRead, 1);
- ASSERT_EQ(metricsCopy["db1"].secondaryMetrics.idxEntriesRead, 8);
+ ASSERT_EQ(metricsCopy["db1"].secondaryMetrics.idxEntryBytesRead, 8);
+ ASSERT_EQ(metricsCopy["db1"].secondaryMetrics.idxEntryUnitsRead, 1);
ASSERT_EQ(metricsCopy["db1"].secondaryMetrics.keysSorted, 16);
+ ASSERT_EQ(metricsCopy["db1"].secondaryMetrics.docUnitsReturned, 32);
// Clear metrics so we do not double-count.
operationMetrics.reset();
@@ -275,15 +285,18 @@ TEST_F(ResourceConsumptionMetricsTest, IncrementReadMetricsSecondary) {
ResourceConsumption::ScopedMetricsCollector scope(_opCtx.get(), "db1");
operationMetrics.incrementOneDocRead(_opCtx.get(), 32);
- operationMetrics.incrementIdxEntriesRead(_opCtx.get(), 128);
+ operationMetrics.incrementOneIdxEntryRead(_opCtx.get(), 128);
operationMetrics.incrementKeysSorted(_opCtx.get(), 256);
+ operationMetrics.incrementDocUnitsReturned(_opCtx.get(), 512);
}
metricsCopy = globalResourceConsumption.getMetrics();
ASSERT_EQ(metricsCopy["db1"].secondaryMetrics.docBytesRead, 2 + 32);
ASSERT_EQ(metricsCopy["db1"].secondaryMetrics.docUnitsRead, 2);
- ASSERT_EQ(metricsCopy["db1"].secondaryMetrics.idxEntriesRead, 8 + 128);
+ ASSERT_EQ(metricsCopy["db1"].secondaryMetrics.idxEntryBytesRead, 8 + 128);
+ ASSERT_EQ(metricsCopy["db1"].secondaryMetrics.idxEntryUnitsRead, 1 + 8);
ASSERT_EQ(metricsCopy["db1"].secondaryMetrics.keysSorted, 16 + 256);
+ ASSERT_EQ(metricsCopy["db1"].secondaryMetrics.docUnitsReturned, 32 + 512);
}
TEST_F(ResourceConsumptionMetricsTest, DocumentUnitsRead) {
@@ -355,4 +368,102 @@ TEST_F(ResourceConsumptionMetricsTest, DocumentUnitsWritten) {
ASSERT_EQ(metricsCopy["db1"].docBytesWritten, expectedBytes);
ASSERT_EQ(metricsCopy["db1"].docUnitsWritten, expectedUnits);
}
+
+TEST_F(ResourceConsumptionMetricsTest, IdxEntryUnitsRead) {
+ auto& globalResourceConsumption = ResourceConsumption::get(getServiceContext());
+ auto& operationMetrics = ResourceConsumption::MetricsCollector::get(_opCtx.get());
+
+ int expectedBytes = 0;
+ int expectedUnits = 0;
+
+ {
+ ResourceConsumption::ScopedMetricsCollector scope(_opCtx.get(), "db1");
+
+ gIndexEntryUnitSizeBytes = 16;
+
+ // Each of these should be counted as 1 index entry unit.
+ operationMetrics.incrementOneIdxEntryRead(_opCtx.get(), 2);
+ operationMetrics.incrementOneIdxEntryRead(_opCtx.get(), 4);
+ operationMetrics.incrementOneIdxEntryRead(_opCtx.get(), 8);
+ operationMetrics.incrementOneIdxEntryRead(_opCtx.get(), 16);
+ expectedBytes += 2 + 4 + 8 + 16;
+ expectedUnits += 4;
+
+ // Each of these should be counted as 2 index entry units.
+ operationMetrics.incrementOneIdxEntryRead(_opCtx.get(), 17);
+ operationMetrics.incrementOneIdxEntryRead(_opCtx.get(), 31);
+ operationMetrics.incrementOneIdxEntryRead(_opCtx.get(), 32);
+ expectedBytes += 17 + 31 + 32;
+ expectedUnits += 6;
+
+ gIndexEntryUnitSizeBytes = 32;
+
+ // Each of these should be counted as 1 index entry unit.
+ operationMetrics.incrementOneIdxEntryRead(_opCtx.get(), 17);
+ operationMetrics.incrementOneIdxEntryRead(_opCtx.get(), 31);
+ operationMetrics.incrementOneIdxEntryRead(_opCtx.get(), 32);
+ expectedBytes += 17 + 31 + 32;
+ expectedUnits += 3;
+
+ // Each of these should be counted as 2 index entry units.
+ operationMetrics.incrementOneIdxEntryRead(_opCtx.get(), 33);
+ operationMetrics.incrementOneIdxEntryRead(_opCtx.get(), 63);
+ operationMetrics.incrementOneIdxEntryRead(_opCtx.get(), 64);
+ expectedBytes += 33 + 63 + 64;
+ expectedUnits += 6;
+ }
+
+ auto metricsCopy = globalResourceConsumption.getMetrics();
+ ASSERT_EQ(metricsCopy["db1"].primaryMetrics.idxEntryBytesRead, expectedBytes);
+ ASSERT_EQ(metricsCopy["db1"].primaryMetrics.idxEntryUnitsRead, expectedUnits);
+}
+
+TEST_F(ResourceConsumptionMetricsTest, IdxEntryUnitsWritten) {
+ auto& globalResourceConsumption = ResourceConsumption::get(getServiceContext());
+ auto& operationMetrics = ResourceConsumption::MetricsCollector::get(_opCtx.get());
+
+ int expectedBytes = 0;
+ int expectedUnits = 0;
+
+ {
+ ResourceConsumption::ScopedMetricsCollector scope(_opCtx.get(), "db1");
+
+ gIndexEntryUnitSizeBytes = 16;
+
+ // Each of these should be counted as 1 index entry unit.
+ operationMetrics.incrementOneIdxEntryWritten(2);
+ operationMetrics.incrementOneIdxEntryWritten(4);
+ operationMetrics.incrementOneIdxEntryWritten(8);
+ operationMetrics.incrementOneIdxEntryWritten(16);
+ expectedBytes += 2 + 4 + 8 + 16;
+ expectedUnits += 4;
+
+ // Each of these should be counted as 2 index entry units.
+ operationMetrics.incrementOneIdxEntryWritten(17);
+ operationMetrics.incrementOneIdxEntryWritten(31);
+ operationMetrics.incrementOneIdxEntryWritten(32);
+ expectedBytes += 17 + 31 + 32;
+ expectedUnits += 6;
+
+ gIndexEntryUnitSizeBytes = 32;
+
+ // Each of these should be counted as 1 index entry unit.
+ operationMetrics.incrementOneIdxEntryWritten(17);
+ operationMetrics.incrementOneIdxEntryWritten(31);
+ operationMetrics.incrementOneIdxEntryWritten(32);
+ expectedBytes += 17 + 31 + 32;
+ expectedUnits += 3;
+
+ // Each of these should be counted as 2 index entry units.
+ operationMetrics.incrementOneIdxEntryWritten(33);
+ operationMetrics.incrementOneIdxEntryWritten(63);
+ operationMetrics.incrementOneIdxEntryWritten(64);
+ expectedBytes += 33 + 63 + 64;
+ expectedUnits += 6;
+ }
+
+ auto metricsCopy = globalResourceConsumption.getMetrics();
+ ASSERT_EQ(metricsCopy["db1"].idxEntryBytesWritten, expectedBytes);
+ ASSERT_EQ(metricsCopy["db1"].idxEntryUnitsWritten, expectedUnits);
+}
} // namespace mongo
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp
index 57ea7db4453..7c0aef75b35 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp
@@ -117,7 +117,7 @@ void WiredTigerIndex::getKey(OperationContext* opCtx, WT_CURSOR* cursor, WT_ITEM
}
auto& metricsCollector = ResourceConsumption::MetricsCollector::get(opCtx);
- metricsCollector.incrementIdxEntriesRead(opCtx, 1);
+ metricsCollector.incrementOneIdxEntryRead(opCtx, key->size);
}
// static
@@ -666,6 +666,9 @@ public:
invariantWTOK(wiredTigerCursorInsert(_opCtx, _cursor));
+ auto& metricsCollector = ResourceConsumption::MetricsCollector::get(_opCtx);
+ metricsCollector.incrementOneIdxEntryWritten(item.size);
+
return Status::OK();
}
@@ -754,6 +757,9 @@ private:
invariantWTOK(wiredTigerCursorInsert(_opCtx, _cursor));
+ auto& metricsCollector = ResourceConsumption::MetricsCollector::get(_opCtx);
+ metricsCollector.incrementOneIdxEntryWritten(keyItem.size);
+
// Don't copy the key again if dups are allowed.
if (!_dupsAllowed)
_previousKeyString.resetFromBuffer(newKeyString.getBuffer(), newKeyString.getSize());
@@ -817,6 +823,8 @@ private:
_cursor->set_value(_cursor, valueItem.Get());
invariantWTOK(wiredTigerCursorInsert(_opCtx, _cursor));
+ auto& metricsCollector = ResourceConsumption::MetricsCollector::get(_opCtx);
+ metricsCollector.incrementOneIdxEntryWritten(keyItem.size);
_records.clear();
}
@@ -1041,7 +1049,7 @@ protected:
}
auto& metricsCollector = ResourceConsumption::MetricsCollector::get(_opCtx);
- metricsCollector.incrementIdxEntriesRead(_opCtx, 1);
+ metricsCollector.incrementOneIdxEntryRead(_opCtx, key->size);
}
bool hasWrongPrefix(WT_CURSOR* cursor) {
@@ -1526,6 +1534,11 @@ Status WiredTigerIndexUnique::_insertTimestampUnsafe(OperationContext* opCtx,
c->set_value(c, valueItem.Get());
int ret = WT_OP_CHECK(wiredTigerCursorInsert(opCtx, c));
+ // Account for the first insert attempt, but do not attempt to account for the complexity of
+ // any subsequent writes.
+ auto& metricsCollector = ResourceConsumption::MetricsCollector::get(opCtx);
+ metricsCollector.incrementOneIdxEntryWritten(keyItem.size);
+
if (ret != WT_DUPLICATE_KEY) {
if (ret == 0) {
return Status::OK();
@@ -1653,6 +1666,11 @@ Status WiredTigerIndexUnique::_insertTimestampSafe(OperationContext* opCtx,
c->set_value(c, valueItem.Get());
ret = WT_OP_CHECK(wiredTigerCursorInsert(opCtx, c));
+ // Account for the actual key insertion, but do not attempt to account for the complexity of any
+ // previous duplicate key detection, which may perform writes.
+ auto& metricsCollector = ResourceConsumption::MetricsCollector::get(opCtx);
+ metricsCollector.incrementOneIdxEntryWritten(keyItem.size);
+
// It is possible that this key is already present during a concurrent background index build.
if (ret != WT_DUPLICATE_KEY)
invariantWTOK(ret);
@@ -1715,6 +1733,12 @@ void WiredTigerIndexUnique::_unindexTimestampUnsafe(OperationContext* opCtx,
fassert(40417, !br.remaining());
}
int ret = WT_OP_CHECK(wiredTigerCursorRemove(opCtx, c));
+
+ // Only account for the actual key removal, but do not attempt to account for the complexity
+ // of any previous or subsequent writes.
+ auto& metricsCollector = ResourceConsumption::MetricsCollector::get(opCtx);
+ metricsCollector.incrementOneIdxEntryWritten(keyItem.size);
+
if (ret == WT_NOTFOUND) {
triggerWriteConflictAtPoint(c);
return;
@@ -1796,6 +1820,12 @@ void WiredTigerIndexUnique::_unindexTimestampSafe(OperationContext* opCtx,
WiredTigerItem item(keyString.getBuffer(), keyString.getSize());
setKey(c, item.Get());
int ret = WT_OP_CHECK(wiredTigerCursorRemove(opCtx, c));
+
+ // Account for the first removal attempt, but do not attempt to account for the complexity of
+ // any subsequent removals and insertions when the index's keys are not fully upgraded.
+ auto& metricsCollector = ResourceConsumption::MetricsCollector::get(opCtx);
+ metricsCollector.incrementOneIdxEntryWritten(item.size);
+
if (ret != WT_NOTFOUND) {
invariantWTOK(ret);
return;
@@ -1863,6 +1893,9 @@ Status WiredTigerIndexStandard::_insert(OperationContext* opCtx,
c->set_value(c, valueItem.Get());
int ret = WT_OP_CHECK(wiredTigerCursorInsert(opCtx, c));
+ auto& metricsCollector = ResourceConsumption::MetricsCollector::get(opCtx);
+ metricsCollector.incrementOneIdxEntryWritten(keyItem.size);
+
// If the record was already in the index, we just return OK.
// This can happen, for example, when building a background index while documents are being
// written and reindexed.
@@ -1880,6 +1913,10 @@ void WiredTigerIndexStandard::_unindex(OperationContext* opCtx,
WiredTigerItem item(keyString.getBuffer(), keyString.getSize());
setKey(c, item.Get());
int ret = WT_OP_CHECK(wiredTigerCursorRemove(opCtx, c));
+
+ auto& metricsCollector = ResourceConsumption::MetricsCollector::get(opCtx);
+ metricsCollector.incrementOneIdxEntryWritten(item.size);
+
if (ret != WT_NOTFOUND) {
invariantWTOK(ret);
} else {
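
Taken together, the hooks above report one write per key inserted or removed, so updating an indexed field on a unique index (remove the old key, insert the new one) counts two entries. A closing recap with illustrative key sizes consistent with the update assertions in the jstest earlier:

    const units = (bytes) => Math.ceil(bytes / 16);
    const oldKey = 4, newKey = 5;                 // illustrative key sizes
    assert.eq(oldKey + newKey, 9);                // cf. idxEntryBytesWritten
    assert.eq(units(oldKey) + units(newKey), 2);  // cf. idxEntryUnitsWritten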