author    Eric Milkie <milkie@10gen.com>    2021-05-03 14:22:15 -0400
committer Evergreen Agent <no-reply@evergreen.mongodb.com>    2021-05-03 22:13:32 +0000
commit    a58447691485ec04d39d7b9d42dc28cd6900da6c (patch)
tree      88a9360a4bcaea1fc5562841bc082e59a60711b2
parent    48531fdbd58769200b279ed5e31c2111376903ac (diff)
SERVER-55556 add new totalUnitsWritten metric; add new localTime field to operationMetrics output (tag: r4.9.0-rc1)
totalUnitsWritten is a metric representing the number of bytes written to a document plus any index entries written for that document before the next document is written; these bytes are then translated into units according to the totalUnitWriteSizeBytes server parameter. Additionally, a new localTime field now appears in every per-database metrics document produced by the $operationMetrics aggregation stage.

(cherry picked from commit 960f5deb14520af3076c9164fbf8b3cbcca0560f)
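As a rough illustration of the unit math described above (not part of the patch), the rounding can be sketched in jstests-style JavaScript; the 128-byte unit size is the default for the totalUnitWriteSizeBytes parameter added below, and the byte counts are illustrative:

// Sketch: a document's bytes, plus any index entry bytes written for it
// before the next document, are rounded up to whole units.
const totalUnitWriteSizeBytes = 128;  // default from the IDL change below
function unitsFor(docBytes, idxBytes) {
    return Math.ceil((docBytes + idxBytes) / totalUnitWriteSizeBytes);
}
assert.eq(unitsFor(29, 3), 1);    // small document + one index key -> 1 unit
assert.eq(unitsFor(129, 0), 2);   // document alone exceeds one unit -> 2 units
assert.eq(unitsFor(29, 300), 3);  // batch-lumped index bytes -> 3 units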
-rw-r--r--jstests/hooks/run_aggregate_metrics_background.js1
-rw-r--r--jstests/noPassthrough/aggregate_operation_metrics.js5
-rw-r--r--jstests/noPassthrough/change_stream_operation_metrics.js10
-rw-r--r--jstests/noPassthrough/initial_sync_operation_metrics.js4
-rw-r--r--jstests/noPassthrough/profile_operation_metrics.js52
-rw-r--r--jstests/noPassthrough/ttl_operation_metrics.js4
-rw-r--r--src/mongo/db/pipeline/document_source_operation_metrics.cpp3
-rw-r--r--src/mongo/db/stats/operation_resource_consumption.idl10
-rw-r--r--src/mongo/db/stats/resource_consumption_metrics.cpp44
-rw-r--r--src/mongo/db/stats/resource_consumption_metrics.h32
-rw-r--r--src/mongo/db/stats/resource_consumption_metrics_test.cpp61
11 files changed, 220 insertions, 6 deletions
diff --git a/jstests/hooks/run_aggregate_metrics_background.js b/jstests/hooks/run_aggregate_metrics_background.js
index e6d53b6a6f0..32335852fcd 100644
--- a/jstests/hooks/run_aggregate_metrics_background.js
+++ b/jstests/hooks/run_aggregate_metrics_background.js
@@ -25,6 +25,7 @@ const aggregateMetricsBackground = function(host) {
"docUnitsWritten",
"idxEntryBytesWritten",
"idxEntryUnitsWritten",
+ "totalUnitsWritten",
"cpuNanos",
"db",
"primaryMetrics",
diff --git a/jstests/noPassthrough/aggregate_operation_metrics.js b/jstests/noPassthrough/aggregate_operation_metrics.js
index 3305daf391b..57b82832ac3 100644
--- a/jstests/noPassthrough/aggregate_operation_metrics.js
+++ b/jstests/noPassthrough/aggregate_operation_metrics.js
@@ -21,6 +21,8 @@ const isLinux = getBuildInfo().buildEnvironment.target_os == "linux";
let assertMetricsExist = function(metrics) {
try {
assert.neq(metrics, undefined);
+ assert(metrics.hasOwnProperty("db"));
+ assert(metrics.hasOwnProperty("localTime"));
let primaryMetrics = metrics.primaryMetrics;
let secondaryMetrics = metrics.secondaryMetrics;
[primaryMetrics, secondaryMetrics].forEach((readMetrics) => {
@@ -39,6 +41,7 @@ let assertMetricsExist = function(metrics) {
assert.gte(metrics.docUnitsWritten, 0);
assert.gte(metrics.idxEntryBytesWritten, 0);
assert.gte(metrics.idxEntryUnitsWritten, 0);
+ assert.gte(metrics.totalUnitsWritten, 0);
} catch (e) {
print("caught exception while checking metrics output: " + tojson(metrics));
throw e;
@@ -250,4 +253,4 @@ const secondary = rst.getSecondary();
});
rst.stopSet();
-}());
\ No newline at end of file
+}());
diff --git a/jstests/noPassthrough/change_stream_operation_metrics.js b/jstests/noPassthrough/change_stream_operation_metrics.js
index a4f64917baf..bf7c75a9e24 100644
--- a/jstests/noPassthrough/change_stream_operation_metrics.js
+++ b/jstests/noPassthrough/change_stream_operation_metrics.js
@@ -81,6 +81,12 @@ assert.commandWorked(primaryDB.createCollection(collName));
assert.eq(metrics[dbName].docBytesWritten, 29 * nDocs);
assert.eq(metrics[dbName].docUnitsWritten, nDocs);
+ // With batch inserts, the index updates are all performed together after all the documents
+ // are inserted, so this has the effect of associating all the index bytes for the batch
+ // with one document, for the purposes of totalUnitsWritten. This effect causes the last
+ // document to have 3 units instead of 1 like the first 99.
+ assert.eq(metrics[dbName].totalUnitsWritten, nDocs + 2);
+
// The inserted keys will vary in size from 2 to 4 bytes depending on their value. Assert
// that the number of bytes fall within that range.
assert.gt(metrics[dbName].idxEntryBytesWritten, 2 * nDocs);
@@ -133,6 +139,7 @@ let nextId = nDocs;
assert.eq(metrics[dbName].docUnitsWritten, 1);
assert.eq(metrics[dbName].idxEntryBytesWritten, 3);
assert.eq(metrics[dbName].idxEntryUnitsWritten, 1);
+ assert.eq(metrics[dbName].totalUnitsWritten, 1);
assert.eq(metrics[dbName].primaryMetrics.docBytesRead, 0);
assert.eq(metrics[dbName].primaryMetrics.docUnitsRead, 0);
assert.eq(metrics[dbName].primaryMetrics.cursorSeeks, 0);
@@ -166,6 +173,7 @@ let nextId = nDocs;
assertMetrics(primary, (metrics) => {
assert.eq(metrics[dbName].docBytesWritten, 40);
assert.eq(metrics[dbName].docUnitsWritten, 1);
+ assert.eq(metrics[dbName].totalUnitsWritten, 1);
assert.eq(metrics[dbName].primaryMetrics.docBytesRead, 29);
assert.eq(metrics[dbName].primaryMetrics.docUnitsRead, 1);
assert.eq(metrics[dbName].primaryMetrics.idxEntryBytesRead, 3);
@@ -285,4 +293,4 @@ let nextId = nDocs;
});
})();
rst.stopSet();
-}());
\ No newline at end of file
+}());
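The nDocs + 2 expectation in the hunk above follows from the same unit math. Assuming roughly 300 bytes of index keys for the batch (the 2-4 byte keys are not itemized in this diff), a hedged arithmetic sketch:

// The first 99 documents are charged no index bytes -> 1 unit each;
// the last document absorbs the batch's accumulated index bytes.
const nDocs = 100;
const firstDocs = (nDocs - 1) * Math.ceil(29 / 128);  // 99 units
const lastDoc = Math.ceil((29 + 300) / 128);          // 3 units
assert.eq(firstDocs + lastDoc, nDocs + 2);            // 102 total units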
diff --git a/jstests/noPassthrough/initial_sync_operation_metrics.js b/jstests/noPassthrough/initial_sync_operation_metrics.js
index 2a4d7452080..c144780ad68 100644
--- a/jstests/noPassthrough/initial_sync_operation_metrics.js
+++ b/jstests/noPassthrough/initial_sync_operation_metrics.js
@@ -44,6 +44,8 @@ const getDBMetrics = (adminDB) => {
let allMetrics = {};
while (cursor.hasNext()) {
let doc = cursor.next();
+ // Remove the localTime field; it always changes, which would prevent comparing the metrics objects.
+ delete doc.localTime;
allMetrics[doc.db] = doc;
}
return allMetrics;
@@ -94,4 +96,4 @@ replSet.awaitReplication();
}
replSet.stopSet();
-})();
\ No newline at end of file
+})();
diff --git a/jstests/noPassthrough/profile_operation_metrics.js b/jstests/noPassthrough/profile_operation_metrics.js
index 0ed01728a91..d905d89ebac 100644
--- a/jstests/noPassthrough/profile_operation_metrics.js
+++ b/jstests/noPassthrough/profile_operation_metrics.js
@@ -46,6 +46,7 @@ const assertMetricsExist = (profilerEntry) => {
assert.gte(metrics.docUnitsWritten, 0);
assert.gte(metrics.idxEntryBytesWritten, 0);
assert.gte(metrics.idxEntryUnitsWritten, 0);
+ assert.gte(metrics.totalUnitsWritten, 0);
};
const runInLegacyQueryMode = (db, func) => {
@@ -96,6 +97,7 @@ const operations = [
assert.gt(profileDoc.docUnitsWritten, 0);
assert.eq(profileDoc.idxEntryBytesWritten, 0);
assert.eq(profileDoc.idxEntryUnitsWritten, 0);
+ assert.gt(profileDoc.totalUnitsWritten, 0);
assert.gt(profileDoc.cursorSeeks, 0);
assert.eq(profileDoc.keysSorted, 0);
assert.eq(profileDoc.sorterSpills, 0);
@@ -120,6 +122,7 @@ const operations = [
assert.gt(profileDoc.docUnitsWritten, 0);
assert.eq(profileDoc.idxEntryBytesWritten, 0);
assert.eq(profileDoc.idxEntryUnitsWritten, 0);
+ assert.gt(profileDoc.totalUnitsWritten, 0);
assert.gt(profileDoc.cursorSeeks, 0);
assert.eq(profileDoc.keysSorted, 0);
assert.eq(profileDoc.sorterSpills, 0);
@@ -144,6 +147,7 @@ const operations = [
assert.eq(profileDoc.docUnitsWritten, 0);
assert.eq(profileDoc.idxEntryBytesWritten, 0);
assert.eq(profileDoc.idxEntryUnitsWritten, 0);
+ assert.eq(profileDoc.totalUnitsWritten, 0);
assert.eq(profileDoc.keysSorted, 0);
assert.eq(profileDoc.sorterSpills, 0);
assert.eq(profileDoc.docUnitsReturned, 0);
@@ -166,6 +170,7 @@ const operations = [
assert.eq(profileDoc.docUnitsWritten, 1);
assert.eq(profileDoc.idxEntryBytesWritten, 7);
assert.eq(profileDoc.idxEntryUnitsWritten, 2);
+ assert.eq(profileDoc.totalUnitsWritten, 1);
assert.eq(profileDoc.keysSorted, 0);
assert.eq(profileDoc.sorterSpills, 0);
assert.eq(profileDoc.docUnitsReturned, 0);
@@ -199,6 +204,7 @@ const operations = [
assert.eq(profileDoc.docUnitsWritten, 0);
assert.eq(profileDoc.idxEntryBytesWritten, 0);
assert.eq(profileDoc.idxEntryUnitsWritten, 0);
+ assert.eq(profileDoc.totalUnitsWritten, 0);
assert.eq(profileDoc.keysSorted, 0);
assert.eq(profileDoc.sorterSpills, 0);
assert.eq(profileDoc.docUnitsReturned, 1);
@@ -221,6 +227,7 @@ const operations = [
assert.eq(profileDoc.docUnitsWritten, 0);
assert.eq(profileDoc.idxEntryBytesWritten, 0);
assert.eq(profileDoc.idxEntryUnitsWritten, 0);
+ assert.eq(profileDoc.totalUnitsWritten, 0);
assert.eq(profileDoc.keysSorted, 0);
assert.eq(profileDoc.sorterSpills, 0);
assert.eq(profileDoc.docUnitsReturned, 1);
@@ -243,6 +250,7 @@ const operations = [
assert.eq(profileDoc.docUnitsWritten, 0);
assert.eq(profileDoc.idxEntryBytesWritten, 0);
assert.eq(profileDoc.idxEntryUnitsWritten, 0);
+ assert.eq(profileDoc.totalUnitsWritten, 0);
assert.eq(profileDoc.keysSorted, 0);
assert.eq(profileDoc.sorterSpills, 0);
assert.eq(profileDoc.docUnitsReturned, 1);
@@ -265,6 +273,7 @@ const operations = [
assert.eq(profileDoc.docUnitsWritten, 0);
assert.eq(profileDoc.idxEntryBytesWritten, 0);
assert.eq(profileDoc.idxEntryUnitsWritten, 0);
+ assert.eq(profileDoc.totalUnitsWritten, 0);
assert.eq(profileDoc.keysSorted, 0);
assert.eq(profileDoc.sorterSpills, 0);
assert.eq(profileDoc.docUnitsReturned, 0);
@@ -297,6 +306,7 @@ const operations = [
// Deletes one index entry and writes another.
assert.eq(profileDoc.idxEntryBytesWritten, 9);
assert.eq(profileDoc.idxEntryUnitsWritten, 2);
+ assert.eq(profileDoc.totalUnitsWritten, 1);
assert.eq(profileDoc.keysSorted, 0);
assert.eq(profileDoc.sorterSpills, 0);
assert.eq(profileDoc.docUnitsReturned, 1);
@@ -329,6 +339,7 @@ const operations = [
// Deletes one index entry and writes another.
assert.eq(profileDoc.idxEntryBytesWritten, 10);
assert.eq(profileDoc.idxEntryUnitsWritten, 2);
+ assert.eq(profileDoc.totalUnitsWritten, 1);
assert.eq(profileDoc.keysSorted, 0);
assert.eq(profileDoc.sorterSpills, 0);
assert.eq(profileDoc.docUnitsReturned, 0);
@@ -370,6 +381,7 @@ const operations = [
assert.eq(profileDoc.docUnitsWritten, 0);
assert.eq(profileDoc.idxEntryBytesWritten, 0);
assert.eq(profileDoc.idxEntryUnitsWritten, 0);
+ assert.eq(profileDoc.totalUnitsWritten, 0);
assert.eq(profileDoc.keysSorted, 0);
assert.eq(profileDoc.sorterSpills, 0);
assert.eq(profileDoc.docUnitsReturned, 0);
@@ -413,6 +425,7 @@ const operations = [
assert.eq(profileDoc.docUnitsWritten, 0);
assert.eq(profileDoc.idxEntryBytesWritten, 0);
assert.eq(profileDoc.idxEntryUnitsWritten, 0);
+ assert.eq(profileDoc.totalUnitsWritten, 0);
assert.eq(profileDoc.keysSorted, 0);
assert.eq(profileDoc.sorterSpills, 0);
assert.eq(profileDoc.docUnitsReturned, 0);
@@ -435,6 +448,7 @@ const operations = [
assert.gt(profileDoc.docUnitsWritten, 0);
assert.eq(profileDoc.idxEntryBytesWritten, 0);
assert.eq(profileDoc.idxEntryUnitsWritten, 0);
+ assert.gt(profileDoc.totalUnitsWritten, 0);
assert.eq(profileDoc.keysSorted, 0);
assert.eq(profileDoc.sorterSpills, 0);
assert.eq(profileDoc.docUnitsReturned, 0);
@@ -469,6 +483,7 @@ const operations = [
assert.eq(profileDoc.docUnitsWritten, 0);
assert.eq(profileDoc.idxEntryBytesWritten, 0);
assert.eq(profileDoc.idxEntryUnitsWritten, 0);
+ assert.eq(profileDoc.totalUnitsWritten, 0);
assert.eq(profileDoc.keysSorted, 0);
assert.eq(profileDoc.sorterSpills, 0);
assert.eq(profileDoc.docUnitsReturned, 1);
@@ -500,6 +515,7 @@ const operations = [
assert.eq(profileDoc.docUnitsWritten, 1);
assert.eq(profileDoc.idxEntryBytesWritten, 3);
assert.eq(profileDoc.idxEntryUnitsWritten, 1);
+ assert.eq(profileDoc.totalUnitsWritten, 1);
assert.eq(profileDoc.keysSorted, 0);
assert.eq(profileDoc.sorterSpills, 0);
assert.eq(profileDoc.docUnitsReturned, 1);
@@ -528,6 +544,7 @@ const operations = [
assert.eq(profileDoc.docUnitsWritten, 1);
assert.eq(profileDoc.idxEntryBytesWritten, 3);
assert.eq(profileDoc.idxEntryUnitsWritten, 1);
+ assert.eq(profileDoc.totalUnitsWritten, 1);
assert.eq(profileDoc.keysSorted, 0);
assert.eq(profileDoc.sorterSpills, 0);
assert.eq(profileDoc.docUnitsReturned, 0);
@@ -556,6 +573,7 @@ const operations = [
assert.eq(profileDoc.docUnitsWritten, 1);
assert.eq(profileDoc.idxEntryBytesWritten, 3);
assert.eq(profileDoc.idxEntryUnitsWritten, 1);
+ assert.eq(profileDoc.totalUnitsWritten, 1);
assert.eq(profileDoc.docUnitsReturned, 0);
}
},
@@ -576,6 +594,7 @@ const operations = [
assert.gt(profileDoc.docUnitsWritten, 0);
assert.eq(profileDoc.idxEntryBytesWritten, 0);
assert.eq(profileDoc.idxEntryUnitsWritten, 0);
+ assert.gt(profileDoc.totalUnitsWritten, 0);
assert.eq(profileDoc.keysSorted, 0);
assert.eq(profileDoc.sorterSpills, 0);
assert.eq(profileDoc.docUnitsReturned, 0);
@@ -604,6 +623,7 @@ const operations = [
assert.eq(profileDoc.docUnitsWritten, 0);
assert.eq(profileDoc.idxEntryBytesWritten, 0);
assert.eq(profileDoc.idxEntryUnitsWritten, 0);
+ assert.eq(profileDoc.totalUnitsWritten, 0);
assert.eq(profileDoc.keysSorted, 0);
assert.eq(profileDoc.sorterSpills, 0);
assert.eq(profileDoc.docUnitsReturned, 5);
@@ -628,6 +648,7 @@ const operations = [
assert.eq(profileDoc.docUnitsWritten, 0);
assert.eq(profileDoc.idxEntryBytesWritten, 0);
assert.eq(profileDoc.idxEntryUnitsWritten, 0);
+ assert.eq(profileDoc.totalUnitsWritten, 0);
assert.eq(profileDoc.keysSorted, 150);
assert.eq(profileDoc.sorterSpills, 0);
assert.eq(profileDoc.docUnitsReturned, 10);
@@ -653,6 +674,7 @@ const operations = [
assert.eq(profileDoc.docUnitsWritten, 0);
assert.eq(profileDoc.idxEntryBytesWritten, 0);
assert.eq(profileDoc.idxEntryUnitsWritten, 0);
+ assert.eq(profileDoc.totalUnitsWritten, 0);
assert.eq(profileDoc.keysSorted, 0);
assert.eq(profileDoc.sorterSpills, 0);
assert.eq(profileDoc.docUnitsReturned, 0);
@@ -678,6 +700,7 @@ const operations = [
// Deletes one entry and writes another.
assert.eq(profileDoc.idxEntryBytesWritten, 10);
assert.eq(profileDoc.idxEntryUnitsWritten, 2);
+ assert.eq(profileDoc.totalUnitsWritten, 1);
assert.eq(profileDoc.keysSorted, 0);
assert.eq(profileDoc.sorterSpills, 0);
assert.eq(profileDoc.docUnitsReturned, 0);
@@ -707,6 +730,7 @@ const operations = [
assert.eq(profileDoc.docUnitsWritten, 1);
assert.eq(profileDoc.idxEntryBytesWritten, 4);
assert.eq(profileDoc.idxEntryUnitsWritten, 1);
+ assert.eq(profileDoc.totalUnitsWritten, 1);
assert.eq(profileDoc.keysSorted, 0);
assert.eq(profileDoc.sorterSpills, 0);
assert.eq(profileDoc.docUnitsReturned, 0);
@@ -745,6 +769,7 @@ const operations = [
// Removes one entry and inserts another.
assert.eq(profileDoc.idxEntryBytesWritten, 11);
assert.eq(profileDoc.idxEntryUnitsWritten, 2);
+ assert.eq(profileDoc.totalUnitsWritten, 1);
assert.eq(profileDoc.keysSorted, 0);
assert.eq(profileDoc.sorterSpills, 0);
assert.eq(profileDoc.docUnitsReturned, 0);
@@ -782,9 +807,11 @@ const operations = [
// comment about WT_MODIFY above.
assert.eq(profileDoc.docBytesWritten, 13);
assert.eq(profileDoc.docUnitsWritten, 1);
+ assert.eq(profileDoc.totalUnitsWritten, 1);
} else {
assert.eq(profileDoc.docBytesWritten, 1061);
assert.eq(profileDoc.docUnitsWritten, 9);
+ assert.eq(profileDoc.totalUnitsWritten, 9);
}
// Removes one entry and inserts another.
assert.eq(profileDoc.idxEntryBytesWritten, 10);
@@ -826,6 +853,7 @@ const operations = [
assert.eq(profileDoc.docUnitsWritten, 1);
assert.eq(profileDoc.idxEntryBytesWritten, 0);
assert.eq(profileDoc.idxEntryUnitsWritten, 0);
+ assert.eq(profileDoc.totalUnitsWritten, 1);
assert.eq(profileDoc.keysSorted, 0);
assert.eq(profileDoc.sorterSpills, 0);
assert.eq(profileDoc.docUnitsReturned, 0);
@@ -852,6 +880,7 @@ const operations = [
assert.eq(profileDoc.docUnitsWritten, 1);
assert.eq(profileDoc.idxEntryBytesWritten, 3);
assert.eq(profileDoc.idxEntryUnitsWritten, 1);
+ assert.eq(profileDoc.totalUnitsWritten, 1);
assert.eq(profileDoc.keysSorted, 0);
assert.eq(profileDoc.sorterSpills, 0);
assert.eq(profileDoc.docUnitsReturned, 0);
@@ -875,6 +904,7 @@ const operations = [
assert.eq(profileDoc.docUnitsWritten, 0);
assert.eq(profileDoc.idxEntryBytesWritten, 0);
assert.eq(profileDoc.idxEntryUnitsWritten, 0);
+ assert.eq(profileDoc.totalUnitsWritten, 0);
assert.eq(profileDoc.keysSorted, 0);
assert.eq(profileDoc.sorterSpills, 0);
assert.eq(profileDoc.docUnitsReturned, 1);
@@ -908,6 +938,7 @@ const operations = [
assert.eq(profileDoc.docUnitsWritten, 0);
assert.eq(profileDoc.idxEntryBytesWritten, 0);
assert.eq(profileDoc.idxEntryUnitsWritten, 0);
+ assert.eq(profileDoc.totalUnitsWritten, 0);
assert.eq(profileDoc.keysSorted, 0);
assert.eq(profileDoc.sorterSpills, 0);
assert.eq(profileDoc.docUnitsReturned, 1);
@@ -937,6 +968,7 @@ const operations = [
assert.eq(profileDoc.docUnitsWritten, 1);
assert.eq(profileDoc.idxEntryBytesWritten, 0);
assert.eq(profileDoc.idxEntryUnitsWritten, 0);
+ assert.eq(profileDoc.totalUnitsWritten, 1);
assert.eq(profileDoc.docUnitsReturned, 0);
}
},
@@ -964,6 +996,7 @@ const operations = [
assert.eq(profileDoc.docUnitsWritten, 1);
assert.eq(profileDoc.idxEntryBytesWritten, 3);
assert.eq(profileDoc.idxEntryUnitsWritten, 1);
+ assert.eq(profileDoc.totalUnitsWritten, 1);
assert.eq(profileDoc.keysSorted, 0);
assert.eq(profileDoc.sorterSpills, 0);
assert.eq(profileDoc.docUnitsReturned, 0);
@@ -992,6 +1025,11 @@ const operations = [
assert.eq(profileDoc.docUnitsWritten, 100);
assert.eq(profileDoc.idxEntryBytesWritten, 299);
assert.eq(profileDoc.idxEntryUnitsWritten, 100);
+ // This is 102 instead of 100 because all of the index bytes for the batch insert are
+ // lumped together and associated with the last document written in the batch, instead
+ // of being associated with each document written. This causes the last document+index
+ // bytes to exceed the unit size.
+ assert.eq(profileDoc.totalUnitsWritten, 102);
assert.eq(profileDoc.keysSorted, 0);
assert.eq(profileDoc.sorterSpills, 0);
assert.eq(profileDoc.docUnitsReturned, 0);
@@ -1016,6 +1054,7 @@ const operations = [
assert.eq(profileDoc.docUnitsWritten, 0);
assert.eq(profileDoc.idxEntryBytesWritten, 0);
assert.eq(profileDoc.idxEntryUnitsWritten, 0);
+ assert.eq(profileDoc.totalUnitsWritten, 0);
assert.eq(profileDoc.keysSorted, 100);
assert.eq(profileDoc.sorterSpills, 0);
assert.eq(profileDoc.docUnitsReturned, 100);
@@ -1040,6 +1079,7 @@ const operations = [
assert.eq(profileDoc.docUnitsWritten, 0);
assert.eq(profileDoc.idxEntryBytesWritten, 0);
assert.eq(profileDoc.idxEntryUnitsWritten, 0);
+ assert.eq(profileDoc.totalUnitsWritten, 0);
assert.eq(profileDoc.keysSorted, 100);
assert.eq(profileDoc.sorterSpills, 0);
assert.eq(profileDoc.docUnitsReturned, 100);
@@ -1062,6 +1102,7 @@ const operations = [
assert.eq(profileDoc.docUnitsWritten, 0);
assert.eq(profileDoc.idxEntryBytesWritten, 0);
assert.eq(profileDoc.idxEntryUnitsWritten, 0);
+ assert.eq(profileDoc.totalUnitsWritten, 0);
assert.eq(profileDoc.keysSorted, 100);
assert.eq(profileDoc.sorterSpills, 0);
assert.eq(profileDoc.docUnitsReturned, 100);
@@ -1085,6 +1126,7 @@ const operations = [
assert.eq(profileDoc.docUnitsWritten, 0);
assert.eq(profileDoc.idxEntryBytesWritten, 0);
assert.eq(profileDoc.idxEntryUnitsWritten, 0);
+ assert.eq(profileDoc.totalUnitsWritten, 0);
assert.eq(profileDoc.keysSorted, 100);
assert.eq(profileDoc.sorterSpills, 0);
assert.eq(profileDoc.docUnitsReturned, 1);
@@ -1108,6 +1150,7 @@ const operations = [
assert.eq(profileDoc.docUnitsWritten, 0);
assert.eq(profileDoc.idxEntryBytesWritten, 0);
assert.eq(profileDoc.idxEntryUnitsWritten, 0);
+ assert.eq(profileDoc.totalUnitsWritten, 0);
assert.eq(profileDoc.keysSorted, 100);
assert.eq(profileDoc.sorterSpills, 0);
assert.eq(profileDoc.docUnitsReturned, 5);
@@ -1143,6 +1186,7 @@ const operations = [
assert.eq(profileDoc.docUnitsWritten, 0);
assert.eq(profileDoc.idxEntryBytesWritten, 0);
assert.eq(profileDoc.idxEntryUnitsWritten, 0);
+ assert.eq(profileDoc.totalUnitsWritten, 0);
assert.eq(profileDoc.keysSorted, 100);
assert.eq(profileDoc.sorterSpills, 101);
assert.eq(profileDoc.docUnitsReturned, 100);
@@ -1167,6 +1211,7 @@ const operations = [
assert.eq(profileDoc.docUnitsWritten, 0);
assert.eq(profileDoc.idxEntryBytesWritten, 0);
assert.eq(profileDoc.idxEntryUnitsWritten, 0);
+ assert.eq(profileDoc.totalUnitsWritten, 0);
if (isDebugBuild(db)) {
// In debug builds we sort and spill for each of the first 20 documents. Once we
// reach that limit, we stop spilling as often. This 26 is the sum of 20 debug sorts
@@ -1202,6 +1247,7 @@ const operations = [
assert.eq(profileDoc.docUnitsWritten, 0);
assert.eq(profileDoc.idxEntryBytesWritten, 0);
assert.eq(profileDoc.idxEntryUnitsWritten, 0);
+ assert.eq(profileDoc.totalUnitsWritten, 0);
assert.eq(profileDoc.keysSorted, 100);
assert.eq(profileDoc.sorterSpills, 0);
assert.eq(profileDoc.docUnitsReturned, 10);
@@ -1226,6 +1272,7 @@ const operations = [
// The key size varies from 2 to 3 bytes.
assert.gte(profileDoc.idxEntryBytesWritten, 2 * 100);
assert.eq(profileDoc.idxEntryUnitsWritten, 100);
+ assert.gte(profileDoc.totalUnitsWritten, 100);
assert.eq(profileDoc.keysSorted, 0);
assert.eq(profileDoc.sorterSpills, 0);
assert.eq(profileDoc.docUnitsReturned, 0);
@@ -1251,6 +1298,7 @@ const operations = [
// The key size varies from 2 to 3 bytes.
assert.gte(profileDoc.idxEntryBytesWritten, 2 * 100);
assert.eq(profileDoc.idxEntryUnitsWritten, 100);
+ assert.gte(profileDoc.totalUnitsWritten, 100);
assert.eq(profileDoc.keysSorted, 0);
assert.eq(profileDoc.sorterSpills, 0);
assert.eq(profileDoc.docUnitsReturned, 0);
@@ -1275,6 +1323,7 @@ const operations = [
assert.eq(profileDoc.docUnitsWritten, 1);
assert.eq(profileDoc.idxEntryBytesWritten, 2);
assert.eq(profileDoc.idxEntryUnitsWritten, 1);
+ assert.eq(profileDoc.totalUnitsWritten, 1);
assert.eq(profileDoc.docUnitsReturned, 0);
}
},
@@ -1300,6 +1349,7 @@ const operations = [
assert.eq(profileDoc.docUnitsWritten, 9);
assert.eq(profileDoc.idxEntryBytesWritten, 27);
assert.eq(profileDoc.idxEntryUnitsWritten, 9);
+ assert.eq(profileDoc.totalUnitsWritten, 9);
assert.eq(profileDoc.docUnitsReturned, 0);
}
},
@@ -1329,6 +1379,7 @@ const operations = [
assert.eq(profileDoc.docUnitsWritten, 2);
assert.eq(profileDoc.idxEntryBytesWritten, 5);
assert.eq(profileDoc.idxEntryUnitsWritten, 2);
+ assert.eq(profileDoc.totalUnitsWritten, 2);
assert.eq(profileDoc.docUnitsReturned, 0);
}
},
@@ -1363,6 +1414,7 @@ const operations = [
assert.eq(profileDoc.docUnitsWritten, 18);
assert.eq(profileDoc.idxEntryBytesWritten, 54);
assert.eq(profileDoc.idxEntryUnitsWritten, 18);
+ assert.eq(profileDoc.totalUnitsWritten, 18);
assert.eq(profileDoc.docUnitsReturned, 0);
}
}
diff --git a/jstests/noPassthrough/ttl_operation_metrics.js b/jstests/noPassthrough/ttl_operation_metrics.js
index 5c25eca6744..eb63c02e3bb 100644
--- a/jstests/noPassthrough/ttl_operation_metrics.js
+++ b/jstests/noPassthrough/ttl_operation_metrics.js
@@ -88,6 +88,7 @@ assertMetrics(primary, (metrics) => {
// Document size is 29 bytes.
assert.gte(metrics[dbName].docBytesWritten, 29 * 3);
assert.gte(metrics[dbName].docUnitsWritten, 3);
+ assert.gte(metrics[dbName].totalUnitsWritten, 3);
});
// Clear metrics and wait for a TTL pass to delete the documents.
@@ -104,6 +105,7 @@ assertMetrics(primary, (metrics) => {
assert.gte(metrics[dbName].primaryMetrics.docUnitsRead, 2);
assert.gte(metrics[dbName].docBytesWritten, 29 * 2);
assert.gte(metrics[dbName].docUnitsWritten, 2);
+ assert.gte(metrics[dbName].totalUnitsWritten, 2);
// Key size is 12 bytes.
assert.gte(metrics[dbName].primaryMetrics.idxEntryBytesRead, 12 * 2);
assert.gte(metrics[dbName].primaryMetrics.idxEntryUnitsRead, 2);
@@ -124,4 +126,4 @@ assert.eq(primaryDB[collName].count({}), 1);
assert.eq(secondaryDB[collName].count({}), 1);
rst.stopSet();
-}());
\ No newline at end of file
+}());
diff --git a/src/mongo/db/pipeline/document_source_operation_metrics.cpp b/src/mongo/db/pipeline/document_source_operation_metrics.cpp
index 5855894bf3b..548a7b3679b 100644
--- a/src/mongo/db/pipeline/document_source_operation_metrics.cpp
+++ b/src/mongo/db/pipeline/document_source_operation_metrics.cpp
@@ -53,6 +53,7 @@ const char* DocumentSourceOperationMetrics::getSourceName() const {
namespace {
static constexpr StringData kClearMetrics = "clearMetrics"_sd;
static constexpr StringData kDatabaseName = "db"_sd;
+static constexpr StringData kLocalTimeFieldName = "localTime"_sd;
} // namespace
DocumentSource::GetNextResult DocumentSourceOperationMetrics::doGetNext() {
@@ -63,9 +64,11 @@ DocumentSource::GetNextResult DocumentSourceOperationMetrics::doGetNext() {
}
return ResourceConsumption::get(pExpCtx->opCtx).getDbMetrics();
}();
+ auto localTime = jsTime(); // fetch current time to include in all metrics documents
for (auto& [dbName, metrics] : dbMetrics) {
BSONObjBuilder builder;
builder.append(kDatabaseName, dbName);
+ builder.appendDate(kLocalTimeFieldName, localTime);
metrics.toBson(&builder);
_operationMetrics.push_back(builder.obj());
}
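For context, a minimal jstests-style read of the stage (assuming a connection's admin database, as in the tests above) shows where the new field lands; field values are illustrative, but each per-database document now carries db and localTime:

const metrics = adminDB.aggregate([{$operationMetrics: {}}]).toArray();
metrics.forEach((doc) => {
    assert(doc.hasOwnProperty("db"));         // e.g. "test"
    assert(doc.hasOwnProperty("localTime"));  // Date stamped once via jsTime()
});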
diff --git a/src/mongo/db/stats/operation_resource_consumption.idl b/src/mongo/db/stats/operation_resource_consumption.idl
index 051b0fbcaed..4eaf36c549a 100644
--- a/src/mongo/db/stats/operation_resource_consumption.idl
+++ b/src/mongo/db/stats/operation_resource_consumption.idl
@@ -65,3 +65,13 @@ server_parameters:
default: 16
validator:
gte: 1
+
+ totalUnitWriteSizeBytes:
+ description: "The size of a (doc + index) unit in written bytes for resource consumption metrics collection"
+ set_at:
+ - startup
+ cpp_varname: gTotalUnitWriteSizeBytes
+ cpp_vartype: int32_t
+ default: 128
+ validator:
+ gte: 1
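Since the new parameter is settable only at startup (set_at: startup above), a test would pass it when spawning the server. A hedged jstests-style sketch, with the parameter name taken from the IDL and the value illustrative:

// Start a mongod with a non-default 256-byte total write unit size.
const conn = MongoRunner.runMongod({setParameter: {totalUnitWriteSizeBytes: 256}});
// ... run a workload and inspect $operationMetrics ...
MongoRunner.stopMongod(conn);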
diff --git a/src/mongo/db/stats/resource_consumption_metrics.cpp b/src/mongo/db/stats/resource_consumption_metrics.cpp
index fbc53138f3c..f5735150cd3 100644
--- a/src/mongo/db/stats/resource_consumption_metrics.cpp
+++ b/src/mongo/db/stats/resource_consumption_metrics.cpp
@@ -56,6 +56,7 @@ static const char kIdxEntryBytesRead[] = "idxEntryBytesRead";
static const char kIdxEntryBytesWritten[] = "idxEntryBytesWritten";
static const char kIdxEntryUnitsRead[] = "idxEntryUnitsRead";
static const char kIdxEntryUnitsWritten[] = "idxEntryUnitsWritten";
+static const char kTotalUnitsWritten[] = "totalUnitsWritten";
static const char kKeysSorted[] = "keysSorted";
static const char kMemUsage[] = "memUsage";
static const char kNumMetrics[] = "numMetrics";
@@ -125,6 +126,33 @@ void ResourceConsumption::UnitCounter::observeOne(size_t datumBytes) {
_bytes += datumBytes;
}
+void ResourceConsumption::TotalUnitWriteCounter::observeOneDocument(size_t datumBytes) {
+ // If we have accumulated document bytes, calculate units along with any past index bytes.
+ // Accumulate the current document bytes for use in a later unit calculation.
+ if (_accumulatedDocumentBytes > 0) {
+ _units += std::ceil((_accumulatedIndexBytes + _accumulatedDocumentBytes) /
+ static_cast<float>(unitSize()));
+ _accumulatedIndexBytes = 0;
+ _accumulatedDocumentBytes = datumBytes;
+ return;
+ }
+
+ // If we have accumulated index bytes, associate them with the current document and calculate
+ // units.
+ if (_accumulatedIndexBytes > 0) {
+ _units += std::ceil((_accumulatedIndexBytes + datumBytes) / static_cast<float>(unitSize()));
+ _accumulatedIndexBytes = 0;
+ return;
+ }
+
+ // Nothing has yet accumulated; accumulate this document for use in a later unit calculation.
+ _accumulatedDocumentBytes = datumBytes;
+}
+
+void ResourceConsumption::TotalUnitWriteCounter::observeOneIndexEntry(size_t datumBytes) {
+ _accumulatedIndexBytes += datumBytes;
+}
+
int ResourceConsumption::DocumentUnitCounter::unitSize() const {
return gDocumentUnitSizeBytes;
}
@@ -133,6 +161,10 @@ int ResourceConsumption::IdxEntryUnitCounter::unitSize() const {
return gIndexEntryUnitSizeBytes;
}
+int ResourceConsumption::TotalUnitWriteCounter::unitSize() const {
+ return gTotalUnitWriteSizeBytes;
+}
+
void ResourceConsumption::ReadMetrics::toBson(BSONObjBuilder* builder) const {
builder->appendNumber(kDocBytesRead, docsRead.bytes());
builder->appendNumber(kDocUnitsRead, docsRead.units());
@@ -149,6 +181,7 @@ void ResourceConsumption::WriteMetrics::toBson(BSONObjBuilder* builder) const {
builder->appendNumber(kDocUnitsWritten, docsWritten.units());
builder->appendNumber(kIdxEntryBytesWritten, idxEntriesWritten.bytes());
builder->appendNumber(kIdxEntryUnitsWritten, idxEntriesWritten.units());
+ builder->appendNumber(kTotalUnitsWritten, totalWritten.units());
}
void ResourceConsumption::AggregatedMetrics::toBson(BSONObjBuilder* builder) const {
@@ -193,6 +226,7 @@ void ResourceConsumption::OperationMetrics::toBsonNonZeroFields(BSONObjBuilder*
appendNonZeroMetric(builder, kDocUnitsWritten, writeMetrics.docsWritten.units());
appendNonZeroMetric(builder, kIdxEntryBytesWritten, writeMetrics.idxEntriesWritten.bytes());
appendNonZeroMetric(builder, kIdxEntryUnitsWritten, writeMetrics.idxEntriesWritten.units());
+ appendNonZeroMetric(builder, kTotalUnitsWritten, writeMetrics.totalWritten.units());
}
template <typename Func>
@@ -225,11 +259,17 @@ void ResourceConsumption::MetricsCollector::incrementDocUnitsReturned(
}
void ResourceConsumption::MetricsCollector::incrementOneDocWritten(size_t bytesWritten) {
- _doIfCollecting([&] { _metrics.writeMetrics.docsWritten.observeOne(bytesWritten); });
+ _doIfCollecting([&] {
+ _metrics.writeMetrics.docsWritten.observeOne(bytesWritten);
+ _metrics.writeMetrics.totalWritten.observeOneDocument(bytesWritten);
+ });
}
void ResourceConsumption::MetricsCollector::incrementOneIdxEntryWritten(size_t bytesWritten) {
- _doIfCollecting([&] { _metrics.writeMetrics.idxEntriesWritten.observeOne(bytesWritten); });
+ _doIfCollecting([&] {
+ _metrics.writeMetrics.idxEntriesWritten.observeOne(bytesWritten);
+ _metrics.writeMetrics.totalWritten.observeOneIndexEntry(bytesWritten);
+ });
}
void ResourceConsumption::MetricsCollector::beginScopedCollecting(OperationContext* opCtx,
diff --git a/src/mongo/db/stats/resource_consumption_metrics.h b/src/mongo/db/stats/resource_consumption_metrics.h
index 5e098b5c730..4aef396e6b6 100644
--- a/src/mongo/db/stats/resource_consumption_metrics.h
+++ b/src/mongo/db/stats/resource_consumption_metrics.h
@@ -113,6 +113,35 @@ public:
int unitSize() const final;
};
+ /** TotalUnitWriteCounter records the number of units written, where each unit combines a
+ * document's bytes with the index entry bytes written for that document. */
+ class TotalUnitWriteCounter {
+ public:
+ void observeOneDocument(size_t datumBytes);
+ void observeOneIndexEntry(size_t datumBytes);
+
+ TotalUnitWriteCounter& operator+=(TotalUnitWriteCounter other) {
+ // Flush the accumulators, in case there is anything still pending.
+ other.observeOneDocument(0);
+ observeOneDocument(0);
+ _units += other._units;
+ return *this;
+ }
+
+ long long units() const {
+ // Flush the accumulators, in case there is anything still pending.
+ TotalUnitWriteCounter copy(*this);
+ copy.observeOneDocument(0);
+ return copy._units;
+ }
+
+ private:
+ int unitSize() const;
+ long long _accumulatedDocumentBytes = 0;
+ long long _accumulatedIndexBytes = 0;
+ long long _units = 0;
+ };
+
/** ReadMetrics maintains metrics for read operations. */
class ReadMetrics {
public:
@@ -158,6 +187,7 @@ public:
void add(const WriteMetrics& other) {
docsWritten += other.docsWritten;
idxEntriesWritten += other.idxEntriesWritten;
+ totalWritten += other.totalWritten;
}
WriteMetrics& operator+=(const WriteMetrics& other) {
@@ -174,6 +204,8 @@ public:
DocumentUnitCounter docsWritten;
// Number of index entries written
IdxEntryUnitCounter idxEntriesWritten;
+ // Number of total units written
+ TotalUnitWriteCounter totalWritten;
};
/**
diff --git a/src/mongo/db/stats/resource_consumption_metrics_test.cpp b/src/mongo/db/stats/resource_consumption_metrics_test.cpp
index 4598e411b71..8c7d8a756ef 100644
--- a/src/mongo/db/stats/resource_consumption_metrics_test.cpp
+++ b/src/mongo/db/stats/resource_consumption_metrics_test.cpp
@@ -492,6 +492,67 @@ TEST_F(ResourceConsumptionMetricsTest, DocumentUnitsWritten) {
ASSERT_EQ(metricsCopy["db1"].writeMetrics.docsWritten.units(), expectedUnits);
}
+TEST_F(ResourceConsumptionMetricsTest, TotalUnitsWritten) {
+ auto& globalResourceConsumption = ResourceConsumption::get(getServiceContext());
+ auto& operationMetrics = ResourceConsumption::MetricsCollector::get(_opCtx.get());
+
+ int expectedUnits = 0;
+
+ {
+ ResourceConsumption::ScopedMetricsCollector scope(_opCtx.get(), "db1");
+
+ // Each of these should be counted as 1 total unit (unit size = 128).
+ operationMetrics.incrementOneDocWritten(2);
+ operationMetrics.incrementOneDocWritten(4);
+ operationMetrics.incrementOneDocWritten(8);
+ operationMetrics.incrementOneDocWritten(16);
+ operationMetrics.incrementOneDocWritten(32);
+ operationMetrics.incrementOneDocWritten(64);
+ operationMetrics.incrementOneDocWritten(128);
+ expectedUnits += 7;
+
+ // Each of these should be counted as 2 total units (unit size = 128).
+ operationMetrics.incrementOneDocWritten(129);
+ operationMetrics.incrementOneDocWritten(200);
+ operationMetrics.incrementOneDocWritten(255);
+ operationMetrics.incrementOneDocWritten(256);
+ expectedUnits += 8;
+
+ // Each of these groups should be counted as 1 total unit, combining documents with index
+ // bytes written.
+
+ // Index writes prior to document write.
+ operationMetrics.incrementOneDocWritten(0);
+ operationMetrics.incrementOneIdxEntryWritten(2);
+ operationMetrics.incrementOneDocWritten(5);
+ expectedUnits += 1;
+
+ // Index writes after document write.
+ operationMetrics.incrementOneDocWritten(2);
+ operationMetrics.incrementOneIdxEntryWritten(126);
+ expectedUnits += 1;
+
+ // No index writes.
+ operationMetrics.incrementOneDocWritten(129);
+ expectedUnits += 2;
+
+ operationMetrics.incrementOneDocWritten(127);
+ operationMetrics.incrementOneIdxEntryWritten(1);
+ expectedUnits += 1;
+
+ // Exceeds unit size and thus counts as 2 units.
+ operationMetrics.incrementOneDocWritten(1);
+ operationMetrics.incrementOneIdxEntryWritten(1);
+ operationMetrics.incrementOneIdxEntryWritten(1);
+ operationMetrics.incrementOneIdxEntryWritten(1);
+ operationMetrics.incrementOneIdxEntryWritten(128);
+ expectedUnits += 2;
+ }
+
+ auto metricsCopy = globalResourceConsumption.getDbMetrics();
+ ASSERT_EQ(metricsCopy["db1"].writeMetrics.totalWritten.units(), expectedUnits);
+}
+
TEST_F(ResourceConsumptionMetricsTest, IdxEntryUnitsRead) {
auto& globalResourceConsumption = ResourceConsumption::get(getServiceContext());
auto& operationMetrics = ResourceConsumption::MetricsCollector::get(_opCtx.get());