summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorEric Milkie <milkie@10gen.com>2021-05-03 14:22:15 -0400
committerEvergreen Agent <no-reply@evergreen.mongodb.com>2021-05-03 20:30:23 +0000
commit960f5deb14520af3076c9164fbf8b3cbcca0560f (patch)
treec36a300cc4060449974eab032d197813034defb5
parent20b86d14a58f5862fa0eeec1f2bfac3161797730 (diff)
downloadmongo-960f5deb14520af3076c9164fbf8b3cbcca0560f.tar.gz
SERVER-55556 add new totalUnitsWritten metric; add new localTime field to operationMetrics output
totalUnitsWritten is a metric representing the number of bytes written to a document, plus any index entries that follow it before another document is written; these bytes are then translated into units per the totalUnitWriteSizeBytes parameter. Additionally, a new field, localTime, will now appear in every per-database document of the BSONArray output by the $operationMetrics aggregation stage.
-rw-r--r--jstests/hooks/run_aggregate_metrics_background.js1
-rw-r--r--jstests/noPassthrough/aggregate_operation_metrics.js5
-rw-r--r--jstests/noPassthrough/change_stream_operation_metrics.js10
-rw-r--r--jstests/noPassthrough/initial_sync_operation_metrics.js4
-rw-r--r--jstests/noPassthrough/profile_operation_metrics.js56
-rw-r--r--jstests/noPassthrough/ttl_operation_metrics.js4
-rw-r--r--src/mongo/db/pipeline/document_source_operation_metrics.cpp3
-rw-r--r--src/mongo/db/stats/operation_resource_consumption.idl10
-rw-r--r--src/mongo/db/stats/resource_consumption_metrics.cpp44
-rw-r--r--src/mongo/db/stats/resource_consumption_metrics.h32
-rw-r--r--src/mongo/db/stats/resource_consumption_metrics_test.cpp61
11 files changed, 224 insertions, 6 deletions
diff --git a/jstests/hooks/run_aggregate_metrics_background.js b/jstests/hooks/run_aggregate_metrics_background.js
index e6d53b6a6f0..32335852fcd 100644
--- a/jstests/hooks/run_aggregate_metrics_background.js
+++ b/jstests/hooks/run_aggregate_metrics_background.js
@@ -25,6 +25,7 @@ const aggregateMetricsBackground = function(host) {
"docUnitsWritten",
"idxEntryBytesWritten",
"idxEntryUnitsWritten",
+ "totalUnitsWritten",
"cpuNanos",
"db",
"primaryMetrics",
diff --git a/jstests/noPassthrough/aggregate_operation_metrics.js b/jstests/noPassthrough/aggregate_operation_metrics.js
index aa42c967f2b..25d71d07bb4 100644
--- a/jstests/noPassthrough/aggregate_operation_metrics.js
+++ b/jstests/noPassthrough/aggregate_operation_metrics.js
@@ -20,6 +20,8 @@ const isLinux = getBuildInfo().buildEnvironment.target_os == "linux";
let assertMetricsExist = function(metrics) {
try {
assert.neq(metrics, undefined);
+ assert(metrics.hasOwnProperty("db"));
+ assert(metrics.hasOwnProperty("localTime"));
let primaryMetrics = metrics.primaryMetrics;
let secondaryMetrics = metrics.secondaryMetrics;
[primaryMetrics, secondaryMetrics].forEach((readMetrics) => {
@@ -38,6 +40,7 @@ let assertMetricsExist = function(metrics) {
assert.gte(metrics.docUnitsWritten, 0);
assert.gte(metrics.idxEntryBytesWritten, 0);
assert.gte(metrics.idxEntryUnitsWritten, 0);
+ assert.gte(metrics.totalUnitsWritten, 0);
} catch (e) {
print("caught exception while checking metrics output: " + tojson(metrics));
throw e;
@@ -249,4 +252,4 @@ const secondary = rst.getSecondary();
});
rst.stopSet();
-}()); \ No newline at end of file
+}());
diff --git a/jstests/noPassthrough/change_stream_operation_metrics.js b/jstests/noPassthrough/change_stream_operation_metrics.js
index a4f64917baf..bf7c75a9e24 100644
--- a/jstests/noPassthrough/change_stream_operation_metrics.js
+++ b/jstests/noPassthrough/change_stream_operation_metrics.js
@@ -81,6 +81,12 @@ assert.commandWorked(primaryDB.createCollection(collName));
assert.eq(metrics[dbName].docBytesWritten, 29 * nDocs);
assert.eq(metrics[dbName].docUnitsWritten, nDocs);
+ // With batch inserts, the index updates are all performed together after all the documents
+ // are inserted, so this has the effect of associating all the index bytes for the batch
+ // with one document, for the purposes of totalUnitsWritten. This effect causes the last
+ // document to have 3 units instead of 1 like the first 99.
+ assert.eq(metrics[dbName].totalUnitsWritten, nDocs + 2);
+
// The inserted keys will vary in size from 2 to 4 bytes depending on their value. Assert
// that the number of bytes fall within that range.
assert.gt(metrics[dbName].idxEntryBytesWritten, 2 * nDocs);
@@ -133,6 +139,7 @@ let nextId = nDocs;
assert.eq(metrics[dbName].docUnitsWritten, 1);
assert.eq(metrics[dbName].idxEntryBytesWritten, 3);
assert.eq(metrics[dbName].idxEntryUnitsWritten, 1);
+ assert.eq(metrics[dbName].totalUnitsWritten, 1);
assert.eq(metrics[dbName].primaryMetrics.docBytesRead, 0);
assert.eq(metrics[dbName].primaryMetrics.docUnitsRead, 0);
assert.eq(metrics[dbName].primaryMetrics.cursorSeeks, 0);
@@ -166,6 +173,7 @@ let nextId = nDocs;
assertMetrics(primary, (metrics) => {
assert.eq(metrics[dbName].docBytesWritten, 40);
assert.eq(metrics[dbName].docUnitsWritten, 1);
+ assert.eq(metrics[dbName].totalUnitsWritten, 1);
assert.eq(metrics[dbName].primaryMetrics.docBytesRead, 29);
assert.eq(metrics[dbName].primaryMetrics.docUnitsRead, 1);
assert.eq(metrics[dbName].primaryMetrics.idxEntryBytesRead, 3);
@@ -285,4 +293,4 @@ let nextId = nDocs;
});
})();
rst.stopSet();
-}()); \ No newline at end of file
+}());
diff --git a/jstests/noPassthrough/initial_sync_operation_metrics.js b/jstests/noPassthrough/initial_sync_operation_metrics.js
index 2b674b64a70..3b313c19d08 100644
--- a/jstests/noPassthrough/initial_sync_operation_metrics.js
+++ b/jstests/noPassthrough/initial_sync_operation_metrics.js
@@ -43,6 +43,8 @@ const getDBMetrics = (adminDB) => {
let allMetrics = {};
while (cursor.hasNext()) {
let doc = cursor.next();
+ // Remove localTime field as it stymies us from comparing objects since it always changes.
+ delete doc.localTime;
allMetrics[doc.db] = doc;
}
return allMetrics;
@@ -93,4 +95,4 @@ replSet.awaitReplication();
}
replSet.stopSet();
-})(); \ No newline at end of file
+})();
diff --git a/jstests/noPassthrough/profile_operation_metrics.js b/jstests/noPassthrough/profile_operation_metrics.js
index a70bdc1dbab..5f5e70742de 100644
--- a/jstests/noPassthrough/profile_operation_metrics.js
+++ b/jstests/noPassthrough/profile_operation_metrics.js
@@ -47,6 +47,7 @@ const assertMetricsExist = (profilerEntry) => {
assert.gte(metrics.docUnitsWritten, 0);
assert.gte(metrics.idxEntryBytesWritten, 0);
assert.gte(metrics.idxEntryUnitsWritten, 0);
+ assert.gte(metrics.totalUnitsWritten, 0);
};
const runInLegacyQueryMode = (db, func) => {
@@ -97,6 +98,7 @@ const operations = [
assert.gt(profileDoc.docUnitsWritten, 0);
assert.eq(profileDoc.idxEntryBytesWritten, 0);
assert.eq(profileDoc.idxEntryUnitsWritten, 0);
+ assert.gt(profileDoc.totalUnitsWritten, 0);
assert.gt(profileDoc.cursorSeeks, 0);
assert.eq(profileDoc.keysSorted, 0);
assert.eq(profileDoc.sorterSpills, 0);
@@ -121,6 +123,7 @@ const operations = [
assert.gt(profileDoc.docUnitsWritten, 0);
assert.eq(profileDoc.idxEntryBytesWritten, 0);
assert.eq(profileDoc.idxEntryUnitsWritten, 0);
+ assert.gt(profileDoc.totalUnitsWritten, 0);
assert.gt(profileDoc.cursorSeeks, 0);
assert.eq(profileDoc.keysSorted, 0);
assert.eq(profileDoc.sorterSpills, 0);
@@ -145,6 +148,7 @@ const operations = [
assert.eq(profileDoc.docUnitsWritten, 0);
assert.eq(profileDoc.idxEntryBytesWritten, 0);
assert.eq(profileDoc.idxEntryUnitsWritten, 0);
+ assert.eq(profileDoc.totalUnitsWritten, 0);
assert.eq(profileDoc.keysSorted, 0);
assert.eq(profileDoc.sorterSpills, 0);
assert.eq(profileDoc.docUnitsReturned, 0);
@@ -167,6 +171,7 @@ const operations = [
assert.eq(profileDoc.docUnitsWritten, 1);
assert.eq(profileDoc.idxEntryBytesWritten, 7);
assert.eq(profileDoc.idxEntryUnitsWritten, 2);
+ assert.eq(profileDoc.totalUnitsWritten, 1);
assert.eq(profileDoc.keysSorted, 0);
assert.eq(profileDoc.sorterSpills, 0);
assert.eq(profileDoc.docUnitsReturned, 0);
@@ -200,6 +205,7 @@ const operations = [
assert.eq(profileDoc.docUnitsWritten, 0);
assert.eq(profileDoc.idxEntryBytesWritten, 0);
assert.eq(profileDoc.idxEntryUnitsWritten, 0);
+ assert.eq(profileDoc.totalUnitsWritten, 0);
assert.eq(profileDoc.keysSorted, 0);
assert.eq(profileDoc.sorterSpills, 0);
assert.eq(profileDoc.docUnitsReturned, 1);
@@ -222,6 +228,7 @@ const operations = [
assert.eq(profileDoc.docUnitsWritten, 0);
assert.eq(profileDoc.idxEntryBytesWritten, 0);
assert.eq(profileDoc.idxEntryUnitsWritten, 0);
+ assert.eq(profileDoc.totalUnitsWritten, 0);
assert.eq(profileDoc.keysSorted, 0);
assert.eq(profileDoc.sorterSpills, 0);
assert.eq(profileDoc.docUnitsReturned, 1);
@@ -244,6 +251,7 @@ const operations = [
assert.eq(profileDoc.docUnitsWritten, 0);
assert.eq(profileDoc.idxEntryBytesWritten, 0);
assert.eq(profileDoc.idxEntryUnitsWritten, 0);
+ assert.eq(profileDoc.totalUnitsWritten, 0);
assert.eq(profileDoc.keysSorted, 0);
assert.eq(profileDoc.sorterSpills, 0);
assert.eq(profileDoc.docUnitsReturned, 1);
@@ -266,6 +274,7 @@ const operations = [
assert.eq(profileDoc.docUnitsWritten, 0);
assert.eq(profileDoc.idxEntryBytesWritten, 0);
assert.eq(profileDoc.idxEntryUnitsWritten, 0);
+ assert.eq(profileDoc.totalUnitsWritten, 0);
assert.eq(profileDoc.keysSorted, 0);
assert.eq(profileDoc.sorterSpills, 0);
assert.eq(profileDoc.docUnitsReturned, 0);
@@ -298,6 +307,7 @@ const operations = [
// Deletes one index entry and writes another.
assert.eq(profileDoc.idxEntryBytesWritten, 9);
assert.eq(profileDoc.idxEntryUnitsWritten, 2);
+ assert.eq(profileDoc.totalUnitsWritten, 1);
assert.eq(profileDoc.keysSorted, 0);
assert.eq(profileDoc.sorterSpills, 0);
assert.eq(profileDoc.docUnitsReturned, 1);
@@ -330,6 +340,7 @@ const operations = [
// Deletes one index entry and writes another.
assert.eq(profileDoc.idxEntryBytesWritten, 10);
assert.eq(profileDoc.idxEntryUnitsWritten, 2);
+ assert.eq(profileDoc.totalUnitsWritten, 1);
assert.eq(profileDoc.keysSorted, 0);
assert.eq(profileDoc.sorterSpills, 0);
assert.eq(profileDoc.docUnitsReturned, 0);
@@ -371,6 +382,7 @@ const operations = [
assert.eq(profileDoc.docUnitsWritten, 0);
assert.eq(profileDoc.idxEntryBytesWritten, 0);
assert.eq(profileDoc.idxEntryUnitsWritten, 0);
+ assert.eq(profileDoc.totalUnitsWritten, 0);
assert.eq(profileDoc.keysSorted, 0);
assert.eq(profileDoc.sorterSpills, 0);
assert.eq(profileDoc.docUnitsReturned, 0);
@@ -414,6 +426,7 @@ const operations = [
assert.eq(profileDoc.docUnitsWritten, 0);
assert.eq(profileDoc.idxEntryBytesWritten, 0);
assert.eq(profileDoc.idxEntryUnitsWritten, 0);
+ assert.eq(profileDoc.totalUnitsWritten, 0);
assert.eq(profileDoc.keysSorted, 0);
assert.eq(profileDoc.sorterSpills, 0);
assert.eq(profileDoc.docUnitsReturned, 0);
@@ -436,6 +449,7 @@ const operations = [
assert.gt(profileDoc.docUnitsWritten, 0);
assert.eq(profileDoc.idxEntryBytesWritten, 0);
assert.eq(profileDoc.idxEntryUnitsWritten, 0);
+ assert.gt(profileDoc.totalUnitsWritten, 0);
assert.eq(profileDoc.keysSorted, 0);
assert.eq(profileDoc.sorterSpills, 0);
assert.eq(profileDoc.docUnitsReturned, 0);
@@ -470,6 +484,7 @@ const operations = [
assert.eq(profileDoc.docUnitsWritten, 0);
assert.eq(profileDoc.idxEntryBytesWritten, 0);
assert.eq(profileDoc.idxEntryUnitsWritten, 0);
+ assert.eq(profileDoc.totalUnitsWritten, 0);
assert.eq(profileDoc.keysSorted, 0);
assert.eq(profileDoc.sorterSpills, 0);
assert.eq(profileDoc.docUnitsReturned, 1);
@@ -501,6 +516,7 @@ const operations = [
assert.eq(profileDoc.docUnitsWritten, 1);
assert.eq(profileDoc.idxEntryBytesWritten, 3);
assert.eq(profileDoc.idxEntryUnitsWritten, 1);
+ assert.eq(profileDoc.totalUnitsWritten, 1);
assert.eq(profileDoc.keysSorted, 0);
assert.eq(profileDoc.sorterSpills, 0);
assert.eq(profileDoc.docUnitsReturned, 1);
@@ -529,6 +545,7 @@ const operations = [
assert.eq(profileDoc.docUnitsWritten, 1);
assert.eq(profileDoc.idxEntryBytesWritten, 3);
assert.eq(profileDoc.idxEntryUnitsWritten, 1);
+ assert.eq(profileDoc.totalUnitsWritten, 1);
assert.eq(profileDoc.keysSorted, 0);
assert.eq(profileDoc.sorterSpills, 0);
assert.eq(profileDoc.docUnitsReturned, 0);
@@ -557,6 +574,7 @@ const operations = [
assert.eq(profileDoc.docUnitsWritten, 1);
assert.eq(profileDoc.idxEntryBytesWritten, 3);
assert.eq(profileDoc.idxEntryUnitsWritten, 1);
+ assert.eq(profileDoc.totalUnitsWritten, 1);
assert.eq(profileDoc.docUnitsReturned, 0);
}
},
@@ -577,6 +595,7 @@ const operations = [
assert.gt(profileDoc.docUnitsWritten, 0);
assert.eq(profileDoc.idxEntryBytesWritten, 0);
assert.eq(profileDoc.idxEntryUnitsWritten, 0);
+ assert.gt(profileDoc.totalUnitsWritten, 0);
assert.eq(profileDoc.keysSorted, 0);
assert.eq(profileDoc.sorterSpills, 0);
assert.eq(profileDoc.docUnitsReturned, 0);
@@ -605,6 +624,7 @@ const operations = [
assert.eq(profileDoc.docUnitsWritten, 0);
assert.eq(profileDoc.idxEntryBytesWritten, 0);
assert.eq(profileDoc.idxEntryUnitsWritten, 0);
+ assert.eq(profileDoc.totalUnitsWritten, 0);
assert.eq(profileDoc.keysSorted, 0);
assert.eq(profileDoc.sorterSpills, 0);
assert.eq(profileDoc.docUnitsReturned, 5);
@@ -629,6 +649,7 @@ const operations = [
assert.eq(profileDoc.docUnitsWritten, 0);
assert.eq(profileDoc.idxEntryBytesWritten, 0);
assert.eq(profileDoc.idxEntryUnitsWritten, 0);
+ assert.eq(profileDoc.totalUnitsWritten, 0);
assert.eq(profileDoc.keysSorted, 150);
assert.eq(profileDoc.sorterSpills, 0);
assert.eq(profileDoc.docUnitsReturned, 10);
@@ -654,6 +675,7 @@ const operations = [
assert.eq(profileDoc.docUnitsWritten, 0);
assert.eq(profileDoc.idxEntryBytesWritten, 0);
assert.eq(profileDoc.idxEntryUnitsWritten, 0);
+ assert.eq(profileDoc.totalUnitsWritten, 0);
assert.eq(profileDoc.keysSorted, 0);
assert.eq(profileDoc.sorterSpills, 0);
assert.eq(profileDoc.docUnitsReturned, 0);
@@ -679,6 +701,7 @@ const operations = [
// Deletes one entry and writes another.
assert.eq(profileDoc.idxEntryBytesWritten, 10);
assert.eq(profileDoc.idxEntryUnitsWritten, 2);
+ assert.eq(profileDoc.totalUnitsWritten, 1);
assert.eq(profileDoc.keysSorted, 0);
assert.eq(profileDoc.sorterSpills, 0);
assert.eq(profileDoc.docUnitsReturned, 0);
@@ -708,6 +731,7 @@ const operations = [
assert.eq(profileDoc.docUnitsWritten, 1);
assert.eq(profileDoc.idxEntryBytesWritten, 4);
assert.eq(profileDoc.idxEntryUnitsWritten, 1);
+ assert.eq(profileDoc.totalUnitsWritten, 1);
assert.eq(profileDoc.keysSorted, 0);
assert.eq(profileDoc.sorterSpills, 0);
assert.eq(profileDoc.docUnitsReturned, 0);
@@ -746,6 +770,7 @@ const operations = [
// Removes one entry and inserts another.
assert.eq(profileDoc.idxEntryBytesWritten, 11);
assert.eq(profileDoc.idxEntryUnitsWritten, 2);
+ assert.eq(profileDoc.totalUnitsWritten, 1);
assert.eq(profileDoc.keysSorted, 0);
assert.eq(profileDoc.sorterSpills, 0);
assert.eq(profileDoc.docUnitsReturned, 0);
@@ -783,9 +808,11 @@ const operations = [
// comment about WT_MODIFY above.
assert.eq(profileDoc.docBytesWritten, 13);
assert.eq(profileDoc.docUnitsWritten, 1);
+ assert.eq(profileDoc.totalUnitsWritten, 1);
} else {
assert.eq(profileDoc.docBytesWritten, 1061);
assert.eq(profileDoc.docUnitsWritten, 9);
+ assert.eq(profileDoc.totalUnitsWritten, 9);
}
// Removes one entry and inserts another.
assert.eq(profileDoc.idxEntryBytesWritten, 10);
@@ -827,6 +854,7 @@ const operations = [
assert.eq(profileDoc.docUnitsWritten, 1);
assert.eq(profileDoc.idxEntryBytesWritten, 0);
assert.eq(profileDoc.idxEntryUnitsWritten, 0);
+ assert.eq(profileDoc.totalUnitsWritten, 1);
assert.eq(profileDoc.keysSorted, 0);
assert.eq(profileDoc.sorterSpills, 0);
assert.eq(profileDoc.docUnitsReturned, 0);
@@ -853,6 +881,7 @@ const operations = [
assert.eq(profileDoc.docUnitsWritten, 1);
assert.eq(profileDoc.idxEntryBytesWritten, 3);
assert.eq(profileDoc.idxEntryUnitsWritten, 1);
+ assert.eq(profileDoc.totalUnitsWritten, 1);
assert.eq(profileDoc.keysSorted, 0);
assert.eq(profileDoc.sorterSpills, 0);
assert.eq(profileDoc.docUnitsReturned, 0);
@@ -876,6 +905,7 @@ const operations = [
assert.eq(profileDoc.docUnitsWritten, 0);
assert.eq(profileDoc.idxEntryBytesWritten, 0);
assert.eq(profileDoc.idxEntryUnitsWritten, 0);
+ assert.eq(profileDoc.totalUnitsWritten, 0);
assert.eq(profileDoc.keysSorted, 0);
assert.eq(profileDoc.sorterSpills, 0);
assert.eq(profileDoc.docUnitsReturned, 1);
@@ -909,6 +939,7 @@ const operations = [
assert.eq(profileDoc.docUnitsWritten, 0);
assert.eq(profileDoc.idxEntryBytesWritten, 0);
assert.eq(profileDoc.idxEntryUnitsWritten, 0);
+ assert.eq(profileDoc.totalUnitsWritten, 0);
assert.eq(profileDoc.keysSorted, 0);
assert.eq(profileDoc.sorterSpills, 0);
assert.eq(profileDoc.docUnitsReturned, 1);
@@ -938,6 +969,7 @@ const operations = [
assert.eq(profileDoc.docUnitsWritten, 1);
assert.eq(profileDoc.idxEntryBytesWritten, 0);
assert.eq(profileDoc.idxEntryUnitsWritten, 0);
+ assert.eq(profileDoc.totalUnitsWritten, 1);
assert.eq(profileDoc.docUnitsReturned, 0);
}
},
@@ -965,6 +997,7 @@ const operations = [
assert.eq(profileDoc.docUnitsWritten, 1);
assert.eq(profileDoc.idxEntryBytesWritten, 3);
assert.eq(profileDoc.idxEntryUnitsWritten, 1);
+ assert.eq(profileDoc.totalUnitsWritten, 1);
assert.eq(profileDoc.keysSorted, 0);
assert.eq(profileDoc.sorterSpills, 0);
assert.eq(profileDoc.docUnitsReturned, 0);
@@ -993,6 +1026,11 @@ const operations = [
assert.eq(profileDoc.docUnitsWritten, 100);
assert.eq(profileDoc.idxEntryBytesWritten, 299);
assert.eq(profileDoc.idxEntryUnitsWritten, 100);
+ // This is 102 instead of 100 because all of the index bytes for the batch insert are
+ // lumped together and associated with the last document written in the batch, instead
+ // of being associated with each document written. This causes the last document+index
+ // bytes to exceed the unit size.
+ assert.eq(profileDoc.totalUnitsWritten, 102);
assert.eq(profileDoc.keysSorted, 0);
assert.eq(profileDoc.sorterSpills, 0);
assert.eq(profileDoc.docUnitsReturned, 0);
@@ -1017,6 +1055,7 @@ const operations = [
assert.eq(profileDoc.docUnitsWritten, 0);
assert.eq(profileDoc.idxEntryBytesWritten, 0);
assert.eq(profileDoc.idxEntryUnitsWritten, 0);
+ assert.eq(profileDoc.totalUnitsWritten, 0);
assert.eq(profileDoc.keysSorted, 100);
assert.eq(profileDoc.sorterSpills, 0);
assert.eq(profileDoc.docUnitsReturned, 100);
@@ -1041,6 +1080,7 @@ const operations = [
assert.eq(profileDoc.docUnitsWritten, 0);
assert.eq(profileDoc.idxEntryBytesWritten, 0);
assert.eq(profileDoc.idxEntryUnitsWritten, 0);
+ assert.eq(profileDoc.totalUnitsWritten, 0);
assert.eq(profileDoc.keysSorted, 100);
assert.eq(profileDoc.sorterSpills, 0);
assert.eq(profileDoc.docUnitsReturned, 100);
@@ -1063,6 +1103,7 @@ const operations = [
assert.eq(profileDoc.docUnitsWritten, 0);
assert.eq(profileDoc.idxEntryBytesWritten, 0);
assert.eq(profileDoc.idxEntryUnitsWritten, 0);
+ assert.eq(profileDoc.totalUnitsWritten, 0);
assert.eq(profileDoc.keysSorted, 100);
assert.eq(profileDoc.sorterSpills, 0);
assert.eq(profileDoc.docUnitsReturned, 100);
@@ -1086,6 +1127,7 @@ const operations = [
assert.eq(profileDoc.docUnitsWritten, 0);
assert.eq(profileDoc.idxEntryBytesWritten, 0);
assert.eq(profileDoc.idxEntryUnitsWritten, 0);
+ assert.eq(profileDoc.totalUnitsWritten, 0);
assert.eq(profileDoc.keysSorted, 100);
assert.eq(profileDoc.sorterSpills, 0);
assert.eq(profileDoc.docUnitsReturned, 1);
@@ -1109,6 +1151,7 @@ const operations = [
assert.eq(profileDoc.docUnitsWritten, 0);
assert.eq(profileDoc.idxEntryBytesWritten, 0);
assert.eq(profileDoc.idxEntryUnitsWritten, 0);
+ assert.eq(profileDoc.totalUnitsWritten, 0);
assert.eq(profileDoc.keysSorted, 100);
assert.eq(profileDoc.sorterSpills, 0);
assert.eq(profileDoc.docUnitsReturned, 5);
@@ -1144,6 +1187,7 @@ const operations = [
assert.eq(profileDoc.docUnitsWritten, 0);
assert.eq(profileDoc.idxEntryBytesWritten, 0);
assert.eq(profileDoc.idxEntryUnitsWritten, 0);
+ assert.eq(profileDoc.totalUnitsWritten, 0);
assert.eq(profileDoc.keysSorted, 100);
assert.eq(profileDoc.sorterSpills, 101);
assert.eq(profileDoc.docUnitsReturned, 100);
@@ -1168,6 +1212,7 @@ const operations = [
assert.eq(profileDoc.docUnitsWritten, 0);
assert.eq(profileDoc.idxEntryBytesWritten, 0);
assert.eq(profileDoc.idxEntryUnitsWritten, 0);
+ assert.eq(profileDoc.totalUnitsWritten, 0);
if (isDebugBuild(db)) {
// In debug builds we sort and spill for each of the first 20 documents. Once we
// reach that limit, we stop spilling as often. This 26 is the sum of 20 debug sorts
@@ -1203,6 +1248,7 @@ const operations = [
assert.eq(profileDoc.docUnitsWritten, 0);
assert.eq(profileDoc.idxEntryBytesWritten, 0);
assert.eq(profileDoc.idxEntryUnitsWritten, 0);
+ assert.eq(profileDoc.totalUnitsWritten, 0);
assert.eq(profileDoc.keysSorted, 100);
assert.eq(profileDoc.sorterSpills, 0);
assert.eq(profileDoc.docUnitsReturned, 10);
@@ -1227,6 +1273,7 @@ const operations = [
// The key size varies from 2 to 3 bytes.
assert.gte(profileDoc.idxEntryBytesWritten, 2 * 100);
assert.eq(profileDoc.idxEntryUnitsWritten, 100);
+ assert.gte(profileDoc.totalUnitsWritten, 100);
assert.eq(profileDoc.keysSorted, 0);
assert.eq(profileDoc.sorterSpills, 0);
assert.eq(profileDoc.docUnitsReturned, 0);
@@ -1252,6 +1299,7 @@ const operations = [
// The key size varies from 2 to 3 bytes.
assert.gte(profileDoc.idxEntryBytesWritten, 2 * 100);
assert.eq(profileDoc.idxEntryUnitsWritten, 100);
+ assert.gte(profileDoc.totalUnitsWritten, 100);
assert.eq(profileDoc.keysSorted, 0);
assert.eq(profileDoc.sorterSpills, 0);
assert.eq(profileDoc.docUnitsReturned, 0);
@@ -1276,6 +1324,7 @@ const operations = [
assert.eq(profileDoc.docUnitsWritten, 1);
assert.eq(profileDoc.idxEntryBytesWritten, 2);
assert.eq(profileDoc.idxEntryUnitsWritten, 1);
+ assert.eq(profileDoc.totalUnitsWritten, 1);
assert.eq(profileDoc.docUnitsReturned, 0);
}
},
@@ -1301,6 +1350,7 @@ const operations = [
assert.eq(profileDoc.docUnitsWritten, 9);
assert.eq(profileDoc.idxEntryBytesWritten, 27);
assert.eq(profileDoc.idxEntryUnitsWritten, 9);
+ assert.eq(profileDoc.totalUnitsWritten, 9);
assert.eq(profileDoc.docUnitsReturned, 0);
}
},
@@ -1331,6 +1381,7 @@ const operations = [
assert.eq(profileDoc.docUnitsWritten, 2);
assert.eq(profileDoc.idxEntryBytesWritten, 5);
assert.eq(profileDoc.idxEntryUnitsWritten, 2);
+ assert.eq(profileDoc.totalUnitsWritten, 2);
assert.eq(profileDoc.docUnitsReturned, 0);
}
},
@@ -1366,6 +1417,7 @@ const operations = [
assert.eq(profileDoc.docUnitsWritten, 18);
assert.eq(profileDoc.idxEntryBytesWritten, 54);
assert.eq(profileDoc.idxEntryUnitsWritten, 18);
+ assert.eq(profileDoc.totalUnitsWritten, 18);
assert.eq(profileDoc.docUnitsReturned, 0);
}
},
@@ -1391,6 +1443,7 @@ const operations = [
assert.gte(profileDoc.docUnitsWritten, 0);
assert.gte(profileDoc.idxEntryBytesWritten, 0);
assert.gte(profileDoc.idxEntryUnitsWritten, 0);
+ assert.gte(profileDoc.totalUnitsWritten, 0);
assert.gt(profileDoc.cursorSeeks, 0);
assert.eq(profileDoc.keysSorted, 0);
assert.eq(profileDoc.sorterSpills, 0);
@@ -1415,6 +1468,7 @@ const operations = [
assert.eq(profileDoc.docUnitsWritten, 2);
assert.eq(profileDoc.idxEntryBytesWritten, 0);
assert.eq(profileDoc.idxEntryUnitsWritten, 0);
+ assert.eq(profileDoc.totalUnitsWritten, 2);
assert.eq(profileDoc.cursorSeeks, 0);
assert.eq(profileDoc.keysSorted, 0);
assert.eq(profileDoc.sorterSpills, 0);
@@ -1464,6 +1518,7 @@ const operations = [
assert.eq(profileDoc.docUnitsWritten, 2);
assert.eq(profileDoc.idxEntryBytesWritten, 0);
assert.eq(profileDoc.idxEntryUnitsWritten, 0);
+ assert.eq(profileDoc.totalUnitsWritten, 2);
assert.eq(profileDoc.cursorSeeks, 2);
assert.eq(profileDoc.keysSorted, 0);
assert.eq(profileDoc.sorterSpills, 0);
@@ -1511,6 +1566,7 @@ const operations = [
assert.eq(profileDoc.docUnitsWritten, 0);
assert.eq(profileDoc.idxEntryBytesWritten, 0);
assert.eq(profileDoc.idxEntryUnitsWritten, 0);
+ assert.eq(profileDoc.totalUnitsWritten, 0);
assert.eq(profileDoc.cursorSeeks, 0);
assert.eq(profileDoc.keysSorted, 0);
assert.eq(profileDoc.sorterSpills, 0);
diff --git a/jstests/noPassthrough/ttl_operation_metrics.js b/jstests/noPassthrough/ttl_operation_metrics.js
index 4b57fe9481f..0d24af78932 100644
--- a/jstests/noPassthrough/ttl_operation_metrics.js
+++ b/jstests/noPassthrough/ttl_operation_metrics.js
@@ -87,6 +87,7 @@ assertMetrics(primary, (metrics) => {
// Document size is 29 bytes.
assert.gte(metrics[dbName].docBytesWritten, 29 * 3);
assert.gte(metrics[dbName].docUnitsWritten, 3);
+ assert.gte(metrics[dbName].totalUnitsWritten, 3);
});
// Clear metrics and wait for a TTL pass to delete the documents.
@@ -103,6 +104,7 @@ assertMetrics(primary, (metrics) => {
assert.gte(metrics[dbName].primaryMetrics.docUnitsRead, 2);
assert.gte(metrics[dbName].docBytesWritten, 29 * 2);
assert.gte(metrics[dbName].docUnitsWritten, 2);
+ assert.gte(metrics[dbName].totalUnitsWritten, 2);
// Key size is 12 bytes.
assert.gte(metrics[dbName].primaryMetrics.idxEntryBytesRead, 12 * 2);
assert.gte(metrics[dbName].primaryMetrics.idxEntryUnitsRead, 2);
@@ -123,4 +125,4 @@ assert.eq(primaryDB[collName].count({}), 1);
assert.eq(secondaryDB[collName].count({}), 1);
rst.stopSet();
-}()); \ No newline at end of file
+}());
diff --git a/src/mongo/db/pipeline/document_source_operation_metrics.cpp b/src/mongo/db/pipeline/document_source_operation_metrics.cpp
index 5855894bf3b..548a7b3679b 100644
--- a/src/mongo/db/pipeline/document_source_operation_metrics.cpp
+++ b/src/mongo/db/pipeline/document_source_operation_metrics.cpp
@@ -53,6 +53,7 @@ const char* DocumentSourceOperationMetrics::getSourceName() const {
namespace {
static constexpr StringData kClearMetrics = "clearMetrics"_sd;
static constexpr StringData kDatabaseName = "db"_sd;
+static constexpr StringData kLocalTimeFieldName = "localTime"_sd;
} // namespace
DocumentSource::GetNextResult DocumentSourceOperationMetrics::doGetNext() {
@@ -63,9 +64,11 @@ DocumentSource::GetNextResult DocumentSourceOperationMetrics::doGetNext() {
}
return ResourceConsumption::get(pExpCtx->opCtx).getDbMetrics();
}();
+ auto localTime = jsTime(); // fetch current time to include in all metrics documents
for (auto& [dbName, metrics] : dbMetrics) {
BSONObjBuilder builder;
builder.append(kDatabaseName, dbName);
+ builder.appendDate(kLocalTimeFieldName, localTime);
metrics.toBson(&builder);
_operationMetrics.push_back(builder.obj());
}
diff --git a/src/mongo/db/stats/operation_resource_consumption.idl b/src/mongo/db/stats/operation_resource_consumption.idl
index 051b0fbcaed..4eaf36c549a 100644
--- a/src/mongo/db/stats/operation_resource_consumption.idl
+++ b/src/mongo/db/stats/operation_resource_consumption.idl
@@ -65,3 +65,13 @@ server_parameters:
default: 16
validator:
gte: 1
+
+ totalUnitWriteSizeBytes:
+ description: "The size of a (doc + index) unit in written bytes for resource consumption metrics collection"
+ set_at:
+ - startup
+ cpp_varname: gTotalUnitWriteSizeBytes
+ cpp_vartype: int32_t
+ default: 128
+ validator:
+ gte: 1
diff --git a/src/mongo/db/stats/resource_consumption_metrics.cpp b/src/mongo/db/stats/resource_consumption_metrics.cpp
index fbc53138f3c..f5735150cd3 100644
--- a/src/mongo/db/stats/resource_consumption_metrics.cpp
+++ b/src/mongo/db/stats/resource_consumption_metrics.cpp
@@ -56,6 +56,7 @@ static const char kIdxEntryBytesRead[] = "idxEntryBytesRead";
static const char kIdxEntryBytesWritten[] = "idxEntryBytesWritten";
static const char kIdxEntryUnitsRead[] = "idxEntryUnitsRead";
static const char kIdxEntryUnitsWritten[] = "idxEntryUnitsWritten";
+static const char kTotalUnitsWritten[] = "totalUnitsWritten";
static const char kKeysSorted[] = "keysSorted";
static const char kMemUsage[] = "memUsage";
static const char kNumMetrics[] = "numMetrics";
@@ -125,6 +126,33 @@ void ResourceConsumption::UnitCounter::observeOne(size_t datumBytes) {
_bytes += datumBytes;
}
+void ResourceConsumption::TotalUnitWriteCounter::observeOneDocument(size_t datumBytes) {
+ // If we have accumulated document bytes, calculate units along with any past index bytes.
+ // Accumulate the current document bytes for use in a later unit calculation.
+ if (_accumulatedDocumentBytes > 0) {
+ _units += std::ceil((_accumulatedIndexBytes + _accumulatedDocumentBytes) /
+ static_cast<float>(unitSize()));
+ _accumulatedIndexBytes = 0;
+ _accumulatedDocumentBytes = datumBytes;
+ return;
+ }
+
+ // If we have accumulated index bytes, associate them with the current document and calculate
+ // units.
+ if (_accumulatedIndexBytes > 0) {
+ _units += std::ceil((_accumulatedIndexBytes + datumBytes) / static_cast<float>(unitSize()));
+ _accumulatedIndexBytes = 0;
+ return;
+ }
+
+ // Nothing has yet accumulated; accumulate this document for use in a later unit calculation.
+ _accumulatedDocumentBytes = datumBytes;
+}
+
+void ResourceConsumption::TotalUnitWriteCounter::observeOneIndexEntry(size_t datumBytes) {
+ _accumulatedIndexBytes += datumBytes;
+}
+
int ResourceConsumption::DocumentUnitCounter::unitSize() const {
return gDocumentUnitSizeBytes;
}
@@ -133,6 +161,10 @@ int ResourceConsumption::IdxEntryUnitCounter::unitSize() const {
return gIndexEntryUnitSizeBytes;
}
+int ResourceConsumption::TotalUnitWriteCounter::unitSize() const {
+ return gTotalUnitWriteSizeBytes;
+}
+
void ResourceConsumption::ReadMetrics::toBson(BSONObjBuilder* builder) const {
builder->appendNumber(kDocBytesRead, docsRead.bytes());
builder->appendNumber(kDocUnitsRead, docsRead.units());
@@ -149,6 +181,7 @@ void ResourceConsumption::WriteMetrics::toBson(BSONObjBuilder* builder) const {
builder->appendNumber(kDocUnitsWritten, docsWritten.units());
builder->appendNumber(kIdxEntryBytesWritten, idxEntriesWritten.bytes());
builder->appendNumber(kIdxEntryUnitsWritten, idxEntriesWritten.units());
+ builder->appendNumber(kTotalUnitsWritten, totalWritten.units());
}
void ResourceConsumption::AggregatedMetrics::toBson(BSONObjBuilder* builder) const {
@@ -193,6 +226,7 @@ void ResourceConsumption::OperationMetrics::toBsonNonZeroFields(BSONObjBuilder*
appendNonZeroMetric(builder, kDocUnitsWritten, writeMetrics.docsWritten.units());
appendNonZeroMetric(builder, kIdxEntryBytesWritten, writeMetrics.idxEntriesWritten.bytes());
appendNonZeroMetric(builder, kIdxEntryUnitsWritten, writeMetrics.idxEntriesWritten.units());
+ appendNonZeroMetric(builder, kTotalUnitsWritten, writeMetrics.totalWritten.units());
}
template <typename Func>
@@ -225,11 +259,17 @@ void ResourceConsumption::MetricsCollector::incrementDocUnitsReturned(
}
void ResourceConsumption::MetricsCollector::incrementOneDocWritten(size_t bytesWritten) {
- _doIfCollecting([&] { _metrics.writeMetrics.docsWritten.observeOne(bytesWritten); });
+ _doIfCollecting([&] {
+ _metrics.writeMetrics.docsWritten.observeOne(bytesWritten);
+ _metrics.writeMetrics.totalWritten.observeOneDocument(bytesWritten);
+ });
}
void ResourceConsumption::MetricsCollector::incrementOneIdxEntryWritten(size_t bytesWritten) {
- _doIfCollecting([&] { _metrics.writeMetrics.idxEntriesWritten.observeOne(bytesWritten); });
+ _doIfCollecting([&] {
+ _metrics.writeMetrics.idxEntriesWritten.observeOne(bytesWritten);
+ _metrics.writeMetrics.totalWritten.observeOneIndexEntry(bytesWritten);
+ });
}
void ResourceConsumption::MetricsCollector::beginScopedCollecting(OperationContext* opCtx,
diff --git a/src/mongo/db/stats/resource_consumption_metrics.h b/src/mongo/db/stats/resource_consumption_metrics.h
index 5e098b5c730..4aef396e6b6 100644
--- a/src/mongo/db/stats/resource_consumption_metrics.h
+++ b/src/mongo/db/stats/resource_consumption_metrics.h
@@ -113,6 +113,35 @@ public:
int unitSize() const final;
};
+ /** TotalUnitWriteCounter records the number of units of document plus associated indexes
+ * observed. */
+ class TotalUnitWriteCounter {
+ public:
+ void observeOneDocument(size_t datumBytes);
+ void observeOneIndexEntry(size_t datumBytes);
+
+ TotalUnitWriteCounter& operator+=(TotalUnitWriteCounter other) {
+ // Flush the accumulators, in case there is anything still pending.
+ other.observeOneDocument(0);
+ observeOneDocument(0);
+ _units += other._units;
+ return *this;
+ }
+
+ long long units() const {
+ // Flush the accumulators, in case there is anything still pending.
+ TotalUnitWriteCounter copy(*this);
+ copy.observeOneDocument(0);
+ return copy._units;
+ }
+
+ private:
+ int unitSize() const;
+ long long _accumulatedDocumentBytes = 0;
+ long long _accumulatedIndexBytes = 0;
+ long long _units = 0;
+ };
+
/** ReadMetrics maintains metrics for read operations. */
class ReadMetrics {
public:
@@ -158,6 +187,7 @@ public:
void add(const WriteMetrics& other) {
docsWritten += other.docsWritten;
idxEntriesWritten += other.idxEntriesWritten;
+ totalWritten += other.totalWritten;
}
WriteMetrics& operator+=(const WriteMetrics& other) {
@@ -174,6 +204,8 @@ public:
DocumentUnitCounter docsWritten;
// Number of index entries written
IdxEntryUnitCounter idxEntriesWritten;
+ // Number of total units written
+ TotalUnitWriteCounter totalWritten;
};
/**
diff --git a/src/mongo/db/stats/resource_consumption_metrics_test.cpp b/src/mongo/db/stats/resource_consumption_metrics_test.cpp
index 4598e411b71..8c7d8a756ef 100644
--- a/src/mongo/db/stats/resource_consumption_metrics_test.cpp
+++ b/src/mongo/db/stats/resource_consumption_metrics_test.cpp
@@ -492,6 +492,67 @@ TEST_F(ResourceConsumptionMetricsTest, DocumentUnitsWritten) {
ASSERT_EQ(metricsCopy["db1"].writeMetrics.docsWritten.units(), expectedUnits);
}
+TEST_F(ResourceConsumptionMetricsTest, TotalUnitsWritten) {
+ auto& globalResourceConsumption = ResourceConsumption::get(getServiceContext());
+ auto& operationMetrics = ResourceConsumption::MetricsCollector::get(_opCtx.get());
+
+ int expectedUnits = 0;
+
+ {
+ ResourceConsumption::ScopedMetricsCollector scope(_opCtx.get(), "db1");
+
+ // Each of these should be counted as 1 total unit (unit size = 128).
+ operationMetrics.incrementOneDocWritten(2);
+ operationMetrics.incrementOneDocWritten(4);
+ operationMetrics.incrementOneDocWritten(8);
+ operationMetrics.incrementOneDocWritten(16);
+ operationMetrics.incrementOneDocWritten(32);
+ operationMetrics.incrementOneDocWritten(64);
+ operationMetrics.incrementOneDocWritten(128);
+ expectedUnits += 7;
+
+ // Each of these should be counted as 2 total units (unit size = 128).
+ operationMetrics.incrementOneDocWritten(129);
+ operationMetrics.incrementOneDocWritten(200);
+ operationMetrics.incrementOneDocWritten(255);
+ operationMetrics.incrementOneDocWritten(256);
+ expectedUnits += 8;
+
+ // Each of these groups should be counted as 1 total unit, combining documents with index
+ // bytes written.
+
+ // Index writes prior to document write.
+ operationMetrics.incrementOneDocWritten(0);
+ operationMetrics.incrementOneIdxEntryWritten(2);
+ operationMetrics.incrementOneDocWritten(5);
+ expectedUnits += 1;
+
+ // Index writes after document write.
+ operationMetrics.incrementOneDocWritten(2);
+ operationMetrics.incrementOneIdxEntryWritten(126);
+ expectedUnits += 1;
+
+ // No index writes.
+ operationMetrics.incrementOneDocWritten(129);
+ expectedUnits += 2;
+
+ operationMetrics.incrementOneDocWritten(127);
+ operationMetrics.incrementOneIdxEntryWritten(1);
+ expectedUnits += 1;
+
+ // Exceeds unit size and thus counts as 2 units.
+ operationMetrics.incrementOneDocWritten(1);
+ operationMetrics.incrementOneIdxEntryWritten(1);
+ operationMetrics.incrementOneIdxEntryWritten(1);
+ operationMetrics.incrementOneIdxEntryWritten(1);
+ operationMetrics.incrementOneIdxEntryWritten(128);
+ expectedUnits += 2;
+ }
+
+ auto metricsCopy = globalResourceConsumption.getDbMetrics();
+ ASSERT_EQ(metricsCopy["db1"].writeMetrics.totalWritten.units(), expectedUnits);
+}
+
TEST_F(ResourceConsumptionMetricsTest, IdxEntryUnitsRead) {
auto& globalResourceConsumption = ResourceConsumption::get(getServiceContext());
auto& operationMetrics = ResourceConsumption::MetricsCollector::get(_opCtx.get());