summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorBrian DeLeonardis <brian.deleonardis@mongodb.com>2020-11-04 21:12:00 +0000
committerEvergreen Agent <no-reply@evergreen.mongodb.com>2020-11-12 01:45:39 +0000
commitd91d8ef3fbb1cb6bd164c5da97d2651318d36041 (patch)
tree122029d49c05e46a848e03af206cad5d47c91a91
parent990e7bab42431fed082b4d909220494be577d495 (diff)
downloadmongo-d91d8ef3fbb1cb6bd164c5da97d2651318d36041.tar.gz
SERVER-51724 Collect the number of cursor seeks per operation
-rw-r--r--jstests/core/profile_operation_metrics.js66
-rw-r--r--jstests/noPassthrough/aggregate_operation_metrics.js5
-rw-r--r--src/mongo/db/stats/resource_consumption_metrics.cpp7
-rw-r--r--src/mongo/db/stats/resource_consumption_metrics.h11
-rw-r--r--src/mongo/db/stats/resource_consumption_metrics_test.cpp35
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp19
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp27
7 files changed, 166 insertions, 4 deletions
diff --git a/jstests/core/profile_operation_metrics.js b/jstests/core/profile_operation_metrics.js
index 1bd780f3a61..3215a93b677 100644
--- a/jstests/core/profile_operation_metrics.js
+++ b/jstests/core/profile_operation_metrics.js
@@ -43,6 +43,7 @@ const assertMetricsExist = (profilerEntry) => {
assert.gte(metrics.idxEntryUnitsRead, 0);
assert.gte(metrics.keysSorted, 0);
assert.gte(metrics.docUnitsReturned, 0);
+ assert.gte(metrics.cursorSeeks, 0);
// Every test should perform enough work to be measured as non-zero CPU activity in
// nanoseconds.
@@ -104,6 +105,7 @@ const operations = [
assert.gt(profileDoc.docUnitsWritten, 0);
assert.eq(profileDoc.idxEntryBytesWritten, 0);
assert.eq(profileDoc.idxEntryUnitsWritten, 0);
+ assert.gt(profileDoc.cursorSeeks, 0);
}
},
{
@@ -125,6 +127,27 @@ const operations = [
assert.gt(profileDoc.docUnitsWritten, 0);
assert.eq(profileDoc.idxEntryBytesWritten, 0);
assert.eq(profileDoc.idxEntryUnitsWritten, 0);
+ assert.gt(profileDoc.cursorSeeks, 0);
+ }
+ },
+ {
+ name: 'findEmpty',
+ command: (db) => {
+ assert.eq(db[collName].find({a: 1}).itcount(), 0);
+ },
+ profileFilter: {op: 'query', 'command.find': collName, 'command.filter': {a: 1}},
+ profileAssert: (profileDoc) => {
+ assert.eq(profileDoc.docBytesRead, 0);
+ assert.eq(profileDoc.docUnitsRead, 0);
+ assert.eq(profileDoc.idxEntryBytesRead, 0);
+ assert.eq(profileDoc.idxEntryUnitsRead, 0);
+        // This tests to make sure we only increment the cursorSeeks counter if the cursor seek
+ // is successful. In this case, the seek is not successful because the index is empty.
+ assert.eq(profileDoc.cursorSeeks, 0);
+ assert.eq(profileDoc.docBytesWritten, 0);
+ assert.eq(profileDoc.docUnitsWritten, 0);
+ assert.eq(profileDoc.idxEntryBytesWritten, 0);
+ assert.eq(profileDoc.idxEntryUnitsWritten, 0);
}
},
{
@@ -139,6 +162,7 @@ const operations = [
assert.eq(profileDoc.docUnitsRead, 0);
assert.eq(profileDoc.idxEntryBytesRead, 0);
assert.eq(profileDoc.idxEntryUnitsRead, 0);
+ assert.eq(profileDoc.cursorSeeks, 0);
assert.eq(profileDoc.docBytesWritten, 29);
assert.eq(profileDoc.docUnitsWritten, 1);
assert.eq(profileDoc.idxEntryBytesWritten, 7);
@@ -168,6 +192,7 @@ const operations = [
assert.eq(profileDoc.docUnitsRead, 1);
assert.eq(profileDoc.idxEntryBytesRead, 3);
assert.eq(profileDoc.idxEntryUnitsRead, 1);
+ assert.eq(profileDoc.cursorSeeks, 2);
assert.eq(profileDoc.docBytesWritten, 0);
assert.eq(profileDoc.docUnitsWritten, 0);
assert.eq(profileDoc.idxEntryBytesWritten, 0);
@@ -186,6 +211,7 @@ const operations = [
assert.eq(profileDoc.docUnitsRead, 1);
assert.eq(profileDoc.idxEntryBytesRead, 0);
assert.eq(profileDoc.idxEntryUnitsRead, 0);
+ assert.eq(profileDoc.cursorSeeks, 0);
assert.eq(profileDoc.docBytesWritten, 0);
assert.eq(profileDoc.docUnitsWritten, 0);
assert.eq(profileDoc.idxEntryBytesWritten, 0);
@@ -204,6 +230,7 @@ const operations = [
assert.eq(profileDoc.docUnitsRead, 1);
assert.eq(profileDoc.idxEntryBytesRead, 0);
assert.eq(profileDoc.idxEntryUnitsRead, 0);
+ assert.eq(profileDoc.cursorSeeks, 0);
assert.eq(profileDoc.docBytesWritten, 0);
assert.eq(profileDoc.docUnitsWritten, 0);
assert.eq(profileDoc.idxEntryBytesWritten, 0);
@@ -222,6 +249,7 @@ const operations = [
assert.eq(profileDoc.docUnitsRead, 0);
assert.eq(profileDoc.idxEntryBytesRead, 3);
assert.eq(profileDoc.idxEntryUnitsRead, 1);
+ assert.eq(profileDoc.cursorSeeks, 2);
assert.eq(profileDoc.docBytesWritten, 0);
assert.eq(profileDoc.docUnitsWritten, 0);
assert.eq(profileDoc.idxEntryBytesWritten, 0);
@@ -240,9 +268,11 @@ const operations = [
if (!debugBuild) {
assert.eq(profileDoc.docBytesRead, 29);
assert.eq(profileDoc.docUnitsRead, 1);
+ assert.eq(profileDoc.cursorSeeks, 3);
} else {
assert.gte(profileDoc.docBytesRead, 29);
assert.gte(profileDoc.docUnitsRead, 1);
+ assert.gte(profileDoc.cursorSeeks, 3);
}
assert.eq(profileDoc.idxEntryBytesRead, 3);
assert.eq(profileDoc.idxEntryUnitsRead, 1);
@@ -267,9 +297,11 @@ const operations = [
if (!debugBuild) {
assert.eq(profileDoc.docUnitsRead, 1);
assert.eq(profileDoc.docBytesRead, 29);
+ assert.eq(profileDoc.cursorSeeks, 3);
} else {
assert.gte(profileDoc.docUnitsRead, 1);
assert.gte(profileDoc.docBytesRead, 29);
+ assert.gte(profileDoc.cursorSeeks, 3);
}
assert.eq(profileDoc.idxEntryBytesRead, 3);
assert.eq(profileDoc.idxEntryUnitsRead, 1);
@@ -294,6 +326,7 @@ const operations = [
assert.eq(profileDoc.docUnitsRead, 0);
assert.eq(profileDoc.idxEntryBytesRead, 0);
assert.eq(profileDoc.idxEntryUnitsRead, 0);
+ assert.eq(profileDoc.cursorSeeks, 0);
assert.eq(profileDoc.docBytesWritten, 0);
assert.eq(profileDoc.idxEntryBytesWritten, 0);
assert.eq(profileDoc.idxEntryUnitsWritten, 0);
@@ -311,6 +344,7 @@ const operations = [
assert.eq(profileDoc.docUnitsRead, 0);
assert.eq(profileDoc.idxEntryBytesRead, 0);
assert.eq(profileDoc.idxEntryUnitsRead, 0);
+ assert.eq(profileDoc.cursorSeeks, 0);
assert.eq(profileDoc.docBytesWritten, 0);
assert.eq(profileDoc.docUnitsWritten, 0);
assert.eq(profileDoc.idxEntryBytesWritten, 0);
@@ -332,6 +366,7 @@ const operations = [
assert.gt(profileDoc.docUnitsRead, 0);
assert.eq(profileDoc.idxEntryBytesRead, 0);
assert.eq(profileDoc.idxEntryUnitsRead, 0);
+ assert.eq(profileDoc.cursorSeeks, 0);
assert.eq(profileDoc.docBytesWritten, 0);
assert.eq(profileDoc.idxEntryBytesWritten, 0);
assert.eq(profileDoc.idxEntryUnitsWritten, 0);
@@ -367,6 +402,7 @@ const operations = [
assert.gt(profileDoc.docUnitsRead, 0);
assert.eq(profileDoc.idxEntryBytesRead, 0);
assert.eq(profileDoc.idxEntryUnitsRead, 0);
+ assert.gt(profileDoc.cursorSeeks, 0);
assert.gt(profileDoc.docBytesWritten, 0);
assert.gt(profileDoc.docUnitsWritten, 0);
assert.eq(profileDoc.idxEntryBytesWritten, 0);
@@ -390,9 +426,11 @@ const operations = [
if (!debugBuild) {
assert.eq(profileDoc.docBytesRead, 29);
assert.eq(profileDoc.docUnitsRead, 1);
+ assert.eq(profileDoc.cursorSeeks, 0);
} else {
assert.gte(profileDoc.docBytesRead, 29);
assert.gte(profileDoc.docUnitsRead, 1);
+ assert.gte(profileDoc.cursorSeeks, 0);
}
assert.eq(profileDoc.idxEntryBytesRead, 0);
assert.eq(profileDoc.idxEntryUnitsRead, 0);
@@ -412,9 +450,11 @@ const operations = [
if (!debugBuild) {
assert.eq(profileDoc.docBytesRead, 29);
assert.eq(profileDoc.docUnitsRead, 1);
+ assert.eq(profileDoc.cursorSeeks, 3);
} else {
assert.gte(profileDoc.docBytesRead, 29);
assert.gte(profileDoc.docUnitsRead, 1);
+ assert.gte(profileDoc.cursorSeeks, 3);
}
assert.eq(profileDoc.idxEntryBytesRead, 3);
assert.eq(profileDoc.idxEntryUnitsRead, 1);
@@ -435,9 +475,11 @@ const operations = [
if (!debugBuild) {
assert.eq(profileDoc.docBytesRead, 29);
assert.eq(profileDoc.docUnitsRead, 1);
+ assert.eq(profileDoc.cursorSeeks, 1);
} else {
assert.gte(profileDoc.docBytesRead, 29);
assert.gte(profileDoc.docUnitsRead, 1);
+ assert.gte(profileDoc.cursorSeeks, 1);
}
assert.eq(profileDoc.idxEntryBytesRead, 0);
assert.eq(profileDoc.idxEntryUnitsRead, 0);
@@ -460,6 +502,7 @@ const operations = [
assert.gt(profileDoc.docUnitsRead, 0);
assert.eq(profileDoc.idxEntryBytesRead, 0);
assert.eq(profileDoc.idxEntryUnitsRead, 0);
+ assert.gt(profileDoc.cursorSeeks, 0);
assert.gt(profileDoc.docBytesWritten, 0);
assert.gt(profileDoc.docUnitsWritten, 0);
assert.eq(profileDoc.idxEntryBytesWritten, 0);
@@ -484,6 +527,7 @@ const operations = [
assert.gt(profileDoc.docUnitsRead, 0);
assert.eq(profileDoc.idxEntryBytesRead, 0);
assert.eq(profileDoc.idxEntryUnitsRead, 0);
+ assert.eq(profileDoc.cursorSeeks, 0);
assert.eq(profileDoc.docBytesWritten, 0);
assert.eq(profileDoc.docUnitsWritten, 0);
assert.eq(profileDoc.idxEntryBytesWritten, 0);
@@ -505,6 +549,7 @@ const operations = [
assert.gt(profileDoc.docUnitsRead, 0);
assert.eq(profileDoc.idxEntryBytesRead, 0);
assert.eq(profileDoc.idxEntryUnitsRead, 0);
+ assert.eq(profileDoc.cursorSeeks, 0);
assert.eq(profileDoc.docBytesWritten, 0);
assert.eq(profileDoc.docUnitsWritten, 0);
assert.eq(profileDoc.idxEntryBytesWritten, 0);
@@ -525,6 +570,7 @@ const operations = [
// Reads the index entry for 'a' to determine uniqueness.
assert.eq(profileDoc.idxEntryBytesRead, 6);
assert.eq(profileDoc.idxEntryUnitsRead, 1);
+ assert.eq(profileDoc.cursorSeeks, 1);
assert.eq(profileDoc.docBytesWritten, 29);
assert.eq(profileDoc.docUnitsWritten, 1);
// Deletes one entry and writes another.
@@ -548,6 +594,7 @@ const operations = [
// Inserting into a unique index requires reading one key.
assert.eq(profileDoc.idxEntryBytesRead, 4);
assert.eq(profileDoc.idxEntryUnitsRead, 1);
+ assert.eq(profileDoc.cursorSeeks, 1);
// Despite failing to insert keys into the unique index, the operation first succeeded
// in writing to the collection. Even though the operation was rolled-back, this counts
// towards metrics.
@@ -569,9 +616,16 @@ const operations = [
if (!debugBuild) {
assert.eq(profileDoc.docBytesRead, 29);
assert.eq(profileDoc.docUnitsRead, 1);
+ // There are 4 seeks:
+ // 1) Reading the _id index.
+ // 2) Reading the document on the collection.
+ // 3) Reading the document again before updating.
+ // 4) Seeking on the _id index to check for uniqueness.
+ assert.eq(profileDoc.cursorSeeks, 4);
} else {
assert.gte(profileDoc.docBytesRead, 29);
assert.gte(profileDoc.docUnitsRead, 1);
+ assert.gte(profileDoc.cursorSeeks, 4);
}
// Reads index entries on '_id' for the lookup and 'a' to ensure uniqueness.
assert.eq(profileDoc.idxEntryBytesRead, 9);
@@ -603,9 +657,11 @@ const operations = [
if (!debugBuild) {
assert.eq(profileDoc.docBytesRead, 1050);
assert.eq(profileDoc.docUnitsRead, 9);
+ assert.eq(profileDoc.cursorSeeks, 4);
} else {
assert.gte(profileDoc.docBytesRead, 1050);
assert.gte(profileDoc.docUnitsRead, 9);
+ assert.gte(profileDoc.cursorSeeks, 4);
}
// Reads index entries on '_id' for the lookup and 'a' to ensure uniqueness.
assert.eq(profileDoc.idxEntryBytesRead, 10);
@@ -642,9 +698,11 @@ const operations = [
if (!debugBuild) {
assert.eq(profileDoc.docBytesRead, 29);
assert.eq(profileDoc.docUnitsRead, 1);
+ assert.eq(profileDoc.cursorSeeks, 2);
} else {
assert.gte(profileDoc.docBytesRead, 29);
assert.gte(profileDoc.docUnitsRead, 1);
+ assert.gte(profileDoc.cursorSeeks, 2);
}
assert.eq(profileDoc.idxEntryBytesRead, 4);
assert.eq(profileDoc.idxEntryUnitsRead, 1);
@@ -672,6 +730,7 @@ const operations = [
assert.eq(profileDoc.docUnitsRead, 0);
assert.eq(profileDoc.idxEntryBytesRead, 0);
assert.eq(profileDoc.idxEntryUnitsRead, 0);
+ assert.eq(profileDoc.cursorSeeks, 0);
assert.eq(profileDoc.docBytesWritten, 29);
assert.eq(profileDoc.docUnitsWritten, 1);
assert.eq(profileDoc.idxEntryBytesWritten, 3);
@@ -691,6 +750,7 @@ const operations = [
assert.eq(profileDoc.docUnitsRead, 1);
assert.eq(profileDoc.idxEntryBytesRead, 3);
assert.eq(profileDoc.idxEntryUnitsRead, 1);
+ assert.eq(profileDoc.cursorSeeks, 2);
assert.eq(profileDoc.docBytesWritten, 0);
assert.eq(profileDoc.docUnitsWritten, 0);
assert.eq(profileDoc.idxEntryBytesWritten, 0);
@@ -720,6 +780,7 @@ const operations = [
assert.eq(profileDoc.docUnitsRead, 1);
assert.eq(profileDoc.idxEntryBytesRead, 0);
assert.eq(profileDoc.idxEntryUnitsRead, 0);
+ assert.eq(profileDoc.cursorSeeks, 0);
assert.eq(profileDoc.docBytesWritten, 0);
assert.eq(profileDoc.docUnitsWritten, 0);
assert.eq(profileDoc.idxEntryBytesWritten, 0);
@@ -738,9 +799,11 @@ const operations = [
if (!debugBuild) {
assert.eq(profileDoc.docBytesRead, 29);
assert.eq(profileDoc.docUnitsRead, 1);
+ assert.eq(profileDoc.cursorSeeks, 2);
} else {
assert.gte(profileDoc.docBytesRead, 29);
assert.gte(profileDoc.docUnitsRead, 1);
+ assert.gte(profileDoc.cursorSeeks, 2);
}
assert.eq(profileDoc.idxEntryBytesRead, 3);
assert.eq(profileDoc.idxEntryUnitsRead, 1);
@@ -762,9 +825,11 @@ const operations = [
if (!debugBuild) {
assert.eq(profileDoc.docBytesRead, 29);
assert.eq(profileDoc.docUnitsRead, 1);
+ assert.eq(profileDoc.cursorSeeks, 3);
} else {
assert.gte(profileDoc.docBytesRead, 29);
assert.gte(profileDoc.docUnitsRead, 1);
+ assert.gte(profileDoc.cursorSeeks, 3);
}
assert.eq(profileDoc.idxEntryBytesRead, 3);
assert.eq(profileDoc.idxEntryUnitsRead, 1);
@@ -791,6 +856,7 @@ const operations = [
assert.eq(profileDoc.docUnitsRead, 0);
assert.eq(profileDoc.idxEntryBytesRead, 0);
assert.eq(profileDoc.idxEntryUnitsRead, 0);
+ assert.eq(profileDoc.cursorSeeks, 0);
assert.eq(profileDoc.docBytesWritten, 2900);
assert.eq(profileDoc.docUnitsWritten, 100);
assert.eq(profileDoc.idxEntryBytesWritten, 299);
diff --git a/jstests/noPassthrough/aggregate_operation_metrics.js b/jstests/noPassthrough/aggregate_operation_metrics.js
index 4a893f59b30..bd9a560b982 100644
--- a/jstests/noPassthrough/aggregate_operation_metrics.js
+++ b/jstests/noPassthrough/aggregate_operation_metrics.js
@@ -32,6 +32,7 @@ let assertMetricsExist = function(metrics) {
assert.gte(readMetrics.idxEntryUnitsRead, 0);
assert.gte(readMetrics.keysSorted, 0);
assert.gte(readMetrics.docUnitsReturned, 0);
+ assert.gte(readMetrics.cursorSeeks, 0);
});
assert.gte(metrics.cpuNanos, 0);
@@ -124,8 +125,10 @@ const secondary = rst.getSecondary();
[db1Name, db2Name].forEach((db) => {
assert.gt(allMetrics[db].primaryMetrics.docBytesRead, 0);
assert.gt(allMetrics[db].primaryMetrics.docUnitsRead, 0);
+ assert.eq(allMetrics[db].primaryMetrics.cursorSeeks, 0);
assert.eq(allMetrics[db].secondaryMetrics.docBytesRead, 0);
assert.eq(allMetrics[db].secondaryMetrics.docUnitsRead, 0);
+ assert.eq(allMetrics[db].secondaryMetrics.cursorSeeks, 0);
});
assert.eq(allMetrics[db1Name].primaryMetrics.docBytesRead,
allMetrics[db2Name].primaryMetrics.docBytesRead);
@@ -134,8 +137,10 @@ const secondary = rst.getSecondary();
[db1Name, db2Name].forEach((db) => {
assert.gt(allMetrics[db].secondaryMetrics.docBytesRead, 0);
assert.gt(allMetrics[db].secondaryMetrics.docUnitsRead, 0);
+ assert.eq(allMetrics[db].secondaryMetrics.cursorSeeks, 0);
assert.eq(allMetrics[db].primaryMetrics.docBytesRead, 0);
assert.eq(allMetrics[db].primaryMetrics.docUnitsRead, 0);
+ assert.eq(allMetrics[db].primaryMetrics.cursorSeeks, 0);
});
assert.eq(allMetrics[db1Name].secondaryMetrics.docBytesRead,
allMetrics[db2Name].secondaryMetrics.docBytesRead);
diff --git a/src/mongo/db/stats/resource_consumption_metrics.cpp b/src/mongo/db/stats/resource_consumption_metrics.cpp
index 90d278ea072..072488d593d 100644
--- a/src/mongo/db/stats/resource_consumption_metrics.cpp
+++ b/src/mongo/db/stats/resource_consumption_metrics.cpp
@@ -58,6 +58,7 @@ static const char kDocUnitsWritten[] = "docUnitsWritten";
static const char kIdxEntryBytesWritten[] = "idxEntryBytesWritten";
static const char kIdxEntryUnitsWritten[] = "idxEntryUnitsWritten";
static const char kDocUnitsReturned[] = "docUnitsReturned";
+static const char kCursorSeeks[] = "cursorSeeks";
inline void appendNonZeroMetric(BSONObjBuilder* builder, const char* name, long long value) {
if (value != 0) {
@@ -121,6 +122,7 @@ void ResourceConsumption::ReadMetrics::toBson(BSONObjBuilder* builder) const {
builder->appendNumber(kIdxEntryUnitsRead, idxEntryUnitsRead);
builder->appendNumber(kKeysSorted, keysSorted);
builder->appendNumber(kDocUnitsReturned, docUnitsReturned);
+ builder->appendNumber(kCursorSeeks, cursorSeeks);
}
void ResourceConsumption::WriteMetrics::toBson(BSONObjBuilder* builder) const {
@@ -162,6 +164,7 @@ void ResourceConsumption::OperationMetrics::toBsonNonZeroFields(BSONObjBuilder*
appendNonZeroMetric(builder, kIdxEntryUnitsRead, readMetrics.idxEntryUnitsRead);
appendNonZeroMetric(builder, kKeysSorted, readMetrics.keysSorted);
appendNonZeroMetric(builder, kDocUnitsReturned, readMetrics.docUnitsReturned);
+ appendNonZeroMetric(builder, kCursorSeeks, readMetrics.cursorSeeks);
if (cpuTimer) {
appendNonZeroMetric(builder, kCpuNanos, durationCount<Nanoseconds>(cpuTimer->getElapsed()));
@@ -247,6 +250,10 @@ bool ResourceConsumption::MetricsCollector::endScopedCollecting() {
return wasCollecting;
}
+void ResourceConsumption::MetricsCollector::incrementOneCursorSeek() {
+ _doIfCollecting([&] { _metrics.readMetrics.cursorSeeks++; });
+}
+
ResourceConsumption::ScopedMetricsCollector::ScopedMetricsCollector(OperationContext* opCtx,
const std::string& dbName,
bool commandCollectsMetrics)
diff --git a/src/mongo/db/stats/resource_consumption_metrics.h b/src/mongo/db/stats/resource_consumption_metrics.h
index 5e49b6f5f9a..9ca51be3fa8 100644
--- a/src/mongo/db/stats/resource_consumption_metrics.h
+++ b/src/mongo/db/stats/resource_consumption_metrics.h
@@ -60,6 +60,7 @@ public:
idxEntryUnitsRead += other.idxEntryUnitsRead;
keysSorted += other.keysSorted;
docUnitsReturned += other.docUnitsReturned;
+ cursorSeeks += other.cursorSeeks;
}
ReadMetrics& operator+=(const ReadMetrics& other) {
@@ -84,6 +85,8 @@ public:
long long keysSorted = 0;
// Number of document units returned by a query
long long docUnitsReturned = 0;
+ // Number of cursor seeks
+ long long cursorSeeks = 0;
};
/* WriteMetrics maintains metrics for write operations. */
@@ -277,6 +280,14 @@ public:
*/
void incrementOneIdxEntryWritten(size_t idxEntryBytesWritten);
+ /**
+ * This should be called once every time the storage engine successfully does a cursor seek.
+ * Note that if it takes multiple attempts to do a successful seek, this function should
+ * only be called once. If the seek does not find anything, this function should not be
+ * called.
+ */
+ void incrementOneCursorSeek();
+
private:
/**
* Helper function that calls the Func when this collector is currently collecting metrics.
diff --git a/src/mongo/db/stats/resource_consumption_metrics_test.cpp b/src/mongo/db/stats/resource_consumption_metrics_test.cpp
index fea3bb0a456..b53e754b032 100644
--- a/src/mongo/db/stats/resource_consumption_metrics_test.cpp
+++ b/src/mongo/db/stats/resource_consumption_metrics_test.cpp
@@ -225,6 +225,7 @@ TEST_F(ResourceConsumptionMetricsTest, IncrementReadMetrics) {
operationMetrics.incrementOneIdxEntryRead(_opCtx.get(), 8);
operationMetrics.incrementKeysSorted(_opCtx.get(), 16);
operationMetrics.incrementDocUnitsReturned(_opCtx.get(), 32);
+ operationMetrics.incrementOneCursorSeek();
}
ASSERT(operationMetrics.hasCollectedMetrics());
@@ -236,6 +237,7 @@ TEST_F(ResourceConsumptionMetricsTest, IncrementReadMetrics) {
ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.idxEntryUnitsRead, 1);
ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.keysSorted, 16);
ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.docUnitsReturned, 32);
+ ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.cursorSeeks, 1);
// Clear metrics so we do not double-count.
operationMetrics.reset();
@@ -247,6 +249,7 @@ TEST_F(ResourceConsumptionMetricsTest, IncrementReadMetrics) {
operationMetrics.incrementOneIdxEntryRead(_opCtx.get(), 128);
operationMetrics.incrementKeysSorted(_opCtx.get(), 256);
operationMetrics.incrementDocUnitsReturned(_opCtx.get(), 512);
+ operationMetrics.incrementOneCursorSeek();
}
metricsCopy = globalResourceConsumption.getDbMetrics();
@@ -256,6 +259,7 @@ TEST_F(ResourceConsumptionMetricsTest, IncrementReadMetrics) {
ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.idxEntryUnitsRead, 1 + 8);
ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.keysSorted, 16 + 256);
ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.docUnitsReturned, 32 + 512);
+ ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.cursorSeeks, 1 + 1);
}
TEST_F(ResourceConsumptionMetricsTest, IncrementReadMetricsSecondary) {
@@ -272,6 +276,7 @@ TEST_F(ResourceConsumptionMetricsTest, IncrementReadMetricsSecondary) {
operationMetrics.incrementOneIdxEntryRead(_opCtx.get(), 8);
operationMetrics.incrementKeysSorted(_opCtx.get(), 16);
operationMetrics.incrementDocUnitsReturned(_opCtx.get(), 32);
+ operationMetrics.incrementOneCursorSeek();
}
auto metricsCopy = globalResourceConsumption.getDbMetrics();
@@ -281,6 +286,7 @@ TEST_F(ResourceConsumptionMetricsTest, IncrementReadMetricsSecondary) {
ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.idxEntryUnitsRead, 1);
ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.keysSorted, 16);
ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.docUnitsReturned, 32);
+ ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.cursorSeeks, 1);
// Clear metrics so we do not double-count.
operationMetrics.reset();
@@ -292,6 +298,7 @@ TEST_F(ResourceConsumptionMetricsTest, IncrementReadMetricsSecondary) {
operationMetrics.incrementOneIdxEntryRead(_opCtx.get(), 128);
operationMetrics.incrementKeysSorted(_opCtx.get(), 256);
operationMetrics.incrementDocUnitsReturned(_opCtx.get(), 512);
+ operationMetrics.incrementOneCursorSeek();
}
metricsCopy = globalResourceConsumption.getDbMetrics();
@@ -301,6 +308,7 @@ TEST_F(ResourceConsumptionMetricsTest, IncrementReadMetricsSecondary) {
ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.idxEntryUnitsRead, 1 + 8);
ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.keysSorted, 16 + 256);
ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.docUnitsReturned, 32 + 512);
+ ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.cursorSeeks, 1 + 1);
}
TEST_F(ResourceConsumptionMetricsTest, IncrementReadMetricsAcrossStates) {
@@ -316,6 +324,7 @@ TEST_F(ResourceConsumptionMetricsTest, IncrementReadMetricsAcrossStates) {
operationMetrics.incrementOneIdxEntryRead(_opCtx.get(), 8);
operationMetrics.incrementKeysSorted(_opCtx.get(), 16);
operationMetrics.incrementDocUnitsReturned(_opCtx.get(), 32);
+ operationMetrics.incrementOneCursorSeek();
ASSERT_OK(repl::ReplicationCoordinator::get(_opCtx.get())
->setFollowerMode(repl::MemberState::RS_SECONDARY));
@@ -324,6 +333,7 @@ TEST_F(ResourceConsumptionMetricsTest, IncrementReadMetricsAcrossStates) {
operationMetrics.incrementOneIdxEntryRead(_opCtx.get(), 128);
operationMetrics.incrementKeysSorted(_opCtx.get(), 256);
operationMetrics.incrementDocUnitsReturned(_opCtx.get(), 512);
+ operationMetrics.incrementOneCursorSeek();
}
auto metricsCopy = globalResourceConsumption.getAndClearDbMetrics();
@@ -333,12 +343,14 @@ TEST_F(ResourceConsumptionMetricsTest, IncrementReadMetricsAcrossStates) {
ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.idxEntryUnitsRead, 0);
ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.keysSorted, 0);
ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.docUnitsReturned, 0);
+ ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.cursorSeeks, 0);
ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.docBytesRead, 2 + 32);
ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.docUnitsRead, 2);
ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.idxEntryBytesRead, 8 + 128);
ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.idxEntryUnitsRead, 1 + 8);
ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.keysSorted, 16 + 256);
ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.docUnitsReturned, 32 + 512);
+ ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.cursorSeeks, 1 + 1);
operationMetrics.reset();
@@ -351,6 +363,7 @@ TEST_F(ResourceConsumptionMetricsTest, IncrementReadMetricsAcrossStates) {
operationMetrics.incrementOneIdxEntryRead(_opCtx.get(), 8);
operationMetrics.incrementKeysSorted(_opCtx.get(), 16);
operationMetrics.incrementDocUnitsReturned(_opCtx.get(), 32);
+ operationMetrics.incrementOneCursorSeek();
ASSERT_OK(repl::ReplicationCoordinator::get(_opCtx.get())
->setFollowerMode(repl::MemberState::RS_PRIMARY));
@@ -359,6 +372,7 @@ TEST_F(ResourceConsumptionMetricsTest, IncrementReadMetricsAcrossStates) {
operationMetrics.incrementOneIdxEntryRead(_opCtx.get(), 128);
operationMetrics.incrementKeysSorted(_opCtx.get(), 256);
operationMetrics.incrementDocUnitsReturned(_opCtx.get(), 512);
+ operationMetrics.incrementOneCursorSeek();
}
metricsCopy = globalResourceConsumption.getAndClearDbMetrics();
@@ -368,12 +382,14 @@ TEST_F(ResourceConsumptionMetricsTest, IncrementReadMetricsAcrossStates) {
ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.idxEntryUnitsRead, 1 + 8);
ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.keysSorted, 16 + 256);
ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.docUnitsReturned, 32 + 512);
+ ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.cursorSeeks, 1 + 1);
ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.docBytesRead, 0);
ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.docUnitsRead, 0);
ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.idxEntryBytesRead, 0);
ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.idxEntryUnitsRead, 0);
ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.keysSorted, 0);
ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.docUnitsReturned, 0);
+ ASSERT_EQ(metricsCopy["db1"].secondaryReadMetrics.cursorSeeks, 0);
}
TEST_F(ResourceConsumptionMetricsTest, DocumentUnitsRead) {
@@ -596,4 +612,23 @@ TEST_F(ResourceConsumptionMetricsTest, CpuNanos) {
globalCpuTime = globalResourceConsumption.getCpuTime();
ASSERT_EQ(Nanoseconds(0), globalCpuTime);
}
+
+TEST_F(ResourceConsumptionMetricsTest, CursorSeeks) {
+ auto& globalResourceConsumption = ResourceConsumption::get(getServiceContext());
+ auto& operationMetrics = ResourceConsumption::MetricsCollector::get(_opCtx.get());
+
+ int expectedSeeks = 0;
+
+ {
+ ResourceConsumption::ScopedMetricsCollector scope(_opCtx.get(), "db1");
+ operationMetrics.incrementOneCursorSeek();
+ operationMetrics.incrementOneCursorSeek();
+ operationMetrics.incrementOneCursorSeek();
+
+ expectedSeeks += 3;
+ }
+
+ auto metricsCopy = globalResourceConsumption.getDbMetrics();
+ ASSERT_EQ(metricsCopy["db1"].primaryReadMetrics.cursorSeeks, expectedSeeks);
+}
} // namespace mongo
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp
index 7c0aef75b35..0c8b296fc12 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp
@@ -497,6 +497,9 @@ bool WiredTigerIndex::isDup(OperationContext* opCtx, WT_CURSOR* c, const KeyStri
}
invariantWTOK(ret);
+ auto& metricsCollector = ResourceConsumption::MetricsCollector::get(opCtx);
+ metricsCollector.incrementOneCursorSeek();
+
// If the key exists, check if we already have this id at this key. If so, we don't
// consider that to be a dup.
WT_ITEM value;
@@ -1138,6 +1141,10 @@ protected:
return false;
}
invariantWTOK(ret);
+
+ auto& metricsCollector = ResourceConsumption::MetricsCollector::get(_opCtx);
+ metricsCollector.incrementOneCursorSeek();
+
_cursorAtEof = false;
LOGV2_TRACE_CURSOR(20089, "cmp: {cmp}", "cmp"_attr = cmp);
@@ -1440,6 +1447,9 @@ bool WiredTigerIndexUnique::_keyExists(OperationContext* opCtx,
return false;
invariantWTOK(ret);
+ auto& metricsCollector = ResourceConsumption::MetricsCollector::get(opCtx);
+ metricsCollector.incrementOneCursorSeek();
+
if (cmp == 0)
return true;
@@ -1554,6 +1564,8 @@ Status WiredTigerIndexUnique::_insertTimestampUnsafe(OperationContext* opCtx,
ret = wiredTigerPrepareConflictRetry(opCtx, [&] { return c->search(c); });
invariantWTOK(ret);
+ metricsCollector.incrementOneCursorSeek();
+
WT_ITEM old;
invariantWTOK(c->get_value(c, &old));
@@ -1721,6 +1733,10 @@ void WiredTigerIndexUnique::_unindexTimestampUnsafe(OperationContext* opCtx,
return;
}
invariantWTOK(ret);
+
+ auto& metricsCollector = ResourceConsumption::MetricsCollector::get(opCtx);
+ metricsCollector.incrementOneCursorSeek();
+
WT_ITEM value;
invariantWTOK(c->get_value(c, &value));
BufReader br(value.data, value.size);
@@ -1756,6 +1772,9 @@ void WiredTigerIndexUnique::_unindexTimestampUnsafe(OperationContext* opCtx,
}
invariantWTOK(ret);
+ auto& metricsCollector = ResourceConsumption::MetricsCollector::get(opCtx);
+ metricsCollector.incrementOneCursorSeek();
+
WT_ITEM old;
invariantWTOK(c->get_value(c, &old));
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp
index 02aad79e6ed..09a474522ff 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp
@@ -1030,9 +1030,12 @@ bool WiredTigerRecordStore::findRecord(OperationContext* opCtx,
return false;
}
invariantWTOK(ret);
- *out = _getData(curwrap);
auto& metricsCollector = ResourceConsumption::MetricsCollector::get(opCtx);
+ metricsCollector.incrementOneCursorSeek();
+
+ *out = _getData(curwrap);
+
metricsCollector.incrementOneDocRead(opCtx, out->size());
return true;
@@ -1056,6 +1059,9 @@ void WiredTigerRecordStore::deleteRecord(OperationContext* opCtx, const RecordId
int ret = wiredTigerPrepareConflictRetry(opCtx, [&] { return c->search(c); });
invariantWTOK(ret);
+ auto& metricsCollector = ResourceConsumption::MetricsCollector::get(opCtx);
+ metricsCollector.incrementOneCursorSeek();
+
WT_ITEM old_value;
ret = c->get_value(c, &old_value);
invariantWTOK(ret);
@@ -1065,7 +1071,6 @@ void WiredTigerRecordStore::deleteRecord(OperationContext* opCtx, const RecordId
ret = WT_OP_CHECK(wiredTigerCursorRemove(opCtx, c));
invariantWTOK(ret);
- auto& metricsCollector = ResourceConsumption::MetricsCollector::get(opCtx);
metricsCollector.incrementOneDocWritten(old_length);
_changeNumRecords(opCtx, -1);
@@ -1173,6 +1178,9 @@ void WiredTigerRecordStore::_positionAtFirstRecordId(OperationContext* opCtx,
opCtx, [&] { return cursor->search_near(cursor, &cmp); });
invariantWTOK(ret);
+ auto& metricsCollector = ResourceConsumption::MetricsCollector::get(opCtx);
+ metricsCollector.incrementOneCursorSeek();
+
// This is (or was) the first recordId, so it should never be the case that we have a
// RecordId before that.
invariant(cmp >= 0);
@@ -1221,6 +1229,10 @@ int64_t WiredTigerRecordStore::_cappedDeleteAsNeeded_inlock(OperationContext* op
setKey(truncateEnd, _cappedFirstRecord);
ret = wiredTigerPrepareConflictRetry(opCtx,
[&] { return truncateEnd->search(truncateEnd); });
+
+ auto& metricsCollector = ResourceConsumption::MetricsCollector::get(opCtx);
+ metricsCollector.incrementOneCursorSeek();
+
if (ret == 0) {
positioned = true;
savedFirstKey = _cappedFirstRecord;
@@ -1616,6 +1628,9 @@ Status WiredTigerRecordStore::updateRecord(OperationContext* opCtx,
int ret = wiredTigerPrepareConflictRetry(opCtx, [&] { return c->search(c); });
invariantWTOK(ret);
+ auto& metricsCollector = ResourceConsumption::MetricsCollector::get(opCtx);
+ metricsCollector.incrementOneCursorSeek();
+
WT_ITEM old_value;
ret = c->get_value(c, &old_value);
invariantWTOK(ret);
@@ -1637,7 +1652,6 @@ Status WiredTigerRecordStore::updateRecord(OperationContext* opCtx,
const int kMaxEntries = 16;
const int kMaxDiffBytes = len / 10;
- auto& metricsCollector = ResourceConsumption::MetricsCollector::get(opCtx);
bool skip_update = false;
if (!_isLogged && len > kMinLengthForDiff && len <= old_length + kMaxDiffBytes) {
int nentries = kMaxEntries;
@@ -2265,10 +2279,12 @@ boost::optional<Record> WiredTigerRecordStoreCursorBase::seekExact(const RecordI
}
invariantWTOK(seekRet);
+ auto& metricsCollector = ResourceConsumption::MetricsCollector::get(_opCtx);
+ metricsCollector.incrementOneCursorSeek();
+
WT_ITEM value;
invariantWTOK(c->get_value(c, &value));
- auto& metricsCollector = ResourceConsumption::MetricsCollector::get(_opCtx);
metricsCollector.incrementOneDocRead(_opCtx, value.size);
_lastReturnedId = id;
@@ -2513,6 +2529,9 @@ void WiredTigerRecordStorePrefixedCursor::initCursorToBeginning() {
}
invariantWTOK(err);
+ auto& metricsCollector = ResourceConsumption::MetricsCollector::get(_opCtx);
+ metricsCollector.incrementOneCursorSeek();
+
RecordId recordId;
if (_forward) {
invariant(exact != 0); // `RecordId::min` cannot exist.