author     Maddie Zechar <mez2113@columbia.edu>              2022-11-15 04:59:03 +0000
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>  2022-11-15 05:29:18 +0000
commit     148c522ffefc93dcc8227579b583691335cd4f2f (patch)
tree       e2c3dfb18abe2e60d71f11f810b9b4fb3dbfd4b9
parent     0fff039b24fa1fdf14d940c5de7dbfc90e59d5a6 (diff)
download   mongo-148c522ffefc93dcc8227579b583691335cd4f2f.tar.gz
SERVER-71232 Telemetry end to end test
-rw-r--r--  jstests/noPassthrough/telemetry_cache_metrics.js             | 76
-rw-r--r--  jstests/telemetry/query_planning_time_metric.js              |  2
-rw-r--r--  jstests/telemetry/telemetry_metrics_across_getMore_calls.js  | 86
3 files changed, 87 insertions(+), 77 deletions(-)
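This commit replaces the deleted noPassthrough telemetry test with a new jstests/telemetry test that checks telemetry metrics are aggregated correctly across getMore() calls. For orientation, the tests below record entries by running queries with a small batch size (with telemetry sampling enabled via the internalQueryConfigureTelemetrySamplingRate server parameter, as the new test's setup shows) and then read them back through the $telemetry aggregation stage on the admin database. A minimal shell sketch of that pattern, with an illustrative collection name and field, looks like this:

    // Force several getMore() calls by using a small batch size.
    db.example.aggregate([{$match: {foo: 1}}], {cursor: {batchSize: 2}}).toArray();
    // Read every entry in the telemetry store; each entry carries aggregated metrics
    // such as execCount, docsScanned, queryOptMicros, and queryExecMicros.
    const telStore = db.adminCommand({aggregate: 1, pipeline: [{$telemetry: {}}], cursor: {}});
    printjson(telStore.cursor.firstBatch);
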
diff --git a/jstests/noPassthrough/telemetry_cache_metrics.js b/jstests/noPassthrough/telemetry_cache_metrics.js
deleted file mode 100644
index 2a261a0cbc6..00000000000
--- a/jstests/noPassthrough/telemetry_cache_metrics.js
+++ /dev/null
@@ -1,76 +0,0 @@
-/**
- * Test that the telemetry metrics are updated correctly and persist across getMores.
- */
-(function() {
-"use strict";
-
-const conn = MongoRunner.runMongod({});
-const db = conn.getDB('test');
-
-var coll = db[jsTestName()];
-var collTwo = db[jsTestName() + 'Two'];
-coll.drop();
-
-for (var i = 0; i < 100; i++) {
- coll.insert({foo: 0});
- coll.insert({foo: 1});
- collTwo.insert({foo: Math.random(0, 1), bar: Math.random(0, 1)});
-}
-
-function verifyTelemetryMetrics() {
- const telStore = db.adminCommand({aggregate: 1, pipeline: [{$telemetry: {}}], cursor: {}});
- // print(tojson(telStore));
- const metrics = telStore.cursor.firstBatch[0].metrics;
- print(tojson(metrics));
- assert(metrics.execCount > 0);
- assert(metrics.firstSeenTimestamp);
- // assert(metrics.lastExecutionMicros > 0);
- // assert(metrics.queryOptMicros.sum > 0);
- // assert(metrics.queryExecMicros.sum > 0);
- // assert(metrics.docsReturned.sum > 0);
- // assert(metrics.docsScanned.sum > 0);
- // assert(metrics.keysScanned.sum > 0);
-}
-
-let query;
-
-// agg query
-query = {
- $setWindowFields: {
- sortBy: {_id: 1},
- output: {foo: {$linearFill: "$foo"}},
- }
-};
-coll.aggregate([query]);
-verifyTelemetryMetrics();
-
-// agg query with some stages pushed to find layer.
-coll.aggregate([{$match: {foo: 0}}, {$group: {_id: null, count: {$sum: 1}}}]);
-verifyTelemetryMetrics();
-
-// agg query with all stages pushed to find layer.
-coll.aggregate([{$sort: {foo: 1}}]);
-verifyTelemetryMetrics();
-
-// multiple batches require multiple plan executors. We want to confirm we are only storing the
-// metrics for the outer executor associated with planning the query, and not a subsequent executor
-// that is constructed when a new operation context gets created during getMore() calls.
-// coll.aggregate([{$unionWith: collTwo.getName()}], {cursor: {batchSize: 2}});
-// verifyTelemetryMetrics();
-
-// $lookup has inner executor (cursor??), we want to confirm we are only reporting metrics from the
-// outer executor associated with planning the query.
-coll.aggregate({
- $lookup: {from: collTwo.getName(), localField: "foo", foreignField: "bar", as: "merged_docs"}
-});
-verifyTelemetryMetrics();
-
-// Count and find have different entry points (eg different run() methods) from agg and we want to
-// confirm we are starting the timer as planning begins in each of these workflows/paths.
-coll.count({foo: 0});
-verifyTelemetryMetrics();
-
-query = coll.findOne({});
-verifyTelemetryMetrics(query);
-MongoRunner.stopMongod(conn);
-})();
diff --git a/jstests/telemetry/query_planning_time_metric.js b/jstests/telemetry/query_planning_time_metric.js
index 6cfafe8fdc7..1d2cf1ce5af 100644
--- a/jstests/telemetry/query_planning_time_metric.js
+++ b/jstests/telemetry/query_planning_time_metric.js
@@ -43,7 +43,7 @@ verifyProfilerLog(commandProfilerFilter);
// multiple batches require multiple plan executors. We want to confirm we are only storing the
// metrics for the outer executor associated with planning the query, and not a subsequent executor
// that is constructed when a new operation context gets created during getMore() calls.
-coll.aggregate([{$unionWith: collTwo.getName()}], {cursor: {batchSize: 2}});
+coll.aggregate([{$unionWith: {coll: collTwo.getName()}}], {cursor: {batchSize: 2}});
verifyProfilerLog(commandProfilerFilter);
// $lookup has inner executor (cursor??), we want to confirm we are only reporting metrics from the
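For context on the one-line change above: $unionWith accepts either a bare collection name or a document form, and the document form additionally allows an inner pipeline. A brief illustrative sketch (collection names and the $match filter are placeholders from the test's own fixtures):

    // Shorthand form: include every document from the other collection.
    coll.aggregate([{$unionWith: collTwo.getName()}]);
    // Document form: equivalent here, and optionally filtered by an inner pipeline.
    coll.aggregate([{$unionWith: {coll: collTwo.getName(), pipeline: [{$match: {bar: 1}}]}}]);
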
diff --git a/jstests/telemetry/telemetry_metrics_across_getMore_calls.js b/jstests/telemetry/telemetry_metrics_across_getMore_calls.js
new file mode 100644
index 00000000000..c36bc61604c
--- /dev/null
+++ b/jstests/telemetry/telemetry_metrics_across_getMore_calls.js
@@ -0,0 +1,86 @@
+/**
+ * Test that the telemetry metrics are updated correctly across getMores.
+ */
+load('jstests/libs/analyze_plan.js');
+load("jstests/libs/profiler.js"); // For getLatestProfilerEntry.
+
+(function() {
+"use strict";
+
+// Turn on the collection of telemetry metrics.
+let options = {
+ setParameter: "internalQueryConfigureTelemetrySamplingRate=2147483647",
+};
+
+const conn = MongoRunner.runMongod(options);
+const testDB = conn.getDB('test');
+var coll = testDB[jsTestName()];
+var collTwo = testDB[jsTestName() + 'Two'];
+coll.drop();
+
+function verifyMetrics(batch) {
+ batch.forEach(element => {
+ assert(element.metrics.docsScanned.sum > element.metrics.docsScanned.min);
+ assert(element.metrics.docsScanned.sum >= element.metrics.docsScanned.max);
+ assert(element.metrics.docsScanned.min <= element.metrics.docsScanned.max);
+
+ // Ensure execution count does not increase with subsequent getMore() calls.
+        assert.eq(element.metrics.execCount.sum, element.metrics.execCount.min);
+        assert.eq(element.metrics.execCount.sum, element.metrics.execCount.max);
+
+ if (element.metrics.execCount === 1) {
+            // Ensure planning time is > 0 after the first batch and does not change with
+            // subsequent getMore() calls.
+            assert(element.metrics.queryOptMicros.min > 0);
+            assert.eq(element.metrics.queryOptMicros.sum, element.metrics.queryOptMicros.min);
+            assert.eq(element.metrics.queryOptMicros.sum, element.metrics.queryOptMicros.max);
+ }
+ // Confirm that execution time increases with getMore() calls
+ assert(element.metrics.queryExecMicros.sum > element.metrics.queryExecMicros.min);
+ assert(element.metrics.queryExecMicros.sum > element.metrics.queryExecMicros.max);
+ assert(element.metrics.queryExecMicros.min <= element.metrics.queryExecMicros.max);
+ });
+}
+
+for (var i = 0; i < 200; i++) {
+ coll.insert({foo: 0, bar: Math.floor(Math.random() * 3)});
+ coll.insert({foo: 1, bar: Math.floor(Math.random() * -2)});
+ collTwo.insert({foo: Math.floor(Math.random() * 2), bar: Math.floor(Math.random() * 2)});
+}
+
+// Assert that two queries with identical structures are represented by the same key
+coll.aggregate([{$match: {foo: 1}}], {cursor: {batchSize: 2}});
+coll.aggregate([{$match: {foo: 0}}], {cursor: {batchSize: 2}});
+// This command will return all telemetry store entries.
+let telStore = testDB.adminCommand({aggregate: 1, pipeline: [{$telemetry: {}}], cursor: {}});
+assert.eq(telStore.cursor.firstBatch.length, 1);
+// Assert we update execution count for identically shaped queries.
+assert.eq(telStore.cursor.firstBatch[0].metrics.execCount, 2);
+verifyMetrics(telStore.cursor.firstBatch);
+
+// Assert that options such as limit/sort create different keys
+coll.find({foo: {$eq: 0}}).batchSize(2).toArray();
+coll.find({foo: {$eq: 1}}).limit(50).batchSize(2).toArray();
+coll.find().sort({"foo": 1}).batchSize(2).toArray();
+// This filters telemetry entries to just the ones recorded when running the above find queries.
+telStore = testDB.adminCommand({
+ aggregate: 1,
+ pipeline: [{$telemetry: {}}, {$match: {"key.find.find": {$eq: "###"}}}],
+ cursor: {}
+});
+assert.eq(telStore.cursor.firstBatch.length, 3);
+verifyMetrics(telStore.cursor.firstBatch);
+
+// Ensure that for queries using an index, keys scanned is nonzero.
+assert.commandWorked(coll.createIndex({bar: 1}));
+coll.aggregate([{$match: {bar: 1}}], {cursor: {batchSize: 2}});
+// This filters telemetry entries to just the one entered for the above agg command.
+telStore = testDB.adminCommand({
+ aggregate: 1,
+ pipeline: [{$telemetry: {}}, {$match: {"key.pipeline.$match.bar": {$eq: "###"}}}],
+ cursor: {}
+});
+assert(telStore.cursor.firstBatch[0].metrics.keysScanned.sum > 0);
+
+MongoRunner.stopMongod(conn);
+}());