author     Will Buerger <will.buerger@mongodb.com>           2023-05-16 20:32:42 +0000
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>  2023-05-17 00:09:49 +0000
commit     d646e44b7801a3e5b3230bbae7dcfe05a5ed8707 (patch)
tree       2eebf945a3f958e3adddc157523a49deb72cc63d
parent     8219766da20351edcda5fe21a18da254b382cd35 (diff)
download   mongo-d646e44b7801a3e5b3230bbae7dcfe05a5ed8707.tar.gz
SERVER-76427: Rename $telemetry to $queryStats
Co-authored-by: Ted Tuckman <ted.tuckman@mongodb.com>
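The user-visible effect of this rename, as exercised by the updated jstests in this patch, is that the aggregation stage, feature flag, and sampling-rate server parameter now use the queryStats spelling. A minimal illustrative sketch (not part of the patch itself), using only names that appear in the diff below:

    // Before this change: read the telemetry store via the $telemetry stage.
    db.getSiblingDB("admin").aggregate([{$telemetry: {}}]);
    // After this change: the same read uses the $queryStats stage.
    db.getSiblingDB("admin").aggregate([{$queryStats: {}}]);
    // The server parameter enabling collection is renamed accordingly, e.g.:
    //   internalQueryConfigureTelemetrySamplingRate  ->  internalQueryStatsSamplingRate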
-rw-r--r--  buildscripts/resmokeconfig/suites/telemetry_passthrough.yml | 2
-rw-r--r--  jstests/auth/lib/commands_lib.js | 6
-rw-r--r--  jstests/libs/telemetry_utils.js | 6
-rw-r--r--  jstests/noPassthrough/queryStats/application_name_find.js (renamed from jstests/noPassthrough/telemetry/application_name_find.js) | 4
-rw-r--r--  jstests/noPassthrough/queryStats/clear_query_stats_store.js (renamed from jstests/noPassthrough/telemetry/clear_telemetry_store.js) | 23
-rw-r--r--  jstests/noPassthrough/queryStats/documentSourceQueryStats_redaction_parameters.js (renamed from jstests/noPassthrough/telemetry/documentSourceTelemetry_redaction_parameters.js) | 28
-rw-r--r--  jstests/noPassthrough/queryStats/feature_flag_off_sampling_rate_on.js (renamed from jstests/noPassthrough/telemetry/feature_flag_off_sampling_rate_on.js) | 8
-rw-r--r--  jstests/noPassthrough/queryStats/query_stats_collect_on_mongos.js (renamed from jstests/noPassthrough/telemetry/telemetry_collect_on_mongos.js) | 10
-rw-r--r--  jstests/noPassthrough/queryStats/query_stats_feature_flag.js (renamed from jstests/noPassthrough/telemetry/telemetry_feature_flag.js) | 8
-rw-r--r--  jstests/noPassthrough/queryStats/query_stats_key.js (renamed from jstests/noPassthrough/telemetry/query_stats_key.js) | 2
-rw-r--r--  jstests/noPassthrough/queryStats/query_stats_metrics_across_getMore_calls.js (renamed from jstests/noPassthrough/telemetry/telemetry_metrics_across_getMore_calls.js) | 16
-rw-r--r--  jstests/noPassthrough/queryStats/query_stats_redact_find_cmd.js (renamed from jstests/noPassthrough/telemetry/telemetry_redact_find_cmd.js) | 10
-rw-r--r--  jstests/noPassthrough/queryStats/query_stats_sampling_rate.js (renamed from jstests/noPassthrough/telemetry/telemetry_sampling_rate.js) | 12
-rw-r--r--  jstests/noPassthrough/queryStats/query_stats_server_status_metrics.js (renamed from jstests/noPassthrough/telemetry/telemetry_server_status_metrics.js) | 48
-rw-r--r--  jstests/noPassthrough/queryStats/query_stats_upgrade.js (renamed from jstests/noPassthrough/telemetry/telemetry_upgrade.js) | 8
-rw-r--r--  jstests/noPassthrough/queryStats/redact_queries_with_nonobject_fields.js (renamed from jstests/noPassthrough/telemetry/redact_queries_with_nonobject_fields.js) | 4
-rw-r--r--  jstests/noPassthroughWithMongod/telemetry_configuration.js | 5
-rw-r--r--  src/mongo/db/auth/action_type.idl | 2
-rw-r--r--  src/mongo/db/auth/builtin_roles.yml | 2
-rw-r--r--  src/mongo/db/clientcursor.cpp | 36
-rw-r--r--  src/mongo/db/clientcursor.h | 14
-rw-r--r--  src/mongo/db/commands/find_cmd.cpp | 21
-rw-r--r--  src/mongo/db/commands/getmore_cmd.cpp | 2
-rw-r--r--  src/mongo/db/commands/run_aggregate.cpp | 18
-rw-r--r--  src/mongo/db/curop.h | 8
-rw-r--r--  src/mongo/db/cursor_manager.cpp | 8
-rw-r--r--  src/mongo/db/pipeline/SConscript | 4
-rw-r--r--  src/mongo/db/pipeline/abt/document_source_visitor.cpp | 2
-rw-r--r--  src/mongo/db/pipeline/aggregate_command.idl | 6
-rw-r--r--  src/mongo/db/pipeline/aggregate_request_shapifier.cpp | 12
-rw-r--r--  src/mongo/db/pipeline/aggregate_request_shapifier.h | 11
-rw-r--r--  src/mongo/db/pipeline/document_source_query_stats.cpp (renamed from src/mongo/db/pipeline/document_source_telemetry.cpp) | 56
-rw-r--r--  src/mongo/db/pipeline/document_source_query_stats.h (renamed from src/mongo/db/pipeline/document_source_telemetry.h) | 24
-rw-r--r--  src/mongo/db/pipeline/document_source_query_stats_test.cpp (renamed from src/mongo/db/pipeline/document_source_telemetry_test.cpp) | 42
-rw-r--r--  src/mongo/db/pipeline/visitors/document_source_visitor_registry_mongod.h | 4
-rw-r--r--  src/mongo/db/query/SConscript | 6
-rw-r--r--  src/mongo/db/query/cqf_command_utils.cpp | 2
-rw-r--r--  src/mongo/db/query/find.cpp | 6
-rw-r--r--  src/mongo/db/query/find_request_shapifier.cpp | 12
-rw-r--r--  src/mongo/db/query/find_request_shapifier.h | 11
-rw-r--r--  src/mongo/db/query/query_feature_flags.idl | 6
-rw-r--r--  src/mongo/db/query/query_knobs.idl | 28
-rw-r--r--  src/mongo/db/query/query_shape.cpp | 2
-rw-r--r--  src/mongo/db/query/query_shape.h | 2
-rw-r--r--  src/mongo/db/query/query_stats.cpp (renamed from src/mongo/db/query/telemetry.cpp) | 248
-rw-r--r--  src/mongo/db/query/query_stats.h (renamed from src/mongo/db/query/telemetry.h) | 82
-rw-r--r--  src/mongo/db/query/query_stats_store_test.cpp (renamed from src/mongo/db/query/telemetry_store_test.cpp) | 99
-rw-r--r--  src/mongo/db/query/query_stats_util.cpp (renamed from src/mongo/db/query/telemetry_util.cpp) | 18
-rw-r--r--  src/mongo/db/query/query_stats_util.h (renamed from src/mongo/db/query/telemetry_util.h) | 26
-rw-r--r--  src/mongo/db/query/request_shapifier.h | 20
-rw-r--r--  src/mongo/s/commands/cluster_find_cmd.h | 12
-rw-r--r--  src/mongo/s/query/cluster_aggregate.cpp | 4
-rw-r--r--  src/mongo/s/query/cluster_aggregation_planner.cpp | 8
-rw-r--r--  src/mongo/s/query/cluster_client_cursor.h | 4
-rw-r--r--  src/mongo/s/query/cluster_client_cursor_impl.cpp | 27
-rw-r--r--  src/mongo/s/query/cluster_client_cursor_impl.h | 10
-rw-r--r--  src/mongo/s/query/cluster_client_cursor_mock.cpp | 2
-rw-r--r--  src/mongo/s/query/cluster_client_cursor_mock.h | 2
-rw-r--r--  src/mongo/s/query/cluster_cursor_manager.cpp | 18
-rw-r--r--  src/mongo/s/query/cluster_cursor_manager.h | 10
-rw-r--r--  src/mongo/s/query/cluster_find.cpp | 8
-rw-r--r--  src/mongo/s/query/store_possible_cursor.cpp | 4
62 files changed, 575 insertions, 574 deletions
diff --git a/buildscripts/resmokeconfig/suites/telemetry_passthrough.yml b/buildscripts/resmokeconfig/suites/telemetry_passthrough.yml
index 1aa2a490a5f..08fa435a07d 100644
--- a/buildscripts/resmokeconfig/suites/telemetry_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/telemetry_passthrough.yml
@@ -27,4 +27,4 @@ executor:
mongod_options:
set_parameters:
enableTestCommands: 1
- internalQueryConfigureTelemetrySamplingRate: -1
+ internalQueryStatsSamplingRate: -1
diff --git a/jstests/auth/lib/commands_lib.js b/jstests/auth/lib/commands_lib.js
index 170223762b4..1d60c7aa308 100644
--- a/jstests/auth/lib/commands_lib.js
+++ b/jstests/auth/lib/commands_lib.js
@@ -6629,12 +6629,12 @@ export const authCommandsLib = {
]
},
{
- // Test that only clusterManager has permission to run $telemetry
+ // Test that only clusterManager has permission to run $queryStats
testname: "testTelemetryReadPrivilege",
- command: {aggregate: 1, pipeline: [{$telemetry: {}}], cursor: {}},
+ command: {aggregate: 1, pipeline: [{$queryStats: {}}], cursor: {}},
skipSharded: false,
skipTest: (conn) => {
- return !TestData.setParameters.featureFlagTelemetry;
+ return !TestData.setParameters.featureFlagQueryStats;
},
testcases: [{runOnDb: adminDbName, roles: roles_clusterManager}]
},
diff --git a/jstests/libs/telemetry_utils.js b/jstests/libs/telemetry_utils.js
index 11e2d236827..0bb9e90fb58 100644
--- a/jstests/libs/telemetry_utils.js
+++ b/jstests/libs/telemetry_utils.js
@@ -45,7 +45,7 @@ function getTelemetry(conn) {
const result = conn.adminCommand({
aggregate: 1,
pipeline: [
- {$telemetry: {}},
+ {$queryStats: {}},
// Sort on telemetry key so entries are in a deterministic order.
{$sort: {key: 1}},
{$match: {"key.applicationName": kApplicationName}}
@@ -62,7 +62,7 @@ function getTelemetryRedacted(
hmacKey = BinData(0, "MjM0NTY3ODkxMDExMTIxMzE0MTUxNjE3MTgxOTIwMjE=")) {
// Hashed application name is generated using the default hmacKey argument.
const kApplicationName = "MongoDB Shell";
- // Filter out agg queries, including $telemetry.
+ // Filter out agg queries, including $queryStats.
const match = {
$match: {"key.queryShape.command": "find", "key.applicationName": kApplicationName}
};
@@ -70,7 +70,7 @@ function getTelemetryRedacted(
const result = conn.adminCommand({
aggregate: 1,
pipeline: [
- {$telemetry: {applyHmacToIdentifiers: applyHmacToIdentifiers, hmacKey: hmacKey}},
+ {$queryStats: {applyHmacToIdentifiers: applyHmacToIdentifiers, hmacKey: hmacKey}},
match,
// Sort on telemetry key so entries are in a deterministic order.
{$sort: {key: 1}},
diff --git a/jstests/noPassthrough/telemetry/application_name_find.js b/jstests/noPassthrough/queryStats/application_name_find.js
index 35b86a95f53..36245a31514 100644
--- a/jstests/noPassthrough/telemetry/application_name_find.js
+++ b/jstests/noPassthrough/queryStats/application_name_find.js
@@ -1,6 +1,6 @@
/**
* Test that applicationName and namespace appear in telemetry for the find command.
- * @tags: [featureFlagTelemetry]
+ * @tags: [featureFlagQueryStats]
*/
load("jstests/libs/telemetry_utils.js");
(function() {
@@ -12,7 +12,7 @@ const kHashedFieldName = "lU7Z0mLRPRUL+RfAD5jhYPRRpXBsZBxS/20EzDwfOG4=";
// Turn on the collecting of telemetry metrics.
let options = {
- setParameter: {internalQueryConfigureTelemetrySamplingRate: -1},
+ setParameter: {internalQueryStatsSamplingRate: -1},
};
const conn = MongoRunner.runMongod(options);
diff --git a/jstests/noPassthrough/telemetry/clear_telemetry_store.js b/jstests/noPassthrough/queryStats/clear_query_stats_store.js
index b2409cc0bbb..056c565ec02 100644
--- a/jstests/noPassthrough/telemetry/clear_telemetry_store.js
+++ b/jstests/noPassthrough/queryStats/clear_query_stats_store.js
@@ -1,6 +1,6 @@
/**
* Test that the telemetry store can be cleared when the cache size is reset to 0.
- * @tags: [featureFlagTelemetry]
+ * @tags: [featureFlagQueryStats]
*/
load("jstests/libs/telemetry_utils.js"); // For verifyMetrics.
@@ -9,10 +9,8 @@ load("jstests/libs/telemetry_utils.js"); // For verifyMetrics.
// Turn on the collecting of telemetry metrics.
let options = {
- setParameter: {
- internalQueryConfigureTelemetrySamplingRate: -1,
- internalQueryConfigureTelemetryCacheSize: "10MB"
- },
+ setParameter:
+ {internalQueryStatsSamplingRate: -1, internalQueryConfigureQueryStatsCacheSize: "10MB"},
};
const conn = MongoRunner.runMongod(options);
@@ -29,18 +27,19 @@ for (var j = 0; j < 10; ++j) {
}
// Confirm number of entries in the store and that none have been evicted.
-let telemetryResults = testDB.getSiblingDB("admin").aggregate([{$telemetry: {}}]).toArray();
+let telemetryResults = testDB.getSiblingDB("admin").aggregate([{$queryStats: {}}]).toArray();
assert.eq(telemetryResults.length, 10, telemetryResults);
-assert.eq(testDB.serverStatus().metrics.telemetry.numEvicted, 0);
+assert.eq(testDB.serverStatus().metrics.queryStats.numEvicted, 0);
// Command to clear the cache.
assert.commandWorked(
- testDB.adminCommand({setParameter: 1, internalQueryConfigureTelemetryCacheSize: "0MB"}));
+ testDB.adminCommand({setParameter: 1, internalQueryConfigureQueryStatsCacheSize: "0MB"}));
-// 10 regular queries plus the $telemetry query, means 11 entries evicted when the cache is cleared.
-assert.eq(testDB.serverStatus().metrics.telemetry.numEvicted, 11);
+// 10 regular queries plus the $queryStats query, means 11 entries evicted when the cache is
+// cleared.
+assert.eq(testDB.serverStatus().metrics.queryStats.numEvicted, 11);
-// Calling $telemetry should fail when the telemetry store size is 0 bytes.
-assert.throwsWithCode(() => testDB.getSiblingDB("admin").aggregate([{$telemetry: {}}]), 6579000);
+// Calling $queryStats should fail when the telemetry store size is 0 bytes.
+assert.throwsWithCode(() => testDB.getSiblingDB("admin").aggregate([{$queryStats: {}}]), 6579000);
MongoRunner.stopMongod(conn);
}());
diff --git a/jstests/noPassthrough/telemetry/documentSourceTelemetry_redaction_parameters.js b/jstests/noPassthrough/queryStats/documentSourceQueryStats_redaction_parameters.js
index c4f785abf6a..8facb106072 100644
--- a/jstests/noPassthrough/telemetry/documentSourceTelemetry_redaction_parameters.js
+++ b/jstests/noPassthrough/queryStats/documentSourceQueryStats_redaction_parameters.js
@@ -1,6 +1,6 @@
/**
- * Test the $telemetry hmac properties.
- * @tags: [featureFlagTelemetry]
+ * Test the $queryStats hmac properties.
+ * @tags: [featureFlagQueryStats]
*/
load("jstests/aggregation/extras/utils.js"); // For assertAdminDBErrCodeAndErrMsgContains.
@@ -42,41 +42,41 @@ function runTest(conn) {
assertTelemetryKeyWithoutHmac(getTelemetryRedacted(conn, false)[0]["key"].queryShape);
// Wrong parameter name throws error.
- let pipeline = [{$telemetry: {redactFields: true}}];
+ let pipeline = [{$queryStats: {redactFields: true}}];
assertAdminDBErrCodeAndErrMsgContains(
coll,
pipeline,
ErrorCodes.FailedToParse,
- "$telemetry parameters object may only contain 'applyHmacToIdentifiers' or 'hmacKey' options. Found: redactFields");
+ "$queryStats parameters object may only contain 'applyHmacToIdentifiers' or 'hmacKey' options. Found: redactFields");
// Wrong parameter type throws error.
- pipeline = [{$telemetry: {applyHmacToIdentifiers: 1}}];
+ pipeline = [{$queryStats: {applyHmacToIdentifiers: 1}}];
assertAdminDBErrCodeAndErrMsgContains(
coll,
pipeline,
ErrorCodes.FailedToParse,
- "$telemetry applyHmacToIdentifiers parameter must be boolean. Found type: double");
+ "$queryStats applyHmacToIdentifiers parameter must be boolean. Found type: double");
- pipeline = [{$telemetry: {hmacKey: 1}}];
+ pipeline = [{$queryStats: {hmacKey: 1}}];
assertAdminDBErrCodeAndErrMsgContains(
coll,
pipeline,
ErrorCodes.FailedToParse,
- "$telemetry hmacKey parameter must be bindata of length 32 or greater. Found type: double");
+ "$queryStats hmacKey parameter must be bindata of length 32 or greater. Found type: double");
// Parameter object with unrecognized key throws error.
- pipeline = [{$telemetry: {applyHmacToIdentifiers: true, hmacStrategy: "on"}}];
+ pipeline = [{$queryStats: {applyHmacToIdentifiers: true, hmacStrategy: "on"}}];
assertAdminDBErrCodeAndErrMsgContains(
coll,
pipeline,
ErrorCodes.FailedToParse,
- "$telemetry parameters object may only contain 'applyHmacToIdentifiers' or 'hmacKey' options. Found: hmacStrategy");
+ "$queryStats parameters object may only contain 'applyHmacToIdentifiers' or 'hmacKey' options. Found: hmacStrategy");
}
const conn = MongoRunner.runMongod({
setParameter: {
- internalQueryConfigureTelemetrySamplingRate: -1,
- featureFlagTelemetry: true,
+ internalQueryStatsSamplingRate: -1,
+ featureFlagQueryStats: true,
}
});
runTest(conn);
@@ -89,8 +89,8 @@ const st = new ShardingTest({
rs: {nodes: 1},
mongosOptions: {
setParameter: {
- internalQueryConfigureTelemetrySamplingRate: -1,
- featureFlagTelemetry: true,
+ internalQueryStatsSamplingRate: -1,
+ featureFlagQueryStats: true,
'failpoint.skipClusterParameterRefresh': "{'mode':'alwaysOn'}"
}
},
diff --git a/jstests/noPassthrough/telemetry/feature_flag_off_sampling_rate_on.js b/jstests/noPassthrough/queryStats/feature_flag_off_sampling_rate_on.js
index 7fbc079cc7b..38474b944d0 100644
--- a/jstests/noPassthrough/telemetry/feature_flag_off_sampling_rate_on.js
+++ b/jstests/noPassthrough/queryStats/feature_flag_off_sampling_rate_on.js
@@ -10,14 +10,14 @@ load("jstests/libs/feature_flag_util.js");
// Set sampling rate to -1.
let options = {
- setParameter: {internalQueryConfigureTelemetrySamplingRate: -1},
+ setParameter: {internalQueryStatsSamplingRate: -1},
};
const conn = MongoRunner.runMongod(options);
const testdb = conn.getDB('test');
// This test specifically tests error handling when the feature flag is not on.
// TODO SERVER-65800 This test can be deleted when the feature is on by default.
-if (!conn || FeatureFlagUtil.isEnabled(testdb, "Telemetry")) {
+if (!conn || FeatureFlagUtil.isEnabled(testdb, "QueryStats")) {
jsTestLog(`Skipping test since feature flag is disabled. conn: ${conn}`);
if (conn) {
MongoRunner.stopMongod(conn);
@@ -38,14 +38,14 @@ assert.commandWorked(bulk.execute());
// Pipeline to read telemetry store should fail without feature flag turned on even though sampling
// rate is > 0.
assert.commandFailedWithCode(
- testdb.adminCommand({aggregate: 1, pipeline: [{$telemetry: {}}], cursor: {}}),
+ testdb.adminCommand({aggregate: 1, pipeline: [{$queryStats: {}}], cursor: {}}),
ErrorCodes.QueryFeatureNotAllowed);
// Pipeline, with a filter, to read telemetry store fails without feature flag turned on even though
// sampling rate is > 0.
assert.commandFailedWithCode(testdb.adminCommand({
aggregate: 1,
- pipeline: [{$telemetry: {}}, {$match: {"key.queryShape.find": {$eq: "###"}}}],
+ pipeline: [{$queryStats: {}}, {$match: {"key.queryShape.find": {$eq: "###"}}}],
cursor: {}
}),
ErrorCodes.QueryFeatureNotAllowed);
diff --git a/jstests/noPassthrough/telemetry/telemetry_collect_on_mongos.js b/jstests/noPassthrough/queryStats/query_stats_collect_on_mongos.js
index ff9fadc85c7..97057269527 100644
--- a/jstests/noPassthrough/telemetry/telemetry_collect_on_mongos.js
+++ b/jstests/noPassthrough/queryStats/query_stats_collect_on_mongos.js
@@ -1,6 +1,6 @@
/**
* Test that mongos is collecting telemetry metrics.
- * @tags: [featureFlagTelemetry]
+ * @tags: [featureFlagQueryStats]
*/
load('jstests/libs/telemetry_utils.js');
@@ -18,7 +18,7 @@ const setup = () => {
rs: {nodes: 1},
mongosOptions: {
setParameter: {
- internalQueryConfigureTelemetrySamplingRate: -1,
+ internalQueryStatsSamplingRate: -1,
'failpoint.skipClusterParameterRefresh': "{'mode':'alwaysOn'}"
}
},
@@ -95,7 +95,8 @@ const assertExpectedResults = (results,
collection: coll.getName(),
batchSize: 2
})); // returns 1 doc, exhausts the cursor
- // The $telemetry query for the previous `getTelemetry` is included in this call to $telemetry.
+ // The $queryStats query for the previous `getTelemetry` is included in this call to
+ // $queryStats.
telemetry = getTelemetry(db);
assert.eq(2, telemetry.length, telemetry);
assertExpectedResults(telemetry[0],
@@ -159,7 +160,8 @@ const assertExpectedResults = (results,
collection: coll.getName(),
batchSize: 2
})); // returns 1 doc, exhausts the cursor
- // The $telemetry query for the previous `getTelemetry` is included in this call to $telemetry.
+ // The $queryStats query for the previous `getTelemetry` is included in this call to
+ // $queryStats.
telemetry = getTelemetry(db);
assert.eq(2, telemetry.length, telemetry);
assertExpectedResults(telemetry[0],
diff --git a/jstests/noPassthrough/telemetry/telemetry_feature_flag.js b/jstests/noPassthrough/queryStats/query_stats_feature_flag.js
index 4071b732796..bcce489d8da 100644
--- a/jstests/noPassthrough/telemetry/telemetry_feature_flag.js
+++ b/jstests/noPassthrough/queryStats/query_stats_feature_flag.js
@@ -11,21 +11,21 @@ load("jstests/libs/feature_flag_util.js");
// TODO SERVER-65800 this test can be removed when the feature flag is removed.
const conn = MongoRunner.runMongod();
const testDB = conn.getDB('test');
-if (FeatureFlagUtil.isEnabled(testDB, "Telemetry")) {
- jsTestLog("Skipping test since telemetry is enabled.");
+if (FeatureFlagUtil.isEnabled(testDB, "QueryStats")) {
+ jsTestLog("Skipping test since query stats are enabled.");
MongoRunner.stopMongod(conn);
return;
}
// Pipeline to read telemetry store should fail without feature flag turned on.
assert.commandFailedWithCode(
- testDB.adminCommand({aggregate: 1, pipeline: [{$telemetry: {}}], cursor: {}}),
+ testDB.adminCommand({aggregate: 1, pipeline: [{$queryStats: {}}], cursor: {}}),
ErrorCodes.QueryFeatureNotAllowed);
// Pipeline, with a filter, to read telemetry store fails without feature flag turned on.
assert.commandFailedWithCode(testDB.adminCommand({
aggregate: 1,
- pipeline: [{$telemetry: {}}, {$match: {"key.queryShape.find": {$eq: "###"}}}],
+ pipeline: [{$queryStats: {}}, {$match: {"key.queryShape.find": {$eq: "###"}}}],
cursor: {}
}),
ErrorCodes.QueryFeatureNotAllowed);
diff --git a/jstests/noPassthrough/telemetry/query_stats_key.js b/jstests/noPassthrough/queryStats/query_stats_key.js
index 68d77110bc6..8b63417078a 100644
--- a/jstests/noPassthrough/telemetry/query_stats_key.js
+++ b/jstests/noPassthrough/queryStats/query_stats_key.js
@@ -64,7 +64,7 @@ function confirmAllFieldsPresent(queryStatsEntries) {
// Turn on the collecting of telemetry metrics.
let options = {
- setParameter: {internalQueryConfigureTelemetrySamplingRate: -1},
+ setParameter: {internalQueryStatsSamplingRate: -1},
};
const conn = MongoRunner.runMongod(options);
diff --git a/jstests/noPassthrough/telemetry/telemetry_metrics_across_getMore_calls.js b/jstests/noPassthrough/queryStats/query_stats_metrics_across_getMore_calls.js
index 91605c5e069..d5caea74cf7 100644
--- a/jstests/noPassthrough/telemetry/telemetry_metrics_across_getMore_calls.js
+++ b/jstests/noPassthrough/queryStats/query_stats_metrics_across_getMore_calls.js
@@ -1,7 +1,7 @@
/**
* Test that the telemetry metrics are aggregated properly by distinct query shape over getMore
* calls.
- * @tags: [featureFlagTelemetry]
+ * @tags: [featureFlagQueryStats]
*/
load("jstests/libs/telemetry_utils.js"); // For verifyMetrics.
@@ -10,7 +10,7 @@ load("jstests/libs/telemetry_utils.js"); // For verifyMetrics.
// Turn on the collecting of telemetry metrics.
let options = {
- setParameter: {internalQueryConfigureTelemetrySamplingRate: -1},
+ setParameter: {internalQueryStatsSamplingRate: -1},
};
const conn = MongoRunner.runMongod(options);
@@ -35,7 +35,7 @@ assert.commandWorked(bulk.execute());
coll.aggregate([{$match: {foo: 0}}], {cursor: {batchSize: 2}}).toArray();
// This command will return all telemetry store entires.
- const telemetryResults = testDB.getSiblingDB("admin").aggregate([{$telemetry: {}}]).toArray();
+ const telemetryResults = testDB.getSiblingDB("admin").aggregate([{$queryStats: {}}]).toArray();
// Assert there is only one entry.
assert.eq(telemetryResults.length, 1, telemetryResults);
const telemetryEntry = telemetryResults[0];
@@ -71,7 +71,7 @@ const fooNeBatchSize = 3;
// This filters telemetry entires to just the ones entered when running above find queries.
const telemetryResults = testDB.getSiblingDB("admin")
.aggregate([
- {$telemetry: {}},
+ {$queryStats: {}},
{$match: {"key.queryShape.filter.foo": {$exists: true}}},
{$sort: {key: 1}},
])
@@ -102,7 +102,7 @@ const fooNeBatchSize = 3;
// This filters telemetry entires to just the ones entered when running above find queries.
let telemetryResults =
testDB.getSiblingDB("admin")
- .aggregate([{$telemetry: {}}, {$match: {"key.queryShape.command": "find"}}])
+ .aggregate([{$queryStats: {}}, {$match: {"key.queryShape.command": "find"}}])
.toArray();
assert.eq(telemetryResults.length, 4, telemetryResults);
@@ -110,7 +110,7 @@ const fooNeBatchSize = 3;
// This filters to just the telemetry for query coll.find().sort({"foo": 1}).batchSize(2).
telemetryResults = testDB.getSiblingDB("admin")
- .aggregate([{$telemetry: {}}, {$match: {"key.queryShape.sort.foo": 1}}])
+ .aggregate([{$queryStats: {}}, {$match: {"key.queryShape.sort.foo": 1}}])
.toArray();
assert.eq(telemetryResults.length, 1, telemetryResults);
assert.eq(telemetryResults[0].key.queryShape.cmdNs.db, "test");
@@ -123,7 +123,7 @@ const fooNeBatchSize = 3;
// 1}}).limit(query2Limit).batchSize(2).
telemetryResults =
testDB.getSiblingDB("admin")
- .aggregate([{$telemetry: {}}, {$match: {"key.queryShape.limit": '?number'}}])
+ .aggregate([{$queryStats: {}}, {$match: {"key.queryShape.limit": '?number'}}])
.toArray();
assert.eq(telemetryResults.length, 1, telemetryResults);
assert.eq(telemetryResults[0].key.queryShape.cmdNs.db, "test");
@@ -135,7 +135,7 @@ const fooNeBatchSize = 3;
// This filters to just the telemetry for query coll.find({foo: {$eq: 0}}).batchSize(2).
telemetryResults = testDB.getSiblingDB("admin")
.aggregate([
- {$telemetry: {}},
+ {$queryStats: {}},
{
$match: {
"key.queryShape.filter.foo": {$eq: {$eq: "?number"}},
diff --git a/jstests/noPassthrough/telemetry/telemetry_redact_find_cmd.js b/jstests/noPassthrough/queryStats/query_stats_redact_find_cmd.js
index 54b909adae9..b2cce48cdb7 100644
--- a/jstests/noPassthrough/telemetry/telemetry_redact_find_cmd.js
+++ b/jstests/noPassthrough/queryStats/query_stats_redact_find_cmd.js
@@ -1,5 +1,5 @@
/**
- * Test that $telemetry properly applies hmac to find commands, on mongod and mongos.
+ * Test that $queryStats properly applies hmac to find commands, on mongod and mongos.
*/
load("jstests/libs/telemetry_utils.js");
(function() {
@@ -44,8 +44,8 @@ function runTest(conn) {
const conn = MongoRunner.runMongod({
setParameter: {
- internalQueryConfigureTelemetrySamplingRate: -1,
- featureFlagTelemetry: true,
+ internalQueryStatsSamplingRate: -1,
+ featureFlagQueryStats: true,
}
});
runTest(conn);
@@ -58,8 +58,8 @@ const st = new ShardingTest({
rs: {nodes: 1},
mongosOptions: {
setParameter: {
- internalQueryConfigureTelemetrySamplingRate: -1,
- featureFlagTelemetry: true,
+ internalQueryStatsSamplingRate: -1,
+ featureFlagQueryStats: true,
'failpoint.skipClusterParameterRefresh': "{'mode':'alwaysOn'}"
}
},
diff --git a/jstests/noPassthrough/telemetry/telemetry_sampling_rate.js b/jstests/noPassthrough/queryStats/query_stats_sampling_rate.js
index 1bada398a03..009c59737fa 100644
--- a/jstests/noPassthrough/telemetry/telemetry_sampling_rate.js
+++ b/jstests/noPassthrough/queryStats/query_stats_sampling_rate.js
@@ -1,7 +1,7 @@
/**
* Test that calls to read from telemetry store fail when sampling rate is not greater than 0 even
* if feature flag is on.
- * @tags: [featureFlagTelemetry]
+ * @tags: [featureFlagQueryStats]
*/
load('jstests/libs/analyze_plan.js');
@@ -9,7 +9,7 @@ load('jstests/libs/analyze_plan.js');
"use strict";
let options = {
- setParameter: {internalQueryConfigureTelemetrySamplingRate: 0},
+ setParameter: {internalQueryStatsSamplingRate: 0},
};
const conn = MongoRunner.runMongod(options);
@@ -23,15 +23,15 @@ for (var i = 0; i < 20; i++) {
coll.aggregate([{$match: {foo: 1}}], {cursor: {batchSize: 2}});
// Reading telemetry store with a sampling rate of 0 should return 0 documents.
-let telStore = testdb.adminCommand({aggregate: 1, pipeline: [{$telemetry: {}}], cursor: {}});
+let telStore = testdb.adminCommand({aggregate: 1, pipeline: [{$queryStats: {}}], cursor: {}});
assert.eq(telStore.cursor.firstBatch.length, 0);
// Reading telemetry store should work now with a sampling rate of greater than 0.
-assert.commandWorked(testdb.adminCommand(
- {setParameter: 1, internalQueryConfigureTelemetrySamplingRate: 2147483647}));
+assert.commandWorked(
+ testdb.adminCommand({setParameter: 1, internalQueryStatsSamplingRate: 2147483647}));
coll.aggregate([{$match: {foo: 1}}], {cursor: {batchSize: 2}});
telStore = assert.commandWorked(
- testdb.adminCommand({aggregate: 1, pipeline: [{$telemetry: {}}], cursor: {}}));
+ testdb.adminCommand({aggregate: 1, pipeline: [{$queryStats: {}}], cursor: {}}));
assert.eq(telStore.cursor.firstBatch.length, 1);
MongoRunner.stopMongod(conn);
diff --git a/jstests/noPassthrough/telemetry/telemetry_server_status_metrics.js b/jstests/noPassthrough/queryStats/query_stats_server_status_metrics.js
index 2235d272a9f..b142d901a7f 100644
--- a/jstests/noPassthrough/telemetry/telemetry_server_status_metrics.js
+++ b/jstests/noPassthrough/queryStats/query_stats_server_status_metrics.js
@@ -1,6 +1,6 @@
/**
* Test the telemetry related serverStatus metrics.
- * @tags: [featureFlagTelemetry]
+ * @tags: [featureFlagQueryStats]
*/
load('jstests/libs/analyze_plan.js');
@@ -23,7 +23,7 @@ function runTestWithMongodOptions(mongodOptions, test, testOptions) {
* testOptions must include `resetCacheSize` bool field; e.g., { resetCacheSize : true }
*/
function evictionTest(conn, testDB, coll, testOptions) {
- const evictedBefore = testDB.serverStatus().metrics.telemetry.numEvicted;
+ const evictedBefore = testDB.serverStatus().metrics.queryStats.numEvicted;
assert.eq(evictedBefore, 0);
for (var i = 0; i < 4000; i++) {
let query = {};
@@ -31,16 +31,16 @@ function evictionTest(conn, testDB, coll, testOptions) {
coll.aggregate([{$match: query}]).itcount();
}
if (!testOptions.resetCacheSize) {
- const evictedAfter = testDB.serverStatus().metrics.telemetry.numEvicted;
+ const evictedAfter = testDB.serverStatus().metrics.queryStats.numEvicted;
assert.gt(evictedAfter, 0);
return;
}
// Make sure number of evicted entries increases when the cache size is reset, which forces out
// least recently used entries to meet the new, smaller size requirement.
- assert.eq(testDB.serverStatus().metrics.telemetry.numEvicted, 0);
+ assert.eq(testDB.serverStatus().metrics.queryStats.numEvicted, 0);
assert.commandWorked(
- testDB.adminCommand({setParameter: 1, internalQueryConfigureTelemetryCacheSize: "1MB"}));
- const evictedAfter = testDB.serverStatus().metrics.telemetry.numEvicted;
+ testDB.adminCommand({setParameter: 1, internalQueryConfigureQueryStatsCacheSize: "1MB"}));
+ const evictedAfter = testDB.serverStatus().metrics.queryStats.numEvicted;
assert.gt(evictedAfter, 0);
}
@@ -53,7 +53,7 @@ function evictionTest(conn, testDB, coll, testOptions) {
*/
function countRateLimitedRequestsTest(conn, testDB, coll, testOptions) {
const numRateLimitedRequestsBefore =
- testDB.serverStatus().metrics.telemetry.numRateLimitedRequests;
+ testDB.serverStatus().metrics.queryStats.numRateLimitedRequests;
assert.eq(numRateLimitedRequestsBefore, 0);
coll.insert({a: 0});
@@ -65,7 +65,7 @@ function countRateLimitedRequestsTest(conn, testDB, coll, testOptions) {
}
const numRateLimitedRequestsAfter =
- testDB.serverStatus().metrics.telemetry.numRateLimitedRequests;
+ testDB.serverStatus().metrics.queryStats.numRateLimitedRequests;
if (testOptions.samplingRate === 0) {
// Telemetry should not be collected for any requests.
@@ -81,7 +81,7 @@ function countRateLimitedRequestsTest(conn, testDB, coll, testOptions) {
}
function telemetryStoreSizeEstimateTest(conn, testDB, coll, testOptions) {
- assert.eq(testDB.serverStatus().metrics.telemetry.telemetryStoreSizeEstimateBytes, 0);
+ assert.eq(testDB.serverStatus().metrics.queryStats.queryStatsStoreSizeEstimateBytes, 0);
let halfWayPointSize;
// Only using three digit numbers (eg 100, 101) means the string length will be the same for all
// entries and therefore the key size will be the same for all entries, which makes predicting
@@ -90,12 +90,12 @@ function telemetryStoreSizeEstimateTest(conn, testDB, coll, testOptions) {
coll.aggregate([{$match: {["foo" + i]: "bar"}}]).itcount();
if (i == 150) {
halfWayPointSize =
- testDB.serverStatus().metrics.telemetry.telemetryStoreSizeEstimateBytes;
+ testDB.serverStatus().metrics.queryStats.queryStatsStoreSizeEstimateBytes;
}
}
// Confirm that telemetry store has grown and size is non-zero.
assert.gt(halfWayPointSize, 0);
- const fullSize = testDB.serverStatus().metrics.telemetry.telemetryStoreSizeEstimateBytes;
+ const fullSize = testDB.serverStatus().metrics.queryStats.queryStatsStoreSizeEstimateBytes;
assert.gt(fullSize, 0);
// Make sure the final telemetry store size is twice as much as the halfway point size (+/- 5%)
assert(fullSize >= halfWayPointSize * 1.95 && fullSize <= halfWayPointSize * 2.05,
@@ -109,7 +109,7 @@ function telemetryStoreWriteErrorsTest(conn, testDB, coll, testOptions) {
return;
}
- const errorsBefore = testDB.serverStatus().metrics.telemetry.numTelemetryStoreWriteErrors;
+ const errorsBefore = testDB.serverStatus().metrics.queryStats.numQueryStatsStoreWriteErrors;
assert.eq(errorsBefore, 0);
for (let i = 0; i < 5; i++) {
// Command should succeed and record the error.
@@ -121,7 +121,7 @@ function telemetryStoreWriteErrorsTest(conn, testDB, coll, testOptions) {
// Make sure that we recorded a write error for each run.
// TODO SERVER-73152 we attempt to write to the telemetry store twice for each aggregate, which
// seems wrong.
- assert.eq(testDB.serverStatus().metrics.telemetry.numTelemetryStoreWriteErrors, 10);
+ assert.eq(testDB.serverStatus().metrics.queryStats.numQueryStatsStoreWriteErrors, 10);
}
/**
@@ -129,10 +129,8 @@ function telemetryStoreWriteErrorsTest(conn, testDB, coll, testOptions) {
* eviction.
*/
runTestWithMongodOptions({
- setParameter: {
- internalQueryConfigureTelemetryCacheSize: "1MB",
- internalQueryConfigureTelemetrySamplingRate: -1
- },
+ setParameter:
+ {internalQueryConfigureQueryStatsCacheSize: "1MB", internalQueryStatsSamplingRate: -1},
},
evictionTest,
{resetCacheSize: false});
@@ -140,10 +138,8 @@ runTestWithMongodOptions({
* In this configuration, eviction is triggered only when the telemetry store size is reset.
* */
runTestWithMongodOptions({
- setParameter: {
- internalQueryConfigureTelemetryCacheSize: "4MB",
- internalQueryConfigureTelemetrySamplingRate: -1
- },
+ setParameter:
+ {internalQueryConfigureQueryStatsCacheSize: "4MB", internalQueryStatsSamplingRate: -1},
},
evictionTest,
{resetCacheSize: true});
@@ -152,7 +148,7 @@ runTestWithMongodOptions({
* In this configuration, every query is sampled, so no requests should be rate-limited.
*/
runTestWithMongodOptions({
- setParameter: {internalQueryConfigureTelemetrySamplingRate: -1},
+ setParameter: {internalQueryStatsSamplingRate: -1},
},
countRateLimitedRequestsTest,
{samplingRate: 2147483647, numRequests: 20});
@@ -162,7 +158,7 @@ runTestWithMongodOptions({
* rate-limited.
*/
runTestWithMongodOptions({
- setParameter: {internalQueryConfigureTelemetrySamplingRate: 10},
+ setParameter: {internalQueryStatsSamplingRate: 10},
},
countRateLimitedRequestsTest,
{samplingRate: 10, numRequests: 20});
@@ -172,7 +168,7 @@ runTestWithMongodOptions({
* size
*/
runTestWithMongodOptions({
- setParameter: {internalQueryConfigureTelemetrySamplingRate: -1},
+ setParameter: {internalQueryStatsSamplingRate: -1},
},
telemetryStoreSizeEstimateTest);
@@ -182,8 +178,8 @@ runTestWithMongodOptions({
*/
runTestWithMongodOptions({
setParameter: {
- internalQueryConfigureTelemetryCacheSize: "0.00001MB",
- internalQueryConfigureTelemetrySamplingRate: -1
+ internalQueryConfigureQueryStatsCacheSize: "0.00001MB",
+ internalQueryStatsSamplingRate: -1
},
},
telemetryStoreWriteErrorsTest);
diff --git a/jstests/noPassthrough/telemetry/telemetry_upgrade.js b/jstests/noPassthrough/queryStats/query_stats_upgrade.js
index f396d23b948..919d9f87baf 100644
--- a/jstests/noPassthrough/telemetry/telemetry_upgrade.js
+++ b/jstests/noPassthrough/queryStats/query_stats_upgrade.js
@@ -1,6 +1,6 @@
/**
* Test that telemetry doesn't work on a lower FCV version but works after an FCV upgrade.
- * @tags: [featureFlagTelemetry]
+ * @tags: [featureFlagQueryStats]
*/
load('jstests/libs/analyze_plan.js');
load("jstests/libs/feature_flag_util.js");
@@ -12,7 +12,7 @@ const dbpath = MongoRunner.dataPath + jsTestName();
let conn = MongoRunner.runMongod({dbpath: dbpath});
let testDB = conn.getDB(jsTestName());
// This test should only be run with the flag enabled.
-assert(FeatureFlagUtil.isEnabled(testDB, "Telemetry"));
+assert(FeatureFlagUtil.isEnabled(testDB, "QueryStats"));
function testLower(restart = false) {
let adminDB = conn.getDB("admin");
@@ -26,7 +26,7 @@ function testLower(restart = false) {
}
assert.commandFailedWithCode(
- testDB.adminCommand({aggregate: 1, pipeline: [{$telemetry: {}}], cursor: {}}), 6579000);
+ testDB.adminCommand({aggregate: 1, pipeline: [{$queryStats: {}}], cursor: {}}), 6579000);
// Upgrade FCV.
assert.commandWorked(adminDB.runCommand(
@@ -34,7 +34,7 @@ function testLower(restart = false) {
// We should be able to run a telemetry pipeline now that the FCV is correct.
assert.commandWorked(
- testDB.adminCommand({aggregate: 1, pipeline: [{$telemetry: {}}], cursor: {}}),
+ testDB.adminCommand({aggregate: 1, pipeline: [{$queryStats: {}}], cursor: {}}),
);
}
testLower(true);
diff --git a/jstests/noPassthrough/telemetry/redact_queries_with_nonobject_fields.js b/jstests/noPassthrough/queryStats/redact_queries_with_nonobject_fields.js
index 25cac47555e..7528ab9a4ab 100644
--- a/jstests/noPassthrough/telemetry/redact_queries_with_nonobject_fields.js
+++ b/jstests/noPassthrough/queryStats/redact_queries_with_nonobject_fields.js
@@ -1,6 +1,6 @@
/**
* Test that telemetry key generation works for queries with non-object fields.
- * @tags: [featureFlagTelemetry]
+ * @tags: [featureFlagQueryStats]
*/
load('jstests/libs/analyze_plan.js');
@@ -9,7 +9,7 @@ load('jstests/libs/analyze_plan.js');
// Turn on the collecting of telemetry metrics.
let options = {
- setParameter: {internalQueryConfigureTelemetrySamplingRate: -1},
+ setParameter: {internalQueryStatsSamplingRate: -1},
};
const conn = MongoRunner.runMongod(options);
diff --git a/jstests/noPassthroughWithMongod/telemetry_configuration.js b/jstests/noPassthroughWithMongod/telemetry_configuration.js
index 0ae4e8408c3..370733a4480 100644
--- a/jstests/noPassthroughWithMongod/telemetry_configuration.js
+++ b/jstests/noPassthroughWithMongod/telemetry_configuration.js
@@ -22,14 +22,13 @@ if (FeatureFlagUtil.isEnabled(db, "Telemetry")) {
}
}
testTelemetrySetting("internalQueryConfigureTelemetryCacheSize", "2MB");
- testTelemetrySetting("internalQueryConfigureTelemetrySamplingRate", 2147483647);
+ testTelemetrySetting("internalQueryStatsSamplingRate", 2147483647);
} else {
// The feature flag is disabled - make sure the telemetry store *cannot* be configured.
assert.commandFailedWithCode(
db.adminCommand({setParameter: 1, internalQueryConfigureTelemetryCacheSize: '2MB'}),
7373500);
assert.commandFailedWithCode(
- db.adminCommand({setParameter: 1, internalQueryConfigureTelemetrySamplingRate: 2147483647}),
- 7506200);
+ db.adminCommand({setParameter: 1, internalQueryStatsSamplingRate: 2147483647}), 7506200);
}
}());
diff --git a/src/mongo/db/auth/action_type.idl b/src/mongo/db/auth/action_type.idl
index 137ac3c9542..6837625e6f1 100644
--- a/src/mongo/db/auth/action_type.idl
+++ b/src/mongo/db/auth/action_type.idl
@@ -149,7 +149,7 @@ enums:
planCacheIndexFilter : "planCacheIndexFilter" # view/update index filters
planCacheRead : "planCacheRead" # view contents of plan cache
planCacheWrite : "planCacheWrite" # clear cache, drop cache entry, pin/unpin/shun plans
- telemetryRead: "telemetryRead" # view contents of telemetry store
+ queryStatsRead: "queryStatsRead" # view contents of queryStats store
refineCollectionShardKey : "refineCollectionShardKey"
reIndex : "reIndex"
remove : "remove"
diff --git a/src/mongo/db/auth/builtin_roles.yml b/src/mongo/db/auth/builtin_roles.yml
index a29a476a91c..e384a959f1d 100644
--- a/src/mongo/db/auth/builtin_roles.yml
+++ b/src/mongo/db/auth/builtin_roles.yml
@@ -353,7 +353,7 @@ roles:
- getClusterParameter
- setChangeStreamState
- getChangeStreamState
- - telemetryRead
+ - queryStatsRead
- checkMetadataConsistency
- transitionFromDedicatedConfigServer
- transitionToDedicatedConfigServer
diff --git a/src/mongo/db/clientcursor.cpp b/src/mongo/db/clientcursor.cpp
index 55e116e5893..3b4f0143876 100644
--- a/src/mongo/db/clientcursor.cpp
+++ b/src/mongo/db/clientcursor.cpp
@@ -48,7 +48,7 @@
#include "mongo/db/cursor_server_params.h"
#include "mongo/db/jsobj.h"
#include "mongo/db/query/explain.h"
-#include "mongo/db/query/telemetry.h"
+#include "mongo/db/query/query_stats.h"
#include "mongo/db/repl/repl_client_info.h"
#include "mongo/db/repl/replication_coordinator.h"
#include "mongo/util/background.h"
@@ -124,10 +124,10 @@ ClientCursor::ClientCursor(ClientCursorParams params,
_planSummary(_exec->getPlanExplainer().getPlanSummary()),
_planCacheKey(CurOp::get(operationUsingCursor)->debug().planCacheKey),
_queryHash(CurOp::get(operationUsingCursor)->debug().queryHash),
- _telemetryStoreKeyHash(CurOp::get(operationUsingCursor)->debug().telemetryStoreKeyHash),
- _telemetryStoreKey(CurOp::get(operationUsingCursor)->debug().telemetryStoreKey),
- _telemetryRequestShapifier(
- std::move(CurOp::get(operationUsingCursor)->debug().telemetryRequestShapifier)),
+ _queryStatsStoreKeyHash(CurOp::get(operationUsingCursor)->debug().queryStatsStoreKeyHash),
+ _queryStatsStoreKey(CurOp::get(operationUsingCursor)->debug().queryStatsStoreKey),
+ _queryStatsRequestShapifier(
+ std::move(CurOp::get(operationUsingCursor)->debug().queryStatsRequestShapifier)),
_shouldOmitDiagnosticInformation(
CurOp::get(operationUsingCursor)->debug().shouldOmitDiagnosticInformation),
_opKey(operationUsingCursor->getOperationKey()) {
@@ -161,13 +161,13 @@ void ClientCursor::dispose(OperationContext* opCtx, boost::optional<Date_t> now)
return;
}
- if (_telemetryStoreKeyHash && opCtx) {
- telemetry::writeTelemetry(opCtx,
- _telemetryStoreKeyHash,
- _telemetryStoreKey,
- std::move(_telemetryRequestShapifier),
- _metrics.executionTime.value_or(Microseconds{0}).count(),
- _metrics.nreturned.value_or(0));
+ if (_queryStatsStoreKeyHash && opCtx) {
+ query_stats::writeQueryStats(opCtx,
+ _queryStatsStoreKeyHash,
+ _queryStatsStoreKey,
+ std::move(_queryStatsRequestShapifier),
+ _metrics.executionTime.value_or(Microseconds{0}).count(),
+ _metrics.nreturned.value_or(0));
}
if (now) {
@@ -397,19 +397,19 @@ void startClientCursorMonitor() {
getClientCursorMonitor(getGlobalServiceContext()).go();
}
-void collectTelemetryMongod(OperationContext* opCtx, ClientCursorPin& pinnedCursor) {
+void collectQueryStatsMongod(OperationContext* opCtx, ClientCursorPin& pinnedCursor) {
pinnedCursor->incrementCursorMetrics(CurOp::get(opCtx)->debug().additiveMetrics);
}
-void collectTelemetryMongod(OperationContext* opCtx,
- std::unique_ptr<telemetry::RequestShapifier> requestShapifier) {
+void collectQueryStatsMongod(OperationContext* opCtx,
+ std::unique_ptr<query_stats::RequestShapifier> requestShapifier) {
// If we haven't registered a cursor to prepare for getMore requests, we record
// telemetry directly.
auto& opDebug = CurOp::get(opCtx)->debug();
- telemetry::writeTelemetry(
+ query_stats::writeQueryStats(
opCtx,
- opDebug.telemetryStoreKeyHash,
- opDebug.telemetryStoreKey,
+ opDebug.queryStatsStoreKeyHash,
+ opDebug.queryStatsStoreKey,
std::move(requestShapifier),
opDebug.additiveMetrics.executionTime.value_or(Microseconds{0}).count(),
opDebug.additiveMetrics.nreturned.value_or(0));
diff --git a/src/mongo/db/clientcursor.h b/src/mongo/db/clientcursor.h
index 9e7d35ade9a..8ae75473496 100644
--- a/src/mongo/db/clientcursor.h
+++ b/src/mongo/db/clientcursor.h
@@ -448,15 +448,15 @@ private:
boost::optional<uint32_t> _queryHash;
// If boost::none, telemetry should not be collected for this cursor.
- boost::optional<std::size_t> _telemetryStoreKeyHash;
+ boost::optional<std::size_t> _queryStatsStoreKeyHash;
// TODO: SERVER-73152 remove telemetryStoreKey when RequestShapifier is used for agg.
- boost::optional<BSONObj> _telemetryStoreKey;
+ boost::optional<BSONObj> _queryStatsStoreKey;
// Metrics that are accumulated over the lifetime of the cursor, incremented with each getMore.
- // Useful for diagnostics like telemetry.
+ // Useful for diagnostics like queryStats.
OpDebug::AdditiveMetrics _metrics;
// The RequestShapifier used by telemetry to shapify the request payload into the telemetry
// store key.
- std::unique_ptr<telemetry::RequestShapifier> _telemetryRequestShapifier;
+ std::unique_ptr<query_stats::RequestShapifier> _queryStatsRequestShapifier;
// Flag to decide if diagnostic information should be omitted.
bool _shouldOmitDiagnosticInformation{false};
@@ -598,7 +598,7 @@ void startClientCursorMonitor();
* Currently, telemetry is only collected for find and aggregate requests (and their subsequent
* getMore requests), so these should only be called from those request paths.
*/
-void collectTelemetryMongod(OperationContext* opCtx, ClientCursorPin& cursor);
-void collectTelemetryMongod(OperationContext* opCtx,
- std::unique_ptr<telemetry::RequestShapifier> requestShapifier);
+void collectQueryStatsMongod(OperationContext* opCtx, ClientCursorPin& cursor);
+void collectQueryStatsMongod(OperationContext* opCtx,
+ std::unique_ptr<query_stats::RequestShapifier> requestShapifier);
} // namespace mongo
diff --git a/src/mongo/db/commands/find_cmd.cpp b/src/mongo/db/commands/find_cmd.cpp
index 90e6fa15ca1..983661fbd15 100644
--- a/src/mongo/db/commands/find_cmd.cpp
+++ b/src/mongo/db/commands/find_cmd.cpp
@@ -55,7 +55,7 @@
#include "mongo/db/query/find_request_shapifier.h"
#include "mongo/db/query/get_executor.h"
#include "mongo/db/query/query_knobs_gen.h"
-#include "mongo/db/query/telemetry.h"
+#include "mongo/db/query/query_stats.h"
#include "mongo/db/repl/replication_coordinator.h"
#include "mongo/db/s/query_analysis_writer.h"
#include "mongo/db/service_context.h"
@@ -561,13 +561,14 @@ public:
cq->setUseCqfIfEligible(true);
if (collection) {
- // Collect telemetry. Exclude queries against collections with encrypted fields.
+ // Collect queryStats. Exclude queries against collections with encrypted fields.
if (!collection.get()->getCollectionOptions().encryptedFieldConfig) {
- telemetry::registerRequest(std::make_unique<telemetry::FindRequestShapifier>(
- cq->getFindCommandRequest(), opCtx),
- collection.get()->ns(),
- opCtx,
- cq->getExpCtx());
+ query_stats::registerRequest(
+ std::make_unique<query_stats::FindRequestShapifier>(
+ cq->getFindCommandRequest(), opCtx),
+ collection.get()->ns(),
+ opCtx,
+ cq->getExpCtx());
}
}
@@ -780,9 +781,9 @@ public:
processFLEFindD(
opCtx, findCommand->getNamespaceOrUUID().nss().value(), findCommand.get());
}
- // Set the telemetryStoreKey to none so telemetry isn't collected when we've done a
- // FLE rewrite.
- CurOp::get(opCtx)->debug().telemetryStoreKeyHash = boost::none;
+ // Set the queryStatsStoreKey to none so queryStats isn't collected when we've done
+ // a FLE rewrite.
+ CurOp::get(opCtx)->debug().queryStatsStoreKeyHash = boost::none;
CurOp::get(opCtx)->debug().shouldOmitDiagnosticInformation = true;
}
diff --git a/src/mongo/db/commands/getmore_cmd.cpp b/src/mongo/db/commands/getmore_cmd.cpp
index 8f91862d002..54035c99829 100644
--- a/src/mongo/db/commands/getmore_cmd.cpp
+++ b/src/mongo/db/commands/getmore_cmd.cpp
@@ -703,7 +703,7 @@ public:
metricsCollector.incrementDocUnitsReturned(curOp->getNS(), docUnitsReturned);
curOp->debug().additiveMetrics.nBatches = 1;
curOp->setEndOfOpMetrics(numResults);
- collectTelemetryMongod(opCtx, cursorPin);
+ collectQueryStatsMongod(opCtx, cursorPin);
if (respondWithId) {
cursorDeleter.dismiss();
diff --git a/src/mongo/db/commands/run_aggregate.cpp b/src/mongo/db/commands/run_aggregate.cpp
index a290ef56713..0bc7d7f6415 100644
--- a/src/mongo/db/commands/run_aggregate.cpp
+++ b/src/mongo/db/commands/run_aggregate.cpp
@@ -76,7 +76,7 @@
#include "mongo/db/query/query_feature_flags_gen.h"
#include "mongo/db/query/query_knobs_gen.h"
#include "mongo/db/query/query_planner_common.h"
-#include "mongo/db/query/telemetry.h"
+#include "mongo/db/query/query_stats.h"
#include "mongo/db/read_concern.h"
#include "mongo/db/repl/oplog.h"
#include "mongo/db/repl/read_concern_args.h"
@@ -836,11 +836,11 @@ Status runAggregate(OperationContext* opCtx,
};
auto registerTelemetry = [&]() -> void {
- // Register telemetry. Exclude queries against collections with encrypted fields.
- // We still collect telemetry on collection-less aggregations.
+ // Register queryStats. Exclude queries against collections with encrypted fields.
+ // We still collect queryStats on collection-less aggregations.
if (!(ctx && ctx->getCollection() &&
ctx->getCollection()->getCollectionOptions().encryptedFieldConfig)) {
- telemetry::registerAggRequest(request, opCtx);
+ query_stats::registerAggRequest(request, opCtx);
}
};
@@ -1051,9 +1051,9 @@ Status runAggregate(OperationContext* opCtx,
request.getEncryptionInformation()->setCrudProcessed(true);
}
- // Set the telemetryStoreKey to none so telemetry isn't collected when we've done a FLE
- // rewrite.
- CurOp::get(opCtx)->debug().telemetryStoreKeyHash = boost::none;
+ // Set the queryStatsStoreKey to none so queryStats isn't collected when we've done a
+ // FLE rewrite.
+ CurOp::get(opCtx)->debug().queryStatsStoreKeyHash = boost::none;
}
pipeline->optimizePipeline();
@@ -1223,9 +1223,9 @@ Status runAggregate(OperationContext* opCtx,
curOp->setEndOfOpMetrics(stats.nReturned);
if (keepCursor) {
- collectTelemetryMongod(opCtx, pins[0]);
+ collectQueryStatsMongod(opCtx, pins[0]);
} else {
- collectTelemetryMongod(opCtx, std::move(curOp->debug().telemetryRequestShapifier));
+ collectQueryStatsMongod(opCtx, std::move(curOp->debug().queryStatsRequestShapifier));
}
// For an optimized away pipeline, signal the cache that a query operation has completed.
diff --git a/src/mongo/db/curop.h b/src/mongo/db/curop.h
index 8851993b015..9f5c32b10d1 100644
--- a/src/mongo/db/curop.h
+++ b/src/mongo/db/curop.h
@@ -294,12 +294,12 @@ public:
boost::optional<uint32_t> queryHash;
// The shape of the original query serialized with readConcern, application name, and namespace.
// If boost::none, telemetry should not be collected for this operation.
- boost::optional<std::size_t> telemetryStoreKeyHash;
+ boost::optional<std::size_t> queryStatsStoreKeyHash;
// TODO: SERVER-73152 remove telemetryStoreKey when RequestShapifier is used for agg.
- boost::optional<BSONObj> telemetryStoreKey;
+ boost::optional<BSONObj> queryStatsStoreKey;
// The RequestShapifier used by telemetry to shapify the request payload into the telemetry
// store key.
- std::unique_ptr<telemetry::RequestShapifier> telemetryRequestShapifier;
+ std::unique_ptr<query_stats::RequestShapifier> queryStatsRequestShapifier;
// The query framework that this operation used. Will be unknown for non query operations.
PlanExecutor::QueryFramework queryFramework{PlanExecutor::QueryFramework::kUnknown};
@@ -776,7 +776,7 @@ public:
return computeElapsedTimeTotal(start, _end.load()) - _totalPausedDuration;
}
/**
- * The planningTimeMicros metric, reported in the system profiler and in telemetry, is measured
+ * The planningTimeMicros metric, reported in the system profiler and in queryStats, is measured
* using the Curop instance's _tickSource. Currently, _tickSource is only paused in places where
logical work is being done. If this were to change, and _tickSource
were to be paused during query planning for reasons unrelated to the work of
diff --git a/src/mongo/db/cursor_manager.cpp b/src/mongo/db/cursor_manager.cpp
index ac9c41accfd..34f7d7bdce0 100644
--- a/src/mongo/db/cursor_manager.cpp
+++ b/src/mongo/db/cursor_manager.cpp
@@ -214,10 +214,10 @@ StatusWith<ClientCursorPin> CursorManager::pinCursor(
CurOp::get(opCtx)->debug().queryHash = cursor->_queryHash;
CurOp::get(opCtx)->debug().planCacheKey = cursor->_planCacheKey;
- // Pass along telemetry context so it is retrievable after query execution for storing metrics.
- CurOp::get(opCtx)->debug().telemetryStoreKeyHash = cursor->_telemetryStoreKeyHash;
- // TODO: SERVER-73152 remove telemetryStoreKey when RequestShapifier is used for agg.
- CurOp::get(opCtx)->debug().telemetryStoreKey = cursor->_telemetryStoreKey;
+ // Pass along queryStats context so it is retrievable after query execution for storing metrics.
+ CurOp::get(opCtx)->debug().queryStatsStoreKeyHash = cursor->_queryStatsStoreKeyHash;
+ // TODO: SERVER-73152 remove queryStatsStoreKey when RequestShapifier is used for agg.
+ CurOp::get(opCtx)->debug().queryStatsStoreKey = cursor->_queryStatsStoreKey;
cursor->_operationUsingCursor = opCtx;
diff --git a/src/mongo/db/pipeline/SConscript b/src/mongo/db/pipeline/SConscript
index 72a1ab942b4..eacb62bb6ea 100644
--- a/src/mongo/db/pipeline/SConscript
+++ b/src/mongo/db/pipeline/SConscript
@@ -328,7 +328,7 @@ pipelineEnv.Library(
'document_source_sort_by_count.cpp',
'document_source_streaming_group.cpp',
'document_source_tee_consumer.cpp',
- 'document_source_telemetry.cpp',
+ 'document_source_query_stats.cpp',
'document_source_union_with.cpp',
'document_source_unwind.cpp',
'group_from_first_document_transformation.cpp',
@@ -634,7 +634,7 @@ env.CppUnitTest(
'document_source_skip_test.cpp',
'document_source_sort_by_count_test.cpp',
'document_source_sort_test.cpp',
- 'document_source_telemetry_test.cpp',
+ 'document_source_query_stats_test.cpp',
'document_source_union_with_test.cpp',
'document_source_internal_compute_geo_near_distance_test.cpp',
'document_source_internal_convert_bucket_index_stats_test.cpp',
diff --git a/src/mongo/db/pipeline/abt/document_source_visitor.cpp b/src/mongo/db/pipeline/abt/document_source_visitor.cpp
index 2170ab14407..9b7b27d3af0 100644
--- a/src/mongo/db/pipeline/abt/document_source_visitor.cpp
+++ b/src/mongo/db/pipeline/abt/document_source_visitor.cpp
@@ -58,6 +58,7 @@
#include "mongo/db/pipeline/document_source_operation_metrics.h"
#include "mongo/db/pipeline/document_source_out.h"
#include "mongo/db/pipeline/document_source_plan_cache_stats.h"
+#include "mongo/db/pipeline/document_source_query_stats.h"
#include "mongo/db/pipeline/document_source_queue.h"
#include "mongo/db/pipeline/document_source_redact.h"
#include "mongo/db/pipeline/document_source_sample.h"
@@ -67,7 +68,6 @@
#include "mongo/db/pipeline/document_source_skip.h"
#include "mongo/db/pipeline/document_source_sort.h"
#include "mongo/db/pipeline/document_source_tee_consumer.h"
-#include "mongo/db/pipeline/document_source_telemetry.h"
#include "mongo/db/pipeline/document_source_union_with.h"
#include "mongo/db/pipeline/document_source_unwind.h"
#include "mongo/db/pipeline/visitors/document_source_visitor_registry_mongod.h"
diff --git a/src/mongo/db/pipeline/aggregate_command.idl b/src/mongo/db/pipeline/aggregate_command.idl
index b53ea540f8e..476fc8dbb9d 100644
--- a/src/mongo/db/pipeline/aggregate_command.idl
+++ b/src/mongo/db/pipeline/aggregate_command.idl
@@ -96,10 +96,10 @@ commands:
- privilege: # $planCacheStats
resource_pattern: exact_namespace
action_type: planCacheRead
- - privilege: # $telemetry
- agg_stage: telemetry
+ - privilege: # $queryStats
+ agg_stage: queryStats
resource_pattern: cluster
- action_type: telemetryRead
+ action_type: queryStatsRead
- privilege: # $changeStream
resource_pattern: exact_namespace
action_type: changeStream
diff --git a/src/mongo/db/pipeline/aggregate_request_shapifier.cpp b/src/mongo/db/pipeline/aggregate_request_shapifier.cpp
index 40ed6c2ce79..485b97e2c22 100644
--- a/src/mongo/db/pipeline/aggregate_request_shapifier.cpp
+++ b/src/mongo/db/pipeline/aggregate_request_shapifier.cpp
@@ -31,20 +31,20 @@
#include "mongo/db/query/query_shape.h"
-namespace mongo::telemetry {
+namespace mongo::query_stats {
-BSONObj AggregateRequestShapifier::makeTelemetryKey(const SerializationOptions& opts,
- OperationContext* opCtx) const {
+BSONObj AggregateRequestShapifier::makeQueryStatsKey(const SerializationOptions& opts,
+ OperationContext* opCtx) const {
// TODO SERVER-76087 We will likely want to set a flag here to stop $search from calling out
// to mongot.
auto expCtx = make_intrusive<ExpressionContext>(opCtx, nullptr, _request.getNamespace());
expCtx->variables.setDefaultRuntimeConstants(opCtx);
expCtx->maxFeatureCompatibilityVersion = boost::none; // Ensure all features are allowed.
expCtx->stopExpressionCounters();
- return makeTelemetryKey(opts, expCtx);
+ return makeQueryStatsKey(opts, expCtx);
}
-BSONObj AggregateRequestShapifier::makeTelemetryKey(
+BSONObj AggregateRequestShapifier::makeQueryStatsKey(
const SerializationOptions& opts, const boost::intrusive_ptr<ExpressionContext>& expCtx) const {
BSONObjBuilder bob;
@@ -84,4 +84,4 @@ BSONObj AggregateRequestShapifier::makeTelemetryKey(
return bob.obj();
}
-} // namespace mongo::telemetry
+} // namespace mongo::query_stats
diff --git a/src/mongo/db/pipeline/aggregate_request_shapifier.h b/src/mongo/db/pipeline/aggregate_request_shapifier.h
index 3a0c41f8dd9..d78dae31be7 100644
--- a/src/mongo/db/pipeline/aggregate_request_shapifier.h
+++ b/src/mongo/db/pipeline/aggregate_request_shapifier.h
@@ -33,7 +33,7 @@
#include "mongo/db/pipeline/pipeline.h"
#include "mongo/db/query/request_shapifier.h"
-namespace mongo::telemetry {
+namespace mongo::query_stats {
/**
* Handles shapification for AggregateCommandRequests. Requires a pre-parsed pipeline in order to
@@ -50,13 +50,14 @@ public:
virtual ~AggregateRequestShapifier() = default;
- BSONObj makeTelemetryKey(const SerializationOptions& opts, OperationContext* opCtx) const final;
+ BSONObj makeQueryStatsKey(const SerializationOptions& opts,
+ OperationContext* opCtx) const final;
- BSONObj makeTelemetryKey(const SerializationOptions& opts,
- const boost::intrusive_ptr<ExpressionContext>& expCtx) const final;
+ BSONObj makeQueryStatsKey(const SerializationOptions& opts,
+ const boost::intrusive_ptr<ExpressionContext>& expCtx) const final;
private:
const AggregateCommandRequest& _request;
const Pipeline& _pipeline;
};
-} // namespace mongo::telemetry
+} // namespace mongo::query_stats
diff --git a/src/mongo/db/pipeline/document_source_telemetry.cpp b/src/mongo/db/pipeline/document_source_query_stats.cpp
index b037515796f..48f14e0ade6 100644
--- a/src/mongo/db/pipeline/document_source_telemetry.cpp
+++ b/src/mongo/db/pipeline/document_source_query_stats.cpp
@@ -27,7 +27,7 @@
* it in the license file.
*/
-#include "mongo/db/pipeline/document_source_telemetry.h"
+#include "mongo/db/pipeline/document_source_query_stats.h"
#include "mongo/bson/bsontypes.h"
#include "mongo/bson/timestamp.h"
@@ -38,14 +38,14 @@
namespace mongo {
namespace {
-CounterMetric telemetryHmacApplicationErrors("telemetry.numHmacApplicationErrors");
+CounterMetric queryStatsHmacApplicationErrors("queryStats.numHmacApplicationErrors");
}
-REGISTER_DOCUMENT_SOURCE_WITH_FEATURE_FLAG(telemetry,
- DocumentSourceTelemetry::LiteParsed::parse,
- DocumentSourceTelemetry::createFromBson,
+REGISTER_DOCUMENT_SOURCE_WITH_FEATURE_FLAG(queryStats,
+ DocumentSourceQueryStats::LiteParsed::parse,
+ DocumentSourceQueryStats::createFromBson,
AllowedWithApiStrict::kNeverInVersion1,
- feature_flags::gFeatureFlagTelemetry);
+ feature_flags::gFeatureFlagQueryStats);
namespace {
/**
@@ -55,7 +55,7 @@ boost::optional<bool> parseApplyHmacToIdentifiers(const BSONElement& el) {
if (el.fieldNameStringData() == "applyHmacToIdentifiers"_sd) {
auto type = el.type();
uassert(ErrorCodes::FailedToParse,
- str::stream() << DocumentSourceTelemetry::kStageName
+ str::stream() << DocumentSourceQueryStats::kStageName
<< " applyHmacToIdentifiers parameter must be boolean. Found type: "
<< typeName(type),
type == BSONType::Bool);
@@ -74,14 +74,14 @@ boost::optional<std::string> parseHmacKey(const BSONElement& el) {
int len;
auto data = el.binData(len);
uassert(ErrorCodes::FailedToParse,
- str::stream() << DocumentSourceTelemetry::kStageName
+ str::stream() << DocumentSourceQueryStats::kStageName
<< "hmacKey must be greater than or equal to 32 bytes",
len >= 32);
return {{data, (size_t)len}};
}
uasserted(ErrorCodes::FailedToParse,
str::stream()
- << DocumentSourceTelemetry::kStageName
+ << DocumentSourceQueryStats::kStageName
<< " hmacKey parameter must be bindata of length 32 or greater. Found type: "
<< typeName(type));
}
@@ -95,7 +95,7 @@ boost::optional<std::string> parseHmacKey(const BSONElement& el) {
template <typename Ctor>
auto parseSpec(const BSONElement& spec, const Ctor& ctor) {
uassert(ErrorCodes::FailedToParse,
- str::stream() << DocumentSourceTelemetry::kStageName
+ str::stream() << DocumentSourceQueryStats::kStageName
<< " value must be an object. Found: " << typeName(spec.type()),
spec.type() == BSONType::Object);
@@ -110,7 +110,7 @@ auto parseSpec(const BSONElement& spec, const Ctor& ctor) {
} else {
uasserted(ErrorCodes::FailedToParse,
str::stream()
- << DocumentSourceTelemetry::kStageName
+ << DocumentSourceQueryStats::kStageName
<< " parameters object may only contain 'applyHmacToIdentifiers' or "
"'hmacKey' options. Found: "
<< el.fieldName());
@@ -122,34 +122,34 @@ auto parseSpec(const BSONElement& spec, const Ctor& ctor) {
} // namespace
-std::unique_ptr<DocumentSourceTelemetry::LiteParsed> DocumentSourceTelemetry::LiteParsed::parse(
+std::unique_ptr<DocumentSourceQueryStats::LiteParsed> DocumentSourceQueryStats::LiteParsed::parse(
const NamespaceString& nss, const BSONElement& spec) {
return parseSpec(spec, [&](bool applyHmacToIdentifiers, std::string hmacKey) {
- return std::make_unique<DocumentSourceTelemetry::LiteParsed>(
+ return std::make_unique<DocumentSourceQueryStats::LiteParsed>(
spec.fieldName(), applyHmacToIdentifiers, hmacKey);
});
}
-boost::intrusive_ptr<DocumentSource> DocumentSourceTelemetry::createFromBson(
+boost::intrusive_ptr<DocumentSource> DocumentSourceQueryStats::createFromBson(
BSONElement spec, const boost::intrusive_ptr<ExpressionContext>& pExpCtx) {
const NamespaceString& nss = pExpCtx->ns;
uassert(ErrorCodes::InvalidNamespace,
- "$telemetry must be run against the 'admin' database with {aggregate: 1}",
+ "$queryStats must be run against the 'admin' database with {aggregate: 1}",
nss.db() == DatabaseName::kAdmin.db() && nss.isCollectionlessAggregateNS());
return parseSpec(spec, [&](bool applyHmacToIdentifiers, std::string hmacKey) {
- return new DocumentSourceTelemetry(pExpCtx, applyHmacToIdentifiers, hmacKey);
+ return new DocumentSourceQueryStats(pExpCtx, applyHmacToIdentifiers, hmacKey);
});
}
-Value DocumentSourceTelemetry::serialize(SerializationOptions opts) const {
+Value DocumentSourceQueryStats::serialize(SerializationOptions opts) const {
// This document source never contains any user information, so no need for any work when
// applying hmac.
return Value{Document{{kStageName, Document{}}}};
}
-DocumentSource::GetNextResult DocumentSourceTelemetry::doGetNext() {
+DocumentSource::GetNextResult DocumentSourceQueryStats::doGetNext() {
/**
* We maintain nested iterators:
* - Outer one over the set of partitions.
@@ -158,7 +158,7 @@ DocumentSource::GetNextResult DocumentSourceTelemetry::doGetNext() {
* When an inner iterator is present and contains more elements, we can return the next element.
* When the inner iterator is exhausted, we move to the next element in the outer iterator and
* create a new inner iterator. When the outer iterator is exhausted, we have finished iterating
- * over the telemetry store entries.
+ * over the queryStats store entries.
*
* The inner iterator iterates over a materialized container of all entries in the partition.
* This is done to reduce the time under which the partition lock is held.
@@ -172,17 +172,17 @@ DocumentSource::GetNextResult DocumentSourceTelemetry::doGetNext() {
return {std::move(doc)};
}
- TelemetryStore& _telemetryStore = getTelemetryStore(getContext()->opCtx);
+ QueryStatsStore& _queryStatsStore = getQueryStatsStore(getContext()->opCtx);
// Materialized partition is exhausted, move to the next.
_currentPartition++;
- if (_currentPartition >= _telemetryStore.numPartitions()) {
+ if (_currentPartition >= _queryStatsStore.numPartitions()) {
return DocumentSource::GetNextResult::makeEOF();
}
// We only keep the partition (which holds a lock) for the time needed to materialize it to
// a set of Document instances.
- auto&& partition = _telemetryStore.getPartition(_currentPartition);
+ auto&& partition = _queryStatsStore.getPartition(_currentPartition);
// Capture the time at which reading the partition begins to indicate to the caller
// when the snapshot began.
@@ -190,22 +190,22 @@ DocumentSource::GetNextResult DocumentSourceTelemetry::doGetNext() {
Timestamp{Timestamp(Date_t::now().toMillisSinceEpoch() / 1000, 0)};
for (auto&& [key, metrics] : *partition) {
try {
- auto telemetryKey =
- metrics->computeTelemetryKey(pExpCtx->opCtx, _applyHmacToIdentifiers, _hmacKey);
- _materializedPartition.push_back({{"key", std::move(telemetryKey)},
+ auto queryStatsKey = metrics->computeQueryStatsKey(
+ pExpCtx->opCtx, _applyHmacToIdentifiers, _hmacKey);
+ _materializedPartition.push_back({{"key", std::move(queryStatsKey)},
{"metrics", metrics->toBSON()},
{"asOf", partitionReadTime}});
} catch (const DBException& ex) {
- telemetryHmacApplicationErrors.increment();
+ queryStatsHmacApplicationErrors.increment();
LOGV2_DEBUG(7349403,
3,
"Error encountered when applying hmac to query shape, will not publish "
- "telemetry for this entry.",
+ "queryStats for this entry.",
"status"_attr = ex.toStatus(),
"hash"_attr = key);
if (kDebugBuild) {
tasserted(7349401,
- "Was not able to re-parse telemetry key when reading telemetry.");
+ "Was not able to re-parse queryStats key when reading queryStats.");
}
}
}
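The block comment above describes the two-level iteration; condensed into one loop, the control flow of doGetNext() is roughly the following. This is a simplified sketch of the code in this hunk using the renamed members, with the hmac error handling omitted.

    // Simplified sketch of the doGetNext() loop described above (illustrative only).
    while (true) {
        if (!_materializedPartition.empty()) {
            // Inner iterator: drain the materialized copy of the current partition.
            Document doc = std::move(_materializedPartition.front());
            _materializedPartition.pop_front();
            return {std::move(doc)};
        }
        QueryStatsStore& store = getQueryStatsStore(getContext()->opCtx);
        _currentPartition++;
        if (_currentPartition >= store.numPartitions()) {
            // Outer iterator exhausted: no more partitions to read.
            return DocumentSource::GetNextResult::makeEOF();
        }
        // Hold the partition (and its lock) only long enough to copy it into Documents.
        auto&& partition = store.getPartition(_currentPartition);
        for (auto&& [key, metrics] : *partition) {
            _materializedPartition.push_back(
                Document{{"key",
                          metrics->computeQueryStatsKey(
                              pExpCtx->opCtx, _applyHmacToIdentifiers, _hmacKey)},
                         {"metrics", metrics->toBSON()}});
        }
    }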
diff --git a/src/mongo/db/pipeline/document_source_telemetry.h b/src/mongo/db/pipeline/document_source_query_stats.h
index c71bff210ac..74d40583a6a 100644
--- a/src/mongo/db/pipeline/document_source_telemetry.h
+++ b/src/mongo/db/pipeline/document_source_query_stats.h
@@ -31,16 +31,16 @@
#include "mongo/db/pipeline/document_source.h"
#include "mongo/db/pipeline/lite_parsed_document_source.h"
-#include "mongo/db/query/telemetry.h"
+#include "mongo/db/query/query_stats.h"
#include "mongo/util/producer_consumer_queue.h"
namespace mongo {
-using namespace telemetry;
+using namespace query_stats;
-class DocumentSourceTelemetry final : public DocumentSource {
+class DocumentSourceQueryStats final : public DocumentSource {
public:
- static constexpr StringData kStageName = "$telemetry"_sd;
+ static constexpr StringData kStageName = "$queryStats"_sd;
class LiteParsed final : public LiteParsedDocumentSource {
public:
@@ -58,12 +58,12 @@ public:
PrivilegeVector requiredPrivileges(bool isMongos,
bool bypassDocumentValidation) const override {
- return {Privilege(ResourcePattern::forClusterResource(), ActionType::telemetryRead)};
+ return {Privilege(ResourcePattern::forClusterResource(), ActionType::queryStatsRead)};
;
}
bool allowedToPassthroughFromMongos() const final {
- // $telemetry must be run locally on a mongod.
+ // $queryStats must be run locally on a mongod.
return false;
}
@@ -83,7 +83,7 @@ public:
static boost::intrusive_ptr<DocumentSource> createFromBson(
BSONElement elem, const boost::intrusive_ptr<ExpressionContext>& pExpCtx);
- virtual ~DocumentSourceTelemetry() = default;
+ virtual ~DocumentSourceQueryStats() = default;
StageConstraints constraints(
Pipeline::SplitState = Pipeline::SplitState::kUnsplit) const override {
@@ -114,9 +114,9 @@ public:
void addVariableRefs(std::set<Variables::Id>* refs) const final {}
private:
- DocumentSourceTelemetry(const boost::intrusive_ptr<ExpressionContext>& expCtx,
- bool applyHmacToIdentifiers = false,
- std::string hmacKey = {})
+ DocumentSourceQueryStats(const boost::intrusive_ptr<ExpressionContext>& expCtx,
+ bool applyHmacToIdentifiers = false,
+ std::string hmacKey = {})
: DocumentSource(kStageName, expCtx),
_applyHmacToIdentifiers(applyHmacToIdentifiers),
_hmacKey(hmacKey) {}
@@ -130,10 +130,10 @@ private:
std::deque<Document> _materializedPartition;
/**
- * Iterator over all telemetry partitions. This is incremented when we exhaust the current
+ * Iterator over all queryStats partitions. This is incremented when we exhaust the current
* _materializedPartition.
*/
- TelemetryStore::PartitionId _currentPartition = -1;
+ QueryStatsStore::PartitionId _currentPartition = -1;
// When true, apply hmac to field names from returned query shapes.
bool _applyHmacToIdentifiers;
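Given the privilege and mongos checks above, plus the admin/collectionless requirement enforced in createFromBson, a request that exercises this stage would be shaped roughly like the following. This is illustrative only and is assembled with fromjson purely for readability; it is not part of the patch.

    // Illustrative only: the kind of command the checks above admit, i.e. a
    // collectionless aggregate against 'admin' running $queryStats, issued by a user
    // holding the queryStatsRead action on the cluster resource.
    BSONObj cmd = fromjson(
        "{aggregate: 1, pipeline: [{$queryStats: {applyHmacToIdentifiers: true}}], "
        "cursor: {}}");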
diff --git a/src/mongo/db/pipeline/document_source_telemetry_test.cpp b/src/mongo/db/pipeline/document_source_query_stats_test.cpp
index d08ce06b98c..7e29a44d591 100644
--- a/src/mongo/db/pipeline/document_source_telemetry_test.cpp
+++ b/src/mongo/db/pipeline/document_source_query_stats_test.cpp
@@ -32,7 +32,7 @@
#include "mongo/db/exec/document_value/document.h"
#include "mongo/db/exec/document_value/document_value_test_util.h"
#include "mongo/db/pipeline/aggregation_context_fixture.h"
-#include "mongo/db/pipeline/document_source_telemetry.h"
+#include "mongo/db/pipeline/document_source_query_stats.h"
#include "mongo/unittest/unittest.h"
#include "mongo/util/assert_util.h"
#include "mongo/util/str.h"
@@ -45,50 +45,50 @@ namespace {
* {aggregate: 1} by default, so that parsing tests other than those which validate the namespace do
* not need to explicitly set it.
*/
-class DocumentSourceTelemetryTest : public AggregationContextFixture {
+class DocumentSourceQueryStatsTest : public AggregationContextFixture {
public:
- DocumentSourceTelemetryTest()
+ DocumentSourceQueryStatsTest()
: AggregationContextFixture(
NamespaceString::makeCollectionlessAggregateNSS(DatabaseName::kAdmin)) {}
};
-TEST_F(DocumentSourceTelemetryTest, ShouldFailToParseIfSpecIsNotObject) {
- ASSERT_THROWS_CODE(DocumentSourceTelemetry::createFromBson(
- fromjson("{$telemetry: 1}").firstElement(), getExpCtx()),
+TEST_F(DocumentSourceQueryStatsTest, ShouldFailToParseIfSpecIsNotObject) {
+ ASSERT_THROWS_CODE(DocumentSourceQueryStats::createFromBson(
+ fromjson("{$queryStats: 1}").firstElement(), getExpCtx()),
AssertionException,
ErrorCodes::FailedToParse);
}
-TEST_F(DocumentSourceTelemetryTest, ShouldFailToParseIfNotRunOnAdmin) {
+TEST_F(DocumentSourceQueryStatsTest, ShouldFailToParseIfNotRunOnAdmin) {
getExpCtx()->ns = NamespaceString::makeCollectionlessAggregateNSS(
DatabaseName::createDatabaseName_forTest(boost::none, "foo"));
- ASSERT_THROWS_CODE(DocumentSourceTelemetry::createFromBson(
- fromjson("{$telemetry: {}}").firstElement(), getExpCtx()),
+ ASSERT_THROWS_CODE(DocumentSourceQueryStats::createFromBson(
+ fromjson("{$queryStats: {}}").firstElement(), getExpCtx()),
AssertionException,
ErrorCodes::InvalidNamespace);
}
-TEST_F(DocumentSourceTelemetryTest, ShouldFailToParseIfNotRunWithAggregateOne) {
+TEST_F(DocumentSourceQueryStatsTest, ShouldFailToParseIfNotRunWithAggregateOne) {
getExpCtx()->ns = NamespaceString::createNamespaceString_forTest("admin.foo");
- ASSERT_THROWS_CODE(DocumentSourceTelemetry::createFromBson(
- fromjson("{$telemetry: {}}").firstElement(), getExpCtx()),
+ ASSERT_THROWS_CODE(DocumentSourceQueryStats::createFromBson(
+ fromjson("{$queryStats: {}}").firstElement(), getExpCtx()),
AssertionException,
ErrorCodes::InvalidNamespace);
}
-TEST_F(DocumentSourceTelemetryTest, ShouldFailToParseIfUnrecognisedParameterSpecified) {
- ASSERT_THROWS_CODE(DocumentSourceTelemetry::createFromBson(
- fromjson("{$telemetry: {foo: true}}").firstElement(), getExpCtx()),
+TEST_F(DocumentSourceQueryStatsTest, ShouldFailToParseIfUnrecognisedParameterSpecified) {
+ ASSERT_THROWS_CODE(DocumentSourceQueryStats::createFromBson(
+ fromjson("{$queryStats: {foo: true}}").firstElement(), getExpCtx()),
AssertionException,
ErrorCodes::FailedToParse);
}
-TEST_F(DocumentSourceTelemetryTest, ParseAndSerialize) {
- auto obj = fromjson("{$telemetry: {}}");
- auto doc = DocumentSourceTelemetry::createFromBson(obj.firstElement(), getExpCtx());
- auto telemetryOp = static_cast<DocumentSourceTelemetry*>(doc.get());
- auto expected = Document{{"$telemetry", Document{}}};
- ASSERT_DOCUMENT_EQ(telemetryOp->serialize().getDocument(), expected);
+TEST_F(DocumentSourceQueryStatsTest, ParseAndSerialize) {
+ auto obj = fromjson("{$queryStats: {}}");
+ auto doc = DocumentSourceQueryStats::createFromBson(obj.firstElement(), getExpCtx());
+ auto queryStatsOp = static_cast<DocumentSourceQueryStats*>(doc.get());
+ auto expected = Document{{"$queryStats", Document{}}};
+ ASSERT_DOCUMENT_EQ(queryStatsOp->serialize().getDocument(), expected);
}
} // namespace
diff --git a/src/mongo/db/pipeline/visitors/document_source_visitor_registry_mongod.h b/src/mongo/db/pipeline/visitors/document_source_visitor_registry_mongod.h
index 24d11c814be..32ec042f6dc 100644
--- a/src/mongo/db/pipeline/visitors/document_source_visitor_registry_mongod.h
+++ b/src/mongo/db/pipeline/visitors/document_source_visitor_registry_mongod.h
@@ -70,6 +70,7 @@
#include "mongo/db/pipeline/document_source_operation_metrics.h"
#include "mongo/db/pipeline/document_source_out.h"
#include "mongo/db/pipeline/document_source_plan_cache_stats.h"
+#include "mongo/db/pipeline/document_source_query_stats.h"
#include "mongo/db/pipeline/document_source_queue.h"
#include "mongo/db/pipeline/document_source_redact.h"
#include "mongo/db/pipeline/document_source_replace_root.h"
@@ -83,7 +84,6 @@
#include "mongo/db/pipeline/document_source_sort.h"
#include "mongo/db/pipeline/document_source_streaming_group.h"
#include "mongo/db/pipeline/document_source_tee_consumer.h"
-#include "mongo/db/pipeline/document_source_telemetry.h"
#include "mongo/db/pipeline/document_source_union_with.h"
#include "mongo/db/pipeline/document_source_unwind.h"
#include "mongo/db/pipeline/visitors/document_source_visitor_registry.h"
@@ -169,7 +169,7 @@ void registerMongodVisitor(ServiceContext* service) {
DocumentSourceSort,
DocumentSourceStreamingGroup,
DocumentSourceTeeConsumer,
- DocumentSourceTelemetry,
+ DocumentSourceQueryStats,
DocumentSourceUnionWith,
DocumentSourceUnwind>(&registry);
}
diff --git a/src/mongo/db/query/SConscript b/src/mongo/db/query/SConscript
index 22e24674e1d..7f9e1c69a00 100644
--- a/src/mongo/db/query/SConscript
+++ b/src/mongo/db/query/SConscript
@@ -262,7 +262,7 @@ env.Library(
'query_feature_flags.idl',
'query_knobs.idl',
'sbe_plan_cache_on_parameter_change.cpp',
- 'telemetry_util.cpp',
+ 'query_stats_util.cpp',
],
LIBDEPS_PRIVATE=[
'$BUILD_DIR/mongo/db/commands/test_commands_enabled',
@@ -366,7 +366,7 @@ env.Library(
target='op_metrics',
source=[
'query_shape.cpp',
- 'telemetry.cpp',
+ 'query_stats.cpp',
],
LIBDEPS=[
'$BUILD_DIR/mongo/base',
@@ -448,6 +448,7 @@ env.CppUnitTest(
"query_settings_test.cpp",
"query_shape_test.cpp",
"query_shape_test.idl",
+ "query_stats_store_test.cpp",
"query_solution_test.cpp",
"rate_limiting_test.cpp",
"sbe_and_hash_test.cpp",
@@ -461,7 +462,6 @@ env.CppUnitTest(
"sbe_stage_builder_type_checker_test.cpp",
"shard_filterer_factory_mock.cpp",
"sort_pattern_test.cpp",
- "telemetry_store_test.cpp",
"util/memory_util_test.cpp",
"view_response_formatter_test.cpp",
'map_reduce_output_format_test.cpp',
diff --git a/src/mongo/db/query/cqf_command_utils.cpp b/src/mongo/db/query/cqf_command_utils.cpp
index a6279c43400..5db15a5ceee 100644
--- a/src/mongo/db/query/cqf_command_utils.cpp
+++ b/src/mongo/db/query/cqf_command_utils.cpp
@@ -111,6 +111,7 @@
#include "mongo/db/pipeline/document_source_operation_metrics.h"
#include "mongo/db/pipeline/document_source_out.h"
#include "mongo/db/pipeline/document_source_plan_cache_stats.h"
+#include "mongo/db/pipeline/document_source_query_stats.h"
#include "mongo/db/pipeline/document_source_queue.h"
#include "mongo/db/pipeline/document_source_redact.h"
#include "mongo/db/pipeline/document_source_replace_root.h"
@@ -124,7 +125,6 @@
#include "mongo/db/pipeline/document_source_sort.h"
#include "mongo/db/pipeline/document_source_streaming_group.h"
#include "mongo/db/pipeline/document_source_tee_consumer.h"
-#include "mongo/db/pipeline/document_source_telemetry.h"
#include "mongo/db/pipeline/document_source_union_with.h"
#include "mongo/db/pipeline/document_source_unwind.h"
#include "mongo/db/pipeline/visitors/document_source_visitor_registry_mongod.h"
diff --git a/src/mongo/db/query/find.cpp b/src/mongo/db/query/find.cpp
index c54138afd4b..dcd402e9c70 100644
--- a/src/mongo/db/query/find.cpp
+++ b/src/mongo/db/query/find.cpp
@@ -112,7 +112,7 @@ void endQueryOp(OperationContext* opCtx,
auto curOp = CurOp::get(opCtx);
// Fill out basic CurOp query exec properties. More metrics (nreturned and executionTime)
- // are collected within collectTelemetryMongod.
+ // are collected within collectQueryStatsMongod.
curOp->debug().cursorid = (cursor.has_value() ? cursor->getCursor()->cursorid() : -1);
curOp->debug().cursorExhausted = !cursor.has_value();
curOp->debug().additiveMetrics.nBatches = 1;
@@ -125,9 +125,9 @@ void endQueryOp(OperationContext* opCtx,
curOp->setEndOfOpMetrics(numResults);
if (cursor) {
- collectTelemetryMongod(opCtx, *cursor);
+ collectQueryStatsMongod(opCtx, *cursor);
} else {
- collectTelemetryMongod(opCtx, std::move(curOp->debug().telemetryRequestShapifier));
+ collectQueryStatsMongod(opCtx, std::move(curOp->debug().queryStatsRequestShapifier));
}
if (collection) {
diff --git a/src/mongo/db/query/find_request_shapifier.cpp b/src/mongo/db/query/find_request_shapifier.cpp
index 8002a152a13..83560f3acdb 100644
--- a/src/mongo/db/query/find_request_shapifier.cpp
+++ b/src/mongo/db/query/find_request_shapifier.cpp
@@ -34,7 +34,7 @@
#include "mongo/db/query/query_request_helper.h"
#include "mongo/db/query/query_shape.h"
-namespace mongo::telemetry {
+namespace mongo::query_stats {
void addNonShapeObjCmdLiterals(BSONObjBuilder* bob,
const FindCommandRequest& findCommand,
@@ -58,8 +58,8 @@ void addNonShapeObjCmdLiterals(BSONObjBuilder* bob,
}
-BSONObj FindRequestShapifier::makeTelemetryKey(const SerializationOptions& opts,
- OperationContext* opCtx) const {
+BSONObj FindRequestShapifier::makeQueryStatsKey(const SerializationOptions& opts,
+ OperationContext* opCtx) const {
auto expCtx = make_intrusive<ExpressionContext>(
opCtx, _request, nullptr /* collator doesn't matter here.*/, false /* mayDbProfile */);
expCtx->maxFeatureCompatibilityVersion = boost::none; // Ensure all features are allowed.
@@ -67,10 +67,10 @@ BSONObj FindRequestShapifier::makeTelemetryKey(const SerializationOptions& opts,
// expressions/stages, so it's a side effect tied to parsing. We must stop expression counters
// before re-parsing to avoid adding to the counters more than once per a given query.
expCtx->stopExpressionCounters();
- return makeTelemetryKey(opts, expCtx);
+ return makeQueryStatsKey(opts, expCtx);
}
-BSONObj FindRequestShapifier::makeTelemetryKey(
+BSONObj FindRequestShapifier::makeQueryStatsKey(
const SerializationOptions& opts, const boost::intrusive_ptr<ExpressionContext>& expCtx) const {
BSONObjBuilder bob;
@@ -102,4 +102,4 @@ BSONObj FindRequestShapifier::makeTelemetryKey(
return bob.obj();
}
-} // namespace mongo::telemetry
+} // namespace mongo::query_stats
diff --git a/src/mongo/db/query/find_request_shapifier.h b/src/mongo/db/query/find_request_shapifier.h
index b03f84eb1ab..79f8223052a 100644
--- a/src/mongo/db/query/find_request_shapifier.h
+++ b/src/mongo/db/query/find_request_shapifier.h
@@ -32,7 +32,7 @@
#include "mongo/db/query/find_command_gen.h"
#include "mongo/db/query/request_shapifier.h"
-namespace mongo::telemetry {
+namespace mongo::query_stats {
/**
* Handles shapification for FindCommandRequests.
@@ -49,12 +49,13 @@ public:
virtual ~FindRequestShapifier() = default;
- BSONObj makeTelemetryKey(const SerializationOptions& opts, OperationContext* opCtx) const final;
+ BSONObj makeQueryStatsKey(const SerializationOptions& opts,
+ OperationContext* opCtx) const final;
- BSONObj makeTelemetryKey(const SerializationOptions& opts,
- const boost::intrusive_ptr<ExpressionContext>& expCtx) const final;
+ BSONObj makeQueryStatsKey(const SerializationOptions& opts,
+ const boost::intrusive_ptr<ExpressionContext>& expCtx) const final;
private:
FindCommandRequest _request;
};
-} // namespace mongo::telemetry
+} // namespace mongo::query_stats
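Typical usage of the renamed interface mirrors the unit tests later in this patch: construct a shapifier around a FindCommandRequest and ask it for the key under the debug-type-string literal policy. A sketch follows; the shapifier constructor arguments are an assumption, since only the makeQueryStatsKey calls appear in this patch.

    // Sketch of typical usage; constructor arguments are assumed, the rest mirrors the
    // query_stats_store_test.cpp changes below.
    FindCommandRequest fcr{NamespaceStringOrUUID(NamespaceString("testDB.testColl"))};
    fcr.setFilter(BSON("a" << 1));
    FindRequestShapifier findShapifier(fcr, expCtx->opCtx);  // assumed constructor shape
    SerializationOptions opts;
    opts.literalPolicy = LiteralSerializationPolicy::kToDebugTypeString;
    opts.applyHmacToIdentifiers = false;
    BSONObj key = findShapifier.makeQueryStatsKey(opts, expCtx);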
diff --git a/src/mongo/db/query/query_feature_flags.idl b/src/mongo/db/query/query_feature_flags.idl
index cbd970ca47d..e18477beb9c 100644
--- a/src/mongo/db/query/query_feature_flags.idl
+++ b/src/mongo/db/query/query_feature_flags.idl
@@ -90,9 +90,9 @@ feature_flags:
default: false
shouldBeFCVGated: true
- featureFlagTelemetry:
- description: "Feature flag for enabling the telemetry store."
- cpp_varname: gFeatureFlagTelemetry
+ featureFlagQueryStats:
+ description: "Feature flag for enabling the queryStats store."
+ cpp_varname: gFeatureFlagQueryStats
default: false
shouldBeFCVGated: true
diff --git a/src/mongo/db/query/query_knobs.idl b/src/mongo/db/query/query_knobs.idl
index d631ab42d3d..4fc1e362524 100644
--- a/src/mongo/db/query/query_knobs.idl
+++ b/src/mongo/db/query/query_knobs.idl
@@ -36,7 +36,7 @@ global:
- "mongo/db/query/ce_mode_parameter.h"
- "mongo/db/query/explain_version_validator.h"
- "mongo/db/query/sbe_plan_cache_on_parameter_change.h"
- - "mongo/db/query/telemetry_util.h"
+ - "mongo/db/query/query_stats_util.h"
- "mongo/platform/atomic_proxy.h"
- "mongo/platform/atomic_word.h"
@@ -1018,32 +1018,32 @@ server_parameters:
default: false
test_only: true
- internalQueryConfigureTelemetrySamplingRate:
- description: "The maximum number of queries per second that are sampled for query telemetry.
+ internalQueryStatsSamplingRate:
+ description: "The maximum number of queries per second that are sampled for query stats.
If the rate of queries goes above this number, then rate limiting will kick in, and any
further queries will not be sampled. To sample all queries, this can be set to -1. This can be
- set to 0 to turn telemetry off completely."
+ set to 0 to turn queryStats off completely."
set_at: [ startup, runtime ]
- cpp_varname: "queryTelemetrySamplingRate"
+ cpp_varname: "queryQueryStatsSamplingRate"
cpp_vartype: AtomicWord<int>
default: 0
validator:
gte: -1
- on_update: telemetry_util::onTelemetrySamplingRateUpdate
+ on_update: query_stats_util::onQueryStatsSamplingRateUpdate
- internalQueryConfigureTelemetryCacheSize:
- description: "The maximum amount of memory that the system will allocate for the query telemetry
+ internalQueryConfigureQueryStatsCacheSize:
+ description: "The maximum amount of memory that the system will allocate for the queryStats
cache. This will accept values in either of the following formats:
1. <number>% indicates a percentage of the physical memory available to the process. E.g.: 15%.
2. <number>(MB|GB), indicates the amount of memory in MB or GB. E.g.: 1.5GB, 100MB.
The default value is 1%, which means 1% of the physical memory available to the process."
set_at: [ startup, runtime ]
- cpp_varname: "queryTelemetryStoreSize"
+ cpp_varname: "queryQueryStatsStoreSize"
cpp_vartype: synchronized_value<std::string>
default: "1%"
- on_update: telemetry_util::onTelemetryStoreSizeUpdate
+ on_update: query_stats_util::onQueryStatsStoreSizeUpdate
validator:
- callback: telemetry_util::validateTelemetryStoreSize
+ callback: query_stats_util::validateQueryStatsStoreSize
internalQueryColumnScanMinCollectionSizeBytes:
description: "The min collection size threshold for which column scan will always be allowed. If
@@ -1130,7 +1130,7 @@ server_parameters:
default: 60000
validator:
gte: 0
-
+
internalQueryAggMulticastMaxConcurrency:
description: "Max number of concurrent requests when aggregations are sent to all shard servers"
set_at: startup
@@ -1173,8 +1173,8 @@ server_parameters:
gte: 0
internalQueryAutoParameterizationMaxParameterCount:
- description: "The maximum numbers of parameters that query auto-parameterization can extract from a query.
- If auto parameterizating a query would result in a greater number of parameters than the limit,
+ description: "The maximum numbers of parameters that query auto-parameterization can extract from a query.
+ If auto parameterizating a query would result in a greater number of parameters than the limit,
then auto parameterization will not be performed.
If set to 0, then no limit will be applied."
set_at: [ startup, runtime ]
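The internalQueryStatsSamplingRate text above defines three regimes: -1 samples everything, 0 disables collection, and a positive value caps the number of sampled queries per second. The registerer later in this patch translates -1 into INT_MAX before constructing the rate limiter; a minimal sketch of that translation:

    // Minimal sketch of how the knob value feeds the rate limiter (see the
    // ConstructorActionRegisterer changes in query_stats.cpp below).
    int configured = queryQueryStatsSamplingRate.load();
    int effectiveRate = configured < 0 ? INT_MAX : configured;  // 0 keeps collection off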
diff --git a/src/mongo/db/query/query_shape.cpp b/src/mongo/db/query/query_shape.cpp
index 519b1115558..3f9ed7fbfb6 100644
--- a/src/mongo/db/query/query_shape.cpp
+++ b/src/mongo/db/query/query_shape.cpp
@@ -227,7 +227,7 @@ BSONObj extractQueryShape(const FindCommandRequest& findCommand,
expCtx,
ExtensionsCallbackNoop(),
MatchExpressionParser::kAllowAllSpecialFeatures),
- "Failed to parse 'filter' option when making telemetry key");
+ "Failed to parse 'filter' option when making queryStats key");
bob.append(FindCommandRequest::kFilterFieldName, filterExpr->serialize(opts));
}
diff --git a/src/mongo/db/query/query_shape.h b/src/mongo/db/query/query_shape.h
index 0fa0d7c863e..c0d4328d08b 100644
--- a/src/mongo/db/query/query_shape.h
+++ b/src/mongo/db/query/query_shape.h
@@ -40,7 +40,7 @@ constexpr StringData kLiteralArgString = "?"_sd;
/**
* Computes a BSONObj that is meant to be used to classify queries according to their shape, for the
- * purposes of collecting telemetry.
+ * purposes of collecting queryStats.
*
* For example, if the MatchExpression represents {a: 2}, it will return the same BSONObj as the
* MatchExpression for {a: 1}, {a: 10}, and {a: {$eq: 2}} (identical bits but not sharing memory)
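As a concrete illustration of that comment, two finds that differ only in their literal values collapse to the same shape, so they aggregate under a single $queryStats key. The placeholder format shown here is illustrative, not quoted from the patch.

    // Illustrative only: same shape, different literals.
    BSONObj q1 = fromjson("{find: 'testColl', filter: {a: 2}}");
    BSONObj q2 = fromjson("{find: 'testColl', filter: {a: 10}}");
    // Under the debug-type-string literal policy both filters serialize to something
    // like {a: {$eq: '?number'}}, so q1 and q2 share one query stats entry.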
diff --git a/src/mongo/db/query/telemetry.cpp b/src/mongo/db/query/query_stats.cpp
index af17da7af02..6b99a43fc3f 100644
--- a/src/mongo/db/query/telemetry.cpp
+++ b/src/mongo/db/query/query_stats.cpp
@@ -27,7 +27,7 @@
* it in the license file.
*/
-#include "mongo/db/query/telemetry.h"
+#include "mongo/db/query/query_stats.h"
#include "mongo/crypto/hash_block.h"
#include "mongo/crypto/sha256_block.h"
@@ -45,10 +45,10 @@
#include "mongo/db/query/query_feature_flags_gen.h"
#include "mongo/db/query/query_planner_params.h"
#include "mongo/db/query/query_request_helper.h"
+#include "mongo/db/query/query_stats_util.h"
#include "mongo/db/query/rate_limiting.h"
#include "mongo/db/query/serialization_options.h"
#include "mongo/db/query/sort_pattern.h"
-#include "mongo/db/query/telemetry_util.h"
#include "mongo/logv2/log.h"
#include "mongo/rpc/metadata/client_metadata.h"
#include "mongo/util/assert_util.h"
@@ -62,7 +62,7 @@
namespace mongo {
-namespace telemetry {
+namespace query_stats {
/**
* Redacts all BSONObj field names as if they were paths, unless the field name is a special hint
@@ -78,63 +78,63 @@ boost::optional<std::string> getApplicationName(const OperationContext* opCtx) {
}
} // namespace
-CounterMetric telemetryStoreSizeEstimateBytesMetric("telemetry.telemetryStoreSizeEstimateBytes");
+CounterMetric queryStatsStoreSizeEstimateBytesMetric("queryStats.queryStatsStoreSizeEstimateBytes");
namespace {
-CounterMetric telemetryEvictedMetric("telemetry.numEvicted");
-CounterMetric telemetryRateLimitedRequestsMetric("telemetry.numRateLimitedRequests");
-CounterMetric telemetryStoreWriteErrorsMetric("telemetry.numTelemetryStoreWriteErrors");
+CounterMetric queryStatsEvictedMetric("queryStats.numEvicted");
+CounterMetric queryStatsRateLimitedRequestsMetric("queryStats.numRateLimitedRequests");
+CounterMetric queryStatsStoreWriteErrorsMetric("queryStats.numQueryStatsStoreWriteErrors");
/**
- * Cap the telemetry store size.
+ * Cap the queryStats store size.
*/
-size_t capTelemetryStoreSize(size_t requestedSize) {
+size_t capQueryStatsStoreSize(size_t requestedSize) {
size_t cappedStoreSize = memory_util::capMemorySize(
requestedSize /*requestedSizeBytes*/, 1 /*maximumSizeGB*/, 25 /*percentTotalSystemMemory*/);
- // If capped size is less than requested size, the telemetry store has been capped at its
+ // If capped size is less than requested size, the queryStats store has been capped at its
// upper limit.
if (cappedStoreSize < requestedSize) {
LOGV2_DEBUG(7106502,
1,
- "The telemetry store size has been capped",
+ "The queryStats store size has been capped",
"cappedSize"_attr = cappedStoreSize);
}
return cappedStoreSize;
}
/**
- * Get the telemetry store size based on the query job's value.
+ * Get the queryStats store size based on the query job's value.
*/
-size_t getTelemetryStoreSize() {
- auto status = memory_util::MemorySize::parse(queryTelemetryStoreSize.get());
+size_t getQueryStatsStoreSize() {
+ auto status = memory_util::MemorySize::parse(queryQueryStatsStoreSize.get());
uassertStatusOK(status);
size_t requestedSize = memory_util::convertToSizeInBytes(status.getValue());
- return capTelemetryStoreSize(requestedSize);
+ return capQueryStatsStoreSize(requestedSize);
}
/**
- * A manager for the telemetry store allows a "pointer swap" on the telemetry store itself. The
+ * A manager for the queryStats store allows a "pointer swap" on the queryStats store itself. The
* usage patterns are as follows:
*
- * - Updating the telemetry store uses the `getTelemetryStore()` method. The telemetry store
+ * - Updating the queryStats store uses the `getQueryStatsStore()` method. The queryStats store
* instance is obtained, entries are looked up and mutated, or created anew.
- * - The telemetry store is "reset". This involves atomically allocating a new instance, once
+ * - The queryStats store is "reset". This involves atomically allocating a new instance, once
* there are no more updaters (readers of the store "pointer"), and returning the existing
* instance.
*/
-class TelemetryStoreManager {
+class QueryStatsStoreManager {
public:
- template <typename... TelemetryStoreArgs>
- TelemetryStoreManager(size_t cacheSize, size_t numPartitions)
- : _telemetryStore(std::make_unique<TelemetryStore>(cacheSize, numPartitions)),
+ template <typename... QueryStatsStoreArgs>
+ QueryStatsStoreManager(size_t cacheSize, size_t numPartitions)
+ : _queryStatsStore(std::make_unique<QueryStatsStore>(cacheSize, numPartitions)),
_maxSize(cacheSize) {}
/**
- * Acquire the instance of the telemetry store.
+ * Acquire the instance of the queryStats store.
*/
- TelemetryStore& getTelemetryStore() {
- return *_telemetryStore;
+ QueryStatsStore& getQueryStatsStore() {
+ return *_queryStatsStore;
}
size_t getMaxSize() {
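getQueryStatsStoreSize and capQueryStatsStoreSize above combine into a single size computation: parse the "1%" / "100MB" style setting, convert it to bytes, then cap it at the smaller of 1 GB and 25% of physical memory. A condensed sketch with the logging omitted, assuming the memory_util helpers behave as their names and the calls in this hunk suggest:

    // Condensed sketch of the size computation in this hunk (illustrative).
    size_t computeQueryStatsStoreSizeBytes() {
        auto parsed = memory_util::MemorySize::parse(queryQueryStatsStoreSize.get());
        uassertStatusOK(parsed);
        size_t requested = memory_util::convertToSizeInBytes(parsed.getValue());
        return memory_util::capMemorySize(requested /*requestedSizeBytes*/,
                                          1 /*maximumSizeGB*/,
                                          25 /*percentTotalSystemMemory*/);
    }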
@@ -142,92 +142,93 @@ public:
}
/**
- * Resize the telemetry store and return the number of evicted
+ * Resize the queryStats store and return the number of evicted
* entries.
*/
size_t resetSize(size_t cacheSize) {
_maxSize = cacheSize;
- return _telemetryStore->reset(cacheSize);
+ return _queryStatsStore->reset(cacheSize);
}
private:
- std::unique_ptr<TelemetryStore> _telemetryStore;
+ std::unique_ptr<QueryStatsStore> _queryStatsStore;
/**
- * Max size of the telemetry store. Tracked here to avoid having to recompute after it's divided
- * up into partitions.
+ * Max size of the queryStats store. Tracked here to avoid having to recompute after it's
+ * divided up into partitions.
*/
size_t _maxSize;
};
-const auto telemetryStoreDecoration =
- ServiceContext::declareDecoration<std::unique_ptr<TelemetryStoreManager>>();
+const auto queryStatsStoreDecoration =
+ ServiceContext::declareDecoration<std::unique_ptr<QueryStatsStoreManager>>();
-const auto telemetryRateLimiter =
+const auto queryStatsRateLimiter =
ServiceContext::declareDecoration<std::unique_ptr<RateLimiting>>();
-class TelemetryOnParamChangeUpdaterImpl final : public telemetry_util::OnParamChangeUpdater {
+class TelemetryOnParamChangeUpdaterImpl final : public query_stats_util::OnParamChangeUpdater {
public:
void updateCacheSize(ServiceContext* serviceCtx, memory_util::MemorySize memSize) final {
auto requestedSize = memory_util::convertToSizeInBytes(memSize);
- auto cappedSize = capTelemetryStoreSize(requestedSize);
- auto& telemetryStoreManager = telemetryStoreDecoration(serviceCtx);
- size_t numEvicted = telemetryStoreManager->resetSize(cappedSize);
- telemetryEvictedMetric.increment(numEvicted);
+ auto cappedSize = capQueryStatsStoreSize(requestedSize);
+ auto& queryStatsStoreManager = queryStatsStoreDecoration(serviceCtx);
+ size_t numEvicted = queryStatsStoreManager->resetSize(cappedSize);
+ queryStatsEvictedMetric.increment(numEvicted);
}
void updateSamplingRate(ServiceContext* serviceCtx, int samplingRate) {
- telemetryRateLimiter(serviceCtx).get()->setSamplingRate(samplingRate);
+ queryStatsRateLimiter(serviceCtx).get()->setSamplingRate(samplingRate);
}
};
-ServiceContext::ConstructorActionRegisterer telemetryStoreManagerRegisterer{
- "TelemetryStoreManagerRegisterer", [](ServiceContext* serviceCtx) {
+ServiceContext::ConstructorActionRegisterer queryStatsStoreManagerRegisterer{
+ "QueryStatsStoreManagerRegisterer", [](ServiceContext* serviceCtx) {
// It is possible that this is called before FCV is properly set up. Setting up the store if
// the flag is enabled but FCV is incorrect is safe, and guards against the FCV being
// changed to a supported version later.
- if (!feature_flags::gFeatureFlagTelemetry.isEnabledAndIgnoreFCVUnsafeAtStartup()) {
+ if (!feature_flags::gFeatureFlagQueryStats.isEnabledAndIgnoreFCVUnsafeAtStartup()) {
// featureFlags are not allowed to be changed at runtime. Therefore it's not an issue
- // to not create a telemetry store in ConstructorActionRegisterer at start up with the
+ // to not create a queryStats store in ConstructorActionRegisterer at start up with the
// flag off - because the flag can not be turned on at any point afterwards.
- telemetry_util::telemetryStoreOnParamChangeUpdater(serviceCtx) =
- std::make_unique<telemetry_util::NoChangesAllowedTelemetryParamUpdater>();
+ query_stats_util::queryStatsStoreOnParamChangeUpdater(serviceCtx) =
+ std::make_unique<query_stats_util::NoChangesAllowedTelemetryParamUpdater>();
return;
}
- telemetry_util::telemetryStoreOnParamChangeUpdater(serviceCtx) =
+ query_stats_util::queryStatsStoreOnParamChangeUpdater(serviceCtx) =
std::make_unique<TelemetryOnParamChangeUpdaterImpl>();
- size_t size = getTelemetryStoreSize();
- auto&& globalTelemetryStoreManager = telemetryStoreDecoration(serviceCtx);
- // The plan cache and telemetry store should use the same number of partitions.
+ size_t size = getQueryStatsStoreSize();
+ auto&& globalQueryStatsStoreManager = queryStatsStoreDecoration(serviceCtx);
+ // The plan cache and queryStats store should use the same number of partitions.
// That is, the number of cpu cores.
size_t numPartitions = ProcessInfo::getNumCores();
size_t partitionBytes = size / numPartitions;
- size_t metricsSize = sizeof(TelemetryEntry);
+ size_t metricsSize = sizeof(QueryStatsEntry);
if (partitionBytes < metricsSize * 10) {
numPartitions = size / metricsSize;
if (numPartitions < 1) {
numPartitions = 1;
}
}
- globalTelemetryStoreManager = std::make_unique<TelemetryStoreManager>(size, numPartitions);
- auto configuredSamplingRate = queryTelemetrySamplingRate.load();
- telemetryRateLimiter(serviceCtx) = std::make_unique<RateLimiting>(
+ globalQueryStatsStoreManager =
+ std::make_unique<QueryStatsStoreManager>(size, numPartitions);
+ auto configuredSamplingRate = queryQueryStatsSamplingRate.load();
+ queryStatsRateLimiter(serviceCtx) = std::make_unique<RateLimiting>(
configuredSamplingRate < 0 ? INT_MAX : configuredSamplingRate);
}};
/**
- * Top-level checks for whether telemetry collection is enabled. If this returns false, we must go
+ * Top-level checks for whether queryStats collection is enabled. If this returns false, we must go
* no further.
*/
-bool isTelemetryEnabled(const ServiceContext* serviceCtx) {
+bool isQueryStatsEnabled(const ServiceContext* serviceCtx) {
// During initialization FCV may not yet be setup but queries could be run. We can't
- // check whether telemetry should be enabled without FCV, so default to not recording
+ // check whether queryStats should be enabled without FCV, so default to not recording
// those queries.
// TODO SERVER-75935 Remove FCV Check.
- return feature_flags::gFeatureFlagTelemetry.isEnabled(
+ return feature_flags::gFeatureFlagQueryStats.isEnabled(
serverGlobalParams.featureCompatibility) &&
- telemetryStoreDecoration(serviceCtx)->getMaxSize() > 0;
+ queryStatsStoreDecoration(serviceCtx)->getMaxSize() > 0;
}
/**
@@ -235,26 +236,26 @@ bool isTelemetryEnabled(const ServiceContext* serviceCtx) {
* configuration for a global on/off decision and, if enabled, delegates to the rate limiter.
*/
bool shouldCollect(const ServiceContext* serviceCtx) {
- // Quick escape if telemetry is turned off.
- if (!isTelemetryEnabled(serviceCtx)) {
+ // Quick escape if queryStats is turned off.
+ if (!isQueryStatsEnabled(serviceCtx)) {
return false;
}
- // Cannot collect telemetry if sampling rate is not greater than 0. Note that we do not
- // increment telemetryRateLimitedRequestsMetric here since telemetry is entirely disabled.
- if (telemetryRateLimiter(serviceCtx)->getSamplingRate() <= 0) {
+ // Cannot collect queryStats if sampling rate is not greater than 0. Note that we do not
+ // increment queryStatsRateLimitedRequestsMetric here since queryStats is entirely disabled.
+ if (queryStatsRateLimiter(serviceCtx)->getSamplingRate() <= 0) {
return false;
}
- // Check if rate limiting allows us to collect telemetry for this request.
- if (telemetryRateLimiter(serviceCtx)->getSamplingRate() < INT_MAX &&
- !telemetryRateLimiter(serviceCtx)->handleRequestSlidingWindow()) {
- telemetryRateLimitedRequestsMetric.increment();
+ // Check if rate limiting allows us to collect queryStats for this request.
+ if (queryStatsRateLimiter(serviceCtx)->getSamplingRate() < INT_MAX &&
+ !queryStatsRateLimiter(serviceCtx)->handleRequestSlidingWindow()) {
+ queryStatsRateLimitedRequestsMetric.increment();
return false;
}
return true;
}
/**
- * Add a field to the find op's telemetry key. The `value` will have hmac applied.
+ * Add a field to the find op's queryStats key. The `value` will have hmac applied.
*/
void addToFindKey(BSONObjBuilder& builder, const StringData& fieldName, const BSONObj& value) {
serializeBSONWhenNotEmpty(value.redact(false), fieldName, &builder);
@@ -288,7 +289,7 @@ void throwIfEncounteringFLEPayload(const BSONElement& e) {
/**
* Upon reading telemetry data, we apply hmac to some keys. This is the list. See
- * TelemetryEntry::makeTelemetryKey().
+ * QueryStatsEntry::makeQueryStatsKey().
*/
const stdx::unordered_set<std::string> kKeysToApplyHmac = {"pipeline", "find"};
@@ -309,7 +310,7 @@ std::string constantFieldNameHasher(const BSONElement& e) {
/**
* Admittedly an abuse of the BSON redaction interface, we recognize FLE payloads here and avoid
- * collecting telemetry for the query.
+ * collecting queryStats for the query.
*/
std::string fleSafeFieldNameRedactor(const BSONElement& e) {
throwIfEncounteringFLEPayload(e);
@@ -344,9 +345,9 @@ std::size_t hash(const BSONObj& obj) {
} // namespace
-BSONObj TelemetryEntry::computeTelemetryKey(OperationContext* opCtx,
- bool applyHmacToIdentifiers,
- std::string hmacKey) const {
+BSONObj QueryStatsEntry::computeQueryStatsKey(OperationContext* opCtx,
+ bool applyHmacToIdentifiers,
+ std::string hmacKey) const {
// The telemetry key for find queries is generated by serializing all the command fields
// and applying hmac if SerializationOptions indicate to do so. The resulting key is of the
// form:
@@ -369,7 +370,7 @@ BSONObj TelemetryEntry::computeTelemetryKey(OperationContext* opCtx,
[&](StringData sd) { return sha256HmacStringDataHasher(hmacKey, sd); },
LiteralSerializationPolicy::kToDebugTypeString)
: SerializationOptions(LiteralSerializationPolicy::kToDebugTypeString);
- return requestShapifier->makeTelemetryKey(serializationOpts, opCtx);
+ return requestShapifier->makeQueryStatsKey(serializationOpts, opCtx);
}
// TODO SERVER-73152 remove all special aggregation logic below
@@ -377,7 +378,7 @@ BSONObj TelemetryEntry::computeTelemetryKey(OperationContext* opCtx,
// { "agg": {...}, "namespace": "...", "applicationName": "...", ... }
//
// The part of the key we need to apply hmac to is the object in the <CMD_TYPE> element. In the
- // case of an aggregate() command, it will look something like: > "pipeline" : [ { "$telemetry"
+ // case of an aggregate() command, it will look something like: > "pipeline" : [ { "$queryStats"
// : {} },
// { "$addFields" : { "x" : { "$someExpr" {} } } } ],
// We should preserve the top-level stage names in the pipeline but apply hmac to all field
@@ -385,10 +386,10 @@ BSONObj TelemetryEntry::computeTelemetryKey(OperationContext* opCtx,
// TODO: SERVER-73152 literal and field name redaction for aggregate command.
if (!applyHmacToIdentifiers) {
- return oldTelemetryKey;
+ return oldQueryStatsKey;
}
BSONObjBuilder hmacAppliedBuilder;
- for (BSONElement e : oldTelemetryKey) {
+ for (BSONElement e : oldQueryStatsKey) {
if ((e.type() == Object || e.type() == Array) &&
kKeysToApplyHmac.count(e.fieldNameStringData().toString()) == 1) {
auto hmacApplicator = [&](BSONObjBuilder subObj, const BSONObj& obj) {
@@ -423,25 +424,24 @@ BSONObj TelemetryEntry::computeTelemetryKey(OperationContext* opCtx,
}
// The originating command/query does not persist through the end of query execution. In order to
-// pair the telemetry metrics that are collected at the end of execution with the original query, it
-// is necessary to register the original query during planning and persist it after
-// execution.
+// pair the queryStats metrics that are collected at the end of execution with the original query,
+// it is necessary to register the original query during planning and persist it after execution.
// During planning, registerRequest is called to serialize the query shape and context (together,
-// the telemetry context) and save it to OpDebug. Moreover, as query execution may span more than
+// the queryStats context) and save it to OpDebug. Moreover, as query execution may span more than
// one request/operation and OpDebug does not persist through cursor iteration, it is necessary to
-// communicate the telemetry context across operations. In this way, the telemetry context is
-// registered to the cursor, so upon getMore() calls, the cursor manager passes the telemetry key
+// communicate the queryStats context across operations. In this way, the queryStats context is
+// registered to the cursor, so upon getMore() calls, the cursor manager passes the queryStats key
// from the pinned cursor to the new OpDebug.
-// Once query execution is complete, the telemetry context is grabbed from OpDebug, a telemetry key
-// is generated from this and metrics are paired to this key in the telemetry store.
+// Once query execution is complete, the queryStats context is grabbed from OpDebug, a queryStats
+// key is generated from this and metrics are paired to this key in the queryStats store.
void registerAggRequest(const AggregateCommandRequest& request, OperationContext* opCtx) {
- if (!isTelemetryEnabled(opCtx->getServiceContext())) {
+ if (!isQueryStatsEnabled(opCtx->getServiceContext())) {
return;
}
- // Queries against metadata collections should never appear in telemetry data.
+ // Queries against metadata collections should never appear in queryStats data.
if (request.getNamespace().isFLE2StateCollection()) {
return;
}
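The comment block above is the contract for the rest of this file; expressed as one call sequence, the hand-off looks roughly like this. It is a sketch only: the OpDebug field names come from this patch but their exact types are assumed, and the cursor hand-off in between lives outside this file.

    // Illustrative end-to-end flow using the functions renamed in this patch.
    void runWithQueryStats(OperationContext* opCtx,
                           std::unique_ptr<RequestShapifier> shapifier,
                           const NamespaceString& nss,
                           const boost::intrusive_ptr<ExpressionContext>& expCtx) {
        // 1. Planning: hash the shape and stash the shapifier on OpDebug.
        registerRequest(std::move(shapifier), nss, opCtx, expCtx);

        // 2. Execution: OpDebug and, across getMore calls, the ClientCursor carry the
        //    query stats context (not shown here).

        // 3. Completion: pair the collected metrics with the key in the store.
        auto& dbg = CurOp::get(opCtx)->debug();
        writeQueryStats(opCtx,
                        dbg.queryStatsStoreKeyHash,
                        dbg.queryStatsStoreKey,
                        std::move(dbg.queryStatsRequestShapifier),
                        /*queryExecMicros*/ 0,
                        /*docsReturned*/ 0);
    }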
@@ -450,8 +450,8 @@ void registerAggRequest(const AggregateCommandRequest& request, OperationContext
return;
}
- BSONObjBuilder telemetryKey;
- BSONObjBuilder pipelineBuilder = telemetryKey.subarrayStart("pipeline"_sd);
+ BSONObjBuilder queryStatsKey;
+ BSONObjBuilder pipelineBuilder = queryStatsKey.subarrayStart("pipeline"_sd);
try {
for (auto&& stage : request.getPipeline()) {
BSONObjBuilder stageBuilder = pipelineBuilder.subobjStart("stage"_sd);
@@ -459,31 +459,31 @@ void registerAggRequest(const AggregateCommandRequest& request, OperationContext
stageBuilder.done();
}
pipelineBuilder.done();
- telemetryKey.append("namespace", request.getNamespace().toString());
+ queryStatsKey.append("namespace", request.getNamespace().toString());
if (request.getReadConcern()) {
- telemetryKey.append("readConcern", *request.getReadConcern());
+ queryStatsKey.append("readConcern", *request.getReadConcern());
}
if (auto metadata = ClientMetadata::get(opCtx->getClient())) {
- telemetryKey.append("applicationName", metadata->getApplicationName());
+ queryStatsKey.append("applicationName", metadata->getApplicationName());
}
} catch (ExceptionFor<ErrorCodes::EncounteredFLEPayloadWhileApplyingHmac>&) {
return;
}
- BSONObj key = telemetryKey.obj();
- CurOp::get(opCtx)->debug().telemetryStoreKeyHash = hash(key);
- CurOp::get(opCtx)->debug().telemetryStoreKey = key.getOwned();
+ BSONObj key = queryStatsKey.obj();
+ CurOp::get(opCtx)->debug().queryStatsStoreKeyHash = hash(key);
+ CurOp::get(opCtx)->debug().queryStatsStoreKey = key.getOwned();
}
void registerRequest(std::unique_ptr<RequestShapifier> requestShapifier,
const NamespaceString& collection,
OperationContext* opCtx,
const boost::intrusive_ptr<ExpressionContext>& expCtx) {
- if (!isTelemetryEnabled(opCtx->getServiceContext())) {
+ if (!isQueryStatsEnabled(opCtx->getServiceContext())) {
return;
}
- // Queries against metadata collections should never appear in telemetry data.
+ // Queries against metadata collections should never appear in queryStats data.
if (collection.isFLE2StateCollection()) {
return;
}
@@ -494,53 +494,53 @@ void registerRequest(std::unique_ptr<RequestShapifier> requestShapifier,
SerializationOptions options;
options.literalPolicy = LiteralSerializationPolicy::kToDebugTypeString;
options.replacementForLiteralArgs = replacementForLiteralArgs;
- CurOp::get(opCtx)->debug().telemetryStoreKeyHash =
- hash(requestShapifier->makeTelemetryKey(options, expCtx));
- CurOp::get(opCtx)->debug().telemetryRequestShapifier = std::move(requestShapifier);
+ CurOp::get(opCtx)->debug().queryStatsStoreKeyHash =
+ hash(requestShapifier->makeQueryStatsKey(options, expCtx));
+ CurOp::get(opCtx)->debug().queryStatsRequestShapifier = std::move(requestShapifier);
}
-TelemetryStore& getTelemetryStore(OperationContext* opCtx) {
+QueryStatsStore& getQueryStatsStore(OperationContext* opCtx) {
uassert(6579000,
"Telemetry is not enabled without the feature flag on and a cache size greater than 0 "
"bytes",
- isTelemetryEnabled(opCtx->getServiceContext()));
- return telemetryStoreDecoration(opCtx->getServiceContext())->getTelemetryStore();
+ isQueryStatsEnabled(opCtx->getServiceContext()));
+ return queryStatsStoreDecoration(opCtx->getServiceContext())->getQueryStatsStore();
}
-void writeTelemetry(OperationContext* opCtx,
- boost::optional<size_t> telemetryKeyHash,
- boost::optional<BSONObj> telemetryKey,
- std::unique_ptr<RequestShapifier> requestShapifier,
- const uint64_t queryExecMicros,
- const uint64_t docsReturned) {
- if (!telemetryKeyHash) {
+void writeQueryStats(OperationContext* opCtx,
+ boost::optional<size_t> queryStatsKeyHash,
+ boost::optional<BSONObj> queryStatsKey,
+ std::unique_ptr<RequestShapifier> requestShapifier,
+ const uint64_t queryExecMicros,
+ const uint64_t docsReturned) {
+ if (!queryStatsKeyHash) {
return;
}
- auto&& telemetryStore = getTelemetryStore(opCtx);
+ auto&& queryStatsStore = getQueryStatsStore(opCtx);
auto&& [statusWithMetrics, partitionLock] =
- telemetryStore.getWithPartitionLock(*telemetryKeyHash);
- std::shared_ptr<TelemetryEntry> metrics;
+ queryStatsStore.getWithPartitionLock(*queryStatsKeyHash);
+ std::shared_ptr<QueryStatsEntry> metrics;
if (statusWithMetrics.isOK()) {
metrics = *statusWithMetrics.getValue();
} else {
- BSONObj key = telemetryKey.value_or(BSONObj{});
+ BSONObj key = queryStatsKey.value_or(BSONObj{});
size_t numEvicted =
- telemetryStore.put(*telemetryKeyHash,
- std::make_shared<TelemetryEntry>(
- std::move(requestShapifier), CurOp::get(opCtx)->getNSS(), key),
- partitionLock);
- telemetryEvictedMetric.increment(numEvicted);
- auto newMetrics = partitionLock->get(*telemetryKeyHash);
+ queryStatsStore.put(*queryStatsKeyHash,
+ std::make_shared<QueryStatsEntry>(
+ std::move(requestShapifier), CurOp::get(opCtx)->getNSS(), key),
+ partitionLock);
+ queryStatsEvictedMetric.increment(numEvicted);
+ auto newMetrics = partitionLock->get(*queryStatsKeyHash);
if (!newMetrics.isOK()) {
// This can happen if the budget is immediately exceeded. Specifically if the there is
// not enough room for a single new entry if the number of partitions is too high
// relative to the size.
- telemetryStoreWriteErrorsMetric.increment();
+ queryStatsStoreWriteErrorsMetric.increment();
LOGV2_DEBUG(7560900,
1,
- "Failed to store telemetry entry.",
+ "Failed to store queryStats entry.",
"status"_attr = newMetrics.getStatus(),
- "telemetryKeyHash"_attr = telemetryKeyHash);
+ "queryStatsKeyHash"_attr = queryStatsKeyHash);
return;
}
metrics = newMetrics.getValue()->second;
@@ -551,5 +551,5 @@ void writeTelemetry(OperationContext* opCtx,
metrics->queryExecMicros.aggregate(queryExecMicros);
metrics->docsReturned.aggregate(docsReturned);
}
-} // namespace telemetry
+} // namespace query_stats
} // namespace mongo
diff --git a/src/mongo/db/query/telemetry.h b/src/mongo/db/query/query_stats.h
index e7e0f3ccfd1..59d79d6c114 100644
--- a/src/mongo/db/query/telemetry.h
+++ b/src/mongo/db/query/query_stats.h
@@ -55,7 +55,7 @@ namespace {
using BSONNumeric = long long;
} // namespace
-namespace telemetry {
+namespace query_stats {
/**
* An aggregated metric stores a compressed view of data. It balances the loss of information
@@ -95,26 +95,26 @@ struct AggregatedMetric {
uint64_t sumOfSquares = 0;
};
-extern CounterMetric telemetryStoreSizeEstimateBytesMetric;
+extern CounterMetric queryStatsStoreSizeEstimateBytesMetric;
// Used to aggregate the metrics for one telemetry key over all its executions.
-class TelemetryEntry {
+class QueryStatsEntry {
public:
- TelemetryEntry(std::unique_ptr<RequestShapifier> requestShapifier,
- NamespaceStringOrUUID nss,
- const BSONObj& cmdObj)
+ QueryStatsEntry(std::unique_ptr<RequestShapifier> requestShapifier,
+ NamespaceStringOrUUID nss,
+ const BSONObj& cmdObj)
: firstSeenTimestamp(Date_t::now().toMillisSinceEpoch() / 1000, 0),
requestShapifier(std::move(requestShapifier)),
nss(nss),
- oldTelemetryKey(cmdObj.copy()) {
- telemetryStoreSizeEstimateBytesMetric.increment(sizeof(TelemetryEntry) + sizeof(BSONObj));
+ oldQueryStatsKey(cmdObj.copy()) {
+ queryStatsStoreSizeEstimateBytesMetric.increment(sizeof(QueryStatsEntry) + sizeof(BSONObj));
}
- ~TelemetryEntry() {
- telemetryStoreSizeEstimateBytesMetric.decrement(sizeof(TelemetryEntry) + sizeof(BSONObj));
+ ~QueryStatsEntry() {
+ queryStatsStoreSizeEstimateBytesMetric.decrement(sizeof(QueryStatsEntry) + sizeof(BSONObj));
}
BSONObj toBSON() const {
- BSONObjBuilder builder{sizeof(TelemetryEntry) + 100};
+ BSONObjBuilder builder{sizeof(QueryStatsEntry) + 100};
builder.append("lastExecutionMicros", (BSONNumeric)lastExecutionMicros);
builder.append("execCount", (BSONNumeric)execCount);
queryExecMicros.appendTo(builder, "queryExecMicros");
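queryExecMicros and docsReturned are AggregatedMetric members: their aggregate() calls appear at the end of writeQueryStats above, and appendTo() is used here. Only the sumOfSquares field is visible in this patch, so the following is a hypothetical sketch of the operations those call sites imply; every field name other than sumOfSquares is an assumption.

    // Hypothetical sketch of an AggregatedMetric-style accumulator; only sumOfSquares
    // appears in this patch, the remaining fields are assumptions.
    struct AggregatedMetricSketch {
        uint64_t count = 0, sum = 0, min = 0, max = 0, sumOfSquares = 0;

        void aggregate(uint64_t val) {
            ++count;
            sum += val;
            max = std::max(max, val);
            min = (count == 1) ? val : std::min(min, val);
            sumOfSquares += val * val;  // keeps enough state to estimate variance later
        }

        void appendTo(BSONObjBuilder& builder, StringData fieldName) const {
            builder.append(fieldName,
                           BSON("sum" << static_cast<long long>(sum)
                                      << "max" << static_cast<long long>(max)
                                      << "min" << static_cast<long long>(min)
                                      << "sumOfSquares"
                                      << static_cast<long long>(sumOfSquares)));
        }
    };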
@@ -124,11 +124,11 @@ public:
}
/**
- * Redact a given telemetry key and set _keySize.
+ * Redact a given queryStats key and set _keySize.
*/
- BSONObj computeTelemetryKey(OperationContext* opCtx,
- bool applyHmacToIdentifiers,
- std::string hmacKey) const;
+ BSONObj computeQueryStatsKey(OperationContext* opCtx,
+ bool applyHmacToIdentifiers,
+ std::string hmacKey) const;
/**
* Timestamp for when this query shape was added to the store. Set on construction.
@@ -153,8 +153,8 @@ public:
NamespaceStringOrUUID nss;
- // TODO: SERVER-73152 remove oldTelemetryKey when RequestShapifier is used for agg.
- BSONObj oldTelemetryKey;
+ // TODO: SERVER-73152 remove oldQueryStatsKey when RequestShapifier is used for agg.
+ BSONObj oldQueryStatsKey;
};
struct TelemetryPartitioner {
@@ -164,32 +164,32 @@ struct TelemetryPartitioner {
}
};
-struct TelemetryStoreEntryBudgetor {
- size_t operator()(const std::size_t key, const std::shared_ptr<TelemetryEntry>& value) {
+struct QueryStatsStoreEntryBudgetor {
+ size_t operator()(const std::size_t key, const std::shared_ptr<QueryStatsEntry>& value) {
// The budget estimator for a <key,value> pair in the LRU cache accounts for the size of the key
// and the size of the metrics, including the bson object used for generating the telemetry
// key at read time.
- return sizeof(TelemetryEntry) + sizeof(std::size_t) + value->oldTelemetryKey.objsize();
+ return sizeof(QueryStatsEntry) + sizeof(std::size_t) + value->oldQueryStatsKey.objsize();
}
};
-using TelemetryStore = PartitionedCache<std::size_t,
- std::shared_ptr<TelemetryEntry>,
- TelemetryStoreEntryBudgetor,
- TelemetryPartitioner>;
+using QueryStatsStore = PartitionedCache<std::size_t,
+ std::shared_ptr<QueryStatsEntry>,
+ QueryStatsStoreEntryBudgetor,
+ TelemetryPartitioner>;
/**
- * Acquire a reference to the global telemetry store.
+ * Acquire a reference to the global queryStats store.
*/
-TelemetryStore& getTelemetryStore(OperationContext* opCtx);
+QueryStatsStore& getQueryStatsStore(OperationContext* opCtx);
/**
- * Register a request for telemetry collection. The telemetry machinery may decide not to
+ * Register a request for queryStats collection. The queryStats machinery may decide not to
* collect anything but this should be called for all requests. The decision is made based on
- * the feature flag and telemetry parameters such as rate limiting.
+ * the feature flag and queryStats parameters such as rate limiting.
*
- * The caller is still responsible for subsequently calling writeTelemetry() once the request is
+ * The caller is still responsible for subsequently calling writeQueryStats() once the request is
* completed.
*
* Note that calling this affects internal state. It should be called once for each request for
@@ -203,22 +203,22 @@ void registerRequest(std::unique_ptr<RequestShapifier> requestShapifier,
const boost::intrusive_ptr<ExpressionContext>& expCtx);
/**
- * Writes telemetry to the telemetry store for the operation identified by `telemetryKey`.
+ * Writes queryStats to the queryStats store for the operation identified by `queryStatsKey`.
*/
-void writeTelemetry(OperationContext* opCtx,
- boost::optional<size_t> telemetryKeyHash,
- boost::optional<BSONObj> telemetryKey,
- std::unique_ptr<RequestShapifier> requestShapifier,
- uint64_t queryExecMicros,
- uint64_t docsReturned);
+void writeQueryStats(OperationContext* opCtx,
+ boost::optional<size_t> queryStatsKeyHash,
+ boost::optional<BSONObj> queryStatsKey,
+ std::unique_ptr<RequestShapifier> requestShapifier,
+ uint64_t queryExecMicros,
+ uint64_t docsReturned);
/**
* Serialize the FindCommandRequest according to the Options passed in. Returns the serialized BSON
* with hmac applied to all field names and literals.
*/
-BSONObj makeTelemetryKey(const FindCommandRequest& findCommand,
- const SerializationOptions& opts,
- const boost::intrusive_ptr<ExpressionContext>& expCtx,
- boost::optional<const TelemetryEntry&> existingMetrics = boost::none);
-} // namespace telemetry
+BSONObj makeQueryStatsKey(const FindCommandRequest& findCommand,
+ const SerializationOptions& opts,
+ const boost::intrusive_ptr<ExpressionContext>& expCtx,
+ boost::optional<const QueryStatsEntry&> existingMetrics = boost::none);
+} // namespace query_stats
} // namespace mongo
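The budgetor above charges each cache entry sizeof(QueryStatsEntry) + sizeof(std::size_t) plus the stored key's objsize(); the EvictEntries test in query_stats_store_test.cpp below asserts exactly that arithmetic. A worked sketch of the numbers the test relies on:

    // Worked example of the eviction arithmetic used by the EvictEntries test below:
    // every entry stores an empty BSONObj key, so each one costs a fixed number of
    // bytes and a partition holds a predictable count of entries.
    const size_t cacheSize = 2400;
    const size_t numPartitions = 2;
    const size_t perEntryBytes =
        sizeof(std::size_t) + sizeof(QueryStatsEntry) + BSONObj().objsize();
    const size_t entriesPerPartition = (cacheSize / numPartitions) / perEntryBytes;
    const size_t expectedTotal = entriesPerPartition * numPartitions;  // the test's assertion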
diff --git a/src/mongo/db/query/telemetry_store_test.cpp b/src/mongo/db/query/query_stats_store_test.cpp
index 8d68ee566c6..e36ac7ccd98 100644
--- a/src/mongo/db/query/telemetry_store_test.cpp
+++ b/src/mongo/db/query/query_stats_store_test.cpp
@@ -33,13 +33,13 @@
#include "mongo/db/pipeline/expression_context_for_test.h"
#include "mongo/db/query/find_request_shapifier.h"
#include "mongo/db/query/query_feature_flags_gen.h"
-#include "mongo/db/query/telemetry.h"
+#include "mongo/db/query/query_stats.h"
#include "mongo/db/service_context_test_fixture.h"
#include "mongo/idl/server_parameter_test_util.h"
#include "mongo/unittest/inline_auto_update.h"
#include "mongo/unittest/unittest.h"
-namespace mongo::telemetry {
+namespace mongo::query_stats {
/**
* A default hmac application strategy that generates easy to check results for testing purposes.
*/
@@ -51,9 +51,9 @@ std::size_t hash(const BSONObj& obj) {
return absl::hash_internal::CityHash64(obj.objdata(), obj.objsize());
}
-class TelemetryStoreTest : public ServiceContextTest {
+class QueryStatsStoreTest : public ServiceContextTest {
public:
- BSONObj makeTelemetryKeyFindRequest(
+ BSONObj makeQueryStatsKeyFindRequest(
FindCommandRequest fcr,
const boost::intrusive_ptr<ExpressionContext>& expCtx,
bool applyHmac = false,
@@ -71,12 +71,12 @@ public:
opts.applyHmacToIdentifiers = true;
opts.identifierHmacPolicy = applyHmacForTest;
}
- return findShapifier.makeTelemetryKey(opts, expCtx);
+ return findShapifier.makeQueryStatsKey(opts, expCtx);
}
};
-TEST_F(TelemetryStoreTest, BasicUsage) {
- TelemetryStore telStore{5000000, 1000};
+TEST_F(QueryStatsStoreTest, BasicUsage) {
+ QueryStatsStore telStore{5000000, 1000};
auto getMetrics = [&](const BSONObj& key) {
auto lookupResult = telStore.lookup(hash(key));
@@ -84,11 +84,11 @@ TEST_F(TelemetryStoreTest, BasicUsage) {
};
auto collectMetrics = [&](BSONObj& key) {
- std::shared_ptr<TelemetryEntry> metrics;
+ std::shared_ptr<QueryStatsEntry> metrics;
auto lookupResult = telStore.lookup(hash(key));
if (!lookupResult.isOK()) {
telStore.put(hash(key),
- std::make_shared<TelemetryEntry>(nullptr, NamespaceString{}, key));
+ std::make_shared<QueryStatsEntry>(nullptr, NamespaceString{}, key));
lookupResult = telStore.lookup(hash(key));
}
metrics = *lookupResult.getValue();
@@ -127,39 +127,39 @@ TEST_F(TelemetryStoreTest, BasicUsage) {
int numKeys = 0;
telStore.forEach(
- [&](std::size_t key, const std::shared_ptr<TelemetryEntry>& entry) { numKeys++; });
+ [&](std::size_t key, const std::shared_ptr<QueryStatsEntry>& entry) { numKeys++; });
ASSERT_EQ(numKeys, 2);
}
-TEST_F(TelemetryStoreTest, EvictEntries) {
- // This creates a telemetry store with 2 partitions, each with a size of 1200 bytes.
+TEST_F(QueryStatsStoreTest, EvictEntries) {
+ // This creates a queryStats store with 2 partitions, each with a size of 1200 bytes.
const auto cacheSize = 2400;
const auto numPartitions = 2;
- TelemetryStore telStore{cacheSize, numPartitions};
+ QueryStatsStore telStore{cacheSize, numPartitions};
for (int i = 0; i < 20; i++) {
auto query = BSON("query" + std::to_string(i) << 1 << "xEquals" << 42);
telStore.put(hash(query),
- std::make_shared<TelemetryEntry>(nullptr, NamespaceString{}, BSONObj{}));
+ std::make_shared<QueryStatsEntry>(nullptr, NamespaceString{}, BSONObj{}));
}
int numKeys = 0;
telStore.forEach(
- [&](std::size_t key, const std::shared_ptr<TelemetryEntry>& entry) { numKeys++; });
+ [&](std::size_t key, const std::shared_ptr<QueryStatsEntry>& entry) { numKeys++; });
int entriesPerPartition = (cacheSize / numPartitions) /
- (sizeof(std::size_t) + sizeof(TelemetryEntry) + BSONObj().objsize());
+ (sizeof(std::size_t) + sizeof(QueryStatsEntry) + BSONObj().objsize());
ASSERT_EQ(numKeys, entriesPerPartition * numPartitions);
}
-TEST_F(TelemetryStoreTest, CorrectlyRedactsFindCommandRequestAllFields) {
+TEST_F(QueryStatsStoreTest, CorrectlyRedactsFindCommandRequestAllFields) {
auto expCtx = make_intrusive<ExpressionContextForTest>();
FindCommandRequest fcr(NamespaceStringOrUUID(NamespaceString("testDB.testColl")));
fcr.setFilter(BSON("a" << 1));
- auto key = makeTelemetryKeyFindRequest(
+ auto key = makeQueryStatsKeyFindRequest(
fcr, expCtx, true, LiteralSerializationPolicy::kToDebugTypeString);
ASSERT_BSONOBJ_EQ_AUTO( // NOLINT
@@ -181,7 +181,7 @@ TEST_F(TelemetryStoreTest, CorrectlyRedactsFindCommandRequestAllFields) {
// Add sort.
fcr.setSort(BSON("sortVal" << 1 << "otherSort" << -1));
- key = makeTelemetryKeyFindRequest(
+ key = makeQueryStatsKeyFindRequest(
fcr, expCtx, true, LiteralSerializationPolicy::kToDebugTypeString);
ASSERT_BSONOBJ_EQ_AUTO( // NOLINT
R"({
@@ -206,7 +206,7 @@ TEST_F(TelemetryStoreTest, CorrectlyRedactsFindCommandRequestAllFields) {
// Add inclusion projection.
fcr.setProjection(BSON("e" << true << "f" << true));
- key = makeTelemetryKeyFindRequest(
+ key = makeQueryStatsKeyFindRequest(
fcr, expCtx, true, LiteralSerializationPolicy::kToDebugTypeString);
ASSERT_BSONOBJ_EQ_AUTO( // NOLINT
R"({
@@ -239,7 +239,7 @@ TEST_F(TelemetryStoreTest, CorrectlyRedactsFindCommandRequestAllFields) {
<< "$a"
<< "var2"
<< "const1"));
- key = makeTelemetryKeyFindRequest(
+ key = makeQueryStatsKeyFindRequest(
fcr, expCtx, true, LiteralSerializationPolicy::kToDebugTypeString);
ASSERT_BSONOBJ_EQ_AUTO( // NOLINT
R"({
@@ -275,7 +275,7 @@ TEST_F(TelemetryStoreTest, CorrectlyRedactsFindCommandRequestAllFields) {
fcr.setHint(BSON("z" << 1 << "c" << 1));
fcr.setMax(BSON("z" << 25));
fcr.setMin(BSON("z" << 80));
- key = makeTelemetryKeyFindRequest(
+ key = makeQueryStatsKeyFindRequest(
fcr, expCtx, true, LiteralSerializationPolicy::kToDebugTypeString);
ASSERT_BSONOBJ_EQ_AUTO( // NOLINT
R"({
@@ -324,7 +324,7 @@ TEST_F(TelemetryStoreTest, CorrectlyRedactsFindCommandRequestAllFields) {
fcr.setMaxTimeMS(1000);
fcr.setNoCursorTimeout(false);
- key = makeTelemetryKeyFindRequest(
+ key = makeQueryStatsKeyFindRequest(
fcr, expCtx, true, LiteralSerializationPolicy::kToDebugTypeString);
ASSERT_BSONOBJ_EQ_AUTO( // NOLINT
@@ -380,7 +380,7 @@ TEST_F(TelemetryStoreTest, CorrectlyRedactsFindCommandRequestAllFields) {
fcr.setShowRecordId(true);
fcr.setAwaitData(false);
fcr.setMirrored(true);
- key = makeTelemetryKeyFindRequest(
+ key = makeQueryStatsKeyFindRequest(
fcr, expCtx, true, LiteralSerializationPolicy::kToDebugTypeString);
ASSERT_BSONOBJ_EQ_AUTO( // NOLINT
@@ -434,7 +434,7 @@ TEST_F(TelemetryStoreTest, CorrectlyRedactsFindCommandRequestAllFields) {
key);
fcr.setAllowPartialResults(false);
- key = makeTelemetryKeyFindRequest(
+ key = makeQueryStatsKeyFindRequest(
fcr, expCtx, true, LiteralSerializationPolicy::kToDebugTypeString);
// Make sure that a false allowPartialResults is also accurately captured.
ASSERT_BSONOBJ_EQ_AUTO( // NOLINT
@@ -488,7 +488,7 @@ TEST_F(TelemetryStoreTest, CorrectlyRedactsFindCommandRequestAllFields) {
key);
}
-TEST_F(TelemetryStoreTest, CorrectlyRedactsFindCommandRequestEmptyFields) {
+TEST_F(QueryStatsStoreTest, CorrectlyRedactsFindCommandRequestEmptyFields) {
auto expCtx = make_intrusive<ExpressionContextForTest>();
FindCommandRequest fcr(NamespaceStringOrUUID(NamespaceString("testDB.testColl")));
FindRequestShapifier findShapifier(fcr, expCtx->opCtx);
@@ -500,7 +500,7 @@ TEST_F(TelemetryStoreTest, CorrectlyRedactsFindCommandRequestEmptyFields) {
opts.applyHmacToIdentifiers = true;
opts.identifierHmacPolicy = applyHmacForTest;
- auto hmacApplied = findShapifier.makeTelemetryKey(opts, expCtx);
+ auto hmacApplied = findShapifier.makeQueryStatsKey(opts, expCtx);
ASSERT_BSONOBJ_EQ_AUTO( // NOLINT
R"({
"queryShape": {
@@ -515,7 +515,7 @@ TEST_F(TelemetryStoreTest, CorrectlyRedactsFindCommandRequestEmptyFields) {
hmacApplied); // NOLINT (test auto-update)
}
-TEST_F(TelemetryStoreTest, CorrectlyRedactsHintsWithOptions) {
+TEST_F(QueryStatsStoreTest, CorrectlyRedactsHintsWithOptions) {
auto expCtx = make_intrusive<ExpressionContextForTest>();
FindCommandRequest fcr(NamespaceStringOrUUID(NamespaceString("testDB.testColl")));
FindRequestShapifier findShapifier(fcr, expCtx->opCtx);
@@ -525,7 +525,7 @@ TEST_F(TelemetryStoreTest, CorrectlyRedactsHintsWithOptions) {
fcr.setMax(BSON("z" << 25));
fcr.setMin(BSON("z" << 80));
- auto key = makeTelemetryKeyFindRequest(
+ auto key = makeQueryStatsKeyFindRequest(
fcr, expCtx, false, LiteralSerializationPolicy::kToDebugTypeString);
ASSERT_BSONOBJ_EQ_AUTO( // NOLINT
@@ -559,7 +559,7 @@ TEST_F(TelemetryStoreTest, CorrectlyRedactsHintsWithOptions) {
fcr.setHint(BSON("$hint"
<< "z"));
- key = makeTelemetryKeyFindRequest(
+ key = makeQueryStatsKeyFindRequest(
fcr, expCtx, false, LiteralSerializationPolicy::kToDebugTypeString);
ASSERT_BSONOBJ_EQ_AUTO( // NOLINT
R"({
@@ -588,7 +588,7 @@ TEST_F(TelemetryStoreTest, CorrectlyRedactsHintsWithOptions) {
key);
fcr.setHint(BSON("z" << 1 << "c" << 1));
- key = makeTelemetryKeyFindRequest(fcr, expCtx, true, LiteralSerializationPolicy::kUnchanged);
+ key = makeQueryStatsKeyFindRequest(fcr, expCtx, true, LiteralSerializationPolicy::kUnchanged);
ASSERT_BSONOBJ_EQ_AUTO( // NOLINT
R"({
"queryShape": {
@@ -616,7 +616,7 @@ TEST_F(TelemetryStoreTest, CorrectlyRedactsHintsWithOptions) {
})",
key);
- key = makeTelemetryKeyFindRequest(
+ key = makeQueryStatsKeyFindRequest(
fcr, expCtx, true, LiteralSerializationPolicy::kToDebugTypeString);
ASSERT_BSONOBJ_EQ_AUTO( // NOLINT
R"({
@@ -647,7 +647,7 @@ TEST_F(TelemetryStoreTest, CorrectlyRedactsHintsWithOptions) {
// Test that $natural comes through unmodified.
fcr.setHint(BSON("$natural" << -1));
- key = makeTelemetryKeyFindRequest(
+ key = makeQueryStatsKeyFindRequest(
fcr, expCtx, true, LiteralSerializationPolicy::kToDebugTypeString);
ASSERT_BSONOBJ_EQ_AUTO( // NOLINT
R"({
@@ -676,12 +676,12 @@ TEST_F(TelemetryStoreTest, CorrectlyRedactsHintsWithOptions) {
key);
}
-TEST_F(TelemetryStoreTest, DefinesLetVariables) {
+TEST_F(QueryStatsStoreTest, DefinesLetVariables) {
// Test that the expression context we use to apply hmac will understand the 'let' part of the
// find command while parsing the other pieces of the command.
// Note that this ExpressionContext will not have the let variables defined - we expect the
- // 'makeTelemetryKey' call to do that.
+ // 'makeQueryStatsKey' call to do that.
auto opCtx = makeOperationContext();
FindCommandRequest fcr(NamespaceStringOrUUID(NamespaceString("testDB.testColl")));
fcr.setLet(BSON("var" << 2));
@@ -690,13 +690,14 @@ TEST_F(TelemetryStoreTest, DefinesLetVariables) {
const auto cmdObj = fcr.toBSON(BSON("$db"
<< "testDB"));
- TelemetryEntry testMetrics{std::make_unique<telemetry::FindRequestShapifier>(fcr, opCtx.get()),
- fcr.getNamespaceOrUUID(),
- cmdObj};
+ QueryStatsEntry testMetrics{
+ std::make_unique<query_stats::FindRequestShapifier>(fcr, opCtx.get()),
+ fcr.getNamespaceOrUUID(),
+ cmdObj};
bool applyHmacToIdentifiers = false;
auto hmacApplied =
- testMetrics.computeTelemetryKey(opCtx.get(), applyHmacToIdentifiers, std::string{});
+ testMetrics.computeQueryStatsKey(opCtx.get(), applyHmacToIdentifiers, std::string{});
ASSERT_BSONOBJ_EQ_AUTO( // NOLINT
R"({
"queryShape": {
@@ -730,7 +731,7 @@ TEST_F(TelemetryStoreTest, DefinesLetVariables) {
// do the hashing, so we'll just stick with the big long strings here for now.
applyHmacToIdentifiers = true;
hmacApplied =
- testMetrics.computeTelemetryKey(opCtx.get(), applyHmacToIdentifiers, std::string{});
+ testMetrics.computeQueryStatsKey(opCtx.get(), applyHmacToIdentifiers, std::string{});
ASSERT_BSONOBJ_EQ_AUTO( // NOLINT
R"({
"queryShape": {
@@ -761,7 +762,7 @@ TEST_F(TelemetryStoreTest, DefinesLetVariables) {
hmacApplied);
}
-TEST_F(TelemetryStoreTest, CorrectlyRedactsAggregateCommandRequestAllFieldsSimplePipeline) {
+TEST_F(QueryStatsStoreTest, CorrectlyRedactsAggregateCommandRequestAllFieldsSimplePipeline) {
auto expCtx = make_intrusive<ExpressionContextForTest>();
AggregateCommandRequest acr(NamespaceString("testDB.testColl"));
auto matchStage = fromjson(R"({
@@ -790,7 +791,7 @@ TEST_F(TelemetryStoreTest, CorrectlyRedactsAggregateCommandRequestAllFieldsSimpl
opts.applyHmacToIdentifiers = false;
opts.identifierHmacPolicy = applyHmacForTest;
- auto shapified = aggShapifier.makeTelemetryKey(opts, expCtx);
+ auto shapified = aggShapifier.makeQueryStatsKey(opts, expCtx);
ASSERT_BSONOBJ_EQ_AUTO( // NOLINT
R"({
"queryShape": {
@@ -849,7 +850,7 @@ TEST_F(TelemetryStoreTest, CorrectlyRedactsAggregateCommandRequestAllFieldsSimpl
opts.replacementForLiteralArgs = "?";
opts.literalPolicy = LiteralSerializationPolicy::kToDebugTypeString;
opts.applyHmacToIdentifiers = true;
- shapified = aggShapifier.makeTelemetryKey(opts, expCtx);
+ shapified = aggShapifier.makeQueryStatsKey(opts, expCtx);
ASSERT_BSONOBJ_EQ_AUTO( // NOLINT
R"({
"queryShape": {
@@ -911,7 +912,7 @@ TEST_F(TelemetryStoreTest, CorrectlyRedactsAggregateCommandRequestAllFieldsSimpl
acr.setHint(BSON("z" << 1 << "c" << 1));
acr.setCollation(BSON("locale"
<< "simple"));
- shapified = aggShapifier.makeTelemetryKey(opts, expCtx);
+ shapified = aggShapifier.makeQueryStatsKey(opts, expCtx);
ASSERT_BSONOBJ_EQ_AUTO( // NOLINT
R"({
"queryShape": {
@@ -981,7 +982,7 @@ TEST_F(TelemetryStoreTest, CorrectlyRedactsAggregateCommandRequestAllFieldsSimpl
<< "$foo"
<< "var2"
<< "bar"));
- shapified = aggShapifier.makeTelemetryKey(opts, expCtx);
+ shapified = aggShapifier.makeQueryStatsKey(opts, expCtx);
ASSERT_BSONOBJ_EQ_AUTO( // NOLINT
R"({
"queryShape": {
@@ -1058,7 +1059,7 @@ TEST_F(TelemetryStoreTest, CorrectlyRedactsAggregateCommandRequestAllFieldsSimpl
acr.setBypassDocumentValidation(true);
expCtx->opCtx->setComment(BSON("comment"
<< "note to self"));
- shapified = aggShapifier.makeTelemetryKey(opts, expCtx);
+ shapified = aggShapifier.makeQueryStatsKey(opts, expCtx);
ASSERT_BSONOBJ_EQ_AUTO( // NOLINT
R"({
"queryShape": {
@@ -1132,7 +1133,7 @@ TEST_F(TelemetryStoreTest, CorrectlyRedactsAggregateCommandRequestAllFieldsSimpl
})",
shapified);
}
-TEST_F(TelemetryStoreTest, CorrectlyRedactsAggregateCommandRequestEmptyFields) {
+TEST_F(QueryStatsStoreTest, CorrectlyRedactsAggregateCommandRequestEmptyFields) {
auto expCtx = make_intrusive<ExpressionContextForTest>();
AggregateCommandRequest acr(NamespaceString("testDB.testColl"));
acr.setPipeline({});
@@ -1146,7 +1147,7 @@ TEST_F(TelemetryStoreTest, CorrectlyRedactsAggregateCommandRequestEmptyFields) {
opts.applyHmacToIdentifiers = true;
opts.identifierHmacPolicy = applyHmacForTest;
- auto shapified = aggShapifier.makeTelemetryKey(opts, expCtx);
+ auto shapified = aggShapifier.makeQueryStatsKey(opts, expCtx);
ASSERT_BSONOBJ_EQ_AUTO( // NOLINT
R"({
"queryShape": {
@@ -1160,4 +1161,4 @@ TEST_F(TelemetryStoreTest, CorrectlyRedactsAggregateCommandRequestEmptyFields) {
})",
shapified); // NOLINT (test auto-update)
}
-} // namespace mongo::telemetry
+} // namespace mongo::query_stats
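The EvictEntries expectation above follows directly from the budgetor: every entry is charged sizeof(QueryStatsEntry) + sizeof(std::size_t) plus the stored key's objsize(), and each partition gets cacheSize / numPartitions bytes. A sketch of the same arithmetic; the resulting count depends on sizeof(QueryStatsEntry) for the build, so no fixed number is assumed:

    const size_t cacheSize = 2400;   // total budget, as in the test
    const size_t numPartitions = 2;  // -> 1200 bytes per partition
    // Entries in the test carry an empty BSONObj key, so the per-entry charge is:
    const size_t entrySize = sizeof(std::size_t) + sizeof(QueryStatsEntry) + BSONObj().objsize();
    const size_t perPartition = (cacheSize / numPartitions) / entrySize;
    // After inserting 20 entries, only perPartition * numPartitions of them survive eviction.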
diff --git a/src/mongo/db/query/telemetry_util.cpp b/src/mongo/db/query/query_stats_util.cpp
index eeaf7da71e6..4c102d983dc 100644
--- a/src/mongo/db/query/telemetry_util.cpp
+++ b/src/mongo/db/query/query_stats_util.cpp
@@ -27,7 +27,7 @@
* it in the license file.
*/
-#include "mongo/db/query/telemetry_util.h"
+#include "mongo/db/query/query_stats_util.h"
#include "mongo/base/status.h"
#include "mongo/db/concurrency/d_concurrency.h"
@@ -40,25 +40,25 @@
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery
-namespace mongo::telemetry_util {
+namespace mongo::query_stats_util {
namespace {
/**
* Given the current 'Client', returns a pointer to the 'ServiceContext' and an interface for
- * updating the telemetry store.
+ * updating the queryStats store.
*/
std::pair<ServiceContext*, OnParamChangeUpdater*> getUpdater(const Client& client) {
auto serviceCtx = client.getServiceContext();
tassert(7106500, "ServiceContext must be non null", serviceCtx);
- auto updater = telemetryStoreOnParamChangeUpdater(serviceCtx).get();
+ auto updater = queryStatsStoreOnParamChangeUpdater(serviceCtx).get();
tassert(7106501, "Telemetry store size updater must be non null", updater);
return {serviceCtx, updater};
}
} // namespace
-Status onTelemetryStoreSizeUpdate(const std::string& str) {
+Status onQueryStatsStoreSizeUpdate(const std::string& str) {
auto newSize = memory_util::MemorySize::parse(str);
if (!newSize.isOK()) {
return newSize.getStatus();
@@ -75,11 +75,11 @@ Status onTelemetryStoreSizeUpdate(const std::string& str) {
return Status::OK();
}
-Status validateTelemetryStoreSize(const std::string& str, const boost::optional<TenantId>&) {
+Status validateQueryStatsStoreSize(const std::string& str, const boost::optional<TenantId>&) {
return memory_util::MemorySize::parse(str).getStatus();
}
-Status onTelemetrySamplingRateUpdate(int samplingRate) {
+Status onQueryStatsSamplingRateUpdate(int samplingRate) {
// The client is nullptr if the parameter is supplied from the command line. In this case, we
// ignore the update event, the parameter will be processed when initializing the service
// context.
@@ -92,6 +92,6 @@ Status onTelemetrySamplingRateUpdate(int samplingRate) {
}
const Decorable<ServiceContext>::Decoration<std::unique_ptr<OnParamChangeUpdater>>
- telemetryStoreOnParamChangeUpdater =
+ queryStatsStoreOnParamChangeUpdater =
ServiceContext::declareDecoration<std::unique_ptr<OnParamChangeUpdater>>();
-} // namespace mongo::telemetry_util
+} // namespace mongo::query_stats_util
diff --git a/src/mongo/db/query/telemetry_util.h b/src/mongo/db/query/query_stats_util.h
index c8fc37dc5c4..ebd8f1e2fbd 100644
--- a/src/mongo/db/query/telemetry_util.h
+++ b/src/mongo/db/query/query_stats_util.h
@@ -35,52 +35,52 @@
#include "mongo/db/query/util/memory_util.h"
-namespace mongo::telemetry_util {
+namespace mongo::query_stats_util {
-Status onTelemetryStoreSizeUpdate(const std::string& str);
+Status onQueryStatsStoreSizeUpdate(const std::string& str);
-Status validateTelemetryStoreSize(const std::string& str, const boost::optional<TenantId>&);
+Status validateQueryStatsStoreSize(const std::string& str, const boost::optional<TenantId>&);
-Status onTelemetrySamplingRateUpdate(int samplingRate);
+Status onQueryStatsSamplingRateUpdate(int samplingRate);
/**
- * An interface used to modify the telemetry store when query setParameters are modified. This is
+ * An interface used to modify the queryStats store when query setParameters are modified. This is
* done via an interface decorating the 'ServiceContext' in order to avoid a link-time dependency
- * of the query knobs library on the telemetry code.
+ * of the query knobs library on the queryStats code.
*/
class OnParamChangeUpdater {
public:
virtual ~OnParamChangeUpdater() = default;
/**
- * Resizes the telemetry store decorating 'serviceCtx' to the new size given by 'memSize'. If
+ * Resizes the queryStats store decorating 'serviceCtx' to the new size given by 'memSize'. If
* the new size is smaller than the old, cache entries are evicted in order to ensure the
* cache fits within the new size bound.
*/
virtual void updateCacheSize(ServiceContext* serviceCtx, memory_util::MemorySize memSize) = 0;
/**
- * Updates the sampling rate for the telemetry rate limiter.
+ * Updates the sampling rate for the queryStats rate limiter.
*/
virtual void updateSamplingRate(ServiceContext* serviceCtx, int samplingRate) = 0;
};
/**
- * A stub implementation that does not allow changing any parameters - to be used if the telemetry
+ * A stub implementation that does not allow changing any parameters - to be used if the queryStats
* store is disabled and cannot be re-enabled without restarting, as with a feature flag.
*/
class NoChangesAllowedTelemetryParamUpdater : public OnParamChangeUpdater {
public:
void updateCacheSize(ServiceContext* serviceCtx, memory_util::MemorySize memSize) final {
uasserted(7373500,
- "Cannot configure telemetry store - it is currently disabled and a restart is "
+ "Cannot configure queryStats store - it is currently disabled and a restart is "
"required to activate.");
}
void updateSamplingRate(ServiceContext* serviceCtx, int samplingRate) {
uasserted(7506200,
- "Cannot configure telemetry store - it is currently disabled and a restart is "
+ "Cannot configure queryStats store - it is currently disabled and a restart is "
"required to activate.");
}
};
@@ -89,5 +89,5 @@ public:
* Decorated accessor to the 'OnParamChangeUpdater' stored in 'ServiceContext'.
*/
extern const Decorable<ServiceContext>::Decoration<std::unique_ptr<OnParamChangeUpdater>>
- telemetryStoreOnParamChangeUpdater;
-} // namespace mongo::telemetry_util
+ queryStatsStoreOnParamChangeUpdater;
+} // namespace mongo::query_stats_util
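For context, the decoration above is what onQueryStatsStoreSizeUpdate() and onQueryStatsSamplingRateUpdate() dispatch through. A hypothetical concrete updater, with invented name and empty bodies purely for illustration (the real one ships with the queryStats store code), would look roughly like:

    class QueryStatsParamUpdater final : public query_stats_util::OnParamChangeUpdater {
    public:
        void updateCacheSize(ServiceContext* serviceCtx, memory_util::MemorySize memSize) final {
            // Resize the store decorating 'serviceCtx'; shrinking evicts entries to fit.
        }
        void updateSamplingRate(ServiceContext* serviceCtx, int samplingRate) final {
            // Push the new rate into the queryStats rate limiter.
        }
    };

    // Installed once at startup so the setParameter callbacks above have something to call:
    query_stats_util::queryStatsStoreOnParamChangeUpdater(serviceCtx) =
        std::make_unique<QueryStatsParamUpdater>();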
diff --git a/src/mongo/db/query/request_shapifier.h b/src/mongo/db/query/request_shapifier.h
index 1bae8f913f9..37004197fd0 100644
--- a/src/mongo/db/query/request_shapifier.h
+++ b/src/mongo/db/query/request_shapifier.h
@@ -34,27 +34,27 @@
#include "mongo/db/query/serialization_options.h"
#include "mongo/rpc/metadata/client_metadata.h"
-namespace mongo::telemetry {
+namespace mongo::query_stats {
/**
- * An abstract base class to handle query shapification for telemetry. Each request type should
- * define its own shapification strategy in its implementation of makeTelemetryKey(), and then a
- * request should be registered with telemetry via telemetry::registerRequest(RequestShapifier).
+ * An abstract base class to handle query shapification for queryStats. Each request type should
+ * define its own shapification strategy in its implementation of makeQueryStatsKey(), and then a
+ * request should be registered with queryStats via query_stats::registerRequest(RequestShapifier).
*/
class RequestShapifier {
public:
virtual ~RequestShapifier() = default;
/**
- * makeTelemetryKey generates the telemetry key representative of the specific request's
+ * makeQueryStatsKey generates the queryStats key representative of the specific request's
* payload. If there exists an ExpressionContext set up to parse and evaluate the request,
- * makeTelemetryKey should be called with that ExpressionContext. If not, you can call the
+ * makeQueryStatsKey should be called with that ExpressionContext. If not, you can call the
* overload that accepts the OperationContext and will construct a minimally-acceptable
* ExpressionContext for the sake of generating the key.
*/
- virtual BSONObj makeTelemetryKey(const SerializationOptions& opts,
- OperationContext* opCtx) const = 0;
- virtual BSONObj makeTelemetryKey(
+ virtual BSONObj makeQueryStatsKey(const SerializationOptions& opts,
+ OperationContext* opCtx) const = 0;
+ virtual BSONObj makeQueryStatsKey(
const SerializationOptions& opts,
const boost::intrusive_ptr<ExpressionContext>& expCtx) const = 0;
@@ -79,4 +79,4 @@ protected:
BSONObj _commentObj;
boost::optional<BSONElement> _comment = boost::none;
};
-} // namespace mongo::telemetry
+} // namespace mongo::query_stats
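FindRequestShapifier (used throughout this diff) is the concrete example of this interface; a new request type would subclass it in the same spirit. A bare-bones hypothetical subclass, shown only to illustrate the two required overloads (constructor and shape-building details omitted):

    class CountRequestShapifier final : public query_stats::RequestShapifier {
    public:
        BSONObj makeQueryStatsKey(const SerializationOptions& opts,
                                  OperationContext* opCtx) const final {
            // Build a minimal ExpressionContext from 'opCtx', then reuse the other overload.
            return BSONObj();  // placeholder
        }
        BSONObj makeQueryStatsKey(
            const SerializationOptions& opts,
            const boost::intrusive_ptr<ExpressionContext>& expCtx) const final {
            // Serialize the command into its query shape, honoring 'opts' (hmac policy,
            // literal policy) so equivalent requests map to the same queryStats key.
            return BSONObj();  // placeholder
        }
    };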
diff --git a/src/mongo/s/commands/cluster_find_cmd.h b/src/mongo/s/commands/cluster_find_cmd.h
index 942e0893434..6ab7d513d86 100644
--- a/src/mongo/s/commands/cluster_find_cmd.h
+++ b/src/mongo/s/commands/cluster_find_cmd.h
@@ -39,7 +39,7 @@
#include "mongo/db/matcher/extensions_callback_noop.h"
#include "mongo/db/query/cursor_response.h"
#include "mongo/db/query/find_request_shapifier.h"
-#include "mongo/db/query/telemetry.h"
+#include "mongo/db/query/query_stats.h"
#include "mongo/db/stats/counters.h"
#include "mongo/db/views/resolved_view.h"
#include "mongo/rpc/get_status_from_command_result.h"
@@ -225,11 +225,11 @@ public:
MatchExpressionParser::kAllowAllSpecialFeatures));
if (!_didDoFLERewrite) {
- telemetry::registerRequest(std::make_unique<telemetry::FindRequestShapifier>(
- cq->getFindCommandRequest(), opCtx),
- cq->nss(),
- opCtx,
- cq->getExpCtx());
+ query_stats::registerRequest(std::make_unique<query_stats::FindRequestShapifier>(
+ cq->getFindCommandRequest(), opCtx),
+ cq->nss(),
+ opCtx,
+ cq->getExpCtx());
}
try {
diff --git a/src/mongo/s/query/cluster_aggregate.cpp b/src/mongo/s/query/cluster_aggregate.cpp
index 6c9351efe57..9fd49e2e004 100644
--- a/src/mongo/s/query/cluster_aggregate.cpp
+++ b/src/mongo/s/query/cluster_aggregate.cpp
@@ -56,7 +56,7 @@
#include "mongo/db/query/explain_common.h"
#include "mongo/db/query/find_common.h"
#include "mongo/db/query/fle/server_rewrite.h"
-#include "mongo/db/query/telemetry.h"
+#include "mongo/db/query/query_stats.h"
#include "mongo/db/timeseries/timeseries_gen.h"
#include "mongo/db/timeseries/timeseries_options.h"
#include "mongo/db/views/resolved_view.h"
@@ -324,7 +324,7 @@ Status ClusterAggregate::runAggregate(OperationContext* opCtx,
auto startsWithDocuments = liteParsedPipeline.startsWithDocuments();
if (!shouldDoFLERewrite) {
- telemetry::registerAggRequest(request, opCtx);
+ query_stats::registerAggRequest(request, opCtx);
}
// If the routing table is not already taken by the higher level, fill it now.
diff --git a/src/mongo/s/query/cluster_aggregation_planner.cpp b/src/mongo/s/query/cluster_aggregation_planner.cpp
index 5aa643c0a85..8f2c6fcdb19 100644
--- a/src/mongo/s/query/cluster_aggregation_planner.cpp
+++ b/src/mongo/s/query/cluster_aggregation_planner.cpp
@@ -360,16 +360,16 @@ BSONObj establishMergingMongosCursor(OperationContext* opCtx,
int nShards = ccc->getNumRemotes();
auto&& opDebug = CurOp::get(opCtx)->debug();
- // Fill out the aggregation metrics in CurOp, and record telemetry metrics, before detaching the
- // cursor from its opCtx.
+ // Fill out the aggregation metrics in CurOp, and record queryStats metrics, before detaching
+ // the cursor from its opCtx.
opDebug.nShards = std::max(opDebug.nShards, nShards);
opDebug.cursorExhausted = exhausted;
opDebug.additiveMetrics.nBatches = 1;
CurOp::get(opCtx)->setEndOfOpMetrics(responseBuilder.numDocs());
if (exhausted) {
- collectTelemetryMongos(opCtx, ccc->getRequestShapifier());
+ collectQueryStatsMongos(opCtx, ccc->getRequestShapifier());
} else {
- collectTelemetryMongos(opCtx, ccc);
+ collectQueryStatsMongos(opCtx, ccc);
}
ccc->detachFromOperationContext();
diff --git a/src/mongo/s/query/cluster_client_cursor.h b/src/mongo/s/query/cluster_client_cursor.h
index 1f0d9be54a7..008bacd5ef6 100644
--- a/src/mongo/s/query/cluster_client_cursor.h
+++ b/src/mongo/s/query/cluster_client_cursor.h
@@ -270,11 +270,11 @@ public:
* Returns and releases ownership of the RequestShapifier associated with the request this
* cursor is handling.
*/
- virtual std::unique_ptr<telemetry::RequestShapifier> getRequestShapifier() = 0;
+ virtual std::unique_ptr<query_stats::RequestShapifier> getRequestShapifier() = 0;
protected:
// Metrics that are accumulated over the lifetime of the cursor, incremented with each getMore.
- // Useful for diagnostics like telemetry.
+ // Useful for diagnostics like queryStats.
OpDebug::AdditiveMetrics _metrics;
private:
diff --git a/src/mongo/s/query/cluster_client_cursor_impl.cpp b/src/mongo/s/query/cluster_client_cursor_impl.cpp
index 939637d0f32..9da06d36881 100644
--- a/src/mongo/s/query/cluster_client_cursor_impl.cpp
+++ b/src/mongo/s/query/cluster_client_cursor_impl.cpp
@@ -32,7 +32,7 @@
#include <memory>
#include "mongo/db/curop.h"
-#include "mongo/db/query/telemetry.h"
+#include "mongo/db/query/query_stats.h"
#include "mongo/logv2/log.h"
#include "mongo/s/query/router_stage_limit.h"
#include "mongo/s/query/router_stage_merge.h"
@@ -75,9 +75,10 @@ ClusterClientCursorImpl::ClusterClientCursorImpl(OperationContext* opCtx,
_lastUseDate(_createdDate),
_queryHash(CurOp::get(opCtx)->debug().queryHash),
_shouldOmitDiagnosticInformation(CurOp::get(opCtx)->debug().shouldOmitDiagnosticInformation),
- _telemetryStoreKeyHash(CurOp::get(opCtx)->debug().telemetryStoreKeyHash),
- _telemetryStoreKey(CurOp::get(opCtx)->debug().telemetryStoreKey),
- _telemetryRequestShapifier(std::move(CurOp::get(opCtx)->debug().telemetryRequestShapifier)) {
+ _queryStatsStoreKeyHash(CurOp::get(opCtx)->debug().queryStatsStoreKeyHash),
+ _queryStatsStoreKey(CurOp::get(opCtx)->debug().queryStatsStoreKey),
+ _queryStatsRequestShapifier(
+ std::move(CurOp::get(opCtx)->debug().queryStatsRequestShapifier)) {
dassert(!_params.compareWholeSortKeyOnRouter ||
SimpleBSONObjComparator::kInstance.evaluate(
_params.sortToApplyOnRouter == AsyncResultsMerger::kWholeSortKeySortPattern));
@@ -137,13 +138,13 @@ void ClusterClientCursorImpl::kill(OperationContext* opCtx) {
"Cannot kill a cluster client cursor that has already been killed",
!_hasBeenKilled);
- if (_telemetryStoreKeyHash && opCtx) {
- telemetry::writeTelemetry(opCtx,
- _telemetryStoreKeyHash,
- _telemetryStoreKey,
- std::move(_telemetryRequestShapifier),
- _metrics.executionTime.value_or(Microseconds{0}).count(),
- _metrics.nreturned.value_or(0));
+ if (_queryStatsStoreKeyHash && opCtx) {
+ query_stats::writeQueryStats(opCtx,
+ _queryStatsStoreKeyHash,
+ _queryStatsStoreKey,
+ std::move(_queryStatsRequestShapifier),
+ _metrics.executionTime.value_or(Microseconds{0}).count(),
+ _metrics.nreturned.value_or(0));
}
_root->kill(opCtx);
@@ -285,8 +286,8 @@ bool ClusterClientCursorImpl::shouldOmitDiagnosticInformation() const {
return _shouldOmitDiagnosticInformation;
}
-std::unique_ptr<telemetry::RequestShapifier> ClusterClientCursorImpl::getRequestShapifier() {
- return std::move(_telemetryRequestShapifier);
+std::unique_ptr<query_stats::RequestShapifier> ClusterClientCursorImpl::getRequestShapifier() {
+ return std::move(_queryStatsRequestShapifier);
}
} // namespace mongo
diff --git a/src/mongo/s/query/cluster_client_cursor_impl.h b/src/mongo/s/query/cluster_client_cursor_impl.h
index ecb7535715c..9d9168d6afb 100644
--- a/src/mongo/s/query/cluster_client_cursor_impl.h
+++ b/src/mongo/s/query/cluster_client_cursor_impl.h
@@ -121,7 +121,7 @@ public:
bool shouldOmitDiagnosticInformation() const final;
- std::unique_ptr<telemetry::RequestShapifier> getRequestShapifier() final;
+ std::unique_ptr<query_stats::RequestShapifier> getRequestShapifier() final;
public:
/**
@@ -186,12 +186,12 @@ private:
bool _shouldOmitDiagnosticInformation = false;
// If boost::none, telemetry should not be collected for this cursor.
- boost::optional<std::size_t> _telemetryStoreKeyHash;
- // TODO: SERVER-73152 remove telemetryStoreKey when RequestShapifier is used for agg.
- boost::optional<BSONObj> _telemetryStoreKey;
+ boost::optional<std::size_t> _queryStatsStoreKeyHash;
+ // TODO: SERVER-73152 remove queryStatsStoreKey when RequestShapifier is used for agg.
+ boost::optional<BSONObj> _queryStatsStoreKey;
// The RequestShapifier used by telemetry to shapify the request payload into the telemetry
// store key.
- std::unique_ptr<telemetry::RequestShapifier> _telemetryRequestShapifier;
+ std::unique_ptr<query_stats::RequestShapifier> _queryStatsRequestShapifier;
// Tracks if kill() has been called on the cursor. Multiple calls to kill() is an error.
bool _hasBeenKilled = false;
diff --git a/src/mongo/s/query/cluster_client_cursor_mock.cpp b/src/mongo/s/query/cluster_client_cursor_mock.cpp
index e495227b704..1e8b3561f5c 100644
--- a/src/mongo/s/query/cluster_client_cursor_mock.cpp
+++ b/src/mongo/s/query/cluster_client_cursor_mock.cpp
@@ -170,7 +170,7 @@ bool ClusterClientCursorMock::shouldOmitDiagnosticInformation() const {
return false;
}
-std::unique_ptr<telemetry::RequestShapifier> ClusterClientCursorMock::getRequestShapifier() {
+std::unique_ptr<query_stats::RequestShapifier> ClusterClientCursorMock::getRequestShapifier() {
return nullptr;
}
diff --git a/src/mongo/s/query/cluster_client_cursor_mock.h b/src/mongo/s/query/cluster_client_cursor_mock.h
index 131ca234287..750a67abdde 100644
--- a/src/mongo/s/query/cluster_client_cursor_mock.h
+++ b/src/mongo/s/query/cluster_client_cursor_mock.h
@@ -121,7 +121,7 @@ public:
bool shouldOmitDiagnosticInformation() const final;
- std::unique_ptr<telemetry::RequestShapifier> getRequestShapifier() final;
+ std::unique_ptr<query_stats::RequestShapifier> getRequestShapifier() final;
private:
bool _killed = false;
diff --git a/src/mongo/s/query/cluster_cursor_manager.cpp b/src/mongo/s/query/cluster_cursor_manager.cpp
index d8e47e55ecf..68436d25c6e 100644
--- a/src/mongo/s/query/cluster_cursor_manager.cpp
+++ b/src/mongo/s/query/cluster_cursor_manager.cpp
@@ -38,7 +38,7 @@
#include "mongo/db/allocate_cursor_id.h"
#include "mongo/db/curop.h"
#include "mongo/db/query/query_knobs_gen.h"
-#include "mongo/db/query/telemetry.h"
+#include "mongo/db/query/query_stats.h"
#include "mongo/db/session/kill_sessions_common.h"
#include "mongo/db/session/logical_session_cache.h"
#include "mongo/logv2/log.h"
@@ -591,25 +591,25 @@ StatusWith<ClusterClientCursorGuard> ClusterCursorManager::_detachCursor(WithLoc
return std::move(cursor);
}
-void collectTelemetryMongos(OperationContext* opCtx,
- std::unique_ptr<telemetry::RequestShapifier> requestShapifier) {
+void collectQueryStatsMongos(OperationContext* opCtx,
+ std::unique_ptr<query_stats::RequestShapifier> requestShapifier) {
// If we haven't registered a cursor to prepare for getMore requests, we record
- // telemetry directly.
+ // queryStats directly.
auto&& opDebug = CurOp::get(opCtx)->debug();
- telemetry::writeTelemetry(
+ query_stats::writeQueryStats(
opCtx,
- opDebug.telemetryStoreKeyHash,
- opDebug.telemetryStoreKey,
+ opDebug.queryStatsStoreKeyHash,
+ opDebug.queryStatsStoreKey,
std::move(requestShapifier),
opDebug.additiveMetrics.executionTime.value_or(Microseconds{0}).count(),
opDebug.additiveMetrics.nreturned.value_or(0));
}
-void collectTelemetryMongos(OperationContext* opCtx, ClusterClientCursorGuard& cursor) {
+void collectQueryStatsMongos(OperationContext* opCtx, ClusterClientCursorGuard& cursor) {
cursor->incrementCursorMetrics(CurOp::get(opCtx)->debug().additiveMetrics);
}
-void collectTelemetryMongos(OperationContext* opCtx, ClusterCursorManager::PinnedCursor& cursor) {
+void collectQueryStatsMongos(OperationContext* opCtx, ClusterCursorManager::PinnedCursor& cursor) {
cursor->incrementCursorMetrics(CurOp::get(opCtx)->debug().additiveMetrics);
}
diff --git a/src/mongo/s/query/cluster_cursor_manager.h b/src/mongo/s/query/cluster_cursor_manager.h
index 219dd773f82..b10824baf09 100644
--- a/src/mongo/s/query/cluster_cursor_manager.h
+++ b/src/mongo/s/query/cluster_cursor_manager.h
@@ -600,7 +600,7 @@ private:
};
/**
- * Record metrics for the current operation on opDebug and aggregates those metrics for telemetry
+ * Records metrics for the current operation on opDebug and aggregates those metrics for queryStats
* use. If a cursor is provided (via ClusterClientCursorGuard or
* ClusterCursorManager::PinnedCursor), metrics are aggregated on the cursor; otherwise, metrics are
* written directly to the telemetry store.
@@ -610,9 +610,9 @@ private:
* Currently, telemetry is only collected for find and aggregate requests (and their subsequent
* getMore requests), so these should only be called from those request paths.
*/
-void collectTelemetryMongos(OperationContext* opCtx,
- std::unique_ptr<telemetry::RequestShapifier> requestShapifier);
-void collectTelemetryMongos(OperationContext* opCtx, ClusterClientCursorGuard& cursor);
-void collectTelemetryMongos(OperationContext* opCtx, ClusterCursorManager::PinnedCursor& cursor);
+void collectQueryStatsMongos(OperationContext* opCtx,
+ std::unique_ptr<query_stats::RequestShapifier> requestShapifier);
+void collectQueryStatsMongos(OperationContext* opCtx, ClusterClientCursorGuard& cursor);
+void collectQueryStatsMongos(OperationContext* opCtx, ClusterCursorManager::PinnedCursor& cursor);
} // namespace mongo
diff --git a/src/mongo/s/query/cluster_find.cpp b/src/mongo/s/query/cluster_find.cpp
index 5b340ec098a..d0bd48a0d51 100644
--- a/src/mongo/s/query/cluster_find.cpp
+++ b/src/mongo/s/query/cluster_find.cpp
@@ -48,7 +48,7 @@
#include "mongo/db/query/find_common.h"
#include "mongo/db/query/getmore_command_gen.h"
#include "mongo/db/query/query_planner_common.h"
-#include "mongo/db/query/telemetry.h"
+#include "mongo/db/query/query_stats.h"
#include "mongo/executor/task_executor_pool.h"
#include "mongo/logv2/log.h"
#include "mongo/platform/overflow_arithmetic.h"
@@ -444,7 +444,7 @@ CursorId runQueryWithoutRetrying(OperationContext* opCtx,
if (shardIds.size() > 0) {
updateNumHostsTargetedMetrics(opCtx, cm, shardIds.size());
}
- collectTelemetryMongos(opCtx, ccc->getRequestShapifier());
+ collectQueryStatsMongos(opCtx, ccc->getRequestShapifier());
return CursorId(0);
}
@@ -455,7 +455,7 @@ CursorId runQueryWithoutRetrying(OperationContext* opCtx,
? ClusterCursorManager::CursorLifetime::Immortal
: ClusterCursorManager::CursorLifetime::Mortal;
auto authUser = AuthorizationSession::get(opCtx->getClient())->getAuthenticatedUserName();
- collectTelemetryMongos(opCtx, ccc);
+ collectQueryStatsMongos(opCtx, ccc);
auto cursorId = uassertStatusOK(cursorManager->registerCursor(
opCtx, ccc.releaseCursor(), query.nss(), cursorType, cursorLifetime, authUser));
@@ -923,7 +923,7 @@ StatusWith<CursorResponse> ClusterFind::runGetMore(OperationContext* opCtx,
const bool partialResultsReturned = pinnedCursor.getValue()->partialResultsReturned();
pinnedCursor.getValue()->setLeftoverMaxTimeMicros(opCtx->getRemainingMaxTimeMicros());
- collectTelemetryMongos(opCtx, pinnedCursor.getValue());
+ collectQueryStatsMongos(opCtx, pinnedCursor.getValue());
// Upon successful completion, transfer ownership of the cursor back to the cursor manager. If
// the cursor has been exhausted, the cursor manager will clean it up for us.
diff --git a/src/mongo/s/query/store_possible_cursor.cpp b/src/mongo/s/query/store_possible_cursor.cpp
index 38cec4024ed..a5c6759f4d1 100644
--- a/src/mongo/s/query/store_possible_cursor.cpp
+++ b/src/mongo/s/query/store_possible_cursor.cpp
@@ -98,7 +98,7 @@ StatusWith<BSONObj> storePossibleCursor(OperationContext* opCtx,
if (incomingCursorResponse.getValue().getCursorId() == CursorId(0)) {
opDebug.cursorExhausted = true;
- collectTelemetryMongos(opCtx, std::move(opDebug.telemetryRequestShapifier));
+ collectQueryStatsMongos(opCtx, std::move(opDebug.queryStatsRequestShapifier));
return cmdResult;
}
@@ -130,7 +130,7 @@ StatusWith<BSONObj> storePossibleCursor(OperationContext* opCtx,
}
auto ccc = ClusterClientCursorImpl::make(opCtx, std::move(executor), std::move(params));
- collectTelemetryMongos(opCtx, ccc);
+ collectQueryStatsMongos(opCtx, ccc);
// We don't expect to use this cursor until a subsequent getMore, so detach from the current
// OperationContext until then.
ccc->detachFromOperationContext();