author     liubov.molchanova <liubov.molchanova@mongodb.com>  2023-05-17 08:16:48 +0000
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>   2023-05-17 10:20:42 +0000
commit     87160f876c6fb94f5d03062b2caee57539ec5d8e (patch)
tree       b78d9ca9e853a236e0a2f6432a6ac02dc50119cd
parent     1c390a0c50104a04cbd8ecbefb99eaf22e1bc914 (diff)
download   mongo-87160f876c6fb94f5d03062b2caee57539ec5d8e.tar.gz
Revert "SERVER-76427: Rename $telemetry to $queryStats"
This reverts commit d646e44b7801a3e5b3230bbae7dcfe05a5ed8707.
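To make the direction of the rename concrete: after this revert the user-facing surface is the $telemetry aggregation stage gated by featureFlagTelemetry, rather than $queryStats gated by featureFlagQueryStats. The sketch below is illustrative only and is not part of the patch; every identifier in it is taken from the renamed tests in this commit.

// jstests-style sketch of the reverted naming; not part of this patch.
const conn = MongoRunner.runMongod({
    setParameter: {
        featureFlagTelemetry: true,
        // The tests in this patch use -1 to collect telemetry for every query.
        internalQueryConfigureTelemetrySamplingRate: -1,
    },
});
const adminDB = conn.getDB("admin");
// The telemetry store is read with a collection-less aggregate on the admin database.
const entries = adminDB.aggregate([{$telemetry: {}}, {$sort: {key: 1}}]).toArray();
printjson(entries);
MongoRunner.stopMongod(conn);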
-rw-r--r--  buildscripts/resmokeconfig/suites/telemetry_passthrough.yml  2
-rw-r--r--  jstests/auth/lib/commands_lib.js  6
-rw-r--r--  jstests/libs/telemetry_utils.js  6
-rw-r--r--  jstests/noPassthrough/telemetry/application_name_find.js (renamed from jstests/noPassthrough/queryStats/application_name_find.js)  4
-rw-r--r--  jstests/noPassthrough/telemetry/clear_telemetry_store.js (renamed from jstests/noPassthrough/queryStats/clear_query_stats_store.js)  23
-rw-r--r--  jstests/noPassthrough/telemetry/documentSourceTelemetry_redaction_parameters.js (renamed from jstests/noPassthrough/queryStats/documentSourceQueryStats_redaction_parameters.js)  28
-rw-r--r--  jstests/noPassthrough/telemetry/feature_flag_off_sampling_rate_on.js (renamed from jstests/noPassthrough/queryStats/feature_flag_off_sampling_rate_on.js)  8
-rw-r--r--  jstests/noPassthrough/telemetry/query_stats_key.js (renamed from jstests/noPassthrough/queryStats/query_stats_key.js)  2
-rw-r--r--  jstests/noPassthrough/telemetry/redact_queries_with_nonobject_fields.js (renamed from jstests/noPassthrough/queryStats/redact_queries_with_nonobject_fields.js)  4
-rw-r--r--  jstests/noPassthrough/telemetry/telemetry_collect_on_mongos.js (renamed from jstests/noPassthrough/queryStats/query_stats_collect_on_mongos.js)  10
-rw-r--r--  jstests/noPassthrough/telemetry/telemetry_feature_flag.js (renamed from jstests/noPassthrough/queryStats/query_stats_feature_flag.js)  8
-rw-r--r--  jstests/noPassthrough/telemetry/telemetry_metrics_across_getMore_calls.js (renamed from jstests/noPassthrough/queryStats/query_stats_metrics_across_getMore_calls.js)  16
-rw-r--r--  jstests/noPassthrough/telemetry/telemetry_redact_find_cmd.js (renamed from jstests/noPassthrough/queryStats/query_stats_redact_find_cmd.js)  10
-rw-r--r--  jstests/noPassthrough/telemetry/telemetry_sampling_rate.js (renamed from jstests/noPassthrough/queryStats/query_stats_sampling_rate.js)  12
-rw-r--r--  jstests/noPassthrough/telemetry/telemetry_server_status_metrics.js (renamed from jstests/noPassthrough/queryStats/query_stats_server_status_metrics.js)  48
-rw-r--r--  jstests/noPassthrough/telemetry/telemetry_upgrade.js (renamed from jstests/noPassthrough/queryStats/query_stats_upgrade.js)  8
-rw-r--r--  jstests/noPassthroughWithMongod/telemetry_configuration.js  5
-rw-r--r--  src/mongo/db/auth/action_type.idl  2
-rw-r--r--  src/mongo/db/auth/builtin_roles.yml  2
-rw-r--r--  src/mongo/db/clientcursor.cpp  36
-rw-r--r--  src/mongo/db/clientcursor.h  14
-rw-r--r--  src/mongo/db/commands/find_cmd.cpp  21
-rw-r--r--  src/mongo/db/commands/getmore_cmd.cpp  2
-rw-r--r--  src/mongo/db/commands/run_aggregate.cpp  18
-rw-r--r--  src/mongo/db/curop.h  8
-rw-r--r--  src/mongo/db/cursor_manager.cpp  8
-rw-r--r--  src/mongo/db/pipeline/SConscript  4
-rw-r--r--  src/mongo/db/pipeline/abt/document_source_visitor.cpp  2
-rw-r--r--  src/mongo/db/pipeline/aggregate_command.idl  6
-rw-r--r--  src/mongo/db/pipeline/aggregate_request_shapifier.cpp  12
-rw-r--r--  src/mongo/db/pipeline/aggregate_request_shapifier.h  11
-rw-r--r--  src/mongo/db/pipeline/document_source_telemetry.cpp (renamed from src/mongo/db/pipeline/document_source_query_stats.cpp)  56
-rw-r--r--  src/mongo/db/pipeline/document_source_telemetry.h (renamed from src/mongo/db/pipeline/document_source_query_stats.h)  24
-rw-r--r--  src/mongo/db/pipeline/document_source_telemetry_test.cpp (renamed from src/mongo/db/pipeline/document_source_query_stats_test.cpp)  42
-rw-r--r--  src/mongo/db/pipeline/visitors/document_source_visitor_registry_mongod.h  4
-rw-r--r--  src/mongo/db/query/SConscript  6
-rw-r--r--  src/mongo/db/query/cqf_command_utils.cpp  2
-rw-r--r--  src/mongo/db/query/find.cpp  6
-rw-r--r--  src/mongo/db/query/find_request_shapifier.cpp  12
-rw-r--r--  src/mongo/db/query/find_request_shapifier.h  11
-rw-r--r--  src/mongo/db/query/query_feature_flags.idl  6
-rw-r--r--  src/mongo/db/query/query_knobs.idl  28
-rw-r--r--  src/mongo/db/query/query_shape.cpp  2
-rw-r--r--  src/mongo/db/query/query_shape.h  2
-rw-r--r--  src/mongo/db/query/request_shapifier.h  20
-rw-r--r--  src/mongo/db/query/telemetry.cpp (renamed from src/mongo/db/query/query_stats.cpp)  248
-rw-r--r--  src/mongo/db/query/telemetry.h (renamed from src/mongo/db/query/query_stats.h)  82
-rw-r--r--  src/mongo/db/query/telemetry_store_test.cpp (renamed from src/mongo/db/query/query_stats_store_test.cpp)  99
-rw-r--r--  src/mongo/db/query/telemetry_util.cpp (renamed from src/mongo/db/query/query_stats_util.cpp)  18
-rw-r--r--  src/mongo/db/query/telemetry_util.h (renamed from src/mongo/db/query/query_stats_util.h)  26
-rw-r--r--  src/mongo/s/commands/cluster_find_cmd.h  12
-rw-r--r--  src/mongo/s/query/cluster_aggregate.cpp  4
-rw-r--r--  src/mongo/s/query/cluster_aggregation_planner.cpp  8
-rw-r--r--  src/mongo/s/query/cluster_client_cursor.h  4
-rw-r--r--  src/mongo/s/query/cluster_client_cursor_impl.cpp  27
-rw-r--r--  src/mongo/s/query/cluster_client_cursor_impl.h  10
-rw-r--r--  src/mongo/s/query/cluster_client_cursor_mock.cpp  2
-rw-r--r--  src/mongo/s/query/cluster_client_cursor_mock.h  2
-rw-r--r--  src/mongo/s/query/cluster_cursor_manager.cpp  18
-rw-r--r--  src/mongo/s/query/cluster_cursor_manager.h  10
-rw-r--r--  src/mongo/s/query/cluster_find.cpp  8
-rw-r--r--  src/mongo/s/query/store_possible_cursor.cpp  4
62 files changed, 574 insertions, 575 deletions
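Most of the patch below is a mechanical rename. Beyond the stage itself, the revert also restores the setParameter knobs and the serverStatus metric names; the following shell sketch (illustrative, not part of the patch) shows where those names surface, using only identifiers that appear in the hunks that follow.

// Shell sketch of the reverted knob and metric names; values mirror the tests below.
assert.commandWorked(
    db.adminCommand({setParameter: 1, internalQueryConfigureTelemetryCacheSize: "10MB"}));
assert.commandWorked(
    db.adminCommand({setParameter: 1, internalQueryConfigureTelemetrySamplingRate: 2147483647}));

// Metrics are reported under serverStatus().metrics.telemetry again.
const telemetryMetrics = db.serverStatus().metrics.telemetry;
printjson({
    numEvicted: telemetryMetrics.numEvicted,
    numRateLimitedRequests: telemetryMetrics.numRateLimitedRequests,
    telemetryStoreSizeEstimateBytes: telemetryMetrics.telemetryStoreSizeEstimateBytes,
    numTelemetryStoreWriteErrors: telemetryMetrics.numTelemetryStoreWriteErrors,
});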
diff --git a/buildscripts/resmokeconfig/suites/telemetry_passthrough.yml b/buildscripts/resmokeconfig/suites/telemetry_passthrough.yml
index 08fa435a07d..1aa2a490a5f 100644
--- a/buildscripts/resmokeconfig/suites/telemetry_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/telemetry_passthrough.yml
@@ -27,4 +27,4 @@ executor:
mongod_options:
set_parameters:
enableTestCommands: 1
- internalQueryStatsSamplingRate: -1
+ internalQueryConfigureTelemetrySamplingRate: -1
diff --git a/jstests/auth/lib/commands_lib.js b/jstests/auth/lib/commands_lib.js
index 1d60c7aa308..170223762b4 100644
--- a/jstests/auth/lib/commands_lib.js
+++ b/jstests/auth/lib/commands_lib.js
@@ -6629,12 +6629,12 @@ export const authCommandsLib = {
]
},
{
- // Test that only clusterManager has permission to run $queryStats
+ // Test that only clusterManager has permission to run $telemetry
testname: "testTelemetryReadPrivilege",
- command: {aggregate: 1, pipeline: [{$queryStats: {}}], cursor: {}},
+ command: {aggregate: 1, pipeline: [{$telemetry: {}}], cursor: {}},
skipSharded: false,
skipTest: (conn) => {
- return !TestData.setParameters.featureFlagQueryStats;
+ return !TestData.setParameters.featureFlagTelemetry;
},
testcases: [{runOnDb: adminDbName, roles: roles_clusterManager}]
},
diff --git a/jstests/libs/telemetry_utils.js b/jstests/libs/telemetry_utils.js
index 0bb9e90fb58..11e2d236827 100644
--- a/jstests/libs/telemetry_utils.js
+++ b/jstests/libs/telemetry_utils.js
@@ -45,7 +45,7 @@ function getTelemetry(conn) {
const result = conn.adminCommand({
aggregate: 1,
pipeline: [
- {$queryStats: {}},
+ {$telemetry: {}},
// Sort on telemetry key so entries are in a deterministic order.
{$sort: {key: 1}},
{$match: {"key.applicationName": kApplicationName}}
@@ -62,7 +62,7 @@ function getTelemetryRedacted(
hmacKey = BinData(0, "MjM0NTY3ODkxMDExMTIxMzE0MTUxNjE3MTgxOTIwMjE=")) {
// Hashed application name is generated using the default hmacKey argument.
const kApplicationName = "MongoDB Shell";
- // Filter out agg queries, including $queryStats.
+ // Filter out agg queries, including $telemetry.
const match = {
$match: {"key.queryShape.command": "find", "key.applicationName": kApplicationName}
};
@@ -70,7 +70,7 @@ function getTelemetryRedacted(
const result = conn.adminCommand({
aggregate: 1,
pipeline: [
- {$queryStats: {applyHmacToIdentifiers: applyHmacToIdentifiers, hmacKey: hmacKey}},
+ {$telemetry: {applyHmacToIdentifiers: applyHmacToIdentifiers, hmacKey: hmacKey}},
match,
// Sort on telemetry key so entries are in a deterministic order.
{$sort: {key: 1}},
diff --git a/jstests/noPassthrough/queryStats/application_name_find.js b/jstests/noPassthrough/telemetry/application_name_find.js
index 36245a31514..35b86a95f53 100644
--- a/jstests/noPassthrough/queryStats/application_name_find.js
+++ b/jstests/noPassthrough/telemetry/application_name_find.js
@@ -1,6 +1,6 @@
/**
* Test that applicationName and namespace appear in telemetry for the find command.
- * @tags: [featureFlagQueryStats]
+ * @tags: [featureFlagTelemetry]
*/
load("jstests/libs/telemetry_utils.js");
(function() {
@@ -12,7 +12,7 @@ const kHashedFieldName = "lU7Z0mLRPRUL+RfAD5jhYPRRpXBsZBxS/20EzDwfOG4=";
// Turn on the collecting of telemetry metrics.
let options = {
- setParameter: {internalQueryStatsSamplingRate: -1},
+ setParameter: {internalQueryConfigureTelemetrySamplingRate: -1},
};
const conn = MongoRunner.runMongod(options);
diff --git a/jstests/noPassthrough/queryStats/clear_query_stats_store.js b/jstests/noPassthrough/telemetry/clear_telemetry_store.js
index 056c565ec02..b2409cc0bbb 100644
--- a/jstests/noPassthrough/queryStats/clear_query_stats_store.js
+++ b/jstests/noPassthrough/telemetry/clear_telemetry_store.js
@@ -1,6 +1,6 @@
/**
* Test that the telemetry store can be cleared when the cache size is reset to 0.
- * @tags: [featureFlagQueryStats]
+ * @tags: [featureFlagTelemetry]
*/
load("jstests/libs/telemetry_utils.js"); // For verifyMetrics.
@@ -9,8 +9,10 @@ load("jstests/libs/telemetry_utils.js"); // For verifyMetrics.
// Turn on the collecting of telemetry metrics.
let options = {
- setParameter:
- {internalQueryStatsSamplingRate: -1, internalQueryConfigureQueryStatsCacheSize: "10MB"},
+ setParameter: {
+ internalQueryConfigureTelemetrySamplingRate: -1,
+ internalQueryConfigureTelemetryCacheSize: "10MB"
+ },
};
const conn = MongoRunner.runMongod(options);
@@ -27,19 +29,18 @@ for (var j = 0; j < 10; ++j) {
}
// Confirm number of entries in the store and that none have been evicted.
-let telemetryResults = testDB.getSiblingDB("admin").aggregate([{$queryStats: {}}]).toArray();
+let telemetryResults = testDB.getSiblingDB("admin").aggregate([{$telemetry: {}}]).toArray();
assert.eq(telemetryResults.length, 10, telemetryResults);
-assert.eq(testDB.serverStatus().metrics.queryStats.numEvicted, 0);
+assert.eq(testDB.serverStatus().metrics.telemetry.numEvicted, 0);
// Command to clear the cache.
assert.commandWorked(
- testDB.adminCommand({setParameter: 1, internalQueryConfigureQueryStatsCacheSize: "0MB"}));
+ testDB.adminCommand({setParameter: 1, internalQueryConfigureTelemetryCacheSize: "0MB"}));
-// 10 regular queries plus the $queryStats query, means 11 entries evicted when the cache is
-// cleared.
-assert.eq(testDB.serverStatus().metrics.queryStats.numEvicted, 11);
+// 10 regular queries plus the $telemetry query means 11 entries are evicted when the cache is cleared.
+assert.eq(testDB.serverStatus().metrics.telemetry.numEvicted, 11);
-// Calling $queryStats should fail when the telemetry store size is 0 bytes.
-assert.throwsWithCode(() => testDB.getSiblingDB("admin").aggregate([{$queryStats: {}}]), 6579000);
+// Calling $telemetry should fail when the telemetry store size is 0 bytes.
+assert.throwsWithCode(() => testDB.getSiblingDB("admin").aggregate([{$telemetry: {}}]), 6579000);
MongoRunner.stopMongod(conn);
}());
diff --git a/jstests/noPassthrough/queryStats/documentSourceQueryStats_redaction_parameters.js b/jstests/noPassthrough/telemetry/documentSourceTelemetry_redaction_parameters.js
index 8facb106072..c4f785abf6a 100644
--- a/jstests/noPassthrough/queryStats/documentSourceQueryStats_redaction_parameters.js
+++ b/jstests/noPassthrough/telemetry/documentSourceTelemetry_redaction_parameters.js
@@ -1,6 +1,6 @@
/**
- * Test the $queryStats hmac properties.
- * @tags: [featureFlagQueryStats]
+ * Test the $telemetry hmac properties.
+ * @tags: [featureFlagTelemetry]
*/
load("jstests/aggregation/extras/utils.js"); // For assertAdminDBErrCodeAndErrMsgContains.
@@ -42,41 +42,41 @@ function runTest(conn) {
assertTelemetryKeyWithoutHmac(getTelemetryRedacted(conn, false)[0]["key"].queryShape);
// Wrong parameter name throws error.
- let pipeline = [{$queryStats: {redactFields: true}}];
+ let pipeline = [{$telemetry: {redactFields: true}}];
assertAdminDBErrCodeAndErrMsgContains(
coll,
pipeline,
ErrorCodes.FailedToParse,
- "$queryStats parameters object may only contain 'applyHmacToIdentifiers' or 'hmacKey' options. Found: redactFields");
+ "$telemetry parameters object may only contain 'applyHmacToIdentifiers' or 'hmacKey' options. Found: redactFields");
// Wrong parameter type throws error.
- pipeline = [{$queryStats: {applyHmacToIdentifiers: 1}}];
+ pipeline = [{$telemetry: {applyHmacToIdentifiers: 1}}];
assertAdminDBErrCodeAndErrMsgContains(
coll,
pipeline,
ErrorCodes.FailedToParse,
- "$queryStats applyHmacToIdentifiers parameter must be boolean. Found type: double");
+ "$telemetry applyHmacToIdentifiers parameter must be boolean. Found type: double");
- pipeline = [{$queryStats: {hmacKey: 1}}];
+ pipeline = [{$telemetry: {hmacKey: 1}}];
assertAdminDBErrCodeAndErrMsgContains(
coll,
pipeline,
ErrorCodes.FailedToParse,
- "$queryStats hmacKey parameter must be bindata of length 32 or greater. Found type: double");
+ "$telemetry hmacKey parameter must be bindata of length 32 or greater. Found type: double");
// Parameter object with unrecognized key throws error.
- pipeline = [{$queryStats: {applyHmacToIdentifiers: true, hmacStrategy: "on"}}];
+ pipeline = [{$telemetry: {applyHmacToIdentifiers: true, hmacStrategy: "on"}}];
assertAdminDBErrCodeAndErrMsgContains(
coll,
pipeline,
ErrorCodes.FailedToParse,
- "$queryStats parameters object may only contain 'applyHmacToIdentifiers' or 'hmacKey' options. Found: hmacStrategy");
+ "$telemetry parameters object may only contain 'applyHmacToIdentifiers' or 'hmacKey' options. Found: hmacStrategy");
}
const conn = MongoRunner.runMongod({
setParameter: {
- internalQueryStatsSamplingRate: -1,
- featureFlagQueryStats: true,
+ internalQueryConfigureTelemetrySamplingRate: -1,
+ featureFlagTelemetry: true,
}
});
runTest(conn);
@@ -89,8 +89,8 @@ const st = new ShardingTest({
rs: {nodes: 1},
mongosOptions: {
setParameter: {
- internalQueryStatsSamplingRate: -1,
- featureFlagQueryStats: true,
+ internalQueryConfigureTelemetrySamplingRate: -1,
+ featureFlagTelemetry: true,
'failpoint.skipClusterParameterRefresh': "{'mode':'alwaysOn'}"
}
},
diff --git a/jstests/noPassthrough/queryStats/feature_flag_off_sampling_rate_on.js b/jstests/noPassthrough/telemetry/feature_flag_off_sampling_rate_on.js
index 38474b944d0..7fbc079cc7b 100644
--- a/jstests/noPassthrough/queryStats/feature_flag_off_sampling_rate_on.js
+++ b/jstests/noPassthrough/telemetry/feature_flag_off_sampling_rate_on.js
@@ -10,14 +10,14 @@ load("jstests/libs/feature_flag_util.js");
// Set sampling rate to -1.
let options = {
- setParameter: {internalQueryStatsSamplingRate: -1},
+ setParameter: {internalQueryConfigureTelemetrySamplingRate: -1},
};
const conn = MongoRunner.runMongod(options);
const testdb = conn.getDB('test');
// This test specifically tests error handling when the feature flag is not on.
// TODO SERVER-65800 This test can be deleted when the feature is on by default.
-if (!conn || FeatureFlagUtil.isEnabled(testdb, "QueryStats")) {
+if (!conn || FeatureFlagUtil.isEnabled(testdb, "Telemetry")) {
jsTestLog(`Skipping test since feature flag is disabled. conn: ${conn}`);
if (conn) {
MongoRunner.stopMongod(conn);
@@ -38,14 +38,14 @@ assert.commandWorked(bulk.execute());
// Pipeline to read telemetry store should fail without feature flag turned on even though sampling
// rate is > 0.
assert.commandFailedWithCode(
- testdb.adminCommand({aggregate: 1, pipeline: [{$queryStats: {}}], cursor: {}}),
+ testdb.adminCommand({aggregate: 1, pipeline: [{$telemetry: {}}], cursor: {}}),
ErrorCodes.QueryFeatureNotAllowed);
// Pipeline, with a filter, to read telemetry store fails without feature flag turned on even though
// sampling rate is > 0.
assert.commandFailedWithCode(testdb.adminCommand({
aggregate: 1,
- pipeline: [{$queryStats: {}}, {$match: {"key.queryShape.find": {$eq: "###"}}}],
+ pipeline: [{$telemetry: {}}, {$match: {"key.queryShape.find": {$eq: "###"}}}],
cursor: {}
}),
ErrorCodes.QueryFeatureNotAllowed);
diff --git a/jstests/noPassthrough/queryStats/query_stats_key.js b/jstests/noPassthrough/telemetry/query_stats_key.js
index 8b63417078a..68d77110bc6 100644
--- a/jstests/noPassthrough/queryStats/query_stats_key.js
+++ b/jstests/noPassthrough/telemetry/query_stats_key.js
@@ -64,7 +64,7 @@ function confirmAllFieldsPresent(queryStatsEntries) {
// Turn on the collecting of telemetry metrics.
let options = {
- setParameter: {internalQueryStatsSamplingRate: -1},
+ setParameter: {internalQueryConfigureTelemetrySamplingRate: -1},
};
const conn = MongoRunner.runMongod(options);
diff --git a/jstests/noPassthrough/queryStats/redact_queries_with_nonobject_fields.js b/jstests/noPassthrough/telemetry/redact_queries_with_nonobject_fields.js
index 7528ab9a4ab..25cac47555e 100644
--- a/jstests/noPassthrough/queryStats/redact_queries_with_nonobject_fields.js
+++ b/jstests/noPassthrough/telemetry/redact_queries_with_nonobject_fields.js
@@ -1,6 +1,6 @@
/**
* Test that telemetry key generation works for queries with non-object fields.
- * @tags: [featureFlagQueryStats]
+ * @tags: [featureFlagTelemetry]
*/
load('jstests/libs/analyze_plan.js');
@@ -9,7 +9,7 @@ load('jstests/libs/analyze_plan.js');
// Turn on the collecting of telemetry metrics.
let options = {
- setParameter: {internalQueryStatsSamplingRate: -1},
+ setParameter: {internalQueryConfigureTelemetrySamplingRate: -1},
};
const conn = MongoRunner.runMongod(options);
diff --git a/jstests/noPassthrough/queryStats/query_stats_collect_on_mongos.js b/jstests/noPassthrough/telemetry/telemetry_collect_on_mongos.js
index 97057269527..ff9fadc85c7 100644
--- a/jstests/noPassthrough/queryStats/query_stats_collect_on_mongos.js
+++ b/jstests/noPassthrough/telemetry/telemetry_collect_on_mongos.js
@@ -1,6 +1,6 @@
/**
* Test that mongos is collecting telemetry metrics.
- * @tags: [featureFlagQueryStats]
+ * @tags: [featureFlagTelemetry]
*/
load('jstests/libs/telemetry_utils.js');
@@ -18,7 +18,7 @@ const setup = () => {
rs: {nodes: 1},
mongosOptions: {
setParameter: {
- internalQueryStatsSamplingRate: -1,
+ internalQueryConfigureTelemetrySamplingRate: -1,
'failpoint.skipClusterParameterRefresh': "{'mode':'alwaysOn'}"
}
},
@@ -95,8 +95,7 @@ const assertExpectedResults = (results,
collection: coll.getName(),
batchSize: 2
})); // returns 1 doc, exhausts the cursor
- // The $queryStats query for the previous `getTelemetry` is included in this call to
- // $queryStats.
+ // The $telemetry query for the previous `getTelemetry` is included in this call to $telemetry.
telemetry = getTelemetry(db);
assert.eq(2, telemetry.length, telemetry);
assertExpectedResults(telemetry[0],
@@ -160,8 +159,7 @@ const assertExpectedResults = (results,
collection: coll.getName(),
batchSize: 2
})); // returns 1 doc, exhausts the cursor
- // The $queryStats query for the previous `getTelemetry` is included in this call to
- // $queryStats.
+ // The $telemetry query for the previous `getTelemetry` is included in this call to $telemetry.
telemetry = getTelemetry(db);
assert.eq(2, telemetry.length, telemetry);
assertExpectedResults(telemetry[0],
diff --git a/jstests/noPassthrough/queryStats/query_stats_feature_flag.js b/jstests/noPassthrough/telemetry/telemetry_feature_flag.js
index bcce489d8da..4071b732796 100644
--- a/jstests/noPassthrough/queryStats/query_stats_feature_flag.js
+++ b/jstests/noPassthrough/telemetry/telemetry_feature_flag.js
@@ -11,21 +11,21 @@ load("jstests/libs/feature_flag_util.js");
// TODO SERVER-65800 this test can be removed when the feature flag is removed.
const conn = MongoRunner.runMongod();
const testDB = conn.getDB('test');
-if (FeatureFlagUtil.isEnabled(testDB, "QueryStats")) {
- jsTestLog("Skipping test since query stats are enabled.");
+if (FeatureFlagUtil.isEnabled(testDB, "Telemetry")) {
+ jsTestLog("Skipping test since telemetry is enabled.");
MongoRunner.stopMongod(conn);
return;
}
// Pipeline to read telemetry store should fail without feature flag turned on.
assert.commandFailedWithCode(
- testDB.adminCommand({aggregate: 1, pipeline: [{$queryStats: {}}], cursor: {}}),
+ testDB.adminCommand({aggregate: 1, pipeline: [{$telemetry: {}}], cursor: {}}),
ErrorCodes.QueryFeatureNotAllowed);
// Pipeline, with a filter, to read telemetry store fails without feature flag turned on.
assert.commandFailedWithCode(testDB.adminCommand({
aggregate: 1,
- pipeline: [{$queryStats: {}}, {$match: {"key.queryShape.find": {$eq: "###"}}}],
+ pipeline: [{$telemetry: {}}, {$match: {"key.queryShape.find": {$eq: "###"}}}],
cursor: {}
}),
ErrorCodes.QueryFeatureNotAllowed);
diff --git a/jstests/noPassthrough/queryStats/query_stats_metrics_across_getMore_calls.js b/jstests/noPassthrough/telemetry/telemetry_metrics_across_getMore_calls.js
index d5caea74cf7..91605c5e069 100644
--- a/jstests/noPassthrough/queryStats/query_stats_metrics_across_getMore_calls.js
+++ b/jstests/noPassthrough/telemetry/telemetry_metrics_across_getMore_calls.js
@@ -1,7 +1,7 @@
/**
* Test that the telemetry metrics are aggregated properly by distinct query shape over getMore
* calls.
- * @tags: [featureFlagQueryStats]
+ * @tags: [featureFlagTelemetry]
*/
load("jstests/libs/telemetry_utils.js"); // For verifyMetrics.
@@ -10,7 +10,7 @@ load("jstests/libs/telemetry_utils.js"); // For verifyMetrics.
// Turn on the collecting of telemetry metrics.
let options = {
- setParameter: {internalQueryStatsSamplingRate: -1},
+ setParameter: {internalQueryConfigureTelemetrySamplingRate: -1},
};
const conn = MongoRunner.runMongod(options);
@@ -35,7 +35,7 @@ assert.commandWorked(bulk.execute());
coll.aggregate([{$match: {foo: 0}}], {cursor: {batchSize: 2}}).toArray();
// This command will return all telemetry store entries.
- const telemetryResults = testDB.getSiblingDB("admin").aggregate([{$queryStats: {}}]).toArray();
+ const telemetryResults = testDB.getSiblingDB("admin").aggregate([{$telemetry: {}}]).toArray();
// Assert there is only one entry.
assert.eq(telemetryResults.length, 1, telemetryResults);
const telemetryEntry = telemetryResults[0];
@@ -71,7 +71,7 @@ const fooNeBatchSize = 3;
// This filters telemetry entries to just the ones entered when running the above find queries.
const telemetryResults = testDB.getSiblingDB("admin")
.aggregate([
- {$queryStats: {}},
+ {$telemetry: {}},
{$match: {"key.queryShape.filter.foo": {$exists: true}}},
{$sort: {key: 1}},
])
@@ -102,7 +102,7 @@ const fooNeBatchSize = 3;
// This filters telemetry entries to just the ones entered when running the above find queries.
let telemetryResults =
testDB.getSiblingDB("admin")
- .aggregate([{$queryStats: {}}, {$match: {"key.queryShape.command": "find"}}])
+ .aggregate([{$telemetry: {}}, {$match: {"key.queryShape.command": "find"}}])
.toArray();
assert.eq(telemetryResults.length, 4, telemetryResults);
@@ -110,7 +110,7 @@ const fooNeBatchSize = 3;
// This filters to just the telemetry for query coll.find().sort({"foo": 1}).batchSize(2).
telemetryResults = testDB.getSiblingDB("admin")
- .aggregate([{$queryStats: {}}, {$match: {"key.queryShape.sort.foo": 1}}])
+ .aggregate([{$telemetry: {}}, {$match: {"key.queryShape.sort.foo": 1}}])
.toArray();
assert.eq(telemetryResults.length, 1, telemetryResults);
assert.eq(telemetryResults[0].key.queryShape.cmdNs.db, "test");
@@ -123,7 +123,7 @@ const fooNeBatchSize = 3;
// 1}}).limit(query2Limit).batchSize(2).
telemetryResults =
testDB.getSiblingDB("admin")
- .aggregate([{$queryStats: {}}, {$match: {"key.queryShape.limit": '?number'}}])
+ .aggregate([{$telemetry: {}}, {$match: {"key.queryShape.limit": '?number'}}])
.toArray();
assert.eq(telemetryResults.length, 1, telemetryResults);
assert.eq(telemetryResults[0].key.queryShape.cmdNs.db, "test");
@@ -135,7 +135,7 @@ const fooNeBatchSize = 3;
// This filters to just the telemetry for query coll.find({foo: {$eq: 0}}).batchSize(2).
telemetryResults = testDB.getSiblingDB("admin")
.aggregate([
- {$queryStats: {}},
+ {$telemetry: {}},
{
$match: {
"key.queryShape.filter.foo": {$eq: {$eq: "?number"}},
diff --git a/jstests/noPassthrough/queryStats/query_stats_redact_find_cmd.js b/jstests/noPassthrough/telemetry/telemetry_redact_find_cmd.js
index b2cce48cdb7..54b909adae9 100644
--- a/jstests/noPassthrough/queryStats/query_stats_redact_find_cmd.js
+++ b/jstests/noPassthrough/telemetry/telemetry_redact_find_cmd.js
@@ -1,5 +1,5 @@
/**
- * Test that $queryStats properly applies hmac to find commands, on mongod and mongos.
+ * Test that $telemetry properly applies hmac to find commands, on mongod and mongos.
*/
load("jstests/libs/telemetry_utils.js");
(function() {
@@ -44,8 +44,8 @@ function runTest(conn) {
const conn = MongoRunner.runMongod({
setParameter: {
- internalQueryStatsSamplingRate: -1,
- featureFlagQueryStats: true,
+ internalQueryConfigureTelemetrySamplingRate: -1,
+ featureFlagTelemetry: true,
}
});
runTest(conn);
@@ -58,8 +58,8 @@ const st = new ShardingTest({
rs: {nodes: 1},
mongosOptions: {
setParameter: {
- internalQueryStatsSamplingRate: -1,
- featureFlagQueryStats: true,
+ internalQueryConfigureTelemetrySamplingRate: -1,
+ featureFlagTelemetry: true,
'failpoint.skipClusterParameterRefresh': "{'mode':'alwaysOn'}"
}
},
diff --git a/jstests/noPassthrough/queryStats/query_stats_sampling_rate.js b/jstests/noPassthrough/telemetry/telemetry_sampling_rate.js
index 009c59737fa..1bada398a03 100644
--- a/jstests/noPassthrough/queryStats/query_stats_sampling_rate.js
+++ b/jstests/noPassthrough/telemetry/telemetry_sampling_rate.js
@@ -1,7 +1,7 @@
/**
* Test that calls to read from telemetry store fail when sampling rate is not greater than 0 even
* if feature flag is on.
- * @tags: [featureFlagQueryStats]
+ * @tags: [featureFlagTelemetry]
*/
load('jstests/libs/analyze_plan.js');
@@ -9,7 +9,7 @@ load('jstests/libs/analyze_plan.js');
"use strict";
let options = {
- setParameter: {internalQueryStatsSamplingRate: 0},
+ setParameter: {internalQueryConfigureTelemetrySamplingRate: 0},
};
const conn = MongoRunner.runMongod(options);
@@ -23,15 +23,15 @@ for (var i = 0; i < 20; i++) {
coll.aggregate([{$match: {foo: 1}}], {cursor: {batchSize: 2}});
// Reading telemetry store with a sampling rate of 0 should return 0 documents.
-let telStore = testdb.adminCommand({aggregate: 1, pipeline: [{$queryStats: {}}], cursor: {}});
+let telStore = testdb.adminCommand({aggregate: 1, pipeline: [{$telemetry: {}}], cursor: {}});
assert.eq(telStore.cursor.firstBatch.length, 0);
// Reading telemetry store should work now with a sampling rate of greater than 0.
-assert.commandWorked(
- testdb.adminCommand({setParameter: 1, internalQueryStatsSamplingRate: 2147483647}));
+assert.commandWorked(testdb.adminCommand(
+ {setParameter: 1, internalQueryConfigureTelemetrySamplingRate: 2147483647}));
coll.aggregate([{$match: {foo: 1}}], {cursor: {batchSize: 2}});
telStore = assert.commandWorked(
- testdb.adminCommand({aggregate: 1, pipeline: [{$queryStats: {}}], cursor: {}}));
+ testdb.adminCommand({aggregate: 1, pipeline: [{$telemetry: {}}], cursor: {}}));
assert.eq(telStore.cursor.firstBatch.length, 1);
MongoRunner.stopMongod(conn);
diff --git a/jstests/noPassthrough/queryStats/query_stats_server_status_metrics.js b/jstests/noPassthrough/telemetry/telemetry_server_status_metrics.js
index b142d901a7f..2235d272a9f 100644
--- a/jstests/noPassthrough/queryStats/query_stats_server_status_metrics.js
+++ b/jstests/noPassthrough/telemetry/telemetry_server_status_metrics.js
@@ -1,6 +1,6 @@
/**
* Test the telemetry related serverStatus metrics.
- * @tags: [featureFlagQueryStats]
+ * @tags: [featureFlagTelemetry]
*/
load('jstests/libs/analyze_plan.js');
@@ -23,7 +23,7 @@ function runTestWithMongodOptions(mongodOptions, test, testOptions) {
* testOptions must include `resetCacheSize` bool field; e.g., { resetCacheSize : true }
*/
function evictionTest(conn, testDB, coll, testOptions) {
- const evictedBefore = testDB.serverStatus().metrics.queryStats.numEvicted;
+ const evictedBefore = testDB.serverStatus().metrics.telemetry.numEvicted;
assert.eq(evictedBefore, 0);
for (var i = 0; i < 4000; i++) {
let query = {};
@@ -31,16 +31,16 @@ function evictionTest(conn, testDB, coll, testOptions) {
coll.aggregate([{$match: query}]).itcount();
}
if (!testOptions.resetCacheSize) {
- const evictedAfter = testDB.serverStatus().metrics.queryStats.numEvicted;
+ const evictedAfter = testDB.serverStatus().metrics.telemetry.numEvicted;
assert.gt(evictedAfter, 0);
return;
}
// Make sure number of evicted entries increases when the cache size is reset, which forces out
// least recently used entries to meet the new, smaller size requirement.
- assert.eq(testDB.serverStatus().metrics.queryStats.numEvicted, 0);
+ assert.eq(testDB.serverStatus().metrics.telemetry.numEvicted, 0);
assert.commandWorked(
- testDB.adminCommand({setParameter: 1, internalQueryConfigureQueryStatsCacheSize: "1MB"}));
- const evictedAfter = testDB.serverStatus().metrics.queryStats.numEvicted;
+ testDB.adminCommand({setParameter: 1, internalQueryConfigureTelemetryCacheSize: "1MB"}));
+ const evictedAfter = testDB.serverStatus().metrics.telemetry.numEvicted;
assert.gt(evictedAfter, 0);
}
@@ -53,7 +53,7 @@ function evictionTest(conn, testDB, coll, testOptions) {
*/
function countRateLimitedRequestsTest(conn, testDB, coll, testOptions) {
const numRateLimitedRequestsBefore =
- testDB.serverStatus().metrics.queryStats.numRateLimitedRequests;
+ testDB.serverStatus().metrics.telemetry.numRateLimitedRequests;
assert.eq(numRateLimitedRequestsBefore, 0);
coll.insert({a: 0});
@@ -65,7 +65,7 @@ function countRateLimitedRequestsTest(conn, testDB, coll, testOptions) {
}
const numRateLimitedRequestsAfter =
- testDB.serverStatus().metrics.queryStats.numRateLimitedRequests;
+ testDB.serverStatus().metrics.telemetry.numRateLimitedRequests;
if (testOptions.samplingRate === 0) {
// Telemetry should not be collected for any requests.
@@ -81,7 +81,7 @@ function countRateLimitedRequestsTest(conn, testDB, coll, testOptions) {
}
function telemetryStoreSizeEstimateTest(conn, testDB, coll, testOptions) {
- assert.eq(testDB.serverStatus().metrics.queryStats.queryStatsStoreSizeEstimateBytes, 0);
+ assert.eq(testDB.serverStatus().metrics.telemetry.telemetryStoreSizeEstimateBytes, 0);
let halfWayPointSize;
// Only using three digit numbers (eg 100, 101) means the string length will be the same for all
// entries and therefore the key size will be the same for all entries, which makes predicting
@@ -90,12 +90,12 @@ function telemetryStoreSizeEstimateTest(conn, testDB, coll, testOptions) {
coll.aggregate([{$match: {["foo" + i]: "bar"}}]).itcount();
if (i == 150) {
halfWayPointSize =
- testDB.serverStatus().metrics.queryStats.queryStatsStoreSizeEstimateBytes;
+ testDB.serverStatus().metrics.telemetry.telemetryStoreSizeEstimateBytes;
}
}
// Confirm that telemetry store has grown and size is non-zero.
assert.gt(halfWayPointSize, 0);
- const fullSize = testDB.serverStatus().metrics.queryStats.queryStatsStoreSizeEstimateBytes;
+ const fullSize = testDB.serverStatus().metrics.telemetry.telemetryStoreSizeEstimateBytes;
assert.gt(fullSize, 0);
// Make sure the final telemetry store size is twice as much as the halfway point size (+/- 5%)
assert(fullSize >= halfWayPointSize * 1.95 && fullSize <= halfWayPointSize * 2.05,
@@ -109,7 +109,7 @@ function telemetryStoreWriteErrorsTest(conn, testDB, coll, testOptions) {
return;
}
- const errorsBefore = testDB.serverStatus().metrics.queryStats.numQueryStatsStoreWriteErrors;
+ const errorsBefore = testDB.serverStatus().metrics.telemetry.numTelemetryStoreWriteErrors;
assert.eq(errorsBefore, 0);
for (let i = 0; i < 5; i++) {
// Command should succeed and record the error.
@@ -121,7 +121,7 @@ function telemetryStoreWriteErrorsTest(conn, testDB, coll, testOptions) {
// Make sure that we recorded a write error for each run.
// TODO SERVER-73152 we attempt to write to the telemetry store twice for each aggregate, which
// seems wrong.
- assert.eq(testDB.serverStatus().metrics.queryStats.numQueryStatsStoreWriteErrors, 10);
+ assert.eq(testDB.serverStatus().metrics.telemetry.numTelemetryStoreWriteErrors, 10);
}
/**
@@ -129,8 +129,10 @@ function telemetryStoreWriteErrorsTest(conn, testDB, coll, testOptions) {
* eviction.
*/
runTestWithMongodOptions({
- setParameter:
- {internalQueryConfigureQueryStatsCacheSize: "1MB", internalQueryStatsSamplingRate: -1},
+ setParameter: {
+ internalQueryConfigureTelemetryCacheSize: "1MB",
+ internalQueryConfigureTelemetrySamplingRate: -1
+ },
},
evictionTest,
{resetCacheSize: false});
@@ -138,8 +140,10 @@ runTestWithMongodOptions({
* In this configuration, eviction is triggered only when the telemetry store size is reset.
* */
runTestWithMongodOptions({
- setParameter:
- {internalQueryConfigureQueryStatsCacheSize: "4MB", internalQueryStatsSamplingRate: -1},
+ setParameter: {
+ internalQueryConfigureTelemetryCacheSize: "4MB",
+ internalQueryConfigureTelemetrySamplingRate: -1
+ },
},
evictionTest,
{resetCacheSize: true});
@@ -148,7 +152,7 @@ runTestWithMongodOptions({
* In this configuration, every query is sampled, so no requests should be rate-limited.
*/
runTestWithMongodOptions({
- setParameter: {internalQueryStatsSamplingRate: -1},
+ setParameter: {internalQueryConfigureTelemetrySamplingRate: -1},
},
countRateLimitedRequestsTest,
{samplingRate: 2147483647, numRequests: 20});
@@ -158,7 +162,7 @@ runTestWithMongodOptions({
* rate-limited.
*/
runTestWithMongodOptions({
- setParameter: {internalQueryStatsSamplingRate: 10},
+ setParameter: {internalQueryConfigureTelemetrySamplingRate: 10},
},
countRateLimitedRequestsTest,
{samplingRate: 10, numRequests: 20});
@@ -168,7 +172,7 @@ runTestWithMongodOptions({
* size
*/
runTestWithMongodOptions({
- setParameter: {internalQueryStatsSamplingRate: -1},
+ setParameter: {internalQueryConfigureTelemetrySamplingRate: -1},
},
telemetryStoreSizeEstimateTest);
@@ -178,8 +182,8 @@ runTestWithMongodOptions({
*/
runTestWithMongodOptions({
setParameter: {
- internalQueryConfigureQueryStatsCacheSize: "0.00001MB",
- internalQueryStatsSamplingRate: -1
+ internalQueryConfigureTelemetryCacheSize: "0.00001MB",
+ internalQueryConfigureTelemetrySamplingRate: -1
},
},
telemetryStoreWriteErrorsTest);
diff --git a/jstests/noPassthrough/queryStats/query_stats_upgrade.js b/jstests/noPassthrough/telemetry/telemetry_upgrade.js
index 919d9f87baf..f396d23b948 100644
--- a/jstests/noPassthrough/queryStats/query_stats_upgrade.js
+++ b/jstests/noPassthrough/telemetry/telemetry_upgrade.js
@@ -1,6 +1,6 @@
/**
* Test that telemetry doesn't work on a lower FCV version but works after an FCV upgrade.
- * @tags: [featureFlagQueryStats]
+ * @tags: [featureFlagTelemetry]
*/
load('jstests/libs/analyze_plan.js');
load("jstests/libs/feature_flag_util.js");
@@ -12,7 +12,7 @@ const dbpath = MongoRunner.dataPath + jsTestName();
let conn = MongoRunner.runMongod({dbpath: dbpath});
let testDB = conn.getDB(jsTestName());
// This test should only be run with the flag enabled.
-assert(FeatureFlagUtil.isEnabled(testDB, "QueryStats"));
+assert(FeatureFlagUtil.isEnabled(testDB, "Telemetry"));
function testLower(restart = false) {
let adminDB = conn.getDB("admin");
@@ -26,7 +26,7 @@ function testLower(restart = false) {
}
assert.commandFailedWithCode(
- testDB.adminCommand({aggregate: 1, pipeline: [{$queryStats: {}}], cursor: {}}), 6579000);
+ testDB.adminCommand({aggregate: 1, pipeline: [{$telemetry: {}}], cursor: {}}), 6579000);
// Upgrade FCV.
assert.commandWorked(adminDB.runCommand(
@@ -34,7 +34,7 @@ function testLower(restart = false) {
// We should be able to run a telemetry pipeline now that the FCV is correct.
assert.commandWorked(
- testDB.adminCommand({aggregate: 1, pipeline: [{$queryStats: {}}], cursor: {}}),
+ testDB.adminCommand({aggregate: 1, pipeline: [{$telemetry: {}}], cursor: {}}),
);
}
testLower(true);
diff --git a/jstests/noPassthroughWithMongod/telemetry_configuration.js b/jstests/noPassthroughWithMongod/telemetry_configuration.js
index 370733a4480..0ae4e8408c3 100644
--- a/jstests/noPassthroughWithMongod/telemetry_configuration.js
+++ b/jstests/noPassthroughWithMongod/telemetry_configuration.js
@@ -22,13 +22,14 @@ if (FeatureFlagUtil.isEnabled(db, "Telemetry")) {
}
}
testTelemetrySetting("internalQueryConfigureTelemetryCacheSize", "2MB");
- testTelemetrySetting("internalQueryStatsSamplingRate", 2147483647);
+ testTelemetrySetting("internalQueryConfigureTelemetrySamplingRate", 2147483647);
} else {
// The feature flag is disabled - make sure the telemetry store *cannot* be configured.
assert.commandFailedWithCode(
db.adminCommand({setParameter: 1, internalQueryConfigureTelemetryCacheSize: '2MB'}),
7373500);
assert.commandFailedWithCode(
- db.adminCommand({setParameter: 1, internalQueryStatsSamplingRate: 2147483647}), 7506200);
+ db.adminCommand({setParameter: 1, internalQueryConfigureTelemetrySamplingRate: 2147483647}),
+ 7506200);
}
}());
diff --git a/src/mongo/db/auth/action_type.idl b/src/mongo/db/auth/action_type.idl
index 6837625e6f1..137ac3c9542 100644
--- a/src/mongo/db/auth/action_type.idl
+++ b/src/mongo/db/auth/action_type.idl
@@ -149,7 +149,7 @@ enums:
planCacheIndexFilter : "planCacheIndexFilter" # view/update index filters
planCacheRead : "planCacheRead" # view contents of plan cache
planCacheWrite : "planCacheWrite" # clear cache, drop cache entry, pin/unpin/shun plans
- queryStatsRead: "queryStatsRead" # view contents of queryStats store
+ telemetryRead: "telemetryRead" # view contents of telemetry store
refineCollectionShardKey : "refineCollectionShardKey"
reIndex : "reIndex"
remove : "remove"
diff --git a/src/mongo/db/auth/builtin_roles.yml b/src/mongo/db/auth/builtin_roles.yml
index e384a959f1d..a29a476a91c 100644
--- a/src/mongo/db/auth/builtin_roles.yml
+++ b/src/mongo/db/auth/builtin_roles.yml
@@ -353,7 +353,7 @@ roles:
- getClusterParameter
- setChangeStreamState
- getChangeStreamState
- - queryStatsRead
+ - telemetryRead
- checkMetadataConsistency
- transitionFromDedicatedConfigServer
- transitionToDedicatedConfigServer
diff --git a/src/mongo/db/clientcursor.cpp b/src/mongo/db/clientcursor.cpp
index 3b4f0143876..55e116e5893 100644
--- a/src/mongo/db/clientcursor.cpp
+++ b/src/mongo/db/clientcursor.cpp
@@ -48,7 +48,7 @@
#include "mongo/db/cursor_server_params.h"
#include "mongo/db/jsobj.h"
#include "mongo/db/query/explain.h"
-#include "mongo/db/query/query_stats.h"
+#include "mongo/db/query/telemetry.h"
#include "mongo/db/repl/repl_client_info.h"
#include "mongo/db/repl/replication_coordinator.h"
#include "mongo/util/background.h"
@@ -124,10 +124,10 @@ ClientCursor::ClientCursor(ClientCursorParams params,
_planSummary(_exec->getPlanExplainer().getPlanSummary()),
_planCacheKey(CurOp::get(operationUsingCursor)->debug().planCacheKey),
_queryHash(CurOp::get(operationUsingCursor)->debug().queryHash),
- _queryStatsStoreKeyHash(CurOp::get(operationUsingCursor)->debug().queryStatsStoreKeyHash),
- _queryStatsStoreKey(CurOp::get(operationUsingCursor)->debug().queryStatsStoreKey),
- _queryStatsRequestShapifier(
- std::move(CurOp::get(operationUsingCursor)->debug().queryStatsRequestShapifier)),
+ _telemetryStoreKeyHash(CurOp::get(operationUsingCursor)->debug().telemetryStoreKeyHash),
+ _telemetryStoreKey(CurOp::get(operationUsingCursor)->debug().telemetryStoreKey),
+ _telemetryRequestShapifier(
+ std::move(CurOp::get(operationUsingCursor)->debug().telemetryRequestShapifier)),
_shouldOmitDiagnosticInformation(
CurOp::get(operationUsingCursor)->debug().shouldOmitDiagnosticInformation),
_opKey(operationUsingCursor->getOperationKey()) {
@@ -161,13 +161,13 @@ void ClientCursor::dispose(OperationContext* opCtx, boost::optional<Date_t> now)
return;
}
- if (_queryStatsStoreKeyHash && opCtx) {
- query_stats::writeQueryStats(opCtx,
- _queryStatsStoreKeyHash,
- _queryStatsStoreKey,
- std::move(_queryStatsRequestShapifier),
- _metrics.executionTime.value_or(Microseconds{0}).count(),
- _metrics.nreturned.value_or(0));
+ if (_telemetryStoreKeyHash && opCtx) {
+ telemetry::writeTelemetry(opCtx,
+ _telemetryStoreKeyHash,
+ _telemetryStoreKey,
+ std::move(_telemetryRequestShapifier),
+ _metrics.executionTime.value_or(Microseconds{0}).count(),
+ _metrics.nreturned.value_or(0));
}
if (now) {
@@ -397,19 +397,19 @@ void startClientCursorMonitor() {
getClientCursorMonitor(getGlobalServiceContext()).go();
}
-void collectQueryStatsMongod(OperationContext* opCtx, ClientCursorPin& pinnedCursor) {
+void collectTelemetryMongod(OperationContext* opCtx, ClientCursorPin& pinnedCursor) {
pinnedCursor->incrementCursorMetrics(CurOp::get(opCtx)->debug().additiveMetrics);
}
-void collectQueryStatsMongod(OperationContext* opCtx,
- std::unique_ptr<query_stats::RequestShapifier> requestShapifier) {
+void collectTelemetryMongod(OperationContext* opCtx,
+ std::unique_ptr<telemetry::RequestShapifier> requestShapifier) {
// If we haven't registered a cursor to prepare for getMore requests, we record
// telemetry directly.
auto& opDebug = CurOp::get(opCtx)->debug();
- query_stats::writeQueryStats(
+ telemetry::writeTelemetry(
opCtx,
- opDebug.queryStatsStoreKeyHash,
- opDebug.queryStatsStoreKey,
+ opDebug.telemetryStoreKeyHash,
+ opDebug.telemetryStoreKey,
std::move(requestShapifier),
opDebug.additiveMetrics.executionTime.value_or(Microseconds{0}).count(),
opDebug.additiveMetrics.nreturned.value_or(0));
diff --git a/src/mongo/db/clientcursor.h b/src/mongo/db/clientcursor.h
index 8ae75473496..9e7d35ade9a 100644
--- a/src/mongo/db/clientcursor.h
+++ b/src/mongo/db/clientcursor.h
@@ -448,15 +448,15 @@ private:
boost::optional<uint32_t> _queryHash;
// If boost::none, telemetry should not be collected for this cursor.
- boost::optional<std::size_t> _queryStatsStoreKeyHash;
+ boost::optional<std::size_t> _telemetryStoreKeyHash;
// TODO: SERVER-73152 remove telemetryStoreKey when RequestShapifier is used for agg.
- boost::optional<BSONObj> _queryStatsStoreKey;
+ boost::optional<BSONObj> _telemetryStoreKey;
// Metrics that are accumulated over the lifetime of the cursor, incremented with each getMore.
- // Useful for diagnostics like queryStats.
+ // Useful for diagnostics like telemetry.
OpDebug::AdditiveMetrics _metrics;
// The RequestShapifier used by telemetry to shapify the request payload into the telemetry
// store key.
- std::unique_ptr<query_stats::RequestShapifier> _queryStatsRequestShapifier;
+ std::unique_ptr<telemetry::RequestShapifier> _telemetryRequestShapifier;
// Flag to decide if diagnostic information should be omitted.
bool _shouldOmitDiagnosticInformation{false};
@@ -598,7 +598,7 @@ void startClientCursorMonitor();
* Currently, telemetry is only collected for find and aggregate requests (and their subsequent
* getMore requests), so these should only be called from those request paths.
*/
-void collectQueryStatsMongod(OperationContext* opCtx, ClientCursorPin& cursor);
-void collectQueryStatsMongod(OperationContext* opCtx,
- std::unique_ptr<query_stats::RequestShapifier> requestShapifier);
+void collectTelemetryMongod(OperationContext* opCtx, ClientCursorPin& cursor);
+void collectTelemetryMongod(OperationContext* opCtx,
+ std::unique_ptr<telemetry::RequestShapifier> requestShapifier);
} // namespace mongo
diff --git a/src/mongo/db/commands/find_cmd.cpp b/src/mongo/db/commands/find_cmd.cpp
index 983661fbd15..90e6fa15ca1 100644
--- a/src/mongo/db/commands/find_cmd.cpp
+++ b/src/mongo/db/commands/find_cmd.cpp
@@ -55,7 +55,7 @@
#include "mongo/db/query/find_request_shapifier.h"
#include "mongo/db/query/get_executor.h"
#include "mongo/db/query/query_knobs_gen.h"
-#include "mongo/db/query/query_stats.h"
+#include "mongo/db/query/telemetry.h"
#include "mongo/db/repl/replication_coordinator.h"
#include "mongo/db/s/query_analysis_writer.h"
#include "mongo/db/service_context.h"
@@ -561,14 +561,13 @@ public:
cq->setUseCqfIfEligible(true);
if (collection) {
- // Collect queryStats. Exclude queries against collections with encrypted fields.
+ // Collect telemetry. Exclude queries against collections with encrypted fields.
if (!collection.get()->getCollectionOptions().encryptedFieldConfig) {
- query_stats::registerRequest(
- std::make_unique<query_stats::FindRequestShapifier>(
- cq->getFindCommandRequest(), opCtx),
- collection.get()->ns(),
- opCtx,
- cq->getExpCtx());
+ telemetry::registerRequest(std::make_unique<telemetry::FindRequestShapifier>(
+ cq->getFindCommandRequest(), opCtx),
+ collection.get()->ns(),
+ opCtx,
+ cq->getExpCtx());
}
}
@@ -781,9 +780,9 @@ public:
processFLEFindD(
opCtx, findCommand->getNamespaceOrUUID().nss().value(), findCommand.get());
}
- // Set the queryStatsStoreKey to none so queryStats isn't collected when we've done
- // a FLE rewrite.
- CurOp::get(opCtx)->debug().queryStatsStoreKeyHash = boost::none;
+ // Set the telemetryStoreKey to none so telemetry isn't collected when we've done a
+ // FLE rewrite.
+ CurOp::get(opCtx)->debug().telemetryStoreKeyHash = boost::none;
CurOp::get(opCtx)->debug().shouldOmitDiagnosticInformation = true;
}
diff --git a/src/mongo/db/commands/getmore_cmd.cpp b/src/mongo/db/commands/getmore_cmd.cpp
index 54035c99829..8f91862d002 100644
--- a/src/mongo/db/commands/getmore_cmd.cpp
+++ b/src/mongo/db/commands/getmore_cmd.cpp
@@ -703,7 +703,7 @@ public:
metricsCollector.incrementDocUnitsReturned(curOp->getNS(), docUnitsReturned);
curOp->debug().additiveMetrics.nBatches = 1;
curOp->setEndOfOpMetrics(numResults);
- collectQueryStatsMongod(opCtx, cursorPin);
+ collectTelemetryMongod(opCtx, cursorPin);
if (respondWithId) {
cursorDeleter.dismiss();
diff --git a/src/mongo/db/commands/run_aggregate.cpp b/src/mongo/db/commands/run_aggregate.cpp
index 0bc7d7f6415..a290ef56713 100644
--- a/src/mongo/db/commands/run_aggregate.cpp
+++ b/src/mongo/db/commands/run_aggregate.cpp
@@ -76,7 +76,7 @@
#include "mongo/db/query/query_feature_flags_gen.h"
#include "mongo/db/query/query_knobs_gen.h"
#include "mongo/db/query/query_planner_common.h"
-#include "mongo/db/query/query_stats.h"
+#include "mongo/db/query/telemetry.h"
#include "mongo/db/read_concern.h"
#include "mongo/db/repl/oplog.h"
#include "mongo/db/repl/read_concern_args.h"
@@ -836,11 +836,11 @@ Status runAggregate(OperationContext* opCtx,
};
auto registerTelemetry = [&]() -> void {
- // Register queryStats. Exclude queries against collections with encrypted fields.
- // We still collect queryStats on collection-less aggregations.
+ // Register telemetry. Exclude queries against collections with encrypted fields.
+ // We still collect telemetry on collection-less aggregations.
if (!(ctx && ctx->getCollection() &&
ctx->getCollection()->getCollectionOptions().encryptedFieldConfig)) {
- query_stats::registerAggRequest(request, opCtx);
+ telemetry::registerAggRequest(request, opCtx);
}
};
@@ -1051,9 +1051,9 @@ Status runAggregate(OperationContext* opCtx,
request.getEncryptionInformation()->setCrudProcessed(true);
}
- // Set the queryStatsStoreKey to none so queryStats isn't collected when we've done a
- // FLE rewrite.
- CurOp::get(opCtx)->debug().queryStatsStoreKeyHash = boost::none;
+ // Set the telemetryStoreKey to none so telemetry isn't collected when we've done a FLE
+ // rewrite.
+ CurOp::get(opCtx)->debug().telemetryStoreKeyHash = boost::none;
}
pipeline->optimizePipeline();
@@ -1223,9 +1223,9 @@ Status runAggregate(OperationContext* opCtx,
curOp->setEndOfOpMetrics(stats.nReturned);
if (keepCursor) {
- collectQueryStatsMongod(opCtx, pins[0]);
+ collectTelemetryMongod(opCtx, pins[0]);
} else {
- collectQueryStatsMongod(opCtx, std::move(curOp->debug().queryStatsRequestShapifier));
+ collectTelemetryMongod(opCtx, std::move(curOp->debug().telemetryRequestShapifier));
}
// For an optimized away pipeline, signal the cache that a query operation has completed.
diff --git a/src/mongo/db/curop.h b/src/mongo/db/curop.h
index 9f5c32b10d1..8851993b015 100644
--- a/src/mongo/db/curop.h
+++ b/src/mongo/db/curop.h
@@ -294,12 +294,12 @@ public:
boost::optional<uint32_t> queryHash;
// The shape of the original query serialized with readConcern, application name, and namespace.
// If boost::none, telemetry should not be collected for this operation.
- boost::optional<std::size_t> queryStatsStoreKeyHash;
+ boost::optional<std::size_t> telemetryStoreKeyHash;
// TODO: SERVER-73152 remove telemetryStoreKey when RequestShapifier is used for agg.
- boost::optional<BSONObj> queryStatsStoreKey;
+ boost::optional<BSONObj> telemetryStoreKey;
// The RequestShapifier used by telemetry to shapify the request payload into the telemetry
// store key.
- std::unique_ptr<query_stats::RequestShapifier> queryStatsRequestShapifier;
+ std::unique_ptr<telemetry::RequestShapifier> telemetryRequestShapifier;
// The query framework that this operation used. Will be unknown for non query operations.
PlanExecutor::QueryFramework queryFramework{PlanExecutor::QueryFramework::kUnknown};
@@ -776,7 +776,7 @@ public:
return computeElapsedTimeTotal(start, _end.load()) - _totalPausedDuration;
}
/**
- * The planningTimeMicros metric, reported in the system profiler and in queryStats, is measured
+ * The planningTimeMicros metric, reported in the system profiler and in telemetry, is measured
* using the Curop instance's _tickSource. Currently, _tickSource is only paused in places where
logical work is being done. If this were to change, and _tickSource
were to be paused during query planning for reasons unrelated to the work of
diff --git a/src/mongo/db/cursor_manager.cpp b/src/mongo/db/cursor_manager.cpp
index 34f7d7bdce0..ac9c41accfd 100644
--- a/src/mongo/db/cursor_manager.cpp
+++ b/src/mongo/db/cursor_manager.cpp
@@ -214,10 +214,10 @@ StatusWith<ClientCursorPin> CursorManager::pinCursor(
CurOp::get(opCtx)->debug().queryHash = cursor->_queryHash;
CurOp::get(opCtx)->debug().planCacheKey = cursor->_planCacheKey;
- // Pass along queryStats context so it is retrievable after query execution for storing metrics.
- CurOp::get(opCtx)->debug().queryStatsStoreKeyHash = cursor->_queryStatsStoreKeyHash;
- // TODO: SERVER-73152 remove queryStatsStoreKey when RequestShapifier is used for agg.
- CurOp::get(opCtx)->debug().queryStatsStoreKey = cursor->_queryStatsStoreKey;
+ // Pass along telemetry context so it is retrievable after query execution for storing metrics.
+ CurOp::get(opCtx)->debug().telemetryStoreKeyHash = cursor->_telemetryStoreKeyHash;
+ // TODO: SERVER-73152 remove telemetryStoreKey when RequestShapifier is used for agg.
+ CurOp::get(opCtx)->debug().telemetryStoreKey = cursor->_telemetryStoreKey;
cursor->_operationUsingCursor = opCtx;
diff --git a/src/mongo/db/pipeline/SConscript b/src/mongo/db/pipeline/SConscript
index eacb62bb6ea..72a1ab942b4 100644
--- a/src/mongo/db/pipeline/SConscript
+++ b/src/mongo/db/pipeline/SConscript
@@ -328,7 +328,7 @@ pipelineEnv.Library(
'document_source_sort_by_count.cpp',
'document_source_streaming_group.cpp',
'document_source_tee_consumer.cpp',
- 'document_source_query_stats.cpp',
+ 'document_source_telemetry.cpp',
'document_source_union_with.cpp',
'document_source_unwind.cpp',
'group_from_first_document_transformation.cpp',
@@ -634,7 +634,7 @@ env.CppUnitTest(
'document_source_skip_test.cpp',
'document_source_sort_by_count_test.cpp',
'document_source_sort_test.cpp',
- 'document_source_query_stats_test.cpp',
+ 'document_source_telemetry_test.cpp',
'document_source_union_with_test.cpp',
'document_source_internal_compute_geo_near_distance_test.cpp',
'document_source_internal_convert_bucket_index_stats_test.cpp',
diff --git a/src/mongo/db/pipeline/abt/document_source_visitor.cpp b/src/mongo/db/pipeline/abt/document_source_visitor.cpp
index 9b7b27d3af0..2170ab14407 100644
--- a/src/mongo/db/pipeline/abt/document_source_visitor.cpp
+++ b/src/mongo/db/pipeline/abt/document_source_visitor.cpp
@@ -58,7 +58,6 @@
#include "mongo/db/pipeline/document_source_operation_metrics.h"
#include "mongo/db/pipeline/document_source_out.h"
#include "mongo/db/pipeline/document_source_plan_cache_stats.h"
-#include "mongo/db/pipeline/document_source_query_stats.h"
#include "mongo/db/pipeline/document_source_queue.h"
#include "mongo/db/pipeline/document_source_redact.h"
#include "mongo/db/pipeline/document_source_sample.h"
@@ -68,6 +67,7 @@
#include "mongo/db/pipeline/document_source_skip.h"
#include "mongo/db/pipeline/document_source_sort.h"
#include "mongo/db/pipeline/document_source_tee_consumer.h"
+#include "mongo/db/pipeline/document_source_telemetry.h"
#include "mongo/db/pipeline/document_source_union_with.h"
#include "mongo/db/pipeline/document_source_unwind.h"
#include "mongo/db/pipeline/visitors/document_source_visitor_registry_mongod.h"
diff --git a/src/mongo/db/pipeline/aggregate_command.idl b/src/mongo/db/pipeline/aggregate_command.idl
index 476fc8dbb9d..b53ea540f8e 100644
--- a/src/mongo/db/pipeline/aggregate_command.idl
+++ b/src/mongo/db/pipeline/aggregate_command.idl
@@ -96,10 +96,10 @@ commands:
- privilege: # $planCacheStats
resource_pattern: exact_namespace
action_type: planCacheRead
- - privilege: # $queryStats
- agg_stage: queryStats
+ - privilege: # $telemetry
+ agg_stage: telemetry
resource_pattern: cluster
- action_type: queryStatsRead
+ action_type: telemetryRead
- privilege: # $changeStream
resource_pattern: exact_namespace
action_type: changeStream
diff --git a/src/mongo/db/pipeline/aggregate_request_shapifier.cpp b/src/mongo/db/pipeline/aggregate_request_shapifier.cpp
index 485b97e2c22..40ed6c2ce79 100644
--- a/src/mongo/db/pipeline/aggregate_request_shapifier.cpp
+++ b/src/mongo/db/pipeline/aggregate_request_shapifier.cpp
@@ -31,20 +31,20 @@
#include "mongo/db/query/query_shape.h"
-namespace mongo::query_stats {
+namespace mongo::telemetry {
-BSONObj AggregateRequestShapifier::makeQueryStatsKey(const SerializationOptions& opts,
- OperationContext* opCtx) const {
+BSONObj AggregateRequestShapifier::makeTelemetryKey(const SerializationOptions& opts,
+ OperationContext* opCtx) const {
// TODO SERVER-76087 We will likely want to set a flag here to stop $search from calling out
// to mongot.
auto expCtx = make_intrusive<ExpressionContext>(opCtx, nullptr, _request.getNamespace());
expCtx->variables.setDefaultRuntimeConstants(opCtx);
expCtx->maxFeatureCompatibilityVersion = boost::none; // Ensure all features are allowed.
expCtx->stopExpressionCounters();
- return makeQueryStatsKey(opts, expCtx);
+ return makeTelemetryKey(opts, expCtx);
}
-BSONObj AggregateRequestShapifier::makeQueryStatsKey(
+BSONObj AggregateRequestShapifier::makeTelemetryKey(
const SerializationOptions& opts, const boost::intrusive_ptr<ExpressionContext>& expCtx) const {
BSONObjBuilder bob;
@@ -84,4 +84,4 @@ BSONObj AggregateRequestShapifier::makeQueryStatsKey(
return bob.obj();
}
-} // namespace mongo::query_stats
+} // namespace mongo::telemetry
diff --git a/src/mongo/db/pipeline/aggregate_request_shapifier.h b/src/mongo/db/pipeline/aggregate_request_shapifier.h
index d78dae31be7..3a0c41f8dd9 100644
--- a/src/mongo/db/pipeline/aggregate_request_shapifier.h
+++ b/src/mongo/db/pipeline/aggregate_request_shapifier.h
@@ -33,7 +33,7 @@
#include "mongo/db/pipeline/pipeline.h"
#include "mongo/db/query/request_shapifier.h"
-namespace mongo::query_stats {
+namespace mongo::telemetry {
/**
* Handles shapification for AggregateCommandRequests. Requires a pre-parsed pipeline in order to
@@ -50,14 +50,13 @@ public:
virtual ~AggregateRequestShapifier() = default;
- BSONObj makeQueryStatsKey(const SerializationOptions& opts,
- OperationContext* opCtx) const final;
+ BSONObj makeTelemetryKey(const SerializationOptions& opts, OperationContext* opCtx) const final;
- BSONObj makeQueryStatsKey(const SerializationOptions& opts,
- const boost::intrusive_ptr<ExpressionContext>& expCtx) const final;
+ BSONObj makeTelemetryKey(const SerializationOptions& opts,
+ const boost::intrusive_ptr<ExpressionContext>& expCtx) const final;
private:
const AggregateCommandRequest& _request;
const Pipeline& _pipeline;
};
-} // namespace mongo::query_stats
+} // namespace mongo::telemetry
diff --git a/src/mongo/db/pipeline/document_source_query_stats.cpp b/src/mongo/db/pipeline/document_source_telemetry.cpp
index 48f14e0ade6..b037515796f 100644
--- a/src/mongo/db/pipeline/document_source_query_stats.cpp
+++ b/src/mongo/db/pipeline/document_source_telemetry.cpp
@@ -27,7 +27,7 @@
* it in the license file.
*/
-#include "mongo/db/pipeline/document_source_query_stats.h"
+#include "mongo/db/pipeline/document_source_telemetry.h"
#include "mongo/bson/bsontypes.h"
#include "mongo/bson/timestamp.h"
@@ -38,14 +38,14 @@
namespace mongo {
namespace {
-CounterMetric queryStatsHmacApplicationErrors("queryStats.numHmacApplicationErrors");
+CounterMetric telemetryHmacApplicationErrors("telemetry.numHmacApplicationErrors");
}
-REGISTER_DOCUMENT_SOURCE_WITH_FEATURE_FLAG(queryStats,
- DocumentSourceQueryStats::LiteParsed::parse,
- DocumentSourceQueryStats::createFromBson,
+REGISTER_DOCUMENT_SOURCE_WITH_FEATURE_FLAG(telemetry,
+ DocumentSourceTelemetry::LiteParsed::parse,
+ DocumentSourceTelemetry::createFromBson,
AllowedWithApiStrict::kNeverInVersion1,
- feature_flags::gFeatureFlagQueryStats);
+ feature_flags::gFeatureFlagTelemetry);
namespace {
/**
@@ -55,7 +55,7 @@ boost::optional<bool> parseApplyHmacToIdentifiers(const BSONElement& el) {
if (el.fieldNameStringData() == "applyHmacToIdentifiers"_sd) {
auto type = el.type();
uassert(ErrorCodes::FailedToParse,
- str::stream() << DocumentSourceQueryStats::kStageName
+ str::stream() << DocumentSourceTelemetry::kStageName
<< " applyHmacToIdentifiers parameter must be boolean. Found type: "
<< typeName(type),
type == BSONType::Bool);
@@ -74,14 +74,14 @@ boost::optional<std::string> parseHmacKey(const BSONElement& el) {
int len;
auto data = el.binData(len);
uassert(ErrorCodes::FailedToParse,
- str::stream() << DocumentSourceQueryStats::kStageName
+ str::stream() << DocumentSourceTelemetry::kStageName
<< "hmacKey must be greater than or equal to 32 bytes",
len >= 32);
return {{data, (size_t)len}};
}
uasserted(ErrorCodes::FailedToParse,
str::stream()
- << DocumentSourceQueryStats::kStageName
+ << DocumentSourceTelemetry::kStageName
<< " hmacKey parameter must be bindata of length 32 or greater. Found type: "
<< typeName(type));
}
@@ -95,7 +95,7 @@ boost::optional<std::string> parseHmacKey(const BSONElement& el) {
template <typename Ctor>
auto parseSpec(const BSONElement& spec, const Ctor& ctor) {
uassert(ErrorCodes::FailedToParse,
- str::stream() << DocumentSourceQueryStats::kStageName
+ str::stream() << DocumentSourceTelemetry::kStageName
<< " value must be an object. Found: " << typeName(spec.type()),
spec.type() == BSONType::Object);
@@ -110,7 +110,7 @@ auto parseSpec(const BSONElement& spec, const Ctor& ctor) {
} else {
uasserted(ErrorCodes::FailedToParse,
str::stream()
- << DocumentSourceQueryStats::kStageName
+ << DocumentSourceTelemetry::kStageName
<< " parameters object may only contain 'applyHmacToIdentifiers' or "
"'hmacKey' options. Found: "
<< el.fieldName());
@@ -122,34 +122,34 @@ auto parseSpec(const BSONElement& spec, const Ctor& ctor) {
} // namespace
-std::unique_ptr<DocumentSourceQueryStats::LiteParsed> DocumentSourceQueryStats::LiteParsed::parse(
+std::unique_ptr<DocumentSourceTelemetry::LiteParsed> DocumentSourceTelemetry::LiteParsed::parse(
const NamespaceString& nss, const BSONElement& spec) {
return parseSpec(spec, [&](bool applyHmacToIdentifiers, std::string hmacKey) {
- return std::make_unique<DocumentSourceQueryStats::LiteParsed>(
+ return std::make_unique<DocumentSourceTelemetry::LiteParsed>(
spec.fieldName(), applyHmacToIdentifiers, hmacKey);
});
}
-boost::intrusive_ptr<DocumentSource> DocumentSourceQueryStats::createFromBson(
+boost::intrusive_ptr<DocumentSource> DocumentSourceTelemetry::createFromBson(
BSONElement spec, const boost::intrusive_ptr<ExpressionContext>& pExpCtx) {
const NamespaceString& nss = pExpCtx->ns;
uassert(ErrorCodes::InvalidNamespace,
- "$queryStats must be run against the 'admin' database with {aggregate: 1}",
+ "$telemetry must be run against the 'admin' database with {aggregate: 1}",
nss.db() == DatabaseName::kAdmin.db() && nss.isCollectionlessAggregateNS());
return parseSpec(spec, [&](bool applyHmacToIdentifiers, std::string hmacKey) {
- return new DocumentSourceQueryStats(pExpCtx, applyHmacToIdentifiers, hmacKey);
+ return new DocumentSourceTelemetry(pExpCtx, applyHmacToIdentifiers, hmacKey);
});
}
-Value DocumentSourceQueryStats::serialize(SerializationOptions opts) const {
+Value DocumentSourceTelemetry::serialize(SerializationOptions opts) const {
// This document source never contains any user information, so no need for any work when
// applying hmac.
return Value{Document{{kStageName, Document{}}}};
}
-DocumentSource::GetNextResult DocumentSourceQueryStats::doGetNext() {
+DocumentSource::GetNextResult DocumentSourceTelemetry::doGetNext() {
/**
* We maintain nested iterators:
* - Outer one over the set of partitions.
@@ -158,7 +158,7 @@ DocumentSource::GetNextResult DocumentSourceQueryStats::doGetNext() {
* When an inner iterator is present and contains more elements, we can return the next element.
* When the inner iterator is exhausted, we move to the next element in the outer iterator and
* create a new inner iterator. When the outer iterator is exhausted, we have finished iterating
- * over the queryStats store entries.
+ * over the telemetry store entries.
*
* The inner iterator iterates over a materialized container of all entries in the partition.
* This is done to reduce the time under which the partition lock is held.
@@ -172,17 +172,17 @@ DocumentSource::GetNextResult DocumentSourceQueryStats::doGetNext() {
return {std::move(doc)};
}
- QueryStatsStore& _queryStatsStore = getQueryStatsStore(getContext()->opCtx);
+ TelemetryStore& _telemetryStore = getTelemetryStore(getContext()->opCtx);
// Materialized partition is exhausted, move to the next.
_currentPartition++;
- if (_currentPartition >= _queryStatsStore.numPartitions()) {
+ if (_currentPartition >= _telemetryStore.numPartitions()) {
return DocumentSource::GetNextResult::makeEOF();
}
// We only keep the partition (which holds a lock) for the time needed to materialize it to
// a set of Document instances.
- auto&& partition = _queryStatsStore.getPartition(_currentPartition);
+ auto&& partition = _telemetryStore.getPartition(_currentPartition);
// Capture the time at which reading the partition begins to indicate to the caller
// when the snapshot began.
@@ -190,22 +190,22 @@ DocumentSource::GetNextResult DocumentSourceQueryStats::doGetNext() {
Timestamp{Timestamp(Date_t::now().toMillisSinceEpoch() / 1000, 0)};
for (auto&& [key, metrics] : *partition) {
try {
- auto queryStatsKey = metrics->computeQueryStatsKey(
- pExpCtx->opCtx, _applyHmacToIdentifiers, _hmacKey);
- _materializedPartition.push_back({{"key", std::move(queryStatsKey)},
+ auto telemetryKey =
+ metrics->computeTelemetryKey(pExpCtx->opCtx, _applyHmacToIdentifiers, _hmacKey);
+ _materializedPartition.push_back({{"key", std::move(telemetryKey)},
{"metrics", metrics->toBSON()},
{"asOf", partitionReadTime}});
} catch (const DBException& ex) {
- queryStatsHmacApplicationErrors.increment();
+ telemetryHmacApplicationErrors.increment();
LOGV2_DEBUG(7349403,
3,
"Error encountered when applying hmac to query shape, will not publish "
- "queryStats for this entry.",
+ "telemetry for this entry.",
"status"_attr = ex.toStatus(),
"hash"_attr = key);
if (kDebugBuild) {
tasserted(7349401,
- "Was not able to re-parse queryStats key when reading queryStats.");
+ "Was not able to re-parse telemetry key when reading telemetry.");
}
}
}
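
The doGetNext() comment above describes a two-level iteration: an outer iterator over the store's partitions and an inner iterator over a materialized copy of one partition, so the partition lock is held only long enough to materialize that partition. The standalone sketch below illustrates that materialize-then-release pattern; Partition, Entry and the mutex-per-partition layout are invented stand-ins, not the mongod classes.

#include <deque>
#include <iostream>
#include <mutex>
#include <string>
#include <vector>

// Invented stand-ins: the real store keys entries by the query-shape hash.
struct Entry {
    std::string shapeKey;
    long long execCount;
};

struct Partition {
    std::mutex mutex;  // guards `entries`; held only while materializing
    std::vector<Entry> entries;
};

// Copy one partition's contents while holding its lock, so everything after
// this call (yielding results to the caller) runs without the lock.
std::deque<Entry> materialize(Partition& partition) {
    std::lock_guard<std::mutex> lk(partition.mutex);
    return std::deque<Entry>(partition.entries.begin(), partition.entries.end());
}

int main() {
    std::vector<Partition> store(4);  // fixed set of partitions
    store[1].entries.push_back({"{find: '?', filter: {a: '?'}}", 3});

    for (auto& partition : store) {              // outer iterator
        auto snapshot = materialize(partition);  // lock held only inside here
        for (const auto& entry : snapshot)       // inner iterator, lock-free
            std::cout << entry.shapeKey << " -> " << entry.execCount << '\n';
    }
    return 0;
}

Here the lock covers only the single pass that copies entries out; handing results back one at a time happens against the snapshot, so a slow consumer never pins the partition.
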
diff --git a/src/mongo/db/pipeline/document_source_query_stats.h b/src/mongo/db/pipeline/document_source_telemetry.h
index 74d40583a6a..c71bff210ac 100644
--- a/src/mongo/db/pipeline/document_source_query_stats.h
+++ b/src/mongo/db/pipeline/document_source_telemetry.h
@@ -31,16 +31,16 @@
#include "mongo/db/pipeline/document_source.h"
#include "mongo/db/pipeline/lite_parsed_document_source.h"
-#include "mongo/db/query/query_stats.h"
+#include "mongo/db/query/telemetry.h"
#include "mongo/util/producer_consumer_queue.h"
namespace mongo {
-using namespace query_stats;
+using namespace telemetry;
-class DocumentSourceQueryStats final : public DocumentSource {
+class DocumentSourceTelemetry final : public DocumentSource {
public:
- static constexpr StringData kStageName = "$queryStats"_sd;
+ static constexpr StringData kStageName = "$telemetry"_sd;
class LiteParsed final : public LiteParsedDocumentSource {
public:
@@ -58,12 +58,12 @@ public:
PrivilegeVector requiredPrivileges(bool isMongos,
bool bypassDocumentValidation) const override {
- return {Privilege(ResourcePattern::forClusterResource(), ActionType::queryStatsRead)};
+ return {Privilege(ResourcePattern::forClusterResource(), ActionType::telemetryRead)};
;
}
bool allowedToPassthroughFromMongos() const final {
- // $queryStats must be run locally on a mongod.
+ // $telemetry must be run locally on a mongod.
return false;
}
@@ -83,7 +83,7 @@ public:
static boost::intrusive_ptr<DocumentSource> createFromBson(
BSONElement elem, const boost::intrusive_ptr<ExpressionContext>& pExpCtx);
- virtual ~DocumentSourceQueryStats() = default;
+ virtual ~DocumentSourceTelemetry() = default;
StageConstraints constraints(
Pipeline::SplitState = Pipeline::SplitState::kUnsplit) const override {
@@ -114,9 +114,9 @@ public:
void addVariableRefs(std::set<Variables::Id>* refs) const final {}
private:
- DocumentSourceQueryStats(const boost::intrusive_ptr<ExpressionContext>& expCtx,
- bool applyHmacToIdentifiers = false,
- std::string hmacKey = {})
+ DocumentSourceTelemetry(const boost::intrusive_ptr<ExpressionContext>& expCtx,
+ bool applyHmacToIdentifiers = false,
+ std::string hmacKey = {})
: DocumentSource(kStageName, expCtx),
_applyHmacToIdentifiers(applyHmacToIdentifiers),
_hmacKey(hmacKey) {}
@@ -130,10 +130,10 @@ private:
std::deque<Document> _materializedPartition;
/**
- * Iterator over all queryStats partitions. This is incremented when we exhaust the current
+ * Iterator over all telemetry partitions. This is incremented when we exhaust the current
* _materializedPartition.
*/
- QueryStatsStore::PartitionId _currentPartition = -1;
+ TelemetryStore::PartitionId _currentPartition = -1;
// When true, apply hmac to field names from returned query shapes.
bool _applyHmacToIdentifiers;
diff --git a/src/mongo/db/pipeline/document_source_query_stats_test.cpp b/src/mongo/db/pipeline/document_source_telemetry_test.cpp
index 7e29a44d591..d08ce06b98c 100644
--- a/src/mongo/db/pipeline/document_source_query_stats_test.cpp
+++ b/src/mongo/db/pipeline/document_source_telemetry_test.cpp
@@ -32,7 +32,7 @@
#include "mongo/db/exec/document_value/document.h"
#include "mongo/db/exec/document_value/document_value_test_util.h"
#include "mongo/db/pipeline/aggregation_context_fixture.h"
-#include "mongo/db/pipeline/document_source_query_stats.h"
+#include "mongo/db/pipeline/document_source_telemetry.h"
#include "mongo/unittest/unittest.h"
#include "mongo/util/assert_util.h"
#include "mongo/util/str.h"
@@ -45,50 +45,50 @@ namespace {
* {aggregate: 1} by default, so that parsing tests other than those which validate the namespace do
* not need to explicitly set it.
*/
-class DocumentSourceQueryStatsTest : public AggregationContextFixture {
+class DocumentSourceTelemetryTest : public AggregationContextFixture {
public:
- DocumentSourceQueryStatsTest()
+ DocumentSourceTelemetryTest()
: AggregationContextFixture(
NamespaceString::makeCollectionlessAggregateNSS(DatabaseName::kAdmin)) {}
};
-TEST_F(DocumentSourceQueryStatsTest, ShouldFailToParseIfSpecIsNotObject) {
- ASSERT_THROWS_CODE(DocumentSourceQueryStats::createFromBson(
- fromjson("{$queryStats: 1}").firstElement(), getExpCtx()),
+TEST_F(DocumentSourceTelemetryTest, ShouldFailToParseIfSpecIsNotObject) {
+ ASSERT_THROWS_CODE(DocumentSourceTelemetry::createFromBson(
+ fromjson("{$telemetry: 1}").firstElement(), getExpCtx()),
AssertionException,
ErrorCodes::FailedToParse);
}
-TEST_F(DocumentSourceQueryStatsTest, ShouldFailToParseIfNotRunOnAdmin) {
+TEST_F(DocumentSourceTelemetryTest, ShouldFailToParseIfNotRunOnAdmin) {
getExpCtx()->ns = NamespaceString::makeCollectionlessAggregateNSS(
DatabaseName::createDatabaseName_forTest(boost::none, "foo"));
- ASSERT_THROWS_CODE(DocumentSourceQueryStats::createFromBson(
- fromjson("{$queryStats: {}}").firstElement(), getExpCtx()),
+ ASSERT_THROWS_CODE(DocumentSourceTelemetry::createFromBson(
+ fromjson("{$telemetry: {}}").firstElement(), getExpCtx()),
AssertionException,
ErrorCodes::InvalidNamespace);
}
-TEST_F(DocumentSourceQueryStatsTest, ShouldFailToParseIfNotRunWithAggregateOne) {
+TEST_F(DocumentSourceTelemetryTest, ShouldFailToParseIfNotRunWithAggregateOne) {
getExpCtx()->ns = NamespaceString::createNamespaceString_forTest("admin.foo");
- ASSERT_THROWS_CODE(DocumentSourceQueryStats::createFromBson(
- fromjson("{$queryStats: {}}").firstElement(), getExpCtx()),
+ ASSERT_THROWS_CODE(DocumentSourceTelemetry::createFromBson(
+ fromjson("{$telemetry: {}}").firstElement(), getExpCtx()),
AssertionException,
ErrorCodes::InvalidNamespace);
}
-TEST_F(DocumentSourceQueryStatsTest, ShouldFailToParseIfUnrecognisedParameterSpecified) {
- ASSERT_THROWS_CODE(DocumentSourceQueryStats::createFromBson(
- fromjson("{$queryStats: {foo: true}}").firstElement(), getExpCtx()),
+TEST_F(DocumentSourceTelemetryTest, ShouldFailToParseIfUnrecognisedParameterSpecified) {
+ ASSERT_THROWS_CODE(DocumentSourceTelemetry::createFromBson(
+ fromjson("{$telemetry: {foo: true}}").firstElement(), getExpCtx()),
AssertionException,
ErrorCodes::FailedToParse);
}
-TEST_F(DocumentSourceQueryStatsTest, ParseAndSerialize) {
- auto obj = fromjson("{$queryStats: {}}");
- auto doc = DocumentSourceQueryStats::createFromBson(obj.firstElement(), getExpCtx());
- auto queryStatsOp = static_cast<DocumentSourceQueryStats*>(doc.get());
- auto expected = Document{{"$queryStats", Document{}}};
- ASSERT_DOCUMENT_EQ(queryStatsOp->serialize().getDocument(), expected);
+TEST_F(DocumentSourceTelemetryTest, ParseAndSerialize) {
+ auto obj = fromjson("{$telemetry: {}}");
+ auto doc = DocumentSourceTelemetry::createFromBson(obj.firstElement(), getExpCtx());
+ auto telemetryOp = static_cast<DocumentSourceTelemetry*>(doc.get());
+ auto expected = Document{{"$telemetry", Document{}}};
+ ASSERT_DOCUMENT_EQ(telemetryOp->serialize().getDocument(), expected);
}
} // namespace
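
The parse tests above exercise the same validation as parseSpec() earlier in this file's diff: the stage spec must be an object, only 'applyHmacToIdentifiers' (boolean) and 'hmacKey' (BinData of at least 32 bytes) are accepted, and anything else fails with FailedToParse. Below is a toy standalone version of that check, with BSON replaced by a map of variants purely for illustration.

#include <iostream>
#include <map>
#include <stdexcept>
#include <string>
#include <variant>

// Toy stand-in for the stage spec: BSON replaced by field -> variant.
using SpecValue = std::variant<bool, std::string>;
using Spec = std::map<std::string, SpecValue>;

// Mirrors the validation the tests exercise: only the two known options are
// accepted, with the documented types and the 32-byte key minimum.
void validateSpec(const Spec& spec) {
    for (const auto& [field, value] : spec) {
        if (field == "applyHmacToIdentifiers") {
            if (!std::holds_alternative<bool>(value))
                throw std::invalid_argument("applyHmacToIdentifiers must be boolean");
        } else if (field == "hmacKey") {
            if (!std::holds_alternative<std::string>(value) ||
                std::get<std::string>(value).size() < 32)
                throw std::invalid_argument("hmacKey must be at least 32 bytes");
        } else {
            throw std::invalid_argument("unknown option: " + field);
        }
    }
}

int main() {
    try {
        validateSpec({{"foo", true}});  // rejected, like the unrecognised-parameter test
    } catch (const std::exception& ex) {
        std::cout << ex.what() << '\n';
    }
    validateSpec({{"applyHmacToIdentifiers", true}});  // accepted
    std::cout << "ok\n";
    return 0;
}
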
diff --git a/src/mongo/db/pipeline/visitors/document_source_visitor_registry_mongod.h b/src/mongo/db/pipeline/visitors/document_source_visitor_registry_mongod.h
index 32ec042f6dc..24d11c814be 100644
--- a/src/mongo/db/pipeline/visitors/document_source_visitor_registry_mongod.h
+++ b/src/mongo/db/pipeline/visitors/document_source_visitor_registry_mongod.h
@@ -70,7 +70,6 @@
#include "mongo/db/pipeline/document_source_operation_metrics.h"
#include "mongo/db/pipeline/document_source_out.h"
#include "mongo/db/pipeline/document_source_plan_cache_stats.h"
-#include "mongo/db/pipeline/document_source_query_stats.h"
#include "mongo/db/pipeline/document_source_queue.h"
#include "mongo/db/pipeline/document_source_redact.h"
#include "mongo/db/pipeline/document_source_replace_root.h"
@@ -84,6 +83,7 @@
#include "mongo/db/pipeline/document_source_sort.h"
#include "mongo/db/pipeline/document_source_streaming_group.h"
#include "mongo/db/pipeline/document_source_tee_consumer.h"
+#include "mongo/db/pipeline/document_source_telemetry.h"
#include "mongo/db/pipeline/document_source_union_with.h"
#include "mongo/db/pipeline/document_source_unwind.h"
#include "mongo/db/pipeline/visitors/document_source_visitor_registry.h"
@@ -169,7 +169,7 @@ void registerMongodVisitor(ServiceContext* service) {
DocumentSourceSort,
DocumentSourceStreamingGroup,
DocumentSourceTeeConsumer,
- DocumentSourceQueryStats,
+ DocumentSourceTelemetry,
DocumentSourceUnionWith,
DocumentSourceUnwind>(&registry);
}
diff --git a/src/mongo/db/query/SConscript b/src/mongo/db/query/SConscript
index 7f9e1c69a00..22e24674e1d 100644
--- a/src/mongo/db/query/SConscript
+++ b/src/mongo/db/query/SConscript
@@ -262,7 +262,7 @@ env.Library(
'query_feature_flags.idl',
'query_knobs.idl',
'sbe_plan_cache_on_parameter_change.cpp',
- 'query_stats_util.cpp',
+ 'telemetry_util.cpp',
],
LIBDEPS_PRIVATE=[
'$BUILD_DIR/mongo/db/commands/test_commands_enabled',
@@ -366,7 +366,7 @@ env.Library(
target='op_metrics',
source=[
'query_shape.cpp',
- 'query_stats.cpp',
+ 'telemetry.cpp',
],
LIBDEPS=[
'$BUILD_DIR/mongo/base',
@@ -448,7 +448,6 @@ env.CppUnitTest(
"query_settings_test.cpp",
"query_shape_test.cpp",
"query_shape_test.idl",
- "query_stats_store_test.cpp",
"query_solution_test.cpp",
"rate_limiting_test.cpp",
"sbe_and_hash_test.cpp",
@@ -462,6 +461,7 @@ env.CppUnitTest(
"sbe_stage_builder_type_checker_test.cpp",
"shard_filterer_factory_mock.cpp",
"sort_pattern_test.cpp",
+ "telemetry_store_test.cpp",
"util/memory_util_test.cpp",
"view_response_formatter_test.cpp",
'map_reduce_output_format_test.cpp',
diff --git a/src/mongo/db/query/cqf_command_utils.cpp b/src/mongo/db/query/cqf_command_utils.cpp
index 5db15a5ceee..a6279c43400 100644
--- a/src/mongo/db/query/cqf_command_utils.cpp
+++ b/src/mongo/db/query/cqf_command_utils.cpp
@@ -111,7 +111,6 @@
#include "mongo/db/pipeline/document_source_operation_metrics.h"
#include "mongo/db/pipeline/document_source_out.h"
#include "mongo/db/pipeline/document_source_plan_cache_stats.h"
-#include "mongo/db/pipeline/document_source_query_stats.h"
#include "mongo/db/pipeline/document_source_queue.h"
#include "mongo/db/pipeline/document_source_redact.h"
#include "mongo/db/pipeline/document_source_replace_root.h"
@@ -125,6 +124,7 @@
#include "mongo/db/pipeline/document_source_sort.h"
#include "mongo/db/pipeline/document_source_streaming_group.h"
#include "mongo/db/pipeline/document_source_tee_consumer.h"
+#include "mongo/db/pipeline/document_source_telemetry.h"
#include "mongo/db/pipeline/document_source_union_with.h"
#include "mongo/db/pipeline/document_source_unwind.h"
#include "mongo/db/pipeline/visitors/document_source_visitor_registry_mongod.h"
diff --git a/src/mongo/db/query/find.cpp b/src/mongo/db/query/find.cpp
index dcd402e9c70..c54138afd4b 100644
--- a/src/mongo/db/query/find.cpp
+++ b/src/mongo/db/query/find.cpp
@@ -112,7 +112,7 @@ void endQueryOp(OperationContext* opCtx,
auto curOp = CurOp::get(opCtx);
// Fill out basic CurOp query exec properties. More metrics (nreturned and executionTime)
- // are collected within collectQueryStatsMongod.
+ // are collected within collectTelemetryMongod.
curOp->debug().cursorid = (cursor.has_value() ? cursor->getCursor()->cursorid() : -1);
curOp->debug().cursorExhausted = !cursor.has_value();
curOp->debug().additiveMetrics.nBatches = 1;
@@ -125,9 +125,9 @@ void endQueryOp(OperationContext* opCtx,
curOp->setEndOfOpMetrics(numResults);
if (cursor) {
- collectQueryStatsMongod(opCtx, *cursor);
+ collectTelemetryMongod(opCtx, *cursor);
} else {
- collectQueryStatsMongod(opCtx, std::move(curOp->debug().queryStatsRequestShapifier));
+ collectTelemetryMongod(opCtx, std::move(curOp->debug().telemetryRequestShapifier));
}
if (collection) {
diff --git a/src/mongo/db/query/find_request_shapifier.cpp b/src/mongo/db/query/find_request_shapifier.cpp
index 83560f3acdb..8002a152a13 100644
--- a/src/mongo/db/query/find_request_shapifier.cpp
+++ b/src/mongo/db/query/find_request_shapifier.cpp
@@ -34,7 +34,7 @@
#include "mongo/db/query/query_request_helper.h"
#include "mongo/db/query/query_shape.h"
-namespace mongo::query_stats {
+namespace mongo::telemetry {
void addNonShapeObjCmdLiterals(BSONObjBuilder* bob,
const FindCommandRequest& findCommand,
@@ -58,8 +58,8 @@ void addNonShapeObjCmdLiterals(BSONObjBuilder* bob,
}
-BSONObj FindRequestShapifier::makeQueryStatsKey(const SerializationOptions& opts,
- OperationContext* opCtx) const {
+BSONObj FindRequestShapifier::makeTelemetryKey(const SerializationOptions& opts,
+ OperationContext* opCtx) const {
auto expCtx = make_intrusive<ExpressionContext>(
opCtx, _request, nullptr /* collator doesn't matter here.*/, false /* mayDbProfile */);
expCtx->maxFeatureCompatibilityVersion = boost::none; // Ensure all features are allowed.
@@ -67,10 +67,10 @@ BSONObj FindRequestShapifier::makeQueryStatsKey(const SerializationOptions& opts
// expressions/stages, so it's a side effect tied to parsing. We must stop expression counters
// before re-parsing to avoid adding to the counters more than once per a given query.
expCtx->stopExpressionCounters();
- return makeQueryStatsKey(opts, expCtx);
+ return makeTelemetryKey(opts, expCtx);
}
-BSONObj FindRequestShapifier::makeQueryStatsKey(
+BSONObj FindRequestShapifier::makeTelemetryKey(
const SerializationOptions& opts, const boost::intrusive_ptr<ExpressionContext>& expCtx) const {
BSONObjBuilder bob;
@@ -102,4 +102,4 @@ BSONObj FindRequestShapifier::makeQueryStatsKey(
return bob.obj();
}
-} // namespace mongo::query_stats
+} // namespace mongo::telemetry
diff --git a/src/mongo/db/query/find_request_shapifier.h b/src/mongo/db/query/find_request_shapifier.h
index 79f8223052a..b03f84eb1ab 100644
--- a/src/mongo/db/query/find_request_shapifier.h
+++ b/src/mongo/db/query/find_request_shapifier.h
@@ -32,7 +32,7 @@
#include "mongo/db/query/find_command_gen.h"
#include "mongo/db/query/request_shapifier.h"
-namespace mongo::query_stats {
+namespace mongo::telemetry {
/**
* Handles shapification for FindCommandRequests.
@@ -49,13 +49,12 @@ public:
virtual ~FindRequestShapifier() = default;
- BSONObj makeQueryStatsKey(const SerializationOptions& opts,
- OperationContext* opCtx) const final;
+ BSONObj makeTelemetryKey(const SerializationOptions& opts, OperationContext* opCtx) const final;
- BSONObj makeQueryStatsKey(const SerializationOptions& opts,
- const boost::intrusive_ptr<ExpressionContext>& expCtx) const final;
+ BSONObj makeTelemetryKey(const SerializationOptions& opts,
+ const boost::intrusive_ptr<ExpressionContext>& expCtx) const final;
private:
FindCommandRequest _request;
};
-} // namespace mongo::query_stats
+} // namespace mongo::telemetry
diff --git a/src/mongo/db/query/query_feature_flags.idl b/src/mongo/db/query/query_feature_flags.idl
index e18477beb9c..cbd970ca47d 100644
--- a/src/mongo/db/query/query_feature_flags.idl
+++ b/src/mongo/db/query/query_feature_flags.idl
@@ -90,9 +90,9 @@ feature_flags:
default: false
shouldBeFCVGated: true
- featureFlagQueryStats:
- description: "Feature flag for enabling the queryStats store."
- cpp_varname: gFeatureFlagQueryStats
+ featureFlagTelemetry:
+ description: "Feature flag for enabling the telemetry store."
+ cpp_varname: gFeatureFlagTelemetry
default: false
shouldBeFCVGated: true
diff --git a/src/mongo/db/query/query_knobs.idl b/src/mongo/db/query/query_knobs.idl
index 4fc1e362524..d631ab42d3d 100644
--- a/src/mongo/db/query/query_knobs.idl
+++ b/src/mongo/db/query/query_knobs.idl
@@ -36,7 +36,7 @@ global:
- "mongo/db/query/ce_mode_parameter.h"
- "mongo/db/query/explain_version_validator.h"
- "mongo/db/query/sbe_plan_cache_on_parameter_change.h"
- - "mongo/db/query/query_stats_util.h"
+ - "mongo/db/query/telemetry_util.h"
- "mongo/platform/atomic_proxy.h"
- "mongo/platform/atomic_word.h"
@@ -1018,32 +1018,32 @@ server_parameters:
default: false
test_only: true
- internalQueryStatsSamplingRate:
- description: "The maximum number of queries per second that are sampled for query stats.
+ internalQueryConfigureTelemetrySamplingRate:
+ description: "The maximum number of queries per second that are sampled for query telemetry.
If the rate of queries goes above this number, then rate limiting will kick in, and any
further queries will not be sampled. To sample all queries, this can be set to -1. This can be
- set to 0 to turn queryStats off completely."
+ set to 0 to turn telemetry off completely."
set_at: [ startup, runtime ]
- cpp_varname: "queryQueryStatsSamplingRate"
+ cpp_varname: "queryTelemetrySamplingRate"
cpp_vartype: AtomicWord<int>
default: 0
validator:
gte: -1
- on_update: query_stats_util::onQueryStatsSamplingRateUpdate
+ on_update: telemetry_util::onTelemetrySamplingRateUpdate
- internalQueryConfigureQueryStatsCacheSize:
- description: "The maximum amount of memory that the system will allocate for the query queryStats
+ internalQueryConfigureTelemetryCacheSize:
+ description: "The maximum amount of memory that the system will allocate for the query telemetry
cache. This will accept values in either of the following formats:
1. <number>% indicates a percentage of the physical memory available to the process. E.g.: 15%.
2. <number>(MB|GB), indicates the amount of memory in MB or GB. E.g.: 1.5GB, 100MB.
The default value is 1%, which means 1% of the physical memory available to the process."
set_at: [ startup, runtime ]
- cpp_varname: "queryQueryStatsStoreSize"
+ cpp_varname: "queryTelemetryStoreSize"
cpp_vartype: synchronized_value<std::string>
default: "1%"
- on_update: query_stats_util::onQueryStatsStoreSizeUpdate
+ on_update: telemetry_util::onTelemetryStoreSizeUpdate
validator:
- callback: query_stats_util::validateQueryStatsStoreSize
+ callback: telemetry_util::validateTelemetryStoreSize
internalQueryColumnScanMinCollectionSizeBytes:
description: "The min collection size threshold for which column scan will always be allowed. If
@@ -1130,7 +1130,7 @@ server_parameters:
default: 60000
validator:
gte: 0
-
+
internalQueryAggMulticastMaxConcurrency:
description: "Max number of concurrent requests when aggregations are sent to all shard servers"
set_at: startup
@@ -1173,8 +1173,8 @@ server_parameters:
gte: 0
internalQueryAutoParameterizationMaxParameterCount:
- description: "The maximum numbers of parameters that query auto-parameterization can extract from a query.
- If auto parameterizating a query would result in a greater number of parameters than the limit,
+ description: "The maximum numbers of parameters that query auto-parameterization can extract from a query.
+ If auto parameterizating a query would result in a greater number of parameters than the limit,
then auto parameterization will not be performed.
If set to 0, then no limit will be applied."
set_at: [ startup, runtime ]
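
internalQueryConfigureTelemetryCacheSize accepts either a percentage of the process's physical memory ("<number>%") or an absolute size ("<number>MB" / "<number>GB"), and the resulting size is further capped at an upper limit (the telemetry.cpp hunk further down passes 1 GB / 25% of system memory to the cap helper). The sketch below is a hypothetical standalone parser for those two formats, not mongod's memory_util; the 16 GB host size and the single 1 GB cap are made-up simplifications.

#include <algorithm>
#include <cstdint>
#include <iostream>
#include <stdexcept>
#include <string>

// Hypothetical numbers for illustration only.
constexpr uint64_t kTotalSystemMemoryBytes = 16ull * 1024 * 1024 * 1024;  // pretend 16 GB host
constexpr uint64_t kUpperLimitBytes = 1ull * 1024 * 1024 * 1024;          // simplified 1 GB cap

// Accepts "<number>%", "<number>MB" or "<number>GB", per the knob description.
uint64_t parseStoreSize(const std::string& spec) {
    if (!spec.empty() && spec.back() == '%') {
        double pct = std::stod(spec.substr(0, spec.size() - 1));
        return static_cast<uint64_t>(kTotalSystemMemoryBytes * pct / 100.0);
    }
    if (spec.size() > 2 && spec.compare(spec.size() - 2, 2, "GB") == 0)
        return static_cast<uint64_t>(std::stod(spec.substr(0, spec.size() - 2)) * 1024 * 1024 * 1024);
    if (spec.size() > 2 && spec.compare(spec.size() - 2, 2, "MB") == 0)
        return static_cast<uint64_t>(std::stod(spec.substr(0, spec.size() - 2)) * 1024 * 1024);
    throw std::invalid_argument("expected <number>%, <number>MB or <number>GB");
}

int main() {
    for (const std::string spec : {"1%", "100MB", "1.5GB"}) {
        uint64_t requested = parseStoreSize(spec);
        uint64_t capped = std::min(requested, kUpperLimitBytes);  // store capped at its upper limit
        std::cout << spec << " -> " << capped << " bytes\n";
    }
    return 0;
}
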
diff --git a/src/mongo/db/query/query_shape.cpp b/src/mongo/db/query/query_shape.cpp
index 3f9ed7fbfb6..519b1115558 100644
--- a/src/mongo/db/query/query_shape.cpp
+++ b/src/mongo/db/query/query_shape.cpp
@@ -227,7 +227,7 @@ BSONObj extractQueryShape(const FindCommandRequest& findCommand,
expCtx,
ExtensionsCallbackNoop(),
MatchExpressionParser::kAllowAllSpecialFeatures),
- "Failed to parse 'filter' option when making queryStats key");
+ "Failed to parse 'filter' option when making telemetry key");
bob.append(FindCommandRequest::kFilterFieldName, filterExpr->serialize(opts));
}
diff --git a/src/mongo/db/query/query_shape.h b/src/mongo/db/query/query_shape.h
index c0d4328d08b..0fa0d7c863e 100644
--- a/src/mongo/db/query/query_shape.h
+++ b/src/mongo/db/query/query_shape.h
@@ -40,7 +40,7 @@ constexpr StringData kLiteralArgString = "?"_sd;
/**
* Computes a BSONObj that is meant to be used to classify queries according to their shape, for the
- * purposes of collecting queryStats.
+ * purposes of collecting telemetry.
*
* For example, if the MatchExpression represents {a: 2}, it will return the same BSONObj as the
* MatchExpression for {a: 1}, {a: 10}, and {a: {$eq: 2}} (identical bits but not sharing memory)
diff --git a/src/mongo/db/query/request_shapifier.h b/src/mongo/db/query/request_shapifier.h
index 37004197fd0..1bae8f913f9 100644
--- a/src/mongo/db/query/request_shapifier.h
+++ b/src/mongo/db/query/request_shapifier.h
@@ -34,27 +34,27 @@
#include "mongo/db/query/serialization_options.h"
#include "mongo/rpc/metadata/client_metadata.h"
-namespace mongo::query_stats {
+namespace mongo::telemetry {
/**
- * An abstract base class to handle query shapification for queryStats. Each request type should
- * define its own shapification strategy in its implementation of makeQueryStatsKey(), and then a
- * request should be registered with queryStats via query_stats::registerRequest(RequestShapifier).
+ * An abstract base class to handle query shapification for telemetry. Each request type should
+ * define its own shapification strategy in its implementation of makeTelemetryKey(), and then a
+ * request should be registered with telemetry via telemetry::registerRequest(RequestShapifier).
*/
class RequestShapifier {
public:
virtual ~RequestShapifier() = default;
/**
- * makeQueryStatsKey generates the telemetry key representative of the specific request's
+ * makeTelemetryKey generates the telemetry key representative of the specific request's
* payload. If there exists an ExpressionContext set up to parse and evaluate the request,
- * makeQueryStatsKey should be called with that ExpressionContext. If not, you can call the
+ * makeTelemetryKey should be called with that ExpressionContext. If not, you can call the
* overload that accepts the OperationContext and will construct a minimally-acceptable
* ExpressionContext for the sake of generating the key.
*/
- virtual BSONObj makeQueryStatsKey(const SerializationOptions& opts,
- OperationContext* opCtx) const = 0;
- virtual BSONObj makeQueryStatsKey(
+ virtual BSONObj makeTelemetryKey(const SerializationOptions& opts,
+ OperationContext* opCtx) const = 0;
+ virtual BSONObj makeTelemetryKey(
const SerializationOptions& opts,
const boost::intrusive_ptr<ExpressionContext>& expCtx) const = 0;
@@ -79,4 +79,4 @@ protected:
BSONObj _commentObj;
boost::optional<BSONElement> _comment = boost::none;
};
-} // namespace mongo::query_stats
+} // namespace mongo::telemetry
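
The query_shape.h and RequestShapifier comments above describe the core idea: a request is reduced to a shape in which literal values are scrubbed, so queries that differ only in constants ({a: 2}, {a: 10}, {a: {$eq: 2}}) produce the same key, and the hash of that key is what the store is indexed by. The following is a toy standalone illustration of literal scrubbing; the map-based "filter" and the '?' placeholder are simplifications of the real MatchExpression/BSON serialization.

#include <functional>
#include <iostream>
#include <map>
#include <string>

// Toy "filter": field name -> literal value. The real code walks a parsed
// MatchExpression and serializes it with literals replaced.
using Filter = std::map<std::string, int>;

// Build a shape string with every literal replaced by '?', so requests that
// differ only in constants collapse to the same key.
std::string extractShape(const Filter& filter) {
    std::string shape = "{";
    for (const auto& [field, value] : filter) {
        (void)value;  // the literal is intentionally dropped
        shape += field + ": '?', ";
    }
    shape += "}";
    return shape;
}

int main() {
    Filter a{{"a", 2}}, b{{"a", 10}};
    std::cout << extractShape(a) << '\n';
    std::cout << std::boolalpha
              << (std::hash<std::string>{}(extractShape(a)) ==
                  std::hash<std::string>{}(extractShape(b)))  // same store key
              << '\n';
    return 0;
}

Running it prints the shared shape and "true", since both filters hash to the same store key.
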
diff --git a/src/mongo/db/query/query_stats.cpp b/src/mongo/db/query/telemetry.cpp
index 6b99a43fc3f..af17da7af02 100644
--- a/src/mongo/db/query/query_stats.cpp
+++ b/src/mongo/db/query/telemetry.cpp
@@ -27,7 +27,7 @@
* it in the license file.
*/
-#include "mongo/db/query/query_stats.h"
+#include "mongo/db/query/telemetry.h"
#include "mongo/crypto/hash_block.h"
#include "mongo/crypto/sha256_block.h"
@@ -45,10 +45,10 @@
#include "mongo/db/query/query_feature_flags_gen.h"
#include "mongo/db/query/query_planner_params.h"
#include "mongo/db/query/query_request_helper.h"
-#include "mongo/db/query/query_stats_util.h"
#include "mongo/db/query/rate_limiting.h"
#include "mongo/db/query/serialization_options.h"
#include "mongo/db/query/sort_pattern.h"
+#include "mongo/db/query/telemetry_util.h"
#include "mongo/logv2/log.h"
#include "mongo/rpc/metadata/client_metadata.h"
#include "mongo/util/assert_util.h"
@@ -62,7 +62,7 @@
namespace mongo {
-namespace query_stats {
+namespace telemetry {
/**
* Redacts all BSONObj field names as if they were paths, unless the field name is a special hint
@@ -78,63 +78,63 @@ boost::optional<std::string> getApplicationName(const OperationContext* opCtx) {
}
} // namespace
-CounterMetric queryStatsStoreSizeEstimateBytesMetric("queryStats.queryStatsStoreSizeEstimateBytes");
+CounterMetric telemetryStoreSizeEstimateBytesMetric("telemetry.telemetryStoreSizeEstimateBytes");
namespace {
-CounterMetric queryStatsEvictedMetric("queryStats.numEvicted");
-CounterMetric queryStatsRateLimitedRequestsMetric("queryStats.numRateLimitedRequests");
-CounterMetric queryStatsStoreWriteErrorsMetric("queryStats.numQueryStatsStoreWriteErrors");
+CounterMetric telemetryEvictedMetric("telemetry.numEvicted");
+CounterMetric telemetryRateLimitedRequestsMetric("telemetry.numRateLimitedRequests");
+CounterMetric telemetryStoreWriteErrorsMetric("telemetry.numTelemetryStoreWriteErrors");
/**
- * Cap the queryStats store size.
+ * Cap the telemetry store size.
*/
-size_t capQueryStatsStoreSize(size_t requestedSize) {
+size_t capTelemetryStoreSize(size_t requestedSize) {
size_t cappedStoreSize = memory_util::capMemorySize(
requestedSize /*requestedSizeBytes*/, 1 /*maximumSizeGB*/, 25 /*percentTotalSystemMemory*/);
- // If capped size is less than requested size, the queryStats store has been capped at its
+ // If capped size is less than requested size, the telemetry store has been capped at its
// upper limit.
if (cappedStoreSize < requestedSize) {
LOGV2_DEBUG(7106502,
1,
- "The queryStats store size has been capped",
+ "The telemetry store size has been capped",
"cappedSize"_attr = cappedStoreSize);
}
return cappedStoreSize;
}
/**
- * Get the queryStats store size based on the query job's value.
+ * Get the telemetry store size based on the query job's value.
*/
-size_t getQueryStatsStoreSize() {
- auto status = memory_util::MemorySize::parse(queryQueryStatsStoreSize.get());
+size_t getTelemetryStoreSize() {
+ auto status = memory_util::MemorySize::parse(queryTelemetryStoreSize.get());
uassertStatusOK(status);
size_t requestedSize = memory_util::convertToSizeInBytes(status.getValue());
- return capQueryStatsStoreSize(requestedSize);
+ return capTelemetryStoreSize(requestedSize);
}
/**
- * A manager for the queryStats store allows a "pointer swap" on the queryStats store itself. The
+ * A manager for the telemetry store allows a "pointer swap" on the telemetry store itself. The
* usage patterns are as follows:
*
- * - Updating the queryStats store uses the `getQueryStatsStore()` method. The queryStats store
+ * - Updating the telemetry store uses the `getTelemetryStore()` method. The telemetry store
* instance is obtained, entries are looked up and mutated, or created anew.
- * - The queryStats store is "reset". This involves atomically allocating a new instance, once
+ * - The telemetry store is "reset". This involves atomically allocating a new instance, once
* there are no more updaters (readers of the store "pointer"), and returning the existing
* instance.
*/
-class QueryStatsStoreManager {
+class TelemetryStoreManager {
public:
- template <typename... QueryStatsStoreArgs>
- QueryStatsStoreManager(size_t cacheSize, size_t numPartitions)
- : _queryStatsStore(std::make_unique<QueryStatsStore>(cacheSize, numPartitions)),
+ template <typename... TelemetryStoreArgs>
+ TelemetryStoreManager(size_t cacheSize, size_t numPartitions)
+ : _telemetryStore(std::make_unique<TelemetryStore>(cacheSize, numPartitions)),
_maxSize(cacheSize) {}
/**
- * Acquire the instance of the queryStats store.
+ * Acquire the instance of the telemetry store.
*/
- QueryStatsStore& getQueryStatsStore() {
- return *_queryStatsStore;
+ TelemetryStore& getTelemetryStore() {
+ return *_telemetryStore;
}
size_t getMaxSize() {
@@ -142,93 +142,92 @@ public:
}
/**
- * Resize the queryStats store and return the number of evicted
+ * Resize the telemetry store and return the number of evicted
* entries.
*/
size_t resetSize(size_t cacheSize) {
_maxSize = cacheSize;
- return _queryStatsStore->reset(cacheSize);
+ return _telemetryStore->reset(cacheSize);
}
private:
- std::unique_ptr<QueryStatsStore> _queryStatsStore;
+ std::unique_ptr<TelemetryStore> _telemetryStore;
/**
- * Max size of the queryStats store. Tracked here to avoid having to recompute after it's
- * divided up into partitions.
+ * Max size of the telemetry store. Tracked here to avoid having to recompute after it's divided
+ * up into partitions.
*/
size_t _maxSize;
};
-const auto queryStatsStoreDecoration =
- ServiceContext::declareDecoration<std::unique_ptr<QueryStatsStoreManager>>();
+const auto telemetryStoreDecoration =
+ ServiceContext::declareDecoration<std::unique_ptr<TelemetryStoreManager>>();
-const auto queryStatsRateLimiter =
+const auto telemetryRateLimiter =
ServiceContext::declareDecoration<std::unique_ptr<RateLimiting>>();
-class TelemetryOnParamChangeUpdaterImpl final : public query_stats_util::OnParamChangeUpdater {
+class TelemetryOnParamChangeUpdaterImpl final : public telemetry_util::OnParamChangeUpdater {
public:
void updateCacheSize(ServiceContext* serviceCtx, memory_util::MemorySize memSize) final {
auto requestedSize = memory_util::convertToSizeInBytes(memSize);
- auto cappedSize = capQueryStatsStoreSize(requestedSize);
- auto& queryStatsStoreManager = queryStatsStoreDecoration(serviceCtx);
- size_t numEvicted = queryStatsStoreManager->resetSize(cappedSize);
- queryStatsEvictedMetric.increment(numEvicted);
+ auto cappedSize = capTelemetryStoreSize(requestedSize);
+ auto& telemetryStoreManager = telemetryStoreDecoration(serviceCtx);
+ size_t numEvicted = telemetryStoreManager->resetSize(cappedSize);
+ telemetryEvictedMetric.increment(numEvicted);
}
void updateSamplingRate(ServiceContext* serviceCtx, int samplingRate) {
- queryStatsRateLimiter(serviceCtx).get()->setSamplingRate(samplingRate);
+ telemetryRateLimiter(serviceCtx).get()->setSamplingRate(samplingRate);
}
};
-ServiceContext::ConstructorActionRegisterer queryStatsStoreManagerRegisterer{
- "QueryStatsStoreManagerRegisterer", [](ServiceContext* serviceCtx) {
+ServiceContext::ConstructorActionRegisterer telemetryStoreManagerRegisterer{
+ "TelemetryStoreManagerRegisterer", [](ServiceContext* serviceCtx) {
// It is possible that this is called before FCV is properly set up. Setting up the store if
// the flag is enabled but FCV is incorrect is safe, and guards against the FCV being
// changed to a supported version later.
- if (!feature_flags::gFeatureFlagQueryStats.isEnabledAndIgnoreFCVUnsafeAtStartup()) {
+ if (!feature_flags::gFeatureFlagTelemetry.isEnabledAndIgnoreFCVUnsafeAtStartup()) {
// featureFlags are not allowed to be changed at runtime. Therefore it's not an issue
- // to not create a queryStats store in ConstructorActionRegisterer at start up with the
+ // to not create a telemetry store in ConstructorActionRegisterer at start up with the
// flag off - because the flag can not be turned on at any point afterwards.
- query_stats_util::queryStatsStoreOnParamChangeUpdater(serviceCtx) =
- std::make_unique<query_stats_util::NoChangesAllowedTelemetryParamUpdater>();
+ telemetry_util::telemetryStoreOnParamChangeUpdater(serviceCtx) =
+ std::make_unique<telemetry_util::NoChangesAllowedTelemetryParamUpdater>();
return;
}
- query_stats_util::queryStatsStoreOnParamChangeUpdater(serviceCtx) =
+ telemetry_util::telemetryStoreOnParamChangeUpdater(serviceCtx) =
std::make_unique<TelemetryOnParamChangeUpdaterImpl>();
- size_t size = getQueryStatsStoreSize();
- auto&& globalQueryStatsStoreManager = queryStatsStoreDecoration(serviceCtx);
- // The plan cache and queryStats store should use the same number of partitions.
+ size_t size = getTelemetryStoreSize();
+ auto&& globalTelemetryStoreManager = telemetryStoreDecoration(serviceCtx);
+ // The plan cache and telemetry store should use the same number of partitions.
// That is, the number of cpu cores.
size_t numPartitions = ProcessInfo::getNumCores();
size_t partitionBytes = size / numPartitions;
- size_t metricsSize = sizeof(QueryStatsEntry);
+ size_t metricsSize = sizeof(TelemetryEntry);
if (partitionBytes < metricsSize * 10) {
numPartitions = size / metricsSize;
if (numPartitions < 1) {
numPartitions = 1;
}
}
- globalQueryStatsStoreManager =
- std::make_unique<QueryStatsStoreManager>(size, numPartitions);
- auto configuredSamplingRate = queryQueryStatsSamplingRate.load();
- queryStatsRateLimiter(serviceCtx) = std::make_unique<RateLimiting>(
+ globalTelemetryStoreManager = std::make_unique<TelemetryStoreManager>(size, numPartitions);
+ auto configuredSamplingRate = queryTelemetrySamplingRate.load();
+ telemetryRateLimiter(serviceCtx) = std::make_unique<RateLimiting>(
configuredSamplingRate < 0 ? INT_MAX : configuredSamplingRate);
}};
/**
- * Top-level checks for whether queryStats collection is enabled. If this returns false, we must go
+ * Top-level checks for whether telemetry collection is enabled. If this returns false, we must go
* no further.
*/
-bool isQueryStatsEnabled(const ServiceContext* serviceCtx) {
+bool isTelemetryEnabled(const ServiceContext* serviceCtx) {
// During initialization FCV may not yet be setup but queries could be run. We can't
- // check whether queryStats should be enabled without FCV, so default to not recording
+ // check whether telemetry should be enabled without FCV, so default to not recording
// those queries.
// TODO SERVER-75935 Remove FCV Check.
- return feature_flags::gFeatureFlagQueryStats.isEnabled(
+ return feature_flags::gFeatureFlagTelemetry.isEnabled(
serverGlobalParams.featureCompatibility) &&
- queryStatsStoreDecoration(serviceCtx)->getMaxSize() > 0;
+ telemetryStoreDecoration(serviceCtx)->getMaxSize() > 0;
}
/**
@@ -236,26 +235,26 @@ bool isQueryStatsEnabled(const ServiceContext* serviceCtx) {
* configuration for a global on/off decision and, if enabled, delegates to the rate limiter.
*/
bool shouldCollect(const ServiceContext* serviceCtx) {
- // Quick escape if queryStats is turned off.
- if (!isQueryStatsEnabled(serviceCtx)) {
+ // Quick escape if telemetry is turned off.
+ if (!isTelemetryEnabled(serviceCtx)) {
return false;
}
- // Cannot collect queryStats if sampling rate is not greater than 0. Note that we do not
- // increment queryStatsRateLimitedRequestsMetric here since queryStats is entirely disabled.
- if (queryStatsRateLimiter(serviceCtx)->getSamplingRate() <= 0) {
+ // Cannot collect telemetry if sampling rate is not greater than 0. Note that we do not
+ // increment telemetryRateLimitedRequestsMetric here since telemetry is entirely disabled.
+ if (telemetryRateLimiter(serviceCtx)->getSamplingRate() <= 0) {
return false;
}
- // Check if rate limiting allows us to collect queryStats for this request.
- if (queryStatsRateLimiter(serviceCtx)->getSamplingRate() < INT_MAX &&
- !queryStatsRateLimiter(serviceCtx)->handleRequestSlidingWindow()) {
- queryStatsRateLimitedRequestsMetric.increment();
+ // Check if rate limiting allows us to collect telemetry for this request.
+ if (telemetryRateLimiter(serviceCtx)->getSamplingRate() < INT_MAX &&
+ !telemetryRateLimiter(serviceCtx)->handleRequestSlidingWindow()) {
+ telemetryRateLimitedRequestsMetric.increment();
return false;
}
return true;
}
/**
- * Add a field to the find op's queryStats key. The `value` will have hmac applied.
+ * Add a field to the find op's telemetry key. The `value` will have hmac applied.
*/
void addToFindKey(BSONObjBuilder& builder, const StringData& fieldName, const BSONObj& value) {
serializeBSONWhenNotEmpty(value.redact(false), fieldName, &builder);
@@ -289,7 +288,7 @@ void throwIfEncounteringFLEPayload(const BSONElement& e) {
/**
* Upon reading telemetry data, we apply hmac to some keys. This is the list. See
- * QueryStatsEntry::makeQueryStatsKey().
+ * TelemetryEntry::makeTelemetryKey().
*/
const stdx::unordered_set<std::string> kKeysToApplyHmac = {"pipeline", "find"};
@@ -310,7 +309,7 @@ std::string constantFieldNameHasher(const BSONElement& e) {
/**
* Admittedly an abuse of the BSON redaction interface, we recognize FLE payloads here and avoid
- * collecting queryStats for the query.
+ * collecting telemetry for the query.
*/
std::string fleSafeFieldNameRedactor(const BSONElement& e) {
throwIfEncounteringFLEPayload(e);
@@ -345,9 +344,9 @@ std::size_t hash(const BSONObj& obj) {
} // namespace
-BSONObj QueryStatsEntry::computeQueryStatsKey(OperationContext* opCtx,
- bool applyHmacToIdentifiers,
- std::string hmacKey) const {
+BSONObj TelemetryEntry::computeTelemetryKey(OperationContext* opCtx,
+ bool applyHmacToIdentifiers,
+ std::string hmacKey) const {
// The telemetry key for find queries is generated by serializing all the command fields
// and applying hmac if SerializationOptions indicate to do so. The resulting key is of the
// form:
@@ -370,7 +369,7 @@ BSONObj QueryStatsEntry::computeQueryStatsKey(OperationContext* opCtx,
[&](StringData sd) { return sha256HmacStringDataHasher(hmacKey, sd); },
LiteralSerializationPolicy::kToDebugTypeString)
: SerializationOptions(LiteralSerializationPolicy::kToDebugTypeString);
- return requestShapifier->makeQueryStatsKey(serializationOpts, opCtx);
+ return requestShapifier->makeTelemetryKey(serializationOpts, opCtx);
}
// TODO SERVER-73152 remove all special aggregation logic below
@@ -378,7 +377,7 @@ BSONObj QueryStatsEntry::computeQueryStatsKey(OperationContext* opCtx,
// { "agg": {...}, "namespace": "...", "applicationName": "...", ... }
//
// The part of the key we need to apply hmac to is the object in the <CMD_TYPE> element. In the
- // case of an aggregate() command, it will look something like: > "pipeline" : [ { "$queryStats"
+ // case of an aggregate() command, it will look something like: > "pipeline" : [ { "$telemetry"
// : {} },
// { "$addFields" : { "x" : { "$someExpr" {} } } } ],
// We should preserve the top-level stage names in the pipeline but apply hmac to all field
@@ -386,10 +385,10 @@ BSONObj QueryStatsEntry::computeQueryStatsKey(OperationContext* opCtx,
// TODO: SERVER-73152 literal and field name redaction for aggregate command.
if (!applyHmacToIdentifiers) {
- return oldQueryStatsKey;
+ return oldTelemetryKey;
}
BSONObjBuilder hmacAppliedBuilder;
- for (BSONElement e : oldQueryStatsKey) {
+ for (BSONElement e : oldTelemetryKey) {
if ((e.type() == Object || e.type() == Array) &&
kKeysToApplyHmac.count(e.fieldNameStringData().toString()) == 1) {
auto hmacApplicator = [&](BSONObjBuilder subObj, const BSONObj& obj) {
@@ -424,24 +423,25 @@ BSONObj QueryStatsEntry::computeQueryStatsKey(OperationContext* opCtx,
}
// The originating command/query does not persist through the end of query execution. In order to
-// pair the queryStats metrics that are collected at the end of execution with the original query,
-// it is necessary to register the original query during planning and persist it after execution.
+// pair the telemetry metrics that are collected at the end of execution with the original query, it
+// is necessary to register the original query during planning and persist it after
+// execution.
// During planning, registerRequest is called to serialize the query shape and context (together,
-// the queryStats context) and save it to OpDebug. Moreover, as query execution may span more than
+// the telemetry context) and save it to OpDebug. Moreover, as query execution may span more than
// one request/operation and OpDebug does not persist through cursor iteration, it is necessary to
-// communicate the queryStats context across operations. In this way, the queryStats context is
-// registered to the cursor, so upon getMore() calls, the cursor manager passes the queryStats key
+// communicate the telemetry context across operations. In this way, the telemetry context is
+// registered to the cursor, so upon getMore() calls, the cursor manager passes the telemetry key
// from the pinned cursor to the new OpDebug.
-// Once query execution is complete, the queryStats context is grabbed from OpDebug, a queryStats
-// key is generated from this and metrics are paired to this key in the queryStats store.
+// Once query execution is complete, the telemetry context is grabbed from OpDebug, a telemetry key
+// is generated from this and metrics are paired to this key in the telemetry store.
void registerAggRequest(const AggregateCommandRequest& request, OperationContext* opCtx) {
- if (!isQueryStatsEnabled(opCtx->getServiceContext())) {
+ if (!isTelemetryEnabled(opCtx->getServiceContext())) {
return;
}
- // Queries against metadata collections should never appear in queryStats data.
+ // Queries against metadata collections should never appear in telemetry data.
if (request.getNamespace().isFLE2StateCollection()) {
return;
}
@@ -450,8 +450,8 @@ void registerAggRequest(const AggregateCommandRequest& request, OperationContext
return;
}
- BSONObjBuilder queryStatsKey;
- BSONObjBuilder pipelineBuilder = queryStatsKey.subarrayStart("pipeline"_sd);
+ BSONObjBuilder telemetryKey;
+ BSONObjBuilder pipelineBuilder = telemetryKey.subarrayStart("pipeline"_sd);
try {
for (auto&& stage : request.getPipeline()) {
BSONObjBuilder stageBuilder = pipelineBuilder.subobjStart("stage"_sd);
@@ -459,31 +459,31 @@ void registerAggRequest(const AggregateCommandRequest& request, OperationContext
stageBuilder.done();
}
pipelineBuilder.done();
- queryStatsKey.append("namespace", request.getNamespace().toString());
+ telemetryKey.append("namespace", request.getNamespace().toString());
if (request.getReadConcern()) {
- queryStatsKey.append("readConcern", *request.getReadConcern());
+ telemetryKey.append("readConcern", *request.getReadConcern());
}
if (auto metadata = ClientMetadata::get(opCtx->getClient())) {
- queryStatsKey.append("applicationName", metadata->getApplicationName());
+ telemetryKey.append("applicationName", metadata->getApplicationName());
}
} catch (ExceptionFor<ErrorCodes::EncounteredFLEPayloadWhileApplyingHmac>&) {
return;
}
- BSONObj key = queryStatsKey.obj();
- CurOp::get(opCtx)->debug().queryStatsStoreKeyHash = hash(key);
- CurOp::get(opCtx)->debug().queryStatsStoreKey = key.getOwned();
+ BSONObj key = telemetryKey.obj();
+ CurOp::get(opCtx)->debug().telemetryStoreKeyHash = hash(key);
+ CurOp::get(opCtx)->debug().telemetryStoreKey = key.getOwned();
}
void registerRequest(std::unique_ptr<RequestShapifier> requestShapifier,
const NamespaceString& collection,
OperationContext* opCtx,
const boost::intrusive_ptr<ExpressionContext>& expCtx) {
- if (!isQueryStatsEnabled(opCtx->getServiceContext())) {
+ if (!isTelemetryEnabled(opCtx->getServiceContext())) {
return;
}
- // Queries against metadata collections should never appear in queryStats data.
+ // Queries against metadata collections should never appear in telemetry data.
if (collection.isFLE2StateCollection()) {
return;
}
@@ -494,53 +494,53 @@ void registerRequest(std::unique_ptr<RequestShapifier> requestShapifier,
SerializationOptions options;
options.literalPolicy = LiteralSerializationPolicy::kToDebugTypeString;
options.replacementForLiteralArgs = replacementForLiteralArgs;
- CurOp::get(opCtx)->debug().queryStatsStoreKeyHash =
- hash(requestShapifier->makeQueryStatsKey(options, expCtx));
- CurOp::get(opCtx)->debug().queryStatsRequestShapifier = std::move(requestShapifier);
+ CurOp::get(opCtx)->debug().telemetryStoreKeyHash =
+ hash(requestShapifier->makeTelemetryKey(options, expCtx));
+ CurOp::get(opCtx)->debug().telemetryRequestShapifier = std::move(requestShapifier);
}
-QueryStatsStore& getQueryStatsStore(OperationContext* opCtx) {
+TelemetryStore& getTelemetryStore(OperationContext* opCtx) {
uassert(6579000,
"Telemetry is not enabled without the feature flag on and a cache size greater than 0 "
"bytes",
- isQueryStatsEnabled(opCtx->getServiceContext()));
- return queryStatsStoreDecoration(opCtx->getServiceContext())->getQueryStatsStore();
+ isTelemetryEnabled(opCtx->getServiceContext()));
+ return telemetryStoreDecoration(opCtx->getServiceContext())->getTelemetryStore();
}
-void writeQueryStats(OperationContext* opCtx,
- boost::optional<size_t> queryStatsKeyHash,
- boost::optional<BSONObj> queryStatsKey,
- std::unique_ptr<RequestShapifier> requestShapifier,
- const uint64_t queryExecMicros,
- const uint64_t docsReturned) {
- if (!queryStatsKeyHash) {
+void writeTelemetry(OperationContext* opCtx,
+ boost::optional<size_t> telemetryKeyHash,
+ boost::optional<BSONObj> telemetryKey,
+ std::unique_ptr<RequestShapifier> requestShapifier,
+ const uint64_t queryExecMicros,
+ const uint64_t docsReturned) {
+ if (!telemetryKeyHash) {
return;
}
- auto&& queryStatsStore = getQueryStatsStore(opCtx);
+ auto&& telemetryStore = getTelemetryStore(opCtx);
auto&& [statusWithMetrics, partitionLock] =
- queryStatsStore.getWithPartitionLock(*queryStatsKeyHash);
- std::shared_ptr<QueryStatsEntry> metrics;
+ telemetryStore.getWithPartitionLock(*telemetryKeyHash);
+ std::shared_ptr<TelemetryEntry> metrics;
if (statusWithMetrics.isOK()) {
metrics = *statusWithMetrics.getValue();
} else {
- BSONObj key = queryStatsKey.value_or(BSONObj{});
+ BSONObj key = telemetryKey.value_or(BSONObj{});
size_t numEvicted =
- queryStatsStore.put(*queryStatsKeyHash,
- std::make_shared<QueryStatsEntry>(
- std::move(requestShapifier), CurOp::get(opCtx)->getNSS(), key),
- partitionLock);
- queryStatsEvictedMetric.increment(numEvicted);
- auto newMetrics = partitionLock->get(*queryStatsKeyHash);
+ telemetryStore.put(*telemetryKeyHash,
+ std::make_shared<TelemetryEntry>(
+ std::move(requestShapifier), CurOp::get(opCtx)->getNSS(), key),
+ partitionLock);
+ telemetryEvictedMetric.increment(numEvicted);
+ auto newMetrics = partitionLock->get(*telemetryKeyHash);
if (!newMetrics.isOK()) {
// This can happen if the budget is immediately exceeded. Specifically, if there is
// not enough room for a single new entry if the number of partitions is too high
// relative to the size.
- queryStatsStoreWriteErrorsMetric.increment();
+ telemetryStoreWriteErrorsMetric.increment();
LOGV2_DEBUG(7560900,
1,
- "Failed to store queryStats entry.",
+ "Failed to store telemetry entry.",
"status"_attr = newMetrics.getStatus(),
- "queryStatsKeyHash"_attr = queryStatsKeyHash);
+ "telemetryKeyHash"_attr = telemetryKeyHash);
return;
}
metrics = newMetrics.getValue()->second;
@@ -551,5 +551,5 @@ void writeQueryStats(OperationContext* opCtx,
metrics->queryExecMicros.aggregate(queryExecMicros);
metrics->docsReturned.aggregate(docsReturned);
}
-} // namespace query_stats
+} // namespace telemetry
} // namespace mongo
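
shouldCollect() above gates collection in a fixed order: the store must be enabled (feature flag plus a non-zero max size), the sampling rate must be positive, and then, unless the rate is effectively unlimited (a configured -1 becomes INT_MAX), the sliding-window rate limiter must admit the request; rejected requests bump telemetry.numRateLimitedRequests. The sketch below mirrors that decision order with a simplified one-second sliding window; it is an illustrative stand-in, not mongod's RateLimiting class.

#include <chrono>
#include <deque>
#include <iostream>

// Simplified stand-in: allow at most `samplingRate` requests per one-second window.
class SlidingWindowLimiter {
public:
    explicit SlidingWindowLimiter(int samplingRate) : _samplingRate(samplingRate) {}

    int getSamplingRate() const { return _samplingRate; }

    bool handleRequest() {
        using namespace std::chrono;
        auto now = steady_clock::now();
        while (!_admitted.empty() && now - _admitted.front() > seconds(1))
            _admitted.pop_front();                       // drop requests outside the window
        if (static_cast<int>(_admitted.size()) >= _samplingRate)
            return false;                                // over budget: do not sample
        _admitted.push_back(now);
        return true;
    }

private:
    int _samplingRate;
    std::deque<std::chrono::steady_clock::time_point> _admitted;
};

// Mirrors the decision order in the hunk above; the real gate also skips the
// limiter entirely when the rate is INT_MAX (sample everything).
bool shouldCollect(bool storeEnabled, SlidingWindowLimiter& limiter) {
    if (!storeEnabled) return false;
    if (limiter.getSamplingRate() <= 0) return false;
    return limiter.handleRequest();
}

int main() {
    SlidingWindowLimiter limiter(2);  // sample at most 2 queries per second
    for (int i = 0; i < 4; ++i)
        std::cout << "request " << i << ": " << std::boolalpha
                  << shouldCollect(true, limiter) << '\n';
    return 0;
}
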
diff --git a/src/mongo/db/query/query_stats.h b/src/mongo/db/query/telemetry.h
index 59d79d6c114..e7e0f3ccfd1 100644
--- a/src/mongo/db/query/query_stats.h
+++ b/src/mongo/db/query/telemetry.h
@@ -55,7 +55,7 @@ namespace {
using BSONNumeric = long long;
} // namespace
-namespace query_stats {
+namespace telemetry {
/**
* An aggregated metric stores a compressed view of data. It balances the loss of information
@@ -95,26 +95,26 @@ struct AggregatedMetric {
uint64_t sumOfSquares = 0;
};
-extern CounterMetric queryStatsStoreSizeEstimateBytesMetric;
+extern CounterMetric telemetryStoreSizeEstimateBytesMetric;
// Used to aggregate the metrics for one telemetry key over all its executions.
-class QueryStatsEntry {
+class TelemetryEntry {
public:
- QueryStatsEntry(std::unique_ptr<RequestShapifier> requestShapifier,
- NamespaceStringOrUUID nss,
- const BSONObj& cmdObj)
+ TelemetryEntry(std::unique_ptr<RequestShapifier> requestShapifier,
+ NamespaceStringOrUUID nss,
+ const BSONObj& cmdObj)
: firstSeenTimestamp(Date_t::now().toMillisSinceEpoch() / 1000, 0),
requestShapifier(std::move(requestShapifier)),
nss(nss),
- oldQueryStatsKey(cmdObj.copy()) {
- queryStatsStoreSizeEstimateBytesMetric.increment(sizeof(QueryStatsEntry) + sizeof(BSONObj));
+ oldTelemetryKey(cmdObj.copy()) {
+ telemetryStoreSizeEstimateBytesMetric.increment(sizeof(TelemetryEntry) + sizeof(BSONObj));
}
- ~QueryStatsEntry() {
- queryStatsStoreSizeEstimateBytesMetric.decrement(sizeof(QueryStatsEntry) + sizeof(BSONObj));
+ ~TelemetryEntry() {
+ telemetryStoreSizeEstimateBytesMetric.decrement(sizeof(TelemetryEntry) + sizeof(BSONObj));
}
BSONObj toBSON() const {
- BSONObjBuilder builder{sizeof(QueryStatsEntry) + 100};
+ BSONObjBuilder builder{sizeof(TelemetryEntry) + 100};
builder.append("lastExecutionMicros", (BSONNumeric)lastExecutionMicros);
builder.append("execCount", (BSONNumeric)execCount);
queryExecMicros.appendTo(builder, "queryExecMicros");
@@ -124,11 +124,11 @@ public:
}
/**
- * Redact a given queryStats key and set _keySize.
+ * Redact a given telemetry key and set _keySize.
*/
- BSONObj computeQueryStatsKey(OperationContext* opCtx,
- bool applyHmacToIdentifiers,
- std::string hmacKey) const;
+ BSONObj computeTelemetryKey(OperationContext* opCtx,
+ bool applyHmacToIdentifiers,
+ std::string hmacKey) const;
/**
* Timestamp for when this query shape was added to the store. Set on construction.
@@ -153,8 +153,8 @@ public:
NamespaceStringOrUUID nss;
- // TODO: SERVER-73152 remove oldQueryStatsKey when RequestShapifier is used for agg.
- BSONObj oldQueryStatsKey;
+ // TODO: SERVER-73152 remove oldTelemetryKey when RequestShapifier is used for agg.
+ BSONObj oldTelemetryKey;
};
struct TelemetryPartitioner {
@@ -164,32 +164,32 @@ struct TelemetryPartitioner {
}
};
-struct QueryStatsStoreEntryBudgetor {
- size_t operator()(const std::size_t key, const std::shared_ptr<QueryStatsEntry>& value) {
+struct TelemetryStoreEntryBudgetor {
+ size_t operator()(const std::size_t key, const std::shared_ptr<TelemetryEntry>& value) {
        // The budget estimator for a <key,value> pair in the LRU cache accounts for the size of the key
// and the size of the metrics, including the bson object used for generating the telemetry
// key at read time.
- return sizeof(QueryStatsEntry) + sizeof(std::size_t) + value->oldQueryStatsKey.objsize();
+ return sizeof(TelemetryEntry) + sizeof(std::size_t) + value->oldTelemetryKey.objsize();
}
};
-using QueryStatsStore = PartitionedCache<std::size_t,
- std::shared_ptr<QueryStatsEntry>,
- QueryStatsStoreEntryBudgetor,
- TelemetryPartitioner>;
+using TelemetryStore = PartitionedCache<std::size_t,
+ std::shared_ptr<TelemetryEntry>,
+ TelemetryStoreEntryBudgetor,
+ TelemetryPartitioner>;
/**
- * Acquire a reference to the global queryStats store.
+ * Acquire a reference to the global telemetry store.
*/
-QueryStatsStore& getQueryStatsStore(OperationContext* opCtx);
+TelemetryStore& getTelemetryStore(OperationContext* opCtx);
/**
- * Register a request for queryStats collection. The queryStats machinery may decide not to
+ * Register a request for telemetry collection. The telemetry machinery may decide not to
* collect anything but this should be called for all requests. The decision is made based on
- * the feature flag and queryStats parameters such as rate limiting.
+ * the feature flag and telemetry parameters such as rate limiting.
*
- * The caller is still responsible for subsequently calling writeQueryStats() once the request is
+ * The caller is still responsible for subsequently calling writeTelemetry() once the request is
* completed.
*
* Note that calling this affects internal state. It should be called once for each request for
@@ -203,22 +203,22 @@ void registerRequest(std::unique_ptr<RequestShapifier> requestShapifier,
const boost::intrusive_ptr<ExpressionContext>& expCtx);
/**
- * Writes queryStats to the queryStats store for the operation identified by `queryStatsKey`.
+ * Writes telemetry to the telemetry store for the operation identified by `telemetryKey`.
*/
-void writeQueryStats(OperationContext* opCtx,
- boost::optional<size_t> queryStatsKeyHash,
- boost::optional<BSONObj> queryStatsKey,
- std::unique_ptr<RequestShapifier> requestShapifier,
- uint64_t queryExecMicros,
- uint64_t docsReturned);
+void writeTelemetry(OperationContext* opCtx,
+ boost::optional<size_t> telemetryKeyHash,
+ boost::optional<BSONObj> telemetryKey,
+ std::unique_ptr<RequestShapifier> requestShapifier,
+ uint64_t queryExecMicros,
+ uint64_t docsReturned);
/**
* Serialize the FindCommandRequest according to the Options passed in. Returns the serialized BSON
* with hmac applied to all field names and literals.
*/
-BSONObj makeQueryStatsKey(const FindCommandRequest& findCommand,
- const SerializationOptions& opts,
- const boost::intrusive_ptr<ExpressionContext>& expCtx,
- boost::optional<const QueryStatsEntry&> existingMetrics = boost::none);
-} // namespace query_stats
+BSONObj makeTelemetryKey(const FindCommandRequest& findCommand,
+ const SerializationOptions& opts,
+ const boost::intrusive_ptr<ExpressionContext>& expCtx,
+ boost::optional<const TelemetryEntry&> existingMetrics = boost::none);
+} // namespace telemetry
} // namespace mongo
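For orientation, a sketch of the registration/write lifecycle declared above, as exercised by the mongos find path later in this patch (`cq` is the parsed CanonicalQuery; the write half mirrors collectTelemetryMongos):

    // At parse time, before the command runs (rate limiting may decide to collect nothing):
    telemetry::registerRequest(
        std::make_unique<telemetry::FindRequestShapifier>(cq->getFindCommandRequest(), opCtx),
        cq->nss(),
        opCtx,
        cq->getExpCtx());

    // Once the request (or the cursor serving it) finishes, flush the accumulated metrics:
    auto&& opDebug = CurOp::get(opCtx)->debug();
    telemetry::writeTelemetry(opCtx,
                              opDebug.telemetryStoreKeyHash,
                              opDebug.telemetryStoreKey,
                              std::move(opDebug.telemetryRequestShapifier),
                              opDebug.additiveMetrics.executionTime.value_or(Microseconds{0}).count(),
                              opDebug.additiveMetrics.nreturned.value_or(0));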
diff --git a/src/mongo/db/query/query_stats_store_test.cpp b/src/mongo/db/query/telemetry_store_test.cpp
index e36ac7ccd98..8d68ee566c6 100644
--- a/src/mongo/db/query/query_stats_store_test.cpp
+++ b/src/mongo/db/query/telemetry_store_test.cpp
@@ -33,13 +33,13 @@
#include "mongo/db/pipeline/expression_context_for_test.h"
#include "mongo/db/query/find_request_shapifier.h"
#include "mongo/db/query/query_feature_flags_gen.h"
-#include "mongo/db/query/query_stats.h"
+#include "mongo/db/query/telemetry.h"
#include "mongo/db/service_context_test_fixture.h"
#include "mongo/idl/server_parameter_test_util.h"
#include "mongo/unittest/inline_auto_update.h"
#include "mongo/unittest/unittest.h"
-namespace mongo::query_stats {
+namespace mongo::telemetry {
/**
* A default hmac application strategy that generates easy to check results for testing purposes.
*/
@@ -51,9 +51,9 @@ std::size_t hash(const BSONObj& obj) {
return absl::hash_internal::CityHash64(obj.objdata(), obj.objsize());
}
-class QueryStatsStoreTest : public ServiceContextTest {
+class TelemetryStoreTest : public ServiceContextTest {
public:
- BSONObj makeQueryStatsKeyFindRequest(
+ BSONObj makeTelemetryKeyFindRequest(
FindCommandRequest fcr,
const boost::intrusive_ptr<ExpressionContext>& expCtx,
bool applyHmac = false,
@@ -71,12 +71,12 @@ public:
opts.applyHmacToIdentifiers = true;
opts.identifierHmacPolicy = applyHmacForTest;
}
- return findShapifier.makeQueryStatsKey(opts, expCtx);
+ return findShapifier.makeTelemetryKey(opts, expCtx);
}
};
-TEST_F(QueryStatsStoreTest, BasicUsage) {
- QueryStatsStore telStore{5000000, 1000};
+TEST_F(TelemetryStoreTest, BasicUsage) {
+ TelemetryStore telStore{5000000, 1000};
auto getMetrics = [&](const BSONObj& key) {
auto lookupResult = telStore.lookup(hash(key));
@@ -84,11 +84,11 @@ TEST_F(QueryStatsStoreTest, BasicUsage) {
};
auto collectMetrics = [&](BSONObj& key) {
- std::shared_ptr<QueryStatsEntry> metrics;
+ std::shared_ptr<TelemetryEntry> metrics;
auto lookupResult = telStore.lookup(hash(key));
if (!lookupResult.isOK()) {
telStore.put(hash(key),
- std::make_shared<QueryStatsEntry>(nullptr, NamespaceString{}, key));
+ std::make_shared<TelemetryEntry>(nullptr, NamespaceString{}, key));
lookupResult = telStore.lookup(hash(key));
}
metrics = *lookupResult.getValue();
@@ -127,39 +127,39 @@ TEST_F(QueryStatsStoreTest, BasicUsage) {
int numKeys = 0;
telStore.forEach(
- [&](std::size_t key, const std::shared_ptr<QueryStatsEntry>& entry) { numKeys++; });
+ [&](std::size_t key, const std::shared_ptr<TelemetryEntry>& entry) { numKeys++; });
ASSERT_EQ(numKeys, 2);
}
-TEST_F(QueryStatsStoreTest, EvictEntries) {
- // This creates a queryStats store with 2 partitions, each with a size of 1200 bytes.
+TEST_F(TelemetryStoreTest, EvictEntries) {
+ // This creates a telemetry store with 2 partitions, each with a size of 1200 bytes.
const auto cacheSize = 2400;
const auto numPartitions = 2;
- QueryStatsStore telStore{cacheSize, numPartitions};
+ TelemetryStore telStore{cacheSize, numPartitions};
for (int i = 0; i < 20; i++) {
auto query = BSON("query" + std::to_string(i) << 1 << "xEquals" << 42);
telStore.put(hash(query),
- std::make_shared<QueryStatsEntry>(nullptr, NamespaceString{}, BSONObj{}));
+ std::make_shared<TelemetryEntry>(nullptr, NamespaceString{}, BSONObj{}));
}
int numKeys = 0;
telStore.forEach(
- [&](std::size_t key, const std::shared_ptr<QueryStatsEntry>& entry) { numKeys++; });
+ [&](std::size_t key, const std::shared_ptr<TelemetryEntry>& entry) { numKeys++; });
int entriesPerPartition = (cacheSize / numPartitions) /
- (sizeof(std::size_t) + sizeof(QueryStatsEntry) + BSONObj().objsize());
+ (sizeof(std::size_t) + sizeof(TelemetryEntry) + BSONObj().objsize());
ASSERT_EQ(numKeys, entriesPerPartition * numPartitions);
}
-TEST_F(QueryStatsStoreTest, CorrectlyRedactsFindCommandRequestAllFields) {
+TEST_F(TelemetryStoreTest, CorrectlyRedactsFindCommandRequestAllFields) {
auto expCtx = make_intrusive<ExpressionContextForTest>();
FindCommandRequest fcr(NamespaceStringOrUUID(NamespaceString("testDB.testColl")));
fcr.setFilter(BSON("a" << 1));
- auto key = makeQueryStatsKeyFindRequest(
+ auto key = makeTelemetryKeyFindRequest(
fcr, expCtx, true, LiteralSerializationPolicy::kToDebugTypeString);
ASSERT_BSONOBJ_EQ_AUTO( // NOLINT
@@ -181,7 +181,7 @@ TEST_F(QueryStatsStoreTest, CorrectlyRedactsFindCommandRequestAllFields) {
// Add sort.
fcr.setSort(BSON("sortVal" << 1 << "otherSort" << -1));
- key = makeQueryStatsKeyFindRequest(
+ key = makeTelemetryKeyFindRequest(
fcr, expCtx, true, LiteralSerializationPolicy::kToDebugTypeString);
ASSERT_BSONOBJ_EQ_AUTO( // NOLINT
R"({
@@ -206,7 +206,7 @@ TEST_F(QueryStatsStoreTest, CorrectlyRedactsFindCommandRequestAllFields) {
// Add inclusion projection.
fcr.setProjection(BSON("e" << true << "f" << true));
- key = makeQueryStatsKeyFindRequest(
+ key = makeTelemetryKeyFindRequest(
fcr, expCtx, true, LiteralSerializationPolicy::kToDebugTypeString);
ASSERT_BSONOBJ_EQ_AUTO( // NOLINT
R"({
@@ -239,7 +239,7 @@ TEST_F(QueryStatsStoreTest, CorrectlyRedactsFindCommandRequestAllFields) {
<< "$a"
<< "var2"
<< "const1"));
- key = makeQueryStatsKeyFindRequest(
+ key = makeTelemetryKeyFindRequest(
fcr, expCtx, true, LiteralSerializationPolicy::kToDebugTypeString);
ASSERT_BSONOBJ_EQ_AUTO( // NOLINT
R"({
@@ -275,7 +275,7 @@ TEST_F(QueryStatsStoreTest, CorrectlyRedactsFindCommandRequestAllFields) {
fcr.setHint(BSON("z" << 1 << "c" << 1));
fcr.setMax(BSON("z" << 25));
fcr.setMin(BSON("z" << 80));
- key = makeQueryStatsKeyFindRequest(
+ key = makeTelemetryKeyFindRequest(
fcr, expCtx, true, LiteralSerializationPolicy::kToDebugTypeString);
ASSERT_BSONOBJ_EQ_AUTO( // NOLINT
R"({
@@ -324,7 +324,7 @@ TEST_F(QueryStatsStoreTest, CorrectlyRedactsFindCommandRequestAllFields) {
fcr.setMaxTimeMS(1000);
fcr.setNoCursorTimeout(false);
- key = makeQueryStatsKeyFindRequest(
+ key = makeTelemetryKeyFindRequest(
fcr, expCtx, true, LiteralSerializationPolicy::kToDebugTypeString);
ASSERT_BSONOBJ_EQ_AUTO( // NOLINT
@@ -380,7 +380,7 @@ TEST_F(QueryStatsStoreTest, CorrectlyRedactsFindCommandRequestAllFields) {
fcr.setShowRecordId(true);
fcr.setAwaitData(false);
fcr.setMirrored(true);
- key = makeQueryStatsKeyFindRequest(
+ key = makeTelemetryKeyFindRequest(
fcr, expCtx, true, LiteralSerializationPolicy::kToDebugTypeString);
ASSERT_BSONOBJ_EQ_AUTO( // NOLINT
@@ -434,7 +434,7 @@ TEST_F(QueryStatsStoreTest, CorrectlyRedactsFindCommandRequestAllFields) {
key);
fcr.setAllowPartialResults(false);
- key = makeQueryStatsKeyFindRequest(
+ key = makeTelemetryKeyFindRequest(
fcr, expCtx, true, LiteralSerializationPolicy::kToDebugTypeString);
// Make sure that a false allowPartialResults is also accurately captured.
ASSERT_BSONOBJ_EQ_AUTO( // NOLINT
@@ -488,7 +488,7 @@ TEST_F(QueryStatsStoreTest, CorrectlyRedactsFindCommandRequestAllFields) {
key);
}
-TEST_F(QueryStatsStoreTest, CorrectlyRedactsFindCommandRequestEmptyFields) {
+TEST_F(TelemetryStoreTest, CorrectlyRedactsFindCommandRequestEmptyFields) {
auto expCtx = make_intrusive<ExpressionContextForTest>();
FindCommandRequest fcr(NamespaceStringOrUUID(NamespaceString("testDB.testColl")));
FindRequestShapifier findShapifier(fcr, expCtx->opCtx);
@@ -500,7 +500,7 @@ TEST_F(QueryStatsStoreTest, CorrectlyRedactsFindCommandRequestEmptyFields) {
opts.applyHmacToIdentifiers = true;
opts.identifierHmacPolicy = applyHmacForTest;
- auto hmacApplied = findShapifier.makeQueryStatsKey(opts, expCtx);
+ auto hmacApplied = findShapifier.makeTelemetryKey(opts, expCtx);
ASSERT_BSONOBJ_EQ_AUTO( // NOLINT
R"({
"queryShape": {
@@ -515,7 +515,7 @@ TEST_F(QueryStatsStoreTest, CorrectlyRedactsFindCommandRequestEmptyFields) {
hmacApplied); // NOLINT (test auto-update)
}
-TEST_F(QueryStatsStoreTest, CorrectlyRedactsHintsWithOptions) {
+TEST_F(TelemetryStoreTest, CorrectlyRedactsHintsWithOptions) {
auto expCtx = make_intrusive<ExpressionContextForTest>();
FindCommandRequest fcr(NamespaceStringOrUUID(NamespaceString("testDB.testColl")));
FindRequestShapifier findShapifier(fcr, expCtx->opCtx);
@@ -525,7 +525,7 @@ TEST_F(QueryStatsStoreTest, CorrectlyRedactsHintsWithOptions) {
fcr.setMax(BSON("z" << 25));
fcr.setMin(BSON("z" << 80));
- auto key = makeQueryStatsKeyFindRequest(
+ auto key = makeTelemetryKeyFindRequest(
fcr, expCtx, false, LiteralSerializationPolicy::kToDebugTypeString);
ASSERT_BSONOBJ_EQ_AUTO( // NOLINT
@@ -559,7 +559,7 @@ TEST_F(QueryStatsStoreTest, CorrectlyRedactsHintsWithOptions) {
fcr.setHint(BSON("$hint"
<< "z"));
- key = makeQueryStatsKeyFindRequest(
+ key = makeTelemetryKeyFindRequest(
fcr, expCtx, false, LiteralSerializationPolicy::kToDebugTypeString);
ASSERT_BSONOBJ_EQ_AUTO( // NOLINT
R"({
@@ -588,7 +588,7 @@ TEST_F(QueryStatsStoreTest, CorrectlyRedactsHintsWithOptions) {
key);
fcr.setHint(BSON("z" << 1 << "c" << 1));
- key = makeQueryStatsKeyFindRequest(fcr, expCtx, true, LiteralSerializationPolicy::kUnchanged);
+ key = makeTelemetryKeyFindRequest(fcr, expCtx, true, LiteralSerializationPolicy::kUnchanged);
ASSERT_BSONOBJ_EQ_AUTO( // NOLINT
R"({
"queryShape": {
@@ -616,7 +616,7 @@ TEST_F(QueryStatsStoreTest, CorrectlyRedactsHintsWithOptions) {
})",
key);
- key = makeQueryStatsKeyFindRequest(
+ key = makeTelemetryKeyFindRequest(
fcr, expCtx, true, LiteralSerializationPolicy::kToDebugTypeString);
ASSERT_BSONOBJ_EQ_AUTO( // NOLINT
R"({
@@ -647,7 +647,7 @@ TEST_F(QueryStatsStoreTest, CorrectlyRedactsHintsWithOptions) {
// Test that $natural comes through unmodified.
fcr.setHint(BSON("$natural" << -1));
- key = makeQueryStatsKeyFindRequest(
+ key = makeTelemetryKeyFindRequest(
fcr, expCtx, true, LiteralSerializationPolicy::kToDebugTypeString);
ASSERT_BSONOBJ_EQ_AUTO( // NOLINT
R"({
@@ -676,12 +676,12 @@ TEST_F(QueryStatsStoreTest, CorrectlyRedactsHintsWithOptions) {
key);
}
-TEST_F(QueryStatsStoreTest, DefinesLetVariables) {
+TEST_F(TelemetryStoreTest, DefinesLetVariables) {
// Test that the expression context we use to apply hmac will understand the 'let' part of the
// find command while parsing the other pieces of the command.
// Note that this ExpressionContext will not have the let variables defined - we expect the
- // 'makeQueryStatsKey' call to do that.
+ // 'makeTelemetryKey' call to do that.
auto opCtx = makeOperationContext();
FindCommandRequest fcr(NamespaceStringOrUUID(NamespaceString("testDB.testColl")));
fcr.setLet(BSON("var" << 2));
@@ -690,14 +690,13 @@ TEST_F(QueryStatsStoreTest, DefinesLetVariables) {
const auto cmdObj = fcr.toBSON(BSON("$db"
<< "testDB"));
- QueryStatsEntry testMetrics{
- std::make_unique<query_stats::FindRequestShapifier>(fcr, opCtx.get()),
- fcr.getNamespaceOrUUID(),
- cmdObj};
+ TelemetryEntry testMetrics{std::make_unique<telemetry::FindRequestShapifier>(fcr, opCtx.get()),
+ fcr.getNamespaceOrUUID(),
+ cmdObj};
bool applyHmacToIdentifiers = false;
auto hmacApplied =
- testMetrics.computeQueryStatsKey(opCtx.get(), applyHmacToIdentifiers, std::string{});
+ testMetrics.computeTelemetryKey(opCtx.get(), applyHmacToIdentifiers, std::string{});
ASSERT_BSONOBJ_EQ_AUTO( // NOLINT
R"({
"queryShape": {
@@ -731,7 +730,7 @@ TEST_F(QueryStatsStoreTest, DefinesLetVariables) {
// do the hashing, so we'll just stick with the big long strings here for now.
applyHmacToIdentifiers = true;
hmacApplied =
- testMetrics.computeQueryStatsKey(opCtx.get(), applyHmacToIdentifiers, std::string{});
+ testMetrics.computeTelemetryKey(opCtx.get(), applyHmacToIdentifiers, std::string{});
ASSERT_BSONOBJ_EQ_AUTO( // NOLINT
R"({
"queryShape": {
@@ -762,7 +761,7 @@ TEST_F(QueryStatsStoreTest, DefinesLetVariables) {
hmacApplied);
}
-TEST_F(QueryStatsStoreTest, CorrectlyRedactsAggregateCommandRequestAllFieldsSimplePipeline) {
+TEST_F(TelemetryStoreTest, CorrectlyRedactsAggregateCommandRequestAllFieldsSimplePipeline) {
auto expCtx = make_intrusive<ExpressionContextForTest>();
AggregateCommandRequest acr(NamespaceString("testDB.testColl"));
auto matchStage = fromjson(R"({
@@ -791,7 +790,7 @@ TEST_F(QueryStatsStoreTest, CorrectlyRedactsAggregateCommandRequestAllFieldsSimp
opts.applyHmacToIdentifiers = false;
opts.identifierHmacPolicy = applyHmacForTest;
- auto shapified = aggShapifier.makeQueryStatsKey(opts, expCtx);
+ auto shapified = aggShapifier.makeTelemetryKey(opts, expCtx);
ASSERT_BSONOBJ_EQ_AUTO( // NOLINT
R"({
"queryShape": {
@@ -850,7 +849,7 @@ TEST_F(QueryStatsStoreTest, CorrectlyRedactsAggregateCommandRequestAllFieldsSimp
opts.replacementForLiteralArgs = "?";
opts.literalPolicy = LiteralSerializationPolicy::kToDebugTypeString;
opts.applyHmacToIdentifiers = true;
- shapified = aggShapifier.makeQueryStatsKey(opts, expCtx);
+ shapified = aggShapifier.makeTelemetryKey(opts, expCtx);
ASSERT_BSONOBJ_EQ_AUTO( // NOLINT
R"({
"queryShape": {
@@ -912,7 +911,7 @@ TEST_F(QueryStatsStoreTest, CorrectlyRedactsAggregateCommandRequestAllFieldsSimp
acr.setHint(BSON("z" << 1 << "c" << 1));
acr.setCollation(BSON("locale"
<< "simple"));
- shapified = aggShapifier.makeQueryStatsKey(opts, expCtx);
+ shapified = aggShapifier.makeTelemetryKey(opts, expCtx);
ASSERT_BSONOBJ_EQ_AUTO( // NOLINT
R"({
"queryShape": {
@@ -982,7 +981,7 @@ TEST_F(QueryStatsStoreTest, CorrectlyRedactsAggregateCommandRequestAllFieldsSimp
<< "$foo"
<< "var2"
<< "bar"));
- shapified = aggShapifier.makeQueryStatsKey(opts, expCtx);
+ shapified = aggShapifier.makeTelemetryKey(opts, expCtx);
ASSERT_BSONOBJ_EQ_AUTO( // NOLINT
R"({
"queryShape": {
@@ -1059,7 +1058,7 @@ TEST_F(QueryStatsStoreTest, CorrectlyRedactsAggregateCommandRequestAllFieldsSimp
acr.setBypassDocumentValidation(true);
expCtx->opCtx->setComment(BSON("comment"
<< "note to self"));
- shapified = aggShapifier.makeQueryStatsKey(opts, expCtx);
+ shapified = aggShapifier.makeTelemetryKey(opts, expCtx);
ASSERT_BSONOBJ_EQ_AUTO( // NOLINT
R"({
"queryShape": {
@@ -1133,7 +1132,7 @@ TEST_F(QueryStatsStoreTest, CorrectlyRedactsAggregateCommandRequestAllFieldsSimp
})",
shapified);
}
-TEST_F(QueryStatsStoreTest, CorrectlyRedactsAggregateCommandRequestEmptyFields) {
+TEST_F(TelemetryStoreTest, CorrectlyRedactsAggregateCommandRequestEmptyFields) {
auto expCtx = make_intrusive<ExpressionContextForTest>();
AggregateCommandRequest acr(NamespaceString("testDB.testColl"));
acr.setPipeline({});
@@ -1147,7 +1146,7 @@ TEST_F(QueryStatsStoreTest, CorrectlyRedactsAggregateCommandRequestEmptyFields)
opts.applyHmacToIdentifiers = true;
opts.identifierHmacPolicy = applyHmacForTest;
- auto shapified = aggShapifier.makeQueryStatsKey(opts, expCtx);
+ auto shapified = aggShapifier.makeTelemetryKey(opts, expCtx);
ASSERT_BSONOBJ_EQ_AUTO( // NOLINT
R"({
"queryShape": {
@@ -1161,4 +1160,4 @@ TEST_F(QueryStatsStoreTest, CorrectlyRedactsAggregateCommandRequestEmptyFields)
})",
shapified); // NOLINT (test auto-update)
}
-} // namespace mongo::query_stats
+} // namespace mongo::telemetry
diff --git a/src/mongo/db/query/query_stats_util.cpp b/src/mongo/db/query/telemetry_util.cpp
index 4c102d983dc..eeaf7da71e6 100644
--- a/src/mongo/db/query/query_stats_util.cpp
+++ b/src/mongo/db/query/telemetry_util.cpp
@@ -27,7 +27,7 @@
* it in the license file.
*/
-#include "mongo/db/query/query_stats_util.h"
+#include "mongo/db/query/telemetry_util.h"
#include "mongo/base/status.h"
#include "mongo/db/concurrency/d_concurrency.h"
@@ -40,25 +40,25 @@
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery
-namespace mongo::query_stats_util {
+namespace mongo::telemetry_util {
namespace {
/**
* Given the current 'Client', returns a pointer to the 'ServiceContext' and an interface for
- * updating the queryStats store.
+ * updating the telemetry store.
*/
std::pair<ServiceContext*, OnParamChangeUpdater*> getUpdater(const Client& client) {
auto serviceCtx = client.getServiceContext();
tassert(7106500, "ServiceContext must be non null", serviceCtx);
- auto updater = queryStatsStoreOnParamChangeUpdater(serviceCtx).get();
+ auto updater = telemetryStoreOnParamChangeUpdater(serviceCtx).get();
tassert(7106501, "Telemetry store size updater must be non null", updater);
return {serviceCtx, updater};
}
} // namespace
-Status onQueryStatsStoreSizeUpdate(const std::string& str) {
+Status onTelemetryStoreSizeUpdate(const std::string& str) {
auto newSize = memory_util::MemorySize::parse(str);
if (!newSize.isOK()) {
return newSize.getStatus();
@@ -75,11 +75,11 @@ Status onQueryStatsStoreSizeUpdate(const std::string& str) {
return Status::OK();
}
-Status validateQueryStatsStoreSize(const std::string& str, const boost::optional<TenantId>&) {
+Status validateTelemetryStoreSize(const std::string& str, const boost::optional<TenantId>&) {
return memory_util::MemorySize::parse(str).getStatus();
}
-Status onQueryStatsSamplingRateUpdate(int samplingRate) {
+Status onTelemetrySamplingRateUpdate(int samplingRate) {
// The client is nullptr if the parameter is supplied from the command line. In this case, we
 // ignore the update event; the parameter will be processed when initializing the service
// context.
@@ -92,6 +92,6 @@ Status onQueryStatsSamplingRateUpdate(int samplingRate) {
}
const Decorable<ServiceContext>::Decoration<std::unique_ptr<OnParamChangeUpdater>>
- queryStatsStoreOnParamChangeUpdater =
+ telemetryStoreOnParamChangeUpdater =
ServiceContext::declareDecoration<std::unique_ptr<OnParamChangeUpdater>>();
-} // namespace mongo::query_stats_util
+} // namespace mongo::telemetry_util
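The exported callbacks above are driven by the query-knob setParameter plumbing; roughly, a store resize is expected to flow as below (literal values are illustrative only):

    // Validate the requested size string first, then apply it; the update path resolves the
    // decorated OnParamChangeUpdater and calls updateCacheSize() on the service context.
    uassertStatusOK(telemetry_util::validateTelemetryStoreSize("24MB", boost::none));
    uassertStatusOK(telemetry_util::onTelemetryStoreSizeUpdate("24MB"));
    // Sampling-rate changes follow the same shape.
    uassertStatusOK(telemetry_util::onTelemetrySamplingRateUpdate(100));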
diff --git a/src/mongo/db/query/query_stats_util.h b/src/mongo/db/query/telemetry_util.h
index ebd8f1e2fbd..c8fc37dc5c4 100644
--- a/src/mongo/db/query/query_stats_util.h
+++ b/src/mongo/db/query/telemetry_util.h
@@ -35,52 +35,52 @@
#include "mongo/db/query/util/memory_util.h"
-namespace mongo::query_stats_util {
+namespace mongo::telemetry_util {
-Status onQueryStatsStoreSizeUpdate(const std::string& str);
+Status onTelemetryStoreSizeUpdate(const std::string& str);
-Status validateQueryStatsStoreSize(const std::string& str, const boost::optional<TenantId>&);
+Status validateTelemetryStoreSize(const std::string& str, const boost::optional<TenantId>&);
-Status onQueryStatsSamplingRateUpdate(int samplingRate);
+Status onTelemetrySamplingRateUpdate(int samplingRate);
/**
- * An interface used to modify the queryStats store when query setParameters are modified. This is
+ * An interface used to modify the telemetry store when query setParameters are modified. This is
* done via an interface decorating the 'ServiceContext' in order to avoid a link-time dependency
- * of the query knobs library on the queryStats code.
+ * of the query knobs library on the telemetry code.
*/
class OnParamChangeUpdater {
public:
virtual ~OnParamChangeUpdater() = default;
/**
- * Resizes the queryStats store decorating 'serviceCtx' to the new size given by 'memSize'. If
+ * Resizes the telemetry store decorating 'serviceCtx' to the new size given by 'memSize'. If
* the new size is smaller than the old, cache entries are evicted in order to ensure the
* cache fits within the new size bound.
*/
virtual void updateCacheSize(ServiceContext* serviceCtx, memory_util::MemorySize memSize) = 0;
/**
- * Updates the sampling rate for the queryStats rate limiter.
+ * Updates the sampling rate for the telemetry rate limiter.
*/
virtual void updateSamplingRate(ServiceContext* serviceCtx, int samplingRate) = 0;
};
/**
- * A stub implementation that does not allow changing any parameters - to be used if the queryStats
+ * A stub implementation that does not allow changing any parameters - to be used if the telemetry
* store is disabled and cannot be re-enabled without restarting, as with a feature flag.
*/
class NoChangesAllowedTelemetryParamUpdater : public OnParamChangeUpdater {
public:
void updateCacheSize(ServiceContext* serviceCtx, memory_util::MemorySize memSize) final {
uasserted(7373500,
- "Cannot configure queryStats store - it is currently disabled and a restart is "
+ "Cannot configure telemetry store - it is currently disabled and a restart is "
"required to activate.");
}
void updateSamplingRate(ServiceContext* serviceCtx, int samplingRate) {
uasserted(7506200,
- "Cannot configure queryStats store - it is currently disabled and a restart is "
+ "Cannot configure telemetry store - it is currently disabled and a restart is "
"required to activate.");
}
};
@@ -89,5 +89,5 @@ public:
* Decorated accessor to the 'OnParamChangeUpdater' stored in 'ServiceContext'.
*/
extern const Decorable<ServiceContext>::Decoration<std::unique_ptr<OnParamChangeUpdater>>
- queryStatsStoreOnParamChangeUpdater;
-} // namespace mongo::query_stats_util
+ telemetryStoreOnParamChangeUpdater;
+} // namespace mongo::telemetry_util
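A minimal sketch of a concrete updater and of installing it on the decorated ServiceContext (class name and wiring point are illustrative; the real updater lives with the telemetry store implementation):

    class TelemetryStoreParamUpdater final : public telemetry_util::OnParamChangeUpdater {
    public:
        void updateCacheSize(ServiceContext* serviceCtx, memory_util::MemorySize memSize) final {
            // Resize the store decorating 'serviceCtx'; evict entries if the new bound is smaller.
        }
        void updateSamplingRate(ServiceContext* serviceCtx, int samplingRate) final {
            // Hand the new rate to the telemetry rate limiter.
        }
    };

    // Installed once at startup by whichever component owns the telemetry store:
    telemetry_util::telemetryStoreOnParamChangeUpdater(serviceCtx) =
        std::make_unique<TelemetryStoreParamUpdater>();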
diff --git a/src/mongo/s/commands/cluster_find_cmd.h b/src/mongo/s/commands/cluster_find_cmd.h
index 6ab7d513d86..942e0893434 100644
--- a/src/mongo/s/commands/cluster_find_cmd.h
+++ b/src/mongo/s/commands/cluster_find_cmd.h
@@ -39,7 +39,7 @@
#include "mongo/db/matcher/extensions_callback_noop.h"
#include "mongo/db/query/cursor_response.h"
#include "mongo/db/query/find_request_shapifier.h"
-#include "mongo/db/query/query_stats.h"
+#include "mongo/db/query/telemetry.h"
#include "mongo/db/stats/counters.h"
#include "mongo/db/views/resolved_view.h"
#include "mongo/rpc/get_status_from_command_result.h"
@@ -225,11 +225,11 @@ public:
MatchExpressionParser::kAllowAllSpecialFeatures));
if (!_didDoFLERewrite) {
- query_stats::registerRequest(std::make_unique<query_stats::FindRequestShapifier>(
- cq->getFindCommandRequest(), opCtx),
- cq->nss(),
- opCtx,
- cq->getExpCtx());
+ telemetry::registerRequest(std::make_unique<telemetry::FindRequestShapifier>(
+ cq->getFindCommandRequest(), opCtx),
+ cq->nss(),
+ opCtx,
+ cq->getExpCtx());
}
try {
diff --git a/src/mongo/s/query/cluster_aggregate.cpp b/src/mongo/s/query/cluster_aggregate.cpp
index 9fd49e2e004..6c9351efe57 100644
--- a/src/mongo/s/query/cluster_aggregate.cpp
+++ b/src/mongo/s/query/cluster_aggregate.cpp
@@ -56,7 +56,7 @@
#include "mongo/db/query/explain_common.h"
#include "mongo/db/query/find_common.h"
#include "mongo/db/query/fle/server_rewrite.h"
-#include "mongo/db/query/query_stats.h"
+#include "mongo/db/query/telemetry.h"
#include "mongo/db/timeseries/timeseries_gen.h"
#include "mongo/db/timeseries/timeseries_options.h"
#include "mongo/db/views/resolved_view.h"
@@ -324,7 +324,7 @@ Status ClusterAggregate::runAggregate(OperationContext* opCtx,
auto startsWithDocuments = liteParsedPipeline.startsWithDocuments();
if (!shouldDoFLERewrite) {
- query_stats::registerAggRequest(request, opCtx);
+ telemetry::registerAggRequest(request, opCtx);
}
// If the routing table is not already taken by the higher level, fill it now.
diff --git a/src/mongo/s/query/cluster_aggregation_planner.cpp b/src/mongo/s/query/cluster_aggregation_planner.cpp
index 8f2c6fcdb19..5aa643c0a85 100644
--- a/src/mongo/s/query/cluster_aggregation_planner.cpp
+++ b/src/mongo/s/query/cluster_aggregation_planner.cpp
@@ -360,16 +360,16 @@ BSONObj establishMergingMongosCursor(OperationContext* opCtx,
int nShards = ccc->getNumRemotes();
auto&& opDebug = CurOp::get(opCtx)->debug();
- // Fill out the aggregation metrics in CurOp, and record queryStats metrics, before detaching
- // the cursor from its opCtx.
+ // Fill out the aggregation metrics in CurOp, and record telemetry metrics, before detaching the
+ // cursor from its opCtx.
opDebug.nShards = std::max(opDebug.nShards, nShards);
opDebug.cursorExhausted = exhausted;
opDebug.additiveMetrics.nBatches = 1;
CurOp::get(opCtx)->setEndOfOpMetrics(responseBuilder.numDocs());
if (exhausted) {
- collectQueryStatsMongos(opCtx, ccc->getRequestShapifier());
+ collectTelemetryMongos(opCtx, ccc->getRequestShapifier());
} else {
- collectQueryStatsMongos(opCtx, ccc);
+ collectTelemetryMongos(opCtx, ccc);
}
ccc->detachFromOperationContext();
diff --git a/src/mongo/s/query/cluster_client_cursor.h b/src/mongo/s/query/cluster_client_cursor.h
index 008bacd5ef6..1f0d9be54a7 100644
--- a/src/mongo/s/query/cluster_client_cursor.h
+++ b/src/mongo/s/query/cluster_client_cursor.h
@@ -270,11 +270,11 @@ public:
* Returns and releases ownership of the RequestShapifier associated with the request this
* cursor is handling.
*/
- virtual std::unique_ptr<query_stats::RequestShapifier> getRequestShapifier() = 0;
+ virtual std::unique_ptr<telemetry::RequestShapifier> getRequestShapifier() = 0;
protected:
// Metrics that are accumulated over the lifetime of the cursor, incremented with each getMore.
- // Useful for diagnostics like queryStats.
+ // Useful for diagnostics like telemetry.
OpDebug::AdditiveMetrics _metrics;
private:
diff --git a/src/mongo/s/query/cluster_client_cursor_impl.cpp b/src/mongo/s/query/cluster_client_cursor_impl.cpp
index 9da06d36881..939637d0f32 100644
--- a/src/mongo/s/query/cluster_client_cursor_impl.cpp
+++ b/src/mongo/s/query/cluster_client_cursor_impl.cpp
@@ -32,7 +32,7 @@
#include <memory>
#include "mongo/db/curop.h"
-#include "mongo/db/query/query_stats.h"
+#include "mongo/db/query/telemetry.h"
#include "mongo/logv2/log.h"
#include "mongo/s/query/router_stage_limit.h"
#include "mongo/s/query/router_stage_merge.h"
@@ -75,10 +75,9 @@ ClusterClientCursorImpl::ClusterClientCursorImpl(OperationContext* opCtx,
_lastUseDate(_createdDate),
_queryHash(CurOp::get(opCtx)->debug().queryHash),
_shouldOmitDiagnosticInformation(CurOp::get(opCtx)->debug().shouldOmitDiagnosticInformation),
- _queryStatsStoreKeyHash(CurOp::get(opCtx)->debug().queryStatsStoreKeyHash),
- _queryStatsStoreKey(CurOp::get(opCtx)->debug().queryStatsStoreKey),
- _queryStatsRequestShapifier(
- std::move(CurOp::get(opCtx)->debug().queryStatsRequestShapifier)) {
+ _telemetryStoreKeyHash(CurOp::get(opCtx)->debug().telemetryStoreKeyHash),
+ _telemetryStoreKey(CurOp::get(opCtx)->debug().telemetryStoreKey),
+ _telemetryRequestShapifier(std::move(CurOp::get(opCtx)->debug().telemetryRequestShapifier)) {
dassert(!_params.compareWholeSortKeyOnRouter ||
SimpleBSONObjComparator::kInstance.evaluate(
_params.sortToApplyOnRouter == AsyncResultsMerger::kWholeSortKeySortPattern));
@@ -138,13 +137,13 @@ void ClusterClientCursorImpl::kill(OperationContext* opCtx) {
"Cannot kill a cluster client cursor that has already been killed",
!_hasBeenKilled);
- if (_queryStatsStoreKeyHash && opCtx) {
- query_stats::writeQueryStats(opCtx,
- _queryStatsStoreKeyHash,
- _queryStatsStoreKey,
- std::move(_queryStatsRequestShapifier),
- _metrics.executionTime.value_or(Microseconds{0}).count(),
- _metrics.nreturned.value_or(0));
+ if (_telemetryStoreKeyHash && opCtx) {
+ telemetry::writeTelemetry(opCtx,
+ _telemetryStoreKeyHash,
+ _telemetryStoreKey,
+ std::move(_telemetryRequestShapifier),
+ _metrics.executionTime.value_or(Microseconds{0}).count(),
+ _metrics.nreturned.value_or(0));
}
_root->kill(opCtx);
@@ -286,8 +285,8 @@ bool ClusterClientCursorImpl::shouldOmitDiagnosticInformation() const {
return _shouldOmitDiagnosticInformation;
}
-std::unique_ptr<query_stats::RequestShapifier> ClusterClientCursorImpl::getRequestShapifier() {
- return std::move(_queryStatsRequestShapifier);
+std::unique_ptr<telemetry::RequestShapifier> ClusterClientCursorImpl::getRequestShapifier() {
+ return std::move(_telemetryRequestShapifier);
}
} // namespace mongo
diff --git a/src/mongo/s/query/cluster_client_cursor_impl.h b/src/mongo/s/query/cluster_client_cursor_impl.h
index 9d9168d6afb..ecb7535715c 100644
--- a/src/mongo/s/query/cluster_client_cursor_impl.h
+++ b/src/mongo/s/query/cluster_client_cursor_impl.h
@@ -121,7 +121,7 @@ public:
bool shouldOmitDiagnosticInformation() const final;
- std::unique_ptr<query_stats::RequestShapifier> getRequestShapifier() final;
+ std::unique_ptr<telemetry::RequestShapifier> getRequestShapifier() final;
public:
/**
@@ -186,12 +186,12 @@ private:
bool _shouldOmitDiagnosticInformation = false;
// If boost::none, telemetry should not be collected for this cursor.
- boost::optional<std::size_t> _queryStatsStoreKeyHash;
- // TODO: SERVER-73152 remove queryStatsStoreKey when RequestShapifier is used for agg.
- boost::optional<BSONObj> _queryStatsStoreKey;
+ boost::optional<std::size_t> _telemetryStoreKeyHash;
+ // TODO: SERVER-73152 remove telemetryStoreKey when RequestShapifier is used for agg.
+ boost::optional<BSONObj> _telemetryStoreKey;
// The RequestShapifier used by telemetry to shapify the request payload into the telemetry
// store key.
- std::unique_ptr<query_stats::RequestShapifier> _queryStatsRequestShapifier;
+ std::unique_ptr<telemetry::RequestShapifier> _telemetryRequestShapifier;
 // Tracks if kill() has been called on the cursor. Multiple calls to kill() are an error.
bool _hasBeenKilled = false;
diff --git a/src/mongo/s/query/cluster_client_cursor_mock.cpp b/src/mongo/s/query/cluster_client_cursor_mock.cpp
index 1e8b3561f5c..e495227b704 100644
--- a/src/mongo/s/query/cluster_client_cursor_mock.cpp
+++ b/src/mongo/s/query/cluster_client_cursor_mock.cpp
@@ -170,7 +170,7 @@ bool ClusterClientCursorMock::shouldOmitDiagnosticInformation() const {
return false;
}
-std::unique_ptr<query_stats::RequestShapifier> ClusterClientCursorMock::getRequestShapifier() {
+std::unique_ptr<telemetry::RequestShapifier> ClusterClientCursorMock::getRequestShapifier() {
return nullptr;
}
diff --git a/src/mongo/s/query/cluster_client_cursor_mock.h b/src/mongo/s/query/cluster_client_cursor_mock.h
index 750a67abdde..131ca234287 100644
--- a/src/mongo/s/query/cluster_client_cursor_mock.h
+++ b/src/mongo/s/query/cluster_client_cursor_mock.h
@@ -121,7 +121,7 @@ public:
bool shouldOmitDiagnosticInformation() const final;
- std::unique_ptr<query_stats::RequestShapifier> getRequestShapifier() final;
+ std::unique_ptr<telemetry::RequestShapifier> getRequestShapifier() final;
private:
bool _killed = false;
diff --git a/src/mongo/s/query/cluster_cursor_manager.cpp b/src/mongo/s/query/cluster_cursor_manager.cpp
index 68436d25c6e..d8e47e55ecf 100644
--- a/src/mongo/s/query/cluster_cursor_manager.cpp
+++ b/src/mongo/s/query/cluster_cursor_manager.cpp
@@ -38,7 +38,7 @@
#include "mongo/db/allocate_cursor_id.h"
#include "mongo/db/curop.h"
#include "mongo/db/query/query_knobs_gen.h"
-#include "mongo/db/query/query_stats.h"
+#include "mongo/db/query/telemetry.h"
#include "mongo/db/session/kill_sessions_common.h"
#include "mongo/db/session/logical_session_cache.h"
#include "mongo/logv2/log.h"
@@ -591,25 +591,25 @@ StatusWith<ClusterClientCursorGuard> ClusterCursorManager::_detachCursor(WithLoc
return std::move(cursor);
}
-void collectQueryStatsMongos(OperationContext* opCtx,
- std::unique_ptr<query_stats::RequestShapifier> requestShapifier) {
+void collectTelemetryMongos(OperationContext* opCtx,
+ std::unique_ptr<telemetry::RequestShapifier> requestShapifier) {
// If we haven't registered a cursor to prepare for getMore requests, we record
- // queryStats directly.
+ // telemetry directly.
auto&& opDebug = CurOp::get(opCtx)->debug();
- query_stats::writeQueryStats(
+ telemetry::writeTelemetry(
opCtx,
- opDebug.queryStatsStoreKeyHash,
- opDebug.queryStatsStoreKey,
+ opDebug.telemetryStoreKeyHash,
+ opDebug.telemetryStoreKey,
std::move(requestShapifier),
opDebug.additiveMetrics.executionTime.value_or(Microseconds{0}).count(),
opDebug.additiveMetrics.nreturned.value_or(0));
}
-void collectQueryStatsMongos(OperationContext* opCtx, ClusterClientCursorGuard& cursor) {
+void collectTelemetryMongos(OperationContext* opCtx, ClusterClientCursorGuard& cursor) {
cursor->incrementCursorMetrics(CurOp::get(opCtx)->debug().additiveMetrics);
}
-void collectQueryStatsMongos(OperationContext* opCtx, ClusterCursorManager::PinnedCursor& cursor) {
+void collectTelemetryMongos(OperationContext* opCtx, ClusterCursorManager::PinnedCursor& cursor) {
cursor->incrementCursorMetrics(CurOp::get(opCtx)->debug().additiveMetrics);
}
diff --git a/src/mongo/s/query/cluster_cursor_manager.h b/src/mongo/s/query/cluster_cursor_manager.h
index b10824baf09..219dd773f82 100644
--- a/src/mongo/s/query/cluster_cursor_manager.h
+++ b/src/mongo/s/query/cluster_cursor_manager.h
@@ -600,7 +600,7 @@ private:
};
/**
- * Record metrics for the current operation on opDebug and aggregates those metrics for queryStats
+ * Records metrics for the current operation on opDebug and aggregates those metrics for telemetry
* use. If a cursor is provided (via ClusterClientCursorGuard or
* ClusterCursorManager::PinnedCursor), metrics are aggregated on the cursor; otherwise, metrics are
* written directly to the telemetry store.
@@ -610,9 +610,9 @@ private:
* Currently, telemetry is only collected for find and aggregate requests (and their subsequent
* getMore requests), so these should only be called from those request paths.
*/
-void collectQueryStatsMongos(OperationContext* opCtx,
- std::unique_ptr<query_stats::RequestShapifier> requestShapifier);
-void collectQueryStatsMongos(OperationContext* opCtx, ClusterClientCursorGuard& cursor);
-void collectQueryStatsMongos(OperationContext* opCtx, ClusterCursorManager::PinnedCursor& cursor);
+void collectTelemetryMongos(OperationContext* opCtx,
+ std::unique_ptr<telemetry::RequestShapifier> requestShapifier);
+void collectTelemetryMongos(OperationContext* opCtx, ClusterClientCursorGuard& cursor);
+void collectTelemetryMongos(OperationContext* opCtx, ClusterCursorManager::PinnedCursor& cursor);
} // namespace mongo
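For reference, the call pattern the comment above describes, as it appears in the mongos find/agg paths in this patch (`ccc` is the ClusterClientCursorGuard built for the request; `exhausted` and `numDocs` stand in for the surrounding state):

    CurOp::get(opCtx)->setEndOfOpMetrics(numDocs);
    if (exhausted) {
        // No getMores will follow: release the shapifier and write telemetry immediately.
        collectTelemetryMongos(opCtx, ccc->getRequestShapifier());
    } else {
        // The cursor stays registered: fold this batch's metrics into the cursor instead.
        collectTelemetryMongos(opCtx, ccc);
    }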
diff --git a/src/mongo/s/query/cluster_find.cpp b/src/mongo/s/query/cluster_find.cpp
index d0bd48a0d51..5b340ec098a 100644
--- a/src/mongo/s/query/cluster_find.cpp
+++ b/src/mongo/s/query/cluster_find.cpp
@@ -48,7 +48,7 @@
#include "mongo/db/query/find_common.h"
#include "mongo/db/query/getmore_command_gen.h"
#include "mongo/db/query/query_planner_common.h"
-#include "mongo/db/query/query_stats.h"
+#include "mongo/db/query/telemetry.h"
#include "mongo/executor/task_executor_pool.h"
#include "mongo/logv2/log.h"
#include "mongo/platform/overflow_arithmetic.h"
@@ -444,7 +444,7 @@ CursorId runQueryWithoutRetrying(OperationContext* opCtx,
if (shardIds.size() > 0) {
updateNumHostsTargetedMetrics(opCtx, cm, shardIds.size());
}
- collectQueryStatsMongos(opCtx, ccc->getRequestShapifier());
+ collectTelemetryMongos(opCtx, ccc->getRequestShapifier());
return CursorId(0);
}
@@ -455,7 +455,7 @@ CursorId runQueryWithoutRetrying(OperationContext* opCtx,
? ClusterCursorManager::CursorLifetime::Immortal
: ClusterCursorManager::CursorLifetime::Mortal;
auto authUser = AuthorizationSession::get(opCtx->getClient())->getAuthenticatedUserName();
- collectQueryStatsMongos(opCtx, ccc);
+ collectTelemetryMongos(opCtx, ccc);
auto cursorId = uassertStatusOK(cursorManager->registerCursor(
opCtx, ccc.releaseCursor(), query.nss(), cursorType, cursorLifetime, authUser));
@@ -923,7 +923,7 @@ StatusWith<CursorResponse> ClusterFind::runGetMore(OperationContext* opCtx,
const bool partialResultsReturned = pinnedCursor.getValue()->partialResultsReturned();
pinnedCursor.getValue()->setLeftoverMaxTimeMicros(opCtx->getRemainingMaxTimeMicros());
- collectQueryStatsMongos(opCtx, pinnedCursor.getValue());
+ collectTelemetryMongos(opCtx, pinnedCursor.getValue());
// Upon successful completion, transfer ownership of the cursor back to the cursor manager. If
// the cursor has been exhausted, the cursor manager will clean it up for us.
diff --git a/src/mongo/s/query/store_possible_cursor.cpp b/src/mongo/s/query/store_possible_cursor.cpp
index a5c6759f4d1..38cec4024ed 100644
--- a/src/mongo/s/query/store_possible_cursor.cpp
+++ b/src/mongo/s/query/store_possible_cursor.cpp
@@ -98,7 +98,7 @@ StatusWith<BSONObj> storePossibleCursor(OperationContext* opCtx,
if (incomingCursorResponse.getValue().getCursorId() == CursorId(0)) {
opDebug.cursorExhausted = true;
- collectQueryStatsMongos(opCtx, std::move(opDebug.queryStatsRequestShapifier));
+ collectTelemetryMongos(opCtx, std::move(opDebug.telemetryRequestShapifier));
return cmdResult;
}
@@ -130,7 +130,7 @@ StatusWith<BSONObj> storePossibleCursor(OperationContext* opCtx,
}
auto ccc = ClusterClientCursorImpl::make(opCtx, std::move(executor), std::move(params));
- collectQueryStatsMongos(opCtx, ccc);
+ collectTelemetryMongos(opCtx, ccc);
// We don't expect to use this cursor until a subsequent getMore, so detach from the current
// OperationContext until then.
ccc->detachFromOperationContext();