diff options
Diffstat (limited to 'jstests/noPassthrough/queryStats')
13 files changed, 1224 insertions, 0 deletions
diff --git a/jstests/noPassthrough/queryStats/application_name_find.js b/jstests/noPassthrough/queryStats/application_name_find.js new file mode 100644 index 00000000000..36245a31514 --- /dev/null +++ b/jstests/noPassthrough/queryStats/application_name_find.js @@ -0,0 +1,39 @@ +/** + * Test that applicationName and namespace appear in telemetry for the find command. + * @tags: [featureFlagQueryStats] + */ +load("jstests/libs/telemetry_utils.js"); +(function() { +"use strict"; + +const kApplicationName = "MongoDB Shell"; +const kHashedCollName = "w6Ax20mVkbJu4wQWAMjL8Sl+DfXAr2Zqdc3kJRB7Oo0="; +const kHashedFieldName = "lU7Z0mLRPRUL+RfAD5jhYPRRpXBsZBxS/20EzDwfOG4="; + +// Turn on the collecting of telemetry metrics. +let options = { + setParameter: {internalQueryStatsSamplingRate: -1}, +}; + +const conn = MongoRunner.runMongod(options); +conn.setLogLevel(3, "query"); +const testDB = conn.getDB('test'); +var coll = testDB[jsTestName()]; +coll.drop(); + +coll.insert({v: 1}); +coll.insert({v: 2}); +coll.insert({v: 3}); + +coll.find({v: 1}).toArray(); + +let telemetry = getTelemetry(conn); +assert.eq(1, telemetry.length, telemetry); +assert.eq(kApplicationName, telemetry[0].key.applicationName, telemetry); + +telemetry = getTelemetryRedacted(conn, true); +assert.eq(1, telemetry.length, telemetry); +assert.eq(kApplicationName, telemetry[0].key.applicationName, telemetry); + +MongoRunner.stopMongod(conn); +}()); diff --git a/jstests/noPassthrough/queryStats/clear_query_stats_store.js b/jstests/noPassthrough/queryStats/clear_query_stats_store.js new file mode 100644 index 00000000000..9c2b888cd76 --- /dev/null +++ b/jstests/noPassthrough/queryStats/clear_query_stats_store.js @@ -0,0 +1,43 @@ +/** + * Test that the telemetry store can be cleared when the cache size is reset to 0. + * @tags: [featureFlagQueryStats] + */ +load("jstests/libs/telemetry_utils.js"); // For verifyMetrics. + +(function() { +"use strict"; + +// Turn on the collecting of telemetry metrics. 
+let options = { + setParameter: {internalQueryStatsSamplingRate: -1, internalQueryStatsCacheSize: "10MB"}, +}; + +const conn = MongoRunner.runMongod(options); +const testDB = conn.getDB('test'); +var coll = testDB[jsTestName()]; +coll.drop(); + +let query = {}; +for (var j = 0; j < 10; ++j) { + query["foo.field.xyz." + j] = 1; + query["bar.field.xyz." + j] = 2; + query["baz.field.xyz." + j] = 3; + coll.aggregate([{$match: query}]).itcount(); +} + +// Confirm number of entries in the store and that none have been evicted. +let telemetryResults = testDB.getSiblingDB("admin").aggregate([{$queryStats: {}}]).toArray(); +assert.eq(telemetryResults.length, 10, telemetryResults); +assert.eq(testDB.serverStatus().metrics.queryStats.numEvicted, 0); + +// Command to clear the cache. +assert.commandWorked(testDB.adminCommand({setParameter: 1, internalQueryStatsCacheSize: "0MB"})); + +// 10 regular queries plus the $queryStats query, means 11 entries evicted when the cache is +// cleared. +assert.eq(testDB.serverStatus().metrics.queryStats.numEvicted, 11); + +// Calling $queryStats should fail when the telemetry store size is 0 bytes. +assert.throwsWithCode(() => testDB.getSiblingDB("admin").aggregate([{$queryStats: {}}]), 6579000); +MongoRunner.stopMongod(conn); +}()); diff --git a/jstests/noPassthrough/queryStats/documentSourceQueryStats_redaction_parameters.js b/jstests/noPassthrough/queryStats/documentSourceQueryStats_redaction_parameters.js new file mode 100644 index 00000000000..8facb106072 --- /dev/null +++ b/jstests/noPassthrough/queryStats/documentSourceQueryStats_redaction_parameters.js @@ -0,0 +1,100 @@ +/** + * Test the $queryStats hmac properties. + * @tags: [featureFlagQueryStats] + */ + +load("jstests/aggregation/extras/utils.js"); // For assertAdminDBErrCodeAndErrMsgContains. +load("jstests/libs/telemetry_utils.js"); + +(function() { +"use strict"; + +// Assert the expected telemetry key with no hmac. 
+function assertTelemetryKeyWithoutHmac(telemetryKey) { + assert.eq(telemetryKey.filter, {"foo": {"$lte": "?number"}}); + assert.eq(telemetryKey.sort, {"bar": -1}); + assert.eq(telemetryKey.limit, "?number"); +} + +function runTest(conn) { + const testDB = conn.getDB('test'); + var coll = testDB[jsTestName()]; + coll.drop(); + + coll.insert({foo: 1}); + coll.find({foo: {$lte: 2}}).sort({bar: -1}).limit(2).toArray(); + // Default is no hmac. + assertTelemetryKeyWithoutHmac(getTelemetry(conn)[0].key.queryShape); + + // Turning on hmac should apply hmac to all field names on all entries, even previously cached + // ones. + const telemetryKey = getTelemetryRedacted(conn)[0]["key"]; + assert.eq(telemetryKey.queryShape.filter, + {"fNWkKfogMv6MJ77LpBcuPrO7Nq+R+7TqtD+Lgu3Umc4=": {"$lte": "?number"}}); + assert.eq(telemetryKey.queryShape.sort, {"CDDQIXZmDehLKmQcRxtdOQjMqoNqfI2nGt2r4CgJ52o=": -1}); + assert.eq(telemetryKey.queryShape.limit, "?number"); + + // Turning hmac back off should preserve field names on all entries, even previously cached + // ones. + assertTelemetryKeyWithoutHmac(getTelemetry(conn)[0]["key"].queryShape); + + // Explicitly set applyHmacToIdentifiers to false. + assertTelemetryKeyWithoutHmac(getTelemetryRedacted(conn, false)[0]["key"].queryShape); + + // Wrong parameter name throws error. + let pipeline = [{$queryStats: {redactFields: true}}]; + assertAdminDBErrCodeAndErrMsgContains( + coll, + pipeline, + ErrorCodes.FailedToParse, + "$queryStats parameters object may only contain 'applyHmacToIdentifiers' or 'hmacKey' options. Found: redactFields"); + + // Wrong parameter type throws error. + pipeline = [{$queryStats: {applyHmacToIdentifiers: 1}}]; + assertAdminDBErrCodeAndErrMsgContains( + coll, + pipeline, + ErrorCodes.FailedToParse, + "$queryStats applyHmacToIdentifiers parameter must be boolean. 
Found type: double"); + + pipeline = [{$queryStats: {hmacKey: 1}}]; + assertAdminDBErrCodeAndErrMsgContains( + coll, + pipeline, + ErrorCodes.FailedToParse, + "$queryStats hmacKey parameter must be bindata of length 32 or greater. Found type: double"); + + // Parameter object with unrecognized key throws error. + pipeline = [{$queryStats: {applyHmacToIdentifiers: true, hmacStrategy: "on"}}]; + assertAdminDBErrCodeAndErrMsgContains( + coll, + pipeline, + ErrorCodes.FailedToParse, + "$queryStats parameters object may only contain 'applyHmacToIdentifiers' or 'hmacKey' options. Found: hmacStrategy"); +} + +const conn = MongoRunner.runMongod({ + setParameter: { + internalQueryStatsSamplingRate: -1, + featureFlagQueryStats: true, + } +}); +runTest(conn); +MongoRunner.stopMongod(conn); + +const st = new ShardingTest({ + mongos: 1, + shards: 1, + config: 1, + rs: {nodes: 1}, + mongosOptions: { + setParameter: { + internalQueryStatsSamplingRate: -1, + featureFlagQueryStats: true, + 'failpoint.skipClusterParameterRefresh': "{'mode':'alwaysOn'}" + } + }, +}); +runTest(st.s); +st.stop(); +}()); diff --git a/jstests/noPassthrough/queryStats/feature_flag_off_sampling_rate_on.js b/jstests/noPassthrough/queryStats/feature_flag_off_sampling_rate_on.js new file mode 100644 index 00000000000..38474b944d0 --- /dev/null +++ b/jstests/noPassthrough/queryStats/feature_flag_off_sampling_rate_on.js @@ -0,0 +1,54 @@ +/** + * Test that calls to read from telemetry store fail when feature flag is turned off and sampling + * rate > 0. + */ +load('jstests/libs/analyze_plan.js'); +load("jstests/libs/feature_flag_util.js"); + +(function() { +"use strict"; + +// Set sampling rate to -1. +let options = { + setParameter: {internalQueryStatsSamplingRate: -1}, +}; +const conn = MongoRunner.runMongod(options); +const testdb = conn.getDB('test'); + +// This test specifically tests error handling when the feature flag is not on. 
+// TODO SERVER-65800 This test can be deleted when the feature is on by default. +if (!conn || FeatureFlagUtil.isEnabled(testdb, "QueryStats")) { + jsTestLog(`Skipping test since feature flag is disabled. conn: ${conn}`); + if (conn) { + MongoRunner.stopMongod(conn); + } + return; +} + +var coll = testdb[jsTestName()]; +coll.drop(); + +// Bulk insert documents to reduces roundtrips and make timeout on a slow machine less likely. +const bulk = coll.initializeUnorderedBulkOp(); +for (let i = 1; i <= 20; i++) { + bulk.insert({foo: 0, bar: Math.floor(Math.random() * 3)}); +} +assert.commandWorked(bulk.execute()); + +// Pipeline to read telemetry store should fail without feature flag turned on even though sampling +// rate is > 0. +assert.commandFailedWithCode( + testdb.adminCommand({aggregate: 1, pipeline: [{$queryStats: {}}], cursor: {}}), + ErrorCodes.QueryFeatureNotAllowed); + +// Pipeline, with a filter, to read telemetry store fails without feature flag turned on even though +// sampling rate is > 0. +assert.commandFailedWithCode(testdb.adminCommand({ + aggregate: 1, + pipeline: [{$queryStats: {}}, {$match: {"key.queryShape.find": {$eq: "###"}}}], + cursor: {} +}), + ErrorCodes.QueryFeatureNotAllowed); + +MongoRunner.stopMongod(conn); +}()); diff --git a/jstests/noPassthrough/queryStats/query_stats_collect_on_mongos.js b/jstests/noPassthrough/queryStats/query_stats_collect_on_mongos.js new file mode 100644 index 00000000000..97057269527 --- /dev/null +++ b/jstests/noPassthrough/queryStats/query_stats_collect_on_mongos.js @@ -0,0 +1,277 @@ +/** + * Test that mongos is collecting telemetry metrics. + * @tags: [featureFlagQueryStats] + */ + +load('jstests/libs/telemetry_utils.js'); + +(function() { +"use strict"; + +// Redacted literal replacement string. This may change in the future, so it's factored out. 
+const aggRedactString = "###"; +const setup = () => { + const st = new ShardingTest({ + mongos: 1, + shards: 1, + config: 1, + rs: {nodes: 1}, + mongosOptions: { + setParameter: { + internalQueryStatsSamplingRate: -1, + 'failpoint.skipClusterParameterRefresh': "{'mode':'alwaysOn'}" + } + }, + }); + const mongos = st.s; + const db = mongos.getDB("test"); + const coll = db.coll; + coll.insert({v: 1}); + coll.insert({v: 4}); + return st; +}; + +const assertExpectedResults = (results, + expectedTelemetryKey, + expectedExecCount, + expectedDocsReturnedSum, + expectedDocsReturnedMax, + expectedDocsReturnedMin, + expectedDocsReturnedSumOfSq) => { + const {key, metrics} = results; + assert.eq(expectedTelemetryKey, key); + assert.eq(expectedExecCount, metrics.execCount); + assert.docEq({ + sum: NumberLong(expectedDocsReturnedSum), + max: NumberLong(expectedDocsReturnedMax), + min: NumberLong(expectedDocsReturnedMin), + sumOfSquares: NumberLong(expectedDocsReturnedSumOfSq) + }, + metrics.docsReturned); + + // This test can't predict exact timings, so just assert these three fields have been set (are + // non-zero). + const {firstSeenTimestamp, lastExecutionMicros, queryExecMicros} = metrics; + + assert.neq(timestampCmp(firstSeenTimestamp, Timestamp(0, 0)), 0); + assert.neq(lastExecutionMicros, NumberLong(0)); + + const distributionFields = ['sum', 'max', 'min', 'sumOfSquares']; + for (const field of distributionFields) { + assert.neq(queryExecMicros[field], NumberLong(0)); + } +}; + +// Assert that, for find queries, no telemetry results are written until a cursor has reached +// exhaustion; ensure accurate results once they're written. 
+{ + const st = setup(); + const db = st.s.getDB("test"); + const collName = "coll"; + const coll = db[collName]; + + const telemetryKey = { + queryShape: { + cmdNs: {db: "test", coll: "coll"}, + command: "find", + filter: {$and: [{v: {$gt: "?number"}}, {v: {$lt: "?number"}}]}, + }, + readConcern: {level: "local", provenance: "implicitDefault"}, + batchSize: "?number", + applicationName: "MongoDB Shell", + }; + + const cursor = coll.find({v: {$gt: 0, $lt: 5}}).batchSize(1); // returns 1 doc + + // Since the cursor hasn't been exhausted yet, ensure no telemetry results have been written + // yet. + let telemetry = getTelemetry(db); + assert.eq(0, telemetry.length, telemetry); + + // Run a getMore to exhaust the cursor, then ensure telemetry results have been written + // accurately. batchSize must be 2 so the cursor recognizes exhaustion. + assert.commandWorked(db.runCommand({ + getMore: cursor.getId(), + collection: coll.getName(), + batchSize: 2 + })); // returns 1 doc, exhausts the cursor + // The $queryStats query for the previous `getTelemetry` is included in this call to + // $queryStats. + telemetry = getTelemetry(db); + assert.eq(2, telemetry.length, telemetry); + assertExpectedResults(telemetry[0], + telemetryKey, + /* expectedExecCount */ 1, + /* expectedDocsReturnedSum */ 2, + /* expectedDocsReturnedMax */ 2, + /* expectedDocsReturnedMin */ 2, + /* expectedDocsReturnedSumOfSq */ 4); + + // Run more queries (to exhaustion) with the same query shape, and ensure telemetry results are + // accurate. 
+ coll.find({v: {$gt: 2, $lt: 3}}).batchSize(10).toArray(); // returns 0 docs + coll.find({v: {$gt: 0, $lt: 1}}).batchSize(10).toArray(); // returns 0 docs + coll.find({v: {$gt: 0, $lt: 2}}).batchSize(10).toArray(); // return 1 doc + telemetry = getTelemetry(db); + assert.eq(2, telemetry.length, telemetry); + assertExpectedResults(telemetry[0], + telemetryKey, + /* expectedExecCount */ 4, + /* expectedDocsReturnedSum */ 3, + /* expectedDocsReturnedMax */ 2, + /* expectedDocsReturnedMin */ 0, + /* expectedDocsReturnedSumOfSq */ 5); + + st.stop(); +} + +// Assert that, for agg queries, no telemetry results are written until a cursor has reached +// exhaustion; ensure accurate results once they're written. +{ + const st = setup(); + const db = st.s.getDB("test"); + const coll = db.coll; + + const telemetryKey = { + pipeline: [ + {$match: {v: {$gt: aggRedactString, $lt: aggRedactString}}}, + {$project: {hello: aggRedactString}}, + ], + namespace: "test.coll", + applicationName: "MongoDB Shell" + }; + + const cursor = coll.aggregate( + [ + {$match: {v: {$gt: 0, $lt: 5}}}, + {$project: {hello: "$world"}}, + ], + {cursor: {batchSize: 1}}); // returns 1 doc + + // Since the cursor hasn't been exhausted yet, ensure no telemetry results have been written + // yet. + let telemetry = getTelemetry(db); + assert.eq(0, telemetry.length, telemetry); + + // Run a getMore to exhaust the cursor, then ensure telemetry results have been written + // accurately. batchSize must be 2 so the cursor recognizes exhaustion. + assert.commandWorked(db.runCommand({ + getMore: cursor.getId(), + collection: coll.getName(), + batchSize: 2 + })); // returns 1 doc, exhausts the cursor + // The $queryStats query for the previous `getTelemetry` is included in this call to + // $queryStats. 
+ telemetry = getTelemetry(db); + assert.eq(2, telemetry.length, telemetry); + assertExpectedResults(telemetry[0], + telemetryKey, + /* expectedExecCount */ 1, + /* expectedDocsReturnedSum */ 2, + /* expectedDocsReturnedMax */ 2, + /* expectedDocsReturnedMin */ 2, + /* expectedDocsReturnedSumOfSq */ 4); + + // Run more queries (to exhaustion) with the same query shape, and ensure telemetry results are + // accurate. + coll.aggregate([ + {$match: {v: {$gt: 0, $lt: 5}}}, + {$project: {hello: "$world"}}, + ]); // returns 2 docs + coll.aggregate([ + {$match: {v: {$gt: 2, $lt: 3}}}, + {$project: {hello: "$universe"}}, + ]); // returns 0 docs + coll.aggregate([ + {$match: {v: {$gt: 0, $lt: 2}}}, + {$project: {hello: "$galaxy"}}, + ]); // returns 1 doc + telemetry = getTelemetry(db); + assert.eq(2, telemetry.length, telemetry); + assertExpectedResults(telemetry[0], + telemetryKey, + /* expectedExecCount */ 4, + /* expectedDocsReturnedSum */ 5, + /* expectedDocsReturnedMax */ 2, + /* expectedDocsReturnedMin */ 0, + /* expectedDocsReturnedSumOfSq */ 9); + + st.stop(); +} + +// Assert on batchSize-limited find queries that killCursors will write metrics with partial results +// to the telemetry store. 
+{ + const st = setup(); + const db = st.s.getDB("test"); + const collName = "coll"; + const coll = db[collName]; + + const telemetryKey = { + queryShape: { + cmdNs: {db: "test", coll: "coll"}, + command: "find", + filter: {$and: [{v: {$gt: "?number"}}, {v: {$lt: "?number"}}]}, + }, + readConcern: {level: "local", provenance: "implicitDefault"}, + batchSize: "?number", + applicationName: "MongoDB Shell" + }; + + const cursor1 = coll.find({v: {$gt: 0, $lt: 5}}).batchSize(1); // returns 1 doc + const cursor2 = coll.find({v: {$gt: 0, $lt: 2}}).batchSize(1); // returns 1 doc + + assert.commandWorked( + db.runCommand({killCursors: coll.getName(), cursors: [cursor1.getId(), cursor2.getId()]})); + + const telemetry = getTelemetry(db); + assert.eq(1, telemetry.length); + assertExpectedResults(telemetry[0], + telemetryKey, + /* expectedExecCount */ 2, + /* expectedDocsReturnedSum */ 2, + /* expectedDocsReturnedMax */ 1, + /* expectedDocsReturnedMin */ 1, + /* expectedDocsReturnedSumOfSq */ 2); + st.stop(); +} + +// Assert on batchSize-limited agg queries that killCursors will write metrics with partial results +// to the telemetry store. 
+{ + const st = setup(); + const db = st.s.getDB("test"); + const coll = db.coll; + + const telemetryKey = { + pipeline: [{$match: {v: {$gt: aggRedactString, $lt: aggRedactString}}}], + namespace: `test.${coll.getName()}`, + applicationName: "MongoDB Shell" + }; + + const cursor1 = coll.aggregate( + [ + {$match: {v: {$gt: 0, $lt: 5}}}, + ], + {cursor: {batchSize: 1}}); // returns 1 doc + const cursor2 = coll.aggregate( + [ + {$match: {v: {$gt: 0, $lt: 2}}}, + ], + {cursor: {batchSize: 1}}); // returns 1 doc + + assert.commandWorked( + db.runCommand({killCursors: coll.getName(), cursors: [cursor1.getId(), cursor2.getId()]})); + + const telemetry = getTelemetry(db); + assert.eq(1, telemetry.length); + assertExpectedResults(telemetry[0], + telemetryKey, + /* expectedExecCount */ 2, + /* expectedDocsReturnedSum */ 2, + /* expectedDocsReturnedMax */ 1, + /* expectedDocsReturnedMin */ 1, + /* expectedDocsReturnedSumOfSq */ 2); + st.stop(); +} +}()); diff --git a/jstests/noPassthrough/queryStats/query_stats_feature_flag.js b/jstests/noPassthrough/queryStats/query_stats_feature_flag.js new file mode 100644 index 00000000000..bcce489d8da --- /dev/null +++ b/jstests/noPassthrough/queryStats/query_stats_feature_flag.js @@ -0,0 +1,34 @@ +/** + * Test that calls to read from telemetry store fail when feature flag is turned off. + */ +load('jstests/libs/analyze_plan.js'); +load("jstests/libs/feature_flag_util.js"); + +(function() { +"use strict"; + +// This test specifically tests error handling when the feature flag is not on. +// TODO SERVER-65800 this test can be removed when the feature flag is removed. +const conn = MongoRunner.runMongod(); +const testDB = conn.getDB('test'); +if (FeatureFlagUtil.isEnabled(testDB, "QueryStats")) { + jsTestLog("Skipping test since query stats are enabled."); + MongoRunner.stopMongod(conn); + return; +} + +// Pipeline to read telemetry store should fail without feature flag turned on. 
+assert.commandFailedWithCode( + testDB.adminCommand({aggregate: 1, pipeline: [{$queryStats: {}}], cursor: {}}), + ErrorCodes.QueryFeatureNotAllowed); + +// Pipeline, with a filter, to read telemetry store fails without feature flag turned on. +assert.commandFailedWithCode(testDB.adminCommand({ + aggregate: 1, + pipeline: [{$queryStats: {}}, {$match: {"key.queryShape.find": {$eq: "###"}}}], + cursor: {} +}), + ErrorCodes.QueryFeatureNotAllowed); + +MongoRunner.stopMongod(conn); +}()); diff --git a/jstests/noPassthrough/queryStats/query_stats_key.js b/jstests/noPassthrough/queryStats/query_stats_key.js new file mode 100644 index 00000000000..1c6a47e4ed5 --- /dev/null +++ b/jstests/noPassthrough/queryStats/query_stats_key.js @@ -0,0 +1,111 @@ +/** + * This test confirms that telemetry store key fields are properly nested and none are missing. + * @tags: [featureFlagQueryStats] + */ +load("jstests/libs/telemetry_utils.js"); +(function() { +"use strict"; + +function confirmAllFieldsPresent(queryStatsEntries) { + const kApplicationName = "MongoDB Shell"; + const queryShapeFindFields = [ + "cmdNs", + "command", + "filter", + "sort", + "projection", + "hint", + "skip", + "limit", + "singleBatch", + "max", + "min", + "returnKey", + "showRecordId", + "tailable", + "oplogReplay", + "awaitData", + "collation", + "allowDiskUse", + "let" + ]; + + // The outer fields not nested inside queryShape. 
+ const queryStatsKeyFields = [ + "queryShape", + "batchSize", + "comment", + "maxTimeMS", + "noCursorTimeout", + "readConcern", + "allowPartialResults", + "applicationName" + ]; + + for (const entry of queryStatsEntries) { + let fieldCounter = 0; + assert.eq(entry.key.queryShape.command, "find"); + assert.eq(entry.key.applicationName, kApplicationName); + + for (const field in entry.key.queryShape) { + assert(queryShapeFindFields.includes(field)); + fieldCounter++; + } + assert.eq(fieldCounter, queryShapeFindFields.length); + + fieldCounter = 0; + for (const field in entry.key) { + assert(queryStatsKeyFields.includes(field)); + fieldCounter++; + } + assert.eq(fieldCounter, queryStatsKeyFields.length); + } +} + +// Turn on the collecting of telemetry metrics. +let options = { + setParameter: {internalQueryStatsSamplingRate: -1}, +}; + +const conn = MongoRunner.runMongod(options); +const testDB = conn.getDB('test'); +var coll = testDB[jsTestName()]; +coll.drop(); + +// Have to create an index for hint not to fail. 
+assert.commandWorked(coll.createIndex({v: 1})); + +let commandObj = { + find: coll.getName(), + filter: {v: {$eq: 2}}, + oplogReplay: true, + comment: "this is a test!!", + min: {"v": 0}, + max: {"v": 4}, + hint: {"v": 1}, + sort: {a: -1}, + returnKey: false, + noCursorTimeout: true, + showRecordId: false, + tailable: false, + awaitData: false, + allowPartialResults: true, + skip: 1, + limit: 2, + maxTimeMS: 500, + collation: {locale: "en_US", strength: 2}, + allowDiskUse: true, + readConcern: {level: "local"}, + batchSize: 2, + singleBatch: true, + let : {}, + projection: {_id: 0}, +}; + +assert.commandWorked(testDB.runCommand(commandObj)); +let telemetry = getTelemetry(conn); +assert.eq(1, telemetry.length); +confirmAllFieldsPresent(telemetry); + +MongoRunner.stopMongod(conn); +}()); diff --git a/jstests/noPassthrough/queryStats/query_stats_metrics_across_getMore_calls.js b/jstests/noPassthrough/queryStats/query_stats_metrics_across_getMore_calls.js new file mode 100644 index 00000000000..d5caea74cf7 --- /dev/null +++ b/jstests/noPassthrough/queryStats/query_stats_metrics_across_getMore_calls.js @@ -0,0 +1,159 @@ +/** + * Test that the telemetry metrics are aggregated properly by distinct query shape over getMore + * calls. + * @tags: [featureFlagQueryStats] + */ +load("jstests/libs/telemetry_utils.js"); // For verifyMetrics. + +(function() { +"use strict"; + +// Turn on the collecting of telemetry metrics. +let options = { + setParameter: {internalQueryStatsSamplingRate: -1}, +}; + +const conn = MongoRunner.runMongod(options); +const testDB = conn.getDB('test'); +var coll = testDB[jsTestName()]; +coll.drop(); + +// Bulk insert documents to reduces roundtrips and make timeout on a slow machine less likely. 
+const bulk = coll.initializeUnorderedBulkOp();
+const numDocs = 100;
+for (let i = 0; i < numDocs / 2; ++i) {
+    bulk.insert({foo: 0, bar: Math.floor(Math.random() * 3)});
+    bulk.insert({foo: 1, bar: Math.floor(Math.random() * -2)});
+}
+assert.commandWorked(bulk.execute());
+
+// Assert that two queries with identical structures are represented by the same key.
+{
+    // Note: toArray() is necessary for the batchSize-limited query to run to cursor exhaustion
+    // (when it writes to the telemetry store).
+    coll.aggregate([{$match: {foo: 1}}], {cursor: {batchSize: 2}}).toArray();
+    coll.aggregate([{$match: {foo: 0}}], {cursor: {batchSize: 2}}).toArray();
+
+    // This command will return all telemetry store entries.
+    const telemetryResults = testDB.getSiblingDB("admin").aggregate([{$queryStats: {}}]).toArray();
+    // Assert there is only one entry.
+    assert.eq(telemetryResults.length, 1, telemetryResults);
+    const telemetryEntry = telemetryResults[0];
+    assert.eq(telemetryEntry.key.namespace, `test.${jsTestName()}`);
+    assert.eq(telemetryEntry.key.applicationName, "MongoDB Shell");
+
+    // Assert we update execution count for identically shaped queries.
+    assert.eq(telemetryEntry.metrics.execCount, 2);
+
+    // Assert telemetry values are accurate for the two above queries.
+    assert.eq(telemetryEntry.metrics.docsReturned.sum, numDocs);
+    assert.eq(telemetryEntry.metrics.docsReturned.min, numDocs / 2);
+    assert.eq(telemetryEntry.metrics.docsReturned.max, numDocs / 2);
+
+    verifyMetrics(telemetryResults);
+}
+
+const fooEqBatchSize = 5;
+const fooNeBatchSize = 3;
+// Assert on batchSize-limited queries that killCursors will write metrics with partial results to
+// the telemetry store.
+{
+    let cursor1 = coll.find({foo: {$eq: 0}}).batchSize(fooEqBatchSize);
+    let cursor2 = coll.find({foo: {$ne: 0}}).batchSize(fooNeBatchSize);
+    // Issue one getMore for the first query, so 2 * fooEqBatchSize documents are returned total.
+    assert.commandWorked(testDB.runCommand(
+        {getMore: cursor1.getId(), collection: coll.getName(), batchSize: fooEqBatchSize}));
+
+    // Kill both cursors so the telemetry metrics are stored.
+    assert.commandWorked(testDB.runCommand(
+        {killCursors: coll.getName(), cursors: [cursor1.getId(), cursor2.getId()]}));
+
+    // This filters telemetry entries to just the ones entered when running above find queries.
+    const telemetryResults = testDB.getSiblingDB("admin")
+                                 .aggregate([
+                                     {$queryStats: {}},
+                                     {$match: {"key.queryShape.filter.foo": {$exists: true}}},
+                                     {$sort: {key: 1}},
+                                 ])
+                                 .toArray();
+    assert.eq(telemetryResults.length, 2, telemetryResults);
+    assert.eq(telemetryResults[0].key.queryShape.cmdNs.db, "test");
+    assert.eq(telemetryResults[0].key.queryShape.cmdNs.coll, jsTestName());
+    assert.eq(telemetryResults[0].key.applicationName, "MongoDB Shell");
+    assert.eq(telemetryResults[1].key.queryShape.cmdNs.db, "test");
+    assert.eq(telemetryResults[1].key.queryShape.cmdNs.coll, jsTestName());
+    assert.eq(telemetryResults[1].key.applicationName, "MongoDB Shell");
+
+    assert.eq(telemetryResults[0].metrics.execCount, 1);
+    assert.eq(telemetryResults[1].metrics.execCount, 1);
+    assert.eq(telemetryResults[0].metrics.docsReturned.sum, fooEqBatchSize * 2);
+    assert.eq(telemetryResults[1].metrics.docsReturned.sum, fooNeBatchSize);
+
+    verifyMetrics(telemetryResults);
+}
+
+// Assert that options such as limit/sort create different keys, and that repeating a query shape
+// ({foo: {$eq}}) aggregates metrics across executions.
+{
+    const query2Limit = 50;
+    coll.find({foo: {$eq: 0}}).batchSize(2).toArray();
+    coll.find({foo: {$eq: 1}}).limit(query2Limit).batchSize(2).toArray();
+    coll.find().sort({"foo": 1}).batchSize(2).toArray();
+    // This filters telemetry entries to just the ones entered when running above find queries.
+ let telemetryResults = + testDB.getSiblingDB("admin") + .aggregate([{$queryStats: {}}, {$match: {"key.queryShape.command": "find"}}]) + .toArray(); + assert.eq(telemetryResults.length, 4, telemetryResults); + + verifyMetrics(telemetryResults); + + // This filters to just the telemetry for query coll.find().sort({"foo": 1}).batchSize(2). + telemetryResults = testDB.getSiblingDB("admin") + .aggregate([{$queryStats: {}}, {$match: {"key.queryShape.sort.foo": 1}}]) + .toArray(); + assert.eq(telemetryResults.length, 1, telemetryResults); + assert.eq(telemetryResults[0].key.queryShape.cmdNs.db, "test"); + assert.eq(telemetryResults[0].key.queryShape.cmdNs.coll, jsTestName()); + assert.eq(telemetryResults[0].key.applicationName, "MongoDB Shell"); + assert.eq(telemetryResults[0].metrics.execCount, 1); + assert.eq(telemetryResults[0].metrics.docsReturned.sum, numDocs); + + // This filters to just the telemetry for query coll.find({foo: {$eq: + // 1}}).limit(query2Limit).batchSize(2). + telemetryResults = + testDB.getSiblingDB("admin") + .aggregate([{$queryStats: {}}, {$match: {"key.queryShape.limit": '?number'}}]) + .toArray(); + assert.eq(telemetryResults.length, 1, telemetryResults); + assert.eq(telemetryResults[0].key.queryShape.cmdNs.db, "test"); + assert.eq(telemetryResults[0].key.queryShape.cmdNs.coll, jsTestName()); + assert.eq(telemetryResults[0].key.applicationName, "MongoDB Shell"); + assert.eq(telemetryResults[0].metrics.execCount, 1); + assert.eq(telemetryResults[0].metrics.docsReturned.sum, query2Limit); + + // This filters to just the telemetry for query coll.find({foo: {$eq: 0}}).batchSize(2). 
+ telemetryResults = testDB.getSiblingDB("admin") + .aggregate([ + {$queryStats: {}}, + { + $match: { + "key.queryShape.filter.foo": {$eq: {$eq: "?number"}}, + "key.queryShape.limit": {$exists: false}, + "key.queryShape.sort": {$exists: false} + } + } + ]) + .toArray(); + assert.eq(telemetryResults.length, 1, telemetryResults); + assert.eq(telemetryResults[0].key.queryShape.cmdNs.db, "test"); + assert.eq(telemetryResults[0].key.queryShape.cmdNs.coll, jsTestName()); + assert.eq(telemetryResults[0].key.applicationName, "MongoDB Shell"); + assert.eq(telemetryResults[0].metrics.execCount, 2); + assert.eq(telemetryResults[0].metrics.docsReturned.sum, numDocs / 2 + 2 * fooEqBatchSize); + assert.eq(telemetryResults[0].metrics.docsReturned.max, numDocs / 2); + assert.eq(telemetryResults[0].metrics.docsReturned.min, 2 * fooEqBatchSize); +} + +MongoRunner.stopMongod(conn); +}()); diff --git a/jstests/noPassthrough/queryStats/query_stats_redact_find_cmd.js b/jstests/noPassthrough/queryStats/query_stats_redact_find_cmd.js new file mode 100644 index 00000000000..b2cce48cdb7 --- /dev/null +++ b/jstests/noPassthrough/queryStats/query_stats_redact_find_cmd.js @@ -0,0 +1,69 @@ +/** + * Test that $queryStats properly applies hmac to find commands, on mongod and mongos. 
+ */ +load("jstests/libs/telemetry_utils.js"); +(function() { +"use strict"; + +const kHashedCollName = "w6Ax20mVkbJu4wQWAMjL8Sl+DfXAr2Zqdc3kJRB7Oo0="; +const kHashedFieldName = "lU7Z0mLRPRUL+RfAD5jhYPRRpXBsZBxS/20EzDwfOG4="; + +function runTest(conn) { + const db = conn.getDB("test"); + const admin = conn.getDB("admin"); + + db.test.drop(); + db.test.insert({v: 1}); + + db.test.find({v: 1}).toArray(); + + let telemetry = getTelemetryRedacted(admin); + + assert.eq(1, telemetry.length); + assert.eq("find", telemetry[0].key.queryShape.command); + assert.eq({[kHashedFieldName]: {$eq: "?number"}}, telemetry[0].key.queryShape.filter); + + db.test.insert({v: 2}); + + const cursor = db.test.find({v: {$gt: 0, $lt: 3}}).batchSize(1); + telemetry = getTelemetryRedacted(admin); + // Cursor isn't exhausted, so there shouldn't be another entry yet. + assert.eq(1, telemetry.length); + + assert.commandWorked( + db.runCommand({getMore: cursor.getId(), collection: db.test.getName(), batchSize: 2})); + + telemetry = getTelemetryRedacted(admin); + assert.eq(2, telemetry.length); + assert.eq("find", telemetry[1].key.queryShape.command); + assert.eq({ + "$and": [{[kHashedFieldName]: {"$gt": "?number"}}, {[kHashedFieldName]: {"$lt": "?number"}}] + }, + telemetry[1].key.queryShape.filter); +} + +const conn = MongoRunner.runMongod({ + setParameter: { + internalQueryStatsSamplingRate: -1, + featureFlagQueryStats: true, + } +}); +runTest(conn); +MongoRunner.stopMongod(conn); + +const st = new ShardingTest({ + mongos: 1, + shards: 1, + config: 1, + rs: {nodes: 1}, + mongosOptions: { + setParameter: { + internalQueryStatsSamplingRate: -1, + featureFlagQueryStats: true, + 'failpoint.skipClusterParameterRefresh': "{'mode':'alwaysOn'}" + } + }, +}); +runTest(st.s); +st.stop(); +}()); diff --git a/jstests/noPassthrough/queryStats/query_stats_sampling_rate.js b/jstests/noPassthrough/queryStats/query_stats_sampling_rate.js new file mode 100644 index 00000000000..009c59737fa --- /dev/null +++ 
b/jstests/noPassthrough/queryStats/query_stats_sampling_rate.js @@ -0,0 +1,38 @@ +/** + * Test that calls to read from telemetry store fail when sampling rate is not greater than 0 even + * if feature flag is on. + * @tags: [featureFlagQueryStats] + */ +load('jstests/libs/analyze_plan.js'); + +(function() { +"use strict"; + +let options = { + setParameter: {internalQueryStatsSamplingRate: 0}, +}; + +const conn = MongoRunner.runMongod(options); +const testdb = conn.getDB('test'); +var coll = testdb[jsTestName()]; +coll.drop(); +for (var i = 0; i < 20; i++) { + coll.insert({foo: 0, bar: Math.floor(Math.random() * 3)}); +} + +coll.aggregate([{$match: {foo: 1}}], {cursor: {batchSize: 2}}); + +// Reading telemetry store with a sampling rate of 0 should return 0 documents. +let telStore = testdb.adminCommand({aggregate: 1, pipeline: [{$queryStats: {}}], cursor: {}}); +assert.eq(telStore.cursor.firstBatch.length, 0); + +// Reading telemetry store should work now with a sampling rate of greater than 0. +assert.commandWorked( + testdb.adminCommand({setParameter: 1, internalQueryStatsSamplingRate: 2147483647})); +coll.aggregate([{$match: {foo: 1}}], {cursor: {batchSize: 2}}); +telStore = assert.commandWorked( + testdb.adminCommand({aggregate: 1, pipeline: [{$queryStats: {}}], cursor: {}})); +assert.eq(telStore.cursor.firstBatch.length, 1); + +MongoRunner.stopMongod(conn); +}()); diff --git a/jstests/noPassthrough/queryStats/query_stats_server_status_metrics.js b/jstests/noPassthrough/queryStats/query_stats_server_status_metrics.js new file mode 100644 index 00000000000..2bc7d808898 --- /dev/null +++ b/jstests/noPassthrough/queryStats/query_stats_server_status_metrics.js @@ -0,0 +1,181 @@ +/** + * Test the telemetry related serverStatus metrics. 
+ * @tags: [featureFlagQueryStats] + */ +load('jstests/libs/analyze_plan.js'); + +(function() { +"use strict"; + +function runTestWithMongodOptions(mongodOptions, test, testOptions) { + const conn = MongoRunner.runMongod(mongodOptions); + const testDB = conn.getDB('test'); + const coll = testDB[jsTestName()]; + + test(conn, testDB, coll, testOptions); + + MongoRunner.stopMongod(conn); +} + +/** + * Test serverStatus metric which counts the number of evicted entries. + * + * testOptions must include `resetCacheSize` bool field; e.g., { resetCacheSize : true } + */ +function evictionTest(conn, testDB, coll, testOptions) { + const evictedBefore = testDB.serverStatus().metrics.queryStats.numEvicted; + assert.eq(evictedBefore, 0); + for (var i = 0; i < 4000; i++) { + let query = {}; + query["foo" + i] = "bar"; + coll.aggregate([{$match: query}]).itcount(); + } + if (!testOptions.resetCacheSize) { + const evictedAfter = testDB.serverStatus().metrics.queryStats.numEvicted; + assert.gt(evictedAfter, 0); + return; + } + // Make sure number of evicted entries increases when the cache size is reset, which forces out + // least recently used entries to meet the new, smaller size requirement. + assert.eq(testDB.serverStatus().metrics.queryStats.numEvicted, 0); + assert.commandWorked( + testDB.adminCommand({setParameter: 1, internalQueryStatsCacheSize: "1MB"})); + const evictedAfter = testDB.serverStatus().metrics.queryStats.numEvicted; + assert.gt(evictedAfter, 0); +} + +/** + * Test serverStatus metric which counts the number of requests for which telemetry is not collected + * due to rate-limiting. 
+ * + * testOptions must include `samplingRate` and `numRequests` number fields; + * e.g., { samplingRate: 2147483647, numRequests: 20 } + */ +function countRateLimitedRequestsTest(conn, testDB, coll, testOptions) { + const numRateLimitedRequestsBefore = + testDB.serverStatus().metrics.queryStats.numRateLimitedRequests; + assert.eq(numRateLimitedRequestsBefore, 0); + + coll.insert({a: 0}); + + // Running numRequests / 2 times since we dispatch two requests per iteration + for (var i = 0; i < testOptions.numRequests / 2; i++) { + coll.find({a: 0}).toArray(); + coll.aggregate([{$match: {a: 1}}]); + } + + const numRateLimitedRequestsAfter = + testDB.serverStatus().metrics.queryStats.numRateLimitedRequests; + + if (testOptions.samplingRate === 0) { + // Telemetry should not be collected for any requests. + assert.eq(numRateLimitedRequestsAfter, testOptions.numRequests); + } else if (testOptions.samplingRate >= testOptions.numRequests) { + // Telemetry should be collected for all requests. + assert.eq(numRateLimitedRequestsAfter, 0); + } else { + // Telemetry should be collected for some but not all requests. + assert.gt(numRateLimitedRequestsAfter, 0); + assert.lt(numRateLimitedRequestsAfter, testOptions.numRequests); + } +} + +function telemetryStoreSizeEstimateTest(conn, testDB, coll, testOptions) { + assert.eq(testDB.serverStatus().metrics.queryStats.queryStatsStoreSizeEstimateBytes, 0); + let halfWayPointSize; + // Only using three digit numbers (eg 100, 101) means the string length will be the same for all + // entries and therefore the key size will be the same for all entries, which makes predicting + // the total size of the store clean and easy. + for (var i = 100; i < 200; i++) { + coll.aggregate([{$match: {["foo" + i]: "bar"}}]).itcount(); + if (i == 150) { + halfWayPointSize = + testDB.serverStatus().metrics.queryStats.queryStatsStoreSizeEstimateBytes; + } + } + // Confirm that telemetry store has grown and size is non-zero. 
+ assert.gt(halfWayPointSize, 0); + const fullSize = testDB.serverStatus().metrics.queryStats.queryStatsStoreSizeEstimateBytes; + assert.gt(fullSize, 0); + // Make sure the final telemetry store size is twice as much as the halfway point size (+/- 5%) + assert(fullSize >= halfWayPointSize * 1.95 && fullSize <= halfWayPointSize * 2.05, + tojson({fullSize, halfWayPointSize})); +} + +function telemetryStoreWriteErrorsTest(conn, testDB, coll, testOptions) { + const debugBuild = testDB.adminCommand('buildInfo').debug; + if (debugBuild) { + jsTestLog("Skipping telemetry store write errors test because debug build will tassert."); + return; + } + + const errorsBefore = testDB.serverStatus().metrics.queryStats.numQueryStatsStoreWriteErrors; + assert.eq(errorsBefore, 0); + for (let i = 0; i < 5; i++) { + // Command should succeed and record the error. + let query = {}; + query["foo" + i] = "bar"; + coll.aggregate([{$match: query}]).itcount(); + } + + // Make sure that we recorded a write error for each run. + // TODO SERVER-73152 we attempt to write to the telemetry store twice for each aggregate, which + // seems wrong. + assert.eq(testDB.serverStatus().metrics.queryStats.numQueryStatsStoreWriteErrors, 10); +} + +/** + * In this configuration, we insert enough entries into the telemetry store to trigger LRU + * eviction. + */ +runTestWithMongodOptions({ + setParameter: {internalQueryStatsCacheSize: "1MB", internalQueryStatsSamplingRate: -1}, +}, + evictionTest, + {resetCacheSize: false}); +/** + * In this configuration, eviction is triggered only when the telemetry store size is reset. + * */ +runTestWithMongodOptions({ + setParameter: {internalQueryStatsCacheSize: "4MB", internalQueryStatsSamplingRate: -1}, +}, + evictionTest, + {resetCacheSize: true}); + +/** + * In this configuration, every query is sampled, so no requests should be rate-limited. 
+ */ +runTestWithMongodOptions({ + setParameter: {internalQueryStatsSamplingRate: -1}, +}, + countRateLimitedRequestsTest, + {samplingRate: 2147483647, numRequests: 20}); + +/** + * In this configuration, the sampling rate is set so that some but not all requests are + * rate-limited. + */ +runTestWithMongodOptions({ + setParameter: {internalQueryStatsSamplingRate: 10}, +}, + countRateLimitedRequestsTest, + {samplingRate: 10, numRequests: 20}); + +/** + * Sample all queries and assert that the size of telemetry store is equal to num entries * entry + * size + */ +runTestWithMongodOptions({ + setParameter: {internalQueryStatsSamplingRate: -1}, +}, + telemetryStoreSizeEstimateTest); + +/** + * Use a very small telemetry store size and assert that errors in writing to the telemetry store + * are tracked. + */ +runTestWithMongodOptions({ + setParameter: {internalQueryStatsCacheSize: "0.00001MB", internalQueryStatsSamplingRate: -1}, +}, + telemetryStoreWriteErrorsTest); +}()); diff --git a/jstests/noPassthrough/queryStats/query_stats_upgrade.js b/jstests/noPassthrough/queryStats/query_stats_upgrade.js new file mode 100644 index 00000000000..919d9f87baf --- /dev/null +++ b/jstests/noPassthrough/queryStats/query_stats_upgrade.js @@ -0,0 +1,43 @@ +/** + * Test that telemetry doesn't work on a lower FCV version but works after an FCV upgrade. + * @tags: [featureFlagQueryStats] + */ +load('jstests/libs/analyze_plan.js'); +load("jstests/libs/feature_flag_util.js"); + +(function() { +"use strict"; + +const dbpath = MongoRunner.dataPath + jsTestName(); +let conn = MongoRunner.runMongod({dbpath: dbpath}); +let testDB = conn.getDB(jsTestName()); +// This test should only be run with the flag enabled. 
+assert(FeatureFlagUtil.isEnabled(testDB, "QueryStats")); + +function testLower(restart = false) { + let adminDB = conn.getDB("admin"); + assert.commandWorked(adminDB.runCommand( + {setFeatureCompatibilityVersion: binVersionToFCV("last-lts"), confirm: true})); + if (restart) { + MongoRunner.stopMongod(conn); + conn = MongoRunner.runMongod({dbpath: dbpath, noCleanData: true}); + testDB = conn.getDB(jsTestName()); + adminDB = conn.getDB("admin"); + } + + assert.commandFailedWithCode( + testDB.adminCommand({aggregate: 1, pipeline: [{$queryStats: {}}], cursor: {}}), 6579000); + + // Upgrade FCV. + assert.commandWorked(adminDB.runCommand( + {setFeatureCompatibilityVersion: binVersionToFCV("latest"), confirm: true})); + + // We should be able to run a telemetry pipeline now that the FCV is correct. + assert.commandWorked( + testDB.adminCommand({aggregate: 1, pipeline: [{$queryStats: {}}], cursor: {}}), + ); +} +testLower(true); +testLower(false); +MongoRunner.stopMongod(conn); +})(); diff --git a/jstests/noPassthrough/queryStats/redact_queries_with_nonobject_fields.js b/jstests/noPassthrough/queryStats/redact_queries_with_nonobject_fields.js new file mode 100644 index 00000000000..7528ab9a4ab --- /dev/null +++ b/jstests/noPassthrough/queryStats/redact_queries_with_nonobject_fields.js @@ -0,0 +1,76 @@ +/** + * Test that telemetry key generation works for queries with non-object fields. + * @tags: [featureFlagQueryStats] + */ +load('jstests/libs/analyze_plan.js'); + +(function() { +"use strict"; + +// Turn on the collecting of telemetry metrics. 
+let options = { + setParameter: {internalQueryStatsSamplingRate: -1}, +}; + +const conn = MongoRunner.runMongod(options); +const testDB = conn.getDB('test'); +var collA = testDB[jsTestName()]; +var collB = testDB[jsTestName() + 'Two']; +collA.drop(); +collB.drop(); + +for (var i = 0; i < 200; i++) { + collA.insert({foo: 0, bar: Math.floor(Math.random() * 3)}); + collA.insert({foo: 1, bar: Math.floor(Math.random() * -2)}); + collB.insert({foo: Math.floor(Math.random() * 2), bar: Math.floor(Math.random() * 2)}); +} + +function confirmAggSuccess(collName, pipeline) { + const command = {aggregate: collName, cursor: {}}; + command.pipeline = pipeline; + assert.commandWorked(testDB.runCommand(command)); +} +// Test with non-object fields $limit and $skip. +confirmAggSuccess(collA.getName(), [{$sort: {bar: -1}}, {$limit: 2}, {$match: {foo: {$lte: 2}}}]); +confirmAggSuccess(collA.getName(), [{$sort: {bar: -1}}, {$skip: 50}, {$match: {foo: {$lte: 2}}}]); +confirmAggSuccess(collA.getName(), + [{$sort: {bar: -1}}, {$limit: 2}, {$skip: 50}, {$match: {foo: 0}}]); + +// Test non-object field, $unionWith. +confirmAggSuccess(collA.getName(), [{$unionWith: collB.getName()}]); + +// Test $limit in $setWindowFields for good measure. +confirmAggSuccess(collA.getName(), [ + {$_internalInhibitOptimization: {}}, + { + $setWindowFields: { + sortBy: {foo: 1}, + output: {sum: {$sum: "$bar", window: {documents: ["unbounded", "current"]}}} + } + }, + {$sort: {foo: 1}}, + {$limit: 5} +]); +// Test find commands containing non-object fields +assert.commandWorked(testDB.runCommand({find: collA.getName(), limit: 20})); +assert.commandWorked(testDB.runCommand({find: collA.getName(), skip: 199})); +collA.find().skip(100); + +// findOne has a nonobject field, $limit. 
+collB.findOne();
+collB.findOne({foo: 1});
+
+// Test non-object field $unwind
+confirmAggSuccess(
+    collA.getName(), [{
+        "$facet": {
+            "productOfJoin": [
+                {"$lookup": {"from": collB.getName(), "pipeline": [{"$match": {}}], "as": "join"}},
+                {"$unwind": "$join"},
+                {"$project": {"str": 1}}
+            ]
+        }
+    }]);
+
+MongoRunner.stopMongod(conn);
+}());