author     samontea <merciers.merciers@gmail.com>  2019-07-11 11:28:07 -0400
committer  samontea <merciers.merciers@gmail.com>  2019-07-12 11:53:57 -0400
commit     a8a8fabb17e9700aab633a67b24fe6147290bb92 (patch)
tree       6c3f617f3afad7e2363af59607de3e56146dcc8c
parent     a4b8e4e0549ebfffebb545459d34ee4faa4f1521 (diff)
download   mongo-a8a8fabb17e9700aab633a67b24fe6147290bb92.tar.gz
SERVER-40755 Expose statistics which indicate how many collection scans have executed
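For orientation, a minimal mongo shell sketch of how the counters introduced by this patch can be read once it is applied; the database and collection names below are illustrative and not part of the change:

    // Illustrative only: exercising the new collection scan counters.
    const statsDB = db.getSiblingDB("scanStatsDemo");  // hypothetical database
    const coll = statsDB.demo;                         // hypothetical collection
    coll.drop();
    assert.commandWorked(coll.insert({a: 1}));

    // With no index on 'a', this find() executes a (non-tailable) collection scan.
    assert.eq(1, coll.find({a: 1}).itcount());

    // Per-collection counters, exposed through the new $collStats option.
    const res = coll.aggregate([{$collStats: {queryExecStats: {}}}]).next();
    printjson(res.queryExecStats.collectionScans);  // {total: ..., nonTailable: ...}

    // Server-wide counters, exposed through serverStatus.
    printjson(db.serverStatus().metrics.queryExecutor.collectionScans);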
-rw-r--r--  jstests/aggregation/sources/collStats/query_exec_stats.js  83
-rw-r--r--  jstests/noPassthrough/server_status_query_exec_stats.js  64
-rw-r--r--  src/mongo/db/SConscript  2
-rw-r--r--  src/mongo/db/catalog/collection_info_cache.h  13
-rw-r--r--  src/mongo/db/catalog/collection_info_cache_impl.cpp  12
-rw-r--r--  src/mongo/db/catalog/collection_info_cache_impl.h  8
-rw-r--r--  src/mongo/db/collection_index_usage_tracker.cpp  26
-rw-r--r--  src/mongo/db/collection_index_usage_tracker.h  21
-rw-r--r--  src/mongo/db/commands/count_cmd.cpp  2
-rw-r--r--  src/mongo/db/commands/distinct.cpp  2
-rw-r--r--  src/mongo/db/commands/find_and_modify.cpp  4
-rw-r--r--  src/mongo/db/commands/mr.cpp  2
-rw-r--r--  src/mongo/db/commands/run_aggregate.cpp  2
-rw-r--r--  src/mongo/db/exec/collection_scan.cpp  1
-rw-r--r--  src/mongo/db/exec/plan_stats.h  2
-rw-r--r--  src/mongo/db/ops/write_ops_exec.cpp  4
-rw-r--r--  src/mongo/db/pipeline/document_source_coll_stats.cpp  21
-rw-r--r--  src/mongo/db/pipeline/document_source_cursor.cpp  2
-rw-r--r--  src/mongo/db/pipeline/mongo_process_interface.h  6
-rw-r--r--  src/mongo/db/pipeline/mongos_process_interface.h  6
-rw-r--r--  src/mongo/db/pipeline/process_interface_standalone.cpp  33
-rw-r--r--  src/mongo/db/pipeline/process_interface_standalone.h  3
-rw-r--r--  src/mongo/db/pipeline/stub_mongo_process_interface.h  6
-rw-r--r--  src/mongo/db/query/explain.cpp  8
-rw-r--r--  src/mongo/db/query/find.cpp  2
-rw-r--r--  src/mongo/db/query/plan_summary_stats.h  8
26 files changed, 324 insertions, 19 deletions
diff --git a/jstests/aggregation/sources/collStats/query_exec_stats.js b/jstests/aggregation/sources/collStats/query_exec_stats.js
new file mode 100644
index 00000000000..920f3ed84a6
--- /dev/null
+++ b/jstests/aggregation/sources/collStats/query_exec_stats.js
@@ -0,0 +1,83 @@
+// Test that queryExecStats within a $collStats stage returns the correct execution stats.
+// @tags: [assumes_no_implicit_collection_creation_after_drop]
+(function() {
+ "use strict";
+
+ if (jsTest.options().storageEngine === "mobile") {
+ print("Skipping test because storage engine isn't mobile");
+ return;
+ }
+
+ load("jstests/aggregation/extras/utils.js"); // For "assertErrorCode".
+ load("jstests/libs/fixture_helpers.js"); // For "FixtureHelpers".
+
+ const nDocs = 32;
+
+ const testDB = db.getSiblingDB("aggregation_query_exec_stats");
+ const coll = testDB.aggregation_query_exec_stats;
+ coll.drop();
+ assert.commandWorked(
+ testDB.createCollection("aggregation_query_exec_stats", {capped: true, size: nDocs * 100}));
+
+ for (let i = 0; i < nDocs; i++) {
+ assert.commandWorked(coll.insert({a: i}));
+ }
+
+ // Run a bunch of collection scans on the server.
+ for (let i = 0; i < nDocs; i++) {
+ assert.eq(coll.find({a: i}).itcount(), 1);
+ }
+
+ // Test that an error is returned if queryExecStats is not an object.
+ let pipeline = [{$collStats: {queryExecStats: 1}}];
+ assertErrorCode(coll, pipeline, 31141, "queryExecStats spec must be an object");
+ pipeline = [{$collStats: {queryExecStats: "1"}}];
+ assertErrorCode(coll, pipeline, 31141, "queryExecStats spec must be an object");
+
+ // Test the accuracy of the result of queryExecStats as a standalone option.
+ pipeline = [{$collStats: {queryExecStats: {}}}];
+ let result = coll.aggregate(pipeline).next();
+ assert.eq(nDocs, result.queryExecStats.collectionScans.total);
+ assert.eq(nDocs, result.queryExecStats.collectionScans.nonTailable);
+
+ // Test tailable collection scans update collectionScans counters appropriately.
+ for (let i = 0; i < nDocs; i++) {
+ assert.eq(coll.find({a: i}).tailable().itcount(), 1);
+ }
+ result = coll.aggregate(pipeline).next();
+ assert.eq(nDocs * 2, result.queryExecStats.collectionScans.total);
+ assert.eq(nDocs, result.queryExecStats.collectionScans.nonTailable);
+
+ // Run a query which will require the client to fetch multiple batches from the server. Ensure
+ // that the getMore commands don't increment the counter of collection scans.
+ assert.eq(coll.find({}).batchSize(2).itcount(), nDocs);
+ result = coll.aggregate(pipeline).next();
+ assert.eq((nDocs * 2) + 1, result.queryExecStats.collectionScans.total);
+ assert.eq(nDocs + 1, result.queryExecStats.collectionScans.nonTailable);
+
+ // Create an index to verify that index scans do not increment the collection scan counters.
+ assert.commandWorked(coll.createIndex({a: 1}));
+ // Run a bunch of index scans.
+ for (let i = 0; i < nDocs; i++) {
+ assert.eq(coll.find({a: i}).itcount(), 1);
+ }
+ result = coll.aggregate(pipeline).next();
+ // Assert that the number of collection scans hasn't increased.
+ assert.eq((nDocs * 2) + 1, result.queryExecStats.collectionScans.total);
+ assert.eq(nDocs + 1, result.queryExecStats.collectionScans.nonTailable);
+
+ // Test that we error when the collection does not exist.
+ coll.drop();
+ pipeline = [{$collStats: {queryExecStats: {}}}];
+ assertErrorCode(coll, pipeline, 31142);
+
+ // Test that we error when the database does not exist.
+ // TODO SERVER-33039 When running against a mongos, a non-existent database will cause all
+ // aggregations to return an empty result set.
+ assert.commandWorked(testDB.dropDatabase());
+ if (FixtureHelpers.isMongos(testDB)) {
+ assert.eq([], coll.aggregate(pipeline).toArray());
+ } else {
+ assertErrorCode(coll, pipeline, 31142);
+ }
+}());
diff --git a/jstests/noPassthrough/server_status_query_exec_stats.js b/jstests/noPassthrough/server_status_query_exec_stats.js
new file mode 100644
index 00000000000..3a5a050d428
--- /dev/null
+++ b/jstests/noPassthrough/server_status_query_exec_stats.js
@@ -0,0 +1,64 @@
+/**
+ * Tests for serverStatus metrics.queryExecutor stats.
+ */
+(function() {
+ "use strict";
+
+ if (jsTest.options().storageEngine === "mobile") {
+ print("Skipping test because storage engine isn't mobile");
+ return;
+ }
+
+ const conn = MongoRunner.runMongod();
+ assert.neq(null, conn, "mongod was unable to start up");
+ const db = conn.getDB(jsTest.name());
+ const coll = db[jsTest.name()];
+
+ let getCollectionScans = () => {
+ return db.serverStatus().metrics.queryExecutor.collectionScans.total;
+ };
+ let getCollectionScansNonTailable = () => {
+ return db.serverStatus().metrics.queryExecutor.collectionScans.nonTailable;
+ };
+
+ // Create and populate a capped collection so that we can run tailable queries.
+ const nDocs = 32;
+ coll.drop();
+ assert.commandWorked(db.createCollection(jsTest.name(), {capped: true, size: nDocs * 100}));
+
+ for (let i = 0; i < nDocs; i++) {
+ assert.commandWorked(coll.insert({a: i}));
+ }
+
+ // Test nontailable collection scans update collectionScans counters appropriately.
+ for (let i = 0; i < nDocs; i++) {
+ assert.eq(coll.find({a: i}).itcount(), 1);
+ assert.eq(i + 1, getCollectionScans());
+ assert.eq(i + 1, getCollectionScansNonTailable());
+ }
+
+ // Test tailable collection scans update collectionScans counters appropriately.
+ for (let i = 0; i < nDocs; i++) {
+ assert.eq(coll.find({a: i}).tailable().itcount(), 1);
+ assert.eq(nDocs + i + 1, getCollectionScans());
+ assert.eq(nDocs, getCollectionScansNonTailable());
+ }
+
+ // Run a query which will require the client to fetch multiple batches from the server. Ensure
+ // that the getMore commands don't increment the counter of collection scans.
+ assert.eq(coll.find({}).batchSize(2).itcount(), nDocs);
+ assert.eq((nDocs * 2) + 1, getCollectionScans());
+ assert.eq(nDocs + 1, getCollectionScansNonTailable());
+
+ // Create an index to verify that index scans do not increment the collection scan counters.
+ assert.commandWorked(coll.createIndex({a: 1}));
+ // Run a bunch of index scans.
+ for (let i = 0; i < nDocs; i++) {
+ assert.eq(coll.find({a: i}).itcount(), 1);
+ }
+ // Assert that the number of collection scans hasn't increased.
+ assert.eq((nDocs * 2) + 1, getCollectionScans());
+ assert.eq(nDocs + 1, getCollectionScansNonTailable());
+
+ MongoRunner.stopMongod(conn);
+}());
diff --git a/src/mongo/db/SConscript b/src/mongo/db/SConscript
index 6eef7683b2a..87b15380b0a 100644
--- a/src/mongo/db/SConscript
+++ b/src/mongo/db/SConscript
@@ -201,6 +201,7 @@ env.Library(
],
LIBDEPS=[
'$BUILD_DIR/mongo/base',
+ '$BUILD_DIR/mongo/db/commands/server_status_core',
],
)
@@ -455,6 +456,7 @@ env.Library(
],
LIBDEPS=[
'$BUILD_DIR/mongo/base',
+ '$BUILD_DIR/mongo/db/commands/server_status_core',
],
)
diff --git a/src/mongo/db/catalog/collection_info_cache.h b/src/mongo/db/catalog/collection_info_cache.h
index 254df201721..8ae84fb3eec 100644
--- a/src/mongo/db/catalog/collection_info_cache.h
+++ b/src/mongo/db/catalog/collection_info_cache.h
@@ -31,6 +31,7 @@
#include "mongo/db/collection_index_usage_tracker.h"
#include "mongo/db/query/plan_cache.h"
+#include "mongo/db/query/plan_summary_stats.h"
#include "mongo/db/query/query_settings.h"
#include "mongo/db/update_index_data.h"
@@ -77,6 +78,12 @@ public:
virtual CollectionIndexUsageMap getIndexUsageStats() const = 0;
/**
+ * Returns a struct containing information on the number of collection scans that have been
+ * performed.
+ */
+ virtual CollectionIndexUsageTracker::CollectionScanStats getCollectionScanStats() const = 0;
+
+ /**
* Register a newly-created index with the cache. Must be called whenever an index is
* built on the associated collection.
*
@@ -99,10 +106,12 @@ public:
/**
* Signal to the cache that a query operation has completed. 'indexesUsed' should list the
- * set of indexes used by the winning plan, if any.
+ * set of indexes used by the winning plan, if any. 'summaryStats.collectionScans' and
+ * 'summaryStats.collectionScansNonTailable' should be the number of collection scans and
+ * non-tailable collection scans that occurred while executing the winning plan.
*/
virtual void notifyOfQuery(OperationContext* const opCtx,
- const std::set<std::string>& indexesUsed) = 0;
+ const PlanSummaryStats& summaryStats) = 0;
virtual void setNs(NamespaceString ns) = 0;
};
diff --git a/src/mongo/db/catalog/collection_info_cache_impl.cpp b/src/mongo/db/catalog/collection_info_cache_impl.cpp
index bf4866b750a..b79def3cd04 100644
--- a/src/mongo/db/catalog/collection_info_cache_impl.cpp
+++ b/src/mongo/db/catalog/collection_info_cache_impl.cpp
@@ -38,6 +38,7 @@
#include "mongo/db/catalog/collection.h"
#include "mongo/db/catalog/index_catalog.h"
#include "mongo/db/concurrency/d_concurrency.h"
+#include "mongo/db/curop_metrics.h"
#include "mongo/db/fts/fts_spec.h"
#include "mongo/db/index/index_descriptor.h"
#include "mongo/db/index/wildcard_access_method.h"
@@ -162,7 +163,11 @@ void CollectionInfoCacheImpl::computeIndexKeys(OperationContext* opCtx) {
}
void CollectionInfoCacheImpl::notifyOfQuery(OperationContext* opCtx,
- const std::set<std::string>& indexesUsed) {
+ const PlanSummaryStats& summaryStats) {
+ _indexUsageTracker.recordCollectionScans(summaryStats.collectionScans);
+ _indexUsageTracker.recordCollectionScansNonTailable(summaryStats.collectionScansNonTailable);
+
+ const auto& indexesUsed = summaryStats.indexesUsed;
// Record indexes used to fulfill query.
for (auto it = indexesUsed.begin(); it != indexesUsed.end(); ++it) {
// This index should still exist, since the PlanExecutor would have been killed if the
@@ -263,4 +268,9 @@ void CollectionInfoCacheImpl::setNs(NamespaceString ns) {
}
}
+CollectionIndexUsageTracker::CollectionScanStats CollectionInfoCacheImpl::getCollectionScanStats()
+ const {
+ return _indexUsageTracker.getCollectionScanStats();
+}
+
} // namespace mongo
diff --git a/src/mongo/db/catalog/collection_info_cache_impl.h b/src/mongo/db/catalog/collection_info_cache_impl.h
index ff6566f36e5..cf66d8e2e99 100644
--- a/src/mongo/db/catalog/collection_info_cache_impl.h
+++ b/src/mongo/db/catalog/collection_info_cache_impl.h
@@ -76,6 +76,8 @@ public:
*/
CollectionIndexUsageMap getIndexUsageStats() const;
+ CollectionIndexUsageTracker::CollectionScanStats getCollectionScanStats() const override;
+
/**
* Builds internal cache state based on the current state of the Collection's IndexCatalog
*/
@@ -102,11 +104,7 @@ public:
*/
void clearQueryCache();
- /**
- * Signal to the cache that a query operation has completed. 'indexesUsed' should list the
- * set of indexes used by the winning plan, if any.
- */
- void notifyOfQuery(OperationContext* opCtx, const std::set<std::string>& indexesUsed);
+ void notifyOfQuery(OperationContext* opCtx, const PlanSummaryStats& summaryStats);
void setNs(NamespaceString ns) override;
diff --git a/src/mongo/db/collection_index_usage_tracker.cpp b/src/mongo/db/collection_index_usage_tracker.cpp
index dc0941ff4c8..3f78b3c406a 100644
--- a/src/mongo/db/collection_index_usage_tracker.cpp
+++ b/src/mongo/db/collection_index_usage_tracker.cpp
@@ -31,12 +31,23 @@
#include "mongo/platform/basic.h"
+#include "mongo/base/counter.h"
#include "mongo/db/collection_index_usage_tracker.h"
+#include "mongo/db/commands/server_status_metric.h"
#include "mongo/util/assert_util.h"
#include "mongo/util/clock_source.h"
#include "mongo/util/log.h"
namespace mongo {
+namespace {
+Counter64 collectionScansCounter;
+Counter64 collectionScansNonTailableCounter;
+
+ServerStatusMetricField<Counter64> displayCollectionScans("queryExecutor.collectionScans.total",
+ &collectionScansCounter);
+ServerStatusMetricField<Counter64> displayCollectionScansNonTailable(
+ "queryExecutor.collectionScans.nonTailable", &collectionScansNonTailableCounter);
+}  // namespace
CollectionIndexUsageTracker::CollectionIndexUsageTracker(ClockSource* clockSource)
: _clockSource(clockSource) {
@@ -50,6 +61,17 @@ void CollectionIndexUsageTracker::recordIndexAccess(StringData indexName) {
_indexUsageMap[indexName].accesses.fetchAndAdd(1);
}
+void CollectionIndexUsageTracker::recordCollectionScans(unsigned long long collectionScans) {
+ _collectionScans.fetchAndAdd(collectionScans);
+ collectionScansCounter.increment(collectionScans);
+}
+
+void CollectionIndexUsageTracker::recordCollectionScansNonTailable(
+ unsigned long long collectionScansNonTailable) {
+ _collectionScansNonTailable.fetchAndAdd(collectionScansNonTailable);
+ collectionScansNonTailableCounter.increment(collectionScansNonTailable);
+}
+
void CollectionIndexUsageTracker::registerIndex(StringData indexName, const BSONObj& indexKey) {
invariant(!indexName.empty());
dassert(_indexUsageMap.find(indexName) == _indexUsageMap.end());
@@ -68,4 +90,8 @@ CollectionIndexUsageMap CollectionIndexUsageTracker::getUsageStats() const {
return _indexUsageMap;
}
+CollectionIndexUsageTracker::CollectionScanStats
+CollectionIndexUsageTracker::getCollectionScanStats() const {
+ return {_collectionScans.load(), _collectionScansNonTailable.load()};
+}
} // namespace mongo
diff --git a/src/mongo/db/collection_index_usage_tracker.h b/src/mongo/db/collection_index_usage_tracker.h
index 62c3b610f41..b70d7887723 100644
--- a/src/mongo/db/collection_index_usage_tracker.h
+++ b/src/mongo/db/collection_index_usage_tracker.h
@@ -44,6 +44,9 @@ class ClockSource;
* considered "used" when it appears as part of a winning plan for an operation that uses the
* query system.
*
+ * It also tracks non-usage of indexes, i.e. it collects information about the collection scans
+ * that occur on the collection.
+ *
* Indexes must be registered and deregistered on creation/destruction.
*/
class CollectionIndexUsageTracker {
@@ -51,6 +54,11 @@ class CollectionIndexUsageTracker {
CollectionIndexUsageTracker& operator=(const CollectionIndexUsageTracker&) = delete;
public:
+ struct CollectionScanStats {
+ unsigned long long collectionScans{0};
+ unsigned long long collectionScansNonTailable{0};
+ };
+
struct IndexUsageStats {
IndexUsageStats() = default;
explicit IndexUsageStats(Date_t now, const BSONObj& key)
@@ -111,6 +119,15 @@ public:
*/
StringMap<CollectionIndexUsageTracker::IndexUsageStats> getUsageStats() const;
+ /**
+ * Get the current collection scan statistics. The returned struct only includes information
+ * about the collection scans that have occurred at the time of calling.
+ */
+ CollectionScanStats getCollectionScanStats() const;
+
+ void recordCollectionScans(unsigned long long collectionScans);
+ void recordCollectionScansNonTailable(unsigned long long collectionScansNonTailable);
+
private:
// Map from index name to usage statistics.
StringMap<CollectionIndexUsageTracker::IndexUsageStats> _indexUsageMap;
@@ -118,8 +135,10 @@ private:
// Clock source. Used when the 'trackerStartTime' time for an IndexUsageStats object needs to
// be set.
ClockSource* _clockSource;
+
+ AtomicWord<unsigned long long> _collectionScans{0};
+ AtomicWord<unsigned long long> _collectionScansNonTailable{0};
};
typedef StringMap<CollectionIndexUsageTracker::IndexUsageStats> CollectionIndexUsageMap;
-
} // namespace mongo
diff --git a/src/mongo/db/commands/count_cmd.cpp b/src/mongo/db/commands/count_cmd.cpp
index 82a6408b6e8..545afc0c174 100644
--- a/src/mongo/db/commands/count_cmd.cpp
+++ b/src/mongo/db/commands/count_cmd.cpp
@@ -243,7 +243,7 @@ public:
PlanSummaryStats summaryStats;
Explain::getSummaryStats(*exec, &summaryStats);
if (collection) {
- collection->infoCache()->notifyOfQuery(opCtx, summaryStats.indexesUsed);
+ collection->infoCache()->notifyOfQuery(opCtx, summaryStats);
}
curOp->debug().setPlanSummaryMetrics(summaryStats);
diff --git a/src/mongo/db/commands/distinct.cpp b/src/mongo/db/commands/distinct.cpp
index eee399d134d..ac156b32c30 100644
--- a/src/mongo/db/commands/distinct.cpp
+++ b/src/mongo/db/commands/distinct.cpp
@@ -285,7 +285,7 @@ public:
PlanSummaryStats stats;
Explain::getSummaryStats(*executor.getValue(), &stats);
if (collection) {
- collection->infoCache()->notifyOfQuery(opCtx, stats.indexesUsed);
+ collection->infoCache()->notifyOfQuery(opCtx, stats);
}
curOp->debug().setPlanSummaryMetrics(stats);
diff --git a/src/mongo/db/commands/find_and_modify.cpp b/src/mongo/db/commands/find_and_modify.cpp
index a60dfa214e0..ee2a87987d2 100644
--- a/src/mongo/db/commands/find_and_modify.cpp
+++ b/src/mongo/db/commands/find_and_modify.cpp
@@ -396,7 +396,7 @@ public:
PlanSummaryStats summaryStats;
Explain::getSummaryStats(*exec, &summaryStats);
if (collection) {
- collection->infoCache()->notifyOfQuery(opCtx, summaryStats.indexesUsed);
+ collection->infoCache()->notifyOfQuery(opCtx, summaryStats);
}
opDebug->setPlanSummaryMetrics(summaryStats);
@@ -494,7 +494,7 @@ public:
PlanSummaryStats summaryStats;
Explain::getSummaryStats(*exec, &summaryStats);
if (collection) {
- collection->infoCache()->notifyOfQuery(opCtx, summaryStats.indexesUsed);
+ collection->infoCache()->notifyOfQuery(opCtx, summaryStats);
}
UpdateStage::recordUpdateStatsInOpDebug(UpdateStage::getUpdateStats(exec.get()),
opDebug);
diff --git a/src/mongo/db/commands/mr.cpp b/src/mongo/db/commands/mr.cpp
index 45f0fd6fbed..74ec1dbfdbe 100644
--- a/src/mongo/db/commands/mr.cpp
+++ b/src/mongo/db/commands/mr.cpp
@@ -1563,7 +1563,7 @@ bool runMapReduce(OperationContext* opCtx,
// TODO SERVER-23261: Confirm whether this is the correct place to gather all
// metrics. There is no harm adding here for the time being.
curOp->debug().setPlanSummaryMetrics(stats);
- scopedAutoColl->getCollection()->infoCache()->notifyOfQuery(opCtx, stats.indexesUsed);
+ scopedAutoColl->getCollection()->infoCache()->notifyOfQuery(opCtx, stats);
if (curOp->shouldDBProfile()) {
BSONObjBuilder execStatsBob;
diff --git a/src/mongo/db/commands/run_aggregate.cpp b/src/mongo/db/commands/run_aggregate.cpp
index 1a62a4b0b34..ab63dec4dbe 100644
--- a/src/mongo/db/commands/run_aggregate.cpp
+++ b/src/mongo/db/commands/run_aggregate.cpp
@@ -786,7 +786,7 @@ Status runAggregate(OperationContext* opCtx,
// For an optimized away pipeline, signal the cache that a query operation has completed.
// For normal pipelines this is done in DocumentSourceCursor.
if (ctx && ctx->getCollection()) {
- ctx->getCollection()->infoCache()->notifyOfQuery(opCtx, stats.indexesUsed);
+ ctx->getCollection()->infoCache()->notifyOfQuery(opCtx, stats);
}
}
diff --git a/src/mongo/db/exec/collection_scan.cpp b/src/mongo/db/exec/collection_scan.cpp
index 6a1e64dd30f..e06493b39eb 100644
--- a/src/mongo/db/exec/collection_scan.cpp
+++ b/src/mongo/db/exec/collection_scan.cpp
@@ -67,6 +67,7 @@ CollectionScan::CollectionScan(OperationContext* opCtx,
// Explain reports the direction of the collection scan.
_specificStats.direction = params.direction;
_specificStats.maxTs = params.maxTs;
+ _specificStats.tailable = params.tailable;
invariant(!_params.shouldTrackLatestOplogTimestamp || collection->ns().isOplog());
if (params.maxTs) {
diff --git a/src/mongo/db/exec/plan_stats.h b/src/mongo/db/exec/plan_stats.h
index e85a508979f..08d623ffc44 100644
--- a/src/mongo/db/exec/plan_stats.h
+++ b/src/mongo/db/exec/plan_stats.h
@@ -198,6 +198,8 @@ struct CollectionScanStats : public SpecificStats {
// backwards.
int direction;
+ bool tailable{false};
+
// If present, indicates that the collection scan will stop and return EOF the first time it
// sees a document that does not pass the filter and has a "ts" Timestamp field greater than
// 'maxTs'.
diff --git a/src/mongo/db/ops/write_ops_exec.cpp b/src/mongo/db/ops/write_ops_exec.cpp
index 5176503ef73..954479180c3 100644
--- a/src/mongo/db/ops/write_ops_exec.cpp
+++ b/src/mongo/db/ops/write_ops_exec.cpp
@@ -651,7 +651,7 @@ static SingleWriteResult performSingleUpdateOp(OperationContext* opCtx,
PlanSummaryStats summary;
Explain::getSummaryStats(*exec, &summary);
if (collection->getCollection()) {
- collection->getCollection()->infoCache()->notifyOfQuery(opCtx, summary.indexesUsed);
+ collection->getCollection()->infoCache()->notifyOfQuery(opCtx, summary);
}
if (curOp.shouldDBProfile()) {
@@ -892,7 +892,7 @@ static SingleWriteResult performSingleDeleteOp(OperationContext* opCtx,
PlanSummaryStats summary;
Explain::getSummaryStats(*exec, &summary);
if (collection.getCollection()) {
- collection.getCollection()->infoCache()->notifyOfQuery(opCtx, summary.indexesUsed);
+ collection.getCollection()->infoCache()->notifyOfQuery(opCtx, summary);
}
curOp.debug().setPlanSummaryMetrics(summary);
diff --git a/src/mongo/db/pipeline/document_source_coll_stats.cpp b/src/mongo/db/pipeline/document_source_coll_stats.cpp
index df6063dad52..a02a6018231 100644
--- a/src/mongo/db/pipeline/document_source_coll_stats.cpp
+++ b/src/mongo/db/pipeline/document_source_coll_stats.cpp
@@ -85,6 +85,17 @@ intrusive_ptr<DocumentSource> DocumentSourceCollStats::createFromBson(
<< " of type "
<< typeName(elem.type()),
elem.type() == BSONType::Object);
+ } else if ("queryExecStats" == fieldName) {
+ uassert(31141,
+ str::stream() << "queryExecStats argument must be an empty object, but got "
+ << elem
+ << " of type "
+ << typeName(elem.type()),
+ elem.type() == BSONType::Object);
+ uassert(31170,
+ str::stream() << "queryExecStats argument must be an empty object, but got "
+ << elem,
+ elem.embeddedObject().isEmpty());
} else {
uasserted(40168, str::stream() << "unrecognized option to $collStats: " << fieldName);
}
@@ -149,6 +160,16 @@ DocumentSource::GetNextResult DocumentSourceCollStats::getNext() {
}
}
+ if (_collStatsSpec.hasField("queryExecStats")) {
+ Status status = pExpCtx->mongoProcessInterface->appendQueryExecStats(
+ pExpCtx->opCtx, pExpCtx->ns, &builder);
+ if (!status.isOK()) {
+ uasserted(31142,
+ str::stream() << "Unable to retrieve queryExecStats in $collStats stage: "
+ << status.reason());
+ }
+ }
+
return {Document(builder.obj())};
}
diff --git a/src/mongo/db/pipeline/document_source_cursor.cpp b/src/mongo/db/pipeline/document_source_cursor.cpp
index 1d971d188a8..f4e53f20d16 100644
--- a/src/mongo/db/pipeline/document_source_cursor.cpp
+++ b/src/mongo/db/pipeline/document_source_cursor.cpp
@@ -322,7 +322,7 @@ DocumentSourceCursor::DocumentSourceCursor(
}
if (collection) {
- collection->infoCache()->notifyOfQuery(pExpCtx->opCtx, _planSummaryStats.indexesUsed);
+ collection->infoCache()->notifyOfQuery(pExpCtx->opCtx, _planSummaryStats);
}
}
diff --git a/src/mongo/db/pipeline/mongo_process_interface.h b/src/mongo/db/pipeline/mongo_process_interface.h
index ae7e707e3e9..4e4165d29f3 100644
--- a/src/mongo/db/pipeline/mongo_process_interface.h
+++ b/src/mongo/db/pipeline/mongo_process_interface.h
@@ -195,6 +195,12 @@ public:
virtual Status appendRecordCount(OperationContext* opCtx,
const NamespaceString& nss,
BSONObjBuilder* builder) const = 0;
+ /**
+ * Appends the exec stats for the collection 'nss' to 'builder'.
+ */
+ virtual Status appendQueryExecStats(OperationContext* opCtx,
+ const NamespaceString& nss,
+ BSONObjBuilder* builder) const = 0;
/**
* Gets the collection options for the collection given by 'nss'. Throws
diff --git a/src/mongo/db/pipeline/mongos_process_interface.h b/src/mongo/db/pipeline/mongos_process_interface.h
index 39c69b2daf7..fcfe82d6321 100644
--- a/src/mongo/db/pipeline/mongos_process_interface.h
+++ b/src/mongo/db/pipeline/mongos_process_interface.h
@@ -144,6 +144,12 @@ public:
MONGO_UNREACHABLE;
}
+ Status appendQueryExecStats(OperationContext* opCtx,
+ const NamespaceString& nss,
+ BSONObjBuilder* builder) const final {
+ MONGO_UNREACHABLE;
+ }
+
BSONObj getCollectionOptions(const NamespaceString& nss) final {
MONGO_UNREACHABLE;
}
diff --git a/src/mongo/db/pipeline/process_interface_standalone.cpp b/src/mongo/db/pipeline/process_interface_standalone.cpp
index d7a51028503..6e13e969ac4 100644
--- a/src/mongo/db/pipeline/process_interface_standalone.cpp
+++ b/src/mongo/db/pipeline/process_interface_standalone.cpp
@@ -282,6 +282,39 @@ Status MongoInterfaceStandalone::appendRecordCount(OperationContext* opCtx,
return appendCollectionRecordCount(opCtx, nss, builder);
}
+Status MongoInterfaceStandalone::appendQueryExecStats(OperationContext* opCtx,
+ const NamespaceString& nss,
+ BSONObjBuilder* builder) const {
+ AutoGetCollectionForReadCommand autoColl(opCtx, nss);
+
+ if (!autoColl.getDb()) {
+ return {ErrorCodes::NamespaceNotFound,
+ str::stream() << "Database [" << nss.db().toString() << "] not found."};
+ }
+
+ Collection* collection = autoColl.getCollection();
+
+ if (!collection) {
+ return {ErrorCodes::NamespaceNotFound,
+ str::stream() << "Collection [" << nss.toString() << "] not found."};
+ }
+
+ auto collectionScanStats = collection->infoCache()->getCollectionScanStats();
+
+ dassert(collectionScanStats.collectionScans <=
+ static_cast<unsigned long long>(std::numeric_limits<long long>::max()));
+ dassert(collectionScanStats.collectionScansNonTailable <=
+ static_cast<unsigned long long>(std::numeric_limits<long long>::max()));
+ builder->append("queryExecStats",
+ BSON("collectionScans" << BSON(
+ "total" << static_cast<long long>(collectionScanStats.collectionScans)
+ << "nonTailable"
+ << static_cast<long long>(
+ collectionScanStats.collectionScansNonTailable))));
+
+ return Status::OK();
+}
+
BSONObj MongoInterfaceStandalone::getCollectionOptions(const NamespaceString& nss) {
const auto infos = _client.getCollectionInfos(nss.db().toString(), BSON("name" << nss.coll()));
if (infos.empty()) {
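For reference, an illustrative mongo shell sketch (not part of this patch) of the subdocument that appendQueryExecStats() above contributes to a $collStats result; 'example' is a hypothetical existing collection and the numbers are placeholders:

    const res = db.example.aggregate([{$collStats: {queryExecStats: {}}}]).next();
    // Alongside the usual $collStats fields, 'res' now carries roughly:
    //     queryExecStats: {collectionScans: {total: NumberLong(65), nonTailable: NumberLong(33)}}
    assert(res.queryExecStats.collectionScans.hasOwnProperty("total"));
    assert(res.queryExecStats.collectionScans.hasOwnProperty("nonTailable"));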
diff --git a/src/mongo/db/pipeline/process_interface_standalone.h b/src/mongo/db/pipeline/process_interface_standalone.h
index 1569e090c89..8d7cf4693c4 100644
--- a/src/mongo/db/pipeline/process_interface_standalone.h
+++ b/src/mongo/db/pipeline/process_interface_standalone.h
@@ -88,6 +88,9 @@ public:
Status appendRecordCount(OperationContext* opCtx,
const NamespaceString& nss,
BSONObjBuilder* builder) const final;
+ Status appendQueryExecStats(OperationContext* opCtx,
+ const NamespaceString& nss,
+ BSONObjBuilder* builder) const final;
BSONObj getCollectionOptions(const NamespaceString& nss) final;
void renameIfOptionsAndIndexesHaveNotChanged(OperationContext* opCtx,
const BSONObj& renameCommandObj,
diff --git a/src/mongo/db/pipeline/stub_mongo_process_interface.h b/src/mongo/db/pipeline/stub_mongo_process_interface.h
index a8a4a02e1e4..82950fdc427 100644
--- a/src/mongo/db/pipeline/stub_mongo_process_interface.h
+++ b/src/mongo/db/pipeline/stub_mongo_process_interface.h
@@ -106,6 +106,12 @@ public:
MONGO_UNREACHABLE;
}
+ Status appendQueryExecStats(OperationContext* opCtx,
+ const NamespaceString& nss,
+ BSONObjBuilder* builder) const override {
+ MONGO_UNREACHABLE;
+ }
+
BSONObj getCollectionOptions(const NamespaceString& nss) override {
MONGO_UNREACHABLE;
}
diff --git a/src/mongo/db/query/explain.cpp b/src/mongo/db/query/explain.cpp
index 709d8ffda4a..eb109928be4 100644
--- a/src/mongo/db/query/explain.cpp
+++ b/src/mongo/db/query/explain.cpp
@@ -34,6 +34,7 @@
#include "mongo/base/owned_pointer_vector.h"
#include "mongo/bson/util/builder.h"
#include "mongo/db/exec/cached_plan.h"
+#include "mongo/db/exec/collection_scan.h"
#include "mongo/db/exec/count_scan.h"
#include "mongo/db/exec/distinct_scan.h"
#include "mongo/db/exec/idhack.h"
@@ -1015,6 +1016,13 @@ void Explain::getSummaryStats(const PlanExecutor& exec, PlanSummaryStats* statsO
statsOut->replanned = cachedStats->replanned;
} else if (STAGE_MULTI_PLAN == stages[i]->stageType()) {
statsOut->fromMultiPlanner = true;
+ } else if (STAGE_COLLSCAN == stages[i]->stageType()) {
+ statsOut->collectionScans++;
+ const auto collScan = static_cast<const CollectionScan*>(stages[i]);
+ const auto collScanStats =
+ static_cast<const CollectionScanStats*>(collScan->getSpecificStats());
+ if (!collScanStats->tailable)
+ statsOut->collectionScansNonTailable++;
}
}
}
diff --git a/src/mongo/db/query/find.cpp b/src/mongo/db/query/find.cpp
index 8f4132b1eaf..f9de0152b5c 100644
--- a/src/mongo/db/query/find.cpp
+++ b/src/mongo/db/query/find.cpp
@@ -150,7 +150,7 @@ void endQueryOp(OperationContext* opCtx,
curOp->debug().setPlanSummaryMetrics(summaryStats);
if (collection) {
- collection->infoCache()->notifyOfQuery(opCtx, summaryStats.indexesUsed);
+ collection->infoCache()->notifyOfQuery(opCtx, summaryStats);
}
if (curOp->shouldDBProfile()) {
diff --git a/src/mongo/db/query/plan_summary_stats.h b/src/mongo/db/query/plan_summary_stats.h
index 35deb4d83c0..a0ded1f2755 100644
--- a/src/mongo/db/query/plan_summary_stats.h
+++ b/src/mongo/db/query/plan_summary_stats.h
@@ -50,6 +50,14 @@ struct PlanSummaryStats {
// The number of milliseconds spent inside the root stage's work() method.
long long executionTimeMillis = 0;
+ // The number of collection scans that occur during execution. Note that more than one
+ // collection scan may happen during execution (e.g. for $lookup execution).
+ long long collectionScans = 0;
+
+ // The number of non-tailable collection scans that occur during execution. As above, more
+ // than one such scan may happen during execution (e.g. for $lookup execution).
+ long long collectionScansNonTailable = 0;
+
// Did this plan use an in-memory sort stage?
bool hasSortStage = false;