summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--jstests/auth/lib/commands_lib.js24
-rw-r--r--jstests/core/index_stats.js93
-rw-r--r--src/mongo/db/SConscript23
-rw-r--r--src/mongo/db/auth/action_types.txt1
-rw-r--r--src/mongo/db/auth/role_graph_builtin_roles.cpp2
-rw-r--r--src/mongo/db/catalog/collection.cpp4
-rw-r--r--src/mongo/db/catalog/collection_compact.cpp4
-rw-r--r--src/mongo/db/catalog/collection_info_cache.cpp74
-rw-r--r--src/mongo/db/catalog/collection_info_cache.h69
-rw-r--r--src/mongo/db/catalog/index_catalog.cpp14
-rw-r--r--src/mongo/db/catalog/index_create.cpp7
-rw-r--r--src/mongo/db/collection_index_usage_tracker.cpp70
-rw-r--r--src/mongo/db/collection_index_usage_tracker.h116
-rw-r--r--src/mongo/db/collection_index_usage_tracker_test.cpp144
-rw-r--r--src/mongo/db/commands/count_cmd.cpp6
-rw-r--r--src/mongo/db/commands/distinct.cpp20
-rw-r--r--src/mongo/db/commands/find_and_modify.cpp18
-rw-r--r--src/mongo/db/commands/find_cmd.cpp4
-rw-r--r--src/mongo/db/commands/geo_near_cmd.cpp27
-rw-r--r--src/mongo/db/commands/group_cmd.cpp7
-rw-r--r--src/mongo/db/commands/write_commands/batch_executor.cpp16
-rw-r--r--src/mongo/db/exec/idhack.cpp28
-rw-r--r--src/mongo/db/exec/idhack.h13
-rw-r--r--src/mongo/db/exec/plan_stats.h2
-rw-r--r--src/mongo/db/instance.cpp33
-rw-r--r--src/mongo/db/ops/update_lifecycle_impl.cpp2
-rw-r--r--src/mongo/db/pipeline/SConscript1
-rw-r--r--src/mongo/db/pipeline/document_source.h34
-rw-r--r--src/mongo/db/pipeline/document_source_index_stats.cpp81
-rw-r--r--src/mongo/db/pipeline/pipeline.cpp13
-rw-r--r--src/mongo/db/pipeline/pipeline_d.cpp19
-rw-r--r--src/mongo/db/query/explain.cpp40
-rw-r--r--src/mongo/db/query/explain.h3
-rw-r--r--src/mongo/db/query/find.cpp9
-rw-r--r--src/mongo/db/query/find.h4
-rw-r--r--src/mongo/db/query/get_executor.cpp32
36 files changed, 943 insertions, 114 deletions
diff --git a/jstests/auth/lib/commands_lib.js b/jstests/auth/lib/commands_lib.js
index e44fc56117b..fd56559300e 100644
--- a/jstests/auth/lib/commands_lib.js
+++ b/jstests/auth/lib/commands_lib.js
@@ -327,6 +327,30 @@ var authCommandsLib = {
]
},
{
+ testname: "aggregate_indexStats",
+ command: {aggregate: "foo", pipeline: [{$indexStats: {}}]},
+ setup: function (db) {
+ db.createCollection("foo");
+ },
+ teardown: function (db) {
+ db.foo.drop();
+ },
+ testcases: [
+ {
+ runOnDb: firstDbName,
+ roles: {
+ clusterMonitor: 1,
+ clusterAdmin: 1,
+ root: 1,
+ __system: 1
+ },
+ privileges: [
+ {resource: {anyResource: true}, actions: ["indexStats"]}
+ ]
+ }
+ ]
+ },
+ {
testname: "appendOplogNote",
command: {appendOplogNote: 1, data: {a: 1}},
skipSharded: true,
diff --git a/jstests/core/index_stats.js b/jstests/core/index_stats.js
new file mode 100644
index 00000000000..93fe06f376f
--- /dev/null
+++ b/jstests/core/index_stats.js
@@ -0,0 +1,93 @@
+(function() {
+ "use strict";
+ var colName = "jstests_index_stats";
+ var col = db[colName];
+ col.drop();
+
+ var getUsageCount = function (indexName) {
+ var cursor = col.aggregate([{$indexStats: {}}]);
+ while (cursor.hasNext()) {
+ var doc = cursor.next();
+
+ if (doc.name === indexName) {
+ return doc.usageStats.operations;
+ }
+ }
+
+ return undefined;
+ }
+
+ assert.writeOK(col.insert({a: 1, b: 1, c: 1}));
+ assert.writeOK(col.insert({a: 2, b: 2, c: 2}));
+ assert.writeOK(col.insert({a: 3, b: 3, c: 3}));
+
+ // Confirm no index stats object exists prior to index creation.
+ col.findOne({a: 1});
+ assert.eq(undefined, getUsageCount("a_1"));
+
+ // Create indexes.
+ assert.commandWorked(col.createIndex({a: 1}, {name: "a_1"}));
+ assert.commandWorked(col.createIndex({b: 1, c: 1}, {name: "b_1_c_1"}));
+ var countA = 0;
+ var countB = 0;
+
+ // Confirm a stats object exists post index creation (with 0 count).
+ assert.eq(countA, getUsageCount("a_1"));
+
+ // Confirm index stats tick on find().
+ col.findOne({a: 1});
+ countA++;
+
+ assert.eq(countA, getUsageCount("a_1"));
+
+ // Confirm index stats tick on findAndModify().
+ var res = db.runCommand({findAndModify: colName,
+ query: {a: 1},
+ update: {$set: {d: 1}},
+ 'new': true});
+ assert.commandWorked(res);
+ countA++;
+ assert.eq(countA, getUsageCount("a_1"));
+
+ // Confirm index stats tick on distinct().
+ res = db.runCommand({distinct: colName, key: "b", query: {b: 1}});
+ assert.commandWorked(res);
+ countB++;
+ assert.eq(countB, getUsageCount("b_1_c_1"));
+
+ // Confirm index stats tick on group().
+ res = db.runCommand({group: {ns: colName,
+ key: {b: 1, c: 1},
+ cond: {b: {$gt: 0}},
+ $reduce: function(curr, result) {},
+ initial: {}}});
+ assert.commandWorked(res);
+ countB++;
+ assert.eq(countB, getUsageCount("b_1_c_1"));
+
+ // Confirm index stats tick on update().
+ assert.writeOK(col.update({a: 2}, {$set: {d: 2}}));
+ countA++;
+ assert.eq(countA, getUsageCount("a_1"));
+
+ // Confirm index stats tick on remove().
+ assert.writeOK(col.remove({a: 2}));
+ countA++;
+ assert.eq(countA, getUsageCount("a_1"));
+
+ // Confirm multiple index $or operation ticks all involved indexes.
+ col.findOne({$or: [{a: 1}, {b: 1, c: 1}]});
+ countA++;
+ countB++;
+ assert.eq(countA, getUsageCount("a_1"));
+ assert.eq(countB, getUsageCount("b_1_c_1"));
+
+ // Confirm index stats object does not exist post index drop.
+ assert.commandWorked(col.dropIndex("b_1_c_1"));
+ countB = 0;
+ assert.eq(undefined, getUsageCount("b_1_c_1"));
+
+ // Confirm index stats object exists with count 0 once index is recreated.
+ assert.commandWorked(col.createIndex({b: 1, c: 1}, {name: "b_1_c_1"}));
+ assert.eq(countB, getUsageCount("b_1_c_1"));
+})();
diff --git a/src/mongo/db/SConscript b/src/mongo/db/SConscript
index aad06eea5d9..3ca65f44c82 100644
--- a/src/mongo/db/SConscript
+++ b/src/mongo/db/SConscript
@@ -286,6 +286,27 @@ env.CppUnitTest(
],
)
+env.Library(
+ target='collection_index_usage_tracker',
+ source=[
+ 'collection_index_usage_tracker.cpp'
+ ],
+ LIBDEPS=[
+ '$BUILD_DIR/mongo/base',
+ ],
+)
+
+env.CppUnitTest(
+ target='collection_index_usage_tracker_test',
+ source=[
+ 'collection_index_usage_tracker_test.cpp',
+ ],
+ LIBDEPS=[
+ "$BUILD_DIR/mongo/util/clock_source_mock",
+ 'collection_index_usage_tracker',
+ ],
+)
+
# This library exists because some libraries, such as our networking library, need access to server
# options, but not to the helpers to set them from the command line. libserver_options_core.a just
# has the structure for storing the server options, while libserver_options.a has the code to set
@@ -359,7 +380,6 @@ env.CppUnitTest(
],
)
-
# This library is linked into mongos and mongod only, not into the shell or any tools.
env.Library(
target="mongodandmongos",
@@ -662,6 +682,7 @@ serveronlyLibdeps = [
"catalog/collection_options",
"catalog/index_key_validate",
"commands/killcursors_common",
+ "collection_index_usage_tracker",
"common",
"concurrency/lock_manager",
"concurrency/write_conflict_exception",
diff --git a/src/mongo/db/auth/action_types.txt b/src/mongo/db/auth/action_types.txt
index 8c0f7122874..0e2c2aed753 100644
--- a/src/mongo/db/auth/action_types.txt
+++ b/src/mongo/db/auth/action_types.txt
@@ -57,6 +57,7 @@
"grantRolesToUser", # Not used for permissions checks, but to id the event in logs.
"hostInfo",
"impersonate",
+"indexStats",
"inprog",
"insert",
"internal", # Special action type that represents internal actions
diff --git a/src/mongo/db/auth/role_graph_builtin_roles.cpp b/src/mongo/db/auth/role_graph_builtin_roles.cpp
index bcb8b3f90c5..eff0ce4f9fa 100644
--- a/src/mongo/db/auth/role_graph_builtin_roles.cpp
+++ b/src/mongo/db/auth/role_graph_builtin_roles.cpp
@@ -160,7 +160,7 @@ MONGO_INITIALIZER(AuthorizationBuiltinRoles)(InitializerContext* context) {
// clusterMonitor role actions that target a database (or collection) resource
clusterMonitorRoleDatabaseActions << ActionType::collStats // dbAdmin gets this also
<< ActionType::dbStats // dbAdmin gets this also
- << ActionType::getShardVersion;
+ << ActionType::getShardVersion << ActionType::indexStats;
// hostManager role actions that target the cluster resource
hostManagerRoleClusterActions
diff --git a/src/mongo/db/catalog/collection.cpp b/src/mongo/db/catalog/collection.cpp
index cc8cfed71cc..2302fe29559 100644
--- a/src/mongo/db/catalog/collection.cpp
+++ b/src/mongo/db/catalog/collection.cpp
@@ -175,7 +175,8 @@ Collection::Collection(OperationContext* txn,
_indexCatalog.init(txn);
if (isCapped())
_recordStore->setCappedDeleteCallback(this);
- _infoCache.reset(txn);
+
+ _infoCache.init(txn);
}
Collection::~Collection() {
@@ -718,7 +719,6 @@ Status Collection::truncate(OperationContext* txn) {
if (!status.isOK())
return status;
_cursorManager.invalidateAll(false, "collection truncated");
- _infoCache.reset(txn);
// 3) truncate record store
status = _recordStore->truncate(txn);
diff --git a/src/mongo/db/catalog/collection_compact.cpp b/src/mongo/db/catalog/collection_compact.cpp
index e3f0623006f..7d0a9e266f6 100644
--- a/src/mongo/db/catalog/collection_compact.cpp
+++ b/src/mongo/db/catalog/collection_compact.cpp
@@ -125,10 +125,6 @@ StatusWith<CompactStats> Collection::compact(OperationContext* txn,
return StatusWith<CompactStats>(ErrorCodes::BadValue,
"cannot compact when indexes in progress");
-
- // same data, but might perform a little different after compact?
- _infoCache.reset(txn);
-
vector<BSONObj> indexSpecs;
{
IndexCatalog::IndexIterator ii(_indexCatalog.getIndexIterator(txn, false));
diff --git a/src/mongo/db/catalog/collection_info_cache.cpp b/src/mongo/db/catalog/collection_info_cache.cpp
index ba2e9c6befa..1fe87f116aa 100644
--- a/src/mongo/db/catalog/collection_info_cache.cpp
+++ b/src/mongo/db/catalog/collection_info_cache.cpp
@@ -41,6 +41,8 @@
#include "mongo/db/index_legacy.h"
#include "mongo/db/query/plan_cache.h"
#include "mongo/db/query/planner_ixselect.h"
+#include "mongo/db/service_context.h"
+#include "mongo/util/clock_source.h"
#include "mongo/util/debug_util.h"
#include "mongo/util/log.h"
@@ -50,19 +52,11 @@ CollectionInfoCache::CollectionInfoCache(Collection* collection)
: _collection(collection),
_keysComputed(false),
_planCache(new PlanCache(collection->ns().ns())),
- _querySettings(new QuerySettings()) {}
+ _querySettings(new QuerySettings()),
+ _indexUsageTracker(getGlobalServiceContext()->getClockSource()) {}
-void CollectionInfoCache::reset(OperationContext* txn) {
- LOG(1) << _collection->ns().ns() << ": clearing plan cache - collection info cache reset";
- clearQueryCache();
- _keysComputed = false;
- computeIndexKeys(txn);
- updatePlanCacheIndexEntries(txn);
- // query settings is not affected by info cache reset.
- // index filters should persist throughout life of collection
-}
-const UpdateIndexData& CollectionInfoCache::indexKeys(OperationContext* txn) const {
+const UpdateIndexData& CollectionInfoCache::getIndexKeys(OperationContext* txn) const {
// This requires "some" lock, and MODE_IS is an expression for that, for now.
dassert(txn->lockState()->isCollectionLockedForMode(_collection->ns().ns(), MODE_IS));
invariant(_keysComputed);
@@ -70,8 +64,6 @@ const UpdateIndexData& CollectionInfoCache::indexKeys(OperationContext* txn) con
}
void CollectionInfoCache::computeIndexKeys(OperationContext* txn) {
- // This function modified objects attached to the Collection so we need a write lock
- invariant(txn->lockState()->isCollectionLockedForMode(_collection->ns().ns(), MODE_X));
_indexedPaths.clear();
IndexCatalog::IndexIterator i = _collection->getIndexCatalog()->getIndexIterator(txn, true);
@@ -123,7 +115,20 @@ void CollectionInfoCache::computeIndexKeys(OperationContext* txn) {
_keysComputed = true;
}
+void CollectionInfoCache::notifyOfQuery(OperationContext* txn,
+ const std::set<std::string>& indexesUsed) {
+ // Record indexes used to fulfill query.
+ for (auto it = indexesUsed.begin(); it != indexesUsed.end(); ++it) {
+ if (NULL == _collection->getIndexCatalog()->findIndexByName(txn, *it)) {
+ // Index removed since the operation started. Nothing to report.
+ continue;
+ }
+ _indexUsageTracker.recordIndexAccess(*it);
+ }
+}
+
void CollectionInfoCache::clearQueryCache() {
+ LOG(1) << _collection->ns().ns() << ": clearing plan cache - collection info cache reset";
if (NULL != _planCache.get()) {
_planCache->clear();
}
@@ -160,4 +165,47 @@ void CollectionInfoCache::updatePlanCacheIndexEntries(OperationContext* txn) {
_planCache->notifyOfIndexEntries(indexEntries);
}
+
+void CollectionInfoCache::init(OperationContext* txn) {
+ // Requires exclusive collection lock.
+ invariant(txn->lockState()->isCollectionLockedForMode(_collection->ns().ns(), MODE_X));
+
+ const bool includeUnfinishedIndexes = false;
+ IndexCatalog::IndexIterator ii =
+ _collection->getIndexCatalog()->getIndexIterator(txn, includeUnfinishedIndexes);
+ while (ii.more()) {
+ const IndexDescriptor* desc = ii.next();
+ _indexUsageTracker.registerIndex(desc->indexName());
+ }
+
+ rebuildIndexData(txn);
+}
+
+void CollectionInfoCache::addedIndex(OperationContext* txn, StringData indexName) {
+ // Requires exclusive collection lock.
+ invariant(txn->lockState()->isCollectionLockedForMode(_collection->ns().ns(), MODE_X));
+
+ rebuildIndexData(txn);
+ _indexUsageTracker.registerIndex(indexName);
+}
+
+void CollectionInfoCache::droppedIndex(OperationContext* txn, StringData indexName) {
+ // Requires exclusive collection lock.
+ invariant(txn->lockState()->isCollectionLockedForMode(_collection->ns().ns(), MODE_X));
+
+ rebuildIndexData(txn);
+ _indexUsageTracker.unregisterIndex(indexName);
+}
+
+void CollectionInfoCache::rebuildIndexData(OperationContext* txn) {
+ clearQueryCache();
+
+ _keysComputed = false;
+ computeIndexKeys(txn);
+ updatePlanCacheIndexEntries(txn);
+}
+
+CollectionIndexUsageMap CollectionInfoCache::getIndexUsageStats() const {
+ return _indexUsageTracker.getUsageStats();
+}
}
diff --git a/src/mongo/db/catalog/collection_info_cache.h b/src/mongo/db/catalog/collection_info_cache.h
index 05462ffaad4..46d3a9e1784 100644
--- a/src/mongo/db/catalog/collection_info_cache.h
+++ b/src/mongo/db/catalog/collection_info_cache.h
@@ -30,7 +30,7 @@
#pragma once
-
+#include "mongo/db/collection_index_usage_tracker.h"
#include "mongo/db/query/plan_cache.h"
#include "mongo/db/query/query_settings.h"
#include "mongo/db/update_index_data.h"
@@ -38,6 +38,7 @@
namespace mongo {
class Collection;
+class OperationContext;
/**
* this is for storing things that you want to cache about a single collection
@@ -47,15 +48,6 @@ class CollectionInfoCache {
public:
CollectionInfoCache(Collection* collection);
- /*
- * Resets entire cache state. Must be called under exclusive DB lock.
- */
- void reset(OperationContext* txn);
-
- //
- // New Query Execution
- //
-
/**
* Get the PlanCache for this collection.
*/
@@ -66,24 +58,52 @@ public:
*/
QuerySettings* getQuerySettings() const;
- // -------------------
-
/* get set of index keys for this namespace. handy to quickly check if a given
field is indexed (Note it might be a secondary component of a compound index.)
*/
- const UpdateIndexData& indexKeys(OperationContext* txn) const;
+ const UpdateIndexData& getIndexKeys(OperationContext* txn) const;
- // ---------------------
+ /**
+ * Returns cached index usage statistics for this collection. The map returned will contain
+ * an entry for each index in the collection along with both a usage counter and a timestamp
+ * representing the date/time the counter is valid from.
+ *
+ * Note for performance that this method returns a copy of a StringMap.
+ */
+ CollectionIndexUsageMap getIndexUsageStats() const;
+
+ /**
+ * Builds internal cache state based on the current state of the Collection's IndexCatalog
+ */
+ void init(OperationContext* txn);
+
+ /**
+ * Register a newly-created index with the cache. Must be called whenever an index is
+ * built on the associated collection.
+ *
+ * Must be called under exclusive collection lock.
+ */
+ void addedIndex(OperationContext* txn, StringData indexName);
/**
- * Called when an index is added to this collection.
+ * Deregister a newly-dropped index with the cache. Must be called whenever an index is
+ * dropped on the associated collection.
+ *
+ * Must be called under exclusive collection lock.
*/
- void addedIndex(OperationContext* txn) {
- reset(txn);
- }
+ void droppedIndex(OperationContext* txn, StringData indexName);
+ /**
+ * Removes all cached query plans.
+ */
void clearQueryCache();
+ /**
+ * Signal to the cache that a query operation has completed. 'indexesUsed' should list the
+ * set of indexes used by the winning plan, if any.
+ */
+ void notifyOfQuery(OperationContext* txn, const std::set<std::string>& indexesUsed);
+
private:
Collection* _collection; // not owned
@@ -98,12 +118,17 @@ private:
// Includes index filters.
std::unique_ptr<QuerySettings> _querySettings;
- /**
- * Must be called under exclusive DB lock.
- */
- void computeIndexKeys(OperationContext* txn);
+ // Tracks index usage statistics for this collection.
+ CollectionIndexUsageTracker _indexUsageTracker;
+ void computeIndexKeys(OperationContext* txn);
void updatePlanCacheIndexEntries(OperationContext* txn);
+
+ /**
+ * Rebuilds cached information that is dependent on index composition. Must be called
+ * when index composition changes.
+ */
+ void rebuildIndexData(OperationContext* txn);
};
} // namespace mongo
diff --git a/src/mongo/db/catalog/index_catalog.cpp b/src/mongo/db/catalog/index_catalog.cpp
index 09ac1c7f185..80ae8e720b2 100644
--- a/src/mongo/db/catalog/index_catalog.cpp
+++ b/src/mongo/db/catalog/index_catalog.cpp
@@ -141,8 +141,10 @@ public:
virtual void commit() {}
virtual void rollback() {
+ // Need to preserve indexName as _desc no longer exists after remove().
+ const std::string indexName = _desc->indexName();
_entries->remove(_desc);
- _collection->infoCache()->reset(_txn);
+ _collection->infoCache()->droppedIndex(_txn, indexName);
}
private:
@@ -444,7 +446,7 @@ void IndexCatalog::IndexBuildBlock::success() {
_txn->recoveryUnit()->registerChange(new IndexCompletionChange(_txn, entry));
entry->setIsReady(true);
- _catalog->_collection->infoCache()->addedIndex(_txn);
+ _catalog->_collection->infoCache()->addedIndex(_txn, _indexName);
}
namespace {
@@ -822,7 +824,7 @@ public:
void rollback() final {
_entries->add(_entry);
- _collection->infoCache()->reset(_txn);
+ _collection->infoCache()->addedIndex(_txn, _entry->descriptor()->indexName());
}
private:
@@ -860,20 +862,16 @@ Status IndexCatalog::_dropIndex(OperationContext* txn, IndexCatalogEntry* entry)
false, str::stream() << "index '" << indexName << "' dropped");
// --------- START REAL WORK ----------
-
audit::logDropIndex(&cc(), indexName, _collection->ns().ns());
invariant(_entries.release(entry->descriptor()) == entry);
txn->recoveryUnit()->registerChange(new IndexRemoveChange(txn, _collection, &_entries, entry));
entry = NULL;
-
_deleteIndexFromDisk(txn, indexName, indexNamespace);
_checkMagic();
- // Now that we've dropped the index, ask the info cache to rebuild its cached view of
- // collection state.
- _collection->infoCache()->reset(txn);
+ _collection->infoCache()->droppedIndex(txn, indexName);
return Status::OK();
}
diff --git a/src/mongo/db/catalog/index_create.cpp b/src/mongo/db/catalog/index_create.cpp
index 26ed3032010..ecaea371df3 100644
--- a/src/mongo/db/catalog/index_create.cpp
+++ b/src/mongo/db/catalog/index_create.cpp
@@ -208,10 +208,6 @@ Status MultiIndexBlock::init(const std::vector<BSONObj>& indexSpecs) {
_indexes.push_back(std::move(index));
}
- // this is so that operations examining the list of indexes know there are more keys to look
- // at when doing things like in place updates, etc...
- _collection->infoCache()->addedIndex(_txn);
-
if (_buildInBackground)
_backgroundOperation.reset(new BackgroundOperation(ns));
@@ -361,9 +357,6 @@ void MultiIndexBlock::commit() {
_indexes[i].block->success();
}
- // this one is so operations examining the list of indexes know that the index is finished
- _collection->infoCache()->addedIndex(_txn);
-
_txn->recoveryUnit()->registerChange(new SetNeedToCleanupOnRollback(this));
_needToCleanup = false;
}
diff --git a/src/mongo/db/collection_index_usage_tracker.cpp b/src/mongo/db/collection_index_usage_tracker.cpp
new file mode 100644
index 00000000000..10894bcfe6e
--- /dev/null
+++ b/src/mongo/db/collection_index_usage_tracker.cpp
@@ -0,0 +1,70 @@
+/**
+ * Copyright (C) 2015 MongoDB Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the GNU Affero General Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#define MONGO_LOG_DEFAULT_COMPONENT ::mongo::logger::LogComponent::kStorage
+
+#include "mongo/platform/basic.h"
+
+#include "mongo/db/collection_index_usage_tracker.h"
+#include "mongo/util/assert_util.h"
+#include "mongo/util/clock_source.h"
+#include "mongo/util/log.h"
+
+namespace mongo {
+
+CollectionIndexUsageTracker::CollectionIndexUsageTracker(ClockSource* clockSource)
+ : _clockSource(clockSource) {
+ invariant(_clockSource);
+}
+
+void CollectionIndexUsageTracker::recordIndexAccess(StringData indexName) {
+ invariant(!indexName.empty());
+ dassert(_indexUsageMap.find(indexName) != _indexUsageMap.end());
+
+ _indexUsageMap[indexName].accesses.fetchAndAdd(1);
+}
+
+void CollectionIndexUsageTracker::registerIndex(StringData indexName) {
+ invariant(!indexName.empty());
+ dassert(_indexUsageMap.find(indexName) == _indexUsageMap.end());
+
+ // Create map entry.
+ _indexUsageMap[indexName] = IndexUsageStats(_clockSource->now());
+}
+
+void CollectionIndexUsageTracker::unregisterIndex(StringData indexName) {
+ invariant(!indexName.empty());
+
+ _indexUsageMap.erase(indexName);
+}
+
+CollectionIndexUsageMap CollectionIndexUsageTracker::getUsageStats() const {
+ return _indexUsageMap;
+}
+
+} // namespace mongo
diff --git a/src/mongo/db/collection_index_usage_tracker.h b/src/mongo/db/collection_index_usage_tracker.h
new file mode 100644
index 00000000000..def45712aad
--- /dev/null
+++ b/src/mongo/db/collection_index_usage_tracker.h
@@ -0,0 +1,116 @@
+/**
+ * Copyright (C) 2015 MongoDB Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the GNU Affero General Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#pragma once
+
+#include "mongo/base/disallow_copying.h"
+#include "mongo/base/string_data.h"
+#include "mongo/platform/atomic_word.h"
+#include "mongo/util/string_map.h"
+#include "mongo/util/time_support.h"
+
+namespace mongo {
+
+class ClockSource;
+
+/**
+ * CollectionIndexUsageTracker tracks index usage statistics for a collection. An index is
+ * considered "used" when it appears as part of a winning plan for an operation that uses the
+ * query system.
+ *
+ * Indexes must be registered and deregistered on creation/destruction.
+ */
+class CollectionIndexUsageTracker {
+ MONGO_DISALLOW_COPYING(CollectionIndexUsageTracker);
+
+public:
+ struct IndexUsageStats {
+ IndexUsageStats() = default;
+ explicit IndexUsageStats(Date_t now) : trackerStartTime(now) {}
+
+ IndexUsageStats(const IndexUsageStats& other)
+ : accesses(other.accesses.load()), trackerStartTime(other.trackerStartTime) {}
+
+ IndexUsageStats& operator=(const IndexUsageStats& other) {
+ accesses.store(other.accesses.load());
+ trackerStartTime = other.trackerStartTime;
+ return *this;
+ }
+
+ // Number of operations that have used this index.
+ AtomicInt64 accesses;
+
+ // Date/Time that we started tracking index usage.
+ Date_t trackerStartTime;
+ };
+
+ /**
+ * Constructs a CollectionIndexUsageTracker.
+ *
+ * Does not take ownership of 'clockSource'. 'clockSource' must refer to a non-null clock
+ * source that is valid for the lifetime of the constructed CollectionIndexUsageTracker.
+ */
+ explicit CollectionIndexUsageTracker(ClockSource* clockSource);
+
+ /**
+ * Record that an operation used index 'indexName'. Safe to be called by multiple threads
+ * concurrently.
+ */
+ void recordIndexAccess(StringData indexName);
+
+ /**
+ * Add map entry for 'indexName' stats collection. Must be called under exclusive collection
+ * lock.
+ */
+ void registerIndex(StringData indexName);
+
+ /**
+ * Erase statistics for index 'indexName'. Must be called under exclusive collection lock.
+ * Can be called even if indexName is not registered. This is possible under certain failure
+ * scenarios.
+ */
+ void unregisterIndex(StringData indexName);
+
+ /**
+ * Get the current state of the usage statistics map. This map will only include indexes that
+ * exist at the time of calling. Must be called while holding the collection lock in any mode.
+ */
+ StringMap<CollectionIndexUsageTracker::IndexUsageStats> getUsageStats() const;
+
+private:
+ // Map from index name to usage statistics.
+ StringMap<CollectionIndexUsageTracker::IndexUsageStats> _indexUsageMap;
+
+ // Clock source. Used when the 'trackerStartTime' time for an IndexUsageStats object needs to
+ // be set.
+ ClockSource* _clockSource;
+};
+
+typedef StringMap<CollectionIndexUsageTracker::IndexUsageStats> CollectionIndexUsageMap;
+
+} // namespace mongo
diff --git a/src/mongo/db/collection_index_usage_tracker_test.cpp b/src/mongo/db/collection_index_usage_tracker_test.cpp
new file mode 100644
index 00000000000..39a31120903
--- /dev/null
+++ b/src/mongo/db/collection_index_usage_tracker_test.cpp
@@ -0,0 +1,144 @@
+/**
+ * Copyright (C) 2015 MongoDB Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the GNU Affero General Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#include "mongo/platform/basic.h"
+
+#include "mongo/db/collection_index_usage_tracker.h"
+#include "mongo/unittest/unittest.h"
+#include "mongo/util/clock_source_mock.h"
+
+namespace mongo {
+namespace {
+
+class CollectionIndexUsageTrackerTest : public unittest::Test {
+protected:
+ CollectionIndexUsageTrackerTest() : _tracker(&_clockSource) {}
+
+ /**
+ * Returns an unowned pointer to the tracker owned by this test fixture.
+ */
+ CollectionIndexUsageTracker* getTracker() {
+ return &_tracker;
+ }
+
+ /**
+ * Returns an unowned pointer to the mock clock source owned by this test fixture.
+ */
+ ClockSourceMock* getClockSource() {
+ return &_clockSource;
+ }
+
+private:
+ ClockSourceMock _clockSource;
+ CollectionIndexUsageTracker _tracker;
+};
+
+// Test that a newly constructed tracker has an empty map.
+TEST_F(CollectionIndexUsageTrackerTest, Empty) {
+ ASSERT(getTracker()->getUsageStats().empty());
+}
+
+// Test that recording of a single index hit is reflected in returned stats map.
+TEST_F(CollectionIndexUsageTrackerTest, SingleHit) {
+ getTracker()->registerIndex("foo");
+ getTracker()->recordIndexAccess("foo");
+ CollectionIndexUsageMap statsMap = getTracker()->getUsageStats();
+ ASSERT(statsMap.find("foo") != statsMap.end());
+ ASSERT_EQUALS(1, statsMap["foo"].accesses.loadRelaxed());
+}
+
+// Test that recording of multiple index hits are reflected in stats map.
+TEST_F(CollectionIndexUsageTrackerTest, MultipleHit) {
+ getTracker()->registerIndex("foo");
+ getTracker()->recordIndexAccess("foo");
+ getTracker()->recordIndexAccess("foo");
+ CollectionIndexUsageMap statsMap = getTracker()->getUsageStats();
+ ASSERT(statsMap.find("foo") != statsMap.end());
+ ASSERT_EQUALS(2, statsMap["foo"].accesses.loadRelaxed());
+}
+
+// Test that index registration generates an entry in the stats map.
+TEST_F(CollectionIndexUsageTrackerTest, Register) {
+ getTracker()->registerIndex("foo");
+ ASSERT_EQUALS(1U, getTracker()->getUsageStats().size());
+ getTracker()->registerIndex("bar");
+ ASSERT_EQUALS(2U, getTracker()->getUsageStats().size());
+}
+
+// Test that index deregistration results in removal of an entry from the stats map.
+TEST_F(CollectionIndexUsageTrackerTest, Deregister) {
+ getTracker()->registerIndex("foo");
+ getTracker()->registerIndex("bar");
+ ASSERT_EQUALS(2U, getTracker()->getUsageStats().size());
+ getTracker()->unregisterIndex("foo");
+ ASSERT_EQUALS(1U, getTracker()->getUsageStats().size());
+ getTracker()->unregisterIndex("bar");
+ ASSERT_EQUALS(0U, getTracker()->getUsageStats().size());
+}
+
+// Test that index deregistration results in reset of the usage counter.
+TEST_F(CollectionIndexUsageTrackerTest, HitAfterDeregister) {
+ getTracker()->registerIndex("foo");
+ getTracker()->recordIndexAccess("foo");
+ getTracker()->recordIndexAccess("foo");
+ CollectionIndexUsageMap statsMap = getTracker()->getUsageStats();
+ ASSERT(statsMap.find("foo") != statsMap.end());
+ ASSERT_EQUALS(2, statsMap["foo"].accesses.loadRelaxed());
+
+ getTracker()->unregisterIndex("foo");
+ statsMap = getTracker()->getUsageStats();
+ ASSERT(statsMap.find("foo") == statsMap.end());
+
+ getTracker()->registerIndex("foo");
+ getTracker()->recordIndexAccess("foo");
+ statsMap = getTracker()->getUsageStats();
+ ASSERT(statsMap.find("foo") != statsMap.end());
+ ASSERT_EQUALS(1, statsMap["foo"].accesses.loadRelaxed());
+}
+
+// Test that index tracker start date/time is reset on index deregistration/registration.
+TEST_F(CollectionIndexUsageTrackerTest, DateTimeAfterDeregister) {
+ getTracker()->registerIndex("foo");
+ CollectionIndexUsageMap statsMap = getTracker()->getUsageStats();
+ ASSERT(statsMap.find("foo") != statsMap.end());
+ ASSERT_EQUALS(statsMap["foo"].trackerStartTime, getClockSource()->now());
+
+ getTracker()->unregisterIndex("foo");
+ statsMap = getTracker()->getUsageStats();
+ ASSERT(statsMap.find("foo") == statsMap.end());
+
+ // Increment clock source so that a new index registration has different start time.
+ getClockSource()->advance(stdx::chrono::milliseconds(1));
+
+ getTracker()->registerIndex("foo");
+ statsMap = getTracker()->getUsageStats();
+ ASSERT(statsMap.find("foo") != statsMap.end());
+ ASSERT_EQUALS(statsMap["foo"].trackerStartTime, getClockSource()->now());
+}
+} // namespace
+} // namespace mongo
diff --git a/src/mongo/db/commands/count_cmd.cpp b/src/mongo/db/commands/count_cmd.cpp
index c8c5365cf81..5702dc1c2ca 100644
--- a/src/mongo/db/commands/count_cmd.cpp
+++ b/src/mongo/db/commands/count_cmd.cpp
@@ -157,6 +157,12 @@ public:
return appendCommandStatus(result, execPlanStatus);
}
+ PlanSummaryStats summaryStats;
+ Explain::getSummaryStats(*exec, &summaryStats);
+ if (collection) {
+ collection->infoCache()->notifyOfQuery(txn, summaryStats.indexesUsed);
+ }
+
// Plan is done executing. We just need to pull the count out of the root stage.
invariant(STAGE_COUNT == exec->getRootStage()->stageType());
CountStage* countStage = static_cast<CountStage*>(exec->getRootStage());
diff --git a/src/mongo/db/commands/distinct.cpp b/src/mongo/db/commands/distinct.cpp
index 2348c5befdd..e20153f7b5d 100644
--- a/src/mongo/db/commands/distinct.cpp
+++ b/src/mongo/db/commands/distinct.cpp
@@ -28,6 +28,8 @@
* it in the license file.
*/
+#define MONGO_LOG_DEFAULT_COMPONENT ::mongo::logger::LogComponent::kQuery
+
#include <string>
#include <vector>
@@ -35,15 +37,18 @@
#include "mongo/db/auth/action_set.h"
#include "mongo/db/auth/action_type.h"
#include "mongo/db/auth/privilege.h"
+#include "mongo/db/catalog/collection.h"
#include "mongo/db/catalog/database.h"
#include "mongo/db/clientcursor.h"
#include "mongo/db/commands.h"
#include "mongo/db/db_raii.h"
+#include "mongo/db/exec/working_set_common.h"
#include "mongo/db/instance.h"
#include "mongo/db/jsobj.h"
#include "mongo/db/query/explain.h"
#include "mongo/db/query/get_executor.h"
#include "mongo/db/query/query_planner_common.h"
+#include "mongo/util/log.h"
#include "mongo/util/timer.h"
namespace mongo {
@@ -204,9 +209,24 @@ public:
}
}
+ // Return an error if execution fails for any reason.
+ if (PlanExecutor::FAILURE == state || PlanExecutor::DEAD == state) {
+ const std::unique_ptr<PlanStageStats> stats(executor.getValue()->getStats());
+ log() << "Plan executor error during distinct command: "
+ << PlanExecutor::statestr(state) << ", stats: " << Explain::statsToBSON(*stats);
+
+ return appendCommandStatus(result,
+ Status(ErrorCodes::OperationFailed,
+ str::stream()
+ << "Executor error during distinct command: "
+ << WorkingSetCommon::toStatusString(obj)));
+ }
+
+
// Get summary information about the plan.
PlanSummaryStats stats;
Explain::getSummaryStats(*executor.getValue(), &stats);
+ collection->infoCache()->notifyOfQuery(txn, stats.indexesUsed);
verify(start == bb.buf());
diff --git a/src/mongo/db/commands/find_and_modify.cpp b/src/mongo/db/commands/find_and_modify.cpp
index 8f594b3fa31..fb80965bb36 100644
--- a/src/mongo/db/commands/find_and_modify.cpp
+++ b/src/mongo/db/commands/find_and_modify.cpp
@@ -38,6 +38,7 @@
#include "mongo/base/status_with.h"
#include "mongo/bson/bsonobj.h"
#include "mongo/bson/bsonobjbuilder.h"
+#include "mongo/db/catalog/collection.h"
#include "mongo/db/catalog/document_validation.h"
#include "mongo/db/client.h"
#include "mongo/db/commands.h"
@@ -96,12 +97,22 @@ const DeleteStats* getDeleteStats(const PlanStageStats* stats) {
*
* If the operation failed, then an error Status is returned.
*/
-StatusWith<boost::optional<BSONObj>> advanceExecutor(PlanExecutor* exec, bool isRemove) {
+StatusWith<boost::optional<BSONObj>> advanceExecutor(OperationContext* txn,
+ PlanExecutor* exec,
+ bool isRemove,
+ Collection* collection) {
BSONObj value;
PlanExecutor::ExecState state = exec->getNext(&value, nullptr);
+ if (collection && PlanExecutor::DEAD != state) {
+ PlanSummaryStats summaryStats;
+ Explain::getSummaryStats(*exec, &summaryStats);
+ collection->infoCache()->notifyOfQuery(txn, summaryStats.indexesUsed);
+ }
+
if (PlanExecutor::ADVANCED == state) {
return boost::optional<BSONObj>(std::move(value));
}
+
if (PlanExecutor::FAILURE == state || PlanExecutor::DEAD == state) {
const std::unique_ptr<PlanStageStats> stats(exec->getStats());
error() << "Plan executor error during findAndModify: " << PlanExecutor::statestr(state)
@@ -117,6 +128,7 @@ StatusWith<boost::optional<BSONObj>> advanceExecutor(PlanExecutor* exec, bool is
str::stream() << "executor returned " << PlanExecutor::statestr(state)
<< " while executing " << opstr};
}
+
invariant(state == PlanExecutor::IS_EOF);
return boost::optional<BSONObj>(boost::none);
}
@@ -384,7 +396,7 @@ public:
std::move(statusWithPlanExecutor.getValue());
StatusWith<boost::optional<BSONObj>> advanceStatus =
- advanceExecutor(exec.get(), args.isRemove());
+ advanceExecutor(txn, exec.get(), args.isRemove(), collection);
if (!advanceStatus.isOK()) {
return appendCommandStatus(result, advanceStatus.getStatus());
}
@@ -454,7 +466,7 @@ public:
std::move(statusWithPlanExecutor.getValue());
StatusWith<boost::optional<BSONObj>> advanceStatus =
- advanceExecutor(exec.get(), args.isRemove());
+ advanceExecutor(txn, exec.get(), args.isRemove(), collection);
if (!advanceStatus.isOK()) {
return appendCommandStatus(result, advanceStatus.getStatus());
}
diff --git a/src/mongo/db/commands/find_cmd.cpp b/src/mongo/db/commands/find_cmd.cpp
index a1821deda97..9f35015462e 100644
--- a/src/mongo/db/commands/find_cmd.cpp
+++ b/src/mongo/db/commands/find_cmd.cpp
@@ -277,7 +277,7 @@ public:
// there is no ClientCursor id, and then return.
const long long numResults = 0;
const CursorId cursorId = 0;
- endQueryOp(txn, *exec, dbProfilingLevel, numResults, cursorId);
+ endQueryOp(txn, collection, *exec, dbProfilingLevel, numResults, cursorId);
appendCursorResponseObject(cursorId, nss.ns(), BSONArray(), &result);
return true;
}
@@ -354,7 +354,7 @@ public:
}
// Fill out curop based on the results.
- endQueryOp(txn, *cursorExec, dbProfilingLevel, numResults, cursorId);
+ endQueryOp(txn, collection, *cursorExec, dbProfilingLevel, numResults, cursorId);
// 7) Generate the response object to send to the client.
appendCursorResponseObject(cursorId, nss.ns(), firstBatch.arr(), &result);
diff --git a/src/mongo/db/commands/geo_near_cmd.cpp b/src/mongo/db/commands/geo_near_cmd.cpp
index eafef874646..163a97028cf 100644
--- a/src/mongo/db/commands/geo_near_cmd.cpp
+++ b/src/mongo/db/commands/geo_near_cmd.cpp
@@ -39,6 +39,7 @@
#include "mongo/db/commands.h"
#include "mongo/db/curop.h"
#include "mongo/db/db_raii.h"
+#include "mongo/db/exec/working_set_common.h"
#include "mongo/db/geo/geoconstants.h"
#include "mongo/db/geo/geoparser.h"
#include "mongo/db/index/index_descriptor.h"
@@ -212,7 +213,8 @@ public:
BSONObj currObj;
long long results = 0;
- while ((results < numWanted) && PlanExecutor::ADVANCED == exec->getNext(&currObj, NULL)) {
+ PlanExecutor::ExecState state;
+ while (PlanExecutor::ADVANCED == (state = exec->getNext(&currObj, NULL))) {
// Come up with the correct distance.
double dist = currObj["$dis"].number() * distanceMultiplier;
totalDistance += dist;
@@ -249,17 +251,40 @@ public:
}
oneResultBuilder.append("obj", resObj);
oneResultBuilder.done();
+
++results;
+
+ // Break if we have the number of requested result documents.
+ if (results >= numWanted) {
+ break;
+ }
}
resultBuilder.done();
+ // Return an error if execution fails for any reason.
+ if (PlanExecutor::FAILURE == state || PlanExecutor::DEAD == state) {
+ const std::unique_ptr<PlanStageStats> stats(exec->getStats());
+ log() << "Plan executor error during geoNear command: " << PlanExecutor::statestr(state)
+ << ", stats: " << Explain::statsToBSON(*stats);
+
+ return appendCommandStatus(result,
+ Status(ErrorCodes::OperationFailed,
+ str::stream()
+ << "Executor error during geoNear command: "
+ << WorkingSetCommon::toStatusString(currObj)));
+ }
+
// Fill out the stats subobj.
BSONObjBuilder stats(result.subobjStart("stats"));
// Fill in nscanned from the explain.
PlanSummaryStats summary;
Explain::getSummaryStats(*exec, &summary);
+ if (collection) {
+ collection->infoCache()->notifyOfQuery(txn, summary.indexesUsed);
+ }
+
stats.appendNumber("nscanned", summary.totalKeysExamined);
stats.appendNumber("objectsLoaded", summary.totalDocsExamined);
diff --git a/src/mongo/db/commands/group_cmd.cpp b/src/mongo/db/commands/group_cmd.cpp
index 60b340be3e5..88a9169599f 100644
--- a/src/mongo/db/commands/group_cmd.cpp
+++ b/src/mongo/db/commands/group_cmd.cpp
@@ -32,6 +32,7 @@
#include "mongo/db/auth/action_type.h"
#include "mongo/db/auth/authorization_session.h"
#include "mongo/db/auth/privilege.h"
+#include "mongo/db/catalog/collection.h"
#include "mongo/db/commands.h"
#include "mongo/db/db_raii.h"
#include "mongo/db/exec/group.h"
@@ -161,6 +162,12 @@ private:
invariant(planExecutor->isEOF());
+ PlanSummaryStats summaryStats;
+ Explain::getSummaryStats(*planExecutor, &summaryStats);
+ if (coll) {
+ coll->infoCache()->notifyOfQuery(txn, summaryStats.indexesUsed);
+ }
+
invariant(STAGE_GROUP == planExecutor->getRootStage()->stageType());
GroupStage* groupStage = static_cast<GroupStage*>(planExecutor->getRootStage());
const GroupStats* groupStats =
diff --git a/src/mongo/db/commands/write_commands/batch_executor.cpp b/src/mongo/db/commands/write_commands/batch_executor.cpp
index 0719c14803f..f678f25137a 100644
--- a/src/mongo/db/commands/write_commands/batch_executor.cpp
+++ b/src/mongo/db/commands/write_commands/batch_executor.cpp
@@ -1250,11 +1250,14 @@ static void multiUpdate(OperationContext* txn,
result->getStats().n = didInsert ? 1 : numMatched;
result->getStats().upsertedID = resUpsertedID;
+ PlanSummaryStats summary;
+ Explain::getSummaryStats(*exec, &summary);
+ collection->infoCache()->notifyOfQuery(txn, summary.indexesUsed);
+
// No-ops need to reset lastOp in the client, for write concern.
if (repl::ReplClientInfo::forClient(client).getLastOp() == lastOpAtOperationStart) {
repl::ReplClientInfo::forClient(client).setLastOpToSystemLastOpTime(txn);
}
-
} catch (const WriteConflictException& dle) {
debug->writeConflicts++;
if (isMulti) {
@@ -1335,18 +1338,23 @@ static void multiRemove(OperationContext* txn,
return;
}
- std::unique_ptr<PlanExecutor> exec = uassertStatusOK(
- getExecutorDelete(txn, autoDb.getDb()->getCollection(nss), &parsedDelete));
+ auto collection = autoDb.getDb()->getCollection(nss);
+
+ std::unique_ptr<PlanExecutor> exec =
+ uassertStatusOK(getExecutorDelete(txn, collection, &parsedDelete));
// Execute the delete and retrieve the number deleted.
uassertStatusOK(exec->executePlan());
result->getStats().n = DeleteStage::getNumDeleted(*exec);
+ PlanSummaryStats summary;
+ Explain::getSummaryStats(*exec, &summary);
+ collection->infoCache()->notifyOfQuery(txn, summary.indexesUsed);
+
// No-ops need to reset lastOp in the client, for write concern.
if (repl::ReplClientInfo::forClient(client).getLastOp() == lastOpAtOperationStart) {
repl::ReplClientInfo::forClient(client).setLastOpToSystemLastOpTime(txn);
}
-
break;
} catch (const WriteConflictException& dle) {
CurOp::get(txn)->debug().writeConflicts++;
diff --git a/src/mongo/db/exec/idhack.cpp b/src/mongo/db/exec/idhack.cpp
index 570f80a19d9..90c3be244ae 100644
--- a/src/mongo/db/exec/idhack.cpp
+++ b/src/mongo/db/exec/idhack.cpp
@@ -53,13 +53,18 @@ const char* IDHackStage::kStageType = "IDHACK";
IDHackStage::IDHackStage(OperationContext* txn,
const Collection* collection,
CanonicalQuery* query,
- WorkingSet* ws)
+ WorkingSet* ws,
+ const IndexDescriptor* descriptor)
: PlanStage(kStageType, txn),
_collection(collection),
_workingSet(ws),
_key(query->getQueryObj()["_id"].wrap()),
_done(false),
_idBeingPagedIn(WorkingSet::INVALID_ID) {
+ const IndexCatalog* catalog = _collection->getIndexCatalog();
+ _specificStats.indexName = descriptor->indexName();
+ _accessMethod = catalog->getIndex(descriptor);
+
if (NULL != query->getProj()) {
_addKeyMetadata = query->getProj()->wantIndexKey();
} else {
@@ -70,14 +75,19 @@ IDHackStage::IDHackStage(OperationContext* txn,
IDHackStage::IDHackStage(OperationContext* txn,
Collection* collection,
const BSONObj& key,
- WorkingSet* ws)
+ WorkingSet* ws,
+ const IndexDescriptor* descriptor)
: PlanStage(kStageType, txn),
_collection(collection),
_workingSet(ws),
_key(key),
_done(false),
_addKeyMetadata(false),
- _idBeingPagedIn(WorkingSet::INVALID_ID) {}
+ _idBeingPagedIn(WorkingSet::INVALID_ID) {
+ const IndexCatalog* catalog = _collection->getIndexCatalog();
+ _specificStats.indexName = descriptor->indexName();
+ _accessMethod = catalog->getIndex(descriptor);
+}
IDHackStage::~IDHackStage() {}
@@ -114,18 +124,8 @@ PlanStage::StageState IDHackStage::work(WorkingSetID* out) {
WorkingSetID id = WorkingSet::INVALID_ID;
try {
- // Use the index catalog to get the id index.
- const IndexCatalog* catalog = _collection->getIndexCatalog();
-
- // Find the index we use.
- IndexDescriptor* idDesc = catalog->findIdIndex(getOpCtx());
- if (NULL == idDesc) {
- _done = true;
- return PlanStage::IS_EOF;
- }
-
// Look up the key by going directly to the index.
- RecordId loc = catalog->getIndex(idDesc)->findSingle(getOpCtx(), _key);
+ RecordId loc = _accessMethod->findSingle(getOpCtx(), _key);
// Key not found.
if (loc.isNull()) {
diff --git a/src/mongo/db/exec/idhack.h b/src/mongo/db/exec/idhack.h
index e178014476a..9eb143e4193 100644
--- a/src/mongo/db/exec/idhack.h
+++ b/src/mongo/db/exec/idhack.h
@@ -37,6 +37,7 @@
namespace mongo {
+class IndexAccessMethod;
class RecordCursor;
/**
@@ -49,9 +50,14 @@ public:
IDHackStage(OperationContext* txn,
const Collection* collection,
CanonicalQuery* query,
- WorkingSet* ws);
+ WorkingSet* ws,
+ const IndexDescriptor* descriptor);
- IDHackStage(OperationContext* txn, Collection* collection, const BSONObj& key, WorkingSet* ws);
+ IDHackStage(OperationContext* txn,
+ Collection* collection,
+ const BSONObj& key,
+ WorkingSet* ws,
+ const IndexDescriptor* descriptor);
~IDHackStage();
@@ -95,6 +101,9 @@ private:
// The WorkingSet we annotate with results. Not owned by us.
WorkingSet* _workingSet;
+ // Not owned here.
+ const IndexAccessMethod* _accessMethod;
+
// The value to match against the _id field.
BSONObj _key;
diff --git a/src/mongo/db/exec/plan_stats.h b/src/mongo/db/exec/plan_stats.h
index 66403f131c5..8a7648bd4d7 100644
--- a/src/mongo/db/exec/plan_stats.h
+++ b/src/mongo/db/exec/plan_stats.h
@@ -342,6 +342,8 @@ struct IDHackStats : public SpecificStats {
return specific;
}
+ std::string indexName;
+
// Number of entries retrieved from the index while executing the idhack.
size_t keysExamined;
diff --git a/src/mongo/db/instance.cpp b/src/mongo/db/instance.cpp
index c79bdf1d7d0..14ea21e4dcb 100644
--- a/src/mongo/db/instance.cpp
+++ b/src/mongo/db/instance.cpp
@@ -687,10 +687,12 @@ void receivedUpdate(OperationContext* txn, const NamespaceString& nsString, Mess
txn->lockState(), nsString.ns(), parsedUpdate.isIsolated() ? MODE_X : MODE_IX);
OldClientContext ctx(txn, nsString.ns());
+ auto collection = ctx.db()->getCollection(nsString);
+
// The common case: no implicit collection creation
- if (!upsert || ctx.db()->getCollection(nsString) != NULL) {
- unique_ptr<PlanExecutor> exec = uassertStatusOK(getExecutorUpdate(
- txn, ctx.db()->getCollection(nsString), &parsedUpdate, &op.debug()));
+ if (!upsert || collection != NULL) {
+ unique_ptr<PlanExecutor> exec =
+ uassertStatusOK(getExecutorUpdate(txn, collection, &parsedUpdate, &op.debug()));
// Run the plan and get stats out.
uassertStatusOK(exec->executePlan());
@@ -699,11 +701,14 @@ void receivedUpdate(OperationContext* txn, const NamespaceString& nsString, Mess
// for getlasterror
LastError::get(client).recordUpdate(res.existing, res.numMatched, res.upserted);
+ PlanSummaryStats summary;
+ Explain::getSummaryStats(*exec, &summary);
+ collection->infoCache()->notifyOfQuery(txn, summary.indexesUsed);
+
// No-ops need to reset lastOp in the client, for write concern.
if (repl::ReplClientInfo::forClient(client).getLastOp() == lastOpAtOperationStart) {
repl::ReplClientInfo::forClient(client).setLastOpToSystemLastOpTime(txn);
}
-
return;
}
break;
@@ -741,8 +746,9 @@ void receivedUpdate(OperationContext* txn, const NamespaceString& nsString, Mess
wuow.commit();
}
- unique_ptr<PlanExecutor> exec = uassertStatusOK(
- getExecutorUpdate(txn, ctx.db()->getCollection(nsString), &parsedUpdate, &op.debug()));
+ auto collection = ctx.db()->getCollection(nsString);
+ unique_ptr<PlanExecutor> exec =
+ uassertStatusOK(getExecutorUpdate(txn, collection, &parsedUpdate, &op.debug()));
// Run the plan and get stats out.
uassertStatusOK(exec->executePlan());
@@ -750,6 +756,10 @@ void receivedUpdate(OperationContext* txn, const NamespaceString& nsString, Mess
LastError::get(client).recordUpdate(res.existing, res.numMatched, res.upserted);
+ PlanSummaryStats summary;
+ Explain::getSummaryStats(*exec, &summary);
+ collection->infoCache()->notifyOfQuery(txn, summary.indexesUsed);
+
// No-ops need to reset lastOp in the client, for write concern.
if (repl::ReplClientInfo::forClient(client).getLastOp() == lastOpAtOperationStart) {
repl::ReplClientInfo::forClient(client).setLastOpToSystemLastOpTime(txn);
@@ -803,8 +813,10 @@ void receivedDelete(OperationContext* txn, const NamespaceString& nsString, Mess
txn->lockState(), nsString.ns(), parsedDelete.isIsolated() ? MODE_X : MODE_IX);
OldClientContext ctx(txn, nsString.ns());
- unique_ptr<PlanExecutor> exec = uassertStatusOK(
- getExecutorDelete(txn, ctx.db()->getCollection(nsString), &parsedDelete));
+ auto collection = ctx.db()->getCollection(nsString);
+
+ unique_ptr<PlanExecutor> exec =
+ uassertStatusOK(getExecutorDelete(txn, collection, &parsedDelete));
// Run the plan and get the number of docs deleted.
uassertStatusOK(exec->executePlan());
@@ -812,11 +824,14 @@ void receivedDelete(OperationContext* txn, const NamespaceString& nsString, Mess
LastError::get(client).recordDelete(n);
op.debug().ndeleted = n;
+ PlanSummaryStats summary;
+ Explain::getSummaryStats(*exec, &summary);
+ collection->infoCache()->notifyOfQuery(txn, summary.indexesUsed);
+
// No-ops need to reset lastOp in the client, for write concern.
if (repl::ReplClientInfo::forClient(client).getLastOp() == lastOpAtOperationStart) {
repl::ReplClientInfo::forClient(client).setLastOpToSystemLastOpTime(txn);
}
-
break;
} catch (const WriteConflictException& dle) {
op.debug().writeConflicts++;
diff --git a/src/mongo/db/ops/update_lifecycle_impl.cpp b/src/mongo/db/ops/update_lifecycle_impl.cpp
index 511f1b59244..40c86f3fd80 100644
--- a/src/mongo/db/ops/update_lifecycle_impl.cpp
+++ b/src/mongo/db/ops/update_lifecycle_impl.cpp
@@ -68,7 +68,7 @@ bool UpdateLifecycleImpl::canContinue() const {
const UpdateIndexData* UpdateLifecycleImpl::getIndexKeys(OperationContext* opCtx) const {
if (_collection)
- return &_collection->infoCache()->indexKeys(opCtx);
+ return &_collection->infoCache()->getIndexKeys(opCtx);
return NULL;
}
diff --git a/src/mongo/db/pipeline/SConscript b/src/mongo/db/pipeline/SConscript
index c9b43aeb967..58167b69f6e 100644
--- a/src/mongo/db/pipeline/SConscript
+++ b/src/mongo/db/pipeline/SConscript
@@ -100,6 +100,7 @@ docSourceEnv.Library(
'document_source.cpp',
'document_source_geo_near.cpp',
'document_source_group.cpp',
+ 'document_source_index_stats.cpp',
'document_source_limit.cpp',
'document_source_match.cpp',
'document_source_merge_cursors.cpp',
diff --git a/src/mongo/db/pipeline/document_source.h b/src/mongo/db/pipeline/document_source.h
index d2a6727a911..e429a00b05a 100644
--- a/src/mongo/db/pipeline/document_source.h
+++ b/src/mongo/db/pipeline/document_source.h
@@ -42,6 +42,7 @@
#include "mongo/base/init.h"
#include "mongo/client/connpool.h"
#include "mongo/db/clientcursor.h"
+#include "mongo/db/collection_index_usage_tracker.h"
#include "mongo/db/jsobj.h"
#include "mongo/db/matcher/matcher.h"
#include "mongo/db/pipeline/accumulator.h"
@@ -177,7 +178,9 @@ public:
virtual void serializeToArray(std::vector<Value>& array, bool explain = false) const;
- /// Returns true if doesn't require an input source (most DocumentSources do).
+ /**
+     * Returns true if this stage does not require an input source (most DocumentSources do).
+ */
virtual bool isValidInitialSource() const {
return false;
}
@@ -288,6 +291,9 @@ public:
*/
virtual BSONObj insert(const NamespaceString& ns, const std::vector<BSONObj>& objs) = 0;
+ virtual CollectionIndexUsageMap getIndexStats(OperationContext* opCtx,
+ const NamespaceString& ns) = 0;
+
// Add new methods as needed.
};
@@ -525,6 +531,32 @@ private:
Accumulators _currentAccumulators;
};
+/**
+ * Provides a document source interface to retrieve index statistics for a given namespace.
+ * Each document returned represents a single index and mongod instance.
+ */
+class DocumentSourceIndexStats final : public DocumentSource, public DocumentSourceNeedsMongod {
+public:
+ // virtuals from DocumentSource
+ boost::optional<Document> getNext() final;
+ const char* getSourceName() const final;
+ Value serialize(bool explain = false) const final;
+
+ virtual bool isValidInitialSource() const final {
+ return true;
+ }
+
+ static boost::intrusive_ptr<DocumentSource> createFromBson(
+ BSONElement elem, const boost::intrusive_ptr<ExpressionContext>& pExpCtx);
+
+private:
+ DocumentSourceIndexStats(const boost::intrusive_ptr<ExpressionContext>& pExpCtx);
+
+ CollectionIndexUsageMap _indexStatsMap;
+ CollectionIndexUsageMap::const_iterator _indexStatsIter;
+ std::string _processName;
+};
+
class DocumentSourceMatch final : public DocumentSource {
public:
diff --git a/src/mongo/db/pipeline/document_source_index_stats.cpp b/src/mongo/db/pipeline/document_source_index_stats.cpp
new file mode 100644
index 00000000000..dd40cd90a3b
--- /dev/null
+++ b/src/mongo/db/pipeline/document_source_index_stats.cpp
@@ -0,0 +1,81 @@
+/**
+ * Copyright (C) 2015 MongoDB Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the GNU Affero General Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#include "mongo/platform/basic.h"
+
+#include "mongo/db/pipeline/document_source.h"
+
+namespace mongo {
+
+using boost::intrusive_ptr;
+
+REGISTER_DOCUMENT_SOURCE(indexStats, DocumentSourceIndexStats::createFromBson);
+
+const char* DocumentSourceIndexStats::getSourceName() const {
+ return "$indexStats";
+}
+
+boost::optional<Document> DocumentSourceIndexStats::getNext() {
+ pExpCtx->checkForInterrupt();
+
+ if (_indexStatsMap.empty()) {
+ _indexStatsMap = _mongod->getIndexStats(pExpCtx->opCtx, pExpCtx->ns);
+ _indexStatsIter = _indexStatsMap.begin();
+ }
+
+ if (_indexStatsIter != _indexStatsMap.end()) {
+ const auto& stats = _indexStatsIter->second;
+ MutableDocument doc;
+ doc["name"] = Value(_indexStatsIter->first);
+ doc["ns"] = Value(pExpCtx->ns.ns());
+ doc["processName"] = Value(_processName);
+ doc["usageStats"]["operations"] = Value(stats.accesses.loadRelaxed());
+ doc["usageStats"]["validSince"] = Value(stats.trackerStartTime);
+ ++_indexStatsIter;
+ return doc.freeze();
+ }
+
+ return boost::none;
+}
+
+DocumentSourceIndexStats::DocumentSourceIndexStats(const intrusive_ptr<ExpressionContext>& pExpCtx)
+ : DocumentSource(pExpCtx),
+ _processName(str::stream() << getHostNameCached() << ":" << serverGlobalParams.port) {}
+
+intrusive_ptr<DocumentSource> DocumentSourceIndexStats::createFromBson(
+ BSONElement elem, const intrusive_ptr<ExpressionContext>& pExpCtx) {
+ uassert(28803,
+ "The $indexStats stage specification must be an empty object",
+ elem.type() == Object && elem.Obj().isEmpty());
+ return new DocumentSourceIndexStats(pExpCtx);
+}
+
+Value DocumentSourceIndexStats::serialize(bool explain) const {
+ return Value(DOC(getSourceName() << Document()));
+}
+}
diff --git a/src/mongo/db/pipeline/pipeline.cpp b/src/mongo/db/pipeline/pipeline.cpp
index c0e317eba3b..413bf9bf07b 100644
--- a/src/mongo/db/pipeline/pipeline.cpp
+++ b/src/mongo/db/pipeline/pipeline.cpp
@@ -320,8 +320,17 @@ Status Pipeline::checkAuthForCommand(ClientBasic* client,
inputNs.isValid());
std::vector<Privilege> privileges;
- Privilege::addPrivilegeToPrivilegeVector(&privileges,
- Privilege(inputResource, ActionType::find));
+
+ if (cmdObj.getFieldDotted("pipeline.0.$indexStats")) {
+ Privilege::addPrivilegeToPrivilegeVector(
+ &privileges,
+ Privilege(ResourcePattern::forAnyNormalResource(), ActionType::indexStats));
+ } else {
+ // If no source requiring an alternative permission scheme is specified then default to
+ // requiring find() privileges on the given namespace.
+ Privilege::addPrivilegeToPrivilegeVector(&privileges,
+ Privilege(inputResource, ActionType::find));
+ }
BSONObj pipeline = cmdObj.getObjectField("pipeline");
BSONForEach(stageElem, pipeline) {
diff --git a/src/mongo/db/pipeline/pipeline_d.cpp b/src/mongo/db/pipeline/pipeline_d.cpp
index 53415ab7658..eee90ecfa33 100644
--- a/src/mongo/db/pipeline/pipeline_d.cpp
+++ b/src/mongo/db/pipeline/pipeline_d.cpp
@@ -93,6 +93,25 @@ public:
return _client.getLastErrorDetailed();
}
+ CollectionIndexUsageMap getIndexStats(OperationContext* opCtx,
+ const NamespaceString& ns) final {
+ AutoGetDb autoDb(opCtx, ns.db(), MODE_IS);
+
+ uassert(28804,
+ str::stream() << "Database not found on index stats retrieval: " << ns.db(),
+ autoDb.getDb());
+
+ Lock::CollectionLock colLock(opCtx->lockState(), ns.ns(), MODE_IS);
+
+ Collection* collection = autoDb.getDb()->getCollection(ns);
+
+ uassert(28795,
+ str::stream() << "Collection not found on index stats retrieval: " << ns.ns(),
+ collection);
+
+ return collection->infoCache()->getIndexUsageStats();
+ }
+
private:
intrusive_ptr<ExpressionContext> _ctx;
DBDirectClient _client;
diff --git a/src/mongo/db/query/explain.cpp b/src/mongo/db/query/explain.cpp
index 4f1f260e1ab..61cb9dfca0e 100644
--- a/src/mongo/db/query/explain.cpp
+++ b/src/mongo/db/query/explain.cpp
@@ -30,9 +30,14 @@
#include "mongo/db/query/explain.h"
-
#include "mongo/base/owned_pointer_vector.h"
+#include "mongo/db/exec/count_scan.h"
+#include "mongo/db/exec/distinct_scan.h"
+#include "mongo/db/exec/idhack.h"
+#include "mongo/db/exec/index_scan.h"
#include "mongo/db/exec/multi_plan.h"
+#include "mongo/db/exec/near.h"
+#include "mongo/db/exec/text.h"
#include "mongo/db/query/get_executor.h"
#include "mongo/db/query/plan_executor.h"
#include "mongo/db/query/query_planner.h"
@@ -725,6 +730,39 @@ void Explain::getSummaryStats(const PlanExecutor& exec, PlanSummaryStats* statsO
if (STAGE_SORT == stages[i]->stageType()) {
statsOut->hasSortStage = true;
}
+
+ if (STAGE_IXSCAN == stages[i]->stageType()) {
+ const IndexScan* ixscan = static_cast<const IndexScan*>(stages[i]);
+ const IndexScanStats* ixscanStats =
+ static_cast<const IndexScanStats*>(ixscan->getSpecificStats());
+ statsOut->indexesUsed.insert(ixscanStats->indexName);
+ } else if (STAGE_COUNT_SCAN == stages[i]->stageType()) {
+ const CountScan* countScan = static_cast<const CountScan*>(stages[i]);
+ const CountScanStats* countScanStats =
+ static_cast<const CountScanStats*>(countScan->getSpecificStats());
+ statsOut->indexesUsed.insert(countScanStats->indexName);
+ } else if (STAGE_IDHACK == stages[i]->stageType()) {
+ const IDHackStage* idHackStage = static_cast<const IDHackStage*>(stages[i]);
+ const IDHackStats* idHackStats =
+ static_cast<const IDHackStats*>(idHackStage->getSpecificStats());
+ statsOut->indexesUsed.insert(idHackStats->indexName);
+ } else if (STAGE_DISTINCT_SCAN == stages[i]->stageType()) {
+ const DistinctScan* distinctScan = static_cast<const DistinctScan*>(stages[i]);
+ const DistinctScanStats* distinctScanStats =
+ static_cast<const DistinctScanStats*>(distinctScan->getSpecificStats());
+ statsOut->indexesUsed.insert(distinctScanStats->indexName);
+ } else if (STAGE_TEXT == stages[i]->stageType()) {
+ const TextStage* textStage = static_cast<const TextStage*>(stages[i]);
+ const TextStats* textStats =
+ static_cast<const TextStats*>(textStage->getSpecificStats());
+ statsOut->indexesUsed.insert(textStats->indexName);
+ } else if (STAGE_GEO_NEAR_2D == stages[i]->stageType() ||
+ STAGE_GEO_NEAR_2DSPHERE == stages[i]->stageType()) {
+ const NearStage* nearStage = static_cast<const NearStage*>(stages[i]);
+ const NearStats* nearStats =
+ static_cast<const NearStats*>(nearStage->getSpecificStats());
+ statsOut->indexesUsed.insert(nearStats->indexName);
+ }
}
}
diff --git a/src/mongo/db/query/explain.h b/src/mongo/db/query/explain.h
index 3640ac0e4cc..1177dc1601c 100644
--- a/src/mongo/db/query/explain.h
+++ b/src/mongo/db/query/explain.h
@@ -71,6 +71,9 @@ struct PlanSummaryStats {
// Did this plan use an in-memory sort stage?
bool hasSortStage;
+
+ // The names of each index used by the plan.
+ std::set<std::string> indexesUsed;
};
/**
diff --git a/src/mongo/db/query/find.cpp b/src/mongo/db/query/find.cpp
index 3b82aa29ce1..d622bca7105 100644
--- a/src/mongo/db/query/find.cpp
+++ b/src/mongo/db/query/find.cpp
@@ -137,6 +137,7 @@ void beginQueryOp(OperationContext* txn,
}
void endQueryOp(OperationContext* txn,
+ Collection* collection,
const PlanExecutor& exec,
int dbProfilingLevel,
long long numResults,
@@ -156,6 +157,10 @@ void endQueryOp(OperationContext* txn,
curop->debug().docsExamined = summaryStats.totalDocsExamined;
curop->debug().idhack = summaryStats.isIdhack;
+ if (collection) {
+ collection->infoCache()->notifyOfQuery(txn, summaryStats.indexesUsed);
+ }
+
const logger::LogComponent queryLogComponent = logger::LogComponent::kQuery;
const logger::LogSeverity logLevelOne = logger::LogSeverity::Debug(1);
@@ -657,10 +662,10 @@ std::string runQuery(OperationContext* txn,
// use by future getmore ops).
cc->setLeftoverMaxTimeMicros(curop.getRemainingMaxTimeMicros());
- endQueryOp(txn, *cc->getExecutor(), dbProfilingLevel, numResults, ccId);
+ endQueryOp(txn, collection, *cc->getExecutor(), dbProfilingLevel, numResults, ccId);
} else {
LOG(5) << "Not caching executor but returning " << numResults << " results.\n";
- endQueryOp(txn, *exec, dbProfilingLevel, numResults, ccId);
+ endQueryOp(txn, collection, *exec, dbProfilingLevel, numResults, ccId);
}
// Add the results from the query into the output buffer.
diff --git a/src/mongo/db/query/find.h b/src/mongo/db/query/find.h
index 562793e7b21..21864168d44 100644
--- a/src/mongo/db/query/find.h
+++ b/src/mongo/db/query/find.h
@@ -84,7 +84,8 @@ void beginQueryOp(OperationContext* txn,
long long ntoskip);
/**
- * Fills out CurOp for "txn" with information regarding this query's execution.
+ * 1) Fills out CurOp for "txn" with information regarding this query's execution.
+ * 2) Reports index usage to the CollectionInfoCache.
*
* Uses explain functionality to extract stats from 'exec'.
*
@@ -92,6 +93,7 @@ void beginQueryOp(OperationContext* txn,
* do expensive stats gathering.
*/
void endQueryOp(OperationContext* txn,
+ Collection* collection,
const PlanExecutor& exec,
int dbProfilingLevel,
long long numResults,
diff --git a/src/mongo/db/query/get_executor.cpp b/src/mongo/db/query/get_executor.cpp
index 1aaf0f868f6..ad86441dcb4 100644
--- a/src/mongo/db/query/get_executor.cpp
+++ b/src/mongo/db/query/get_executor.cpp
@@ -244,12 +244,13 @@ Status prepareExecution(OperationContext* opCtx,
plannerParams.options = plannerOptions;
fillOutPlannerParams(opCtx, collection, canonicalQuery, &plannerParams);
+ const IndexDescriptor* descriptor = collection->getIndexCatalog()->findIdIndex(opCtx);
+
// If we have an _id index we can use an idhack plan.
- if (IDHackStage::supportsQuery(*canonicalQuery) &&
- collection->getIndexCatalog()->findIdIndex(opCtx)) {
+ if (descriptor && IDHackStage::supportsQuery(*canonicalQuery)) {
LOG(2) << "Using idhack: " << canonicalQuery->toStringShort();
- *rootOut = new IDHackStage(opCtx, collection, canonicalQuery, ws);
+ *rootOut = new IDHackStage(opCtx, collection, canonicalQuery, ws, descriptor);
// Might have to filter out orphaned docs.
if (plannerParams.options & QueryPlannerParams::INCLUDE_SHARD_FILTER) {
@@ -449,8 +450,9 @@ StatusWith<unique_ptr<PlanExecutor>> getExecutor(OperationContext* txn,
return PlanExecutor::make(txn, std::move(ws), std::move(eofStage), ns, yieldPolicy);
}
- if (!CanonicalQuery::isSimpleIdQuery(unparsedQuery) ||
- !collection->getIndexCatalog()->findIdIndex(txn)) {
+ const IndexDescriptor* descriptor = collection->getIndexCatalog()->findIdIndex(txn);
+
+ if (!descriptor || !CanonicalQuery::isSimpleIdQuery(unparsedQuery)) {
const WhereCallbackReal whereCallback(txn, collection->ns().db());
auto statusWithCQ =
CanonicalQuery::canonicalize(collection->ns(), unparsedQuery, whereCallback);
@@ -466,8 +468,8 @@ StatusWith<unique_ptr<PlanExecutor>> getExecutor(OperationContext* txn,
LOG(2) << "Using idhack: " << unparsedQuery.toString();
unique_ptr<WorkingSet> ws = make_unique<WorkingSet>();
- unique_ptr<PlanStage> root =
- make_unique<IDHackStage>(txn, collection, unparsedQuery["_id"].wrap(), ws.get());
+ unique_ptr<PlanStage> root = make_unique<IDHackStage>(
+ txn, collection, unparsedQuery["_id"].wrap(), ws.get(), descriptor);
// Might have to filter out orphaned docs.
if (plannerOptions & QueryPlannerParams::INCLUDE_SHARD_FILTER) {
@@ -711,12 +713,14 @@ StatusWith<unique_ptr<PlanExecutor>> getExecutorDelete(OperationContext* txn,
return PlanExecutor::make(txn, std::move(ws), std::move(deleteStage), nss.ns(), policy);
}
- if (CanonicalQuery::isSimpleIdQuery(unparsedQuery) &&
- collection->getIndexCatalog()->findIdIndex(txn) && request->getProj().isEmpty()) {
+ const IndexDescriptor* descriptor = collection->getIndexCatalog()->findIdIndex(txn);
+
+ if (descriptor && CanonicalQuery::isSimpleIdQuery(unparsedQuery) &&
+ request->getProj().isEmpty()) {
LOG(2) << "Using idhack: " << unparsedQuery.toString();
PlanStage* idHackStage =
- new IDHackStage(txn, collection, unparsedQuery["_id"].wrap(), ws.get());
+ new IDHackStage(txn, collection, unparsedQuery["_id"].wrap(), ws.get(), descriptor);
unique_ptr<DeleteStage> root =
make_unique<DeleteStage>(txn, deleteStageParams, ws.get(), collection, idHackStage);
return PlanExecutor::make(txn, std::move(ws), std::move(root), collection, policy);
@@ -853,12 +857,14 @@ StatusWith<unique_ptr<PlanExecutor>> getExecutorUpdate(OperationContext* txn,
txn, std::move(ws), std::move(updateStage), nsString.ns(), policy);
}
- if (CanonicalQuery::isSimpleIdQuery(unparsedQuery) &&
- collection->getIndexCatalog()->findIdIndex(txn) && request->getProj().isEmpty()) {
+ const IndexDescriptor* descriptor = collection->getIndexCatalog()->findIdIndex(txn);
+
+ if (descriptor && CanonicalQuery::isSimpleIdQuery(unparsedQuery) &&
+ request->getProj().isEmpty()) {
LOG(2) << "Using idhack: " << unparsedQuery.toString();
PlanStage* idHackStage =
- new IDHackStage(txn, collection, unparsedQuery["_id"].wrap(), ws.get());
+ new IDHackStage(txn, collection, unparsedQuery["_id"].wrap(), ws.get(), descriptor);
unique_ptr<UpdateStage> root =
make_unique<UpdateStage>(txn, updateStageParams, ws.get(), collection, idHackStage);
return PlanExecutor::make(txn, std::move(ws), std::move(root), collection, policy);