diff options
author | Louis Williams <louis.williams@mongodb.com> | 2020-09-24 17:33:11 -0400 |
---|---|---|
committer | Evergreen Agent <no-reply@evergreen.mongodb.com> | 2020-09-29 13:44:28 +0000 |
commit | 53ccac3cf24d322aaaf1de27342d00f46600aaa4 (patch) | |
tree | b240465c2a575ef3cbce7243f230612d8b211ab5 | |
parent | 5bb70750163b5b78fbcf8a84cf0ed4352cd73a25 (diff) | |
download | mongo-53ccac3cf24d322aaaf1de27342d00f46600aaa4.tar.gz |
SERVER-50916 Add $operationMetrics aggregation stage to return resource consumption metrics
-rw-r--r-- | jstests/noPassthrough/aggregate_operation_metrics.js | 140 | ||||
-rw-r--r-- | src/mongo/db/pipeline/SConscript | 2 | ||||
-rw-r--r-- | src/mongo/db/pipeline/document_source_operation_metrics.cpp | 111 | ||||
-rw-r--r-- | src/mongo/db/pipeline/document_source_operation_metrics.h | 105 | ||||
-rw-r--r-- | src/mongo/db/stats/operation_resource_consumption.idl | 8 | ||||
-rw-r--r-- | src/mongo/db/stats/resource_consumption_metrics.cpp | 67 | ||||
-rw-r--r-- | src/mongo/db/stats/resource_consumption_metrics.h | 67 | ||||
-rw-r--r-- | src/mongo/db/stats/resource_consumption_metrics_test.cpp | 20 |
8 files changed, 512 insertions, 8 deletions
diff --git a/jstests/noPassthrough/aggregate_operation_metrics.js b/jstests/noPassthrough/aggregate_operation_metrics.js new file mode 100644 index 00000000000..19c3e4595f5 --- /dev/null +++ b/jstests/noPassthrough/aggregate_operation_metrics.js @@ -0,0 +1,140 @@ +/** + * Tests command output from the $operationMetrics aggregation stage. + * @tags: [ + * requires_replication + * ] + */ +(function() { +'use strict'; + +var rst = new ReplSetTest({ + nodes: 2, + nodeOptions: { + setParameter: { + "measureOperationResourceConsumption": true, + "aggregateOperationResourceConsumptionMetrics": true + } + } +}); +rst.startSet(); +rst.initiate(); + +let assertMetricsExist = function(metrics) { + assert.neq(metrics, undefined); + let primaryMetrics = metrics.primaryMetrics; + let secondaryMetrics = metrics.secondaryMetrics; + [primaryMetrics, secondaryMetrics].forEach((readMetrics) => { + assert.gte(readMetrics.docBytesRead, 0); + assert.gte(readMetrics.docUnitsRead, 0); + assert.gte(readMetrics.idxEntriesRead, 0); + assert.gte(readMetrics.keysSorted, 0); + }); + + assert.gte(metrics.cpuMillis, 0); + assert.gte(metrics.docBytesWritten, 0); + assert.gte(metrics.docUnitsWritten, 0); + assert.gte(metrics.docUnitsReturned, 0); +}; + +// Perform very basic reads and writes on two different databases. 
+const db1Name = 'db1';
+const primary = rst.getPrimary();
+const db1 = primary.getDB(db1Name);
+assert.commandWorked(db1.coll1.insert({a: 1}));
+assert.commandWorked(db1.coll2.insert({a: 1}));
+
+const db2Name = 'db2';
+const db2 = primary.getDB(db2Name);
+assert.commandWorked(db2.coll1.insert({a: 1}));
+assert.commandWorked(db2.coll2.insert({a: 1}));
+
+const secondary = rst.getSecondary();
+[primary, secondary].forEach(function(node) {
+    jsTestLog("Testing node: " + node);
+    rst.awaitReplication();
+
+    assert.eq(node.getDB(db1Name).coll1.find({a: 1}).itcount(), 1);
+    assert.eq(node.getDB(db1Name).coll2.find({a: 1}).itcount(), 1);
+    assert.eq(node.getDB(db2Name).coll1.find({a: 1}).itcount(), 1);
+    assert.eq(node.getDB(db2Name).coll2.find({a: 1}).itcount(), 1);
+
+    // Run an aggregation with a batch size of 1.
+    const adminDB = node.getDB('admin');
+    let cursor = adminDB.aggregate([{$operationMetrics: {}}], {cursor: {batchSize: 1}});
+    assert(cursor.hasNext());
+
+    // Merge all returned documents into a single object keyed by database name.
+    let allMetrics = {};
+    let doc = cursor.next();
+    allMetrics[doc.db] = doc;
+    assert.eq(cursor.objsLeftInBatch(), 0);
+
+    // Trigger a getMore to retrieve metrics for the other database.
+    assert(cursor.hasNext());
+    doc = cursor.next();
+    allMetrics[doc.db] = doc;
+    assert(!cursor.hasNext());
+
+    // Ensure the two user databases have present metrics.
+    assertMetricsExist(allMetrics[db1Name]);
+    assertMetricsExist(allMetrics[db2Name]);
+
+    // Metrics for these databases should not be collected or reported.
+    assert.eq(allMetrics['admin'], undefined);
+    assert.eq(allMetrics['local'], undefined);
+    assert.eq(allMetrics['config'], undefined);
+
+    // Ensure this stage can be composed with other pipeline stages.
+ const newDbName = "newDB"; + const newCollName = "metrics_out"; + cursor = adminDB.aggregate([ + {$operationMetrics: {}}, + {$project: {db: 1}}, + {$out: {db: newDbName, coll: newCollName}}, + ]); + + // No results from the aggregation because of the $out. + assert.eq(cursor.itcount(), 0); + + // TODO (SERVER-51176): Ensure metrics are properly recorded for $out. + // This new database should appear with metrics, but it does not. + cursor = adminDB.aggregate([{$operationMetrics: {}}]); + assert.eq(cursor.itcount(), 2); + + // Ensure the output collection has the 2 databases that existed at the start of the operation. + rst.awaitReplication(); + cursor = node.getDB(newDbName)[newCollName].find({}); + assert.eq(cursor.itcount(), 2); + + primary.getDB(newDbName).dropDatabase(); + + // Fetch and don't clear metrics. + cursor = adminDB.aggregate([{$operationMetrics: {clearMetrics: false}}]); + assert.eq(cursor.itcount(), 3); + + // Fetch and clear metrics. + cursor = adminDB.aggregate([{$operationMetrics: {clearMetrics: true}}]); + assert.eq(cursor.itcount(), 3); + + // Ensure no metrics are reported. + cursor = adminDB.aggregate([{$operationMetrics: {}}]); + assert.eq(cursor.itcount(), 0); + + // Insert something and ensure metrics are still reporting. + assert.commandWorked(db1.coll3.insert({a: 1})); + rst.awaitReplication(); + + // On the primary, this insert's metrics should be recorded, but not on the secondary. Since it + // is applied by the batch applier on the secondary, it is not a user operation and should not + // count toward any metrics. + cursor = adminDB.aggregate([{$operationMetrics: {}}]); + if (node === primary) { + assert.eq(cursor.itcount(), 1); + } else { + assert.eq(cursor.itcount(), 0); + } + db1.coll3.drop(); +}); + +rst.stopSet(); +}());
\ No newline at end of file diff --git a/src/mongo/db/pipeline/SConscript b/src/mongo/db/pipeline/SConscript index 4798bc2d247..b90504d8279 100644 --- a/src/mongo/db/pipeline/SConscript +++ b/src/mongo/db/pipeline/SConscript @@ -230,6 +230,7 @@ pipelineEnv.Library( 'document_source_lookup_change_pre_image.cpp', 'document_source_match.cpp', 'document_source_merge.cpp', + 'document_source_operation_metrics.cpp', 'document_source_out.cpp', 'document_source_plan_cache_stats.cpp', 'document_source_project.cpp', @@ -276,6 +277,7 @@ pipelineEnv.Library( '$BUILD_DIR/mongo/db/repl/speculative_majority_read_info', '$BUILD_DIR/mongo/db/service_context', '$BUILD_DIR/mongo/db/sessions_collection', + '$BUILD_DIR/mongo/db/stats/resource_consumption_metrics', '$BUILD_DIR/mongo/db/storage/encryption_hooks', '$BUILD_DIR/mongo/db/storage/storage_options', '$BUILD_DIR/mongo/db/update/update_document_diff', diff --git a/src/mongo/db/pipeline/document_source_operation_metrics.cpp b/src/mongo/db/pipeline/document_source_operation_metrics.cpp new file mode 100644 index 00000000000..5ae4e1e7734 --- /dev/null +++ b/src/mongo/db/pipeline/document_source_operation_metrics.cpp @@ -0,0 +1,111 @@ +/** + * Copyright (C) 2020-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * <http://www.mongodb.com/licensing/server-side-public-license>. 
+ * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ + +#include "mongo/platform/basic.h" + +#include "mongo/db/pipeline/document_source_operation_metrics.h" + +#include "mongo/db/pipeline/lite_parsed_document_source.h" +#include "mongo/db/server_options.h" +#include "mongo/db/stats/operation_resource_consumption_gen.h" +#include "mongo/db/stats/resource_consumption_metrics.h" +#include "mongo/util/net/socket_utils.h" + +namespace mongo { + +using boost::intrusive_ptr; + +REGISTER_DOCUMENT_SOURCE(operationMetrics, + DocumentSourceOperationMetrics::LiteParsed::parse, + DocumentSourceOperationMetrics::createFromBson); + +const char* DocumentSourceOperationMetrics::getSourceName() const { + return kStageName.rawData(); +} + +namespace { +static constexpr StringData kClearMetrics = "clearMetrics"_sd; +static constexpr StringData kDatabaseName = "db"_sd; +} // namespace + +DocumentSource::GetNextResult DocumentSourceOperationMetrics::doGetNext() { + if (_operationMetrics.empty()) { + auto globalMetrics = [&]() { + if (_clearMetrics) { + return ResourceConsumption::get(pExpCtx->opCtx).getAndClearMetrics(); + } + return ResourceConsumption::get(pExpCtx->opCtx).getMetrics(); + }(); + for (auto& [dbName, metrics] : globalMetrics) { + 
BSONObjBuilder builder; + builder.append(kDatabaseName, dbName); + metrics.toBson(&builder); + _operationMetrics.push_back(builder.obj()); + } + + _operationMetricsIter = _operationMetrics.begin(); + } + + if (_operationMetricsIter != _operationMetrics.end()) { + auto doc = Document(std::move(*_operationMetricsIter)); + _operationMetricsIter++; + return doc; + } + + return GetNextResult::makeEOF(); +} + +intrusive_ptr<DocumentSource> DocumentSourceOperationMetrics::createFromBson( + BSONElement elem, const intrusive_ptr<ExpressionContext>& pExpCtx) { + if (!ResourceConsumption::isMetricsAggregationEnabled()) { + uasserted(ErrorCodes::CommandNotSupported, + "The aggregateOperationResourceConsumption server parameter is not set"); + } + + uassert(ErrorCodes::BadValue, + "The $operationMetrics stage specification must be an object", + elem.type() == Object); + + auto stageObj = elem.Obj(); + bool clearMetrics = false; + if (auto clearElem = stageObj.getField(kClearMetrics); !clearElem.eoo()) { + clearMetrics = clearElem.trueValue(); + } else if (!stageObj.isEmpty()) { + uasserted( + ErrorCodes::BadValue, + "The $operationMetrics stage specification must be empty or contain valid options"); + } + return new DocumentSourceOperationMetrics(pExpCtx, clearMetrics); +} + +Value DocumentSourceOperationMetrics::serialize( + boost::optional<ExplainOptions::Verbosity> explain) const { + return Value(DOC(getSourceName() << Document())); +} +} // namespace mongo diff --git a/src/mongo/db/pipeline/document_source_operation_metrics.h b/src/mongo/db/pipeline/document_source_operation_metrics.h new file mode 100644 index 00000000000..c62bfee45e0 --- /dev/null +++ b/src/mongo/db/pipeline/document_source_operation_metrics.h @@ -0,0 +1,105 @@ +/** + * Copyright (C) 2020-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * <http://www.mongodb.com/licensing/server-side-public-license>. + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ + +#pragma once + +#include "mongo/db/pipeline/document_source.h" + +namespace mongo { + +/** + * Provides a document source interface to retrieve operation resource consumption metrics. 
+ */ +class DocumentSourceOperationMetrics : public DocumentSource { +public: + static constexpr StringData kStageName = "$operationMetrics"_sd; + + class LiteParsed final : public LiteParsedDocumentSource { + public: + static std::unique_ptr<LiteParsed> parse(const NamespaceString& nss, + const BSONElement& spec) { + return std::make_unique<LiteParsed>(spec.fieldName()); + } + + explicit LiteParsed(std::string parseTimeName) + : LiteParsedDocumentSource(std::move(parseTimeName)) {} + + PrivilegeVector requiredPrivileges(bool isMongos, + bool bypassDocumentValidation) const final { + return {Privilege(ResourcePattern::forClusterResource(), ActionType::serverStatus)}; + } + + stdx::unordered_set<NamespaceString> getInvolvedNamespaces() const final { + return {}; + } + + bool isInitialSource() const final { + return true; + } + }; + + DocumentSourceOperationMetrics(const boost::intrusive_ptr<ExpressionContext>& pExpCtx, + bool clearMetrics) + : DocumentSource(kStageName, pExpCtx), _clearMetrics(clearMetrics) {} + + const char* getSourceName() const final; + + StageConstraints constraints(Pipeline::SplitState pipeState) const final { + StageConstraints constraints(StreamType::kStreaming, + PositionRequirement::kFirst, + HostTypeRequirement::kAnyShard, + DiskUseRequirement::kNoDiskUse, + FacetRequirement::kNotAllowed, + TransactionRequirement::kNotAllowed, + LookupRequirement::kAllowed, + UnionRequirement::kAllowed); + + constraints.isIndependentOfAnyCollection = true; + constraints.requiresInputDocSource = false; + return constraints; + } + + boost::optional<DistributedPlanLogic> distributedPlanLogic() final { + return boost::none; + } + + Value serialize(boost::optional<ExplainOptions::Verbosity> explain = boost::none) const final; + + static boost::intrusive_ptr<DocumentSource> createFromBson( + BSONElement elem, const boost::intrusive_ptr<ExpressionContext>& pExpCtx); + +private: + GetNextResult doGetNext() final; + + std::vector<BSONObj> _operationMetrics; + 
std::vector<BSONObj>::const_iterator _operationMetricsIter; + bool _clearMetrics = false; +}; + +} // namespace mongo diff --git a/src/mongo/db/stats/operation_resource_consumption.idl b/src/mongo/db/stats/operation_resource_consumption.idl index 891aa03adfe..796e9c065b5 100644 --- a/src/mongo/db/stats/operation_resource_consumption.idl +++ b/src/mongo/db/stats/operation_resource_consumption.idl @@ -36,11 +36,11 @@ feature_flags: default: false server_parameters: - aggregateOperationResourceConsumption: - description: "When true, globally aggregates per-operation resource consumption. Implies - measureOperationResourceConsumption" + aggregateOperationResourceConsumptionMetrics: + description: "When true, globally aggregates per-operation resource consumption metrics. + Requires measureOperationResourceConsumption to also be set" set_at: - startup - cpp_varname: gAggregateOperationResourceConsumption + cpp_varname: gAggregateOperationResourceConsumptionMetrics cpp_vartype: bool default: false diff --git a/src/mongo/db/stats/resource_consumption_metrics.cpp b/src/mongo/db/stats/resource_consumption_metrics.cpp index 712704aafb0..7fc3228282f 100644 --- a/src/mongo/db/stats/resource_consumption_metrics.cpp +++ b/src/mongo/db/stats/resource_consumption_metrics.cpp @@ -27,9 +27,12 @@ * it in the license file. 
*/ +#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kControl + #include "mongo/db/stats/resource_consumption_metrics.h" #include "mongo/db/stats/operation_resource_consumption_gen.h" +#include "mongo/logv2/log.h" namespace mongo { namespace { @@ -37,14 +40,67 @@ const OperationContext::Decoration<ResourceConsumption::MetricsCollector> getMet OperationContext::declareDecoration<ResourceConsumption::MetricsCollector>(); const ServiceContext::Decoration<ResourceConsumption> getGlobalResourceConsumption = ServiceContext::declareDecoration<ResourceConsumption>(); + +static constexpr StringData kPrimaryMetrics = "primaryMetrics"_sd; +static constexpr StringData kSecondaryMetrics = "secondaryMetrics"_sd; +static constexpr StringData kDocBytesRead = "docBytesRead"_sd; +static constexpr StringData kDocUnitsRead = "docUnitsRead"_sd; +static constexpr StringData kIdxEntriesRead = "idxEntriesRead"_sd; +static constexpr StringData kKeysSorted = "keysSorted"_sd; +static constexpr StringData kCpuMillis = "cpuMillis"_sd; +static constexpr StringData kDocBytesWritten = "docBytesWritten"_sd; +static constexpr StringData kDocUnitsWritten = "docUnitsWritten"_sd; +static constexpr StringData kDocUnitsReturned = "docUnitsReturned"_sd; + } // namespace +bool ResourceConsumption::isMetricsCollectionEnabled() { + return gMeasureOperationResourceConsumption; +} + +bool ResourceConsumption::isMetricsAggregationEnabled() { + return gAggregateOperationResourceConsumptionMetrics; +} + +ResourceConsumption::ResourceConsumption() { + if (gAggregateOperationResourceConsumptionMetrics && !gMeasureOperationResourceConsumption) { + LOGV2_FATAL_NOTRACE( + 5091600, + "measureOperationResourceConsumption feature flag must be enabled to use " + "aggregateOperationResourceConsumptionMetrics"); + } +} ResourceConsumption::MetricsCollector& ResourceConsumption::MetricsCollector::get( OperationContext* opCtx) { return getMetricsCollector(opCtx); } +void 
ResourceConsumption::Metrics::toBson(BSONObjBuilder* builder) const { + { + BSONObjBuilder primaryBuilder = builder->subobjStart(kPrimaryMetrics); + primaryBuilder.appendNumber(kDocBytesRead, primaryMetrics.docBytesRead); + primaryBuilder.appendNumber(kDocUnitsRead, primaryMetrics.docUnitsRead); + primaryBuilder.appendNumber(kIdxEntriesRead, primaryMetrics.idxEntriesRead); + primaryBuilder.appendNumber(kKeysSorted, primaryMetrics.keysSorted); + primaryBuilder.done(); + } + + { + BSONObjBuilder secondaryBuilder = builder->subobjStart(kSecondaryMetrics); + secondaryBuilder.appendNumber(kDocBytesRead, secondaryMetrics.docBytesRead); + secondaryBuilder.appendNumber(kDocUnitsRead, secondaryMetrics.docUnitsRead); + secondaryBuilder.appendNumber(kIdxEntriesRead, secondaryMetrics.idxEntriesRead); + secondaryBuilder.appendNumber(kKeysSorted, secondaryMetrics.keysSorted); + secondaryBuilder.done(); + } + + builder->appendNumber(kCpuMillis, cpuMillis); + builder->appendNumber(kDocBytesWritten, docBytesWritten); + builder->appendNumber(kDocUnitsWritten, docUnitsWritten); + builder->appendNumber(kDocUnitsReturned, docUnitsReturned); +} + ResourceConsumption::ScopedMetricsCollector::ScopedMetricsCollector(OperationContext* opCtx, bool commandCollectsMetrics) : _opCtx(opCtx) { @@ -57,7 +113,7 @@ ResourceConsumption::ScopedMetricsCollector::ScopedMetricsCollector(OperationCon return; } - if (!commandCollectsMetrics || !gMeasureOperationResourceConsumption) { + if (!commandCollectsMetrics || !isMetricsCollectionEnabled()) { metrics.beginScopedNotCollecting(); return; } @@ -80,7 +136,7 @@ ResourceConsumption::ScopedMetricsCollector::~ScopedMetricsCollector() { return; } - if (!gAggregateOperationResourceConsumption) { + if (!isMetricsAggregationEnabled()) { return; } @@ -107,4 +163,11 @@ ResourceConsumption::MetricsMap ResourceConsumption::getMetrics() const { return _metrics; } +ResourceConsumption::MetricsMap ResourceConsumption::getAndClearMetrics() { + stdx::unique_lock<Mutex> 
lk(_mutex); + MetricsMap newMap; + _metrics.swap(newMap); + return newMap; +} + } // namespace mongo diff --git a/src/mongo/db/stats/resource_consumption_metrics.h b/src/mongo/db/stats/resource_consumption_metrics.h index 3b0f14bb36f..b0b40436c6b 100644 --- a/src/mongo/db/stats/resource_consumption_metrics.h +++ b/src/mongo/db/stats/resource_consumption_metrics.h @@ -32,6 +32,7 @@ #include <map> #include <string> +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" #include "mongo/platform/mutex.h" @@ -43,9 +44,34 @@ namespace mongo { */ class ResourceConsumption { public: + ResourceConsumption(); + static ResourceConsumption& get(OperationContext* opCtx); static ResourceConsumption& get(ServiceContext* svcCtx); + struct ReadMetrics { + void add(const ReadMetrics& other) { + docBytesRead += other.docBytesRead; + docUnitsRead += other.docUnitsRead; + idxEntriesRead += other.idxEntriesRead; + keysSorted += other.keysSorted; + } + + ReadMetrics& operator+=(const ReadMetrics& other) { + add(other); + return *this; + } + + // Number of document bytes read + uint64_t docBytesRead; + // Number of document units read + uint64_t docUnitsRead; + // Number of index entries read + uint64_t idxEntriesRead; + // Number of keys sorted for query operations + uint64_t keysSorted; + }; + /** * Metrics maintains a set of resource consumption metrics. */ @@ -54,11 +80,34 @@ public: /** * Adds other Metrics to this one. 
*/ - void add(const Metrics& other){}; + void add(const Metrics& other) { + primaryMetrics += other.primaryMetrics; + secondaryMetrics += other.secondaryMetrics; + cpuMillis += other.cpuMillis; + docBytesWritten += other.docBytesWritten; + docUnitsWritten += other.docUnitsWritten; + docUnitsReturned += other.docUnitsReturned; + }; + Metrics& operator+=(const Metrics& other) { add(other); return *this; } + + // Read metrics recorded for queries processed while this node was primary + ReadMetrics primaryMetrics; + // Read metrics recorded for queries processed while this node was secondary + ReadMetrics secondaryMetrics; + // Amount of CPU time consumed by an operation in milliseconds + uint64_t cpuMillis; + // Number of document bytes written + uint64_t docBytesWritten; + // Number of document units written + uint64_t docUnitsWritten; + // Number of document units returned by a query. + uint64_t docUnitsReturned; + + void toBson(BSONObjBuilder* builder) const; }; /** @@ -176,6 +225,16 @@ public: } /** + * Returns true if resource consumption metrics should be collected per-operation. + */ + static bool isMetricsCollectionEnabled(); + + /** + * Returns true if resource consumption metrics should be aggregated globally. + */ + static bool isMetricsAggregationEnabled(); + + /** * Adds a MetricsCollector's Metrics to an existing Metrics object in the map, keyed by * database name. If no Metrics exist for the database, the value is initialized with the * provided MetricsCollector's Metrics. @@ -190,6 +249,12 @@ public: using MetricsMap = std::map<std::string, Metrics>; MetricsMap getMetrics() const; + /** + * Returns the Metrics map and then clears the contents. This attempts to swap and return the + * metrics map rather than making a full copy like getMetrics. 
+ */ + MetricsMap getAndClearMetrics(); + private: // Protects _metrics mutable Mutex _mutex = MONGO_MAKE_LATCH("ResourceConsumption::_mutex"); diff --git a/src/mongo/db/stats/resource_consumption_metrics_test.cpp b/src/mongo/db/stats/resource_consumption_metrics_test.cpp index 58f95bf1448..fdfb7ac4e8f 100644 --- a/src/mongo/db/stats/resource_consumption_metrics_test.cpp +++ b/src/mongo/db/stats/resource_consumption_metrics_test.cpp @@ -41,7 +41,7 @@ public: void setUp() { _opCtx = makeOperationContext(); gMeasureOperationResourceConsumption = true; - gAggregateOperationResourceConsumption = true; + gAggregateOperationResourceConsumptionMetrics = true; } typedef std::pair<ServiceContext::UniqueClient, ServiceContext::UniqueOperationContext> @@ -126,6 +126,15 @@ TEST_F(ResourceConsumptionMetricsTest, ScopedMetricsCollector) { metricsCopy = globalResourceConsumption.getMetrics(); ASSERT_EQ(metricsCopy.count("db1"), 1); ASSERT_EQ(metricsCopy.count("db2"), 1); + + // Ensure fetch and clear works. + auto metrics = globalResourceConsumption.getAndClearMetrics(); + ASSERT_EQ(metrics.count("db1"), 1); + ASSERT_EQ(metrics.count("db2"), 1); + + metricsCopy = globalResourceConsumption.getMetrics(); + ASSERT_EQ(metricsCopy.count("db1"), 0); + ASSERT_EQ(metricsCopy.count("db2"), 0); } TEST_F(ResourceConsumptionMetricsTest, NestedScopedMetricsCollector) { @@ -171,5 +180,14 @@ TEST_F(ResourceConsumptionMetricsTest, NestedScopedMetricsCollector) { metricsCopy = globalResourceConsumption.getMetrics(); ASSERT_EQ(metricsCopy.count("db2"), 0); + + // Ensure fetch and clear works. + auto metrics = globalResourceConsumption.getAndClearMetrics(); + ASSERT_EQ(metrics.count("db1"), 1); + ASSERT_EQ(metrics.count("db2"), 0); + + metricsCopy = globalResourceConsumption.getMetrics(); + ASSERT_EQ(metricsCopy.count("db1"), 0); + ASSERT_EQ(metricsCopy.count("db2"), 0); } } // namespace mongo |