author     Brett Nawrocki <brett.nawrocki@mongodb.com>       2022-07-12 18:48:40 +0000
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>  2022-07-13 19:54:29 +0000
commit     0268aa7d2fdd9a4f2904d85b9af9d95ce5d16285 (patch)
tree       5cbacb04769c49856d0a5fbbfb48d9914a6e4bd8
parent     f1a87ed0a2ed04327f31cd408f20648cb4732d6f (diff)
download   mongo-0268aa7d2fdd9a4f2904d85b9af9d95ce5d16285.tar.gz
SERVER-67107 Create SDT Cumulative Metrics Field Name Provider Base
-rw-r--r--  src/mongo/db/s/SConscript                                                          |   1
-rw-r--r--  src/mongo/db/s/sharding_data_transform_cumulative_metrics.cpp                      |  68
-rw-r--r--  src/mongo/db/s/sharding_data_transform_cumulative_metrics.h                        |   7
-rw-r--r--  src/mongo/db/s/sharding_data_transform_cumulative_metrics_field_name_provider.cpp  | 116
-rw-r--r--  src/mongo/db/s/sharding_data_transform_cumulative_metrics_field_name_provider.h    |  66
5 files changed, 214 insertions, 44 deletions
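The change is a small refactor of how serverStatus field names are produced: names shared by every sharding data transform component become plain accessors on a new ShardingDataTransformCumulativeMetricsFieldNameProvider base class, while the two names that will differ per component (documentsProcessed/bytesWritten) are pure virtual, and a Placeholder subclass keeps the existing "documentsCopied"/"bytesCopied" spellings so the reported output is unchanged for now. The cumulative metrics object owns the provider and consults it when building its BSON report. Below is a minimal, self-contained sketch of that pattern, not the MongoDB sources: FieldNameProvider, PlaceholderFieldNameProvider, and CumulativeMetrics are illustrative stand-in names, std::string replaces StringData, and plain streaming replaces BSONObjBuilder.

// Sketch of the field-name-provider pattern introduced by this commit.
// Assumptions: simplified class names and types; compiles standalone with C++14.
#include <iostream>
#include <memory>
#include <string>

class FieldNameProvider {
public:
    virtual ~FieldNameProvider() = default;
    // Names shared by all data transform components are non-virtual on the base.
    std::string getForCountStarted() const { return "countStarted"; }
    std::string getForCountSucceeded() const { return "countSucceeded"; }
    // Names that differ per component are supplied by concrete providers.
    virtual std::string getForDocumentsProcessed() const = 0;
    virtual std::string getForBytesWritten() const = 0;
};

// Mirrors the Placeholder subclass in the patch: it preserves the legacy
// field names until component-specific providers exist.
class PlaceholderFieldNameProvider : public FieldNameProvider {
public:
    std::string getForDocumentsProcessed() const override { return "documentsCopied"; }
    std::string getForBytesWritten() const override { return "bytesCopied"; }
};

// The metrics object owns a provider and asks it for field names at report
// time, the way reportForServerStatus()/reportActive() do in the diff below.
class CumulativeMetrics {
public:
    CumulativeMetrics() : _fieldNames(std::make_unique<PlaceholderFieldNameProvider>()) {}

    void report(std::ostream& out) const {
        out << _fieldNames->getForCountStarted() << ": " << _countStarted << "\n"
            << _fieldNames->getForDocumentsProcessed() << ": " << _documentsProcessed << "\n"
            << _fieldNames->getForBytesWritten() << ": " << _bytesWritten << "\n";
    }

private:
    std::unique_ptr<FieldNameProvider> _fieldNames;
    long long _countStarted = 0;
    long long _documentsProcessed = 0;
    long long _bytesWritten = 0;
};

int main() {
    CumulativeMetrics metrics;
    metrics.report(std::cout);  // prints the legacy "documentsCopied"/"bytesCopied" names
    return 0;
}

Run as-is this prints the legacy names; swapping in a different provider changes only the reported spellings, which is the extension point the "Base" in the commit title suggests later component-specific providers will use.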
diff --git a/src/mongo/db/s/SConscript b/src/mongo/db/s/SConscript
index ac26896e278..f98385fc858 100644
--- a/src/mongo/db/s/SConscript
+++ b/src/mongo/db/s/SConscript
@@ -126,6 +126,7 @@ env.Library(
'shard_server_catalog_cache_loader.cpp',
'shard_server_op_observer.cpp',
'sharding_data_transform_metrics.cpp',
+ 'sharding_data_transform_cumulative_metrics_field_name_provider.cpp',
'sharding_data_transform_cumulative_metrics.cpp',
'sharding_data_transform_instance_metrics.cpp',
'sharding_data_transform_metrics_observer.cpp',
diff --git a/src/mongo/db/s/sharding_data_transform_cumulative_metrics.cpp b/src/mongo/db/s/sharding_data_transform_cumulative_metrics.cpp
index dca4b07d7cf..7cfd6fab663 100644
--- a/src/mongo/db/s/sharding_data_transform_cumulative_metrics.cpp
+++ b/src/mongo/db/s/sharding_data_transform_cumulative_metrics.cpp
@@ -44,37 +44,14 @@ constexpr int64_t kPlaceholderLong = 0;
namespace {
constexpr auto kResharding = "resharding";
constexpr auto kGlobalIndex = "globalIndex";
-constexpr auto kCountStarted = "countStarted";
-constexpr auto kCountSucceeded = "countSucceeded";
-constexpr auto kCountFailed = "countFailed";
-constexpr auto kCountCanceled = "countCanceled";
-constexpr auto kLastOpEndingChunkImbalance = "lastOpEndingChunkImbalance";
constexpr auto kActive = "active";
-constexpr auto kDocumentsCopied = "documentsCopied";
-constexpr auto kBytesCopied = "bytesCopied";
constexpr auto kOplogEntriesFetched = "oplogEntriesFetched";
constexpr auto kOplogEntriesApplied = "oplogEntriesApplied";
constexpr auto kInsertsApplied = "insertsApplied";
constexpr auto kUpdatesApplied = "updatesApplied";
constexpr auto kDeletesApplied = "deletesApplied";
-constexpr auto kCountWritesToStashCollections = "countWritesToStashCollections";
-constexpr auto kCountWritesDuringCriticalSection = "countWritesDuringCriticalSection";
-constexpr auto kCountReadsDuringCriticalSection = "countReadsDuringCriticalSection";
constexpr auto kOldestActive = "oldestActive";
-constexpr auto kCoordinatorAllShardsLowestRemainingOperationTimeEstimatedMillis =
- "coordinatorAllShardsLowestRemainingOperationTimeEstimatedMillis";
-constexpr auto kCoordinatorAllShardsHighestRemainingOperationTimeEstimatedMillis =
- "coordinatorAllShardsHighestRemainingOperationTimeEstimatedMillis";
-constexpr auto kRecipientRemainingOperationTimeEstimatedMillis =
- "recipientRemainingOperationTimeEstimatedMillis";
constexpr auto kLatencies = "latencies";
-constexpr auto kCollectionCloningTotalRemoteBatchRetrievalTimeMillis =
- "collectionCloningTotalRemoteBatchRetrievalTimeMillis";
-constexpr auto kCollectionCloningTotalRemoteBatchesRetrieved =
- "collectionCloningTotalRemoteBatchesRetrieved";
-constexpr auto kCollectionCloningTotalLocalInsertTimeMillis =
- "collectionCloningTotalLocalInsertTimeMillis";
-constexpr auto kCollectionCloningTotalLocalInserts = "collectionCloningTotalLocalInserts";
constexpr auto kOplogFetchingTotalRemoteBatchRetrievalTimeMillis =
"oplogFetchingTotalRemoteBatchRetrievalTimeMillis";
constexpr auto kOplogFetchingTotalRemoteBatchesRetrieved =
@@ -154,6 +131,7 @@ ShardingDataTransformCumulativeMetrics* ShardingDataTransformCumulativeMetrics::
ShardingDataTransformCumulativeMetrics::ShardingDataTransformCumulativeMetrics(
const std::string& rootSectionName)
: _rootSectionName{rootSectionName},
+ _fieldNames{std::make_unique<ShardingDataTransformCumulativeMetricsFieldNamePlaceholder>()},
_instanceMetricsForAllRoles(ShardingDataTransformMetrics::kRoleCount),
_operationWasAttempted{false},
_coordinatorStateList{AtomicWord<int64_t>{0},
@@ -226,11 +204,12 @@ void ShardingDataTransformCumulativeMetrics::reportForServerStatus(BSONObjBuilde
}
BSONObjBuilder root(bob->subobjStart(_rootSectionName));
- root.append(kCountStarted, _countStarted.load());
- root.append(kCountSucceeded, _countSucceeded.load());
- root.append(kCountFailed, _countFailed.load());
- root.append(kCountCanceled, _countCancelled.load());
- root.append(kLastOpEndingChunkImbalance, _lastOpEndingChunkImbalance.load());
+ root.append(_fieldNames->getForCountStarted(), _countStarted.load());
+ root.append(_fieldNames->getForCountSucceeded(), _countSucceeded.load());
+ root.append(_fieldNames->getForCountFailed(), _countFailed.load());
+ root.append(_fieldNames->getForCountCanceled(), _countCancelled.load());
+ root.append(_fieldNames->getForLastOpEndingChunkImbalance(),
+ _lastOpEndingChunkImbalance.load());
reportActive(&root);
reportOldestActive(&root);
@@ -240,36 +219,41 @@ void ShardingDataTransformCumulativeMetrics::reportForServerStatus(BSONObjBuilde
void ShardingDataTransformCumulativeMetrics::reportActive(BSONObjBuilder* bob) const {
BSONObjBuilder s(bob->subobjStart(kActive));
- s.append(kDocumentsCopied, _documentsCopied.load());
- s.append(kBytesCopied, _bytesCopied.load());
+ s.append(_fieldNames->getForDocumentsProcessed(), _documentsProcessed.load());
+ s.append(_fieldNames->getForBytesWritten(), _bytesWritten.load());
s.append(kOplogEntriesFetched, _oplogEntriesFetched.load());
s.append(kOplogEntriesApplied, _oplogEntriesApplied.load());
s.append(kInsertsApplied, _insertsApplied.load());
s.append(kUpdatesApplied, _updatesApplied.load());
s.append(kDeletesApplied, _deletesApplied.load());
- s.append(kCountWritesToStashCollections, _writesToStashedCollections.load());
- s.append(kCountWritesDuringCriticalSection, _writesDuringCriticalSection.load());
- s.append(kCountReadsDuringCriticalSection, _readsDuringCriticalSection.load());
+ s.append(_fieldNames->getForCountWritesToStashCollections(),
+ _writesToStashedCollections.load());
+ s.append(_fieldNames->getForCountWritesDuringCriticalSection(),
+ _writesDuringCriticalSection.load());
+ s.append(_fieldNames->getForCountReadsDuringCriticalSection(),
+ _readsDuringCriticalSection.load());
}
void ShardingDataTransformCumulativeMetrics::reportOldestActive(BSONObjBuilder* bob) const {
BSONObjBuilder s(bob->subobjStart(kOldestActive));
- s.append(kCoordinatorAllShardsHighestRemainingOperationTimeEstimatedMillis,
+ s.append(_fieldNames->getForCoordinatorAllShardsHighestRemainingOperationTimeEstimatedMillis(),
getOldestOperationHighEstimateRemainingTimeMillis(Role::kCoordinator));
- s.append(kCoordinatorAllShardsLowestRemainingOperationTimeEstimatedMillis,
+ s.append(_fieldNames->getForCoordinatorAllShardsLowestRemainingOperationTimeEstimatedMillis(),
getOldestOperationLowEstimateRemainingTimeMillis(Role::kCoordinator));
- s.append(kRecipientRemainingOperationTimeEstimatedMillis,
+ s.append(_fieldNames->getForRecipientRemainingOperationTimeEstimatedMillis(),
getOldestOperationHighEstimateRemainingTimeMillis(Role::kRecipient));
}
void ShardingDataTransformCumulativeMetrics::reportLatencies(BSONObjBuilder* bob) const {
BSONObjBuilder s(bob->subobjStart(kLatencies));
- s.append(kCollectionCloningTotalRemoteBatchRetrievalTimeMillis,
+ s.append(_fieldNames->getForCollectionCloningTotalRemoteBatchRetrievalTimeMillis(),
_totalBatchRetrievedDuringCloneMillis.load());
- s.append(kCollectionCloningTotalRemoteBatchesRetrieved, _totalBatchRetrievedDuringClone.load());
- s.append(kCollectionCloningTotalLocalInsertTimeMillis,
+ s.append(_fieldNames->getForCollectionCloningTotalRemoteBatchesRetrieved(),
+ _totalBatchRetrievedDuringClone.load());
+ s.append(_fieldNames->getForCollectionCloningTotalLocalInsertTimeMillis(),
_collectionCloningTotalLocalInsertTimeMillis.load());
- s.append(kCollectionCloningTotalLocalInserts, _collectionCloningTotalLocalBatchInserts.load());
+ s.append(_fieldNames->getForCollectionCloningTotalLocalInserts(),
+ _collectionCloningTotalLocalBatchInserts.load());
s.append(kOplogFetchingTotalRemoteBatchRetrievalTimeMillis,
_oplogFetchingTotalRemoteBatchesRetrievalTimeMillis.load());
s.append(kOplogFetchingTotalRemoteBatchesRetrieved,
@@ -422,8 +406,8 @@ const char* ShardingDataTransformCumulativeMetrics::fieldNameFor(
void ShardingDataTransformCumulativeMetrics::onInsertsDuringCloning(
int64_t count, int64_t bytes, const Milliseconds& elapsedTime) {
_collectionCloningTotalLocalBatchInserts.fetchAndAdd(1);
- _documentsCopied.fetchAndAdd(count);
- _bytesCopied.fetchAndAdd(bytes);
+ _documentsProcessed.fetchAndAdd(count);
+ _bytesWritten.fetchAndAdd(bytes);
_collectionCloningTotalLocalInsertTimeMillis.fetchAndAdd(
durationCount<Milliseconds>(elapsedTime));
}
diff --git a/src/mongo/db/s/sharding_data_transform_cumulative_metrics.h b/src/mongo/db/s/sharding_data_transform_cumulative_metrics.h
index 5e6949cf001..fa0b4e6c9fb 100644
--- a/src/mongo/db/s/sharding_data_transform_cumulative_metrics.h
+++ b/src/mongo/db/s/sharding_data_transform_cumulative_metrics.h
@@ -30,6 +30,7 @@
#pragma once
#include "mongo/bson/bsonobjbuilder.h"
+#include "mongo/db/s/sharding_data_transform_cumulative_metrics_field_name_provider.h"
#include "mongo/db/s/sharding_data_transform_metrics_observer_interface.h"
#include "mongo/db/service_context.h"
#include "mongo/platform/atomic_word.h"
@@ -79,6 +80,7 @@ public:
kNumStates
};
+ using NameProvider = ShardingDataTransformCumulativeMetricsFieldNameProvider;
using Role = ShardingDataTransformMetrics::Role;
using InstanceObserver = ShardingDataTransformMetricsObserverInterface;
using DeregistrationFunction = unique_function<void()>;
@@ -173,6 +175,7 @@ private:
mutable Mutex _mutex;
const std::string _rootSectionName;
+ std::unique_ptr<NameProvider> _fieldNames;
std::vector<MetricsSet> _instanceMetricsForAllRoles;
AtomicWord<bool> _operationWasAttempted;
@@ -191,8 +194,8 @@ private:
AtomicWord<int64_t> _totalBatchRetrievedDuringCloneMillis{0};
AtomicWord<int64_t> _oplogBatchApplied{0};
AtomicWord<int64_t> _oplogBatchAppliedMillis{0};
- AtomicWord<int64_t> _documentsCopied{0};
- AtomicWord<int64_t> _bytesCopied{0};
+ AtomicWord<int64_t> _documentsProcessed{0};
+ AtomicWord<int64_t> _bytesWritten{0};
AtomicWord<int64_t> _lastOpEndingChunkImbalance{0};
AtomicWord<int64_t> _readsDuringCriticalSection{0};
diff --git a/src/mongo/db/s/sharding_data_transform_cumulative_metrics_field_name_provider.cpp b/src/mongo/db/s/sharding_data_transform_cumulative_metrics_field_name_provider.cpp
new file mode 100644
index 00000000000..cf0d988fda7
--- /dev/null
+++ b/src/mongo/db/s/sharding_data_transform_cumulative_metrics_field_name_provider.cpp
@@ -0,0 +1,116 @@
+/**
+ * Copyright (C) 2022-present MongoDB, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the Server Side Public License, version 1,
+ * as published by MongoDB, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * Server Side Public License for more details.
+ *
+ * You should have received a copy of the Server Side Public License
+ * along with this program. If not, see
+ * <http://www.mongodb.com/licensing/server-side-public-license>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the Server Side Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#include "mongo/db/s/sharding_data_transform_cumulative_metrics_field_name_provider.h"
+
+namespace mongo {
+
+namespace {
+using Provider = ShardingDataTransformCumulativeMetricsFieldNameProvider;
+using Placeholder = ShardingDataTransformCumulativeMetricsFieldNamePlaceholder;
+constexpr auto kCountStarted = "countStarted";
+constexpr auto kCountSucceeded = "countSucceeded";
+constexpr auto kCountFailed = "countFailed";
+constexpr auto kCountCanceled = "countCanceled";
+constexpr auto kLastOpEndingChunkImbalance = "lastOpEndingChunkImbalance";
+constexpr auto kDocumentsCopied = "documentsCopied";
+constexpr auto kBytesCopied = "bytesCopied";
+constexpr auto kCountWritesToStashCollections = "countWritesToStashCollections";
+constexpr auto kCountWritesDuringCriticalSection = "countWritesDuringCriticalSection";
+constexpr auto kCountReadsDuringCriticalSection = "countReadsDuringCriticalSection";
+constexpr auto kCoordinatorAllShardsLowestRemainingOperationTimeEstimatedMillis =
+ "coordinatorAllShardsLowestRemainingOperationTimeEstimatedMillis";
+constexpr auto kCoordinatorAllShardsHighestRemainingOperationTimeEstimatedMillis =
+ "coordinatorAllShardsHighestRemainingOperationTimeEstimatedMillis";
+constexpr auto kRecipientRemainingOperationTimeEstimatedMillis =
+ "recipientRemainingOperationTimeEstimatedMillis";
+constexpr auto kCollectionCloningTotalRemoteBatchRetrievalTimeMillis =
+ "collectionCloningTotalRemoteBatchRetrievalTimeMillis";
+constexpr auto kCollectionCloningTotalRemoteBatchesRetrieved =
+ "collectionCloningTotalRemoteBatchesRetrieved";
+constexpr auto kCollectionCloningTotalLocalInsertTimeMillis =
+ "collectionCloningTotalLocalInsertTimeMillis";
+constexpr auto kCollectionCloningTotalLocalInserts = "collectionCloningTotalLocalInserts";
+} // namespace
+
+StringData Provider::getForCountStarted() const {
+ return kCountStarted;
+}
+StringData Provider::getForCountSucceeded() const {
+ return kCountSucceeded;
+}
+StringData Provider::getForCountFailed() const {
+ return kCountFailed;
+}
+StringData Provider::getForCountCanceled() const {
+ return kCountCanceled;
+}
+StringData Provider::getForLastOpEndingChunkImbalance() const {
+ return kLastOpEndingChunkImbalance;
+}
+StringData Provider::getForCountWritesToStashCollections() const {
+ return kCountWritesToStashCollections;
+}
+StringData Provider::getForCountWritesDuringCriticalSection() const {
+ return kCountWritesDuringCriticalSection;
+}
+StringData Provider::getForCountReadsDuringCriticalSection() const {
+ return kCountReadsDuringCriticalSection;
+}
+StringData Provider::getForCoordinatorAllShardsLowestRemainingOperationTimeEstimatedMillis() const {
+ return kCoordinatorAllShardsLowestRemainingOperationTimeEstimatedMillis;
+}
+StringData Provider::getForCoordinatorAllShardsHighestRemainingOperationTimeEstimatedMillis()
+ const {
+ return kCoordinatorAllShardsHighestRemainingOperationTimeEstimatedMillis;
+}
+StringData Provider::getForRecipientRemainingOperationTimeEstimatedMillis() const {
+ return kRecipientRemainingOperationTimeEstimatedMillis;
+}
+StringData Provider::getForCollectionCloningTotalRemoteBatchRetrievalTimeMillis() const {
+ return kCollectionCloningTotalRemoteBatchRetrievalTimeMillis;
+}
+StringData Provider::getForCollectionCloningTotalRemoteBatchesRetrieved() const {
+ return kCollectionCloningTotalRemoteBatchesRetrieved;
+}
+StringData Provider::getForCollectionCloningTotalLocalInsertTimeMillis() const {
+ return kCollectionCloningTotalLocalInsertTimeMillis;
+}
+StringData Provider::getForCollectionCloningTotalLocalInserts() const {
+ return kCollectionCloningTotalLocalInserts;
+}
+
+StringData Placeholder::getForDocumentsProcessed() const {
+ return kDocumentsCopied;
+}
+StringData Placeholder::getForBytesWritten() const {
+ return kBytesCopied;
+}
+
+} // namespace mongo
diff --git a/src/mongo/db/s/sharding_data_transform_cumulative_metrics_field_name_provider.h b/src/mongo/db/s/sharding_data_transform_cumulative_metrics_field_name_provider.h
new file mode 100644
index 00000000000..51cad5ed3fb
--- /dev/null
+++ b/src/mongo/db/s/sharding_data_transform_cumulative_metrics_field_name_provider.h
@@ -0,0 +1,66 @@
+/**
+ * Copyright (C) 2022-present MongoDB, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the Server Side Public License, version 1,
+ * as published by MongoDB, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * Server Side Public License for more details.
+ *
+ * You should have received a copy of the Server Side Public License
+ * along with this program. If not, see
+ * <http://www.mongodb.com/licensing/server-side-public-license>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the Server Side Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#pragma once
+
+#include "mongo/base/string_data.h"
+
+namespace mongo {
+
+class ShardingDataTransformCumulativeMetricsFieldNameProvider {
+public:
+ virtual ~ShardingDataTransformCumulativeMetricsFieldNameProvider() = default;
+ StringData getForCountStarted() const;
+ StringData getForCountSucceeded() const;
+ StringData getForCountFailed() const;
+ StringData getForCountCanceled() const;
+ StringData getForLastOpEndingChunkImbalance() const;
+ virtual StringData getForDocumentsProcessed() const = 0;
+ virtual StringData getForBytesWritten() const = 0;
+ StringData getForCountWritesToStashCollections() const;
+ StringData getForCountWritesDuringCriticalSection() const;
+ StringData getForCountReadsDuringCriticalSection() const;
+ StringData getForCoordinatorAllShardsLowestRemainingOperationTimeEstimatedMillis() const;
+ StringData getForCoordinatorAllShardsHighestRemainingOperationTimeEstimatedMillis() const;
+ StringData getForRecipientRemainingOperationTimeEstimatedMillis() const;
+ StringData getForCollectionCloningTotalRemoteBatchRetrievalTimeMillis() const;
+ StringData getForCollectionCloningTotalRemoteBatchesRetrieved() const;
+ StringData getForCollectionCloningTotalLocalInsertTimeMillis() const;
+ StringData getForCollectionCloningTotalLocalInserts() const;
+};
+
+class ShardingDataTransformCumulativeMetricsFieldNamePlaceholder
+ : public ShardingDataTransformCumulativeMetricsFieldNameProvider {
+public:
+ virtual ~ShardingDataTransformCumulativeMetricsFieldNamePlaceholder() = default;
+ virtual StringData getForDocumentsProcessed() const override;
+ virtual StringData getForBytesWritten() const override;
+};
+
+} // namespace mongo