author     nandinibhartiyaMDB <nandini.bhartiya@mongodb.com>    2022-07-01 17:14:16 +0000
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>     2022-07-14 19:18:10 +0000
commit     a203b1f3cc705e90f8d517e0270a5139da395e68
tree       6f1f6b11639ca25e3026d36d199e4eedd5c29736
parent     5dfda9e29d70115f1af249d540b414ad8227834a
download   mongo-a203b1f3cc705e90f8d517e0270a5139da395e68.tar.gz
SERVER-67088: Create the FieldNameProvider classes
Create the ShardingDataTransformInstanceMetricsFieldNameProvider base class and the derived GlobalIndexMetricsFieldNameProvider and ReshardingMetricsFieldNameProvider classes.
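The patch replaces the hard-coded static constexpr currentOp field-name constants in the metrics classes with a polymorphic provider hierarchy, so the shared ShardingDataTransformInstanceMetrics accounting code can report the same counters under operation-specific names (resharding keeps documentsCopied/bytesCopied, while global index builds use keysWrittenFromScan/bytesWritten). The standalone sketch below illustrates the pattern only; it is not the MongoDB implementation (BSON building is replaced by a std::map, and all but two of the getters are omitted).

// Minimal sketch of the field-name-provider pattern, assuming simplified types.
#include <iostream>
#include <map>
#include <memory>
#include <string>

// Base provider: generic metric names shared by all sharding data transforms.
class ShardingDataTransformInstanceMetricsFieldNameProvider {
public:
    virtual ~ShardingDataTransformInstanceMetricsFieldNameProvider() = default;
    // Derived providers map these generic counters to operation-specific names.
    virtual std::string getForDocumentsProcessed() const = 0;
    virtual std::string getForBytesWritten() const = 0;
};

// Resharding keeps its historical currentOp field names.
class ReshardingMetricsFieldNameProvider
    : public ShardingDataTransformInstanceMetricsFieldNameProvider {
public:
    std::string getForDocumentsProcessed() const override { return "documentsCopied"; }
    std::string getForBytesWritten() const override { return "bytesCopied"; }
};

// Global index builds report the same counters under index-specific names.
class GlobalIndexMetricsFieldNameProvider
    : public ShardingDataTransformInstanceMetricsFieldNameProvider {
public:
    std::string getForDocumentsProcessed() const override { return "keysWrittenFromScan"; }
    std::string getForBytesWritten() const override { return "bytesWritten"; }
};

// The generic reporting code consults the provider instead of hard-coded names.
std::map<std::string, long long> reportForCurrentOp(
    const ShardingDataTransformInstanceMetricsFieldNameProvider& fieldNames,
    long long documentsProcessed,
    long long bytesWritten) {
    return {{fieldNames.getForDocumentsProcessed(), documentsProcessed},
            {fieldNames.getForBytesWritten(), bytesWritten}};
}

int main() {
    auto provider = std::make_unique<ReshardingMetricsFieldNameProvider>();
    for (const auto& [field, value] : reportForCurrentOp(*provider, 5, 1000)) {
        // Prints "bytesCopied: 1000" then "documentsCopied: 5" (std::map orders keys).
        std::cout << field << ": " << value << "\n";
    }
    return 0;
}

Swapping in GlobalIndexMetricsFieldNameProvider changes only the reported field names; the accounting logic stays untouched, which is what lets the base metrics class own the counters in this patch.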
-rw-r--r--  src/mongo/db/s/SConscript                                                          |   3
-rw-r--r--  src/mongo/db/s/global_index_metrics.cpp                                            |  16
-rw-r--r--  src/mongo/db/s/global_index_metrics.h                                              |   1
-rw-r--r--  src/mongo/db/s/global_index_metrics_field_name_provider.cpp                        |  55
-rw-r--r--  src/mongo/db/s/global_index_metrics_field_name_provider.h                          |  47
-rw-r--r--  src/mongo/db/s/global_index_metrics_test.cpp                                       |   2
-rw-r--r--  src/mongo/db/s/resharding/resharding_collection_cloner.cpp                         |   2
-rw-r--r--  src/mongo/db/s/resharding/resharding_collection_cloner_test.cpp                    |   8
-rw-r--r--  src/mongo/db/s/resharding/resharding_metrics.cpp                                    |  33
-rw-r--r--  src/mongo/db/s/resharding/resharding_metrics.h                                      |  27
-rw-r--r--  src/mongo/db/s/resharding/resharding_metrics_field_name_provider.cpp               |  87
-rw-r--r--  src/mongo/db/s/resharding/resharding_metrics_field_name_provider.h                 |  53
-rw-r--r--  src/mongo/db/s/resharding/resharding_metrics_test.cpp                              |  13
-rw-r--r--  src/mongo/db/s/resharding/resharding_recipient_service.cpp                         |  10
-rw-r--r--  src/mongo/db/s/sharding_data_transform_instance_metrics.cpp                        | 103
-rw-r--r--  src/mongo/db/s/sharding_data_transform_instance_metrics.h                          |  55
-rw-r--r--  src/mongo/db/s/sharding_data_transform_instance_metrics_field_name_provider.cpp   | 130
-rw-r--r--  src/mongo/db/s/sharding_data_transform_instance_metrics_field_name_provider.h     |  64
-rw-r--r--  src/mongo/db/s/sharding_data_transform_instance_metrics_test.cpp                   |  74
19 files changed, 618 insertions, 165 deletions
diff --git a/src/mongo/db/s/SConscript b/src/mongo/db/s/SConscript
index 7ecb530d088..ab12d8e1cb3 100644
--- a/src/mongo/db/s/SConscript
+++ b/src/mongo/db/s/SConscript
@@ -101,6 +101,8 @@ env.Library(
'resharding/resharding_manual_cleanup.cpp',
'resharding/resharding_metrics_helpers.cpp',
'resharding/resharding_metrics.cpp',
+ 'resharding/resharding_metrics_field_name_provider.cpp',
+ 'global_index_metrics_field_name_provider.cpp',
'resharding/resharding_op_observer.cpp',
'resharding/resharding_oplog_applier.cpp',
'resharding/resharding_oplog_applier_metrics.cpp',
@@ -132,6 +134,7 @@ env.Library(
'sharding_data_transform_cumulative_metrics.cpp',
'sharding_data_transform_instance_metrics.cpp',
'sharding_data_transform_metrics_observer.cpp',
+ 'sharding_data_transform_instance_metrics_field_name_provider.cpp',
'sharding_initialization_mongod.cpp',
'sharding_runtime_d_params.idl',
'sharding_state_recovery.cpp',
diff --git a/src/mongo/db/s/global_index_metrics.cpp b/src/mongo/db/s/global_index_metrics.cpp
index 29f2fa17dce..2d7d94af858 100644
--- a/src/mongo/db/s/global_index_metrics.cpp
+++ b/src/mongo/db/s/global_index_metrics.cpp
@@ -54,13 +54,15 @@ GlobalIndexMetrics::GlobalIndexMetrics(UUID instanceId,
Date_t startTime,
ClockSource* clockSource,
ShardingDataTransformCumulativeMetrics* cumulativeMetrics)
- : ShardingDataTransformInstanceMetrics{std::move(instanceId),
- std::move(originatingCommand),
- std::move(nss),
- role,
- startTime,
- clockSource,
- cumulativeMetrics} {}
+ : ShardingDataTransformInstanceMetrics{
+ std::move(instanceId),
+ std::move(originatingCommand),
+ std::move(nss),
+ role,
+ startTime,
+ clockSource,
+ cumulativeMetrics,
+ std::make_unique<GlobalIndexMetricsFieldNameProvider>()} {}
std::string GlobalIndexMetrics::createOperationDescription() const noexcept {
return fmt::format("GlobalIndexMetrics{}Service {}",
diff --git a/src/mongo/db/s/global_index_metrics.h b/src/mongo/db/s/global_index_metrics.h
index 04c54686785..9dafa72cdb8 100644
--- a/src/mongo/db/s/global_index_metrics.h
+++ b/src/mongo/db/s/global_index_metrics.h
@@ -31,6 +31,7 @@
#include "mongo/bson/bsonobj.h"
#include "mongo/db/namespace_string.h"
+#include "mongo/db/s/global_index_metrics_field_name_provider.h"
#include "mongo/db/s/sharding_data_transform_instance_metrics.h"
#include "mongo/util/uuid.h"
diff --git a/src/mongo/db/s/global_index_metrics_field_name_provider.cpp b/src/mongo/db/s/global_index_metrics_field_name_provider.cpp
new file mode 100644
index 00000000000..609ab054ea2
--- /dev/null
+++ b/src/mongo/db/s/global_index_metrics_field_name_provider.cpp
@@ -0,0 +1,55 @@
+/**
+ * Copyright (C) 2022-present MongoDB, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the Server Side Public License, version 1,
+ * as published by MongoDB, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * Server Side Public License for more details.
+ *
+ * You should have received a copy of the Server Side Public License
+ * along with this program. If not, see
+ * <http://www.mongodb.com/licensing/server-side-public-license>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the Server Side Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#include "mongo/db/s/global_index_metrics_field_name_provider.h"
+
+namespace mongo {
+namespace {
+constexpr auto kBytesWritten = "bytesWritten";
+constexpr auto kKeysWrittenFromScan = "keysWrittenFromScan";
+constexpr auto kApproxBytesToScan = "approxBytesToScan";
+constexpr auto kApproxDocumentsToScan = "approxDocumentsToScan";
+} // namespace
+
+StringData GlobalIndexMetricsFieldNameProvider::getForBytesWritten() const {
+ return kBytesWritten;
+}
+
+StringData GlobalIndexMetricsFieldNameProvider::getForDocumentsProcessed() const {
+ return kKeysWrittenFromScan;
+}
+
+StringData GlobalIndexMetricsFieldNameProvider::getForApproxDocumentsToProcess() const {
+ return kApproxDocumentsToScan;
+}
+
+StringData GlobalIndexMetricsFieldNameProvider::getForApproxBytesToScan() const {
+ return kApproxBytesToScan;
+}
+} // namespace mongo
diff --git a/src/mongo/db/s/global_index_metrics_field_name_provider.h b/src/mongo/db/s/global_index_metrics_field_name_provider.h
new file mode 100644
index 00000000000..bab3f6808e2
--- /dev/null
+++ b/src/mongo/db/s/global_index_metrics_field_name_provider.h
@@ -0,0 +1,47 @@
+/**
+ * Copyright (C) 2022-present MongoDB, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the Server Side Public License, version 1,
+ * as published by MongoDB, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * Server Side Public License for more details.
+ *
+ * You should have received a copy of the Server Side Public License
+ * along with this program. If not, see
+ * <http://www.mongodb.com/licensing/server-side-public-license>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the Server Side Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#pragma once
+
+#include "mongo/db/namespace_string.h"
+#include "mongo/db/s/sharding_data_transform_instance_metrics_field_name_provider.h"
+#include "mongo/util/duration.h"
+
+namespace mongo {
+
+class GlobalIndexMetricsFieldNameProvider
+ : public ShardingDataTransformInstanceMetricsFieldNameProvider {
+public:
+ StringData getForDocumentsProcessed() const override;
+ StringData getForBytesWritten() const override;
+ StringData getForApproxDocumentsToProcess() const override;
+ StringData getForApproxBytesToScan() const override;
+};
+
+} // namespace mongo
diff --git a/src/mongo/db/s/global_index_metrics_test.cpp b/src/mongo/db/s/global_index_metrics_test.cpp
index fcf7471576a..7d2dcb36f41 100644
--- a/src/mongo/db/s/global_index_metrics_test.cpp
+++ b/src/mongo/db/s/global_index_metrics_test.cpp
@@ -58,7 +58,6 @@ public:
}
};
-
TEST_F(GlobalIndexMetricsTest, ReportForCurrentOpShouldHaveGlobalIndexDescription) {
std::vector<Role> roles{Role::kCoordinator, Role::kDonor, Role::kRecipient};
@@ -73,6 +72,5 @@ TEST_F(GlobalIndexMetricsTest, ReportForCurrentOpShouldHaveGlobalIndexDescriptio
instanceId.toString()));
});
}
-
} // namespace
} // namespace mongo
diff --git a/src/mongo/db/s/resharding/resharding_collection_cloner.cpp b/src/mongo/db/s/resharding/resharding_collection_cloner.cpp
index 8bd04ebfe37..fffac5d1736 100644
--- a/src/mongo/db/s/resharding/resharding_collection_cloner.cpp
+++ b/src/mongo/db/s/resharding/resharding_collection_cloner.cpp
@@ -290,7 +290,7 @@ bool ReshardingCollectionCloner::doOneBatch(OperationContext* opCtx, Pipeline& p
int bytesInserted = resharding::data_copy::withOneStaleConfigRetry(
opCtx, [&] { return resharding::data_copy::insertBatch(opCtx, _outputNss, batch); });
- _metrics->onDocumentsCopied(
+ _metrics->onDocumentsProcessed(
batch.size(), bytesInserted, Milliseconds(batchInsertTimer.millis()));
return true;
diff --git a/src/mongo/db/s/resharding/resharding_collection_cloner_test.cpp b/src/mongo/db/s/resharding/resharding_collection_cloner_test.cpp
index 45679367673..833444ca8cd 100644
--- a/src/mongo/db/s/resharding/resharding_collection_cloner_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_collection_cloner_test.cpp
@@ -174,13 +174,13 @@ protected:
auto opCtx = operationContext();
AutoGetCollection tempColl{opCtx, tempNss, MODE_IS};
while (_cloner->doOneBatch(operationContext(), *_pipeline)) {
- ASSERT_EQ(tempColl->numRecords(opCtx), _metrics->getDocumentsCopiedCount());
- ASSERT_EQ(tempColl->dataSize(opCtx), _metrics->getBytesCopiedCount());
+ ASSERT_EQ(tempColl->numRecords(opCtx), _metrics->getDocumentsProcessedCount());
+ ASSERT_EQ(tempColl->dataSize(opCtx), _metrics->getBytesWrittenCount());
}
ASSERT_EQ(tempColl->numRecords(operationContext()), expectedDocumentsCount);
- ASSERT_EQ(_metrics->getDocumentsCopiedCount(), expectedDocumentsCount);
+ ASSERT_EQ(_metrics->getDocumentsProcessedCount(), expectedDocumentsCount);
ASSERT_GT(tempColl->dataSize(opCtx), 0);
- ASSERT_EQ(tempColl->dataSize(opCtx), _metrics->getBytesCopiedCount());
+ ASSERT_EQ(tempColl->dataSize(opCtx), _metrics->getBytesWrittenCount());
verifyFunction(tempColl->getCursor(opCtx));
}
diff --git a/src/mongo/db/s/resharding/resharding_metrics.cpp b/src/mongo/db/s/resharding/resharding_metrics.cpp
index 3a86497bfd1..f8fe029dcac 100644
--- a/src/mongo/db/s/resharding/resharding_metrics.cpp
+++ b/src/mongo/db/s/resharding/resharding_metrics.cpp
@@ -83,7 +83,8 @@ ReshardingMetrics::ReshardingMetrics(UUID instanceId,
role,
startTime,
clockSource,
- cumulativeMetrics},
+ cumulativeMetrics,
+ std::make_unique<ReshardingMetricsFieldNameProvider>()},
_state{getDefaultState(role)},
_deletesApplied{0},
_insertsApplied{0},
@@ -91,7 +92,8 @@ ReshardingMetrics::ReshardingMetrics(UUID instanceId,
_oplogEntriesApplied{0},
_oplogEntriesFetched{0},
_applyingStartTime{kNoDate},
- _applyingEndTime{kNoDate} {}
+ _applyingEndTime{kNoDate},
+ _reshardingFieldNames{static_cast<ReshardingMetricsFieldNameProvider*>(_fieldNames.get())} {}
ReshardingMetrics::ReshardingMetrics(const CommonReshardingMetadata& metadata,
Role role,
@@ -113,8 +115,8 @@ std::string ReshardingMetrics::createOperationDescription() const noexcept {
Milliseconds ReshardingMetrics::getRecipientHighEstimateRemainingTimeMillis() const {
auto estimate = resharding::estimateRemainingRecipientTime(_applyingStartTime.load() != kNoDate,
- getBytesCopiedCount(),
- getApproxBytesToCopyCount(),
+ getBytesWrittenCount(),
+ getApproxBytesToScanCount(),
getCopyingElapsedTimeSecs(),
_oplogEntriesApplied.load(),
_oplogEntriesFetched.load(),
@@ -153,20 +155,23 @@ StringData ReshardingMetrics::getStateString() const noexcept {
BSONObj ReshardingMetrics::reportForCurrentOp() const noexcept {
BSONObjBuilder builder;
- builder.append(kDescription, createOperationDescription());
switch (_role) {
case Role::kCoordinator:
- builder.append(kApplyTimeElapsed, getApplyingElapsedTimeSecs().count());
+ builder.append(_reshardingFieldNames->getForApplyTimeElapsed(),
+ getApplyingElapsedTimeSecs().count());
break;
case Role::kDonor:
break;
case Role::kRecipient:
- builder.append(kApplyTimeElapsed, getApplyingElapsedTimeSecs().count());
- builder.append(kInsertsApplied, _insertsApplied.load());
- builder.append(kUpdatesApplied, _updatesApplied.load());
- builder.append(kDeletesApplied, _deletesApplied.load());
- builder.append(kOplogEntriesApplied, _oplogEntriesApplied.load());
- builder.append(kOplogEntriesFetched, _oplogEntriesFetched.load());
+ builder.append(_reshardingFieldNames->getForApplyTimeElapsed(),
+ getApplyingElapsedTimeSecs().count());
+ builder.append(_reshardingFieldNames->getForInsertsApplied(), _insertsApplied.load());
+ builder.append(_reshardingFieldNames->getForUpdatesApplied(), _updatesApplied.load());
+ builder.append(_reshardingFieldNames->getForDeletesApplied(), _deletesApplied.load());
+ builder.append(_reshardingFieldNames->getForOplogEntriesApplied(),
+ _oplogEntriesApplied.load());
+ builder.append(_reshardingFieldNames->getForOplogEntriesFetched(),
+ _oplogEntriesFetched.load());
break;
default:
MONGO_UNREACHABLE;
@@ -193,12 +198,12 @@ void ReshardingMetrics::restoreRecipientSpecificFields(
auto docsToCopy = metrics->getApproxDocumentsToCopy();
auto bytesToCopy = metrics->getApproxBytesToCopy();
if (docsToCopy && bytesToCopy) {
- setDocumentsToCopyCounts(*docsToCopy, *bytesToCopy);
+ setDocumentsToProcessCounts(*docsToCopy, *bytesToCopy);
}
auto docsCopied = metrics->getFinalDocumentsCopiedCount();
auto bytesCopied = metrics->getFinalBytesCopiedCount();
if (docsCopied && bytesCopied) {
- restoreDocumentsCopied(*docsCopied, *bytesCopied);
+ restoreDocumentsProcessed(*docsCopied, *bytesCopied);
}
restorePhaseDurationFields(document);
}
diff --git a/src/mongo/db/s/resharding/resharding_metrics.h b/src/mongo/db/s/resharding/resharding_metrics.h
index 263dc6648a3..c49606676a3 100644
--- a/src/mongo/db/s/resharding/resharding_metrics.h
+++ b/src/mongo/db/s/resharding/resharding_metrics.h
@@ -31,6 +31,7 @@
#include "mongo/bson/bsonobj.h"
#include "mongo/db/namespace_string.h"
+#include "mongo/db/s/resharding/resharding_metrics_field_name_provider.h"
#include "mongo/db/s/resharding/resharding_metrics_helpers.h"
#include "mongo/db/s/resharding/resharding_oplog_applier_progress_gen.h"
#include "mongo/db/s/sharding_data_transform_instance_metrics.h"
@@ -41,7 +42,6 @@ namespace mongo {
class ReshardingMetrics : public ShardingDataTransformInstanceMetrics {
public:
using State = stdx::variant<CoordinatorStateEnum, RecipientStateEnum, DonorStateEnum>;
-
class DonorState {
public:
using MetricsType = ShardingDataTransformCumulativeMetrics::DonorStateEnum;
@@ -77,7 +77,10 @@ public:
private:
CoordinatorStateEnum _enumVal;
};
-
+ ReshardingMetrics(const CommonReshardingMetadata& metadata,
+ Role role,
+ ClockSource* clockSource,
+ ShardingDataTransformCumulativeMetrics* cumulativeMetrics);
ReshardingMetrics(UUID instanceId,
BSONObj shardKey,
NamespaceString nss,
@@ -85,11 +88,6 @@ public:
Date_t startTime,
ClockSource* clockSource,
ShardingDataTransformCumulativeMetrics* cumulativeMetrics);
- ReshardingMetrics(const CommonReshardingMetadata& metadata,
- Role role,
- ClockSource* clockSource,
- ShardingDataTransformCumulativeMetrics* cumulativeMetrics);
-
static std::unique_ptr<ReshardingMetrics> makeInstance(UUID instanceId,
BSONObj shardKey,
NamespaceString nss,
@@ -166,19 +164,6 @@ public:
protected:
virtual StringData getStateString() const noexcept override;
-
- static constexpr auto kInsertsApplied = "insertsApplied";
- static constexpr auto kUpdatesApplied = "updatesApplied";
- static constexpr auto kDeletesApplied = "deletesApplied";
- static constexpr auto kOplogEntriesApplied = "oplogEntriesApplied";
- static constexpr auto kOplogEntriesFetched = "oplogEntriesFetched";
- static constexpr auto kApplyTimeElapsed = "totalApplyTimeElapsedSecs";
- static constexpr auto kAllShardsLowestRemainingOperationTimeEstimatedSecs =
- "allShardsLowestRemainingOperationTimeEstimatedSecs";
- static constexpr auto kAllShardsHighestRemainingOperationTimeEstimatedSecs =
- "allShardsHighestRemainingOperationTimeEstimatedSecs";
- static constexpr auto kRemainingOpTimeEstimated = "remainingOperationTimeEstimatedSecs";
-
void restoreApplyingBegin(Date_t date);
void restoreApplyingEnd(Date_t date);
@@ -238,6 +223,8 @@ private:
AtomicWord<int64_t> _oplogEntriesFetched;
AtomicWord<Date_t> _applyingStartTime;
AtomicWord<Date_t> _applyingEndTime;
+
+ ReshardingMetricsFieldNameProvider* _reshardingFieldNames;
};
} // namespace mongo
diff --git a/src/mongo/db/s/resharding/resharding_metrics_field_name_provider.cpp b/src/mongo/db/s/resharding/resharding_metrics_field_name_provider.cpp
new file mode 100644
index 00000000000..1e2615fba62
--- /dev/null
+++ b/src/mongo/db/s/resharding/resharding_metrics_field_name_provider.cpp
@@ -0,0 +1,87 @@
+/**
+ * Copyright (C) 2022-present MongoDB, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the Server Side Public License, version 1,
+ * as published by MongoDB, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * Server Side Public License for more details.
+ *
+ * You should have received a copy of the Server Side Public License
+ * along with this program. If not, see
+ * <http://www.mongodb.com/licensing/server-side-public-license>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the Server Side Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#include "mongo/db/s/resharding/resharding_metrics_field_name_provider.h"
+
+namespace mongo {
+namespace {
+
+constexpr auto kBytesCopied = "bytesCopied";
+constexpr auto kDocumentsCopied = "documentsCopied";
+constexpr auto kInsertsApplied = "insertsApplied";
+constexpr auto kUpdatesApplied = "updatesApplied";
+constexpr auto kDeletesApplied = "deletesApplied";
+constexpr auto kOplogEntriesApplied = "oplogEntriesApplied";
+constexpr auto kOplogEntriesFetched = "oplogEntriesFetched";
+constexpr auto kApplyTimeElapsed = "totalApplyTimeElapsedSecs";
+constexpr auto kApproxDocumentsToCopy = "approxDocumentsToCopy";
+constexpr auto kApproxBytesToCopy = "approxBytesToCopy";
+
+} // namespace
+
+StringData ReshardingMetricsFieldNameProvider::getForBytesWritten() const {
+ return kBytesCopied;
+}
+
+StringData ReshardingMetricsFieldNameProvider::getForDocumentsProcessed() const {
+ return kDocumentsCopied;
+}
+
+StringData ReshardingMetricsFieldNameProvider::getForApproxDocumentsToProcess() const {
+ return kApproxDocumentsToCopy;
+}
+
+StringData ReshardingMetricsFieldNameProvider::getForInsertsApplied() const {
+ return kInsertsApplied;
+}
+
+StringData ReshardingMetricsFieldNameProvider::getForUpdatesApplied() const {
+ return kUpdatesApplied;
+}
+
+StringData ReshardingMetricsFieldNameProvider::getForDeletesApplied() const {
+ return kDeletesApplied;
+}
+
+StringData ReshardingMetricsFieldNameProvider::getForOplogEntriesApplied() const {
+ return kOplogEntriesApplied;
+}
+
+StringData ReshardingMetricsFieldNameProvider::getForOplogEntriesFetched() const {
+ return kOplogEntriesFetched;
+}
+
+StringData ReshardingMetricsFieldNameProvider::getForApplyTimeElapsed() const {
+ return kApplyTimeElapsed;
+}
+
+StringData ReshardingMetricsFieldNameProvider::getForApproxBytesToScan() const {
+ return kApproxBytesToCopy;
+}
+} // namespace mongo
diff --git a/src/mongo/db/s/resharding/resharding_metrics_field_name_provider.h b/src/mongo/db/s/resharding/resharding_metrics_field_name_provider.h
new file mode 100644
index 00000000000..8203b78845f
--- /dev/null
+++ b/src/mongo/db/s/resharding/resharding_metrics_field_name_provider.h
@@ -0,0 +1,53 @@
+/**
+ * Copyright (C) 2022-present MongoDB, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the Server Side Public License, version 1,
+ * as published by MongoDB, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * Server Side Public License for more details.
+ *
+ * You should have received a copy of the Server Side Public License
+ * along with this program. If not, see
+ * <http://www.mongodb.com/licensing/server-side-public-license>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the Server Side Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#pragma once
+
+#include "mongo/db/namespace_string.h"
+#include "mongo/db/s/sharding_data_transform_instance_metrics_field_name_provider.h"
+#include "mongo/util/duration.h"
+
+namespace mongo {
+
+class ReshardingMetricsFieldNameProvider
+ : public ShardingDataTransformInstanceMetricsFieldNameProvider {
+public:
+ StringData getForDocumentsProcessed() const override;
+ StringData getForBytesWritten() const override;
+ StringData getForApproxDocumentsToProcess() const override;
+ StringData getForApproxBytesToScan() const override;
+ StringData getForInsertsApplied() const;
+ StringData getForUpdatesApplied() const;
+ StringData getForDeletesApplied() const;
+ StringData getForOplogEntriesApplied() const;
+ StringData getForOplogEntriesFetched() const;
+ StringData getForApplyTimeElapsed() const;
+};
+
+} // namespace mongo
diff --git a/src/mongo/db/s/resharding/resharding_metrics_test.cpp b/src/mongo/db/s/resharding/resharding_metrics_test.cpp
index 1f68be0a54d..1e20c778071 100644
--- a/src/mongo/db/s/resharding/resharding_metrics_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_metrics_test.cpp
@@ -181,8 +181,7 @@ public:
}
};
-
-TEST_F(ReshardingMetricsTest, ReportForCurrentOpShouldHaveGlobalIndexDescription) {
+TEST_F(ReshardingMetricsTest, ReportForCurrentOpShouldHaveReshardingMetricsDescription) {
std::vector<Role> roles{Role::kCoordinator, Role::kDonor, Role::kRecipient};
std::for_each(roles.begin(), roles.end(), [&](Role role) {
@@ -233,7 +232,7 @@ TEST_F(ReshardingMetricsTest, RestoresByteAndDocumentCountsDuringCloning) {
constexpr auto kBytesCopied = 500;
auto metrics = createInstanceMetrics(getClockSource(), UUID::gen(), Role::kRecipient);
- metrics->restoreDocumentsCopied(kDocsCopied, kBytesCopied);
+ metrics->restoreDocumentsProcessed(kDocsCopied, kBytesCopied);
auto report = metrics->reportForCurrentOp();
ASSERT_EQ(report.getIntField("documentsCopied"), kDocsCopied);
@@ -416,7 +415,7 @@ TEST_F(ReshardingMetricsTest, RecipientReportsRemainingTime) {
constexpr auto kOpsPerIncrement = 25;
const auto kIncrementSecs = durationCount<Seconds>(kIncrement);
const auto kExpectedTotal = kIncrementSecs * 8;
- metrics->setDocumentsToCopyCounts(0, kOpsPerIncrement * 4);
+ metrics->setDocumentsToProcessCounts(0, kOpsPerIncrement * 4);
metrics->onOplogEntriesFetched(kOpsPerIncrement * 4, Milliseconds(1));
// Before cloning.
@@ -425,20 +424,20 @@ TEST_F(ReshardingMetricsTest, RecipientReportsRemainingTime) {
// During cloning.
metrics->onCopyingBegin();
- metrics->onDocumentsCopied(0, kOpsPerIncrement, Milliseconds(1));
+ metrics->onDocumentsProcessed(0, kOpsPerIncrement, Milliseconds(1));
clock->advance(kIncrement);
report = metrics->reportForCurrentOp();
ASSERT_EQ(report.getIntField("remainingOperationTimeEstimatedSecs"),
kExpectedTotal - kIncrementSecs);
- metrics->onDocumentsCopied(0, kOpsPerIncrement * 2, Milliseconds(1));
+ metrics->onDocumentsProcessed(0, kOpsPerIncrement * 2, Milliseconds(1));
clock->advance(kIncrement * 2);
report = metrics->reportForCurrentOp();
ASSERT_EQ(report.getIntField("remainingOperationTimeEstimatedSecs"),
kExpectedTotal - (kIncrementSecs * 3));
// During applying.
- metrics->onDocumentsCopied(0, kOpsPerIncrement, Milliseconds(1));
+ metrics->onDocumentsProcessed(0, kOpsPerIncrement, Milliseconds(1));
clock->advance(kIncrement);
metrics->onCopyingEnd();
metrics->onApplyingBegin();
diff --git a/src/mongo/db/s/resharding/resharding_recipient_service.cpp b/src/mongo/db/s/resharding/resharding_recipient_service.cpp
index 5b66d19e8bd..b0f40e06551 100644
--- a/src/mongo/db/s/resharding/resharding_recipient_service.cpp
+++ b/src/mongo/db/s/resharding/resharding_recipient_service.cpp
@@ -132,9 +132,9 @@ void buildStateDocumentApplyMetricsForUpdate(BSONObjBuilder& bob, ReshardingMetr
getIntervalStartFieldName<DocT>(ReshardingRecipientMetrics::kOplogApplicationFieldName),
metrics->getApplyingBegin());
bob.append(metricsPrefix + ReshardingRecipientMetrics::kFinalDocumentsCopiedCountFieldName,
- metrics->getDocumentsCopiedCount());
+ metrics->getDocumentsProcessedCount());
bob.append(metricsPrefix + ReshardingRecipientMetrics::kFinalBytesCopiedCountFieldName,
- metrics->getBytesCopiedCount());
+ metrics->getBytesWrittenCount());
}
void buildStateDocumentStrictConsistencyMetricsForUpdate(BSONObjBuilder& bob,
@@ -557,8 +557,8 @@ ExecutorFuture<void> ReshardingRecipientService::RecipientStateMachine::
ReshardingRecipientService::RecipientStateMachine::CloneDetails cloneDetails) {
_transitionToCreatingCollection(
cloneDetails, (*executor)->now() + _minimumOperationDuration, factory);
- _metrics->setDocumentsToCopyCounts(cloneDetails.approxDocumentsToCopy,
- cloneDetails.approxBytesToCopy);
+ _metrics->setDocumentsToProcessCounts(cloneDetails.approxDocumentsToCopy,
+ cloneDetails.approxBytesToCopy);
});
}
@@ -1162,7 +1162,7 @@ void ReshardingRecipientService::RecipientStateMachine::_restoreMetrics(
// metrics section of the recipient state document and restored during metrics
// initialization. This is so that applied oplog entries that add or remove documents do
// not affect the cloning metrics.
- _metrics->restoreDocumentsCopied(documentCountCopied, documentBytesCopied);
+ _metrics->restoreDocumentsProcessed(documentCountCopied, documentBytesCopied);
}
}
diff --git a/src/mongo/db/s/sharding_data_transform_instance_metrics.cpp b/src/mongo/db/s/sharding_data_transform_instance_metrics.cpp
index 2631323fa6c..70dce2dc0ad 100644
--- a/src/mongo/db/s/sharding_data_transform_instance_metrics.cpp
+++ b/src/mongo/db/s/sharding_data_transform_instance_metrics.cpp
@@ -40,7 +40,8 @@ ShardingDataTransformInstanceMetrics::ShardingDataTransformInstanceMetrics(
Role role,
Date_t startTime,
ClockSource* clockSource,
- ShardingDataTransformCumulativeMetrics* cumulativeMetrics)
+ ShardingDataTransformCumulativeMetrics* cumulativeMetrics,
+ FieldNameProviderPtr fieldNames)
: ShardingDataTransformInstanceMetrics{
std::move(instanceId),
std::move(originalCommand),
@@ -49,6 +50,7 @@ ShardingDataTransformInstanceMetrics::ShardingDataTransformInstanceMetrics(
startTime,
clockSource,
cumulativeMetrics,
+ std::move(fieldNames),
std::make_unique<ShardingDataTransformMetricsObserver>(this)} {}
ShardingDataTransformInstanceMetrics::ShardingDataTransformInstanceMetrics(
@@ -59,11 +61,13 @@ ShardingDataTransformInstanceMetrics::ShardingDataTransformInstanceMetrics(
Date_t startTime,
ClockSource* clockSource,
ShardingDataTransformCumulativeMetrics* cumulativeMetrics,
+ FieldNameProviderPtr fieldNames,
ObserverPtr observer)
: _instanceId{std::move(instanceId)},
_originalCommand{std::move(originalCommand)},
_sourceNs{std::move(sourceNs)},
_role{role},
+ _fieldNames{std::move(fieldNames)},
_startTime{startTime},
_clockSource{clockSource},
_observer{std::move(observer)},
@@ -71,10 +75,10 @@ ShardingDataTransformInstanceMetrics::ShardingDataTransformInstanceMetrics(
_deregister{_cumulativeMetrics->registerInstanceMetrics(_observer.get())},
_copyingStartTime{kNoDate},
_copyingEndTime{kNoDate},
- _approxDocumentsToCopy{0},
- _documentsCopied{0},
- _approxBytesToCopy{0},
- _bytesCopied{0},
+ _approxDocumentsToProcess{0},
+ _documentsProcessed{0},
+ _approxBytesToScan{0},
+ _bytesWritten{0},
_coordinatorHighEstimateRemainingTimeMillis{Milliseconds{0}},
_coordinatorLowEstimateRemainingTimeMillis{Milliseconds{0}},
_criticalSectionStartTime{kNoDate},
@@ -136,41 +140,46 @@ StringData ShardingDataTransformInstanceMetrics::getStateString() const noexcept
BSONObj ShardingDataTransformInstanceMetrics::reportForCurrentOp() const noexcept {
BSONObjBuilder builder;
- builder.append(kType, "op");
- builder.append(kDescription, createOperationDescription());
- builder.append(kOp, "command");
- builder.append(kNamespace, _sourceNs.toString());
- builder.append(kOriginatingCommand, _originalCommand);
- builder.append(kOpTimeElapsed, getOperationRunningTimeSecs().count());
-
+ builder.append(_fieldNames->getForType(), "op");
+ builder.append(_fieldNames->getForDescription(), createOperationDescription());
+ builder.append(_fieldNames->getForOp(), "command");
+ builder.append(_fieldNames->getForNamespace(), _sourceNs.toString());
+ builder.append(_fieldNames->getForOriginatingCommand(), _originalCommand);
+ builder.append(_fieldNames->getForOpTimeElapsed(), getOperationRunningTimeSecs().count());
switch (_role) {
case Role::kCoordinator:
- builder.append(kAllShardsHighestRemainingOperationTimeEstimatedSecs,
+ builder.append(_fieldNames->getForAllShardsHighestRemainingOperationTimeEstimatedSecs(),
durationCount<Seconds>(getHighEstimateRemainingTimeMillis()));
- builder.append(kAllShardsLowestRemainingOperationTimeEstimatedSecs,
+ builder.append(_fieldNames->getForAllShardsLowestRemainingOperationTimeEstimatedSecs(),
durationCount<Seconds>(getLowEstimateRemainingTimeMillis()));
- builder.append(kCoordinatorState, getStateString());
- builder.append(kCopyTimeElapsed, getCopyingElapsedTimeSecs().count());
- builder.append(kCriticalSectionTimeElapsed,
+ builder.append(_fieldNames->getForCoordinatorState(), getStateString());
+ builder.append(_fieldNames->getForCopyTimeElapsed(),
+ getCopyingElapsedTimeSecs().count());
+ builder.append(_fieldNames->getForCriticalSectionTimeElapsed(),
getCriticalSectionElapsedTimeSecs().count());
break;
case Role::kDonor:
- builder.append(kDonorState, getStateString());
- builder.append(kCriticalSectionTimeElapsed,
+ builder.append(_fieldNames->getForDonorState(), getStateString());
+ builder.append(_fieldNames->getForCriticalSectionTimeElapsed(),
getCriticalSectionElapsedTimeSecs().count());
- builder.append(kCountWritesDuringCriticalSection, _writesDuringCriticalSection.load());
- builder.append(kCountReadsDuringCriticalSection, _readsDuringCriticalSection.load());
+ builder.append(_fieldNames->getForCountWritesDuringCriticalSection(),
+ _writesDuringCriticalSection.load());
+ builder.append(_fieldNames->getForCountReadsDuringCriticalSection(),
+ _readsDuringCriticalSection.load());
break;
case Role::kRecipient:
- builder.append(kRecipientState, getStateString());
- builder.append(kCopyTimeElapsed, getCopyingElapsedTimeSecs().count());
- builder.append(kRemainingOpTimeEstimated,
+ builder.append(_fieldNames->getForRecipientState(), getStateString());
+ builder.append(_fieldNames->getForCopyTimeElapsed(),
+ getCopyingElapsedTimeSecs().count());
+ builder.append(_fieldNames->getForRemainingOpTimeEstimated(),
durationCount<Seconds>(getHighEstimateRemainingTimeMillis()));
- builder.append(kApproxDocumentsToCopy, _approxDocumentsToCopy.load());
- builder.append(kApproxBytesToCopy, _approxBytesToCopy.load());
- builder.append(kBytesCopied, _bytesCopied.load());
- builder.append(kCountWritesToStashCollections, _writesToStashCollections.load());
- builder.append(kDocumentsCopied, _documentsCopied.load());
+ builder.append(_fieldNames->getForApproxDocumentsToProcess(),
+ _approxDocumentsToProcess.load());
+ builder.append(_fieldNames->getForApproxBytesToScan(), _approxBytesToScan.load());
+ builder.append(_fieldNames->getForBytesWritten(), _bytesWritten.load());
+ builder.append(_fieldNames->getForCountWritesToStashCollections(),
+ _writesToStashCollections.load());
+ builder.append(_fieldNames->getForDocumentsProcessed(), _documentsProcessed.load());
break;
default:
MONGO_UNREACHABLE;
@@ -203,36 +212,36 @@ Date_t ShardingDataTransformInstanceMetrics::getCopyingEnd() const {
return _copyingEndTime.load();
}
-void ShardingDataTransformInstanceMetrics::onDocumentsCopied(int64_t documentCount,
- int64_t totalDocumentsSizeBytes,
- Milliseconds elapsed) {
- _documentsCopied.addAndFetch(documentCount);
- _bytesCopied.addAndFetch(totalDocumentsSizeBytes);
+void ShardingDataTransformInstanceMetrics::onDocumentsProcessed(int64_t documentCount,
+ int64_t totalDocumentsSizeBytes,
+ Milliseconds elapsed) {
+ _documentsProcessed.addAndFetch(documentCount);
+ _bytesWritten.addAndFetch(totalDocumentsSizeBytes);
_cumulativeMetrics->onInsertsDuringCloning(documentCount, totalDocumentsSizeBytes, elapsed);
}
-int64_t ShardingDataTransformInstanceMetrics::getDocumentsCopiedCount() const {
- return _documentsCopied.load();
+int64_t ShardingDataTransformInstanceMetrics::getDocumentsProcessedCount() const {
+ return _documentsProcessed.load();
}
-int64_t ShardingDataTransformInstanceMetrics::getBytesCopiedCount() const {
- return _bytesCopied.load();
+int64_t ShardingDataTransformInstanceMetrics::getBytesWrittenCount() const {
+ return _bytesWritten.load();
}
-int64_t ShardingDataTransformInstanceMetrics::getApproxBytesToCopyCount() const {
- return _approxBytesToCopy.load();
+int64_t ShardingDataTransformInstanceMetrics::getApproxBytesToScanCount() const {
+ return _approxBytesToScan.load();
}
-void ShardingDataTransformInstanceMetrics::restoreDocumentsCopied(int64_t documentCount,
- int64_t totalDocumentsSizeBytes) {
- _documentsCopied.store(documentCount);
- _bytesCopied.store(totalDocumentsSizeBytes);
+void ShardingDataTransformInstanceMetrics::restoreDocumentsProcessed(
+ int64_t documentCount, int64_t totalDocumentsSizeBytes) {
+ _documentsProcessed.store(documentCount);
+ _bytesWritten.store(totalDocumentsSizeBytes);
}
-void ShardingDataTransformInstanceMetrics::setDocumentsToCopyCounts(
+void ShardingDataTransformInstanceMetrics::setDocumentsToProcessCounts(
int64_t documentCount, int64_t totalDocumentsSizeBytes) {
- _approxDocumentsToCopy.store(documentCount);
- _approxBytesToCopy.store(totalDocumentsSizeBytes);
+ _approxDocumentsToProcess.store(documentCount);
+ _approxBytesToScan.store(totalDocumentsSizeBytes);
}
void ShardingDataTransformInstanceMetrics::setCoordinatorHighEstimateRemainingTimeMillis(
diff --git a/src/mongo/db/s/sharding_data_transform_instance_metrics.h b/src/mongo/db/s/sharding_data_transform_instance_metrics.h
index 80a8a60b8d0..bcc6e091992 100644
--- a/src/mongo/db/s/sharding_data_transform_instance_metrics.h
+++ b/src/mongo/db/s/sharding_data_transform_instance_metrics.h
@@ -31,6 +31,7 @@
#include "mongo/db/namespace_string.h"
#include "mongo/db/s/sharding_data_transform_cumulative_metrics.h"
+#include "mongo/db/s/sharding_data_transform_instance_metrics_field_name_provider.h"
#include "mongo/db/s/sharding_data_transform_metrics.h"
#include "mongo/db/s/sharding_data_transform_metrics_observer_interface.h"
#include "mongo/util/duration.h"
@@ -41,6 +42,8 @@ class ShardingDataTransformInstanceMetrics {
public:
using Role = ShardingDataTransformMetrics::Role;
using ObserverPtr = std::unique_ptr<ShardingDataTransformMetricsObserverInterface>;
+ using FieldNameProviderPtr =
+ std::unique_ptr<ShardingDataTransformInstanceMetricsFieldNameProvider>;
ShardingDataTransformInstanceMetrics(UUID instanceId,
BSONObj originalCommand,
@@ -48,7 +51,8 @@ public:
Role role,
Date_t startTime,
ClockSource* clockSource,
- ShardingDataTransformCumulativeMetrics* cumulativeMetrics);
+ ShardingDataTransformCumulativeMetrics* cumulativeMetrics,
+ FieldNameProviderPtr fieldNames);
ShardingDataTransformInstanceMetrics(UUID instanceId,
BSONObj originalCommand,
@@ -57,6 +61,7 @@ public:
Date_t startTime,
ClockSource* clockSource,
ShardingDataTransformCumulativeMetrics* cumulativeMetrics,
+ FieldNameProviderPtr fieldNames,
ObserverPtr observer);
virtual ~ShardingDataTransformInstanceMetrics();
@@ -77,16 +82,16 @@ public:
void onCopyingEnd();
void onApplyingBegin();
void onApplyingEnd();
- void onDocumentsCopied(int64_t documentCount,
- int64_t totalDocumentsSizeBytes,
- Milliseconds elapsed);
+ void onDocumentsProcessed(int64_t documentCount,
+ int64_t totalDocumentsSizeBytes,
+ Milliseconds elapsed);
Date_t getCopyingBegin() const;
Date_t getCopyingEnd() const;
- int64_t getDocumentsCopiedCount() const;
- int64_t getBytesCopiedCount() const;
- int64_t getApproxBytesToCopyCount() const;
- void restoreDocumentsCopied(int64_t documentCount, int64_t totalDocumentsSizeBytes);
- void setDocumentsToCopyCounts(int64_t documentCount, int64_t totalDocumentsSizeBytes);
+ int64_t getDocumentsProcessedCount() const;
+ int64_t getBytesWrittenCount() const;
+ int64_t getApproxBytesToScanCount() const;
+ void restoreDocumentsProcessed(int64_t documentCount, int64_t totalDocumentsSizeBytes);
+ void setDocumentsToProcessCounts(int64_t documentCount, int64_t totalDocumentsSizeBytes);
void setCoordinatorHighEstimateRemainingTimeMillis(Milliseconds milliseconds);
void setCoordinatorLowEstimateRemainingTimeMillis(Milliseconds milliseconds);
void onLocalInsertDuringOplogFetching(Milliseconds elapsed);
@@ -138,29 +143,7 @@ protected:
const BSONObj _originalCommand;
const NamespaceString _sourceNs;
const Role _role;
- static constexpr auto kType = "type";
- static constexpr auto kDescription = "desc";
- static constexpr auto kNamespace = "ns";
- static constexpr auto kOp = "op";
- static constexpr auto kOriginatingCommand = "originatingCommand";
- static constexpr auto kOpTimeElapsed = "totalOperationTimeElapsedSecs";
- static constexpr auto kCriticalSectionTimeElapsed = "totalCriticalSectionTimeElapsedSecs";
- static constexpr auto kRemainingOpTimeEstimated = "remainingOperationTimeEstimatedSecs";
- static constexpr auto kCopyTimeElapsed = "totalCopyTimeElapsedSecs";
- static constexpr auto kApproxDocumentsToCopy = "approxDocumentsToCopy";
- static constexpr auto kApproxBytesToCopy = "approxBytesToCopy";
- static constexpr auto kBytesCopied = "bytesCopied";
- static constexpr auto kCountWritesToStashCollections = "countWritesToStashCollections";
- static constexpr auto kDocumentsCopied = "documentsCopied";
- static constexpr auto kCountWritesDuringCriticalSection = "countWritesDuringCriticalSection";
- static constexpr auto kCountReadsDuringCriticalSection = "countReadsDuringCriticalSection";
- static constexpr auto kCoordinatorState = "coordinatorState";
- static constexpr auto kDonorState = "donorState";
- static constexpr auto kRecipientState = "recipientState";
- static constexpr auto kAllShardsLowestRemainingOperationTimeEstimatedSecs =
- "allShardsLowestRemainingOperationTimeEstimatedSecs";
- static constexpr auto kAllShardsHighestRemainingOperationTimeEstimatedSecs =
- "allShardsHighestRemainingOperationTimeEstimatedSecs";
+ FieldNameProviderPtr _fieldNames;
private:
const Date_t _startTime;
@@ -172,10 +155,10 @@ private:
AtomicWord<Date_t> _copyingStartTime;
AtomicWord<Date_t> _copyingEndTime;
- AtomicWord<int32_t> _approxDocumentsToCopy;
- AtomicWord<int32_t> _documentsCopied;
- AtomicWord<int32_t> _approxBytesToCopy;
- AtomicWord<int32_t> _bytesCopied;
+ AtomicWord<int32_t> _approxDocumentsToProcess;
+ AtomicWord<int32_t> _documentsProcessed;
+ AtomicWord<int32_t> _approxBytesToScan;
+ AtomicWord<int32_t> _bytesWritten;
AtomicWord<int64_t> _writesToStashCollections;
diff --git a/src/mongo/db/s/sharding_data_transform_instance_metrics_field_name_provider.cpp b/src/mongo/db/s/sharding_data_transform_instance_metrics_field_name_provider.cpp
new file mode 100644
index 00000000000..9bb66c265b1
--- /dev/null
+++ b/src/mongo/db/s/sharding_data_transform_instance_metrics_field_name_provider.cpp
@@ -0,0 +1,130 @@
+/**
+ * Copyright (C) 2022-present MongoDB, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the Server Side Public License, version 1,
+ * as published by MongoDB, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * Server Side Public License for more details.
+ *
+ * You should have received a copy of the Server Side Public License
+ * along with this program. If not, see
+ * <http://www.mongodb.com/licensing/server-side-public-license>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the Server Side Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#include "mongo/db/s/sharding_data_transform_instance_metrics_field_name_provider.h"
+
+namespace mongo {
+namespace {
+constexpr auto kType = "type";
+constexpr auto kDescription = "desc";
+constexpr auto kNamespace = "ns";
+constexpr auto kOp = "op";
+constexpr auto kOriginatingCommand = "originatingCommand";
+constexpr auto kOpTimeElapsed = "totalOperationTimeElapsedSecs";
+constexpr auto kCriticalSectionTimeElapsed = "totalCriticalSectionTimeElapsedSecs";
+constexpr auto kRemainingOpTimeEstimated = "remainingOperationTimeEstimatedSecs";
+constexpr auto kCopyTimeElapsed = "totalCopyTimeElapsedSecs";
+constexpr auto kCountWritesToStashCollections = "countWritesToStashCollections";
+constexpr auto kCountWritesDuringCriticalSection = "countWritesDuringCriticalSection";
+constexpr auto kCountReadsDuringCriticalSection = "countReadsDuringCriticalSection";
+constexpr auto kCoordinatorState = "coordinatorState";
+constexpr auto kDonorState = "donorState";
+constexpr auto kRecipientState = "recipientState";
+constexpr auto kAllShardsLowestRemainingOperationTimeEstimatedSecs =
+ "allShardsLowestRemainingOperationTimeEstimatedSecs";
+constexpr auto kAllShardsHighestRemainingOperationTimeEstimatedSecs =
+ "allShardsHighestRemainingOperationTimeEstimatedSecs";
+} // namespace
+
+StringData ShardingDataTransformInstanceMetricsFieldNameProvider::getForType() const {
+ return kType;
+}
+
+StringData ShardingDataTransformInstanceMetricsFieldNameProvider::getForDescription() const {
+ return kDescription;
+}
+
+StringData ShardingDataTransformInstanceMetricsFieldNameProvider::getForNamespace() const {
+ return kNamespace;
+}
+
+StringData ShardingDataTransformInstanceMetricsFieldNameProvider::getForOp() const {
+ return kOp;
+}
+
+StringData ShardingDataTransformInstanceMetricsFieldNameProvider::getForOriginatingCommand() const {
+ return kOriginatingCommand;
+}
+
+StringData ShardingDataTransformInstanceMetricsFieldNameProvider::getForOpTimeElapsed() const {
+ return kOpTimeElapsed;
+}
+
+StringData ShardingDataTransformInstanceMetricsFieldNameProvider::getForCriticalSectionTimeElapsed()
+ const {
+ return kCriticalSectionTimeElapsed;
+}
+
+StringData ShardingDataTransformInstanceMetricsFieldNameProvider::getForRemainingOpTimeEstimated()
+ const {
+ return kRemainingOpTimeEstimated;
+}
+
+StringData ShardingDataTransformInstanceMetricsFieldNameProvider::getForCopyTimeElapsed() const {
+ return kCopyTimeElapsed;
+}
+
+StringData
+ShardingDataTransformInstanceMetricsFieldNameProvider::getForCountWritesToStashCollections() const {
+ return kCountWritesToStashCollections;
+}
+
+StringData
+ShardingDataTransformInstanceMetricsFieldNameProvider::getForCountWritesDuringCriticalSection()
+ const {
+ return kCountWritesDuringCriticalSection;
+}
+
+StringData
+ShardingDataTransformInstanceMetricsFieldNameProvider::getForCountReadsDuringCriticalSection()
+ const {
+ return kCountReadsDuringCriticalSection;
+}
+
+StringData ShardingDataTransformInstanceMetricsFieldNameProvider::getForCoordinatorState() const {
+ return kCoordinatorState;
+}
+
+StringData ShardingDataTransformInstanceMetricsFieldNameProvider::getForDonorState() const {
+ return kDonorState;
+}
+
+StringData ShardingDataTransformInstanceMetricsFieldNameProvider::getForRecipientState() const {
+ return kRecipientState;
+}
+
+StringData ShardingDataTransformInstanceMetricsFieldNameProvider::
+ getForAllShardsLowestRemainingOperationTimeEstimatedSecs() const {
+ return kAllShardsLowestRemainingOperationTimeEstimatedSecs;
+}
+StringData ShardingDataTransformInstanceMetricsFieldNameProvider::
+ getForAllShardsHighestRemainingOperationTimeEstimatedSecs() const {
+ return kAllShardsHighestRemainingOperationTimeEstimatedSecs;
+}
+} // namespace mongo
diff --git a/src/mongo/db/s/sharding_data_transform_instance_metrics_field_name_provider.h b/src/mongo/db/s/sharding_data_transform_instance_metrics_field_name_provider.h
new file mode 100644
index 00000000000..f6673d6aa58
--- /dev/null
+++ b/src/mongo/db/s/sharding_data_transform_instance_metrics_field_name_provider.h
@@ -0,0 +1,64 @@
+/**
+ * Copyright (C) 2022-present MongoDB, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the Server Side Public License, version 1,
+ * as published by MongoDB, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * Server Side Public License for more details.
+ *
+ * You should have received a copy of the Server Side Public License
+ * along with this program. If not, see
+ * <http://www.mongodb.com/licensing/server-side-public-license>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the Server Side Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#pragma once
+
+#include "mongo/db/namespace_string.h"
+#include "mongo/util/duration.h"
+
+namespace mongo {
+
+class ShardingDataTransformInstanceMetricsFieldNameProvider {
+public:
+ ShardingDataTransformInstanceMetricsFieldNameProvider() {}
+ virtual ~ShardingDataTransformInstanceMetricsFieldNameProvider() = default;
+
+ virtual StringData getForApproxDocumentsToProcess() const = 0;
+ virtual StringData getForApproxBytesToScan() const = 0;
+ virtual StringData getForBytesWritten() const = 0;
+ virtual StringData getForDocumentsProcessed() const = 0;
+ StringData getForType() const;
+ StringData getForDescription() const;
+ StringData getForNamespace() const;
+ StringData getForOp() const;
+ StringData getForOriginatingCommand() const;
+ StringData getForOpTimeElapsed() const;
+ StringData getForCriticalSectionTimeElapsed() const;
+ StringData getForRemainingOpTimeEstimated() const;
+ StringData getForCopyTimeElapsed() const;
+ StringData getForCountWritesDuringCriticalSection() const;
+ StringData getForCountWritesToStashCollections() const;
+ StringData getForCountReadsDuringCriticalSection() const;
+ StringData getForCoordinatorState() const;
+ StringData getForDonorState() const;
+ StringData getForRecipientState() const;
+ StringData getForAllShardsLowestRemainingOperationTimeEstimatedSecs() const;
+ StringData getForAllShardsHighestRemainingOperationTimeEstimatedSecs() const;
+};
+} // namespace mongo
diff --git a/src/mongo/db/s/sharding_data_transform_instance_metrics_test.cpp b/src/mongo/db/s/sharding_data_transform_instance_metrics_test.cpp
index 9e1d5223e5c..df02d617131 100644
--- a/src/mongo/db/s/sharding_data_transform_instance_metrics_test.cpp
+++ b/src/mongo/db/s/sharding_data_transform_instance_metrics_test.cpp
@@ -41,6 +41,28 @@
namespace mongo {
namespace {
+class ShardingDataTransformInstanceMetricsFieldNameProviderForTest
+ : public ShardingDataTransformInstanceMetricsFieldNameProvider {
+public:
+ StringData getForDocumentsProcessed() const override {
+ return kDocumentsProcessed;
+ }
+ StringData getForBytesWritten() const override {
+ return kBytesWritten;
+ }
+ StringData getForApproxDocumentsToProcess() const override {
+ return kApproxDocumentsToProcess;
+ }
+ StringData getForApproxBytesToScan() const override {
+ return kApproxBytesToScan;
+ }
+
+protected:
+ static constexpr auto kDocumentsProcessed = "documentsProcessed";
+ static constexpr auto kBytesWritten = "bytesWritten";
+ static constexpr auto kApproxDocumentsToProcess = "approxDocumentsToProcess";
+ static constexpr auto kApproxBytesToScan = "approxBytesToScan";
+};
class ShardingDataTransformInstanceMetricsForTest : public ShardingDataTransformInstanceMetrics {
public:
@@ -52,13 +74,15 @@ public:
Date_t startTime,
ClockSource* clockSource,
ShardingDataTransformCumulativeMetrics* cumulativeMetrics)
- : ShardingDataTransformInstanceMetrics{std::move(instanceId),
- std::move(shardKey),
- std::move(nss),
- role,
- startTime,
- clockSource,
- cumulativeMetrics} {}
+ : ShardingDataTransformInstanceMetrics{
+ std::move(instanceId),
+ std::move(shardKey),
+ std::move(nss),
+ role,
+ startTime,
+ clockSource,
+ cumulativeMetrics,
+ std::make_unique<ShardingDataTransformInstanceMetricsFieldNameProviderForTest>()} {}
ShardingDataTransformInstanceMetricsForTest(
UUID instanceId,
BSONObj shardKey,
@@ -67,6 +91,7 @@ public:
Date_t startTime,
ClockSource* clockSource,
ShardingDataTransformCumulativeMetrics* cumulativeMetrics,
+ FieldNameProviderPtr fieldNameProvider,
ObserverPtr observer)
: ShardingDataTransformInstanceMetrics{std::move(instanceId),
std::move(shardKey),
@@ -75,6 +100,7 @@ public:
startTime,
clockSource,
cumulativeMetrics,
+ std::move(fieldNameProvider),
std::move(observer)} {}
Milliseconds getRecipientHighEstimateRemainingTimeMillis() const {
@@ -96,6 +122,7 @@ public:
startTime,
clockSource,
cumulativeMetrics,
+ std::make_unique<ShardingDataTransformInstanceMetricsFieldNameProviderForTest>(),
std::make_unique<ObserverMock>(startTime, timeRemaining)} {}
@@ -118,7 +145,9 @@ public:
}
std::unique_ptr<ShardingDataTransformInstanceMetricsForTest> createInstanceMetrics(
- std::unique_ptr<ObserverMock> mock) {
+ std::unique_ptr<ObserverMock> mock,
+ std::unique_ptr<ShardingDataTransformInstanceMetricsFieldNameProviderForTest>
+ fieldNameProvider) {
return std::make_unique<ShardingDataTransformInstanceMetricsForTest>(
UUID::gen(),
kTestCommand,
@@ -127,6 +156,7 @@ public:
getClockSource()->now(),
getClockSource(),
&_cumulativeMetrics,
+ std::move(fieldNameProvider),
std::move(mock));
}
};
@@ -226,31 +256,31 @@ TEST_F(ShardingDataTransformInstanceMetricsTest, RecipientSetsDocumentsAndBytesT
auto metrics = createInstanceMetrics(UUID::gen(), Role::kRecipient);
auto report = metrics->reportForCurrentOp();
- ASSERT_EQ(report.getIntField("approxDocumentsToCopy"), 0);
- ASSERT_EQ(report.getIntField("approxBytesToCopy"), 0);
- metrics->setDocumentsToCopyCounts(5, 1000);
+ ASSERT_EQ(report.getIntField("approxDocumentsToProcess"), 0);
+ ASSERT_EQ(report.getIntField("approxBytesToScan"), 0);
+ metrics->setDocumentsToProcessCounts(5, 1000);
report = metrics->reportForCurrentOp();
- ASSERT_EQ(report.getIntField("approxDocumentsToCopy"), 5);
- ASSERT_EQ(report.getIntField("approxBytesToCopy"), 1000);
+ ASSERT_EQ(report.getIntField("approxDocumentsToProcess"), 5);
+ ASSERT_EQ(report.getIntField("approxBytesToScan"), 1000);
- metrics->setDocumentsToCopyCounts(3, 750);
+ metrics->setDocumentsToProcessCounts(3, 750);
report = metrics->reportForCurrentOp();
- ASSERT_EQ(report.getIntField("approxDocumentsToCopy"), 3);
- ASSERT_EQ(report.getIntField("approxBytesToCopy"), 750);
+ ASSERT_EQ(report.getIntField("approxDocumentsToProcess"), 3);
+ ASSERT_EQ(report.getIntField("approxBytesToScan"), 750);
}
-TEST_F(ShardingDataTransformInstanceMetricsTest, RecipientIncrementsDocumentsAndBytesCopied) {
+TEST_F(ShardingDataTransformInstanceMetricsTest, RecipientIncrementsDocumentsAndBytesWritten) {
auto metrics = createInstanceMetrics(UUID::gen(), Role::kRecipient);
auto report = metrics->reportForCurrentOp();
- ASSERT_EQ(report.getIntField("documentsCopied"), 0);
- ASSERT_EQ(report.getIntField("bytesCopied"), 0);
- metrics->onDocumentsCopied(5, 1000, Milliseconds(1));
+ ASSERT_EQ(report.getIntField("documentsProcessed"), 0);
+ ASSERT_EQ(report.getIntField("bytesWritten"), 0);
+ metrics->onDocumentsProcessed(5, 1000, Milliseconds(1));
report = metrics->reportForCurrentOp();
- ASSERT_EQ(report.getIntField("documentsCopied"), 5);
- ASSERT_EQ(report.getIntField("bytesCopied"), 1000);
+ ASSERT_EQ(report.getIntField("documentsProcessed"), 5);
+ ASSERT_EQ(report.getIntField("bytesWritten"), 1000);
}
TEST_F(ShardingDataTransformInstanceMetricsTest, CurrentOpReportsCopyingTime) {