summaryrefslogtreecommitdiff
path: root/src
diff options
context:
space:
mode:
authorCheahuychou Mao <mao.cheahuychou@gmail.com>2022-12-16 04:08:15 +0000
committerEvergreen Agent <no-reply@evergreen.mongodb.com>2022-12-16 11:31:40 +0000
commit92db0dbb956013892bf31107323d69662e5e30fe (patch)
tree876b8180484ddeea84ee0a24b836771ba451e66c /src
parenteaa6ac6109000702682a9dcf3f1dfaf0a2353f22 (diff)
downloadmongo-92db0dbb956013892bf31107323d69662e5e30fe.tar.gz
SERVER-72077 Make write distribution metrics calculation account for the replacement update case
Diffstat (limited to 'src')
-rw-r--r--src/mongo/db/s/analyze_shard_key_read_write_distribution.cpp35
-rw-r--r--src/mongo/db/s/analyze_shard_key_read_write_distribution.h13
-rw-r--r--src/mongo/db/s/analyze_shard_key_read_write_distribution_test.cpp112
3 files changed, 149 insertions, 11 deletions
diff --git a/src/mongo/db/s/analyze_shard_key_read_write_distribution.cpp b/src/mongo/db/s/analyze_shard_key_read_write_distribution.cpp
index 426a8b25c09..e452b1dcb1e 100644
--- a/src/mongo/db/s/analyze_shard_key_read_write_distribution.cpp
+++ b/src/mongo/db/s/analyze_shard_key_read_write_distribution.cpp
@@ -131,12 +131,18 @@ template <typename DistributionMetricsType, typename SampleSizeType>
BSONObj
DistributionMetricsCalculator<DistributionMetricsType, SampleSizeType>::_incrementMetricsForQuery(
OperationContext* opCtx,
- const BSONObj& filter,
+ const BSONObj& primaryFilter,
const BSONObj& collation,
+ const BSONObj& secondaryFilter,
const boost::optional<LegacyRuntimeConstants>& runtimeConstants,
const boost::optional<BSONObj>& letParameters) {
+ auto filter = primaryFilter;
auto shardKey = uassertStatusOK(
- _getShardKeyPattern().extractShardKeyFromQuery(opCtx, _targeter.getNS(), filter));
+ _getShardKeyPattern().extractShardKeyFromQuery(opCtx, _targeter.getNS(), primaryFilter));
+ if (shardKey.isEmpty() && !secondaryFilter.isEmpty()) {
+ filter = secondaryFilter;
+ shardKey = _getShardKeyPattern().extractShardKeyFromDoc(secondaryFilter);
+ }
// Increment metrics about range targeting.
auto&& cif = [&]() {
@@ -272,8 +278,16 @@ void WriteDistributionMetricsCalculator::_addUpdateQuery(
OperationContext* opCtx, const write_ops::UpdateCommandRequest& cmd) {
for (const auto& updateOp : cmd.getUpdates()) {
_numUpdate++;
+ auto primaryFilter = updateOp.getQ();
+ // If this is a non-upsert replacement update, the replacement document can be used as a
+ // filter.
+ auto secondaryFilter = !updateOp.getUpsert() &&
+ updateOp.getU().type() == write_ops::UpdateModification::Type::kReplacement
+ ? updateOp.getU().getUpdateReplacement()
+ : BSONObj();
_incrementMetricsForQuery(opCtx,
- updateOp.getQ(),
+ primaryFilter,
+ secondaryFilter,
write_ops::collationOf(updateOp),
updateOp.getMulti(),
cmd.getLegacyRuntimeConstants(),
@@ -285,8 +299,11 @@ void WriteDistributionMetricsCalculator::_addDeleteQuery(
OperationContext* opCtx, const write_ops::DeleteCommandRequest& cmd) {
for (const auto& deleteOp : cmd.getDeletes()) {
_numDelete++;
+ auto primaryFilter = deleteOp.getQ();
+ auto secondaryFilter = BSONObj();
_incrementMetricsForQuery(opCtx,
- deleteOp.getQ(),
+ primaryFilter,
+ secondaryFilter,
write_ops::collationOf(deleteOp),
deleteOp.getMulti(),
cmd.getLegacyRuntimeConstants(),
@@ -297,8 +314,11 @@ void WriteDistributionMetricsCalculator::_addDeleteQuery(
void WriteDistributionMetricsCalculator::_addFindAndModifyQuery(
OperationContext* opCtx, const write_ops::FindAndModifyCommandRequest& cmd) {
_numFindAndModify++;
+ auto primaryFilter = cmd.getQuery();
+ auto secondaryFilter = BSONObj();
_incrementMetricsForQuery(opCtx,
- cmd.getQuery(),
+ primaryFilter,
+ secondaryFilter,
cmd.getCollation().value_or(BSONObj()),
false /* isMulti */,
cmd.getLegacyRuntimeConstants(),
@@ -307,13 +327,14 @@ void WriteDistributionMetricsCalculator::_addFindAndModifyQuery(
void WriteDistributionMetricsCalculator::_incrementMetricsForQuery(
OperationContext* opCtx,
- const BSONObj& filter,
+ const BSONObj& primaryFilter,
+ const BSONObj& secondaryFilter,
const BSONObj& collation,
bool isMulti,
const boost::optional<LegacyRuntimeConstants>& runtimeConstants,
const boost::optional<BSONObj>& letParameters) {
auto shardKey = DistributionMetricsCalculator::_incrementMetricsForQuery(
- opCtx, filter, collation, runtimeConstants, letParameters);
+ opCtx, primaryFilter, collation, secondaryFilter, runtimeConstants, letParameters);
if (shardKey.isEmpty()) {
// Increment metrics about writes without shard key.
diff --git a/src/mongo/db/s/analyze_shard_key_read_write_distribution.h b/src/mongo/db/s/analyze_shard_key_read_write_distribution.h
index e0066bb8e83..d7c88f996d5 100644
--- a/src/mongo/db/s/analyze_shard_key_read_write_distribution.h
+++ b/src/mongo/db/s/analyze_shard_key_read_write_distribution.h
@@ -92,14 +92,18 @@ protected:
}
/**
- * The helper for 'addQuery'. Increments the metrics for the query with the given filter,
- * collation, run-time contants and let parameters (the last two are only applicable to writes).
+ * The helper for 'addQuery'. Increments the metrics for the query with the given filter(s),
+ * collation, run-time constants and let parameters. The secondary filter is only applicable to
+ * non-upsert replacement updates, and the run-time constants and let parameters are only
+ * applicable to writes.
+ *
* If the query filters by shard key equality, returns the shard key value.
*/
BSONObj _incrementMetricsForQuery(
OperationContext* opCtx,
- const BSONObj& filter,
+    const BSONObj& primaryFilter,
const BSONObj& collation,
+ const BSONObj& secondaryFilter = BSONObj(),
const boost::optional<LegacyRuntimeConstants>& runtimeConstants = boost::none,
const boost::optional<BSONObj>& letParameters = boost::none);
@@ -177,7 +181,8 @@ private:
}
void _incrementMetricsForQuery(OperationContext* opCtx,
- const BSONObj& filter,
+ const BSONObj& primaryFilter,
+ const BSONObj& secondaryFilter,
const BSONObj& collation,
bool isMulti,
const boost::optional<LegacyRuntimeConstants>& runtimeConstants,
diff --git a/src/mongo/db/s/analyze_shard_key_read_write_distribution_test.cpp b/src/mongo/db/s/analyze_shard_key_read_write_distribution_test.cpp
index cc4c047cbb6..bc37e591835 100644
--- a/src/mongo/db/s/analyze_shard_key_read_write_distribution_test.cpp
+++ b/src/mongo/db/s/analyze_shard_key_read_write_distribution_test.cpp
@@ -1280,6 +1280,64 @@ TEST_F(WriteDistributionFilterByShardKeyRangeTest, ShardKeyNonEquality) {
numDispatchedByRange);
}
+class WriteDistributionFilterByShardKeyRangeReplacementUpdateTest
+ : public ReadWriteDistributionTest {
+protected:
+ SampledQueryDocument makeSampledUpdateQueryDocument(const BSONObj& filter,
+ const BSONObj& updateMod,
+ bool upsert) const {
+ auto updateOp = write_ops::UpdateOpEntry(filter, write_ops::UpdateModification(updateMod));
+ updateOp.setMulti(false); // replacement-style update cannot be multi.
+ updateOp.setUpsert(upsert);
+ return ReadWriteDistributionTest::makeSampledUpdateQueryDocument({updateOp});
+ }
+};
+
+TEST_F(WriteDistributionFilterByShardKeyRangeReplacementUpdateTest, NotUpsert) {
+ auto assertTargetMetrics = [&](const CollectionRoutingInfoTargeter& targeter,
+ const SampledQueryDocument& queryDoc,
+ const std::vector<int64_t> numDispatchedByRange) {
+ WriteTargetMetricsBundle metrics;
+ metrics.numTargetedOneShard = 1;
+ metrics.numDispatchedByRange = numDispatchedByRange;
+ assertTargetMetricsForWriteQuery(targeter, queryDoc, metrics);
+ };
+
+ auto targeter = makeCollectionRoutingInfoTargeter(chunkSplitInfoRangeSharding0);
+ auto filter = BSON("a.x" << BSON("$lt" << 0));
+ auto updateMod = BSON("a" << BSON("x" << 0) << "b"
+ << BSON("y"
+ << "A")
+ << "c" << 0);
+ auto numDispatchedByRange = std::vector<int64_t>({0, 1, 0});
+ assertTargetMetrics(targeter,
+ makeSampledUpdateQueryDocument(filter, updateMod, false /* upsert */),
+ numDispatchedByRange);
+}
+
+TEST_F(WriteDistributionFilterByShardKeyRangeReplacementUpdateTest, Upsert) {
+ auto assertTargetMetrics = [&](const CollectionRoutingInfoTargeter& targeter,
+ const SampledQueryDocument& queryDoc,
+ const std::vector<int64_t> numDispatchedByRange) {
+ WriteTargetMetricsBundle metrics;
+ metrics.numTargetedMultipleShards = 1;
+ metrics.numDispatchedByRange = numDispatchedByRange;
+ metrics.numSingleWritesWithoutShardKey = 1;
+ assertTargetMetricsForWriteQuery(targeter, queryDoc, metrics);
+ };
+
+ auto targeter = makeCollectionRoutingInfoTargeter(chunkSplitInfoRangeSharding0);
+ auto filter = BSON("a.x" << BSON("$lt" << 0));
+ auto updateMod = BSON("a" << BSON("x" << 0) << "b"
+ << BSON("y"
+ << "A")
+ << "c" << 0);
+ auto numDispatchedByRange = std::vector<int64_t>({1, 1, 0});
+ assertTargetMetrics(targeter,
+ makeSampledUpdateQueryDocument(filter, updateMod, true /* upsert */),
+ numDispatchedByRange);
+}
+
class WriteDistributionNotFilterByShardKeyTest : public ReadWriteDistributionTest {
protected:
void assertTargetMetrics(const CollectionRoutingInfoTargeter& targeter,
@@ -1380,6 +1438,60 @@ TEST_F(WriteDistributionNotFilterByShardKeyTest, ShardKeyPrefixEqualityDotted) {
targeter, makeSampledFindAndModifyQueryDocument(filter, updateMod), false /* multi */);
}
+class WriteDistributionNotFilterByShardKeyReplacementUpdateTest : public ReadWriteDistributionTest {
+protected:
+ SampledQueryDocument makeSampledUpdateQueryDocument(const BSONObj& filter,
+ const BSONObj& updateMod,
+ bool upsert) const {
+ auto updateOp = write_ops::UpdateOpEntry(filter, write_ops::UpdateModification(updateMod));
+ updateOp.setMulti(false); // replacement-style update cannot be multi.
+ updateOp.setUpsert(upsert);
+ return ReadWriteDistributionTest::makeSampledUpdateQueryDocument({updateOp});
+ }
+};
+
+TEST_F(WriteDistributionNotFilterByShardKeyReplacementUpdateTest, NotUpsert) {
+ auto assertTargetMetrics = [&](const CollectionRoutingInfoTargeter& targeter,
+ const SampledQueryDocument& queryDoc,
+ const std::vector<int64_t> numDispatchedByRange) {
+ WriteTargetMetricsBundle metrics;
+ metrics.numTargetedOneShard = 1;
+ metrics.numDispatchedByRange = numDispatchedByRange;
+ assertTargetMetricsForWriteQuery(targeter, queryDoc, metrics);
+ };
+
+ auto targeter = makeCollectionRoutingInfoTargeter(chunkSplitInfoRangeSharding0);
+ auto filter = BSON("_id" << 0);
+ auto updateMod = BSON("_id" << 0 << "a" << BSON("x" << 0) << "b"
+ << BSON("y"
+ << "A")
+ << "c" << 0);
+ auto numDispatchedByRange = std::vector<int64_t>({0, 1, 0});
+ assertTargetMetrics(targeter,
+ makeSampledUpdateQueryDocument(filter, updateMod, false /* upsert */),
+ numDispatchedByRange);
+}
+
+TEST_F(WriteDistributionNotFilterByShardKeyReplacementUpdateTest, Upsert) {
+ auto assertTargetMetrics = [&](const CollectionRoutingInfoTargeter& targeter,
+ const SampledQueryDocument& queryDoc) {
+ WriteTargetMetricsBundle metrics;
+ metrics.numTargetedAllShards = 1;
+ metrics.numDispatchedByRange = std::vector<int64_t>({1, 1, 1});
+ metrics.numSingleWritesWithoutShardKey = 1;
+ assertTargetMetricsForWriteQuery(targeter, queryDoc, metrics);
+ };
+
+ auto targeter = makeCollectionRoutingInfoTargeter(chunkSplitInfoRangeSharding0);
+ auto filter = BSON("_id" << 0);
+ auto updateMod = BSON("_id" << 0 << "a" << BSON("x" << 0) << "b"
+ << BSON("y"
+ << "A")
+ << "c" << 0);
+ assertTargetMetrics(targeter,
+ makeSampledUpdateQueryDocument(filter, updateMod, true /* upsert */));
+}
+
} // namespace
} // namespace analyze_shard_key
} // namespace mongo