author     Israel Hsu <israel.hsu@mongodb.com>                2022-10-17 15:07:44 +0000
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>   2022-10-17 16:27:39 +0000
commit     c73958b4f0ba6180f920105d77af0a51ebd456e2 (patch)
tree       4793855835a51a7952be84b52a53b6a18d7dd332
parent     87cbc694a8702ef9beec670ed889519706167bf1 (diff)
SERVER-68755 Implement ChunkManagerTargeter outputting chunk ranges
-rw-r--r--  src/mongo/s/catalog/type_chunk.cpp               |  10
-rw-r--r--  src/mongo/s/catalog/type_chunk.h                 |   6
-rw-r--r--  src/mongo/s/chunk_manager.cpp                    |  37
-rw-r--r--  src/mongo/s/chunk_manager.h                      |  22
-rw-r--r--  src/mongo/s/chunk_manager_targeter.cpp           |  50
-rw-r--r--  src/mongo/s/chunk_manager_targeter.h             |  26
-rw-r--r--  src/mongo/s/chunk_manager_targeter_test.cpp      | 502
-rw-r--r--  src/mongo/s/mock_ns_targeter.cpp                 |   6
-rw-r--r--  src/mongo/s/mock_ns_targeter.h                   |  40
-rw-r--r--  src/mongo/s/ns_targeter.h                        |  25
-rw-r--r--  src/mongo/s/write_ops/batch_write_exec_test.cpp  |  69
11 files changed, 662 insertions(+), 131 deletions(-)
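
Editor's note: this commit threads an optional std::set<ChunkRange>* output parameter through the write-targeting path (NSTargeter, ChunkManagerTargeter, ChunkManager), so a caller can learn which chunk ranges a write targets in addition to which shards; every new parameter defaults to nullptr, so existing call sites compile unchanged. A minimal sketch of the opt-in call pattern against the new ChunkManager::getShardIdsForQuery signature; the cm, expCtx, query, and collation variables are assumed to be in scope, and the fragment is illustrative rather than part of the patch:

    // Opt in: pass a fresh set; the callee invariants that it starts empty.
    std::set<ShardId> shardIds;
    std::set<ChunkRange> chunkRanges;
    cm.getShardIdsForQuery(expCtx, query, collation, &shardIds, &chunkRanges);
    // chunkRanges now holds the range of every chunk the query may touch,
    // deduplicated and ordered via the new ChunkRange::operator<.

    // Opt out: omit the last argument (it defaults to nullptr); no
    // chunk-range bookkeeping is performed.
    std::set<ShardId> shardIdsOnly;
    cm.getShardIdsForQuery(expCtx, query, collation, &shardIdsOnly);
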
diff --git a/src/mongo/s/catalog/type_chunk.cpp b/src/mongo/s/catalog/type_chunk.cpp
index 88fd1fc3aa1..b330697db0d 100644
--- a/src/mongo/s/catalog/type_chunk.cpp
+++ b/src/mongo/s/catalog/type_chunk.cpp
@@ -164,6 +164,16 @@ bool ChunkRange::operator!=(const ChunkRange& other) const {
     return !(*this == other);
 }

+bool ChunkRange::operator<(const ChunkRange& other) const {
+    auto minCompare = _minKey.woCompare(other._minKey);
+    if (minCompare < 0) {
+        return true;
+    } else if (minCompare == 0 && _maxKey.woCompare(other._maxKey) < 0) {
+        return true;
+    }
+    return false;
+}
+
 bool ChunkRange::covers(ChunkRange const& other) const {
     auto le = [](auto const& a, auto const& b) { return a.woCompare(b) <= 0; };
     return le(_minKey, other._minKey) && le(other._maxKey, _maxKey);
diff --git a/src/mongo/s/catalog/type_chunk.h b/src/mongo/s/catalog/type_chunk.h
index fa1ad7a5ab9..70d20e15c15 100644
--- a/src/mongo/s/catalog/type_chunk.h
+++ b/src/mongo/s/catalog/type_chunk.h
@@ -103,6 +103,12 @@ public:
     bool operator!=(const ChunkRange& other) const;

     /**
+     * Returns true if either min is less than rhs min, or in the case that min == rhs min, true if
+     * max is less than rhs max. Otherwise returns false.
+     */
+    bool operator<(const ChunkRange& rhs) const;
+
+    /**
      * Returns true iff the union of *this and the argument range is the same as *this.
     */
     bool covers(ChunkRange const& other) const;
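
Editor's note: operator< above gives ChunkRange the strict weak ordering that std::set<ChunkRange> requires. Ranges compare by min key first, with max key as a tiebreaker, so identical ranges collapse to a single element and iteration proceeds in key order. A standalone model of the same comparison, with integer keys standing in for BSONObj::woCompare (illustrative only, not mongo code):

    #include <cassert>
    #include <set>

    struct Range {
        int min, max;
        bool operator<(const Range& rhs) const {
            // Compare by min first; break ties on max, mirroring
            // ChunkRange::operator<.
            if (min < rhs.min)
                return true;
            return min == rhs.min && max < rhs.max;
        }
    };

    int main() {
        std::set<Range> ranges;
        ranges.insert({0, 100});
        ranges.insert({0, 100});   // duplicate: !(a < b) && !(b < a), so deduplicated
        ranges.insert({-100, 0});
        assert(ranges.size() == 2);
        assert(ranges.begin()->min == -100);  // iteration is ordered by min key
    }
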
diff --git a/src/mongo/s/chunk_manager.cpp b/src/mongo/s/chunk_manager.cpp
index fe58ca2e52a..44278b34b86 100644
--- a/src/mongo/s/chunk_manager.cpp
+++ b/src/mongo/s/chunk_manager.cpp
@@ -413,7 +413,12 @@ bool ChunkManager::keyBelongsToShard(const BSONObj& shardKey, const ShardId& sha
 void ChunkManager::getShardIdsForQuery(boost::intrusive_ptr<ExpressionContext> expCtx,
                                        const BSONObj& query,
                                        const BSONObj& collation,
-                                       std::set<ShardId>* shardIds) const {
+                                       std::set<ShardId>* shardIds,
+                                       std::set<ChunkRange>* chunkRanges) const {
+    if (chunkRanges) {
+        invariant(chunkRanges->empty());
+    }
+
     auto findCommand = std::make_unique<FindCommandRequest>(_rt->optRt->nss());
     findCommand->setFilter(query.getOwned());

@@ -441,6 +446,9 @@ void ChunkManager::getShardIdsForQuery(boost::intrusive_ptr<ExpressionContext> e
     try {
         auto chunk = findIntersectingChunk(shardKeyToFind, collation);
         shardIds->insert(chunk.getShardId());
+        if (chunkRanges) {
+            chunkRanges->insert(chunk.getRange());
+        }
         return;
     } catch (const DBException&) {
         // The query uses multiple shards
@@ -463,7 +471,7 @@ void ChunkManager::getShardIdsForQuery(boost::intrusive_ptr<ExpressionContext> e
     BoundList ranges = _rt->optRt->getShardKeyPattern().flattenBounds(bounds);

     for (BoundList::const_iterator it = ranges.begin(); it != ranges.end(); ++it) {
-        getShardIdsForRange(it->first /*min*/, it->second /*max*/, shardIds);
+        getShardIdsForRange(it->first /*min*/, it->second /*max*/, shardIds, chunkRanges);

         // Once we know we need to visit all shards no need to keep looping.
         // However, this optimization does not apply when we are reading from a snapshot
@@ -481,6 +489,9 @@ void ChunkManager::getShardIdsForQuery(boost::intrusive_ptr<ExpressionContext> e
     if (shardIds->empty()) {
         _rt->optRt->forEachChunk([&](const std::shared_ptr<ChunkInfo>& chunkInfo) {
             shardIds->insert(chunkInfo->getShardIdAt(_clusterTime));
+            if (chunkRanges) {
+                chunkRanges->insert(chunkInfo->getRange());
+            }
             return false;
         });
     }
@@ -488,7 +499,12 @@ void ChunkManager::getShardIdsForQuery(boost::intrusive_ptr<ExpressionContext> e

 void ChunkManager::getShardIdsForRange(const BSONObj& min,
                                        const BSONObj& max,
-                                       std::set<ShardId>* shardIds) const {
+                                       std::set<ShardId>* shardIds,
+                                       std::set<ChunkRange>* chunkRanges) const {
+    if (chunkRanges) {
+        invariant(chunkRanges->empty());
+    }
+
     // If our range is [MinKey, MaxKey], we can simply return all shard ids right away. However,
     // this optimization does not apply when we are reading from a snapshot because _shardVersions
     // contains shards with chunks and is built based on the last refresh. Therefore, it is
@@ -496,11 +512,17 @@ void ChunkManager::getShardIdsForRange(const BSONObj& min,
     // used to at _clusterTime.
     if (!_clusterTime && allElementsAreOfType(MinKey, min) && allElementsAreOfType(MaxKey, max)) {
         getAllShardIds(shardIds);
+        if (chunkRanges) {
+            getAllChunkRanges(chunkRanges);
+        }
         return;
     }

     _rt->optRt->forEachOverlappingChunk(min, max, true, [&](auto& chunkInfo) {
         shardIds->insert(chunkInfo->getShardIdAt(_clusterTime));
+        if (chunkRanges) {
+            chunkRanges->insert(chunkInfo->getRange());
+        }

         // No need to iterate through the rest of the ranges, because we already know we need to use
         // all shards. However, this optimization does not apply when we are reading from a snapshot
@@ -554,12 +576,21 @@ ShardId ChunkManager::getMinKeyShardIdWithSimpleCollation() const {
 }

 void RoutingTableHistory::getAllShardIds(std::set<ShardId>* all) const {
+    invariant(all->empty());
+
     std::transform(_shardVersions.begin(),
                    _shardVersions.end(),
                    std::inserter(*all, all->begin()),
                    [](const ShardVersionMap::value_type& pair) { return pair.first; });
 }

+void RoutingTableHistory::getAllChunkRanges(std::set<ChunkRange>* all) const {
+    forEachChunk([&](const std::shared_ptr<ChunkInfo>& chunkInfo) {
+        all->insert(chunkInfo->getRange());
+        return true;
+    });
+}
+
 int RoutingTableHistory::getNShardsOwningChunks() const {
     return _shardVersions.size();
 }
diff --git a/src/mongo/s/chunk_manager.h b/src/mongo/s/chunk_manager.h
index d8473b8d91a..b1dcbb3f815 100644
--- a/src/mongo/s/chunk_manager.h
+++ b/src/mongo/s/chunk_manager.h
@@ -261,6 +261,11 @@ public:
     void getAllShardIds(std::set<ShardId>* all) const;

     /**
+     * Returns all chunk ranges for the collection.
+     */
+    void getAllChunkRanges(std::set<ChunkRange>* all) const;
+
+    /**
      * Returns the number of shards on which the collection has any chunks
     */
     int getNShardsOwningChunks() const;
@@ -611,19 +616,25 @@ public:
     /**
      * Finds the shard IDs for a given filter and collation. If collation is empty, we use the
      * collection default collation for targeting.
+     * If output parameter `chunkRanges` is non-null, the set is populated with ChunkRanges that
+     * would be targeted by the query; if nullptr, no processing of chunk ranges occurs.
     */
     void getShardIdsForQuery(boost::intrusive_ptr<ExpressionContext> expCtx,
                              const BSONObj& query,
                              const BSONObj& collation,
-                             std::set<ShardId>* shardIds) const;
+                             std::set<ShardId>* shardIds,
+                             std::set<ChunkRange>* chunkRanges = nullptr) const;

     /**
      * Returns all shard ids which contain chunks overlapping the range [min, max]. Please note the
      * inclusive bounds on both sides (SERVER-20768).
+     * If output parameter `chunkRanges` is non-null, the set is populated with ChunkRanges that
+     * would be targeted by the query.
     */
     void getShardIdsForRange(const BSONObj& min,
                              const BSONObj& max,
-                             std::set<ShardId>* shardIds) const;
+                             std::set<ShardId>* shardIds,
+                             std::set<ChunkRange>* chunkRanges = nullptr) const;

     /**
      * Returns the ids of all shards on which the collection has any chunks.
@@ -633,6 +644,13 @@ public:
     }

     /**
+     * Returns the chunk ranges of all shards on which the collection has any chunks.
+     */
+    void getAllChunkRanges(std::set<ChunkRange>* all) const {
+        _rt->optRt->getAllChunkRanges(all);
+    }
+
+    /**
      * Returns the number of shards on which the collection has any chunks
     */
     int getNShardsOwningChunks() const {
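
Editor's note: two caller contracts are worth spelling out in the ChunkManager changes above. The output sets must start empty (getAllShardIds now invariants this, and both getShardIdsFor* entry points invariant it for chunkRanges), and getAllChunkRanges relies on its forEachChunk visitor returning true to keep iterating over every chunk, whereas the fallback in getShardIdsForQuery returns false to stop after the first chunk. A standalone model of that visitor contract (illustrative only):

    #include <cassert>
    #include <functional>
    #include <set>
    #include <vector>

    // Model of the forEachChunk contract: the visitor returns true to
    // continue iterating and false to stop early.
    void forEachChunk(const std::vector<int>& chunks,
                      const std::function<bool(int)>& visit) {
        for (int c : chunks) {
            if (!visit(c))
                return;
        }
    }

    int main() {
        std::vector<int> chunks{3, 1, 2};

        // getAllChunkRanges-style use: always return true, so every chunk
        // lands in the deduplicating, ordered output set.
        std::set<int> all;
        forEachChunk(chunks, [&](int c) {
            all.insert(c);
            return true;
        });
        assert(all.size() == 3);

        // Early-exit use: return false once one chunk has been recorded.
        std::set<int> first;
        forEachChunk(chunks, [&](int c) {
            first.insert(c);
            return false;
        });
        assert(first.size() == 1);
    }
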
diff --git a/src/mongo/s/chunk_manager_targeter.cpp b/src/mongo/s/chunk_manager_targeter.cpp
index 442a1a718b4..980ceb99656 100644
--- a/src/mongo/s/chunk_manager_targeter.cpp
+++ b/src/mongo/s/chunk_manager_targeter.cpp
@@ -333,7 +333,8 @@ BSONObj ChunkManagerTargeter::extractBucketsShardKeyFromTimeseriesDoc(
 }

 ShardEndpoint ChunkManagerTargeter::targetInsert(OperationContext* opCtx,
-                                                 const BSONObj& doc) const {
+                                                 const BSONObj& doc,
+                                                 std::set<ChunkRange>* chunkRanges) const {
     BSONObj shardKey;

     if (_cm.isSharded()) {
@@ -358,7 +359,7 @@ ShardEndpoint ChunkManagerTargeter::targetInsert(OperationContext* opCtx,

     // Target the shard key or database primary
     if (!shardKey.isEmpty()) {
-        return uassertStatusOK(_targetShardKey(shardKey, CollationSpec::kSimpleSpec));
+        return uassertStatusOK(_targetShardKey(shardKey, CollationSpec::kSimpleSpec, chunkRanges));
     }

     // TODO (SERVER-51070): Remove the boost::none when the config server can support shardVersion
@@ -369,8 +370,8 @@ ShardEndpoint ChunkManagerTargeter::targetInsert(OperationContext* opCtx,
         _nss.isOnInternalDb() ? boost::optional<DatabaseVersion>() : _cm.dbVersion());
 }

-std::vector<ShardEndpoint> ChunkManagerTargeter::targetUpdate(OperationContext* opCtx,
-                                                              const BatchItemRef& itemRef) const {
+std::vector<ShardEndpoint> ChunkManagerTargeter::targetUpdate(
+    OperationContext* opCtx, const BatchItemRef& itemRef, std::set<ChunkRange>* chunkRanges) const {
     // If the update is replacement-style:
     // 1. Attempt to target using the query. If this fails, AND the query targets more than one
     //    shard,
@@ -445,12 +446,14 @@ std::vector<ShardEndpoint> ChunkManagerTargeter::targetUpdate(OperationContext*
         getUpdateExprForTargeting(expCtx, shardKeyPattern, query, updateOp.getU());

     // Utility function to target an update by shard key, and to handle any potential error results.
-    auto targetByShardKey = [this, &collation](StatusWith<BSONObj> swShardKey, std::string msg) {
+    auto targetByShardKey = [this, &collation, &chunkRanges](StatusWith<BSONObj> swShardKey,
+                                                             std::string msg) {
         const auto& shardKey = uassertStatusOKWithContext(std::move(swShardKey), msg);
         uassert(ErrorCodes::ShardKeyNotFound,
                 str::stream() << msg << " :: could not extract exact shard key",
                 !shardKey.isEmpty());
-        return std::vector{uassertStatusOKWithContext(_targetShardKey(shardKey, collation), msg)};
+        return std::vector{
+            uassertStatusOKWithContext(_targetShardKey(shardKey, collation, chunkRanges), msg)};
     };

     // If this is an upsert, then the query must contain an exact match on the shard key. If we were
@@ -463,7 +466,7 @@ std::vector<ShardEndpoint> ChunkManagerTargeter::targetUpdate(OperationContext*

     // We first try to target based on the update's query. It is always valid to forward any update
     // or upsert to a single shard, so return immediately if we are able to target a single shard.
-    auto endPoints = uassertStatusOK(_targetQuery(expCtx, query, collation));
+    auto endPoints = uassertStatusOK(_targetQuery(expCtx, query, collation, chunkRanges));
     if (endPoints.size() == 1) {
         return endPoints;
     }
@@ -471,6 +474,9 @@ std::vector<ShardEndpoint> ChunkManagerTargeter::targetUpdate(OperationContext*
     // Replacement-style updates must always target a single shard. If we were unable to do so using
     // the query, we attempt to extract the shard key from the replacement and target based on it.
     if (updateOp.getU().type() == write_ops::UpdateModification::Type::kReplacement) {
+        if (chunkRanges) {
+            chunkRanges->clear();
+        }
         return targetByShardKey(shardKeyPattern.extractShardKeyFromDoc(updateExpr),
                                 "Failed to target update by replacement document");
     }
@@ -495,8 +501,8 @@ std::vector<ShardEndpoint> ChunkManagerTargeter::targetUpdate(OperationContext*
     return endPoints;
 }

-std::vector<ShardEndpoint> ChunkManagerTargeter::targetDelete(OperationContext* opCtx,
-                                                              const BatchItemRef& itemRef) const {
+std::vector<ShardEndpoint> ChunkManagerTargeter::targetDelete(
+    OperationContext* opCtx, const BatchItemRef& itemRef, std::set<ChunkRange>* chunkRanges) const {
     const auto& deleteOp = itemRef.getDelete();
     const auto collation = write_ops::collationOf(deleteOp);
@@ -548,7 +554,7 @@ std::vector<ShardEndpoint> ChunkManagerTargeter::targetDelete(OperationContext*

     // Target the shard key or delete query
     if (!shardKey.isEmpty()) {
-        auto swEndpoint = _targetShardKey(shardKey, collation);
+        auto swEndpoint = _targetShardKey(shardKey, collation, chunkRanges);
         if (swEndpoint.isOK()) {
             return std::vector{std::move(swEndpoint.getValue())};
         }
@@ -580,13 +586,17 @@ std::vector<ShardEndpoint> ChunkManagerTargeter::targetDelete(OperationContext*
                           << ", shard key pattern: " << _cm.getShardKeyPattern().toString(),
             !_cm.isSharded() || deleteOp.getMulti() || isExactIdQuery(opCtx, *cq, _cm));

-    return uassertStatusOK(_targetQuery(expCtx, deleteQuery, collation));
+    if (chunkRanges) {
+        chunkRanges->clear();
+    }
+    return uassertStatusOK(_targetQuery(expCtx, deleteQuery, collation, chunkRanges));
 }

 StatusWith<std::vector<ShardEndpoint>> ChunkManagerTargeter::_targetQuery(
     boost::intrusive_ptr<ExpressionContext> expCtx,
     const BSONObj& query,
-    const BSONObj& collation) const {
+    const BSONObj& collation,
+    std::set<ChunkRange>* chunkRanges) const {
     if (!_cm.isSharded()) {
         // TODO (SERVER-51070): Remove the boost::none when the config server can support
         // shardVersion in commands
@@ -598,7 +608,7 @@ StatusWith<std::vector<ShardEndpoint>> ChunkManagerTargeter::_targetQuery(
     std::set<ShardId> shardIds;
     try {
-        _cm.getShardIdsForQuery(expCtx, query, collation, &shardIds);
+        _cm.getShardIdsForQuery(expCtx, query, collation, &shardIds, chunkRanges);
     } catch (const DBException& ex) {
         return ex.toStatus();
     }
@@ -615,10 +625,13 @@ StatusWith<std::vector<ShardEndpoint>> ChunkManagerTargeter::_targetQuery(
     return endpoints;
 }

-StatusWith<ShardEndpoint> ChunkManagerTargeter::_targetShardKey(const BSONObj& shardKey,
-                                                                const BSONObj& collation) const {
+StatusWith<ShardEndpoint> ChunkManagerTargeter::_targetShardKey(
+    const BSONObj& shardKey, const BSONObj& collation, std::set<ChunkRange>* chunkRanges) const {
     try {
         auto chunk = _cm.findIntersectingChunk(shardKey, collation);
+        if (chunkRanges) {
+            chunkRanges->insert(chunk.getRange());
+        }
         const auto placementVersion = _cm.getVersion(chunk.getShardId());
         return ShardEndpoint(
             chunk.getShardId(),
@@ -630,7 +643,8 @@ StatusWith<ShardEndpoint> ChunkManagerTargeter::_targetShardKey(const BSONObj& s
     MONGO_UNREACHABLE;
 }

-std::vector<ShardEndpoint> ChunkManagerTargeter::targetAllShards(OperationContext* opCtx) const {
+std::vector<ShardEndpoint> ChunkManagerTargeter::targetAllShards(
+    OperationContext* opCtx, std::set<ChunkRange>* chunkRanges) const {
     // This function is only called if doing a multi write that targets more than one shard. This
     // implies the collection is sharded, so we should always have a chunk manager.
     invariant(_cm.isSharded());
@@ -646,6 +660,10 @@ std::vector<ShardEndpoint> ChunkManagerTargeter::targetAllShards(OperationContex
             boost::none);
     }

+    if (chunkRanges) {
+        _cm.getAllChunkRanges(chunkRanges);
+    }
+
     return endpoints;
 }
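
Editor's note: one subtlety in the targeter changes above is the clear-then-retarget discipline. When a query-based pass over-targets and targetUpdate falls back to targeting by the replacement document's shard key (or targetDelete falls back to a query-based pass), the ranges collected by the abandoned attempt are discarded with chunkRanges->clear() first, so the caller only observes ranges for the endpoints actually returned. A standalone model of that out-parameter discipline (illustrative only):

    #include <cassert>
    #include <set>
    #include <string>

    // Model: a broad first targeting pass may record several ranges; if a
    // narrower fallback pass decides the final endpoints, the ranges from the
    // abandoned pass must be cleared so the out-parameter matches the result.
    int target(bool firstPassSufficed, std::set<std::string>* chunkRanges) {
        if (chunkRanges) {
            chunkRanges->insert("range-A");
            chunkRanges->insert("range-B");
        }
        if (firstPassSufficed)
            return 2;  // two endpoints; keep both recorded ranges

        if (chunkRanges) {
            chunkRanges->clear();  // restart bookkeeping for the fallback pass
            chunkRanges->insert("range-A");
        }
        return 1;  // single endpoint; exactly one range is reported
    }

    int main() {
        std::set<std::string> ranges;
        assert(target(false, &ranges) == 1 && ranges.size() == 1);
        ranges.clear();
        assert(target(true, &ranges) == 2 && ranges.size() == 2);
        assert(target(false, nullptr) == 1);  // opting out is always safe
    }
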
diff --git a/src/mongo/s/chunk_manager_targeter.h b/src/mongo/s/chunk_manager_targeter.h
index 0a92323ef4e..5480a47dbd3 100644
--- a/src/mongo/s/chunk_manager_targeter.h
+++ b/src/mongo/s/chunk_manager_targeter.h
@@ -30,6 +30,7 @@
 #pragma once

 #include <map>
+#include <set>

 #include "mongo/bson/bsonobj.h"
 #include "mongo/bson/bsonobj_comparator_interface.h"
@@ -79,15 +80,22 @@ public:

     const NamespaceString& getNS() const override;

-    ShardEndpoint targetInsert(OperationContext* opCtx, const BSONObj& doc) const override;
+    ShardEndpoint targetInsert(OperationContext* opCtx,
+                               const BSONObj& doc,
+                               std::set<ChunkRange>* chunkRange = nullptr) const override;

-    std::vector<ShardEndpoint> targetUpdate(OperationContext* opCtx,
-                                            const BatchItemRef& itemRef) const override;
+    std::vector<ShardEndpoint> targetUpdate(
+        OperationContext* opCtx,
+        const BatchItemRef& itemRef,
+        std::set<ChunkRange>* chunkRange = nullptr) const override;

-    std::vector<ShardEndpoint> targetDelete(OperationContext* opCtx,
-                                            const BatchItemRef& itemRef) const override;
+    std::vector<ShardEndpoint> targetDelete(
+        OperationContext* opCtx,
+        const BatchItemRef& itemRef,
+        std::set<ChunkRange>* chunkRange = nullptr) const override;

-    std::vector<ShardEndpoint> targetAllShards(OperationContext* opCtx) const override;
+    std::vector<ShardEndpoint> targetAllShards(
+        OperationContext* opCtx, std::set<ChunkRange>* chunkRanges = nullptr) const override;

     void noteCouldNotTarget() override;

@@ -136,7 +144,8 @@ private:
     StatusWith<std::vector<ShardEndpoint>> _targetQuery(
         boost::intrusive_ptr<ExpressionContext> expCtx,
         const BSONObj& query,
-        const BSONObj& collation) const;
+        const BSONObj& collation,
+        std::set<ChunkRange>* chunkRanges) const;

     /**
      * Returns a ShardEndpoint for an exact shard key query.
@@ -147,7 +156,8 @@ private:
      * If 'collation' is empty, we use the collection default collation for targeting.
     */
     StatusWith<ShardEndpoint> _targetShardKey(const BSONObj& shardKey,
-                                              const BSONObj& collation) const;
+                                              const BSONObj& collation,
+                                              std::set<ChunkRange>* chunkRanges) const;

     // Full namespace of the collection for this targeter
     NamespaceString _nss;
diff --git a/src/mongo/s/chunk_manager_targeter_test.cpp b/src/mongo/s/chunk_manager_targeter_test.cpp
index 4a60e05e139..f03062479c6 100644
--- a/src/mongo/s/chunk_manager_targeter_test.cpp
+++ b/src/mongo/s/chunk_manager_targeter_test.cpp
@@ -35,6 +35,7 @@
 #include "mongo/db/timeseries/timeseries_constants.h"
 #include "mongo/db/timeseries/timeseries_index_schema_conversion_functions.h"
 #include "mongo/db/timeseries/timeseries_options.h"
+#include "mongo/logv2/log.h"
 #include "mongo/s/catalog_cache_test_fixture.h"
 #include "mongo/s/chunk_manager_targeter.h"
 #include "mongo/s/session_catalog_router.h"
@@ -42,6 +43,8 @@
 #include "mongo/s/write_ops/batched_command_request.h"
 #include "mongo/unittest/unittest.h"

+#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest
+
 namespace mongo {
 namespace {

@@ -74,10 +77,68 @@ public:
         chunkManager =
             makeChunkManager(kNss, ShardKeyPattern(shardKeyPattern), nullptr, false, splitPoints);
         return ChunkManagerTargeter(operationContext(), kNss);
-    }
+    };

     boost::optional<ChunkManager> chunkManager;
+
+protected:
+    bool checkChunkRanges = false;
+
+    void testTargetInsertWithRangePrefixHashedShardKeyCommon(
+        OperationContext* opCtx, const ChunkManagerTargeter& cmTargeter);
+    void testTargetInsertWithRangePrefixHashedShardKeyCustomChunkManager();
+    void testTargetInsertWithRangePrefixHashedShardKey();
+    void testTargetInsertsWithVaryingHashedPrefixAndConstantRangedSuffix();
+    void testTargetInsertsWithConstantHashedPrefixAndVaryingRangedSuffix();
+    void testTargetUpdateWithRangePrefixHashedShardKey();
+    void testTargetUpdateWithHashedPrefixHashedShardKey();
+    void testTargetDeleteWithRangePrefixHashedShardKey();
+    void testTargetDeleteWithHashedPrefixHashedShardKey();
+    void testTargetDeleteWithExactId();
 };

+class ChunkManagerTargeterWithChunkRangesTest : public ChunkManagerTargeterTest {
+public:
+    ChunkManagerTargeterWithChunkRangesTest() {
+        checkChunkRanges = true;
+    };
+
+protected:
+    void testTargetInsertWithRangePrefixHashedShardKey() {
+        ChunkManagerTargeterTest::testTargetInsertWithRangePrefixHashedShardKey();
+    };
+
+    void testTargetInsertWithRangePrefixHashedShardKeyCustomChunkManager() {
+        ChunkManagerTargeterTest::testTargetInsertWithRangePrefixHashedShardKeyCustomChunkManager();
+    };
+
+    void testTargetInsertsWithVaryingHashedPrefixAndConstantRangedSuffix() {
+        ChunkManagerTargeterTest::testTargetInsertsWithVaryingHashedPrefixAndConstantRangedSuffix();
+    };
+
+    void testTargetInsertsWithConstantHashedPrefixAndVaryingRangedSuffix() {
+        ChunkManagerTargeterTest::testTargetInsertsWithConstantHashedPrefixAndVaryingRangedSuffix();
+    };
+
+    void testTargetUpdateWithRangePrefixHashedShardKey() {
+        ChunkManagerTargeterTest::testTargetUpdateWithRangePrefixHashedShardKey();
+    };
+
+    void testTargetUpdateWithHashedPrefixHashedShardKey() {
+        ChunkManagerTargeterTest::testTargetUpdateWithHashedPrefixHashedShardKey();
+    }
+
+    void testTargetDeleteWithRangePrefixHashedShardKey() {
+        ChunkManagerTargeterTest::testTargetDeleteWithRangePrefixHashedShardKey();
+    }
+
+    void testTargetDeleteWithHashedPrefixHashedShardKey() {
+        ChunkManagerTargeterTest::testTargetDeleteWithHashedPrefixHashedShardKey();
+    }
+
+    void testTargetDeleteWithExactId() {
+        ChunkManagerTargeterTest::testTargetDeleteWithExactId();
+    }
+};

 /**
  * This is the common part of test TargetInsertWithRangePrefixHashedShardKey and
@@ -85,44 +146,67 @@ public:
  * Tests that the destination shard is the correct one as defined from the split points
  * when the ChunkManager was constructed.
 */
-void testTargetInsertWithRangePrefixHashedShardKeyCommon(OperationContext* opCtx,
-                                                         const ChunkManagerTargeter& cmTargeter) {
+void ChunkManagerTargeterTest::testTargetInsertWithRangePrefixHashedShardKeyCommon(
+    OperationContext* opCtx, const ChunkManagerTargeter& cmTargeter) {
+    std::set<ChunkRange> chunkRanges;
+
     // Caller has created 5 chunks and 5 shards such that shardId '0' has chunk [MinKey, null), '1'
     // has chunk [null, -100), '2' has chunk [-100, 0), '3' has chunk ['0', 100) and '4' has chunk
     // [100, MaxKey).
-    auto res = cmTargeter.targetInsert(opCtx, fromjson("{a: {b: -111}, c: {d: '1'}}"));
+    auto res = cmTargeter.targetInsert(
+        opCtx, fromjson("{a: {b: -111}, c: {d: '1'}}"), checkChunkRanges ? &chunkRanges : nullptr);
     ASSERT_EQUALS(res.shardName, "1");

-    res = cmTargeter.targetInsert(opCtx, fromjson("{a: {b: -10}}"));
+    res = cmTargeter.targetInsert(
+        opCtx, fromjson("{a: {b: -10}}"), checkChunkRanges ? &chunkRanges : nullptr);
     ASSERT_EQUALS(res.shardName, "2");

-    res = cmTargeter.targetInsert(opCtx, fromjson("{a: {b: 0}, c: {d: 4}}"));
+    res = cmTargeter.targetInsert(
+        opCtx, fromjson("{a: {b: 0}, c: {d: 4}}"), checkChunkRanges ? &chunkRanges : nullptr);
     ASSERT_EQUALS(res.shardName, "3");

-    res = cmTargeter.targetInsert(opCtx, fromjson("{a: {b: 1000}, c: null, d: {}}"));
+    res = cmTargeter.targetInsert(opCtx,
+                                  fromjson("{a: {b: 1000}, c: null, d: {}}"),
+                                  checkChunkRanges ? &chunkRanges : nullptr);
     ASSERT_EQUALS(res.shardName, "4");

     // Missing field will be treated as null and will be targeted to the chunk which holds null,
     // which is shard '1'.
-    res = cmTargeter.targetInsert(opCtx, BSONObj());
+    res = cmTargeter.targetInsert(opCtx, BSONObj(), checkChunkRanges ? &chunkRanges : nullptr);
     ASSERT_EQUALS(res.shardName, "1");

-    res = cmTargeter.targetInsert(opCtx, BSON("a" << 10));
+    res =
+        cmTargeter.targetInsert(opCtx, BSON("a" << 10), checkChunkRanges ? &chunkRanges : nullptr);
     ASSERT_EQUALS(res.shardName, "1");

     // Arrays along shard key path are not allowed.
-    ASSERT_THROWS_CODE(cmTargeter.targetInsert(opCtx, fromjson("{a: [1,2]}")),
+    ASSERT_THROWS_CODE(cmTargeter.targetInsert(opCtx,
+                                               fromjson("{a: [1,2]}"),
+                                               checkChunkRanges ? &chunkRanges : nullptr),
                        DBException,
                        ErrorCodes::ShardKeyNotFound);
-    ASSERT_THROWS_CODE(cmTargeter.targetInsert(opCtx, fromjson("{c: [1,2]}")),
+    ASSERT_THROWS_CODE(cmTargeter.targetInsert(opCtx,
+                                               fromjson("{c: [1,2]}"),
+                                               checkChunkRanges ? &chunkRanges : nullptr),
                        DBException,
                        ErrorCodes::ShardKeyNotFound);
-    ASSERT_THROWS_CODE(cmTargeter.targetInsert(opCtx, fromjson("{c: {d: [1,2]}}")),
+    ASSERT_THROWS_CODE(cmTargeter.targetInsert(opCtx,
+                                               fromjson("{c: {d: [1,2]}}"),
+                                               checkChunkRanges ? &chunkRanges : nullptr),
                        DBException,
                        ErrorCodes::ShardKeyNotFound);
 }

 TEST_F(ChunkManagerTargeterTest, TargetInsertWithRangePrefixHashedShardKey) {
+    testTargetInsertWithRangePrefixHashedShardKey();
+}
+
+TEST_F(ChunkManagerTargeterWithChunkRangesTest, TargetInsertWithRangePrefixHashedShardKey) {
+    testTargetInsertWithRangePrefixHashedShardKey();
+}
+
+void ChunkManagerTargeterTest::testTargetInsertWithRangePrefixHashedShardKey() {
+    std::set<ChunkRange> chunkRanges;
     // Create 5 chunks and 5 shards such that shardId '0' has chunk [MinKey, null), '1' has chunk
     // [null, -100), '2' has chunk [-100, 0), '3' has chunk ['0', 100) and '4' has chunk
     // [100, MaxKey).
@@ -186,6 +270,16 @@ ChunkManager makeCustomChunkManager(const ShardKeyPattern& shardKeyPattern,

 TEST_F(ChunkManagerTargeterTest, TargetInsertWithRangePrefixHashedShardKeyCustomChunkManager) {
+    testTargetInsertWithRangePrefixHashedShardKeyCustomChunkManager();
+}
+
+TEST_F(ChunkManagerTargeterWithChunkRangesTest,
+       TargetInsertWithRangePrefixHashedShardKeyCustomChunkManager) {
+    testTargetInsertWithRangePrefixHashedShardKeyCustomChunkManager();
+}
+
+void ChunkManagerTargeterTest::testTargetInsertWithRangePrefixHashedShardKeyCustomChunkManager() {
+    std::set<ChunkRange> chunkRanges;
     // Create 5 chunks and 5 shards such that shardId '0' has chunk [MinKey, null), '1' has chunk
     // [null, -100), '2' has chunk [-100, 0), '3' has chunk ['0', 100) and '4' has chunk
     // [100, MaxKey).
@@ -213,6 +307,15 @@ TEST_F(ChunkManagerTargeterTest, TargetInsertWithRangePrefixHashedShardKeyCustom
 }

 TEST_F(ChunkManagerTargeterTest, TargetInsertsWithVaryingHashedPrefixAndConstantRangedSuffix) {
+    testTargetInsertsWithVaryingHashedPrefixAndConstantRangedSuffix();
+}
+
+TEST_F(ChunkManagerTargeterWithChunkRangesTest,
+       TargetInsertsWithVaryingHashedPrefixAndConstantRangedSuffix) {
+    testTargetInsertsWithVaryingHashedPrefixAndConstantRangedSuffix();
+}
+
+void ChunkManagerTargeterTest::testTargetInsertsWithVaryingHashedPrefixAndConstantRangedSuffix() {
     // Create 4 chunks and 4 shards such that shardId '0' has chunk [MinKey, -2^62), '1' has chunk
     // [-2^62, 0), '2' has chunk ['0', 2^62) and '3' has chunk [2^62, MaxKey).
     std::vector<BSONObj> splitPoints = {
@@ -223,23 +326,47 @@ TEST_F(ChunkManagerTargeterTest, TargetInsertsWithVaryingHashedPrefixAndConstant
                               splitPoints);

     for (int i = 0; i < 1000; i++) {
+        std::set<ChunkRange> chunkRanges;
         auto insertObj = BSON("a" << BSON("b" << i) << "c" << BSON("d" << 10));
-        auto res = cmTargeter.targetInsert(operationContext(), insertObj);
+        auto res = cmTargeter.targetInsert(
+            operationContext(), insertObj, checkChunkRanges ? &chunkRanges : nullptr);

         // Verify that the given document is being routed based on hashed value of 'i'.
-        auto chunk = chunkManager->findIntersectingChunkWithSimpleCollation(
-            BSON("a.b" << BSONElementHasher::hash64(insertObj["a"]["b"],
-                                                    BSONElementHasher::DEFAULT_HASH_SEED)));
+        auto hashValue =
+            BSONElementHasher::hash64(insertObj["a"]["b"], BSONElementHasher::DEFAULT_HASH_SEED);
+        auto chunk =
+            chunkManager->findIntersectingChunkWithSimpleCollation(BSON("a.b" << hashValue));
         ASSERT_EQUALS(res.shardName, chunk.getShardId());
+        if (checkChunkRanges) {
+            // Verify that the chunk range returned is correct and contains the hashValue.
+            ASSERT_EQUALS(chunkRanges.size(), 1);
+            ASSERT_BSONOBJ_EQ(chunkRanges.cbegin()->getMin(), chunk.getMin());
+            ASSERT_BSONOBJ_EQ(chunkRanges.cbegin()->getMax(), chunk.getMax());
+            ASSERT_BSONOBJ_LTE(chunk.getMin(), BSON("a.b" << hashValue));
+            ASSERT_BSONOBJ_LT(BSON("a.b" << hashValue), chunk.getMax());
+        }
     }

     // Arrays along shard key path are not allowed.
-    ASSERT_THROWS_CODE(cmTargeter.targetInsert(operationContext(), fromjson("{a: [1,2]}")),
+    std::set<ChunkRange> chunkRanges;
+    ASSERT_THROWS_CODE(cmTargeter.targetInsert(operationContext(),
+                                               fromjson("{a: [1,2]}"),
+                                               checkChunkRanges ? &chunkRanges : nullptr),
                        DBException,
                        ErrorCodes::ShardKeyNotFound);
-}
+}  // namespace

 TEST_F(ChunkManagerTargeterTest, TargetInsertsWithConstantHashedPrefixAndVaryingRangedSuffix) {
+    testTargetInsertsWithConstantHashedPrefixAndVaryingRangedSuffix();
+}
+
+TEST_F(ChunkManagerTargeterWithChunkRangesTest,
+       TargetInsertsWithConstantHashedPrefixAndVaryingRangedSuffix) {
+    testTargetInsertsWithConstantHashedPrefixAndVaryingRangedSuffix();
+}
+
+void ChunkManagerTargeterTest::testTargetInsertsWithConstantHashedPrefixAndVaryingRangedSuffix() {
+    std::set<ChunkRange> chunkRanges;
     // For the purpose of this test, we will keep the hashed field constant to 0 so that we can
     // correctly test the targeting based on range field.
     auto hashedValueOfZero = BSONElementHasher::hash64(BSON("" << 0).firstElement(),
@@ -259,28 +386,95 @@ TEST_F(ChunkManagerTargeterTest, TargetInsertsWithConstantHashedPrefixAndVarying
                               << "c.d" << 1),
                           splitPoints);

-    auto res = cmTargeter.targetInsert(operationContext(), fromjson("{a: {b: 0}, c: {d: -111}}"));
+    auto res = cmTargeter.targetInsert(operationContext(),
+                                       fromjson("{a: {b: 0}, c: {d: -111}}"),
+                                       checkChunkRanges ? &chunkRanges : nullptr);
     ASSERT_EQUALS(res.shardName, "1");
+    if (this->checkChunkRanges) {
+        ASSERT_EQUALS(chunkRanges.size(), 1);
+        ASSERT_BSONOBJ_EQ(chunkRanges.cbegin()->getMin(),
+                          BSON("a.b" << hashedValueOfZero << "c.d" << BSONNULL));
+        ASSERT_BSONOBJ_EQ(chunkRanges.cbegin()->getMax(),
+                          BSON("a.b" << hashedValueOfZero << "c.d" << -100));
+        chunkRanges.clear();
+    }

-    res = cmTargeter.targetInsert(operationContext(), fromjson("{a: {b: 0}, c: {d: -11}}"));
+    res = cmTargeter.targetInsert(operationContext(),
+                                  fromjson("{a: {b: 0}, c: {d: -11}}"),
+                                  this->checkChunkRanges ? &chunkRanges : nullptr);
     ASSERT_EQUALS(res.shardName, "2");
+    if (this->checkChunkRanges) {
+        ASSERT_EQUALS(chunkRanges.size(), 1);
+        ASSERT_BSONOBJ_EQ(chunkRanges.cbegin()->getMin(),
+                          BSON("a.b" << hashedValueOfZero << "c.d" << -100));
+        ASSERT_BSONOBJ_EQ(chunkRanges.cbegin()->getMax(),
+                          BSON("a.b" << hashedValueOfZero << "c.d" << 0));
+        chunkRanges.clear();
+    }

-    res = cmTargeter.targetInsert(operationContext(), fromjson("{a: {b: 0}, c: {d: 0}}"));
+    res = cmTargeter.targetInsert(operationContext(),
+                                  fromjson("{a: {b: 0}, c: {d: 0}}"),
+                                  this->checkChunkRanges ? &chunkRanges : nullptr);
     ASSERT_EQUALS(res.shardName, "3");
+    if (this->checkChunkRanges) {
+        ASSERT_EQUALS(chunkRanges.size(), 1);
+        ASSERT_BSONOBJ_EQ(chunkRanges.cbegin()->getMin(),
+                          BSON("a.b" << hashedValueOfZero << "c.d" << 0));
+        ASSERT_BSONOBJ_EQ(chunkRanges.cbegin()->getMax(),
+                          BSON("a.b" << hashedValueOfZero << "c.d" << 100));
+        chunkRanges.clear();
+    }

-    res = cmTargeter.targetInsert(operationContext(), fromjson("{a: {b: 0}, c: {d: 111}}"));
+    res = cmTargeter.targetInsert(operationContext(),
+                                  fromjson("{a: {b: 0}, c: {d: 111}}"),
+                                  this->checkChunkRanges ? &chunkRanges : nullptr);
     ASSERT_EQUALS(res.shardName, "4");
+    if (this->checkChunkRanges) {
+        ASSERT_EQUALS(chunkRanges.size(), 1);
+        ASSERT_BSONOBJ_EQ(chunkRanges.cbegin()->getMin(),
+                          BSON("a.b" << hashedValueOfZero << "c.d" << 100));
+        ASSERT_BSONOBJ_EQ(chunkRanges.cbegin()->getMax(), BSON("a.b" << MAXKEY << "c.d" << MAXKEY));
+        chunkRanges.clear();
+    }

     // Missing field will be treated as null and will be targeted to the chunk which holds null,
     // which is shard '1'.
-    res = cmTargeter.targetInsert(operationContext(), fromjson("{a: {b: 0}}"));
+    res = cmTargeter.targetInsert(
+        operationContext(), fromjson("{a: {b: 0}}"), checkChunkRanges ? &chunkRanges : nullptr);
     ASSERT_EQUALS(res.shardName, "1");
+    if (checkChunkRanges) {
+        ASSERT_EQUALS(chunkRanges.size(), 1);
+        ASSERT_BSONOBJ_EQ(chunkRanges.cbegin()->getMin(),
+                          BSON("a.b" << hashedValueOfZero << "c.d" << BSONNULL));
+        ASSERT_BSONOBJ_EQ(chunkRanges.cbegin()->getMax(),
+                          BSON("a.b" << hashedValueOfZero << "c.d" << -100));
+        chunkRanges.clear();
+    }

-    res = cmTargeter.targetInsert(operationContext(), fromjson("{a: {b: 0}}, c: 5}"));
+    res = cmTargeter.targetInsert(operationContext(),
+                                  fromjson("{a: {b: 0}}, c: 5}"),
+                                  checkChunkRanges ? &chunkRanges : nullptr);
     ASSERT_EQUALS(res.shardName, "1");
+    if (checkChunkRanges) {
+        ASSERT_EQUALS(chunkRanges.size(), 1);
+        ASSERT_BSONOBJ_EQ(chunkRanges.cbegin()->getMin(),
+                          BSON("a.b" << hashedValueOfZero << "c.d" << BSONNULL));
+        ASSERT_BSONOBJ_EQ(chunkRanges.cbegin()->getMax(),
+                          BSON("a.b" << hashedValueOfZero << "c.d" << -100));
+        chunkRanges.clear();
+    }
 }

 TEST_F(ChunkManagerTargeterTest, TargetUpdateWithRangePrefixHashedShardKey) {
+    testTargetUpdateWithRangePrefixHashedShardKey();
+}
+
+TEST_F(ChunkManagerTargeterWithChunkRangesTest, TargetUpdateWithRangePrefixHashedShardKey) {
+    testTargetUpdateWithRangePrefixHashedShardKey();
+}
+
+void ChunkManagerTargeterTest::testTargetUpdateWithRangePrefixHashedShardKey() {
+    std::set<ChunkRange> chunkRanges;
     // Create 5 chunks and 5 shards such that shardId '0' has chunk [MinKey, null), '1' has chunk
     // [null, -100), '2' has chunk [-100, 0), '3' has chunk ['0', 100) and '4' has chunk
     // [100, MaxKey).
@@ -291,68 +485,137 @@ TEST_F(ChunkManagerTargeterTest, TargetUpdateWithRangePrefixHashedShardKey) {
                           splitPoints);

     // When update targets using replacement object.
-    auto request =
-        buildUpdate(kNss, fromjson("{'a.b': {$gt : 2}}"), fromjson("{a: {b: -1}}"), false);
-    auto res = cmTargeter.targetUpdate(operationContext(), BatchItemRef(&request, 0));
+    auto request = buildUpdate(
+        kNss, fromjson("{'a.b': {$gt : 2}}"), fromjson("{a: {b: -1}}"), /*upsert=*/false);
+    auto res = cmTargeter.targetUpdate(
+        operationContext(), BatchItemRef(&request, 0), checkChunkRanges ? &chunkRanges : nullptr);
     ASSERT_EQUALS(res.size(), 1);
     ASSERT_EQUALS(res[0].shardName, "2");
+    if (checkChunkRanges) {
+        ASSERT_EQUALS(chunkRanges.size(), 1);
+        ASSERT_BSONOBJ_EQ(chunkRanges.cbegin()->getMin(), BSON("a.b" << -100 << "c.d" << MINKEY));
+        ASSERT_BSONOBJ_EQ(chunkRanges.cbegin()->getMax(), BSON("a.b" << 0 << "c.d" << MINKEY));
+        chunkRanges.clear();
+    }

     // When update targets using query.
     auto requestAndSet =
         buildUpdate(kNss,
                     fromjson("{$and: [{'a.b': {$gte : 0}}, {'a.b': {$lt: 99}}]}}"),
                     fromjson("{$set: {p : 1}}"),
                     false);
-    res = cmTargeter.targetUpdate(operationContext(), BatchItemRef(&requestAndSet, 0));
+    res = cmTargeter.targetUpdate(operationContext(),
+                                  BatchItemRef(&requestAndSet, 0),
+                                  checkChunkRanges ? &chunkRanges : nullptr);
     ASSERT_EQUALS(res.size(), 1);
     ASSERT_EQUALS(res[0].shardName, "3");
+    if (checkChunkRanges) {
+        ASSERT_EQUALS(chunkRanges.size(), 1);
+        ASSERT_BSONOBJ_EQ(chunkRanges.cbegin()->getMin(), BSON("a.b" << 0 << "c.d" << MINKEY));
+        ASSERT_BSONOBJ_EQ(chunkRanges.cbegin()->getMax(), BSON("a.b" << 100 << "c.d" << MINKEY));
+        chunkRanges.clear();
+    }

     auto requestLT =
         buildUpdate(kNss, fromjson("{'a.b': {$lt : -101}}"), fromjson("{a: {b: 111}}"), false);
-    res = cmTargeter.targetUpdate(operationContext(), BatchItemRef(&requestLT, 0));
+    res = cmTargeter.targetUpdate(
+        operationContext(), BatchItemRef(&requestLT, 0), checkChunkRanges ? &chunkRanges : nullptr);
     ASSERT_EQUALS(res.size(), 1);
     ASSERT_EQUALS(res[0].shardName, "1");
+    if (checkChunkRanges) {
+        ASSERT_EQUALS(chunkRanges.size(), 1);
+        ASSERT_BSONOBJ_EQ(chunkRanges.cbegin()->getMin(),
+                          BSON("a.b" << BSONNULL << "c.d" << MINKEY));
+        ASSERT_BSONOBJ_EQ(chunkRanges.cbegin()->getMax(), BSON("a.b" << -100 << "c.d" << MINKEY));
+        chunkRanges.clear();
+    }

     // For op-style updates, query on _id gets targeted to all shards.
     auto requestOpUpdate =
         buildUpdate(kNss, fromjson("{_id: 1}"), fromjson("{$set: {p: 111}}"), false);
-    res = cmTargeter.targetUpdate(operationContext(), BatchItemRef(&requestOpUpdate, 0));
+    res = cmTargeter.targetUpdate(operationContext(),
+                                  BatchItemRef(&requestOpUpdate, 0),
+                                  checkChunkRanges ? &chunkRanges : nullptr);
     ASSERT_EQUALS(res.size(), 5);
+    if (checkChunkRanges) {
+        ASSERT_EQUALS(chunkRanges.size(), 5);
+        auto itRange = chunkRanges.cbegin();
+        ASSERT_BSONOBJ_EQ(itRange->getMin(), BSON("a.b" << MINKEY << "c.d" << MINKEY));
+        ASSERT_BSONOBJ_EQ(itRange->getMax(), BSON("a.b" << BSONNULL << "c.d" << MINKEY));
+        ++itRange;
+        ASSERT_BSONOBJ_EQ(itRange->getMin(), BSON("a.b" << BSONNULL << "c.d" << MINKEY));
+        ASSERT_BSONOBJ_EQ(itRange->getMax(), BSON("a.b" << -100 << "c.d" << MINKEY));
+        ++itRange;
+        ASSERT_BSONOBJ_EQ(itRange->getMin(), BSON("a.b" << -100 << "c.d" << MINKEY));
+        ASSERT_BSONOBJ_EQ(itRange->getMax(), BSON("a.b" << 0 << "c.d" << MINKEY));
+        ++itRange;
+        ASSERT_BSONOBJ_EQ(itRange->getMin(), BSON("a.b" << 0 << "c.d" << MINKEY));
+        ASSERT_BSONOBJ_EQ(itRange->getMax(), BSON("a.b" << 100 << "c.d" << MINKEY));
+        ++itRange;
+        ASSERT_BSONOBJ_EQ(itRange->getMin(), BSON("a.b" << 100 << "c.d" << MINKEY));
+        ASSERT_BSONOBJ_EQ(itRange->getMax(), BSON("a.b" << MAXKEY << "c.d" << MAXKEY));
+        chunkRanges.clear();
+    }

     // For replacement style updates, query on _id uses replacement doc to target. If the
     // replacement doc doesn't have shard key fields, then update should be routed to the shard
     // holding 'null' shard key documents.
     auto requestReplUpdate = buildUpdate(kNss, fromjson("{_id: 1}"), fromjson("{p: 111}}"), false);
-    res = cmTargeter.targetUpdate(operationContext(), BatchItemRef(&requestReplUpdate, 0));
+    res = cmTargeter.targetUpdate(operationContext(),
+                                  BatchItemRef(&requestReplUpdate, 0),
+                                  checkChunkRanges ? &chunkRanges : nullptr);
     ASSERT_EQUALS(res.size(), 1);
     ASSERT_EQUALS(res[0].shardName, "1");
-
+    if (checkChunkRanges) {
+        ASSERT_EQUALS(chunkRanges.size(), 1);
+        ASSERT_BSONOBJ_EQ(chunkRanges.cbegin()->getMin(),
+                          BSON("a.b" << BSONNULL << "c.d" << MINKEY));
+        ASSERT_BSONOBJ_EQ(chunkRanges.cbegin()->getMax(), BSON("a.b" << -100 << "c.d" << MINKEY));
+        chunkRanges.clear();
+    }
     // Upsert requires full shard key in query, even if the query can target a single shard.
     auto requestFullKey = buildUpdate(kNss,
                                       fromjson("{'a.b': 100, 'c.d' : {$exists: false}}}"),
                                       fromjson("{a: {b: -111}}"),
                                       true);
-    ASSERT_THROWS_CODE(
-        cmTargeter.targetUpdate(operationContext(), BatchItemRef(&requestFullKey, 0)),
-        DBException,
-        ErrorCodes::ShardKeyNotFound);
+    ASSERT_THROWS_CODE(cmTargeter.targetUpdate(operationContext(),
+                                               BatchItemRef(&requestFullKey, 0),
+                                               checkChunkRanges ? &chunkRanges : nullptr),
+                       DBException,
+                       ErrorCodes::ShardKeyNotFound);

     // Upsert success case.
     auto requestSuccess =
         buildUpdate(kNss, fromjson("{'a.b': 100, 'c.d': 'val'}"), fromjson("{a: {b: -111}}"), true);
-    res = cmTargeter.targetUpdate(operationContext(), BatchItemRef(&requestSuccess, 0));
+    res = cmTargeter.targetUpdate(operationContext(),
+                                  BatchItemRef(&requestSuccess, 0),
+                                  checkChunkRanges ? &chunkRanges : nullptr);
     ASSERT_EQUALS(res.size(), 1);
     ASSERT_EQUALS(res[0].shardName, "4");
+    if (checkChunkRanges) {
+        ASSERT_EQUALS(chunkRanges.size(), 1);
+        ASSERT_BSONOBJ_EQ(chunkRanges.cbegin()->getMin(), BSON("a.b" << 100 << "c.d" << MINKEY));
+        ASSERT_BSONOBJ_EQ(chunkRanges.cbegin()->getMax(), BSON("a.b" << MAXKEY << "c.d" << MAXKEY));
+        chunkRanges.clear();
+    }
 }

 TEST_F(ChunkManagerTargeterTest, TargetUpdateWithHashedPrefixHashedShardKey) {
+    testTargetUpdateWithHashedPrefixHashedShardKey();
+}
+
+TEST_F(ChunkManagerTargeterWithChunkRangesTest, TargetUpdateWithHashedPrefixHashedShardKey) {
+    testTargetUpdateWithHashedPrefixHashedShardKey();
+}
+
+void ChunkManagerTargeterTest::testTargetUpdateWithHashedPrefixHashedShardKey() {
     auto findChunk = [&](BSONElement elem) {
         return chunkManager->findIntersectingChunkWithSimpleCollation(
             BSON("a.b" << BSONElementHasher::hash64(elem, BSONElementHasher::DEFAULT_HASH_SEED)));
     };

-    // Create 4 chunks and 4 shards such that shardId '0' has chunk [MinKey, -2^62), '1' has chunk
-    // [-2^62, 0), '2' has chunk ['0', 2^62) and '3' has chunk [2^62, MaxKey).
+    // Create 4 chunks and 4 shards such that shardId '0' has chunk [MinKey, -2^62), '1' has
+    // chunk [-2^62, 0), '2' has chunk ['0', 2^62) and '3' has chunk [2^62, MaxKey).
     std::vector<BSONObj> splitPoints = {
         BSON("a.b" << -(1LL << 62)), BSON("a.b" << 0LL), BSON("a.b" << (1LL << 62))};
     auto cmTargeter = prepare(BSON("a.b"
@@ -361,33 +624,101 @@ TEST_F(ChunkManagerTargeterTest, TargetUpdateWithHashedPrefixHashedShardKey) {
                               splitPoints);

     for (int i = 0; i < 1000; i++) {
+        std::set<ChunkRange> chunkRanges;
         auto updateQueryObj = BSON("a" << BSON("b" << i) << "c" << BSON("d" << 10));

         // Verify that the given document is being routed based on hashed value of 'i' in
         // 'updateQueryObj'.
         auto request = buildUpdate(kNss, updateQueryObj, fromjson("{$set: {p: 1}}"), false);
-        const auto res = cmTargeter.targetUpdate(operationContext(), BatchItemRef(&request, 0));
+        const auto res = cmTargeter.targetUpdate(operationContext(),
+                                                 BatchItemRef(&request, 0),
+                                                 checkChunkRanges ? &chunkRanges : nullptr);
         ASSERT_EQUALS(res.size(), 1);
-        ASSERT_EQUALS(res[0].shardName, findChunk(updateQueryObj["a"]["b"]).getShardId());
+        auto chunk = findChunk(updateQueryObj["a"]["b"]);
+        ASSERT_EQUALS(res[0].shardName, chunk.getShardId());
+        if (checkChunkRanges) {
+            ASSERT_EQUALS(chunkRanges.size(), 1);
+            ASSERT_BSONOBJ_EQ(chunkRanges.cbegin()->getMin(), chunk.getMin());
+            ASSERT_BSONOBJ_EQ(chunkRanges.cbegin()->getMax(), chunk.getMax());
+        }
     }

     // Range queries on hashed field cannot be used for targeting. In this case, update will be
     // targeted based on update document.
+    std::set<ChunkRange> chunkRanges;
     const auto updateObj = fromjson("{a: {b: -1}}");
     auto requestUpdate = buildUpdate(kNss, fromjson("{'a.b': {$gt : 101}}"), updateObj, false);
-    auto res = cmTargeter.targetUpdate(operationContext(), BatchItemRef(&requestUpdate, 0));
+    auto res = cmTargeter.targetUpdate(operationContext(),
+                                       BatchItemRef(&requestUpdate, 0),
+                                       checkChunkRanges ? &chunkRanges : nullptr);
     ASSERT_EQUALS(res.size(), 1);
-    ASSERT_EQUALS(res[0].shardName, findChunk(updateObj["a"]["b"]).getShardId());
+    auto chunk = findChunk(updateObj["a"]["b"]);
+    ASSERT_EQUALS(res[0].shardName, chunk.getShardId());
+    if (checkChunkRanges) {
+        ASSERT_EQUALS(chunkRanges.size(), 1);
+        ASSERT_BSONOBJ_EQ(chunkRanges.cbegin()->getMin(), chunk.getMin());
+        ASSERT_BSONOBJ_EQ(chunkRanges.cbegin()->getMax(), chunk.getMax());
+        chunkRanges.clear();
+    }

     auto requestErr =
         buildUpdate(kNss, fromjson("{'a.b': {$gt : 101}}"), fromjson("{$set: {p: 1}}"), false);
-    ASSERT_THROWS_CODE(cmTargeter.targetUpdate(operationContext(), BatchItemRef(&requestErr, 0)),
+    ASSERT_THROWS_CODE(cmTargeter.targetUpdate(operationContext(),
+                                               BatchItemRef(&requestErr, 0),
+                                               checkChunkRanges ? &chunkRanges : nullptr),
                        DBException,
                        ErrorCodes::InvalidOptions);
 }

+TEST_F(ChunkManagerTargeterTest, TargetDeleteWithExactId) {
+    testTargetDeleteWithExactId();
+}
+
+TEST_F(ChunkManagerTargeterWithChunkRangesTest, TargetDeleteWithExactId) {
+    testTargetDeleteWithExactId();
+}
+
+void ChunkManagerTargeterTest::testTargetDeleteWithExactId() {
+    std::set<ChunkRange> chunkRanges;
+    std::vector<BSONObj> splitPoints = {
+        BSON("a.b" << BSONNULL), BSON("a.b" << -100), BSON("a.b" << 0), BSON("a.b" << 100)};
+    auto cmTargeter = prepare(BSON("a.b" << 1), splitPoints);
+
+    auto requestId = buildDelete(kNss, fromjson("{_id: 68755000}"));
+    auto res = cmTargeter.targetDelete(
+        operationContext(), BatchItemRef(&requestId, 0), checkChunkRanges ? &chunkRanges : nullptr);
+    ASSERT_EQUALS(res[0].shardName, "0");
+    if (checkChunkRanges) {
+        ASSERT_EQUALS(chunkRanges.size(), 5);
+        auto itRange = chunkRanges.cbegin();
+        ASSERT_BSONOBJ_EQ(itRange->getMin(), BSON("a.b" << MINKEY));
+        ASSERT_BSONOBJ_EQ(itRange->getMax(), BSON("a.b" << BSONNULL));
+        ++itRange;
+        ASSERT_BSONOBJ_EQ(itRange->getMin(), BSON("a.b" << BSONNULL));
+        ASSERT_BSONOBJ_EQ(itRange->getMax(), BSON("a.b" << -100));
+        ++itRange;
+        ASSERT_BSONOBJ_EQ(itRange->getMin(), BSON("a.b" << -100));
+        ASSERT_BSONOBJ_EQ(itRange->getMax(), BSON("a.b" << 0));
+        ++itRange;
+        ASSERT_BSONOBJ_EQ(itRange->getMin(), BSON("a.b" << 0));
+        ASSERT_BSONOBJ_EQ(itRange->getMax(), BSON("a.b" << 100));
+        ++itRange;
+        ASSERT_BSONOBJ_EQ(itRange->getMin(), BSON("a.b" << 100));
+        ASSERT_BSONOBJ_EQ(itRange->getMax(), BSON("a.b" << MAXKEY));
+    }
+}
+
 TEST_F(ChunkManagerTargeterTest, TargetDeleteWithRangePrefixHashedShardKey) {
-    // Create 5 chunks and 5 shards such that shardId '0' has chunk [MinKey, null), '1' has chunk
-    // [null, -100), '2' has chunk [-100, 0), '3' has chunk ['0', 100) and '4' has chunk
+    testTargetDeleteWithRangePrefixHashedShardKey();
+}
+
+TEST_F(ChunkManagerTargeterWithChunkRangesTest, TargetDeleteWithRangePrefixHashedShardKey) {
+    testTargetDeleteWithRangePrefixHashedShardKey();
+}
+
+void ChunkManagerTargeterTest::testTargetDeleteWithRangePrefixHashedShardKey() {
+    std::set<ChunkRange> chunkRanges;
+    // Create 5 chunks and 5 shards such that shardId '0' has chunk [MinKey, null), '1' has
+    // chunk [null, -100), '2' has chunk [-100, 0), '3' has chunk ['0', 100) and '4' has chunk
     // [100, MaxKey).
     std::vector<BSONObj> splitPoints = {
         BSON("a.b" << BSONNULL), BSON("a.b" << -100LL), BSON("a.b" << 0LL), BSON("a.b" << 100LL)};
@@ -397,43 +728,81 @@ TEST_F(ChunkManagerTargeterTest, TargetDeleteWithRangePrefixHashedShardKey) {

     // Cannot delete without full shardkey in the query.
     auto requestPartialKey = buildDelete(kNss, fromjson("{'a.b': {$gt : 2}}"));
-    ASSERT_THROWS_CODE(
-        cmTargeter.targetDelete(operationContext(), BatchItemRef(&requestPartialKey, 0)),
-        DBException,
-        ErrorCodes::ShardKeyNotFound);
+    ASSERT_THROWS_CODE(cmTargeter.targetDelete(operationContext(),
+                                               BatchItemRef(&requestPartialKey, 0),
+                                               checkChunkRanges ? &chunkRanges : nullptr),
+                       DBException,
+                       ErrorCodes::ShardKeyNotFound);

     auto requestPartialKey2 = buildDelete(kNss, fromjson("{'a.b': -101}"));
-    ASSERT_THROWS_CODE(
-        cmTargeter.targetDelete(operationContext(), BatchItemRef(&requestPartialKey2, 0)),
-        DBException,
-        ErrorCodes::ShardKeyNotFound);
+    ASSERT_THROWS_CODE(cmTargeter.targetDelete(operationContext(),
+                                               BatchItemRef(&requestPartialKey2, 0),
+                                               checkChunkRanges ? &chunkRanges : nullptr),
+                       DBException,
+                       ErrorCodes::ShardKeyNotFound);

     // Delete targeted correctly with full shard key in query.
     auto requestFullKey = buildDelete(kNss, fromjson("{'a.b': -101, 'c.d': 5}"));
-    auto res = cmTargeter.targetDelete(operationContext(), BatchItemRef(&requestFullKey, 0));
+    auto res = cmTargeter.targetDelete(operationContext(),
+                                       BatchItemRef(&requestFullKey, 0),
+                                       checkChunkRanges ? &chunkRanges : nullptr);
     ASSERT_EQUALS(res.size(), 1);
     ASSERT_EQUALS(res[0].shardName, "1");
+    if (checkChunkRanges) {
+        ASSERT_EQUALS(chunkRanges.size(), 1);
+        ASSERT_BSONOBJ_EQ(chunkRanges.cbegin()->getMin(),
+                          BSON("a.b" << BSONNULL << "c.d" << MINKEY));
+        ASSERT_BSONOBJ_EQ(chunkRanges.cbegin()->getMax(), BSON("a.b" << -100 << "c.d" << MINKEY));
+        chunkRanges.clear();
+    }

     // Query with MinKey value should go to chunk '0' because MinKey is smaller than BSONNULL.
     auto requestMinKey =
         buildDelete(kNss, BSONObjBuilder().appendMinKey("a.b").append("c.d", 4).obj());
-    res = cmTargeter.targetDelete(operationContext(), BatchItemRef(&requestMinKey, 0));
+    res = cmTargeter.targetDelete(operationContext(),
+                                  BatchItemRef(&requestMinKey, 0),
+                                  checkChunkRanges ? &chunkRanges : nullptr);
     ASSERT_EQUALS(res.size(), 1);
     ASSERT_EQUALS(res[0].shardName, "0");
+    if (checkChunkRanges) {
+        ASSERT_EQUALS(chunkRanges.size(), 1);
+        ASSERT_BSONOBJ_EQ(chunkRanges.cbegin()->getMin(), BSON("a.b" << MINKEY << "c.d" << MINKEY));
+        ASSERT_BSONOBJ_EQ(chunkRanges.cbegin()->getMax(),
+                          BSON("a.b" << BSONNULL << "c.d" << MINKEY));
+        chunkRanges.clear();
+    }

     auto requestMinKey2 = buildDelete(kNss, fromjson("{'a.b': 0, 'c.d': 5}"));
-    res = cmTargeter.targetDelete(operationContext(), BatchItemRef(&requestMinKey2, 0));
+    res = cmTargeter.targetDelete(operationContext(),
+                                  BatchItemRef(&requestMinKey2, 0),
+                                  checkChunkRanges ? &chunkRanges : nullptr);
     ASSERT_EQUALS(res.size(), 1);
     ASSERT_EQUALS(res[0].shardName, "3");
+    if (checkChunkRanges) {
+        ASSERT_EQUALS(chunkRanges.size(), 1);
+        ASSERT_BSONOBJ_EQ(chunkRanges.cbegin()->getMin(), BSON("a.b" << 0 << "c.d" << MINKEY));
+        ASSERT_BSONOBJ_EQ(chunkRanges.cbegin()->getMax(), BSON("a.b" << 100 << "c.d" << MINKEY));
+        chunkRanges.clear();
+    }
 }

 TEST_F(ChunkManagerTargeterTest, TargetDeleteWithHashedPrefixHashedShardKey) {
+    testTargetDeleteWithHashedPrefixHashedShardKey();
+}
+
+TEST_F(ChunkManagerTargeterWithChunkRangesTest, TargetDeleteWithHashedPrefixHashedShardKey) {
+    testTargetDeleteWithHashedPrefixHashedShardKey();
+}
+
+void ChunkManagerTargeterTest::testTargetDeleteWithHashedPrefixHashedShardKey() {
+    std::set<ChunkRange> chunkRanges;
     auto findChunk = [&](BSONElement elem) {
         return chunkManager->findIntersectingChunkWithSimpleCollation(
             BSON("a.b" << BSONElementHasher::hash64(elem, BSONElementHasher::DEFAULT_HASH_SEED)));
     };

-    // Create 4 chunks and 4 shards such that shardId '0' has chunk [MinKey, -2^62), '1' has chunk
+    // Create 4 chunks and 4 shards such that shardId '0' has chunk [MinKey, -2^62), '1' has
+    // chunk
     // [-2^62, 0), '2' has chunk ['0', 2^62) and '3' has chunk [2^62, MaxKey).
     std::vector<BSONObj> splitPoints = {
         BSON("a.b" << -(1LL << 62)), BSON("a.b" << 0LL), BSON("a.b" << (1LL << 62))};
@@ -447,14 +816,25 @@ TEST_F(ChunkManagerTargeterTest, TargetDeleteWithHashedPrefixHashedShardKey) {
         // Verify that the given document is being routed based on hashed value of 'i' in
         // 'queryObj'.
         auto request = buildDelete(kNss, queryObj);
-        const auto res = cmTargeter.targetDelete(operationContext(), BatchItemRef(&request, 0));
+        const auto res = cmTargeter.targetDelete(operationContext(),
+                                                 BatchItemRef(&request, 0),
+                                                 checkChunkRanges ? &chunkRanges : nullptr);
         ASSERT_EQUALS(res.size(), 1);
-        ASSERT_EQUALS(res[0].shardName, findChunk(queryObj["a"]["b"]).getShardId());
+        auto chunk = findChunk(queryObj["a"]["b"]);
+        ASSERT_EQUALS(res[0].shardName, chunk.getShardId());
+        if (checkChunkRanges) {
+            ASSERT_EQUALS(chunkRanges.size(), 1);
+            ASSERT_BSONOBJ_EQ(chunkRanges.cbegin()->getMin(), chunk.getMin());
+            ASSERT_BSONOBJ_EQ(chunkRanges.cbegin()->getMax(), chunk.getMax());
+            chunkRanges.clear();
+        }
     }

     // Range queries on hashed field cannot be used for targeting.
     auto request = buildDelete(kNss, fromjson("{'a.b': {$gt : 101}}"));
-    ASSERT_THROWS_CODE(cmTargeter.targetDelete(operationContext(), BatchItemRef(&request, 0)),
+    ASSERT_THROWS_CODE(cmTargeter.targetDelete(operationContext(),
+                                               BatchItemRef(&request, 0),
+                                               checkChunkRanges ? &chunkRanges : nullptr),
                        DBException,
                        ErrorCodes::ShardKeyNotFound);
 }
diff --git a/src/mongo/s/mock_ns_targeter.cpp b/src/mongo/s/mock_ns_targeter.cpp
index 640f2974239..d9bd26a5256 100644
--- a/src/mongo/s/mock_ns_targeter.cpp
+++ b/src/mongo/s/mock_ns_targeter.cpp
@@ -66,7 +66,8 @@ MockNSTargeter::MockNSTargeter(const NamespaceString& nss, std::vector<MockRange
     ASSERT(!_mockRanges.empty());
 }

-std::vector<ShardEndpoint> MockNSTargeter::_targetQuery(const BSONObj& query) const {
+std::vector<ShardEndpoint> MockNSTargeter::_targetQuery(const BSONObj& query,
+                                                        std::set<ChunkRange>* chunkRanges) const {
     const ChunkRange queryRange(parseRange(query));

     std::vector<ShardEndpoint> endpoints;
@@ -74,6 +75,9 @@ std::vector<ShardEndpoint> MockNSTargeter::_targetQuery(const BSONObj& query) co
     for (const auto& range : _mockRanges) {
         if (queryRange.overlapWith(range.range)) {
             endpoints.push_back(range.endpoint);
+            if (chunkRanges) {
+                chunkRanges->emplace(range.range);
+            }
         }
     }

diff --git a/src/mongo/s/mock_ns_targeter.h b/src/mongo/s/mock_ns_targeter.h
index 4c3e2d796d8..21cdd3d13a9 100644
--- a/src/mongo/s/mock_ns_targeter.h
+++ b/src/mongo/s/mock_ns_targeter.h
@@ -62,33 +62,43 @@ public:
     }

     /**
-     * Returns a ShardEndpoint for the doc from the mock ranges
+     * Returns a ShardEndpoint for the doc from the mock ranges. If `chunkRanges` is not nullptr,
+     * also populates a set of ChunkRange for the chunks that are targeted.
     */
-    ShardEndpoint targetInsert(OperationContext* opCtx, const BSONObj& doc) const override {
-        auto endpoints = _targetQuery(doc);
+    ShardEndpoint targetInsert(OperationContext* opCtx,
+                               const BSONObj& doc,
+                               std::set<ChunkRange>* chunkRanges = nullptr) const override {
+        auto endpoints = _targetQuery(doc, chunkRanges);
         ASSERT_EQ(1U, endpoints.size());
         return endpoints.front();
     }

     /**
      * Returns the first ShardEndpoint for the query from the mock ranges. Only can handle
-     * queries of the form { field : { $gte : <value>, $lt : <value> } }.
+     * queries of the form { field : { $gte : <value>, $lt : <value> } }. If `chunkRanges` is not
+     * nullptr, also populates a set of ChunkRange for the chunks that are targeted.
     */
-    std::vector<ShardEndpoint> targetUpdate(OperationContext* opCtx,
-                                            const BatchItemRef& itemRef) const override {
-        return _targetQuery(itemRef.getUpdate().getQ());
+    std::vector<ShardEndpoint> targetUpdate(
+        OperationContext* opCtx,
+        const BatchItemRef& itemRef,
+        std::set<ChunkRange>* chunkRanges = nullptr) const override {
+        return _targetQuery(itemRef.getUpdate().getQ(), chunkRanges);
     }

     /**
      * Returns the first ShardEndpoint for the query from the mock ranges. Only can handle
-     * queries of the form { field : { $gte : <value>, $lt : <value> } }.
+     * queries of the form { field : { $gte : <value>, $lt : <value> } }. If `chunkRanges` is not
+     * nullptr, also populates a set of ChunkRange for the chunks that are targeted.
     */
-    std::vector<ShardEndpoint> targetDelete(OperationContext* opCtx,
-                                            const BatchItemRef& itemRef) const override {
-        return _targetQuery(itemRef.getDelete().getQ());
+    std::vector<ShardEndpoint> targetDelete(
+        OperationContext* opCtx,
+        const BatchItemRef& itemRef,
+        std::set<ChunkRange>* chunkRanges = nullptr) const override {
+        return _targetQuery(itemRef.getDelete().getQ(), chunkRanges);
     }

-    std::vector<ShardEndpoint> targetAllShards(OperationContext* opCtx) const override {
+    std::vector<ShardEndpoint> targetAllShards(
+        OperationContext* opCtx, std::set<ChunkRange>* chunkRanges = nullptr) const override {
         std::vector<ShardEndpoint> endpoints;
         for (const auto& range : _mockRanges) {
             endpoints.push_back(range.endpoint);
@@ -130,9 +140,11 @@ public:
 private:
     /**
      * Returns the first ShardEndpoint for the query from the mock ranges. Only handles queries of
-     * the form { field : { $gte : <value>, $lt : <value> } }.
+     * the form { field : { $gte : <value>, $lt : <value> } }. If chunkRanges is not nullptr, also
+     * populates set of ChunkRange for the chunks that are targeted.
    */
-    std::vector<ShardEndpoint> _targetQuery(const BSONObj& query) const;
+    std::vector<ShardEndpoint> _targetQuery(const BSONObj& query,
+                                            std::set<ChunkRange>* chunkRanges) const;

     NamespaceString _nss;

diff --git a/src/mongo/s/ns_targeter.h b/src/mongo/s/ns_targeter.h
index 4db2397d826..d68dd6f4616 100644
--- a/src/mongo/s/ns_targeter.h
+++ b/src/mongo/s/ns_targeter.h
@@ -75,27 +75,40 @@ public:
     /**
      * Returns a ShardEndpoint for a single document write or throws ShardKeyNotFound if 'doc' is
      * malformed with respect to the shard key pattern of the collection.
+     * If output parameter chunkRanges is not nullptr, also returns the set of ChunkRange that
+     * the query is targeting; otherwise no other processing on chunk ranges is performed.
    */
-    virtual ShardEndpoint targetInsert(OperationContext* opCtx, const BSONObj& doc) const = 0;
+    virtual ShardEndpoint targetInsert(OperationContext* opCtx,
+                                       const BSONObj& doc,
+                                       std::set<ChunkRange>* chunkRanges = nullptr) const = 0;

     /**
      * Returns a vector of ShardEndpoints for a potentially multi-shard update or throws
      * ShardKeyNotFound if 'updateOp' misses a shard key, but the type of update requires it.
+     * If output parameter chunkRanges is not nullptr, also returns the set of ChunkRange that
+     * the query is targeting; otherwise no other processing on chunk ranges is performed.
    */
-    virtual std::vector<ShardEndpoint> targetUpdate(OperationContext* opCtx,
-                                                    const BatchItemRef& itemRef) const = 0;
+    virtual std::vector<ShardEndpoint> targetUpdate(
+        OperationContext* opCtx,
+        const BatchItemRef& itemRef,
+        std::set<ChunkRange>* chunkRanges = nullptr) const = 0;

     /**
      * Returns a vector of ShardEndpoints for a potentially multi-shard delete or throws
      * ShardKeyNotFound if 'deleteOp' misses a shard key, but the type of delete requires it.
+     * If output parameter chunkRanges is not nullptr, also returns the set of ChunkRange that
+     * the query is targeting; otherwise no other processing on chunk ranges is performed.
    */
-    virtual std::vector<ShardEndpoint> targetDelete(OperationContext* opCtx,
-                                                    const BatchItemRef& itemRef) const = 0;
+    virtual std::vector<ShardEndpoint> targetDelete(
+        OperationContext* opCtx,
+        const BatchItemRef& itemRef,
+        std::set<ChunkRange>* chunkRanges = nullptr) const = 0;

     /**
      * Returns a vector of ShardEndpoints for all shards.
    */
-    virtual std::vector<ShardEndpoint> targetAllShards(OperationContext* opCtx) const = 0;
+    virtual std::vector<ShardEndpoint> targetAllShards(
+        OperationContext* opCtx, std::set<ChunkRange>* chunkRanges = nullptr) const = 0;

     /**
      * Informs the targeter that a targeting failure occurred during one of the last targeting
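
Editor's note: widening every NSTargeter virtual with a defaulted std::set<ChunkRange>* = nullptr keeps existing call sites and most test doubles compiling unchanged, and overrides that never expect range collection can assert the pointer stays null, as the batch-write test targeters below do. One caveat: default arguments bind to the static type of the expression, not the dynamic one, which is why each override in this patch repeats the same = nullptr default. A standalone model of the approach (illustrative only):

    #include <cassert>
    #include <set>

    struct Targeter {
        virtual ~Targeter() = default;
        // New defaulted parameter: old callers `t.target()` still compile.
        virtual int target(std::set<int>* chunkRanges = nullptr) const = 0;
    };

    struct MockTargeter : Targeter {
        // Repeat the default: defaults resolve against the static type, so
        // overrides should not diverge from the base declaration.
        int target(std::set<int>* chunkRanges = nullptr) const override {
            assert(chunkRanges == nullptr);  // this test double never fills ranges
            return 42;
        }
    };

    int main() {
        MockTargeter t;
        const Targeter& base = t;
        assert(base.target() == 42);  // old-style call, no ranges requested
    }
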
diff --git a/src/mongo/s/write_ops/batch_write_exec_test.cpp b/src/mongo/s/write_ops/batch_write_exec_test.cpp
index 9c01ab18b4a..86e087c3902 100644
--- a/src/mongo/s/write_ops/batch_write_exec_test.cpp
+++ b/src/mongo/s/write_ops/batch_write_exec_test.cpp
@@ -406,8 +406,11 @@ TEST_F(BatchWriteExecTest, SingleUpdateTargetsShardWithLet) {
     public:
         using MockNSTargeter::MockNSTargeter;

-        std::vector<ShardEndpoint> targetUpdate(OperationContext* opCtx,
-                                                const BatchItemRef& itemRef) const override {
+        std::vector<ShardEndpoint> targetUpdate(
+            OperationContext* opCtx,
+            const BatchItemRef& itemRef,
+            std::set<ChunkRange>* chunkRange = nullptr) const override {
+            invariant(chunkRange == nullptr);
             return std::vector{
                 ShardEndpoint(kShardName2,
                               ShardVersion(ChunkVersion({epoch, timestamp}, {101, 200}),
@@ -499,8 +502,11 @@ TEST_F(BatchWriteExecTest, SingleDeleteTargetsShardWithLet) {
         using MockNSTargeter::MockNSTargeter;

     protected:
-        std::vector<ShardEndpoint> targetDelete(OperationContext* opCtx,
-                                                const BatchItemRef& itemRef) const override {
+        std::vector<ShardEndpoint> targetDelete(
+            OperationContext* opCtx,
+            const BatchItemRef& itemRef,
+            std::set<ChunkRange>* chunkRange = nullptr) const override {
+            invariant(chunkRange == nullptr);
             return std::vector{ShardEndpoint(
                 kShardName2,
                 ShardVersion(ChunkVersion({epoch, Timestamp(1, 1)}, {101, 200}),
@@ -700,8 +706,11 @@ TEST_F(BatchWriteExecTest, StaleShardVersionReturnedFromBatchWithSingleMultiWrit
     public:
         using MockNSTargeter::MockNSTargeter;

-        std::vector<ShardEndpoint> targetUpdate(OperationContext* opCtx,
-                                                const BatchItemRef& itemRef) const override {
+        std::vector<ShardEndpoint> targetUpdate(
+            OperationContext* opCtx,
+            const BatchItemRef& itemRef,
+            std::set<ChunkRange>* chunkRange = nullptr) const override {
+            invariant(chunkRange == nullptr);
             return std::vector{
                 ShardEndpoint(kShardName1,
                               ShardVersion(ChunkVersion({epoch, timestamp}, {100, 200}),
@@ -810,8 +819,11 @@ TEST_F(BatchWriteExecTest,
     public:
         using MockNSTargeter::MockNSTargeter;

-        std::vector<ShardEndpoint> targetUpdate(OperationContext* opCtx,
-                                                const BatchItemRef& itemRef) const override {
+        std::vector<ShardEndpoint> targetUpdate(
+            OperationContext* opCtx,
+            const BatchItemRef& itemRef,
+            std::set<ChunkRange>* chunkRange = nullptr) const override {
+            invariant(chunkRange == nullptr);
             return std::vector{
                 ShardEndpoint(kShardName1,
                               ShardVersion(ChunkVersion({epoch, timestamp}, {100, 200}),
@@ -928,8 +940,11 @@ TEST_F(BatchWriteExecTest, RetryableErrorReturnedFromMultiWriteWithShard1Firs) {
     public:
         using MockNSTargeter::MockNSTargeter;

-        std::vector<ShardEndpoint> targetUpdate(OperationContext* opCtx,
-                                                const BatchItemRef& itemRef) const override {
+        std::vector<ShardEndpoint> targetUpdate(
+            OperationContext* opCtx,
+            const BatchItemRef& itemRef,
+            std::set<ChunkRange>* chunkRange = nullptr) const override {
+            invariant(chunkRange == nullptr);
             return std::vector{
                 ShardEndpoint(kShardName1,
                               ShardVersion(ChunkVersion({epoch, timestamp}, {100, 200}),
@@ -1056,8 +1071,11 @@ TEST_F(BatchWriteExecTest, RetryableErrorReturnedFromMultiWriteWithShard1FirstOK
     public:
         using MockNSTargeter::MockNSTargeter;

-        std::vector<ShardEndpoint> targetUpdate(OperationContext* opCtx,
-                                                const BatchItemRef& itemRef) const override {
+        std::vector<ShardEndpoint> targetUpdate(
+            OperationContext* opCtx,
+            const BatchItemRef& itemRef,
+            std::set<ChunkRange>* chunkRange = nullptr) const override {
+            invariant(chunkRange == nullptr);
             return std::vector{
                 ShardEndpoint(kShardName1,
                               ShardVersion(ChunkVersion({epoch, timestamp}, {100, 200}),
@@ -1179,8 +1197,11 @@ TEST_F(BatchWriteExecTest, RetryableErrorReturnedFromWriteWithShard1SSVShard2OK)
     public:
         using MockNSTargeter::MockNSTargeter;

-        std::vector<ShardEndpoint> targetUpdate(OperationContext* opCtx,
-                                                const BatchItemRef& itemRef) const override {
+        std::vector<ShardEndpoint> targetUpdate(
+            OperationContext* opCtx,
+            const BatchItemRef& itemRef,
+            std::set<ChunkRange>* chunkRange = nullptr) const override {
+            invariant(chunkRange == nullptr);
             if (targetAll) {
                 return std::vector{
                     ShardEndpoint(kShardName1,
@@ -1956,8 +1977,10 @@ TEST_F(BatchWriteExecTargeterErrorTest, TargetedFailedAndErrorResponse) {
     public:
         using MockNSTargeter::MockNSTargeter;

-        std::vector<ShardEndpoint> targetUpdate(OperationContext* opCtx,
-                                                const BatchItemRef& itemRef) const override {
+        std::vector<ShardEndpoint> targetUpdate(
+            OperationContext* opCtx,
+            const BatchItemRef& itemRef,
+            std::set<ChunkRange>* chunkRanges = nullptr) const override {
             return std::vector{
                 ShardEndpoint(kShardName1,
                               ShardVersion(ChunkVersion({epoch, timestamp}, {100, 200}),
@@ -2102,8 +2125,11 @@ TEST_F(BatchWriteExecTransactionTargeterErrorTest, TargetedFailedAndErrorRespons
     public:
         using MockNSTargeter::MockNSTargeter;

-        std::vector<ShardEndpoint> targetUpdate(OperationContext* opCtx,
-                                                const BatchItemRef& itemRef) const override {
+        std::vector<ShardEndpoint> targetUpdate(
+            OperationContext* opCtx,
+            const BatchItemRef& itemRef,
+            std::set<ChunkRange>* chunkRange = nullptr) const override {
+            invariant(chunkRange == nullptr);
             return std::vector{
                 ShardEndpoint(kShardName1,
                               ShardVersion(ChunkVersion({epoch, timestamp}, {100, 200}),
@@ -2256,8 +2282,11 @@ TEST_F(BatchWriteExecTransactionMultiShardTest, TargetedSucceededAndErrorRespons
     public:
         using MockNSTargeter::MockNSTargeter;

-        std::vector<ShardEndpoint> targetUpdate(OperationContext* opCtx,
-                                                const BatchItemRef& itemRef) const override {
+        std::vector<ShardEndpoint> targetUpdate(
+            OperationContext* opCtx,
+            const BatchItemRef& itemRef,
+            std::set<ChunkRange>* chunkRange = nullptr) const override {
+            invariant(chunkRange == nullptr);
             return std::vector{
                 ShardEndpoint(kShardName1,
                               ShardVersion(ChunkVersion({epoch, timestamp}, {100, 200}),