| author | Kaloian Manassiev <kaloian.manassiev@mongodb.com> | 2016-05-02 22:25:26 -0400 |
|---|---|---|
| committer | Kaloian Manassiev <kaloian.manassiev@mongodb.com> | 2016-05-03 14:55:49 -0400 |
| commit | 6b87a77ced925398b7485abaf0fa36bafcccc52a (patch) | |
| tree | 422405627d91bef8df441c0d1548f325df44e738 /src/mongo/s | |
| parent | 67fbb57210c9f22b03aacc777364cf8ed8cef5d2 (diff) | |
| download | mongo-6b87a77ced925398b7485abaf0fa36bafcccc52a.tar.gz | |
SERVER-23609 Make top chunk split use the balancer to move chunks
Moves all direct access to balancer structures out of the top chunk split
optimization code and changes it to an explicit call to the balancer.
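
Concretely, the auto-split path in chunk.cpp no longer builds its own cluster statistics and `DistributionStatus` to pick a destination shard; it hands the suggested top chunk to the balancer instead. A condensed view of the new call site, taken from the chunk.cpp hunk further down in this diff:

```cpp
// Inside Chunk::splitIfShould(), after a split suggested migrating the top chunk.
// Condensed from the src/mongo/s/chunk.cpp change in this commit.
if (suggestedMigrateChunk && shouldBalance) {
    ChunkType chunkToMove;
    chunkToMove.setNS(_manager->getns());
    chunkToMove.setShard(getShardId());
    chunkToMove.setMin(suggestedMigrateChunk->first);
    chunkToMove.setMax(suggestedMigrateChunk->second);

    // The balancer now decides whether and where the chunk should move.
    msgassertedNoTraceWithStatus(10412,
                                 Balancer::get(txn)->rebalanceSingleChunk(txn, chunkToMove));
    _manager->reload(txn);
}
```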
Diffstat (limited to 'src/mongo/s')
| Mode | File | Lines changed |
|---|---|---|
| -rw-r--r-- | src/mongo/s/SConscript | 6 |
| -rw-r--r-- | src/mongo/s/balancer/balancer.cpp (renamed from src/mongo/s/balance.cpp) | 37 |
| -rw-r--r-- | src/mongo/s/balancer/balancer.h (renamed from src/mongo/s/balance.h) | 11 |
| -rw-r--r-- | src/mongo/s/balancer/balancer_chunk_selection_policy.h | 15 |
| -rw-r--r-- | src/mongo/s/balancer/balancer_chunk_selection_policy_impl.cpp | 36 |
| -rw-r--r-- | src/mongo/s/balancer/balancer_chunk_selection_policy_impl.h | 3 |
| -rw-r--r-- | src/mongo/s/balancer/balancer_policy.cpp (renamed from src/mongo/s/balancer_policy.cpp) | 27 |
| -rw-r--r-- | src/mongo/s/balancer/balancer_policy.h (renamed from src/mongo/s/balancer_policy.h) | 7 |
| -rw-r--r-- | src/mongo/s/balancer/balancer_policy_tests.cpp (renamed from src/mongo/s/balancer_policy_tests.cpp) | 2 |
| -rw-r--r-- | src/mongo/s/chunk.cpp | 108 |
| -rw-r--r-- | src/mongo/s/server.cpp | 2 |
| -rw-r--r-- | src/mongo/s/shard_util.cpp | 71 |
12 files changed, 153 insertions, 172 deletions
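
The balancer side of the change is the new `Balancer::rebalanceSingleChunk()` entry point in balancer/balancer.cpp, shown in full in the diff below. With logging and the try/catch around the migration trimmed, its flow is:

```cpp
// Condensed from the src/mongo/s/balancer/balancer.cpp hunk in this commit.
Status Balancer::rebalanceSingleChunk(OperationContext* txn, const ChunkType& chunk) {
    // Ask the chunk selection policy for a better location for this one chunk.
    auto migrateStatus = _chunkSelectionPolicy->selectSpecificChunkToMove(txn, chunk);
    if (!migrateStatus.isOK()) {
        return migrateStatus.getStatus();
    }

    // boost::none means the chunk is already on the most appropriate shard.
    auto migrateInfo = std::move(migrateStatus.getValue());
    if (!migrateInfo) {
        return Status::OK();
    }

    // Refresh the balancer settings and perform the migration using the currently
    // configured secondary throttle and waitForDelete options.
    auto balancerConfig = Grid::get(txn)->getBalancerConfiguration();
    Status refreshStatus = balancerConfig->refreshAndCheck(txn);
    if (!refreshStatus.isOK()) {
        return refreshStatus;
    }

    _moveChunks(txn,
                {*migrateInfo},
                balancerConfig->getSecondaryThrottle(),
                balancerConfig->waitForDelete());
    return Status::OK();
}
```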
diff --git a/src/mongo/s/SConscript b/src/mongo/s/SConscript index 9d330c70723..91e6098d15a 100644 --- a/src/mongo/s/SConscript +++ b/src/mongo/s/SConscript @@ -170,11 +170,11 @@ env.Library( target='coreshard', source=[ # This is only here temporarily for auto-split logic in chunk.cpp. - 'balance.cpp', - 'balancer_policy.cpp', + 'balancer/balancer.cpp', 'balancer/balancer_chunk_selection_policy.cpp', 'balancer/balancer_chunk_selection_policy_impl.cpp', 'balancer/balancer_configuration.cpp', + 'balancer/balancer_policy.cpp', 'balancer/cluster_statistics.cpp', 'balancer/cluster_statistics_impl.cpp', 'catalog/catalog_cache.cpp', @@ -241,8 +241,8 @@ env.Library( env.CppUnitTest( target='mongoscore_test', source=[ - 'balancer_policy_tests.cpp', 'balancer/balancer_configuration_test.cpp', + 'balancer/balancer_policy_tests.cpp', 'balancer/cluster_statistics_test.cpp', 'shard_key_pattern_test.cpp', ], diff --git a/src/mongo/s/balance.cpp b/src/mongo/s/balancer/balancer.cpp index afbfac59728..e71c0ac0e21 100644 --- a/src/mongo/s/balance.cpp +++ b/src/mongo/s/balancer/balancer.cpp @@ -30,7 +30,7 @@ #include "mongo/platform/basic.h" -#include "mongo/s/balance.h" +#include "mongo/s/balancer/balancer.h" #include "mongo/base/status_with.h" #include "mongo/client/read_preference.h" @@ -165,6 +165,35 @@ Balancer* Balancer::get(OperationContext* operationContext) { return &getBalancer(operationContext->getServiceContext()); } +Status Balancer::rebalanceSingleChunk(OperationContext* txn, const ChunkType& chunk) { + auto migrateStatus = _chunkSelectionPolicy->selectSpecificChunkToMove(txn, chunk); + if (!migrateStatus.isOK()) { + return migrateStatus.getStatus(); + } + + auto migrateInfo = std::move(migrateStatus.getValue()); + if (!migrateInfo) { + LOG(1) << "Unable to find more appropriate location for chunk " << chunk; + return Status::OK(); + } + + auto balancerConfig = Grid::get(txn)->getBalancerConfiguration(); + Status refreshStatus = balancerConfig->refreshAndCheck(txn); + if (!refreshStatus.isOK()) { + return refreshStatus; + } + + try { + _moveChunks(txn, + {*migrateInfo}, + balancerConfig->getSecondaryThrottle(), + balancerConfig->waitForDelete()); + return Status::OK(); + } catch (const DBException& e) { + return e.toStatus(); + } +} + void Balancer::run() { Client::initThread("Balancer"); @@ -439,10 +468,8 @@ int Balancer::_moveChunks(OperationContext* txn, int movedCount = 0; for (const auto& migrateInfo : candidateChunks) { - // If the balancer was disabled since we started this round, don't start new chunks - // moves. 
- if (!Grid::get(txn)->getBalancerConfiguration()->isBalancerActive() || - MONGO_FAIL_POINT(skipBalanceRound)) { + // If the balancer was disabled since we started this round, don't start new chunk moves + if (!Grid::get(txn)->getBalancerConfiguration()->isBalancerActive()) { LOG(1) << "Stopping balancing round early as balancing was disabled"; return movedCount; } diff --git a/src/mongo/s/balance.h b/src/mongo/s/balancer/balancer.h index 41c0349716a..63159beceee 100644 --- a/src/mongo/s/balance.h +++ b/src/mongo/s/balancer/balancer.h @@ -37,6 +37,7 @@ namespace mongo { +class ChunkType; class ClusterStatistics; class MigrationSecondaryThrottleOptions; class OperationContext; @@ -63,9 +64,13 @@ public: */ static Balancer* get(OperationContext* operationContext); - ClusterStatistics* getClusterStatistics() const { - return _clusterStats.get(); - } + /** + * Blocking call, which requests the balancer to move a single chunk to a more appropriate + * shard, in accordance with the active balancer policy. It is not guaranteed that the chunk + * will actually move because it may already be at the best shard. An error will be returned if + * the attempt to find a better shard or the actual migration fail for any reason. + */ + Status rebalanceSingleChunk(OperationContext* txn, const ChunkType& chunk); private: // BackgroundJob methods implementation diff --git a/src/mongo/s/balancer/balancer_chunk_selection_policy.h b/src/mongo/s/balancer/balancer_chunk_selection_policy.h index de0761009c6..7bdf049c331 100644 --- a/src/mongo/s/balancer/balancer_chunk_selection_policy.h +++ b/src/mongo/s/balancer/balancer_chunk_selection_policy.h @@ -28,12 +28,16 @@ #pragma once +#include <boost/optional.hpp> + #include "mongo/base/disallow_copying.h" -#include "mongo/s/balancer_policy.h" +#include "mongo/s/balancer/balancer_policy.h" +#include "mongo/s/catalog/type_chunk.h" #include "mongo/s/chunk_version.h" namespace mongo { +class ChunkType; class NamespaceString; class OperationContext; template <typename T> @@ -90,6 +94,15 @@ public: virtual StatusWith<MigrateInfoVector> selectChunksToMove(OperationContext* txn, bool aggressiveBalanceHint) = 0; + /** + * Requests a single chunk to be relocated to a different shard, if possible. If some error + * occurs while trying to determine the best location for the chunk, a failed status is + * returned. If the chunk is already at the best shard that it can be, returns boost::none. + * Otherwise returns migration information for where the chunk should be moved. 
+ */ + virtual StatusWith<boost::optional<MigrateInfo>> selectSpecificChunkToMove( + OperationContext* txn, const ChunkType& chunk) = 0; + protected: BalancerChunkSelectionPolicy(); }; diff --git a/src/mongo/s/balancer/balancer_chunk_selection_policy_impl.cpp b/src/mongo/s/balancer/balancer_chunk_selection_policy_impl.cpp index 4e754eed70d..9993bd0d6f8 100644 --- a/src/mongo/s/balancer/balancer_chunk_selection_policy_impl.cpp +++ b/src/mongo/s/balancer/balancer_chunk_selection_policy_impl.cpp @@ -181,6 +181,42 @@ StatusWith<MigrateInfoVector> BalancerChunkSelectionPolicyImpl::selectChunksToMo return candidateChunks; } +StatusWith<boost::optional<MigrateInfo>> +BalancerChunkSelectionPolicyImpl::selectSpecificChunkToMove(OperationContext* txn, + const ChunkType& chunk) { + const NamespaceString nss(chunk.getNS()); + + auto scopedCMStatus = ScopedChunkManager::getExisting(txn, nss); + if (!scopedCMStatus.isOK()) { + return scopedCMStatus.getStatus(); + } + + auto scopedCM = std::move(scopedCMStatus.getValue()); + ChunkManager* const cm = scopedCM.cm(); + + auto tagForChunkStatus = + Grid::get(txn)->catalogManager(txn)->getTagForChunk(txn, nss.ns(), chunk); + if (!tagForChunkStatus.isOK()) { + return tagForChunkStatus.getStatus(); + } + + auto shardStatsStatus = _clusterStats->getStats(txn); + if (!shardStatsStatus.isOK()) { + return shardStatsStatus.getStatus(); + } + + auto collInfo = createCollectionDistributionInfo(shardStatsStatus.getValue(), cm); + ShardToChunksMap shardToChunksMap = std::move(std::get<0>(collInfo)); + + DistributionStatus distStatus(shardStatsStatus.getValue(), shardToChunksMap); + const ShardId newShardId(distStatus.getBestReceieverShard(tagForChunkStatus.getValue())); + if (newShardId.empty() || newShardId == chunk.getShard()) { + return boost::optional<MigrateInfo>(); + } + + return boost::optional<MigrateInfo>{MigrateInfo(nss.ns(), newShardId, chunk)}; +} + StatusWith<SplitInfoVector> BalancerChunkSelectionPolicyImpl::_getSplitCandidatesForCollection( OperationContext* txn, const NamespaceString& nss) { auto scopedCMStatus = ScopedChunkManager::getExisting(txn, nss); diff --git a/src/mongo/s/balancer/balancer_chunk_selection_policy_impl.h b/src/mongo/s/balancer/balancer_chunk_selection_policy_impl.h index e26c333975d..ab6230243d9 100644 --- a/src/mongo/s/balancer/balancer_chunk_selection_policy_impl.h +++ b/src/mongo/s/balancer/balancer_chunk_selection_policy_impl.h @@ -44,6 +44,9 @@ public: StatusWith<MigrateInfoVector> selectChunksToMove(OperationContext* txn, bool aggressiveBalanceHint) override; + StatusWith<boost::optional<MigrateInfo>> selectSpecificChunkToMove( + OperationContext* txn, const ChunkType& chunk) override; + private: /** * Synchronous method, which iterates the collection's chunks and uses the tags information to diff --git a/src/mongo/s/balancer_policy.cpp b/src/mongo/s/balancer/balancer_policy.cpp index 2d5346ea0fa..17cd2136979 100644 --- a/src/mongo/s/balancer_policy.cpp +++ b/src/mongo/s/balancer/balancer_policy.cpp @@ -30,7 +30,7 @@ #include "mongo/platform/basic.h" -#include "mongo/s/balancer_policy.h" +#include "mongo/s/balancer/balancer_policy.h" #include <algorithm> @@ -227,31 +227,6 @@ void DistributionStatus::dump() const { } } -void DistributionStatus::populateShardToChunksMap(const ShardStatisticsVector& allShards, - const ChunkManager& chunkMgr, - ShardToChunksMap* shardToChunksMap) { - // Makes sure there is an entry in shardToChunksMap for every shard. 
- for (const auto& stat : allShards) { - (*shardToChunksMap)[stat.shardId]; - } - - const ChunkMap& chunkMap = chunkMgr.getChunkMap(); - for (ChunkMap::const_iterator it = chunkMap.begin(); it != chunkMap.end(); ++it) { - const ChunkPtr chunkPtr = it->second; - - ChunkType chunk; - chunk.setNS(chunkMgr.getns()); - chunk.setMin(chunkPtr->getMin().getOwned()); - chunk.setMax(chunkPtr->getMax().getOwned()); - chunk.setJumbo(chunkPtr->isJumbo()); // TODO: is this reliable? - - const string shardName(chunkPtr->getShardId()); - chunk.setShard(shardName); - - (*shardToChunksMap)[shardName].push_back(chunk); - } -} - MigrateInfo* BalancerPolicy::balance(const string& ns, const DistributionStatus& distribution, int balancedLastTime) { diff --git a/src/mongo/s/balancer_policy.h b/src/mongo/s/balancer/balancer_policy.h index 241b0cc04a4..45fb6409294 100644 --- a/src/mongo/s/balancer_policy.h +++ b/src/mongo/s/balancer/balancer_policy.h @@ -129,13 +129,6 @@ public: return _shardInfo; } - /** - * Note: jumbo and versions are not set. - */ - static void populateShardToChunksMap(const ShardStatisticsVector& allShards, - const ChunkManager& chunkMgr, - ShardToChunksMap* shardToChunksMap); - private: const ShardStatisticsVector _shardInfo; const ShardToChunksMap& _shardChunks; diff --git a/src/mongo/s/balancer_policy_tests.cpp b/src/mongo/s/balancer/balancer_policy_tests.cpp index 347b467fc84..b0ba5c29ce3 100644 --- a/src/mongo/s/balancer_policy_tests.cpp +++ b/src/mongo/s/balancer/balancer_policy_tests.cpp @@ -31,7 +31,7 @@ #include "mongo/platform/basic.h" #include "mongo/platform/random.h" -#include "mongo/s/balancer_policy.h" +#include "mongo/s/balancer/balancer_policy.h" #include "mongo/s/catalog/type_chunk.h" #include "mongo/unittest/unittest.h" #include "mongo/util/log.h" diff --git a/src/mongo/s/chunk.cpp b/src/mongo/s/chunk.cpp index 9dc814af604..44ebb40b5b3 100644 --- a/src/mongo/s/chunk.cpp +++ b/src/mongo/s/chunk.cpp @@ -34,25 +34,18 @@ #include "mongo/client/connpool.h" #include "mongo/client/read_preference.h" -#include "mongo/client/remote_command_targeter.h" #include "mongo/db/commands.h" #include "mongo/db/lasterror.h" -#include "mongo/db/query/query_solution.h" -#include "mongo/db/write_concern.h" -#include "mongo/db/write_concern_options.h" #include "mongo/platform/random.h" #include "mongo/rpc/get_status_from_command_result.h" -#include "mongo/s/balance.h" -#include "mongo/s/balancer_policy.h" +#include "mongo/s/balancer/balancer.h" #include "mongo/s/balancer/balancer_configuration.h" -#include "mongo/s/balancer/cluster_statistics.h" #include "mongo/s/catalog/catalog_manager.h" #include "mongo/s/catalog/type_chunk.h" #include "mongo/s/catalog/type_collection.h" #include "mongo/s/chunk_manager.h" #include "mongo/s/client/shard_registry.h" #include "mongo/s/grid.h" -#include "mongo/s/migration_secondary_throttle_options.h" #include "mongo/s/move_chunk_request.h" #include "mongo/s/shard_util.h" #include "mongo/util/log.h" @@ -72,93 +65,6 @@ namespace { const uint64_t kTooManySplitPoints = 4; -/** - * Attempts to move the given chunk to another shard. - * - * Returns true if the chunk was actually moved. 
- */ -bool tryMoveToOtherShard(OperationContext* txn, - const ChunkManager* manager, - const ChunkType& chunk) { - auto clusterStatsStatus(Balancer::get(txn)->getClusterStatistics()->getStats(txn)); - if (!clusterStatsStatus.isOK()) { - warning() << "Could not get cluster statistics " - << causedBy(clusterStatsStatus.getStatus()); - return false; - } - - const auto clusterStats(std::move(clusterStatsStatus.getValue())); - - if (clusterStats.size() < 2) { - LOG(0) << "no need to move top chunk since there's only 1 shard"; - return false; - } - - // Reload sharding metadata before starting migration. Only reload the differences though, - // because the entire chunk manager was reloaded during the call to split, which immediately - // precedes this move logic - shared_ptr<ChunkManager> chunkMgr = manager->reload(txn, false); - - map<string, vector<ChunkType>> shardToChunkMap; - DistributionStatus::populateShardToChunksMap(clusterStats, *chunkMgr, &shardToChunkMap); - - StatusWith<string> tagStatus = - grid.catalogManager(txn)->getTagForChunk(txn, manager->getns(), chunk); - if (!tagStatus.isOK()) { - warning() << "Not auto-moving chunk because of an error encountered while " - << "checking tag for chunk: " << tagStatus.getStatus(); - return false; - } - - DistributionStatus chunkDistribution(clusterStats, shardToChunkMap); - const string newLocation(chunkDistribution.getBestReceieverShard(tagStatus.getValue())); - - if (newLocation.empty()) { - LOG(1) << "recently split chunk: " << chunk << " but no suitable shard to move to"; - return false; - } - - if (chunk.getShard() == newLocation) { - // if this is the best shard, then we shouldn't do anything. - LOG(1) << "recently split chunk: " << chunk << " already in the best shard"; - return false; - } - - shared_ptr<Chunk> toMove = chunkMgr->findIntersectingChunk(txn, chunk.getMin()); - - if (!(toMove->getMin() == chunk.getMin() && toMove->getMax() == chunk.getMax())) { - LOG(1) << "recently split chunk: " << chunk << " modified before we could migrate " - << toMove->toString(); - return false; - } - - log() << "moving chunk (auto): " << toMove->toString() << " to: " << newLocation; - - shared_ptr<Shard> newShard = grid.shardRegistry()->getShard(txn, newLocation); - if (!newShard) { - warning() << "Newly selected shard " << newLocation << " could not be found."; - return false; - } - - BSONObj res; - WriteConcernOptions noThrottle; - if (!toMove->moveAndCommit( - txn, - newShard->getId(), - Grid::get(txn)->getBalancerConfiguration()->getMaxChunkSizeBytes(), - MigrationSecondaryThrottleOptions::create(MigrationSecondaryThrottleOptions::kOff), - false, /* waitForDelete - small chunk, no need */ - 0, /* maxTimeMS - don't time out */ - res)) { - msgassertedNoTrace(10412, str::stream() << "moveAndCommit failed: " << res); - } - - // update our config - manager->reload(txn); - - return true; -} - } // namespace Chunk::Chunk(OperationContext* txn, ChunkManager* manager, const ChunkType& from) @@ -522,18 +428,18 @@ bool Chunk::splitIfShould(OperationContext* txn, long dataWritten) { // inserts will fall on the top chunk. 
if (suggestedMigrateChunk && shouldBalance) { ChunkType chunkToMove; - { - const auto shard = grid.shardRegistry()->getShard(txn, getShardId()); - chunkToMove.setShard(shard->toString()); - } + chunkToMove.setNS(_manager->getns()); + chunkToMove.setShard(getShardId()); chunkToMove.setMin(suggestedMigrateChunk->first); chunkToMove.setMax(suggestedMigrateChunk->second); - tryMoveToOtherShard(txn, _manager, chunkToMove); + msgassertedNoTraceWithStatus( + 10412, Balancer::get(txn)->rebalanceSingleChunk(txn, chunkToMove)); + _manager->reload(txn); } return true; - } catch (DBException& e) { + } catch (const DBException& e) { // TODO: Make this better - there are lots of reasons a split could fail // Random so that we don't sync up with other failed splits _dataWritten = mkDataWritten(); diff --git a/src/mongo/s/server.cpp b/src/mongo/s/server.cpp index 9d508e0fd54..9b2b2125611 100644 --- a/src/mongo/s/server.cpp +++ b/src/mongo/s/server.cpp @@ -58,7 +58,7 @@ #include "mongo/db/startup_warnings_common.h" #include "mongo/db/wire_version.h" #include "mongo/platform/process_id.h" -#include "mongo/s/balance.h" +#include "mongo/s/balancer/balancer.h" #include "mongo/s/catalog/catalog_manager.h" #include "mongo/s/catalog/type_chunk.h" #include "mongo/s/catalog/type_locks.h" diff --git a/src/mongo/s/shard_util.cpp b/src/mongo/s/shard_util.cpp index 3083b8f251c..aa15dc03722 100644 --- a/src/mongo/s/shard_util.cpp +++ b/src/mongo/s/shard_util.cpp @@ -37,7 +37,6 @@ #include "mongo/client/read_preference.h" #include "mongo/client/remote_command_targeter.h" #include "mongo/db/namespace_string.h" -#include "mongo/rpc/get_status_from_command_result.h" #include "mongo/s/client/shard_registry.h" #include "mongo/s/grid.h" #include "mongo/s/shard_key_pattern.h" @@ -48,9 +47,41 @@ namespace mongo { namespace shardutil { namespace { +const char kMinKey[] = "min"; +const char kMaxKey[] = "max"; const char kShouldMigrate[] = "shouldMigrate"; + +/** + * Extracts the bounds of a chunk from a BSON object having the following format: + * { min: <min key>, max: <max key> } + * + * Returns a failed status if the format cannot be matched. 
+ */ +StatusWith<std::pair<BSONObj, BSONObj>> extractChunkBounds(const BSONObj& obj) { + BSONElement minKey; + { + Status minKeyStatus = bsonExtractTypedField(obj, kMinKey, Object, &minKey); + if (!minKeyStatus.isOK()) { + return {minKeyStatus.code(), + str::stream() << "Invalid min key due to " << minKeyStatus.toString()}; + } + } + + BSONElement maxKey; + { + Status maxKeyStatus = bsonExtractTypedField(obj, kMaxKey, Object, &maxKey); + if (!maxKeyStatus.isOK()) { + return {maxKeyStatus.code(), + str::stream() << "Invalid max key due to " << maxKeyStatus.toString()}; + } + } + + return std::pair<BSONObj, BSONObj>( + std::make_pair(minKey.Obj().getOwned(), maxKey.Obj().getOwned())); } +} // namespace + StatusWith<long long> retrieveTotalShardSize(OperationContext* txn, const ShardId& shardId) { auto shard = Grid::get(txn)->shardRegistry()->getShard(txn, shardId); if (!shard) { @@ -87,8 +118,8 @@ StatusWith<BSONObj> selectMedianKey(OperationContext* txn, BSONObjBuilder cmd; cmd.append("splitVector", nss.ns()); cmd.append("keyPattern", shardKeyPattern.toBSON()); - cmd.append("min", minKey); - cmd.append("max", maxKey); + cmd.append(kMinKey, minKey); + cmd.append(kMaxKey, maxKey); cmd.appendBool("force", true); auto shard = Grid::get(txn)->shardRegistry()->getShard(txn, shardId); @@ -130,8 +161,8 @@ StatusWith<std::vector<BSONObj>> selectChunkSplitPoints(OperationContext* txn, BSONObjBuilder cmd; cmd.append("splitVector", nss.ns()); cmd.append("keyPattern", shardKeyPattern.toBSON()); - cmd.append("min", minKey); - cmd.append("max", maxKey); + cmd.append(kMinKey, minKey); + cmd.append(kMaxKey, maxKey); cmd.append("maxChunkSizeBytes", chunkSizeBytes); cmd.append("maxSplitPoints", maxPoints); cmd.append("maxChunkObjects", maxObjs); @@ -187,12 +218,13 @@ StatusWith<boost::optional<std::pair<BSONObj, BSONObj>>> splitChunkAtMultiplePoi BSONObjBuilder cmd; cmd.append("splitChunk", nss.ns()); - cmd.append("configdb", grid.shardRegistry()->getConfigServerConnectionString().toString()); + cmd.append("configdb", + Grid::get(txn)->shardRegistry()->getConfigServerConnectionString().toString()); cmd.append("from", shardId); cmd.append("keyPattern", shardKeyPattern.toBSON()); collectionVersion.appendForCommands(&cmd); - cmd.append("min", minKey); - cmd.append("max", maxKey); + cmd.append(kMinKey, minKey); + cmd.append(kMaxKey, maxKey); cmd.append("splitKeys", splitPoints); BSONObj cmdObj = cmd.obj(); @@ -219,30 +251,21 @@ StatusWith<boost::optional<std::pair<BSONObj, BSONObj>>> splitChunkAtMultiplePoi } if (!status.isOK()) { - log() << "splitChunk cmd " << cmdObj << " failed" << causedBy(status); + log() << "Split chunk " << cmdObj << " failed" << causedBy(status); return {status.code(), str::stream() << "split failed due to " << status.toString()}; } BSONElement shouldMigrateElement; status = bsonExtractTypedField(cmdResponse, kShouldMigrate, Object, &shouldMigrateElement); if (status.isOK()) { - BSONObj shouldMigrateBounds = shouldMigrateElement.embeddedObject(); - BSONElement minKey, maxKey; - - Status minKeyStatus = bsonExtractTypedField(shouldMigrateBounds, "min", Object, &minKey); - Status maxKeyStatus = bsonExtractTypedField(shouldMigrateBounds, "max", Object, &maxKey); - - if (minKeyStatus.isOK() && maxKeyStatus.isOK()) { - return boost::optional<std::pair<BSONObj, BSONObj>>( - std::make_pair(minKey.Obj().getOwned(), maxKey.Obj().getOwned())); - } else if (!minKeyStatus.isOK()) { - status = minKeyStatus; - } else { - status = maxKeyStatus; + auto chunkBoundsStatus = 
extractChunkBounds(shouldMigrateElement.embeddedObject()); + if (!chunkBoundsStatus.isOK()) { + return chunkBoundsStatus.getStatus(); } - } - if (!status.isOK()) { + return boost::optional<std::pair<BSONObj, BSONObj>>( + std::move(chunkBoundsStatus.getValue())); + } else if (status != ErrorCodes::NoSuchKey) { warning() << "Chunk migration will be skipped because splitChunk returned invalid response: " << cmdResponse << ". Extracting " << kShouldMigrate << " field failed" |
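
The selection decision itself lives in the new `BalancerChunkSelectionPolicyImpl::selectSpecificChunkToMove()` from the balancer_chunk_selection_policy_impl.cpp hunk above. Paraphrased with the per-step status checks elided (the committed version returns the failed `Status` at each step), it reuses the same distribution machinery as a full balancing round:

```cpp
// Paraphrased from BalancerChunkSelectionPolicyImpl::selectSpecificChunkToMove() in this
// commit; error propagation is omitted here for brevity.
StatusWith<boost::optional<MigrateInfo>>
BalancerChunkSelectionPolicyImpl::selectSpecificChunkToMove(OperationContext* txn,
                                                            const ChunkType& chunk) {
    const NamespaceString nss(chunk.getNS());

    // Load the chunk manager, the chunk's tag (zone) and the current shard statistics.
    auto scopedCM = std::move(ScopedChunkManager::getExisting(txn, nss).getValue());
    auto tag = Grid::get(txn)->catalogManager(txn)->getTagForChunk(txn, nss.ns(), chunk).getValue();
    auto shardStats = _clusterStats->getStats(txn).getValue();

    // Build the same DistributionStatus a balancing round would use and ask it for the
    // best recipient shard for this tag.
    auto collInfo = createCollectionDistributionInfo(shardStats, scopedCM.cm());
    ShardToChunksMap shardToChunksMap = std::move(std::get<0>(collInfo));
    DistributionStatus distStatus(shardStats, shardToChunksMap);

    const ShardId newShardId(distStatus.getBestReceieverShard(tag));
    if (newShardId.empty() || newShardId == chunk.getShard()) {
        // Already on the best shard -- tell the caller there is nothing to do.
        return boost::optional<MigrateInfo>();
    }

    return boost::optional<MigrateInfo>{MigrateInfo(nss.ns(), newShardId, chunk)};
}
```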