author | Paolo Polato <paolo.polato@mongodb.com> | 2022-02-10 13:41:13 +0000
---|---|---
committer | Evergreen Agent <no-reply@evergreen.mongodb.com> | 2022-02-10 14:15:25 +0000
commit | d018b7e964dafa5085152af03916bde0ac74f947 (patch) |
tree | 5111f6c4a94ecdce8785d6d3bc8e81fc582234cc /src/mongo/db/s/split_chunk.cpp |
parent | 9b2fe8666c8a633ed141b86676116094d2a468cb (diff) |
download | mongo-d018b7e964dafa5085152af03916bde0ac74f947.tar.gz |
SERVER-63203 Do not fail chunk split if more than 8192 points are requested
Diffstat (limited to 'src/mongo/db/s/split_chunk.cpp')
-rw-r--r-- | src/mongo/db/s/split_chunk.cpp | 42
1 file changed, 23 insertions, 19 deletions
```diff
diff --git a/src/mongo/db/s/split_chunk.cpp b/src/mongo/db/s/split_chunk.cpp
index 01c67c703f4..1700ef30170 100644
--- a/src/mongo/db/s/split_chunk.cpp
+++ b/src/mongo/db/s/split_chunk.cpp
@@ -90,14 +90,14 @@ bool checkIfSingleDoc(OperationContext* opCtx,
 
 /**
  * Checks the collection's metadata for a successful split on the specified chunkRange using the
- * specified splitKeys. Returns false if the metadata's chunks don't match the new chunk
+ * specified split points. Returns false if the metadata's chunks don't match the new chunk
  * boundaries exactly.
  */
 bool checkMetadataForSuccessfulSplitChunk(OperationContext* opCtx,
                                           const NamespaceString& nss,
                                           const OID& epoch,
                                           const ChunkRange& chunkRange,
-                                          const std::vector<BSONObj>& splitKeys) {
+                                          const std::vector<BSONObj>& splitPoints) {
     AutoGetCollection autoColl(opCtx, nss, MODE_IS);
     const auto metadataAfterSplit =
         CollectionShardingRuntime::get(opCtx, nss)->getCurrentMetadataIfKnown();
@@ -106,19 +106,19 @@ bool checkMetadataForSuccessfulSplitChunk(OperationContext* opCtx,
             str::stream() << "Collection " << nss.ns() << " changed since split start",
             metadataAfterSplit && metadataAfterSplit->getShardVersion().epoch() == epoch);
 
-    auto newChunkBounds(splitKeys);
-    auto startKey = chunkRange.getMin();
-    newChunkBounds.push_back(chunkRange.getMax());
-
     ChunkType nextChunk;
-    for (const auto& endKey : newChunkBounds) {
+    for (auto it = splitPoints.begin(); it != splitPoints.end(); ++it) {
         // Check that all new chunks fit the new chunk boundaries
-        if (!metadataAfterSplit->getNextChunk(startKey, &nextChunk) ||
-            nextChunk.getMax().woCompare(endKey)) {
+        const auto& currentChunkMinKey = it == splitPoints.begin() ? chunkRange.getMin() : *std::prev(it);
+        if (!metadataAfterSplit->getNextChunk(currentChunkMinKey, &nextChunk) ||
+            nextChunk.getMax().woCompare(*it)) {
             return false;
         }
-
-        startKey = endKey;
+    }
+    // Special check for the last chunk produced.
+    if (!metadataAfterSplit->getNextChunk(splitPoints.back(), &nextChunk) ||
+        nextChunk.getMax().woCompare(chunkRange.getMax())) {
+        return false;
     }
 
     return true;
@@ -130,7 +130,7 @@ StatusWith<boost::optional<ChunkRange>> splitChunk(OperationContext* opCtx,
                                                    const NamespaceString& nss,
                                                    const BSONObj& keyPatternObj,
                                                    const ChunkRange& chunkRange,
-                                                   const std::vector<BSONObj>& splitKeys,
+                                                   std::vector<BSONObj>&& splitPoints,
                                                    const std::string& shardName,
                                                    const OID& expectedCollectionEpoch,
                                                    const bool fromChunkSplitter) {
@@ -141,8 +141,8 @@ StatusWith<boost::optional<ChunkRange>> splitChunk(OperationContext* opCtx,
     // data types.
     const auto hashedField = ShardKeyPattern::extractHashedField(keyPatternObj);
     if (hashedField) {
-        for (BSONObj splitKey : splitKeys) {
-            auto hashedSplitElement = splitKey[hashedField.fieldName()];
+        for (const auto& splitPoint : splitPoints) {
+            auto hashedSplitElement = splitPoint[hashedField.fieldName()];
             if (!ShardKeyPattern::isValidHashedValue(hashedSplitElement)) {
                 return {ErrorCodes::CannotSplit,
                         str::stream() << "splitChunk cannot split chunk " << chunkRange.toString()
@@ -154,8 +154,12 @@ StatusWith<boost::optional<ChunkRange>> splitChunk(OperationContext* opCtx,
     }
 
     // Commit the split to the config server.
-    auto request = SplitChunkRequest(
-        nss, shardName, expectedCollectionEpoch, chunkRange, splitKeys, fromChunkSplitter);
+    auto request = SplitChunkRequest(nss,
+                                     shardName,
+                                     expectedCollectionEpoch,
+                                     chunkRange,
+                                     std::move(splitPoints),
+                                     fromChunkSplitter);
 
     auto configCmdObj =
         request.toConfigCommandBSON(ShardingCatalogClient::kMajorityWriteConcern.toBSON());
@@ -205,7 +209,7 @@ StatusWith<boost::optional<ChunkRange>> splitChunk(OperationContext* opCtx,
 
     if (!commandStatus.isOK() || !writeConcernStatus.isOK()) {
         if (checkMetadataForSuccessfulSplitChunk(
-                opCtx, nss, expectedCollectionEpoch, chunkRange, splitKeys)) {
+                opCtx, nss, expectedCollectionEpoch, chunkRange, request.getSplitPoints())) {
             // Split was committed.
         } else if (!commandStatus.isOK()) {
             return commandStatus;
@@ -233,12 +237,12 @@ StatusWith<boost::optional<ChunkRange>> splitChunk(OperationContext* opCtx,
     }
 
     auto backChunk = ChunkType();
-    backChunk.setMin(splitKeys.back());
+    backChunk.setMin(request.getSplitPoints().back());
     backChunk.setMax(chunkRange.getMax());
 
     auto frontChunk = ChunkType();
     frontChunk.setMin(chunkRange.getMin());
-    frontChunk.setMax(splitKeys.front());
+    frontChunk.setMax(request.getSplitPoints().front());
 
     KeyPattern shardKeyPattern(keyPatternObj);
     if (shardKeyPattern.globalMax().woCompare(backChunk.getMax()) == 0 &&
```
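The heart of the change to `checkMetadataForSuccessfulSplitChunk` is the new boundary walk: for every split point, the refreshed metadata must contain a chunk whose min is the previous boundary (the range's min for the first point) and whose max is the split point itself, followed by a final check that the chunk starting at the last split point ends exactly at the range's max. The sketch below mirrors that control flow outside the MongoDB codebase; the types and names (`Key`, `Chunk`, `splitCommittedInMetadata`, `findChunkStartingAt`) are simplified stand-ins, not the real `BSONObj`/`ChunkType`/`CollectionMetadata` API.

```cpp
#include <algorithm>
#include <iterator>
#include <string>
#include <vector>

// Simplified stand-ins for shard key bounds and chunk metadata. The real code compares
// BSONObj bounds and queries CollectionMetadata::getNextChunk(); plain strings and a
// vector of chunks are enough to show the control flow.
using Key = std::string;

struct Chunk {
    Key min;
    Key max;
};

// Returns true when `chunks` (the post-refresh metadata) contains exactly the chunks
// produced by splitting [rangeMin, rangeMax) at `splitPoints`. Like the real check,
// this assumes `splitPoints` is non-empty.
bool splitCommittedInMetadata(const std::vector<Chunk>& chunks,
                              const Key& rangeMin,
                              const Key& rangeMax,
                              const std::vector<Key>& splitPoints) {
    // Stand-in for getNextChunk(): find the chunk that starts at the given key.
    auto findChunkStartingAt = [&](const Key& min) -> const Chunk* {
        auto it = std::find_if(chunks.begin(), chunks.end(),
                               [&](const Chunk& c) { return c.min == min; });
        return it == chunks.end() ? nullptr : &*it;
    };

    // Each split point must close a chunk that starts at the previous boundary.
    for (auto it = splitPoints.begin(); it != splitPoints.end(); ++it) {
        const Key& curMin = (it == splitPoints.begin()) ? rangeMin : *std::prev(it);
        const Chunk* chunk = findChunkStartingAt(curMin);
        if (!chunk || chunk->max != *it) {
            return false;
        }
    }

    // Special check for the last chunk produced: [last split point, rangeMax).
    const Chunk* last = findChunkStartingAt(splitPoints.back());
    return last && last->max == rangeMax;
}
```

For example, splitting the range ["a", "z") at {"g", "p"} should leave exactly the chunks ["a", "g"), ["g", "p") and ["p", "z") in the metadata; anything else makes the check return false, and the caller falls back to reporting the original command or write-concern error. The other notable change in the diff is that `splitChunk` now takes the split points by rvalue reference and moves them into the `SplitChunkRequest`, reading them back afterwards via `request.getSplitPoints()`; with thousands of requested points this avoids copying the whole vector just to build the config server command.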