summaryrefslogtreecommitdiff
path: root/src/mongo/db
diff options
context:
space:
mode:
author: Paolo Polato <paolo.polato@mongodb.com> 2022-02-10 13:41:13 +0000
committer: Evergreen Agent <no-reply@evergreen.mongodb.com> 2022-02-10 14:15:25 +0000
commit: d018b7e964dafa5085152af03916bde0ac74f947 (patch)
tree: 5111f6c4a94ecdce8785d6d3bc8e81fc582234cc /src/mongo/db
parent: 9b2fe8666c8a633ed141b86676116094d2a468cb (diff)
download: mongo-d018b7e964dafa5085152af03916bde0ac74f947.tar.gz
SERVER-63203 Do not fail chunk split if more than 8192 points are requested
Diffstat (limited to 'src/mongo/db')
-rw-r--r--  src/mongo/db/s/chunk_splitter.cpp                | 24
-rw-r--r--  src/mongo/db/s/create_collection_coordinator.cpp | 12
-rw-r--r--  src/mongo/db/s/split_chunk.cpp                   | 42
-rw-r--r--  src/mongo/db/s/split_chunk.h                     |  3
-rw-r--r--  src/mongo/db/s/split_chunk_command.cpp           |  2
5 files changed, 45 insertions, 38 deletions
diff --git a/src/mongo/db/s/chunk_splitter.cpp b/src/mongo/db/s/chunk_splitter.cpp
index 54a849f8b93..f26341fc66e 100644
--- a/src/mongo/db/s/chunk_splitter.cpp
+++ b/src/mongo/db/s/chunk_splitter.cpp
@@ -52,6 +52,7 @@
#include "mongo/s/config_server_client.h"
#include "mongo/s/grid.h"
#include "mongo/s/shard_key_pattern.h"
+#include "mongo/s/shard_util.h"
#include "mongo/util/assert_util.h"
namespace mongo {
@@ -84,22 +85,23 @@ Status splitChunkAtMultiplePoints(OperationContext* opCtx,
const ShardKeyPattern& shardKeyPattern,
const ChunkVersion& collectionVersion,
const ChunkRange& chunkRange,
- const std::vector<BSONObj>& splitPoints) {
+ std::vector<BSONObj>&& splitPoints) {
invariant(!splitPoints.empty());
- const size_t kMaxSplitPoints = 8192;
-
- if (splitPoints.size() > kMaxSplitPoints) {
- return {ErrorCodes::BadValue,
- str::stream() << "Cannot split chunk in more than " << kMaxSplitPoints
- << " parts at a time."};
+ if (splitPoints.size() > shardutil::kMaxSplitPoints) {
+ LOGV2_WARNING(6320301,
+ "Unable to apply all the split points received. Only the first "
+ "kMaxSplitPoints will be processed",
+ "numSplitPointsReceived"_attr = splitPoints.size(),
+ "kMaxSplitPoints"_attr = shardutil::kMaxSplitPoints);
+ splitPoints.resize(shardutil::kMaxSplitPoints);
}
return splitChunk(opCtx,
nss,
shardKeyPattern.toBSON(),
chunkRange,
- splitPoints,
+ std::move(splitPoints),
shardId.toString(),
collectionVersion.epoch(),
true /* fromChunkSplitter */)
@@ -393,13 +395,15 @@ void ChunkSplitter::_runAutosplit(std::shared_ptr<ChunkSplitStateDriver> chunkSp
}
}
+ auto numSplitPoints = splitPoints.size();
+
uassertStatusOK(splitChunkAtMultiplePoints(opCtx.get(),
chunk.getShardId(),
nss,
shardKeyPattern,
cm.getVersion(),
chunk.getRange(),
- splitPoints));
+ std::move(splitPoints)));
chunkSplitStateDriver->commitSplit();
const bool shouldBalance = isAutoBalanceEnabled(opCtx.get(), nss, balancerConfig);
@@ -412,7 +416,7 @@ void ChunkSplitter::_runAutosplit(std::shared_ptr<ChunkSplitStateDriver> chunkSp
"minKey"_attr = redact(chunk.getMin()),
"maxKey"_attr = redact(chunk.getMax()),
"lastmod"_attr = chunk.getLastmod(),
- "splitPoints"_attr = splitPoints.size(),
+ "splitPoints"_attr = numSplitPoints,
"maxChunkSizeBytes"_attr = maxChunkSizeBytes,
"extraInfo"_attr =
(topChunkMinKey.isEmpty() ? ""
diff --git a/src/mongo/db/s/create_collection_coordinator.cpp b/src/mongo/db/s/create_collection_coordinator.cpp
index b2edc047738..e2f40c66c72 100644
--- a/src/mongo/db/s/create_collection_coordinator.cpp
+++ b/src/mongo/db/s/create_collection_coordinator.cpp
@@ -645,18 +645,18 @@ void CreateCollectionCoordinator::_checkCommandArguments(OperationContext* opCtx
if (_doc.getNumInitialChunks()) {
// Ensure numInitialChunks is within valid bounds.
- // Cannot have more than 8192 initial chunks per shard. Setting a maximum of 1,000,000
- // chunks in total to limit the amount of memory this command consumes so there is less
- // danger of an OOM error.
+ // Cannot have more than kMaxSplitPoints initial chunks per shard. Setting a maximum of
+ // 1,000,000 chunks in total to limit the amount of memory this command consumes so there is
+ // less danger of an OOM error.
const int maxNumInitialChunksForShards =
- Grid::get(opCtx)->shardRegistry()->getNumShardsNoReload() * 8192;
+ Grid::get(opCtx)->shardRegistry()->getNumShardsNoReload() * shardutil::kMaxSplitPoints;
const int maxNumInitialChunksTotal = 1000 * 1000; // Arbitrary limit to memory consumption
int numChunks = _doc.getNumInitialChunks().value();
uassert(ErrorCodes::InvalidOptions,
str::stream() << "numInitialChunks cannot be more than either: "
- << maxNumInitialChunksForShards << ", 8192 * number of shards; or "
- << maxNumInitialChunksTotal,
+ << maxNumInitialChunksForShards << ", " << shardutil::kMaxSplitPoints
+ << " * number of shards; or " << maxNumInitialChunksTotal,
numChunks >= 0 && numChunks <= maxNumInitialChunksForShards &&
numChunks <= maxNumInitialChunksTotal);
}
diff --git a/src/mongo/db/s/split_chunk.cpp b/src/mongo/db/s/split_chunk.cpp
index 01c67c703f4..1700ef30170 100644
--- a/src/mongo/db/s/split_chunk.cpp
+++ b/src/mongo/db/s/split_chunk.cpp
@@ -90,14 +90,14 @@ bool checkIfSingleDoc(OperationContext* opCtx,
/**
* Checks the collection's metadata for a successful split on the specified chunkRange using the
- * specified splitKeys. Returns false if the metadata's chunks don't match the new chunk
+ * specified split points. Returns false if the metadata's chunks don't match the new chunk
* boundaries exactly.
*/
bool checkMetadataForSuccessfulSplitChunk(OperationContext* opCtx,
const NamespaceString& nss,
const OID& epoch,
const ChunkRange& chunkRange,
- const std::vector<BSONObj>& splitKeys) {
+ const std::vector<BSONObj>& splitPoints) {
AutoGetCollection autoColl(opCtx, nss, MODE_IS);
const auto metadataAfterSplit =
CollectionShardingRuntime::get(opCtx, nss)->getCurrentMetadataIfKnown();
@@ -106,19 +106,19 @@ bool checkMetadataForSuccessfulSplitChunk(OperationContext* opCtx,
str::stream() << "Collection " << nss.ns() << " changed since split start",
metadataAfterSplit && metadataAfterSplit->getShardVersion().epoch() == epoch);
- auto newChunkBounds(splitKeys);
- auto startKey = chunkRange.getMin();
- newChunkBounds.push_back(chunkRange.getMax());
-
ChunkType nextChunk;
- for (const auto& endKey : newChunkBounds) {
+ for (auto it = splitPoints.begin(); it != splitPoints.end(); ++it) {
// Check that all new chunks fit the new chunk boundaries
- if (!metadataAfterSplit->getNextChunk(startKey, &nextChunk) ||
- nextChunk.getMax().woCompare(endKey)) {
        const auto& currentChunkMinKey =
            it == splitPoints.begin() ? chunkRange.getMin() : *std::prev(it);
+ if (!metadataAfterSplit->getNextChunk(currentChunkMinKey, &nextChunk) ||
+ nextChunk.getMax().woCompare(*it)) {
return false;
}
-
- startKey = endKey;
+ }
+ // Special check for the last chunk produced.
+ if (!metadataAfterSplit->getNextChunk(splitPoints.back(), &nextChunk) ||
+ nextChunk.getMax().woCompare(chunkRange.getMax())) {
+ return false;
}
return true;
@@ -130,7 +130,7 @@ StatusWith<boost::optional<ChunkRange>> splitChunk(OperationContext* opCtx,
const NamespaceString& nss,
const BSONObj& keyPatternObj,
const ChunkRange& chunkRange,
- const std::vector<BSONObj>& splitKeys,
+ std::vector<BSONObj>&& splitPoints,
const std::string& shardName,
const OID& expectedCollectionEpoch,
const bool fromChunkSplitter) {
@@ -141,8 +141,8 @@ StatusWith<boost::optional<ChunkRange>> splitChunk(OperationContext* opCtx,
// data types.
const auto hashedField = ShardKeyPattern::extractHashedField(keyPatternObj);
if (hashedField) {
- for (BSONObj splitKey : splitKeys) {
- auto hashedSplitElement = splitKey[hashedField.fieldName()];
+ for (const auto& splitPoint : splitPoints) {
+ auto hashedSplitElement = splitPoint[hashedField.fieldName()];
if (!ShardKeyPattern::isValidHashedValue(hashedSplitElement)) {
return {ErrorCodes::CannotSplit,
str::stream() << "splitChunk cannot split chunk " << chunkRange.toString()
@@ -154,8 +154,12 @@ StatusWith<boost::optional<ChunkRange>> splitChunk(OperationContext* opCtx,
}
// Commit the split to the config server.
- auto request = SplitChunkRequest(
- nss, shardName, expectedCollectionEpoch, chunkRange, splitKeys, fromChunkSplitter);
+ auto request = SplitChunkRequest(nss,
+ shardName,
+ expectedCollectionEpoch,
+ chunkRange,
+ std::move(splitPoints),
+ fromChunkSplitter);
auto configCmdObj =
request.toConfigCommandBSON(ShardingCatalogClient::kMajorityWriteConcern.toBSON());
@@ -205,7 +209,7 @@ StatusWith<boost::optional<ChunkRange>> splitChunk(OperationContext* opCtx,
if (!commandStatus.isOK() || !writeConcernStatus.isOK()) {
if (checkMetadataForSuccessfulSplitChunk(
- opCtx, nss, expectedCollectionEpoch, chunkRange, splitKeys)) {
+ opCtx, nss, expectedCollectionEpoch, chunkRange, request.getSplitPoints())) {
// Split was committed.
} else if (!commandStatus.isOK()) {
return commandStatus;
@@ -233,12 +237,12 @@ StatusWith<boost::optional<ChunkRange>> splitChunk(OperationContext* opCtx,
}
auto backChunk = ChunkType();
- backChunk.setMin(splitKeys.back());
+ backChunk.setMin(request.getSplitPoints().back());
backChunk.setMax(chunkRange.getMax());
auto frontChunk = ChunkType();
frontChunk.setMin(chunkRange.getMin());
- frontChunk.setMax(splitKeys.front());
+ frontChunk.setMax(request.getSplitPoints().front());
KeyPattern shardKeyPattern(keyPatternObj);
if (shardKeyPattern.globalMax().woCompare(backChunk.getMax()) == 0 &&
diff --git a/src/mongo/db/s/split_chunk.h b/src/mongo/db/s/split_chunk.h
index 00f30dba6dd..d8787d358eb 100644
--- a/src/mongo/db/s/split_chunk.h
+++ b/src/mongo/db/s/split_chunk.h
@@ -59,9 +59,8 @@ StatusWith<boost::optional<ChunkRange>> splitChunk(OperationContext* opCtx,
const NamespaceString& nss,
const BSONObj& keyPatternObj,
const ChunkRange& chunkRange,
- const std::vector<BSONObj>& splitKeys,
+ std::vector<BSONObj>&& splitPoints,
const std::string& shardName,
const OID& expectedCollectionEpoch,
bool fromChunkSplitter = false);
-
} // namespace mongo
diff --git a/src/mongo/db/s/split_chunk_command.cpp b/src/mongo/db/s/split_chunk_command.cpp
index 3c144d71fea..5476c28e787 100644
--- a/src/mongo/db/s/split_chunk_command.cpp
+++ b/src/mongo/db/s/split_chunk_command.cpp
@@ -175,7 +175,7 @@ public:
nss,
keyPatternObj,
chunkRange,
- splitKeys,
+ std::move(splitKeys),
shardName,
expectedCollectionEpoch,
fromChunkSplitter));