author     Silvia Surroca <silvia.surroca@mongodb.com>       2022-11-02 16:17:56 +0000
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>  2022-12-20 16:18:04 +0000
commit     515e14784d55ab38a1673f3e9cfb849d3ff932a5 (patch)
tree       adbcf29e48b64a1037f27eb13b2de5d147bbbdf0 /src/mongo/db/s
parent     81103ed0dc71bb9e3ed5faed6a9f4ed4ccad210e (diff)
download   mongo-515e14784d55ab38a1673f3e9cfb849d3ff932a5.tar.gz
SERVER-70768 Balancer use wrong chunk size for jumbo chunks
(cherry picked from commit b8c329e86ae43522ff0fa52cdecbc23f1b823c00)
Diffstat (limited to 'src/mongo/db/s')
-rw-r--r--  src/mongo/db/s/balancer/balancer.cpp                                |  2
-rw-r--r--  src/mongo/db/s/balancer/balancer_defragmentation_policy_impl.cpp    | 20
-rw-r--r--  src/mongo/db/s/balancer/balancer_policy.cpp                         | 35
-rw-r--r--  src/mongo/db/s/balancer/balancer_policy.h                           |  5
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager.h                    |  3
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp | 28
6 files changed, 72 insertions, 21 deletions
diff --git a/src/mongo/db/s/balancer/balancer.cpp b/src/mongo/db/s/balancer/balancer.cpp
index 7432d038afc..f4752f3d12d 100644
--- a/src/mongo/db/s/balancer/balancer.cpp
+++ b/src/mongo/db/s/balancer/balancer.cpp
@@ -1097,7 +1097,7 @@ int Balancer::_moveChunks(OperationContext* opCtx,
opCtx, migrateInfo.uuid, repl::ReadConcernLevel::kMajorityReadConcern);
ShardingCatalogManager::get(opCtx)->splitOrMarkJumbo(
- opCtx, collection.getNss(), migrateInfo.minKey);
+ opCtx, collection.getNss(), migrateInfo.minKey, migrateInfo.getMaxChunkSizeBytes());
continue;
}
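Note: a minimal, standalone sketch of the call-site pattern in this hunk, using hypothetical simplified types (std::optional stands in for boost::optional, and the real code passes an OperationContext and calls ShardingCatalogManager::splitOrMarkJumbo). The balancer now forwards whatever max chunk size the migration carries and leaves any fallback to the callee.

#include <cstdint>
#include <iostream>
#include <optional>
#include <string>

// Hypothetical stand-in for MigrateInfo: it may or may not carry a max chunk size.
struct MigrateInfoSketch {
    std::string nss;
    std::optional<int64_t> optMaxChunkSizeBytes;

    std::optional<int64_t> getMaxChunkSizeBytes() const {
        return optMaxChunkSizeBytes;
    }
};

// Hypothetical stand-in for ShardingCatalogManager::splitOrMarkJumbo(): the callee,
// not the caller, decides what to do when no size is provided.
void splitOrMarkJumboSketch(const std::string& nss,
                            std::optional<int64_t> optMaxChunkSizeBytes) {
    if (optMaxChunkSizeBytes) {
        std::cout << nss << ": split using " << *optMaxChunkSizeBytes << " bytes\n";
    } else {
        std::cout << nss << ": split using the collection/balancer default\n";
    }
}

int main() {
    MigrateInfoSketch migrateInfo{"test.coll", int64_t{128} * 1024 * 1024};
    // Before the fix the size was dropped at the call site; now it is forwarded as-is.
    splitOrMarkJumboSketch(migrateInfo.nss, migrateInfo.getMaxChunkSizeBytes());
    return 0;
}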
diff --git a/src/mongo/db/s/balancer/balancer_defragmentation_policy_impl.cpp b/src/mongo/db/s/balancer/balancer_defragmentation_policy_impl.cpp
index df80d254e12..d52421edc9e 100644
--- a/src/mongo/db/s/balancer/balancer_defragmentation_policy_impl.cpp
+++ b/src/mongo/db/s/balancer/balancer_defragmentation_policy_impl.cpp
@@ -402,7 +402,8 @@ public:
std::move(collectionChunks),
std::move(shardInfos),
std::move(collectionZones),
- smallChunkSizeThresholdBytes));
+ smallChunkSizeThresholdBytes,
+ maxChunkSizeBytes));
}
DefragmentationPhaseEnum getType() const override {
@@ -465,7 +466,8 @@ public:
availableShards->erase(targetSibling->shard);
auto smallChunkVersion = getShardVersion(opCtx, nextSmallChunk->shard, _nss);
_outstandingMigrations.emplace_back(nextSmallChunk, targetSibling);
- return _outstandingMigrations.back().asMigrateInfo(_uuid, _nss, smallChunkVersion);
+ return _outstandingMigrations.back().asMigrateInfo(
+ _uuid, _nss, smallChunkVersion, _maxChunkSizeBytes);
}
return boost::none;
@@ -717,7 +719,8 @@ private:
MigrateInfo asMigrateInfo(const UUID& collUuid,
const NamespaceString& nss,
- const ChunkVersion& version) const {
+ const ChunkVersion& version,
+ uint64_t maxChunkSizeBytes) const {
return MigrateInfo(chunkToMergeWith->shard,
chunkToMove->shard,
nss,
@@ -725,7 +728,8 @@ private:
chunkToMove->range.getMin(),
chunkToMove->range.getMax(),
version,
- MoveChunkRequest::ForceJumbo::kDoNotForce);
+ MoveChunkRequest::ForceJumbo::kDoNotForce,
+ maxChunkSizeBytes);
}
ChunkRange asMergedRange() const {
@@ -790,6 +794,8 @@ private:
const int64_t _smallChunkSizeThresholdBytes;
+ const uint64_t _maxChunkSizeBytes;
+
bool _aborted{false};
DefragmentationPhaseEnum _nextPhase{DefragmentationPhaseEnum::kMergeChunks};
@@ -799,7 +805,8 @@ private:
std::vector<ChunkType>&& collectionChunks,
stdx::unordered_map<ShardId, ShardInfo>&& shardInfos,
ZoneInfo&& collectionZones,
- uint64_t smallChunkSizeThresholdBytes)
+ uint64_t smallChunkSizeThresholdBytes,
+ uint64_t maxChunkSizeBytes)
: _nss(nss),
_uuid(uuid),
_collectionChunks(),
@@ -810,7 +817,8 @@ private:
_actionableMerges(),
_outstandingMerges(),
_zoneInfo(std::move(collectionZones)),
- _smallChunkSizeThresholdBytes(smallChunkSizeThresholdBytes) {
+ _smallChunkSizeThresholdBytes(smallChunkSizeThresholdBytes),
+ _maxChunkSizeBytes(maxChunkSizeBytes) {
// Load the collection routing table in a std::list to ease later manipulation
for (auto&& chunk : collectionChunks) {
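Note: a rough sketch of what the defragmentation change amounts to, with made-up standalone types rather than the real phase and MigrateInfo classes. The phase caches the collection's max chunk size at construction and stamps it onto every migration it produces, mirroring the new _maxChunkSizeBytes member and the extra asMigrateInfo() argument above.

#include <cstdint>
#include <iostream>
#include <optional>
#include <string>

// Hypothetical stand-in for a scheduled migration.
struct MigrationSketch {
    std::string fromShard;
    std::string toShard;
    std::optional<int64_t> maxChunkSizeBytes;
};

// Hypothetical stand-in for the defragmentation phase.
class MoveAndMergePhaseSketch {
public:
    MoveAndMergePhaseSketch(uint64_t smallChunkSizeThresholdBytes, uint64_t maxChunkSizeBytes)
        : _smallChunkSizeThresholdBytes(smallChunkSizeThresholdBytes),
          _maxChunkSizeBytes(maxChunkSizeBytes) {}

    MigrationSketch nextMigration(const std::string& from, const std::string& to) const {
        // Equivalent to asMigrateInfo(..., _maxChunkSizeBytes): the phase, not the
        // caller, knows which per-collection size limit applies.
        return MigrationSketch{from, to, static_cast<int64_t>(_maxChunkSizeBytes)};
    }

private:
    const uint64_t _smallChunkSizeThresholdBytes;
    const uint64_t _maxChunkSizeBytes;
};

int main() {
    MoveAndMergePhaseSketch phase(/*smallChunkSizeThresholdBytes=*/16 * 1024 * 1024,
                                  /*maxChunkSizeBytes=*/128 * 1024 * 1024);
    auto migration = phase.nextMigration("shard0000", "shard0001");
    std::cout << "migrate " << migration.fromShard << " -> " << migration.toShard
              << ", max chunk size " << *migration.maxChunkSizeBytes << " bytes\n";
    return 0;
}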
diff --git a/src/mongo/db/s/balancer/balancer_policy.cpp b/src/mongo/db/s/balancer/balancer_policy.cpp
index 59a1b5b1197..0cf71995d04 100644
--- a/src/mongo/db/s/balancer/balancer_policy.cpp
+++ b/src/mongo/db/s/balancer/balancer_policy.cpp
@@ -37,6 +37,7 @@
#include "mongo/db/s/balancer/type_migration.h"
#include "mongo/logv2/log.h"
+#include "mongo/s/balancer_configuration.h"
#include "mongo/s/catalog/type_shard.h"
#include "mongo/s/catalog/type_tags.h"
#include "mongo/s/grid.h"
@@ -495,8 +496,19 @@ MigrateInfosWithReason BalancerPolicy::balance(
}
invariant(to != stat.shardId);
- migrations.emplace_back(
- to, distribution.nss(), chunk, MoveChunkRequest::ForceJumbo::kForceBalancer);
+
+ auto maxChunkSizeBytes = [&]() -> boost::optional<int64_t> {
+ if (collDataSizeInfo.has_value()) {
+ return collDataSizeInfo->maxChunkSizeBytes;
+ }
+ return boost::none;
+ }();
+
+ migrations.emplace_back(to,
+ distribution.nss(),
+ chunk,
+ MoveChunkRequest::ForceJumbo::kForceBalancer,
+ maxChunkSizeBytes);
if (firstReason == MigrationReason::none) {
firstReason = MigrationReason::drain;
}
@@ -564,11 +576,20 @@ MigrateInfosWithReason BalancerPolicy::balance(
}
invariant(to != stat.shardId);
+
+ auto maxChunkSizeBytes = [&]() -> boost::optional<int64_t> {
+ if (collDataSizeInfo.has_value()) {
+ return collDataSizeInfo->maxChunkSizeBytes;
+ }
+ return boost::none;
+ }();
+
migrations.emplace_back(to,
distribution.nss(),
chunk,
forceJumbo ? MoveChunkRequest::ForceJumbo::kForceBalancer
- : MoveChunkRequest::ForceJumbo::kDoNotForce);
+ : MoveChunkRequest::ForceJumbo::kDoNotForce,
+ maxChunkSizeBytes);
if (firstReason == MigrationReason::none) {
firstReason = MigrationReason::zoneViolation;
}
@@ -859,7 +880,8 @@ string ZoneRange::toString() const {
MigrateInfo::MigrateInfo(const ShardId& a_to,
const NamespaceString& a_nss,
const ChunkType& a_chunk,
- const MoveChunkRequest::ForceJumbo a_forceJumbo)
+ const MoveChunkRequest::ForceJumbo a_forceJumbo,
+ boost::optional<int64_t> maxChunkSizeBytes)
: nss(a_nss), uuid(a_chunk.getCollectionUUID()) {
invariant(a_to.isValid());
@@ -870,6 +892,7 @@ MigrateInfo::MigrateInfo(const ShardId& a_to,
maxKey = a_chunk.getMax();
version = a_chunk.getVersion();
forceJumbo = a_forceJumbo;
+ optMaxChunkSizeBytes = maxChunkSizeBytes;
}
MigrateInfo::MigrateInfo(const ShardId& a_to,
@@ -921,6 +944,10 @@ string MigrateInfo::toString() const {
<< ", to " << to;
}
+boost::optional<int64_t> MigrateInfo::getMaxChunkSizeBytes() const {
+ return optMaxChunkSizeBytes;
+}
+
SplitInfo::SplitInfo(const ShardId& inShardId,
const NamespaceString& inNss,
const ChunkVersion& inCollectionVersion,
diff --git a/src/mongo/db/s/balancer/balancer_policy.h b/src/mongo/db/s/balancer/balancer_policy.h
index af9377999a6..35f0c8ada30 100644
--- a/src/mongo/db/s/balancer/balancer_policy.h
+++ b/src/mongo/db/s/balancer/balancer_policy.h
@@ -59,7 +59,8 @@ struct MigrateInfo {
MigrateInfo(const ShardId& a_to,
const NamespaceString& a_nss,
const ChunkType& a_chunk,
- MoveChunkRequest::ForceJumbo a_forceJumbo);
+ MoveChunkRequest::ForceJumbo a_forceJumbo,
+ boost::optional<int64_t> maxChunkSizeBytes = boost::none);
MigrateInfo(const ShardId& a_to,
const ShardId& a_from,
@@ -77,6 +78,8 @@ struct MigrateInfo {
std::string toString() const;
+ boost::optional<int64_t> getMaxChunkSizeBytes() const;
+
NamespaceString nss;
UUID uuid;
ShardId to;
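Note: the same immediately-invoked lambda appears twice in the balancer_policy.cpp hunks above. A standalone sketch of that pattern (hypothetical CollDataSizeInfoSketch type, std::optional in place of boost::optional) shows how an optional per-collection size becomes the optional constructor argument, so MigrateInfo only carries a size when one is actually known.

#include <cstdint>
#include <iostream>
#include <optional>

// Hypothetical stand-in for the per-collection size information the balancer
// may or may not have gathered for this balancing round.
struct CollDataSizeInfoSketch {
    int64_t maxChunkSizeBytes;
};

int main() {
    std::optional<CollDataSizeInfoSketch> collDataSizeInfo{CollDataSizeInfoSketch{256 * 1024 * 1024}};

    // Same shape as the lambda in BalancerPolicy::balance(): produce an optional
    // size only when collDataSizeInfo is populated.
    auto maxChunkSizeBytes = [&]() -> std::optional<int64_t> {
        if (collDataSizeInfo.has_value()) {
            return collDataSizeInfo->maxChunkSizeBytes;
        }
        return std::nullopt;
    }();

    if (maxChunkSizeBytes) {
        std::cout << "scheduling migration with max chunk size " << *maxChunkSizeBytes << " bytes\n";
    } else {
        std::cout << "scheduling migration without an explicit max chunk size\n";
    }
    return 0;
}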
diff --git a/src/mongo/db/s/config/sharding_catalog_manager.h b/src/mongo/db/s/config/sharding_catalog_manager.h
index 44fefe25496..f6e791e0589 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager.h
+++ b/src/mongo/db/s/config/sharding_catalog_manager.h
@@ -348,7 +348,8 @@ public:
*/
void splitOrMarkJumbo(OperationContext* opCtx,
const NamespaceString& nss,
- const BSONObj& minKey);
+ const BSONObj& minKey,
+ boost::optional<int64_t> optMaxChunkSizeBytes);
/**
* In a transaction, sets the 'allowMigrations' to the requested state and bumps the collection
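Note: the new optMaxChunkSizeBytes parameter is resolved inside splitOrMarkJumbo, as the sharding_catalog_manager_chunk_operations.cpp diff below shows. A simplified, standalone sketch of that resolution order (hypothetical CollectionSketch and BalancerConfigSketch types): caller-provided value first, then the collection-level setting, then the cluster-wide default.

#include <cstdint>
#include <iostream>
#include <optional>

// Hypothetical stand-ins for the collection metadata and the balancer configuration.
struct CollectionSketch {
    std::optional<int64_t> maxChunkSizeBytes;  // per-collection setting, if configured
};
struct BalancerConfigSketch {
    int64_t maxChunkSizeBytes = 64 * 1024 * 1024;  // cluster-wide default
};

// Mirrors the resolution order in the splitOrMarkJumbo() change below:
// explicit caller value, then the collection-level setting, then the global default.
int64_t resolveMaxChunkSizeBytes(std::optional<int64_t> optMaxChunkSizeBytes,
                                 const CollectionSketch& coll,
                                 const BalancerConfigSketch& balancerConfig) {
    if (optMaxChunkSizeBytes.has_value()) {
        return *optMaxChunkSizeBytes;
    }
    return coll.maxChunkSizeBytes.value_or(balancerConfig.maxChunkSizeBytes);
}

int main() {
    CollectionSketch coll{int64_t{256} * 1024 * 1024};
    BalancerConfigSketch config;
    std::cout << resolveMaxChunkSizeBytes(std::nullopt, coll, config) << "\n";   // 268435456
    std::cout << resolveMaxChunkSizeBytes(int64_t{1024}, coll, config) << "\n";  // 1024
    std::cout << resolveMaxChunkSizeBytes(std::nullopt, {}, config) << "\n";     // 67108864
    return 0;
}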
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
index 76af2ce840a..3c02f089b4c 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
@@ -1764,19 +1764,31 @@ void ShardingCatalogManager::bumpMultipleCollectionVersionsAndChangeMetadataInTx
void ShardingCatalogManager::splitOrMarkJumbo(OperationContext* opCtx,
const NamespaceString& nss,
- const BSONObj& minKey) {
+ const BSONObj& minKey,
+ boost::optional<int64_t> optMaxChunkSizeBytes) {
const auto cm = uassertStatusOK(
Grid::get(opCtx)->catalogCache()->getShardedCollectionRoutingInfoWithRefresh(opCtx, nss));
auto chunk = cm.findIntersectingChunkWithSimpleCollation(minKey);
try {
- const auto splitPoints = uassertStatusOK(shardutil::selectChunkSplitPoints(
- opCtx,
- chunk.getShardId(),
- nss,
- cm.getShardKeyPattern(),
- ChunkRange(chunk.getMin(), chunk.getMax()),
- Grid::get(opCtx)->getBalancerConfiguration()->getMaxChunkSizeBytes()));
+ const auto maxChunkSizeBytes = [&]() -> int64_t {
+ if (optMaxChunkSizeBytes.has_value()) {
+ return *optMaxChunkSizeBytes;
+ }
+
+ auto coll = Grid::get(opCtx)->catalogClient()->getCollection(
+ opCtx, nss, repl::ReadConcernLevel::kMajorityReadConcern);
+ return coll.getMaxChunkSizeBytes().value_or(
+ Grid::get(opCtx)->getBalancerConfiguration()->getMaxChunkSizeBytes());
+ }();
+
+ const auto splitPoints = uassertStatusOK(
+ shardutil::selectChunkSplitPoints(opCtx,
+ chunk.getShardId(),
+ nss,
+ cm.getShardKeyPattern(),
+ ChunkRange(chunk.getMin(), chunk.getMax()),
+ maxChunkSizeBytes));
if (splitPoints.empty()) {
LOGV2(21873,