author     Silvia Surroca <silvia.surroca@mongodb.com>       2023-03-21 09:02:56 +0000
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>  2023-04-13 13:48:51 +0000
commit     61f6d836338319a4b17fcbf87584aa560e58df23
tree       a48eae36817b990f107bf2dd0ae4c74005be0d25
parent     cdbde83016a9236d91418fca00c70095841fde44
SERVER-73383 Defragmentation may start over right after finishing
(cherry picked from commit 713d6f3d730c2ae15c68ba406c5572b47c859747)
5 files changed, 237 insertions, 132 deletions
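In short, the patch replaces the old flow, where the balancer fetched every collection from the catalog and invoked startCollectionDefragmentation() once per collection, with a single startCollectionDefragmentations() call in which the policy itself queries config.collections for documents whose defragmentCollection flag is true and skips any UUID already present in _defragmentationStates. That skip is what keeps a defragmentation that has just finished from being started over. Below is a minimal, self-contained C++ sketch of that selection logic; Uuid, CollectionDoc, DefragmentationPolicySketch, and the fetch callback are illustrative stand-ins, not the real server types:

    #include <functional>
    #include <iostream>
    #include <string>
    #include <unordered_map>
    #include <vector>

    // Stand-ins for the real server types (illustrative only).
    using Uuid = std::string;
    struct CollectionDoc {
        Uuid uuid;                  // collection UUID from config.collections
        bool defragmentCollection;  // the flag the new query filters on
    };
    struct DefragmentationState {};  // per-collection in-memory progress

    class DefragmentationPolicySketch {
    public:
        // Mirrors startCollectionDefragmentations(): one filtered fetch, then a
        // dedup check against the in-memory state map before initializing state.
        void startCollectionDefragmentations(
            const std::function<std::vector<CollectionDoc>()>& fetchFlaggedCollections) {
            // In the real patch this fetch is an exhaustiveFindOnConfig with the
            // query {defragmentCollection: true}, issued while holding _stateMutex.
            for (const CollectionDoc& coll : fetchFlaggedCollections()) {
                if (_defragmentationStates.count(coll.uuid)) {
                    continue;  // already being defragmented; do not start over
                }
                _defragmentationStates.emplace(coll.uuid, DefragmentationState{});
                std::cout << "starting defragmentation for " << coll.uuid << '\n';
            }
        }

    private:
        std::unordered_map<Uuid, DefragmentationState> _defragmentationStates;
    };

    int main() {
        DefragmentationPolicySketch policy;
        auto fetch = [] {
            // Only documents with the flag set come back from the filtered query.
            return std::vector<CollectionDoc>{{"uuid-1", true}, {"uuid-3", true}};
        };
        policy.startCollectionDefragmentations(fetch);
        policy.startCollectionDefragmentations(fetch);  // no-op: UUIDs already tracked
    }

The dedup check corresponds to the _defragmentationStates.contains() test added in balancer_defragmentation_policy_impl.cpp, and the filtered fetch corresponds to its exhaustiveFindOnConfig query; the new tests at the bottom of the diff exercise the flagged/unflagged distinction.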
diff --git a/src/mongo/db/s/balancer/balancer.cpp b/src/mongo/db/s/balancer/balancer.cpp
index a148fa0a318..354d12b0ba7 100644
--- a/src/mongo/db/s/balancer/balancer.cpp
+++ b/src/mongo/db/s/balancer/balancer.cpp
@@ -658,7 +658,6 @@ void Balancer::_mainThread() {
     Client::initThread("Balancer");
     auto opCtx = cc().makeOperationContext();
     auto shardingContext = Grid::get(opCtx.get());
-    const auto catalogClient = ShardingCatalogManager::get(opCtx.get())->localCatalogClient();
 
     LOGV2(21856, "CSRS balancer is starting");
 
@@ -770,13 +769,7 @@ void Balancer::_mainThread() {
         }
 
         // Collect and apply up-to-date configuration values on the cluster collections.
-        {
-            OperationContext* ctx = opCtx.get();
-            auto allCollections = catalogClient->getCollections(ctx, {});
-            for (const auto& coll : allCollections) {
-                _defragmentationPolicy->startCollectionDefragmentation(ctx, coll);
-            }
-        }
+        _defragmentationPolicy->startCollectionDefragmentations(opCtx.get());
 
         Status status = _splitChunksIfNeeded(opCtx.get());
         if (!status.isOK()) {
diff --git a/src/mongo/db/s/balancer/balancer_defragmentation_policy.h b/src/mongo/db/s/balancer/balancer_defragmentation_policy.h
index bfd81c9250f..0ccdf2f929a 100644
--- a/src/mongo/db/s/balancer/balancer_defragmentation_policy.h
+++ b/src/mongo/db/s/balancer/balancer_defragmentation_policy.h
@@ -45,12 +45,9 @@ public:
     virtual ~BalancerDefragmentationPolicy() {}
 
     /**
-     * Requests the execution of the defragmentation algorithm on the specified collection.
-     * Returns true if the request is accepted, false if ignored (meaning, the specified collection
-     * is already being processed)
+     * Requests the execution of the defragmentation algorithm on the required collections.
      */
-    virtual void startCollectionDefragmentation(OperationContext* opCtx,
-                                                const CollectionType& coll) = 0;
+    virtual void startCollectionDefragmentations(OperationContext* opCtx) = 0;
 
     /**
      * Checks if the collection is currently being defragmented, and signals the defragmentation
diff --git a/src/mongo/db/s/balancer/balancer_defragmentation_policy_impl.cpp b/src/mongo/db/s/balancer/balancer_defragmentation_policy_impl.cpp
index e4437e85550..ac6fb2028d1 100644
--- a/src/mongo/db/s/balancer/balancer_defragmentation_policy_impl.cpp
+++ b/src/mongo/db/s/balancer/balancer_defragmentation_policy_impl.cpp
@@ -1432,13 +1432,26 @@ private:
 
 }  // namespace
 
-void BalancerDefragmentationPolicyImpl::startCollectionDefragmentation(OperationContext* opCtx,
-                                                                       const CollectionType& coll) {
-    {
-        stdx::lock_guard<Latch> lk(_stateMutex);
-        const auto& uuid = coll.getUuid();
-        if (!coll.getDefragmentCollection() || _defragmentationStates.contains(uuid)) {
-            return;
+void BalancerDefragmentationPolicyImpl::startCollectionDefragmentations(OperationContext* opCtx) {
+    stdx::lock_guard<Latch> lk(_stateMutex);
+
+    // Fetch all collections with `defragmentCollection` flag enabled
+    static const auto query = BSON(CollectionType::kDefragmentCollectionFieldName << true);
+    auto configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard();
+    const auto& collDocs = uassertStatusOK(configShard->exhaustiveFindOnConfig(
+                                               opCtx,
+                                               ReadPreferenceSetting(ReadPreference::Nearest),
+                                               repl::ReadConcernLevel::kMajorityReadConcern,
+                                               NamespaceString::kConfigsvrCollectionsNamespace,
+                                               query,
+                                               BSONObj(),
+                                               boost::none))
+                               .docs;
+
+    for (const BSONObj& obj : collDocs) {
+        const CollectionType coll{obj};
+        if (_defragmentationStates.contains(coll.getUuid())) {
+            continue;
         }
         _initializeCollectionState(lk, opCtx, coll);
     }
diff --git a/src/mongo/db/s/balancer/balancer_defragmentation_policy_impl.h b/src/mongo/db/s/balancer/balancer_defragmentation_policy_impl.h
index dfa284aca13..786b15b9571 100644
--- a/src/mongo/db/s/balancer/balancer_defragmentation_policy_impl.h
+++ b/src/mongo/db/s/balancer/balancer_defragmentation_policy_impl.h
@@ -99,8 +99,7 @@ public:
                                   const DefragmentationAction& action,
                                   const DefragmentationActionResponse& response) override;
 
-    void startCollectionDefragmentation(OperationContext* opCtx,
-                                        const CollectionType& coll) override;
+    void startCollectionDefragmentations(OperationContext* opCtx) override;
 
     void abortCollectionDefragmentation(OperationContext* opCtx,
                                         const NamespaceString& nss) override;
diff --git a/src/mongo/db/s/balancer/balancer_defragmentation_policy_test.cpp b/src/mongo/db/s/balancer/balancer_defragmentation_policy_test.cpp
index 0ffb8b256d2..ba99c68bebc 100644
--- a/src/mongo/db/s/balancer/balancer_defragmentation_policy_test.cpp
+++ b/src/mongo/db/s/balancer/balancer_defragmentation_policy_test.cpp
@@ -40,8 +40,13 @@ using ShardStatistics = ClusterStatistics::ShardStatistics;
 
 class BalancerDefragmentationPolicyTest : public ConfigServerTestFixture {
 protected:
-    const NamespaceString kNss{"testDb.testColl"};
-    const UUID kUuid = UUID::gen();
+    inline static const NamespaceString kNss1{"testDb.testColl1"};
+    inline static const UUID kUuid1 = UUID::gen();
+    inline static const NamespaceString kNss2{"testDb.testColl2"};
+    inline static const UUID kUuid2 = UUID::gen();
+    inline static const NamespaceString kNss3{"testDb.testColl3"};
+    inline static const UUID kUuid3 = UUID::gen();
+
     const ShardId kShardId0 = ShardId("shard0");
     const ShardId kShardId1 = ShardId("shard1");
     const ShardId kShardId2 = ShardId("shard2");
@@ -77,12 +82,12 @@ protected:
         : _clusterStats(), _defragmentationPolicy(&_clusterStats, onDefragmentationStateUpdated) {}
 
     CollectionType setupCollectionWithPhase(
+        const NamespaceString& nss,
        const std::vector<ChunkType>& chunkList,
        boost::optional<DefragmentationPhaseEnum> startingPhase = boost::none,
        boost::optional<int64_t> maxChunkSizeBytes = boost::none) {
-        setupShards(kShardList);
-        setupCollection(kNss, kShardKeyPattern, chunkList);
+        setupCollection(nss, kShardKeyPattern, chunkList);
 
         const auto updateClause = [&] {
             BSONObjBuilder builder;
@@ -101,31 +106,33 @@ protected:
             return builder.obj();
         }();
 
+        const UUID& uuid = chunkList.at(0).getCollectionUUID();
         ASSERT_OK(updateToConfigCollection(operationContext(),
                                            CollectionType::ConfigNS,
-                                           BSON(CollectionType::kUuidFieldName << kUuid),
+                                           BSON(CollectionType::kUuidFieldName << uuid),
                                            updateClause,
                                            false));
 
         return Grid::get(operationContext())
             ->catalogClient()
-            ->getCollection(operationContext(), kUuid);
+            ->getCollection(operationContext(), uuid);
     }
 
-    ChunkType makeConfigChunkEntry(const boost::optional<int64_t>& estimatedSize = boost::none) {
-        ChunkType chunk(kUuid, ChunkRange(kKeyAtMin, kKeyAtMax), kCollectionVersion, kShardId0);
+    ChunkType makeConfigChunkEntry(const UUID& uuid,
+                                   const boost::optional<int64_t>& estimatedSize = boost::none) {
+        ChunkType chunk(uuid, ChunkRange(kKeyAtMin, kKeyAtMax), kCollectionVersion, kShardId0);
         chunk.setEstimatedSizeBytes(estimatedSize);
         return chunk;
     }
 
-    std::vector<ChunkType> makeMergeableConfigChunkEntries() {
-        return {ChunkType(kUuid, ChunkRange(kKeyAtMin, kKeyAtTen), kCollectionVersion, kShardId0),
-                ChunkType(kUuid, ChunkRange(kKeyAtTen, kKeyAtMax), kCollectionVersion, kShardId0)};
+    std::vector<ChunkType> makeMergeableConfigChunkEntries(const UUID& uuid) {
+        return {ChunkType(uuid, ChunkRange(kKeyAtMin, kKeyAtTen), kCollectionVersion, kShardId0),
+                ChunkType(uuid, ChunkRange(kKeyAtTen, kKeyAtMax), kCollectionVersion, kShardId0)};
     }
 
-    BSONObj getConfigCollectionEntry() {
+    BSONObj getConfigCollectionEntry(const UUID& uuid) {
         DBDirectClient client(operationContext());
         FindCommandRequest findRequest{NamespaceStringOrUUID{CollectionType::ConfigNS}};
-        findRequest.setFilter(BSON(CollectionType::kUuidFieldName << kUuid));
+        findRequest.setFilter(BSON(CollectionType::kUuidFieldName << uuid));
         auto cursor = client.find(std::move(findRequest));
         if (!cursor || !cursor->more())
             return BSONObj();
@@ -143,7 +150,7 @@ protected:
         return ShardStatistics(id, currentSizeBytes, draining, zones, "");
     }
 
-    void setDefaultClusterStats() {
+    void setDefaultClusterStats(const std::vector<NamespaceString>& nssList = {kNss1}) {
         uint64_t oneKB = 1024 * 1024;
         auto shardInstance = 0;
         std::vector<ShardStatistics> stats;
@@ -151,24 +158,29 @@ protected:
         for (const auto& shard : kShardList) {
             ++shardInstance;
             stats.push_back(buildShardStats(shard.getName(), oneKB * 1024 * shardInstance));
-            collStats[kNss].push_back(buildShardStats(shard.getName(), oneKB * shardInstance));
+
+            for (const auto& nss : nssList) {
+                collStats[nss].push_back(buildShardStats(shard.getName(), oneKB * shardInstance));
+            }
         }
 
         _clusterStats.setStats(std::move(stats), std::move(collStats));
     }
 
-    void verifyExpectedDefragmentationPhaseOndisk(
-        boost::optional<DefragmentationPhaseEnum> expectedPhase) {
+    void verifyExpectedDefragmentationStateOnDisk(
+        const UUID& uuid, boost::optional<DefragmentationPhaseEnum> expectedPhase) {
         auto configDoc = findOneOnConfigCollection(operationContext(),
                                                    CollectionType::ConfigNS,
-                                                   BSON(CollectionType::kUuidFieldName << kUuid))
+                                                   BSON(CollectionType::kUuidFieldName << uuid))
                              .getValue();
         if (expectedPhase.has_value()) {
             auto storedDefragmentationPhase = DefragmentationPhase_parse(
                 IDLParserContext("BalancerDefragmentationPolicyTest"),
                 configDoc.getStringField(CollectionType::kDefragmentationPhaseFieldName));
             ASSERT_TRUE(storedDefragmentationPhase == *expectedPhase);
+            ASSERT_TRUE(configDoc[CollectionType::kDefragmentCollectionFieldName].Bool());
         } else {
             ASSERT_FALSE(configDoc.hasField(CollectionType::kDefragmentationPhaseFieldName));
+            ASSERT_FALSE(configDoc.hasField(CollectionType::kDefragmentCollectionFieldName));
         }
     };
@@ -192,32 +204,35 @@ TEST_F(BalancerDefragmentationPolicyTest, TestGetNextActionIsNotReadyWhenNotDefr
 }
 
 TEST_F(BalancerDefragmentationPolicyTest, TestAddCollectionWhenCollectionRemovedFailsGracefully) {
-    CollectionType coll(kNss, OID::gen(), Timestamp(1, 1), Date_t::now(), kUuid, kShardKeyPattern);
+    CollectionType coll(
+        kNss1, OID::gen(), Timestamp(1, 1), Date_t::now(), kUuid1, kShardKeyPattern);
     coll.setDefragmentCollection(true);
 
     // Collection entry is not persisted (to simulate collection dropped), defragmentation should
     // not begin.
     ASSERT_FALSE(_defragmentationPolicy.isDefragmentingCollection(coll.getUuid()));
-    _defragmentationPolicy.startCollectionDefragmentation(operationContext(), coll);
+    _defragmentationPolicy.startCollectionDefragmentations(operationContext());
    ASSERT_FALSE(_defragmentationPolicy.isDefragmentingCollection(coll.getUuid()));
 
     auto configDoc = findOneOnConfigCollection(operationContext(),
                                                CollectionType::ConfigNS,
-                                               BSON(CollectionType::kUuidFieldName << kUuid));
+                                               BSON(CollectionType::kUuidFieldName << kUuid1));
     ASSERT_EQ(configDoc.getStatus(), Status(ErrorCodes::NoMatchingDocument, "No document found"));
 }
 
 // Phase 1 tests.
 
 TEST_F(BalancerDefragmentationPolicyTest, TestPhaseOneAddSingleChunkCollectionTriggersDataSize) {
-    auto coll = setupCollectionWithPhase({makeConfigChunkEntry()});
+    setupShards(kShardList);
+    auto coll = setupCollectionWithPhase(kNss1, {makeConfigChunkEntry(kUuid1)});
     auto nextAction = _defragmentationPolicy.getNextStreamingAction(operationContext());
     ASSERT_TRUE(nextAction == boost::none);
     ASSERT_FALSE(_defragmentationPolicy.isDefragmentingCollection(coll.getUuid()));
 
-    _defragmentationPolicy.startCollectionDefragmentation(operationContext(), coll);
+    _defragmentationPolicy.startCollectionDefragmentations(operationContext());
 
     // 1. The collection should be marked as undergoing through phase 1 of the algorithm...
     ASSERT_TRUE(_defragmentationPolicy.isDefragmentingCollection(coll.getUuid()));
-    verifyExpectedDefragmentationPhaseOndisk(DefragmentationPhaseEnum::kMergeAndMeasureChunks);
+    verifyExpectedDefragmentationStateOnDisk(coll.getUuid(),
+                                             DefragmentationPhaseEnum::kMergeAndMeasureChunks);
 
     // 2. The action returned by the stream should be now an actionable DataSizeCommand...
     nextAction = _defragmentationPolicy.getNextStreamingAction(operationContext());
     DataSizeInfo dataSizeAction = stdx::get<DataSizeInfo>(*nextAction);
@@ -229,16 +244,18 @@ TEST_F(BalancerDefragmentationPolicyTest, TestPhaseOneAddSingleChunkCollectionTr
 
 TEST_F(BalancerDefragmentationPolicyTest,
        AddSingleChunkCollectionWithKnownDataSizeCompletesDefragmentationWithNoOperationIssued) {
-    auto coll = setupCollectionWithPhase({makeConfigChunkEntry(1024)});
+    setupShards(kShardList);
+    auto coll = setupCollectionWithPhase(kNss1, {makeConfigChunkEntry(kUuid1, 1024)});
     setDefaultClusterStats();
 
-    _defragmentationPolicy.startCollectionDefragmentation(operationContext(), coll);
+    _defragmentationPolicy.startCollectionDefragmentations(operationContext());
 
     ASSERT_TRUE(_defragmentationPolicy.isDefragmentingCollection(coll.getUuid()));
 
     auto nextAction = _defragmentationPolicy.getNextStreamingAction(operationContext());
     ASSERT_TRUE(nextAction == boost::none);
 
-    verifyExpectedDefragmentationPhaseOndisk(DefragmentationPhaseEnum::kMoveAndMergeChunks);
+    verifyExpectedDefragmentationStateOnDisk(coll.getUuid(),
+                                             DefragmentationPhaseEnum::kMoveAndMergeChunks);
 
     // kMoveAndMergeChunks has no stream actions/migrations to offer, but the condition has to be
     // verified through a sequence of two action requests (the first being selectChunksToMove()) for
@@ -247,19 +264,21 @@ TEST_F(BalancerDefragmentationPolicyTest,
     auto pendingMigrations =
         _defragmentationPolicy.selectChunksToMove(operationContext(), &availableShards);
     ASSERT_TRUE(pendingMigrations.empty());
-    verifyExpectedDefragmentationPhaseOndisk(DefragmentationPhaseEnum::kMoveAndMergeChunks);
+    verifyExpectedDefragmentationStateOnDisk(coll.getUuid(),
+                                             DefragmentationPhaseEnum::kMoveAndMergeChunks);
 
     nextAction = _defragmentationPolicy.getNextStreamingAction(operationContext());
     ASSERT_TRUE(nextAction == boost::none);
-    verifyExpectedDefragmentationPhaseOndisk(boost::none);
+    verifyExpectedDefragmentationStateOnDisk(coll.getUuid(), boost::none);
     ASSERT_FALSE(_defragmentationPolicy.isDefragmentingCollection(coll.getUuid()));
 }
 
 TEST_F(BalancerDefragmentationPolicyTest,
        TestPhaseOneAcknowledgeFinalDataSizeActionCompletesPhase) {
-    auto coll = setupCollectionWithPhase({makeConfigChunkEntry()});
+    setupShards(kShardList);
+    auto coll = setupCollectionWithPhase(kNss1, {makeConfigChunkEntry(kUuid1)});
     setDefaultClusterStats();
-    _defragmentationPolicy.startCollectionDefragmentation(operationContext(), coll);
+    _defragmentationPolicy.startCollectionDefragmentations(operationContext());
     auto nextAction = _defragmentationPolicy.getNextStreamingAction(operationContext());
     ASSERT_TRUE(nextAction.has_value());
     DataSizeInfo dataSizeAction = stdx::get<DataSizeInfo>(*nextAction);
@@ -269,7 +288,7 @@ TEST_F(BalancerDefragmentationPolicyTest,
 
     // 1. The outcome of the data size has been stored in the expected document...
     auto chunkQuery = BSON(ChunkType::collectionUUID()
-                           << kUuid << ChunkType::min(kKeyAtMin) << ChunkType::max(kKeyAtMax));
+                           << kUuid1 << ChunkType::min(kKeyAtMin) << ChunkType::max(kKeyAtMax));
     auto configChunkDoc =
         findOneOnConfigCollection(operationContext(), ChunkType::ConfigNS, chunkQuery).getValue();
     ASSERT_EQ(configChunkDoc.getIntField(ChunkType::estimatedSizeBytes.name()), 2000);
@@ -278,14 +297,16 @@ TEST_F(BalancerDefragmentationPolicyTest,
     nextAction = _defragmentationPolicy.getNextStreamingAction(operationContext());
     ASSERT_TRUE(nextAction == boost::none);
     ASSERT_TRUE(_defragmentationPolicy.isDefragmentingCollection(coll.getUuid()));
-    verifyExpectedDefragmentationPhaseOndisk(DefragmentationPhaseEnum::kMoveAndMergeChunks);
+    verifyExpectedDefragmentationStateOnDisk(coll.getUuid(),
+                                             DefragmentationPhaseEnum::kMoveAndMergeChunks);
 }
 
 TEST_F(BalancerDefragmentationPolicyTest,
        TestPhaseOneDataSizeResponsesWithMaxSizeReachedCausesChunkToBeSkippedByPhaseTwo) {
-    auto coll = setupCollectionWithPhase({makeConfigChunkEntry()});
+    setupShards(kShardList);
+    auto coll = setupCollectionWithPhase(kNss1, {makeConfigChunkEntry(kUuid1)});
     setDefaultClusterStats();
-    _defragmentationPolicy.startCollectionDefragmentation(operationContext(), coll);
+    _defragmentationPolicy.startCollectionDefragmentations(operationContext());
     auto nextAction = _defragmentationPolicy.getNextStreamingAction(operationContext());
     ASSERT_TRUE(nextAction.has_value());
     DataSizeInfo dataSizeAction = stdx::get<DataSizeInfo>(*nextAction);
@@ -295,7 +316,7 @@ TEST_F(BalancerDefragmentationPolicyTest,
 
     // 1. The outcome of the data size has been stored in the expected document...
     auto chunkQuery = BSON(ChunkType::collectionUUID()
-                           << kUuid << ChunkType::min(kKeyAtMin) << ChunkType::max(kKeyAtMax));
+                           << kUuid1 << ChunkType::min(kKeyAtMin) << ChunkType::max(kKeyAtMax));
     auto configChunkDoc =
         findOneOnConfigCollection(operationContext(), ChunkType::ConfigNS, chunkQuery).getValue();
     ASSERT_EQ(configChunkDoc.getField("estimatedDataSizeBytes").safeNumberLong(),
@@ -305,12 +326,13 @@ TEST_F(BalancerDefragmentationPolicyTest,
     nextAction = _defragmentationPolicy.getNextStreamingAction(operationContext());
     ASSERT_TRUE(nextAction == boost::none);
     ASSERT_FALSE(_defragmentationPolicy.isDefragmentingCollection(coll.getUuid()));
-    verifyExpectedDefragmentationPhaseOndisk(boost::none);
+    verifyExpectedDefragmentationStateOnDisk(coll.getUuid(), boost::none);
 }
 
 TEST_F(BalancerDefragmentationPolicyTest, TestRetriableFailedDataSizeActionGetsReissued) {
-    auto coll = setupCollectionWithPhase({makeConfigChunkEntry()});
-    _defragmentationPolicy.startCollectionDefragmentation(operationContext(), coll);
+    setupShards(kShardList);
+    auto coll = setupCollectionWithPhase(kNss1, {makeConfigChunkEntry(kUuid1)});
+    _defragmentationPolicy.startCollectionDefragmentations(operationContext());
     auto nextAction = _defragmentationPolicy.getNextStreamingAction(operationContext());
     DataSizeInfo failingDataSizeAction = stdx::get<DataSizeInfo>(*nextAction);
     StatusWith<DataSizeResponse> response(
@@ -337,8 +359,9 @@ TEST_F(BalancerDefragmentationPolicyTest, TestRetriableFailedDataSizeActionGetsR
 }
 
 TEST_F(BalancerDefragmentationPolicyTest, TestRemoveCollectionEndsDefragmentation) {
-    auto coll = setupCollectionWithPhase({makeConfigChunkEntry()});
-    _defragmentationPolicy.startCollectionDefragmentation(operationContext(), coll);
+    setupShards(kShardList);
+    auto coll = setupCollectionWithPhase(kNss1, {makeConfigChunkEntry(kUuid1)});
+    _defragmentationPolicy.startCollectionDefragmentations(operationContext());
     auto nextAction = _defragmentationPolicy.getNextStreamingAction(operationContext());
     DataSizeInfo dataSizeAction = stdx::get<DataSizeInfo>(*nextAction);
@@ -357,44 +380,50 @@ TEST_F(BalancerDefragmentationPolicyTest, TestRemoveCollectionEndsDefragmentatio
 }
 
 TEST_F(BalancerDefragmentationPolicyTest, TestPhaseOneUserCancellationFinishesDefragmentation) {
-    auto coll = setupCollectionWithPhase({makeConfigChunkEntry()});
-    _defragmentationPolicy.startCollectionDefragmentation(operationContext(), coll);
+    setupShards(kShardList);
+    auto coll = setupCollectionWithPhase(kNss1, {makeConfigChunkEntry(kUuid1)});
+    _defragmentationPolicy.startCollectionDefragmentations(operationContext());
 
     // Collection should be in phase 1
-    verifyExpectedDefragmentationPhaseOndisk(DefragmentationPhaseEnum::kMergeAndMeasureChunks);
+    verifyExpectedDefragmentationStateOnDisk(coll.getUuid(),
+                                             DefragmentationPhaseEnum::kMergeAndMeasureChunks);
 
     // User cancellation of defragmentation
-    _defragmentationPolicy.abortCollectionDefragmentation(operationContext(), kNss);
+    _defragmentationPolicy.abortCollectionDefragmentation(operationContext(), kNss1);
 
     // Defragmentation should complete since the NoMoreAutoSplitter feature flag is enabled
     auto nextAction = _defragmentationPolicy.getNextStreamingAction(operationContext());
     ASSERT_TRUE(nextAction == boost::none);
     ASSERT_FALSE(_defragmentationPolicy.isDefragmentingCollection(coll.getUuid()));
-    verifyExpectedDefragmentationPhaseOndisk(boost::none);
+    verifyExpectedDefragmentationStateOnDisk(coll.getUuid(), boost::none);
 }
 
 TEST_F(BalancerDefragmentationPolicyTest, TestPhaseOneUserCancellationBeginsPhase3) {
     RAIIServerParameterControllerForTest featureFlagNoMoreAutoSplitterOff{
         "featureFlagNoMoreAutoSplitter", false};
-    auto coll = setupCollectionWithPhase({makeConfigChunkEntry()});
-    _defragmentationPolicy.startCollectionDefragmentation(operationContext(), coll);
+    setupShards(kShardList);
+    auto coll = setupCollectionWithPhase(kNss1, {makeConfigChunkEntry(kUuid1)});
+    _defragmentationPolicy.startCollectionDefragmentations(operationContext());
 
     // Collection should be in phase 1
-    verifyExpectedDefragmentationPhaseOndisk(DefragmentationPhaseEnum::kMergeAndMeasureChunks);
+    verifyExpectedDefragmentationStateOnDisk(coll.getUuid(),
+                                             DefragmentationPhaseEnum::kMergeAndMeasureChunks);
 
     // User cancellation of defragmentation
-    _defragmentationPolicy.abortCollectionDefragmentation(operationContext(), kNss);
+    _defragmentationPolicy.abortCollectionDefragmentation(operationContext(), kNss1);
 
     // Defragmentation should transition to phase 3
     auto nextAction = _defragmentationPolicy.getNextStreamingAction(operationContext());
-    verifyExpectedDefragmentationPhaseOndisk(DefragmentationPhaseEnum::kSplitChunks);
+    verifyExpectedDefragmentationStateOnDisk(coll.getUuid(),
+                                             DefragmentationPhaseEnum::kSplitChunks);
     ASSERT_TRUE(nextAction.has_value());
     auto splitVectorAction = stdx::get<AutoSplitVectorInfo>(*nextAction);
 }
 
 TEST_F(BalancerDefragmentationPolicyTest, TestNonRetriableErrorRebuildsCurrentPhase) {
-    auto coll = setupCollectionWithPhase({makeConfigChunkEntry()});
-    _defragmentationPolicy.startCollectionDefragmentation(operationContext(), coll);
+    setupShards(kShardList);
+    auto coll = setupCollectionWithPhase(kNss1, {makeConfigChunkEntry(kUuid1)});
+    _defragmentationPolicy.startCollectionDefragmentations(operationContext());
     auto nextAction = _defragmentationPolicy.getNextStreamingAction(operationContext());
     DataSizeInfo failingDataSizeAction = stdx::get<DataSizeInfo>(*nextAction);
     StatusWith<DataSizeResponse> response(
@@ -405,7 +434,8 @@ TEST_F(BalancerDefragmentationPolicyTest, TestNonRetriableErrorRebuildsCurrentPh
 
     // 1. The collection should be marked as undergoing through phase 1 of the algorithm...
     ASSERT_TRUE(_defragmentationPolicy.isDefragmentingCollection(coll.getUuid()));
-    verifyExpectedDefragmentationPhaseOndisk(DefragmentationPhaseEnum::kMergeAndMeasureChunks);
+    verifyExpectedDefragmentationStateOnDisk(coll.getUuid(),
+                                             DefragmentationPhaseEnum::kMergeAndMeasureChunks);
 
     // 2. The action returned by the stream should be now an actionable DataSizeCommand...
     ASSERT_TRUE(nextAction.has_value());
     DataSizeInfo dataSizeAction = stdx::get<DataSizeInfo>(*nextAction);
@@ -417,10 +447,12 @@ TEST_F(BalancerDefragmentationPolicyTest, TestNonRetriableErrorRebuildsCurrentPh
 
 TEST_F(BalancerDefragmentationPolicyTest,
        TestNonRetriableErrorWaitsForAllOutstandingActionsToComplete) {
+    setupShards(kShardList);
     auto coll = setupCollectionWithPhase(
-        {ChunkType{kUuid, ChunkRange(kKeyAtMin, kKeyAtTen), kCollectionVersion, kShardId0},
-         ChunkType{kUuid, ChunkRange(BSON("x" << 11), kKeyAtMax), kCollectionVersion, kShardId0}});
-    _defragmentationPolicy.startCollectionDefragmentation(operationContext(), coll);
+        kNss1,
+        {ChunkType{kUuid1, ChunkRange(kKeyAtMin, kKeyAtTen), kCollectionVersion, kShardId0},
+         ChunkType{kUuid1, ChunkRange(BSON("x" << 11), kKeyAtMax), kCollectionVersion, kShardId0}});
+    _defragmentationPolicy.startCollectionDefragmentations(operationContext());
     auto nextAction = _defragmentationPolicy.getNextStreamingAction(operationContext());
     DataSizeInfo failingDataSizeAction = stdx::get<DataSizeInfo>(*nextAction);
     auto nextAction2 = _defragmentationPolicy.getNextStreamingAction(operationContext());
@@ -451,8 +483,9 @@ TEST_F(BalancerDefragmentationPolicyTest,
 
 TEST_F(BalancerDefragmentationPolicyTest,
        TestPhaseOneAcknowledgeMergeChunkActionsTriggersDataSizeOnResultingRange) {
-    auto coll = setupCollectionWithPhase({makeMergeableConfigChunkEntries()});
-    _defragmentationPolicy.startCollectionDefragmentation(operationContext(), coll);
+    setupShards(kShardList);
+    auto coll = setupCollectionWithPhase(kNss1, {makeMergeableConfigChunkEntries(kUuid1)});
+    _defragmentationPolicy.startCollectionDefragmentations(operationContext());
     auto nextAction = _defragmentationPolicy.getNextStreamingAction(operationContext());
     auto mergeChunksAction = stdx::get<MergeInfo>(*nextAction);
@@ -473,8 +506,9 @@ TEST_F(BalancerDefragmentationPolicyTest,
 }
 
 TEST_F(BalancerDefragmentationPolicyTest, TestPhaseOneFailedMergeChunksActionGetsReissued) {
-    auto coll = setupCollectionWithPhase(makeMergeableConfigChunkEntries());
-    _defragmentationPolicy.startCollectionDefragmentation(operationContext(), coll);
+    setupShards(kShardList);
+    auto coll = setupCollectionWithPhase(kNss1, {makeMergeableConfigChunkEntries(kUuid1)});
+    _defragmentationPolicy.startCollectionDefragmentations(operationContext());
     auto nextAction = _defragmentationPolicy.getNextStreamingAction(operationContext());
     auto failingMergeChunksAction = stdx::get<MergeInfo>(*nextAction);
@@ -499,10 +533,11 @@ TEST_F(BalancerDefragmentationPolicyTest, TestPhaseOneFailedMergeChunksActionGet
 }
 
 TEST_F(BalancerDefragmentationPolicyTest, TestPhaseOneAcknowledgeSuccessfulMergeAction) {
-    auto coll = setupCollectionWithPhase(makeMergeableConfigChunkEntries());
+    setupShards(kShardList);
+    auto coll = setupCollectionWithPhase(kNss1, {makeMergeableConfigChunkEntries(kUuid1)});
     auto nextAction = _defragmentationPolicy.getNextStreamingAction(operationContext());
     ASSERT_TRUE(nextAction == boost::none);
-    _defragmentationPolicy.startCollectionDefragmentation(operationContext(), coll);
+    _defragmentationPolicy.startCollectionDefragmentations(operationContext());
     nextAction = _defragmentationPolicy.getNextStreamingAction(operationContext());
     ASSERT_TRUE(nextAction.has_value());
     MergeInfo mergeInfoAction = stdx::get<MergeInfo>(*nextAction);
@@ -526,7 +561,7 @@ TEST_F(BalancerDefragmentationPolicyTest, TestPhaseOneAllConsecutive) {
         const auto minKey = (i == 0) ? kKeyAtMin : BSON("x" << i);
         const auto maxKey = BSON("x" << i + 1);
         ChunkType chunk(
-            kUuid,
+            kUuid1,
             ChunkRange(minKey, maxKey),
             ChunkVersion({kCollectionVersion.epoch(), kCollectionVersion.getTimestamp()},
                          {1, uint32_t(i)}),
@@ -537,15 +572,16 @@ TEST_F(BalancerDefragmentationPolicyTest, TestPhaseOneAllConsecutive) {
         const auto minKey = BSON("x" << i);
         const auto maxKey = (i == 9) ? kKeyAtMax : BSON("x" << i + 1);
         ChunkType chunk(
-            kUuid,
+            kUuid1,
             ChunkRange(minKey, maxKey),
             ChunkVersion({kCollectionVersion.epoch(), kCollectionVersion.getTimestamp()},
                          {1, uint32_t(i)}),
             kShardId1);
         chunkList.push_back(chunk);
     }
-    auto coll = setupCollectionWithPhase(chunkList, boost::none, boost::none);
-    _defragmentationPolicy.startCollectionDefragmentation(operationContext(), coll);
+    setupShards(kShardList);
+    auto coll = setupCollectionWithPhase(kNss1, chunkList, boost::none, boost::none);
+    _defragmentationPolicy.startCollectionDefragmentations(operationContext());
     // Test
     auto nextAction = _defragmentationPolicy.getNextStreamingAction(operationContext());
     ASSERT_TRUE(nextAction.has_value());
@@ -577,15 +613,16 @@ TEST_F(BalancerDefragmentationPolicyTest, PhaseOneNotConsecutive) {
         const auto maxKey = (i == 9) ? kKeyAtMax : BSON("x" << i + 1);
         ShardId chosenShard = (i == 5) ? kShardId1 : kShardId0;
         ChunkType chunk(
-            kUuid,
+            kUuid1,
            ChunkRange(minKey, maxKey),
            ChunkVersion({kCollectionVersion.epoch(), kCollectionVersion.getTimestamp()},
                         {1, uint32_t(i)}),
            chosenShard);
         chunkList.push_back(chunk);
     }
-    auto coll = setupCollectionWithPhase(chunkList, boost::none, boost::none);
-    _defragmentationPolicy.startCollectionDefragmentation(operationContext(), coll);
+    setupShards(kShardList);
+    auto coll = setupCollectionWithPhase(kNss1, chunkList, boost::none, boost::none);
+    _defragmentationPolicy.startCollectionDefragmentations(operationContext());
     // Three actions (in an unspecified order) should be immediately available.
     auto nextAction = _defragmentationPolicy.getNextStreamingAction(operationContext());
     ASSERT_TRUE(nextAction.has_value());
@@ -635,14 +672,16 @@ TEST_F(BalancerDefragmentationPolicyTest, PhaseOneNotConsecutive) {
 // Phase 2 tests.
 
 TEST_F(BalancerDefragmentationPolicyTest, TestPhaseTwoMissingDataSizeRestartsPhase1) {
-    auto coll = setupCollectionWithPhase({makeConfigChunkEntry()},
-                                         DefragmentationPhaseEnum::kMoveAndMergeChunks);
+    setupShards(kShardList);
+    auto coll = setupCollectionWithPhase(
+        kNss1, {makeConfigChunkEntry(kUuid1)}, DefragmentationPhaseEnum::kMoveAndMergeChunks);
     setDefaultClusterStats();
-    _defragmentationPolicy.startCollectionDefragmentation(operationContext(), coll);
+    _defragmentationPolicy.startCollectionDefragmentations(operationContext());
     // Should be in phase 1
     ASSERT_TRUE(_defragmentationPolicy.isDefragmentingCollection(coll.getUuid()));
-    verifyExpectedDefragmentationPhaseOndisk(DefragmentationPhaseEnum::kMergeAndMeasureChunks);
+    verifyExpectedDefragmentationStateOnDisk(kUuid1,
+                                             DefragmentationPhaseEnum::kMergeAndMeasureChunks);
 
     // There should be a datasize entry and no migrations
     auto availableShards = getAllShardIds(operationContext());
     auto pendingMigrations =
@@ -655,26 +694,27 @@ TEST_F(BalancerDefragmentationPolicyTest, TestPhaseTwoMissingDataSizeRestartsPha
 
 TEST_F(BalancerDefragmentationPolicyTest, TestPhaseTwoChunkCanBeMovedAndMergedWithSibling) {
     ChunkType biggestChunk(
-        kUuid,
+        kUuid1,
         ChunkRange(kKeyAtMin, kKeyAtZero),
         ChunkVersion({kCollectionVersion.epoch(), kCollectionVersion.getTimestamp()}, {1, 0}),
         kShardId0);
     biggestChunk.setEstimatedSizeBytes(2048);
     ChunkType smallestChunk(
-        kUuid,
+        kUuid1,
         ChunkRange(kKeyAtZero, kKeyAtMax),
         ChunkVersion({kCollectionVersion.epoch(), kCollectionVersion.getTimestamp()}, {1, 1}),
         kShardId1);
     smallestChunk.setEstimatedSizeBytes(1024);
 
-    auto coll = setupCollectionWithPhase({smallestChunk, biggestChunk},
-                                         DefragmentationPhaseEnum::kMoveAndMergeChunks);
+    setupShards(kShardList);
+    auto coll = setupCollectionWithPhase(
+        kNss1, {smallestChunk, biggestChunk}, DefragmentationPhaseEnum::kMoveAndMergeChunks);
     std::vector<ShardStatistics> clusterStats{buildShardStats(kShardId0, 4),
                                               buildShardStats(kShardId1, 2)};
     std::map<NamespaceString, std::vector<ShardStatistics>> collectionStats{
-        {kNss, {buildShardStats(kShardId0, 4), buildShardStats(kShardId1, 2)}}};
+        {kNss1, {buildShardStats(kShardId0, 4), buildShardStats(kShardId1, 2)}}};
     _clusterStats.setStats(std::move(clusterStats), std::move(collectionStats));
-    _defragmentationPolicy.startCollectionDefragmentation(operationContext(), coll);
+    _defragmentationPolicy.startCollectionDefragmentations(operationContext());
 
     ASSERT_TRUE(_defragmentationPolicy.isDefragmentingCollection(coll.getUuid()));
@@ -727,48 +767,50 @@ TEST_F(BalancerDefragmentationPolicyTest,
     // Define a single collection, distributing 6 chunks across the 4 shards so that there cannot be
     // a merge without migrations
     ChunkType firstChunkOnShard0(
-        kUuid,
+        kUuid1,
         ChunkRange(kKeyAtMin, kKeyAtZero),
         ChunkVersion({kCollectionVersion.epoch(), kCollectionVersion.getTimestamp()}, {1, 0}),
         kShardId0);
     firstChunkOnShard0.setEstimatedSizeBytes(1);
 
     ChunkType firstChunkOnShard1(
-        kUuid,
+        kUuid1,
         ChunkRange(kKeyAtZero, kKeyAtTen),
         ChunkVersion({kCollectionVersion.epoch(), kCollectionVersion.getTimestamp()}, {1, 1}),
         kShardId1);
     firstChunkOnShard1.setEstimatedSizeBytes(1);
 
     ChunkType chunkOnShard2(
-        kUuid,
+        kUuid1,
         ChunkRange(kKeyAtTen, kKeyAtTwenty),
         ChunkVersion({kCollectionVersion.epoch(), kCollectionVersion.getTimestamp()}, {1, 2}),
         kShardId2);
     chunkOnShard2.setEstimatedSizeBytes(1);
 
     ChunkType chunkOnShard3(
-        kUuid,
+        kUuid1,
         ChunkRange(kKeyAtTwenty, kKeyAtThirty),
         ChunkVersion({kCollectionVersion.epoch(), kCollectionVersion.getTimestamp()}, {1, 3}),
         kShardId3);
     chunkOnShard3.setEstimatedSizeBytes(1);
 
     ChunkType secondChunkOnShard0(
-        kUuid,
+        kUuid1,
         ChunkRange(kKeyAtThirty, kKeyAtForty),
         ChunkVersion({kCollectionVersion.epoch(), kCollectionVersion.getTimestamp()}, {1, 4}),
         kShardId0);
     secondChunkOnShard0.setEstimatedSizeBytes(1);
 
     ChunkType secondChunkOnShard1(
-        kUuid,
+        kUuid1,
         ChunkRange(kKeyAtForty, kKeyAtMax),
         ChunkVersion({kCollectionVersion.epoch(), kCollectionVersion.getTimestamp()}, {1, 5}),
         kShardId1);
     secondChunkOnShard1.setEstimatedSizeBytes(1);
 
-    auto coll = setupCollectionWithPhase({firstChunkOnShard0,
+    setupShards(kShardList);
+    auto coll = setupCollectionWithPhase(kNss1,
+                                         {firstChunkOnShard0,
                                           firstChunkOnShard1,
                                           chunkOnShard2,
                                           chunkOnShard3,
@@ -777,7 +819,7 @@ TEST_F(BalancerDefragmentationPolicyTest,
                                          DefragmentationPhaseEnum::kMoveAndMergeChunks,
                                          boost::none);
     setDefaultClusterStats();
-    _defragmentationPolicy.startCollectionDefragmentation(operationContext(), coll);
+    _defragmentationPolicy.startCollectionDefragmentations(operationContext());
 
     // Two move operation should be returned within a single invocation, using all the possible
     // shards
@@ -799,27 +841,32 @@ TEST_F(BalancerDefragmentationPolicyTest,
 
 TEST_F(BalancerDefragmentationPolicyTest, DefragmentationBeginsWithPhase3FromPersistedSetting) {
     RAIIServerParameterControllerForTest featureFlagNoMoreAutoSplitterOff{
         "featureFlagNoMoreAutoSplitter", false};
-    auto coll = setupCollectionWithPhase({makeConfigChunkEntry(kPhase3DefaultChunkSize)},
+    setupShards(kShardList);
+    auto coll = setupCollectionWithPhase(kNss1,
+                                         {makeConfigChunkEntry(kUuid1, kPhase3DefaultChunkSize)},
                                          DefragmentationPhaseEnum::kSplitChunks);
-    // Defragmentation does not start until startCollectionDefragmentation is called
+    // Defragmentation does not start until startCollectionDefragmentations is called
     auto nextAction = _defragmentationPolicy.getNextStreamingAction(operationContext());
     ASSERT_TRUE(nextAction == boost::none);
     ASSERT_FALSE(_defragmentationPolicy.isDefragmentingCollection(coll.getUuid()));
 
-    _defragmentationPolicy.startCollectionDefragmentation(operationContext(), coll);
+    _defragmentationPolicy.startCollectionDefragmentations(operationContext());
 
     ASSERT_TRUE(_defragmentationPolicy.isDefragmentingCollection(coll.getUuid()));
-    verifyExpectedDefragmentationPhaseOndisk(DefragmentationPhaseEnum::kSplitChunks);
+    verifyExpectedDefragmentationStateOnDisk(coll.getUuid(),
+                                             DefragmentationPhaseEnum::kSplitChunks);
 }
 
 TEST_F(BalancerDefragmentationPolicyTest, SingleLargeChunkCausesAutoSplitAndSplitActions) {
     RAIIServerParameterControllerForTest featureFlagNoMoreAutoSplitterOff{
         "featureFlagNoMoreAutoSplitter", false};
-    auto coll = setupCollectionWithPhase({makeConfigChunkEntry(kPhase3DefaultChunkSize)},
+    setupShards(kShardList);
+    auto coll = setupCollectionWithPhase(kNss1,
+                                         {makeConfigChunkEntry(kUuid1, kPhase3DefaultChunkSize)},
                                          DefragmentationPhaseEnum::kSplitChunks);
     auto nextAction = _defragmentationPolicy.getNextStreamingAction(operationContext());
-    _defragmentationPolicy.startCollectionDefragmentation(operationContext(), coll);
+    _defragmentationPolicy.startCollectionDefragmentations(operationContext());
 
     // The new action returned by the stream should be an actionable AutoSplitVector command...
     nextAction = _defragmentationPolicy.getNextStreamingAction(operationContext());
@@ -834,11 +881,14 @@ TEST_F(BalancerDefragmentationPolicyTest, SingleLargeChunkCausesAutoSplitAndSpli
 
 TEST_F(BalancerDefragmentationPolicyTest, CollectionMaxChunkSizeIsUsedForPhase3) {
     RAIIServerParameterControllerForTest featureFlagNoMoreAutoSplitterOff{
         "featureFlagNoMoreAutoSplitter", false};
+    setupShards(kShardList);
 
     // One chunk > 1KB should trigger AutoSplitVector
-    auto coll = setupCollectionWithPhase(
-        {makeConfigChunkEntry(2 * 1024)}, DefragmentationPhaseEnum::kSplitChunks, 1024);
+    auto coll = setupCollectionWithPhase(kNss1,
+                                         {makeConfigChunkEntry(kUuid1, 2 * 1024)},
+                                         DefragmentationPhaseEnum::kSplitChunks,
+                                         1024);
 
-    _defragmentationPolicy.startCollectionDefragmentation(operationContext(), coll);
+    _defragmentationPolicy.startCollectionDefragmentations(operationContext());
 
     auto nextAction = _defragmentationPolicy.getNextStreamingAction(operationContext());
@@ -854,9 +904,11 @@ TEST_F(BalancerDefragmentationPolicyTest, CollectionMaxChunkSizeIsUsedForPhase3)
 TEST_F(BalancerDefragmentationPolicyTest, TestRetryableFailedAutoSplitActionGetsReissued) {
     RAIIServerParameterControllerForTest featureFlagNoMoreAutoSplitterOff{
         "featureFlagNoMoreAutoSplitter", false};
-    auto coll = setupCollectionWithPhase({makeConfigChunkEntry(kPhase3DefaultChunkSize)},
+    setupShards(kShardList);
+    auto coll = setupCollectionWithPhase(kNss1,
+                                         {makeConfigChunkEntry(kUuid1, kPhase3DefaultChunkSize)},
                                          DefragmentationPhaseEnum::kSplitChunks);
-    _defragmentationPolicy.startCollectionDefragmentation(operationContext(), coll);
+    _defragmentationPolicy.startCollectionDefragmentations(operationContext());
     auto nextAction = _defragmentationPolicy.getNextStreamingAction(operationContext());
     AutoSplitVectorInfo failingAutoSplitAction = stdx::get<AutoSplitVectorInfo>(*nextAction);
     StatusWith<AutoSplitVectorResponse> response(
@@ -884,9 +936,11 @@ TEST_F(BalancerDefragmentationPolicyTest,
        TestAcknowledgeAutoSplitActionTriggersSplitOnResultingRange) {
     RAIIServerParameterControllerForTest featureFlagNoMoreAutoSplitterOff{
         "featureFlagNoMoreAutoSplitter", false};
-    auto coll = setupCollectionWithPhase({makeConfigChunkEntry(kPhase3DefaultChunkSize)},
+    setupShards(kShardList);
+    auto coll = setupCollectionWithPhase(kNss1,
+                                         {makeConfigChunkEntry(kUuid1, kPhase3DefaultChunkSize)},
                                          DefragmentationPhaseEnum::kSplitChunks);
-    _defragmentationPolicy.startCollectionDefragmentation(operationContext(), coll);
+    _defragmentationPolicy.startCollectionDefragmentations(operationContext());
     auto nextAction = _defragmentationPolicy.getNextStreamingAction(operationContext());
     auto autoSplitAction = stdx::get<AutoSplitVectorInfo>(*nextAction);
@@ -913,9 +967,11 @@ TEST_F(BalancerDefragmentationPolicyTest,
 
 TEST_F(BalancerDefragmentationPolicyTest, TestAutoSplitWithNoSplitPointsDoesNotTriggerSplit) {
     RAIIServerParameterControllerForTest featureFlagNoMoreAutoSplitterOff{
         "featureFlagNoMoreAutoSplitter", false};
-    auto coll = setupCollectionWithPhase({makeConfigChunkEntry(kPhase3DefaultChunkSize)},
+    setupShards(kShardList);
+    auto coll = setupCollectionWithPhase(kNss1,
+                                         {makeConfigChunkEntry(kUuid1, kPhase3DefaultChunkSize)},
                                          DefragmentationPhaseEnum::kSplitChunks);
-    _defragmentationPolicy.startCollectionDefragmentation(operationContext(), coll);
+    _defragmentationPolicy.startCollectionDefragmentations(operationContext());
     auto nextAction = _defragmentationPolicy.getNextStreamingAction(operationContext());
     auto autoSplitAction = stdx::get<AutoSplitVectorInfo>(*nextAction);
@@ -931,9 +987,11 @@ TEST_F(BalancerDefragmentationPolicyTest, TestAutoSplitWithNoSplitPointsDoesNotT
 
 TEST_F(BalancerDefragmentationPolicyTest, TestMoreThan16MBSplitPointsTriggersSplitAndAutoSplit) {
     RAIIServerParameterControllerForTest featureFlagNoMoreAutoSplitterOff{
         "featureFlagNoMoreAutoSplitter", false};
-    auto coll = setupCollectionWithPhase({makeConfigChunkEntry(kPhase3DefaultChunkSize)},
+    setupShards(kShardList);
+    auto coll = setupCollectionWithPhase(kNss1,
+                                         {makeConfigChunkEntry(kUuid1, kPhase3DefaultChunkSize)},
                                          DefragmentationPhaseEnum::kSplitChunks);
-    _defragmentationPolicy.startCollectionDefragmentation(operationContext(), coll);
+    _defragmentationPolicy.startCollectionDefragmentations(operationContext());
     auto nextAction = _defragmentationPolicy.getNextStreamingAction(operationContext());
     auto autoSplitAction = stdx::get<AutoSplitVectorInfo>(*nextAction);
@@ -962,9 +1020,11 @@ TEST_F(BalancerDefragmentationPolicyTest, TestMoreThan16MBSplitPointsTriggersSpl
 
 TEST_F(BalancerDefragmentationPolicyTest, TestFailedSplitChunkActionGetsReissued) {
     RAIIServerParameterControllerForTest featureFlagNoMoreAutoSplitterOff{
         "featureFlagNoMoreAutoSplitter", false};
-    auto coll = setupCollectionWithPhase({makeConfigChunkEntry(kPhase3DefaultChunkSize)},
+    setupShards(kShardList);
+    auto coll = setupCollectionWithPhase(kNss1,
+                                         {makeConfigChunkEntry(kUuid1, kPhase3DefaultChunkSize)},
                                          DefragmentationPhaseEnum::kSplitChunks);
-    _defragmentationPolicy.startCollectionDefragmentation(operationContext(), coll);
+    _defragmentationPolicy.startCollectionDefragmentations(operationContext());
     auto nextAction = _defragmentationPolicy.getNextStreamingAction(operationContext());
     auto autoSplitAction = stdx::get<AutoSplitVectorInfo>(*nextAction);
@@ -997,9 +1057,11 @@ TEST_F(BalancerDefragmentationPolicyTest,
        TestAcknowledgeLastSuccessfulSplitActionEndsDefragmentation) {
     RAIIServerParameterControllerForTest featureFlagNoMoreAutoSplitterOff{
         "featureFlagNoMoreAutoSplitter", false};
-    auto coll = setupCollectionWithPhase({makeConfigChunkEntry(kPhase3DefaultChunkSize)},
+    setupShards(kShardList);
+    auto coll = setupCollectionWithPhase(kNss1,
+                                         {makeConfigChunkEntry(kUuid1, kPhase3DefaultChunkSize)},
                                          DefragmentationPhaseEnum::kSplitChunks);
-    _defragmentationPolicy.startCollectionDefragmentation(operationContext(), coll);
+    _defragmentationPolicy.startCollectionDefragmentations(operationContext());
     auto nextAction = _defragmentationPolicy.getNextStreamingAction(operationContext());
     auto autoSplitAction = stdx::get<AutoSplitVectorInfo>(*nextAction);
@@ -1018,7 +1080,48 @@ TEST_F(BalancerDefragmentationPolicyTest,
 
     // With phase 3 complete, defragmentation should be completed.
     ASSERT_FALSE(_defragmentationPolicy.isDefragmentingCollection(coll.getUuid()));
-    verifyExpectedDefragmentationPhaseOndisk(boost::none);
+    verifyExpectedDefragmentationStateOnDisk(coll.getUuid(), boost::none);
+}
+
+TEST_F(BalancerDefragmentationPolicyTest, DontStartDefragmentationOnAnyCollection) {
+
+    // Init a collection with defragmentation flag unset
+    setupShards(kShardList);
+    setupCollection(kNss1, kShardKeyPattern, {makeConfigChunkEntry(kUuid1)});
+
+    _defragmentationPolicy.startCollectionDefragmentations(operationContext());
+
+    verifyExpectedDefragmentationStateOnDisk(kUuid1, boost::none);
+    ASSERT_FALSE(_defragmentationPolicy.isDefragmentingCollection(kUuid1));
+}
+
+TEST_F(BalancerDefragmentationPolicyTest, StartDefragmentationOnMultipleCollections) {
+
+    // Setup 3 collections:
+    //   coll1 -> DEFRAGMENTING
+    //   coll2 -> NOT DEFRAGMENTING
+    //   coll3 -> DEFRAGMENTING
+
+    setupShards(kShardList);
+    auto coll1 = setupCollectionWithPhase(kNss1, {makeConfigChunkEntry(kUuid1)});
+
+    setupCollection(kNss2, kShardKeyPattern, {makeConfigChunkEntry(kUuid2)});
+    auto coll2 =
+        Grid::get(operationContext())->catalogClient()->getCollection(operationContext(), kUuid2);
+
+    auto coll3 = setupCollectionWithPhase(kNss3, {makeConfigChunkEntry(kUuid3)});
+
+    _defragmentationPolicy.startCollectionDefragmentations(operationContext());
+
+    ASSERT_TRUE(_defragmentationPolicy.isDefragmentingCollection(coll1.getUuid()));
+    ASSERT_FALSE(_defragmentationPolicy.isDefragmentingCollection(coll2.getUuid()));
+    ASSERT_TRUE(_defragmentationPolicy.isDefragmentingCollection(coll3.getUuid()));
+
+    verifyExpectedDefragmentationStateOnDisk(kUuid1,
+                                             DefragmentationPhaseEnum::kMergeAndMeasureChunks);
+    verifyExpectedDefragmentationStateOnDisk(kUuid2, boost::none);
+    verifyExpectedDefragmentationStateOnDisk(kUuid3,
+                                             DefragmentationPhaseEnum::kMergeAndMeasureChunks);
 }
 
 }  // namespace