diff options
author     Marcos José Grillo Ramírez <marcos.grillo@10gen.com>  2019-11-27 10:13:34 +0000
committer  evergreen <evergreen@mongodb.com>                      2019-11-27 10:13:34 +0000
commit     996d0dee82219d362f1b31737cb35ee3bca3f092 (patch)
tree       06a144763645e27b786df27b3e49d6b90ba45e5c
parent     253caebbb4531e4852d18446c061de6f4e21f074 (diff)
download   mongo-996d0dee82219d362f1b31737cb35ee3bca3f092.tar.gz
SERVER-44476 Include number of jumbo chunks remaining in output of removeShard
(cherry picked from commit 11754f469d36468e7c649cb9c052230614e4cb44)
4 files changed, 80 insertions, 43 deletions
diff --git a/src/mongo/db/s/config/configsvr_remove_shard_command.cpp b/src/mongo/db/s/config/configsvr_remove_shard_command.cpp index afb76329610..9fd0d19059e 100644 --- a/src/mongo/db/s/config/configsvr_remove_shard_command.cpp +++ b/src/mongo/db/s/config/configsvr_remove_shard_command.cpp @@ -145,33 +145,25 @@ public: }(); // TODO: Standardize/separate how we append to the result object - switch (shardDrainingStatus) { - case ShardDrainingStatus::STARTED: + switch (shardDrainingStatus.status) { + case RemoveShardProgress::STARTED: result.append("msg", "draining started successfully"); result.append("state", "started"); result.append("shard", shard->getId().toString()); result.appendElements(dbInfo); break; - case ShardDrainingStatus::ONGOING: { - const auto swChunks = Grid::get(opCtx)->catalogClient()->getChunks( - opCtx, - BSON(ChunkType::shard(shard->getId().toString())), - BSONObj(), - boost::none, // return all - nullptr, - repl::ReadConcernArgs::get(opCtx).getLevel()); - uassertStatusOK(swChunks.getStatus()); - - const auto& chunks = swChunks.getValue(); + case RemoveShardProgress::ONGOING: { + const auto& remainingCounts = shardDrainingStatus.remainingCounts; result.append("msg", "draining ongoing"); result.append("state", "ongoing"); result.append("remaining", - BSON("chunks" << static_cast<long long>(chunks.size()) << "dbs" - << static_cast<long long>(databases.size()))); + BSON("chunks" << remainingCounts->totalChunks << "dbs" + << remainingCounts->databases << "jumboChunks" + << remainingCounts->jumboChunks)); result.appendElements(dbInfo); break; } - case ShardDrainingStatus::COMPLETED: + case RemoveShardProgress::COMPLETED: result.append("msg", "removeshard completed successfully"); result.append("state", "completed"); result.append("shard", shard->getId().toString()); diff --git a/src/mongo/db/s/config/sharding_catalog_manager.h b/src/mongo/db/s/config/sharding_catalog_manager.h index 0ec9cdbfdd5..8283333b183 100644 --- 
a/src/mongo/db/s/config/sharding_catalog_manager.h +++ b/src/mongo/db/s/config/sharding_catalog_manager.h @@ -50,14 +50,30 @@ class RemoteCommandTargeter; class ServiceContext; class UUID; -/** - * Used to indicate to the caller of the removeShard method whether draining of chunks for - * a particular shard has started, is ongoing, or has been completed. - */ -enum ShardDrainingStatus { - STARTED, - ONGOING, - COMPLETED, +struct RemoveShardProgress { + + /** + * Used to indicate to the caller of the removeShard method whether draining of chunks for + * a particular shard has started, is ongoing, or has been completed. + */ + enum DrainingShardStatus { + STARTED, + ONGOING, + COMPLETED, + }; + + /** + * Used to indicate to the caller of the removeShard method the remaining amount of chunks, + * jumbo chunks and databases within the shard + */ + struct DrainingShardUsage { + const long long totalChunks; + const long long databases; + const long long jumboChunks; + }; + + DrainingShardStatus status; + boost::optional<DrainingShardUsage> remainingCounts; }; /** @@ -355,7 +371,7 @@ public: * Because of the asynchronous nature of the draining mechanism, this method returns * the current draining status. See ShardDrainingStatus enum definition for more details. 
*/ - ShardDrainingStatus removeShard(OperationContext* opCtx, const ShardId& shardId); + RemoveShardProgress removeShard(OperationContext* opCtx, const ShardId& shardId); // // Cluster Upgrade Operations diff --git a/src/mongo/db/s/config/sharding_catalog_manager_remove_shard_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_remove_shard_test.cpp index 7cec59a7ad2..6445d535008 100644 --- a/src/mongo/db/s/config/sharding_catalog_manager_remove_shard_test.cpp +++ b/src/mongo/db/s/config/sharding_catalog_manager_remove_shard_test.cpp @@ -137,14 +137,16 @@ TEST_F(RemoveShardTest, RemoveShardAnotherShardDraining) { setupShards(std::vector<ShardType>{shard1, shard2, shard3}); - ASSERT_EQUALS(ShardDrainingStatus::STARTED, - ShardingCatalogManager::get(operationContext()) - ->removeShard(operationContext(), shard1.getName())); + auto result = ShardingCatalogManager::get(operationContext()) + ->removeShard(operationContext(), shard1.getName()); + ASSERT_EQUALS(RemoveShardProgress::STARTED, result.status); + ASSERT_EQUALS(false, result.remainingCounts.is_initialized()); ASSERT_TRUE(isDraining(shard1.getName())); - ASSERT_EQUALS(ShardDrainingStatus::STARTED, - ShardingCatalogManager::get(operationContext()) - ->removeShard(operationContext(), shard2.getName())); + auto result2 = ShardingCatalogManager::get(operationContext()) + ->removeShard(operationContext(), shard2.getName()); + ASSERT_EQUALS(RemoveShardProgress::STARTED, result2.status); + ASSERT_EQUALS(false, result2.remainingCounts.is_initialized()); ASSERT_TRUE(isDraining(shard2.getName())); } @@ -182,7 +184,8 @@ TEST_F(RemoveShardTest, RemoveShardStartDraining) { auto result = ShardingCatalogManager::get(operationContext()) ->removeShard(operationContext(), shard1.getName()); - ASSERT_EQUALS(ShardDrainingStatus::STARTED, result); + ASSERT_EQUALS(RemoveShardProgress::STARTED, result.status); + ASSERT_EQUALS(false, result.remainingCounts.is_initialized()); ASSERT_TRUE(isDraining(shard1.getName())); } @@ -214,18 
+217,25 @@ TEST_F(RemoveShardTest, RemoveShardStillDrainingChunksRemaining) { ChunkVersion(1, 3, epoch), shard1.getName()); + chunk3.setJumbo(true); + setupShards(std::vector<ShardType>{shard1, shard2}); setupDatabase("testDB", shard1.getName(), true); setupChunks(std::vector<ChunkType>{chunk1, chunk2, chunk3}); auto startedResult = ShardingCatalogManager::get(operationContext()) ->removeShard(operationContext(), shard1.getName()); - ASSERT_EQUALS(ShardDrainingStatus::STARTED, startedResult); + ASSERT_EQUALS(RemoveShardProgress::STARTED, startedResult.status); + ASSERT_EQUALS(false, startedResult.remainingCounts.is_initialized()); ASSERT_TRUE(isDraining(shard1.getName())); auto ongoingResult = ShardingCatalogManager::get(operationContext()) ->removeShard(operationContext(), shard1.getName()); - ASSERT_EQUALS(ShardDrainingStatus::ONGOING, ongoingResult); + ASSERT_EQUALS(RemoveShardProgress::ONGOING, ongoingResult.status); + ASSERT_EQUALS(true, ongoingResult.remainingCounts.is_initialized()); + ASSERT_EQUALS(3, ongoingResult.remainingCounts->totalChunks); + ASSERT_EQUALS(1, ongoingResult.remainingCounts->jumboChunks); + ASSERT_EQUALS(1, ongoingResult.remainingCounts->databases); ASSERT_TRUE(isDraining(shard1.getName())); } @@ -248,12 +258,17 @@ TEST_F(RemoveShardTest, RemoveShardStillDrainingDatabasesRemaining) { auto startedResult = ShardingCatalogManager::get(operationContext()) ->removeShard(operationContext(), shard1.getName()); - ASSERT_EQUALS(ShardDrainingStatus::STARTED, startedResult); + ASSERT_EQUALS(RemoveShardProgress::STARTED, startedResult.status); + ASSERT_EQUALS(false, startedResult.remainingCounts.is_initialized()); ASSERT_TRUE(isDraining(shard1.getName())); auto ongoingResult = ShardingCatalogManager::get(operationContext()) ->removeShard(operationContext(), shard1.getName()); - ASSERT_EQUALS(ShardDrainingStatus::ONGOING, ongoingResult); + ASSERT_EQUALS(RemoveShardProgress::ONGOING, ongoingResult.status); + ASSERT_EQUALS(true, 
ongoingResult.remainingCounts.is_initialized()); + ASSERT_EQUALS(0, ongoingResult.remainingCounts->totalChunks); + ASSERT_EQUALS(0, ongoingResult.remainingCounts->jumboChunks); + ASSERT_EQUALS(1, ongoingResult.remainingCounts->databases); ASSERT_TRUE(isDraining(shard1.getName())); } @@ -293,12 +308,17 @@ TEST_F(RemoveShardTest, RemoveShardCompletion) { auto startedResult = ShardingCatalogManager::get(operationContext()) ->removeShard(operationContext(), shard1.getName()); - ASSERT_EQUALS(ShardDrainingStatus::STARTED, startedResult); + ASSERT_EQUALS(RemoveShardProgress::STARTED, startedResult.status); + ASSERT_EQUALS(false, startedResult.remainingCounts.is_initialized()); ASSERT_TRUE(isDraining(shard1.getName())); auto ongoingResult = ShardingCatalogManager::get(operationContext()) ->removeShard(operationContext(), shard1.getName()); - ASSERT_EQUALS(ShardDrainingStatus::ONGOING, ongoingResult); + ASSERT_EQUALS(RemoveShardProgress::ONGOING, ongoingResult.status); + ASSERT_EQUALS(true, ongoingResult.remainingCounts.is_initialized()); + ASSERT_EQUALS(3, ongoingResult.remainingCounts->totalChunks); + ASSERT_EQUALS(0, ongoingResult.remainingCounts->jumboChunks); + ASSERT_EQUALS(0, ongoingResult.remainingCounts->databases); ASSERT_TRUE(isDraining(shard1.getName())); // Mock the operation during which the chunks are moved to the other shard. @@ -312,7 +332,8 @@ TEST_F(RemoveShardTest, RemoveShardCompletion) { auto completedResult = ShardingCatalogManager::get(operationContext()) ->removeShard(operationContext(), shard1.getName()); - ASSERT_EQUALS(ShardDrainingStatus::COMPLETED, completedResult); + ASSERT_EQUALS(RemoveShardProgress::COMPLETED, completedResult.status); + ASSERT_EQUALS(false, startedResult.remainingCounts.is_initialized()); // Now make sure that the shard no longer exists on config. 
auto response = assertGet(shardRegistry()->getConfigShard()->exhaustiveFindOnConfig( diff --git a/src/mongo/db/s/config/sharding_catalog_manager_shard_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_shard_operations.cpp index c3ffc023690..4d81a3f424b 100644 --- a/src/mongo/db/s/config/sharding_catalog_manager_shard_operations.cpp +++ b/src/mongo/db/s/config/sharding_catalog_manager_shard_operations.cpp @@ -758,7 +758,7 @@ StatusWith<std::string> ShardingCatalogManager::addShard( return shardType.getName(); } -ShardDrainingStatus ShardingCatalogManager::removeShard(OperationContext* opCtx, +RemoveShardProgress ShardingCatalogManager::removeShard(OperationContext* opCtx, const ShardId& shardId) { const auto name = shardId.toString(); @@ -803,7 +803,8 @@ ShardDrainingStatus ShardingCatalogManager::removeShard(OperationContext* opCtx, shardRegistry->reload(opCtx); - return ShardDrainingStatus::STARTED; + return {RemoveShardProgress::STARTED, + boost::optional<RemoveShardProgress::DrainingShardUsage>(boost::none)}; } // Draining has already started, now figure out how many chunks and databases are still on the @@ -814,12 +815,18 @@ ShardDrainingStatus ShardingCatalogManager::removeShard(OperationContext* opCtx, const auto databaseCount = uassertStatusOK( _runCountCommandOnConfig(opCtx, DatabaseType::ConfigNS, BSON(DatabaseType::primary(name)))); + const auto jumboCount = uassertStatusOK(_runCountCommandOnConfig( + opCtx, ChunkType::ConfigNS, BSON(ChunkType::shard(name) << ChunkType::jumbo(true)))); + if (chunkCount > 0 || databaseCount > 0) { // Still more draining to do LOG(0) << "chunkCount: " << chunkCount; LOG(0) << "databaseCount: " << databaseCount; + LOG(0) << "jumboCount: " << jumboCount; - return ShardDrainingStatus::ONGOING; + return {RemoveShardProgress::ONGOING, + boost::optional<RemoveShardProgress::DrainingShardUsage>( + {chunkCount, databaseCount, jumboCount})}; } // Draining is done, now finish removing the shard. 
@@ -842,7 +849,8 @@ ShardDrainingStatus ShardingCatalogManager::removeShard(OperationContext* opCtx, ShardingLogging::get(opCtx)->logChange( opCtx, "removeShard", "", BSON("shard" << name), ShardingCatalogClient::kLocalWriteConcern); - return ShardDrainingStatus::COMPLETED; + return {RemoveShardProgress::COMPLETED, + boost::optional<RemoveShardProgress::DrainingShardUsage>(boost::none)}; } void ShardingCatalogManager::appendConnectionStats(executor::ConnectionPoolStats* stats) { |