author      Kaloian Manassiev <kaloian.manassiev@mongodb.com>    2016-07-27 14:15:49 -0400
committer   Kaloian Manassiev <kaloian.manassiev@mongodb.com>    2016-07-30 09:51:15 -0400
commit      ed7f95c68256c85c611034e0eb131cd5592338e7 (patch)
tree        75d4378827bd303b842d8d00e45f2a4cd4b9e60c /src
parent      8c326de66475125ce755c52d36340b52f86ac64e (diff)
download    mongo-ed7f95c68256c85c611034e0eb131cd5592338e7.tar.gz
SERVER-24856 Make manual moveChunk requests go through the balancer on CSRS primary
Diffstat (limited to 'src')
-rw-r--r--  src/mongo/db/s/active_migrations_registry.cpp                   |  2
-rw-r--r--  src/mongo/db/s/migration_destination_manager_legacy_commands.cpp |  7
-rw-r--r--  src/mongo/db/s/sharding_state.cpp                               |  7
-rw-r--r--  src/mongo/s/SConscript                                          |  4
-rw-r--r--  src/mongo/s/balancer/balancer.cpp                               |  6
-rw-r--r--  src/mongo/s/balancer/balancer.h                                 |  7
-rw-r--r--  src/mongo/s/balancer/balancer_chunk_selection_policy_impl.cpp   |  5
-rw-r--r--  src/mongo/s/balancer/balancer_chunk_selection_policy_impl.h     |  7
-rw-r--r--  src/mongo/s/balancer/cluster_statistics.cpp                     |  4
-rw-r--r--  src/mongo/s/balancer/cluster_statistics.h                       |  4
-rw-r--r--  src/mongo/s/balancer/cluster_statistics_impl.cpp                | 61
-rw-r--r--  src/mongo/s/balancer/cluster_statistics_impl.h                  | 27
-rw-r--r--  src/mongo/s/balancer/migration_manager.cpp                      | 31
-rw-r--r--  src/mongo/s/catalog/replset/SConscript                          |  1
-rw-r--r--  src/mongo/s/chunk.cpp                                           |  4
-rw-r--r--  src/mongo/s/commands/cluster_move_chunk_cmd.cpp                 | 35
-rw-r--r--  src/mongo/s/commands/cluster_shard_collection_cmd.cpp           |  4
-rw-r--r--  src/mongo/s/query/SConscript                                    |  1
-rw-r--r--  src/mongo/s/server.cpp                                          |  3
-rw-r--r--  src/mongo/s/sharding_raii.cpp                                   | 30
20 files changed, 130 insertions, 120 deletions
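In short, mongos no longer runs manual moveChunk requests through a local Balancer instance; it forwards them to the balancer running on the config server (CSRS) primary through the new configsvr_client helpers. The sketch below condenses the new mongos-side flow from cluster_move_chunk_cmd.cpp in the diff that follows; the wrapper function name and its parameter list are illustrative only, while the ScopedChunkManager, ChunkType, and configsvr_client::moveChunk calls are taken from the diff.

// Illustrative sketch only (hypothetical helper, simplified arguments):
// how a manual moveChunk issued against mongos now reaches the CSRS balancer.
Status forwardManualMoveChunk(OperationContext* txn,
                              const NamespaceString& nss,
                              const std::shared_ptr<Chunk>& chunk,
                              const ShardId& toShardId,
                              int64_t maxChunkSizeBytes,
                              const MigrationSecondaryThrottleOptions& secondaryThrottle,
                              bool waitForDelete) {
    // Resolve the collection's routing information, refreshing it if stale.
    auto scopedCM = uassertStatusOK(ScopedChunkManager::getExisting(txn, nss));
    ChunkManager* const cm = scopedCM.cm();

    // Describe the chunk being moved so the config server can validate the
    // request against its own (authoritative) metadata.
    ChunkType chunkType;
    chunkType.setNS(nss.ns());
    chunkType.setMin(chunk->getMin());
    chunkType.setMax(chunk->getMax());
    chunkType.setShard(chunk->getShardId());
    chunkType.setVersion(cm->getVersion());

    // Instead of invoking a mongos-local Balancer, hand the migration to the
    // balancer on the config server primary.
    return configsvr_client::moveChunk(
        txn, chunkType, toShardId, maxChunkSizeBytes, secondaryThrottle, waitForDelete);
}

The same configsvr_client::moveChunk entry point replaces Balancer::moveSingleChunk in cluster_shard_collection_cmd.cpp, and configsvr_client::rebalanceChunk replaces Balancer::rebalanceSingleChunk in chunk.cpp, which is why mongos no longer constructs a Balancer in server.cpp.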
diff --git a/src/mongo/db/s/active_migrations_registry.cpp b/src/mongo/db/s/active_migrations_registry.cpp
index 62d9d287f5a..70e69a05f66 100644
--- a/src/mongo/db/s/active_migrations_registry.cpp
+++ b/src/mongo/db/s/active_migrations_registry.cpp
@@ -55,7 +55,7 @@ StatusWith<ScopedRegisterMigration> ActiveMigrationsRegistry::registerMigration(
return {ErrorCodes::ConflictingOperationInProgress,
str::stream()
- << "Unable start new migration, because there is already an active migration for "
+ << "Unable start new migration because this shard is currently donating chunk for "
<< _activeMoveChunkState->args.getNss().ns()};
}
diff --git a/src/mongo/db/s/migration_destination_manager_legacy_commands.cpp b/src/mongo/db/s/migration_destination_manager_legacy_commands.cpp
index 545925f7190..6c528147df9 100644
--- a/src/mongo/db/s/migration_destination_manager_legacy_commands.cpp
+++ b/src/mongo/db/s/migration_destination_manager_legacy_commands.cpp
@@ -95,10 +95,9 @@ public:
// Active state of TO-side migrations (MigrateStatus) is serialized by distributed
// collection lock.
- if (shardingState->migrationDestinationManager()->isActive()) {
- errmsg = "migrate already in progress";
- return false;
- }
+ uassert(ErrorCodes::ConflictingOperationInProgress,
+ "Shard is already serving as a destination for migration",
+ !shardingState->migrationDestinationManager()->isActive());
// Pending deletes (for migrations) are serialized by the distributed collection lock,
// we are sure we registered a delete for a range *before* we can migrate-in a
diff --git a/src/mongo/db/s/sharding_state.cpp b/src/mongo/db/s/sharding_state.cpp
index 98fc7037c40..44128b1ca0d 100644
--- a/src/mongo/db/s/sharding_state.cpp
+++ b/src/mongo/db/s/sharding_state.cpp
@@ -677,6 +677,13 @@ StatusWith<ChunkVersion> ShardingState::_refreshMetadata(
}
StatusWith<ScopedRegisterMigration> ShardingState::registerMigration(const MoveChunkRequest& args) {
+ if (_migrationDestManager.isActive()) {
+ return {
+ ErrorCodes::ConflictingOperationInProgress,
+ str::stream()
+ << "Unable start new migration because this shard is currently receiving a chunk"};
+ }
+
return _activeMigrationsRegistry.registerMigration(args);
}
diff --git a/src/mongo/s/SConscript b/src/mongo/s/SConscript
index baaa447b7dd..fd062a676a6 100644
--- a/src/mongo/s/SConscript
+++ b/src/mongo/s/SConscript
@@ -266,7 +266,6 @@ env.Library(
'shard_util.cpp',
'sharding_egress_metadata_hook.cpp',
'sharding_raii.cpp',
- 'sharding_uptime_reporter.cpp',
],
LIBDEPS=[
'$BUILD_DIR/mongo/db/audit',
@@ -294,8 +293,6 @@ env.Library(
]
)
-# This library is only used by the mongos execuable and any tests which require mongos runtime
-# objects, such as the request processing pipeline or the balancer.
env.Library(
target='mongoscore',
source=[
@@ -304,6 +301,7 @@ env.Library(
's_only.cpp',
's_sharding_server_status.cpp',
'sharding_egress_metadata_hook_for_mongos.cpp',
+ 'sharding_uptime_reporter.cpp',
'version_mongos.cpp',
],
LIBDEPS=[
diff --git a/src/mongo/s/balancer/balancer.cpp b/src/mongo/s/balancer/balancer.cpp
index 058586d72cb..a5268a81337 100644
--- a/src/mongo/s/balancer/balancer.cpp
+++ b/src/mongo/s/balancer/balancer.cpp
@@ -145,9 +145,9 @@ void warnOnMultiVersion(const vector<ClusterStatistics::ShardStatistics>& cluste
Balancer::Balancer()
: _balancedLastTime(0),
- _chunkSelectionPolicy(stdx::make_unique<BalancerChunkSelectionPolicyImpl>(
- stdx::make_unique<ClusterStatisticsImpl>())),
- _clusterStats(stdx::make_unique<ClusterStatisticsImpl>()) {}
+ _clusterStats(stdx::make_unique<ClusterStatisticsImpl>()),
+ _chunkSelectionPolicy(
+ stdx::make_unique<BalancerChunkSelectionPolicyImpl>(_clusterStats.get())) {}
Balancer::~Balancer() {
// The balancer thread must have been stopped
diff --git a/src/mongo/s/balancer/balancer.h b/src/mongo/s/balancer/balancer.h
index b4dd31ad6a4..9a8da493fa2 100644
--- a/src/mongo/s/balancer/balancer.h
+++ b/src/mongo/s/balancer/balancer.h
@@ -210,11 +210,12 @@ private:
// Number of moved chunks in last round
int _balancedLastTime;
- // Balancer policy
- std::unique_ptr<BalancerChunkSelectionPolicy> _chunkSelectionPolicy;
-
// Source for cluster statistics
std::unique_ptr<ClusterStatistics> _clusterStats;
+
+ // Balancer policy. Depends on the cluster statistics instance above so it should be created
+ // after it and destroyed before it.
+ std::unique_ptr<BalancerChunkSelectionPolicy> _chunkSelectionPolicy;
};
} // namespace mongo
diff --git a/src/mongo/s/balancer/balancer_chunk_selection_policy_impl.cpp b/src/mongo/s/balancer/balancer_chunk_selection_policy_impl.cpp
index e66f371fd3b..ff7daf10d87 100644
--- a/src/mongo/s/balancer/balancer_chunk_selection_policy_impl.cpp
+++ b/src/mongo/s/balancer/balancer_chunk_selection_policy_impl.cpp
@@ -118,9 +118,8 @@ StatusWith<std::pair<DistributionStatus, ChunkMinimumsSet>> createCollectionDist
} // namespace
-BalancerChunkSelectionPolicyImpl::BalancerChunkSelectionPolicyImpl(
- std::unique_ptr<ClusterStatistics> clusterStats)
- : _clusterStats(std::move(clusterStats)) {}
+BalancerChunkSelectionPolicyImpl::BalancerChunkSelectionPolicyImpl(ClusterStatistics* clusterStats)
+ : _clusterStats(clusterStats) {}
BalancerChunkSelectionPolicyImpl::~BalancerChunkSelectionPolicyImpl() = default;
diff --git a/src/mongo/s/balancer/balancer_chunk_selection_policy_impl.h b/src/mongo/s/balancer/balancer_chunk_selection_policy_impl.h
index b2d9abaafaf..ffb769121b7 100644
--- a/src/mongo/s/balancer/balancer_chunk_selection_policy_impl.h
+++ b/src/mongo/s/balancer/balancer_chunk_selection_policy_impl.h
@@ -36,7 +36,7 @@ class ClusterStatistics;
class BalancerChunkSelectionPolicyImpl final : public BalancerChunkSelectionPolicy {
public:
- BalancerChunkSelectionPolicyImpl(std::unique_ptr<ClusterStatistics> clusterStats);
+ BalancerChunkSelectionPolicyImpl(ClusterStatistics* clusterStats);
~BalancerChunkSelectionPolicyImpl();
StatusWith<SplitInfoVector> selectChunksToSplit(OperationContext* txn) override;
@@ -69,8 +69,9 @@ private:
const ShardStatisticsVector& shardStats,
bool aggressiveBalanceHint);
- // Source for obtaining cluster statistics
- std::unique_ptr<ClusterStatistics> _clusterStats;
+ // Source for obtaining cluster statistics. Not owned and must not be destroyed before the
+ // policy object is destroyed.
+ ClusterStatistics* const _clusterStats;
};
} // namespace mongo
diff --git a/src/mongo/s/balancer/cluster_statistics.cpp b/src/mongo/s/balancer/cluster_statistics.cpp
index c951c865ce0..d42994379ef 100644
--- a/src/mongo/s/balancer/cluster_statistics.cpp
+++ b/src/mongo/s/balancer/cluster_statistics.cpp
@@ -45,8 +45,8 @@ ClusterStatistics::ShardStatistics::ShardStatistics(ShardId inShardId,
uint64_t inMaxSizeMB,
uint64_t inCurrSizeMB,
bool inIsDraining,
- const std::set<std::string> inShardTags,
- const std::string inMongoVersion)
+ std::set<std::string> inShardTags,
+ std::string inMongoVersion)
: shardId(std::move(inShardId)),
maxSizeMB(std::move(inMaxSizeMB)),
currSizeMB(std::move(inCurrSizeMB)),
diff --git a/src/mongo/s/balancer/cluster_statistics.h b/src/mongo/s/balancer/cluster_statistics.h
index 748b80c2956..8963720ee6f 100644
--- a/src/mongo/s/balancer/cluster_statistics.h
+++ b/src/mongo/s/balancer/cluster_statistics.h
@@ -62,8 +62,8 @@ public:
uint64_t maxSizeMB,
uint64_t currSizeMB,
bool isDraining,
- const std::set<std::string> shardTags,
- const std::string mongoVersion);
+ std::set<std::string> shardTags,
+ std::string mongoVersion);
/**
* Returns if a shard cannot receive any new chunks because it has reached the per-shard
diff --git a/src/mongo/s/balancer/cluster_statistics_impl.cpp b/src/mongo/s/balancer/cluster_statistics_impl.cpp
index 96014d9dc90..c5a40a8b742 100644
--- a/src/mongo/s/balancer/cluster_statistics_impl.cpp
+++ b/src/mongo/s/balancer/cluster_statistics_impl.cpp
@@ -99,60 +99,59 @@ ClusterStatisticsImpl::ClusterStatisticsImpl() = default;
ClusterStatisticsImpl::~ClusterStatisticsImpl() = default;
StatusWith<vector<ShardStatistics>> ClusterStatisticsImpl::getStats(OperationContext* txn) {
- try {
- _refreshShardStats(txn);
- } catch (const DBException& e) {
- return e.toStatus();
- }
-
- vector<ShardStatistics> stats;
-
- stdx::lock_guard<stdx::mutex> scopedLock(_mutex);
- for (const auto& stat : _shardStatsMap) {
- stats.push_back(stat.second);
- }
-
- return stats;
-}
-
-void ClusterStatisticsImpl::_refreshShardStats(OperationContext* txn) {
// Get a list of all the shards that are participating in this balance round along with any
// maximum allowed quotas and current utilization. We get the latter by issuing
// db.serverStatus() (mem.mapped) to all shards.
//
// TODO: skip unresponsive shards and mark information as stale.
auto shardsStatus = Grid::get(txn)->catalogClient(txn)->getAllShards(txn);
- uassertStatusOK(shardsStatus.getStatus());
+ if (!shardsStatus.isOK()) {
+ return shardsStatus.getStatus();
+ }
const vector<ShardType> shards(std::move(shardsStatus.getValue().value));
+ vector<ShardStatistics> stats;
+
for (const auto& shard : shards) {
auto shardSizeStatus = shardutil::retrieveTotalShardSize(txn, shard.getName());
if (!shardSizeStatus.isOK()) {
- continue;
+ const Status& status = shardSizeStatus.getStatus();
+
+ return {status.code(),
+ str::stream() << "Unable to obtain shard utilization information for "
+ << shard.getName()
+ << " due to "
+ << status.reason()};
}
+ string mongoDVersion;
+
auto mongoDVersionStatus = retrieveShardMongoDVersion(txn, shard.getName());
- if (!mongoDVersionStatus.isOK()) {
- continue;
+ if (mongoDVersionStatus.isOK()) {
+ mongoDVersion = std::move(mongoDVersionStatus.getValue());
+ } else {
+ // Since the mongod version is only used for reporting, there is no need to fail the
+ // entire round if it cannot be retrieved, so just leave it empty
+ log() << "Unable to obtain shard version for " << shard.getName()
+ << causedBy(mongoDVersionStatus.getStatus());
}
- const string mongoDVersion(std::move(mongoDVersionStatus.getValue()));
std::set<string> shardTags;
+
for (const auto& shardTag : shard.getTags()) {
shardTags.insert(shardTag);
}
- ShardStatistics newShardStat(shard.getName(),
- shard.getMaxSizeMB(),
- shardSizeStatus.getValue() / 1024 / 1024,
- shard.getDraining(),
- shardTags,
- mongoDVersion);
-
- stdx::lock_guard<stdx::mutex> scopedLock(_mutex);
- _shardStatsMap[shard.getName()] = std::move(newShardStat);
+ stats.emplace_back(shard.getName(),
+ shard.getMaxSizeMB(),
+ shardSizeStatus.getValue() / 1024 / 1024,
+ shard.getDraining(),
+ std::move(shardTags),
+ std::move(mongoDVersion));
}
+
+ return stats;
}
} // namespace mongo
diff --git a/src/mongo/s/balancer/cluster_statistics_impl.h b/src/mongo/s/balancer/cluster_statistics_impl.h
index a1ea1829ac1..493c792a713 100644
--- a/src/mongo/s/balancer/cluster_statistics_impl.h
+++ b/src/mongo/s/balancer/cluster_statistics_impl.h
@@ -28,19 +28,14 @@
#pragma once
-#include <map>
-
#include "mongo/s/balancer/cluster_statistics.h"
-#include "mongo/stdx/mutex.h"
namespace mongo {
-class OperationContext;
-class Status;
-
/**
* Default implementation for the cluster statistics gathering utility. Uses a blocking method to
- * fetch the statistics and does not perform any caching.
+ * fetch the statistics and does not perform any caching. If any of the shards fails to report
+ * statistics fails the entire refresh.
*/
class ClusterStatisticsImpl final : public ClusterStatistics {
public:
@@ -48,24 +43,6 @@ public:
~ClusterStatisticsImpl();
StatusWith<std::vector<ShardStatistics>> getStats(OperationContext* txn) override;
-
-private:
- typedef std::map<ShardId, ShardStatistics> ShardStatisticsMap;
-
- /**
- * Refreshes the list of available shards and loops through them in order to collect usage
- * statistics. If any of the shards fails to report statistics, skips it and continues with the
- * next.
- *
- * If the list of shards cannot be retrieved throws an exception.
- */
- void _refreshShardStats(OperationContext* txn);
-
- // Mutex to protect the mutable state below
- stdx::mutex _mutex;
-
- // The most up-to-date shard statistics
- ShardStatisticsMap _shardStatsMap;
};
} // namespace mongo
diff --git a/src/mongo/s/balancer/migration_manager.cpp b/src/mongo/s/balancer/migration_manager.cpp
index 209c90b7106..473ba9ce439 100644
--- a/src/mongo/s/balancer/migration_manager.cpp
+++ b/src/mongo/s/balancer/migration_manager.cpp
@@ -121,19 +121,42 @@ void MigrationManager::_executeMigrations(OperationContext* txn,
for (auto& migration : _activeMigrations) {
const NamespaceString nss(migration.chunkInfo.migrateInfo.ns);
+ const auto& migrateInfo = migration.chunkInfo.migrateInfo;
+
auto scopedCMStatus = ScopedChunkManager::getExisting(txn, nss);
if (!scopedCMStatus.isOK()) {
// Unable to find the ChunkManager for "nss" for whatever reason; abandon this
// migration and proceed to the next.
stdx::lock_guard<stdx::mutex> lk(_mutex);
- migrationStatuses->insert(MigrationStatuses::value_type(
- migration.chunkInfo.migrateInfo.getName(), std::move(scopedCMStatus.getStatus())));
+ migrationStatuses->emplace(migrateInfo.getName(),
+ std::move(scopedCMStatus.getStatus()));
continue;
}
ChunkManager* const chunkManager = scopedCMStatus.getValue().cm();
- auto chunk =
- chunkManager->findIntersectingChunk(txn, migration.chunkInfo.migrateInfo.minKey);
+
+ auto chunk = chunkManager->findIntersectingChunk(txn, migrateInfo.minKey);
+ invariant(chunk);
+
+ // If the chunk is not found exactly as requested, the caller must have stale data
+ if (chunk->getMin() != migrateInfo.minKey || chunk->getMax() != migrateInfo.maxKey) {
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
+ migrationStatuses->emplace(
+ migrateInfo.getName(),
+ Status(ErrorCodes::IncompatibleShardingMetadata,
+ str::stream()
+ << "Chunk "
+ << ChunkRange(migrateInfo.minKey, migrateInfo.maxKey).toString()
+ << " does not exist."));
+ continue;
+ }
+
+ // If chunk is already on the correct shard, just treat the operation as success
+ if (chunk->getShardId() == migrateInfo.to) {
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
+ migrationStatuses->emplace(migrateInfo.getName(), Status::OK());
+ continue;
+ }
{
// No need to lock the mutex. Only this function and _takeDistLockForAMigration
diff --git a/src/mongo/s/catalog/replset/SConscript b/src/mongo/s/catalog/replset/SConscript
index 33584b9224b..594a91c8e6f 100644
--- a/src/mongo/s/catalog/replset/SConscript
+++ b/src/mongo/s/catalog/replset/SConscript
@@ -53,7 +53,6 @@ env.CppUnitTest(
'$BUILD_DIR/mongo/s/catalog/dist_lock_catalog_mock',
'$BUILD_DIR/mongo/s/catalog/sharding_catalog_mock',
'$BUILD_DIR/mongo/s/coreshard',
- '$BUILD_DIR/mongo/s/mongoscore',
'$BUILD_DIR/mongo/s/sharding_test_fixture',
'$BUILD_DIR/mongo/util/clock_source_mock',
]
diff --git a/src/mongo/s/chunk.cpp b/src/mongo/s/chunk.cpp
index 339a80e313a..84313d24c95 100644
--- a/src/mongo/s/chunk.cpp
+++ b/src/mongo/s/chunk.cpp
@@ -36,12 +36,12 @@
#include "mongo/db/commands.h"
#include "mongo/db/lasterror.h"
#include "mongo/platform/random.h"
-#include "mongo/s/balancer/balancer.h"
#include "mongo/s/balancer/balancer_configuration.h"
#include "mongo/s/catalog/sharding_catalog_client.h"
#include "mongo/s/catalog/type_collection.h"
#include "mongo/s/chunk_manager.h"
#include "mongo/s/client/shard_registry.h"
+#include "mongo/s/config_server_client.h"
#include "mongo/s/grid.h"
#include "mongo/s/shard_util.h"
#include "mongo/s/sharding_raii.h"
@@ -392,7 +392,7 @@ bool Chunk::splitIfShould(OperationContext* txn, long dataWritten) {
chunkToMove.setMax(suggestedChunk->getMax());
chunkToMove.setVersion(suggestedChunk->getLastmod());
- Status rebalanceStatus = Balancer::get(txn)->rebalanceSingleChunk(txn, chunkToMove);
+ Status rebalanceStatus = configsvr_client::rebalanceChunk(txn, chunkToMove);
if (!rebalanceStatus.isOK()) {
msgassertedNoTraceWithStatus(10412, rebalanceStatus);
}
diff --git a/src/mongo/s/commands/cluster_move_chunk_cmd.cpp b/src/mongo/s/commands/cluster_move_chunk_cmd.cpp
index 58a096ecd02..3d490d6e38d 100644
--- a/src/mongo/s/commands/cluster_move_chunk_cmd.cpp
+++ b/src/mongo/s/commands/cluster_move_chunk_cmd.cpp
@@ -38,15 +38,15 @@
#include "mongo/db/client_basic.h"
#include "mongo/db/commands.h"
#include "mongo/db/write_concern_options.h"
-#include "mongo/s/balancer/balancer.h"
#include "mongo/s/balancer/balancer_configuration.h"
#include "mongo/s/catalog/catalog_cache.h"
-#include "mongo/s/chunk_manager.h"
#include "mongo/s/client/shard_connection.h"
#include "mongo/s/client/shard_registry.h"
#include "mongo/s/config.h"
+#include "mongo/s/config_server_client.h"
#include "mongo/s/grid.h"
#include "mongo/s/migration_secondary_throttle_options.h"
+#include "mongo/s/sharding_raii.h"
#include "mongo/util/log.h"
#include "mongo/util/timer.h"
@@ -159,8 +159,10 @@ public:
return false;
}
- // This refreshes the chunk metadata if stale.
- shared_ptr<ChunkManager> info = config->getChunkManager(txn, nss.ns(), true);
+ // This refreshes the chunk metadata if stale
+ auto scopedCM = uassertStatusOK(ScopedChunkManager::getExisting(txn, nss));
+ ChunkManager* const info = scopedCM.cm();
+
shared_ptr<Chunk> chunk;
if (!find.isEmpty()) {
@@ -202,26 +204,25 @@ public:
}
}
- const auto from = grid.shardRegistry()->getShard(txn, chunk->getShardId());
- if (from->getId() != to->getId()) {
- const auto secondaryThrottle =
- uassertStatusOK(MigrationSecondaryThrottleOptions::createFromCommand(cmdObj));
+ const auto secondaryThrottle =
+ uassertStatusOK(MigrationSecondaryThrottleOptions::createFromCommand(cmdObj));
- ChunkType chunkType;
- chunkType.setNS(nss.ns());
- chunkType.setMin(chunk->getMin());
- chunkType.setMax(chunk->getMax());
- chunkType.setShard(chunk->getShardId());
- chunkType.setVersion(info->getVersion());
+ ChunkType chunkType;
+ chunkType.setNS(nss.ns());
+ chunkType.setMin(chunk->getMin());
+ chunkType.setMax(chunk->getMax());
+ chunkType.setShard(chunk->getShardId());
+ chunkType.setVersion(info->getVersion());
- uassertStatusOK(
- Balancer::get(txn)->moveSingleChunk(txn,
+ uassertStatusOK(configsvr_client::moveChunk(txn,
chunkType,
to->getId(),
maxChunkSizeBytes,
secondaryThrottle,
cmdObj["_waitForDelete"].trueValue()));
- }
+
+ // Make sure the chunk manager is updated with the migrated chunk
+ info->reload(txn);
result.append("millis", t.millis());
return true;
diff --git a/src/mongo/s/commands/cluster_shard_collection_cmd.cpp b/src/mongo/s/commands/cluster_shard_collection_cmd.cpp
index 2a38786656b..f5f2c791fdc 100644
--- a/src/mongo/s/commands/cluster_shard_collection_cmd.cpp
+++ b/src/mongo/s/commands/cluster_shard_collection_cmd.cpp
@@ -47,7 +47,6 @@
#include "mongo/db/operation_context.h"
#include "mongo/db/query/collation/collator_factory_interface.h"
#include "mongo/db/write_concern_options.h"
-#include "mongo/s/balancer/balancer.h"
#include "mongo/s/balancer/balancer_configuration.h"
#include "mongo/s/catalog/catalog_cache.h"
#include "mongo/s/catalog/sharding_catalog_client.h"
@@ -55,6 +54,7 @@
#include "mongo/s/client/shard_registry.h"
#include "mongo/s/cluster_write.h"
#include "mongo/s/config.h"
+#include "mongo/s/config_server_client.h"
#include "mongo/s/grid.h"
#include "mongo/s/migration_secondary_throttle_options.h"
#include "mongo/s/shard_util.h"
@@ -525,7 +525,7 @@ public:
chunkType.setShard(chunk->getShardId());
chunkType.setVersion(chunkManager->getVersion());
- Status moveStatus = Balancer::get(txn)->moveSingleChunk(
+ Status moveStatus = configsvr_client::moveChunk(
txn,
chunkType,
to->getId(),
diff --git a/src/mongo/s/query/SConscript b/src/mongo/s/query/SConscript
index d2f2a03f3ed..ab3a4766fc0 100644
--- a/src/mongo/s/query/SConscript
+++ b/src/mongo/s/query/SConscript
@@ -90,7 +90,6 @@ env.CppUnitTest(
'$BUILD_DIR/mongo/db/query/query_request',
'$BUILD_DIR/mongo/db/service_context_noop_init',
'$BUILD_DIR/mongo/executor/thread_pool_task_executor_test_fixture',
- '$BUILD_DIR/mongo/s/mongoscore',
'$BUILD_DIR/mongo/s/sharding_test_fixture',
],
)
diff --git a/src/mongo/s/server.cpp b/src/mongo/s/server.cpp
index c55ff0e8ca4..155deb809c5 100644
--- a/src/mongo/s/server.cpp
+++ b/src/mongo/s/server.cpp
@@ -62,7 +62,6 @@
#include "mongo/db/wire_version.h"
#include "mongo/executor/task_executor_pool.h"
#include "mongo/platform/process_id.h"
-#include "mongo/s/balancer/balancer.h"
#include "mongo/s/balancer/balancer_configuration.h"
#include "mongo/s/catalog/sharding_catalog_client.h"
#include "mongo/s/catalog/sharding_catalog_manager.h"
@@ -302,8 +301,6 @@ static ExitCode runMongosServer() {
shardingUptimeReporter.emplace();
shardingUptimeReporter->startPeriodicThread();
- Balancer::create(getGlobalServiceContext());
-
clusterCursorCleanupJob.go();
UserCacheInvalidator cacheInvalidatorThread(getGlobalAuthorizationManager());
diff --git a/src/mongo/s/sharding_raii.cpp b/src/mongo/s/sharding_raii.cpp
index a948893d36e..7902657b506 100644
--- a/src/mongo/s/sharding_raii.cpp
+++ b/src/mongo/s/sharding_raii.cpp
@@ -92,18 +92,28 @@ StatusWith<ScopedChunkManager> ScopedChunkManager::getExisting(OperationContext*
auto scopedDb = std::move(scopedDbStatus.getValue());
- shared_ptr<ChunkManager> cm = scopedDb.db()->getChunkManagerIfExists(txn, nss.ns(), true);
- if (!cm) {
- return {ErrorCodes::NamespaceNotSharded,
- str::stream() << "Collection " << nss.ns() << " does not exist or is not sharded."};
- }
+ try {
+ std::shared_ptr<ChunkManager> cm =
+ scopedDb.db()->getChunkManager(txn, nss.ns(), true, false);
+
+ if (!cm) {
+ return {ErrorCodes::NamespaceNotSharded,
+ str::stream() << "Collection " << nss.ns()
+ << " does not exist or is not sharded."};
+ }
- if (cm->getChunkMap().empty()) {
- return {ErrorCodes::NamespaceNotSharded,
- str::stream() << "Collection " << nss.ns() << " does not have any chunks."};
- }
+ if (cm->getChunkMap().empty()) {
+ return {ErrorCodes::NamespaceNotSharded,
+ str::stream() << "Collection " << nss.ns()
+ << " is marked as sharded, but does not have any chunks. This "
+ "most likely indicates a corrupted metadata or "
+ "partially completed 'shardCollection' command."};
+ }
- return {ScopedChunkManager(std::move(scopedDb), std::move(cm))};
+ return {ScopedChunkManager(std::move(scopedDb), std::move(cm))};
+ } catch (const AssertionException& e) {
+ return e.toStatus();
+ }
}
} // namespace mongo