summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorCheahuychou Mao <cheahuychou.mao@mongodb.com>2019-11-19 16:58:01 +0000
committerevergreen <evergreen@mongodb.com>2019-11-19 16:58:01 +0000
commit80d1587acb9e445cad81848a0c4b90f01bb9e00a (patch)
tree9a9c2b96c977893c84d0170ea3bbb64dff07732f
parentcffaa3c53fa3a404273f767f55f1a93f1320a209 (diff)
downloadmongo-80d1587acb9e445cad81848a0c4b90f01bb9e00a.tar.gz
SERVER-43898 Add more unit tests for balancer_policy.cpp and balancer_chunk_selection_policy.cpp
-rw-r--r--src/mongo/db/s/SConscript2
-rw-r--r--src/mongo/db/s/balancer/balancer_chunk_selection_policy_test.cpp221
-rw-r--r--src/mongo/db/s/balancer/balancer_policy_test.cpp57
-rw-r--r--src/mongo/db/s/balancer/migration_manager_test.cpp270
-rw-r--r--src/mongo/db/s/balancer/migration_test_fixture.cpp158
-rw-r--r--src/mongo/db/s/balancer/migration_test_fixture.h163
6 files changed, 641 insertions, 230 deletions
diff --git a/src/mongo/db/s/SConscript b/src/mongo/db/s/SConscript
index cc722d4c549..e584d103fd3 100644
--- a/src/mongo/db/s/SConscript
+++ b/src/mongo/db/s/SConscript
@@ -360,9 +360,11 @@ env.CppUnitTest(
env.CppUnitTest(
target='db_s_balancer_test',
source=[
+ 'balancer/balancer_chunk_selection_policy_test.cpp',
'balancer/balancer_policy_test.cpp',
'balancer/cluster_statistics_test.cpp',
'balancer/migration_manager_test.cpp',
+ 'balancer/migration_test_fixture.cpp',
'balancer/scoped_migration_request_test.cpp',
'balancer/type_migration_test.cpp',
],
diff --git a/src/mongo/db/s/balancer/balancer_chunk_selection_policy_test.cpp b/src/mongo/db/s/balancer/balancer_chunk_selection_policy_test.cpp
new file mode 100644
index 00000000000..b0630aaf92f
--- /dev/null
+++ b/src/mongo/db/s/balancer/balancer_chunk_selection_policy_test.cpp
@@ -0,0 +1,221 @@
+/**
+ * Copyright (C) 2019-present MongoDB, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the Server Side Public License, version 1,
+ * as published by MongoDB, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * Server Side Public License for more details.
+ *
+ * You should have received a copy of the Server Side Public License
+ * along with this program. If not, see
+ * <http://www.mongodb.com/licensing/server-side-public-license>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the Server Side Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#include "mongo/platform/basic.h"
+
+#include "mongo/db/commands.h"
+#include "mongo/db/s/balancer/balancer_chunk_selection_policy_impl.h"
+#include "mongo/db/s/balancer/cluster_statistics_impl.h"
+#include "mongo/db/s/balancer/migration_test_fixture.h"
+#include "mongo/platform/random.h"
+
+namespace mongo {
+namespace {
+
+const std::string kDbName = "TestDb";
+const NamespaceString kNamespace(kDbName, "TestColl");
+const int kSizeOnDisk = 1;
+
+class BalancerChunkSelectionTest : public MigrationTestFixture {
+protected:
+ BalancerChunkSelectionTest()
+ : _random(std::random_device{}()),
+ _clusterStats(std::make_unique<ClusterStatisticsImpl>(_random)),
+ _chunkSelectionPolicy(
+ std::make_unique<BalancerChunkSelectionPolicyImpl>(_clusterStats.get(), _random)) {}
+
+ /**
+ * Sets up mock network to expect a listDatabases command and returns a BSON response with
+ * a dummy sizeOnDisk.
+ */
+ void expectListDatabasesCommand() {
+ BSONObjBuilder resultBuilder;
+ CommandHelpers::appendCommandStatusNoThrow(resultBuilder, Status::OK());
+
+ onCommand([&resultBuilder](const RemoteCommandRequest& request) {
+ ASSERT(request.cmdObj["listDatabases"]);
+ vector<BSONObj> dbInfos;
+ BSONObjBuilder b;
+ b.append("name", kDbName);
+ b.append("sizeOnDisk", kSizeOnDisk);
+ b.append("empty", kSizeOnDisk > 0);
+ resultBuilder.append("databases", dbInfos);
+ resultBuilder.append("totalSize", kSizeOnDisk);
+ return resultBuilder.obj();
+ });
+ }
+
+ /**
+ * Sets up mock network to expect a serverStatus command and returns a BSON response with
+ * a dummy version.
+ */
+ void expectServerStatusCommand() {
+ BSONObjBuilder resultBuilder;
+ CommandHelpers::appendCommandStatusNoThrow(resultBuilder, Status::OK());
+
+ onCommand([&resultBuilder](const RemoteCommandRequest& request) {
+ ASSERT(request.cmdObj["serverStatus"]);
+ resultBuilder.append("version", "MONGO_VERSION");
+ return resultBuilder.obj();
+ });
+ }
+
+ /**
+     * Sets up mock network for all the shards to expect the commands executed for computing cluster
+     * stats, which include listDatabases and serverStatus.
+ */
+ void expectGetStatsCommands(int numShards) {
+ for (int i = 0; i < numShards; i++) {
+ expectListDatabasesCommand();
+ expectServerStatusCommand();
+ }
+ }
+
+ /**
+ * Returns a new BSON object with the tags appended.
+ */
+ BSONObj appendTags(const BSONObj shardBSON, std::vector<std::string> tags) {
+ BSONObjBuilder appendedShardBSON(shardBSON);
+ BSONArrayBuilder tagsBuilder;
+ for (auto& tag : tags) {
+ tagsBuilder.append(tag);
+ }
+ tagsBuilder.done();
+ appendedShardBSON.append("tags", tagsBuilder.arr());
+ return appendedShardBSON.obj();
+ }
+
+ BalancerRandomSource _random;
+ std::unique_ptr<ClusterStatistics> _clusterStats;
+ std::unique_ptr<BalancerChunkSelectionPolicy> _chunkSelectionPolicy;
+};
+
+TEST_F(BalancerChunkSelectionTest, TagRangesOverlap) {
+ // Set up two shards in the metadata.
+ ASSERT_OK(catalogClient()->insertConfigDocument(
+ operationContext(), ShardType::ConfigNS, kShard0, kMajorityWriteConcern));
+ ASSERT_OK(catalogClient()->insertConfigDocument(
+ operationContext(), ShardType::ConfigNS, kShard1, kMajorityWriteConcern));
+
+ // Set up a database and a sharded collection in the metadata.
+ ChunkVersion version(2, 0, OID::gen());
+ setUpDatabase(kDbName, kShardId0);
+ setUpCollection(kNamespace, version);
+
+ // Set up one chunk for the collection in the metadata.
+ ChunkType chunk = setUpChunk(
+ kNamespace, kKeyPattern.globalMin(), kKeyPattern.globalMax(), kShardId0, version);
+
+ auto assertRangeOverlapConflictWhenMoveChunk = [this, &chunk](const StringMap<ChunkRange>&
+ tagChunkRanges) {
+ // Set up two zones whose ranges overlap.
+ setUpTags(kNamespace, tagChunkRanges);
+
+ auto future = launchAsync([this, &chunk] {
+ // Requesting chunks to be relocated requires running commands on each shard to get
+ // shard statistics. Set up dummy hosts for the source shards.
+ shardTargeterMock(operationContext(), kShardId0)->setFindHostReturnValue(kShardHost0);
+ shardTargeterMock(operationContext(), kShardId1)->setFindHostReturnValue(kShardHost1);
+
+ auto migrateInfoStatus =
+ _chunkSelectionPolicy.get()->selectSpecificChunkToMove(operationContext(), chunk);
+ ASSERT_EQUALS(ErrorCodes::RangeOverlapConflict, migrateInfoStatus.getStatus().code());
+ });
+
+ expectGetStatsCommands(2);
+ future.default_timed_get();
+ removeAllTags(kNamespace);
+ };
+
+ assertRangeOverlapConflictWhenMoveChunk(
+ {{"A", {kKeyPattern.globalMin(), BSON(kPattern << -10)}},
+ {"B", {BSON(kPattern << -15), kKeyPattern.globalMax()}}});
+ assertRangeOverlapConflictWhenMoveChunk(
+ {{"A", {kKeyPattern.globalMin(), BSON(kPattern << -5)}},
+ {"B", {BSON(kPattern << -10), kKeyPattern.globalMax()}}});
+ assertRangeOverlapConflictWhenMoveChunk(
+ {{"A", {kKeyPattern.globalMin(), kKeyPattern.globalMax()}},
+ {"B", {BSON(kPattern << -15), kKeyPattern.globalMax()}}});
+}
+
+TEST_F(BalancerChunkSelectionTest, TagRangeMaxNotAlignedWithChunkMax) {
+ // Set up two shards in the metadata.
+ ASSERT_OK(catalogClient()->insertConfigDocument(operationContext(),
+ ShardType::ConfigNS,
+ appendTags(kShard0, {"A"}),
+ kMajorityWriteConcern));
+ ASSERT_OK(catalogClient()->insertConfigDocument(operationContext(),
+ ShardType::ConfigNS,
+ appendTags(kShard1, {"A"}),
+ kMajorityWriteConcern));
+
+ // Set up a database and a sharded collection in the metadata.
+ ChunkVersion version(2, 0, OID::gen());
+ setUpDatabase(kDbName, kShardId0);
+ setUpCollection(kNamespace, version);
+
+ // Set up the zone.
+ setUpTags(kNamespace, {{"A", {kKeyPattern.globalMin(), BSON(kPattern << -10)}}});
+
+ auto assertErrorWhenMoveChunk = [this, &version](const std::vector<ChunkRange>& chunkRanges) {
+ // Give shard0 all the chunks so the cluster is imbalanced.
+ for (const auto& chunkRange : chunkRanges) {
+ setUpChunk(kNamespace, chunkRange.getMin(), chunkRange.getMax(), kShardId0, version);
+ version.incMinor();
+ }
+
+ auto future = launchAsync([this] {
+            // Requesting chunks to be relocated requires running commands on each shard to
+ // get shard statistics. Set up dummy hosts for the source shards.
+ shardTargeterMock(operationContext(), kShardId0)->setFindHostReturnValue(kShardHost0);
+ shardTargeterMock(operationContext(), kShardId1)->setFindHostReturnValue(kShardHost1);
+
+ auto candidateChunksStatus =
+ _chunkSelectionPolicy.get()->selectChunksToMove(operationContext());
+ ASSERT_OK(candidateChunksStatus.getStatus());
+
+ // The balancer does not bubble up the IllegalOperation error, but it is expected
+ // to postpone the balancing work for the zones with the error until the chunks
+ // are split appropriately.
+ ASSERT_EQUALS(0U, candidateChunksStatus.getValue().size());
+ });
+
+ expectGetStatsCommands(2);
+ future.default_timed_get();
+ removeAllChunks(kNamespace);
+ };
+
+ assertErrorWhenMoveChunk({{kKeyPattern.globalMin(), BSON(kPattern << -5)},
+ {BSON(kPattern << -5), kKeyPattern.globalMax()}});
+ assertErrorWhenMoveChunk({{kKeyPattern.globalMin(), BSON(kPattern << -15)},
+ {BSON(kPattern << -15), kKeyPattern.globalMax()}});
+}
+
+} // namespace
+} // namespace mongo
diff --git a/src/mongo/db/s/balancer/balancer_policy_test.cpp b/src/mongo/db/s/balancer/balancer_policy_test.cpp
index 6ef9f2543a8..81eb8366e80 100644
--- a/src/mongo/db/s/balancer/balancer_policy_test.cpp
+++ b/src/mongo/db/s/balancer/balancer_policy_test.cpp
@@ -575,6 +575,63 @@ TEST(BalancerPolicy, BalancerFixesIncorrectTagsInOtherwiseBalancedCluster) {
ASSERT_BSONOBJ_EQ(cluster.second[kShardId2][0].getMax(), migrations[0].maxKey);
}
+TEST(BalancerPolicy, BalancerTagAlreadyBalanced) {
+ // Chunks are balanced across shards for the tag.
+ auto cluster = generateCluster(
+ {{ShardStatistics(kShardId0, kNoMaxSize, 3, false, {"a"}, emptyShardVersion), 2},
+ {ShardStatistics(kShardId1, kNoMaxSize, 2, false, {"a"}, emptyShardVersion), 2}});
+
+ DistributionStatus distribution(kNamespace, cluster.second);
+ ASSERT_OK(distribution.addRangeToZone(ZoneRange(kMinBSONKey, kMaxBSONKey, "a")));
+ ASSERT(balanceChunks(cluster.first, distribution, false, false).empty());
+}
+
+TEST(BalancerPolicy, BalancerMostOverLoadShardHasMultipleTags) {
+ // shard0 has chunks [MinKey, 1), [1, 2), [2, 3), [3, 4), [4, 5), so two chunks each
+ // for tag "b" and "c". So [1, 2) is expected to be moved to shard1 in round 1.
+ auto cluster = generateCluster(
+ {{ShardStatistics(kShardId0, kNoMaxSize, 5, false, {"a", "b", "c"}, emptyShardVersion), 5},
+ {ShardStatistics(kShardId1, kNoMaxSize, 1, false, {"b"}, emptyShardVersion), 1},
+ {ShardStatistics(kShardId2, kNoMaxSize, 1, false, {"c"}, emptyShardVersion), 1}});
+
+ DistributionStatus distribution(kNamespace, cluster.second);
+ ASSERT_OK(distribution.addRangeToZone(ZoneRange(kMinBSONKey, BSON("x" << 1), "a")));
+ ASSERT_OK(distribution.addRangeToZone(ZoneRange(BSON("x" << 1), BSON("x" << 3), "b")));
+ ASSERT_OK(distribution.addRangeToZone(ZoneRange(BSON("x" << 3), BSON("x" << 5), "c")));
+
+ const auto migrations(balanceChunks(cluster.first, distribution, false, false));
+ ASSERT_EQ(1U, migrations.size());
+
+ ASSERT_EQ(kShardId0, migrations[0].from);
+ ASSERT_EQ(kShardId1, migrations[0].to);
+ ASSERT_BSONOBJ_EQ(cluster.second[kShardId0][1].getMin(), migrations[0].minKey);
+ ASSERT_BSONOBJ_EQ(cluster.second[kShardId0][1].getMax(), migrations[0].maxKey);
+}
+
+TEST(BalancerPolicy, BalancerMostOverLoadShardHasMultipleTagsSkipTagWithShardInUse) {
+ // shard0 has chunks [MinKey, 1), [1, 2), [2, 3), [3, 4), [4, 5), so two chunks each
+ // for tag "b" and "c". So [3, 4) is expected to be moved to shard2 because shard1 is
+ // in use.
+ auto cluster = generateCluster(
+ {{ShardStatistics(kShardId0, kNoMaxSize, 5, false, {"a", "b", "c"}, emptyShardVersion), 5},
+ {ShardStatistics(kShardId1, kNoMaxSize, 1, false, {"b"}, emptyShardVersion), 1},
+ {ShardStatistics(kShardId2, kNoMaxSize, 1, false, {"c"}, emptyShardVersion), 1}});
+
+ DistributionStatus distribution(kNamespace, cluster.second);
+ ASSERT_OK(distribution.addRangeToZone(ZoneRange(kMinBSONKey, BSON("x" << 1), "a")));
+ ASSERT_OK(distribution.addRangeToZone(ZoneRange(BSON("x" << 1), BSON("x" << 3), "b")));
+ ASSERT_OK(distribution.addRangeToZone(ZoneRange(BSON("x" << 3), BSON("x" << 5), "c")));
+
+ std::set<ShardId> usedShards{kShardId1};
+ const auto migrations(BalancerPolicy::balance(cluster.first, distribution, &usedShards, false));
+ ASSERT_EQ(1U, migrations.size());
+
+ ASSERT_EQ(kShardId0, migrations[0].from);
+ ASSERT_EQ(kShardId2, migrations[0].to);
+ ASSERT_BSONOBJ_EQ(cluster.second[kShardId0][3].getMin(), migrations[0].minKey);
+ ASSERT_BSONOBJ_EQ(cluster.second[kShardId0][3].getMax(), migrations[0].maxKey);
+}
+
TEST(BalancerPolicy, BalancerFixesIncorrectTagsInOtherwiseBalancedClusterParallel) {
// Chunks are balanced across shards, but there are wrong tags, which need to be fixed
auto cluster = generateCluster(
diff --git a/src/mongo/db/s/balancer/migration_manager_test.cpp b/src/mongo/db/s/balancer/migration_manager_test.cpp
index 875524a6606..61ee28ba11e 100644
--- a/src/mongo/db/s/balancer/migration_manager_test.cpp
+++ b/src/mongo/db/s/balancer/migration_manager_test.cpp
@@ -31,262 +31,72 @@
#include <memory>
-#include "mongo/client/remote_command_targeter_mock.h"
#include "mongo/db/commands.h"
#include "mongo/db/s/balancer/migration_manager.h"
-#include "mongo/db/s/balancer/type_migration.h"
+#include "mongo/db/s/balancer/migration_test_fixture.h"
#include "mongo/db/s/config/sharding_catalog_manager.h"
-#include "mongo/db/write_concern_options.h"
-#include "mongo/s/catalog/dist_lock_manager_mock.h"
-#include "mongo/s/catalog/type_collection.h"
-#include "mongo/s/catalog/type_database.h"
-#include "mongo/s/catalog/type_locks.h"
-#include "mongo/s/catalog/type_shard.h"
-#include "mongo/s/config_server_test_fixture.h"
-#include "mongo/s/database_version_helpers.h"
#include "mongo/s/request_types/move_chunk_request.h"
-#include "mongo/util/scopeguard.h"
namespace mongo {
namespace {
-using executor::RemoteCommandRequest;
-using executor::RemoteCommandResponse;
-using std::vector;
-using unittest::assertGet;
-
-const auto kShardId0 = ShardId("shard0");
-const auto kShardId1 = ShardId("shard1");
-const auto kShardId2 = ShardId("shard2");
-const auto kShardId3 = ShardId("shard3");
-
-const HostAndPort kShardHost0 = HostAndPort("TestHost0", 12345);
-const HostAndPort kShardHost1 = HostAndPort("TestHost1", 12346);
-const HostAndPort kShardHost2 = HostAndPort("TestHost2", 12347);
-const HostAndPort kShardHost3 = HostAndPort("TestHost3", 12348);
-
const MigrationSecondaryThrottleOptions kDefaultSecondaryThrottle =
MigrationSecondaryThrottleOptions::create(MigrationSecondaryThrottleOptions::kDefault);
-const long long kMaxSizeMB = 100;
-const std::string kPattern = "_id";
-
-const WriteConcernOptions kMajorityWriteConcern(WriteConcernOptions::kMajority,
- WriteConcernOptions::SyncMode::UNSET,
- Seconds(15));
-
-class MigrationManagerTest : public ConfigServerTestFixture {
+class MigrationManagerTest : public MigrationTestFixture {
protected:
- /**
- * Returns the mock targeter for the specified shard. Useful to use like so
- *
- * shardTargeterMock(opCtx, shardId)->setFindHostReturnValue(shardHost);
- *
- * Then calls to RemoteCommandTargeterMock::findHost will return HostAndPort "shardHost" for
- * Shard "shardId".
- *
- * Scheduling a command requires a shard host target. The command will be caught by the mock
- * network, but sending the command requires finding the shard's host.
- */
- std::shared_ptr<RemoteCommandTargeterMock> shardTargeterMock(OperationContext* opCtx,
- ShardId shardId);
+ void setUp() override {
+ MigrationTestFixture::setUp();
+ _migrationManager = std::make_unique<MigrationManager>(getServiceContext());
+ _migrationManager->startRecoveryAndAcquireDistLocks(operationContext());
+ _migrationManager->finishRecovery(operationContext(), 0, kDefaultSecondaryThrottle);
+ }
+
+ void tearDown() override {
+ checkMigrationsCollectionIsEmptyAndLocksAreUnlocked();
+ _migrationManager->interruptAndDisableMigrations();
+ _migrationManager->drainActiveMigrations();
+ _migrationManager.reset();
+ ConfigServerTestFixture::tearDown();
+ }
/**
- * Inserts a document into the config.databases collection to indicate that "dbName" is sharded
- * with primary "primaryShard".
+ * Sets up mock network to expect a moveChunk command and returns a fixed BSON response or a
+ * "returnStatus".
*/
- void setUpDatabase(const std::string& dbName, const ShardId primaryShard);
+ void expectMoveChunkCommand(const ChunkType& chunk,
+ const ShardId& toShardId,
+ const BSONObj& response) {
+ onCommand([&chunk, &toShardId, &response](const RemoteCommandRequest& request) {
+ NamespaceString nss(request.cmdObj.firstElement().valueStringData());
+ ASSERT_EQ(chunk.getNS(), nss);
- /**
- * Inserts a document into the config.collections collection to indicate that "collName" is
- * sharded with version "version". The shard key pattern defaults to "_id".
- */
- void setUpCollection(const NamespaceString& collName, ChunkVersion version);
+ const StatusWith<MoveChunkRequest> moveChunkRequestWithStatus =
+ MoveChunkRequest::createFromCommand(nss, request.cmdObj);
+ ASSERT_OK(moveChunkRequestWithStatus.getStatus());
- /**
- * Inserts a document into the config.chunks collection so that the chunk defined by the
- * parameters exists. Returns a ChunkType defined by the parameters.
- */
- ChunkType setUpChunk(const NamespaceString& collName,
- const BSONObj& chunkMin,
- const BSONObj& chunkMax,
- const ShardId& shardId,
- const ChunkVersion& version);
+ ASSERT_EQ(chunk.getNS(), moveChunkRequestWithStatus.getValue().getNss());
+ ASSERT_BSONOBJ_EQ(chunk.getMin(), moveChunkRequestWithStatus.getValue().getMinKey());
+ ASSERT_BSONOBJ_EQ(chunk.getMax(), moveChunkRequestWithStatus.getValue().getMaxKey());
+ ASSERT_EQ(chunk.getShard(), moveChunkRequestWithStatus.getValue().getFromShardId());
- /**
- * Inserts a document into the config.migrations collection as an active migration.
- */
- void setUpMigration(const ChunkType& chunk, const ShardId& toShard);
+ ASSERT_EQ(toShardId, moveChunkRequestWithStatus.getValue().getToShardId());
- /**
- * Asserts that config.migrations is empty and config.locks contains no locked documents other
- * than the balancer's, both of which should be true if the MigrationManager is inactive and
- * behaving properly.
- */
- void checkMigrationsCollectionIsEmptyAndLocksAreUnlocked();
+ return response;
+ });
+ }
- /**
- * Sets up mock network to expect a moveChunk command and return a fixed BSON response or a
- * "returnStatus".
- */
- void expectMoveChunkCommand(const ChunkType& chunk,
- const ShardId& toShardId,
- const BSONObj& response);
void expectMoveChunkCommand(const ChunkType& chunk,
const ShardId& toShardId,
- const Status& returnStatus);
-
- // Random static initialization order can result in X constructor running before Y constructor
- // if X and Y are defined in different source files. Defining variables here to enforce order.
- const BSONObj kShard0 =
- BSON(ShardType::name(kShardId0.toString())
- << ShardType::host(kShardHost0.toString()) << ShardType::maxSizeMB(kMaxSizeMB));
- const BSONObj kShard1 =
- BSON(ShardType::name(kShardId1.toString())
- << ShardType::host(kShardHost1.toString()) << ShardType::maxSizeMB(kMaxSizeMB));
- const BSONObj kShard2 =
- BSON(ShardType::name(kShardId2.toString())
- << ShardType::host(kShardHost2.toString()) << ShardType::maxSizeMB(kMaxSizeMB));
- const BSONObj kShard3 =
- BSON(ShardType::name(kShardId3.toString())
- << ShardType::host(kShardHost3.toString()) << ShardType::maxSizeMB(kMaxSizeMB));
-
- const KeyPattern kKeyPattern = KeyPattern(BSON(kPattern << 1));
+ const Status& returnStatus) {
+ BSONObjBuilder resultBuilder;
+ CommandHelpers::appendCommandStatusNoThrow(resultBuilder, returnStatus);
+ expectMoveChunkCommand(chunk, toShardId, resultBuilder.obj());
+ }
std::unique_ptr<MigrationManager> _migrationManager;
-
-private:
- void setUp() override;
- void tearDown() override;
};
-void MigrationManagerTest::setUp() {
- setUpAndInitializeConfigDb();
- _migrationManager = std::make_unique<MigrationManager>(getServiceContext());
- _migrationManager->startRecoveryAndAcquireDistLocks(operationContext());
- _migrationManager->finishRecovery(operationContext(), 0, kDefaultSecondaryThrottle);
-}
-
-void MigrationManagerTest::tearDown() {
- checkMigrationsCollectionIsEmptyAndLocksAreUnlocked();
- _migrationManager->interruptAndDisableMigrations();
- _migrationManager->drainActiveMigrations();
- _migrationManager.reset();
- ConfigServerTestFixture::tearDown();
-}
-
-std::shared_ptr<RemoteCommandTargeterMock> MigrationManagerTest::shardTargeterMock(
- OperationContext* opCtx, ShardId shardId) {
- return RemoteCommandTargeterMock::get(
- uassertStatusOK(shardRegistry()->getShard(opCtx, shardId))->getTargeter());
-}
-
-void MigrationManagerTest::setUpDatabase(const std::string& dbName, const ShardId primaryShard) {
- DatabaseType db(dbName, primaryShard, true, databaseVersion::makeNew());
- ASSERT_OK(catalogClient()->insertConfigDocument(
- operationContext(), DatabaseType::ConfigNS, db.toBSON(), kMajorityWriteConcern));
-}
-
-void MigrationManagerTest::setUpCollection(const NamespaceString& collName, ChunkVersion version) {
- CollectionType coll;
- coll.setNs(collName);
- coll.setEpoch(version.epoch());
- coll.setUpdatedAt(Date_t::fromMillisSinceEpoch(version.toLong()));
- coll.setKeyPattern(kKeyPattern);
- coll.setUnique(false);
- ASSERT_OK(catalogClient()->insertConfigDocument(
- operationContext(), CollectionType::ConfigNS, coll.toBSON(), kMajorityWriteConcern));
-}
-
-ChunkType MigrationManagerTest::setUpChunk(const NamespaceString& collName,
- const BSONObj& chunkMin,
- const BSONObj& chunkMax,
- const ShardId& shardId,
- const ChunkVersion& version) {
- ChunkType chunk;
- chunk.setNS(collName);
- chunk.setMin(chunkMin);
- chunk.setMax(chunkMax);
- chunk.setShard(shardId);
- chunk.setVersion(version);
- ASSERT_OK(catalogClient()->insertConfigDocument(
- operationContext(), ChunkType::ConfigNS, chunk.toConfigBSON(), kMajorityWriteConcern));
- return chunk;
-}
-
-void MigrationManagerTest::setUpMigration(const ChunkType& chunk, const ShardId& toShard) {
- BSONObjBuilder builder;
- builder.append(MigrationType::ns(), chunk.getNS().ns());
- builder.append(MigrationType::min(), chunk.getMin());
- builder.append(MigrationType::max(), chunk.getMax());
- builder.append(MigrationType::toShard(), toShard.toString());
- builder.append(MigrationType::fromShard(), chunk.getShard().toString());
- chunk.getVersion().appendWithField(&builder, "chunkVersion");
- builder.append(MigrationType::forceJumbo(), "doNotForceJumbo");
-
- MigrationType migrationType = assertGet(MigrationType::fromBSON(builder.obj()));
- ASSERT_OK(catalogClient()->insertConfigDocument(operationContext(),
- MigrationType::ConfigNS,
- migrationType.toBSON(),
- kMajorityWriteConcern));
-}
-
-void MigrationManagerTest::checkMigrationsCollectionIsEmptyAndLocksAreUnlocked() {
- auto statusWithMigrationsQueryResponse =
- shardRegistry()->getConfigShard()->exhaustiveFindOnConfig(
- operationContext(),
- ReadPreferenceSetting{ReadPreference::PrimaryOnly},
- repl::ReadConcernLevel::kMajorityReadConcern,
- MigrationType::ConfigNS,
- BSONObj(),
- BSONObj(),
- boost::none);
- Shard::QueryResponse migrationsQueryResponse =
- uassertStatusOK(statusWithMigrationsQueryResponse);
- ASSERT_EQUALS(0U, migrationsQueryResponse.docs.size());
-
- auto statusWithLocksQueryResponse = shardRegistry()->getConfigShard()->exhaustiveFindOnConfig(
- operationContext(),
- ReadPreferenceSetting{ReadPreference::PrimaryOnly},
- repl::ReadConcernLevel::kMajorityReadConcern,
- LocksType::ConfigNS,
- BSON(LocksType::state(LocksType::LOCKED) << LocksType::name("{ '$ne' : 'balancer'}")),
- BSONObj(),
- boost::none);
- Shard::QueryResponse locksQueryResponse = uassertStatusOK(statusWithLocksQueryResponse);
- ASSERT_EQUALS(0U, locksQueryResponse.docs.size());
-}
-
-void MigrationManagerTest::expectMoveChunkCommand(const ChunkType& chunk,
- const ShardId& toShardId,
- const BSONObj& response) {
- onCommand([&chunk, &toShardId, &response](const RemoteCommandRequest& request) {
- NamespaceString nss(request.cmdObj.firstElement().valueStringData());
- ASSERT_EQ(chunk.getNS(), nss);
-
- const StatusWith<MoveChunkRequest> moveChunkRequestWithStatus =
- MoveChunkRequest::createFromCommand(nss, request.cmdObj);
- ASSERT_OK(moveChunkRequestWithStatus.getStatus());
-
- ASSERT_EQ(chunk.getNS(), moveChunkRequestWithStatus.getValue().getNss());
- ASSERT_BSONOBJ_EQ(chunk.getMin(), moveChunkRequestWithStatus.getValue().getMinKey());
- ASSERT_BSONOBJ_EQ(chunk.getMax(), moveChunkRequestWithStatus.getValue().getMaxKey());
- ASSERT_EQ(chunk.getShard(), moveChunkRequestWithStatus.getValue().getFromShardId());
-
- ASSERT_EQ(toShardId, moveChunkRequestWithStatus.getValue().getToShardId());
-
- return response;
- });
-}
-
-void MigrationManagerTest::expectMoveChunkCommand(const ChunkType& chunk,
- const ShardId& toShardId,
- const Status& returnStatus) {
- BSONObjBuilder resultBuilder;
- CommandHelpers::appendCommandStatusNoThrow(resultBuilder, returnStatus);
- expectMoveChunkCommand(chunk, toShardId, resultBuilder.obj());
-}
-
TEST_F(MigrationManagerTest, OneCollectionTwoMigrations) {
// Set up two shards in the metadata.
ASSERT_OK(catalogClient()->insertConfigDocument(
diff --git a/src/mongo/db/s/balancer/migration_test_fixture.cpp b/src/mongo/db/s/balancer/migration_test_fixture.cpp
new file mode 100644
index 00000000000..ee20adac338
--- /dev/null
+++ b/src/mongo/db/s/balancer/migration_test_fixture.cpp
@@ -0,0 +1,158 @@
+/**
+ * Copyright (C) 2018-present MongoDB, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the Server Side Public License, version 1,
+ * as published by MongoDB, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * Server Side Public License for more details.
+ *
+ * You should have received a copy of the Server Side Public License
+ * along with this program. If not, see
+ * <http://www.mongodb.com/licensing/server-side-public-license>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the Server Side Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#include "mongo/platform/basic.h"
+
+#include <memory>
+
+#include "mongo/db/s/balancer/migration_test_fixture.h"
+
+namespace mongo {
+
+void MigrationTestFixture::setUp() {
+ setUpAndInitializeConfigDb();
+}
+
+std::shared_ptr<RemoteCommandTargeterMock> MigrationTestFixture::shardTargeterMock(
+ OperationContext* opCtx, ShardId shardId) {
+ return RemoteCommandTargeterMock::get(
+ uassertStatusOK(shardRegistry()->getShard(opCtx, shardId))->getTargeter());
+}
+
+void MigrationTestFixture::setUpDatabase(const std::string& dbName, const ShardId primaryShard) {
+ DatabaseType db(dbName, primaryShard, true, databaseVersion::makeNew());
+ ASSERT_OK(catalogClient()->insertConfigDocument(
+ operationContext(), DatabaseType::ConfigNS, db.toBSON(), kMajorityWriteConcern));
+}
+
+void MigrationTestFixture::setUpCollection(const NamespaceString& collName, ChunkVersion version) {
+ CollectionType coll;
+ coll.setNs(collName);
+ coll.setEpoch(version.epoch());
+ coll.setUpdatedAt(Date_t::fromMillisSinceEpoch(version.toLong()));
+ coll.setKeyPattern(kKeyPattern);
+ coll.setUnique(false);
+ ASSERT_OK(catalogClient()->insertConfigDocument(
+ operationContext(), CollectionType::ConfigNS, coll.toBSON(), kMajorityWriteConcern));
+}
+
+ChunkType MigrationTestFixture::setUpChunk(const NamespaceString& collName,
+ const BSONObj& chunkMin,
+ const BSONObj& chunkMax,
+ const ShardId& shardId,
+ const ChunkVersion& version) {
+ ChunkType chunk;
+ chunk.setNS(collName);
+ chunk.setMin(chunkMin);
+ chunk.setMax(chunkMax);
+ chunk.setShard(shardId);
+ chunk.setVersion(version);
+ ASSERT_OK(catalogClient()->insertConfigDocument(
+ operationContext(), ChunkType::ConfigNS, chunk.toConfigBSON(), kMajorityWriteConcern));
+ return chunk;
+}
+
+void MigrationTestFixture::setUpTags(const NamespaceString& collName,
+ const StringMap<ChunkRange>& tagChunkRanges) {
+ for (auto const& tagChunkRange : tagChunkRanges) {
+ BSONObjBuilder tagDocBuilder;
+ tagDocBuilder.append(
+ "_id",
+ BSON(TagsType::ns(collName.ns()) << TagsType::min(tagChunkRange.second.getMin())));
+ tagDocBuilder.append(TagsType::ns(), collName.ns());
+ tagDocBuilder.append(TagsType::min(), tagChunkRange.second.getMin());
+ tagDocBuilder.append(TagsType::max(), tagChunkRange.second.getMax());
+ tagDocBuilder.append(TagsType::tag(), tagChunkRange.first);
+
+ ASSERT_OK(catalogClient()->insertConfigDocument(
+ operationContext(), TagsType::ConfigNS, tagDocBuilder.obj(), kMajorityWriteConcern));
+ }
+}
+
+void MigrationTestFixture::removeAllDocs(const NamespaceString& configNS,
+ const NamespaceString& collName) {
+ const auto query = BSON("ns" << collName.ns());
+ ASSERT_OK(catalogClient()->removeConfigDocuments(
+ operationContext(), configNS, query, kMajorityWriteConcern));
+ auto findStatus = findOneOnConfigCollection(operationContext(), configNS, query);
+ ASSERT_EQ(ErrorCodes::NoMatchingDocument, findStatus);
+}
+
+void MigrationTestFixture::removeAllTags(const NamespaceString& collName) {
+ removeAllDocs(TagsType::ConfigNS, collName);
+}
+
+void MigrationTestFixture::removeAllChunks(const NamespaceString& collName) {
+ removeAllDocs(ChunkType::ConfigNS, collName);
+}
+
+void MigrationTestFixture::setUpMigration(const ChunkType& chunk, const ShardId& toShard) {
+ BSONObjBuilder builder;
+ builder.append(MigrationType::ns(), chunk.getNS().ns());
+ builder.append(MigrationType::min(), chunk.getMin());
+ builder.append(MigrationType::max(), chunk.getMax());
+ builder.append(MigrationType::toShard(), toShard.toString());
+ builder.append(MigrationType::fromShard(), chunk.getShard().toString());
+ chunk.getVersion().appendWithField(&builder, "chunkVersion");
+ builder.append(MigrationType::forceJumbo(), "doNotForceJumbo");
+
+ MigrationType migrationType = assertGet(MigrationType::fromBSON(builder.obj()));
+ ASSERT_OK(catalogClient()->insertConfigDocument(operationContext(),
+ MigrationType::ConfigNS,
+ migrationType.toBSON(),
+ kMajorityWriteConcern));
+}
+
+void MigrationTestFixture::checkMigrationsCollectionIsEmptyAndLocksAreUnlocked() {
+ auto statusWithMigrationsQueryResponse =
+ shardRegistry()->getConfigShard()->exhaustiveFindOnConfig(
+ operationContext(),
+ ReadPreferenceSetting{ReadPreference::PrimaryOnly},
+ repl::ReadConcernLevel::kMajorityReadConcern,
+ MigrationType::ConfigNS,
+ BSONObj(),
+ BSONObj(),
+ boost::none);
+ Shard::QueryResponse migrationsQueryResponse =
+ uassertStatusOK(statusWithMigrationsQueryResponse);
+ ASSERT_EQUALS(0U, migrationsQueryResponse.docs.size());
+
+ auto statusWithLocksQueryResponse = shardRegistry()->getConfigShard()->exhaustiveFindOnConfig(
+ operationContext(),
+ ReadPreferenceSetting{ReadPreference::PrimaryOnly},
+ repl::ReadConcernLevel::kMajorityReadConcern,
+ LocksType::ConfigNS,
+ BSON(LocksType::state(LocksType::LOCKED) << LocksType::name("{ '$ne' : 'balancer'}")),
+ BSONObj(),
+ boost::none);
+ Shard::QueryResponse locksQueryResponse = uassertStatusOK(statusWithLocksQueryResponse);
+ ASSERT_EQUALS(0U, locksQueryResponse.docs.size());
+}
+
+} // namespace mongo \ No newline at end of file
diff --git a/src/mongo/db/s/balancer/migration_test_fixture.h b/src/mongo/db/s/balancer/migration_test_fixture.h
new file mode 100644
index 00000000000..56757bc96df
--- /dev/null
+++ b/src/mongo/db/s/balancer/migration_test_fixture.h
@@ -0,0 +1,163 @@
+/**
+ * Copyright (C) 2018-present MongoDB, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the Server Side Public License, version 1,
+ * as published by MongoDB, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * Server Side Public License for more details.
+ *
+ * You should have received a copy of the Server Side Public License
+ * along with this program. If not, see
+ * <http://www.mongodb.com/licensing/server-side-public-license>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the Server Side Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#pragma once
+
+// NOTE(review): defining MONGO_LOG_DEFAULT_COMPONENT in a header injects the macro into
+// every translation unit that includes it and risks redefinition clashes; the codebase
+// convention is to define it in the .cpp before including "mongo/util/log.h" — TODO
+// confirm and move it (kept here to avoid changing behavior of existing includers).
+#define MONGO_LOG_DEFAULT_COMPONENT ::mongo::logger::LogComponent::kDefault
+
+#include "mongo/platform/basic.h"
+
+#include <memory>
+
+#include "mongo/client/remote_command_targeter_mock.h"
+#include "mongo/db/s/balancer/type_migration.h"
+#include "mongo/db/write_concern_options.h"
+#include "mongo/s/catalog/dist_lock_manager_mock.h"
+#include "mongo/s/catalog/type_collection.h"
+#include "mongo/s/catalog/type_database.h"
+#include "mongo/s/catalog/type_locks.h"
+#include "mongo/s/catalog/type_shard.h"
+#include "mongo/s/catalog/type_tags.h"
+#include "mongo/s/config_server_test_fixture.h"
+#include "mongo/s/database_version_helpers.h"
+#include "mongo/util/log.h"
+
+namespace mongo {
+
+// NOTE(review): namespace-scope using-declarations in a header leak into every includer.
+// Kept for source compatibility with the existing .cpp files; prefer fully qualified
+// names in new header code.
+using executor::RemoteCommandRequest;
+using executor::RemoteCommandResponse;
+using std::vector;
+using unittest::assertGet;
+
+/**
+ * Test fixture for balancer/migration unit tests running against a mock config server.
+ * Provides helpers to populate config.databases, config.collections, config.chunks,
+ * config.tags and config.migrations, and to assert on their state afterwards.
+ */
+class MigrationTestFixture : public ConfigServerTestFixture {
+protected:
+    void setUp() override;
+
+    /**
+     * Returns the mock targeter for the specified shard. Useful to use like so
+     *
+     *     shardTargeterMock(opCtx, shardId)->setFindHostReturnValue(shardHost);
+     *
+     * Then calls to RemoteCommandTargeterMock::findHost will return HostAndPort "shardHost" for
+     * Shard "shardId".
+     *
+     * Scheduling a command requires a shard host target. The command will be caught by the mock
+     * network, but sending the command requires finding the shard's host.
+     */
+    std::shared_ptr<RemoteCommandTargeterMock> shardTargeterMock(OperationContext* opCtx,
+                                                                 ShardId shardId);
+
+    /**
+     * Inserts a document into the config.databases collection to indicate that "dbName" is sharded
+     * with primary "primaryShard".
+     *
+     * NOTE(review): "primaryShard" is taken by value with a top-level const — consider
+     * `const ShardId&` in a follow-up (signature kept unchanged here).
+     */
+    void setUpDatabase(const std::string& dbName, const ShardId primaryShard);
+
+    /**
+     * Inserts a document into the config.collections collection to indicate that "collName" is
+     * sharded with version "version". The shard key pattern defaults to "_id".
+     */
+    void setUpCollection(const NamespaceString& collName, ChunkVersion version);
+
+    /**
+     * Inserts a document into the config.chunks collection so that the chunk defined by the
+     * parameters exists. Returns a ChunkType defined by the parameters.
+     */
+    ChunkType setUpChunk(const NamespaceString& collName,
+                         const BSONObj& chunkMin,
+                         const BSONObj& chunkMax,
+                         const ShardId& shardId,
+                         const ChunkVersion& version);
+
+    /**
+     * Inserts one document per (tag name -> chunk range) entry into the config.tags collection so
+     * that the tags defined by the parameters exist for "collName".
+     */
+    void setUpTags(const NamespaceString& collName, const StringMap<ChunkRange>& tagChunkRanges);
+
+    /**
+     * Removes all documents that refer to the sharded collection "collName" from the given config
+     * collection "configNS" (e.g. config.tags or config.chunks).
+     */
+    void removeAllDocs(const NamespaceString& configNS, const NamespaceString& collName);
+
+    /**
+     * Removes all documents in config.tags for the collection "collName".
+     */
+    void removeAllTags(const NamespaceString& collName);
+
+    /**
+     * Removes all documents in config.chunks for the collection "collName".
+     */
+    void removeAllChunks(const NamespaceString& collName);
+
+    /**
+     * Inserts a document into the config.migrations collection as an active migration of "chunk"
+     * towards "toShard".
+     */
+    void setUpMigration(const ChunkType& chunk, const ShardId& toShard);
+
+    /**
+     * Asserts that config.migrations is empty and config.locks contains no locked documents other
+     * than the balancer's, both of which should be true if the MigrationManager is inactive and
+     * behaving properly.
+     */
+    void checkMigrationsCollectionIsEmptyAndLocksAreUnlocked();
+
+    // Random static initialization order can result in X constructor running before Y constructor
+    // if X and Y are defined in different source files. Defining variables here to enforce order.
+    const ShardId kShardId0 = ShardId("shard0");
+    const ShardId kShardId1 = ShardId("shard1");
+    const ShardId kShardId2 = ShardId("shard2");
+    const ShardId kShardId3 = ShardId("shard3");
+
+    // Host/port each mock shard's targeter resolves to (see shardTargeterMock above).
+    const HostAndPort kShardHost0 = HostAndPort("TestHost0", 12345);
+    const HostAndPort kShardHost1 = HostAndPort("TestHost1", 12346);
+    const HostAndPort kShardHost2 = HostAndPort("TestHost2", 12347);
+    const HostAndPort kShardHost3 = HostAndPort("TestHost3", 12348);
+
+    // Size cap (MB) advertised for every mock shard in the kShardN documents below.
+    const long long kMaxSizeMB = 100;
+
+    // config.shards documents for the four mock shards.
+    const BSONObj kShard0 =
+        BSON(ShardType::name(kShardId0.toString())
+             << ShardType::host(kShardHost0.toString()) << ShardType::maxSizeMB(kMaxSizeMB));
+    const BSONObj kShard1 =
+        BSON(ShardType::name(kShardId1.toString())
+             << ShardType::host(kShardHost1.toString()) << ShardType::maxSizeMB(kMaxSizeMB));
+    const BSONObj kShard2 =
+        BSON(ShardType::name(kShardId2.toString())
+             << ShardType::host(kShardHost2.toString()) << ShardType::maxSizeMB(kMaxSizeMB));
+    const BSONObj kShard3 =
+        BSON(ShardType::name(kShardId3.toString())
+             << ShardType::host(kShardHost3.toString()) << ShardType::maxSizeMB(kMaxSizeMB));
+
+    // Default shard key pattern ({_id: 1}) used by setUpCollection/setUpChunk.
+    const std::string kPattern = "_id";
+    const KeyPattern kKeyPattern = KeyPattern(BSON(kPattern << 1));
+
+    // Majority write concern (15s timeout) used when inserting fixture documents.
+    const WriteConcernOptions kMajorityWriteConcern = WriteConcernOptions(
+        WriteConcernOptions::kMajority, WriteConcernOptions::SyncMode::UNSET, Seconds(15));
+};
+
+} // namespace mongo