author    Enrico Golfieri <enrico.golfieri@mongodb.com>    2022-09-01 11:59:40 +0000
committer Evergreen Agent <no-reply@evergreen.mongodb.com> 2022-09-01 15:03:59 +0000
commit    22f21e6e5a8086b3766485cc323ce9673587f30c
tree      47cb614c3f778d643b60b89a93d58d930eba2b62
parent    ab2d5005886706ebcb3f267181328f00f1707ceb
SERVER-66297 get rid of maxSize
 jstests/sharding/addshard1.js                                        |   9
 jstests/sharding/shard_max_size.js                                   |  82
 jstests/sharding/top_chunk_autosplit.js                              |  60
 src/mongo/db/audit.cpp                                               |   2
 src/mongo/db/audit.h                                                 |   2
 src/mongo/db/s/SConscript                                            |   1
 src/mongo/db/s/balancer/balancer_defragmentation_policy_impl.cpp     |  22
 src/mongo/db/s/balancer/balancer_defragmentation_policy_test.cpp     |   4
 src/mongo/db/s/balancer/balancer_policy.cpp                          |   5
 src/mongo/db/s/balancer/balancer_policy_test.cpp                     | 231
 src/mongo/db/s/balancer/cluster_statistics.cpp                       |  12
 src/mongo/db/s/balancer/cluster_statistics.h                         |  11
 src/mongo/db/s/balancer/cluster_statistics_impl.cpp                  |   7
 src/mongo/db/s/balancer/cluster_statistics_test.cpp                  |  52
 src/mongo/db/s/balancer/migration_test_fixture.h                     |  12
 src/mongo/db/s/config/configsvr_add_shard_command.cpp                |   9
 src/mongo/db/s/config/sharding_catalog_manager.h                     |   8
 src/mongo/db/s/config/sharding_catalog_manager_add_shard_test.cpp    | 125
 src/mongo/db/s/config/sharding_catalog_manager_remove_shard_test.cpp |  12
 src/mongo/db/s/config/sharding_catalog_manager_shard_operations.cpp  |  17
 src/mongo/s/catalog/sharding_catalog_client_test.cpp                 |   2
 src/mongo/s/catalog/type_shard.h                                     |   2
 src/mongo/s/catalog/type_shard_test.cpp                              |  12
 src/mongo/s/commands/cluster_coll_stats_cmd.cpp                      |   3
 src/mongo/s/request_types/add_shard_request_test.cpp                 |  61
 src/mongo/s/request_types/add_shard_request_type.cpp                 |  18
 src/mongo/s/request_types/add_shard_request_type.h                   |  12
 27 files changed, 176 insertions(+), 617 deletions(-)
diff --git a/jstests/sharding/addshard1.js b/jstests/sharding/addshard1.js
index 81a161fcc84..2ca178282f8 100644
--- a/jstests/sharding/addshard1.js
+++ b/jstests/sharding/addshard1.js
@@ -22,14 +22,19 @@ var configDB = s.s.getDB('config');
assert.eq(null, configDB.databases.findOne({_id: 'testDB'}));
var newShard = "myShard";
-assert.commandWorked(s.admin.runCommand({addShard: rs1.getURL(), name: newShard, maxSize: 1024}));
+assert.commandWorked(s.admin.runCommand({addShard: rs1.getURL(), name: newShard}));
assert.neq(null, configDB.databases.findOne({_id: 'testDB'}));
var newShardDoc = configDB.shards.findOne({_id: newShard});
-assert.eq(1024, newShardDoc.maxSize);
assert(newShardDoc.topologyTime instanceof Timestamp);
+// maxSize field is no longer supported
+var newShardMaxSize = "myShardMaxSize";
+assert.commandFailedWithCode(
+ s.admin.runCommand({addShard: rs1.getURL(), name: newShardMaxSize, maxSize: 1024}),
+ ErrorCodes.InvalidOptions);
+
// a mongod with an existing database name should not be allowed to become a shard
var rs2 = new ReplSetTest({name: "addshard1-2", nodes: 1});
rs2.startSet({shardsvr: ""});
diff --git a/jstests/sharding/shard_max_size.js b/jstests/sharding/shard_max_size.js
deleted file mode 100644
index 35d4fa70e22..00000000000
--- a/jstests/sharding/shard_max_size.js
+++ /dev/null
@@ -1,82 +0,0 @@
-/**
- * Test the maxSize setting for the addShard command.
- *
- * @tags: [does_not_support_stepdowns]
- */
-(function() {
-'use strict';
-
-load("jstests/sharding/libs/find_chunks_util.js");
-load("jstests/libs/feature_flag_util.js");
-
-var MaxSizeMB = 1;
-
-var s = new ShardingTest({
- shards: 2,
- other: {
- chunkSize: 1,
- manualAddShard: true,
- shardOptions:
- {setParameter: {internalQueryMaxBlockingSortMemoryUsageBytes: 32 * 1024 * 1024}}
- }
-});
-
-var db = s.getDB("test");
-
-var names = s.getConnNames();
-assert.eq(2, names.length);
-assert.commandWorked(s.s0.adminCommand({addshard: names[0]}));
-assert.commandWorked(s.s0.adminCommand({addshard: names[1], maxSize: MaxSizeMB}));
-assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
-s.ensurePrimaryShard('test', names[0]);
-
-var bigString = "";
-while (bigString.length < 10000)
- bigString += "asdasdasdasdadasdasdasdasdasdasdasdasda";
-
-var inserted = 0;
-var num = 0;
-var bulk = db.foo.initializeUnorderedBulkOp();
-while (inserted < (40 * 1024 * 1024)) {
- bulk.insert({_id: num++, s: bigString});
- inserted += bigString.length;
-}
-assert.commandWorked(bulk.execute());
-
-assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo", key: {_id: 1}}));
-
-var getShardSize = function(conn) {
- var listDatabases = conn.getDB('admin').runCommand({listDatabases: 1});
- return listDatabases.totalSize;
-};
-
-var shardConn = new Mongo(names[1]);
-
-// Make sure that shard doesn't have any documents.
-assert.eq(0, shardConn.getDB('test').foo.find().itcount());
-
-var maxSizeBytes = MaxSizeMB * 1024 * 1024;
-
-// Fill the shard with documents to exceed the max size so the balancer won't move
-// chunks to this shard.
-var localColl = shardConn.getDB('local').padding;
-while (getShardSize(shardConn) < maxSizeBytes) {
- var localBulk = localColl.initializeUnorderedBulkOp();
-
- for (var x = 0; x < 20; x++) {
- localBulk.insert({x: x, val: bigString});
- }
- assert.commandWorked(localBulk.execute());
-
- // Force the storage engine to flush files to disk so shardSize will get updated.
- assert.commandWorked(shardConn.getDB('admin').runCommand({fsync: 1}));
-}
-
-s.startBalancer();
-s.awaitBalancerRound();
-
-var chunkCounts = s.chunkCounts('foo', 'test');
-assert.eq(0, chunkCounts[s.rs1.name]);
-
-s.stop();
-})();
diff --git a/jstests/sharding/top_chunk_autosplit.js b/jstests/sharding/top_chunk_autosplit.js
index 44f617adf78..629f9635352 100644
--- a/jstests/sharding/top_chunk_autosplit.js
+++ b/jstests/sharding/top_chunk_autosplit.js
@@ -333,64 +333,4 @@ for (var i = 0; i < singleNodeTests.length; i++) {
}
st.stop();
-
-// maxSize test
-// To set maxSize, must manually add the shards
-st = shardSetup({
- name: "maxSize",
- shards: 2,
- chunkSize: 1,
- other: {manualAddShard: true, enableAutoSplit: true}
-},
- dbName,
- collName);
-db = st.getDB(dbName);
-coll = db[collName];
-configDB = st.s.getDB('config');
-
-var maxSizeTests = [
- {
- // Test auto-split on the "low" top chunk with maxSize on destination shard
- name: "maxSize - low top chunk",
- lowOrHigh: lowChunk,
- movedToShard: st.rs0.name,
- shards: [
- {name: st.rs0.name, range: lowChunkRange, chunks: 10},
- {name: st.rs1.name, range: highChunkRange, chunks: 1}
- ],
- inserts: lowChunkInserts
- },
- {
- // Test auto-split on the "high" top chunk with maxSize on destination shard
- name: "maxSize - high top chunk",
- lowOrHigh: highChunk,
- movedToShard: st.rs0.name,
- shards: [
- {name: st.rs0.name, range: highChunkRange, chunks: 10},
- {name: st.rs1.name, range: lowChunkRange, chunks: 1}
- ],
- inserts: highChunkInserts
- },
-];
-
-// maxSize on st.rs0.name - 5MB, on st.rs1.name - 1MB
-assert.commandWorked(db.adminCommand({addshard: st.getConnNames()[0], maxSize: 5}));
-assert.commandWorked(db.adminCommand({addshard: st.getConnNames()[1], maxSize: 1}));
-
-// SERVER-17070 Auto split moves to shard node running WiredTiger, if exceeding maxSize
-var unsupported = ["wiredTiger", "rocksdb", "inMemory"];
-if (unsupported.indexOf(st.rs0.getPrimary().adminCommand({serverStatus: 1}).storageEngine.name) ==
- -1 &&
- unsupported.indexOf(st.rs1.getPrimary().adminCommand({serverStatus: 1}).storageEngine.name) ==
- -1) {
- assert.commandWorked(db.adminCommand({enableSharding: dbName}));
- st.ensurePrimaryShard(dbName, st.rs0.name);
-
- // Execute all test objects
- for (var i = 0; i < maxSizeTests.length; i++) {
- runTest(maxSizeTests[i]);
- }
-}
-
-st.stop();
})();
diff --git a/src/mongo/db/audit.cpp b/src/mongo/db/audit.cpp
index 23a56d48d53..4c320fed716 100644
--- a/src/mongo/db/audit.cpp
+++ b/src/mongo/db/audit.cpp
@@ -229,7 +229,7 @@ void logEnableSharding(Client* client, StringData dbname) {
invariant(client);
}
-void logAddShard(Client* client, StringData name, const std::string& servers, long long maxSize) {
+void logAddShard(Client* client, StringData name, const std::string& servers) {
invariant(client);
}
diff --git a/src/mongo/db/audit.h b/src/mongo/db/audit.h
index ccd1022b268..bfc7cee3794 100644
--- a/src/mongo/db/audit.h
+++ b/src/mongo/db/audit.h
@@ -384,7 +384,7 @@ void logEnableSharding(Client* client, StringData dbname);
/**
* Logs the result of a addShard command.
*/
-void logAddShard(Client* client, StringData name, const std::string& servers, long long maxSize);
+void logAddShard(Client* client, StringData name, const std::string& servers);
/**
* Logs the result of a removeShard command.
diff --git a/src/mongo/db/s/SConscript b/src/mongo/db/s/SConscript
index b3508f5f8ba..3cccac32d2d 100644
--- a/src/mongo/db/s/SConscript
+++ b/src/mongo/db/s/SConscript
@@ -701,7 +701,6 @@ env.CppUnitTest(
'balancer/balancer_defragmentation_policy_test.cpp',
'balancer/cluster_chunks_resize_policy_test.cpp',
'balancer/balancer_policy_test.cpp',
- 'balancer/cluster_statistics_test.cpp',
'balancer/core_options_stub.cpp',
'balancer/balancer_commands_scheduler_test.cpp',
'balancer/migration_test_fixture.cpp',
diff --git a/src/mongo/db/s/balancer/balancer_defragmentation_policy_impl.cpp b/src/mongo/db/s/balancer/balancer_defragmentation_policy_impl.cpp
index 20e9e141ba9..2cadf4e26b0 100644
--- a/src/mongo/db/s/balancer/balancer_defragmentation_policy_impl.cpp
+++ b/src/mongo/db/s/balancer/balancer_defragmentation_policy_impl.cpp
@@ -384,9 +384,7 @@ public:
stdx::unordered_map<ShardId, ShardInfo> shardInfos;
for (const auto& shardStats : collectionShardStats) {
shardInfos.emplace(shardStats.shardId,
- ShardInfo(shardStats.currSizeBytes,
- shardStats.maxSizeBytes,
- shardStats.isDraining));
+ ShardInfo(shardStats.currSizeBytes, shardStats.isDraining));
}
auto collectionChunks = getCollectionChunks(opCtx, coll);
@@ -664,19 +662,14 @@ private:
};
struct ShardInfo {
- ShardInfo(uint64_t currentSizeBytes, uint64_t maxSizeBytes, bool draining)
- : currentSizeBytes(currentSizeBytes), maxSizeBytes(maxSizeBytes), draining(draining) {}
+ ShardInfo(uint64_t currentSizeBytes, bool draining)
+ : currentSizeBytes(currentSizeBytes), draining(draining) {}
bool isDraining() const {
return draining;
}
- bool hasCapacityFor(uint64_t newDataSize) const {
- return (maxSizeBytes == 0 || currentSizeBytes + newDataSize < maxSizeBytes);
- }
-
uint64_t currentSizeBytes;
- const uint64_t maxSizeBytes;
const bool draining;
};
@@ -970,8 +963,7 @@ private:
uint32_t _rankMergeableSibling(const ChunkRangeInfo& chunkTobeMovedAndMerged,
const ChunkRangeInfo& mergeableSibling) {
- static constexpr uint32_t kNoMoveRequired = 1 << 4;
- static constexpr uint32_t kDestinationNotMaxedOut = 1 << 3;
+ static constexpr uint32_t kNoMoveRequired = 1 << 3;
static constexpr uint32_t kConvenientMove = 1 << 2;
static constexpr uint32_t kMergeSolvesTwoPendingChunks = 1 << 1;
static constexpr uint32_t kMergeSolvesOnePendingChunk = 1;
@@ -989,11 +981,7 @@ private:
? kMergeSolvesTwoPendingChunks
: kMergeSolvesOnePendingChunk;
}
- if (chunkTobeMovedAndMerged.shard == mergeableSibling.shard ||
- _shardInfos.at(mergeableSibling.shard)
- .hasCapacityFor(chunkTobeMovedAndMerged.estimatedSizeBytes)) {
- ranking += kDestinationNotMaxedOut;
- }
+
return ranking;
}
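
The net effect of this hunk is a smaller ranking lattice for _rankMergeableSibling: with kDestinationNotMaxedOut gone, kNoMoveRequired shifts down to 1 << 3 and only three lower-order criteria remain. For illustration, a standalone C++ sketch (constants copied from the hunk above; all surrounding policy logic elided) of how the remaining flags compose:

    #include <cstdint>
    #include <iostream>

    int main() {
        // Constants exactly as they appear after this change.
        constexpr uint32_t kNoMoveRequired = 1 << 3;
        constexpr uint32_t kConvenientMove = 1 << 2;
        constexpr uint32_t kMergeSolvesTwoPendingChunks = 1 << 1;
        constexpr uint32_t kMergeSolvesOnePendingChunk = 1;

        // A sibling on the same shard that also resolves one pending chunk
        // still outranks any sibling whose merge requires a migration.
        uint32_t sameShard = kNoMoveRequired | kMergeSolvesOnePendingChunk;   // 9
        uint32_t needsMove = kConvenientMove | kMergeSolvesTwoPendingChunks;  // 6
        std::cout << (sameShard > needsMove) << '\n';  // prints 1
        return 0;
    }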
diff --git a/src/mongo/db/s/balancer/balancer_defragmentation_policy_test.cpp b/src/mongo/db/s/balancer/balancer_defragmentation_policy_test.cpp
index ee56d9f32ea..8646b5d965a 100644
--- a/src/mongo/db/s/balancer/balancer_defragmentation_policy_test.cpp
+++ b/src/mongo/db/s/balancer/balancer_defragmentation_policy_test.cpp
@@ -142,11 +142,9 @@ protected:
ShardStatistics buildShardStats(ShardId id,
uint64_t currentSizeBytes,
- bool maxed = false,
bool draining = false,
std::set<std::string>&& zones = {}) {
- return ShardStatistics(
- id, maxed ? currentSizeBytes : 0, currentSizeBytes, draining, zones, "");
+ return ShardStatistics(id, currentSizeBytes, draining, zones, "");
}
void setDefaultClusterStats() {
diff --git a/src/mongo/db/s/balancer/balancer_policy.cpp b/src/mongo/db/s/balancer/balancer_policy.cpp
index 07ed023e774..4e0ff4c3ccb 100644
--- a/src/mongo/db/s/balancer/balancer_policy.cpp
+++ b/src/mongo/db/s/balancer/balancer_policy.cpp
@@ -224,11 +224,6 @@ StatusWith<ZoneInfo> ZoneInfo::getZonesForCollection(OperationContext* opCtx,
Status BalancerPolicy::isShardSuitableReceiver(const ClusterStatistics::ShardStatistics& stat,
const string& chunkZone) {
- if (stat.isSizeMaxed()) {
- return {ErrorCodes::IllegalOperation,
- str::stream() << stat.shardId << " has reached its maximum storage size."};
- }
-
if (stat.isDraining) {
return {ErrorCodes::IllegalOperation,
str::stream() << stat.shardId << " is currently draining."};
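
After this hunk, isShardSuitableReceiver no longer consults isSizeMaxed(); a shard is disqualified as a migration recipient only for draining (and, in the unchanged code below this hunk, for zone membership). A self-contained mock of the reduced check, where ShardStat is a stand-in for ClusterStatistics::ShardStatistics:

    #include <iostream>
    #include <string>

    struct ShardStat {  // stand-in for ClusterStatistics::ShardStatistics
        std::string shardId;
        bool isDraining;
    };

    // Mirrors the reduced gate: the maxSize check is gone, draining remains.
    bool isSuitableReceiver(const ShardStat& stat) {
        return !stat.isDraining;
    }

    int main() {
        std::cout << isSuitableReceiver({"shard0", false})  // 1: can receive
                  << isSuitableReceiver({"shard1", true})   // 0: draining
                  << '\n';
        return 0;
    }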
diff --git a/src/mongo/db/s/balancer/balancer_policy_test.cpp b/src/mongo/db/s/balancer/balancer_policy_test.cpp
index 06ba27f029a..aa2bddf8c26 100644
--- a/src/mongo/db/s/balancer/balancer_policy_test.cpp
+++ b/src/mongo/db/s/balancer/balancer_policy_test.cpp
@@ -55,7 +55,6 @@ const auto kShardId3 = ShardId("shard3");
const auto kShardId4 = ShardId("shard4");
const auto kShardId5 = ShardId("shard5");
const NamespaceString kNamespace("TestDB", "TestColl");
-const uint64_t kNoMaxSize = 0;
/**
* Constructs a shard statistics vector and a consistent mapping of chunks to shards given the
@@ -121,9 +120,9 @@ MigrateInfosWithReason balanceChunks(const ShardStatisticsVector& shardStats,
TEST(BalancerPolicy, Basic) {
auto cluster = generateCluster(
- {{ShardStatistics(kShardId0, kNoMaxSize, 4, false, emptyZoneSet, emptyShardVersion), 4},
- {ShardStatistics(kShardId1, kNoMaxSize, 0, false, emptyZoneSet, emptyShardVersion), 0},
- {ShardStatistics(kShardId2, kNoMaxSize, 3, false, emptyZoneSet, emptyShardVersion), 3}});
+ {{ShardStatistics(kShardId0, 4, false, emptyZoneSet, emptyShardVersion), 4},
+ {ShardStatistics(kShardId1, 0, false, emptyZoneSet, emptyShardVersion), 0},
+ {ShardStatistics(kShardId2, 3, false, emptyZoneSet, emptyShardVersion), 3}});
const auto [migrations, reason] =
balanceChunks(cluster.first, DistributionStatus(kNamespace, cluster.second), false, false);
@@ -137,9 +136,9 @@ TEST(BalancerPolicy, Basic) {
TEST(BalancerPolicy, SmallClusterShouldBePerfectlyBalanced) {
auto cluster = generateCluster(
- {{ShardStatistics(kShardId0, kNoMaxSize, 1, false, emptyZoneSet, emptyShardVersion), 1},
- {ShardStatistics(kShardId1, kNoMaxSize, 2, false, emptyZoneSet, emptyShardVersion), 2},
- {ShardStatistics(kShardId2, kNoMaxSize, 0, false, emptyZoneSet, emptyShardVersion), 0}});
+ {{ShardStatistics(kShardId0, 1, false, emptyZoneSet, emptyShardVersion), 1},
+ {ShardStatistics(kShardId1, 2, false, emptyZoneSet, emptyShardVersion), 2},
+ {ShardStatistics(kShardId2, 0, false, emptyZoneSet, emptyShardVersion), 0}});
const auto [migrations, reason] =
balanceChunks(cluster.first, DistributionStatus(kNamespace, cluster.second), false, false);
@@ -153,8 +152,8 @@ TEST(BalancerPolicy, SmallClusterShouldBePerfectlyBalanced) {
TEST(BalancerPolicy, SingleChunkShouldNotMove) {
auto cluster = generateCluster(
- {{ShardStatistics(kShardId0, kNoMaxSize, 1, false, emptyZoneSet, emptyShardVersion), 1},
- {ShardStatistics(kShardId1, kNoMaxSize, 0, false, emptyZoneSet, emptyShardVersion), 0}});
+ {{ShardStatistics(kShardId0, 1, false, emptyZoneSet, emptyShardVersion), 1},
+ {ShardStatistics(kShardId1, 0, false, emptyZoneSet, emptyShardVersion), 0}});
{
auto [migrations, reason] = balanceChunks(
cluster.first, DistributionStatus(kNamespace, cluster.second), true, false);
@@ -171,10 +170,10 @@ TEST(BalancerPolicy, SingleChunkShouldNotMove) {
TEST(BalancerPolicy, BalanceThresholdObeyed) {
auto cluster = generateCluster(
- {{ShardStatistics(kShardId0, kNoMaxSize, 2, false, emptyZoneSet, emptyShardVersion), 2},
- {ShardStatistics(kShardId1, kNoMaxSize, 2, false, emptyZoneSet, emptyShardVersion), 2},
- {ShardStatistics(kShardId2, kNoMaxSize, 1, false, emptyZoneSet, emptyShardVersion), 1},
- {ShardStatistics(kShardId3, kNoMaxSize, 1, false, emptyZoneSet, emptyShardVersion), 1}});
+ {{ShardStatistics(kShardId0, 2, false, emptyZoneSet, emptyShardVersion), 2},
+ {ShardStatistics(kShardId1, 2, false, emptyZoneSet, emptyShardVersion), 2},
+ {ShardStatistics(kShardId2, 1, false, emptyZoneSet, emptyShardVersion), 1},
+ {ShardStatistics(kShardId3, 1, false, emptyZoneSet, emptyShardVersion), 1}});
{
auto [migrations, reason] = balanceChunks(
@@ -192,10 +191,10 @@ TEST(BalancerPolicy, BalanceThresholdObeyed) {
TEST(BalancerPolicy, ParallelBalancing) {
auto cluster = generateCluster(
- {{ShardStatistics(kShardId0, kNoMaxSize, 4, false, emptyZoneSet, emptyShardVersion), 4},
- {ShardStatistics(kShardId1, kNoMaxSize, 4, false, emptyZoneSet, emptyShardVersion), 4},
- {ShardStatistics(kShardId2, kNoMaxSize, 0, false, emptyZoneSet, emptyShardVersion), 0},
- {ShardStatistics(kShardId3, kNoMaxSize, 0, false, emptyZoneSet, emptyShardVersion), 0}});
+ {{ShardStatistics(kShardId0, 4, false, emptyZoneSet, emptyShardVersion), 4},
+ {ShardStatistics(kShardId1, 4, false, emptyZoneSet, emptyShardVersion), 4},
+ {ShardStatistics(kShardId2, 0, false, emptyZoneSet, emptyShardVersion), 0},
+ {ShardStatistics(kShardId3, 0, false, emptyZoneSet, emptyShardVersion), 0}});
const auto [migrations, reason] =
balanceChunks(cluster.first, DistributionStatus(kNamespace, cluster.second), false, false);
@@ -215,12 +214,12 @@ TEST(BalancerPolicy, ParallelBalancing) {
TEST(BalancerPolicy, ParallelBalancingDoesNotPutChunksOnShardsAboveTheOptimal) {
auto cluster = generateCluster(
- {{ShardStatistics(kShardId0, kNoMaxSize, 100, false, emptyZoneSet, emptyShardVersion), 100},
- {ShardStatistics(kShardId1, kNoMaxSize, 90, false, emptyZoneSet, emptyShardVersion), 90},
- {ShardStatistics(kShardId2, kNoMaxSize, 90, false, emptyZoneSet, emptyShardVersion), 90},
- {ShardStatistics(kShardId3, kNoMaxSize, 80, false, emptyZoneSet, emptyShardVersion), 80},
- {ShardStatistics(kShardId4, kNoMaxSize, 0, false, emptyZoneSet, emptyShardVersion), 0},
- {ShardStatistics(kShardId5, kNoMaxSize, 0, false, emptyZoneSet, emptyShardVersion), 0}});
+ {{ShardStatistics(kShardId0, 100, false, emptyZoneSet, emptyShardVersion), 100},
+ {ShardStatistics(kShardId1, 90, false, emptyZoneSet, emptyShardVersion), 90},
+ {ShardStatistics(kShardId2, 90, false, emptyZoneSet, emptyShardVersion), 90},
+ {ShardStatistics(kShardId3, 80, false, emptyZoneSet, emptyShardVersion), 80},
+ {ShardStatistics(kShardId4, 0, false, emptyZoneSet, emptyShardVersion), 0},
+ {ShardStatistics(kShardId5, 0, false, emptyZoneSet, emptyShardVersion), 0}});
const auto [migrations, reason] =
balanceChunks(cluster.first, DistributionStatus(kNamespace, cluster.second), false, false);
@@ -240,10 +239,10 @@ TEST(BalancerPolicy, ParallelBalancingDoesNotPutChunksOnShardsAboveTheOptimal) {
TEST(BalancerPolicy, ParallelBalancingDoesNotMoveChunksFromShardsBelowOptimal) {
auto cluster = generateCluster(
- {{ShardStatistics(kShardId0, kNoMaxSize, 100, false, emptyZoneSet, emptyShardVersion), 100},
- {ShardStatistics(kShardId1, kNoMaxSize, 30, false, emptyZoneSet, emptyShardVersion), 30},
- {ShardStatistics(kShardId2, kNoMaxSize, 5, false, emptyZoneSet, emptyShardVersion), 5},
- {ShardStatistics(kShardId3, kNoMaxSize, 0, false, emptyZoneSet, emptyShardVersion), 0}});
+ {{ShardStatistics(kShardId0, 100, false, emptyZoneSet, emptyShardVersion), 100},
+ {ShardStatistics(kShardId1, 30, false, emptyZoneSet, emptyShardVersion), 30},
+ {ShardStatistics(kShardId2, 5, false, emptyZoneSet, emptyShardVersion), 5},
+ {ShardStatistics(kShardId3, 0, false, emptyZoneSet, emptyShardVersion), 0}});
const auto [migrations, reason] =
balanceChunks(cluster.first, DistributionStatus(kNamespace, cluster.second), false, false);
@@ -258,10 +257,10 @@ TEST(BalancerPolicy, ParallelBalancingDoesNotMoveChunksFromShardsBelowOptimal) {
TEST(BalancerPolicy, ParallelBalancingNotSchedulingOnInUseSourceShardsWithMoveNecessary) {
auto cluster = generateCluster(
- {{ShardStatistics(kShardId0, kNoMaxSize, 8, false, emptyZoneSet, emptyShardVersion), 8},
- {ShardStatistics(kShardId1, kNoMaxSize, 4, false, emptyZoneSet, emptyShardVersion), 4},
- {ShardStatistics(kShardId2, kNoMaxSize, 0, false, emptyZoneSet, emptyShardVersion), 0},
- {ShardStatistics(kShardId3, kNoMaxSize, 0, false, emptyZoneSet, emptyShardVersion), 0}});
+ {{ShardStatistics(kShardId0, 8, false, emptyZoneSet, emptyShardVersion), 8},
+ {ShardStatistics(kShardId1, 4, false, emptyZoneSet, emptyShardVersion), 4},
+ {ShardStatistics(kShardId2, 0, false, emptyZoneSet, emptyShardVersion), 0},
+ {ShardStatistics(kShardId3, 0, false, emptyZoneSet, emptyShardVersion), 0}});
// Here kShardId0 would have been selected as a donor
stdx::unordered_set<ShardId> usedShards{kShardId0};
@@ -282,10 +281,10 @@ TEST(BalancerPolicy, ParallelBalancingNotSchedulingOnInUseSourceShardsWithMoveNe
TEST(BalancerPolicy, ParallelBalancingNotSchedulingOnInUseSourceShardsWithMoveNotNecessary) {
auto cluster = generateCluster(
- {{ShardStatistics(kShardId0, kNoMaxSize, 12, false, emptyZoneSet, emptyShardVersion), 12},
- {ShardStatistics(kShardId1, kNoMaxSize, 4, false, emptyZoneSet, emptyShardVersion), 4},
- {ShardStatistics(kShardId2, kNoMaxSize, 0, false, emptyZoneSet, emptyShardVersion), 0},
- {ShardStatistics(kShardId3, kNoMaxSize, 0, false, emptyZoneSet, emptyShardVersion), 0}});
+ {{ShardStatistics(kShardId0, 12, false, emptyZoneSet, emptyShardVersion), 12},
+ {ShardStatistics(kShardId1, 4, false, emptyZoneSet, emptyShardVersion), 4},
+ {ShardStatistics(kShardId2, 0, false, emptyZoneSet, emptyShardVersion), 0},
+ {ShardStatistics(kShardId3, 0, false, emptyZoneSet, emptyShardVersion), 0}});
// Here kShardId0 would have been selected as a donor
stdx::unordered_set<ShardId> usedShards{kShardId0};
@@ -300,10 +299,10 @@ TEST(BalancerPolicy, ParallelBalancingNotSchedulingOnInUseSourceShardsWithMoveNo
TEST(BalancerPolicy, ParallelBalancingNotSchedulingOnInUseDestinationShards) {
auto cluster = generateCluster(
- {{ShardStatistics(kShardId0, kNoMaxSize, 4, false, emptyZoneSet, emptyShardVersion), 4},
- {ShardStatistics(kShardId1, kNoMaxSize, 4, false, emptyZoneSet, emptyShardVersion), 4},
- {ShardStatistics(kShardId2, kNoMaxSize, 0, false, emptyZoneSet, emptyShardVersion), 0},
- {ShardStatistics(kShardId3, kNoMaxSize, 1, false, emptyZoneSet, emptyShardVersion), 1}});
+ {{ShardStatistics(kShardId0, 4, false, emptyZoneSet, emptyShardVersion), 4},
+ {ShardStatistics(kShardId1, 4, false, emptyZoneSet, emptyShardVersion), 4},
+ {ShardStatistics(kShardId2, 0, false, emptyZoneSet, emptyShardVersion), 0},
+ {ShardStatistics(kShardId3, 1, false, emptyZoneSet, emptyShardVersion), 1}});
// Here kShardId2 would have been selected as a recipient
stdx::unordered_set<ShardId> usedShards{kShardId2};
@@ -324,8 +323,8 @@ TEST(BalancerPolicy, ParallelBalancingNotSchedulingOnInUseDestinationShards) {
TEST(BalancerPolicy, JumboChunksNotMoved) {
auto cluster = generateCluster(
- {{ShardStatistics(kShardId0, kNoMaxSize, 2, false, emptyZoneSet, emptyShardVersion), 4},
- {ShardStatistics(kShardId1, kNoMaxSize, 0, false, emptyZoneSet, emptyShardVersion), 0}});
+ {{ShardStatistics(kShardId0, 2, false, emptyZoneSet, emptyShardVersion), 4},
+ {ShardStatistics(kShardId1, 0, false, emptyZoneSet, emptyShardVersion), 0}});
cluster.second[kShardId0][0].setJumbo(true);
cluster.second[kShardId0][1].setJumbo(false); // Only chunk 1 is not jumbo
@@ -344,10 +343,10 @@ TEST(BalancerPolicy, JumboChunksNotMoved) {
TEST(BalancerPolicy, JumboChunksNotMovedParallel) {
auto cluster = generateCluster(
- {{ShardStatistics(kShardId0, kNoMaxSize, 2, false, emptyZoneSet, emptyShardVersion), 4},
- {ShardStatistics(kShardId1, kNoMaxSize, 0, false, emptyZoneSet, emptyShardVersion), 0},
- {ShardStatistics(kShardId2, kNoMaxSize, 2, false, emptyZoneSet, emptyShardVersion), 4},
- {ShardStatistics(kShardId3, kNoMaxSize, 0, false, emptyZoneSet, emptyShardVersion), 0}});
+ {{ShardStatistics(kShardId0, 2, false, emptyZoneSet, emptyShardVersion), 4},
+ {ShardStatistics(kShardId1, 0, false, emptyZoneSet, emptyShardVersion), 0},
+ {ShardStatistics(kShardId2, 2, false, emptyZoneSet, emptyShardVersion), 4},
+ {ShardStatistics(kShardId3, 0, false, emptyZoneSet, emptyShardVersion), 0}});
cluster.second[kShardId0][0].setJumbo(true);
cluster.second[kShardId0][1].setJumbo(false); // Only chunk 1 is not jumbo
@@ -378,8 +377,8 @@ TEST(BalancerPolicy, JumboChunksNotMovedParallel) {
TEST(BalancerPolicy, DrainingSingleChunk) {
// shard0 is draining and chunks will go to shard1, even though it has a lot more chunks
auto cluster = generateCluster(
- {{ShardStatistics(kShardId0, kNoMaxSize, 2, true, emptyZoneSet, emptyShardVersion), 1},
- {ShardStatistics(kShardId1, kNoMaxSize, 0, false, emptyZoneSet, emptyShardVersion), 5}});
+ {{ShardStatistics(kShardId0, 2, true, emptyZoneSet, emptyShardVersion), 1},
+ {ShardStatistics(kShardId1, 0, false, emptyZoneSet, emptyShardVersion), 5}});
const auto [migrations, reason] =
balanceChunks(cluster.first, DistributionStatus(kNamespace, cluster.second), false, false);
@@ -394,10 +393,10 @@ TEST(BalancerPolicy, DrainingSingleChunk) {
TEST(BalancerPolicy, DrainingSingleChunkPerShard) {
// shard0 and shard2 are draining and chunks will go to shard1 and shard3 in parallel
auto cluster = generateCluster(
- {{ShardStatistics(kShardId0, kNoMaxSize, 2, true, emptyZoneSet, emptyShardVersion), 1},
- {ShardStatistics(kShardId1, kNoMaxSize, 0, false, emptyZoneSet, emptyShardVersion), 5},
- {ShardStatistics(kShardId2, kNoMaxSize, 2, true, emptyZoneSet, emptyShardVersion), 1},
- {ShardStatistics(kShardId3, kNoMaxSize, 0, false, emptyZoneSet, emptyShardVersion), 5}});
+ {{ShardStatistics(kShardId0, 2, true, emptyZoneSet, emptyShardVersion), 1},
+ {ShardStatistics(kShardId1, 0, false, emptyZoneSet, emptyShardVersion), 5},
+ {ShardStatistics(kShardId2, 2, true, emptyZoneSet, emptyShardVersion), 1},
+ {ShardStatistics(kShardId3, 0, false, emptyZoneSet, emptyShardVersion), 5}});
const auto [migrations, reason] =
balanceChunks(cluster.first, DistributionStatus(kNamespace, cluster.second), false, false);
@@ -418,8 +417,8 @@ TEST(BalancerPolicy, DrainingSingleChunkPerShard) {
TEST(BalancerPolicy, DrainingWithTwoChunksFirstOneSelected) {
// shard0 is draining and chunks will go to shard1, even though it has a lot more chunks
auto cluster = generateCluster(
- {{ShardStatistics(kShardId0, kNoMaxSize, 2, true, emptyZoneSet, emptyShardVersion), 2},
- {ShardStatistics(kShardId1, kNoMaxSize, 0, false, emptyZoneSet, emptyShardVersion), 5}});
+ {{ShardStatistics(kShardId0, 2, true, emptyZoneSet, emptyShardVersion), 2},
+ {ShardStatistics(kShardId1, 0, false, emptyZoneSet, emptyShardVersion), 5}});
const auto [migrations, reason] =
balanceChunks(cluster.first, DistributionStatus(kNamespace, cluster.second), false, false);
@@ -435,9 +434,9 @@ TEST(BalancerPolicy, DrainingMultipleShardsFirstOneSelected) {
// shard0 and shard1 are both draining with very little chunks in them and chunks will go to
// shard2, even though it has a lot more chunks that the other two
auto cluster = generateCluster(
- {{ShardStatistics(kShardId0, kNoMaxSize, 5, true, emptyZoneSet, emptyShardVersion), 1},
- {ShardStatistics(kShardId1, kNoMaxSize, 5, true, emptyZoneSet, emptyShardVersion), 2},
- {ShardStatistics(kShardId2, kNoMaxSize, 5, false, emptyZoneSet, emptyShardVersion), 16}});
+ {{ShardStatistics(kShardId0, 5, true, emptyZoneSet, emptyShardVersion), 1},
+ {ShardStatistics(kShardId1, 5, true, emptyZoneSet, emptyShardVersion), 2},
+ {ShardStatistics(kShardId2, 5, false, emptyZoneSet, emptyShardVersion), 16}});
const auto [migrations, reason] =
balanceChunks(cluster.first, DistributionStatus(kNamespace, cluster.second), false, false);
@@ -453,9 +452,9 @@ TEST(BalancerPolicy, DrainingMultipleShardsFirstOneSelected) {
TEST(BalancerPolicy, DrainingMultipleShardsWontAcceptChunks) {
// shard0 has many chunks, but can't move them to shard1 or shard2 because they are draining
auto cluster = generateCluster(
- {{ShardStatistics(kShardId0, kNoMaxSize, 2, false, emptyZoneSet, emptyShardVersion), 4},
- {ShardStatistics(kShardId1, kNoMaxSize, 0, true, emptyZoneSet, emptyShardVersion), 0},
- {ShardStatistics(kShardId2, kNoMaxSize, 0, true, emptyZoneSet, emptyShardVersion), 0}});
+ {{ShardStatistics(kShardId0, 2, false, emptyZoneSet, emptyShardVersion), 4},
+ {ShardStatistics(kShardId1, 0, true, emptyZoneSet, emptyShardVersion), 0},
+ {ShardStatistics(kShardId2, 0, true, emptyZoneSet, emptyShardVersion), 0}});
const auto [migrations, reason] =
balanceChunks(cluster.first, DistributionStatus(kNamespace, cluster.second), false, false);
@@ -463,10 +462,10 @@ TEST(BalancerPolicy, DrainingMultipleShardsWontAcceptChunks) {
}
TEST(BalancerPolicy, DrainingSingleAppropriateShardFoundDueToZone) {
- auto cluster = generateCluster(
- {{ShardStatistics(kShardId0, kNoMaxSize, 2, false, {"NYC"}, emptyShardVersion), 4},
- {ShardStatistics(kShardId1, kNoMaxSize, 2, false, {"LAX"}, emptyShardVersion), 4},
- {ShardStatistics(kShardId2, kNoMaxSize, 1, true, {"LAX"}, emptyShardVersion), 1}});
+ auto cluster =
+ generateCluster({{ShardStatistics(kShardId0, 2, false, {"NYC"}, emptyShardVersion), 4},
+ {ShardStatistics(kShardId1, 2, false, {"LAX"}, emptyShardVersion), 4},
+ {ShardStatistics(kShardId2, 1, true, {"LAX"}, emptyShardVersion), 1}});
DistributionStatus distribution(kNamespace, cluster.second);
ASSERT_OK(distribution.addRangeToZone(ZoneRange(
@@ -482,10 +481,10 @@ TEST(BalancerPolicy, DrainingSingleAppropriateShardFoundDueToZone) {
}
TEST(BalancerPolicy, DrainingNoAppropriateShardsFoundDueToZone) {
- auto cluster = generateCluster(
- {{ShardStatistics(kShardId0, kNoMaxSize, 2, false, {"NYC"}, emptyShardVersion), 4},
- {ShardStatistics(kShardId1, kNoMaxSize, 2, false, {"LAX"}, emptyShardVersion), 4},
- {ShardStatistics(kShardId2, kNoMaxSize, 1, true, {"SEA"}, emptyShardVersion), 1}});
+ auto cluster =
+ generateCluster({{ShardStatistics(kShardId0, 2, false, {"NYC"}, emptyShardVersion), 4},
+ {ShardStatistics(kShardId1, 2, false, {"LAX"}, emptyShardVersion), 4},
+ {ShardStatistics(kShardId2, 1, true, {"SEA"}, emptyShardVersion), 1}});
DistributionStatus distribution(kNamespace, cluster.second);
ASSERT_OK(distribution.addRangeToZone(ZoneRange(
@@ -495,44 +494,10 @@ TEST(BalancerPolicy, DrainingNoAppropriateShardsFoundDueToZone) {
ASSERT(migrations.empty());
}
-TEST(BalancerPolicy, NoBalancingDueToAllNodesEitherDrainingOrMaxedOut) {
- // shard0 and shard2 are draining, shard1 is maxed out
+TEST(BalancerPolicy, NoBalancingDueToAllNodesDraining) {
auto cluster = generateCluster(
- {{ShardStatistics(kShardId0, kNoMaxSize, 2, true, emptyZoneSet, emptyShardVersion), 1},
- {ShardStatistics(kShardId1, 1, 1, false, emptyZoneSet, emptyShardVersion), 6},
- {ShardStatistics(kShardId2, kNoMaxSize, 1, true, emptyZoneSet, emptyShardVersion), 1}});
-
- const auto [migrations, reason] =
- balanceChunks(cluster.first, DistributionStatus(kNamespace, cluster.second), false, false);
- ASSERT(migrations.empty());
-}
-
-TEST(BalancerPolicy, BalancerRespectsMaxShardSizeOnlyBalanceToNonMaxed) {
- // Note that maxSize of shard0 is 1, and it is therefore overloaded with currSize = 3. Other
- // shards have maxSize = 0 = unset. Even though the overloaded shard has the least number of
- // less chunks, we shouldn't move chunks to that shard.
- auto cluster = generateCluster(
- {{ShardStatistics(kShardId0, 1, 3, false, emptyZoneSet, emptyShardVersion), 2},
- {ShardStatistics(kShardId1, kNoMaxSize, 5, false, emptyZoneSet, emptyShardVersion), 5},
- {ShardStatistics(kShardId2, kNoMaxSize, 10, false, emptyZoneSet, emptyShardVersion), 10}});
-
- const auto [migrations, reason] =
- balanceChunks(cluster.first, DistributionStatus(kNamespace, cluster.second), false, false);
- ASSERT_EQ(1U, migrations.size());
- ASSERT_EQ(kShardId2, migrations[0].from);
- ASSERT_EQ(kShardId1, migrations[0].to);
- ASSERT_BSONOBJ_EQ(cluster.second[kShardId2][0].getMin(), migrations[0].minKey);
- ASSERT_BSONOBJ_EQ(cluster.second[kShardId2][0].getMax(), *migrations[0].maxKey);
-}
-
-TEST(BalancerPolicy, BalancerRespectsMaxShardSizeWhenAllBalanced) {
- // Note that maxSize of shard0 is 1, and it is therefore overloaded with currSize = 4. Other
- // shards have maxSize = 0 = unset. We check that being over the maxSize is NOT equivalent to
- // draining, we don't want to empty shards for no other reason than they are over this limit.
- auto cluster = generateCluster(
- {{ShardStatistics(kShardId0, 1, 4, false, emptyZoneSet, emptyShardVersion), 4},
- {ShardStatistics(kShardId1, kNoMaxSize, 4, false, emptyZoneSet, emptyShardVersion), 4},
- {ShardStatistics(kShardId2, kNoMaxSize, 4, false, emptyZoneSet, emptyShardVersion), 4}});
+ {{ShardStatistics(kShardId0, 2, true, emptyZoneSet, emptyShardVersion), 1},
+ {ShardStatistics(kShardId2, 1, true, emptyZoneSet, emptyShardVersion), 1}});
const auto [migrations, reason] =
balanceChunks(cluster.first, DistributionStatus(kNamespace, cluster.second), false, false);
@@ -541,10 +506,10 @@ TEST(BalancerPolicy, BalancerRespectsMaxShardSizeWhenAllBalanced) {
TEST(BalancerPolicy, BalancerRespectsZonesWhenDraining) {
// shard1 drains the proper chunk to shard0, even though it is more loaded than shard2
- auto cluster = generateCluster(
- {{ShardStatistics(kShardId0, kNoMaxSize, 5, false, {"a"}, emptyShardVersion), 6},
- {ShardStatistics(kShardId1, kNoMaxSize, 5, true, {"a", "b"}, emptyShardVersion), 2},
- {ShardStatistics(kShardId2, kNoMaxSize, 5, false, {"b"}, emptyShardVersion), 2}});
+ auto cluster =
+ generateCluster({{ShardStatistics(kShardId0, 5, false, {"a"}, emptyShardVersion), 6},
+ {ShardStatistics(kShardId1, 5, true, {"a", "b"}, emptyShardVersion), 2},
+ {ShardStatistics(kShardId2, 5, false, {"b"}, emptyShardVersion), 2}});
DistributionStatus distribution(kNamespace, cluster.second);
ASSERT_OK(distribution.addRangeToZone(ZoneRange(kMinBSONKey, BSON("x" << 7), "a")));
@@ -563,9 +528,9 @@ TEST(BalancerPolicy, BalancerRespectsZonePolicyBeforeImbalance) {
// There is a large imbalance between shard0 and shard1, but the balancer must first fix the
// chunks, which are on a wrong shard due to zone policy
auto cluster = generateCluster(
- {{ShardStatistics(kShardId0, kNoMaxSize, 5, false, {"a"}, emptyShardVersion), 2},
- {ShardStatistics(kShardId1, kNoMaxSize, 5, false, {"a"}, emptyShardVersion), 6},
- {ShardStatistics(kShardId2, kNoMaxSize, 5, false, emptyZoneSet, emptyShardVersion), 2}});
+ {{ShardStatistics(kShardId0, 5, false, {"a"}, emptyShardVersion), 2},
+ {ShardStatistics(kShardId1, 5, false, {"a"}, emptyShardVersion), 6},
+ {ShardStatistics(kShardId2, 5, false, emptyZoneSet, emptyShardVersion), 2}});
DistributionStatus distribution(kNamespace, cluster.second);
ASSERT_OK(distribution.addRangeToZone(ZoneRange(kMinBSONKey, BSON("x" << 100), "a")));
@@ -582,10 +547,10 @@ TEST(BalancerPolicy, BalancerRespectsZonePolicyBeforeImbalance) {
TEST(BalancerPolicy, BalancerFixesIncorrectZonesWithCrossShardViolationOfZones) {
// The zone policy dictates that the same shard must donate and also receive chunks. The test
// validates that the same shard is not used as a donor and recipient as part of the same round.
- auto cluster = generateCluster(
- {{ShardStatistics(kShardId0, kNoMaxSize, 5, false, {"a"}, emptyShardVersion), 3},
- {ShardStatistics(kShardId1, kNoMaxSize, 5, false, {"a"}, emptyShardVersion), 3},
- {ShardStatistics(kShardId2, kNoMaxSize, 5, false, {"b"}, emptyShardVersion), 3}});
+ auto cluster =
+ generateCluster({{ShardStatistics(kShardId0, 5, false, {"a"}, emptyShardVersion), 3},
+ {ShardStatistics(kShardId1, 5, false, {"a"}, emptyShardVersion), 3},
+ {ShardStatistics(kShardId2, 5, false, {"b"}, emptyShardVersion), 3}});
DistributionStatus distribution(kNamespace, cluster.second);
ASSERT_OK(distribution.addRangeToZone(ZoneRange(kMinBSONKey, BSON("x" << 1), "b")));
@@ -603,9 +568,9 @@ TEST(BalancerPolicy, BalancerFixesIncorrectZonesWithCrossShardViolationOfZones)
TEST(BalancerPolicy, BalancerFixesIncorrectZonesInOtherwiseBalancedCluster) {
// Chunks are balanced across shards, but there are wrong zones, which need to be fixed
auto cluster = generateCluster(
- {{ShardStatistics(kShardId0, kNoMaxSize, 5, false, {"a"}, emptyShardVersion), 3},
- {ShardStatistics(kShardId1, kNoMaxSize, 5, false, {"a"}, emptyShardVersion), 3},
- {ShardStatistics(kShardId2, kNoMaxSize, 5, false, emptyZoneSet, emptyShardVersion), 3}});
+ {{ShardStatistics(kShardId0, 5, false, {"a"}, emptyShardVersion), 3},
+ {ShardStatistics(kShardId1, 5, false, {"a"}, emptyShardVersion), 3},
+ {ShardStatistics(kShardId2, 5, false, emptyZoneSet, emptyShardVersion), 3}});
DistributionStatus distribution(kNamespace, cluster.second);
ASSERT_OK(distribution.addRangeToZone(ZoneRange(kMinBSONKey, BSON("x" << 10), "a")));
@@ -621,9 +586,9 @@ TEST(BalancerPolicy, BalancerFixesIncorrectZonesInOtherwiseBalancedCluster) {
TEST(BalancerPolicy, BalancerZoneAlreadyBalanced) {
// Chunks are balanced across shards for the zone.
- auto cluster = generateCluster(
- {{ShardStatistics(kShardId0, kNoMaxSize, 3, false, {"a"}, emptyShardVersion), 2},
- {ShardStatistics(kShardId1, kNoMaxSize, 2, false, {"a"}, emptyShardVersion), 2}});
+ auto cluster =
+ generateCluster({{ShardStatistics(kShardId0, 3, false, {"a"}, emptyShardVersion), 2},
+ {ShardStatistics(kShardId1, 2, false, {"a"}, emptyShardVersion), 2}});
DistributionStatus distribution(kNamespace, cluster.second);
ASSERT_OK(distribution.addRangeToZone(ZoneRange(kMinBSONKey, kMaxBSONKey, "a")));
@@ -634,9 +599,9 @@ TEST(BalancerPolicy, BalancerMostOverLoadShardHasMultipleZones) {
// shard0 has chunks [MinKey, 1), [1, 2), [2, 3), [3, 4), [4, 5), so two chunks each
// for zones "b" and "c". So [1, 2) is expected to be moved to shard1 in round 1.
auto cluster = generateCluster(
- {{ShardStatistics(kShardId0, kNoMaxSize, 5, false, {"a", "b", "c"}, emptyShardVersion), 5},
- {ShardStatistics(kShardId1, kNoMaxSize, 1, false, {"b"}, emptyShardVersion), 1},
- {ShardStatistics(kShardId2, kNoMaxSize, 1, false, {"c"}, emptyShardVersion), 1}});
+ {{ShardStatistics(kShardId0, 5, false, {"a", "b", "c"}, emptyShardVersion), 5},
+ {ShardStatistics(kShardId1, 1, false, {"b"}, emptyShardVersion), 1},
+ {ShardStatistics(kShardId2, 1, false, {"c"}, emptyShardVersion), 1}});
DistributionStatus distribution(kNamespace, cluster.second);
ASSERT_OK(distribution.addRangeToZone(ZoneRange(kMinBSONKey, BSON("x" << 1), "a")));
@@ -657,9 +622,9 @@ TEST(BalancerPolicy, BalancerMostOverLoadShardHasMultipleZonesSkipZoneWithShardI
// for zones "b" and "c". So [3, 4) is expected to be moved to shard2 because shard1 is
// in use.
auto cluster = generateCluster(
- {{ShardStatistics(kShardId0, kNoMaxSize, 5, false, {"a", "b", "c"}, emptyShardVersion), 5},
- {ShardStatistics(kShardId1, kNoMaxSize, 1, false, {"b"}, emptyShardVersion), 1},
- {ShardStatistics(kShardId2, kNoMaxSize, 1, false, {"c"}, emptyShardVersion), 1}});
+ {{ShardStatistics(kShardId0, 5, false, {"a", "b", "c"}, emptyShardVersion), 5},
+ {ShardStatistics(kShardId1, 1, false, {"b"}, emptyShardVersion), 1},
+ {ShardStatistics(kShardId2, 1, false, {"c"}, emptyShardVersion), 1}});
DistributionStatus distribution(kNamespace, cluster.second);
ASSERT_OK(distribution.addRangeToZone(ZoneRange(kMinBSONKey, BSON("x" << 1), "a")));
@@ -680,10 +645,10 @@ TEST(BalancerPolicy, BalancerMostOverLoadShardHasMultipleZonesSkipZoneWithShardI
TEST(BalancerPolicy, BalancerFixesIncorrectZonesInOtherwiseBalancedClusterParallel) {
// Chunks are balanced across shards, but there are wrong zones, which need to be fixed
auto cluster = generateCluster(
- {{ShardStatistics(kShardId0, kNoMaxSize, 5, false, {"a"}, emptyShardVersion), 3},
- {ShardStatistics(kShardId1, kNoMaxSize, 5, false, {"a"}, emptyShardVersion), 3},
- {ShardStatistics(kShardId2, kNoMaxSize, 5, false, emptyZoneSet, emptyShardVersion), 3},
- {ShardStatistics(kShardId3, kNoMaxSize, 5, false, emptyZoneSet, emptyShardVersion), 3}});
+ {{ShardStatistics(kShardId0, 5, false, {"a"}, emptyShardVersion), 3},
+ {ShardStatistics(kShardId1, 5, false, {"a"}, emptyShardVersion), 3},
+ {ShardStatistics(kShardId2, 5, false, emptyZoneSet, emptyShardVersion), 3},
+ {ShardStatistics(kShardId3, 5, false, emptyZoneSet, emptyShardVersion), 3}});
DistributionStatus distribution(kNamespace, cluster.second);
ASSERT_OK(distribution.addRangeToZone(ZoneRange(kMinBSONKey, BSON("x" << 20), "a")));
@@ -706,8 +671,8 @@ TEST(BalancerPolicy, BalancerFixesIncorrectZonesInOtherwiseBalancedClusterParall
TEST(BalancerPolicy, BalancerHandlesNoShardsWithZone) {
auto cluster = generateCluster(
- {{ShardStatistics(kShardId0, kNoMaxSize, 5, false, emptyZoneSet, emptyShardVersion), 2},
- {ShardStatistics(kShardId1, kNoMaxSize, 5, false, emptyZoneSet, emptyShardVersion), 2}});
+ {{ShardStatistics(kShardId0, 5, false, emptyZoneSet, emptyShardVersion), 2},
+ {ShardStatistics(kShardId1, 5, false, emptyZoneSet, emptyShardVersion), 2}});
DistributionStatus distribution(kNamespace, cluster.second);
ASSERT_OK(
diff --git a/src/mongo/db/s/balancer/cluster_statistics.cpp b/src/mongo/db/s/balancer/cluster_statistics.cpp
index 505a0beaaa8..817f1e567f0 100644
--- a/src/mongo/db/s/balancer/cluster_statistics.cpp
+++ b/src/mongo/db/s/balancer/cluster_statistics.cpp
@@ -50,39 +50,27 @@ ClusterStatistics::ClusterStatistics() = default;
ClusterStatistics::~ClusterStatistics() = default;
ClusterStatistics::ShardStatistics::ShardStatistics(ShardId inShardId,
- uint64_t inMaxSizeBytes,
uint64_t inCurrSizeBytes,
bool inIsDraining,
std::set<std::string> inShardZones,
std::string inMongoVersion,
use_bytes_t t)
: shardId(std::move(inShardId)),
- maxSizeBytes(inMaxSizeBytes),
currSizeBytes(inCurrSizeBytes),
isDraining(inIsDraining),
shardZones(std::move(inShardZones)),
mongoVersion(std::move(inMongoVersion)) {}
ClusterStatistics::ShardStatistics::ShardStatistics(ShardId inShardId,
- uint64_t inMaxSizeMB,
uint64_t inCurrSizeMB,
bool inIsDraining,
std::set<std::string> inShardZones,
std::string inMongoVersion)
: ShardStatistics(inShardId,
- convertMBToBytes(inMaxSizeMB),
convertMBToBytes(inCurrSizeMB),
inIsDraining,
std::move(inShardZones),
std::move(inMongoVersion),
use_bytes_t{}) {}
-bool ClusterStatistics::ShardStatistics::isSizeMaxed() const {
- if (!maxSizeBytes || !currSizeBytes) {
- return false;
- }
-
- return currSizeBytes >= maxSizeBytes;
-}
-
} // namespace mongo
diff --git a/src/mongo/db/s/balancer/cluster_statistics.h b/src/mongo/db/s/balancer/cluster_statistics.h
index 92dc21d3bc8..fe117689d93 100644
--- a/src/mongo/db/s/balancer/cluster_statistics.h
+++ b/src/mongo/db/s/balancer/cluster_statistics.h
@@ -62,7 +62,6 @@ public:
explicit use_bytes_t() = default;
};
ShardStatistics(ShardId shardId,
- uint64_t maxSizeBytes,
uint64_t currSizeBytes,
bool isDraining,
std::set<std::string> shardZones,
@@ -70,24 +69,14 @@ public:
use_bytes_t t);
ShardStatistics(ShardId shardId,
- uint64_t maxSizeMB,
uint64_t currSizeMB,
bool isDraining,
std::set<std::string> shardZones,
std::string mongoVersion);
- /**
- * Returns true if a shard is not allowed to receive any new chunks because it has reached
- * the per-shard data size limit.
- */
- bool isSizeMaxed() const;
-
// The id of the shard for which this statistic applies
ShardId shardId;
- // The maximum storage size allowed for the shard. Zero means no maximum specified.
- uint64_t maxSizeBytes{0};
-
// The current storage size of the shard.
uint64_t currSizeBytes{0};
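
Both ShardStatistics constructors lose their leading size-limit parameter, so every call site now passes the current size directly after the shard id. A compilable mimic of the slimmed-down type (the real class nests in ClusterStatistics and uses ShardId; this sketch substitutes std::string ids):

    #include <cstdint>
    #include <iostream>
    #include <set>
    #include <string>

    struct ShardStatistics {
        struct use_bytes_t {};  // tag selecting the bytes-based constructor

        ShardStatistics(std::string id, uint64_t currSizeBytes, bool draining,
                        std::set<std::string> zones, std::string version, use_bytes_t)
            : shardId(std::move(id)),
              currSizeBytes(currSizeBytes),
              isDraining(draining),
              shardZones(std::move(zones)),
              mongoVersion(std::move(version)) {}

        // MB convenience constructor, delegating after unit conversion.
        ShardStatistics(std::string id, uint64_t currSizeMB, bool draining,
                        std::set<std::string> zones, std::string version)
            : ShardStatistics(std::move(id), currSizeMB * 1024 * 1024, draining,
                              std::move(zones), std::move(version), use_bytes_t{}) {}

        std::string shardId;
        uint64_t currSizeBytes{0};
        bool isDraining{false};
        std::set<std::string> shardZones;
        std::string mongoVersion;
    };

    int main() {
        ShardStatistics s("shard0", /*currSizeMB=*/5, false, {}, "6.1.0");
        std::cout << s.shardId << " holds " << s.currSizeBytes << " bytes\n";
        return 0;
    }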
diff --git a/src/mongo/db/s/balancer/cluster_statistics_impl.cpp b/src/mongo/db/s/balancer/cluster_statistics_impl.cpp
index 83b022294b4..7897ebc61bc 100644
--- a/src/mongo/db/s/balancer/cluster_statistics_impl.cpp
+++ b/src/mongo/db/s/balancer/cluster_statistics_impl.cpp
@@ -132,11 +132,9 @@ StatusWith<std::vector<ShardStatistics>> ClusterStatisticsImpl::_getStats(
if (ns) {
return shardutil::retrieveCollectionShardSize(opCtx, shard.getName(), *ns);
}
- // optimization for the case where the balancer does not care about the dataSize
- if (!shard.getMaxSizeMB()) {
- return 0;
- }
+
return shardutil::retrieveTotalShardSize(opCtx, shard.getName());
}();
if (!shardSizeStatus.isOK()) {
@@ -169,7 +167,6 @@ StatusWith<std::vector<ShardStatistics>> ClusterStatisticsImpl::_getStats(
}
stats.emplace_back(shard.getName(),
- shard.getMaxSizeMB() * 1024 * 1024,
shardSizeStatus.getValue(),
shard.getDraining(),
std::move(shardZones),
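
With no per-shard quota left to consult, _getStats drops its shortcut of reporting size 0 for shards that had no maxSizeMB set: the total shard size is now always fetched unless a namespace narrows the query. A standalone sketch of the new rule, with the two shardutil helpers replaced by hypothetical stubs:

    #include <iostream>
    #include <optional>
    #include <string>

    // Hypothetical stubs for shardutil::retrieveCollectionShardSize and
    // shardutil::retrieveTotalShardSize.
    long long collectionShardSize(const std::string&, const std::string&) { return 42; }
    long long totalShardSize(const std::string&) { return 4096; }

    long long shardSize(const std::string& shard, const std::optional<std::string>& ns) {
        if (ns) {
            return collectionShardSize(shard, *ns);
        }
        // The old code returned 0 here whenever the shard had maxSizeMB unset.
        return totalShardSize(shard);
    }

    int main() {
        std::cout << shardSize("shard0", std::nullopt) << '\n';            // 4096
        std::cout << shardSize("shard0", std::string{"db.coll"}) << '\n';  // 42
        return 0;
    }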
diff --git a/src/mongo/db/s/balancer/cluster_statistics_test.cpp b/src/mongo/db/s/balancer/cluster_statistics_test.cpp
deleted file mode 100644
index 3dd5357581b..00000000000
--- a/src/mongo/db/s/balancer/cluster_statistics_test.cpp
+++ /dev/null
@@ -1,52 +0,0 @@
-/**
- * Copyright (C) 2018-present MongoDB, Inc.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the Server Side Public License, version 1,
- * as published by MongoDB, Inc.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * Server Side Public License for more details.
- *
- * You should have received a copy of the Server Side Public License
- * along with this program. If not, see
- * <http://www.mongodb.com/licensing/server-side-public-license>.
- *
- * As a special exception, the copyright holders give permission to link the
- * code of portions of this program with the OpenSSL library under certain
- * conditions as described in each individual source file and distribute
- * linked combinations including the program with the OpenSSL library. You
- * must comply with the Server Side Public License in all respects for
- * all of the code used other than as permitted herein. If you modify file(s)
- * with this exception, you may extend this exception to your version of the
- * file(s), but you are not obligated to do so. If you do not wish to do so,
- * delete this exception statement from your version. If you delete this
- * exception statement from all source files in the program, then also delete
- * it in the license file.
- */
-
-#include "mongo/platform/basic.h"
-
-#include "mongo/db/s/balancer/cluster_statistics.h"
-#include "mongo/unittest/unittest.h"
-
-namespace mongo {
-namespace {
-
-using ShardStatistics = ClusterStatistics::ShardStatistics;
-
-const auto emptyZoneSet = std::set<std::string>();
-
-TEST(ShardStatistics, SizeMaxedTest) {
- ASSERT(
- !ShardStatistics(ShardId("TestShardId"), 0, 0, false, emptyZoneSet, "3.2.0").isSizeMaxed());
- ASSERT(!ShardStatistics(ShardId("TestShardId"), 100LL, 80LL, false, emptyZoneSet, "3.2.0")
- .isSizeMaxed());
- ASSERT(ShardStatistics(ShardId("TestShardId"), 100LL, 110LL, false, emptyZoneSet, "3.2.0")
- .isSizeMaxed());
-}
-
-} // namespace
-} // namespace mongo
diff --git a/src/mongo/db/s/balancer/migration_test_fixture.h b/src/mongo/db/s/balancer/migration_test_fixture.h
index 70232e2a46e..3a71b296cdb 100644
--- a/src/mongo/db/s/balancer/migration_test_fixture.h
+++ b/src/mongo/db/s/balancer/migration_test_fixture.h
@@ -129,17 +129,13 @@ protected:
const long long kMaxSizeMB = 100;
const BSONObj kShard0 =
- BSON(ShardType::name(kShardId0.toString())
- << ShardType::host(kShardHost0.toString()) << ShardType::maxSizeMB(kMaxSizeMB));
+ BSON(ShardType::name(kShardId0.toString()) << ShardType::host(kShardHost0.toString()));
const BSONObj kShard1 =
- BSON(ShardType::name(kShardId1.toString())
- << ShardType::host(kShardHost1.toString()) << ShardType::maxSizeMB(kMaxSizeMB));
+ BSON(ShardType::name(kShardId1.toString()) << ShardType::host(kShardHost1.toString()));
const BSONObj kShard2 =
- BSON(ShardType::name(kShardId2.toString())
- << ShardType::host(kShardHost2.toString()) << ShardType::maxSizeMB(kMaxSizeMB));
+ BSON(ShardType::name(kShardId2.toString()) << ShardType::host(kShardHost2.toString()));
const BSONObj kShard3 =
- BSON(ShardType::name(kShardId3.toString())
- << ShardType::host(kShardHost3.toString()) << ShardType::maxSizeMB(kMaxSizeMB));
+ BSON(ShardType::name(kShardId3.toString()) << ShardType::host(kShardHost3.toString()));
const std::string kPattern = "_id";
const KeyPattern kKeyPattern = KeyPattern(BSON(kPattern << 1));
diff --git a/src/mongo/db/s/config/configsvr_add_shard_command.cpp b/src/mongo/db/s/config/configsvr_add_shard_command.cpp
index c8d2a1cc685..9ea14319405 100644
--- a/src/mongo/db/s/config/configsvr_add_shard_command.cpp
+++ b/src/mongo/db/s/config/configsvr_add_shard_command.cpp
@@ -52,8 +52,6 @@
namespace mongo {
namespace {
-const long long kMaxSizeMBDefault = 0;
-
/**
* Internal sharding command run on config servers to add a shard to the cluster.
*/
@@ -117,15 +115,12 @@ public:
audit::logAddShard(Client::getCurrent(),
parsedRequest.hasName() ? parsedRequest.getName() : "",
- parsedRequest.getConnString().toString(),
- parsedRequest.hasMaxSize() ? parsedRequest.getMaxSize()
- : kMaxSizeMBDefault);
+ parsedRequest.getConnString().toString());
StatusWith<std::string> addShardResult = ShardingCatalogManager::get(opCtx)->addShard(
opCtx,
parsedRequest.hasName() ? &parsedRequest.getName() : nullptr,
- parsedRequest.getConnString(),
- parsedRequest.hasMaxSize() ? parsedRequest.getMaxSize() : kMaxSizeMBDefault);
+ parsedRequest.getConnString());
if (!addShardResult.isOK()) {
LOGV2(21920,
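
On the request side (add_shard_request_type.cpp in the diffstat, exercised by the ErrorCodes::InvalidOptions assertion added to addshard1.js above), an addShard command carrying the retired maxSize field is now rejected outright instead of being defaulted. A hedged sketch of such a guard; parseAddShard, ParsedRequest, and the map-based command document are illustrative stand-ins, not the actual AddShardRequest API:

    #include <iostream>
    #include <map>
    #include <optional>
    #include <string>

    enum class ErrorCode { kOK, kInvalidOptions };

    struct ParsedRequest {
        std::string connString;
        std::optional<std::string> name;
    };

    ErrorCode parseAddShard(const std::map<std::string, std::string>& doc,
                            ParsedRequest* out) {
        if (doc.count("maxSize")) {
            return ErrorCode::kInvalidOptions;  // field no longer supported
        }
        out->connString = doc.at("addShard");
        if (auto it = doc.find("name"); it != doc.end())
            out->name = it->second;
        return ErrorCode::kOK;
    }

    int main() {
        ParsedRequest req;
        auto rc = parseAddShard({{"addShard", "rs1/host:27017"}, {"maxSize", "1024"}}, &req);
        std::cout << (rc == ErrorCode::kInvalidOptions) << '\n';  // prints 1
        return 0;
    }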
diff --git a/src/mongo/db/s/config/sharding_catalog_manager.h b/src/mongo/db/s/config/sharding_catalog_manager.h
index c3b588b8fa6..c1f94dbc46a 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager.h
+++ b/src/mongo/db/s/config/sharding_catalog_manager.h
@@ -463,15 +463,12 @@ public:
* nullptr, a name will be automatically generated; if not nullptr, it cannot
* contain the empty string.
* 'shardConnectionString' is the complete connection string of the shard being added.
- * 'maxSize' is the optional space quota in bytes. Zero means there's no limitation to space
- * usage.
*
* On success returns the name of the newly added shard.
*/
StatusWith<std::string> addShard(OperationContext* opCtx,
const std::string* shardProposedName,
- const ConnectionString& shardConnectionString,
- long long maxSize);
+ const ConnectionString& shardConnectionString);
/**
* Tries to remove a shard. To completely remove a shard from a sharded cluster,
@@ -567,8 +564,7 @@ private:
StatusWith<boost::optional<ShardType>> _checkIfShardExists(
OperationContext* opCtx,
const ConnectionString& propsedShardConnectionString,
- const std::string* shardProposedName,
- long long maxSize);
+ const std::string* shardProposedName);
/**
* Validates that the specified endpoint can serve as a shard server. In particular, this
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_add_shard_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_add_shard_test.cpp
index 3ae9a91fce5..9b4e340e508 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_add_shard_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_add_shard_test.cpp
@@ -368,7 +368,6 @@ protected:
ASSERT_EQUALS(expectedShard.getName(), foundShard.getName());
ASSERT_EQUALS(expectedShard.getHost(), foundShard.getHost());
- ASSERT_EQUALS(expectedShard.getMaxSizeMB(), foundShard.getMaxSizeMB());
ASSERT_EQUALS(expectedShard.getDraining(), foundShard.getDraining());
ASSERT_EQUALS((int)expectedShard.getState(), (int)foundShard.getState());
ASSERT_TRUE(foundShard.getTags().empty());
@@ -459,7 +458,6 @@ TEST_F(AddShardTest, StandaloneBasicSuccess) {
ShardType expectedShard;
expectedShard.setName(expectedShardName);
expectedShard.setHost("StandaloneHost:12345");
- expectedShard.setMaxSizeMB(100);
expectedShard.setState(ShardType::ShardState::kShardAware);
DatabaseType discoveredDB1(
@@ -477,8 +475,7 @@ TEST_F(AddShardTest, StandaloneBasicSuccess) {
assertGet(ShardingCatalogManager::get(opCtx.get())
->addShard(opCtx.get(),
&expectedShardName,
- assertGet(ConnectionString::parse("StandaloneHost:12345")),
- 100));
+ assertGet(ConnectionString::parse("StandaloneHost:12345"))));
ASSERT_EQUALS(expectedShardName, shardName);
});
@@ -535,7 +532,6 @@ TEST_F(AddShardTest, StandaloneGenerateName) {
ShardType existingShard;
existingShard.setName("shard0005");
existingShard.setHost("existingHost:12345");
- existingShard.setMaxSizeMB(100);
existingShard.setState(ShardType::ShardState::kShardAware);
// Add a pre-existing shard so when generating a name for the new shard it will have to go
@@ -552,7 +548,6 @@ TEST_F(AddShardTest, StandaloneGenerateName) {
ShardType expectedShard;
expectedShard.setName(expectedShardName);
expectedShard.setHost(shardTarget.toString());
- expectedShard.setMaxSizeMB(100);
expectedShard.setState(ShardType::ShardState::kShardAware);
DatabaseType discoveredDB1(
@@ -565,7 +560,7 @@ TEST_F(AddShardTest, StandaloneGenerateName) {
auto opCtx = Client::getCurrent()->makeOperationContext();
auto shardName =
assertGet(ShardingCatalogManager::get(opCtx.get())
- ->addShard(opCtx.get(), nullptr, ConnectionString(shardTarget), 100));
+ ->addShard(opCtx.get(), nullptr, ConnectionString(shardTarget)));
ASSERT_EQUALS(expectedShardName, shardName);
});
@@ -623,7 +618,7 @@ TEST_F(AddShardTest, AddSCCCConnectionStringAsShard) {
auto opCtx = Client::getCurrent()->makeOperationContext();
const std::string shardName("StandaloneShard");
auto status = ShardingCatalogManager::get(opCtx.get())
- ->addShard(opCtx.get(), &shardName, invalidConn, 100);
+ ->addShard(opCtx.get(), &shardName, invalidConn);
ASSERT_EQUALS(ErrorCodes::BadValue, status);
ASSERT_STRING_CONTAINS(status.getStatus().reason(), "Invalid connection string");
});
@@ -642,8 +637,7 @@ TEST_F(AddShardTest, EmptyShardName) {
auto status = ShardingCatalogManager::get(opCtx.get())
->addShard(opCtx.get(),
&expectedShardName,
- assertGet(ConnectionString::parse("StandaloneHost:12345")),
- 100);
+ assertGet(ConnectionString::parse("StandaloneHost:12345")));
ASSERT_EQUALS(ErrorCodes::BadValue, status);
ASSERT_EQUALS("shard name cannot be empty", status.getStatus().reason());
});
@@ -667,7 +661,7 @@ TEST_F(AddShardTest, UnreachableHost) {
auto opCtx = Client::getCurrent()->makeOperationContext();
auto status =
ShardingCatalogManager::get(opCtx.get())
- ->addShard(opCtx.get(), &expectedShardName, ConnectionString(shardTarget), 100);
+ ->addShard(opCtx.get(), &expectedShardName, ConnectionString(shardTarget));
ASSERT_EQUALS(ErrorCodes::OperationFailed, status);
ASSERT_STRING_CONTAINS(status.getStatus().reason(), "host unreachable");
});
@@ -694,7 +688,7 @@ TEST_F(AddShardTest, AddMongosAsShard) {
auto opCtx = Client::getCurrent()->makeOperationContext();
auto status =
ShardingCatalogManager::get(opCtx.get())
- ->addShard(opCtx.get(), &expectedShardName, ConnectionString(shardTarget), 100);
+ ->addShard(opCtx.get(), &expectedShardName, ConnectionString(shardTarget));
ASSERT_EQUALS(ErrorCodes::IllegalOperation, status);
});
@@ -721,7 +715,7 @@ TEST_F(AddShardTest, AddReplicaSetShardAsStandalone) {
auto opCtx = Client::getCurrent()->makeOperationContext();
auto status =
ShardingCatalogManager::get(opCtx.get())
- ->addShard(opCtx.get(), &expectedShardName, ConnectionString(shardTarget), 100);
+ ->addShard(opCtx.get(), &expectedShardName, ConnectionString(shardTarget));
ASSERT_EQUALS(ErrorCodes::OperationFailed, status);
ASSERT_STRING_CONTAINS(status.getStatus().reason(), "use replica set url format");
});
@@ -751,7 +745,7 @@ TEST_F(AddShardTest, AddStandaloneHostShardAsReplicaSet) {
ThreadClient tc(getServiceContext());
auto opCtx = Client::getCurrent()->makeOperationContext();
auto status = ShardingCatalogManager::get(opCtx.get())
- ->addShard(opCtx.get(), &expectedShardName, connString, 100);
+ ->addShard(opCtx.get(), &expectedShardName, connString);
ASSERT_EQUALS(ErrorCodes::OperationFailed, status);
ASSERT_STRING_CONTAINS(status.getStatus().reason(), "host did not return a set name");
});
@@ -780,7 +774,7 @@ TEST_F(AddShardTest, ReplicaSetMistmatchedReplicaSetName) {
ThreadClient tc(getServiceContext());
auto opCtx = Client::getCurrent()->makeOperationContext();
auto status = ShardingCatalogManager::get(opCtx.get())
- ->addShard(opCtx.get(), &expectedShardName, connString, 100);
+ ->addShard(opCtx.get(), &expectedShardName, connString);
ASSERT_EQUALS(ErrorCodes::OperationFailed, status);
ASSERT_STRING_CONTAINS(status.getStatus().reason(), "does not match the actual set name");
});
@@ -810,7 +804,7 @@ TEST_F(AddShardTest, ShardIsCSRSConfigServer) {
ThreadClient tc(getServiceContext());
auto opCtx = Client::getCurrent()->makeOperationContext();
auto status = ShardingCatalogManager::get(opCtx.get())
- ->addShard(opCtx.get(), &expectedShardName, connString, 100);
+ ->addShard(opCtx.get(), &expectedShardName, connString);
ASSERT_EQUALS(ErrorCodes::OperationFailed, status);
ASSERT_STRING_CONTAINS(status.getStatus().reason(),
"as a shard since it is a config server");
@@ -842,7 +836,7 @@ TEST_F(AddShardTest, ReplicaSetMissingHostsProvidedInSeedList) {
ThreadClient tc(getServiceContext());
auto opCtx = Client::getCurrent()->makeOperationContext();
auto status = ShardingCatalogManager::get(opCtx.get())
- ->addShard(opCtx.get(), &expectedShardName, connString, 100);
+ ->addShard(opCtx.get(), &expectedShardName, connString);
ASSERT_EQUALS(ErrorCodes::OperationFailed, status);
ASSERT_STRING_CONTAINS(status.getStatus().reason(),
"host2:12345 does not belong to replica set");
@@ -876,7 +870,7 @@ TEST_F(AddShardTest, AddShardWithNameConfigFails) {
ThreadClient tc(getServiceContext());
auto opCtx = Client::getCurrent()->makeOperationContext();
auto status = ShardingCatalogManager::get(opCtx.get())
- ->addShard(opCtx.get(), &expectedShardName, connString, 100);
+ ->addShard(opCtx.get(), &expectedShardName, connString);
ASSERT_EQUALS(ErrorCodes::BadValue, status);
ASSERT_EQUALS(status.getStatus().reason(),
"use of shard replica set with name 'config' is not allowed");
@@ -921,7 +915,7 @@ TEST_F(AddShardTest, ShardContainsExistingDatabase) {
ThreadClient tc(getServiceContext());
auto opCtx = Client::getCurrent()->makeOperationContext();
auto status = ShardingCatalogManager::get(opCtx.get())
- ->addShard(opCtx.get(), &expectedShardName, connString, 100);
+ ->addShard(opCtx.get(), &expectedShardName, connString);
ASSERT_EQUALS(ErrorCodes::OperationFailed, status);
ASSERT_STRING_CONTAINS(
status.getStatus().reason(),
@@ -958,7 +952,6 @@ TEST_F(AddShardTest, SuccessfullyAddReplicaSet) {
ShardType expectedShard;
expectedShard.setName(expectedShardName);
expectedShard.setHost(connString.toString());
- expectedShard.setMaxSizeMB(100);
expectedShard.setState(ShardType::ShardState::kShardAware);
DatabaseType discoveredDB(
@@ -967,8 +960,8 @@ TEST_F(AddShardTest, SuccessfullyAddReplicaSet) {
auto future = launchAsync([this, &expectedShardName, &connString] {
ThreadClient tc(getServiceContext());
auto opCtx = Client::getCurrent()->makeOperationContext();
- auto shardName = assertGet(ShardingCatalogManager::get(opCtx.get())
- ->addShard(opCtx.get(), nullptr, connString, 100));
+ auto shardName = assertGet(
+ ShardingCatalogManager::get(opCtx.get())->addShard(opCtx.get(), nullptr, connString));
ASSERT_EQUALS(expectedShardName, shardName);
});
@@ -1030,7 +1023,6 @@ TEST_F(AddShardTest, ReplicaSetExtraHostsDiscovered) {
ShardType expectedShard;
expectedShard.setName(expectedShardName);
expectedShard.setHost(fullConnString.toString());
- expectedShard.setMaxSizeMB(100);
expectedShard.setState(ShardType::ShardState::kShardAware);
DatabaseType discoveredDB(
@@ -1039,8 +1031,8 @@ TEST_F(AddShardTest, ReplicaSetExtraHostsDiscovered) {
auto future = launchAsync([this, &expectedShardName, &seedString] {
ThreadClient tc(getServiceContext());
auto opCtx = Client::getCurrent()->makeOperationContext();
- auto shardName = assertGet(ShardingCatalogManager::get(opCtx.get())
- ->addShard(opCtx.get(), nullptr, seedString, 100));
+ auto shardName = assertGet(
+ ShardingCatalogManager::get(opCtx.get())->addShard(opCtx.get(), nullptr, seedString));
ASSERT_EQUALS(expectedShardName, shardName);
});
@@ -1103,7 +1095,6 @@ TEST_F(AddShardTest, AddShardSucceedsEvenIfAddingDBsFromNewShardFails) {
ShardType expectedShard;
expectedShard.setName(expectedShardName);
expectedShard.setHost("StandaloneHost:12345");
- expectedShard.setMaxSizeMB(100);
expectedShard.setState(ShardType::ShardState::kShardAware);
DatabaseType discoveredDB1(
@@ -1124,7 +1115,7 @@ TEST_F(AddShardTest, AddShardSucceedsEvenIfAddingDBsFromNewShardFails) {
auto opCtx = Client::getCurrent()->makeOperationContext();
auto shardName = assertGet(
ShardingCatalogManager::get(opCtx.get())
- ->addShard(opCtx.get(), &expectedShardName, ConnectionString(shardTarget), 100));
+ ->addShard(opCtx.get(), &expectedShardName, ConnectionString(shardTarget)));
ASSERT_EQUALS(expectedShardName, shardName);
});
@@ -1202,7 +1193,6 @@ TEST_F(AddShardTest, AddExistingShardStandalone) {
ShardType existingShard;
existingShard.setName(existingShardName);
existingShard.setHost(shardTarget.toString());
- existingShard.setMaxSizeMB(100);
existingShard.setState(ShardType::ShardState::kShardAware);
// Make sure the shard already exists.
@@ -1219,29 +1209,13 @@ TEST_F(AddShardTest, AddExistingShardStandalone) {
auto opCtx = Client::getCurrent()->makeOperationContext();
ASSERT_EQUALS(ErrorCodes::IllegalOperation,
ShardingCatalogManager::get(opCtx.get())
- ->addShard(opCtx.get(),
- &differentName,
- ConnectionString(shardTarget),
- existingShard.getMaxSizeMB()));
+ ->addShard(opCtx.get(), &differentName, ConnectionString(shardTarget)));
});
future1.timed_get(kLongFutureTimeout);
// Ensure that the shard document was unchanged.
assertShardExists(existingShard);
- // Adding the same standalone host with a different maxSize should fail.
- auto future2 = launchAsync([&] {
- ThreadClient tc(getServiceContext());
- auto opCtx = Client::getCurrent()->makeOperationContext();
- ASSERT_EQUALS(ErrorCodes::IllegalOperation,
- ShardingCatalogManager::get(opCtx.get())
- ->addShard(opCtx.get(),
- nullptr,
- ConnectionString(shardTarget),
- existingShard.getMaxSizeMB() + 100));
- });
- future2.timed_get(kLongFutureTimeout);
-
// Adding the same standalone host but as part of a replica set should fail.
// Ensures that even if the user changed the standalone shard to a single-node replica set, you
// can't change the sharded cluster's notion of the shard from standalone to replica set just
@@ -1253,8 +1227,7 @@ TEST_F(AddShardTest, AddExistingShardStandalone) {
ShardingCatalogManager::get(opCtx.get())
->addShard(opCtx.get(),
nullptr,
- ConnectionString::forReplicaSet("mySet", {shardTarget}),
- existingShard.getMaxSizeMB()));
+ ConnectionString::forReplicaSet("mySet", {shardTarget})));
});
future3.timed_get(kLongFutureTimeout);
@@ -1265,11 +1238,9 @@ TEST_F(AddShardTest, AddExistingShardStandalone) {
auto future4 = launchAsync([&] {
ThreadClient tc(getServiceContext());
auto opCtx = Client::getCurrent()->makeOperationContext();
- auto shardName = assertGet(ShardingCatalogManager::get(opCtx.get())
- ->addShard(opCtx.get(),
- &existingShardName,
- ConnectionString(shardTarget),
- existingShard.getMaxSizeMB()));
+ auto shardName = assertGet(
+ ShardingCatalogManager::get(opCtx.get())
+ ->addShard(opCtx.get(), &existingShardName, ConnectionString(shardTarget)));
ASSERT_EQUALS(existingShardName, shardName);
});
future4.timed_get(kLongFutureTimeout);
@@ -1282,11 +1253,9 @@ TEST_F(AddShardTest, AddExistingShardStandalone) {
auto future5 = launchAsync([&] {
ThreadClient tc(getServiceContext());
auto opCtx = Client::getCurrent()->makeOperationContext();
- auto shardName = assertGet(ShardingCatalogManager::get(opCtx.get())
- ->addShard(opCtx.get(),
- nullptr,
- ConnectionString(shardTarget),
- existingShard.getMaxSizeMB()));
+ auto shardName =
+ assertGet(ShardingCatalogManager::get(opCtx.get())
+ ->addShard(opCtx.get(), nullptr, ConnectionString(shardTarget)));
ASSERT_EQUALS(existingShardName, shardName);
});
future5.timed_get(kLongFutureTimeout);
@@ -1311,7 +1280,6 @@ TEST_F(AddShardTest, AddExistingShardReplicaSet) {
ShardType existingShard;
existingShard.setName(existingShardName);
existingShard.setHost(connString.toString());
- existingShard.setMaxSizeMB(100);
existingShard.setState(ShardType::ShardState::kShardAware);
// Make sure the shard already exists.
@@ -1326,27 +1294,13 @@
auto future1 = launchAsync([&] {
ThreadClient tc(getServiceContext());
auto opCtx = Client::getCurrent()->makeOperationContext();
- ASSERT_EQUALS(
- ErrorCodes::IllegalOperation,
- ShardingCatalogManager::get(opCtx.get())
- ->addShard(opCtx.get(), &differentName, connString, existingShard.getMaxSizeMB()));
+ ASSERT_EQUALS(ErrorCodes::IllegalOperation,
+ ShardingCatalogManager::get(opCtx.get())
+ ->addShard(opCtx.get(), &differentName, connString));
});
future1.timed_get(kLongFutureTimeout);
// Ensure that the shard document was unchanged.
assertShardExists(existingShard);
- // Adding the same connection string with a different maxSize should fail.
- auto future2 = launchAsync([&] {
- ThreadClient tc(getServiceContext());
- auto opCtx = Client::getCurrent()->makeOperationContext();
- ASSERT_EQUALS(
- ErrorCodes::IllegalOperation,
- ShardingCatalogManager::get(opCtx.get())
- ->addShard(opCtx.get(), nullptr, connString, existingShard.getMaxSizeMB() + 100));
- });
- future2.timed_get(kLongFutureTimeout);
-
- // Ensure that the shard document was unchanged.
- assertShardExists(existingShard);
@@ -1360,10 +1316,7 @@ TEST_F(AddShardTest, AddExistingShardReplicaSet) {
auto opCtx = Client::getCurrent()->makeOperationContext();
ASSERT_EQUALS(ErrorCodes::IllegalOperation,
ShardingCatalogManager::get(opCtx.get())
- ->addShard(opCtx.get(),
- nullptr,
- ConnectionString(shardTarget),
- existingShard.getMaxSizeMB()));
+ ->addShard(opCtx.get(), nullptr, ConnectionString(shardTarget)));
});
future3.timed_get(kLongFutureTimeout);
@@ -1382,8 +1335,7 @@ TEST_F(AddShardTest, AddExistingShardReplicaSet) {
->addShard(opCtx.get(),
nullptr,
ConnectionString::forReplicaSet(differentSetName,
- connString.getServers()),
- existingShard.getMaxSizeMB()));
+ connString.getServers())));
});
future4.timed_get(kLongFutureTimeout);
@@ -1394,10 +1346,8 @@ TEST_F(AddShardTest, AddExistingShardReplicaSet) {
auto future5 = launchAsync([&] {
ThreadClient tc(getServiceContext());
auto opCtx = Client::getCurrent()->makeOperationContext();
- auto shardName = assertGet(
- ShardingCatalogManager::get(opCtx.get())
- ->addShard(
- opCtx.get(), &existingShardName, connString, existingShard.getMaxSizeMB()));
+ auto shardName = assertGet(ShardingCatalogManager::get(opCtx.get())
+ ->addShard(opCtx.get(), &existingShardName, connString));
ASSERT_EQUALS(existingShardName, shardName);
});
future5.timed_get(kLongFutureTimeout);
@@ -1408,8 +1358,7 @@ TEST_F(AddShardTest, AddExistingShardReplicaSet) {
ThreadClient tc(getServiceContext());
auto opCtx = Client::getCurrent()->makeOperationContext();
auto shardName = assertGet(
- ShardingCatalogManager::get(opCtx.get())
- ->addShard(opCtx.get(), nullptr, connString, existingShard.getMaxSizeMB()));
+ ShardingCatalogManager::get(opCtx.get())->addShard(opCtx.get(), nullptr, connString));
ASSERT_EQUALS(existingShardName, shardName);
});
future6.timed_get(kLongFutureTimeout);
@@ -1432,10 +1381,8 @@ TEST_F(AddShardTest, AddExistingShardReplicaSet) {
auto future7 = launchAsync([&] {
ThreadClient tc(getServiceContext());
auto opCtx = Client::getCurrent()->makeOperationContext();
- auto shardName = assertGet(
- ShardingCatalogManager::get(opCtx.get())
- ->addShard(
- opCtx.get(), nullptr, otherHostConnString, existingShard.getMaxSizeMB()));
+ auto shardName = assertGet(ShardingCatalogManager::get(opCtx.get())
+ ->addShard(opCtx.get(), nullptr, otherHostConnString));
ASSERT_EQUALS(existingShardName, shardName);
});
future7.timed_get(kLongFutureTimeout);
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_remove_shard_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_remove_shard_test.cpp
index 8ac1e44b7bf..7a03900dd42 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_remove_shard_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_remove_shard_test.cpp
@@ -135,19 +135,16 @@ TEST_F(RemoveShardTest, RemoveShardAnotherShardDraining) {
ShardType shard1;
shard1.setName("shard1");
shard1.setHost("host1:12345");
- shard1.setMaxSizeMB(100);
shard1.setState(ShardType::ShardState::kShardAware);
ShardType shard2;
shard2.setName("shard2");
shard2.setHost("host2:12345");
- shard2.setMaxSizeMB(100);
shard2.setState(ShardType::ShardState::kShardAware);
ShardType shard3;
shard3.setName("shard3");
shard3.setHost("host3:12345");
- shard3.setMaxSizeMB(100);
shard3.setState(ShardType::ShardState::kShardAware);
setupShards(std::vector<ShardType>{shard1, shard2, shard3});
@@ -170,7 +167,6 @@ TEST_F(RemoveShardTest, RemoveShardCantRemoveLastShard) {
ShardType shard1;
shard1.setName("shard1");
shard1.setHost("host1:12345");
- shard1.setMaxSizeMB(100);
shard1.setState(ShardType::ShardState::kShardAware);
setupShards(std::vector<ShardType>{shard1});
@@ -186,13 +182,11 @@ TEST_F(RemoveShardTest, RemoveShardStartDraining) {
ShardType shard1;
shard1.setName("shard1");
shard1.setHost("host1:12345");
- shard1.setMaxSizeMB(100);
shard1.setState(ShardType::ShardState::kShardAware);
ShardType shard2;
shard2.setName("shard2");
shard2.setHost("host2:12345");
- shard2.setMaxSizeMB(100);
shard2.setState(ShardType::ShardState::kShardAware);
setupShards(std::vector<ShardType>{shard1, shard2});
@@ -209,13 +203,11 @@ TEST_F(RemoveShardTest, RemoveShardStillDrainingChunksRemaining) {
ShardType shard1;
shard1.setName("shard1");
shard1.setHost("host1:12345");
- shard1.setMaxSizeMB(100);
shard1.setState(ShardType::ShardState::kShardAware);
ShardType shard2;
shard2.setName("shard2");
shard2.setHost("host2:12345");
- shard2.setMaxSizeMB(100);
shard2.setState(ShardType::ShardState::kShardAware);
auto epoch = OID::gen();
@@ -263,13 +255,11 @@ TEST_F(RemoveShardTest, RemoveShardStillDrainingDatabasesRemaining) {
ShardType shard1;
shard1.setName("shard1");
shard1.setHost("host1:12345");
- shard1.setMaxSizeMB(100);
shard1.setState(ShardType::ShardState::kShardAware);
ShardType shard2;
shard2.setName("shard2");
shard2.setHost("host2:12345");
- shard2.setMaxSizeMB(100);
shard2.setState(ShardType::ShardState::kShardAware);
setupShards(std::vector<ShardType>{shard1, shard2});
@@ -296,13 +286,11 @@ TEST_F(RemoveShardTest, RemoveShardCompletion) {
ShardType shard1;
shard1.setName("shard1");
shard1.setHost("host1:12345");
- shard1.setMaxSizeMB(100);
shard1.setState(ShardType::ShardState::kShardAware);
ShardType shard2;
shard2.setName("shard2");
shard2.setHost("host2:12345");
- shard2.setMaxSizeMB(100);
shard2.setState(ShardType::ShardState::kShardAware);
auto epoch = OID::gen();
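
Note on the remove-shard fixtures above: with setMaxSizeMB gone, every shard is now built from the same three setters, repeated once per shard. A small factory helper could cut that duplication. The sketch below is hypothetical: ShardDoc and makeShardAwareShard are stand-ins invented for illustration, not the real mongo::ShardType API.

#include <iostream>
#include <string>
#include <vector>

// Simplified stand-in for mongo::ShardType, keeping only the fields the
// fixtures above still set.
struct ShardDoc {
    std::string name;
    std::string host;
    bool shardAware = true;  // models ShardType::ShardState::kShardAware
};

// Hypothetical helper: one call replaces the repeated setName/setHost/setState
// triple in each test.
ShardDoc makeShardAwareShard(const std::string& name, const std::string& host) {
    return ShardDoc{name, host, /*shardAware=*/true};
}

int main() {
    // Mirrors setupShards({shard1, shard2}) in the tests, minus the boilerplate.
    std::vector<ShardDoc> shards{makeShardAwareShard("shard1", "host1:12345"),
                                 makeShardAwareShard("shard2", "host2:12345")};
    for (const auto& s : shards)
        std::cout << s.name << " -> " << s.host << '\n';
    return 0;
}
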
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_shard_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_shard_operations.cpp
index 34fdefaecd5..355c0b5520d 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_shard_operations.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_shard_operations.cpp
@@ -28,6 +28,7 @@
*/
+#include "mongo/db/ops/write_ops_gen.h"
#include "mongo/platform/basic.h"
#include "mongo/db/s/config/sharding_catalog_manager.h"
@@ -226,8 +227,7 @@ StatusWith<Shard::CommandResponse> ShardingCatalogManager::_runCommandForAddShar
StatusWith<boost::optional<ShardType>> ShardingCatalogManager::_checkIfShardExists(
OperationContext* opCtx,
const ConnectionString& proposedShardConnectionString,
- const std::string* proposedShardName,
- long long proposedShardMaxSize) {
+ const std::string* proposedShardName) {
// Check whether any host in the connection is already part of the cluster.
const auto existingShards = Grid::get(opCtx)->catalogClient()->getAllShards(
opCtx, repl::ReadConcernLevel::kLocalReadConcern);
@@ -259,9 +259,6 @@ StatusWith<boost::optional<ShardType>> ShardingCatalogManager::_checkIfShardExis
proposedShardConnectionString.getSetName() != existingShardConnStr.getSetName()) {
return false;
}
- if (proposedShardMaxSize != existingShard.getMaxSizeMB()) {
- return false;
- }
return true;
};
@@ -576,8 +573,7 @@ StatusWith<std::vector<std::string>> ShardingCatalogManager::_getDBNamesListFrom
StatusWith<std::string> ShardingCatalogManager::addShard(
OperationContext* opCtx,
const std::string* shardProposedName,
- const ConnectionString& shardConnectionString,
- const long long maxSize) {
+ const ConnectionString& shardConnectionString) {
if (!shardConnectionString) {
return {ErrorCodes::BadValue, "Invalid connection string"};
}
@@ -593,8 +589,7 @@ StatusWith<std::string> ShardingCatalogManager::addShard(
// Check if this shard has already been added (can happen in the case of a retry after a network
// error, for example) and thus this addShard request should be considered a no-op.
- auto existingShard =
- _checkIfShardExists(opCtx, shardConnectionString, shardProposedName, maxSize);
+ auto existingShard = _checkIfShardExists(opCtx, shardConnectionString, shardProposedName);
if (!existingShard.isOK()) {
return existingShard.getStatus();
}
@@ -664,10 +659,6 @@ StatusWith<std::string> ShardingCatalogManager::addShard(
shardType.setName(result.getValue());
}
- if (maxSize > 0) {
- shardType.setMaxSizeMB(maxSize);
- }
-
// Helper function that runs a command on the to-be shard and returns the status
auto runCmdOnNewShard = [this, &opCtx, &targeter](const BSONObj& cmd) -> Status {
auto swCommandResponse =
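
With maxSize out of the picture, the retry check above reduces to comparing the proposed connection string and, if given, the proposed name against the existing shard document. The following is a minimal standalone model of that decision, assuming plain std types; ExistingShard and matchExistingShard are hypothetical names, and the real _checkIfShardExists additionally distinguishes overlapping-host errors, which this sketch folds into a simple non-match.

#include <iostream>
#include <optional>
#include <string>

// Hypothetical, simplified shard record: only the fields the check still reads.
struct ExistingShard {
    std::string name;
    std::string connString;  // e.g. "mySet/host1:12345,host2:12345"
};

// Models the no-op-retry decision after this change: a proposed shard matches
// an existing one iff the connection strings agree and, when a name was
// proposed, the names agree too. maxSize no longer participates.
std::optional<std::string> matchExistingShard(const ExistingShard& existing,
                                              const std::string& proposedConnString,
                                              const std::string* proposedName) {
    if (proposedConnString != existing.connString)
        return std::nullopt;  // not the same shard
    if (proposedName && *proposedName != existing.name)
        return std::nullopt;  // the tests above expect IllegalOperation here
    return existing.name;     // exact retry: succeed with the existing name
}

int main() {
    const ExistingShard shard{"mySet", "mySet/host1:12345"};
    const std::string sameName = "mySet";
    const std::string otherName = "other";
    // An exact retry is a no-op that reports the existing shard's name.
    std::cout << matchExistingShard(shard, "mySet/host1:12345", &sameName).value() << '\n';
    // The same hosts under a different proposed name must not match.
    std::cout << std::boolalpha
              << matchExistingShard(shard, "mySet/host1:12345", &otherName).has_value()
              << '\n';
    return 0;
}
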
diff --git a/src/mongo/s/catalog/sharding_catalog_client_test.cpp b/src/mongo/s/catalog/sharding_catalog_client_test.cpp
index 5ff6a5d5fff..551feda38b9 100644
--- a/src/mongo/s/catalog/sharding_catalog_client_test.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_client_test.cpp
@@ -283,7 +283,6 @@ TEST_F(ShardingCatalogClientTest, GetAllShardsValid) {
s1.setName("shard0000");
s1.setHost("ShardHost");
s1.setDraining(false);
- s1.setMaxSizeMB(50);
s1.setTags({"tag1", "tag2", "tag3"});
ShardType s2;
@@ -293,7 +292,6 @@ TEST_F(ShardingCatalogClientTest, GetAllShardsValid) {
ShardType s3;
s3.setName("shard0002");
s3.setHost("ShardHost");
- s3.setMaxSizeMB(65);
const vector<ShardType> expectedShardsList = {s1, s2, s3};
diff --git a/src/mongo/s/catalog/type_shard.h b/src/mongo/s/catalog/type_shard.h
index cc10535de03..4f75dabad74 100644
--- a/src/mongo/s/catalog/type_shard.h
+++ b/src/mongo/s/catalog/type_shard.h
@@ -135,6 +135,8 @@ private:
boost::optional<std::string> _host;
// (O) is it draining chunks?
boost::optional<bool> _draining;
+
+ // TODO SERVER-68430 remove maxSizeMB field after 7.0 branches out
// (O) maximum allowed disk space in MB
boost::optional<long long> _maxSizeMB;
// (O) shard tags
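
As the TODO above notes, the header intentionally keeps _maxSizeMB parseable until 7.0 branches out, so config documents written by older binaries still validate even though the value is ignored. A minimal sketch of that keep-but-ignore pattern, assuming std::optional and a flat string map standing in for BSON; ShardRecord and parseShardRecord are hypothetical names.

#include <iostream>
#include <map>
#include <optional>
#include <string>

// Hypothetical document type: one live field plus one deprecated field that is
// still accepted on parse but never acted on or written back.
struct ShardRecord {
    std::string host;
    std::optional<long long> maxSizeMB;  // deprecated, kept for old documents
};

// Parses a flat string->string map standing in for a BSON shard document.
ShardRecord parseShardRecord(const std::map<std::string, std::string>& doc) {
    ShardRecord rec;
    rec.host = doc.at("host");
    if (auto it = doc.find("maxSize"); it != doc.end())
        rec.maxSizeMB = std::stoll(it->second);  // tolerated, ignored downstream
    return rec;
}

int main() {
    const auto rec = parseShardRecord({{"host", "host1:12345"}, {"maxSize", "100"}});
    std::cout << "host=" << rec.host
              << " deprecatedMaxSize=" << rec.maxSizeMB.value_or(0) << '\n';
    return 0;
}
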
diff --git a/src/mongo/s/catalog/type_shard_test.cpp b/src/mongo/s/catalog/type_shard_test.cpp
index d2c9ab0326e..05e009ff3d5 100644
--- a/src/mongo/s/catalog/type_shard_test.cpp
+++ b/src/mongo/s/catalog/type_shard_test.cpp
@@ -63,17 +63,7 @@ TEST(ShardType, OnlyMandatory) {
TEST(ShardType, AllOptionalsPresent) {
BSONObj obj = BSON(ShardType::name("shard0000")
- << ShardType::host("localhost:27017") << ShardType::draining(true)
- << ShardType::maxSizeMB(100));
- StatusWith<ShardType> shardRes = ShardType::fromBSON(obj);
- ASSERT(shardRes.isOK());
- ShardType shard = shardRes.getValue();
- ASSERT(shard.validate().isOK());
-}
-
-TEST(ShardType, MaxSizeAsFloat) {
- BSONObj obj = BSON(ShardType::name("shard0000")
- << ShardType::host("localhost:27017") << ShardType::maxSizeMB() << 100.0);
+ << ShardType::host("localhost:27017") << ShardType::draining(true));
StatusWith<ShardType> shardRes = ShardType::fromBSON(obj);
ASSERT(shardRes.isOK());
ShardType shard = shardRes.getValue();
diff --git a/src/mongo/s/commands/cluster_coll_stats_cmd.cpp b/src/mongo/s/commands/cluster_coll_stats_cmd.cpp
index 25b3e15b578..db4ac80eb26 100644
--- a/src/mongo/s/commands/cluster_coll_stats_cmd.cpp
+++ b/src/mongo/s/commands/cluster_coll_stats_cmd.cpp
@@ -294,9 +294,6 @@ public:
const auto shardAvgObjSize = e.numberLong();
uassert(5688300, "'avgObjSize' provided but not 'count'", !countField.eoo());
unscaledCollSize += shardAvgObjSize * shardObjCount;
- } else if (fieldName == "maxSize") {
- const auto shardMaxSize = e.numberLong();
- maxSize = std::max(maxSize, shardMaxSize);
} else if (fieldName == "indexSizes") {
BSONObjIterator k(e.Obj());
while (k.more()) {
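
Unlike the removed maxSize branch, which max-reduced a single number across shards, the surviving avgObjSize handling accumulates count and avgObjSize per shard and recomputes a weighted average at the end. A standalone sketch of that merge, assuming a simplified per-shard struct in place of the BSON iteration above:

#include <iostream>
#include <vector>

// Hypothetical per-shard slice of a collStats reply: just the two fields the
// merge above reads.
struct ShardCollStats {
    long long count;       // "count"
    long long avgObjSize;  // "avgObjSize"
};

// Mirrors the surviving merge logic: sum count, accumulate count * avgObjSize
// into an unscaled total, then divide once at the end so the cluster-wide
// average is weighted by object count.
long long mergedAvgObjSize(const std::vector<ShardCollStats>& shards) {
    long long totalCount = 0;
    long long unscaledCollSize = 0;
    for (const auto& s : shards) {
        totalCount += s.count;
        unscaledCollSize += s.avgObjSize * s.count;
    }
    return totalCount ? unscaledCollSize / totalCount : 0;
}

int main() {
    // Two shards with different object counts: the bigger shard dominates.
    std::cout << mergedAvgObjSize({{1000, 512}, {100, 2048}}) << '\n';  // prints 651
    return 0;
}
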
diff --git a/src/mongo/s/request_types/add_shard_request_test.cpp b/src/mongo/s/request_types/add_shard_request_test.cpp
index 8b28a1921b5..58b1a1f4974 100644
--- a/src/mongo/s/request_types/add_shard_request_test.cpp
+++ b/src/mongo/s/request_types/add_shard_request_test.cpp
@@ -40,7 +40,6 @@ namespace {
const char kConnString[] = "setname/localhost:27017,localhost:27018,localhost:27019";
const char kConnStringNonLocalHost[] = "setname/host1:27017,host2:27017,host3:27017";
const char kShardName[] = "shardName";
-const long long kMaxSizeMB = 10;
// Test parsing the internal fields from a command BSONObj. The internal fields (besides the
// top-level command name) are identical between the external mongos version and internal config
@@ -64,63 +63,26 @@ TEST(AddShardRequest, ParseInternalFieldsInvalidConnectionString) {
}
}
-TEST(AddShardRequest, ParseInternalFieldsMissingMaxSize) {
- {
- BSONObj obj = BSON(AddShardRequest::mongosAddShard
- << kConnString << AddShardRequest::shardName << kShardName);
-
- auto swAddShardRequest = AddShardRequest::parseFromMongosCommand(obj);
- ASSERT_OK(swAddShardRequest.getStatus());
-
- auto req = swAddShardRequest.getValue();
- ASSERT_EQ(req.getConnString().toString(), kConnString);
- ASSERT_TRUE(req.hasName());
- ASSERT_EQ(req.getName(), kShardName);
- ASSERT_FALSE(req.hasMaxSize());
- }
-
- {
- BSONObj obj = BSON(AddShardRequest::configsvrAddShard
- << kConnString << AddShardRequest::shardName << kShardName);
-
-
- auto swAddShardRequest = AddShardRequest::parseFromConfigCommand(obj);
- ASSERT_OK(swAddShardRequest.getStatus());
-
- auto req = swAddShardRequest.getValue();
- ASSERT_EQ(req.getConnString().toString(), kConnString);
- ASSERT_TRUE(req.hasName());
- ASSERT_EQ(req.getName(), kShardName);
- ASSERT_FALSE(req.hasMaxSize());
- }
-}
-
TEST(AddShardRequest, ParseInternalFieldsMissingName) {
{
- BSONObj obj = BSON(AddShardRequest::mongosAddShard
- << kConnString << AddShardRequest::maxSizeMB << kMaxSizeMB);
+ BSONObj obj = BSON(AddShardRequest::mongosAddShard << kConnString);
auto swAddShardRequest = AddShardRequest::parseFromMongosCommand(obj);
ASSERT_OK(swAddShardRequest.getStatus());
auto req = swAddShardRequest.getValue();
ASSERT_EQ(req.getConnString().toString(), kConnString);
- ASSERT_TRUE(req.hasMaxSize());
- ASSERT_EQ(req.getMaxSize(), kMaxSizeMB);
ASSERT_FALSE(req.hasName());
}
{
- BSONObj obj = BSON(AddShardRequest::configsvrAddShard
- << kConnString << AddShardRequest::maxSizeMB << kMaxSizeMB);
+ BSONObj obj = BSON(AddShardRequest::configsvrAddShard << kConnString);
auto swAddShardRequest = AddShardRequest::parseFromConfigCommand(obj);
ASSERT_OK(swAddShardRequest.getStatus());
auto req = swAddShardRequest.getValue();
ASSERT_EQ(req.getConnString().toString(), kConnString);
- ASSERT_TRUE(req.hasMaxSize());
- ASSERT_EQ(req.getMaxSize(), kMaxSizeMB);
ASSERT_FALSE(req.hasName());
}
}
@@ -128,32 +90,26 @@ TEST(AddShardRequest, ParseInternalFieldsMissingName) {
TEST(AddShardRequest, ParseInternalFieldsAllFieldsPresent) {
{
BSONObj obj = BSON(AddShardRequest::mongosAddShard
- << kConnString << AddShardRequest::shardName << kShardName
- << AddShardRequest::maxSizeMB << kMaxSizeMB);
+ << kConnString << AddShardRequest::shardName << kShardName);
auto swAddShardRequest = AddShardRequest::parseFromMongosCommand(obj);
ASSERT_OK(swAddShardRequest.getStatus());
auto req = swAddShardRequest.getValue();
ASSERT_EQ(req.getConnString().toString(), kConnString);
- ASSERT_TRUE(req.hasMaxSize());
- ASSERT_EQ(req.getMaxSize(), kMaxSizeMB);
ASSERT_TRUE(req.hasName());
ASSERT_EQ(req.getName(), kShardName);
}
{
BSONObj obj = BSON(AddShardRequest::configsvrAddShard
- << kConnString << AddShardRequest::shardName << kShardName
- << AddShardRequest::maxSizeMB << kMaxSizeMB);
+ << kConnString << AddShardRequest::shardName << kShardName);
auto swAddShardRequest = AddShardRequest::parseFromConfigCommand(obj);
ASSERT_OK(swAddShardRequest.getStatus());
auto req = swAddShardRequest.getValue();
ASSERT_EQ(req.getConnString().toString(), kConnString);
- ASSERT_TRUE(req.hasMaxSize());
- ASSERT_EQ(req.getMaxSize(), kMaxSizeMB);
ASSERT_TRUE(req.hasName());
ASSERT_EQ(req.getName(), kShardName);
}
@@ -163,8 +119,7 @@ TEST(AddShardRequest, ParseInternalFieldsAllFieldsPresent) {
TEST(AddShardRequest, ToCommandForConfig) {
BSONObj mongosCmdObj = BSON(AddShardRequest::mongosAddShard
- << kConnString << AddShardRequest::shardName << kShardName
- << AddShardRequest::maxSizeMB << kMaxSizeMB);
+ << kConnString << AddShardRequest::shardName << kShardName);
auto swAddShardRequest = AddShardRequest::parseFromMongosCommand(mongosCmdObj);
ASSERT_OK(swAddShardRequest.getStatus());
@@ -173,12 +128,10 @@ TEST(AddShardRequest, ToCommandForConfig) {
auto configCmdObj = req.toCommandForConfig();
ASSERT_EQ(configCmdObj[AddShardRequest::configsvrAddShard.name()].String(), kConnString);
ASSERT_EQ(configCmdObj[AddShardRequest::shardName.name()].String(), kShardName);
- ASSERT_EQ(configCmdObj[AddShardRequest::maxSizeMB.name()].Long(), kMaxSizeMB);
}
TEST(AddShardRequest, ToCommandForConfigMissingName) {
- BSONObj mongosCmdObj = BSON(AddShardRequest::mongosAddShard
- << kConnString << AddShardRequest::maxSizeMB << kMaxSizeMB);
+ BSONObj mongosCmdObj = BSON(AddShardRequest::mongosAddShard << kConnString);
auto swAddShardRequest = AddShardRequest::parseFromMongosCommand(mongosCmdObj);
ASSERT_OK(swAddShardRequest.getStatus());
@@ -186,7 +139,6 @@ TEST(AddShardRequest, ToCommandForConfigMissingName) {
auto configCmdObj = req.toCommandForConfig();
ASSERT_EQ(configCmdObj[AddShardRequest::configsvrAddShard.name()].String(), kConnString);
- ASSERT_EQ(configCmdObj[AddShardRequest::maxSizeMB.name()].Long(), kMaxSizeMB);
ASSERT_FALSE(configCmdObj.hasField(AddShardRequest::shardName.name()));
}
@@ -201,7 +153,6 @@ TEST(AddShardRequest, ToCommandForConfigMissingMaxSize) {
auto configCmdObj = req.toCommandForConfig();
ASSERT_EQ(configCmdObj[AddShardRequest::configsvrAddShard.name()].String(), kConnString);
ASSERT_EQ(configCmdObj[AddShardRequest::shardName.name()].String(), kShardName);
- ASSERT_FALSE(configCmdObj.hasField(AddShardRequest::maxSizeMB.name()));
}
// Test validating an AddShardRequest that was successfully parsed.
diff --git a/src/mongo/s/request_types/add_shard_request_type.cpp b/src/mongo/s/request_types/add_shard_request_type.cpp
index fbb77b17f66..23c918f9883 100644
--- a/src/mongo/s/request_types/add_shard_request_type.cpp
+++ b/src/mongo/s/request_types/add_shard_request_type.cpp
@@ -107,15 +107,10 @@ StatusWith<AddShardRequest> AddShardRequest::parseInternalFields(const BSONObj&
return status;
}
}
- {
- long long requestMaxSizeMB;
- Status status = bsonExtractIntegerField(obj, maxSizeMB.name(), &requestMaxSizeMB);
- if (status.isOK()) {
- request._maxSizeMB = std::move(requestMaxSizeMB);
- } else if (status != ErrorCodes::NoSuchKey) {
- return status;
- }
- }
+
+ uassert(ErrorCodes::InvalidOptions,
+ "addShard no longer supports maxSize field",
+ !obj.hasField(maxSizeMB.name()));
return request;
}
@@ -123,9 +118,6 @@ StatusWith<AddShardRequest> AddShardRequest::parseInternalFields(const BSONObj&
BSONObj AddShardRequest::toCommandForConfig() {
BSONObjBuilder cmdBuilder;
cmdBuilder.append(configsvrAddShard.name(), _connString.toString());
- if (hasMaxSize()) {
- cmdBuilder.append(maxSizeMB.name(), *_maxSizeMB);
- }
if (hasName()) {
cmdBuilder.append(shardName.name(), *_name);
}
@@ -154,8 +146,6 @@ string AddShardRequest::toString() const {
ss << "AddShardRequest shard: " << _connString.toString();
if (hasName())
ss << ", name: " << *_name;
- if (hasMaxSize())
- ss << ", maxSize: " << *_maxSizeMB;
return ss;
}
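
The net effect of the parser change above: instead of silently reading maxSize, parseInternalFields now rejects any request that still carries it, so callers fail fast with InvalidOptions. A self-contained sketch of the same reject-removed-option pattern, with hypothetical names and a plain exception standing in for uassert/ErrorCodes:

#include <iostream>
#include <map>
#include <stdexcept>
#include <string>

// Hypothetical parsed request: connection string plus an optional name.
struct ParsedAddShard {
    std::string connString;
    std::string name;  // empty means "auto-generate"
};

// Models the new parseInternalFields behaviour: a document that still carries
// the removed "maxSize" field is rejected up front rather than ignored.
ParsedAddShard parseAddShard(const std::map<std::string, std::string>& doc) {
    if (doc.count("maxSize"))
        throw std::invalid_argument("addShard no longer supports the maxSize field");
    ParsedAddShard req;
    req.connString = doc.at("addShard");
    if (auto it = doc.find("name"); it != doc.end())
        req.name = it->second;
    return req;
}

int main() {
    try {
        parseAddShard({{"addShard", "rs0/host1:27017"}, {"maxSize", "1024"}});
    } catch (const std::invalid_argument& e) {
        std::cout << "rejected: " << e.what() << '\n';  // fails fast, like the uassert
    }
    return 0;
}
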
diff --git a/src/mongo/s/request_types/add_shard_request_type.h b/src/mongo/s/request_types/add_shard_request_type.h
index 7d9cb4fa8ab..a4491dc8f52 100644
--- a/src/mongo/s/request_types/add_shard_request_type.h
+++ b/src/mongo/s/request_types/add_shard_request_type.h
@@ -93,15 +93,6 @@ public:
return *_name;
}
- bool hasMaxSize() const {
- return _maxSizeMB.is_initialized();
- }
-
- long long getMaxSize() const {
- invariant(_maxSizeMB.is_initialized());
- return *_maxSizeMB;
- }
-
private:
explicit AddShardRequest(ConnectionString connString);
@@ -117,9 +108,6 @@ private:
// A name for the shard. If not specified, a unique name is automatically generated.
boost::optional<std::string> _name;
-
- // The maximum size in megabytes of the shard. If set to 0, the size is not limited.
- boost::optional<long long> _maxSizeMB;
};
} // namespace mongo
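
After the header change above, AddShardRequest carries only the connection string and an optional name, and toCommandForConfig forwards exactly those two fields. A hypothetical sketch of that forwarding with a plain string builder in place of BSONObjBuilder; the "_configsvrAddShard" literal is an assumption based on the configsvrAddShard field used throughout the tests.

#include <iostream>
#include <optional>
#include <string>

// Hypothetical mirror of the slimmed-down AddShardRequest: two fields only.
struct AddShardReq {
    std::string connString;
    std::optional<std::string> name;
};

// Models toCommandForConfig after this change: the command body contains the
// connection string and, when present, the shard name; nothing else.
std::string toCommandForConfig(const AddShardReq& req) {
    std::string cmd = "{ _configsvrAddShard: \"" + req.connString + "\"";
    if (req.name)
        cmd += ", name: \"" + *req.name + "\"";
    return cmd + " }";
}

int main() {
    std::cout << toCommandForConfig({"rs0/host1:27017", std::string("shard0")}) << '\n';
    std::cout << toCommandForConfig({"rs0/host1:27017", std::nullopt}) << '\n';
    return 0;
}
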