Diffstat (limited to 'src/mongo/db/s/config')
-rw-r--r--  src/mongo/db/s/config/configsvr_enable_sharding_command.cpp              |   2
-rw-r--r--  src/mongo/db/s/config/configsvr_move_primary_command.cpp                 |   7
-rw-r--r--  src/mongo/db/s/config/configsvr_refine_collection_shard_key_command.cpp  |   3
-rw-r--r--  src/mongo/db/s/config/configsvr_remove_shard_command.cpp                 |   4
-rw-r--r--  src/mongo/db/s/config/configsvr_shard_collection_command.cpp             |  25
-rw-r--r--  src/mongo/db/s/config/initial_split_policy.cpp                           |  12
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager.cpp                       |   3
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_add_shard_test.cpp        | 122
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp      |  55
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_collection_operations.cpp |  11
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_database_operations.cpp   |   8
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_enable_sharding_test.cpp  |  16
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_shard_operations.cpp      |  50
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_zone_operations.cpp       |   8
14 files changed, 114 insertions, 212 deletions
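
The hunks below are mechanical reflows of chained str::stream() message builders: string literals that previously forced one '<<' per line are now packed onto shared lines with the interpolated values. A minimal, self-contained sketch of the resulting layout, using std::ostringstream as a stand-in for mongo::str::stream (assumption: both simply accumulate a message through chained '<<'; the helper name below is illustrative, not from this diff):

    #include <iostream>
    #include <sstream>
    #include <string>

    // Stand-in for mongo::str::stream: accumulate an error message via chained '<<'.
    std::string splitOrderErrorMessage(const std::string& endKey, const std::string& startKey) {
        std::ostringstream msg;
        // Post-reflow layout: literals and values share lines rather than one '<<' per line.
        msg << "Split keys must be specified in strictly increasing order. Key "
            << endKey << " was specified after " << startKey << ".";
        return msg.str();
    }

    int main() {
        std::cout << splitOrderErrorMessage("{ x: 20 }", "{ x: 30 }") << "\n";
        return 0;
    }
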
diff --git a/src/mongo/db/s/config/configsvr_enable_sharding_command.cpp b/src/mongo/db/s/config/configsvr_enable_sharding_command.cpp
index b1c3717f3ff..e9ca1356b62 100644
--- a/src/mongo/db/s/config/configsvr_enable_sharding_command.cpp
+++ b/src/mongo/db/s/config/configsvr_enable_sharding_command.cpp
@@ -50,8 +50,8 @@
namespace mongo {
-using std::shared_ptr;
using std::set;
+using std::shared_ptr;
using std::string;
namespace {
diff --git a/src/mongo/db/s/config/configsvr_move_primary_command.cpp b/src/mongo/db/s/config/configsvr_move_primary_command.cpp
index eea3b876e46..fe5c843303e 100644
--- a/src/mongo/db/s/config/configsvr_move_primary_command.cpp
+++ b/src/mongo/db/s/config/configsvr_move_primary_command.cpp
@@ -160,10 +160,9 @@ public:
if (!toShardStatus.isOK()) {
log() << "Could not move database '" << dbname << "' to shard '" << to
<< causedBy(toShardStatus.getStatus());
- uassertStatusOKWithContext(
- toShardStatus.getStatus(),
- str::stream() << "Could not move database '" << dbname << "' to shard '" << to
- << "'");
+ uassertStatusOKWithContext(toShardStatus.getStatus(),
+ str::stream() << "Could not move database '" << dbname
+ << "' to shard '" << to << "'");
}
return toShardStatus.getValue();
diff --git a/src/mongo/db/s/config/configsvr_refine_collection_shard_key_command.cpp b/src/mongo/db/s/config/configsvr_refine_collection_shard_key_command.cpp
index 21901105103..ff1334ef1ed 100644
--- a/src/mongo/db/s/config/configsvr_refine_collection_shard_key_command.cpp
+++ b/src/mongo/db/s/config/configsvr_refine_collection_shard_key_command.cpp
@@ -96,8 +96,7 @@ public:
uassert(ErrorCodes::StaleEpoch,
str::stream()
- << "refineCollectionShardKey namespace "
- << nss.toString()
+ << "refineCollectionShardKey namespace " << nss.toString()
<< " has a different epoch than mongos had in its routing table cache",
request().getEpoch() == collType.getEpoch());
diff --git a/src/mongo/db/s/config/configsvr_remove_shard_command.cpp b/src/mongo/db/s/config/configsvr_remove_shard_command.cpp
index 2f39f852bc8..5186128ef8c 100644
--- a/src/mongo/db/s/config/configsvr_remove_shard_command.cpp
+++ b/src/mongo/db/s/config/configsvr_remove_shard_command.cpp
@@ -112,8 +112,8 @@ public:
const auto shardStatus =
Grid::get(opCtx)->shardRegistry()->getShard(opCtx, ShardId(target));
if (!shardStatus.isOK()) {
- std::string msg(str::stream() << "Could not drop shard '" << target
- << "' because it does not exist");
+ std::string msg(str::stream()
+ << "Could not drop shard '" << target << "' because it does not exist");
log() << msg;
uasserted(ErrorCodes::ShardNotFound, msg);
}
diff --git a/src/mongo/db/s/config/configsvr_shard_collection_command.cpp b/src/mongo/db/s/config/configsvr_shard_collection_command.cpp
index e53552916d8..216d3bbaa2c 100644
--- a/src/mongo/db/s/config/configsvr_shard_collection_command.cpp
+++ b/src/mongo/db/s/config/configsvr_shard_collection_command.cpp
@@ -99,8 +99,7 @@ void validateAndDeduceFullRequestOptions(OperationContext* opCtx,
CollatorFactoryInterface::get(opCtx->getServiceContext())->makeFromBSON(collation));
uassert(ErrorCodes::BadValue,
str::stream() << "The collation for shardCollection must be {locale: 'simple'}, "
- << "but found: "
- << collation,
+ << "but found: " << collation,
!collator);
simpleCollationSpecified = true;
}
@@ -114,8 +113,7 @@ void validateAndDeduceFullRequestOptions(OperationContext* opCtx,
int numChunks = request->getNumInitialChunks();
uassert(ErrorCodes::InvalidOptions,
str::stream() << "numInitialChunks cannot be more than either: "
- << maxNumInitialChunksForShards
- << ", 8192 * number of shards; or "
+ << maxNumInitialChunksForShards << ", 8192 * number of shards; or "
<< maxNumInitialChunksTotal,
numChunks >= 0 && numChunks <= maxNumInitialChunksForShards &&
numChunks <= maxNumInitialChunksTotal);
@@ -208,9 +206,7 @@ void migrateAndFurtherSplitInitialChunks(OperationContext* opCtx,
auto chunkManager = routingInfo.cm();
// Move and commit each "big chunk" to a different shard.
- auto nextShardId = [&, indx = 0 ]() mutable {
- return shardIds[indx++ % shardIds.size()];
- };
+ auto nextShardId = [&, indx = 0]() mutable { return shardIds[indx++ % shardIds.size()]; };
for (auto chunk : chunkManager->chunks()) {
const auto shardId = nextShardId();
@@ -323,10 +319,7 @@ boost::optional<UUID> getUUIDFromPrimaryShard(OperationContext* opCtx,
uassert(ErrorCodes::InternalError,
str::stream() << "expected the primary shard host " << primaryShard->getConnString()
- << " for database "
- << nss.db()
- << " to return an entry for "
- << nss.ns()
+ << " for database " << nss.db() << " to return an entry for " << nss.ns()
<< " in its listCollections response, but it did not",
!res.isEmpty());
@@ -338,15 +331,12 @@ boost::optional<UUID> getUUIDFromPrimaryShard(OperationContext* opCtx,
uassert(ErrorCodes::InternalError,
str::stream() << "expected primary shard to return 'info' field as part of "
"listCollections for "
- << nss.ns()
- << ", but got "
- << res,
+ << nss.ns() << ", but got " << res,
!collectionInfo.isEmpty());
uassert(ErrorCodes::InternalError,
str::stream() << "expected primary shard to return a UUID for collection " << nss.ns()
- << " as part of 'info' field but got "
- << res,
+ << " as part of 'info' field but got " << res,
collectionInfo.hasField("uuid"));
return uassertStatusOK(UUID::parse(collectionInfo["uuid"]));
@@ -576,8 +566,7 @@ public:
if (fromMapReduce) {
uassert(ErrorCodes::ConflictingOperationInProgress,
str::stream() << "Map reduce with sharded output to a new collection found "
- << nss.ns()
- << " to be non-empty which is not supported.",
+ << nss.ns() << " to be non-empty which is not supported.",
isEmpty);
}
diff --git a/src/mongo/db/s/config/initial_split_policy.cpp b/src/mongo/db/s/config/initial_split_policy.cpp
index 71931babb73..9d882e45678 100644
--- a/src/mongo/db/s/config/initial_split_policy.cpp
+++ b/src/mongo/db/s/config/initial_split_policy.cpp
@@ -223,7 +223,7 @@ InitialSplitPolicy::generateShardCollectionInitialZonedChunks(
const auto& keyPattern = shardKeyPattern.getKeyPattern();
- auto nextShardIdForHole = [&, indx = 0 ]() mutable {
+ auto nextShardIdForHole = [&, indx = 0]() mutable {
return shardIdsForGaps[indx++ % shardIdsForGaps.size()];
};
@@ -250,10 +250,7 @@ InitialSplitPolicy::generateShardCollectionInitialZonedChunks(
const auto& shardIdsForChunk = it->second;
uassert(50973,
str::stream()
- << "Cannot shard collection "
- << nss.ns()
- << " due to zone "
- << tag.getTag()
+ << "Cannot shard collection " << nss.ns() << " due to zone " << tag.getTag()
<< " which is not assigned to a shard. Please assign this zone to a shard.",
!shardIdsForChunk.empty());
@@ -396,7 +393,7 @@ InitialSplitPolicy::ShardCollectionConfig InitialSplitPolicy::createFirstChunksU
shardSelectedSplitPoints,
shardIds,
1 // numContiguousChunksPerShard
- );
+ );
}
boost::optional<CollectionType> InitialSplitPolicy::checkIfCollectionAlreadyShardedWithSameOptions(
@@ -425,8 +422,7 @@ boost::optional<CollectionType> InitialSplitPolicy::checkIfCollectionAlreadyShar
// match the options the collection was originally sharded with.
uassert(ErrorCodes::AlreadyInitialized,
str::stream() << "sharding already enabled for collection " << nss.ns()
- << " with options "
- << existingOptions.toString(),
+ << " with options " << existingOptions.toString(),
requestedOptions.hasSameOptions(existingOptions));
return existingOptions;
diff --git a/src/mongo/db/s/config/sharding_catalog_manager.cpp b/src/mongo/db/s/config/sharding_catalog_manager.cpp
index fc610ed35a3..424db73a9d0 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager.cpp
@@ -198,8 +198,7 @@ Status ShardingCatalogManager::_initConfigVersion(OperationContext* opCtx) {
if (versionInfo.getCurrentVersion() < CURRENT_CONFIG_VERSION) {
return {ErrorCodes::IncompatibleShardingConfigVersion,
str::stream() << "need to upgrade current cluster version to v"
- << CURRENT_CONFIG_VERSION
- << "; currently at v"
+ << CURRENT_CONFIG_VERSION << "; currently at v"
<< versionInfo.getCurrentVersion()};
}
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_add_shard_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_add_shard_test.cpp
index eee16cc6aa5..e92588cbe07 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_add_shard_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_add_shard_test.cpp
@@ -129,8 +129,9 @@ protected:
ASSERT_EQ(request.target, target);
ASSERT_EQ(request.dbname, nss.db());
ASSERT_BSONOBJ_EQ(request.cmdObj,
- BSON("drop" << nss.coll() << "writeConcern" << BSON("w"
- << "majority")));
+ BSON("drop" << nss.coll() << "writeConcern"
+ << BSON("w"
+ << "majority")));
ASSERT_BSONOBJ_EQ(rpc::makeEmptyMetadata(), request.metadata);
return BSON("ok" << 1);
@@ -146,8 +147,7 @@ protected:
ASSERT_BSONOBJ_EQ(request.cmdObj,
BSON("setFeatureCompatibilityVersion"
<< "4.2"
- << "writeConcern"
- << writeConcern));
+ << "writeConcern" << writeConcern));
return response;
});
@@ -315,18 +315,16 @@ protected:
* describing the addShard request for 'addedShard'.
*/
void assertChangeWasLogged(const ShardType& addedShard) {
- auto response = assertGet(
- getConfigShard()->exhaustiveFindOnConfig(operationContext(),
- ReadPreferenceSetting{
- ReadPreference::PrimaryOnly},
- repl::ReadConcernLevel::kLocalReadConcern,
- NamespaceString("config.changelog"),
- BSON("what"
- << "addShard"
- << "details.name"
- << addedShard.getName()),
- BSONObj(),
- 1));
+ auto response = assertGet(getConfigShard()->exhaustiveFindOnConfig(
+ operationContext(),
+ ReadPreferenceSetting{ReadPreference::PrimaryOnly},
+ repl::ReadConcernLevel::kLocalReadConcern,
+ NamespaceString("config.changelog"),
+ BSON("what"
+ << "addShard"
+ << "details.name" << addedShard.getName()),
+ BSONObj(),
+ 1));
ASSERT_EQ(1U, response.docs.size());
auto logEntryBSON = response.docs.front();
auto logEntry = assertGet(ChangeLogType::fromBSON(logEntryBSON));
@@ -347,35 +345,24 @@ protected:
TEST_F(AddShardTest, CreateShardIdentityUpsertForAddShard) {
std::string shardName = "shardName";
- BSONObj expectedBSON = BSON("update"
- << "system.version"
- << "bypassDocumentValidation"
- << false
- << "ordered"
- << true
- << "updates"
- << BSON_ARRAY(BSON(
- "q"
- << BSON("_id"
- << "shardIdentity")
- << "u"
- << BSON("shardName" << shardName << "clusterId" << _clusterId
- << "configsvrConnectionString"
- << replicationCoordinator()
- ->getConfig()
- .getConnectionString()
- .toString())
- << "multi"
- << false
- << "upsert"
- << true))
- << "writeConcern"
- << BSON("w"
- << "majority"
- << "wtimeout"
- << 60000)
- << "allowImplicitCollectionCreation"
- << true);
+ BSONObj expectedBSON = BSON(
+ "update"
+ << "system.version"
+ << "bypassDocumentValidation" << false << "ordered" << true << "updates"
+ << BSON_ARRAY(BSON(
+ "q" << BSON("_id"
+ << "shardIdentity")
+ << "u"
+ << BSON(
+ "shardName"
+ << shardName << "clusterId" << _clusterId << "configsvrConnectionString"
+ << replicationCoordinator()->getConfig().getConnectionString().toString())
+ << "multi" << false << "upsert" << true))
+ << "writeConcern"
+ << BSON("w"
+ << "majority"
+ << "wtimeout" << 60000)
+ << "allowImplicitCollectionCreation" << true);
auto addShardCmd = add_shard_util::createAddShardCmd(operationContext(), shardName);
auto actualBSON = add_shard_util::createShardIdentityUpsertForAddShard(addShardCmd);
ASSERT_BSONOBJ_EQ(expectedBSON, actualBSON);
@@ -427,8 +414,7 @@ TEST_F(AddShardTest, StandaloneBasicSuccess) {
shardTarget,
std::vector<BSONObj>{BSON("name"
<< "local"
- << "sizeOnDisk"
- << 1000),
+ << "sizeOnDisk" << 1000),
BSON("name" << discoveredDB1.getName() << "sizeOnDisk" << 2000),
BSON("name" << discoveredDB2.getName() << "sizeOnDisk" << 5000)});
@@ -508,8 +494,7 @@ TEST_F(AddShardTest, StandaloneGenerateName) {
shardTarget,
std::vector<BSONObj>{BSON("name"
<< "local"
- << "sizeOnDisk"
- << 1000),
+ << "sizeOnDisk" << 1000),
BSON("name" << discoveredDB1.getName() << "sizeOnDisk" << 2000),
BSON("name" << discoveredDB2.getName() << "sizeOnDisk" << 5000)});
@@ -648,8 +633,7 @@ TEST_F(AddShardTest, AddReplicaSetShardAsStandalone) {
BSONObj commandResponse = BSON("ok" << 1 << "ismaster" << true << "setName"
<< "myOtherSet"
- << "maxWireVersion"
- << WireVersion::LATEST_WIRE_VERSION);
+ << "maxWireVersion" << WireVersion::LATEST_WIRE_VERSION);
expectIsMaster(shardTarget, commandResponse);
future.timed_get(kLongFutureTimeout);
@@ -706,8 +690,7 @@ TEST_F(AddShardTest, ReplicaSetMistmatchedReplicaSetName) {
BSONObj commandResponse = BSON("ok" << 1 << "ismaster" << true << "setName"
<< "myOtherSet"
- << "maxWireVersion"
- << WireVersion::LATEST_WIRE_VERSION);
+ << "maxWireVersion" << WireVersion::LATEST_WIRE_VERSION);
expectIsMaster(shardTarget, commandResponse);
future.timed_get(kLongFutureTimeout);
@@ -735,12 +718,10 @@ TEST_F(AddShardTest, ShardIsCSRSConfigServer) {
"as a shard since it is a config server");
});
- BSONObj commandResponse = BSON("ok" << 1 << "ismaster" << true << "setName"
- << "config"
- << "configsvr"
- << true
- << "maxWireVersion"
- << WireVersion::LATEST_WIRE_VERSION);
+ BSONObj commandResponse =
+ BSON("ok" << 1 << "ismaster" << true << "setName"
+ << "config"
+ << "configsvr" << true << "maxWireVersion" << WireVersion::LATEST_WIRE_VERSION);
expectIsMaster(shardTarget, commandResponse);
future.timed_get(kLongFutureTimeout);
@@ -772,9 +753,7 @@ TEST_F(AddShardTest, ReplicaSetMissingHostsProvidedInSeedList) {
hosts.append("host1:12345");
BSONObj commandResponse = BSON("ok" << 1 << "ismaster" << true << "setName"
<< "mySet"
- << "hosts"
- << hosts.arr()
- << "maxWireVersion"
+ << "hosts" << hosts.arr() << "maxWireVersion"
<< WireVersion::LATEST_WIRE_VERSION);
expectIsMaster(shardTarget, commandResponse);
@@ -808,9 +787,7 @@ TEST_F(AddShardTest, AddShardWithNameConfigFails) {
hosts.append("host2:12345");
BSONObj commandResponse = BSON("ok" << 1 << "ismaster" << true << "setName"
<< "mySet"
- << "hosts"
- << hosts.arr()
- << "maxWireVersion"
+ << "hosts" << hosts.arr() << "maxWireVersion"
<< WireVersion::LATEST_WIRE_VERSION);
expectIsMaster(shardTarget, commandResponse);
@@ -855,9 +832,7 @@ TEST_F(AddShardTest, ShardContainsExistingDatabase) {
hosts.append("host2:12345");
BSONObj commandResponse = BSON("ok" << 1 << "ismaster" << true << "setName"
<< "mySet"
- << "hosts"
- << hosts.arr()
- << "maxWireVersion"
+ << "hosts" << hosts.arr() << "maxWireVersion"
<< WireVersion::LATEST_WIRE_VERSION);
expectIsMaster(shardTarget, commandResponse);
@@ -900,9 +875,7 @@ TEST_F(AddShardTest, SuccessfullyAddReplicaSet) {
hosts.append("host2:12345");
BSONObj commandResponse = BSON("ok" << 1 << "ismaster" << true << "setName"
<< "mySet"
- << "hosts"
- << hosts.arr()
- << "maxWireVersion"
+ << "hosts" << hosts.arr() << "maxWireVersion"
<< WireVersion::LATEST_WIRE_VERSION);
expectIsMaster(shardTarget, commandResponse);
@@ -966,9 +939,7 @@ TEST_F(AddShardTest, ReplicaSetExtraHostsDiscovered) {
hosts.append("host2:12345");
BSONObj commandResponse = BSON("ok" << 1 << "ismaster" << true << "setName"
<< "mySet"
- << "hosts"
- << hosts.arr()
- << "maxWireVersion"
+ << "hosts" << hosts.arr() << "maxWireVersion"
<< WireVersion::LATEST_WIRE_VERSION);
expectIsMaster(shardTarget, commandResponse);
@@ -1049,8 +1020,7 @@ TEST_F(AddShardTest, AddShardSucceedsEvenIfAddingDBsFromNewShardFails) {
shardTarget,
std::vector<BSONObj>{BSON("name"
<< "local"
- << "sizeOnDisk"
- << 1000),
+ << "sizeOnDisk" << 1000),
BSON("name" << discoveredDB1.getName() << "sizeOnDisk" << 2000),
BSON("name" << discoveredDB2.getName() << "sizeOnDisk" << 5000)});
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
index 0936c9fbb55..4423f7ba458 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
@@ -129,8 +129,7 @@ BSONArray buildMergeChunksTransactionPrecond(const std::vector<ChunkType>& chunk
BSON("query" << BSON(ChunkType::ns(chunk.getNS().ns())
<< ChunkType::min(chunk.getMin())
<< ChunkType::max(chunk.getMax()))
- << "orderby"
- << BSON(ChunkType::lastmod() << -1)));
+ << "orderby" << BSON(ChunkType::lastmod() << -1)));
b.append("res",
BSON(ChunkType::epoch(collVersion.epoch())
<< ChunkType::shard(chunk.getShard().toString())));
@@ -146,8 +145,7 @@ Status checkChunkIsOnShard(OperationContext* opCtx,
const ShardId& shard) {
BSONObj chunkQuery =
BSON(ChunkType::ns() << nss.ns() << ChunkType::min() << min << ChunkType::max() << max
- << ChunkType::shard()
- << shard);
+ << ChunkType::shard() << shard);
// Must use local read concern because we're going to perform subsequent writes.
auto findResponseWith =
@@ -166,8 +164,7 @@ Status checkChunkIsOnShard(OperationContext* opCtx,
if (findResponseWith.getValue().docs.empty()) {
return {ErrorCodes::Error(40165),
str::stream()
- << "Could not find the chunk ("
- << chunkQuery.toString()
+ << "Could not find the chunk (" << chunkQuery.toString()
<< ") on the shard. Cannot execute the migration commit with invalid chunks."};
}
@@ -321,13 +318,9 @@ Status ShardingCatalogManager::commitChunkSplit(OperationContext* opCtx,
if (collVersion.epoch() != requestEpoch) {
return {ErrorCodes::StaleEpoch,
str::stream() << "splitChunk cannot split chunk " << range.toString()
- << ". Collection '"
- << nss.ns()
- << "' was dropped and re-created."
- << " Current epoch: "
- << collVersion.epoch()
- << ", cmd epoch: "
- << requestEpoch};
+ << ". Collection '" << nss.ns() << "' was dropped and re-created."
+ << " Current epoch: " << collVersion.epoch()
+ << ", cmd epoch: " << requestEpoch};
}
// Get the shard version (max chunk version) for the shard requesting the split.
@@ -387,18 +380,14 @@ Status ShardingCatalogManager::commitChunkSplit(OperationContext* opCtx,
return {
ErrorCodes::InvalidOptions,
str::stream() << "Split keys must be specified in strictly increasing order. Key "
- << endKey
- << " was specified after "
- << startKey
- << "."};
+ << endKey << " was specified after " << startKey << "."};
}
// Verify that splitPoints are not repeated
if (endKey.woCompare(startKey) == 0) {
return {ErrorCodes::InvalidOptions,
str::stream() << "Split on lower bound of chunk "
- << ChunkRange(startKey, endKey).toString()
- << "is not allowed"};
+ << ChunkRange(startKey, endKey).toString() << "is not allowed"};
}
// verify that splits don't create too-big shard keys
@@ -468,10 +457,8 @@ Status ShardingCatalogManager::commitChunkSplit(OperationContext* opCtx,
b.append("ns", ChunkType::ConfigNS.ns());
b.append("q",
BSON("query" << BSON(ChunkType::ns(nss.ns()) << ChunkType::min() << range.getMin()
- << ChunkType::max()
- << range.getMax())
- << "orderby"
- << BSON(ChunkType::lastmod() << -1)));
+ << ChunkType::max() << range.getMax())
+ << "orderby" << BSON(ChunkType::lastmod() << -1)));
{
BSONObjBuilder bb(b.subobjStart("res"));
bb.append(ChunkType::epoch(), requestEpoch);
@@ -598,10 +585,7 @@ Status ShardingCatalogManager::commitChunkMerge(OperationContext* opCtx,
ErrorCodes::InvalidOptions,
str::stream()
<< "Chunk boundaries must be specified in strictly increasing order. Boundary "
- << chunkBoundaries[i]
- << " was specified after "
- << itChunk.getMin()
- << "."};
+ << chunkBoundaries[i] << " was specified after " << itChunk.getMin() << "."};
}
itChunk.setMax(chunkBoundaries[i]);
@@ -714,11 +698,9 @@ StatusWith<BSONObj> ShardingCatalogManager::commitChunkMigration(
<< "' has been dropped and recreated since the migration began."
" The config server's collection version epoch is now '"
<< currentCollectionVersion.epoch().toString()
- << "', but the shard's is "
- << collectionEpoch.toString()
+ << "', but the shard's is " << collectionEpoch.toString()
<< "'. Aborting migration commit for chunk ("
- << migratedChunk.getRange().toString()
- << ")."};
+ << migratedChunk.getRange().toString() << ")."};
}
// Check that migratedChunk is where it should be, on fromShard.
@@ -762,8 +744,7 @@ StatusWith<BSONObj> ShardingCatalogManager::commitChunkMigration(
if (!newHistory.empty() && newHistory.front().getValidAfter() >= validAfter.get()) {
return {ErrorCodes::IncompatibleShardingMetadata,
str::stream() << "The chunk history for chunk with namespace " << nss.ns()
- << " and min key "
- << migratedChunk.getMin()
+ << " and min key " << migratedChunk.getMin()
<< " is corrupted. The last validAfter "
<< newHistory.back().getValidAfter().toString()
<< " is greater or equal to the new validAfter "
@@ -837,9 +818,7 @@ StatusWith<ChunkType> ShardingCatalogManager::_findChunkOnConfig(OperationContex
if (origChunks.size() != 1) {
return {ErrorCodes::IncompatibleShardingMetadata,
str::stream() << "Tried to find the chunk for namespace " << nss.ns()
- << " and min key "
- << key.toString()
- << ", but found no chunks"};
+ << " and min key " << key.toString() << ", but found no chunks"};
}
return ChunkType::fromConfigBSON(origChunks.front());
@@ -886,9 +865,7 @@ StatusWith<ChunkVersion> ShardingCatalogManager::_findCollectionVersion(
<< "' has been dropped and recreated since the migration began."
" The config server's collection version epoch is now '"
<< currentCollectionVersion.epoch().toString()
- << "', but the shard's is "
- << collectionEpoch.toString()
- << "'."};
+ << "', but the shard's is " << collectionEpoch.toString() << "'."};
}
return currentCollectionVersion;
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_collection_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_collection_operations.cpp
index 5993661a884..2192eaa4599 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_collection_operations.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_collection_operations.cpp
@@ -75,9 +75,9 @@
namespace mongo {
using CollectionUUID = UUID;
+using std::set;
using std::string;
using std::vector;
-using std::set;
namespace {
@@ -113,8 +113,8 @@ boost::optional<UUID> checkCollectionOptions(OperationContext* opCtx,
// TODO: SERVER-33048 check idIndex field
uassert(ErrorCodes::NamespaceExists,
- str::stream() << "ns: " << ns.ns() << " already exists with different options: "
- << actualOptions.toBSON(),
+ str::stream() << "ns: " << ns.ns()
+ << " already exists with different options: " << actualOptions.toBSON(),
options.matchesStorageOptions(
actualOptions, CollatorFactoryInterface::get(opCtx->getServiceContext())));
@@ -170,8 +170,7 @@ void checkForExistingChunks(OperationContext* opCtx, const NamespaceString& nss)
str::stream() << "A previous attempt to shard collection " << nss.ns()
<< " failed after writing some initial chunks to config.chunks. Please "
"manually delete the partially written chunks for collection "
- << nss.ns()
- << " from config.chunks",
+ << nss.ns() << " from config.chunks",
numChunks == 0);
}
@@ -432,7 +431,7 @@ void ShardingCatalogManager::shardCollection(OperationContext* opCtx,
optimizationType,
treatAsEmpty,
1 // numContiguousChunksPerShard
- );
+ );
} else {
initialChunks = InitialSplitPolicy::createFirstChunksUnoptimized(
opCtx, nss, fieldsAndOrder, dbPrimaryShardId);
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_database_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_database_operations.cpp
index 3a408ea6090..11091ef8957 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_database_operations.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_database_operations.cpp
@@ -91,10 +91,7 @@ DatabaseType ShardingCatalogManager::createDatabase(OperationContext* opCtx,
uassert(ErrorCodes::DatabaseDifferCase,
str::stream() << "can't have 2 databases that just differ on case "
- << " have: "
- << actualDbName
- << " want to add: "
- << dbName,
+ << " have: " << actualDbName << " want to add: " << dbName,
actualDbName == dbName);
// We did a local read of the database entry above and found that the database already
@@ -264,8 +261,7 @@ Status ShardingCatalogManager::commitMovePrimary(OperationContext* opCtx,
// are holding the dist lock during the movePrimary operation.
uassert(ErrorCodes::IncompatibleShardingMetadata,
str::stream() << "Tried to update primary shard for database '" << dbname
- << " with version "
- << currentDatabaseVersion.getLastMod(),
+ << " with version " << currentDatabaseVersion.getLastMod(),
updateStatus.getValue());
// Ensure the next attempt to retrieve the database or any of its collections will do a full
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_enable_sharding_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_enable_sharding_test.cpp
index 8cd076b9c28..825236b9575 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_enable_sharding_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_enable_sharding_test.cpp
@@ -149,15 +149,13 @@ TEST_F(EnableShardingTest, dbExistsInvalidFormat) {
setupShards(vector<ShardType>{shard});
// Set up database with bad type for primary field.
- ASSERT_OK(catalogClient()->insertConfigDocument(operationContext(),
- DatabaseType::ConfigNS,
- BSON("_id"
- << "db6"
- << "primary"
- << 12
- << "partitioned"
- << false),
- ShardingCatalogClient::kMajorityWriteConcern));
+ ASSERT_OK(
+ catalogClient()->insertConfigDocument(operationContext(),
+ DatabaseType::ConfigNS,
+ BSON("_id"
+ << "db6"
+ << "primary" << 12 << "partitioned" << false),
+ ShardingCatalogClient::kMajorityWriteConcern));
ASSERT_THROWS_CODE(
ShardingCatalogManager::get(operationContext())->enableSharding(operationContext(), "db6"),
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_shard_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_shard_operations.cpp
index 8e6e2e29423..066405d32b8 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_shard_operations.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_shard_operations.cpp
@@ -181,19 +181,17 @@ StatusWith<Shard::CommandResponse> ShardingCatalogManager::_runCommandForAddShar
Status commandStatus = getStatusFromCommandResult(result);
if (!Shard::shouldErrorBePropagated(commandStatus.code())) {
- commandStatus = {ErrorCodes::OperationFailed,
- str::stream() << "failed to run command " << cmdObj
- << " when attempting to add shard "
- << targeter->connectionString().toString()
- << causedBy(commandStatus)};
+ commandStatus = {
+ ErrorCodes::OperationFailed,
+ str::stream() << "failed to run command " << cmdObj << " when attempting to add shard "
+ << targeter->connectionString().toString() << causedBy(commandStatus)};
}
Status writeConcernStatus = getWriteConcernStatusFromCommandResult(result);
if (!Shard::shouldErrorBePropagated(writeConcernStatus.code())) {
writeConcernStatus = {ErrorCodes::OperationFailed,
str::stream() << "failed to satisfy writeConcern for command "
- << cmdObj
- << " when attempting to add shard "
+ << cmdObj << " when attempting to add shard "
<< targeter->connectionString().toString()
<< causedBy(writeConcernStatus)};
}
@@ -257,8 +255,7 @@ StatusWith<boost::optional<ShardType>> ShardingCatalogManager::_checkIfShardExis
} else {
return {ErrorCodes::IllegalOperation,
str::stream() << "A shard already exists containing the replica set '"
- << existingShardConnStr.getSetName()
- << "'"};
+ << existingShardConnStr.getSetName() << "'"};
}
}
@@ -277,10 +274,8 @@ StatusWith<boost::optional<ShardType>> ShardingCatalogManager::_checkIfShardExis
return {ErrorCodes::IllegalOperation,
str::stream() << "'" << addingHost.toString() << "' "
<< "is already a member of the existing shard '"
- << existingShard.getHost()
- << "' ("
- << existingShard.getName()
- << ")."};
+ << existingShard.getHost() << "' ("
+ << existingShard.getName() << ")."};
}
}
}
@@ -340,8 +335,7 @@ StatusWith<ShardType> ShardingCatalogManager::_validateHostAsShard(
if (!status.isOK()) {
return status.withContext(str::stream() << "isMaster returned invalid 'maxWireVersion' "
<< "field when attempting to add "
- << connectionString.toString()
- << " as a shard");
+ << connectionString.toString() << " as a shard");
}
if (serverGlobalParams.featureCompatibility.getVersion() >
ServerGlobalParams::FeatureCompatibility::Version::kFullyDowngradedTo40) {
@@ -362,8 +356,7 @@ StatusWith<ShardType> ShardingCatalogManager::_validateHostAsShard(
if (!status.isOK()) {
return status.withContext(str::stream() << "isMaster returned invalid 'ismaster' "
<< "field when attempting to add "
- << connectionString.toString()
- << " as a shard");
+ << connectionString.toString() << " as a shard");
}
if (!isMaster) {
return {ErrorCodes::NotMaster,
@@ -387,8 +380,7 @@ StatusWith<ShardType> ShardingCatalogManager::_validateHostAsShard(
if (!providedSetName.empty() && foundSetName.empty()) {
return {ErrorCodes::OperationFailed,
str::stream() << "host did not return a set name; "
- << "is the replica set still initializing? "
- << resIsMaster};
+ << "is the replica set still initializing? " << resIsMaster};
}
// Make sure the set name specified in the connection string matches the one where its hosts
@@ -396,8 +388,7 @@ StatusWith<ShardType> ShardingCatalogManager::_validateHostAsShard(
if (!providedSetName.empty() && (providedSetName != foundSetName)) {
return {ErrorCodes::OperationFailed,
str::stream() << "the provided connection string (" << connectionString.toString()
- << ") does not match the actual set name "
- << foundSetName};
+ << ") does not match the actual set name " << foundSetName};
}
// Is it a config server?
@@ -437,11 +428,8 @@ StatusWith<ShardType> ShardingCatalogManager::_validateHostAsShard(
if (hostSet.find(host) == hostSet.end()) {
return {ErrorCodes::OperationFailed,
str::stream() << "in seed list " << connectionString.toString() << ", host "
- << host
- << " does not belong to replica set "
- << foundSetName
- << "; found "
- << resIsMaster.toString()};
+ << host << " does not belong to replica set " << foundSetName
+ << "; found " << resIsMaster.toString()};
}
}
}
@@ -611,13 +599,9 @@ StatusWith<std::string> ShardingCatalogManager::addShard(
const auto& dbDoc = dbt.getValue().value;
return Status(ErrorCodes::OperationFailed,
str::stream() << "can't add shard "
- << "'"
- << shardConnectionString.toString()
- << "'"
- << " because a local database '"
- << dbName
- << "' exists in another "
- << dbDoc.getPrimary());
+ << "'" << shardConnectionString.toString() << "'"
+ << " because a local database '" << dbName
+ << "' exists in another " << dbDoc.getPrimary());
} else if (dbt != ErrorCodes::NamespaceNotFound) {
return dbt.getStatus();
}
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_zone_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_zone_operations.cpp
index b1b7b0d9adb..9b5b8eb0f8a 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_zone_operations.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_zone_operations.cpp
@@ -154,17 +154,13 @@ StatusWith<ChunkRange> includeFullShardKey(OperationContext* opCtx,
if (!range.getMin().isFieldNamePrefixOf(shardKeyBSON)) {
return {ErrorCodes::ShardKeyNotFound,
str::stream() << "min: " << range.getMin() << " is not a prefix of the shard key "
- << shardKeyBSON
- << " of ns: "
- << nss.ns()};
+ << shardKeyBSON << " of ns: " << nss.ns()};
}
if (!range.getMax().isFieldNamePrefixOf(shardKeyBSON)) {
return {ErrorCodes::ShardKeyNotFound,
str::stream() << "max: " << range.getMax() << " is not a prefix of the shard key "
- << shardKeyBSON
- << " of ns: "
- << nss.ns()};
+ << shardKeyBSON << " of ns: " << nss.ns()};
}
return ChunkRange(shardKeyPattern.extendRangeBound(range.getMin(), false),