path: root/src/mongo/db/s
author      clang-format-7.0.1 <adam.martin@10gen.com>   2019-07-26 18:20:35 -0400
committer   ADAM David Alan Martin <adam.martin@10gen.com>   2019-07-27 11:02:23 -0400
commit      134a4083953270e8a11430395357fb70a29047ad (patch)
tree        dd428e1230e31d92b20b393dfdc17ffe7fa79cb6 /src/mongo/db/s
parent      1e46b5049003f427047e723ea5fab15b5a9253ca (diff)
download    mongo-134a4083953270e8a11430395357fb70a29047ad.tar.gz
SERVER-41772 Apply clang-format 7.0.1 to the codebase
Diffstat (limited to 'src/mongo/db/s')
-rw-r--r--  src/mongo/db/s/active_migrations_registry.cpp | 9
-rw-r--r--  src/mongo/db/s/active_move_primaries_registry.cpp | 4
-rw-r--r--  src/mongo/db/s/active_move_primaries_registry.h | 2
-rw-r--r--  src/mongo/db/s/active_move_primaries_registry_test.cpp | 2
-rw-r--r--  src/mongo/db/s/active_shard_collection_registry.cpp | 6
-rw-r--r--  src/mongo/db/s/add_shard_util.cpp | 2
-rw-r--r--  src/mongo/db/s/add_shard_util.h | 2
-rw-r--r--  src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp | 12
-rw-r--r--  src/mongo/db/s/balancer/balancer_policy.cpp | 6
-rw-r--r--  src/mongo/db/s/balancer/migration_manager.cpp | 5
-rw-r--r--  src/mongo/db/s/balancer/migration_manager_test.cpp | 16
-rw-r--r--  src/mongo/db/s/balancer/scoped_migration_request.cpp | 12
-rw-r--r--  src/mongo/db/s/check_sharding_index_command.cpp | 9
-rw-r--r--  src/mongo/db/s/chunk_splitter.cpp | 10
-rw-r--r--  src/mongo/db/s/cleanup_orphaned_cmd.cpp | 6
-rw-r--r--  src/mongo/db/s/collection_metadata.cpp | 3
-rw-r--r--  src/mongo/db/s/collection_metadata_filtering_test.cpp | 9
-rw-r--r--  src/mongo/db/s/collection_metadata_test.cpp | 3
-rw-r--r--  src/mongo/db/s/collection_range_deleter.cpp | 18
-rw-r--r--  src/mongo/db/s/collection_range_deleter.h | 16
-rw-r--r--  src/mongo/db/s/collection_sharding_runtime.cpp | 3
-rw-r--r--  src/mongo/db/s/collection_sharding_state_test.cpp | 23
-rw-r--r--  src/mongo/db/s/config/configsvr_enable_sharding_command.cpp | 2
-rw-r--r--  src/mongo/db/s/config/configsvr_move_primary_command.cpp | 7
-rw-r--r--  src/mongo/db/s/config/configsvr_refine_collection_shard_key_command.cpp | 3
-rw-r--r--  src/mongo/db/s/config/configsvr_remove_shard_command.cpp | 4
-rw-r--r--  src/mongo/db/s/config/configsvr_shard_collection_command.cpp | 25
-rw-r--r--  src/mongo/db/s/config/initial_split_policy.cpp | 12
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager.cpp | 3
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_add_shard_test.cpp | 122
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp | 55
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_collection_operations.cpp | 11
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_database_operations.cpp | 8
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_enable_sharding_test.cpp | 16
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_shard_operations.cpp | 50
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_zone_operations.cpp | 8
-rw-r--r--  src/mongo/db/s/config_server_op_observer_test.cpp | 2
-rw-r--r--  src/mongo/db/s/merge_chunks_command.cpp | 45
-rw-r--r--  src/mongo/db/s/metadata_manager.cpp | 12
-rw-r--r--  src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp | 23
-rw-r--r--  src/mongo/db/s/migration_chunk_cloner_source_legacy_commands.cpp | 8
-rw-r--r--  src/mongo/db/s/migration_destination_manager.cpp | 37
-rw-r--r--  src/mongo/db/s/migration_session_id.cpp | 4
-rw-r--r--  src/mongo/db/s/migration_source_manager.cpp | 44
-rw-r--r--  src/mongo/db/s/migration_util.cpp | 2
-rw-r--r--  src/mongo/db/s/migration_util.h | 2
-rw-r--r--  src/mongo/db/s/move_chunk_command.cpp | 4
-rw-r--r--  src/mongo/db/s/move_primary_source_manager.cpp | 3
-rw-r--r--  src/mongo/db/s/scoped_operation_completion_sharding_actions.h | 2
-rw-r--r--  src/mongo/db/s/session_catalog_migration_destination.cpp | 37
-rw-r--r--  src/mongo/db/s/session_catalog_migration_source.cpp | 8
-rw-r--r--  src/mongo/db/s/set_shard_version_command.cpp | 16
-rw-r--r--  src/mongo/db/s/shard_key_util.cpp | 7
-rw-r--r--  src/mongo/db/s/shard_metadata_util_test.cpp | 7
-rw-r--r--  src/mongo/db/s/shard_server_catalog_cache_loader.cpp | 71
-rw-r--r--  src/mongo/db/s/shard_server_op_observer.cpp | 5
-rw-r--r--  src/mongo/db/s/sharding_initialization_mongod.cpp | 66
-rw-r--r--  src/mongo/db/s/sharding_initialization_mongod_test.cpp | 45
-rw-r--r--  src/mongo/db/s/sharding_logging.cpp | 14
-rw-r--r--  src/mongo/db/s/shardsvr_shard_collection.cpp | 57
-rw-r--r--  src/mongo/db/s/split_chunk.cpp | 18
-rw-r--r--  src/mongo/db/s/transaction_coordinator.cpp | 7
-rw-r--r--  src/mongo/db/s/transaction_coordinator_catalog.cpp | 4
-rw-r--r--  src/mongo/db/s/transaction_coordinator_futures_util.cpp | 10
-rw-r--r--  src/mongo/db/s/transaction_coordinator_futures_util.h | 43
-rw-r--r--  src/mongo/db/s/transaction_coordinator_futures_util_test.cpp | 6
-rw-r--r--  src/mongo/db/s/transaction_coordinator_service.cpp | 2
-rw-r--r--  src/mongo/db/s/transaction_coordinator_structures_test.cpp | 3
-rw-r--r--  src/mongo/db/s/transaction_coordinator_test.cpp | 65
-rw-r--r--  src/mongo/db/s/transaction_coordinator_util.cpp | 45
-rw-r--r--  src/mongo/db/s/txn_two_phase_commit_cmds.cpp | 11
-rw-r--r--  src/mongo/db/s/type_shard_identity_test.cpp | 21
-rw-r--r--  src/mongo/db/s/wait_for_ongoing_chunk_splits_command.cpp | 4
73 files changed, 503 insertions, 761 deletions
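
Most of the churn in the hunks below is mechanical. clang-format 7.0.1 packs chained str::stream() "<<" fragments onto as few lines as the column limit allows, drops the padding spaces inside lambda capture lists, and adds closing comments to namespace braces. A minimal, self-contained sketch of those three patterns follows; the names are hypothetical and it is not an excerpt from this commit.

#include <iostream>
#include <sstream>
#include <string>

namespace demo {

std::string makeError(const std::string& range, const std::string& ns) {
    // The pre-existing layout broke after every "<<":
    //     msg << "currently receiving chunk "
    //         << range
    //         << " for namespace "
    //         << ns;
    // clang-format 7.0.1 packs the chain up to the column limit instead:
    std::ostringstream msg;
    msg << "currently receiving chunk " << range << " for namespace " << ns;
    return msg.str();
}

}  // namespace demo  (clang-format 7 adds or normalizes this closing comment)

int main() {
    // Capture lists lose their inner padding: "[ &prefix ](" becomes "[&prefix](".
    std::string prefix = "error: ";
    auto report = [&prefix](const std::string& what) { std::cout << prefix << what << '\n'; };
    report(demo::makeError("[{min: 0}, {max: 10})", "test.coll"));
    return 0;
}
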
diff --git a/src/mongo/db/s/active_migrations_registry.cpp b/src/mongo/db/s/active_migrations_registry.cpp
index 55cbfecfa9c..a3854cb9038 100644
--- a/src/mongo/db/s/active_migrations_registry.cpp
+++ b/src/mongo/db/s/active_migrations_registry.cpp
@@ -148,9 +148,7 @@ Status ActiveMigrationsRegistry::ActiveMoveChunkState::constructErrorStatus() co
str::stream() << "Unable to start new migration because this shard is currently "
"donating chunk "
<< ChunkRange(args.getMinKey(), args.getMaxKey()).toString()
- << " for namespace "
- << args.getNss().ns()
- << " to "
+ << " for namespace " << args.getNss().ns() << " to "
<< args.getToShardId()};
}
@@ -158,10 +156,7 @@ Status ActiveMigrationsRegistry::ActiveReceiveChunkState::constructErrorStatus()
return {ErrorCodes::ConflictingOperationInProgress,
str::stream() << "Unable to start new migration because this shard is currently "
"receiving chunk "
- << range.toString()
- << " for namespace "
- << nss.ns()
- << " from "
+ << range.toString() << " for namespace " << nss.ns() << " from "
<< fromShardId};
}
diff --git a/src/mongo/db/s/active_move_primaries_registry.cpp b/src/mongo/db/s/active_move_primaries_registry.cpp
index dcc15c2ceb6..fa383581038 100644
--- a/src/mongo/db/s/active_move_primaries_registry.cpp
+++ b/src/mongo/db/s/active_move_primaries_registry.cpp
@@ -90,9 +90,7 @@ Status ActiveMovePrimariesRegistry::ActiveMovePrimaryState::constructErrorStatus
str::stream()
<< "Unable to start new movePrimary operation because this shard is currently "
"moving its primary for namespace "
- << requestArgs.get_shardsvrMovePrimary()->ns()
- << " to "
- << requestArgs.getTo()};
+ << requestArgs.get_shardsvrMovePrimary()->ns() << " to " << requestArgs.getTo()};
}
ScopedMovePrimary::ScopedMovePrimary(ActiveMovePrimariesRegistry* registry,
diff --git a/src/mongo/db/s/active_move_primaries_registry.h b/src/mongo/db/s/active_move_primaries_registry.h
index 8ddd051478e..38b19a6c94f 100644
--- a/src/mongo/db/s/active_move_primaries_registry.h
+++ b/src/mongo/db/s/active_move_primaries_registry.h
@@ -159,4 +159,4 @@ private:
// This is the future, which will be signaled at the end of a movePrimary command.
std::shared_ptr<Notification<Status>> _completionNotification;
};
-}
+} // namespace mongo
diff --git a/src/mongo/db/s/active_move_primaries_registry_test.cpp b/src/mongo/db/s/active_move_primaries_registry_test.cpp
index 70d7265d66e..6b6541accf6 100644
--- a/src/mongo/db/s/active_move_primaries_registry_test.cpp
+++ b/src/mongo/db/s/active_move_primaries_registry_test.cpp
@@ -27,9 +27,9 @@
* it in the license file.
*/
-#include "mongo/db/s/active_move_primaries_registry.h"
#include "mongo/bson/bsonmisc.h"
#include "mongo/db/client.h"
+#include "mongo/db/s/active_move_primaries_registry.h"
#include "mongo/db/service_context_d_test_fixture.h"
#include "mongo/s/request_types/move_primary_gen.h"
#include "mongo/unittest/unittest.h"
diff --git a/src/mongo/db/s/active_shard_collection_registry.cpp b/src/mongo/db/s/active_shard_collection_registry.cpp
index eb6c42923a6..6a01fdd90ee 100644
--- a/src/mongo/db/s/active_shard_collection_registry.cpp
+++ b/src/mongo/db/s/active_shard_collection_registry.cpp
@@ -134,11 +134,9 @@ Status ActiveShardCollectionRegistry::ActiveShardCollectionState::constructError
return {ErrorCodes::ConflictingOperationInProgress,
str::stream() << "Unable to shard collection "
<< request.get_shardsvrShardCollection().get().ns()
- << " with arguments: "
- << request.toBSON()
+ << " with arguments: " << request.toBSON()
<< " because this shard is currently running shard collection on this "
- << "collection with arguments: "
- << activeRequest.toBSON()};
+ << "collection with arguments: " << activeRequest.toBSON()};
}
ScopedShardCollection::ScopedShardCollection(std::string nss,
diff --git a/src/mongo/db/s/add_shard_util.cpp b/src/mongo/db/s/add_shard_util.cpp
index 466d1a3fe6d..0dae94c0102 100644
--- a/src/mongo/db/s/add_shard_util.cpp
+++ b/src/mongo/db/s/add_shard_util.cpp
@@ -77,5 +77,5 @@ BSONObj createShardIdentityUpsertForAddShard(const AddShard& addShardCmd) {
return request.toBSON();
}
-} // namespace mongo
} // namespace add_shard_util
+} // namespace mongo
diff --git a/src/mongo/db/s/add_shard_util.h b/src/mongo/db/s/add_shard_util.h
index b7ab9fd0b36..020831833ba 100644
--- a/src/mongo/db/s/add_shard_util.h
+++ b/src/mongo/db/s/add_shard_util.h
@@ -60,5 +60,5 @@ AddShard createAddShardCmd(OperationContext* opCtx, const ShardId& shardName);
*/
BSONObj createShardIdentityUpsertForAddShard(const AddShard& addShardCmd);
-} // namespace mongo
} // namespace add_shard_util
+} // namespace mongo
diff --git a/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp b/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp
index e6d358e982b..a8f7cde67f4 100644
--- a/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp
+++ b/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp
@@ -441,12 +441,10 @@ StatusWith<MigrateInfoVector> BalancerChunkSelectionPolicyImpl::_getMigrateCandi
if (chunkAtZoneMin.getMin().woCompare(tagRange.min)) {
return {ErrorCodes::IllegalOperation,
str::stream()
- << "Tag boundaries "
- << tagRange.toString()
+ << "Tag boundaries " << tagRange.toString()
<< " fall in the middle of an existing chunk "
<< ChunkRange(chunkAtZoneMin.getMin(), chunkAtZoneMin.getMax()).toString()
- << ". Balancing for collection "
- << nss.ns()
+ << ". Balancing for collection " << nss.ns()
<< " will be postponed until the chunk is split appropriately."};
}
@@ -462,12 +460,10 @@ StatusWith<MigrateInfoVector> BalancerChunkSelectionPolicyImpl::_getMigrateCandi
chunkAtZoneMax.getMax().woCompare(tagRange.max)) {
return {ErrorCodes::IllegalOperation,
str::stream()
- << "Tag boundaries "
- << tagRange.toString()
+ << "Tag boundaries " << tagRange.toString()
<< " fall in the middle of an existing chunk "
<< ChunkRange(chunkAtZoneMax.getMin(), chunkAtZoneMax.getMax()).toString()
- << ". Balancing for collection "
- << nss.ns()
+ << ". Balancing for collection " << nss.ns()
<< " will be postponed until the chunk is split appropriately."};
}
}
diff --git a/src/mongo/db/s/balancer/balancer_policy.cpp b/src/mongo/db/s/balancer/balancer_policy.cpp
index 7b8a7a021c1..8f9cbc9b8ef 100644
--- a/src/mongo/db/s/balancer/balancer_policy.cpp
+++ b/src/mongo/db/s/balancer/balancer_policy.cpp
@@ -127,8 +127,7 @@ Status DistributionStatus::addRangeToZone(const ZoneRange& range) {
return {ErrorCodes::RangeOverlapConflict,
str::stream() << "Zone range: " << range.toString()
- << " is overlapping with existing: "
- << intersectingRange.toString()};
+ << " is overlapping with existing: " << intersectingRange.toString()};
}
// Check for containment
@@ -138,8 +137,7 @@ Status DistributionStatus::addRangeToZone(const ZoneRange& range) {
invariant(SimpleBSONObjComparator::kInstance.evaluate(range.max < nextRange.max));
return {ErrorCodes::RangeOverlapConflict,
str::stream() << "Zone range: " << range.toString()
- << " is overlapping with existing: "
- << nextRange.toString()};
+ << " is overlapping with existing: " << nextRange.toString()};
}
}
diff --git a/src/mongo/db/s/balancer/migration_manager.cpp b/src/mongo/db/s/balancer/migration_manager.cpp
index b131bbafde7..0a988cf1b13 100644
--- a/src/mongo/db/s/balancer/migration_manager.cpp
+++ b/src/mongo/db/s/balancer/migration_manager.cpp
@@ -517,7 +517,7 @@ void MigrationManager::_schedule(WithLock lock,
StatusWith<executor::TaskExecutor::CallbackHandle> callbackHandleWithStatus =
executor->scheduleRemoteCommand(
remoteRequest,
- [ this, service = opCtx->getServiceContext(), itMigration ](
+ [this, service = opCtx->getServiceContext(), itMigration](
const executor::TaskExecutor::RemoteCommandCallbackArgs& args) {
ThreadClient tc(getThreadName(), service);
auto opCtx = cc().makeOperationContext();
@@ -614,8 +614,7 @@ Status MigrationManager::_processRemoteCommandResponse(
scopedMigrationRequest->keepDocumentOnDestruct();
return {ErrorCodes::BalancerInterrupted,
stream() << "Migration interrupted because the balancer is stopping."
- << " Command status: "
- << remoteCommandResponse.status.toString()};
+ << " Command status: " << remoteCommandResponse.status.toString()};
}
if (!remoteCommandResponse.isOK()) {
diff --git a/src/mongo/db/s/balancer/migration_manager_test.cpp b/src/mongo/db/s/balancer/migration_manager_test.cpp
index 37749bba329..ff801ee67e6 100644
--- a/src/mongo/db/s/balancer/migration_manager_test.cpp
+++ b/src/mongo/db/s/balancer/migration_manager_test.cpp
@@ -138,17 +138,17 @@ protected:
// Random static initialization order can result in X constructor running before Y constructor
// if X and Y are defined in different source files. Defining variables here to enforce order.
const BSONObj kShard0 =
- BSON(ShardType::name(kShardId0.toString()) << ShardType::host(kShardHost0.toString())
- << ShardType::maxSizeMB(kMaxSizeMB));
+ BSON(ShardType::name(kShardId0.toString())
+ << ShardType::host(kShardHost0.toString()) << ShardType::maxSizeMB(kMaxSizeMB));
const BSONObj kShard1 =
- BSON(ShardType::name(kShardId1.toString()) << ShardType::host(kShardHost1.toString())
- << ShardType::maxSizeMB(kMaxSizeMB));
+ BSON(ShardType::name(kShardId1.toString())
+ << ShardType::host(kShardHost1.toString()) << ShardType::maxSizeMB(kMaxSizeMB));
const BSONObj kShard2 =
- BSON(ShardType::name(kShardId2.toString()) << ShardType::host(kShardHost2.toString())
- << ShardType::maxSizeMB(kMaxSizeMB));
+ BSON(ShardType::name(kShardId2.toString())
+ << ShardType::host(kShardHost2.toString()) << ShardType::maxSizeMB(kMaxSizeMB));
const BSONObj kShard3 =
- BSON(ShardType::name(kShardId3.toString()) << ShardType::host(kShardHost3.toString())
- << ShardType::maxSizeMB(kMaxSizeMB));
+ BSON(ShardType::name(kShardId3.toString())
+ << ShardType::host(kShardHost3.toString()) << ShardType::maxSizeMB(kMaxSizeMB));
const KeyPattern kKeyPattern = KeyPattern(BSON(kPattern << 1));
diff --git a/src/mongo/db/s/balancer/scoped_migration_request.cpp b/src/mongo/db/s/balancer/scoped_migration_request.cpp
index a0ef6dadf16..40441637ba4 100644
--- a/src/mongo/db/s/balancer/scoped_migration_request.cpp
+++ b/src/mongo/db/s/balancer/scoped_migration_request.cpp
@@ -118,8 +118,7 @@ StatusWith<ScopedMigrationRequest> ScopedMigrationRequest::writeMigration(
if (!statusWithMigrationQueryResult.isOK()) {
return statusWithMigrationQueryResult.getStatus().withContext(
str::stream() << "Failed to verify whether conflicting migration is in "
- << "progress for migration '"
- << redact(migrateInfo.toString())
+ << "progress for migration '" << redact(migrateInfo.toString())
<< "' while trying to query config.migrations.");
}
if (statusWithMigrationQueryResult.getValue().docs.empty()) {
@@ -134,11 +133,9 @@ StatusWith<ScopedMigrationRequest> ScopedMigrationRequest::writeMigration(
if (!statusWithActiveMigration.isOK()) {
return statusWithActiveMigration.getStatus().withContext(
str::stream() << "Failed to verify whether conflicting migration is in "
- << "progress for migration '"
- << redact(migrateInfo.toString())
+ << "progress for migration '" << redact(migrateInfo.toString())
<< "' while trying to parse active migration document '"
- << redact(activeMigrationBSON.toString())
- << "'.");
+ << redact(activeMigrationBSON.toString()) << "'.");
}
MigrateInfo activeMigrateInfo = statusWithActiveMigration.getValue().toMigrateInfo();
@@ -172,8 +169,7 @@ StatusWith<ScopedMigrationRequest> ScopedMigrationRequest::writeMigration(
str::stream() << "Failed to insert the config.migrations document after max "
<< "number of retries. Chunk '"
<< ChunkRange(migrateInfo.minKey, migrateInfo.maxKey).toString()
- << "' in collection '"
- << migrateInfo.nss.ns()
+ << "' in collection '" << migrateInfo.nss.ns()
<< "' was being moved (somewhere) by another operation.");
}
diff --git a/src/mongo/db/s/check_sharding_index_command.cpp b/src/mongo/db/s/check_sharding_index_command.cpp
index 88cd5d4b0eb..b921f530460 100644
--- a/src/mongo/db/s/check_sharding_index_command.cpp
+++ b/src/mongo/db/s/check_sharding_index_command.cpp
@@ -165,8 +165,8 @@ public:
BSONObjIterator i(currKey);
for (int k = 0; k < keyPatternLength; k++) {
if (!i.more()) {
- errmsg = str::stream() << "index key " << currKey << " too short for pattern "
- << keyPattern;
+ errmsg = str::stream()
+ << "index key " << currKey << " too short for pattern " << keyPattern;
return false;
}
BSONElement currKeyElt = i.next();
@@ -192,8 +192,9 @@ public:
const string msg = str::stream()
<< "There are documents which have missing or incomplete shard key fields ("
- << redact(currKey) << "). Please ensure that all documents in the collection "
- "include all fields from the shard key.";
+ << redact(currKey)
+ << "). Please ensure that all documents in the collection "
+ "include all fields from the shard key.";
log() << "checkShardingIndex for '" << nss.toString() << "' failed: " << msg;
errmsg = msg;
diff --git a/src/mongo/db/s/chunk_splitter.cpp b/src/mongo/db/s/chunk_splitter.cpp
index cfe972510a7..049ab0ae261 100644
--- a/src/mongo/db/s/chunk_splitter.cpp
+++ b/src/mongo/db/s/chunk_splitter.cpp
@@ -176,8 +176,7 @@ BSONObj findExtremeKeyForShard(OperationContext* opCtx,
uassert(40618,
str::stream() << "failed to initialize cursor during auto split due to "
- << "connection problem with "
- << client.getServerAddress(),
+ << "connection problem with " << client.getServerAddress(),
cursor.get() != nullptr);
if (cursor->more()) {
@@ -273,8 +272,8 @@ void ChunkSplitter::trySplitting(std::shared_ptr<ChunkSplitStateDriver> chunkSpl
return;
}
_threadPool.schedule(
- [ this, csd = std::move(chunkSplitStateDriver), nss, min, max, dataWritten ](
- auto status) noexcept {
+ [ this, csd = std::move(chunkSplitStateDriver), nss, min, max,
+ dataWritten ](auto status) noexcept {
invariant(status);
_runAutosplit(csd, nss, min, max, dataWritten);
@@ -384,7 +383,8 @@ void ChunkSplitter::_runAutosplit(std::shared_ptr<ChunkSplitStateDriver> chunkSp
log() << "autosplitted " << nss << " chunk: " << redact(chunk.toString()) << " into "
<< (splitPoints.size() + 1) << " parts (maxChunkSizeBytes " << maxChunkSizeBytes
<< ")"
- << (topChunkMinKey.isEmpty() ? "" : " (top chunk migration suggested" +
+ << (topChunkMinKey.isEmpty() ? ""
+ : " (top chunk migration suggested" +
(std::string)(shouldBalance ? ")" : ", but no migrations allowed)"));
// Because the ShardServerOpObserver uses the metadata from the CSS for tracking incoming
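
One exception stands out in the chunk_splitter.cpp hunk above: the lambda passed to _threadPool.schedule() is declared noexcept, and its capture list keeps the space-padded "[ this, ... ]" form even after reformatting, whereas ordinary lambdas elsewhere in the diff (for example in migration_manager.cpp) are tightened to "[this, ...]". A minimal self-contained sketch of the two forms, with hypothetical names; attributing the difference to clang-format 7's handling of noexcept lambdas is an inference from the diff, not documented behavior.

#include <functional>
#include <iostream>

void schedule(const std::function<void(int)>& fn) { fn(0); }

int main() {
    int min = 0, max = 10;

    // Ordinary lambda: clang-format 7 emits the tight capture list "[min, max](".
    schedule([min, max](int status) { std::cout << min << " " << max << " " << status << '\n'; });

    // noexcept lambda: the padded "[ min, max ](" form survives in this commit.
    schedule([ min, max ](int status) noexcept { std::cout << min << " " << max << " " << status << '\n'; });
    return 0;
}
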
diff --git a/src/mongo/db/s/cleanup_orphaned_cmd.cpp b/src/mongo/db/s/cleanup_orphaned_cmd.cpp
index 28eab0d23bb..303c8a7a602 100644
--- a/src/mongo/db/s/cleanup_orphaned_cmd.cpp
+++ b/src/mongo/db/s/cleanup_orphaned_cmd.cpp
@@ -89,9 +89,9 @@ CleanupResult cleanupOrphanedData(OperationContext* opCtx,
BSONObj keyPattern = metadata->getKeyPattern();
if (!startingFromKey.isEmpty()) {
if (!metadata->isValidKey(startingFromKey)) {
- *errMsg = str::stream() << "could not cleanup orphaned data, start key "
- << startingFromKey << " does not match shard key pattern "
- << keyPattern;
+ *errMsg = str::stream()
+ << "could not cleanup orphaned data, start key " << startingFromKey
+ << " does not match shard key pattern " << keyPattern;
log() << *errMsg;
return CleanupResult_Error;
diff --git a/src/mongo/db/s/collection_metadata.cpp b/src/mongo/db/s/collection_metadata.cpp
index bb9063b8395..aca45d987ae 100644
--- a/src/mongo/db/s/collection_metadata.cpp
+++ b/src/mongo/db/s/collection_metadata.cpp
@@ -132,8 +132,7 @@ Status CollectionMetadata::checkChunkIsValid(const ChunkType& chunk) const {
return {ErrorCodes::StaleShardVersion,
str::stream() << "Unable to find chunk with the exact bounds "
<< ChunkRange(chunk.getMin(), chunk.getMax()).toString()
- << " at collection version "
- << getCollVersion().toString()};
+ << " at collection version " << getCollVersion().toString()};
}
return Status::OK();
diff --git a/src/mongo/db/s/collection_metadata_filtering_test.cpp b/src/mongo/db/s/collection_metadata_filtering_test.cpp
index d125e651adc..34ff588020f 100644
--- a/src/mongo/db/s/collection_metadata_filtering_test.cpp
+++ b/src/mongo/db/s/collection_metadata_filtering_test.cpp
@@ -132,8 +132,7 @@ TEST_F(CollectionMetadataFilteringTest, FilterDocumentsInTheFuture) {
{
BSONObj readConcern = BSON("readConcern" << BSON("level"
<< "snapshot"
- << "atClusterTime"
- << Timestamp(100, 0)));
+ << "atClusterTime" << Timestamp(100, 0)));
auto&& readConcernArgs = repl::ReadConcernArgs::get(operationContext());
ASSERT_OK(readConcernArgs.initialize(readConcern["readConcern"]));
@@ -163,8 +162,7 @@ TEST_F(CollectionMetadataFilteringTest, FilterDocumentsInThePast) {
{
BSONObj readConcern = BSON("readConcern" << BSON("level"
<< "snapshot"
- << "atClusterTime"
- << Timestamp(50, 0)));
+ << "atClusterTime" << Timestamp(50, 0)));
auto&& readConcernArgs = repl::ReadConcernArgs::get(operationContext());
ASSERT_OK(readConcernArgs.initialize(readConcern["readConcern"]));
@@ -202,8 +200,7 @@ TEST_F(CollectionMetadataFilteringTest, FilterDocumentsTooFarInThePastThrowsStal
{
BSONObj readConcern = BSON("readConcern" << BSON("level"
<< "snapshot"
- << "atClusterTime"
- << Timestamp(10, 0)));
+ << "atClusterTime" << Timestamp(10, 0)));
auto&& readConcernArgs = repl::ReadConcernArgs::get(operationContext());
ASSERT_OK(readConcernArgs.initialize(readConcern["readConcern"]));
diff --git a/src/mongo/db/s/collection_metadata_test.cpp b/src/mongo/db/s/collection_metadata_test.cpp
index ac36558a234..035bd4777f8 100644
--- a/src/mongo/db/s/collection_metadata_test.cpp
+++ b/src/mongo/db/s/collection_metadata_test.cpp
@@ -112,8 +112,7 @@ TEST_F(NoChunkFixture, IsValidKey) {
ASSERT(makeCollectionMetadata()->isValidKey(BSON("a" << 3)));
ASSERT(!makeCollectionMetadata()->isValidKey(BSON("a"
<< "abcde"
- << "b"
- << 1)));
+ << "b" << 1)));
ASSERT(!makeCollectionMetadata()->isValidKey(BSON("c"
<< "abcde")));
}
diff --git a/src/mongo/db/s/collection_range_deleter.cpp b/src/mongo/db/s/collection_range_deleter.cpp
index 1b63c1ce74c..d5affc26cc0 100644
--- a/src/mongo/db/s/collection_range_deleter.cpp
+++ b/src/mongo/db/s/collection_range_deleter.cpp
@@ -178,14 +178,8 @@ boost::optional<Date_t> CollectionRangeDeleter::cleanUpNextRange(
NamespaceString::kServerConfigurationNamespace.ns(),
BSON("_id"
<< "startRangeDeletion"
- << "ns"
- << nss.ns()
- << "epoch"
- << epoch
- << "min"
- << range->getMin()
- << "max"
- << range->getMax()));
+ << "ns" << nss.ns() << "epoch" << epoch << "min"
+ << range->getMin() << "max" << range->getMax()));
} catch (const DBException& e) {
stdx::lock_guard<stdx::mutex> scopedLock(csr->_metadataManager->_managerLock);
csr->_metadataManager->_clearAllCleanups(
@@ -354,8 +348,8 @@ StatusWith<int> CollectionRangeDeleter::_doDeletion(OperationContext* opCtx,
auto catalog = collection->getIndexCatalog();
const IndexDescriptor* idx = catalog->findShardKeyPrefixedIndex(opCtx, keyPattern, false);
if (!idx) {
- std::string msg = str::stream() << "Unable to find shard key index for "
- << keyPattern.toString() << " in " << nss.ns();
+ std::string msg = str::stream()
+ << "Unable to find shard key index for " << keyPattern.toString() << " in " << nss.ns();
LOG(0) << msg;
return {ErrorCodes::InternalError, msg};
}
@@ -375,8 +369,8 @@ StatusWith<int> CollectionRangeDeleter::_doDeletion(OperationContext* opCtx,
const IndexDescriptor* descriptor =
collection->getIndexCatalog()->findIndexByName(opCtx, indexName);
if (!descriptor) {
- std::string msg = str::stream() << "shard key index with name " << indexName << " on '"
- << nss.ns() << "' was dropped";
+ std::string msg = str::stream()
+ << "shard key index with name " << indexName << " on '" << nss.ns() << "' was dropped";
LOG(0) << msg;
return {ErrorCodes::InternalError, msg};
}
diff --git a/src/mongo/db/s/collection_range_deleter.h b/src/mongo/db/s/collection_range_deleter.h
index 6fae0ee5d18..0ebc79ac8a6 100644
--- a/src/mongo/db/s/collection_range_deleter.h
+++ b/src/mongo/db/s/collection_range_deleter.h
@@ -59,14 +59,14 @@ class CollectionRangeDeleter {
public:
/**
- * This is an object n that asynchronously changes state when a scheduled range deletion
- * completes or fails. Call n.ready() to discover if the event has already occurred. Call
- * n.waitStatus(opCtx) to sleep waiting for the event, and get its result. If the wait is
- * interrupted, waitStatus throws.
- *
- * It is an error to destroy a returned CleanupNotification object n unless either n.ready()
- * is true or n.abandon() has been called. After n.abandon(), n is in a moved-from state.
- */
+ * This is an object n that asynchronously changes state when a scheduled range deletion
+ * completes or fails. Call n.ready() to discover if the event has already occurred. Call
+ * n.waitStatus(opCtx) to sleep waiting for the event, and get its result. If the wait is
+ * interrupted, waitStatus throws.
+ *
+ * It is an error to destroy a returned CleanupNotification object n unless either n.ready()
+ * is true or n.abandon() has been called. After n.abandon(), n is in a moved-from state.
+ */
class DeleteNotification {
public:
DeleteNotification();
diff --git a/src/mongo/db/s/collection_sharding_runtime.cpp b/src/mongo/db/s/collection_sharding_runtime.cpp
index fd09b44ace5..a772a28c8d7 100644
--- a/src/mongo/db/s/collection_sharding_runtime.cpp
+++ b/src/mongo/db/s/collection_sharding_runtime.cpp
@@ -163,8 +163,7 @@ Status CollectionShardingRuntime::waitForClean(OperationContext* opCtx,
Status result = stillScheduled->waitStatus(opCtx);
if (!result.isOK()) {
return result.withContext(str::stream() << "Failed to delete orphaned " << nss.ns()
- << " range "
- << orphanRange.toString());
+ << " range " << orphanRange.toString());
}
}
diff --git a/src/mongo/db/s/collection_sharding_state_test.cpp b/src/mongo/db/s/collection_sharding_state_test.cpp
index d085f9440f3..1ee6cfbeed8 100644
--- a/src/mongo/db/s/collection_sharding_state_test.cpp
+++ b/src/mongo/db/s/collection_sharding_state_test.cpp
@@ -80,12 +80,9 @@ TEST_F(DeleteStateTest, MakeDeleteStateUnsharded) {
auto doc = BSON("key3"
<< "abc"
- << "key"
- << 3
- << "_id"
+ << "key" << 3 << "_id"
<< "hello"
- << "key2"
- << true);
+ << "key2" << true);
// Check that an order for deletion from an unsharded collection extracts just the "_id" field
ASSERT_BSONOBJ_EQ(OpObserverImpl::getDocumentKey(operationContext(), kTestNss, doc),
@@ -103,12 +100,9 @@ TEST_F(DeleteStateTest, MakeDeleteStateShardedWithoutIdInShardKey) {
// The order of fields in `doc` deliberately does not match the shard key
auto doc = BSON("key3"
<< "abc"
- << "key"
- << 100
- << "_id"
+ << "key" << 100 << "_id"
<< "hello"
- << "key2"
- << true);
+ << "key2" << true);
// Verify the shard key is extracted, in correct order, followed by the "_id" field.
ASSERT_BSONOBJ_EQ(OpObserverImpl::getDocumentKey(operationContext(), kTestNss, doc),
@@ -130,15 +124,13 @@ TEST_F(DeleteStateTest, MakeDeleteStateShardedWithIdInShardKey) {
<< "abc"
<< "_id"
<< "hello"
- << "key"
- << 100);
+ << "key" << 100);
// Verify the shard key is extracted with "_id" in the right place.
ASSERT_BSONOBJ_EQ(OpObserverImpl::getDocumentKey(operationContext(), kTestNss, doc),
BSON("key" << 100 << "_id"
<< "hello"
- << "key2"
- << true));
+ << "key2" << true));
ASSERT_FALSE(OpObserverShardingImpl::isMigrating(operationContext(), kTestNss, doc));
}
@@ -151,8 +143,7 @@ TEST_F(DeleteStateTest, MakeDeleteStateShardedWithIdHashInShardKey) {
auto doc = BSON("key2" << true << "_id"
<< "hello"
- << "key"
- << 100);
+ << "key" << 100);
// Verify the shard key is extracted with "_id" in the right place, not hashed.
ASSERT_BSONOBJ_EQ(OpObserverImpl::getDocumentKey(operationContext(), kTestNss, doc),
diff --git a/src/mongo/db/s/config/configsvr_enable_sharding_command.cpp b/src/mongo/db/s/config/configsvr_enable_sharding_command.cpp
index b1c3717f3ff..e9ca1356b62 100644
--- a/src/mongo/db/s/config/configsvr_enable_sharding_command.cpp
+++ b/src/mongo/db/s/config/configsvr_enable_sharding_command.cpp
@@ -50,8 +50,8 @@
namespace mongo {
-using std::shared_ptr;
using std::set;
+using std::shared_ptr;
using std::string;
namespace {
diff --git a/src/mongo/db/s/config/configsvr_move_primary_command.cpp b/src/mongo/db/s/config/configsvr_move_primary_command.cpp
index eea3b876e46..fe5c843303e 100644
--- a/src/mongo/db/s/config/configsvr_move_primary_command.cpp
+++ b/src/mongo/db/s/config/configsvr_move_primary_command.cpp
@@ -160,10 +160,9 @@ public:
if (!toShardStatus.isOK()) {
log() << "Could not move database '" << dbname << "' to shard '" << to
<< causedBy(toShardStatus.getStatus());
- uassertStatusOKWithContext(
- toShardStatus.getStatus(),
- str::stream() << "Could not move database '" << dbname << "' to shard '" << to
- << "'");
+ uassertStatusOKWithContext(toShardStatus.getStatus(),
+ str::stream() << "Could not move database '" << dbname
+ << "' to shard '" << to << "'");
}
return toShardStatus.getValue();
diff --git a/src/mongo/db/s/config/configsvr_refine_collection_shard_key_command.cpp b/src/mongo/db/s/config/configsvr_refine_collection_shard_key_command.cpp
index 21901105103..ff1334ef1ed 100644
--- a/src/mongo/db/s/config/configsvr_refine_collection_shard_key_command.cpp
+++ b/src/mongo/db/s/config/configsvr_refine_collection_shard_key_command.cpp
@@ -96,8 +96,7 @@ public:
uassert(ErrorCodes::StaleEpoch,
str::stream()
- << "refineCollectionShardKey namespace "
- << nss.toString()
+ << "refineCollectionShardKey namespace " << nss.toString()
<< " has a different epoch than mongos had in its routing table cache",
request().getEpoch() == collType.getEpoch());
diff --git a/src/mongo/db/s/config/configsvr_remove_shard_command.cpp b/src/mongo/db/s/config/configsvr_remove_shard_command.cpp
index 2f39f852bc8..5186128ef8c 100644
--- a/src/mongo/db/s/config/configsvr_remove_shard_command.cpp
+++ b/src/mongo/db/s/config/configsvr_remove_shard_command.cpp
@@ -112,8 +112,8 @@ public:
const auto shardStatus =
Grid::get(opCtx)->shardRegistry()->getShard(opCtx, ShardId(target));
if (!shardStatus.isOK()) {
- std::string msg(str::stream() << "Could not drop shard '" << target
- << "' because it does not exist");
+ std::string msg(str::stream()
+ << "Could not drop shard '" << target << "' because it does not exist");
log() << msg;
uasserted(ErrorCodes::ShardNotFound, msg);
}
diff --git a/src/mongo/db/s/config/configsvr_shard_collection_command.cpp b/src/mongo/db/s/config/configsvr_shard_collection_command.cpp
index e53552916d8..216d3bbaa2c 100644
--- a/src/mongo/db/s/config/configsvr_shard_collection_command.cpp
+++ b/src/mongo/db/s/config/configsvr_shard_collection_command.cpp
@@ -99,8 +99,7 @@ void validateAndDeduceFullRequestOptions(OperationContext* opCtx,
CollatorFactoryInterface::get(opCtx->getServiceContext())->makeFromBSON(collation));
uassert(ErrorCodes::BadValue,
str::stream() << "The collation for shardCollection must be {locale: 'simple'}, "
- << "but found: "
- << collation,
+ << "but found: " << collation,
!collator);
simpleCollationSpecified = true;
}
@@ -114,8 +113,7 @@ void validateAndDeduceFullRequestOptions(OperationContext* opCtx,
int numChunks = request->getNumInitialChunks();
uassert(ErrorCodes::InvalidOptions,
str::stream() << "numInitialChunks cannot be more than either: "
- << maxNumInitialChunksForShards
- << ", 8192 * number of shards; or "
+ << maxNumInitialChunksForShards << ", 8192 * number of shards; or "
<< maxNumInitialChunksTotal,
numChunks >= 0 && numChunks <= maxNumInitialChunksForShards &&
numChunks <= maxNumInitialChunksTotal);
@@ -208,9 +206,7 @@ void migrateAndFurtherSplitInitialChunks(OperationContext* opCtx,
auto chunkManager = routingInfo.cm();
// Move and commit each "big chunk" to a different shard.
- auto nextShardId = [&, indx = 0 ]() mutable {
- return shardIds[indx++ % shardIds.size()];
- };
+ auto nextShardId = [&, indx = 0]() mutable { return shardIds[indx++ % shardIds.size()]; };
for (auto chunk : chunkManager->chunks()) {
const auto shardId = nextShardId();
@@ -323,10 +319,7 @@ boost::optional<UUID> getUUIDFromPrimaryShard(OperationContext* opCtx,
uassert(ErrorCodes::InternalError,
str::stream() << "expected the primary shard host " << primaryShard->getConnString()
- << " for database "
- << nss.db()
- << " to return an entry for "
- << nss.ns()
+ << " for database " << nss.db() << " to return an entry for " << nss.ns()
<< " in its listCollections response, but it did not",
!res.isEmpty());
@@ -338,15 +331,12 @@ boost::optional<UUID> getUUIDFromPrimaryShard(OperationContext* opCtx,
uassert(ErrorCodes::InternalError,
str::stream() << "expected primary shard to return 'info' field as part of "
"listCollections for "
- << nss.ns()
- << ", but got "
- << res,
+ << nss.ns() << ", but got " << res,
!collectionInfo.isEmpty());
uassert(ErrorCodes::InternalError,
str::stream() << "expected primary shard to return a UUID for collection " << nss.ns()
- << " as part of 'info' field but got "
- << res,
+ << " as part of 'info' field but got " << res,
collectionInfo.hasField("uuid"));
return uassertStatusOK(UUID::parse(collectionInfo["uuid"]));
@@ -576,8 +566,7 @@ public:
if (fromMapReduce) {
uassert(ErrorCodes::ConflictingOperationInProgress,
str::stream() << "Map reduce with sharded output to a new collection found "
- << nss.ns()
- << " to be non-empty which is not supported.",
+ << nss.ns() << " to be non-empty which is not supported.",
isEmpty);
}
diff --git a/src/mongo/db/s/config/initial_split_policy.cpp b/src/mongo/db/s/config/initial_split_policy.cpp
index 71931babb73..9d882e45678 100644
--- a/src/mongo/db/s/config/initial_split_policy.cpp
+++ b/src/mongo/db/s/config/initial_split_policy.cpp
@@ -223,7 +223,7 @@ InitialSplitPolicy::generateShardCollectionInitialZonedChunks(
const auto& keyPattern = shardKeyPattern.getKeyPattern();
- auto nextShardIdForHole = [&, indx = 0 ]() mutable {
+ auto nextShardIdForHole = [&, indx = 0]() mutable {
return shardIdsForGaps[indx++ % shardIdsForGaps.size()];
};
@@ -250,10 +250,7 @@ InitialSplitPolicy::generateShardCollectionInitialZonedChunks(
const auto& shardIdsForChunk = it->second;
uassert(50973,
str::stream()
- << "Cannot shard collection "
- << nss.ns()
- << " due to zone "
- << tag.getTag()
+ << "Cannot shard collection " << nss.ns() << " due to zone " << tag.getTag()
<< " which is not assigned to a shard. Please assign this zone to a shard.",
!shardIdsForChunk.empty());
@@ -396,7 +393,7 @@ InitialSplitPolicy::ShardCollectionConfig InitialSplitPolicy::createFirstChunksU
shardSelectedSplitPoints,
shardIds,
1 // numContiguousChunksPerShard
- );
+ );
}
boost::optional<CollectionType> InitialSplitPolicy::checkIfCollectionAlreadyShardedWithSameOptions(
@@ -425,8 +422,7 @@ boost::optional<CollectionType> InitialSplitPolicy::checkIfCollectionAlreadyShar
// match the options the collection was originally sharded with.
uassert(ErrorCodes::AlreadyInitialized,
str::stream() << "sharding already enabled for collection " << nss.ns()
- << " with options "
- << existingOptions.toString(),
+ << " with options " << existingOptions.toString(),
requestedOptions.hasSameOptions(existingOptions));
return existingOptions;
diff --git a/src/mongo/db/s/config/sharding_catalog_manager.cpp b/src/mongo/db/s/config/sharding_catalog_manager.cpp
index fc610ed35a3..424db73a9d0 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager.cpp
@@ -198,8 +198,7 @@ Status ShardingCatalogManager::_initConfigVersion(OperationContext* opCtx) {
if (versionInfo.getCurrentVersion() < CURRENT_CONFIG_VERSION) {
return {ErrorCodes::IncompatibleShardingConfigVersion,
str::stream() << "need to upgrade current cluster version to v"
- << CURRENT_CONFIG_VERSION
- << "; currently at v"
+ << CURRENT_CONFIG_VERSION << "; currently at v"
<< versionInfo.getCurrentVersion()};
}
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_add_shard_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_add_shard_test.cpp
index eee16cc6aa5..e92588cbe07 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_add_shard_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_add_shard_test.cpp
@@ -129,8 +129,9 @@ protected:
ASSERT_EQ(request.target, target);
ASSERT_EQ(request.dbname, nss.db());
ASSERT_BSONOBJ_EQ(request.cmdObj,
- BSON("drop" << nss.coll() << "writeConcern" << BSON("w"
- << "majority")));
+ BSON("drop" << nss.coll() << "writeConcern"
+ << BSON("w"
+ << "majority")));
ASSERT_BSONOBJ_EQ(rpc::makeEmptyMetadata(), request.metadata);
return BSON("ok" << 1);
@@ -146,8 +147,7 @@ protected:
ASSERT_BSONOBJ_EQ(request.cmdObj,
BSON("setFeatureCompatibilityVersion"
<< "4.2"
- << "writeConcern"
- << writeConcern));
+ << "writeConcern" << writeConcern));
return response;
});
@@ -315,18 +315,16 @@ protected:
* describing the addShard request for 'addedShard'.
*/
void assertChangeWasLogged(const ShardType& addedShard) {
- auto response = assertGet(
- getConfigShard()->exhaustiveFindOnConfig(operationContext(),
- ReadPreferenceSetting{
- ReadPreference::PrimaryOnly},
- repl::ReadConcernLevel::kLocalReadConcern,
- NamespaceString("config.changelog"),
- BSON("what"
- << "addShard"
- << "details.name"
- << addedShard.getName()),
- BSONObj(),
- 1));
+ auto response = assertGet(getConfigShard()->exhaustiveFindOnConfig(
+ operationContext(),
+ ReadPreferenceSetting{ReadPreference::PrimaryOnly},
+ repl::ReadConcernLevel::kLocalReadConcern,
+ NamespaceString("config.changelog"),
+ BSON("what"
+ << "addShard"
+ << "details.name" << addedShard.getName()),
+ BSONObj(),
+ 1));
ASSERT_EQ(1U, response.docs.size());
auto logEntryBSON = response.docs.front();
auto logEntry = assertGet(ChangeLogType::fromBSON(logEntryBSON));
@@ -347,35 +345,24 @@ protected:
TEST_F(AddShardTest, CreateShardIdentityUpsertForAddShard) {
std::string shardName = "shardName";
- BSONObj expectedBSON = BSON("update"
- << "system.version"
- << "bypassDocumentValidation"
- << false
- << "ordered"
- << true
- << "updates"
- << BSON_ARRAY(BSON(
- "q"
- << BSON("_id"
- << "shardIdentity")
- << "u"
- << BSON("shardName" << shardName << "clusterId" << _clusterId
- << "configsvrConnectionString"
- << replicationCoordinator()
- ->getConfig()
- .getConnectionString()
- .toString())
- << "multi"
- << false
- << "upsert"
- << true))
- << "writeConcern"
- << BSON("w"
- << "majority"
- << "wtimeout"
- << 60000)
- << "allowImplicitCollectionCreation"
- << true);
+ BSONObj expectedBSON = BSON(
+ "update"
+ << "system.version"
+ << "bypassDocumentValidation" << false << "ordered" << true << "updates"
+ << BSON_ARRAY(BSON(
+ "q" << BSON("_id"
+ << "shardIdentity")
+ << "u"
+ << BSON(
+ "shardName"
+ << shardName << "clusterId" << _clusterId << "configsvrConnectionString"
+ << replicationCoordinator()->getConfig().getConnectionString().toString())
+ << "multi" << false << "upsert" << true))
+ << "writeConcern"
+ << BSON("w"
+ << "majority"
+ << "wtimeout" << 60000)
+ << "allowImplicitCollectionCreation" << true);
auto addShardCmd = add_shard_util::createAddShardCmd(operationContext(), shardName);
auto actualBSON = add_shard_util::createShardIdentityUpsertForAddShard(addShardCmd);
ASSERT_BSONOBJ_EQ(expectedBSON, actualBSON);
@@ -427,8 +414,7 @@ TEST_F(AddShardTest, StandaloneBasicSuccess) {
shardTarget,
std::vector<BSONObj>{BSON("name"
<< "local"
- << "sizeOnDisk"
- << 1000),
+ << "sizeOnDisk" << 1000),
BSON("name" << discoveredDB1.getName() << "sizeOnDisk" << 2000),
BSON("name" << discoveredDB2.getName() << "sizeOnDisk" << 5000)});
@@ -508,8 +494,7 @@ TEST_F(AddShardTest, StandaloneGenerateName) {
shardTarget,
std::vector<BSONObj>{BSON("name"
<< "local"
- << "sizeOnDisk"
- << 1000),
+ << "sizeOnDisk" << 1000),
BSON("name" << discoveredDB1.getName() << "sizeOnDisk" << 2000),
BSON("name" << discoveredDB2.getName() << "sizeOnDisk" << 5000)});
@@ -648,8 +633,7 @@ TEST_F(AddShardTest, AddReplicaSetShardAsStandalone) {
BSONObj commandResponse = BSON("ok" << 1 << "ismaster" << true << "setName"
<< "myOtherSet"
- << "maxWireVersion"
- << WireVersion::LATEST_WIRE_VERSION);
+ << "maxWireVersion" << WireVersion::LATEST_WIRE_VERSION);
expectIsMaster(shardTarget, commandResponse);
future.timed_get(kLongFutureTimeout);
@@ -706,8 +690,7 @@ TEST_F(AddShardTest, ReplicaSetMistmatchedReplicaSetName) {
BSONObj commandResponse = BSON("ok" << 1 << "ismaster" << true << "setName"
<< "myOtherSet"
- << "maxWireVersion"
- << WireVersion::LATEST_WIRE_VERSION);
+ << "maxWireVersion" << WireVersion::LATEST_WIRE_VERSION);
expectIsMaster(shardTarget, commandResponse);
future.timed_get(kLongFutureTimeout);
@@ -735,12 +718,10 @@ TEST_F(AddShardTest, ShardIsCSRSConfigServer) {
"as a shard since it is a config server");
});
- BSONObj commandResponse = BSON("ok" << 1 << "ismaster" << true << "setName"
- << "config"
- << "configsvr"
- << true
- << "maxWireVersion"
- << WireVersion::LATEST_WIRE_VERSION);
+ BSONObj commandResponse =
+ BSON("ok" << 1 << "ismaster" << true << "setName"
+ << "config"
+ << "configsvr" << true << "maxWireVersion" << WireVersion::LATEST_WIRE_VERSION);
expectIsMaster(shardTarget, commandResponse);
future.timed_get(kLongFutureTimeout);
@@ -772,9 +753,7 @@ TEST_F(AddShardTest, ReplicaSetMissingHostsProvidedInSeedList) {
hosts.append("host1:12345");
BSONObj commandResponse = BSON("ok" << 1 << "ismaster" << true << "setName"
<< "mySet"
- << "hosts"
- << hosts.arr()
- << "maxWireVersion"
+ << "hosts" << hosts.arr() << "maxWireVersion"
<< WireVersion::LATEST_WIRE_VERSION);
expectIsMaster(shardTarget, commandResponse);
@@ -808,9 +787,7 @@ TEST_F(AddShardTest, AddShardWithNameConfigFails) {
hosts.append("host2:12345");
BSONObj commandResponse = BSON("ok" << 1 << "ismaster" << true << "setName"
<< "mySet"
- << "hosts"
- << hosts.arr()
- << "maxWireVersion"
+ << "hosts" << hosts.arr() << "maxWireVersion"
<< WireVersion::LATEST_WIRE_VERSION);
expectIsMaster(shardTarget, commandResponse);
@@ -855,9 +832,7 @@ TEST_F(AddShardTest, ShardContainsExistingDatabase) {
hosts.append("host2:12345");
BSONObj commandResponse = BSON("ok" << 1 << "ismaster" << true << "setName"
<< "mySet"
- << "hosts"
- << hosts.arr()
- << "maxWireVersion"
+ << "hosts" << hosts.arr() << "maxWireVersion"
<< WireVersion::LATEST_WIRE_VERSION);
expectIsMaster(shardTarget, commandResponse);
@@ -900,9 +875,7 @@ TEST_F(AddShardTest, SuccessfullyAddReplicaSet) {
hosts.append("host2:12345");
BSONObj commandResponse = BSON("ok" << 1 << "ismaster" << true << "setName"
<< "mySet"
- << "hosts"
- << hosts.arr()
- << "maxWireVersion"
+ << "hosts" << hosts.arr() << "maxWireVersion"
<< WireVersion::LATEST_WIRE_VERSION);
expectIsMaster(shardTarget, commandResponse);
@@ -966,9 +939,7 @@ TEST_F(AddShardTest, ReplicaSetExtraHostsDiscovered) {
hosts.append("host2:12345");
BSONObj commandResponse = BSON("ok" << 1 << "ismaster" << true << "setName"
<< "mySet"
- << "hosts"
- << hosts.arr()
- << "maxWireVersion"
+ << "hosts" << hosts.arr() << "maxWireVersion"
<< WireVersion::LATEST_WIRE_VERSION);
expectIsMaster(shardTarget, commandResponse);
@@ -1049,8 +1020,7 @@ TEST_F(AddShardTest, AddShardSucceedsEvenIfAddingDBsFromNewShardFails) {
shardTarget,
std::vector<BSONObj>{BSON("name"
<< "local"
- << "sizeOnDisk"
- << 1000),
+ << "sizeOnDisk" << 1000),
BSON("name" << discoveredDB1.getName() << "sizeOnDisk" << 2000),
BSON("name" << discoveredDB2.getName() << "sizeOnDisk" << 5000)});
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
index 0936c9fbb55..4423f7ba458 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
@@ -129,8 +129,7 @@ BSONArray buildMergeChunksTransactionPrecond(const std::vector<ChunkType>& chunk
BSON("query" << BSON(ChunkType::ns(chunk.getNS().ns())
<< ChunkType::min(chunk.getMin())
<< ChunkType::max(chunk.getMax()))
- << "orderby"
- << BSON(ChunkType::lastmod() << -1)));
+ << "orderby" << BSON(ChunkType::lastmod() << -1)));
b.append("res",
BSON(ChunkType::epoch(collVersion.epoch())
<< ChunkType::shard(chunk.getShard().toString())));
@@ -146,8 +145,7 @@ Status checkChunkIsOnShard(OperationContext* opCtx,
const ShardId& shard) {
BSONObj chunkQuery =
BSON(ChunkType::ns() << nss.ns() << ChunkType::min() << min << ChunkType::max() << max
- << ChunkType::shard()
- << shard);
+ << ChunkType::shard() << shard);
// Must use local read concern because we're going to perform subsequent writes.
auto findResponseWith =
@@ -166,8 +164,7 @@ Status checkChunkIsOnShard(OperationContext* opCtx,
if (findResponseWith.getValue().docs.empty()) {
return {ErrorCodes::Error(40165),
str::stream()
- << "Could not find the chunk ("
- << chunkQuery.toString()
+ << "Could not find the chunk (" << chunkQuery.toString()
<< ") on the shard. Cannot execute the migration commit with invalid chunks."};
}
@@ -321,13 +318,9 @@ Status ShardingCatalogManager::commitChunkSplit(OperationContext* opCtx,
if (collVersion.epoch() != requestEpoch) {
return {ErrorCodes::StaleEpoch,
str::stream() << "splitChunk cannot split chunk " << range.toString()
- << ". Collection '"
- << nss.ns()
- << "' was dropped and re-created."
- << " Current epoch: "
- << collVersion.epoch()
- << ", cmd epoch: "
- << requestEpoch};
+ << ". Collection '" << nss.ns() << "' was dropped and re-created."
+ << " Current epoch: " << collVersion.epoch()
+ << ", cmd epoch: " << requestEpoch};
}
// Get the shard version (max chunk version) for the shard requesting the split.
@@ -387,18 +380,14 @@ Status ShardingCatalogManager::commitChunkSplit(OperationContext* opCtx,
return {
ErrorCodes::InvalidOptions,
str::stream() << "Split keys must be specified in strictly increasing order. Key "
- << endKey
- << " was specified after "
- << startKey
- << "."};
+ << endKey << " was specified after " << startKey << "."};
}
// Verify that splitPoints are not repeated
if (endKey.woCompare(startKey) == 0) {
return {ErrorCodes::InvalidOptions,
str::stream() << "Split on lower bound of chunk "
- << ChunkRange(startKey, endKey).toString()
- << "is not allowed"};
+ << ChunkRange(startKey, endKey).toString() << "is not allowed"};
}
// verify that splits don't create too-big shard keys
@@ -468,10 +457,8 @@ Status ShardingCatalogManager::commitChunkSplit(OperationContext* opCtx,
b.append("ns", ChunkType::ConfigNS.ns());
b.append("q",
BSON("query" << BSON(ChunkType::ns(nss.ns()) << ChunkType::min() << range.getMin()
- << ChunkType::max()
- << range.getMax())
- << "orderby"
- << BSON(ChunkType::lastmod() << -1)));
+ << ChunkType::max() << range.getMax())
+ << "orderby" << BSON(ChunkType::lastmod() << -1)));
{
BSONObjBuilder bb(b.subobjStart("res"));
bb.append(ChunkType::epoch(), requestEpoch);
@@ -598,10 +585,7 @@ Status ShardingCatalogManager::commitChunkMerge(OperationContext* opCtx,
ErrorCodes::InvalidOptions,
str::stream()
<< "Chunk boundaries must be specified in strictly increasing order. Boundary "
- << chunkBoundaries[i]
- << " was specified after "
- << itChunk.getMin()
- << "."};
+ << chunkBoundaries[i] << " was specified after " << itChunk.getMin() << "."};
}
itChunk.setMax(chunkBoundaries[i]);
@@ -714,11 +698,9 @@ StatusWith<BSONObj> ShardingCatalogManager::commitChunkMigration(
<< "' has been dropped and recreated since the migration began."
" The config server's collection version epoch is now '"
<< currentCollectionVersion.epoch().toString()
- << "', but the shard's is "
- << collectionEpoch.toString()
+ << "', but the shard's is " << collectionEpoch.toString()
<< "'. Aborting migration commit for chunk ("
- << migratedChunk.getRange().toString()
- << ")."};
+ << migratedChunk.getRange().toString() << ")."};
}
// Check that migratedChunk is where it should be, on fromShard.
@@ -762,8 +744,7 @@ StatusWith<BSONObj> ShardingCatalogManager::commitChunkMigration(
if (!newHistory.empty() && newHistory.front().getValidAfter() >= validAfter.get()) {
return {ErrorCodes::IncompatibleShardingMetadata,
str::stream() << "The chunk history for chunk with namespace " << nss.ns()
- << " and min key "
- << migratedChunk.getMin()
+ << " and min key " << migratedChunk.getMin()
<< " is corrupted. The last validAfter "
<< newHistory.back().getValidAfter().toString()
<< " is greater or equal to the new validAfter "
@@ -837,9 +818,7 @@ StatusWith<ChunkType> ShardingCatalogManager::_findChunkOnConfig(OperationContex
if (origChunks.size() != 1) {
return {ErrorCodes::IncompatibleShardingMetadata,
str::stream() << "Tried to find the chunk for namespace " << nss.ns()
- << " and min key "
- << key.toString()
- << ", but found no chunks"};
+ << " and min key " << key.toString() << ", but found no chunks"};
}
return ChunkType::fromConfigBSON(origChunks.front());
@@ -886,9 +865,7 @@ StatusWith<ChunkVersion> ShardingCatalogManager::_findCollectionVersion(
<< "' has been dropped and recreated since the migration began."
" The config server's collection version epoch is now '"
<< currentCollectionVersion.epoch().toString()
- << "', but the shard's is "
- << collectionEpoch.toString()
- << "'."};
+ << "', but the shard's is " << collectionEpoch.toString() << "'."};
}
return currentCollectionVersion;
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_collection_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_collection_operations.cpp
index 5993661a884..2192eaa4599 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_collection_operations.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_collection_operations.cpp
@@ -75,9 +75,9 @@
namespace mongo {
using CollectionUUID = UUID;
+using std::set;
using std::string;
using std::vector;
-using std::set;
namespace {
@@ -113,8 +113,8 @@ boost::optional<UUID> checkCollectionOptions(OperationContext* opCtx,
// TODO: SERVER-33048 check idIndex field
uassert(ErrorCodes::NamespaceExists,
- str::stream() << "ns: " << ns.ns() << " already exists with different options: "
- << actualOptions.toBSON(),
+ str::stream() << "ns: " << ns.ns()
+ << " already exists with different options: " << actualOptions.toBSON(),
options.matchesStorageOptions(
actualOptions, CollatorFactoryInterface::get(opCtx->getServiceContext())));
@@ -170,8 +170,7 @@ void checkForExistingChunks(OperationContext* opCtx, const NamespaceString& nss)
str::stream() << "A previous attempt to shard collection " << nss.ns()
<< " failed after writing some initial chunks to config.chunks. Please "
"manually delete the partially written chunks for collection "
- << nss.ns()
- << " from config.chunks",
+ << nss.ns() << " from config.chunks",
numChunks == 0);
}
@@ -432,7 +431,7 @@ void ShardingCatalogManager::shardCollection(OperationContext* opCtx,
optimizationType,
treatAsEmpty,
1 // numContiguousChunksPerShard
- );
+ );
} else {
initialChunks = InitialSplitPolicy::createFirstChunksUnoptimized(
opCtx, nss, fieldsAndOrder, dbPrimaryShardId);
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_database_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_database_operations.cpp
index 3a408ea6090..11091ef8957 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_database_operations.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_database_operations.cpp
@@ -91,10 +91,7 @@ DatabaseType ShardingCatalogManager::createDatabase(OperationContext* opCtx,
uassert(ErrorCodes::DatabaseDifferCase,
str::stream() << "can't have 2 databases that just differ on case "
- << " have: "
- << actualDbName
- << " want to add: "
- << dbName,
+ << " have: " << actualDbName << " want to add: " << dbName,
actualDbName == dbName);
// We did a local read of the database entry above and found that the database already
@@ -264,8 +261,7 @@ Status ShardingCatalogManager::commitMovePrimary(OperationContext* opCtx,
// are holding the dist lock during the movePrimary operation.
uassert(ErrorCodes::IncompatibleShardingMetadata,
str::stream() << "Tried to update primary shard for database '" << dbname
- << " with version "
- << currentDatabaseVersion.getLastMod(),
+ << " with version " << currentDatabaseVersion.getLastMod(),
updateStatus.getValue());
// Ensure the next attempt to retrieve the database or any of its collections will do a full
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_enable_sharding_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_enable_sharding_test.cpp
index 8cd076b9c28..825236b9575 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_enable_sharding_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_enable_sharding_test.cpp
@@ -149,15 +149,13 @@ TEST_F(EnableShardingTest, dbExistsInvalidFormat) {
setupShards(vector<ShardType>{shard});
// Set up database with bad type for primary field.
- ASSERT_OK(catalogClient()->insertConfigDocument(operationContext(),
- DatabaseType::ConfigNS,
- BSON("_id"
- << "db6"
- << "primary"
- << 12
- << "partitioned"
- << false),
- ShardingCatalogClient::kMajorityWriteConcern));
+ ASSERT_OK(
+ catalogClient()->insertConfigDocument(operationContext(),
+ DatabaseType::ConfigNS,
+ BSON("_id"
+ << "db6"
+ << "primary" << 12 << "partitioned" << false),
+ ShardingCatalogClient::kMajorityWriteConcern));
ASSERT_THROWS_CODE(
ShardingCatalogManager::get(operationContext())->enableSharding(operationContext(), "db6"),
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_shard_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_shard_operations.cpp
index 8e6e2e29423..066405d32b8 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_shard_operations.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_shard_operations.cpp
@@ -181,19 +181,17 @@ StatusWith<Shard::CommandResponse> ShardingCatalogManager::_runCommandForAddShar
Status commandStatus = getStatusFromCommandResult(result);
if (!Shard::shouldErrorBePropagated(commandStatus.code())) {
- commandStatus = {ErrorCodes::OperationFailed,
- str::stream() << "failed to run command " << cmdObj
- << " when attempting to add shard "
- << targeter->connectionString().toString()
- << causedBy(commandStatus)};
+ commandStatus = {
+ ErrorCodes::OperationFailed,
+ str::stream() << "failed to run command " << cmdObj << " when attempting to add shard "
+ << targeter->connectionString().toString() << causedBy(commandStatus)};
}
Status writeConcernStatus = getWriteConcernStatusFromCommandResult(result);
if (!Shard::shouldErrorBePropagated(writeConcernStatus.code())) {
writeConcernStatus = {ErrorCodes::OperationFailed,
str::stream() << "failed to satisfy writeConcern for command "
- << cmdObj
- << " when attempting to add shard "
+ << cmdObj << " when attempting to add shard "
<< targeter->connectionString().toString()
<< causedBy(writeConcernStatus)};
}
@@ -257,8 +255,7 @@ StatusWith<boost::optional<ShardType>> ShardingCatalogManager::_checkIfShardExis
} else {
return {ErrorCodes::IllegalOperation,
str::stream() << "A shard already exists containing the replica set '"
- << existingShardConnStr.getSetName()
- << "'"};
+ << existingShardConnStr.getSetName() << "'"};
}
}
@@ -277,10 +274,8 @@ StatusWith<boost::optional<ShardType>> ShardingCatalogManager::_checkIfShardExis
return {ErrorCodes::IllegalOperation,
str::stream() << "'" << addingHost.toString() << "' "
<< "is already a member of the existing shard '"
- << existingShard.getHost()
- << "' ("
- << existingShard.getName()
- << ")."};
+ << existingShard.getHost() << "' ("
+ << existingShard.getName() << ")."};
}
}
}
@@ -340,8 +335,7 @@ StatusWith<ShardType> ShardingCatalogManager::_validateHostAsShard(
if (!status.isOK()) {
return status.withContext(str::stream() << "isMaster returned invalid 'maxWireVersion' "
<< "field when attempting to add "
- << connectionString.toString()
- << " as a shard");
+ << connectionString.toString() << " as a shard");
}
if (serverGlobalParams.featureCompatibility.getVersion() >
ServerGlobalParams::FeatureCompatibility::Version::kFullyDowngradedTo40) {
@@ -362,8 +356,7 @@ StatusWith<ShardType> ShardingCatalogManager::_validateHostAsShard(
if (!status.isOK()) {
return status.withContext(str::stream() << "isMaster returned invalid 'ismaster' "
<< "field when attempting to add "
- << connectionString.toString()
- << " as a shard");
+ << connectionString.toString() << " as a shard");
}
if (!isMaster) {
return {ErrorCodes::NotMaster,
@@ -387,8 +380,7 @@ StatusWith<ShardType> ShardingCatalogManager::_validateHostAsShard(
if (!providedSetName.empty() && foundSetName.empty()) {
return {ErrorCodes::OperationFailed,
str::stream() << "host did not return a set name; "
- << "is the replica set still initializing? "
- << resIsMaster};
+ << "is the replica set still initializing? " << resIsMaster};
}
// Make sure the set name specified in the connection string matches the one where its hosts
@@ -396,8 +388,7 @@ StatusWith<ShardType> ShardingCatalogManager::_validateHostAsShard(
if (!providedSetName.empty() && (providedSetName != foundSetName)) {
return {ErrorCodes::OperationFailed,
str::stream() << "the provided connection string (" << connectionString.toString()
- << ") does not match the actual set name "
- << foundSetName};
+ << ") does not match the actual set name " << foundSetName};
}
// Is it a config server?
@@ -437,11 +428,8 @@ StatusWith<ShardType> ShardingCatalogManager::_validateHostAsShard(
if (hostSet.find(host) == hostSet.end()) {
return {ErrorCodes::OperationFailed,
str::stream() << "in seed list " << connectionString.toString() << ", host "
- << host
- << " does not belong to replica set "
- << foundSetName
- << "; found "
- << resIsMaster.toString()};
+ << host << " does not belong to replica set " << foundSetName
+ << "; found " << resIsMaster.toString()};
}
}
}
@@ -611,13 +599,9 @@ StatusWith<std::string> ShardingCatalogManager::addShard(
const auto& dbDoc = dbt.getValue().value;
return Status(ErrorCodes::OperationFailed,
str::stream() << "can't add shard "
- << "'"
- << shardConnectionString.toString()
- << "'"
- << " because a local database '"
- << dbName
- << "' exists in another "
- << dbDoc.getPrimary());
+ << "'" << shardConnectionString.toString() << "'"
+ << " because a local database '" << dbName
+ << "' exists in another " << dbDoc.getPrimary());
} else if (dbt != ErrorCodes::NamespaceNotFound) {
return dbt.getStatus();
}
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_zone_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_zone_operations.cpp
index b1b7b0d9adb..9b5b8eb0f8a 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_zone_operations.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_zone_operations.cpp
@@ -154,17 +154,13 @@ StatusWith<ChunkRange> includeFullShardKey(OperationContext* opCtx,
if (!range.getMin().isFieldNamePrefixOf(shardKeyBSON)) {
return {ErrorCodes::ShardKeyNotFound,
str::stream() << "min: " << range.getMin() << " is not a prefix of the shard key "
- << shardKeyBSON
- << " of ns: "
- << nss.ns()};
+ << shardKeyBSON << " of ns: " << nss.ns()};
}
if (!range.getMax().isFieldNamePrefixOf(shardKeyBSON)) {
return {ErrorCodes::ShardKeyNotFound,
str::stream() << "max: " << range.getMax() << " is not a prefix of the shard key "
- << shardKeyBSON
- << " of ns: "
- << nss.ns()};
+ << shardKeyBSON << " of ns: " << nss.ns()};
}
return ChunkRange(shardKeyPattern.extendRangeBound(range.getMin(), false),
diff --git a/src/mongo/db/s/config_server_op_observer_test.cpp b/src/mongo/db/s/config_server_op_observer_test.cpp
index fc5ff24708d..eca0a3a19b5 100644
--- a/src/mongo/db/s/config_server_op_observer_test.cpp
+++ b/src/mongo/db/s/config_server_op_observer_test.cpp
@@ -27,8 +27,8 @@
* it in the license file.
*/
-#include "mongo/db/s/config_server_op_observer.h"
#include "mongo/db/s/config/sharding_catalog_manager.h"
+#include "mongo/db/s/config_server_op_observer.h"
#include "mongo/s/cluster_identity_loader.h"
#include "mongo/s/config_server_test_fixture.h"
#include "mongo/unittest/death_test.h"
diff --git a/src/mongo/db/s/merge_chunks_command.cpp b/src/mongo/db/s/merge_chunks_command.cpp
index 0a808e8daac..75ea7635773 100644
--- a/src/mongo/db/s/merge_chunks_command.cpp
+++ b/src/mongo/db/s/merge_chunks_command.cpp
@@ -79,16 +79,13 @@ void mergeChunks(OperationContext* opCtx,
const BSONObj& minKey,
const BSONObj& maxKey,
const OID& epoch) {
- const std::string whyMessage = str::stream() << "merging chunks in " << nss.ns() << " from "
- << minKey << " to " << maxKey;
+ const std::string whyMessage = str::stream()
+ << "merging chunks in " << nss.ns() << " from " << minKey << " to " << maxKey;
auto scopedDistLock = uassertStatusOKWithContext(
Grid::get(opCtx)->catalogClient()->getDistLockManager()->lock(
opCtx, nss.ns(), whyMessage, DistLockManager::kSingleLockAttemptTimeout),
str::stream() << "could not acquire collection lock for " << nss.ns()
- << " to merge chunks in ["
- << redact(minKey)
- << ", "
- << redact(maxKey)
+ << " to merge chunks in [" << redact(minKey) << ", " << redact(maxKey)
<< ")");
auto const shardingState = ShardingState::get(opCtx);
@@ -109,20 +106,14 @@ void mergeChunks(OperationContext* opCtx,
const auto shardVersion = metadata->getShardVersion();
uassert(ErrorCodes::StaleEpoch,
str::stream() << "could not merge chunks, collection " << nss.ns()
- << " has changed since merge was sent (sent epoch: "
- << epoch.toString()
- << ", current epoch: "
- << shardVersion.epoch()
- << ")",
+ << " has changed since merge was sent (sent epoch: " << epoch.toString()
+ << ", current epoch: " << shardVersion.epoch() << ")",
shardVersion.epoch() == epoch);
uassert(ErrorCodes::IllegalOperation,
str::stream() << "could not merge chunks, the range "
- << redact(ChunkRange(minKey, maxKey).toString())
- << " is not valid"
- << " for collection "
- << nss.ns()
- << " with key pattern "
+ << redact(ChunkRange(minKey, maxKey).toString()) << " is not valid"
+ << " for collection " << nss.ns() << " with key pattern "
<< metadata->getKeyPattern().toString(),
metadata->isValidKey(minKey) && metadata->isValidKey(maxKey));
@@ -145,11 +136,8 @@ void mergeChunks(OperationContext* opCtx,
uassert(ErrorCodes::IllegalOperation,
str::stream() << "could not merge chunks, collection " << nss.ns()
- << " range starting at "
- << redact(minKey)
- << " and ending at "
- << redact(maxKey)
- << " does not belong to shard "
+ << " range starting at " << redact(minKey) << " and ending at "
+ << redact(maxKey) << " does not belong to shard "
<< shardingState->shardId(),
!chunksToMerge.empty());
@@ -164,9 +152,7 @@ void mergeChunks(OperationContext* opCtx,
uassert(ErrorCodes::IllegalOperation,
str::stream() << "could not merge chunks, collection " << nss.ns()
- << " range starting at "
- << redact(minKey)
- << " does not belong to shard "
+ << " range starting at " << redact(minKey) << " does not belong to shard "
<< shardingState->shardId(),
minKeyInRange);
@@ -177,9 +163,7 @@ void mergeChunks(OperationContext* opCtx,
uassert(ErrorCodes::IllegalOperation,
str::stream() << "could not merge chunks, collection " << nss.ns()
- << " range ending at "
- << redact(maxKey)
- << " does not belong to shard "
+ << " range ending at " << redact(maxKey) << " does not belong to shard "
<< shardingState->shardId(),
maxKeyInRange);
@@ -205,11 +189,8 @@ void mergeChunks(OperationContext* opCtx,
uassert(
ErrorCodes::IllegalOperation,
str::stream()
- << "could not merge chunks, collection "
- << nss.ns()
- << " has a hole in the range "
- << ChunkRange(minKey, maxKey).toString()
- << " at "
+ << "could not merge chunks, collection " << nss.ns() << " has a hole in the range "
+ << ChunkRange(minKey, maxKey).toString() << " at "
<< ChunkRange(chunksToMerge[i - 1].getMax(), chunksToMerge[i].getMin()).toString(),
chunksToMerge[i - 1].getMax().woCompare(chunksToMerge[i].getMin()) == 0);
}
diff --git a/src/mongo/db/s/metadata_manager.cpp b/src/mongo/db/s/metadata_manager.cpp
index 7394d7dae15..5f48778deca 100644
--- a/src/mongo/db/s/metadata_manager.cpp
+++ b/src/mongo/db/s/metadata_manager.cpp
@@ -144,7 +144,7 @@ void scheduleCleanup(executor::TaskExecutor* executor,
Date_t when) {
LOG(1) << "Scheduling cleanup on " << nss.ns() << " at " << when;
auto swCallbackHandle = executor->scheduleWorkAt(
- when, [ executor, nss = std::move(nss), epoch = std::move(epoch) ](auto& args) {
+ when, [executor, nss = std::move(nss), epoch = std::move(epoch)](auto& args) {
auto& status = args.status;
if (ErrorCodes::isCancelationError(status.code())) {
return;
@@ -230,11 +230,11 @@ MetadataManager::~MetadataManager() {
}
void MetadataManager::_clearAllCleanups(WithLock lock) {
- _clearAllCleanups(
- lock,
- {ErrorCodes::InterruptedDueToReplStateChange,
- str::stream() << "Range deletions in " << _nss.ns()
- << " abandoned because collection was dropped or became unsharded"});
+ _clearAllCleanups(lock,
+ {ErrorCodes::InterruptedDueToReplStateChange,
+ str::stream()
+ << "Range deletions in " << _nss.ns()
+ << " abandoned because collection was dropped or became unsharded"});
}
void MetadataManager::_clearAllCleanups(WithLock, Status status) {
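
The scheduleCleanup hunk above also shows the other recurring change in this patch: capture lists of lambdas with init-captures lose the padding spaces inside the brackets, so [ executor, nss = std::move(nss) ] becomes [executor, nss = std::move(nss)]. A self-contained sketch of the new spacing follows; it uses no mongo types, and the callback and its arguments are invented for illustration.

#include <iostream>
#include <memory>
#include <string>

int main() {
    auto nss = std::make_unique<std::string>("test.rangeDeletions");
    // clang-format 7 spacing: no blanks just inside the capture brackets,
    // even when the capture list contains an init-capture.
    auto cleanup = [nss = std::move(nss)](int attempt) {
        std::cout << "scheduling cleanup on " << *nss << " (attempt " << attempt << ")\n";
    };
    cleanup(1);
    return 0;
}
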
diff --git a/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp b/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
index 391fb17c937..70c936164f0 100644
--- a/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
+++ b/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
@@ -328,8 +328,7 @@ Status MigrationChunkClonerSourceLegacy::awaitUntilCriticalSectionIsAppropriate(
return {ErrorCodes::OperationIncomplete,
str::stream() << "Unable to enter critical section because the recipient "
"shard thinks all data is cloned while there are still "
- << cloneLocsRemaining
- << " documents remaining"};
+ << cloneLocsRemaining << " documents remaining"};
}
return Status::OK();
@@ -746,8 +745,7 @@ Status MigrationChunkClonerSourceLegacy::_storeCurrentLocs(OperationContext* opC
if (!idx) {
return {ErrorCodes::IndexNotFound,
str::stream() << "can't find index with prefix " << _shardKeyPattern.toBSON()
- << " in storeCurrentLocs for "
- << _args.getNss().ns()};
+ << " in storeCurrentLocs for " << _args.getNss().ns()};
}
// Assume both min and max non-empty, append MinKey's to make them fit chosen index
@@ -819,19 +817,10 @@ Status MigrationChunkClonerSourceLegacy::_storeCurrentLocs(OperationContext* opC
return {
ErrorCodes::ChunkTooBig,
str::stream() << "Cannot move chunk: the maximum number of documents for a chunk is "
- << maxRecsWhenFull
- << ", the maximum chunk size is "
- << _args.getMaxChunkSizeBytes()
- << ", average document size is "
- << avgRecSize
- << ". Found "
- << recCount
- << " documents in chunk "
- << " ns: "
- << _args.getNss().ns()
- << " "
- << _args.getMinKey()
- << " -> "
+ << maxRecsWhenFull << ", the maximum chunk size is "
+ << _args.getMaxChunkSizeBytes() << ", average document size is "
+ << avgRecSize << ". Found " << recCount << " documents in chunk "
+ << " ns: " << _args.getNss().ns() << " " << _args.getMinKey() << " -> "
<< _args.getMaxKey()};
}
diff --git a/src/mongo/db/s/migration_chunk_cloner_source_legacy_commands.cpp b/src/mongo/db/s/migration_chunk_cloner_source_legacy_commands.cpp
index 2a751999ca4..f7e325bfafc 100644
--- a/src/mongo/db/s/migration_chunk_cloner_source_legacy_commands.cpp
+++ b/src/mongo/db/s/migration_chunk_cloner_source_legacy_commands.cpp
@@ -86,8 +86,8 @@ public:
invariant(_chunkCloner);
} else {
uasserted(ErrorCodes::IllegalOperation,
- str::stream() << "No active migrations were found for collection "
- << nss->ns());
+ str::stream()
+ << "No active migrations were found for collection " << nss->ns());
}
}
@@ -317,9 +317,7 @@ public:
auto rollbackId = repl::ReplicationProcess::get(opCtx)->getRollbackID();
uassert(50881,
str::stream() << "rollback detected, rollbackId was "
- << rollbackIdAtMigrationInit
- << " but is now "
- << rollbackId,
+ << rollbackIdAtMigrationInit << " but is now " << rollbackId,
rollbackId == rollbackIdAtMigrationInit);
}
diff --git a/src/mongo/db/s/migration_destination_manager.cpp b/src/mongo/db/s/migration_destination_manager.cpp
index 481b0d2a707..75dd569f264 100644
--- a/src/mongo/db/s/migration_destination_manager.cpp
+++ b/src/mongo/db/s/migration_destination_manager.cpp
@@ -436,8 +436,7 @@ Status MigrationDestinationManager::abort(const MigrationSessionId& sessionId) {
if (!_sessionId->matches(sessionId)) {
return {ErrorCodes::CommandFailed,
str::stream() << "received abort request from a stale session "
- << sessionId.toString()
- << ". Current session is "
+ << sessionId.toString() << ". Current session is "
<< _sessionId->toString()};
}
@@ -462,8 +461,7 @@ Status MigrationDestinationManager::startCommit(const MigrationSessionId& sessio
if (_state != STEADY) {
return {ErrorCodes::CommandFailed,
str::stream() << "Migration startCommit attempted when not in STEADY state."
- << " Sender's session is "
- << sessionId.toString()
+ << " Sender's session is " << sessionId.toString()
<< (_sessionId ? (". Current session is " + _sessionId->toString())
: ". No active session on this shard.")};
}
@@ -477,8 +475,7 @@ Status MigrationDestinationManager::startCommit(const MigrationSessionId& sessio
if (!_sessionId->matches(sessionId)) {
return {ErrorCodes::CommandFailed,
str::stream() << "startCommit received commit request from a stale session "
- << sessionId.toString()
- << ". Current session is "
+ << sessionId.toString() << ". Current session is "
<< _sessionId->toString()};
}
@@ -550,9 +547,7 @@ void MigrationDestinationManager::cloneCollectionIndexesAndOptions(OperationCont
auto infos = infosRes.docs;
uassert(ErrorCodes::NamespaceNotFound,
str::stream() << "expected listCollections against the primary shard for "
- << nss.toString()
- << " to return 1 entry, but got "
- << infos.size()
+ << nss.toString() << " to return 1 entry, but got " << infos.size()
<< " entries",
infos.size() == 1);
@@ -574,8 +569,7 @@ void MigrationDestinationManager::cloneCollectionIndexesAndOptions(OperationCont
uassert(ErrorCodes::InvalidUUID,
str::stream() << "The donor shard did not return a UUID for collection " << nss.ns()
- << " as part of its listCollections response: "
- << entry
+ << " as part of its listCollections response: " << entry
<< ", but this node expects to see a UUID.",
!info["uuid"].eoo());
@@ -602,11 +596,9 @@ void MigrationDestinationManager::cloneCollectionIndexesAndOptions(OperationCont
uassert(ErrorCodes::InvalidUUID,
str::stream()
- << "Cannot create collection "
- << nss.ns()
+ << "Cannot create collection " << nss.ns()
<< " because we already have an identically named collection with UUID "
- << collection->uuid()
- << ", which differs from the donor's UUID "
+ << collection->uuid() << ", which differs from the donor's UUID "
<< (donorUUID ? donorUUID->toString() : "(none)")
<< ". Manually drop the collection on this shard if it contains data from "
"a previous incarnation of "
@@ -622,10 +614,10 @@ void MigrationDestinationManager::cloneCollectionIndexesAndOptions(OperationCont
if (!indexSpecs.empty()) {
// Only allow indexes to be copied if the collection does not have any documents.
uassert(ErrorCodes::CannotCreateCollection,
- str::stream() << "aborting, shard is missing " << indexSpecs.size()
- << " indexes and "
- << "collection is not empty. Non-trivial "
- << "index creation should be scheduled manually",
+ str::stream()
+ << "aborting, shard is missing " << indexSpecs.size() << " indexes and "
+ << "collection is not empty. Non-trivial "
+ << "index creation should be scheduled manually",
collection->numRecords(opCtx) == 0);
}
return indexSpecs;
@@ -1152,10 +1144,9 @@ CollectionShardingRuntime::CleanupNotification MigrationDestinationManager::_not
if (!optMetadata || !(*optMetadata)->isSharded() ||
(*optMetadata)->getCollVersion().epoch() != _epoch) {
return Status{ErrorCodes::StaleShardVersion,
- str::stream() << "Not marking chunk " << redact(range.toString())
- << " as pending because the epoch of "
- << _nss.ns()
- << " changed"};
+ str::stream()
+ << "Not marking chunk " << redact(range.toString())
+ << " as pending because the epoch of " << _nss.ns() << " changed"};
}
// Start clearing any leftovers that would be in the new chunk
diff --git a/src/mongo/db/s/migration_session_id.cpp b/src/mongo/db/s/migration_session_id.cpp
index d2cfeab3254..7049a0870cf 100644
--- a/src/mongo/db/s/migration_session_id.cpp
+++ b/src/mongo/db/s/migration_session_id.cpp
@@ -53,8 +53,8 @@ MigrationSessionId MigrationSessionId::generate(StringData donor, StringData rec
invariant(!donor.empty());
invariant(!recipient.empty());
- return MigrationSessionId(str::stream() << donor << "_" << recipient << "_"
- << OID::gen().toString());
+ return MigrationSessionId(str::stream()
+ << donor << "_" << recipient << "_" << OID::gen().toString());
}
StatusWith<MigrationSessionId> MigrationSessionId::extractFromBSON(const BSONObj& obj) {
diff --git a/src/mongo/db/s/migration_source_manager.cpp b/src/mongo/db/s/migration_source_manager.cpp
index e292320ba53..022df3b0745 100644
--- a/src/mongo/db/s/migration_source_manager.cpp
+++ b/src/mongo/db/s/migration_source_manager.cpp
@@ -190,10 +190,8 @@ MigrationSourceManager::MigrationSourceManager(OperationContext* opCtx,
uassert(ErrorCodes::StaleEpoch,
str::stream() << "cannot move chunk " << _args.toString()
<< " because collection may have been dropped. "
- << "current epoch: "
- << collectionVersion.epoch()
- << ", cmd epoch: "
- << _args.getVersionEpoch(),
+ << "current epoch: " << collectionVersion.epoch()
+ << ", cmd epoch: " << _args.getVersionEpoch(),
_args.getVersionEpoch() == collectionVersion.epoch());
ChunkType chunkToMove;
@@ -228,9 +226,7 @@ Status MigrationSourceManager::startClone(OperationContext* opCtx) {
"moveChunk.start",
getNss().ns(),
BSON("min" << _args.getMinKey() << "max" << _args.getMaxKey() << "from"
- << _args.getFromShardId()
- << "to"
- << _args.getToShardId()),
+ << _args.getFromShardId() << "to" << _args.getToShardId()),
ShardingCatalogClient::kMajorityWriteConcern);
if (logStatus != Status::OK()) {
return logStatus;
@@ -452,9 +448,7 @@ Status MigrationSourceManager::commitChunkMetadataOnConfig(OperationContext* opC
"moveChunk.validating",
getNss().ns(),
BSON("min" << _args.getMinKey() << "max" << _args.getMaxKey() << "from"
- << _args.getFromShardId()
- << "to"
- << _args.getToShardId()),
+ << _args.getFromShardId() << "to" << _args.getToShardId()),
ShardingCatalogClient::kMajorityWriteConcern);
if ((ErrorCodes::isInterruption(status.code()) ||
@@ -487,12 +481,11 @@ Status MigrationSourceManager::commitChunkMetadataOnConfig(OperationContext* opC
}
fassert(40137,
- status.withContext(
- str::stream() << "Failed to commit migration for chunk " << _args.toString()
- << " due to "
- << redact(migrationCommitStatus)
- << ". Updating the optime with a write before refreshing the "
- << "metadata also failed"));
+ status.withContext(str::stream()
+ << "Failed to commit migration for chunk " << _args.toString()
+ << " due to " << redact(migrationCommitStatus)
+ << ". Updating the optime with a write before refreshing the "
+ << "metadata also failed"));
}
// Do a best effort attempt to incrementally refresh the metadata before leaving the critical
@@ -524,8 +517,7 @@ Status MigrationSourceManager::commitChunkMetadataOnConfig(OperationContext* opC
return migrationCommitStatus.withContext(
str::stream() << "Orphaned range not cleaned up. Failed to refresh metadata after"
" migration commit due to '"
- << refreshStatus.toString()
- << "' after commit failed");
+ << refreshStatus.toString() << "' after commit failed");
}
const auto refreshedMetadata = _getCurrentMetadataAndCheckEpoch(opCtx);
@@ -569,10 +561,7 @@ Status MigrationSourceManager::commitChunkMetadataOnConfig(OperationContext* opC
"moveChunk.commit",
getNss().ns(),
BSON("min" << _args.getMinKey() << "max" << _args.getMaxKey() << "from"
- << _args.getFromShardId()
- << "to"
- << _args.getToShardId()
- << "counts"
+ << _args.getFromShardId() << "to" << _args.getToShardId() << "counts"
<< _recipientCloneCounts),
ShardingCatalogClient::kMajorityWriteConcern);
@@ -632,9 +621,7 @@ void MigrationSourceManager::cleanupOnError(OperationContext* opCtx) {
"moveChunk.error",
getNss().ns(),
BSON("min" << _args.getMinKey() << "max" << _args.getMaxKey() << "from"
- << _args.getFromShardId()
- << "to"
- << _args.getToShardId()),
+ << _args.getFromShardId() << "to" << _args.getToShardId()),
ShardingCatalogClient::kMajorityWriteConcern);
try {
@@ -661,8 +648,7 @@ ScopedCollectionMetadata MigrationSourceManager::_getCurrentMetadataAndCheckEpoc
uassert(ErrorCodes::ConflictingOperationInProgress,
str::stream() << "The collection was dropped or recreated since the migration began. "
- << "Expected collection epoch: "
- << _collectionEpoch.toString()
+ << "Expected collection epoch: " << _collectionEpoch.toString()
<< ", but found: "
<< (metadata->isSharded() ? metadata->getCollVersion().epoch().toString()
: "unsharded collection."),
@@ -684,9 +670,7 @@ void MigrationSourceManager::_notifyChangeStreamsOnRecipientFirstChunk(
// The message expected by change streams
const auto o2Message = BSON("type"
<< "migrateChunkToNewShard"
- << "from"
- << _args.getFromShardId()
- << "to"
+ << "from" << _args.getFromShardId() << "to"
<< _args.getToShardId());
auto const serviceContext = opCtx->getClient()->getServiceContext();
diff --git a/src/mongo/db/s/migration_util.cpp b/src/mongo/db/s/migration_util.cpp
index ac20cb2f350..a66109e73ba 100644
--- a/src/mongo/db/s/migration_util.cpp
+++ b/src/mongo/db/s/migration_util.cpp
@@ -45,7 +45,7 @@ const char kDestinationShard[] = "destination";
const char kIsDonorShard[] = "isDonorShard";
const char kChunk[] = "chunk";
const char kCollection[] = "collection";
-}
+} // namespace
BSONObj makeMigrationStatusDocument(const NamespaceString& nss,
const ShardId& fromShard,
diff --git a/src/mongo/db/s/migration_util.h b/src/mongo/db/s/migration_util.h
index dc2469d8602..67b59761477 100644
--- a/src/mongo/db/s/migration_util.h
+++ b/src/mongo/db/s/migration_util.h
@@ -56,6 +56,6 @@ BSONObj makeMigrationStatusDocument(const NamespaceString& nss,
const BSONObj& min,
const BSONObj& max);
-} // namespace shardutil
+} // namespace migrationutil
} // namespace mongo
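
Besides reflowing message builders, clang-format 7 normalizes namespace closing comments, which is why the anonymous namespace in migration_util.cpp gains a trailing comment and the stale shardutil comment in migration_util.h is corrected to migrationutil. A small stand-alone sketch of the convention follows; the namespace and constant below are made up for illustration only.

#include <iostream>

namespace example_util {  // hypothetical namespace, for illustration only
namespace {
const char kChunk[] = "chunk";
}  // namespace

void printKey() {
    std::cout << kChunk << '\n';
}
}  // namespace example_util

int main() {
    example_util::printKey();
    return 0;
}
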
diff --git a/src/mongo/db/s/move_chunk_command.cpp b/src/mongo/db/s/move_chunk_command.cpp
index dd62c984292..8fafb8c0253 100644
--- a/src/mongo/db/s/move_chunk_command.cpp
+++ b/src/mongo/db/s/move_chunk_command.cpp
@@ -149,8 +149,8 @@ public:
} catch (const std::exception& e) {
scopedMigration.signalComplete(
{ErrorCodes::InternalError,
- str::stream() << "Severe error occurred while running moveChunk command: "
- << e.what()});
+ str::stream()
+ << "Severe error occurred while running moveChunk command: " << e.what()});
throw;
}
diff --git a/src/mongo/db/s/move_primary_source_manager.cpp b/src/mongo/db/s/move_primary_source_manager.cpp
index aff155b3bb2..409bbb5d94c 100644
--- a/src/mongo/db/s/move_primary_source_manager.cpp
+++ b/src/mongo/db/s/move_primary_source_manager.cpp
@@ -300,8 +300,7 @@ Status MovePrimarySourceManager::commitOnConfig(OperationContext* opCtx) {
fassert(50762,
validateStatus.withContext(
str::stream() << "Failed to commit movePrimary for database " << getNss().ns()
- << " due to "
- << redact(commitStatus)
+ << " due to " << redact(commitStatus)
<< ". Updating the optime with a write before clearing the "
<< "version also failed"));
diff --git a/src/mongo/db/s/scoped_operation_completion_sharding_actions.h b/src/mongo/db/s/scoped_operation_completion_sharding_actions.h
index de61f5fbfd2..baea9099032 100644
--- a/src/mongo/db/s/scoped_operation_completion_sharding_actions.h
+++ b/src/mongo/db/s/scoped_operation_completion_sharding_actions.h
@@ -37,7 +37,7 @@ namespace mongo {
* This class has a destructor that handles rerouting exceptions that might have occurred
* during an operation. For this reason, there should be only one instance of this object
* on the chain of one OperationContext.
-*/
+ */
class OperationContext;
class ScopedOperationCompletionShardingActions : public PolymorphicScoped {
diff --git a/src/mongo/db/s/session_catalog_migration_destination.cpp b/src/mongo/db/s/session_catalog_migration_destination.cpp
index 1482462fce9..18e16428f63 100644
--- a/src/mongo/db/s/session_catalog_migration_destination.cpp
+++ b/src/mongo/db/s/session_catalog_migration_destination.cpp
@@ -89,10 +89,8 @@ void setPrePostImageTs(const ProcessOplogResult& lastResult, repl::MutableOplogE
if (!lastResult.isPrePostImage) {
uassert(40628,
str::stream() << "expected oplog with ts: " << entry->getTimestamp().toString()
- << " to not have "
- << repl::OplogEntryBase::kPreImageOpTimeFieldName
- << " or "
- << repl::OplogEntryBase::kPostImageOpTimeFieldName,
+ << " to not have " << repl::OplogEntryBase::kPreImageOpTimeFieldName
+ << " or " << repl::OplogEntryBase::kPostImageOpTimeFieldName,
!entry->getPreImageOpTime() && !entry->getPostImageOpTime());
return;
}
@@ -102,14 +100,11 @@ void setPrePostImageTs(const ProcessOplogResult& lastResult, repl::MutableOplogE
uassert(40629,
str::stream() << "expected oplog with ts: " << entry->getTimestamp().toString() << ": "
<< redact(entry->toBSON())
- << " to have session: "
- << lastResult.sessionId,
+ << " to have session: " << lastResult.sessionId,
lastResult.sessionId == entry->getSessionId());
uassert(40630,
str::stream() << "expected oplog with ts: " << entry->getTimestamp().toString() << ": "
- << redact(entry->toBSON())
- << " to have txnNumber: "
- << lastResult.txnNum,
+ << redact(entry->toBSON()) << " to have txnNumber: " << lastResult.txnNum,
lastResult.txnNum == entry->getTxnNumber());
if (entry->getPreImageOpTime()) {
@@ -119,11 +114,8 @@ void setPrePostImageTs(const ProcessOplogResult& lastResult, repl::MutableOplogE
} else {
uasserted(40631,
str::stream() << "expected oplog with opTime: " << entry->getOpTime().toString()
- << ": "
- << redact(entry->toBSON())
- << " to have either "
- << repl::OplogEntryBase::kPreImageOpTimeFieldName
- << " or "
+ << ": " << redact(entry->toBSON()) << " to have either "
+ << repl::OplogEntryBase::kPreImageOpTimeFieldName << " or "
<< repl::OplogEntryBase::kPostImageOpTimeFieldName);
}
}
@@ -142,20 +134,17 @@ repl::MutableOplogEntry parseOplog(const BSONObj& oplogBSON) {
uassert(ErrorCodes::UnsupportedFormat,
str::stream() << "oplog with opTime " << oplogEntry.getTimestamp().toString()
- << " does not have sessionId: "
- << redact(oplogBSON),
+ << " does not have sessionId: " << redact(oplogBSON),
sessionInfo.getSessionId());
uassert(ErrorCodes::UnsupportedFormat,
str::stream() << "oplog with opTime " << oplogEntry.getTimestamp().toString()
- << " does not have txnNumber: "
- << redact(oplogBSON),
+ << " does not have txnNumber: " << redact(oplogBSON),
sessionInfo.getTxnNumber());
uassert(ErrorCodes::UnsupportedFormat,
str::stream() << "oplog with opTime " << oplogEntry.getTimestamp().toString()
- << " does not have stmtId: "
- << redact(oplogBSON),
+ << " does not have stmtId: " << redact(oplogBSON),
oplogEntry.getStatementId());
return oplogEntry;
@@ -225,9 +214,7 @@ ProcessOplogResult processSessionOplog(const BSONObj& oplogBSON,
uassert(40632,
str::stream() << "Can't handle 2 pre/post image oplog in a row. Prevoius oplog "
<< lastResult.oplogTime.getTimestamp().toString()
- << ", oplog ts: "
- << oplogEntry.getTimestamp().toString()
- << ": "
+ << ", oplog ts: " << oplogEntry.getTimestamp().toString() << ": "
<< oplogBSON,
!lastResult.isPrePostImage);
}
@@ -295,9 +282,7 @@ ProcessOplogResult processSessionOplog(const BSONObj& oplogBSON,
const auto& oplogOpTime = result.oplogTime;
uassert(40633,
str::stream() << "Failed to create new oplog entry for oplog with opTime: "
- << oplogEntry.getOpTime().toString()
- << ": "
- << redact(oplogBSON),
+ << oplogEntry.getOpTime().toString() << ": " << redact(oplogBSON),
!oplogOpTime.isNull());
// Do not call onWriteOpCompletedOnPrimary if we inserted a pre/post image, because the
diff --git a/src/mongo/db/s/session_catalog_migration_source.cpp b/src/mongo/db/s/session_catalog_migration_source.cpp
index 94e052851ca..f645174986d 100644
--- a/src/mongo/db/s/session_catalog_migration_source.cpp
+++ b/src/mongo/db/s/session_catalog_migration_source.cpp
@@ -260,8 +260,9 @@ bool SessionCatalogMigrationSource::_handleWriteHistory(WithLock, OperationConte
// Skip the rest of the chain for this session since the ns is unrelated with the
// current one being migrated. It is ok to not check the rest of the chain because
// retryable writes doesn't allow touching different namespaces.
- if (!nextStmtId || (nextStmtId && *nextStmtId != kIncompleteHistoryStmtId &&
- nextOplog->getNss() != _ns)) {
+ if (!nextStmtId ||
+ (nextStmtId && *nextStmtId != kIncompleteHistoryStmtId &&
+ nextOplog->getNss() != _ns)) {
_currentOplogIterator.reset();
return false;
}
@@ -420,8 +421,7 @@ boost::optional<repl::OplogEntry> SessionCatalogMigrationSource::SessionOplogIte
uassert(40656,
str::stream() << "rollback detected, rollbackId was " << _initialRollbackId
- << " but is now "
- << rollbackId,
+ << " but is now " << rollbackId,
rollbackId == _initialRollbackId);
// If the rollbackId hasn't changed, and this record corresponds to a retryable write,
diff --git a/src/mongo/db/s/set_shard_version_command.cpp b/src/mongo/db/s/set_shard_version_command.cpp
index dd03e31b206..10564146ca4 100644
--- a/src/mongo/db/s/set_shard_version_command.cpp
+++ b/src/mongo/db/s/set_shard_version_command.cpp
@@ -164,8 +164,7 @@ public:
const auto storedShardName = shardingState->shardId().toString();
uassert(ErrorCodes::BadValue,
str::stream() << "received shardName " << shardName
- << " which differs from stored shardName "
- << storedShardName,
+ << " which differs from stored shardName " << storedShardName,
storedShardName == shardName);
// Validate config connection string parameter.
@@ -184,8 +183,7 @@ public:
Grid::get(opCtx)->shardRegistry()->getConfigServerConnectionString();
uassert(ErrorCodes::IllegalOperation,
str::stream() << "Given config server set name: " << givenConnStr.getSetName()
- << " differs from known set name: "
- << storedConnStr.getSetName(),
+ << " differs from known set name: " << storedConnStr.getSetName(),
givenConnStr.getSetName() == storedConnStr.getSetName());
// Validate namespace parameter.
@@ -366,11 +364,11 @@ public:
if (!status.isOK()) {
// The reload itself was interrupted or confused here
- errmsg = str::stream() << "could not refresh metadata for " << nss.ns()
- << " with requested shard version "
- << requestedVersion.toString()
- << ", stored shard version is " << currVersion.toString()
- << causedBy(redact(status));
+ errmsg = str::stream()
+ << "could not refresh metadata for " << nss.ns()
+ << " with requested shard version " << requestedVersion.toString()
+ << ", stored shard version is " << currVersion.toString()
+ << causedBy(redact(status));
warning() << errmsg;
diff --git a/src/mongo/db/s/shard_key_util.cpp b/src/mongo/db/s/shard_key_util.cpp
index a056fcd3232..fef5707c039 100644
--- a/src/mongo/db/s/shard_key_util.cpp
+++ b/src/mongo/db/s/shard_key_util.cpp
@@ -113,9 +113,7 @@ void validateShardKeyAgainstExistingIndexes(OperationContext* opCtx,
bool isUnique = idx["unique"].trueValue();
uassert(ErrorCodes::InvalidOptions,
str::stream() << "can't shard collection '" << nss.ns() << "' with unique index on "
- << currentKey
- << " and proposed shard key "
- << proposedKey
+ << currentKey << " and proposed shard key " << proposedKey
<< ". Uniqueness can't be maintained unless shard key is a prefix",
!isUnique || shardKeyPattern.isUniqueIndexCompatible(currentKey));
}
@@ -133,8 +131,7 @@ void validateShardKeyAgainstExistingIndexes(OperationContext* opCtx,
// per field per collection.
uassert(ErrorCodes::InvalidOptions,
str::stream() << "can't shard collection " << nss.ns()
- << " with hashed shard key "
- << proposedKey
+ << " with hashed shard key " << proposedKey
<< " because the hashed index uses a non-default seed of "
<< idx["seed"].numberInt(),
!shardKeyPattern.isHashedPattern() || idx["seed"].eoo() ||
diff --git a/src/mongo/db/s/shard_metadata_util_test.cpp b/src/mongo/db/s/shard_metadata_util_test.cpp
index 86bf071f3ac..110cecee0bb 100644
--- a/src/mongo/db/s/shard_metadata_util_test.cpp
+++ b/src/mongo/db/s/shard_metadata_util_test.cpp
@@ -103,8 +103,7 @@ struct ShardMetadataUtilTest : public ShardServerTestFixture {
maxCollVersion.incMajor();
BSONObj shardChunk =
BSON(ChunkType::minShardID(mins[i])
- << ChunkType::max(maxs[i])
- << ChunkType::shard(kShardId.toString())
+ << ChunkType::max(maxs[i]) << ChunkType::shard(kShardId.toString())
<< ChunkType::lastmod(Date_t::fromMillisSinceEpoch(maxCollVersion.toLong())));
chunks.push_back(
@@ -144,8 +143,8 @@ struct ShardMetadataUtilTest : public ShardServerTestFixture {
try {
DBDirectClient client(operationContext());
for (auto& chunk : chunks) {
- Query query(BSON(ChunkType::minShardID() << chunk.getMin() << ChunkType::max()
- << chunk.getMax()));
+ Query query(BSON(ChunkType::minShardID()
+ << chunk.getMin() << ChunkType::max() << chunk.getMax()));
query.readPref(ReadPreference::Nearest, BSONArray());
std::unique_ptr<DBClientCursor> cursor = client.query(chunkMetadataNss, query, 1);
diff --git a/src/mongo/db/s/shard_server_catalog_cache_loader.cpp b/src/mongo/db/s/shard_server_catalog_cache_loader.cpp
index d278e8acba3..c889866bfd1 100644
--- a/src/mongo/db/s/shard_server_catalog_cache_loader.cpp
+++ b/src/mongo/db/s/shard_server_catalog_cache_loader.cpp
@@ -161,9 +161,7 @@ ChunkVersion getPersistedMaxChunkVersion(OperationContext* opCtx, const Namespac
}
uassert(ErrorCodes::OperationFailed,
str::stream() << "Failed to read persisted collections entry for collection '"
- << nss.ns()
- << "' due to '"
- << statusWithCollection.getStatus().toString()
+ << nss.ns() << "' due to '" << statusWithCollection.getStatus().toString()
<< "'.",
statusWithCollection.isOK());
@@ -176,9 +174,7 @@ ChunkVersion getPersistedMaxChunkVersion(OperationContext* opCtx, const Namespac
statusWithCollection.getValue().getEpoch());
uassert(ErrorCodes::OperationFailed,
str::stream() << "Failed to read highest version persisted chunk for collection '"
- << nss.ns()
- << "' due to '"
- << statusWithChunk.getStatus().toString()
+ << nss.ns() << "' due to '" << statusWithChunk.getStatus().toString()
<< "'.",
statusWithChunk.isOK());
@@ -265,8 +261,8 @@ StatusWith<CollectionAndChangedChunks> getIncompletePersistedMetadataSinceVersio
return CollectionAndChangedChunks();
}
return Status(ErrorCodes::OperationFailed,
- str::stream() << "Failed to load local metadata due to '" << status.toString()
- << "'.");
+ str::stream()
+ << "Failed to load local metadata due to '" << status.toString() << "'.");
}
}
@@ -454,8 +450,8 @@ void ShardServerCatalogCacheLoader::getDatabase(
return std::make_tuple(_role == ReplicaSetRole::Primary, _term);
}();
- _threadPool.schedule([ this, name = dbName.toString(), callbackFn, isPrimary, term ](
- auto status) noexcept {
+ _threadPool.schedule([ this, name = dbName.toString(), callbackFn, isPrimary,
+ term ](auto status) noexcept {
invariant(status);
auto context = _contexts.makeOperationContext(*Client::getCurrent());
@@ -628,19 +624,18 @@ void ShardServerCatalogCacheLoader::_schedulePrimaryGetChunksSince(
}();
auto remoteRefreshFn = [this, nss, catalogCacheSinceVersion, maxLoaderVersion, termScheduled](
- OperationContext* opCtx,
- StatusWith<CollectionAndChangedChunks>
- swCollectionAndChangedChunks) -> StatusWith<CollectionAndChangedChunks> {
-
+ OperationContext* opCtx,
+ StatusWith<CollectionAndChangedChunks> swCollectionAndChangedChunks)
+ -> StatusWith<CollectionAndChangedChunks> {
if (swCollectionAndChangedChunks == ErrorCodes::NamespaceNotFound) {
_ensureMajorityPrimaryAndScheduleCollAndChunksTask(
opCtx,
nss,
collAndChunkTask{swCollectionAndChangedChunks, maxLoaderVersion, termScheduled});
- LOG_CATALOG_REFRESH(1) << "Cache loader remotely refreshed for collection " << nss
- << " from version " << maxLoaderVersion
- << " and no metadata was found.";
+ LOG_CATALOG_REFRESH(1)
+ << "Cache loader remotely refreshed for collection " << nss << " from version "
+ << maxLoaderVersion << " and no metadata was found.";
return swCollectionAndChangedChunks;
}
@@ -651,12 +646,11 @@ void ShardServerCatalogCacheLoader::_schedulePrimaryGetChunksSince(
auto& collAndChunks = swCollectionAndChangedChunks.getValue();
if (collAndChunks.changedChunks.back().getVersion().epoch() != collAndChunks.epoch) {
- return Status{
- ErrorCodes::ConflictingOperationInProgress,
- str::stream() << "Invalid chunks found when reloading '" << nss.toString()
+ return Status{ErrorCodes::ConflictingOperationInProgress,
+ str::stream()
+ << "Invalid chunks found when reloading '" << nss.toString()
<< "' Previous collection epoch was '"
- << collAndChunks.epoch.toString()
- << "', but found a new epoch '"
+ << collAndChunks.epoch.toString() << "', but found a new epoch '"
<< collAndChunks.changedChunks.back().getVersion().epoch().toString()
<< "'. Collection was dropped and recreated."};
}
@@ -733,8 +727,8 @@ void ShardServerCatalogCacheLoader::_schedulePrimaryGetDatabase(
StringData dbName,
long long termScheduled,
std::function<void(OperationContext*, StatusWith<DatabaseType>)> callbackFn) {
- auto remoteRefreshFn = [ this, name = dbName.toString(), termScheduled ](
- OperationContext * opCtx, StatusWith<DatabaseType> swDatabaseType) {
+ auto remoteRefreshFn = [this, name = dbName.toString(), termScheduled](
+ OperationContext* opCtx, StatusWith<DatabaseType> swDatabaseType) {
if (swDatabaseType == ErrorCodes::NamespaceNotFound) {
_ensureMajorityPrimaryAndScheduleDbTask(
opCtx, name, DBTask{swDatabaseType, termScheduled});
@@ -794,11 +788,12 @@ StatusWith<CollectionAndChangedChunks> ShardServerCatalogCacheLoader::_getLoader
: ("enqueued metadata from " +
enqueued.changedChunks.front().getVersion().toString() + " to " +
enqueued.changedChunks.back().getVersion().toString()))
- << " and " << (persisted.changedChunks.empty()
- ? "no persisted metadata"
- : ("persisted metadata from " +
- persisted.changedChunks.front().getVersion().toString() + " to " +
- persisted.changedChunks.back().getVersion().toString()))
+ << " and "
+ << (persisted.changedChunks.empty()
+ ? "no persisted metadata"
+ : ("persisted metadata from " +
+ persisted.changedChunks.front().getVersion().toString() + " to " +
+ persisted.changedChunks.back().getVersion().toString()))
<< ", GTE cache version " << catalogCacheSinceVersion;
if (!tasksAreEnqueued) {
@@ -909,7 +904,7 @@ void ShardServerCatalogCacheLoader::_ensureMajorityPrimaryAndScheduleDbTask(Oper
return;
}
- _threadPool.schedule([ this, name = dbName.toString() ](auto status) {
+ _threadPool.schedule([this, name = dbName.toString()](auto status) {
invariant(status);
_runDbTasks(name);
@@ -996,7 +991,7 @@ void ShardServerCatalogCacheLoader::_runDbTasks(StringData dbName) {
}
}
- _threadPool.schedule([ this, name = dbName.toString() ](auto status) {
+ _threadPool.schedule([this, name = dbName.toString()](auto status) {
if (ErrorCodes::isCancelationError(status.code())) {
LOG(0) << "Cache loader failed to schedule a persisted metadata update"
<< " task for namespace '" << name << "' due to '" << redact(status)
@@ -1043,12 +1038,8 @@ void ShardServerCatalogCacheLoader::_updatePersistedCollAndChunksMetadata(
uassertStatusOKWithContext(
persistCollectionAndChangedChunks(opCtx, nss, *task.collectionAndChangedChunks),
str::stream() << "Failed to update the persisted chunk metadata for collection '"
- << nss.ns()
- << "' from '"
- << task.minQueryVersion.toString()
- << "' to '"
- << task.maxQueryVersion.toString()
- << "'. Will be retried.");
+ << nss.ns() << "' from '" << task.minQueryVersion.toString() << "' to '"
+ << task.maxQueryVersion.toString() << "'. Will be retried.");
LOG_CATALOG_REFRESH(1) << "Successfully updated persisted chunk metadata for collection '"
<< nss << "' from '" << task.minQueryVersion
@@ -1074,15 +1065,13 @@ void ShardServerCatalogCacheLoader::_updatePersistedDbMetadata(OperationContext*
// The database was dropped. The persisted metadata for the collection must be cleared.
uassertStatusOKWithContext(deleteDatabasesEntry(opCtx, dbName),
str::stream() << "Failed to clear persisted metadata for db '"
- << dbName.toString()
- << "'. Will be retried.");
+ << dbName.toString() << "'. Will be retried.");
return;
}
uassertStatusOKWithContext(persistDbVersion(opCtx, *task.dbType),
str::stream() << "Failed to update the persisted metadata for db '"
- << dbName.toString()
- << "'. Will be retried.");
+ << dbName.toString() << "'. Will be retried.");
LOG_CATALOG_REFRESH(1) << "Successfully updated persisted metadata for db "
<< dbName.toString();
diff --git a/src/mongo/db/s/shard_server_op_observer.cpp b/src/mongo/db/s/shard_server_op_observer.cpp
index aa1ec89d5ec..9c58f262692 100644
--- a/src/mongo/db/s/shard_server_op_observer.cpp
+++ b/src/mongo/db/s/shard_server_op_observer.cpp
@@ -59,8 +59,9 @@ bool isStandaloneOrPrimary(OperationContext* opCtx) {
auto replCoord = repl::ReplicationCoordinator::get(opCtx);
const bool isReplSet =
replCoord->getReplicationMode() == repl::ReplicationCoordinator::modeReplSet;
- return !isReplSet || (repl::ReplicationCoordinator::get(opCtx)->getMemberState() ==
- repl::MemberState::RS_PRIMARY);
+ return !isReplSet ||
+ (repl::ReplicationCoordinator::get(opCtx)->getMemberState() ==
+ repl::MemberState::RS_PRIMARY);
}
/**
diff --git a/src/mongo/db/s/sharding_initialization_mongod.cpp b/src/mongo/db/s/sharding_initialization_mongod.cpp
index c3ca8877773..a1a40e20392 100644
--- a/src/mongo/db/s/sharding_initialization_mongod.cpp
+++ b/src/mongo/db/s/sharding_initialization_mongod.cpp
@@ -97,36 +97,36 @@ public:
// Update the shard identity config string
void onConfirmedSet(const State& state) final {
- Grid::get(_serviceContext)->getExecutorPool()->getFixedExecutor()->schedule([
- serviceContext = _serviceContext,
- connStr = state.connStr
- ](Status status) {
- if (ErrorCodes::isCancelationError(status.code())) {
- LOG(2) << "Unable to schedule confirmed set update due to " << status;
- return;
- }
- uassertStatusOK(status);
-
- LOG(0) << "Updating config server with confirmed set " << connStr;
- Grid::get(serviceContext)->shardRegistry()->updateReplSetHosts(connStr);
-
- if (MONGO_FAIL_POINT(failUpdateShardIdentityConfigString)) {
- return;
- }
-
- auto configsvrConnStr =
- Grid::get(serviceContext)->shardRegistry()->getConfigServerConnectionString();
-
- // Only proceed if the notification is for the configsvr
- if (configsvrConnStr.getSetName() != connStr.getSetName()) {
- return;
- }
-
- ThreadClient tc("updateShardIdentityConfigString", serviceContext);
- auto opCtx = tc->makeOperationContext();
-
- ShardingInitializationMongoD::updateShardIdentityConfigString(opCtx.get(), connStr);
- });
+ Grid::get(_serviceContext)
+ ->getExecutorPool()
+ ->getFixedExecutor()
+ ->schedule([serviceContext = _serviceContext, connStr = state.connStr](Status status) {
+ if (ErrorCodes::isCancelationError(status.code())) {
+ LOG(2) << "Unable to schedule confirmed set update due to " << status;
+ return;
+ }
+ uassertStatusOK(status);
+
+ LOG(0) << "Updating config server with confirmed set " << connStr;
+ Grid::get(serviceContext)->shardRegistry()->updateReplSetHosts(connStr);
+
+ if (MONGO_FAIL_POINT(failUpdateShardIdentityConfigString)) {
+ return;
+ }
+
+ auto configsvrConnStr =
+ Grid::get(serviceContext)->shardRegistry()->getConfigServerConnectionString();
+
+ // Only proceed if the notification is for the configsvr
+ if (configsvrConnStr.getSetName() != connStr.getSetName()) {
+ return;
+ }
+
+ ThreadClient tc("updateShardIdentityConfigString", serviceContext);
+ auto opCtx = tc->makeOperationContext();
+
+ ShardingInitializationMongoD::updateShardIdentityConfigString(opCtx.get(), connStr);
+ });
}
void onPossibleSet(const State& state) final {
Grid::get(_serviceContext)->shardRegistry()->updateReplSetHosts(state.connStr);
@@ -373,12 +373,14 @@ void initializeGlobalShardingStateForMongoD(OperationContext* opCtx,
auto targeterFactoryPtr = targeterFactory.get();
ShardFactory::BuilderCallable setBuilder = [targeterFactoryPtr](
- const ShardId& shardId, const ConnectionString& connStr) {
+ const ShardId& shardId,
+ const ConnectionString& connStr) {
return std::make_unique<ShardRemote>(shardId, connStr, targeterFactoryPtr->create(connStr));
};
ShardFactory::BuilderCallable masterBuilder = [targeterFactoryPtr](
- const ShardId& shardId, const ConnectionString& connStr) {
+ const ShardId& shardId,
+ const ConnectionString& connStr) {
return std::make_unique<ShardRemote>(shardId, connStr, targeterFactoryPtr->create(connStr));
};
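
The onConfirmedSet hunk above is the same formatting pass applied to a lambda handed through a chained call: the member chain now breaks before each arrow and the lambda body is indented under the call. Below is a rough stand-alone sketch of that shape, with a plain std::function in place of the executor; the schedule helper and its status argument are invented for illustration.

#include <functional>
#include <iostream>
#include <string>

// Hypothetical stand-in for an executor's schedule() call; it simply runs the
// callback inline so the example is self-contained.
void schedule(const std::function<void(int)>& work) {
    work(0);
}

int main() {
    std::string connStr = "configRepl/cfg1:27019";
    schedule([connStr](int status) {
        if (status != 0) {
            std::cout << "unable to schedule update, status " << status << '\n';
            return;
        }
        std::cout << "updating config server string to " << connStr << '\n';
    });
    return 0;
}
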
diff --git a/src/mongo/db/s/sharding_initialization_mongod_test.cpp b/src/mongo/db/s/sharding_initialization_mongod_test.cpp
index 21bbe8553ff..5a7a7868259 100644
--- a/src/mongo/db/s/sharding_initialization_mongod_test.cpp
+++ b/src/mongo/db/s/sharding_initialization_mongod_test.cpp
@@ -183,18 +183,19 @@ TEST_F(ShardingInitializationMongoDTest, InitWhilePreviouslyInErrorStateWillStay
shardIdentity.setShardName(kShardName);
shardIdentity.setClusterId(OID::gen());
- shardingInitialization()->setGlobalInitMethodForTest([](
- OperationContext* opCtx, const ShardIdentity& shardIdentity, StringData distLockProcessId) {
+ shardingInitialization()->setGlobalInitMethodForTest([](OperationContext* opCtx,
+ const ShardIdentity& shardIdentity,
+ StringData distLockProcessId) {
uasserted(ErrorCodes::ShutdownInProgress, "Not an actual shutdown");
});
shardingInitialization()->initializeFromShardIdentity(operationContext(), shardIdentity);
// ShardingState is now in error state, attempting to call it again will still result in error.
- shardingInitialization()->setGlobalInitMethodForTest([](
- OperationContext* opCtx, const ShardIdentity& shardIdentity, StringData distLockProcessId) {
- FAIL("Should not be invoked!");
- });
+ shardingInitialization()->setGlobalInitMethodForTest(
+ [](OperationContext* opCtx,
+ const ShardIdentity& shardIdentity,
+ StringData distLockProcessId) { FAIL("Should not be invoked!"); });
ASSERT_THROWS_CODE(
shardingInitialization()->initializeFromShardIdentity(operationContext(), shardIdentity),
@@ -223,10 +224,10 @@ TEST_F(ShardingInitializationMongoDTest, InitializeAgainWithMatchingShardIdentit
shardIdentity2.setShardName(kShardName);
shardIdentity2.setClusterId(clusterID);
- shardingInitialization()->setGlobalInitMethodForTest([](
- OperationContext* opCtx, const ShardIdentity& shardIdentity, StringData distLockProcessId) {
- FAIL("Should not be invoked!");
- });
+ shardingInitialization()->setGlobalInitMethodForTest(
+ [](OperationContext* opCtx,
+ const ShardIdentity& shardIdentity,
+ StringData distLockProcessId) { FAIL("Should not be invoked!"); });
shardingInitialization()->initializeFromShardIdentity(operationContext(), shardIdentity2);
@@ -256,10 +257,10 @@ TEST_F(ShardingInitializationMongoDTest, InitializeAgainWithMatchingReplSetNameS
shardIdentity2.setShardName(kShardName);
shardIdentity2.setClusterId(clusterID);
- shardingInitialization()->setGlobalInitMethodForTest([](
- OperationContext* opCtx, const ShardIdentity& shardIdentity, StringData distLockProcessId) {
- FAIL("Should not be invoked!");
- });
+ shardingInitialization()->setGlobalInitMethodForTest(
+ [](OperationContext* opCtx,
+ const ShardIdentity& shardIdentity,
+ StringData distLockProcessId) { FAIL("Should not be invoked!"); });
shardingInitialization()->initializeFromShardIdentity(operationContext(), shardIdentity2);
@@ -291,13 +292,9 @@ TEST_F(ShardingInitializationMongoDTest,
storageGlobalParams.readOnly = true;
serverGlobalParams.overrideShardIdentity =
BSON("_id"
- << "shardIdentity"
- << ShardIdentity::kShardNameFieldName
- << kShardName
- << ShardIdentity::kClusterIdFieldName
- << OID::gen()
- << ShardIdentity::kConfigsvrConnectionStringFieldName
- << "invalid");
+ << "shardIdentity" << ShardIdentity::kShardNameFieldName << kShardName
+ << ShardIdentity::kClusterIdFieldName << OID::gen()
+ << ShardIdentity::kConfigsvrConnectionStringFieldName << "invalid");
ASSERT_THROWS_CODE(
shardingInitialization()->initializeShardingAwarenessIfNeeded(operationContext()),
@@ -436,10 +433,8 @@ TEST_F(ShardingInitializationMongoDTest,
ScopedSetStandaloneMode standalone(getServiceContext());
BSONObj invalidShardIdentity = BSON("_id"
- << "shardIdentity"
- << ShardIdentity::kShardNameFieldName
- << kShardName
- << ShardIdentity::kClusterIdFieldName
+ << "shardIdentity" << ShardIdentity::kShardNameFieldName
+ << kShardName << ShardIdentity::kClusterIdFieldName
<< OID::gen()
<< ShardIdentity::kConfigsvrConnectionStringFieldName
<< "invalid");
diff --git a/src/mongo/db/s/sharding_logging.cpp b/src/mongo/db/s/sharding_logging.cpp
index 3529a42cfbd..c3d07903ceb 100644
--- a/src/mongo/db/s/sharding_logging.cpp
+++ b/src/mongo/db/s/sharding_logging.cpp
@@ -121,10 +121,10 @@ Status ShardingLogging::_log(OperationContext* opCtx,
const BSONObj& detail,
const WriteConcernOptions& writeConcern) {
Date_t now = Grid::get(opCtx)->getNetwork()->now();
- const std::string serverName = str::stream() << Grid::get(opCtx)->getNetwork()->getHostName()
- << ":" << serverGlobalParams.port;
- const std::string changeId = str::stream() << serverName << "-" << now.toString() << "-"
- << OID::gen();
+ const std::string serverName = str::stream()
+ << Grid::get(opCtx)->getNetwork()->getHostName() << ":" << serverGlobalParams.port;
+ const std::string changeId = str::stream()
+ << serverName << "-" << now.toString() << "-" << OID::gen();
ChangeLogType changeLog;
changeLog.setChangeId(changeId);
@@ -162,9 +162,9 @@ Status ShardingLogging::_createCappedConfigCollection(OperationContext* opCtx,
StringData collName,
int cappedSize,
const WriteConcernOptions& writeConcern) {
- BSONObj createCmd = BSON("create" << collName << "capped" << true << "size" << cappedSize
- << WriteConcernOptions::kWriteConcernField
- << writeConcern.toBSON());
+ BSONObj createCmd =
+ BSON("create" << collName << "capped" << true << "size" << cappedSize
+ << WriteConcernOptions::kWriteConcernField << writeConcern.toBSON());
auto result =
Grid::get(opCtx)->shardRegistry()->getConfigShard()->runCommandWithFixedRetryAttempts(
diff --git a/src/mongo/db/s/shardsvr_shard_collection.cpp b/src/mongo/db/s/shardsvr_shard_collection.cpp
index e59ed3568f7..e229badedbc 100644
--- a/src/mongo/db/s/shardsvr_shard_collection.cpp
+++ b/src/mongo/db/s/shardsvr_shard_collection.cpp
@@ -124,8 +124,7 @@ void checkForExistingChunks(OperationContext* opCtx, const NamespaceString& nss)
str::stream() << "A previous attempt to shard collection " << nss.ns()
<< " failed after writing some initial chunks to config.chunks. Please "
"manually delete the partially written chunks for collection "
- << nss.ns()
- << " from config.chunks",
+ << nss.ns() << " from config.chunks",
numChunks == 0);
}
@@ -229,9 +228,7 @@ void createCollectionOrValidateExisting(OperationContext* opCtx,
bool isUnique = idx["unique"].trueValue();
uassert(ErrorCodes::InvalidOptions,
str::stream() << "can't shard collection '" << nss.ns() << "' with unique index on "
- << currentKey
- << " and proposed shard key "
- << proposedKey
+ << currentKey << " and proposed shard key " << proposedKey
<< ". Uniqueness can't be maintained unless shard key is a prefix",
!isUnique || shardKeyPattern.isUniqueIndexCompatible(currentKey));
}
@@ -249,8 +246,7 @@ void createCollectionOrValidateExisting(OperationContext* opCtx,
// per field per collection.
uassert(ErrorCodes::InvalidOptions,
str::stream() << "can't shard collection " << nss.ns()
- << " with hashed shard key "
- << proposedKey
+ << " with hashed shard key " << proposedKey
<< " because the hashed index uses a non-default seed of "
<< idx["seed"].numberInt(),
!shardKeyPattern.isHashedPattern() || idx["seed"].eoo() ||
@@ -336,9 +332,7 @@ void validateShardKeyAgainstExistingZones(OperationContext* opCtx,
BSONElement tagMaxKeyElement = tagMaxFields.next();
uassert(ErrorCodes::InvalidOptions,
str::stream() << "the min and max of the existing zone " << tag.getMinKey()
- << " -->> "
- << tag.getMaxKey()
- << " have non-matching keys",
+ << " -->> " << tag.getMaxKey() << " have non-matching keys",
tagMinKeyElement.fieldNameStringData() ==
tagMaxKeyElement.fieldNameStringData());
@@ -350,20 +344,15 @@ void validateShardKeyAgainstExistingZones(OperationContext* opCtx,
uassert(ErrorCodes::InvalidOptions,
str::stream() << "the proposed shard key " << proposedKey.toString()
<< " does not match with the shard key of the existing zone "
- << tag.getMinKey()
- << " -->> "
- << tag.getMaxKey(),
+ << tag.getMinKey() << " -->> " << tag.getMaxKey(),
match);
if (ShardKeyPattern::isHashedPatternEl(proposedKeyElement) &&
(tagMinKeyElement.type() != NumberLong || tagMaxKeyElement.type() != NumberLong)) {
uasserted(ErrorCodes::InvalidOptions,
str::stream() << "cannot do hash sharding with the proposed key "
- << proposedKey.toString()
- << " because there exists a zone "
- << tag.getMinKey()
- << " -->> "
- << tag.getMaxKey()
+ << proposedKey.toString() << " because there exists a zone "
+ << tag.getMinKey() << " -->> " << tag.getMaxKey()
<< " whose boundaries are not "
"of type NumberLong");
}
@@ -418,8 +407,7 @@ boost::optional<UUID> getUUIDFromPrimaryShard(OperationContext* opCtx, const Nam
uassert(ErrorCodes::InternalError,
str::stream() << "expected to return a UUID for collection " << nss.ns()
- << " as part of 'info' field but got "
- << res,
+ << " as part of 'info' field but got " << res,
collectionInfo.hasField("uuid"));
return uassertStatusOK(UUID::parse(collectionInfo["uuid"]));
@@ -503,8 +491,7 @@ ShardCollectionTargetState calculateTargetState(OperationContext* opCtx,
if (fromMapReduce) {
uassert(ErrorCodes::ConflictingOperationInProgress,
str::stream() << "Map reduce with sharded output to a new collection found "
- << nss.ns()
- << " to be non-empty which is not supported.",
+ << nss.ns() << " to be non-empty which is not supported.",
isEmpty);
}
@@ -704,17 +691,21 @@ UUID shardCollection(OperationContext* opCtx,
InitialSplitPolicy::ShardCollectionConfig initialChunks;
boost::optional<ShardCollectionTargetState> targetState;
- auto writeChunkDocumentsAndRefreshShards = [&](
- const ShardCollectionTargetState& targetState,
- const InitialSplitPolicy::ShardCollectionConfig& initialChunks) {
- // Insert chunk documents to config.chunks on the config server.
- writeFirstChunksToConfig(opCtx, initialChunks);
-
- updateShardingCatalogEntryForCollection(
- opCtx, nss, targetState, initialChunks, *request.getCollation(), request.getUnique());
-
- refreshAllShards(opCtx, nss, dbPrimaryShardId, initialChunks.chunks);
- };
+ auto writeChunkDocumentsAndRefreshShards =
+ [&](const ShardCollectionTargetState& targetState,
+ const InitialSplitPolicy::ShardCollectionConfig& initialChunks) {
+ // Insert chunk documents to config.chunks on the config server.
+ writeFirstChunksToConfig(opCtx, initialChunks);
+
+ updateShardingCatalogEntryForCollection(opCtx,
+ nss,
+ targetState,
+ initialChunks,
+ *request.getCollation(),
+ request.getUnique());
+
+ refreshAllShards(opCtx, nss, dbPrimaryShardId, initialChunks.chunks);
+ };
{
// From this point onward the collection can only be read, not written to, so it is safe to
diff --git a/src/mongo/db/s/split_chunk.cpp b/src/mongo/db/s/split_chunk.cpp
index be2560efcee..ec8d3e9b530 100644
--- a/src/mongo/db/s/split_chunk.cpp
+++ b/src/mongo/db/s/split_chunk.cpp
@@ -137,15 +137,14 @@ StatusWith<boost::optional<ChunkRange>> splitChunk(OperationContext* opCtx,
//
// TODO(SERVER-25086): Remove distLock acquisition from split chunk
//
- const std::string whyMessage(
- str::stream() << "splitting chunk " << chunkRange.toString() << " in " << nss.toString());
+ const std::string whyMessage(str::stream() << "splitting chunk " << chunkRange.toString()
+ << " in " << nss.toString());
auto scopedDistLock = Grid::get(opCtx)->catalogClient()->getDistLockManager()->lock(
opCtx, nss.ns(), whyMessage, DistLockManager::kDefaultLockTimeout);
if (!scopedDistLock.isOK()) {
return scopedDistLock.getStatus().withContext(
str::stream() << "could not acquire collection lock for " << nss.toString()
- << " to split chunk "
- << chunkRange.toString());
+ << " to split chunk " << chunkRange.toString());
}
// If the shard key is hashed, then we must make sure that the split points are of type
@@ -157,12 +156,11 @@ StatusWith<boost::optional<ChunkRange>> splitChunk(OperationContext* opCtx,
BSONElement splitKeyElement = it.next();
if (splitKeyElement.type() != NumberLong) {
return {ErrorCodes::CannotSplit,
- str::stream() << "splitChunk cannot split chunk "
- << chunkRange.toString()
- << ", split point "
- << splitKeyElement.toString()
- << " must be of type "
- "NumberLong for hashed shard key patterns"};
+ str::stream()
+ << "splitChunk cannot split chunk " << chunkRange.toString()
+ << ", split point " << splitKeyElement.toString()
+ << " must be of type "
+ "NumberLong for hashed shard key patterns"};
}
}
}
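
[Editor's illustration] The split_chunk.cpp hunks above all show the same pattern: clang-format 7 packs consecutive "<<" operands of a str::stream() chain onto shared continuation lines instead of breaking after every operand. The sketch below reproduces that layout in isolation; it is not MongoDB code — std::ostringstream stands in for mongo::str::stream and chunkRange/nss hold made-up values.

// Minimal, self-contained sketch (assumed stand-ins, not MongoDB code):
// successive "<<" operands share a line and wrap under the first operand,
// rather than one operand per line as in the pre-format style.
#include <iostream>
#include <sstream>
#include <string>

int main() {
    const std::string chunkRange = "[{ x: MinKey }, { x: 0 })";  // hypothetical value
    const std::string nss = "test.coll";                         // hypothetical value

    std::ostringstream whyMessage;
    whyMessage << "splitting chunk " << chunkRange << " in " << nss;

    std::cout << whyMessage.str() << '\n';
    return 0;
}
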
diff --git a/src/mongo/db/s/transaction_coordinator.cpp b/src/mongo/db/s/transaction_coordinator.cpp
index eaf91e54c06..76c51e89ca6 100644
--- a/src/mongo/db/s/transaction_coordinator.cpp
+++ b/src/mongo/db/s/transaction_coordinator.cpp
@@ -291,13 +291,13 @@ TransactionCoordinator::TransactionCoordinator(ServiceContext* serviceContext,
return txn::deleteCoordinatorDoc(*_scheduler, _lsid, _txnNumber);
})
- .onCompletion([ this, deadlineFuture = std::move(deadlineFuture) ](Status s) mutable {
+ .onCompletion([this, deadlineFuture = std::move(deadlineFuture)](Status s) mutable {
// Interrupt this coordinator's scheduler hierarchy and join the deadline task's future
// in order to guarantee that there are no more threads running within the coordinator.
_scheduler->shutdown(
{ErrorCodes::TransactionCoordinatorDeadlineTaskCanceled, "Coordinator completed"});
- return std::move(deadlineFuture).onCompletion([ this, s = std::move(s) ](Status) {
+ return std::move(deadlineFuture).onCompletion([this, s = std::move(s)](Status) {
// Notify all the listeners which are interested in the coordinator's lifecycle.
// After this call, the coordinator object could potentially get destroyed by its
// lifetime controller, so there shouldn't be any accesses to `this` after this
@@ -373,8 +373,7 @@ void TransactionCoordinator::_done(Status status) {
if (status == ErrorCodes::TransactionCoordinatorSteppingDown)
status = Status(ErrorCodes::InterruptedDueToReplStateChange,
str::stream() << "Coordinator " << _lsid.getId() << ':' << _txnNumber
- << " stopped due to: "
- << status.reason());
+ << " stopped due to: " << status.reason());
LOG(3) << "Two-phase commit for " << _lsid.getId() << ':' << _txnNumber << " completed with "
<< redact(status);
diff --git a/src/mongo/db/s/transaction_coordinator_catalog.cpp b/src/mongo/db/s/transaction_coordinator_catalog.cpp
index b45b4449838..6fa5d45226e 100644
--- a/src/mongo/db/s/transaction_coordinator_catalog.cpp
+++ b/src/mongo/db/s/transaction_coordinator_catalog.cpp
@@ -61,8 +61,8 @@ void TransactionCoordinatorCatalog::onStepDown() {
stdx::unique_lock<stdx::mutex> ul(_mutex);
std::vector<std::shared_ptr<TransactionCoordinator>> coordinatorsToCancel;
- for (auto && [ sessionId, coordinatorsForSession ] : _coordinatorsBySession) {
- for (auto && [ txnNumber, coordinator ] : coordinatorsForSession) {
+ for (auto&& [sessionId, coordinatorsForSession] : _coordinatorsBySession) {
+ for (auto&& [txnNumber, coordinator] : coordinatorsForSession) {
coordinatorsToCancel.emplace_back(coordinator);
}
}
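
[Editor's illustration] The two rewritten loops above use the new structured-binding spelling, "auto&& [a, b]" with no spaces inside the brackets or around "&&". The standalone sketch below shows that shape with a hypothetical nested map; none of the identifiers come from the MongoDB sources.

// Minimal, self-contained sketch (hypothetical data, not MongoDB code):
// structured bindings written as "auto&& [key, value]" in the clang-format 7 style.
#include <iostream>
#include <map>
#include <string>

int main() {
    std::map<std::string, std::map<int, std::string>> coordinatorsBySession{
        {"session-1", {{1, "coordinator-a"}, {2, "coordinator-b"}}}};

    for (auto&& [sessionId, coordinatorsForSession] : coordinatorsBySession) {
        for (auto&& [txnNumber, coordinator] : coordinatorsForSession) {
            std::cout << sessionId << ':' << txnNumber << " -> " << coordinator << '\n';
        }
    }
    return 0;
}
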
diff --git a/src/mongo/db/s/transaction_coordinator_futures_util.cpp b/src/mongo/db/s/transaction_coordinator_futures_util.cpp
index c27e4c21eee..79128137b6b 100644
--- a/src/mongo/db/s/transaction_coordinator_futures_util.cpp
+++ b/src/mongo/db/s/transaction_coordinator_futures_util.cpp
@@ -83,8 +83,8 @@ Future<executor::TaskExecutor::ResponseStatus> AsyncWorkScheduler::scheduleRemot
// rather than going through the host targeting below. This ensures that the state changes
// for the participant and coordinator occur sequentially on a single branch of replica set
// history. See SERVER-38142 for details.
- return scheduleWork([ this, shardId, commandObj = commandObj.getOwned() ](OperationContext *
- opCtx) {
+ return scheduleWork([this, shardId, commandObj = commandObj.getOwned()](
+ OperationContext* opCtx) {
// Note: This internal authorization is tied to the lifetime of the client, which will
// be destroyed by 'scheduleWork' immediately after this lambda ends
AuthorizationSession::get(opCtx->getClient())
@@ -114,8 +114,8 @@ Future<executor::TaskExecutor::ResponseStatus> AsyncWorkScheduler::scheduleRemot
}
return _targetHostAsync(shardId, readPref)
- .then([ this, shardId, commandObj = commandObj.getOwned(), readPref ](
- HostAndShard hostAndShard) mutable {
+ .then([this, shardId, commandObj = commandObj.getOwned(), readPref](
+ HostAndShard hostAndShard) mutable {
executor::RemoteCommandRequest request(hostAndShard.hostTargeted,
NamespaceString::kAdminDb.toString(),
commandObj,
@@ -166,7 +166,7 @@ Future<executor::TaskExecutor::ResponseStatus> AsyncWorkScheduler::scheduleRemot
ul.unlock();
return std::move(pf.future).tapAll(
- [ this, it = std::move(it) ](StatusWith<ResponseStatus> s) {
+ [this, it = std::move(it)](StatusWith<ResponseStatus> s) {
stdx::lock_guard<stdx::mutex> lg(_mutex);
_activeHandles.erase(it);
_notifyAllTasksComplete(lg);
diff --git a/src/mongo/db/s/transaction_coordinator_futures_util.h b/src/mongo/db/s/transaction_coordinator_futures_util.h
index 1c654d8707f..7aef1fc8e78 100644
--- a/src/mongo/db/s/transaction_coordinator_futures_util.h
+++ b/src/mongo/db/s/transaction_coordinator_futures_util.h
@@ -115,7 +115,7 @@ public:
ul.unlock();
return std::move(pf.future).tapAll(
- [ this, it = std::move(it) ](StatusOrStatusWith<ReturnType> s) {
+ [this, it = std::move(it)](StatusOrStatusWith<ReturnType> s) {
stdx::lock_guard<stdx::mutex> lg(_mutex);
_activeHandles.erase(it);
_notifyAllTasksComplete(lg);
@@ -284,7 +284,7 @@ Future<GlobalResult> collect(std::vector<Future<IndividualResult>>&& futures,
combiner(std::move(combiner)) {}
/*****************************************************
* The first few fields have fixed values. *
- ******************************************************/
+ ******************************************************/
// Protects all state in the SharedBlock.
stdx::mutex mutex;
@@ -299,7 +299,7 @@ Future<GlobalResult> collect(std::vector<Future<IndividualResult>>&& futures,
/*****************************************************
* The below have initial values based on user input.*
- ******************************************************/
+ ******************************************************/
// The number of input futures that have not yet been resolved and processed.
size_t numOutstandingResponses;
// The variable where the intermediate results and final result is stored.
@@ -374,26 +374,25 @@ Future<FutureContinuationResult<LoopBodyFn>> doWhile(AsyncWorkScheduler& schedul
LoopBodyFn&& f) {
using ReturnType = typename decltype(f())::value_type;
auto future = f();
- return std::move(future).onCompletion([
- &scheduler,
- backoff = std::move(backoff),
- shouldRetryFn = std::forward<ShouldRetryFn>(shouldRetryFn),
- f = std::forward<LoopBodyFn>(f)
- ](StatusOrStatusWith<ReturnType> s) mutable {
- if (!shouldRetryFn(s))
- return Future<ReturnType>(std::move(s));
-
- // Retry after a delay.
- const auto delayMillis = (backoff ? backoff->nextSleep() : Milliseconds(0));
- return scheduler.scheduleWorkIn(delayMillis, [](OperationContext* opCtx) {}).then([
- &scheduler,
- backoff = std::move(backoff),
- shouldRetryFn = std::move(shouldRetryFn),
- f = std::move(f)
- ]() mutable {
- return doWhile(scheduler, std::move(backoff), std::move(shouldRetryFn), std::move(f));
+ return std::move(future).onCompletion(
+ [&scheduler,
+ backoff = std::move(backoff),
+ shouldRetryFn = std::forward<ShouldRetryFn>(shouldRetryFn),
+ f = std::forward<LoopBodyFn>(f)](StatusOrStatusWith<ReturnType> s) mutable {
+ if (!shouldRetryFn(s))
+ return Future<ReturnType>(std::move(s));
+
+ // Retry after a delay.
+ const auto delayMillis = (backoff ? backoff->nextSleep() : Milliseconds(0));
+ return scheduler.scheduleWorkIn(delayMillis, [](OperationContext* opCtx) {})
+ .then([&scheduler,
+ backoff = std::move(backoff),
+ shouldRetryFn = std::move(shouldRetryFn),
+ f = std::move(f)]() mutable {
+ return doWhile(
+ scheduler, std::move(backoff), std::move(shouldRetryFn), std::move(f));
+ });
});
- });
}
} // namespace txn
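
[Editor's illustration] Most of the futures-util changes above reflow lambdas: the capture list loses its inner padding ("[this, x = std::move(x)]" rather than "[ this, x = std::move(x) ]") and the '*' binds to the parameter type ("OperationContext* opCtx"). The sketch below shows the resulting shape in isolation; OperationContext and Scheduler are stand-in types invented for the example, not the real classes.

// Minimal, self-contained sketch (stand-in types, not MongoDB code):
// a lambda with an init-capture formatted in the clang-format 7 style.
#include <iostream>
#include <string>
#include <utility>

struct OperationContext {
    std::string name;
};

struct Scheduler {
    int pending = 0;

    void scheduleWork(std::string tag) {
        // Capture list without inner padding; pointer attached to the type.
        auto work = [this, tag = std::move(tag)](OperationContext* opCtx) {
            ++pending;
            std::cout << tag << " scheduled on " << opCtx->name << '\n';
        };

        OperationContext opCtx{"shard0"};
        work(&opCtx);
    }
};

int main() {
    Scheduler scheduler;
    scheduler.scheduleWork("prepareTransaction");
    return 0;
}
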
diff --git a/src/mongo/db/s/transaction_coordinator_futures_util_test.cpp b/src/mongo/db/s/transaction_coordinator_futures_util_test.cpp
index f2054c59f62..fb145b325aa 100644
--- a/src/mongo/db/s/transaction_coordinator_futures_util_test.cpp
+++ b/src/mongo/db/s/transaction_coordinator_futures_util_test.cpp
@@ -359,7 +359,7 @@ TEST_F(AsyncWorkSchedulerTest, ScheduledBlockingWorkSucceeds) {
unittest::Barrier barrier(2);
auto pf = makePromiseFuture<int>();
auto future =
- async.scheduleWork([&barrier, future = std::move(pf.future) ](OperationContext * opCtx) {
+ async.scheduleWork([&barrier, future = std::move(pf.future)](OperationContext* opCtx) {
barrier.countDownAndWait();
return future.get(opCtx);
});
@@ -377,7 +377,7 @@ TEST_F(AsyncWorkSchedulerTest, ScheduledBlockingWorkThrowsException) {
unittest::Barrier barrier(2);
auto pf = makePromiseFuture<int>();
auto future =
- async.scheduleWork([&barrier, future = std::move(pf.future) ](OperationContext * opCtx) {
+ async.scheduleWork([&barrier, future = std::move(pf.future)](OperationContext* opCtx) {
barrier.countDownAndWait();
future.get(opCtx);
uasserted(ErrorCodes::InternalError, "Test error");
@@ -396,7 +396,7 @@ TEST_F(AsyncWorkSchedulerTest, ScheduledBlockingWorkInSucceeds) {
auto pf = makePromiseFuture<int>();
auto future = async.scheduleWorkIn(
Milliseconds{10},
- [future = std::move(pf.future)](OperationContext * opCtx) { return future.get(opCtx); });
+ [future = std::move(pf.future)](OperationContext* opCtx) { return future.get(opCtx); });
pf.promise.emplaceValue(5);
ASSERT(!future.isReady());
diff --git a/src/mongo/db/s/transaction_coordinator_service.cpp b/src/mongo/db/s/transaction_coordinator_service.cpp
index dac4caee608..6be674d1ad7 100644
--- a/src/mongo/db/s/transaction_coordinator_service.cpp
+++ b/src/mongo/db/s/transaction_coordinator_service.cpp
@@ -147,7 +147,7 @@ void TransactionCoordinatorService::onStepUp(OperationContext* opCtx,
_catalogAndScheduler->scheduler
.scheduleWorkIn(
recoveryDelayForTesting,
- [catalogAndScheduler = _catalogAndScheduler](OperationContext * opCtx) {
+ [catalogAndScheduler = _catalogAndScheduler](OperationContext* opCtx) {
auto& replClientInfo = repl::ReplClientInfo::forClient(opCtx->getClient());
replClientInfo.setLastOpToSystemLastOpTime(opCtx);
diff --git a/src/mongo/db/s/transaction_coordinator_structures_test.cpp b/src/mongo/db/s/transaction_coordinator_structures_test.cpp
index f29b442559b..df1d3cc2ade 100644
--- a/src/mongo/db/s/transaction_coordinator_structures_test.cpp
+++ b/src/mongo/db/s/transaction_coordinator_structures_test.cpp
@@ -44,8 +44,7 @@ TEST(CoordinatorCommitDecisionTest, SerializeCommitHasTimestampAndNoAbortStatus)
ASSERT_BSONOBJ_EQ(BSON("decision"
<< "commit"
- << "commitTimestamp"
- << Timestamp(100, 200)),
+ << "commitTimestamp" << Timestamp(100, 200)),
obj);
}
diff --git a/src/mongo/db/s/transaction_coordinator_test.cpp b/src/mongo/db/s/transaction_coordinator_test.cpp
index 7e88a292067..ebcd839b2ab 100644
--- a/src/mongo/db/s/transaction_coordinator_test.cpp
+++ b/src/mongo/db/s/transaction_coordinator_test.cpp
@@ -192,8 +192,7 @@ auto makeDummyPrepareCommand(const LogicalSessionId& lsid, const TxnNumber& txnN
prepareCmd.setDbName(NamespaceString::kAdminDb);
auto prepareObj = prepareCmd.toBSON(
BSON("lsid" << lsid.toBSON() << "txnNumber" << txnNumber << "autocommit" << false
- << WriteConcernOptions::kWriteConcernField
- << WriteConcernOptions::Majority));
+ << WriteConcernOptions::kWriteConcernField << WriteConcernOptions::Majority));
return prepareObj;
@@ -546,17 +545,23 @@ protected:
TxnNumber txnNumber,
const std::vector<ShardId>& participants,
const boost::optional<Timestamp>& commitTimestamp) {
- txn::persistDecision(*_aws, lsid, txnNumber, participants, [&] {
- txn::CoordinatorCommitDecision decision;
- if (commitTimestamp) {
- decision.setDecision(txn::CommitDecision::kCommit);
- decision.setCommitTimestamp(commitTimestamp);
- } else {
- decision.setDecision(txn::CommitDecision::kAbort);
- decision.setAbortStatus(Status(ErrorCodes::NoSuchTransaction, "Test abort status"));
- }
- return decision;
- }()).get();
+ txn::persistDecision(*_aws,
+ lsid,
+ txnNumber,
+ participants,
+ [&] {
+ txn::CoordinatorCommitDecision decision;
+ if (commitTimestamp) {
+ decision.setDecision(txn::CommitDecision::kCommit);
+ decision.setCommitTimestamp(commitTimestamp);
+ } else {
+ decision.setDecision(txn::CommitDecision::kAbort);
+ decision.setAbortStatus(Status(ErrorCodes::NoSuchTransaction,
+ "Test abort status"));
+ }
+ return decision;
+ }())
+ .get();
auto allCoordinatorDocs = txn::readAllCoordinatorDocs(opCtx);
ASSERT_EQUALS(allCoordinatorDocs.size(), size_t(1));
@@ -733,11 +738,17 @@ TEST_F(TransactionCoordinatorDriverPersistenceTest,
// Delete the document for the first transaction and check that only the second transaction's
// document still exists.
- txn::persistDecision(*_aws, _lsid, txnNumber1, _participants, [&] {
- txn::CoordinatorCommitDecision decision(txn::CommitDecision::kAbort);
- decision.setAbortStatus(Status(ErrorCodes::NoSuchTransaction, "Test abort error"));
- return decision;
- }()).get();
+ txn::persistDecision(*_aws,
+ _lsid,
+ txnNumber1,
+ _participants,
+ [&] {
+ txn::CoordinatorCommitDecision decision(txn::CommitDecision::kAbort);
+ decision.setAbortStatus(
+ Status(ErrorCodes::NoSuchTransaction, "Test abort error"));
+ return decision;
+ }())
+ .get();
txn::deleteCoordinatorDoc(*_aws, _lsid, txnNumber1).get();
allCoordinatorDocs = txn::readAllCoordinatorDocs(operationContext());
@@ -1466,8 +1477,7 @@ TEST_F(TransactionCoordinatorMetricsTest, SimpleTwoPhaseCommitRealCoordinator) {
setGlobalFailPoint("hangBeforeWaitingForParticipantListWriteConcern",
BSON("mode"
<< "alwaysOn"
- << "data"
- << BSON("useUninterruptibleSleep" << 1)));
+ << "data" << BSON("useUninterruptibleSleep" << 1)));
coordinator.runCommit(kTwoShardIdList);
waitUntilCoordinatorDocIsPresent();
@@ -1511,8 +1521,7 @@ TEST_F(TransactionCoordinatorMetricsTest, SimpleTwoPhaseCommitRealCoordinator) {
setGlobalFailPoint("hangBeforeWaitingForDecisionWriteConcern",
BSON("mode"
<< "alwaysOn"
- << "data"
- << BSON("useUninterruptibleSleep" << 1)));
+ << "data" << BSON("useUninterruptibleSleep" << 1)));
// Respond to the second prepare request in a separate thread, because the coordinator will
// hijack that thread to run its continuation.
assertPrepareSentAndRespondWithSuccess();
@@ -1562,8 +1571,7 @@ TEST_F(TransactionCoordinatorMetricsTest, SimpleTwoPhaseCommitRealCoordinator) {
setGlobalFailPoint("hangAfterDeletingCoordinatorDoc",
BSON("mode"
<< "alwaysOn"
- << "data"
- << BSON("useUninterruptibleSleep" << 1)));
+ << "data" << BSON("useUninterruptibleSleep" << 1)));
// Respond to the second commit request in a separate thread, because the coordinator will
// hijack that thread to run its continuation.
assertCommitSentAndRespondWithSuccess();
@@ -2122,11 +2130,10 @@ TEST_F(TransactionCoordinatorMetricsTest, SlowLogLineIncludesTransactionParamete
runSimpleTwoPhaseCommitWithCommitDecisionAndCaptureLogLines();
BSONObjBuilder lsidBob;
_lsid.serialize(&lsidBob);
- ASSERT_EQUALS(
- 1,
- countLogLinesContaining(str::stream() << "parameters:{ lsid: " << lsidBob.done().toString()
- << ", txnNumber: "
- << _txnNumber));
+ ASSERT_EQUALS(1,
+ countLogLinesContaining(str::stream()
+ << "parameters:{ lsid: " << lsidBob.done().toString()
+ << ", txnNumber: " << _txnNumber));
}
TEST_F(TransactionCoordinatorMetricsTest,
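
[Editor's illustration] The persistDecision hunks above show how clang-format 7 lays out a call whose argument list includes an immediately invoked lambda: each argument gets its own line, with the lambda body indented under its opening line. The sketch below imitates that shape with ordinary standalone code; persistDecision, Decision, and all values here are invented stand-ins, not the txn:: API.

// Minimal, self-contained sketch (hypothetical helper, not MongoDB code):
// one argument per line when an immediately invoked lambda is passed as an argument.
#include <iostream>
#include <string>

struct Decision {
    std::string kind;
};

// Stand-in for a call that would normally return a future in the real code.
Decision persistDecision(const std::string& lsid, int txnNumber, Decision decision) {
    std::cout << "persisting " << decision.kind << " for " << lsid << ':' << txnNumber << '\n';
    return decision;
}

int main() {
    const bool commit = true;

    auto persisted = persistDecision("session-1",
                                     7,
                                     [&] {
                                         Decision decision;
                                         decision.kind = commit ? "commit" : "abort";
                                         return decision;
                                     }());

    std::cout << "persisted decision: " << persisted.kind << '\n';
    return 0;
}
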
diff --git a/src/mongo/db/s/transaction_coordinator_util.cpp b/src/mongo/db/s/transaction_coordinator_util.cpp
index f49da0ac61f..dbffc60de1d 100644
--- a/src/mongo/db/s/transaction_coordinator_util.cpp
+++ b/src/mongo/db/s/transaction_coordinator_util.cpp
@@ -126,8 +126,7 @@ repl::OpTime persistParticipantListBlocking(OperationContext* opCtx,
BSONObj sameParticipantList =
BSON("$and" << buildParticipantListMatchesConditions(participantList));
entry.setQ(BSON(TransactionCoordinatorDocument::kIdFieldName
- << sessionInfo.toBSON()
- << "$or"
+ << sessionInfo.toBSON() << "$or"
<< BSON_ARRAY(noParticipantList << sameParticipantList)));
// Update with participant list.
@@ -154,13 +153,9 @@ repl::OpTime persistParticipantListBlocking(OperationContext* opCtx,
QUERY(TransactionCoordinatorDocument::kIdFieldName << sessionInfo.toBSON()));
uasserted(51025,
str::stream() << "While attempting to write participant list "
- << buildParticipantListString(participantList)
- << " for "
- << lsid.getId()
- << ':'
- << txnNumber
- << ", found document with a different participant list: "
- << doc);
+ << buildParticipantListString(participantList) << " for "
+ << lsid.getId() << ':' << txnNumber
+ << ", found document with a different participant list: " << doc);
}
// Throw any other error.
@@ -223,8 +218,7 @@ Future<PrepareVoteConsensus> sendPrepare(ServiceContext* service,
prepareTransaction.setDbName(NamespaceString::kAdminDb);
auto prepareObj = prepareTransaction.toBSON(
BSON("lsid" << lsid.toBSON() << "txnNumber" << txnNumber << "autocommit" << false
- << WriteConcernOptions::kWriteConcernField
- << WriteConcernOptions::Majority));
+ << WriteConcernOptions::kWriteConcernField << WriteConcernOptions::Majority));
std::vector<Future<PrepareResponse>> responses;
@@ -245,7 +239,7 @@ Future<PrepareVoteConsensus> sendPrepare(ServiceContext* service,
// Initial value
PrepareVoteConsensus{int(participants.size())},
// Aggregates an incoming response (next) with the existing aggregate value (result)
- [&prepareScheduler = *prepareScheduler](PrepareVoteConsensus & result,
+ [&prepareScheduler = *prepareScheduler](PrepareVoteConsensus& result,
const PrepareResponse& next) {
result.registerVote(next);
@@ -300,10 +294,8 @@ repl::OpTime persistDecisionBlocking(OperationContext* opCtx,
BSON(TransactionCoordinatorDocument::kDecisionFieldName << decision.toBSON());
entry.setQ(BSON(TransactionCoordinatorDocument::kIdFieldName
- << sessionInfo.toBSON()
- << "$and"
- << buildParticipantListMatchesConditions(participantList)
- << "$or"
+ << sessionInfo.toBSON() << "$and"
+ << buildParticipantListMatchesConditions(participantList) << "$or"
<< BSON_ARRAY(noDecision << sameDecision)));
entry.setU([&] {
@@ -333,11 +325,8 @@ repl::OpTime persistDecisionBlocking(OperationContext* opCtx,
QUERY(TransactionCoordinatorDocument::kIdFieldName << sessionInfo.toBSON()));
uasserted(51026,
str::stream() << "While attempting to write decision "
- << (isCommit ? "'commit'" : "'abort'")
- << " for"
- << lsid.getId()
- << ':'
- << txnNumber
+ << (isCommit ? "'commit'" : "'abort'") << " for" << lsid.getId()
+ << ':' << txnNumber
<< ", either failed to find document for this lsid:txnNumber or "
"document existed with a different participant list, decision "
"or commitTimestamp: "
@@ -379,8 +368,7 @@ Future<void> sendCommit(ServiceContext* service,
commitTransaction.setCommitTimestamp(commitTimestamp);
auto commitObj = commitTransaction.toBSON(
BSON("lsid" << lsid.toBSON() << "txnNumber" << txnNumber << "autocommit" << false
- << WriteConcernOptions::kWriteConcernField
- << WriteConcernOptions::Majority));
+ << WriteConcernOptions::kWriteConcernField << WriteConcernOptions::Majority));
std::vector<Future<void>> responses;
for (const auto& participant : participants) {
@@ -398,8 +386,7 @@ Future<void> sendAbort(ServiceContext* service,
abortTransaction.setDbName(NamespaceString::kAdminDb);
auto abortObj = abortTransaction.toBSON(
BSON("lsid" << lsid.toBSON() << "txnNumber" << txnNumber << "autocommit" << false
- << WriteConcernOptions::kWriteConcernField
- << WriteConcernOptions::Majority));
+ << WriteConcernOptions::kWriteConcernField << WriteConcernOptions::Majority));
std::vector<Future<void>> responses;
for (const auto& participant : participants) {
@@ -529,12 +516,12 @@ Future<PrepareResponse> sendPrepareToShard(ServiceContext* service,
swPrepareResponse != ErrorCodes::TransactionCoordinatorSteppingDown &&
swPrepareResponse != ErrorCodes::TransactionCoordinatorReachedAbortDecision;
},
- [&scheduler, shardId, isLocalShard, commandObj = commandObj.getOwned() ] {
+ [&scheduler, shardId, isLocalShard, commandObj = commandObj.getOwned()] {
LOG(3) << "Coordinator going to send command " << commandObj << " to "
<< (isLocalShard ? " local " : "") << " shard " << shardId;
return scheduler.scheduleRemoteCommand(shardId, kPrimaryReadPreference, commandObj)
- .then([ shardId, commandObj = commandObj.getOwned() ](ResponseStatus response) {
+ .then([shardId, commandObj = commandObj.getOwned()](ResponseStatus response) {
auto status = getStatusFromCommandResult(response.data);
auto wcStatus = getWriteConcernStatusFromCommandResult(response.data);
@@ -621,12 +608,12 @@ Future<void> sendDecisionToShard(ServiceContext* service,
// coordinator-specific code.
return !s.isOK() && s != ErrorCodes::TransactionCoordinatorSteppingDown;
},
- [&scheduler, shardId, isLocalShard, commandObj = commandObj.getOwned() ] {
+ [&scheduler, shardId, isLocalShard, commandObj = commandObj.getOwned()] {
LOG(3) << "Coordinator going to send command " << commandObj << " to "
<< (isLocalShard ? "local" : "") << " shard " << shardId;
return scheduler.scheduleRemoteCommand(shardId, kPrimaryReadPreference, commandObj)
- .then([ shardId, commandObj = commandObj.getOwned() ](ResponseStatus response) {
+ .then([shardId, commandObj = commandObj.getOwned()](ResponseStatus response) {
auto status = getStatusFromCommandResult(response.data);
auto wcStatus = getWriteConcernStatusFromCommandResult(response.data);
diff --git a/src/mongo/db/s/txn_two_phase_commit_cmds.cpp b/src/mongo/db/s/txn_two_phase_commit_cmds.cpp
index 3cb6b8c1cbe..b48811ec994 100644
--- a/src/mongo/db/s/txn_two_phase_commit_cmds.cpp
+++ b/src/mongo/db/s/txn_two_phase_commit_cmds.cpp
@@ -132,12 +132,11 @@ public:
replClient.setLastOp(opCtx, prepareOpTime);
}
- invariant(opCtx->recoveryUnit()->getPrepareTimestamp() ==
- prepareOpTime.getTimestamp(),
- str::stream() << "recovery unit prepareTimestamp: "
- << opCtx->recoveryUnit()->getPrepareTimestamp().toString()
- << " participant prepareOpTime: "
- << prepareOpTime.toString());
+ invariant(
+ opCtx->recoveryUnit()->getPrepareTimestamp() == prepareOpTime.getTimestamp(),
+ str::stream() << "recovery unit prepareTimestamp: "
+ << opCtx->recoveryUnit()->getPrepareTimestamp().toString()
+ << " participant prepareOpTime: " << prepareOpTime.toString());
if (MONGO_FAIL_POINT(
participantReturnNetworkErrorForPrepareAfterExecutingPrepareLogic)) {
diff --git a/src/mongo/db/s/type_shard_identity_test.cpp b/src/mongo/db/s/type_shard_identity_test.cpp
index 56c2ca059de..b4999f5c6eb 100644
--- a/src/mongo/db/s/type_shard_identity_test.cpp
+++ b/src/mongo/db/s/type_shard_identity_test.cpp
@@ -46,9 +46,7 @@ TEST(ShardIdentityType, RoundTrip) {
<< "shardIdentity"
<< "shardName"
<< "s1"
- << "clusterId"
- << clusterId
- << "configsvrConnectionString"
+ << "clusterId" << clusterId << "configsvrConnectionString"
<< "test/a:123");
auto result = ShardIdentityType::fromShardIdentityDocument(doc);
@@ -67,8 +65,7 @@ TEST(ShardIdentityType, ParseMissingId) {
<< "test/a:123"
<< "shardName"
<< "s1"
- << "clusterId"
- << OID::gen());
+ << "clusterId" << OID::gen());
auto result = ShardIdentityType::fromShardIdentityDocument(doc);
ASSERT_NOT_OK(result.getStatus());
@@ -79,8 +76,7 @@ TEST(ShardIdentityType, ParseMissingConfigsvrConnString) {
<< "shardIdentity"
<< "shardName"
<< "s1"
- << "clusterId"
- << OID::gen());
+ << "clusterId" << OID::gen());
auto result = ShardIdentityType::fromShardIdentityDocument(doc);
ASSERT_NOT_OK(result.getStatus());
@@ -91,8 +87,7 @@ TEST(ShardIdentityType, ParseMissingShardName) {
<< "shardIdentity"
<< "configsvrConnectionString"
<< "test/a:123"
- << "clusterId"
- << OID::gen());
+ << "clusterId" << OID::gen());
auto result = ShardIdentityType::fromShardIdentityDocument(doc);
ASSERT_NOT_OK(result.getStatus());
@@ -118,8 +113,7 @@ TEST(ShardIdentityType, InvalidConnectionString) {
<< "test/,,,"
<< "shardName"
<< "s1"
- << "clusterId"
- << clusterId);
+ << "clusterId" << clusterId);
ASSERT_EQ(ErrorCodes::FailedToParse,
ShardIdentityType::fromShardIdentityDocument(doc).getStatus());
@@ -133,8 +127,7 @@ TEST(ShardIdentityType, NonReplSetConnectionString) {
<< "local:123"
<< "shardName"
<< "s1"
- << "clusterId"
- << clusterId);
+ << "clusterId" << clusterId);
ASSERT_EQ(ErrorCodes::UnsupportedFormat,
ShardIdentityType::fromShardIdentityDocument(doc).getStatus());
@@ -147,5 +140,5 @@ TEST(ShardIdentityType, CreateUpdateObject) {
ASSERT_BSONOBJ_EQ(expectedObj, updateObj);
}
+} // namespace
} // namespace mongo
-} // unnamed namespace
diff --git a/src/mongo/db/s/wait_for_ongoing_chunk_splits_command.cpp b/src/mongo/db/s/wait_for_ongoing_chunk_splits_command.cpp
index 1ff67ff3257..d1ceaaeeba6 100644
--- a/src/mongo/db/s/wait_for_ongoing_chunk_splits_command.cpp
+++ b/src/mongo/db/s/wait_for_ongoing_chunk_splits_command.cpp
@@ -90,5 +90,5 @@ MONGO_INITIALIZER(RegisterWaitForOngoingChunkSplitsCommand)(InitializerContext*
}
return Status::OK();
}
-}
-}
+} // namespace
+} // namespace mongo