author     clang-format-7.0.1 <adam.martin@10gen.com>        2019-07-26 18:42:24 -0400
committer  ADAM David Alan Martin <adam.martin@10gen.com>    2019-07-26 18:42:24 -0400
commit     c1a45ebbb0530e3d0201321d725527f1eb83ffce (patch)
tree       f523079dc5ded3052eefbdcaae424b7502df5b25 /src/mongo/s
parent     c9599d8610c3da0b7c3da65667aff821063cf5b9 (diff)
Apply formatting per `clang-format-7.0.1`
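
Two mechanical patterns account for most of the churn in this patch. The first is the spacing of C++17 structured bindings: the previous formatter glued the binding list to `auto`, and clang-format-7.0.1 restores the space. A minimal before/after sketch, distilled from the async_requests_sender.cpp hunk below:

    // old formatter output: no space before the binding list
    auto[p, f] = makePromiseFuture<RemoteCommandOnAnyCallbackArgs>();

    // clang-format-7.0.1 output: idiomatic spacing
    auto [p, f] = makePromiseFuture<RemoteCommandOnAnyCallbackArgs>();
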
Diffstat (limited to 'src/mongo/s')
-rw-r--r--  src/mongo/s/async_requests_sender.cpp                              |   7
-rw-r--r--  src/mongo/s/balancer_configuration_test.cpp                        |   3
-rw-r--r--  src/mongo/s/catalog/dist_lock_catalog_impl.cpp                     |  27
-rw-r--r--  src/mongo/s/catalog/dist_lock_catalog_mock.cpp                     |  37
-rw-r--r--  src/mongo/s/catalog/dist_lock_catalog_mock.h                       |   2
-rw-r--r--  src/mongo/s/catalog/dist_lock_manager_mock.cpp                     |   8
-rw-r--r--  src/mongo/s/catalog/dist_lock_ping_info.cpp                        |   2
-rw-r--r--  src/mongo/s/catalog/dist_lock_ping_info.h                          |   2
-rw-r--r--  src/mongo/s/catalog/mongo_version_range.cpp                        |   2
-rw-r--r--  src/mongo/s/catalog/mongo_version_range.h                          |   2
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_client_impl.cpp               |  36
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_test.cpp                      |  74
-rw-r--r--  src/mongo/s/catalog/type_changelog_test.cpp                        |  51
-rw-r--r--  src/mongo/s/catalog/type_chunk.cpp                                 |  15
-rw-r--r--  src/mongo/s/catalog/type_chunk_test.cpp                            | 123
-rw-r--r--  src/mongo/s/catalog/type_collection_test.cpp                       | 115
-rw-r--r--  src/mongo/s/catalog/type_config_version_test.cpp                   |   8
-rw-r--r--  src/mongo/s/catalog/type_database.cpp                              |   8
-rw-r--r--  src/mongo/s/catalog/type_database_test.cpp                         |   3
-rw-r--r--  src/mongo/s/catalog/type_locks_test.cpp                            |  94
-rw-r--r--  src/mongo/s/catalog/type_mongos_test.cpp                           |  80
-rw-r--r--  src/mongo/s/catalog/type_shard_database.cpp                        |   8
-rw-r--r--  src/mongo/s/catalog/type_shard_test.cpp                            |  11
-rw-r--r--  src/mongo/s/catalog/type_tags_test.cpp                             |  14
-rw-r--r--  src/mongo/s/catalog_cache.cpp                                      |  22
-rw-r--r--  src/mongo/s/chunk.cpp                                              |   3
-rw-r--r--  src/mongo/s/chunk_manager.cpp                                      |   6
-rw-r--r--  src/mongo/s/chunk_manager_index_bounds_test.cpp                    |   3
-rw-r--r--  src/mongo/s/client/parallel.cpp                                    |  31
-rw-r--r--  src/mongo/s/client/shard.h                                         |  20
-rw-r--r--  src/mongo/s/client/shard_registry.cpp                              |   4
-rw-r--r--  src/mongo/s/client/shard_remote.cpp                                |   1
-rw-r--r--  src/mongo/s/client/shard_remote.h                                  |   8
-rw-r--r--  src/mongo/s/client/sharding_connection_hook.cpp                    |   4
-rw-r--r--  src/mongo/s/client/version_manager.cpp                             |  37
-rw-r--r--  src/mongo/s/cluster_commands_helpers.cpp                           |  10
-rw-r--r--  src/mongo/s/commands/cluster_count_cmd.cpp                         |   6
-rw-r--r--  src/mongo/s/commands/cluster_data_size_cmd.cpp                     |   7
-rw-r--r--  src/mongo/s/commands/cluster_explain.cpp                           |  19
-rw-r--r--  src/mongo/s/commands/cluster_explain_cmd.cpp                       |   3
-rw-r--r--  src/mongo/s/commands/cluster_find_cmd.cpp                          |   2
-rw-r--r--  src/mongo/s/commands/cluster_find_test.cpp                         |   3
-rw-r--r--  src/mongo/s/commands/cluster_kill_op.cpp                           |   4
-rw-r--r--  src/mongo/s/commands/cluster_map_reduce_cmd.cpp                    |  20
-rw-r--r--  src/mongo/s/commands/cluster_merge_chunks_cmd.cpp                  |   8
-rw-r--r--  src/mongo/s/commands/cluster_move_chunk_cmd.cpp                    |  15
-rw-r--r--  src/mongo/s/commands/cluster_split_cmd.cpp                         |  20
-rw-r--r--  src/mongo/s/commands/commands_public.cpp                           |   4
-rw-r--r--  src/mongo/s/commands/strategy.cpp                                  |  12
-rw-r--r--  src/mongo/s/grid.cpp                                               |   5
-rw-r--r--  src/mongo/s/mongos_options.h                                       |   2
-rw-r--r--  src/mongo/s/query/async_results_merger.cpp                         |   6
-rw-r--r--  src/mongo/s/query/async_results_merger_test.cpp                    |  85
-rw-r--r--  src/mongo/s/query/blocking_results_merger_test.cpp                 |   1
-rw-r--r--  src/mongo/s/query/cluster_aggregate.cpp                            |   7
-rw-r--r--  src/mongo/s/query/cluster_aggregation_planner.cpp                  |   3
-rw-r--r--  src/mongo/s/query/cluster_client_cursor_params.h                   |   2
-rw-r--r--  src/mongo/s/query/cluster_cursor_manager.cpp                       |  11
-rw-r--r--  src/mongo/s/query/cluster_cursor_manager.h                         |   2
-rw-r--r--  src/mongo/s/query/cluster_find.cpp                                 |  37
-rw-r--r--  src/mongo/s/query/router_stage_pipeline.cpp                        |   3
-rw-r--r--  src/mongo/s/query/router_stage_remove_metadata_fields_test.cpp    |   5
-rw-r--r--  src/mongo/s/query/store_possible_cursor.h                          |   2
-rw-r--r--  src/mongo/s/request_types/add_shard_request_test.cpp               |  51
-rw-r--r--  src/mongo/s/request_types/add_shard_to_zone_request_test.cpp      |  15
-rw-r--r--  src/mongo/s/request_types/balance_chunk_request_test.cpp           |  42
-rw-r--r--  src/mongo/s/request_types/merge_chunk_request_test.cpp             | 116
-rw-r--r--  src/mongo/s/request_types/migration_secondary_throttle_options_test.cpp |   5
-rw-r--r--  src/mongo/s/request_types/remove_shard_from_zone_request_test.cpp  |  15
-rw-r--r--  src/mongo/s/request_types/set_shard_version_request_test.cpp       | 256
-rw-r--r--  src/mongo/s/request_types/split_chunk_request_test.cpp             | 257
-rw-r--r--  src/mongo/s/request_types/split_chunk_request_type.cpp             |   4
-rw-r--r--  src/mongo/s/request_types/update_zone_key_range_request_type.cpp   |   5
-rw-r--r--  src/mongo/s/server.cpp                                             |   2
-rw-r--r--  src/mongo/s/shard_key_pattern.cpp                                  |   8
-rw-r--r--  src/mongo/s/shard_key_pattern_test.cpp                             |  12
-rw-r--r--  src/mongo/s/shard_util.cpp                                         |  14
-rw-r--r--  src/mongo/s/sharding_egress_metadata_hook.cpp                      |   4
-rw-r--r--  src/mongo/s/sharding_initialization.h                              |   2
-rw-r--r--  src/mongo/s/sharding_mongod_test_fixture.cpp                       |   5
-rw-r--r--  src/mongo/s/sharding_router_test_fixture.cpp                       |   6
-rw-r--r--  src/mongo/s/sharding_task_executor.cpp                             |   9
-rw-r--r--  src/mongo/s/sharding_task_executor_pool_controller.cpp             |   2
-rw-r--r--  src/mongo/s/transaction_router.cpp                                 |  45
-rw-r--r--  src/mongo/s/transaction_router.h                                   | 271
-rw-r--r--  src/mongo/s/transaction_router_test.cpp                            | 179
-rw-r--r--  src/mongo/s/write_ops/batch_downconvert.cpp                        |  11
-rw-r--r--  src/mongo/s/write_ops/batch_downconvert_test.cpp                   |  13
-rw-r--r--  src/mongo/s/write_ops/batch_write_exec.cpp                         |  13
-rw-r--r--  src/mongo/s/write_ops/batch_write_op.cpp                           |  22
-rw-r--r--  src/mongo/s/write_ops/batched_command_request_test.cpp             |  20
-rw-r--r--  src/mongo/s/write_ops/batched_command_response.cpp                 |   6
-rw-r--r--  src/mongo/s/write_ops/batched_command_response_test.cpp            |  14
-rw-r--r--  src/mongo/s/write_ops/chunk_manager_targeter.cpp                   |  24
94 files changed, 1029 insertions, 1594 deletions
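
The second pattern, visible throughout the hunks below, is denser wrapping of `operator<<` chains in BSON and stream builders: rather than breaking after every operand, clang-format-7.0.1 packs operands onto each continuation line up to the column limit (MongoDB's configuration uses a 100-column limit) before wrapping. An illustrative reduction, taken from the applyChunkOpsDeprecated hunk in sharding_catalog_client_impl.cpp below:

    // before: roughly one operand per continuation line
    BSONObj cmd = BSON("applyOps" << updateOps << "preCondition" << preCondition
                       << WriteConcernOptions::kWriteConcernField
                       << writeConcern.toBSON());

    // after: operands packed up to the column limit
    BSONObj cmd =
        BSON("applyOps" << updateOps << "preCondition" << preCondition
                        << WriteConcernOptions::kWriteConcernField << writeConcern.toBSON());
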
diff --git a/src/mongo/s/async_requests_sender.cpp b/src/mongo/s/async_requests_sender.cpp
index 609b8db39fb..7ecfe36313e 100644
--- a/src/mongo/s/async_requests_sender.cpp
+++ b/src/mongo/s/async_requests_sender.cpp
@@ -186,7 +186,7 @@ auto AsyncRequestsSender::RemoteData::scheduleRemoteCommand(std::vector<HostAndP
// We have to make a promise future pair because the TaskExecutor doesn't currently support a
// future returning variant of scheduleRemoteCommand
- auto[p, f] = makePromiseFuture<RemoteCommandOnAnyCallbackArgs>();
+ auto [p, f] = makePromiseFuture<RemoteCommandOnAnyCallbackArgs>();
// Failures to schedule skip the retry loop
uassertStatusOK(_ars->_subExecutor->scheduleRemoteCommandOnAny(
@@ -242,8 +242,9 @@ auto AsyncRequestsSender::RemoteData::handleResponse(RemoteCommandOnAnyCallbackA
_retryCount < kMaxNumFailedHostRetryAttempts) {
LOG(1) << "Command to remote " << _shardId
- << (failedTargets.empty() ? " " : (failedTargets.size() > 1 ? " for hosts "
- : " at host "))
+ << (failedTargets.empty()
+ ? " "
+ : (failedTargets.size() > 1 ? " for hosts " : " at host "))
<< "{}"_format(fmt::join(failedTargets, ", "))
<< "failed with retriable error and will be retried "
<< causedBy(redact(status));
diff --git a/src/mongo/s/balancer_configuration_test.cpp b/src/mongo/s/balancer_configuration_test.cpp
index b456aa29039..2081f9ec8b3 100644
--- a/src/mongo/s/balancer_configuration_test.cpp
+++ b/src/mongo/s/balancer_configuration_test.cpp
@@ -310,8 +310,7 @@ TEST(BalancerSettingsType, InvalidBalancingWindowTimeFormat) {
ASSERT_NOT_OK(BalancerSettingsType::fromBSON(BSON("activeWindow" << BSON("start"
<< "23:00"
- << "stop"
- << 6LL)))
+ << "stop" << 6LL)))
.getStatus());
}
diff --git a/src/mongo/s/catalog/dist_lock_catalog_impl.cpp b/src/mongo/s/catalog/dist_lock_catalog_impl.cpp
index 7a4b6e1564a..d8574212532 100644
--- a/src/mongo/s/catalog/dist_lock_catalog_impl.cpp
+++ b/src/mongo/s/catalog/dist_lock_catalog_impl.cpp
@@ -93,8 +93,7 @@ StatusWith<BSONObj> extractFindAndModifyNewObj(StatusWith<Shard::CommandResponse
return {ErrorCodes::UnsupportedFormat,
str::stream() << "expected an object from the findAndModify response '"
<< kFindAndModifyResponseResultDocField
- << "'field, got: "
- << newDocElem};
+ << "'field, got: " << newDocElem};
}
return newDocElem.Obj().getOwned();
@@ -220,14 +219,10 @@ StatusWith<LocksType> DistLockCatalogImpl::grabLock(OperationContext* opCtx,
Date_t time,
StringData why,
const WriteConcernOptions& writeConcern) {
- BSONObj newLockDetails(BSON(
- LocksType::lockID(lockSessionID) << LocksType::state(LocksType::LOCKED) << LocksType::who()
- << who
- << LocksType::process()
- << processId
- << LocksType::when(time)
- << LocksType::why()
- << why));
+ BSONObj newLockDetails(BSON(LocksType::lockID(lockSessionID)
+ << LocksType::state(LocksType::LOCKED) << LocksType::who() << who
+ << LocksType::process() << processId << LocksType::when(time)
+ << LocksType::why() << why));
auto request = FindAndModifyRequest::makeUpdate(
_locksNS,
@@ -281,14 +276,10 @@ StatusWith<LocksType> DistLockCatalogImpl::overtakeLock(OperationContext* opCtx,
BSON(LocksType::name() << lockID << LocksType::state(LocksType::UNLOCKED)));
orQueryBuilder.append(BSON(LocksType::name() << lockID << LocksType::lockID(currentHolderTS)));
- BSONObj newLockDetails(BSON(
- LocksType::lockID(lockSessionID) << LocksType::state(LocksType::LOCKED) << LocksType::who()
- << who
- << LocksType::process()
- << processId
- << LocksType::when(time)
- << LocksType::why()
- << why));
+ BSONObj newLockDetails(BSON(LocksType::lockID(lockSessionID)
+ << LocksType::state(LocksType::LOCKED) << LocksType::who() << who
+ << LocksType::process() << processId << LocksType::when(time)
+ << LocksType::why() << why));
auto request = FindAndModifyRequest::makeUpdate(
_locksNS, BSON("$or" << orQueryBuilder.arr()), BSON("$set" << newLockDetails));
diff --git a/src/mongo/s/catalog/dist_lock_catalog_mock.cpp b/src/mongo/s/catalog/dist_lock_catalog_mock.cpp
index f2eca5abcf7..5dae286da5a 100644
--- a/src/mongo/s/catalog/dist_lock_catalog_mock.cpp
+++ b/src/mongo/s/catalog/dist_lock_catalog_mock.cpp
@@ -53,14 +53,8 @@ void noGrabLockFuncSet(StringData lockID,
Date_t time,
StringData why) {
FAIL(str::stream() << "grabLock not expected to be called. "
- << "lockID: "
- << lockID
- << ", who: "
- << who
- << ", processId: "
- << processId
- << ", why: "
- << why);
+ << "lockID: " << lockID << ", who: " << who << ", processId: " << processId
+ << ", why: " << why);
}
void noOvertakeLockFuncSet(StringData lockID,
@@ -71,22 +65,13 @@ void noOvertakeLockFuncSet(StringData lockID,
Date_t time,
StringData why) {
FAIL(str::stream() << "overtakeLock not expected to be called. "
- << "lockID: "
- << lockID
- << ", currentHolderTS: "
- << currentHolderTS
- << ", who: "
- << who
- << ", processId: "
- << processId
- << ", why: "
- << why);
+ << "lockID: " << lockID << ", currentHolderTS: " << currentHolderTS
+ << ", who: " << who << ", processId: " << processId << ", why: " << why);
}
void noUnLockFuncSet(const OID& lockSessionID) {
FAIL(str::stream() << "unlock not expected to be called. "
- << "lockSessionID: "
- << lockSessionID);
+ << "lockSessionID: " << lockSessionID);
}
void noPingFuncSet(StringData processID, Date_t ping) {
@@ -95,26 +80,22 @@ void noPingFuncSet(StringData processID, Date_t ping) {
void noStopPingFuncSet(StringData processID) {
FAIL(str::stream() << "stopPing not expected to be called. "
- << "processID: "
- << processID);
+ << "processID: " << processID);
}
void noGetLockByTSSet(const OID& lockSessionID) {
FAIL(str::stream() << "getLockByTS not expected to be called. "
- << "lockSessionID: "
- << lockSessionID);
+ << "lockSessionID: " << lockSessionID);
}
void noGetLockByNameSet(StringData name) {
FAIL(str::stream() << "getLockByName not expected to be called. "
- << "lockName: "
- << name);
+ << "lockName: " << name);
}
void noGetPingSet(StringData processId) {
FAIL(str::stream() << "getPing not expected to be called. "
- << "lockName: "
- << processId);
+ << "lockName: " << processId);
}
void noGetServerInfoSet() {
diff --git a/src/mongo/s/catalog/dist_lock_catalog_mock.h b/src/mongo/s/catalog/dist_lock_catalog_mock.h
index d8b9a5a42bc..d407a9c523f 100644
--- a/src/mongo/s/catalog/dist_lock_catalog_mock.h
+++ b/src/mongo/s/catalog/dist_lock_catalog_mock.h
@@ -219,4 +219,4 @@ private:
GetServerInfoFunc _getServerInfoChecker;
StatusWith<DistLockCatalog::ServerInfo> _getServerInfoReturnValue;
};
-}
+} // namespace mongo
diff --git a/src/mongo/s/catalog/dist_lock_manager_mock.cpp b/src/mongo/s/catalog/dist_lock_manager_mock.cpp
index ceb0611669b..6a17de30fad 100644
--- a/src/mongo/s/catalog/dist_lock_manager_mock.cpp
+++ b/src/mongo/s/catalog/dist_lock_manager_mock.cpp
@@ -45,12 +45,8 @@ namespace {
void NoLockFuncSet(StringData name, StringData whyMessage, Milliseconds waitFor) {
FAIL(str::stream() << "Lock not expected to be called. "
- << "Name: "
- << name
- << ", whyMessage: "
- << whyMessage
- << ", waitFor: "
- << waitFor);
+ << "Name: " << name << ", whyMessage: " << whyMessage
+ << ", waitFor: " << waitFor);
}
} // namespace
diff --git a/src/mongo/s/catalog/dist_lock_ping_info.cpp b/src/mongo/s/catalog/dist_lock_ping_info.cpp
index c0643c1fa12..2549e55bb19 100644
--- a/src/mongo/s/catalog/dist_lock_ping_info.cpp
+++ b/src/mongo/s/catalog/dist_lock_ping_info.cpp
@@ -42,4 +42,4 @@ DistLockPingInfo::DistLockPingInfo(
configLocalTime(remoteArg),
lockSessionId(std::move(tsArg)),
electionId(std::move(electionIdArg)) {}
-}
+} // namespace mongo
diff --git a/src/mongo/s/catalog/dist_lock_ping_info.h b/src/mongo/s/catalog/dist_lock_ping_info.h
index e3db046db20..6e236fb5133 100644
--- a/src/mongo/s/catalog/dist_lock_ping_info.h
+++ b/src/mongo/s/catalog/dist_lock_ping_info.h
@@ -64,4 +64,4 @@ struct DistLockPingInfo {
// Note: unused by legacy dist lock.
OID electionId;
};
-}
+} // namespace mongo
diff --git a/src/mongo/s/catalog/mongo_version_range.cpp b/src/mongo/s/catalog/mongo_version_range.cpp
index c92fcb0b749..a0c5f505817 100644
--- a/src/mongo/s/catalog/mongo_version_range.cpp
+++ b/src/mongo/s/catalog/mongo_version_range.cpp
@@ -148,4 +148,4 @@ bool isInMongoVersionRanges(StringData version, const vector<MongoVersionRange>&
return false;
}
-}
+} // namespace mongo
diff --git a/src/mongo/s/catalog/mongo_version_range.h b/src/mongo/s/catalog/mongo_version_range.h
index 5e8d79807a7..f995864a689 100644
--- a/src/mongo/s/catalog/mongo_version_range.h
+++ b/src/mongo/s/catalog/mongo_version_range.h
@@ -60,4 +60,4 @@ struct MongoVersionRange {
};
bool isInMongoVersionRanges(StringData version, const std::vector<MongoVersionRange>& ranges);
-}
+} // namespace mongo
diff --git a/src/mongo/s/catalog/sharding_catalog_client_impl.cpp b/src/mongo/s/catalog/sharding_catalog_client_impl.cpp
index b64f17d968c..a26142eb958 100644
--- a/src/mongo/s/catalog/sharding_catalog_client_impl.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_client_impl.cpp
@@ -259,14 +259,14 @@ StatusWith<repl::OpTimeWith<std::vector<DatabaseType>>> ShardingCatalogClientImp
for (const BSONObj& doc : findStatus.getValue().value) {
auto dbRes = DatabaseType::fromBSON(doc);
if (!dbRes.isOK()) {
- return dbRes.getStatus().withContext(stream() << "Failed to parse database document "
- << doc);
+ return dbRes.getStatus().withContext(stream()
+ << "Failed to parse database document " << doc);
}
Status validateStatus = dbRes.getValue().validate();
if (!validateStatus.isOK()) {
- return validateStatus.withContext(stream() << "Failed to validate database document "
- << doc);
+ return validateStatus.withContext(stream()
+ << "Failed to validate database document " << doc);
}
databases.push_back(dbRes.getValue());
@@ -376,9 +376,7 @@ StatusWith<std::vector<CollectionType>> ShardingCatalogClientImpl::getCollection
if (!collectionResult.isOK()) {
return {ErrorCodes::FailedToParse,
str::stream() << "error while parsing " << CollectionType::ConfigNS.ns()
- << " document: "
- << obj
- << " : "
+ << " document: " << obj << " : "
<< collectionResult.getStatus().toString()};
}
@@ -590,14 +588,14 @@ StatusWith<repl::OpTimeWith<std::vector<ShardType>>> ShardingCatalogClientImpl::
for (const BSONObj& doc : findStatus.getValue().value) {
auto shardRes = ShardType::fromBSON(doc);
if (!shardRes.isOK()) {
- return shardRes.getStatus().withContext(stream() << "Failed to parse shard document "
- << doc);
+ return shardRes.getStatus().withContext(stream()
+ << "Failed to parse shard document " << doc);
}
Status validateStatus = shardRes.getValue().validate();
if (!validateStatus.isOK()) {
- return validateStatus.withContext(stream() << "Failed to validate shard document "
- << doc);
+ return validateStatus.withContext(stream()
+ << "Failed to validate shard document " << doc);
}
shards.push_back(shardRes.getValue());
@@ -713,9 +711,9 @@ Status ShardingCatalogClientImpl::applyChunkOpsDeprecated(OperationContext* opCt
invariant(serverGlobalParams.clusterRole == ClusterRole::ConfigServer ||
(readConcern == repl::ReadConcernLevel::kMajorityReadConcern &&
writeConcern.wMode == WriteConcernOptions::kMajority));
- BSONObj cmd = BSON("applyOps" << updateOps << "preCondition" << preCondition
- << WriteConcernOptions::kWriteConcernField
- << writeConcern.toBSON());
+ BSONObj cmd =
+ BSON("applyOps" << updateOps << "preCondition" << preCondition
+ << WriteConcernOptions::kWriteConcernField << writeConcern.toBSON());
auto response =
Grid::get(opCtx)->shardRegistry()->getConfigShard()->runCommandWithFixedRetryAttempts(
@@ -772,11 +770,11 @@ Status ShardingCatalogClientImpl::applyChunkOpsDeprecated(OperationContext* opCt
const auto& newestChunk = chunkWithStatus.getValue();
if (newestChunk.empty()) {
- errMsg = str::stream() << "chunk operation commit failed: version "
- << lastChunkVersion.toString()
- << " doesn't exist in namespace: " << nss.ns()
- << ". Unable to save chunk ops. Command: " << cmd
- << ". Result: " << response.getValue().response;
+ errMsg = str::stream()
+ << "chunk operation commit failed: version " << lastChunkVersion.toString()
+ << " doesn't exist in namespace: " << nss.ns()
+ << ". Unable to save chunk ops. Command: " << cmd
+ << ". Result: " << response.getValue().response;
return status.withContext(errMsg);
};
diff --git a/src/mongo/s/catalog/sharding_catalog_test.cpp b/src/mongo/s/catalog/sharding_catalog_test.cpp
index 10bd2e3afe8..f0a3ec47636 100644
--- a/src/mongo/s/catalog/sharding_catalog_test.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_test.cpp
@@ -67,8 +67,8 @@ using executor::NetworkInterfaceMock;
using executor::RemoteCommandRequest;
using executor::RemoteCommandResponse;
using executor::TaskExecutor;
-using rpc::ReplSetMetadata;
using repl::OpTime;
+using rpc::ReplSetMetadata;
using std::vector;
using unittest::assertGet;
@@ -101,7 +101,6 @@ TEST_F(ShardingCatalogClientTest, GetCollectionExisting) {
onFindWithMetadataCommand(
[this, &expectedColl, newOpTime](const RemoteCommandRequest& request) {
-
ASSERT_BSONOBJ_EQ(getReplSecondaryOkMetadata(),
rpc::TrackingMetadata::removeTrackingData(request.metadata));
@@ -597,10 +596,8 @@ TEST_F(ShardingCatalogClientTest, RunUserManagementWriteCommandSuccess) {
<< "writeConcern"
<< BSON("w"
<< "majority"
- << "wtimeout"
- << 0)
- << "maxTimeMS"
- << 30000),
+ << "wtimeout" << 0)
+ << "maxTimeMS" << 30000),
request.cmdObj);
ASSERT_BSONOBJ_EQ(BSON(rpc::kReplSetMetadataFieldName << 1),
@@ -620,14 +617,14 @@ TEST_F(ShardingCatalogClientTest, RunUserManagementWriteCommandInvalidWriteConce
configTargeter()->setFindHostReturnValue(HostAndPort("TestHost1"));
BSONObjBuilder responseBuilder;
- bool ok = catalogClient()->runUserManagementWriteCommand(operationContext(),
- "dropUser",
- "test",
- BSON("dropUser"
- << "test"
- << "writeConcern"
- << BSON("w" << 2)),
- &responseBuilder);
+ bool ok =
+ catalogClient()->runUserManagementWriteCommand(operationContext(),
+ "dropUser",
+ "test",
+ BSON("dropUser"
+ << "test"
+ << "writeConcern" << BSON("w" << 2)),
+ &responseBuilder);
ASSERT_FALSE(ok);
Status commandStatus = getStatusFromCommandResult(responseBuilder.obj());
@@ -646,22 +643,23 @@ TEST_F(ShardingCatalogClientTest, RunUserManagementWriteCommandRewriteWriteConce
},
Status::OK());
- auto future = launchAsync([this] {
- BSONObjBuilder responseBuilder;
- bool ok = catalogClient()->runUserManagementWriteCommand(operationContext(),
- "dropUser",
- "test",
- BSON("dropUser"
- << "test"
- << "writeConcern"
- << BSON("w" << 1 << "wtimeout"
- << 30)),
- &responseBuilder);
- ASSERT_FALSE(ok);
-
- Status commandStatus = getStatusFromCommandResult(responseBuilder.obj());
- ASSERT_EQUALS(ErrorCodes::UserNotFound, commandStatus);
- });
+ auto future =
+ launchAsync([this] {
+ BSONObjBuilder responseBuilder;
+ bool ok =
+ catalogClient()->runUserManagementWriteCommand(
+ operationContext(),
+ "dropUser",
+ "test",
+ BSON("dropUser"
+ << "test"
+ << "writeConcern" << BSON("w" << 1 << "wtimeout" << 30)),
+ &responseBuilder);
+ ASSERT_FALSE(ok);
+
+ Status commandStatus = getStatusFromCommandResult(responseBuilder.obj());
+ ASSERT_EQUALS(ErrorCodes::UserNotFound, commandStatus);
+ });
onCommand([](const RemoteCommandRequest& request) {
ASSERT_EQUALS("test", request.dbname);
@@ -670,10 +668,8 @@ TEST_F(ShardingCatalogClientTest, RunUserManagementWriteCommandRewriteWriteConce
<< "writeConcern"
<< BSON("w"
<< "majority"
- << "wtimeout"
- << 30)
- << "maxTimeMS"
- << 30000),
+ << "wtimeout" << 30)
+ << "maxTimeMS" << 30000),
request.cmdObj);
ASSERT_BSONOBJ_EQ(BSON(rpc::kReplSetMetadataFieldName << 1),
@@ -761,10 +757,8 @@ TEST_F(ShardingCatalogClientTest, RunUserManagementWriteCommandNotMasterRetrySuc
<< "writeConcern"
<< BSON("w"
<< "majority"
- << "wtimeout"
- << 0)
- << "maxTimeMS"
- << 30000),
+ << "wtimeout" << 0)
+ << "maxTimeMS" << 30000),
request.cmdObj);
ASSERT_BSONOBJ_EQ(BSON(rpc::kReplSetMetadataFieldName << 1),
@@ -799,7 +793,6 @@ TEST_F(ShardingCatalogClientTest, GetCollectionsValidResultsNoDb) {
const OpTime newOpTime(Timestamp(7, 6), 5);
auto future = launchAsync([this, newOpTime] {
-
OpTime opTime;
const auto& collections =
assertGet(catalogClient()->getCollections(operationContext(), nullptr, &opTime));
@@ -1200,8 +1193,7 @@ TEST_F(ShardingCatalogClientTest, ApplyChunkOpsDeprecatedSuccessful) {
ASSERT_EQUALS("config", request.dbname);
ASSERT_BSONOBJ_EQ(BSON("w"
<< "majority"
- << "wtimeout"
- << 60000),
+ << "wtimeout" << 60000),
request.cmdObj["writeConcern"].Obj());
ASSERT_BSONOBJ_EQ(BSON(rpc::kReplSetMetadataFieldName << 1),
rpc::TrackingMetadata::removeTrackingData(request.metadata));
diff --git a/src/mongo/s/catalog/type_changelog_test.cpp b/src/mongo/s/catalog/type_changelog_test.cpp
index b2a2b522299..3142901d06a 100644
--- a/src/mongo/s/catalog/type_changelog_test.cpp
+++ b/src/mongo/s/catalog/type_changelog_test.cpp
@@ -46,12 +46,10 @@ TEST(ChangeLogType, Empty) {
TEST(ChangeLogType, Valid) {
BSONObj obj = BSON(ChangeLogType::changeId("host.local-2012-11-21T19:14:10-8")
- << ChangeLogType::server("host.local")
- << ChangeLogType::shard("shardname")
+ << ChangeLogType::server("host.local") << ChangeLogType::shard("shardname")
<< ChangeLogType::clientAddr("192.168.0.189:51128")
<< ChangeLogType::time(Date_t::fromMillisSinceEpoch(1))
- << ChangeLogType::what("split")
- << ChangeLogType::ns("test.test")
+ << ChangeLogType::what("split") << ChangeLogType::ns("test.test")
<< ChangeLogType::details(BSON("dummy"
<< "info")));
@@ -77,8 +75,7 @@ TEST(ChangeLogType, MissingChangeId) {
<< ChangeLogType::shard("shardname")
<< ChangeLogType::clientAddr("192.168.0.189:51128")
<< ChangeLogType::time(Date_t::fromMillisSinceEpoch(1))
- << ChangeLogType::what("split")
- << ChangeLogType::ns("test.test")
+ << ChangeLogType::what("split") << ChangeLogType::ns("test.test")
<< ChangeLogType::details(BSON("dummy"
<< "info")));
@@ -91,8 +88,7 @@ TEST(ChangeLogType, MissingServer) {
<< ChangeLogType::shard("shardname")
<< ChangeLogType::clientAddr("192.168.0.189:51128")
<< ChangeLogType::time(Date_t::fromMillisSinceEpoch(1))
- << ChangeLogType::what("split")
- << ChangeLogType::ns("test.test")
+ << ChangeLogType::what("split") << ChangeLogType::ns("test.test")
<< ChangeLogType::details(BSON("dummy"
<< "info")));
@@ -102,11 +98,9 @@ TEST(ChangeLogType, MissingServer) {
TEST(ChangeLogType, MissingClientAddr) {
BSONObj obj = BSON(ChangeLogType::changeId("host.local-2012-11-21T19:14:10-8")
- << ChangeLogType::server("host.local")
- << ChangeLogType::shard("shardname")
+ << ChangeLogType::server("host.local") << ChangeLogType::shard("shardname")
<< ChangeLogType::time(Date_t::fromMillisSinceEpoch(1))
- << ChangeLogType::what("split")
- << ChangeLogType::ns("test.test")
+ << ChangeLogType::what("split") << ChangeLogType::ns("test.test")
<< ChangeLogType::details(BSON("dummy"
<< "info")));
@@ -116,11 +110,9 @@ TEST(ChangeLogType, MissingClientAddr) {
TEST(ChangeLogType, MissingTime) {
BSONObj obj = BSON(ChangeLogType::changeId("host.local-2012-11-21T19:14:10-8")
- << ChangeLogType::server("host.local")
- << ChangeLogType::shard("shardname")
+ << ChangeLogType::server("host.local") << ChangeLogType::shard("shardname")
<< ChangeLogType::clientAddr("192.168.0.189:51128")
- << ChangeLogType::what("split")
- << ChangeLogType::ns("test.test")
+ << ChangeLogType::what("split") << ChangeLogType::ns("test.test")
<< ChangeLogType::details(BSON("dummy"
<< "info")));
@@ -130,8 +122,7 @@ TEST(ChangeLogType, MissingTime) {
TEST(ChangeLogType, MissingWhat) {
BSONObj obj = BSON(ChangeLogType::changeId("host.local-2012-11-21T19:14:10-8")
- << ChangeLogType::server("host.local")
- << ChangeLogType::shard("shardname")
+ << ChangeLogType::server("host.local") << ChangeLogType::shard("shardname")
<< ChangeLogType::clientAddr("192.168.0.189:51128")
<< ChangeLogType::time(Date_t::fromMillisSinceEpoch(1))
<< ChangeLogType::ns("test.test")
@@ -143,14 +134,13 @@ TEST(ChangeLogType, MissingWhat) {
}
TEST(ChangeLogType, MissingNS) {
- BSONObj obj = BSON(ChangeLogType::changeId("host.local-2012-11-21T19:14:10-8")
- << ChangeLogType::server("host.local")
- << ChangeLogType::shard("shardname")
- << ChangeLogType::clientAddr("192.168.0.189:51128")
- << ChangeLogType::time(Date_t::fromMillisSinceEpoch(1))
- << ChangeLogType::what("split")
- << ChangeLogType::details(BSON("dummy"
- << "info")));
+ BSONObj obj =
+ BSON(ChangeLogType::changeId("host.local-2012-11-21T19:14:10-8")
+ << ChangeLogType::server("host.local") << ChangeLogType::shard("shardname")
+ << ChangeLogType::clientAddr("192.168.0.189:51128")
+ << ChangeLogType::time(Date_t::fromMillisSinceEpoch(1)) << ChangeLogType::what("split")
+ << ChangeLogType::details(BSON("dummy"
+ << "info")));
auto changeLogResult = ChangeLogType::fromBSON(obj);
ASSERT_OK(changeLogResult.getStatus());
@@ -170,12 +160,10 @@ TEST(ChangeLogType, MissingNS) {
TEST(ChangeLogType, MissingDetails) {
BSONObj obj = BSON(ChangeLogType::changeId("host.local-2012-11-21T19:14:10-8")
- << ChangeLogType::server("host.local")
- << ChangeLogType::shard("shardname")
+ << ChangeLogType::server("host.local") << ChangeLogType::shard("shardname")
<< ChangeLogType::clientAddr("192.168.0.189:51128")
<< ChangeLogType::time(Date_t::fromMillisSinceEpoch(1))
- << ChangeLogType::what("split")
- << ChangeLogType::ns("test.test"));
+ << ChangeLogType::what("split") << ChangeLogType::ns("test.test"));
auto changeLogResult = ChangeLogType::fromBSON(obj);
ASSERT_EQ(ErrorCodes::NoSuchKey, changeLogResult.getStatus());
@@ -186,8 +174,7 @@ TEST(ChangeLogType, MissingShard) {
<< ChangeLogType::server("host.local")
<< ChangeLogType::clientAddr("192.168.0.189:51128")
<< ChangeLogType::time(Date_t::fromMillisSinceEpoch(1))
- << ChangeLogType::what("split")
- << ChangeLogType::ns("test.test")
+ << ChangeLogType::what("split") << ChangeLogType::ns("test.test")
<< ChangeLogType::details(BSON("dummy"
<< "info")));
diff --git a/src/mongo/s/catalog/type_chunk.cpp b/src/mongo/s/catalog/type_chunk.cpp
index 1e2d5dff754..cf97e57845f 100644
--- a/src/mongo/s/catalog/type_chunk.cpp
+++ b/src/mongo/s/catalog/type_chunk.cpp
@@ -68,8 +68,8 @@ const char kMaxKey[] = "max";
Status extractObject(const BSONObj& obj, const std::string& fieldName, BSONElement* bsonElement) {
Status elementStatus = bsonExtractTypedField(obj, fieldName, Object, bsonElement);
if (!elementStatus.isOK()) {
- return elementStatus.withContext(str::stream() << "The field '" << fieldName
- << "' cannot be parsed");
+ return elementStatus.withContext(str::stream()
+ << "The field '" << fieldName << "' cannot be parsed");
}
if (bsonElement->Obj().isEmpty()) {
@@ -108,8 +108,8 @@ StatusWith<ChunkRange> ChunkRange::fromBSON(const BSONObj& obj) {
if (SimpleBSONObjComparator::kInstance.evaluate(minKey.Obj() >= maxKey.Obj())) {
return {ErrorCodes::FailedToParse,
- str::stream() << "min: " << minKey.Obj() << " should be less than max: "
- << maxKey.Obj()};
+ str::stream() << "min: " << minKey.Obj()
+ << " should be less than max: " << maxKey.Obj()};
}
return ChunkRange(minKey.Obj().getOwned(), maxKey.Obj().getOwned());
@@ -135,8 +135,7 @@ const Status ChunkRange::extractKeyPattern(KeyPattern* shardKeyPatternOut) const
(!min.more() && max.more())) {
return {ErrorCodes::ShardKeyNotFound,
str::stream() << "the shard key of min " << _minKey << " doesn't match with "
- << "the shard key of max "
- << _maxKey};
+ << "the shard key of max " << _maxKey};
}
b.append(x.fieldName(), 1);
}
@@ -311,8 +310,8 @@ StatusWith<ChunkType> ChunkType::fromShardBSON(const BSONObj& source, const OID&
if (SimpleBSONObjComparator::kInstance.evaluate(minKey.Obj() >= maxKey.Obj())) {
return {ErrorCodes::FailedToParse,
- str::stream() << "min: " << minKey.Obj() << " should be less than max: "
- << maxKey.Obj()};
+ str::stream() << "min: " << minKey.Obj()
+ << " should be less than max: " << maxKey.Obj()};
}
chunk._min = minKey.Obj().getOwned();
diff --git a/src/mongo/s/catalog/type_chunk_test.cpp b/src/mongo/s/catalog/type_chunk_test.cpp
index 5bc960179e1..49ae676b153 100644
--- a/src/mongo/s/catalog/type_chunk_test.cpp
+++ b/src/mongo/s/catalog/type_chunk_test.cpp
@@ -50,41 +50,32 @@ TEST(ChunkType, MissingConfigRequiredFields) {
ChunkVersion chunkVersion(1, 2, OID::gen());
BSONObj objModNS =
- BSON(ChunkType::name("test.mycol-a_MinKey") << ChunkType::min(BSON("a" << 10 << "b" << 10))
- << ChunkType::max(BSON("a" << 20))
- << "lastmod"
- << Timestamp(chunkVersion.toLong())
- << "lastmodEpoch"
- << chunkVersion.epoch()
- << ChunkType::shard("shard0001"));
+ BSON(ChunkType::name("test.mycol-a_MinKey")
+ << ChunkType::min(BSON("a" << 10 << "b" << 10)) << ChunkType::max(BSON("a" << 20))
+ << "lastmod" << Timestamp(chunkVersion.toLong()) << "lastmodEpoch"
+ << chunkVersion.epoch() << ChunkType::shard("shard0001"));
StatusWith<ChunkType> chunkRes = ChunkType::fromConfigBSON(objModNS);
ASSERT_FALSE(chunkRes.isOK());
BSONObj objModKeys =
- BSON(ChunkType::name("test.mycol-a_MinKey") << ChunkType::ns("test.mycol") << "lastmod"
- << Timestamp(chunkVersion.toLong())
- << "lastmodEpoch"
- << chunkVersion.epoch()
- << ChunkType::shard("shard0001"));
+ BSON(ChunkType::name("test.mycol-a_MinKey")
+ << ChunkType::ns("test.mycol") << "lastmod" << Timestamp(chunkVersion.toLong())
+ << "lastmodEpoch" << chunkVersion.epoch() << ChunkType::shard("shard0001"));
chunkRes = ChunkType::fromConfigBSON(objModKeys);
ASSERT_FALSE(chunkRes.isOK());
BSONObj objModShard =
- BSON(ChunkType::name("test.mycol-a_MinKey") << ChunkType::ns("test.mycol")
- << ChunkType::min(BSON("a" << 10 << "b" << 10))
- << ChunkType::max(BSON("a" << 20))
- << "lastmod"
- << Timestamp(chunkVersion.toLong())
- << "lastmodEpoch"
- << chunkVersion.epoch());
+ BSON(ChunkType::name("test.mycol-a_MinKey")
+ << ChunkType::ns("test.mycol") << ChunkType::min(BSON("a" << 10 << "b" << 10))
+ << ChunkType::max(BSON("a" << 20)) << "lastmod" << Timestamp(chunkVersion.toLong())
+ << "lastmodEpoch" << chunkVersion.epoch());
chunkRes = ChunkType::fromConfigBSON(objModShard);
ASSERT_FALSE(chunkRes.isOK());
BSONObj objModVersion =
- BSON(ChunkType::name("test.mycol-a_MinKey") << ChunkType::ns("test.mycol")
- << ChunkType::min(BSON("a" << 10 << "b" << 10))
- << ChunkType::max(BSON("a" << 20))
- << ChunkType::shard("shard0001"));
+ BSON(ChunkType::name("test.mycol-a_MinKey")
+ << ChunkType::ns("test.mycol") << ChunkType::min(BSON("a" << 10 << "b" << 10))
+ << ChunkType::max(BSON("a" << 20)) << ChunkType::shard("shard0001"));
chunkRes = ChunkType::fromConfigBSON(objModVersion);
ASSERT_FALSE(chunkRes.isOK());
}
@@ -100,8 +91,8 @@ TEST(ChunkType, MissingShardRequiredFields) {
ASSERT_EQUALS(chunkRes.getStatus(), ErrorCodes::NoSuchKey);
ASSERT_STRING_CONTAINS(chunkRes.getStatus().reason(), ChunkType::minShardID.name());
- BSONObj objModMax = BSON(
- ChunkType::minShardID(kMin) << ChunkType::shard(kShard.toString()) << "lastmod" << lastmod);
+ BSONObj objModMax = BSON(ChunkType::minShardID(kMin)
+ << ChunkType::shard(kShard.toString()) << "lastmod" << lastmod);
chunkRes = ChunkType::fromShardBSON(objModMax, epoch);
ASSERT_EQUALS(chunkRes.getStatus(), ErrorCodes::NoSuchKey);
ASSERT_STRING_CONTAINS(chunkRes.getStatus().reason(), ChunkType::max.name());
@@ -112,8 +103,8 @@ TEST(ChunkType, MissingShardRequiredFields) {
ASSERT_EQUALS(chunkRes.getStatus(), ErrorCodes::NoSuchKey);
ASSERT_STRING_CONTAINS(chunkRes.getStatus().reason(), ChunkType::shard.name());
- BSONObj objModLastmod = BSON(
- ChunkType::minShardID(kMin) << ChunkType::max(kMax) << ChunkType::shard(kShard.toString()));
+ BSONObj objModLastmod = BSON(ChunkType::minShardID(kMin)
+ << ChunkType::max(kMax) << ChunkType::shard(kShard.toString()));
chunkRes = ChunkType::fromShardBSON(objModLastmod, epoch);
ASSERT_EQUALS(chunkRes.getStatus(), ErrorCodes::NoSuchKey);
}
@@ -123,10 +114,9 @@ TEST(ChunkType, ToFromShardBSON) {
ChunkVersion chunkVersion(1, 2, epoch);
auto lastmod = Timestamp(chunkVersion.toLong());
- BSONObj obj = BSON(ChunkType::minShardID(kMin) << ChunkType::max(kMax)
- << ChunkType::shard(kShard.toString())
- << "lastmod"
- << lastmod);
+ BSONObj obj = BSON(ChunkType::minShardID(kMin)
+ << ChunkType::max(kMax) << ChunkType::shard(kShard.toString()) << "lastmod"
+ << lastmod);
ChunkType shardChunk = assertGet(ChunkType::fromShardBSON(obj, epoch));
ASSERT_BSONOBJ_EQ(obj, shardChunk.toShardBSON());
@@ -140,14 +130,10 @@ TEST(ChunkType, ToFromShardBSON) {
TEST(ChunkType, MinAndMaxShardKeysDifferInNumberOfKeys) {
ChunkVersion chunkVersion(1, 2, OID::gen());
BSONObj obj =
- BSON(ChunkType::name("test.mycol-a_MinKey") << ChunkType::ns("test.mycol")
- << ChunkType::min(BSON("a" << 10 << "b" << 10))
- << ChunkType::max(BSON("a" << 20))
- << "lastmod"
- << Timestamp(chunkVersion.toLong())
- << "lastmodEpoch"
- << chunkVersion.epoch()
- << ChunkType::shard("shard0001"));
+ BSON(ChunkType::name("test.mycol-a_MinKey")
+ << ChunkType::ns("test.mycol") << ChunkType::min(BSON("a" << 10 << "b" << 10))
+ << ChunkType::max(BSON("a" << 20)) << "lastmod" << Timestamp(chunkVersion.toLong())
+ << "lastmodEpoch" << chunkVersion.epoch() << ChunkType::shard("shard0001"));
StatusWith<ChunkType> chunkRes = ChunkType::fromConfigBSON(obj);
ASSERT_OK(chunkRes.getStatus());
ASSERT_FALSE(chunkRes.getValue().validate().isOK());
@@ -155,14 +141,11 @@ TEST(ChunkType, MinAndMaxShardKeysDifferInNumberOfKeys) {
TEST(ChunkType, MinAndMaxShardKeysDifferInKeyNames) {
ChunkVersion chunkVersion(1, 2, OID::gen());
- BSONObj obj = BSON(ChunkType::name("test.mycol-a_MinKey") << ChunkType::ns("test.mycol")
- << ChunkType::min(BSON("a" << 10))
- << ChunkType::max(BSON("b" << 20))
- << "lastmod"
- << Timestamp(chunkVersion.toLong())
- << "lastmodEpoch"
- << chunkVersion.epoch()
- << ChunkType::shard("shard0001"));
+ BSONObj obj =
+ BSON(ChunkType::name("test.mycol-a_MinKey")
+ << ChunkType::ns("test.mycol") << ChunkType::min(BSON("a" << 10))
+ << ChunkType::max(BSON("b" << 20)) << "lastmod" << Timestamp(chunkVersion.toLong())
+ << "lastmodEpoch" << chunkVersion.epoch() << ChunkType::shard("shard0001"));
StatusWith<ChunkType> chunkRes = ChunkType::fromConfigBSON(obj);
ASSERT_OK(chunkRes.getStatus());
ASSERT_FALSE(chunkRes.getValue().validate().isOK());
@@ -170,28 +153,22 @@ TEST(ChunkType, MinAndMaxShardKeysDifferInKeyNames) {
TEST(ChunkType, MinToMaxNotAscending) {
ChunkVersion chunkVersion(1, 2, OID::gen());
- BSONObj obj = BSON(ChunkType::name("test.mycol-a_MinKey") << ChunkType::ns("test.mycol")
- << ChunkType::min(BSON("a" << 20))
- << ChunkType::max(BSON("a" << 10))
- << "lastmod"
- << Timestamp(chunkVersion.toLong())
- << "lastmodEpoch"
- << chunkVersion.epoch()
- << ChunkType::shard("shard0001"));
+ BSONObj obj =
+ BSON(ChunkType::name("test.mycol-a_MinKey")
+ << ChunkType::ns("test.mycol") << ChunkType::min(BSON("a" << 20))
+ << ChunkType::max(BSON("a" << 10)) << "lastmod" << Timestamp(chunkVersion.toLong())
+ << "lastmodEpoch" << chunkVersion.epoch() << ChunkType::shard("shard0001"));
StatusWith<ChunkType> chunkRes = ChunkType::fromConfigBSON(obj);
ASSERT_EQ(ErrorCodes::FailedToParse, chunkRes.getStatus());
}
TEST(ChunkType, ToFromConfigBSON) {
ChunkVersion chunkVersion(1, 2, OID::gen());
- BSONObj obj = BSON(ChunkType::name("test.mycol-a_10") << ChunkType::ns("test.mycol")
- << ChunkType::min(BSON("a" << 10))
- << ChunkType::max(BSON("a" << 20))
- << ChunkType::shard("shard0001")
- << "lastmod"
- << Timestamp(chunkVersion.toLong())
- << "lastmodEpoch"
- << chunkVersion.epoch());
+ BSONObj obj =
+ BSON(ChunkType::name("test.mycol-a_10")
+ << ChunkType::ns("test.mycol") << ChunkType::min(BSON("a" << 10))
+ << ChunkType::max(BSON("a" << 20)) << ChunkType::shard("shard0001") << "lastmod"
+ << Timestamp(chunkVersion.toLong()) << "lastmodEpoch" << chunkVersion.epoch());
StatusWith<ChunkType> chunkRes = ChunkType::fromConfigBSON(obj);
ASSERT_OK(chunkRes.getStatus());
ChunkType chunk = chunkRes.getValue();
@@ -208,18 +185,14 @@ TEST(ChunkType, ToFromConfigBSON) {
}
TEST(ChunkType, Pre22Format) {
- ChunkType chunk = assertGet(ChunkType::fromConfigBSON(BSON("_id"
- << "test.mycol-a_MinKey"
- << "lastmod"
- << Date_t::fromMillisSinceEpoch(1)
- << "ns"
- << "test.mycol"
- << "min"
- << BSON("a" << 10)
- << "max"
- << BSON("a" << 20)
- << "shard"
- << "shard0001")));
+ ChunkType chunk = assertGet(
+ ChunkType::fromConfigBSON(BSON("_id"
+ << "test.mycol-a_MinKey"
+ << "lastmod" << Date_t::fromMillisSinceEpoch(1) << "ns"
+ << "test.mycol"
+ << "min" << BSON("a" << 10) << "max" << BSON("a" << 20)
+ << "shard"
+ << "shard0001")));
ASSERT_OK(chunk.validate());
ASSERT_EQUALS(chunk.getNS().ns(), "test.mycol");
diff --git a/src/mongo/s/catalog/type_collection_test.cpp b/src/mongo/s/catalog/type_collection_test.cpp
index 68c1e73b096..9130562aaac 100644
--- a/src/mongo/s/catalog/type_collection_test.cpp
+++ b/src/mongo/s/catalog/type_collection_test.cpp
@@ -48,14 +48,13 @@ TEST(CollectionType, Empty) {
TEST(CollectionType, Basic) {
const OID oid = OID::gen();
- StatusWith<CollectionType> status =
- CollectionType::fromBSON(BSON(CollectionType::fullNs("db.coll")
- << CollectionType::epoch(oid)
- << CollectionType::updatedAt(Date_t::fromMillisSinceEpoch(1))
- << CollectionType::keyPattern(BSON("a" << 1))
- << CollectionType::defaultCollation(BSON("locale"
- << "fr_CA"))
- << CollectionType::unique(true)));
+ StatusWith<CollectionType> status = CollectionType::fromBSON(BSON(
+ CollectionType::fullNs("db.coll")
+ << CollectionType::epoch(oid) << CollectionType::updatedAt(Date_t::fromMillisSinceEpoch(1))
+ << CollectionType::keyPattern(BSON("a" << 1))
+ << CollectionType::defaultCollation(BSON("locale"
+ << "fr_CA"))
+ << CollectionType::unique(true)));
ASSERT_TRUE(status.isOK());
CollectionType coll = status.getValue();
@@ -76,18 +75,14 @@ TEST(CollectionType, Basic) {
TEST(CollectionType, AllFieldsPresent) {
const OID oid = OID::gen();
const auto uuid = UUID::gen();
- StatusWith<CollectionType> status =
- CollectionType::fromBSON(BSON(CollectionType::fullNs("db.coll")
- << CollectionType::epoch(oid)
- << CollectionType::updatedAt(Date_t::fromMillisSinceEpoch(1))
- << CollectionType::keyPattern(BSON("a" << 1))
- << CollectionType::defaultCollation(BSON("locale"
- << "fr_CA"))
- << CollectionType::unique(true)
- << CollectionType::uuid()
- << uuid
- << "isAssignedShardKey"
- << false));
+ StatusWith<CollectionType> status = CollectionType::fromBSON(BSON(
+ CollectionType::fullNs("db.coll")
+ << CollectionType::epoch(oid) << CollectionType::updatedAt(Date_t::fromMillisSinceEpoch(1))
+ << CollectionType::keyPattern(BSON("a" << 1))
+ << CollectionType::defaultCollation(BSON("locale"
+ << "fr_CA"))
+ << CollectionType::unique(true) << CollectionType::uuid() << uuid << "isAssignedShardKey"
+ << false));
ASSERT_TRUE(status.isOK());
CollectionType coll = status.getValue();
@@ -109,24 +104,20 @@ TEST(CollectionType, AllFieldsPresent) {
TEST(CollectionType, EmptyDefaultCollationFailsToParse) {
const OID oid = OID::gen();
- StatusWith<CollectionType> status =
- CollectionType::fromBSON(BSON(CollectionType::fullNs("db.coll")
- << CollectionType::epoch(oid)
- << CollectionType::updatedAt(Date_t::fromMillisSinceEpoch(1))
- << CollectionType::keyPattern(BSON("a" << 1))
- << CollectionType::defaultCollation(BSONObj())
- << CollectionType::unique(true)));
+ StatusWith<CollectionType> status = CollectionType::fromBSON(BSON(
+ CollectionType::fullNs("db.coll")
+ << CollectionType::epoch(oid) << CollectionType::updatedAt(Date_t::fromMillisSinceEpoch(1))
+ << CollectionType::keyPattern(BSON("a" << 1)) << CollectionType::defaultCollation(BSONObj())
+ << CollectionType::unique(true)));
ASSERT_FALSE(status.isOK());
}
TEST(CollectionType, MissingDefaultCollationParses) {
const OID oid = OID::gen();
- StatusWith<CollectionType> status =
- CollectionType::fromBSON(BSON(CollectionType::fullNs("db.coll")
- << CollectionType::epoch(oid)
- << CollectionType::updatedAt(Date_t::fromMillisSinceEpoch(1))
- << CollectionType::keyPattern(BSON("a" << 1))
- << CollectionType::unique(true)));
+ StatusWith<CollectionType> status = CollectionType::fromBSON(BSON(
+ CollectionType::fullNs("db.coll")
+ << CollectionType::epoch(oid) << CollectionType::updatedAt(Date_t::fromMillisSinceEpoch(1))
+ << CollectionType::keyPattern(BSON("a" << 1)) << CollectionType::unique(true)));
ASSERT_TRUE(status.isOK());
CollectionType coll = status.getValue();
@@ -136,14 +127,13 @@ TEST(CollectionType, MissingDefaultCollationParses) {
TEST(CollectionType, DefaultCollationSerializesCorrectly) {
const OID oid = OID::gen();
- StatusWith<CollectionType> status =
- CollectionType::fromBSON(BSON(CollectionType::fullNs("db.coll")
- << CollectionType::epoch(oid)
- << CollectionType::updatedAt(Date_t::fromMillisSinceEpoch(1))
- << CollectionType::keyPattern(BSON("a" << 1))
- << CollectionType::defaultCollation(BSON("locale"
- << "fr_CA"))
- << CollectionType::unique(true)));
+ StatusWith<CollectionType> status = CollectionType::fromBSON(BSON(
+ CollectionType::fullNs("db.coll")
+ << CollectionType::epoch(oid) << CollectionType::updatedAt(Date_t::fromMillisSinceEpoch(1))
+ << CollectionType::keyPattern(BSON("a" << 1))
+ << CollectionType::defaultCollation(BSON("locale"
+ << "fr_CA"))
+ << CollectionType::unique(true)));
ASSERT_TRUE(status.isOK());
CollectionType coll = status.getValue();
@@ -156,12 +146,10 @@ TEST(CollectionType, DefaultCollationSerializesCorrectly) {
TEST(CollectionType, MissingDefaultCollationIsNotSerialized) {
const OID oid = OID::gen();
- StatusWith<CollectionType> status =
- CollectionType::fromBSON(BSON(CollectionType::fullNs("db.coll")
- << CollectionType::epoch(oid)
- << CollectionType::updatedAt(Date_t::fromMillisSinceEpoch(1))
- << CollectionType::keyPattern(BSON("a" << 1))
- << CollectionType::unique(true)));
+ StatusWith<CollectionType> status = CollectionType::fromBSON(BSON(
+ CollectionType::fullNs("db.coll")
+ << CollectionType::epoch(oid) << CollectionType::updatedAt(Date_t::fromMillisSinceEpoch(1))
+ << CollectionType::keyPattern(BSON("a" << 1)) << CollectionType::unique(true)));
ASSERT_TRUE(status.isOK());
CollectionType coll = status.getValue();
@@ -194,16 +182,11 @@ TEST(CollectionType, EpochCorrectness) {
}
TEST(CollectionType, Pre22Format) {
- CollectionType coll = assertGet(CollectionType::fromBSON(BSON("_id"
- << "db.coll"
- << "lastmod"
- << Date_t::fromMillisSinceEpoch(1)
- << "dropped"
- << false
- << "key"
- << BSON("a" << 1)
- << "unique"
- << false)));
+ CollectionType coll = assertGet(
+ CollectionType::fromBSON(BSON("_id"
+ << "db.coll"
+ << "lastmod" << Date_t::fromMillisSinceEpoch(1) << "dropped"
+ << false << "key" << BSON("a" << 1) << "unique" << false)));
ASSERT(coll.getNs() == NamespaceString{"db.coll"});
ASSERT(!coll.getEpoch().isSet());
@@ -216,12 +199,10 @@ TEST(CollectionType, Pre22Format) {
TEST(CollectionType, InvalidCollectionNamespace) {
const OID oid = OID::gen();
- StatusWith<CollectionType> result =
- CollectionType::fromBSON(BSON(CollectionType::fullNs("foo\\bar.coll")
- << CollectionType::epoch(oid)
- << CollectionType::updatedAt(Date_t::fromMillisSinceEpoch(1))
- << CollectionType::keyPattern(BSON("a" << 1))
- << CollectionType::unique(true)));
+ StatusWith<CollectionType> result = CollectionType::fromBSON(BSON(
+ CollectionType::fullNs("foo\\bar.coll")
+ << CollectionType::epoch(oid) << CollectionType::updatedAt(Date_t::fromMillisSinceEpoch(1))
+ << CollectionType::keyPattern(BSON("a" << 1)) << CollectionType::unique(true)));
ASSERT_TRUE(result.isOK());
CollectionType collType = result.getValue();
ASSERT_FALSE(collType.validate().isOK());
@@ -230,10 +211,10 @@ TEST(CollectionType, InvalidCollectionNamespace) {
TEST(CollectionType, BadType) {
const OID oid = OID::gen();
StatusWith<CollectionType> status = CollectionType::fromBSON(
- BSON(CollectionType::fullNs() << 1 << CollectionType::epoch(oid)
- << CollectionType::updatedAt(Date_t::fromMillisSinceEpoch(1))
- << CollectionType::keyPattern(BSON("a" << 1))
- << CollectionType::unique(true)));
+ BSON(CollectionType::fullNs()
+ << 1 << CollectionType::epoch(oid)
+ << CollectionType::updatedAt(Date_t::fromMillisSinceEpoch(1))
+ << CollectionType::keyPattern(BSON("a" << 1)) << CollectionType::unique(true)));
ASSERT_FALSE(status.isOK());
}
diff --git a/src/mongo/s/catalog/type_config_version_test.cpp b/src/mongo/s/catalog/type_config_version_test.cpp
index df3a9486f64..6bc2d7eaac2 100644
--- a/src/mongo/s/catalog/type_config_version_test.cpp
+++ b/src/mongo/s/catalog/type_config_version_test.cpp
@@ -254,10 +254,10 @@ TEST(Excludes, BadRangeArray) {
<< "1.2.3"); // empty bound
BSONArray includeArr = bab.arr();
- auto versionInfoResult = VersionType::fromBSON(BSON(
- VersionType::minCompatibleVersion(3) << VersionType::currentVersion(4)
- << VersionType::clusterId(OID::gen())
- << VersionType::excludingMongoVersions(includeArr)));
+ auto versionInfoResult = VersionType::fromBSON(
+ BSON(VersionType::minCompatibleVersion(3)
+ << VersionType::currentVersion(4) << VersionType::clusterId(OID::gen())
+ << VersionType::excludingMongoVersions(includeArr)));
ASSERT_EQ(ErrorCodes::FailedToParse, versionInfoResult.getStatus());
}
diff --git a/src/mongo/s/catalog/type_database.cpp b/src/mongo/s/catalog/type_database.cpp
index 5dbeb34ab7d..2caf60f308f 100644
--- a/src/mongo/s/catalog/type_database.cpp
+++ b/src/mongo/s/catalog/type_database.cpp
@@ -83,10 +83,10 @@ StatusWith<DatabaseType> DatabaseType::fromBSON(const BSONObj& source) {
BSONObj versionField = source.getObjectField("version");
if (versionField.isEmpty()) {
return Status{ErrorCodes::InternalError,
- str::stream() << "DatabaseVersion doesn't exist in database entry "
- << source
- << " despite the config server being in binary version 4.2 "
- "or later."};
+ str::stream()
+ << "DatabaseVersion doesn't exist in database entry " << source
+ << " despite the config server being in binary version 4.2 "
+ "or later."};
}
dbtVersion = DatabaseVersion::parse(IDLParserErrorContext("DatabaseType"), versionField);
}
diff --git a/src/mongo/s/catalog/type_database_test.cpp b/src/mongo/s/catalog/type_database_test.cpp
index e4e4b046232..8a9eb73dcda 100644
--- a/src/mongo/s/catalog/type_database_test.cpp
+++ b/src/mongo/s/catalog/type_database_test.cpp
@@ -49,8 +49,7 @@ TEST(DatabaseType, Basic) {
UUID uuid = UUID::gen();
StatusWith<DatabaseType> status = DatabaseType::fromBSON(
BSON(DatabaseType::name("mydb")
- << DatabaseType::primary("shard")
- << DatabaseType::sharded(true)
+ << DatabaseType::primary("shard") << DatabaseType::sharded(true)
<< DatabaseType::version(BSON("uuid" << uuid << "lastMod" << 0))));
ASSERT_TRUE(status.isOK());
diff --git a/src/mongo/s/catalog/type_locks_test.cpp b/src/mongo/s/catalog/type_locks_test.cpp
index b00ffe06c0e..b249bb648a9 100644
--- a/src/mongo/s/catalog/type_locks_test.cpp
+++ b/src/mongo/s/catalog/type_locks_test.cpp
@@ -46,12 +46,12 @@ TEST(Validity, Empty) {
TEST(Validity, UnlockedWithOptional) {
OID testLockID = OID::gen();
- BSONObj obj = BSON(LocksType::name("dummy")
- << LocksType::process("host.local:27017:1352918870:16807")
- << LocksType::state(LocksType::State::UNLOCKED)
- << LocksType::lockID(testLockID)
- << LocksType::who("host.local:27017:1352918870:16807:Dummy:282475249")
- << LocksType::why("twiddling thumbs"));
+ BSONObj obj =
+ BSON(LocksType::name("dummy")
+ << LocksType::process("host.local:27017:1352918870:16807")
+ << LocksType::state(LocksType::State::UNLOCKED) << LocksType::lockID(testLockID)
+ << LocksType::who("host.local:27017:1352918870:16807:Dummy:282475249")
+ << LocksType::why("twiddling thumbs"));
auto locksResult = LocksType::fromBSON(obj);
ASSERT_OK(locksResult.getStatus());
@@ -78,12 +78,12 @@ TEST(Validity, UnlockedWithoutOptional) {
TEST(Validity, LockedValid) {
OID testLockID = OID::gen();
- BSONObj obj = BSON(LocksType::name("dummy")
- << LocksType::process("host.local:27017:1352918870:16807")
- << LocksType::state(LocksType::State::LOCKED)
- << LocksType::lockID(testLockID)
- << LocksType::who("host.local:27017:1352918870:16807:Dummy:282475249")
- << LocksType::why("doing balance round"));
+ BSONObj obj =
+ BSON(LocksType::name("dummy")
+ << LocksType::process("host.local:27017:1352918870:16807")
+ << LocksType::state(LocksType::State::LOCKED) << LocksType::lockID(testLockID)
+ << LocksType::who("host.local:27017:1352918870:16807:Dummy:282475249")
+ << LocksType::why("doing balance round"));
auto locksResult = LocksType::fromBSON(obj);
ASSERT_OK(locksResult.getStatus());
@@ -98,11 +98,11 @@ TEST(Validity, LockedValid) {
}
TEST(Validity, LockedMissingProcess) {
- BSONObj obj = BSON(LocksType::name("dummy")
- << LocksType::state(LocksType::State::LOCKED)
- << LocksType::lockID(OID::gen())
- << LocksType::who("host.local:27017:1352918870:16807:Dummy:282475249")
- << LocksType::why("doing balance round"));
+ BSONObj obj =
+ BSON(LocksType::name("dummy")
+ << LocksType::state(LocksType::State::LOCKED) << LocksType::lockID(OID::gen())
+ << LocksType::who("host.local:27017:1352918870:16807:Dummy:282475249")
+ << LocksType::why("doing balance round"));
auto locksResult = LocksType::fromBSON(obj);
ASSERT_OK(locksResult.getStatus());
@@ -124,11 +124,10 @@ TEST(Validity, LockedMissingLockID) {
}
TEST(Validity, LockedMissingWho) {
- BSONObj obj =
- BSON(LocksType::name("dummy") << LocksType::process("host.local:27017:1352918870:16807")
- << LocksType::state(LocksType::State::LOCKED)
- << LocksType::lockID(OID::gen())
- << LocksType::why("twiddling thumbs"));
+ BSONObj obj = BSON(LocksType::name("dummy")
+ << LocksType::process("host.local:27017:1352918870:16807")
+ << LocksType::state(LocksType::State::LOCKED)
+ << LocksType::lockID(OID::gen()) << LocksType::why("twiddling thumbs"));
auto locksResult = LocksType::fromBSON(obj);
ASSERT_OK(locksResult.getStatus());
@@ -137,11 +136,11 @@ TEST(Validity, LockedMissingWho) {
}
TEST(Validity, LockedMissingWhy) {
- BSONObj obj = BSON(LocksType::name("dummy")
- << LocksType::process("host.local:27017:1352918870:16807")
- << LocksType::state(LocksType::State::LOCKED)
- << LocksType::lockID(OID::gen())
- << LocksType::who("host.local:27017:1352918870:16807:Dummy:282475249"));
+ BSONObj obj =
+ BSON(LocksType::name("dummy")
+ << LocksType::process("host.local:27017:1352918870:16807")
+ << LocksType::state(LocksType::State::LOCKED) << LocksType::lockID(OID::gen())
+ << LocksType::who("host.local:27017:1352918870:16807:Dummy:282475249"));
auto locksResult = LocksType::fromBSON(obj);
ASSERT_OK(locksResult.getStatus());
@@ -150,12 +149,12 @@ TEST(Validity, LockedMissingWhy) {
}
TEST(Validity, ContestedValid) {
- BSONObj obj = BSON(LocksType::name("dummy")
- << LocksType::process("host.local:27017:1352918870:16807")
- << LocksType::state(LocksType::State::LOCK_PREP)
- << LocksType::lockID(OID::gen())
- << LocksType::who("host.local:27017:1352918870:16807:Dummy:282475249")
- << LocksType::why("twiddling thumbs"));
+ BSONObj obj =
+ BSON(LocksType::name("dummy")
+ << LocksType::process("host.local:27017:1352918870:16807")
+ << LocksType::state(LocksType::State::LOCK_PREP) << LocksType::lockID(OID::gen())
+ << LocksType::who("host.local:27017:1352918870:16807:Dummy:282475249")
+ << LocksType::why("twiddling thumbs"));
auto locksResult = LocksType::fromBSON(obj);
ASSERT_OK(locksResult.getStatus());
@@ -164,11 +163,11 @@ TEST(Validity, ContestedValid) {
}
TEST(Validity, ContestedMissingProcess) {
- BSONObj obj = BSON(LocksType::name("dummy")
- << LocksType::state(LocksType::State::LOCK_PREP)
- << LocksType::lockID(OID::gen())
- << LocksType::who("host.local:27017:1352918870:16807:Dummy:282475249")
- << LocksType::why("twiddling thumbs"));
+ BSONObj obj =
+ BSON(LocksType::name("dummy")
+ << LocksType::state(LocksType::State::LOCK_PREP) << LocksType::lockID(OID::gen())
+ << LocksType::who("host.local:27017:1352918870:16807:Dummy:282475249")
+ << LocksType::why("twiddling thumbs"));
auto locksResult = LocksType::fromBSON(obj);
ASSERT_OK(locksResult.getStatus());
@@ -190,11 +189,10 @@ TEST(Validity, ContestedMissingLockID) {
}
TEST(Validity, ContestedMissingWho) {
- BSONObj obj =
- BSON(LocksType::name("dummy") << LocksType::process("host.local:27017:1352918870:16807")
- << LocksType::state(LocksType::State::LOCK_PREP)
- << LocksType::lockID(OID::gen())
- << LocksType::why("doing balance round"));
+ BSONObj obj = BSON(LocksType::name("dummy")
+ << LocksType::process("host.local:27017:1352918870:16807")
+ << LocksType::state(LocksType::State::LOCK_PREP)
+ << LocksType::lockID(OID::gen()) << LocksType::why("doing balance round"));
auto locksResult = LocksType::fromBSON(obj);
ASSERT_OK(locksResult.getStatus());
@@ -203,11 +201,11 @@ TEST(Validity, ContestedMissingWho) {
}
TEST(Validity, ContestedMissingWhy) {
- BSONObj obj = BSON(LocksType::name("dummy")
- << LocksType::process("host.local:27017:1352918870:16807")
- << LocksType::state(LocksType::State::LOCK_PREP)
- << LocksType::lockID(OID::gen())
- << LocksType::who("host.local:27017:1352918870:16807:Dummy:282475249"));
+ BSONObj obj =
+ BSON(LocksType::name("dummy")
+ << LocksType::process("host.local:27017:1352918870:16807")
+ << LocksType::state(LocksType::State::LOCK_PREP) << LocksType::lockID(OID::gen())
+ << LocksType::who("host.local:27017:1352918870:16807:Dummy:282475249"));
auto locksResult = LocksType::fromBSON(obj);
ASSERT_OK(locksResult.getStatus());
diff --git a/src/mongo/s/catalog/type_mongos_test.cpp b/src/mongo/s/catalog/type_mongos_test.cpp
index a253ed68d00..7007305f412 100644
--- a/src/mongo/s/catalog/type_mongos_test.cpp
+++ b/src/mongo/s/catalog/type_mongos_test.cpp
@@ -41,10 +41,8 @@ using namespace mongo;
TEST(Validity, MissingName) {
BSONObj obj = BSON(MongosType::ping(Date_t::fromMillisSinceEpoch(1))
- << MongosType::uptime(100)
- << MongosType::waiting(false)
- << MongosType::mongoVersion("x.x.x")
- << MongosType::configVersion(0)
+ << MongosType::uptime(100) << MongosType::waiting(false)
+ << MongosType::mongoVersion("x.x.x") << MongosType::configVersion(0)
<< MongosType::advisoryHostFQDNs(BSONArrayBuilder().arr()));
auto mongosTypeResult = MongosType::fromBSON(obj);
@@ -53,10 +51,8 @@ TEST(Validity, MissingName) {
TEST(Validity, MissingPing) {
BSONObj obj = BSON(MongosType::name("localhost:27017")
- << MongosType::uptime(100)
- << MongosType::waiting(false)
- << MongosType::mongoVersion("x.x.x")
- << MongosType::configVersion(0)
+ << MongosType::uptime(100) << MongosType::waiting(false)
+ << MongosType::mongoVersion("x.x.x") << MongosType::configVersion(0)
<< MongosType::advisoryHostFQDNs(BSONArrayBuilder().arr()));
auto mongosTypeResult = MongosType::fromBSON(obj);
@@ -64,36 +60,33 @@ TEST(Validity, MissingPing) {
}
TEST(Validity, MissingUp) {
- BSONObj obj = BSON(MongosType::name("localhost:27017")
- << MongosType::ping(Date_t::fromMillisSinceEpoch(1))
- << MongosType::waiting(false)
- << MongosType::mongoVersion("x.x.x")
- << MongosType::configVersion(0)
- << MongosType::advisoryHostFQDNs(BSONArrayBuilder().arr()));
+ BSONObj obj =
+ BSON(MongosType::name("localhost:27017")
+ << MongosType::ping(Date_t::fromMillisSinceEpoch(1)) << MongosType::waiting(false)
+ << MongosType::mongoVersion("x.x.x") << MongosType::configVersion(0)
+ << MongosType::advisoryHostFQDNs(BSONArrayBuilder().arr()));
auto mongosTypeResult = MongosType::fromBSON(obj);
ASSERT_EQ(ErrorCodes::NoSuchKey, mongosTypeResult.getStatus());
}
TEST(Validity, MissingWaiting) {
- BSONObj obj = BSON(MongosType::name("localhost:27017")
- << MongosType::ping(Date_t::fromMillisSinceEpoch(1))
- << MongosType::uptime(100)
- << MongosType::mongoVersion("x.x.x")
- << MongosType::configVersion(0)
- << MongosType::advisoryHostFQDNs(BSONArrayBuilder().arr()));
+ BSONObj obj =
+ BSON(MongosType::name("localhost:27017")
+ << MongosType::ping(Date_t::fromMillisSinceEpoch(1)) << MongosType::uptime(100)
+ << MongosType::mongoVersion("x.x.x") << MongosType::configVersion(0)
+ << MongosType::advisoryHostFQDNs(BSONArrayBuilder().arr()));
auto mongosTypeResult = MongosType::fromBSON(obj);
ASSERT_EQ(ErrorCodes::NoSuchKey, mongosTypeResult.getStatus());
}
TEST(Validity, MissingMongoVersion) {
- BSONObj obj = BSON(MongosType::name("localhost:27017")
- << MongosType::ping(Date_t::fromMillisSinceEpoch(1))
- << MongosType::uptime(100)
- << MongosType::waiting(false)
- << MongosType::configVersion(0)
- << MongosType::advisoryHostFQDNs(BSONArrayBuilder().arr()));
+ BSONObj obj =
+ BSON(MongosType::name("localhost:27017")
+ << MongosType::ping(Date_t::fromMillisSinceEpoch(1)) << MongosType::uptime(100)
+ << MongosType::waiting(false) << MongosType::configVersion(0)
+ << MongosType::advisoryHostFQDNs(BSONArrayBuilder().arr()));
auto mongosTypeResult = MongosType::fromBSON(obj);
ASSERT_OK(mongosTypeResult.getStatus());
@@ -107,12 +100,11 @@ TEST(Validity, MissingMongoVersion) {
}
TEST(Validity, MissingConfigVersion) {
- BSONObj obj = BSON(MongosType::name("localhost:27017")
- << MongosType::ping(Date_t::fromMillisSinceEpoch(1))
- << MongosType::uptime(100)
- << MongosType::waiting(false)
- << MongosType::mongoVersion("x.x.x")
- << MongosType::advisoryHostFQDNs(BSONArrayBuilder().arr()));
+ BSONObj obj =
+ BSON(MongosType::name("localhost:27017")
+ << MongosType::ping(Date_t::fromMillisSinceEpoch(1)) << MongosType::uptime(100)
+ << MongosType::waiting(false) << MongosType::mongoVersion("x.x.x")
+ << MongosType::advisoryHostFQDNs(BSONArrayBuilder().arr()));
auto mongosTypeResult = MongosType::fromBSON(obj);
ASSERT_OK(mongosTypeResult.getStatus());
@@ -128,10 +120,8 @@ TEST(Validity, MissingConfigVersion) {
TEST(Validity, MissingAdvisoryHostFQDNs) {
BSONObj obj = BSON(MongosType::name("localhost:27017")
<< MongosType::ping(Date_t::fromMillisSinceEpoch(1))
- << MongosType::uptime(100)
- << MongosType::waiting(false)
- << MongosType::mongoVersion("x.x.x")
- << MongosType::configVersion(0));
+ << MongosType::uptime(100) << MongosType::waiting(false)
+ << MongosType::mongoVersion("x.x.x") << MongosType::configVersion(0));
auto mongosTypeResult = MongosType::fromBSON(obj);
ASSERT_OK(mongosTypeResult.getStatus());
@@ -144,10 +134,8 @@ TEST(Validity, MissingAdvisoryHostFQDNs) {
TEST(Validity, EmptyAdvisoryHostFQDNs) {
BSONObj obj = BSON(MongosType::name("localhost:27017")
<< MongosType::ping(Date_t::fromMillisSinceEpoch(1))
- << MongosType::uptime(100)
- << MongosType::waiting(false)
- << MongosType::mongoVersion("x.x.x")
- << MongosType::configVersion(0)
+ << MongosType::uptime(100) << MongosType::waiting(false)
+ << MongosType::mongoVersion("x.x.x") << MongosType::configVersion(0)
<< MongosType::advisoryHostFQDNs(BSONArrayBuilder().arr()));
auto mongosTypeResult = MongosType::fromBSON(obj);
@@ -162,10 +150,8 @@ TEST(Validity, EmptyAdvisoryHostFQDNs) {
TEST(Validity, BadTypeAdvisoryHostFQDNs) {
BSONObj obj = BSON(MongosType::name("localhost:27017")
<< MongosType::ping(Date_t::fromMillisSinceEpoch(1))
- << MongosType::uptime(100)
- << MongosType::waiting(false)
- << MongosType::mongoVersion("x.x.x")
- << MongosType::configVersion(0)
+ << MongosType::uptime(100) << MongosType::waiting(false)
+ << MongosType::mongoVersion("x.x.x") << MongosType::configVersion(0)
<< MongosType::advisoryHostFQDNs(BSON_ARRAY("foo" << 0 << "baz")));
auto mongosTypeResult = MongosType::fromBSON(obj);
@@ -175,10 +161,8 @@ TEST(Validity, BadTypeAdvisoryHostFQDNs) {
TEST(Validity, Valid) {
BSONObj obj = BSON(MongosType::name("localhost:27017")
<< MongosType::ping(Date_t::fromMillisSinceEpoch(1))
- << MongosType::uptime(100)
- << MongosType::waiting(false)
- << MongosType::mongoVersion("x.x.x")
- << MongosType::configVersion(0)
+ << MongosType::uptime(100) << MongosType::waiting(false)
+ << MongosType::mongoVersion("x.x.x") << MongosType::configVersion(0)
<< MongosType::advisoryHostFQDNs(BSON_ARRAY("foo"
<< "bar"
<< "baz")));
diff --git a/src/mongo/s/catalog/type_shard_database.cpp b/src/mongo/s/catalog/type_shard_database.cpp
index 268460023e2..059516dc3ac 100644
--- a/src/mongo/s/catalog/type_shard_database.cpp
+++ b/src/mongo/s/catalog/type_shard_database.cpp
@@ -65,10 +65,10 @@ StatusWith<ShardDatabaseType> ShardDatabaseType::fromBSON(const BSONObj& source)
BSONObj versionField = source.getObjectField("version");
if (versionField.isEmpty()) {
return Status{ErrorCodes::InternalError,
- str::stream() << "DatabaseVersion doesn't exist in database entry "
- << source
- << " despite the shard being in binary version 4.2 or "
- "later."};
+ str::stream()
+ << "DatabaseVersion doesn't exist in database entry " << source
+ << " despite the shard being in binary version 4.2 or "
+ "later."};
}
dbVersion = DatabaseVersion::parse(IDLParserErrorContext("DatabaseType"), versionField);
}
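Most message-building hunks in this commit only move `str::stream()` onto its own line so the text operands pack tighter. `str::stream` accumulates `<<` operands and converts implicitly to std::string, so the wrap point is cosmetic. Sketch (header path assumed):

    #include <string>

    #include "mongo/bson/bsonobj.h"
    #include "mongo/util/str.h"  // assumed path for str::stream in this tree

    std::string describeMissingVersion(const mongo::BSONObj& source) {
        // str::stream converts implicitly to std::string on return.
        return mongo::str::stream()
            << "DatabaseVersion doesn't exist in database entry " << source
            << " despite the shard being in binary version 4.2 or later.";
    }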
diff --git a/src/mongo/s/catalog/type_shard_test.cpp b/src/mongo/s/catalog/type_shard_test.cpp
index b39725e9c0f..d2c9ab0326e 100644
--- a/src/mongo/s/catalog/type_shard_test.cpp
+++ b/src/mongo/s/catalog/type_shard_test.cpp
@@ -62,9 +62,9 @@ TEST(ShardType, OnlyMandatory) {
}
TEST(ShardType, AllOptionalsPresent) {
- BSONObj obj = BSON(ShardType::name("shard0000") << ShardType::host("localhost:27017")
- << ShardType::draining(true)
- << ShardType::maxSizeMB(100));
+ BSONObj obj = BSON(ShardType::name("shard0000")
+ << ShardType::host("localhost:27017") << ShardType::draining(true)
+ << ShardType::maxSizeMB(100));
StatusWith<ShardType> shardRes = ShardType::fromBSON(obj);
ASSERT(shardRes.isOK());
ShardType shard = shardRes.getValue();
@@ -72,9 +72,8 @@ TEST(ShardType, AllOptionalsPresent) {
}
TEST(ShardType, MaxSizeAsFloat) {
- BSONObj obj = BSON(ShardType::name("shard0000") << ShardType::host("localhost:27017")
- << ShardType::maxSizeMB()
- << 100.0);
+ BSONObj obj = BSON(ShardType::name("shard0000")
+ << ShardType::host("localhost:27017") << ShardType::maxSizeMB() << 100.0);
StatusWith<ShardType> shardRes = ShardType::fromBSON(obj);
ASSERT(shardRes.isOK());
ShardType shard = shardRes.getValue();
diff --git a/src/mongo/s/catalog/type_tags_test.cpp b/src/mongo/s/catalog/type_tags_test.cpp
index f466fc234ae..1cd8ed6d276 100644
--- a/src/mongo/s/catalog/type_tags_test.cpp
+++ b/src/mongo/s/catalog/type_tags_test.cpp
@@ -58,8 +58,8 @@ TEST(TagsType, Valid) {
}
TEST(TagsType, MissingNsField) {
- BSONObj obj = BSON(TagsType::tag("tag") << TagsType::min(BSON("a" << 10))
- << TagsType::max(BSON("a" << 20)));
+ BSONObj obj = BSON(TagsType::tag("tag")
+ << TagsType::min(BSON("a" << 10)) << TagsType::max(BSON("a" << 20)));
StatusWith<TagsType> status = TagsType::fromBSON(obj);
ASSERT_FALSE(status.isOK());
@@ -67,8 +67,8 @@ TEST(TagsType, MissingNsField) {
}
TEST(TagsType, MissingTagField) {
- BSONObj obj = BSON(TagsType::ns("test.mycol") << TagsType::min(BSON("a" << 10))
- << TagsType::max(BSON("a" << 20)));
+ BSONObj obj = BSON(TagsType::ns("test.mycol")
+ << TagsType::min(BSON("a" << 10)) << TagsType::max(BSON("a" << 20)));
StatusWith<TagsType> status = TagsType::fromBSON(obj);
ASSERT_FALSE(status.isOK());
@@ -94,9 +94,9 @@ TEST(TagsType, MissingMaxKey) {
}
TEST(TagsType, KeysWithDifferentNumberOfColumns) {
- BSONObj obj = BSON(TagsType::ns("test.mycol") << TagsType::tag("tag")
- << TagsType::min(BSON("a" << 10 << "b" << 10))
- << TagsType::max(BSON("a" << 20)));
+ BSONObj obj = BSON(TagsType::ns("test.mycol")
+ << TagsType::tag("tag") << TagsType::min(BSON("a" << 10 << "b" << 10))
+ << TagsType::max(BSON("a" << 20)));
StatusWith<TagsType> status = TagsType::fromBSON(obj);
const TagsType& tag = status.getValue();
diff --git a/src/mongo/s/catalog_cache.cpp b/src/mongo/s/catalog_cache.cpp
index e4a151f9ccc..c2206848332 100644
--- a/src/mongo/s/catalog_cache.cpp
+++ b/src/mongo/s/catalog_cache.cpp
@@ -373,8 +373,7 @@ void CatalogCache::checkEpochOrThrow(const NamespaceString& nss,
const auto itDb = _collectionsByDb.find(nss.db());
uassert(StaleConfigInfo(nss, targetCollectionVersion, boost::none),
str::stream() << "could not act as router for " << nss.ns()
- << ", no entry for database "
- << nss.db(),
+ << ", no entry for database " << nss.db(),
itDb != _collectionsByDb.end());
auto itColl = itDb->second.find(nss.ns());
@@ -392,8 +391,7 @@ void CatalogCache::checkEpochOrThrow(const NamespaceString& nss,
auto foundVersion = itColl->second->routingInfo->getVersion();
uassert(StaleConfigInfo(nss, targetCollectionVersion, foundVersion),
str::stream() << "could not act as router for " << nss.ns() << ", wanted "
- << targetCollectionVersion.toString()
- << ", but found "
+ << targetCollectionVersion.toString() << ", but found "
<< foundVersion.toString(),
foundVersion.epoch() == targetCollectionVersion.epoch());
}
@@ -467,8 +465,8 @@ void CatalogCache::report(BSONObjBuilder* builder) const {
void CatalogCache::_scheduleDatabaseRefresh(WithLock lk,
const std::string& dbName,
std::shared_ptr<DatabaseInfoEntry> dbEntry) {
- const auto onRefreshCompleted =
- [ this, t = Timer(), dbName, dbEntry ](const StatusWith<DatabaseType>& swDbt) {
+ const auto onRefreshCompleted = [this, t = Timer(), dbName, dbEntry](
+ const StatusWith<DatabaseType>& swDbt) {
// TODO (SERVER-34164): Track and increment stats for database refreshes.
if (!swDbt.isOK()) {
LOG_CATALOG_REFRESH(0) << "Refresh for database " << dbName << " took " << t.millis()
@@ -556,8 +554,9 @@ void CatalogCache::_scheduleCollectionRefresh(WithLock lk,
}
// Invoked when one iteration of getChunksSince has completed, whether with success or error
- const auto onRefreshCompleted = [ this, t = Timer(), nss, isIncremental, existingRoutingInfo ](
- const Status& status, RoutingTableHistory* routingInfoAfterRefresh) {
+ const auto onRefreshCompleted = [this, t = Timer(), nss, isIncremental, existingRoutingInfo](
+ const Status& status,
+ RoutingTableHistory* routingInfoAfterRefresh) {
if (isIncremental) {
_stats.numActiveIncrementalRefreshes.subtractAndFetch(1);
} else {
@@ -570,9 +569,10 @@ void CatalogCache::_scheduleCollectionRefresh(WithLock lk,
LOG_CATALOG_REFRESH(0) << "Refresh for collection " << nss << " took " << t.millis()
<< " ms and failed" << causedBy(redact(status));
} else if (routingInfoAfterRefresh) {
- const int logLevel = (!existingRoutingInfo || (existingRoutingInfo &&
- routingInfoAfterRefresh->getVersion() !=
- existingRoutingInfo->getVersion()))
+ const int logLevel =
+ (!existingRoutingInfo ||
+ (existingRoutingInfo &&
+ routingInfoAfterRefresh->getVersion() != existingRoutingInfo->getVersion()))
? 0
: 1;
LOG_CATALOG_REFRESH(logLevel)
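The lambda hunks above show a second formatter change: older clang-format padded capture lists containing init-captures as `[ this, t = Timer() ]`, while 7.0.1 keeps the brackets tight and breaks the parameter list instead. A runnable stand-alone illustration of the init-capture pattern:

    #include <chrono>
    #include <iostream>

    int main() {
        // 't = ...' is an init-capture, the construct that triggered the old
        // "[ ... ]" padding; the newer formatter keeps the brackets tight.
        auto logElapsed = [t = std::chrono::steady_clock::now()](const char* tag) {
            auto ms = std::chrono::duration_cast<std::chrono::milliseconds>(
                          std::chrono::steady_clock::now() - t)
                          .count();
            std::cout << tag << " took " << ms << " ms\n";
        };
        logElapsed("refresh");
        return 0;
    }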
diff --git a/src/mongo/s/chunk.cpp b/src/mongo/s/chunk.cpp
index 6524460ad41..647742c3408 100644
--- a/src/mongo/s/chunk.cpp
+++ b/src/mongo/s/chunk.cpp
@@ -88,8 +88,7 @@ void ChunkInfo::throwIfMovedSince(const Timestamp& ts) const {
uasserted(ErrorCodes::MigrationConflict,
str::stream() << "Chunk has moved since timestamp: " << ts.toString()
- << ", most recently at timestamp: "
- << latestValidAfter.toString());
+ << ", most recently at timestamp: " << latestValidAfter.toString());
}
bool ChunkInfo::containsKey(const BSONObj& shardKey) const {
diff --git a/src/mongo/s/chunk_manager.cpp b/src/mongo/s/chunk_manager.cpp
index 5e59e59a079..c1012cfb0f5 100644
--- a/src/mongo/s/chunk_manager.cpp
+++ b/src/mongo/s/chunk_manager.cpp
@@ -454,15 +454,13 @@ ShardVersionMap RoutingTableHistory::_constructShardVersionMap() const {
str::stream()
<< "Gap exists in the routing table between chunks "
<< _chunkMap.at(_extractKeyString(*lastMax))->getRange().toString()
- << " and "
- << rangeLast->second->getRange().toString());
+ << " and " << rangeLast->second->getRange().toString());
else
uasserted(ErrorCodes::ConflictingOperationInProgress,
str::stream()
<< "Overlap exists in the routing table between chunks "
<< _chunkMap.at(_extractKeyString(*lastMax))->getRange().toString()
- << " and "
- << rangeLast->second->getRange().toString());
+ << " and " << rangeLast->second->getRange().toString());
}
if (!firstMin)
diff --git a/src/mongo/s/chunk_manager_index_bounds_test.cpp b/src/mongo/s/chunk_manager_index_bounds_test.cpp
index bf5b9b4b827..f24cfa72f57 100644
--- a/src/mongo/s/chunk_manager_index_bounds_test.cpp
+++ b/src/mongo/s/chunk_manager_index_bounds_test.cpp
@@ -320,8 +320,7 @@ TEST_F(CMCollapseTreeTest, Regex) {
OrderedIntervalList expected;
expected.intervals.push_back(Interval(BSON(""
<< ""
- << ""
- << BSONObj()),
+ << "" << BSONObj()),
true,
false));
BSONObjBuilder builder;
diff --git a/src/mongo/s/client/parallel.cpp b/src/mongo/s/client/parallel.cpp
index 7903b2c48e5..85df5bf1720 100644
--- a/src/mongo/s/client/parallel.cpp
+++ b/src/mongo/s/client/parallel.cpp
@@ -49,9 +49,9 @@
namespace mongo {
-using std::shared_ptr;
using std::map;
using std::set;
+using std::shared_ptr;
using std::string;
using std::vector;
@@ -565,10 +565,11 @@ void ParallelSortClusteredCursor::startInit(OperationContext* opCtx) {
// shard or if we keep better track of chunks, we can actually add the skip
// value into the cursor and/or make some assumptions about the return value
// size ( (batch size + skip amount) / num_servers ).
- _qSpec.ntoreturn() == 0 ? 0 : (_qSpec.ntoreturn() > 0
- ? _qSpec.ntoreturn() + _qSpec.ntoskip()
- : _qSpec.ntoreturn() -
- _qSpec.ntoskip()))); // batchSize
+ _qSpec.ntoreturn() == 0
+ ? 0
+ : (_qSpec.ntoreturn() > 0
+ ? _qSpec.ntoreturn() + _qSpec.ntoskip()
+ : _qSpec.ntoreturn() - _qSpec.ntoskip()))); // batchSize
} else {
// Single shard query
@@ -596,9 +597,9 @@ void ParallelSortClusteredCursor::startInit(OperationContext* opCtx) {
// Without full initialization, throw an exception
uassert(15987,
- str::stream() << "could not fully initialize cursor on shard " << shardId
- << ", current connection state is "
- << mdata.toBSON().toString(),
+ str::stream()
+ << "could not fully initialize cursor on shard " << shardId
+ << ", current connection state is " << mdata.toBSON().toString(),
success);
mdata.retryNext = false;
@@ -991,8 +992,7 @@ void ParallelSortClusteredCursor::_oldInit(OperationContext* opCtx) {
// Version is zero b/c this is deprecated codepath
staleConfigExs.push_back(str::stream() << "stale config detected for " << _ns
- << " in ParallelCursor::_init "
- << errLoc);
+ << " in ParallelCursor::_init " << errLoc);
break;
}
@@ -1054,8 +1054,8 @@ void ParallelSortClusteredCursor::_oldInit(OperationContext* opCtx) {
_cursors[i].reset(NULL, NULL);
if (!retry) {
- socketExs.push_back(str::stream() << "error querying server: "
- << servers[i]);
+ socketExs.push_back(str::stream()
+ << "error querying server: " << servers[i]);
conns[i]->done();
} else {
retryQueries.insert(i);
@@ -1275,12 +1275,7 @@ void ParallelConnectionMetadata::cleanup(bool full) {
BSONObj ParallelConnectionMetadata::toBSON() const {
return BSON("state" << (pcState ? pcState->toBSON() : BSONObj()) << "retryNext" << retryNext
- << "init"
- << initialized
- << "finish"
- << finished
- << "errored"
- << errored);
+ << "init" << initialized << "finish" << finished << "errored" << errored);
}
std::string ParallelConnectionState::toString() const {
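For reference, the reflowed nested ternary in startInit encodes this batch-size rule (a reading of the surrounding comment, not of other server code): zero means no limit, and a negative ntoreturn acts as a hard limit, so the skip amount is folded in with matching sign.

    #include <cstdint>

    std::int64_t computeBatchSize(std::int64_t ntoreturn, std::int64_t ntoskip) {
        if (ntoreturn == 0)
            return 0;  // unlimited
        return ntoreturn > 0 ? ntoreturn + ntoskip : ntoreturn - ntoskip;
    }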
diff --git a/src/mongo/s/client/shard.h b/src/mongo/s/client/shard.h
index 69db3fdbc87..44a2c48c43b 100644
--- a/src/mongo/s/client/shard.h
+++ b/src/mongo/s/client/shard.h
@@ -205,9 +205,9 @@ public:
const BSONObj& cmdObj) = 0;
/**
- * Runs a cursor command, exhausts the cursor, and pulls all data into memory. Performs retries
- * if the command fails in accordance with the kIdempotent RetryPolicy.
- */
+ * Runs a cursor command, exhausts the cursor, and pulls all data into memory. Performs retries
+ * if the command fails in accordance with the kIdempotent RetryPolicy.
+ */
StatusWith<QueryResponse> runExhaustiveCursorCommand(OperationContext* opCtx,
const ReadPreferenceSetting& readPref,
const std::string& dbName,
@@ -225,13 +225,13 @@ public:
RetryPolicy retryPolicy);
/**
- * Warning: This method exhausts the cursor and pulls all data into memory.
- * Do not use other than for very small (i.e., admin or metadata) collections.
- * Performs retries if the query fails in accordance with the kIdempotent RetryPolicy.
- *
- * ShardRemote instances expect "readConcernLevel" to always be kMajorityReadConcern, whereas
- * ShardLocal instances expect either kLocalReadConcern or kMajorityReadConcern.
- */
+ * Warning: This method exhausts the cursor and pulls all data into memory.
+ * Do not use other than for very small (i.e., admin or metadata) collections.
+ * Performs retries if the query fails in accordance with the kIdempotent RetryPolicy.
+ *
+ * ShardRemote instances expect "readConcernLevel" to always be kMajorityReadConcern, whereas
+ * ShardLocal instances expect either kLocalReadConcern or kMajorityReadConcern.
+ */
StatusWith<QueryResponse> exhaustiveFindOnConfig(OperationContext* opCtx,
const ReadPreferenceSetting& readPref,
const repl::ReadConcernLevel& readConcernLevel,
diff --git a/src/mongo/s/client/shard_registry.cpp b/src/mongo/s/client/shard_registry.cpp
index a24e4fd1bbb..dfa120f49c3 100644
--- a/src/mongo/s/client/shard_registry.cpp
+++ b/src/mongo/s/client/shard_registry.cpp
@@ -65,17 +65,17 @@
namespace mongo {
-using std::shared_ptr;
using std::set;
+using std::shared_ptr;
using std::string;
using std::unique_ptr;
using std::vector;
using executor::NetworkInterface;
using executor::NetworkInterfaceThreadPool;
+using executor::TaskExecutor;
using executor::TaskExecutorPool;
using executor::ThreadPoolTaskExecutor;
-using executor::TaskExecutor;
using CallbackArgs = TaskExecutor::CallbackArgs;
using CallbackHandle = TaskExecutor::CallbackHandle;
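The using-declaration moves here and in parallel.cpp come from clang-format's SortUsingDeclarations, which orders adjacent runs lexicographically; it is on by default in the LLVM base style, which the server's .clang-format is assumed to inherit.

    #include <map>
    #include <memory>
    #include <set>
    #include <string>
    #include <vector>

    // Sorted run: 'shared_ptr' lands after 'set', matching the hunks above.
    using std::map;
    using std::set;
    using std::shared_ptr;
    using std::string;
    using std::vector;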
diff --git a/src/mongo/s/client/shard_remote.cpp b/src/mongo/s/client/shard_remote.cpp
index 362160babec..8602c3d31d2 100644
--- a/src/mongo/s/client/shard_remote.cpp
+++ b/src/mongo/s/client/shard_remote.cpp
@@ -261,7 +261,6 @@ StatusWith<Shard::QueryResponse> ShardRemote::_runExhaustiveCursorCommand(
auto fetcherCallback = [&status, &response](const Fetcher::QueryResponseStatus& dataStatus,
Fetcher::NextAction* nextAction,
BSONObjBuilder* getMoreBob) {
-
// Throw out any accumulated results on error
if (!dataStatus.isOK()) {
status = dataStatus.getStatus();
diff --git a/src/mongo/s/client/shard_remote.h b/src/mongo/s/client/shard_remote.h
index e58ec0a8809..3b19fd8ab0f 100644
--- a/src/mongo/s/client/shard_remote.h
+++ b/src/mongo/s/client/shard_remote.h
@@ -136,10 +136,10 @@ private:
mutable stdx::mutex _lastCommittedOpTimeMutex;
/**
- * Logical time representing the latest opTime timestamp known to be in this shard's majority
- * committed snapshot. Only the latest time is kept because lagged secondaries may return earlier
- * times.
- */
+ * Logical time representing the latest opTime timestamp known to be in this shard's majority
+ * committed snapshot. Only the latest time is kept because lagged secondaries may return
+ * earlier times.
+ */
LogicalTime _lastCommittedOpTime;
/**
diff --git a/src/mongo/s/client/sharding_connection_hook.cpp b/src/mongo/s/client/sharding_connection_hook.cpp
index c16190f3949..cbdad3a1257 100644
--- a/src/mongo/s/client/sharding_connection_hook.cpp
+++ b/src/mongo/s/client/sharding_connection_hook.cpp
@@ -99,9 +99,7 @@ void ShardingConnectionHook::onCreate(DBClientBase* conn) {
uassert(28785,
str::stream() << "Unrecognized configsvr mode number: " << configServerModeNumber
<< ". Range of known configsvr mode numbers is: ["
- << minKnownConfigServerMode
- << ", "
- << maxKnownConfigServerMode
+ << minKnownConfigServerMode << ", " << maxKnownConfigServerMode
<< "]",
configServerModeNumber >= minKnownConfigServerMode &&
configServerModeNumber <= maxKnownConfigServerMode);
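Nearly every hunk in this commit rewraps a uassert()/uasserted() message. A stand-in sketch of the idiom, with names that are illustrative rather than the server's (the real macro likewise builds the message only when the assertion fails):

    #include <stdexcept>
    #include <string>
    #include <utility>

    struct AssertionExceptionLike : std::runtime_error {
        AssertionExceptionLike(int code, std::string msg)
            : std::runtime_error(std::move(msg)), code(code) {}
        int code;
    };

    // Check a condition; on failure, build the message and throw with a code.
    template <typename MakeMsg>
    void uassertLike(int code, MakeMsg&& makeMsg, bool expr) {
        if (!expr)
            throw AssertionExceptionLike(code, makeMsg());
    }

    // e.g. uassertLike(28785, [&] { return std::string("bad mode"); }, ok);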
diff --git a/src/mongo/s/client/version_manager.cpp b/src/mongo/s/client/version_manager.cpp
index 75c235d6cc6..b9dff2f77cd 100644
--- a/src/mongo/s/client/version_manager.cpp
+++ b/src/mongo/s/client/version_manager.cpp
@@ -48,8 +48,8 @@
namespace mongo {
-using std::shared_ptr;
using std::map;
+using std::shared_ptr;
using std::string;
namespace {
@@ -302,33 +302,24 @@ bool checkShardVersion(OperationContext* opCtx,
const ChunkVersion refVersion(refManager->getVersion(shard->getId()));
const ChunkVersion currentVersion(manager->getVersion(shard->getId()));
- string msg(str::stream() << "manager (" << currentVersion.toString() << " : "
- << manager->getSequenceNumber()
- << ") "
- << "not compatible with reference manager ("
- << refVersion.toString()
- << " : "
- << refManager->getSequenceNumber()
- << ") "
- << "on shard "
- << shard->getId()
- << " ("
- << shard->getConnString().toString()
- << ")");
+ string msg(str::stream()
+ << "manager (" << currentVersion.toString() << " : "
+ << manager->getSequenceNumber() << ") "
+ << "not compatible with reference manager (" << refVersion.toString()
+ << " : " << refManager->getSequenceNumber() << ") "
+ << "on shard " << shard->getId() << " (" << shard->getConnString().toString()
+ << ")");
uasserted(StaleConfigInfo(nss, refVersion, currentVersion), msg);
}
} else if (refManager) {
- string msg(str::stream() << "not sharded (" << (!manager ? string("<none>") : str::stream()
- << manager->getSequenceNumber())
+ string msg(str::stream() << "not sharded ("
+ << (!manager ? string("<none>")
+ : str::stream() << manager->getSequenceNumber())
<< ") but has reference manager ("
- << refManager->getSequenceNumber()
- << ") "
- << "on conn "
- << conn->getServerAddress()
- << " ("
- << conn_in->getServerAddress()
- << ")");
+ << refManager->getSequenceNumber() << ") "
+ << "on conn " << conn->getServerAddress() << " ("
+ << conn_in->getServerAddress() << ")");
uasserted(
StaleConfigInfo(nss, refManager->getVersion(shard->getId()), ChunkVersion::UNSHARDED()),
diff --git a/src/mongo/s/cluster_commands_helpers.cpp b/src/mongo/s/cluster_commands_helpers.cpp
index 3cf00aba6dd..616433389c5 100644
--- a/src/mongo/s/cluster_commands_helpers.cpp
+++ b/src/mongo/s/cluster_commands_helpers.cpp
@@ -218,15 +218,13 @@ std::vector<AsyncRequestsSender::Response> gatherResponses(
if (ErrorCodes::isStaleShardVersionError(status.code())) {
uassertStatusOK(status.withContext(str::stream()
<< "got stale shardVersion response from shard "
- << response.shardId
- << " at host "
+ << response.shardId << " at host "
<< response.shardHostAndPort->toString()));
}
if (ErrorCodes::StaleDbVersion == status) {
uassertStatusOK(status.withContext(
str::stream() << "got stale databaseVersion response from shard "
- << response.shardId
- << " at host "
+ << response.shardId << " at host "
<< response.shardHostAndPort->toString()));
}
@@ -527,8 +525,8 @@ void createShardDatabase(OperationContext* opCtx, StringData dbName) {
if (createDbResponse.commandStatus != ErrorCodes::NamespaceExists) {
uassertStatusOKWithContext(createDbResponse.commandStatus,
- str::stream() << "Database " << dbName
- << " could not be created");
+ str::stream()
+ << "Database " << dbName << " could not be created");
}
dbStatus = Grid::get(opCtx)->catalogCache()->getDatabase(opCtx, dbName);
diff --git a/src/mongo/s/commands/cluster_count_cmd.cpp b/src/mongo/s/commands/cluster_count_cmd.cpp
index 872adb0028a..67e486f9b3a 100644
--- a/src/mongo/s/commands/cluster_count_cmd.cpp
+++ b/src/mongo/s/commands/cluster_count_cmd.cpp
@@ -133,7 +133,11 @@ public:
}
const std::initializer_list<StringData> passthroughFields = {
- "$queryOptions", "collation", "hint", "readConcern", QueryRequest::cmdOptionMaxTimeMS,
+ "$queryOptions",
+ "collation",
+ "hint",
+ "readConcern",
+ QueryRequest::cmdOptionMaxTimeMS,
};
for (auto name : passthroughFields) {
if (auto field = cmdObj[name]) {
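The passthroughFields rewrite above shows clang-format's documented brace-list rule: a trailing comma forces one element per line instead of bin-packing. Illustrative sketch (plain const char* and a spelled-out literal stand in for StringData and the named constant):

    #include <initializer_list>

    const std::initializer_list<const char*> passthroughFields = {
        "$queryOptions",
        "collation",
        "hint",
        "readConcern",
        "maxTimeMS",  // trailing comma => one element per line
    };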
diff --git a/src/mongo/s/commands/cluster_data_size_cmd.cpp b/src/mongo/s/commands/cluster_data_size_cmd.cpp
index 6666ccda065..c8d410e1634 100644
--- a/src/mongo/s/commands/cluster_data_size_cmd.cpp
+++ b/src/mongo/s/commands/cluster_data_size_cmd.cpp
@@ -86,9 +86,10 @@ public:
uassert(ErrorCodes::BadValue,
"keyPattern must be empty or must be an object that equals the shard key",
- !keyPattern || (keyPattern.type() == Object &&
- SimpleBSONObjComparator::kInstance.evaluate(
- cm->getShardKeyPattern().toBSON() == keyPattern.Obj())));
+ !keyPattern ||
+ (keyPattern.type() == Object &&
+ SimpleBSONObjComparator::kInstance.evaluate(
+ cm->getShardKeyPattern().toBSON() == keyPattern.Obj())));
uassert(ErrorCodes::BadValue,
str::stream() << "min value " << min << " does not have shard key",
diff --git a/src/mongo/s/commands/cluster_explain.cpp b/src/mongo/s/commands/cluster_explain.cpp
index d814f6080d9..a433d211287 100644
--- a/src/mongo/s/commands/cluster_explain.cpp
+++ b/src/mongo/s/commands/cluster_explain.cpp
@@ -165,17 +165,16 @@ Status ClusterExplain::validateShardResults(const vector<Strategy::CommandResult
for (size_t i = 0; i < shardResults.size(); i++) {
auto status = getStatusFromCommandResult(shardResults[i].result);
if (!status.isOK()) {
- return status.withContext(str::stream() << "Explain command on shard "
- << shardResults[i].target.toString()
- << " failed");
+ return status.withContext(str::stream()
+ << "Explain command on shard "
+ << shardResults[i].target.toString() << " failed");
}
if (Object != shardResults[i].result["queryPlanner"].type()) {
return Status(ErrorCodes::OperationFailed,
- str::stream() << "Explain command on shard "
- << shardResults[i].target.toString()
- << " failed, caused by: "
- << shardResults[i].result);
+ str::stream()
+ << "Explain command on shard " << shardResults[i].target.toString()
+ << " failed, caused by: " << shardResults[i].result);
}
if (shardResults[i].result.hasField("executionStats")) {
@@ -197,9 +196,9 @@ Status ClusterExplain::validateShardResults(const vector<Strategy::CommandResult
// Either all shards should have all plans execution stats, or none should.
if (0 != numShardsAllPlansStats && shardResults.size() != numShardsAllPlansStats) {
return Status(ErrorCodes::InternalError,
- str::stream() << "Only " << numShardsAllPlansStats << "/"
- << shardResults.size()
- << " had allPlansExecution explain information.");
+ str::stream()
+ << "Only " << numShardsAllPlansStats << "/" << shardResults.size()
+ << " had allPlansExecution explain information.");
}
return Status::OK();
diff --git a/src/mongo/s/commands/cluster_explain_cmd.cpp b/src/mongo/s/commands/cluster_explain_cmd.cpp
index a4549e39e51..c188fe19b65 100644
--- a/src/mongo/s/commands/cluster_explain_cmd.cpp
+++ b/src/mongo/s/commands/cluster_explain_cmd.cpp
@@ -148,8 +148,7 @@ BSONObj makeExplainedObj(const BSONObj& outerObj, StringData dbName) {
if (auto innerDb = innerObj["$db"]) {
uassert(ErrorCodes::InvalidNamespace,
str::stream() << "Mismatched $db in explain command. Expected " << dbName
- << " but got "
- << innerDb.checkAndGetStringData(),
+ << " but got " << innerDb.checkAndGetStringData(),
innerDb.checkAndGetStringData() == dbName);
}
diff --git a/src/mongo/s/commands/cluster_find_cmd.cpp b/src/mongo/s/commands/cluster_find_cmd.cpp
index dd726a64cc6..e133875439b 100644
--- a/src/mongo/s/commands/cluster_find_cmd.cpp
+++ b/src/mongo/s/commands/cluster_find_cmd.cpp
@@ -49,8 +49,8 @@
namespace mongo {
namespace {
-using std::unique_ptr;
using std::string;
+using std::unique_ptr;
using std::vector;
const char kTermField[] = "term";
diff --git a/src/mongo/s/commands/cluster_find_test.cpp b/src/mongo/s/commands/cluster_find_test.cpp
index 7ebb923448f..8d0dc6792d4 100644
--- a/src/mongo/s/commands/cluster_find_test.cpp
+++ b/src/mongo/s/commands/cluster_find_test.cpp
@@ -41,8 +41,7 @@ protected:
<< "coll");
const BSONObj kFindCmdTargeted = BSON("find"
<< "coll"
- << "filter"
- << BSON("_id" << 0));
+ << "filter" << BSON("_id" << 0));
// The index of the shard expected to receive the response is used to prevent different shards
// from returning documents with the same shard key. This is expected to be 0 for queries
diff --git a/src/mongo/s/commands/cluster_kill_op.cpp b/src/mongo/s/commands/cluster_kill_op.cpp
index 91cdc8f1e91..2f72968826c 100644
--- a/src/mongo/s/commands/cluster_kill_op.cpp
+++ b/src/mongo/s/commands/cluster_kill_op.cpp
@@ -86,9 +86,7 @@ private:
uassert(28625,
str::stream() << "The op argument to killOp must be of the format shardid:opid"
- << " but found \""
- << opToKill
- << '"',
+ << " but found \"" << opToKill << '"',
(opToKill.size() >= 3) && // must have at least N:N
(opSepPos != std::string::npos) && // must have ':' as separator
(opSepPos != 0) && // can't be :NN
diff --git a/src/mongo/s/commands/cluster_map_reduce_cmd.cpp b/src/mongo/s/commands/cluster_map_reduce_cmd.cpp
index 8abfe34d7fb..53432a49499 100644
--- a/src/mongo/s/commands/cluster_map_reduce_cmd.cpp
+++ b/src/mongo/s/commands/cluster_map_reduce_cmd.cpp
@@ -345,9 +345,7 @@ public:
opCtx, dbname, shardedCommand, nss.ns(), q, collation, &mrCommandResults);
} catch (DBException& e) {
e.addContext(str::stream() << "could not run map command on all shards for ns "
- << nss.ns()
- << " and query "
- << q);
+ << nss.ns() << " and query " << q);
throw;
}
@@ -378,8 +376,8 @@ public:
if (!ok) {
// At this point we will return
- errmsg = str::stream() << "MR parallel processing failed: "
- << singleResult.toString();
+ errmsg = str::stream()
+ << "MR parallel processing failed: " << singleResult.toString();
continue;
}
@@ -498,11 +496,11 @@ public:
// the output collection exists and is unsharded, fail because we should not go
// from unsharded to sharded.
BSONObj listCollsCmdResponse;
- ok = conn->runCommand(
- outDB,
- BSON("listCollections" << 1 << "filter"
+ ok = conn->runCommand(outDB,
+ BSON("listCollections"
+ << 1 << "filter"
<< BSON("name" << outputCollNss.coll())),
- listCollsCmdResponse);
+ listCollsCmdResponse);
BSONObj cursorObj = listCollsCmdResponse.getObjectField("cursor");
BSONObj collections = cursorObj["firstBatch"].Obj();
@@ -575,9 +573,7 @@ public:
ok = true;
} catch (DBException& e) {
e.addContext(str::stream() << "could not run final reduce on all shards for "
- << nss.ns()
- << ", output "
- << outputCollNss.ns());
+ << nss.ns() << ", output " << outputCollNss.ns());
throw;
}
diff --git a/src/mongo/s/commands/cluster_merge_chunks_cmd.cpp b/src/mongo/s/commands/cluster_merge_chunks_cmd.cpp
index ef20c6dde10..c686efce22e 100644
--- a/src/mongo/s/commands/cluster_merge_chunks_cmd.cpp
+++ b/src/mongo/s/commands/cluster_merge_chunks_cmd.cpp
@@ -138,10 +138,10 @@ public:
if (!cm->getShardKeyPattern().isShardKey(minKey) ||
!cm->getShardKeyPattern().isShardKey(maxKey)) {
- errmsg = str::stream() << "shard key bounds "
- << "[" << minKey << "," << maxKey << ")"
- << " are not valid for shard key pattern "
- << cm->getShardKeyPattern().toBSON();
+ errmsg = str::stream()
+ << "shard key bounds "
+ << "[" << minKey << "," << maxKey << ")"
+ << " are not valid for shard key pattern " << cm->getShardKeyPattern().toBSON();
return false;
}
diff --git a/src/mongo/s/commands/cluster_move_chunk_cmd.cpp b/src/mongo/s/commands/cluster_move_chunk_cmd.cpp
index 9b87c67733a..81400604b41 100644
--- a/src/mongo/s/commands/cluster_move_chunk_cmd.cpp
+++ b/src/mongo/s/commands/cluster_move_chunk_cmd.cpp
@@ -116,10 +116,9 @@ public:
const auto toStatus = Grid::get(opCtx)->shardRegistry()->getShard(opCtx, toString);
if (!toStatus.isOK()) {
- std::string msg(str::stream() << "Could not move chunk in '" << nss.ns()
- << "' to shard '"
- << toString
- << "' because that shard does not exist");
+ std::string msg(str::stream()
+ << "Could not move chunk in '" << nss.ns() << "' to shard '" << toString
+ << "' because that shard does not exist");
log() << msg;
uasserted(ErrorCodes::ShardNotFound, msg);
}
@@ -158,10 +157,10 @@ public:
// bounds
if (!cm->getShardKeyPattern().isShardKey(bounds[0].Obj()) ||
!cm->getShardKeyPattern().isShardKey(bounds[1].Obj())) {
- errmsg = str::stream() << "shard key bounds "
- << "[" << bounds[0].Obj() << "," << bounds[1].Obj() << ")"
- << " are not valid for shard key pattern "
- << cm->getShardKeyPattern().toBSON();
+ errmsg = str::stream()
+ << "shard key bounds "
+ << "[" << bounds[0].Obj() << "," << bounds[1].Obj() << ")"
+ << " are not valid for shard key pattern " << cm->getShardKeyPattern().toBSON();
return false;
}
diff --git a/src/mongo/s/commands/cluster_split_cmd.cpp b/src/mongo/s/commands/cluster_split_cmd.cpp
index a3eb246a2b1..47f42767a5a 100644
--- a/src/mongo/s/commands/cluster_split_cmd.cpp
+++ b/src/mongo/s/commands/cluster_split_cmd.cpp
@@ -205,10 +205,10 @@ public:
// bounds
if (!cm->getShardKeyPattern().isShardKey(bounds[0].Obj()) ||
!cm->getShardKeyPattern().isShardKey(bounds[1].Obj())) {
- errmsg = str::stream() << "shard key bounds "
- << "[" << bounds[0].Obj() << "," << bounds[1].Obj() << ")"
- << " are not valid for shard key pattern "
- << cm->getShardKeyPattern().toBSON();
+ errmsg = str::stream()
+ << "shard key bounds "
+ << "[" << bounds[0].Obj() << "," << bounds[1].Obj() << ")"
+ << " are not valid for shard key pattern " << cm->getShardKeyPattern().toBSON();
return false;
}
@@ -225,9 +225,9 @@ public:
} else {
// middle
if (!cm->getShardKeyPattern().isShardKey(middle)) {
- errmsg = str::stream() << "new split key " << middle
- << " is not valid for shard key pattern "
- << cm->getShardKeyPattern().toBSON();
+ errmsg = str::stream()
+ << "new split key " << middle << " is not valid for shard key pattern "
+ << cm->getShardKeyPattern().toBSON();
return false;
}
@@ -239,9 +239,9 @@ public:
chunk.emplace(cm->findIntersectingChunkWithSimpleCollation(middle));
if (chunk->getMin().woCompare(middle) == 0 || chunk->getMax().woCompare(middle) == 0) {
- errmsg = str::stream() << "new split key " << middle
- << " is a boundary key of existing chunk "
- << "[" << chunk->getMin() << "," << chunk->getMax() << ")";
+ errmsg = str::stream()
+ << "new split key " << middle << " is a boundary key of existing chunk "
+ << "[" << chunk->getMin() << "," << chunk->getMax() << ")";
return false;
}
}
diff --git a/src/mongo/s/commands/commands_public.cpp b/src/mongo/s/commands/commands_public.cpp
index 3a47c10867a..9537bc920e9 100644
--- a/src/mongo/s/commands/commands_public.cpp
+++ b/src/mongo/s/commands/commands_public.cpp
@@ -503,8 +503,8 @@ public:
}
return Status(ErrorCodes::Unauthorized,
- str::stream() << "Not authorized to list indexes on collection: "
- << ns.coll());
+ str::stream()
+ << "Not authorized to list indexes on collection: " << ns.coll());
}
bool run(OperationContext* opCtx,
diff --git a/src/mongo/s/commands/strategy.cpp b/src/mongo/s/commands/strategy.cpp
index cbe2d514ec8..cba00c83e2c 100644
--- a/src/mongo/s/commands/strategy.cpp
+++ b/src/mongo/s/commands/strategy.cpp
@@ -188,9 +188,7 @@ void addContextForTransactionAbortingError(StringData txnIdAsString,
DBException& ex,
StringData reason) {
ex.addContext(str::stream() << "Transaction " << txnIdAsString << " was aborted on statement "
- << latestStmtId
- << " due to: "
- << reason);
+ << latestStmtId << " due to: " << reason);
}
void execCommandClient(OperationContext* opCtx,
@@ -644,9 +642,7 @@ DbResponse Strategy::queryOp(OperationContext* opCtx, const NamespaceString& nss
if (q.queryOptions & QueryOption_Exhaust) {
uasserted(18526,
str::stream() << "The 'exhaust' query option is invalid for mongos queries: "
- << nss.ns()
- << " "
- << q.query.toString());
+ << nss.ns() << " " << q.query.toString());
}
// Determine the default read preference mode based on the value of the slaveOk flag.
@@ -860,9 +856,7 @@ void Strategy::killCursors(OperationContext* opCtx, DbMessage* dbm) {
const int numCursors = dbm->pullInt();
massert(34425,
str::stream() << "Invalid killCursors message. numCursors: " << numCursors
- << ", message size: "
- << dbm->msg().dataSize()
- << ".",
+ << ", message size: " << dbm->msg().dataSize() << ".",
dbm->msg().dataSize() == 8 + (8 * numCursors));
uassert(28794,
str::stream() << "numCursors must be between 1 and 29999. numCursors: " << numCursors
diff --git a/src/mongo/s/grid.cpp b/src/mongo/s/grid.cpp
index bc9d9abfd23..97e2ccef518 100644
--- a/src/mongo/s/grid.cpp
+++ b/src/mongo/s/grid.cpp
@@ -130,8 +130,9 @@ boost::optional<repl::OpTime> Grid::advanceConfigOpTime(OperationContext* opCtx,
if (opCtx && opCtx->getClient()) {
clientAddr = opCtx->getClient()->clientAddress(true);
}
- log() << "Received " << what << " " << clientAddr << " indicating config server optime "
- "term has increased, previous optime "
+ log() << "Received " << what << " " << clientAddr
+ << " indicating config server optime "
+ "term has increased, previous optime "
<< prevOpTime << ", now " << opTime;
}
return prevOpTime;
diff --git a/src/mongo/s/mongos_options.h b/src/mongo/s/mongos_options.h
index b7adce6e829..97c3bc53e34 100644
--- a/src/mongo/s/mongos_options.h
+++ b/src/mongo/s/mongos_options.h
@@ -78,4 +78,4 @@ Status validateMongosOptions(const moe::Environment& params);
Status canonicalizeMongosOptions(moe::Environment* params);
Status storeMongosOptions(const moe::Environment& params);
-}
+} // namespace mongo
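This one-line hunk, and the similar ones in cluster_client_cursor_params.h and cluster_cursor_manager.h below, come from FixNamespaceComments, which appends or repairs the comment on a namespace's closing brace:

    namespace mongo {
    struct MongosGlobalParams;  // illustrative declaration
    }  // namespace mongo  <- comment added/fixed by the formatter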
diff --git a/src/mongo/s/query/async_results_merger.cpp b/src/mongo/s/query/async_results_merger.cpp
index 0f76cdd3f67..2d88c7b4f53 100644
--- a/src/mongo/s/query/async_results_merger.cpp
+++ b/src/mongo/s/query/async_results_merger.cpp
@@ -654,15 +654,13 @@ bool AsyncResultsMerger::_addBatchToBuffer(WithLock lk,
remote.status =
Status(ErrorCodes::InternalError,
str::stream() << "Missing field '" << AsyncResultsMerger::kSortKeyField
- << "' in document: "
- << obj);
+ << "' in document: " << obj);
return false;
} else if (!_params.getCompareWholeSortKey() && key.type() != BSONType::Object) {
remote.status =
Status(ErrorCodes::InternalError,
str::stream() << "Field '" << AsyncResultsMerger::kSortKeyField
- << "' was not of type Object in document: "
- << obj);
+ << "' was not of type Object in document: " << obj);
return false;
}
}
diff --git a/src/mongo/s/query/async_results_merger_test.cpp b/src/mongo/s/query/async_results_merger_test.cpp
index 30d3a182c42..f3bd002c89f 100644
--- a/src/mongo/s/query/async_results_merger_test.cpp
+++ b/src/mongo/s/query/async_results_merger_test.cpp
@@ -42,7 +42,6 @@
#include "mongo/s/query/results_merger_test_fixture.h"
#include "mongo/stdx/memory.h"
#include "mongo/unittest/death_test.h"
-#include "mongo/unittest/death_test.h"
#include "mongo/unittest/unittest.h"
namespace mongo {
@@ -1323,8 +1322,7 @@ TEST_F(AsyncResultsMergerTest, GetMoreRequestIncludesMaxTimeMS) {
// The next getMore request should include the maxTimeMS.
expectedCmdObj = BSON("getMore" << CursorId(123) << "collection"
<< "testcoll"
- << "maxTimeMS"
- << 789);
+ << "maxTimeMS" << 789);
ASSERT_BSONOBJ_EQ(getNthPendingRequest(0).cmdObj, expectedCmdObj);
// Clean up.
@@ -1345,11 +1343,10 @@ DEATH_TEST_F(AsyncResultsMergerTest,
// Create one cursor whose initial response has a postBatchResumeToken.
auto pbrtFirstCursor = makePostBatchResumeToken(Timestamp(1, 5));
auto firstDocSortKey = makeResumeToken(Timestamp(1, 4), uuid, BSON("_id" << 1));
- auto firstCursorResponse = fromjson(
- str::stream() << "{_id: {clusterTime: {ts: Timestamp(1, 4)}, uuid: '" << uuid.toString()
- << "', documentKey: {_id: 1}}, $sortKey: {'': '"
- << firstDocSortKey.firstElement().String()
- << "'}}");
+ auto firstCursorResponse =
+ fromjson(str::stream() << "{_id: {clusterTime: {ts: Timestamp(1, 4)}, uuid: '"
+ << uuid.toString() << "', documentKey: {_id: 1}}, $sortKey: {'': '"
+ << firstDocSortKey.firstElement().String() << "'}}");
cursors.push_back(makeRemoteCursor(
kTestShardIds[0],
kTestShardHosts[0],
@@ -1380,11 +1377,10 @@ DEATH_TEST_F(AsyncResultsMergerTest,
std::vector<RemoteCursor> cursors;
BSONObj pbrtFirstCursor;
auto firstDocSortKey = makeResumeToken(Timestamp(1, 4), uuid, BSON("_id" << 1));
- auto firstCursorResponse = fromjson(
- str::stream() << "{_id: {clusterTime: {ts: Timestamp(1, 4)}, uuid: '" << uuid.toString()
- << "', documentKey: {_id: 1}}, $sortKey: {'': '"
- << firstDocSortKey.firstElement().String()
- << "'}}");
+ auto firstCursorResponse =
+ fromjson(str::stream() << "{_id: {clusterTime: {ts: Timestamp(1, 4)}, uuid: '"
+ << uuid.toString() << "', documentKey: {_id: 1}}, $sortKey: {'': '"
+ << firstDocSortKey.firstElement().String() << "'}}");
cursors.push_back(makeRemoteCursor(
kTestShardIds[0],
kTestShardHosts[0],
@@ -1410,11 +1406,10 @@ TEST_F(AsyncResultsMergerTest, SortedTailableCursorNotReadyIfRemoteHasLowerPostB
std::vector<RemoteCursor> cursors;
auto pbrtFirstCursor = makePostBatchResumeToken(Timestamp(1, 5));
auto firstDocSortKey = makeResumeToken(Timestamp(1, 4), uuid, BSON("_id" << 1));
- auto firstCursorResponse = fromjson(
- str::stream() << "{_id: {clusterTime: {ts: Timestamp(1, 4)}, uuid: '" << uuid.toString()
- << "', documentKey: {_id: 1}}, $sortKey: {'': '"
- << firstDocSortKey.firstElement().String()
- << "'}}");
+ auto firstCursorResponse =
+ fromjson(str::stream() << "{_id: {clusterTime: {ts: Timestamp(1, 4)}, uuid: '"
+ << uuid.toString() << "', documentKey: {_id: 1}}, $sortKey: {'': '"
+ << firstDocSortKey.firstElement().String() << "'}}");
cursors.push_back(makeRemoteCursor(
kTestShardIds[0],
kTestShardHosts[0],
@@ -1451,11 +1446,10 @@ DEATH_TEST_F(AsyncResultsMergerTest,
UUID uuid = UUID::gen();
std::vector<RemoteCursor> cursors;
auto firstDocSortKey = makeResumeToken(Timestamp(1, 4), uuid, BSON("_id" << 1));
- auto firstCursorResponse = fromjson(
- str::stream() << "{_id: {clusterTime: {ts: Timestamp(1, 4)}, uuid: '" << uuid.toString()
- << "', documentKey: {_id: 1}}, $sortKey: {'': '"
- << firstDocSortKey.firstElement().String()
- << "'}}");
+ auto firstCursorResponse =
+ fromjson(str::stream() << "{_id: {clusterTime: {ts: Timestamp(1, 4)}, uuid: '"
+ << uuid.toString() << "', documentKey: {_id: 1}}, $sortKey: {'': '"
+ << firstDocSortKey.firstElement().String() << "'}}");
cursors.push_back(makeRemoteCursor(
kTestShardIds[0],
kTestShardHosts[0],
@@ -1483,11 +1477,10 @@ TEST_F(AsyncResultsMergerTest, SortedTailableCursorIgnoresOplogTimestamp) {
auto pbrtFirstCursor = makePostBatchResumeToken(Timestamp(1, 5));
auto firstDocSortKey = makeResumeToken(Timestamp(1, 4), uuid, BSON("_id" << 1));
// Set the first cursor to have both a PBRT and a matching oplog timestamp.
- auto firstCursorResponse = fromjson(
- str::stream() << "{_id: {clusterTime: {ts: Timestamp(1, 4)}, uuid: '" << uuid.toString()
- << "', documentKey: {_id: 1}}, $sortKey: {'': '"
- << firstDocSortKey.firstElement().String()
- << "'}}");
+ auto firstCursorResponse =
+ fromjson(str::stream() << "{_id: {clusterTime: {ts: Timestamp(1, 4)}, uuid: '"
+ << uuid.toString() << "', documentKey: {_id: 1}}, $sortKey: {'': '"
+ << firstDocSortKey.firstElement().String() << "'}}");
cursors.push_back(makeRemoteCursor(
kTestShardIds[0],
kTestShardHosts[0],
@@ -1541,11 +1534,10 @@ TEST_F(AsyncResultsMergerTest, SortedTailableCursorNewShardOrderedAfterExisting)
std::vector<CursorResponse> responses;
auto firstDocSortKey = makeResumeToken(Timestamp(1, 4), uuid, BSON("_id" << 1));
auto pbrtFirstCursor = makePostBatchResumeToken(Timestamp(1, 6));
- auto firstCursorResponse = fromjson(
- str::stream() << "{_id: {clusterTime: {ts: Timestamp(1, 4)}, uuid: '" << uuid.toString()
- << "', documentKey: {_id: 1}}, $sortKey: {'': '"
- << firstDocSortKey.firstElement().String()
- << "'}}");
+ auto firstCursorResponse =
+ fromjson(str::stream() << "{_id: {clusterTime: {ts: Timestamp(1, 4)}, uuid: '"
+ << uuid.toString() << "', documentKey: {_id: 1}}, $sortKey: {'': '"
+ << firstDocSortKey.firstElement().String() << "'}}");
std::vector<BSONObj> batch1 = {firstCursorResponse};
auto firstDoc = batch1.front();
responses.emplace_back(
@@ -1572,11 +1564,10 @@ TEST_F(AsyncResultsMergerTest, SortedTailableCursorNewShardOrderedAfterExisting)
responses.clear();
auto secondDocSortKey = makeResumeToken(Timestamp(1, 5), uuid, BSON("_id" << 2));
auto pbrtSecondCursor = makePostBatchResumeToken(Timestamp(1, 6));
- auto secondCursorResponse = fromjson(
- str::stream() << "{_id: {clusterTime: {ts: Timestamp(1, 5)}, uuid: '" << uuid.toString()
- << "', documentKey: {_id: 2}}, $sortKey: {'': '"
- << secondDocSortKey.firstElement().String()
- << "'}}");
+ auto secondCursorResponse =
+ fromjson(str::stream() << "{_id: {clusterTime: {ts: Timestamp(1, 5)}, uuid: '"
+ << uuid.toString() << "', documentKey: {_id: 2}}, $sortKey: {'': '"
+ << secondDocSortKey.firstElement().String() << "'}}");
std::vector<BSONObj> batch2 = {secondCursorResponse};
auto secondDoc = batch2.front();
responses.emplace_back(
@@ -1623,11 +1614,10 @@ TEST_F(AsyncResultsMergerTest, SortedTailableCursorNewShardOrderedBeforeExisting
std::vector<CursorResponse> responses;
auto firstDocSortKey = makeResumeToken(Timestamp(1, 4), uuid, BSON("_id" << 1));
auto pbrtFirstCursor = makePostBatchResumeToken(Timestamp(1, 5));
- auto firstCursorResponse = fromjson(
- str::stream() << "{_id: {clusterTime: {ts: Timestamp(1, 4)}, uuid: '" << uuid.toString()
- << "', documentKey: {_id: 1}}, $sortKey: {'': '"
- << firstDocSortKey.firstElement().String()
- << "'}}");
+ auto firstCursorResponse =
+ fromjson(str::stream() << "{_id: {clusterTime: {ts: Timestamp(1, 4)}, uuid: '"
+ << uuid.toString() << "', documentKey: {_id: 1}}, $sortKey: {'': '"
+ << firstDocSortKey.firstElement().String() << "'}}");
std::vector<BSONObj> batch1 = {firstCursorResponse};
responses.emplace_back(
kTestNss, CursorId(123), batch1, boost::none, boost::none, pbrtFirstCursor);
@@ -1653,11 +1643,10 @@ TEST_F(AsyncResultsMergerTest, SortedTailableCursorNewShardOrderedBeforeExisting
responses.clear();
auto secondDocSortKey = makeResumeToken(Timestamp(1, 3), uuid, BSON("_id" << 2));
auto pbrtSecondCursor = makePostBatchResumeToken(Timestamp(1, 5));
- auto secondCursorResponse = fromjson(
- str::stream() << "{_id: {clusterTime: {ts: Timestamp(1, 3)}, uuid: '" << uuid.toString()
- << "', documentKey: {_id: 2}}, $sortKey: {'': '"
- << secondDocSortKey.firstElement().String()
- << "'}}");
+ auto secondCursorResponse =
+ fromjson(str::stream() << "{_id: {clusterTime: {ts: Timestamp(1, 3)}, uuid: '"
+ << uuid.toString() << "', documentKey: {_id: 2}}, $sortKey: {'': '"
+ << secondDocSortKey.firstElement().String() << "'}}");
std::vector<BSONObj> batch2 = {secondCursorResponse};
// The last observed time should still be later than the first shard, so we can get the data
// from it.
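The repeated test hunks above all rewrap the same fixture idiom: splice dynamic values into relaxed JSON with str::stream, then parse it back with fromjson(). Sketch (header paths assumed):

    #include <string>

    #include "mongo/db/json.h"   // assumed path for fromjson()
    #include "mongo/util/str.h"  // assumed path for str::stream

    mongo::BSONObj makeChangeEvent(const std::string& uuidStr) {
        return mongo::fromjson(mongo::str::stream()
                               << "{_id: {clusterTime: {ts: Timestamp(1, 4)}, uuid: '"
                               << uuidStr << "', documentKey: {_id: 1}}}");
    }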
diff --git a/src/mongo/s/query/blocking_results_merger_test.cpp b/src/mongo/s/query/blocking_results_merger_test.cpp
index 2c269fd2d6b..5d07b0e2c75 100644
--- a/src/mongo/s/query/blocking_results_merger_test.cpp
+++ b/src/mongo/s/query/blocking_results_merger_test.cpp
@@ -169,7 +169,6 @@ TEST_F(ResultsMergerTestFixture, ShouldBeAbleToBlockUntilNextResultIsReadyWithDe
operationContext(), RouterExecStage::ExecContext::kGetMoreNoResultsYet));
ASSERT_FALSE(next.isEOF());
ASSERT_BSONOBJ_EQ(*next.getResult(), BSON("x" << 1));
-
});
// Schedule the response to the getMore which will return the next result and mark the cursor as
diff --git a/src/mongo/s/query/cluster_aggregate.cpp b/src/mongo/s/query/cluster_aggregate.cpp
index 6b3e1902537..d523439eb20 100644
--- a/src/mongo/s/query/cluster_aggregate.cpp
+++ b/src/mongo/s/query/cluster_aggregate.cpp
@@ -284,8 +284,7 @@ Status appendExplainResults(sharded_agg_helpers::DispatchShardPipelineResults&&
auto queryPlannerElement = data["queryPlanner"];
uassert(51157,
str::stream() << "Malformed explain response received from shard " << shardId
- << ": "
- << data.toString(),
+ << ": " << data.toString(),
queryPlannerElement);
explain << "queryPlanner" << queryPlannerElement;
if (auto executionStatsElement = data["executionStats"]) {
@@ -739,9 +738,7 @@ Status ClusterAggregate::runAggregate(OperationContext* opCtx,
!request.getRuntimeConstants());
uassert(51089,
str::stream() << "Internal parameter(s) [" << AggregationRequest::kNeedsMergeName
- << ", "
- << AggregationRequest::kFromMongosName
- << ", "
+ << ", " << AggregationRequest::kFromMongosName << ", "
<< AggregationRequest::kMergeByPBRTName
<< "] cannot be set to 'true' when sent to mongos",
!request.needsMerge() && !request.isFromMongos() && !request.mergeByPBRT());
diff --git a/src/mongo/s/query/cluster_aggregation_planner.cpp b/src/mongo/s/query/cluster_aggregation_planner.cpp
index da55bf71dad..5b25ea371e1 100644
--- a/src/mongo/s/query/cluster_aggregation_planner.cpp
+++ b/src/mongo/s/query/cluster_aggregation_planner.cpp
@@ -317,8 +317,7 @@ BSONObj buildNewKeyPattern(const ShardKeyPattern& shardKey, StringMap<std::strin
auto it = renames.find(elem.fieldNameStringData());
invariant(it != renames.end(),
str::stream() << "Could not find new name of shard key field \""
- << elem.fieldName()
- << "\": rename map was "
+ << elem.fieldName() << "\": rename map was "
<< mapToString(renames));
newPattern.appendAs(elem, it->second);
}
diff --git a/src/mongo/s/query/cluster_client_cursor_params.h b/src/mongo/s/query/cluster_client_cursor_params.h
index 7106afcdfa1..9fff8d392df 100644
--- a/src/mongo/s/query/cluster_client_cursor_params.h
+++ b/src/mongo/s/query/cluster_client_cursor_params.h
@@ -157,4 +157,4 @@ struct ClusterClientCursorParams {
boost::optional<bool> isAutoCommit;
};
-} // mongo
+} // namespace mongo
diff --git a/src/mongo/s/query/cluster_cursor_manager.cpp b/src/mongo/s/query/cluster_cursor_manager.cpp
index b25c26946cf..f5b3290a59a 100644
--- a/src/mongo/s/query/cluster_cursor_manager.cpp
+++ b/src/mongo/s/query/cluster_cursor_manager.cpp
@@ -57,9 +57,8 @@ Status cursorNotFoundStatus(const NamespaceString& nss, CursorId cursorId) {
Status cursorInUseStatus(const NamespaceString& nss, CursorId cursorId) {
return {ErrorCodes::CursorInUse,
- str::stream() << "Cursor already in use (namespace: '" << nss.ns() << "', id: "
- << cursorId
- << ")."};
+ str::stream() << "Cursor already in use (namespace: '" << nss.ns()
+ << "', id: " << cursorId << ")."};
}
//
@@ -349,9 +348,9 @@ StatusWith<ClusterCursorManager::PinnedCursor> ClusterCursorManager::checkOutCur
// Check if the user is coauthorized to access this cursor.
auto authCheckStatus = authChecker(entry->getAuthenticatedUsers());
if (!authCheckStatus.isOK()) {
- return authCheckStatus.withContext(
- str::stream() << "cursor id " << cursorId
- << " was not created by the authenticated user");
+ return authCheckStatus.withContext(str::stream()
+ << "cursor id " << cursorId
+ << " was not created by the authenticated user");
}
if (checkSessionAuth == kCheckSession) {
diff --git a/src/mongo/s/query/cluster_cursor_manager.h b/src/mongo/s/query/cluster_cursor_manager.h
index 3d0afe5db94..88d89a7704b 100644
--- a/src/mongo/s/query/cluster_cursor_manager.h
+++ b/src/mongo/s/query/cluster_cursor_manager.h
@@ -718,4 +718,4 @@ private:
size_t _cursorsTimedOut = 0;
};
-} // namespace
+} // namespace mongo
diff --git a/src/mongo/s/query/cluster_find.cpp b/src/mongo/s/query/cluster_find.cpp
index 07c10167123..cbc0d09d368 100644
--- a/src/mongo/s/query/cluster_find.cpp
+++ b/src/mongo/s/query/cluster_find.cpp
@@ -99,9 +99,7 @@ StatusWith<std::unique_ptr<QueryRequest>> transformQueryForShards(
ErrorCodes::Overflow,
str::stream()
<< "sum of limit and skip cannot be represented as a 64-bit integer, limit: "
- << *qr.getLimit()
- << ", skip: "
- << qr.getSkip().value_or(0));
+ << *qr.getLimit() << ", skip: " << qr.getSkip().value_or(0));
}
newLimit = newLimitValue;
}
@@ -118,9 +116,7 @@ StatusWith<std::unique_ptr<QueryRequest>> transformQueryForShards(
str::stream()
<< "sum of ntoreturn and skip cannot be represented as a 64-bit "
"integer, ntoreturn: "
- << *qr.getNToReturn()
- << ", skip: "
- << qr.getSkip().value_or(0));
+ << *qr.getNToReturn() << ", skip: " << qr.getSkip().value_or(0));
}
newLimit = newLimitValue;
} else {
@@ -131,9 +127,7 @@ StatusWith<std::unique_ptr<QueryRequest>> transformQueryForShards(
str::stream()
<< "sum of ntoreturn and skip cannot be represented as a 64-bit "
"integer, ntoreturn: "
- << *qr.getNToReturn()
- << ", skip: "
- << qr.getSkip().value_or(0));
+ << *qr.getNToReturn() << ", skip: " << qr.getSkip().value_or(0));
}
newNToReturn = newNToReturnValue;
}
@@ -410,8 +404,7 @@ CursorId ClusterFind::runQuery(OperationContext* opCtx,
uasserted(ErrorCodes::BadValue,
str::stream() << "Projection contains illegal field '"
<< AsyncResultsMerger::kSortKeyField
- << "': "
- << query.getQueryRequest().getProj());
+ << "': " << query.getQueryRequest().getProj());
}
auto const catalogCache = Grid::get(opCtx)->catalogCache();
@@ -434,8 +427,8 @@ CursorId ClusterFind::runQuery(OperationContext* opCtx,
if (retries >= kMaxRetries) {
// Check if there are no retries remaining, so the last received error can be
// propagated to the caller.
- ex.addContext(str::stream() << "Failed to run query after " << kMaxRetries
- << " retries");
+ ex.addContext(str::stream()
+ << "Failed to run query after " << kMaxRetries << " retries");
throw;
} else if (!ErrorCodes::isStaleShardVersionError(ex.code()) &&
ex.code() != ErrorCodes::ShardNotFound) {
@@ -485,8 +478,7 @@ void validateLSID(OperationContext* opCtx,
if (!opCtx->getLogicalSessionId() && cursor->getLsid()) {
uasserted(50800,
str::stream() << "Cannot run getMore on cursor " << request.cursorid
- << ", which was created in session "
- << *cursor->getLsid()
+ << ", which was created in session " << *cursor->getLsid()
<< ", without an lsid");
}
@@ -494,10 +486,8 @@ void validateLSID(OperationContext* opCtx,
(*opCtx->getLogicalSessionId() != *cursor->getLsid())) {
uasserted(50801,
str::stream() << "Cannot run getMore on cursor " << request.cursorid
- << ", which was created in session "
- << *cursor->getLsid()
- << ", in session "
- << *opCtx->getLogicalSessionId());
+ << ", which was created in session " << *cursor->getLsid()
+ << ", in session " << *opCtx->getLogicalSessionId());
}
}
@@ -518,8 +508,7 @@ void validateTxnNumber(OperationContext* opCtx,
if (!opCtx->getTxnNumber() && cursor->getTxnNumber()) {
uasserted(50803,
str::stream() << "Cannot run getMore on cursor " << request.cursorid
- << ", which was created in transaction "
- << *cursor->getTxnNumber()
+ << ", which was created in transaction " << *cursor->getTxnNumber()
<< ", without a txnNumber");
}
@@ -527,10 +516,8 @@ void validateTxnNumber(OperationContext* opCtx,
(*opCtx->getTxnNumber() != *cursor->getTxnNumber())) {
uasserted(50804,
str::stream() << "Cannot run getMore on cursor " << request.cursorid
- << ", which was created in transaction "
- << *cursor->getTxnNumber()
- << ", in transaction "
- << *opCtx->getTxnNumber());
+ << ", which was created in transaction " << *cursor->getTxnNumber()
+ << ", in transaction " << *opCtx->getTxnNumber());
}
}
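Aside from the formatting, the "sum of limit and skip" messages above guard a 64-bit overflow check. An illustrative version of such a check, using a compiler builtin (not claimed to be the server's own helper):

    #include <cstdint>
    #include <optional>

    std::optional<std::int64_t> checkedAdd(std::int64_t a, std::int64_t b) {
        std::int64_t out;
        if (__builtin_add_overflow(a, b, &out))  // GCC/Clang builtin
            return std::nullopt;                 // sum does not fit in 64 bits
        return out;
    }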
diff --git a/src/mongo/s/query/router_stage_pipeline.cpp b/src/mongo/s/query/router_stage_pipeline.cpp
index ce1c56c103b..aaaad0c3e96 100644
--- a/src/mongo/s/query/router_stage_pipeline.cpp
+++ b/src/mongo/s/query/router_stage_pipeline.cpp
@@ -106,8 +106,7 @@ BSONObj RouterStagePipeline::_validateAndConvertToBSON(const Document& event) {
"event makes it impossible to resume the stream from that point. Only "
"transformations that retain the unmodified _id field are allowed. "
"Expected: "
- << BSON("_id" << resumeToken)
- << " but found: "
+ << BSON("_id" << resumeToken) << " but found: "
<< (eventBSON["_id"] ? BSON("_id" << eventBSON["_id"]) : BSONObj()),
idField.binaryEqual(resumeToken));
diff --git a/src/mongo/s/query/router_stage_remove_metadata_fields_test.cpp b/src/mongo/s/query/router_stage_remove_metadata_fields_test.cpp
index c66f4d6e3d0..b101d1ca37b 100644
--- a/src/mongo/s/query/router_stage_remove_metadata_fields_test.cpp
+++ b/src/mongo/s/query/router_stage_remove_metadata_fields_test.cpp
@@ -49,8 +49,9 @@ OperationContext* opCtx = nullptr;
TEST(RouterStageRemoveMetadataFieldsTest, RemovesMetaDataFields) {
auto mockStage = stdx::make_unique<RouterStageMock>(opCtx);
mockStage->queueResult(BSON("a" << 4 << "$sortKey" << 1 << "b" << 3));
- mockStage->queueResult(BSON("$sortKey" << BSON("" << 3) << "c" << BSON("d"
- << "foo")));
+ mockStage->queueResult(BSON("$sortKey" << BSON("" << 3) << "c"
+ << BSON("d"
+ << "foo")));
mockStage->queueResult(BSON("a" << 3));
mockStage->queueResult(BSON("a" << 3 << "$randVal" << 4 << "$sortKey" << 2));
mockStage->queueResult(
diff --git a/src/mongo/s/query/store_possible_cursor.h b/src/mongo/s/query/store_possible_cursor.h
index 38b13b4ea7a..43157322b0b 100644
--- a/src/mongo/s/query/store_possible_cursor.h
+++ b/src/mongo/s/query/store_possible_cursor.h
@@ -72,7 +72,7 @@ class TaskExecutor;
* @ cursorManager the ClusterCursorManager on which to register the resulting ClusterClientCursor
* @ privileges the PrivilegeVector of privileges needed for the original command, to be used for
* auth checking by GetMore
-*/
+ */
StatusWith<BSONObj> storePossibleCursor(OperationContext* opCtx,
const ShardId& shardId,
const HostAndPort& server,
diff --git a/src/mongo/s/request_types/add_shard_request_test.cpp b/src/mongo/s/request_types/add_shard_request_test.cpp
index 87ae164f2a7..8b28a1921b5 100644
--- a/src/mongo/s/request_types/add_shard_request_test.cpp
+++ b/src/mongo/s/request_types/add_shard_request_test.cpp
@@ -66,9 +66,8 @@ TEST(AddShardRequest, ParseInternalFieldsInvalidConnectionString) {
TEST(AddShardRequest, ParseInternalFieldsMissingMaxSize) {
{
- BSONObj obj =
- BSON(AddShardRequest::mongosAddShard << kConnString << AddShardRequest::shardName
- << kShardName);
+ BSONObj obj = BSON(AddShardRequest::mongosAddShard
+ << kConnString << AddShardRequest::shardName << kShardName);
auto swAddShardRequest = AddShardRequest::parseFromMongosCommand(obj);
ASSERT_OK(swAddShardRequest.getStatus());
@@ -81,9 +80,8 @@ TEST(AddShardRequest, ParseInternalFieldsMissingMaxSize) {
}
{
- BSONObj obj =
- BSON(AddShardRequest::configsvrAddShard << kConnString << AddShardRequest::shardName
- << kShardName);
+ BSONObj obj = BSON(AddShardRequest::configsvrAddShard
+ << kConnString << AddShardRequest::shardName << kShardName);
auto swAddShardRequest = AddShardRequest::parseFromConfigCommand(obj);
@@ -99,9 +97,8 @@ TEST(AddShardRequest, ParseInternalFieldsMissingMaxSize) {
TEST(AddShardRequest, ParseInternalFieldsMissingName) {
{
- BSONObj obj =
- BSON(AddShardRequest::mongosAddShard << kConnString << AddShardRequest::maxSizeMB
- << kMaxSizeMB);
+ BSONObj obj = BSON(AddShardRequest::mongosAddShard
+ << kConnString << AddShardRequest::maxSizeMB << kMaxSizeMB);
auto swAddShardRequest = AddShardRequest::parseFromMongosCommand(obj);
ASSERT_OK(swAddShardRequest.getStatus());
@@ -114,9 +111,8 @@ TEST(AddShardRequest, ParseInternalFieldsMissingName) {
}
{
- BSONObj obj =
- BSON(AddShardRequest::configsvrAddShard << kConnString << AddShardRequest::maxSizeMB
- << kMaxSizeMB);
+ BSONObj obj = BSON(AddShardRequest::configsvrAddShard
+ << kConnString << AddShardRequest::maxSizeMB << kMaxSizeMB);
auto swAddShardRequest = AddShardRequest::parseFromConfigCommand(obj);
ASSERT_OK(swAddShardRequest.getStatus());
@@ -131,11 +127,9 @@ TEST(AddShardRequest, ParseInternalFieldsMissingName) {
TEST(AddShardRequest, ParseInternalFieldsAllFieldsPresent) {
{
- BSONObj obj =
- BSON(AddShardRequest::mongosAddShard << kConnString << AddShardRequest::shardName
- << kShardName
- << AddShardRequest::maxSizeMB
- << kMaxSizeMB);
+ BSONObj obj = BSON(AddShardRequest::mongosAddShard
+ << kConnString << AddShardRequest::shardName << kShardName
+ << AddShardRequest::maxSizeMB << kMaxSizeMB);
auto swAddShardRequest = AddShardRequest::parseFromMongosCommand(obj);
ASSERT_OK(swAddShardRequest.getStatus());
@@ -149,11 +143,9 @@ TEST(AddShardRequest, ParseInternalFieldsAllFieldsPresent) {
}
{
- BSONObj obj =
- BSON(AddShardRequest::configsvrAddShard << kConnString << AddShardRequest::shardName
- << kShardName
- << AddShardRequest::maxSizeMB
- << kMaxSizeMB);
+ BSONObj obj = BSON(AddShardRequest::configsvrAddShard
+ << kConnString << AddShardRequest::shardName << kShardName
+ << AddShardRequest::maxSizeMB << kMaxSizeMB);
auto swAddShardRequest = AddShardRequest::parseFromConfigCommand(obj);
ASSERT_OK(swAddShardRequest.getStatus());
@@ -170,10 +162,9 @@ TEST(AddShardRequest, ParseInternalFieldsAllFieldsPresent) {
// Test converting a valid AddShardRequest to the internal config version of the command.
TEST(AddShardRequest, ToCommandForConfig) {
- BSONObj mongosCmdObj = BSON(
- AddShardRequest::mongosAddShard << kConnString << AddShardRequest::shardName << kShardName
- << AddShardRequest::maxSizeMB
- << kMaxSizeMB);
+ BSONObj mongosCmdObj = BSON(AddShardRequest::mongosAddShard
+ << kConnString << AddShardRequest::shardName << kShardName
+ << AddShardRequest::maxSizeMB << kMaxSizeMB);
auto swAddShardRequest = AddShardRequest::parseFromMongosCommand(mongosCmdObj);
ASSERT_OK(swAddShardRequest.getStatus());
@@ -186,8 +177,8 @@ TEST(AddShardRequest, ToCommandForConfig) {
}
TEST(AddShardRequest, ToCommandForConfigMissingName) {
- BSONObj mongosCmdObj = BSON(
- AddShardRequest::mongosAddShard << kConnString << AddShardRequest::maxSizeMB << kMaxSizeMB);
+ BSONObj mongosCmdObj = BSON(AddShardRequest::mongosAddShard
+ << kConnString << AddShardRequest::maxSizeMB << kMaxSizeMB);
auto swAddShardRequest = AddShardRequest::parseFromMongosCommand(mongosCmdObj);
ASSERT_OK(swAddShardRequest.getStatus());
@@ -200,8 +191,8 @@ TEST(AddShardRequest, ToCommandForConfigMissingName) {
}
TEST(AddShardRequest, ToCommandForConfigMissingMaxSize) {
- BSONObj mongosCmdObj = BSON(
- AddShardRequest::mongosAddShard << kConnString << AddShardRequest::shardName << kShardName);
+ BSONObj mongosCmdObj = BSON(AddShardRequest::mongosAddShard
+ << kConnString << AddShardRequest::shardName << kShardName);
auto swAddShardRequest = AddShardRequest::parseFromMongosCommand(mongosCmdObj);
ASSERT_OK(swAddShardRequest.getStatus());
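One clang-format rule explains most of the test rewraps above: in a << chain it packs a string-literal key together with the non-literal value that follows it, but it always breaks between two adjacent string literals, which is why bare literals such as "shard0000", "a", and "zone" keep their own lines. A self-contained sketch of the BSON() macro shape these fixtures use (the include path and field names here are illustrative assumptions, not the real AddShardRequest fields):

    #include "mongo/bson/bsonobjbuilder.h"  // assumed home of the BSON() macro

    namespace {
    mongo::BSONObj makeRequestDoc() {
        // A literal followed by a non-literal packs ("maxSizeMB" << 100);
        // two adjacent literals split across lines ("name" / "shard0000"):
        return BSON("addShard"
                    << "host1:12345"
                    << "name"
                    << "shard0000"
                    << "maxSizeMB" << 100);
    }
    }  // namespace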
diff --git a/src/mongo/s/request_types/add_shard_to_zone_request_test.cpp b/src/mongo/s/request_types/add_shard_to_zone_request_test.cpp
index 277302c3c0c..7a9b2b8141e 100644
--- a/src/mongo/s/request_types/add_shard_to_zone_request_test.cpp
+++ b/src/mongo/s/request_types/add_shard_to_zone_request_test.cpp
@@ -92,8 +92,7 @@ TEST(AddShardToZoneRequest, WrongShardNameTypeErrors) {
TEST(AddShardToZoneRequest, WrongZoneNameTypeErrors) {
auto request = AddShardToZoneRequest::parseFromMongosCommand(BSON("addShardToZone"
<< "a"
- << "zone"
- << 1234));
+ << "zone" << 1234));
ASSERT_EQ(ErrorCodes::TypeMismatch, request.getStatus());
}
@@ -141,25 +140,23 @@ TEST(CfgAddShardToZoneRequest, MissingShardNameErrors) {
}
TEST(CfgAddShardToZoneRequest, WrongShardNameTypeErrors) {
- auto request = AddShardToZoneRequest::parseFromConfigCommand(
- BSON("_configsvrAddShardToZone" << 1234 << "zone"
- << "z"));
+ auto request = AddShardToZoneRequest::parseFromConfigCommand(BSON("_configsvrAddShardToZone"
+ << 1234 << "zone"
+ << "z"));
ASSERT_EQ(ErrorCodes::TypeMismatch, request.getStatus());
}
TEST(CfgAddShardToZoneRequest, WrongZoneNameTypeErrors) {
auto request = AddShardToZoneRequest::parseFromConfigCommand(BSON("_configsvrAddShardToZone"
<< "a"
- << "zone"
- << 1234));
+ << "zone" << 1234));
ASSERT_EQ(ErrorCodes::TypeMismatch, request.getStatus());
}
TEST(CfgAddShardToZoneRequest, CannotUseConfigToParseMongosCommand) {
auto request = AddShardToZoneRequest::parseFromConfigCommand(BSON("addShardToZone"
<< "a"
- << "zone"
- << 1234));
+ << "zone" << 1234));
ASSERT_EQ(ErrorCodes::NoSuchKey, request.getStatus());
}
diff --git a/src/mongo/s/request_types/balance_chunk_request_test.cpp b/src/mongo/s/request_types/balance_chunk_request_test.cpp
index f3f0a14b320..df15b79669d 100644
--- a/src/mongo/s/request_types/balance_chunk_request_test.cpp
+++ b/src/mongo/s/request_types/balance_chunk_request_test.cpp
@@ -45,18 +45,13 @@ using unittest::assertGet;
TEST(BalanceChunkRequest, ParseFromConfigCommandNoSecondaryThrottle) {
const ChunkVersion version(1, 0, OID::gen());
auto request = assertGet(BalanceChunkRequest::parseFromConfigCommand(
- BSON("_configsvrMoveChunk" << 1 << "ns"
- << "TestDB.TestColl"
- << "min"
- << BSON("a" << -100LL)
- << "max"
- << BSON("a" << 100LL)
- << "shard"
- << "TestShard0000"
- << "lastmod"
- << Date_t::fromMillisSinceEpoch(version.toLong())
- << "lastmodEpoch"
- << version.epoch())));
+ BSON("_configsvrMoveChunk"
+ << 1 << "ns"
+ << "TestDB.TestColl"
+ << "min" << BSON("a" << -100LL) << "max" << BSON("a" << 100LL) << "shard"
+ << "TestShard0000"
+ << "lastmod" << Date_t::fromMillisSinceEpoch(version.toLong()) << "lastmodEpoch"
+ << version.epoch())));
const auto& chunk = request.getChunk();
ASSERT_EQ("TestDB.TestColl", chunk.getNS().ns());
ASSERT_BSONOBJ_EQ(BSON("a" << -100LL), chunk.getMin());
@@ -72,21 +67,14 @@ TEST(BalanceChunkRequest, ParseFromConfigCommandNoSecondaryThrottle) {
TEST(BalanceChunkRequest, ParseFromConfigCommandWithSecondaryThrottle) {
const ChunkVersion version(1, 0, OID::gen());
auto request = assertGet(BalanceChunkRequest::parseFromConfigCommand(
- BSON("_configsvrMoveChunk" << 1 << "ns"
- << "TestDB.TestColl"
- << "min"
- << BSON("a" << -100LL)
- << "max"
- << BSON("a" << 100LL)
- << "shard"
- << "TestShard0000"
- << "lastmod"
- << Date_t::fromMillisSinceEpoch(version.toLong())
- << "lastmodEpoch"
- << version.epoch()
- << "secondaryThrottle"
- << BSON("_secondaryThrottle" << true << "writeConcern"
- << BSON("w" << 2)))));
+ BSON("_configsvrMoveChunk"
+ << 1 << "ns"
+ << "TestDB.TestColl"
+ << "min" << BSON("a" << -100LL) << "max" << BSON("a" << 100LL) << "shard"
+ << "TestShard0000"
+ << "lastmod" << Date_t::fromMillisSinceEpoch(version.toLong()) << "lastmodEpoch"
+ << version.epoch() << "secondaryThrottle"
+ << BSON("_secondaryThrottle" << true << "writeConcern" << BSON("w" << 2)))));
const auto& chunk = request.getChunk();
ASSERT_EQ("TestDB.TestColl", chunk.getNS().ns());
ASSERT_BSONOBJ_EQ(BSON("a" << -100LL), chunk.getMin());
diff --git a/src/mongo/s/request_types/merge_chunk_request_test.cpp b/src/mongo/s/request_types/merge_chunk_request_test.cpp
index 7a300c5e813..94a7bf68511 100644
--- a/src/mongo/s/request_types/merge_chunk_request_test.cpp
+++ b/src/mongo/s/request_types/merge_chunk_request_test.cpp
@@ -42,11 +42,8 @@ TEST(MergeChunkRequest, BasicValidConfigCommand) {
auto request = assertGet(MergeChunkRequest::parseFromConfigCommand(
BSON("_configsvrCommitChunkMerge"
<< "TestDB.TestColl"
- << "collEpoch"
- << OID("7fffffff0000000000000001")
- << "chunkBoundaries"
- << BSON_ARRAY(BSON("a" << 1) << BSON("a" << 5) << BSON("a" << 10))
- << "shard"
+ << "collEpoch" << OID("7fffffff0000000000000001") << "chunkBoundaries"
+ << BSON_ARRAY(BSON("a" << 1) << BSON("a" << 5) << BSON("a" << 10)) << "shard"
<< "shard0000")));
ASSERT_EQ(NamespaceString("TestDB", "TestColl"), request.getNamespace());
ASSERT_EQ(OID("7fffffff0000000000000001"), request.getEpoch());
@@ -60,14 +57,10 @@ TEST(MergeChunkRequest, ConfigCommandtoBSON) {
BSONObj serializedRequest =
BSON("_configsvrCommitChunkMerge"
<< "TestDB.TestColl"
- << "collEpoch"
- << OID("7fffffff0000000000000001")
- << "chunkBoundaries"
- << BSON_ARRAY(BSON("a" << 1) << BSON("a" << 5) << BSON("a" << 10))
- << "shard"
+ << "collEpoch" << OID("7fffffff0000000000000001") << "chunkBoundaries"
+ << BSON_ARRAY(BSON("a" << 1) << BSON("a" << 5) << BSON("a" << 10)) << "shard"
<< "shard0000"
- << "validAfter"
- << Timestamp{100});
+ << "validAfter" << Timestamp{100});
BSONObj writeConcernObj = BSON("writeConcern" << BSON("w"
<< "majority"));
@@ -84,11 +77,10 @@ TEST(MergeChunkRequest, ConfigCommandtoBSON) {
}
TEST(MergeChunkRequest, MissingNameSpaceErrors) {
- auto request = MergeChunkRequest::parseFromConfigCommand(
- BSON("collEpoch" << OID("7fffffff0000000000000001") << "chunkBoundaries"
- << BSON_ARRAY(BSON("a" << 1) << BSON("a" << 5) << BSON("a" << 10))
- << "shard"
- << "shard0000"));
+ auto request = MergeChunkRequest::parseFromConfigCommand(BSON(
+ "collEpoch" << OID("7fffffff0000000000000001") << "chunkBoundaries"
+ << BSON_ARRAY(BSON("a" << 1) << BSON("a" << 5) << BSON("a" << 10)) << "shard"
+ << "shard0000"));
ASSERT_EQ(ErrorCodes::NoSuchKey, request.getStatus());
}
@@ -96,20 +88,18 @@ TEST(MergeChunkRequest, MissingCollEpochErrors) {
auto request = MergeChunkRequest::parseFromConfigCommand(
BSON("_configsvrCommitChunkMerge"
<< "TestDB.TestColl"
- << "chunkBoundaries"
- << BSON_ARRAY(BSON("a" << 1) << BSON("a" << 5) << BSON("a" << 10))
+ << "chunkBoundaries" << BSON_ARRAY(BSON("a" << 1) << BSON("a" << 5) << BSON("a" << 10))
<< "shard"
<< "shard0000"));
ASSERT_EQ(ErrorCodes::NoSuchKey, request.getStatus());
}
TEST(MergeChunkRequest, MissingChunkBoundariesErrors) {
- auto request = MergeChunkRequest::parseFromConfigCommand(BSON("_configsvrCommitChunkMerge"
- << "TestDB.TestColl"
- << "collEpoch"
- << OID("7fffffff0000000000000001")
- << "shard"
- << "shard0000"));
+ auto request = MergeChunkRequest::parseFromConfigCommand(
+ BSON("_configsvrCommitChunkMerge"
+ << "TestDB.TestColl"
+ << "collEpoch" << OID("7fffffff0000000000000001") << "shard"
+ << "shard0000"));
ASSERT_EQ(ErrorCodes::NoSuchKey, request.getStatus());
}
@@ -117,21 +107,17 @@ TEST(MergeChunkRequest, MissingShardNameErrors) {
auto request = MergeChunkRequest::parseFromConfigCommand(
BSON("_configsvrCommitChunkMerge"
<< "TestDB.TestColl"
- << "collEpoch"
- << OID("7fffffff0000000000000001")
- << "chunkBoundaries"
+ << "collEpoch" << OID("7fffffff0000000000000001") << "chunkBoundaries"
<< BSON_ARRAY(BSON("a" << 1) << BSON("a" << 5) << BSON("a" << 10))));
ASSERT_EQ(ErrorCodes::NoSuchKey, request.getStatus());
}
TEST(MergeChunkRequest, WrongNamespaceTypeErrors) {
auto request = MergeChunkRequest::parseFromConfigCommand(
- BSON("_configsvrCommitChunkMerge" << 1234 << "collEpoch" << OID("7fffffff0000000000000001")
- << "chunkBoundaries"
- << BSON_ARRAY(BSON("a" << 1) << BSON("a" << 5)
- << BSON("a" << 10))
- << "shard"
- << "shard0000"));
+ BSON("_configsvrCommitChunkMerge"
+ << 1234 << "collEpoch" << OID("7fffffff0000000000000001") << "chunkBoundaries"
+ << BSON_ARRAY(BSON("a" << 1) << BSON("a" << 5) << BSON("a" << 10)) << "shard"
+ << "shard0000"));
ASSERT_EQ(ErrorCodes::TypeMismatch, request.getStatus());
}
@@ -139,37 +125,27 @@ TEST(MergeChunkRequest, WrongCollEpochTypeErrors) {
auto request = MergeChunkRequest::parseFromConfigCommand(
BSON("_configsvrCommitChunkMerge"
<< "TestDB.TestColl"
- << "collEpoch"
- << 1234
- << "chunkBoundaries"
- << BSON_ARRAY(BSON("a" << 1) << BSON("a" << 5) << BSON("a" << 10))
- << "shard"
+ << "collEpoch" << 1234 << "chunkBoundaries"
+ << BSON_ARRAY(BSON("a" << 1) << BSON("a" << 5) << BSON("a" << 10)) << "shard"
<< "shard0000"));
ASSERT_EQ(ErrorCodes::TypeMismatch, request.getStatus());
}
TEST(MergeChunkRequest, WrongChunkBoundariesTypeErrors) {
- auto request = MergeChunkRequest::parseFromConfigCommand(BSON("_configsvrCommitChunkMerge"
- << "TestDB.TestColl"
- << "collEpoch"
- << OID("7fffffff0000000000000001")
- << "chunkBoundaries"
- << 1234
- << "shard"
- << "shard0000"));
+ auto request = MergeChunkRequest::parseFromConfigCommand(BSON(
+ "_configsvrCommitChunkMerge"
+ << "TestDB.TestColl"
+ << "collEpoch" << OID("7fffffff0000000000000001") << "chunkBoundaries" << 1234 << "shard"
+ << "shard0000"));
ASSERT_EQ(ErrorCodes::TypeMismatch, request.getStatus());
}
TEST(MergeChunkRequest, WrongShardNameTypeErrors) {
- auto request = MergeChunkRequest::parseFromConfigCommand(
- BSON("_configsvrCommitChunkMerge"
- << "TestDB.TestColl"
- << "collEpoch"
- << OID("7fffffff0000000000000001")
- << "chunkBoundaries"
- << BSON_ARRAY(BSON("a" << 1) << BSON("a" << 5) << BSON("a" << 10))
- << "shard"
- << 1234));
+ auto request = MergeChunkRequest::parseFromConfigCommand(BSON(
+ "_configsvrCommitChunkMerge"
+ << "TestDB.TestColl"
+ << "collEpoch" << OID("7fffffff0000000000000001") << "chunkBoundaries"
+ << BSON_ARRAY(BSON("a" << 1) << BSON("a" << 5) << BSON("a" << 10)) << "shard" << 1234));
ASSERT_EQ(ErrorCodes::TypeMismatch, request.getStatus());
}
@@ -177,24 +153,19 @@ TEST(MergeChunkRequest, InvalidNamespaceErrors) {
auto request = MergeChunkRequest::parseFromConfigCommand(
BSON("_configsvrCommitChunkMerge"
<< ""
- << "collEpoch"
- << OID("7fffffff0000000000000001")
- << "chunkBoundaries"
- << BSON_ARRAY(BSON("a" << 1) << BSON("a" << 5) << BSON("a" << 10))
- << "shard"
+ << "collEpoch" << OID("7fffffff0000000000000001") << "chunkBoundaries"
+ << BSON_ARRAY(BSON("a" << 1) << BSON("a" << 5) << BSON("a" << 10)) << "shard"
<< "shard0000"));
ASSERT_EQ(ErrorCodes::InvalidNamespace, request.getStatus());
}
TEST(MergeChunkRequest, EmptyChunkBoundariesErrors) {
- auto request = MergeChunkRequest::parseFromConfigCommand(BSON("_configsvrCommitChunkMerge"
- << "TestDB.TestColl"
- << "collEpoch"
- << OID("7fffffff0000000000000001")
- << "chunkBoundaries"
- << BSONArray()
- << "shard"
- << "shard0000"));
+ auto request = MergeChunkRequest::parseFromConfigCommand(
+ BSON("_configsvrCommitChunkMerge"
+ << "TestDB.TestColl"
+ << "collEpoch" << OID("7fffffff0000000000000001") << "chunkBoundaries" << BSONArray()
+ << "shard"
+ << "shard0000"));
ASSERT_EQ(ErrorCodes::InvalidOptions, request.getStatus());
}
@@ -202,11 +173,8 @@ TEST(MergeChunkRequest, TooFewChunkBoundariesErrors) {
auto request = MergeChunkRequest::parseFromConfigCommand(
BSON("_configsvrCommitChunkMerge"
<< "TestDB.TestColl"
- << "collEpoch"
- << OID("7fffffff0000000000000001")
- << "chunkBoundaries"
- << BSON_ARRAY(BSON("a" << 1) << BSON("a" << 10))
- << "shard"
+ << "collEpoch" << OID("7fffffff0000000000000001") << "chunkBoundaries"
+ << BSON_ARRAY(BSON("a" << 1) << BSON("a" << 10)) << "shard"
<< "shard0000"));
ASSERT_EQ(ErrorCodes::InvalidOptions, request.getStatus());
}
diff --git a/src/mongo/s/request_types/migration_secondary_throttle_options_test.cpp b/src/mongo/s/request_types/migration_secondary_throttle_options_test.cpp
index 49332950329..b295e3f0b3d 100644
--- a/src/mongo/s/request_types/migration_secondary_throttle_options_test.cpp
+++ b/src/mongo/s/request_types/migration_secondary_throttle_options_test.cpp
@@ -178,8 +178,9 @@ TEST(MigrationSecondaryThrottleOptions, ParseFailsDisabledInCommandBSONWriteConc
TEST(MigrationSecondaryThrottleOptions, ParseFailsNotSpecifiedInCommandBSONWriteConcernSpecified) {
auto status = MigrationSecondaryThrottleOptions::createFromCommand(
- BSON("someOtherField" << 1 << "writeConcern" << BSON("w"
- << "majority")));
+ BSON("someOtherField" << 1 << "writeConcern"
+ << BSON("w"
+ << "majority")));
ASSERT_EQ(ErrorCodes::UnsupportedFormat, status.getStatus().code());
}
diff --git a/src/mongo/s/request_types/remove_shard_from_zone_request_test.cpp b/src/mongo/s/request_types/remove_shard_from_zone_request_test.cpp
index d8b6c94c61e..67981bd7f67 100644
--- a/src/mongo/s/request_types/remove_shard_from_zone_request_test.cpp
+++ b/src/mongo/s/request_types/remove_shard_from_zone_request_test.cpp
@@ -85,17 +85,16 @@ TEST(RemoveShardFromZoneRequest, MissingShardNameErrors) {
}
TEST(RemoveShardFromZoneRequest, WrongShardNameTypeErrors) {
- auto request = RemoveShardFromZoneRequest::parseFromMongosCommand(
- BSON("removeShardFromZone" << 1234 << "zone"
- << "z"));
+ auto request = RemoveShardFromZoneRequest::parseFromMongosCommand(BSON("removeShardFromZone"
+ << 1234 << "zone"
+ << "z"));
ASSERT_EQ(ErrorCodes::TypeMismatch, request.getStatus());
}
TEST(RemoveShardFromZoneRequest, WrongZoneNameTypeErrors) {
auto request = RemoveShardFromZoneRequest::parseFromMongosCommand(BSON("removeShardFromZone"
<< "a"
- << "zone"
- << 1234));
+ << "zone" << 1234));
ASSERT_EQ(ErrorCodes::TypeMismatch, request.getStatus());
}
@@ -155,16 +154,14 @@ TEST(CfgRemoveShardFromZoneRequest, WrongZoneNameTypeErrors) {
auto request =
RemoveShardFromZoneRequest::parseFromConfigCommand(BSON("_configsvrRemoveShardFromZone"
<< "a"
- << "zone"
- << 1234));
+ << "zone" << 1234));
ASSERT_EQ(ErrorCodes::TypeMismatch, request.getStatus());
}
TEST(CfgRemoveShardFromZoneRequest, CannotUseConfigToParseMongosCommand) {
auto request = RemoveShardFromZoneRequest::parseFromConfigCommand(BSON("removeShardFromZone"
<< "a"
- << "zone"
- << 1234));
+ << "zone" << 1234));
ASSERT_EQ(ErrorCodes::NoSuchKey, request.getStatus());
}
diff --git a/src/mongo/s/request_types/set_shard_version_request_test.cpp b/src/mongo/s/request_types/set_shard_version_request_test.cpp
index fb1052cc48d..59003730f98 100644
--- a/src/mongo/s/request_types/set_shard_version_request_test.cpp
+++ b/src/mongo/s/request_types/set_shard_version_request_test.cpp
@@ -47,15 +47,12 @@ const ConnectionString shardCS = ConnectionString::forReplicaSet(
"ShardRS", {HostAndPort{"shardHost1:12345"}, HostAndPort{"shardHost2:12345"}});
TEST(SetShardVersionRequest, ParseInitMissingAuthoritative) {
- SetShardVersionRequest request =
- assertGet(SetShardVersionRequest::parseFromBSON(BSON("setShardVersion"
- << ""
- << "init"
- << true
- << "shard"
- << "TestShard"
- << "shardHost"
- << shardCS.toString())));
+ SetShardVersionRequest request = assertGet(
+ SetShardVersionRequest::parseFromBSON(BSON("setShardVersion"
+ << ""
+ << "init" << true << "shard"
+ << "TestShard"
+ << "shardHost" << shardCS.toString())));
ASSERT(request.isInit());
ASSERT(!request.isAuthoritative());
@@ -66,16 +63,12 @@ TEST(SetShardVersionRequest, ParseInitMissingAuthoritative) {
TEST(SetShardVersionRequest, ParseInitWithAuthoritative) {
SetShardVersionRequest request =
- assertGet(SetShardVersionRequest::parseFromBSON(BSON("setShardVersion"
- << ""
- << "init"
- << true
- << "authoritative"
- << true
- << "shard"
- << "TestShard"
- << "shardHost"
- << shardCS.toString())));
+ assertGet(SetShardVersionRequest::parseFromBSON(
+ BSON("setShardVersion"
+ << ""
+ << "init" << true << "authoritative" << true << "shard"
+ << "TestShard"
+ << "shardHost" << shardCS.toString())));
ASSERT(request.isInit());
ASSERT(request.isAuthoritative());
@@ -86,18 +79,12 @@ TEST(SetShardVersionRequest, ParseInitWithAuthoritative) {
TEST(SetShardVersionRequest, ParseInitNoConnectionVersioning) {
SetShardVersionRequest request =
- assertGet(SetShardVersionRequest::parseFromBSON(BSON("setShardVersion"
- << ""
- << "init"
- << true
- << "authoritative"
- << true
- << "shard"
- << "TestShard"
- << "shardHost"
- << shardCS.toString()
- << "noConnectionVersioning"
- << true)));
+ assertGet(SetShardVersionRequest::parseFromBSON(
+ BSON("setShardVersion"
+ << ""
+ << "init" << true << "authoritative" << true << "shard"
+ << "TestShard"
+ << "shardHost" << shardCS.toString() << "noConnectionVersioning" << true)));
ASSERT(request.isInit());
ASSERT(request.isAuthoritative());
@@ -110,16 +97,13 @@ TEST(SetShardVersionRequest, ParseFull) {
const ChunkVersion chunkVersion(1, 2, OID::gen());
SetShardVersionRequest request =
- assertGet(SetShardVersionRequest::parseFromBSON(BSON("setShardVersion"
- << "db.coll"
- << "shard"
- << "TestShard"
- << "shardHost"
- << shardCS.toString()
- << "version"
- << Timestamp(chunkVersion.toLong())
- << "versionEpoch"
- << chunkVersion.epoch())));
+ assertGet(SetShardVersionRequest::parseFromBSON(
+ BSON("setShardVersion"
+ << "db.coll"
+ << "shard"
+ << "TestShard"
+ << "shardHost" << shardCS.toString() << "version"
+ << Timestamp(chunkVersion.toLong()) << "versionEpoch" << chunkVersion.epoch())));
ASSERT(!request.isInit());
ASSERT(!request.shouldForceRefresh());
@@ -137,18 +121,14 @@ TEST(SetShardVersionRequest, ParseFullWithAuthoritative) {
const ChunkVersion chunkVersion(1, 2, OID::gen());
SetShardVersionRequest request =
- assertGet(SetShardVersionRequest::parseFromBSON(BSON("setShardVersion"
- << "db.coll"
- << "shard"
- << "TestShard"
- << "shardHost"
- << shardCS.toString()
- << "version"
- << Timestamp(chunkVersion.toLong())
- << "versionEpoch"
- << chunkVersion.epoch()
- << "authoritative"
- << true)));
+ assertGet(SetShardVersionRequest::parseFromBSON(
+ BSON("setShardVersion"
+ << "db.coll"
+ << "shard"
+ << "TestShard"
+ << "shardHost" << shardCS.toString() << "version"
+ << Timestamp(chunkVersion.toLong()) << "versionEpoch" << chunkVersion.epoch()
+ << "authoritative" << true)));
ASSERT(!request.isInit());
ASSERT(!request.shouldForceRefresh());
@@ -166,18 +146,14 @@ TEST(SetShardVersionRequest, ParseFullNoConnectionVersioning) {
const ChunkVersion chunkVersion(1, 2, OID::gen());
SetShardVersionRequest request =
- assertGet(SetShardVersionRequest::parseFromBSON(BSON("setShardVersion"
- << "db.coll"
- << "shard"
- << "TestShard"
- << "shardHost"
- << shardCS.toString()
- << "version"
- << Timestamp(chunkVersion.toLong())
- << "versionEpoch"
- << chunkVersion.epoch()
- << "noConnectionVersioning"
- << true)));
+ assertGet(SetShardVersionRequest::parseFromBSON(
+ BSON("setShardVersion"
+ << "db.coll"
+ << "shard"
+ << "TestShard"
+ << "shardHost" << shardCS.toString() << "version"
+ << Timestamp(chunkVersion.toLong()) << "versionEpoch" << chunkVersion.epoch()
+ << "noConnectionVersioning" << true)));
ASSERT(!request.isInit());
ASSERT(!request.shouldForceRefresh());
@@ -194,16 +170,14 @@ TEST(SetShardVersionRequest, ParseFullNoConnectionVersioning) {
TEST(SetShardVersionRequest, ParseFullNoNS) {
const ChunkVersion chunkVersion(1, 2, OID::gen());
- auto ssvStatus = SetShardVersionRequest::parseFromBSON(BSON("setShardVersion"
- << ""
- << "shard"
- << "TestShard"
- << "shardHost"
- << shardCS.toString()
- << "version"
- << Timestamp(chunkVersion.toLong())
- << "versionEpoch"
- << chunkVersion.epoch()));
+ auto ssvStatus =
+ SetShardVersionRequest::parseFromBSON(BSON("setShardVersion"
+ << ""
+ << "shard"
+ << "TestShard"
+ << "shardHost" << shardCS.toString() << "version"
+ << Timestamp(chunkVersion.toLong())
+ << "versionEpoch" << chunkVersion.epoch()));
ASSERT_EQ(ErrorCodes::InvalidNamespace, ssvStatus.getStatus().code());
}
@@ -211,16 +185,14 @@ TEST(SetShardVersionRequest, ParseFullNoNS) {
TEST(SetShardVersionRequest, ParseFullNSContainsDBOnly) {
const ChunkVersion chunkVersion(1, 2, OID::gen());
- auto ssvStatus = SetShardVersionRequest::parseFromBSON(BSON("setShardVersion"
- << "dbOnly"
- << "shard"
- << "TestShard"
- << "shardHost"
- << shardCS.toString()
- << "version"
- << Timestamp(chunkVersion.toLong())
- << "versionEpoch"
- << chunkVersion.epoch()));
+ auto ssvStatus =
+ SetShardVersionRequest::parseFromBSON(BSON("setShardVersion"
+ << "dbOnly"
+ << "shard"
+ << "TestShard"
+ << "shardHost" << shardCS.toString() << "version"
+ << Timestamp(chunkVersion.toLong())
+ << "versionEpoch" << chunkVersion.epoch()));
ASSERT_EQ(ErrorCodes::InvalidNamespace, ssvStatus.getStatus().code());
}
@@ -239,20 +211,10 @@ TEST(SetShardVersionRequest, ToSSVCommandInit) {
ASSERT_BSONOBJ_EQ(ssv.toBSON(),
BSON("setShardVersion"
<< ""
- << "init"
- << true
- << "forceRefresh"
- << false
- << "authoritative"
- << true
- << "configdb"
- << configCS.toString()
- << "shard"
+ << "init" << true << "forceRefresh" << false << "authoritative" << true
+ << "configdb" << configCS.toString() << "shard"
<< "TestShard"
- << "shardHost"
- << shardCS.toString()
- << "maxTimeMS"
- << 30000));
+ << "shardHost" << shardCS.toString() << "maxTimeMS" << 30000));
}
TEST(SetShardVersionRequest, ToSSVCommandFull) {
@@ -273,21 +235,11 @@ TEST(SetShardVersionRequest, ToSSVCommandFull) {
ASSERT_BSONOBJ_EQ(ssv.toBSON(),
BSON("setShardVersion"
<< "db.coll"
- << "init"
- << false
- << "forceRefresh"
- << false
- << "authoritative"
- << false
- << "configdb"
- << configCS.toString()
- << "shard"
+ << "init" << false << "forceRefresh" << false << "authoritative" << false
+ << "configdb" << configCS.toString() << "shard"
<< "TestShard"
- << "shardHost"
- << shardCS.toString()
- << "version"
- << Timestamp(chunkVersion.toLong())
- << "versionEpoch"
+ << "shardHost" << shardCS.toString() << "version"
+ << Timestamp(chunkVersion.toLong()) << "versionEpoch"
<< chunkVersion.epoch()));
}
@@ -309,21 +261,11 @@ TEST(SetShardVersionRequest, ToSSVCommandFullAuthoritative) {
ASSERT_BSONOBJ_EQ(ssv.toBSON(),
BSON("setShardVersion"
<< "db.coll"
- << "init"
- << false
- << "forceRefresh"
- << false
- << "authoritative"
- << true
- << "configdb"
- << configCS.toString()
- << "shard"
+ << "init" << false << "forceRefresh" << false << "authoritative" << true
+ << "configdb" << configCS.toString() << "shard"
<< "TestShard"
- << "shardHost"
- << shardCS.toString()
- << "version"
- << Timestamp(chunkVersion.toLong())
- << "versionEpoch"
+ << "shardHost" << shardCS.toString() << "version"
+ << Timestamp(chunkVersion.toLong()) << "versionEpoch"
<< chunkVersion.epoch()));
}
@@ -351,21 +293,11 @@ TEST(SetShardVersionRequest, ToSSVCommandFullForceRefresh) {
ASSERT_BSONOBJ_EQ(ssv.toBSON(),
BSON("setShardVersion"
<< "db.coll"
- << "init"
- << false
- << "forceRefresh"
- << true
- << "authoritative"
- << false
- << "configdb"
- << configCS.toString()
- << "shard"
+ << "init" << false << "forceRefresh" << true << "authoritative" << false
+ << "configdb" << configCS.toString() << "shard"
<< "TestShard"
- << "shardHost"
- << shardCS.toString()
- << "version"
- << Timestamp(chunkVersion.toLong())
- << "versionEpoch"
+ << "shardHost" << shardCS.toString() << "version"
+ << Timestamp(chunkVersion.toLong()) << "versionEpoch"
<< chunkVersion.epoch()));
}
@@ -387,24 +319,12 @@ TEST(SetShardVersionRequest, ToSSVCommandFullNoConnectionVersioning) {
ASSERT_BSONOBJ_EQ(ssv.toBSON(),
BSON("setShardVersion"
<< "db.coll"
- << "init"
- << false
- << "forceRefresh"
- << false
- << "authoritative"
- << true
- << "configdb"
- << configCS.toString()
- << "shard"
+ << "init" << false << "forceRefresh" << false << "authoritative" << true
+ << "configdb" << configCS.toString() << "shard"
<< "TestShard"
- << "shardHost"
- << shardCS.toString()
- << "version"
- << Timestamp(chunkVersion.toLong())
- << "versionEpoch"
- << chunkVersion.epoch()
- << "noConnectionVersioning"
- << true));
+ << "shardHost" << shardCS.toString() << "version"
+ << Timestamp(chunkVersion.toLong()) << "versionEpoch"
+ << chunkVersion.epoch() << "noConnectionVersioning" << true));
}
TEST(SetShardVersionRequest, ToSSVCommandFullNoConnectionVersioningForceRefresh) {
@@ -431,24 +351,12 @@ TEST(SetShardVersionRequest, ToSSVCommandFullNoConnectionVersioningForceRefresh)
ASSERT_BSONOBJ_EQ(ssv.toBSON(),
BSON("setShardVersion"
<< "db.coll"
- << "init"
- << false
- << "forceRefresh"
- << true
- << "authoritative"
- << false
- << "configdb"
- << configCS.toString()
- << "shard"
+ << "init" << false << "forceRefresh" << true << "authoritative" << false
+ << "configdb" << configCS.toString() << "shard"
<< "TestShard"
- << "shardHost"
- << shardCS.toString()
- << "version"
- << Timestamp(chunkVersion.toLong())
- << "versionEpoch"
- << chunkVersion.epoch()
- << "noConnectionVersioning"
- << true));
+ << "shardHost" << shardCS.toString() << "version"
+ << Timestamp(chunkVersion.toLong()) << "versionEpoch"
+ << chunkVersion.epoch() << "noConnectionVersioning" << true));
}
diff --git a/src/mongo/s/request_types/split_chunk_request_test.cpp b/src/mongo/s/request_types/split_chunk_request_test.cpp
index d73f6c96591..1727c3aa792 100644
--- a/src/mongo/s/request_types/split_chunk_request_test.cpp
+++ b/src/mongo/s/request_types/split_chunk_request_test.cpp
@@ -41,19 +41,12 @@ namespace {
using unittest::assertGet;
TEST(SplitChunkRequest, BasicValidConfigCommand) {
- auto request =
- assertGet(SplitChunkRequest::parseFromConfigCommand(BSON("_configsvrCommitChunkSplit"
- << "TestDB.TestColl"
- << "collEpoch"
- << OID("7fffffff0000000000000001")
- << "min"
- << BSON("a" << 1)
- << "max"
- << BSON("a" << 10)
- << "splitPoints"
- << BSON_ARRAY(BSON("a" << 5))
- << "shard"
- << "shard0000")));
+ auto request = assertGet(SplitChunkRequest::parseFromConfigCommand(
+ BSON("_configsvrCommitChunkSplit"
+ << "TestDB.TestColl"
+ << "collEpoch" << OID("7fffffff0000000000000001") << "min" << BSON("a" << 1) << "max"
+ << BSON("a" << 10) << "splitPoints" << BSON_ARRAY(BSON("a" << 5)) << "shard"
+ << "shard0000")));
ASSERT_EQ(NamespaceString("TestDB", "TestColl"), request.getNamespace());
ASSERT_EQ(OID("7fffffff0000000000000001"), request.getEpoch());
ASSERT(ChunkRange(BSON("a" << 1), BSON("a" << 10)) == request.getChunkRange());
@@ -65,14 +58,8 @@ TEST(SplitChunkRequest, ValidWithMultipleSplits) {
auto request = assertGet(SplitChunkRequest::parseFromConfigCommand(
BSON("_configsvrCommitChunkSplit"
<< "TestDB.TestColl"
- << "collEpoch"
- << OID("7fffffff0000000000000001")
- << "min"
- << BSON("a" << 1)
- << "max"
- << BSON("a" << 10)
- << "splitPoints"
- << BSON_ARRAY(BSON("a" << 5) << BSON("a" << 7))
+ << "collEpoch" << OID("7fffffff0000000000000001") << "min" << BSON("a" << 1) << "max"
+ << BSON("a" << 10) << "splitPoints" << BSON_ARRAY(BSON("a" << 5) << BSON("a" << 7))
<< "shard"
<< "shard0000")));
ASSERT_EQ(NamespaceString("TestDB", "TestColl"), request.getNamespace());
@@ -84,18 +71,12 @@ TEST(SplitChunkRequest, ValidWithMultipleSplits) {
}
TEST(SplitChunkRequest, ConfigCommandtoBSON) {
- BSONObj serializedRequest = BSON("_configsvrCommitChunkSplit"
- << "TestDB.TestColl"
- << "collEpoch"
- << OID("7fffffff0000000000000001")
- << "min"
- << BSON("a" << 1)
- << "max"
- << BSON("a" << 10)
- << "splitPoints"
- << BSON_ARRAY(BSON("a" << 5))
- << "shard"
- << "shard0000");
+ BSONObj serializedRequest =
+ BSON("_configsvrCommitChunkSplit"
+ << "TestDB.TestColl"
+ << "collEpoch" << OID("7fffffff0000000000000001") << "min" << BSON("a" << 1) << "max"
+ << BSON("a" << 10) << "splitPoints" << BSON_ARRAY(BSON("a" << 5)) << "shard"
+ << "shard0000");
BSONObj writeConcernObj = BSON("writeConcern" << BSON("w"
<< "majority"));
@@ -112,197 +93,129 @@ TEST(SplitChunkRequest, ConfigCommandtoBSON) {
}
TEST(SplitChunkRequest, MissingNamespaceErrors) {
- auto request = SplitChunkRequest::parseFromConfigCommand(
- BSON("collEpoch" << OID("7fffffff0000000000000001") << "min" << BSON("a" << 1) << "max"
- << BSON("a" << 10)
- << "splitPoints"
- << BSON_ARRAY(BSON("a" << 5))
- << "shard"
- << "shard0000"));
+ auto request = SplitChunkRequest::parseFromConfigCommand(BSON(
+ "collEpoch" << OID("7fffffff0000000000000001") << "min" << BSON("a" << 1) << "max"
+ << BSON("a" << 10) << "splitPoints" << BSON_ARRAY(BSON("a" << 5)) << "shard"
+ << "shard0000"));
ASSERT_EQ(ErrorCodes::NoSuchKey, request.getStatus());
}
TEST(SplitChunkRequest, MissingCollEpochErrors) {
- auto request = SplitChunkRequest::parseFromConfigCommand(BSON("_configsvrCommitChunkSplit"
- << "TestDB.TestColl"
- << "min"
- << BSON("a" << 1)
- << "max"
- << BSON("a" << 10)
- << "splitPoints"
- << BSON_ARRAY(BSON("a" << 5))
- << "shard"
- << "shard0000"));
+ auto request = SplitChunkRequest::parseFromConfigCommand(
+ BSON("_configsvrCommitChunkSplit"
+ << "TestDB.TestColl"
+ << "min" << BSON("a" << 1) << "max" << BSON("a" << 10) << "splitPoints"
+ << BSON_ARRAY(BSON("a" << 5)) << "shard"
+ << "shard0000"));
ASSERT_EQ(ErrorCodes::NoSuchKey, request.getStatus());
}
TEST(SplitChunkRequest, MissingChunkToSplitErrors) {
- auto request = SplitChunkRequest::parseFromConfigCommand(BSON("_configsvrCommitChunkSplit"
- << "TestDB.TestColl"
- << "collEpoch"
- << OID("7fffffff0000000000000001")
- << "max"
- << BSON("a" << 10)
- << "splitPoints"
- << BSON_ARRAY(BSON("a" << 5))
- << "shard"
- << "shard0000"));
+ auto request = SplitChunkRequest::parseFromConfigCommand(
+ BSON("_configsvrCommitChunkSplit"
+ << "TestDB.TestColl"
+ << "collEpoch" << OID("7fffffff0000000000000001") << "max" << BSON("a" << 10)
+ << "splitPoints" << BSON_ARRAY(BSON("a" << 5)) << "shard"
+ << "shard0000"));
ASSERT_EQ(ErrorCodes::NoSuchKey, request.getStatus());
}
TEST(SplitChunkRequest, MissingSplitPointErrors) {
- auto request = SplitChunkRequest::parseFromConfigCommand(BSON("_configsvrCommitChunkSplit"
- << "TestDB.TestColl"
- << "collEpoch"
- << OID("7fffffff0000000000000001")
- << "min"
- << BSON("a" << 1)
- << "max"
- << BSON("a" << 10)
- << "shard"
- << "shard0000"));
+ auto request = SplitChunkRequest::parseFromConfigCommand(
+ BSON("_configsvrCommitChunkSplit"
+ << "TestDB.TestColl"
+ << "collEpoch" << OID("7fffffff0000000000000001") << "min" << BSON("a" << 1) << "max"
+ << BSON("a" << 10) << "shard"
+ << "shard0000"));
ASSERT_EQ(ErrorCodes::NoSuchKey, request.getStatus());
}
TEST(SplitChunkRequest, MissingShardNameErrors) {
- auto request = SplitChunkRequest::parseFromConfigCommand(BSON("_configsvrCommitChunkSplit"
- << "TestDB.TestColl"
- << "collEpoch"
- << OID("7fffffff0000000000000001")
- << "min"
- << BSON("a" << 1)
- << "max"
- << BSON("a" << 10)
- << "splitPoints"
- << BSON_ARRAY(BSON("a" << 5))));
+ auto request = SplitChunkRequest::parseFromConfigCommand(
+ BSON("_configsvrCommitChunkSplit"
+ << "TestDB.TestColl"
+ << "collEpoch" << OID("7fffffff0000000000000001") << "min" << BSON("a" << 1) << "max"
+ << BSON("a" << 10) << "splitPoints" << BSON_ARRAY(BSON("a" << 5))));
ASSERT_EQ(ErrorCodes::NoSuchKey, request.getStatus());
}
TEST(SplitChunkRequest, WrongNamespaceTypeErrors) {
auto request = SplitChunkRequest::parseFromConfigCommand(
BSON("_configsvrCommitChunkSplit" << 1234 << "collEpoch" << OID("7fffffff0000000000000001")
- << "min"
- << BSON("a" << 1)
- << "max"
- << BSON("a" << 10)
- << "splitPoints"
- << BSON_ARRAY(BSON("a" << 5))
- << "shard"
+ << "min" << BSON("a" << 1) << "max" << BSON("a" << 10)
+ << "splitPoints" << BSON_ARRAY(BSON("a" << 5)) << "shard"
<< "shard0000"));
ASSERT_EQ(ErrorCodes::TypeMismatch, request.getStatus());
}
TEST(SplitChunkRequest, WrongCollEpochTypeErrors) {
- auto request = SplitChunkRequest::parseFromConfigCommand(BSON("_configsvrCommitChunkSplit"
- << "TestDB.TestColl"
- << "collEpoch"
- << 1234
- << "min"
- << BSON("a" << 1)
- << "max"
- << BSON("a" << 10)
- << "splitPoints"
- << BSON_ARRAY(BSON("a" << 5))
- << "shard"
- << "shard0000"));
+ auto request = SplitChunkRequest::parseFromConfigCommand(
+ BSON("_configsvrCommitChunkSplit"
+ << "TestDB.TestColl"
+ << "collEpoch" << 1234 << "min" << BSON("a" << 1) << "max" << BSON("a" << 10)
+ << "splitPoints" << BSON_ARRAY(BSON("a" << 5)) << "shard"
+ << "shard0000"));
ASSERT_EQ(ErrorCodes::TypeMismatch, request.getStatus());
}
TEST(SplitChunkRequest, WrongChunkToSplitTypeErrors) {
- auto request = SplitChunkRequest::parseFromConfigCommand(BSON("_configsvrCommitChunkSplit"
- << "TestDB.TestColl"
- << "collEpoch"
- << OID("7fffffff0000000000000001")
- << "min"
- << 1234
- << "max"
- << BSON("a" << 10)
- << "splitPoints"
- << BSON_ARRAY(BSON("a" << 5))
- << "shard"
- << "shard0000"));
+ auto request = SplitChunkRequest::parseFromConfigCommand(
+ BSON("_configsvrCommitChunkSplit"
+ << "TestDB.TestColl"
+ << "collEpoch" << OID("7fffffff0000000000000001") << "min" << 1234 << "max"
+ << BSON("a" << 10) << "splitPoints" << BSON_ARRAY(BSON("a" << 5)) << "shard"
+ << "shard0000"));
ASSERT_EQ(ErrorCodes::TypeMismatch, request.getStatus());
}
TEST(SplitChunkRequest, WrongSplitPointTypeErrors) {
- auto request = SplitChunkRequest::parseFromConfigCommand(BSON("_configsvrCommitChunkSplit"
- << "TestDB.TestColl"
- << "collEpoch"
- << OID("7fffffff0000000000000001")
- << "min"
- << BSON("a" << 1)
- << "max"
- << BSON("a" << 10)
- << "splitPoints"
- << 1234
- << "shard"
- << "shard0000"));
+ auto request = SplitChunkRequest::parseFromConfigCommand(
+ BSON("_configsvrCommitChunkSplit"
+ << "TestDB.TestColl"
+ << "collEpoch" << OID("7fffffff0000000000000001") << "min" << BSON("a" << 1) << "max"
+ << BSON("a" << 10) << "splitPoints" << 1234 << "shard"
+ << "shard0000"));
ASSERT_EQ(ErrorCodes::TypeMismatch, request.getStatus());
}
TEST(SplitChunkRequest, WrongShardNameTypeErrors) {
- auto request = SplitChunkRequest::parseFromConfigCommand(BSON("_configsvrCommitChunkSplit"
- << "TestDB.TestColl"
- << "collEpoch"
- << OID("7fffffff0000000000000001")
- << "min"
- << BSON("a" << 1)
- << "max"
- << BSON("a" << 10)
- << "splitPoints"
- << BSON_ARRAY(BSON("a" << 5))
- << "shard"
- << 1234));
+ auto request = SplitChunkRequest::parseFromConfigCommand(
+ BSON("_configsvrCommitChunkSplit"
+ << "TestDB.TestColl"
+ << "collEpoch" << OID("7fffffff0000000000000001") << "min" << BSON("a" << 1) << "max"
+ << BSON("a" << 10) << "splitPoints" << BSON_ARRAY(BSON("a" << 5)) << "shard" << 1234));
ASSERT_EQ(ErrorCodes::TypeMismatch, request.getStatus());
}
TEST(SplitChunkRequest, InvalidNamespaceErrors) {
- auto request = SplitChunkRequest::parseFromConfigCommand(BSON("_configsvrCommitChunkSplit"
- << ""
- << "collEpoch"
- << OID("7fffffff0000000000000001")
- << "min"
- << BSON("a" << 1)
- << "max"
- << BSON("a" << 10)
- << "splitPoints"
- << BSON_ARRAY(BSON("a" << 5))
- << "shard"
- << "shard0000"));
+ auto request = SplitChunkRequest::parseFromConfigCommand(
+ BSON("_configsvrCommitChunkSplit"
+ << ""
+ << "collEpoch" << OID("7fffffff0000000000000001") << "min" << BSON("a" << 1) << "max"
+ << BSON("a" << 10) << "splitPoints" << BSON_ARRAY(BSON("a" << 5)) << "shard"
+ << "shard0000"));
ASSERT_EQ(ErrorCodes::InvalidNamespace, request.getStatus());
}
TEST(SplitChunkRequest, EmptyChunkToSplitErrors) {
- auto request = SplitChunkRequest::parseFromConfigCommand(BSON("_configsvrCommitChunkSplit"
- << "TestDB.TestColl"
- << "collEpoch"
- << OID("7fffffff0000000000000001")
- << "min"
- << BSONObj()
- << "max"
- << BSON("a" << 10)
- << "splitPoints"
- << BSON_ARRAY(BSON("a" << 5))
- << "shard"
- << "shard0000"));
+ auto request = SplitChunkRequest::parseFromConfigCommand(
+ BSON("_configsvrCommitChunkSplit"
+ << "TestDB.TestColl"
+ << "collEpoch" << OID("7fffffff0000000000000001") << "min" << BSONObj() << "max"
+ << BSON("a" << 10) << "splitPoints" << BSON_ARRAY(BSON("a" << 5)) << "shard"
+ << "shard0000"));
ASSERT_EQ(ErrorCodes::BadValue, request.getStatus());
}
TEST(SplitChunkRequest, EmptySplitPointsErrors) {
- auto request = SplitChunkRequest::parseFromConfigCommand(BSON("_configsvrCommitChunkSplit"
- << "TestDB.TestColl"
- << "collEpoch"
- << OID("7fffffff0000000000000001")
- << "min"
- << BSON("a" << 1)
- << "max"
- << BSON("a" << 10)
- << "splitPoints"
- << BSONArray()
- << "shard"
- << "shard0000"));
+ auto request = SplitChunkRequest::parseFromConfigCommand(
+ BSON("_configsvrCommitChunkSplit"
+ << "TestDB.TestColl"
+ << "collEpoch" << OID("7fffffff0000000000000001") << "min" << BSON("a" << 1) << "max"
+ << BSON("a" << 10) << "splitPoints" << BSONArray() << "shard"
+ << "shard0000"));
ASSERT_EQ(ErrorCodes::InvalidOptions, request.getStatus());
}
-}
+} // namespace
} // namespace mongo
diff --git a/src/mongo/s/request_types/split_chunk_request_type.cpp b/src/mongo/s/request_types/split_chunk_request_type.cpp
index 8993efac965..6773e413197 100644
--- a/src/mongo/s/request_types/split_chunk_request_type.cpp
+++ b/src/mongo/s/request_types/split_chunk_request_type.cpp
@@ -161,8 +161,8 @@ const string& SplitChunkRequest::getShardName() const {
Status SplitChunkRequest::_validate() {
if (!getNamespace().isValid()) {
return Status(ErrorCodes::InvalidNamespace,
- str::stream() << "invalid namespace '" << _nss.ns()
- << "' specified for request");
+ str::stream()
+ << "invalid namespace '" << _nss.ns() << "' specified for request");
}
if (getSplitPoints().empty()) {
diff --git a/src/mongo/s/request_types/update_zone_key_range_request_type.cpp b/src/mongo/s/request_types/update_zone_key_range_request_type.cpp
index 350489aa242..cfbce859483 100644
--- a/src/mongo/s/request_types/update_zone_key_range_request_type.cpp
+++ b/src/mongo/s/request_types/update_zone_key_range_request_type.cpp
@@ -107,10 +107,7 @@ StatusWith<UpdateZoneKeyRangeRequest> UpdateZoneKeyRangeRequest::_parseFromComma
} else {
return {ErrorCodes::TypeMismatch,
str::stream() << "\"" << kZoneName << "\" had the wrong type. Expected "
- << typeName(String)
- << " or "
- << typeName(jstNULL)
- << ", found "
+ << typeName(String) << " or " << typeName(jstNULL) << ", found "
<< typeName(zoneElem.type())};
}
diff --git a/src/mongo/s/server.cpp b/src/mongo/s/server.cpp
index 782620dc454..36e7290866d 100644
--- a/src/mongo/s/server.cpp
+++ b/src/mongo/s/server.cpp
@@ -424,7 +424,7 @@ public:
void onConfirmedSet(const State& state) final {
auto connStr = state.connStr;
- auto fun = [ serviceContext = _serviceContext, connStr ](auto args) {
+ auto fun = [serviceContext = _serviceContext, connStr](auto args) {
if (ErrorCodes::isCancelationError(args.status.code())) {
return;
}
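This is one of the few non-stream changes in the commit: clang-format-7 drops the padding earlier releases forced inside lambda capture lists containing init-captures, so [ serviceContext = _serviceContext, connStr ](auto args) becomes [serviceContext = _serviceContext, connStr](auto args). A toy stand-alone illustration (the callback body is invented for the example, not the server.cpp logic):

    #include <iostream>
    #include <string>
    #include <utility>

    int main() {
        std::string connStr = "rs0/host1:27017,host2:27017";
        // clang-format-6.x emitted:  [ connStr = std::move(connStr) ](...)
        // clang-format-7.0.1 emits the tight form:
        auto fun = [connStr = std::move(connStr)](const std::string& prefix) {
            std::cout << prefix << connStr << '\n';
        };
        fun("confirmed set: ");
        return 0;
    }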
diff --git a/src/mongo/s/shard_key_pattern.cpp b/src/mongo/s/shard_key_pattern.cpp
index 9be98ba15b6..5d59c25653c 100644
--- a/src/mongo/s/shard_key_pattern.cpp
+++ b/src/mongo/s/shard_key_pattern.cpp
@@ -89,8 +89,7 @@ std::vector<std::unique_ptr<FieldRef>> parseShardKeyPattern(const BSONObj& keyPa
// Numeric and ascending (1.0), or "hashed" and single field
uassert(ErrorCodes::BadValue,
str::stream()
- << "Shard key "
- << keyPattern.toString()
+ << "Shard key " << keyPattern.toString()
<< " can contain either a single 'hashed' field"
<< " or multiple numerical fields set to a value of 1. Failed to parse field "
<< patternEl.fieldNameStringData(),
@@ -163,10 +162,7 @@ Status ShardKeyPattern::checkShardKeySize(const BSONObj& shardKey) {
return {ErrorCodes::ShardKeyTooBig,
str::stream() << "shard keys must be less than " << kMaxShardKeySizeBytes
- << " bytes, but key "
- << shardKey
- << " is "
- << shardKey.objsize()
+ << " bytes, but key " << shardKey << " is " << shardKey.objsize()
<< " bytes"};
}
diff --git a/src/mongo/s/shard_key_pattern_test.cpp b/src/mongo/s/shard_key_pattern_test.cpp
index b8c68a38c9a..ecde034896b 100644
--- a/src/mongo/s/shard_key_pattern_test.cpp
+++ b/src/mongo/s/shard_key_pattern_test.cpp
@@ -140,8 +140,7 @@ TEST(ShardKeyPattern, ExtractDocShardKeySingle) {
BSON("a" << regex));
const BSONObj ref = BSON("$ref"
<< "coll"
- << "$id"
- << 1);
+ << "$id" << 1);
ASSERT_BSONOBJ_EQ(docKey(pattern, BSON("a" << ref)), BSON("a" << ref));
ASSERT_BSONOBJ_EQ(docKey(pattern, fromjson("{a:{$dollarPrefixKey:true}}")),
fromjson("{a:{$dollarPrefixKey:true}}"));
@@ -169,8 +168,7 @@ TEST(ShardKeyPattern, ExtractDocShardKeyCompound) {
ASSERT_BSONOBJ_EQ(docKey(pattern,
BSON("c" << 30 << "b"
<< "20"
- << "a"
- << 10)),
+ << "a" << 10)),
fromjson("{a:10, b:'20'}"));
ASSERT_BSONOBJ_EQ(docKey(pattern, fromjson("{a:10, b:{$dollarPrefixKey:true}}")),
fromjson("{a:10, b:{$dollarPrefixKey:true}}"));
@@ -199,8 +197,7 @@ TEST(ShardKeyPattern, ExtractDocShardKeyNested) {
fromjson("{'a.b':10, c:30}"));
const BSONObj ref = BSON("$ref"
<< "coll"
- << "$id"
- << 1);
+ << "$id" << 1);
ASSERT_BSONOBJ_EQ(docKey(pattern, BSON("a" << BSON("b" << ref) << "c" << 30)),
BSON("a.b" << ref << "c" << 30));
@@ -308,8 +305,7 @@ TEST(ShardKeyPattern, ExtractQueryShardKeyCompound) {
ASSERT_BSONOBJ_EQ(queryKey(pattern,
BSON("c" << 30 << "b"
<< "20"
- << "a"
- << 10)),
+ << "a" << 10)),
fromjson("{a:10, b:'20'}"));
ASSERT_BSONOBJ_EQ(queryKey(pattern, fromjson("{a:10, b:[1, 2]}")), BSONObj());
diff --git a/src/mongo/s/shard_util.cpp b/src/mongo/s/shard_util.cpp
index 59faf424f90..1f430a0d7be 100644
--- a/src/mongo/s/shard_util.cpp
+++ b/src/mongo/s/shard_util.cpp
@@ -156,18 +156,16 @@ StatusWith<boost::optional<ChunkRange>> splitChunkAtMultiplePoints(
// is already performed at chunk split commit time, but we are performing it here for parity
// with old auto-split code, which might rely on it.
if (SimpleBSONObjComparator::kInstance.evaluate(chunkRange.getMin() == splitPoints.front())) {
- const std::string msg(str::stream() << "not splitting chunk " << chunkRange.toString()
- << ", split point "
- << splitPoints.front()
- << " is exactly on chunk bounds");
+ const std::string msg(str::stream()
+ << "not splitting chunk " << chunkRange.toString() << ", split point "
+ << splitPoints.front() << " is exactly on chunk bounds");
return {ErrorCodes::CannotSplit, msg};
}
if (SimpleBSONObjComparator::kInstance.evaluate(chunkRange.getMax() == splitPoints.back())) {
- const std::string msg(str::stream() << "not splitting chunk " << chunkRange.toString()
- << ", split point "
- << splitPoints.back()
- << " is exactly on chunk bounds");
+ const std::string msg(str::stream()
+ << "not splitting chunk " << chunkRange.toString() << ", split point "
+ << splitPoints.back() << " is exactly on chunk bounds");
return {ErrorCodes::CannotSplit, msg};
}
diff --git a/src/mongo/s/sharding_egress_metadata_hook.cpp b/src/mongo/s/sharding_egress_metadata_hook.cpp
index 468fe77bae1..10e837a2430 100644
--- a/src/mongo/s/sharding_egress_metadata_hook.cpp
+++ b/src/mongo/s/sharding_egress_metadata_hook.cpp
@@ -120,8 +120,8 @@ Status ShardingEgressMetadataHook::_advanceConfigOpTimeFromShard(OperationContex
if (opTime.is_initialized()) {
grid->advanceConfigOpTime(opCtx,
opTime.get(),
- str::stream() << "reply from shard " << shardId
- << " node");
+ str::stream()
+ << "reply from shard " << shardId << " node");
}
}
return Status::OK();
diff --git a/src/mongo/s/sharding_initialization.h b/src/mongo/s/sharding_initialization.h
index e3e7acce876..1c84f4ceda4 100644
--- a/src/mongo/s/sharding_initialization.h
+++ b/src/mongo/s/sharding_initialization.h
@@ -86,7 +86,7 @@ Status initializeGlobalShardingState(OperationContext* opCtx,
/**
* Loads cluster ID and waits for the reload of the Shard Registry.
-*/
+ */
Status waitForShardRegistryReload(OperationContext* opCtx);
diff --git a/src/mongo/s/sharding_mongod_test_fixture.cpp b/src/mongo/s/sharding_mongod_test_fixture.cpp
index 92a27af4c92..93c87da38d9 100644
--- a/src/mongo/s/sharding_mongod_test_fixture.cpp
+++ b/src/mongo/s/sharding_mongod_test_fixture.cpp
@@ -115,9 +115,8 @@ void ShardingMongodTestFixture::setUp() {
serversBob.append(BSON("host" << _servers[i].toString() << "_id" << static_cast<int>(i)));
}
repl::ReplSetConfig replSetConfig;
- ASSERT_OK(replSetConfig.initialize(
- BSON("_id" << _setName << "protocolVersion" << 1 << "version" << 3 << "members"
- << serversBob.arr())));
+ ASSERT_OK(replSetConfig.initialize(BSON("_id" << _setName << "protocolVersion" << 1 << "version"
+ << 3 << "members" << serversBob.arr())));
replCoordPtr->setGetConfigReturnValue(replSetConfig);
repl::ReplicationCoordinator::set(service, std::move(replCoordPtr));
diff --git a/src/mongo/s/sharding_router_test_fixture.cpp b/src/mongo/s/sharding_router_test_fixture.cpp
index 0261a54f8ba..c533f14a125 100644
--- a/src/mongo/s/sharding_router_test_fixture.cpp
+++ b/src/mongo/s/sharding_router_test_fixture.cpp
@@ -336,10 +336,8 @@ void ShardingTestFixture::expectConfigCollectionCreate(const HostAndPort& config
BSON("create" << collName << "capped" << true << "size" << cappedSize << "writeConcern"
<< BSON("w"
<< "majority"
- << "wtimeout"
- << 60000)
- << "maxTimeMS"
- << 30000);
+ << "wtimeout" << 60000)
+ << "maxTimeMS" << 30000);
ASSERT_BSONOBJ_EQ(expectedCreateCmd, request.cmdObj);
return response;
diff --git a/src/mongo/s/sharding_task_executor.cpp b/src/mongo/s/sharding_task_executor.cpp
index 8a3e3c39b60..c8db2851af7 100644
--- a/src/mongo/s/sharding_task_executor.cpp
+++ b/src/mongo/s/sharding_task_executor.cpp
@@ -160,9 +160,12 @@ StatusWith<TaskExecutor::CallbackHandle> ShardingTaskExecutor::scheduleRemoteCom
auto clusterGLE = ClusterLastErrorInfo::get(request.opCtx->getClient());
- auto shardingCb =
- [ timeTracker, clusterGLE, cb, grid = Grid::get(request.opCtx), hosts = request.target ](
- const TaskExecutor::RemoteCommandOnAnyCallbackArgs& args) {
+ auto shardingCb = [timeTracker,
+ clusterGLE,
+ cb,
+ grid = Grid::get(request.opCtx),
+ hosts = request.target](
+ const TaskExecutor::RemoteCommandOnAnyCallbackArgs& args) {
ON_BLOCK_EXIT([&cb, &args]() { cb(args); });
if (!args.response.isOK()) {
diff --git a/src/mongo/s/sharding_task_executor_pool_controller.cpp b/src/mongo/s/sharding_task_executor_pool_controller.cpp
index ffcdd6cd82b..871293699ea 100644
--- a/src/mongo/s/sharding_task_executor_pool_controller.cpp
+++ b/src/mongo/s/sharding_task_executor_pool_controller.cpp
@@ -53,7 +53,7 @@ void emplaceOrInvariant(Map&& map, Args&&... args) noexcept {
invariant(ret.second, "Element already existed in map/set");
}
-} // anonymous
+} // namespace
Status ShardingTaskExecutorPoolController::validateHostTimeout(const int& hostTimeoutMS) {
auto toRefreshTimeoutMS = gParameters.toRefreshTimeoutMS.load();
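The "} // anonymous" to "} // namespace" change above is clang-format's FixNamespaceComments pass, which canonicalizes every namespace terminator comment; a minimal illustration (the constant is invented):

    namespace mongo {
    namespace {
    constexpr int kLocalConstant = 42;  // internal linkage, file-local
    }  // namespace
    }  // namespace mongo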
diff --git a/src/mongo/s/transaction_router.cpp b/src/mongo/s/transaction_router.cpp
index c2bdc3d7f68..8bfb6cbbb39 100644
--- a/src/mongo/s/transaction_router.cpp
+++ b/src/mongo/s/transaction_router.cpp
@@ -41,7 +41,6 @@
#include "mongo/db/jsobj.h"
#include "mongo/db/logical_clock.h"
#include "mongo/db/logical_session_id.h"
-#include "mongo/db/logical_session_id.h"
#include "mongo/db/repl/read_concern_args.h"
#include "mongo/executor/task_executor_pool.h"
#include "mongo/rpc/get_status_from_command_result.h"
@@ -520,17 +519,13 @@ void TransactionRouter::Router::_assertAbortStatusIsOkOrNoSuchTransaction(
auto shardResponse = uassertStatusOKWithContext(
std::move(response.swResponse),
str::stream() << "Failed to send abort to shard " << response.shardId
- << " between retries of statement "
- << p().latestStmtId);
+ << " between retries of statement " << p().latestStmtId);
auto status = getStatusFromCommandResult(shardResponse.data);
uassert(ErrorCodes::NoSuchTransaction,
str::stream() << txnIdToString() << "Transaction aborted between retries of statement "
- << p().latestStmtId
- << " due to error: "
- << status
- << " from shard: "
- << response.shardId,
+ << p().latestStmtId << " due to error: " << status
+ << " from shard: " << response.shardId,
status.isOK() || status.code() == ErrorCodes::NoSuchTransaction);
// abortTransaction is sent with no write concern, so there's no need to check for a write
@@ -658,8 +653,9 @@ void TransactionRouter::Router::onSnapshotError(OperationContext* opCtx,
const Status& errorStatus) {
invariant(canContinueOnSnapshotError());
- LOG(3) << txnIdToString() << " Clearing pending participants and resetting global snapshot "
- "timestamp after snapshot error: "
+ LOG(3) << txnIdToString()
+ << " Clearing pending participants and resetting global snapshot "
+ "timestamp after snapshot error: "
<< errorStatus << ", previous timestamp: " << o().atClusterTime->getTime();
// The transaction must be restarted on all participants because a new read timestamp will be
@@ -711,17 +707,14 @@ void TransactionRouter::Router::beginOrContinueTxn(OperationContext* opCtx,
// This transaction is older than the transaction currently in progress, so throw an error.
uasserted(ErrorCodes::TransactionTooOld,
str::stream() << "txnNumber " << txnNumber << " is less than last txnNumber "
- << o().txnNumber
- << " seen in session "
- << _sessionId());
+ << o().txnNumber << " seen in session " << _sessionId());
} else if (txnNumber == o().txnNumber) {
// This is the same transaction as the one in progress.
switch (action) {
case TransactionActions::kStart: {
uasserted(ErrorCodes::ConflictingOperationInProgress,
str::stream() << "txnNumber " << o().txnNumber << " for session "
- << _sessionId()
- << " already started");
+ << _sessionId() << " already started");
}
case TransactionActions::kContinue: {
uassert(ErrorCodes::InvalidOptions,
@@ -767,11 +760,9 @@ void TransactionRouter::Router::beginOrContinueTxn(OperationContext* opCtx,
}
case TransactionActions::kContinue: {
uasserted(ErrorCodes::NoSuchTransaction,
- str::stream() << "cannot continue txnId " << o().txnNumber
- << " for session "
- << _sessionId()
- << " with txnId "
- << txnNumber);
+ str::stream()
+ << "cannot continue txnId " << o().txnNumber << " for session "
+ << _sessionId() << " with txnId " << txnNumber);
}
case TransactionActions::kCommit: {
_resetRouterState(opCtx, txnNumber);
@@ -896,11 +887,10 @@ BSONObj TransactionRouter::Router::_commitTransaction(
switch (participant.second.readOnly) {
case Participant::ReadOnly::kUnset:
uasserted(ErrorCodes::NoSuchTransaction,
- str::stream() << txnIdToString() << " Failed to commit transaction "
- << "because a previous statement on the transaction "
- << "participant "
- << participant.first
- << " was unsuccessful.");
+ str::stream()
+ << txnIdToString() << " Failed to commit transaction "
+ << "because a previous statement on the transaction "
+ << "participant " << participant.first << " was unsuccessful.");
case Participant::ReadOnly::kReadOnly:
readOnlyShards.push_back(participant.first);
break;
@@ -1019,8 +1009,9 @@ void TransactionRouter::Router::implicitlyAbortTransaction(OperationContext* opC
const Status& errorStatus) {
if (o().commitType == CommitType::kTwoPhaseCommit ||
o().commitType == CommitType::kRecoverWithToken) {
- LOG(3) << txnIdToString() << " Router not sending implicit abortTransaction because commit "
- "may have been handed off to the coordinator";
+ LOG(3) << txnIdToString()
+ << " Router not sending implicit abortTransaction because commit "
+ "may have been handed off to the coordinator";
return;
}
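The transaction_router.cpp hunks above all reshape messages fed to uassert and uasserted, the server's throwing assertions: uasserted(code, msg) throws unconditionally, while uassert(code, msg, cond) throws when cond is false. A minimal sketch of the rewrapped pattern (header paths per the 2019 tree; the helper function itself is invented for illustration):

    #include "mongo/util/assert_util.h"  // uassert / uasserted
    #include "mongo/util/str.h"          // str::stream

    namespace mongo {
    void checkTxnNumber(long long requested, long long active) {
        // Mirrors the TransactionTooOld check reformatted above:
        uassert(ErrorCodes::TransactionTooOld,
                str::stream() << "txnNumber " << requested
                              << " is less than last txnNumber " << active,
                requested >= active);
    }
    }  // namespace mongo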
diff --git a/src/mongo/s/transaction_router.h b/src/mongo/s/transaction_router.h
index 4d442f3a225..82e7498523a 100644
--- a/src/mongo/s/transaction_router.h
+++ b/src/mongo/s/transaction_router.h
@@ -215,78 +215,78 @@ public:
}
/**
- * Starts a fresh transaction in this session or continue an existing one. Also cleans up the
- * previous transaction state.
- */
+     * Starts a fresh transaction in this session or continues an existing one. Also cleans up
+ * the previous transaction state.
+ */
void beginOrContinueTxn(OperationContext* opCtx,
TxnNumber txnNumber,
TransactionActions action);
/**
- * Attaches the required transaction related fields for a request to be sent to the given
- * shard.
- *
- * Calling this method has the following side effects:
- * 1. Potentially selecting a coordinator.
- * 2. Adding the shard to the list of participants.
- * 3. Also append fields for first statements (ex. startTransaction, readConcern)
- * if the shard was newly added to the list of participants.
- */
+ * Attaches the required transaction related fields for a request to be sent to the given
+ * shard.
+ *
+ * Calling this method has the following side effects:
+ * 1. Potentially selecting a coordinator.
+ * 2. Adding the shard to the list of participants.
+ *    3. Also appends fields for first statements (e.g. startTransaction, readConcern)
+ * if the shard was newly added to the list of participants.
+ */
BSONObj attachTxnFieldsIfNeeded(OperationContext* opCtx,
const ShardId& shardId,
const BSONObj& cmdObj);
/**
- * Processes the transaction metadata in the response from the participant if the response
- * indicates the operation succeeded.
- */
+ * Processes the transaction metadata in the response from the participant if the response
+ * indicates the operation succeeded.
+ */
void processParticipantResponse(OperationContext* opCtx,
const ShardId& shardId,
const BSONObj& responseObj);
/**
- * Returns true if the current transaction can retry on a stale version error from a
- * contacted shard. This is always true except for an error received by a write that is not
- * the first overall statement in the sharded transaction. This is because the entire
- * command will be retried, and shards that were not stale and are targeted again may
- * incorrectly execute the command a second time.
- *
- * Note: Even if this method returns true, the retry attempt may still fail, e.g. if one of
- * the shards that returned a stale version error was involved in a previously completed a
- * statement for this transaction.
- *
- * TODO SERVER-37207: Change batch writes to retry only the failed writes in a batch, to
- * allow retrying writes beyond the first overall statement.
- */
+ * Returns true if the current transaction can retry on a stale version error from a
+ * contacted shard. This is always true except for an error received by a write that is not
+ * the first overall statement in the sharded transaction. This is because the entire
+ * command will be retried, and shards that were not stale and are targeted again may
+ * incorrectly execute the command a second time.
+ *
+ * Note: Even if this method returns true, the retry attempt may still fail, e.g. if one of
+ * the shards that returned a stale version error was involved in a previously completed
+ * statement for this transaction.
+ *
+ * TODO SERVER-37207: Change batch writes to retry only the failed writes in a batch, to
+ * allow retrying writes beyond the first overall statement.
+ */
bool canContinueOnStaleShardOrDbError(StringData cmdName) const;
/**
- * Updates the transaction state to allow for a retry of the current command on a stale
- * version error. This includes sending abortTransaction to all cleared participants. Will
- * throw if the transaction cannot be continued.
- */
+ * Updates the transaction state to allow for a retry of the current command on a stale
+ * version error. This includes sending abortTransaction to all cleared participants. Will
+ * throw if the transaction cannot be continued.
+ */
void onStaleShardOrDbError(OperationContext* opCtx,
StringData cmdName,
const Status& errorStatus);
/**
- * Returns true if the current transaction can retry on a snapshot error. This is only true
- * on the first command recevied for a transaction.
- */
+ * Returns true if the current transaction can retry on a snapshot error. This is only true
+ * on the first command received for a transaction.
+ */
bool canContinueOnSnapshotError() const;
/**
- * Resets the transaction state to allow for a retry attempt. This includes clearing all
- * participants, clearing the coordinator, resetting the global read timestamp, and sending
- * abortTransaction to all cleared participants. Will throw if the transaction cannot be
- * continued.
- */
+ * Resets the transaction state to allow for a retry attempt. This includes clearing all
+ * participants, clearing the coordinator, resetting the global read timestamp, and sending
+ * abortTransaction to all cleared participants. Will throw if the transaction cannot be
+ * continued.
+ */
void onSnapshotError(OperationContext* opCtx, const Status& errorStatus);
/**
- * Updates the transaction tracking state to allow for a retry attempt on a view resolution
- * error. This includes sending abortTransaction to all cleared participants.
- */
+ * Updates the transaction tracking state to allow for a retry attempt on a view resolution
+ * error. This includes sending abortTransaction to all cleared participants.
+ */
void onViewResolutionError(OperationContext* opCtx, const NamespaceString& nss);
/**
@@ -301,206 +301,207 @@ public:
LogicalTime getSelectedAtClusterTime() const;
/**
- * Sets the atClusterTime for the current transaction to the latest time in the router's
- * logical clock. Does nothing if the transaction does not have snapshot read concern or an
- * atClusterTime has already been selected and cannot be changed.
- */
+ * Sets the atClusterTime for the current transaction to the latest time in the router's
+ * logical clock. Does nothing if the transaction does not have snapshot read concern or an
+ * atClusterTime has already been selected and cannot be changed.
+ */
void setDefaultAtClusterTime(OperationContext* opCtx);
/**
- * If a coordinator has been selected for the current transaction, returns its id.
- */
+ * If a coordinator has been selected for the current transaction, returns its id.
+ */
const boost::optional<ShardId>& getCoordinatorId() const;
/**
- * If a recovery shard has been selected for the current transaction, returns its id.
- */
+ * If a recovery shard has been selected for the current transaction, returns its id.
+ */
const boost::optional<ShardId>& getRecoveryShardId() const;
/**
- * Commits the transaction.
- *
- * For transactions that only did reads or only wrote to one shard, sends commit directly to
- * the participants and returns the first error response or the last (success) response.
- *
- * For transactions that performed writes to multiple shards, hands off the participant list
- * to the coordinator to do two-phase commit, and returns the coordinator's response.
- */
+ * Commits the transaction.
+ *
+ * For transactions that only did reads or only wrote to one shard, sends commit directly to
+ * the participants and returns the first error response or the last (success) response.
+ *
+ * For transactions that performed writes to multiple shards, hands off the participant list
+ * to the coordinator to do two-phase commit, and returns the coordinator's response.
+ */
BSONObj commitTransaction(OperationContext* opCtx,
const boost::optional<TxnRecoveryToken>& recoveryToken);
/**
- * Sends abort to all participants.
- *
- * Returns the first error response or the last (success) response.
- */
+ * Sends abort to all participants.
+ *
+ * Returns the first error response or the last (success) response.
+ */
BSONObj abortTransaction(OperationContext* opCtx);
/**
- * Sends abort to all shards in the current participant list. Will retry on retryable errors,
- * but ignores the responses from each shard.
- */
+ * Sends abort to all shards in the current participant list. Will retry on retryable
+ * errors, but ignores the responses from each shard.
+ */
void implicitlyAbortTransaction(OperationContext* opCtx, const Status& errorStatus);
/**
- * If a coordinator has been selected for this transaction already, constructs a recovery
- * token, which can be used to resume commit or abort of the transaction from a different
- * router.
- */
+ * If a coordinator has been selected for this transaction already, constructs a recovery
+ * token, which can be used to resume commit or abort of the transaction from a different
+ * router.
+ */
void appendRecoveryToken(BSONObjBuilder* builder) const;
/**
- * Returns a string with the active transaction's transaction number and logical session id
- * (i.e. the transaction id).
- */
+ * Returns a string with the active transaction's transaction number and logical session id
+ * (i.e. the transaction id).
+ */
std::string txnIdToString() const;
/**
- * Returns the participant for this transaction or nullptr if the specified shard is not
- * participant of this transaction.
- */
+ * Returns the participant for this transaction or nullptr if the specified shard is not
+ * a participant of this transaction.
+ */
const Participant* getParticipant(const ShardId& shard);
/**
- * Returns the statement id of the latest received command for this transaction.
- */
+ * Returns the statement id of the latest received command for this transaction.
+ */
StmtId getLatestStmtId() const {
return p().latestStmtId;
}
/**
- * Returns a copy of the timing stats of the transaction router's active transaction.
- */
+ * Returns a copy of the timing stats of the transaction router's active transaction.
+ */
const TimingStats& getTimingStats() const {
return o().timingStats;
}
private:
/**
- * Resets the router's state. Used when the router sees a new transaction for the first time.
- * This is required because we don't create a new router object for each transaction, but
- * instead reuse the same object across different transactions.
- */
+ * Resets the router's state. Used when the router sees a new transaction for the first
+ * time. This is required because we don't create a new router object for each transaction,
+ * but instead reuse the same object across different transactions.
+ */
void _resetRouterState(OperationContext* opCtx, const TxnNumber& txnNumber);
/**
- * Internal method for committing a transaction. Should only throw on failure to send commit.
- */
+ * Internal method for committing a transaction. Should only throw on failure to send
+ * commit.
+ */
BSONObj _commitTransaction(OperationContext* opCtx,
const boost::optional<TxnRecoveryToken>& recoveryToken);
/**
- * Retrieves the transaction's outcome from the shard specified in the recovery token.
- */
+ * Retrieves the transaction's outcome from the shard specified in the recovery token.
+ */
BSONObj _commitWithRecoveryToken(OperationContext* opCtx,
const TxnRecoveryToken& recoveryToken);
/**
- * Hands off coordinating a two-phase commit across all participants to the coordinator
- * shard.
- */
+ * Hands off coordinating a two-phase commit across all participants to the coordinator
+ * shard.
+ */
BSONObj _handOffCommitToCoordinator(OperationContext* opCtx);
/**
- * Sets the given logical time as the atClusterTime for the transaction to be the greater of
- * the given time and the user's afterClusterTime, if one was provided.
- */
+ * Sets the transaction's atClusterTime to the greater of the given candidate time and the
+ * user's afterClusterTime, if one was provided.
+ */
void _setAtClusterTime(OperationContext* opCtx,
const boost::optional<LogicalTime>& afterClusterTime,
LogicalTime candidateTime);
/**
- * Throws NoSuchTransaction if the response from abortTransaction failed with a code other
- * than NoSuchTransaction. Does not check for write concern errors.
- */
+ * Throws NoSuchTransaction if the response from abortTransaction failed with a code other
+ * than NoSuchTransaction. Does not check for write concern errors.
+ */
void _assertAbortStatusIsOkOrNoSuchTransaction(
const AsyncRequestsSender::Response& response) const;
/**
- * If the transaction's read concern level is snapshot, asserts the participant's
- * atClusterTime matches the transaction's.
- */
+ * If the transaction's read concern level is snapshot, asserts the participant's
+ * atClusterTime matches the transaction's.
+ */
void _verifyParticipantAtClusterTime(const Participant& participant);
/**
- * Removes all participants created during the current statement from the participant list
- * and sends abortTransaction to each. Waits for all responses before returning.
- */
+ * Removes all participants created during the current statement from the participant list
+ * and sends abortTransaction to each. Waits for all responses before returning.
+ */
void _clearPendingParticipants(OperationContext* opCtx);
/**
- * Creates a new participant for the shard.
- */
+ * Creates a new participant for the shard.
+ */
TransactionRouter::Participant& _createParticipant(OperationContext* opCtx,
const ShardId& shard);
/**
- * Sets the new readOnly value for the current participant on the shard.
- */
+ * Sets the new readOnly value for the current participant on the shard.
+ */
void _setReadOnlyForParticipant(OperationContext* opCtx,
const ShardId& shard,
const Participant::ReadOnly readOnly);
/**
- * Updates relevant metrics when a new transaction is begun.
- */
+ * Updates relevant metrics when a new transaction is begun.
+ */
void _onNewTransaction(OperationContext* opCtx);
/**
- * Updates relevant metrics when a router receives commit for a higher txnNumber than it has
- * seen so far.
- */
+ * Updates relevant metrics when a router receives commit for a higher txnNumber than it has
+ * seen so far.
+ */
void _onBeginRecoveringDecision(OperationContext* opCtx);
/**
- * Updates relevant metrics when the router receives an explicit abort from the client.
- */
+ * Updates relevant metrics when the router receives an explicit abort from the client.
+ */
void _onExplicitAbort(OperationContext* opCtx);
/**
- * Updates relevant metrics when the router begins an implicit abort after an error.
- */
+ * Updates relevant metrics when the router begins an implicit abort after an error.
+ */
void _onImplicitAbort(OperationContext* opCtx, const Status& errorStatus);
/**
- * Updates relevant metrics when a transaction is about to begin commit.
- */
+ * Updates relevant metrics when a transaction is about to begin commit.
+ */
void _onStartCommit(WithLock wl, OperationContext* opCtx);
/**
- * Updates relevant metrics when a transaction receives a successful response for commit.
- */
+ * Updates relevant metrics when a transaction receives a successful response for commit.
+ */
void _onSuccessfulCommit(OperationContext* opCtx);
/**
- * Updates relevant metrics when commit receives a response with a non-retryable command
- * error per the retryable writes specification.
- */
+ * Updates relevant metrics when commit receives a response with a non-retryable command
+ * error per the retryable writes specification.
+ */
void _onNonRetryableCommitError(OperationContext* opCtx, Status commitStatus);
/**
- * The first time this method is called it marks the transaction as over in the router's
- * diagnostics and will log transaction information if its duration is over the global slowMS
- * threshold or the transaction log componenet verbosity >= 1. Only meant to be called when
- * the router definitively knows the transaction's outcome, e.g. it should not be invoked
- * after a network error on commit.
- */
+ * The first time this method is called it marks the transaction as over in the router's
+ * diagnostics and will log transaction information if its duration is over the global
+ * slowMS threshold or the transaction log component verbosity >= 1. Only meant to be
+ * called when the router definitively knows the transaction's outcome, e.g. it should not
+ * be invoked after a network error on commit.
+ */
void _endTransactionTrackingIfNecessary(OperationContext* opCtx,
TerminationCause terminationCause);
/**
- * Returns all participants created during the current statement.
- */
+ * Returns all participants created during the current statement.
+ */
std::vector<ShardId> _getPendingParticipants() const;
/**
- * Prints slow transaction information to the log.
- */
+ * Prints slow transaction information to the log.
+ */
void _logSlowTransaction(OperationContext* opCtx, TerminationCause terminationCause) const;
/**
- * Returns a string to be logged for slow transactions.
- */
+ * Returns a string to be logged for slow transactions.
+ */
std::string _transactionInfoForLog(OperationContext* opCtx,
TerminationCause terminationCause) const;
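To make the reflowed declarations above concrete, this is roughly how a router command path would drive them; the accessor and scaffolding names are assumptions, while the fields described in the comments mirror the test expectations in transaction_router_test.cpp below:

// Hypothetical caller; only the TransactionRouter::Router methods declared
// above are real, the surrounding names are illustrative.
auto txnRouter = TransactionRouter::get(opCtx);  // accessor name assumed
txnRouter.beginOrContinueTxn(opCtx, txnNumber, TransactionRouter::TransactionActions::kStart);
BSONObj cmdToSend = txnRouter.attachTxnFieldsIfNeeded(opCtx, shardId, cmdObj);
// For the first statement to a newly added participant, cmdToSend gains
// startTransaction: true, autocommit: false, txnNumber, the readConcern,
// and coordinator: true if this shard was chosen as the coordinator.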
diff --git a/src/mongo/s/transaction_router_test.cpp b/src/mongo/s/transaction_router_test.cpp
index 1451e8f3816..d551c2e6399 100644
--- a/src/mongo/s/transaction_router_test.cpp
+++ b/src/mongo/s/transaction_router_test.cpp
@@ -232,16 +232,9 @@ TEST_F(TransactionRouterTestWithDefaultSession,
<< "readConcern"
<< BSON("level"
<< "snapshot"
- << "atClusterTime"
- << kInMemoryLogicalTime.asTimestamp())
- << "startTransaction"
- << true
- << "coordinator"
- << true
- << "autocommit"
- << false
- << "txnNumber"
- << txnNum);
+ << "atClusterTime" << kInMemoryLogicalTime.asTimestamp())
+ << "startTransaction" << true << "coordinator" << true
+ << "autocommit" << false << "txnNumber" << txnNum);
{
auto newCmd = txnRouter.attachTxnFieldsIfNeeded(operationContext(),
@@ -258,11 +251,7 @@ TEST_F(TransactionRouterTestWithDefaultSession,
<< "test"));
ASSERT_BSONOBJ_EQ(BSON("update"
<< "test"
- << "coordinator"
- << true
- << "autocommit"
- << false
- << "txnNumber"
+ << "coordinator" << true << "autocommit" << false << "txnNumber"
<< txnNum),
newCmd);
}
@@ -281,16 +270,9 @@ TEST_F(TransactionRouterTestWithDefaultSession, BasicStartTxnWithAtClusterTime)
<< "readConcern"
<< BSON("level"
<< "snapshot"
- << "atClusterTime"
- << kInMemoryLogicalTime.asTimestamp())
- << "startTransaction"
- << true
- << "coordinator"
- << true
- << "autocommit"
- << false
- << "txnNumber"
- << txnNum);
+ << "atClusterTime" << kInMemoryLogicalTime.asTimestamp())
+ << "startTransaction" << true << "coordinator" << true
+ << "autocommit" << false << "txnNumber" << txnNum);
{
auto newCmd = txnRouter.attachTxnFieldsIfNeeded(operationContext(),
@@ -307,11 +289,7 @@ TEST_F(TransactionRouterTestWithDefaultSession, BasicStartTxnWithAtClusterTime)
<< "test"));
ASSERT_BSONOBJ_EQ(BSON("update"
<< "test"
- << "coordinator"
- << true
- << "autocommit"
- << false
- << "txnNumber"
+ << "coordinator" << true << "autocommit" << false << "txnNumber"
<< txnNum),
newCmd);
}
@@ -341,16 +319,9 @@ TEST_F(TransactionRouterTestWithDefaultSession, NewParticipantMustAttachTxnAndRe
<< "readConcern"
<< BSON("level"
<< "snapshot"
- << "atClusterTime"
- << kInMemoryLogicalTime.asTimestamp())
- << "startTransaction"
- << true
- << "coordinator"
- << true
- << "autocommit"
- << false
- << "txnNumber"
- << txnNum);
+ << "atClusterTime" << kInMemoryLogicalTime.asTimestamp())
+ << "startTransaction" << true << "coordinator" << true
+ << "autocommit" << false << "txnNumber" << txnNum);
{
auto newCmd = txnRouter.attachTxnFieldsIfNeeded(operationContext(),
@@ -367,11 +338,7 @@ TEST_F(TransactionRouterTestWithDefaultSession, NewParticipantMustAttachTxnAndRe
<< "test"));
ASSERT_BSONOBJ_EQ(BSON("update"
<< "test"
- << "coordinator"
- << true
- << "autocommit"
- << false
- << "txnNumber"
+ << "coordinator" << true << "autocommit" << false << "txnNumber"
<< txnNum),
newCmd);
}
@@ -381,13 +348,8 @@ TEST_F(TransactionRouterTestWithDefaultSession, NewParticipantMustAttachTxnAndRe
<< "readConcern"
<< BSON("level"
<< "snapshot"
- << "atClusterTime"
- << kInMemoryLogicalTime.asTimestamp())
- << "startTransaction"
- << true
- << "autocommit"
- << false
- << "txnNumber"
+ << "atClusterTime" << kInMemoryLogicalTime.asTimestamp())
+ << "startTransaction" << true << "autocommit" << false << "txnNumber"
<< txnNum);
{
@@ -405,10 +367,7 @@ TEST_F(TransactionRouterTestWithDefaultSession, NewParticipantMustAttachTxnAndRe
<< "test"));
ASSERT_BSONOBJ_EQ(BSON("update"
<< "test"
- << "autocommit"
- << false
- << "txnNumber"
- << txnNum),
+ << "autocommit" << false << "txnNumber" << txnNum),
newCmd);
}
}
@@ -431,16 +390,9 @@ TEST_F(TransactionRouterTestWithDefaultSession, StartingNewTxnShouldClearState)
<< "readConcern"
<< BSON("level"
<< "snapshot"
- << "atClusterTime"
- << kInMemoryLogicalTime.asTimestamp())
- << "startTransaction"
- << true
- << "coordinator"
- << true
- << "autocommit"
- << false
- << "txnNumber"
- << txnNum),
+ << "atClusterTime" << kInMemoryLogicalTime.asTimestamp())
+ << "startTransaction" << true << "coordinator" << true
+ << "autocommit" << false << "txnNumber" << txnNum),
newCmd);
}
@@ -454,16 +406,9 @@ TEST_F(TransactionRouterTestWithDefaultSession, StartingNewTxnShouldClearState)
<< "readConcern"
<< BSON("level"
<< "snapshot"
- << "atClusterTime"
- << kInMemoryLogicalTime.asTimestamp())
- << "startTransaction"
- << true
- << "coordinator"
- << true
- << "autocommit"
- << false
- << "txnNumber"
- << txnNum2);
+ << "atClusterTime" << kInMemoryLogicalTime.asTimestamp())
+ << "startTransaction" << true << "coordinator" << true
+ << "autocommit" << false << "txnNumber" << txnNum2);
{
auto newCmd = txnRouter.attachTxnFieldsIfNeeded(operationContext(),
@@ -707,26 +652,18 @@ TEST_F(TransactionRouterTestWithDefaultSession, DoesNotAttachTxnNumIfAlreadyTher
BSONObj expectedNewObj = BSON("insert"
<< "test"
- << "txnNumber"
- << txnNum
- << "readConcern"
+ << "txnNumber" << txnNum << "readConcern"
<< BSON("level"
<< "snapshot"
- << "atClusterTime"
- << kInMemoryLogicalTime.asTimestamp())
- << "startTransaction"
- << true
- << "coordinator"
- << true
- << "autocommit"
- << false);
+ << "atClusterTime" << kInMemoryLogicalTime.asTimestamp())
+ << "startTransaction" << true << "coordinator" << true
+ << "autocommit" << false);
auto newCmd = txnRouter.attachTxnFieldsIfNeeded(operationContext(),
shard1,
BSON("insert"
<< "test"
- << "txnNumber"
- << txnNum));
+ << "txnNumber" << txnNum));
ASSERT_BSONOBJ_EQ(expectedNewObj, newCmd);
}
@@ -744,8 +681,7 @@ DEATH_TEST_F(TransactionRouterTestWithDefaultSession,
shard1,
BSON("insert"
<< "test"
- << "txnNumber"
- << TxnNumber(10)));
+ << "txnNumber" << TxnNumber(10)));
}
TEST_F(TransactionRouterTestWithDefaultSession, AttachTxnValidatesReadConcernIfAlreadyOnCmd) {
@@ -769,16 +705,9 @@ TEST_F(TransactionRouterTestWithDefaultSession, AttachTxnValidatesReadConcernIfA
<< "readConcern"
<< BSON("level"
<< "snapshot"
- << "atClusterTime"
- << kInMemoryLogicalTime.asTimestamp())
- << "startTransaction"
- << true
- << "coordinator"
- << true
- << "autocommit"
- << false
- << "txnNumber"
- << txnNum),
+ << "atClusterTime" << kInMemoryLogicalTime.asTimestamp())
+ << "startTransaction" << true << "coordinator" << true
+ << "autocommit" << false << "txnNumber" << txnNum),
newCmd);
}
}
@@ -810,14 +739,8 @@ TEST_F(TransactionRouterTestWithDefaultSession, PassesThroughNoReadConcernToPart
BSONObj expectedNewObj = BSON("insert"
<< "test"
- << "startTransaction"
- << true
- << "coordinator"
- << true
- << "autocommit"
- << false
- << "txnNumber"
- << txnNum);
+ << "startTransaction" << true << "coordinator" << true
+ << "autocommit" << false << "txnNumber" << txnNum);
auto newCmd = txnRouter.attachTxnFieldsIfNeeded(operationContext(),
shard1,
@@ -843,14 +766,8 @@ TEST_F(TransactionRouterTestWithDefaultSession,
<< "test"
<< "readConcern"
<< BSON("afterClusterTime" << kAfterClusterTime.asTimestamp())
- << "startTransaction"
- << true
- << "coordinator"
- << true
- << "autocommit"
- << false
- << "txnNumber"
- << txnNum);
+ << "startTransaction" << true << "coordinator" << true
+ << "autocommit" << false << "txnNumber" << txnNum);
auto newCmd = txnRouter.attachTxnFieldsIfNeeded(operationContext(),
shard1,
@@ -1489,8 +1406,7 @@ TEST_F(TransactionRouterTestWithDefaultSession, SnapshotErrorsResetAtClusterTime
BSONObj expectedReadConcern = BSON("level"
<< "snapshot"
- << "atClusterTime"
- << kInMemoryLogicalTime.asTimestamp());
+ << "atClusterTime" << kInMemoryLogicalTime.asTimestamp());
{
auto newCmd = txnRouter.attachTxnFieldsIfNeeded(operationContext(),
@@ -1516,8 +1432,7 @@ TEST_F(TransactionRouterTestWithDefaultSession, SnapshotErrorsResetAtClusterTime
expectedReadConcern = BSON("level"
<< "snapshot"
- << "atClusterTime"
- << laterTime.asTimestamp());
+ << "atClusterTime" << laterTime.asTimestamp());
{
auto newCmd = txnRouter.attachTxnFieldsIfNeeded(operationContext(),
@@ -1539,8 +1454,7 @@ TEST_F(TransactionRouterTestWithDefaultSession,
BSONObj expectedReadConcern = BSON("level"
<< "snapshot"
- << "atClusterTime"
- << kInMemoryLogicalTime.asTimestamp());
+ << "atClusterTime" << kInMemoryLogicalTime.asTimestamp());
{
auto newCmd = txnRouter.attachTxnFieldsIfNeeded(operationContext(),
@@ -1560,8 +1474,7 @@ TEST_F(TransactionRouterTestWithDefaultSession,
expectedReadConcern = BSON("level"
<< "snapshot"
- << "atClusterTime"
- << laterTimeSameStmt.asTimestamp());
+ << "atClusterTime" << laterTimeSameStmt.asTimestamp());
{
auto newCmd = txnRouter.attachTxnFieldsIfNeeded(operationContext(),
@@ -1835,8 +1748,7 @@ TEST_F(TransactionRouterTestWithDefaultSession,
BSONObj expectedReadConcern = BSON("level"
<< "snapshot"
- << "atClusterTime"
- << kInMemoryLogicalTime.asTimestamp());
+ << "atClusterTime" << kInMemoryLogicalTime.asTimestamp());
auto newCmd = txnRouter.attachTxnFieldsIfNeeded(operationContext(),
shard1,
@@ -2348,8 +2260,7 @@ TEST_F(TransactionRouterTestWithDefaultSession,
BSONObj expectedReadConcern = BSON("level"
<< "snapshot"
- << "atClusterTime"
- << kInMemoryLogicalTime.asTimestamp());
+ << "atClusterTime" << kInMemoryLogicalTime.asTimestamp());
auto newCmd = txnRouter.attachTxnFieldsIfNeeded(operationContext(),
shard1,
@@ -3194,12 +3105,10 @@ TEST_F(TransactionRouterMetricsTest, SlowLoggingPrintsTransactionParameters) {
BSONObjBuilder lsidBob;
getSessionId().serialize(&lsidBob);
- ASSERT_EQUALS(
- 1,
- countLogLinesContaining(str::stream() << "parameters:{ lsid: " << lsidBob.done().toString()
- << ", txnNumber: "
- << kTxnNumber
- << ", autocommit: false"));
+ ASSERT_EQUALS(1,
+ countLogLinesContaining(
+ str::stream() << "parameters:{ lsid: " << lsidBob.done().toString()
+ << ", txnNumber: " << kTxnNumber << ", autocommit: false"));
}
TEST_F(TransactionRouterMetricsTest, SlowLoggingPrintsDurationAtEnd) {
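Most of the churn in this test file is the BSON() macro, whose alternating name/value operator<< chain clang-format now packs the same way as str::stream. A standalone sketch (the header path is assumed):

#include "mongo/bson/bsonobjbuilder.h"  // assumed location of the BSON(...) macro

mongo::BSONObj makeSnapshotReadConcern(mongo::Timestamp atClusterTime) {
    // String values still land on their own line under the new style, while
    // scalar name/value pairs such as "atClusterTime" << ts share one.
    return BSON("level"
                << "snapshot"
                << "atClusterTime" << atClusterTime);
}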
diff --git a/src/mongo/s/write_ops/batch_downconvert.cpp b/src/mongo/s/write_ops/batch_downconvert.cpp
index f313a01b8dd..323af2928c1 100644
--- a/src/mongo/s/write_ops/batch_downconvert.cpp
+++ b/src/mongo/s/write_ops/batch_downconvert.cpp
@@ -78,14 +78,11 @@ Status extractGLEErrors(const BSONObj& gleResponse, GLEErrors* errors) {
}
errors->wcError->setStatus({ErrorCodes::WriteConcernFailed, msg});
errors->wcError->setErrInfo(BSON("wtimeout" << true));
- } else if (code == 10990 /* no longer primary */
- ||
- code == 16805 /* replicatedToNum no longer primary */
- ||
- code == 14830 /* gle wmode changed / invalid */
+ } else if (code == 10990 /* no longer primary */
+ || code == 16805 /* replicatedToNum no longer primary */
+ || code == 14830 /* gle wmode changed / invalid */
// 2.6 Error codes
- ||
- code == ErrorCodes::NotMaster || code == ErrorCodes::UnknownReplWriteConcern ||
+ || code == ErrorCodes::NotMaster || code == ErrorCodes::UnknownReplWriteConcern ||
code == ErrorCodes::WriteConcernFailed || code == ErrorCodes::PrimarySteppedDown) {
// Write concern errors that get returned as regular errors (result may not be ok: 1.0)
errors->wcError.reset(new WriteConcernErrorDetail());
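The condition reflowed above folds several numeric legacy getLastError codes into the write-concern-error bucket alongside the symbolic NotMaster, UnknownReplWriteConcern, WriteConcernFailed, and PrimarySteppedDown codes. A minimal sketch of that classification (the helper name is invented; the codes come from the hunk):

// Hypothetical helper, not in the tree: the legacy codes extractGLEErrors
// treats as write concern errors returned through the regular error path.
bool looksLikeLegacyWriteConcernError(int code) {
    return code == 10990     // no longer primary
        || code == 16805     // replicatedToNum no longer primary
        || code == 14830;    // gle wmode changed / invalid
}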
diff --git a/src/mongo/s/write_ops/batch_downconvert_test.cpp b/src/mongo/s/write_ops/batch_downconvert_test.cpp
index ca9a3cd34d9..a45e7ac1aaa 100644
--- a/src/mongo/s/write_ops/batch_downconvert_test.cpp
+++ b/src/mongo/s/write_ops/batch_downconvert_test.cpp
@@ -40,8 +40,8 @@
namespace {
using namespace mongo;
-using std::vector;
using std::deque;
+using std::vector;
//
// Tests for parsing GLE responses into write errors and write concern errors for write
@@ -205,14 +205,9 @@ TEST(LegacyGLESuppress, StripCode) {
TEST(LegacyGLESuppress, TimeoutDupError24) {
const BSONObj gleResponse = BSON("ok" << 0.0 << "err"
<< "message"
- << "code"
- << 12345
- << "err"
+ << "code" << 12345 << "err"
<< "timeout"
- << "code"
- << 56789
- << "wtimeout"
- << true);
+ << "code" << 56789 << "wtimeout" << true);
BSONObj stripped = stripNonWCInfo(gleResponse);
ASSERT_EQUALS(stripped.nFields(), 4);
@@ -221,4 +216,4 @@ TEST(LegacyGLESuppress, TimeoutDupError24) {
ASSERT_EQUALS(stripped["code"].numberInt(), 56789);
ASSERT(stripped["wtimeout"].trueValue());
}
-}
+} // namespace
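The "}" to "} // namespace" change above is most likely clang-format's FixNamespaceComments option (available since clang-format 5) normalizing namespace-closing comments; the batch_write_exec.cpp hunk below likewise corrects a mislabeled closer to "} // namespace mongo". The resulting convention, as a sketch:

namespace mongo {
namespace {
// File-local helpers live in the anonymous namespace.
}  // namespace
}  // namespace mongo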
diff --git a/src/mongo/s/write_ops/batch_write_exec.cpp b/src/mongo/s/write_ops/batch_write_exec.cpp
index 4412cd325ef..b06b0c1c63b 100644
--- a/src/mongo/s/write_ops/batch_write_exec.cpp
+++ b/src/mongo/s/write_ops/batch_write_exec.cpp
@@ -428,14 +428,9 @@ void BatchWriteExec::executeBatch(OperationContext* opCtx,
batchOp.abortBatch(errorFromStatus(
{ErrorCodes::NoProgressMade,
str::stream() << "no progress was made executing batch write op in "
- << clientRequest.getNS().ns()
- << " after "
- << kMaxRoundsWithoutProgress
- << " rounds ("
- << numCompletedOps
- << " ops completed in "
- << rounds
- << " rounds total)"}));
+ << clientRequest.getNS().ns() << " after "
+ << kMaxRoundsWithoutProgress << " rounds (" << numCompletedOps
+ << " ops completed in " << rounds << " rounds total)"}));
break;
}
}
@@ -469,4 +464,4 @@ const HostOpTimeMap& BatchWriteExecStats::getWriteOpTimes() const {
return _writeOpTimes;
}
-} // namespace
+} // namespace mongo
diff --git a/src/mongo/s/write_ops/batch_write_op.cpp b/src/mongo/s/write_ops/batch_write_op.cpp
index c517f92a76f..b046db54ef1 100644
--- a/src/mongo/s/write_ops/batch_write_op.cpp
+++ b/src/mongo/s/write_ops/batch_write_op.cpp
@@ -42,9 +42,9 @@
namespace mongo {
-using std::unique_ptr;
using std::set;
using std::stringstream;
+using std::unique_ptr;
using std::vector;
namespace {
@@ -171,9 +171,9 @@ int getWriteSizeBytes(const WriteOp& writeOp) {
static const auto boolSize = 1;
// Add the size of the 'collation' field, if present.
- estSize +=
- !item.getUpdate().getCollation() ? 0 : (UpdateOpEntry::kCollationFieldName.size() +
- item.getUpdate().getCollation()->objsize());
+ estSize += !item.getUpdate().getCollation() ? 0
+ : (UpdateOpEntry::kCollationFieldName.size() +
+ item.getUpdate().getCollation()->objsize());
// Add the size of the 'arrayFilters' field, if present.
estSize += !item.getUpdate().getArrayFilters() ? 0 : ([&item]() {
@@ -209,9 +209,9 @@ int getWriteSizeBytes(const WriteOp& writeOp) {
static const auto intSize = 4;
// Add the size of the 'collation' field, if present.
- estSize +=
- !item.getDelete().getCollation() ? 0 : (DeleteOpEntry::kCollationFieldName.size() +
- item.getDelete().getCollation()->objsize());
+ estSize += !item.getDelete().getCollation() ? 0
+ : (DeleteOpEntry::kCollationFieldName.size() +
+ item.getDelete().getCollation()->objsize());
// Add the size of the 'limit' field.
estSize += DeleteOpEntry::kMultiFieldName.size() + intSize;
@@ -592,7 +592,7 @@ void BatchWriteOp::noteBatchResponse(const TargetedWriteBatch& targetedBatch,
vector<WriteErrorDetail*>::iterator itemErrorIt = itemErrors.begin();
int index = 0;
WriteErrorDetail* lastError = NULL;
- for (vector<TargetedWrite *>::const_iterator it = targetedBatch.getWrites().begin();
+ for (vector<TargetedWrite*>::const_iterator it = targetedBatch.getWrites().begin();
it != targetedBatch.getWrites().end();
++it, ++index) {
const TargetedWrite* write = *it;
@@ -766,9 +766,9 @@ void BatchWriteOp::buildClientResponse(BatchedCommandResponse* batchResp) {
// Generate the multi-error message below
if (_wcErrors.size() == 1) {
auto status = _wcErrors.front().error.toStatus();
- error->setStatus(
- status.withReason(str::stream() << status.reason() << " at "
- << _wcErrors.front().endpoint.shardName));
+ error->setStatus(status.withReason(str::stream()
+ << status.reason() << " at "
+ << _wcErrors.front().endpoint.shardName));
} else {
StringBuilder msg;
msg << "multiple errors reported : ";
diff --git a/src/mongo/s/write_ops/batched_command_request_test.cpp b/src/mongo/s/write_ops/batched_command_request_test.cpp
index 079960cf320..133d077dc1a 100644
--- a/src/mongo/s/write_ops/batched_command_request_test.cpp
+++ b/src/mongo/s/write_ops/batched_command_request_test.cpp
@@ -43,14 +43,9 @@ TEST(BatchedCommandRequest, BasicInsert) {
BSONObj origInsertRequestObj = BSON("insert"
<< "test"
- << "documents"
- << insertArray
- << "writeConcern"
- << BSON("w" << 1)
- << "ordered"
- << true
- << "allowImplicitCollectionCreation"
- << false);
+ << "documents" << insertArray << "writeConcern"
+ << BSON("w" << 1) << "ordered" << true
+ << "allowImplicitCollectionCreation" << false);
for (auto docSeq : {false, true}) {
const auto opMsgRequest(toOpMsg("TestDB", origInsertRequestObj, docSeq));
@@ -69,13 +64,8 @@ TEST(BatchedCommandRequest, InsertWithShardVersion) {
BSONObj origInsertRequestObj = BSON("insert"
<< "test"
- << "documents"
- << insertArray
- << "writeConcern"
- << BSON("w" << 1)
- << "ordered"
- << true
- << "shardVersion"
+ << "documents" << insertArray << "writeConcern"
+ << BSON("w" << 1) << "ordered" << true << "shardVersion"
<< BSON_ARRAY(Timestamp(1, 2) << epoch));
for (auto docSeq : {false, true}) {
diff --git a/src/mongo/s/write_ops/batched_command_response.cpp b/src/mongo/s/write_ops/batched_command_response.cpp
index 9ec01a62e61..cd40da6ae1b 100644
--- a/src/mongo/s/write_ops/batched_command_response.cpp
+++ b/src/mongo/s/write_ops/batched_command_response.cpp
@@ -40,8 +40,8 @@
namespace mongo {
-using std::unique_ptr;
using std::string;
+using std::unique_ptr;
using str::stream;
@@ -112,8 +112,8 @@ BSONObj BatchedCommandResponse::toBSON() const {
builder.appendOID(electionId(), const_cast<OID*>(&_electionId));
if (_writeErrorDetails.get()) {
- auto errorMessage =
- [ errorCount = size_t(0), errorSize = size_t(0) ](StringData rawMessage) mutable {
+ auto errorMessage = [errorCount = size_t(0),
+ errorSize = size_t(0)](StringData rawMessage) mutable {
// Start truncating error messages once both of these limits are exceeded.
constexpr size_t kErrorSizeTruncationMin = 1024 * 1024;
constexpr size_t kErrorCountTruncationMin = 2;
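The lambda reformatted above uses C++14 init-captures plus mutable so the error-message truncator keeps running totals across calls. A self-contained sketch of the same pattern in standard C++ (the limits here are illustrative, not the real kError* constants):

#include <cstddef>
#include <iostream>
#include <string>

int main() {
    // Each init-capture gives the lambda its own counter, mutated from call
    // to call, mirroring the errorCount/errorSize pair above.
    auto shouldTruncate = [count = std::size_t(0),
                           bytes = std::size_t(0)](const std::string& msg) mutable {
        ++count;
        bytes += msg.size();
        return count > 2 && bytes > 1024;  // truncate once both limits trip
    };

    std::cout << shouldTruncate(std::string(600, 'x')) << '\n';  // 0
    std::cout << shouldTruncate(std::string(600, 'x')) << '\n';  // 0
    std::cout << shouldTruncate(std::string(600, 'x')) << '\n';  // 1
}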
diff --git a/src/mongo/s/write_ops/batched_command_response_test.cpp b/src/mongo/s/write_ops/batched_command_response_test.cpp
index 09e2b7d0eed..726760554eb 100644
--- a/src/mongo/s/write_ops/batched_command_response_test.cpp
+++ b/src/mongo/s/write_ops/batched_command_response_test.cpp
@@ -58,17 +58,13 @@ TEST(BatchedCommandResponse, Basic) {
BSONObj writeConcernError(
BSON("code" << 8 << "codeName" << ErrorCodes::errorString(ErrorCodes::Error(8)) << "errmsg"
<< "norepl"
- << "errInfo"
- << BSON("a" << 1)));
+ << "errInfo" << BSON("a" << 1)));
BSONObj origResponseObj =
- BSON(BatchedCommandResponse::n(0) << "opTime" << mongo::Timestamp(1ULL)
- << BatchedCommandResponse::writeErrors()
- << writeErrorsArray
- << BatchedCommandResponse::writeConcernError()
- << writeConcernError
- << "ok"
- << 1.0);
+ BSON(BatchedCommandResponse::n(0)
+ << "opTime" << mongo::Timestamp(1ULL) << BatchedCommandResponse::writeErrors()
+ << writeErrorsArray << BatchedCommandResponse::writeConcernError() << writeConcernError
+ << "ok" << 1.0);
string errMsg;
BatchedCommandResponse response;
diff --git a/src/mongo/s/write_ops/chunk_manager_targeter.cpp b/src/mongo/s/write_ops/chunk_manager_targeter.cpp
index d723f59d70c..39bb70a734b 100644
--- a/src/mongo/s/write_ops/chunk_manager_targeter.cpp
+++ b/src/mongo/s/write_ops/chunk_manager_targeter.cpp
@@ -322,9 +322,9 @@ bool isMetadataDifferent(const std::shared_ptr<ChunkManager>& managerA,
}
/**
-* Whether or not the manager/primary pair was changed or refreshed from a previous version
-* of the metadata.
-*/
+ * Whether or not the manager/primary pair was changed or refreshed from a previous version
+ * of the metadata.
+ */
bool wasMetadataRefreshed(const std::shared_ptr<ChunkManager>& managerA,
const std::shared_ptr<Shard>& primaryA,
const std::shared_ptr<ChunkManager>& managerB,
@@ -456,8 +456,9 @@ StatusWith<std::vector<ShardEndpoint>> ChunkManagerTargeter::targetUpdate(
}
// Utility function to target an update by shard key, and to handle any potential error results.
- const auto targetByShardKey = [&collation, this](
- StatusWith<BSONObj> shardKey, StringData msg) -> StatusWith<std::vector<ShardEndpoint>> {
+ const auto targetByShardKey = [&collation,
+ this](StatusWith<BSONObj> shardKey,
+ StringData msg) -> StatusWith<std::vector<ShardEndpoint>> {
if (!shardKey.isOK()) {
return shardKey.getStatus().withContext(msg);
}
@@ -505,10 +506,8 @@ StatusWith<std::vector<ShardEndpoint>> ChunkManagerTargeter::targetUpdate(
"collation) or must target a single shard (and have the simple "
"collation), but this update targeted "
<< shardEndPoints.getValue().size()
- << " shards. Update request: "
- << updateDoc.toBSON()
- << ", shard key pattern: "
- << shardKeyPattern.toString()};
+ << " shards. Update request: " << updateDoc.toBSON()
+ << ", shard key pattern: " << shardKeyPattern.toString()};
}
// If the request is {multi:false}, then this is a single op-style update which we are
@@ -567,8 +566,8 @@ StatusWith<std::vector<ShardEndpoint>> ChunkManagerTargeter::targetDelete(
ExtensionsCallbackNoop(),
MatchExpressionParser::kAllowAllSpecialFeatures);
if (!cq.isOK()) {
- return cq.getStatus().withContext(str::stream() << "Could not parse delete query "
- << deleteDoc.getQ());
+ return cq.getStatus().withContext(str::stream()
+ << "Could not parse delete query " << deleteDoc.getQ());
}
// Single deletes must target a single shard or be exact-ID.
@@ -580,8 +579,7 @@ StatusWith<std::vector<ShardEndpoint>> ChunkManagerTargeter::targetDelete(
"match on _id (and have the collection default collation) or "
"contain the shard key (and have the simple collation). Delete "
"request: "
- << deleteDoc.toBSON()
- << ", shard key pattern: "
+ << deleteDoc.toBSON() << ", shard key pattern: "
<< _routingInfo->cm()->getShardKeyPattern().toString());
}