Diffstat (limited to 'src/mongo/s/catalog')
-rw-r--r--  src/mongo/s/catalog/dist_lock_catalog_mock.cpp                                      37
-rw-r--r--  src/mongo/s/catalog/dist_lock_manager_mock.cpp                                      24
-rw-r--r--  src/mongo/s/catalog/replset/catalog_manager_replica_set.cpp                         82
-rw-r--r--  src/mongo/s/catalog/replset/catalog_manager_replica_set_add_shard_test.cpp          42
-rw-r--r--  src/mongo/s/catalog/replset/catalog_manager_replica_set_shard_collection_test.cpp   20
-rw-r--r--  src/mongo/s/catalog/replset/catalog_manager_replica_set_test.cpp                   116
-rw-r--r--  src/mongo/s/catalog/replset/catalog_manager_replica_set_upgrade_test.cpp             7
-rw-r--r--  src/mongo/s/catalog/replset/dist_lock_catalog_impl.cpp                              30
-rw-r--r--  src/mongo/s/catalog/replset/dist_lock_catalog_impl_test.cpp                         79
-rw-r--r--  src/mongo/s/catalog/replset/replset_dist_lock_manager.cpp                            4
-rw-r--r--  src/mongo/s/catalog/replset/replset_dist_lock_manager_test.cpp                     115
-rw-r--r--  src/mongo/s/catalog/type_changelog_test.cpp                                          82
-rw-r--r--  src/mongo/s/catalog/type_chunk_test.cpp                                             100
-rw-r--r--  src/mongo/s/catalog/type_collection_test.cpp                                         37
-rw-r--r--  src/mongo/s/catalog/type_config_version_test.cpp                                      8
-rw-r--r--  src/mongo/s/catalog/type_locks_test.cpp                                              94
-rw-r--r--  src/mongo/s/catalog/type_mongos_test.cpp                                             57
-rw-r--r--  src/mongo/s/catalog/type_shard_test.cpp                                               3
18 files changed, 556 insertions(+), 381 deletions(-)
diff --git a/src/mongo/s/catalog/dist_lock_catalog_mock.cpp b/src/mongo/s/catalog/dist_lock_catalog_mock.cpp
index 4f19df96a47..db7d3151d6e 100644
--- a/src/mongo/s/catalog/dist_lock_catalog_mock.cpp
+++ b/src/mongo/s/catalog/dist_lock_catalog_mock.cpp
@@ -52,8 +52,14 @@ void noGrabLockFuncSet(StringData lockID,
Date_t time,
StringData why) {
FAIL(str::stream() << "grabLock not expected to be called. "
- << "lockID: " << lockID << ", who: " << who << ", processId: " << processId
- << ", why: " << why);
+ << "lockID: "
+ << lockID
+ << ", who: "
+ << who
+ << ", processId: "
+ << processId
+ << ", why: "
+ << why);
}
void noOvertakeLockFuncSet(StringData lockID,
@@ -64,13 +70,22 @@ void noOvertakeLockFuncSet(StringData lockID,
Date_t time,
StringData why) {
FAIL(str::stream() << "overtakeLock not expected to be called. "
- << "lockID: " << lockID << ", currentHolderTS: " << currentHolderTS
- << ", who: " << who << ", processId: " << processId << ", why: " << why);
+ << "lockID: "
+ << lockID
+ << ", currentHolderTS: "
+ << currentHolderTS
+ << ", who: "
+ << who
+ << ", processId: "
+ << processId
+ << ", why: "
+ << why);
}
void noUnLockFuncSet(const OID& lockSessionID) {
FAIL(str::stream() << "unlock not expected to be called. "
- << "lockSessionID: " << lockSessionID);
+ << "lockSessionID: "
+ << lockSessionID);
}
void noPingFuncSet(StringData processID, Date_t ping) {
@@ -79,22 +94,26 @@ void noPingFuncSet(StringData processID, Date_t ping) {
void noStopPingFuncSet(StringData processID) {
FAIL(str::stream() << "stopPing not expected to be called. "
- << "processID: " << processID);
+ << "processID: "
+ << processID);
}
void noGetLockByTSSet(const OID& lockSessionID) {
FAIL(str::stream() << "getLockByTS not expected to be called. "
- << "lockSessionID: " << lockSessionID);
+ << "lockSessionID: "
+ << lockSessionID);
}
void noGetLockByNameSet(StringData name) {
FAIL(str::stream() << "getLockByName not expected to be called. "
- << "lockName: " << name);
+ << "lockName: "
+ << name);
}
void noGetPingSet(StringData processId) {
FAIL(str::stream() << "getPing not expected to be called. "
- << "lockName: " << processId);
+ << "lockName: "
+ << processId);
}
void noGetServerInfoSet() {
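
The hunks above wrap long str::stream() diagnostics so that each << operand sits on its own line once the expression no longer fits on one line. For reference only (not part of this patch), a minimal standalone sketch of the same layout using just the standard library; error_message and its parameters are hypothetical names:

#include <sstream>
#include <string>

// Builds a diagnostic string with one stream operand per source line,
// mirroring the wrapped layout adopted in dist_lock_catalog_mock.cpp above.
std::string error_message(const std::string& lockID,
                          const std::string& who,
                          const std::string& processId,
                          const std::string& why) {
    std::ostringstream os;
    os << "grabLock not expected to be called. "
       << "lockID: "
       << lockID
       << ", who: "
       << who
       << ", processId: "
       << processId
       << ", why: "
       << why;
    return os.str();
}
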
diff --git a/src/mongo/s/catalog/dist_lock_manager_mock.cpp b/src/mongo/s/catalog/dist_lock_manager_mock.cpp
index 7c2c58e83ed..9325caf29d7 100644
--- a/src/mongo/s/catalog/dist_lock_manager_mock.cpp
+++ b/src/mongo/s/catalog/dist_lock_manager_mock.cpp
@@ -34,9 +34,9 @@
#include <algorithm>
+#include "mongo/unittest/unittest.h"
#include "mongo/util/mongoutils/str.h"
#include "mongo/util/time_support.h"
-#include "mongo/unittest/unittest.h"
namespace mongo {
@@ -47,8 +47,14 @@ void NoLockFuncSet(StringData name,
Milliseconds waitFor,
Milliseconds lockTryInterval) {
FAIL(str::stream() << "Lock not expected to be called. "
- << "Name: " << name << ", whyMessage: " << whyMessage
- << ", waitFor: " << waitFor << ", lockTryInterval: " << lockTryInterval);
+ << "Name: "
+ << name
+ << ", whyMessage: "
+ << whyMessage
+ << ", waitFor: "
+ << waitFor
+ << ", lockTryInterval: "
+ << lockTryInterval);
}
} // namespace
@@ -90,9 +96,9 @@ StatusWith<DistLockManager::ScopedDistLock> DistLockManagerMock::lockWithSession
return _lockReturnStatus;
}
- if (_locks.end() != std::find_if(_locks.begin(),
- _locks.end(),
- [name](LockInfo info) -> bool { return info.name == name; })) {
+ if (_locks.end() != std::find_if(_locks.begin(), _locks.end(), [name](LockInfo info) -> bool {
+ return info.name == name;
+ })) {
return Status(ErrorCodes::LockBusy,
str::stream() << "Lock \"" << name << "\" is already taken");
}
@@ -111,9 +117,9 @@ void DistLockManagerMock::unlockAll(OperationContext* txn, const std::string& pr
void DistLockManagerMock::unlock(OperationContext* txn, const DistLockHandle& lockHandle) {
std::vector<LockInfo>::iterator it =
- std::find_if(_locks.begin(),
- _locks.end(),
- [&lockHandle](LockInfo info) -> bool { return info.lockID == lockHandle; });
+ std::find_if(_locks.begin(), _locks.end(), [&lockHandle](LockInfo info) -> bool {
+ return info.lockID == lockHandle;
+ });
if (it == _locks.end()) {
return;
}
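
The find_if hunks above show how a call whose trailing argument is a lambda now keeps the call arguments on one line and lets only the lambda body break. A self-contained sketch of the same shape using only standard containers; LockInfo and isLockTaken are illustrative names, not part of this patch:

#include <algorithm>
#include <string>
#include <vector>

// Illustrative stand-in for the mock's lock bookkeeping.
struct LockInfo {
    std::string name;
};

// The find_if call keeps its arguments on one line; only the trailing lambda
// body wraps, matching the post-format layout of DistLockManagerMock above.
bool isLockTaken(const std::vector<LockInfo>& locks, const std::string& name) {
    return locks.end() != std::find_if(locks.begin(), locks.end(), [&name](const LockInfo& info) {
               return info.name == name;
           });
}
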
diff --git a/src/mongo/s/catalog/replset/catalog_manager_replica_set.cpp b/src/mongo/s/catalog/replset/catalog_manager_replica_set.cpp
index f5e1522eed6..2db57413984 100644
--- a/src/mongo/s/catalog/replset/catalog_manager_replica_set.cpp
+++ b/src/mongo/s/catalog/replset/catalog_manager_replica_set.cpp
@@ -56,16 +56,16 @@
#include "mongo/s/catalog/config_server_version.h"
#include "mongo/s/catalog/dist_lock_manager.h"
#include "mongo/s/catalog/type_changelog.h"
+#include "mongo/s/catalog/type_chunk.h"
#include "mongo/s/catalog/type_collection.h"
#include "mongo/s/catalog/type_config_version.h"
-#include "mongo/s/catalog/type_chunk.h"
#include "mongo/s/catalog/type_database.h"
#include "mongo/s/catalog/type_shard.h"
#include "mongo/s/catalog/type_tags.h"
+#include "mongo/s/chunk_manager.h"
#include "mongo/s/client/shard.h"
#include "mongo/s/client/shard_connection.h"
#include "mongo/s/client/shard_registry.h"
-#include "mongo/s/chunk_manager.h"
#include "mongo/s/config.h"
#include "mongo/s/grid.h"
#include "mongo/s/set_shard_version_request.h"
@@ -188,7 +188,9 @@ StatusWith<ShardType> CatalogManagerReplicaSet::_validateHostAsShard(
return {ErrorCodes::OperationFailed,
str::stream() << "'" << hostAndPort.toString() << "' "
<< "is already a member of the existing shard '"
- << shard->getConnString().toString() << "' (" << shard->getId()
+ << shard->getConnString().toString()
+ << "' ("
+ << shard->getId()
<< ")."};
}
}
@@ -231,7 +233,8 @@ StatusWith<ShardType> CatalogManagerReplicaSet::_validateHostAsShard(
str::stream() << "isMaster returned invalid 'ismaster' "
<< "field when attempting to add "
<< connectionString.toString()
- << " as a shard: " << status.reason());
+ << " as a shard: "
+ << status.reason());
}
if (!isMaster) {
return {ErrorCodes::NotMaster,
@@ -255,7 +258,8 @@ StatusWith<ShardType> CatalogManagerReplicaSet::_validateHostAsShard(
if (!providedSetName.empty() && foundSetName.empty()) {
return {ErrorCodes::OperationFailed,
str::stream() << "host did not return a set name; "
- << "is the replica set still initializing? " << resIsMaster};
+ << "is the replica set still initializing? "
+ << resIsMaster};
}
// Make sure the set name specified in the connection string matches the one where its hosts
@@ -263,7 +267,8 @@ StatusWith<ShardType> CatalogManagerReplicaSet::_validateHostAsShard(
if (!providedSetName.empty() && (providedSetName != foundSetName)) {
return {ErrorCodes::OperationFailed,
str::stream() << "the provided connection string (" << connectionString.toString()
- << ") does not match the actual set name " << foundSetName};
+ << ") does not match the actual set name "
+ << foundSetName};
}
// Is it a config server?
@@ -304,8 +309,11 @@ StatusWith<ShardType> CatalogManagerReplicaSet::_validateHostAsShard(
if (hostSet.find(host) == hostSet.end()) {
return {ErrorCodes::OperationFailed,
str::stream() << "in seed list " << connectionString.toString() << ", host "
- << host << " does not belong to replica set " << foundSetName
- << "; found " << resIsMaster.toString()};
+ << host
+ << " does not belong to replica set "
+ << foundSetName
+ << "; found "
+ << resIsMaster.toString()};
}
}
}
@@ -417,8 +425,7 @@ StatusWith<BSONObj> CatalogManagerReplicaSet::_runCommandForAddShard(
Status(ErrorCodes::InternalError, "Internal error running command");
auto callStatus = _executorForAddShard->scheduleRemoteCommand(
- request,
- [&responseStatus](const executor::TaskExecutor::RemoteCommandCallbackArgs& args) {
+ request, [&responseStatus](const executor::TaskExecutor::RemoteCommandCallbackArgs& args) {
responseStatus = args.response;
});
if (!callStatus.isOK()) {
@@ -465,9 +472,13 @@ StatusWith<string> CatalogManagerReplicaSet::addShard(OperationContext* txn,
const auto& dbDoc = dbt.getValue().value;
return Status(ErrorCodes::OperationFailed,
str::stream() << "can't add shard "
- << "'" << shardConnectionString.toString() << "'"
- << " because a local database '" << dbName
- << "' exists in another " << dbDoc.getPrimary());
+ << "'"
+ << shardConnectionString.toString()
+ << "'"
+ << " because a local database '"
+ << dbName
+ << "' exists in another "
+ << dbDoc.getPrimary());
} else if (dbt != ErrorCodes::NamespaceNotFound) {
return dbt.getStatus();
}
@@ -807,7 +818,8 @@ Status CatalogManagerReplicaSet::shardCollection(OperationContext* txn,
if (countStatus.getValue() > 0) {
return Status(ErrorCodes::AlreadyInitialized,
str::stream() << "collection " << ns << " already sharded with "
- << countStatus.getValue() << " chunks.");
+ << countStatus.getValue()
+ << " chunks.");
}
}
@@ -1094,7 +1106,9 @@ Status CatalogManagerReplicaSet::getCollections(OperationContext* txn,
collections->clear();
return {ErrorCodes::FailedToParse,
str::stream() << "error while parsing " << CollectionType::ConfigNS
- << " document: " << obj << " : "
+ << " document: "
+ << obj
+ << " : "
<< collectionResult.getStatus().toString()};
}
@@ -1334,7 +1348,8 @@ Status CatalogManagerReplicaSet::getChunks(OperationContext* txn,
return {ErrorCodes::FailedToParse,
stream() << "Failed to parse chunk with id ("
<< obj[ChunkType::name()].toString()
- << "): " << chunkRes.getStatus().toString()};
+ << "): "
+ << chunkRes.getStatus().toString()};
}
chunks->push_back(chunkRes.getValue());
@@ -1366,8 +1381,8 @@ Status CatalogManagerReplicaSet::getTagsForCollection(OperationContext* txn,
if (!tagRes.isOK()) {
tags->clear();
return Status(ErrorCodes::FailedToParse,
- str::stream()
- << "Failed to parse tag: " << tagRes.getStatus().toString());
+ str::stream() << "Failed to parse tag: "
+ << tagRes.getStatus().toString());
}
tags->push_back(tagRes.getValue());
@@ -1381,7 +1396,8 @@ StatusWith<string> CatalogManagerReplicaSet::getTagForChunk(OperationContext* tx
const ChunkType& chunk) {
BSONObj query =
BSON(TagsType::ns(collectionNs) << TagsType::min() << BSON("$lte" << chunk.getMin())
- << TagsType::max() << BSON("$gte" << chunk.getMax()));
+ << TagsType::max()
+ << BSON("$gte" << chunk.getMax()));
auto findStatus = _exhaustiveFindOnConfig(
txn, kConfigReadSelector, NamespaceString(TagsType::ConfigNS), query, BSONObj(), 1);
if (!findStatus.isOK()) {
@@ -1400,7 +1416,8 @@ StatusWith<string> CatalogManagerReplicaSet::getTagForChunk(OperationContext* tx
if (!tagsResult.isOK()) {
return {ErrorCodes::FailedToParse,
stream() << "error while parsing " << TagsType::ConfigNS << " document: " << tagsDoc
- << " : " << tagsResult.getStatus().toString()};
+ << " : "
+ << tagsResult.getStatus().toString()};
}
return tagsResult.getValue().getTag();
}
@@ -1424,7 +1441,8 @@ StatusWith<repl::OpTimeWith<std::vector<ShardType>>> CatalogManagerReplicaSet::g
shards.clear();
return {ErrorCodes::FailedToParse,
stream() << "Failed to parse shard with id ("
- << doc[ShardType::name()].toString() << ")"
+ << doc[ShardType::name()].toString()
+ << ")"
<< causedBy(shardRes.getStatus())};
}
@@ -1432,7 +1450,8 @@ StatusWith<repl::OpTimeWith<std::vector<ShardType>>> CatalogManagerReplicaSet::g
if (!validateStatus.isOK()) {
return {validateStatus.code(),
stream() << "Failed to validate shard with id ("
- << doc[ShardType::name()].toString() << ")"
+ << doc[ShardType::name()].toString()
+ << ")"
<< causedBy(validateStatus)};
}
@@ -1550,8 +1569,9 @@ Status CatalogManagerReplicaSet::applyChunkOpsDeprecated(OperationContext* txn,
const BSONArray& preCondition,
const std::string& nss,
const ChunkVersion& lastChunkVersion) {
- BSONObj cmd = BSON("applyOps" << updateOps << "preCondition" << preCondition
- << kWriteConcernField << kMajorityWriteConcern.toBSON());
+ BSONObj cmd =
+ BSON("applyOps" << updateOps << "preCondition" << preCondition << kWriteConcernField
+ << kMajorityWriteConcern.toBSON());
auto response = Grid::get(txn)->shardRegistry()->getConfigShard()->runCommand(
txn,
@@ -1631,7 +1651,8 @@ void CatalogManagerReplicaSet::writeConfigServerDirect(OperationContext* txn,
if (batchRequest.sizeWriteOps() != 1) {
toBatchError(Status(ErrorCodes::InvalidOptions,
str::stream() << "Writes to config servers must have batch size of 1, "
- << "found " << batchRequest.sizeWriteOps()),
+ << "found "
+ << batchRequest.sizeWriteOps()),
batchResponse);
return;
}
@@ -1846,7 +1867,10 @@ Status CatalogManagerReplicaSet::_checkDbDoesNotExist(OperationContext* txn,
return Status(ErrorCodes::DatabaseDifferCase,
str::stream() << "can't have 2 databases that just differ on case "
- << " have: " << actualDbName << " want to add: " << dbName);
+ << " have: "
+ << actualDbName
+ << " want to add: "
+ << dbName);
}
StatusWith<std::string> CatalogManagerReplicaSet::_generateNewShardName(OperationContext* txn) {
@@ -1997,7 +2021,8 @@ Status CatalogManagerReplicaSet::initConfigVersion(OperationContext* txn) {
if (versionInfo.getCurrentVersion() < CURRENT_CONFIG_VERSION) {
return {ErrorCodes::IncompatibleShardingConfigVersion,
str::stream() << "need to upgrade current cluster version to v"
- << CURRENT_CONFIG_VERSION << "; currently at v"
+ << CURRENT_CONFIG_VERSION
+ << "; currently at v"
<< versionInfo.getCurrentVersion()};
}
@@ -2006,7 +2031,8 @@ Status CatalogManagerReplicaSet::initConfigVersion(OperationContext* txn) {
return {ErrorCodes::IncompatibleShardingConfigVersion,
str::stream() << "unable to create new config version document after "
- << kMaxConfigVersionInitRetry << " retries"};
+ << kMaxConfigVersionInitRetry
+ << " retries"};
}
StatusWith<VersionType> CatalogManagerReplicaSet::_getConfigVersion(OperationContext* txn) {
diff --git a/src/mongo/s/catalog/replset/catalog_manager_replica_set_add_shard_test.cpp b/src/mongo/s/catalog/replset/catalog_manager_replica_set_add_shard_test.cpp
index 5b877f60f74..4bbdb9bcebd 100644
--- a/src/mongo/s/catalog/replset/catalog_manager_replica_set_add_shard_test.cpp
+++ b/src/mongo/s/catalog/replset/catalog_manager_replica_set_add_shard_test.cpp
@@ -234,13 +234,16 @@ TEST_F(AddShardTest, Standalone) {
expectListDatabases(shardTarget,
std::vector<BSONObj>{BSON("name"
<< "local"
- << "sizeOnDisk" << 1000),
+ << "sizeOnDisk"
+ << 1000),
BSON("name"
<< "TestDB1"
- << "sizeOnDisk" << 2000),
+ << "sizeOnDisk"
+ << 2000),
BSON("name"
<< "TestDB2"
- << "sizeOnDisk" << 5000)});
+ << "sizeOnDisk"
+ << 5000)});
// Make sure the shard add code checks for the presence of each of the two databases we returned
// in the previous call, in the config server metadata
@@ -309,13 +312,16 @@ TEST_F(AddShardTest, StandaloneGenerateName) {
expectListDatabases(shardTarget,
std::vector<BSONObj>{BSON("name"
<< "local"
- << "sizeOnDisk" << 1000),
+ << "sizeOnDisk"
+ << 1000),
BSON("name"
<< "TestDB1"
- << "sizeOnDisk" << 2000),
+ << "sizeOnDisk"
+ << 2000),
BSON("name"
<< "TestDB2"
- << "sizeOnDisk" << 5000)});
+ << "sizeOnDisk"
+ << 5000)});
// Make sure the shard add code checks for the presence of each of the two databases we returned
// in the previous call, in the config server metadata
@@ -629,7 +635,8 @@ TEST_F(AddShardTest, ShardIsCSRSConfigServer) {
BSONObj commandResponse = BSON("ok" << 1 << "ismaster" << true << "setName"
<< "config"
- << "configsvr" << true);
+ << "configsvr"
+ << true);
expectValidationCheck(shardTarget, commandResponse);
future.timed_get(kFutureTimeout);
@@ -660,7 +667,8 @@ TEST_F(AddShardTest, ReplicaSetMissingHostsProvidedInSeedList) {
hosts.append("host1:12345");
BSONObj commandResponse = BSON("ok" << 1 << "ismaster" << true << "setName"
<< "mySet"
- << "hosts" << hosts.arr());
+ << "hosts"
+ << hosts.arr());
expectValidationCheck(shardTarget, commandResponse);
future.timed_get(kFutureTimeout);
@@ -692,7 +700,8 @@ TEST_F(AddShardTest, ShardNameIsConfig) {
hosts.append("host2:12345");
BSONObj commandResponse = BSON("ok" << 1 << "ismaster" << true << "setName"
<< "mySet"
- << "hosts" << hosts.arr());
+ << "hosts"
+ << hosts.arr());
expectValidationCheck(shardTarget, commandResponse);
future.timed_get(kFutureTimeout);
@@ -724,7 +733,8 @@ TEST_F(AddShardTest, ShardContainsExistingDatabase) {
hosts.append("host2:12345");
BSONObj commandResponse = BSON("ok" << 1 << "ismaster" << true << "setName"
<< "mySet"
- << "hosts" << hosts.arr());
+ << "hosts"
+ << hosts.arr());
expectValidationCheck(shardTarget, commandResponse);
expectListDatabases(shardTarget,
@@ -764,7 +774,8 @@ TEST_F(AddShardTest, ReAddExistingShard) {
hosts.append("host2:12345");
BSONObj commandResponse = BSON("ok" << 1 << "ismaster" << true << "setName"
<< "mySet"
- << "hosts" << hosts.arr());
+ << "hosts"
+ << hosts.arr());
expectValidationCheck(shardTarget, commandResponse);
expectListDatabases(shardTarget,
@@ -828,7 +839,8 @@ TEST_F(AddShardTest, SuccessfullyAddReplicaSet) {
hosts.append("host2:12345");
BSONObj commandResponse = BSON("ok" << 1 << "ismaster" << true << "setName"
<< "mySet"
- << "hosts" << hosts.arr());
+ << "hosts"
+ << hosts.arr());
expectValidationCheck(shardTarget, commandResponse);
expectListDatabases(shardTarget,
@@ -884,7 +896,8 @@ TEST_F(AddShardTest, AddShardSucceedsEvenIfAddingDBsFromNewShardFails) {
hosts.append("host2:12345");
BSONObj commandResponse = BSON("ok" << 1 << "ismaster" << true << "setName"
<< "mySet"
- << "hosts" << hosts.arr());
+ << "hosts"
+ << hosts.arr());
expectValidationCheck(shardTarget, commandResponse);
expectListDatabases(shardTarget,
@@ -964,7 +977,8 @@ TEST_F(AddShardTest, ReplicaSetExtraHostsDiscovered) {
hosts.append("host2:12345");
BSONObj commandResponse = BSON("ok" << 1 << "ismaster" << true << "setName"
<< "mySet"
- << "hosts" << hosts.arr());
+ << "hosts"
+ << hosts.arr());
expectValidationCheck(shardTarget, commandResponse);
expectListDatabases(shardTarget, {});
diff --git a/src/mongo/s/catalog/replset/catalog_manager_replica_set_shard_collection_test.cpp b/src/mongo/s/catalog/replset/catalog_manager_replica_set_shard_collection_test.cpp
index 7e4fdb8330c..901f12798ea 100644
--- a/src/mongo/s/catalog/replset/catalog_manager_replica_set_shard_collection_test.cpp
+++ b/src/mongo/s/catalog/replset/catalog_manager_replica_set_shard_collection_test.cpp
@@ -340,8 +340,11 @@ TEST_F(ShardCollectionTest, noInitialChunksOrData) {
{
BSONObj logChangeDetail =
BSON("shardKey" << keyPattern.toBSON() << "collection" << ns << "primary"
- << shard.getName() + ":" + shard.getHost() << "initShards"
- << BSONArray() << "numChunks" << 1);
+ << shard.getName() + ":" + shard.getHost()
+ << "initShards"
+ << BSONArray()
+ << "numChunks"
+ << 1);
expectChangeLogCreate(configHost, BSON("ok" << 1));
expectChangeLogInsert(
configHost, network()->now(), "shardCollection.start", ns, logChangeDetail);
@@ -511,9 +514,11 @@ TEST_F(ShardCollectionTest, withInitialChunks) {
{
BSONObj logChangeDetail =
BSON("shardKey" << keyPattern.toBSON() << "collection" << ns << "primary"
- << shard0.getName() + ":" + shard0.getHost() << "initShards"
+ << shard0.getName() + ":" + shard0.getHost()
+ << "initShards"
<< BSON_ARRAY(shard0.getName() << shard1.getName() << shard2.getName())
- << "numChunks" << (int)expectedChunks.size());
+ << "numChunks"
+ << (int)expectedChunks.size());
expectChangeLogCreate(configHost, BSON("ok" << 1));
expectChangeLogInsert(
configHost, network()->now(), "shardCollection.start", ns, logChangeDetail);
@@ -655,8 +660,11 @@ TEST_F(ShardCollectionTest, withInitialData) {
{
BSONObj logChangeDetail =
BSON("shardKey" << keyPattern.toBSON() << "collection" << ns << "primary"
- << shard.getName() + ":" + shard.getHost() << "initShards"
- << BSONArray() << "numChunks" << 1);
+ << shard.getName() + ":" + shard.getHost()
+ << "initShards"
+ << BSONArray()
+ << "numChunks"
+ << 1);
expectChangeLogCreate(configHost, BSON("ok" << 1));
expectChangeLogInsert(
configHost, network()->now(), "shardCollection.start", ns, logChangeDetail);
diff --git a/src/mongo/s/catalog/replset/catalog_manager_replica_set_test.cpp b/src/mongo/s/catalog/replset/catalog_manager_replica_set_test.cpp
index c539ee6ad5d..574a309c63b 100644
--- a/src/mongo/s/catalog/replset/catalog_manager_replica_set_test.cpp
+++ b/src/mongo/s/catalog/replset/catalog_manager_replica_set_test.cpp
@@ -32,8 +32,8 @@
#include <pcrecpp.h>
-#include "mongo/client/remote_command_targeter_mock.h"
#include "mongo/bson/json.h"
+#include "mongo/client/remote_command_targeter_mock.h"
#include "mongo/db/commands.h"
#include "mongo/db/query/lite_parsed_query.h"
#include "mongo/db/repl/read_concern_args.h"
@@ -658,9 +658,13 @@ TEST_F(CatalogManagerReplSetTest, RunUserManagementWriteCommandSuccess) {
// Since no write concern was sent we will add w:majority
ASSERT_EQUALS(BSON("dropUser"
<< "test"
- << "writeConcern" << BSON("w"
- << "majority"
- << "wtimeout" << 0) << "maxTimeMS" << 30000),
+ << "writeConcern"
+ << BSON("w"
+ << "majority"
+ << "wtimeout"
+ << 0)
+ << "maxTimeMS"
+ << 30000),
request.cmdObj);
ASSERT_EQUALS(BSON(rpc::kReplSetMetadataFieldName << 1), request.metadata);
@@ -679,14 +683,14 @@ TEST_F(CatalogManagerReplSetTest, RunUserManagementWriteCommandInvalidWriteConce
configTargeter()->setFindHostReturnValue(HostAndPort("TestHost1"));
BSONObjBuilder responseBuilder;
- bool ok =
- catalogManager()->runUserManagementWriteCommand(operationContext(),
- "dropUser",
- "test",
- BSON("dropUser"
- << "test"
- << "writeConcern" << BSON("w" << 2)),
- &responseBuilder);
+ bool ok = catalogManager()->runUserManagementWriteCommand(operationContext(),
+ "dropUser",
+ "test",
+ BSON("dropUser"
+ << "test"
+ << "writeConcern"
+ << BSON("w" << 2)),
+ &responseBuilder);
ASSERT_FALSE(ok);
Status commandStatus = getStatusFromCommandResult(responseBuilder.obj());
@@ -708,31 +712,35 @@ TEST_F(CatalogManagerReplSetTest, RunUserManagementWriteCommandRewriteWriteConce
},
Status::OK());
- auto future =
- launchAsync([this] {
- BSONObjBuilder responseBuilder;
- bool ok =
- catalogManager()->runUserManagementWriteCommand(
- operationContext(),
- "dropUser",
- "test",
- BSON("dropUser"
- << "test"
- << "writeConcern" << BSON("w" << 1 << "wtimeout" << 30)),
- &responseBuilder);
- ASSERT_FALSE(ok);
-
- Status commandStatus = getStatusFromCommandResult(responseBuilder.obj());
- ASSERT_EQUALS(ErrorCodes::UserNotFound, commandStatus);
- });
+ auto future = launchAsync([this] {
+ BSONObjBuilder responseBuilder;
+ bool ok =
+ catalogManager()->runUserManagementWriteCommand(operationContext(),
+ "dropUser",
+ "test",
+ BSON("dropUser"
+ << "test"
+ << "writeConcern"
+ << BSON("w" << 1 << "wtimeout"
+ << 30)),
+ &responseBuilder);
+ ASSERT_FALSE(ok);
+
+ Status commandStatus = getStatusFromCommandResult(responseBuilder.obj());
+ ASSERT_EQUALS(ErrorCodes::UserNotFound, commandStatus);
+ });
onCommand([](const RemoteCommandRequest& request) {
ASSERT_EQUALS("test", request.dbname);
ASSERT_EQUALS(BSON("dropUser"
<< "test"
- << "writeConcern" << BSON("w"
- << "majority"
- << "wtimeout" << 30) << "maxTimeMS" << 30000),
+ << "writeConcern"
+ << BSON("w"
+ << "majority"
+ << "wtimeout"
+ << 30)
+ << "maxTimeMS"
+ << 30000),
request.cmdObj);
ASSERT_EQUALS(BSON(rpc::kReplSetMetadataFieldName << 1), request.metadata);
@@ -815,9 +823,13 @@ TEST_F(CatalogManagerReplSetTest, RunUserManagementWriteCommandNotMasterRetrySuc
// Since no write concern was sent we will add w:majority
ASSERT_EQUALS(BSON("dropUser"
<< "test"
- << "writeConcern" << BSON("w"
- << "majority"
- << "wtimeout" << 0) << "maxTimeMS" << 30000),
+ << "writeConcern"
+ << BSON("w"
+ << "majority"
+ << "wtimeout"
+ << 0)
+ << "maxTimeMS"
+ << 30000),
request.cmdObj);
ASSERT_EQUALS(BSON(rpc::kReplSetMetadataFieldName << 1), request.metadata);
@@ -1193,9 +1205,10 @@ TEST_F(CatalogManagerReplSetTest, GetTagForChunkOneTagFound) {
ASSERT_EQ(query->ns(), TagsType::ConfigNS);
ASSERT_EQ(query->getFilter(),
- BSON(TagsType::ns(chunk.getNS())
- << TagsType::min() << BSON("$lte" << chunk.getMin()) << TagsType::max()
- << BSON("$gte" << chunk.getMax())));
+ BSON(TagsType::ns(chunk.getNS()) << TagsType::min()
+ << BSON("$lte" << chunk.getMin())
+ << TagsType::max()
+ << BSON("$gte" << chunk.getMax())));
checkReadConcern(request.cmdObj, Timestamp(0, 0), repl::OpTime::kUninitializedTerm);
@@ -1237,9 +1250,10 @@ TEST_F(CatalogManagerReplSetTest, GetTagForChunkNoTagFound) {
ASSERT_EQ(query->ns(), TagsType::ConfigNS);
ASSERT_EQ(query->getFilter(),
- BSON(TagsType::ns(chunk.getNS())
- << TagsType::min() << BSON("$lte" << chunk.getMin()) << TagsType::max()
- << BSON("$gte" << chunk.getMax())));
+ BSON(TagsType::ns(chunk.getNS()) << TagsType::min()
+ << BSON("$lte" << chunk.getMin())
+ << TagsType::max()
+ << BSON("$gte" << chunk.getMax())));
checkReadConcern(request.cmdObj, Timestamp(0, 0), repl::OpTime::kUninitializedTerm);
@@ -1278,9 +1292,10 @@ TEST_F(CatalogManagerReplSetTest, GetTagForChunkInvalidTagDoc) {
ASSERT_EQ(query->ns(), TagsType::ConfigNS);
ASSERT_EQ(query->getFilter(),
- BSON(TagsType::ns(chunk.getNS())
- << TagsType::min() << BSON("$lte" << chunk.getMin()) << TagsType::max()
- << BSON("$gte" << chunk.getMax())));
+ BSON(TagsType::ns(chunk.getNS()) << TagsType::min()
+ << BSON("$lte" << chunk.getMin())
+ << TagsType::max()
+ << BSON("$gte" << chunk.getMax())));
checkReadConcern(request.cmdObj, Timestamp(0, 0), repl::OpTime::kUninitializedTerm);
@@ -1388,7 +1403,8 @@ TEST_F(CatalogManagerReplSetTest, ApplyChunkOpsDeprecatedSuccessful) {
ASSERT_EQUALS("config", request.dbname);
ASSERT_EQUALS(BSON("w"
<< "majority"
- << "wtimeout" << 15000),
+ << "wtimeout"
+ << 15000),
request.cmdObj["writeConcern"].Obj());
ASSERT_EQUALS(BSON(rpc::kReplSetMetadataFieldName << 1), request.metadata);
ASSERT_EQUALS(updateOps, request.cmdObj["applyOps"].Obj());
@@ -1925,10 +1941,12 @@ TEST_F(CatalogManagerReplSetTest, EnableShardingNoDBExists) {
shardRegistry()->getShard(operationContext(), "shard0")->getTargeter());
shardTargeter->setFindHostReturnValue(HostAndPort("shard0:12"));
- distLock()->expectLock([](StringData name, StringData whyMessage, Milliseconds, Milliseconds) {
- ASSERT_EQ("test", name);
- ASSERT_FALSE(whyMessage.empty());
- }, Status::OK());
+ distLock()->expectLock(
+ [](StringData name, StringData whyMessage, Milliseconds, Milliseconds) {
+ ASSERT_EQ("test", name);
+ ASSERT_FALSE(whyMessage.empty());
+ },
+ Status::OK());
auto future = launchAsync([this] {
auto status = catalogManager()->enableSharding(operationContext(), "test");
diff --git a/src/mongo/s/catalog/replset/catalog_manager_replica_set_upgrade_test.cpp b/src/mongo/s/catalog/replset/catalog_manager_replica_set_upgrade_test.cpp
index c8e2a975a58..43946d897c2 100644
--- a/src/mongo/s/catalog/replset/catalog_manager_replica_set_upgrade_test.cpp
+++ b/src/mongo/s/catalog/replset/catalog_manager_replica_set_upgrade_test.cpp
@@ -76,8 +76,11 @@ TEST_F(CatalogManagerReplSetTestFixture, UpgradeNotNeeded) {
checkReadConcern(findCmd, Timestamp(0, 0), repl::OpTime::kUninitializedTerm);
BSONObj versionDoc(BSON("_id" << 1 << "minCompatibleVersion"
- << MIN_COMPATIBLE_CONFIG_VERSION << "currentVersion"
- << CURRENT_CONFIG_VERSION << "clusterId" << OID::gen()));
+ << MIN_COMPATIBLE_CONFIG_VERSION
+ << "currentVersion"
+ << CURRENT_CONFIG_VERSION
+ << "clusterId"
+ << OID::gen()));
return vector<BSONObj>{versionDoc};
});
diff --git a/src/mongo/s/catalog/replset/dist_lock_catalog_impl.cpp b/src/mongo/s/catalog/replset/dist_lock_catalog_impl.cpp
index cbdef7a2761..dd9a51c34bc 100644
--- a/src/mongo/s/catalog/replset/dist_lock_catalog_impl.cpp
+++ b/src/mongo/s/catalog/replset/dist_lock_catalog_impl.cpp
@@ -98,7 +98,8 @@ StatusWith<BSONObj> extractFindAndModifyNewObj(StatusWith<Shard::CommandResponse
return {ErrorCodes::UnsupportedFormat,
str::stream() << "expected an object from the findAndModify response '"
<< kFindAndModifyResponseResultDocField
- << "'field, got: " << newDocElem};
+ << "'field, got: "
+ << newDocElem};
}
return newDocElem.Obj().getOwned();
@@ -191,10 +192,14 @@ StatusWith<LocksType> DistLockCatalogImpl::grabLock(OperationContext* txn,
StringData processId,
Date_t time,
StringData why) {
- BSONObj newLockDetails(BSON(LocksType::lockID(lockSessionID)
- << LocksType::state(LocksType::LOCKED) << LocksType::who() << who
- << LocksType::process() << processId << LocksType::when(time)
- << LocksType::why() << why));
+ BSONObj newLockDetails(BSON(
+ LocksType::lockID(lockSessionID) << LocksType::state(LocksType::LOCKED) << LocksType::who()
+ << who
+ << LocksType::process()
+ << processId
+ << LocksType::when(time)
+ << LocksType::why()
+ << why));
auto request = FindAndModifyRequest::makeUpdate(
_locksNS,
@@ -246,10 +251,14 @@ StatusWith<LocksType> DistLockCatalogImpl::overtakeLock(OperationContext* txn,
BSON(LocksType::name() << lockID << LocksType::state(LocksType::UNLOCKED)));
orQueryBuilder.append(BSON(LocksType::name() << lockID << LocksType::lockID(currentHolderTS)));
- BSONObj newLockDetails(BSON(LocksType::lockID(lockSessionID)
- << LocksType::state(LocksType::LOCKED) << LocksType::who() << who
- << LocksType::process() << processId << LocksType::when(time)
- << LocksType::why() << why));
+ BSONObj newLockDetails(BSON(
+ LocksType::lockID(lockSessionID) << LocksType::state(LocksType::LOCKED) << LocksType::who()
+ << who
+ << LocksType::process()
+ << processId
+ << LocksType::when(time)
+ << LocksType::why()
+ << why));
auto request = FindAndModifyRequest::makeUpdate(
_locksNS, BSON("$or" << orQueryBuilder.arr()), BSON("$set" << newLockDetails));
@@ -343,7 +352,8 @@ Status DistLockCatalogImpl::unlockAll(OperationContext* txn, const std::string&
return Status(ErrorCodes::FailedToParse,
str::stream()
<< "Failed to parse config server response to batch request for "
- "unlocking existing distributed locks" << causedBy(errmsg));
+ "unlocking existing distributed locks"
+ << causedBy(errmsg));
}
return batchResponse.toStatus();
}
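
The unlockAll() hunk above splits a long message across two adjacent string literals before the next << operand. A minimal standalone sketch of that pattern; buildParseError is a hypothetical helper, and the " :: caused by :: " text only approximates what causedBy() prepends:

#include <sstream>
#include <string>

// Adjacent string literals are concatenated by the compiler, so a long
// message can be split across source lines before the next stream operand.
std::string buildParseError(const std::string& parseError) {
    std::ostringstream os;
    os << "Failed to parse config server response to batch request for "
          "unlocking existing distributed locks"
       << " :: caused by :: "
       << parseError;
    return os.str();
}
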
diff --git a/src/mongo/s/catalog/replset/dist_lock_catalog_impl_test.cpp b/src/mongo/s/catalog/replset/dist_lock_catalog_impl_test.cpp
index 35c9be42c9b..5d538ea3407 100644
--- a/src/mongo/s/catalog/replset/dist_lock_catalog_impl_test.cpp
+++ b/src/mongo/s/catalog/replset/dist_lock_catalog_impl_test.cpp
@@ -1036,14 +1036,17 @@ TEST_F(DistLockCatalogFixture, UnlockWriteConcernError) {
// The dist lock catalog calls into the ShardRegistry, which will retry 3 times for
// WriteConcernFailed errors
- onCommand([&](const RemoteCommandRequest& request)
- -> StatusWith<BSONObj> { return writeConcernFailedResponse; });
+ onCommand([&](const RemoteCommandRequest& request) -> StatusWith<BSONObj> {
+ return writeConcernFailedResponse;
+ });
- onCommand([&](const RemoteCommandRequest& request)
- -> StatusWith<BSONObj> { return writeConcernFailedResponse; });
+ onCommand([&](const RemoteCommandRequest& request) -> StatusWith<BSONObj> {
+ return writeConcernFailedResponse;
+ });
- onCommand([&](const RemoteCommandRequest& request)
- -> StatusWith<BSONObj> { return writeConcernFailedResponse; });
+ onCommand([&](const RemoteCommandRequest& request) -> StatusWith<BSONObj> {
+ return writeConcernFailedResponse;
+ });
future.timed_get(kFutureTimeout);
}
@@ -1090,30 +1093,31 @@ TEST_F(DistLockCatalogFixture, BasicUnlockAll) {
ASSERT_OK(status);
});
- onCommand([](const RemoteCommandRequest& request)
- -> StatusWith<BSONObj> {
- ASSERT_EQUALS(dummyHost, request.target);
- ASSERT_EQUALS("config", request.dbname);
-
- std::string errmsg;
- BatchedUpdateRequest batchRequest;
- ASSERT(batchRequest.parseBSON("config", request.cmdObj, &errmsg));
- ASSERT_EQUALS(LocksType::ConfigNS, batchRequest.getNS().toString());
- ASSERT_EQUALS(BSON("w"
- << "majority"
- << "wtimeout" << 15000),
- batchRequest.getWriteConcern());
- auto updates = batchRequest.getUpdates();
- ASSERT_EQUALS(1U, updates.size());
- auto update = updates.front();
- ASSERT_FALSE(update->getUpsert());
- ASSERT_TRUE(update->getMulti());
- ASSERT_EQUALS(BSON(LocksType::process("processID")), update->getQuery());
- ASSERT_EQUALS(BSON("$set" << BSON(LocksType::state(LocksType::UNLOCKED))),
- update->getUpdateExpr());
-
- return BSON("ok" << 1);
- });
+ onCommand(
+ [](const RemoteCommandRequest& request) -> StatusWith<BSONObj> {
+ ASSERT_EQUALS(dummyHost, request.target);
+ ASSERT_EQUALS("config", request.dbname);
+
+ std::string errmsg;
+ BatchedUpdateRequest batchRequest;
+ ASSERT(batchRequest.parseBSON("config", request.cmdObj, &errmsg));
+ ASSERT_EQUALS(LocksType::ConfigNS, batchRequest.getNS().toString());
+ ASSERT_EQUALS(BSON("w"
+ << "majority"
+ << "wtimeout"
+ << 15000),
+ batchRequest.getWriteConcern());
+ auto updates = batchRequest.getUpdates();
+ ASSERT_EQUALS(1U, updates.size());
+ auto update = updates.front();
+ ASSERT_FALSE(update->getUpsert());
+ ASSERT_TRUE(update->getMulti());
+ ASSERT_EQUALS(BSON(LocksType::process("processID")), update->getQuery());
+ ASSERT_EQUALS(BSON("$set" << BSON(LocksType::state(LocksType::UNLOCKED))),
+ update->getUpdateExpr());
+
+ return BSON("ok" << 1);
+ });
future.timed_get(kFutureTimeout);
}
@@ -1488,8 +1492,9 @@ TEST_F(DistLockCatalogFixture, GetPingNotFound) {
ASSERT_FALSE(status.reason().empty());
});
- onFindCommand([](const RemoteCommandRequest& request)
- -> StatusWith<vector<BSONObj>> { return std::vector<BSONObj>(); });
+ onFindCommand([](const RemoteCommandRequest& request) -> StatusWith<vector<BSONObj>> {
+ return std::vector<BSONObj>();
+ });
future.timed_get(kFutureTimeout);
}
@@ -1572,8 +1577,9 @@ TEST_F(DistLockCatalogFixture, GetLockByTSNotFound) {
ASSERT_FALSE(status.reason().empty());
});
- onFindCommand([](const RemoteCommandRequest& request)
- -> StatusWith<vector<BSONObj>> { return std::vector<BSONObj>(); });
+ onFindCommand([](const RemoteCommandRequest& request) -> StatusWith<vector<BSONObj>> {
+ return std::vector<BSONObj>();
+ });
future.timed_get(kFutureTimeout);
}
@@ -1660,8 +1666,9 @@ TEST_F(DistLockCatalogFixture, GetLockByNameNotFound) {
ASSERT_FALSE(status.reason().empty());
});
- onFindCommand([](const RemoteCommandRequest& request)
- -> StatusWith<vector<BSONObj>> { return std::vector<BSONObj>(); });
+ onFindCommand([](const RemoteCommandRequest& request) -> StatusWith<vector<BSONObj>> {
+ return std::vector<BSONObj>();
+ });
future.timed_get(kFutureTimeout);
}
diff --git a/src/mongo/s/catalog/replset/replset_dist_lock_manager.cpp b/src/mongo/s/catalog/replset/replset_dist_lock_manager.cpp
index 8fcd79fc6cd..8b387f30506 100644
--- a/src/mongo/s/catalog/replset/replset_dist_lock_manager.cpp
+++ b/src/mongo/s/catalog/replset/replset_dist_lock_manager.cpp
@@ -34,8 +34,8 @@
#include "mongo/base/status.h"
#include "mongo/base/status_with.h"
-#include "mongo/db/service_context.h"
#include "mongo/db/operation_context_noop.h"
+#include "mongo/db/service_context.h"
#include "mongo/s/catalog/dist_lock_catalog.h"
#include "mongo/s/catalog/type_lockpings.h"
#include "mongo/s/catalog/type_locks.h"
@@ -47,8 +47,8 @@
#include "mongo/util/fail_point_service.h"
#include "mongo/util/log.h"
#include "mongo/util/mongoutils/str.h"
-#include "mongo/util/timer.h"
#include "mongo/util/time_support.h"
+#include "mongo/util/timer.h"
namespace mongo {
diff --git a/src/mongo/s/catalog/replset/replset_dist_lock_manager_test.cpp b/src/mongo/s/catalog/replset/replset_dist_lock_manager_test.cpp
index 792bb3ca9d8..1b16e1f35da 100644
--- a/src/mongo/s/catalog/replset/replset_dist_lock_manager_test.cpp
+++ b/src/mongo/s/catalog/replset/replset_dist_lock_manager_test.cpp
@@ -30,19 +30,19 @@
#include "mongo/s/catalog/replset/replset_dist_lock_manager.h"
+#include <boost/optional.hpp>
+#include <boost/optional/optional_io.hpp>
#include <map>
#include <string>
#include <type_traits>
#include <vector>
-#include <boost/optional.hpp>
-#include <boost/optional/optional_io.hpp>
#include "mongo/base/status.h"
#include "mongo/base/status_with.h"
#include "mongo/bson/json.h"
#include "mongo/bson/util/builder.h"
-#include "mongo/client/remote_command_targeter_factory_mock.h"
#include "mongo/client/remote_command_targeter.h"
+#include "mongo/client/remote_command_targeter_factory_mock.h"
#include "mongo/db/jsobj.h"
#include "mongo/db/operation_context_noop.h"
#include "mongo/db/service_context_noop.h"
@@ -364,9 +364,11 @@ TEST_F(RSDistLockMgrWithMockTickSource, LockSuccessAfterRetry) {
getMockCatalog()->expectNoGrabLock();
- getMockCatalog()->expectGetLockByName([](StringData name) {
- FAIL("should not attempt to overtake lock after successful lock");
- }, LocksType());
+ getMockCatalog()->expectGetLockByName(
+ [](StringData name) {
+ FAIL("should not attempt to overtake lock after successful lock");
+ },
+ LocksType());
},
goodLockDoc);
}
@@ -828,10 +830,12 @@ TEST_F(ReplSetDistLockManagerFixture, MultipleQueuedUnlock) {
* Returns true if all values in the map are greater than 2.
*/
auto mapEntriesGreaterThanTwo = [](const decltype(unlockIDMap)& map) -> bool {
- auto iter = find_if(map.begin(),
- map.end(),
- [](const std::remove_reference<decltype(map)>::type::value_type& entry)
- -> bool { return entry.second < 3; });
+ auto iter = find_if(
+ map.begin(),
+ map.end(),
+ [](const std::remove_reference<decltype(map)>::type::value_type& entry) -> bool {
+ return entry.second < 3;
+ });
return iter == map.end();
};
@@ -844,10 +848,12 @@ TEST_F(ReplSetDistLockManagerFixture, MultipleQueuedUnlock) {
// Wait until we see at least 2 unique lockSessionID more than twice.
if (unlockIDMap.size() >= 2 && mapEntriesGreaterThanTwo(unlockIDMap)) {
- getMockCatalog()->expectUnLock([&testMutex, &unlockCV](const OID& lockSessionID) {
- stdx::unique_lock<stdx::mutex> lk(testMutex);
- unlockCV.notify_all();
- }, Status::OK());
+ getMockCatalog()->expectUnLock(
+ [&testMutex, &unlockCV](const OID& lockSessionID) {
+ stdx::unique_lock<stdx::mutex> lk(testMutex);
+ unlockCV.notify_all();
+ },
+ Status::OK());
}
},
{ErrorCodes::NetworkTimeout, "bad test network"});
@@ -910,10 +916,12 @@ TEST_F(ReplSetDistLockManagerFixture, MultipleQueuedUnlock) {
TEST_F(ReplSetDistLockManagerFixture, CleanupPingOnShutdown) {
bool stopPingCalled = false;
- getMockCatalog()->expectStopPing([this, &stopPingCalled](StringData processID) {
- ASSERT_EQUALS(getProcessID(), processID);
- stopPingCalled = true;
- }, Status::OK());
+ getMockCatalog()->expectStopPing(
+ [this, &stopPingCalled](StringData processID) {
+ ASSERT_EQUALS(getProcessID(), processID);
+ stopPingCalled = true;
+ },
+ Status::OK());
getMgr()->shutDown(txn());
ASSERT_TRUE(stopPingCalled);
@@ -989,9 +997,9 @@ TEST_F(ReplSetDistLockManagerFixture, CheckLockStatusNoLongerOwn) {
auto& scopedLock = lockStatus.getValue();
getMockCatalog()->expectNoGrabLock();
- getMockCatalog()->expectGetLockByTS([&lockSessionID](const OID& ts) {
- ASSERT_EQUALS(lockSessionID, ts);
- }, {ErrorCodes::LockNotFound, "no lock"});
+ getMockCatalog()->expectGetLockByTS(
+ [&lockSessionID](const OID& ts) { ASSERT_EQUALS(lockSessionID, ts); },
+ {ErrorCodes::LockNotFound, "no lock"});
ASSERT_NOT_OK(scopedLock.checkStatus());
}
@@ -1028,9 +1036,9 @@ TEST_F(ReplSetDistLockManagerFixture, CheckLockStatusError) {
auto& scopedLock = lockStatus.getValue();
getMockCatalog()->expectNoGrabLock();
- getMockCatalog()->expectGetLockByTS([&lockSessionID](const OID& ts) {
- ASSERT_EQUALS(lockSessionID, ts);
- }, {ErrorCodes::NetworkTimeout, "bad test network"});
+ getMockCatalog()->expectGetLockByTS(
+ [&lockSessionID](const OID& ts) { ASSERT_EQUALS(lockSessionID, ts); },
+ {ErrorCodes::NetworkTimeout, "bad test network"});
ASSERT_NOT_OK(scopedLock.checkStatus());
}
@@ -1264,9 +1272,9 @@ TEST_F(ReplSetDistLockManagerFixture, GetPingErrorWhileOvertaking) {
getMockCatalog()->expectGetLockByName([](StringData name) { ASSERT_EQUALS("bar", name); },
currentLockDoc);
- getMockCatalog()->expectGetPing([](StringData process) {
- ASSERT_EQUALS("otherProcess", process);
- }, {ErrorCodes::NetworkTimeout, "bad test network"});
+ getMockCatalog()->expectGetPing(
+ [](StringData process) { ASSERT_EQUALS("otherProcess", process); },
+ {ErrorCodes::NetworkTimeout, "bad test network"});
auto status = getMgr()->lock(txn(), "bar", "", Milliseconds(0), Milliseconds(0)).getStatus();
ASSERT_NOT_OK(status);
@@ -1406,9 +1414,9 @@ TEST_F(ReplSetDistLockManagerFixture, CannotOvertakeIfPingIsActive) {
getMockCatalog()->expectGetPing(
[](StringData process) { ASSERT_EQUALS("otherProcess", process); }, pingDoc);
- getMockCatalog()->expectGetServerInfo([&getServerInfoCallCount]() {
- getServerInfoCallCount++;
- }, DistLockCatalog::ServerInfo(configServerLocalTime, OID()));
+ getMockCatalog()->expectGetServerInfo(
+ [&getServerInfoCallCount]() { getServerInfoCallCount++; },
+ DistLockCatalog::ServerInfo(configServerLocalTime, OID()));
auto status =
getMgr()->lock(txn(), "bar", "", Milliseconds(0), Milliseconds(0)).getStatus();
@@ -1419,9 +1427,9 @@ TEST_F(ReplSetDistLockManagerFixture, CannotOvertakeIfPingIsActive) {
ASSERT_EQUALS(kLoopCount, getServerInfoCallCount);
configServerLocalTime += kLockExpiration;
- getMockCatalog()->expectGetServerInfo([&getServerInfoCallCount]() {
- getServerInfoCallCount++;
- }, DistLockCatalog::ServerInfo(configServerLocalTime, OID()));
+ getMockCatalog()->expectGetServerInfo(
+ [&getServerInfoCallCount]() { getServerInfoCallCount++; },
+ DistLockCatalog::ServerInfo(configServerLocalTime, OID()));
OID lockTS;
// Make sure that overtake is now ok since ping is no longer updated.
@@ -1505,9 +1513,9 @@ TEST_F(ReplSetDistLockManagerFixture, CannotOvertakeIfOwnerJustChanged) {
getMockCatalog()->expectGetLockByName([](StringData name) { ASSERT_EQUALS("bar", name); },
currentLockDoc);
- getMockCatalog()->expectGetServerInfo([&getServerInfoCallCount]() {
- getServerInfoCallCount++;
- }, DistLockCatalog::ServerInfo(configServerLocalTime, OID()));
+ getMockCatalog()->expectGetServerInfo(
+ [&getServerInfoCallCount]() { getServerInfoCallCount++; },
+ DistLockCatalog::ServerInfo(configServerLocalTime, OID()));
auto status =
getMgr()->lock(txn(), "bar", "", Milliseconds(0), Milliseconds(0)).getStatus();
@@ -1518,9 +1526,9 @@ TEST_F(ReplSetDistLockManagerFixture, CannotOvertakeIfOwnerJustChanged) {
ASSERT_EQUALS(kLoopCount, getServerInfoCallCount);
configServerLocalTime += kLockExpiration;
- getMockCatalog()->expectGetServerInfo([&getServerInfoCallCount]() {
- getServerInfoCallCount++;
- }, DistLockCatalog::ServerInfo(configServerLocalTime, OID()));
+ getMockCatalog()->expectGetServerInfo(
+ [&getServerInfoCallCount]() { getServerInfoCallCount++; },
+ DistLockCatalog::ServerInfo(configServerLocalTime, OID()));
OID lockTS;
// Make sure that overtake is now ok since lock owner didn't change.
@@ -1607,9 +1615,9 @@ TEST_F(ReplSetDistLockManagerFixture, CannotOvertakeIfElectionIdChanged) {
[](StringData process) { ASSERT_EQUALS("otherProcess", process); }, fixedPingDoc);
lastElectionId = OID::gen();
- getMockCatalog()->expectGetServerInfo([&getServerInfoCallCount]() {
- getServerInfoCallCount++;
- }, DistLockCatalog::ServerInfo(configServerLocalTime, lastElectionId));
+ getMockCatalog()->expectGetServerInfo(
+ [&getServerInfoCallCount]() { getServerInfoCallCount++; },
+ DistLockCatalog::ServerInfo(configServerLocalTime, lastElectionId));
auto status =
getMgr()->lock(txn(), "bar", "", Milliseconds(0), Milliseconds(0)).getStatus();
@@ -1620,9 +1628,9 @@ TEST_F(ReplSetDistLockManagerFixture, CannotOvertakeIfElectionIdChanged) {
ASSERT_EQUALS(kLoopCount, getServerInfoCallCount);
configServerLocalTime += kLockExpiration;
- getMockCatalog()->expectGetServerInfo([&getServerInfoCallCount]() {
- getServerInfoCallCount++;
- }, DistLockCatalog::ServerInfo(configServerLocalTime, lastElectionId));
+ getMockCatalog()->expectGetServerInfo(
+ [&getServerInfoCallCount]() { getServerInfoCallCount++; },
+ DistLockCatalog::ServerInfo(configServerLocalTime, lastElectionId));
OID lockTS;
// Make sure that overtake is now ok since electionId didn't change.
@@ -1893,8 +1901,9 @@ TEST_F(ReplSetDistLockManagerFixture, CannotOvertakeIfConfigServerClockGoesBackw
}
// Make config server time go backwards by lock expiration duration.
- getMockCatalog()->expectGetServerInfo([]() {
- }, DistLockCatalog::ServerInfo(configClock - kLockExpiration - Milliseconds(1), OID()));
+ getMockCatalog()->expectGetServerInfo(
+ []() {},
+ DistLockCatalog::ServerInfo(configClock - kLockExpiration - Milliseconds(1), OID()));
// Second attempt should not overtake lock.
{
@@ -1970,9 +1979,9 @@ TEST_F(RSDistLockMgrWithMockTickSource, CanOvertakeIfNoPingDocument) {
getMockCatalog()->expectGetLockByName([](StringData name) { ASSERT_EQUALS("bar", name); },
currentLockDoc);
- getMockCatalog()->expectGetPing([](StringData process) {
- ASSERT_EQUALS("otherProcess", process);
- }, {ErrorCodes::NoMatchingDocument, "no ping"});
+ getMockCatalog()->expectGetPing(
+ [](StringData process) { ASSERT_EQUALS("otherProcess", process); },
+ {ErrorCodes::NoMatchingDocument, "no ping"});
getMockCatalog()->expectGetServerInfo([]() {}, DistLockCatalog::ServerInfo(Date_t(), OID()));
@@ -1994,9 +2003,9 @@ TEST_F(RSDistLockMgrWithMockTickSource, CanOvertakeIfNoPingDocument) {
getMockCatalog()->expectGetLockByName([](StringData name) { ASSERT_EQUALS("bar", name); },
currentLockDoc);
- getMockCatalog()->expectGetPing([](StringData process) {
- ASSERT_EQUALS("otherProcess", process);
- }, {ErrorCodes::NoMatchingDocument, "no ping"});
+ getMockCatalog()->expectGetPing(
+ [](StringData process) { ASSERT_EQUALS("otherProcess", process); },
+ {ErrorCodes::NoMatchingDocument, "no ping"});
getMockCatalog()->expectGetServerInfo(
[]() {}, DistLockCatalog::ServerInfo(Date_t() + kLockExpiration + Milliseconds(1), OID()));
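
Several hunks above reformat mock expectations where the checker lambda is not the final argument: the call now breaks after the opening parenthesis and each argument gets its own line. A self-contained sketch of that shape with a hypothetical expectation function, not the real mock-catalog API:

#include <functional>
#include <iostream>
#include <string>

// Hypothetical stand-in: the checker lambda comes first, the canned result second.
void expectGetLockByName(const std::function<void(const std::string&)>& checker,
                         const std::string& cannedResult) {
    checker("bar");
    std::cout << "would return: " << cannedResult << '\n';
}

int main() {
    // With a non-trailing lambda, the formatted style breaks after '(' and
    // places each argument on its own line.
    expectGetLockByName(
        [](const std::string& name) { std::cout << "checked " << name << '\n'; },
        "lock document");
    return 0;
}
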
diff --git a/src/mongo/s/catalog/type_changelog_test.cpp b/src/mongo/s/catalog/type_changelog_test.cpp
index 1090b7be0e7..904c9fdfa46 100644
--- a/src/mongo/s/catalog/type_changelog_test.cpp
+++ b/src/mongo/s/catalog/type_changelog_test.cpp
@@ -44,12 +44,14 @@ TEST(ChangeLogType, Empty) {
}
TEST(ChangeLogType, Valid) {
- BSONObj obj = BSON(
- ChangeLogType::changeId("host.local-2012-11-21T19:14:10-8")
- << ChangeLogType::server("host.local") << ChangeLogType::clientAddr("192.168.0.189:51128")
- << ChangeLogType::time(Date_t::fromMillisSinceEpoch(1)) << ChangeLogType::what("split")
- << ChangeLogType::ns("test.test") << ChangeLogType::details(BSON("dummy"
- << "info")));
+ BSONObj obj = BSON(ChangeLogType::changeId("host.local-2012-11-21T19:14:10-8")
+ << ChangeLogType::server("host.local")
+ << ChangeLogType::clientAddr("192.168.0.189:51128")
+ << ChangeLogType::time(Date_t::fromMillisSinceEpoch(1))
+ << ChangeLogType::what("split")
+ << ChangeLogType::ns("test.test")
+ << ChangeLogType::details(BSON("dummy"
+ << "info")));
auto changeLogResult = ChangeLogType::fromBSON(obj);
ASSERT_OK(changeLogResult.getStatus());
@@ -68,60 +70,65 @@ TEST(ChangeLogType, Valid) {
}
TEST(ChangeLogType, MissingChangeId) {
- BSONObj obj =
- BSON(ChangeLogType::server("host.local")
- << ChangeLogType::clientAddr("192.168.0.189:51128")
- << ChangeLogType::time(Date_t::fromMillisSinceEpoch(1)) << ChangeLogType::what("split")
- << ChangeLogType::ns("test.test") << ChangeLogType::details(BSON("dummy"
- << "info")));
+ BSONObj obj = BSON(ChangeLogType::server("host.local")
+ << ChangeLogType::clientAddr("192.168.0.189:51128")
+ << ChangeLogType::time(Date_t::fromMillisSinceEpoch(1))
+ << ChangeLogType::what("split")
+ << ChangeLogType::ns("test.test")
+ << ChangeLogType::details(BSON("dummy"
+ << "info")));
auto changeLogResult = ChangeLogType::fromBSON(obj);
ASSERT_EQ(ErrorCodes::NoSuchKey, changeLogResult.getStatus());
}
TEST(ChangeLogType, MissingServer) {
- BSONObj obj =
- BSON(ChangeLogType::changeId("host.local-2012-11-21T19:14:10-8")
- << ChangeLogType::clientAddr("192.168.0.189:51128")
- << ChangeLogType::time(Date_t::fromMillisSinceEpoch(1)) << ChangeLogType::what("split")
- << ChangeLogType::ns("test.test") << ChangeLogType::details(BSON("dummy"
- << "info")));
+ BSONObj obj = BSON(ChangeLogType::changeId("host.local-2012-11-21T19:14:10-8")
+ << ChangeLogType::clientAddr("192.168.0.189:51128")
+ << ChangeLogType::time(Date_t::fromMillisSinceEpoch(1))
+ << ChangeLogType::what("split")
+ << ChangeLogType::ns("test.test")
+ << ChangeLogType::details(BSON("dummy"
+ << "info")));
auto changeLogResult = ChangeLogType::fromBSON(obj);
ASSERT_EQ(ErrorCodes::NoSuchKey, changeLogResult.getStatus());
}
TEST(ChangeLogType, MissingClientAddr) {
- BSONObj obj =
- BSON(ChangeLogType::changeId("host.local-2012-11-21T19:14:10-8")
- << ChangeLogType::server("host.local")
- << ChangeLogType::time(Date_t::fromMillisSinceEpoch(1)) << ChangeLogType::what("split")
- << ChangeLogType::ns("test.test") << ChangeLogType::details(BSON("dummy"
- << "info")));
+ BSONObj obj = BSON(ChangeLogType::changeId("host.local-2012-11-21T19:14:10-8")
+ << ChangeLogType::server("host.local")
+ << ChangeLogType::time(Date_t::fromMillisSinceEpoch(1))
+ << ChangeLogType::what("split")
+ << ChangeLogType::ns("test.test")
+ << ChangeLogType::details(BSON("dummy"
+ << "info")));
auto changeLogResult = ChangeLogType::fromBSON(obj);
ASSERT_EQ(ErrorCodes::NoSuchKey, changeLogResult.getStatus());
}
TEST(ChangeLogType, MissingTime) {
- BSONObj obj =
- BSON(ChangeLogType::changeId("host.local-2012-11-21T19:14:10-8")
- << ChangeLogType::server("host.local")
- << ChangeLogType::clientAddr("192.168.0.189:51128") << ChangeLogType::what("split")
- << ChangeLogType::ns("test.test") << ChangeLogType::details(BSON("dummy"
- << "info")));
+ BSONObj obj = BSON(ChangeLogType::changeId("host.local-2012-11-21T19:14:10-8")
+ << ChangeLogType::server("host.local")
+ << ChangeLogType::clientAddr("192.168.0.189:51128")
+ << ChangeLogType::what("split")
+ << ChangeLogType::ns("test.test")
+ << ChangeLogType::details(BSON("dummy"
+ << "info")));
auto changeLogResult = ChangeLogType::fromBSON(obj);
ASSERT_EQ(ErrorCodes::NoSuchKey, changeLogResult.getStatus());
}
TEST(ChangeLogType, MissingWhat) {
- BSONObj obj = BSON(
- ChangeLogType::changeId("host.local-2012-11-21T19:14:10-8")
- << ChangeLogType::server("host.local") << ChangeLogType::clientAddr("192.168.0.189:51128")
- << ChangeLogType::time(Date_t::fromMillisSinceEpoch(1)) << ChangeLogType::ns("test.test")
- << ChangeLogType::details(BSON("dummy"
- << "info")));
+ BSONObj obj = BSON(ChangeLogType::changeId("host.local-2012-11-21T19:14:10-8")
+ << ChangeLogType::server("host.local")
+ << ChangeLogType::clientAddr("192.168.0.189:51128")
+ << ChangeLogType::time(Date_t::fromMillisSinceEpoch(1))
+ << ChangeLogType::ns("test.test")
+ << ChangeLogType::details(BSON("dummy"
+ << "info")));
auto changeLogResult = ChangeLogType::fromBSON(obj);
ASSERT_EQ(ErrorCodes::NoSuchKey, changeLogResult.getStatus());
@@ -132,7 +139,8 @@ TEST(ChangeLogType, MissingDetails) {
<< ChangeLogType::server("host.local")
<< ChangeLogType::clientAddr("192.168.0.189:51128")
<< ChangeLogType::time(Date_t::fromMillisSinceEpoch(1))
- << ChangeLogType::what("split") << ChangeLogType::ns("test.test"));
+ << ChangeLogType::what("split")
+ << ChangeLogType::ns("test.test"));
auto changeLogResult = ChangeLogType::fromBSON(obj);
ASSERT_EQ(ErrorCodes::NoSuchKey, changeLogResult.getStatus());
diff --git a/src/mongo/s/catalog/type_chunk_test.cpp b/src/mongo/s/catalog/type_chunk_test.cpp
index cd4bbb4b1d4..a246df00c54 100644
--- a/src/mongo/s/catalog/type_chunk_test.cpp
+++ b/src/mongo/s/catalog/type_chunk_test.cpp
@@ -46,25 +46,33 @@ TEST(ChunkType, MissingRequiredFields) {
ChunkVersion chunkVersion(1, 2, OID::gen());
BSONObj objModNS =
- BSON(ChunkType::name("test.mycol-a_MinKey")
- << ChunkType::min(BSON("a" << 10 << "b" << 10)) << ChunkType::max(BSON("a" << 20))
- << "lastmod" << Timestamp(chunkVersion.toLong()) << "lastmodEpoch"
- << chunkVersion.epoch() << ChunkType::shard("shard0001"));
+ BSON(ChunkType::name("test.mycol-a_MinKey") << ChunkType::min(BSON("a" << 10 << "b" << 10))
+ << ChunkType::max(BSON("a" << 20))
+ << "lastmod"
+ << Timestamp(chunkVersion.toLong())
+ << "lastmodEpoch"
+ << chunkVersion.epoch()
+ << ChunkType::shard("shard0001"));
StatusWith<ChunkType> chunkRes = ChunkType::fromBSON(objModNS);
ASSERT_FALSE(chunkRes.isOK());
BSONObj objModKeys =
- BSON(ChunkType::name("test.mycol-a_MinKey")
- << ChunkType::ns("test.mycol") << "lastmod" << Timestamp(chunkVersion.toLong())
- << "lastmodEpoch" << chunkVersion.epoch() << ChunkType::shard("shard0001"));
+ BSON(ChunkType::name("test.mycol-a_MinKey") << ChunkType::ns("test.mycol") << "lastmod"
+ << Timestamp(chunkVersion.toLong())
+ << "lastmodEpoch"
+ << chunkVersion.epoch()
+ << ChunkType::shard("shard0001"));
chunkRes = ChunkType::fromBSON(objModKeys);
ASSERT_FALSE(chunkRes.isOK());
BSONObj objModShard =
- BSON(ChunkType::name("test.mycol-a_MinKey")
- << ChunkType::ns("test.mycol") << ChunkType::min(BSON("a" << 10 << "b" << 10))
- << ChunkType::max(BSON("a" << 20)) << "lastmod" << Timestamp(chunkVersion.toLong())
- << "lastmodEpoch" << chunkVersion.epoch());
+ BSON(ChunkType::name("test.mycol-a_MinKey") << ChunkType::ns("test.mycol")
+ << ChunkType::min(BSON("a" << 10 << "b" << 10))
+ << ChunkType::max(BSON("a" << 20))
+ << "lastmod"
+ << Timestamp(chunkVersion.toLong())
+ << "lastmodEpoch"
+ << chunkVersion.epoch());
chunkRes = ChunkType::fromBSON(objModShard);
ASSERT_FALSE(chunkRes.isOK());
}
@@ -72,10 +80,14 @@ TEST(ChunkType, MissingRequiredFields) {
TEST(ChunkType, MinAndMaxShardKeysDifferInNumberOfKeys) {
ChunkVersion chunkVersion(1, 2, OID::gen());
BSONObj obj =
- BSON(ChunkType::name("test.mycol-a_MinKey")
- << ChunkType::ns("test.mycol") << ChunkType::min(BSON("a" << 10 << "b" << 10))
- << ChunkType::max(BSON("a" << 20)) << "lastmod" << Timestamp(chunkVersion.toLong())
- << "lastmodEpoch" << chunkVersion.epoch() << ChunkType::shard("shard0001"));
+ BSON(ChunkType::name("test.mycol-a_MinKey") << ChunkType::ns("test.mycol")
+ << ChunkType::min(BSON("a" << 10 << "b" << 10))
+ << ChunkType::max(BSON("a" << 20))
+ << "lastmod"
+ << Timestamp(chunkVersion.toLong())
+ << "lastmodEpoch"
+ << chunkVersion.epoch()
+ << ChunkType::shard("shard0001"));
StatusWith<ChunkType> chunkRes = ChunkType::fromBSON(obj);
ASSERT_OK(chunkRes.getStatus());
ASSERT_FALSE(chunkRes.getValue().validate().isOK());
@@ -83,11 +95,14 @@ TEST(ChunkType, MinAndMaxShardKeysDifferInNumberOfKeys) {
TEST(ChunkType, MinAndMaxShardKeysDifferInKeyNames) {
ChunkVersion chunkVersion(1, 2, OID::gen());
- BSONObj obj =
- BSON(ChunkType::name("test.mycol-a_MinKey")
- << ChunkType::ns("test.mycol") << ChunkType::min(BSON("a" << 10))
- << ChunkType::max(BSON("b" << 20)) << "lastmod" << Timestamp(chunkVersion.toLong())
- << "lastmodEpoch" << chunkVersion.epoch() << ChunkType::shard("shard0001"));
+ BSONObj obj = BSON(ChunkType::name("test.mycol-a_MinKey") << ChunkType::ns("test.mycol")
+ << ChunkType::min(BSON("a" << 10))
+ << ChunkType::max(BSON("b" << 20))
+ << "lastmod"
+ << Timestamp(chunkVersion.toLong())
+ << "lastmodEpoch"
+ << chunkVersion.epoch()
+ << ChunkType::shard("shard0001"));
StatusWith<ChunkType> chunkRes = ChunkType::fromBSON(obj);
ASSERT_OK(chunkRes.getStatus());
ASSERT_FALSE(chunkRes.getValue().validate().isOK());
@@ -95,11 +110,14 @@ TEST(ChunkType, MinAndMaxShardKeysDifferInKeyNames) {
TEST(ChunkType, NotAscending) {
ChunkVersion chunkVersion(1, 2, OID::gen());
- BSONObj obj =
- BSON(ChunkType::name("test.mycol-a_MinKey")
- << ChunkType::ns("test.mycol") << ChunkType::min(BSON("a" << 20))
- << ChunkType::max(BSON("a" << 10)) << "lastmod" << Timestamp(chunkVersion.toLong())
- << "lastmodEpoch" << chunkVersion.epoch() << ChunkType::shard("shard0001"));
+ BSONObj obj = BSON(ChunkType::name("test.mycol-a_MinKey") << ChunkType::ns("test.mycol")
+ << ChunkType::min(BSON("a" << 20))
+ << ChunkType::max(BSON("a" << 10))
+ << "lastmod"
+ << Timestamp(chunkVersion.toLong())
+ << "lastmodEpoch"
+ << chunkVersion.epoch()
+ << ChunkType::shard("shard0001"));
StatusWith<ChunkType> chunkRes = ChunkType::fromBSON(obj);
ASSERT_OK(chunkRes.getStatus());
ASSERT_FALSE(chunkRes.getValue().validate().isOK());
@@ -107,11 +125,14 @@ TEST(ChunkType, NotAscending) {
TEST(ChunkType, CorrectContents) {
ChunkVersion chunkVersion(1, 2, OID::gen());
- BSONObj obj =
- BSON(ChunkType::name("test.mycol-a_MinKey")
- << ChunkType::ns("test.mycol") << ChunkType::min(BSON("a" << 10))
- << ChunkType::max(BSON("a" << 20)) << "lastmod" << Timestamp(chunkVersion.toLong())
- << "lastmodEpoch" << chunkVersion.epoch() << ChunkType::shard("shard0001"));
+ BSONObj obj = BSON(ChunkType::name("test.mycol-a_MinKey") << ChunkType::ns("test.mycol")
+ << ChunkType::min(BSON("a" << 10))
+ << ChunkType::max(BSON("a" << 20))
+ << "lastmod"
+ << Timestamp(chunkVersion.toLong())
+ << "lastmodEpoch"
+ << chunkVersion.epoch()
+ << ChunkType::shard("shard0001"));
StatusWith<ChunkType> chunkRes = ChunkType::fromBSON(obj);
ASSERT_OK(chunkRes.getStatus());
ChunkType chunk = chunkRes.getValue();
@@ -126,13 +147,18 @@ TEST(ChunkType, CorrectContents) {
}
TEST(ChunkType, Pre22Format) {
- ChunkType chunk = assertGet(
- ChunkType::fromBSON(BSON("_id"
- << "test.mycol-a_MinKey"
- << "lastmod" << Date_t::fromMillisSinceEpoch(1) << "ns"
- << "test.mycol"
- << "min" << BSON("a" << 10) << "max" << BSON("a" << 20) << "shard"
- << "shard0001")));
+ ChunkType chunk = assertGet(ChunkType::fromBSON(BSON("_id"
+ << "test.mycol-a_MinKey"
+ << "lastmod"
+ << Date_t::fromMillisSinceEpoch(1)
+ << "ns"
+ << "test.mycol"
+ << "min"
+ << BSON("a" << 10)
+ << "max"
+ << BSON("a" << 20)
+ << "shard"
+ << "shard0001")));
ASSERT_OK(chunk.validate());
ASSERT_EQUALS(chunk.getNS(), "test.mycol");
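The ChunkType hunks above all exercise the same two-stage check: ChunkType::fromBSON() only verifies that the document parses, while validate() enforces the semantic rules (matching min/max key names, ascending bounds, required fields). A minimal sketch of that split, reusing only the helpers visible in these hunks (the variable names are illustrative):

    ChunkVersion chunkVersion(1, 2, OID::gen());
    BSONObj mismatched = BSON(ChunkType::name("test.mycol-a_MinKey")
                              << ChunkType::ns("test.mycol")
                              << ChunkType::min(BSON("a" << 10))
                              << ChunkType::max(BSON("b" << 20))  // key name differs from min
                              << "lastmod" << Timestamp(chunkVersion.toLong())
                              << "lastmodEpoch" << chunkVersion.epoch()
                              << ChunkType::shard("shard0001"));
    StatusWith<ChunkType> parsed = ChunkType::fromBSON(mismatched);
    ASSERT_OK(parsed.getStatus());                      // structurally fine
    ASSERT_FALSE(parsed.getValue().validate().isOK());  // rejected by semantic validation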
diff --git a/src/mongo/s/catalog/type_collection_test.cpp b/src/mongo/s/catalog/type_collection_test.cpp
index 893f3e6cb56..ead96ca0850 100644
--- a/src/mongo/s/catalog/type_collection_test.cpp
+++ b/src/mongo/s/catalog/type_collection_test.cpp
@@ -28,8 +28,8 @@
#include "mongo/platform/basic.h"
-#include "mongo/bson/oid.h"
#include "mongo/base/status_with.h"
+#include "mongo/bson/oid.h"
#include "mongo/s/catalog/type_collection.h"
#include "mongo/unittest/unittest.h"
#include "mongo/util/time_support.h"
@@ -47,10 +47,12 @@ TEST(CollectionType, Empty) {
TEST(CollectionType, Basic) {
const OID oid = OID::gen();
- StatusWith<CollectionType> status = CollectionType::fromBSON(BSON(
- CollectionType::fullNs("db.coll")
- << CollectionType::epoch(oid) << CollectionType::updatedAt(Date_t::fromMillisSinceEpoch(1))
- << CollectionType::keyPattern(BSON("a" << 1)) << CollectionType::unique(true)));
+ StatusWith<CollectionType> status =
+ CollectionType::fromBSON(BSON(CollectionType::fullNs("db.coll")
+ << CollectionType::epoch(oid)
+ << CollectionType::updatedAt(Date_t::fromMillisSinceEpoch(1))
+ << CollectionType::keyPattern(BSON("a" << 1))
+ << CollectionType::unique(true)));
ASSERT_TRUE(status.isOK());
CollectionType coll = status.getValue();
@@ -88,11 +90,16 @@ TEST(CollectionType, EpochCorrectness) {
}
TEST(CollectionType, Pre22Format) {
- CollectionType coll = assertGet(
- CollectionType::fromBSON(BSON("_id"
- << "db.coll"
- << "lastmod" << Date_t::fromMillisSinceEpoch(1) << "dropped"
- << false << "key" << BSON("a" << 1) << "unique" << false)));
+ CollectionType coll = assertGet(CollectionType::fromBSON(BSON("_id"
+ << "db.coll"
+ << "lastmod"
+ << Date_t::fromMillisSinceEpoch(1)
+ << "dropped"
+ << false
+ << "key"
+ << BSON("a" << 1)
+ << "unique"
+ << false)));
ASSERT(coll.getNs() == NamespaceString{"db.coll"});
ASSERT(!coll.getEpoch().isSet());
@@ -105,10 +112,12 @@ TEST(CollectionType, Pre22Format) {
TEST(CollectionType, InvalidCollectionNamespace) {
const OID oid = OID::gen();
- StatusWith<CollectionType> result = CollectionType::fromBSON(BSON(
- CollectionType::fullNs("foo\\bar.coll")
- << CollectionType::epoch(oid) << CollectionType::updatedAt(Date_t::fromMillisSinceEpoch(1))
- << CollectionType::keyPattern(BSON("a" << 1)) << CollectionType::unique(true)));
+ StatusWith<CollectionType> result =
+ CollectionType::fromBSON(BSON(CollectionType::fullNs("foo\\bar.coll")
+ << CollectionType::epoch(oid)
+ << CollectionType::updatedAt(Date_t::fromMillisSinceEpoch(1))
+ << CollectionType::keyPattern(BSON("a" << 1))
+ << CollectionType::unique(true)));
ASSERT_TRUE(result.isOK());
CollectionType collType = result.getValue();
ASSERT_FALSE(collType.validate().isOK());
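The CollectionType hunks cover both the current builder form (CollectionType::fullNs/epoch/updatedAt/keyPattern/unique) and the legacy pre-2.2 layout with raw field names, which still parses but reports an unset epoch. A minimal sketch of the legacy path, using only calls shown above:

    CollectionType coll = assertGet(
        CollectionType::fromBSON(BSON("_id" << "db.coll"
                                            << "lastmod" << Date_t::fromMillisSinceEpoch(1)
                                            << "dropped" << false
                                            << "key" << BSON("a" << 1)
                                            << "unique" << false)));
    ASSERT(coll.getNs() == NamespaceString{"db.coll"});
    ASSERT(!coll.getEpoch().isSet());  // legacy documents carry no epoch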
diff --git a/src/mongo/s/catalog/type_config_version_test.cpp b/src/mongo/s/catalog/type_config_version_test.cpp
index dabe32c7782..db3ff5373f0 100644
--- a/src/mongo/s/catalog/type_config_version_test.cpp
+++ b/src/mongo/s/catalog/type_config_version_test.cpp
@@ -253,10 +253,10 @@ TEST(Excludes, BadRangeArray) {
<< "1.2.3"); // empty bound
BSONArray includeArr = bab.arr();
- auto versionInfoResult = VersionType::fromBSON(
- BSON(VersionType::minCompatibleVersion(3)
- << VersionType::currentVersion(4) << VersionType::clusterId(OID::gen())
- << VersionType::excludingMongoVersions(includeArr)));
+ auto versionInfoResult = VersionType::fromBSON(BSON(
+ VersionType::minCompatibleVersion(3) << VersionType::currentVersion(4)
+ << VersionType::clusterId(OID::gen())
+ << VersionType::excludingMongoVersions(includeArr)));
ASSERT_EQ(ErrorCodes::FailedToParse, versionInfoResult.getStatus());
}
diff --git a/src/mongo/s/catalog/type_locks_test.cpp b/src/mongo/s/catalog/type_locks_test.cpp
index aa169872c8a..5425b24b19b 100644
--- a/src/mongo/s/catalog/type_locks_test.cpp
+++ b/src/mongo/s/catalog/type_locks_test.cpp
@@ -45,12 +45,12 @@ TEST(Validity, Empty) {
TEST(Validity, UnlockedWithOptional) {
OID testLockID = OID::gen();
- BSONObj obj =
- BSON(LocksType::name("balancer")
- << LocksType::process("host.local:27017:1352918870:16807")
- << LocksType::state(LocksType::State::UNLOCKED) << LocksType::lockID(testLockID)
- << LocksType::who("host.local:27017:1352918870:16807:Balancer:282475249")
- << LocksType::why("doing balance round"));
+ BSONObj obj = BSON(LocksType::name("balancer")
+ << LocksType::process("host.local:27017:1352918870:16807")
+ << LocksType::state(LocksType::State::UNLOCKED)
+ << LocksType::lockID(testLockID)
+ << LocksType::who("host.local:27017:1352918870:16807:Balancer:282475249")
+ << LocksType::why("doing balance round"));
auto locksResult = LocksType::fromBSON(obj);
ASSERT_OK(locksResult.getStatus());
@@ -77,12 +77,12 @@ TEST(Validity, UnlockedWithoutOptional) {
TEST(Validity, LockedValid) {
OID testLockID = OID::gen();
- BSONObj obj =
- BSON(LocksType::name("balancer")
- << LocksType::process("host.local:27017:1352918870:16807")
- << LocksType::state(LocksType::State::LOCKED) << LocksType::lockID(testLockID)
- << LocksType::who("host.local:27017:1352918870:16807:Balancer:282475249")
- << LocksType::why("doing balance round"));
+ BSONObj obj = BSON(LocksType::name("balancer")
+ << LocksType::process("host.local:27017:1352918870:16807")
+ << LocksType::state(LocksType::State::LOCKED)
+ << LocksType::lockID(testLockID)
+ << LocksType::who("host.local:27017:1352918870:16807:Balancer:282475249")
+ << LocksType::why("doing balance round"));
auto locksResult = LocksType::fromBSON(obj);
ASSERT_OK(locksResult.getStatus());
@@ -97,11 +97,11 @@ TEST(Validity, LockedValid) {
}
TEST(Validity, LockedMissingProcess) {
- BSONObj obj =
- BSON(LocksType::name("balancer")
- << LocksType::state(LocksType::State::LOCKED) << LocksType::lockID(OID::gen())
- << LocksType::who("host.local:27017:1352918870:16807:Balancer:282475249")
- << LocksType::why("doing balance round"));
+ BSONObj obj = BSON(LocksType::name("balancer")
+ << LocksType::state(LocksType::State::LOCKED)
+ << LocksType::lockID(OID::gen())
+ << LocksType::who("host.local:27017:1352918870:16807:Balancer:282475249")
+ << LocksType::why("doing balance round"));
auto locksResult = LocksType::fromBSON(obj);
ASSERT_OK(locksResult.getStatus());
@@ -123,10 +123,11 @@ TEST(Validity, LockedMissingLockID) {
}
TEST(Validity, LockedMissingWho) {
- BSONObj obj = BSON(LocksType::name("balancer")
- << LocksType::process("host.local:27017:1352918870:16807")
- << LocksType::state(LocksType::State::LOCKED)
- << LocksType::lockID(OID::gen()) << LocksType::why("doing balance round"));
+ BSONObj obj =
+ BSON(LocksType::name("balancer") << LocksType::process("host.local:27017:1352918870:16807")
+ << LocksType::state(LocksType::State::LOCKED)
+ << LocksType::lockID(OID::gen())
+ << LocksType::why("doing balance round"));
auto locksResult = LocksType::fromBSON(obj);
ASSERT_OK(locksResult.getStatus());
@@ -135,11 +136,11 @@ TEST(Validity, LockedMissingWho) {
}
TEST(Validity, LockedMissingWhy) {
- BSONObj obj =
- BSON(LocksType::name("balancer")
- << LocksType::process("host.local:27017:1352918870:16807")
- << LocksType::state(LocksType::State::LOCKED) << LocksType::lockID(OID::gen())
- << LocksType::who("host.local:27017:1352918870:16807:Balancer:282475249"));
+ BSONObj obj = BSON(LocksType::name("balancer")
+ << LocksType::process("host.local:27017:1352918870:16807")
+ << LocksType::state(LocksType::State::LOCKED)
+ << LocksType::lockID(OID::gen())
+ << LocksType::who("host.local:27017:1352918870:16807:Balancer:282475249"));
auto locksResult = LocksType::fromBSON(obj);
ASSERT_OK(locksResult.getStatus());
@@ -148,12 +149,12 @@ TEST(Validity, LockedMissingWhy) {
}
TEST(Validity, ContestedValid) {
- BSONObj obj =
- BSON(LocksType::name("balancer")
- << LocksType::process("host.local:27017:1352918870:16807")
- << LocksType::state(LocksType::State::LOCK_PREP) << LocksType::lockID(OID::gen())
- << LocksType::who("host.local:27017:1352918870:16807:Balancer:282475249")
- << LocksType::why("doing balance round"));
+ BSONObj obj = BSON(LocksType::name("balancer")
+ << LocksType::process("host.local:27017:1352918870:16807")
+ << LocksType::state(LocksType::State::LOCK_PREP)
+ << LocksType::lockID(OID::gen())
+ << LocksType::who("host.local:27017:1352918870:16807:Balancer:282475249")
+ << LocksType::why("doing balance round"));
auto locksResult = LocksType::fromBSON(obj);
ASSERT_OK(locksResult.getStatus());
@@ -162,11 +163,11 @@ TEST(Validity, ContestedValid) {
}
TEST(Validity, ContestedMissingProcess) {
- BSONObj obj =
- BSON(LocksType::name("balancer")
- << LocksType::state(LocksType::State::LOCK_PREP) << LocksType::lockID(OID::gen())
- << LocksType::who("host.local:27017:1352918870:16807:Balancer:282475249")
- << LocksType::why("doing balance round"));
+ BSONObj obj = BSON(LocksType::name("balancer")
+ << LocksType::state(LocksType::State::LOCK_PREP)
+ << LocksType::lockID(OID::gen())
+ << LocksType::who("host.local:27017:1352918870:16807:Balancer:282475249")
+ << LocksType::why("doing balance round"));
auto locksResult = LocksType::fromBSON(obj);
ASSERT_OK(locksResult.getStatus());
@@ -188,10 +189,11 @@ TEST(Validity, ContestedMissingLockID) {
}
TEST(Validity, ContestedMissingWho) {
- BSONObj obj = BSON(LocksType::name("balancer")
- << LocksType::process("host.local:27017:1352918870:16807")
- << LocksType::state(LocksType::State::LOCK_PREP)
- << LocksType::lockID(OID::gen()) << LocksType::why("doing balance round"));
+ BSONObj obj =
+ BSON(LocksType::name("balancer") << LocksType::process("host.local:27017:1352918870:16807")
+ << LocksType::state(LocksType::State::LOCK_PREP)
+ << LocksType::lockID(OID::gen())
+ << LocksType::why("doing balance round"));
auto locksResult = LocksType::fromBSON(obj);
ASSERT_OK(locksResult.getStatus());
@@ -200,11 +202,11 @@ TEST(Validity, ContestedMissingWho) {
}
TEST(Validity, ContestedMissingWhy) {
- BSONObj obj =
- BSON(LocksType::name("balancer")
- << LocksType::process("host.local:27017:1352918870:16807")
- << LocksType::state(LocksType::State::LOCK_PREP) << LocksType::lockID(OID::gen())
- << LocksType::who("host.local:27017:1352918870:16807:Balancer:282475249"));
+ BSONObj obj = BSON(LocksType::name("balancer")
+ << LocksType::process("host.local:27017:1352918870:16807")
+ << LocksType::state(LocksType::State::LOCK_PREP)
+ << LocksType::lockID(OID::gen())
+ << LocksType::who("host.local:27017:1352918870:16807:Balancer:282475249"));
auto locksResult = LocksType::fromBSON(obj);
ASSERT_OK(locksResult.getStatus());
diff --git a/src/mongo/s/catalog/type_mongos_test.cpp b/src/mongo/s/catalog/type_mongos_test.cpp
index 291019e0886..8fc449ab6f7 100644
--- a/src/mongo/s/catalog/type_mongos_test.cpp
+++ b/src/mongo/s/catalog/type_mongos_test.cpp
@@ -39,48 +39,54 @@ namespace {
using namespace mongo;
TEST(Validity, MissingName) {
- BSONObj obj = BSON(MongosType::ping(Date_t::fromMillisSinceEpoch(1))
- << MongosType::uptime(100) << MongosType::waiting(false)
- << MongosType::mongoVersion("x.x.x") << MongosType::configVersion(0));
+ BSONObj obj =
+ BSON(MongosType::ping(Date_t::fromMillisSinceEpoch(1)) << MongosType::uptime(100)
+ << MongosType::waiting(false)
+ << MongosType::mongoVersion("x.x.x")
+ << MongosType::configVersion(0));
auto mongosTypeResult = MongosType::fromBSON(obj);
ASSERT_EQ(ErrorCodes::NoSuchKey, mongosTypeResult.getStatus());
}
TEST(Validity, MissingPing) {
- BSONObj obj = BSON(MongosType::name("localhost:27017")
- << MongosType::uptime(100) << MongosType::waiting(false)
- << MongosType::mongoVersion("x.x.x") << MongosType::configVersion(0));
+ BSONObj obj = BSON(MongosType::name("localhost:27017") << MongosType::uptime(100)
+ << MongosType::waiting(false)
+ << MongosType::mongoVersion("x.x.x")
+ << MongosType::configVersion(0));
auto mongosTypeResult = MongosType::fromBSON(obj);
ASSERT_EQ(ErrorCodes::NoSuchKey, mongosTypeResult.getStatus());
}
TEST(Validity, MissingUp) {
- BSONObj obj =
- BSON(MongosType::name("localhost:27017")
- << MongosType::ping(Date_t::fromMillisSinceEpoch(1)) << MongosType::waiting(false)
- << MongosType::mongoVersion("x.x.x") << MongosType::configVersion(0));
+ BSONObj obj = BSON(MongosType::name("localhost:27017")
+ << MongosType::ping(Date_t::fromMillisSinceEpoch(1))
+ << MongosType::waiting(false)
+ << MongosType::mongoVersion("x.x.x")
+ << MongosType::configVersion(0));
auto mongosTypeResult = MongosType::fromBSON(obj);
ASSERT_EQ(ErrorCodes::NoSuchKey, mongosTypeResult.getStatus());
}
TEST(Validity, MissingWaiting) {
- BSONObj obj =
- BSON(MongosType::name("localhost:27017")
- << MongosType::ping(Date_t::fromMillisSinceEpoch(1)) << MongosType::uptime(100)
- << MongosType::mongoVersion("x.x.x") << MongosType::configVersion(0));
+ BSONObj obj = BSON(MongosType::name("localhost:27017")
+ << MongosType::ping(Date_t::fromMillisSinceEpoch(1))
+ << MongosType::uptime(100)
+ << MongosType::mongoVersion("x.x.x")
+ << MongosType::configVersion(0));
auto mongosTypeResult = MongosType::fromBSON(obj);
ASSERT_EQ(ErrorCodes::NoSuchKey, mongosTypeResult.getStatus());
}
TEST(Validity, MissingMongoVersion) {
- BSONObj obj =
- BSON(MongosType::name("localhost:27017")
- << MongosType::ping(Date_t::fromMillisSinceEpoch(1)) << MongosType::uptime(100)
- << MongosType::waiting(false) << MongosType::configVersion(0));
+ BSONObj obj = BSON(MongosType::name("localhost:27017")
+ << MongosType::ping(Date_t::fromMillisSinceEpoch(1))
+ << MongosType::uptime(100)
+ << MongosType::waiting(false)
+ << MongosType::configVersion(0));
auto mongosTypeResult = MongosType::fromBSON(obj);
ASSERT_OK(mongosTypeResult.getStatus());
@@ -94,10 +100,11 @@ TEST(Validity, MissingMongoVersion) {
}
TEST(Validity, MissingConfigVersion) {
- BSONObj obj =
- BSON(MongosType::name("localhost:27017")
- << MongosType::ping(Date_t::fromMillisSinceEpoch(1)) << MongosType::uptime(100)
- << MongosType::waiting(false) << MongosType::mongoVersion("x.x.x"));
+ BSONObj obj = BSON(MongosType::name("localhost:27017")
+ << MongosType::ping(Date_t::fromMillisSinceEpoch(1))
+ << MongosType::uptime(100)
+ << MongosType::waiting(false)
+ << MongosType::mongoVersion("x.x.x"));
auto mongosTypeResult = MongosType::fromBSON(obj);
ASSERT_OK(mongosTypeResult.getStatus());
@@ -113,8 +120,10 @@ TEST(Validity, MissingConfigVersion) {
TEST(Validity, Valid) {
BSONObj obj = BSON(MongosType::name("localhost:27017")
<< MongosType::ping(Date_t::fromMillisSinceEpoch(1))
- << MongosType::uptime(100) << MongosType::waiting(false)
- << MongosType::mongoVersion("x.x.x") << MongosType::configVersion(0));
+ << MongosType::uptime(100)
+ << MongosType::waiting(false)
+ << MongosType::mongoVersion("x.x.x")
+ << MongosType::configVersion(0));
auto mongosTypeResult = MongosType::fromBSON(obj);
MongosType& mType = mongosTypeResult.getValue();
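The MongosType hunks make the required/optional split explicit: omitting name, ping, up, or waiting fails at parse time with ErrorCodes::NoSuchKey, while omitting mongoVersion or configVersion still yields an OK status. A minimal sketch of both outcomes, built only from calls visible above:

    BSONObj missingPing = BSON(MongosType::name("localhost:27017")
                               << MongosType::uptime(100) << MongosType::waiting(false)
                               << MongosType::mongoVersion("x.x.x")
                               << MongosType::configVersion(0));
    ASSERT_EQ(ErrorCodes::NoSuchKey, MongosType::fromBSON(missingPing).getStatus());  // required field absent

    BSONObj missingConfigVersion = BSON(MongosType::name("localhost:27017")
                                        << MongosType::ping(Date_t::fromMillisSinceEpoch(1))
                                        << MongosType::uptime(100) << MongosType::waiting(false)
                                        << MongosType::mongoVersion("x.x.x"));
    ASSERT_OK(MongosType::fromBSON(missingConfigVersion).getStatus());                // optional field absent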
diff --git a/src/mongo/s/catalog/type_shard_test.cpp b/src/mongo/s/catalog/type_shard_test.cpp
index f89a54d5a3f..9461ee47dcb 100644
--- a/src/mongo/s/catalog/type_shard_test.cpp
+++ b/src/mongo/s/catalog/type_shard_test.cpp
@@ -72,7 +72,8 @@ TEST(ShardType, AllOptionalsPresent) {
TEST(ShardType, MaxSizeAsFloat) {
BSONObj obj = BSON(ShardType::name("shard0000") << ShardType::host("localhost:27017")
- << ShardType::maxSizeMB() << 100.0);
+ << ShardType::maxSizeMB()
+ << 100.0);
StatusWith<ShardType> shardRes = ShardType::fromBSON(obj);
ASSERT(shardRes.isOK());
ShardType shard = shardRes.getValue();
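In the MaxSizeAsFloat hunk the field helper is called with no argument, so ShardType::maxSizeMB() presumably contributes only the field name and the following 100.0 becomes its value; that is how the test feeds a floating-point size where an integer is normally stored. A minimal sketch of the check, reusing the calls shown above:

    BSONObj obj = BSON(ShardType::name("shard0000") << ShardType::host("localhost:27017")
                                                    << ShardType::maxSizeMB() << 100.0);
    StatusWith<ShardType> shardRes = ShardType::fromBSON(obj);
    ASSERT(shardRes.isOK());  // the double value is still accepted at parse time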