| | | |
|---|---|---|
| author | Misha Tyulenev <misha@mongodb.com> | 2016-05-18 14:48:27 -0400 |
| committer | Misha Tyulenev <misha@mongodb.com> | 2016-05-18 14:48:53 -0400 |
| commit | ed21c6d9ad3e0333732688b71011e69a0cb1d1dd (patch) | |
| tree | 410d6ccccd62aaf4bc554543b240c1734f5d663f | /src/mongo/s/client |
| parent | f2f6163b0b26f1b18951c3d4d0f88a833c344523 (diff) | |
| download | mongo-ed21c6d9ad3e0333732688b71011e69a0cb1d1dd.tar.gz | |
SERVER-23341 replace ShardRegistry::reload with swap
Diffstat (limited to 'src/mongo/s/client')
| Mode | Path | Lines changed |
|---|---|---|
| -rw-r--r-- | src/mongo/s/client/SConscript | 12 |
| -rw-r--r-- | src/mongo/s/client/shard_local.cpp | 4 |
| -rw-r--r-- | src/mongo/s/client/shard_registry.cpp | 304 |
| -rw-r--r-- | src/mongo/s/client/shard_registry.h | 123 |
| -rw-r--r-- | src/mongo/s/client/shard_registry_data_test.cpp | 97 |
5 files changed, 355 insertions, 185 deletions
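In outline, the change moves the lookup maps out of ShardRegistry into a new ShardRegistryData holder; reload() now builds a fresh ShardRegistryData off to the side and swaps it into place, instead of clearing and repopulating the maps while holding the registry mutex. The sketch below is a minimal, self-contained illustration of that copy-and-swap reload pattern, not the actual MongoDB API; the names (CacheData, Cache, fetchShardsFromConfig) and the shard data are made up for the example.

```cpp
#include <iostream>
#include <memory>
#include <mutex>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>

struct Shard {
    std::string id;
    std::string connString;
};

// Holds the lookup map behind its own mutex, in the spirit of ShardRegistryData.
class CacheData {
public:
    void add(std::shared_ptr<Shard> shard) {
        std::lock_guard<std::mutex> lk(_mutex);
        _byId[shard->id] = std::move(shard);
    }

    std::shared_ptr<Shard> find(const std::string& id) const {
        std::lock_guard<std::mutex> lk(_mutex);
        auto it = _byId.find(id);
        return it != _byId.end() ? it->second : nullptr;
    }

    // Exchange contents with 'other'. 'other' is a freshly built local object that no
    // other thread can see yet, so locking only this instance's mutex is sufficient.
    void swap(CacheData& other) {
        std::lock_guard<std::mutex> lk(_mutex);
        _byId.swap(other._byId);
    }

private:
    mutable std::mutex _mutex;
    std::unordered_map<std::string, std::shared_ptr<Shard>> _byId;
};

// Stand-in for reading the shard documents from the config servers.
std::vector<Shard> fetchShardsFromConfig() {
    return {{"shard0000", "rs0/a:27017,b:27017"}, {"shard0001", "rs1/c:27017,d:27017"}};
}

class Cache {
public:
    std::shared_ptr<Shard> getShard(const std::string& id) {
        if (auto s = _data.find(id))
            return s;
        reload();  // cache miss: refresh once, then retry the lookup
        return _data.find(id);
    }

    void reload() {
        // Build the replacement table without blocking concurrent lookups.
        CacheData fresh;
        for (auto& s : fetchShardsFromConfig())
            fresh.add(std::make_shared<Shard>(s));
        // Publish it; readers see either the old table or the new one, never a
        // half-rebuilt cache.
        _data.swap(fresh);
    }

private:
    CacheData _data;
};

int main() {
    Cache cache;
    if (auto s = cache.getShard("shard0001"))
        std::cout << s->id << " -> " << s->connString << "\n";
}
```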
```diff
diff --git a/src/mongo/s/client/SConscript b/src/mongo/s/client/SConscript
index 62ea2fa0fb7..dbc890279c2 100644
--- a/src/mongo/s/client/SConscript
+++ b/src/mongo/s/client/SConscript
@@ -60,6 +60,18 @@ env.CppUnitTest(
     ]
 )
 
+env.CppUnitTest(
+    target='shard_registry_test',
+    source=[
+        'shard_registry_data_test.cpp',
+    ],
+    LIBDEPS=[
+        '$BUILD_DIR/mongo/s/coreshard',
+        '$BUILD_DIR/mongo/s/mongoscore',
+        '$BUILD_DIR/mongo/s/sharding_test_fixture',
+    ]
+)
+
 env.Library(
     target='shard_interface',
     source=[
diff --git a/src/mongo/s/client/shard_local.cpp b/src/mongo/s/client/shard_local.cpp
index 3249d2d88c1..90d34be70a7 100644
--- a/src/mongo/s/client/shard_local.cpp
+++ b/src/mongo/s/client/shard_local.cpp
@@ -51,7 +51,7 @@ const Status kInternalErrorStatus{ErrorCodes::InternalError,
 }  // namespace
 
 const ConnectionString ShardLocal::getConnString() const {
-    MONGO_UNREACHABLE;
+    return ConnectionString::forLocal();
 }
 
 std::shared_ptr<RemoteCommandTargeter> ShardLocal::getTargeter() const {
@@ -59,7 +59,7 @@ std::shared_ptr<RemoteCommandTargeter> ShardLocal::getTargeter() const {
 };
 
 const ConnectionString ShardLocal::originalConnString() const {
-    MONGO_UNREACHABLE;
+    return ConnectionString::forLocal();
 }
 
 void ShardLocal::updateReplSetMonitor(const HostAndPort& remoteHost,
diff --git a/src/mongo/s/client/shard_registry.cpp b/src/mongo/s/client/shard_registry.cpp
index fb34871a644..db556719c31 100644
--- a/src/mongo/s/client/shard_registry.cpp
+++ b/src/mongo/s/client/shard_registry.cpp
@@ -61,31 +61,81 @@ using std::vector;
 
 ShardRegistry::ShardRegistry(std::unique_ptr<ShardFactory> shardFactory,
                              ConnectionString configServerCS)
-    : _shardFactory(std::move(shardFactory)) {
-    updateConfigServerConnectionString(configServerCS);
+    : _shardFactory(std::move(shardFactory)), _data() {
+    log() << "Setting config server connection string to: " << configServerCS.toString();
+    auto configShard = _shardFactory->createShard("config", configServerCS);
+    _data.addConfigShard(configShard);
 }
 
-ShardRegistry::~ShardRegistry() = default;
-
 ConnectionString ShardRegistry::getConfigServerConnectionString() const {
-    stdx::lock_guard<stdx::mutex> lk(_mutex);
-    return _configServerCS;
+    return getConfigShard()->originalConnString();
 }
 
-void ShardRegistry::updateConfigServerConnectionString(ConnectionString configServerCS) {
-    stdx::lock_guard<stdx::mutex> lk(_mutex);
-    _updateConfigServerConnectionString_inlock(std::move(configServerCS));
+void ShardRegistry::rebuildConfigShard() {
+    _data.rebuildConfigShard(_shardFactory.get());
+    invariant(_data.getConfigShard());
 }
 
-void ShardRegistry::_updateConfigServerConnectionString_inlock(ConnectionString configServerCS) {
-    log() << "Updating config server connection string to: " << configServerCS.toString();
+shared_ptr<Shard> ShardRegistry::getShard(OperationContext* txn, const ShardId& shardId) {
+    auto shard = _data.findByShardId(shardId);
+    if (shard) {
+        return shard;
+    }
+
+    // If we can't find the shard, we might just need to reload the cache
+    bool didReload = reload(txn);
+
+    shard = _data.findByShardId(shardId);
 
-    _configServerCS = std::move(configServerCS);
-    _addConfigShard_inlock();
+    if (shard || didReload) {
+        return shard;
+    }
+
+    reload(txn);
+    return _data.findByShardId(shardId);
+}
+
+shared_ptr<Shard> ShardRegistry::getShardNoReload(const ShardId& shardId) {
+    return _data.findByShardId(shardId);
+}
+
+shared_ptr<Shard> ShardRegistry::getShardForHostNoReload(const HostAndPort& host) {
+    return _data.findByHostAndPort(host);
+}
+
+shared_ptr<Shard> ShardRegistry::getConfigShard() const {
+    auto shard = _data.getConfigShard();
+    invariant(shard);
+    return shard;
+}
+
+unique_ptr<Shard> ShardRegistry::createConnection(const ConnectionString& connStr) const {
+    return _shardFactory->createUniqueShard("<unnamed>", connStr);
+}
+
+shared_ptr<Shard> ShardRegistry::lookupRSName(const string& name) const {
+    return _data.findByRSName(name);
+}
+
+void ShardRegistry::getAllShardIds(vector<ShardId>* all) const {
+    std::set<string> seen;
+    _data.getAllShardIds(seen);
+    all->assign(seen.begin(), seen.end());
+}
+
+void ShardRegistry::toBSON(BSONObjBuilder* result) const {
+    _data.toBSON(result);
+}
+
+void ShardRegistry::updateReplSetHosts(const ConnectionString& newConnString) {
+    invariant(newConnString.type() == ConnectionString::SET ||
+              newConnString.type() == ConnectionString::CUSTOM);  // For dbtests
+
+    _data.rebuildShardIfExists(newConnString, _shardFactory.get());
 }
 
 bool ShardRegistry::reload(OperationContext* txn) {
-    stdx::unique_lock<stdx::mutex> lk(_mutex);
+    stdx::unique_lock<stdx::mutex> reloadLock(_reloadMutex);
 
     if (_reloadState == ReloadState::Reloading) {
         // Another thread is already in the process of reloading so no need to do duplicate work.
@@ -93,7 +143,7 @@ bool ShardRegistry::reload(OperationContext* txn) {
         // simultaneously because there is no good way to determine which of the threads has the
         // more recent version of the data.
         do {
-            _inReloadCV.wait(lk);
+            _inReloadCV.wait(reloadLock);
         } while (_reloadState == ReloadState::Reloading);
 
         if (_reloadState == ReloadState::Idle) {
@@ -104,17 +154,33 @@
     }
 
     _reloadState = ReloadState::Reloading;
-    lk.unlock();
+    reloadLock.unlock();
 
     auto nextReloadState = ReloadState::Failed;
+
    auto failGuard = MakeGuard([&] {
-        if (!lk.owns_lock()) {
-            lk.lock();
+        if (!reloadLock.owns_lock()) {
+            reloadLock.lock();
         }
         _reloadState = nextReloadState;
         _inReloadCV.notify_all();
     });
+
+    ShardRegistryData newData(txn, _shardFactory.get());
+    newData.addConfigShard(_data.getConfigShard());
+    _data.swap(newData);
+
+    nextReloadState = ReloadState::Idle;
+    return true;
+}
+
+////////////// ShardRegistryData //////////////////
+ShardRegistryData::ShardRegistryData(OperationContext* txn, ShardFactory* shardFactory) {
+    _init(txn, shardFactory);
+}
+
+void ShardRegistryData::_init(OperationContext* txn, ShardFactory* shardFactory) {
     auto shardsStatus = grid.catalogManager(txn)->getAllShards(txn);
 
     if (!shardsStatus.isOK()) {
@@ -147,129 +213,60 @@ bool ShardRegistry::reload(OperationContext* txn) {
         shardsInfo.push_back(std::make_tuple(shardType.getName(), shardHostStatus.getValue()));
     }
 
-    lk.lock();
-
-    _lookup.clear();
-    _rsLookup.clear();
-    _hostLookup.clear();
-
-    _addConfigShard_inlock();
-
     for (auto& shardInfo : shardsInfo) {
-        // Skip the config host even if there is one left over from legacy installations. The
-        // config host is installed manually from the catalog manager data.
         if (std::get<0>(shardInfo) == "config") {
             continue;
         }
 
-        _addShard_inlock(std::move(std::get<0>(shardInfo)), std::move(std::get<1>(shardInfo)));
-    }
+        auto shard = shardFactory->createShard(std::move(std::get<0>(shardInfo)),
+                                               std::move(std::get<1>(shardInfo)));
 
-    nextReloadState = ReloadState::Idle;
-    return true;
+        _addShard_inlock(std::move(shard));
+    }
 }
 
-void ShardRegistry::rebuildConfigShard() {
+void ShardRegistryData::swap(ShardRegistryData& other) {
     stdx::lock_guard<stdx::mutex> lk(_mutex);
-    _addConfigShard_inlock();
+    _lookup.swap(other._lookup);
+    _rsLookup.swap(other._rsLookup);
+    _hostLookup.swap(other._hostLookup);
+    _configShard.swap(other._configShard);
 }
 
-shared_ptr<Shard> ShardRegistry::getShard(OperationContext* txn, const ShardId& shardId) {
-    shared_ptr<Shard> shard = _findUsingLookUp(shardId);
-    if (shard) {
-        return shard;
-    }
-
-    // If we can't find the shard, we might just need to reload the cache
-    bool didReload = reload(txn);
-
-    shard = _findUsingLookUp(shardId);
-
-    if (shard || didReload) {
-        return shard;
-    }
-
-    reload(txn);
-    return _findUsingLookUp(shardId);
-}
-
-shared_ptr<Shard> ShardRegistry::getShardNoReload(const ShardId& shardId) {
-    return _findUsingLookUp(shardId);
+shared_ptr<Shard> ShardRegistryData::getConfigShard() const {
+    stdx::lock_guard<stdx::mutex> lk(_mutex);
+    return _configShard;
 }
 
-shared_ptr<Shard> ShardRegistry::getShardForHostNoReload(const HostAndPort& host) {
+void ShardRegistryData::addConfigShard(std::shared_ptr<Shard> shard) {
     stdx::lock_guard<stdx::mutex> lk(_mutex);
-    return mapFindWithDefault(_hostLookup, host);
+    _configShard = shard;
+    _addShard_inlock(shard);
 }
 
-shared_ptr<Shard> ShardRegistry::getConfigShard() {
-    shared_ptr<Shard> shard = _findUsingLookUp("config");
-    invariant(shard);
-    return shard;
-}
 
-unique_ptr<Shard> ShardRegistry::createConnection(const ConnectionString& connStr) const {
-    return _shardFactory->createUniqueShard("<unnamed>", connStr);
+shared_ptr<Shard> ShardRegistryData::findByRSName(const string& name) const {
+    stdx::lock_guard<stdx::mutex> lk(_mutex);
+    auto i = _rsLookup.find(name);
+    return (i != _rsLookup.end()) ? i->second : nullptr;
 }
 
-shared_ptr<Shard> ShardRegistry::lookupRSName(const string& name) const {
+shared_ptr<Shard> ShardRegistryData::findByHostAndPort(const HostAndPort& hostAndPort) const {
     stdx::lock_guard<stdx::mutex> lk(_mutex);
-    ShardMap::const_iterator i = _rsLookup.find(name);
-
-    return (i == _rsLookup.end()) ? nullptr : i->second;
+    return mapFindWithDefault(_hostLookup, hostAndPort);
 }
 
-void ShardRegistry::remove(const ShardId& id) {
+shared_ptr<Shard> ShardRegistryData::findByShardId(const ShardId& shardId) const {
     stdx::lock_guard<stdx::mutex> lk(_mutex);
-
-    set<string> entriesToRemove;
-    for (const auto& i : _lookup) {
-        shared_ptr<Shard> s = i.second;
-        if (s->getId() == id) {
-            entriesToRemove.insert(i.first);
-            ConnectionString connStr = s->getConnString();
-            for (const auto& host : connStr.getServers()) {
-                entriesToRemove.insert(host.toString());
-                _hostLookup.erase(host);
-            }
-        }
-    }
-    for (const auto& entry : entriesToRemove) {
-        _lookup.erase(entry);
-    }
-
-    for (ShardMap::iterator i = _rsLookup.begin(); i != _rsLookup.end();) {
-        shared_ptr<Shard> s = i->second;
-        if (s->getId() == id) {
-            _rsLookup.erase(i++);
-        } else {
-            ++i;
-        }
-    }
-
-    shardConnectionPool.removeHost(id);
-    ReplicaSetMonitor::remove(id);
+    return _findByShardId_inlock(shardId);
 }
 
-void ShardRegistry::getAllShardIds(vector<ShardId>* all) const {
-    std::set<string> seen;
-
-    {
-        stdx::lock_guard<stdx::mutex> lk(_mutex);
-        for (ShardMap::const_iterator i = _lookup.begin(); i != _lookup.end(); ++i) {
-            const shared_ptr<Shard>& s = i->second;
-            if (s->getId() == "config") {
-                continue;
-            }
-
-            seen.insert(s->getId());
-        }
-    }
-
-    all->assign(seen.begin(), seen.end());
+shared_ptr<Shard> ShardRegistryData::_findByShardId_inlock(const ShardId& shardId) const {
+    auto i = _lookup.find(shardId);
+    return (i != _lookup.end()) ? i->second : nullptr;
 }
 
-void ShardRegistry::toBSON(BSONObjBuilder* result) {
+void ShardRegistryData::toBSON(BSONObjBuilder* result) const {
     // Need to copy, then sort by shardId.
     std::vector<std::pair<ShardId, std::string>> shards;
     {
@@ -288,28 +285,58 @@ void ShardRegistry::toBSON(BSONObjBuilder* result) {
     }
 }
 
-void ShardRegistry::_addConfigShard_inlock() {
-    _addShard_inlock("config", _configServerCS);
+void ShardRegistryData::getAllShardIds(std::set<string>& seen) const {
+    stdx::lock_guard<stdx::mutex> lk(_mutex);
+    for (auto i = _lookup.begin(); i != _lookup.end(); ++i) {
+        const auto& s = i->second;
+        if (s->getId() == "config") {
+            continue;
+        }
+        seen.insert(s->getId());
+    }
 }
 
-void ShardRegistry::updateReplSetHosts(const ConnectionString& newConnString) {
-    invariant(newConnString.type() == ConnectionString::SET ||
-              newConnString.type() == ConnectionString::CUSTOM);  // For dbtests
-
+void ShardRegistryData::addShard(const std::shared_ptr<Shard>& shard) {
     stdx::lock_guard<stdx::mutex> lk(_mutex);
-    ShardMap::const_iterator i = _rsLookup.find(newConnString.getSetName());
-    if (i == _rsLookup.end())
+    _addShard_inlock(shard);
+}
+
+void ShardRegistryData::rebuildConfigShard(ShardFactory* factory) {
+    stdx::unique_lock<stdx::mutex> rebuildConfigShardLock(_mutex);
+
+    ConnectionString configConnString = _configShard->originalConnString();
+
+    _rebuildShard_inlock(configConnString, factory);
+}
+
+void ShardRegistryData::rebuildShardIfExists(const ConnectionString& newConnString,
+                                             ShardFactory* factory) {
+    stdx::unique_lock<stdx::mutex> updateConnStringLock(_mutex);
+    auto it = _rsLookup.find(newConnString.getSetName());
+    if (it == _rsLookup.end()) {
         return;
-    auto shard = i->second;
+    }
+
+    _rebuildShard_inlock(newConnString, factory);
+}
+
+
+void ShardRegistryData::_rebuildShard_inlock(const ConnectionString& newConnString,
+                                             ShardFactory* factory) {
+    auto it = _rsLookup.find(newConnString.getSetName());
+    invariant(it->second);
+    auto shard = factory->createShard(it->second->getId(),
+                                      newConnString);
+    _addShard_inlock(shard);
     if (shard->isConfig()) {
-        _updateConfigServerConnectionString_inlock(newConnString);
-    } else {
-        _addShard_inlock(shard->getId(), newConnString);
+        _configShard = shard;
     }
 }
 
-void ShardRegistry::_addShard_inlock(const ShardId& shardId, const ConnectionString& connString) {
-    auto currentShard = _findUsingLookUp_inlock(shardId);
+void ShardRegistryData::_addShard_inlock(const std::shared_ptr<Shard>& shard) {
+    const ShardId shardId = shard->getId();
+    const ConnectionString connString = shard->originalConnString();
+
+    auto currentShard = _findByShardId_inlock(shardId);
 
     if (currentShard) {
         auto oldConnString = currentShard->originalConnString();
@@ -322,19 +349,16 @@ void ShardRegistry::_addShard_inlock(const ShardId& shardId, const ConnectionStr
             _lookup.erase(host.toString());
             _hostLookup.erase(host);
         }
+        _lookup.erase(oldConnString.toString());
     }
 
-    // TODO: the third argument should pass the bool that will instruct factory to create either
-    // local or remote shard.
-    auto shard = _shardFactory->createShard(shardId, connString);
-
     _lookup[shard->getId()] = shard;
 
     if (connString.type() == ConnectionString::SET) {
         _rsLookup[connString.getSetName()] = shard;
     } else if (connString.type() == ConnectionString::CUSTOM) {
         // CUSTOM connection strings (ie "$dummy:10000) become DBDirectClient connections which
-        // always return "localhost" as their resposne to getServerAddress(). This is just for
+        // always return "localhost" as their response to getServerAddress(). This is just for
        // making dbtest work.
         _lookup["localhost"] = shard;
         _hostLookup[HostAndPort{"localhost"}] = shard;
@@ -352,18 +376,4 @@ void ShardRegistry::_addShard_inlock(const ShardId& shardId, const ConnectionStr
     }
 }
 
-shared_ptr<Shard> ShardRegistry::_findUsingLookUp(const ShardId& shardId) {
-    stdx::lock_guard<stdx::mutex> lk(_mutex);
-    return _findUsingLookUp_inlock(shardId);
-}
-
-shared_ptr<Shard> ShardRegistry::_findUsingLookUp_inlock(const ShardId& shardId) {
-    ShardMap::iterator it = _lookup.find(shardId);
-    if (it != _lookup.end()) {
-        return it->second;
-    }
-
-    return nullptr;
-}
-
 }  // namespace mongo
diff --git a/src/mongo/s/client/shard_registry.h b/src/mongo/s/client/shard_registry.h
index c071c24adb7..94229503057 100644
--- a/src/mongo/s/client/shard_registry.h
+++ b/src/mongo/s/client/shard_registry.h
@@ -32,6 +32,7 @@
 #include <string>
 #include <unordered_map>
 #include <vector>
+#include <set>
 
 #include "mongo/base/disallow_copying.h"
 #include "mongo/db/jsobj.h"
@@ -49,6 +50,85 @@ class ShardFactory;
 class Shard;
 class ShardType;
 
+class ShardRegistryData {
+public:
+    ShardRegistryData(OperationContext* txn, ShardFactory* shardFactory);
+    ShardRegistryData() = default;
+    ~ShardRegistryData() = default;
+
+
+    void swap(ShardRegistryData& other);
+
+    /**
+     * Creates a shard based on the specified information and puts it into the lookup maps.
+     */
+    void addShard(const std::shared_ptr<Shard>&);
+
+    /**
+     * Lookup shard by replica set name. Returns nullptr if the name can't be found.
+     */
+    std::shared_ptr<Shard> findByRSName(const std::string& rsName) const;
+
+    /**
+     * Returns a shared pointer to the shard object with the given shard id.
+     */
+    std::shared_ptr<Shard> findByShardId(const ShardId&) const;
+
+    /**
+     * Finds the Shard that the mongod listening at this HostAndPort is a member of.
+     */
+    std::shared_ptr<Shard> findByHostAndPort(const HostAndPort&) const;
+
+    /**
+     * Returns config shard.
+     */
+    std::shared_ptr<Shard> getConfigShard() const;
+
+    /**
+     * Adds config shard.
+     */
+    void addConfigShard(std::shared_ptr<Shard>);
+
+    void getAllShardIds(std::set<ShardId>& result) const;
+    void toBSON(BSONObjBuilder* result) const;
+    /**
+     * If the shard with same replica set name as in the newConnString already exists then replace
+     * it with the shard built for the newConnString.
+     */
+    void rebuildShardIfExists(const ConnectionString& newConnString, ShardFactory* factory);
+
+    /**
+     * Rebuilds config shard. The result is to recreate a ReplicaSetMonitor in the case it does
+     * not exist.
+     */
+    void rebuildConfigShard(ShardFactory* factory);
+
+private:
+    /**
+     * Reads shards docs from the catalog manager and fills in maps.
+     */
+    void _init(OperationContext* txn, ShardFactory* factory);
+
+    void _addShard_inlock(const std::shared_ptr<Shard>&);
+    std::shared_ptr<Shard> _findByShardId_inlock(const ShardId&) const;
+    void _rebuildShard_inlock(const ConnectionString& newConnString, ShardFactory* factory);
+
+    // Protects the lookup maps below.
+    mutable stdx::mutex _mutex;
+    using ShardMap = std::unordered_map<ShardId, std::shared_ptr<Shard>>;
+
+    // Map of both shardName -> Shard and hostName -> Shard
+    ShardMap _lookup;
+
+    // Map from replica set name to shard corresponding to this replica set
+    ShardMap _rsLookup;
+
+    std::unordered_map<HostAndPort, std::shared_ptr<Shard>> _hostLookup;
+
+    // store configShard separately to always have a reference
+    std::shared_ptr<Shard> _configShard;
+};
+
 /**
  * Maintains the set of all shards known to the instance and their connections and exposes
  * functionality to run commands against shards. All commands which this registry executes are
@@ -67,7 +147,7 @@ public:
      */
     ShardRegistry(std::unique_ptr<ShardFactory> shardFactory, ConnectionString configServerCS);
 
-    ~ShardRegistry();
+    ~ShardRegistry() = default;
 
     ConnectionString getConfigServerConnectionString() const;
 
@@ -127,7 +207,7 @@ public:
    /**
     * Returns shared pointer to the shard object representing the config servers.
     */
-    std::shared_ptr<Shard> getConfigShard();
+    std::shared_ptr<Shard> getConfigShard() const;
 
    /**
     * Instantiates a new detached shard connection, which does not appear in the list of shards
@@ -146,37 +226,18 @@ public:
     */
    std::shared_ptr<Shard> lookupRSName(const std::string& name) const;
 
-    void remove(const ShardId& id);
-
     void getAllShardIds(std::vector<ShardId>* all) const;
-
-    void toBSON(BSONObjBuilder* result);
+    void toBSON(BSONObjBuilder* result) const;
 
 private:
-    using ShardMap = std::unordered_map<ShardId, std::shared_ptr<Shard>>;
-
-    /**
-     * Creates a shard based on the specified information and puts it into the lookup maps.
-     */
-    void _addShard_inlock(const ShardId& shardId, const ConnectionString& connString);
-
-    /**
-     * Adds the "config" shard (representing the config server) to the shard registry.
-     */
-    void _addConfigShard_inlock();
-
-    void _updateConfigServerConnectionString_inlock(ConnectionString configServerCS);
-
-    std::shared_ptr<Shard> _findUsingLookUp(const ShardId& shardId);
-    std::shared_ptr<Shard> _findUsingLookUp_inlock(const ShardId& shardId);
-
     // Factory to create shards. Never changed after startup so safe
     // to access outside of _mutex.
     const std::unique_ptr<ShardFactory> _shardFactory;
 
-    // Protects the _reloadState, config server connections string, and the lookup maps below.
-    mutable stdx::mutex _mutex;
+    ShardRegistryData _data;
 
+    // Protects the _reloadState.
+    mutable stdx::mutex _reloadMutex;
     stdx::condition_variable _inReloadCV;
 
     enum class ReloadState {
@@ -186,17 +247,7 @@ private:
     };
 
     ReloadState _reloadState{ReloadState::Idle};
-
-    // Config server connection string
-    ConnectionString _configServerCS;
-
-    // Map of both shardName -> Shard and hostName -> Shard
-    ShardMap _lookup;
-
-    // Map from replica set name to shard corresponding to this replica set
-    ShardMap _rsLookup;
-
-    std::unordered_map<HostAndPort, std::shared_ptr<Shard>> _hostLookup;
 };
 
+
 }  // namespace mongo
diff --git a/src/mongo/s/client/shard_registry_data_test.cpp b/src/mongo/s/client/shard_registry_data_test.cpp
new file mode 100644
index 00000000000..39ca223e5d8
--- /dev/null
+++ b/src/mongo/s/client/shard_registry_data_test.cpp
@@ -0,0 +1,97 @@
+/**
+ *    Copyright (C) 2016 MongoDB Inc.
+ *
+ *    This program is free software: you can redistribute it and/or modify
+ *    it under the terms of the GNU Affero General Public License, version 3,
+ *    as published by the Free Software Foundation.
+ *
+ *    This program is distributed in the hope that it will be useful,
+ *    but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *    GNU Affero General Public License for more details.
+ *
+ *    You should have received a copy of the GNU Affero General Public License
+ *    along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ *    As a special exception, the copyright holders give permission to link the
+ *    code of portions of this program with the OpenSSL library under certain
+ *    conditions as described in each individual source file and distribute
+ *    linked combinations including the program with the OpenSSL library. You
+ *    must comply with the GNU Affero General Public License in all respects
+ *    for all of the code used other than as permitted herein. If you modify
+ *    file(s) with this exception, you may extend this exception to your
+ *    version of the file(s), but you are not obligated to do so. If you do not
+ *    wish to do so, delete this exception statement from your version. If you
+ *    delete this exception statement from all source files in the program,
+ *    then also delete it in the license file.
+ */
+
+#include "mongo/platform/basic.h"
+
+#include <utility>
+
+#include "mongo/base/status.h"
+#include "mongo/base/status_with.h"
+#include "mongo/bson/json.h"
+#include "mongo/client/remote_command_targeter_factory_mock.h"
+#include "mongo/client/remote_command_targeter_mock.h"
+#include "mongo/s/client/shard_factory.h"
+#include "mongo/s/client/shard_registry.h"
+#include "mongo/s/client/shard_remote.h"
+#include "mongo/stdx/memory.h"
+#include "mongo/unittest/unittest.h"
+#include "mongo/util/time_support.h"
+
+namespace mongo {
+namespace {
+
+class ShardRegistryDataTest : public mongo::unittest::Test {
+public:
+    ShardFactory* shardFactory() {
+        return _shardFactory.get();
+    }
+
+private:
+    void setUp() override {
+        auto targeterFactory = stdx::make_unique<RemoteCommandTargeterFactoryMock>();
+        auto targeterFactoryPtr = targeterFactory.get();
+
+        ShardFactory::BuilderCallable setBuilder =
+            [targeterFactoryPtr](const ShardId& shardId, const ConnectionString& connStr) {
+                return stdx::make_unique<ShardRemote>(
+                    shardId, connStr, targeterFactoryPtr->create(connStr));
+            };
+
+        ShardFactory::BuilderCallable masterBuilder =
+            [targeterFactoryPtr](const ShardId& shardId, const ConnectionString& connStr) {
+                return stdx::make_unique<ShardRemote>(
+                    shardId, connStr, targeterFactoryPtr->create(connStr));
+            };
+
+        ShardFactory::BuildersMap buildersMap{
+            {ConnectionString::SET, std::move(setBuilder)},
+            {ConnectionString::MASTER, std::move(masterBuilder)},
+        };
+
+        _shardFactory = std::move(
+            stdx::make_unique<ShardFactory>(std::move(buildersMap), std::move(targeterFactory)));
+    }
+
+    void tearDown() override {}
+
+    std::unique_ptr<ShardFactory> _shardFactory;
+};
+
+
+TEST_F(ShardRegistryDataTest, AddConfigShard) {
+    ConnectionString configCS("rs/dummy1:1234,dummy2:2345,dummy3:3456", ConnectionString::SET);
+    auto configShard = shardFactory()->createShard("config", configCS);
+
+    ShardRegistryData data;
+    data.addConfigShard(configShard);
+
+    ASSERT_EQUALS(configCS.toString(), data.getConfigShard()->originalConnString().toString());
+}
+
+}  // unnamed namespace
+}  // namespace mongo
```