author     Kaloian Manassiev <kaloian.manassiev@mongodb.com>   2017-02-06 10:13:39 -0500
committer  Kaloian Manassiev <kaloian.manassiev@mongodb.com>   2017-02-06 18:10:52 -0500
commit     027b8038e23a6e32d1cd35bb27f5f6286a813d86 (patch)
tree       99feeea18ff5a5019405587423ac3cea4c190561 /src/mongo/s
parent     5a35a6c425b815ecf7795d8e2706b9f3e82c7ce6 (diff)
download   mongo-027b8038e23a6e32d1cd35bb27f5f6286a813d86.tar.gz
SERVER-27382 Remove usages of DBConfig::reload/getChunkManagerOrPrimary from legacy code paths
This reverts commit c5f52fb3265ef68ffe2741f88758082dda393a4b.
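With this change, call sites that previously used DBConfig::getChunkManagerOrPrimary and its two out-parameters instead obtain a ScopedChunkManager (see the sharding_raii.h hunk below), which on success carries exactly one of a chunk manager (sharded collection) or a primary shard (unsharded collection). A minimal sketch of the resulting call pattern follows; the routeToCollection wrapper is hypothetical and not part of this commit:

    // Hypothetical caller, shown only to illustrate the ScopedChunkManager API
    // added in this commit. On success, exactly one of cm()/primary() is non-null.
    Status routeToCollection(OperationContext* txn, const NamespaceString& nss) {
        auto scopedCMStatus = ScopedChunkManager::get(txn, nss);
        if (!scopedCMStatus.isOK()) {
            return scopedCMStatus.getStatus();
        }

        const auto& scopedCM = scopedCMStatus.getValue();

        if (auto manager = scopedCM.cm()) {
            // Sharded collection: target chunks through the chunk manager.
        } else {
            auto primary = scopedCM.primary();
            // Unsharded collection: target the database's primary shard.
        }

        return Status::OK();
    }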
Diffstat (limited to 'src/mongo/s')
-rw-r--r--  src/mongo/s/catalog/catalog_cache.cpp             20
-rw-r--r--  src/mongo/s/catalog/catalog_cache.h               27
-rw-r--r--  src/mongo/s/chunk_manager.cpp                      3
-rw-r--r--  src/mongo/s/client/version_manager.cpp            70
-rw-r--r--  src/mongo/s/client/version_manager.h               1
-rw-r--r--  src/mongo/s/commands/cluster_commands_common.cpp  24
-rw-r--r--  src/mongo/s/commands/cluster_map_reduce_cmd.cpp    2
-rw-r--r--  src/mongo/s/commands/commands_public.cpp           2
-rw-r--r--  src/mongo/s/commands/strategy.cpp                 14
-rw-r--r--  src/mongo/s/sharding_raii.cpp                     40
-rw-r--r--  src/mongo/s/sharding_raii.h                       37
11 files changed, 152 insertions, 88 deletions
diff --git a/src/mongo/s/catalog/catalog_cache.cpp b/src/mongo/s/catalog/catalog_cache.cpp
index cde72edbc46..b418977f8b2 100644
--- a/src/mongo/s/catalog/catalog_cache.cpp
+++ b/src/mongo/s/catalog/catalog_cache.cpp
@@ -41,11 +41,12 @@ namespace mongo {
 using std::shared_ptr;
 using std::string;
 
+CatalogCache::CatalogCache() = default;
 
-CatalogCache::CatalogCache() {}
+CatalogCache::~CatalogCache() = default;
 
-StatusWith<shared_ptr<DBConfig>> CatalogCache::getDatabase(OperationContext* txn,
-                                                           const string& dbName) {
+StatusWith<std::shared_ptr<DBConfig>> CatalogCache::getDatabase(OperationContext* txn,
+                                                                StringData dbName) {
     stdx::lock_guard<stdx::mutex> guard(_mutex);
 
     ShardedDatabasesMap::iterator it = _databases.find(dbName);
@@ -54,25 +55,26 @@ StatusWith<shared_ptr<DBConfig>> CatalogCache::getDatabase(OperationContext* txn
     }
 
     // Need to load from the store
-    auto status = Grid::get(txn)->catalogClient(txn)->getDatabase(txn, dbName);
+    auto status = Grid::get(txn)->catalogClient(txn)->getDatabase(txn, dbName.toString());
     if (!status.isOK()) {
         return status.getStatus();
    }
 
-    const auto dbOpTimePair = status.getValue();
-    shared_ptr<DBConfig> db = std::make_shared<DBConfig>(dbOpTimePair.value, dbOpTimePair.opTime);
+    const auto& dbOpTimePair = status.getValue();
+    auto db = std::make_shared<DBConfig>(dbOpTimePair.value, dbOpTimePair.opTime);
     try {
         db->load(txn);
     } catch (const DBException& excep) {
         return excep.toStatus();
     }
 
-    invariant(_databases.insert(std::make_pair(dbName, db)).second);
+    auto emplaceResult = _databases.try_emplace(dbName, std::move(db));
+    invariant(emplaceResult.second);
 
-    return db;
+    return emplaceResult.first->second;
 }
 
-void CatalogCache::invalidate(const string& dbName) {
+void CatalogCache::invalidate(StringData dbName) {
     stdx::lock_guard<stdx::mutex> guard(_mutex);
 
     ShardedDatabasesMap::iterator it = _databases.find(dbName);
diff --git a/src/mongo/s/catalog/catalog_cache.h b/src/mongo/s/catalog/catalog_cache.h
index 5d87d89aac9..8d30c1aebf0 100644
--- a/src/mongo/s/catalog/catalog_cache.h
+++ b/src/mongo/s/catalog/catalog_cache.h
@@ -28,12 +28,12 @@
 
 #pragma once
 
-#include <map>
 #include <memory>
-#include <string>
 
 #include "mongo/base/disallow_copying.h"
+#include "mongo/base/string_data.h"
 #include "mongo/stdx/mutex.h"
+#include "mongo/util/string_map.h"
 
 namespace mongo {
 
@@ -42,7 +42,6 @@ class OperationContext;
 template <typename T>
 class StatusWith;
 
-
 /**
  * This is the root of the "read-only" hierarchy of cached catalog metadata. It is read only
  * in the sense that it only reads from the persistent store, but never writes to it. Instead
@@ -53,24 +52,23 @@ class CatalogCache {
 public:
     CatalogCache();
+    ~CatalogCache();
 
     /**
-     * Retrieves the cached metadata for the specified database. The returned value is still
-     * owned by the cache and it should not be cached elsewhere, but instead only used as a
-     * local variable. The reason for this is so that if the cache gets invalidated, the caller
-     * does not miss getting the most up-to-date value.
+     * Retrieves the cached metadata for the specified database. The returned value is still owned
+     * by the cache and should not be kept elsewhere. I.e., it should only be used as a local
+     * variable. The reason for this is so that if the cache gets invalidated, the caller does not
+     * miss getting the most up-to-date value.
      *
-     * @param dbname The name of the database (must not contain dots, etc).
-     * @return The database if it exists, NULL otherwise.
+     * Returns the database cache entry if the database exists or a failed status otherwise.
      */
-    StatusWith<std::shared_ptr<DBConfig>> getDatabase(OperationContext* txn,
-                                                      const std::string& dbName);
+    StatusWith<std::shared_ptr<DBConfig>> getDatabase(OperationContext* txn, StringData dbName);
 
     /**
      * Removes the database information for the specified name from the cache, so that the
     * next time getDatabase is called, it will be reloaded.
      */
-    void invalidate(const std::string& dbName);
+    void invalidate(StringData dbName);
 
     /**
      * Purges all cached database information, which will cause the data to be reloaded again.
@@ -78,10 +76,11 @@ public:
     void invalidateAll();
 
 private:
-    typedef std::map<std::string, std::shared_ptr<DBConfig>> ShardedDatabasesMap;
+    using ShardedDatabasesMap = StringMap<std::shared_ptr<DBConfig>>;
 
-    // Databases catalog map and mutex to protect it
+    // Mutex to serialize access to the structures below
     stdx::mutex _mutex;
+
     ShardedDatabasesMap _databases;
 };
diff --git a/src/mongo/s/chunk_manager.cpp b/src/mongo/s/chunk_manager.cpp
index 324be082da5..3dc01a59dba 100644
--- a/src/mongo/s/chunk_manager.cpp
+++ b/src/mongo/s/chunk_manager.cpp
@@ -518,8 +518,7 @@ StatusWith<shared_ptr<Chunk>> ChunkManager::findIntersectingChunk(OperationConte
 
     // Proactively force a reload on the chunk manager in case it somehow got inconsistent
     const NamespaceString nss(_ns);
-    auto config =
-        uassertStatusOK(Grid::get(txn)->catalogCache()->getDatabase(txn, nss.db().toString()));
+    auto config = uassertStatusOK(Grid::get(txn)->catalogCache()->getDatabase(txn, nss.db()));
     config->getChunkManagerIfExists(txn, nss.ns(), true);
 
     msgasserted(13141, "Chunk map pointed to incorrect chunk");
diff --git a/src/mongo/s/client/version_manager.cpp b/src/mongo/s/client/version_manager.cpp
index 3cb09cc0db6..c82e9afa10d 100644
--- a/src/mongo/s/client/version_manager.cpp
+++ b/src/mongo/s/client/version_manager.cpp
@@ -36,14 +36,13 @@
 #include "mongo/db/namespace_string.h"
 #include "mongo/s/catalog/catalog_cache.h"
 #include "mongo/s/catalog/sharding_catalog_client.h"
-#include "mongo/s/chunk_manager.h"
 #include "mongo/s/chunk_version.h"
 #include "mongo/s/client/shard_connection.h"
 #include "mongo/s/client/shard_registry.h"
-#include "mongo/s/config.h"
 #include "mongo/s/grid.h"
 #include "mongo/s/mongos_options.h"
 #include "mongo/s/set_shard_version_request.h"
+#include "mongo/s/sharding_raii.h"
 #include "mongo/s/stale_exception.h"
 #include "mongo/util/log.h"
 
@@ -253,24 +252,26 @@ bool checkShardVersion(OperationContext* txn,
         return initShardVersionEmptyNS(txn, conn_in);
     }
 
-    auto status = grid.catalogCache()->getDatabase(txn, nsToDatabase(ns));
-    if (!status.isOK()) {
-        return false;
-    }
-
     DBClientBase* const conn = getVersionable(conn_in);
     verify(conn);  // errors thrown above
 
-    shared_ptr<DBConfig> conf = status.getValue();
+    const NamespaceString nss(ns);
 
     if (authoritative) {
-        conf->getChunkManagerIfExists(txn, ns, true);
+        ScopedChunkManager::refreshAndGet(txn, nss);
     }
 
-    shared_ptr<Shard> primary;
-    shared_ptr<ChunkManager> manager;
+    auto scopedCMStatus = ScopedChunkManager::get(txn, nss);
+
+    if (!scopedCMStatus.isOK()) {
+        return false;
+    }
 
-    conf->getChunkManagerOrPrimary(txn, ns, manager, primary);
+    const auto& scopedCM = scopedCMStatus.getValue();
+
+    auto conf = scopedCM.db();
+    const auto manager = scopedCM.cm();
+    const auto primary = scopedCM.primary();
 
     unsigned long long officialSequenceNumber = 0;
 
@@ -282,7 +283,9 @@ bool checkShardVersion(OperationContext* txn,
         return false;
     }
 
-    const auto shard = grid.shardRegistry()->getShardForHostNoReload(
+    const auto shardRegistry = Grid::get(txn)->shardRegistry();
+
+    const auto shard = shardRegistry->getShardForHostNoReload(
         uassertStatusOK(HostAndPort::parse(conn->getServerAddress())));
     uassert(ErrorCodes::ShardNotFound,
             str::stream() << conn->getServerAddress() << " is not recognized as a shard",
@@ -311,9 +314,8 @@ bool checkShardVersion(OperationContext* txn,
             throw SendStaleConfigException(ns, msg, refVersion, currentVersion);
         }
     } else if (refManager) {
-        string msg(str::stream() << "not sharded ("
-                                 << ((manager.get() == 0) ? string("<none>") : str::stream()
-                                         << manager->getSequenceNumber())
+        string msg(str::stream() << "not sharded (" << (!manager ? string("<none>") : str::stream()
+                                         << manager->getSequenceNumber())
                                  << ") but has reference manager ("
                                  << refManager->getSequenceNumber()
                                  << ") "
@@ -351,7 +353,7 @@ bool checkShardVersion(OperationContext* txn,
     if (setShardVersion(txn,
                         conn,
                         ns,
-                        grid.shardRegistry()->getConfigServerConnectionString(),
+                        shardRegistry->getConfigServerConnectionString(),
                         version,
                         manager.get(),
                         authoritative,
@@ -382,12 +384,10 @@ bool checkShardVersion(OperationContext* txn,
         warning() << "reloading full configuration for " << conf->name()
                   << ", connection state indicates significant version changes";
 
-        // reload db
-        conf->reload(txn);
-    } else {
-        // reload config
-        conf->getChunkManager(txn, ns, true);
+        Grid::get(txn)->catalogCache()->invalidate(nss.db());
     }
+
+    conf->getChunkManager(txn, nss.ns(), true);
 }
 
 const int maxNumTries = 7;
@@ -426,32 +426,6 @@ bool VersionManager::isVersionableCB(DBClientBase* conn) {
     return conn->type() == ConnectionString::MASTER || conn->type() == ConnectionString::SET;
 }
 
-bool VersionManager::forceRemoteCheckShardVersionCB(OperationContext* txn, const string& ns) {
-    const NamespaceString nss(ns);
-
-    // This will force the database catalog entry to be reloaded
-    grid.catalogCache()->invalidate(nss.db().toString());
-
-    auto status = grid.catalogCache()->getDatabase(txn, nss.db().toString());
-    if (!status.isOK()) {
-        return false;
-    }
-
-    shared_ptr<DBConfig> conf = status.getValue();
-
-    // If we don't have a collection, don't refresh the chunk manager
-    if (nsGetCollection(ns).size() == 0) {
-        return false;
-    }
-
-    auto manager = conf->getChunkManagerIfExists(txn, ns, true, true);
-    if (!manager) {
-        return false;
-    }
-
-    return true;
-}
-
 bool VersionManager::checkShardVersionCB(OperationContext* txn,
                                          DBClientBase* conn_in,
                                          const string& ns,
diff --git a/src/mongo/s/client/version_manager.h b/src/mongo/s/client/version_manager.h
index f03fb4f34c8..8c02f12161e 100644
--- a/src/mongo/s/client/version_manager.h
+++ b/src/mongo/s/client/version_manager.h
@@ -42,7 +42,6 @@ public:
     VersionManager() {}
 
     bool isVersionableCB(DBClientBase*);
-    bool forceRemoteCheckShardVersionCB(OperationContext* txn, const std::string&);
     bool checkShardVersionCB(OperationContext*, DBClientBase*, const std::string&, bool, int);
     bool checkShardVersionCB(OperationContext*, ShardConnection*, bool, int);
     void resetShardVersionCB(DBClientBase*);
diff --git a/src/mongo/s/commands/cluster_commands_common.cpp b/src/mongo/s/commands/cluster_commands_common.cpp
index 8a309d3d31b..020bac131b3 100644
--- a/src/mongo/s/commands/cluster_commands_common.cpp
+++ b/src/mongo/s/commands/cluster_commands_common.cpp
@@ -35,10 +35,12 @@
 #include "mongo/client/parallel.h"
 #include "mongo/db/commands.h"
 #include "mongo/db/query/cursor_response.h"
+#include "mongo/s/catalog/catalog_cache.h"
 #include "mongo/s/catalog/type_collection.h"
 #include "mongo/s/client/shard_connection.h"
 #include "mongo/s/client/version_manager.h"
 #include "mongo/s/grid.h"
+#include "mongo/s/sharding_raii.h"
 #include "mongo/s/stale_exception.h"
 #include "mongo/util/log.h"
 
@@ -47,6 +49,26 @@ namespace mongo {
 using std::shared_ptr;
 using std::string;
 
+namespace {
+
+bool forceRemoteCheckShardVersionCB(OperationContext* txn, const string& ns) {
+    const NamespaceString nss(ns);
+
+    // This will force the database catalog entry to be reloaded
+    Grid::get(txn)->catalogCache()->invalidate(nss.db());
+
+    auto scopedCMStatus = ScopedChunkManager::get(txn, nss);
+    if (!scopedCMStatus.isOK()) {
+        return false;
+    }
+
+    const auto& scopedCM = scopedCMStatus.getValue();
+
+    return scopedCM.cm() != nullptr;
+}
+
+}  // namespace
+
 Future::CommandResult::CommandResult(const string& server,
                                      const string& db,
                                      const BSONObj& cmd,
@@ -133,7 +155,7 @@ bool Future::CommandResult::join(OperationContext* txn, int maxRetries) {
             }
 
             if (i >= maxRetries / 2) {
-                if (!versionManager.forceRemoteCheckShardVersionCB(txn, staleNS)) {
+                if (!forceRemoteCheckShardVersionCB(txn, staleNS)) {
                     error() << "Future::spawnCommand (part 2) no config detected"
                             << causedBy(redact(e));
                     throw e;
diff --git a/src/mongo/s/commands/cluster_map_reduce_cmd.cpp b/src/mongo/s/commands/cluster_map_reduce_cmd.cpp
index 232aefa5bd2..b9c8f8161b2 100644
--- a/src/mongo/s/commands/cluster_map_reduce_cmd.cpp
+++ b/src/mongo/s/commands/cluster_map_reduce_cmd.cpp
@@ -477,7 +477,7 @@ public:
             // If the database has sharding already enabled, we can ignore the error
             if (status.isOK()) {
                 // Invalidate the output database so it gets reloaded on the next fetch attempt
-                Grid::get(txn)->catalogCache()->invalidate(outputCollNss.db().toString());
+                Grid::get(txn)->catalogCache()->invalidate(outputCollNss.db());
             } else if (status != ErrorCodes::AlreadyInitialized) {
                 uassertStatusOK(status);
             }
diff --git a/src/mongo/s/commands/commands_public.cpp b/src/mongo/s/commands/commands_public.cpp
index 943f831dfa6..5957cea46ff 100644
--- a/src/mongo/s/commands/commands_public.cpp
+++ b/src/mongo/s/commands/commands_public.cpp
@@ -1012,7 +1012,7 @@ public:
         // Note that this implementation will not handle targeting retries and fails when the
         // sharding metadata is too stale
-        auto status = Grid::get(txn)->catalogCache()->getDatabase(txn, nss.db().toString());
+        auto status = Grid::get(txn)->catalogCache()->getDatabase(txn, nss.db());
         if (!status.isOK()) {
             return Status(status.getStatus().code(),
                           str::stream() << "Passthrough command failed: " << command.toString()
diff --git a/src/mongo/s/commands/strategy.cpp b/src/mongo/s/commands/strategy.cpp
index 411fae3358f..08fde3b67a4 100644
--- a/src/mongo/s/commands/strategy.cpp
+++ b/src/mongo/s/commands/strategy.cpp
@@ -1,4 +1,4 @@
-/*
+/**
  *    Copyright (C) 2010 10gen Inc.
  *
  *    This program is free software: you can redistribute it and/or modify
@@ -57,7 +57,6 @@
 #include "mongo/s/chunk_manager.h"
 #include "mongo/s/chunk_version.h"
 #include "mongo/s/client/shard_registry.h"
-#include "mongo/s/client/version_manager.h"
 #include "mongo/s/commands/cluster_explain.h"
 #include "mongo/s/grid.h"
 #include "mongo/s/query/cluster_cursor_manager.h"
@@ -327,10 +326,11 @@ void Strategy::clientCommandOp(OperationContext* txn, const NamespaceString& nss
             ShardConnection::checkMyConnectionVersions(txn, staleNS);
             if (loops < 4) {
-                if (!versionManager.forceRemoteCheckShardVersionCB(txn, staleNS)) {
-                    LOG(1) << "Database does not exist or collection no longer sharded after a "
-                              "StaleConfigException.";
-                }
+                // This throws out the entire database cache entry in response to
+                // StaleConfigException instead of just the collection which encountered it. There
+                // is no good reason for it other than the lack of lower-granularity cache
+                // invalidation.
+                Grid::get(txn)->catalogCache()->invalidate(NamespaceString(staleNS).db());
             }
         } catch (const DBException& e) {
             OpQueryReplyBuilder reply;
@@ -387,7 +387,7 @@ void Strategy::getMore(OperationContext* txn, const NamespaceString& nss, DbMess
     // TODO: Handle stale config exceptions here from coll being dropped or sharded during op for
     // now has same semantics as legacy request.
 
-    auto statusGetDb = Grid::get(txn)->catalogCache()->getDatabase(txn, nss.db().toString());
+    auto statusGetDb = Grid::get(txn)->catalogCache()->getDatabase(txn, nss.db());
     if (statusGetDb == ErrorCodes::NamespaceNotFound) {
         replyToQuery(ResultFlag_CursorNotFound, client->session(), dbm->msg(), 0, 0, 0);
         return;
diff --git a/src/mongo/s/sharding_raii.cpp b/src/mongo/s/sharding_raii.cpp
index 87f1127257d..57b122fbce5 100644
--- a/src/mongo/s/sharding_raii.cpp
+++ b/src/mongo/s/sharding_raii.cpp
@@ -34,6 +34,7 @@
 #include "mongo/s/catalog/catalog_cache.h"
 #include "mongo/s/catalog/sharding_catalog_client.h"
 #include "mongo/s/chunk_manager.h"
+#include "mongo/s/client/shard_registry.h"
 #include "mongo/s/grid.h"
 
 namespace mongo {
@@ -81,8 +82,47 @@ StatusWith<ScopedShardDatabase> ScopedShardDatabase::getOrCreate(OperationContex
 ScopedChunkManager::ScopedChunkManager(ScopedShardDatabase db, std::shared_ptr<ChunkManager> cm)
     : _db(std::move(db)), _cm(std::move(cm)) {}
 
+ScopedChunkManager::ScopedChunkManager(ScopedShardDatabase db, std::shared_ptr<Shard> primary)
+    : _db(std::move(db)), _primary(std::move(primary)) {}
+
 ScopedChunkManager::~ScopedChunkManager() = default;
 
+StatusWith<ScopedChunkManager> ScopedChunkManager::get(OperationContext* txn,
+                                                       const NamespaceString& nss) {
+    auto scopedDbStatus = ScopedShardDatabase::getExisting(txn, nss.db());
+    if (!scopedDbStatus.isOK()) {
+        return scopedDbStatus.getStatus();
+    }
+
+    auto scopedDb = std::move(scopedDbStatus.getValue());
+
+    auto cm = scopedDb.db()->getChunkManagerIfExists(txn, nss.ns());
+    if (cm) {
+        return {ScopedChunkManager(std::move(scopedDb), std::move(cm))};
+    }
+
+    auto shardStatus =
+        Grid::get(txn)->shardRegistry()->getShard(txn, scopedDb.db()->getPrimaryId());
+    if (!shardStatus.isOK()) {
+        return {ErrorCodes::fromInt(40371),
+                str::stream() << "The primary shard for collection " << nss.ns()
+                              << " could not be loaded due to error "
+                              << shardStatus.getStatus().toString()};
+    }
+
+    return {ScopedChunkManager(std::move(scopedDb), std::move(shardStatus.getValue()))};
+}
+
+StatusWith<ScopedChunkManager> ScopedChunkManager::getOrCreate(OperationContext* txn,
+                                                               const NamespaceString& nss) {
+    auto scopedDbStatus = ScopedShardDatabase::getOrCreate(txn, nss.db());
+    if (!scopedDbStatus.isOK()) {
+        return scopedDbStatus.getStatus();
+    }
+
+    return ScopedChunkManager::get(txn, nss);
+}
+
 StatusWith<ScopedChunkManager> ScopedChunkManager::refreshAndGet(OperationContext* txn,
                                                                  const NamespaceString& nss) {
     auto scopedDbStatus = ScopedShardDatabase::getExisting(txn, nss.db());
diff --git a/src/mongo/s/sharding_raii.h b/src/mongo/s/sharding_raii.h
index 725c8b86ffe..1ec3da85b14 100644
--- a/src/mongo/s/sharding_raii.h
+++ b/src/mongo/s/sharding_raii.h
@@ -90,6 +90,23 @@ public:
     ~ScopedChunkManager();
 
     /**
+     * If the specified namespace is sharded, returns a ScopedChunkManager initialized with that
+     * collection's routing information. If it is not, the object returned is initialized with the
+     * database primary node on which the unsharded collection must reside.
+     *
+     * Returns NamespaceNotFound if the database does not exist, or any other error indicating
+     * problem communicating with the config server.
+     */
+    static StatusWith<ScopedChunkManager> get(OperationContext* txn, const NamespaceString& nss);
+
+    /**
+     * If the database holding the specified namespace does not exist, creates it and then behaves
+     * like the 'get' method above.
+     */
+    static StatusWith<ScopedChunkManager> getOrCreate(OperationContext* txn,
+                                                      const NamespaceString& nss);
+
+    /**
      * If the specified database and collection do not exist in the cache, tries to load them from
     * the config server and returns a reference. If they are already in the cache, makes a call to
     * the config server to check if there are any incremental updates to the collection chunk
@@ -107,20 +124,32 @@ public:
     }
 
     /**
-     * Returns the underlying chunk manager for which we hold reference.
+     * If the collection is sharded, returns a chunk manager for it. Otherwise, nullptr.
      */
-    ChunkManager* cm() const {
-        return _cm.get();
+    std::shared_ptr<ChunkManager> cm() const {
+        return _cm;
+    }
+
+    /**
+     * If the collection is not sharded, returns its primary shard. Otherwise, nullptr.
+     */
+    std::shared_ptr<Shard> primary() const {
+        return _primary;
     }
 
 private:
     ScopedChunkManager(ScopedShardDatabase db, std::shared_ptr<ChunkManager> cm);
 
+    ScopedChunkManager(ScopedShardDatabase db, std::shared_ptr<Shard> primary);
+
     // Scoped reference to the owning database.
     ScopedShardDatabase _db;
 
-    // Reference to the corresponding chunk manager. Never null.
+    // Reference to the corresponding chunk manager (if sharded) or null
     std::shared_ptr<ChunkManager> _cm;
+
+    // Reference to the primary of the database (if not sharded) or null
+    std::shared_ptr<Shard> _primary;
 };
 
 }  // namespace mongo
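The catalog_cache.cpp hunk above also tightens the cache-miss path: after loading, the entry is published under the mutex with try_emplace, and the caller receives the map's own shared_ptr (emplaceResult.first->second) rather than the local copy. A self-contained model of that logic, using std::unordered_map and std::mutex in place of MongoDB's StringMap and stdx types; class and member names below are illustrative only, not from the tree:

    #include <memory>
    #include <mutex>
    #include <string>
    #include <unordered_map>

    // Stand-in for mongo::DBConfig; the real type loads database metadata.
    struct DBConfigModel {};

    // Simplified model of CatalogCache::getDatabase/invalidate after this commit.
    class CatalogCacheModel {
    public:
        std::shared_ptr<DBConfigModel> getDatabase(const std::string& dbName) {
            std::lock_guard<std::mutex> guard(_mutex);

            auto it = _databases.find(dbName);
            if (it != _databases.end()) {
                return it->second;  // Cache hit
            }

            // Cache miss: load the entry, then insert it exactly once.
            // try_emplace returns {iterator, inserted}; the value is handed
            // back through the iterator, mirroring emplaceResult.first->second.
            auto db = std::make_shared<DBConfigModel>();
            auto emplaceResult = _databases.try_emplace(dbName, std::move(db));
            return emplaceResult.first->second;
        }

        // Drops the entry so the next getDatabase() call reloads it.
        void invalidate(const std::string& dbName) {
            std::lock_guard<std::mutex> guard(_mutex);
            _databases.erase(dbName);
        }

    private:
        std::mutex _mutex;  // Serializes access to _databases
        std::unordered_map<std::string, std::shared_ptr<DBConfigModel>> _databases;
    };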