author      Kaloian Manassiev <kaloian.manassiev@mongodb.com>    2017-02-02 15:25:55 -0500
committer   Kaloian Manassiev <kaloian.manassiev@mongodb.com>    2017-02-03 14:59:16 -0500
commit      58292592979ff9277ec34390469a1541315104c0 (patch)
tree        ab55b7731e06ade6bdd1de4f4fe81ae3ee98f06d /src/mongo
parent      a3f1f6f45377c44e3e2504fb8ebb26d789e02e61 (diff)
download    mongo-58292592979ff9277ec34390469a1541315104c0.tar.gz
SERVER-27382 Remove usages of DBConfig::reload/getChunkManagerOrPrimary from legacy code paths
Diffstat (limited to 'src/mongo')
-rw-r--r--   src/mongo/client/parallel.cpp                                    | 32
-rw-r--r--   src/mongo/db/s/balancer/balancer.cpp                             | 17
-rw-r--r--   src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp | 16
-rw-r--r--   src/mongo/db/s/balancer/migration_manager.cpp                    | 11
-rw-r--r--   src/mongo/s/catalog/catalog_cache.cpp                            | 20
-rw-r--r--   src/mongo/s/catalog/catalog_cache.h                              | 27
-rw-r--r--   src/mongo/s/chunk_manager.cpp                                    |  3
-rw-r--r--   src/mongo/s/client/version_manager.cpp                           | 68
-rw-r--r--   src/mongo/s/client/version_manager.h                             |  1
-rw-r--r--   src/mongo/s/commands/cluster_commands_common.cpp                 | 24
-rw-r--r--   src/mongo/s/commands/cluster_map_reduce_cmd.cpp                  |  2
-rw-r--r--   src/mongo/s/commands/commands_public.cpp                         |  2
-rw-r--r--   src/mongo/s/commands/strategy.cpp                                | 14
-rw-r--r--   src/mongo/s/sharding_raii.cpp                                    | 40
-rw-r--r--   src/mongo/s/sharding_raii.h                                      | 37
15 files changed, 186 insertions, 128 deletions
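
The core API shift is easiest to see before wading into the per-file hunks: call sites stop asking DBConfig::getChunkManagerOrPrimary to fill two out-parameters and instead obtain a ScopedChunkManager whose cm() and primary() accessors are mutually exclusive (exactly one is non-null). The standalone C++ sketch below models that dispatch; ChunkManager, Shard, and the lookup rule are simplified stand-ins for illustration, not the real MongoDB classes:

    #include <iostream>
    #include <memory>
    #include <string>

    // Stand-ins for the real MongoDB types; only the shape matters here.
    struct ChunkManager {
        std::string ns;
    };
    struct Shard {
        std::string id;
    };

    // Models the commit's ScopedChunkManager: exactly one of _cm / _primary is set.
    class ScopedChunkManager {
    public:
        static ScopedChunkManager get(const std::string& ns) {
            ScopedChunkManager scoped;
            if (ns == "test.sharded") {  // stand-in for the real routing-table lookup
                scoped._cm = std::make_shared<ChunkManager>(ChunkManager{ns});
            } else {
                scoped._primary = std::make_shared<Shard>(Shard{"shard0000"});
            }
            return scoped;
        }

        // Chunk manager if the collection is sharded, nullptr otherwise
        std::shared_ptr<ChunkManager> cm() const {
            return _cm;
        }

        // Primary shard if the collection is unsharded, nullptr otherwise
        std::shared_ptr<Shard> primary() const {
            return _primary;
        }

    private:
        std::shared_ptr<ChunkManager> _cm;
        std::shared_ptr<Shard> _primary;
    };

    int main() {
        for (const std::string ns : {"test.sharded", "test.unsharded"}) {
            const auto scopedCM = ScopedChunkManager::get(ns);
            if (const auto cm = scopedCM.cm()) {
                std::cout << ns << ": route through the chunk manager for " << cm->ns << '\n';
            } else {
                std::cout << ns << ": target the primary shard " << scopedCM.primary()->id << '\n';
            }
        }
        return 0;
    }

The same branch-on-cm() shape recurs throughout the diff, notably in parallel.cpp's startInit and version_manager.cpp's checkShardVersion.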
diff --git a/src/mongo/client/parallel.cpp b/src/mongo/client/parallel.cpp
index 26ebcea0582..6c3b376c73f 100644
--- a/src/mongo/client/parallel.cpp
+++ b/src/mongo/client/parallel.cpp
@@ -42,10 +42,9 @@
 #include "mongo/db/bson/dotted_path_support.h"
 #include "mongo/db/query/query_request.h"
 #include "mongo/s/catalog/catalog_cache.h"
-#include "mongo/s/chunk_manager.h"
 #include "mongo/s/client/shard_registry.h"
-#include "mongo/s/config.h"
 #include "mongo/s/grid.h"
+#include "mongo/s/sharding_raii.h"
 #include "mongo/s/stale_exception.h"
 #include "mongo/util/log.h"
 #include "mongo/util/net/socket_exception.h"
@@ -357,7 +356,7 @@ void ParallelSortClusteredCursor::_handleStaleNS(OperationContext* txn,
                                                  const NamespaceString& staleNS,
                                                  bool forceReload,
                                                  bool fullReload) {
-    auto status = grid.catalogCache()->getDatabase(txn, staleNS.db().toString());
+    auto status = Grid::get(txn)->catalogCache()->getDatabase(txn, staleNS.db());
     if (!status.isOK()) {
         warning() << "cannot reload database info for stale namespace " << staleNS.ns();
         return;
@@ -397,7 +396,7 @@ void ParallelSortClusteredCursor::setupVersionAndHandleSlaveOk(
 
     // Setup conn
     if (!state->conn) {
-        const auto shard = uassertStatusOK(grid.shardRegistry()->getShard(txn, shardId));
+        const auto shard = uassertStatusOK(Grid::get(txn)->shardRegistry()->getShard(txn, shardId));
         state->conn.reset(new ShardConnection(shard->getConnString(), ns.ns(), manager));
     }
 
@@ -464,9 +463,6 @@ void ParallelSortClusteredCursor::startInit(OperationContext* txn) {
     const bool returnPartial = (_qSpec.options() & QueryOption_PartialResults);
     const NamespaceString nss(!_cInfo.isEmpty() ? _cInfo.versionedNS : _qSpec.ns());
 
-    shared_ptr<ChunkManager> manager;
-    shared_ptr<Shard> primary;
-
     string prefix;
     if (MONGO_unlikely(shouldLog(pc))) {
         if (_totalTries > 0) {
@@ -477,19 +473,22 @@ void ParallelSortClusteredCursor::startInit(OperationContext* txn) {
     }
     LOG(pc) << prefix << " pcursor over " << _qSpec << " and " << _cInfo;
 
-    set<ShardId> shardIds;
-    string vinfo;
+    shared_ptr<ChunkManager> manager;
+    shared_ptr<Shard> primary;
 
     {
-        shared_ptr<DBConfig> config;
-
-        auto status = grid.catalogCache()->getDatabase(txn, nss.db().toString());
-        if (status.getStatus().code() != ErrorCodes::NamespaceNotFound) {
-            config = uassertStatusOK(status);
-            config->getChunkManagerOrPrimary(txn, nss.ns(), manager, primary);
+        auto scopedCMStatus = ScopedChunkManager::get(txn, nss);
+        if (scopedCMStatus != ErrorCodes::NamespaceNotFound) {
+            uassertStatusOK(scopedCMStatus.getStatus());
+            const auto& scopedCM = scopedCMStatus.getValue();
+            manager = scopedCM.cm();
+            primary = scopedCM.primary();
         }
     }
 
+    set<ShardId> shardIds;
+    string vinfo;
+
     if (manager) {
         if (MONGO_unlikely(shouldLog(pc))) {
             vinfo = str::stream() << "[" << manager->getns() << " @ "
@@ -949,7 +948,8 @@ void ParallelSortClusteredCursor::finishInit(OperationContext* txn) {
         _cursors[index].reset(mdata.pcState->cursor.get(), &mdata);
 
         {
-            const auto shard = uassertStatusOK(grid.shardRegistry()->getShard(txn, i->first));
+            const auto shard =
+                uassertStatusOK(Grid::get(txn)->shardRegistry()->getShard(txn, i->first));
             _servers.insert(shard->getConnString().toString());
         }
 
diff --git a/src/mongo/db/s/balancer/balancer.cpp b/src/mongo/db/s/balancer/balancer.cpp
index 45c00bde686..a979624000d 100644
--- a/src/mongo/db/s/balancer/balancer.cpp
+++ b/src/mongo/db/s/balancer/balancer.cpp
@@ -538,14 +538,13 @@ Status Balancer::_enforceTagRanges(OperationContext* txn) {
             return scopedCMStatus.getStatus();
         }
 
-        auto scopedCM = std::move(scopedCMStatus.getValue());
-        ChunkManager* const cm = scopedCM.cm();
+        const auto& scopedCM = scopedCMStatus.getValue();
 
         auto splitStatus =
             shardutil::splitChunkAtMultiplePoints(txn,
                                                   splitInfo.shardId,
                                                   splitInfo.nss,
-                                                  cm->getShardKeyPattern(),
+                                                  scopedCM.cm()->getShardKeyPattern(),
                                                   splitInfo.collectionVersion,
                                                   ChunkRange(splitInfo.minKey, splitInfo.maxKey),
                                                   splitInfo.splitKeys);
@@ -613,17 +612,17 @@ int Balancer::_moveChunks(OperationContext* txn,
 void Balancer::_splitOrMarkJumbo(OperationContext* txn,
                                  const NamespaceString& nss,
                                  const BSONObj& minKey) {
-    auto scopedChunkManager = uassertStatusOK(ScopedChunkManager::refreshAndGet(txn, nss));
-    ChunkManager* const chunkManager = scopedChunkManager.cm();
+    auto scopedCM = uassertStatusOK(ScopedChunkManager::refreshAndGet(txn, nss));
+    const auto cm = scopedCM.cm().get();
 
-    auto chunk = chunkManager->findIntersectingChunkWithSimpleCollation(txn, minKey);
+    auto chunk = cm->findIntersectingChunkWithSimpleCollation(txn, minKey);
 
     try {
         const auto splitPoints = uassertStatusOK(shardutil::selectChunkSplitPoints(
             txn,
             chunk->getShardId(),
             nss,
-            chunkManager->getShardKeyPattern(),
+            cm->getShardKeyPattern(),
             ChunkRange(chunk->getMin(), chunk->getMax()),
             Grid::get(txn)->getBalancerConfiguration()->getMaxChunkSizeBytes(),
             boost::none));
@@ -634,8 +633,8 @@ void Balancer::_splitOrMarkJumbo(OperationContext* txn,
         shardutil::splitChunkAtMultiplePoints(txn,
                                               chunk->getShardId(),
                                               nss,
-                                              chunkManager->getShardKeyPattern(),
-                                              chunkManager->getVersion(),
+                                              cm->getShardKeyPattern(),
+                                              cm->getVersion(),
                                               ChunkRange(chunk->getMin(), chunk->getMax()),
                                               splitPoints));
     } catch (const DBException& ex) {
diff --git a/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp b/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp
index 2dc66b905a2..5aa83e53909 100644
--- a/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp
+++ b/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp
@@ -308,8 +308,8 @@ BalancerChunkSelectionPolicyImpl::selectSpecificChunkToMove(OperationContext* tx
         return scopedCMStatus.getStatus();
     }
 
-    auto scopedCM = std::move(scopedCMStatus.getValue());
-    ChunkManager* const cm = scopedCM.cm();
+    const auto& scopedCM = scopedCMStatus.getValue();
+    const auto cm = scopedCM.cm().get();
 
     const auto collInfoStatus = createCollectionDistributionStatus(txn, shardStats, cm);
     if (!collInfoStatus.isOK()) {
@@ -338,8 +338,8 @@ Status BalancerChunkSelectionPolicyImpl::checkMoveAllowed(OperationContext* txn,
         return scopedCMStatus.getStatus();
     }
 
-    auto scopedCM = std::move(scopedCMStatus.getValue());
-    ChunkManager* const cm = scopedCM.cm();
+    const auto& scopedCM = scopedCMStatus.getValue();
+    const auto cm = scopedCM.cm().get();
 
     const auto collInfoStatus = createCollectionDistributionStatus(txn, shardStats, cm);
     if (!collInfoStatus.isOK()) {
@@ -371,8 +371,8 @@ StatusWith<SplitInfoVector> BalancerChunkSelectionPolicyImpl::_getSplitCandidate
         return scopedCMStatus.getStatus();
     }
 
-    auto scopedCM = std::move(scopedCMStatus.getValue());
-    ChunkManager* const cm = scopedCM.cm();
+    const auto& scopedCM = scopedCMStatus.getValue();
+    const auto cm = scopedCM.cm().get();
 
     const auto& shardKeyPattern = cm->getShardKeyPattern().getKeyPattern();
 
@@ -425,8 +425,8 @@ StatusWith<MigrateInfoVector> BalancerChunkSelectionPolicyImpl::_getMigrateCandi
         return scopedCMStatus.getStatus();
     }
 
-    auto scopedCM = std::move(scopedCMStatus.getValue());
-    ChunkManager* const cm = scopedCM.cm();
+    const auto& scopedCM = scopedCMStatus.getValue();
+    const auto cm = scopedCM.cm().get();
 
     const auto& shardKeyPattern = cm->getShardKeyPattern().getKeyPattern();
 
diff --git a/src/mongo/db/s/balancer/migration_manager.cpp b/src/mongo/db/s/balancer/migration_manager.cpp
index 6253ebd2b25..381f54b0669 100644
--- a/src/mongo/db/s/balancer/migration_manager.cpp
+++ b/src/mongo/db/s/balancer/migration_manager.cpp
@@ -183,10 +183,9 @@ Status MigrationManager::executeManualMigration(
         return scopedCMStatus.getStatus();
     }
 
-    auto scopedCM = std::move(scopedCMStatus.getValue());
-    ChunkManager* const cm = scopedCM.cm();
+    const auto& scopedCM = scopedCMStatus.getValue();
 
-    auto chunk = cm->findIntersectingChunkWithSimpleCollation(txn, migrateInfo.minKey);
+    auto chunk = scopedCM.cm()->findIntersectingChunkWithSimpleCollation(txn, migrateInfo.minKey);
     invariant(chunk);
 
     Status commandStatus = _processRemoteCommandResponse(
@@ -320,8 +319,7 @@ void MigrationManager::finishRecovery(OperationContext* txn,
             return;
         }
 
-        auto scopedCM = std::move(scopedCMStatus.getValue());
-        ChunkManager* const cm = scopedCM.cm();
+        const auto& scopedCM = scopedCMStatus.getValue();
 
         int scheduledMigrations = 0;
 
@@ -331,7 +329,8 @@ void MigrationManager::finishRecovery(OperationContext* txn,
             auto waitForDelete = migrationType.getWaitForDelete();
             migrateInfos.pop_front();
 
-            auto chunk = cm->findIntersectingChunkWithSimpleCollation(txn, migrationInfo.minKey);
+            auto chunk =
+                scopedCM.cm()->findIntersectingChunkWithSimpleCollation(txn, migrationInfo.minKey);
             invariant(chunk);
 
             if (chunk->getShardId() != migrationInfo.from) {
diff --git a/src/mongo/s/catalog/catalog_cache.cpp b/src/mongo/s/catalog/catalog_cache.cpp
index cde72edbc46..b418977f8b2 100644
--- a/src/mongo/s/catalog/catalog_cache.cpp
+++ b/src/mongo/s/catalog/catalog_cache.cpp
@@ -41,11 +41,12 @@ namespace mongo {
 using std::shared_ptr;
 using std::string;
 
+CatalogCache::CatalogCache() = default;
 
-CatalogCache::CatalogCache() {}
+CatalogCache::~CatalogCache() = default;
 
-StatusWith<shared_ptr<DBConfig>> CatalogCache::getDatabase(OperationContext* txn,
-                                                           const string& dbName) {
+StatusWith<std::shared_ptr<DBConfig>> CatalogCache::getDatabase(OperationContext* txn,
+                                                                StringData dbName) {
     stdx::lock_guard<stdx::mutex> guard(_mutex);
 
     ShardedDatabasesMap::iterator it = _databases.find(dbName);
@@ -54,25 +55,26 @@ StatusWith<shared_ptr<DBConfig>> CatalogCache::getDatabase(OperationContext* txn
     }
 
     // Need to load from the store
-    auto status = Grid::get(txn)->catalogClient(txn)->getDatabase(txn, dbName);
+    auto status = Grid::get(txn)->catalogClient(txn)->getDatabase(txn, dbName.toString());
     if (!status.isOK()) {
         return status.getStatus();
     }
 
-    const auto dbOpTimePair = status.getValue();
-    shared_ptr<DBConfig> db = std::make_shared<DBConfig>(dbOpTimePair.value, dbOpTimePair.opTime);
+    const auto& dbOpTimePair = status.getValue();
+    auto db = std::make_shared<DBConfig>(dbOpTimePair.value, dbOpTimePair.opTime);
     try {
         db->load(txn);
     } catch (const DBException& excep) {
         return excep.toStatus();
     }
 
-    invariant(_databases.insert(std::make_pair(dbName, db)).second);
+    auto emplaceResult = _databases.try_emplace(dbName, std::move(db));
+    invariant(emplaceResult.second);
 
-    return db;
+    return emplaceResult.first->second;
 }
 
-void CatalogCache::invalidate(const string& dbName) {
+void CatalogCache::invalidate(StringData dbName) {
     stdx::lock_guard<stdx::mutex> guard(_mutex);
 
     ShardedDatabasesMap::iterator it = _databases.find(dbName);
diff --git a/src/mongo/s/catalog/catalog_cache.h b/src/mongo/s/catalog/catalog_cache.h
index 5d87d89aac9..8d30c1aebf0 100644
--- a/src/mongo/s/catalog/catalog_cache.h
+++ b/src/mongo/s/catalog/catalog_cache.h
@@ -28,12 +28,12 @@
 
 #pragma once
 
-#include <map>
 #include <memory>
-#include <string>
 
 #include "mongo/base/disallow_copying.h"
+#include "mongo/base/string_data.h"
 #include "mongo/stdx/mutex.h"
+#include "mongo/util/string_map.h"
 
 namespace mongo {
 
@@ -42,7 +42,6 @@ class OperationContext;
 template <typename T>
 class StatusWith;
 
-
 /**
  * This is the root of the "read-only" hierarchy of cached catalog metadata. It is read only
  * in the sense that it only reads from the persistent store, but never writes to it. Instead
@@ -53,24 +52,23 @@ class CatalogCache {
 public:
     CatalogCache();
+    ~CatalogCache();
 
     /**
-     * Retrieves the cached metadata for the specified database. The returned value is still
-     * owned by the cache and it should not be cached elsewhere, but instead only used as a
-     * local variable. The reason for this is so that if the cache gets invalidated, the caller
-     * does not miss getting the most up-to-date value.
+     * Retrieves the cached metadata for the specified database. The returned value is still owned
+     * by the cache and should not be kept elsewhere. I.e., it should only be used as a local
+     * variable. The reason for this is so that if the cache gets invalidated, the caller does not
+     * miss getting the most up-to-date value.
      *
-     * @param dbname The name of the database (must not contain dots, etc).
-     * @return The database if it exists, NULL otherwise.
+     * Returns the database cache entry if the database exists or a failed status otherwise.
      */
-    StatusWith<std::shared_ptr<DBConfig>> getDatabase(OperationContext* txn,
-                                                      const std::string& dbName);
+    StatusWith<std::shared_ptr<DBConfig>> getDatabase(OperationContext* txn, StringData dbName);
 
     /**
     * Removes the database information for the specified name from the cache, so that the
      * next time getDatabase is called, it will be reloaded.
      */
-    void invalidate(const std::string& dbName);
+    void invalidate(StringData dbName);
 
     /**
      * Purges all cached database information, which will cause the data to be reloaded again.
@@ -78,10 +76,11 @@ public:
     void invalidateAll();
 
 private:
-    typedef std::map<std::string, std::shared_ptr<DBConfig>> ShardedDatabasesMap;
+    using ShardedDatabasesMap = StringMap<std::shared_ptr<DBConfig>>;
 
-    // Databases catalog map and mutex to protect it
+    // Mutex to serialize access to the structures below
     stdx::mutex _mutex;
+
     ShardedDatabasesMap _databases;
 };
diff --git a/src/mongo/s/chunk_manager.cpp b/src/mongo/s/chunk_manager.cpp
index 324be082da5..3dc01a59dba 100644
--- a/src/mongo/s/chunk_manager.cpp
+++ b/src/mongo/s/chunk_manager.cpp
@@ -518,8 +518,7 @@ StatusWith<shared_ptr<Chunk>> ChunkManager::findIntersectingChunk(OperationConte
     // Proactively force a reload on the chunk manager in case it somehow got inconsistent
     const NamespaceString nss(_ns);
-    auto config =
-        uassertStatusOK(Grid::get(txn)->catalogCache()->getDatabase(txn, nss.db().toString()));
+    auto config = uassertStatusOK(Grid::get(txn)->catalogCache()->getDatabase(txn, nss.db()));
     config->getChunkManagerIfExists(txn, nss.ns(), true);
 
     msgasserted(13141, "Chunk map pointed to incorrect chunk");
diff --git a/src/mongo/s/client/version_manager.cpp b/src/mongo/s/client/version_manager.cpp
index 3cb09cc0db6..0f50b42a38f 100644
--- a/src/mongo/s/client/version_manager.cpp
+++ b/src/mongo/s/client/version_manager.cpp
@@ -36,14 +36,13 @@
 #include "mongo/db/namespace_string.h"
 #include "mongo/s/catalog/catalog_cache.h"
 #include "mongo/s/catalog/sharding_catalog_client.h"
-#include "mongo/s/chunk_manager.h"
 #include "mongo/s/chunk_version.h"
 #include "mongo/s/client/shard_connection.h"
 #include "mongo/s/client/shard_registry.h"
-#include "mongo/s/config.h"
 #include "mongo/s/grid.h"
 #include "mongo/s/mongos_options.h"
 #include "mongo/s/set_shard_version_request.h"
+#include "mongo/s/sharding_raii.h"
 #include "mongo/s/stale_exception.h"
 #include "mongo/util/log.h"
 
@@ -253,24 +252,22 @@ bool checkShardVersion(OperationContext* txn,
         return initShardVersionEmptyNS(txn, conn_in);
     }
 
-    auto status = grid.catalogCache()->getDatabase(txn, nsToDatabase(ns));
-    if (!status.isOK()) {
-        return false;
-    }
-
     DBClientBase* const conn = getVersionable(conn_in);
     verify(conn);  // errors thrown above
 
-    shared_ptr<DBConfig> conf = status.getValue();
+    const NamespaceString nss(ns);
 
-    if (authoritative) {
-        conf->getChunkManagerIfExists(txn, ns, true);
+    auto scopedCMStatus = authoritative ? ScopedChunkManager::refreshAndGet(txn, nss)
+                                        : ScopedChunkManager::get(txn, nss);
+    if (!scopedCMStatus.isOK()) {
+        return false;
     }
 
-    shared_ptr<Shard> primary;
-    shared_ptr<ChunkManager> manager;
+    const auto& scopedCM = scopedCMStatus.getValue();
 
-    conf->getChunkManagerOrPrimary(txn, ns, manager, primary);
+    auto conf = scopedCM.db();
+    const auto manager = scopedCM.cm();
+    const auto primary = scopedCM.primary();
 
     unsigned long long officialSequenceNumber = 0;
 
@@ -282,7 +279,9 @@ bool checkShardVersion(OperationContext* txn,
         return false;
     }
 
-    const auto shard = grid.shardRegistry()->getShardForHostNoReload(
+    const auto shardRegistry = Grid::get(txn)->shardRegistry();
+
+    const auto shard = shardRegistry->getShardForHostNoReload(
         uassertStatusOK(HostAndPort::parse(conn->getServerAddress())));
     uassert(ErrorCodes::ShardNotFound,
             str::stream() << conn->getServerAddress() << " is not recognized as a shard",
@@ -311,9 +310,8 @@ bool checkShardVersion(OperationContext* txn,
             throw SendStaleConfigException(ns, msg, refVersion, currentVersion);
         }
     } else if (refManager) {
-        string msg(str::stream() << "not sharded ("
-                                 << ((manager.get() == 0) ? string("<none>") : str::stream()
string("<none>") : str::stream() - << manager->getSequenceNumber()) + string msg(str::stream() << "not sharded (" << (!manager ? string("<none>") : str::stream() + << manager->getSequenceNumber()) << ") but has reference manager (" << refManager->getSequenceNumber() << ") " @@ -351,7 +349,7 @@ bool checkShardVersion(OperationContext* txn, if (setShardVersion(txn, conn, ns, - grid.shardRegistry()->getConfigServerConnectionString(), + shardRegistry->getConfigServerConnectionString(), version, manager.get(), authoritative, @@ -382,12 +380,10 @@ bool checkShardVersion(OperationContext* txn, warning() << "reloading full configuration for " << conf->name() << ", connection state indicates significant version changes"; - // reload db - conf->reload(txn); - } else { - // reload config - conf->getChunkManager(txn, ns, true); + Grid::get(txn)->catalogCache()->invalidate(nss.db()); } + + conf->getChunkManager(txn, nss.ns(), true); } const int maxNumTries = 7; @@ -426,32 +422,6 @@ bool VersionManager::isVersionableCB(DBClientBase* conn) { return conn->type() == ConnectionString::MASTER || conn->type() == ConnectionString::SET; } -bool VersionManager::forceRemoteCheckShardVersionCB(OperationContext* txn, const string& ns) { - const NamespaceString nss(ns); - - // This will force the database catalog entry to be reloaded - grid.catalogCache()->invalidate(nss.db().toString()); - - auto status = grid.catalogCache()->getDatabase(txn, nss.db().toString()); - if (!status.isOK()) { - return false; - } - - shared_ptr<DBConfig> conf = status.getValue(); - - // If we don't have a collection, don't refresh the chunk manager - if (nsGetCollection(ns).size() == 0) { - return false; - } - - auto manager = conf->getChunkManagerIfExists(txn, ns, true, true); - if (!manager) { - return false; - } - - return true; -} - bool VersionManager::checkShardVersionCB(OperationContext* txn, DBClientBase* conn_in, const string& ns, diff --git a/src/mongo/s/client/version_manager.h b/src/mongo/s/client/version_manager.h index f03fb4f34c8..8c02f12161e 100644 --- a/src/mongo/s/client/version_manager.h +++ b/src/mongo/s/client/version_manager.h @@ -42,7 +42,6 @@ public: VersionManager() {} bool isVersionableCB(DBClientBase*); - bool forceRemoteCheckShardVersionCB(OperationContext* txn, const std::string&); bool checkShardVersionCB(OperationContext*, DBClientBase*, const std::string&, bool, int); bool checkShardVersionCB(OperationContext*, ShardConnection*, bool, int); void resetShardVersionCB(DBClientBase*); diff --git a/src/mongo/s/commands/cluster_commands_common.cpp b/src/mongo/s/commands/cluster_commands_common.cpp index 8a309d3d31b..020bac131b3 100644 --- a/src/mongo/s/commands/cluster_commands_common.cpp +++ b/src/mongo/s/commands/cluster_commands_common.cpp @@ -35,10 +35,12 @@ #include "mongo/client/parallel.h" #include "mongo/db/commands.h" #include "mongo/db/query/cursor_response.h" +#include "mongo/s/catalog/catalog_cache.h" #include "mongo/s/catalog/type_collection.h" #include "mongo/s/client/shard_connection.h" #include "mongo/s/client/version_manager.h" #include "mongo/s/grid.h" +#include "mongo/s/sharding_raii.h" #include "mongo/s/stale_exception.h" #include "mongo/util/log.h" @@ -47,6 +49,26 @@ namespace mongo { using std::shared_ptr; using std::string; +namespace { + +bool forceRemoteCheckShardVersionCB(OperationContext* txn, const string& ns) { + const NamespaceString nss(ns); + + // This will force the database catalog entry to be reloaded + Grid::get(txn)->catalogCache()->invalidate(nss.db()); + + auto 
+    if (!scopedCMStatus.isOK()) {
+        return false;
+    }
+
+    const auto& scopedCM = scopedCMStatus.getValue();
+
+    return scopedCM.cm() != nullptr;
+}
+
+}  // namespace
+
 Future::CommandResult::CommandResult(const string& server,
                                      const string& db,
                                      const BSONObj& cmd,
@@ -133,7 +155,7 @@ bool Future::CommandResult::join(OperationContext* txn, int maxRetries) {
             }
 
             if (i >= maxRetries / 2) {
-                if (!versionManager.forceRemoteCheckShardVersionCB(txn, staleNS)) {
+                if (!forceRemoteCheckShardVersionCB(txn, staleNS)) {
                     error() << "Future::spawnCommand (part 2) no config detected"
                             << causedBy(redact(e));
                     throw e;
diff --git a/src/mongo/s/commands/cluster_map_reduce_cmd.cpp b/src/mongo/s/commands/cluster_map_reduce_cmd.cpp
index 232aefa5bd2..b9c8f8161b2 100644
--- a/src/mongo/s/commands/cluster_map_reduce_cmd.cpp
+++ b/src/mongo/s/commands/cluster_map_reduce_cmd.cpp
@@ -477,7 +477,7 @@ public:
             // If the database has sharding already enabled, we can ignore the error
             if (status.isOK()) {
                 // Invalidate the output database so it gets reloaded on the next fetch attempt
-                Grid::get(txn)->catalogCache()->invalidate(outputCollNss.db().toString());
+                Grid::get(txn)->catalogCache()->invalidate(outputCollNss.db());
             } else if (status != ErrorCodes::AlreadyInitialized) {
                 uassertStatusOK(status);
             }
diff --git a/src/mongo/s/commands/commands_public.cpp b/src/mongo/s/commands/commands_public.cpp
index 943f831dfa6..5957cea46ff 100644
--- a/src/mongo/s/commands/commands_public.cpp
+++ b/src/mongo/s/commands/commands_public.cpp
@@ -1012,7 +1012,7 @@ public:
         // Note that this implementation will not handle targeting retries and fails when the
         // sharding metadata is too stale
-        auto status = Grid::get(txn)->catalogCache()->getDatabase(txn, nss.db().toString());
+        auto status = Grid::get(txn)->catalogCache()->getDatabase(txn, nss.db());
         if (!status.isOK()) {
             return Status(status.getStatus().code(),
                           str::stream() << "Passthrough command failed: " << command.toString()
diff --git a/src/mongo/s/commands/strategy.cpp b/src/mongo/s/commands/strategy.cpp
index 411fae3358f..08fde3b67a4 100644
--- a/src/mongo/s/commands/strategy.cpp
+++ b/src/mongo/s/commands/strategy.cpp
@@ -1,4 +1,4 @@
-/*
+/**
  *    Copyright (C) 2010 10gen Inc.
  *
  *    This program is free software: you can redistribute it and/or modify
@@ -57,7 +57,6 @@
 #include "mongo/s/chunk_manager.h"
 #include "mongo/s/chunk_version.h"
 #include "mongo/s/client/shard_registry.h"
-#include "mongo/s/client/version_manager.h"
 #include "mongo/s/commands/cluster_explain.h"
 #include "mongo/s/grid.h"
 #include "mongo/s/query/cluster_cursor_manager.h"
@@ -327,10 +326,11 @@ void Strategy::clientCommandOp(OperationContext* txn, const NamespaceString& nss
             ShardConnection::checkMyConnectionVersions(txn, staleNS);
             if (loops < 4) {
-                if (!versionManager.forceRemoteCheckShardVersionCB(txn, staleNS)) {
-                    LOG(1) << "Database does not exist or collection no longer sharded after a "
-                              "StaleConfigException.";
-                }
+                // This throws out the entire database cache entry in response to
+                // StaleConfigException instead of just the collection which encountered it. There
+                // is no good reason for it other than the lack of lower-granularity cache
+                // invalidation.
+                Grid::get(txn)->catalogCache()->invalidate(NamespaceString(staleNS).db());
             }
         } catch (const DBException& e) {
             OpQueryReplyBuilder reply;
@@ -387,7 +387,7 @@ void Strategy::getMore(OperationContext* txn, const NamespaceString& nss, DbMess
     // TODO: Handle stale config exceptions here from coll being dropped or sharded during op for
     // now has same semantics as legacy request.
 
-    auto statusGetDb = Grid::get(txn)->catalogCache()->getDatabase(txn, nss.db().toString());
+    auto statusGetDb = Grid::get(txn)->catalogCache()->getDatabase(txn, nss.db());
     if (statusGetDb == ErrorCodes::NamespaceNotFound) {
         replyToQuery(ResultFlag_CursorNotFound, client->session(), dbm->msg(), 0, 0, 0);
         return;
diff --git a/src/mongo/s/sharding_raii.cpp b/src/mongo/s/sharding_raii.cpp
index 87f1127257d..57b122fbce5 100644
--- a/src/mongo/s/sharding_raii.cpp
+++ b/src/mongo/s/sharding_raii.cpp
@@ -34,6 +34,7 @@
 #include "mongo/s/catalog/catalog_cache.h"
 #include "mongo/s/catalog/sharding_catalog_client.h"
 #include "mongo/s/chunk_manager.h"
+#include "mongo/s/client/shard_registry.h"
 #include "mongo/s/grid.h"
 
 namespace mongo {
@@ -81,8 +82,47 @@ StatusWith<ScopedShardDatabase> ScopedShardDatabase::getOrCreate(OperationContex
 ScopedChunkManager::ScopedChunkManager(ScopedShardDatabase db, std::shared_ptr<ChunkManager> cm)
     : _db(std::move(db)), _cm(std::move(cm)) {}
 
+ScopedChunkManager::ScopedChunkManager(ScopedShardDatabase db, std::shared_ptr<Shard> primary)
+    : _db(std::move(db)), _primary(std::move(primary)) {}
+
 ScopedChunkManager::~ScopedChunkManager() = default;
 
+StatusWith<ScopedChunkManager> ScopedChunkManager::get(OperationContext* txn,
+                                                       const NamespaceString& nss) {
+    auto scopedDbStatus = ScopedShardDatabase::getExisting(txn, nss.db());
+    if (!scopedDbStatus.isOK()) {
+        return scopedDbStatus.getStatus();
+    }
+
+    auto scopedDb = std::move(scopedDbStatus.getValue());
+
+    auto cm = scopedDb.db()->getChunkManagerIfExists(txn, nss.ns());
+    if (cm) {
+        return {ScopedChunkManager(std::move(scopedDb), std::move(cm))};
+    }
+
+    auto shardStatus =
+        Grid::get(txn)->shardRegistry()->getShard(txn, scopedDb.db()->getPrimaryId());
+    if (!shardStatus.isOK()) {
+        return {ErrorCodes::fromInt(40371),
+                str::stream() << "The primary shard for collection " << nss.ns()
+                              << " could not be loaded due to error "
+                              << shardStatus.getStatus().toString()};
+    }
+
+    return {ScopedChunkManager(std::move(scopedDb), std::move(shardStatus.getValue()))};
+}
+
+StatusWith<ScopedChunkManager> ScopedChunkManager::getOrCreate(OperationContext* txn,
+                                                               const NamespaceString& nss) {
+    auto scopedDbStatus = ScopedShardDatabase::getOrCreate(txn, nss.db());
+    if (!scopedDbStatus.isOK()) {
+        return scopedDbStatus.getStatus();
+    }
+
+    return ScopedChunkManager::get(txn, nss);
+}
+
 StatusWith<ScopedChunkManager> ScopedChunkManager::refreshAndGet(OperationContext* txn,
                                                                  const NamespaceString& nss) {
     auto scopedDbStatus = ScopedShardDatabase::getExisting(txn, nss.db());
diff --git a/src/mongo/s/sharding_raii.h b/src/mongo/s/sharding_raii.h
index 725c8b86ffe..1ec3da85b14 100644
--- a/src/mongo/s/sharding_raii.h
+++ b/src/mongo/s/sharding_raii.h
@@ -90,6 +90,23 @@ public:
     ~ScopedChunkManager();
 
     /**
+     * If the specified namespace is sharded, returns a ScopedChunkManager initialized with that
+     * collection's routing information. If it is not, the object returned is initialized with the
+     * database primary node on which the unsharded collection must reside.
+     *
+     * Returns NamespaceNotFound if the database does not exist, or any other error indicating
+     * problem communicating with the config server.
+     */
+    static StatusWith<ScopedChunkManager> get(OperationContext* txn, const NamespaceString& nss);
+
+    /**
+     * If the database holding the specified namespace does not exist, creates it and then behaves
+     * like the 'get' method above.
+     */
+    static StatusWith<ScopedChunkManager> getOrCreate(OperationContext* txn,
+                                                      const NamespaceString& nss);
+
+    /**
      * If the specified database and collection do not exist in the cache, tries to load them from
      * the config server and returns a reference. If they are already in the cache, makes a call to
      * the config server to check if there are any incremental updates to the collection chunk
@@ -107,20 +124,32 @@ public:
     }
 
     /**
-     * Returns the underlying chunk manager for which we hold reference.
+     * If the collection is sharded, returns a chunk manager for it. Otherwise, nullptr.
      */
-    ChunkManager* cm() const {
-        return _cm.get();
+    std::shared_ptr<ChunkManager> cm() const {
+        return _cm;
+    }
+
+    /**
+     * If the collection is not sharded, returns its primary shard. Otherwise, nullptr.
+     */
+    std::shared_ptr<Shard> primary() const {
+        return _primary;
     }
 
 private:
     ScopedChunkManager(ScopedShardDatabase db, std::shared_ptr<ChunkManager> cm);
 
+    ScopedChunkManager(ScopedShardDatabase db, std::shared_ptr<Shard> primary);
+
     // Scoped reference to the owning database.
     ScopedShardDatabase _db;
 
-    // Reference to the corresponding chunk manager. Never null.
+    // Reference to the corresponding chunk manager (if sharded) or null
     std::shared_ptr<ChunkManager> _cm;
+
+    // Reference to the primary of the database (if not sharded) or null
+    std::shared_ptr<Shard> _primary;
 };
 
 }  // namespace mongo
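
A side note on the CatalogCache hunks above: replacing std::map<std::string, ...> keyed by const std::string& with StringMap keyed by StringData lets getDatabase and invalidate probe the cache without materializing a temporary std::string, and try_emplace copies the key only when an insertion actually happens. The sketch below demonstrates the same technique using only the standard library (std::map with the transparent comparator std::less<> plus std::string_view); it is an approximate analogue for illustration, since MongoDB's StringMap is a separate hashed container:

    #include <iostream>
    #include <map>
    #include <memory>
    #include <string>
    #include <string_view>

    struct DBConfig {
        std::string name;
    };

    // std::less<> is a transparent comparator, so find() accepts a string_view
    // directly and no temporary std::string is built for the probe. This mirrors
    // the saving that StringMap + StringData give the CatalogCache.
    using ShardedDatabasesMap = std::map<std::string, std::shared_ptr<DBConfig>, std::less<>>;

    std::shared_ptr<DBConfig> getDatabase(ShardedDatabasesMap& databases, std::string_view dbName) {
        auto it = databases.find(dbName);  // heterogeneous lookup, no key copy
        if (it != databases.end()) {
            return it->second;
        }

        // The key becomes an owned std::string only on actual insertion, matching
        // the try_emplace call in the patched CatalogCache::getDatabase.
        auto db = std::make_shared<DBConfig>(DBConfig{std::string(dbName)});
        auto emplaceResult = databases.try_emplace(std::string(dbName), std::move(db));
        return emplaceResult.first->second;
    }

    int main() {
        ShardedDatabasesMap databases;
        std::cout << getDatabase(databases, "config")->name << '\n';  // loads and caches
        std::cout << getDatabase(databases, "config")->name << '\n';  // served from cache
        return 0;
    }

The design point carries over directly: cache reads are far more frequent than cache fills, so the lookup path is the one worth keeping allocation-free.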