author    Kaloian Manassiev <kaloian.manassiev@mongodb.com>  2017-02-03 23:41:50 -0500
committer Kaloian Manassiev <kaloian.manassiev@mongodb.com>  2017-02-03 23:41:50 -0500
commit    c5f52fb3265ef68ffe2741f88758082dda393a4b
tree      ffc7073ee412afd9181cbe6dbec869a4922d776f
parent    91bb1bca8e8b4f66a610409ec0ebe10245a02e5a
download  mongo-c5f52fb3265ef68ffe2741f88758082dda393a4b.tar.gz
Revert "SERVER-27382 Remove usages of DBConfig::reload/getChunkManagerOrPrimary from legacy code paths"
This reverts commit 58292592979ff9277ec34390469a1541315104c0.
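This revert swaps mongos routing lookups from the RAII ScopedChunkManager helpers back to the legacy DBConfig API: grid.catalogCache()->getDatabase() plus DBConfig::getChunkManagerOrPrimary(), and it reintroduces VersionManager::forceRemoteCheckShardVersionCB(). As a reading aid, here is a minimal sketch of the restored lookup pattern, pieced together from the parallel.cpp hunk below; resolveRouting is a hypothetical wrapper, while the calls and signatures are the ones this diff restores:

    // Sketch only, not compilable outside the mongos tree. Resolves a
    // namespace to either a chunk manager (sharded) or the database's
    // primary shard (unsharded), the pre-SERVER-27382 way.
    void resolveRouting(OperationContext* txn, const NamespaceString& nss) {
        std::shared_ptr<ChunkManager> manager;
        std::shared_ptr<Shard> primary;

        auto status = grid.catalogCache()->getDatabase(txn, nss.db().toString());
        if (status.getStatus().code() != ErrorCodes::NamespaceNotFound) {
            auto config = uassertStatusOK(status);
            // Fills exactly one of 'manager' or 'primary'
            config->getChunkManagerOrPrimary(txn, nss.ns(), manager, primary);
        }

        if (manager) {
            // Sharded: target shards by chunk distribution
        } else if (primary) {
            // Unsharded: target the database's primary shard
        }
        // Neither set: the database does not exist
    }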
-rw-r--r--  src/mongo/client/parallel.cpp                                    |  32
-rw-r--r--  src/mongo/db/s/balancer/balancer.cpp                             |  17
-rw-r--r--  src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp |  16
-rw-r--r--  src/mongo/db/s/balancer/migration_manager.cpp                    |  11
-rw-r--r--  src/mongo/s/catalog/catalog_cache.cpp                            |  20
-rw-r--r--  src/mongo/s/catalog/catalog_cache.h                              |  27
-rw-r--r--  src/mongo/s/chunk_manager.cpp                                    |   3
-rw-r--r--  src/mongo/s/client/version_manager.cpp                           |  68
-rw-r--r--  src/mongo/s/client/version_manager.h                             |   1
-rw-r--r--  src/mongo/s/commands/cluster_commands_common.cpp                 |  24
-rw-r--r--  src/mongo/s/commands/cluster_map_reduce_cmd.cpp                  |   2
-rw-r--r--  src/mongo/s/commands/commands_public.cpp                         |   2
-rw-r--r--  src/mongo/s/commands/strategy.cpp                                |  14
-rw-r--r--  src/mongo/s/sharding_raii.cpp                                    |  40
-rw-r--r--  src/mongo/s/sharding_raii.h                                      |  37
15 files changed, 128 insertions(+), 186 deletions(-)
diff --git a/src/mongo/client/parallel.cpp b/src/mongo/client/parallel.cpp
index 6c3b376c73f..26ebcea0582 100644
--- a/src/mongo/client/parallel.cpp
+++ b/src/mongo/client/parallel.cpp
@@ -42,9 +42,10 @@
#include "mongo/db/bson/dotted_path_support.h"
#include "mongo/db/query/query_request.h"
#include "mongo/s/catalog/catalog_cache.h"
+#include "mongo/s/chunk_manager.h"
#include "mongo/s/client/shard_registry.h"
+#include "mongo/s/config.h"
#include "mongo/s/grid.h"
-#include "mongo/s/sharding_raii.h"
#include "mongo/s/stale_exception.h"
#include "mongo/util/log.h"
#include "mongo/util/net/socket_exception.h"
@@ -356,7 +357,7 @@ void ParallelSortClusteredCursor::_handleStaleNS(OperationContext* txn,
const NamespaceString& staleNS,
bool forceReload,
bool fullReload) {
- auto status = Grid::get(txn)->catalogCache()->getDatabase(txn, staleNS.db());
+ auto status = grid.catalogCache()->getDatabase(txn, staleNS.db().toString());
if (!status.isOK()) {
warning() << "cannot reload database info for stale namespace " << staleNS.ns();
return;
@@ -396,7 +397,7 @@ void ParallelSortClusteredCursor::setupVersionAndHandleSlaveOk(
// Setup conn
if (!state->conn) {
- const auto shard = uassertStatusOK(Grid::get(txn)->shardRegistry()->getShard(txn, shardId));
+ const auto shard = uassertStatusOK(grid.shardRegistry()->getShard(txn, shardId));
state->conn.reset(new ShardConnection(shard->getConnString(), ns.ns(), manager));
}
@@ -463,6 +464,9 @@ void ParallelSortClusteredCursor::startInit(OperationContext* txn) {
const bool returnPartial = (_qSpec.options() & QueryOption_PartialResults);
const NamespaceString nss(!_cInfo.isEmpty() ? _cInfo.versionedNS : _qSpec.ns());
+ shared_ptr<ChunkManager> manager;
+ shared_ptr<Shard> primary;
+
string prefix;
if (MONGO_unlikely(shouldLog(pc))) {
if (_totalTries > 0) {
@@ -473,22 +477,19 @@ void ParallelSortClusteredCursor::startInit(OperationContext* txn) {
}
LOG(pc) << prefix << " pcursor over " << _qSpec << " and " << _cInfo;
- shared_ptr<ChunkManager> manager;
- shared_ptr<Shard> primary;
+ set<ShardId> shardIds;
+ string vinfo;
{
- auto scopedCMStatus = ScopedChunkManager::get(txn, nss);
- if (scopedCMStatus != ErrorCodes::NamespaceNotFound) {
- uassertStatusOK(scopedCMStatus.getStatus());
- const auto& scopedCM = scopedCMStatus.getValue();
- manager = scopedCM.cm();
- primary = scopedCM.primary();
+ shared_ptr<DBConfig> config;
+
+ auto status = grid.catalogCache()->getDatabase(txn, nss.db().toString());
+ if (status.getStatus().code() != ErrorCodes::NamespaceNotFound) {
+ config = uassertStatusOK(status);
+ config->getChunkManagerOrPrimary(txn, nss.ns(), manager, primary);
}
}
- set<ShardId> shardIds;
- string vinfo;
-
if (manager) {
if (MONGO_unlikely(shouldLog(pc))) {
vinfo = str::stream() << "[" << manager->getns() << " @ "
@@ -948,8 +949,7 @@ void ParallelSortClusteredCursor::finishInit(OperationContext* txn) {
_cursors[index].reset(mdata.pcState->cursor.get(), &mdata);
{
- const auto shard =
- uassertStatusOK(Grid::get(txn)->shardRegistry()->getShard(txn, i->first));
+ const auto shard = uassertStatusOK(grid.shardRegistry()->getShard(txn, i->first));
_servers.insert(shard->getConnString().toString());
}
diff --git a/src/mongo/db/s/balancer/balancer.cpp b/src/mongo/db/s/balancer/balancer.cpp
index a979624000d..45c00bde686 100644
--- a/src/mongo/db/s/balancer/balancer.cpp
+++ b/src/mongo/db/s/balancer/balancer.cpp
@@ -538,13 +538,14 @@ Status Balancer::_enforceTagRanges(OperationContext* txn) {
return scopedCMStatus.getStatus();
}
- const auto& scopedCM = scopedCMStatus.getValue();
+ auto scopedCM = std::move(scopedCMStatus.getValue());
+ ChunkManager* const cm = scopedCM.cm();
auto splitStatus =
shardutil::splitChunkAtMultiplePoints(txn,
splitInfo.shardId,
splitInfo.nss,
- scopedCM.cm()->getShardKeyPattern(),
+ cm->getShardKeyPattern(),
splitInfo.collectionVersion,
ChunkRange(splitInfo.minKey, splitInfo.maxKey),
splitInfo.splitKeys);
@@ -612,17 +613,17 @@ int Balancer::_moveChunks(OperationContext* txn,
void Balancer::_splitOrMarkJumbo(OperationContext* txn,
const NamespaceString& nss,
const BSONObj& minKey) {
- auto scopedCM = uassertStatusOK(ScopedChunkManager::refreshAndGet(txn, nss));
- const auto cm = scopedCM.cm().get();
+ auto scopedChunkManager = uassertStatusOK(ScopedChunkManager::refreshAndGet(txn, nss));
+ ChunkManager* const chunkManager = scopedChunkManager.cm();
- auto chunk = cm->findIntersectingChunkWithSimpleCollation(txn, minKey);
+ auto chunk = chunkManager->findIntersectingChunkWithSimpleCollation(txn, minKey);
try {
const auto splitPoints = uassertStatusOK(shardutil::selectChunkSplitPoints(
txn,
chunk->getShardId(),
nss,
- cm->getShardKeyPattern(),
+ chunkManager->getShardKeyPattern(),
ChunkRange(chunk->getMin(), chunk->getMax()),
Grid::get(txn)->getBalancerConfiguration()->getMaxChunkSizeBytes(),
boost::none));
@@ -633,8 +634,8 @@ void Balancer::_splitOrMarkJumbo(OperationContext* txn,
shardutil::splitChunkAtMultiplePoints(txn,
chunk->getShardId(),
nss,
- cm->getShardKeyPattern(),
- cm->getVersion(),
+ chunkManager->getShardKeyPattern(),
+ chunkManager->getVersion(),
ChunkRange(chunk->getMin(), chunk->getMax()),
splitPoints));
} catch (const DBException& ex) {
diff --git a/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp b/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp
index 5aa83e53909..2dc66b905a2 100644
--- a/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp
+++ b/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp
@@ -308,8 +308,8 @@ BalancerChunkSelectionPolicyImpl::selectSpecificChunkToMove(OperationContext* tx
return scopedCMStatus.getStatus();
}
- const auto& scopedCM = scopedCMStatus.getValue();
- const auto cm = scopedCM.cm().get();
+ auto scopedCM = std::move(scopedCMStatus.getValue());
+ ChunkManager* const cm = scopedCM.cm();
const auto collInfoStatus = createCollectionDistributionStatus(txn, shardStats, cm);
if (!collInfoStatus.isOK()) {
@@ -338,8 +338,8 @@ Status BalancerChunkSelectionPolicyImpl::checkMoveAllowed(OperationContext* txn,
return scopedCMStatus.getStatus();
}
- const auto& scopedCM = scopedCMStatus.getValue();
- const auto cm = scopedCM.cm().get();
+ auto scopedCM = std::move(scopedCMStatus.getValue());
+ ChunkManager* const cm = scopedCM.cm();
const auto collInfoStatus = createCollectionDistributionStatus(txn, shardStats, cm);
if (!collInfoStatus.isOK()) {
@@ -371,8 +371,8 @@ StatusWith<SplitInfoVector> BalancerChunkSelectionPolicyImpl::_getSplitCandidate
return scopedCMStatus.getStatus();
}
- const auto& scopedCM = scopedCMStatus.getValue();
- const auto cm = scopedCM.cm().get();
+ auto scopedCM = std::move(scopedCMStatus.getValue());
+ ChunkManager* const cm = scopedCM.cm();
const auto& shardKeyPattern = cm->getShardKeyPattern().getKeyPattern();
@@ -425,8 +425,8 @@ StatusWith<MigrateInfoVector> BalancerChunkSelectionPolicyImpl::_getMigrateCandi
return scopedCMStatus.getStatus();
}
- const auto& scopedCM = scopedCMStatus.getValue();
- const auto cm = scopedCM.cm().get();
+ auto scopedCM = std::move(scopedCMStatus.getValue());
+ ChunkManager* const cm = scopedCM.cm();
const auto& shardKeyPattern = cm->getShardKeyPattern().getKeyPattern();
diff --git a/src/mongo/db/s/balancer/migration_manager.cpp b/src/mongo/db/s/balancer/migration_manager.cpp
index 381f54b0669..6253ebd2b25 100644
--- a/src/mongo/db/s/balancer/migration_manager.cpp
+++ b/src/mongo/db/s/balancer/migration_manager.cpp
@@ -183,9 +183,10 @@ Status MigrationManager::executeManualMigration(
return scopedCMStatus.getStatus();
}
- const auto& scopedCM = scopedCMStatus.getValue();
+ auto scopedCM = std::move(scopedCMStatus.getValue());
+ ChunkManager* const cm = scopedCM.cm();
- auto chunk = scopedCM.cm()->findIntersectingChunkWithSimpleCollation(txn, migrateInfo.minKey);
+ auto chunk = cm->findIntersectingChunkWithSimpleCollation(txn, migrateInfo.minKey);
invariant(chunk);
Status commandStatus = _processRemoteCommandResponse(
@@ -319,7 +320,8 @@ void MigrationManager::finishRecovery(OperationContext* txn,
return;
}
- const auto& scopedCM = scopedCMStatus.getValue();
+ auto scopedCM = std::move(scopedCMStatus.getValue());
+ ChunkManager* const cm = scopedCM.cm();
int scheduledMigrations = 0;
@@ -329,8 +331,7 @@ void MigrationManager::finishRecovery(OperationContext* txn,
auto waitForDelete = migrationType.getWaitForDelete();
migrateInfos.pop_front();
- auto chunk =
- scopedCM.cm()->findIntersectingChunkWithSimpleCollation(txn, migrationInfo.minKey);
+ auto chunk = cm->findIntersectingChunkWithSimpleCollation(txn, migrationInfo.minKey);
invariant(chunk);
if (chunk->getShardId() != migrationInfo.from) {
diff --git a/src/mongo/s/catalog/catalog_cache.cpp b/src/mongo/s/catalog/catalog_cache.cpp
index b418977f8b2..cde72edbc46 100644
--- a/src/mongo/s/catalog/catalog_cache.cpp
+++ b/src/mongo/s/catalog/catalog_cache.cpp
@@ -41,12 +41,11 @@ namespace mongo {
using std::shared_ptr;
using std::string;
-CatalogCache::CatalogCache() = default;
-CatalogCache::~CatalogCache() = default;
+CatalogCache::CatalogCache() {}
-StatusWith<std::shared_ptr<DBConfig>> CatalogCache::getDatabase(OperationContext* txn,
- StringData dbName) {
+StatusWith<shared_ptr<DBConfig>> CatalogCache::getDatabase(OperationContext* txn,
+ const string& dbName) {
stdx::lock_guard<stdx::mutex> guard(_mutex);
ShardedDatabasesMap::iterator it = _databases.find(dbName);
@@ -55,26 +54,25 @@ StatusWith<std::shared_ptr<DBConfig>> CatalogCache::getDatabase(OperationContext
}
// Need to load from the store
- auto status = Grid::get(txn)->catalogClient(txn)->getDatabase(txn, dbName.toString());
+ auto status = Grid::get(txn)->catalogClient(txn)->getDatabase(txn, dbName);
if (!status.isOK()) {
return status.getStatus();
}
- const auto& dbOpTimePair = status.getValue();
- auto db = std::make_shared<DBConfig>(dbOpTimePair.value, dbOpTimePair.opTime);
+ const auto dbOpTimePair = status.getValue();
+ shared_ptr<DBConfig> db = std::make_shared<DBConfig>(dbOpTimePair.value, dbOpTimePair.opTime);
try {
db->load(txn);
} catch (const DBException& excep) {
return excep.toStatus();
}
- auto emplaceResult = _databases.try_emplace(dbName, std::move(db));
- invariant(emplaceResult.second);
+ invariant(_databases.insert(std::make_pair(dbName, db)).second);
- return emplaceResult.first->second;
+ return db;
}
-void CatalogCache::invalidate(StringData dbName) {
+void CatalogCache::invalidate(const string& dbName) {
stdx::lock_guard<stdx::mutex> guard(_mutex);
ShardedDatabasesMap::iterator it = _databases.find(dbName);
diff --git a/src/mongo/s/catalog/catalog_cache.h b/src/mongo/s/catalog/catalog_cache.h
index 8d30c1aebf0..5d87d89aac9 100644
--- a/src/mongo/s/catalog/catalog_cache.h
+++ b/src/mongo/s/catalog/catalog_cache.h
@@ -28,12 +28,12 @@
#pragma once
+#include <map>
#include <memory>
+#include <string>
#include "mongo/base/disallow_copying.h"
-#include "mongo/base/string_data.h"
#include "mongo/stdx/mutex.h"
-#include "mongo/util/string_map.h"
namespace mongo {
@@ -42,6 +42,7 @@ class OperationContext;
template <typename T>
class StatusWith;
+
/**
* This is the root of the "read-only" hierarchy of cached catalog metadata. It is read only
* in the sense that it only reads from the persistent store, but never writes to it. Instead
@@ -52,23 +53,24 @@ class CatalogCache {
public:
CatalogCache();
- ~CatalogCache();
/**
- * Retrieves the cached metadata for the specified database. The returned value is still owned
- * by the cache and should not be kept elsewhere. I.e., it should only be used as a local
- * variable. The reason for this is so that if the cache gets invalidated, the caller does not
- * miss getting the most up-to-date value.
+ * Retrieves the cached metadata for the specified database. The returned value is still
+ * owned by the cache and it should not be cached elsewhere, but instead only used as a
+ * local variable. The reason for this is so that if the cache gets invalidated, the caller
+ * does not miss getting the most up-to-date value.
*
- * Returns the database cache entry if the database exists or a failed status otherwise.
+ * @param dbname The name of the database (must not contain dots, etc).
+ * @return The database if it exists, NULL otherwise.
*/
- StatusWith<std::shared_ptr<DBConfig>> getDatabase(OperationContext* txn, StringData dbName);
+ StatusWith<std::shared_ptr<DBConfig>> getDatabase(OperationContext* txn,
+ const std::string& dbName);
/**
* Removes the database information for the specified name from the cache, so that the
* next time getDatabase is called, it will be reloaded.
*/
- void invalidate(StringData dbName);
+ void invalidate(const std::string& dbName);
/**
* Purges all cached database information, which will cause the data to be reloaded again.
@@ -76,11 +78,10 @@ public:
void invalidateAll();
private:
- using ShardedDatabasesMap = StringMap<std::shared_ptr<DBConfig>>;
+ typedef std::map<std::string, std::shared_ptr<DBConfig>> ShardedDatabasesMap;
- // Mutex to serialize access to the structures below
+ // Databases catalog map and mutex to protect it
stdx::mutex _mutex;
-
ShardedDatabasesMap _databases;
};
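The header change above narrows CatalogCache back to std::string keys in a std::map, dropping StringData and StringMap. The caller-side effect, visible throughout this diff, is that call sites must now materialize the database name. A hedged sketch of that usage, where nss stands in for a caller's NamespaceString:

    // Pre-revert: getDatabase(txn, nss.db()) accepted a StringData.
    // Post-revert the parameter is const std::string&, so callers convert:
    auto status = grid.catalogCache()->getDatabase(txn, nss.db().toString());
    if (status.isOK()) {
        std::shared_ptr<DBConfig> db = status.getValue();
        // Per the comment in the header, use only as a local variable so a
        // cache invalidation is picked up on the next getDatabase() call.
    }
    grid.catalogCache()->invalidate(nss.db().toString());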
diff --git a/src/mongo/s/chunk_manager.cpp b/src/mongo/s/chunk_manager.cpp
index 3dc01a59dba..324be082da5 100644
--- a/src/mongo/s/chunk_manager.cpp
+++ b/src/mongo/s/chunk_manager.cpp
@@ -518,7 +518,8 @@ StatusWith<shared_ptr<Chunk>> ChunkManager::findIntersectingChunk(OperationConte
// Proactively force a reload on the chunk manager in case it somehow got inconsistent
const NamespaceString nss(_ns);
- auto config = uassertStatusOK(Grid::get(txn)->catalogCache()->getDatabase(txn, nss.db()));
+ auto config =
+ uassertStatusOK(Grid::get(txn)->catalogCache()->getDatabase(txn, nss.db().toString()));
config->getChunkManagerIfExists(txn, nss.ns(), true);
msgasserted(13141, "Chunk map pointed to incorrect chunk");
diff --git a/src/mongo/s/client/version_manager.cpp b/src/mongo/s/client/version_manager.cpp
index 0f50b42a38f..3cb09cc0db6 100644
--- a/src/mongo/s/client/version_manager.cpp
+++ b/src/mongo/s/client/version_manager.cpp
@@ -36,13 +36,14 @@
#include "mongo/db/namespace_string.h"
#include "mongo/s/catalog/catalog_cache.h"
#include "mongo/s/catalog/sharding_catalog_client.h"
+#include "mongo/s/chunk_manager.h"
#include "mongo/s/chunk_version.h"
#include "mongo/s/client/shard_connection.h"
#include "mongo/s/client/shard_registry.h"
+#include "mongo/s/config.h"
#include "mongo/s/grid.h"
#include "mongo/s/mongos_options.h"
#include "mongo/s/set_shard_version_request.h"
-#include "mongo/s/sharding_raii.h"
#include "mongo/s/stale_exception.h"
#include "mongo/util/log.h"
@@ -252,22 +253,24 @@ bool checkShardVersion(OperationContext* txn,
return initShardVersionEmptyNS(txn, conn_in);
}
+ auto status = grid.catalogCache()->getDatabase(txn, nsToDatabase(ns));
+ if (!status.isOK()) {
+ return false;
+ }
+
DBClientBase* const conn = getVersionable(conn_in);
verify(conn); // errors thrown above
- const NamespaceString nss(ns);
+ shared_ptr<DBConfig> conf = status.getValue();
- auto scopedCMStatus = authoritative ? ScopedChunkManager::refreshAndGet(txn, nss)
- : ScopedChunkManager::get(txn, nss);
- if (!scopedCMStatus.isOK()) {
- return false;
+ if (authoritative) {
+ conf->getChunkManagerIfExists(txn, ns, true);
}
- const auto& scopedCM = scopedCMStatus.getValue();
+ shared_ptr<Shard> primary;
+ shared_ptr<ChunkManager> manager;
- auto conf = scopedCM.db();
- const auto manager = scopedCM.cm();
- const auto primary = scopedCM.primary();
+ conf->getChunkManagerOrPrimary(txn, ns, manager, primary);
unsigned long long officialSequenceNumber = 0;
@@ -279,9 +282,7 @@ bool checkShardVersion(OperationContext* txn,
return false;
}
- const auto shardRegistry = Grid::get(txn)->shardRegistry();
-
- const auto shard = shardRegistry->getShardForHostNoReload(
+ const auto shard = grid.shardRegistry()->getShardForHostNoReload(
uassertStatusOK(HostAndPort::parse(conn->getServerAddress())));
uassert(ErrorCodes::ShardNotFound,
str::stream() << conn->getServerAddress() << " is not recognized as a shard",
@@ -310,8 +311,9 @@ bool checkShardVersion(OperationContext* txn,
throw SendStaleConfigException(ns, msg, refVersion, currentVersion);
}
} else if (refManager) {
- string msg(str::stream() << "not sharded (" << (!manager ? string("<none>") : str::stream()
- << manager->getSequenceNumber())
+ string msg(str::stream() << "not sharded ("
+ << ((manager.get() == 0) ? string("<none>") : str::stream()
+ << manager->getSequenceNumber())
<< ") but has reference manager ("
<< refManager->getSequenceNumber()
<< ") "
@@ -349,7 +351,7 @@ bool checkShardVersion(OperationContext* txn,
if (setShardVersion(txn,
conn,
ns,
- shardRegistry->getConfigServerConnectionString(),
+ grid.shardRegistry()->getConfigServerConnectionString(),
version,
manager.get(),
authoritative,
@@ -380,10 +382,12 @@ bool checkShardVersion(OperationContext* txn,
warning() << "reloading full configuration for " << conf->name()
<< ", connection state indicates significant version changes";
- Grid::get(txn)->catalogCache()->invalidate(nss.db());
+ // reload db
+ conf->reload(txn);
+ } else {
+ // reload config
+ conf->getChunkManager(txn, ns, true);
}
-
- conf->getChunkManager(txn, nss.ns(), true);
}
const int maxNumTries = 7;
@@ -422,6 +426,32 @@ bool VersionManager::isVersionableCB(DBClientBase* conn) {
return conn->type() == ConnectionString::MASTER || conn->type() == ConnectionString::SET;
}
+bool VersionManager::forceRemoteCheckShardVersionCB(OperationContext* txn, const string& ns) {
+ const NamespaceString nss(ns);
+
+ // This will force the database catalog entry to be reloaded
+ grid.catalogCache()->invalidate(nss.db().toString());
+
+ auto status = grid.catalogCache()->getDatabase(txn, nss.db().toString());
+ if (!status.isOK()) {
+ return false;
+ }
+
+ shared_ptr<DBConfig> conf = status.getValue();
+
+ // If we don't have a collection, don't refresh the chunk manager
+ if (nsGetCollection(ns).size() == 0) {
+ return false;
+ }
+
+ auto manager = conf->getChunkManagerIfExists(txn, ns, true, true);
+ if (!manager) {
+ return false;
+ }
+
+ return true;
+}
+
bool VersionManager::checkShardVersionCB(OperationContext* txn,
DBClientBase* conn_in,
const string& ns,
diff --git a/src/mongo/s/client/version_manager.h b/src/mongo/s/client/version_manager.h
index 8c02f12161e..f03fb4f34c8 100644
--- a/src/mongo/s/client/version_manager.h
+++ b/src/mongo/s/client/version_manager.h
@@ -42,6 +42,7 @@ public:
VersionManager() {}
bool isVersionableCB(DBClientBase*);
+ bool forceRemoteCheckShardVersionCB(OperationContext* txn, const std::string&);
bool checkShardVersionCB(OperationContext*, DBClientBase*, const std::string&, bool, int);
bool checkShardVersionCB(OperationContext*, ShardConnection*, bool, int);
void resetShardVersionCB(DBClientBase*);
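forceRemoteCheckShardVersionCB() returns to VersionManager as a public member, replacing the file-local helper deleted from cluster_commands_common.cpp below. Callers reach it through the global versionManager object after a stale-config error; a short sketch of that retry path, with staleNS standing in for the namespace reported stale:

    // Returns false when the database no longer exists or the collection is
    // no longer sharded; otherwise forces a refresh of its chunk manager.
    if (!versionManager.forceRemoteCheckShardVersionCB(txn, staleNS)) {
        // give up instead of retrying against refreshed routing info
    }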
diff --git a/src/mongo/s/commands/cluster_commands_common.cpp b/src/mongo/s/commands/cluster_commands_common.cpp
index 020bac131b3..8a309d3d31b 100644
--- a/src/mongo/s/commands/cluster_commands_common.cpp
+++ b/src/mongo/s/commands/cluster_commands_common.cpp
@@ -35,12 +35,10 @@
#include "mongo/client/parallel.h"
#include "mongo/db/commands.h"
#include "mongo/db/query/cursor_response.h"
-#include "mongo/s/catalog/catalog_cache.h"
#include "mongo/s/catalog/type_collection.h"
#include "mongo/s/client/shard_connection.h"
#include "mongo/s/client/version_manager.h"
#include "mongo/s/grid.h"
-#include "mongo/s/sharding_raii.h"
#include "mongo/s/stale_exception.h"
#include "mongo/util/log.h"
@@ -49,26 +47,6 @@ namespace mongo {
using std::shared_ptr;
using std::string;
-namespace {
-
-bool forceRemoteCheckShardVersionCB(OperationContext* txn, const string& ns) {
- const NamespaceString nss(ns);
-
- // This will force the database catalog entry to be reloaded
- Grid::get(txn)->catalogCache()->invalidate(nss.db());
-
- auto scopedCMStatus = ScopedChunkManager::get(txn, nss);
- if (!scopedCMStatus.isOK()) {
- return false;
- }
-
- const auto& scopedCM = scopedCMStatus.getValue();
-
- return scopedCM.cm() != nullptr;
-}
-
-} // namespace
-
Future::CommandResult::CommandResult(const string& server,
const string& db,
const BSONObj& cmd,
@@ -155,7 +133,7 @@ bool Future::CommandResult::join(OperationContext* txn, int maxRetries) {
}
if (i >= maxRetries / 2) {
- if (!forceRemoteCheckShardVersionCB(txn, staleNS)) {
+ if (!versionManager.forceRemoteCheckShardVersionCB(txn, staleNS)) {
error() << "Future::spawnCommand (part 2) no config detected"
<< causedBy(redact(e));
throw e;
diff --git a/src/mongo/s/commands/cluster_map_reduce_cmd.cpp b/src/mongo/s/commands/cluster_map_reduce_cmd.cpp
index b9c8f8161b2..232aefa5bd2 100644
--- a/src/mongo/s/commands/cluster_map_reduce_cmd.cpp
+++ b/src/mongo/s/commands/cluster_map_reduce_cmd.cpp
@@ -477,7 +477,7 @@ public:
// If the database has sharding already enabled, we can ignore the error
if (status.isOK()) {
// Invalidate the output database so it gets reloaded on the next fetch attempt
- Grid::get(txn)->catalogCache()->invalidate(outputCollNss.db());
+ Grid::get(txn)->catalogCache()->invalidate(outputCollNss.db().toString());
} else if (status != ErrorCodes::AlreadyInitialized) {
uassertStatusOK(status);
}
diff --git a/src/mongo/s/commands/commands_public.cpp b/src/mongo/s/commands/commands_public.cpp
index 5957cea46ff..943f831dfa6 100644
--- a/src/mongo/s/commands/commands_public.cpp
+++ b/src/mongo/s/commands/commands_public.cpp
@@ -1012,7 +1012,7 @@ public:
// Note that this implementation will not handle targeting retries and fails when the
// sharding metadata is too stale
- auto status = Grid::get(txn)->catalogCache()->getDatabase(txn, nss.db());
+ auto status = Grid::get(txn)->catalogCache()->getDatabase(txn, nss.db().toString());
if (!status.isOK()) {
return Status(status.getStatus().code(),
str::stream() << "Passthrough command failed: " << command.toString()
diff --git a/src/mongo/s/commands/strategy.cpp b/src/mongo/s/commands/strategy.cpp
index 08fde3b67a4..411fae3358f 100644
--- a/src/mongo/s/commands/strategy.cpp
+++ b/src/mongo/s/commands/strategy.cpp
@@ -1,4 +1,4 @@
-/**
+/*
* Copyright (C) 2010 10gen Inc.
*
* This program is free software: you can redistribute it and/or modify
@@ -57,6 +57,7 @@
#include "mongo/s/chunk_manager.h"
#include "mongo/s/chunk_version.h"
#include "mongo/s/client/shard_registry.h"
+#include "mongo/s/client/version_manager.h"
#include "mongo/s/commands/cluster_explain.h"
#include "mongo/s/grid.h"
#include "mongo/s/query/cluster_cursor_manager.h"
@@ -326,11 +327,10 @@ void Strategy::clientCommandOp(OperationContext* txn, const NamespaceString& nss
ShardConnection::checkMyConnectionVersions(txn, staleNS);
if (loops < 4) {
- // This throws out the entire database cache entry in response to
- // StaleConfigException instead of just the collection which encountered it. There
- // is no good reason for it other than the lack of lower-granularity cache
- // invalidation.
- Grid::get(txn)->catalogCache()->invalidate(NamespaceString(staleNS).db());
+ if (!versionManager.forceRemoteCheckShardVersionCB(txn, staleNS)) {
+ LOG(1) << "Database does not exist or collection no longer sharded after a "
+ "StaleConfigException.";
+ }
}
} catch (const DBException& e) {
OpQueryReplyBuilder reply;
@@ -387,7 +387,7 @@ void Strategy::getMore(OperationContext* txn, const NamespaceString& nss, DbMess
// TODO: Handle stale config exceptions here from coll being dropped or sharded during op for
// now has same semantics as legacy request.
- auto statusGetDb = Grid::get(txn)->catalogCache()->getDatabase(txn, nss.db());
+ auto statusGetDb = Grid::get(txn)->catalogCache()->getDatabase(txn, nss.db().toString());
if (statusGetDb == ErrorCodes::NamespaceNotFound) {
replyToQuery(ResultFlag_CursorNotFound, client->session(), dbm->msg(), 0, 0, 0);
return;
diff --git a/src/mongo/s/sharding_raii.cpp b/src/mongo/s/sharding_raii.cpp
index 57b122fbce5..87f1127257d 100644
--- a/src/mongo/s/sharding_raii.cpp
+++ b/src/mongo/s/sharding_raii.cpp
@@ -34,7 +34,6 @@
#include "mongo/s/catalog/catalog_cache.h"
#include "mongo/s/catalog/sharding_catalog_client.h"
#include "mongo/s/chunk_manager.h"
-#include "mongo/s/client/shard_registry.h"
#include "mongo/s/grid.h"
namespace mongo {
@@ -82,47 +81,8 @@ StatusWith<ScopedShardDatabase> ScopedShardDatabase::getOrCreate(OperationContex
ScopedChunkManager::ScopedChunkManager(ScopedShardDatabase db, std::shared_ptr<ChunkManager> cm)
: _db(std::move(db)), _cm(std::move(cm)) {}
-ScopedChunkManager::ScopedChunkManager(ScopedShardDatabase db, std::shared_ptr<Shard> primary)
- : _db(std::move(db)), _primary(std::move(primary)) {}
-
ScopedChunkManager::~ScopedChunkManager() = default;
-StatusWith<ScopedChunkManager> ScopedChunkManager::get(OperationContext* txn,
- const NamespaceString& nss) {
- auto scopedDbStatus = ScopedShardDatabase::getExisting(txn, nss.db());
- if (!scopedDbStatus.isOK()) {
- return scopedDbStatus.getStatus();
- }
-
- auto scopedDb = std::move(scopedDbStatus.getValue());
-
- auto cm = scopedDb.db()->getChunkManagerIfExists(txn, nss.ns());
- if (cm) {
- return {ScopedChunkManager(std::move(scopedDb), std::move(cm))};
- }
-
- auto shardStatus =
- Grid::get(txn)->shardRegistry()->getShard(txn, scopedDb.db()->getPrimaryId());
- if (!shardStatus.isOK()) {
- return {ErrorCodes::fromInt(40371),
- str::stream() << "The primary shard for collection " << nss.ns()
- << " could not be loaded due to error "
- << shardStatus.getStatus().toString()};
- }
-
- return {ScopedChunkManager(std::move(scopedDb), std::move(shardStatus.getValue()))};
-}
-
-StatusWith<ScopedChunkManager> ScopedChunkManager::getOrCreate(OperationContext* txn,
- const NamespaceString& nss) {
- auto scopedDbStatus = ScopedShardDatabase::getOrCreate(txn, nss.db());
- if (!scopedDbStatus.isOK()) {
- return scopedDbStatus.getStatus();
- }
-
- return ScopedChunkManager::get(txn, nss);
-}
-
StatusWith<ScopedChunkManager> ScopedChunkManager::refreshAndGet(OperationContext* txn,
const NamespaceString& nss) {
auto scopedDbStatus = ScopedShardDatabase::getExisting(txn, nss.db());
diff --git a/src/mongo/s/sharding_raii.h b/src/mongo/s/sharding_raii.h
index 1ec3da85b14..725c8b86ffe 100644
--- a/src/mongo/s/sharding_raii.h
+++ b/src/mongo/s/sharding_raii.h
@@ -90,23 +90,6 @@ public:
~ScopedChunkManager();
/**
- * If the specified namespace is sharded, returns a ScopedChunkManager initialized with that
- * collection's routing information. If it is not, the object returned is initialized with the
- * database primary node on which the unsharded collection must reside.
- *
- * Returns NamespaceNotFound if the database does not exist, or any other error indicating
- * problem communicating with the config server.
- */
- static StatusWith<ScopedChunkManager> get(OperationContext* txn, const NamespaceString& nss);
-
- /**
- * If the database holding the specified namespace does not exist, creates it and then behaves
- * like the 'get' method above.
- */
- static StatusWith<ScopedChunkManager> getOrCreate(OperationContext* txn,
- const NamespaceString& nss);
-
- /**
* If the specified database and collection do not exist in the cache, tries to load them from
* the config server and returns a reference. If they are already in the cache, makes a call to
* the config server to check if there are any incremental updates to the collection chunk
@@ -124,32 +107,20 @@ public:
}
/**
- * If the collection is sharded, returns a chunk manager for it. Otherwise, nullptr.
+ * Returns the underlying chunk manager for which we hold reference.
*/
- std::shared_ptr<ChunkManager> cm() const {
- return _cm;
- }
-
- /**
- * If the collection is not sharded, returns its primary shard. Otherwise, nullptr.
- */
- std::shared_ptr<Shard> primary() const {
- return _primary;
+ ChunkManager* cm() const {
+ return _cm.get();
}
private:
ScopedChunkManager(ScopedShardDatabase db, std::shared_ptr<ChunkManager> cm);
- ScopedChunkManager(ScopedShardDatabase db, std::shared_ptr<Shard> primary);
-
// Scoped reference to the owning database.
ScopedShardDatabase _db;
- // Reference to the corresponding chunk manager (if sharded) or null
+ // Reference to the corresponding chunk manager. Never null.
std::shared_ptr<ChunkManager> _cm;
-
- // Reference to the primary of the database (if not sharded) or null
- std::shared_ptr<Shard> _primary;
};
} // namespace mongo
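With get(), getOrCreate(), and primary() removed, refreshAndGet() is again the only way to obtain a ScopedChunkManager, and it now always wraps a sharded collection. cm() hands out a raw ChunkManager* that is never null and stays valid only while the scoped object is alive, which is the usage the balancer call sites above rely on. A condensed sketch of that contract, with nss and minKey as stand-ins:

    // Post-revert contract: refreshAndGet() fails for unsharded namespaces,
    // so cm() can return a non-null raw pointer owned by 'scopedCM'.
    auto scopedCM = uassertStatusOK(ScopedChunkManager::refreshAndGet(txn, nss));
    ChunkManager* const cm = scopedCM.cm();  // must not outlive scopedCM
    auto chunk = cm->findIntersectingChunkWithSimpleCollation(txn, minKey);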