author    Kaloian Manassiev <kaloian.manassiev@mongodb.com>  2017-01-19 15:59:53 -0500
committer Kaloian Manassiev <kaloian.manassiev@mongodb.com>  2017-01-20 09:28:10 -0500
commit    6a61933b75ca241f5f5a83a3fac2ebe082084ad0 (patch)
tree      81132dcd868e43812bb793483f6e1fccfed84b7e
parent    c24166e10c6547ea03da71a13065df742c3fdd69 (diff)
download  mongo-6a61933b75ca241f5f5a83a3fac2ebe082084ad0.tar.gz
SERVER-27736 Do not store unsharded collections in DBConfig
(cherry picked from commit a9d62c05f1bd56e8a52f12fd096eb4f2d07a8b23)
-rw-r--r-- src/mongo/db/commands/mr.cpp                             |  21
-rw-r--r-- src/mongo/db/s/sharding_state.cpp                        |   1
-rw-r--r-- src/mongo/s/chunk.cpp                                    |  12
-rw-r--r-- src/mongo/s/chunk_manager.cpp                            |  89
-rw-r--r-- src/mongo/s/chunk_manager.h                              |   5
-rw-r--r-- src/mongo/s/commands/cluster_drop_cmd.cpp                |   1
-rw-r--r-- src/mongo/s/commands/cluster_find_and_modify_cmd.cpp     |   1
-rw-r--r-- src/mongo/s/commands/cluster_flush_router_config_cmd.cpp |   3
-rw-r--r-- src/mongo/s/commands/cluster_get_shard_version_cmd.cpp   |  65
-rw-r--r-- src/mongo/s/commands/cluster_move_chunk_cmd.cpp          |   3
-rw-r--r-- src/mongo/s/commands/cluster_split_cmd.cpp               |   3
-rw-r--r-- src/mongo/s/commands/strategy.cpp                        |   1
-rw-r--r-- src/mongo/s/config.cpp                                   | 217
-rw-r--r-- src/mongo/s/config.h                                     |  28
-rw-r--r-- src/mongo/s/server.cpp                                   |   7
15 files changed, 183 insertions, 274 deletions
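
The core of SERVER-27736: DBConfig's _collections map now stores only sharded collections, so mere presence in the map answers isSharded(), and the per-entry isSharded()/getCM() accessors disappear. A minimal standalone sketch of the resulting shape (MiniDBConfig and SimpleChunkManager are illustrative stand-ins, not the real mongo classes):

#include <map>
#include <memory>
#include <string>

// Stand-in for mongo::ChunkManager; the real class lives in src/mongo/s/chunk_manager.h.
struct SimpleChunkManager {};

// After this commit, the per-database map holds *only* sharded collections,
// so the membership test itself answers "is this namespace sharded?".
class MiniDBConfig {
public:
    bool isSharded(const std::string& ns) const {
        return _collections.count(ns) > 0;  // no per-entry isSharded() flag anymore
    }

    void markSharded(std::string ns, std::shared_ptr<SimpleChunkManager> cm) {
        _collections.emplace(std::move(ns), std::move(cm));
    }

    void markNotSharded(const std::string& ns) {
        _collections.erase(ns);  // unsharded collections are simply absent
    }

private:
    std::map<std::string, std::shared_ptr<SimpleChunkManager>> _collections;
};

int main() {
    MiniDBConfig config;
    config.markSharded("db.sharded", std::make_shared<SimpleChunkManager>());
    return config.isSharded("db.sharded") && !config.isSharded("db.plain") ? 0 : 1;
}

Dropping the unsharded entries also removes the mutable resetCM() path, which the config.cpp hunks below replace with erase-and-emplace of immutable entries.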
diff --git a/src/mongo/db/commands/mr.cpp b/src/mongo/db/commands/mr.cpp
index dadb0ac1acb..a486e69ec02 100644
--- a/src/mongo/db/commands/mr.cpp
+++ b/src/mongo/db/commands/mr.cpp
@@ -74,9 +74,9 @@
#include "mongo/s/chunk.h"
#include "mongo/s/chunk_manager.h"
#include "mongo/s/client/shard_registry.h"
-#include "mongo/s/config.h"
#include "mongo/s/grid.h"
#include "mongo/s/shard_key_pattern.h"
+#include "mongo/s/sharding_raii.h"
#include "mongo/s/stale_exception.h"
#include "mongo/scripting/engine.h"
#include "mongo/stdx/mutex.h"
@@ -1739,17 +1739,14 @@ public:
set<string> servers;
{
- // parse per shard results
+ // Parse per shard results
BSONObjIterator i(shardCounts);
while (i.more()) {
BSONElement e = i.next();
std::string server = e.fieldName();
servers.insert(server);
- auto shardStatus = grid.shardRegistry()->getShard(txn, server);
- if (!shardStatus.isOK()) {
- return appendCommandStatus(result, shardStatus.getStatus());
- }
+ uassertStatusOK(Grid::get(txn)->shardRegistry()->getShard(txn, server));
}
}
@@ -1769,20 +1766,20 @@ public:
result.append("result", config.outputOptions.collectionName);
}
- auto status = grid.catalogCache()->getDatabase(txn, dbname);
- if (!status.isOK()) {
- return appendCommandStatus(result, status.getStatus());
+ auto scopedDbStatus = ScopedShardDatabase::getExisting(txn, dbname);
+ if (!scopedDbStatus.isOK()) {
+ return appendCommandStatus(result, scopedDbStatus.getStatus());
}
- shared_ptr<DBConfig> confOut = status.getValue();
+ auto confOut = scopedDbStatus.getValue().db();
vector<shared_ptr<Chunk>> chunks;
if (confOut->isSharded(config.outputOptions.finalNamespace)) {
shared_ptr<ChunkManager> cm =
confOut->getChunkManager(txn, config.outputOptions.finalNamespace);
- // Fetch result from other shards 1 chunk at a time. It would be better to do
- // just one big $or query, but then the sorting would not be efficient.
+ // Fetch result from other shards 1 chunk at a time. It would be better to do just one
+ // big $or query, but then the sorting would not be efficient.
const string shardName = ShardingState::get(txn)->getShardName();
const ChunkMap& chunkMap = cm->getChunkMap();
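
The mr.cpp hunks show a pattern this commit applies throughout: hand-rolled StatusWith unpacking with appendCommandStatus early returns gives way to uassertStatusOK, which throws and lets the command layer serialize the error. A hedged sketch of the two shapes, with simplified stand-ins for the mongo Status and StatusWith types:

#include <stdexcept>
#include <string>

// Simplified stand-ins for mongo::Status / mongo::StatusWith<T>.
struct Status {
    bool ok;
    std::string reason;
};

template <typename T>
struct StatusWith {
    Status status;
    T value;
    bool isOK() const { return status.ok; }
    const T& getValue() const { return value; }
    const Status& getStatus() const { return status; }
};

// Before: every call site unpacks the status by hand and early-returns.
bool runOldStyle(const StatusWith<int>& sw, int* out) {
    if (!sw.isOK()) {
        return false;  // caller appends sw.getStatus() to the reply
    }
    *out = sw.getValue();
    return true;
}

// After: uassertStatusOK throws, so the happy path reads straight through.
template <typename T>
T uassertStatusOK(StatusWith<T> sw) {
    if (!sw.isOK()) {
        throw std::runtime_error(sw.getStatus().reason);
    }
    return std::move(sw.value);
}

int runNewStyle(const StatusWith<int>& sw) {
    return uassertStatusOK(sw);  // error handling centralized in the dispatcher
}

int main() {
    StatusWith<int> ok{{true, ""}, 42};
    return runNewStyle(ok) == 42 ? 0 : 1;
}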
diff --git a/src/mongo/db/s/sharding_state.cpp b/src/mongo/db/s/sharding_state.cpp
index 81fc8a132ed..1bcf3f4bfc1 100644
--- a/src/mongo/db/s/sharding_state.cpp
+++ b/src/mongo/db/s/sharding_state.cpp
@@ -61,7 +61,6 @@
#include "mongo/s/chunk_version.h"
#include "mongo/s/client/shard_registry.h"
#include "mongo/s/client/sharding_network_connection_hook.h"
-#include "mongo/s/config.h"
#include "mongo/s/grid.h"
#include "mongo/s/sharding_initialization.h"
#include "mongo/util/log.h"
diff --git a/src/mongo/s/chunk.cpp b/src/mongo/s/chunk.cpp
index 5f33238b24b..cc3f6cbe524 100644
--- a/src/mongo/s/chunk.cpp
+++ b/src/mongo/s/chunk.cpp
@@ -34,13 +34,11 @@
#include "mongo/bson/simple_bsonobj_comparator.h"
#include "mongo/client/connpool.h"
-#include "mongo/db/commands.h"
#include "mongo/db/lasterror.h"
#include "mongo/platform/random.h"
#include "mongo/s/balancer_configuration.h"
#include "mongo/s/catalog/sharding_catalog_client.h"
#include "mongo/s/catalog/type_collection.h"
-#include "mongo/s/chunk_manager.h"
#include "mongo/s/client/shard_registry.h"
#include "mongo/s/config_server_client.h"
#include "mongo/s/grid.h"
@@ -75,6 +73,12 @@ int mkDataWritten() {
return r.nextInt32(grid.getBalancerConfiguration()->getMaxChunkSizeBytes() / splitTestFactor);
}
+void reloadChunkManager(OperationContext* txn, const std::string ns) {
+ const NamespaceString nss(ns);
+ auto config = uassertStatusOK(ScopedShardDatabase::getExisting(txn, nss.db()));
+ config.db()->getChunkManagerIfExists(txn, nss.ns(), true);
+}
+
} // namespace
Chunk::Chunk(OperationContext* txn, ChunkManager* manager, const ChunkType& from)
@@ -301,7 +305,7 @@ StatusWith<boost::optional<ChunkRange>> Chunk::split(OperationContext* txn,
return splitStatus.getStatus();
}
- _manager->reload(txn);
+ reloadChunkManager(txn, _manager->getns());
*resultingSplits = splitPoints.size();
return splitStatus.getValue();
@@ -422,7 +426,7 @@ bool Chunk::splitIfShould(OperationContext* txn, long dataWritten) {
msgassertedNoTraceWithStatus(10412, rebalanceStatus);
}
- _manager->reload(txn);
+ reloadChunkManager(txn, _manager->getns());
}
return true;
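
With ChunkManager::reload gone (see the chunk_manager.cpp hunks below), chunk.cpp refreshes routing information through the owning database entry via the new file-local reloadChunkManager helper. A standalone sketch of that flow, using stand-in DBConfig and ChunkManager types:

#include <iostream>
#include <memory>
#include <string>

// Stand-ins for the mongo types involved; the real helper lives in chunk.cpp.
struct ChunkManager {
    std::string ns;
    int version = 1;
};

struct DBConfig {
    std::shared_ptr<ChunkManager> cm;

    // forceReload=true discards the cached manager and installs a fresh one.
    std::shared_ptr<ChunkManager> getChunkManagerIfExists(const std::string& ns,
                                                          bool forceReload) {
        if (forceReload) {
            auto fresh = std::make_shared<ChunkManager>(*cm);
            ++fresh->version;
            cm = fresh;
        }
        return cm;
    }
};

// Mirrors the new file-local reloadChunkManager(): the refresh is requested
// from the database-level cache rather than from the (possibly stale)
// ChunkManager instance itself.
void reloadChunkManager(DBConfig& config, const std::string& ns) {
    config.getChunkManagerIfExists(ns, /*forceReload=*/true);
}

int main() {
    DBConfig config{std::make_shared<ChunkManager>(ChunkManager{"test.coll"})};
    reloadChunkManager(config, "test.coll");
    std::cout << config.cm->version << "\n";  // 2: a fresh manager was installed
}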
diff --git a/src/mongo/s/chunk_manager.cpp b/src/mongo/s/chunk_manager.cpp
index 37b7f71e3e0..735b5e01b9e 100644
--- a/src/mongo/s/chunk_manager.cpp
+++ b/src/mongo/s/chunk_manager.cpp
@@ -365,19 +365,6 @@ bool ChunkManager::_load(OperationContext* txn,
}
}
-shared_ptr<ChunkManager> ChunkManager::reload(OperationContext* txn, bool force) const {
- const NamespaceString nss(_ns);
- auto config = uassertStatusOK(grid.catalogCache()->getDatabase(txn, nss.db().toString()));
-
- return config->getChunkManagerIfExists(txn, getns(), force);
-}
-
-void ChunkManager::_printChunks() const {
- for (ChunkMap::const_iterator it = _chunkMap.begin(), end = _chunkMap.end(); it != end; ++it) {
- log() << redact((*it->second).toString());
- }
-}
-
void ChunkManager::calcInitSplitsAndShards(OperationContext* txn,
const ShardId& primaryShardId,
const vector<BSONObj>* initPoints,
@@ -488,49 +475,55 @@ Status ChunkManager::createFirstChunks(OperationContext* txn,
StatusWith<shared_ptr<Chunk>> ChunkManager::findIntersectingChunk(OperationContext* txn,
const BSONObj& shardKey,
const BSONObj& collation) const {
- {
- const bool hasSimpleCollation = (collation.isEmpty() && !_defaultCollator) ||
- SimpleBSONObjComparator::kInstance.evaluate(collation == CollationSpec::kSimpleSpec);
- if (!hasSimpleCollation) {
- for (BSONElement elt : shardKey) {
- if (CollationIndexKey::isCollatableType(elt.type())) {
- return Status(ErrorCodes::ShardKeyNotFound,
- "cannot target single shard due to collation");
- }
+ const bool hasSimpleCollation = (collation.isEmpty() && !_defaultCollator) ||
+ SimpleBSONObjComparator::kInstance.evaluate(collation == CollationSpec::kSimpleSpec);
+ if (!hasSimpleCollation) {
+ for (BSONElement elt : shardKey) {
+ if (CollationIndexKey::isCollatableType(elt.type())) {
+ return Status(ErrorCodes::ShardKeyNotFound,
+ "cannot target single shard due to collation");
}
}
+ }
- BSONObj chunkMin;
- shared_ptr<Chunk> chunk;
- {
- ChunkMap::const_iterator it = _chunkMap.upper_bound(shardKey);
- if (it != _chunkMap.end()) {
- chunkMin = it->first;
- chunk = it->second;
- }
+ BSONObj chunkMin;
+ shared_ptr<Chunk> chunk;
+ {
+ ChunkMap::const_iterator it = _chunkMap.upper_bound(shardKey);
+ if (it != _chunkMap.end()) {
+ chunkMin = it->first;
+ chunk = it->second;
}
+ }
- if (chunk) {
- if (chunk->containsKey(shardKey)) {
- return chunk;
- }
-
- log() << redact(chunkMin.toString());
- log() << redact((*chunk).toString());
- log() << redact(shardKey);
+ if (!chunk) {
+ // TODO: This should be an invariant
+ msgasserted(8070,
+ str::stream() << "couldn't find a chunk intersecting: " << shardKey
+ << " for ns: "
+ << _ns
+ << " at version: "
+ << _version.toString()
+ << ", number of chunks: "
+ << _chunkMap.size());
+ }
- reload(txn);
- msgasserted(13141, "Chunk map pointed to incorrect chunk");
- }
+ if (chunk->containsKey(shardKey)) {
+ return chunk;
}
- msgasserted(8070,
- str::stream() << "couldn't find a chunk intersecting: " << shardKey << " for ns: "
- << _ns
- << " at version: "
- << _version.toString()
- << ", number of chunks: "
- << _chunkMap.size());
+ // TODO: This should be an invariant
+ log() << redact(chunkMin.toString());
+ log() << redact((*chunk).toString());
+ log() << redact(shardKey);
+
+ // Proactively force a reload on the chunk manager in case it somehow got inconsistent
+ const NamespaceString nss(_ns);
+ auto config =
+ uassertStatusOK(Grid::get(txn)->catalogCache()->getDatabase(txn, nss.db().toString()));
+ config->getChunkManagerIfExists(txn, nss.ns(), true);
+
+ msgasserted(13141, "Chunk map pointed to incorrect chunk");
}
shared_ptr<Chunk> ChunkManager::findIntersectingChunkWithSimpleCollation(
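
The findIntersectingChunk rewrite un-nests the lookup but keeps its logic: _chunkMap is keyed by each chunk's exclusive max bound, so upper_bound(shardKey) selects the first chunk whose max is strictly greater than the key, and containsKey then confirms the min bound. A standalone sketch over int keys (the real map is ordered over BSONObj shard keys):

#include <cassert>
#include <map>
#include <memory>

// Each chunk owns the half-open key range [min, max); the map below is keyed
// by max, mirroring how ChunkManager::_chunkMap is keyed by each chunk's
// exclusive upper bound.
struct Chunk {
    int min;
    int max;
    bool containsKey(int key) const { return min <= key && key < max; }
};

using ChunkMap = std::map<int, std::shared_ptr<Chunk>>;

std::shared_ptr<Chunk> findIntersectingChunk(const ChunkMap& chunkMap, int shardKey) {
    // First chunk whose exclusive max is strictly greater than the key...
    auto it = chunkMap.upper_bound(shardKey);
    if (it == chunkMap.end()) {
        return nullptr;  // the rewritten code msgasserts here (code 8070)
    }
    // ...which must then also cover the key from below (msgasserted 13141
    // fires in the real code if it does not, after forcing a reload).
    return it->second->containsKey(shardKey) ? it->second : nullptr;
}

int main() {
    ChunkMap chunkMap;
    for (int max : {10, 20, 30}) {
        chunkMap.emplace(max, std::make_shared<Chunk>(Chunk{max - 10, max}));
    }
    assert(findIntersectingChunk(chunkMap, 15)->min == 10);
    assert(findIntersectingChunk(chunkMap, 35) == nullptr);
}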
diff --git a/src/mongo/s/chunk_manager.h b/src/mongo/s/chunk_manager.h
index fc5ff6e29c2..d06ac5cfaeb 100644
--- a/src/mongo/s/chunk_manager.h
+++ b/src/mongo/s/chunk_manager.h
@@ -192,13 +192,8 @@ public:
ChunkVersion getVersion(const ShardId& shardName) const;
ChunkVersion getVersion() const;
- void _printChunks() const;
-
uint64_t getCurrentDesiredChunkSize() const;
- std::shared_ptr<ChunkManager> reload(OperationContext* txn,
- bool force = true) const; // doesn't modify self!
-
/**
* Returns the opTime of config server the last time chunks were loaded.
*/
diff --git a/src/mongo/s/commands/cluster_drop_cmd.cpp b/src/mongo/s/commands/cluster_drop_cmd.cpp
index b2402deeb8d..bf2b740ce33 100644
--- a/src/mongo/s/commands/cluster_drop_cmd.cpp
+++ b/src/mongo/s/commands/cluster_drop_cmd.cpp
@@ -40,7 +40,6 @@
#include "mongo/s/client/shard_registry.h"
#include "mongo/s/commands/cluster_commands_common.h"
#include "mongo/s/commands/sharded_command_processing.h"
-#include "mongo/s/config.h"
#include "mongo/s/grid.h"
#include "mongo/s/sharding_raii.h"
#include "mongo/s/stale_exception.h"
diff --git a/src/mongo/s/commands/cluster_find_and_modify_cmd.cpp b/src/mongo/s/commands/cluster_find_and_modify_cmd.cpp
index 0426e43bb1f..e47aa1633f9 100644
--- a/src/mongo/s/commands/cluster_find_and_modify_cmd.cpp
+++ b/src/mongo/s/commands/cluster_find_and_modify_cmd.cpp
@@ -46,7 +46,6 @@
#include "mongo/s/commands/cluster_explain.h"
#include "mongo/s/commands/sharded_command_processing.h"
#include "mongo/s/commands/strategy.h"
-#include "mongo/s/config.h"
#include "mongo/s/grid.h"
#include "mongo/s/mongos_options.h"
#include "mongo/s/sharding_raii.h"
diff --git a/src/mongo/s/commands/cluster_flush_router_config_cmd.cpp b/src/mongo/s/commands/cluster_flush_router_config_cmd.cpp
index d7053eabeb5..de726f47251 100644
--- a/src/mongo/s/commands/cluster_flush_router_config_cmd.cpp
+++ b/src/mongo/s/commands/cluster_flush_router_config_cmd.cpp
@@ -30,7 +30,6 @@
#include "mongo/db/commands.h"
#include "mongo/s/catalog/catalog_cache.h"
-#include "mongo/s/config.h"
#include "mongo/s/grid.h"
namespace mongo {
@@ -71,7 +70,7 @@ public:
int options,
std::string& errmsg,
BSONObjBuilder& result) {
- grid.catalogCache()->invalidateAll();
+ Grid::get(txn)->catalogCache()->invalidateAll();
result.appendBool("flushed", true);
return true;
diff --git a/src/mongo/s/commands/cluster_get_shard_version_cmd.cpp b/src/mongo/s/commands/cluster_get_shard_version_cmd.cpp
index 259142cb6d3..3ab79ef5364 100644
--- a/src/mongo/s/commands/cluster_get_shard_version_cmd.cpp
+++ b/src/mongo/s/commands/cluster_get_shard_version_cmd.cpp
@@ -26,49 +26,45 @@
* it in the license file.
*/
+#define MONGO_LOG_DEFAULT_COMPONENT ::mongo::logger::LogComponent::kSharding
+
#include "mongo/platform/basic.h"
#include "mongo/db/auth/action_set.h"
#include "mongo/db/auth/action_type.h"
#include "mongo/db/auth/authorization_manager.h"
#include "mongo/db/auth/authorization_session.h"
-#include "mongo/db/client.h"
#include "mongo/db/commands.h"
-#include "mongo/s/catalog/catalog_cache.h"
-#include "mongo/s/chunk_manager.h"
-#include "mongo/s/config.h"
#include "mongo/s/grid.h"
+#include "mongo/s/sharding_raii.h"
+#include "mongo/util/log.h"
namespace mongo {
-
-using std::shared_ptr;
-
namespace {
class GetShardVersion : public Command {
public:
GetShardVersion() : Command("getShardVersion", false, "getshardversion") {}
- virtual bool slaveOk() const {
+ bool slaveOk() const override {
return true;
}
- virtual bool adminOnly() const {
+ bool adminOnly() const override {
return true;
}
-
- virtual bool supportsWriteConcern(const BSONObj& cmd) const override {
+ bool supportsWriteConcern(const BSONObj& cmd) const override {
return false;
}
- virtual void help(std::stringstream& help) const {
+ void help(std::stringstream& help) const override {
help << " example: { getShardVersion : 'alleyinsider.foo' } ";
}
- virtual Status checkAuthForCommand(Client* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
+ Status checkAuthForCommand(Client* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) override {
if (!AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
ResourcePattern::forExactNamespace(NamespaceString(parseNs(dbname, cmdObj))),
ActionType::getShardVersion)) {
@@ -78,41 +74,28 @@ public:
return Status::OK();
}
- virtual std::string parseNs(const std::string& dbname, const BSONObj& cmdObj) const {
+ std::string parseNs(const std::string& dbname, const BSONObj& cmdObj) const override {
return parseNsFullyQualified(dbname, cmdObj);
}
- virtual bool run(OperationContext* txn,
- const std::string& dbname,
- BSONObj& cmdObj,
- int options,
- std::string& errmsg,
- BSONObjBuilder& result) {
+ bool run(OperationContext* txn,
+ const std::string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ std::string& errmsg,
+ BSONObjBuilder& result) override {
const NamespaceString nss(parseNs(dbname, cmdObj));
- if (nss.size() == 0) {
- return appendCommandStatus(
- result, Status(ErrorCodes::InvalidNamespace, "no namespace specified"));
- }
- auto status = grid.catalogCache()->getDatabase(txn, nss.db().toString());
- if (!status.isOK()) {
- return appendCommandStatus(result, status.getStatus());
- }
-
- std::shared_ptr<DBConfig> config = status.getValue();
- if (!config->isSharded(nss.ns())) {
- return appendCommandStatus(
- result,
- Status(ErrorCodes::NamespaceNotSharded, "ns [" + nss.ns() + " is not sharded."));
- }
+ auto scopedDb = uassertStatusOK(ScopedShardDatabase::getExisting(txn, nss.db()));
+ auto config = scopedDb.db();
auto cm = config->getChunkManagerIfExists(txn, nss.ns());
- if (!cm) {
- errmsg = "no chunk manager?";
- return false;
+ uassert(ErrorCodes::NamespaceNotSharded, "ns [" + nss.ns() + " is not sharded.", cm);
+
+ for (const auto& cmEntry : cm->getChunkMap()) {
+ log() << redact(cmEntry.second->toString());
}
- cm->_printChunks();
cm->getVersion().addToBSON(result, "version");
return true;
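
Here run() shrinks because the errmsg/false failure paths become uasserts and the removed ChunkManager::_printChunks is inlined as a loop over getChunkMap(). A simplified sketch of the uassert shape (stand-in function and placeholder error code, not the real macro or ErrorCodes value):

#include <iostream>
#include <memory>
#include <stdexcept>
#include <string>

// Simplified stand-in for mongo's uassert: raise a coded exception when the
// condition is false, instead of threading errmsg/false back to the caller.
void uassert(int code, const std::string& msg, bool condition) {
    if (!condition) {
        throw std::runtime_error("Location" + std::to_string(code) + ": " + msg);
    }
}

struct ChunkManager {};  // stand-in

std::shared_ptr<ChunkManager> requireSharded(std::shared_ptr<ChunkManager> cm,
                                             const std::string& ns) {
    // 99999 is a placeholder, not the real ErrorCodes::NamespaceNotSharded value.
    uassert(99999, "ns [" + ns + "] is not sharded.", cm != nullptr);
    return cm;
}

int main() {
    try {
        requireSharded(nullptr, "test.coll");
    } catch (const std::exception& e) {
        std::cout << e.what() << "\n";
    }
    return 0;
}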
diff --git a/src/mongo/s/commands/cluster_move_chunk_cmd.cpp b/src/mongo/s/commands/cluster_move_chunk_cmd.cpp
index 4404d9663a0..1c986386d0b 100644
--- a/src/mongo/s/commands/cluster_move_chunk_cmd.cpp
+++ b/src/mongo/s/commands/cluster_move_chunk_cmd.cpp
@@ -42,7 +42,6 @@
#include "mongo/s/catalog/catalog_cache.h"
#include "mongo/s/client/shard_connection.h"
#include "mongo/s/client/shard_registry.h"
-#include "mongo/s/config.h"
#include "mongo/s/config_server_client.h"
#include "mongo/s/grid.h"
#include "mongo/s/migration_secondary_throttle_options.h"
@@ -197,7 +196,7 @@ public:
// Proactively refresh the chunk manager. Not strictly necessary, but this way it's
// immediately up-to-date the next time it's used.
- cm->reload(txn);
+ scopedCM.db()->getChunkManagerIfExists(txn, nss.ns(), true);
result.append("millis", t.millis());
return true;
diff --git a/src/mongo/s/commands/cluster_split_cmd.cpp b/src/mongo/s/commands/cluster_split_cmd.cpp
index 750020c1168..ac24dc03a0e 100644
--- a/src/mongo/s/commands/cluster_split_cmd.cpp
+++ b/src/mongo/s/commands/cluster_split_cmd.cpp
@@ -43,7 +43,6 @@
#include "mongo/s/catalog/catalog_cache.h"
#include "mongo/s/chunk_manager.h"
#include "mongo/s/client/shard_connection.h"
-#include "mongo/s/config.h"
#include "mongo/s/grid.h"
#include "mongo/s/shard_util.h"
#include "mongo/s/sharding_raii.h"
@@ -239,7 +238,7 @@ public:
// Proactively refresh the chunk manager. Not strictly necessary, but this way it's
// immediately up-to-date the next time it's used.
- cm->reload(txn);
+ scopedCM.db()->getChunkManagerIfExists(txn, nss.ns(), true);
return true;
}
diff --git a/src/mongo/s/commands/strategy.cpp b/src/mongo/s/commands/strategy.cpp
index 690756ecf60..651575ed3a0 100644
--- a/src/mongo/s/commands/strategy.cpp
+++ b/src/mongo/s/commands/strategy.cpp
@@ -59,7 +59,6 @@
#include "mongo/s/client/shard_registry.h"
#include "mongo/s/client/version_manager.h"
#include "mongo/s/commands/cluster_explain.h"
-#include "mongo/s/config.h"
#include "mongo/s/grid.h"
#include "mongo/s/query/cluster_cursor_manager.h"
#include "mongo/s/query/cluster_find.h"
diff --git a/src/mongo/s/config.cpp b/src/mongo/s/config.cpp
index 256362be582..7ab8766877e 100644
--- a/src/mongo/s/config.cpp
+++ b/src/mongo/s/config.cpp
@@ -32,13 +32,14 @@
#include "mongo/s/config.h"
+#include <vector>
+
#include "mongo/db/lasterror.h"
#include "mongo/s/catalog/catalog_cache.h"
#include "mongo/s/catalog/sharding_catalog_client.h"
#include "mongo/s/catalog/type_chunk.h"
#include "mongo/s/catalog/type_collection.h"
#include "mongo/s/catalog/type_database.h"
-#include "mongo/s/catalog/type_shard.h"
#include "mongo/s/chunk_manager.h"
#include "mongo/s/chunk_version.h"
#include "mongo/s/client/shard_registry.h"
@@ -48,33 +49,13 @@
namespace mongo {
-using std::set;
-using std::string;
-using std::vector;
-
-CollectionInfo::CollectionInfo(OperationContext* txn,
- const CollectionType& coll,
- repl::OpTime opTime)
- : _configOpTime(std::move(opTime)) {
- // Do this *first* so we're invisible to everyone else
- std::unique_ptr<ChunkManager> manager(stdx::make_unique<ChunkManager>(txn, coll));
- manager->loadExistingRanges(txn, nullptr);
-
- // Collections with no chunks are unsharded, no matter what the collections entry says. This
- // helps prevent errors when dropping in a different process.
- if (manager->numChunks()) {
- _cm = std::move(manager);
- }
-}
+struct CollectionInfo {
+ // The config server opTime at which the chunk manager below was loaded
+ const repl::OpTime configOpTime;
-CollectionInfo::~CollectionInfo() = default;
-
-void CollectionInfo::resetCM(ChunkManager* cm) {
- invariant(cm);
- invariant(_cm);
-
- _cm.reset(cm);
-}
+ // The chunk manager
+ const std::shared_ptr<ChunkManager> cm;
+};
DBConfig::DBConfig(const DatabaseType& dbt, repl::OpTime configOpTime)
: _name(dbt.getName()),
@@ -84,15 +65,10 @@ DBConfig::DBConfig(const DatabaseType& dbt, repl::OpTime configOpTime)
DBConfig::~DBConfig() = default;
-bool DBConfig::isSharded(const string& ns) {
+bool DBConfig::isSharded(const std::string& ns) {
stdx::lock_guard<stdx::mutex> lk(_lock);
- CollectionInfoMap::iterator i = _collections.find(ns);
- if (i == _collections.end()) {
- return false;
- }
-
- return i->second.isSharded();
+ return _collections.count(ns) > 0;
}
void DBConfig::markNSNotSharded(const std::string& ns) {
@@ -104,55 +80,38 @@ void DBConfig::markNSNotSharded(const std::string& ns) {
}
}
-// Handles weird logic related to getting *either* a chunk manager *or* the collection primary
-// shard
void DBConfig::getChunkManagerOrPrimary(OperationContext* txn,
- const string& ns,
+ const std::string& ns,
std::shared_ptr<ChunkManager>& manager,
std::shared_ptr<Shard>& primary) {
- // The logic here is basically that at any time, our collection can become sharded or
- // unsharded
- // via a command. If we're not sharded, we want to send data to the primary, if sharded, we
- // want to send data to the correct chunks, and we can't check both w/o the lock.
-
manager.reset();
primary.reset();
const auto shardRegistry = Grid::get(txn)->shardRegistry();
- {
- stdx::lock_guard<stdx::mutex> lk(_lock);
+ stdx::lock_guard<stdx::mutex> lk(_lock);
- CollectionInfoMap::iterator i = _collections.find(ns);
+ auto it = _collections.find(ns);
- // No namespace
- if (i == _collections.end()) {
- // If we don't know about this namespace, it's unsharded by default
- auto primaryStatus = shardRegistry->getShard(txn, _primaryId);
- if (primaryStatus.isOK()) {
- primary = primaryStatus.getValue();
- }
- } else {
- CollectionInfo& cInfo = i->second;
-
- if (cInfo.isSharded()) {
- manager = cInfo.getCM();
- } else {
- auto primaryStatus = shardRegistry->getShard(txn, _primaryId);
- if (primaryStatus.isOK()) {
- primary = primaryStatus.getValue();
- }
- }
+ if (it == _collections.end()) {
+ // If we don't know about this namespace, it's unsharded by default
+ auto shardStatus = shardRegistry->getShard(txn, _primaryId);
+ if (!shardStatus.isOK()) {
+ uasserted(40371,
+ str::stream() << "The primary shard for collection " << ns
+ << " could not be loaded due to error "
+ << shardStatus.getStatus().toString());
}
- }
- invariant(manager || primary);
- invariant(!manager || !primary);
+ primary = std::move(shardStatus.getValue());
+ } else {
+ const auto& ci = it->second;
+ manager = ci.cm;
+ }
}
-
std::shared_ptr<ChunkManager> DBConfig::getChunkManagerIfExists(OperationContext* txn,
- const string& ns,
+ const std::string& ns,
bool shouldReload,
bool forceReload) {
// Don't report exceptions here as errors in GetLastError
@@ -167,7 +126,7 @@ std::shared_ptr<ChunkManager> DBConfig::getChunkManagerIfExists(OperationContext
}
std::shared_ptr<ChunkManager> DBConfig::getChunkManager(OperationContext* txn,
- const string& ns,
+ const std::string& ns,
bool shouldReload,
bool forceReload) {
ChunkVersion oldVersion;
@@ -176,34 +135,39 @@ std::shared_ptr<ChunkManager> DBConfig::getChunkManager(OperationContext* txn,
{
stdx::lock_guard<stdx::mutex> lk(_lock);
- bool earlyReload = !_collections[ns].isSharded() && (shouldReload || forceReload);
+ auto it = _collections.find(ns);
+
+ const bool earlyReload = (it == _collections.end()) && (shouldReload || forceReload);
if (earlyReload) {
// This is to catch cases where there this is a new sharded collection.
// Note: read the _reloadCount inside the _lock mutex, so _loadIfNeeded will always
// be forced to perform a reload.
const auto currentReloadIteration = _reloadCount.load();
_loadIfNeeded(txn, currentReloadIteration);
+
+ it = _collections.find(ns);
}
- const CollectionInfo& ci = _collections[ns];
uassert(ErrorCodes::NamespaceNotSharded,
str::stream() << "Collection is not sharded: " << ns,
- ci.isSharded());
+ it != _collections.end());
+
+ const auto& ci = it->second;
if (!(shouldReload || forceReload) || earlyReload) {
- return ci.getCM();
+ return ci.cm;
}
- if (ci.getCM()) {
- oldManager = ci.getCM();
- oldVersion = ci.getCM()->getVersion();
+ if (ci.cm) {
+ oldManager = ci.cm;
+ oldVersion = ci.cm->getVersion();
}
}
// TODO: We need to keep this first one-chunk check in until we have a more efficient way of
// creating/reusing a chunk manager, as doing so requires copying the full set of chunks
// currently
- vector<ChunkType> newestChunk;
+ std::vector<ChunkType> newestChunk;
if (oldVersion.isSet() && !forceReload) {
uassertStatusOK(Grid::get(txn)->catalogClient(txn)->getChunks(
txn,
@@ -219,14 +183,16 @@ std::shared_ptr<ChunkManager> DBConfig::getChunkManager(OperationContext* txn,
ChunkVersion v = newestChunk[0].getVersion();
if (v.equals(oldVersion)) {
stdx::lock_guard<stdx::mutex> lk(_lock);
- const CollectionInfo& ci = _collections[ns];
+
+ auto it = _collections.find(ns);
uassert(15885,
str::stream() << "not sharded after reloading from chunks : " << ns,
- ci.isSharded());
- return ci.getCM();
+ it != _collections.end());
+
+ const auto& ci = it->second;
+ return ci.cm;
}
}
-
} else if (!oldVersion.isSet()) {
warning() << "version 0 found when " << (forceReload ? "reloading" : "checking")
<< " chunk manager; collection '" << ns << "' initially detected as sharded";
@@ -241,15 +207,17 @@ std::shared_ptr<ChunkManager> DBConfig::getChunkManager(OperationContext* txn,
// If we have a target we're going for see if we've hit already
stdx::lock_guard<stdx::mutex> lk(_lock);
- CollectionInfo& ci = _collections[ns];
+ auto it = _collections.find(ns);
+
+ if (it != _collections.end()) {
+ const auto& ci = it->second;
- if (ci.isSharded() && ci.getCM()) {
ChunkVersion currentVersion = newestChunk[0].getVersion();
// Only reload if the version we found is newer than our own in the same epoch
- if (currentVersion <= ci.getCM()->getVersion() &&
- ci.getCM()->getVersion().hasEqualEpoch(currentVersion)) {
- return ci.getCM();
+ if (currentVersion <= ci.cm->getVersion() &&
+ ci.cm->getVersion().hasEqualEpoch(currentVersion)) {
+ return ci.cm;
}
}
}
@@ -273,11 +241,15 @@ std::shared_ptr<ChunkManager> DBConfig::getChunkManager(OperationContext* txn,
stdx::lock_guard<stdx::mutex> lk(_lock);
- CollectionInfo& ci = _collections[ns];
- uassert(14822, (string) "state changed in the middle: " + ns, ci.isSharded());
+ auto it = _collections.find(ns);
+ uassert(14822,
+ str::stream() << "Collection " << ns << " became unsharded in the middle.",
+ it != _collections.end());
+
+ const auto& ci = it->second;
// Reset if our versions aren't the same
- bool shouldReset = !tempChunkManager->getVersion().equals(ci.getCM()->getVersion());
+ bool shouldReset = !tempChunkManager->getVersion().equals(ci.cm->getVersion());
// Also reset if we're forced to do so
if (!shouldReset && forceReload) {
@@ -296,12 +268,12 @@ std::shared_ptr<ChunkManager> DBConfig::getChunkManager(OperationContext* txn,
// TODO: Assert in next version, to allow smooth upgrades
//
- if (shouldReset && tempChunkManager->getVersion() < ci.getCM()->getVersion()) {
+ if (shouldReset && tempChunkManager->getVersion() < ci.cm->getVersion()) {
shouldReset = false;
warning() << "not resetting chunk manager for collection '" << ns << "', config version is "
<< tempChunkManager->getVersion() << " and "
- << "old version is " << ci.getCM()->getVersion();
+ << "old version is " << ci.cm->getVersion();
}
// end legacy behavior
@@ -309,19 +281,18 @@ std::shared_ptr<ChunkManager> DBConfig::getChunkManager(OperationContext* txn,
if (shouldReset) {
const auto cmOpTime = tempChunkManager->getConfigOpTime();
- // The existing ChunkManager could have been updated since we last checked, so
- // replace the existing chunk manager only if it is strictly newer.
- // The condition should be (>) than instead of (>=), but use (>=) since legacy non-repl
- // config servers will always have an opTime of zero.
- if (cmOpTime >= ci.getCM()->getConfigOpTime()) {
- ci.resetCM(tempChunkManager.release());
+ // The existing ChunkManager could have been updated since we last checked, so replace the
+ // existing chunk manager only if it is strictly newer.
+ if (cmOpTime > ci.cm->getConfigOpTime()) {
+ _collections.erase(ns);
+ auto emplacedEntryIt =
+ _collections.emplace(ns, CollectionInfo{cmOpTime, std::move(tempChunkManager)})
+ .first;
+ return emplacedEntryIt->second.cm;
}
}
- uassert(
- 15883, str::stream() << "not sharded after chunk manager reset : " << ns, ci.isSharded());
-
- return ci.getCM();
+ return ci.cm;
}
bool DBConfig::load(OperationContext* txn) {
@@ -354,35 +325,35 @@ bool DBConfig::_loadIfNeeded(OperationContext* txn, Counter reloadIteration) {
_configOpTime = dbOpTimePair.opTime;
// Load all collections
- vector<CollectionType> collections;
+ std::vector<CollectionType> collections;
repl::OpTime configOpTimeWhenLoadingColl;
uassertStatusOK(
catalogClient->getCollections(txn, &_name, &collections, &configOpTimeWhenLoadingColl));
- int numCollsErased = 0;
- int numCollsSharded = 0;
-
invariant(configOpTimeWhenLoadingColl >= _configOpTime);
for (const auto& coll : collections) {
auto collIter = _collections.find(coll.getNs().ns());
if (collIter != _collections.end()) {
- invariant(configOpTimeWhenLoadingColl >= collIter->second.getConfigOpTime());
+ invariant(configOpTimeWhenLoadingColl >= collIter->second.configOpTime);
}
- if (coll.getDropped()) {
- _collections.erase(coll.getNs().ns());
- numCollsErased++;
- } else {
- _collections[coll.getNs().ns()] =
- CollectionInfo(txn, coll, configOpTimeWhenLoadingColl);
- numCollsSharded++;
+ _collections.erase(coll.getNs().ns());
+
+ if (!coll.getDropped()) {
+ // Do the blocking collection load
+ std::unique_ptr<ChunkManager> manager(stdx::make_unique<ChunkManager>(txn, coll));
+ manager->loadExistingRanges(txn, nullptr);
+
+ // Collections with no chunks are unsharded, no matter what the collections entry says
+ if (manager->numChunks()) {
+ _collections.emplace(
+ coll.getNs().ns(),
+ CollectionInfo{configOpTimeWhenLoadingColl, std::move(manager)});
+ }
}
}
- LOG(2) << "found " << numCollsSharded << " collections left and " << numCollsErased
- << " collections dropped for database " << _name;
-
_reloadCount.fetchAndAdd(1);
return true;
@@ -406,17 +377,13 @@ bool DBConfig::reload(OperationContext* txn) {
return successful;
}
-void DBConfig::getAllShardIds(set<ShardId>* shardIds) {
- dassert(shardIds);
-
+void DBConfig::getAllShardIds(std::set<ShardId>* shardIds) {
stdx::lock_guard<stdx::mutex> lk(_lock);
shardIds->insert(_primaryId);
- for (CollectionInfoMap::const_iterator it(_collections.begin()), end(_collections.end());
- it != end;
- ++it) {
- if (it->second.isSharded()) {
- it->second.getCM()->getAllShardIds(shardIds);
- } // TODO: handle collections on non-primary shard
+
+ for (const auto& ciEntry : _collections) {
+ const auto& ci = ciEntry.second;
+ ci.cm->getAllShardIds(shardIds);
}
}
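
The biggest conceptual change in config.cpp: CollectionInfo becomes a private, immutable struct, so a cached entry is never mutated in place. Installing a newer chunk manager means erasing the entry and emplacing a fresh one, and readers already holding the shared_ptr keep a consistent snapshot. A standalone sketch of that erase-and-emplace flow (stand-in types; the opTime comparison is simplified from the real getConfigOpTime check):

#include <cassert>
#include <map>
#include <memory>
#include <string>

// Stand-ins for repl::OpTime and ChunkManager.
using OpTime = long long;
struct ChunkManager {
    int version;
};

// Mirrors the new file-local CollectionInfo in config.cpp: both members are
// const, so an entry can never be mutated in place...
struct CollectionInfo {
    const OpTime configOpTime;  // config opTime at which the manager was loaded
    const std::shared_ptr<ChunkManager> cm;
};

// ...and "reset" becomes erase-then-emplace of a brand-new entry, replacing
// the old resetCM() which swapped the pointer inside a shared struct.
std::shared_ptr<ChunkManager> installNewerManager(
    std::map<std::string, CollectionInfo>& collections,
    const std::string& ns,
    OpTime cmOpTime,
    std::shared_ptr<ChunkManager> fresh) {
    auto it = collections.find(ns);
    if (it != collections.end() && cmOpTime <= it->second.configOpTime) {
        return it->second.cm;  // cached entry is at least as new; keep it
    }
    collections.erase(ns);
    auto emplaced = collections.emplace(ns, CollectionInfo{cmOpTime, std::move(fresh)});
    return emplaced.first->second.cm;
}

int main() {
    std::map<std::string, CollectionInfo> collections;
    auto cm = installNewerManager(
        collections, "test.coll", 5, std::make_shared<ChunkManager>(ChunkManager{1}));
    assert(cm->version == 1);
    // An older load attempt is rejected; the cached manager survives.
    cm = installNewerManager(
        collections, "test.coll", 3, std::make_shared<ChunkManager>(ChunkManager{2}));
    assert(cm->version == 1);
}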
diff --git a/src/mongo/s/config.h b/src/mongo/s/config.h
index 1dcf98d9529..52b9afe9aa3 100644
--- a/src/mongo/s/config.h
+++ b/src/mongo/s/config.h
@@ -29,8 +29,8 @@
#pragma once
#include <set>
+#include <string>
-#include "mongo/db/jsobj.h"
#include "mongo/db/repl/optime.h"
#include "mongo/platform/atomic_word.h"
#include "mongo/s/client/shard.h"
@@ -39,34 +39,10 @@
namespace mongo {
class ChunkManager;
-class CollectionType;
+struct CollectionInfo;
class DatabaseType;
class OperationContext;
-struct CollectionInfo {
- CollectionInfo() = default;
- CollectionInfo(OperationContext* txn, const CollectionType& coll, repl::OpTime opTime);
- ~CollectionInfo();
-
- bool isSharded() const {
- return _cm.get();
- }
-
- std::shared_ptr<ChunkManager> getCM() const {
- return _cm;
- }
-
- void resetCM(ChunkManager* cm);
-
- repl::OpTime getConfigOpTime() const {
- return _configOpTime;
- }
-
-private:
- repl::OpTime _configOpTime;
- std::shared_ptr<ChunkManager> _cm;
-};
-
/**
* Represents the cache entry for a database.
*/
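
config.h now exposes only `struct CollectionInfo;` and the definition lives in config.cpp, so includers of the header no longer compile against the ChunkManager-holding internals. A minimal two-file sketch of the same forward-declaration pattern (file and type names are illustrative):

// widget_cache.h -- the public header exposes only a forward declaration,
// mirroring how config.h now declares `struct CollectionInfo;`.
#pragma once
#include <map>
#include <memory>
#include <string>

struct CacheEntry;  // full definition lives in widget_cache.cpp

class WidgetCache {
public:
    WidgetCache();
    ~WidgetCache();  // defined out of line, where CacheEntry is complete
    bool contains(const std::string& key) const;

private:
    // shared_ptr can hold an incomplete type, so includers of this header
    // never need CacheEntry's layout.
    std::map<std::string, std::shared_ptr<CacheEntry>> _entries;
};

// widget_cache.cpp -- the only translation unit that sees the internals.
struct CacheEntry {
    int hits = 0;
};

WidgetCache::WidgetCache() = default;
WidgetCache::~WidgetCache() = default;

bool WidgetCache::contains(const std::string& key) const {
    return _entries.count(key) > 0;
}

int main() {
    WidgetCache cache;
    return cache.contains("absent") ? 1 : 0;
}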
diff --git a/src/mongo/s/server.cpp b/src/mongo/s/server.cpp
index 6fdbb8a221c..a6e21f8b5f3 100644
--- a/src/mongo/s/server.cpp
+++ b/src/mongo/s/server.cpp
@@ -70,7 +70,6 @@
#include "mongo/s/client/shard_registry.h"
#include "mongo/s/client/shard_remote.h"
#include "mongo/s/client/sharding_connection_hook_for_mongos.h"
-#include "mongo/s/config.h"
#include "mongo/s/grid.h"
#include "mongo/s/mongos_options.h"
#include "mongo/s/query/cluster_cursor_cleanup_job.h"
@@ -368,6 +367,8 @@ static int _main() {
getGlobalServiceContext()->setFastClockSource(FastClockSourceFactory::create(Milliseconds{10}));
+ auto shardingContext = Grid::get(getGlobalServiceContext());
+
// we either have a setting where all processes are in localhost or none are
std::vector<HostAndPort> configServers = mongosGlobalParams.configdbs.getServers();
for (std::vector<HostAndPort>::const_iterator it = configServers.begin();
@@ -376,10 +377,10 @@ static int _main() {
const HostAndPort& configAddr = *it;
if (it == configServers.begin()) {
- grid.setAllowLocalHost(configAddr.isLocalHost());
+ shardingContext->setAllowLocalHost(configAddr.isLocalHost());
}
- if (configAddr.isLocalHost() != grid.allowLocalHost()) {
+ if (configAddr.isLocalHost() != shardingContext->allowLocalHost()) {
mongo::log(LogComponent::kDefault)
<< "cannot mix localhost and ip addresses in configdbs";
return 10;
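
The server.cpp hunk replaces the bare global grid with Grid::get(getGlobalServiceContext()), the accessor style used across these files. A hedged sketch of that pattern (stand-in types; the real Grid is attached to a ServiceContext decoration, not a function-local static):

#include <cassert>

// Stand-ins: the real Grid hangs off ServiceContext as a decoration.
struct ServiceContext {};

class Grid {
public:
    // Accessor pattern: callers name the context they operate under
    // instead of reaching for a file-scope global.
    static Grid* get(ServiceContext* serviceContext);

    void setAllowLocalHost(bool allow) { _allowLocalShard = allow; }
    bool allowLocalHost() const { return _allowLocalShard; }

private:
    bool _allowLocalShard = true;
};

Grid* Grid::get(ServiceContext*) {
    static Grid instance;  // sketch only; the real lookup is per-context
    return &instance;
}

int main() {
    ServiceContext ctx;
    auto* shardingContext = Grid::get(&ctx);
    shardingContext->setAllowLocalHost(false);
    assert(!Grid::get(&ctx)->allowLocalHost());
}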