author    | Kaloian Manassiev <kaloian.manassiev@mongodb.com> | 2017-01-31 16:35:04 -0500
committer | Kaloian Manassiev <kaloian.manassiev@mongodb.com> | 2017-02-27 15:56:04 -0500
commit    | 0dc94169560fbab9e1761b110c1c4a6a978ac414 (patch)
tree      | 3a0237720db5d45548d53766ede1e8e441d7ef7a
parent    | 533fde5e13e39486b8b59e1361f1ce497f32c75e (diff)
download  | mongo-0dc94169560fbab9e1761b110c1c4a6a978ac414.tar.gz
SERVER-27382 Get rid of DBConfig::getChunkManagerOrPrimary
(cherry picked from commit 7f71474649f01b8914b4284be44cbe325c1458d0)
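In effect, every call site that previously asked DBConfig for "either the chunk manager or the primary shard" now obtains both pieces of routing state through ScopedChunkManager. The following is a minimal sketch of that caller pattern, distilled from the updated ChunkManagerTargeter::init in the diff below; the wrapper function targetCollection, its out-parameters, and the header path are illustrative assumptions, not part of the change itself.

```cpp
// Sketch only: shows the ScopedChunkManager-based pattern this commit adopts.
// The header path is an assumption about this era of the tree; the function
// name and out-parameters are hypothetical.
#include "mongo/s/sharding_raii.h"

namespace mongo {

Status targetCollection(OperationContext* txn,
                        const NamespaceString& nss,
                        std::shared_ptr<ChunkManager>* manager,
                        std::shared_ptr<Shard>* primary) {
    // One call replaces DBConfig::getChunkManagerOrPrimary: it loads (or creates)
    // the database entry and returns the collection's routing state.
    auto scopedCMStatus = ScopedChunkManager::getOrCreate(txn, nss);
    if (!scopedCMStatus.isOK()) {
        return scopedCMStatus.getStatus();
    }

    const auto& scopedCM = scopedCMStatus.getValue();

    // cm() is set when the collection is sharded; otherwise primary() points at
    // the database's primary shard, mirroring the removed DBConfig method.
    *manager = scopedCM.cm();
    *primary = scopedCM.primary();

    return Status::OK();
}

}  // namespace mongo
```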
-rw-r--r-- | jstests/sharding/coll_epoch_test1.js | 31
-rw-r--r-- | src/mongo/s/chunk_manager_targeter.cpp | 46
-rw-r--r-- | src/mongo/s/commands/cluster_write.cpp | 23
-rw-r--r-- | src/mongo/s/config.cpp | 31
-rw-r--r-- | src/mongo/s/config.h | 9
-rw-r--r-- | src/mongo/s/query/cluster_find.cpp | 18 |
6 files changed, 52 insertions, 106 deletions
diff --git a/jstests/sharding/coll_epoch_test1.js b/jstests/sharding/coll_epoch_test1.js
index 9a16f497cff..b3cca85d495 100644
--- a/jstests/sharding/coll_epoch_test1.js
+++ b/jstests/sharding/coll_epoch_test1.js
@@ -5,24 +5,23 @@

     var st = new ShardingTest({shards: 3, mongos: 3});

-    // Use separate mongoses for admin, inserting data, and validating results, so no
-    // single-mongos tricks will work
-    var insertMongos = st.s2;
-    var staleMongos = st.s1;
+    var config = st.s0.getDB("config");
+    var admin = st.s0.getDB("admin");
+    var coll = st.s0.getCollection("foo.bar");

-    var config = st.s.getDB("config");
-    var admin = st.s.getDB("admin");
-    var coll = st.s.getCollection("foo.bar");
+    // Use separate mongoses for admin, inserting data, and validating results, so no single-mongos
+    // tricks will work
+    var staleMongos = st.s1;
+    var insertMongos = st.s2;

-    var shards = {};
+    var shards = [];
     config.shards.find().forEach(function(doc) {
-        shards[doc._id] = new Mongo(doc.host);
+        shards.push(doc._id);
     });

     //
     // Test that inserts and queries go to the correct shard even when the collection has been
-    // sharded
-    // in the background
+    // sharded in the background
     //

     jsTest.log("Enabling sharding for the first time...");
@@ -74,8 +73,8 @@

     var getOtherShard = function(shard) {
         for (var id in shards) {
-            if (id != shard)
-                return id;
+            if (shards[id] != shard)
+                return shards[id];
         }
     };
@@ -86,8 +85,9 @@
     jsTest.log("moved primary...");

     bulk = insertMongos.getCollection(coll + "").initializeUnorderedBulkOp();
-    for (var i = 0; i < 100; i++)
+    for (var i = 0; i < 100; i++) {
         bulk.insert({test: "c"});
+    }
     assert.writeOK(bulk.execute());

     assert.eq(100, staleMongos.getCollection(coll + "").find({test: "c"}).itcount());
@@ -110,8 +110,9 @@
     assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {_id: 1}}));

     bulk = insertMongos.getCollection(coll + "").initializeUnorderedBulkOp();
-    for (var i = 0; i < 100; i++)
+    for (var i = 0; i < 100; i++) {
         bulk.insert({test: "d"});
+    }
     assert.writeOK(bulk.execute());

     assert.eq(100, staleMongos.getCollection(coll + "").find({test: "d"}).itcount());
diff --git a/src/mongo/s/chunk_manager_targeter.cpp b/src/mongo/s/chunk_manager_targeter.cpp
index d8304521175..7771fa31db5 100644
--- a/src/mongo/s/chunk_manager_targeter.cpp
+++ b/src/mongo/s/chunk_manager_targeter.cpp
@@ -289,13 +289,14 @@ ChunkManagerTargeter::ChunkManagerTargeter(const NamespaceString& nss, TargeterS

 Status ChunkManagerTargeter::init(OperationContext* txn) {
-    auto dbStatus = ScopedShardDatabase::getOrCreate(txn, _nss.db());
-    if (!dbStatus.isOK()) {
-        return dbStatus.getStatus();
+    auto scopedCMStatus = ScopedChunkManager::getOrCreate(txn, _nss);
+    if (!scopedCMStatus.isOK()) {
+        return scopedCMStatus.getStatus();
     }

-    auto scopedDb = std::move(dbStatus.getValue());
-    scopedDb.db()->getChunkManagerOrPrimary(txn, _nss.ns(), _manager, _primary);
+    const auto& scopedCM = scopedCMStatus.getValue();
+    _manager = scopedCM.cm();
+    _primary = scopedCM.primary();

     return Status::OK();
 }
@@ -702,13 +703,14 @@ Status ChunkManagerTargeter::refreshIfNeeded(OperationContext* txn, bool* wasCha
     shared_ptr<ChunkManager> lastManager = _manager;
     shared_ptr<Shard> lastPrimary = _primary;

-    auto dbStatus = ScopedShardDatabase::getOrCreate(txn, _nss.db());
-    if (!dbStatus.isOK()) {
-        return dbStatus.getStatus();
+    auto scopedCMStatus = ScopedChunkManager::getOrCreate(txn, _nss);
+    if (!scopedCMStatus.isOK()) {
+        return scopedCMStatus.getStatus();
     }

-    auto scopedDb = std::move(dbStatus.getValue());
-    scopedDb.db()->getChunkManagerOrPrimary(txn, _nss.ns(), _manager, _primary);
+    const auto& scopedCM = scopedCMStatus.getValue();
+    _manager = scopedCM.cm();
+    _primary = scopedCM.primary();

     // We now have the latest metadata from the cache.
@@ -770,25 +772,17 @@ Status ChunkManagerTargeter::refreshNow(OperationContext* txn, RefreshType refre
     // Try not to spam the configs
     refreshBackoff();

-    auto dbStatus = ScopedShardDatabase::getOrCreate(txn, _nss.db());
-    if (!dbStatus.isOK()) {
-        return dbStatus.getStatus();
-    }
-
-    const auto& scopedDb = dbStatus.getValue();
+    ScopedChunkManager::refreshAndGet(txn, _nss);

-    // TODO: Improve synchronization and make more explicit
-    if (refreshType == RefreshType_RefreshChunkManager) {
-        try {
-            // Forces a remote check of the collection info, synchronization between threads happens
-            // internally
-            scopedDb.db()->getChunkManagerIfExists(txn, _nss.ns(), true);
-        } catch (const DBException& ex) {
-            return Status(ErrorCodes::UnknownError, ex.toString());
-        }
+    auto scopedCMStatus = ScopedChunkManager::get(txn, _nss);
+    if (!scopedCMStatus.isOK()) {
+        return scopedCMStatus.getStatus();
     }

-    scopedDb.db()->getChunkManagerOrPrimary(txn, _nss.ns(), _manager, _primary);
+    const auto& scopedCM = scopedCMStatus.getValue();
+
+    _manager = scopedCM.cm();
+    _primary = scopedCM.primary();

     return Status::OK();
 }
diff --git a/src/mongo/s/commands/cluster_write.cpp b/src/mongo/s/commands/cluster_write.cpp
index 217c37e273b..b3b68e3af04 100644
--- a/src/mongo/s/commands/cluster_write.cpp
+++ b/src/mongo/s/commands/cluster_write.cpp
@@ -37,15 +37,12 @@
 #include "mongo/db/lasterror.h"
 #include "mongo/db/write_concern_options.h"
 #include "mongo/s/balancer_configuration.h"
-#include "mongo/s/catalog/catalog_cache.h"
 #include "mongo/s/catalog/sharding_catalog_client.h"
 #include "mongo/s/catalog/type_collection.h"
 #include "mongo/s/chunk.h"
-#include "mongo/s/chunk_manager.h"
 #include "mongo/s/chunk_manager_targeter.h"
 #include "mongo/s/client/dbclient_multi_command.h"
 #include "mongo/s/client/shard_registry.h"
-#include "mongo/s/config.h"
 #include "mongo/s/config_server_client.h"
 #include "mongo/s/grid.h"
 #include "mongo/s/shard_util.h"
@@ -176,34 +173,30 @@ BSONObj findExtremeKeyForShard(OperationContext* txn,
  * Splits the chunks touched based from the targeter stats if needed.
  */
 void splitIfNeeded(OperationContext* txn, const NamespaceString& nss, const TargeterStats& stats) {
-    auto status = Grid::get(txn)->catalogCache()->getDatabase(txn, nss.db().toString());
-    if (!status.isOK()) {
-        warning() << "failed to get database config for " << nss
-                  << " while checking for auto-split: " << status.getStatus();
+    auto scopedCMStatus = ScopedChunkManager::get(txn, nss);
+    if (!scopedCMStatus.isOK()) {
+        warning() << "failed to get collection information for " << nss
+                  << " while checking for auto-split" << causedBy(scopedCMStatus.getStatus());
         return;
     }

-    auto config = status.getValue();
+    const auto& scopedCM = scopedCMStatus.getValue();

-    std::shared_ptr<ChunkManager> chunkManager;
-    std::shared_ptr<Shard> dummyShard;
-    config->getChunkManagerOrPrimary(txn, nss.ns(), chunkManager, dummyShard);
-
-    if (!chunkManager) {
+    if (!scopedCM.cm()) {
         return;
     }

     for (auto it = stats.chunkSizeDelta.cbegin(); it != stats.chunkSizeDelta.cend(); ++it) {
         std::shared_ptr<Chunk> chunk;
         try {
-            chunk = chunkManager->findIntersectingChunkWithSimpleCollation(txn, it->first);
+            chunk = scopedCM.cm()->findIntersectingChunkWithSimpleCollation(txn, it->first);
         } catch (const AssertionException& ex) {
             warning() << "could not find chunk while checking for auto-split: "
                       << causedBy(redact(ex));
             return;
         }

-        updateChunkWriteStatsAndSplitIfNeeded(txn, chunkManager.get(), chunk.get(), it->second);
+        updateChunkWriteStatsAndSplitIfNeeded(txn, scopedCM.cm().get(), chunk.get(), it->second);
     }
 }
diff --git a/src/mongo/s/config.cpp b/src/mongo/s/config.cpp
index 7ab8766877e..fea546c14a4 100644
--- a/src/mongo/s/config.cpp
+++ b/src/mongo/s/config.cpp
@@ -42,7 +42,6 @@
 #include "mongo/s/catalog/type_database.h"
 #include "mongo/s/chunk_manager.h"
 #include "mongo/s/chunk_version.h"
-#include "mongo/s/client/shard_registry.h"
 #include "mongo/s/grid.h"
 #include "mongo/stdx/memory.h"
 #include "mongo/util/log.h"
@@ -80,36 +79,6 @@ void DBConfig::markNSNotSharded(const std::string& ns) {
     }
 }

-void DBConfig::getChunkManagerOrPrimary(OperationContext* txn,
-                                        const std::string& ns,
-                                        std::shared_ptr<ChunkManager>& manager,
-                                        std::shared_ptr<Shard>& primary) {
-    manager.reset();
-    primary.reset();
-
-    const auto shardRegistry = Grid::get(txn)->shardRegistry();
-
-    stdx::lock_guard<stdx::mutex> lk(_lock);
-
-    auto it = _collections.find(ns);
-
-    if (it == _collections.end()) {
-        // If we don't know about this namespace, it's unsharded by default
-        auto shardStatus = shardRegistry->getShard(txn, _primaryId);
-        if (!shardStatus.isOK()) {
-            uasserted(40371,
-                      str::stream() << "The primary shard for collection " << ns
-                                    << " could not be loaded due to error "
-                                    << shardStatus.getStatus().toString());
-        }
-
-        primary = std::move(shardStatus.getValue());
-    } else {
-        const auto& ci = it->second;
-        manager = ci.cm;
-    }
-}
-
 std::shared_ptr<ChunkManager> DBConfig::getChunkManagerIfExists(OperationContext* txn,
                                                                 const std::string& ns,
                                                                 bool shouldReload,
diff --git a/src/mongo/s/config.h b/src/mongo/s/config.h
index 52b9afe9aa3..acc0c4968c3 100644
--- a/src/mongo/s/config.h
+++ b/src/mongo/s/config.h
@@ -33,7 +33,7 @@

 #include "mongo/db/repl/optime.h"
 #include "mongo/platform/atomic_word.h"
-#include "mongo/s/client/shard.h"
+#include "mongo/s/shard_id.h"
 #include "mongo/stdx/mutex.h"

 namespace mongo {
@@ -81,13 +81,6 @@ public:
      */
     bool isSharded(const std::string& ns);

-    // Atomically returns *either* the chunk manager *or* the primary shard for the collection,
-    // neither if the collection doesn't exist.
-    void getChunkManagerOrPrimary(OperationContext* txn,
-                                  const std::string& ns,
-                                  std::shared_ptr<ChunkManager>& manager,
-                                  std::shared_ptr<Shard>& primary);
-
     std::shared_ptr<ChunkManager> getChunkManager(OperationContext* txn,
                                                   const std::string& ns,
                                                   bool reload = false,
diff --git a/src/mongo/s/query/cluster_find.cpp b/src/mongo/s/query/cluster_find.cpp
index ab5b15b0f49..c1d32e5c6be 100644
--- a/src/mongo/s/query/cluster_find.cpp
+++ b/src/mongo/s/query/cluster_find.cpp
@@ -320,23 +320,19 @@ StatusWith<CursorId> ClusterFind::runQuery(OperationContext* txn,
     // Re-target and re-send the initial find command to the shards until we have established the
     // shard version.
     for (size_t retries = 1; retries <= kMaxStaleConfigRetries; ++retries) {
-        auto dbConfigStatus = ScopedShardDatabase::getExisting(txn, query.nss().db());
-        if (dbConfigStatus == ErrorCodes::NamespaceNotFound) {
+        auto scopedCMStatus = ScopedChunkManager::get(txn, query.nss());
+        if (scopedCMStatus == ErrorCodes::NamespaceNotFound) {
             // If the database doesn't exist, we successfully return an empty result set without
             // creating a cursor.
             return CursorId(0);
-        } else if (!dbConfigStatus.isOK()) {
-            return dbConfigStatus.getStatus();
+        } else if (!scopedCMStatus.isOK()) {
+            return scopedCMStatus.getStatus();
         }

-        const auto& dbConfig = dbConfigStatus.getValue();
-
-        std::shared_ptr<ChunkManager> chunkManager;
-        std::shared_ptr<Shard> primary;
-        dbConfig.db()->getChunkManagerOrPrimary(txn, query.nss().ns(), chunkManager, primary);
+        const auto& scopedCM = scopedCMStatus.getValue();

         auto cursorId = runQueryWithoutRetrying(
-            txn, query, readPref, chunkManager.get(), std::move(primary), results, viewDefinition);
+            txn, query, readPref, scopedCM.cm().get(), scopedCM.primary(), results, viewDefinition);
         if (cursorId.isOK()) {
             return cursorId;
         }
@@ -357,7 +353,7 @@ StatusWith<CursorId> ClusterFind::runQuery(OperationContext* txn,
         if (status == ErrorCodes::StaleEpoch) {
             Grid::get(txn)->catalogCache()->invalidate(query.nss().db().toString());
         } else {
-            dbConfig.db()->getChunkManagerIfExists(txn, query.nss().ns(), true);
+            scopedCM.db()->getChunkManagerIfExists(txn, query.nss().ns(), true);
         }
     }