author    Kaloian Manassiev <kaloian.manassiev@mongodb.com>  2017-03-27 17:30:34 -0400
committer Kaloian Manassiev <kaloian.manassiev@mongodb.com>  2017-03-31 13:43:49 -0400
commit    84d94351aa308caf2c684b0fe5fbb7f942c75bd0 (patch)
tree      d3534c779770d08913087a68ca6cde7932d86895
parent    896687b8ae6b7f848da88c7186a44bf3163c2254 (diff)
download  mongo-84d94351aa308caf2c684b0fe5fbb7f942c75bd0.tar.gz
SERVER-22611 Make the catalog cache unit-tests go through the CatalogCache
Instead of calling its internal logic directly.
-rw-r--r--  src/mongo/s/catalog_cache.cpp               223
-rw-r--r--  src/mongo/s/catalog_cache.h                  26
-rw-r--r--  src/mongo/s/chunk_manager_query_test.cpp      4
-rw-r--r--  src/mongo/s/chunk_manager_refresh_test.cpp  333
-rw-r--r--  src/mongo/s/chunk_manager_test_fixture.cpp   54
-rw-r--r--  src/mongo/s/chunk_manager_test_fixture.h     19
6 files changed, 383 insertions, 276 deletions
diff --git a/src/mongo/s/catalog_cache.cpp b/src/mongo/s/catalog_cache.cpp
index 8345a2ab0c8..a2345678a27 100644
--- a/src/mongo/s/catalog_cache.cpp
+++ b/src/mongo/s/catalog_cache.cpp
@@ -88,6 +88,126 @@ public:
}
};
+/**
+ * Blocking method, which refreshes the routing information for the specified collection. If
+ * 'existingRoutingInfo' has been specified uses this as a basis to perform an 'incremental'
+ * refresh, which only fetches the chunks which changed. Otherwise does a full refresh, fetching all
+ * the chunks for the collection.
+ *
+ * Returns the refreshed routing information if the collection is still sharded or nullptr if it is
+ * not. If refresh fails for any reason, throws a DBException.
+ *
+ * With the exception of ConflictingOperationInProgress, error codes thrown from this method are
+ * final in that there is nothing that can be done to remedy them other than pass the error to the
+ * user.
+ *
+ * ConflictingOperationInProgress indicates that the chunk metadata was found to be inconsistent.
+ * Since this may be transient, due to the collection being dropped or recreated, the caller must
+ * retry the reload up to some configurable number of attempts.
+ */
+std::shared_ptr<ChunkManager> refreshCollectionRoutingInfo(
+ OperationContext* opCtx,
+ const NamespaceString& nss,
+ std::shared_ptr<ChunkManager> existingRoutingInfo) {
+ Timer t;
+
+ const auto catalogClient = Grid::get(opCtx)->catalogClient(opCtx);
+
+ // Decide whether to do a full or partial load based on the state of the collection
+ auto collStatus = catalogClient->getCollection(opCtx, nss.ns());
+ if (collStatus == ErrorCodes::NamespaceNotFound) {
+ return nullptr;
+ }
+
+ const auto coll = uassertStatusOK(std::move(collStatus)).value;
+ if (coll.getDropped()) {
+ return nullptr;
+ }
+
+ ChunkVersion startingCollectionVersion;
+ ChunkMap chunkMap =
+ SimpleBSONObjComparator::kInstance.makeBSONObjIndexedMap<std::shared_ptr<Chunk>>();
+
+ if (!existingRoutingInfo) {
+ // If we don't have a basis chunk manager, do a full refresh
+ startingCollectionVersion = ChunkVersion(0, 0, coll.getEpoch());
+ } else if (existingRoutingInfo->getVersion().epoch() != coll.getEpoch()) {
+ // If the collection's epoch has changed, do a full refresh
+ startingCollectionVersion = ChunkVersion(0, 0, coll.getEpoch());
+ } else {
+ // Otherwise do a partial refresh
+ startingCollectionVersion = existingRoutingInfo->getVersion();
+ chunkMap = existingRoutingInfo->chunkMap();
+ }
+
+ log() << "Refreshing chunks for collection " << nss << " based on version "
+ << startingCollectionVersion;
+
+ // Diff tracker should *always* find at least one chunk if collection exists
+ const auto diffQuery =
+ CMConfigDiffTracker::createConfigDiffQuery(nss, startingCollectionVersion);
+
+ // Query the chunks which have changed
+ std::vector<ChunkType> newChunks;
+ repl::OpTime opTime;
+ uassertStatusOK(Grid::get(opCtx)->catalogClient(opCtx)->getChunks(
+ opCtx,
+ diffQuery.query,
+ diffQuery.sort,
+ boost::none,
+ &newChunks,
+ &opTime,
+ repl::ReadConcernLevel::kMajorityReadConcern));
+
+ ChunkVersion collectionVersion = startingCollectionVersion;
+
+ ShardVersionMap unusedShardVersions;
+ CMConfigDiffTracker differ(nss, &chunkMap, &collectionVersion, &unusedShardVersions);
+
+ const int diffsApplied = differ.calculateConfigDiff(opCtx, newChunks);
+
+ if (diffsApplied < 1) {
+ log() << "Refresh for collection " << nss << " took " << t.millis()
+ << " ms and failed because the collection's "
+ "sharding metadata either changed in between or "
+ "became corrupted";
+
+ uasserted(ErrorCodes::ConflictingOperationInProgress,
+ "Collection sharding status changed during refresh or became corrupted");
+ }
+
+ // If at least one diff was applied, the metadata is correct, but it might not have changed so
+ // in this case there is no need to recreate the chunk manager.
+ //
+ // NOTE: In addition to the above statement, it is also important that we return the same chunk
+ // manager object, because the write commands' code relies on changes of the chunk manager's
+ // sequence number to detect batch writes not making progress because of chunks moving across
+ // shards too frequently.
+ if (collectionVersion == startingCollectionVersion) {
+ log() << "Refresh for collection " << nss << " took " << t.millis()
+ << " ms and didn't find any metadata changes";
+
+ return existingRoutingInfo;
+ }
+
+ std::unique_ptr<CollatorInterface> defaultCollator;
+ if (!coll.getDefaultCollation().isEmpty()) {
+ // The collation should have been validated upon collection creation
+ defaultCollator = uassertStatusOK(CollatorFactoryInterface::get(opCtx->getServiceContext())
+ ->makeFromBSON(coll.getDefaultCollation()));
+ }
+
+ log() << "Refresh for collection " << nss << " took " << t.millis() << " ms and found version "
+ << collectionVersion;
+
+ return stdx::make_unique<ChunkManager>(nss,
+ coll.getKeyPattern(),
+ std::move(defaultCollator),
+ coll.getUnique(),
+ std::move(chunkMap),
+ collectionVersion);
+}
+
} // namespace
CatalogCache::CatalogCache() = default;
@@ -267,109 +387,6 @@ void CatalogCache::purgeAllDatabases() {
_databases.clear();
}
-std::shared_ptr<ChunkManager> CatalogCache::refreshCollectionRoutingInfo(
- OperationContext* opCtx,
- const NamespaceString& nss,
- std::shared_ptr<ChunkManager> existingRoutingInfo) {
- Timer t;
-
- const auto catalogClient = Grid::get(opCtx)->catalogClient(opCtx);
-
- // Decide whether to do a full or partial load based on the state of the collection
- auto collStatus = catalogClient->getCollection(opCtx, nss.ns());
- if (collStatus == ErrorCodes::NamespaceNotFound) {
- return nullptr;
- }
-
- const auto coll = uassertStatusOK(std::move(collStatus)).value;
- if (coll.getDropped()) {
- return nullptr;
- }
-
- ChunkVersion startingCollectionVersion;
- ChunkMap chunkMap =
- SimpleBSONObjComparator::kInstance.makeBSONObjIndexedMap<std::shared_ptr<Chunk>>();
-
- if (!existingRoutingInfo) {
- // If we don't have a basis chunk manager, do a full refresh
- startingCollectionVersion = ChunkVersion(0, 0, coll.getEpoch());
- } else if (existingRoutingInfo->getVersion().epoch() != coll.getEpoch()) {
- // If the collection's epoch has changed, do a full refresh
- startingCollectionVersion = ChunkVersion(0, 0, coll.getEpoch());
- } else {
- // Otherwise do a partial refresh
- startingCollectionVersion = existingRoutingInfo->getVersion();
- chunkMap = existingRoutingInfo->chunkMap();
- }
-
- log() << "Refreshing chunks for collection " << nss << " based on version "
- << startingCollectionVersion;
-
- // Diff tracker should *always* find at least one chunk if collection exists
- const auto diffQuery =
- CMConfigDiffTracker::createConfigDiffQuery(nss, startingCollectionVersion);
-
- // Query the chunks which have changed
- std::vector<ChunkType> newChunks;
- repl::OpTime opTime;
- uassertStatusOK(Grid::get(opCtx)->catalogClient(opCtx)->getChunks(
- opCtx,
- diffQuery.query,
- diffQuery.sort,
- boost::none,
- &newChunks,
- &opTime,
- repl::ReadConcernLevel::kMajorityReadConcern));
-
- ChunkVersion collectionVersion = startingCollectionVersion;
-
- ShardVersionMap unusedShardVersions;
- CMConfigDiffTracker differ(nss, &chunkMap, &collectionVersion, &unusedShardVersions);
-
- const int diffsApplied = differ.calculateConfigDiff(opCtx, newChunks);
-
- if (diffsApplied < 1) {
- log() << "Refresh for collection " << nss << " took " << t.millis()
- << " ms and failed because the collection's "
- "sharding metadata either changed in between or "
- "became corrupted";
-
- uasserted(ErrorCodes::ConflictingOperationInProgress,
- "Collection sharding status changed during refresh or became corrupted");
- }
-
- // If at least one diff was applied, the metadata is correct, but it might not have changed so
- // in this case there is no need to recreate the chunk manager.
- //
- // NOTE: In addition to the above statement, it is also important that we return the same chunk
- // manager object, because the write commands' code relies on changes of the chunk manager's
- // sequence number to detect batch writes not making progress because of chunks moving across
- // shards too frequently.
- if (collectionVersion == startingCollectionVersion) {
- log() << "Refresh for collection " << nss << " took " << t.millis()
- << " ms and didn't find any metadata changes";
-
- return existingRoutingInfo;
- }
-
- std::unique_ptr<CollatorInterface> defaultCollator;
- if (!coll.getDefaultCollation().isEmpty()) {
- // The collation should have been validated upon collection creation
- defaultCollator = uassertStatusOK(CollatorFactoryInterface::get(opCtx->getServiceContext())
- ->makeFromBSON(coll.getDefaultCollation()));
- }
-
- log() << "Refresh for collection " << nss << " took " << t.millis() << " ms and found version "
- << collectionVersion;
-
- return stdx::make_unique<ChunkManager>(nss,
- coll.getKeyPattern(),
- std::move(defaultCollator),
- coll.getUnique(),
- std::move(chunkMap),
- collectionVersion);
-}
-
std::shared_ptr<CatalogCache::DatabaseInfoEntry> CatalogCache::_getDatabase_inlock(
OperationContext* opCtx, StringData dbName) {
auto it = _databases.find(dbName);
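The comment on the new free function leaves the retry on ConflictingOperationInProgress to the caller. For illustration, a minimal caller-side sketch of that contract (the helper and its attempt budget are hypothetical, not part of this change; the budget of three matches what the tests below expect of the catalog cache):

    // Hypothetical sketch of the retry contract described in the comment above.
    // ConflictingOperationInProgress is the only transient error; every other
    // code is final and propagates to the user immediately.
    const int kMaxNumRefreshAttempts = 3;  // assumed retry budget

    std::shared_ptr<ChunkManager> refreshWithRetry(
        OperationContext* opCtx,
        const NamespaceString& nss,
        std::shared_ptr<ChunkManager> existingRoutingInfo) {
        for (int attempt = 1;; ++attempt) {
            try {
                return refreshCollectionRoutingInfo(opCtx, nss, existingRoutingInfo);
            } catch (const DBException& ex) {
                // Rethrow final errors and give up after the last attempt;
                // otherwise retry the reload, since the inconsistency (e.g. a
                // concurrent drop/recreate) may be transient.
                if (ex.getCode() != ErrorCodes::ConflictingOperationInProgress ||
                    attempt == kMaxNumRefreshAttempts) {
                    throw;
                }
            }
        }
    }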
diff --git a/src/mongo/s/catalog_cache.h b/src/mongo/s/catalog_cache.h
index 528b2df4673..e76188303a9 100644
--- a/src/mongo/s/catalog_cache.h
+++ b/src/mongo/s/catalog_cache.h
@@ -1,5 +1,5 @@
/**
- * Copyright (C) 2015 MongoDB Inc.
+ * Copyright (C) 2017 MongoDB Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
@@ -112,30 +112,6 @@ public:
*/
void purgeAllDatabases();
- /**
- * Blocking method, which refreshes the routing information for the specified collection. If
- * 'existingRoutingInfo' has been specified uses this as a basis to perform an 'incremental'
- * refresh, which only fetches the chunks which changed. Otherwise does a full refresh, fetching
- * all the chunks for the collection.
- *
- * Returns the refreshed routing information if the collection is still sharded or nullptr if it
- * is not. If refresh fails for any reason, throws a DBException.
- *
- * With the exception of ConflictingOperationInProgress, error codes thrown from this method are
- * final in that there is nothing that can be done to remedy them other than pass the error to
- * the user.
- *
- * ConflictingOperationInProgress indicates that the chunk metadata was found to be
- * inconsistent. Since this may be transient, due to the collection being dropped or recreated,
- * the caller must retry the reload up to some configurable number of attempts.
- *
- * NOTE: Should never be called directly and is exposed as public for testing purposes only.
- */
- static std::shared_ptr<ChunkManager> refreshCollectionRoutingInfo(
- OperationContext* opCtx,
- const NamespaceString& nss,
- std::shared_ptr<ChunkManager> existingRoutingInfo);
-
private:
// Make the cache entries friends so they can access the private classes below
friend class CachedDatabaseInfo;
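With the static helper removed from the public interface, consumers obtain routing information exclusively through CatalogCache::getCollectionRoutingInfo, as the reworked tests below do. A sketch of that consumer path, assembled from the calls appearing in this diff ('query' and 'queryCollation' stand for the caller's filter and collation; error handling reduced to uassertStatusOK):

    // Consumer-side sketch: the cache returns routing info carrying either a
    // chunk manager (sharded collection) or a primary shard (unsharded).
    auto routingInfo = uassertStatusOK(
        Grid::get(opCtx)->catalogCache()->getCollectionRoutingInfo(opCtx, nss));
    if (auto cm = routingInfo.cm()) {
        // Sharded: target shards through the chunk manager.
        std::set<ShardId> shardIds;
        cm->getShardIdsForQuery(opCtx, query, queryCollation, &shardIds);
    } else {
        // Unsharded: route everything to the database's primary shard.
        const ShardId primaryShardId = routingInfo.primaryId();
    }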
diff --git a/src/mongo/s/chunk_manager_query_test.cpp b/src/mongo/s/chunk_manager_query_test.cpp
index 013c4618d6c..4651de4d037 100644
--- a/src/mongo/s/chunk_manager_query_test.cpp
+++ b/src/mongo/s/chunk_manager_query_test.cpp
@@ -39,6 +39,8 @@
namespace mongo {
namespace {
+const NamespaceString kNss("TestDB", "TestColl");
+
class ChunkManagerQueryTest : public ChunkManagerTestFixture {
protected:
void runQueryTest(const BSONObj& shardKey,
@@ -50,7 +52,7 @@ protected:
const std::set<ShardId> expectedShardIds) {
const ShardKeyPattern shardKeyPattern(shardKey);
auto chunkManager =
- makeChunkManager(shardKeyPattern, std::move(defaultCollator), false, splitPoints);
+ makeChunkManager(kNss, shardKeyPattern, std::move(defaultCollator), false, splitPoints);
std::set<ShardId> shardIds;
chunkManager->getShardIdsForQuery(operationContext(), query, queryCollation, &shardIds);
diff --git a/src/mongo/s/chunk_manager_refresh_test.cpp b/src/mongo/s/chunk_manager_refresh_test.cpp
index ef6f6672f47..087baa65471 100644
--- a/src/mongo/s/chunk_manager_refresh_test.cpp
+++ b/src/mongo/s/chunk_manager_refresh_test.cpp
@@ -35,6 +35,8 @@
#include "mongo/db/query/query_request.h"
#include "mongo/s/catalog/type_chunk.h"
#include "mongo/s/catalog/type_collection.h"
+#include "mongo/s/catalog/type_database.h"
+#include "mongo/s/catalog/type_shard.h"
#include "mongo/s/catalog/type_shard.h"
#include "mongo/s/catalog_cache.h"
#include "mongo/s/chunk_manager_test_fixture.h"
@@ -45,28 +47,60 @@ namespace {
using executor::RemoteCommandRequest;
using unittest::assertGet;
-using ChunkManagerLoadTest = ChunkManagerTestFixture;
+const NamespaceString kNss("TestDB", "TestColl");
+
+class ChunkManagerLoadTest : public ChunkManagerTestFixture {
+protected:
+ void setUp() override {
+ ChunkManagerTestFixture::setUp();
+
+ setupShards([&]() {
+ ShardType shard0;
+ shard0.setName("0");
+ shard0.setHost("Host0:12345");
+
+ ShardType shard1;
+ shard1.setName("1");
+ shard1.setHost("Host1:12345");
+
+ return std::vector<ShardType>{shard0, shard1};
+ }());
+ }
+
+ void expectGetDatabase() {
+ expectFindOnConfigSendBSONObjVector([&]() {
+ DatabaseType db;
+ db.setName(kNss.db().toString());
+ db.setPrimary({"0"});
+ db.setSharded(true);
+
+ return std::vector<BSONObj>{db.toBSON()};
+ }());
+ }
+
+ void expectGetCollection(OID epoch, const ShardKeyPattern& shardKeyPattern) {
+ expectFindOnConfigSendBSONObjVector([&]() {
+ CollectionType collType;
+ collType.setNs(kNss);
+ collType.setEpoch(epoch);
+ collType.setKeyPattern(shardKeyPattern.toBSON());
+ collType.setUnique(false);
+
+ return std::vector<BSONObj>{collType.toBSON()};
+ }());
+ }
+};
TEST_F(ChunkManagerLoadTest, FullLoad) {
const OID epoch = OID::gen();
const ShardKeyPattern shardKeyPattern(BSON("_id" << 1));
- auto future = launchAsync([&] {
- auto client = serviceContext()->makeClient("Test");
- auto opCtx = client->makeOperationContext();
- return CatalogCache::refreshCollectionRoutingInfo(opCtx.get(), kNss, nullptr);
- });
-
- expectFindOnConfigSendBSONObjVector([&]() {
- CollectionType collType;
- collType.setNs(kNss);
- collType.setEpoch(epoch);
- collType.setKeyPattern(shardKeyPattern.toBSON());
- collType.setUnique(false);
+ auto future = scheduleRoutingInfoRefresh(kNss);
- return std::vector<BSONObj>{collType.toBSON()};
- }());
+ expectGetDatabase();
+ expectGetCollection(epoch, shardKeyPattern);
+ expectGetCollection(epoch, shardKeyPattern);
expectFindOnConfigSendBSONObjVector([&]() {
ChunkVersion version(1, 0, epoch);
@@ -94,93 +128,145 @@ TEST_F(ChunkManagerLoadTest, FullLoad) {
chunk4.toConfigBSON()};
}());
- expectFindOnConfigSendBSONObjVector([&]() {
- ShardType shard1;
- shard1.setName("0");
- shard1.setHost(str::stream() << "Host0:12345");
+ auto routingInfo = future.timed_get(kFutureTimeout);
+ ASSERT(routingInfo->cm());
+ auto cm = routingInfo->cm();
- ShardType shard2;
- shard2.setName("1");
- shard2.setHost(str::stream() << "Host1:12345");
+ ASSERT_EQ(4, cm->numChunks());
+}
- return std::vector<BSONObj>{shard1.toBSON(), shard2.toBSON()};
- }());
+TEST_F(ChunkManagerLoadTest, DatabaseNotFound) {
+ auto future = scheduleRoutingInfoRefresh(kNss);
- auto routingInfo = future.timed_get(kFutureTimeout);
- ASSERT_EQ(4, routingInfo->numChunks());
+ // Return an empty database (need to return it twice because for missing databases, the
+ // CatalogClient tries twice)
+ expectFindOnConfigSendBSONObjVector({});
+ expectFindOnConfigSendBSONObjVector({});
+
+ try {
+ auto routingInfo = future.timed_get(kFutureTimeout);
+ auto cm = routingInfo->cm();
+ auto primary = routingInfo->primary();
+
+ FAIL(str::stream() << "Returning no database did not fail and returned "
+ << (cm ? cm->toString() : routingInfo->primaryId().toString()));
+ } catch (const DBException& ex) {
+ ASSERT_EQ(ErrorCodes::NamespaceNotFound, ex.getCode());
+ }
}
TEST_F(ChunkManagerLoadTest, CollectionNotFound) {
- auto future = launchAsync([&] {
- auto client = serviceContext()->makeClient("Test");
- auto opCtx = client->makeOperationContext();
- return CatalogCache::refreshCollectionRoutingInfo(opCtx.get(), kNss, nullptr);
- });
+ auto future = scheduleRoutingInfoRefresh(kNss);
+
+ expectGetDatabase();
// Return an empty collection
expectFindOnConfigSendBSONObjVector({});
- ASSERT(nullptr == future.timed_get(kFutureTimeout));
+ auto routingInfo = future.timed_get(kFutureTimeout);
+ ASSERT(!routingInfo->cm());
+ ASSERT(routingInfo->primary());
+ ASSERT_EQ(ShardId{"0"}, routingInfo->primaryId());
}
TEST_F(ChunkManagerLoadTest, NoChunksFoundForCollection) {
const OID epoch = OID::gen();
const ShardKeyPattern shardKeyPattern(BSON("_id" << 1));
- auto future = launchAsync([&] {
- auto client = serviceContext()->makeClient("Test");
- auto opCtx = client->makeOperationContext();
- return CatalogCache::refreshCollectionRoutingInfo(opCtx.get(), kNss, nullptr);
- });
+ auto future = scheduleRoutingInfoRefresh(kNss);
- expectFindOnConfigSendBSONObjVector([&]() {
- CollectionType collType;
- collType.setNs(kNss);
- collType.setEpoch(epoch);
- collType.setKeyPattern(shardKeyPattern.toBSON());
- collType.setUnique(false);
+ expectGetDatabase();
+ expectGetCollection(epoch, shardKeyPattern);
- return std::vector<BSONObj>{collType.toBSON()};
- }());
+ // Return no chunks three times, which is how frequently the catalog cache retries
+ expectGetCollection(epoch, shardKeyPattern);
+ expectFindOnConfigSendBSONObjVector({});
+
+ expectGetCollection(epoch, shardKeyPattern);
+ expectFindOnConfigSendBSONObjVector({});
- // Return no chunks
+ expectGetCollection(epoch, shardKeyPattern);
expectFindOnConfigSendBSONObjVector({});
try {
auto routingInfo = future.timed_get(kFutureTimeout);
+ auto cm = routingInfo->cm();
+ auto primary = routingInfo->primary();
+
FAIL(str::stream() << "Returning no chunks for collection did not fail and returned "
- << (routingInfo ? routingInfo->toString() : "nullptr"));
+ << (cm ? cm->toString() : routingInfo->primaryId().toString()));
} catch (const DBException& ex) {
ASSERT_EQ(ErrorCodes::ConflictingOperationInProgress, ex.getCode());
}
}
-TEST_F(ChunkManagerLoadTest, ChunkEpochChangeDuringIncrementalLoad) {
+TEST_F(ChunkManagerLoadTest, IncompleteChunksFoundForCollection) {
+ const OID epoch = OID::gen();
const ShardKeyPattern shardKeyPattern(BSON("_id" << 1));
- auto initialRoutingInfo(makeChunkManager(shardKeyPattern, nullptr, true, {}));
- ASSERT_EQ(1, initialRoutingInfo->numChunks());
+ auto future = scheduleRoutingInfoRefresh(kNss);
- auto future = launchAsync([&] {
- auto client = serviceContext()->makeClient("Test");
- auto opCtx = client->makeOperationContext();
- return CatalogCache::refreshCollectionRoutingInfo(opCtx.get(), kNss, initialRoutingInfo);
- });
+ expectGetDatabase();
+ expectGetCollection(epoch, shardKeyPattern);
- expectFindOnConfigSendBSONObjVector([&]() {
- CollectionType collType;
- collType.setNs(kNss);
- collType.setEpoch(initialRoutingInfo->getVersion().epoch());
- collType.setKeyPattern(shardKeyPattern.toBSON());
- collType.setUnique(false);
+ const auto incompleteChunks = [&]() {
+ ChunkVersion version(1, 0, epoch);
- return std::vector<BSONObj>{collType.toBSON()};
- }());
+ // Chunk from (MinKey, -100) is missing (as if someone is dropping the collection
+ // concurrently)
+ version.incMinor();
+
+ ChunkType chunk2(kNss, {BSON("_id" << -100), BSON("_id" << 0)}, version, {"1"});
+ version.incMinor();
+
+ ChunkType chunk3(kNss, {BSON("_id" << 0), BSON("_id" << 100)}, version, {"0"});
+ version.incMinor();
+
+ ChunkType chunk4(kNss,
+ {BSON("_id" << 100), shardKeyPattern.getKeyPattern().globalMax()},
+ version,
+ {"1"});
+ version.incMinor();
+
+ return std::vector<BSONObj>{
+ chunk2.toConfigBSON(), chunk3.toConfigBSON(), chunk4.toConfigBSON()};
+ }();
+
+ // Return incomplete set of chunks three times, which is how frequently the catalog cache
+ // retries
+ expectGetCollection(epoch, shardKeyPattern);
+ expectFindOnConfigSendBSONObjVector(incompleteChunks);
+
+ expectGetCollection(epoch, shardKeyPattern);
+ expectFindOnConfigSendBSONObjVector(incompleteChunks);
+
+ expectGetCollection(epoch, shardKeyPattern);
+ expectFindOnConfigSendBSONObjVector(incompleteChunks);
+
+ try {
+ auto routingInfo = future.timed_get(kFutureTimeout);
+ auto cm = routingInfo->cm();
+ auto primary = routingInfo->primary();
+
+ FAIL(
+ str::stream() << "Returning incomplete chunks for collection did not fail and returned "
+ << (cm ? cm->toString() : routingInfo->primaryId().toString()));
+ } catch (const DBException& ex) {
+ ASSERT_EQ(ErrorCodes::ConflictingOperationInProgress, ex.getCode());
+ }
+}
+
+TEST_F(ChunkManagerLoadTest, ChunkEpochChangeDuringIncrementalLoad) {
+ const ShardKeyPattern shardKeyPattern(BSON("_id" << 1));
+
+ auto initialRoutingInfo(makeChunkManager(kNss, shardKeyPattern, nullptr, true, {}));
+ ASSERT_EQ(1, initialRoutingInfo->numChunks());
+
+ auto future = scheduleRoutingInfoRefresh(kNss);
ChunkVersion version = initialRoutingInfo->getVersion();
- // Return set of chunks, one of which has different epoch
- expectFindOnConfigSendBSONObjVector([&]() {
+ const auto inconsistentChunks = [&]() {
version.incMajor();
ChunkType chunk1(
kNss, {shardKeyPattern.getKeyPattern().globalMin(), BSON("_id" << 0)}, version, {"0"});
@@ -191,12 +277,27 @@ TEST_F(ChunkManagerLoadTest, ChunkEpochChangeDuringIncrementalLoad) {
{"0"});
return std::vector<BSONObj>{chunk1.toConfigBSON(), chunk2.toConfigBSON()};
- }());
+ }();
+
+ // Return set of chunks, one of which has different epoch. Do it three times, which is how
+ // frequently the catalog cache retries.
+ expectGetCollection(initialRoutingInfo->getVersion().epoch(), shardKeyPattern);
+ expectFindOnConfigSendBSONObjVector(inconsistentChunks);
+
+ expectGetCollection(initialRoutingInfo->getVersion().epoch(), shardKeyPattern);
+ expectFindOnConfigSendBSONObjVector(inconsistentChunks);
+
+ expectGetCollection(initialRoutingInfo->getVersion().epoch(), shardKeyPattern);
+ expectFindOnConfigSendBSONObjVector(inconsistentChunks);
try {
auto routingInfo = future.timed_get(kFutureTimeout);
- FAIL(str::stream() << "Returning chunks with different epoch did not fail and returned "
- << (routingInfo ? routingInfo->toString() : "nullptr"));
+ auto cm = routingInfo->cm();
+ auto primary = routingInfo->primary();
+
+ FAIL(str::stream()
+ << "Returning chunks with different epoch for collection did not fail and returned "
+ << (cm ? cm->toString() : routingInfo->primaryId().toString()));
} catch (const DBException& ex) {
ASSERT_EQ(ErrorCodes::ConflictingOperationInProgress, ex.getCode());
}
@@ -205,26 +306,14 @@ TEST_F(ChunkManagerLoadTest, ChunkEpochChangeDuringIncrementalLoad) {
TEST_F(ChunkManagerLoadTest, IncrementalLoadAfterSplit) {
const ShardKeyPattern shardKeyPattern(BSON("_id" << 1));
- auto initialRoutingInfo(makeChunkManager(shardKeyPattern, nullptr, true, {}));
+ auto initialRoutingInfo(makeChunkManager(kNss, shardKeyPattern, nullptr, true, {}));
ASSERT_EQ(1, initialRoutingInfo->numChunks());
- auto future = launchAsync([&] {
- auto client = serviceContext()->makeClient("Test");
- auto opCtx = client->makeOperationContext();
- return CatalogCache::refreshCollectionRoutingInfo(opCtx.get(), kNss, initialRoutingInfo);
- });
-
ChunkVersion version = initialRoutingInfo->getVersion();
- expectFindOnConfigSendBSONObjVector([&]() {
- CollectionType collType;
- collType.setNs(kNss);
- collType.setEpoch(version.epoch());
- collType.setKeyPattern(shardKeyPattern.toBSON());
- collType.setUnique(false);
+ auto future = scheduleRoutingInfoRefresh(kNss);
- return std::vector<BSONObj>{collType.toBSON()};
- }());
+ expectGetCollection(version.epoch(), shardKeyPattern);
// Return set of chunks, which represent a split
onFindCommand([&](const RemoteCommandRequest& request) {
@@ -247,39 +336,31 @@ TEST_F(ChunkManagerLoadTest, IncrementalLoadAfterSplit) {
return std::vector<BSONObj>{chunk1.toConfigBSON(), chunk2.toConfigBSON()};
});
- auto newRoutingInfo(future.timed_get(kFutureTimeout));
- ASSERT_EQ(2, newRoutingInfo->numChunks());
- ASSERT_EQ(version, newRoutingInfo->getVersion());
- ASSERT_EQ(version, newRoutingInfo->getVersion({"0"}));
- ASSERT_EQ(ChunkVersion(0, 0, version.epoch()), newRoutingInfo->getVersion({"1"}));
+ auto routingInfo = future.timed_get(kFutureTimeout);
+ ASSERT(routingInfo->cm());
+ auto cm = routingInfo->cm();
+
+ ASSERT_EQ(2, cm->numChunks());
+ ASSERT_EQ(version, cm->getVersion());
+ ASSERT_EQ(version, cm->getVersion({"0"}));
+ ASSERT_EQ(ChunkVersion(0, 0, version.epoch()), cm->getVersion({"1"}));
}
TEST_F(ChunkManagerLoadTest, IncrementalLoadAfterMove) {
const ShardKeyPattern shardKeyPattern(BSON("_id" << 1));
- auto initialRoutingInfo(makeChunkManager(shardKeyPattern, nullptr, true, {BSON("_id" << 0)}));
+ auto initialRoutingInfo(
+ makeChunkManager(kNss, shardKeyPattern, nullptr, true, {BSON("_id" << 0)}));
ASSERT_EQ(2, initialRoutingInfo->numChunks());
- auto future = launchAsync([&] {
- auto client = serviceContext()->makeClient("Test");
- auto opCtx = client->makeOperationContext();
- return CatalogCache::refreshCollectionRoutingInfo(opCtx.get(), kNss, initialRoutingInfo);
- });
-
ChunkVersion version = initialRoutingInfo->getVersion();
- expectFindOnConfigSendBSONObjVector([&]() {
- CollectionType collType;
- collType.setNs(kNss);
- collType.setEpoch(version.epoch());
- collType.setKeyPattern(shardKeyPattern.toBSON());
- collType.setUnique(false);
-
- return std::vector<BSONObj>{collType.toBSON()};
- }());
+ auto future = scheduleRoutingInfoRefresh(kNss);
ChunkVersion expectedDestShardVersion;
+ expectGetCollection(version.epoch(), shardKeyPattern);
+
// Return set of chunks, which represent a move
expectFindOnConfigSendBSONObjVector([&]() {
version.incMajor();
@@ -294,36 +375,27 @@ TEST_F(ChunkManagerLoadTest, IncrementalLoadAfterMove) {
return std::vector<BSONObj>{chunk1.toConfigBSON(), chunk2.toConfigBSON()};
}());
- auto newRoutingInfo(future.timed_get(kFutureTimeout));
- ASSERT_EQ(2, newRoutingInfo->numChunks());
- ASSERT_EQ(version, newRoutingInfo->getVersion());
- ASSERT_EQ(version, newRoutingInfo->getVersion({"0"}));
- ASSERT_EQ(expectedDestShardVersion, newRoutingInfo->getVersion({"1"}));
+ auto routingInfo = future.timed_get(kFutureTimeout);
+ ASSERT(routingInfo->cm());
+ auto cm = routingInfo->cm();
+
+ ASSERT_EQ(2, cm->numChunks());
+ ASSERT_EQ(version, cm->getVersion());
+ ASSERT_EQ(version, cm->getVersion({"0"}));
+ ASSERT_EQ(expectedDestShardVersion, cm->getVersion({"1"}));
}
TEST_F(ChunkManagerLoadTest, IncrementalLoadAfterMoveLastChunk) {
const ShardKeyPattern shardKeyPattern(BSON("_id" << 1));
- auto initialRoutingInfo(makeChunkManager(shardKeyPattern, nullptr, true, {}));
+ auto initialRoutingInfo(makeChunkManager(kNss, shardKeyPattern, nullptr, true, {}));
ASSERT_EQ(1, initialRoutingInfo->numChunks());
- auto future = launchAsync([&] {
- auto client = serviceContext()->makeClient("Test");
- auto opCtx = client->makeOperationContext();
- return CatalogCache::refreshCollectionRoutingInfo(opCtx.get(), kNss, initialRoutingInfo);
- });
-
ChunkVersion version = initialRoutingInfo->getVersion();
- expectFindOnConfigSendBSONObjVector([&]() {
- CollectionType collType;
- collType.setNs(kNss);
- collType.setEpoch(version.epoch());
- collType.setKeyPattern(shardKeyPattern.toBSON());
- collType.setUnique(false);
+ auto future = scheduleRoutingInfoRefresh(kNss);
- return std::vector<BSONObj>{collType.toBSON()};
- }());
+ expectGetCollection(version.epoch(), shardKeyPattern);
// Return set of chunks, which represent a move
expectFindOnConfigSendBSONObjVector([&]() {
@@ -349,11 +421,14 @@ TEST_F(ChunkManagerLoadTest, IncrementalLoadAfterMoveLastChunk) {
return std::vector<BSONObj>{shard1.toBSON(), shard2.toBSON()};
}());
- auto newRoutingInfo(future.timed_get(kFutureTimeout));
- ASSERT_EQ(1, newRoutingInfo->numChunks());
- ASSERT_EQ(version, newRoutingInfo->getVersion());
- ASSERT_EQ(ChunkVersion(0, 0, version.epoch()), newRoutingInfo->getVersion({"0"}));
- ASSERT_EQ(version, newRoutingInfo->getVersion({"1"}));
+ auto routingInfo = future.timed_get(kFutureTimeout);
+ ASSERT(routingInfo->cm());
+ auto cm = routingInfo->cm();
+
+ ASSERT_EQ(1, cm->numChunks());
+ ASSERT_EQ(version, cm->getVersion());
+ ASSERT_EQ(ChunkVersion(0, 0, version.epoch()), cm->getVersion({"0"}));
+ ASSERT_EQ(version, cm->getVersion({"1"}));
}
} // namespace
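The three failure tests above repeat one try/FAIL/catch idiom; a hypothetical helper capturing it (a possible refactoring, not part of this commit) would read:

    // Hypothetical helper for the repeated failure-assertion idiom above.
    void assertRefreshFailsWith(
        executor::NetworkTestEnv::FutureHandle<boost::optional<CachedCollectionRoutingInfo>>& future,
        ErrorCodes::Error expectedCode) {
        try {
            auto routingInfo = future.timed_get(kFutureTimeout);
            auto cm = routingInfo->cm();

            FAIL(str::stream() << "Refresh did not fail and returned "
                               << (cm ? cm->toString() : routingInfo->primaryId().toString()));
        } catch (const DBException& ex) {
            ASSERT_EQ(expectedCode, ex.getCode());
        }
    }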
diff --git a/src/mongo/s/chunk_manager_test_fixture.cpp b/src/mongo/s/chunk_manager_test_fixture.cpp
index ada08673d70..67989692a78 100644
--- a/src/mongo/s/chunk_manager_test_fixture.cpp
+++ b/src/mongo/s/chunk_manager_test_fixture.cpp
@@ -40,15 +40,16 @@
#include "mongo/db/query/collation/collator_factory_mock.h"
#include "mongo/s/catalog/type_chunk.h"
#include "mongo/s/catalog/type_collection.h"
+#include "mongo/s/catalog/type_database.h"
#include "mongo/s/catalog/type_shard.h"
#include "mongo/s/catalog_cache.h"
+#include "mongo/s/grid.h"
#include "mongo/stdx/memory.h"
+#include "mongo/unittest/unittest.h"
#include "mongo/util/scopeguard.h"
namespace mongo {
-const NamespaceString ChunkManagerTestFixture::kNss("TestDB", "TestColl");
-
void ChunkManagerTestFixture::setUp() {
ShardingCatalogTestFixture::setUp();
setRemote(HostAndPort("FakeRemoteClient:34567"));
@@ -57,16 +58,39 @@ void ChunkManagerTestFixture::setUp() {
CollatorFactoryInterface::set(serviceContext(), stdx::make_unique<CollatorFactoryMock>());
}
+executor::NetworkTestEnv::FutureHandle<boost::optional<CachedCollectionRoutingInfo>>
+ChunkManagerTestFixture::scheduleRoutingInfoRefresh(const NamespaceString& nss) {
+ return launchAsync([this, nss] {
+ auto client = serviceContext()->makeClient("Test");
+ auto opCtx = client->makeOperationContext();
+ auto const catalogCache = Grid::get(serviceContext())->catalogCache();
+ catalogCache->invalidateShardedCollection(nss.ns());
+
+ return boost::make_optional(
+ uassertStatusOK(catalogCache->getCollectionRoutingInfo(opCtx.get(), nss)));
+ });
+}
+
std::shared_ptr<ChunkManager> ChunkManagerTestFixture::makeChunkManager(
+ const NamespaceString& nss,
const ShardKeyPattern& shardKeyPattern,
std::unique_ptr<CollatorInterface> defaultCollator,
bool unique,
const std::vector<BSONObj>& splitPoints) {
ChunkVersion version(1, 0, OID::gen());
+ const BSONObj databaseBSON = [&]() {
+ DatabaseType db;
+ db.setName(nss.db().toString());
+ db.setPrimary({"0"});
+ db.setSharded(true);
+
+ return db.toBSON();
+ }();
+
const BSONObj collectionBSON = [&]() {
CollectionType coll;
- coll.setNs(kNss);
+ coll.setNs(nss);
coll.setEpoch(version.epoch());
coll.setKeyPattern(shardKeyPattern.getKeyPattern());
coll.setUnique(unique);
@@ -78,7 +102,7 @@ std::shared_ptr<ChunkManager> ChunkManagerTestFixture::makeChunkManager(
return coll.toBSON();
}();
- std::vector<BSONObj> shards;
+ std::vector<ShardType> shards;
std::vector<BSONObj> initialChunks;
auto splitPointsIncludingEnds(splitPoints);
@@ -91,10 +115,8 @@ std::shared_ptr<ChunkManager> ChunkManagerTestFixture::makeChunkManager(
shard.setName(str::stream() << (i - 1));
shard.setHost(str::stream() << "Host" << (i - 1) << ":12345");
- shards.push_back(shard.toBSON());
-
ChunkType chunk(
- kNss,
+ nss,
{shardKeyPattern.getKeyPattern().extendRangeBound(splitPointsIncludingEnds[i - 1],
false),
shardKeyPattern.getKeyPattern().extendRangeBound(splitPointsIncludingEnds[i], false)},
@@ -102,21 +124,25 @@ std::shared_ptr<ChunkManager> ChunkManagerTestFixture::makeChunkManager(
shard.getName());
initialChunks.push_back(chunk.toConfigBSON());
+ shards.push_back(std::move(shard));
version.incMajor();
}
- auto future = launchAsync([&] {
- auto client = serviceContext()->makeClient("Test");
- auto opCtx = client->makeOperationContext();
- return CatalogCache::refreshCollectionRoutingInfo(opCtx.get(), kNss, nullptr);
- });
+ setupShards(shards);
+
+ auto future = scheduleRoutingInfoRefresh(nss);
+ expectFindOnConfigSendBSONObjVector({databaseBSON});
+ expectFindOnConfigSendBSONObjVector({collectionBSON});
expectFindOnConfigSendBSONObjVector({collectionBSON});
expectFindOnConfigSendBSONObjVector(initialChunks);
- expectFindOnConfigSendBSONObjVector(shards);
- return future.timed_get(kFutureTimeout);
+ auto routingInfo = future.timed_get(kFutureTimeout);
+ ASSERT(routingInfo->cm());
+ ASSERT(!routingInfo->primary());
+
+ return routingInfo->cm();
}
} // namespace mongo
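After this change the fixture helper takes the namespace explicitly. A typical call, mirroring the refresh tests above (sketch):

    // Build a manager with two chunks split at {_id: 0}, one per shard.
    const NamespaceString nss("TestDB", "TestColl");
    const ShardKeyPattern shardKeyPattern(BSON("_id" << 1));

    auto cm = makeChunkManager(
        nss, shardKeyPattern, nullptr /* defaultCollator */, true /* unique */, {BSON("_id" << 0)});
    ASSERT_EQ(2, cm->numChunks());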
diff --git a/src/mongo/s/chunk_manager_test_fixture.h b/src/mongo/s/chunk_manager_test_fixture.h
index aaa059dd49d..ef84ed1de4a 100644
--- a/src/mongo/s/chunk_manager_test_fixture.h
+++ b/src/mongo/s/chunk_manager_test_fixture.h
@@ -37,6 +37,7 @@
namespace mongo {
class BSONObj;
+class CachedCollectionRoutingInfo;
class ChunkManager;
class CollatorInterface;
class ShardKeyPattern;
@@ -46,17 +47,27 @@ protected:
void setUp() override;
/**
- * Returns a chunk manager with chunks at the specified split points. Each individual chunk is
- * placed on a separate shard with shard id being a single number ranging from "0" to the number
- * of chunks.
+ * Returns a chunk manager for the specified namespace with chunks at the specified split
+ * points. Each individual chunk is placed on a separate shard with shard id being a single
+ * number ranging from "0" to the number of chunks.
*/
std::shared_ptr<ChunkManager> makeChunkManager(
+ const NamespaceString& nss,
const ShardKeyPattern& shardKeyPattern,
std::unique_ptr<CollatorInterface> defaultCollator,
bool unique,
const std::vector<BSONObj>& splitPoints);
- static const NamespaceString kNss;
+ /**
+ * Invalidates the catalog cache for 'nss' and schedules a thread to invoke the blocking 'get'
+ * call, returning a future which can be waited on to obtain the refreshed routing information.
+ *
+ * NOTE: The returned value is always set. The reason to use optional is a deficiency of
+ * std::future with the MSVC STL library, which requires the templated type to be default
+ * constructible.
+ */
+ executor::NetworkTestEnv::FutureHandle<boost::optional<CachedCollectionRoutingInfo>>
+ scheduleRoutingInfoRefresh(const NamespaceString& nss);
};
} // namespace mongo
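Taken together, tests built on this fixture follow a schedule/expect/join pattern. A condensed sketch (the response objects 'databaseBSON', 'collectionBSON' and 'initialChunks' stand for config documents built as in makeChunkManager above):

    // 1. Kick off the blocking routing-info refresh on a separate thread.
    auto future = scheduleRoutingInfoRefresh(nss);

    // 2. Answer the config queries the refresh issues, in order: the database
    //    entry, the collection entry (fetched twice), then the chunks.
    expectFindOnConfigSendBSONObjVector({databaseBSON});
    expectFindOnConfigSendBSONObjVector({collectionBSON});
    expectFindOnConfigSendBSONObjVector({collectionBSON});
    expectFindOnConfigSendBSONObjVector(initialChunks);

    // 3. Join the async call and unwrap the (always-set) optional result.
    auto routingInfo = future.timed_get(kFutureTimeout);
    ASSERT(routingInfo->cm());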