author     Kaloian Manassiev <kaloian.manassiev@mongodb.com>  2017-03-31 17:27:11 -0400
committer  Kaloian Manassiev <kaloian.manassiev@mongodb.com>  2017-04-12 17:47:23 -0400
commit     9e3a63f9cf9ef3e64dd991824eb87dcf170d3d31 (patch)
tree       34175285f580ac6310d7be51d0a497c6bd7b4e5b
parent     892058e1cd3ae4744e8d13a589081330ea09f486 (diff)
download   mongo-9e3a63f9cf9ef3e64dd991824eb87dcf170d3d31.tar.gz
SERVER-22611 Get rid of ChunkDiff and add more CatalogCache tests

This change gets rid of the "chunk differ", which was previously shared between mongos and mongod. Instead, its relatively simple logic has been moved inside the CatalogCache.

(cherry picked from commit b1fd308ad04a5a6719fe72bcd23b10f1b8266097)
-rw-r--r--  src/mongo/s/SConscript                                          |  16
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_client.h                   |   1
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_shard_collection_test.cpp  |  33
-rw-r--r--  src/mongo/s/catalog_cache.cpp                                   | 101
-rw-r--r--  src/mongo/s/catalog_cache_refresh_test.cpp (renamed from src/mongo/s/chunk_manager_refresh_test.cpp)  | 259
-rw-r--r--  src/mongo/s/catalog_cache_test_fixture.cpp (renamed from src/mongo/s/chunk_manager_test_fixture.cpp)  |  33
-rw-r--r--  src/mongo/s/catalog_cache_test_fixture.h (renamed from src/mongo/s/chunk_manager_test_fixture.h)      |   8
-rw-r--r--  src/mongo/s/chunk_diff.cpp                                      | 202
-rw-r--r--  src/mongo/s/chunk_diff.h                                        | 136
-rw-r--r--  src/mongo/s/chunk_diff_test.cpp                                 | 419
-rw-r--r--  src/mongo/s/chunk_manager_query_test.cpp                        |   4
-rw-r--r--  src/mongo/s/sharding_test_fixture.cpp                           |  32
-rw-r--r--  src/mongo/s/sharding_test_fixture.h                             |   1

13 files changed, 355 insertions(+), 890 deletions(-)
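
The heart of the change is in catalog_cache.cpp below: the refresh path no longer funnels config server results through a templated ConfigDiffTracker, but merges the changed chunks directly into the routing table's range map. A minimal sketch of that merge step, assuming a range map keyed by each chunk's exclusive max key (simplified types; the real code uses a BSONObjIndexedMap of std::shared_ptr<Chunk>):

    // For each changed chunk fetched from the config server, [low, high)
    // spans every cached chunk that overlaps the incoming one.
    auto low = chunkMap.upper_bound(chunk.getMin());   // first cached max > new min
    auto high = chunkMap.upper_bound(chunk.getMax());  // first cached max > new max

    chunkMap.erase(low, high);  // drop all overlapping cached chunks
    chunkMap.insert(std::make_pair(chunk.getMax(), std::make_shared<Chunk>(chunk)));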
diff --git a/src/mongo/s/SConscript b/src/mongo/s/SConscript
index 2440b3fcb69..91765872709 100644
--- a/src/mongo/s/SConscript
+++ b/src/mongo/s/SConscript
@@ -64,7 +64,6 @@ env.Library(
'request_types/split_chunk_request_type.cpp',
'request_types/merge_chunk_request_type.cpp',
'request_types/update_zone_key_range_request_type.cpp',
- 'chunk_diff.cpp',
'chunk_version.cpp',
'migration_secondary_throttle_options.cpp',
'move_chunk_request.cpp',
@@ -177,17 +176,6 @@ env.CppUnitTest(
]
)
-# This test is very slow in debug mode, so it is put in a separate binary by itself
-env.CppUnitTest(
- target='chunk_diff_test',
- source=[
- 'chunk_diff_test.cpp',
- ],
- LIBDEPS=[
- 'common',
- ]
-)
-
env.CppUnitTest('request_types_test',
source=[
'request_types/add_shard_request_test.cpp',
@@ -233,10 +221,10 @@ env.Library(
env.CppUnitTest(
target='catalog_cache_test',
source=[
+ 'catalog_cache_refresh_test.cpp',
+ 'catalog_cache_test_fixture.cpp',
'chunk_manager_index_bounds_test.cpp',
'chunk_manager_query_test.cpp',
- 'chunk_manager_refresh_test.cpp',
- 'chunk_manager_test_fixture.cpp',
],
LIBDEPS=[
'$BUILD_DIR/mongo/s/catalog/sharding_catalog_test_fixture',
diff --git a/src/mongo/s/catalog/sharding_catalog_client.h b/src/mongo/s/catalog/sharding_catalog_client.h
index f334c05a477..58012aba67b 100644
--- a/src/mongo/s/catalog/sharding_catalog_client.h
+++ b/src/mongo/s/catalog/sharding_catalog_client.h
@@ -41,7 +41,6 @@
namespace mongo {
-class ActionLogType;
class BatchedCommandRequest;
class BatchedCommandResponse;
struct BSONArray;
diff --git a/src/mongo/s/catalog/sharding_catalog_shard_collection_test.cpp b/src/mongo/s/catalog/sharding_catalog_shard_collection_test.cpp
index 74006f5de6a..f60ead6821e 100644
--- a/src/mongo/s/catalog/sharding_catalog_shard_collection_test.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_shard_collection_test.cpp
@@ -57,6 +57,7 @@
#include "mongo/s/write_ops/batched_command_request.h"
#include "mongo/s/write_ops/batched_command_response.h"
#include "mongo/stdx/future.h"
+#include "mongo/transport/mock_session.h"
#include "mongo/util/log.h"
#include "mongo/util/time_support.h"
@@ -235,9 +236,13 @@ TEST_F(ShardCollectionTest, anotherMongosSharding) {
Status::OK());
auto future = launchAsync([&] {
- Client::initThreadIfNotAlready();
+ auto client = serviceContext()->makeClient(
+ "Test",
+ transport::MockSession::create(
+ operationContext()->getClient()->getRemote(), HostAndPort{}, nullptr));
+ auto opCtx = client->makeOperationContext();
ASSERT_EQUALS(ErrorCodes::AlreadyInitialized,
- catalogClient()->shardCollection(operationContext(),
+ catalogClient()->shardCollection(opCtx.get(),
ns,
keyPattern,
defaultCollation,
@@ -316,8 +321,12 @@ TEST_F(ShardCollectionTest, noInitialChunksOrData) {
// Now start actually sharding the collection.
auto future = launchAsync([&] {
- Client::initThreadIfNotAlready();
- ASSERT_OK(catalogClient()->shardCollection(operationContext(),
+ auto client = serviceContext()->makeClient(
+ "Test",
+ transport::MockSession::create(
+ operationContext()->getClient()->getRemote(), HostAndPort{}, nullptr));
+ auto opCtx = client->makeOperationContext();
+ ASSERT_OK(catalogClient()->shardCollection(opCtx.get(),
ns,
keyPattern,
defaultCollation,
@@ -505,10 +514,14 @@ TEST_F(ShardCollectionTest, withInitialChunks) {
// Now start actually sharding the collection.
auto future = launchAsync([&] {
- Client::initThreadIfNotAlready();
+ auto client = serviceContext()->makeClient(
+ "Test",
+ transport::MockSession::create(
+ operationContext()->getClient()->getRemote(), HostAndPort{}, nullptr));
+ auto opCtx = client->makeOperationContext();
set<ShardId> shards{shard0.getName(), shard1.getName(), shard2.getName()};
ASSERT_OK(catalogClient()->shardCollection(
- operationContext(),
+ opCtx.get(),
ns,
keyPattern,
defaultCollation,
@@ -674,8 +687,12 @@ TEST_F(ShardCollectionTest, withInitialData) {
// Now start actually sharding the collection.
auto future = launchAsync([&] {
- Client::initThreadIfNotAlready();
- ASSERT_OK(catalogClient()->shardCollection(operationContext(),
+ auto client = serviceContext()->makeClient(
+ "Test",
+ transport::MockSession::create(
+ operationContext()->getClient()->getRemote(), HostAndPort{}, nullptr));
+ auto opCtx = client->makeOperationContext();
+ ASSERT_OK(catalogClient()->shardCollection(opCtx.get(),
ns,
keyPattern,
defaultCollation,
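
All four tests above apply the same replacement pattern: rather than attaching a Client to the current thread implicitly via Client::initThreadIfNotAlready(), each async task now constructs its own Client over a mock transport session and derives a fresh OperationContext from it. In outline (taken directly from the hunks above):

    auto client = serviceContext()->makeClient(
        "Test",
        transport::MockSession::create(
            operationContext()->getClient()->getRemote(), HostAndPort{}, nullptr));
    auto opCtx = client->makeOperationContext();
    // ... pass opCtx.get() to catalogClient()->shardCollection(...) instead of
    // operationContext()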
diff --git a/src/mongo/s/catalog_cache.cpp b/src/mongo/s/catalog_cache.cpp
index d43d36774d0..28e1859f530 100644
--- a/src/mongo/s/catalog_cache.cpp
+++ b/src/mongo/s/catalog_cache.cpp
@@ -40,7 +40,6 @@
#include "mongo/s/catalog/sharding_catalog_client.h"
#include "mongo/s/catalog/type_collection.h"
#include "mongo/s/catalog/type_database.h"
-#include "mongo/s/chunk_diff.h"
#include "mongo/s/client/shard_registry.h"
#include "mongo/s/grid.h"
#include "mongo/stdx/memory.h"
@@ -55,39 +54,36 @@ namespace {
const int kMaxInconsistentRoutingInfoRefreshAttempts = 3;
/**
- * This is an adapter so we can use config diffs - mongos and mongod do them slightly differently.
- *
- * The mongos adapter here tracks all shards, and stores ranges by (max, Chunk) in the map.
+ * Structure representing the generated query and sort order for a chunk diffing operation.
*/
-class CMConfigDiffTracker : public ConfigDiffTracker<std::shared_ptr<Chunk>> {
-public:
- CMConfigDiffTracker(const NamespaceString& nss,
- RangeMap* currMap,
- ChunkVersion* maxVersion,
- MaxChunkVersionMap* maxShardVersions)
- : ConfigDiffTracker<std::shared_ptr<Chunk>>(
- nss.ns(), currMap, maxVersion, maxShardVersions) {}
-
- bool isTracked(const ChunkType& chunk) const final {
- // Mongos tracks all shards
- return true;
- }
+struct QueryAndSort {
+ const BSONObj query;
+ const BSONObj sort;
+};
- bool isMinKeyIndexed() const final {
- return false;
+/**
+ * Returns the query needed to find incremental changes to a collection from the config server.
+ */
+QueryAndSort createConfigDiffQuery(const NamespaceString& nss, ChunkVersion collectionVersion) {
+ // The query has to find all the chunks $gte the current max version. Currently, any splits and
+ // merges will increment the current max version.
+ BSONObjBuilder queryB;
+ queryB.append(ChunkType::ns(), nss.ns());
+
+ {
+ BSONObjBuilder tsBuilder(queryB.subobjStart(ChunkType::DEPRECATED_lastmod()));
+ tsBuilder.appendTimestamp("$gte", collectionVersion.toLong());
+ tsBuilder.done();
}
- std::pair<BSONObj, std::shared_ptr<Chunk>> rangeFor(OperationContext* opCtx,
- const ChunkType& chunk) const final {
- return std::make_pair(chunk.getMax(), std::make_shared<Chunk>(chunk));
- }
+ // NOTE: IT IS IMPORTANT FOR CONSISTENCY THAT WE SORT BY ASC VERSION, IN ORDER TO HANDLE CURSOR
+ // YIELDING BETWEEN CHUNKS BEING MIGRATED
+ //
+ // This ensures that changes to chunk version (which will always be higher) will always come
+ // *after* our current position in the chunk cursor
- ShardId shardFor(OperationContext* opCtx, const ShardId& shardId) const final {
- const auto shard =
- uassertStatusOK(Grid::get(opCtx)->shardRegistry()->getShard(opCtx, shardId));
- return shard->getId();
- }
-};
+ return QueryAndSort{queryB.obj(), BSON(ChunkType::DEPRECATED_lastmod() << 1)};
+}
/**
* Blocking method, which refreshes the routing information for the specified collection. If
@@ -145,8 +141,7 @@ std::shared_ptr<ChunkManager> refreshCollectionRoutingInfo(
<< startingCollectionVersion;
// Diff tracker should *always* find at least one chunk if collection exists
- const auto diffQuery =
- CMConfigDiffTracker::createConfigDiffQuery(nss, startingCollectionVersion);
+ const auto diffQuery = createConfigDiffQuery(nss, startingCollectionVersion);
// Query the chunks which have changed
std::vector<ChunkType> newChunks;
@@ -160,21 +155,41 @@ std::shared_ptr<ChunkManager> refreshCollectionRoutingInfo(
&opTime,
repl::ReadConcernLevel::kMajorityReadConcern));
+ uassert(ErrorCodes::ConflictingOperationInProgress,
+ "No chunks were found for the collection",
+ !newChunks.empty());
+
ChunkVersion collectionVersion = startingCollectionVersion;
- ShardVersionMap unusedShardVersions;
- CMConfigDiffTracker differ(nss, &chunkMap, &collectionVersion, &unusedShardVersions);
+ for (const auto& chunk : newChunks) {
+ const auto& chunkVersion = chunk.getVersion();
- const int diffsApplied = differ.calculateConfigDiff(opCtx, newChunks);
+ uassert(ErrorCodes::ConflictingOperationInProgress,
+ str::stream() << "Chunk " << chunk.genID(nss.ns(), chunk.getMin())
+ << " has epoch different from that of the collection "
+ << chunkVersion.epoch(),
+ collectionVersion.epoch() == chunkVersion.epoch());
- if (diffsApplied < 1) {
- log() << "Refresh for collection " << nss << " took " << t.millis()
- << " ms and failed because the collection's "
- "sharding metadata either changed in between or "
- "became corrupted";
+ // Chunks must always come in incrementally sorted order
+ invariant(chunkVersion >= collectionVersion);
+ collectionVersion = chunkVersion;
+
+ // Ensure chunk references a valid shard and that the shard is available and loaded
+ uassertStatusOK(Grid::get(opCtx)->shardRegistry()->getShard(opCtx, chunk.getShard()));
+
+ // Returns the first chunk with a max key that is > min - implies that the chunk overlaps
+ // min
+ const auto low = chunkMap.upper_bound(chunk.getMin());
- uasserted(ErrorCodes::ConflictingOperationInProgress,
- "Collection sharding status changed during refresh or became corrupted");
+ // Returns the first chunk with a max key that is > max - implies that the next chunk
+ // cannot overlap max
+ const auto high = chunkMap.upper_bound(chunk.getMax());
+
+ // Erase all chunks from the map that overlap the chunk we got from the persistent store
+ chunkMap.erase(low, high);
+
+ // Insert only the chunk itself
+ chunkMap.insert(std::make_pair(chunk.getMax(), std::make_shared<Chunk>(chunk)));
}
// If at least one diff was applied, the metadata is correct, but it might not have changed so
@@ -415,6 +430,10 @@ std::shared_ptr<CatalogCache::DatabaseInfoEntry> CatalogCache::_getDatabase_inlo
StringMap<CollectionRoutingInfoEntry> collectionEntries;
for (const auto& coll : collections) {
+ if (coll.getDropped()) {
+ continue;
+ }
+
collectionEntries[coll.getNs().ns()].needsRefresh = true;
}
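
For reference, the query produced by the new createConfigDiffQuery above has the following shape (illustrative namespace and version values; the ASSERT_BSONOBJ_EQ checks in the tests below assert the same structure):

    // Fetch every chunk of the collection whose version is >= the cached collection
    // version, sorted by ascending version so cursor yields during migrations are safe.
    query: { ns: "TestDB.TestColl", lastmod: { $gte: Timestamp(3, 1) } }
    sort:  { lastmod: 1 }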
diff --git a/src/mongo/s/chunk_manager_refresh_test.cpp b/src/mongo/s/catalog_cache_refresh_test.cpp
index a8facc931b2..c37569d5062 100644
--- a/src/mongo/s/chunk_manager_refresh_test.cpp
+++ b/src/mongo/s/catalog_cache_refresh_test.cpp
@@ -32,15 +32,12 @@
#include <set>
-#include "mongo/db/client.h"
#include "mongo/db/query/query_request.h"
#include "mongo/s/catalog/type_chunk.h"
#include "mongo/s/catalog/type_collection.h"
#include "mongo/s/catalog/type_database.h"
-#include "mongo/s/catalog/type_shard.h"
-#include "mongo/s/catalog/type_shard.h"
#include "mongo/s/catalog_cache.h"
-#include "mongo/s/chunk_manager_test_fixture.h"
+#include "mongo/s/catalog_cache_test_fixture.h"
namespace mongo {
namespace {
@@ -50,22 +47,12 @@ using unittest::assertGet;
const NamespaceString kNss("TestDB", "TestColl");
-class ChunkManagerLoadTest : public ChunkManagerTestFixture {
+class CatalogCacheRefreshTest : public CatalogCacheTestFixture {
protected:
void setUp() override {
- ChunkManagerTestFixture::setUp();
+ CatalogCacheTestFixture::setUp();
- setupShards([&]() {
- ShardType shard0;
- shard0.setName("0");
- shard0.setHost("Host0:12345");
-
- ShardType shard1;
- shard1.setName("1");
- shard1.setHost("Host1:12345");
-
- return std::vector<ShardType>{shard0, shard1};
- }());
+ setupNShards(2);
}
void expectGetDatabase() {
@@ -92,7 +79,7 @@ protected:
}
};
-TEST_F(ChunkManagerLoadTest, FullLoad) {
+TEST_F(CatalogCacheRefreshTest, FullLoad) {
const OID epoch = OID::gen();
const ShardKeyPattern shardKeyPattern(BSON("_id" << 1));
@@ -134,7 +121,7 @@ TEST_F(ChunkManagerLoadTest, FullLoad) {
ASSERT_EQ(4, cm->numChunks());
}
-TEST_F(ChunkManagerLoadTest, DatabaseNotFound) {
+TEST_F(CatalogCacheRefreshTest, DatabaseNotFound) {
auto future = scheduleRoutingInfoRefresh(kNss);
// Return an empty database (need to return it twice because for missing databases, the
@@ -154,7 +141,27 @@ TEST_F(ChunkManagerLoadTest, DatabaseNotFound) {
}
}
-TEST_F(ChunkManagerLoadTest, CollectionNotFound) {
+TEST_F(CatalogCacheRefreshTest, DatabaseBSONCorrupted) {
+ auto future = scheduleRoutingInfoRefresh(kNss);
+
+ // Return a corrupted database entry
+ expectFindOnConfigSendBSONObjVector(
+ {BSON("BadValue"
+ << "This value should not be in a database config document")});
+
+ try {
+ auto routingInfo = future.timed_get(kFutureTimeout);
+ auto cm = routingInfo->cm();
+ auto primary = routingInfo->primary();
+
+ FAIL(str::stream() << "Returning corrupted database entry did not fail and returned "
+ << (cm ? cm->toString() : routingInfo->primaryId().toString()));
+ } catch (const DBException& ex) {
+ ASSERT_EQ(ErrorCodes::NoSuchKey, ex.getCode());
+ }
+}
+
+TEST_F(CatalogCacheRefreshTest, CollectionNotFound) {
auto future = scheduleRoutingInfoRefresh(kNss);
expectGetDatabase();
@@ -168,7 +175,29 @@ TEST_F(ChunkManagerLoadTest, CollectionNotFound) {
ASSERT_EQ(ShardId{"0"}, routingInfo->primaryId());
}
-TEST_F(ChunkManagerLoadTest, NoChunksFoundForCollection) {
+TEST_F(CatalogCacheRefreshTest, CollectionBSONCorrupted) {
+ auto future = scheduleRoutingInfoRefresh(kNss);
+
+ expectGetDatabase();
+
+ // Return a corrupted collection entry
+ expectFindOnConfigSendBSONObjVector(
+ {BSON("BadValue"
+ << "This value should not be in a collection config document")});
+
+ try {
+ auto routingInfo = future.timed_get(kFutureTimeout);
+ auto cm = routingInfo->cm();
+ auto primary = routingInfo->primary();
+
+ FAIL(str::stream() << "Returning corrupted collection entry did not fail and returned "
+ << (cm ? cm->toString() : routingInfo->primaryId().toString()));
+ } catch (const DBException& ex) {
+ ASSERT_EQ(ErrorCodes::FailedToParse, ex.getCode());
+ }
+}
+
+TEST_F(CatalogCacheRefreshTest, NoChunksFoundForCollection) {
const OID epoch = OID::gen();
const ShardKeyPattern shardKeyPattern(BSON("_id" << 1));
@@ -199,7 +228,41 @@ TEST_F(ChunkManagerLoadTest, NoChunksFoundForCollection) {
}
}
-TEST_F(ChunkManagerLoadTest, IncompleteChunksFoundForCollection) {
+TEST_F(CatalogCacheRefreshTest, ChunksBSONCorrupted) {
+ const OID epoch = OID::gen();
+ const ShardKeyPattern shardKeyPattern(BSON("_id" << 1));
+
+ auto future = scheduleRoutingInfoRefresh(kNss);
+
+ expectGetDatabase();
+ expectGetCollection(epoch, shardKeyPattern);
+
+ // Return a list of chunks in which the second document is corrupted
+ expectGetCollection(epoch, shardKeyPattern);
+ expectFindOnConfigSendBSONObjVector([&] {
+ return std::vector<BSONObj>{ChunkType(kNss,
+ {shardKeyPattern.getKeyPattern().globalMin(),
+ BSON("_id" << 0)},
+ ChunkVersion(1, 0, epoch),
+ {"0"})
+ .toBSON(),
+ BSON("BadValue"
+ << "This value should not be in a chunk config document")};
+ }());
+
+ try {
+ auto routingInfo = future.timed_get(kFutureTimeout);
+ auto cm = routingInfo->cm();
+ auto primary = routingInfo->primary();
+
+ FAIL(str::stream() << "Returning no chunks for collection did not fail and returned "
+ << (cm ? cm->toString() : routingInfo->primaryId().toString()));
+ } catch (const DBException& ex) {
+ ASSERT_EQ(ErrorCodes::NoSuchKey, ex.getCode());
+ }
+}
+
+TEST_F(CatalogCacheRefreshTest, IncompleteChunksFoundForCollection) {
const OID epoch = OID::gen();
const ShardKeyPattern shardKeyPattern(BSON("_id" << 1));
@@ -254,7 +317,7 @@ TEST_F(ChunkManagerLoadTest, IncompleteChunksFoundForCollection) {
}
}
-TEST_F(ChunkManagerLoadTest, ChunkEpochChangeDuringIncrementalLoad) {
+TEST_F(CatalogCacheRefreshTest, ChunkEpochChangeDuringIncrementalLoad) {
const ShardKeyPattern shardKeyPattern(BSON("_id" << 1));
auto initialRoutingInfo(makeChunkManager(kNss, shardKeyPattern, nullptr, true, {}));
@@ -272,7 +335,7 @@ TEST_F(ChunkManagerLoadTest, ChunkEpochChangeDuringIncrementalLoad) {
ChunkType chunk2(kNss,
{BSON("_id" << 0), shardKeyPattern.getKeyPattern().globalMax()},
ChunkVersion(1, 0, OID::gen()),
- {"0"});
+ {"1"});
return std::vector<BSONObj>{chunk1.toBSON(), chunk2.toBSON()};
}();
@@ -301,7 +364,135 @@ TEST_F(ChunkManagerLoadTest, ChunkEpochChangeDuringIncrementalLoad) {
}
}
-TEST_F(ChunkManagerLoadTest, IncrementalLoadAfterSplit) {
+TEST_F(CatalogCacheRefreshTest, ChunkEpochChangeDuringIncrementalLoadRecoveryAfterRetry) {
+ const ShardKeyPattern shardKeyPattern(BSON("_id" << 1));
+
+ auto initialRoutingInfo(makeChunkManager(kNss, shardKeyPattern, nullptr, true, {}));
+ ASSERT_EQ(1, initialRoutingInfo->numChunks());
+
+ setupNShards(2);
+
+ auto future = scheduleRoutingInfoRefresh(kNss);
+
+ ChunkVersion oldVersion = initialRoutingInfo->getVersion();
+ const OID newEpoch = OID::gen();
+
+ // On the first attempt, return set of chunks, one of which has different epoch. This simulates
+ // the situation where a collection existed with epoch0, we started a refresh for that
+ // collection, the cursor yielded and while it yielded another node dropped the collection and
+ // recreated it with different epoch and chunks.
+ expectGetCollection(oldVersion.epoch(), shardKeyPattern);
+ onFindCommand([&](const RemoteCommandRequest& request) {
+ const auto diffQuery =
+ assertGet(QueryRequest::makeFromFindCommand(kNss, request.cmdObj, false));
+ ASSERT_BSONOBJ_EQ(BSON("ns" << kNss.ns() << "lastmod"
+ << BSON("$gte" << Timestamp(oldVersion.majorVersion(),
+ oldVersion.minorVersion()))),
+ diffQuery->getFilter());
+
+ oldVersion.incMajor();
+ ChunkType chunk1(kNss,
+ {shardKeyPattern.getKeyPattern().globalMin(), BSON("_id" << 0)},
+ oldVersion,
+ {"0"});
+
+ // "Yield" happens here with drop and recreate in between. This is the "last" chunk from the
+ // recreated collection.
+ ChunkType chunk3(kNss,
+ {BSON("_id" << 100), shardKeyPattern.getKeyPattern().globalMax()},
+ ChunkVersion(5, 2, newEpoch),
+ {"1"});
+
+ return std::vector<BSONObj>{chunk1.toBSON(), chunk3.toBSON()};
+ });
+
+ // On the second retry attempt, return the correct set of chunks from the recreated collection
+ expectGetCollection(newEpoch, shardKeyPattern);
+
+ ChunkVersion newVersion(5, 0, newEpoch);
+ onFindCommand([&](const RemoteCommandRequest& request) {
+ // Ensure it is a differential query, but starting from version zero (to fetch all the
+ // chunks), since the first refresh attempt above returned chunks with a different epoch
+ const auto diffQuery =
+ assertGet(QueryRequest::makeFromFindCommand(kNss, request.cmdObj, false));
+ ASSERT_BSONOBJ_EQ(BSON("ns" << kNss.ns() << "lastmod" << BSON("$gte" << Timestamp(0, 0))),
+ diffQuery->getFilter());
+
+ ChunkType chunk1(kNss,
+ {shardKeyPattern.getKeyPattern().globalMin(), BSON("_id" << 0)},
+ newVersion,
+ {"0"});
+
+ newVersion.incMinor();
+ ChunkType chunk2(kNss, {BSON("_id" << 0), BSON("_id" << 100)}, newVersion, {"0"});
+
+ newVersion.incMinor();
+ ChunkType chunk3(kNss,
+ {BSON("_id" << 100), shardKeyPattern.getKeyPattern().globalMax()},
+ newVersion,
+ {"1"});
+
+ return std::vector<BSONObj>{chunk1.toBSON(), chunk2.toBSON(), chunk3.toBSON()};
+ });
+
+ auto routingInfo = future.timed_get(kFutureTimeout);
+ ASSERT(routingInfo->cm());
+ auto cm = routingInfo->cm();
+
+ ASSERT_EQ(3, cm->numChunks());
+ ASSERT_EQ(newVersion, cm->getVersion());
+ ASSERT_EQ(ChunkVersion(5, 1, newVersion.epoch()), cm->getVersion({"0"}));
+ ASSERT_EQ(ChunkVersion(5, 2, newVersion.epoch()), cm->getVersion({"1"}));
+}
+
+TEST_F(CatalogCacheRefreshTest, IncrementalLoadAfterCollectionEpochChange) {
+ const ShardKeyPattern shardKeyPattern(BSON("_id" << 1));
+
+ auto initialRoutingInfo(makeChunkManager(kNss, shardKeyPattern, nullptr, true, {}));
+ ASSERT_EQ(1, initialRoutingInfo->numChunks());
+
+ setupNShards(2);
+
+ auto future = scheduleRoutingInfoRefresh(kNss);
+
+ ChunkVersion newVersion(1, 0, OID::gen());
+
+ // Return collection with a different epoch
+ expectGetCollection(newVersion.epoch(), shardKeyPattern);
+
+ // Return set of chunks, which represent a split
+ onFindCommand([&](const RemoteCommandRequest& request) {
+ // Ensure it is a differential query but starting from version zero
+ const auto diffQuery =
+ assertGet(QueryRequest::makeFromFindCommand(kNss, request.cmdObj, false));
+ ASSERT_BSONOBJ_EQ(BSON("ns" << kNss.ns() << "lastmod" << BSON("$gte" << Timestamp(0, 0))),
+ diffQuery->getFilter());
+
+ ChunkType chunk1(kNss,
+ {shardKeyPattern.getKeyPattern().globalMin(), BSON("_id" << 0)},
+ newVersion,
+ {"0"});
+ newVersion.incMinor();
+
+ ChunkType chunk2(kNss,
+ {BSON("_id" << 0), shardKeyPattern.getKeyPattern().globalMax()},
+ newVersion,
+ {"1"});
+
+ return std::vector<BSONObj>{chunk1.toBSON(), chunk2.toBSON()};
+ });
+
+ auto routingInfo = future.timed_get(kFutureTimeout);
+ ASSERT(routingInfo->cm());
+ auto cm = routingInfo->cm();
+
+ ASSERT_EQ(2, cm->numChunks());
+ ASSERT_EQ(newVersion, cm->getVersion());
+ ASSERT_EQ(ChunkVersion(1, 0, newVersion.epoch()), cm->getVersion({"0"}));
+ ASSERT_EQ(ChunkVersion(1, 1, newVersion.epoch()), cm->getVersion({"1"}));
+}
+
+TEST_F(CatalogCacheRefreshTest, IncrementalLoadAfterSplit) {
const ShardKeyPattern shardKeyPattern(BSON("_id" << 1));
auto initialRoutingInfo(makeChunkManager(kNss, shardKeyPattern, nullptr, true, {}));
@@ -344,7 +535,7 @@ TEST_F(ChunkManagerLoadTest, IncrementalLoadAfterSplit) {
ASSERT_EQ(ChunkVersion(0, 0, version.epoch()), cm->getVersion({"1"}));
}
-TEST_F(ChunkManagerLoadTest, IncrementalLoadAfterMove) {
+TEST_F(CatalogCacheRefreshTest, IncrementalLoadAfterMove) {
const ShardKeyPattern shardKeyPattern(BSON("_id" << 1));
auto initialRoutingInfo(
@@ -383,12 +574,14 @@ TEST_F(ChunkManagerLoadTest, IncrementalLoadAfterMove) {
ASSERT_EQ(expectedDestShardVersion, cm->getVersion({"1"}));
}
-TEST_F(ChunkManagerLoadTest, IncrementalLoadAfterMoveLastChunk) {
+TEST_F(CatalogCacheRefreshTest, IncrementalLoadAfterMoveLastChunk) {
const ShardKeyPattern shardKeyPattern(BSON("_id" << 1));
auto initialRoutingInfo(makeChunkManager(kNss, shardKeyPattern, nullptr, true, {}));
ASSERT_EQ(1, initialRoutingInfo->numChunks());
+ setupNShards(2);
+
ChunkVersion version = initialRoutingInfo->getVersion();
auto future = scheduleRoutingInfoRefresh(kNss);
@@ -407,18 +600,6 @@ TEST_F(ChunkManagerLoadTest, IncrementalLoadAfterMoveLastChunk) {
return std::vector<BSONObj>{chunk1.toBSON()};
}());
- expectFindOnConfigSendBSONObjVector([&]() {
- ShardType shard1;
- shard1.setName("0");
- shard1.setHost(str::stream() << "Host0:12345");
-
- ShardType shard2;
- shard2.setName("1");
- shard2.setHost(str::stream() << "Host1:12345");
-
- return std::vector<BSONObj>{shard1.toBSON(), shard2.toBSON()};
- }());
-
auto routingInfo = future.timed_get(kFutureTimeout);
ASSERT(routingInfo->cm());
auto cm = routingInfo->cm();
diff --git a/src/mongo/s/chunk_manager_test_fixture.cpp b/src/mongo/s/catalog_cache_test_fixture.cpp
index 341a43f466d..8a7372209c9 100644
--- a/src/mongo/s/chunk_manager_test_fixture.cpp
+++ b/src/mongo/s/catalog_cache_test_fixture.cpp
@@ -33,7 +33,7 @@
#include <set>
#include <vector>
-#include "mongo/s/chunk_manager_test_fixture.h"
+#include "mongo/s/catalog_cache_test_fixture.h"
#include "mongo/client/remote_command_targeter_mock.h"
#include "mongo/db/client.h"
@@ -50,7 +50,7 @@
namespace mongo {
-void ChunkManagerTestFixture::setUp() {
+void CatalogCacheTestFixture::setUp() {
ShardingCatalogTestFixture::setUp();
setRemote(HostAndPort("FakeRemoteClient:34567"));
configTargeter()->setFindHostReturnValue(HostAndPort{CONFIG_HOST_PORT});
@@ -59,7 +59,7 @@ void ChunkManagerTestFixture::setUp() {
}
executor::NetworkTestEnv::FutureHandle<boost::optional<CachedCollectionRoutingInfo>>
-ChunkManagerTestFixture::scheduleRoutingInfoRefresh(const NamespaceString& nss) {
+CatalogCacheTestFixture::scheduleRoutingInfoRefresh(const NamespaceString& nss) {
return launchAsync([this, nss] {
auto client = serviceContext()->makeClient("Test");
auto opCtx = client->makeOperationContext();
@@ -71,7 +71,22 @@ ChunkManagerTestFixture::scheduleRoutingInfoRefresh(const NamespaceString& nss)
});
}
-std::shared_ptr<ChunkManager> ChunkManagerTestFixture::makeChunkManager(
+void CatalogCacheTestFixture::setupNShards(int numShards) {
+ setupShards([&]() {
+ std::vector<ShardType> shards;
+ for (int i = 0; i < numShards; i++) {
+ ShardType shard;
+ shard.setName(str::stream() << i);
+ shard.setHost(str::stream() << "Host" << i << ":12345");
+
+ shards.emplace_back(std::move(shard));
+ }
+
+ return shards;
+ }());
+}
+
+std::shared_ptr<ChunkManager> CatalogCacheTestFixture::makeChunkManager(
const NamespaceString& nss,
const ShardKeyPattern& shardKeyPattern,
std::unique_ptr<CollatorInterface> defaultCollator,
@@ -102,7 +117,6 @@ std::shared_ptr<ChunkManager> ChunkManagerTestFixture::makeChunkManager(
return coll.toBSON();
}();
- std::vector<ShardType> shards;
std::vector<BSONObj> initialChunks;
auto splitPointsIncludingEnds(splitPoints);
@@ -111,25 +125,20 @@ std::shared_ptr<ChunkManager> ChunkManagerTestFixture::makeChunkManager(
splitPointsIncludingEnds.push_back(shardKeyPattern.getKeyPattern().globalMax());
for (size_t i = 1; i < splitPointsIncludingEnds.size(); ++i) {
- ShardType shard;
- shard.setName(str::stream() << (i - 1));
- shard.setHost(str::stream() << "Host" << (i - 1) << ":12345");
-
ChunkType chunk(
nss,
{shardKeyPattern.getKeyPattern().extendRangeBound(splitPointsIncludingEnds[i - 1],
false),
shardKeyPattern.getKeyPattern().extendRangeBound(splitPointsIncludingEnds[i], false)},
version,
- shard.getName());
+ ShardId{str::stream() << (i - 1)});
initialChunks.push_back(chunk.toBSON());
- shards.push_back(std::move(shard));
version.incMajor();
}
- setupShards(shards);
+ setupNShards(initialChunks.size());
auto future = scheduleRoutingInfoRefresh(nss);
diff --git a/src/mongo/s/chunk_manager_test_fixture.h b/src/mongo/s/catalog_cache_test_fixture.h
index ef84ed1de4a..4c620eafee5 100644
--- a/src/mongo/s/chunk_manager_test_fixture.h
+++ b/src/mongo/s/catalog_cache_test_fixture.h
@@ -42,7 +42,7 @@ class ChunkManager;
class CollatorInterface;
class ShardKeyPattern;
-class ChunkManagerTestFixture : public ShardingCatalogTestFixture {
+class CatalogCacheTestFixture : public ShardingCatalogTestFixture {
protected:
void setUp() override;
@@ -68,6 +68,12 @@ protected:
*/
executor::NetworkTestEnv::FutureHandle<boost::optional<CachedCollectionRoutingInfo>>
scheduleRoutingInfoRefresh(const NamespaceString& nss);
+
+ /**
+ * Ensures that there are 'numShards' shards available in the shard registry. The shard ids are
+ * generated as "0", "1", etc.
+ */
+ void setupNShards(int numShards);
};
} // namespace mongo
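
A minimal usage sketch for the new helper, following the naming convention implemented in catalog_cache_test_fixture.cpp above:

    // In a test deriving from CatalogCacheTestFixture:
    setupNShards(2);  // registers shard "0" at Host0:12345 and shard "1" at Host1:12345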
diff --git a/src/mongo/s/chunk_diff.cpp b/src/mongo/s/chunk_diff.cpp
deleted file mode 100644
index 5e9cd73bf84..00000000000
--- a/src/mongo/s/chunk_diff.cpp
+++ /dev/null
@@ -1,202 +0,0 @@
-/**
- * Copyright (C) 2015 MongoDB Inc.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License, version 3,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * As a special exception, the copyright holders give permission to link the
- * code of portions of this program with the OpenSSL library under certain
- * conditions as described in each individual source file and distribute
- * linked combinations including the program with the OpenSSL library. You
- * must comply with the GNU Affero General Public License in all respects
- * for all of the code used other than as permitted herein. If you modify
- * file(s) with this exception, you may extend this exception to your
- * version of the file(s), but you are not obligated to do so. If you do not
- * wish to do so, delete this exception statement from your version. If you
- * delete this exception statement from all source files in the program,
- * then also delete it in the license file.
- */
-
-#define MONGO_LOG_DEFAULT_COMPONENT ::mongo::logger::LogComponent::kSharding
-
-#include "mongo/platform/basic.h"
-
-#include "mongo/s/chunk_diff.h"
-
-#include "mongo/db/namespace_string.h"
-#include "mongo/db/s/collection_metadata.h"
-#include "mongo/s/catalog/type_chunk.h"
-#include "mongo/s/chunk_version.h"
-#include "mongo/util/log.h"
-#include "mongo/util/mongoutils/str.h"
-
-namespace mongo {
-
-template <class ValType>
-ConfigDiffTracker<ValType>::ConfigDiffTracker(const std::string& ns,
- RangeMap* currMap,
- ChunkVersion* maxVersion,
- MaxChunkVersionMap* maxShardVersions)
- : _ns(ns), _currMap(currMap), _maxVersion(maxVersion), _maxShardVersions(maxShardVersions) {
- invariant(_currMap);
- invariant(_maxVersion);
- invariant(_maxShardVersions);
-}
-
-template <class ValType>
-ConfigDiffTracker<ValType>::~ConfigDiffTracker() = default;
-
-template <class ValType>
-bool ConfigDiffTracker<ValType>::_isOverlapping(const BSONObj& min, const BSONObj& max) {
- RangeOverlap overlap = _overlappingRange(min, max);
-
- return overlap.first != overlap.second;
-}
-
-template <class ValType>
-typename ConfigDiffTracker<ValType>::RangeOverlap ConfigDiffTracker<ValType>::_overlappingRange(
- const BSONObj& min, const BSONObj& max) {
- typename RangeMap::iterator low;
- typename RangeMap::iterator high;
-
- if (isMinKeyIndexed()) {
- // Returns the first chunk with a min key that is >= min - implies the
- // previous chunk cannot overlap min
- low = _currMap->lower_bound(min);
-
- // Returns the first chunk with a min key that is >= max - implies the
- // chunk does not overlap max
- high = _currMap->lower_bound(max);
- } else {
- // Returns the first chunk with a max key that is > min - implies that
- // the chunk overlaps min
- low = _currMap->upper_bound(min);
-
- // Returns the first chunk with a max key that is > max - implies that
- // the next chunk cannot not overlap max
- high = _currMap->upper_bound(max);
- }
-
- return RangeOverlap(low, high);
-}
-
-template <class ValType>
-int ConfigDiffTracker<ValType>::calculateConfigDiff(OperationContext* txn,
- const std::vector<ChunkType>& chunks) {
- // Apply the chunk changes to the ranges and versions
- //
- // Overall idea here is to work in two steps :
- // 1. For all the new chunks we find, increment the maximum version per-shard and
- // per-collection, and remove any conflicting chunks from the ranges.
- // 2. For all the new chunks we're interested in (all of them for mongos, just chunks on
- // the shard for mongod) add them to the ranges.
-
- std::vector<ChunkType> newTracked;
-
- // Store epoch now so it doesn't change when we change max
- OID currEpoch = _maxVersion->epoch();
-
- int validDiffs = 0;
-
- for (const ChunkType& chunk : chunks) {
- const ChunkVersion& chunkVersion = chunk.getVersion();
-
- if (!chunkVersion.hasEqualEpoch(currEpoch)) {
- warning() << "got invalid chunk version " << chunkVersion << " in document "
- << redact(chunk.toString())
- << " when trying to load differing chunks at version "
- << ChunkVersion(
- _maxVersion->majorVersion(), _maxVersion->minorVersion(), currEpoch);
-
- // Don't keep loading, since we know we'll be broken here
- return -1;
- }
-
- validDiffs++;
-
- // Get max changed version and chunk version
- if (chunkVersion > *_maxVersion) {
- *_maxVersion = chunkVersion;
- }
-
- // Chunk version changes
- ShardId shard = shardFor(txn, chunk.getShard());
-
- typename MaxChunkVersionMap::const_iterator shardVersionIt = _maxShardVersions->find(shard);
- if (shardVersionIt == _maxShardVersions->end() || shardVersionIt->second < chunkVersion) {
- (*_maxShardVersions)[shard] = chunkVersion;
- }
-
- // See if we need to remove any chunks we are currently tracking because of this chunk's
- // changes
- {
- RangeOverlap overlap = _overlappingRange(chunk.getMin(), chunk.getMax());
- _currMap->erase(overlap.first, overlap.second);
- }
-
- // Figure out which of the new chunks we need to track
- // Important - we need to actually own this doc, in case the cursor decides to getMore
- // or unbuffer.
- if (isTracked(chunk)) {
- newTracked.push_back(chunk);
- }
- }
-
- LOG(3) << "found " << validDiffs << " new chunks for collection " << _ns << " (tracking "
- << newTracked.size() << "), new version is " << *_maxVersion;
-
- for (const ChunkType& chunk : newTracked) {
- // Invariant enforced by sharding - it's possible to read inconsistent state due to
- // getMore and yielding, so we want to detect it as early as possible.
- //
- // TODO: This checks for overlap, we also should check for holes here iff we're
- // tracking all chunks.
- if (_isOverlapping(chunk.getMin(), chunk.getMax())) {
- return -1;
- }
-
- _currMap->insert(rangeFor(txn, chunk));
- }
-
- return validDiffs;
-}
-
-ConfigDiffTrackerBase::QueryAndSort ConfigDiffTrackerBase::createConfigDiffQuery(
- const NamespaceString& nss, ChunkVersion collectionVersion) {
- // The query has to find all the chunks $gte the current max version. Currently, any splits and
- // merges will increment the current max version.
- BSONObjBuilder queryB;
- queryB.append(ChunkType::ns(), nss.ns());
-
- {
- BSONObjBuilder tsBuilder(queryB.subobjStart(ChunkType::DEPRECATED_lastmod()));
- tsBuilder.appendTimestamp("$gte", collectionVersion.toLong());
- tsBuilder.done();
- }
-
- // NOTE: IT IS IMPORTANT FOR CONSISTENCY THAT WE SORT BY ASC VERSION, IN ORDER TO HANDLE CURSOR
- // YIELDING BETWEEN CHUNKS BEING MIGRATED
- //
- // This ensures that changes to chunk version (which will always be higher) will always come
- // *after* our current position in the chunk cursor.
-
- return QueryAndSort{queryB.obj(), BSON(ChunkType::DEPRECATED_lastmod() << 1)};
-}
-
-// Ensures that these instances of the template are compiled
-class Chunk;
-
-template class ConfigDiffTracker<BSONObj>;
-template class ConfigDiffTracker<CachedChunkInfo>;
-template class ConfigDiffTracker<std::shared_ptr<Chunk>>;
-
-} // namespace mongo
diff --git a/src/mongo/s/chunk_diff.h b/src/mongo/s/chunk_diff.h
deleted file mode 100644
index 9b4aaec150c..00000000000
--- a/src/mongo/s/chunk_diff.h
+++ /dev/null
@@ -1,136 +0,0 @@
-/**
- * Copyright (C) 2008-2015 MongoDB Inc.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License, version 3,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * As a special exception, the copyright holders give permission to link the
- * code of portions of this program with the OpenSSL library under certain
- * conditions as described in each individual source file and distribute
- * linked combinations including the program with the OpenSSL library. You
- * must comply with the GNU Affero General Public License in all respects
- * for all of the code used other than as permitted herein. If you modify
- * file(s) with this exception, you may extend this exception to your
- * version of the file(s), but you are not obligated to do so. If you do not
- * wish to do so, delete this exception statement from your version. If you
- * delete this exception statement from all source files in the program,
- * then also delete it in the license file.
- */
-
-#pragma once
-
-#include <string>
-
-#include "mongo/bson/bsonobj.h"
-#include "mongo/bson/simple_bsonobj_comparator.h"
-#include "mongo/s/client/shard.h"
-
-namespace mongo {
-
-class ChunkType;
-struct ChunkVersion;
-class OperationContext;
-
-class ConfigDiffTrackerBase {
-public:
- /**
- * Structure repsenting the generated query and sort order for a chunk diffing operation.
- */
- struct QueryAndSort {
- const BSONObj query;
- const BSONObj sort;
- };
-
- /**
- * Returns the query needed to find incremental changes to a collection from the config server.
- */
- static QueryAndSort createConfigDiffQuery(const NamespaceString& nss,
- ChunkVersion collectionVersion);
-};
-
-/**
- * This class manages and applies diffs from partial config server data reloads. Because the config
- * data can be large, we want to update it in small parts, not all-at-once. Once a
- * ConfigDiffTracker is created, the current config data is *attached* to it, and it is then able
- * to modify the data.
- *
- * The current form is templated b/c the overall algorithm is identical between mongos and mongod,
- * but the actual chunk maps used differ in implementation. We don't want to copy the
- * implementation, because the logic is identical, or the chunk data, because that would be slow
- * for big clusters, so this is the alternative for now.
- *
- * TODO: Standardize between mongos and mongod and convert template parameters to types.
- */
-template <class ValType>
-class ConfigDiffTracker : public ConfigDiffTrackerBase {
-public:
- // Stores ranges indexed by max or min key.
- typedef BSONObjIndexedMap<ValType> RangeMap;
-
- // Pair of iterators defining a subset of ranges
- typedef typename std::pair<typename RangeMap::iterator, typename RangeMap::iterator>
- RangeOverlap;
-
- // Map of shard identifiers to the maximum chunk version on that shard
- typedef typename std::map<ShardId, ChunkVersion> MaxChunkVersionMap;
-
- /**
- * The tracker attaches to a set of ranges with versions, and uses the catalog client to update
- * these. Because the set of ranges and versions may be large, they aren't owned by the tracker,
- * they're just passed in and updated. Therefore they must all stay in scope while the tracker
- * is working.
- */
- ConfigDiffTracker(const std::string& ns,
- RangeMap* currMap,
- ChunkVersion* maxVersion,
- MaxChunkVersionMap* maxShardVersions);
-
- virtual ~ConfigDiffTracker();
-
- // Applies changes to the config data from a vector of chunks passed in. Also includes minor
- // version changes for particular major-version chunks if explicitly specified.
- // Returns the number of diffs processed, or -1 if the diffs were inconsistent.
- int calculateConfigDiff(OperationContext* txn, const std::vector<ChunkType>& chunks);
-
-protected:
- /**
- * Determines which chunks are actually being remembered by our RangeMap. Allows individual
- * shards to filter out results, which belong to the local shard only.
- */
- virtual bool isTracked(const ChunkType& chunk) const = 0;
-
- /**
- * Whether or not our RangeMap uses min or max keys
- */
- virtual bool isMinKeyIndexed() const {
- return true;
- }
-
- virtual std::pair<BSONObj, ValType> rangeFor(OperationContext* txn,
- const ChunkType& chunk) const = 0;
-
- virtual ShardId shardFor(OperationContext* txn, const ShardId& name) const = 0;
-
-private:
- // Whether or not a range exists in the min/max region
- bool _isOverlapping(const BSONObj& min, const BSONObj& max);
-
- // Returns a subset of ranges overlapping the region min/max
- RangeOverlap _overlappingRange(const BSONObj& min, const BSONObj& max);
-
- const std::string _ns;
- RangeMap* const _currMap;
- ChunkVersion* const _maxVersion;
- MaxChunkVersionMap* const _maxShardVersions;
-};
-
-} // namespace mongo
diff --git a/src/mongo/s/chunk_diff_test.cpp b/src/mongo/s/chunk_diff_test.cpp
deleted file mode 100644
index 2a83711a24c..00000000000
--- a/src/mongo/s/chunk_diff_test.cpp
+++ /dev/null
@@ -1,419 +0,0 @@
-/**
- * Copyright (C) 2012 10gen Inc.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License, version 3,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * As a special exception, the copyright holders give permission to link the
- * code of portions of this program with the OpenSSL library under certain
- * conditions as described in each individual source file and distribute
- * linked combinations including the program with the OpenSSL library. You
- * must comply with the GNU Affero General Public License in all respects
- * for all of the code used other than as permitted herein. If you modify
- * file(s) with this exception, you may extend this exception to your
- * version of the file(s), but you are not obligated to do so. If you do not
- * wish to do so, delete this exception statement from your version. If you
- * delete this exception statement from all source files in the program,
- * then also delete it in the license file.
- */
-
-#include "mongo/platform/basic.h"
-
-#include <map>
-#include <string>
-#include <utility>
-#include <vector>
-
-#include "mongo/bson/simple_bsonobj_comparator.h"
-#include "mongo/db/jsobj.h"
-#include "mongo/db/operation_context_noop.h"
-#include "mongo/platform/random.h"
-#include "mongo/s/catalog/type_chunk.h"
-#include "mongo/s/chunk_diff.h"
-#include "mongo/unittest/unittest.h"
-
-namespace mongo {
-namespace {
-
-using std::string;
-using std::pair;
-using std::make_pair;
-using std::map;
-using std::vector;
-
-// Generates pseudorandom values
-PseudoRandom rand(1);
-
-/**
- * The default pass-through adapter for using config diffs.
- */
-class DefaultDiffAdapter : public ConfigDiffTracker<BSONObj> {
-public:
- using ConfigDiffTracker<BSONObj>::ConfigDiffTracker;
-
- virtual bool isTracked(const ChunkType& chunk) const {
- return true;
- }
-
- virtual pair<BSONObj, BSONObj> rangeFor(OperationContext* txn, const ChunkType& chunk) const {
- return make_pair(chunk.getMin(), chunk.getMax());
- }
-
- virtual ShardId shardFor(OperationContext* txn, const ShardId& name) const {
- return name;
- }
-};
-
-/**
- * Inverts the storage order for chunks from min to max.
- */
-class InverseDiffAdapter : public DefaultDiffAdapter {
-public:
- using DefaultDiffAdapter::DefaultDiffAdapter;
-
- virtual bool isMinKeyIndexed() const {
- return false;
- }
-
- virtual pair<BSONObj, BSONObj> rangeFor(OperationContext* txn, const ChunkType& chunk) const {
- return make_pair(chunk.getMax(), chunk.getMin());
- }
-};
-
-/**
- * Converts array of raw BSONObj chunks to a vector of ChunkType.
- */
-void convertBSONArrayToChunkTypes(const vector<BSONObj>& chunksArray,
- std::vector<ChunkType>* chunksVector) {
- for (const BSONObj& obj : chunksArray) {
- auto chunkTypeRes = ChunkType::fromBSON(obj);
- ASSERT(chunkTypeRes.isOK());
- chunksVector->push_back(chunkTypeRes.getValue());
- }
-}
-
-class ChunkDiffUnitTest : public mongo::unittest::Test {
-protected:
- typedef BSONObjIndexedMap<BSONObj> RangeMap;
- typedef map<ShardId, ChunkVersion> VersionMap;
-
- ChunkDiffUnitTest() = default;
- ~ChunkDiffUnitTest() = default;
-
- void runTest(bool isInverse) {
- int numShards = 10;
- int numInitialChunks = 5;
-
- // Needed to not overflow the BSONArray's max bytes
- int maxChunks = 100000;
- int keySize = 2;
-
- vector<BSONObj> chunksB;
-
- BSONObj lastSplitPt;
- ChunkVersion version(1, 0, OID());
-
- // Generate numChunks with a given key size over numShards. All chunks have double key
- // values, so we can split them a bunch.
-
- for (int i = -1; i < numInitialChunks; i++) {
- BSONObjBuilder splitPtB;
- for (int k = 0; k < keySize; k++) {
- string field = string("k") + string(1, (char)('0' + k));
- if (i < 0)
- splitPtB.appendMinKey(field);
- else if (i < numInitialChunks - 1)
- splitPtB.append(field, (double)i);
- else
- splitPtB.appendMaxKey(field);
- }
- BSONObj splitPt = splitPtB.obj();
-
- if (i >= 0) {
- BSONObjBuilder chunkB;
-
- chunkB.append(ChunkType::name(), "$dummyname");
- chunkB.append(ChunkType::ns(), "$dummyns");
-
- chunkB.append(ChunkType::min(), lastSplitPt);
- chunkB.append(ChunkType::max(), splitPt);
-
- int shardNum = rand(numShards);
- chunkB.append(ChunkType::shard(), "shard" + string(1, (char)('A' + shardNum)));
-
- rand(2) ? version.incMajor() : version.incMinor();
- version.addToBSON(chunkB, ChunkType::DEPRECATED_lastmod());
-
- chunksB.push_back(chunkB.obj());
- }
-
- lastSplitPt = splitPt;
- }
-
- vector<BSONObj> chunks(std::move(chunksB));
-
- // Setup the empty ranges and versions first
- RangeMap ranges = SimpleBSONObjComparator::kInstance.makeBSONObjIndexedMap<BSONObj>();
- ChunkVersion maxVersion = ChunkVersion(0, 0, OID());
- VersionMap maxShardVersions;
-
- // Create a differ which will track our progress
- std::shared_ptr<DefaultDiffAdapter> differ(
- isInverse ? new InverseDiffAdapter("test", &ranges, &maxVersion, &maxShardVersions)
- : new DefaultDiffAdapter("test", &ranges, &maxVersion, &maxShardVersions));
-
- std::vector<ChunkType> chunksVector;
- convertBSONArrayToChunkTypes(chunks, &chunksVector);
-
- // Validate initial load
- differ->calculateConfigDiff(nullptr, chunksVector);
- validate(isInverse, chunksVector, ranges, maxVersion, maxShardVersions);
-
- // Generate a lot of diffs, and keep validating that updating from the diffs always gives us
- // the right ranges and versions
-
- // Makes about 100000 chunks overall
- int numDiffs = 135;
- int numChunks = numInitialChunks;
-
- for (int i = 0; i < numDiffs; i++) {
- vector<BSONObj> newChunksB;
-
- vector<BSONObj>::iterator it = chunks.begin();
-
- while (it != chunks.end()) {
- BSONObj chunk = *it;
- ++it;
-
- int randChoice = rand(10);
-
- if (randChoice < 2 && numChunks < maxChunks) {
- // Simulate a split
- BSONObjBuilder leftB;
- BSONObjBuilder rightB;
- BSONObjBuilder midB;
-
- for (int k = 0; k < keySize; k++) {
- string field = string("k") + string(1, (char)('0' + k));
-
- BSONType maxType = chunk[ChunkType::max()].Obj()[field].type();
- double max =
- maxType == NumberDouble ? chunk["max"].Obj()[field].Number() : 0.0;
- BSONType minType = chunk[ChunkType::min()].Obj()[field].type();
- double min = minType == NumberDouble
- ? chunk[ChunkType::min()].Obj()[field].Number()
- : 0.0;
-
- if (minType == MinKey) {
- midB.append(field, max - 1.0);
- } else if (maxType == MaxKey) {
- midB.append(field, min + 1.0);
- } else {
- midB.append(field, (max + min) / 2.0);
- }
- }
-
- BSONObj midPt = midB.obj();
-
- // Only happens if we can't split the min chunk
- if (midPt.isEmpty()) {
- continue;
- }
-
- leftB.append(chunk[ChunkType::min()]);
- leftB.append(ChunkType::max(), midPt);
- rightB.append(ChunkType::min(), midPt);
- rightB.append(chunk[ChunkType::max()]);
-
- // Add required fields for ChunkType
- leftB.append(chunk[ChunkType::name()]);
- leftB.append(chunk[ChunkType::ns()]);
- rightB.append(chunk[ChunkType::name()]);
- rightB.append(chunk[ChunkType::ns()]);
-
- leftB.append(chunk[ChunkType::shard()]);
- rightB.append(chunk[ChunkType::shard()]);
-
- version.incMajor();
- version.addToBSON(leftB, ChunkType::DEPRECATED_lastmod());
- version.incMinor();
- version.addToBSON(rightB, ChunkType::DEPRECATED_lastmod());
-
- BSONObj left = leftB.obj();
- BSONObj right = rightB.obj();
-
- newChunksB.push_back(left);
- newChunksB.push_back(right);
-
- numChunks++;
- } else if (randChoice < 4 && it != chunks.end()) {
- // Simulate a migrate
- BSONObj prevShardChunk;
- while (it != chunks.end()) {
- prevShardChunk = *it;
- ++it;
-
- if (prevShardChunk[ChunkType::shard()].String() ==
- chunk[ChunkType::shard()].String()) {
- break;
- }
-
- newChunksB.push_back(prevShardChunk);
-
- prevShardChunk = BSONObj();
- }
-
- // We need to move between different shards, hence the weirdness in logic here
- if (!prevShardChunk.isEmpty()) {
- BSONObjBuilder newShardB;
- BSONObjBuilder prevShardB;
-
- newShardB.append(chunk[ChunkType::min()]);
- newShardB.append(chunk[ChunkType::max()]);
- prevShardB.append(prevShardChunk[ChunkType::min()]);
- prevShardB.append(prevShardChunk[ChunkType::max()]);
-
- // add required fields for ChunkType
- newShardB.append(chunk[ChunkType::name()]);
- newShardB.append(chunk[ChunkType::ns()]);
- prevShardB.append(chunk[ChunkType::name()]);
- prevShardB.append(chunk[ChunkType::ns()]);
-
- int shardNum = rand(numShards);
- newShardB.append(ChunkType::shard(),
- "shard" + string(1, (char)('A' + shardNum)));
- prevShardB.append(prevShardChunk[ChunkType::shard()]);
-
- version.incMajor();
- version.addToBSON(newShardB, ChunkType::DEPRECATED_lastmod());
- version.incMinor();
- version.addToBSON(prevShardB, ChunkType::DEPRECATED_lastmod());
-
- BSONObj newShard = newShardB.obj();
- BSONObj prevShard = prevShardB.obj();
-
- newChunksB.push_back(newShard);
- newChunksB.push_back(prevShard);
- } else {
- newChunksB.push_back(chunk);
- }
- } else {
- newChunksB.push_back(chunk);
- }
- }
-
- chunks = std::move(newChunksB);
-
- // Rarely entirely clear out our data
- if (rand(10) < 1) {
- ranges.clear();
- maxVersion = ChunkVersion(0, 0, OID());
- maxShardVersions.clear();
- }
-
- std::vector<ChunkType> chunksVector;
- convertBSONArrayToChunkTypes(chunks, &chunksVector);
-
- differ->calculateConfigDiff(nullptr, chunksVector);
-
- validate(isInverse, chunksVector, ranges, maxVersion, maxShardVersions);
- }
- }
-
-private:
- // Allow validating with and without ranges (b/c our splits won't actually be updated by the
- // diffs)
- void validate(bool isInverse,
- const std::vector<ChunkType>& chunks,
- ChunkVersion maxVersion,
- const VersionMap& maxShardVersions) {
- validate(isInverse, chunks, NULL, maxVersion, maxShardVersions);
- }
-
- void validate(bool isInverse,
- const std::vector<ChunkType>& chunks,
- const RangeMap& ranges,
- ChunkVersion maxVersion,
- const VersionMap& maxShardVersions) {
- validate(isInverse, chunks, (RangeMap*)&ranges, maxVersion, maxShardVersions);
- }
-
- // Validates that the ranges and versions are valid given the chunks
- void validate(bool isInverse,
- const std::vector<ChunkType>& chunks,
- RangeMap* ranges,
- ChunkVersion maxVersion,
- const VersionMap& maxShardVersions) {
- int chunkCount = chunks.size();
- ChunkVersion foundMaxVersion;
- VersionMap foundMaxShardVersions;
-
- //
- // Validate that all the chunks are there and collect versions
- //
-
- for (const ChunkType& chunk : chunks) {
- if (ranges != NULL) {
- // log() << "Validating chunk " << chunkDoc << " size : " << ranges->size() << " vs
- // " << chunkCount << endl;
-
- RangeMap::iterator chunkRange =
- ranges->find(isInverse ? chunk.getMax() : chunk.getMin());
-
- ASSERT(chunkRange != ranges->end());
- ASSERT(chunkRange->second.woCompare(isInverse ? chunk.getMin() : chunk.getMax()) ==
- 0);
- }
-
- ChunkVersion version = chunk.getVersion();
- if (version > foundMaxVersion)
- foundMaxVersion = version;
-
- ChunkVersion shardMaxVersion = foundMaxShardVersions[chunk.getShard()];
- if (version > shardMaxVersion) {
- foundMaxShardVersions[chunk.getShard()] = version;
- }
- }
-
- // Make sure all chunks are accounted for
- if (ranges != NULL)
- ASSERT(chunkCount == (int)ranges->size());
-
- // log() << "Validating that all shard versions are up to date..." << endl;
-
- // Validate that all the versions are the same
- ASSERT(foundMaxVersion.equals(maxVersion));
-
- for (VersionMap::iterator it = foundMaxShardVersions.begin();
- it != foundMaxShardVersions.end();
- it++) {
- ChunkVersion foundVersion = it->second;
- VersionMap::const_iterator maxIt = maxShardVersions.find(it->first);
-
- ASSERT(maxIt != maxShardVersions.end());
- ASSERT(foundVersion.equals(maxIt->second));
- }
- // Make sure all shards are accounted for
- ASSERT(foundMaxShardVersions.size() == maxShardVersions.size());
- }
-};
-
-TEST_F(ChunkDiffUnitTest, Normal) {
- runTest(false);
-}
-
-TEST_F(ChunkDiffUnitTest, Inverse) {
- runTest(true);
-}
-
-} // namespace
-} // namespace mongo
diff --git a/src/mongo/s/chunk_manager_query_test.cpp b/src/mongo/s/chunk_manager_query_test.cpp
index 4651de4d037..d9ddb4ec980 100644
--- a/src/mongo/s/chunk_manager_query_test.cpp
+++ b/src/mongo/s/chunk_manager_query_test.cpp
@@ -33,15 +33,15 @@
#include <set>
#include "mongo/db/query/collation/collator_interface_mock.h"
+#include "mongo/s/catalog_cache_test_fixture.h"
#include "mongo/s/chunk_manager.h"
-#include "mongo/s/chunk_manager_test_fixture.h"
namespace mongo {
namespace {
const NamespaceString kNss("TestDB", "TestColl");
-class ChunkManagerQueryTest : public ChunkManagerTestFixture {
+class ChunkManagerQueryTest : public CatalogCacheTestFixture {
protected:
void runQueryTest(const BSONObj& shardKey,
std::unique_ptr<CollatorInterface> defaultCollator,
diff --git a/src/mongo/s/sharding_test_fixture.cpp b/src/mongo/s/sharding_test_fixture.cpp
index c1bf99c59e8..18ae520e062 100644
--- a/src/mongo/s/sharding_test_fixture.cpp
+++ b/src/mongo/s/sharding_test_fixture.cpp
@@ -94,16 +94,23 @@ ShardingTestFixture::~ShardingTestFixture() = default;
const Seconds ShardingTestFixture::kFutureTimeout{5};
void ShardingTestFixture::setUp() {
- _service = stdx::make_unique<ServiceContextNoop>();
- _service->setFastClockSource(stdx::make_unique<ClockSourceMock>());
- _service->setPreciseClockSource(stdx::make_unique<ClockSourceMock>());
- _service->setTickSource(stdx::make_unique<TickSourceMock>());
- auto tlMock = stdx::make_unique<transport::TransportLayerMock>();
- _transportLayer = tlMock.get();
- _service->addAndStartTransportLayer(std::move(tlMock));
- CollatorFactoryInterface::set(_service.get(), stdx::make_unique<CollatorFactoryMock>());
+ {
+ auto service = stdx::make_unique<ServiceContextNoop>();
+ service->setFastClockSource(stdx::make_unique<ClockSourceMock>());
+ service->setPreciseClockSource(stdx::make_unique<ClockSourceMock>());
+ service->setTickSource(stdx::make_unique<TickSourceMock>());
+ auto tlMock = stdx::make_unique<transport::TransportLayerMock>();
+ _transportLayer = tlMock.get();
+ service->addAndStartTransportLayer(std::move(tlMock));
+
+ // Set the newly created service context to be the current global context so that
+ // tests which invoke code still referencing getGlobalServiceContext() work properly.
+ setGlobalServiceContext(std::move(service));
+ }
+
+ CollatorFactoryInterface::set(serviceContext(), stdx::make_unique<CollatorFactoryMock>());
_transportSession = transport::MockSession::create(_transportLayer);
- _client = _service->makeClient("ShardingTestFixture", _transportSession);
+ _client = serviceContext()->makeClient("ShardingTestFixture", _transportSession);
_opCtx = _client->makeOperationContext();
// Set up executor pool used for most operations.
@@ -171,7 +178,7 @@ void ShardingTestFixture::setUp() {
nullptr,
stdx::make_unique<CatalogCache>(),
std::move(shardRegistry),
- stdx::make_unique<ClusterCursorManager>(_service->getPreciseClockSource()),
+ stdx::make_unique<ClusterCursorManager>(serviceContext()->getPreciseClockSource()),
stdx::make_unique<BalancerConfiguration>(),
std::move(executorPool),
_mockNetwork);
@@ -185,7 +192,6 @@ void ShardingTestFixture::tearDown() {
_transportSession.reset();
_opCtx.reset();
_client.reset();
- _service.reset();
}
void ShardingTestFixture::shutdownExecutor() {
@@ -236,9 +242,7 @@ DistLockManagerMock* ShardingTestFixture::distLock() const {
}
ServiceContext* ShardingTestFixture::serviceContext() const {
- invariant(_service);
-
- return _service.get();
+ return getGlobalServiceContext();
}
OperationContext* ShardingTestFixture::operationContext() const {
diff --git a/src/mongo/s/sharding_test_fixture.h b/src/mongo/s/sharding_test_fixture.h
index 8bd7ad1844a..072f7234319 100644
--- a/src/mongo/s/sharding_test_fixture.h
+++ b/src/mongo/s/sharding_test_fixture.h
@@ -205,7 +205,6 @@ protected:
long long expectedTerm) const;
private:
- std::unique_ptr<ServiceContext> _service;
ServiceContext::UniqueClient _client;
ServiceContext::UniqueOperationContext _opCtx;
transport::TransportLayerMock* _transportLayer;