author     Cheahuychou Mao <cheahuychou.mao@mongodb.com>    2020-06-03 16:14:11 -0400
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>    2020-06-05 00:49:50 +0000
commit     7ad1bc37609f084f7eca25045f6ac6fe87a0a8fc (patch)
tree       03d41bc2cdbf42f0b4746475423c3bd7e72927d5
parent     fe19e20cf11650f7a9c51bff1a460c651d7a36b7 (diff)
download   mongo-7ad1bc37609f084f7eca25045f6ac6fe87a0a8fc.tar.gz
SERVER-48583 Add unit tests for incremental refresh with incomplete chunks
-rw-r--r--  src/mongo/s/catalog_cache_refresh_test.cpp  216
1 file changed, 213 insertions, 3 deletions
diff --git a/src/mongo/s/catalog_cache_refresh_test.cpp b/src/mongo/s/catalog_cache_refresh_test.cpp
index b98c6f511cb..c89e670d2c5 100644
--- a/src/mongo/s/catalog_cache_refresh_test.cpp
+++ b/src/mongo/s/catalog_cache_refresh_test.cpp
@@ -230,7 +230,7 @@ TEST_F(CatalogCacheRefreshTest, CollectionBSONCorrupted) {
}
}
-TEST_F(CatalogCacheRefreshTest, NoChunksFoundForCollection) {
+TEST_F(CatalogCacheRefreshTest, FullLoadNoChunksFound) {
const OID epoch = OID::gen();
const ShardKeyPattern shardKeyPattern(BSON("_id" << 1));
@@ -261,6 +261,38 @@ TEST_F(CatalogCacheRefreshTest, NoChunksFoundForCollection) {
}
}
+TEST_F(CatalogCacheRefreshTest, IncrementalLoadNoChunksFound) {
+ const ShardKeyPattern shardKeyPattern(BSON("_id" << 1));
+
+ auto initialRoutingInfo(makeChunkManager(kNss, shardKeyPattern, nullptr, true, {}));
+ const OID epoch = initialRoutingInfo->getVersion().epoch();
+
+ ASSERT_EQ(1, initialRoutingInfo->numChunks());
+
+ auto future = scheduleRoutingInfoForcedRefresh(kNss);
+
+ // Return no chunks three times, which is how frequently the catalog cache retries
+ expectGetCollection(epoch, shardKeyPattern);
+ expectFindSendBSONObjVector(kConfigHostAndPort, {});
+
+ expectGetCollection(epoch, shardKeyPattern);
+ expectFindSendBSONObjVector(kConfigHostAndPort, {});
+
+ expectGetCollection(epoch, shardKeyPattern);
+ expectFindSendBSONObjVector(kConfigHostAndPort, {});
+
+ try {
+ auto routingInfo = future.default_timed_get();
+ auto cm = routingInfo->cm();
+ auto primary = routingInfo->db().primary();
+
+ FAIL(str::stream() << "Returning no chunks for collection did not fail and returned "
+ << (cm ? cm->toString() : routingInfo->db().primaryId().toString()));
+ } catch (const DBException& ex) {
+ ASSERT_EQ(ErrorCodes::ConflictingOperationInProgress, ex.code());
+ }
+}
+
TEST_F(CatalogCacheRefreshTest, ChunksBSONCorrupted) {
const OID epoch = OID::gen();
const ShardKeyPattern shardKeyPattern(BSON("_id" << 1));
@@ -295,7 +327,66 @@ TEST_F(CatalogCacheRefreshTest, ChunksBSONCorrupted) {
}
}
-TEST_F(CatalogCacheRefreshTest, IncompleteChunksFoundForCollection) {
+TEST_F(CatalogCacheRefreshTest, FullLoadMissingChunkWithLowestVersion) {
+ const OID epoch = OID::gen();
+ const ShardKeyPattern shardKeyPattern(BSON("_id" << 1));
+
+ auto future = scheduleRoutingInfoUnforcedRefresh(kNss);
+
+ expectGetDatabase();
+ expectGetCollection(epoch, shardKeyPattern);
+
+ const auto incompleteChunks = [&]() {
+ ChunkVersion version(1, 0, epoch);
+
+ // Chunk from (MinKey, -100) is missing (as if someone is dropping the collection
+ // concurrently) and has the lowest version.
+ version.incMinor();
+
+ ChunkType chunk2(kNss, {BSON("_id" << -100), BSON("_id" << 0)}, version, {"1"});
+ chunk2.setName(OID::gen());
+ version.incMinor();
+
+ ChunkType chunk3(kNss, {BSON("_id" << 0), BSON("_id" << 100)}, version, {"0"});
+ chunk3.setName(OID::gen());
+ version.incMinor();
+
+ ChunkType chunk4(kNss,
+ {BSON("_id" << 100), shardKeyPattern.getKeyPattern().globalMax()},
+ version,
+ {"1"});
+ chunk4.setName(OID::gen());
+ version.incMinor();
+
+ return std::vector<BSONObj>{
+ chunk2.toConfigBSON(), chunk3.toConfigBSON(), chunk4.toConfigBSON()};
+ }();
+
+ // Return incomplete set of chunks three times, which is how frequently the catalog cache
+ // retries
+ expectGetCollection(epoch, shardKeyPattern);
+ expectFindSendBSONObjVector(kConfigHostAndPort, incompleteChunks);
+
+ expectGetCollection(epoch, shardKeyPattern);
+ expectFindSendBSONObjVector(kConfigHostAndPort, incompleteChunks);
+
+ expectGetCollection(epoch, shardKeyPattern);
+ expectFindSendBSONObjVector(kConfigHostAndPort, incompleteChunks);
+
+ try {
+ auto routingInfo = future.default_timed_get();
+ auto cm = routingInfo->cm();
+ auto primary = routingInfo->db().primary();
+
+ FAIL(
+ str::stream() << "Returning incomplete chunks for collection did not fail and returned "
+ << (cm ? cm->toString() : routingInfo->db().primaryId().toString()));
+ } catch (const DBException& ex) {
+ ASSERT_EQ(ErrorCodes::ConflictingOperationInProgress, ex.code());
+ }
+}
+
+TEST_F(CatalogCacheRefreshTest, FullLoadMissingChunkWithHighestVersion) {
const OID epoch = OID::gen();
const ShardKeyPattern shardKeyPattern(BSON("_id" << 1));
@@ -308,9 +399,128 @@ TEST_F(CatalogCacheRefreshTest, IncompleteChunksFoundForCollection) {
ChunkVersion version(1, 0, epoch);
// Chunk from (MinKey, -100) is missing (as if someone is dropping the collection
- // concurrently)
+ // concurrently) and has the highest version.
+ version.incMinor();
+
+ ChunkType chunk2(kNss, {BSON("_id" << -100), BSON("_id" << 0)}, version, {"1"});
+ chunk2.setName(OID::gen());
+ version.incMinor();
+
+ ChunkType chunk3(kNss, {BSON("_id" << 0), BSON("_id" << 100)}, version, {"0"});
+ chunk3.setName(OID::gen());
+ version.incMinor();
+
+ ChunkType chunk4(kNss,
+ {BSON("_id" << 100), shardKeyPattern.getKeyPattern().globalMax()},
+ version,
+ {"1"});
+ chunk4.setName(OID::gen());
+ version.incMinor();
+
+ return std::vector<BSONObj>{
+ chunk2.toConfigBSON(), chunk3.toConfigBSON(), chunk4.toConfigBSON()};
+ }();
+
+ // Return incomplete set of chunks three times, which is how frequently the catalog cache
+ // retries
+ expectGetCollection(epoch, shardKeyPattern);
+ expectFindSendBSONObjVector(kConfigHostAndPort, incompleteChunks);
+
+ expectGetCollection(epoch, shardKeyPattern);
+ expectFindSendBSONObjVector(kConfigHostAndPort, incompleteChunks);
+
+ expectGetCollection(epoch, shardKeyPattern);
+ expectFindSendBSONObjVector(kConfigHostAndPort, incompleteChunks);
+
+ try {
+ auto routingInfo = future.default_timed_get();
+ auto cm = routingInfo->cm();
+ auto primary = routingInfo->db().primary();
+
+ FAIL(
+ str::stream() << "Returning incomplete chunks for collection did not fail and returned "
+ << (cm ? cm->toString() : routingInfo->db().primaryId().toString()));
+ } catch (const DBException& ex) {
+ ASSERT_EQ(ErrorCodes::ConflictingOperationInProgress, ex.code());
+ }
+}
+
+TEST_F(CatalogCacheRefreshTest, IncrementalLoadMissingChunkWithLowestVersion) {
+ const ShardKeyPattern shardKeyPattern(BSON("_id" << 1));
+
+ auto initialRoutingInfo(makeChunkManager(kNss, shardKeyPattern, nullptr, true, {}));
+ const OID epoch = initialRoutingInfo->getVersion().epoch();
+
+ ASSERT_EQ(1, initialRoutingInfo->numChunks());
+
+ auto future = scheduleRoutingInfoForcedRefresh(kNss);
+
+ const auto incompleteChunks = [&]() {
+ ChunkVersion version(1, 0, epoch);
+
+ // Chunk from (MinKey, -100) is missing (as if someone is dropping the collection
+ // concurrently) and has the lowest version.
+ version.incMinor();
+
+ ChunkType chunk2(kNss, {BSON("_id" << -100), BSON("_id" << 0)}, version, {"1"});
+ chunk2.setName(OID::gen());
+ version.incMinor();
+
+ ChunkType chunk3(kNss, {BSON("_id" << 0), BSON("_id" << 100)}, version, {"0"});
+ chunk3.setName(OID::gen());
version.incMinor();
+ ChunkType chunk4(kNss,
+ {BSON("_id" << 100), shardKeyPattern.getKeyPattern().globalMax()},
+ version,
+ {"1"});
+ chunk4.setName(OID::gen());
+ version.incMinor();
+
+ return std::vector<BSONObj>{
+ chunk2.toConfigBSON(), chunk3.toConfigBSON(), chunk4.toConfigBSON()};
+ }();
+
+ // Return incomplete set of chunks three times, which is how frequently the catalog cache
+ // retries
+ expectGetCollection(epoch, shardKeyPattern);
+ expectFindSendBSONObjVector(kConfigHostAndPort, incompleteChunks);
+
+ expectGetCollection(epoch, shardKeyPattern);
+ expectFindSendBSONObjVector(kConfigHostAndPort, incompleteChunks);
+
+ expectGetCollection(epoch, shardKeyPattern);
+ expectFindSendBSONObjVector(kConfigHostAndPort, incompleteChunks);
+
+ try {
+ auto routingInfo = future.default_timed_get();
+ auto cm = routingInfo->cm();
+ auto primary = routingInfo->db().primary();
+
+ FAIL(
+ str::stream() << "Returning incomplete chunks for collection did not fail and returned "
+ << (cm ? cm->toString() : routingInfo->db().primaryId().toString()));
+ } catch (const DBException& ex) {
+ ASSERT_EQ(ErrorCodes::ConflictingOperationInProgress, ex.code());
+ }
+}
+
+TEST_F(CatalogCacheRefreshTest, IncrementalLoadMissingChunkWithHighestVersion) {
+ const ShardKeyPattern shardKeyPattern(BSON("_id" << 1));
+
+ auto initialRoutingInfo(makeChunkManager(kNss, shardKeyPattern, nullptr, true, {}));
+ const OID epoch = initialRoutingInfo->getVersion().epoch();
+
+ ASSERT_EQ(1, initialRoutingInfo->numChunks());
+
+ auto future = scheduleRoutingInfoForcedRefresh(kNss);
+
+ const auto incompleteChunks = [&]() {
+ ChunkVersion version(1, 0, epoch);
+
+ // Chunk from (MinKey, -100) is missing (as if someone is dropping the collection
+ // concurrently) and has the highest version.
+
ChunkType chunk2(kNss, {BSON("_id" << -100), BSON("_id" << 0)}, version, {"1"});
chunk2.setName(OID::gen());
version.incMinor();