author    Paolo Polato <paolo.polato@mongodb.com>  2021-09-20 07:30:52 +0000
committer Evergreen Agent <no-reply@evergreen.mongodb.com>  2021-09-20 20:45:29 +0000
commit    4ef58d7c0b03435f5a6b773998296a65f71677b2 (patch)
tree      ffeabeb476aa8f77a65f82deffb72276f3ee3aae
parent    211007fa4a705c02e7c373dd6fc148aa4de3a038 (diff)
download  mongo-4ef58d7c0b03435f5a6b773998296a65f71677b2.tar.gz
SERVER-59120 Add test coverage to commitChunksMerge()
(cherry picked from commit b589595b1a0f9decf4473cb516a98725f2a60439) (cherry picked from commit 0ae27b4dcc926864ac1a2ae56a0d24bcb591125c)
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_merge_chunks_test.cpp  541
-rw-r--r--  src/mongo/s/config_server_test_fixture.cpp                             35
-rw-r--r--  src/mongo/s/config_server_test_fixture.h                                7
3 files changed, 575 insertions(+), 8 deletions(-)
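
For orientation before the diff itself: every new test below drives the UUID-based merge commit path through the same call shape. A minimal sketch of that call, assembled purely from the call sites visible in this diff (the error codes named in the comment come from the NonExistingNamespace and NonMatchingEpochsOfChunkAndRequestErrors tests):

    // Merge all chunks of 'kNamespace' lying in [{a: 1}, {a: 10}) on the shard.
    // The UUID must match the one stored in config.collections, otherwise the
    // call fails with InvalidUUID; a namespace without routing metadata fails
    // with IllegalOperation.
    const auto collUuid = UUID::gen();
    ChunkRange rangeToBeMerged(BSON("a" << 1), BSON("a" << 10));
    Timestamp validAfter{100, 0};
    ASSERT_OK(ShardingCatalogManager::get(operationContext())
                  ->commitChunksMerge(operationContext(),
                                      kNamespace,
                                      collUuid,
                                      rangeToBeMerged,
                                      kShardId,
                                      validAfter));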
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_merge_chunks_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_merge_chunks_test.cpp
index 2484319ff5e..ecf0507d682 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_merge_chunks_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_merge_chunks_test.cpp
@@ -43,8 +43,533 @@ namespace {
using MergeChunkTest = ConfigServerTestFixture;
const NamespaceString kNamespace("TestDB.TestColl");
+const std::string kShardName("shard0000");
+const ShardId kShardId(kShardName);
+const KeyPattern kKeyPattern(BSON("a" << 1));
+
TEST_F(MergeChunkTest, MergeExistingChunksCorrectlyShouldSucceed) {
+ const auto collUuid = UUID::gen();
+ ShardType shard;
+ shard.setName(kShardName);
+ shard.setHost(kShardName + ":12");
+ setupShards({shard});
+
+ ChunkType chunk;
+ chunk.setName(OID::gen().toString());
+ chunk.setNS(kNamespace);
+
+ auto origVersion = ChunkVersion(1, 0, OID::gen());
+ chunk.setVersion(origVersion);
+ chunk.setShard(kShardId);
+
+ // Construct chunk to be merged
+ auto chunk2(chunk);
+ chunk2.setName(OID::gen().toString());
+
+ auto chunkMin = BSON("a" << 1);
+ auto chunkBound = BSON("a" << 5);
+ auto chunkMax = BSON("a" << 10);
+ // first chunk boundaries
+ chunk.setMin(chunkMin);
+ chunk.setMax(chunkBound);
+ // second chunk boundaries
+ chunk2.setMin(chunkBound);
+ chunk2.setMax(chunkMax);
+
+ setupCollection(kNamespace, collUuid, kKeyPattern, {chunk, chunk2});
+ ChunkRange rangeToBeMerged(chunk.getMin(), chunk2.getMax());
+
+ Timestamp validAfter{100, 0};
+
+ ASSERT_OK(
+ ShardingCatalogManager::get(operationContext())
+ ->commitChunksMerge(
+ operationContext(), kNamespace, collUuid, rangeToBeMerged, kShardId, validAfter));
+
+ auto findResponse = uassertStatusOK(
+ getConfigShard()->exhaustiveFindOnConfig(operationContext(),
+ ReadPreferenceSetting{ReadPreference::PrimaryOnly},
+ repl::ReadConcernLevel::kLocalReadConcern,
+ ChunkType::ConfigNS,
+ BSON(ChunkType::ns() << kNamespace.toString()),
+ BSON(ChunkType::lastmod << -1),
+ boost::none));
+
+ const auto& chunksVector = findResponse.docs;
+
+ // There should be exactly one chunk left in the collection
+ ASSERT_EQ(1u, chunksVector.size());
+
+ // MergedChunk should have range [chunkMin, chunkMax]
+ auto mergedChunk = uassertStatusOK(ChunkType::fromConfigBSON(chunksVector.front()));
+ ASSERT_BSONOBJ_EQ(chunkMin, mergedChunk.getMin());
+ ASSERT_BSONOBJ_EQ(chunkMax, mergedChunk.getMax());
+
+ {
+ // Check for increment on mergedChunk's minor version
+ ASSERT_EQ(origVersion.majorVersion(), mergedChunk.getVersion().majorVersion());
+ ASSERT_EQ(origVersion.minorVersion() + 1, mergedChunk.getVersion().minorVersion());
+ }
+
+ // Make sure history is there
+ ASSERT_EQ(1UL, mergedChunk.getHistory().size());
+ ASSERT_EQ(validAfter, mergedChunk.getHistory().front().getValidAfter());
+}
+
+TEST_F(MergeChunkTest, MergeSeveralChunksCorrectlyShouldSucceed) {
+ const auto collUuid = UUID::gen();
+ ShardType shard;
+ shard.setName(kShardName);
+ shard.setHost(kShardName + ":12");
+ setupShards({shard});
+
+ ChunkType chunk;
+ chunk.setName(OID::gen().toString());
+ chunk.setNS(kNamespace);
+
+ auto origVersion = ChunkVersion(1, 0, OID::gen());
+ chunk.setVersion(origVersion);
+ chunk.setShard(kShardId);
+
+ // Construct chunks to be merged
+ auto chunk2(chunk);
+ auto chunk3(chunk);
+ chunk2.setName(OID::gen().toString());
+ chunk3.setName(OID::gen().toString());
+
+ auto chunkMin = BSON("a" << 1);
+ auto chunkBound = BSON("a" << 5);
+ auto chunkBound2 = BSON("a" << 7);
+ auto chunkMax = BSON("a" << 10);
+ // first chunk boundaries
+ chunk.setMin(chunkMin);
+ chunk.setMax(chunkBound);
+ // second chunk boundaries
+ chunk2.setMin(chunkBound);
+ chunk2.setMax(chunkBound2);
+ // third chunk boundaries
+ chunk3.setMin(chunkBound2);
+ chunk3.setMax(chunkMax);
+
+ // Set up the collection and define the range spanning the three chunks to be merged
+ setupCollection(kNamespace, collUuid, kKeyPattern, {chunk, chunk2, chunk3});
+ ChunkRange rangeToBeMerged(chunk.getMin(), chunk3.getMax());
+
+ Timestamp validAfter{100, 0};
+
+ ASSERT_OK(
+ ShardingCatalogManager::get(operationContext())
+ ->commitChunksMerge(
+ operationContext(), kNamespace, collUuid, rangeToBeMerged, kShardId, validAfter));
+
+ auto findResponse = uassertStatusOK(
+ getConfigShard()->exhaustiveFindOnConfig(operationContext(),
+ ReadPreferenceSetting{ReadPreference::PrimaryOnly},
+ repl::ReadConcernLevel::kLocalReadConcern,
+ ChunkType::ConfigNS,
+ BSON(ChunkType::ns() << kNamespace.toString()),
+ BSON(ChunkType::lastmod << -1),
+ boost::none));
+
+ const auto& chunksVector = findResponse.docs;
+
+ // There should be exactly one chunk left in the collection
+ ASSERT_EQ(1u, chunksVector.size());
+
+ // MergedChunk should have range [chunkMin, chunkMax]
+ auto mergedChunk = uassertStatusOK(ChunkType::fromConfigBSON(chunksVector.front()));
+ ASSERT_BSONOBJ_EQ(chunkMin, mergedChunk.getMin());
+ ASSERT_BSONOBJ_EQ(chunkMax, mergedChunk.getMax());
+
+ {
+ // Check for increment on mergedChunk's minor version
+ ASSERT_EQ(origVersion.majorVersion(), mergedChunk.getVersion().majorVersion());
+ ASSERT_EQ(origVersion.minorVersion() + 1, mergedChunk.getVersion().minorVersion());
+ }
+
+ // Make sure history is there
+ ASSERT_EQ(1UL, mergedChunk.getHistory().size());
+ ASSERT_EQ(validAfter, mergedChunk.getHistory().front().getValidAfter());
+}
+
+TEST_F(MergeChunkTest, NewMergeShouldClaimHighestVersion) {
+ const auto collUuid = UUID::gen();
+ ShardType shard;
+ shard.setName(kShardName);
+ shard.setHost(kShardName + ":12");
+ setupShards({shard});
+
+ ChunkType chunk, otherChunk;
+ chunk.setName(OID::gen().toString());
+ chunk.setNS(kNamespace);
+ otherChunk.setName(OID::gen().toString());
+ otherChunk.setNS(kNamespace);
+ auto collEpoch = OID::gen();
+
+ auto origVersion = ChunkVersion(1, 2, collEpoch);
+ chunk.setVersion(origVersion);
+ chunk.setShard(kShardId);
+
+ // Construct chunk to be merged
+ auto chunk2(chunk);
+ chunk2.setName(OID::gen().toString());
+
+ auto chunkMin = BSON("a" << 1);
+ auto chunkBound = BSON("a" << 5);
+ auto chunkMax = BSON("a" << 10);
+ // first chunk boundaries
+ chunk.setMin(chunkMin);
+ chunk.setMax(chunkBound);
+ // second chunk boundaries
+ chunk2.setMin(chunkBound);
+ chunk2.setMax(chunkMax);
+
+ // Set up other chunk with competing version
+ auto competingVersion = ChunkVersion(2, 1, collEpoch);
+ otherChunk.setVersion(competingVersion);
+ otherChunk.setShard(kShardId);
+ otherChunk.setMin(BSON("a" << 10));
+ otherChunk.setMax(BSON("a" << 20));
+
+ setupCollection(kNamespace, collUuid, kKeyPattern, {chunk, chunk2, otherChunk});
+ ChunkRange rangeToBeMerged(chunk.getMin(), chunk2.getMax());
+
+ Timestamp validAfter{100, 0};
+
+ ASSERT_OK(
+ ShardingCatalogManager::get(operationContext())
+ ->commitChunksMerge(
+ operationContext(), kNamespace, collUuid, rangeToBeMerged, kShardId, validAfter));
+
+ auto findResponse = uassertStatusOK(
+ getConfigShard()->exhaustiveFindOnConfig(operationContext(),
+ ReadPreferenceSetting{ReadPreference::PrimaryOnly},
+ repl::ReadConcernLevel::kLocalReadConcern,
+ ChunkType::ConfigNS,
+ BSON(ChunkType::ns() << kNamespace.toString()),
+ BSON(ChunkType::lastmod << -1),
+ boost::none));
+
+ const auto& chunksVector = findResponse.docs;
+
+ // There should be exactly two chunks left in the collection: one merged, one competing
+ ASSERT_EQ(2u, chunksVector.size());
+
+ // MergedChunk should have range [chunkMin, chunkMax]
+ auto mergedChunk = uassertStatusOK(ChunkType::fromConfigBSON(chunksVector.front()));
+ ASSERT_BSONOBJ_EQ(chunkMin, mergedChunk.getMin());
+ ASSERT_BSONOBJ_EQ(chunkMax, mergedChunk.getMax());
+
+ {
+ // Check for minor increment on collection version
+ ASSERT_EQ(competingVersion.majorVersion(), mergedChunk.getVersion().majorVersion());
+ ASSERT_EQ(competingVersion.minorVersion() + 1, mergedChunk.getVersion().minorVersion());
+ }
+
+ // Make sure history is there
+ ASSERT_EQ(1UL, mergedChunk.getHistory().size());
+ ASSERT_EQ(validAfter, mergedChunk.getHistory().front().getValidAfter());
+}
+
+TEST_F(MergeChunkTest, MergeLeavesOtherChunksAlone) {
+ const auto collUuid = UUID::gen();
+ ShardType shard;
+ shard.setName(kShardName);
+ shard.setHost(kShardName + ":12");
+ setupShards({shard});
+
+ ChunkType chunk;
+ chunk.setName(OID::gen().toString());
+ chunk.setNS(kNamespace);
+
+ auto origVersion = ChunkVersion(1, 2, OID::gen());
+ chunk.setVersion(origVersion);
+ chunk.setShard(kShardId);
+
+ // Construct chunk to be merged
+ auto chunk2(chunk);
+ chunk2.setName(OID::gen().toString());
+
+ auto chunkMin = BSON("a" << 1);
+ auto chunkBound = BSON("a" << 5);
+ auto chunkMax = BSON("a" << 10);
+ // first chunk boundaries
+ chunk.setMin(chunkMin);
+ chunk.setMax(chunkBound);
+ // second chunk boundaries
+ chunk2.setMin(chunkBound);
+ chunk2.setMax(chunkMax);
+
+ // Set up unmerged chunk
+ auto otherChunk(chunk);
+ otherChunk.setName(OID::gen().toString());
+ otherChunk.setMin(BSON("a" << 10));
+ otherChunk.setMax(BSON("a" << 20));
+
+ setupCollection(kNamespace, collUuid, kKeyPattern, {chunk, chunk2, otherChunk});
+ ChunkRange rangeToBeMerged(chunk.getMin(), chunk2.getMax());
+
+ Timestamp validAfter{1};
+
+ ASSERT_OK(
+ ShardingCatalogManager::get(operationContext())
+ ->commitChunksMerge(
+ operationContext(), kNamespace, collUuid, rangeToBeMerged, kShardId, validAfter));
+
+ auto findResponse = uassertStatusOK(
+ getConfigShard()->exhaustiveFindOnConfig(operationContext(),
+ ReadPreferenceSetting{ReadPreference::PrimaryOnly},
+ repl::ReadConcernLevel::kLocalReadConcern,
+ ChunkType::ConfigNS,
+ BSON(ChunkType::ns() << kNamespace.toString()),
+ BSON(ChunkType::lastmod << -1),
+ boost::none));
+
+ const auto& chunksVector = findResponse.docs;
+
+ // There should be exactly two chunks left in the collection: one merged, one untouched
+ ASSERT_EQ(2u, chunksVector.size());
+
+ // MergedChunk should have range [chunkMin, chunkMax]
+ auto mergedChunk = uassertStatusOK(ChunkType::fromConfigBSON(chunksVector.front()));
+ ASSERT_BSONOBJ_EQ(chunkMin, mergedChunk.getMin());
+ ASSERT_BSONOBJ_EQ(chunkMax, mergedChunk.getMax());
+
+ {
+ // Check for increment on mergedChunk's minor version
+ ASSERT_EQ(origVersion.majorVersion(), mergedChunk.getVersion().majorVersion());
+ ASSERT_EQ(origVersion.minorVersion() + 1, mergedChunk.getVersion().minorVersion());
+ }
+
+ // OtherChunk should have been left alone
+ auto foundOtherChunk = uassertStatusOK(ChunkType::fromConfigBSON(chunksVector.back()));
+ ASSERT_BSONOBJ_EQ(otherChunk.getMin(), foundOtherChunk.getMin());
+ ASSERT_BSONOBJ_EQ(otherChunk.getMax(), foundOtherChunk.getMax());
+}
+
+TEST_F(MergeChunkTest, NonExistingNamespace) {
+ const auto collUuid = UUID::gen();
+ ShardType shard;
+ shard.setName(kShardName);
+ shard.setHost(kShardName + ":12");
+ setupShards({shard});
+
+ ChunkType chunk;
+ chunk.setNS(kNamespace);
+
+ auto origVersion = ChunkVersion(1, 0, OID::gen());
+ chunk.setVersion(origVersion);
+ chunk.setShard(kShardId);
+
+ // Construct chunk to be merged
+ auto chunk2(chunk);
+
+ auto chunkMin = BSON("a" << 1);
+ auto chunkBound = BSON("a" << 5);
+ auto chunkMax = BSON("a" << 10);
+ // first chunk boundaries
+ chunk.setMin(chunkMin);
+ chunk.setMax(chunkBound);
+ chunk2.setMin(chunkBound);
+ chunk2.setMax(chunkMax);
+
+ setupCollection(kNamespace, collUuid, kKeyPattern, {chunk, chunk2});
+ ChunkRange rangeToBeMerged(chunk.getMin(), chunk2.getMax());
+
+ Timestamp validAfter{1};
+
+ auto mergeStatus = ShardingCatalogManager::get(operationContext())
+ ->commitChunksMerge(operationContext(),
+ NamespaceString("TestDB.NonExistingColl"),
+ collUuid,
+ rangeToBeMerged,
+ kShardId,
+ validAfter);
+ ASSERT_EQ(ErrorCodes::IllegalOperation, mergeStatus);
+}
+
+TEST_F(MergeChunkTest, NonMatchingEpochsOfChunkAndRequestErrors) {
+ const auto collUuid = UUID::gen();
+ ShardType shard;
+ shard.setName(kShardName);
+ shard.setHost(kShardName + ":12");
+ setupShards({shard});
+
+ ChunkType chunk;
+ chunk.setNS(kNamespace);
+
+ auto origVersion = ChunkVersion(1, 0, OID::gen());
+ chunk.setVersion(origVersion);
+ chunk.setShard(kShardId);
+
+ // Construct chunk to be merged
+ auto chunk2(chunk);
+
+ auto chunkMin = BSON("a" << 1);
+ auto chunkBound = BSON("a" << 5);
+ auto chunkMax = BSON("a" << 10);
+ // first chunk boundaries
+ chunk.setMin(chunkMin);
+ chunk.setMax(chunkBound);
+ chunk2.setMin(chunkBound);
+ chunk2.setMax(chunkMax);
+
+ setupCollection(kNamespace, collUuid, kKeyPattern, {chunk, chunk2});
+ ChunkRange rangeToBeMerged(chunk.getMin(), chunk2.getMax());
+
+ Timestamp validAfter{1};
+
+ auto mergeStatus =
+ ShardingCatalogManager::get(operationContext())
+ ->commitChunksMerge(
+ operationContext(), kNamespace, UUID::gen(), rangeToBeMerged, kShardId, validAfter);
+ ASSERT_EQ(ErrorCodes::InvalidUUID, mergeStatus);
+}
+
+TEST_F(MergeChunkTest, MergeAlreadyHappenedSucceeds) {
+ const auto collUuid = UUID::gen();
+ ShardType shard;
+ shard.setName(kShardName);
+ shard.setHost(kShardName + ":12");
+ setupShards({shard});
+
+ ChunkType chunk;
+ chunk.setName(OID::gen().toString());
+ chunk.setNS(kNamespace);
+
+ auto origVersion = ChunkVersion(1, 0, OID::gen());
+ chunk.setVersion(origVersion);
+ chunk.setShard(kShardId);
+
+ // Construct chunk to be merged
+ auto chunk2(chunk);
+ chunk2.setName(OID::gen().toString());
+
+ auto chunkMin = BSON("a" << 1);
+ auto chunkBound = BSON("a" << 5);
+ auto chunkMax = BSON("a" << 10);
+ // first chunk boundaries
+ chunk.setMin(chunkMin);
+ chunk.setMax(chunkBound);
+ // second chunk boundaries
+ chunk2.setMin(chunkBound);
+ chunk2.setMax(chunkMax);
+
+ ChunkType mergedChunk(chunk);
+ auto mergedVersion = chunk.getVersion();
+ mergedVersion.incMinor();
+ mergedChunk.setVersion(mergedVersion);
+ mergedChunk.setMax(chunkMax);
+
+ setupCollection(kNamespace, collUuid, kKeyPattern, {mergedChunk});
+ ChunkRange rangeToBeMerged(chunk.getMin(), chunk2.getMax());
+
+ Timestamp validAfter{1};
+
+ ASSERT_OK(
+ ShardingCatalogManager::get(operationContext())
+ ->commitChunksMerge(
+ operationContext(), kNamespace, collUuid, rangeToBeMerged, kShardId, validAfter));
+
+ // Verify that no change to config.chunks happened.
+ auto findResponse = uassertStatusOK(
+ getConfigShard()->exhaustiveFindOnConfig(operationContext(),
+ ReadPreferenceSetting{ReadPreference::PrimaryOnly},
+ repl::ReadConcernLevel::kLocalReadConcern,
+ ChunkType::ConfigNS,
+ BSON(ChunkType::ns() << kNamespace.toString()),
+ BSON(ChunkType::lastmod << -1),
+ boost::none));
+
+ const auto& chunksVector = findResponse.docs;
+
+ // There should be exactly one chunk left in the collection
+ ASSERT_EQ(1u, chunksVector.size());
+
+ // The surviving chunk should be identical to the pre-existing merged chunk
+ ChunkType foundChunk = uassertStatusOK(ChunkType::fromConfigBSON(chunksVector.front()));
+ ASSERT_BSONOBJ_EQ(mergedChunk.toConfigBSON(), foundChunk.toConfigBSON());
+}
+
+TEST_F(MergeChunkTest, MergingChunksWithDollarPrefixShouldSucceed) {
+ const auto collUuid = UUID::gen();
+ ShardType shard;
+ shard.setName(kShardName);
+ shard.setHost(kShardName + ":12");
+ setupShards({shard});
+
+ ChunkType chunk1;
+ chunk1.setName(OID::gen().toString());
+ chunk1.setNS(kNamespace);
+
+ auto origVersion = ChunkVersion(1, 0, OID::gen());
+ chunk1.setVersion(origVersion);
+ chunk1.setShard(kShardId);
+
+ auto chunk2(chunk1);
+ auto chunk3(chunk1);
+ chunk2.setName(OID::gen().toString());
+ chunk3.setName(OID::gen().toString());
+
+ auto chunkMin = BSON("a" << kMinBSONKey);
+ auto chunkBound1 = BSON("a" << BSON("$maxKey" << 1));
+ auto chunkBound2 = BSON("a" << BSON("$mixKey" << 1));
+ auto chunkMax = BSON("a" << kMaxBSONKey);
+
+ // first chunk boundaries
+ chunk1.setMin(chunkMin);
+ chunk1.setMax(chunkBound1);
+ // second chunk boundaries
+ chunk2.setMin(chunkBound1);
+ chunk2.setMax(chunkBound2);
+ // third chunk boundaries
+ chunk3.setMin(chunkBound2);
+ chunk3.setMax(chunkMax);
+
+ setupCollection(kNamespace, collUuid, kKeyPattern, {chunk1, chunk2, chunk3});
+ ChunkRange rangeToBeMerged(chunk1.getMin(), chunk3.getMax());
+
+ Timestamp validAfter{100, 0};
+
+ ASSERT_OK(
+ ShardingCatalogManager::get(operationContext())
+ ->commitChunksMerge(
+ operationContext(), kNamespace, collUuid, rangeToBeMerged, kShardId, validAfter));
+
+ auto findResponse = uassertStatusOK(
+ getConfigShard()->exhaustiveFindOnConfig(operationContext(),
+ ReadPreferenceSetting{ReadPreference::PrimaryOnly},
+ repl::ReadConcernLevel::kLocalReadConcern,
+ ChunkType::ConfigNS,
+ BSON(ChunkType::ns() << kNamespace.toString()),
+ BSON(ChunkType::lastmod << -1),
+ boost::none));
+
+ const auto& chunksVector = findResponse.docs;
+
+ // There should be exactly one chunk left in the collection
+ ASSERT_EQ(1u, chunksVector.size());
+
+ // MergedChunk should have range [chunkMin, chunkMax]
+ auto mergedChunk = uassertStatusOK(ChunkType::fromConfigBSON(chunksVector.front()));
+ ASSERT_BSONOBJ_EQ(chunkMin, mergedChunk.getMin());
+ ASSERT_BSONOBJ_EQ(chunkMax, mergedChunk.getMax());
+
+ {
+ // Check for increment on mergedChunk's minor version
+ ASSERT_EQ(origVersion.majorVersion(), mergedChunk.getVersion().majorVersion());
+ ASSERT_EQ(origVersion.minorVersion() + 1, mergedChunk.getVersion().minorVersion());
+ }
+
+ // Make sure history is there
+ ASSERT_EQ(1UL, mergedChunk.getHistory().size());
+ ASSERT_EQ(validAfter, mergedChunk.getHistory().front().getValidAfter());
+}
+
+TEST_F(MergeChunkTest, MergeExistingChunksCorrectlyShouldSucceedWithLegacyMethod) {
ChunkType chunk;
chunk.setNS(kNamespace);
@@ -109,7 +634,7 @@ TEST_F(MergeChunkTest, MergeExistingChunksCorrectlyShouldSucceed) {
ASSERT_EQ(validAfter, mergedChunk.getHistory().front().getValidAfter());
}
-TEST_F(MergeChunkTest, MergeSeveralChunksCorrectlyShouldSucceed) {
+TEST_F(MergeChunkTest, MergeSeveralChunksCorrectlyShouldSucceedWithLegacyMethod) {
ChunkType chunk;
chunk.setNS(kNamespace);
@@ -180,7 +705,7 @@ TEST_F(MergeChunkTest, MergeSeveralChunksCorrectlyShouldSucceed) {
ASSERT_EQ(validAfter, mergedChunk.getHistory().front().getValidAfter());
}
-TEST_F(MergeChunkTest, NewMergeShouldClaimHighestVersion) {
+TEST_F(MergeChunkTest, NewMergeShouldClaimHighestVersionWithLegacyMethod) {
ChunkType chunk, otherChunk;
chunk.setNS(kNamespace);
otherChunk.setNS(kNamespace);
@@ -255,7 +780,7 @@ TEST_F(MergeChunkTest, NewMergeShouldClaimHighestVersion) {
ASSERT_EQ(validAfter, mergedChunk.getHistory().front().getValidAfter());
}
-TEST_F(MergeChunkTest, MergeLeavesOtherChunksAlone) {
+TEST_F(MergeChunkTest, MergeLeavesOtherChunksAloneWithLegacyMethod) {
ChunkType chunk;
chunk.setNS(kNamespace);
@@ -327,7 +852,7 @@ TEST_F(MergeChunkTest, MergeLeavesOtherChunksAlone) {
ASSERT_BSONOBJ_EQ(otherChunk.getMax(), foundOtherChunk.getMax());
}
-TEST_F(MergeChunkTest, NonExistingNamespace) {
+TEST_F(MergeChunkTest, NonExistingNamespaceWithLegacyMethod) {
ChunkType chunk;
chunk.setNS(kNamespace);
@@ -364,7 +889,7 @@ TEST_F(MergeChunkTest, NonExistingNamespace) {
ASSERT_EQ(ErrorCodes::IllegalOperation, mergeStatus);
}
-TEST_F(MergeChunkTest, NonMatchingEpochsOfChunkAndRequestErrors) {
+TEST_F(MergeChunkTest, NonMatchingEpochsOfChunkAndRequestErrorsWithLegacyMethod) {
ChunkType chunk;
chunk.setNS(kNamespace);
@@ -401,7 +926,7 @@ TEST_F(MergeChunkTest, NonMatchingEpochsOfChunkAndRequestErrors) {
ASSERT_EQ(ErrorCodes::StaleEpoch, mergeStatus);
}
-TEST_F(MergeChunkTest, MergeAlreadyHappenedFails) {
+TEST_F(MergeChunkTest, MergeAlreadyHappenedFailsWithLegacyMethod) {
ChunkType chunk;
chunk.setNS(kNamespace);
@@ -463,7 +988,7 @@ TEST_F(MergeChunkTest, MergeAlreadyHappenedFails) {
ASSERT_BSONOBJ_EQ(mergedChunk.toConfigBSON(), foundChunk.toConfigBSON());
}
-TEST_F(MergeChunkTest, ChunkBoundariesOutOfOrderFails) {
+TEST_F(MergeChunkTest, ChunkBoundariesOutOfOrderFailsWithLegacyMethod) {
const OID epoch = OID::gen();
const std::vector<BSONObj> chunkBoundaries{
BSON("a" << 100), BSON("a" << 200), BSON("a" << 30), BSON("a" << 400)};
@@ -505,7 +1030,7 @@ TEST_F(MergeChunkTest, ChunkBoundariesOutOfOrderFails) {
operationContext(), kNamespace, epoch, chunkBoundaries, "shard0000", validAfter));
}
-TEST_F(MergeChunkTest, MergingChunksWithDollarPrefixShouldSucceed) {
+TEST_F(MergeChunkTest, MergingChunksWithDollarPrefixShouldSucceedWithLegacyMethod) {
ChunkType chunk1;
chunk1.setNS(kNamespace);
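
The renamed *WithLegacyMethod variants above keep the older entry point covered alongside the new one. The practical difference between the two paths, as a sketch based on the call sites in this diff (the legacy function name, commitChunkMerge, is inferred from the surrounding code rather than shown in full here):

    // Legacy path: the collection is identified by its epoch and the caller
    // must list every chunk boundary in the range being merged.
    ASSERT_OK(ShardingCatalogManager::get(operationContext())
                  ->commitChunkMerge(operationContext(), kNamespace, epoch,
                                     chunkBoundaries, "shard0000", validAfter));

    // New path: the collection is identified by its UUID and the caller passes
    // a single covering range; the config server resolves the chunks inside it.
    ASSERT_OK(ShardingCatalogManager::get(operationContext())
                  ->commitChunksMerge(operationContext(), kNamespace, collUuid,
                                      rangeToBeMerged, kShardId, validAfter));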
diff --git a/src/mongo/s/config_server_test_fixture.cpp b/src/mongo/s/config_server_test_fixture.cpp
index a1a12ad6469..57ba6c5894f 100644
--- a/src/mongo/s/config_server_test_fixture.cpp
+++ b/src/mongo/s/config_server_test_fixture.cpp
@@ -306,6 +306,41 @@ StatusWith<ShardType> ConfigServerTestFixture::getShardDoc(OperationContext* opC
return ShardType::fromBSON(doc.getValue());
}
+
+void ConfigServerTestFixture::setupCollection(const NamespaceString& nss,
+ const boost::optional<mongo::UUID>& uuid,
+ const KeyPattern& shardKey,
+ const std::vector<ChunkType>& chunks) {
+ auto dbDoc = findOneOnConfigCollection(
+ operationContext(), DatabaseType::ConfigNS, BSON(DatabaseType::name(nss.db().toString())));
+ if (!dbDoc.isOK()) {
+ // If the database is not set up yet, pick the first available shard as its
+ // primary to implicitly create the database entry
+ auto swShardDoc =
+ findOneOnConfigCollection(operationContext(), ShardType::ConfigNS, BSONObj());
+ invariant(swShardDoc.isOK(),
+ "At least one shard should be set up when initializing a collection");
+ auto shard = uassertStatusOK(ShardType::fromBSON(swShardDoc.getValue()));
+ setupDatabase(nss.db().toString(), ShardId(shard.getName()), true /* sharded */);
+ }
+
+ CollectionType coll;
+ coll.setNs(nss);
+ coll.setEpoch(chunks[0].getVersion().epoch());
+ coll.setUpdatedAt(Date_t::now());
+ if (uuid) {
+ coll.setUUID(uuid.get());
+ }
+ coll.setKeyPattern(shardKey);
+ ASSERT_OK(
+ insertToConfigCollection(operationContext(), CollectionType::ConfigNS, coll.toBSON()));
+
+ for (const auto& chunk : chunks) {
+ ASSERT_OK(insertToConfigCollection(
+ operationContext(), ChunkType::ConfigNS, chunk.toConfigBSON()));
+ }
+}
+
void ConfigServerTestFixture::setupChunks(const std::vector<ChunkType>& chunks) {
const NamespaceString chunkNS(ChunkType::ConfigNS);
for (const auto& chunk : chunks) {
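
The tests in the first file rely on this new fixture helper; its intended usage, mirroring those call sites (a sketch of usage, not additional fixture API):

    // Requires setupShards() to have run first: if the database does not exist
    // yet, the helper picks the first registered shard as its primary, then
    // writes the config.collections entry (epoch taken from the first chunk,
    // plus the optional UUID and shard key) and inserts every chunk into
    // config.chunks.
    setupCollection(kNamespace, collUuid, kKeyPattern, {chunk, chunk2});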
diff --git a/src/mongo/s/config_server_test_fixture.h b/src/mongo/s/config_server_test_fixture.h
index 50ea4eb4eb8..dfa70900185 100644
--- a/src/mongo/s/config_server_test_fixture.h
+++ b/src/mongo/s/config_server_test_fixture.h
@@ -102,6 +102,13 @@ public:
/**
* Set up the given collection in config.collections (with the given UUID and
* shard key) and insert its chunks into config.chunks.
*/
+ void setupCollection(const NamespaceString& nss,
+ const boost::optional<mongo::UUID>& uuid,
+ const KeyPattern& shardKey,
+ const std::vector<ChunkType>& chunks);
+
+ /**
+ * Set up the config.chunks collection to contain the given chunks.
+ */
void setupChunks(const std::vector<ChunkType>& chunks);
/**