diff options
| field | value | date |
|---|---|---|
| author | Matthew Saltz <matthew.saltz@mongodb.com> | 2020-07-02 22:52:06 +0000 |
| committer | Evergreen Agent <no-reply@evergreen.mongodb.com> | 2020-07-14 16:03:37 +0000 |
| commit | b2d22916f3f28b1534ce3613685713ca027827e7 (patch) | |
| tree | d2fb060d40c168e912afe5e122540a55ac10ff64 /src/mongo/db/s | |
| parent | 658fccc18c525d0d9ad7aaa32a08b692b19bf0dd (diff) | |
| download | mongo-b2d22916f3f28b1534ce3613685713ca027827e7.tar.gz | |
SERVER-49233 Introduce a flag to toggle the logic for bumping collection's major version during split
Diffstat (limited to 'src/mongo/db/s')
| mode | file | changes |
|---|---|---|
| -rw-r--r-- | src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp | 7 |
| -rw-r--r-- | src/mongo/db/s/config/sharding_catalog_manager_split_chunk_test.cpp | 226 |

2 files changed, 230 insertions, 3 deletions
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp index 3133094adb4..033a96c6a25 100644 --- a/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp +++ b/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp @@ -43,6 +43,7 @@ #include "mongo/db/dbdirectclient.h" #include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" +#include "mongo/db/server_parameters.h" #include "mongo/rpc/get_status_from_command_result.h" #include "mongo/s/catalog/sharding_catalog_client.h" #include "mongo/s/catalog/type_chunk.h" @@ -55,6 +56,9 @@ #include "mongo/util/mongoutils/str.h" namespace mongo { + +MONGO_EXPORT_SERVER_PARAMETER(incrementChunkMajorVersionOnChunkSplits, bool, false); + namespace { MONGO_FAIL_POINT_DEFINE(migrationCommitVersionError); @@ -368,10 +372,11 @@ Status ShardingCatalogManager::commitChunkSplit(OperationContext* opCtx, ChunkVersion currentMaxVersion = collVersion; // Increment the major version only if the shard that owns the chunk being split has version == // collection version. See SERVER-41480 for details. 
- if (shardVersion == collVersion) { + if (incrementChunkMajorVersionOnChunkSplits.load() && shardVersion == collVersion) { currentMaxVersion.incMajor(); } + auto startKey = range.getMin(); auto newChunkBounds(splitPoints); newChunkBounds.push_back(range.getMax()); diff --git a/src/mongo/db/s/config/sharding_catalog_manager_split_chunk_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_split_chunk_test.cpp index 0140245652c..6566f7c258a 100644 --- a/src/mongo/db/s/config/sharding_catalog_manager_split_chunk_test.cpp +++ b/src/mongo/db/s/config/sharding_catalog_manager_split_chunk_test.cpp @@ -33,6 +33,7 @@ #include "mongo/client/read_preference.h" #include "mongo/db/namespace_string.h" #include "mongo/db/s/config/sharding_catalog_manager.h" +#include "mongo/db/server_parameters.h" #include "mongo/s/catalog/type_chunk.h" #include "mongo/s/config_server_test_fixture.h" @@ -40,6 +41,29 @@ namespace mongo { namespace { using SplitChunkTest = ConfigServerTestFixture; +/** + * A fixture which sets the incrementChunkMajorVersionOnChunkSplits server parameter to true. + */ +class SplitChunkWithMajorVersionIncrementTest : public ConfigServerTestFixture { +public: + void setUp() override { + ConfigServerTestFixture::setUp(); + // Ignore the return status. + std::ignore = ServerParameterSet::getGlobal() + ->getMap() + .find("incrementChunkMajorVersionOnChunkSplits") + ->second->setFromString("true"); + } + + void tearDown() override { + // Ignore the return status. 
+ std::ignore = ServerParameterSet::getGlobal() + ->getMap() + .find("incrementChunkMajorVersionOnChunkSplits") + ->second->setFromString("false"); + ConfigServerTestFixture::tearDown(); + } +}; const NamespaceString kNamespace("TestDB", "TestColl"); @@ -78,6 +102,66 @@ TEST_F(SplitChunkTest, SplitExistingChunkCorrectlyShouldSucceed) { auto chunkDoc = chunkDocStatus.getValue(); ASSERT_BSONOBJ_EQ(chunkSplitPoint, chunkDoc.getMax()); + // Check for increment on first chunkDoc's minor version + ASSERT_EQ(origVersion.majorVersion(), chunkDoc.getVersion().majorVersion()); + ASSERT_EQ(origVersion.minorVersion() + 1, chunkDoc.getVersion().minorVersion()); + + // Make sure the history is there + ASSERT_EQ(2UL, chunkDoc.getHistory().size()); + + // Second chunkDoc should have range [chunkSplitPoint, chunkMax] + auto otherChunkDocStatus = getChunkDoc(operationContext(), chunkSplitPoint); + ASSERT_OK(otherChunkDocStatus.getStatus()); + + auto otherChunkDoc = otherChunkDocStatus.getValue(); + ASSERT_BSONOBJ_EQ(chunkMax, otherChunkDoc.getMax()); + + // Check for increment on second chunkDoc's minor version + ASSERT_EQ(origVersion.majorVersion(), otherChunkDoc.getVersion().majorVersion()); + ASSERT_EQ(origVersion.minorVersion() + 2, otherChunkDoc.getVersion().minorVersion()); + + // Make sure the history is there + ASSERT_EQ(2UL, otherChunkDoc.getHistory().size()); + + // Both chunks should have the same history + ASSERT(chunkDoc.getHistory() == otherChunkDoc.getHistory()); +} + +TEST_F(SplitChunkWithMajorVersionIncrementTest, SplitExistingChunkCorrectlyShouldSucceed) { + ChunkType chunk; + chunk.setNS(kNamespace); + + auto origVersion = ChunkVersion(1, 0, OID::gen()); + chunk.setVersion(origVersion); + chunk.setShard(ShardId("shard0000")); + + auto chunkMin = BSON("a" << 1); + auto chunkMax = BSON("a" << 10); + chunk.setMin(chunkMin); + chunk.setMax(chunkMax); + chunk.setHistory({ChunkHistory(Timestamp(100, 0), ShardId("shard0000")), + ChunkHistory(Timestamp(90, 0), 
ShardId("shardY"))}); + + auto chunkSplitPoint = BSON("a" << 5); + std::vector<BSONObj> splitPoints{chunkSplitPoint}; + + setupChunks({chunk}); + + ASSERT_OK(ShardingCatalogManager::get(operationContext()) + ->commitChunkSplit(operationContext(), + kNamespace, + origVersion.epoch(), + ChunkRange(chunkMin, chunkMax), + splitPoints, + "shard0000")); + + // First chunkDoc should have range [chunkMin, chunkSplitPoint] + auto chunkDocStatus = getChunkDoc(operationContext(), chunkMin); + ASSERT_OK(chunkDocStatus.getStatus()); + + auto chunkDoc = chunkDocStatus.getValue(); + ASSERT_BSONOBJ_EQ(chunkSplitPoint, chunkDoc.getMax()); + // Check for increment on first chunkDoc's major version. ASSERT_EQ(origVersion.majorVersion() + 1, chunkDoc.getVersion().majorVersion()); ASSERT_EQ(1u, chunkDoc.getVersion().minorVersion()); @@ -139,6 +223,82 @@ TEST_F(SplitChunkTest, MultipleSplitsOnExistingChunkShouldSucceed) { auto chunkDoc = chunkDocStatus.getValue(); ASSERT_BSONOBJ_EQ(chunkSplitPoint, chunkDoc.getMax()); + // Check for increment on first chunkDoc's minor version + ASSERT_EQ(origVersion.majorVersion(), chunkDoc.getVersion().majorVersion()); + ASSERT_EQ(origVersion.minorVersion() + 1, chunkDoc.getVersion().minorVersion()); + + // Make sure the history is there + ASSERT_EQ(2UL, chunkDoc.getHistory().size()); + + // Second chunkDoc should have range [chunkSplitPoint, chunkSplitPoint2] + auto midChunkDocStatus = getChunkDoc(operationContext(), chunkSplitPoint); + ASSERT_OK(midChunkDocStatus.getStatus()); + + auto midChunkDoc = midChunkDocStatus.getValue(); + ASSERT_BSONOBJ_EQ(chunkSplitPoint2, midChunkDoc.getMax()); + + // Check for increment on second chunkDoc's minor version + ASSERT_EQ(origVersion.majorVersion(), midChunkDoc.getVersion().majorVersion()); + ASSERT_EQ(origVersion.minorVersion() + 2, midChunkDoc.getVersion().minorVersion()); + + // Make sure the history is there + ASSERT_EQ(2UL, midChunkDoc.getHistory().size()); + + // Third chunkDoc should have range 
[chunkSplitPoint2, chunkMax] + auto lastChunkDocStatus = getChunkDoc(operationContext(), chunkSplitPoint2); + ASSERT_OK(lastChunkDocStatus.getStatus()); + + auto lastChunkDoc = lastChunkDocStatus.getValue(); + ASSERT_BSONOBJ_EQ(chunkMax, lastChunkDoc.getMax()); + + // Check for increment on third chunkDoc's minor version + ASSERT_EQ(origVersion.majorVersion(), lastChunkDoc.getVersion().majorVersion()); + ASSERT_EQ(origVersion.minorVersion() + 3, lastChunkDoc.getVersion().minorVersion()); + + // Make sure the history is there + ASSERT_EQ(2UL, lastChunkDoc.getHistory().size()); + + // Both chunks should have the same history + ASSERT(chunkDoc.getHistory() == midChunkDoc.getHistory()); + ASSERT(midChunkDoc.getHistory() == lastChunkDoc.getHistory()); +} + +TEST_F(SplitChunkWithMajorVersionIncrementTest, MultipleSplitsOnExistingChunkShouldSucceed) { + ChunkType chunk; + chunk.setNS(kNamespace); + + auto origVersion = ChunkVersion(1, 0, OID::gen()); + chunk.setVersion(origVersion); + chunk.setShard(ShardId("shard0000")); + + auto chunkMin = BSON("a" << 1); + auto chunkMax = BSON("a" << 10); + chunk.setMin(chunkMin); + chunk.setMax(chunkMax); + chunk.setHistory({ChunkHistory(Timestamp(100, 0), ShardId("shard0000")), + ChunkHistory(Timestamp(90, 0), ShardId("shardY"))}); + + auto chunkSplitPoint = BSON("a" << 5); + auto chunkSplitPoint2 = BSON("a" << 7); + std::vector<BSONObj> splitPoints{chunkSplitPoint, chunkSplitPoint2}; + + setupChunks({chunk}); + + ASSERT_OK(ShardingCatalogManager::get(operationContext()) + ->commitChunkSplit(operationContext(), + kNamespace, + origVersion.epoch(), + ChunkRange(chunkMin, chunkMax), + splitPoints, + "shard0000")); + + // First chunkDoc should have range [chunkMin, chunkSplitPoint] + auto chunkDocStatus = getChunkDoc(operationContext(), chunkMin); + ASSERT_OK(chunkDocStatus.getStatus()); + + auto chunkDoc = chunkDocStatus.getValue(); + ASSERT_BSONOBJ_EQ(chunkSplitPoint, chunkDoc.getMax()); + // Check for increment on first chunkDoc's 
major version. ASSERT_EQ(origVersion.majorVersion() + 1, chunkDoc.getVersion().majorVersion()); ASSERT_EQ(1u, chunkDoc.getVersion().minorVersion()); @@ -223,6 +383,66 @@ TEST_F(SplitChunkTest, NewSplitShouldClaimHighestVersion) { auto chunkDoc = chunkDocStatus.getValue(); ASSERT_BSONOBJ_EQ(chunkSplitPoint, chunkDoc.getMax()); + // Check for increment based on the competing chunk version + ASSERT_EQ(competingVersion.majorVersion(), chunkDoc.getVersion().majorVersion()); + ASSERT_EQ(competingVersion.minorVersion() + 1, chunkDoc.getVersion().minorVersion()); + + // Second chunkDoc should have range [chunkSplitPoint, chunkMax] + auto otherChunkDocStatus = getChunkDoc(operationContext(), chunkSplitPoint); + ASSERT_OK(otherChunkDocStatus.getStatus()); + + auto otherChunkDoc = otherChunkDocStatus.getValue(); + ASSERT_BSONOBJ_EQ(chunkMax, otherChunkDoc.getMax()); + + // Check for increment based on the competing chunk version + ASSERT_EQ(competingVersion.majorVersion(), otherChunkDoc.getVersion().majorVersion()); + ASSERT_EQ(competingVersion.minorVersion() + 2, otherChunkDoc.getVersion().minorVersion()); +} + +TEST_F(SplitChunkWithMajorVersionIncrementTest, NewSplitShouldClaimHighestVersion) { + ChunkType chunk, chunk2; + chunk.setNS(kNamespace); + chunk2.setNS(kNamespace); + auto collEpoch = OID::gen(); + + // set up first chunk + auto origVersion = ChunkVersion(1, 2, collEpoch); + chunk.setVersion(origVersion); + chunk.setShard(ShardId("shard0000")); + + auto chunkMin = BSON("a" << 1); + auto chunkMax = BSON("a" << 10); + chunk.setMin(chunkMin); + chunk.setMax(chunkMax); + + std::vector<BSONObj> splitPoints; + auto chunkSplitPoint = BSON("a" << 5); + splitPoints.push_back(chunkSplitPoint); + + // set up second chunk (chunk2) + auto competingVersion = ChunkVersion(2, 1, collEpoch); + chunk2.setVersion(competingVersion); + chunk2.setShard(ShardId("shard0000")); + chunk2.setMin(BSON("a" << 10)); + chunk2.setMax(BSON("a" << 20)); + + setupChunks({chunk, chunk2}); + + 
ASSERT_OK(ShardingCatalogManager::get(operationContext()) + ->commitChunkSplit(operationContext(), + kNamespace, + collEpoch, + ChunkRange(chunkMin, chunkMax), + splitPoints, + "shard0000")); + + // First chunkDoc should have range [chunkMin, chunkSplitPoint] + auto chunkDocStatus = getChunkDoc(operationContext(), chunkMin); + ASSERT_OK(chunkDocStatus.getStatus()); + + auto chunkDoc = chunkDocStatus.getValue(); + ASSERT_BSONOBJ_EQ(chunkSplitPoint, chunkDoc.getMax()); + // Check for major version increment based on the competing chunk version. ASSERT_EQ(competingVersion.majorVersion() + 1, chunkDoc.getVersion().majorVersion()); // The minor version gets reset to 0 when the major version is incremented, and chunk splits @@ -245,7 +465,8 @@ TEST_F(SplitChunkTest, NewSplitShouldClaimHighestVersion) { ASSERT_EQ(2u, otherChunkDoc.getVersion().minorVersion()); } -TEST_F(SplitChunkTest, SplitsOnShardWithLowerShardVersionDoesNotIncreaseCollectionVersion) { +TEST_F(SplitChunkWithMajorVersionIncrementTest, + SplitsOnShardWithLowerShardVersionDoesNotIncreaseCollectionVersion) { ChunkType chunk, chunk2; chunk.setNS(kNamespace); chunk2.setNS(kNamespace); @@ -297,7 +518,8 @@ TEST_F(SplitChunkTest, SplitsOnShardWithLowerShardVersionDoesNotIncreaseCollecti ASSERT_EQ(competingVersion.minorVersion() + 2u, otherChunkDoc.getVersion().minorVersion()); } -TEST_F(SplitChunkTest, SplitsOnShardWithHighestShardVersionIncreasesCollectionVersion) { +TEST_F(SplitChunkWithMajorVersionIncrementTest, + SplitsOnShardWithHighestShardVersionIncreasesCollectionVersion) { ChunkType chunk, chunk2; chunk.setNS(kNamespace); chunk2.setNS(kNamespace); |