diff options
author | Sergi Mateo Bellido <sergi.mateo-bellido@mongodb.com> | 2021-06-01 06:48:00 +0000 |
---|---|---|
committer | Evergreen Agent <no-reply@evergreen.mongodb.com> | 2021-06-14 17:18:34 +0000 |
commit | e7f91b95941b2e636ba4715c2cea5baf5bc3e2d2 (patch) | |
tree | 6d823474977fe3bb42b0b451b8b922fef5946b10 /src/mongo/db | |
parent | 389ecf35aa15a97a3bf855518c19a4ad05075acb (diff) | |
download | mongo-e7f91b95941b2e636ba4715c2cea5baf5bc3e2d2.tar.gz |
SERVER-57313 Pass the collection epoch and timestamp when building a ChunkType from a config.chunks BSON document
- Pass the epoch and the timestamp to the functions that build
ChunkTypes from config.chunks BSON
- Fix our tests
(cherry picked from commit 7b30ab1943ecbb48e8bdbc50bf928eab09f619b5)
Diffstat (limited to 'src/mongo/db')
12 files changed, 399 insertions, 257 deletions
diff --git a/src/mongo/db/s/balancer/type_migration_test.cpp b/src/mongo/db/s/balancer/type_migration_test.cpp index 13e10d94a9f..ec1d52a8a38 100644 --- a/src/mongo/db/s/balancer/type_migration_test.cpp +++ b/src/mongo/db/s/balancer/type_migration_test.cpp @@ -48,7 +48,9 @@ const ShardId kToShard("shard0001"); const bool kWaitForDelete{true}; TEST(MigrationTypeTest, ConvertFromMigrationInfo) { - const ChunkVersion version(1, 2, OID::gen(), boost::none /* timestamp */); + const auto collEpoch = OID::gen(); + const auto collTimestamp = boost::none; + const ChunkVersion version(1, 2, collEpoch, collTimestamp); BSONObjBuilder chunkBuilder; chunkBuilder.append(ChunkType::name(), OID::gen()); @@ -58,7 +60,8 @@ TEST(MigrationTypeTest, ConvertFromMigrationInfo) { version.appendLegacyWithField(&chunkBuilder, ChunkType::lastmod()); chunkBuilder.append(ChunkType::shard(), kFromShard.toString()); - ChunkType chunkType = assertGet(ChunkType::fromConfigBSON(chunkBuilder.obj())); + ChunkType chunkType = + assertGet(ChunkType::fromConfigBSON(chunkBuilder.obj(), collEpoch, collTimestamp)); ASSERT_OK(chunkType.validate()); MigrateInfo migrateInfo(kToShard, diff --git a/src/mongo/db/s/config/config_server_test_fixture.cpp b/src/mongo/db/s/config/config_server_test_fixture.cpp index 2497a538152..f4c7a461e9b 100644 --- a/src/mongo/db/s/config/config_server_test_fixture.cpp +++ b/src/mongo/db/s/config/config_server_test_fixture.cpp @@ -345,14 +345,17 @@ void ConfigServerTestFixture::setupCollection(const NamespaceString& nss, } } -StatusWith<ChunkType> ConfigServerTestFixture::getChunkDoc(OperationContext* opCtx, - const BSONObj& minKey) { +StatusWith<ChunkType> ConfigServerTestFixture::getChunkDoc( + OperationContext* opCtx, + const BSONObj& minKey, + const OID& collEpoch, + const boost::optional<Timestamp>& collTimestamp) { auto doc = findOneOnConfigCollection(opCtx, ChunkType::ConfigNS, BSON(ChunkType::min() << minKey)); if (!doc.isOK()) return doc.getStatus(); - return 
ChunkType::fromConfigBSON(doc.getValue()); + return ChunkType::fromConfigBSON(doc.getValue(), collEpoch, collTimestamp); } void ConfigServerTestFixture::setupDatabase(const std::string& dbName, diff --git a/src/mongo/db/s/config/config_server_test_fixture.h b/src/mongo/db/s/config/config_server_test_fixture.h index e19c528485a..0e1ee919469 100644 --- a/src/mongo/db/s/config/config_server_test_fixture.h +++ b/src/mongo/db/s/config/config_server_test_fixture.h @@ -108,7 +108,10 @@ protected: /** * Retrieves the chunk document from the config server. */ - StatusWith<ChunkType> getChunkDoc(OperationContext* opCtx, const BSONObj& minKey); + StatusWith<ChunkType> getChunkDoc(OperationContext* opCtx, + const BSONObj& minKey, + const OID& collEpoch, + const boost::optional<Timestamp>& collTimestamp); /** * Inserts a document for the database into the config.databases collection. diff --git a/src/mongo/db/s/config/sharding_catalog_manager.h b/src/mongo/db/s/config/sharding_catalog_manager.h index b28c529b929..d47f1facb63 100644 --- a/src/mongo/db/s/config/sharding_catalog_manager.h +++ b/src/mongo/db/s/config/sharding_catalog_manager.h @@ -566,6 +566,8 @@ private: */ StatusWith<ChunkType> _findChunkOnConfig(OperationContext* opCtx, const NamespaceStringOrUUID& nsOrUUID, + const OID& epoch, + const boost::optional<Timestamp>& timestamp, const BSONObj& key); /** diff --git a/src/mongo/db/s/config/sharding_catalog_manager_bump_shard_versions_and_change_metadata_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_bump_shard_versions_and_change_metadata_test.cpp index 9ff72e16ffe..3f5977caf67 100644 --- a/src/mongo/db/s/config/sharding_catalog_manager_bump_shard_versions_and_change_metadata_test.cpp +++ b/src/mongo/db/s/config/sharding_catalog_manager_bump_shard_versions_and_change_metadata_test.cpp @@ -118,10 +118,13 @@ protected: */ void assertOnlyOneChunkVersionBumped(OperationContext* opCtx, std::vector<ChunkType> originalChunkTypes, - const ChunkVersion& 
targetChunkVersion) { + const ChunkVersion& targetChunkVersion, + const OID& collEpoch, + const boost::optional<Timestamp>& collTimestamp) { auto aChunkVersionWasBumped = false; for (auto originalChunkType : originalChunkTypes) { - auto swChunkTypeAfter = getChunkDoc(opCtx, originalChunkType.getMin()); + auto swChunkTypeAfter = + getChunkDoc(opCtx, originalChunkType.getMin(), collEpoch, collTimestamp); auto wasBumped = chunkMajorVersionWasBumpedAndOtherFieldsAreUnchanged( originalChunkType, swChunkTypeAfter, targetChunkVersion); if (aChunkVersionWasBumped) { @@ -137,19 +140,19 @@ protected: TEST_F(ShardingCatalogManagerBumpShardVersionsAndChangeMetadataTest, BumpChunkVersionOneChunkPerShard) { - const auto epoch = OID::gen(); - const auto shard0Chunk0 = - generateChunkType(kNss, - ChunkVersion(10, 1, epoch, boost::none /* timestamp */), - kShard0.getName(), - BSON("a" << 1), - BSON("a" << 10)); - const auto shard1Chunk0 = - generateChunkType(kNss, - ChunkVersion(11, 2, epoch, boost::none /* timestamp */), - kShard1.getName(), - BSON("a" << 11), - BSON("a" << 20)); + const auto collEpoch = OID::gen(); + const auto collTimestamp = boost::none; + + const auto shard0Chunk0 = generateChunkType(kNss, + ChunkVersion(10, 1, collEpoch, collTimestamp), + kShard0.getName(), + BSON("a" << 1), + BSON("a" << 10)); + const auto shard1Chunk0 = generateChunkType(kNss, + ChunkVersion(11, 2, collEpoch, collTimestamp), + kShard1.getName(), + BSON("a" << 11), + BSON("a" << 20)); const auto collectionVersion = shard1Chunk0.getVersion(); ChunkVersion targetChunkVersion(collectionVersion.majorVersion() + 1, @@ -165,33 +168,36 @@ TEST_F(ShardingCatalogManagerBumpShardVersionsAndChangeMetadataTest, opCtx, kNss, [&](OperationContext*, TxnNumber) {}); ASSERT_TRUE(chunkMajorVersionWasBumpedAndOtherFieldsAreUnchanged( - shard0Chunk0, getChunkDoc(operationContext(), shard0Chunk0.getMin()), targetChunkVersion)); + shard0Chunk0, + getChunkDoc(operationContext(), shard0Chunk0.getMin(), collEpoch, 
collTimestamp), + targetChunkVersion)); ASSERT_TRUE(chunkMajorVersionWasBumpedAndOtherFieldsAreUnchanged( - shard1Chunk0, getChunkDoc(operationContext(), shard1Chunk0.getMin()), targetChunkVersion)); + shard1Chunk0, + getChunkDoc(operationContext(), shard1Chunk0.getMin(), collEpoch, collTimestamp), + targetChunkVersion)); } TEST_F(ShardingCatalogManagerBumpShardVersionsAndChangeMetadataTest, BumpChunkVersionTwoChunksOnOneShard) { - const auto epoch = OID::gen(); - const auto shard0Chunk0 = - generateChunkType(kNss, - ChunkVersion(10, 1, epoch, boost::none /* timestamp */), - kShard0.getName(), - BSON("a" << 1), - BSON("a" << 10)); - const auto shard0Chunk1 = - generateChunkType(kNss, - ChunkVersion(11, 2, epoch, boost::none /* timestamp */), - kShard0.getName(), - BSON("a" << 11), - BSON("a" << 20)); - const auto shard1Chunk0 = - generateChunkType(kNss, - ChunkVersion(8, 1, epoch, boost::none /* timestamp */), - kShard1.getName(), - BSON("a" << 21), - BSON("a" << 100)); + const auto collEpoch = OID::gen(); + const auto collTimestamp = boost::none; + + const auto shard0Chunk0 = generateChunkType(kNss, + ChunkVersion(10, 1, collEpoch, collTimestamp), + kShard0.getName(), + BSON("a" << 1), + BSON("a" << 10)); + const auto shard0Chunk1 = generateChunkType(kNss, + ChunkVersion(11, 2, collEpoch, collTimestamp), + kShard0.getName(), + BSON("a" << 11), + BSON("a" << 20)); + const auto shard1Chunk0 = generateChunkType(kNss, + ChunkVersion(8, 1, collEpoch, collTimestamp), + kShard1.getName(), + BSON("a" << 21), + BSON("a" << 100)); const auto collectionVersion = shard0Chunk1.getVersion(); ChunkVersion targetChunkVersion(collectionVersion.majorVersion() + 1, @@ -205,40 +211,43 @@ TEST_F(ShardingCatalogManagerBumpShardVersionsAndChangeMetadataTest, ShardingCatalogManager::get(opCtx)->bumpCollectionVersionAndChangeMetadataInTxn( opCtx, kNss, [&](OperationContext*, TxnNumber) {}); - assertOnlyOneChunkVersionBumped( - operationContext(), {shard0Chunk0, shard0Chunk1}, 
targetChunkVersion); + assertOnlyOneChunkVersionBumped(operationContext(), + {shard0Chunk0, shard0Chunk1}, + targetChunkVersion, + collEpoch, + collTimestamp); ASSERT_TRUE(chunkMajorVersionWasBumpedAndOtherFieldsAreUnchanged( - shard1Chunk0, getChunkDoc(operationContext(), shard1Chunk0.getMin()), targetChunkVersion)); + shard1Chunk0, + getChunkDoc(operationContext(), shard1Chunk0.getMin(), collEpoch, collTimestamp), + targetChunkVersion)); } TEST_F(ShardingCatalogManagerBumpShardVersionsAndChangeMetadataTest, BumpChunkVersionTwoChunksOnTwoShards) { - const auto epoch = OID::gen(); - const auto shard0Chunk0 = - generateChunkType(kNss, - ChunkVersion(10, 1, epoch, boost::none /* timestamp */), - kShard0.getName(), - BSON("a" << 1), - BSON("a" << 10)); - const auto shard0Chunk1 = - generateChunkType(kNss, - ChunkVersion(11, 2, epoch, boost::none /* timestamp */), - kShard0.getName(), - BSON("a" << 11), - BSON("a" << 20)); - const auto shard1Chunk0 = - generateChunkType(kNss, - ChunkVersion(8, 1, epoch, boost::none /* timestamp */), - kShard1.getName(), - BSON("a" << 21), - BSON("a" << 100)); - const auto shard1Chunk1 = - generateChunkType(kNss, - ChunkVersion(12, 1, epoch, boost::none /* timestamp */), - kShard1.getName(), - BSON("a" << 101), - BSON("a" << 200)); + const auto collEpoch = OID::gen(); + const auto collTimestamp = boost::none; + + const auto shard0Chunk0 = generateChunkType(kNss, + ChunkVersion(10, 1, collEpoch, collTimestamp), + kShard0.getName(), + BSON("a" << 1), + BSON("a" << 10)); + const auto shard0Chunk1 = generateChunkType(kNss, + ChunkVersion(11, 2, collEpoch, collTimestamp), + kShard0.getName(), + BSON("a" << 11), + BSON("a" << 20)); + const auto shard1Chunk0 = generateChunkType(kNss, + ChunkVersion(8, 1, collEpoch, collTimestamp), + kShard1.getName(), + BSON("a" << 21), + BSON("a" << 100)); + const auto shard1Chunk1 = generateChunkType(kNss, + ChunkVersion(12, 1, collEpoch, collTimestamp), + kShard1.getName(), + BSON("a" << 101), + BSON("a" << 
200)); const auto collectionVersion = shard1Chunk1.getVersion(); ChunkVersion targetChunkVersion(collectionVersion.majorVersion() + 1, @@ -252,28 +261,34 @@ TEST_F(ShardingCatalogManagerBumpShardVersionsAndChangeMetadataTest, ShardingCatalogManager::get(opCtx)->bumpCollectionVersionAndChangeMetadataInTxn( opCtx, kNss, [&](OperationContext*, TxnNumber) {}); - assertOnlyOneChunkVersionBumped( - operationContext(), {shard0Chunk0, shard0Chunk1}, targetChunkVersion); - - assertOnlyOneChunkVersionBumped( - operationContext(), {shard1Chunk0, shard1Chunk1}, targetChunkVersion); + assertOnlyOneChunkVersionBumped(operationContext(), + {shard0Chunk0, shard0Chunk1}, + targetChunkVersion, + collEpoch, + collTimestamp); + + assertOnlyOneChunkVersionBumped(operationContext(), + {shard1Chunk0, shard1Chunk1}, + targetChunkVersion, + collEpoch, + collTimestamp); } TEST_F(ShardingCatalogManagerBumpShardVersionsAndChangeMetadataTest, SucceedsInThePresenceOfTransientTransactionErrors) { - const auto epoch = OID::gen(); - const auto shard0Chunk0 = - generateChunkType(kNss, - ChunkVersion(10, 1, epoch, boost::none /* timestamp */), - kShard0.getName(), - BSON("a" << 1), - BSON("a" << 10)); - const auto shard1Chunk0 = - generateChunkType(kNss, - ChunkVersion(11, 2, epoch, boost::none /* timestamp */), - kShard1.getName(), - BSON("a" << 11), - BSON("a" << 20)); + const auto collEpoch = OID::gen(); + const auto collTimestamp = boost::none; + + const auto shard0Chunk0 = generateChunkType(kNss, + ChunkVersion(10, 1, collEpoch, collTimestamp), + kShard0.getName(), + BSON("a" << 1), + BSON("a" << 10)); + const auto shard1Chunk0 = generateChunkType(kNss, + ChunkVersion(11, 2, collEpoch, collTimestamp), + kShard1.getName(), + BSON("a" << 11), + BSON("a" << 20)); const auto initialCollectionVersion = shard1Chunk0.getVersion(); setupCollection(kNss, kKeyPattern, {shard0Chunk0, shard1Chunk0}); @@ -294,10 +309,14 @@ TEST_F(ShardingCatalogManagerBumpShardVersionsAndChangeMetadataTest, 
initialCollectionVersion.getTimestamp()}; ASSERT_TRUE(chunkMajorVersionWasBumpedAndOtherFieldsAreUnchanged( - shard0Chunk0, getChunkDoc(operationContext(), shard0Chunk0.getMin()), targetChunkVersion)); + shard0Chunk0, + getChunkDoc(operationContext(), shard0Chunk0.getMin(), collEpoch, collTimestamp), + targetChunkVersion)); ASSERT_TRUE(chunkMajorVersionWasBumpedAndOtherFieldsAreUnchanged( - shard1Chunk0, getChunkDoc(operationContext(), shard1Chunk0.getMin()), targetChunkVersion)); + shard1Chunk0, + getChunkDoc(operationContext(), shard1Chunk0.getMin(), collEpoch, collTimestamp), + targetChunkVersion)); ASSERT_EQ(numCalls, 5) << "transaction succeeded after unexpected number of attempts"; @@ -323,29 +342,33 @@ TEST_F(ShardingCatalogManagerBumpShardVersionsAndChangeMetadataTest, initialCollectionVersion.getTimestamp()}; ASSERT_TRUE(chunkMajorVersionWasBumpedAndOtherFieldsAreUnchanged( - shard0Chunk0, getChunkDoc(operationContext(), shard0Chunk0.getMin()), targetChunkVersion)); + shard0Chunk0, + getChunkDoc(operationContext(), shard0Chunk0.getMin(), collEpoch, collTimestamp), + targetChunkVersion)); ASSERT_TRUE(chunkMajorVersionWasBumpedAndOtherFieldsAreUnchanged( - shard1Chunk0, getChunkDoc(operationContext(), shard1Chunk0.getMin()), targetChunkVersion)); + shard1Chunk0, + getChunkDoc(operationContext(), shard1Chunk0.getMin(), collEpoch, collTimestamp), + targetChunkVersion)); ASSERT_EQ(numCalls, 5) << "transaction succeeded after unexpected number of attempts"; } TEST_F(ShardingCatalogManagerBumpShardVersionsAndChangeMetadataTest, StopsRetryingOnPermanentServerErrors) { - const auto epoch = OID::gen(); - const auto shard0Chunk0 = - generateChunkType(kNss, - ChunkVersion(10, 1, epoch, boost::none /* timestamp */), - kShard0.getName(), - BSON("a" << 1), - BSON("a" << 10)); - const auto shard1Chunk0 = - generateChunkType(kNss, - ChunkVersion(11, 2, epoch, boost::none /* timestamp */), - kShard1.getName(), - BSON("a" << 11), - BSON("a" << 20)); + const auto collEpoch = 
OID::gen(); + const auto collTimestamp = boost::none; + + const auto shard0Chunk0 = generateChunkType(kNss, + ChunkVersion(10, 1, collEpoch, collTimestamp), + kShard0.getName(), + BSON("a" << 1), + BSON("a" << 10)); + const auto shard1Chunk0 = generateChunkType(kNss, + ChunkVersion(11, 2, collEpoch, collTimestamp), + kShard1.getName(), + BSON("a" << 11), + BSON("a" << 20)); setupCollection(kNss, kKeyPattern, {shard0Chunk0, shard1Chunk0}); @@ -364,12 +387,12 @@ TEST_F(ShardingCatalogManagerBumpShardVersionsAndChangeMetadataTest, ASSERT_TRUE(chunkMajorVersionWasBumpedAndOtherFieldsAreUnchanged( shard0Chunk0, - getChunkDoc(operationContext(), shard0Chunk0.getMin()), + getChunkDoc(operationContext(), shard0Chunk0.getMin(), collEpoch, collTimestamp), shard0Chunk0.getVersion())); ASSERT_TRUE(chunkMajorVersionWasBumpedAndOtherFieldsAreUnchanged( shard1Chunk0, - getChunkDoc(operationContext(), shard1Chunk0.getMin()), + getChunkDoc(operationContext(), shard1Chunk0.getMin(), collEpoch, collTimestamp), shard1Chunk0.getVersion())); ASSERT_EQ(numCalls, 1) << "transaction failed after unexpected number of attempts"; @@ -389,12 +412,12 @@ TEST_F(ShardingCatalogManagerBumpShardVersionsAndChangeMetadataTest, ASSERT_TRUE(chunkMajorVersionWasBumpedAndOtherFieldsAreUnchanged( shard0Chunk0, - getChunkDoc(operationContext(), shard0Chunk0.getMin()), + getChunkDoc(operationContext(), shard0Chunk0.getMin(), collEpoch, collTimestamp), shard0Chunk0.getVersion())); ASSERT_TRUE(chunkMajorVersionWasBumpedAndOtherFieldsAreUnchanged( shard1Chunk0, - getChunkDoc(operationContext(), shard1Chunk0.getMin()), + getChunkDoc(operationContext(), shard1Chunk0.getMin(), collEpoch, collTimestamp), shard1Chunk0.getVersion())); ASSERT_EQ(numCalls, 1) << "transaction failed after unexpected number of attempts"; @@ -419,12 +442,12 @@ TEST_F(ShardingCatalogManagerBumpShardVersionsAndChangeMetadataTest, ASSERT_TRUE(chunkMajorVersionWasBumpedAndOtherFieldsAreUnchanged( shard0Chunk0, - 
getChunkDoc(operationContext(), shard0Chunk0.getMin()), + getChunkDoc(operationContext(), shard0Chunk0.getMin(), collEpoch, collTimestamp), shard0Chunk0.getVersion())); ASSERT_TRUE(chunkMajorVersionWasBumpedAndOtherFieldsAreUnchanged( shard1Chunk0, - getChunkDoc(operationContext(), shard1Chunk0.getMin()), + getChunkDoc(operationContext(), shard1Chunk0.getMin(), collEpoch, collTimestamp), shard1Chunk0.getVersion())); ASSERT_EQ(numCalls, 1) << "transaction failed after unexpected number of attempts"; diff --git a/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp index 6a900c3a4a6..bdadd67c1c3 100644 --- a/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp +++ b/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp @@ -138,7 +138,7 @@ BSONArray buildMergeChunksTransactionPrecond(const std::vector<ChunkType>& chunk const ChunkVersion& collVersion) { BSONArrayBuilder preCond; - for (auto chunk : chunksToMerge) { + for (const auto& chunk : chunksToMerge) { BSONObj query = BSON(ChunkType::min(chunk.getMin()) << ChunkType::max(chunk.getMax())); if (collVersion.getTimestamp()) { query = query.addFields(BSON(ChunkType::collectionUUID << chunk.getCollectionUUID())); @@ -162,6 +162,8 @@ BSONArray buildMergeChunksTransactionPrecond(const std::vector<ChunkType>& chunk */ StatusWith<ChunkType> getCurrentChunk(OperationContext* opCtx, const NamespaceStringOrUUID& nsOrUUID, + const OID& epoch, + const boost::optional<Timestamp>& timestamp, const ChunkType& requestedChunk) { uassert(4683300, "Config server rejecting commitChunkMigration request that does not have a " @@ -199,7 +201,8 @@ StatusWith<ChunkType> getCurrentChunk(OperationContext* opCtx, << "). 
Cannot execute the migration commit with invalid chunks."}; } - return uassertStatusOK(ChunkType::fromConfigBSON(findResponseWith.getValue().docs.front())); + return uassertStatusOK( + ChunkType::fromConfigBSON(findResponseWith.getValue().docs.front(), epoch, timestamp)); } BSONObj makeCommitChunkTransactionCommand(const NamespaceString& nss, @@ -256,6 +259,8 @@ BSONObj makeCommitChunkTransactionCommand(const NamespaceString& nss, */ boost::optional<ChunkType> getControlChunkForMigrate(OperationContext* opCtx, const NamespaceStringOrUUID& nsOrUUID, + const OID& epoch, + const boost::optional<Timestamp>& timestamp, const ChunkType& migratedChunk, const ShardId& fromShard) { auto const configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard(); @@ -282,7 +287,7 @@ boost::optional<ChunkType> getControlChunkForMigrate(OperationContext* opCtx, return boost::none; } - return uassertStatusOK(ChunkType::fromConfigBSON(response.docs.front())); + return uassertStatusOK(ChunkType::fromConfigBSON(response.docs.front(), epoch, timestamp)); } // Helper function to find collection version and shard version. @@ -571,7 +576,8 @@ StatusWith<BSONObj> ShardingCatalogManager::commitChunkSplit( // Find the chunk history. const auto collNsOrUUID = getNsOrUUIDForChunkTargeting(coll); - const auto origChunk = _findChunkOnConfig(opCtx, collNsOrUUID, range.getMin()); + const auto origChunk = _findChunkOnConfig( + opCtx, collNsOrUUID, collVersion.epoch(), collVersion.getTimestamp(), range.getMin()); if (!origChunk.isOK()) { repl::ReplClientInfo::forClient(opCtx->getClient()).setLastOpToSystemLastOpTime(opCtx); return origChunk.getStatus(); @@ -794,8 +800,11 @@ StatusWith<BSONObj> ShardingCatalogManager::commitChunkMerge( // Check if the chunk(s) have already been merged. If so, return success. 
const auto collNsOrUUID = getNsOrUUIDForChunkTargeting(coll); - auto minChunkOnDisk = - uassertStatusOK(_findChunkOnConfig(opCtx, collNsOrUUID, chunkBoundaries.front())); + auto minChunkOnDisk = uassertStatusOK(_findChunkOnConfig(opCtx, + collNsOrUUID, + collVersion.epoch(), + collVersion.getTimestamp(), + chunkBoundaries.front())); if (minChunkOnDisk.getMax().woCompare(chunkBoundaries.back()) == 0) { auto replyWithVersions = getShardAndCollectionVersion(opCtx, coll, ShardId(shardName)); // Makes sure that the last thing we read in getCurrentChunk and @@ -812,8 +821,11 @@ StatusWith<BSONObj> ShardingCatalogManager::commitChunkMerge( // Do not use the first chunk boundary as a max bound while building chunks for (size_t i = 1; i < chunkBoundaries.size(); ++i) { // Read the original chunk from disk to lookup that chunk's '_id' field. - auto currentChunk = - uassertStatusOK(_findChunkOnConfig(opCtx, collNsOrUUID, chunkBoundaries[i - 1])); + auto currentChunk = uassertStatusOK(_findChunkOnConfig(opCtx, + collNsOrUUID, + collVersion.epoch(), + collVersion.getTimestamp(), + chunkBoundaries[i - 1])); // Ensure the chunk boundaries are strictly increasing if (chunkBoundaries[i].woCompare(currentChunk.getMin()) <= 0) { @@ -938,8 +950,8 @@ StatusWith<BSONObj> ShardingCatalogManager::commitChunksMerge( // Check if the chunk(s) have already been merged. If so, return success. if (shardChunksInRangeResponse.docs.size() == 1) { - auto chunk = - uassertStatusOK(ChunkType::fromConfigBSON(shardChunksInRangeResponse.docs.back())); + auto chunk = uassertStatusOK(ChunkType::fromConfigBSON( + shardChunksInRangeResponse.docs.back(), coll.getEpoch(), coll.getTimestamp())); uassert( ErrorCodes::IllegalOperation, str::stream() << "could not merge chunks, shard " << shardId @@ -959,7 +971,8 @@ StatusWith<BSONObj> ShardingCatalogManager::commitChunksMerge( // and ensure that the retrieved list of chunks covers the whole range. 
std::vector<ChunkType> chunksToMerge; for (const auto& chunkDoc : shardChunksInRangeResponse.docs) { - auto chunk = uassertStatusOK(ChunkType::fromConfigBSON(chunkDoc)); + auto chunk = uassertStatusOK( + ChunkType::fromConfigBSON(chunkDoc, coll.getEpoch(), coll.getTimestamp())); if (chunksToMerge.empty()) { uassert(ErrorCodes::IllegalOperation, str::stream() @@ -1104,8 +1117,9 @@ StatusWith<BSONObj> ShardingCatalogManager::commitChunkMigration( << ", but found no chunks", !findResponse.docs.empty()); - const auto chunk = uassertStatusOK(ChunkType::fromConfigBSON(findResponse.docs[0])); - const auto currentCollectionVersion = chunk.getVersion(); + const auto chunk = uassertStatusOK( + ChunkType::fromConfigBSON(findResponse.docs[0], coll.getEpoch(), coll.getTimestamp())); + const auto& currentCollectionVersion = chunk.getVersion(); if (MONGO_unlikely(migrationCommitVersionError.shouldFail())) { uasserted(ErrorCodes::StaleEpoch, @@ -1130,7 +1144,8 @@ StatusWith<BSONObj> ShardingCatalogManager::commitChunkMigration( // Check if chunk still exists and which shard owns it const auto collNsOrUUID = getNsOrUUIDForChunkTargeting(coll); - auto swCurrentChunk = getCurrentChunk(opCtx, collNsOrUUID, migratedChunk); + auto swCurrentChunk = + getCurrentChunk(opCtx, collNsOrUUID, coll.getEpoch(), coll.getTimestamp(), migratedChunk); if (!swCurrentChunk.isOK()) { return swCurrentChunk.getStatus(); @@ -1165,11 +1180,12 @@ StatusWith<BSONObj> ShardingCatalogManager::commitChunkMigration( << currentChunk.toConfigBSON() << " on the shard " << fromShard.toString()}; } - auto controlChunk = getControlChunkForMigrate(opCtx, collNsOrUUID, migratedChunk, fromShard); + auto controlChunk = getControlChunkForMigrate( + opCtx, collNsOrUUID, coll.getEpoch(), coll.getTimestamp(), migratedChunk, fromShard); // Find the chunk history. 
- const auto origChunk = - uassertStatusOK(_findChunkOnConfig(opCtx, collNsOrUUID, migratedChunk.getMin())); + const auto origChunk = uassertStatusOK(_findChunkOnConfig( + opCtx, collNsOrUUID, coll.getEpoch(), coll.getTimestamp(), migratedChunk.getMin())); // Generate the new versions of migratedChunk and controlChunk. Migrating chunk's minor version // will be 0. @@ -1224,8 +1240,8 @@ StatusWith<BSONObj> ShardingCatalogManager::commitChunkMigration( boost::optional<ChunkType> newControlChunk = boost::none; if (controlChunk) { // Find the chunk history. - auto origControlChunk = - uassertStatusOK(_findChunkOnConfig(opCtx, collNsOrUUID, controlChunk->getMin())); + auto origControlChunk = uassertStatusOK(_findChunkOnConfig( + opCtx, collNsOrUUID, coll.getEpoch(), coll.getTimestamp(), controlChunk->getMin())); newControlChunk = std::move(origControlChunk); newControlChunk->setVersion(ChunkVersion(currentCollectionVersion.majorVersion() + 1, @@ -1257,7 +1273,11 @@ StatusWith<BSONObj> ShardingCatalogManager::commitChunkMigration( } StatusWith<ChunkType> ShardingCatalogManager::_findChunkOnConfig( - OperationContext* opCtx, const NamespaceStringOrUUID& nsOrUUID, const BSONObj& key) { + OperationContext* opCtx, + const NamespaceStringOrUUID& nsOrUUID, + const OID& epoch, + const boost::optional<Timestamp>& timestamp, + const BSONObj& key) { auto const configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard(); const auto query = [&]() { @@ -1288,7 +1308,7 @@ StatusWith<ChunkType> ShardingCatalogManager::_findChunkOnConfig( << " and min key " << key.toString() << ", but found no chunks"}; } - return ChunkType::fromConfigBSON(origChunks.front()); + return ChunkType::fromConfigBSON(origChunks.front(), epoch, timestamp); } void ShardingCatalogManager::clearJumboFlag(OperationContext* opCtx, @@ -1346,7 +1366,8 @@ void ShardingCatalogManager::clearJumboFlag(OperationContext* opCtx, << " from ns: " << nss.ns(), !targetChunkVector.empty()); - const auto targetChunk = 
uassertStatusOK(ChunkType::fromConfigBSON(targetChunkVector.front())); + const auto targetChunk = uassertStatusOK( + ChunkType::fromConfigBSON(targetChunkVector.front(), coll.getEpoch(), coll.getTimestamp())); if (!targetChunk.getJumbo()) { return; @@ -1375,8 +1396,8 @@ void ShardingCatalogManager::clearJumboFlag(OperationContext* opCtx, << ", but found no chunks", !chunksVector.empty()); - const auto highestVersionChunk = - uassertStatusOK(ChunkType::fromConfigBSON(chunksVector.front())); + const auto highestVersionChunk = uassertStatusOK( + ChunkType::fromConfigBSON(chunksVector.front(), coll.getEpoch(), coll.getTimestamp())); const auto currentCollectionVersion = highestVersionChunk.getVersion(); // It is possible for a migration to end up running partly without the protection of the @@ -1472,8 +1493,8 @@ void ShardingCatalogManager::ensureChunkVersionIsGreaterThan(OperationContext* o return; } - const auto currentChunk = - uassertStatusOK(ChunkType::fromConfigBSON(matchingChunksVector.front())); + const auto currentChunk = uassertStatusOK(ChunkType::fromConfigBSON( + matchingChunksVector.front(), version.epoch(), version.getTimestamp())); if (version.isOlderThan(currentChunk.getVersion())) { LOGV2(23885, @@ -1512,8 +1533,8 @@ void ShardingCatalogManager::ensureChunkVersionIsGreaterThan(OperationContext* o "epoch"_attr = version.epoch()); return; } - const auto highestChunk = - uassertStatusOK(ChunkType::fromConfigBSON(highestChunksVector.front())); + const auto highestChunk = uassertStatusOK(ChunkType::fromConfigBSON( + highestChunksVector.front(), version.epoch(), version.getTimestamp())); // Generate a new version for the chunk by incrementing the collectionVersion's major version. 
auto newChunk = currentChunk; diff --git a/src/mongo/db/s/config/sharding_catalog_manager_clear_jumbo_flag_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_clear_jumbo_flag_test.cpp index bd428f52954..68845265eae 100644 --- a/src/mongo/db/s/config/sharding_catalog_manager_clear_jumbo_flag_test.cpp +++ b/src/mongo/db/s/config/sharding_catalog_manager_clear_jumbo_flag_test.cpp @@ -100,21 +100,29 @@ private: }; TEST_F(ClearJumboFlagTest, ClearJumboShouldBumpVersion) { + const auto collEpoch = epoch(); + const auto collTimestamp = boost::none; + ShardingCatalogManager::get(operationContext()) - ->clearJumboFlag(operationContext(), ns(), epoch(), jumboChunk()); + ->clearJumboFlag(operationContext(), ns(), collEpoch, jumboChunk()); - auto chunkDoc = uassertStatusOK(getChunkDoc(operationContext(), jumboChunk().getMin())); + auto chunkDoc = uassertStatusOK( + getChunkDoc(operationContext(), jumboChunk().getMin(), collEpoch, collTimestamp)); ASSERT_FALSE(chunkDoc.getJumbo()); - ASSERT_EQ(ChunkVersion(15, 0, epoch(), boost::none /* timestamp */), chunkDoc.getVersion()); + ASSERT_EQ(ChunkVersion(15, 0, collEpoch, collTimestamp), chunkDoc.getVersion()); } TEST_F(ClearJumboFlagTest, ClearJumboShouldNotBumpVersionIfChunkNotJumbo) { + const auto collEpoch = epoch(); + const auto collTimestamp = boost::none; + ShardingCatalogManager::get(operationContext()) - ->clearJumboFlag(operationContext(), ns(), epoch(), nonJumboChunk()); + ->clearJumboFlag(operationContext(), ns(), collEpoch, nonJumboChunk()); - auto chunkDoc = uassertStatusOK(getChunkDoc(operationContext(), nonJumboChunk().getMin())); + auto chunkDoc = uassertStatusOK( + getChunkDoc(operationContext(), nonJumboChunk().getMin(), collEpoch, collTimestamp)); ASSERT_FALSE(chunkDoc.getJumbo()); - ASSERT_EQ(ChunkVersion(14, 7, epoch(), boost::none /* timestamp */), chunkDoc.getVersion()); + ASSERT_EQ(ChunkVersion(14, 7, collEpoch, collTimestamp), chunkDoc.getVersion()); } TEST_F(ClearJumboFlagTest, 
AssertsOnEpochMismatch) { diff --git a/src/mongo/db/s/config/sharding_catalog_manager_commit_chunk_migration_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_commit_chunk_migration_test.cpp index 4a7e74add28..cedad2a905f 100644 --- a/src/mongo/db/s/config/sharding_catalog_manager_commit_chunk_migration_test.cpp +++ b/src/mongo/db/s/config/sharding_catalog_manager_commit_chunk_migration_test.cpp @@ -50,6 +50,9 @@ const NamespaceString kNamespace("TestDB.TestColl"); const KeyPattern kKeyPattern(BSON("x" << 1)); TEST_F(CommitChunkMigrate, ChunksUpdatedCorrectly) { + const auto collEpoch = OID::gen(); + const auto collTimestamp = boost::none; + ShardType shard0; shard0.setName("shard0"); shard0.setHost("shard0:12"); @@ -62,7 +65,7 @@ TEST_F(CommitChunkMigrate, ChunksUpdatedCorrectly) { ChunkType migratedChunk, controlChunk; { - ChunkVersion origVersion(12, 7, OID::gen(), boost::none /* timestamp */); + ChunkVersion origVersion(12, 7, collEpoch, collTimestamp); migratedChunk.setName(OID::gen()); migratedChunk.setNS(kNamespace); @@ -109,14 +112,16 @@ TEST_F(CommitChunkMigrate, ChunksUpdatedCorrectly) { ASSERT_TRUE(mver.isOlderOrEqualThan(cver)); // Verify the chunks ended up in the right shards. - auto chunkDoc0 = uassertStatusOK(getChunkDoc(operationContext(), migratedChunk.getMin())); + auto chunkDoc0 = uassertStatusOK( + getChunkDoc(operationContext(), migratedChunk.getMin(), collEpoch, collTimestamp)); ASSERT_EQ("shard1", chunkDoc0.getShard().toString()); // The migrated chunk's history should be updated. ASSERT_EQ(2UL, chunkDoc0.getHistory().size()); ASSERT_EQ(validAfter, chunkDoc0.getHistory().front().getValidAfter()); - auto chunkDoc1 = uassertStatusOK(getChunkDoc(operationContext(), controlChunk.getMin())); + auto chunkDoc1 = uassertStatusOK( + getChunkDoc(operationContext(), controlChunk.getMin(), collEpoch, collTimestamp)); ASSERT_EQ("shard0", chunkDoc1.getShard().toString()); // The control chunk's history and jumbo status should be unchanged. 
@@ -129,6 +134,8 @@ TEST_F(CommitChunkMigrate, ChunksUpdatedCorrectly) { } TEST_F(CommitChunkMigrate, ChunksUpdatedCorrectlyWithoutControlChunk) { + const auto collEpoch = OID::gen(); + const auto collTimestamp = boost::none; ShardType shard0; shard0.setName("shard0"); @@ -141,8 +148,7 @@ TEST_F(CommitChunkMigrate, ChunksUpdatedCorrectlyWithoutControlChunk) { setupShards({shard0, shard1}); int origMajorVersion = 15; - auto const origVersion = - ChunkVersion(origMajorVersion, 4, OID::gen(), boost::none /* timestamp */); + auto const origVersion = ChunkVersion(origMajorVersion, 4, collEpoch, collTimestamp); ChunkType chunk0; chunk0.setName(OID::gen()); @@ -179,7 +185,8 @@ TEST_F(CommitChunkMigrate, ChunksUpdatedCorrectlyWithoutControlChunk) { ASSERT_EQ(ChunkVersion(0, 0, origVersion.epoch(), origVersion.getTimestamp()), mver.getValue()); // Verify the chunk ended up in the right shard. - auto chunkDoc0 = uassertStatusOK(getChunkDoc(operationContext(), chunkMin)); + auto chunkDoc0 = + uassertStatusOK(getChunkDoc(operationContext(), chunkMin, collEpoch, collTimestamp)); ASSERT_EQ("shard1", chunkDoc0.getShard().toString()); // The history should be updated. 
ASSERT_EQ(2UL, chunkDoc0.getHistory().size()); @@ -187,6 +194,8 @@ TEST_F(CommitChunkMigrate, ChunksUpdatedCorrectlyWithoutControlChunk) { } TEST_F(CommitChunkMigrate, CheckCorrectOpsCommandNoCtlTrimHistory) { + const auto collEpoch = OID::gen(); + const auto collTimestamp = boost::none; ShardType shard0; shard0.setName("shard0"); @@ -199,8 +208,7 @@ TEST_F(CommitChunkMigrate, CheckCorrectOpsCommandNoCtlTrimHistory) { setupShards({shard0, shard1}); int origMajorVersion = 15; - auto const origVersion = - ChunkVersion(origMajorVersion, 4, OID::gen(), boost::none /* timestamp */); + auto const origVersion = ChunkVersion(origMajorVersion, 4, collEpoch, collTimestamp); ChunkType chunk0; chunk0.setName(OID::gen()); @@ -238,7 +246,8 @@ TEST_F(CommitChunkMigrate, CheckCorrectOpsCommandNoCtlTrimHistory) { ASSERT_EQ(ChunkVersion(0, 0, origVersion.epoch(), origVersion.getTimestamp()), mver.getValue()); // Verify the chunk ended up in the right shard. - auto chunkDoc0 = uassertStatusOK(getChunkDoc(operationContext(), chunkMin)); + auto chunkDoc0 = + uassertStatusOK(getChunkDoc(operationContext(), chunkMin, collEpoch, collTimestamp)); ASSERT_EQ("shard1", chunkDoc0.getShard().toString()); // The new history entry should be added, but the old one preserved. 
@@ -455,6 +464,9 @@ TEST_F(CommitChunkMigrate, RejectChunkMissing0) { } TEST_F(CommitChunkMigrate, CommitWithLastChunkOnShardShouldNotAffectOtherChunks) { + const auto collEpoch = OID::gen(); + const auto collTimestamp = boost::none; + ShardType shard0; shard0.setName("shard0"); shard0.setHost("shard0:12"); @@ -466,8 +478,7 @@ TEST_F(CommitChunkMigrate, CommitWithLastChunkOnShardShouldNotAffectOtherChunks) setupShards({shard0, shard1}); int origMajorVersion = 12; - auto const origVersion = - ChunkVersion(origMajorVersion, 7, OID::gen(), boost::none /* timestamp */); + auto const origVersion = ChunkVersion(origMajorVersion, 7, collEpoch, collTimestamp); ChunkType chunk0; chunk0.setName(OID::gen()); @@ -516,14 +527,16 @@ TEST_F(CommitChunkMigrate, CommitWithLastChunkOnShardShouldNotAffectOtherChunks) ASSERT_EQ(ChunkVersion(0, 0, origVersion.epoch(), origVersion.getTimestamp()), mver.getValue()); // Verify the chunks ended up in the right shards. - auto chunkDoc0 = uassertStatusOK(getChunkDoc(operationContext(), chunkMin)); + auto chunkDoc0 = + uassertStatusOK(getChunkDoc(operationContext(), chunkMin, collEpoch, collTimestamp)); ASSERT_EQ(shard1.getName(), chunkDoc0.getShard().toString()); // The migrated chunk's history should be updated. 
ASSERT_EQ(2UL, chunkDoc0.getHistory().size()); ASSERT_EQ(validAfter, chunkDoc0.getHistory().front().getValidAfter()); - auto chunkDoc1 = uassertStatusOK(getChunkDoc(operationContext(), chunkMax)); + auto chunkDoc1 = + uassertStatusOK(getChunkDoc(operationContext(), chunkMax, collEpoch, collTimestamp)); ASSERT_EQ(shard1.getName(), chunkDoc1.getShard().toString()); ASSERT_EQ(chunk1.getVersion(), chunkDoc1.getVersion()); diff --git a/src/mongo/db/s/config/sharding_catalog_manager_ensure_chunk_version_is_greater_than_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_ensure_chunk_version_is_greater_than_test.cpp index ef03914a467..06280859952 100644 --- a/src/mongo/db/s/config/sharding_catalog_manager_ensure_chunk_version_is_greater_than_test.cpp +++ b/src/mongo/db/s/config/sharding_catalog_manager_ensure_chunk_version_is_greater_than_test.cpp @@ -106,16 +106,21 @@ TEST_F(EnsureChunkVersionIsGreaterThanTest, IfNoChunksFoundFoundReturnsSuccess) } TEST_F(EnsureChunkVersionIsGreaterThanTest, IfNoChunkWithMatchingEpochFoundReturnsSuccess) { + const auto collEpoch1 = OID::gen(); + const auto collTimestamp1 = boost::none; + const auto requestedChunkType = generateChunkType(kNss, - ChunkVersion(10, 2, OID::gen(), boost::none /* timestamp */), + ChunkVersion(10, 2, collEpoch1, collTimestamp1), ShardId(_shardName), BSON("a" << 1), BSON("a" << 10)); + // Epoch is different. + const auto collEpoch2 = OID::gen(); + const auto collTimestamp2 = boost::none; ChunkType existingChunkType = requestedChunkType; - // Epoch is different. 
- existingChunkType.setVersion(ChunkVersion(10, 2, OID::gen(), boost::none /* timestamp */)); + existingChunkType.setVersion(ChunkVersion(10, 2, collEpoch2, collTimestamp2)); setupCollection(kNss, kKeyPattern, {existingChunkType}); ShardingCatalogManager::get(operationContext()) @@ -124,17 +129,20 @@ TEST_F(EnsureChunkVersionIsGreaterThanTest, IfNoChunkWithMatchingEpochFoundRetur requestedChunkType.getMax(), requestedChunkType.getVersion()); - assertChunkHasNotChanged(existingChunkType, - getChunkDoc(operationContext(), existingChunkType.getMin())); + assertChunkHasNotChanged( + existingChunkType, + getChunkDoc(operationContext(), existingChunkType.getMin(), collEpoch2, collTimestamp2)); } TEST_F(EnsureChunkVersionIsGreaterThanTest, IfNoChunkWithMatchingMinKeyFoundReturnsSuccess) { - const auto requestedChunkType = - generateChunkType(kNss, - ChunkVersion(10, 2, OID::gen(), boost::none /* timestamp */), - ShardId(_shardName), - BSON("a" << 1), - BSON("a" << 10)); + const auto collEpoch = OID::gen(); + const auto collTimestamp = boost::none; + + const auto requestedChunkType = generateChunkType(kNss, + ChunkVersion(10, 2, collEpoch, collTimestamp), + ShardId(_shardName), + BSON("a" << 1), + BSON("a" << 10)); ChunkType existingChunkType = requestedChunkType; // Min key is different. 
@@ -147,17 +155,20 @@ TEST_F(EnsureChunkVersionIsGreaterThanTest, IfNoChunkWithMatchingMinKeyFoundRetu requestedChunkType.getMax(), requestedChunkType.getVersion()); - assertChunkHasNotChanged(existingChunkType, - getChunkDoc(operationContext(), existingChunkType.getMin())); + assertChunkHasNotChanged( + existingChunkType, + getChunkDoc(operationContext(), existingChunkType.getMin(), collEpoch, collTimestamp)); } TEST_F(EnsureChunkVersionIsGreaterThanTest, IfNoChunkWithMatchingMaxKeyFoundReturnsSuccess) { - const auto requestedChunkType = - generateChunkType(kNss, - ChunkVersion(10, 2, OID::gen(), boost::none /* timestamp */), - ShardId(_shardName), - BSON("a" << 1), - BSON("a" << 10)); + const auto collEpoch = OID::gen(); + const auto collTimestamp = boost::none; + + const auto requestedChunkType = generateChunkType(kNss, + ChunkVersion(10, 2, collEpoch, collTimestamp), + ShardId(_shardName), + BSON("a" << 1), + BSON("a" << 10)); ChunkType existingChunkType = requestedChunkType; // Max key is different. 
@@ -170,27 +181,28 @@ TEST_F(EnsureChunkVersionIsGreaterThanTest, IfNoChunkWithMatchingMaxKeyFoundRetu requestedChunkType.getMax(), requestedChunkType.getVersion()); - assertChunkHasNotChanged(existingChunkType, - getChunkDoc(operationContext(), existingChunkType.getMin())); + assertChunkHasNotChanged( + existingChunkType, + getChunkDoc(operationContext(), existingChunkType.getMin(), collEpoch, collTimestamp)); } TEST_F(EnsureChunkVersionIsGreaterThanTest, IfChunkMatchingRequestedChunkFoundBumpsChunkVersionAndReturnsSuccess) { - const auto epoch = OID::gen(); - const auto requestedChunkType = - generateChunkType(kNss, - ChunkVersion(10, 2, epoch, boost::none /* timestamp */), - ShardId(_shardName), - BSON("a" << 1), - BSON("a" << 10)); + const auto collEpoch = OID::gen(); + const auto collTimestamp = boost::none; + + const auto requestedChunkType = generateChunkType(kNss, + ChunkVersion(10, 2, collEpoch, collTimestamp), + ShardId(_shardName), + BSON("a" << 1), + BSON("a" << 10)); const auto existingChunkType = requestedChunkType; - const auto highestChunkType = - generateChunkType(kNss, - ChunkVersion(20, 3, epoch, boost::none /* timestamp */), - ShardId("shard0001"), - BSON("a" << 11), - BSON("a" << 20)); + const auto highestChunkType = generateChunkType(kNss, + ChunkVersion(20, 3, collEpoch, collTimestamp), + ShardId("shard0001"), + BSON("a" << 11), + BSON("a" << 20)); setupCollection(kNss, kKeyPattern, {existingChunkType, highestChunkType}); ShardingCatalogManager::get(operationContext()) @@ -199,27 +211,27 @@ TEST_F(EnsureChunkVersionIsGreaterThanTest, requestedChunkType.getMax(), requestedChunkType.getVersion()); - assertChunkVersionWasBumpedTo(existingChunkType, - getChunkDoc(operationContext(), existingChunkType.getMin()), - ChunkVersion(highestChunkType.getVersion().majorVersion() + 1, - 0, - epoch, - boost::none /* timestamp */)); + assertChunkVersionWasBumpedTo( + existingChunkType, + getChunkDoc(operationContext(), existingChunkType.getMin(), collEpoch, 
collTimestamp), + ChunkVersion( + highestChunkType.getVersion().majorVersion() + 1, 0, collEpoch, collTimestamp)); } TEST_F( EnsureChunkVersionIsGreaterThanTest, IfChunkMatchingRequestedChunkFoundAndHasHigherChunkVersionReturnsSuccessWithoutBumpingChunkVersion) { - const auto epoch = OID::gen(); - const auto requestedChunkType = - generateChunkType(kNss, - ChunkVersion(10, 2, epoch, boost::none /* timestamp */), - ShardId(_shardName), - BSON("a" << 1), - BSON("a" << 10)); + const auto collEpoch = OID::gen(); + const auto collTimestamp = boost::none; + + const auto requestedChunkType = generateChunkType(kNss, + ChunkVersion(10, 2, collEpoch, collTimestamp), + ShardId(_shardName), + BSON("a" << 1), + BSON("a" << 10)); ChunkType existingChunkType = requestedChunkType; - existingChunkType.setVersion(ChunkVersion(11, 1, epoch, boost::none /* timestamp */)); + existingChunkType.setVersion(ChunkVersion(11, 1, collEpoch, collTimestamp)); setupCollection(kNss, kKeyPattern, {existingChunkType}); ShardingCatalogManager::get(operationContext()) @@ -228,8 +240,9 @@ TEST_F( requestedChunkType.getMax(), requestedChunkType.getVersion()); - assertChunkHasNotChanged(existingChunkType, - getChunkDoc(operationContext(), existingChunkType.getMin())); + assertChunkHasNotChanged( + existingChunkType, + getChunkDoc(operationContext(), existingChunkType.getMin(), collEpoch, collTimestamp)); } } // namespace diff --git a/src/mongo/db/s/config/sharding_catalog_manager_merge_chunks_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_merge_chunks_test.cpp index 1a1c476b37d..04f002fb135 100644 --- a/src/mongo/db/s/config/sharding_catalog_manager_merge_chunks_test.cpp +++ b/src/mongo/db/s/config/sharding_catalog_manager_merge_chunks_test.cpp @@ -121,7 +121,8 @@ TEST_F(MergeChunkTest, MergeExistingChunksCorrectlyShouldSucceed) { ASSERT_EQ(1u, chunksVector.size()); // MergedChunk should have range [chunkMin, chunkMax] - auto mergedChunk = 
uassertStatusOK(ChunkType::fromConfigBSON(chunksVector.front())); + auto mergedChunk = uassertStatusOK(ChunkType::fromConfigBSON( + chunksVector.front(), collVersion.epoch(), collVersion.getTimestamp())); ASSERT_BSONOBJ_EQ(chunkMin, mergedChunk.getMin()); ASSERT_BSONOBJ_EQ(chunkMax, mergedChunk.getMax()); @@ -134,11 +135,14 @@ TEST_F(MergeChunkTest, MergeExistingChunksCorrectlyShouldSucceed) { } TEST_F(MergeChunkTest, MergeSeveralChunksCorrectlyShouldSucceed) { + const auto collEpoch = OID::gen(); + const auto collTimestamp = boost::none; + ChunkType chunk; chunk.setName(OID::gen()); chunk.setNS(kNamespace); - auto origVersion = ChunkVersion(1, 0, OID::gen(), boost::none /* timestamp */); + auto origVersion = ChunkVersion(1, 0, collEpoch, collTimestamp); chunk.setVersion(origVersion); chunk.setShard(ShardId("shard0000")); @@ -192,7 +196,8 @@ TEST_F(MergeChunkTest, MergeSeveralChunksCorrectlyShouldSucceed) { ASSERT_EQ(1u, chunksVector.size()); // MergedChunk should have range [chunkMin, chunkMax] - auto mergedChunk = uassertStatusOK(ChunkType::fromConfigBSON(chunksVector.front())); + auto mergedChunk = + uassertStatusOK(ChunkType::fromConfigBSON(chunksVector.front(), collEpoch, collTimestamp)); ASSERT_BSONOBJ_EQ(chunkMin, mergedChunk.getMin()); ASSERT_BSONOBJ_EQ(chunkMax, mergedChunk.getMax()); @@ -208,14 +213,16 @@ TEST_F(MergeChunkTest, MergeSeveralChunksCorrectlyShouldSucceed) { } TEST_F(MergeChunkTest, NewMergeShouldClaimHighestVersion) { + const auto collEpoch = OID::gen(); + const auto collTimestamp = boost::none; + ChunkType chunk, otherChunk; chunk.setName(OID::gen()); chunk.setNS(kNamespace); otherChunk.setName(OID::gen()); otherChunk.setNS(kNamespace); - auto collEpoch = OID::gen(); - auto origVersion = ChunkVersion(1, 2, collEpoch, boost::none /* timestamp */); + auto origVersion = ChunkVersion(1, 2, collEpoch, collTimestamp); chunk.setVersion(origVersion); chunk.setShard(ShardId("shard0000")); @@ -270,7 +277,8 @@ TEST_F(MergeChunkTest, 
NewMergeShouldClaimHighestVersion) { ASSERT_EQ(2u, chunksVector.size()); // MergedChunk should have range [chunkMin, chunkMax] - auto mergedChunk = uassertStatusOK(ChunkType::fromConfigBSON(chunksVector.front())); + auto mergedChunk = + uassertStatusOK(ChunkType::fromConfigBSON(chunksVector.front(), collEpoch, collTimestamp)); ASSERT_BSONOBJ_EQ(chunkMin, mergedChunk.getMin()); ASSERT_BSONOBJ_EQ(chunkMax, mergedChunk.getMax()); @@ -286,11 +294,14 @@ TEST_F(MergeChunkTest, NewMergeShouldClaimHighestVersion) { } TEST_F(MergeChunkTest, MergeLeavesOtherChunksAlone) { + const auto collEpoch = OID::gen(); + const auto collTimestamp = boost::none; + ChunkType chunk; chunk.setName(OID::gen()); chunk.setNS(kNamespace); - auto origVersion = ChunkVersion(1, 2, OID::gen(), boost::none /* timestamp */); + auto origVersion = ChunkVersion(1, 2, collEpoch, collTimestamp); chunk.setVersion(origVersion); chunk.setShard(ShardId("shard0000")); @@ -344,7 +355,8 @@ TEST_F(MergeChunkTest, MergeLeavesOtherChunksAlone) { ASSERT_EQ(2u, chunksVector.size()); // MergedChunk should have range [chunkMin, chunkMax] - auto mergedChunk = uassertStatusOK(ChunkType::fromConfigBSON(chunksVector.front())); + auto mergedChunk = + uassertStatusOK(ChunkType::fromConfigBSON(chunksVector.front(), collEpoch, collTimestamp)); ASSERT_BSONOBJ_EQ(chunkMin, mergedChunk.getMin()); ASSERT_BSONOBJ_EQ(chunkMax, mergedChunk.getMax()); @@ -355,7 +367,8 @@ TEST_F(MergeChunkTest, MergeLeavesOtherChunksAlone) { } // OtherChunk should have been left alone - auto foundOtherChunk = uassertStatusOK(ChunkType::fromConfigBSON(chunksVector.back())); + auto foundOtherChunk = + uassertStatusOK(ChunkType::fromConfigBSON(chunksVector.back(), collEpoch, collTimestamp)); ASSERT_BSONOBJ_EQ(otherChunk.getMin(), foundOtherChunk.getMin()); ASSERT_BSONOBJ_EQ(otherChunk.getMax(), foundOtherChunk.getMax()); } @@ -435,11 +448,14 @@ TEST_F(MergeChunkTest, NonMatchingEpochsOfChunkAndRequestErrors) { } TEST_F(MergeChunkTest, 
MergeAlreadyHappenedSucceeds) { + const auto collEpoch = OID::gen(); + const auto collTimestamp = boost::none; + ChunkType chunk; chunk.setName(OID::gen()); chunk.setNS(kNamespace); - auto origVersion = ChunkVersion(1, 0, OID::gen(), boost::none /* timestamp */); + auto origVersion = ChunkVersion(1, 0, collEpoch, collTimestamp); chunk.setVersion(origVersion); chunk.setShard(ShardId("shard0000")); @@ -493,7 +509,8 @@ TEST_F(MergeChunkTest, MergeAlreadyHappenedSucceeds) { ASSERT_EQ(1u, chunksVector.size()); // MergedChunk should have range [chunkMin, chunkMax] - ChunkType foundChunk = uassertStatusOK(ChunkType::fromConfigBSON(chunksVector.front())); + ChunkType foundChunk = + uassertStatusOK(ChunkType::fromConfigBSON(chunksVector.front(), collEpoch, collTimestamp)); ASSERT_BSONOBJ_EQ(mergedChunk.toConfigBSON(), foundChunk.toConfigBSON()); } @@ -543,11 +560,14 @@ TEST_F(MergeChunkTest, ChunkBoundariesOutOfOrderFails) { } TEST_F(MergeChunkTest, MergingChunksWithDollarPrefixShouldSucceed) { + const auto collEpoch = OID::gen(); + const auto collTimestamp = boost::none; + ChunkType chunk1; chunk1.setName(OID::gen()); chunk1.setNS(kNamespace); - auto origVersion = ChunkVersion(1, 0, OID::gen(), boost::none /* timestamp */); + auto origVersion = ChunkVersion(1, 0, collEpoch, collTimestamp); chunk1.setVersion(origVersion); chunk1.setShard(ShardId("shard0000")); @@ -600,7 +620,8 @@ TEST_F(MergeChunkTest, MergingChunksWithDollarPrefixShouldSucceed) { ASSERT_EQ(1u, chunksVector.size()); // MergedChunk should have range [chunkMin, chunkMax] - auto mergedChunk = uassertStatusOK(ChunkType::fromConfigBSON(chunksVector.front())); + auto mergedChunk = + uassertStatusOK(ChunkType::fromConfigBSON(chunksVector.front(), collEpoch, collTimestamp)); ASSERT_BSONOBJ_EQ(chunkMin, mergedChunk.getMin()); ASSERT_BSONOBJ_EQ(chunkMax, mergedChunk.getMax()); diff --git a/src/mongo/db/s/config/sharding_catalog_manager_split_chunk_test.cpp 
b/src/mongo/db/s/config/sharding_catalog_manager_split_chunk_test.cpp index be9997779a7..25d713cc536 100644 --- a/src/mongo/db/s/config/sharding_catalog_manager_split_chunk_test.cpp +++ b/src/mongo/db/s/config/sharding_catalog_manager_split_chunk_test.cpp @@ -55,11 +55,14 @@ protected: }; TEST_F(SplitChunkTest, SplitExistingChunkCorrectlyShouldSucceed) { + const auto collEpoch = OID::gen(); + const auto collTimestamp = boost::none; + ChunkType chunk; chunk.setName(OID::gen()); chunk.setNS(kNamespace); - auto origVersion = ChunkVersion(1, 0, OID::gen(), boost::none /* timestamp */); + auto origVersion = ChunkVersion(1, 0, collEpoch, collTimestamp); chunk.setVersion(origVersion); chunk.setShard(ShardId(_shardName)); @@ -78,7 +81,7 @@ TEST_F(SplitChunkTest, SplitExistingChunkCorrectlyShouldSucceed) { auto versions = assertGet(ShardingCatalogManager::get(operationContext()) ->commitChunkSplit(operationContext(), kNamespace, - origVersion.epoch(), + collEpoch, ChunkRange(chunkMin, chunkMax), splitPoints, "shard0000")); @@ -89,15 +92,13 @@ TEST_F(SplitChunkTest, SplitExistingChunkCorrectlyShouldSucceed) { ASSERT_EQ(collVersion, shardVersion); // Check for increment on mergedChunk's minor version - auto expectedShardVersion = ChunkVersion(origVersion.majorVersion(), - origVersion.minorVersion() + 2, - origVersion.epoch(), - origVersion.getTimestamp()); + auto expectedShardVersion = ChunkVersion( + origVersion.majorVersion(), origVersion.minorVersion() + 2, collEpoch, collTimestamp); ASSERT_EQ(expectedShardVersion, shardVersion); ASSERT_EQ(shardVersion, collVersion); // First chunkDoc should have range [chunkMin, chunkSplitPoint] - auto chunkDocStatus = getChunkDoc(operationContext(), chunkMin); + auto chunkDocStatus = getChunkDoc(operationContext(), chunkMin, collEpoch, collTimestamp); ASSERT_OK(chunkDocStatus.getStatus()); auto chunkDoc = chunkDocStatus.getValue(); @@ -111,7 +112,8 @@ TEST_F(SplitChunkTest, SplitExistingChunkCorrectlyShouldSucceed) { ASSERT_EQ(2UL, 
chunkDoc.getHistory().size()); // Second chunkDoc should have range [chunkSplitPoint, chunkMax] - auto otherChunkDocStatus = getChunkDoc(operationContext(), chunkSplitPoint); + auto otherChunkDocStatus = + getChunkDoc(operationContext(), chunkSplitPoint, collEpoch, collTimestamp); ASSERT_OK(otherChunkDocStatus.getStatus()); auto otherChunkDoc = otherChunkDocStatus.getValue(); @@ -129,11 +131,14 @@ TEST_F(SplitChunkTest, SplitExistingChunkCorrectlyShouldSucceed) { } TEST_F(SplitChunkTest, MultipleSplitsOnExistingChunkShouldSucceed) { + const auto collEpoch = OID::gen(); + const auto collTimestamp = boost::none; + ChunkType chunk; chunk.setName(OID::gen()); chunk.setNS(kNamespace); - auto origVersion = ChunkVersion(1, 0, OID::gen(), boost::none /* timestamp */); + auto origVersion = ChunkVersion(1, 0, collEpoch, collTimestamp); chunk.setVersion(origVersion); chunk.setShard(ShardId(_shardName)); @@ -153,13 +158,13 @@ TEST_F(SplitChunkTest, MultipleSplitsOnExistingChunkShouldSucceed) { ASSERT_OK(ShardingCatalogManager::get(operationContext()) ->commitChunkSplit(operationContext(), kNamespace, - origVersion.epoch(), + collEpoch, ChunkRange(chunkMin, chunkMax), splitPoints, "shard0000")); // First chunkDoc should have range [chunkMin, chunkSplitPoint] - auto chunkDocStatus = getChunkDoc(operationContext(), chunkMin); + auto chunkDocStatus = getChunkDoc(operationContext(), chunkMin, collEpoch, collTimestamp); ASSERT_OK(chunkDocStatus.getStatus()); auto chunkDoc = chunkDocStatus.getValue(); @@ -173,7 +178,8 @@ TEST_F(SplitChunkTest, MultipleSplitsOnExistingChunkShouldSucceed) { ASSERT_EQ(2UL, chunkDoc.getHistory().size()); // Second chunkDoc should have range [chunkSplitPoint, chunkSplitPoint2] - auto midChunkDocStatus = getChunkDoc(operationContext(), chunkSplitPoint); + auto midChunkDocStatus = + getChunkDoc(operationContext(), chunkSplitPoint, collEpoch, collTimestamp); ASSERT_OK(midChunkDocStatus.getStatus()); auto midChunkDoc = midChunkDocStatus.getValue(); @@ -187,7 
+193,8 @@ TEST_F(SplitChunkTest, MultipleSplitsOnExistingChunkShouldSucceed) { ASSERT_EQ(2UL, midChunkDoc.getHistory().size()); // Third chunkDoc should have range [chunkSplitPoint2, chunkMax] - auto lastChunkDocStatus = getChunkDoc(operationContext(), chunkSplitPoint2); + auto lastChunkDocStatus = + getChunkDoc(operationContext(), chunkSplitPoint2, collEpoch, collTimestamp); ASSERT_OK(lastChunkDocStatus.getStatus()); auto lastChunkDoc = lastChunkDocStatus.getValue(); @@ -206,15 +213,17 @@ TEST_F(SplitChunkTest, MultipleSplitsOnExistingChunkShouldSucceed) { } TEST_F(SplitChunkTest, NewSplitShouldClaimHighestVersion) { + const auto collEpoch = OID::gen(); + const auto collTimestamp = boost::none; + ChunkType chunk, chunk2; chunk.setName(OID::gen()); chunk.setNS(kNamespace); chunk2.setName(OID::gen()); chunk2.setNS(kNamespace); - auto collEpoch = OID::gen(); // set up first chunk - auto origVersion = ChunkVersion(1, 2, collEpoch, boost::none /* timestamp */); + auto origVersion = ChunkVersion(1, 2, collEpoch, collTimestamp); chunk.setVersion(origVersion); chunk.setShard(ShardId(_shardName)); @@ -228,7 +237,7 @@ TEST_F(SplitChunkTest, NewSplitShouldClaimHighestVersion) { splitPoints.push_back(chunkSplitPoint); // set up second chunk (chunk2) - auto competingVersion = ChunkVersion(2, 1, collEpoch, boost::none /* timestamp */); + auto competingVersion = ChunkVersion(2, 1, collEpoch, collTimestamp); chunk2.setVersion(competingVersion); chunk2.setShard(ShardId(_shardName)); chunk2.setMin(BSON("a" << 10)); @@ -245,7 +254,7 @@ TEST_F(SplitChunkTest, NewSplitShouldClaimHighestVersion) { "shard0000")); // First chunkDoc should have range [chunkMin, chunkSplitPoint] - auto chunkDocStatus = getChunkDoc(operationContext(), chunkMin); + auto chunkDocStatus = getChunkDoc(operationContext(), chunkMin, collEpoch, collTimestamp); ASSERT_OK(chunkDocStatus.getStatus()); auto chunkDoc = chunkDocStatus.getValue(); @@ -256,7 +265,8 @@ TEST_F(SplitChunkTest, 
NewSplitShouldClaimHighestVersion) { ASSERT_EQ(competingVersion.minorVersion() + 1, chunkDoc.getVersion().minorVersion()); // Second chunkDoc should have range [chunkSplitPoint, chunkMax] - auto otherChunkDocStatus = getChunkDoc(operationContext(), chunkSplitPoint); + auto otherChunkDocStatus = + getChunkDoc(operationContext(), chunkSplitPoint, collEpoch, collTimestamp); ASSERT_OK(otherChunkDocStatus.getStatus()); auto otherChunkDoc = otherChunkDocStatus.getValue(); diff --git a/src/mongo/db/s/resharding/resharding_coordinator_test.cpp b/src/mongo/db/s/resharding/resharding_coordinator_test.cpp index 3582acff3cd..5eb46cc7044 100644 --- a/src/mongo/db/s/resharding/resharding_coordinator_test.cpp +++ b/src/mongo/db/s/resharding/resharding_coordinator_test.cpp @@ -417,15 +417,19 @@ protected: ASSERT(!onDiskReshardingFields.getDonorFields()); } - void readChunkCatalogEntriesAndAssertMatchExpected(OperationContext* opCtx, - std::vector<ChunkType> expectedChunks) { + void readChunkCatalogEntriesAndAssertMatchExpected( + OperationContext* opCtx, + std::vector<ChunkType> expectedChunks, + const OID& collEpoch, + const boost::optional<Timestamp>& collTimestamp) { auto nss = expectedChunks[0].getNS(); DBDirectClient client(opCtx); std::vector<ChunkType> foundChunks; auto cursor = client.query(ChunkType::ConfigNS, Query(BSON("ns" << nss.ns()))); while (cursor->more()) { - auto d = uassertStatusOK(ChunkType::fromConfigBSON(cursor->nextSafe().getOwned())); + auto d = uassertStatusOK( + ChunkType::fromConfigBSON(cursor->nextSafe().getOwned(), collEpoch, collTimestamp)); foundChunks.push_back(d); } @@ -586,7 +590,8 @@ protected: assertStateAndCatalogEntriesMatchExpected(opCtx, expectedCoordinatorDoc, _originalEpoch); // Check that chunks and tags entries have been correctly created - readChunkCatalogEntriesAndAssertMatchExpected(opCtx, initialChunks); + readChunkCatalogEntriesAndAssertMatchExpected( + opCtx, initialChunks, _originalEpoch, _originalTimestamp); 
readTagCatalogEntriesAndAssertMatchExpected(opCtx, newZones); } @@ -623,7 +628,8 @@ protected: // Check that chunks and tags entries previously under the temporary namespace have been // correctly updated to the original namespace - readChunkCatalogEntriesAndAssertMatchExpected(opCtx, expectedChunks); + readChunkCatalogEntriesAndAssertMatchExpected( + opCtx, expectedChunks, _finalEpoch, _finalTimestamp); readTagCatalogEntriesAndAssertMatchExpected(opCtx, expectedZones); } @@ -674,16 +680,24 @@ protected: } void assertChunkVersionDidNotIncreaseAfterStateTransition( - const ChunkType& chunk, const ChunkVersion& collectionVersion) { - auto chunkAfterTransition = getChunkDoc(operationContext(), chunk.getMin()); + const ChunkType& chunk, + const ChunkVersion& collectionVersion, + const OID& collEpoch, + const boost::optional<Timestamp>& collTimestamp) { + auto chunkAfterTransition = + getChunkDoc(operationContext(), chunk.getMin(), collEpoch, collTimestamp); ASSERT_EQ(chunkAfterTransition.getStatus(), Status::OK()); ASSERT_EQ(chunkAfterTransition.getValue().getVersion().majorVersion(), collectionVersion.majorVersion()); } - void assertChunkVersionIncreasedAfterStateTransition(const ChunkType& chunk, - const ChunkVersion& collectionVersion) { - auto chunkAfterTransition = getChunkDoc(operationContext(), chunk.getMin()); + void assertChunkVersionIncreasedAfterStateTransition( + const ChunkType& chunk, + const ChunkVersion& collectionVersion, + const OID& collEpoch, + const boost::optional<Timestamp>& collTimestamp) { + auto chunkAfterTransition = + getChunkDoc(operationContext(), chunk.getMin(), collEpoch, collTimestamp); ASSERT_EQ(chunkAfterTransition.getStatus(), Status::OK()); ASSERT_EQ(chunkAfterTransition.getValue().getVersion().majorVersion(), collectionVersion.majorVersion() + 1); @@ -692,6 +706,8 @@ protected: NamespaceString _originalNss = NamespaceString("db.foo"); UUID _originalUUID = UUID::gen(); OID _originalEpoch = OID::gen(); + boost::optional<Timestamp> + 
_originalTimestamp; // TODO: SERVER-53066 Initialize it with a Timestamp. NamespaceString _tempNss = NamespaceString("db.system.resharding." + _originalUUID.toString()); UUID _reshardingUUID = UUID::gen(); @@ -754,7 +770,8 @@ TEST_F(ReshardingCoordinatorPersistenceTest, WriteInitialInfoSucceeds) { // bumped twice in 'writeInitialStateAndCatalogUpdatesExpectSuccess': once when reshardingFields // is inserted to the collection doc, and once again when the state transitions to // kPreparingToDonate. - auto donorChunkPostTransition = getChunkDoc(operationContext(), donorChunk.getMin()); + auto donorChunkPostTransition = + getChunkDoc(operationContext(), donorChunk.getMin(), _originalEpoch, _originalTimestamp); ASSERT_EQ(donorChunkPostTransition.getStatus(), Status::OK()); ASSERT_EQ(donorChunkPostTransition.getValue().getVersion().majorVersion(), collectionVersion.majorVersion() + 2); @@ -779,8 +796,10 @@ TEST_F(ReshardingCoordinatorPersistenceTest, BasicStateTransitionSucceeds) { expectedCoordinatorDoc.setState(CoordinatorStateEnum::kBlockingWrites); writeStateTransitionUpdateExpectSuccess(operationContext(), expectedCoordinatorDoc); - assertChunkVersionIncreasedAfterStateTransition(donorChunk, donorCollectionVersion); - assertChunkVersionIncreasedAfterStateTransition(recipientChunk, recipientCollectionVersion); + assertChunkVersionIncreasedAfterStateTransition( + donorChunk, donorCollectionVersion, _originalEpoch, _originalTimestamp); + assertChunkVersionIncreasedAfterStateTransition( + recipientChunk, recipientCollectionVersion, _originalEpoch, _originalTimestamp); } TEST_F(ReshardingCoordinatorPersistenceTest, StateTransitionWithFetchTimestampSucceeds) { @@ -809,8 +828,10 @@ TEST_F(ReshardingCoordinatorPersistenceTest, StateTransitionWithFetchTimestampSu }()); writeStateTransitionUpdateExpectSuccess(operationContext(), expectedCoordinatorDoc); - assertChunkVersionIncreasedAfterStateTransition(donorChunk, donorCollectionVersion); - 
assertChunkVersionIncreasedAfterStateTransition(recipientChunk, recipientCollectionVersion); + assertChunkVersionIncreasedAfterStateTransition( + donorChunk, donorCollectionVersion, _originalEpoch, _originalTimestamp); + assertChunkVersionIncreasedAfterStateTransition( + recipientChunk, recipientCollectionVersion, _originalEpoch, _originalTimestamp); } TEST_F(ReshardingCoordinatorPersistenceTest, StateTranstionToDecisionPersistedSucceeds) { @@ -840,8 +861,8 @@ TEST_F(ReshardingCoordinatorPersistenceTest, StateTranstionToDecisionPersistedSu operationContext(), expectedCoordinatorDoc, fetchTimestamp, updatedChunks, updatedZones); // Since the epoch is changed, there is no need to bump the chunk versions with the transition. - assertChunkVersionDidNotIncreaseAfterStateTransition(recipientChunk, - recipientChunk.getVersion()); + assertChunkVersionDidNotIncreaseAfterStateTransition( + recipientChunk, recipientChunk.getVersion(), _finalEpoch, _finalTimestamp); } TEST_F(ReshardingCoordinatorPersistenceTest, StateTransitionToErrorSucceeds) { @@ -863,7 +884,8 @@ TEST_F(ReshardingCoordinatorPersistenceTest, StateTransitionToDoneSucceeds) { auto collectionVersion = finalChunk.getVersion(); removeCoordinatorDocAndReshardingFieldsExpectSuccess(operationContext(), coordinatorDoc); - assertChunkVersionIncreasedAfterStateTransition(finalChunk, collectionVersion); + assertChunkVersionIncreasedAfterStateTransition( + finalChunk, collectionVersion, _finalEpoch, _finalTimestamp); } TEST_F(ReshardingCoordinatorPersistenceTest, StateTransitionWhenCoordinatorDocDoesNotExistFails) { |