22 files changed, 528 insertions, 309 deletions
diff --git a/src/mongo/db/s/balancer/type_migration_test.cpp b/src/mongo/db/s/balancer/type_migration_test.cpp
index 13e10d94a9f..ec1d52a8a38 100644
--- a/src/mongo/db/s/balancer/type_migration_test.cpp
+++ b/src/mongo/db/s/balancer/type_migration_test.cpp
@@ -48,7 +48,9 @@ const ShardId kToShard("shard0001");
 const bool kWaitForDelete{true};
 
 TEST(MigrationTypeTest, ConvertFromMigrationInfo) {
-    const ChunkVersion version(1, 2, OID::gen(), boost::none /* timestamp */);
+    const auto collEpoch = OID::gen();
+    const auto collTimestamp = boost::none;
+    const ChunkVersion version(1, 2, collEpoch, collTimestamp);
 
     BSONObjBuilder chunkBuilder;
     chunkBuilder.append(ChunkType::name(), OID::gen());
@@ -58,7 +60,8 @@ TEST(MigrationTypeTest, ConvertFromMigrationInfo) {
     version.appendLegacyWithField(&chunkBuilder, ChunkType::lastmod());
     chunkBuilder.append(ChunkType::shard(), kFromShard.toString());
 
-    ChunkType chunkType = assertGet(ChunkType::fromConfigBSON(chunkBuilder.obj()));
+    ChunkType chunkType =
+        assertGet(ChunkType::fromConfigBSON(chunkBuilder.obj(), collEpoch, collTimestamp));
     ASSERT_OK(chunkType.validate());
 
     MigrateInfo migrateInfo(kToShard,
diff --git a/src/mongo/db/s/config/config_server_test_fixture.cpp b/src/mongo/db/s/config/config_server_test_fixture.cpp
index 2497a538152..f4c7a461e9b 100644
--- a/src/mongo/db/s/config/config_server_test_fixture.cpp
+++ b/src/mongo/db/s/config/config_server_test_fixture.cpp
@@ -345,14 +345,17 @@ void ConfigServerTestFixture::setupCollection(const NamespaceString& nss,
     }
 }
 
-StatusWith<ChunkType> ConfigServerTestFixture::getChunkDoc(OperationContext* opCtx,
-                                                           const BSONObj& minKey) {
+StatusWith<ChunkType> ConfigServerTestFixture::getChunkDoc(
+    OperationContext* opCtx,
+    const BSONObj& minKey,
+    const OID& collEpoch,
+    const boost::optional<Timestamp>& collTimestamp) {
     auto doc =
         findOneOnConfigCollection(opCtx, ChunkType::ConfigNS, BSON(ChunkType::min() << minKey));
     if (!doc.isOK())
         return doc.getStatus();
 
-    return ChunkType::fromConfigBSON(doc.getValue());
+    return ChunkType::fromConfigBSON(doc.getValue(), collEpoch, collTimestamp);
 }
 
 void ConfigServerTestFixture::setupDatabase(const std::string& dbName,
diff --git a/src/mongo/db/s/config/config_server_test_fixture.h b/src/mongo/db/s/config/config_server_test_fixture.h
index e19c528485a..0e1ee919469 100644
--- a/src/mongo/db/s/config/config_server_test_fixture.h
+++ b/src/mongo/db/s/config/config_server_test_fixture.h
@@ -108,7 +108,10 @@ protected:
     /**
      * Retrieves the chunk document from the config server.
      */
-    StatusWith<ChunkType> getChunkDoc(OperationContext* opCtx, const BSONObj& minKey);
+    StatusWith<ChunkType> getChunkDoc(OperationContext* opCtx,
+                                      const BSONObj& minKey,
+                                      const OID& collEpoch,
+                                      const boost::optional<Timestamp>& collTimestamp);
 
     /**
      * Inserts a document for the database into the config.databases collection.
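Every file in this commit repeats the change shown above: ChunkType::fromConfigBSON no longer parses a chunk document in isolation, and callers must now supply the epoch and optional timestamp of the owning collection so the resulting ChunkVersion is built consistently. A minimal sketch of the new call shape (chunkDoc and the surrounding scaffolding are illustrative stand-ins, not part of the patch):

    // In production code the epoch/timestamp come from the config.collections
    // entry; the values below are placeholders for illustration only.
    const OID collEpoch = OID::gen();
    const boost::optional<Timestamp> collTimestamp = boost::none;

    // Old call shape: ChunkType::fromConfigBSON(chunkDoc)
    // New call shape: the collection identity travels with every parse.
    StatusWith<ChunkType> swChunk =
        ChunkType::fromConfigBSON(chunkDoc, collEpoch, collTimestamp);
    if (swChunk.isOK()) {
        // The parsed version now carries the supplied epoch and timestamp.
        const ChunkVersion version = swChunk.getValue().getVersion();
    }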
diff --git a/src/mongo/db/s/config/sharding_catalog_manager.h b/src/mongo/db/s/config/sharding_catalog_manager.h
index afe44f4cfd1..9474079308d 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager.h
+++ b/src/mongo/db/s/config/sharding_catalog_manager.h
@@ -567,6 +567,8 @@ private:
      */
     StatusWith<ChunkType> _findChunkOnConfig(OperationContext* opCtx,
                                              const NamespaceStringOrUUID& nsOrUUID,
+                                             const OID& epoch,
+                                             const boost::optional<Timestamp>& timestamp,
                                              const BSONObj& key);
 
     /**
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_bump_shard_versions_and_change_metadata_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_bump_shard_versions_and_change_metadata_test.cpp
index 9ff72e16ffe..3f5977caf67 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_bump_shard_versions_and_change_metadata_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_bump_shard_versions_and_change_metadata_test.cpp
@@ -118,10 +118,13 @@ protected:
      */
     void assertOnlyOneChunkVersionBumped(OperationContext* opCtx,
                                          std::vector<ChunkType> originalChunkTypes,
-                                         const ChunkVersion& targetChunkVersion) {
+                                         const ChunkVersion& targetChunkVersion,
+                                         const OID& collEpoch,
+                                         const boost::optional<Timestamp>& collTimestamp) {
         auto aChunkVersionWasBumped = false;
         for (auto originalChunkType : originalChunkTypes) {
-            auto swChunkTypeAfter = getChunkDoc(opCtx, originalChunkType.getMin());
+            auto swChunkTypeAfter =
+                getChunkDoc(opCtx, originalChunkType.getMin(), collEpoch, collTimestamp);
             auto wasBumped = chunkMajorVersionWasBumpedAndOtherFieldsAreUnchanged(
                 originalChunkType, swChunkTypeAfter, targetChunkVersion);
             if (aChunkVersionWasBumped) {
@@ -137,19 +140,19 @@ protected:
 
 TEST_F(ShardingCatalogManagerBumpShardVersionsAndChangeMetadataTest,
        BumpChunkVersionOneChunkPerShard) {
-    const auto epoch = OID::gen();
-    const auto shard0Chunk0 =
-        generateChunkType(kNss,
-                          ChunkVersion(10, 1, epoch, boost::none /* timestamp */),
-                          kShard0.getName(),
-                          BSON("a" << 1),
-                          BSON("a" << 10));
-    const auto shard1Chunk0 =
-        generateChunkType(kNss,
-                          ChunkVersion(11, 2, epoch, boost::none /* timestamp */),
-                          kShard1.getName(),
-                          BSON("a" << 11),
-                          BSON("a" << 20));
+    const auto collEpoch = OID::gen();
+    const auto collTimestamp = boost::none;
+
+    const auto shard0Chunk0 = generateChunkType(kNss,
+                                                ChunkVersion(10, 1, collEpoch, collTimestamp),
+                                                kShard0.getName(),
+                                                BSON("a" << 1),
+                                                BSON("a" << 10));
+    const auto shard1Chunk0 = generateChunkType(kNss,
+                                                ChunkVersion(11, 2, collEpoch, collTimestamp),
+                                                kShard1.getName(),
+                                                BSON("a" << 11),
+                                                BSON("a" << 20));
 
     const auto collectionVersion = shard1Chunk0.getVersion();
     ChunkVersion targetChunkVersion(collectionVersion.majorVersion() + 1,
@@ -165,33 +168,36 @@ TEST_F(ShardingCatalogManagerBumpShardVersionsAndChangeMetadataTest,
         opCtx, kNss, [&](OperationContext*, TxnNumber) {});
 
     ASSERT_TRUE(chunkMajorVersionWasBumpedAndOtherFieldsAreUnchanged(
-        shard0Chunk0, getChunkDoc(operationContext(), shard0Chunk0.getMin()), targetChunkVersion));
+        shard0Chunk0,
+        getChunkDoc(operationContext(), shard0Chunk0.getMin(), collEpoch, collTimestamp),
+        targetChunkVersion));
 
     ASSERT_TRUE(chunkMajorVersionWasBumpedAndOtherFieldsAreUnchanged(
-        shard1Chunk0, getChunkDoc(operationContext(), shard1Chunk0.getMin()), targetChunkVersion));
+        shard1Chunk0,
+        getChunkDoc(operationContext(), shard1Chunk0.getMin(), collEpoch, collTimestamp),
+        targetChunkVersion));
 }
 
 TEST_F(ShardingCatalogManagerBumpShardVersionsAndChangeMetadataTest,
        BumpChunkVersionTwoChunksOnOneShard) {
-    const auto epoch = OID::gen();
-    const auto shard0Chunk0 =
-        generateChunkType(kNss,
-                          ChunkVersion(10, 1, epoch, boost::none /* timestamp */),
-                          kShard0.getName(),
-                          BSON("a" << 1),
-                          BSON("a" << 10));
-    const auto shard0Chunk1 =
-        generateChunkType(kNss,
-                          ChunkVersion(11, 2, epoch, boost::none /* timestamp */),
-                          kShard0.getName(),
-                          BSON("a" << 11),
-                          BSON("a" << 20));
-    const auto shard1Chunk0 =
-        generateChunkType(kNss,
-                          ChunkVersion(8, 1, epoch, boost::none /* timestamp */),
-                          kShard1.getName(),
-                          BSON("a" << 21),
-                          BSON("a" << 100));
+    const auto collEpoch = OID::gen();
+    const auto collTimestamp = boost::none;
+
+    const auto shard0Chunk0 = generateChunkType(kNss,
+                                                ChunkVersion(10, 1, collEpoch, collTimestamp),
+                                                kShard0.getName(),
+                                                BSON("a" << 1),
+                                                BSON("a" << 10));
+    const auto shard0Chunk1 = generateChunkType(kNss,
+                                                ChunkVersion(11, 2, collEpoch, collTimestamp),
+                                                kShard0.getName(),
+                                                BSON("a" << 11),
+                                                BSON("a" << 20));
+    const auto shard1Chunk0 = generateChunkType(kNss,
+                                                ChunkVersion(8, 1, collEpoch, collTimestamp),
+                                                kShard1.getName(),
+                                                BSON("a" << 21),
+                                                BSON("a" << 100));
 
     const auto collectionVersion = shard0Chunk1.getVersion();
     ChunkVersion targetChunkVersion(collectionVersion.majorVersion() + 1,
@@ -205,40 +211,43 @@ TEST_F(ShardingCatalogManagerBumpShardVersionsAndChangeMetadataTest,
     ShardingCatalogManager::get(opCtx)->bumpCollectionVersionAndChangeMetadataInTxn(
         opCtx, kNss, [&](OperationContext*, TxnNumber) {});
 
-    assertOnlyOneChunkVersionBumped(
-        operationContext(), {shard0Chunk0, shard0Chunk1}, targetChunkVersion);
+    assertOnlyOneChunkVersionBumped(operationContext(),
+                                    {shard0Chunk0, shard0Chunk1},
+                                    targetChunkVersion,
+                                    collEpoch,
+                                    collTimestamp);
 
     ASSERT_TRUE(chunkMajorVersionWasBumpedAndOtherFieldsAreUnchanged(
-        shard1Chunk0, getChunkDoc(operationContext(), shard1Chunk0.getMin()), targetChunkVersion));
+        shard1Chunk0,
+        getChunkDoc(operationContext(), shard1Chunk0.getMin(), collEpoch, collTimestamp),
+        targetChunkVersion));
 }
 
 TEST_F(ShardingCatalogManagerBumpShardVersionsAndChangeMetadataTest,
        BumpChunkVersionTwoChunksOnTwoShards) {
-    const auto epoch = OID::gen();
-    const auto shard0Chunk0 =
-        generateChunkType(kNss,
-                          ChunkVersion(10, 1, epoch, boost::none /* timestamp */),
-                          kShard0.getName(),
-                          BSON("a" << 1),
-                          BSON("a" << 10));
-    const auto shard0Chunk1 =
-        generateChunkType(kNss,
-                          ChunkVersion(11, 2, epoch, boost::none /* timestamp */),
-                          kShard0.getName(),
-                          BSON("a" << 11),
-                          BSON("a" << 20));
-    const auto shard1Chunk0 =
-        generateChunkType(kNss,
-                          ChunkVersion(8, 1, epoch, boost::none /* timestamp */),
-                          kShard1.getName(),
-                          BSON("a" << 21),
-                          BSON("a" << 100));
-    const auto shard1Chunk1 =
-        generateChunkType(kNss,
-                          ChunkVersion(12, 1, epoch, boost::none /* timestamp */),
-                          kShard1.getName(),
-                          BSON("a" << 101),
-                          BSON("a" << 200));
+    const auto collEpoch = OID::gen();
+    const auto collTimestamp = boost::none;
+
+    const auto shard0Chunk0 = generateChunkType(kNss,
+                                                ChunkVersion(10, 1, collEpoch, collTimestamp),
+                                                kShard0.getName(),
+                                                BSON("a" << 1),
+                                                BSON("a" << 10));
+    const auto shard0Chunk1 = generateChunkType(kNss,
+                                                ChunkVersion(11, 2, collEpoch, collTimestamp),
+                                                kShard0.getName(),
+                                                BSON("a" << 11),
+                                                BSON("a" << 20));
+    const auto shard1Chunk0 = generateChunkType(kNss,
+                                                ChunkVersion(8, 1, collEpoch, collTimestamp),
+                                                kShard1.getName(),
+                                                BSON("a" << 21),
+                                                BSON("a" << 100));
+    const auto shard1Chunk1 = generateChunkType(kNss,
+                                                ChunkVersion(12, 1, collEpoch, collTimestamp),
+                                                kShard1.getName(),
+                                                BSON("a" << 101),
+                                                BSON("a" << 200));
 
     const auto collectionVersion = shard1Chunk1.getVersion();
     ChunkVersion targetChunkVersion(collectionVersion.majorVersion() + 1,
@@ -252,28 +261,34 @@ TEST_F(ShardingCatalogManagerBumpShardVersionsAndChangeMetadataTest,
     ShardingCatalogManager::get(opCtx)->bumpCollectionVersionAndChangeMetadataInTxn(
         opCtx, kNss, [&](OperationContext*, TxnNumber) {});
 
-    assertOnlyOneChunkVersionBumped(
-        operationContext(), {shard0Chunk0, shard0Chunk1}, targetChunkVersion);
-
-    assertOnlyOneChunkVersionBumped(
-        operationContext(), {shard1Chunk0, shard1Chunk1}, targetChunkVersion);
+    assertOnlyOneChunkVersionBumped(operationContext(),
+                                    {shard0Chunk0, shard0Chunk1},
+                                    targetChunkVersion,
+                                    collEpoch,
+                                    collTimestamp);
+
+    assertOnlyOneChunkVersionBumped(operationContext(),
+                                    {shard1Chunk0, shard1Chunk1},
+                                    targetChunkVersion,
+                                    collEpoch,
+                                    collTimestamp);
 }
 
 TEST_F(ShardingCatalogManagerBumpShardVersionsAndChangeMetadataTest,
        SucceedsInThePresenceOfTransientTransactionErrors) {
-    const auto epoch = OID::gen();
-    const auto shard0Chunk0 =
-        generateChunkType(kNss,
-                          ChunkVersion(10, 1, epoch, boost::none /* timestamp */),
-                          kShard0.getName(),
-                          BSON("a" << 1),
-                          BSON("a" << 10));
-    const auto shard1Chunk0 =
-        generateChunkType(kNss,
-                          ChunkVersion(11, 2, epoch, boost::none /* timestamp */),
-                          kShard1.getName(),
-                          BSON("a" << 11),
-                          BSON("a" << 20));
+    const auto collEpoch = OID::gen();
+    const auto collTimestamp = boost::none;
+
+    const auto shard0Chunk0 = generateChunkType(kNss,
+                                                ChunkVersion(10, 1, collEpoch, collTimestamp),
+                                                kShard0.getName(),
+                                                BSON("a" << 1),
+                                                BSON("a" << 10));
+    const auto shard1Chunk0 = generateChunkType(kNss,
+                                                ChunkVersion(11, 2, collEpoch, collTimestamp),
+                                                kShard1.getName(),
+                                                BSON("a" << 11),
+                                                BSON("a" << 20));
 
     const auto initialCollectionVersion = shard1Chunk0.getVersion();
     setupCollection(kNss, kKeyPattern, {shard0Chunk0, shard1Chunk0});
@@ -294,10 +309,14 @@ TEST_F(ShardingCatalogManagerBumpShardVersionsAndChangeMetadataTest,
                                    initialCollectionVersion.getTimestamp()};
 
     ASSERT_TRUE(chunkMajorVersionWasBumpedAndOtherFieldsAreUnchanged(
-        shard0Chunk0, getChunkDoc(operationContext(), shard0Chunk0.getMin()), targetChunkVersion));
+        shard0Chunk0,
+        getChunkDoc(operationContext(), shard0Chunk0.getMin(), collEpoch, collTimestamp),
+        targetChunkVersion));
 
     ASSERT_TRUE(chunkMajorVersionWasBumpedAndOtherFieldsAreUnchanged(
-        shard1Chunk0, getChunkDoc(operationContext(), shard1Chunk0.getMin()), targetChunkVersion));
+        shard1Chunk0,
+        getChunkDoc(operationContext(), shard1Chunk0.getMin(), collEpoch, collTimestamp),
+        targetChunkVersion));
 
     ASSERT_EQ(numCalls, 5) << "transaction succeeded after unexpected number of attempts";
 
@@ -323,29 +342,33 @@ TEST_F(ShardingCatalogManagerBumpShardVersionsAndChangeMetadataTest,
                                    initialCollectionVersion.getTimestamp()};
 
     ASSERT_TRUE(chunkMajorVersionWasBumpedAndOtherFieldsAreUnchanged(
-        shard0Chunk0, getChunkDoc(operationContext(), shard0Chunk0.getMin()), targetChunkVersion));
+        shard0Chunk0,
+        getChunkDoc(operationContext(), shard0Chunk0.getMin(), collEpoch, collTimestamp),
+        targetChunkVersion));
 
     ASSERT_TRUE(chunkMajorVersionWasBumpedAndOtherFieldsAreUnchanged(
-        shard1Chunk0, getChunkDoc(operationContext(), shard1Chunk0.getMin()), targetChunkVersion));
+        shard1Chunk0,
+        getChunkDoc(operationContext(), shard1Chunk0.getMin(), collEpoch, collTimestamp),
+        targetChunkVersion));
 
     ASSERT_EQ(numCalls, 5) << "transaction succeeded after unexpected number of attempts";
 }
 
 TEST_F(ShardingCatalogManagerBumpShardVersionsAndChangeMetadataTest,
        StopsRetryingOnPermanentServerErrors) {
-    const auto epoch = OID::gen();
-    const auto shard0Chunk0 =
-        generateChunkType(kNss,
-                          ChunkVersion(10, 1, epoch, boost::none /* timestamp */),
-                          kShard0.getName(),
-                          BSON("a" << 1),
-                          BSON("a" << 10));
-    const auto shard1Chunk0 =
-        generateChunkType(kNss,
-                          ChunkVersion(11, 2, epoch, boost::none /* timestamp */),
-                          kShard1.getName(),
-                          BSON("a" << 11),
-                          BSON("a" << 20));
+    const auto collEpoch = OID::gen();
+    const auto collTimestamp = boost::none;
+
+    const auto shard0Chunk0 = generateChunkType(kNss,
+                                                ChunkVersion(10, 1, collEpoch, collTimestamp),
+                                                kShard0.getName(),
+                                                BSON("a" << 1),
+                                                BSON("a" << 10));
+    const auto shard1Chunk0 = generateChunkType(kNss,
+                                                ChunkVersion(11, 2, collEpoch, collTimestamp),
+                                                kShard1.getName(),
+                                                BSON("a" << 11),
+                                                BSON("a" << 20));
 
     setupCollection(kNss, kKeyPattern, {shard0Chunk0, shard1Chunk0});
 
@@ -364,12 +387,12 @@
     ASSERT_TRUE(chunkMajorVersionWasBumpedAndOtherFieldsAreUnchanged(
         shard0Chunk0,
-        getChunkDoc(operationContext(), shard0Chunk0.getMin()),
+        getChunkDoc(operationContext(), shard0Chunk0.getMin(), collEpoch, collTimestamp),
         shard0Chunk0.getVersion()));
 
     ASSERT_TRUE(chunkMajorVersionWasBumpedAndOtherFieldsAreUnchanged(
         shard1Chunk0,
-        getChunkDoc(operationContext(), shard1Chunk0.getMin()),
+        getChunkDoc(operationContext(), shard1Chunk0.getMin(), collEpoch, collTimestamp),
         shard1Chunk0.getVersion()));
 
     ASSERT_EQ(numCalls, 1) << "transaction failed after unexpected number of attempts";
@@ -389,12 +412,12 @@
     ASSERT_TRUE(chunkMajorVersionWasBumpedAndOtherFieldsAreUnchanged(
         shard0Chunk0,
-        getChunkDoc(operationContext(), shard0Chunk0.getMin()),
+        getChunkDoc(operationContext(), shard0Chunk0.getMin(), collEpoch, collTimestamp),
         shard0Chunk0.getVersion()));
 
     ASSERT_TRUE(chunkMajorVersionWasBumpedAndOtherFieldsAreUnchanged(
         shard1Chunk0,
-        getChunkDoc(operationContext(), shard1Chunk0.getMin()),
+        getChunkDoc(operationContext(), shard1Chunk0.getMin(), collEpoch, collTimestamp),
         shard1Chunk0.getVersion()));
 
     ASSERT_EQ(numCalls, 1) << "transaction failed after unexpected number of attempts";
@@ -419,12 +442,12 @@
     ASSERT_TRUE(chunkMajorVersionWasBumpedAndOtherFieldsAreUnchanged(
         shard0Chunk0,
-        getChunkDoc(operationContext(), shard0Chunk0.getMin()),
+        getChunkDoc(operationContext(), shard0Chunk0.getMin(), collEpoch, collTimestamp),
         shard0Chunk0.getVersion()));
 
     ASSERT_TRUE(chunkMajorVersionWasBumpedAndOtherFieldsAreUnchanged(
         shard1Chunk0,
-        getChunkDoc(operationContext(), shard1Chunk0.getMin()),
+        getChunkDoc(operationContext(), shard1Chunk0.getMin(), collEpoch, collTimestamp),
         shard1Chunk0.getVersion()));
 
     ASSERT_EQ(numCalls, 1) << "transaction failed after unexpected number of attempts";
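These tests all follow the same arithmetic: the collection version is the highest chunk version across the shards, and bumpCollectionVersionAndChangeMetadataInTxn is expected to raise exactly one chunk per shard to the next major version. A worked instance using the values from BumpChunkVersionOneChunkPerShard above (a sketch, not code quoted from the patch):

    const auto collEpoch = OID::gen();
    const auto collTimestamp = boost::none;

    // shard0 owns a chunk at version (10, 1); shard1 owns one at (11, 2), so
    // the collection version is (11, 2) and the bump targets major version 12.
    const ChunkVersion collectionVersion(11, 2, collEpoch, collTimestamp);
    const auto targetMajorVersion = collectionVersion.majorVersion() + 1;  // == 12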
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
index cb7976b85c7..94229292b36 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
@@ -138,7 +138,7 @@ BSONArray buildMergeChunksTransactionPrecond(const std::vector<ChunkType>& chunk
                                              const ChunkVersion& collVersion) {
     BSONArrayBuilder preCond;
 
-    for (auto chunk : chunksToMerge) {
+    for (const auto& chunk : chunksToMerge) {
         BSONObj query = BSON(ChunkType::min(chunk.getMin()) << ChunkType::max(chunk.getMax()));
         if (collVersion.getTimestamp()) {
             query = query.addFields(BSON(ChunkType::collectionUUID << chunk.getCollectionUUID()));
@@ -162,6 +162,8 @@ BSONArray buildMergeChunksTransactionPrecond(const std::vector<ChunkType>& chunk
  */
 StatusWith<ChunkType> getCurrentChunk(OperationContext* opCtx,
                                       const NamespaceStringOrUUID& nsOrUUID,
+                                      const OID& epoch,
+                                      const boost::optional<Timestamp>& timestamp,
                                       const ChunkType& requestedChunk) {
     uassert(4683300,
             "Config server rejecting commitChunkMigration request that does not have a "
@@ -199,7 +201,8 @@ StatusWith<ChunkType> getCurrentChunk(OperationContext* opCtx,
                               << "). Cannot execute the migration commit with invalid chunks."};
     }
 
-    return uassertStatusOK(ChunkType::fromConfigBSON(findResponseWith.getValue().docs.front()));
+    return uassertStatusOK(
+        ChunkType::fromConfigBSON(findResponseWith.getValue().docs.front(), epoch, timestamp));
 }
 
 BSONObj makeCommitChunkTransactionCommand(const NamespaceString& nss,
@@ -256,6 +259,8 @@ BSONObj makeCommitChunkTransactionCommand(const NamespaceString& nss,
  */
 boost::optional<ChunkType> getControlChunkForMigrate(OperationContext* opCtx,
                                                      const NamespaceStringOrUUID& nsOrUUID,
+                                                     const OID& epoch,
+                                                     const boost::optional<Timestamp>& timestamp,
                                                      const ChunkType& migratedChunk,
                                                      const ShardId& fromShard) {
     auto const configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard();
@@ -282,7 +287,7 @@ boost::optional<ChunkType> getControlChunkForMigrate(OperationContext* opCtx,
         return boost::none;
     }
 
-    return uassertStatusOK(ChunkType::fromConfigBSON(response.docs.front()));
+    return uassertStatusOK(ChunkType::fromConfigBSON(response.docs.front(), epoch, timestamp));
 }
 
 // Helper function to find collection version and shard version.
@@ -571,7 +576,8 @@ StatusWith<BSONObj> ShardingCatalogManager::commitChunkSplit(
 
     // Find the chunk history.
     const auto collNsOrUUID = getNsOrUUIDForChunkTargeting(coll);
-    const auto origChunk = _findChunkOnConfig(opCtx, collNsOrUUID, range.getMin());
+    const auto origChunk = _findChunkOnConfig(
+        opCtx, collNsOrUUID, collVersion.epoch(), collVersion.getTimestamp(), range.getMin());
     if (!origChunk.isOK()) {
         repl::ReplClientInfo::forClient(opCtx->getClient()).setLastOpToSystemLastOpTime(opCtx);
         return origChunk.getStatus();
@@ -794,8 +800,11 @@ StatusWith<BSONObj> ShardingCatalogManager::commitChunkMerge(
 
     // Check if the chunk(s) have already been merged. If so, return success.
     const auto collNsOrUUID = getNsOrUUIDForChunkTargeting(coll);
-    auto minChunkOnDisk =
-        uassertStatusOK(_findChunkOnConfig(opCtx, collNsOrUUID, chunkBoundaries.front()));
+    auto minChunkOnDisk = uassertStatusOK(_findChunkOnConfig(opCtx,
                                                             collNsOrUUID,
                                                             collVersion.epoch(),
                                                             collVersion.getTimestamp(),
                                                             chunkBoundaries.front()));
     if (minChunkOnDisk.getMax().woCompare(chunkBoundaries.back()) == 0) {
         auto replyWithVersions = getShardAndCollectionVersion(opCtx, coll, ShardId(shardName));
         // Makes sure that the last thing we read in getCurrentChunk and
@@ -812,8 +821,11 @@
     // Do not use the first chunk boundary as a max bound while building chunks
     for (size_t i = 1; i < chunkBoundaries.size(); ++i) {
         // Read the original chunk from disk to lookup that chunk's '_id' field.
-        auto currentChunk =
-            uassertStatusOK(_findChunkOnConfig(opCtx, collNsOrUUID, chunkBoundaries[i - 1]));
+        auto currentChunk = uassertStatusOK(_findChunkOnConfig(opCtx,
                                                               collNsOrUUID,
                                                               collVersion.epoch(),
                                                               collVersion.getTimestamp(),
                                                               chunkBoundaries[i - 1]));
 
         // Ensure the chunk boundaries are strictly increasing
         if (chunkBoundaries[i].woCompare(currentChunk.getMin()) <= 0) {
@@ -938,8 +950,8 @@ StatusWith<BSONObj> ShardingCatalogManager::commitChunksMerge(
 
     // Check if the chunk(s) have already been merged. If so, return success.
     if (shardChunksInRangeResponse.docs.size() == 1) {
-        auto chunk =
-            uassertStatusOK(ChunkType::fromConfigBSON(shardChunksInRangeResponse.docs.back()));
+        auto chunk = uassertStatusOK(ChunkType::fromConfigBSON(
+            shardChunksInRangeResponse.docs.back(), coll.getEpoch(), coll.getTimestamp()));
         uassert(
             ErrorCodes::IllegalOperation,
             str::stream() << "could not merge chunks, shard " << shardId
@@ -959,7 +971,8 @@
     // and ensure that the retrieved list of chunks covers the whole range.
     std::vector<ChunkType> chunksToMerge;
     for (const auto& chunkDoc : shardChunksInRangeResponse.docs) {
-        auto chunk = uassertStatusOK(ChunkType::fromConfigBSON(chunkDoc));
+        auto chunk = uassertStatusOK(
+            ChunkType::fromConfigBSON(chunkDoc, coll.getEpoch(), coll.getTimestamp()));
         if (chunksToMerge.empty()) {
             uassert(ErrorCodes::IllegalOperation,
                     str::stream()
@@ -1104,8 +1117,9 @@ StatusWith<BSONObj> ShardingCatalogManager::commitChunkMigration(
                           << ", but found no chunks",
             !findResponse.docs.empty());
 
-    const auto chunk = uassertStatusOK(ChunkType::fromConfigBSON(findResponse.docs[0]));
-    const auto currentCollectionVersion = chunk.getVersion();
+    const auto chunk = uassertStatusOK(
+        ChunkType::fromConfigBSON(findResponse.docs[0], coll.getEpoch(), coll.getTimestamp()));
+    const auto& currentCollectionVersion = chunk.getVersion();
 
     if (MONGO_unlikely(migrationCommitVersionError.shouldFail())) {
         uasserted(ErrorCodes::StaleEpoch,
@@ -1130,7 +1144,8 @@
 
     // Check if chunk still exists and which shard owns it
     const auto collNsOrUUID = getNsOrUUIDForChunkTargeting(coll);
-    auto swCurrentChunk = getCurrentChunk(opCtx, collNsOrUUID, migratedChunk);
+    auto swCurrentChunk =
+        getCurrentChunk(opCtx, collNsOrUUID, coll.getEpoch(), coll.getTimestamp(), migratedChunk);
 
     if (!swCurrentChunk.isOK()) {
         return swCurrentChunk.getStatus();
@@ -1165,11 +1180,12 @@
                               << currentChunk.toConfigBSON() << " on the shard "
                               << fromShard.toString()};
     }
 
-    auto controlChunk = getControlChunkForMigrate(opCtx, collNsOrUUID, migratedChunk, fromShard);
+    auto controlChunk = getControlChunkForMigrate(
+        opCtx, collNsOrUUID, coll.getEpoch(), coll.getTimestamp(), migratedChunk, fromShard);
 
     // Find the chunk history.
-    const auto origChunk =
-        uassertStatusOK(_findChunkOnConfig(opCtx, collNsOrUUID, migratedChunk.getMin()));
+    const auto origChunk = uassertStatusOK(_findChunkOnConfig(
+        opCtx, collNsOrUUID, coll.getEpoch(), coll.getTimestamp(), migratedChunk.getMin()));
 
     // Generate the new versions of migratedChunk and controlChunk. Migrating chunk's minor version
     // will be 0.
@@ -1224,8 +1240,8 @@
     boost::optional<ChunkType> newControlChunk = boost::none;
     if (controlChunk) {
         // Find the chunk history.
-        auto origControlChunk =
-            uassertStatusOK(_findChunkOnConfig(opCtx, collNsOrUUID, controlChunk->getMin()));
+        auto origControlChunk = uassertStatusOK(_findChunkOnConfig(
+            opCtx, collNsOrUUID, coll.getEpoch(), coll.getTimestamp(), controlChunk->getMin()));
 
         newControlChunk = std::move(origControlChunk);
         newControlChunk->setVersion(ChunkVersion(currentCollectionVersion.majorVersion() + 1,
@@ -1257,7 +1273,11 @@
 }
 
 StatusWith<ChunkType> ShardingCatalogManager::_findChunkOnConfig(
-    OperationContext* opCtx, const NamespaceStringOrUUID& nsOrUUID, const BSONObj& key) {
+    OperationContext* opCtx,
+    const NamespaceStringOrUUID& nsOrUUID,
+    const OID& epoch,
+    const boost::optional<Timestamp>& timestamp,
+    const BSONObj& key) {
     auto const configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard();
 
     const auto query = [&]() {
@@ -1288,7 +1308,7 @@ StatusWith<ChunkType> ShardingCatalogManager::_findChunkOnConfig(
                               << " and min key " << key.toString() << ", but found no chunks"};
     }
 
-    return ChunkType::fromConfigBSON(origChunks.front());
+    return ChunkType::fromConfigBSON(origChunks.front(), epoch, timestamp);
 }
 
 void ShardingCatalogManager::clearJumboFlag(OperationContext* opCtx,
@@ -1346,7 +1366,8 @@ void ShardingCatalogManager::clearJumboFlag(OperationContext* opCtx,
                           << " from ns: " << nss.ns(),
             !targetChunkVector.empty());
 
-    const auto targetChunk = uassertStatusOK(ChunkType::fromConfigBSON(targetChunkVector.front()));
+    const auto targetChunk = uassertStatusOK(
+        ChunkType::fromConfigBSON(targetChunkVector.front(), coll.getEpoch(), coll.getTimestamp()));
 
     if (!targetChunk.getJumbo()) {
         return;
@@ -1375,8 +1396,8 @@
                           << ", but found no chunks",
             !chunksVector.empty());
 
-    const auto highestVersionChunk =
-        uassertStatusOK(ChunkType::fromConfigBSON(chunksVector.front()));
+    const auto highestVersionChunk = uassertStatusOK(
+        ChunkType::fromConfigBSON(chunksVector.front(), coll.getEpoch(), coll.getTimestamp()));
     const auto currentCollectionVersion = highestVersionChunk.getVersion();
 
     // It is possible for a migration to end up running partly without the protection of the
@@ -1472,8 +1493,8 @@ void ShardingCatalogManager::ensureChunkVersionIsGreaterThan(OperationContext* o
         return;
     }
 
-    const auto currentChunk =
-        uassertStatusOK(ChunkType::fromConfigBSON(matchingChunksVector.front()));
+    const auto currentChunk = uassertStatusOK(ChunkType::fromConfigBSON(
+        matchingChunksVector.front(), version.epoch(), version.getTimestamp()));
 
     if (version.isOlderThan(currentChunk.getVersion())) {
         LOGV2(23885,
@@ -1512,8 +1533,8 @@
               "epoch"_attr = version.epoch());
         return;
     }
-    const auto highestChunk =
-        uassertStatusOK(ChunkType::fromConfigBSON(highestChunksVector.front()));
+    const auto highestChunk = uassertStatusOK(ChunkType::fromConfigBSON(
+        highestChunksVector.front(), version.epoch(), version.getTimestamp()));
 
     // Generate a new version for the chunk by incrementing the collectionVersion's major version.
     auto newChunk = currentChunk;
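A pattern worth noting in the helper changes above: the collection identity is resolved once per operation, from either the CollectionType at hand or a ChunkVersion that is already known, and then threaded into every chunk lookup. Both calling conventions appear in this file (coll, collVersion, collNsOrUUID and the key arguments stand for values the surrounding functions already hold):

    // Derived from a config.collections entry:
    const auto origChunk = uassertStatusOK(_findChunkOnConfig(
        opCtx, collNsOrUUID, coll.getEpoch(), coll.getTimestamp(), migratedChunk.getMin()));

    // Derived from a collection version that was already fetched:
    const auto splitChunk = _findChunkOnConfig(
        opCtx, collNsOrUUID, collVersion.epoch(), collVersion.getTimestamp(), range.getMin());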
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_clear_jumbo_flag_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_clear_jumbo_flag_test.cpp
index bd428f52954..68845265eae 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_clear_jumbo_flag_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_clear_jumbo_flag_test.cpp
@@ -100,21 +100,29 @@ private:
 };
 
 TEST_F(ClearJumboFlagTest, ClearJumboShouldBumpVersion) {
+    const auto collEpoch = epoch();
+    const auto collTimestamp = boost::none;
+
     ShardingCatalogManager::get(operationContext())
-        ->clearJumboFlag(operationContext(), ns(), epoch(), jumboChunk());
+        ->clearJumboFlag(operationContext(), ns(), collEpoch, jumboChunk());
 
-    auto chunkDoc = uassertStatusOK(getChunkDoc(operationContext(), jumboChunk().getMin()));
+    auto chunkDoc = uassertStatusOK(
+        getChunkDoc(operationContext(), jumboChunk().getMin(), collEpoch, collTimestamp));
     ASSERT_FALSE(chunkDoc.getJumbo());
-    ASSERT_EQ(ChunkVersion(15, 0, epoch(), boost::none /* timestamp */), chunkDoc.getVersion());
+    ASSERT_EQ(ChunkVersion(15, 0, collEpoch, collTimestamp), chunkDoc.getVersion());
 }
 
 TEST_F(ClearJumboFlagTest, ClearJumboShouldNotBumpVersionIfChunkNotJumbo) {
+    const auto collEpoch = epoch();
+    const auto collTimestamp = boost::none;
+
     ShardingCatalogManager::get(operationContext())
-        ->clearJumboFlag(operationContext(), ns(), epoch(), nonJumboChunk());
+        ->clearJumboFlag(operationContext(), ns(), collEpoch, nonJumboChunk());
 
-    auto chunkDoc = uassertStatusOK(getChunkDoc(operationContext(), nonJumboChunk().getMin()));
+    auto chunkDoc = uassertStatusOK(
+        getChunkDoc(operationContext(), nonJumboChunk().getMin(), collEpoch, collTimestamp));
     ASSERT_FALSE(chunkDoc.getJumbo());
-    ASSERT_EQ(ChunkVersion(14, 7, epoch(), boost::none /* timestamp */), chunkDoc.getVersion());
+    ASSERT_EQ(ChunkVersion(14, 7, collEpoch, collTimestamp), chunkDoc.getVersion());
 }
 
 TEST_F(ClearJumboFlagTest, AssertsOnEpochMismatch) {
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_commit_chunk_migration_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_commit_chunk_migration_test.cpp
index 4a7e74add28..cedad2a905f 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_commit_chunk_migration_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_commit_chunk_migration_test.cpp
@@ -50,6 +50,9 @@ const NamespaceString kNamespace("TestDB.TestColl");
 const KeyPattern kKeyPattern(BSON("x" << 1));
 
 TEST_F(CommitChunkMigrate, ChunksUpdatedCorrectly) {
+    const auto collEpoch = OID::gen();
+    const auto collTimestamp = boost::none;
+
     ShardType shard0;
     shard0.setName("shard0");
     shard0.setHost("shard0:12");
@@ -62,7 +65,7 @@ TEST_F(CommitChunkMigrate, ChunksUpdatedCorrectly) {
     ChunkType migratedChunk, controlChunk;
     {
-        ChunkVersion origVersion(12, 7, OID::gen(), boost::none /* timestamp */);
+        ChunkVersion origVersion(12, 7, collEpoch, collTimestamp);
 
         migratedChunk.setName(OID::gen());
         migratedChunk.setNS(kNamespace);
@@ -109,14 +112,16 @@ TEST_F(CommitChunkMigrate, ChunksUpdatedCorrectly) {
     ASSERT_TRUE(mver.isOlderOrEqualThan(cver));
 
     // Verify the chunks ended up in the right shards.
-    auto chunkDoc0 = uassertStatusOK(getChunkDoc(operationContext(), migratedChunk.getMin()));
+    auto chunkDoc0 = uassertStatusOK(
+        getChunkDoc(operationContext(), migratedChunk.getMin(), collEpoch, collTimestamp));
     ASSERT_EQ("shard1", chunkDoc0.getShard().toString());
 
     // The migrated chunk's history should be updated.
     ASSERT_EQ(2UL, chunkDoc0.getHistory().size());
     ASSERT_EQ(validAfter, chunkDoc0.getHistory().front().getValidAfter());
 
-    auto chunkDoc1 = uassertStatusOK(getChunkDoc(operationContext(), controlChunk.getMin()));
+    auto chunkDoc1 = uassertStatusOK(
+        getChunkDoc(operationContext(), controlChunk.getMin(), collEpoch, collTimestamp));
     ASSERT_EQ("shard0", chunkDoc1.getShard().toString());
 
     // The control chunk's history and jumbo status should be unchanged.
@@ -129,6 +134,8 @@
 }
 
 TEST_F(CommitChunkMigrate, ChunksUpdatedCorrectlyWithoutControlChunk) {
+    const auto collEpoch = OID::gen();
+    const auto collTimestamp = boost::none;
 
     ShardType shard0;
     shard0.setName("shard0");
@@ -141,8 +148,7 @@ TEST_F(CommitChunkMigrate, ChunksUpdatedCorrectlyWithoutControlChunk) {
     setupShards({shard0, shard1});
 
     int origMajorVersion = 15;
-    auto const origVersion =
-        ChunkVersion(origMajorVersion, 4, OID::gen(), boost::none /* timestamp */);
+    auto const origVersion = ChunkVersion(origMajorVersion, 4, collEpoch, collTimestamp);
 
     ChunkType chunk0;
     chunk0.setName(OID::gen());
@@ -179,7 +185,8 @@
     ASSERT_EQ(ChunkVersion(0, 0, origVersion.epoch(), origVersion.getTimestamp()), mver.getValue());
 
     // Verify the chunk ended up in the right shard.
-    auto chunkDoc0 = uassertStatusOK(getChunkDoc(operationContext(), chunkMin));
+    auto chunkDoc0 =
+        uassertStatusOK(getChunkDoc(operationContext(), chunkMin, collEpoch, collTimestamp));
     ASSERT_EQ("shard1", chunkDoc0.getShard().toString());
     // The history should be updated.
     ASSERT_EQ(2UL, chunkDoc0.getHistory().size());
@@ -187,6 +194,8 @@
 }
 
 TEST_F(CommitChunkMigrate, CheckCorrectOpsCommandNoCtlTrimHistory) {
+    const auto collEpoch = OID::gen();
+    const auto collTimestamp = boost::none;
 
     ShardType shard0;
     shard0.setName("shard0");
@@ -199,8 +208,7 @@ TEST_F(CommitChunkMigrate, CheckCorrectOpsCommandNoCtlTrimHistory) {
     setupShards({shard0, shard1});
 
     int origMajorVersion = 15;
-    auto const origVersion =
-        ChunkVersion(origMajorVersion, 4, OID::gen(), boost::none /* timestamp */);
+    auto const origVersion = ChunkVersion(origMajorVersion, 4, collEpoch, collTimestamp);
 
     ChunkType chunk0;
     chunk0.setName(OID::gen());
@@ -238,7 +246,8 @@
     ASSERT_EQ(ChunkVersion(0, 0, origVersion.epoch(), origVersion.getTimestamp()), mver.getValue());
 
     // Verify the chunk ended up in the right shard.
-    auto chunkDoc0 = uassertStatusOK(getChunkDoc(operationContext(), chunkMin));
+    auto chunkDoc0 =
+        uassertStatusOK(getChunkDoc(operationContext(), chunkMin, collEpoch, collTimestamp));
     ASSERT_EQ("shard1", chunkDoc0.getShard().toString());
 
     // The new history entry should be added, but the old one preserved.
@@ -455,6 +464,9 @@ TEST_F(CommitChunkMigrate, RejectChunkMissing0) {
 }
 
 TEST_F(CommitChunkMigrate, CommitWithLastChunkOnShardShouldNotAffectOtherChunks) {
+    const auto collEpoch = OID::gen();
+    const auto collTimestamp = boost::none;
+
     ShardType shard0;
     shard0.setName("shard0");
     shard0.setHost("shard0:12");
@@ -466,8 +478,7 @@ TEST_F(CommitChunkMigrate, CommitWithLastChunkOnShardShouldNotAffectOtherChunks)
     setupShards({shard0, shard1});
 
     int origMajorVersion = 12;
-    auto const origVersion =
-        ChunkVersion(origMajorVersion, 7, OID::gen(), boost::none /* timestamp */);
+    auto const origVersion = ChunkVersion(origMajorVersion, 7, collEpoch, collTimestamp);
 
     ChunkType chunk0;
     chunk0.setName(OID::gen());
@@ -516,14 +527,16 @@
     ASSERT_EQ(ChunkVersion(0, 0, origVersion.epoch(), origVersion.getTimestamp()), mver.getValue());
 
     // Verify the chunks ended up in the right shards.
-    auto chunkDoc0 = uassertStatusOK(getChunkDoc(operationContext(), chunkMin));
+    auto chunkDoc0 =
+        uassertStatusOK(getChunkDoc(operationContext(), chunkMin, collEpoch, collTimestamp));
    ASSERT_EQ(shard1.getName(), chunkDoc0.getShard().toString());
 
     // The migrated chunk's history should be updated.
     ASSERT_EQ(2UL, chunkDoc0.getHistory().size());
     ASSERT_EQ(validAfter, chunkDoc0.getHistory().front().getValidAfter());
 
-    auto chunkDoc1 = uassertStatusOK(getChunkDoc(operationContext(), chunkMax));
+    auto chunkDoc1 =
+        uassertStatusOK(getChunkDoc(operationContext(), chunkMax, collEpoch, collTimestamp));
     ASSERT_EQ(shard1.getName(), chunkDoc1.getShard().toString());
 
     ASSERT_EQ(chunk1.getVersion(), chunkDoc1.getVersion());
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_ensure_chunk_version_is_greater_than_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_ensure_chunk_version_is_greater_than_test.cpp
index ef03914a467..06280859952 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_ensure_chunk_version_is_greater_than_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_ensure_chunk_version_is_greater_than_test.cpp
@@ -106,16 +106,21 @@ TEST_F(EnsureChunkVersionIsGreaterThanTest, IfNoChunksFoundFoundReturnsSuccess)
 }
 
 TEST_F(EnsureChunkVersionIsGreaterThanTest, IfNoChunkWithMatchingEpochFoundReturnsSuccess) {
+    const auto collEpoch1 = OID::gen();
+    const auto collTimestamp1 = boost::none;
+
     const auto requestedChunkType =
         generateChunkType(kNss,
-                          ChunkVersion(10, 2, OID::gen(), boost::none /* timestamp */),
+                          ChunkVersion(10, 2, collEpoch1, collTimestamp1),
                           ShardId(_shardName),
                           BSON("a" << 1),
                           BSON("a" << 10));
 
+    // Epoch is different.
+    const auto collEpoch2 = OID::gen();
+    const auto collTimestamp2 = boost::none;
     ChunkType existingChunkType = requestedChunkType;
-    // Epoch is different.
-    existingChunkType.setVersion(ChunkVersion(10, 2, OID::gen(), boost::none /* timestamp */));
+    existingChunkType.setVersion(ChunkVersion(10, 2, collEpoch2, collTimestamp2));
     setupCollection(kNss, kKeyPattern, {existingChunkType});
 
     ShardingCatalogManager::get(operationContext())
@@ -124,17 +129,20 @@ TEST_F(EnsureChunkVersionIsGreaterThanTest, IfNoChunkWithMatchingEpochFoundRetur
                                         requestedChunkType.getMax(),
                                         requestedChunkType.getVersion());
 
-    assertChunkHasNotChanged(existingChunkType,
-                             getChunkDoc(operationContext(), existingChunkType.getMin()));
+    assertChunkHasNotChanged(
+        existingChunkType,
+        getChunkDoc(operationContext(), existingChunkType.getMin(), collEpoch2, collTimestamp2));
 }
 
 TEST_F(EnsureChunkVersionIsGreaterThanTest, IfNoChunkWithMatchingMinKeyFoundReturnsSuccess) {
-    const auto requestedChunkType =
-        generateChunkType(kNss,
-                          ChunkVersion(10, 2, OID::gen(), boost::none /* timestamp */),
-                          ShardId(_shardName),
-                          BSON("a" << 1),
-                          BSON("a" << 10));
+    const auto collEpoch = OID::gen();
+    const auto collTimestamp = boost::none;
+
+    const auto requestedChunkType = generateChunkType(kNss,
+                                                      ChunkVersion(10, 2, collEpoch, collTimestamp),
+                                                      ShardId(_shardName),
+                                                      BSON("a" << 1),
+                                                      BSON("a" << 10));
 
     ChunkType existingChunkType = requestedChunkType;
     // Min key is different.
@@ -147,17 +155,20 @@
                                         requestedChunkType.getMax(),
                                         requestedChunkType.getVersion());
 
-    assertChunkHasNotChanged(existingChunkType,
-                             getChunkDoc(operationContext(), existingChunkType.getMin()));
+    assertChunkHasNotChanged(
+        existingChunkType,
+        getChunkDoc(operationContext(), existingChunkType.getMin(), collEpoch, collTimestamp));
 }
 
 TEST_F(EnsureChunkVersionIsGreaterThanTest, IfNoChunkWithMatchingMaxKeyFoundReturnsSuccess) {
-    const auto requestedChunkType =
-        generateChunkType(kNss,
-                          ChunkVersion(10, 2, OID::gen(), boost::none /* timestamp */),
-                          ShardId(_shardName),
-                          BSON("a" << 1),
-                          BSON("a" << 10));
+    const auto collEpoch = OID::gen();
+    const auto collTimestamp = boost::none;
+
+    const auto requestedChunkType = generateChunkType(kNss,
+                                                      ChunkVersion(10, 2, collEpoch, collTimestamp),
+                                                      ShardId(_shardName),
+                                                      BSON("a" << 1),
+                                                      BSON("a" << 10));
 
     ChunkType existingChunkType = requestedChunkType;
     // Max key is different.
@@ -170,27 +181,28 @@ TEST_F(EnsureChunkVersionIsGreaterThanTest, IfNoChunkWithMatchingMaxKeyFoundRetu
                                         requestedChunkType.getMax(),
                                         requestedChunkType.getVersion());
 
-    assertChunkHasNotChanged(existingChunkType,
-                             getChunkDoc(operationContext(), existingChunkType.getMin()));
+    assertChunkHasNotChanged(
+        existingChunkType,
+        getChunkDoc(operationContext(), existingChunkType.getMin(), collEpoch, collTimestamp));
 }
 
 TEST_F(EnsureChunkVersionIsGreaterThanTest,
        IfChunkMatchingRequestedChunkFoundBumpsChunkVersionAndReturnsSuccess) {
-    const auto epoch = OID::gen();
-    const auto requestedChunkType =
-        generateChunkType(kNss,
-                          ChunkVersion(10, 2, epoch, boost::none /* timestamp */),
-                          ShardId(_shardName),
-                          BSON("a" << 1),
-                          BSON("a" << 10));
+    const auto collEpoch = OID::gen();
+    const auto collTimestamp = boost::none;
+
+    const auto requestedChunkType = generateChunkType(kNss,
+                                                      ChunkVersion(10, 2, collEpoch, collTimestamp),
+                                                      ShardId(_shardName),
+                                                      BSON("a" << 1),
+                                                      BSON("a" << 10));
 
     const auto existingChunkType = requestedChunkType;
-    const auto highestChunkType =
-        generateChunkType(kNss,
-                          ChunkVersion(20, 3, epoch, boost::none /* timestamp */),
-                          ShardId("shard0001"),
-                          BSON("a" << 11),
-                          BSON("a" << 20));
+    const auto highestChunkType = generateChunkType(kNss,
+                                                    ChunkVersion(20, 3, collEpoch, collTimestamp),
+                                                    ShardId("shard0001"),
+                                                    BSON("a" << 11),
+                                                    BSON("a" << 20));
     setupCollection(kNss, kKeyPattern, {existingChunkType, highestChunkType});
 
     ShardingCatalogManager::get(operationContext())
@@ -199,27 +211,27 @@
                                         requestedChunkType.getMax(),
                                         requestedChunkType.getVersion());
 
-    assertChunkVersionWasBumpedTo(existingChunkType,
-                                  getChunkDoc(operationContext(), existingChunkType.getMin()),
-                                  ChunkVersion(highestChunkType.getVersion().majorVersion() + 1,
-                                               0,
-                                               epoch,
-                                               boost::none /* timestamp */));
+    assertChunkVersionWasBumpedTo(
+        existingChunkType,
+        getChunkDoc(operationContext(), existingChunkType.getMin(), collEpoch, collTimestamp),
+        ChunkVersion(
+            highestChunkType.getVersion().majorVersion() + 1, 0, collEpoch, collTimestamp));
 }
 
 TEST_F(
    EnsureChunkVersionIsGreaterThanTest,
    IfChunkMatchingRequestedChunkFoundAndHasHigherChunkVersionReturnsSuccessWithoutBumpingChunkVersion) {
-    const auto epoch = OID::gen();
-    const auto requestedChunkType =
-        generateChunkType(kNss,
-                          ChunkVersion(10, 2, epoch, boost::none /* timestamp */),
-                          ShardId(_shardName),
-                          BSON("a" << 1),
-                          BSON("a" << 10));
+    const auto collEpoch = OID::gen();
+    const auto collTimestamp = boost::none;
+
+    const auto requestedChunkType = generateChunkType(kNss,
+                                                      ChunkVersion(10, 2, collEpoch, collTimestamp),
+                                                      ShardId(_shardName),
+                                                      BSON("a" << 1),
+                                                      BSON("a" << 10));
 
     ChunkType existingChunkType = requestedChunkType;
-    existingChunkType.setVersion(ChunkVersion(11, 1, epoch, boost::none /* timestamp */));
+    existingChunkType.setVersion(ChunkVersion(11, 1, collEpoch, collTimestamp));
     setupCollection(kNss, kKeyPattern, {existingChunkType});
 
     ShardingCatalogManager::get(operationContext())
@@ -228,8 +240,9 @@
                                         requestedChunkType.getMax(),
                                         requestedChunkType.getVersion());
 
-    assertChunkHasNotChanged(existingChunkType,
-                             getChunkDoc(operationContext(), existingChunkType.getMin()));
+    assertChunkHasNotChanged(
+        existingChunkType,
+        getChunkDoc(operationContext(), existingChunkType.getMin(), collEpoch, collTimestamp));
 }
 
 }  // namespace
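The bump rule these assertions encode is explicit in the test above: when a matching chunk is found, ensureChunkVersionIsGreaterThan rewrites it to one major version above the highest chunk version in the collection, with the minor version reset to zero. With the test's values (a worked restatement, not new behavior):

    // highestChunkType holds version (20, 3); the matching chunk holds (10, 2).
    // The expected post-bump version is therefore (20 + 1, 0) == (21, 0):
    const ChunkVersion expectedBumpedVersion(
        highestChunkType.getVersion().majorVersion() + 1, 0, collEpoch, collTimestamp);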
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_merge_chunks_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_merge_chunks_test.cpp
index 1a1c476b37d..04f002fb135 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_merge_chunks_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_merge_chunks_test.cpp
@@ -121,7 +121,8 @@ TEST_F(MergeChunkTest, MergeExistingChunksCorrectlyShouldSucceed) {
     ASSERT_EQ(1u, chunksVector.size());
 
     // MergedChunk should have range [chunkMin, chunkMax]
-    auto mergedChunk = uassertStatusOK(ChunkType::fromConfigBSON(chunksVector.front()));
+    auto mergedChunk = uassertStatusOK(ChunkType::fromConfigBSON(
+        chunksVector.front(), collVersion.epoch(), collVersion.getTimestamp()));
     ASSERT_BSONOBJ_EQ(chunkMin, mergedChunk.getMin());
     ASSERT_BSONOBJ_EQ(chunkMax, mergedChunk.getMax());
 
@@ -134,11 +135,14 @@
 }
 
 TEST_F(MergeChunkTest, MergeSeveralChunksCorrectlyShouldSucceed) {
+    const auto collEpoch = OID::gen();
+    const auto collTimestamp = boost::none;
+
     ChunkType chunk;
     chunk.setName(OID::gen());
     chunk.setNS(kNamespace);
 
-    auto origVersion = ChunkVersion(1, 0, OID::gen(), boost::none /* timestamp */);
+    auto origVersion = ChunkVersion(1, 0, collEpoch, collTimestamp);
     chunk.setVersion(origVersion);
     chunk.setShard(ShardId("shard0000"));
 
@@ -192,7 +196,8 @@
     ASSERT_EQ(1u, chunksVector.size());
 
     // MergedChunk should have range [chunkMin, chunkMax]
-    auto mergedChunk = uassertStatusOK(ChunkType::fromConfigBSON(chunksVector.front()));
+    auto mergedChunk =
+        uassertStatusOK(ChunkType::fromConfigBSON(chunksVector.front(), collEpoch, collTimestamp));
     ASSERT_BSONOBJ_EQ(chunkMin, mergedChunk.getMin());
     ASSERT_BSONOBJ_EQ(chunkMax, mergedChunk.getMax());
 
@@ -208,14 +213,16 @@
 }
 
 TEST_F(MergeChunkTest, NewMergeShouldClaimHighestVersion) {
+    const auto collEpoch = OID::gen();
+    const auto collTimestamp = boost::none;
+
     ChunkType chunk, otherChunk;
     chunk.setName(OID::gen());
     chunk.setNS(kNamespace);
     otherChunk.setName(OID::gen());
     otherChunk.setNS(kNamespace);
 
-    auto collEpoch = OID::gen();
-    auto origVersion = ChunkVersion(1, 2, collEpoch, boost::none /* timestamp */);
+    auto origVersion = ChunkVersion(1, 2, collEpoch, collTimestamp);
     chunk.setVersion(origVersion);
     chunk.setShard(ShardId("shard0000"));
 
@@ -270,7 +277,8 @@
     ASSERT_EQ(2u, chunksVector.size());
 
     // MergedChunk should have range [chunkMin, chunkMax]
-    auto mergedChunk = uassertStatusOK(ChunkType::fromConfigBSON(chunksVector.front()));
+    auto mergedChunk =
+        uassertStatusOK(ChunkType::fromConfigBSON(chunksVector.front(), collEpoch, collTimestamp));
     ASSERT_BSONOBJ_EQ(chunkMin, mergedChunk.getMin());
     ASSERT_BSONOBJ_EQ(chunkMax, mergedChunk.getMax());
 
@@ -286,11 +294,14 @@
 }
 
 TEST_F(MergeChunkTest, MergeLeavesOtherChunksAlone) {
+    const auto collEpoch = OID::gen();
+    const auto collTimestamp = boost::none;
+
     ChunkType chunk;
     chunk.setName(OID::gen());
     chunk.setNS(kNamespace);
 
-    auto origVersion = ChunkVersion(1, 2, OID::gen(), boost::none /* timestamp */);
+    auto origVersion = ChunkVersion(1, 2, collEpoch, collTimestamp);
     chunk.setVersion(origVersion);
     chunk.setShard(ShardId("shard0000"));
 
@@ -344,7 +355,8 @@
     ASSERT_EQ(2u, chunksVector.size());
 
     // MergedChunk should have range [chunkMin, chunkMax]
-    auto mergedChunk = uassertStatusOK(ChunkType::fromConfigBSON(chunksVector.front()));
+    auto mergedChunk =
+        uassertStatusOK(ChunkType::fromConfigBSON(chunksVector.front(), collEpoch, collTimestamp));
     ASSERT_BSONOBJ_EQ(chunkMin, mergedChunk.getMin());
     ASSERT_BSONOBJ_EQ(chunkMax, mergedChunk.getMax());
 
@@ -355,7 +367,8 @@
     }
 
     // OtherChunk should have been left alone
-    auto foundOtherChunk = uassertStatusOK(ChunkType::fromConfigBSON(chunksVector.back()));
+    auto foundOtherChunk =
+        uassertStatusOK(ChunkType::fromConfigBSON(chunksVector.back(), collEpoch, collTimestamp));
     ASSERT_BSONOBJ_EQ(otherChunk.getMin(), foundOtherChunk.getMin());
     ASSERT_BSONOBJ_EQ(otherChunk.getMax(), foundOtherChunk.getMax());
 }
@@ -435,11 +448,14 @@
 }
 
 TEST_F(MergeChunkTest, MergeAlreadyHappenedSucceeds) {
+    const auto collEpoch = OID::gen();
+    const auto collTimestamp = boost::none;
+
     ChunkType chunk;
     chunk.setName(OID::gen());
     chunk.setNS(kNamespace);
 
-    auto origVersion = ChunkVersion(1, 0, OID::gen(), boost::none /* timestamp */);
+    auto origVersion = ChunkVersion(1, 0, collEpoch, collTimestamp);
     chunk.setVersion(origVersion);
     chunk.setShard(ShardId("shard0000"));
 
@@ -493,7 +509,8 @@
     ASSERT_EQ(1u, chunksVector.size());
 
     // MergedChunk should have range [chunkMin, chunkMax]
-    ChunkType foundChunk = uassertStatusOK(ChunkType::fromConfigBSON(chunksVector.front()));
+    ChunkType foundChunk =
+        uassertStatusOK(ChunkType::fromConfigBSON(chunksVector.front(), collEpoch, collTimestamp));
     ASSERT_BSONOBJ_EQ(mergedChunk.toConfigBSON(), foundChunk.toConfigBSON());
 }
 
@@ -543,11 +560,14 @@
 }
 
 TEST_F(MergeChunkTest, MergingChunksWithDollarPrefixShouldSucceed) {
+    const auto collEpoch = OID::gen();
+    const auto collTimestamp = boost::none;
+
     ChunkType chunk1;
     chunk1.setName(OID::gen());
     chunk1.setNS(kNamespace);
 
-    auto origVersion = ChunkVersion(1, 0, OID::gen(), boost::none /* timestamp */);
+    auto origVersion = ChunkVersion(1, 0, collEpoch, collTimestamp);
     chunk1.setVersion(origVersion);
     chunk1.setShard(ShardId("shard0000"));
 
@@ -600,7 +620,8 @@
     ASSERT_EQ(1u, chunksVector.size());
 
     // MergedChunk should have range [chunkMin, chunkMax]
-    auto mergedChunk = uassertStatusOK(ChunkType::fromConfigBSON(chunksVector.front()));
+    auto mergedChunk =
+        uassertStatusOK(ChunkType::fromConfigBSON(chunksVector.front(), collEpoch, collTimestamp));
     ASSERT_BSONOBJ_EQ(chunkMin, mergedChunk.getMin());
     ASSERT_BSONOBJ_EQ(chunkMax, mergedChunk.getMax());
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_split_chunk_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_split_chunk_test.cpp
index be9997779a7..25d713cc536 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_split_chunk_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_split_chunk_test.cpp
@@ -55,11 +55,14 @@ protected:
 };
 
 TEST_F(SplitChunkTest, SplitExistingChunkCorrectlyShouldSucceed) {
+    const auto collEpoch = OID::gen();
+    const auto collTimestamp = boost::none;
+
     ChunkType chunk;
     chunk.setName(OID::gen());
     chunk.setNS(kNamespace);
 
-    auto origVersion = ChunkVersion(1, 0, OID::gen(), boost::none /* timestamp */);
+    auto origVersion = ChunkVersion(1, 0, collEpoch, collTimestamp);
     chunk.setVersion(origVersion);
     chunk.setShard(ShardId(_shardName));
 
@@ -78,7 +81,7 @@ TEST_F(SplitChunkTest, SplitExistingChunkCorrectlyShouldSucceed) {
     auto versions = assertGet(ShardingCatalogManager::get(operationContext())
                                   ->commitChunkSplit(operationContext(),
                                                      kNamespace,
-                                                     origVersion.epoch(),
+                                                     collEpoch,
                                                      ChunkRange(chunkMin, chunkMax),
                                                      splitPoints,
                                                      "shard0000"));
@@ -89,15 +92,13 @@ TEST_F(SplitChunkTest, SplitExistingChunkCorrectlyShouldSucceed) {
     ASSERT_EQ(collVersion, shardVersion);
 
     // Check for increment on mergedChunk's minor version
-    auto expectedShardVersion = ChunkVersion(origVersion.majorVersion(),
-                                             origVersion.minorVersion() + 2,
-                                             origVersion.epoch(),
-                                             origVersion.getTimestamp());
+    auto expectedShardVersion = ChunkVersion(
+        origVersion.majorVersion(), origVersion.minorVersion() + 2, collEpoch, collTimestamp);
     ASSERT_EQ(expectedShardVersion, shardVersion);
     ASSERT_EQ(shardVersion, collVersion);
 
     // First chunkDoc should have range [chunkMin, chunkSplitPoint]
-    auto chunkDocStatus = getChunkDoc(operationContext(), chunkMin);
+    auto chunkDocStatus = getChunkDoc(operationContext(), chunkMin, collEpoch, collTimestamp);
     ASSERT_OK(chunkDocStatus.getStatus());
 
     auto chunkDoc = chunkDocStatus.getValue();
@@ -111,7 +112,8 @@ TEST_F(SplitChunkTest, SplitExistingChunkCorrectlyShouldSucceed) {
     ASSERT_EQ(2UL, chunkDoc.getHistory().size());
 
     // Second chunkDoc should have range [chunkSplitPoint, chunkMax]
-    auto otherChunkDocStatus = getChunkDoc(operationContext(), chunkSplitPoint);
+    auto otherChunkDocStatus =
+        getChunkDoc(operationContext(), chunkSplitPoint, collEpoch, collTimestamp);
     ASSERT_OK(otherChunkDocStatus.getStatus());
 
     auto otherChunkDoc = otherChunkDocStatus.getValue();
@@ -129,11 +131,14 @@
 }
 
 TEST_F(SplitChunkTest, MultipleSplitsOnExistingChunkShouldSucceed) {
+    const auto collEpoch = OID::gen();
+    const auto collTimestamp = boost::none;
+
     ChunkType chunk;
     chunk.setName(OID::gen());
     chunk.setNS(kNamespace);
 
-    auto origVersion = ChunkVersion(1, 0, OID::gen(), boost::none /* timestamp */);
+    auto origVersion = ChunkVersion(1, 0, collEpoch, collTimestamp);
     chunk.setVersion(origVersion);
     chunk.setShard(ShardId(_shardName));
 
@@ -153,13 +158,13 @@ TEST_F(SplitChunkTest, MultipleSplitsOnExistingChunkShouldSucceed) {
     ASSERT_OK(ShardingCatalogManager::get(operationContext())
                   ->commitChunkSplit(operationContext(),
                                      kNamespace,
-                                     origVersion.epoch(),
+                                     collEpoch,
                                      ChunkRange(chunkMin, chunkMax),
                                      splitPoints,
                                      "shard0000"));
 
     // First chunkDoc should have range [chunkMin, chunkSplitPoint]
-    auto chunkDocStatus = getChunkDoc(operationContext(), chunkMin);
+    auto chunkDocStatus = getChunkDoc(operationContext(), chunkMin, collEpoch, collTimestamp);
     ASSERT_OK(chunkDocStatus.getStatus());
 
     auto chunkDoc = chunkDocStatus.getValue();
@@ -173,7 +178,8 @@ TEST_F(SplitChunkTest, MultipleSplitsOnExistingChunkShouldSucceed) {
     ASSERT_EQ(2UL, chunkDoc.getHistory().size());
 
     // Second chunkDoc should have range [chunkSplitPoint, chunkSplitPoint2]
-    auto midChunkDocStatus = getChunkDoc(operationContext(), chunkSplitPoint);
+    auto midChunkDocStatus =
+        getChunkDoc(operationContext(), chunkSplitPoint, collEpoch, collTimestamp);
     ASSERT_OK(midChunkDocStatus.getStatus());
 
     auto midChunkDoc = midChunkDocStatus.getValue();
@@ -187,7 +193,8 @@ TEST_F(SplitChunkTest, MultipleSplitsOnExistingChunkShouldSucceed) {
     ASSERT_EQ(2UL, midChunkDoc.getHistory().size());
 
     // Third chunkDoc should have range [chunkSplitPoint2, chunkMax]
-    auto lastChunkDocStatus = getChunkDoc(operationContext(), chunkSplitPoint2);
+    auto lastChunkDocStatus =
+        getChunkDoc(operationContext(), chunkSplitPoint2, collEpoch, collTimestamp);
     ASSERT_OK(lastChunkDocStatus.getStatus());
 
     auto lastChunkDoc = lastChunkDocStatus.getValue();
@@ -206,15 +213,17 @@
 }
 
 TEST_F(SplitChunkTest, NewSplitShouldClaimHighestVersion) {
+    const auto collEpoch = OID::gen();
+    const auto collTimestamp = boost::none;
+
     ChunkType chunk, chunk2;
     chunk.setName(OID::gen());
     chunk.setNS(kNamespace);
     chunk2.setName(OID::gen());
     chunk2.setNS(kNamespace);
-    auto collEpoch = OID::gen();
 
     // set up first chunk
-    auto origVersion = ChunkVersion(1, 2, collEpoch, boost::none /* timestamp */);
+    auto origVersion = ChunkVersion(1, 2, collEpoch, collTimestamp);
     chunk.setVersion(origVersion);
     chunk.setShard(ShardId(_shardName));
 
@@ -228,7 +237,7 @@ TEST_F(SplitChunkTest, NewSplitShouldClaimHighestVersion) {
     splitPoints.push_back(chunkSplitPoint);
 
     // set up second chunk (chunk2)
-    auto competingVersion = ChunkVersion(2, 1, collEpoch, boost::none /* timestamp */);
+    auto competingVersion = ChunkVersion(2, 1, collEpoch, collTimestamp);
     chunk2.setVersion(competingVersion);
     chunk2.setShard(ShardId(_shardName));
     chunk2.setMin(BSON("a" << 10));
@@ -245,7 +254,7 @@ TEST_F(SplitChunkTest, NewSplitShouldClaimHighestVersion) {
                                      "shard0000"));
 
     // First chunkDoc should have range [chunkMin, chunkSplitPoint]
-    auto chunkDocStatus = getChunkDoc(operationContext(), chunkMin);
+    auto chunkDocStatus = getChunkDoc(operationContext(), chunkMin, collEpoch, collTimestamp);
     ASSERT_OK(chunkDocStatus.getStatus());
 
     auto chunkDoc = chunkDocStatus.getValue();
@@ -256,7 +265,8 @@ TEST_F(SplitChunkTest, NewSplitShouldClaimHighestVersion) {
     ASSERT_EQ(competingVersion.minorVersion() + 1, chunkDoc.getVersion().minorVersion());
 
     // Second chunkDoc should have range [chunkSplitPoint, chunkMax]
-    auto otherChunkDocStatus = getChunkDoc(operationContext(), chunkSplitPoint);
+    auto otherChunkDocStatus =
+        getChunkDoc(operationContext(), chunkSplitPoint, collEpoch, collTimestamp);
     ASSERT_OK(otherChunkDocStatus.getStatus());
 
     auto otherChunkDoc = otherChunkDocStatus.getValue();
readChunkCatalogEntriesAndAssertMatchExpected( + opCtx, initialChunks, _originalEpoch, _originalTimestamp); readTagCatalogEntriesAndAssertMatchExpected(opCtx, newZones); } @@ -626,7 +631,8 @@ protected: // Check that chunks and tags entries previously under the temporary namespace have been // correctly updated to the original namespace - readChunkCatalogEntriesAndAssertMatchExpected(opCtx, expectedChunks); + readChunkCatalogEntriesAndAssertMatchExpected( + opCtx, expectedChunks, _finalEpoch, _finalTimestamp); readTagCatalogEntriesAndAssertMatchExpected(opCtx, expectedZones); } @@ -679,16 +685,24 @@ protected: } void assertChunkVersionDidNotIncreaseAfterStateTransition( - const ChunkType& chunk, const ChunkVersion& collectionVersion) { - auto chunkAfterTransition = getChunkDoc(operationContext(), chunk.getMin()); + const ChunkType& chunk, + const ChunkVersion& collectionVersion, + const OID& collEpoch, + const boost::optional<Timestamp>& collTimestamp) { + auto chunkAfterTransition = + getChunkDoc(operationContext(), chunk.getMin(), collEpoch, collTimestamp); ASSERT_EQ(chunkAfterTransition.getStatus(), Status::OK()); ASSERT_EQ(chunkAfterTransition.getValue().getVersion().majorVersion(), collectionVersion.majorVersion()); } - void assertChunkVersionIncreasedAfterStateTransition(const ChunkType& chunk, - const ChunkVersion& collectionVersion) { - auto chunkAfterTransition = getChunkDoc(operationContext(), chunk.getMin()); + void assertChunkVersionIncreasedAfterStateTransition( + const ChunkType& chunk, + const ChunkVersion& collectionVersion, + const OID& collEpoch, + const boost::optional<Timestamp>& collTimestamp) { + auto chunkAfterTransition = + getChunkDoc(operationContext(), chunk.getMin(), collEpoch, collTimestamp); ASSERT_EQ(chunkAfterTransition.getStatus(), Status::OK()); ASSERT_EQ(chunkAfterTransition.getValue().getVersion().majorVersion(), collectionVersion.majorVersion() + 1); @@ -697,6 +711,8 @@ protected: NamespaceString _originalNss = NamespaceString("db.foo"); UUID _originalUUID = UUID::gen(); OID _originalEpoch = OID::gen(); + boost::optional<Timestamp> + _originalTimestamp; // TODO: SERVER-53066 Initialize it with a Timestamp. NamespaceString _tempNss = NamespaceString("db.system.resharding." + _originalUUID.toString()); UUID _reshardingUUID = UUID::gen(); @@ -759,7 +775,8 @@ TEST_F(ReshardingCoordinatorPersistenceTest, WriteInitialInfoSucceeds) { // bumped twice in 'writeInitialStateAndCatalogUpdatesExpectSuccess': once when reshardingFields // is inserted to the collection doc, and once again when the state transitions to // kPreparingToDonate. 
@@ -759,7 +775,8 @@ TEST_F(ReshardingCoordinatorPersistenceTest, WriteInitialInfoSucceeds) {
     // bumped twice in 'writeInitialStateAndCatalogUpdatesExpectSuccess': once when reshardingFields
     // is inserted to the collection doc, and once again when the state transitions to
     // kPreparingToDonate.
-    auto donorChunkPostTransition = getChunkDoc(operationContext(), donorChunk.getMin());
+    auto donorChunkPostTransition =
+        getChunkDoc(operationContext(), donorChunk.getMin(), _originalEpoch, _originalTimestamp);
     ASSERT_EQ(donorChunkPostTransition.getStatus(), Status::OK());
     ASSERT_EQ(donorChunkPostTransition.getValue().getVersion().majorVersion(),
               collectionVersion.majorVersion() + 2);
@@ -784,8 +801,10 @@ TEST_F(ReshardingCoordinatorPersistenceTest, BasicStateTransitionSucceeds) {
     expectedCoordinatorDoc.setState(CoordinatorStateEnum::kBlockingWrites);
 
     writeStateTransitionUpdateExpectSuccess(operationContext(), expectedCoordinatorDoc);
-    assertChunkVersionIncreasedAfterStateTransition(donorChunk, donorCollectionVersion);
-    assertChunkVersionIncreasedAfterStateTransition(recipientChunk, recipientCollectionVersion);
+    assertChunkVersionIncreasedAfterStateTransition(
+        donorChunk, donorCollectionVersion, _originalEpoch, _originalTimestamp);
+    assertChunkVersionIncreasedAfterStateTransition(
+        recipientChunk, recipientCollectionVersion, _originalEpoch, _originalTimestamp);
 }
 
 TEST_F(ReshardingCoordinatorPersistenceTest, StateTransitionWithFetchTimestampSucceeds) {
@@ -814,8 +833,10 @@ TEST_F(ReshardingCoordinatorPersistenceTest, StateTransitionWithFetchTimestampSu
     }());
 
     writeStateTransitionUpdateExpectSuccess(operationContext(), expectedCoordinatorDoc);
-    assertChunkVersionIncreasedAfterStateTransition(donorChunk, donorCollectionVersion);
-    assertChunkVersionIncreasedAfterStateTransition(recipientChunk, recipientCollectionVersion);
+    assertChunkVersionIncreasedAfterStateTransition(
+        donorChunk, donorCollectionVersion, _originalEpoch, _originalTimestamp);
+    assertChunkVersionIncreasedAfterStateTransition(
+        recipientChunk, recipientCollectionVersion, _originalEpoch, _originalTimestamp);
 }
 
 TEST_F(ReshardingCoordinatorPersistenceTest, StateTranstionToDecisionPersistedSucceeds) {
@@ -845,8 +866,8 @@ TEST_F(ReshardingCoordinatorPersistenceTest, StateTranstionToDecisionPersistedSu
         operationContext(), expectedCoordinatorDoc, fetchTimestamp, updatedChunks, updatedZones);
 
     // Since the epoch is changed, there is no need to bump the chunk versions with the transition.
-    assertChunkVersionDidNotIncreaseAfterStateTransition(recipientChunk,
-                                                         recipientChunk.getVersion());
+    assertChunkVersionDidNotIncreaseAfterStateTransition(
+        recipientChunk, recipientChunk.getVersion(), _finalEpoch, _finalTimestamp);
 }
 
 TEST_F(ReshardingCoordinatorPersistenceTest, StateTransitionToErrorSucceeds) {
@@ -868,7 +889,8 @@ TEST_F(ReshardingCoordinatorPersistenceTest, StateTransitionToDoneSucceeds) {
     auto collectionVersion = finalChunk.getVersion();
 
     removeCoordinatorDocAndReshardingFieldsExpectSuccess(operationContext(), coordinatorDoc);
-    assertChunkVersionIncreasedAfterStateTransition(finalChunk, collectionVersion);
+    assertChunkVersionIncreasedAfterStateTransition(
+        finalChunk, collectionVersion, _finalEpoch, _finalTimestamp);
 }
 
 TEST_F(ReshardingCoordinatorPersistenceTest, StateTransitionWhenCoordinatorDocDoesNotExistFails) {
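
One detail the "DidNotIncrease" assertion relies on: chunk version ordering is only meaningful within a single epoch, so once resharding installs a new epoch there is nothing to bump. A conceptual sketch, not code from this patch (the epoch variables are hypothetical):

    // Two versions under different epochs are not comparable; a router that sees
    // the epoch change discards its cached routing table rather than comparing.
    const ChunkVersion preCommit(5, 0, originalEpoch, boost::none);
    const ChunkVersion postCommit(1, 0, finalEpoch, boost::none);
    // No ordering relation is implied between preCommit and postCommit.
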
diff --git a/src/mongo/s/catalog/sharding_catalog_client.h b/src/mongo/s/catalog/sharding_catalog_client.h
index 6866f2561ef..f33b89e188b 100644
--- a/src/mongo/s/catalog/sharding_catalog_client.h
+++ b/src/mongo/s/catalog/sharding_catalog_client.h
@@ -165,7 +165,10 @@ public:
      * @param optime an out parameter that will contain the opTime of the config server.
      *      Can be null. Note that chunks can be fetched in multiple batches and each batch
      *      can have a unique opTime.
      *      This opTime will be the one from the last batch.
+     * @param epoch epoch associated with the collection, needed to build the chunks.
+     * @param timestamp timestamp associated with the collection, needed to build the chunks.
      * @param readConcern The readConcern to use while querying for chunks.
+     *
      * Returns a vector of ChunkTypes, or a !OK status if an error occurs.
      */
@@ -175,6 +178,8 @@ public:
         const BSONObj& sort,
         boost::optional<int> limit,
         repl::OpTime* opTime,
+        const OID& epoch,
+        const boost::optional<Timestamp>& timestamp,
         repl::ReadConcernLevel readConcern,
         const boost::optional<BSONObj>& hint = boost::none) = 0;
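
For reference, a call site against the extended interface now reads as below. This is a sketch assuming a catalog client pointer and a CollectionType 'coll' fetched beforehand; it mirrors the test code later in this patch:

    repl::OpTime opTime;
    auto swChunks =
        catalogClient->getChunks(opCtx,
                                 BSON(ChunkType::ns(nss.ns())),     // query
                                 BSON(ChunkType::lastmod() << -1),  // sort: newest first
                                 boost::none,                       // no limit
                                 &opTime,
                                 coll.getEpoch(),                   // identity used to
                                 coll.getTimestamp(),               // rebuild ChunkVersions
                                 repl::ReadConcernLevel::kMajorityReadConcern);
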
diff --git a/src/mongo/s/catalog/sharding_catalog_client_impl.cpp b/src/mongo/s/catalog/sharding_catalog_client_impl.cpp
index 27ad059f532..9b36e63d9ce 100644
--- a/src/mongo/s/catalog/sharding_catalog_client_impl.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_client_impl.cpp
@@ -683,6 +683,8 @@ StatusWith<std::vector<ChunkType>> ShardingCatalogClientImpl::getChunks(
     const BSONObj& sort,
     boost::optional<int> limit,
     OpTime* opTime,
+    const OID& epoch,
+    const boost::optional<Timestamp>& timestamp,
     repl::ReadConcernLevel readConcern,
     const boost::optional<BSONObj>& hint) {
     invariant(serverGlobalParams.clusterRole == ClusterRole::ConfigServer ||
@@ -700,7 +702,7 @@ StatusWith<std::vector<ChunkType>> ShardingCatalogClientImpl::getChunks(
 
     std::vector<ChunkType> chunks;
     for (const BSONObj& obj : chunkDocsOpTimePair.value) {
-        auto chunkRes = ChunkType::fromConfigBSON(obj);
+        auto chunkRes = ChunkType::fromConfigBSON(obj, epoch, timestamp);
         if (!chunkRes.isOK()) {
             return chunkRes.getStatus().withContext(stream()
                                                     << "Failed to parse chunk with id "
                                                     << obj[ChunkType::name()]);
@@ -755,34 +757,50 @@ std::pair<CollectionType, std::vector<ChunkType>> ShardingCatalogClientImpl::get
             stream() << "Collection " << nss.ns() << " not found",
             !aggResult.empty());
 
-    boost::optional<CollectionType> coll;
-    std::vector<ChunkType> chunks;
-    chunks.reserve(aggResult.size() - 1);
     // The aggregation may return the config.collections document anywhere between the
     // config.chunks documents.
-    for (const auto& elem : aggResult) {
-        const auto chunkElem = elem.getField("chunks");
-        if (chunkElem) {
-            auto chunkRes = uassertStatusOK(ChunkType::fromConfigBSON(chunkElem.Obj()));
-            chunks.emplace_back(std::move(chunkRes));
-        } else {
-            uassert(5520100,
-                    "Found more than one 'collections' documents in aggregation response",
-                    !coll);
-            coll.emplace(elem);
-
-            uassert(ErrorCodes::NamespaceNotFound,
-                    str::stream() << "Collection " << nss.ns() << " is dropped.",
-                    !coll->getDropped());
+    // 1st: look for the collection since it is needed to properly build the chunks.
+    boost::optional<CollectionType> coll;
+    {
+        for (const auto& elem : aggResult) {
+            const auto chunkElem = elem.getField("chunks");
+            if (!chunkElem) {
+                coll.emplace(elem);
+                break;
+            }
         }
+        uassert(5520101, "'collections' document not found in aggregation response", coll);
+
+        uassert(ErrorCodes::NamespaceNotFound,
+                str::stream() << "Collection " << nss.ns() << " is dropped.",
+                !coll->getDropped());
     }
-    uassert(5520101, "'collections' document not found in aggregation response", coll);
 
+    // 2nd: Traverse all the elements and build the chunks.
+    std::vector<ChunkType> chunks;
+    {
+        chunks.reserve(aggResult.size() - 1);
+        bool foundCollection = false;
+        for (const auto& elem : aggResult) {
+            const auto chunkElem = elem.getField("chunks");
+            if (chunkElem) {
+                auto chunkRes = uassertStatusOK(ChunkType::fromConfigBSON(
+                    chunkElem.Obj(), coll->getEpoch(), coll->getTimestamp()));
+                chunks.emplace_back(std::move(chunkRes));
+            } else {
+                uassert(5520100,
+                        "Found more than one 'collections' documents in aggregation response",
+                        !foundCollection);
+                foundCollection = true;
+            }
+        }
+
+        uassert(ErrorCodes::ConflictingOperationInProgress,
+                stream() << "No chunks were found for the collection " << nss,
+                !chunks.empty());
+    }
 
-    uassert(ErrorCodes::ConflictingOperationInProgress,
-            stream() << "No chunks were found for the collection " << nss,
-            !chunks.empty());
     return {std::move(*coll), std::move(chunks)};
 };
 
@@ -1009,7 +1027,14 @@ Status ShardingCatalogClientImpl::applyChunkOpsDeprecated(OperationContext* opCt
     } else {
         query.append(ChunkType::ns(), nsOrUUID.nss()->ns());
     }
-    auto chunkWithStatus = getChunks(opCtx, query.obj(), BSONObj(), 1, nullptr, readConcern);
+    auto chunkWithStatus = getChunks(opCtx,
+                                     query.obj(),
+                                     BSONObj(),
+                                     1,
+                                     nullptr,
+                                     lastChunkVersion.epoch(),
+                                     lastChunkVersion.getTimestamp(),
+                                     readConcern);
 
     if (!chunkWithStatus.isOK()) {
         errMsg = str::stream()
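
A design note on the refactor above: the result set is now traversed twice because no chunk can be parsed before the collection document (and with it the epoch/timestamp) has been found, and the aggregation does not guarantee its position. A single-pass variant would also work by buffering raw chunk BSON until the collection document shows up; a hedged sketch, reusing names from the hunk above:

    // Hypothetical one-pass alternative (not what the patch does): buffer first, parse later.
    boost::optional<CollectionType> coll;
    std::vector<BSONObj> rawChunks;
    for (const auto& elem : aggResult) {
        const auto chunkElem = elem.getField("chunks");
        if (chunkElem) {
            rawChunks.push_back(chunkElem.Obj().getOwned());
        } else {
            uassert(5520100,
                    "Found more than one 'collections' documents in aggregation response",
                    !coll);
            coll.emplace(elem);
        }
    }
    uassert(5520101, "'collections' document not found in aggregation response", coll);

    std::vector<ChunkType> chunks;
    chunks.reserve(rawChunks.size());
    for (const auto& obj : rawChunks) {
        chunks.push_back(uassertStatusOK(
            ChunkType::fromConfigBSON(obj, coll->getEpoch(), coll->getTimestamp())));
    }

The two-pass version trades the extra traversal for not taking owned copies of every chunk document, which is the heavier cost for large collections.
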
diff --git a/src/mongo/s/catalog/sharding_catalog_client_impl.h b/src/mongo/s/catalog/sharding_catalog_client_impl.h
index b1e399372d4..1526dfd0302 100644
--- a/src/mongo/s/catalog/sharding_catalog_client_impl.h
+++ b/src/mongo/s/catalog/sharding_catalog_client_impl.h
@@ -91,6 +91,8 @@ public:
         const BSONObj& sort,
         boost::optional<int> limit,
         repl::OpTime* opTime,
+        const OID& epoch,
+        const boost::optional<Timestamp>& timestamp,
         repl::ReadConcernLevel readConcern,
         const boost::optional<BSONObj>& hint = boost::none) override;
 
diff --git a/src/mongo/s/catalog/sharding_catalog_client_mock.cpp b/src/mongo/s/catalog/sharding_catalog_client_mock.cpp
index ea9696f30ff..90a1661b481 100644
--- a/src/mongo/s/catalog/sharding_catalog_client_mock.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_client_mock.cpp
@@ -82,6 +82,8 @@ StatusWith<std::vector<ChunkType>> ShardingCatalogClientMock::getChunks(
     const BSONObj& sort,
     boost::optional<int> limit,
     repl::OpTime* opTime,
+    const OID& epoch,
+    const boost::optional<Timestamp>& timestamp,
     repl::ReadConcernLevel readConcern,
     const boost::optional<BSONObj>& hint) {
     return {ErrorCodes::InternalError, "Method not implemented"};
diff --git a/src/mongo/s/catalog/sharding_catalog_client_mock.h b/src/mongo/s/catalog/sharding_catalog_client_mock.h
index 2b7ade924be..bc58f1fd84e 100644
--- a/src/mongo/s/catalog/sharding_catalog_client_mock.h
+++ b/src/mongo/s/catalog/sharding_catalog_client_mock.h
@@ -67,6 +67,8 @@ public:
         const BSONObj& sort,
         boost::optional<int> limit,
         repl::OpTime* opTime,
+        const OID& epoch,
+        const boost::optional<Timestamp>& timestamp,
         repl::ReadConcernLevel readConcern,
         const boost::optional<BSONObj>& hint) override;
 
diff --git a/src/mongo/s/catalog/sharding_catalog_client_test.cpp b/src/mongo/s/catalog/sharding_catalog_client_test.cpp
index 8051ad28f35..eaffa9445d9 100644
--- a/src/mongo/s/catalog/sharding_catalog_client_test.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_client_test.cpp
@@ -351,14 +351,15 @@ TEST_F(ShardingCatalogClientTest, GetAllShardsWithInvalidShard) {
 TEST_F(ShardingCatalogClientTest, GetChunksForNSWithSortAndLimit) {
     configTargeter()->setFindHostReturnValue(HostAndPort("TestHost1"));
 
-    OID oid = OID::gen();
+    const auto collEpoch = OID::gen();
+    const auto collTimestamp = boost::none;
 
     ChunkType chunkA;
     chunkA.setName(OID::gen());
     chunkA.setNS(kNamespace);
     chunkA.setMin(BSON("a" << 1));
     chunkA.setMax(BSON("a" << 100));
-    chunkA.setVersion({1, 2, oid, boost::none /* timestamp */});
+    chunkA.setVersion({1, 2, collEpoch, collTimestamp});
     chunkA.setShard(ShardId("shard0000"));
 
     ChunkType chunkB;
@@ -366,10 +367,10 @@ TEST_F(ShardingCatalogClientTest, GetChunksForNSWithSortAndLimit) {
     chunkB.setNS(kNamespace);
     chunkB.setMin(BSON("a" << 100));
     chunkB.setMax(BSON("a" << 200));
-    chunkB.setVersion({3, 4, oid, boost::none /* timestamp */});
+    chunkB.setVersion({3, 4, collEpoch, collTimestamp});
     chunkB.setShard(ShardId("shard0001"));
 
-    ChunkVersion queryChunkVersion({1, 2, oid, boost::none /* timestamp */});
+    ChunkVersion queryChunkVersion({1, 2, collEpoch, collTimestamp});
 
     const BSONObj chunksQuery(
         BSON(ChunkType::ns("TestDB.TestColl")
@@ -378,7 +379,7 @@ TEST_F(ShardingCatalogClientTest, GetChunksForNSWithSortAndLimit) {
 
     const OpTime newOpTime(Timestamp(7, 6), 5);
 
-    auto future = launchAsync([this, &chunksQuery, newOpTime] {
+    auto future = launchAsync([this, &chunksQuery, newOpTime, &collEpoch, &collTimestamp] {
         OpTime opTime;
 
         const auto chunks =
@@ -387,6 +388,8 @@ TEST_F(ShardingCatalogClientTest, GetChunksForNSWithSortAndLimit) {
                                                 BSON(ChunkType::lastmod() << -1),
                                                 1,
                                                 &opTime,
+                                                collEpoch,
+                                                collTimestamp,
                                                 repl::ReadConcernLevel::kMajorityReadConcern));
         ASSERT_EQ(2U, chunks.size());
         ASSERT_EQ(newOpTime, opTime);
@@ -433,20 +436,25 @@ TEST_F(ShardingCatalogClientTest, GetChunksForNSWithSortAndLimit) {
 TEST_F(ShardingCatalogClientTest, GetChunksForNSNoSortNoLimit) {
     configTargeter()->setFindHostReturnValue(HostAndPort("TestHost1"));
 
-    ChunkVersion queryChunkVersion({1, 2, OID::gen(), boost::none /* timestamp */});
+    const auto collEpoch = OID::gen();
+    const auto collTimestamp = boost::none;
+
+    ChunkVersion queryChunkVersion({1, 2, collEpoch, collTimestamp});
 
     const BSONObj chunksQuery(
         BSON(ChunkType::ns("TestDB.TestColl")
             << ChunkType::lastmod()
             << BSON("$gte" << static_cast<long long>(queryChunkVersion.toLong()))));
 
-    auto future = launchAsync([this, &chunksQuery] {
+    auto future = launchAsync([this, &chunksQuery, &collEpoch, &collTimestamp] {
         const auto chunks =
             assertGet(catalogClient()->getChunks(operationContext(),
                                                  chunksQuery,
                                                  BSONObj(),
                                                  boost::none,
                                                  nullptr,
+                                                 collEpoch,
+                                                 collTimestamp,
                                                  repl::ReadConcernLevel::kMajorityReadConcern));
 
         ASSERT_EQ(0U, chunks.size());
@@ -484,13 +492,15 @@ TEST_F(ShardingCatalogClientTest, GetChunksForNSInvalidChunk) {
             << ChunkType::lastmod()
            << BSON("$gte" << static_cast<long long>(queryChunkVersion.toLong()))));
 
-    auto future = launchAsync([this, &chunksQuery] {
+    auto future = launchAsync([this, &chunksQuery, &queryChunkVersion] {
         const auto swChunks =
             catalogClient()->getChunks(operationContext(),
                                        chunksQuery,
                                        BSONObj(),
                                        boost::none,
                                        nullptr,
+                                       queryChunkVersion.epoch(),
+                                       queryChunkVersion.getTimestamp(),
                                        repl::ReadConcernLevel::kMajorityReadConcern);
 
         ASSERT_EQUALS(ErrorCodes::NoSuchKey, swChunks.getStatus());
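
The chunksQuery objects in these tests lean on how a ChunkVersion serializes into the 'lastmod' field: major and minor are packed into one 64-bit value, so a $gte on the packed value orders chunks by (major, minor). A small illustration; the exact packing (major in the high 32 bits) is stated here as an assumption about the current encoding:

    const auto collEpoch = OID::gen();
    const boost::optional<Timestamp> collTimestamp = boost::none;
    ChunkVersion v(1, 2, collEpoch, collTimestamp);

    // Assumed encoding: (major << 32) | minor, so here (1ULL << 32) | 2.
    const auto packed = v.toLong();
    const BSONObj query =
        BSON(ChunkType::ns("TestDB.TestColl")
             << ChunkType::lastmod() << BSON("$gte" << Timestamp(packed)));
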
diff --git a/src/mongo/s/catalog/type_chunk.cpp b/src/mongo/s/catalog/type_chunk.cpp
index ee72ab44f1b..3798e7d5c32 100644
--- a/src/mongo/s/catalog/type_chunk.cpp
+++ b/src/mongo/s/catalog/type_chunk.cpp
@@ -343,7 +343,9 @@ StatusWith<ChunkType> ChunkType::parseFromConfigBSONCommand(const BSONObj& sourc
     return chunk;
 }
 
-StatusWith<ChunkType> ChunkType::fromConfigBSON(const BSONObj& source) {
+StatusWith<ChunkType> ChunkType::fromConfigBSON(const BSONObj& source,
+                                                const OID& epoch,
+                                                const boost::optional<Timestamp>& timestamp) {
     StatusWith<ChunkType> chunkStatus = parseFromConfigBSONCommand(source);
     if (!chunkStatus.isOK()) {
         return chunkStatus.getStatus();
diff --git a/src/mongo/s/catalog/type_chunk.h b/src/mongo/s/catalog/type_chunk.h
index bff0d350c8c..778d3af77aa 100644
--- a/src/mongo/s/catalog/type_chunk.h
+++ b/src/mongo/s/catalog/type_chunk.h
@@ -233,7 +233,9 @@ public:
      * ErrorCodes::NoSuchKey if the '_id' field is missing while 'fromConfigBSON' does.
      */
     static StatusWith<ChunkType> parseFromConfigBSONCommand(const BSONObj& source);
-    static StatusWith<ChunkType> fromConfigBSON(const BSONObj& source);
+    static StatusWith<ChunkType> fromConfigBSON(const BSONObj& source,
+                                                const OID& epoch,
+                                                const boost::optional<Timestamp>& timestamp);
 
     /**
      * Returns the BSON representation of the entry for the config server's config.chunks
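
The body of fromConfigBSON is truncated in the hunk above; what matters to callers is the contract: the config document still supplies 'lastmod' and 'lastmodEpoch', while the caller supplies the collection identity that completes the version. A usage sketch grounded in the ToFromConfigBSON test below:

    const auto collEpoch = OID::gen();
    const boost::optional<Timestamp> collTimestamp = boost::none;
    ChunkVersion chunkVersion(1, 2, collEpoch, collTimestamp);

    BSONObj doc = BSON(ChunkType::name(OID::gen())
                       << ChunkType::ns("test.mycol") << ChunkType::min(BSON("a" << 10))
                       << ChunkType::max(BSON("a" << 20)) << ChunkType::shard("shard0001")
                       << "lastmod" << Timestamp(chunkVersion.toLong()) << "lastmodEpoch"
                       << chunkVersion.epoch());
    auto chunk = uassertStatusOK(ChunkType::fromConfigBSON(doc, collEpoch, collTimestamp));
    ASSERT_EQ(collEpoch, chunk.getVersion().epoch());
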
diff --git a/src/mongo/s/catalog/type_chunk_test.cpp b/src/mongo/s/catalog/type_chunk_test.cpp
index 38de85bba2c..0e2fe0d0eda 100644
--- a/src/mongo/s/catalog/type_chunk_test.cpp
+++ b/src/mongo/s/catalog/type_chunk_test.cpp
@@ -47,21 +47,24 @@ const BSONObj kMax = BSON("a" << 20);
 const ShardId kShard("shard0000");
 
 TEST(ChunkType, MissingConfigRequiredFields) {
-    ChunkVersion chunkVersion(1, 2, OID::gen(), boost::none /* timestamp */);
+    const auto collEpoch = OID::gen();
+    const auto collTimestamp = boost::none;
+
+    ChunkVersion chunkVersion(1, 2, collEpoch, collTimestamp);
 
     BSONObj objModNS =
         BSON(ChunkType::name(OID::gen())
             << ChunkType::min(BSON("a" << 10 << "b" << 10)) << ChunkType::max(BSON("a" << 20))
             << "lastmod" << Timestamp(chunkVersion.toLong()) << "lastmodEpoch"
             << chunkVersion.epoch() << ChunkType::shard("shard0001"));
-    StatusWith<ChunkType> chunkRes = ChunkType::fromConfigBSON(objModNS);
+    StatusWith<ChunkType> chunkRes = ChunkType::fromConfigBSON(objModNS, collEpoch, collTimestamp);
     ASSERT_FALSE(chunkRes.isOK());
 
     BSONObj objModKeys =
         BSON(ChunkType::name(OID::gen())
             << ChunkType::ns("test.mycol") << "lastmod" << Timestamp(chunkVersion.toLong())
             << "lastmodEpoch" << chunkVersion.epoch() << ChunkType::shard("shard0001"));
-    chunkRes = ChunkType::fromConfigBSON(objModKeys);
+    chunkRes = ChunkType::fromConfigBSON(objModKeys, collEpoch, collTimestamp);
     ASSERT_FALSE(chunkRes.isOK());
 
     BSONObj objModShard =
@@ -69,14 +72,14 @@ TEST(ChunkType, MissingConfigRequiredFields) {
             << ChunkType::ns("test.mycol") << ChunkType::min(BSON("a" << 10 << "b" << 10))
             << ChunkType::max(BSON("a" << 20)) << "lastmod" << Timestamp(chunkVersion.toLong())
             << "lastmodEpoch" << chunkVersion.epoch());
-    chunkRes = ChunkType::fromConfigBSON(objModShard);
+    chunkRes = ChunkType::fromConfigBSON(objModShard, collEpoch, collTimestamp);
     ASSERT_FALSE(chunkRes.isOK());
 
     BSONObj objModVersion =
         BSON(ChunkType::name(OID::gen())
             << ChunkType::ns("test.mycol") << ChunkType::min(BSON("a" << 10 << "b" << 10))
             << ChunkType::max(BSON("a" << 20)) << ChunkType::shard("shard0001"));
-    chunkRes = ChunkType::fromConfigBSON(objModVersion);
+    chunkRes = ChunkType::fromConfigBSON(objModVersion, collEpoch, collTimestamp);
     ASSERT_FALSE(chunkRes.isOK());
 }
 
@@ -130,49 +133,61 @@ TEST(ChunkType, ToFromShardBSON) {
 }
 
 TEST(ChunkType, MinAndMaxShardKeysDifferInNumberOfKeys) {
-    ChunkVersion chunkVersion(1, 2, OID::gen(), boost::none /* timestamp */);
+    const auto collEpoch = OID::gen();
+    const auto collTimestamp = boost::none;
+
+    ChunkVersion chunkVersion(1, 2, collEpoch, collTimestamp);
 
     BSONObj obj =
         BSON(ChunkType::name(OID::gen())
            << ChunkType::ns("test.mycol") << ChunkType::min(BSON("a" << 10 << "b" << 10))
            << ChunkType::max(BSON("a" << 20)) << "lastmod" << Timestamp(chunkVersion.toLong())
            << "lastmodEpoch" << chunkVersion.epoch() << ChunkType::shard("shard0001"));
-    StatusWith<ChunkType> chunkRes = ChunkType::fromConfigBSON(obj);
+    StatusWith<ChunkType> chunkRes = ChunkType::fromConfigBSON(obj, collEpoch, collTimestamp);
     ASSERT_OK(chunkRes.getStatus());
     ASSERT_FALSE(chunkRes.getValue().validate().isOK());
 }
 
 TEST(ChunkType, MinAndMaxShardKeysDifferInKeyNames) {
-    ChunkVersion chunkVersion(1, 2, OID::gen(), boost::none /* timestamp */);
+    const auto collEpoch = OID::gen();
+    const auto collTimestamp = boost::none;
+
+    ChunkVersion chunkVersion(1, 2, collEpoch, collTimestamp);
 
     BSONObj obj =
         BSON(ChunkType::name(OID::gen())
            << ChunkType::ns("test.mycol") << ChunkType::min(BSON("a" << 10))
            << ChunkType::max(BSON("b" << 20)) << "lastmod" << Timestamp(chunkVersion.toLong())
           << "lastmodEpoch" << chunkVersion.epoch() << ChunkType::shard("shard0001"));
-    StatusWith<ChunkType> chunkRes = ChunkType::fromConfigBSON(obj);
+    StatusWith<ChunkType> chunkRes = ChunkType::fromConfigBSON(obj, collEpoch, collTimestamp);
     ASSERT_OK(chunkRes.getStatus());
     ASSERT_FALSE(chunkRes.getValue().validate().isOK());
 }
 
 TEST(ChunkType, MinToMaxNotAscending) {
-    ChunkVersion chunkVersion(1, 2, OID::gen(), boost::none /* timestamp */);
+    const auto collEpoch = OID::gen();
+    const auto collTimestamp = boost::none;
+
+    ChunkVersion chunkVersion(1, 2, collEpoch, collTimestamp);
 
     BSONObj obj =
         BSON(ChunkType::name(OID::gen())
           << ChunkType::ns("test.mycol") << ChunkType::min(BSON("a" << 20))
           << ChunkType::max(BSON("a" << 10)) << "lastmod" << Timestamp(chunkVersion.toLong())
          << "lastmodEpoch" << chunkVersion.epoch() << ChunkType::shard("shard0001"));
-    StatusWith<ChunkType> chunkRes = ChunkType::fromConfigBSON(obj);
+    StatusWith<ChunkType> chunkRes = ChunkType::fromConfigBSON(obj, collEpoch, collTimestamp);
     ASSERT_EQ(ErrorCodes::FailedToParse, chunkRes.getStatus());
 }
 
 TEST(ChunkType, ToFromConfigBSON) {
+    const auto collEpoch = OID::gen();
+    const auto collTimestamp = boost::none;
+
     const auto chunkID = OID::gen();
-    ChunkVersion chunkVersion(1, 2, OID::gen(), boost::none /* timestamp */);
+    ChunkVersion chunkVersion(1, 2, collEpoch, collTimestamp);
     BSONObj obj =
         BSON(ChunkType::name(chunkID)
          << ChunkType::ns("test.mycol") << ChunkType::min(BSON("a" << 10))
         << ChunkType::max(BSON("a" << 20)) << ChunkType::shard("shard0001") << "lastmod"
        << Timestamp(chunkVersion.toLong()) << "lastmodEpoch" << chunkVersion.epoch());
-    StatusWith<ChunkType> chunkRes = ChunkType::fromConfigBSON(obj);
+    StatusWith<ChunkType> chunkRes = ChunkType::fromConfigBSON(obj, collEpoch, collTimestamp);
     ASSERT_OK(chunkRes.getStatus());
     ChunkType chunk = chunkRes.getValue();
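
A distinction these tests keep exercising: fromConfigBSON only checks that a document is structurally parseable, while semantic problems surface either through validate() on the parsed chunk (mismatched key names, differing key counts) or as FailedToParse where parsing itself inspects the range (min not less than max). In short:

    auto chunkRes = ChunkType::fromConfigBSON(obj, collEpoch, collTimestamp);
    ASSERT_OK(chunkRes.getStatus());                      // structurally fine
    ASSERT_FALSE(chunkRes.getValue().validate().isOK());  // semantically rejected
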
@@ -189,13 +204,19 @@ TEST(ChunkType, BadType) {
+    const auto collEpoch = OID::gen();
+    const auto collTimestamp = boost::none;
+
     BSONObj obj = BSON(ChunkType::name() << 0);
-    StatusWith<ChunkType> chunkRes = ChunkType::fromConfigBSON(obj);
+    StatusWith<ChunkType> chunkRes = ChunkType::fromConfigBSON(obj, collEpoch, collTimestamp);
     ASSERT_FALSE(chunkRes.isOK());
 }
 
 TEST(ChunkType, BothNsAndUUID) {
-    ChunkVersion chunkVersion(1, 2, OID::gen(), boost::none /* timestamp */);
+    const auto collEpoch = OID::gen();
+    const auto collTimestamp = boost::none;
+
+    ChunkVersion chunkVersion(1, 2, collEpoch, collTimestamp);
 
     BSONObj objModNS =
         BSON(ChunkType::name(OID::gen())
@@ -203,12 +224,15 @@ TEST(ChunkType, BothNsAndUUID) {
            << ChunkType::min(BSON("a" << 10 << "b" << 10)) << ChunkType::max(BSON("a" << 20))
            << "lastmod" << Timestamp(chunkVersion.toLong()) << "lastmodEpoch"
            << chunkVersion.epoch() << ChunkType::shard("shard0001"));
-    StatusWith<ChunkType> chunkRes = ChunkType::fromConfigBSON(objModNS);
+    StatusWith<ChunkType> chunkRes = ChunkType::fromConfigBSON(objModNS, collEpoch, collTimestamp);
     ASSERT_TRUE(chunkRes.isOK());
 }
 
 TEST(ChunkType, UUIDPresentAndNsMissing) {
-    ChunkVersion chunkVersion(1, 2, OID::gen(), boost::none /* timestamp */);
+    const auto collEpoch = OID::gen();
+    const auto collTimestamp = boost::none;
+
+    ChunkVersion chunkVersion(1, 2, collEpoch, collTimestamp);
 
     BSONObj objModNS =
         BSON(ChunkType::name(OID::gen())
@@ -216,7 +240,7 @@ TEST(ChunkType, UUIDPresentAndNsMissing) {
            << ChunkType::min(BSON("a" << 10 << "b" << 10)) << ChunkType::max(BSON("a" << 20))
            << "lastmod" << Timestamp(chunkVersion.toLong()) << "lastmodEpoch"
            << chunkVersion.epoch() << ChunkType::shard("shard0001"));
-    StatusWith<ChunkType> chunkRes = ChunkType::fromConfigBSON(objModNS);
+    StatusWith<ChunkType> chunkRes = ChunkType::fromConfigBSON(objModNS, collEpoch, collTimestamp);
     ASSERT_TRUE(chunkRes.isOK());
 }
 
diff --git a/src/mongo/s/catalog_cache_refresh_test.cpp b/src/mongo/s/catalog_cache_refresh_test.cpp
index 1a70482a348..f37436cecf9 100644
--- a/src/mongo/s/catalog_cache_refresh_test.cpp
+++ b/src/mongo/s/catalog_cache_refresh_test.cpp
@@ -315,7 +315,10 @@ TEST_F(CatalogCacheRefreshTest, ChunksBSONCorrupted) {
             {shardKeyPattern.getKeyPattern().globalMin(), BSON("_id" << 0)},
             ChunkVersion(1, 0, epoch, boost::none /* timestamp */),
             {"0"});
-        return std::vector<BSONObj>{coll.toBSON().addFields(
+        return std::vector<BSONObj>{/* collection */
+                                    coll.toBSON(),
+                                    /* chunks */
+                                    coll.toBSON().addFields(
                                         BSON("chunks" << chunk1.toConfigBSON())),
                                     BSON("BadValue"
                                          << "This value should not be in a chunk config document")};