diff options
author | Silvia Surroca <silvia.surroca@mongodb.com> | 2023-01-03 12:49:35 +0000 |
---|---|---|
committer | Evergreen Agent <no-reply@evergreen.mongodb.com> | 2023-01-03 13:26:03 +0000 |
commit | e2dc8a74fa2d708cc560fb79c29c60cbce4e659a (patch) | |
tree | 023c94be2ab566401f3bb123dbebb909a72e25eb | |
parent | 6d8ebdbe466580ef90af022f5946c93608d627ad (diff) | |
download | mongo-e2dc8a74fa2d708cc560fb79c29c60cbce4e659a.tar.gz |
SERVER-71479 Merging chunks must not set `validAfter` to the current wall time
7 files changed, 76 insertions, 58 deletions
diff --git a/src/mongo/db/s/config/configsvr_merge_chunks_command.cpp b/src/mongo/db/s/config/configsvr_merge_chunks_command.cpp index ea2823dcdf0..5df553b5bd3 100644 --- a/src/mongo/db/s/config/configsvr_merge_chunks_command.cpp +++ b/src/mongo/db/s/config/configsvr_merge_chunks_command.cpp @@ -94,8 +94,7 @@ public: request().getTimestamp(), request().getCollectionUUID(), request().getChunkRange(), - request().getShard(), - request().getValidAfter())); + request().getShard())); return ConfigSvrMergeResponse{ChunkVersion::fromBSONPositionalOrNewerFormat( shardAndCollVers[ChunkVersion::kShardVersionField])}; } diff --git a/src/mongo/db/s/config/sharding_catalog_manager.h b/src/mongo/db/s/config/sharding_catalog_manager.h index f6e791e0589..e6102f8b533 100644 --- a/src/mongo/db/s/config/sharding_catalog_manager.h +++ b/src/mongo/db/s/config/sharding_catalog_manager.h @@ -271,8 +271,7 @@ public: const boost::optional<Timestamp>& timestamp, const UUID& requestCollectionUUID, const ChunkRange& chunkRange, - const ShardId& shardId, - const boost::optional<Timestamp>& validAfter); + const ShardId& shardId); /** * Updates metadata in config.chunks collection to show the given chunk in its new shard. 
@@ -681,7 +680,7 @@ private: const NamespaceString& nss, const UUID& collectionUUID, const ChunkVersion& mergeVersion, - const boost::optional<Timestamp>& validAfter, + const Timestamp& validAfter, const ChunkRange& chunkRange, const ShardId& shardId, std::shared_ptr<std::vector<ChunkType>> chunksToMerge); diff --git a/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp index 3c02f089b4c..f9f33a065d2 100644 --- a/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp +++ b/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp @@ -727,11 +727,10 @@ void ShardingCatalogManager::_mergeChunksInTransaction( const NamespaceString& nss, const UUID& collectionUUID, const ChunkVersion& mergeVersion, - const boost::optional<Timestamp>& validAfter, + const Timestamp& validAfter, const ChunkRange& chunkRange, const ShardId& shardId, std::shared_ptr<std::vector<ChunkType>> chunksToMerge) { - dassert(validAfter); withTransaction( opCtx, ChunkType::ConfigNS, [&, this](OperationContext* opCtx, TxnNumber txnNumber) { // Construct the new chunk by taking `min` from the first merged chunk and `max` @@ -748,7 +747,7 @@ void ShardingCatalogManager::_mergeChunksInTransaction( mergedChunk.setVersion(mergeVersion); mergedChunk.setEstimatedSizeBytes(boost::none); - mergedChunk.setHistory({ChunkHistory(validAfter.value(), mergedChunk.getShard())}); + mergedChunk.setHistory({ChunkHistory(validAfter, mergedChunk.getShard())}); entry.setU(write_ops::UpdateModification::parseFromClassicUpdate( mergedChunk.toConfigBSON())); @@ -806,11 +805,7 @@ StatusWith<BSONObj> ShardingCatalogManager::commitChunksMerge( const boost::optional<Timestamp>& timestamp, const UUID& requestCollectionUUID, const ChunkRange& chunkRange, - const ShardId& shardId, - const boost::optional<Timestamp>& validAfter) { - if (!validAfter) { - return {ErrorCodes::IllegalOperation, "chunk operation requires validAfter 
timestamp"}; - } + const ShardId& shardId) { // Mark opCtx as interruptible to ensure that all reads and writes to the metadata collections // under the exclusive _kChunkOpLock happen on the same term. @@ -890,6 +885,12 @@ StatusWith<BSONObj> ShardingCatalogManager::commitChunksMerge( // 3. Prepare the data for the merge // and ensure that the retrieved list of chunks covers the whole range. + + // The `validAfter` field must always be set. If not existing, it means the chunk + // always belonged to the same shard, hence it's valid to set `0` as the time at + // which the chunk started being valid. + Timestamp validAfter{0}; + auto chunksToMerge = std::make_shared<std::vector<ChunkType>>(); chunksToMerge->reserve(shardChunksInRangeResponse.docs.size()); for (const auto& chunkDoc : shardChunksInRangeResponse.docs) { @@ -910,6 +911,15 @@ StatusWith<BSONObj> ShardingCatalogManager::commitChunksMerge( << chunkRange.toString(), chunk.getMin().woCompare(chunksToMerge->back().getMax()) == 0); } + + // Get the `validAfter` field from the most recent chunk placed on the shard + if (!chunk.getHistory().empty()) { + const auto& chunkValidAfter = chunk.getHistory().front().getValidAfter(); + if (validAfter < chunkValidAfter) { + validAfter = chunkValidAfter; + } + } + chunksToMerge->push_back(std::move(chunk)); } uassert(ErrorCodes::IllegalOperation, diff --git a/src/mongo/db/s/config/sharding_catalog_manager_merge_chunks_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_merge_chunks_test.cpp index d4a82abfb97..be08bede01c 100644 --- a/src/mongo/db/s/config/sharding_catalog_manager_merge_chunks_test.cpp +++ b/src/mongo/db/s/config/sharding_catalog_manager_merge_chunks_test.cpp @@ -100,6 +100,11 @@ TEST_F(MergeChunkTest, MergeExistingChunksCorrectlyShouldSucceed) { auto chunk2(chunk); chunk2.setName(OID::gen()); + // set histories + chunk.setHistory({ChunkHistory{Timestamp{100, 0}, _shardId}}); + chunk2.setHistory({ChunkHistory{Timestamp{200, 0}, _shardId}}); + + // set 
boundaries auto chunkMin = BSON("a" << 1); auto chunkBound = BSON("a" << 5); auto chunkMax = BSON("a" << 10); @@ -112,8 +117,6 @@ TEST_F(MergeChunkTest, MergeExistingChunksCorrectlyShouldSucceed) { setupCollection(_nss1, _keyPattern, {chunk, chunk2}); - Timestamp validAfter{100, 0}; - ChunkRange rangeToBeMerged(chunk.getMin(), chunk2.getMax()); auto versions = assertGet(ShardingCatalogManager::get(operationContext()) @@ -123,8 +126,7 @@ TEST_F(MergeChunkTest, MergeExistingChunksCorrectlyShouldSucceed) { collTimestamp, collUuid, rangeToBeMerged, - _shardId, - validAfter)); + _shardId)); auto collVersion = ChunkVersion::fromBSONPositionalOrNewerFormat(versions["collectionVersion"]); auto shardVersion = ChunkVersion::fromBSONPositionalOrNewerFormat(versions["shardVersion"]); @@ -166,7 +168,8 @@ TEST_F(MergeChunkTest, MergeExistingChunksCorrectlyShouldSucceed) { // Make sure history is there ASSERT_EQ(1UL, mergedChunk.getHistory().size()); - ASSERT_EQ(validAfter, mergedChunk.getHistory().front().getValidAfter()); + ASSERT_EQ(chunk2.getHistory().front().getValidAfter(), + mergedChunk.getHistory().front().getValidAfter()); } TEST_F(MergeChunkTest, MergeSeveralChunksCorrectlyShouldSucceed) { @@ -187,6 +190,11 @@ TEST_F(MergeChunkTest, MergeSeveralChunksCorrectlyShouldSucceed) { chunk2.setName(OID::gen()); chunk3.setName(OID::gen()); + // set histories + chunk.setHistory({ChunkHistory{Timestamp{100, 10}, _shardId}}); + chunk2.setHistory({ChunkHistory{Timestamp{200, 1}, _shardId}}); + chunk3.setHistory({ChunkHistory{Timestamp{50, 0}, _shardId}}); + auto chunkMin = BSON("a" << 1); auto chunkBound = BSON("a" << 5); auto chunkBound2 = BSON("a" << 7); @@ -204,8 +212,6 @@ TEST_F(MergeChunkTest, MergeSeveralChunksCorrectlyShouldSucceed) { setupCollection(_nss1, _keyPattern, {chunk, chunk2, chunk3}); ChunkRange rangeToBeMerged(chunk.getMin(), chunk3.getMax()); - Timestamp validAfter{100, 0}; - ASSERT_OK(ShardingCatalogManager::get(operationContext()) 
->commitChunksMerge(operationContext(), _nss1, @@ -213,8 +219,7 @@ collTimestamp, collUuid, rangeToBeMerged, - _shardId, - validAfter)); + _shardId)); const auto query = BSON(ChunkType::collectionUUID() << collUuid); auto findResponse = uassertStatusOK( @@ -245,7 +250,8 @@ // Make sure history is there ASSERT_EQ(1UL, mergedChunk.getHistory().size()); - ASSERT_EQ(validAfter, mergedChunk.getHistory().front().getValidAfter()); + ASSERT_EQ(chunk2.getHistory().front().getValidAfter(), + mergedChunk.getHistory().front().getValidAfter()); } TEST_F(MergeChunkTest, NewMergeShouldClaimHighestVersion) { @@ -266,6 +272,10 @@ auto chunk2(chunk); chunk2.setName(OID::gen()); + // set histories + chunk.setHistory({ChunkHistory{Timestamp{100, 0}, _shardId}}); + chunk2.setHistory({ChunkHistory{Timestamp{200, 0}, _shardId}}); + auto chunkMin = BSON("a" << 1); auto chunkBound = BSON("a" << 5); auto chunkMax = BSON("a" << 10); @@ -288,8 +298,6 @@ setupCollection(_nss1, _keyPattern, {chunk, chunk2, otherChunk}); - Timestamp validAfter{100, 0}; - ASSERT_OK(ShardingCatalogManager::get(operationContext()) ->commitChunksMerge(operationContext(), _nss1, @@ -297,8 +305,7 @@ collTimestamp, collUuid, rangeToBeMerged, - _shardId, - validAfter)); + _shardId)); const auto query = BSON(ChunkType::collectionUUID() << collUuid); auto findResponse = uassertStatusOK( @@ -329,7 +336,8 @@ // Make sure history is there ASSERT_EQ(1UL, mergedChunk.getHistory().size()); - ASSERT_EQ(validAfter, mergedChunk.getHistory().front().getValidAfter()); + ASSERT_EQ(chunk2.getHistory().front().getValidAfter(), +
mergedChunk.getHistory().front().getValidAfter()); } TEST_F(MergeChunkTest, MergeLeavesOtherChunksAlone) { @@ -349,6 +357,10 @@ TEST_F(MergeChunkTest, MergeLeavesOtherChunksAlone) { auto chunk2(chunk); chunk2.setName(OID::gen()); + // set histories + chunk.setHistory({ChunkHistory{Timestamp{100, 5}, shardId}}); + chunk2.setHistory({ChunkHistory{Timestamp{200, 1}, shardId}}); + auto chunkMin = BSON("a" << 1); auto chunkBound = BSON("a" << 5); auto chunkMax = BSON("a" << 10); @@ -379,8 +391,7 @@ TEST_F(MergeChunkTest, MergeLeavesOtherChunksAlone) { collTimestamp, collUuid, rangeToBeMerged, - shardId, - validAfter)); + shardId)); const auto query = BSON(ChunkType::collectionUUID() << collUuid); auto findResponse = uassertStatusOK( getConfigShard()->exhaustiveFindOnConfig(operationContext(), @@ -423,11 +434,16 @@ TEST_F(MergeChunkTest, NonExistingNamespace) { chunk.setCollectionUUID(UUID::gen()); auto origVersion = ChunkVersion(1, 0, collEpoch, collTimestamp); + chunk.setShard(_shardId); chunk.setVersion(origVersion); // Construct chunk to be merged auto chunk2(chunk); + // set history + chunk.setHistory({ChunkHistory{Timestamp{100, 0}, _shardId}}); + chunk2.setHistory({ChunkHistory{Timestamp{200, 0}, _shardId}}); + auto chunkMin = BSON("a" << 1); auto chunkBound = BSON("a" << 5); auto chunkMax = BSON("a" << 10); @@ -442,8 +458,6 @@ TEST_F(MergeChunkTest, NonExistingNamespace) { setupCollection(_nss1, _keyPattern, {chunk, chunk2}); - Timestamp validAfter{1}; - ASSERT_THROWS(ShardingCatalogManager::get(operationContext()) ->commitChunksMerge(operationContext(), NamespaceString("TestDB.NonExistingColl"), @@ -451,8 +465,7 @@ TEST_F(MergeChunkTest, NonExistingNamespace) { collTimestamp, collUuidAtRequest, rangeToBeMerged, - _shardId, - validAfter), + _shardId), DBException); } @@ -471,6 +484,10 @@ TEST_F(MergeChunkTest, NonMatchingUUIDsOfChunkAndRequestErrors) { // Construct chunk to be merged auto chunk2(chunk); + // set histories + 
chunk.setHistory({ChunkHistory{Timestamp{100, 0}, _shardId}}); + chunk2.setHistory({ChunkHistory{Timestamp{200, 0}, _shardId}}); + auto chunkMin = BSON("a" << 1); auto chunkBound = BSON("a" << 5); auto chunkMax = BSON("a" << 10); @@ -485,8 +502,6 @@ TEST_F(MergeChunkTest, NonMatchingUUIDsOfChunkAndRequestErrors) { setupCollection(_nss1, _keyPattern, {chunk, chunk2}); - Timestamp validAfter{1}; - auto mergeStatus = ShardingCatalogManager::get(operationContext()) ->commitChunksMerge(operationContext(), _nss1, @@ -494,8 +509,7 @@ TEST_F(MergeChunkTest, NonMatchingUUIDsOfChunkAndRequestErrors) { collTimestamp, requestUuid, rangeToBeMerged, - _shardId, - validAfter); + _shardId); ASSERT_EQ(ErrorCodes::InvalidUUID, mergeStatus); } @@ -519,12 +533,11 @@ TEST_F(MergeChunkTest, MergeAlreadyHappenedSucceeds) { mergedChunk.setName(OID::gen()); mergedChunk.setCollectionUUID(collUuid); mergedChunk.setShard(_shardId); + mergedChunk.setHistory({ChunkHistory{Timestamp{100, 0}, _shardId}}); setupCollection(_nss1, _keyPattern, {mergedChunk}); - Timestamp validAfter{1}; - ASSERT_OK(ShardingCatalogManager::get(operationContext()) ->commitChunksMerge(operationContext(), _nss1, @@ -532,8 +545,7 @@ TEST_F(MergeChunkTest, MergeAlreadyHappenedSucceeds) { collTimestamp, collUuid, rangeToBeMerged, - _shardId, - validAfter)); + _shardId)); // Verify that no change to config.chunks happened. 
const auto query = BSON(ChunkType::collectionUUID() << collUuid); @@ -580,6 +592,11 @@ TEST_F(MergeChunkTest, MergingChunksWithDollarPrefixShouldSucceed) { auto chunkBound2 = BSON("a" << BSON("$mixKey" << 1)); auto chunkMax = BSON("a" << kMaxBSONKey); + // set histories + chunk1.setHistory({ChunkHistory{Timestamp{100, 9}, _shardId}}); + chunk2.setHistory({ChunkHistory{Timestamp{200, 5}, _shardId}}); + chunk3.setHistory({ChunkHistory{Timestamp{156, 1}, _shardId}}); + // first chunk boundaries chunk1.setMin(chunkMin); chunk1.setMax(chunkBound1); @@ -594,7 +611,6 @@ TEST_F(MergeChunkTest, MergingChunksWithDollarPrefixShouldSucceed) { // Record chunk boundaries for passing into commitChunksMerge ChunkRange rangeToBeMerged(chunk1.getMin(), chunk3.getMax()); - Timestamp validAfter{100, 0}; ASSERT_OK(ShardingCatalogManager::get(operationContext()) ->commitChunksMerge(operationContext(), @@ -603,8 +619,7 @@ TEST_F(MergeChunkTest, MergingChunksWithDollarPrefixShouldSucceed) { collTimestamp, collUuid, rangeToBeMerged, - _shardId, - validAfter)); + _shardId)); const auto query = BSON(ChunkType::collectionUUID() << collUuid); auto findResponse = uassertStatusOK( @@ -635,7 +650,8 @@ TEST_F(MergeChunkTest, MergingChunksWithDollarPrefixShouldSucceed) { // Make sure history is there ASSERT_EQ(1UL, mergedChunk.getHistory().size()); - ASSERT_EQ(validAfter, mergedChunk.getHistory().front().getValidAfter()); + ASSERT_EQ(chunk2.getHistory().front().getValidAfter(), + mergedChunk.getHistory().front().getValidAfter()); } } // namespace diff --git a/src/mongo/db/s/shardsvr_merge_chunks_command.cpp b/src/mongo/db/s/shardsvr_merge_chunks_command.cpp index 8c5086a6ae8..8e11f5bfec3 100644 --- a/src/mongo/db/s/shardsvr_merge_chunks_command.cpp +++ b/src/mongo/db/s/shardsvr_merge_chunks_command.cpp @@ -59,10 +59,8 @@ Shard::CommandResponse commitMergeOnConfigServer(OperationContext* opCtx, const ChunkRange& chunkRange, const CollectionMetadata& metadata) { auto const shardingState = 
ShardingState::get(opCtx); - const auto currentTime = VectorClock::get(opCtx)->getTime(); ConfigSvrMergeChunks request{nss, shardingState->shardId(), metadata.getUUID(), chunkRange}; - request.setValidAfter(currentTime.clusterTime().asTimestamp()); request.setEpoch(epoch); request.setTimestamp(timestamp); diff --git a/src/mongo/s/request_types/merge_chunk_request.idl b/src/mongo/s/request_types/merge_chunk_request.idl index 55d4148902d..5675303b43f 100644 --- a/src/mongo/s/request_types/merge_chunk_request.idl +++ b/src/mongo/s/request_types/merge_chunk_request.idl @@ -80,10 +80,6 @@ commands: type: chunk_range cpp_name: "chunkRange" description: "Chunk bounds to merge." - validAfter: - type: timestamp - description: "The time after which this chunk is at this shard." - optional: true epoch: description: "The expected epoch of the collection that is being committed" type: objectid diff --git a/src/mongo/s/request_types/merge_chunks_request_test.cpp b/src/mongo/s/request_types/merge_chunks_request_test.cpp index b631ca1dffa..f7e77f1adaf 100644 --- a/src/mongo/s/request_types/merge_chunks_request_test.cpp +++ b/src/mongo/s/request_types/merge_chunks_request_test.cpp @@ -60,12 +60,12 @@ TEST(ConfigSvrMergeChunks, BasicValidConfigCommand) { TEST(ConfigSvrMergeChunks, ConfigCommandtoBSON) { auto collUUID = UUID::gen(); - BSONObj serializedRequest = BSON("_configsvrCommitChunksMerge" - << "TestDB.TestColl" - << "shard" - << "shard0000" - << "collUUID" << collUUID.toBSON() << "chunkRange" - << chunkRange.toBSON() << "validAfter" << Timestamp{100}); + BSONObj serializedRequest = + BSON("_configsvrCommitChunksMerge" + << "TestDB.TestColl" + << "shard" + << "shard0000" + << "collUUID" << collUUID.toBSON() << "chunkRange" << chunkRange.toBSON()); BSONObj writeConcernObj = BSON("w" << "majority"); |