author     Sergi Mateo Bellido <sergi.mateo-bellido@mongodb.com>   2021-06-18 12:34:02 +0000
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>        2021-06-21 13:32:14 +0000
commit     1cb38af8b458f14ff47a2c03fc769dcad4a90b76 (patch)
tree       55d748d79fb4737c84fcaef7f9a7698c221d4a7d
parent     d906abe7cf0634402bc8a61f66744f510e90fd6e (diff)
download   mongo-1cb38af8b458f14ff47a2c03fc769dcad4a90b76.tar.gz
SERVER-57844 Minor timestamp-consistency issue between the ChunkManager/SSCCL and config.collections
(cherry picked from commit d80220ae86d0c0cdcf3f939d2f7220b5eb7c69e5)
-rw-r--r--  src/mongo/db/s/shard_server_catalog_cache_loader.cpp        12
-rw-r--r--  src/mongo/db/s/shard_server_catalog_cache_loader_test.cpp  223
-rw-r--r--  src/mongo/s/catalog_cache_test.cpp                         100
-rw-r--r--  src/mongo/s/chunk_manager.cpp                               10
-rw-r--r--  src/mongo/s/chunk_manager.h                                 10
5 files changed, 291 insertions(+), 64 deletions(-)
diff --git a/src/mongo/db/s/shard_server_catalog_cache_loader.cpp b/src/mongo/db/s/shard_server_catalog_cache_loader.cpp
index a229190f8d8..c8da23e4ec7 100644
--- a/src/mongo/db/s/shard_server_catalog_cache_loader.cpp
+++ b/src/mongo/db/s/shard_server_catalog_cache_loader.cpp
@@ -1332,7 +1332,17 @@ ShardServerCatalogCacheLoader::CollAndChunkTask::CollAndChunkTask(
} else {
collectionAndChangedChunks = std::move(statusWithCollectionAndChangedChunks.getValue());
invariant(!collectionAndChangedChunks->changedChunks.empty());
- maxQueryVersion = collectionAndChangedChunks->changedChunks.back().getVersion();
+ const auto highestVersion =
+ collectionAndChangedChunks->changedChunks.back().getVersion();
+ // Note that due to the way Phase 1 of the FCV upgrade writes timestamps to chunks
+ // (non-atomically), it is possible that chunks exist with timestamps, but the
+ corresponding config.collections entry doesn't. In this case, the chunks' timestamps
+ should be ignored when computing the max query version, and the timestamp that comes
+ from config.collections should be used instead.
+ maxQueryVersion = ChunkVersion(highestVersion.majorVersion(),
+ highestVersion.minorVersion(),
+ highestVersion.epoch(),
+ collectionAndChangedChunks->creationTime);
}
} else {
invariant(statusWithCollectionAndChangedChunks == ErrorCodes::NamespaceNotFound);
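The rule those added lines implement, as a minimal standalone sketch: the max query version keeps the highest chunk's major/minor (and, in the real code, epoch), but always takes its timestamp from the config.collections entry. SimpleVersion and computeMaxQueryVersion below are hypothetical stand-ins for ChunkVersion and the CollAndChunkTask logic, not the real MongoDB API; the epoch is omitted for brevity.

#include <cassert>
#include <cstdint>
#include <optional>

struct SimpleVersion {
    uint32_t majorVersion;
    uint32_t minorVersion;
    std::optional<uint64_t> timestamp;  // stand-in for boost::optional<Timestamp>
};

SimpleVersion computeMaxQueryVersion(const SimpleVersion& highestChunkVersion,
                                     const std::optional<uint64_t>& collectionTimestamp) {
    // Major/minor come from the newest chunk; the timestamp always comes from
    // the config.collections entry, even if the chunk already carries one.
    return {highestChunkVersion.majorVersion,
            highestChunkVersion.minorVersion,
            collectionTimestamp};
}

int main() {
    // Mid-upgrade: the newest chunk is already stamped, the collection is not.
    SimpleVersion highest{2, 5, 42};
    SimpleVersion maxQuery = computeMaxQueryVersion(highest, std::nullopt);
    assert(maxQuery.majorVersion == 2 && maxQuery.minorVersion == 5);
    assert(!maxQuery.timestamp);  // the chunk's timestamp is ignored
    return 0;
}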
diff --git a/src/mongo/db/s/shard_server_catalog_cache_loader_test.cpp b/src/mongo/db/s/shard_server_catalog_cache_loader_test.cpp
index 06002ad2a1c..a488c7eac41 100644
--- a/src/mongo/db/s/shard_server_catalog_cache_loader_test.cpp
+++ b/src/mongo/db/s/shard_server_catalog_cache_loader_test.cpp
@@ -79,7 +79,7 @@ public:
/**
* Sets up the _shardLoader with the results of makeFiveChunks().
*/
- vector<ChunkType> setUpChunkLoaderWithFiveChunks();
+ std::pair<CollectionType, vector<ChunkType>> setUpChunkLoaderWithFiveChunks();
void refreshCollectionEpochOnRemoteLoader();
@@ -199,7 +199,8 @@ CollectionType ShardServerCatalogCacheLoaderTest::makeCollectionType(
return coll;
}
-vector<ChunkType> ShardServerCatalogCacheLoaderTest::setUpChunkLoaderWithFiveChunks() {
+std::pair<CollectionType, vector<ChunkType>>
+ShardServerCatalogCacheLoaderTest::setUpChunkLoaderWithFiveChunks() {
ChunkVersion collectionVersion(1, 0, OID::gen(), boost::none /* timestamp */);
CollectionType collectionType = makeCollectionType(collectionVersion);
@@ -216,7 +217,7 @@ vector<ChunkType> ShardServerCatalogCacheLoaderTest::setUpChunkLoaderWithFiveChu
ASSERT_BSONOBJ_EQ(collAndChunksRes.changedChunks[i].toShardBSON(), chunks[i].toShardBSON());
}
- return chunks;
+ return std::pair{collectionType, std::move(chunks)};
}
TEST_F(ShardServerCatalogCacheLoaderTest, PrimaryLoadFromUnshardedToUnsharded) {
@@ -233,8 +234,8 @@ TEST_F(ShardServerCatalogCacheLoaderTest, PrimaryLoadFromUnshardedToUnsharded) {
TEST_F(ShardServerCatalogCacheLoaderTest, PrimaryLoadFromShardedToUnsharded) {
// First set up the shard chunk loader as sharded.
-
- auto chunks = setUpChunkLoaderWithFiveChunks();
+ auto collAndChunks = setUpChunkLoaderWithFiveChunks();
+ auto& chunks = collAndChunks.second;
// Then return a NamespaceNotFound error, which means the collection must have been dropped,
// clearing the chunk metadata.
@@ -251,8 +252,8 @@ TEST_F(ShardServerCatalogCacheLoaderTest, PrimaryLoadFromShardedToUnsharded) {
TEST_F(ShardServerCatalogCacheLoaderTest, PrimaryLoadFromShardedAndFindNoDiff) {
// First set up the shard chunk loader as sharded.
-
- vector<ChunkType> chunks = setUpChunkLoaderWithFiveChunks();
+ auto collAndChunks = setUpChunkLoaderWithFiveChunks();
+ auto& chunks = collAndChunks.second;
// Then set up the remote loader to return a single document we've already seen -- indicates
// there's nothing new.
@@ -275,8 +276,8 @@ TEST_F(ShardServerCatalogCacheLoaderTest, PrimaryLoadFromShardedAndFindNoDiff) {
// routing table, rather than diff from a known version.
TEST_F(ShardServerCatalogCacheLoaderTest, PrimaryLoadFromShardedAndFindNoDiffRequestAll) {
// First set up the shard chunk loader as sharded.
-
- vector<ChunkType> chunks = setUpChunkLoaderWithFiveChunks();
+ auto collAndChunks = setUpChunkLoaderWithFiveChunks();
+ auto& chunks = collAndChunks.second;
// Then set up the remote loader to return a single document we've already seen -- indicates
// there's nothing new.
@@ -297,8 +298,8 @@ TEST_F(ShardServerCatalogCacheLoaderTest, PrimaryLoadFromShardedAndFindNoDiffReq
TEST_F(ShardServerCatalogCacheLoaderTest, PrimaryLoadFromShardedAndFindDiff) {
// First set up the shard chunk loader as sharded.
-
- vector<ChunkType> chunks = setUpChunkLoaderWithFiveChunks();
+ auto collAndChunks = setUpChunkLoaderWithFiveChunks();
+ auto& chunks = collAndChunks.second;
// Then refresh again and find updated chunks.
@@ -323,8 +324,8 @@ TEST_F(ShardServerCatalogCacheLoaderTest, PrimaryLoadFromShardedAndFindDiff) {
TEST_F(ShardServerCatalogCacheLoaderTest, PrimaryLoadFromShardedAndFindDiffRequestAll) {
// First set up the shard chunk loader as sharded.
-
- vector<ChunkType> chunks = setUpChunkLoaderWithFiveChunks();
+ auto collAndChunks = setUpChunkLoaderWithFiveChunks();
+ auto& chunks = collAndChunks.second;
// First cause a remote refresh to find the updated chunks. Then wait for persistence, so that
// we ensure that nothing is enqueued and the next getChunksSince call will return a predictable
@@ -363,8 +364,8 @@ TEST_F(ShardServerCatalogCacheLoaderTest, PrimaryLoadFromShardedAndFindDiffReque
TEST_F(ShardServerCatalogCacheLoaderTest, PrimaryLoadFromShardedAndFindNewEpoch) {
// First set up the shard chunk loader as sharded.
-
- vector<ChunkType> chunks = setUpChunkLoaderWithFiveChunks();
+ auto collAndChunks = setUpChunkLoaderWithFiveChunks();
+ auto& chunks = collAndChunks.second;
// Then refresh again and find that the collection has been dropped and recreated.
@@ -389,14 +390,12 @@ TEST_F(ShardServerCatalogCacheLoaderTest, PrimaryLoadFromShardedAndFindNewEpoch)
TEST_F(ShardServerCatalogCacheLoaderTest, PrimaryLoadFromShardedAndFindMixedChunkVersions) {
// First set up the shard chunk loader as sharded.
-
- vector<ChunkType> chunks = setUpChunkLoaderWithFiveChunks();
+ auto collAndChunks = setUpChunkLoaderWithFiveChunks();
+ auto& chunks = collAndChunks.second;
// Then refresh again and retrieve chunks from the config server that have mixed epochs, as
// if the chunks read yielded around a drop and recreate of the collection.
- CollectionType originalCollectionType = makeCollectionType(chunks.back().getVersion());
-
ChunkVersion collVersionWithNewEpoch(1, 0, OID::gen(), boost::none /* timestamp */);
CollectionType collectionTypeWithNewEpoch = makeCollectionType(collVersionWithNewEpoch);
vector<ChunkType> chunksWithNewEpoch = makeFiveChunks(collVersionWithNewEpoch);
@@ -439,57 +438,165 @@ TEST_F(ShardServerCatalogCacheLoaderTest, PrimaryLoadFromShardedAndFindMixedChun
}
}
-TEST_F(ShardServerCatalogCacheLoaderTest,
- PrimaryLoadFromShardedAndFindCollAndChunksMetadataFormatChanged) {
+TEST_F(ShardServerCatalogCacheLoaderTest, PrimaryLoadFromShardedWithChangeOnMetadataFormat) {
// First set up the shard chunk loader as sharded.
- vector<ChunkType> chunks = setUpChunkLoaderWithFiveChunks();
-
- // Simulating that the config server added timestamps to all chunks
- {
- vector<ChunkType> newChunks = chunks;
- for (auto& chunk : newChunks) {
- const ChunkVersion v = chunk.getVersion();
- chunk.setVersion(
- ChunkVersion(v.majorVersion(), v.minorVersion(), v.epoch(), Timestamp(42)));
- }
-
- CollectionType collectionTypeWithNewEpoch =
- makeCollectionType(newChunks.back().getVersion());
- _remoteLoaderMock->setCollectionRefreshReturnValue(collectionTypeWithNewEpoch);
- _remoteLoaderMock->setChunkRefreshReturnValue(newChunks);
-
- auto collAndChunksRes = _shardLoader->getChunksSince(kNss, newChunks[0].getVersion()).get();
- ASSERT_EQUALS(collAndChunksRes.epoch, collectionTypeWithNewEpoch.getEpoch());
- ASSERT_EQUALS(collAndChunksRes.creationTime, Timestamp(42));
+ auto collAndChunks = setUpChunkLoaderWithFiveChunks();
+ auto& collType = collAndChunks.first;
+ auto& chunks = collAndChunks.second;
+
+ auto changeMetadataFormat = [&](const boost::optional<Timestamp>& timestamp) {
+ auto lastChunk = chunks.back();
+ lastChunk.setVersion([&]() {
+ const auto v = lastChunk.getVersion();
+ return ChunkVersion(v.majorVersion(), v.minorVersion(), v.epoch(), timestamp);
+ }());
+
+ collType.setTimestamp(timestamp);
+ _remoteLoaderMock->setCollectionRefreshReturnValue(collType);
+ _remoteLoaderMock->setChunkRefreshReturnValue(std::vector{lastChunk});
+
+ auto collAndChunksRes = _shardLoader->getChunksSince(kNss, chunks[0].getVersion()).get();
+ ASSERT_EQUALS(collAndChunksRes.epoch, collType.getEpoch());
+ ASSERT_EQUALS(collAndChunksRes.creationTime, timestamp);
ASSERT_EQUALS(collAndChunksRes.changedChunks.size(), 5UL);
for (const auto& changedChunk : collAndChunksRes.changedChunks) {
- ASSERT_EQUALS(changedChunk.getVersion().getTimestamp(), Timestamp(42));
+ ASSERT_EQUALS(changedChunk.getVersion().getTimestamp(), timestamp);
ASSERT_EQUALS(changedChunk.getVersion().epoch(), collAndChunksRes.epoch);
}
+ };
+
+ // Upgrading the metadata format to 5.0
+ changeMetadataFormat(Timestamp(42));
+ // Downgrading the metadata format to 4.4
+ changeMetadataFormat(boost::none /* timestamp */);
+}
+
+TEST_F(ShardServerCatalogCacheLoaderTest,
+ PrimaryLoadFromShardedWithChangeOnMetadataFormatBecauseUpgrade) {
+ const auto timestamp = Timestamp(42);
+ ChunkVersion collectionVersion(1, 0, OID::gen(), boost::none /* timestamp */);
+ CollectionType collectionType = makeCollectionType(collectionVersion);
+ vector<ChunkType> chunks = makeFiveChunks(collectionVersion);
+
+ // 1st refresh as if we were in 4.4: the loader discovers one new chunk without a timestamp
+ {
+ _remoteLoaderMock->setCollectionRefreshReturnValue(collectionType);
+ _remoteLoaderMock->setChunkRefreshReturnValue(std::vector{chunks[0]});
+ const auto collAndChunksRes =
+ _shardLoader->getChunksSince(kNss, chunks[0].getVersion()).get();
+ ASSERT_EQUALS(collAndChunksRes.changedChunks.size(), 1UL);
+ ASSERT_EQUALS(collAndChunksRes.creationTime, boost::none);
+ ASSERT_EQUALS(collAndChunksRes.changedChunks[0].getVersion().getTimestamp(), boost::none);
}
- // Simulating that the config server removed timestamps from all chunks
+ // 2nd refresh as if we were in phase 1 of the setFCV process to upgrade to 5.0: the loader
+ // discovers a few new chunks with timestamps, but the collection doesn't have one yet.
{
- vector<ChunkType> newChunks = chunks;
- for (auto& chunk : newChunks) {
- const ChunkVersion v = chunk.getVersion();
- chunk.setVersion(
- ChunkVersion(v.majorVersion(), v.minorVersion(), v.epoch(), boost::none));
+ for (size_t i = 1; i < chunks.size() - 1; ++i) {
+ chunks[i].setVersion([&]() {
+ const auto v = chunks[i].getVersion();
+ return ChunkVersion(v.majorVersion(), v.minorVersion(), v.epoch(), timestamp);
+ }());
}
- CollectionType collectionTypeWithNewEpoch =
- makeCollectionType(newChunks.back().getVersion());
- _remoteLoaderMock->setCollectionRefreshReturnValue(collectionTypeWithNewEpoch);
- _remoteLoaderMock->setChunkRefreshReturnValue(newChunks);
-
- auto collAndChunksRes = _shardLoader->getChunksSince(kNss, newChunks[0].getVersion()).get();
- ASSERT_EQUALS(collAndChunksRes.epoch, collectionTypeWithNewEpoch.getEpoch());
+ _remoteLoaderMock->setCollectionRefreshReturnValue(collectionType);
+ _remoteLoaderMock->setChunkRefreshReturnValue(
+ std::vector<ChunkType>(chunks.begin() + 1, chunks.end() - 1));
+ const auto collAndChunksRes =
+ _shardLoader->getChunksSince(kNss, chunks[0].getVersion()).get();
+ const auto& changedChunks = collAndChunksRes.changedChunks;
+ ASSERT_EQUALS(changedChunks.size(), 4UL);
ASSERT_EQUALS(collAndChunksRes.creationTime, boost::none);
- ASSERT_EQUALS(collAndChunksRes.changedChunks.size(), 5UL);
- for (const auto& changedChunk : collAndChunksRes.changedChunks) {
- ASSERT_EQUALS(changedChunk.getVersion().getTimestamp(), boost::none);
- ASSERT_EQUALS(changedChunk.getVersion().epoch(), collAndChunksRes.epoch);
+ ASSERT_EQUALS(changedChunks[0].getVersion().getTimestamp(), boost::none);
+ for (size_t i = 1; i < chunks.size() - 1; ++i)
+ ASSERT_EQUALS(changedChunks[i].getVersion().getTimestamp(), timestamp);
+ }
+
+ // 3rd refresh as if we were in 5.0: the loader discovers a new chunk. All chunks and the
+ // collection have timestamps.
+ {
+ chunks.back().setVersion([&]() {
+ const auto v = chunks.back().getVersion();
+ return ChunkVersion(v.majorVersion(), v.minorVersion(), v.epoch(), timestamp);
+ }());
+ collectionType.setTimestamp(timestamp);
+
+ _remoteLoaderMock->setCollectionRefreshReturnValue(collectionType);
+ _remoteLoaderMock->setChunkRefreshReturnValue(std::vector{chunks.back()});
+ const auto collAndChunksRes =
+ _shardLoader->getChunksSince(kNss, chunks[0].getVersion()).get();
+ const auto& changedChunks = collAndChunksRes.changedChunks;
+ ASSERT_EQUALS(changedChunks.size(), 5UL);
+ ASSERT_EQUALS(collAndChunksRes.creationTime, timestamp);
+ for (size_t i = 0; i < chunks.size(); ++i)
+ ASSERT_EQUALS(changedChunks[i].getVersion().getTimestamp(), timestamp);
+ }
+}
+
+TEST_F(ShardServerCatalogCacheLoaderTest,
+ PrimaryLoadFromShardedWithChangeOnMetadataFormatBecauseDowngrade) {
+ const auto timestamp = Timestamp(42);
+ ChunkVersion collectionVersion(1, 0, OID::gen(), timestamp);
+ CollectionType collectionType = makeCollectionType(collectionVersion);
+ vector<ChunkType> chunks = makeFiveChunks(collectionVersion);
+
+ // 1st refresh as if we were in 5.0: the loader discovers one new chunk with a timestamp.
+ // The collection has one as well.
+ {
+ _remoteLoaderMock->setCollectionRefreshReturnValue(collectionType);
+ _remoteLoaderMock->setChunkRefreshReturnValue(std::vector{chunks[0]});
+ const auto collAndChunksRes =
+ _shardLoader->getChunksSince(kNss, chunks[0].getVersion()).get();
+ ASSERT_EQUALS(collAndChunksRes.changedChunks.size(), 1UL);
+ ASSERT_EQUALS(collAndChunksRes.creationTime, timestamp);
+ ASSERT_EQUALS(collAndChunksRes.changedChunks[0].getVersion().getTimestamp(), timestamp);
+ }
+
+ // 2nd refresh: the loader discovers a few new chunks without timestamps, but the collection
+ // still has one.
+ {
+ for (size_t i = 1; i < chunks.size() - 1; ++i) {
+ chunks[i].setVersion([&]() {
+ const auto v = chunks[i].getVersion();
+ return ChunkVersion(
+ v.majorVersion(), v.minorVersion(), v.epoch(), boost::none /* timestamp */);
+ }());
}
+
+ _remoteLoaderMock->setCollectionRefreshReturnValue(collectionType);
+ _remoteLoaderMock->setChunkRefreshReturnValue(
+ std::vector<ChunkType>(chunks.begin() + 1, chunks.end() - 1));
+ const auto collAndChunksRes =
+ _shardLoader->getChunksSince(kNss, chunks[0].getVersion()).get();
+ const auto& changedChunks = collAndChunksRes.changedChunks;
+ ASSERT_EQUALS(changedChunks.size(), 4UL);
+ ASSERT_EQUALS(collAndChunksRes.creationTime, timestamp);
+ ASSERT_EQUALS(changedChunks[0].getVersion().getTimestamp(), timestamp);
+ for (size_t i = 1; i < chunks.size() - 1; ++i)
+ ASSERT_EQUALS(changedChunks[i].getVersion().getTimestamp(),
+ boost::none /* timestamp */);
+ }
+
+ // 3rd refresh as if we were in 4.4: the loader discovers a new chunk. Neither the chunks
+ // nor the collection have timestamps.
+ {
+ chunks.back().setVersion([&]() {
+ const auto v = chunks.back().getVersion();
+ return ChunkVersion(
+ v.majorVersion(), v.minorVersion(), v.epoch(), boost::none /* timestamp */);
+ }());
+ collectionType.setTimestamp(boost::none /* timestamp */);
+
+ _remoteLoaderMock->setCollectionRefreshReturnValue(collectionType);
+ _remoteLoaderMock->setChunkRefreshReturnValue(std::vector{chunks.back()});
+ const auto collAndChunksRes =
+ _shardLoader->getChunksSince(kNss, chunks[0].getVersion()).get();
+ const auto& changedChunks = collAndChunksRes.changedChunks;
+ ASSERT_EQUALS(changedChunks.size(), 5UL);
+ ASSERT_EQUALS(collAndChunksRes.creationTime, boost::none /* timestamp */);
+ for (size_t i = 0; i < chunks.size(); ++i)
+ ASSERT_EQUALS(changedChunks[i].getVersion().getTimestamp(),
+ boost::none /* timestamp */);
}
}
diff --git a/src/mongo/s/catalog_cache_test.cpp b/src/mongo/s/catalog_cache_test.cpp
index ed19cecb862..cbffa3e66f2 100644
--- a/src/mongo/s/catalog_cache_test.cpp
+++ b/src/mongo/s/catalog_cache_test.cpp
@@ -161,7 +161,8 @@ protected:
}
CollectionType makeCollectionType(const ChunkVersion& collVersion) {
- CollectionType coll(kNss, collVersion.epoch(), Date_t::now(), UUID::gen());
+ CollectionType coll(
+ kNss, collVersion.epoch(), collVersion.getTimestamp(), Date_t::now(), UUID::gen());
coll.setKeyPattern(kShardKeyPattern.getKeyPattern());
coll.setUnique(false);
return coll;
@@ -339,6 +340,103 @@ TEST_F(CatalogCacheTest, GetCollectionWithMetadataFormatChange) {
getCollectionWithRefreshAndCheckResults(collVersionWithoutTimestamp);
}
+TEST_F(CatalogCacheTest,
+ GetCollectionWithRefreshDuringUpgradeWithMetadataFormatChangeChunksDontMatchCollection) {
+ const auto dbVersion = DatabaseVersion(UUID::gen());
+ const auto epoch = OID::gen();
+ const auto timestamp = Timestamp(42);
+
+ const auto collVersionWithoutTimestamp = ChunkVersion(1, 0, epoch, boost::none /* timestamp */);
+ const auto collVersionWithTimestamp = ChunkVersion(1, 0, epoch, timestamp);
+
+ loadDatabases({DatabaseType(kNss.db().toString(), kShards[0], true, dbVersion)});
+
+ const auto coll = makeCollectionType(collVersionWithoutTimestamp);
+ const auto scopedCollProv = scopedCollectionProvider(coll);
+ const auto scopedChunksProv = scopedChunksProvider(makeChunks(collVersionWithTimestamp));
+
+ const auto swChunkManager =
+ _catalogCache->getCollectionRoutingInfoWithRefresh(operationContext(), coll.getNss());
+ ASSERT_OK(swChunkManager.getStatus());
+
+ const auto& chunkManager = swChunkManager.getValue();
+ const auto collectionVersion = chunkManager.getVersion();
+
+ ASSERT_EQ(collectionVersion.getTimestamp(), boost::none);
+
+ chunkManager.forEachChunk([&](const Chunk& chunk) {
+ ASSERT_EQ(chunk.getLastmod().getTimestamp(), timestamp);
+ return true;
+ });
+}
+
+TEST_F(CatalogCacheTest,
+ GetCollectionWithRefreshDuringUpgradeWithMetadataFormatChangeSomeChunksMatchCollection) {
+ const auto dbVersion = DatabaseVersion(UUID::gen());
+ const auto epoch = OID::gen();
+ const auto timestamp = Timestamp(42);
+
+ const auto collVersionWithoutTimestamp = ChunkVersion(1, 0, epoch, boost::none /* timestamp */);
+ const auto collVersionWithTimestamp = ChunkVersion(1, 1, epoch, timestamp);
+
+ loadDatabases({DatabaseType(kNss.db().toString(), kShards[0], true, dbVersion)});
+
+ const auto coll = makeCollectionType(collVersionWithoutTimestamp);
+ const auto scopedCollProv = scopedCollectionProvider(coll);
+
+ ChunkType chunk1(kNss,
+ {kShardKeyPattern.getKeyPattern().globalMin(), BSON("_id" << 100)},
+ collVersionWithTimestamp,
+ {"0"});
+ chunk1.setName(OID::gen());
+
+ ChunkType chunk2(kNss,
+ {BSON("_id" << 100), kShardKeyPattern.getKeyPattern().globalMax()},
+ collVersionWithoutTimestamp,
+ {"0"});
+ chunk2.setName(OID::gen());
+
+ const auto scopedChunksProv = scopedChunksProvider(std::vector{chunk1, chunk2});
+
+ const auto swChunkManager =
+ _catalogCache->getCollectionRoutingInfoWithRefresh(operationContext(), coll.getNss());
+ ASSERT_OK(swChunkManager.getStatus());
+
+ const auto& chunkManager = swChunkManager.getValue();
+ const auto collectionVersion = chunkManager.getVersion();
+
+ ASSERT_EQ(collectionVersion.getTimestamp(), boost::none);
+}
+
+TEST_F(CatalogCacheTest, GetCollectionWithRefreshDuringDowngradeWithMetadataFormatChange) {
+ const auto dbVersion = DatabaseVersion(UUID::gen());
+ const auto epoch = OID::gen();
+ const auto timestamp = Timestamp(42);
+
+ const auto collVersionWithoutTimestamp = ChunkVersion(1, 0, epoch, boost::none /* timestamp */);
+ const auto collVersionWithTimestamp = ChunkVersion(1, 0, epoch, timestamp);
+
+ loadDatabases({DatabaseType(kNss.db().toString(), kShards[0], true, dbVersion)});
+
+ const auto coll = makeCollectionType(collVersionWithTimestamp);
+ const auto scopedCollProv = scopedCollectionProvider(coll);
+ const auto scopedChunksProv = scopedChunksProvider(makeChunks(collVersionWithoutTimestamp));
+
+ const auto swChunkManager =
+ _catalogCache->getCollectionRoutingInfoWithRefresh(operationContext(), coll.getNss());
+ ASSERT_OK(swChunkManager.getStatus());
+
+ const auto& chunkManager = swChunkManager.getValue();
+ const auto collectionVersion = chunkManager.getVersion();
+
+ ASSERT_EQ(collectionVersion.getTimestamp(), timestamp);
+
+ chunkManager.forEachChunk([&](const Chunk& chunk) {
+ ASSERT_EQ(chunk.getLastmod().getTimestamp(), boost::none);
+ return true;
+ });
+}
+
TEST_F(CatalogCacheTest, TimeseriesFieldsAreProperlyPropagatedOnCC) {
const auto dbVersion = DatabaseVersion(UUID::gen());
const auto epoch = OID::gen();
diff --git a/src/mongo/s/chunk_manager.cpp b/src/mongo/s/chunk_manager.cpp
index 1e5e250e7bd..29e1457122d 100644
--- a/src/mongo/s/chunk_manager.cpp
+++ b/src/mongo/s/chunk_manager.cpp
@@ -193,9 +193,13 @@ ShardVersionMap ChunkMap::constructShardVersionMap() const {
void ChunkMap::appendChunk(const std::shared_ptr<ChunkInfo>& chunk) {
appendChunkTo(_chunkMap, chunk);
-
- if (_collectionVersion.isOlderThan(chunk->getLastmod()))
- _collectionVersion = chunk->getLastmod();
+ const auto chunkVersion = chunk->getLastmod();
+ if (_collectionVersion.isOlderThan(chunkVersion)) {
+ _collectionVersion = ChunkVersion(chunkVersion.majorVersion(),
+ chunkVersion.minorVersion(),
+ chunkVersion.epoch(),
+ _collTimestamp);
+ }
}
std::shared_ptr<ChunkInfo> ChunkMap::findIntersectingChunk(const BSONObj& shardKey) const {
diff --git a/src/mongo/s/chunk_manager.h b/src/mongo/s/chunk_manager.h
index e3c83b2e7c6..4179d38aa07 100644
--- a/src/mongo/s/chunk_manager.h
+++ b/src/mongo/s/chunk_manager.h
@@ -80,7 +80,7 @@ public:
explicit ChunkMap(OID epoch,
const boost::optional<Timestamp>& timestamp,
size_t initialCapacity = 0)
- : _collectionVersion(0, 0, epoch, timestamp) {
+ : _collectionVersion(0, 0, epoch, timestamp), _collTimestamp(timestamp) {
_chunkMap.reserve(initialCapacity);
}
@@ -134,6 +134,14 @@ private:
// Max version across all chunks
ChunkVersion _collectionVersion;
+
+ // Represents the timestamp present in config.collections for this ChunkMap.
+ //
+ // Note that due to the way Phase 1 of the FCV upgrade writes timestamps to chunks
+ // (non-atomically), it is possible that chunks exist with timestamps, but the corresponding
+ // config.collections entry doesn't. In this case, the chunks' timestamps should be ignored
+ // when computing the collection version, and _collTimestamp should be used instead.
+ boost::optional<Timestamp> _collTimestamp;
};
/**
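Taken together, the chunk_manager.cpp and chunk_manager.h changes pin the collection version's timestamp to the one stored in config.collections while still letting major/minor track the newest chunk. Below is a minimal model of that behavior, using hypothetical Version and MiniChunkMap types rather than the real ChunkVersion/ChunkMap (epochs omitted):

#include <cassert>
#include <cstdint>
#include <optional>

struct Version {
    uint32_t majorVersion = 0;
    uint32_t minorVersion = 0;
    std::optional<uint64_t> timestamp;

    // Ordering on (major, minor) only; the real ChunkVersion also compares epochs.
    bool isOlderThan(const Version& other) const {
        return majorVersion < other.majorVersion ||
            (majorVersion == other.majorVersion && minorVersion < other.minorVersion);
    }
};

class MiniChunkMap {
public:
    explicit MiniChunkMap(std::optional<uint64_t> collTimestamp)
        : _collectionVersion{0, 0, collTimestamp}, _collTimestamp(collTimestamp) {}

    void appendChunk(const Version& chunkVersion) {
        if (_collectionVersion.isOlderThan(chunkVersion)) {
            // Major/minor advance with the chunk; the timestamp stays pinned to
            // what config.collections reported for the collection.
            _collectionVersion =
                Version{chunkVersion.majorVersion, chunkVersion.minorVersion, _collTimestamp};
        }
    }

    const Version& collectionVersion() const {
        return _collectionVersion;
    }

private:
    Version _collectionVersion;
    std::optional<uint64_t> _collTimestamp;
};

int main() {
    // The collection entry has no timestamp yet, but some chunks already do
    // (the mid-upgrade window the tests above exercise).
    MiniChunkMap map(std::nullopt);
    map.appendChunk(Version{1, 0, 42});
    map.appendChunk(Version{1, 3, 42});
    assert(map.collectionVersion().minorVersion == 3);  // the max version advances
    assert(!map.collectionVersion().timestamp);         // chunk timestamps are ignored
    return 0;
}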