From 186c59cdf182e3d26c3443e6e2a0a17197aa8602 Mon Sep 17 00:00:00 2001
From: Sergi Mateo Bellido
Date: Thu, 22 Jul 2021 15:51:24 +0000
Subject: SERVER-58792 Disable the UUID-based logic that handles config.cache.chunks.* collections on the ShardServerCatalogCacheLoader

---
 src/mongo/db/s/shard_metadata_util.cpp             |  15 +-
 src/mongo/db/s/shard_metadata_util_test.cpp         |   2 +-
 .../db/s/shard_server_catalog_cache_loader.cpp      |   2 +-
 .../s/shard_server_catalog_cache_loader_test.cpp    | 162 ---------------------
 4 files changed, 7 insertions(+), 174 deletions(-)

(limited to 'src/mongo')

diff --git a/src/mongo/db/s/shard_metadata_util.cpp b/src/mongo/db/s/shard_metadata_util.cpp
index 38315ea0d1c..07bcb7c22bd 100644
--- a/src/mongo/db/s/shard_metadata_util.cpp
+++ b/src/mongo/db/s/shard_metadata_util.cpp
@@ -282,9 +282,7 @@ StatusWith<std::vector<ChunkType>> readShardChunks(OperationContext* opCtx,
                                                     boost::optional<long long> limit,
                                                     const OID& epoch,
                                                     const boost::optional<Timestamp>& timestamp) {
-    const NamespaceString chunksNss{
-        ChunkType::ShardNSPrefix +
-        (!nss.isTemporaryReshardingCollection() && uuid ? uuid->toString() : nss.ns())};
+    const NamespaceString chunksNss{ChunkType::ShardNSPrefix + nss.ns()};
 
     try {
         DBDirectClient client(opCtx);
@@ -324,9 +322,7 @@ Status updateShardChunks(OperationContext* opCtx,
                          const OID& currEpoch) {
     invariant(!chunks.empty());
 
-    const NamespaceString chunksNss{
-        ChunkType::ShardNSPrefix +
-        (!nss.isTemporaryReshardingCollection() && uuid ? uuid->toString() : nss.ns())};
+    const NamespaceString chunksNss{ChunkType::ShardNSPrefix + nss.ns()};
 
     try {
         DBDirectClient client(opCtx);
@@ -457,12 +453,11 @@ Status dropChunksAndDeleteCollectionsEntry(OperationContext* opCtx, const Namesp
 void dropChunks(OperationContext* opCtx,
                 const NamespaceString& nss,
                 const boost::optional<UUID>& uuid) {
-    const auto chunksNs = ChunkType::ShardNSPrefix +
-        (!nss.isTemporaryReshardingCollection() && uuid ? uuid->toString() : nss.ns());
+    const NamespaceString chunksNss{ChunkType::ShardNSPrefix + nss.ns()};
 
     DBDirectClient client(opCtx);
     BSONObj result;
-    if (!client.dropCollection(chunksNs, kLocalWriteConcern, &result)) {
+    if (!client.dropCollection(chunksNss.ns(), kLocalWriteConcern, &result)) {
         auto status = getStatusFromCommandResult(result);
         if (status != ErrorCodes::NamespaceNotFound) {
             uassertStatusOK(status);
@@ -472,7 +467,7 @@ void dropChunks(OperationContext* opCtx,
     LOGV2_DEBUG(3463201,
                 1,
                 "Dropped chunks cache",
-                "chunksNamespace"_attr = chunksNs,
+                "chunksNamespace"_attr = chunksNss,
                 "collectionNamespace"_attr = nss,
                 "error"_attr = getStatusFromCommandResult(result));
 }
diff --git a/src/mongo/db/s/shard_metadata_util_test.cpp b/src/mongo/db/s/shard_metadata_util_test.cpp
index c4a30df04fd..3fc733cd333 100644
--- a/src/mongo/db/s/shard_metadata_util_test.cpp
+++ b/src/mongo/db/s/shard_metadata_util_test.cpp
@@ -140,7 +140,7 @@ struct ShardMetadataUtilTest : public ShardServerTestFixture {
                                  << chunk.getMin() << ChunkType::max() << chunk.getMax()));
                 query.readPref(ReadPreference::Nearest, BSONArray());
 
-                NamespaceString chunkMetadataNss{ChunkType::ShardNSPrefix + uuid.toString()};
+                NamespaceString chunkMetadataNss{ChunkType::ShardNSPrefix + kNss.ns()};
                 std::unique_ptr<DBClientCursor> cursor = client.query(chunkMetadataNss, query, 1);
                 ASSERT(cursor);
 
diff --git a/src/mongo/db/s/shard_server_catalog_cache_loader.cpp b/src/mongo/db/s/shard_server_catalog_cache_loader.cpp
index ee4ff25d751..a94d360bcf5 100644
--- a/src/mongo/db/s/shard_server_catalog_cache_loader.cpp
+++ b/src/mongo/db/s/shard_server_catalog_cache_loader.cpp
@@ -827,7 +827,7 @@ ShardServerCatalogCacheLoader::_schedulePrimaryGetChunksSince(
                               << "'."};
         }
 
-        if (maxLoaderVersion.isSet() &&
+        if (false && maxLoaderVersion.isSet() &&
             maxLoaderVersion.getTimestamp().is_initialized() !=
                 collAndChunks.creationTime.is_initialized() &&
             maxLoaderVersion.epoch() == collAndChunks.epoch) {
diff --git a/src/mongo/db/s/shard_server_catalog_cache_loader_test.cpp b/src/mongo/db/s/shard_server_catalog_cache_loader_test.cpp
index edf5e1a435f..675fd1ee336 100644
--- a/src/mongo/db/s/shard_server_catalog_cache_loader_test.cpp
+++ b/src/mongo/db/s/shard_server_catalog_cache_loader_test.cpp
@@ -438,168 +438,6 @@ TEST_F(ShardServerCatalogCacheLoaderTest, PrimaryLoadFromShardedAndFindMixedChun
     }
 }
 
-TEST_F(ShardServerCatalogCacheLoaderTest, PrimaryLoadFromShardedWithChangeOnMetadataFormat) {
-    // First set up the shard chunk loader as sharded.
-    auto collAndChunks = setUpChunkLoaderWithFiveChunks();
-    auto& collType = collAndChunks.first;
-    auto& chunks = collAndChunks.second;
-
-    auto changeMetadataFormat = [&](const boost::optional<Timestamp>& timestamp) {
-        auto lastChunk = chunks.back();
-        lastChunk.setVersion([&]() {
-            const auto v = lastChunk.getVersion();
-            return ChunkVersion(v.majorVersion(), v.minorVersion(), v.epoch(), timestamp);
-        }());
-
-        collType.setTimestamp(timestamp);
-        _remoteLoaderMock->setCollectionRefreshReturnValue(collType);
-        _remoteLoaderMock->setChunkRefreshReturnValue(std::vector<ChunkType>{lastChunk});
-
-        auto collAndChunksRes = _shardLoader->getChunksSince(kNss, chunks[0].getVersion()).get();
-        ASSERT_EQUALS(collAndChunksRes.epoch, collType.getEpoch());
-        ASSERT_EQUALS(collAndChunksRes.creationTime, timestamp);
-        ASSERT_EQUALS(collAndChunksRes.changedChunks.size(), 5UL);
-        for (const auto& changedChunk : collAndChunksRes.changedChunks) {
-            ASSERT_EQUALS(changedChunk.getVersion().getTimestamp(), timestamp);
-            ASSERT_EQUALS(changedChunk.getVersion().epoch(), collAndChunksRes.epoch);
-        }
-    };
-
-    // Upgrading the metadata format to 5.0
-    changeMetadataFormat(Timestamp(42));
-    // Downgrading the medata format to 4.4
-    changeMetadataFormat(boost::none /* timestamp */);
-}
-
-TEST_F(ShardServerCatalogCacheLoaderTest,
-       PrimaryLoadFromShardedWithChangeOnMetadataFormatBecauseUpgrade) {
-    const auto timestamp = Timestamp(42);
-    ChunkVersion collectionVersion(1, 0, OID::gen(), boost::none /* timestamp */);
-    CollectionType collectionType = makeCollectionType(collectionVersion);
-    vector<ChunkType> chunks = makeFiveChunks(collectionVersion);
-
-    // 1st refresh as if we were in 4.4: the loader discovers one new chunk without timestamp
-    {
-        _remoteLoaderMock->setCollectionRefreshReturnValue(collectionType);
-        _remoteLoaderMock->setChunkRefreshReturnValue(std::vector<ChunkType>{chunks[0]});
-        const auto collAndChunksRes =
-            _shardLoader->getChunksSince(kNss, chunks[0].getVersion()).get();
-        ASSERT_EQUALS(collAndChunksRes.changedChunks.size(), 1UL);
-        ASSERT_EQUALS(collAndChunksRes.creationTime, boost::none);
-        ASSERT_EQUALS(collAndChunksRes.changedChunks[0].getVersion().getTimestamp(), boost::none);
-    }
-
-    // 2nd refresh as if we were in the phase 1 of the setFCV process to upgrade to 5.0: the loader
-    // discovers a few new chunks with timestamp but the collection doesn't have it yet.
-    {
-        for (size_t i = 1; i < chunks.size() - 1; ++i) {
-            chunks[i].setVersion([&]() {
-                const auto v = chunks[i].getVersion();
-                return ChunkVersion(v.majorVersion(), v.minorVersion(), v.epoch(), timestamp);
-            }());
-        }
-
-        _remoteLoaderMock->setCollectionRefreshReturnValue(collectionType);
-        _remoteLoaderMock->setChunkRefreshReturnValue(
-            std::vector<ChunkType>(chunks.begin() + 1, chunks.end() - 1));
-        const auto collAndChunksRes =
-            _shardLoader->getChunksSince(kNss, chunks[0].getVersion()).get();
-        const auto& changedChunks = collAndChunksRes.changedChunks;
-        ASSERT_EQUALS(changedChunks.size(), 4UL);
-        ASSERT_EQUALS(collAndChunksRes.creationTime, boost::none);
-        ASSERT_EQUALS(changedChunks[0].getVersion().getTimestamp(), boost::none);
-        for (size_t i = 1; i < chunks.size() - 1; ++i)
-            ASSERT_EQUALS(changedChunks[i].getVersion().getTimestamp(), timestamp);
-    }
-
-    // 3rd refresh as if we were in 5.0: the loader discovers a new chunk. All chunks and the
-    // collection have timestamps.
-    {
-        chunks.back().setVersion([&]() {
-            const auto v = chunks.back().getVersion();
-            return ChunkVersion(v.majorVersion(), v.minorVersion(), v.epoch(), timestamp);
-        }());
-        collectionType.setTimestamp(timestamp);
-
-        _remoteLoaderMock->setCollectionRefreshReturnValue(collectionType);
-        _remoteLoaderMock->setChunkRefreshReturnValue(std::vector<ChunkType>{chunks.back()});
-        const auto collAndChunksRes =
-            _shardLoader->getChunksSince(kNss, chunks[0].getVersion()).get();
-        const auto& changedChunks = collAndChunksRes.changedChunks;
-        ASSERT_EQUALS(changedChunks.size(), 5UL);
-        ASSERT_EQUALS(collAndChunksRes.creationTime, timestamp);
-        for (size_t i = 0; i < chunks.size(); ++i)
-            ASSERT_EQUALS(changedChunks[i].getVersion().getTimestamp(), timestamp);
-    }
-}
-
-TEST_F(ShardServerCatalogCacheLoaderTest,
-       PrimaryLoadFromShardedWithChangeOnMetadataFormatBecauseDowngrade) {
-    const auto timestamp = Timestamp(42);
-    ChunkVersion collectionVersion(1, 0, OID::gen(), timestamp);
-    CollectionType collectionType = makeCollectionType(collectionVersion);
-    vector<ChunkType> chunks = makeFiveChunks(collectionVersion);
-
-    // 1st refresh as if we were in 5.0: the loader discovers one new chunk with timestamp. The
-    // collection also has timestamps.
-    {
-        _remoteLoaderMock->setCollectionRefreshReturnValue(collectionType);
-        _remoteLoaderMock->setChunkRefreshReturnValue(std::vector<ChunkType>{chunks[0]});
-        const auto collAndChunksRes =
-            _shardLoader->getChunksSince(kNss, chunks[0].getVersion()).get();
-        ASSERT_EQUALS(collAndChunksRes.changedChunks.size(), 1UL);
-        ASSERT_EQUALS(collAndChunksRes.creationTime, timestamp);
-        ASSERT_EQUALS(collAndChunksRes.changedChunks[0].getVersion().getTimestamp(), timestamp);
-    }
-
-    // 2nd refresh: the loader discovers a few new chunks without timestamp but the collection still
-    // has it.
-    {
-        for (size_t i = 1; i < chunks.size() - 1; ++i) {
-            chunks[i].setVersion([&]() {
-                const auto v = chunks[i].getVersion();
-                return ChunkVersion(
-                    v.majorVersion(), v.minorVersion(), v.epoch(), boost::none /* timestamp */);
-            }());
-        }
-
-        _remoteLoaderMock->setCollectionRefreshReturnValue(collectionType);
-        _remoteLoaderMock->setChunkRefreshReturnValue(
-            std::vector<ChunkType>(chunks.begin() + 1, chunks.end() - 1));
-        const auto collAndChunksRes =
-            _shardLoader->getChunksSince(kNss, chunks[0].getVersion()).get();
-        const auto& changedChunks = collAndChunksRes.changedChunks;
-        ASSERT_EQUALS(changedChunks.size(), 4UL);
-        ASSERT_EQUALS(collAndChunksRes.creationTime, timestamp);
-        ASSERT_EQUALS(changedChunks[0].getVersion().getTimestamp(), timestamp);
-        for (size_t i = 1; i < chunks.size() - 1; ++i)
-            ASSERT_EQUALS(changedChunks[i].getVersion().getTimestamp(),
-                          boost::none /* timestamp */);
-    }
-
-    // 3rd refresh as if we were in 4.4: the loader discovers a new chunk. All chunks and the
-    // collection don't have timestamps.
-    {
-        chunks.back().setVersion([&]() {
-            const auto v = chunks.back().getVersion();
-            return ChunkVersion(
-                v.majorVersion(), v.minorVersion(), v.epoch(), boost::none /* timestamp */);
-        }());
-        collectionType.setTimestamp(boost::none /* timestamp */);
-
-        _remoteLoaderMock->setCollectionRefreshReturnValue(collectionType);
-        _remoteLoaderMock->setChunkRefreshReturnValue(std::vector<ChunkType>{chunks.back()});
-        const auto collAndChunksRes =
-            _shardLoader->getChunksSince(kNss, chunks[0].getVersion()).get();
-        const auto& changedChunks = collAndChunksRes.changedChunks;
-        ASSERT_EQUALS(changedChunks.size(), 5UL);
-        ASSERT_EQUALS(collAndChunksRes.creationTime, boost::none /* timestamp */);
-        for (size_t i = 0; i < chunks.size(); ++i)
-            ASSERT_EQUALS(changedChunks[i].getVersion().getTimestamp(),
-                          boost::none /* timestamp */);
-    }
-}
-
 TEST_F(ShardServerCatalogCacheLoaderTest, PrimaryLoadFromShardedAndFindDbMetadataFormatChanged) {
     const std::string dbName("dbName");
     DatabaseVersion version(UUID::gen());
--
cgit v1.2.1