From ab5508ea955f5b18521e0298a722535d153fb420 Mon Sep 17 00:00:00 2001
From: Antonio Fuschetto
Date: Mon, 7 Feb 2022 10:41:46 +0000
Subject: SERVER-63086 Remove refresh of the routing table during a rename
 operation

---
 jstests/sharding/rename_sharded.js                 | 10 ----------
 .../db/s/rename_collection_participant_service.cpp | 23 ++++-------------------
 2 files changed, 4 insertions(+), 29 deletions(-)

diff --git a/jstests/sharding/rename_sharded.js b/jstests/sharding/rename_sharded.js
index 23dd313393b..87f98dee46b 100644
--- a/jstests/sharding/rename_sharded.js
+++ b/jstests/sharding/rename_sharded.js
@@ -49,16 +49,6 @@ function testRename(st, dbName, toNs, dropTarget, mustFail) {
     const toColl = mongos.getCollection(toNs);
     assert.eq(toColl.find({x: 0}).itcount(), 1, 'Expected exactly one document on the shard');
     assert.eq(toColl.find({x: 2}).itcount(), 1, 'Expected exactly one document on the shard');
-
-    // Validate the correctness of the collections metadata in the catalog cache on shards
-    for (let db of [st.shard0.getDB('config'), st.shard1.getDB('config')]) {
-        // Validate that the source collection metadata has been cleaned up
-        assert.eq(db['cache.collections'].countDocuments({_id: fromNs}), 0);
-
-        // Validate that the target collection metadata has been downloaded
-        assert.eq(db['cache.collections'].countDocuments({_id: toNs}), 1);
-        assert(db['cache.chunks.' + toNs].exists());
-    }
 }
 
 // Never use the third shard, but leave it in order to indirectly check that rename participants
diff --git a/src/mongo/db/s/rename_collection_participant_service.cpp b/src/mongo/db/s/rename_collection_participant_service.cpp
index 7fc11ceb6ba..a4063e41189 100644
--- a/src/mongo/db/s/rename_collection_participant_service.cpp
+++ b/src/mongo/db/s/rename_collection_participant_service.cpp
@@ -265,8 +265,10 @@ SemiFuture<void> RenameParticipantInstance::run(
                 service->promoteRecoverableCriticalSectionToBlockAlsoReads(
                     opCtx, toNss(), reason, ShardingCatalogClient::kLocalWriteConcern);
 
-                // Clear the filtering metadata to safely create new range deletion tasks: the
-                // submission will serialize on the renamed collection's metadata refresh.
+                // Clear the filtering metadata before releasing the critical section (it will be
+                // recovered the next time it is accessed) and to safely create new range deletion
+                // tasks (the submission will serialize on the renamed collection's metadata
+                // refresh).
                 clearFilteringMetadata(opCtx, fromNss());
                 clearFilteringMetadata(opCtx, toNss());
 
@@ -322,23 +324,6 @@ SemiFuture<void> RenameParticipantInstance::run(
                 auto opCtxHolder = cc().makeOperationContext();
                 auto* opCtx = opCtxHolder.get();
 
-                // Force the refresh of the catalog cache for both source and destination
-                // collections to purge outdated information.
-                //
-                // (SERVER-58465) Note that we have to wait for the asynchronous tasks submitted to
-                // the background thread of the ShardServerCatalogCacheLoader because those tasks
-                // might conflict with the next refresh if the loader relies on UUID-based
-                // config.cache.chunks.* collections.
-                const auto catalog = Grid::get(opCtx)->catalogCache();
-                uassertStatusOK(catalog->getCollectionRoutingInfoWithRefresh(opCtx, fromNss()));
-                CatalogCacheLoader::get(opCtx).waitForCollectionFlush(opCtx, fromNss());
-
-                uassertStatusOK(catalog->getCollectionRoutingInfoWithRefresh(opCtx, toNss()));
-                CatalogCacheLoader::get(opCtx).waitForCollectionFlush(opCtx, toNss());
-
-                repl::ReplClientInfo::forClient(opCtx->getClient())
-                    .setLastOpToSystemLastOpTime(opCtx);
-
                 // Release source/target critical sections
                 const auto reason =
                     BSON("command"
-- 
cgit v1.2.1
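
With this change a shard no longer refreshes its routing table eagerly at the end of the
rename: the filtering metadata is cleared and only recovered on the next access, which is
why the catalog-cache assertions had to be dropped from the jstest. Below is a minimal
mongo-shell sketch of how the shard-side cache could still be inspected after a rename.
It assumes the same ShardingTest fixture `st` and `fromNs`/`toNs` namespaces as
testRename() in the diff above, and since the ShardServerCatalogCacheLoader flushes
asynchronously (the SERVER-58465 caveat removed by this patch), it is illustrative
rather than a stable assertion:

    // Touch the renamed collection through mongos so the first access
    // triggers the lazy metadata recovery on the owning shards.
    st.s.getCollection(toNs).findOne();

    // Then look at the routing metadata one shard has persisted locally.
    const cached = st.shard0.getDB('config')['cache.collections']
                       .find({_id: {$in: [fromNs, toNs]}})
                       .toArray();
    printjson(cached);  // fromNs should be absent; toNs appears once the flush lands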