author    Antonio Fuschetto <antonio.fuschetto@mongodb.com>  2022-02-07 10:41:46 +0000
committer Evergreen Agent <no-reply@evergreen.mongodb.com>   2022-02-07 11:11:10 +0000
commit    ab5508ea955f5b18521e0298a722535d153fb420 (patch)
tree      7dff2e2de853a99d9acc52d77f9dcaa5ce571480
parent    c0aa22815e8540d1efc57425696778ba90df8160 (diff)
download  mongo-ab5508ea955f5b18521e0298a722535d153fb420.tar.gz
SERVER-63086 Remove refresh of the routing table during a rename operation
-rw-r--r--  jstests/sharding/rename_sharded.js                         10
-rw-r--r--  src/mongo/db/s/rename_collection_participant_service.cpp   23
2 files changed, 4 insertions, 29 deletions
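
In short, the rename participants stop forcing an eager catalog-cache refresh after the rename commits and instead only clear the filtering metadata while still holding the critical section, letting the next access to either namespace trigger the refresh on demand. A minimal before/after sketch, using only the calls visible in the diff below (error handling and the surrounding participant state machine elided; opCtx, fromNss() and toNss() are provided by the participant instance):

    // Removed pattern: eager post-commit refresh plus a wait for the catalog
    // cache loader to flush, repeated for the source and target namespaces.
    const auto catalog = Grid::get(opCtx)->catalogCache();
    uassertStatusOK(catalog->getCollectionRoutingInfoWithRefresh(opCtx, fromNss()));
    CatalogCacheLoader::get(opCtx).waitForCollectionFlush(opCtx, fromNss());

    // Kept pattern: clear the filtering metadata while the critical section is
    // still held; it is recovered lazily the next time the collection is accessed.
    clearFilteringMetadata(opCtx, fromNss());
    clearFilteringMetadata(opCtx, toNss());

This also explains the test change: without the eager refresh, the shards' config.cache.collections and config.cache.chunks.* entries are no longer guaranteed to exist right after the rename, so the corresponding assertions are dropped.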
diff --git a/jstests/sharding/rename_sharded.js b/jstests/sharding/rename_sharded.js
index 23dd313393b..87f98dee46b 100644
--- a/jstests/sharding/rename_sharded.js
+++ b/jstests/sharding/rename_sharded.js
@@ -49,16 +49,6 @@ function testRename(st, dbName, toNs, dropTarget, mustFail) {
const toColl = mongos.getCollection(toNs);
assert.eq(toColl.find({x: 0}).itcount(), 1, 'Expected exactly one document on the shard');
assert.eq(toColl.find({x: 2}).itcount(), 1, 'Expected exactly one document on the shard');
-
- // Validate the correctness of the collections metadata in the catalog cache on shards
- for (let db of [st.shard0.getDB('config'), st.shard1.getDB('config')]) {
- // Validate that the source collection metadata has been cleaned up
- assert.eq(db['cache.collections'].countDocuments({_id: fromNs}), 0);
-
- // Validate that the target collection metadata has been downloaded
- assert.eq(db['cache.collections'].countDocuments({_id: toNs}), 1);
- assert(db['cache.chunks.' + toNs].exists());
- }
}
// Never use the third shard, but leave it in order to indirectly check that rename participants
diff --git a/src/mongo/db/s/rename_collection_participant_service.cpp b/src/mongo/db/s/rename_collection_participant_service.cpp
index 7fc11ceb6ba..a4063e41189 100644
--- a/src/mongo/db/s/rename_collection_participant_service.cpp
+++ b/src/mongo/db/s/rename_collection_participant_service.cpp
@@ -265,8 +265,10 @@ SemiFuture<void> RenameParticipantInstance::run(
service->promoteRecoverableCriticalSectionToBlockAlsoReads(
opCtx, toNss(), reason, ShardingCatalogClient::kLocalWriteConcern);
- // Clear the filtering metadata to safely create new range deletion tasks: the
- // submission will serialize on the renamed collection's metadata refresh.
+ // Clear the filtering metadata before releasing the critical section (it will be
+ // recovered the next time it is accessed); this also makes it safe to create new
+ // range deletion tasks, since their submission will serialize on the renamed
+ // collection's metadata refresh.
clearFilteringMetadata(opCtx, fromNss());
clearFilteringMetadata(opCtx, toNss());
@@ -322,23 +324,6 @@ SemiFuture<void> RenameParticipantInstance::run(
auto opCtxHolder = cc().makeOperationContext();
auto* opCtx = opCtxHolder.get();
- // Force the refresh of the catalog cache for both source and destination
- // collections to purge outdated information.
- //
- // (SERVER-58465) Note that we have to wait for the asynchronous tasks submitted to
- // the background thread of the ShardServerCatalogCacheLoader because those tasks
- // might conflict with the next refresh if the loader relies on UUID-based
- // config.cache.chunks.* collections.
- const auto catalog = Grid::get(opCtx)->catalogCache();
- uassertStatusOK(catalog->getCollectionRoutingInfoWithRefresh(opCtx, fromNss()));
- CatalogCacheLoader::get(opCtx).waitForCollectionFlush(opCtx, fromNss());
-
- uassertStatusOK(catalog->getCollectionRoutingInfoWithRefresh(opCtx, toNss()));
- CatalogCacheLoader::get(opCtx).waitForCollectionFlush(opCtx, toNss());
-
- repl::ReplClientInfo::forClient(opCtx->getClient())
- .setLastOpToSystemLastOpTime(opCtx);
-
// Release source/target critical sections
const auto reason =
BSON("command"