author     Cheahuychou Mao <cheahuychou.mao@mongodb.com>     2020-07-23 16:30:16 +0000
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>  2020-07-24 14:51:25 +0000
commit     5e53ee3ca0a90eb98cdab94b298dec810fb46804 (patch)
tree       4adac5b4b1d754e9768531404f52bbb250ce1c60
parent     78cc027285359cc21c55f2dac69ce41cb9f23959 (diff)
download   mongo-5e53ee3ca0a90eb98cdab94b298dec810fb46804.tar.gz
SERVER-49427 Delete pending_chunk.js and remove now-defunct code in the MetadataManager
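
For orientation (not stated in the patch itself): with the _receivingChunks map removed from MetadataManager, the getShardVersion command no longer appends a "pending" array to its fullMetadata output, which is why the metadata.pending assertions disappear from the jstests changed below. A minimal, hypothetical shell sketch of the post-change check follows; the single-shard ShardingTest fixture, the test.foo namespace, and the _flushRoutingTableCacheUpdates refresh are illustrative assumptions, not taken from this commit.

// Hypothetical sketch, not part of this commit: dump a shard's filtering metadata
// after the change. Assertions are written against `chunks` only, since the
// `pending` array is no longer appended by getShardVersion.
(function() {
"use strict";

const st = new ShardingTest({shards: 1});
const ns = 'test.foo';

assert.commandWorked(st.s.adminCommand({enableSharding: 'test'}));
assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));

// Make sure the shard has loaded its filtering metadata before dumping it.
assert.commandWorked(st.shard0.adminCommand({_flushRoutingTableCacheUpdates: ns}));
const res = assert.commandWorked(
    st.shard0.getDB('admin').runCommand({getShardVersion: ns, fullMetadata: true}));
const metadata = res.metadata;

// A freshly sharded collection has a single chunk spanning the whole key space.
assert.eq(metadata.chunks.length, 1);
assert.eq(metadata.chunks[0][0]._id, MinKey);
assert.eq(metadata.chunks[0][1]._id, MaxKey);

// No `pending` field is reported once the receiving-chunks bookkeeping is gone.
assert(!metadata.hasOwnProperty('pending'));

st.stop();
})();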
-rw-r--r-- | buildscripts/resmokeconfig/suites/multiversion.yml            |   3
-rw-r--r-- | buildscripts/resmokeconfig/suites/multiversion_auth.yml       |   3
-rw-r--r-- | jstests/multiVersion/pending_chunk.js                         |  84
-rw-r--r-- | jstests/sharding/dump_coll_metadata.js                        |   2
-rw-r--r-- | jstests/sharding/initial_split_validate_shard_collections.js  |   1
-rw-r--r-- | src/mongo/db/s/collection_sharding_runtime.cpp                |  28
-rw-r--r-- | src/mongo/db/s/get_shard_version_command.cpp                  |   4
-rw-r--r-- | src/mongo/db/s/metadata_manager.cpp                           | 115
-rw-r--r-- | src/mongo/db/s/metadata_manager.h                             |  31
-rw-r--r-- | src/mongo/db/s/metadata_manager_test.cpp                      |  77
-rw-r--r-- | src/mongo/db/s/migration_util.cpp                             |  12
11 files changed, 1 insertion, 359 deletions
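
Before the patch body, it may help to restate what the deleted jstests/multiVersion/pending_chunk.js exercised in terms of the post-change metadata shape: a chunk migrated to a recipient shard simply shows up in that shard's chunks, and no pending bookkeeping is surfaced at any point. The sketch below is hypothetical; the two-shard ShardingTest, the foo.bar namespace, and the explicit _flushRoutingTableCacheUpdates call are assumptions, while the individual commands (split, moveChunk with _waitForDelete: true, getShardVersion) mirror the deleted test.

// Hypothetical sketch, not part of this commit: migrate one chunk and inspect the
// recipient's filtering metadata. Only `chunks` is reported; the `pending` array
// this patch removes is never present.
(function() {
"use strict";

const st = new ShardingTest({shards: 2});
const admin = st.s.getDB('admin');
const coll = st.s.getCollection('foo.bar');
const ns = coll.getFullName();

assert.commandWorked(admin.runCommand({enableSharding: coll.getDB().getName()}));
st.ensurePrimaryShard(coll.getDB().getName(), st.shard0.shardName);
assert.commandWorked(admin.runCommand({shardCollection: ns, key: {_id: 1}}));
assert.commandWorked(admin.runCommand({split: ns, middle: {_id: 0}}));

// Move [0, MaxKey) to shard1 and wait for the donor's range deletion to finish.
assert.commandWorked(admin.runCommand(
    {moveChunk: ns, find: {_id: 0}, to: st.shard1.shardName, _waitForDelete: true}));

// Force the recipient to refresh, then dump its view of the collection metadata.
assert.commandWorked(st.shard1.adminCommand({_flushRoutingTableCacheUpdates: ns}));
const metadata = st.shard1.getDB('admin')
                     .runCommand({getShardVersion: ns, fullMetadata: true})
                     .metadata;

// The migrated chunk is owned outright by the recipient ...
assert.eq(metadata.chunks[0][0]._id, 0);
assert.eq(metadata.chunks[0][1]._id, MaxKey);
// ... and there is no longer any `pending` field to assert on.
assert(!metadata.hasOwnProperty('pending'));

st.stop();
})();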
diff --git a/buildscripts/resmokeconfig/suites/multiversion.yml b/buildscripts/resmokeconfig/suites/multiversion.yml
index 94653715e7d..a2076d5c58e 100644
--- a/buildscripts/resmokeconfig/suites/multiversion.yml
+++ b/buildscripts/resmokeconfig/suites/multiversion.yml
@@ -19,9 +19,6 @@ selector:
   - jstests/multiVersion/genericSetFCVUsage/crash_mongos_against_upgraded_cluster.js
 
   # TODO: SERVER-48192
-  - jstests/multiVersion/pending_chunk.js
-
-  # TODO: SERVER-48192
   - jstests/multiVersion/migrations_with_mixed_fcv.js
 
   # Multiversion tests start their own mongod's.
diff --git a/buildscripts/resmokeconfig/suites/multiversion_auth.yml b/buildscripts/resmokeconfig/suites/multiversion_auth.yml
index b119565dc3f..11de88438a0 100644
--- a/buildscripts/resmokeconfig/suites/multiversion_auth.yml
+++ b/buildscripts/resmokeconfig/suites/multiversion_auth.yml
@@ -23,9 +23,6 @@ selector:
   - jstests/multiVersion/genericSetFCVUsage/crash_mongos_against_upgraded_cluster.js
 
   # TODO: SERVER-48192
-  - jstests/multiVersion/pending_chunk.js
-
-  # TODO: SERVER-48192
   - jstests/multiVersion/migrations_with_mixed_fcv.js
 
   # Multiversion tests start their own mongod's.
diff --git a/jstests/multiVersion/pending_chunk.js b/jstests/multiVersion/pending_chunk.js
deleted file mode 100644
index 942d8d3b37b..00000000000
--- a/jstests/multiVersion/pending_chunk.js
+++ /dev/null
@@ -1,84 +0,0 @@
-//
-// Tests pending chunk metadata.
-//
-
-(function() {
-"use strict";
-
-var st = new ShardingTest({shards: 2, mongos: 2, other: {separateConfig: true}});
-
-var mongos = st.s0;
-var admin = mongos.getDB('admin');
-var coll = mongos.getCollection('foo.bar');
-var ns = coll.getFullName();
-var dbName = coll.getDB().getName();
-
-assert.commandWorked(st.s.getDB("admin").runCommand({setFeatureCompatibilityVersion: lastLTSFCV}));
-
-assert.commandWorked(admin.runCommand({enableSharding: dbName}));
-printjson(admin.runCommand({movePrimary: dbName, to: st.shard0.shardName}));
-assert.commandWorked(admin.runCommand({shardCollection: ns, key: {_id: 1}}));
-
-// Turn off best-effort recipient metadata refresh post-migration commit on both shards because
-// it would clean up the pending chunks on migration recipients.
-assert.commandWorked(st.shard0.getDB('admin').runCommand(
-    {configureFailPoint: 'doNotRefreshRecipientAfterCommit', mode: 'alwaysOn'}));
-assert.commandWorked(st.shard1.getDB('admin').runCommand(
-    {configureFailPoint: 'doNotRefreshRecipientAfterCommit', mode: 'alwaysOn'}));
-
-jsTest.log('Moving some chunks to shard1...');
-
-assert.commandWorked(admin.runCommand({split: ns, middle: {_id: 0}}));
-assert.commandWorked(admin.runCommand({split: ns, middle: {_id: 1}}));
-assert.commandWorked(admin.runCommand(
-    {moveChunk: ns, find: {_id: 0}, to: st.shard1.shardName, _waitForDelete: true}));
-assert.commandWorked(admin.runCommand(
-    {moveChunk: ns, find: {_id: 1}, to: st.shard1.shardName, _waitForDelete: true}));
-
-function getMetadata(shard) {
-    var admin = shard.getDB('admin'),
-        metadata = admin.runCommand({getShardVersion: ns, fullMetadata: true}).metadata;
-
-    jsTest.log('Got metadata: ' + tojson(metadata));
-    return metadata;
-}
-
-var metadata = getMetadata(st.shard1);
-assert.eq(metadata.pending[0][0]._id, 1);
-assert.eq(metadata.pending[0][1]._id, MaxKey);
-
-jsTest.log('Moving some chunks back to shard0 after empty...');
-
-assert.commandWorked(admin.runCommand(
-    {moveChunk: ns, find: {_id: -1}, to: st.shard1.shardName, _waitForDelete: true}));
-
-metadata = getMetadata(st.shard0);
-assert.eq(metadata.shardVersion.t, 0);
-assert.neq(metadata.collVersion.t, 0);
-assert.eq(metadata.pending.length, 0);
-
-assert.commandWorked(admin.runCommand(
-    {moveChunk: ns, find: {_id: 1}, to: st.shard0.shardName, _waitForDelete: true}));
-
-metadata = getMetadata(st.shard0);
-assert.eq(metadata.shardVersion.t, 0);
-assert.neq(metadata.collVersion.t, 0);
-assert.eq(metadata.pending[0][0]._id, 1);
-assert.eq(metadata.pending[0][1]._id, MaxKey);
-
-// The pending chunk should be promoted to a real chunk when shard0 reloads
-// its config.
-jsTest.log('Checking that pending chunk is promoted on reload...');
-
-assert.eq(null, coll.findOne({_id: 1}));
-
-metadata = getMetadata(st.shard0);
-assert.neq(metadata.shardVersion.t, 0);
-assert.neq(metadata.collVersion.t, 0);
-assert.eq(metadata.chunks[0][0]._id, 1);
-assert.eq(metadata.chunks[0][1]._id, MaxKey);
-
-st.printShardingStatus();
-
-st.stop();
-})();
diff --git a/jstests/sharding/dump_coll_metadata.js b/jstests/sharding/dump_coll_metadata.js
index f8292262249..7c8b134194c 100644
--- a/jstests/sharding/dump_coll_metadata.js
+++ b/jstests/sharding/dump_coll_metadata.js
@@ -24,7 +24,6 @@ printjson(result);
 var metadata = result.metadata;
 
 assert.eq(metadata.chunks.length, 1);
-assert.eq(metadata.pending.length, 0);
 assert.eq(metadata.chunks[0][0]._id, MinKey);
 assert.eq(metadata.chunks[0][1]._id, MaxKey);
 assert.eq(metadata.shardVersion, result.global);
@@ -48,7 +47,6 @@ assert.commandWorked(result);
 metadata = result.metadata;
 
 assert.eq(metadata.chunks.length, 2);
-assert.eq(metadata.pending.length, 0);
 assert(metadata.chunks[0][0]._id + "" == MinKey + "");
 assert(metadata.chunks[0][1]._id == 0);
 assert(metadata.chunks[1][0]._id == 0);
diff --git a/jstests/sharding/initial_split_validate_shard_collections.js b/jstests/sharding/initial_split_validate_shard_collections.js
index 22c81bdc276..9cbedf06053 100644
--- a/jstests/sharding/initial_split_validate_shard_collections.js
+++ b/jstests/sharding/initial_split_validate_shard_collections.js
@@ -27,7 +27,6 @@ function checkMetadata(metadata) {
     jsTestLog(tojson(metadata));
 
     assert.eq(1, metadata.chunks.length);
-    assert.eq(0, metadata.pending.length);
 
     // Check that the single chunk on the shard's metadata is a valid chunk (getShardVersion
     // represents chunks as an array of [min, max])
diff --git a/src/mongo/db/s/collection_sharding_runtime.cpp b/src/mongo/db/s/collection_sharding_runtime.cpp
index 83eff883064..5a8204144b9 100644
--- a/src/mongo/db/s/collection_sharding_runtime.cpp
+++ b/src/mongo/db/s/collection_sharding_runtime.cpp
@@ -216,18 +216,6 @@ void CollectionShardingRuntime::clearFilteringMetadata(OperationContext* opCtx)
     }
 }
 
-SharedSemiFuture<void> CollectionShardingRuntime::beginReceive(ChunkRange const& range) {
-    stdx::lock_guard lk(_metadataManagerLock);
-    invariant(_metadataType == MetadataType::kSharded);
-    return _metadataManager->beginReceive(range);
-}
-
-void CollectionShardingRuntime::forgetReceive(const ChunkRange& range) {
-    stdx::lock_guard lk(_metadataManagerLock);
-    invariant(_metadataType == MetadataType::kSharded);
-    _metadataManager->forgetReceive(range);
-}
-
 SharedSemiFuture<void> CollectionShardingRuntime::cleanUpRange(ChunkRange const& range,
                                                                boost::optional<UUID> migrationId,
                                                                CleanWhen when) {
@@ -291,12 +279,6 @@ Status CollectionShardingRuntime::waitForClean(OperationContext* opCtx,
     MONGO_UNREACHABLE;
 }
 
-boost::optional<ChunkRange> CollectionShardingRuntime::getNextOrphanRange(BSONObj const& from) {
-    stdx::lock_guard lk(_metadataManagerLock);
-    invariant(_metadataType == MetadataType::kSharded);
-    return _metadataManager->getNextOrphanRange(from);
-}
-
 std::shared_ptr<ScopedCollectionDescription::Impl>
 CollectionShardingRuntime::_getCurrentMetadataIfKnown(
     const boost::optional<LogicalTime>& atClusterTime) {
@@ -393,16 +375,6 @@ void CollectionShardingRuntime::appendShardVersion(BSONObjBuilder* builder) {
     }
 }
 
-void CollectionShardingRuntime::appendPendingReceiveChunks(BSONArrayBuilder* builder) {
-    _metadataManager->toBSONPending(*builder);
-}
-
-void CollectionShardingRuntime::clearReceivingChunks() {
-    stdx::lock_guard lk(_metadataManagerLock);
-    invariant(_metadataType == MetadataType::kSharded);
-    _metadataManager->clearReceivingChunks();
-}
-
 size_t CollectionShardingRuntime::numberOfRangesScheduledForDeletion() const {
     stdx::lock_guard lk(_metadataManagerLock);
     if (_metadataManager) {
diff --git a/src/mongo/db/s/get_shard_version_command.cpp b/src/mongo/db/s/get_shard_version_command.cpp
index 9648635a1e1..a9e1d3aad62 100644
--- a/src/mongo/db/s/get_shard_version_command.cpp
+++ b/src/mongo/db/s/get_shard_version_command.cpp
@@ -119,10 +119,6 @@ public:
                 BSONArrayBuilder chunksArr(metadataBuilder.subarrayStart("chunks"));
                 metadata.toBSONChunks(&chunksArr);
                 chunksArr.doneFast();
-
-                BSONArrayBuilder pendingArr(metadataBuilder.subarrayStart("pending"));
-                csr->appendPendingReceiveChunks(&pendingArr);
-                pendingArr.doneFast();
             }
             metadataBuilder.doneFast();
         }
diff --git a/src/mongo/db/s/metadata_manager.cpp b/src/mongo/db/s/metadata_manager.cpp
index 953098ac44f..feb943e2462 100644
--- a/src/mongo/db/s/metadata_manager.cpp
+++ b/src/mongo/db/s/metadata_manager.cpp
@@ -118,8 +118,7 @@ MetadataManager::MetadataManager(ServiceContext* serviceContext,
     : _serviceContext(serviceContext),
       _nss(std::move(nss)),
       _collectionUuid(*initialMetadata.getChunkManager()->getUUID()),
-      _executor(std::move(executor)),
-      _receivingChunks(SimpleBSONObjComparator::kInstance.makeBSONObjIndexedMap<BSONObj>()) {
+      _executor(std::move(executor)) {
     _metadata.emplace_back(std::make_shared<CollectionMetadataTracker>(std::move(initialMetadata)));
 }
 
@@ -207,29 +206,6 @@ void MetadataManager::setFilteringMetadata(CollectionMetadata remoteMetadata) {
           "activeMetadata"_attr = activeMetadata.toStringBasic(),
           "remoteMetadata"_attr = remoteMetadata.toStringBasic());
 
-    // Resolve any receiving chunks, which might have completed by now
-    for (auto it = _receivingChunks.begin(); it != _receivingChunks.end();) {
-        const ChunkRange receivingRange(it->first, it->second);
-
-        if (!metadataOverlapsRange(remoteMetadata, receivingRange)) {
-            ++it;
-            continue;
-        }
-
-        // The remote metadata contains a chunk we were earlier in the process of receiving, so we
-        // deem it successfully received
-        LOGV2_DEBUG(21986,
-                    2,
-                    "Chunk {range} for {namespace} has already been migrated to this "
-                    "shard",
-                    "The incoming chunk migration for this shard has already been completed",
-                    "range"_attr = redact(receivingRange.toString()),
-                    "namespace"_attr = _nss.ns());
-
-        _receivingChunks.erase(it);
-        it = _receivingChunks.begin();
-    }
-
     _setActiveMetadata(lg, std::move(remoteMetadata));
 }
 
@@ -262,17 +238,6 @@ void MetadataManager::_retireExpiredMetadata(WithLock) {
     }
 }
 
-void MetadataManager::toBSONPending(BSONArrayBuilder& bb) const {
-    stdx::lock_guard<Latch> lg(_managerLock);
-
-    for (auto it = _receivingChunks.begin(); it != _receivingChunks.end(); ++it) {
-        BSONArrayBuilder pendingBB(bb.subarrayStart());
-        pendingBB.append(it->first);
-        pendingBB.append(it->second);
-        pendingBB.done();
-    }
-}
-
 void MetadataManager::append(BSONObjBuilder* builder) const {
     stdx::lock_guard<Latch> lg(_managerLock);
 
@@ -283,15 +248,6 @@ void MetadataManager::append(BSONObjBuilder* builder) const {
         arr.append(obj.done());
     }
 
-    BSONArrayBuilder pcArr(builder->subarrayStart("pendingChunks"));
-    for (const auto& entry : _receivingChunks) {
-        BSONObjBuilder obj;
-        ChunkRange r = ChunkRange(entry.first, entry.second);
-        r.append(&obj);
-        pcArr.append(obj.done());
-    }
-    pcArr.done();
-
     invariant(!_metadata.empty());
 
     BSONArrayBuilder amrArr(builder->subarrayStart("activeMetadataRanges"));
@@ -304,59 +260,6 @@ void MetadataManager::append(BSONObjBuilder* builder) const {
     amrArr.done();
 }
 
-SharedSemiFuture<void> MetadataManager::beginReceive(ChunkRange const& range) {
-    stdx::lock_guard<Latch> lg(_managerLock);
-    invariant(!_metadata.empty());
-
-    if (_overlapsInUseChunk(lg, range)) {
-        return Status{ErrorCodes::RangeOverlapConflict,
-                      "Documents in target range may still be in use on the destination shard."};
-    }
-
-    _receivingChunks.emplace(range.getMin().getOwned(), range.getMax().getOwned());
-
-    LOGV2_OPTIONS(21987,
-                  {logv2::LogComponent::kShardingMigration},
-                  "Scheduling deletion of any documents in {namespace} range {range} before "
-                  "migrating in a chunk covering the range",
-                  "Scheduling deletion of any documents in the collection's specified range "
-                  "before migrating chunks into said range",
-                  "namespace"_attr = _nss.ns(),
-                  "range"_attr = redact(range.toString()));
-
-    return _submitRangeForDeletion(lg,
-                                   SemiFuture<void>::makeReady(),
-                                   range,
-                                   boost::none,
-                                   Seconds(orphanCleanupDelaySecs.load()));
-}
-
-void MetadataManager::forgetReceive(ChunkRange const& range) {
-    stdx::lock_guard<Latch> lg(_managerLock);
-    invariant(!_metadata.empty());
-
-    // This is potentially a partially received chunk, which needs to be cleaned up. We know none
-    // of these documents are in use, so they can go straight to the deletion queue.
-    LOGV2_OPTIONS(
-        21988,
-        {logv2::LogComponent::kShardingMigration},
-        "Abandoning incoming migration for {namespace} range {range}; scheduling deletion of any "
-        "documents already copied",
-        "Abandoning migration for the collection's specified range; scheduling deletion of any "
-        "documents already copied",
-        "namespace"_attr = _nss.ns(),
-        "range"_attr = redact(range.toString()));
-
-    invariant(!_overlapsInUseChunk(lg, range));
-
-    auto it = _receivingChunks.find(range.getMin());
-    invariant(it != _receivingChunks.end());
-    _receivingChunks.erase(it);
-
-    std::ignore =
-        _submitRangeForDeletion(lg, SemiFuture<void>::makeReady(), range, boost::none, Seconds(0));
-}
-
 SharedSemiFuture<void> MetadataManager::cleanUpRange(ChunkRange const& range,
                                                      boost::optional<UUID> migrationId,
                                                      bool shouldDelayBeforeDeletion) {
@@ -371,12 +274,6 @@ SharedSemiFuture<void> MetadataManager::cleanUpRange(ChunkRange const& range,
                       str::stream() << "Requested deletion range overlaps a live shard chunk"};
     }
 
-    if (rangeMapOverlaps(_receivingChunks, range.getMin(), range.getMax())) {
-        return Status{ErrorCodes::RangeOverlapConflict,
-                      str::stream() << "Requested deletion range overlaps a chunk being"
-                                       " migrated in"};
-    }
-
     auto delayForActiveQueriesOnSecondariesToComplete =
         shouldDelayBeforeDeletion ? Seconds(orphanCleanupDelaySecs.load()) : Seconds(0);
 
@@ -472,12 +369,6 @@ bool MetadataManager::_overlapsInUseChunk(WithLock lk, ChunkRange const& range)
     return (cm != nullptr);
 }
 
-boost::optional<ChunkRange> MetadataManager::getNextOrphanRange(BSONObj const& from) const {
-    stdx::lock_guard<Latch> lg(_managerLock);
-    invariant(!_metadata.empty());
-    return _metadata.back()->metadata->getNextOrphanRange(_receivingChunks, from);
-}
-
 SharedSemiFuture<void> MetadataManager::_submitRangeForDeletion(
     const WithLock&,
     SemiFuture<void> waitForActiveQueriesToComplete,
@@ -515,8 +406,4 @@ SharedSemiFuture<void> MetadataManager::_submitRangeForDeletion(
     return cleanupComplete;
 }
 
-void MetadataManager::clearReceivingChunks() {
-    stdx::lock_guard<Latch> lg(_managerLock);
-    _receivingChunks.clear();
-}
 }  // namespace mongo
diff --git a/src/mongo/db/s/metadata_manager.h b/src/mongo/db/s/metadata_manager.h
index 2844d696874..3685f4adf8b 100644
--- a/src/mongo/db/s/metadata_manager.h
+++ b/src/mongo/db/s/metadata_manager.h
@@ -104,40 +104,12 @@ public:
 
     void setFilteringMetadata(CollectionMetadata newMetadata);
 
-    void toBSONPending(BSONArrayBuilder& bb) const;
-
-    /**
-     * Returns the number of items in the _receivingChunks list. Useful for unit tests.
-     */
-    size_t numberOfReceivingChunks() {
-        return _receivingChunks.size();
-    }
-
-    /**
-     * Clears the items in the _receivingChunks list.
-     */
-    void clearReceivingChunks();
-
     /**
      * Appends information on all the chunk ranges in rangesToClean to builder.
      */
     void append(BSONObjBuilder* builder) const;
 
     /**
-     * Schedules any documents in `range` for immediate cleanup iff no running queries can depend
-     * on them, and adds the range to the list of ranges currently being received.
-     *
-     * Returns a future that will be resolved when the deletion either completes or fail.
-     */
-    SharedSemiFuture<void> beginReceive(ChunkRange const& range);
-
-    /**
-     * Removes `range` from the list of ranges currently being received, and schedules any documents
-     * in the range for immediate cleanup.
-     */
-    void forgetReceive(const ChunkRange& range);
-
-    /**
      * Schedules documents in `range` for cleanup after any running queries that may depend on them
     * have terminated. Does not block. Fails if the range overlaps any current local shard chunk.
      *
@@ -280,9 +252,6 @@ private:
     // active collection metadata instances still in use by active server operations or cursors.
     std::list<std::shared_ptr<CollectionMetadataTracker>> _metadata;
 
-    // Chunk ranges being migrated into to the shard. Indexed by the min key of the range.
-    RangeMap _receivingChunks;
-
     // Ranges being deleted, or scheduled to be deleted, by a background task.
     std::list<std::pair<ChunkRange, SharedSemiFuture<void>>> _rangesScheduledForDeletion;
 };
diff --git a/src/mongo/db/s/metadata_manager_test.cpp b/src/mongo/db/s/metadata_manager_test.cpp
index 760f04ea78e..e2693e9de1a 100644
--- a/src/mongo/db/s/metadata_manager_test.cpp
+++ b/src/mongo/db/s/metadata_manager_test.cpp
@@ -158,83 +158,6 @@ protected:
     std::shared_ptr<MetadataManager> _manager;
 };
 
-TEST_F(MetadataManagerTest, CleanUpForMigrateIn) {
-    _manager->setFilteringMetadata(makeEmptyMetadata());
-
-    // Sanity checks
-    ASSERT(_manager->getActiveMetadata(boost::none)->get().isSharded());
-    ASSERT_EQ(0UL, _manager->getActiveMetadata(boost::none)->get().getChunks().size());
-
-    ChunkRange range1(BSON("key" << 0), BSON("key" << 10));
-    ChunkRange range2(BSON("key" << 10), BSON("key" << 20));
-
-    auto notif1 = _manager->beginReceive(range1);
-    ASSERT(!notif1.isReady());
-
-    auto notif2 = _manager->beginReceive(range2);
-    ASSERT(!notif2.isReady());
-
-    ASSERT_EQ(2UL, _manager->numberOfRangesToClean());
-    ASSERT_EQ(0UL, _manager->numberOfRangesToCleanStillInUse());
-}
-
-TEST_F(MetadataManagerTest,
-       ChunkInReceivingChunksListIsRemovedAfterShardKeyRefineIfMigrationSucceeded) {
-    _manager->setFilteringMetadata(makeEmptyMetadata());
-
-    // Simulate receiving a range. This will add an item to _receivingChunks.
-    ChunkRange range(BSON("key" << 0), BSON("key" << 10));
-    auto notif1 = _manager->beginReceive(range);
-
-    ASSERT_EQ(_manager->numberOfReceivingChunks(), 1);
-
-    // Simulate a situation in which the migration completes, and then the shard key is refined,
-    // before this shard discovers the updated metadata.
-    auto uuid = _manager->getActiveMetadata(boost::none)->get().getChunkManager()->getUUID().get();
-    ChunkRange refinedRange(BSON("key" << 0 << "other" << MINKEY),
-                            BSON("key" << 10 << "other" << MINKEY));
-    auto refinedMetadata = makeEmptyMetadata(BSON(kPattern << 1 << "other" << 1),
-                                             ChunkRange(BSON("key" << MINKEY << "other" << MINKEY),
-                                                        BSON("key" << MAXKEY << "other" << MAXKEY)),
-                                             uuid);
-
-    // Set the updated chunk map on the MetadataManager.
-    _manager->setFilteringMetadata(cloneMetadataPlusChunk(refinedMetadata, refinedRange));
-    // Because the refined range overlaps with the received range (pre-refine), this should remove
-    // the item in _receivingChunks.
-    ASSERT_EQ(_manager->numberOfReceivingChunks(), 0);
-}
-
-TEST_F(MetadataManagerTest,
-       ChunkInReceivingChunksListIsNotRemovedAfterShardKeyRefineIfNonOverlappingRangeIsReceived) {
-    _manager->setFilteringMetadata(makeEmptyMetadata());
-
-    // Simulate receiving a range. This will add an item to _receivingChunks.
-    ChunkRange range(BSON("key" << 0), BSON("key" << 10));
-    auto notif1 = _manager->beginReceive(range);
-    ASSERT_EQ(_manager->numberOfReceivingChunks(), 1);
-
-    // Simulate a situation in which the shard key is refined and this shard discovers
-    // updated metadata where it owns some range that does not overlap with the range being migrated
-    // in.
-    auto uuid = _manager->getActiveMetadata(boost::none)->get().getChunkManager()->getUUID().get();
-    ChunkRange refinedNonOverlappingRange(BSON("key" << -10 << "other" << MINKEY),
-                                          BSON("key" << 0 << "other" << MINKEY));
-
-    auto refinedMetadata = makeEmptyMetadata(BSON(kPattern << 1 << "other" << 1),
-                                             ChunkRange(BSON("key" << MINKEY << "other" << MINKEY),
-                                                        BSON("key" << MAXKEY << "other" << MAXKEY)),
-                                             uuid);
-
-    // Set the updated chunk map on the MetadataManager.
-    _manager->setFilteringMetadata(
-        cloneMetadataPlusChunk(refinedMetadata, refinedNonOverlappingRange));
-
-    // Because the refined range does not overlap with the received range (pre-refine), this should
-    // NOT remove the item in _receivingChunks.
-    ASSERT_EQ(_manager->numberOfReceivingChunks(), 1);
-}
-
 TEST_F(MetadataManagerTest, TrackOrphanedDataCleanupBlocksOnScheduledRangeDeletions) {
     ChunkRange cr1(BSON("key" << 0), BSON("key" << 10));
diff --git a/src/mongo/db/s/migration_util.cpp b/src/mongo/db/s/migration_util.cpp
index 092c49877e1..1c2cc75c844 100644
--- a/src/mongo/db/s/migration_util.cpp
+++ b/src/mongo/db/s/migration_util.cpp
@@ -490,18 +490,6 @@ void submitOrphanRanges(OperationContext* opCtx, const NamespaceString& nss, con
     try {
         onShardVersionMismatch(opCtx, nss, boost::none);
 
-        {
-            AutoGetCollection autoColl(opCtx, nss, MODE_IS);
-            auto csr = CollectionShardingRuntime::get(opCtx, nss);
-            auto metadata = csr->getCurrentMetadataIfKnown();
-            if (!metadata || !metadata->isSharded()) {
-                return;
-            }
-            // We clear the list of receiving chunks to ensure that a RangeDeletionTask submitted by
-            // this setFCV command cannot be blocked behind a chunk received as a part of a
-            // migration that completed on the recipient (this node) but failed to commit.
-            csr->clearReceivingChunks();
-        }
 
         LOGV2_DEBUG(22031,
                     2,