Diffstat (limited to 'src')
7 files changed, 95 insertions, 99 deletions
diff --git a/src/mongo/db/s/config/configsvr_refine_collection_shard_key_command.cpp b/src/mongo/db/s/config/configsvr_refine_collection_shard_key_command.cpp
index 45ca003894b..58674d0212b 100644
--- a/src/mongo/db/s/config/configsvr_refine_collection_shard_key_command.cpp
+++ b/src/mongo/db/s/config/configsvr_refine_collection_shard_key_command.cpp
@@ -34,6 +34,7 @@
 #include "mongo/db/audit.h"
 #include "mongo/db/auth/authorization_session.h"
 #include "mongo/db/commands.h"
+#include "mongo/db/commands/feature_compatibility_version.h"
 #include "mongo/db/repl/repl_client_info.h"
 #include "mongo/db/s/config/sharding_catalog_manager.h"
 #include "mongo/db/s/dist_lock_manager.h"
@@ -62,9 +63,16 @@ public:
                 "_configsvrRefineCollectionShardKey can only be run on config servers",
                 serverGlobalParams.clusterRole == ClusterRole::ConfigServer);
         uassert(ErrorCodes::InvalidOptions,
-                "refineCollectionShardKey must be called with majority writeConcern",
+                "_configsvrRefineCollectionShardKey must be called with majority writeConcern",
                 opCtx->getWriteConcern().wMode == WriteConcernOptions::kMajority);
 
+        // TODO (SERVER-53283): Delete this code when FCV 5.1 becomes the official one
+        FixedFCVRegion fcvRegion(opCtx);
+        uassert(ErrorCodes::ConflictingOperationInProgress,
+                "Cannot refine collection shard key while the node is being upgraded or "
+                "downgraded",
+                !fcvRegion->isUpgradingOrDowngrading());
+
         const boost::optional<bool>& isFromPrimaryShard = request().getIsFromPrimaryShard();
         if (isFromPrimaryShard && *isFromPrimaryShard) {
             // If the request has been received from the primary shard, the distributed lock has
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
index 3855d512b88..2617c4201a0 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
@@ -431,13 +431,7 @@ void bumpMajorVersionOneChunkPerShard(OperationContext* opCtx,
     const CollectionType coll(findCollResponse.docs[0]);
 
     for (const auto& shardId : shardIds) {
-        BSONObjBuilder updateBuilder;
-        BSONObjBuilder updateVersionClause(updateBuilder.subobjStart("$set"));
-        targetChunkVersion.appendLegacyWithField(&updateVersionClause, ChunkType::lastmod());
-        updateVersionClause.doneFast();
-        auto chunkUpdate = updateBuilder.obj();
-
-        const auto query = [&]() {
+        const auto query = [&] {
             if (coll.getTimestamp()) {
                 return BSON(ChunkType::collectionUUID << coll.getUuid()
                                                       << ChunkType::shard(shardId.toString()));
@@ -446,11 +440,16 @@ void bumpMajorVersionOneChunkPerShard(OperationContext* opCtx,
                                 << ChunkType::shard(shardId.toString()));
             }
         }();
-        auto request = BatchedCommandRequest::buildUpdateOp(ChunkType::ConfigNS,
-                                                            query,        // query
-                                                            chunkUpdate,  // update
-                                                            false,        // upsert
-                                                            false         // multi
+
+        BSONObjBuilder updateVersionClause;
+        updateVersionClause.appendTimestamp(ChunkType::lastmod(), targetChunkVersion.toLong());
+
+        auto request = BatchedCommandRequest::buildUpdateOp(
+            ChunkType::ConfigNS,
+            query,
+            BSON("$set" << updateVersionClause.obj()),  // update
+            false,                                      // upsert
+            false                                       // multi
         );
 
         auto res = ShardingCatalogManager::get(opCtx)->writeToConfigDocumentInTxn(
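[Illustration -- not part of the commit] The rewritten per-shard update above replaces ChunkVersion::appendLegacyWithField() with a direct appendTimestamp() call. A minimal sketch of the resulting {$set: {lastmod: ...}} document, assembled from the same calls the hunk uses; the helper name and the concrete version value (5|0) are invented for the example. ChunkVersion::toLong() packs the (major, minor) pair into the 64-bit value that BSONObjBuilder::appendTimestamp() stores as a BSON Timestamp.

// Sketch only -- mirrors the calls in the hunk above; names/values are hypothetical.
#include "mongo/bson/bsonobjbuilder.h"
#include "mongo/s/catalog/type_chunk.h"
#include "mongo/s/chunk_version.h"

namespace mongo {
BSONObj makeLastmodBumpUpdate(const ChunkVersion& targetChunkVersion) {
    BSONObjBuilder updateVersionClause;
    // For a target version of 5|0 this appends {lastmod: Timestamp(5, 0)}.
    updateVersionClause.appendTimestamp(ChunkType::lastmod(), targetChunkVersion.toLong());
    return BSON("$set" << updateVersionClause.obj());
}
}  // namespace mongo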
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_collection_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_collection_operations.cpp
index 356d7eb2c83..9cbbe8f9676 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_collection_operations.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_collection_operations.cpp
@@ -278,16 +278,12 @@ std::pair<std::vector<BSONObj>, std::vector<BSONObj>> makeChunkAndTagUpdatesForR
                                                        << "then" << literalMaxObject << "else"
                                                        << literalMinObject))))))));
 
-    // The chunk updates change the min and max fields, and additionally set the new epoch and the
-    // new timestamp and unset the jumbo field.
+    // The chunk updates change the min and max fields and unset the jumbo field. If the collection
+    // is in the old (pre-5.0) format, it also sets the new epoch.
     std::vector<BSONObj> chunkUpdates;
-    chunkUpdates.emplace_back(
-        BSON("$set" << extendMinAndMaxModifier.addFields(BSON(ChunkType::epoch(newEpoch)))));
-
-    if (newTimestamp) {
-        chunkUpdates.emplace_back(BSON("$set" << extendMinAndMaxModifier.addFields(
-                                           BSON(ChunkType::timestamp(*newTimestamp)))));
-    }
+    chunkUpdates.emplace_back(BSON("$set" << (newTimestamp ? extendMinAndMaxModifier.getOwned()
+                                                           : extendMinAndMaxModifier.addFields(BSON(
+                                                                 ChunkType::epoch(newEpoch))))));
     chunkUpdates.emplace_back(BSON("$unset" << ChunkType::jumbo()));
 
     // The tag updates only change the min and max fields.
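[Illustration -- not part of the commit] Depending on whether the collection entry already carries a timestamp (5.0 metadata), the branch above yields one of two $set documents per chunk. A rough sketch of their shape for a hypothetical refine from {a: 1} to {a: 1, b: 1}; the real update extends min/max through the $cond aggregation expressions in extendMinAndMaxModifier rather than literal values, and 'lastmodEpoch' is assumed here to be the on-disk name of the chunk epoch field. Function names and key values are invented.

// Sketch only -- simplified shapes, not the commit's code.
#include "mongo/bson/bsonmisc.h"
#include "mongo/bson/bsonobjbuilder.h"
#include "mongo/bson/oid.h"

namespace mongo {
// 5.0 metadata: chunks carry no epoch, so only the bounds are extended.
BSONObj newFormatRefineUpdate() {
    return BSON("$set" << BSON("min" << BSON("a" << 0 << "b" << MINKEY)
                               << "max" << BSON("a" << 10 << "b" << MINKEY)));
}

// Pre-5.0 metadata: the refreshed epoch is also written to every chunk document.
BSONObj oldFormatRefineUpdate(const OID& newEpoch) {
    return BSON("$set" << BSON("min" << BSON("a" << 0 << "b" << MINKEY)
                               << "max" << BSON("a" << 10 << "b" << MINKEY)
                               << "lastmodEpoch" << newEpoch));
}
}  // namespace mongo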
diff --git a/src/mongo/db/s/resharding/resharding_coordinator_service.cpp b/src/mongo/db/s/resharding/resharding_coordinator_service.cpp
index 18e8cc3b8aa..93d573c8f75 100644
--- a/src/mongo/db/s/resharding/resharding_coordinator_service.cpp
+++ b/src/mongo/db/s/resharding/resharding_coordinator_service.cpp
@@ -542,35 +542,23 @@ void updateChunkAndTagsDocsForTempNss(OperationContext* opCtx,
                                       OID newCollectionEpoch,
                                       boost::optional<Timestamp> newCollectionTimestamp,
                                       TxnNumber txnNumber) {
-    // Update all chunk documents that currently have 'ns' as the temporary collection namespace
-    // such that 'ns' is now the original collection namespace and 'lastmodEpoch' is
-    // newCollectionEpoch.
-    const auto chunksQuery = [&]() {
-        if (newCollectionTimestamp) {
-            return BSON(ChunkType::collectionUUID() << coordinatorDoc.getReshardingUUID());
-        } else {
-            return BSON(ChunkType::ns(coordinatorDoc.getTempReshardingNss().ns()));
-        }
-    }();
-    const auto chunksUpdate = [&]() {
-        if (newCollectionTimestamp) {
-            return BSON("$set" << BSON(ChunkType::epoch << newCollectionEpoch
-                                                        << ChunkType::timestamp
-                                                        << *newCollectionTimestamp));
-        } else {
-            return BSON("$set" << BSON(ChunkType::ns << coordinatorDoc.getSourceNss().ns()
-                                                     << ChunkType::epoch << newCollectionEpoch));
-        }
-    }();
-    auto chunksRequest = BatchedCommandRequest::buildUpdateOp(ChunkType::ConfigNS,
-                                                              chunksQuery,   // query
-                                                              chunksUpdate,  // update
-                                                              false,         // upsert
-                                                              true           // multi
-    );
-
-    auto chunksRes = ShardingCatalogManager::get(opCtx)->writeToConfigDocumentInTxn(
-        opCtx, ChunkType::ConfigNS, chunksRequest, txnNumber);
+    // If the collection entry has a timestamp, this means the metadata has been upgraded to the 5.0
+    // format in which case chunks are indexed by UUID and do not contain Epochs. Therefore, only
+    // the update to config.collections is sufficient.
+    if (!newCollectionTimestamp) {
+        auto chunksRequest = BatchedCommandRequest::buildUpdateOp(
+            ChunkType::ConfigNS,
+            BSON(ChunkType::ns(coordinatorDoc.getTempReshardingNss().ns())),  // query
+            BSON("$set" << BSON(ChunkType::ns << coordinatorDoc.getSourceNss().ns()
+                                              << ChunkType::epoch
+                                              << newCollectionEpoch)),  // update
+            false,  // upsert
+            true    // multi
+        );
+
+        auto chunksRes = ShardingCatalogManager::get(opCtx)->writeToConfigDocumentInTxn(
+            opCtx, ChunkType::ConfigNS, chunksRequest, txnNumber);
+    }
 
     auto tagsRequest = BatchedCommandRequest::buildUpdateOp(
         TagsType::ConfigNS,
@@ -609,12 +597,12 @@ CollectionType createTempReshardingCollectionType(
     const BSONObj& collation) {
     CollectionType collType(coordinatorDoc.getTempReshardingNss(),
                             chunkVersion.epoch(),
+                            chunkVersion.getTimestamp(),
                             opCtx->getServiceContext()->getPreciseClockSource()->now(),
                             coordinatorDoc.getReshardingUUID());
     collType.setKeyPattern(coordinatorDoc.getReshardingKey());
     collType.setDefaultCollation(collation);
     collType.setUnique(false);
-    collType.setTimestamp(chunkVersion.getTimestamp());
 
     TypeCollectionReshardingFields tempEntryReshardingFields(coordinatorDoc.getReshardingUUID());
     tempEntryReshardingFields.setState(coordinatorDoc.getState());
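[Illustration -- not part of the commit] The reason the chunk rewrite above can be skipped for 5.0 metadata is the change of chunk key. A schematic comparison of the two config.chunks formats; field values are invented, unrelated fields (min, max, lastmod, history) are omitted, and the function names exist only for this sketch.

// Sketch only -- schematic documents, not the commit's code.
#include "mongo/bson/bsonobjbuilder.h"
#include "mongo/bson/oid.h"
#include "mongo/util/uuid.h"

namespace mongo {
// Pre-5.0: chunks are keyed by namespace and carry the collection epoch, so committing a
// resharding operation must rewrite 'ns' (temp ns -> source ns) and 'lastmodEpoch' on each chunk.
BSONObj legacyFormatChunk(const OID& epoch) {
    return BSON("_id" << "test.system.resharding.<uuid>-x_MinKey"
                      << "ns" << "test.system.resharding.<uuid>"
                      << "lastmodEpoch" << epoch << "shard" << "shard0000");
}

// 5.0: chunks are keyed by the collection UUID and store neither namespace nor epoch. The
// temporary collection already owns the resharding UUID, so re-pointing the config.collections
// entry is enough and the chunk documents can stay untouched.
BSONObj newFormatChunk(const UUID& collectionUUID) {
    BSONObjBuilder bob;
    bob.append("_id", OID::gen());
    collectionUUID.appendToBuilder(&bob, "uuid");
    bob.append("shard", "shard0000");
    return bob.obj();
}
}  // namespace mongo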
diff --git a/src/mongo/db/s/resharding/resharding_coordinator_service_test.cpp b/src/mongo/db/s/resharding/resharding_coordinator_service_test.cpp
index 5bb026932fd..785ddd3305d 100644
--- a/src/mongo/db/s/resharding/resharding_coordinator_service_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_coordinator_service_test.cpp
@@ -367,8 +367,11 @@ public:
             shardKey = _oldShardKey.toBSON();
         }
 
-        CollectionType collType(
-            coordinatorDoc.getSourceNss(), std::move(epoch), lastUpdated, std::move(uuid));
+        CollectionType collType(coordinatorDoc.getSourceNss(),
+                                std::move(epoch),
+                                Timestamp(1, 2),
+                                lastUpdated,
+                                std::move(uuid));
         collType.setKeyPattern(shardKey);
         collType.setUnique(false);
         if (reshardingFields)
@@ -424,7 +427,7 @@ public:
         }
     }
 
-    std::vector<ChunkType> makeChunks(const NamespaceString& nss,
+    std::vector<ChunkType> makeChunks(const UUID& uuid,
                                       OID epoch,
                                       const ShardKeyPattern& shardKey,
                                       std::vector<OID> ids) {
@@ -433,21 +436,21 @@ public:
 
         // Create two chunks, one on each shard with the given namespace and epoch
         ChunkVersion version(1, 0, epoch, boost::none /* timestamp */);
-        ChunkType chunk1(nss, chunkRanges[0], version, ShardId("shard0000"));
+        ChunkType chunk1(uuid, chunkRanges[0], version, ShardId("shard0000"));
         chunk1.setName(ids[0]);
         version.incMinor();
-        ChunkType chunk2(nss, chunkRanges[1], version, ShardId("shard0001"));
+        ChunkType chunk2(uuid, chunkRanges[1], version, ShardId("shard0001"));
         chunk2.setName(ids[1]);
 
         return std::vector<ChunkType>{chunk1, chunk2};
     }
 
     // Returns the chunk for the donor shard.
-    ChunkType makeAndInsertChunksForDonorShard(const NamespaceString& nss,
+    ChunkType makeAndInsertChunksForDonorShard(const UUID& uuid,
                                                OID epoch,
                                                const ShardKeyPattern& shardKey,
                                                std::vector<OID> ids) {
-        auto chunks = makeChunks(nss, epoch, shardKey, ids);
+        auto chunks = makeChunks(uuid, epoch, shardKey, ids);
 
         // Only the chunk corresponding to shard0000 is stored as a donor in the coordinator state
         // document constructed.
@@ -503,10 +506,10 @@ TEST_F(ReshardingCoordinatorServiceTest, ReshardingCoordinatorSuccessfullyTransi
     auto doc = insertStateAndCatalogEntries(CoordinatorStateEnum::kUnused, _originalEpoch);
     auto opCtx = operationContext();
     auto donorChunk = makeAndInsertChunksForDonorShard(
-        _originalNss, _originalEpoch, _oldShardKey, std::vector{OID::gen(), OID::gen()});
+        _originalUUID, _originalEpoch, _oldShardKey, std::vector{OID::gen(), OID::gen()});
 
     auto initialChunks =
-        makeChunks(_tempNss, _tempEpoch, _newShardKey, std::vector{OID::gen(), OID::gen()});
+        makeChunks(_reshardingUUID, _tempEpoch, _newShardKey, std::vector{OID::gen(), OID::gen()});
 
     std::vector<ReshardedChunk> presetReshardedChunks;
     for (const auto& chunk : initialChunks) {
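[Illustration -- not part of the commit] Both test files rename their chunk helpers from namespace-keyed to UUID-keyed. Pieced together from the @@ -420 hunk below, a self-contained sketch of reading a collection's chunks back once they are keyed by UUID; the helper name is invented and the include list is abbreviated.

// Sketch only -- assembled from code appearing in the hunks below.
#include "mongo/db/dbdirectclient.h"
#include "mongo/s/catalog/type_chunk.h"
#include "mongo/util/assert_util.h"

namespace mongo {
std::vector<ChunkType> readChunksByUuid(OperationContext* opCtx,
                                        const UUID& uuid,
                                        const OID& collEpoch,
                                        const boost::optional<Timestamp>& collTimestamp) {
    DBDirectClient client(opCtx);
    std::vector<ChunkType> chunks;
    // Chunks are now looked up by collection UUID instead of by namespace string.
    auto cursor = client.query(ChunkType::ConfigNS, Query(BSON("uuid" << uuid)));
    while (cursor->more()) {
        // Chunk documents no longer store the epoch/timestamp themselves, so the caller
        // supplies them from the config.collections entry when parsing.
        chunks.push_back(uassertStatusOK(
            ChunkType::fromConfigBSON(cursor->nextSafe().getOwned(), collEpoch, collTimestamp)));
    }
    return chunks;
}
}  // namespace mongo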
diff --git a/src/mongo/db/s/resharding/resharding_coordinator_test.cpp b/src/mongo/db/s/resharding/resharding_coordinator_test.cpp
index 58de1d6d76b..5a0beb4bf24 100644
--- a/src/mongo/db/s/resharding/resharding_coordinator_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_coordinator_test.cpp
@@ -112,8 +112,11 @@ protected:
             shardKey = _oldShardKey.toBSON();
         }
 
-        CollectionType collType(
-            coordinatorDoc.getSourceNss(), std::move(epoch), lastUpdated, std::move(uuid));
+        CollectionType collType(coordinatorDoc.getSourceNss(),
+                                std::move(epoch),
+                                Timestamp(1, 2),
+                                lastUpdated,
+                                std::move(uuid));
         collType.setKeyPattern(shardKey);
         collType.setUnique(false);
         if (reshardingFields)
@@ -131,11 +134,11 @@ protected:
     }
 
     // Returns the chunk for the donor shard.
-    ChunkType makeAndInsertChunksForDonorShard(const NamespaceString& nss,
+    ChunkType makeAndInsertChunksForDonorShard(const UUID& uuid,
                                                OID epoch,
                                                const ShardKeyPattern& shardKey,
                                                std::vector<OID> ids) {
-        auto chunks = makeChunks(nss, epoch, shardKey, ids);
+        auto chunks = makeChunks(uuid, epoch, shardKey, ids);
 
         // Only the chunk corresponding to shard0000 is stored as a donor in the coordinator state
         // document constructed.
@@ -145,11 +148,11 @@ protected:
     }
 
     // Returns the chunk for the recipient shard.
-    ChunkType makeAndInsertChunksForRecipientShard(const NamespaceString& nss,
+    ChunkType makeAndInsertChunksForRecipientShard(const UUID& uuid,
                                                    OID epoch,
                                                    const ShardKeyPattern& shardKey,
                                                    std::vector<OID> ids) {
-        auto chunks = makeChunks(nss, epoch, shardKey, ids);
+        auto chunks = makeChunks(uuid, epoch, shardKey, ids);
 
         // Only the chunk corresponding to shard0001 is stored as a recipient in the coordinator
         // state document constructed.
@@ -158,7 +161,7 @@ protected:
         return recipientChunk;
     }
 
-    std::vector<ChunkType> makeChunks(const NamespaceString& nss,
+    std::vector<ChunkType> makeChunks(const UUID& uuid,
                                       OID epoch,
                                       const ShardKeyPattern& shardKey,
                                       std::vector<OID> ids) {
@@ -166,11 +169,11 @@ protected:
            _newShardKey.isShardKey(shardKey.toBSON()) ? _newChunkRanges : _oldChunkRanges;
 
         // Create two chunks, one on each shard with the given namespace and epoch
-        ChunkVersion version(1, 0, epoch, boost::none /* timestamp */);
-        ChunkType chunk1(nss, chunkRanges[0], version, ShardId("shard0000"));
+        ChunkVersion version(1, 0, epoch, Timestamp(1, 2));
+        ChunkType chunk1(uuid, chunkRanges[0], version, ShardId("shard0000"));
         chunk1.setName(ids[0]);
         version.incMinor();
-        ChunkType chunk2(nss, chunkRanges[1], version, ShardId("shard0001"));
+        ChunkType chunk2(uuid, chunkRanges[1], version, ShardId("shard0001"));
         chunk2.setName(ids[1]);
 
         return std::vector<ChunkType>{chunk1, chunk2};
@@ -213,10 +216,7 @@ protected:
         client.insert(CollectionType::ConfigNS.ns(), originalNssCatalogEntry.toBSON());
 
         auto tempNssCatalogEntry = createTempReshardingCollectionType(
-            opCtx,
-            coordinatorDoc,
-            ChunkVersion(1, 1, OID::gen(), boost::none /* timestamp */),
-            BSONObj());
+            opCtx, coordinatorDoc, ChunkVersion(1, 1, OID::gen(), Timestamp(1, 2)), BSONObj());
         client.insert(CollectionType::ConfigNS.ns(), tempNssCatalogEntry.toBSON());
 
         return coordinatorDoc;
@@ -420,14 +420,13 @@ protected:
 
     void readChunkCatalogEntriesAndAssertMatchExpected(
         OperationContext* opCtx,
+        const UUID& uuid,
        std::vector<ChunkType> expectedChunks,
         const OID& collEpoch,
         const boost::optional<Timestamp>& collTimestamp) {
-        auto nss = expectedChunks[0].getNS();
-
         DBDirectClient client(opCtx);
         std::vector<ChunkType> foundChunks;
-        auto cursor = client.query(ChunkType::ConfigNS, Query(BSON("ns" << nss.ns())));
+        auto cursor = client.query(ChunkType::ConfigNS, Query(BSON("uuid" << uuid)));
         while (cursor->more()) {
             auto d = uassertStatusOK(
                 ChunkType::fromConfigBSON(cursor->nextSafe().getOwned(), collEpoch, collTimestamp));
@@ -441,7 +440,9 @@ protected:
                 return c.getName() == id;
             });
             ASSERT(onDiskIt != foundChunks.end());
-            ASSERT_EQUALS(onDiskIt->toConfigBSON().woCompare(it->toConfigBSON()), 0);
+            auto expectedBSON = it->toConfigBSON().removeField(ChunkType::collectionUUID());
+            auto onDiskBSON = onDiskIt->toConfigBSON().removeField(ChunkType::collectionUUID());
+            ASSERT_BSONOBJ_EQ(expectedBSON, onDiskBSON);
         }
     }
 
@@ -503,11 +504,11 @@ protected:
         // collection should have been removed.
         boost::optional<CollectionType> expectedTempCollType = boost::none;
         if (expectedCoordinatorDoc.getState() < CoordinatorStateEnum::kCommitting) {
-            expectedTempCollType = createTempReshardingCollectionType(
-                opCtx,
-                expectedCoordinatorDoc,
-                ChunkVersion(1, 1, OID::gen(), boost::none /* timestamp */),
-                BSONObj());
+            expectedTempCollType =
+                createTempReshardingCollectionType(opCtx,
+                                                   expectedCoordinatorDoc,
+                                                   ChunkVersion(1, 1, OID::gen(), Timestamp(1, 2)),
+                                                   BSONObj());
 
             // It's necessary to add the userCanceled field because the call into
             // createTempReshardingCollectionType assumes that the collection entry is
@@ -592,7 +593,7 @@ protected:
 
         // Check that chunks and tags entries have been correctly created
         readChunkCatalogEntriesAndAssertMatchExpected(
-            opCtx, initialChunks, _originalEpoch, _originalTimestamp);
+            opCtx, _reshardingUUID, initialChunks, _originalEpoch, _originalTimestamp);
         readTagCatalogEntriesAndAssertMatchExpected(opCtx, newZones);
     }
 
@@ -630,7 +631,7 @@ protected:
         // Check that chunks and tags entries previously under the temporary namespace have been
         // correctly updated to the original namespace
         readChunkCatalogEntriesAndAssertMatchExpected(
-            opCtx, expectedChunks, _finalEpoch, _finalTimestamp);
+            opCtx, _reshardingUUID, expectedChunks, _finalEpoch, _finalTimestamp);
         readTagCatalogEntriesAndAssertMatchExpected(opCtx, expectedZones);
     }
 
@@ -663,12 +664,14 @@ protected:
            insertStateAndCatalogEntries(CoordinatorStateEnum::kPreparingToDonate, _originalEpoch);
 
         auto initialChunksIds = std::vector{OID::gen(), OID::gen()};
-        auto tempNssChunks = makeChunks(_tempNss, _tempEpoch, _newShardKey, initialChunksIds);
+        auto tempNssChunks =
+            makeChunks(_reshardingUUID, _tempEpoch, _newShardKey, initialChunksIds);
         auto recipientChunk = tempNssChunks[1];
         insertChunkAndZoneEntries(tempNssChunks, makeZones(_tempNss, _newShardKey));
 
         insertChunkAndZoneEntries(
-            makeChunks(_originalNss, OID::gen(), _oldShardKey, std::vector{OID::gen(), OID::gen()}),
+            makeChunks(
+                _originalUUID, OID::gen(), _oldShardKey, std::vector{OID::gen(), OID::gen()}),
             makeZones(_originalNss, _oldShardKey));
 
         // Persist the updates on disk
@@ -737,11 +740,11 @@ TEST_F(ReshardingCoordinatorPersistenceTest, WriteInitialInfoSucceeds) {
     // Ensure the chunks for the original namespace exist since they will be bumped as a product of
     // the state transition to kPreparingToDonate.
     auto donorChunk = makeAndInsertChunksForDonorShard(
-        _originalNss, _originalEpoch, _oldShardKey, std::vector{OID::gen(), OID::gen()});
+        _originalUUID, _originalEpoch, _oldShardKey, std::vector{OID::gen(), OID::gen()});
     auto collectionVersion = donorChunk.getVersion();
 
     auto initialChunks =
-        makeChunks(_tempNss, _tempEpoch, _newShardKey, std::vector{OID::gen(), OID::gen()});
+        makeChunks(_reshardingUUID, _tempEpoch, _newShardKey, std::vector{OID::gen(), OID::gen()});
 
     auto newZones = makeZones(_tempNss, _newShardKey);
     std::vector<BSONObj> zonesBSON;
@@ -785,11 +788,11 @@ TEST_F(ReshardingCoordinatorPersistenceTest, BasicStateTransitionSucceeds) {
     // Ensure the chunks for the original and temporary namespaces exist since they will be bumped
     // as a product of the state transition to kBlockingWrites.
     auto donorChunk = makeAndInsertChunksForDonorShard(
-        _originalNss, _originalEpoch, _oldShardKey, std::vector{OID::gen(), OID::gen()});
+        _originalUUID, _originalEpoch, _oldShardKey, std::vector{OID::gen(), OID::gen()});
     auto donorCollectionVersion = donorChunk.getVersion();
 
     auto recipientChunk = makeAndInsertChunksForRecipientShard(
-        _tempNss, _tempEpoch, _newShardKey, std::vector{OID::gen(), OID::gen()});
+        _reshardingUUID, _tempEpoch, _newShardKey, std::vector{OID::gen(), OID::gen()});
     auto recipientCollectionVersion = donorChunk.getVersion();
 
     // Persist the updates on disk
@@ -810,11 +813,11 @@ TEST_F(ReshardingCoordinatorPersistenceTest, StateTransitionWithFetchTimestampSu
     // Ensure the chunks for the original and temporary namespaces exist since they will be bumped
     // as a product of the state transition to kCloning.
     auto donorChunk = makeAndInsertChunksForDonorShard(
-        _originalNss, _originalEpoch, _oldShardKey, std::vector{OID::gen(), OID::gen()});
+        _originalUUID, _originalEpoch, _oldShardKey, std::vector{OID::gen(), OID::gen()});
     auto donorCollectionVersion = donorChunk.getVersion();
 
     auto recipientChunk = makeAndInsertChunksForRecipientShard(
-        _tempNss, _tempEpoch, _newShardKey, std::vector{OID::gen(), OID::gen()});
+        _reshardingUUID, _tempEpoch, _newShardKey, std::vector{OID::gen(), OID::gen()});
     auto recipientCollectionVersion = recipientChunk.getVersion();
 
     // Persist the updates on disk
@@ -841,12 +844,12 @@ TEST_F(ReshardingCoordinatorPersistenceTest, StateTranstionToDecisionPersistedSu
         CoordinatorStateEnum::kBlockingWrites, _originalEpoch, fetchTimestamp);
 
     auto initialChunksIds = std::vector{OID::gen(), OID::gen()};
-    auto tempNssChunks = makeChunks(_tempNss, _tempEpoch, _newShardKey, initialChunksIds);
+    auto tempNssChunks = makeChunks(_reshardingUUID, _tempEpoch, _newShardKey, initialChunksIds);
     auto recipientChunk = tempNssChunks[1];
     insertChunkAndZoneEntries(tempNssChunks, makeZones(_tempNss, _newShardKey));
 
     insertChunkAndZoneEntries(
-        makeChunks(_originalNss, OID::gen(), _oldShardKey, std::vector{OID::gen(), OID::gen()}),
+        makeChunks(_originalUUID, OID::gen(), _oldShardKey, std::vector{OID::gen(), OID::gen()}),
         makeZones(_originalNss, _oldShardKey));
 
     // Persist the updates on disk
@@ -855,7 +858,7 @@ TEST_F(ReshardingCoordinatorPersistenceTest, StateTranstionToDecisionPersistedSu
 
     // The new epoch to use for the resharded collection to indicate that the collection is a
     // new incarnation of the namespace
-    auto updatedChunks = makeChunks(_originalNss, _finalEpoch, _newShardKey, initialChunksIds);
+    auto updatedChunks = makeChunks(_originalUUID, _finalEpoch, _newShardKey, initialChunksIds);
     auto updatedZones = makeZones(_originalNss, _newShardKey);
 
     writeDecisionPersistedStateExpectSuccess(
@@ -881,7 +884,7 @@ TEST_F(ReshardingCoordinatorPersistenceTest, StateTransitionToDoneSucceeds) {
     // Ensure the chunks for the original namespace exist since they will be bumped as a product of
     // the state transition to kDone.
     auto finalChunk = makeAndInsertChunksForRecipientShard(
-        _originalNss, _finalEpoch, _newShardKey, std::vector{OID::gen(), OID::gen()});
+        _reshardingUUID, _finalEpoch, _newShardKey, std::vector{OID::gen(), OID::gen()});
     auto collectionVersion = finalChunk.getVersion();
 
     removeCoordinatorDocAndReshardingFieldsExpectSuccess(operationContext(), coordinatorDoc);
diff --git a/src/mongo/db/s/shard_server_catalog_cache_loader.cpp b/src/mongo/db/s/shard_server_catalog_cache_loader.cpp
index 5fc37ad15e5..4d08182ccad 100644
--- a/src/mongo/db/s/shard_server_catalog_cache_loader.cpp
+++ b/src/mongo/db/s/shard_server_catalog_cache_loader.cpp
@@ -714,7 +714,6 @@ ShardServerCatalogCacheLoader::_schedulePrimaryGetChunksSince(
                                   << "'."};
         }
 
-
         if (maxLoaderVersion.isSet() &&
             (maxLoaderVersion.getTimestamp().is_initialized() !=
              collAndChunks.creationTime.is_initialized())) {
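[Illustration -- not part of the commit] The condition retained in the last hunk is what detects a metadata format flip on the shard. A sketch of the check in isolation; presumably the loader then discards its persisted chunks instead of patching them incrementally, though that code is outside this hunk.

// Sketch only -- restates the condition from the hunk above. True exactly when the persisted
// metadata and the freshly fetched metadata disagree about having a timestamp, i.e. the
// collection crossed the pre-5.0 <-> 5.0 format boundary between refreshes.
const bool formatChanged = maxLoaderVersion.isSet() &&
    (maxLoaderVersion.getTimestamp().is_initialized() !=
     collAndChunks.creationTime.is_initialized());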