author     Allison Easton <allison.easton@mongodb.com>       2022-11-23 16:06:22 +0000
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>  2022-11-23 16:33:06 +0000
commit     d5209179dc52ac6411f8fcacb1b9405fe617d8a5 (patch)
tree       917f8b89998e77aefecc7946a42db149f3e0a741 /src/mongo/db
parent     9114c3335f84c500eb767d06b7960eeff2802a56 (diff)
download   mongo-d5209179dc52ac6411f8fcacb1b9405fe617d8a5.tar.gz
SERVER-70382 Replace boost::none index versions with the actual index version from the catalog cache or CSR
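
The change is mechanical but wide: call sites that previously built a ShardVersion with an empty index component now obtain the index version together with the placement version, either from the catalog cache's CollectionRoutingInfo on routers or from the CollectionShardingRuntime (CSR) on shards. A condensed before/after sketch of the router-side pattern, using the names that appear in the hunks below (cri is the CollectionRoutingInfo, cm its ChunkManager):

    // Before: the index component was unconditionally boost::none.
    ChunkVersion placementVersion = cm.getVersion(shardId);
    auto versionedCmdObj = appendShardVersion(
        cmdObj, ShardVersion(placementVersion, boost::optional<CollectionIndexes>(boost::none)));

    // After: CollectionRoutingInfo pairs the placement info with the global
    // index cache, so the complete shard version comes from a single accessor.
    auto versionedCmdObj = appendShardVersion(cmdObj, cri.getShardVersion(shardId));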
Diffstat (limited to 'src/mongo/db')
 src/mongo/db/pipeline/dispatch_shard_pipeline_test.cpp | 5
 src/mongo/db/pipeline/process_interface/common_process_interface.cpp | 12
 src/mongo/db/pipeline/process_interface/mongos_process_interface.cpp | 14
 src/mongo/db/pipeline/process_interface/shardsvr_process_interface.cpp | 14
 src/mongo/db/pipeline/sharded_agg_helpers.cpp | 52
 src/mongo/db/pipeline/sharded_agg_helpers.h | 4
 src/mongo/db/pipeline/sharded_union_test.cpp | 12
 src/mongo/db/s/analyze_shard_key_cmd_util.cpp | 2
 src/mongo/db/s/balancer/balancer.cpp | 8
 src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp | 8
 src/mongo/db/s/balancer/balancer_defragmentation_policy_impl.cpp | 5
 src/mongo/db/s/balancer/cluster_chunks_resize_policy_impl.cpp | 2
 src/mongo/db/s/chunk_operation_precondition_checks.cpp | 38
 src/mongo/db/s/chunk_operation_precondition_checks.h | 17
 src/mongo/db/s/chunk_splitter.cpp | 4
 src/mongo/db/s/collection_sharding_runtime.h | 2
 src/mongo/db/s/collmod_coordinator.cpp | 2
 src/mongo/db/s/config/configsvr_reshard_collection_cmd.cpp | 2
 src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp | 6
 src/mongo/db/s/config/sharding_catalog_manager_collection_operations.cpp | 6
 src/mongo/db/s/create_collection_coordinator.cpp | 45
 src/mongo/db/s/global_index/global_index_cloning_external_state.cpp | 4
 src/mongo/db/s/global_index/global_index_cloning_external_state.h | 8
 src/mongo/db/s/global_index/global_index_cloning_service.cpp | 2
 src/mongo/db/s/global_index/global_index_cloning_service_test.cpp | 4
 src/mongo/db/s/migration_destination_manager.cpp | 8
 src/mongo/db/s/migration_destination_manager.h | 3
 src/mongo/db/s/migration_source_manager.cpp | 22
 src/mongo/db/s/refine_collection_shard_key_coordinator.cpp | 2
 src/mongo/db/s/rename_collection_coordinator.cpp | 6
 src/mongo/db/s/reshard_collection_coordinator.cpp | 4
 src/mongo/db/s/resharding/document_source_resharding_ownership_match.cpp | 2
 src/mongo/db/s/resharding/resharding_collection_cloner.cpp | 2
 src/mongo/db/s/resharding/resharding_coordinator_service.cpp | 8
 src/mongo/db/s/resharding/resharding_destined_recipient_test.cpp | 2
 src/mongo/db/s/resharding/resharding_donor_service.cpp | 2
 src/mongo/db/s/resharding/resharding_recipient_service.cpp | 2
 src/mongo/db/s/resharding/resharding_recipient_service_external_state.cpp | 12
 src/mongo/db/s/resharding/resharding_recipient_service_external_state.h | 8
 src/mongo/db/s/resharding/resharding_recipient_service_external_state_test.cpp | 2
 src/mongo/db/s/resharding/resharding_recipient_service_test.cpp | 22
 src/mongo/db/s/resharding/resharding_txn_cloner_test.cpp | 8
 src/mongo/db/s/resharding/resharding_util.cpp | 2
 src/mongo/db/s/sessions_collection_config_server.cpp | 8
 src/mongo/db/s/shard_filtering_metadata_refresh.cpp | 4
 src/mongo/db/s/shard_key_util.cpp | 13
 src/mongo/db/s/shard_key_util.h | 3
 src/mongo/db/s/sharding_ddl_util.cpp | 6
 src/mongo/db/s/sharding_index_catalog_util.cpp | 2
 src/mongo/db/s/sharding_write_router.cpp | 2
 src/mongo/db/s/shardsvr_drop_collection_command.cpp | 4
 src/mongo/db/s/shardsvr_drop_indexes_command.cpp | 4
 src/mongo/db/s/shardsvr_merge_chunks_command.cpp | 7
 src/mongo/db/s/shardsvr_participant_block_command.cpp | 1
 src/mongo/db/s/shardsvr_split_chunk_command.cpp | 6
 src/mongo/db/s/split_chunk.cpp | 24
 56 files changed, 266 insertions(+), 213 deletions(-)
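
On the shard side, checkCollectionIdentity now returns the CSR's index information alongside the collection metadata, and the chunk precondition checks take it as an extra argument so that the ShardVersion they embed in StaleConfigInfo carries the real index version. A condensed sketch of the new calling convention, mirroring migration_source_manager.cpp below:

    // checkCollectionIdentity returns a CollectionPlacementAndIndexInfo pair.
    const auto [metadata, indexInfo] =
        checkCollectionIdentity(opCtx, nss, expectedEpoch, expectedTimestamp);

    // Each check builds ShardVersion(metadata.getShardVersion(),
    // indexInfo ? indexInfo->getCollectionIndexes() : boost::none) internally.
    checkShardKeyPattern(opCtx, nss, metadata, indexInfo, chunkRange);
    checkRangeWithinChunk(opCtx, nss, metadata, indexInfo, chunkRange);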
diff --git a/src/mongo/db/pipeline/dispatch_shard_pipeline_test.cpp b/src/mongo/db/pipeline/dispatch_shard_pipeline_test.cpp
index c3cac01efe1..2fa5b1a1545 100644
--- a/src/mongo/db/pipeline/dispatch_shard_pipeline_test.cpp
+++ b/src/mongo/db/pipeline/dispatch_shard_pipeline_test.cpp
@@ -228,7 +228,7 @@ TEST_F(DispatchShardPipelineTest, WrappedDispatchDoesRetryOnStaleConfigError) {
auto results =
router.route(operationContext(),
"dispatch shard pipeline"_sd,
- [&](OperationContext* opCtx, const ChunkManager& cm) {
+ [&](OperationContext* opCtx, const CollectionRoutingInfo& cri) {
return sharded_agg_helpers::dispatchShardPipeline(serializedCommand,
hasChangeStream,
startsWithDocuments,
@@ -273,6 +273,9 @@ TEST_F(DispatchShardPipelineTest, WrappedDispatchDoesRetryOnStaleConfigError) {
expectCollectionAndChunksAggregation(
kTestAggregateNss, epoch, timestamp, uuid, shardKeyPattern, {chunk1, chunk2});
+ expectCollectionAndIndexesAggregation(
+ kTestAggregateNss, epoch, timestamp, uuid, shardKeyPattern, {});
+
// That error should be retried, but only the one on that shard.
onCommand([&](const executor::RemoteCommandRequest& request) {
return CursorResponse(kTestAggregateNss, CursorId{0}, std::vector<BSONObj>{})
diff --git a/src/mongo/db/pipeline/process_interface/common_process_interface.cpp b/src/mongo/db/pipeline/process_interface/common_process_interface.cpp
index dcc200ccb3b..ee408636616 100644
--- a/src/mongo/db/pipeline/process_interface/common_process_interface.cpp
+++ b/src/mongo/db/pipeline/process_interface/common_process_interface.cpp
@@ -154,7 +154,7 @@ std::vector<BSONObj> CommonProcessInterface::getCurrentOps(
std::vector<FieldPath> CommonProcessInterface::collectDocumentKeyFieldsActingAsRouter(
OperationContext* opCtx, const NamespaceString& nss) const {
const auto cm =
- uassertStatusOK(Grid::get(opCtx)->catalogCache()->getCollectionRoutingInfo(opCtx, nss));
+ uassertStatusOK(Grid::get(opCtx)->catalogCache()->getCollectionPlacementInfo(opCtx, nss));
if (cm.isSharded()) {
return _shardKeyToDocumentKeyFields(cm.getShardKeyPattern().getKeyPatternFields());
}
@@ -205,13 +205,11 @@ bool CommonProcessInterface::keyPatternNamesExactPaths(const BSONObj& keyPattern
boost::optional<ShardVersion> CommonProcessInterface::refreshAndGetCollectionVersion(
const boost::intrusive_ptr<ExpressionContext>& expCtx, const NamespaceString& nss) const {
- const auto cm = uassertStatusOK(Grid::get(expCtx->opCtx)
- ->catalogCache()
- ->getCollectionRoutingInfoWithRefresh(expCtx->opCtx, nss));
+ const auto cri = uassertStatusOK(Grid::get(expCtx->opCtx)
+ ->catalogCache()
+ ->getCollectionRoutingInfoWithRefresh(expCtx->opCtx, nss));
- return cm.isSharded() ? boost::make_optional(ShardVersion(
- cm.getVersion(), boost::optional<CollectionIndexes>(boost::none)))
- : boost::none;
+ return cri.cm.isSharded() ? boost::make_optional(cri.getCollectionVersion()) : boost::none;
}
std::vector<FieldPath> CommonProcessInterface::_shardKeyToDocumentKeyFields(
diff --git a/src/mongo/db/pipeline/process_interface/mongos_process_interface.cpp b/src/mongo/db/pipeline/process_interface/mongos_process_interface.cpp
index d2b87d5f2e5..eacb4829b50 100644
--- a/src/mongo/db/pipeline/process_interface/mongos_process_interface.cpp
+++ b/src/mongo/db/pipeline/process_interface/mongos_process_interface.cpp
@@ -60,13 +60,13 @@ namespace {
* Returns the routing information for the namespace set on the passed ExpressionContext. Also
* verifies that the ExpressionContext's UUID, if present, matches that of the routing table entry.
*/
-StatusWith<ChunkManager> getCollectionRoutingInfo(
+StatusWith<CollectionRoutingInfo> getCollectionRoutingInfo(
const boost::intrusive_ptr<ExpressionContext>& expCtx) {
auto catalogCache = Grid::get(expCtx->opCtx)->catalogCache();
auto swRoutingInfo = catalogCache->getCollectionRoutingInfo(expCtx->opCtx, expCtx->ns);
// Additionally check that the ExpressionContext's UUID matches the collection routing info.
- if (swRoutingInfo.isOK() && expCtx->uuid && swRoutingInfo.getValue().isSharded()) {
- if (!swRoutingInfo.getValue().uuidMatches(*expCtx->uuid)) {
+ if (swRoutingInfo.isOK() && expCtx->uuid && swRoutingInfo.getValue().cm.isSharded()) {
+ if (!swRoutingInfo.getValue().cm.uuidMatches(*expCtx->uuid)) {
return {ErrorCodes::NamespaceNotFound,
str::stream() << "The UUID of collection " << expCtx->ns.ns()
<< " changed; it may have been dropped and re-created."};
@@ -160,10 +160,10 @@ boost::optional<Document> MongosProcessInterface::lookupSingleDocument(
str::stream() << "Looking up document matching " << redact(filter.toBson()),
[&]() -> std::vector<RemoteCursor> {
// Verify that the collection exists, with the correct UUID.
- auto cm = uassertStatusOK(getCollectionRoutingInfo(foreignExpCtx));
+ auto cri = uassertStatusOK(getCollectionRoutingInfo(foreignExpCtx));
// Finalize the 'find' command object based on the routing table information.
- if (findCmdIsByUuid && cm.isSharded()) {
+ if (findCmdIsByUuid && cri.cm.isSharded()) {
// Find by UUID and shard versioning do not work together (SERVER-31946). In
// the sharded case we've already checked the UUID, so find by namespace is
// safe. In the unlikely case that the collection has been deleted and a new
@@ -179,7 +179,7 @@ boost::optional<Document> MongosProcessInterface::lookupSingleDocument(
// is present, we may need to scatter-gather the query to all shards in order to
// find the document.
auto requests = getVersionedRequestsForTargetedShards(
- expCtx->opCtx, nss, cm, findCmd, filterObj, CollationSpec::kSimpleSpec);
+ expCtx->opCtx, nss, cri, findCmd, filterObj, CollationSpec::kSimpleSpec);
// Dispatch the requests. The 'establishCursors' method conveniently prepares the
// result into a vector of cursor responses for us.
@@ -299,7 +299,7 @@ std::vector<GenericCursor> MongosProcessInterface::getIdleCursors(
bool MongosProcessInterface::isSharded(OperationContext* opCtx, const NamespaceString& nss) {
auto cm =
- uassertStatusOK(Grid::get(opCtx)->catalogCache()->getCollectionRoutingInfo(opCtx, nss));
+ uassertStatusOK(Grid::get(opCtx)->catalogCache()->getCollectionPlacementInfo(opCtx, nss));
return cm.isSharded();
}
diff --git a/src/mongo/db/pipeline/process_interface/shardsvr_process_interface.cpp b/src/mongo/db/pipeline/process_interface/shardsvr_process_interface.cpp
index e33b6a6f323..62b52ad3282 100644
--- a/src/mongo/db/pipeline/process_interface/shardsvr_process_interface.cpp
+++ b/src/mongo/db/pipeline/process_interface/shardsvr_process_interface.cpp
@@ -63,7 +63,7 @@ using namespace fmt::literals;
bool ShardServerProcessInterface::isSharded(OperationContext* opCtx, const NamespaceString& nss) {
const auto cm =
- uassertStatusOK(Grid::get(opCtx)->catalogCache()->getCollectionRoutingInfo(opCtx, nss));
+ uassertStatusOK(Grid::get(opCtx)->catalogCache()->getCollectionPlacementInfo(opCtx, nss));
return cm.isSharded();
}
@@ -74,15 +74,21 @@ void ShardServerProcessInterface::checkRoutingInfoEpochOrThrow(
auto const shardId = ShardingState::get(expCtx->opCtx)->shardId();
auto* catalogCache = Grid::get(expCtx->opCtx)->catalogCache();
+ // Since we are only checking the epoch, don't advance the time in store of the index cache
+ auto currentGlobalIndexesInfo = catalogCache->getCollectionIndexInfo(expCtx->opCtx, nss);
+
// Mark the cache entry routingInfo for the 'nss' and 'shardId' if the entry is staler than
// 'targetCollectionVersion'.
- const ShardVersion ignoreIndexVersion{targetCollectionVersion,
- boost::optional<CollectionIndexes>(boost::none)};
+ const ShardVersion ignoreIndexVersion{
+ targetCollectionVersion,
+ currentGlobalIndexesInfo
+ ? boost::make_optional(currentGlobalIndexesInfo->getCollectionIndexes())
+ : boost::none};
catalogCache->invalidateShardOrEntireCollectionEntryForShardedCollection(
nss, ignoreIndexVersion, shardId);
const auto routingInfo =
- uassertStatusOK(catalogCache->getCollectionRoutingInfo(expCtx->opCtx, nss));
+ uassertStatusOK(catalogCache->getCollectionPlacementInfo(expCtx->opCtx, nss));
const auto foundVersion =
routingInfo.isSharded() ? routingInfo.getVersion() : ChunkVersion::UNSHARDED();
diff --git a/src/mongo/db/pipeline/sharded_agg_helpers.cpp b/src/mongo/db/pipeline/sharded_agg_helpers.cpp
index 6345ed858ee..154787f0f6c 100644
--- a/src/mongo/db/pipeline/sharded_agg_helpers.cpp
+++ b/src/mongo/db/pipeline/sharded_agg_helpers.cpp
@@ -185,7 +185,7 @@ std::vector<RemoteCursor> establishShardCursors(
std::shared_ptr<executor::TaskExecutor> executor,
const NamespaceString& nss,
bool mustRunOnAll,
- boost::optional<ChunkManager>& cm,
+ const boost::optional<CollectionRoutingInfo>& cri,
const std::set<ShardId>& shardIds,
const BSONObj& cmdObj,
const boost::optional<analyze_shard_key::TargetedSampleId>& sampleId,
@@ -198,7 +198,7 @@ std::vector<RemoteCursor> establishShardCursors(
std::vector<std::pair<ShardId, BSONObj>> requests;
// If we don't need to run on all shards, then we should always have a valid routing table.
- invariant(cm || mustRunOnAll);
+ invariant(cri || mustRunOnAll);
if (mustRunOnAll) {
// The pipeline contains a stage which must be run on all shards. Skip versioning and
@@ -206,14 +206,11 @@ std::vector<RemoteCursor> establishShardCursors(
for (const auto& shardId : shardIds) {
requests.emplace_back(shardId, cmdObj);
}
- } else if (cm->isSharded()) {
+ } else if (cri->cm.isSharded()) {
// The collection is sharded. Use the routing table to decide which shards to target
// based on the query and collation, and build versioned requests for them.
for (const auto& shardId : shardIds) {
- ChunkVersion placementVersion = cm->getVersion(shardId);
- auto versionedCmdObj = appendShardVersion(
- cmdObj,
- ShardVersion(placementVersion, boost::optional<CollectionIndexes>(boost::none)));
+ auto versionedCmdObj = appendShardVersion(cmdObj, cri->getShardVersion(shardId));
if (sampleId && sampleId->isFor(shardId)) {
versionedCmdObj =
@@ -225,17 +222,17 @@ std::vector<RemoteCursor> establishShardCursors(
} else {
// The collection is unsharded. Target only the primary shard for the database.
// Don't append shard version info when contacting the config servers.
- auto versionedCmdObj = cm->dbPrimary() != ShardId::kConfigServerId
+ auto versionedCmdObj = cri->cm.dbPrimary() != ShardId::kConfigServerId
? appendShardVersion(cmdObj, ShardVersion::UNSHARDED())
: cmdObj;
- versionedCmdObj = appendDbVersionIfPresent(versionedCmdObj, cm->dbVersion());
+ versionedCmdObj = appendDbVersionIfPresent(versionedCmdObj, cri->cm.dbVersion());
if (sampleId) {
- invariant(sampleId->isFor(cm->dbPrimary()));
+ invariant(sampleId->isFor(cri->cm.dbPrimary()));
versionedCmdObj = analyze_shard_key::appendSampleId(versionedCmdObj, sampleId->getId());
}
- requests.emplace_back(cm->dbPrimary(), std::move(versionedCmdObj));
+ requests.emplace_back(cri->cm.dbPrimary(), std::move(versionedCmdObj));
}
if (MONGO_unlikely(shardedAggregateHangBeforeEstablishingShardCursors.shouldFail())) {
@@ -258,7 +255,7 @@ std::vector<RemoteCursor> establishShardCursors(
std::set<ShardId> getTargetedShards(boost::intrusive_ptr<ExpressionContext> expCtx,
bool mustRunOnAllShards,
- const boost::optional<ChunkManager>& cm,
+ const boost::optional<CollectionRoutingInfo>& cri,
const BSONObj shardQuery,
const BSONObj collation) {
if (mustRunOnAllShards) {
@@ -267,8 +264,8 @@ std::set<ShardId> getTargetedShards(boost::intrusive_ptr<ExpressionContext> expC
return {std::make_move_iterator(shardIds.begin()), std::make_move_iterator(shardIds.end())};
}
- invariant(cm);
- return getTargetedShardsForQuery(expCtx, *cm, shardQuery, collation);
+ invariant(cri);
+ return getTargetedShardsForQuery(expCtx, cri->cm, shardQuery, collation);
}
/**
@@ -826,15 +823,13 @@ std::unique_ptr<Pipeline, PipelineDeleter> runPipelineDirectlyOnSingleShard(
auto* opCtx = expCtx->opCtx;
auto* catalogCache = Grid::get(opCtx)->catalogCache();
- auto cm =
+ auto cri =
uassertStatusOK(catalogCache->getCollectionRoutingInfo(opCtx, request.getNamespace()));
auto versionedCmdObj = [&] {
- if (cm.isSharded()) {
- ChunkVersion placementVersion = cm.getVersion(shardId);
- return appendShardVersion(
- aggregation_request_helper::serializeToCommandObj(request),
- ShardVersion(placementVersion, boost::optional<CollectionIndexes>(boost::none)));
+ if (cri.cm.isSharded()) {
+ return appendShardVersion(aggregation_request_helper::serializeToCommandObj(request),
+ cri.getShardVersion(shardId));
} else {
// The collection is unsharded. Don't append shard version info when contacting the
// config servers.
@@ -842,7 +837,7 @@ std::unique_ptr<Pipeline, PipelineDeleter> runPipelineDirectlyOnSingleShard(
? appendShardVersion(aggregation_request_helper::serializeToCommandObj(request),
ShardVersion::UNSHARDED())
: aggregation_request_helper::serializeToCommandObj(request);
- return appendDbVersionIfPresent(std::move(cmdObjWithShardVersion), cm.dbVersion());
+ return appendDbVersionIfPresent(std::move(cmdObjWithShardVersion), cri.cm.dbVersion());
}
}();
@@ -888,7 +883,7 @@ boost::optional<ShardedExchangePolicy> checkIfEligibleForExchange(OperationConte
return boost::none;
}
- const auto cm =
+ const auto [cm, _] =
uassertStatusOK(getCollectionRoutingInfoForTxnCmd(opCtx, mergeStage->getOutputNs()));
if (!cm.isSharded()) {
return boost::none;
@@ -1058,7 +1053,7 @@ DispatchShardPipelineResults dispatchShardPipeline(
auto executionNsRoutingInfo = executionNsRoutingInfoStatus.isOK()
? std::move(executionNsRoutingInfoStatus.getValue())
- : boost::optional<ChunkManager>{};
+ : boost::optional<CollectionRoutingInfo>{};
// A $changeStream update lookup attempts to retrieve a single document by documentKey. In this
// case, we wish to target a single shard using the simple collation, but we also want to ensure
@@ -1080,7 +1075,7 @@ DispatchShardPipelineResults dispatchShardPipeline(
// - The pipeline contains one or more stages which must always merge on mongoS.
const bool needsSplit = (shardIds.size() > 1u || needsMongosMerge ||
(needsPrimaryShardMerge && executionNsRoutingInfo &&
- *(shardIds.begin()) != executionNsRoutingInfo->dbPrimary()));
+ *(shardIds.begin()) != executionNsRoutingInfo->cm.dbPrimary()));
boost::optional<ShardedExchangePolicy> exchangeSpec;
boost::optional<SplitPipeline> splitPipelines;
@@ -1224,7 +1219,7 @@ DispatchShardPipelineResults dispatchShardPipeline(
// must increment the number of involved shards.
CurOp::get(opCtx)->debug().nShards = shardIds.size() +
(needsPrimaryShardMerge && executionNsRoutingInfo &&
- !shardIds.count(executionNsRoutingInfo->dbPrimary()));
+ !shardIds.count(executionNsRoutingInfo->cm.dbPrimary()));
return DispatchShardPipelineResults{needsPrimaryShardMerge,
std::move(ownedCursors),
@@ -1504,8 +1499,8 @@ BSONObj targetShardsForExplain(Pipeline* ownedPipeline) {
return BSON("pipeline" << explainBuilder.done());
}
-StatusWith<ChunkManager> getExecutionNsRoutingInfo(OperationContext* opCtx,
- const NamespaceString& execNss) {
+StatusWith<CollectionRoutingInfo> getExecutionNsRoutingInfo(OperationContext* opCtx,
+ const NamespaceString& execNss) {
// First, verify that there are shards present in the cluster. If not, then we return the
// stronger 'ShardNotFound' error rather than 'NamespaceNotFound'. We must do this because
// $changeStream aggregations ignore NamespaceNotFound in order to allow streams to be opened on
@@ -1589,7 +1584,8 @@ std::unique_ptr<Pipeline, PipelineDeleter> attachCursorToPipeline(
return router.route(
expCtx->opCtx,
"targeting pipeline to attach cursors"_sd,
- [&](OperationContext* opCtx, const ChunkManager& cm) {
+ [&](OperationContext* opCtx, const CollectionRoutingInfo& cri) {
+ const auto& cm = cri.cm;
auto pipelineToTarget = pipeline->clone();
if (!cm.isSharded() && expCtx->ns != NamespaceString::kConfigsvrCollectionsNamespace) {
diff --git a/src/mongo/db/pipeline/sharded_agg_helpers.h b/src/mongo/db/pipeline/sharded_agg_helpers.h
index 66694651900..66c494a7e77 100644
--- a/src/mongo/db/pipeline/sharded_agg_helpers.h
+++ b/src/mongo/db/pipeline/sharded_agg_helpers.h
@@ -178,8 +178,8 @@ Status appendExplainResults(DispatchShardPipelineResults&& dispatchResults,
* Returns 'ShardNotFound' or 'NamespaceNotFound' if there are no shards in the cluster or if
* collection 'execNss' does not exist, respectively.
*/
-StatusWith<ChunkManager> getExecutionNsRoutingInfo(OperationContext* opCtx,
- const NamespaceString& execNss);
+StatusWith<CollectionRoutingInfo> getExecutionNsRoutingInfo(OperationContext* opCtx,
+ const NamespaceString& execNss);
/**
* Returns true if an aggregation over 'nss' must run on all shards.
diff --git a/src/mongo/db/pipeline/sharded_union_test.cpp b/src/mongo/db/pipeline/sharded_union_test.cpp
index 3e19dc6e1e8..68090cd8dd1 100644
--- a/src/mongo/db/pipeline/sharded_union_test.cpp
+++ b/src/mongo/db/pipeline/sharded_union_test.cpp
@@ -195,6 +195,9 @@ TEST_F(ShardedUnionTest, RetriesSubPipelineOnStaleConfigError) {
expectCollectionAndChunksAggregation(
kTestAggregateNss, epoch, timestamp, uuid, shardKeyPattern, {chunk1, chunk2});
+ expectCollectionAndIndexesAggregation(
+ kTestAggregateNss, epoch, timestamp, uuid, shardKeyPattern, {});
+
// That error should be retried, but only the one on that shard.
onCommand([&](const executor::RemoteCommandRequest& request) {
return CursorResponse(kTestAggregateNss, CursorId{0}, {expectedResult.toBson()})
@@ -286,6 +289,9 @@ TEST_F(ShardedUnionTest, CorrectlySplitsSubPipelineIfRefreshedDistributionRequir
expectCollectionAndChunksAggregation(
kTestAggregateNss, epoch, timestamp, uuid, shardKeyPattern, {chunk1, chunk2, chunk3});
+ expectCollectionAndIndexesAggregation(
+ kTestAggregateNss, epoch, timestamp, uuid, shardKeyPattern, {});
+
// That error should be retried, this time two shards.
onCommand([&](const executor::RemoteCommandRequest& request) {
return CursorResponse(
@@ -374,6 +380,9 @@ TEST_F(ShardedUnionTest, AvoidsSplittingSubPipelineIfRefreshedDistributionDoesNo
expectCollectionAndChunksAggregation(
kTestAggregateNss, epoch, timestamp, uuid, shardKeyPattern, {chunk1});
+ expectCollectionAndIndexesAggregation(
+ kTestAggregateNss, epoch, timestamp, uuid, shardKeyPattern, {});
+
// That error should be retried, this time targeting only one shard.
onCommand([&](const executor::RemoteCommandRequest& request) {
ASSERT_EQ(request.target, HostAndPort(shards[0].getHost())) << request;
@@ -441,6 +450,9 @@ TEST_F(ShardedUnionTest, IncorporatesViewDefinitionAndRetriesWhenViewErrorReceiv
expectCollectionAndChunksAggregation(
kTestAggregateNss, epoch, timestamp, uuid, shardKeyPattern, {chunk1, chunk2});
+ expectCollectionAndIndexesAggregation(
+ kTestAggregateNss, epoch, timestamp, uuid, shardKeyPattern, {});
+
// Mock out the sharded view error responses from both shards.
std::vector<BSONObj> viewPipeline = {fromjson("{$group: {_id: '$groupKey'}}"),
// Prevent the $match from being pushed into the shards
diff --git a/src/mongo/db/s/analyze_shard_key_cmd_util.cpp b/src/mongo/db/s/analyze_shard_key_cmd_util.cpp
index ea95a8b4b06..e7a46b9021a 100644
--- a/src/mongo/db/s/analyze_shard_key_cmd_util.cpp
+++ b/src/mongo/db/s/analyze_shard_key_cmd_util.cpp
@@ -421,7 +421,7 @@ boost::optional<int64_t> getNumOrphanDocuments(OperationContext* opCtx,
}
auto cm =
- uassertStatusOK(Grid::get(opCtx)->catalogCache()->getCollectionRoutingInfo(opCtx, nss));
+ uassertStatusOK(Grid::get(opCtx)->catalogCache()->getCollectionPlacementInfo(opCtx, nss));
if (!cm.isSharded()) {
return boost::none;
diff --git a/src/mongo/db/s/balancer/balancer.cpp b/src/mongo/db/s/balancer/balancer.cpp
index c4611cbdc77..b0564dc09b0 100644
--- a/src/mongo/db/s/balancer/balancer.cpp
+++ b/src/mongo/db/s/balancer/balancer.cpp
@@ -185,7 +185,7 @@ Status processManualMigrationOutcome(OperationContext* opCtx,
}
auto swCM =
- Grid::get(opCtx)->catalogCache()->getShardedCollectionRoutingInfoWithRefresh(opCtx, nss);
+ Grid::get(opCtx)->catalogCache()->getShardedCollectionPlacementInfoWithRefresh(opCtx, nss);
if (!swCM.isOK()) {
return swCM.getStatus();
}
@@ -432,8 +432,8 @@ Status Balancer::moveRange(OperationContext* opCtx,
const auto [fromShardId, min] = [&]() {
const auto cm = uassertStatusOK(
- Grid::get(opCtx)->catalogCache()->getShardedCollectionRoutingInfoWithRefresh(opCtx,
- nss));
+ Grid::get(opCtx)->catalogCache()->getShardedCollectionPlacementInfoWithRefresh(opCtx,
+ nss));
// TODO SERVER-64926 do not assume min always present
const auto& chunk = cm.findIntersectingChunkWithSimpleCollation(*request.getMin());
return std::tuple<ShardId, BSONObj>{chunk.getShardId(), chunk.getMin()};
@@ -986,7 +986,7 @@ Status Balancer::_splitChunksIfNeeded(OperationContext* opCtx) {
for (const auto& splitInfo : chunksToSplitStatus.getValue()) {
auto routingInfoStatus =
- Grid::get(opCtx)->catalogCache()->getShardedCollectionRoutingInfoWithRefresh(
+ Grid::get(opCtx)->catalogCache()->getShardedCollectionPlacementInfoWithRefresh(
opCtx, splitInfo.nss);
if (!routingInfoStatus.isOK()) {
return routingInfoStatus.getStatus();
diff --git a/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp b/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp
index e855f6ce439..98f9709efeb 100644
--- a/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp
+++ b/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp
@@ -544,7 +544,7 @@ BalancerChunkSelectionPolicyImpl::selectSpecificChunkToMove(OperationContext* op
const auto& shardStats = shardStatsStatus.getValue();
auto routingInfoStatus =
- Grid::get(opCtx)->catalogCache()->getShardedCollectionRoutingInfoWithRefresh(opCtx, nss);
+ Grid::get(opCtx)->catalogCache()->getShardedCollectionPlacementInfoWithRefresh(opCtx, nss);
if (!routingInfoStatus.isOK()) {
return routingInfoStatus.getStatus();
}
@@ -577,7 +577,7 @@ Status BalancerChunkSelectionPolicyImpl::checkMoveAllowed(OperationContext* opCt
auto shardStats = std::move(shardStatsStatus.getValue());
auto routingInfoStatus =
- Grid::get(opCtx)->catalogCache()->getShardedCollectionRoutingInfoWithRefresh(opCtx, nss);
+ Grid::get(opCtx)->catalogCache()->getShardedCollectionPlacementInfoWithRefresh(opCtx, nss);
if (!routingInfoStatus.isOK()) {
return routingInfoStatus.getStatus();
}
@@ -610,7 +610,7 @@ Status BalancerChunkSelectionPolicyImpl::checkMoveAllowed(OperationContext* opCt
StatusWith<SplitInfoVector> BalancerChunkSelectionPolicyImpl::_getSplitCandidatesForCollection(
OperationContext* opCtx, const NamespaceString& nss, const ShardStatisticsVector& shardStats) {
auto routingInfoStatus =
- Grid::get(opCtx)->catalogCache()->getShardedCollectionRoutingInfoWithRefresh(opCtx, nss);
+ Grid::get(opCtx)->catalogCache()->getShardedCollectionPlacementInfoWithRefresh(opCtx, nss);
if (!routingInfoStatus.isOK()) {
return routingInfoStatus.getStatus();
}
@@ -650,7 +650,7 @@ BalancerChunkSelectionPolicyImpl::_getMigrateCandidatesForCollection(
const boost::optional<CollectionDataSizeInfoForBalancing>& collDataSizeInfo,
stdx::unordered_set<ShardId>* availableShards) {
auto routingInfoStatus =
- Grid::get(opCtx)->catalogCache()->getShardedCollectionRoutingInfoWithRefresh(opCtx, nss);
+ Grid::get(opCtx)->catalogCache()->getShardedCollectionPlacementInfoWithRefresh(opCtx, nss);
if (!routingInfoStatus.isOK()) {
return routingInfoStatus.getStatus();
}
diff --git a/src/mongo/db/s/balancer/balancer_defragmentation_policy_impl.cpp b/src/mongo/db/s/balancer/balancer_defragmentation_policy_impl.cpp
index b1521933098..38803c6981b 100644
--- a/src/mongo/db/s/balancer/balancer_defragmentation_policy_impl.cpp
+++ b/src/mongo/db/s/balancer/balancer_defragmentation_policy_impl.cpp
@@ -64,9 +64,8 @@ const std::string kRemainingChunksToProcess("remainingChunksToProcess");
ShardVersion getShardVersion(OperationContext* opCtx,
const ShardId& shardId,
const NamespaceString& nss) {
- auto cm = Grid::get(opCtx)->catalogCache()->getShardedCollectionRoutingInfo(opCtx, nss);
- const auto placementVersion = cm.getVersion(shardId);
- return ShardVersion(placementVersion, boost::optional<CollectionIndexes>(boost::none));
+ auto cri = Grid::get(opCtx)->catalogCache()->getShardedCollectionRoutingInfo(opCtx, nss);
+ return cri.getShardVersion(shardId);
}
std::vector<ChunkType> getCollectionChunks(OperationContext* opCtx, const CollectionType& coll) {
diff --git a/src/mongo/db/s/balancer/cluster_chunks_resize_policy_impl.cpp b/src/mongo/db/s/balancer/cluster_chunks_resize_policy_impl.cpp
index e29e39a0f41..02f04c0690c 100644
--- a/src/mongo/db/s/balancer/cluster_chunks_resize_policy_impl.cpp
+++ b/src/mongo/db/s/balancer/cluster_chunks_resize_policy_impl.cpp
@@ -43,7 +43,7 @@ namespace {
ChunkVersion getShardVersion(OperationContext* opCtx,
const ShardId& shardId,
const NamespaceString& nss) {
- auto cm = Grid::get(opCtx)->catalogCache()->getShardedCollectionRoutingInfo(opCtx, nss);
+ auto cm = Grid::get(opCtx)->catalogCache()->getShardedCollectionPlacementInfo(opCtx, nss);
return cm.getVersion(shardId);
}
diff --git a/src/mongo/db/s/chunk_operation_precondition_checks.cpp b/src/mongo/db/s/chunk_operation_precondition_checks.cpp
index 2eb6f6836a9..b8652a190b9 100644
--- a/src/mongo/db/s/chunk_operation_precondition_checks.cpp
+++ b/src/mongo/db/s/chunk_operation_precondition_checks.cpp
@@ -34,16 +34,18 @@
namespace mongo {
-CollectionMetadata checkCollectionIdentity(OperationContext* opCtx,
- const NamespaceString& nss,
- const OID& expectedEpoch,
- const boost::optional<Timestamp>& expectedTimestamp) {
+CollectionPlacementAndIndexInfo checkCollectionIdentity(
+ OperationContext* opCtx,
+ const NamespaceString& nss,
+ const OID& expectedEpoch,
+ const boost::optional<Timestamp>& expectedTimestamp) {
AutoGetCollection collection(opCtx, nss, MODE_IS);
const auto shardId = ShardingState::get(opCtx)->shardId();
auto scopedCsr = CollectionShardingRuntime::assertCollectionLockedAndAcquire(
opCtx, nss, CSRAcquisitionMode::kShared);
auto optMetadata = scopedCsr->getCurrentMetadataIfKnown();
+ auto optGlobalIndexInfo = scopedCsr->getIndexes(opCtx);
uassert(StaleConfigInfo(nss,
ShardVersion::IGNORED() /* receivedVersion */,
@@ -67,7 +69,7 @@ CollectionMetadata checkCollectionIdentity(OperationContext* opCtx,
const auto placementVersion = metadata.getShardVersion();
const auto shardVersion =
- ShardVersion(placementVersion, boost::optional<CollectionIndexes>(boost::none));
+ ShardVersion(placementVersion, scopedCsr->getCollectionIndexes(opCtx));
uassert(StaleConfigInfo(nss,
ShardVersion::IGNORED() /* receivedVersion */,
@@ -86,18 +88,20 @@ CollectionMetadata checkCollectionIdentity(OperationContext* opCtx,
str::stream() << "Shard does not contain any chunks for collection.",
placementVersion.majorVersion() > 0);
- return metadata;
+ return std::make_pair(metadata, optGlobalIndexInfo);
}
void checkShardKeyPattern(OperationContext* opCtx,
const NamespaceString& nss,
const CollectionMetadata& metadata,
+ const boost::optional<GlobalIndexesCache>& globalIndexInfo,
const ChunkRange& chunkRange) {
const auto shardId = ShardingState::get(opCtx)->shardId();
const auto& keyPattern = metadata.getKeyPattern();
- const auto placementVersion = metadata.getShardVersion();
const auto shardVersion =
- ShardVersion(placementVersion, boost::optional<CollectionIndexes>(boost::none));
+ ShardVersion(metadata.getShardVersion(),
+ globalIndexInfo ? boost::make_optional(globalIndexInfo->getCollectionIndexes())
+ : boost::none);
uassert(StaleConfigInfo(nss,
ShardVersion::IGNORED() /* receivedVersion */,
@@ -112,11 +116,13 @@ void checkShardKeyPattern(OperationContext* opCtx,
void checkChunkMatchesRange(OperationContext* opCtx,
const NamespaceString& nss,
const CollectionMetadata& metadata,
+ const boost::optional<GlobalIndexesCache>& globalIndexInfo,
const ChunkRange& chunkRange) {
const auto shardId = ShardingState::get(opCtx)->shardId();
- const auto placementVersion = metadata.getShardVersion();
const auto shardVersion =
- ShardVersion(placementVersion, boost::optional<CollectionIndexes>(boost::none));
+ ShardVersion(metadata.getShardVersion(),
+ globalIndexInfo ? boost::make_optional(globalIndexInfo->getCollectionIndexes())
+ : boost::none);
ChunkType existingChunk;
uassert(StaleConfigInfo(nss,
@@ -139,11 +145,13 @@ void checkChunkMatchesRange(OperationContext* opCtx,
void checkRangeWithinChunk(OperationContext* opCtx,
const NamespaceString& nss,
const CollectionMetadata& metadata,
+ const boost::optional<GlobalIndexesCache>& globalIndexInfo,
const ChunkRange& chunkRange) {
const auto shardId = ShardingState::get(opCtx)->shardId();
- const auto placementVersion = metadata.getShardVersion();
const auto shardVersion =
- ShardVersion(placementVersion, boost::optional<CollectionIndexes>(boost::none));
+ ShardVersion(metadata.getShardVersion(),
+ globalIndexInfo ? boost::make_optional(globalIndexInfo->getCollectionIndexes())
+ : boost::none);
ChunkType existingChunk;
uassert(StaleConfigInfo(nss,
@@ -159,11 +167,13 @@ void checkRangeWithinChunk(OperationContext* opCtx,
void checkRangeOwnership(OperationContext* opCtx,
const NamespaceString& nss,
const CollectionMetadata& metadata,
+ const boost::optional<GlobalIndexesCache>& globalIndexInfo,
const ChunkRange& chunkRange) {
const auto shardId = ShardingState::get(opCtx)->shardId();
- const auto placementVersion = metadata.getShardVersion();
const auto shardVersion =
- ShardVersion(placementVersion, boost::optional<CollectionIndexes>(boost::none));
+ ShardVersion(metadata.getShardVersion(),
+ globalIndexInfo ? boost::make_optional(globalIndexInfo->getCollectionIndexes())
+ : boost::none);
ChunkType existingChunk;
BSONObj minKey = chunkRange.getMin();
diff --git a/src/mongo/db/s/chunk_operation_precondition_checks.h b/src/mongo/db/s/chunk_operation_precondition_checks.h
index 4720091c1c2..ae17fac61dc 100644
--- a/src/mongo/db/s/chunk_operation_precondition_checks.h
+++ b/src/mongo/db/s/chunk_operation_precondition_checks.h
@@ -29,7 +29,9 @@
#include "mongo/bson/oid.h"
#include "mongo/bson/timestamp.h"
#include "mongo/db/s/collection_metadata.h"
+#include "mongo/db/s/collection_sharding_runtime.h"
#include "mongo/s/catalog/type_chunk.h"
+#include "mongo/s/global_index_cache.h"
namespace mongo {
/**
@@ -40,14 +42,15 @@ namespace mongo {
/**
* Checks that the metadata for the collection is present in the CSR, that the collection is sharded
* according to that metadata, and that the expected epoch and timestamp match what is present in
- * the CSR. Returns the collection metadata.
+ * the CSR. Returns the collection metadata and index info.
*
* Throws StaleShardVersion otherwise.
*/
-CollectionMetadata checkCollectionIdentity(OperationContext* opCtx,
- const NamespaceString& nss,
- const OID& expectedEpoch,
- const boost::optional<Timestamp>& expectedTimestamp);
+CollectionPlacementAndIndexInfo checkCollectionIdentity(
+ OperationContext* opCtx,
+ const NamespaceString& nss,
+ const OID& expectedEpoch,
+ const boost::optional<Timestamp>& expectedTimestamp);
/**
* Checks that the chunk range matches the shard key pattern in the metadata.
@@ -57,6 +60,7 @@ CollectionMetadata checkCollectionIdentity(OperationContext* opCtx,
void checkShardKeyPattern(OperationContext* opCtx,
const NamespaceString& nss,
const CollectionMetadata& metadata,
+ const boost::optional<GlobalIndexesCache>& indexInfo,
const ChunkRange& chunkRange);
/**
@@ -67,6 +71,7 @@ void checkShardKeyPattern(OperationContext* opCtx,
void checkChunkMatchesRange(OperationContext* opCtx,
const NamespaceString& nss,
const CollectionMetadata& metadata,
+ const boost::optional<GlobalIndexesCache>& indexInfo,
const ChunkRange& chunkRange);
/**
@@ -78,6 +83,7 @@ void checkChunkMatchesRange(OperationContext* opCtx,
void checkRangeWithinChunk(OperationContext* opCtx,
const NamespaceString& nss,
const CollectionMetadata& metadata,
+ const boost::optional<GlobalIndexesCache>& indexInfo,
const ChunkRange& chunkRange);
/**
@@ -88,6 +94,7 @@ void checkRangeWithinChunk(OperationContext* opCtx,
void checkRangeOwnership(OperationContext* opCtx,
const NamespaceString& nss,
const CollectionMetadata& metadata,
+ const boost::optional<GlobalIndexesCache>& indexInfo,
const ChunkRange& chunkRange);
} // namespace mongo
diff --git a/src/mongo/db/s/chunk_splitter.cpp b/src/mongo/db/s/chunk_splitter.cpp
index 043b0139b20..9c403b8d902 100644
--- a/src/mongo/db/s/chunk_splitter.cpp
+++ b/src/mongo/db/s/chunk_splitter.cpp
@@ -115,7 +115,7 @@ Status splitChunkAtMultiplePoints(OperationContext* opCtx,
void moveChunk(OperationContext* opCtx, const NamespaceString& nss, const BSONObj& minKey) {
// We need to have the most up-to-date view of the chunk we are about to move.
const auto cm =
- uassertStatusOK(Grid::get(opCtx)->catalogCache()->getCollectionRoutingInfo(opCtx, nss));
+ uassertStatusOK(Grid::get(opCtx)->catalogCache()->getCollectionPlacementInfo(opCtx, nss));
uassert(ErrorCodes::NamespaceNotSharded,
"Could not move chunk. Collection is no longer sharded",
@@ -305,7 +305,7 @@ void ChunkSplitter::_runAutosplit(std::shared_ptr<ChunkSplitStateDriver> chunkSp
const auto opCtx = cc().makeOperationContext();
const auto cm = uassertStatusOK(
- Grid::get(opCtx.get())->catalogCache()->getCollectionRoutingInfo(opCtx.get(), nss));
+ Grid::get(opCtx.get())->catalogCache()->getCollectionPlacementInfo(opCtx.get(), nss));
uassert(ErrorCodes::NamespaceNotSharded,
"Could not split chunk. Collection is no longer sharded",
cm.isSharded());
diff --git a/src/mongo/db/s/collection_sharding_runtime.h b/src/mongo/db/s/collection_sharding_runtime.h
index cac64e74c5c..c3c089d3db7 100644
--- a/src/mongo/db/s/collection_sharding_runtime.h
+++ b/src/mongo/db/s/collection_sharding_runtime.h
@@ -40,6 +40,8 @@
namespace mongo {
enum class CSRAcquisitionMode { kShared, kExclusive };
+typedef std::pair<CollectionMetadata, boost::optional<GlobalIndexesCache>>
+ CollectionPlacementAndIndexInfo;
/**
* See the comments for CollectionShardingState for more information on how this class fits in the
diff --git a/src/mongo/db/s/collmod_coordinator.cpp b/src/mongo/db/s/collmod_coordinator.cpp
index 7c3baf37370..d4f45a5b8bd 100644
--- a/src/mongo/db/s/collmod_coordinator.cpp
+++ b/src/mongo/db/s/collmod_coordinator.cpp
@@ -134,7 +134,7 @@ void CollModCoordinator::_saveShardingInfoOnCoordinatorIfNecessary(OperationCont
if (!_shardingInfo && _collInfo->isSharded) {
ShardingInfo info;
const auto chunkManager =
- uassertStatusOK(Grid::get(opCtx)->catalogCache()->getCollectionRoutingInfoWithRefresh(
+ uassertStatusOK(Grid::get(opCtx)->catalogCache()->getCollectionPlacementInfoWithRefresh(
opCtx, _collInfo->nsForTargeting));
info.primaryShard = chunkManager.dbPrimary();
diff --git a/src/mongo/db/s/config/configsvr_reshard_collection_cmd.cpp b/src/mongo/db/s/config/configsvr_reshard_collection_cmd.cpp
index 5b604a6e48e..87267b0fbf5 100644
--- a/src/mongo/db/s/config/configsvr_reshard_collection_cmd.cpp
+++ b/src/mongo/db/s/config/configsvr_reshard_collection_cmd.cpp
@@ -144,7 +144,7 @@ public:
!serverGlobalParams.featureCompatibility.isUpgradingOrDowngrading());
const auto cm = uassertStatusOK(
- Grid::get(opCtx)->catalogCache()->getShardedCollectionRoutingInfoWithRefresh(
+ Grid::get(opCtx)->catalogCache()->getShardedCollectionPlacementInfoWithRefresh(
opCtx, nss));
auto tempReshardingNss =
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
index 027ca3b5b1d..56fc013d2ec 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
@@ -1779,7 +1779,7 @@ void ShardingCatalogManager::splitOrMarkJumbo(OperationContext* opCtx,
const BSONObj& minKey,
boost::optional<int64_t> optMaxChunkSizeBytes) {
const auto cm = uassertStatusOK(
- Grid::get(opCtx)->catalogCache()->getShardedCollectionRoutingInfoWithRefresh(opCtx, nss));
+ Grid::get(opCtx)->catalogCache()->getShardedCollectionPlacementInfoWithRefresh(opCtx, nss));
auto chunk = cm.findIntersectingChunkWithSimpleCollation(minKey);
try {
@@ -1884,8 +1884,8 @@ void ShardingCatalogManager::setAllowMigrationsAndBumpOneChunk(
Lock::ExclusiveLock lk(opCtx, _kChunkOpLock);
const auto cm = uassertStatusOK(
- Grid::get(opCtx)->catalogCache()->getShardedCollectionRoutingInfoWithRefresh(opCtx,
- nss));
+ Grid::get(opCtx)->catalogCache()->getShardedCollectionPlacementInfoWithRefresh(opCtx,
+ nss));
uassert(ErrorCodes::InvalidUUID,
str::stream() << "Collection uuid " << collectionUUID
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_collection_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_collection_operations.cpp
index 78167e37bc7..e7ef2314ad3 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_collection_operations.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_collection_operations.cpp
@@ -467,7 +467,7 @@ void ShardingCatalogManager::configureCollectionBalancing(
}
const auto cm = uassertStatusOK(
- Grid::get(opCtx)->catalogCache()->getShardedCollectionRoutingInfoWithRefresh(opCtx, nss));
+ Grid::get(opCtx)->catalogCache()->getShardedCollectionPlacementInfoWithRefresh(opCtx, nss));
std::set<ShardId> shardsIds;
cm.getAllShardIds(&shardsIds);
@@ -511,7 +511,7 @@ void ShardingCatalogManager::applyLegacyConfigurationToSessionsCollection(Operat
opCtx, NamespaceString::kLogicalSessionsNamespace, txnNumber);
});
const auto cm = uassertStatusOK(
- Grid::get(opCtx)->catalogCache()->getShardedCollectionRoutingInfoWithRefresh(
+ Grid::get(opCtx)->catalogCache()->getShardedCollectionPlacementInfoWithRefresh(
opCtx, NamespaceString::kLogicalSessionsNamespace));
std::set<ShardId> shardsIds;
cm.getAllShardIds(&shardsIds);
@@ -568,7 +568,7 @@ void ShardingCatalogManager::updateTimeSeriesGranularity(OperationContext* opCtx
const NamespaceString& nss,
BucketGranularityEnum granularity) {
const auto cm = uassertStatusOK(
- Grid::get(opCtx)->catalogCache()->getShardedCollectionRoutingInfoWithRefresh(opCtx, nss));
+ Grid::get(opCtx)->catalogCache()->getShardedCollectionPlacementInfoWithRefresh(opCtx, nss));
std::set<ShardId> shardIds;
cm.getAllShardIds(&shardIds);
diff --git a/src/mongo/db/s/create_collection_coordinator.cpp b/src/mongo/db/s/create_collection_coordinator.cpp
index 9ab79ac0935..8af984204a9 100644
--- a/src/mongo/db/s/create_collection_coordinator.cpp
+++ b/src/mongo/db/s/create_collection_coordinator.cpp
@@ -679,28 +679,29 @@ CreateCollectionCoordinator::_checkIfCollectionAlreadyShardedWithSameOptions(
}
// Check if there is a standard sharded collection that matches the original request parameters
- auto routingInfo =
+ auto cri =
uassertStatusOK(Grid::get(opCtx)->catalogCache()->getCollectionRoutingInfoWithRefresh(
opCtx, originalNss()));
- if (routingInfo.isSharded()) {
+ auto& cm = cri.cm;
+ auto& gii = cri.gii;
+ if (cm.isSharded()) {
auto requestMatchesExistingCollection = [&] {
// No timeseries fields in request
if (_request.getTimeseries()) {
return false;
}
- if (_request.getUnique().value_or(false) != routingInfo.isUnique()) {
+ if (_request.getUnique().value_or(false) != cm.isUnique()) {
return false;
}
- if (SimpleBSONObjComparator::kInstance.evaluate(
- *_request.getShardKey() != routingInfo.getShardKeyPattern().toBSON())) {
+ if (SimpleBSONObjComparator::kInstance.evaluate(*_request.getShardKey() !=
+ cm.getShardKeyPattern().toBSON())) {
return false;
}
- auto defaultCollator = routingInfo.getDefaultCollator()
- ? routingInfo.getDefaultCollator()->getSpec().toBSON()
- : BSONObj();
+ auto defaultCollator =
+ cm.getDefaultCollator() ? cm.getDefaultCollator()->getSpec().toBSON() : BSONObj();
if (SimpleBSONObjComparator::kInstance.evaluate(
defaultCollator !=
resolveCollationForUserQueries(
@@ -715,37 +716,36 @@ CreateCollectionCoordinator::_checkIfCollectionAlreadyShardedWithSameOptions(
str::stream() << "sharding already enabled for collection " << originalNss(),
requestMatchesExistingCollection);
- CreateCollectionResponse response(
- {routingInfo.getVersion(), boost::optional<CollectionIndexes>(boost::none)});
- response.setCollectionUUID(routingInfo.getUUID());
+ CreateCollectionResponse response(cri.getCollectionVersion());
+ response.setCollectionUUID(cm.getUUID());
return response;
}
// If the request is still unresolved, check if there is an existing TS buckets namespace that
// may be matched by the request.
auto bucketsNss = originalNss().makeTimeseriesBucketsNamespace();
- routingInfo = uassertStatusOK(
+ cri = uassertStatusOK(
Grid::get(opCtx)->catalogCache()->getCollectionRoutingInfoWithRefresh(opCtx, bucketsNss));
- if (!routingInfo.isSharded()) {
+ cm = cri.cm;
+ gii = cri.gii;
+ if (!cm.isSharded()) {
return boost::none;
}
auto requestMatchesExistingCollection = [&] {
- if (routingInfo.isUnique() != _request.getUnique().value_or(false)) {
+ if (cm.isUnique() != _request.getUnique().value_or(false)) {
return false;
}
// Timeseries options match
- const auto& timeseriesOptionsOnDisk =
- (*routingInfo.getTimeseriesFields()).getTimeseriesOptions();
+ const auto& timeseriesOptionsOnDisk = (*cm.getTimeseriesFields()).getTimeseriesOptions();
if (_request.getTimeseries() &&
!timeseries::optionsAreEqual(*_request.getTimeseries(), timeseriesOptionsOnDisk)) {
return false;
}
- auto defaultCollator = routingInfo.getDefaultCollator()
- ? routingInfo.getDefaultCollator()->getSpec().toBSON()
- : BSONObj();
+ auto defaultCollator =
+ cm.getDefaultCollator() ? cm.getDefaultCollator()->getSpec().toBSON() : BSONObj();
if (SimpleBSONObjComparator::kInstance.evaluate(
defaultCollator !=
resolveCollationForUserQueries(opCtx, bucketsNss, _request.getCollation()))) {
@@ -758,7 +758,7 @@ CreateCollectionCoordinator::_checkIfCollectionAlreadyShardedWithSameOptions(
auto requestKeyPattern =
uassertStatusOK(timeseries::createBucketsShardKeySpecFromTimeseriesShardKeySpec(
timeseriesOptions, *_request.getShardKey()));
- if (SimpleBSONObjComparator::kInstance.evaluate(routingInfo.getShardKeyPattern().toBSON() !=
+ if (SimpleBSONObjComparator::kInstance.evaluate(cm.getShardKeyPattern().toBSON() !=
requestKeyPattern)) {
return false;
}
@@ -769,9 +769,8 @@ CreateCollectionCoordinator::_checkIfCollectionAlreadyShardedWithSameOptions(
str::stream() << "sharding already enabled for collection " << bucketsNss,
requestMatchesExistingCollection);
- CreateCollectionResponse response(
- {routingInfo.getVersion(), boost::optional<CollectionIndexes>(boost::none)});
- response.setCollectionUUID(routingInfo.getUUID());
+ CreateCollectionResponse response(cri.getCollectionVersion());
+ response.setCollectionUUID(cm.getUUID());
return response;
}
diff --git a/src/mongo/db/s/global_index/global_index_cloning_external_state.cpp b/src/mongo/db/s/global_index/global_index_cloning_external_state.cpp
index 7473479e716..959411b958c 100644
--- a/src/mongo/db/s/global_index/global_index_cloning_external_state.cpp
+++ b/src/mongo/db/s/global_index/global_index_cloning_external_state.cpp
@@ -39,10 +39,10 @@ ShardId GlobalIndexCloningStateImpl::myShardId(ServiceContext* service) const {
return ShardingState::get(service)->shardId();
}
-ChunkManager GlobalIndexCloningStateImpl::getShardedCollectionRoutingInfo(
+ChunkManager GlobalIndexCloningStateImpl::getShardedCollectionPlacementInfo(
OperationContext* opCtx, const NamespaceString& nss) const {
auto catalogCache = Grid::get(opCtx)->catalogCache();
- return catalogCache->getShardedCollectionRoutingInfo(opCtx, nss);
+ return catalogCache->getShardedCollectionPlacementInfo(opCtx, nss);
}
} // namespace global_index
diff --git a/src/mongo/db/s/global_index/global_index_cloning_external_state.h b/src/mongo/db/s/global_index/global_index_cloning_external_state.h
index ef039841a9c..0851b776a6c 100644
--- a/src/mongo/db/s/global_index/global_index_cloning_external_state.h
+++ b/src/mongo/db/s/global_index/global_index_cloning_external_state.h
@@ -45,16 +45,16 @@ public:
virtual ShardId myShardId(ServiceContext* service) const = 0;
- virtual ChunkManager getShardedCollectionRoutingInfo(OperationContext* opCtx,
- const NamespaceString& nss) const = 0;
+ virtual ChunkManager getShardedCollectionPlacementInfo(OperationContext* opCtx,
+ const NamespaceString& nss) const = 0;
};
class GlobalIndexCloningStateImpl : public GlobalIndexCloningService::CloningExternalState {
public:
ShardId myShardId(ServiceContext* service) const override;
- ChunkManager getShardedCollectionRoutingInfo(OperationContext* opCtx,
- const NamespaceString& nss) const override;
+ ChunkManager getShardedCollectionPlacementInfo(OperationContext* opCtx,
+ const NamespaceString& nss) const override;
};
} // namespace global_index
diff --git a/src/mongo/db/s/global_index/global_index_cloning_service.cpp b/src/mongo/db/s/global_index/global_index_cloning_service.cpp
index ab2ac59228d..2cbd8e5b91b 100644
--- a/src/mongo/db/s/global_index/global_index_cloning_service.cpp
+++ b/src/mongo/db/s/global_index/global_index_cloning_service.cpp
@@ -222,7 +222,7 @@ void GlobalIndexCloningService::CloningStateMachine::_init(
auto opCtx = _serviceContext->makeOperationContext(Client::getCurrent());
auto routingInfo =
- _externalState->getShardedCollectionRoutingInfo(opCtx.get(), _metadata.getNss());
+ _externalState->getShardedCollectionPlacementInfo(opCtx.get(), _metadata.getNss());
uassert(6755901,
str::stream() << "Cannot create global index on unsharded ns "
diff --git a/src/mongo/db/s/global_index/global_index_cloning_service_test.cpp b/src/mongo/db/s/global_index/global_index_cloning_service_test.cpp
index f28aed4c4d9..01cf6a8ac6c 100644
--- a/src/mongo/db/s/global_index/global_index_cloning_service_test.cpp
+++ b/src/mongo/db/s/global_index/global_index_cloning_service_test.cpp
@@ -71,8 +71,8 @@ public:
return kRecipientShardId;
}
- ChunkManager getShardedCollectionRoutingInfo(OperationContext* opCtx,
- const NamespaceString& nss) const override {
+ ChunkManager getShardedCollectionPlacementInfo(OperationContext* opCtx,
+ const NamespaceString& nss) const override {
invariant(nss == kSourceNss);
const OID epoch = OID::gen();
diff --git a/src/mongo/db/s/migration_destination_manager.cpp b/src/mongo/db/s/migration_destination_manager.cpp
index 9e3e3a095af..a5d5fbb0fd0 100644
--- a/src/mongo/db/s/migration_destination_manager.cpp
+++ b/src/mongo/db/s/migration_destination_manager.cpp
@@ -798,7 +798,7 @@ MigrationDestinationManager::IndexesAndIdIndex MigrationDestinationManager::getC
OperationContext* opCtx,
const NamespaceStringOrUUID& nssOrUUID,
const ShardId& fromShardId,
- const boost::optional<ChunkManager>& cm,
+ const boost::optional<CollectionRoutingInfo>& cri,
boost::optional<Timestamp> afterClusterTime) {
auto fromShard =
uassertStatusOK(Grid::get(opCtx)->shardRegistry()->getShard(opCtx, fromShardId));
@@ -813,10 +813,8 @@ MigrationDestinationManager::IndexesAndIdIndex MigrationDestinationManager::getC
auto cmd = nssOrUUID.nss() ? BSON("listIndexes" << nssOrUUID.nss()->coll())
: BSON("listIndexes" << *nssOrUUID.uuid());
- if (cm) {
- ChunkVersion placementVersion = cm->getVersion(fromShardId);
- cmd = appendShardVersion(
- cmd, ShardVersion(placementVersion, boost::optional<CollectionIndexes>(boost::none)));
+ if (cri) {
+ cmd = appendShardVersion(cmd, cri->getShardVersion(fromShardId));
}
if (afterClusterTime) {
cmd = cmd.addFields(makeLocalReadConcernWithAfterClusterTime(*afterClusterTime));
diff --git a/src/mongo/db/s/migration_destination_manager.h b/src/mongo/db/s/migration_destination_manager.h
index d63b6696d57..9a0d90805d3 100644
--- a/src/mongo/db/s/migration_destination_manager.h
+++ b/src/mongo/db/s/migration_destination_manager.h
@@ -44,6 +44,7 @@
#include "mongo/db/s/session_catalog_migration_destination.h"
#include "mongo/db/shard_id.h"
#include "mongo/platform/mutex.h"
+#include "mongo/s/catalog_cache.h"
#include "mongo/s/chunk_manager.h"
#include "mongo/stdx/condition_variable.h"
#include "mongo/stdx/thread.h"
@@ -175,7 +176,7 @@ public:
static IndexesAndIdIndex getCollectionIndexes(OperationContext* opCtx,
const NamespaceStringOrUUID& nssOrUUID,
const ShardId& fromShardId,
- const boost::optional<ChunkManager>& cm,
+ const boost::optional<CollectionRoutingInfo>& cri,
boost::optional<Timestamp> afterClusterTime);
/**
diff --git a/src/mongo/db/s/migration_source_manager.cpp b/src/mongo/db/s/migration_source_manager.cpp
index 413bd81f5c2..5ea3422c2ef 100644
--- a/src/mongo/db/s/migration_source_manager.cpp
+++ b/src/mongo/db/s/migration_source_manager.cpp
@@ -179,13 +179,14 @@ MigrationSourceManager::MigrationSourceManager(OperationContext* opCtx,
}
// Snapshot the committed metadata from the time the migration starts
- const auto [collectionMetadata, collectionUUID] = [&] {
+ const auto [collectionMetadata, collectionIndexInfo, collectionUUID] = [&] {
UninterruptibleLockGuard noInterrupt(_opCtx->lockState());
AutoGetCollection autoColl(_opCtx, nss(), MODE_IS);
auto scopedCsr = CollectionShardingRuntime::assertCollectionLockedAndAcquire(
opCtx, nss(), CSRAcquisitionMode::kExclusive);
- const auto metadata = checkCollectionIdentity(_opCtx, nss(), _args.getEpoch(), boost::none);
+ const auto [metadata, indexInfo] =
+ checkCollectionIdentity(_opCtx, nss(), _args.getEpoch(), boost::none);
UUID collectionUUID = autoColl.getCollection()->uuid();
@@ -199,7 +200,8 @@ MigrationSourceManager::MigrationSourceManager(OperationContext* opCtx,
_scopedRegisterer.emplace(this, *scopedCsr);
- return std::make_tuple(std::move(metadata), std::move(collectionUUID));
+ return std::make_tuple(
+ std::move(metadata), std::move(indexInfo), std::move(collectionUUID));
}();
// Compute the max bound in case only `min` is set (moveRange)
@@ -219,10 +221,16 @@ MigrationSourceManager::MigrationSourceManager(OperationContext* opCtx,
_moveTimingHelper.setMax(max);
}
- checkShardKeyPattern(
- _opCtx, nss(), collectionMetadata, ChunkRange(*_args.getMin(), *_args.getMax()));
- checkRangeWithinChunk(
- _opCtx, nss(), collectionMetadata, ChunkRange(*_args.getMin(), *_args.getMax()));
+ checkShardKeyPattern(_opCtx,
+ nss(),
+ collectionMetadata,
+ collectionIndexInfo,
+ ChunkRange(*_args.getMin(), *_args.getMax()));
+ checkRangeWithinChunk(_opCtx,
+ nss(),
+ collectionMetadata,
+ collectionIndexInfo,
+ ChunkRange(*_args.getMin(), *_args.getMax()));
_collectionEpoch = _args.getEpoch();
_collectionUUID = collectionUUID;
diff --git a/src/mongo/db/s/refine_collection_shard_key_coordinator.cpp b/src/mongo/db/s/refine_collection_shard_key_coordinator.cpp
index c635f6781e3..d75161057fa 100644
--- a/src/mongo/db/s/refine_collection_shard_key_coordinator.cpp
+++ b/src/mongo/db/s/refine_collection_shard_key_coordinator.cpp
@@ -131,7 +131,7 @@ ExecutorFuture<void> RefineCollectionShardKeyCoordinator::_runImpl(
opCtx, nss(), ShardKeyPattern(_newShardKey.toBSON()));
const auto cm = uassertStatusOK(
- Grid::get(opCtx)->catalogCache()->getShardedCollectionRoutingInfoWithRefresh(
+ Grid::get(opCtx)->catalogCache()->getShardedCollectionPlacementInfoWithRefresh(
opCtx, nss()));
_oldShardKey = cm.getShardKeyPattern().getKeyPattern();
diff --git a/src/mongo/db/s/rename_collection_coordinator.cpp b/src/mongo/db/s/rename_collection_coordinator.cpp
index 33ae5040c6b..5b77bcfb6f2 100644
--- a/src/mongo/db/s/rename_collection_coordinator.cpp
+++ b/src/mongo/db/s/rename_collection_coordinator.cpp
@@ -403,12 +403,10 @@ ExecutorFuture<void> RenameCollectionCoordinator::_runImpl(
// Retrieve the new collection version
const auto catalog = Grid::get(opCtx)->catalogCache();
- const auto cm = uassertStatusOK(
+ const auto cri = uassertStatusOK(
catalog->getCollectionRoutingInfoWithRefresh(opCtx, _request.getTo()));
_response = RenameCollectionResponse(
- cm.isSharded() ? ShardVersion(cm.getVersion(),
- boost::optional<CollectionIndexes>(boost::none))
- : ShardVersion::UNSHARDED());
+ cri.cm.isSharded() ? cri.getCollectionVersion() : ShardVersion::UNSHARDED());
ShardingLogging::get(opCtx)->logChange(
opCtx,
diff --git a/src/mongo/db/s/reshard_collection_coordinator.cpp b/src/mongo/db/s/reshard_collection_coordinator.cpp
index f7a74c7d464..5b1c0c38878 100644
--- a/src/mongo/db/s/reshard_collection_coordinator.cpp
+++ b/src/mongo/db/s/reshard_collection_coordinator.cpp
@@ -147,7 +147,7 @@ ExecutorFuture<void> ReshardCollectionCoordinator::_runImpl(
}
const auto cmOld = uassertStatusOK(
- Grid::get(opCtx)->catalogCache()->getShardedCollectionRoutingInfoWithRefresh(
+ Grid::get(opCtx)->catalogCache()->getShardedCollectionPlacementInfoWithRefresh(
opCtx, nss()));
StateDoc newDoc(_doc);
@@ -178,7 +178,7 @@ ExecutorFuture<void> ReshardCollectionCoordinator::_runImpl(
// Report command completion to the oplog.
const auto cm = uassertStatusOK(
- Grid::get(opCtx)->catalogCache()->getShardedCollectionRoutingInfoWithRefresh(
+ Grid::get(opCtx)->catalogCache()->getShardedCollectionPlacementInfoWithRefresh(
opCtx, nss()));
if (_doc.getOldCollectionUUID() && _doc.getOldCollectionUUID() != cm.getUUID()) {
diff --git a/src/mongo/db/s/resharding/document_source_resharding_ownership_match.cpp b/src/mongo/db/s/resharding/document_source_resharding_ownership_match.cpp
index 51381f3622e..0c9e73c7dee 100644
--- a/src/mongo/db/s/resharding/document_source_resharding_ownership_match.cpp
+++ b/src/mongo/db/s/resharding/document_source_resharding_ownership_match.cpp
@@ -122,7 +122,7 @@ DocumentSource::GetNextResult DocumentSourceReshardingOwnershipMatch::doGetNext(
auto* catalogCache = Grid::get(pExpCtx->opCtx)->catalogCache();
_tempReshardingChunkMgr =
- uassertStatusOK(catalogCache->getShardedCollectionRoutingInfoWithRefresh(
+ uassertStatusOK(catalogCache->getShardedCollectionPlacementInfoWithRefresh(
pExpCtx->opCtx, tempReshardingNss));
}
diff --git a/src/mongo/db/s/resharding/resharding_collection_cloner.cpp b/src/mongo/db/s/resharding/resharding_collection_cloner.cpp
index 3b1954de232..af563527116 100644
--- a/src/mongo/db/s/resharding/resharding_collection_cloner.cpp
+++ b/src/mongo/db/s/resharding/resharding_collection_cloner.cpp
@@ -69,7 +69,7 @@ namespace {
bool collectionHasSimpleCollation(OperationContext* opCtx, const NamespaceString& nss) {
auto catalogCache = Grid::get(opCtx)->catalogCache();
- auto sourceChunkMgr = uassertStatusOK(catalogCache->getCollectionRoutingInfo(opCtx, nss));
+ auto sourceChunkMgr = uassertStatusOK(catalogCache->getCollectionPlacementInfo(opCtx, nss));
uassert(ErrorCodes::NamespaceNotSharded,
str::stream() << "Expected collection " << nss << " to be sharded",
diff --git a/src/mongo/db/s/resharding/resharding_coordinator_service.cpp b/src/mongo/db/s/resharding/resharding_coordinator_service.cpp
index 817f7f58875..1a69d0aecdb 100644
--- a/src/mongo/db/s/resharding/resharding_coordinator_service.cpp
+++ b/src/mongo/db/s/resharding/resharding_coordinator_service.cpp
@@ -874,7 +874,7 @@ ReshardingCoordinatorExternalState::ParticipantShardsAndChunks
ReshardingCoordinatorExternalStateImpl::calculateParticipantShardsAndChunks(
OperationContext* opCtx, const ReshardingCoordinatorDocument& coordinatorDoc) {
const auto cm = uassertStatusOK(
- Grid::get(opCtx)->catalogCache()->getShardedCollectionRoutingInfoWithRefresh(
+ Grid::get(opCtx)->catalogCache()->getShardedCollectionPlacementInfoWithRefresh(
opCtx, coordinatorDoc.getSourceNss()));
std::set<ShardId> donorShardIds;
@@ -1622,7 +1622,7 @@ ExecutorFuture<bool> ReshardingCoordinator::_isReshardingOpRedundant(
auto cancelableOpCtx = _cancelableOpCtxFactory->makeOperationContext(&cc());
auto opCtx = cancelableOpCtx.get();
auto cm = uassertStatusOK(
- Grid::get(opCtx)->catalogCache()->getShardedCollectionRoutingInfoWithRefresh(
+ Grid::get(opCtx)->catalogCache()->getShardedCollectionPlacementInfoWithRefresh(
opCtx, _coordinatorDoc.getSourceNss()));
const auto currentShardKey = cm.getShardKeyPattern().getKeyPattern();
// Verify if there is any work to be done by the resharding operation by checking
@@ -2107,8 +2107,8 @@ void ReshardingCoordinator::_updateChunkImbalanceMetrics(const NamespaceString&
try {
auto routingInfo = uassertStatusOK(
- Grid::get(opCtx)->catalogCache()->getShardedCollectionRoutingInfoWithRefresh(opCtx,
- nss));
+ Grid::get(opCtx)->catalogCache()->getShardedCollectionPlacementInfoWithRefresh(opCtx,
+ nss));
const auto collectionZones =
uassertStatusOK(Grid::get(opCtx)->catalogClient()->getTagsForCollection(opCtx, nss));
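
Across these files the patch splits the catalog cache API: the *PlacementInfo* variants serve callers that only need chunk placement, while getCollectionRoutingInfo() now also returns the global index information. A simplified sketch of the split (stand-in types, illustrative bodies only):

    #include <iostream>
    #include <optional>

    struct ChunkManager { bool sharded = true; };
    struct GlobalIndexesCache { long long version = 0; };

    struct CollectionRoutingInfo {
        ChunkManager cm;
        std::optional<GlobalIndexesCache> gii;
    };

    ChunkManager getCollectionPlacementInfo() { return {}; }
    CollectionRoutingInfo getCollectionRoutingInfo() {
        return {{}, GlobalIndexesCache{5}};
    }

    int main() {
        auto cm = getCollectionPlacementInfo();        // placement-only consumer
        auto [cm2, gii] = getCollectionRoutingInfo();  // also needs index versions
        std::cout << cm.sharded << ' ' << cm2.sharded << ' '
                  << (gii ? gii->version : -1) << '\n';
    }
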
diff --git a/src/mongo/db/s/resharding/resharding_destined_recipient_test.cpp b/src/mongo/db/s/resharding/resharding_destined_recipient_test.cpp
index 484a4a28bf0..76e097498c0 100644
--- a/src/mongo/db/s/resharding/resharding_destined_recipient_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_destined_recipient_test.cpp
@@ -332,7 +332,7 @@ TEST_F(DestinedRecipientTest, TestGetDestinedRecipientThrowsOnBlockedRefresh) {
});
}
- auto sw = catalogCache()->getCollectionRoutingInfoWithRefresh(opCtx, env.tempNss);
+ auto sw = catalogCache()->getCollectionPlacementInfoWithRefresh(opCtx, env.tempNss);
}
TEST_F(DestinedRecipientTest, TestOpObserverSetsDestinedRecipientOnInserts) {
diff --git a/src/mongo/db/s/resharding/resharding_donor_service.cpp b/src/mongo/db/s/resharding/resharding_donor_service.cpp
index 646b2b1f743..f01a5daae26 100644
--- a/src/mongo/db/s/resharding/resharding_donor_service.cpp
+++ b/src/mongo/db/s/resharding/resharding_donor_service.cpp
@@ -150,7 +150,7 @@ public:
void refreshCatalogCache(OperationContext* opCtx, const NamespaceString& nss) override {
auto catalogCache = Grid::get(opCtx)->catalogCache();
- uassertStatusOK(catalogCache->getShardedCollectionRoutingInfoWithRefresh(opCtx, nss));
+ uassertStatusOK(catalogCache->getShardedCollectionPlacementInfoWithRefresh(opCtx, nss));
}
void waitForCollectionFlush(OperationContext* opCtx, const NamespaceString& nss) override {
diff --git a/src/mongo/db/s/resharding/resharding_recipient_service.cpp b/src/mongo/db/s/resharding/resharding_recipient_service.cpp
index 0216f048155..067a2561a0b 100644
--- a/src/mongo/db/s/resharding/resharding_recipient_service.cpp
+++ b/src/mongo/db/s/resharding/resharding_recipient_service.cpp
@@ -621,7 +621,7 @@ ReshardingRecipientService::RecipientStateMachine::_makeDataReplication(Operatio
_externalState->refreshCatalogCache(opCtx, _metadata.getSourceNss());
auto myShardId = _externalState->myShardId(opCtx->getServiceContext());
- auto sourceChunkMgr =
+ auto [sourceChunkMgr, _] =
_externalState->getShardedCollectionRoutingInfo(opCtx, _metadata.getSourceNss());
// The metrics map can already be pre-populated if it was recovered from disk.
diff --git a/src/mongo/db/s/resharding/resharding_recipient_service_external_state.cpp b/src/mongo/db/s/resharding/resharding_recipient_service_external_state.cpp
index 4e86f9c1ce6..7ea3c06c96c 100644
--- a/src/mongo/db/s/resharding/resharding_recipient_service_external_state.cpp
+++ b/src/mongo/db/s/resharding/resharding_recipient_service_external_state.cpp
@@ -110,10 +110,10 @@ ShardId RecipientStateMachineExternalStateImpl::myShardId(ServiceContext* servic
void RecipientStateMachineExternalStateImpl::refreshCatalogCache(OperationContext* opCtx,
const NamespaceString& nss) {
auto catalogCache = Grid::get(opCtx)->catalogCache();
- uassertStatusOK(catalogCache->getShardedCollectionRoutingInfoWithRefresh(opCtx, nss));
+ uassertStatusOK(catalogCache->getShardedCollectionPlacementInfoWithRefresh(opCtx, nss));
}
-ChunkManager RecipientStateMachineExternalStateImpl::getShardedCollectionRoutingInfo(
+CollectionRoutingInfo RecipientStateMachineExternalStateImpl::getShardedCollectionRoutingInfo(
OperationContext* opCtx, const NamespaceString& nss) {
auto catalogCache = Grid::get(opCtx)->catalogCache();
return catalogCache->getShardedCollectionRoutingInfo(opCtx, nss);
@@ -127,7 +127,7 @@ RecipientStateMachineExternalStateImpl::getCollectionOptions(OperationContext* o
StringData reason) {
// Load the collection options from the primary shard for the database.
return _withShardVersionRetry(opCtx, nss, reason, [&] {
- auto cm = getShardedCollectionRoutingInfo(opCtx, nss);
+ auto [cm, _] = getShardedCollectionRoutingInfo(opCtx, nss);
return MigrationDestinationManager::getCollectionOptions(
opCtx,
NamespaceStringOrUUID{nss.db().toString(), uuid},
@@ -145,12 +145,12 @@ RecipientStateMachineExternalStateImpl::getCollectionIndexes(OperationContext* o
StringData reason) {
// Load the list of indexes from the shard which owns the global minimum chunk.
return _withShardVersionRetry(opCtx, nss, reason, [&] {
- auto cm = getShardedCollectionRoutingInfo(opCtx, nss);
+ auto cri = getShardedCollectionRoutingInfo(opCtx, nss);
return MigrationDestinationManager::getCollectionIndexes(
opCtx,
NamespaceStringOrUUID{nss.db().toString(), uuid},
- cm.getMinKeyShardIdWithSimpleCollation(),
- cm,
+ cri.cm.getMinKeyShardIdWithSimpleCollation(),
+ cri,
afterClusterTime);
});
}
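
With getShardedCollectionRoutingInfo() now returning a CollectionRoutingInfo, callers pick one of the two styles visible above. A compact stand-in illustration:

    #include <iostream>

    struct ChunkManager { int minKeyShard = 0; };
    struct GlobalIndexes { int version = 1; };
    struct CollectionRoutingInfo { ChunkManager cm; GlobalIndexes gii; };

    CollectionRoutingInfo getShardedCollectionRoutingInfo() { return {}; }

    int main() {
        // Style 1: destructure and discard the half you don't need.
        auto [cm, _] = getShardedCollectionRoutingInfo();
        (void)_;
        // Style 2: keep the aggregate when both halves are forwarded together,
        // as getCollectionIndexes() does with `cri` above.
        auto cri = getShardedCollectionRoutingInfo();
        std::cout << cm.minKeyShard + cri.gii.version << '\n';
    }
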
diff --git a/src/mongo/db/s/resharding/resharding_recipient_service_external_state.h b/src/mongo/db/s/resharding/resharding_recipient_service_external_state.h
index d9ee5bbe574..8fc1d522c82 100644
--- a/src/mongo/db/s/resharding/resharding_recipient_service_external_state.h
+++ b/src/mongo/db/s/resharding/resharding_recipient_service_external_state.h
@@ -64,8 +64,8 @@ public:
virtual void refreshCatalogCache(OperationContext* opCtx, const NamespaceString& nss) = 0;
- virtual ChunkManager getShardedCollectionRoutingInfo(OperationContext* opCtx,
- const NamespaceString& nss) = 0;
+ virtual CollectionRoutingInfo getShardedCollectionRoutingInfo(OperationContext* opCtx,
+ const NamespaceString& nss) = 0;
virtual MigrationDestinationManager::CollectionOptionsAndUUID getCollectionOptions(
OperationContext* opCtx,
@@ -114,8 +114,8 @@ public:
void refreshCatalogCache(OperationContext* opCtx, const NamespaceString& nss) override;
- ChunkManager getShardedCollectionRoutingInfo(OperationContext* opCtx,
- const NamespaceString& nss) override;
+ CollectionRoutingInfo getShardedCollectionRoutingInfo(OperationContext* opCtx,
+ const NamespaceString& nss) override;
MigrationDestinationManager::CollectionOptionsAndUUID getCollectionOptions(
OperationContext* opCtx,
diff --git a/src/mongo/db/s/resharding/resharding_recipient_service_external_state_test.cpp b/src/mongo/db/s/resharding/resharding_recipient_service_external_state_test.cpp
index 7ce77e0329e..69216952db9 100644
--- a/src/mongo/db/s/resharding/resharding_recipient_service_external_state_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_recipient_service_external_state_test.cpp
@@ -183,6 +183,8 @@ public:
return std::vector<BSONObj>{coll.toBSON(), chunkObj};
}());
+ expectCollectionAndIndexesAggregation(tempNss, epoch, timestamp, uuid, skey, {});
+
future.default_timed_get();
}
diff --git a/src/mongo/db/s/resharding/resharding_recipient_service_test.cpp b/src/mongo/db/s/resharding/resharding_recipient_service_test.cpp
index ee2325bf6ba..70d1fbf31a5 100644
--- a/src/mongo/db/s/resharding/resharding_recipient_service_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_recipient_service_test.cpp
@@ -76,8 +76,8 @@ public:
void refreshCatalogCache(OperationContext* opCtx, const NamespaceString& nss) override {}
- ChunkManager getShardedCollectionRoutingInfo(OperationContext* opCtx,
- const NamespaceString& nss) override {
+ CollectionRoutingInfo getShardedCollectionRoutingInfo(OperationContext* opCtx,
+ const NamespaceString& nss) override {
invariant(nss == _sourceNss);
const OID epoch = OID::gen();
@@ -99,11 +99,19 @@ public:
boost::none /* chunkSizeBytes */,
true /* allowMigrations */,
chunks);
-
- return ChunkManager(_someDonorId,
- DatabaseVersion(UUID::gen(), Timestamp(1, 1)),
- _makeStandaloneRoutingTableHistory(std::move(rt)),
- boost::none /* clusterTime */);
+ IndexCatalogTypeMap globalIndexesMap;
+ globalIndexesMap.emplace(
+ "randomKey_1",
+ IndexCatalogType(
+ "randomKey_1", BSON("randomKey" << 1), BSONObj(), Timestamp(1, 0), _sourceUUID));
+
+ return CollectionRoutingInfo{
+ ChunkManager(_someDonorId,
+ DatabaseVersion(UUID::gen(), Timestamp(1, 1)),
+ _makeStandaloneRoutingTableHistory(std::move(rt)),
+ boost::none /* clusterTime */),
+ GlobalIndexesCache(CollectionIndexes(_sourceUUID, Timestamp(1, 0)),
+ std::move(globalIndexesMap))};
}
MigrationDestinationManager::CollectionOptionsAndUUID getCollectionOptions(
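
The test double above now returns a CollectionRoutingInfo whose GlobalIndexesCache is pre-populated, so recipient code exercises the index-version path instead of the old boost::none placeholder. A simplified standalone sketch of that fixture pattern (IndexEntry and friends are stand-ins):

    #include <iostream>
    #include <map>
    #include <string>
    #include <utility>

    struct IndexEntry { std::string keyPattern; };
    using IndexCatalogMap = std::map<std::string, IndexEntry>;

    struct FakeRoutingInfo {
        int placementMajor;
        IndexCatalogMap indexes;
    };

    FakeRoutingInfo makeFakeRoutingInfo() {
        IndexCatalogMap m;
        // Mirrors the "randomKey_1" entry seeded by the test above.
        m.emplace("randomKey_1", IndexEntry{"{randomKey: 1}"});
        return {1, std::move(m)};
    }

    int main() {
        auto cri = makeFakeRoutingInfo();
        std::cout << cri.indexes.size() << " fake index(es)\n";
    }
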
diff --git a/src/mongo/db/s/resharding/resharding_txn_cloner_test.cpp b/src/mongo/db/s/resharding/resharding_txn_cloner_test.cpp
index d7529641b44..4e06157fa92 100644
--- a/src/mongo/db/s/resharding/resharding_txn_cloner_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_txn_cloner_test.cpp
@@ -149,6 +149,14 @@ class ReshardingTxnClonerTest : public ShardServerTestFixture {
return repl::OpTimeWith<std::vector<ShardType>>(shardTypes);
}
+ std::pair<CollectionType, std::vector<IndexCatalogType>> getCollectionAndGlobalIndexes(
+ OperationContext* opCtx,
+ const NamespaceString& nss,
+ const repl::ReadConcernArgs& readConcern) override {
+ uasserted(ErrorCodes::NamespaceNotFound,
+ str::stream() << "Collection " << nss.ns() << " not found");
+ }
+
private:
const std::vector<ShardId> _shardIds;
};
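
The override added to this test catalog client never returns: uasserted() throws, steering the code under test down the collection-absent branch. A standalone sketch of the same throwing-stub pattern using plain exceptions:

    #include <stdexcept>
    #include <string>
    #include <utility>
    #include <vector>

    struct CollectionType {};
    struct IndexCatalogType {};

    struct StubCatalogClient {
        // Stand-in for the overridden getCollectionAndGlobalIndexes(): always
        // reports "not found" so callers take the absent-collection path.
        std::pair<CollectionType, std::vector<IndexCatalogType>>
        getCollectionAndGlobalIndexes(const std::string& nss) {
            throw std::runtime_error("Collection " + nss + " not found");
        }
    };

    int main() {
        StubCatalogClient client;
        try {
            client.getCollectionAndGlobalIndexes("db.coll");
        } catch (const std::runtime_error&) {
            // Test code asserts on this error path.
        }
    }
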
diff --git a/src/mongo/db/s/resharding/resharding_util.cpp b/src/mongo/db/s/resharding/resharding_util.cpp
index e0f050409c6..b60921b4c52 100644
--- a/src/mongo/db/s/resharding/resharding_util.cpp
+++ b/src/mongo/db/s/resharding/resharding_util.cpp
@@ -140,7 +140,7 @@ std::set<ShardId> getRecipientShards(OperationContext* opCtx,
const UUID& reshardingUUID) {
const auto& tempNss = constructTemporaryReshardingNss(sourceNss.db(), reshardingUUID);
auto* catalogCache = Grid::get(opCtx)->catalogCache();
- auto cm = uassertStatusOK(catalogCache->getCollectionRoutingInfo(opCtx, tempNss));
+ auto cm = uassertStatusOK(catalogCache->getCollectionPlacementInfo(opCtx, tempNss));
uassert(ErrorCodes::NamespaceNotSharded,
str::stream() << "Expected collection " << tempNss << " to be sharded",
diff --git a/src/mongo/db/s/sessions_collection_config_server.cpp b/src/mongo/db/s/sessions_collection_config_server.cpp
index 1cf0d58e7f6..1f5e5353a62 100644
--- a/src/mongo/db/s/sessions_collection_config_server.cpp
+++ b/src/mongo/db/s/sessions_collection_config_server.cpp
@@ -75,16 +75,16 @@ void SessionsCollectionConfigServer::_generateIndexesIfNeeded(OperationContext*
nss,
"SessionsCollectionConfigServer::_generateIndexesIfNeeded",
[&] {
- const ChunkManager cm = [&]() {
+ const auto cri = [&]() {
// (SERVER-61214) wait for the catalog cache to acknowledge that the sessions
// collection is sharded in order to be sure to get a valid routing table
while (true) {
- auto cm = uassertStatusOK(
+ auto [cm, gii] = uassertStatusOK(
Grid::get(opCtx)->catalogCache()->getCollectionRoutingInfoWithRefresh(opCtx,
nss));
if (cm.isSharded()) {
- return cm;
+ return CollectionRoutingInfo(std::move(cm), std::move(gii));
}
}
}();
@@ -93,7 +93,7 @@ void SessionsCollectionConfigServer::_generateIndexesIfNeeded(OperationContext*
opCtx,
nss.db(),
nss,
- cm,
+ cri,
SessionsCollection::generateCreateIndexesCmd(),
ReadPreferenceSetting(ReadPreference::PrimaryOnly),
Shard::RetryPolicy::kNoRetry,
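
The immediately-invoked lambda above keeps `cri` const at the call site while looping until the refreshed routing table reports the sessions collection as sharded (the SERVER-61214 workaround). A runnable stand-in sketch, with a simulated refresh in place of the real catalog cache:

    #include <iostream>

    struct RoutingInfo { bool sharded; int version; };

    RoutingInfo refreshRoutingInfo() {
        static int attempts = 0;
        // Simulate the cache acknowledging the sharded collection on retry.
        return {++attempts >= 2, attempts};
    }

    int main() {
        const auto cri = [] {
            while (true) {
                if (auto ri = refreshRoutingInfo(); ri.sharded)
                    return ri;
            }
        }();
        std::cout << "got routing info after " << cri.version << " refreshes\n";
    }
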
diff --git a/src/mongo/db/s/shard_filtering_metadata_refresh.cpp b/src/mongo/db/s/shard_filtering_metadata_refresh.cpp
index 0b42cc81465..d7d5050b75b 100644
--- a/src/mongo/db/s/shard_filtering_metadata_refresh.cpp
+++ b/src/mongo/db/s/shard_filtering_metadata_refresh.cpp
@@ -584,7 +584,7 @@ CollectionMetadata forceGetCurrentMetadata(OperationContext* opCtx, const Namesp
try {
const auto cm = uassertStatusOK(
- Grid::get(opCtx)->catalogCache()->getCollectionRoutingInfoWithRefresh(opCtx, nss));
+ Grid::get(opCtx)->catalogCache()->getCollectionPlacementInfoWithRefresh(opCtx, nss));
if (!cm.isSharded()) {
return CollectionMetadata();
@@ -614,7 +614,7 @@ ChunkVersion forceShardFilteringMetadataRefresh(OperationContext* opCtx,
invariant(shardingState->canAcceptShardedCommands());
const auto cm = uassertStatusOK(
- Grid::get(opCtx)->catalogCache()->getCollectionRoutingInfoWithRefresh(opCtx, nss));
+ Grid::get(opCtx)->catalogCache()->getCollectionPlacementInfoWithRefresh(opCtx, nss));
if (!cm.isSharded()) {
// DBLock and CollectionLock are used here to avoid throwing further recursive stale
diff --git a/src/mongo/db/s/shard_key_util.cpp b/src/mongo/db/s/shard_key_util.cpp
index 580018b4d19..da88bb914a5 100644
--- a/src/mongo/db/s/shard_key_util.cpp
+++ b/src/mongo/db/s/shard_key_util.cpp
@@ -319,22 +319,20 @@ void ValidationBehaviorsShardCollection::createShardKeyIndex(
ValidationBehaviorsRefineShardKey::ValidationBehaviorsRefineShardKey(OperationContext* opCtx,
const NamespaceString& nss)
: _opCtx(opCtx),
- _cm(uassertStatusOK(
+ _cri(uassertStatusOK(
Grid::get(opCtx)->catalogCache()->getShardedCollectionRoutingInfoWithRefresh(opCtx,
nss))),
_indexShard(uassertStatusOK(Grid::get(opCtx)->shardRegistry()->getShard(
- opCtx, _cm.getMinKeyShardIdWithSimpleCollation()))) {}
+ opCtx, _cri.cm.getMinKeyShardIdWithSimpleCollation()))) {}
std::vector<BSONObj> ValidationBehaviorsRefineShardKey::loadIndexes(
const NamespaceString& nss) const {
- ChunkVersion placementVersion = _cm.getVersion(_indexShard->getId());
auto indexesRes = _indexShard->runExhaustiveCursorCommand(
_opCtx,
ReadPreferenceSetting(ReadPreference::PrimaryOnly),
nss.db().toString(),
- appendShardVersion(
- BSON("listIndexes" << nss.coll()),
- ShardVersion(placementVersion, boost::optional<CollectionIndexes>(boost::none))),
+ appendShardVersion(BSON("listIndexes" << nss.coll()),
+ _cri.getShardVersion(_indexShard->getId())),
Milliseconds(-1));
if (indexesRes.getStatus().code() != ErrorCodes::NamespaceNotFound) {
return uassertStatusOK(indexesRes).docs;
@@ -344,14 +342,13 @@ std::vector<BSONObj> ValidationBehaviorsRefineShardKey::loadIndexes(
void ValidationBehaviorsRefineShardKey::verifyUsefulNonMultiKeyIndex(
const NamespaceString& nss, const BSONObj& proposedKey) const {
- ChunkVersion placementVersion = _cm.getVersion(_indexShard->getId());
auto checkShardingIndexRes = uassertStatusOK(_indexShard->runCommand(
_opCtx,
ReadPreferenceSetting(ReadPreference::PrimaryOnly),
"admin",
appendShardVersion(
BSON(kCheckShardingIndexCmdName << nss.ns() << kKeyPatternField << proposedKey),
- ShardVersion(placementVersion, boost::optional<CollectionIndexes>(boost::none))),
+ _cri.getShardVersion(_indexShard->getId())),
Shard::RetryPolicy::kIdempotent));
if (checkShardingIndexRes.commandStatus == ErrorCodes::UnknownError) {
// CheckShardingIndex returns UnknownError if a compatible shard key index cannot be found,
diff --git a/src/mongo/db/s/shard_key_util.h b/src/mongo/db/s/shard_key_util.h
index 55905e7beb7..9aa5926b271 100644
--- a/src/mongo/db/s/shard_key_util.h
+++ b/src/mongo/db/s/shard_key_util.h
@@ -30,6 +30,7 @@
#pragma once
#include "mongo/db/dbdirectclient.h"
+#include "mongo/s/catalog_cache.h"
#include "mongo/s/chunk_manager.h"
#include "mongo/s/client/shard.h"
#include "mongo/s/shard_util.h"
@@ -108,7 +109,7 @@ public:
private:
OperationContext* _opCtx;
- ChunkManager _cm;
+ CollectionRoutingInfo _cri;
std::shared_ptr<Shard> _indexShard;
};
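
ValidationBehaviorsRefineShardKey now holds the full routing info and stamps each remote request with _cri.getShardVersion(shardId), which pairs that shard's placement version with the cached index version. A stand-in sketch of such an accessor:

    #include <iostream>
    #include <map>
    #include <optional>
    #include <string>

    struct ChunkVersion { int major; };
    struct CollectionIndexes { long long v; };
    struct ShardVersion {
        ChunkVersion placement;
        std::optional<CollectionIndexes> indexes;
    };

    struct CollectionRoutingInfo {
        std::map<std::string, ChunkVersion> perShardPlacement;
        std::optional<CollectionIndexes> indexVersion;

        // Hypothetical analogue of getShardVersion(): per-shard placement,
        // collection-wide index version.
        ShardVersion getShardVersion(const std::string& shardId) const {
            return {perShardPlacement.at(shardId), indexVersion};
        }
    };

    int main() {
        CollectionRoutingInfo cri{{{"shard0", {12}}}, CollectionIndexes{4}};
        auto sv = cri.getShardVersion("shard0");
        std::cout << sv.placement.major << ' '
                  << (sv.indexes ? sv.indexes->v : -1) << '\n';
    }
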
diff --git a/src/mongo/db/s/sharding_ddl_util.cpp b/src/mongo/db/s/sharding_ddl_util.cpp
index c53e01c1c2f..2b9756eb445 100644
--- a/src/mongo/db/s/sharding_ddl_util.cpp
+++ b/src/mongo/db/s/sharding_ddl_util.cpp
@@ -541,8 +541,9 @@ boost::optional<CreateCollectionResponse> checkIfCollectionAlreadySharded(
const BSONObj& key,
const BSONObj& collation,
bool unique) {
- auto cm = uassertStatusOK(
+ auto cri = uassertStatusOK(
Grid::get(opCtx)->catalogCache()->getCollectionRoutingInfoWithRefresh(opCtx, nss));
+ const auto& cm = cri.cm;
if (!cm.isSharded()) {
return boost::none;
@@ -559,8 +560,7 @@ boost::optional<CreateCollectionResponse> checkIfCollectionAlreadySharded(
SimpleBSONObjComparator::kInstance.evaluate(defaultCollator == collation) &&
cm.isUnique() == unique);
- CreateCollectionResponse response(
- {cm.getVersion(), boost::optional<CollectionIndexes>(boost::none)});
+ CreateCollectionResponse response(cri.getCollectionVersion());
response.setCollectionUUID(cm.getUUID());
return response;
}
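
Note the low-churn trick in this hunk: after switching to `cri`, a reference alias `const auto& cm = cri.cm;` lets the remainder of the function keep reading `cm` unchanged. A minimal illustration with stand-in types:

    #include <iostream>

    struct ChunkManager { bool sharded = true; };
    struct CollectionRoutingInfo { ChunkManager cm; };

    int main() {
        CollectionRoutingInfo cri;
        const auto& cm = cri.cm;  // existing code below stays untouched
        std::cout << cm.sharded << '\n';
    }
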
diff --git a/src/mongo/db/s/sharding_index_catalog_util.cpp b/src/mongo/db/s/sharding_index_catalog_util.cpp
index 367be78f684..17f026824cc 100644
--- a/src/mongo/db/s/sharding_index_catalog_util.cpp
+++ b/src/mongo/db/s/sharding_index_catalog_util.cpp
@@ -105,7 +105,7 @@ void coordinateIndexCatalogModificationAcrossCollectionShards(
// Get an up to date shard distribution.
auto routingInfo =
- uassertStatusOK(Grid::get(opCtx)->catalogCache()->getCollectionRoutingInfoWithRefresh(
+ uassertStatusOK(Grid::get(opCtx)->catalogCache()->getCollectionPlacementInfoWithRefresh(
opCtx, userCollectionNss));
uassert(ErrorCodes::NamespaceNotSharded,
str::stream() << "collection " << userCollectionNss << " is not sharded",
diff --git a/src/mongo/db/s/sharding_write_router.cpp b/src/mongo/db/s/sharding_write_router.cpp
index e961356a108..66a9afdf358 100644
--- a/src/mongo/db/s/sharding_write_router.cpp
+++ b/src/mongo/db/s/sharding_write_router.cpp
@@ -55,7 +55,7 @@ ShardingWriteRouter::ShardingWriteRouter(OperationContext* opCtx,
const auto& donorFields = reshardingFields->getDonorFields();
invariant(donorFields);
- _reshardingChunkMgr = uassertStatusOK(catalogCache->getCollectionRoutingInfo(
+ _reshardingChunkMgr = uassertStatusOK(catalogCache->getCollectionPlacementInfo(
opCtx, donorFields->getTempReshardingNss(), true /* allowLocks */));
tassert(6862800,
diff --git a/src/mongo/db/s/shardsvr_drop_collection_command.cpp b/src/mongo/db/s/shardsvr_drop_collection_command.cpp
index 50a5782e1ee..503d16b1802 100644
--- a/src/mongo/db/s/shardsvr_drop_collection_command.cpp
+++ b/src/mongo/db/s/shardsvr_drop_collection_command.cpp
@@ -36,7 +36,7 @@
#include "mongo/db/s/sharding_state.h"
#include "mongo/logv2/log.h"
#include "mongo/s/catalog/sharding_catalog_client.h"
-#include "mongo/s/chunk_manager_targeter.h"
+#include "mongo/s/collection_routing_info_targeter.h"
#include "mongo/s/grid.h"
#include "mongo/s/request_types/sharded_ddl_commands_gen.h"
#include "mongo/s/sharding_feature_flags_gen.h"
@@ -100,7 +100,7 @@ public:
// If 'ns()' is a sharded time-series view collection, 'targetNs' is a
// namespace for time-series buckets collection. For all other collections,
// 'targetNs' is equal to 'ns()'.
- return ChunkManagerTargeter(opCtx, ns()).getNS();
+ return CollectionRoutingInfoTargeter(opCtx, ns()).getNS();
}
return ns();
}();
diff --git a/src/mongo/db/s/shardsvr_drop_indexes_command.cpp b/src/mongo/db/s/shardsvr_drop_indexes_command.cpp
index dd08a353a44..4da48798f4a 100644
--- a/src/mongo/db/s/shardsvr_drop_indexes_command.cpp
+++ b/src/mongo/db/s/shardsvr_drop_indexes_command.cpp
@@ -184,7 +184,7 @@ ShardsvrDropIndexesCommand::Invocation::Response ShardsvrDropIndexesCommand::Inv
opCtx, Grid::get(opCtx)->catalogCache(), resolvedNs, "dropIndexes", [&] {
// If the collection is sharded, we target only the primary shard and the shards that
// own chunks for the collection.
- const auto routingInfo = uassertStatusOK(
+ const auto cri = uassertStatusOK(
Grid::get(opCtx)->catalogCache()->getCollectionRoutingInfo(opCtx, resolvedNs));
auto cmdToBeSent = CommandHelpers::filterCommandRequestForPassthrough(
@@ -195,7 +195,7 @@ ShardsvrDropIndexesCommand::Invocation::Response ShardsvrDropIndexesCommand::Inv
opCtx,
resolvedNs.db(),
resolvedNs,
- routingInfo,
+ cri,
retryState.shardsWithSuccessResponses,
applyReadWriteConcern(
opCtx,
diff --git a/src/mongo/db/s/shardsvr_merge_chunks_command.cpp b/src/mongo/db/s/shardsvr_merge_chunks_command.cpp
index 4fc798e10b7..4c4ef3c9cac 100644
--- a/src/mongo/db/s/shardsvr_merge_chunks_command.cpp
+++ b/src/mongo/db/s/shardsvr_merge_chunks_command.cpp
@@ -98,9 +98,10 @@ void mergeChunks(OperationContext* opCtx,
onCollectionPlacementVersionMismatch(opCtx, nss, boost::none);
OperationShardingState::unsetShardRoleForLegacyDDLOperationsSentWithShardVersionIfNeeded(
opCtx, nss);
- const auto metadata = checkCollectionIdentity(opCtx, nss, expectedEpoch, expectedTimestamp);
- checkShardKeyPattern(opCtx, nss, metadata, chunkRange);
- checkRangeOwnership(opCtx, nss, metadata, chunkRange);
+ const auto [metadata, indexInfo] =
+ checkCollectionIdentity(opCtx, nss, expectedEpoch, expectedTimestamp);
+ checkShardKeyPattern(opCtx, nss, metadata, indexInfo, chunkRange);
+ checkRangeOwnership(opCtx, nss, metadata, indexInfo, chunkRange);
return metadata;
}();
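
As in the migration source manager, the merge path now threads the index info into every precondition check so a failure can report a wanted ShardVersion carrying a real index version. A simplified sketch of the widened check signature (stand-in types; the real checks throw StaleConfig on mismatch):

    #include <iostream>
    #include <optional>
    #include <string>

    struct Metadata { std::string shardKey; };
    struct IndexInfo { long long v; };
    struct ChunkRange { std::string min, max; };

    void checkShardKeyPattern(const Metadata& m,
                              const std::optional<IndexInfo>& idx,
                              const ChunkRange& r) {
        // On mismatch the real code would throw, stamping idx's version into
        // the wanted ShardVersion; here we just trace the inputs.
        std::cout << "checking " << r.min << ".." << r.max << " against "
                  << m.shardKey << " (indexVersion=" << (idx ? idx->v : -1)
                  << ")\n";
    }

    int main() {
        checkShardKeyPattern(Metadata{"{x: 1}"}, IndexInfo{9},
                             ChunkRange{"0", "10"});
    }
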
diff --git a/src/mongo/db/s/shardsvr_participant_block_command.cpp b/src/mongo/db/s/shardsvr_participant_block_command.cpp
index bcbe80f0b27..74843edfac6 100644
--- a/src/mongo/db/s/shardsvr_participant_block_command.cpp
+++ b/src/mongo/db/s/shardsvr_participant_block_command.cpp
@@ -36,7 +36,6 @@
#include "mongo/db/s/sharding_state.h"
#include "mongo/logv2/log.h"
#include "mongo/s/catalog/sharding_catalog_client.h"
-#include "mongo/s/chunk_manager_targeter.h"
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding
diff --git a/src/mongo/db/s/shardsvr_split_chunk_command.cpp b/src/mongo/db/s/shardsvr_split_chunk_command.cpp
index 0e540e347a1..52536354781 100644
--- a/src/mongo/db/s/shardsvr_split_chunk_command.cpp
+++ b/src/mongo/db/s/shardsvr_split_chunk_command.cpp
@@ -164,10 +164,10 @@ public:
onCollectionPlacementVersionMismatch(opCtx, nss, boost::none);
OperationShardingState::
unsetShardRoleForLegacyDDLOperationsSentWithShardVersionIfNeeded(opCtx, nss);
- const auto metadata = checkCollectionIdentity(
+ const auto [metadata, indexInfo] = checkCollectionIdentity(
opCtx, nss, expectedCollectionEpoch, expectedCollectionTimestamp);
- checkShardKeyPattern(opCtx, nss, metadata, chunkRange);
- checkChunkMatchesRange(opCtx, nss, metadata, chunkRange);
+ checkShardKeyPattern(opCtx, nss, metadata, indexInfo, chunkRange);
+ checkChunkMatchesRange(opCtx, nss, metadata, indexInfo, chunkRange);
}
auto topChunk = uassertStatusOK(splitChunk(opCtx,
diff --git a/src/mongo/db/s/split_chunk.cpp b/src/mongo/db/s/split_chunk.cpp
index 2697c1463a7..1b9942468af 100644
--- a/src/mongo/db/s/split_chunk.cpp
+++ b/src/mongo/db/s/split_chunk.cpp
@@ -101,9 +101,9 @@ bool checkMetadataForSuccessfulSplitChunk(OperationContext* opCtx,
Lock::DBLock dbLock(opCtx, nss.dbName(), MODE_IS);
Lock::CollectionLock collLock(opCtx, nss, MODE_IS);
- const auto metadataAfterSplit = CollectionShardingRuntime::assertCollectionLockedAndAcquire(
- opCtx, nss, CSRAcquisitionMode::kShared)
- ->getCurrentMetadataIfKnown();
+ const auto scopedCSR = CollectionShardingRuntime::assertCollectionLockedAndAcquire(
+ opCtx, nss, CSRAcquisitionMode::kShared);
+ const auto metadataAfterSplit = scopedCSR->getCurrentMetadataIfKnown();
ShardId shardId = ShardingState::get(opCtx)->shardId();
@@ -121,15 +121,15 @@ bool checkMetadataForSuccessfulSplitChunk(OperationContext* opCtx,
metadataAfterSplit->isSharded());
const auto placementVersion = metadataAfterSplit->getShardVersion();
const auto epoch = placementVersion.epoch();
- uassert(StaleConfigInfo(
- nss,
- ShardVersion::IGNORED() /* receivedVersion */,
- ShardVersion(placementVersion,
- boost::optional<CollectionIndexes>(boost::none)) /* wantedVersion */,
- shardId),
- str::stream() << "Collection " << nss.ns() << " changed since split start",
- epoch == expectedEpoch &&
- (!expectedTimestamp || placementVersion.getTimestamp() == expectedTimestamp));
+ uassert(
+ StaleConfigInfo(nss,
+ ShardVersion::IGNORED() /* receivedVersion */,
+ ShardVersion(placementVersion,
+ scopedCSR->getCollectionIndexes(opCtx)) /* wantedVersion */,
+ shardId),
+ str::stream() << "Collection " << nss.ns() << " changed since split start",
+ epoch == expectedEpoch &&
+ (!expectedTimestamp || placementVersion.getTimestamp() == expectedTimestamp));
ChunkType nextChunk;
for (auto it = splitPoints.begin(); it != splitPoints.end(); ++it) {
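
The final hunk keeps the scoped CSR handle alive (instead of immediately chaining ->getCurrentMetadataIfKnown()) so it can be asked again for the collection indexes when building the wanted ShardVersion, rather than defaulting to boost::none. A stand-in sketch of that shape:

    #include <iostream>
    #include <optional>

    struct CollectionIndexes { long long v; };

    struct ScopedCSR {
        // Hypothetical analogue of CSR::getCollectionIndexes().
        std::optional<CollectionIndexes> getCollectionIndexes() const {
            return CollectionIndexes{11};
        }
    };

    struct ShardVersion {
        int placementMajor;
        std::optional<CollectionIndexes> indexes;
    };

    int main() {
        ScopedCSR csr;  // held for the whole check, not discarded after one call
        ShardVersion wanted{3, csr.getCollectionIndexes()};
        std::cout << (wanted.indexes ? wanted.indexes->v : -1) << '\n';
    }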