author    Esha Maharishi <esha.maharishi@mongodb.com>  2018-04-02 21:31:15 -0400
committer Esha Maharishi <esha.maharishi@mongodb.com>  2018-04-03 11:49:44 -0400
commit    be5c6fa35478729bd55c1d18547af90953bfc659 (patch)
tree      e2db871ab3689fe2268066108a7255d83279e5e6 /src
parent    03aba5afae808e6924f0b50338281adf6569c9bc (diff)
download  mongo-be5c6fa35478729bd55c1d18547af90953bfc659.tar.gz
SERVER-34269 Make CachedCollectionRoutingInfo an explicit 'package' of the database and collection routing info
Diffstat (limited to 'src')
-rw-r--r--  src/mongo/db/transaction_reaper.cpp                  |  2
-rw-r--r--  src/mongo/s/catalog_cache.h                          | 24
-rw-r--r--  src/mongo/s/catalog_cache_refresh_test.cpp           | 32
-rw-r--r--  src/mongo/s/catalog_cache_test_fixture.cpp           |  2
-rw-r--r--  src/mongo/s/client/parallel.cpp                      |  2
-rw-r--r--  src/mongo/s/client/version_manager.cpp               |  2
-rw-r--r--  src/mongo/s/commands/cluster_aggregate.cpp           | 18
-rw-r--r--  src/mongo/s/commands/cluster_coll_stats_cmd.cpp      |  2
-rw-r--r--  src/mongo/s/commands/cluster_commands_helpers.cpp    |  2
-rw-r--r--  src/mongo/s/commands/cluster_find_and_modify_cmd.cpp | 10
-rw-r--r--  src/mongo/s/commands/cluster_map_reduce_cmd.cpp      |  6
-rw-r--r--  src/mongo/s/commands/commands_public.cpp             | 12
-rw-r--r--  src/mongo/s/commands/pipeline_s.cpp                  |  2
-rw-r--r--  src/mongo/s/write_ops/chunk_manager_targeter.cpp     | 22
14 files changed, 66 insertions(+), 72 deletions(-)
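
At a glance, every call site below changes the same way: the primary shard is no longer read straight off the collection routing info but through the CachedDatabaseInfo it packages. A minimal sketch of the new spelling, assuming it is compiled inside the mongo source tree (the helper name is illustrative, not from this commit):

    #include "mongo/s/catalog_cache.h"

    namespace mongo {
    // Illustrative: the post-commit way to reach the database primary. The
    // pre-commit spelling was routingInfo.primary() / routingInfo.primaryId().
    std::shared_ptr<Shard> resolvePrimary(const CachedCollectionRoutingInfo& routingInfo) {
        return routingInfo.db().primary();
    }
    }  // namespace mongo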
diff --git a/src/mongo/db/transaction_reaper.cpp b/src/mongo/db/transaction_reaper.cpp
index 9d41635d5ff..e853dd17c2d 100644
--- a/src/mongo/db/transaction_reaper.cpp
+++ b/src/mongo/db/transaction_reaper.cpp
@@ -234,7 +234,7 @@ public:
uassertStatusOK(Grid::get(_opCtx)->catalogCache()->getCollectionRoutingInfo(
_opCtx, NamespaceString(SessionsCollection::kSessionsFullNS)));
_cm = routingInfo.cm();
- _primary = routingInfo.primary();
+ _primary = routingInfo.db().primary();
}
ShardId shardId;
if (_cm) {
diff --git a/src/mongo/s/catalog_cache.h b/src/mongo/s/catalog_cache.h
index 02b396b7a84..0a8895b57a4 100644
--- a/src/mongo/s/catalog_cache.h
+++ b/src/mongo/s/catalog_cache.h
@@ -272,29 +272,21 @@ private:
};
/**
- * Constructed exclusively by the CatalogCache contains a reference to the routing information for
- * the specified collection.
+ * Constructed exclusively by the CatalogCache.
+ *
+ * This RoutingInfo can be considered a "package" of routing info for the database and for the
+ * collection. Once unsharded collections are treated as sharded collections with a single chunk,
+ * they will also have a ChunkManager with a "chunk distribution." At that point, this "package" can
+ * be dismantled: routing for commands that route by database can directly retrieve the
+ * CachedDatabaseInfo, while routing for commands that route by collection can directly retrieve the
+ * ChunkManager.
*/
class CachedCollectionRoutingInfo {
public:
- /**
- * These serve the same purpose: to route to the primary shard for the collection's database.
- * Paths that have been updated to attach a databaseVersion use db(). Once all paths have been
- * updated, primaryId() and primary() can be deleted.
- */
- const ShardId& primaryId() const {
- return _db.primaryId();
- };
- std::shared_ptr<Shard> primary() const {
- return _db.primary();
- };
CachedDatabaseInfo db() const {
return _db;
};
- /**
- * If the collection is sharded, returns a chunk manager for it. Otherwise, nullptr.
- */
std::shared_ptr<ChunkManager> cm() const {
return _cm;
}
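
The "package" described in the comment above yields one recurring call-site pattern, visible throughout this diff: consult cm() first, and fall back to db().primaryId() when the collection is unsharded. A minimal sketch of that pattern, assuming compilation inside the mongo tree (the helper name shardsForNamespace is illustrative; the body mirrors targetCollection() and getTargetedShardsForQuery() below):

    #include <set>

    #include "mongo/s/catalog_cache.h"

    namespace mongo {
    // Illustrative helper showing the dispatch the "package" design implies.
    std::set<ShardId> shardsForNamespace(const CachedCollectionRoutingInfo& routingInfo) {
        std::set<ShardId> shardIds;
        if (routingInfo.cm()) {
            // Sharded: the ChunkManager knows the chunk distribution.
            routingInfo.cm()->getAllShardIds(&shardIds);
        } else {
            // Unsharded: everything lives on the database's primary shard.
            shardIds.insert(routingInfo.db().primaryId());
        }
        return shardIds;
    }
    }  // namespace mongo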
diff --git a/src/mongo/s/catalog_cache_refresh_test.cpp b/src/mongo/s/catalog_cache_refresh_test.cpp
index 0cb0ba0c887..c248ae39b5c 100644
--- a/src/mongo/s/catalog_cache_refresh_test.cpp
+++ b/src/mongo/s/catalog_cache_refresh_test.cpp
@@ -128,10 +128,10 @@ TEST_F(CatalogCacheRefreshTest, DatabaseNotFound) {
try {
auto routingInfo = future.timed_get(kFutureTimeout);
auto cm = routingInfo->cm();
- auto primary = routingInfo->primary();
+ auto primary = routingInfo->db().primary();
FAIL(str::stream() << "Returning no database did not fail and returned "
- << (cm ? cm->toString() : routingInfo->primaryId().toString()));
+ << (cm ? cm->toString() : routingInfo->db().primaryId().toString()));
} catch (const DBException& ex) {
ASSERT_EQ(ErrorCodes::NamespaceNotFound, ex.code());
}
@@ -149,10 +149,10 @@ TEST_F(CatalogCacheRefreshTest, DatabaseBSONCorrupted) {
try {
auto routingInfo = future.timed_get(kFutureTimeout);
auto cm = routingInfo->cm();
- auto primary = routingInfo->primary();
+ auto primary = routingInfo->db().primary();
FAIL(str::stream() << "Returning corrupted database entry did not fail and returned "
- << (cm ? cm->toString() : routingInfo->primaryId().toString()));
+ << (cm ? cm->toString() : routingInfo->db().primaryId().toString()));
} catch (const DBException& ex) {
ASSERT_EQ(ErrorCodes::NoSuchKey, ex.code());
}
@@ -168,8 +168,8 @@ TEST_F(CatalogCacheRefreshTest, CollectionNotFound) {
auto routingInfo = future.timed_get(kFutureTimeout);
ASSERT(!routingInfo->cm());
- ASSERT(routingInfo->primary());
- ASSERT_EQ(ShardId{"0"}, routingInfo->primaryId());
+ ASSERT(routingInfo->db().primary());
+ ASSERT_EQ(ShardId{"0"}, routingInfo->db().primaryId());
}
TEST_F(CatalogCacheRefreshTest, CollectionBSONCorrupted) {
@@ -186,10 +186,10 @@ TEST_F(CatalogCacheRefreshTest, CollectionBSONCorrupted) {
try {
auto routingInfo = future.timed_get(kFutureTimeout);
auto cm = routingInfo->cm();
- auto primary = routingInfo->primary();
+ auto primary = routingInfo->db().primary();
FAIL(str::stream() << "Returning corrupted collection entry did not fail and returned "
- << (cm ? cm->toString() : routingInfo->primaryId().toString()));
+ << (cm ? cm->toString() : routingInfo->db().primaryId().toString()));
} catch (const DBException& ex) {
ASSERT_EQ(ErrorCodes::FailedToParse, ex.code());
}
@@ -217,10 +217,10 @@ TEST_F(CatalogCacheRefreshTest, NoChunksFoundForCollection) {
try {
auto routingInfo = future.timed_get(kFutureTimeout);
auto cm = routingInfo->cm();
- auto primary = routingInfo->primary();
+ auto primary = routingInfo->db().primary();
FAIL(str::stream() << "Returning no chunks for collection did not fail and returned "
- << (cm ? cm->toString() : routingInfo->primaryId().toString()));
+ << (cm ? cm->toString() : routingInfo->db().primaryId().toString()));
} catch (const DBException& ex) {
ASSERT_EQ(ErrorCodes::ConflictingOperationInProgress, ex.code());
}
@@ -251,10 +251,10 @@ TEST_F(CatalogCacheRefreshTest, ChunksBSONCorrupted) {
try {
auto routingInfo = future.timed_get(kFutureTimeout);
auto cm = routingInfo->cm();
- auto primary = routingInfo->primary();
+ auto primary = routingInfo->db().primary();
FAIL(str::stream() << "Returning no chunks for collection did not fail and returned "
- << (cm ? cm->toString() : routingInfo->primaryId().toString()));
+ << (cm ? cm->toString() : routingInfo->db().primaryId().toString()));
} catch (const DBException& ex) {
ASSERT_EQ(ErrorCodes::NoSuchKey, ex.code());
}
@@ -306,11 +306,11 @@ TEST_F(CatalogCacheRefreshTest, IncompleteChunksFoundForCollection) {
try {
auto routingInfo = future.timed_get(kFutureTimeout);
auto cm = routingInfo->cm();
- auto primary = routingInfo->primary();
+ auto primary = routingInfo->db().primary();
FAIL(
str::stream() << "Returning incomplete chunks for collection did not fail and returned "
- << (cm ? cm->toString() : routingInfo->primaryId().toString()));
+ << (cm ? cm->toString() : routingInfo->db().primaryId().toString()));
} catch (const DBException& ex) {
ASSERT_EQ(ErrorCodes::ConflictingOperationInProgress, ex.code());
}
@@ -353,11 +353,11 @@ TEST_F(CatalogCacheRefreshTest, ChunkEpochChangeDuringIncrementalLoad) {
try {
auto routingInfo = future.timed_get(kFutureTimeout);
auto cm = routingInfo->cm();
- auto primary = routingInfo->primary();
+ auto primary = routingInfo->db().primary();
FAIL(str::stream()
<< "Returning chunks with different epoch for collection did not fail and returned "
- << (cm ? cm->toString() : routingInfo->primaryId().toString()));
+ << (cm ? cm->toString() : routingInfo->db().primaryId().toString()));
} catch (const DBException& ex) {
ASSERT_EQ(ErrorCodes::ConflictingOperationInProgress, ex.code());
}
diff --git a/src/mongo/s/catalog_cache_test_fixture.cpp b/src/mongo/s/catalog_cache_test_fixture.cpp
index 1f3e14b6a9b..af4f0fc4a85 100644
--- a/src/mongo/s/catalog_cache_test_fixture.cpp
+++ b/src/mongo/s/catalog_cache_test_fixture.cpp
@@ -154,7 +154,7 @@ std::shared_ptr<ChunkManager> CatalogCacheTestFixture::makeChunkManager(
auto routingInfo = future.timed_get(kFutureTimeout);
ASSERT(routingInfo->cm());
- ASSERT(routingInfo->primary());
+ ASSERT(routingInfo->db().primary());
return routingInfo->cm();
}
diff --git a/src/mongo/s/client/parallel.cpp b/src/mongo/s/client/parallel.cpp
index 04fee51c514..65e4af1ebd8 100644
--- a/src/mongo/s/client/parallel.cpp
+++ b/src/mongo/s/client/parallel.cpp
@@ -439,7 +439,7 @@ void ParallelSortClusteredCursor::startInit(OperationContext* opCtx) {
// ParallelSortClusteredCursor has two states - either !cm && primary, which means
// unsharded collection, or cm && !primary, which means sharded collection.
if (!manager) {
- primary = routingInfo.primary();
+ primary = routingInfo.db().primary();
}
}
}
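
The comment in this hunk states a two-state invariant: after startInit, exactly one of the chunk manager and the primary shard is set. That is an XOR, which a sketch can make explicit (the helper name is illustrative; invariant() is the macro used elsewhere in this diff):

    // Illustrative check of the invariant from the comment above: sharded means
    // cm && !primary, unsharded means !cm && primary, and nothing else is legal.
    void checkCursorRoutingInvariant(const std::shared_ptr<ChunkManager>& manager,
                                     const std::shared_ptr<Shard>& primary) {
        invariant(static_cast<bool>(manager) != static_cast<bool>(primary));
    }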
diff --git a/src/mongo/s/client/version_manager.cpp b/src/mongo/s/client/version_manager.cpp
index bbd3d5f8555..f2ca0b1df65 100644
--- a/src/mongo/s/client/version_manager.cpp
+++ b/src/mongo/s/client/version_manager.cpp
@@ -268,7 +268,7 @@ bool checkShardVersion(OperationContext* opCtx,
auto& routingInfo = routingInfoStatus.getValue();
const auto manager = routingInfo.cm();
- const auto primary = routingInfo.primary();
+ const auto primary = routingInfo.db().primary();
unsigned long long officialSequenceNumber = 0;
diff --git a/src/mongo/s/commands/cluster_aggregate.cpp b/src/mongo/s/commands/cluster_aggregate.cpp
index cd977d6b7fa..da880fc886c 100644
--- a/src/mongo/s/commands/cluster_aggregate.cpp
+++ b/src/mongo/s/commands/cluster_aggregate.cpp
@@ -306,8 +306,8 @@ std::vector<ClusterClientCursorParams::RemoteCursor> establishShardCursors(
} else {
// The collection is unsharded. Target only the primary shard for the database.
// Don't append shard version info when contacting the config servers.
- requests.emplace_back(routingInfo->primaryId(),
- !routingInfo->primary()->isConfig()
+ requests.emplace_back(routingInfo->db().primaryId(),
+ !routingInfo->db().primary()->isConfig()
? appendShardVersion(cmdObj, ChunkVersion::UNSHARDED())
: cmdObj);
}
@@ -443,9 +443,9 @@ DispatchShardPipelineResults dispatchShardPipeline(
// - There is a stage that needs to be run on the primary shard and the single target shard
// is not the primary.
// - The pipeline contains one or more stages which must always merge on mongoS.
- const bool needsSplit =
- (shardIds.size() > 1u || needsMongosMerge ||
- (needsPrimaryShardMerge && *(shardIds.begin()) != executionNsRoutingInfo.primaryId()));
+ const bool needsSplit = (shardIds.size() > 1u || needsMongosMerge ||
+ (needsPrimaryShardMerge &&
+ *(shardIds.begin()) != executionNsRoutingInfo.db().primaryId()));
const bool isSplit = pipelineForTargetedShards->isSplitForShards();
@@ -524,7 +524,7 @@ DispatchShardPipelineResults dispatchShardPipeline(
// the primary shard, but the primary shard was not in the set of targeted shards, then we
// must increment the number of involved shards.
CurOp::get(opCtx)->debug().nShards = shardIds.size() +
- (needsPrimaryShardMerge && !shardIds.count(executionNsRoutingInfo.primaryId()));
+ (needsPrimaryShardMerge && !shardIds.count(executionNsRoutingInfo.db().primaryId()));
break; // Success!
}
@@ -757,7 +757,7 @@ Status ClusterAggregate::runAggregate(OperationContext* opCtx,
liteParsedPipeline.allowedToPassthroughFromMongos()) {
return aggPassthrough(opCtx,
namespaces,
- executionNsRoutingInfo.primary()->getId(),
+ executionNsRoutingInfo.db().primary()->getId(),
cmdObj,
request,
liteParsedPipeline,
@@ -775,7 +775,7 @@ Status ClusterAggregate::runAggregate(OperationContext* opCtx,
} else {
// Unsharded collection. Get collection metadata from primary chunk.
auto collationObj = getDefaultCollationForUnshardedCollection(
- executionNsRoutingInfo.primary().get(), namespaces.executionNss);
+ executionNsRoutingInfo.db().primary().get(), namespaces.executionNss);
if (!collationObj.isEmpty()) {
collation = uassertStatusOK(CollatorFactoryInterface::get(opCtx->getServiceContext())
->makeFromBSON(collationObj));
@@ -891,7 +891,7 @@ Status ClusterAggregate::runAggregate(OperationContext* opCtx,
"merigng on a shard",
!opCtx->getTxnNumber());
ShardId mergingShardId =
- pickMergingShard(opCtx, dispatchResults, executionNsRoutingInfo.primaryId());
+ pickMergingShard(opCtx, dispatchResults, executionNsRoutingInfo.db().primaryId());
mergingPipeline->addInitialSource(DocumentSourceMergeCursors::create(
std::move(dispatchResults.remoteCursors),
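
One condition reshaped in this file is worth spelling out: the needsSplit predicate bundles three separate reasons to split the pipeline for a merge step. A hedged restatement using only names that appear in the hunks above:

    // Same predicate as in dispatchShardPipeline, with each clause named.
    const bool multipleTargets = shardIds.size() > 1u;
    const bool singleTargetIsNotPrimary = needsPrimaryShardMerge &&
        *(shardIds.begin()) != executionNsRoutingInfo.db().primaryId();
    const bool needsSplit =
        multipleTargets || needsMongosMerge || singleTargetIsNotPrimary;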
diff --git a/src/mongo/s/commands/cluster_coll_stats_cmd.cpp b/src/mongo/s/commands/cluster_coll_stats_cmd.cpp
index 88d61370881..8c8bc11be8a 100644
--- a/src/mongo/s/commands/cluster_coll_stats_cmd.cpp
+++ b/src/mongo/s/commands/cluster_coll_stats_cmd.cpp
@@ -79,7 +79,7 @@ public:
result.appendBool("sharded", true);
} else {
result.appendBool("sharded", false);
- result.append("primary", routingInfo.primaryId().toString());
+ result.append("primary", routingInfo.db().primaryId().toString());
}
auto shardResults = scatterGatherVersionedTargetByRoutingTable(
diff --git a/src/mongo/s/commands/cluster_commands_helpers.cpp b/src/mongo/s/commands/cluster_commands_helpers.cpp
index 4037e67bb7e..9002ccb7fff 100644
--- a/src/mongo/s/commands/cluster_commands_helpers.cpp
+++ b/src/mongo/s/commands/cluster_commands_helpers.cpp
@@ -578,7 +578,7 @@ std::set<ShardId> getTargetedShardsForQuery(OperationContext* opCtx,
}
// The collection is unsharded. Target only the primary shard for the database.
- return {routingInfo.primaryId()};
+ return {routingInfo.db().primaryId()};
}
boost::optional<LogicalTime> computeAtClusterTime(OperationContext* opCtx,
diff --git a/src/mongo/s/commands/cluster_find_and_modify_cmd.cpp b/src/mongo/s/commands/cluster_find_and_modify_cmd.cpp
index 3c964dcd97e..321cd8ba4e2 100644
--- a/src/mongo/s/commands/cluster_find_and_modify_cmd.cpp
+++ b/src/mongo/s/commands/cluster_find_and_modify_cmd.cpp
@@ -111,7 +111,7 @@ public:
std::shared_ptr<Shard> shard;
if (!routingInfo.cm()) {
- shard = routingInfo.primary();
+ shard = routingInfo.db().primary();
} else {
chunkMgr = routingInfo.cm();
@@ -162,8 +162,12 @@ public:
const auto routingInfo =
uassertStatusOK(Grid::get(opCtx)->catalogCache()->getCollectionRoutingInfo(opCtx, nss));
if (!routingInfo.cm()) {
- _runCommand(
- opCtx, routingInfo.primaryId(), ChunkVersion::UNSHARDED(), nss, cmdObj, &result);
+ _runCommand(opCtx,
+ routingInfo.db().primaryId(),
+ ChunkVersion::UNSHARDED(),
+ nss,
+ cmdObj,
+ &result);
return true;
}
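
For the unsharded branch above, the command runs on the database primary with the sentinel ChunkVersion::UNSHARDED() attached, so the shard can detect a stale view if the collection has since become sharded. A minimal sketch of that targeting decision, assuming compilation inside the mongo tree (the struct and function names are illustrative):

    #include "mongo/s/catalog_cache.h"
    #include "mongo/s/chunk_version.h"

    namespace mongo {
    // Illustrative: where an unsharded findAndModify is sent, mirroring the
    // branch in the hunk above.
    struct UnshardedTarget {
        ShardId shardId;
        ChunkVersion shardVersion;
    };

    UnshardedTarget targetUnsharded(const CachedCollectionRoutingInfo& routingInfo) {
        invariant(!routingInfo.cm());  // caller already checked for a chunk manager
        return {routingInfo.db().primaryId(), ChunkVersion::UNSHARDED()};
    }
    }  // namespace mongo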
diff --git a/src/mongo/s/commands/cluster_map_reduce_cmd.cpp b/src/mongo/s/commands/cluster_map_reduce_cmd.cpp
index 0650a073fa0..55df16f58e1 100644
--- a/src/mongo/s/commands/cluster_map_reduce_cmd.cpp
+++ b/src/mongo/s/commands/cluster_map_reduce_cmd.cpp
@@ -281,9 +281,9 @@ public:
if (!shardedInput && !shardedOutput && !customOutDB) {
LOG(1) << "simple MR, just passthrough";
- invariant(inputRoutingInfo.primary());
+ invariant(inputRoutingInfo.db().primary());
- ShardConnection conn(inputRoutingInfo.primary()->getConnString(), "");
+ ShardConnection conn(inputRoutingInfo.db().primary()->getConnString(), "");
BSONObj res;
bool ok = conn->runCommand(
@@ -295,7 +295,7 @@ public:
if (auto wcErrorElem = res["writeConcernError"]) {
appendWriteConcernErrorToCmdResponse(
- inputRoutingInfo.primary()->getId(), wcErrorElem, result);
+ inputRoutingInfo.db().primary()->getId(), wcErrorElem, result);
}
result.appendElementsUnique(CommandHelpers::filterCommandReplyForPassthrough(res));
diff --git a/src/mongo/s/commands/commands_public.cpp b/src/mongo/s/commands/commands_public.cpp
index 175cf3b40b9..c12981e5e19 100644
--- a/src/mongo/s/commands/commands_public.cpp
+++ b/src/mongo/s/commands/commands_public.cpp
@@ -131,9 +131,7 @@ protected:
str::stream() << "can't do command: " << getName() << " on sharded collection",
!routingInfo.cm());
- const auto primaryShardId = routingInfo.primaryId();
- const auto primaryShard =
- uassertStatusOK(Grid::get(opCtx)->shardRegistry()->getShard(opCtx, primaryShardId));
+ const auto primaryShard = routingInfo.db().primary();
// Here, we first filter the command before appending an UNSHARDED shardVersion, because
// "shardVersion" is one of the fields that gets filtered out.
@@ -156,7 +154,7 @@ protected:
if (!commandResponse.writeConcernStatus.isOK()) {
appendWriteConcernErrorToCmdResponse(
- primaryShardId, commandResponse.response["writeConcernError"], result);
+ primaryShard->getId(), commandResponse.response["writeConcernError"], result);
}
result.appendElementsUnique(
CommandHelpers::filterCommandReplyForPassthrough(std::move(commandResponse.response)));
@@ -217,7 +215,7 @@ public:
uassert(13137,
"Source and destination collections must be on same shard",
- fromRoutingInfo.primaryId() == toRoutingInfo.primaryId());
+ fromRoutingInfo.db().primaryId() == toRoutingInfo.db().primaryId());
return nonShardedCollectionCommandPassthrough(
opCtx,
@@ -439,9 +437,9 @@ public:
}
Strategy::CommandResult cmdResult;
- cmdResult.shardTargetId = routingInfo.primaryId();
+ cmdResult.shardTargetId = routingInfo.db().primaryId();
cmdResult.result = result.done();
- cmdResult.target = routingInfo.primary()->getConnString();
+ cmdResult.target = routingInfo.db().primary()->getConnString();
return ClusterExplain::buildExplainResult(
opCtx, {cmdResult}, ClusterExplain::kSingleShard, timer.millis(), out);
diff --git a/src/mongo/s/commands/pipeline_s.cpp b/src/mongo/s/commands/pipeline_s.cpp
index 7d631715cba..f2f75c157fe 100644
--- a/src/mongo/s/commands/pipeline_s.cpp
+++ b/src/mongo/s/commands/pipeline_s.cpp
@@ -66,7 +66,7 @@ std::pair<ShardId, ChunkVersion> getSingleTargetedShardForQuery(
return {*shardIds.begin(), chunkMgr->getVersion(*shardIds.begin())};
}
- return {routingInfo.primaryId(), ChunkVersion::UNSHARDED()};
+ return {routingInfo.db().primaryId(), ChunkVersion::UNSHARDED()};
}
/**
diff --git a/src/mongo/s/write_ops/chunk_manager_targeter.cpp b/src/mongo/s/write_ops/chunk_manager_targeter.cpp
index 925c50d95df..b7400c0b581 100644
--- a/src/mongo/s/write_ops/chunk_manager_targeter.cpp
+++ b/src/mongo/s/write_ops/chunk_manager_targeter.cpp
@@ -303,13 +303,13 @@ StatusWith<ShardEndpoint> ChunkManagerTargeter::targetInsert(OperationContext* o
if (!shardKey.isEmpty()) {
return _targetShardKey(shardKey, CollationSpec::kSimpleSpec, doc.objsize());
} else {
- if (!_routingInfo->primary()) {
+ if (!_routingInfo->db().primary()) {
return Status(ErrorCodes::NamespaceNotFound,
str::stream() << "could not target insert in collection " << getNS().ns()
<< "; no metadata found");
}
- return ShardEndpoint(_routingInfo->primary()->getId(), ChunkVersion::UNSHARDED());
+ return ShardEndpoint(_routingInfo->db().primary()->getId(), ChunkVersion::UNSHARDED());
}
return Status::OK();
@@ -527,7 +527,7 @@ StatusWith<std::vector<ShardEndpoint>> ChunkManagerTargeter::_targetDoc(
StatusWith<std::vector<ShardEndpoint>> ChunkManagerTargeter::_targetQuery(
OperationContext* opCtx, const BSONObj& query, const BSONObj& collation) const {
- if (!_routingInfo->primary() && !_routingInfo->cm()) {
+ if (!_routingInfo->db().primary() && !_routingInfo->cm()) {
return {ErrorCodes::NamespaceNotFound,
str::stream() << "could not target query in " << getNS().ns()
<< "; no metadata found"};
@@ -541,7 +541,7 @@ StatusWith<std::vector<ShardEndpoint>> ChunkManagerTargeter::_targetQuery(
return ex.toStatus();
}
} else {
- shardIds.insert(_routingInfo->primary()->getId());
+ shardIds.insert(_routingInfo->db().primary()->getId());
}
std::vector<ShardEndpoint> endpoints;
@@ -569,7 +569,7 @@ ShardEndpoint ChunkManagerTargeter::_targetShardKey(const BSONObj& shardKey,
}
StatusWith<std::vector<ShardEndpoint>> ChunkManagerTargeter::targetCollection() const {
- if (!_routingInfo->primary() && !_routingInfo->cm()) {
+ if (!_routingInfo->db().primary() && !_routingInfo->cm()) {
return {ErrorCodes::NamespaceNotFound,
str::stream() << "could not target full range of " << getNS().ns()
<< "; metadata not found"};
@@ -579,7 +579,7 @@ StatusWith<std::vector<ShardEndpoint>> ChunkManagerTargeter::targetCollection()
if (_routingInfo->cm()) {
_routingInfo->cm()->getAllShardIds(&shardIds);
} else {
- shardIds.insert(_routingInfo->primary()->getId());
+ shardIds.insert(_routingInfo->db().primary()->getId());
}
std::vector<ShardEndpoint> endpoints;
@@ -594,7 +594,7 @@ StatusWith<std::vector<ShardEndpoint>> ChunkManagerTargeter::targetCollection()
StatusWith<std::vector<ShardEndpoint>> ChunkManagerTargeter::targetAllShards(
OperationContext* opCtx) const {
- if (!_routingInfo->primary() && !_routingInfo->cm()) {
+ if (!_routingInfo->db().primary() && !_routingInfo->cm()) {
return {ErrorCodes::NamespaceNotFound,
str::stream() << "could not target every shard with versions for " << getNS().ns()
<< "; metadata not found"};
@@ -671,7 +671,7 @@ Status ChunkManagerTargeter::refreshIfNeeded(OperationContext* opCtx, bool* wasC
//
auto lastManager = _routingInfo->cm();
- auto lastPrimary = _routingInfo->primary();
+ auto lastPrimary = _routingInfo->db().primary();
auto initStatus = init(opCtx);
if (!initStatus.isOK()) {
@@ -694,7 +694,7 @@ Status ChunkManagerTargeter::refreshIfNeeded(OperationContext* opCtx, bool* wasC
// metadata since we last got it from the cache.
bool alreadyRefreshed = wasMetadataRefreshed(
- lastManager, lastPrimary, _routingInfo->cm(), _routingInfo->primary());
+ lastManager, lastPrimary, _routingInfo->cm(), _routingInfo->db().primary());
// If didn't already refresh the targeting information, refresh it
if (!alreadyRefreshed) {
@@ -703,7 +703,7 @@ Status ChunkManagerTargeter::refreshIfNeeded(OperationContext* opCtx, bool* wasC
}
*wasChanged = isMetadataDifferent(
- lastManager, lastPrimary, _routingInfo->cm(), _routingInfo->primary());
+ lastManager, lastPrimary, _routingInfo->cm(), _routingInfo->db().primary());
return Status::OK();
} else if (!_remoteShardVersions.empty()) {
// If we got stale shard versions from remote shards, we may need to refresh
@@ -720,7 +720,7 @@ Status ChunkManagerTargeter::refreshIfNeeded(OperationContext* opCtx, bool* wasC
}
*wasChanged = isMetadataDifferent(
- lastManager, lastPrimary, _routingInfo->cm(), _routingInfo->primary());
+ lastManager, lastPrimary, _routingInfo->cm(), _routingInfo->db().primary());
return Status::OK();
}
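
refreshIfNeeded() above works by snapshotting the last-seen ChunkManager and primary shard, re-running init(), and comparing the two snapshots. A hedged sketch of what a comparison like isMetadataDifferent plausibly checks; this is an assumption about its shape, not code from the diff:

    // Assumption: metadata counts as changed if the sharded state flipped, the
    // collection version moved, or the primary shard changed. The real helper
    // in chunk_manager_targeter.cpp may differ in detail.
    bool metadataDiffers(const std::shared_ptr<ChunkManager>& lastCM,
                         const std::shared_ptr<Shard>& lastPrimary,
                         const std::shared_ptr<ChunkManager>& newCM,
                         const std::shared_ptr<Shard>& newPrimary) {
        if (static_cast<bool>(lastCM) != static_cast<bool>(newCM))
            return true;  // flipped between sharded and unsharded
        if (lastCM)
            return lastCM->getVersion() != newCM->getVersion();  // chunks moved
        if (lastPrimary && newPrimary)
            return lastPrimary->getId() != newPrimary->getId();  // primary changed
        return static_cast<bool>(lastPrimary) != static_cast<bool>(newPrimary);
    }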