Diffstat (limited to 'src/mongo/db/pipeline/pipeline_d.cpp')
-rw-r--r--  src/mongo/db/pipeline/pipeline_d.cpp | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/src/mongo/db/pipeline/pipeline_d.cpp b/src/mongo/db/pipeline/pipeline_d.cpp
index f8a3132b061..1358b6a9857 100644
--- a/src/mongo/db/pipeline/pipeline_d.cpp
+++ b/src/mongo/db/pipeline/pipeline_d.cpp
@@ -129,14 +129,14 @@ StatusWith<unique_ptr<PlanExecutor, PlanExecutor::Deleter>> createRandomCursorEx
// If the incoming operation is sharded, use the CSS to infer the filtering metadata for the
// collection, otherwise treat it as unsharded
- auto shardMetadata =
- CollectionShardingState::get(opCtx, coll->ns())->getOrphansFilter(opCtx, coll);
+ auto collectionFilter =
+ CollectionShardingState::get(opCtx, coll->ns())->getOwnershipFilter(opCtx, coll);
// Because 'numRecords' includes orphan documents, our initial decision to optimize the $sample
// cursor may have been mistaken. For sharded collections, build a TRIAL plan that will switch
// to a collection scan if the ratio of orphaned to owned documents encountered over the first
// 100 works() is such that we would have chosen not to optimize.
- if (shardMetadata->isSharded()) {
+ if (collectionFilter.isSharded()) {
// The ratio of owned to orphaned documents must be at least equal to the ratio between the
// requested sampleSize and the maximum permitted sampleSize for the original constraints to
// be satisfied. For instance, if there are 200 documents and the sampleSize is 5, then at
@@ -147,12 +147,12 @@ StatusWith<unique_ptr<PlanExecutor, PlanExecutor::Deleter>> createRandomCursorEx
sampleSize / (numRecords * kMaxSampleRatioForRandCursor), kMaxSampleRatioForRandCursor);
// The trial plan is SHARDING_FILTER-MULTI_ITERATOR.
auto randomCursorPlan =
- std::make_unique<ShardFilterStage>(opCtx, shardMetadata, ws.get(), std::move(root));
+ std::make_unique<ShardFilterStage>(opCtx, collectionFilter, ws.get(), std::move(root));
// The backup plan is SHARDING_FILTER-COLLSCAN.
std::unique_ptr<PlanStage> collScanPlan = std::make_unique<CollectionScan>(
opCtx, coll, CollectionScanParams{}, ws.get(), nullptr);
collScanPlan = std::make_unique<ShardFilterStage>(
- opCtx, shardMetadata, ws.get(), std::move(collScanPlan));
+ opCtx, collectionFilter, ws.get(), std::move(collScanPlan));
// Place a TRIAL stage at the root of the plan tree, and pass it the trial and backup plans.
root = std::make_unique<TrialStage>(opCtx,
ws.get(),
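
For readers tracing the arithmetic described in the hunk comments, the sketch below reproduces the advanced-to-work threshold that the TRIAL stage is given. It is not part of the patch: the helper name and the main() harness are invented for illustration, and the assumption that the enclosing call on the `sampleSize / (numRecords * kMaxSampleRatioForRandCursor), kMaxSampleRatioForRandCursor)` line is std::max is inferred from its shape; the 0.05 cap is taken from the comment's own "(200*0.05)" example.

#include <algorithm>
#include <iostream>

// Illustrative stand-in for the 5% cap referenced in the hunk's comments.
static constexpr double kMaxSampleRatioForRandCursor = 0.05;

// Hypothetical helper: the minimum fraction of documents that must survive the
// SHARDING_FILTER stage (i.e. be owned rather than orphaned) during the trial
// period for the random-cursor plan to stay within the $sample constraints.
double minAdvancedToWorkRatio(double sampleSize, double numRecords) {
    return std::max(sampleSize / (numRecords * kMaxSampleRatioForRandCursor),
                    kMaxSampleRatioForRandCursor);
}

int main() {
    // Worked example from the comment: 200 documents and a sampleSize of 5
    // give 5 / (200 * 0.05) = 0.5, so at least 50% of the documents seen by
    // the trial plan must be owned by this shard; otherwise the TRIAL stage
    // falls back to the SHARDING_FILTER-COLLSCAN backup plan.
    std::cout << minAdvancedToWorkRatio(5, 200) << "\n";  // prints 0.5
    return 0;
}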