Diffstat (limited to 'src/mongo/db/query')
-rw-r--r--   src/mongo/db/query/idhack_runner.cpp        2
-rw-r--r--   src/mongo/db/query/multi_plan_runner.cpp     6
-rw-r--r--   src/mongo/db/query/stage_builder.cpp         7
3 files changed, 8 insertions, 7 deletions
diff --git a/src/mongo/db/query/idhack_runner.cpp b/src/mongo/db/query/idhack_runner.cpp
index c57aee98ec2..fcd7b0fce8b 100644
--- a/src/mongo/db/query/idhack_runner.cpp
+++ b/src/mongo/db/query/idhack_runner.cpp
@@ -156,7 +156,7 @@ namespace mongo {
         }
 
         // Either the data was in memory or we paged it in.
-        *objOut = loc.obj();
+        *objOut = _collection->docFor(loc);
 
         // If we're sharded make sure the key belongs to us. We need the object to do this.
         if (shardingState.needCollectionMetadata(_collection->ns().ns())) {
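The fix above replaces a direct dereference of the record location with a lookup through the owning Collection. A minimal sketch of the difference, assuming docFor returns the document stored at that DiskLoc as a BSONObj (the local name doc is illustrative, not from the diff):

    // Old path: read the object straight off the DiskLoc, bypassing the collection.
    // BSONObj doc = loc.obj();

    // New path: ask the owning Collection for the document at that location, so the
    // lookup goes through the collection's record store.
    BSONObj doc = _collection->docFor(loc);
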
diff --git a/src/mongo/db/query/multi_plan_runner.cpp b/src/mongo/db/query/multi_plan_runner.cpp
index 4cf3e1af23e..a262dfbb62b 100644
--- a/src/mongo/db/query/multi_plan_runner.cpp
+++ b/src/mongo/db/query/multi_plan_runner.cpp
@@ -154,7 +154,7 @@ namespace mongo {
                 if (member->hasLoc() && member->loc == dl) {
                     list<WorkingSetID>::iterator next = it;
                     next++;
-                    WorkingSetCommon::fetchAndInvalidateLoc(member);
+                    WorkingSetCommon::fetchAndInvalidateLoc(member, _collection);
                     _bestPlan->getWorkingSet()->flagForReview(*it);
                     _alreadyProduced.erase(it);
                     it = next;
@@ -171,7 +171,7 @@ namespace mongo {
                 if (member->hasLoc() && member->loc == dl) {
                     list<WorkingSetID>::iterator next = it;
                     next++;
-                    WorkingSetCommon::fetchAndInvalidateLoc(member);
+                    WorkingSetCommon::fetchAndInvalidateLoc(member, _collection);
                     _backupPlan->getWorkingSet()->flagForReview(*it);
                     _backupAlreadyProduced.erase(it);
                     it = next;
@@ -191,7 +191,7 @@ namespace mongo {
                 if (member->hasLoc() && member->loc == dl) {
                     list<WorkingSetID>::iterator next = it;
                     next++;
-                    WorkingSetCommon::fetchAndInvalidateLoc(member);
+                    WorkingSetCommon::fetchAndInvalidateLoc(member, _collection);
                     _candidates[i].ws->flagForReview(*it);
                     _candidates[i].results.erase(it);
                     it = next;
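
All three hunks make the same change in the runner's invalidation handling: the working-set member is now fetched through the collection before its record location is dropped. A rough sketch of the call pattern, assuming the helper copies the document into the member and then clears its loc (the helper body is not part of this diff):

    // dl is the DiskLoc being invalidated; member is a working-set entry that points at it.
    if (member->hasLoc() && member->loc == dl) {
        // Assumed behavior: copy the full document out of _collection into the member,
        // then clear member->loc so nothing keeps referring to the stale record.
        WorkingSetCommon::fetchAndInvalidateLoc(member, _collection);
    }
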
diff --git a/src/mongo/db/query/stage_builder.cpp b/src/mongo/db/query/stage_builder.cpp
index 125c1ef48c1..d017598191d 100644
--- a/src/mongo/db/query/stage_builder.cpp
+++ b/src/mongo/db/query/stage_builder.cpp
@@ -103,6 +103,7 @@ namespace mongo {
             PlanStage* childStage = buildStages(collection, qsol, sn->children[0], ws);
             if (NULL == childStage) { return NULL; }
             SortStageParams params;
+            params.collection = collection;
             params.pattern = sn->pattern;
             params.query = sn->query;
             params.limit = sn->limit;
@@ -146,7 +147,7 @@ namespace mongo {
         }
         else if (STAGE_AND_HASH == root->getType()) {
             const AndHashNode* ahn = static_cast<const AndHashNode*>(root);
-            auto_ptr<AndHashStage> ret(new AndHashStage(ws, ahn->filter.get()));
+            auto_ptr<AndHashStage> ret(new AndHashStage(ws, ahn->filter.get(), collection));
             for (size_t i = 0; i < ahn->children.size(); ++i) {
                 PlanStage* childStage = buildStages(collection, qsol, ahn->children[i], ws);
                 if (NULL == childStage) { return NULL; }
@@ -166,7 +167,7 @@ namespace mongo {
         }
         else if (STAGE_AND_SORTED == root->getType()) {
             const AndSortedNode* asn = static_cast<const AndSortedNode*>(root);
-            auto_ptr<AndSortedStage> ret(new AndSortedStage(ws, asn->filter.get()));
+            auto_ptr<AndSortedStage> ret(new AndSortedStage(ws, asn->filter.get(), collection));
             for (size_t i = 0; i < asn->children.size(); ++i) {
                 PlanStage* childStage = buildStages(collection, qsol, asn->children[i], ws);
                 if (NULL == childStage) { return NULL; }
@@ -179,7 +180,7 @@ namespace mongo {
             MergeSortStageParams params;
             params.dedup = msn->dedup;
             params.pattern = msn->sort;
-            auto_ptr<MergeSortStage> ret(new MergeSortStage(params, ws));
+            auto_ptr<MergeSortStage> ret(new MergeSortStage(params, ws, collection));
             for (size_t i = 0; i < msn->children.size(); ++i) {
                 PlanStage* childStage = buildStages(collection, qsol, msn->children[i], ws);
                 if (NULL == childStage) { return NULL; }
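
The stage_builder hunks all follow one pattern: stages that may need to fetch full documents (sort, hashed AND, sorted AND, merge sort) now receive the Collection when they are constructed. A condensed sketch of the sort case, assuming SortStage is built from the params and child stage shown above (the constructor shape itself is not part of this diff):

    SortStageParams params;
    params.collection = collection;   // new: lets the sort stage fetch documents via the collection
    params.pattern = sn->pattern;
    params.query = sn->query;
    params.limit = sn->limit;
    return new SortStage(params, ws, childStage);   // assumed constructor arguments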