-rw-r--r--  src/mongo/db/catalog/collection.cpp                  |  2
-rw-r--r--  src/mongo/db/catalog/collection.h                    |  2
-rw-r--r--  src/mongo/db/exec/2d.cpp                             |  6
-rw-r--r--  src/mongo/db/exec/2dcommon.cpp                       | 29
-rw-r--r--  src/mongo/db/exec/2dcommon.h                         | 11
-rw-r--r--  src/mongo/db/exec/2dnear.cpp                         | 11
-rw-r--r--  src/mongo/db/exec/2dnear.h                           |  3
-rw-r--r--  src/mongo/db/exec/and_hash.cpp                       | 19
-rw-r--r--  src/mongo/db/exec/and_hash.h                         | 10
-rw-r--r--  src/mongo/db/exec/and_sorted.cpp                     | 11
-rw-r--r--  src/mongo/db/exec/and_sorted.h                       |  5
-rw-r--r--  src/mongo/db/exec/collection_scan.cpp                |  2
-rw-r--r--  src/mongo/db/exec/fetch.cpp                          |  2
-rw-r--r--  src/mongo/db/exec/merge_sort.cpp                     | 11
-rw-r--r--  src/mongo/db/exec/merge_sort.h                       |  7
-rw-r--r--  src/mongo/db/exec/oplogstart.cpp                     |  6
-rw-r--r--  src/mongo/db/exec/oplogstart.h                       |  4
-rw-r--r--  src/mongo/db/exec/s2near.cpp                         |  2
-rw-r--r--  src/mongo/db/exec/sort.cpp                           |  4
-rw-r--r--  src/mongo/db/exec/sort.h                             | 13
-rw-r--r--  src/mongo/db/exec/stagedebug_cmd.cpp                 |  9
-rw-r--r--  src/mongo/db/exec/text.cpp                           | 19
-rw-r--r--  src/mongo/db/exec/working_set_common.cpp             |  6
-rw-r--r--  src/mongo/db/exec/working_set_common.h               |  2
-rw-r--r--  src/mongo/db/index/2d_access_method.h                |  1
-rw-r--r--  src/mongo/db/index/btree_based_access_method.cpp     |  5
-rw-r--r--  src/mongo/db/index/btree_based_access_method.h       |  7
-rw-r--r--  src/mongo/db/index/haystack_access_method.cpp        |  2
-rw-r--r--  src/mongo/db/index/haystack_access_method_internal.h | 19
-rw-r--r--  src/mongo/db/index/index_descriptor.h                |  5
-rw-r--r--  src/mongo/db/query/idhack_runner.cpp                 |  2
-rw-r--r--  src/mongo/db/query/multi_plan_runner.cpp             |  6
-rw-r--r--  src/mongo/db/query/stage_builder.cpp                 |  7
-rw-r--r--  src/mongo/dbtests/query_stage_and.cpp                | 32
-rw-r--r--  src/mongo/dbtests/query_stage_merge_sort.cpp         | 14
-rw-r--r--  src/mongo/dbtests/query_stage_sort.cpp               |  3
36 files changed, 191 insertions, 108 deletions
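
Read together, the per-file changes below implement one refactor: direct DiskLoc::obj() lookups are replaced by Collection::docFor(const DiskLoc&) const, and a non-owning const Collection* is threaded through every execution stage that resolves record locations. A minimal, self-contained C++ sketch of that shape, using simplified stand-in types rather than the real MongoDB classes:

    #include <iostream>
    #include <string>

    struct DiskLoc {
        explicit DiskLoc(int o = -1) : ofs(o) {}
        bool isNull() const { return ofs == -1; }
        int ofs;
    };

    struct BSONObj {
        std::string json;
    };

    struct RecordStore {
        // Stand-in for _recordStore->recordFor(loc)->accessed()->data().
        BSONObj recordFor(const DiskLoc& loc) const {
            return BSONObj{"{ \"_id\" : " + std::to_string(loc.ofs) + " }"};
        }
    };

    class Collection {
    public:
        explicit Collection(const RecordStore* rs) : _recordStore(rs) {}

        // The single entry point for DiskLoc -> document resolution.
        BSONObj docFor(const DiskLoc& loc) const {
            return _recordStore->recordFor(loc);
        }

    private:
        const RecordStore* _recordStore;  // not owned
    };

    int main() {
        RecordStore rs;
        Collection coll(&rs);
        DiskLoc loc(42);
        // Before: BSONObj obj = loc.obj();   (storage reached from the loc itself)
        // After:  resolution goes through the owning collection.
        std::cout << coll.docFor(loc).json << std::endl;
        return 0;
    }

Funneling every lookup through docFor() gives the collection a single choke point for storage access, which is why so many stages below now carry a non-owning const Collection*.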
diff --git a/src/mongo/db/catalog/collection.cpp b/src/mongo/db/catalog/collection.cpp
index c11d6172b0b..062f88d9faa 100644
--- a/src/mongo/db/catalog/collection.cpp
+++ b/src/mongo/db/catalog/collection.cpp
@@ -155,7 +155,7 @@ namespace mongo {
return count;
}
- BSONObj Collection::docFor( const DiskLoc& loc ) {
+ BSONObj Collection::docFor(const DiskLoc& loc) const {
Record* rec = _recordStore->recordFor( loc );
return BSONObj( rec->accessed()->data() );
}
diff --git a/src/mongo/db/catalog/collection.h b/src/mongo/db/catalog/collection.h
index c05f7cbe112..d12af68011b 100644
--- a/src/mongo/db/catalog/collection.h
+++ b/src/mongo/db/catalog/collection.h
@@ -132,7 +132,7 @@ namespace mongo {
bool requiresIdIndex() const;
- BSONObj docFor( const DiskLoc& loc );
+ BSONObj docFor(const DiskLoc& loc) const;
// ---- things that should move to a CollectionAccessMethod like thing
/**
diff --git a/src/mongo/db/exec/2d.cpp b/src/mongo/db/exec/2d.cpp
index 0d9a4b301c4..5142beb7195 100644
--- a/src/mongo/db/exec/2d.cpp
+++ b/src/mongo/db/exec/2d.cpp
@@ -98,7 +98,7 @@ namespace mongo {
WorkingSetID id = _workingSet->allocate();
WorkingSetMember* member = _workingSet->get(id);
member->loc = _browse->currLoc();
- member->obj = member->loc.obj();
+ member->obj = _params.collection->docFor(member->loc);
member->state = WorkingSetMember::LOC_AND_UNOWNED_OBJ;
_browse->advance();
@@ -129,11 +129,11 @@ namespace mongo {
WorkingSetID id = _workingSet->allocate();
WorkingSetMember* member = _workingSet->get(id);
member->loc = dl;
- member->obj = member->loc.obj();
+ member->obj = _params.collection->docFor(member->loc);
member->state = WorkingSetMember::LOC_AND_UNOWNED_OBJ;
// And flag it for later.
- WorkingSetCommon::fetchAndInvalidateLoc(member);
+ WorkingSetCommon::fetchAndInvalidateLoc(member, _params.collection);
_workingSet->flagForReview(id);
}
}
diff --git a/src/mongo/db/exec/2dcommon.cpp b/src/mongo/db/exec/2dcommon.cpp
index 56ba456ddbc..a469c591c40 100644
--- a/src/mongo/db/exec/2dcommon.cpp
+++ b/src/mongo/db/exec/2dcommon.cpp
@@ -40,15 +40,20 @@ namespace twod_exec {
class GeoMatchableDocument : public MatchableDocument {
public:
- GeoMatchableDocument(const BSONObj& keyPattern, const BSONObj& key, DiskLoc loc, bool *fetched)
- : _keyPattern(keyPattern),
+ GeoMatchableDocument(const BSONObj& keyPattern,
+ const BSONObj& key,
+ DiskLoc loc,
+ const Collection* collection,
+ bool* fetched)
+ : _collection(collection),
+ _keyPattern(keyPattern),
_key(key),
_loc(loc),
_fetched(fetched) { }
BSONObj toBSON() const {
*_fetched = true;
- return _loc.obj();
+ return _collection->docFor(_loc);
}
virtual ElementIterator* allocateIterator(const ElementPath* path) const {
@@ -78,7 +83,7 @@ namespace twod_exec {
// All else fails, fetch.
*_fetched = true;
- return new BSONElementIterator(path, _loc.obj());
+ return new BSONElementIterator(path, _collection->docFor(_loc));
}
virtual void releaseIterator( ElementIterator* iterator ) const {
@@ -86,6 +91,8 @@ namespace twod_exec {
}
private:
+ const Collection* _collection;
+
BSONObj _keyPattern;
BSONObj _key;
DiskLoc _loc;
@@ -128,6 +135,7 @@ namespace twod_exec {
GeoMatchableDocument md(_accessMethod->getDescriptor()->keyPattern(),
node._key,
node.recordLoc,
+ _accessMethod->collection(),
&fetched);
bool good = _filter->matches(&md);
@@ -304,7 +312,8 @@ namespace twod_exec {
_centerPrefix(0, 0, 0),
_descriptor(accessMethod->getDescriptor()),
_converter(accessMethod->getParams().geoHashConverter),
- _params(accessMethod->getParams()) {
+ _params(accessMethod->getParams()),
+ _collection(accessMethod->collection()) {
// Set up the initial expand state
_state = START;
@@ -575,14 +584,16 @@ namespace twod_exec {
// Final check for new doc
// OK to touch, since we're probably returning this object now
- if(remembered(node.recordLoc.obj())) {
+ const BSONObj obj = _collection->docFor(node.recordLoc);
+
+ if (remembered(obj)) {
//cout << "remembered\n";
return 0;
}
if(! onBounds) {
//log() << "Added ind to " << _type << endl;
- _stack.push_front(GeoPoint(node));
+ _stack.push_front(GeoPoint(node, obj));
found++;
} else {
// We now handle every possible point in the document, even those not in the key
@@ -591,7 +602,7 @@ namespace twod_exec {
// If we're filtering by hash, get the original
vector< BSONObj > locs;
- getPointsFor(node._key, node.recordLoc.obj(), locs, true);
+ getPointsFor(node._key, obj, locs, true);
for(vector< BSONObj >::iterator i = locs.begin(); i != locs.end(); ++i){
double d = -1;
Point p(*i);
@@ -601,7 +612,7 @@ namespace twod_exec {
if(! needExact || exactDocCheck(p, d)){
//log() << "Added mult to " << _type << endl;
- _stack.push_front(GeoPoint(node));
+ _stack.push_front(GeoPoint(node, obj));
found++;
// Exit after first point is added
break;
diff --git a/src/mongo/db/exec/2dcommon.h b/src/mongo/db/exec/2dcommon.h
index 7abc2826fc5..a3a2c2a9ac2 100644
--- a/src/mongo/db/exec/2dcommon.h
+++ b/src/mongo/db/exec/2dcommon.h
@@ -63,14 +63,14 @@ namespace twod_exec {
//// Distance not used ////
- GeoPoint(const GeoIndexEntry& node)
- : _key(node._key), _loc(node.recordLoc), _o(node.recordLoc.obj()),
+ GeoPoint(const GeoIndexEntry& node, const BSONObj& obj)
+ : _key(node._key), _loc(node.recordLoc), _o(obj),
_distance(-1), _exact(false) { }
//// Immediate initialization of distance ////
- GeoPoint(const GeoIndexEntry& node, double distance, bool exact)
- : _key(node._key), _loc(node.recordLoc), _o(node.recordLoc.obj()),
+ GeoPoint(const GeoIndexEntry& node, const BSONObj& obj, double distance, bool exact)
+ : _key(node._key), _loc(node.recordLoc), _o(obj),
_distance(distance), _exact(exact) { }
GeoPoint(const GeoPoint& pt, double distance, bool exact)
@@ -266,6 +266,9 @@ namespace twod_exec {
const IndexDescriptor* _descriptor;
shared_ptr<GeoHashConverter> _converter;
TwoDIndexingParams _params;
+
+ private:
+ const Collection* _collection;
};
} // namespace twod_exec
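
A side effect of the GeoPoint change above is that the geo code now fetches each document once and reuses it. A minimal sketch of the new call-site shape, with stand-in types rather than the real geo classes:

    struct DiskLoc {};
    struct BSONObj {};

    struct GeoIndexEntry {
        BSONObj _key;
        DiskLoc recordLoc;
    };

    struct Collection {
        BSONObj docFor(const DiskLoc&) const { return BSONObj(); }
    };

    struct GeoPoint {
        // The caller supplies the fetched document; GeoPoint itself never
        // touches storage.
        GeoPoint(const GeoIndexEntry& node, const BSONObj& obj)
            : _key(node._key), _loc(node.recordLoc), _o(obj),
              _distance(-1), _exact(false) {}

        BSONObj _key;
        DiskLoc _loc;
        BSONObj _o;
        double _distance;
        bool _exact;
    };

    // Shape used in 2dcommon.cpp after this change: fetch once via the
    // collection, then reuse the object for the dedup check and the stack.
    GeoPoint makePoint(const Collection* collection, const GeoIndexEntry& node) {
        const BSONObj obj = collection->docFor(node.recordLoc);
        return GeoPoint(node, obj);
    }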
diff --git a/src/mongo/db/exec/2dnear.cpp b/src/mongo/db/exec/2dnear.cpp
index 3f62aeefac9..7c4c4dd8598 100644
--- a/src/mongo/db/exec/2dnear.cpp
+++ b/src/mongo/db/exec/2dnear.cpp
@@ -28,12 +28,12 @@
#include "mongo/db/exec/2dnear.h"
+#include "mongo/db/catalog/collection.h"
#include "mongo/db/catalog/database.h"
#include "mongo/db/client.h"
#include "mongo/db/exec/working_set_common.h"
#include "mongo/db/exec/working_set_computed_data.h"
#include "mongo/db/jsobj.h"
-#include "mongo/db/catalog/collection.h"
namespace mongo {
@@ -84,7 +84,7 @@ namespace mongo {
WorkingSetID id = _workingSet->allocate();
WorkingSetMember* member = _workingSet->get(id);
member->loc = it->_loc;
- member->obj = member->loc.obj();
+ member->obj = _params.collection->docFor(member->loc);
member->state = WorkingSetMember::LOC_AND_UNOWNED_OBJ;
if (_params.addDistMeta) {
member->addComputed(new GeoDistanceComputedData(it->_distance));
@@ -140,7 +140,7 @@ namespace mongo {
WorkingSetMember* member = _workingSet->get(it->second);
// If it's in the invalidation map it must have a DiskLoc.
verify(member->hasLoc());
- WorkingSetCommon::fetchAndInvalidateLoc(member);
+ WorkingSetCommon::fetchAndInvalidateLoc(member, _params.collection);
verify(!member->hasLoc());
}
_invalidationMap.erase(range.first, range.second);
@@ -176,7 +176,8 @@ namespace twod_exec {
_distError(type == GEO_PLANE
? accessMethod->getParams().geoHashConverter->getError()
: accessMethod->getParams().geoHashConverter->getErrorSphere()),
- _farthest(0) { }
+ _farthest(0),
+ _collection(accessMethod->collection()) {}
GeoAccumulator::KeyResult GeoHopper::approxKeyCheck(const Point& p, double& d) {
// Always check approximate distance, since it lets us avoid doing
@@ -224,7 +225,7 @@ namespace twod_exec {
int GeoHopper::addSpecific(const GeoIndexEntry& node, const Point& keyP, bool onBounds,
double keyD, bool potentiallyNewDoc) {
// Unique documents
- GeoPoint newPoint(node, keyD, false);
+ GeoPoint newPoint(node, _collection->docFor(node.recordLoc), keyD, false);
int prevSize = _points.size();
// STEP 1 : Remove old duplicate points from the set if needed
diff --git a/src/mongo/db/exec/2dnear.h b/src/mongo/db/exec/2dnear.h
index 9768ebb90c4..5f23f541d93 100644
--- a/src/mongo/db/exec/2dnear.h
+++ b/src/mongo/db/exec/2dnear.h
@@ -139,6 +139,9 @@ namespace twod_exec {
// Safe to use currently since we don't yield in $near searches. If we do start to yield,
// we may need to replace dirtied disklocs in our holder / ensure our logic is correct.
map<DiskLoc, Holder::iterator> _seenPts;
+
+ private:
+ const Collection* _collection;
};
class GeoSearch : public GeoHopper {
diff --git a/src/mongo/db/exec/and_hash.cpp b/src/mongo/db/exec/and_hash.cpp
index 28a6aadd779..418bc000928 100644
--- a/src/mongo/db/exec/and_hash.cpp
+++ b/src/mongo/db/exec/and_hash.cpp
@@ -48,16 +48,23 @@ namespace mongo {
const size_t AndHashStage::kLookAheadWorks = 10;
- AndHashStage::AndHashStage(WorkingSet* ws, const MatchExpression* filter)
- : _ws(ws),
+ AndHashStage::AndHashStage(WorkingSet* ws,
+ const MatchExpression* filter,
+ const Collection* collection)
+ : _collection(collection),
+ _ws(ws),
_filter(filter),
_hashingChildren(true),
_currentChild(0),
_memUsage(0),
_maxMemUsage(kDefaultMaxMemUsageBytes) {}
- AndHashStage::AndHashStage(WorkingSet* ws, const MatchExpression* filter, size_t maxMemUsage)
- : _ws(ws),
+ AndHashStage::AndHashStage(WorkingSet* ws,
+ const MatchExpression* filter,
+ const Collection* collection,
+ size_t maxMemUsage)
+ : _collection(collection),
+ _ws(ws),
_filter(filter),
_hashingChildren(true),
_currentChild(0),
@@ -457,7 +464,7 @@ namespace mongo {
if (WorkingSet::INVALID_ID != _lookAheadResults[i]) {
WorkingSetMember* member = _ws->get(_lookAheadResults[i]);
if (member->hasLoc() && member->loc == dl) {
- WorkingSetCommon::fetchAndInvalidateLoc(member);
+ WorkingSetCommon::fetchAndInvalidateLoc(member, _collection);
_ws->flagForReview(_lookAheadResults[i]);
_lookAheadResults[i] = WorkingSet::INVALID_ID;
}
@@ -487,7 +494,7 @@ namespace mongo {
_memUsage -= member->getMemUsage();
// The loc is about to be invalidated. Fetch it and clear the loc.
- WorkingSetCommon::fetchAndInvalidateLoc(member);
+ WorkingSetCommon::fetchAndInvalidateLoc(member, _collection);
// Add the WSID to the to-be-reviewed list in the WS.
_ws->flagForReview(id);
diff --git a/src/mongo/db/exec/and_hash.h b/src/mongo/db/exec/and_hash.h
index a546057ffe2..957bf21785c 100644
--- a/src/mongo/db/exec/and_hash.h
+++ b/src/mongo/db/exec/and_hash.h
@@ -52,12 +52,15 @@ namespace mongo {
*/
class AndHashStage : public PlanStage {
public:
- AndHashStage(WorkingSet* ws, const MatchExpression* filter);
+ AndHashStage(WorkingSet* ws, const MatchExpression* filter, const Collection* collection);
/**
* For testing only. Allows tests to set memory usage threshold.
*/
- AndHashStage(WorkingSet* ws, const MatchExpression* filter, size_t maxMemUsage);
+ AndHashStage(WorkingSet* ws,
+ const MatchExpression* filter,
+ const Collection* collection,
+ size_t maxMemUsage);
virtual ~AndHashStage();
@@ -86,6 +89,9 @@ namespace mongo {
StageState workChild(size_t childNo, WorkingSetID* out);
// Not owned by us.
+ const Collection* _collection;
+
+ // Not owned by us.
WorkingSet* _ws;
// Not owned by us.
diff --git a/src/mongo/db/exec/and_sorted.cpp b/src/mongo/db/exec/and_sorted.cpp
index 51741e4f974..cc63e03a023 100644
--- a/src/mongo/db/exec/and_sorted.cpp
+++ b/src/mongo/db/exec/and_sorted.cpp
@@ -35,8 +35,13 @@
namespace mongo {
- AndSortedStage::AndSortedStage(WorkingSet* ws, const MatchExpression* filter)
- : _ws(ws), _filter(filter), _targetNode(numeric_limits<size_t>::max()),
+ AndSortedStage::AndSortedStage(WorkingSet* ws,
+ const MatchExpression* filter,
+ const Collection* collection)
+ : _collection(collection),
+ _ws(ws),
+ _filter(filter),
+ _targetNode(numeric_limits<size_t>::max()),
_targetId(WorkingSet::INVALID_ID), _isEOF(false) { }
AndSortedStage::~AndSortedStage() {
@@ -286,7 +291,7 @@ namespace mongo {
++_specificStats.flagged;
// The DiskLoc could still be a valid result so flag it and save it for later.
- WorkingSetCommon::fetchAndInvalidateLoc(_ws->get(_targetId));
+ WorkingSetCommon::fetchAndInvalidateLoc(_ws->get(_targetId), _collection);
_ws->flagForReview(_targetId);
_targetId = WorkingSet::INVALID_ID;
diff --git a/src/mongo/db/exec/and_sorted.h b/src/mongo/db/exec/and_sorted.h
index cf755228433..e8a88e78607 100644
--- a/src/mongo/db/exec/and_sorted.h
+++ b/src/mongo/db/exec/and_sorted.h
@@ -53,7 +53,7 @@ namespace mongo {
*/
class AndSortedStage : public PlanStage {
public:
- AndSortedStage(WorkingSet* ws, const MatchExpression* filter);
+ AndSortedStage(WorkingSet* ws, const MatchExpression* filter, const Collection* collection);
virtual ~AndSortedStage();
void addChild(PlanStage* child);
@@ -76,6 +76,9 @@ namespace mongo {
PlanStage::StageState moveTowardTargetLoc(WorkingSetID* out);
// Not owned by us.
+ const Collection* _collection;
+
+ // Not owned by us.
WorkingSet* _ws;
// Not owned by us.
diff --git a/src/mongo/db/exec/collection_scan.cpp b/src/mongo/db/exec/collection_scan.cpp
index b74682c47c4..e53810c7538 100644
--- a/src/mongo/db/exec/collection_scan.cpp
+++ b/src/mongo/db/exec/collection_scan.cpp
@@ -128,7 +128,7 @@ namespace mongo {
WorkingSetID id = _workingSet->allocate();
WorkingSetMember* member = _workingSet->get(id);
member->loc = nextLoc;
- member->obj = member->loc.obj();
+ member->obj = _params.collection->docFor(member->loc);
member->state = WorkingSetMember::LOC_AND_UNOWNED_OBJ;
++_specificStats.docsTested;
diff --git a/src/mongo/db/exec/fetch.cpp b/src/mongo/db/exec/fetch.cpp
index 923c77b4d72..f7ee88cebbd 100644
--- a/src/mongo/db/exec/fetch.cpp
+++ b/src/mongo/db/exec/fetch.cpp
@@ -171,7 +171,7 @@ namespace mongo {
WorkingSetMember* member = _ws->get(_idBeingPagedIn);
if (member->hasLoc() && (member->loc == dl)) {
// Just fetch it now and kill the DiskLoc.
- WorkingSetCommon::fetchAndInvalidateLoc(member);
+ WorkingSetCommon::fetchAndInvalidateLoc(member, _collection);
}
}
}
diff --git a/src/mongo/db/exec/merge_sort.cpp b/src/mongo/db/exec/merge_sort.cpp
index 8e8c760c336..df72dd5016e 100644
--- a/src/mongo/db/exec/merge_sort.cpp
+++ b/src/mongo/db/exec/merge_sort.cpp
@@ -34,8 +34,13 @@
namespace mongo {
- MergeSortStage::MergeSortStage(const MergeSortStageParams& params, WorkingSet* ws)
- : _ws(ws), _pattern(params.pattern), _dedup(params.dedup),
+ MergeSortStage::MergeSortStage(const MergeSortStageParams& params,
+ WorkingSet* ws,
+ const Collection* collection)
+ : _collection(collection),
+ _ws(ws),
+ _pattern(params.pattern),
+ _dedup(params.dedup),
_merging(StageWithValueComparison(ws, params.pattern)) { }
MergeSortStage::~MergeSortStage() {
@@ -199,7 +204,7 @@ namespace mongo {
WorkingSetMember* member = _ws->get(valueIt->id);
if (member->hasLoc() && (dl == member->loc)) {
// Force a fetch and flag. We could possibly merge this result back in later.
- WorkingSetCommon::fetchAndInvalidateLoc(member);
+ WorkingSetCommon::fetchAndInvalidateLoc(member, _collection);
_ws->flagForReview(valueIt->id);
++_specificStats.forcedFetches;
}
diff --git a/src/mongo/db/exec/merge_sort.h b/src/mongo/db/exec/merge_sort.h
index 82540a467ab..7eeb54ae0d0 100644
--- a/src/mongo/db/exec/merge_sort.h
+++ b/src/mongo/db/exec/merge_sort.h
@@ -55,7 +55,9 @@ namespace mongo {
*/
class MergeSortStage : public PlanStage {
public:
- MergeSortStage(const MergeSortStageParams& params, WorkingSet* ws);
+ MergeSortStage(const MergeSortStageParams& params,
+ WorkingSet* ws,
+ const Collection* collection);
virtual ~MergeSortStage();
void addChild(PlanStage* child);
@@ -71,6 +73,9 @@ namespace mongo {
private:
// Not owned by us.
+ const Collection* _collection;
+
+ // Not owned by us.
WorkingSet* _ws;
// The pattern that we're sorting by.
diff --git a/src/mongo/db/exec/oplogstart.cpp b/src/mongo/db/exec/oplogstart.cpp
index d8e7feaa353..e21f7d20c66 100644
--- a/src/mongo/db/exec/oplogstart.cpp
+++ b/src/mongo/db/exec/oplogstart.cpp
@@ -38,7 +38,7 @@
namespace mongo {
// Does not take ownership.
- OplogStart::OplogStart(Collection* collection, MatchExpression* filter, WorkingSet* ws)
+ OplogStart::OplogStart(const Collection* collection, MatchExpression* filter, WorkingSet* ws)
: _needInit(true),
_backwardsScanning(false),
_extentHopping(false),
@@ -87,12 +87,12 @@ namespace mongo {
const DiskLoc loc = _subIterators.back()->getNext();
_subIterators.popAndDeleteBack();
- if (!loc.isNull() && !_filter->matchesBSON(loc.obj())) {
+ if (!loc.isNull() && !_filter->matchesBSON(_collection->docFor(loc))) {
_done = true;
WorkingSetID id = _workingSet->allocate();
WorkingSetMember* member = _workingSet->get(id);
member->loc = loc;
- member->obj = member->loc.obj();
+ member->obj = _collection->docFor(member->loc);
member->state = WorkingSetMember::LOC_AND_UNOWNED_OBJ;
*out = id;
return PlanStage::ADVANCED;
diff --git a/src/mongo/db/exec/oplogstart.h b/src/mongo/db/exec/oplogstart.h
index 8f3b59df437..4fd42945282 100644
--- a/src/mongo/db/exec/oplogstart.h
+++ b/src/mongo/db/exec/oplogstart.h
@@ -62,7 +62,7 @@ namespace mongo {
class OplogStart : public PlanStage {
public:
// Does not take ownership.
- OplogStart(Collection* collection, MatchExpression* filter, WorkingSet* ws);
+ OplogStart(const Collection* collection, MatchExpression* filter, WorkingSet* ws);
virtual ~OplogStart();
virtual StageState work(WorkingSetID* out);
@@ -105,7 +105,7 @@ namespace mongo {
// Our final state: done.
bool _done;
- Collection* _collection;
+ const Collection* _collection;
const NamespaceDetails* _nsd;
// We only go backwards via a collscan for a few seconds.
diff --git a/src/mongo/db/exec/s2near.cpp b/src/mongo/db/exec/s2near.cpp
index d7eeda1d280..ed1113823a5 100644
--- a/src/mongo/db/exec/s2near.cpp
+++ b/src/mongo/db/exec/s2near.cpp
@@ -405,7 +405,7 @@ namespace mongo {
if (it != _invalidationMap.end()) {
WorkingSetMember* member = _ws->get(it->second);
verify(member->hasLoc());
- WorkingSetCommon::fetchAndInvalidateLoc(member);
+ WorkingSetCommon::fetchAndInvalidateLoc(member, _params.collection);
verify(!member->hasLoc());
// Don't keep it around in the invalidation map since there's no valid DiskLoc anymore.
_invalidationMap.erase(it);
diff --git a/src/mongo/db/exec/sort.cpp b/src/mongo/db/exec/sort.cpp
index f9119cd1a72..50dbc6792de 100644
--- a/src/mongo/db/exec/sort.cpp
+++ b/src/mongo/db/exec/sort.cpp
@@ -44,7 +44,7 @@ namespace mongo {
const size_t kMaxBytes = 32 * 1024 * 1024;
- SortStageKeyGenerator::SortStageKeyGenerator(Collection* collection,
+ SortStageKeyGenerator::SortStageKeyGenerator(const Collection* collection,
const BSONObj& sortSpec,
const BSONObj& queryObj) {
_collection = collection;
@@ -441,7 +441,7 @@ namespace mongo {
WorkingSetMember* member = _ws->get(it->second);
verify(member->loc == dl);
- WorkingSetCommon::fetchAndInvalidateLoc(member);
+ WorkingSetCommon::fetchAndInvalidateLoc(member, _collection);
// Remove the DiskLoc from our set of active DLs.
_wsidByDiskLoc.erase(it);
diff --git a/src/mongo/db/exec/sort.h b/src/mongo/db/exec/sort.h
index e67305504ba..96da33b36b7 100644
--- a/src/mongo/db/exec/sort.h
+++ b/src/mongo/db/exec/sort.h
@@ -47,9 +47,10 @@ namespace mongo {
// Parameters that must be provided to a SortStage
class SortStageParams {
public:
- SortStageParams() : collection( NULL), limit(0) { }
+ SortStageParams() : collection(NULL), limit(0) { }
- Collection* collection;
+ // Used for resolving DiskLocs to BSON
+ const Collection* collection;
// How we're sorting.
BSONObj pattern;
@@ -73,7 +74,7 @@ namespace mongo {
* ensure that the value we select to sort by is within bounds generated by
* executing 'queryObj' using the virtual index with key pattern 'sortSpec'.
*/
- SortStageKeyGenerator(Collection* collection,
+ SortStageKeyGenerator(const Collection* collection,
const BSONObj& sortSpec,
const BSONObj& queryObj);
@@ -104,7 +105,8 @@ namespace mongo {
void getBoundsForSort(const BSONObj& queryObj,
const BSONObj& sortObj);
- Collection* _collection;
+ // Not owned by us
+ const Collection* _collection;
// The object that we use to call woCompare on our resulting key. Is equal to _rawSortSpec
// unless we have some $meta expressions. Each $meta expression has a default sort order.
@@ -160,7 +162,8 @@ namespace mongo {
// Query Stage
//
- Collection* _collection;
+ // Not owned by us.
+ const Collection* _collection;
// Not owned by us.
WorkingSet* _ws;
diff --git a/src/mongo/db/exec/stagedebug_cmd.cpp b/src/mongo/db/exec/stagedebug_cmd.cpp
index 2e17b4447b6..2dc38512646 100644
--- a/src/mongo/db/exec/stagedebug_cmd.cpp
+++ b/src/mongo/db/exec/stagedebug_cmd.cpp
@@ -218,7 +218,7 @@ namespace mongo {
uassert(16921, "Nodes argument must be provided to AND",
nodeArgs["nodes"].isABSONObj());
- auto_ptr<AndHashStage> andStage(new AndHashStage(workingSet, matcher));
+ auto_ptr<AndHashStage> andStage(new AndHashStage(workingSet, matcher, collection));
int nodesAdded = 0;
BSONObjIterator it(nodeArgs["nodes"].Obj());
@@ -243,8 +243,8 @@ namespace mongo {
uassert(16924, "Nodes argument must be provided to AND",
nodeArgs["nodes"].isABSONObj());
- auto_ptr<AndSortedStage> andStage(new AndSortedStage(workingSet,
- matcher));
+ auto_ptr<AndSortedStage> andStage(
+ new AndSortedStage(workingSet, matcher, collection));
int nodesAdded = 0;
BSONObjIterator it(nodeArgs["nodes"].Obj());
@@ -359,7 +359,8 @@ namespace mongo {
params.pattern = nodeArgs["pattern"].Obj();
// Dedup is true by default.
- auto_ptr<MergeSortStage> mergeStage(new MergeSortStage(params, workingSet));
+ auto_ptr<MergeSortStage> mergeStage(
+ new MergeSortStage(params, workingSet, collection));
BSONObjIterator it(nodeArgs["nodes"].Obj());
while (it.more()) {
diff --git a/src/mongo/db/exec/text.cpp b/src/mongo/db/exec/text.cpp
index 7c075587909..4bf3c7ca4bb 100644
--- a/src/mongo/db/exec/text.cpp
+++ b/src/mongo/db/exec/text.cpp
@@ -246,7 +246,7 @@ namespace mongo {
// Filter for phrases and negated terms
if (_params.query.hasNonTermPieces()) {
- if (!_ftsMatcher.matchesNonTerm(loc.obj())) {
+ if (!_ftsMatcher.matchesNonTerm(_params.index->getCollection()->docFor(loc))) {
return PlanStage::NEED_TIME;
}
}
@@ -254,7 +254,7 @@ namespace mongo {
*out = _ws->allocate();
WorkingSetMember* member = _ws->get(*out);
member->loc = loc;
- member->obj = member->loc.obj();
+ member->obj = _params.index->getCollection()->docFor(member->loc);
member->state = WorkingSetMember::LOC_AND_UNOWNED_OBJ;
member->addComputed(new TextScoreComputedData(score));
return PlanStage::ADVANCED;
@@ -265,15 +265,17 @@ namespace mongo {
TextMatchableDocument(const BSONObj& keyPattern,
const BSONObj& key,
DiskLoc loc,
+ const Collection* collection,
bool *fetched)
- : _keyPattern(keyPattern),
+ : _collection(collection),
+ _keyPattern(keyPattern),
_key(key),
_loc(loc),
_fetched(fetched) { }
BSONObj toBSON() const {
*_fetched = true;
- return _loc.obj();
+ return _collection->docFor(_loc);
}
virtual ElementIterator* allocateIterator(const ElementPath* path) const {
@@ -298,7 +300,7 @@ namespace mongo {
// All else fails, fetch.
*_fetched = true;
- return new BSONElementIterator(path, _loc.obj());
+ return new BSONElementIterator(path, _collection->docFor(_loc));
}
virtual void releaseIterator( ElementIterator* iterator ) const {
@@ -306,6 +308,7 @@ namespace mongo {
}
private:
+ const Collection* _collection;
BSONObj _keyPattern;
BSONObj _key;
DiskLoc _loc;
@@ -338,7 +341,11 @@ namespace mongo {
if (_filter) {
// We have not seen this document before and need to apply a filter.
bool fetched = false;
- TextMatchableDocument tdoc(_params.index->keyPattern(), key, loc, &fetched);
+ TextMatchableDocument tdoc(_params.index->keyPattern(),
+ key,
+ loc,
+ _params.index->getCollection(),
+ &fetched);
if (!_filter->matches(&tdoc)) {
// We had to fetch but we're not going to return it.
diff --git a/src/mongo/db/exec/working_set_common.cpp b/src/mongo/db/exec/working_set_common.cpp
index 8b9e07a974c..a0652ea5fc0 100644
--- a/src/mongo/db/exec/working_set_common.cpp
+++ b/src/mongo/db/exec/working_set_common.cpp
@@ -26,6 +26,7 @@
* it in the license file.
*/
+#include "mongo/db/catalog/collection.h"
#include "mongo/db/exec/working_set.h"
#include "mongo/db/exec/working_set_common.h"
#include "mongo/db/pdfile.h"
@@ -33,7 +34,8 @@
namespace mongo {
// static
- bool WorkingSetCommon::fetchAndInvalidateLoc(WorkingSetMember* member) {
+ bool WorkingSetCommon::fetchAndInvalidateLoc(
+ WorkingSetMember* member, const Collection* collection) {
// Already in our desired state.
if (member->state == WorkingSetMember::OWNED_OBJ) { return true; }
@@ -41,7 +43,7 @@ namespace mongo {
if (!member->hasLoc()) { return false; }
// Do the fetch, invalidate the DL.
- member->obj = member->loc.obj().getOwned();
+ member->obj = collection->docFor(member->loc).getOwned();
member->state = WorkingSetMember::OWNED_OBJ;
member->loc = DiskLoc();
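
The new fetchAndInvalidateLoc() contract deserves a note: a working-set member can no longer resolve its own DiskLoc, so every caller must pass the collection that owns the record. A minimal sketch with stand-in types (the real WorkingSetMember has more states than shown):

    struct DiskLoc {
        explicit DiskLoc(int o = -1) : ofs(o) {}
        bool isNull() const { return ofs == -1; }
        int ofs;
    };

    struct BSONObj {
        BSONObj getOwned() const { return *this; }  // deep copy in the real type
    };

    struct Collection {
        BSONObj docFor(const DiskLoc&) const { return BSONObj(); }
    };

    struct WorkingSetMember {
        enum State { LOC_AND_UNOWNED_OBJ, OWNED_OBJ };

        WorkingSetMember() : state(LOC_AND_UNOWNED_OBJ) {}
        bool hasLoc() const { return !loc.isNull(); }

        State state;
        DiskLoc loc;
        BSONObj obj;
    };

    bool fetchAndInvalidateLoc(WorkingSetMember* member, const Collection* collection) {
        if (member->state == WorkingSetMember::OWNED_OBJ) { return true; }  // already safe
        if (!member->hasLoc()) { return false; }                            // nothing to fetch
        member->obj = collection->docFor(member->loc).getOwned();           // copy the doc out
        member->state = WorkingSetMember::OWNED_OBJ;
        member->loc = DiskLoc();                                            // invalidate the loc
        return true;
    }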
diff --git a/src/mongo/db/exec/working_set_common.h b/src/mongo/db/exec/working_set_common.h
index a983ca055fe..2fe0ab97e6d 100644
--- a/src/mongo/db/exec/working_set_common.h
+++ b/src/mongo/db/exec/working_set_common.h
@@ -39,7 +39,7 @@ namespace mongo {
* Requires either a valid BSONObj or valid DiskLoc.
* Returns true if the fetch and invalidate succeeded, false otherwise.
*/
- static bool fetchAndInvalidateLoc(WorkingSetMember* member);
+ static bool fetchAndInvalidateLoc(WorkingSetMember* member, const Collection* collection);
/**
* Initialize the fields in 'dest' from 'src', creating copies of owned objects as needed.
diff --git a/src/mongo/db/index/2d_access_method.h b/src/mongo/db/index/2d_access_method.h
index 70c2ee247c0..e8584bf0ae4 100644
--- a/src/mongo/db/index/2d_access_method.h
+++ b/src/mongo/db/index/2d_access_method.h
@@ -73,7 +73,6 @@ namespace mongo {
private:
friend class TwoDIndexCursor;
- friend class twod_internal::GeoPoint;
friend class twod_internal::GeoAccumulator;
friend class twod_internal::GeoBrowse;
friend class twod_internal::GeoHopper;
diff --git a/src/mongo/db/index/btree_based_access_method.cpp b/src/mongo/db/index/btree_based_access_method.cpp
index 092e5f8d17f..8a3ee652abc 100644
--- a/src/mongo/db/index/btree_based_access_method.cpp
+++ b/src/mongo/db/index/btree_based_access_method.cpp
@@ -130,7 +130,7 @@ namespace mongo {
problem() << "Assertion failure: _unindex failed "
<< _descriptor->indexNamespace() << endl;
out() << "Assertion failure: _unindex failed: " << e.what() << '\n';
- out() << " obj:" << loc.obj().toString() << '\n';
+ out() << " obj:" << _btreeState->collection()->docFor(loc).toString() << '\n';
out() << " key:" << key.toString() << '\n';
out() << " dl:" << loc.toString() << endl;
logContext();
@@ -160,7 +160,8 @@ namespace mongo {
++*numDeleted;
} else if (options.logIfError) {
log() << "unindex failed (key too big?) " << _descriptor->indexNamespace()
- << " key: " << *i << " " << loc.obj()["_id"] << endl;
+ << " key: " << *i << " "
+ << _btreeState->collection()->docFor(loc)["_id"] << endl;
}
}
diff --git a/src/mongo/db/index/btree_based_access_method.h b/src/mongo/db/index/btree_based_access_method.h
index 5d5dab921dc..1ba38e1bc54 100644
--- a/src/mongo/db/index/btree_based_access_method.h
+++ b/src/mongo/db/index/btree_based_access_method.h
@@ -107,6 +107,13 @@ namespace mongo {
IndexCatalogEntry* _btreeState; // owned by IndexCatalogEntry
const IndexDescriptor* _descriptor;
+ /**
+ * The collection is needed for resolving record locations to actual objects.
+ */
+ const Collection* collection() const {
+ return _btreeState->collection();
+ }
+
private:
bool removeOneKey(const BSONObj& key, const DiskLoc& loc);
diff --git a/src/mongo/db/index/haystack_access_method.cpp b/src/mongo/db/index/haystack_access_method.cpp
index 76250d6797e..59e97a6191c 100644
--- a/src/mongo/db/index/haystack_access_method.cpp
+++ b/src/mongo/db/index/haystack_access_method.cpp
@@ -72,7 +72,7 @@ namespace mongo {
}
int scale = static_cast<int>(ceil(maxDistance / _bucketSize));
- GeoHaystackSearchHopper hopper(nearObj, maxDistance, limit, _geoField);
+ GeoHaystackSearchHopper hopper(nearObj, maxDistance, limit, _geoField, collection());
long long btreeMatches = 0;
diff --git a/src/mongo/db/index/haystack_access_method_internal.h b/src/mongo/db/index/haystack_access_method_internal.h
index c563cb3f045..c0ee96d2488 100644
--- a/src/mongo/db/index/haystack_access_method_internal.h
+++ b/src/mongo/db/index/haystack_access_method_internal.h
@@ -46,15 +46,22 @@ namespace mongo {
* @param limit The maximum number of results to return
* @param geoField Which field in the provided DiskLoc has the point to test.
*/
- GeoHaystackSearchHopper(const BSONObj& nearObj, double maxDistance, unsigned limit,
- const string& geoField)
- : _near(nearObj), _maxDistance(maxDistance), _limit(limit), _geoField(geoField) { }
+ GeoHaystackSearchHopper(const BSONObj& nearObj,
+ double maxDistance,
+ unsigned limit,
+ const string& geoField,
+ const Collection* collection)
+ : _collection(collection),
+ _near(nearObj),
+ _maxDistance(maxDistance),
+ _limit(limit),
+ _geoField(geoField) { }
// Consider the point in loc, and keep it if it's within _maxDistance (and we have space for
// it)
void consider(const DiskLoc& loc) {
if (limitReached()) return;
- Point p(loc.obj().getFieldDotted(_geoField));
+ Point p(_collection->docFor(loc).getFieldDotted(_geoField));
if (distance(_near, p) > _maxDistance)
return;
_locs.push_back(loc);
@@ -62,7 +69,7 @@ namespace mongo {
int appendResultsTo(BSONArrayBuilder* b) {
for (unsigned i = 0; i <_locs.size(); i++)
- b->append(_locs[i].obj());
+ b->append(_collection->docFor(_locs[i]));
return _locs.size();
}
@@ -71,6 +78,8 @@ namespace mongo {
return _locs.size() >= _limit;
}
private:
+ const Collection* _collection;
+
Point _near;
double _maxDistance;
unsigned _limit;
diff --git a/src/mongo/db/index/index_descriptor.h b/src/mongo/db/index/index_descriptor.h
index 293100bf3ad..b3eb54aa6b1 100644
--- a/src/mongo/db/index/index_descriptor.h
+++ b/src/mongo/db/index/index_descriptor.h
@@ -153,8 +153,9 @@ namespace mongo {
// Return the info object.
const BSONObj& infoObj() const { _checkOk(); return _infoObj; }
- // this is the owner of this IndexDescriptor
- IndexCatalog* getIndexCatalog() const { return _collection->getIndexCatalog(); }
+ // Both the collection and the catalog must outlive the IndexDescriptor
+ const Collection* getCollection() const { return _collection; }
+ const IndexCatalog* getIndexCatalog() const { return _collection->getIndexCatalog(); }
bool areIndexOptionsEquivalent( const IndexDescriptor* other ) const;
diff --git a/src/mongo/db/query/idhack_runner.cpp b/src/mongo/db/query/idhack_runner.cpp
index c57aee98ec2..fcd7b0fce8b 100644
--- a/src/mongo/db/query/idhack_runner.cpp
+++ b/src/mongo/db/query/idhack_runner.cpp
@@ -156,7 +156,7 @@ namespace mongo {
}
// Either the data was in memory or we paged it in.
- *objOut = loc.obj();
+ *objOut = _collection->docFor(loc);
// If we're sharded make sure the key belongs to us. We need the object to do this.
if (shardingState.needCollectionMetadata(_collection->ns().ns())) {
diff --git a/src/mongo/db/query/multi_plan_runner.cpp b/src/mongo/db/query/multi_plan_runner.cpp
index 4cf3e1af23e..a262dfbb62b 100644
--- a/src/mongo/db/query/multi_plan_runner.cpp
+++ b/src/mongo/db/query/multi_plan_runner.cpp
@@ -154,7 +154,7 @@ namespace mongo {
if (member->hasLoc() && member->loc == dl) {
list<WorkingSetID>::iterator next = it;
next++;
- WorkingSetCommon::fetchAndInvalidateLoc(member);
+ WorkingSetCommon::fetchAndInvalidateLoc(member, _collection);
_bestPlan->getWorkingSet()->flagForReview(*it);
_alreadyProduced.erase(it);
it = next;
@@ -171,7 +171,7 @@ namespace mongo {
if (member->hasLoc() && member->loc == dl) {
list<WorkingSetID>::iterator next = it;
next++;
- WorkingSetCommon::fetchAndInvalidateLoc(member);
+ WorkingSetCommon::fetchAndInvalidateLoc(member, _collection);
_backupPlan->getWorkingSet()->flagForReview(*it);
_backupAlreadyProduced.erase(it);
it = next;
@@ -191,7 +191,7 @@ namespace mongo {
if (member->hasLoc() && member->loc == dl) {
list<WorkingSetID>::iterator next = it;
next++;
- WorkingSetCommon::fetchAndInvalidateLoc(member);
+ WorkingSetCommon::fetchAndInvalidateLoc(member, _collection);
_candidates[i].ws->flagForReview(*it);
_candidates[i].results.erase(it);
it = next;
diff --git a/src/mongo/db/query/stage_builder.cpp b/src/mongo/db/query/stage_builder.cpp
index 125c1ef48c1..d017598191d 100644
--- a/src/mongo/db/query/stage_builder.cpp
+++ b/src/mongo/db/query/stage_builder.cpp
@@ -103,6 +103,7 @@ namespace mongo {
PlanStage* childStage = buildStages(collection, qsol, sn->children[0], ws);
if (NULL == childStage) { return NULL; }
SortStageParams params;
+ params.collection = collection;
params.pattern = sn->pattern;
params.query = sn->query;
params.limit = sn->limit;
@@ -146,7 +147,7 @@ namespace mongo {
}
else if (STAGE_AND_HASH == root->getType()) {
const AndHashNode* ahn = static_cast<const AndHashNode*>(root);
- auto_ptr<AndHashStage> ret(new AndHashStage(ws, ahn->filter.get()));
+ auto_ptr<AndHashStage> ret(new AndHashStage(ws, ahn->filter.get(), collection));
for (size_t i = 0; i < ahn->children.size(); ++i) {
PlanStage* childStage = buildStages(collection, qsol, ahn->children[i], ws);
if (NULL == childStage) { return NULL; }
@@ -166,7 +167,7 @@ namespace mongo {
}
else if (STAGE_AND_SORTED == root->getType()) {
const AndSortedNode* asn = static_cast<const AndSortedNode*>(root);
- auto_ptr<AndSortedStage> ret(new AndSortedStage(ws, asn->filter.get()));
+ auto_ptr<AndSortedStage> ret(new AndSortedStage(ws, asn->filter.get(), collection));
for (size_t i = 0; i < asn->children.size(); ++i) {
PlanStage* childStage = buildStages(collection, qsol, asn->children[i], ws);
if (NULL == childStage) { return NULL; }
@@ -179,7 +180,7 @@ namespace mongo {
MergeSortStageParams params;
params.dedup = msn->dedup;
params.pattern = msn->sort;
- auto_ptr<MergeSortStage> ret(new MergeSortStage(params, ws));
+ auto_ptr<MergeSortStage> ret(new MergeSortStage(params, ws, collection));
for (size_t i = 0; i < msn->children.size(); ++i) {
PlanStage* childStage = buildStages(collection, qsol, msn->children[i], ws);
if (NULL == childStage) { return NULL; }
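
The builder changes above show the constructor-threading pattern in one place: the collection pointer flows from buildStages() into each stage that may need to fetch on invalidation. A minimal sketch with stand-in types (only the AndHashStage shape is shown; the real stages carry more state):

    struct Collection {};
    struct WorkingSet {};
    struct MatchExpression {};

    class AndHashStage {
    public:
        // The collection is held, not owned, for later docFor() calls when a
        // DiskLoc is invalidated mid-query.
        AndHashStage(WorkingSet* ws,
                     const MatchExpression* filter,
                     const Collection* collection)
            : _collection(collection), _ws(ws), _filter(filter) {}

    private:
        const Collection* _collection;   // not owned by us
        WorkingSet* _ws;                 // not owned by us
        const MatchExpression* _filter;  // not owned by us
    };

    // Builder shape after this change: no global lookup, the caller's
    // collection is passed straight through.
    AndHashStage* buildAndHash(const Collection* collection,
                               WorkingSet* ws,
                               const MatchExpression* filter) {
        return new AndHashStage(ws, filter, collection);
    }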
diff --git a/src/mongo/dbtests/query_stage_and.cpp b/src/mongo/dbtests/query_stage_and.cpp
index ec805f81972..46689927bec 100644
--- a/src/mongo/dbtests/query_stage_and.cpp
+++ b/src/mongo/dbtests/query_stage_and.cpp
@@ -140,7 +140,7 @@ namespace QueryStageAnd {
addIndex(BSON("bar" << 1));
WorkingSet ws;
- scoped_ptr<AndHashStage> ah(new AndHashStage(&ws, NULL));
+ scoped_ptr<AndHashStage> ah(new AndHashStage(&ws, NULL, coll));
// Foo <= 20
IndexScanParams params;
@@ -242,7 +242,7 @@ namespace QueryStageAnd {
addIndex(BSON("baz" << 1));
WorkingSet ws;
- scoped_ptr<AndHashStage> ah(new AndHashStage(&ws, NULL));
+ scoped_ptr<AndHashStage> ah(new AndHashStage(&ws, NULL, coll));
// Foo <= 20 (descending)
IndexScanParams params;
@@ -326,7 +326,7 @@ namespace QueryStageAnd {
addIndex(BSON("bar" << 1));
WorkingSet ws;
- scoped_ptr<AndHashStage> ah(new AndHashStage(&ws, NULL));
+ scoped_ptr<AndHashStage> ah(new AndHashStage(&ws, NULL, coll));
// Foo <= 20
IndexScanParams params;
@@ -379,7 +379,7 @@ namespace QueryStageAnd {
// before hashed AND is done reading the first child (stage has to
// hold 21 keys in buffer for Foo <= 20).
WorkingSet ws;
- scoped_ptr<AndHashStage> ah(new AndHashStage(&ws, NULL, 20 * big.size()));
+ scoped_ptr<AndHashStage> ah(new AndHashStage(&ws, NULL, coll, 20 * big.size()));
// Foo <= 20
IndexScanParams params;
@@ -430,7 +430,7 @@ namespace QueryStageAnd {
// keys in last child's index are not buffered. There are 6 keys
// that satisfy the criteria Foo <= 20 and Bar >= 10 and 5 <= baz <= 15.
WorkingSet ws;
- scoped_ptr<AndHashStage> ah(new AndHashStage(&ws, NULL, 5 * big.size()));
+ scoped_ptr<AndHashStage> ah(new AndHashStage(&ws, NULL, coll, 5 * big.size()));
// Foo <= 20
IndexScanParams params;
@@ -476,7 +476,7 @@ namespace QueryStageAnd {
addIndex(BSON("baz" << 1));
WorkingSet ws;
- scoped_ptr<AndHashStage> ah(new AndHashStage(&ws, NULL));
+ scoped_ptr<AndHashStage> ah(new AndHashStage(&ws, NULL, coll));
// Foo <= 20
IndexScanParams params;
@@ -541,7 +541,7 @@ namespace QueryStageAnd {
// before hashed AND is done reading the second child (stage has to
// hold 11 keys in buffer for Foo <= 20 and Bar >= 10).
WorkingSet ws;
- scoped_ptr<AndHashStage> ah(new AndHashStage(&ws, NULL, 10 * big.size()));
+ scoped_ptr<AndHashStage> ah(new AndHashStage(&ws, NULL, coll, 10 * big.size()));
// Foo <= 20
IndexScanParams params;
@@ -593,7 +593,7 @@ namespace QueryStageAnd {
addIndex(BSON("bar" << 1));
WorkingSet ws;
- scoped_ptr<AndHashStage> ah(new AndHashStage(&ws, NULL));
+ scoped_ptr<AndHashStage> ah(new AndHashStage(&ws, NULL, coll));
// Foo <= 20
IndexScanParams params;
@@ -652,7 +652,7 @@ namespace QueryStageAnd {
addIndex(BSON("bar" << 1));
WorkingSet ws;
- scoped_ptr<AndHashStage> ah(new AndHashStage(&ws, NULL));
+ scoped_ptr<AndHashStage> ah(new AndHashStage(&ws, NULL, coll));
// Foo >= 100
IndexScanParams params;
@@ -702,7 +702,7 @@ namespace QueryStageAnd {
StatusWithMatchExpression swme = MatchExpressionParser::parse(filter);
verify(swme.isOK());
auto_ptr<MatchExpression> filterExpr(swme.getValue());
- scoped_ptr<AndHashStage> ah(new AndHashStage(&ws, filterExpr.get()));
+ scoped_ptr<AndHashStage> ah(new AndHashStage(&ws, filterExpr.get(), coll));
// Foo <= 20
IndexScanParams params;
@@ -753,7 +753,7 @@ namespace QueryStageAnd {
addIndex(BSON("bar" << 1));
WorkingSet ws;
- scoped_ptr<AndSortedStage> ah(new AndSortedStage(&ws, NULL));
+ scoped_ptr<AndSortedStage> ah(new AndSortedStage(&ws, NULL, coll));
// Scan over foo == 1
IndexScanParams params;
@@ -878,7 +878,7 @@ namespace QueryStageAnd {
addIndex(BSON("baz" << 1));
WorkingSet ws;
- scoped_ptr<AndSortedStage> ah(new AndSortedStage(&ws, NULL));
+ scoped_ptr<AndSortedStage> ah(new AndSortedStage(&ws, NULL, coll));
// Scan over foo == 1
IndexScanParams params;
@@ -922,7 +922,7 @@ namespace QueryStageAnd {
addIndex(BSON("bar" << 1));
WorkingSet ws;
- scoped_ptr<AndSortedStage> ah(new AndSortedStage(&ws, NULL));
+ scoped_ptr<AndSortedStage> ah(new AndSortedStage(&ws, NULL, coll));
// Foo == 7. Should be EOF.
IndexScanParams params;
@@ -969,7 +969,7 @@ namespace QueryStageAnd {
addIndex(BSON("bar" << 1));
WorkingSet ws;
- scoped_ptr<AndSortedStage> ah(new AndSortedStage(&ws, NULL));
+ scoped_ptr<AndSortedStage> ah(new AndSortedStage(&ws, NULL, coll));
// foo == 7.
IndexScanParams params;
@@ -1016,7 +1016,7 @@ namespace QueryStageAnd {
StatusWithMatchExpression swme = MatchExpressionParser::parse(filterObj);
verify(swme.isOK());
auto_ptr<MatchExpression> filterExpr(swme.getValue());
- scoped_ptr<AndSortedStage> ah(new AndSortedStage(&ws, filterExpr.get()));
+ scoped_ptr<AndSortedStage> ah(new AndSortedStage(&ws, filterExpr.get(), coll));
// Scan over foo == 1
IndexScanParams params;
@@ -1056,7 +1056,7 @@ namespace QueryStageAnd {
addIndex(BSON("bar" << 1));
WorkingSet ws;
- scoped_ptr<AndHashStage> ah(new AndHashStage(&ws, NULL));
+ scoped_ptr<AndHashStage> ah(new AndHashStage(&ws, NULL, coll));
// Scan over foo == 1
IndexScanParams params;
diff --git a/src/mongo/dbtests/query_stage_merge_sort.cpp b/src/mongo/dbtests/query_stage_merge_sort.cpp
index 78a32521693..97bee896f03 100644
--- a/src/mongo/dbtests/query_stage_merge_sort.cpp
+++ b/src/mongo/dbtests/query_stage_merge_sort.cpp
@@ -129,7 +129,7 @@ namespace QueryStageMergeSortTests {
// Sort by c:1
MergeSortStageParams msparams;
msparams.pattern = BSON("c" << 1);
- MergeSortStage* ms = new MergeSortStage(msparams, ws);
+ MergeSortStage* ms = new MergeSortStage(msparams, ws, coll);
// a:1
IndexScanParams params;
@@ -192,7 +192,7 @@ namespace QueryStageMergeSortTests {
// Sort by c:1
MergeSortStageParams msparams;
msparams.pattern = BSON("c" << 1);
- MergeSortStage* ms = new MergeSortStage(msparams, ws);
+ MergeSortStage* ms = new MergeSortStage(msparams, ws, coll);
// a:1
IndexScanParams params;
@@ -254,7 +254,7 @@ namespace QueryStageMergeSortTests {
MergeSortStageParams msparams;
msparams.dedup = false;
msparams.pattern = BSON("c" << 1);
- MergeSortStage* ms = new MergeSortStage(msparams, ws);
+ MergeSortStage* ms = new MergeSortStage(msparams, ws, coll);
// a:1
IndexScanParams params;
@@ -318,7 +318,7 @@ namespace QueryStageMergeSortTests {
// Sort by c:-1
MergeSortStageParams msparams;
msparams.pattern = BSON("c" << -1);
- MergeSortStage* ms = new MergeSortStage(msparams, ws);
+ MergeSortStage* ms = new MergeSortStage(msparams, ws, coll);
// a:1
IndexScanParams params;
@@ -381,7 +381,7 @@ namespace QueryStageMergeSortTests {
// Sort by c:1
MergeSortStageParams msparams;
msparams.pattern = BSON("c" << 1);
- MergeSortStage* ms = new MergeSortStage(msparams, ws);
+ MergeSortStage* ms = new MergeSortStage(msparams, ws, coll);
// a:1
IndexScanParams params;
@@ -430,7 +430,7 @@ namespace QueryStageMergeSortTests {
// Sort by foo:1
MergeSortStageParams msparams;
msparams.pattern = BSON("foo" << 1);
- MergeSortStage* ms = new MergeSortStage(msparams, ws);
+ MergeSortStage* ms = new MergeSortStage(msparams, ws, coll);
IndexScanParams params;
params.bounds.isSimpleRange = true;
@@ -482,7 +482,7 @@ namespace QueryStageMergeSortTests {
// Sort by foo:1
MergeSortStageParams msparams;
msparams.pattern = BSON("foo" << 1);
- auto_ptr<MergeSortStage> ms(new MergeSortStage(msparams, &ws));
+ auto_ptr<MergeSortStage> ms(new MergeSortStage(msparams, &ws, coll));
IndexScanParams params;
params.bounds.isSimpleRange = true;
diff --git a/src/mongo/dbtests/query_stage_sort.cpp b/src/mongo/dbtests/query_stage_sort.cpp
index 2da0d135aa3..d8e5d2fc340 100644
--- a/src/mongo/dbtests/query_stage_sort.cpp
+++ b/src/mongo/dbtests/query_stage_sort.cpp
@@ -116,6 +116,7 @@ namespace QueryStageSortTests {
insertVarietyOfObjects(ms, coll);
SortStageParams params;
+ params.collection = coll;
params.pattern = BSON("foo" << direction);
params.limit = limit();
@@ -259,6 +260,7 @@ namespace QueryStageSortTests {
insertVarietyOfObjects(ms.get(), coll);
SortStageParams params;
+ params.collection = coll;
params.pattern = BSON("foo" << 1);
params.limit = limit();
auto_ptr<SortStage> ss(new SortStage(params, &ws, ms.get()));
@@ -351,6 +353,7 @@ namespace QueryStageSortTests {
}
SortStageParams params;
+ params.collection = coll;
params.pattern = BSON("b" << -1 << "c" << 1 << "a" << 1);
params.limit = 0;