summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorHari Khalsa <hkhalsa@10gen.com>2013-07-10 14:55:24 -0400
committerHari Khalsa <hkhalsa@10gen.com>2013-07-17 14:51:32 -0400
commit5e63001cd9bb86f57af2343536a0a0cd32f35dc2 (patch)
tree55a6cc49a4b5d691250edd12f3fa2aa876c84d2d
parent457e50ba8c19cc6df8d865a870268a5a5462de8b (diff)
downloadmongo-5e63001cd9bb86f57af2343536a0a0cd32f35dc2.tar.gz
SERVER-10026 collection scan
-rw-r--r--jstests/stages_collection_scan.js43
-rw-r--r--src/mongo/db/exec/SConscript2
-rw-r--r--src/mongo/db/exec/collection_iterator.cpp293
-rw-r--r--src/mongo/db/exec/collection_iterator.h134
-rw-r--r--src/mongo/db/exec/collection_scan.cpp88
-rw-r--r--src/mongo/db/exec/collection_scan.h59
-rw-r--r--src/mongo/db/exec/collection_scan_common.h46
-rw-r--r--src/mongo/db/exec/stagedebug_cmd.cpp23
-rw-r--r--src/mongo/dbtests/pdfiletests.cpp1
-rw-r--r--src/mongo/dbtests/query_stage_collscan.cpp597
10 files changed, 1285 insertions, 1 deletions
diff --git a/jstests/stages_collection_scan.js b/jstests/stages_collection_scan.js
new file mode 100644
index 00000000000..d7de30cf8e7
--- /dev/null
+++ b/jstests/stages_collection_scan.js
@@ -0,0 +1,43 @@
+// Test basic query stage collection scan functionality.
+t = db.stages_collection_scan;
+t.drop();
+
+var N = 50;
+for (var i = 0; i < N; ++i) {
+ t.insert({foo: i});
+}
+
+forward = {cscan: {args: {name: "stages_collection_scan", direction: 1}}}
+res = db.runCommand({stageDebug: forward});
+assert(!db.getLastError());
+assert.eq(res.ok, 1);
+assert.eq(res.results.length, N);
+assert.eq(res.results[0].foo, 0);
+assert.eq(res.results[49].foo, 49);
+
+// And, backwards.
+backward = {cscan: {args: {name: "stages_collection_scan", direction: -1}}}
+res = db.runCommand({stageDebug: backward});
+assert(!db.getLastError());
+assert.eq(res.ok, 1);
+assert.eq(res.results.length, N);
+assert.eq(res.results[0].foo, 49);
+assert.eq(res.results[49].foo, 0);
+
+forwardFiltered = {cscan: {args: {name: "stages_collection_scan", direction: 1},
+ filter: {foo: {$lt: 25}}}}
+res = db.runCommand({stageDebug: forwardFiltered});
+assert(!db.getLastError());
+assert.eq(res.ok, 1);
+assert.eq(res.results.length, 25);
+assert.eq(res.results[0].foo, 0);
+assert.eq(res.results[24].foo, 24);
+
+backwardFiltered = {cscan: {args: {name: "stages_collection_scan", direction: -1},
+ filter: {foo: {$lt: 25}}}}
+res = db.runCommand({stageDebug: backwardFiltered});
+assert(!db.getLastError());
+assert.eq(res.ok, 1);
+assert.eq(res.results.length, 25);
+assert.eq(res.results[0].foo, 24);
+assert.eq(res.results[24].foo, 0);
diff --git a/src/mongo/db/exec/SConscript b/src/mongo/db/exec/SConscript
index c0606c4ab1d..6975dee70e5 100644
--- a/src/mongo/db/exec/SConscript
+++ b/src/mongo/db/exec/SConscript
@@ -37,6 +37,8 @@ env.StaticLibrary(
source = [
"and_hash.cpp",
"and_sorted.cpp",
+ "collection_scan.cpp",
+ "collection_iterator.cpp",
"fetch.cpp",
"index_scan.cpp",
"limit.cpp",
diff --git a/src/mongo/db/exec/collection_iterator.cpp b/src/mongo/db/exec/collection_iterator.cpp
new file mode 100644
index 00000000000..ccea0f093d8
--- /dev/null
+++ b/src/mongo/db/exec/collection_iterator.cpp
@@ -0,0 +1,293 @@
+/**
+ * Copyright (C) 2013 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "mongo/db/exec/collection_iterator.h"
+
+#include "mongo/db/pdfile.h"
+
+namespace mongo {
+
+ //
+ // Regular / non-capped collection traversal
+ //
+
+ FlatIterator::FlatIterator(const string& ns, const DiskLoc& start,
+ const CollectionScanParams::Direction& dir)
+ : _curr(start), _ns(ns), _direction(dir) {
+
+ if (_curr.isNull()) {
+ // No valid start provided so we must find the start.
+ NamespaceDetails* details = nsdetails(ns);
+ verify(NULL != details);
+
+ if (CollectionScanParams::FORWARD == _direction) {
+ // Find a non-empty extent and start with the first record in it.
+ Extent* e = DataFileMgr::getExtent(details->firstExtent());
+
+ while (e->firstRecord.isNull() && !e->xnext.isNull()) {
+ e = e->getNextExtent();
+ }
+
+ // _curr may be set to DiskLoc() here if e->firstRecord isNull but there is no
+ // valid e->xnext
+ _curr = e->firstRecord;
+ }
+ else {
+ // Walk backwards, skipping empty extents, and use the last record in the first
+ // non-empty extent we see.
+ Extent* e = details->lastExtent().ext();
+
+ // TODO ELABORATE
+ // Does one of e->lastRecord.isNull(), e->firstRecord.isNull() imply the other?
+ while (e->lastRecord.isNull() && !e->xprev.isNull()) {
+ e = e->getPrevExtent();
+ }
+
+ // _curr may be set to DiskLoc() here if e->lastRecord isNull but there is no
+ // valid e->xprev
+ _curr = e->lastRecord;
+ }
+ }
+ }
+
+ bool FlatIterator::isEOF() { return _curr.isNull(); }
+
+ DiskLoc FlatIterator::getNext() {
+ DiskLoc ret = _curr;
+
+ // Move to the next thing.
+ if (!isEOF()) {
+ if (CollectionScanParams::FORWARD == _direction) {
+ _curr = _curr.rec()->getNext(_curr);
+ }
+ else {
+ _curr = _curr.rec()->getPrev(_curr);
+ }
+ }
+
+ return ret;
+ }
+
+ void FlatIterator::invalidate(const DiskLoc& dl) {
+ // Just move past the thing being deleted.
+ if (dl == _curr) {
+ // TODO: do we need this for sure? Playing it safe for now.
+ if (NULL == nsdetails(_ns)) {
+ return;
+ }
+ // We don't care about the return of getNext so much as the side effect of moving _curr
+ // to the 'next' thing.
+ getNext();
+ }
+ }
+
+ void FlatIterator::prepareToYield() { }
+
+ bool FlatIterator::recoverFromYield() {
+ // If the collection we're iterating over was dropped...
+ if (NULL == nsdetails(_ns)) {
+ // Go right to EOF as a preventative measure.
+ _curr = DiskLoc();
+
+ // And return "NOT OK."
+ return false;
+ }
+
+ return true;
+ }
+
+ //
+ // Capped collection traversal
+ //
+
+ CappedIterator::CappedIterator(const string& ns, const DiskLoc& start, bool tailable,
+ const CollectionScanParams::Direction& dir)
+ : _ns(ns), _curr(start), _tailable(tailable), _direction(dir), _killedByInvalidate(false) {
+
+ _details = nsdetails(ns);
+ verify(NULL != _details);
+
+ if (_curr.isNull()) {
+ // If a start position isn't specified, we fill one out from the start of the
+ // collection.
+ if (CollectionScanParams::FORWARD == _direction) {
+ // Going forwards.
+ if (!_details->capLooped()) {
+ // If our capped collection doesn't loop around, the first record is easy.
+ _curr = _details->firstRecord();
+ }
+ else {
+ // Our capped collection has "looped" around.
+ // Copied verbatim from ForwardCappedCursor::init.
+ // TODO ELABORATE
+ _curr = _details->capExtent().ext()->firstRecord;
+ if (!_curr.isNull() && _curr == _details->capFirstNewRecord()) {
+ _curr = _details->capExtent().ext()->lastRecord;
+ _curr = nextLoop(_details, _curr);
+ }
+ }
+ }
+ else {
+ // Going backwards
+ if (!_details->capLooped()) {
+ // Start at the end.
+ _curr = _details->lastRecord();
+ }
+ else {
+ _curr = _details->capExtent().ext()->lastRecord;
+ }
+ }
+ }
+ }
+
+ bool CappedIterator::isEOF() { return _curr.isNull(); }
+
+ DiskLoc CappedIterator::getNext() {
+ DiskLoc ret = _curr;
+
+ // Move to the next thing.
+ if (!isEOF()) {
+ _curr = getNextCapped(_curr, _direction, _details);
+ }
+ else if (_tailable && !_prev.isNull()) {
+ // If we're tailable, there COULD have been something inserted even though we were
+ // previously EOF. Look at the next thing from 'prev' and see.
+ DiskLoc newCurr = getNextCapped(_prev, _direction, _details);
+
+ if (!newCurr.isNull()) {
+ // There's something new to return. _curr always points to the next thing to
+ // return. Update it, and move _prev to the thing we just returned.
+ ret = _prev = newCurr;
+ _curr = getNextCapped(_prev, _direction, _details);
+ }
+ }
+
+ return ret;
+ }
+
+ void CappedIterator::invalidate(const DiskLoc& dl) {
+ if ((_tailable && _curr.isNull() && dl == _prev) || (dl == _curr)) {
+ // In the _tailable case, we're about to kill the DiskLoc that we're tailing. Nothing
+ // that we can possibly do to survive that.
+ //
+ // In the _curr case, we *could* move to the next thing, since there is actually a next
+ // thing, but according to clientcursor.cpp:
+ // "note we cannot advance here. if this condition occurs, writes to the oplog
+ // have "caught" the reader. skipping ahead, the reader would miss potentially
+ // important data."
+ _curr = _prev = DiskLoc();
+ _killedByInvalidate = true;
+ }
+ }
+
+ void CappedIterator::prepareToYield() {
+ _details = NULL;
+ }
+
+ bool CappedIterator::recoverFromYield() {
+ // If invalidate invalidated the DiskLoc we relied on, give up now.
+ if (_killedByInvalidate) {
+ return false;
+ }
+
+ _details = nsdetails(_ns);
+
+ // If the collection was deleted from under us, stop.
+ if (NULL == _details) {
+ // For paranoia's sake force EOF.
+ _curr = _prev = DiskLoc();
+ return false;
+ }
+
+ return true;
+ }
+
+ DiskLoc CappedIterator::getNextCapped(const DiskLoc& dl,
+ CollectionScanParams::Direction direction,
+ NamespaceDetails* nsd) {
+ verify(!dl.isNull());
+
+ if (CollectionScanParams::FORWARD == direction) {
+ // If it's not looped, it's easy.
+ if (!nsd->capLooped()) { return dl.rec()->getNext(dl); }
+
+ // TODO ELABORATE
+ // EOF.
+ if (dl == nsd->capExtent().ext()->lastRecord) { return DiskLoc(); }
+
+ DiskLoc ret = nextLoop(nsd, dl);
+
+ // If we become capFirstNewRecord from same extent, advance to next extent.
+ if (ret == nsd->capFirstNewRecord() && ret != nsd->capExtent().ext()->firstRecord) {
+ ret = nextLoop(nsd, nsd->capExtent().ext()->lastRecord);
+ }
+
+ // If we have just gotten to beginning of capExtent, skip to capFirstNewRecord
+ if (ret == nsd->capExtent().ext()->firstRecord) { ret = nsd->capFirstNewRecord(); }
+
+ return ret;
+ }
+ else {
+ if (!nsd->capLooped()) { return dl.rec()->getPrev(dl); }
+
+ // TODO ELABORATE
+ // Last record
+ if (nsd->capFirstNewRecord() == nsd->capExtent().ext()->firstRecord) {
+ if (dl == nextLoop(nsd, nsd->capExtent().ext()->lastRecord)) {
+ return DiskLoc();
+ }
+ }
+ else {
+ if (dl == nsd->capExtent().ext()->firstRecord) { return DiskLoc(); }
+ }
+
+ DiskLoc ret;
+ // If we are capFirstNewRecord, advance to prev extent, otherwise just get prev.
+ if (dl == nsd->capFirstNewRecord()) {
+ ret = prevLoop(nsd, nsd->capExtent().ext()->firstRecord);
+ }
+ else {
+ ret = prevLoop(nsd, dl);
+ }
+
+ // If we just became last in cap extent, advance past capFirstNewRecord
+ // (We know capExtent.ext()->firstRecord != capFirstNewRecord, since would
+ // have returned DiskLoc() earlier otherwise.)
+ if (ret == nsd->capExtent().ext()->lastRecord) {
+ ret = nsd->capFirstNewRecord().rec()->getPrev(nsd->capFirstNewRecord());
+ }
+
+ return ret;
+ }
+ }
+
+ DiskLoc CappedIterator::nextLoop(NamespaceDetails* nsd, const DiskLoc& prev) {
+ // TODO ELABORATE
+ verify(nsd->capLooped());
+ DiskLoc next = prev.rec()->getNext(prev);
+ if (!next.isNull()) { return next; }
+ return nsd->firstRecord();
+ }
+
+ DiskLoc CappedIterator::prevLoop(NamespaceDetails* nsd, const DiskLoc& curr) {
+ // TODO ELABORATE
+ verify(nsd->capLooped());
+ DiskLoc prev = curr.rec()->getPrev(curr);
+ if (!prev.isNull()) { return prev; }
+ return nsd->lastRecord();
+ }
+
+} // namespace mongo
diff --git a/src/mongo/db/exec/collection_iterator.h b/src/mongo/db/exec/collection_iterator.h
new file mode 100644
index 00000000000..3e8f58818ea
--- /dev/null
+++ b/src/mongo/db/exec/collection_iterator.h
@@ -0,0 +1,134 @@
+/**
+ * Copyright (C) 2013 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#pragma once
+
+#include "mongo/db/exec/collection_scan_common.h"
+
+namespace mongo {
+
+ class DiskLoc;
+ class NamespaceDetails;
+
+ /**
+ * A CollectionIterator provides an interface for walking over a collection.
+ * The details of navigating the collection's structure are below this interface.
+ */
+ class CollectionIterator {
+ public:
+ virtual ~CollectionIterator() { }
+
+ // True if getNext will produce no more data, false otherwise.
+ virtual bool isEOF() = 0;
+
+ // Return the next item from the collection. Returns DiskLoc() if isEOF.
+ virtual DiskLoc getNext() = 0;
+
+ // Can only be called after prepareToYield and before recoverFromYield.
+ virtual void invalidate(const DiskLoc& dl) = 0;
+
+ // Save any state required to resume operation (without crashing) after DiskLoc deletion or
+ // a collection drop.
+ virtual void prepareToYield() = 0;
+
+ // Returns true if collection still exists, false otherwise.
+ virtual bool recoverFromYield() = 0;
+ };
+
+ /**
+ * This class iterates over a non-capped collection identified by 'ns'.
+ * The collection must exist when the constructor is called.
+ *
+ * If start is not DiskLoc(), the iteration begins at that DiskLoc.
+ */
+ class FlatIterator : public CollectionIterator {
+ public:
+ FlatIterator(const string& ns, const DiskLoc& start,
+ const CollectionScanParams::Direction& dir);
+ virtual ~FlatIterator() { }
+
+ virtual bool isEOF();
+ virtual DiskLoc getNext();
+
+ virtual void invalidate(const DiskLoc& dl);
+ virtual void prepareToYield();
+ virtual bool recoverFromYield();
+
+ private:
+ // The result returned on the next call to getNext().
+ DiskLoc _curr;
+
+ // The collection we're iterating over.
+ string _ns;
+
+ CollectionScanParams::Direction _direction;
+ };
+
+ /**
+ * This class iterates over a capped collection identified by 'ns'.
+ * The collection must exist when the constructor is called.
+ *
+ * If start is not DiskLoc(), the iteration begins at that DiskLoc.
+ *
+ * If tailable is true, getNext() can be called after isEOF. It will use the last valid
+ * returned DiskLoc and try to find the next record from that.
+ */
+ class CappedIterator : public CollectionIterator {
+ public:
+ CappedIterator(const string& ns, const DiskLoc& start, bool tailable,
+ const CollectionScanParams::Direction& dir);
+ virtual ~CappedIterator() { }
+
+ // If this is a tailable cursor, isEOF could change its mind after a call to getNext().
+ virtual bool isEOF();
+ virtual DiskLoc getNext();
+
+ virtual void invalidate(const DiskLoc& dl);
+ virtual void prepareToYield();
+ virtual bool recoverFromYield();
+
+ private:
+ /**
+ * Internal collection navigation helper methods.
+ */
+ static DiskLoc getNextCapped(const DiskLoc& dl, CollectionScanParams::Direction direction,
+ NamespaceDetails* nsd);
+ static DiskLoc prevLoop(NamespaceDetails* nsd, const DiskLoc& curr);
+ static DiskLoc nextLoop(NamespaceDetails* nsd, const DiskLoc& prev);
+
+ // The collection we're iterating over.
+ string _ns;
+
+ // Not valid between calls to prepareToYield and recoverFromYield.
+ // The actual on-disk catalog information for the collection referred to by '_ns'.
+ // We need the extent information in this to navigate capped collections.
+ NamespaceDetails* _details;
+
+ // The result returned on the next call to getNext().
+ DiskLoc _curr;
+
+ // If we're tailable, we try to progress from the last valid result when we hit the end.
+ DiskLoc _prev;
+ bool _tailable;
+
+ CollectionScanParams::Direction _direction;
+
+ // If invalidate kills the DiskLoc we need to move forward, we kill the iterator. See the
+ // comment in the body of invalidate(...).
+ bool _killedByInvalidate;
+ };
+
+} // namespace mongo
diff --git a/src/mongo/db/exec/collection_scan.cpp b/src/mongo/db/exec/collection_scan.cpp
new file mode 100644
index 00000000000..fb7913c22e3
--- /dev/null
+++ b/src/mongo/db/exec/collection_scan.cpp
@@ -0,0 +1,88 @@
+/**
+ * Copyright (C) 2013 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "mongo/db/exec/collection_scan.h"
+
+#include "mongo/db/exec/collection_scan_common.h"
+#include "mongo/db/exec/collection_iterator.h"
+#include "mongo/db/exec/working_set.h"
+#include "mongo/db/namespace_details.h"
+#include "mongo/db/pdfile.h"
+
+namespace mongo {
+
+ CollectionScan::CollectionScan(const CollectionScanParams& params,
+ WorkingSet* workingSet,
+ Matcher* matcher) : _workingSet(workingSet),
+ _matcher(matcher),
+ _params(params),
+ _nsDropped(false) { }
+
+ PlanStage::StageState CollectionScan::work(WorkingSetID* out) {
+ if (NULL == _iter) {
+ NamespaceDetails* nsd = nsdetails(_params.ns);
+
+ if (NULL == nsd) {
+ _nsDropped = true;
+ return PlanStage::FAILURE;
+ }
+
+ if (nsd->isCapped()) {
+ _iter.reset(new CappedIterator(_params.ns, _params.start, _params.tailable,
+ _params.direction));
+ }
+ else {
+ _iter.reset(new FlatIterator(_params.ns, _params.start, _params.direction));
+ }
+
+ return PlanStage::NEED_TIME;
+ }
+
+ if (isEOF()) { return PlanStage::IS_EOF; }
+
+ WorkingSetID id = _workingSet->allocate();
+ WorkingSetMember* member = _workingSet->get(id);
+ member->loc = _iter->getNext();
+ member->obj = member->loc.obj();
+ member->state = WorkingSetMember::LOC_AND_UNOWNED_OBJ;
+
+ if (NULL == _matcher || _matcher->matches(member)) {
+ *out = id;
+ return PlanStage::ADVANCED;
+ }
+ else {
+ _workingSet->free(id);
+ return PlanStage::NEED_TIME;
+ }
+ }
+
+ bool CollectionScan::isEOF() {
+ if (_nsDropped) { return true; }
+ if (NULL == _iter) { return false; }
+ return _iter->isEOF();
+ }
+
+ void CollectionScan::invalidate(const DiskLoc& dl) { _iter->invalidate(dl); }
+
+ void CollectionScan::prepareToYield() { _iter->prepareToYield(); }
+
+ void CollectionScan::recoverFromYield() {
+ if (!_iter->recoverFromYield()) {
+ _nsDropped = true;
+ }
+ }
+
+} // namespace mongo
diff --git a/src/mongo/db/exec/collection_scan.h b/src/mongo/db/exec/collection_scan.h
new file mode 100644
index 00000000000..2f93d5e68bb
--- /dev/null
+++ b/src/mongo/db/exec/collection_scan.h
@@ -0,0 +1,59 @@
+/**
+ * Copyright (C) 2013 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#pragma once
+
+#include "mongo/db/diskloc.h"
+#include "mongo/db/matcher.h"
+#include "mongo/db/exec/collection_iterator.h"
+#include "mongo/db/exec/collection_scan_common.h"
+#include "mongo/db/exec/plan_stage.h"
+
+namespace mongo {
+
+ class WorkingSet;
+
+ /**
+ * Scans over a collection, starting at the DiskLoc provided in params and continuing until
+ * there are no more records in the collection.
+ *
+ * Preconditions: Valid DiskLoc.
+ */
+ class CollectionScan : public PlanStage {
+ public:
+ CollectionScan(const CollectionScanParams& params, WorkingSet* workingSet,
+ Matcher* matcher);
+
+ virtual StageState work(WorkingSetID* out);
+ virtual bool isEOF();
+
+ virtual void invalidate(const DiskLoc& dl);
+ virtual void prepareToYield();
+ virtual void recoverFromYield();
+
+ private:
+ // WorkingSet is not owned by us.
+ WorkingSet* _workingSet;
+ scoped_ptr<Matcher> _matcher;
+ scoped_ptr<CollectionIterator> _iter;
+
+ CollectionScanParams _params;
+
+ // True if nsdetails(_ns) == NULL on our first call to work.
+ bool _nsDropped;
+ };
+
+} // namespace mongo
diff --git a/src/mongo/db/exec/collection_scan_common.h b/src/mongo/db/exec/collection_scan_common.h
new file mode 100644
index 00000000000..cdb42e59fd0
--- /dev/null
+++ b/src/mongo/db/exec/collection_scan_common.h
@@ -0,0 +1,46 @@
+/**
+ * Copyright (C) 2013 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#pragma once
+
+#include "mongo/db/diskloc.h"
+
+namespace mongo {
+
+ struct CollectionScanParams {
+ enum Direction {
+ FORWARD,
+ BACKWARD,
+ };
+
+ CollectionScanParams() : start(DiskLoc()),
+ direction(FORWARD),
+ tailable(false) { }
+
+ // What collection?
+ string ns;
+
+ // isNull by default. If you specify any value for this, you're responsible for the DiskLoc
+ // not being invalidated before the first call to work(...).
+ DiskLoc start;
+
+ Direction direction;
+
+ // Do we want the scan to be 'tailable'? Only meaningful if the collection is capped.
+ bool tailable;
+ };
+
+} // namespace mongo
diff --git a/src/mongo/db/exec/stagedebug_cmd.cpp b/src/mongo/db/exec/stagedebug_cmd.cpp
index d55a9dd497b..b2a74ecab95 100644
--- a/src/mongo/db/exec/stagedebug_cmd.cpp
+++ b/src/mongo/db/exec/stagedebug_cmd.cpp
@@ -20,6 +20,7 @@
#include "mongo/db/commands.h"
#include "mongo/db/exec/and_hash.h"
#include "mongo/db/exec/and_sorted.h"
+#include "mongo/db/exec/collection_scan.h"
#include "mongo/db/exec/fetch.h"
#include "mongo/db/exec/index_scan.h"
#include "mongo/db/exec/limit.h"
@@ -48,6 +49,7 @@ namespace mongo {
* args: {name: "collectionname", indexKeyPattern: kpObj, start: startObj,
* stop: stopObj, endInclusive: true/false, direction: -1/1,
* limit: int}}}
+ * node -> {cscan: {filter: {filter}, args: {name: "collectionname", direction: -1/1}}}
*
* Internal Nodes:
*
@@ -60,7 +62,6 @@ namespace mongo {
*
* Forthcoming Nodes:
*
- * node -> {cscan: {filter: {filter}, args: {name: "collectionname" }}}
* node -> {sort: {filter: {filter}, args: {node: node, pattern: objWithSortCriterion}}}
* node -> {dedup: {filter: {filter}, args: {node: node, field: field}}}
* node -> {unwind: {filter: filter}, args: {node: node, field: field}}
@@ -253,6 +254,26 @@ namespace mongo {
PlanStage* subNode = parseQuery(dbname, nodeArgs["node"].Obj(), workingSet);
return new SkipStage(nodeArgs["num"].numberInt(), workingSet, subNode);
}
+ else if ("cscan" == nodeName) {
+ CollectionScanParams params;
+
+ // What collection?
+ params.ns = dbname + "." + nodeArgs["name"].String();
+ uassert(16962, "Can't find collection " + nodeArgs["name"].String(),
+ NULL != nsdetails(params.ns));
+
+ // What direction?
+ uassert(16963, "Direction argument must be specified and be a number",
+ nodeArgs["direction"].isNumber());
+ if (1 == nodeArgs["direction"].numberInt()) {
+ params.direction = CollectionScanParams::FORWARD;
+ }
+ else {
+ params.direction = CollectionScanParams::BACKWARD;
+ }
+
+ return new CollectionScan(params, workingSet, matcher.release());
+ }
else {
return NULL;
}
diff --git a/src/mongo/dbtests/pdfiletests.cpp b/src/mongo/dbtests/pdfiletests.cpp
index a6b3d307a43..0f078131b36 100644
--- a/src/mongo/dbtests/pdfiletests.cpp
+++ b/src/mongo/dbtests/pdfiletests.cpp
@@ -27,6 +27,7 @@
namespace PdfileTests {
+ // XXX: These tests have been ported to query_stage_collscan.cpp and are deprecated here.
namespace ScanCapped {
class Base {
diff --git a/src/mongo/dbtests/query_stage_collscan.cpp b/src/mongo/dbtests/query_stage_collscan.cpp
new file mode 100644
index 00000000000..8d83ddd80b4
--- /dev/null
+++ b/src/mongo/dbtests/query_stage_collscan.cpp
@@ -0,0 +1,597 @@
+/**
+ * Copyright (C) 2013 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/**
+ * This file tests db/exec/collection_scan.cpp.
+ */
+
+#include "mongo/client/dbclientcursor.h"
+#include "mongo/db/exec/collection_scan.h"
+#include "mongo/db/exec/plan_stage.h"
+#include "mongo/db/exec/simple_plan_runner.h"
+#include "mongo/db/instance.h"
+#include "mongo/db/json.h"
+#include "mongo/db/matcher.h"
+#include "mongo/db/pdfile.h"
+#include "mongo/dbtests/dbtests.h"
+
+namespace QueryStageCollectionScan {
+
+ //
+ // Test some nitty-gritty capped collection details. Ported and polished from pdfiletests.cpp.
+ //
+ class QueryStageCollectionScanCappedBase {
+ public:
+ QueryStageCollectionScanCappedBase() : _context(ns()) { }
+
+ virtual ~QueryStageCollectionScanCappedBase() {
+ dropNS(ns());
+ }
+
+ void run() {
+ // Create the capped collection.
+ stringstream spec;
+ spec << "{\"capped\":true,\"size\":2000,\"$nExtents\":" << nExtents() << "}";
+
+ string err;
+ ASSERT( userCreateNS( ns(), fromjson( spec.str() ), err, false ) );
+
+ // Tell the test to add data/extents/etc.
+ insertTestData();
+
+ CollectionScanParams params;
+ params.ns = ns();
+ params.direction = CollectionScanParams::FORWARD;
+ params.tailable = false;
+ params.start = DiskLoc();
+
+ // Walk the collection going forward.
+ {
+ // Create a runner to handle the scan.
+ SimplePlanRunner runner;
+ runner.setRoot(new CollectionScan(params, runner.getWorkingSet(), NULL));
+
+ int resultCount = 0;
+ BSONObj obj;
+ while (runner.getNext(&obj)) {
+ ASSERT_EQUALS(resultCount, obj.firstElement().number());
+ ++resultCount;
+ }
+
+ ASSERT_EQUALS(expectedCount(), resultCount);
+ }
+
+ // Walk the collection going backwards.
+ {
+ params.direction = CollectionScanParams::BACKWARD;
+
+ SimplePlanRunner runner;
+ runner.setRoot(new CollectionScan(params, runner.getWorkingSet(), NULL));
+
+ // Going backwards.
+ int resultCount = expectedCount() - 1;
+ BSONObj obj;
+ while (runner.getNext(&obj)) {
+ ASSERT_EQUALS(resultCount, obj.firstElement().number());
+ --resultCount;
+ }
+
+ ASSERT_EQUALS(-1, resultCount);
+ }
+ }
+
+ protected:
+ // Insert records into the collection.
+ virtual void insertTestData() = 0;
+
+ // How many records do we expect to find in our scan?
+ virtual int expectedCount() const = 0;
+
+ // How many extents do we create when we make the collection?
+ virtual int nExtents() const = 0;
+
+ // Quote: bypass standard alloc/insert routines to use the extent we want.
+ static DiskLoc insert( const DiskLoc& ext, int i ) {
+ // Copied verbatim.
+ BSONObjBuilder b;
+ b.append( "a", i );
+ BSONObj o = b.done();
+ int len = o.objsize();
+ Extent *e = ext.ext();
+ e = getDur().writing(e);
+ int ofs;
+ if ( e->lastRecord.isNull() )
+ ofs = ext.getOfs() + ( e->_extentData - (char *)e );
+ else
+ ofs = e->lastRecord.getOfs() + e->lastRecord.rec()->lengthWithHeaders();
+ DiskLoc dl( ext.a(), ofs );
+ Record *r = dl.rec();
+ r = (Record*) getDur().writingPtr(r, Record::HeaderSize + len);
+ r->lengthWithHeaders() = Record::HeaderSize + len;
+ r->extentOfs() = e->myLoc.getOfs();
+ r->nextOfs() = DiskLoc::NullOfs;
+ r->prevOfs() = e->lastRecord.isNull() ? DiskLoc::NullOfs : e->lastRecord.getOfs();
+ memcpy( r->data(), o.objdata(), len );
+ if ( e->firstRecord.isNull() )
+ e->firstRecord = dl;
+ else
+ getDur().writingInt(e->lastRecord.rec()->nextOfs()) = ofs;
+ e->lastRecord = dl;
+ return dl;
+ }
+
+ static const char *ns() { return "unittests.QueryStageCollectionScanCapped"; }
+
+ static NamespaceDetails *nsd() { return nsdetails(ns()); }
+
+ private:
+ Lock::GlobalWrite lk_;
+ Client::Context _context;
+ };
+
+ class Empty : public QueryStageCollectionScanCappedBase {
+ virtual void insertTestData() {}
+ virtual int expectedCount() const { return 0; }
+ virtual int nExtents() const { return 0; }
+ };
+
+ class EmptyLooped : public QueryStageCollectionScanCappedBase {
+ virtual void insertTestData() {
+ nsd()->writingWithExtra()->capFirstNewRecord() = DiskLoc();
+ }
+ virtual int expectedCount() const { return 0; }
+ virtual int nExtents() const { return 0; }
+ };
+
+ class EmptyMultiExtentLooped : public QueryStageCollectionScanCappedBase {
+ virtual void insertTestData() {
+ nsd()->writingWithExtra()->capFirstNewRecord() = DiskLoc();
+ }
+ virtual int expectedCount() const { return 0; }
+ virtual int nExtents() const { return 3; }
+ };
+
+ class Single : public QueryStageCollectionScanCappedBase {
+ virtual void insertTestData() {
+ nsd()->writingWithExtra()->capFirstNewRecord() = insert( nsd()->capExtent(), 0 );
+ }
+ virtual int expectedCount() const { return 1; }
+ virtual int nExtents() const { return 0; }
+ };
+
+ class NewCapFirst : public QueryStageCollectionScanCappedBase {
+ virtual void insertTestData() {
+ DiskLoc x = insert( nsd()->capExtent(), 0 );
+ nsd()->writingWithExtra()->capFirstNewRecord() = x;
+ insert( nsd()->capExtent(), 1 );
+ }
+ virtual int expectedCount() const { return 2; }
+ virtual int nExtents() const { return 0; }
+ };
+
+ class NewCapLast : public QueryStageCollectionScanCappedBase {
+ virtual void insertTestData() {
+ insert( nsd()->capExtent(), 0 );
+ nsd()->capFirstNewRecord().writing() = insert( nsd()->capExtent(), 1 );
+ }
+ virtual int expectedCount() const { return 2; }
+ virtual int nExtents() const { return 0; }
+ };
+
+ class NewCapMiddle : public QueryStageCollectionScanCappedBase {
+ virtual void insertTestData() {
+ insert( nsd()->capExtent(), 0 );
+ nsd()->capFirstNewRecord().writing() = insert( nsd()->capExtent(), 1 );
+ insert( nsd()->capExtent(), 2 );
+ }
+ virtual int expectedCount() const { return 3; }
+ virtual int nExtents() const { return 0; }
+ };
+
+ class FirstExtent : public QueryStageCollectionScanCappedBase {
+ virtual void insertTestData() {
+ insert( nsd()->capExtent(), 0 );
+ insert( nsd()->lastExtent(), 1 );
+ nsd()->capFirstNewRecord().writing() = insert( nsd()->capExtent(), 2 );
+ insert( nsd()->capExtent(), 3 );
+ }
+ virtual int expectedCount() const { return 4; }
+ virtual int nExtents() const { return 2; }
+ };
+
+    // Two extents; the cap extent is repointed to the LAST extent before any
+    // inserts, so the scan must wrap from the last extent back to the first.
+    class LastExtent : public QueryStageCollectionScanCappedBase {
+        virtual void insertTestData() {
+            // Repoint capExtent at the last extent (write intent on the DiskLoc).
+            nsd()->capExtent().writing() = nsd()->lastExtent();
+            insert( nsd()->capExtent(), 0 );
+            insert( nsd()->firstExtent(), 1 );
+            nsd()->capFirstNewRecord().writing() = insert( nsd()->capExtent(), 2 );
+            insert( nsd()->capExtent(), 3 );
+        }
+        virtual int expectedCount() const { return 4; }
+        virtual int nExtents() const { return 2; }
+    };
+
+    // Three extents; the cap extent is the MIDDLE one, with records in all
+    // three extents, so the scan must traverse extent boundaries in cap order.
+    class MidExtent : public QueryStageCollectionScanCappedBase {
+        virtual void insertTestData() {
+            // Repoint capExtent at the second extent (first extent's xnext link).
+            nsd()->capExtent().writing() = nsd()->firstExtent().ext()->xnext;
+            insert( nsd()->capExtent(), 0 );
+            insert( nsd()->lastExtent(), 1 );
+            insert( nsd()->firstExtent(), 2 );
+            nsd()->capFirstNewRecord().writing() = insert( nsd()->capExtent(), 3 );
+            insert( nsd()->capExtent(), 4 );
+        }
+        virtual int expectedCount() const { return 5; }
+        virtual int nExtents() const { return 3; }
+    };
+
+    // Three extents; capFirstNewRecord is the ONLY record in the (middle) cap
+    // extent, with the other records in the first and last extents.
+    class AloneInExtent : public QueryStageCollectionScanCappedBase {
+        virtual void insertTestData() {
+            // Cap extent is the middle extent.
+            nsd()->capExtent().writing() = nsd()->firstExtent().ext()->xnext;
+            insert( nsd()->lastExtent(), 0 );
+            insert( nsd()->firstExtent(), 1 );
+            nsd()->capFirstNewRecord().writing() = insert( nsd()->capExtent(), 2 );
+        }
+        virtual int expectedCount() const { return 3; }
+        virtual int nExtents() const { return 3; }
+    };
+
+    // Three extents; capFirstNewRecord is the FIRST of two records in the
+    // (middle) cap extent.
+    class FirstInExtent : public QueryStageCollectionScanCappedBase {
+        virtual void insertTestData() {
+            // Cap extent is the middle extent.
+            nsd()->capExtent().writing() = nsd()->firstExtent().ext()->xnext;
+            insert( nsd()->lastExtent(), 0 );
+            insert( nsd()->firstExtent(), 1 );
+            nsd()->capFirstNewRecord().writing() = insert( nsd()->capExtent(), 2 );
+            insert( nsd()->capExtent(), 3 );
+        }
+        virtual int expectedCount() const { return 4; }
+        virtual int nExtents() const { return 3; }
+    };
+
+    // Three extents; capFirstNewRecord is the LAST record in the (middle) cap
+    // extent, after records were placed in the other extents.
+    class LastInExtent : public QueryStageCollectionScanCappedBase {
+        virtual void insertTestData() {
+            // Cap extent is the middle extent.
+            nsd()->capExtent().writing() = nsd()->firstExtent().ext()->xnext;
+            insert( nsd()->capExtent(), 0 );
+            insert( nsd()->lastExtent(), 1 );
+            insert( nsd()->firstExtent(), 2 );
+            nsd()->capFirstNewRecord().writing() = insert( nsd()->capExtent(), 3 );
+        }
+        virtual int expectedCount() const { return 4; }
+        virtual int nExtents() const { return 3; }
+    };
+
+ //
+ // Stage-specific tests.
+ //
+
+    // Shared fixture for the stage-specific CollectionScan tests below.
+    // Populates a (non-capped) collection with numObj() documents of the form
+    // {foo: 0}, {foo: 1}, ..., and provides helpers to run scans over it.
+    class QueryStageCollectionScanBase {
+    public:
+        QueryStageCollectionScanBase() {
+            Client::WriteContext ctx(ns());
+
+            // Insert {foo: 0} .. {foo: numObj()-1} in order.
+            for (int i = 0; i < numObj(); ++i) {
+                BSONObjBuilder bob;
+                bob.append("foo", i);
+                _client.insert(ns(), bob.obj());
+            }
+        }
+
+        virtual ~QueryStageCollectionScanBase() {
+            // Drop the test collection so each fixture starts from scratch.
+            Client::WriteContext ctx(ns());
+            _client.dropCollection(ns());
+        }
+
+        // Remove all documents matching 'obj' from the test collection.
+        void remove(const BSONObj& obj) {
+            _client.remove(ns(), obj);
+        }
+
+        // Run a full collection scan in 'direction', optionally filtered by
+        // 'matcher', and return the number of documents produced.
+        // NOTE(review): call sites pass 'new Matcher(...)' here — presumably
+        // CollectionScan takes ownership of the matcher; verify, else it leaks.
+        int countResults(CollectionScanParams::Direction direction, Matcher* matcher = NULL) {
+            Client::ReadContext ctx(ns());
+
+            // Configure the scan.
+            CollectionScanParams params;
+            params.ns = ns();
+            params.direction = direction;
+            params.tailable = false;
+
+            // Make a scan and have the runner own it.
+            SimplePlanRunner runner;
+            runner.setRoot(new CollectionScan(params, runner.getWorkingSet(), matcher));
+
+            // Use the runner to count the number of objects scanned.
+            int count = 0;
+            for (BSONObj obj; runner.getNext(&obj); ) { ++count; }
+            return count;
+        }
+
+        // Drive an unfiltered scan by hand and collect the DiskLoc of every
+        // ADVANCED result, in scan order, into 'out'.
+        void getLocs(CollectionScanParams::Direction direction, vector<DiskLoc>* out) {
+            WorkingSet ws;
+
+            CollectionScanParams params;
+            params.ns = ns();
+            params.direction = direction;
+            params.tailable = false;
+
+            scoped_ptr<CollectionScan> scan(new CollectionScan(params, &ws, NULL));
+            while (!scan->isEOF()) {
+                WorkingSetID id;
+                PlanStage::StageState state = scan->work(&id);
+                // Non-ADVANCED states (e.g. NEED_TIME) are simply retried.
+                if (PlanStage::ADVANCED == state) {
+                    WorkingSetMember* member = ws.get(id);
+                    verify(member->hasLoc());
+                    out->push_back(member->loc);
+                }
+            }
+        }
+
+        // Number of documents inserted by the fixture constructor.
+        static int numObj() { return 50; }
+
+        static const char* ns() { return "unittests.QueryStageCollectionScan"; }
+
+    private:
+        static DBDirectClient _client;
+    };
+
+    DBDirectClient QueryStageCollectionScanBase::_client;
+
+ //
+ // Go forwards, get everything.
+ //
+    // A forward scan with no filter must return every document.
+    class BasicForward : public QueryStageCollectionScanBase {
+    public:
+        void run() {
+            ASSERT_EQUALS(numObj(), countResults(CollectionScanParams::FORWARD, NULL));
+        }
+    };
+
+ //
+ // Go backwards, get everything.
+ //
+
+    // A backward scan with no filter must also return every document.
+    class BasicBackward : public QueryStageCollectionScanBase {
+    public:
+        void run() {
+            ASSERT_EQUALS(numObj(), countResults(CollectionScanParams::BACKWARD, NULL));
+        }
+    };
+
+ //
+ // Go forwards and match half the docs.
+ //
+
+    // Forward scan filtered by {foo: {$lt: 25}} matches exactly half of the
+    // 50 documents (foo = 0..24).
+    class BasicForwardWithMatch : public QueryStageCollectionScanBase {
+    public:
+        void run() {
+            BSONObj obj = BSON("foo" << BSON("$lt" << 25));
+            ASSERT_EQUALS(25, countResults(CollectionScanParams::FORWARD, new Matcher(obj)));
+        }
+    };
+
+ //
+ // Go backwards and match half the docs.
+ //
+
+    // Same half-matching filter as above, but scanning backward.
+    class BasicBackwardWithMatch : public QueryStageCollectionScanBase {
+    public:
+        void run() {
+            BSONObj obj = BSON("foo" << BSON("$lt" << 25));
+            ASSERT_EQUALS(25, countResults(CollectionScanParams::BACKWARD, new Matcher(obj)));
+        }
+    };
+
+ //
+ // Get objects in the order we inserted them.
+ //
+
+    // A forward scan must return documents in insertion order: the i-th
+    // result has foo == i.
+    class ObjectsInOrderForward : public QueryStageCollectionScanBase {
+    public:
+        void run() {
+            Client::ReadContext ctx(ns());
+
+            // Configure the scan.
+            CollectionScanParams params;
+            params.ns = ns();
+            params.direction = CollectionScanParams::FORWARD;
+            params.tailable = false;
+
+            // Make a scan and have the runner own it.
+            SimplePlanRunner runner;
+            runner.setRoot(new CollectionScan(params, runner.getWorkingSet(), NULL));
+
+            int count = 0;
+            for (BSONObj obj; runner.getNext(&obj); ) {
+                // Make sure we get the objects in the order we want
+                ASSERT_EQUALS(count, obj["foo"].numberInt());
+                ++count;
+            }
+
+            // And that we saw all of them.
+            ASSERT_EQUALS(numObj(), count);
+        }
+    };
+
+ //
+ // Get objects in the reverse order we inserted them when we go backwards.
+ //
+
+    // A backward scan must return documents in reverse insertion order: the
+    // i-th result (1-based) has foo == numObj() - i.
+    class ObjectsInOrderBackward : public QueryStageCollectionScanBase {
+    public:
+        void run() {
+            Client::ReadContext ctx(ns());
+
+            CollectionScanParams params;
+            params.ns = ns();
+            params.direction = CollectionScanParams::BACKWARD;
+            params.tailable = false;
+
+            SimplePlanRunner runner;
+            runner.setRoot(new CollectionScan(params, runner.getWorkingSet(), NULL));
+
+            int count = 0;
+            for (BSONObj obj; runner.getNext(&obj); ) {
+                // count is incremented first, so expected foo counts down
+                // from numObj() - 1 to 0.
+                ++count;
+                ASSERT_EQUALS(numObj() - count, obj["foo"].numberInt());
+            }
+
+            ASSERT_EQUALS(numObj(), count);
+        }
+    };
+
+ //
+ // Scan through half the objects, delete the one we're about to fetch, then expect to get the
+ // "next" object we would have gotten after that.
+ //
+
+    // Deleting the document the scan is about to return must not break the
+    // scan: after invalidate() + remove(), the scan skips it and continues
+    // with the following document.
+    class InvalidateUpcomingObject : public QueryStageCollectionScanBase {
+    public:
+        void run() {
+            Client::WriteContext ctx(ns());
+
+            // Get the DiskLocs that would be returned by an in-order scan.
+            vector<DiskLoc> locs;
+            getLocs(CollectionScanParams::FORWARD, &locs);
+
+            // Configure the scan.
+            CollectionScanParams params;
+            params.ns = ns();
+            params.direction = CollectionScanParams::FORWARD;
+            params.tailable = false;
+
+            WorkingSet ws;
+            scoped_ptr<CollectionScan> scan(new CollectionScan(params, &ws, NULL));
+
+            // Consume the first 10 results, checking each against the
+            // precomputed scan order.  (Relies on numObj() > 10; the loop only
+            // exits on ADVANCED results.)
+            int count = 0;
+            while (count < 10) {
+                WorkingSetID id;
+                PlanStage::StageState state = scan->work(&id);
+                if (PlanStage::ADVANCED == state) {
+                    WorkingSetMember* member = ws.get(id);
+                    ASSERT_EQUALS(locs[count].obj()["foo"].numberInt(),
+                                  member->obj["foo"].numberInt());
+                    ++count;
+                }
+            }
+
+            // Remove locs[count]: notify the stage first, then actually delete,
+            // mirroring what the real deletion path would do.
+            scan->invalidate(locs[count]);
+            remove(locs[count].obj());
+
+            // Skip over locs[count].
+            ++count;
+
+            // Expect the rest, still in the original order.
+            while (!scan->isEOF()) {
+                WorkingSetID id;
+                PlanStage::StageState state = scan->work(&id);
+                if (PlanStage::ADVANCED == state) {
+                    WorkingSetMember* member = ws.get(id);
+                    ASSERT_EQUALS(locs[count].obj()["foo"].numberInt(),
+                                  member->obj["foo"].numberInt());
+                    ++count;
+                }
+            }
+
+            // count includes the skipped document, so it totals numObj().
+            ASSERT_EQUALS(numObj(), count);
+        }
+    };
+
+ //
+ // Scan through half the objects, delete the one we're about to fetch, then expect to get the
+ // "next" object we would have gotten after that. But, do it in reverse!
+ //
+
+    // Same invalidate-the-upcoming-document test as above, but with a
+    // backward scan (and DiskLocs precomputed in backward order).
+    class InvalidateUpcomingObjectBackward : public QueryStageCollectionScanBase {
+    public:
+        void run() {
+            Client::WriteContext ctx(ns());
+
+            // Get the DiskLocs that would be returned by an in-order scan.
+            vector<DiskLoc> locs;
+            getLocs(CollectionScanParams::BACKWARD, &locs);
+
+            // Configure the scan.
+            CollectionScanParams params;
+            params.ns = ns();
+            params.direction = CollectionScanParams::BACKWARD;
+            params.tailable = false;
+
+            WorkingSet ws;
+            scoped_ptr<CollectionScan> scan(new CollectionScan(params, &ws, NULL));
+
+            // Consume the first 10 results, checking each against the
+            // precomputed backward scan order.  (Relies on numObj() > 10.)
+            int count = 0;
+            while (count < 10) {
+                WorkingSetID id;
+                PlanStage::StageState state = scan->work(&id);
+                if (PlanStage::ADVANCED == state) {
+                    WorkingSetMember* member = ws.get(id);
+                    ASSERT_EQUALS(locs[count].obj()["foo"].numberInt(),
+                                  member->obj["foo"].numberInt());
+                    ++count;
+                }
+            }
+
+            // Remove locs[count]: notify the stage, then delete the document.
+            scan->invalidate(locs[count]);
+            remove(locs[count].obj());
+
+            // Skip over locs[count].
+            ++count;
+
+            // Expect the rest, still in the original (backward) order.
+            while (!scan->isEOF()) {
+                WorkingSetID id;
+                PlanStage::StageState state = scan->work(&id);
+                if (PlanStage::ADVANCED == state) {
+                    WorkingSetMember* member = ws.get(id);
+                    ASSERT_EQUALS(locs[count].obj()["foo"].numberInt(),
+                                  member->obj["foo"].numberInt());
+                    ++count;
+                }
+            }
+
+            ASSERT_EQUALS(numObj(), count);
+        }
+    };
+    // Test suite registration: runs the ported pdfile.cpp capped-collection
+    // traversal tests plus the stage-specific CollectionScan tests.
+    class All : public Suite {
+    public:
+        All() : Suite( "QueryStageCollectionScan" ) {}
+
+        void setupTests() {
+            // These tests are ported from pdfile.cpp
+            add<Empty>();
+            add<EmptyLooped>();
+            add<EmptyMultiExtentLooped>();
+            add<Single>();
+            add<NewCapFirst>();
+            add<NewCapLast>();
+            add<NewCapMiddle>();
+            add<FirstExtent>();
+            add<LastExtent>();
+            add<MidExtent>();
+            add<AloneInExtent>();
+            add<FirstInExtent>();
+            add<LastInExtent>();
+            // These are not. Stage-specific tests below.
+            add<BasicForward>();
+            add<BasicBackward>();
+            add<BasicForwardWithMatch>();
+            add<BasicBackwardWithMatch>();
+            add<ObjectsInOrderForward>();
+            add<ObjectsInOrderBackward>();
+            add<InvalidateUpcomingObject>();
+            add<InvalidateUpcomingObjectBackward>();
+        }
+    } all;
+
+}