author     dwight <dwight@Dwights-MacBook.local>  2008-08-24 20:43:48 -0400
committer  dwight <dwight@Dwights-MacBook.local>  2008-08-24 20:43:48 -0400
commit     94f9bdee473f53837e5ec052526f40d857eb9d1e (patch)
tree       381f766502c2a18371aa52d47af5cadbbc29abf7
parent     247764becc200dd4e4318e98fd67b4461d638205 (diff)
download   mongo-94f9bdee473f53837e5ec052526f40d857eb9d1e.tar.gz
forgot to implement skip() for scanandorder
-rw-r--r--  db/query.cpp      | 90
-rw-r--r--  db/scanandorder.h | 16
2 files changed, 55 insertions(+), 51 deletions(-)
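
In outline: before this patch, runQuery decremented ntoskip before handing matches to ScanAndOrder, so documents were skipped in scan order rather than in sort order. A minimal standalone sketch of the difference, using toy integer data (illustrative values only, not server code):

#include <algorithm>
#include <iostream>
#include <vector>

int main() {
    std::vector<int> scanOrder = {5, 1, 4, 2, 3};  // documents as the scan encounters them
    int ntoskip = 2;

    // Pre-patch behavior: skip during the scan, then sort what is left.
    std::vector<int> buggy(scanOrder.begin() + ntoskip, scanOrder.end());
    std::sort(buggy.begin(), buggy.end());          // {2, 3, 4} -- the wrong rows were dropped

    // Post-patch behavior: sort everything, then skip the first ntoskip.
    std::vector<int> fixed = scanOrder;
    std::sort(fixed.begin(), fixed.end());
    fixed.erase(fixed.begin(), fixed.begin() + ntoskip);  // {3, 4, 5}

    for (int v : fixed) std::cout << v << ' ';      // prints: 3 4 5
    std::cout << '\n';
}

Sorting first and then skipping returns what a client expects from sort() followed by skip().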
diff --git a/db/query.cpp b/db/query.cpp
index ed4e8d9654e..85abfca212b 100644
--- a/db/query.cpp
+++ b/db/query.cpp
@@ -995,7 +995,7 @@ QueryResult* runQuery(Message& message, const char *ns, int ntoskip, int _ntoret
if( !order.isEmpty() && !isSorted ) {
ordering = true;
ss << " scanAndOrder ";
- so = auto_ptr<ScanAndOrder>(new ScanAndOrder(ntoreturn,order));
+ so = auto_ptr<ScanAndOrder>(new ScanAndOrder(ntoskip, ntoreturn,order));
wantMore = false;
// scanAndOrder(b, c.get(), order, ntoreturn);
}
@@ -1013,51 +1013,49 @@ QueryResult* runQuery(Message& message, const char *ns, int ntoskip, int _ntoret
}
else if( !deep || !c->getsetdup(c->currLoc()) ) { // i.e., check for dups on deep items only
// got a match.
- if( ntoskip > 0 ) {
- ntoskip--;
- }
- else {
- assert( js.objsize() >= 0 ); //defensive for segfaults
- if( ordering ) {
- // note: no cursors for non-indexed, ordered results. results must be fairly small.
- so->add(js);
- } else {
- bool ok = fillQueryResultFromObj(b, filter.get(), js);
- if( ok ) n++;
- if( ok ) {
- if( (ntoreturn>0 && (n >= ntoreturn || b.len() > MaxBytesToReturnToClientAtOnce)) ||
- (ntoreturn==0 && (b.len()>1*1024*1024 || n>=101)) ) {
- /* if ntoreturn is zero, we return up to 101 objects. on the subsequent getmore, there
- is only a size limit. The idea is that on a find() where one doesn't use much results,
- we don't return much, but once getmore kicks in, we start pushing significant quantities.
-
- The n limit (vs. size) is important when someone fetches only one small field from big
- objects, which causes massive scanning server-side.
- */
- /* if only 1 requested, no cursor saved for efficiency...we assume it is findOne() */
- if( wantMore && ntoreturn != 1 ) {
- if( useCursors ) {
- c->advance();
- if( c->ok() ) {
- // more...so save a cursor
- ClientCursor *cc = new ClientCursor();
- cc->c = c;
- cursorid = cc->cursorid;
- DEV cout << " query has more, cursorid: " << cursorid << endl;
- cc->matcher = matcher;
- cc->ns = ns;
- cc->pos = n;
- cc->filter = filter;
- cc->originalMessage = message;
- cc->updateLocation();
- }
- }
- }
- break;
- }
- }
- }
- }
+ assert( js.objsize() >= 0 ); //defensive for segfaults
+ if( ordering ) {
+ // note: no cursors for non-indexed, ordered results. results must be fairly small.
+ so->add(js);
+ }
+ else if( ntoskip > 0 ) {
+ ntoskip--;
+ } else {
+ bool ok = fillQueryResultFromObj(b, filter.get(), js);
+ if( ok ) n++;
+ if( ok ) {
+ if( (ntoreturn>0 && (n >= ntoreturn || b.len() > MaxBytesToReturnToClientAtOnce)) ||
+ (ntoreturn==0 && (b.len()>1*1024*1024 || n>=101)) ) {
+ /* if ntoreturn is zero, we return up to 101 objects. on the subsequent getmore, there
+ is only a size limit. The idea is that on a find() where one doesn't use much results,
+ we don't return much, but once getmore kicks in, we start pushing significant quantities.
+
+ The n limit (vs. size) is important when someone fetches only one small field from big
+ objects, which causes massive scanning server-side.
+ */
+ /* if only 1 requested, no cursor saved for efficiency...we assume it is findOne() */
+ if( wantMore && ntoreturn != 1 ) {
+ if( useCursors ) {
+ c->advance();
+ if( c->ok() ) {
+ // more...so save a cursor
+ ClientCursor *cc = new ClientCursor();
+ cc->c = c;
+ cursorid = cc->cursorid;
+ DEV cout << " query has more, cursorid: " << cursorid << endl;
+ cc->matcher = matcher;
+ cc->ns = ns;
+ cc->pos = n;
+ cc->filter = filter;
+ cc->originalMessage = message;
+ cc->updateLocation();
+ }
+ }
+ }
+ break;
+ }
+ }
+ }
}
c->advance();
} // end while
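
The hunk above reorders the branches so that ordering is tested before ntoskip. A hedged sketch of the resulting dispatch, with stand-in types (Doc and Sorter are hypothetical; the real code uses JSObj and ScanAndOrder):

#include <string>
#include <vector>

struct Doc { std::string json; };      // stands in for JSObj
struct Sorter {                        // stands in for ScanAndOrder
    std::vector<Doc> buffered;
    void add(const Doc& d) { buffered.push_back(d); }
};

// Mirrors the new branch order: sorted queries buffer every match and
// defer skip/limit to the sorter; unsorted queries still skip inline.
void onMatch(const Doc& js, bool ordering, int& ntoskip,
             Sorter& so, std::vector<Doc>& out) {
    if (ordering) {
        so.add(js);         // buffer everything; skip is applied after sorting
    } else if (ntoskip > 0) {
        --ntoskip;          // unsorted path: skipping in scan order is correct
    } else {
        out.push_back(js);  // emit (the real loop also enforces ntoreturn/byte caps)
    }
}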
diff --git a/db/scanandorder.h b/db/scanandorder.h
index f399a2a1664..a4489b00b1e 100644
--- a/db/scanandorder.h
+++ b/db/scanandorder.h
@@ -46,6 +46,7 @@ inline bool fillQueryResultFromObj(BufBuilder& b, set<string> *filter, JSObj& js
typedef multimap<JSObj,JSObj> BestMap;
class ScanAndOrder {
BestMap best;
+ int startFrom;
int limit; // max to send back.
KeyType order;
int dir;
@@ -66,8 +67,9 @@ class ScanAndOrder {
}
public:
- ScanAndOrder(int _limit, JSObj _order) : order(_order) {
- limit = _limit > 0 ? _limit : 0x7fffffff;
+ ScanAndOrder(int _startFrom, int _limit, JSObj _order) :
+ startFrom(_startFrom), order(_order) {
+ limit = _limit > 0 ? _limit + startFrom : 0x7fffffff;
approxSize = 0;
// todo: do order right for compound keys. this is temp.
@@ -99,17 +101,21 @@ public:
template<class T>
void _fill(BufBuilder& b, set<string> *filter, int& nout, T begin, T end) {
int n = 0;
+ int nFilled = 0;
for( T i = begin; i != end; i++ ) {
+ n++;
+ if( n <= startFrom )
+ continue;
JSObj& o = i->second;
if( fillQueryResultFromObj(b, filter, o) ) {
- n++;
- if( n >= limit )
+ nFilled++;
+ if( nFilled >= limit )
goto done;
uassert( b.len() < 4000000 ); // appserver limit
}
}
done:
- nout = n;
+ nout = nFilled;
}
/* scanning complete. stick the query result in b for n objects. */
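
Taken together, the scanandorder.h changes keep startFrom + limit entries in the sorted buffer and have _fill() discard the first startFrom of them in sorted order. A self-contained model of that bookkeeping (MiniScanAndOrder is hypothetical, with int keys standing in for JSObj; the real class also handles sort direction and result-size limits):

#include <iostream>
#include <iterator>
#include <map>

class MiniScanAndOrder {
    std::multimap<int, int> best;  // sorted buffer, like BestMap above (ascending only here)
    int startFrom;
    int limit;                     // includes the skipped prefix, as in the patch
public:
    MiniScanAndOrder(int _startFrom, int _limit)
        : startFrom(_startFrom),
          limit(_limit > 0 ? _limit + _startFrom : 0x7fffffff) {}

    void add(int key, int val) {
        best.insert({key, val});
        if ((int)best.size() > limit)          // retain only the best `limit` entries
            best.erase(std::prev(best.end()));
    }

    // Analogue of _fill(): walk the sorted entries, skip the first
    // startFrom, and emit the rest.
    void fill(std::ostream& out) const {
        int n = 0;
        for (const auto& kv : best) {
            if (++n <= startFrom)
                continue;
            out << kv.second << ' ';
        }
        out << '\n';
    }
};

int main() {
    MiniScanAndOrder so(/*skip*/ 2, /*limit*/ 3);
    for (int v : {5, 1, 4, 2, 3, 0}) so.add(v, v);
    so.fill(std::cout);  // prints "2 3 4": the two smallest skipped, the next three returned
}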