author    Qingyang Chen <qingyang.chen@10gen.com>  2015-07-06 12:02:37 -0400
committer Qingyang Chen <qingyang.chen@10gen.com>  2015-07-09 12:46:34 -0400
commit    d1cb71465274bcb5f3bc962ef2740cf985f32113 (patch)
tree      fbffc4a780d4eaa9874e1753796f851b0f0cbb91 /src/mongo/db
parent    d764e3e6cf1d8a7d510df2f724282f7a053cecda (diff)
SERVER-18936 skip, limit, and batchsize use 64-bit variables
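For context, a minimal standalone sketch (not part of the commit) of the overflow this change guards against: the 8,000,000,000 values exercised by the new tests below do not fit in a 32-bit int, so the old int-typed skip, limit, and batchSize fields could not represent them.

    #include <iostream>

    int main() {
        // 8 * 1000 * 1000 * 1000, the value used by the new large-value tests below.
        long long requested = 8LL * 1000 * 1000 * 1000;
        // Narrowing to a 32-bit int cannot preserve the value; the result is
        // implementation-defined (typically a wrapped, negative number).
        int narrowed = static_cast<int>(requested);
        std::cout << "as long long: " << requested << "\n"
                  << "as int:       " << narrowed << "\n";
        return 0;
    }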
Diffstat (limited to 'src/mongo/db')
-rw-r--r--  src/mongo/db/clientcursor.h                    |  8
-rw-r--r--  src/mongo/db/commands/find_cmd.cpp             |  6
-rw-r--r--  src/mongo/db/commands/getmore_cmd.cpp          |  4
-rw-r--r--  src/mongo/db/curop.h                           |  6
-rw-r--r--  src/mongo/db/exec/limit.cpp                    |  2
-rw-r--r--  src/mongo/db/exec/limit.h                      |  4
-rw-r--r--  src/mongo/db/exec/skip.cpp                     |  2
-rw-r--r--  src/mongo/db/exec/skip.h                       |  4
-rw-r--r--  src/mongo/db/query/find.cpp                    | 10
-rw-r--r--  src/mongo/db/query/find.h                      | 10
-rw-r--r--  src/mongo/db/query/getmore_request.cpp         |  6
-rw-r--r--  src/mongo/db/query/getmore_request.h           |  4
-rw-r--r--  src/mongo/db/query/lite_parsed_query.cpp       | 19
-rw-r--r--  src/mongo/db/query/lite_parsed_query.h         | 23
-rw-r--r--  src/mongo/db/query/lite_parsed_query_test.cpp  | 63
-rw-r--r--  src/mongo/db/query/query_solution.h            |  4
16 files changed, 112 insertions(+), 63 deletions(-)
diff --git a/src/mongo/db/clientcursor.h b/src/mongo/db/clientcursor.h
index a145802e985..c8bd1853768 100644
--- a/src/mongo/db/clientcursor.h
+++ b/src/mongo/db/clientcursor.h
@@ -170,13 +170,13 @@ public:
}
// Used by ops/query.cpp to stash how many results have been returned by a query.
- int pos() const {
+ long long pos() const {
return _pos;
}
- void incPos(int n) {
+ void incPos(long long n) {
_pos += n;
}
- void setPos(int n) {
+ void setPos(long long n) {
_pos = n;
}
@@ -256,7 +256,7 @@ private:
bool _countedYet;
// How many objects have been returned by the find() so far?
- int _pos;
+ long long _pos;
// If this cursor was created by a find operation, '_query' holds the query predicate for
// the find. If this cursor was created by a command (e.g. the aggregate command), then
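The widened counter above is plain bookkeeping. A standalone sketch of the same accessor pattern (a hypothetical CursorPosition class, not the real ClientCursor, which carries far more state):

    class CursorPosition {
    public:
        long long pos() const { return _pos; }
        void incPos(long long n) { _pos += n; }
        void setPos(long long n) { _pos = n; }

    private:
        // How many objects the find() has returned so far; 64-bit so a large
        // skip or limit cannot overflow the running count.
        long long _pos = 0;
    };

    int main() {
        CursorPosition p;
        p.incPos(8LL * 1000 * 1000 * 1000);  // comfortably above the 32-bit range
        return p.pos() == 8LL * 1000 * 1000 * 1000 ? 0 : 1;
    }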
diff --git a/src/mongo/db/commands/find_cmd.cpp b/src/mongo/db/commands/find_cmd.cpp
index 70ba269cd85..a300563de62 100644
--- a/src/mongo/db/commands/find_cmd.cpp
+++ b/src/mongo/db/commands/find_cmd.cpp
@@ -218,7 +218,7 @@ public:
}
// Fill out curop information.
- int ntoreturn = lpq->getBatchSize().value_or(0);
+ long long ntoreturn = lpq->getBatchSize().value_or(0);
beginQueryOp(txn, nss, cmdObj, ntoreturn, lpq->getSkip());
// 1b) Finish the parsing step by using the LiteParsedQuery to create a CanonicalQuery.
@@ -264,7 +264,7 @@ public:
if (!collection) {
// No collection. Just fill out curop indicating that there were zero results and
// there is no ClientCursor id, and then return.
- const int numResults = 0;
+ const long long numResults = 0;
const CursorId cursorId = 0;
endQueryOp(txn, *exec, dbProfilingLevel, numResults, cursorId);
appendCursorResponseObject(cursorId, nss.ns(), BSONArray(), &result);
@@ -300,7 +300,7 @@ public:
BSONArrayBuilder firstBatch;
BSONObj obj;
PlanExecutor::ExecState state;
- int numResults = 0;
+ long long numResults = 0;
while (!enoughForFirstBatch(pq, numResults, firstBatch.len()) &&
PlanExecutor::ADVANCED == (state = cursorExec->getNext(&obj, NULL))) {
// If adding this object will cause us to exceed the BSON size limit, then we stash
diff --git a/src/mongo/db/commands/getmore_cmd.cpp b/src/mongo/db/commands/getmore_cmd.cpp
index c3b92bed87f..0bd3c597f15 100644
--- a/src/mongo/db/commands/getmore_cmd.cpp
+++ b/src/mongo/db/commands/getmore_cmd.cpp
@@ -271,7 +271,7 @@ public:
BSONArrayBuilder nextBatch;
BSONObj obj;
PlanExecutor::ExecState state;
- int numResults = 0;
+ long long numResults = 0;
Status batchStatus = generateBatch(cursor, request, &nextBatch, &state, &numResults);
if (!batchStatus.isOK()) {
return appendCommandStatus(result, batchStatus);
@@ -361,7 +361,7 @@ public:
const GetMoreRequest& request,
BSONArrayBuilder* nextBatch,
PlanExecutor::ExecState* state,
- int* numResults) {
+ long long* numResults) {
PlanExecutor* exec = cursor->getExecutor();
const bool isAwaitData = isCursorAwaitData(cursor);
diff --git a/src/mongo/db/curop.h b/src/mongo/db/curop.h
index b09075a0245..a1325210cca 100644
--- a/src/mongo/db/curop.h
+++ b/src/mongo/db/curop.h
@@ -156,8 +156,8 @@ public:
// detailed options
long long cursorid;
- int ntoreturn;
- int ntoskip;
+ long long ntoreturn;
+ long long ntoskip;
bool exhaust;
// debugging/profile info
@@ -187,7 +187,7 @@ public:
// response info
int executionTime;
- int nreturned;
+ long long nreturned;
int responseLength;
};
diff --git a/src/mongo/db/exec/limit.cpp b/src/mongo/db/exec/limit.cpp
index adbe343eb46..06566149cd8 100644
--- a/src/mongo/db/exec/limit.cpp
+++ b/src/mongo/db/exec/limit.cpp
@@ -42,7 +42,7 @@ using stdx::make_unique;
// static
const char* LimitStage::kStageType = "LIMIT";
-LimitStage::LimitStage(int limit, WorkingSet* ws, PlanStage* child)
+LimitStage::LimitStage(long long limit, WorkingSet* ws, PlanStage* child)
: _ws(ws), _child(child), _numToReturn(limit), _commonStats(kStageType) {
_specificStats.limit = _numToReturn;
}
diff --git a/src/mongo/db/exec/limit.h b/src/mongo/db/exec/limit.h
index 6a57f39eb6d..26894b7479f 100644
--- a/src/mongo/db/exec/limit.h
+++ b/src/mongo/db/exec/limit.h
@@ -44,7 +44,7 @@ namespace mongo {
*/
class LimitStage : public PlanStage {
public:
- LimitStage(int limit, WorkingSet* ws, PlanStage* child);
+ LimitStage(long long limit, WorkingSet* ws, PlanStage* child);
virtual ~LimitStage();
virtual bool isEOF();
@@ -73,7 +73,7 @@ private:
std::unique_ptr<PlanStage> _child;
// We only return this many results.
- int _numToReturn;
+ long long _numToReturn;
// Stats
CommonStats _commonStats;
diff --git a/src/mongo/db/exec/skip.cpp b/src/mongo/db/exec/skip.cpp
index 2ce7925add9..b223b75f364 100644
--- a/src/mongo/db/exec/skip.cpp
+++ b/src/mongo/db/exec/skip.cpp
@@ -41,7 +41,7 @@ using stdx::make_unique;
// static
const char* SkipStage::kStageType = "SKIP";
-SkipStage::SkipStage(int toSkip, WorkingSet* ws, PlanStage* child)
+SkipStage::SkipStage(long long toSkip, WorkingSet* ws, PlanStage* child)
: _ws(ws), _child(child), _toSkip(toSkip), _commonStats(kStageType) {}
SkipStage::~SkipStage() {}
diff --git a/src/mongo/db/exec/skip.h b/src/mongo/db/exec/skip.h
index 547b3bf3885..c8f5105e3da 100644
--- a/src/mongo/db/exec/skip.h
+++ b/src/mongo/db/exec/skip.h
@@ -43,7 +43,7 @@ namespace mongo {
*/
class SkipStage : public PlanStage {
public:
- SkipStage(int toSkip, WorkingSet* ws, PlanStage* child);
+ SkipStage(long long toSkip, WorkingSet* ws, PlanStage* child);
virtual ~SkipStage();
virtual bool isEOF();
@@ -72,7 +72,7 @@ private:
std::unique_ptr<PlanStage> _child;
// We drop the first _toSkip results that we would have returned.
- int _toSkip;
+ long long _toSkip;
// Stats
CommonStats _commonStats;
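Taken together, the SkipStage and LimitStage changes just widen the two counters described by the comments above ("we drop the first _toSkip results", "we only return this many results"). A standalone sketch of that counting with 64-bit counters (a hypothetical free function, not the PlanStage interface):

    #include <iostream>
    #include <vector>

    // Drop the first 'toSkip' results, then return at most 'limit' of the remainder.
    std::vector<int> skipThenLimit(const std::vector<int>& input, long long toSkip, long long limit) {
        std::vector<int> out;
        long long numToSkip = toSkip;    // mirrors SkipStage::_toSkip
        long long numToReturn = limit;   // mirrors LimitStage::_numToReturn
        for (int doc : input) {
            if (numToSkip > 0) {
                --numToSkip;
                continue;
            }
            if (numToReturn == 0) {
                break;
            }
            out.push_back(doc);
            --numToReturn;
        }
        return out;
    }

    int main() {
        for (int d : skipThenLimit({1, 2, 3, 4, 5}, 2, 2)) {
            std::cout << d << " ";  // prints: 3 4
        }
        std::cout << "\n";
    }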
diff --git a/src/mongo/db/query/find.cpp b/src/mongo/db/query/find.cpp
index 72fc8d6a294..2991ce21928 100644
--- a/src/mongo/db/query/find.cpp
+++ b/src/mongo/db/query/find.cpp
@@ -112,14 +112,14 @@ ScopedRecoveryUnitSwapper::~ScopedRecoveryUnitSwapper() {
* If ntoreturn is non-zero, then we stop building the first batch once we either have ntoreturn
* results or the result set exceeds 4 MB.
*/
-bool enoughForFirstBatch(const LiteParsedQuery& pq, int numDocs, int bytesBuffered) {
+bool enoughForFirstBatch(const LiteParsedQuery& pq, long long numDocs, int bytesBuffered) {
if (!pq.getBatchSize()) {
return (bytesBuffered > 1024 * 1024) || numDocs >= LiteParsedQuery::kDefaultBatchSize;
}
return numDocs >= *pq.getBatchSize() || bytesBuffered > MaxBytesToReturnToClientAtOnce;
}
-bool enoughForGetMore(int ntoreturn, int numDocs, int bytesBuffered) {
+bool enoughForGetMore(long long ntoreturn, long long numDocs, int bytesBuffered) {
return (ntoreturn && numDocs >= ntoreturn) || (bytesBuffered > MaxBytesToReturnToClientAtOnce);
}
@@ -178,8 +178,8 @@ bool shouldSaveCursorGetMore(PlanExecutor::ExecState finalState,
void beginQueryOp(OperationContext* txn,
const NamespaceString& nss,
const BSONObj& queryObj,
- int ntoreturn,
- int ntoskip) {
+ long long ntoreturn,
+ long long ntoskip) {
auto curop = CurOp::get(txn);
curop->debug().query = queryObj;
curop->debug().ntoreturn = ntoreturn;
@@ -192,7 +192,7 @@ void beginQueryOp(OperationContext* txn,
void endQueryOp(OperationContext* txn,
const PlanExecutor& exec,
int dbProfilingLevel,
- int numResults,
+ long long numResults,
CursorId cursorId) {
auto curop = CurOp::get(txn);
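The two predicates changed above are small enough to restate standalone. A sketch with plain types, using the constants visible in this diff (the 101-document default and the 1 MB cut-off appear in the code; the 4 MB figure comes from the comment and stands in for MaxBytesToReturnToClientAtOnce; std::optional stands in for boost::optional):

    #include <cassert>
    #include <optional>

    const long long kDefaultBatchSize = 101;                      // LiteParsedQuery::kDefaultBatchSize
    const int kMaxBytesToReturnToClientAtOnce = 4 * 1024 * 1024;  // assumed 4 MB, per the comment

    bool enoughForFirstBatch(std::optional<long long> batchSize, long long numDocs, int bytesBuffered) {
        if (!batchSize) {
            // No batchSize requested: stop after roughly 1 MB or the 101-document default.
            return (bytesBuffered > 1024 * 1024) || numDocs >= kDefaultBatchSize;
        }
        return numDocs >= *batchSize || bytesBuffered > kMaxBytesToReturnToClientAtOnce;
    }

    bool enoughForGetMore(long long ntoreturn, long long numDocs, int bytesBuffered) {
        // ntoreturn == 0 means "no document cap"; only the byte limit applies.
        return (ntoreturn && numDocs >= ntoreturn) || (bytesBuffered > kMaxBytesToReturnToClientAtOnce);
    }

    int main() {
        assert(enoughForFirstBatch(std::nullopt, 101, 0));            // hit the default batch size
        assert(!enoughForFirstBatch(std::nullopt, 100, 0));
        assert(enoughForFirstBatch(8000000000LL, 8000000000LL, 0));   // values a 32-bit int cannot hold
        assert(!enoughForGetMore(0, 1000000, 0));                     // unlimited until the byte cap
        return 0;
    }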
diff --git a/src/mongo/db/query/find.h b/src/mongo/db/query/find.h
index a3d1a246ab6..c4adc91b675 100644
--- a/src/mongo/db/query/find.h
+++ b/src/mongo/db/query/find.h
@@ -67,14 +67,14 @@ private:
*
* Should be called *after* adding to the result set rather than before.
*/
-bool enoughForFirstBatch(const LiteParsedQuery& pq, int numDocs, int bytesBuffered);
+bool enoughForFirstBatch(const LiteParsedQuery& pq, long long numDocs, int bytesBuffered);
/**
* Returns true if enough results have been prepared to stop adding more to a getMore batch.
*
* Should be called *after* adding to the result set rather than before.
*/
-bool enoughForGetMore(int ntoreturn, int numDocs, int bytesBuffered);
+bool enoughForGetMore(long long ntoreturn, long long numDocs, int bytesBuffered);
/**
* Whether or not the ClientCursor* is tailable.
@@ -115,8 +115,8 @@ bool shouldSaveCursorGetMore(PlanExecutor::ExecState finalState,
void beginQueryOp(OperationContext* txn,
const NamespaceString& nss,
const BSONObj& queryObj,
- int ntoreturn,
- int ntoskip);
+ long long ntoreturn,
+ long long ntoskip);
/**
* Fills out CurOp for "txn" with information regarding this query's execution.
@@ -129,7 +129,7 @@ void beginQueryOp(OperationContext* txn,
void endQueryOp(OperationContext* txn,
const PlanExecutor& exec,
int dbProfilingLevel,
- int numResults,
+ long long numResults,
CursorId cursorId);
/**
diff --git a/src/mongo/db/query/getmore_request.cpp b/src/mongo/db/query/getmore_request.cpp
index 6023494ec40..031864cf369 100644
--- a/src/mongo/db/query/getmore_request.cpp
+++ b/src/mongo/db/query/getmore_request.cpp
@@ -54,7 +54,7 @@ GetMoreRequest::GetMoreRequest() : cursorid(0), batchSize(0) {}
GetMoreRequest::GetMoreRequest(NamespaceString namespaceString,
CursorId id,
- boost::optional<int> sizeOfBatch,
+ boost::optional<long long> sizeOfBatch,
boost::optional<long long> term)
: nss(std::move(namespaceString)), cursorid(id), batchSize(sizeOfBatch), term(term) {}
@@ -95,7 +95,7 @@ StatusWith<GetMoreRequest> GetMoreRequest::parseFromBSON(const std::string& dbna
boost::optional<std::string> fullns;
// Optional fields.
- boost::optional<int> batchSize;
+ boost::optional<long long> batchSize;
boost::optional<long long> term;
for (BSONElement el : cmdObj) {
@@ -121,7 +121,7 @@ StatusWith<GetMoreRequest> GetMoreRequest::parseFromBSON(const std::string& dbna
str::stream() << "Field 'batchSize' must be a number in: " << cmdObj};
}
- batchSize = el.numberInt();
+ batchSize = el.numberLong();
} else if (str::equals(fieldName, kMaxTimeMSField)) {
// maxTimeMS is parsed by the command handling code, so we don't repeat the parsing
// here.
diff --git a/src/mongo/db/query/getmore_request.h b/src/mongo/db/query/getmore_request.h
index afc481fd5e1..575e333121f 100644
--- a/src/mongo/db/query/getmore_request.h
+++ b/src/mongo/db/query/getmore_request.h
@@ -49,7 +49,7 @@ struct GetMoreRequest {
*/
GetMoreRequest(NamespaceString namespaceString,
CursorId id,
- boost::optional<int> sizeOfBatch,
+ boost::optional<long long> sizeOfBatch,
boost::optional<long long> term);
/**
@@ -71,7 +71,7 @@ struct GetMoreRequest {
// The batch size is optional. If not provided, we will put as many documents into the batch
// as fit within the byte limit.
- const boost::optional<int> batchSize;
+ const boost::optional<long long> batchSize;
// Only internal queries from replication will typically have a term.
const boost::optional<long long> term;
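A standalone sketch of the rule stated in the comment above: with no batchSize the batch is bounded only by the byte limit, and with a batchSize the document count caps it too (the names and the 4 MB figure are assumptions of this sketch, not the MongoDB API):

    #include <iostream>
    #include <optional>
    #include <string>
    #include <vector>

    const long long kByteLimit = 4 * 1024 * 1024;  // assumed per-batch byte limit

    std::vector<std::string> fillBatch(const std::vector<std::string>& docs,
                                       std::optional<long long> batchSize) {
        std::vector<std::string> batch;
        long long numResults = 0;
        long long bytesBuffered = 0;
        for (const std::string& doc : docs) {
            if (batchSize && numResults >= *batchSize) break;
            if (bytesBuffered + static_cast<long long>(doc.size()) > kByteLimit) break;
            batch.push_back(doc);
            bytesBuffered += static_cast<long long>(doc.size());
            ++numResults;
        }
        return batch;
    }

    int main() {
        std::vector<std::string> docs(10, std::string(100, 'x'));
        std::cout << fillBatch(docs, 3LL).size() << "\n";           // 3: capped by batchSize
        std::cout << fillBatch(docs, std::nullopt).size() << "\n";  // 10: only the byte limit applies
    }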
diff --git a/src/mongo/db/query/lite_parsed_query.cpp b/src/mongo/db/query/lite_parsed_query.cpp
index 02d3748bf79..a616f85b814 100644
--- a/src/mongo/db/query/lite_parsed_query.cpp
+++ b/src/mongo/db/query/lite_parsed_query.cpp
@@ -53,7 +53,7 @@ const string LiteParsedQuery::metaGeoNearPoint("geoNearPoint");
const string LiteParsedQuery::metaRecordId("recordId");
const string LiteParsedQuery::metaIndexKey("indexKey");
-const int LiteParsedQuery::kDefaultBatchSize = 101;
+const long long LiteParsedQuery::kDefaultBatchSize = 101;
namespace {
@@ -161,7 +161,7 @@ StatusWith<unique_ptr<LiteParsedQuery>> LiteParsedQuery::makeFromFindCommand(Nam
return Status(ErrorCodes::FailedToParse, ss);
}
- int skip = el.numberInt();
+ long long skip = el.numberLong();
if (skip < 0) {
return Status(ErrorCodes::BadValue, "skip value must be non-negative");
}
@@ -175,7 +175,7 @@ StatusWith<unique_ptr<LiteParsedQuery>> LiteParsedQuery::makeFromFindCommand(Nam
return Status(ErrorCodes::FailedToParse, ss);
}
- int limit = el.numberInt();
+ long long limit = el.numberLong();
if (limit <= 0) {
return Status(ErrorCodes::BadValue, "limit value must be positive");
}
@@ -189,7 +189,7 @@ StatusWith<unique_ptr<LiteParsedQuery>> LiteParsedQuery::makeFromFindCommand(Nam
return Status(ErrorCodes::FailedToParse, ss);
}
- int batchSize = el.numberInt();
+ long long batchSize = el.numberLong();
if (batchSize < 0) {
return Status(ErrorCodes::BadValue, "batchSize value must be non-negative");
}
@@ -388,10 +388,11 @@ StatusWith<unique_ptr<LiteParsedQuery>> LiteParsedQuery::makeAsOpQuery(Namespace
}
// static
-StatusWith<unique_ptr<LiteParsedQuery>> LiteParsedQuery::makeAsFindCmd(NamespaceString nss,
- const BSONObj& query,
- const BSONObj& sort,
- boost::optional<int> limit) {
+StatusWith<unique_ptr<LiteParsedQuery>> LiteParsedQuery::makeAsFindCmd(
+ NamespaceString nss,
+ const BSONObj& query,
+ const BSONObj& sort,
+ boost::optional<long long> limit) {
unique_ptr<LiteParsedQuery> pq(new LiteParsedQuery(std::move(nss)));
pq->_fromCommand = true;
@@ -399,7 +400,7 @@ StatusWith<unique_ptr<LiteParsedQuery>> LiteParsedQuery::makeAsFindCmd(Namespace
pq->_sort = sort.getOwned();
if (limit) {
- if (limit <= 0) {
+ if (*limit <= 0) {
return Status(ErrorCodes::BadValue, "limit value must be positive");
}
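The parsing changes above read the numeric fields as 64-bit values while keeping the same validation rules. A standalone sketch of just the validation, with the Status plumbing replaced by an optional error string (a hypothetical helper, not the LiteParsedQuery API):

    #include <iostream>
    #include <optional>
    #include <string>

    std::optional<std::string> validateFindOptions(long long skip,
                                                   std::optional<long long> limit,
                                                   std::optional<long long> batchSize) {
        if (skip < 0) return std::string("skip value must be non-negative");
        if (limit && *limit <= 0) return std::string("limit value must be positive");
        if (batchSize && *batchSize < 0) return std::string("batchSize value must be non-negative");
        return std::nullopt;  // everything checks out
    }

    int main() {
        // Large 64-bit values are now accepted...
        std::cout << validateFindOptions(8000000000LL, 8000000000LL, 8000000000LL).value_or("OK") << "\n";
        // ...while the old error cases are still rejected.
        std::cout << validateFindOptions(0, 0LL, std::nullopt).value_or("OK") << "\n";
    }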
diff --git a/src/mongo/db/query/lite_parsed_query.h b/src/mongo/db/query/lite_parsed_query.h
index 787b5275259..ce5f14a6e0d 100644
--- a/src/mongo/db/query/lite_parsed_query.h
+++ b/src/mongo/db/query/lite_parsed_query.h
@@ -78,10 +78,11 @@ public:
* Constructs a LiteParsedQuery object that can be used to serialize to a find command
* BSON object.
*/
- static StatusWith<std::unique_ptr<LiteParsedQuery>> makeAsFindCmd(NamespaceString nss,
- const BSONObj& query,
- const BSONObj& sort,
- boost::optional<int> limit);
+ static StatusWith<std::unique_ptr<LiteParsedQuery>> makeAsFindCmd(
+ NamespaceString nss,
+ const BSONObj& query,
+ const BSONObj& sort,
+ boost::optional<long long> limit);
/**
* Converts this LPQ into a find command.
@@ -159,15 +160,15 @@ public:
return _hint;
}
- static const int kDefaultBatchSize;
+ static const long long kDefaultBatchSize;
- int getSkip() const {
+ long long getSkip() const {
return _skip;
}
- boost::optional<int> getLimit() const {
+ boost::optional<long long> getLimit() const {
return _limit;
}
- boost::optional<int> getBatchSize() const {
+ boost::optional<long long> getBatchSize() const {
return _batchSize;
}
bool wantMore() const {
@@ -311,11 +312,11 @@ private:
// {$hint: <String>}, where <String> is the index name hinted.
BSONObj _hint;
- int _skip = 0;
+ long long _skip = 0;
bool _wantMore = true;
- boost::optional<int> _limit;
- boost::optional<int> _batchSize;
+ boost::optional<long long> _limit;
+ boost::optional<long long> _batchSize;
bool _fromCommand = false;
bool _explain = false;
diff --git a/src/mongo/db/query/lite_parsed_query_test.cpp b/src/mongo/db/query/lite_parsed_query_test.cpp
index a79fb6d593c..0b3bf3d6855 100644
--- a/src/mongo/db/query/lite_parsed_query_test.cpp
+++ b/src/mongo/db/query/lite_parsed_query_test.cpp
@@ -107,7 +107,7 @@ TEST(LiteParsedQueryTest, NumToReturn) {
false, // snapshot
false))); // explain
- ASSERT_EQUALS(6, lpq->getBatchSize());
+ ASSERT_EQUALS(6, *lpq->getBatchSize());
ASSERT(lpq->wantMore());
}
@@ -125,7 +125,7 @@ TEST(LiteParsedQueryTest, NumToReturnNegative) {
false, // snapshot
false))); // explain
- ASSERT_EQUALS(6, lpq->getBatchSize());
+ ASSERT_EQUALS(6, *lpq->getBatchSize());
ASSERT(!lpq->wantMore());
}
@@ -263,7 +263,7 @@ TEST(LiteParsedQueryTest, MakeFindCmd) {
auto&& lpq = result.getValue();
ASSERT_EQUALS("test.ns", lpq->ns());
ASSERT_EQUALS(BSON("x" << 1), lpq->getFilter());
- ASSERT_EQUALS(2, lpq->getLimit());
+ ASSERT_EQUALS(2, *lpq->getLimit());
ASSERT_EQUALS(BSONObj(), lpq->getProj());
ASSERT_EQUALS(BSON("y" << -1), lpq->getSort());
@@ -332,12 +332,20 @@ TEST(LiteParsedQueryTest, MakeFindCmdNoLimit) {
}
TEST(LiteParsedQueryTest, MakeFindCmdBadLimit) {
- auto status = LiteParsedQuery::makeAsFindCmd(
- NamespaceString("test.ns"), BSON("x" << 1), BSONObj(), 0).getStatus();
+ Status status = LiteParsedQuery::makeAsFindCmd(
+ NamespaceString("test.ns"), BSON("x" << 1), BSONObj(), 0LL).getStatus();
ASSERT_NOT_OK(status);
ASSERT_EQUALS(ErrorCodes::BadValue, status.code());
}
+TEST(LiteParsedQueryTest, MakeFindCmdLargeLimit) {
+ auto result = LiteParsedQuery::makeAsFindCmd(
+ NamespaceString("test.ns"), BSON("x" << 1), BSON("y" << -1), 8LL * 1000 * 1000 * 1000);
+ ASSERT_OK(result.getStatus());
+
+ ASSERT_EQUALS(8LL * 1000 * 1000 * 1000, *result.getValue()->getLimit());
+}
+
//
// Text meta BSON element validation
//
@@ -537,12 +545,51 @@ TEST(LiteParsedQueryTest, ParseFromCommandAllNonOptionFields) {
ASSERT_EQUALS(0, expectedProj.woCompare(lpq->getProj()));
BSONObj expectedHint = BSON("d" << 1);
ASSERT_EQUALS(0, expectedHint.woCompare(lpq->getHint()));
- ASSERT_EQUALS(3, lpq->getLimit());
+ ASSERT_EQUALS(3, *lpq->getLimit());
ASSERT_EQUALS(5, lpq->getSkip());
- ASSERT_EQUALS(90, lpq->getBatchSize());
+ ASSERT_EQUALS(90, *lpq->getBatchSize());
ASSERT(lpq->wantMore());
}
+TEST(LiteParsedQueryTest, ParseFromCommandLargeLimit) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "filter: {a: 1},"
+ "limit: 8000000000}"); // 8 * 1000 * 1000 * 1000
+ const NamespaceString nss("test.testns");
+ const bool isExplain = false;
+ unique_ptr<LiteParsedQuery> lpq(
+ assertGet(LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain)));
+
+ ASSERT_EQUALS(8LL * 1000 * 1000 * 1000, *lpq->getLimit());
+}
+
+TEST(LiteParsedQueryTest, ParseFromCommandLargeBatchSize) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "filter: {a: 1},"
+ "batchSize: 8000000000}"); // 8 * 1000 * 1000 * 1000
+ const NamespaceString nss("test.testns");
+ const bool isExplain = false;
+ unique_ptr<LiteParsedQuery> lpq(
+ assertGet(LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain)));
+
+ ASSERT_EQUALS(8LL * 1000 * 1000 * 1000, *lpq->getBatchSize());
+}
+
+TEST(LiteParsedQueryTest, ParseFromCommandLargeSkip) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "filter: {a: 1},"
+ "skip: 8000000000}"); // 8 * 1000 * 1000 * 1000
+ const NamespaceString nss("test.testns");
+ const bool isExplain = false;
+ unique_ptr<LiteParsedQuery> lpq(
+ assertGet(LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain)));
+
+ ASSERT_EQUALS(8LL * 1000 * 1000 * 1000, lpq->getSkip());
+}
+
//
// Parsing errors where a field has the wrong type.
//
@@ -828,7 +875,7 @@ TEST(LiteParsedQueryTest, ParseFromCommandBatchSizeZero) {
assertGet(LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain)));
ASSERT(lpq->getBatchSize());
- ASSERT_EQ(0, lpq->getBatchSize());
+ ASSERT_EQ(0, *lpq->getBatchSize());
ASSERT(!lpq->getLimit());
}
diff --git a/src/mongo/db/query/query_solution.h b/src/mongo/db/query/query_solution.h
index 54c63751989..8de5dabb40f 100644
--- a/src/mongo/db/query/query_solution.h
+++ b/src/mongo/db/query/query_solution.h
@@ -628,7 +628,7 @@ struct LimitNode : public QuerySolutionNode {
QuerySolutionNode* clone() const;
- int limit;
+ long long limit;
};
struct SkipNode : public QuerySolutionNode {
@@ -655,7 +655,7 @@ struct SkipNode : public QuerySolutionNode {
QuerySolutionNode* clone() const;
- int skip;
+ long long skip;
};
// This is a standalone stage.