author    David Storch <david.storch@10gen.com>  2014-03-20 09:28:35 -0400
committer David Storch <david.storch@10gen.com>  2014-03-20 11:07:25 -0400
commit    d3b45e4327368e0a44634b9c5288d729e706975e (patch)
tree      c1be6cf81eb599afd47c450685e16ad5c7b211e3
parent    2b2cf4a9b4812885613ada9fc322d8bd4f8f09d8 (diff)
download  mongo-d3b45e4327368e0a44634b9c5288d729e706975e.tar.gz
SERVER-12438 better handling of batchSize and limit with sort
(cherry picked from commit b9167f0fe82160967e591aefba5824a8a372353d)
-rw-r--r--  jstests/batch_size.js                        42
-rw-r--r--  jstests/core/batch_size.js                   42
-rw-r--r--  src/mongo/db/query/get_runner.cpp            52
-rw-r--r--  src/mongo/db/query/planner_analysis.cpp      37
-rw-r--r--  src/mongo/db/query/query_planner_params.h     5
-rw-r--r--  src/mongo/db/query/query_planner_test.cpp    26
-rw-r--r--  src/mongo/db/query/query_solution.cpp       214
-rw-r--r--  src/mongo/db/query/query_solution.h          58
8 files changed, 411 insertions, 65 deletions
diff --git a/jstests/batch_size.js b/jstests/batch_size.js
index 2bc144cd554..6cbc45dc803 100644
--- a/jstests/batch_size.js
+++ b/jstests/batch_size.js
@@ -29,13 +29,8 @@ assert.eq(4, t.find().sort({a: 1}).itcount(), 'B');
assert.eq(2, t.find().limit(2).itcount(), 'C');
assert.eq(2, t.find().sort({a: 1}).limit(2).itcount(), 'D');
-// With batchSize, unindexed.
-// SERVER-12438: in general batch size does not mean a hard
-// limit. With an unindexed sort, however, the server interprets
-// batch size as a hard limit so that it can do a top k sort.
-// WARNING: this behavior may change in the future.
assert.eq(4, t.find().batchSize(2).itcount(), 'E');
-assert.eq(2, t.find().sort({a: 1}).batchSize(2).itcount(), 'F');
+assert.eq(4, t.find().sort({a: 1}).batchSize(2).itcount(), 'F');
// Run the tests with the index twice in order to double check plan caching.
t.ensureIndex({a: 1});
@@ -43,3 +38,38 @@ for (var i = 0; i < 2; i++) {
runIndexedTests();
}
+// The next tests make sure that we obey limit and batchSize properly when
+// the sort could be either indexed or unindexed.
+t.drop();
+t.ensureIndex({a: 1});
+t.ensureIndex({b: 1});
+
+for (var i = 0; i < 100; i++) {
+ t.save({_id: i, a: i, b: 1});
+}
+
+// Without a hint. Do it twice to make sure caching is ok.
+for (var i = 0; i < 2; i++) {
+ assert.eq(15, t.find({a: {$gte: 85}}).sort({b: 1}).batchSize(2).itcount(), 'K');
+ assert.eq(6, t.find({a: {$gte: 85}}).sort({b: 1}).limit(6).itcount(), 'L');
+}
+
+// Hinting 'a'.
+assert.eq(15, t.find({a: {$gte: 85}}).sort({b: 1}).hint({a: 1}).batchSize(2).itcount(), 'M');
+assert.eq(6, t.find({a: {$gte: 85}}).sort({b: 1}).hint({a: 1}).limit(6).itcount(), 'N');
+
+// Hinting 'b'.
+assert.eq(15, t.find({a: {$gte: 85}}).sort({b: 1}).hint({b: 1}).batchSize(2).itcount(), 'O');
+assert.eq(6, t.find({a: {$gte: 85}}).sort({b: 1}).hint({b: 1}).limit(6).itcount(), 'P');
+
+// With explain.
+assert.eq(15, t.find({a: {$gte: 85}}).sort({b: 1}).batchSize(2).explain().n, 'Q');
+assert.eq(6, t.find({a: {$gte: 85}}).sort({b: 1}).limit(6).explain().n, 'R');
+
+// Double-check that we're not scanning more than we have to. In order
+// to get the sorted results using index 'a', we should only need to scan
+// about 50 keys and 50 documents.
+var explain = t.find({a: {$gte: 50}}).sort({b: 1}).hint({a: 1}).limit(6).explain();
+assert.lte(explain.nscanned, 60, 'S');
+assert.lte(explain.nscannedObjects, 60, 'T');
+assert.eq(explain.n, 6, 'U');
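The revised 'F' assertion reflects cursor semantics: itcount() keeps issuing getMores until the cursor is exhausted, so batchSize only shapes the round trips while limit caps the total. A toy model of that client-side behavior (hypothetical helper, not shell or driver code):

    #include <algorithm>
    #include <cstddef>

    // Toy model: itcount() drains the cursor, so batchSize controls how many
    // results come back per round trip, while limit (0 = none) caps the total.
    std::size_t itcount(std::size_t nMatching, std::size_t batchSize, std::size_t limit) {
        const std::size_t cap = (limit == 0) ? nMatching : std::min(limit, nMatching);
        std::size_t seen = 0;
        while (seen < cap) {
            const std::size_t batch = (batchSize == 0) ? cap : batchSize;
            seen += std::min(batch, cap - seen);  // one getMore round trip
        }
        return seen;
    }
    // itcount(4, 2, 0) == 4, matching assertions 'E' and 'F';
    // itcount(4, 0, 2) == 2, matching assertions 'C' and 'D'.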
diff --git a/jstests/core/batch_size.js b/jstests/core/batch_size.js
index 2bc144cd554..6cbc45dc803 100644
--- a/jstests/core/batch_size.js
+++ b/jstests/core/batch_size.js
@@ -29,13 +29,8 @@ assert.eq(4, t.find().sort({a: 1}).itcount(), 'B');
assert.eq(2, t.find().limit(2).itcount(), 'C');
assert.eq(2, t.find().sort({a: 1}).limit(2).itcount(), 'D');
-// With batchSize, unindexed.
-// SERVER-12438: in general batch size does not mean a hard
-// limit. With an unindexed sort, however, the server interprets
-// batch size as a hard limit so that it can do a top k sort.
-// WARNING: this behavior may change in the future.
assert.eq(4, t.find().batchSize(2).itcount(), 'E');
-assert.eq(2, t.find().sort({a: 1}).batchSize(2).itcount(), 'F');
+assert.eq(4, t.find().sort({a: 1}).batchSize(2).itcount(), 'F');
// Run the tests with the index twice in order to double check plan caching.
t.ensureIndex({a: 1});
@@ -43,3 +38,38 @@ for (var i = 0; i < 2; i++) {
runIndexedTests();
}
+// The next tests make sure that we obey limit and batchSize properly when
+// the sort could be either indexed or unindexed.
+t.drop();
+t.ensureIndex({a: 1});
+t.ensureIndex({b: 1});
+
+for (var i = 0; i < 100; i++) {
+ t.save({_id: i, a: i, b: 1});
+}
+
+// Without a hint. Do it twice to make sure caching is ok.
+for (var i = 0; i < 2; i++) {
+ assert.eq(15, t.find({a: {$gte: 85}}).sort({b: 1}).batchSize(2).itcount(), 'K');
+ assert.eq(6, t.find({a: {$gte: 85}}).sort({b: 1}).limit(6).itcount(), 'L');
+}
+
+// Hinting 'a'.
+assert.eq(15, t.find({a: {$gte: 85}}).sort({b: 1}).hint({a: 1}).batchSize(2).itcount(), 'M');
+assert.eq(6, t.find({a: {$gte: 85}}).sort({b: 1}).hint({a: 1}).limit(6).itcount(), 'N');
+
+// Hinting 'b'.
+assert.eq(15, t.find({a: {$gte: 85}}).sort({b: 1}).hint({b: 1}).batchSize(2).itcount(), 'O');
+assert.eq(6, t.find({a: {$gte: 85}}).sort({b: 1}).hint({b: 1}).limit(6).itcount(), 'P');
+
+// With explain.
+assert.eq(15, t.find({a: {$gte: 85}}).sort({b: 1}).batchSize(2).explain().n, 'Q');
+assert.eq(6, t.find({a: {$gte: 85}}).sort({b: 1}).limit(6).explain().n, 'R');
+
+// Double-check that we're not scanning more than we have to. In order
+// to get the sorted results using index 'a', we should only need to scan
+// about 50 keys and 50 documents.
+var explain = t.find({a: {$gte: 50}}).sort({b: 1}).hint({a: 1}).limit(6).explain();
+assert.lte(explain.nscanned, 60, 'S');
+assert.lte(explain.nscannedObjects, 60, 'T');
+assert.eq(explain.n, 6, 'U');
diff --git a/src/mongo/db/query/get_runner.cpp b/src/mongo/db/query/get_runner.cpp
index 1a0ac49d04c..10b5b3c6e04 100644
--- a/src/mongo/db/query/get_runner.cpp
+++ b/src/mongo/db/query/get_runner.cpp
@@ -200,6 +200,7 @@ namespace mongo {
}
plannerParams->options |= QueryPlannerParams::KEEP_MUTATIONS;
+ plannerParams->options |= QueryPlannerParams::SPLIT_LIMITED_SORT;
}
Status getRunnerFromCache(CanonicalQuery* canonicalQuery,
@@ -230,26 +231,6 @@ namespace mongo {
return status;
}
- // See SERVER-12438. Unfortunately we have to defer to the backup solution
- // if both a batch size is set and a sort is requested.
- //
- // TODO: it would be really nice to delete this block in the future.
- if (NULL != backupQs &&
- 0 < canonicalQuery->getParsed().getNumToReturn() &&
- !canonicalQuery->getParsed().getSort().isEmpty()) {
- delete qs;
-
- WorkingSet* ws;
- PlanStage* root;
- verify(StageBuilder::build(*backupQs, &root, &ws));
-
- // And, run the plan.
- *out = new SingleSolutionRunner(collection,
- canonicalQuery,
- backupQs, root, ws);
- return Status::OK();
- }
-
// If our cached solution is a hit for a count query, try to turn it into a fast count
// thing.
if (plannerParams.options & QueryPlannerParams::PRIVATE_IS_COUNT) {
@@ -445,37 +426,6 @@ namespace mongo {
return Status::OK();
}
else {
- // See SERVER-12438. In an ideal world we should not arbitrarily prefer certain
- // solutions over others. But unfortunately for historical reasons we are forced
- // to prefer a solution where the index provides the sort, if the batch size
- // is set and a sort is requested. Read SERVER-12438 for details, if you dare.
- //
- // TODO: it would be really nice to delete this entire block in the future.
- if (0 < canonicalQuery->getParsed().getNumToReturn()
- && !canonicalQuery->getParsed().getSort().isEmpty()) {
- // Look for a solution without a blocking sort stage.
- for (size_t i = 0; i < solutions.size(); ++i) {
- if (!solutions[i]->hasBlockingStage) {
- WorkingSet* ws;
- PlanStage* root;
- verify(StageBuilder::build(*solutions[i], &root, &ws));
-
- // Free unused solutions.
- for (size_t j = 0; j < solutions.size(); ++j) {
- if (j != i) {
- delete solutions[j];
- }
- }
-
- // And, run the plan.
- *out = new SingleSolutionRunner(collection,
- canonicalQuery.release(),
- solutions[i], root, ws);
- return Status::OK();
- }
- }
- }
-
// Many solutions. Let the MultiPlanRunner pick the best, update the cache, and so on.
auto_ptr<MultiPlanRunner> mpr(new MultiPlanRunner(collection, canonicalQuery.release()));
diff --git a/src/mongo/db/query/planner_analysis.cpp b/src/mongo/db/query/planner_analysis.cpp
index 44372f11333..5eebbb2c8a3 100644
--- a/src/mongo/db/query/planner_analysis.cpp
+++ b/src/mongo/db/query/planner_analysis.cpp
@@ -400,18 +400,51 @@ namespace mongo {
SortNode* sort = new SortNode();
sort->pattern = sortObj;
sort->query = query.getParsed().getFilter();
+ sort->children.push_back(solnRoot);
+ solnRoot = sort;
// When setting the limit on the sort, we need to consider both
// the limit N and the skip count M. The sort should return an ordered
// list of N + M items so that the skip stage can discard the first M results.
if (0 != query.getParsed().getNumToReturn()) {
sort->limit = query.getParsed().getNumToReturn() +
query.getParsed().getSkip();
+
+ // This is a SORT with a limit. The wire protocol has a single quantity
+ // called "numToReturn" which could mean either limit or batchSize.
+ // We have no idea what the client intended. One way to handle the ambiguity
+ // of a limited SORT stage is to use the SPLIT_LIMITED_SORT hack.
+ //
+ // If numToReturn is really a limit, then we want to add a limit to this
+ // SORT stage, and hence perform a topK.
+ //
+ // If numToReturn is really a batchSize, then we want to perform a regular
+ // blocking sort.
+ //
+ // Since we don't know which to use, just join the two options with an OR,
+ // with the topK first. If the client wants a limit, they'll get the efficiency
+ // of topK. If they want a batchSize, the other OR branch will deliver the missing
+ // results. The OR stage handles deduping.
+ if (params.options & QueryPlannerParams::SPLIT_LIMITED_SORT
+ && !QueryPlannerCommon::hasNode(query.root(), MatchExpression::TEXT)
+ && !QueryPlannerCommon::hasNode(query.root(), MatchExpression::GEO)
+ && !QueryPlannerCommon::hasNode(query.root(), MatchExpression::GEO_NEAR)) {
+ // If we're here then the SPLIT_LIMITED_SORT hack is turned on,
+ // and the query is of a type that allows the hack.
+ //
+ // Not allowed for geo or text, because we assume elsewhere that those
+ // stages appear just once.
+ OrNode* orn = new OrNode();
+ orn->children.push_back(sort);
+ SortNode* sortClone = static_cast<SortNode*>(sort->clone());
+ sortClone->limit = 0;
+ orn->children.push_back(sortClone);
+ solnRoot = orn;
+ }
}
else {
sort->limit = 0;
}
- sort->children.push_back(solnRoot);
- solnRoot = sort;
+
*blockingSortOut = true;
return solnRoot;
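In outline, the hunk above rewrites SORT(limit = N + M) into OR(SORT(limit = N + M), SORT(limit = 0)). A self-contained sketch of the transform using stand-in node types (splitLimitedSort and these structs are illustrative, not the real QuerySolutionNode classes):

    #include <cstddef>
    #include <vector>

    struct Node {
        std::vector<Node*> children;  // owned
        virtual Node* clone() const = 0;
        virtual ~Node() {
            for (std::size_t i = 0; i < children.size(); ++i) delete children[i];
        }
    };

    struct SortNode : Node {
        std::size_t limit;  // 0 means "sort everything"
        SortNode() : limit(0) {}
        Node* clone() const {
            SortNode* copy = new SortNode();
            copy->limit = limit;
            for (std::size_t i = 0; i < children.size(); ++i)
                copy->children.push_back(children[i]->clone());
            return copy;
        }
    };

    struct OrNode : Node {
        Node* clone() const {
            OrNode* copy = new OrNode();
            for (std::size_t i = 0; i < children.size(); ++i)
                copy->children.push_back(children[i]->clone());
            return copy;
        }
    };

    // numToReturn might be a limit or a batchSize; plan both. The top-K branch
    // wins if it was a limit; the unlimited branch supplies any missing results
    // if it was a batchSize, and the OR dedups whatever the branches share.
    Node* splitLimitedSort(SortNode* sort, std::size_t numToReturn, std::size_t skip) {
        sort->limit = numToReturn + skip;                      // top-K branch
        SortNode* full = static_cast<SortNode*>(sort->clone());
        full->limit = 0;                                       // full blocking sort
        OrNode* orn = new OrNode();
        orn->children.push_back(sort);
        orn->children.push_back(full);
        return orn;
    }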
diff --git a/src/mongo/db/query/query_planner_params.h b/src/mongo/db/query/query_planner_params.h
index eacf858b06a..6d51291f1fb 100644
--- a/src/mongo/db/query/query_planner_params.h
+++ b/src/mongo/db/query/query_planner_params.h
@@ -77,6 +77,11 @@ namespace mongo {
// Nobody should set this above the getRunner interface. Internal flag set as a hint to
// the planner that the caller is actually the count command.
PRIVATE_IS_COUNT = 1 << 6,
+
+ // Set this if you want to handle batchSize properly with sort(). If limits on SORT
+ // stages are always actually limits, then this should be left off. If they are
+ // sometimes to be interpreted as batchSize, then this should be turned on.
+ SPLIT_LIMITED_SORT = 1 << 7
};
// See Options enum above.
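SPLIT_LIMITED_SORT is an ordinary bit in the options bitmask: set during planner-params setup in get_runner.cpp, tested in planner_analysis.cpp. A minimal sketch of that flow (simplified enum; only two flags shown, intervening values elided):

    #include <cstddef>

    // Simplified stand-ins for the QueryPlannerParams::Options values.
    enum Options {
        NO_TABLE_SCAN      = 1 << 0,
        SPLIT_LIMITED_SORT = 1 << 7
    };

    int main() {
        std::size_t options = 0;
        options |= SPLIT_LIMITED_SORT;                     // set, as in get_runner.cpp
        bool split = (options & SPLIT_LIMITED_SORT) != 0;  // test, as in planner_analysis.cpp
        return split ? 0 : 1;
    }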
diff --git a/src/mongo/db/query/query_planner_test.cpp b/src/mongo/db/query/query_planner_test.cpp
index a33c01c0423..f2d304b6773 100644
--- a/src/mongo/db/query/query_planner_test.cpp
+++ b/src/mongo/db/query/query_planner_test.cpp
@@ -3251,6 +3251,32 @@ namespace {
}
//
+ // Test the "split limited sort stages" hack.
+ //
+
+ TEST_F(QueryPlannerTest, SplitLimitedSort) {
+ params.options = QueryPlannerParams::NO_TABLE_SCAN;
+ params.options |= QueryPlannerParams::SPLIT_LIMITED_SORT;
+ addIndex(BSON("a" << 1));
+ addIndex(BSON("b" << 1));
+
+ runQuerySortProjSkipLimit(fromjson("{a: 1}"), fromjson("{b: 1}"),
+ BSONObj(), 0, 3);
+
+ assertNumSolutions(2U);
+ // First solution has no blocking stage; no need to split.
+ assertSolutionExists("{fetch: {filter: {a:1}, node: "
+ "{ixscan: {filter: null, pattern: {b: 1}}}}}");
+ // Second solution has a blocking sort with a limit: it gets split and
+ // joined with an OR stage.
+ assertSolutionExists("{or: {nodes: ["
+ "{sort: {pattern: {b: 1}, limit: 3, node: "
+ "{fetch: {node: {ixscan: {pattern: {a: 1}}}}}}}, "
+ "{sort: {pattern: {b: 1}, limit: 0, node: "
+ "{fetch: {node: {ixscan: {pattern: {a: 1}}}}}}}]}}");
+ }
+
+ //
// Test bad input to query planner helpers.
//
diff --git a/src/mongo/db/query/query_solution.cpp b/src/mongo/db/query/query_solution.cpp
index 55b132482d6..ba478512be9 100644
--- a/src/mongo/db/query/query_solution.cpp
+++ b/src/mongo/db/query/query_solution.cpp
@@ -79,6 +79,19 @@ namespace mongo {
addCommon(ss, indent);
}
+ QuerySolutionNode* TextNode::clone() const {
+ TextNode* copy = new TextNode();
+ cloneBaseData(copy);
+
+ copy->_sort = this->_sort;
+ copy->indexKeyPattern = this->indexKeyPattern;
+ copy->query = this->query;
+ copy->language = this->language;
+ copy->indexPrefix = this->indexPrefix;
+
+ return copy;
+ }
+
//
// CollectionScanNode
//
@@ -97,6 +110,19 @@ namespace mongo {
addCommon(ss, indent);
}
+ QuerySolutionNode* CollectionScanNode::clone() const {
+ CollectionScanNode* copy = new CollectionScanNode();
+ cloneBaseData(copy);
+
+ copy->_sort = this->_sort;
+ copy->name = this->name;
+ copy->tailable = this->tailable;
+ copy->direction = this->direction;
+ copy->maxScan = this->maxScan;
+
+ return copy;
+ }
+
//
// AndHashNode
//
@@ -142,6 +168,15 @@ namespace mongo {
return false;
}
+ QuerySolutionNode* AndHashNode::clone() const {
+ AndHashNode* copy = new AndHashNode();
+ cloneBaseData(copy);
+
+ copy->_sort = this->_sort;
+
+ return copy;
+ }
+
//
// AndSortedNode
//
@@ -187,6 +222,15 @@ namespace mongo {
return false;
}
+ QuerySolutionNode* AndSortedNode::clone() const {
+ AndSortedNode* copy = new AndSortedNode();
+ cloneBaseData(copy);
+
+ copy->_sort = this->_sort;
+
+ return copy;
+ }
+
//
// OrNode
//
@@ -237,6 +281,16 @@ namespace mongo {
return true;
}
+ QuerySolutionNode* OrNode::clone() const {
+ OrNode* copy = new OrNode();
+ cloneBaseData(copy);
+
+ copy->_sort = this->_sort;
+ copy->dedup = this->dedup;
+
+ return copy;
+ }
+
//
// MergeSortNode
//
@@ -287,6 +341,17 @@ namespace mongo {
return true;
}
+ QuerySolutionNode* MergeSortNode::clone() const {
+ MergeSortNode* copy = new MergeSortNode();
+ cloneBaseData(copy);
+
+ copy->_sorts = this->_sorts;
+ copy->dedup = this->dedup;
+ copy->sort = this->sort;
+
+ return copy;
+ }
+
//
// FetchNode
//
@@ -309,6 +374,15 @@ namespace mongo {
children[0]->appendToString(ss, indent + 2);
}
+ QuerySolutionNode* FetchNode::clone() const {
+ FetchNode* copy = new FetchNode();
+ cloneBaseData(copy);
+
+ copy->_sorts = this->_sorts;
+
+ return copy;
+ }
+
//
// IndexScanNode
//
@@ -463,6 +537,21 @@ namespace mongo {
}
}
+ QuerySolutionNode* IndexScanNode::clone() const {
+ IndexScanNode* copy = new IndexScanNode();
+ cloneBaseData(copy);
+
+ copy->_sorts = this->_sorts;
+ copy->indexKeyPattern = this->indexKeyPattern;
+ copy->indexIsMultiKey = this->indexIsMultiKey;
+ copy->direction = this->direction;
+ copy->maxScan = this->maxScan;
+ copy->addKeyMetadata = this->addKeyMetadata;
+ copy->bounds = this->bounds;
+
+ return copy;
+ }
+
//
// ProjectionNode
//
@@ -477,6 +566,20 @@ namespace mongo {
children[0]->appendToString(ss, indent + 2);
}
+ QuerySolutionNode* ProjectionNode::clone() const {
+ ProjectionNode* copy = new ProjectionNode();
+ cloneBaseData(copy);
+
+ copy->_sorts = this->_sorts;
+ copy->fullExpression = this->fullExpression;
+
+ // This MatchExpression* is owned by the canonical query, not by the
+ // ProjectionNode. Just copying the pointer is fine.
+ copy->projection = this->projection;
+
+ return copy;
+ }
+
//
// SortNode
//
@@ -495,6 +598,18 @@ namespace mongo {
children[0]->appendToString(ss, indent + 2);
}
+ QuerySolutionNode* SortNode::clone() const {
+ SortNode* copy = new SortNode();
+ cloneBaseData(copy);
+
+ copy->_sorts = this->_sorts;
+ copy->pattern = this->pattern;
+ copy->query = this->query;
+ copy->limit = this->limit;
+
+ return copy;
+ }
+
//
// LimitNode
//
@@ -511,6 +626,15 @@ namespace mongo {
children[0]->appendToString(ss, indent + 2);
}
+ QuerySolutionNode* LimitNode::clone() const {
+ LimitNode* copy = new LimitNode();
+ cloneBaseData(copy);
+
+ copy->limit = this->limit;
+
+ return copy;
+ }
+
//
// SkipNode
//
@@ -525,6 +649,15 @@ namespace mongo {
children[0]->appendToString(ss, indent + 2);
}
+ QuerySolutionNode* SkipNode::clone() const {
+ SkipNode* copy = new SkipNode();
+ cloneBaseData(copy);
+
+ copy->skip = this->skip;
+
+ return copy;
+ }
+
//
// GeoNear2DNode
//
@@ -542,6 +675,20 @@ namespace mongo {
}
}
+ QuerySolutionNode* GeoNear2DNode::clone() const {
+ GeoNear2DNode* copy = new GeoNear2DNode();
+ cloneBaseData(copy);
+
+ copy->_sorts = this->_sorts;
+ copy->nq = this->nq;
+ copy->numWanted = this->numWanted;
+ copy->indexKeyPattern = this->indexKeyPattern;
+ copy->addPointMeta = this->addPointMeta;
+ copy->addDistMeta = this->addDistMeta;
+
+ return copy;
+ }
+
//
// GeoNear2DSphereNode
//
@@ -561,6 +708,20 @@ namespace mongo {
}
}
+ QuerySolutionNode* GeoNear2DSphereNode::clone() const {
+ GeoNear2DSphereNode* copy = new GeoNear2DSphereNode();
+ cloneBaseData(copy);
+
+ copy->_sorts = this->_sorts;
+ copy->nq = this->nq;
+ copy->baseBounds = this->baseBounds;
+ copy->indexKeyPattern = this->indexKeyPattern;
+ copy->addPointMeta = this->addPointMeta;
+ copy->addDistMeta = this->addDistMeta;
+
+ return copy;
+ }
+
//
// Geo2DNode
//
@@ -583,6 +744,17 @@ namespace mongo {
return false;
}
+ QuerySolutionNode* Geo2DNode::clone() const {
+ Geo2DNode* copy = new Geo2DNode();
+ cloneBaseData(copy);
+
+ copy->_sorts = this->_sorts;
+ copy->indexKeyPattern = this->indexKeyPattern;
+ copy->gq = this->gq;
+
+ return copy;
+ }
+
//
// ShardingFilterNode
//
@@ -603,6 +775,12 @@ namespace mongo {
children[0]->appendToString(ss, indent + 2);
}
+ QuerySolutionNode* ShardingFilterNode::clone() const {
+ ShardingFilterNode* copy = new ShardingFilterNode();
+ cloneBaseData(copy);
+ return copy;
+ }
+
//
// KeepMutationsNode
//
@@ -623,6 +801,15 @@ namespace mongo {
children[0]->appendToString(ss, indent + 2);
}
+ QuerySolutionNode* KeepMutationsNode::clone() const {
+ KeepMutationsNode* copy = new KeepMutationsNode();
+ cloneBaseData(copy);
+
+ copy->sorts = this->sorts;
+
+ return copy;
+ }
+
//
// DistinctNode
//
@@ -638,6 +825,19 @@ namespace mongo {
*ss << "bounds = " << bounds.toString() << '\n';
}
+ QuerySolutionNode* DistinctNode::clone() const {
+ DistinctNode* copy = new DistinctNode();
+ cloneBaseData(copy);
+
+ copy->sorts = this->sorts;
+ copy->indexKeyPattern = this->indexKeyPattern;
+ copy->direction = this->direction;
+ copy->bounds = this->bounds;
+ copy->fieldNo = this->fieldNo;
+
+ return copy;
+ }
+
//
// CountNode
//
@@ -653,4 +853,18 @@ namespace mongo {
*ss << "endKey = " << endKey << '\n';
}
+ QuerySolutionNode* CountNode::clone() const {
+ CountNode* copy = new CountNode();
+ cloneBaseData(copy);
+
+ copy->sorts = this->sorts;
+ copy->indexKeyPattern = this->indexKeyPattern;
+ copy->startKey = this->startKey;
+ copy->startKeyInclusive = this->startKeyInclusive;
+ copy->endKey = this->endKey;
+ copy->endKeyInclusive = this->endKeyInclusive;
+
+ return copy;
+ }
+
} // namespace mongo
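Each clone() implementation above follows the same two-step recipe: construct a fresh node, have cloneBaseData() deep-copy the shared state (children and filter), then copy the node's own fields. A self-contained sketch of the pattern with toy types (Expr and ExampleNode are stand-ins, not real classes; the real filter is held by a smart pointer):

    #include <cstddef>
    #include <vector>

    // Toy expression type standing in for MatchExpression.
    struct Expr {
        Expr* shallowClone() const { return new Expr(*this); }
    };

    struct Node {
        std::vector<Node*> children;  // owned
        Expr* filter;                 // owned
        Node() : filter(NULL) {}
        virtual Node* clone() const = 0;
        virtual ~Node() {
            for (std::size_t i = 0; i < children.size(); ++i) delete children[i];
            delete filter;
        }
        // Mirror of cloneBaseData(): deep-copy what every node shares.
        void cloneBaseData(Node* other) const {
            for (std::size_t i = 0; i < children.size(); ++i)
                other->children.push_back(children[i]->clone());
            if (NULL != filter) other->filter = filter->shallowClone();
        }
    };

    struct ExampleNode : Node {
        int direction;  // stands in for a node-specific field
        ExampleNode() : direction(1) {}
        Node* clone() const {
            ExampleNode* copy = new ExampleNode();
            cloneBaseData(copy);          // shared data first...
            copy->direction = direction;  // ...then node-specific fields
            return copy;
        }
    };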
diff --git a/src/mongo/db/query/query_solution.h b/src/mongo/db/query/query_solution.h
index 5a5f6422ef5..70efebcc4c5 100644
--- a/src/mongo/db/query/query_solution.h
+++ b/src/mongo/db/query/query_solution.h
@@ -125,6 +125,23 @@ namespace mongo {
*/
virtual const BSONObjSet& getSort() const = 0;
+ /**
+ * Make a deep copy.
+ */
+ virtual QuerySolutionNode* clone() const = 0;
+
+ /**
+ * Copy base query solution data from 'this' to 'other'.
+ */
+ void cloneBaseData(QuerySolutionNode* other) const {
+ for (size_t i = 0; i < this->children.size(); i++) {
+ other->children.push_back(this->children[i]->clone());
+ }
+ if (NULL != this->filter) {
+ other->filter.reset(this->filter->shallowClone());
+ }
+ }
+
// These are owned here.
vector<QuerySolutionNode*> children;
@@ -213,6 +230,8 @@ namespace mongo {
bool sortedByDiskLoc() const { return false; }
const BSONObjSet& getSort() const { return _sort; }
+ QuerySolutionNode* clone() const;
+
BSONObjSet _sort;
BSONObj indexKeyPattern;
@@ -238,6 +257,8 @@ namespace mongo {
bool sortedByDiskLoc() const { return false; }
const BSONObjSet& getSort() const { return _sort; }
+ QuerySolutionNode* clone() const;
+
BSONObjSet _sort;
// Name of the namespace.
@@ -265,6 +286,8 @@ namespace mongo {
bool sortedByDiskLoc() const { return false; }
const BSONObjSet& getSort() const { return children.back()->getSort(); }
+ QuerySolutionNode* clone() const;
+
BSONObjSet _sort;
};
@@ -281,6 +304,8 @@ namespace mongo {
bool sortedByDiskLoc() const { return true; }
const BSONObjSet& getSort() const { return _sort; }
+ QuerySolutionNode* clone() const;
+
BSONObjSet _sort;
};
@@ -301,6 +326,8 @@ namespace mongo {
}
const BSONObjSet& getSort() const { return _sort; }
+ QuerySolutionNode* clone() const;
+
BSONObjSet _sort;
bool dedup;
@@ -320,6 +347,8 @@ namespace mongo {
const BSONObjSet& getSort() const { return _sorts; }
+ QuerySolutionNode* clone() const;
+
virtual void computeProperties() {
for (size_t i = 0; i < children.size(); ++i) {
children[i]->computeProperties();
@@ -347,6 +376,8 @@ namespace mongo {
bool sortedByDiskLoc() const { return children[0]->sortedByDiskLoc(); }
const BSONObjSet& getSort() const { return children[0]->getSort(); }
+ QuerySolutionNode* clone() const;
+
BSONObjSet _sorts;
};
@@ -365,6 +396,8 @@ namespace mongo {
bool sortedByDiskLoc() const;
const BSONObjSet& getSort() const { return _sorts; }
+ QuerySolutionNode* clone() const;
+
BSONObjSet _sorts;
BSONObj indexKeyPattern;
@@ -422,6 +455,8 @@ namespace mongo {
return _sorts;
}
+ QuerySolutionNode* clone() const;
+
BSONObjSet _sorts;
// The full query tree. Needed when we have positional operators.
@@ -447,6 +482,8 @@ namespace mongo {
const BSONObjSet& getSort() const { return _sorts; }
+ QuerySolutionNode* clone() const;
+
virtual void computeProperties() {
for (size_t i = 0; i < children.size(); ++i) {
children[i]->computeProperties();
@@ -478,6 +515,8 @@ namespace mongo {
bool sortedByDiskLoc() const { return children[0]->sortedByDiskLoc(); }
const BSONObjSet& getSort() const { return children[0]->getSort(); }
+ QuerySolutionNode* clone() const;
+
int limit;
};
@@ -493,6 +532,8 @@ namespace mongo {
bool sortedByDiskLoc() const { return children[0]->sortedByDiskLoc(); }
const BSONObjSet& getSort() const { return children[0]->getSort(); }
+ QuerySolutionNode* clone() const;
+
int skip;
};
@@ -515,6 +556,8 @@ namespace mongo {
const BSONObjSet& getSort() const { return _sorts; }
BSONObjSet _sorts;
+ QuerySolutionNode* clone() const;
+
BSONObj indexKeyPattern;
GeoQuery gq;
};
@@ -531,6 +574,9 @@ namespace mongo {
bool hasField(const string& field) const { return true; }
bool sortedByDiskLoc() const { return false; }
const BSONObjSet& getSort() const { return _sorts; }
+
+ QuerySolutionNode* clone() const;
+
BSONObjSet _sorts;
NearQuery nq;
@@ -553,6 +599,8 @@ namespace mongo {
bool sortedByDiskLoc() const { return false; }
const BSONObjSet& getSort() const { return _sorts; }
+ QuerySolutionNode* clone() const;
+
BSONObjSet _sorts;
NearQuery nq;
@@ -584,6 +632,8 @@ namespace mongo {
bool hasField(const string& field) const { return children[0]->hasField(field); }
bool sortedByDiskLoc() const { return children[0]->sortedByDiskLoc(); }
const BSONObjSet& getSort() const { return children[0]->getSort(); }
+
+ QuerySolutionNode* clone() const;
};
/**
@@ -607,6 +657,8 @@ namespace mongo {
bool sortedByDiskLoc() const { return false; }
const BSONObjSet& getSort() const { return sorts; }
+ QuerySolutionNode* clone() const;
+
// Since we merge in flagged results we have no sort order.
BSONObjSet sorts;
};
@@ -628,6 +680,9 @@ namespace mongo {
bool hasField(const string& field) const { return !indexKeyPattern[field].eoo(); }
bool sortedByDiskLoc() const { return false; }
const BSONObjSet& getSort() const { return sorts; }
+
+ QuerySolutionNode* clone() const;
+
BSONObjSet sorts;
BSONObj indexKeyPattern;
@@ -652,6 +707,9 @@ namespace mongo {
bool hasField(const string& field) const { return true; }
bool sortedByDiskLoc() const { return false; }
const BSONObjSet& getSort() const { return sorts; }
+
+ QuerySolutionNode* clone() const;
+
BSONObjSet sorts;
BSONObj indexKeyPattern;