summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--jstests/auth/repl_auth.js4
-rw-r--r--jstests/core/and.js31
-rw-r--r--jstests/core/and3.js22
-rw-r--r--jstests/core/arrayfind1.js11
-rw-r--r--jstests/core/arrayfind2.js6
-rw-r--r--jstests/core/arrayfind6.js5
-rw-r--r--jstests/core/arrayfind8.js6
-rw-r--r--jstests/core/batch_size.js12
-rw-r--r--jstests/core/capped_empty.js2
-rw-r--r--jstests/core/coveredIndex1.js58
-rw-r--r--jstests/core/coveredIndex2.js20
-rw-r--r--jstests/core/coveredIndex5.js70
-rw-r--r--jstests/core/covered_index_compound_1.js45
-rw-r--r--jstests/core/covered_index_geo_1.js18
-rw-r--r--jstests/core/covered_index_geo_2.js22
-rw-r--r--jstests/core/covered_index_negative_1.js40
-rw-r--r--jstests/core/covered_index_simple_1.js46
-rw-r--r--jstests/core/covered_index_simple_2.js39
-rw-r--r--jstests/core/covered_index_simple_3.js51
-rw-r--r--jstests/core/covered_index_simple_id.js39
-rw-r--r--jstests/core/covered_index_sort_1.js23
-rw-r--r--jstests/core/covered_index_sort_2.js11
-rw-r--r--jstests/core/covered_index_sort_3.js9
-rw-r--r--jstests/core/cursor6.js35
-rw-r--r--jstests/core/cursora.js9
-rw-r--r--jstests/core/distinct_speed1.js2
-rw-r--r--jstests/core/exists6.js70
-rw-r--r--jstests/core/exists9.js1
-rw-r--r--jstests/core/existsa.js13
-rw-r--r--jstests/core/explain1.js32
-rw-r--r--jstests/core/explain2.js37
-rw-r--r--jstests/core/explain4.js70
-rw-r--r--jstests/core/explain5.js55
-rw-r--r--jstests/core/explain6.js52
-rw-r--r--jstests/core/explain7.js193
-rw-r--r--jstests/core/explain8.js24
-rw-r--r--jstests/core/explain9.js24
-rw-r--r--jstests/core/explain_batch_size.js6
-rw-r--r--jstests/core/explaina.js28
-rw-r--r--jstests/core/explainb.js46
-rw-r--r--jstests/core/find8.js4
-rw-r--r--jstests/core/fts_explain.js12
-rw-r--r--jstests/core/geo_2d_explain.js4
-rw-r--r--jstests/core/geo_box2.js8
-rw-r--r--jstests/core/geo_center_sphere1.js2
-rw-r--r--jstests/core/geo_circle1.js3
-rw-r--r--jstests/core/geo_s2nearComplex.js18
-rw-r--r--jstests/core/geo_s2ordering.js4
-rw-r--r--jstests/core/geo_s2twofields.js8
-rw-r--r--jstests/core/hashindex1.js33
-rw-r--r--jstests/core/hint1.js9
-rw-r--r--jstests/core/idhack.js17
-rw-r--r--jstests/core/in3.js28
-rw-r--r--jstests/core/in4.js63
-rw-r--r--jstests/core/in7.js (renamed from jstests/core/ina.js)0
-rw-r--r--jstests/core/in8.js33
-rw-r--r--jstests/core/in9.js35
-rw-r--r--jstests/core/inb.js19
-rw-r--r--jstests/core/index7.js76
-rw-r--r--jstests/core/indexOtherNamespace.js7
-rw-r--r--jstests/core/index_check2.js19
-rw-r--r--jstests/core/index_check3.js18
-rw-r--r--jstests/core/index_check6.js30
-rw-r--r--jstests/core/index_check7.js6
-rw-r--r--jstests/core/index_check8.js21
-rw-r--r--jstests/core/index_elemmatch1.js4
-rw-r--r--jstests/core/index_many.js2
-rw-r--r--jstests/core/indexj.js37
-rw-r--r--jstests/core/indexm.js2
-rw-r--r--jstests/core/indexn.js20
-rw-r--r--jstests/core/indexq.js20
-rw-r--r--jstests/core/indexr.js9
-rw-r--r--jstests/core/indexs.js3
-rw-r--r--jstests/core/indext.js3
-rw-r--r--jstests/core/indexv.js8
-rw-r--r--jstests/core/indexw.js15
-rw-r--r--jstests/core/mod1.js8
-rw-r--r--jstests/core/mr_index.js14
-rw-r--r--jstests/core/ne2.js4
-rw-r--r--jstests/core/or2.js19
-rw-r--r--jstests/core/or3.js6
-rw-r--r--jstests/core/or4.js10
-rw-r--r--jstests/core/or5.js7
-rw-r--r--jstests/core/or6.js35
-rw-r--r--jstests/core/or9.js44
-rw-r--r--jstests/core/orf.js11
-rw-r--r--jstests/core/orl.js2
-rw-r--r--jstests/core/orq.js22
-rw-r--r--jstests/core/profile4.js2
-rw-r--r--jstests/core/proj_key1.js11
-rw-r--r--jstests/core/regex3.js9
-rw-r--r--jstests/core/regex4.js6
-rw-r--r--jstests/core/regex5.js12
-rw-r--r--jstests/core/regex6.js36
-rw-r--r--jstests/core/rename.js1
-rw-r--r--jstests/core/rename7.js1
-rw-r--r--jstests/core/repair.js30
-rw-r--r--jstests/core/sortg.js5
-rw-r--r--jstests/core/sorth.js4
-rw-r--r--jstests/core/sortk.js4
-rw-r--r--jstests/core/type1.js4
-rw-r--r--jstests/core/type3.js12
-rwxr-xr-xjstests/core/useindexonobjgtlt.js13
-rw-r--r--jstests/libs/analyze_plan.js80
-rw-r--r--jstests/noPassthrough/indexbg1.js5
-rw-r--r--jstests/noPassthroughWithMongod/clonecollection.js2
-rw-r--r--jstests/noPassthroughWithMongod/index_check9.js9
-rw-r--r--jstests/noPassthroughWithMongod/index_multi.js2
-rw-r--r--jstests/sharding/auth_repl.js4
-rw-r--r--jstests/sharding/auth_slaveok_routing.js4
-rw-r--r--jstests/sharding/covered_shard_key_indexes.js57
-rw-r--r--jstests/sharding/explain1.js42
-rw-r--r--jstests/sharding/large_skip_one_shard.js2
-rw-r--r--jstests/sharding/limit_push.js3
-rwxr-xr-xjstests/sharding/read_pref.js34
-rw-r--r--jstests/sharding/shard2.js14
-rw-r--r--jstests/sharding/shard3.js17
-rw-r--r--src/mongo/client/parallel.cpp59
-rw-r--r--src/mongo/db/commands/count.cpp1
-rw-r--r--src/mongo/db/exec/index_scan.cpp10
-rw-r--r--src/mongo/db/exec/multi_plan.cpp14
-rw-r--r--src/mongo/db/exec/subplan.cpp8
-rw-r--r--src/mongo/db/pipeline/document_source_cursor.cpp1
-rw-r--r--src/mongo/db/query/SConscript2
-rw-r--r--src/mongo/db/query/explain.cpp90
-rw-r--r--src/mongo/db/query/explain.h15
-rw-r--r--src/mongo/db/query/explain_plan.cpp322
-rw-r--r--src/mongo/db/query/explain_plan.h60
-rw-r--r--src/mongo/db/query/index_bounds_builder_test.cpp50
-rw-r--r--src/mongo/db/query/new_find.cpp221
-rw-r--r--src/mongo/db/query/planner_analysis.cpp8
-rw-r--r--src/mongo/db/query/query_planner_test.cpp90
-rw-r--r--src/mongo/db/query/type_explain.cpp807
-rw-r--r--src/mongo/db/query/type_explain.h292
-rw-r--r--src/mongo/dbtests/querytests.cpp25
-rw-r--r--src/mongo/s/strategy.cpp2
-rw-r--r--src/mongo/shell/shardingtest.js16
137 files changed, 1161 insertions, 3426 deletions
diff --git a/jstests/auth/repl_auth.js b/jstests/auth/repl_auth.js
index d3791274be0..f2e6739be1e 100644
--- a/jstests/auth/repl_auth.js
+++ b/jstests/auth/repl_auth.js
@@ -50,7 +50,7 @@ rsTest.getSecondaries().forEach(function(sec) {
// a couple of times.
for (var x = 0; x < 20; x++) {
var explain = fooDB0.user.find().readPref('secondary').explain();
- assert.eq(1, explain.n);
+ assert.eq(1, explain.executionStats.nReturned);
assert.throws(function() {
explain = barDB0.user.find().readPref('secondary').explain();
@@ -61,7 +61,7 @@ for (var x = 0; x < 20; x++) {
});
explain = barDB1.user.find().readPref('secondary').explain();
- assert.eq(1, explain.n);
+ assert.eq(1, explain.executionStats.nReturned);
}
rsTest.stopSet();
diff --git a/jstests/core/and.js b/jstests/core/and.js
index 4d8c2cd7d49..f05c289966f 100644
--- a/jstests/core/and.js
+++ b/jstests/core/and.js
@@ -13,7 +13,7 @@ function check() {
assert.throws( function() { t.find( {$and:[]} ).toArray() } );
// $and elements must be objects
assert.throws( function() { t.find( {$and:[4]} ).toArray() } );
-
+
// Check equality matching
assert.eq( 1, t.count( {$and:[{a:1}]} ) );
assert.eq( 1, t.count( {$and:[{a:1},{a:2}]} ) );
@@ -21,13 +21,13 @@ function check() {
assert.eq( 0, t.count( {$and:[{a:1},{a:2},{a:3}]} ) );
assert.eq( 1, t.count( {$and:[{a:'foo'}]} ) );
assert.eq( 0, t.count( {$and:[{a:'foo'},{a:'g'}]} ) );
-
+
// Check $and with other fields
assert.eq( 1, t.count( {a:2,$and:[{a:1}]} ) );
assert.eq( 0, t.count( {a:0,$and:[{a:1}]} ) );
assert.eq( 0, t.count( {a:2,$and:[{a:0}]} ) );
assert.eq( 1, t.count( {a:1,$and:[{a:1}]} ) );
-
+
// Check recursive $and
assert.eq( 1, t.count( {a:2,$and:[{$and:[{a:1}]}]} ) );
assert.eq( 0, t.count( {a:0,$and:[{$and:[{a:1}]}]} ) );
@@ -38,10 +38,10 @@ function check() {
assert.eq( 0, t.count( {$and:[{a:0},{$and:[{a:1}]}]} ) );
assert.eq( 0, t.count( {$and:[{a:2},{$and:[{a:0}]}]} ) );
assert.eq( 1, t.count( {$and:[{a:1},{$and:[{a:1}]}]} ) );
-
+
// Some of these cases were more important with an alternative $and syntax
// that was rejected, but they're still valid checks.
-
+
// Check simple regex
assert.eq( 1, t.count( {$and:[{a:/foo/}]} ) );
// Check multiple regexes
@@ -51,17 +51,15 @@ function check() {
// Check regex flags
assert.eq( 0, t.count( {$and:[{a:/^F/},{a:'foo'}]} ) );
assert.eq( 1, t.count( {$and:[{a:/^F/i},{a:'foo'}]} ) );
-
-
-
+
// Check operator
assert.eq( 1, t.count( {$and:[{a:{$gt:0}}]} ) );
-
+
// Check where
assert.eq( 1, t.count( {a:'foo',$where:'this.a=="foo"'} ) );
assert.eq( 1, t.count( {$and:[{a:'foo'}],$where:'this.a=="foo"'} ) );
assert.eq( 1, t.count( {$and:[{a:'foo'}],$where:'this.a=="foo"'} ) );
-
+
// Nested where ok
assert.eq( 1, t.count({$and:[{$where:'this.a=="foo"'}]}) );
assert.eq( 1, t.count({$and:[{a:'foo'},{$where:'this.a=="foo"'}]}) );
@@ -71,15 +69,6 @@ function check() {
check();
t.ensureIndex( {a:1} );
check();
-var e = t.find( {$and:[{a:1}]} ).explain();
-assert.eq( 'BtreeCursor a_1', e.cursor );
-assert.eq( [[1,1]], e.indexBounds.a );
-
-function checkBounds( query ) {
- var e = t.find( query ).explain(true);
- printjson(e);
- assert.eq( 1, e.n );
-}
-checkBounds( {a:1,$and:[{a:2}]} );
-checkBounds( {$and:[{a:1},{a:2}]} );
+assert.eq( 1, t.find({a:1,$and:[{a:2}]}).itcount() );
+assert.eq( 1, t.find({$and:[{a:1},{a:2}]}).itcount() );
diff --git a/jstests/core/and3.js b/jstests/core/and3.js
index 036c63c02f0..3f223265522 100644
--- a/jstests/core/and3.js
+++ b/jstests/core/and3.js
@@ -8,10 +8,10 @@ t.save( {a:'foo'} );
t.ensureIndex( {a:1} );
-function checkScanMatch( query, nscannedObjects, n ) {
+function checkScanMatch( query, docsExamined, n ) {
var e = t.find( query ).hint( {a:1} ).explain();
- assert.eq( nscannedObjects, e.nscannedObjects );
- assert.eq( n, e.n );
+ assert.eq( docsExamined, e.executionStats.totalDocsExamined );
+ assert.eq( n, e.executionStats.nReturned );
}
checkScanMatch( {a:/o/}, 1, 1 );
@@ -51,17 +51,5 @@ checkScanMatch( {$and:[{a:1},{$where:'this.a==1'}]}, 1, 1 );
checkScanMatch( {$and:[{a:1,$where:'this.a==1'}]}, 1, 1 );
checkScanMatch( {a:1,$and:[{a:1},{a:1,$where:'this.a==1'}]}, 1, 1 );
-function checkImpossibleMatch( query ) {
- var e = t.find( query ).explain();
- assert.eq( 0, e.n );
- // The explain output should include the indexBounds field.
- // The presence of the indexBounds field indicates that the
- // query can make use of an index.
- assert('indexBounds' in e, 'index bounds are missing');
-}
-
-// With a single key index, all bounds are utilized.
-assert.eq( [[1,1]], t.find( {$and:[{a:1}]} ).explain().indexBounds.a );
-assert.eq( [[1,1]], t.find( {a:1,$and:[{a:1}]} ).explain().indexBounds.a );
-checkImpossibleMatch( {a:1,$and:[{a:2}]} );
-checkImpossibleMatch( {$and:[{a:1},{a:2}]} );
+assert.eq( 0, t.find({a:1,$and:[{a:2}]}).itcount() );
+assert.eq( 0, t.find({$and:[{a:1},{a:2}]}).itcount() );
diff --git a/jstests/core/arrayfind1.js b/jstests/core/arrayfind1.js
index 539fa6193a1..a731dd6bdf9 100644
--- a/jstests/core/arrayfind1.js
+++ b/jstests/core/arrayfind1.js
@@ -25,16 +25,9 @@ assert.eq( 1 , t.find( { a : { $elemMatch : { x : 2 } } } ).count() , "B1" );
assert.eq( 2 , t.find( { a : { $elemMatch : { x : { $gt : 2 } } } } ).count() , "B2" );
t.ensureIndex( { "a.x" : 1 } );
-assert( t.find( { "a" : { $elemMatch : { x : 1 } } } ).explain().cursor.indexOf( "BtreeC" ) == 0 , "C1" );
-
assert.eq( 1 , t.find( { a : { $elemMatch : { x : 2 } } } ).count() , "D1" );
-
-t.find( { "a.x" : 1 } ).count();
-t.find( { "a.x" : { $gt : 1 } } ).count();
-
-res = t.find( { "a" : { $elemMatch : { x : { $gt : 2 } } } } ).explain()
-assert( res.cursor.indexOf( "BtreeC" ) == 0 , "D2" );
+assert.eq( 3, t.find( { "a.x" : 1 } ).count() , "D2.1" );
+assert.eq( 3, t.find( { "a.x" : { $gt : 1 } } ).count() , "D2.2" );
assert.eq( 2 , t.find( { a : { $elemMatch : { x : { $gt : 2 } } } } ).count() , "D3" );
assert.eq( 2 , t.find( { a : { $ne:2, $elemMatch : { x : { $gt : 2 } } } } ).count() , "E1" );
-assert( t.find( { a : { $ne:2, $elemMatch : { x : { $gt : 2 } } } } ).explain().cursor.indexOf( "BtreeC" ) == 0 , "E2" );
diff --git a/jstests/core/arrayfind2.js b/jstests/core/arrayfind2.js
index c6a78042c3d..ca6b57c518b 100644
--- a/jstests/core/arrayfind2.js
+++ b/jstests/core/arrayfind2.js
@@ -21,9 +21,3 @@ t.save( { a : [ { x : 3 } , { x : 6 } ] } )
go( "no index" );
t.ensureIndex( { a : 1 } );
go( "index(a)" );
-
-t.ensureIndex( { "a.x": 1 } );
-
-assert.eq( {"a.x":[[3,3]]}, t.find( { a : { $all : [ { $elemMatch : { x : 3 } } ] } } ).explain().indexBounds );
-// only first $elemMatch used to find bounds
-assert.eq( {"a.x":[[3,3]]}, t.find( { a : { $all : [ { $elemMatch : { x : 3 } }, { $elemMatch : { y : 5 } } ] } } ).explain().indexBounds );
diff --git a/jstests/core/arrayfind6.js b/jstests/core/arrayfind6.js
index f4531cea96a..9b54d5b2c07 100644
--- a/jstests/core/arrayfind6.js
+++ b/jstests/core/arrayfind6.js
@@ -19,8 +19,3 @@ function checkElemMatchMatches() {
checkElemMatchMatches();
t.ensureIndex( { 'a.b':1 } );
checkElemMatchMatches();
-
-// We currently never use an index for negations of
-// ELEM_MATCH_OBJECT expressions.
-var explain = t.find( { a:{ $not:{ $elemMatch:{ b:{ $ne:2 }, c:3 } } } } ).explain();
-assert.eq( "BasicCursor", explain.cursor );
diff --git a/jstests/core/arrayfind8.js b/jstests/core/arrayfind8.js
index 07d44ace26e..7845bf38f72 100644
--- a/jstests/core/arrayfind8.js
+++ b/jstests/core/arrayfind8.js
@@ -20,12 +20,6 @@ function setIndexKey( key ) {
setIndexKey( 'a' );
-function indexBounds( query ) {
- debug( query );
- debug( t.find( query ).hint( indexSpec ).explain() );
- return t.find( query ).hint( indexSpec ).explain().indexBounds[ indexKey ];
-}
-
/** Check that the query results match the documents in the 'expected' array. */
function assertResults( expected, query, context ) {
debug( query );
diff --git a/jstests/core/batch_size.js b/jstests/core/batch_size.js
index 5af59ab5391..645ee0031ab 100644
--- a/jstests/core/batch_size.js
+++ b/jstests/core/batch_size.js
@@ -63,16 +63,18 @@ assert.eq(15, t.find({a: {$gte: 85}}).sort({b: 1}).hint({b: 1}).batchSize(2).itc
assert.eq(6, t.find({a: {$gte: 85}}).sort({b: 1}).hint({b: 1}).limit(6).itcount(), 'P');
// With explain.
-assert.eq(15, t.find({a: {$gte: 85}}).sort({b: 1}).batchSize(2).explain().n, 'Q');
-assert.eq(6, t.find({a: {$gte: 85}}).sort({b: 1}).limit(6).explain().n, 'R');
+var explain = t.find({a: {$gte: 85}}).sort({b: 1}).batchSize(2).explain();
+assert.eq(15, explain.executionStats.nReturned, 'Q');
+explain = t.find({a: {$gte: 85}}).sort({b: 1}).limit(6).explain();
+assert.eq(6, explain.executionStats.nReturned, 'R');
// Double check that we're not scanning more stuff than we have to.
// In order to get the sort using index 'a', we should need to scan
// about 50 keys and 50 documents.
var explain = t.find({a: {$gte: 50}}).sort({b: 1}).hint({a: 1}).limit(6).explain();
-assert.lte(explain.nscanned, 60, 'S');
-assert.lte(explain.nscannedObjects, 60, 'T');
-assert.eq(explain.n, 6, 'U');
+assert.lte(explain.executionStats.totalKeysExamined, 60, 'S');
+assert.lte(explain.executionStats.totalDocsExamined, 60, 'T');
+assert.eq(explain.executionStats.nReturned, 6, 'U');
// -------
diff --git a/jstests/core/capped_empty.js b/jstests/core/capped_empty.js
index 5b0fb6b8f8e..02c6bef4e45 100644
--- a/jstests/core/capped_empty.js
+++ b/jstests/core/capped_empty.js
@@ -10,7 +10,6 @@ t.insert( { x : 3 } );
t.ensureIndex( { x : 1 } );
assert.eq( 3 , t.count() );
-assert.eq( 1 , t.find( { x : 2 } ).explain().nscanned );
t.runCommand( "emptycapped" );
@@ -21,4 +20,3 @@ t.insert( { x : 2 } );
t.insert( { x : 3 } );
assert.eq( 3 , t.count() );
-assert.eq( 1 , t.find( { x : 2 } ).explain().nscanned );
diff --git a/jstests/core/coveredIndex1.js b/jstests/core/coveredIndex1.js
index ce11f89ceed..54ef179f2b1 100644
--- a/jstests/core/coveredIndex1.js
+++ b/jstests/core/coveredIndex1.js
@@ -2,6 +2,9 @@
t = db["jstests_coveredIndex1"];
t.drop();
+// Include helpers for analyzing explain output.
+load("jstests/libs/analyze_plan.js");
+
t.save({fn: "john", ln: "doe"})
t.save({fn: "jack", ln: "doe"})
t.save({fn: "john", ln: "smith"})
@@ -13,52 +16,73 @@ assert.eq( t.count(), 6, "Not right length" );
// use simple index
t.ensureIndex({ln: 1});
-assert.eq( t.find({ln: "doe"}).explain().indexOnly, false, "Find using covered index but all fields are returned");
-assert.eq( t.find({ln: "doe"}, {ln: 1}).explain().indexOnly, false, "Find using covered index but _id is returned");
-assert.eq( t.find({ln: "doe"}, {ln: 1, _id: 0}).explain().indexOnly, true, "Find is not using covered index");
+assert( !isIndexOnly(t.find({ln: "doe"}).explain().queryPlanner.winningPlan),
+ "Find using covered index but all fields are returned");
+assert( !isIndexOnly(t.find({ln: "doe"}, {ln: 1}).explain().queryPlanner.winningPlan),
+ "Find using covered index but _id is returned");
+assert( isIndexOnly(t.find({ln: "doe"}, {ln: 1, _id: 0}).explain().queryPlanner.winningPlan),
+ "Find is not using covered index");
// this time, without a query spec
// SERVER-2109
//assert.eq( t.find({}, {ln: 1, _id: 0}).explain().indexOnly, true, "Find is not using covered index");
-assert.eq( t.find({}, {ln: 1, _id: 0}).hint({ln: 1}).explain().indexOnly, true, "Find is not using covered index");
+assert( isIndexOnly(t.find({}, {ln: 1, _id: 0}).hint({ln: 1}).explain().queryPlanner.winningPlan),
+ "Find is not using covered index");
// use compound index
t.dropIndex({ln: 1})
t.ensureIndex({ln: 1, fn: 1});
// return 1 field
-assert.eq( t.find({ln: "doe"}, {ln: 1, _id: 0}).explain().indexOnly, true, "Find is not using covered index");
+var plan = t.find({ln: "doe"}, {ln: 1, _id: 0}).explain();
+assert( isIndexOnly(plan.queryPlanner.winningPlan), "Find is not using covered index");
// return both fields, multiple docs returned
-assert.eq( t.find({ln: "doe"}, {ln: 1, fn: 1, _id: 0}).explain().indexOnly, true, "Find is not using covered index");
+var plan = t.find({ln: "doe"}, {ln: 1, fn: 1, _id: 0}).explain();
+assert( isIndexOnly(plan.queryPlanner.winningPlan), "Find is not using covered index");
// match 1 record using both fields
-assert.eq( t.find({ln: "doe", fn: "john"}, {ln: 1, fn: 1, _id: 0}).explain().indexOnly, true, "Find is not using covered index");
+var plan = t.find({ln: "doe", fn: "john"}, {ln: 1, fn: 1, _id: 0}).explain();
+assert( isIndexOnly(plan.queryPlanner.winningPlan), "Find is not using covered index");
// change ordering
-assert.eq( t.find({fn: "john", ln: "doe"}, {fn: 1, ln: 1, _id: 0}).explain().indexOnly, true, "Find is not using covered index");
+var plan = t.find({fn: "john", ln: "doe"}, {fn: 1, ln: 1, _id: 0}).explain();
+assert( isIndexOnly(plan.queryPlanner.winningPlan), "Find is not using covered index");
// ask from 2nd index key
-assert.eq( t.find({fn: "john"}, {fn: 1, _id: 0}).explain().indexOnly, false, "Find is using covered index, but doesnt have 1st key");
+var plan = t.find({fn: "john"}, {fn: 1, _id: 0}).explain();
+assert( !isIndexOnly(plan.queryPlanner.winningPlan),
+ "Find is using covered index, but doesnt have 1st key");
// repeat above but with _id field
t.dropIndex({ln: 1, fn: 1})
t.ensureIndex({_id: 1, ln: 1});
// return 1 field
-assert.eq( t.find({_id: 123, ln: "doe"}, {_id: 1}).explain().indexOnly, true, "Find is not using covered index");
+var plan = t.find({_id: 123, ln: "doe"}, {_id: 1}).explain();
+assert( isIndexOnly(plan.queryPlanner.winningPlan), "Find is not using covered index");
// match 1 record using both fields
-assert.eq( t.find({_id: 123, ln: "doe"}, {ln: 1}).explain().indexOnly, true, "Find is not using covered index");
+var plan = t.find({_id: 123, ln: "doe"}, {ln: 1}).explain();
+assert( isIndexOnly(plan.queryPlanner.winningPlan), "Find is not using covered index");
// change ordering
-assert.eq( t.find({ln: "doe", _id: 123}, {ln: 1, _id: 1}).explain().indexOnly, true, "Find is not using covered index");
+var plan = t.find({ln: "doe", _id: 123}, {ln: 1, _id: 1}).explain();
+assert( isIndexOnly(plan.queryPlanner.winningPlan), "Find is not using covered index");
// ask from 2nd index key
-assert.eq( t.find({ln: "doe"}, {ln: 1}).explain().indexOnly, false, "Find is using covered index, but doesnt have 1st key");
+var plan = t.find({ln: "doe"}, {ln: 1}).explain();
+assert( !isIndexOnly(plan.queryPlanner.winningPlan),
+ "Find is using covered index, but doesnt have 1st key");
// repeat above but with embedded obj
t.dropIndex({_id: 1, ln: 1})
t.ensureIndex({obj: 1});
-assert.eq( t.find({"obj.a": 1}, {obj: 1}).explain().indexOnly, false, "Shouldnt use index when introspecting object");
-assert.eq( t.find({obj: {a: 1, b: "blah"}}).explain().indexOnly, false, "Index doesnt have all fields to cover");
-assert.eq( t.find({obj: {a: 1, b: "blah"}}, {obj: 1, _id: 0}).explain().indexOnly, true, "Find is not using covered index");
+var plan = t.find({"obj.a": 1}, {obj: 1}).explain();
+assert( !isIndexOnly(plan.queryPlanner.winningPlan),
+ "Shouldnt use index when introspecting object");
+var plan = t.find({obj: {a: 1, b: "blah"}}).explain();
+assert( !isIndexOnly(plan.queryPlanner.winningPlan), "Index doesnt have all fields to cover");
+var plan = t.find({obj: {a: 1, b: "blah"}}, {obj: 1, _id: 0}).explain();
+assert( isIndexOnly(plan.queryPlanner.winningPlan), "Find is not using covered index");
// repeat above but with index on sub obj field
t.dropIndex({obj: 1});
t.ensureIndex({"obj.a": 1, "obj.b": 1})
-assert.eq( t.find({"obj.a": 1}, {obj: 1}).explain().indexOnly, false, "Shouldnt use index when introspecting object");
+var plan = t.find({"obj.a": 1}, {obj: 1}).explain()
+assert( !isIndexOnly(plan.queryPlanner.winningPlan),
+ "Shouldnt use index when introspecting object");
assert(t.validate().valid);
diff --git a/jstests/core/coveredIndex2.js b/jstests/core/coveredIndex2.js
index 56a23f43211..6abb280e422 100644
--- a/jstests/core/coveredIndex2.js
+++ b/jstests/core/coveredIndex2.js
@@ -1,6 +1,9 @@
t = db["jstests_coveredIndex2"];
t.drop();
+// Include helpers for analyzing explain output.
+load("jstests/libs/analyze_plan.js");
+
t.save({a: 1})
t.save({a: 2})
assert.eq( t.findOne({a: 1}).a, 1, "Cannot find right record" );
@@ -8,11 +11,18 @@ assert.eq( t.count(), 2, "Not right length" );
// use simple index
t.ensureIndex({a: 1});
-assert.eq( t.find({a:1}).explain().indexOnly, false, "Find using covered index but all fields are returned");
-assert.eq( t.find({a:1}, {a: 1}).explain().indexOnly, false, "Find using covered index but _id is returned");
-assert.eq( t.find({a:1}, {a: 1, _id: 0}).explain().indexOnly, true, "Find is not using covered index");
+var plan = t.find({a:1}).explain();
+assert( !isIndexOnly(plan.queryPlanner.winningPlan),
+ "Find using covered index but all fields are returned");
+var plan = t.find({a:1}, {a: 1}).explain();
+assert( !isIndexOnly(plan.queryPlanner.winningPlan),
+ "Find using covered index but _id is returned");
+var plan = t.find({a:1}, {a: 1, _id: 0}).explain();
+assert( isIndexOnly(plan.queryPlanner.winningPlan),
+ "Find is not using covered index");
// add multikey
t.save({a:[3,4]})
-assert.eq( t.find({a:1}, {a: 1, _id: 0}).explain().indexOnly, false, "Find is using covered index even after multikey insert");
-
+var plan = t.find({a:1}, {a: 1, _id: 0}).explain();
+assert( !isIndexOnly(plan.queryPlanner.winningPlan),
+ "Find is using covered index even after multikey insert");
diff --git a/jstests/core/coveredIndex5.js b/jstests/core/coveredIndex5.js
deleted file mode 100644
index ee383cd93e2..00000000000
--- a/jstests/core/coveredIndex5.js
+++ /dev/null
@@ -1,70 +0,0 @@
-// Test use of covered indexes when there are multiple candidate indexes.
-
-t = db.jstests_coveredIndex5;
-t.drop();
-
-t.ensureIndex( { a:1, b:1 } );
-t.ensureIndex( { a:1, c:1 } );
-
-function checkFields( query, projection ) {
- t.ensureIndex( { z:1 } ); // clear query patterns
- t.dropIndex( { z:1 } );
-
- results = t.find( query, projection ).toArray();
-
- expectedFields = [];
- for ( k in projection ) {
- if ( k != '_id' ) {
- expectedFields.push( k );
- }
- }
-
- vals = [];
- for ( i in results ) {
- r = results[ i ];
- printjson(r);
- assert.eq( 0, r.a );
- assert.eq( expectedFields, Object.keySet( r ) );
- for ( k in projection ) {
- if ( k != '_id' && k != 'a' ) {
- vals.push( r[ k ] );
- }
- }
- }
-
- if ( vals.length != 0 ) {
- vals.sort();
- assert.eq( [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 ], vals );
- }
-}
-
-function checkCursorCovered( cursor, covered, count, query, projection ) {
- checkFields( query, projection );
- explain = t.find( query, projection ).explain( true );
- if (covered) {
- assert.eq( cursor, explain.cursor );
- }
- assert.eq( covered, explain.indexOnly );
- assert.eq( count, explain.n );
-}
-
-for( i = 0; i < 10; ++i ) {
- t.save( { a:0, b:i, c:9-i } );
-}
-
-checkCursorCovered( 'BtreeCursor a_1_b_1', true, 10, { a:0 }, { _id:0, a:1 } );
-
-checkCursorCovered( 'BtreeCursor a_1_b_1', false, 10, { a:0, d:null }, { _id:0, a:1 } );
-checkCursorCovered( 'BtreeCursor a_1_b_1', false, 10, { a:0, d:null }, { _id:0, a:1, b:1 } );
-
-// Covered index on a,c not preferentially selected.
-checkCursorCovered( 'BtreeCursor a_1_b_1', false, 10, { a:0, d:null }, { _id:0, a:1, c:1 } );
-
-t.save( { a:0, c:[ 1, 2 ] } );
-t.save( { a:1 } );
-checkCursorCovered( 'BtreeCursor a_1_b_1', false, 11, { a:0, d:null }, { _id:0, a:1 } );
-
-t.save( { a:0, b:[ 1, 2 ] } );
-t.save( { a:1 } );
-checkCursorCovered( 'BtreeCursor a_1_b_1', false, 12, { a:0, d:null }, { _id:0, a:1 } );
-
diff --git a/jstests/core/covered_index_compound_1.js b/jstests/core/covered_index_compound_1.js
index 7e529785d12..632a2330b44 100644
--- a/jstests/core/covered_index_compound_1.js
+++ b/jstests/core/covered_index_compound_1.js
@@ -1,5 +1,8 @@
// Compound index covered query tests
+// Include helpers for analyzing explain output.
+load("jstests/libs/analyze_plan.js");
+
var coll = db.getCollection("covered_compound_1")
coll.drop()
for (i=0;i<100;i++) {
@@ -9,37 +12,51 @@ coll.ensureIndex({a:1,b:-1,c:1})
// Test equality - all indexed fields queried and projected
var plan = coll.find({a:10, b:"strvar_10", c:0}, {a:1, b:1, c:1, _id:0}).hint({a:1, b:-1, c:1}).explain()
-assert.eq(true, plan.indexOnly, "compound.1.1 - indexOnly should be true on covered query")
-assert.eq(0, plan.nscannedObjects, "compound.1.1 - nscannedObjects should be 0 for covered query")
+assert(isIndexOnly(plan.queryPlanner.winningPlan),
+ "compound.1.1 - indexOnly should be true on covered query")
+assert.eq(0, plan.executionStats.totalDocsExamined,
+ "compound.1.1 - nscannedObjects should be 0 for covered query")
// Test query on subset of fields queried and project all
var plan = coll.find({a:26, b:"strvar_0"}, {a:1, b:1, c:1, _id:0}).hint({a:1, b:-1, c:1}).explain()
-assert.eq(true, plan.indexOnly, "compound.1.2 - indexOnly should be true on covered query")
-assert.eq(0, plan.nscannedObjects, "compound.1.2 - nscannedObjects should be 0 for covered query")
+assert(isIndexOnly(plan.queryPlanner.winningPlan),
+ "compound.1.2 - indexOnly should be true on covered query")
+assert.eq(0, plan.executionStats.totalDocsExamined,
+ "compound.1.2 - nscannedObjects should be 0 for covered query")
// Test query on all fields queried and project subset
var plan = coll.find({a:38, b:"strvar_12", c: 8}, {b:1, c:1, _id:0}).hint({a:1, b:-1, c:1}).explain()
-assert.eq(true, plan.indexOnly, "compound.1.3 - indexOnly should be true on covered query")
-assert.eq(0, plan.nscannedObjects, "compound.1.3 - nscannedObjects should be 0 for covered query")
+assert(isIndexOnly(plan.queryPlanner.winningPlan),
+ "compound.1.3 - indexOnly should be true on covered query")
+assert.eq(0, plan.executionStats.totalDocsExamined,
+ "compound.1.3 - nscannedObjects should be 0 for covered query")
// Test no query
var plan = coll.find({}, {b:1, c:1, _id:0}).hint({a:1, b:-1, c:1}).explain()
-assert.eq(true, plan.indexOnly, "compound.1.4 - indexOnly should be true on covered query")
-assert.eq(0, plan.nscannedObjects, "compound.1.4 - nscannedObjects should be 0 for covered query")
+assert(isIndexOnly(plan.queryPlanner.winningPlan),
+ "compound.1.4 - indexOnly should be true on covered query")
+assert.eq(0, plan.executionStats.totalDocsExamined,
+ "compound.1.4 - nscannedObjects should be 0 for covered query")
// Test range query
var plan = coll.find({a:{$gt:25,$lt:43}}, {b:1, c:1, _id:0}).hint({a:1, b:-1, c:1}).explain()
-assert.eq(true, plan.indexOnly, "compound.1.5 - indexOnly should be true on covered query")
-assert.eq(0, plan.nscannedObjects, "compound.1.5 - nscannedObjects should be 0 for covered query")
+assert(isIndexOnly(plan.queryPlanner.winningPlan),
+ "compound.1.5 - indexOnly should be true on covered query")
+assert.eq(0, plan.executionStats.totalDocsExamined,
+ "compound.1.5 - nscannedObjects should be 0 for covered query")
// Test in query
var plan = coll.find({a:38, b:"strvar_12", c:{$in:[5,8]}}, {b:1, c:1, _id:0}).hint({a:1, b:-1, c:1}).explain()
-assert.eq(true, plan.indexOnly, "compound.1.6 - indexOnly should be true on covered query")
-assert.eq(0, plan.nscannedObjects, "compound.1.6 - nscannedObjects should be 0 for covered query")
+assert(isIndexOnly(plan.queryPlanner.winningPlan),
+ "compound.1.6 - indexOnly should be true on covered query")
+assert.eq(0, plan.executionStats.totalDocsExamined,
+ "compound.1.6 - nscannedObjects should be 0 for covered query")
// Test no result
var plan = coll.find({a:38, b:"strvar_12", c:55},{a:1, b:1, c:1, _id:0}).hint({a:1, b:-1, c:1}).explain()
-assert.eq(true, plan.indexOnly, "compound.1.7 - indexOnly should be true on covered query")
-assert.eq(0, plan.nscannedObjects, "compound.1.7 - nscannedObjects should be 0 for covered query")
+assert(isIndexOnly(plan.queryPlanner.winningPlan),
+ "compound.1.7 - indexOnly should be true on covered query")
+assert.eq(0, plan.executionStats.totalDocsExamined,
+ "compound.1.7 - nscannedObjects should be 0 for covered query")
print('all tests passed')
diff --git a/jstests/core/covered_index_geo_1.js b/jstests/core/covered_index_geo_1.js
deleted file mode 100644
index 1d647dfa94c..00000000000
--- a/jstests/core/covered_index_geo_1.js
+++ /dev/null
@@ -1,18 +0,0 @@
-var coll = db.getCollection("covered_geo_1")
-coll.drop()
-
-coll.insert({_id : 1, loc : [ 5 , 5 ], type : "type1"})
-coll.insert({_id : 2, loc : [ 6 , 6 ], type : "type2"})
-coll.insert({_id : 3, loc : [ 7 , 7 ], type : "type3"})
-
-coll.ensureIndex({loc : "2d", type : 1});
-
-var plan = coll.find({loc : [ 6 , 6 ]}, {loc:1, type:1, _id:0}).hint({loc:"2d", type:1}).explain();
-assert.eq(false, plan.indexOnly, "geo.1.1 - indexOnly should be false on a non covered query")
-assert.neq(0, plan.nscannedObjects, "geo.1.1 - nscannedObjects should not be 0 for a non covered query")
-
-var plan = coll.find({loc : [ 6 , 6 ]}, {type:1, _id:0}).hint({loc:"2d", type:1}).explain();
-assert.eq(false, plan.indexOnly, "geo.1.2 - indexOnly should be false on a non covered query")
-assert.neq(0, plan.nscannedObjects, "geo.1.2 - nscannedObjects should not be 0 for a non covered query")
-
-print("all tests passed") \ No newline at end of file
diff --git a/jstests/core/covered_index_geo_2.js b/jstests/core/covered_index_geo_2.js
deleted file mode 100644
index 52f610b7e64..00000000000
--- a/jstests/core/covered_index_geo_2.js
+++ /dev/null
@@ -1,22 +0,0 @@
-var coll = db.getCollection("covered_geo_2")
-coll.drop()
-
-coll.insert({_id : 1, loc1 : [ 5 , 5 ], type1 : "type1",
- loc2 : [ 5 , 5 ], type2 : 1})
-coll.insert({_id : 2, loc1 : [ 6 , 6 ], type1 : "type2",
- loc2 : [ 5 , 5 ], type2 : 2})
-coll.insert({_id : 3, loc1 : [ 7 , 7 ], type1 : "type3",
- loc2 : [ 5 , 5 ], type2 : 3})
-
-coll.ensureIndex({loc1 : "2dsphere", type1 : 1});
-coll.ensureIndex({type2: 1, loc2 : "2dsphere"});
-
-var plan = coll.find({loc1 : {$nearSphere: [ 6 , 6 ]}}, {loc1:1, type1:1, _id:0}).hint({loc1:"2dsphere", type1:1}).explain();
-assert.eq(false, plan.indexOnly, "geo.2.1 - indexOnly should be false on a non covered query")
-assert.neq(0, plan.nscannedObjects, "geo.2.1 - nscannedObjects should not be 0 for a non covered query")
-
-var plan = coll.find({loc1 : {$nearSphere: [ 6 , 6 ]}}, {type1:1, _id:0}).hint({loc1:"2dsphere", type1:1}).explain();
-assert.eq(false, plan.indexOnly, "geo.2.2 - indexOnly should be false for a non covered query")
-assert.neq(0, plan.nscannedObjects, "geo.2.2 - nscannedObjects should not be 0 for a non covered query")
-
-print("all tests passed")
diff --git a/jstests/core/covered_index_negative_1.js b/jstests/core/covered_index_negative_1.js
index ab03e7566f6..4b538b7c275 100644
--- a/jstests/core/covered_index_negative_1.js
+++ b/jstests/core/covered_index_negative_1.js
@@ -3,6 +3,9 @@
// covered index query. Hence we expect indexOnly=false and
// nscannedObjects > 0
+// Include helpers for analyzing explain output.
+load("jstests/libs/analyze_plan.js");
+
var coll = db.getCollection("covered_negative_1")
coll.drop()
for (i=0;i<100;i++) {
@@ -16,23 +19,31 @@ coll.ensureIndex({f:"hashed"})
// Test no projection
var plan = coll.find({a:10, b:"strvar_10", c:0}).hint({a:1, b:-1, c:1}).explain()
-assert.eq(false, plan.indexOnly, "negative.1.1 - indexOnly should be false on a non covered query")
-assert.neq(0, plan.nscannedObjects, "negative.1.1 - nscannedObjects should not be 0 for a non covered query")
+assert(!isIndexOnly(plan.queryPlanner.winningPlan),
+ "negative.1.1 - indexOnly should be false on a non covered query")
+assert.neq(0, plan.executionStats.totalDocsExamined,
+ "negative.1.1 - docs examined should not be 0 for a non covered query")
// Test projection and not excluding _id
var plan = coll.find({a:10, b:"strvar_10", c:0},{a:1, b:1, c:1}).hint({a:1, b:-1, c:1}).explain()
-assert.eq(false, plan.indexOnly, "negative.1.2 - indexOnly should be false on a non covered query")
-assert.neq(0, plan.nscannedObjects, "negative.1.2 - nscannedObjects should not be 0 for a non covered query")
+assert(!isIndexOnly(plan.queryPlanner.winningPlan),
+ "negative.1.2 - indexOnly should be false on a non covered query")
+assert.neq(0, plan.executionStats.totalDocsExamined,
+ "negative.1.2 - docs examined should not be 0 for a non covered query")
// Test projection of non-indexed field
var plan = coll.find({d:100},{d:1, c:1, _id:0}).hint({d:1}).explain()
-assert.eq(false, plan.indexOnly, "negative.1.3 - indexOnly should be false on a non covered query")
-assert.neq(0, plan.nscannedObjects, "negative.1.3 - nscannedObjects should not be 0 for a non covered query")
+assert(!isIndexOnly(plan.queryPlanner.winningPlan),
+ "negative.1.3 - indexOnly should be false on a non covered query")
+assert.neq(0, plan.executionStats.totalDocsExamined,
+ "negative.1.3 - docs examined should not be 0 for a non covered query")
// Test query and projection on a multi-key index
var plan = coll.find({e:99},{e:1, _id:0}).hint({e:1}).explain()
-assert.eq(false, plan.indexOnly, "negative.1.4 - indexOnly should be false on a non covered query")
-assert.neq(0, plan.nscannedObjects, "negative.1.4 - nscannedObjects should not be 0 for a non covered query")
+assert(!isIndexOnly(plan.queryPlanner.winningPlan),
+ "negative.1.4 - indexOnly should be false on a non covered query")
+assert.neq(0, plan.executionStats.totalDocsExamined,
+ "negative.1.4 - docs examined should not be 0 for a non covered query")
// Commenting out negative.1.5 and 1.6 pending fix in SERVER-8650
// // Test projection and $natural sort
@@ -49,13 +60,16 @@ assert.neq(0, plan.nscannedObjects, "negative.1.4 - nscannedObjects should not b
// Test query on non-indexed field
var plan = coll.find({d:{$lt:1000}},{a:1, b:1, c:1, _id:0}).hint({a:1, b:-1, c:1}).explain()
-//indexOnly should be false but is not due to bug https://jira.mongodb.org/browse/SERVER-8562
-// assert.eq(true, plan.indexOnly, "negative.1.7 - indexOnly should be false on a non covered query")
-assert.neq(0, plan.nscannedObjects, "negative.1.7 - nscannedObjects should not be 0 for a non covered query")
+assert(!isIndexOnly(plan.queryPlanner.winningPlan),
+ "negative.1.7 - indexOnly should be false on a non covered query")
+assert.neq(0, plan.executionStats.totalDocsExamined,
+ "negative.1.7 - docs examined should not be 0 for a non covered query")
// Test query on hashed indexed field
var plan = coll.find({f:10},{f:1, _id:0}).hint({f:"hashed"}).explain()
-assert.eq(false, plan.indexOnly, "negative.1.8 - indexOnly should be false on a non covered query")
-assert.neq(0, plan.nscannedObjects, "negative.1.8 - nscannedObjects should not be 0 for a non covered query")
+assert(!isIndexOnly(plan.queryPlanner.winningPlan),
+ "negative.1.8 - indexOnly should be false on a non covered query")
+assert.neq(0, plan.executionStats.totalDocsExamined,
+ "negative.1.8 - docs examined should not be 0 for a non covered query")
print('all tests passed')
diff --git a/jstests/core/covered_index_simple_1.js b/jstests/core/covered_index_simple_1.js
index 44e3c00a9f8..146f0751f1c 100644
--- a/jstests/core/covered_index_simple_1.js
+++ b/jstests/core/covered_index_simple_1.js
@@ -1,5 +1,8 @@
// Simple covered index query test
+// Include helpers for analyzing explain output.
+load("jstests/libs/analyze_plan.js");
+
var coll = db.getCollection("covered_simple_1")
coll.drop()
for (i=0;i<10;i++) {
@@ -18,38 +21,51 @@ coll.ensureIndex({foo:1})
// Test equality with int value
var plan = coll.find({foo:1}, {foo:1, _id:0}).hint({foo:1}).explain()
-assert.eq(true, plan.indexOnly, "simple.1.1 - indexOnly should be true on covered query")
-assert.eq(0, plan.nscannedObjects, "simple.1.1 - nscannedObjects should be 0 for covered query")
+assert(isIndexOnly(plan.queryPlanner.winningPlan),
+ "simple.1.1 - indexOnly should be true on covered query")
+assert.eq(0, plan.executionStats.totalDocsExamined,
+ "simple.1.1 - docs examined should be 0 for covered query")
// Test equality with string value
var plan = coll.find({foo:"string"}, {foo:1, _id:0}).hint({foo:1}).explain()
-assert.eq(true, plan.indexOnly, "simple.1.2 - indexOnly should be true on covered query")
-assert.eq(0, plan.nscannedObjects, "simple.1.2 - nscannedObjects should be 0 for covered query")
+assert(isIndexOnly(plan.queryPlanner.winningPlan),
+ "simple.1.2 - indexOnly should be true on covered query")
+assert.eq(0, plan.executionStats.totalDocsExamined,
+ "simple.1.2 - docs examined should be 0 for covered query")
// Test equality with doc value
var plan = coll.find({foo:{bar:1}}, {foo:1, _id:0}).hint({foo:1}).explain()
-assert.eq(true, plan.indexOnly, "simple.1.3 - indexOnly should be true on covered query")
-assert.eq(0, plan.nscannedObjects, "simple.1.3 - nscannedObjects should be 0 for covered query")
+assert(isIndexOnly(plan.queryPlanner.winningPlan),
+ "simple.1.3 - indexOnly should be true on covered query")
+assert.eq(0, plan.executionStats.totalDocsExamined,
+ "simple.1.3 - docs examined should be 0 for covered query")
// Test no query
var plan = coll.find({}, {foo:1, _id:0}).hint({foo:1}).explain()
-assert.eq(true, plan.indexOnly, "simple.1.4 - indexOnly should be true on covered query")
-assert.eq(0, plan.nscannedObjects, "simple.1.4 - nscannedObjects should be 0 for covered query")
+assert(isIndexOnly(plan.queryPlanner.winningPlan),
+ "simple.1.4 - indexOnly should be true on covered query")
+assert.eq(0, plan.executionStats.totalDocsExamined,
+ "simple.1.4 - docs examined should be 0 for covered query")
// Test range query
var plan = coll.find({foo:{$gt:2,$lt:6}}, {foo:1, _id:0}).hint({foo:1}).explain()
-assert.eq(true, plan.indexOnly, "simple.1.5 - indexOnly should be true on covered query")
-assert.eq(0, plan.nscannedObjects, "simple.1.5 - nscannedObjects should be 0 for covered query")
+assert(isIndexOnly(plan.queryPlanner.winningPlan),
+ "simple.1.5 - indexOnly should be true on covered query")
+assert.eq(0, plan.executionStats.totalDocsExamined,
+ "simple.1.5 - docs examined should be 0 for covered query")
// Test in query
var plan = coll.find({foo:{$in:[5,8]}}, {foo:1, _id:0}).hint({foo:1}).explain()
-assert.eq(true, plan.indexOnly, "simple.1.6 - indexOnly should be true on covered query")
-assert.eq(0, plan.nscannedObjects, "simple.1.6 - nscannedObjects should be 0 for covered query")
+assert(isIndexOnly(plan.queryPlanner.winningPlan),
+ "simple.1.6 - indexOnly should be true on covered query")
+assert.eq(0, plan.executionStats.totalDocsExamined,
+ "simple.1.6 - docs examined should be 0 for covered query")
// Test no return
var plan = coll.find({foo:"2"}, {foo:1, _id:0}).hint({foo:1}).explain()
-assert.eq(true, plan.indexOnly, "simple.1.7 - indexOnly should be true on covered query")
-assert.eq(0, plan.nscannedObjects, "simple.1.7 - nscannedObjects should be 0 for covered query")
+assert(isIndexOnly(plan.queryPlanner.winningPlan),
+ "simple.1.7 - indexOnly should be true on covered query")
+assert.eq(0, plan.executionStats.totalDocsExamined,
+ "simple.1.7 - docs examined should be 0 for covered query")
print ('all tests pass')
-
diff --git a/jstests/core/covered_index_simple_2.js b/jstests/core/covered_index_simple_2.js
index 313cca439d8..014f235b711 100644
--- a/jstests/core/covered_index_simple_2.js
+++ b/jstests/core/covered_index_simple_2.js
@@ -1,5 +1,8 @@
// Simple covered index query test with unique index
+// Include helpers for analyzing explain output.
+load("jstests/libs/analyze_plan.js");
+
var coll = db.getCollection("covered_simple_2")
coll.drop()
for (i=0;i<10;i++) {
@@ -12,32 +15,44 @@ coll.ensureIndex({foo:1},{unique:true})
// Test equality with int value
var plan = coll.find({foo:1}, {foo:1, _id:0}).hint({foo:1}).explain()
-assert.eq(true, plan.indexOnly, "simple.2.1 - indexOnly should be true on covered query")
-assert.eq(0, plan.nscannedObjects, "simple.2.1 - nscannedObjects should be 0 for covered query")
+assert(isIndexOnly(plan.queryPlanner.winningPlan),
+ "simple.2.1 - indexOnly should be true on covered query")
+assert.eq(0, plan.executionStats.totalDocsExamined,
+ "simple.2.1 - docs examined should be 0 for covered query")
// Test equality with string value
var plan = coll.find({foo:"string"}, {foo:1, _id:0}).hint({foo:1}).explain()
-assert.eq(true, plan.indexOnly, "simple.2.2 - indexOnly should be true on covered query")
-assert.eq(0, plan.nscannedObjects, "simple.2.2 - nscannedObjects should be 0 for covered query")
+assert(isIndexOnly(plan.queryPlanner.winningPlan),
+ "simple.2.2 - indexOnly should be true on covered query")
+assert.eq(0, plan.executionStats.totalDocsExamined,
+ "simple.2.2 - docs examined should be 0 for covered query")
// Test equality with int value on a dotted field
var plan = coll.find({foo:{bar:1}}, {foo:1, _id:0}).hint({foo:1}).explain()
-assert.eq(true, plan.indexOnly, "simple.2.3 - indexOnly should be true on covered query")
-assert.eq(0, plan.nscannedObjects, "simple.2.3 - nscannedObjects should be 0 for covered query")
+assert(isIndexOnly(plan.queryPlanner.winningPlan),
+ "simple.2.3 - indexOnly should be true on covered query")
+assert.eq(0, plan.executionStats.totalDocsExamined,
+ "simple.2.3 - docs examined should be 0 for covered query");
// Test no query
var plan = coll.find({}, {foo:1, _id:0}).hint({foo:1}).explain()
-assert.eq(true, plan.indexOnly, "simple.2.4 - indexOnly should be true on covered query")
-assert.eq(0, plan.nscannedObjects, "simple.2.4 - nscannedObjects should be 0 for covered query")
+assert(isIndexOnly(plan.queryPlanner.winningPlan),
+ "simple.2.4 - indexOnly should be true on covered query")
+assert.eq(0, plan.executionStats.totalDocsExamined,
+ "simple.2.4 - docs examined should be 0 for covered query");
// Test range query
var plan = coll.find({foo:{$gt:2,$lt:6}}, {foo:1, _id:0}).hint({foo:1}).explain()
-assert.eq(true, plan.indexOnly, "simple.2.5 - indexOnly should be true on covered query")
-assert.eq(0, plan.nscannedObjects, "simple.2.5 - nscannedObjects should be 0 for covered query")
+assert(isIndexOnly(plan.queryPlanner.winningPlan),
+ "simple.2.5 - indexOnly should be true on covered query")
+assert.eq(0, plan.executionStats.totalDocsExamined,
+ "simple.2.5 - docs examined should be 0 for covered query");
// Test in query
var plan = coll.find({foo:{$in:[5,8]}}, {foo:1, _id:0}).hint({foo:1}).explain()
-assert.eq(true, plan.indexOnly, "simple.2.6 - indexOnly should be true on covered query")
-assert.eq(0, plan.nscannedObjects, "simple.2.6 - nscannedObjects should be 0 for covered query")
+assert(isIndexOnly(plan.queryPlanner.winningPlan),
+ "simple.2.6 - indexOnly should be true on covered query")
+assert.eq(0, plan.executionStats.totalDocsExamined,
+ "simple.2.6 - docs examined should be 0 for covered query");
print ('all tests pass')
diff --git a/jstests/core/covered_index_simple_3.js b/jstests/core/covered_index_simple_3.js
index ffd80f73b5b..32f411798ec 100644
--- a/jstests/core/covered_index_simple_3.js
+++ b/jstests/core/covered_index_simple_3.js
@@ -1,5 +1,8 @@
// Simple covered index query test with a unique sparse index
+// Include helpers for analyzing explain output.
+load("jstests/libs/analyze_plan.js");
+
var coll = db.getCollection("covered_simple_3");
coll.drop();
for (i=0;i<10;i++) {
@@ -15,44 +18,60 @@ coll.ensureIndex({foo:1}, {sparse:true, unique:true});
// Test equality with int value
var plan = coll.find({foo:1}, {foo:1, _id:0}).hint({foo:1}).explain();
-assert.eq(true, plan.indexOnly, "simple.3.1 - indexOnly should be true on covered query");
-assert.eq(0, plan.nscannedObjects, "simple.3.1 - nscannedObjects should be 0 for covered query");
+assert(isIndexOnly(plan.queryPlanner.winningPlan),
+ "simple.3.1 - indexOnly should be true on covered query")
+assert.eq(0, plan.executionStats.totalDocsExamined,
+ "simple.3.1 - docs examined should be 0 for covered query")
// Test equality with string value
var plan = coll.find({foo:"string"}, {foo:1, _id:0}).hint({foo:1}).explain();
-assert.eq(true, plan.indexOnly, "simple.3.2 - indexOnly should be true on covered query");
-assert.eq(0, plan.nscannedObjects, "simple.3.2 - nscannedObjects should be 0 for covered query");
+assert(isIndexOnly(plan.queryPlanner.winningPlan),
+ "simple.3.2 - indexOnly should be true on covered query")
+assert.eq(0, plan.executionStats.totalDocsExamined,
+ "simple.3.2 - docs examined should be 0 for covered query")
// Test equality with int value on a dotted field
var plan = coll.find({foo:{bar:1}}, {foo:1, _id:0}).hint({foo:1}).explain();
-assert.eq(true, plan.indexOnly, "simple.3.3 - indexOnly should be true on covered query");
-assert.eq(0, plan.nscannedObjects, "simple.3.3 - nscannedObjects should be 0 for covered query");
+assert(isIndexOnly(plan.queryPlanner.winningPlan),
+ "simple.3.3 - indexOnly should be true on covered query")
+assert.eq(0, plan.executionStats.totalDocsExamined,
+ "simple.3.3 - docs examined should be 0 for covered query")
// Test no query
var plan = coll.find({}, {foo:1, _id:0}).hint({foo:1}).explain();
-assert.eq(true, plan.indexOnly, "simple.3.4 - indexOnly should be true on covered query");
-assert.eq(0, plan.nscannedObjects, "simple.3.4 - nscannedObjects should be 0 for covered query");
+assert(isIndexOnly(plan.queryPlanner.winningPlan),
+ "simple.3.4 - indexOnly should be true on covered query")
+assert.eq(0, plan.executionStats.totalDocsExamined,
+ "simple.3.4 - docs examined should be 0 for covered query")
// Test range query
var plan = coll.find({foo:{$gt:2,$lt:6}}, {foo:1, _id:0}).hint({foo:1}).explain();
-assert.eq(true, plan.indexOnly, "simple.3.5 - indexOnly should be true on covered query");
-assert.eq(0, plan.nscannedObjects, "simple.3.5 - nscannedObjects should be 0 for covered query");
+assert(isIndexOnly(plan.queryPlanner.winningPlan),
+ "simple.3.5 - indexOnly should be true on covered query")
+assert.eq(0, plan.executionStats.totalDocsExamined,
+ "simple.3.5 - docs examined should be 0 for covered query")
// Test in query
var plan = coll.find({foo:{$in:[5,8]}}, {foo:1, _id:0}).hint({foo:1}).explain();
-assert.eq(true, plan.indexOnly, "simple.3.6 - indexOnly should be true on covered query");
-assert.eq(0, plan.nscannedObjects, "simple.3.6 - nscannedObjects should be 0 for covered query");
+assert(isIndexOnly(plan.queryPlanner.winningPlan),
+ "simple.3.6 - indexOnly should be true on covered query")
+assert.eq(0, plan.executionStats.totalDocsExamined,
+ "simple.3.6 - docs examined should be 0 for covered query")
// Test $exists true
var plan = coll.find({foo:{$exists:true}}, {foo:1, _id:0}).hint({foo:1}).explain();
-assert.eq(true, plan.indexOnly, "simple.3.7 - indexOnly should be true on covered query");
-assert.eq(0, plan.nscannedObjects, "simple.3.7 - nscannedObjects should be 0 for covered query");
+assert(isIndexOnly(plan.queryPlanner.winningPlan),
+ "simple.3.7 - indexOnly should be true on covered query")
+assert.eq(0, plan.executionStats.totalDocsExamined,
+ "simple.3.7 - docs examined should be 0 for covered query")
// Check that $nin can be covered.
coll.dropIndexes();
coll.ensureIndex({bar: 1});
var plan = coll.find({bar:{$nin:[5,8]}}, {bar:1, _id:0}).hint({bar:1}).explain()
-assert.eq(true, plan.indexOnly, "simple.3.8 - indexOnly should be true on covered query")
-assert.eq(0, plan.nscannedObjects, "simple.3.8 - nscannedObjects should be 0 for covered query")
+assert(isIndexOnly(plan.queryPlanner.winningPlan),
+ "simple.3.8 - indexOnly should be true on covered query")
+assert.eq(0, plan.executionStats.totalDocsExamined,
+ "simple.3.8 - docs examined should be 0 for covered query")
print ('all tests pass')
diff --git a/jstests/core/covered_index_simple_id.js b/jstests/core/covered_index_simple_id.js
index c7f6811a33c..8016854cb0a 100644
--- a/jstests/core/covered_index_simple_id.js
+++ b/jstests/core/covered_index_simple_id.js
@@ -1,5 +1,8 @@
// Simple covered index query test
+// Include helpers for analyzing explain output.
+load("jstests/libs/analyze_plan.js");
+
var coll = db.getCollection("covered_simple_id")
coll.drop()
for (i=0;i<10;i++) {
@@ -11,32 +14,44 @@ coll.insert({_id:null})
// Test equality with int value
var plan = coll.find({_id:1}, {_id:1}).hint({_id:1}).explain()
-assert.eq(true, plan.indexOnly, "simple.id.1 - indexOnly should be true on covered query")
-assert.eq(0, plan.nscannedObjects, "simple.id.1 - nscannedObjects should be 0 for covered query")
+assert(isIndexOnly(plan.queryPlanner.winningPlan),
+ "simple.id.1 - indexOnly should be true on covered query")
+assert.eq(0, plan.executionStats.totalDocsExamined,
+ "simple.id.1 - docs examined should be 0 for covered query")
// Test equality with string value
var plan = coll.find({_id:"string"}, {_id:1}).hint({_id:1}).explain()
-assert.eq(true, plan.indexOnly, "simple.id.2 - indexOnly should be true on covered query")
-assert.eq(0, plan.nscannedObjects, "simple.id.2 - nscannedObjects should be 0 for covered query")
+assert(isIndexOnly(plan.queryPlanner.winningPlan),
+ "simple.id.2 - indexOnly should be true on covered query")
+assert.eq(0, plan.executionStats.totalDocsExamined,
+ "simple.id.2 - docs examined should be 0 for covered query")
// Test equality with int value on a dotted field
var plan = coll.find({_id:{bar:1}}, {_id:1}).hint({_id:1}).explain()
-assert.eq(true, plan.indexOnly, "simple.id.3 - indexOnly should be true on covered query")
-assert.eq(0, plan.nscannedObjects, "simple.id.3 - nscannedObjects should be 0 for covered query")
+assert(isIndexOnly(plan.queryPlanner.winningPlan),
+ "simple.id.3 - indexOnly should be true on covered query")
+assert.eq(0, plan.executionStats.totalDocsExamined,
+ "simple.id.3 - docs examined should be 0 for covered query")
// Test no query
var plan = coll.find({}, {_id:1}).hint({_id:1}).explain()
-assert.eq(true, plan.indexOnly, "simple.id.4 - indexOnly should be true on covered query")
-assert.eq(0, plan.nscannedObjects, "simple.id.4 - nscannedObjects should be 0 for covered query")
+assert(isIndexOnly(plan.queryPlanner.winningPlan),
+ "simple.id.4 - indexOnly should be true on covered query")
+assert.eq(0, plan.executionStats.totalDocsExamined,
+ "simple.id.4 - docs examined should be 0 for covered query")
// Test range query
var plan = coll.find({_id:{$gt:2,$lt:6}}, {_id:1}).hint({_id:1}).explain()
-assert.eq(true, plan.indexOnly, "simple.id.5 - indexOnly should be true on covered query")
-assert.eq(0, plan.nscannedObjects, "simple.id.5 - nscannedObjects should be 0 for covered query")
+assert(isIndexOnly(plan.queryPlanner.winningPlan),
+ "simple.id.5 - indexOnly should be true on covered query")
+assert.eq(0, plan.executionStats.totalDocsExamined,
+ "simple.id.5 - docs examined should be 0 for covered query")
// Test in query
var plan = coll.find({_id:{$in:[5,8]}}, {_id:1}).hint({_id:1}).explain()
-assert.eq(true, plan.indexOnly, "simple.id.6 - indexOnly should be true on covered query")
-assert.eq(0, plan.nscannedObjects, "simple.id.6 - nscannedObjects should be 0 for covered query")
+assert(isIndexOnly(plan.queryPlanner.winningPlan),
+ "simple.id.6 - indexOnly should be true on covered query")
+assert.eq(0, plan.executionStats.totalDocsExamined,
+ "simple.id.6 - docs examined should be 0 for covered query")
print ('all tests pass')
diff --git a/jstests/core/covered_index_sort_1.js b/jstests/core/covered_index_sort_1.js
index adfcb5c6cb6..fd7d77d272e 100644
--- a/jstests/core/covered_index_sort_1.js
+++ b/jstests/core/covered_index_sort_1.js
@@ -1,5 +1,8 @@
// Simple covered index query test with sort
+// Include helpers for analyzing explain output.
+load("jstests/libs/analyze_plan.js");
+
var coll = db.getCollection("covered_sort_1")
coll.drop()
for (i=0;i<10;i++) {
@@ -18,17 +21,23 @@ coll.ensureIndex({foo:1})
// Test no query and sort ascending
var plan = coll.find({}, {foo:1, _id:0}).sort({foo:1}).hint({foo:1}).explain()
-assert.eq(true, plan.indexOnly, "sort.1.1 - indexOnly should be true on covered query")
-assert.eq(0, plan.nscannedObjects, "sort.1.1 - nscannedObjects should be 0 for covered query")
+assert(isIndexOnly(plan.queryPlanner.winningPlan),
+ "sort.1.1 - indexOnly should be true on covered query")
+assert.eq(0, plan.executionStats.totalDocsExamined,
+ "sort.1.1 - docs examined should be 0 for covered query")
// Test no query and sort descending
var plan = coll.find({}, {foo:1, _id:0}).sort({foo:-1}).hint({foo:1}).explain()
-assert.eq(true, plan.indexOnly, "sort.1.2 - indexOnly should be true on covered query")
-assert.eq(0, plan.nscannedObjects, "sort.1.2 - nscannedObjects should be 0 for covered query")
+assert(isIndexOnly(plan.queryPlanner.winningPlan),
+ "sort.1.2 - indexOnly should be true on covered query")
+assert.eq(0, plan.executionStats.totalDocsExamined,
+ "sort.1.2 - docs examined should be 0 for covered query")
// Test range query with sort
var plan = coll.find({foo:{$gt:2}}, {foo:1, _id:0}).sort({foo:-1}).hint({foo:1}).explain()
-assert.eq(true, plan.indexOnly, "sort.1.5 - indexOnly should be true on covered query")
-assert.eq(0, plan.nscannedObjects, "sort.1.5 - nscannedObjects should be 0 for covered query")
+assert(isIndexOnly(plan.queryPlanner.winningPlan),
+ "sort.1.3 - indexOnly should be true on covered query")
+assert.eq(0, plan.executionStats.totalDocsExamined,
+ "sort.1.3 - docs examined should be 0 for covered query")
-print ('all tests pass') \ No newline at end of file
+print ('all tests pass')
diff --git a/jstests/core/covered_index_sort_2.js b/jstests/core/covered_index_sort_2.js
index e5dd48b47af..4315bdc448a 100644
--- a/jstests/core/covered_index_sort_2.js
+++ b/jstests/core/covered_index_sort_2.js
@@ -1,5 +1,8 @@
// Simple covered index query test with sort on _id
+// Include helpers for analyzing explain output.
+load("jstests/libs/analyze_plan.js");
+
var coll = db.getCollection("covered_sort_2")
coll.drop()
for (i=0;i<10;i++) {
@@ -11,7 +14,9 @@ coll.insert({_id:null})
// Test no query
var plan = coll.find({}, {_id:1}).sort({_id:-1}).hint({_id:1}).explain()
-assert.eq(true, plan.indexOnly, "sort.2.1 - indexOnly should be true on covered query")
-assert.eq(0, plan.nscannedObjects, "sort.2.1 - nscannedObjects should be 0 for covered query")
+assert(isIndexOnly(plan.queryPlanner.winningPlan),
+ "sort.2.1 - indexOnly should be true on covered query")
+assert.eq(0, plan.executionStats.totalDocsExamined,
+ "sort.2.1 - docs examined should be 0 for covered query")
-print ('all tests pass') \ No newline at end of file
+print ('all tests pass')
diff --git a/jstests/core/covered_index_sort_3.js b/jstests/core/covered_index_sort_3.js
index 8f5986c4d76..6b5cae9def2 100644
--- a/jstests/core/covered_index_sort_3.js
+++ b/jstests/core/covered_index_sort_3.js
@@ -1,5 +1,8 @@
// Compound index covered query tests with sort
+// Include helpers for analyzing explain output.
+load("jstests/libs/analyze_plan.js");
+
var coll = db.getCollection("covered_sort_3")
coll.drop()
for (i=0;i<100;i++) {
@@ -10,7 +13,9 @@ coll.ensureIndex({a:1,b:-1,c:1})
// Test no query, sort on all fields in index order
var plan = coll.find({}, {b:1, c:1, _id:0}).sort({a:1,b:-1,c:1}).hint({a:1, b:-1, c:1}).explain()
-assert.eq(true, plan.indexOnly, "compound.1.1 - indexOnly should be true on covered query")
-assert.eq(0, plan.nscannedObjects, "compound.1.1 - nscannedObjects should be 0 for covered query")
+assert(isIndexOnly(plan.queryPlanner.winningPlan),
+ "sort.3.1 - indexOnly should be true on covered query")
+assert.eq(0, plan.executionStats.totalDocsExamined,
+ "sort.3.1 - docs examined should be 0 for covered query")
print ('all tests pass')
diff --git a/jstests/core/cursor6.js b/jstests/core/cursor6.js
index 9c50e9ecbb2..05af609e7ea 100644
--- a/jstests/core/cursor6.js
+++ b/jstests/core/cursor6.js
@@ -5,23 +5,6 @@ function eq( one, two ) {
assert.eq( one.b, two.b );
}
-function checkExplain( e, idx, reverse, nScanned ) {
- if ( !reverse ) {
- if ( idx ) {
- assert.eq( "BtreeCursor a_1_b_-1", e.cursor );
- } else {
- assert.eq( "BasicCursor", e.cursor );
- }
- } else {
- if ( idx ) {
- assert.eq( "BtreeCursor a_1_b_-1 reverse", e.cursor );
- } else {
- assert( false );
- }
- }
- assert.eq( nScanned, e.nscanned );
-}
-
function check( indexed ) {
var hint;
if ( indexed ) {
@@ -29,51 +12,37 @@ function check( indexed ) {
} else {
hint = { $natural: 1 };
}
-
- e = r.find().sort( { a: 1, b: 1 } ).hint( hint ).explain();
- checkExplain( e, indexed, false, 4 );
+
f = r.find().sort( { a: 1, b: 1 } ).hint( hint );
eq( z[ 0 ], f[ 0 ] );
eq( z[ 1 ], f[ 1 ] );
eq( z[ 2 ], f[ 2 ] );
eq( z[ 3 ], f[ 3 ] );
- e = r.find().sort( { a: 1, b: -1 } ).hint( hint ).explain();
- checkExplain( e, indexed, false, 4 );
f = r.find().sort( { a: 1, b: -1 } ).hint( hint );
eq( z[ 1 ], f[ 0 ] );
eq( z[ 0 ], f[ 1 ] );
eq( z[ 3 ], f[ 2 ] );
eq( z[ 2 ], f[ 3 ] );
- e = r.find().sort( { a: -1, b: 1 } ).hint( hint ).explain();
- checkExplain( e, indexed, true && indexed, 4 );
f = r.find().sort( { a: -1, b: 1 } ).hint( hint );
eq( z[ 2 ], f[ 0 ] );
eq( z[ 3 ], f[ 1 ] );
eq( z[ 0 ], f[ 2 ] );
eq( z[ 1 ], f[ 3 ] );
- e = r.find( { a: { $gte: 2 } } ).sort( { a: 1, b: -1 } ).hint( hint ).explain();
- checkExplain( e, indexed, false, indexed ? 2 : 4 );
f = r.find( { a: { $gte: 2 } } ).sort( { a: 1, b: -1 } ).hint( hint );
eq( z[ 3 ], f[ 0 ] );
eq( z[ 2 ], f[ 1 ] );
- e = r.find( { a : { $gte: 2 } } ).sort( { a: -1, b: 1 } ).hint( hint ).explain();
- checkExplain( e, indexed, true && indexed, indexed ? 2 : 4 );
f = r.find( { a: { $gte: 2 } } ).sort( { a: -1, b: 1 } ).hint( hint );
eq( z[ 2 ], f[ 0 ] );
eq( z[ 3 ], f[ 1 ] );
- e = r.find( { a : { $gte: 2 } } ).sort( { a: 1, b: 1 } ).hint( hint ).explain();
- checkExplain( e, indexed, false, indexed ? 2 : 4 );
f = r.find( { a: { $gte: 2 } } ).sort( { a: 1, b: 1 } ).hint( hint );
eq( z[ 2 ], f[ 0 ] );
eq( z[ 3 ], f[ 1 ] );
- e = r.find().sort( { a: -1, b: -1 } ).hint( hint ).explain();
- checkExplain( e, indexed, false, 4 );
f = r.find().sort( { a: -1, b: -1 } ).hint( hint );
eq( z[ 3 ], f[ 0 ] );
eq( z[ 2 ], f[ 1 ] );
@@ -97,6 +66,4 @@ r.ensureIndex( { a: 1, b: -1 } );
check( false );
check( true );
-assert.eq( "BasicCursor", r.find().sort( { a: 1, b: -1, z: 1 } ).hint( { $natural: -1 } ).explain().cursor );
-
db.setProfilingLevel( 0 );
diff --git a/jstests/core/cursora.js b/jstests/core/cursora.js
index 6710c1e9dc6..3e565c4835e 100644
--- a/jstests/core/cursora.js
+++ b/jstests/core/cursora.js
@@ -23,8 +23,11 @@ function run( n , atomic ){
var end = null;
try {
start = new Date()
- ex = t.find(function () { num = 2; for (var x = 0; x < 1000; x++) num += 2; return num > 0; }).sort({ _id: -1 }).explain()
- num = ex.n
+ num = t.find(function () {
+ num = 2;
+ for (var x = 0; x < 1000; x++) num += 2;
+ return num > 0;
+ }).sort({ _id: -1 }).itcount();
end = new Date()
}
catch (e) {
@@ -32,7 +35,7 @@ function run( n , atomic ){
join();
throw e;
}
-
+
join()
//print( "cursora.js num: " + num + " time:" + ( end.getTime() - start.getTime() ) )
diff --git a/jstests/core/distinct_speed1.js b/jstests/core/distinct_speed1.js
index 4cae5b0ae06..2e21f5463b9 100644
--- a/jstests/core/distinct_speed1.js
+++ b/jstests/core/distinct_speed1.js
@@ -9,7 +9,7 @@ for ( var i=0; i<10000; i++ ){
assert.eq( 10 , t.distinct("x").length , "A1" );
function fast(){
- t.find().explain().millis;
+ t.find().explain().executionStats.executionTimeMillis;
}
function slow(){
diff --git a/jstests/core/exists6.js b/jstests/core/exists6.js
index 2fa4ba85d49..79d4885283d 100644
--- a/jstests/core/exists6.js
+++ b/jstests/core/exists6.js
@@ -8,64 +8,10 @@ t.save( {} );
t.save( {b:1} );
t.save( {b:null} );
-//---------------------------------
-
-function checkIndexUse( query, usesIndex, index, bounds ) {
- var x = t.find( query ).explain()
- if ( usesIndex ) {
- assert.eq( x.cursor.indexOf(index), 0 , tojson(x) );
- if ( ! x.indexBounds ) x.indexBounds = {}
- assert.eq( bounds, x.indexBounds.b , tojson(x) );
- }
- else {
- assert.eq( 'BasicCursor', x.cursor, tojson(x) );
- }
-}
-
-function checkExists( query, usesIndex, bounds ) {
- checkIndexUse( query, usesIndex, 'BtreeCursor b_1', bounds );
- // Whether we use an index or not, we will always scan all docs.
- assert.eq( 3, t.find( query ).explain().nscanned );
- // 2 docs will match.
- assert.eq( 2, t.find( query ).itcount() );
-}
-
-function checkMissing( query, usesIndex, bounds ) {
- checkIndexUse( query, usesIndex, 'BtreeCursor b_1', bounds );
- // Nscanned changes based on index usage.
- if ( usesIndex ) assert.eq( 2, t.find( query ).explain().nscanned );
- else assert.eq( 3, t.find( query ).explain().nscanned );
- // 1 doc is missing 'b'.
- assert.eq( 1, t.find( query ).itcount() );
-}
-
-function checkExistsCompound( query, usesIndex, bounds ) {
- checkIndexUse( query, usesIndex, 'BtreeCursor', bounds );
- if ( usesIndex ) assert.eq( 3, t.find( query ).explain().nscanned );
- else assert.eq( 3, t.find( query ).explain().nscanned );
- // 2 docs have a:1 and b:exists.
- assert.eq( 2, t.find( query ).itcount() );
-}
-
-function checkMissingCompound( query, usesIndex, bounds ) {
- checkIndexUse( query, usesIndex, 'BtreeCursor', bounds );
- // two possible indexes to use
- // 1 doc should match
- assert.eq( 1, t.find( query ).itcount() );
-}
-
-//---------------------------------
-
-var allValues = [ [ { $minElement:1 }, { $maxElement:1 } ] ];
-var nullNull = [ [ null, null ] ];
-
-// Basic cases
-checkExists( {b:{$exists:true}}, true, allValues );
-// We change this to not -> not -> exists:true, and get allValue for bounds
-// but we use a BasicCursor?
-checkExists( {b:{$not:{$exists:false}}}, false, allValues );
-checkMissing( {b:{$exists:false}}, true, nullNull );
-checkMissing( {b:{$not:{$exists:true}}}, true, nullNull );
+assert.eq( 2, t.find({b:{$exists:true}}).itcount() );
+assert.eq( 2, t.find({b:{$not:{$exists:false}}}).itcount() );
+assert.eq( 1, t.find({b:{$exists:false}}).itcount() );
+assert.eq( 1, t.find({b:{$not:{$exists:true}}}).itcount() );
// Now check existence of second compound field.
t.ensureIndex( {a:1,b:1} );
@@ -73,7 +19,7 @@ t.save( {a:1} );
t.save( {a:1,b:1} );
t.save( {a:1,b:null} );
-checkExistsCompound( {a:1,b:{$exists:true}}, true, allValues );
-checkExistsCompound( {a:1,b:{$not:{$exists:false}}}, true, allValues );
-checkMissingCompound( {a:1,b:{$exists:false}}, true, nullNull );
-checkMissingCompound( {a:1,b:{$not:{$exists:true}}}, true, nullNull );
+assert.eq( 2, t.find({a:1,b:{$exists:true}}).itcount() );
+assert.eq( 2, t.find({a:1,b:{$not:{$exists:false}}}).itcount() );
+assert.eq( 1, t.find({a:1,b:{$exists:false}}).itcount() );
+assert.eq( 1, t.find({a:1,b:{$not:{$exists:true}}}).itcount() );
diff --git a/jstests/core/exists9.js b/jstests/core/exists9.js
index 66378d1b424..75b09018797 100644
--- a/jstests/core/exists9.js
+++ b/jstests/core/exists9.js
@@ -25,7 +25,6 @@ assert.eq( 1, t.count( {a:{$exists:false}} ) );
t.ensureIndex( {a:1} );
assert.eq( 1, t.find( {a:{$exists:true}} ).hint( {a:1} ).itcount() );
assert.eq( 1, t.find( {a:{$exists:false}} ).hint( {a:1} ).itcount() );
-assert.eq( 1, t.find( {a:{$exists:false}} ).hint( {a:1} ).explain().nscanned );
t.drop();
diff --git a/jstests/core/existsa.js b/jstests/core/existsa.js
index 9ef7e9f374c..d1fecc2461e 100644
--- a/jstests/core/existsa.js
+++ b/jstests/core/existsa.js
@@ -13,18 +13,11 @@ function setIndex( _indexKeyField ) {
indexKeySpec = {};
indexKeySpec[ indexKeyField ] = 1;
t.ensureIndex( indexKeySpec, { sparse:true } );
- indexCursorName = 'BtreeCursor ' + indexKeyField + '_1';
}
setIndex( 'a' );
-/** Validate the prefix of 'str'. */
-function assertPrefix( prefix, str ) {
- assert.eq( prefix, str.substring( 0, prefix.length ) );
-}
-
/** @return count when hinting the index to use. */
function hintedCount( query ) {
- assertPrefix( indexCursorName, t.find( query ).hint( indexKeySpec ).explain().cursor );
return t.find( query ).hint( indexKeySpec ).itcount();
}
@@ -33,7 +26,6 @@ function assertMissing( query, expectedMissing, expectedIndexedMissing ) {
expectedMissing = expectedMissing || 1;
expectedIndexedMissing = expectedIndexedMissing || 0;
assert.eq( expectedMissing, t.count( query ) );
- assert.eq( 'BasicCursor', t.find( query ).explain().cursor );
// We also shouldn't get a different count depending on whether
// an index is used or not.
assert.eq( expectedIndexedMissing, hintedCount( query ) );
@@ -43,14 +35,12 @@ function assertMissing( query, expectedMissing, expectedIndexedMissing ) {
function assertExists( query, expectedExists ) {
expectedExists = expectedExists || 2;
assert.eq( expectedExists, t.count( query ) );
- assert.eq( 0, t.find( query ).explain().cursor.indexOf('BtreeCursor') );
// An $exists:true predicate generates no index filters. Add another predicate on the index key
// to trigger use of the index.
andClause = {}
andClause[ indexKeyField ] = { $ne:null };
Object.extend( query, { $and:[ andClause ] } );
assert.eq( expectedExists, t.count( query ) );
- assertPrefix( indexCursorName, t.find( query ).explain().cursor );
assert.eq( expectedExists, hintedCount( query ) );
}
@@ -58,13 +48,11 @@ function assertExists( query, expectedExists ) {
function assertExistsUnindexed( query, expectedExists ) {
expectedExists = expectedExists || 2;
assert.eq( expectedExists, t.count( query ) );
- assert.eq( 'BasicCursor', t.find( query ).explain().cursor );
// Even with another predicate on the index key, the sparse index is disallowed.
andClause = {}
andClause[ indexKeyField ] = { $ne:null };
Object.extend( query, { $and:[ andClause ] } );
assert.eq( expectedExists, t.count( query ) );
- assert.eq( 'BasicCursor', t.find( query ).explain().cursor );
assert.eq( expectedExists, hintedCount( query ) );
}
@@ -111,4 +99,3 @@ t.drop();
t.save( {} );
t.ensureIndex( { a:1 } );
assert.eq( 1, t.find( { a:{ $exists:false } } ).itcount() );
-assert.eq( 'BtreeCursor a_1', t.find( { a:{ $exists:false } } ).explain().cursor );
diff --git a/jstests/core/explain1.js b/jstests/core/explain1.js
index e497bbfb9a9..59d29100507 100644
--- a/jstests/core/explain1.js
+++ b/jstests/core/explain1.js
@@ -18,31 +18,7 @@ assert.eq( 49 , t.find( q ).count() , "D" );
assert.eq( 49 , t.find( q ).itcount() , "E" );
assert.eq( 20 , t.find( q ).limit(20).itcount() , "F" );
-assert.eq( 49 , t.find(q).explain().n , "G" );
-assert.eq( 20 , t.find(q).limit(20).explain().n , "H" );
-assert.eq( 20 , t.find(q).limit(-20).explain().n , "I" );
-assert.eq( 49 , t.find(q).batchSize(20).explain().n , "J" );
-
-// verbose explain output with stats
-// display index bounds
-
-var explainGt = t.find({x: {$gt: 5}}).explain(true);
-var boundsVerboseGt = explainGt.stats.inputStage.indexBounds;
-
-print('explain stats for $gt = ' + tojson(explainGt.stats));
-
-var explainGte = t.find({x: {$gte: 5}}).explain(true);
-var boundsVerboseGte = explainGte.stats.inputStage.indexBounds;
-
-print('explain stats for $gte = ' + tojson(explainGte.stats));
-
-print('index bounds for $gt = ' + tojson(explainGt.indexBounds));
-print('index bounds for $gte = ' + tojson(explainGte.indexBounds));
-
-print('verbose bounds for $gt = ' + tojson(boundsVerboseGt));
-print('verbose bounds for $gte = ' + tojson(boundsVerboseGte));
-
-// Since the verbose bounds are opaque, all we try to confirm is that the
-// verbose bounds for $gt is different from those generated for $gte.
-assert.neq(boundsVerboseGt, boundsVerboseGte,
- 'verbose bounds for $gt and $gte should not be the same');
+assert.eq( 49 , t.find(q).explain().executionStats.nReturned , "G" );
+assert.eq( 20 , t.find(q).limit(20).explain().executionStats.nReturned , "H" );
+assert.eq( 20 , t.find(q).limit(-20).explain().executionStats.nReturned , "I" );
+assert.eq( 49 , t.find(q).batchSize(20).explain().executionStats.nReturned , "J" );
diff --git a/jstests/core/explain2.js b/jstests/core/explain2.js
index b70ffdc0b1e..799f5323598 100644
--- a/jstests/core/explain2.js
+++ b/jstests/core/explain2.js
@@ -1,27 +1,24 @@
+// Test calculation of the 'executionStats.executionTimeMillis' field in explain output.
-t = db.explain2
+t = db.jstests_explain2;
t.drop();
-t.ensureIndex( { a : 1 , b : 1 } );
-
-for ( i=1; i<10; i++ ){
- t.insert( { _id : i , a : i , b : i , c : i } );
+t.ensureIndex( { a:1 } );
+for( i = 1000; i < 4000; i += 1000 ) {
+ t.save( { a:i } );
}
-function go( q , c , b , o ){
- var e = t.find( q ).hint( {a:1,b:1} ).explain();
- assert.eq( c , e.n , "count " + tojson( q ) )
- assert.eq( b , e.nscanned , "nscanned " + tojson( q ) )
- assert.eq( o , e.nscannedObjects , "nscannedObjects " + tojson( q ) )
+// Run a query with one $or clause per a-value, each of which sleeps for 'a' milliseconds.
+function slow() {
+ sleep( this.a );
+ return true;
}
+clauses = [];
+for( i = 1000; i < 4000; i += 1000 ) {
+ clauses.push( { a:i, $where:slow } );
+}
+explain = t.find( { $or:clauses } ).explain( true );
+printjson( explain );
-q = { a : { $gt : 3 } }
-go( q , 6 , 6 , 6 );
-
-q.b = 5
-go( q , 1 , 6 , 1 );
-
-delete q.b
-q.c = 5
-go( q , 1 , 6 , 6 );
-
+// Verify the duration of the whole query.
+assert.gt( explain.executionStats.executionTimeMillis, 1000 - 500 + 2000 - 500 + 3000 - 500 );
diff --git a/jstests/core/explain4.js b/jstests/core/explain4.js
index d6d3d818a72..effd080d8fd 100644
--- a/jstests/core/explain4.js
+++ b/jstests/core/explain4.js
@@ -1,68 +1,18 @@
-// Basic validation of explain output fields.
+// Test that limit is applied by explain.
t = db.jstests_explain4;
t.drop();
-function checkField( explain, name, value ) {
- assert( explain.hasOwnProperty( name ) );
- if ( value != null ) {
- assert.eq( value, explain[ name ], name );
- // Check that the value is of the expected type. SERVER-5288
- assert.eq( typeof( value ), typeof( explain[ name ] ), 'type ' + name );
- }
-}
-
-function checkNonCursorPlanFields( explain, matches, n ) {
- checkField( explain, "n", n );
- checkField( explain, "nscannedObjects", matches );
- checkField( explain, "nscanned", matches );
-}
-
-function checkPlanFields( explain, matches, n ) {
- checkField( explain, "cursor", "BasicCursor" );
- // index related fields do not appear in non-indexed plan
- assert(!("indexBounds" in explain));
- checkNonCursorPlanFields( explain, matches, n );
-}
+t.ensureIndex( { a:1 } );
-function checkFields( matches, sort, limit ) {
- cursor = t.find();
- if ( sort ) {
- print("sort is {a:1}");
- cursor.sort({a:1});
- }
- if ( limit ) {
- print("limit = " + limit);
- cursor.limit( limit );
- }
- explain = cursor.explain( true );
- printjson( explain );
- checkPlanFields( explain, matches, matches > 0 ? 1 : 0 );
- checkField( explain, "scanAndOrder", sort );
- checkField( explain, "millis" );
- checkField( explain, "nYields" );
- checkField( explain, "nChunkSkips", 0 );
- checkField( explain, "isMultiKey", false );
- checkField( explain, "indexOnly", false );
- checkField( explain, "server" );
- checkField( explain, "allPlans" );
- explain.allPlans.forEach( function( x ) { checkPlanFields( x, matches, matches ); } );
+for( i = 0; i < 10; ++i ) {
+ t.save( { a:i, b:0 } );
}
-checkFields( 0, false );
-
-// If there's nothing in the collection, there's no point in verifying that a sort
-// is done.
-// checkFields( 0, true );
-
-t.save( {} );
-checkFields( 1, false );
-checkFields( 1, true );
-
-t.save( {} );
-checkFields( 1, false, 1 );
+explain = t.find( { a:{ $gte:0 }, b:0 } ).sort( { a:1 } )
+ .hint( { a:1 } )
+ .limit( 5 )
+ .explain( true );
-// Check basic fields with multiple clauses.
-t.save( { _id:0 } );
-explain = t.find( { $or:[ { _id:0 }, { _id:1 } ] } ).explain( true );
-checkNonCursorPlanFields( explain, 1, 1 );
+// Five results are expected, matching the limit spec.
+assert.eq( 5, explain.executionStats.nReturned );
diff --git a/jstests/core/explain5.js b/jstests/core/explain5.js
index cce6eab4fa4..22f8ae9f184 100644
--- a/jstests/core/explain5.js
+++ b/jstests/core/explain5.js
@@ -1,44 +1,29 @@
-// Check that the explain result count does proper deduping.
+// Check explain results for a plan that uses an index to obtain the requested sort order.
t = db.jstests_explain5;
t.drop();
-t.ensureIndex( {a:1} );
-t.ensureIndex( {b:1} );
+t.ensureIndex( { a:1 } );
+t.ensureIndex( { b:1 } );
-t.save( {a:[1,2,3],b:[4,5,6]} );
-for( i = 0; i < 10; ++i ) {
- t.save( {} );
+for( i = 0; i < 1000; ++i ) {
+ t.save( { a:i, b:i%3 } );
}
-// Check with a single in order plan.
+// Query with an initial set of documents.
+var explain1 = t.find( { a:{ $gte:0 }, b:2 } ).sort( { a:1 } ).hint( { a:1 } ).explain();
+printjson(explain1);
+var stats1 = explain1.executionStats;
+assert.eq( 333, stats1.nReturned, 'wrong nReturned for explain1' );
+assert.eq( 1000, stats1.totalKeysExamined, 'wrong totalKeysExamined for explain1' );
-explain = t.find( {a:{$gt:0}} ).explain( true );
-assert.eq( 1, explain.n );
-assert.eq( 1, explain.allPlans[ 0 ].n );
-
-// Check with a single out of order plan.
-
-explain = t.find( {a:{$gt:0}} ).sort( {z:1} ).hint( {a:1} ).explain( true );
-assert.eq( 1, explain.n );
-assert.eq( 1, explain.allPlans[ 0 ].n );
-
-// Check with multiple plans.
-
-/* STAGE_MIGRATION:
-// As part of 2.7 we plan to rework explain (see SERVER-10448 for details)
-// so didn't carry over old behavior from multi_plan_runner into multi_plan stage
-// Specifically, missing call to explainMultiPlan, so can't explain.allPlans[1].n below
-
-explain = t.find( {a:{$gt:0},b:{$gt:0}} ).explain( true );
-assert.eq( 1, explain.n );
-assert.eq( 1, explain.allPlans[ 0 ].n );
-assert.eq( 1, explain.allPlans[ 1 ].n );
-
-explain = t.find( {$or:[{a:{$gt:0},b:{$gt:0}},{a:{$gt:-1},b:{$gt:-1}}]} ).explain( true );
-assert.eq( 1, explain.n );
-// Check 'n' for every alternative query plan.
-for (var i = 0; i < explain.allPlans.length; ++i) {
- assert.eq( 1, explain.allPlans[i].n );
+for( i = 1000; i < 2000; ++i ) {
+ t.save( { a:i, b:i%3 } );
}
-*/
+
+// Query with some additional documents.
+var explain2 = t.find( { a:{ $gte:0 }, b:2 } ).sort( { a:1 } ).hint( { a:1 } ).explain();
+printjson(explain2);
+var stats2 = explain2.executionStats;
+assert.eq( 666, stats2.nReturned, 'wrong nReturned for explain2' );
+assert.eq( 2000, stats2.totalKeysExamined, 'wrong totalKeysExamined for explain2' );
diff --git a/jstests/core/explain6.js b/jstests/core/explain6.js
index 47d8d2fd731..7bcc09b8f2a 100644
--- a/jstests/core/explain6.js
+++ b/jstests/core/explain6.js
@@ -1,25 +1,35 @@
-// Test explain result count when a skip parameter is used.
+// Basic test which checks the number of documents returned, keys examined, and documents
+// examined as reported by explain.
t = db.jstests_explain6;
t.drop();
-t.save( {} );
-explain = t.find().skip( 1 ).explain( true );
-assert.eq( 0, explain.n );
-// With only one plan, the skip information is known for the plan. This is an arbitrary
-// implementation detail, but it changes the way n is calculated.
-assert.eq( 0, explain.allPlans[ 0 ].n );
-
-t.ensureIndex( {a:1} );
-explain = t.find( {a:null,b:null} ).skip( 1 ).explain( true );
-assert.eq( 0, explain.n );
-
-printjson( explain );
-assert.eq( 0, explain.allPlans[ 0 ].n );
-
-t.dropIndexes();
-explain = t.find().skip( 1 ).sort({a:1}).explain( true );
-// Skip is applied for an in memory sort.
-assert.eq( 0, explain.n );
-printjson(explain);
-assert.eq( 0, explain.allPlans[ 0 ].n );
+t.ensureIndex( { a:1, b:1 } );
+t.ensureIndex( { b:1, a:1 } );
+
+t.save( { a:0, b:1 } );
+t.save( { a:1, b:0 } );
+
+explain = t.find( { a:{ $gte:0 }, b:{ $gte:0 } } ).explain( true );
+
+assert.eq( 2, explain.executionStats.nReturned );
+assert.eq( 2, explain.executionStats.totalKeysExamined );
+assert.eq( 2, explain.executionStats.totalDocsExamined );
+
+// A limit of 2.
+explain = t.find( { a:{ $gte:0 }, b:{ $gte:0 } } ).limit( -2 ).explain( true );
+assert.eq( 2, explain.executionStats.nReturned );
+
+// A $or query.
+explain = t.find( { $or:[ { a:{ $gte:0 }, b:{ $gte:1 } },
+ { a:{ $gte:1 }, b:{ $gte:0 } } ] } ).explain( true );
+assert.eq( 2, explain.executionStats.nReturned );
+
+// A non $or case where totalKeysExamined != number of results
+t.remove({});
+
+t.save( { a:'0', b:'1' } );
+t.save( { a:'1', b:'0' } );
+explain = t.find( { a:/0/, b:/1/ } ).explain( true );
+assert.eq( 1, explain.executionStats.nReturned );
+assert.eq( 2, explain.executionStats.totalKeysExamined );
diff --git a/jstests/core/explain7.js b/jstests/core/explain7.js
deleted file mode 100644
index f2850e56bea..00000000000
--- a/jstests/core/explain7.js
+++ /dev/null
@@ -1,193 +0,0 @@
-// Test cases for explain()'s nscannedObjects. SERVER-4161
-
-t = db.jstests_explain7;
-t.drop();
-
-t.save( { a:1 } );
-t.ensureIndex( { a:1 } );
-
-function assertExplain( expected, explain, checkAllPlans ) {
- for( field in expected ) {
- assert.eq( expected[ field ], explain[ field ], field );
- }
- if ( checkAllPlans && explain.allPlans && explain.allPlans.length == 1 ) {
- for( field in expected ) {
- assert.eq( expected[ field ], explain.allPlans[ 0 ][ field ], field );
- }
- }
- return explain;
-}
-
-function assertHintedExplain( expected, cursor ) {
- return assertExplain( expected, cursor.hint( { a:1 } ).explain( true ), true );
-}
-
-function assertUnhintedExplain( expected, cursor, checkAllPlans ) {
- return assertExplain( expected, cursor.explain( true ), checkAllPlans );
-}
-
-// Standard query.
-assertHintedExplain( { n:1, nscanned:1, nscannedObjects:1 },
- t.find( { a:1 } ) );
-
-// Covered index query.
-assertHintedExplain( { n:1, nscanned:1, nscannedObjects:0 /* no object loaded */ },
- t.find( { a:1 }, { _id:0, a:1 } ) );
-
-// Covered index query, but matching requires loading document.
-assertHintedExplain( { n:1, nscanned:1, nscannedObjects:1 },
- t.find( { a:1, b:null }, { _id:0, a:1 } ) );
-
-// $returnKey query.
-assertHintedExplain( { n:1, nscanned:1, nscannedObjects:0 },
- t.find( { a:1 } )._addSpecial( "$returnKey", true ) );
-
-// $returnKey query but matching requires loading document.
-assertHintedExplain( { n:1, nscanned:1, nscannedObjects:1 },
- t.find( { a:1, b:null } )._addSpecial( "$returnKey", true ) );
-
-// Skip a result.
-assertHintedExplain( { n:0, nscanned:1, nscannedObjects:1 },
- t.find( { a:1 } ).skip( 1 ) );
-
-// Cursor sorted covered index query.
-assertHintedExplain( { n:1, nscanned:1, nscannedObjects:0, scanAndOrder:false },
- t.find( { a:1 }, { _id:0, a:1 } ).sort( { a:1 } ) );
-
-t.dropIndex( { a:1 } );
-t.ensureIndex( { a:1, b:1 } );
-
-// In memory sort covered index query.
-assertUnhintedExplain( { n:1, nscanned:1, nscannedObjects:1, scanAndOrder:true },
- t.find( { a:{ $gt:0 } }, { _id:0, a:1 } ).sort( { b:1 } )
- .hint( { a:1, b:1 } ) );
-
-// In memory sort $returnKey query.
-assertUnhintedExplain( { n:1, nscanned:1, scanAndOrder:true },
- t.find( { a:{ $gt:0 } } )._addSpecial( "$returnKey", true ).sort( { b:1 } )
- .hint( { a:1, b:1 } ) );
-
-// In memory sort with skip.
-assertUnhintedExplain( { n:0, nscanned:1, nscannedObjects:1 /* The record is still loaded. */ },
- t.find( { a:{ $gt:0 } } ).sort( { b:1 } ).skip( 1 ).hint( { a:1, b:1 } ),
- false );
-
-// With a multikey index.
-t.drop();
-t.ensureIndex( { a:1 } );
-t.save( { a:[ 1, 2 ] } );
-
-assertHintedExplain( { n:1, scanAndOrder:false },
- t.find( { a:{ $gt:0 } }, { _id:0, a:1 } ) );
-assertHintedExplain( { n:1, scanAndOrder:true },
- t.find( { a:{ $gt:0 } }, { _id:0, a:1 } ).sort( { b:1 } ) );
-
-// Dedup matches from multiple query plans.
-t.drop();
-t.ensureIndex( { a:1, b:1 } );
-t.ensureIndex( { b:1, a:1 } );
-t.save( { a:1, b:1 } );
-
-// Document matched by three query plans.
-assertUnhintedExplain( { n:1, nscanned:1, nscannedObjects:1 },
- t.find( { a:{ $gt:0 }, b:{ $gt:0 } } ) );
-
-// Document matched by three query plans, with sorting.
-assertUnhintedExplain( { n:1, nscanned:1, nscannedObjects:1 },
- t.find( { a:{ $gt:0 }, b:{ $gt:0 } } ).sort( { c:1 } ) );
-
-// Document matched by three query plans, with a skip.
-assertUnhintedExplain( { n:0, nscanned:1, nscannedObjects:1 },
- t.find( { a:{ $gt:0 }, b:{ $gt:0 } } ).skip( 1 ) );
-
-// Hybrid ordered and unordered plans.
-
-t.drop();
-t.ensureIndex( { a:1, b:1 } );
-t.ensureIndex( { b:1 } );
-for( i = 0; i < 30; ++i ) {
- t.save( { a:i, b:i } );
-}
-
-// Ordered plan chosen.
-assertUnhintedExplain( { cursor:'BtreeCursor a_1_b_1', n:30, nscanned:30, nscannedObjects:30,
- scanAndOrder:false },
- t.find( { b:{ $gte:0 } } ).sort( { a:1 } ) );
-
-// SERVER-12769: When an index is used to provide a sort, our covering
-// analysis isn't good. This could execute as a covered query, but currently
-// does not.
-/*
-// Ordered plan chosen with a covered index.
-//assertUnhintedExplain( { cursor:'BtreeCursor a_1_b_1', n:30, nscanned:30, nscannedObjects:0,
- //scanAndOrder:false },
- //t.find( { b:{ $gte:0 } }, { _id:0, b:1 } ).sort( { a:1 } ) );
-*/
-
-// Ordered plan chosen, with a skip. Skip is not included in counting nscannedObjects for a single
-// plan.
-assertUnhintedExplain( { cursor:'BtreeCursor a_1_b_1', n:29, nscanned:30, nscannedObjects:30,
- scanAndOrder:false },
- t.find( { b:{ $gte:0 } } ).sort( { a:1 } ).skip( 1 ) );
-
-// Unordered plan chosen.
-assertUnhintedExplain( { cursor:'BtreeCursor b_1', n:1, nscanned:1,
- //nscannedObjects:1, nscannedObjectsAllPlans:2,
- scanAndOrder:true },
- t.find( { b:1 } ).sort( { a:1 } ) );
-
-// Unordered plan chosen and projected.
-assertUnhintedExplain( { cursor:'BtreeCursor b_1', n:1, nscanned:1, nscannedObjects:1,
- scanAndOrder:true },
- t.find( { b:1 }, { _id:0, b:1 } ).sort( { a:1 } ) );
-
-// Unordered plan chosen, with a skip.
-// Note that all plans are equally unproductive here, so we can't test which one is picked reliably.
-assertUnhintedExplain( { n:0 },
- t.find( { b:1 }, { _id:0, b:1 } ).sort( { a:1 } ).skip( 1 ) );
-
-// Unordered plan chosen, $returnKey specified.
-assertUnhintedExplain( { cursor:'BtreeCursor b_1', n:1, nscanned:1, scanAndOrder:true },
- t.find( { b:1 }, { _id:0, b:1 } ).sort( { a:1 } )
- ._addSpecial( "$returnKey", true ) );
-
-// Unordered plan chosen, $returnKey specified, matching requires loading document.
-assertUnhintedExplain( { cursor:'BtreeCursor b_1', n:1, nscanned:1, nscannedObjects:1,
- scanAndOrder:true },
- t.find( { b:1, c:null }, { _id:0, b:1 } ).sort( { a:1 } )
- ._addSpecial( "$returnKey", true ) );
-
-t.ensureIndex( { a:1, b:1, c:1 } );
-
-// Documents matched by four query plans.
-assertUnhintedExplain( { n:30, nscanned:30, nscannedObjects:30,
- //nscannedObjectsAllPlans:90 // Not 120 because deduping occurs before
- // loading results.
- },
- t.find( { a:{ $gte:0 }, b:{ $gte:0 } } ).sort( { b:1 } ) );
-
-for( i = 30; i < 150; ++i ) {
- t.save( { a:i, b:i } );
-}
-
-// Non-covered $or query.
-explain = assertUnhintedExplain( { n:150, nscannedObjects:300 },
- t.find( { $or:[ { a:{ $gte:-1, $lte:200 },
- b:{ $gte:0, $lte:201 } },
- { a:{ $gte:0, $lte:201 },
- b:{ $gte:-1, $lte:200 } } ] },
- { _id:1, a:1, b:1 } ).hint( { a:1, b:1 } ) );
-printjson(explain);
-assert.eq( 150, explain.clauses[ 0 ].nscannedObjects );
-assert.eq( 150, explain.clauses[ 1 ].nscannedObjects );
-
-// Covered $or query.
-explain = assertUnhintedExplain( { n:150, nscannedObjects:0 },
- t.find( { $or:[ { a:{ $gte:-1, $lte:200 },
- b:{ $gte:0, $lte:201 } },
- { a:{ $gte:0, $lte:201 },
- b:{ $gte:-1, $lte:200 } } ] },
- { _id:0, a:1, b:1 } ).hint( { a:1, b:1 } ) );
-printjson(explain);
-assert.eq( 0, explain.clauses[ 0 ].nscannedObjects );
-assert.eq( 0, explain.clauses[ 1 ].nscannedObjects );
diff --git a/jstests/core/explain8.js b/jstests/core/explain8.js
deleted file mode 100644
index fde6adbd8f4..00000000000
--- a/jstests/core/explain8.js
+++ /dev/null
@@ -1,24 +0,0 @@
-// Test calculation of the 'millis' field in explain output.
-
-t = db.jstests_explain8;
-t.drop();
-
-t.ensureIndex( { a:1 } );
-for( i = 1000; i < 4000; i += 1000 ) {
- t.save( { a:i } );
-}
-
-// Run a query with one $or clause per a-value, each of which sleeps for 'a' milliseconds.
-function slow() {
- sleep( this.a );
- return true;
-}
-clauses = [];
-for( i = 1000; i < 4000; i += 1000 ) {
- clauses.push( { a:i, $where:slow } );
-}
-explain = t.find( { $or:clauses } ).explain( true );
-//printjson( explain );
-
-// Verify the duration of the whole query, and of each clause.
-assert.gt( explain.millis, 1000 - 500 + 2000 - 500 + 3000 - 500 );
diff --git a/jstests/core/explain9.js b/jstests/core/explain9.js
deleted file mode 100644
index 80cab856aa7..00000000000
--- a/jstests/core/explain9.js
+++ /dev/null
@@ -1,24 +0,0 @@
-// Test that limit is applied by explain when there are both in order and out of order candidate
-// plans. SERVER-4150
-
-t = db.jstests_explain9;
-t.drop();
-
-t.ensureIndex( { a:1 } );
-
-for( i = 0; i < 10; ++i ) {
- t.save( { a:i, b:0 } );
-}
-
-explain = t.find( { a:{ $gte:0 }, b:0 } ).sort( { a:1 } ).limit( 5 ).explain( true );
-// Five results are expected, matching the limit spec.
-assert.eq( 5, explain.n );
-explain.allPlans.forEach( function( x ) {
- // Five results are expected for the in order plan.
- if ( x.cursor == "BtreeCursor a_1" ) {
- assert.eq( 5, x.n );
- }
- else {
- assert.gte( 5, x.n );
- }
- } );
diff --git a/jstests/core/explain_batch_size.js b/jstests/core/explain_batch_size.js
index 65bc1df40d7..1722052c233 100644
--- a/jstests/core/explain_batch_size.js
+++ b/jstests/core/explain_batch_size.js
@@ -7,13 +7,13 @@
t = db.explain_batch_size;
t.drop();
-n = 3
+var n = 3;
for (i=0; i<n; i++) {
t.save( { x : i } );
}
-q = {};
+var q = {};
assert.eq( n , t.find( q ).count() , "A" );
assert.eq( n , t.find( q ).itcount() , "B" );
-assert.eq( n , t.find( q ).batchSize(1).explain().n , "C" );
+assert.eq( n , t.find( q ).batchSize(1).explain().executionStats.nReturned , "C" );
diff --git a/jstests/core/explaina.js b/jstests/core/explaina.js
deleted file mode 100644
index 65be1f7bc27..00000000000
--- a/jstests/core/explaina.js
+++ /dev/null
@@ -1,28 +0,0 @@
-// Check explain results when an in order plan is selected among mixed in order and out of order
-// plans.
-
-t = db.jstests_explaina;
-t.drop();
-
-t.ensureIndex( { a:1 } );
-t.ensureIndex( { b:1 } );
-
-for( i = 0; i < 1000; ++i ) {
- t.save( { a:i, b:i%3 } );
-}
-
-// Query with an initial set of documents.
-explain1 = t.find( { a:{ $gte:0 }, b:2 } ).sort( { a:1 } ).hint( { a:1 } ).explain();
-printjson(explain1);
-assert.eq( 333, explain1.n, 'wrong n for explain1' );
-assert.eq( 1000, explain1.nscanned, 'wrong nscanned for explain1' );
-
-for( i = 1000; i < 2000; ++i ) {
- t.save( { a:i, b:i%3 } );
-}
-
-// Query with some additional documents.
-explain2 = t.find( { a:{ $gte:0 }, b:2 } ).sort( { a:1 } ).hint ( { a:1 } ).explain();
-printjson(explain2);
-assert.eq( 666, explain2.n, 'wrong n for explain2' );
-assert.eq( 2000, explain2.nscanned, 'wrong nscanned for explain2' );
diff --git a/jstests/core/explainb.js b/jstests/core/explainb.js
deleted file mode 100644
index ab49a38ca72..00000000000
--- a/jstests/core/explainb.js
+++ /dev/null
@@ -1,46 +0,0 @@
-// nscanned and nscannedObjects report results for the winning plan; nscannedAllPlans and
-// nscannedObjectsAllPlans report results for all plans. SERVER-6268
-//
-// This file tests the output of .explain.
-
-t = db.jstests_explainb;
-t.drop();
-
-t.ensureIndex( { a:1, b:1 } );
-t.ensureIndex( { b:1, a:1 } );
-
-t.save( { a:0, b:1 } );
-t.save( { a:1, b:0 } );
-
-explain = t.find( { a:{ $gte:0 }, b:{ $gte:0 } } ).explain( true );
-
-// We don't check explain.cursor because all plans perform the same.
-assert.eq( 2, explain.n );
-// nscanned and nscannedObjects are reported.
-assert.eq( 2, explain.nscanned );
-assert.eq( 2, explain.nscannedObjects );
-
-// A limit of 2.
-explain = t.find( { a:{ $gte:0 }, b:{ $gte:0 } } ).limit( -2 ).explain( true );
-assert.eq( 2, explain.n );
-
-// A $or query.
-explain = t.find( { $or:[ { a:{ $gte:0 }, b:{ $gte:1 } },
- { a:{ $gte:1 }, b:{ $gte:0 } } ] } ).explain( true );
-// One result from the first $or clause
-assert.eq( 1, explain.clauses[ 0 ].n );
-// But 2 total.
-assert.eq( 2, explain.n );
-
-// These are computed by summing the values for each clause.
-printjson(explain);
-assert.eq( 2, explain.n );
-
-// A non $or case where nscanned != number of results
-t.remove({});
-
-t.save( { a:'0', b:'1' } );
-t.save( { a:'1', b:'0' } );
-explain = t.find( { a:/0/, b:/1/ } ).explain( true );
-assert.eq( 1, explain.n );
-assert.eq( 2, explain.nscanned );
diff --git a/jstests/core/find8.js b/jstests/core/find8.js
index 60f66a500e3..3622eba8ae6 100644
--- a/jstests/core/find8.js
+++ b/jstests/core/find8.js
@@ -21,7 +21,3 @@ t.find( { a: { $gt:5,$lt:2} } ).itcount();
// Check that we can record a plan for an 'invalid' range.
assert( t.find( { a: { $gt:5,$lt:2} } ).explain( true ).oldPlan );
}
-
-t.ensureIndex( {b:1} );
-// Check that if we do a table scan of an 'invalid' range in an or clause we don't check subsequent clauses.
-assert.eq( "BasicCursor", t.find( { $or:[{ a: { $gt:5,$lt:2} }, {b:1}] } ).explain().cursor );
diff --git a/jstests/core/fts_explain.js b/jstests/core/fts_explain.js
index bbb933fa12e..263e4b04b38 100644
--- a/jstests/core/fts_explain.js
+++ b/jstests/core/fts_explain.js
@@ -11,9 +11,9 @@ res = coll.insert({content: "some data"});
assert.writeOK(res);
var explain = coll.find({$text:{$search: "\"a\" -b -\"c\""}}).explain(true);
-assert.eq(explain.cursor, "TextCursor");
-assert.eq(explain.stats.stage, "TEXT");
-assert.eq(explain.stats.parsedTextQuery.terms, ["a"]);
-assert.eq(explain.stats.parsedTextQuery.negatedTerms, ["b"]);
-assert.eq(explain.stats.parsedTextQuery.phrases, ["a"]);
-assert.eq(explain.stats.parsedTextQuery.negatedPhrases, ["c"]);
+var stage = explain.executionStats.executionStages;
+assert.eq(stage.stage, "TEXT");
+assert.eq(stage.parsedTextQuery.terms, ["a"]);
+assert.eq(stage.parsedTextQuery.negatedTerms, ["b"]);
+assert.eq(stage.parsedTextQuery.phrases, ["a"]);
+assert.eq(stage.parsedTextQuery.negatedPhrases, ["c"]);
diff --git a/jstests/core/geo_2d_explain.js b/jstests/core/geo_2d_explain.js
index c9bfe624436..f1a1e2887e4 100644
--- a/jstests/core/geo_2d_explain.js
+++ b/jstests/core/geo_2d_explain.js
@@ -24,5 +24,5 @@ var explain = t.find({loc: {$near: [40, 40]}, _id: {$lt: 50}}).explain();
print('explain = ' + tojson(explain));
-assert.eq(explain.n, explain.nscannedObjects);
-assert.lte(explain.n, explain.nscanned);
+var stats = explain.executionStats;
+assert.eq(stats.nReturned, stats.totalDocsExamined);
diff --git a/jstests/core/geo_box2.js b/jstests/core/geo_box2.js
index 1ebe5843bd7..dd114ae5c2e 100644
--- a/jstests/core/geo_box2.js
+++ b/jstests/core/geo_box2.js
@@ -16,11 +16,3 @@ t.dropIndex( { "loc" : "2d" } )
t.ensureIndex({"loc" : "2d"} , {"min" : 0, "max" : 10})
assert.eq( 9 , t.find({loc : {$within : {$box : [[4,4],[6,6]]}}}).itcount() , "B1" );
-
-// 'indexBounds.loc' in explain output should be filled in with at least
-// one bounding box.
-// Actual values is dependent on implementation of 2d execution stage.
-var explain = t.find({loc : {$within : {$box : [[4,4],[6,6]]}}}).explain(true);
-print( 'explain = ' + tojson(explain) );
-assert.neq( undefined, explain.indexBounds.loc, "C1" );
-assert.gt( explain.indexBounds.loc.length, 0, "C2" );
diff --git a/jstests/core/geo_center_sphere1.js b/jstests/core/geo_center_sphere1.js
index 8beff537d12..a0539965ccc 100644
--- a/jstests/core/geo_center_sphere1.js
+++ b/jstests/core/geo_center_sphere1.js
@@ -89,7 +89,7 @@ function test(index) {
print( 'explain for ' + tojson( q , '' , true ) + ' = ' + tojson( explain ) );
// The index should be at least minimally effective in preventing the full collection
// scan.
- assert.gt( t.find().count(), explain.nscanned ,
+ assert.gt( t.find().count(), explain.executionStats.totalKeysExamined ,
"nscanned : " + tojson( searches[i] ) )
}
}
diff --git a/jstests/core/geo_circle1.js b/jstests/core/geo_circle1.js
index 4a8a83218ab..d74e6a1eea3 100644
--- a/jstests/core/geo_circle1.js
+++ b/jstests/core/geo_circle1.js
@@ -41,5 +41,6 @@ for ( i=0; i<searches.length; i++ ){
print( 'explain for ' + tojson( q , '' , true ) + ' = ' + tojson( explain ) );
// The index should be at least minimally effective in preventing the full collection
// scan.
- assert.gt( t.find().count(), explain.nscanned , "nscanned : " + tojson( searches[i] ) );
+ assert.gt( t.find().count(), explain.executionStats.totalKeysExamined,
+ "nscanned : " + tojson( searches[i] ) );
}
diff --git a/jstests/core/geo_s2nearComplex.js b/jstests/core/geo_s2nearComplex.js
index 835dfe88481..9c6ac0098be 100644
--- a/jstests/core/geo_s2nearComplex.js
+++ b/jstests/core/geo_s2nearComplex.js
@@ -164,7 +164,7 @@ uniformPoints(origin, 1000, 0.5, 1.5);
validateOrdering({geo: {$geoNear: {$geometry: originGeo}}})
print("Millis for uniform:")
-print(t.find(query).explain().millis)
+print(t.find(query).explain().executionStats.executionTimeMillis);
print("Total points:");
print(t.find(query).itcount());
@@ -176,7 +176,7 @@ uniformPointsWithGaps(origin, 1000, 1, 10.0, 5, 10);
validateOrdering({geo: {$geoNear: {$geometry: originGeo}}})
print("Millis for uniform with gaps:")
-print(t.find(query).explain().millis)
+print(t.find(query).explain().executionStats.executionTimeMillis);
print("Total points:");
print(t.find(query).itcount());
@@ -189,7 +189,7 @@ uniformPointsWithClusters(origin, 1000, 1, 10.0, 5, 10, 100);
validateOrdering({geo: {$geoNear: {$geometry: originGeo}}})
print("Millis for uniform with clusters:");
-print(t.find(query).explain().millis);
+print(t.find(query).explain().executionStats.executionTimeMillis);
print("Total points:");
print(t.find(query).itcount());
@@ -209,7 +209,8 @@ uniformPoints(origin, 50, 0.5, 1.5);
validateOrdering({geo: {$geoNear: {$geometry: originGeo}}})
print("Millis for uniform near pole:")
-print(t.find({geo: {$geoNear: {$geometry: originGeo}}}).explain().millis)
+print(t.find({geo: {$geoNear: {$geometry: originGeo}}})
+ .explain().executionStats.executionTimeMillis);
assert.eq(t.find({geo: {$geoNear: {$geometry: originGeo}}}).itcount(), 50);
t.drop()
@@ -226,7 +227,8 @@ uniformPoints(origin, 50, 0.5, 1.5);
validateOrdering({geo: {$geoNear: {$geometry: originGeo}}})
print("Millis for uniform on meridian:")
-print(t.find({geo: {$near: {$geometry: originGeo}}}).explain().millis)
+print(t.find({geo: {$geoNear: {$geometry: originGeo}}})
+ .explain().executionStats.executionTimeMillis);
assert.eq(t.find({geo: {$geoNear: {$geometry: originGeo}}}).itcount(), 50);
t.drop()
@@ -243,7 +245,8 @@ uniformPoints(origin, 50, 0.5, 1.5);
validateOrdering({geo: {$near: {$geometry: originGeo}}})
print("Millis for uniform on negative meridian:");
-print(t.find({geo: {$near: {$geometry: originGeo}}}).explain().millis);
+print(t.find({geo: {$geoNear: {$geometry: originGeo}}})
+ .explain().executionStats.executionTimeMillis);
assert.eq(t.find({geo: {$near: {$geometry: originGeo}}}).itcount(), 50);
// Near search with points that are really far away.
@@ -263,6 +266,7 @@ assert.eq(cur.itcount(), 10);
cur = t.find({geo: {$near: {$geometry: originGeo}}})
print("Near search on very distant points:");
-print(t.find({geo: {$near: {$geometry: originGeo}}}).explain().millis);
+print(t.find({geo: {$geoNear: {$geometry: originGeo}}})
+ .explain().executionStats.executionTimeMillis);
pt = cur.next();
assert(pt)
diff --git a/jstests/core/geo_s2ordering.js b/jstests/core/geo_s2ordering.js
index 13847b08745..3dd75ff5785 100644
--- a/jstests/core/geo_s2ordering.js
+++ b/jstests/core/geo_s2ordering.js
@@ -35,8 +35,8 @@ function runTest(index) {
iterations = 10;
for (var x = 0; x < iterations; ++x) {
res = t.find({nongeo: needle, geo: {$within: {$centerSphere: [[0,0], Math.PI/180.0]}}})
- if (res.explain().millis < mintime) {
- mintime = res.explain().millis
+ if (res.explain().executionStats.executionTimeMillis < mintime) {
+ mintime = res.explain().executionStats.executionTimeMillis;
resultcount = res.itcount()
}
}
diff --git a/jstests/core/geo_s2twofields.js b/jstests/core/geo_s2twofields.js
index 1ee7d8d289f..039223aadcf 100644
--- a/jstests/core/geo_s2twofields.js
+++ b/jstests/core/geo_s2twofields.js
@@ -42,9 +42,13 @@ function semiRigorousTime(func) {
function timeWithoutAndWithAnIndex(index, query) {
t.dropIndex(index);
- var withoutTime = semiRigorousTime(function() { return t.find(query).explain().millis; });
+ var withoutTime = semiRigorousTime(function() {
+ return t.find(query).explain().executionStats.executionTimeMillis;
+ });
t.ensureIndex(index);
- var withTime = semiRigorousTime(function() { return t.find(query).explain().millis; });
+ var withTime = semiRigorousTime(function() {
+ return t.find(query).explain().executionStats.executionTimeMillis;
+ });
t.dropIndex(index);
return [withoutTime, withTime];
}
diff --git a/jstests/core/hashindex1.js b/jstests/core/hashindex1.js
index 34bd6dc0725..dcb75dafaf1 100644
--- a/jstests/core/hashindex1.js
+++ b/jstests/core/hashindex1.js
@@ -1,6 +1,9 @@
var t = db.hashindex1;
t.drop()
+// Include helpers for analyzing explain output.
+load("jstests/libs/analyze_plan.js");
+
//test non-single field hashed indexes don't get created (maybe change later)
var badspec = {a : "hashed" , b : 1};
t.ensureIndex( badspec );
@@ -34,36 +37,30 @@ assert.eq( t.find({a : 3}).hint(goodspec).toArray().length , 1);
//test right obj is found
assert.eq( t.find({a : 3.1}).hint(goodspec).toArray()[0].a , 3.1);
-//test that hashed cursor is used when it should be
-var cursorname = "BtreeCursor a_hashed";
-assert.eq( t.find({a : 1}).explain().cursor ,
- cursorname ,
- "not using hashed cursor");
+// Make sure we're using the hashed index.
+var explain = t.find({a : 1}).explain();
+assert( isIxscan(explain.queryPlanner.winningPlan), "not using hashed index");
// SERVER-12222
//printjson( t.find({a : {$gte : 3 , $lte : 3}}).explain() )
//assert.eq( t.find({a : {$gte : 3 , $lte : 3}}).explain().cursor ,
// cursorname ,
// "not using hashed cursor");
-assert.neq( t.find({c : 1}).explain().cursor ,
- cursorname ,
- "using irrelevant hashed cursor");
+var explain = t.find({c : 1}).explain();
+assert( !isIxscan(explain.queryPlanner.winningPlan), "using irrelevant hashed index");
-printjson( t.find({a : {$in : [1,2]}}).explain() )
// Hash index used with a $in set membership predicate.
-assert.eq( t.find({a : {$in : [1,2]}}).explain()["cursor"],
- "BtreeCursor a_hashed",
- "not using hashed cursor");
+var explain = t.find({a : {$in : [1,2]}}).explain();
+printjson(explain);
+assert( isIxscan(explain.queryPlanner.winningPlan), "not using hashed index");
// Hash index used with a singleton $and predicate conjunction.
-assert.eq( t.find({$and : [{a : 1}]}).explain()["cursor"],
- "BtreeCursor a_hashed",
- "not using hashed cursor");
+var explain = t.find({$and : [{a : 1}]}).explain();
+assert( isIxscan(explain.queryPlanner.winningPlan), "not using hashed index");
// Hash index used with a non singleton $and predicate conjunction.
-assert.eq( t.find({$and : [{a : {$in : [1,2]}},{a : {$gt : 1}}]}).explain()["cursor"],
- "BtreeCursor a_hashed",
- "not using hashed cursor");
+var explain = t.find({$and : [{a : {$in : [1,2]}},{a : {$gt : 1}}]}).explain();
+assert( isIxscan(explain.queryPlanner.winningPlan), "not using hashed index");
//test creation of index based on hash of _id index
var goodspec2 = {'_id' : "hashed"};
diff --git a/jstests/core/hint1.js b/jstests/core/hint1.js
index b5a580f2b93..1de06fd4e41 100644
--- a/jstests/core/hint1.js
+++ b/jstests/core/hint1.js
@@ -1,16 +1,7 @@
-
p = db.jstests_hint1;
p.drop();
p.save( { ts: new Date( 1 ), cls: "entry", verticals: "alleyinsider", live: true } );
p.ensureIndex( { ts: 1 } );
-e = p.find( { live: true, ts: { $lt: new Date( 1234119308272 ) }, cls: "entry", verticals: "alleyinsider" } ).sort( { ts: -1 } ).hint( { ts: 1 } ).explain();
-assert.eq(e.indexBounds.ts[0][0].getTime(), new Date(1234119308272).getTime(), "A");
-
-//printjson(e);
-
-assert.eq( /*just below min date is bool true*/true, e.indexBounds.ts[0][1], "B");
-
assert.eq(1, p.find({ live: true, ts: { $lt: new Date(1234119308272) }, cls: "entry", verticals: "alleyinsider" }).sort({ ts: -1 }).hint({ ts: 1 }).count());
-
diff --git a/jstests/core/idhack.js b/jstests/core/idhack.js
index e40c043d455..fa9ed5287c6 100644
--- a/jstests/core/idhack.js
+++ b/jstests/core/idhack.js
@@ -2,6 +2,8 @@
t = db.idhack
t.drop()
+// Include helpers for analyzing explain output.
+load("jstests/libs/analyze_plan.js");
t.insert( { _id : { x : 1 } , z : 1 } )
t.insert( { _id : { x : 2 } , z : 2 } )
@@ -26,28 +28,25 @@ assert.eq( 8 , t.findOne( { _id : 3 } ).z , "C3" )
var query = { _id : { x : 2 } };
var explain = t.find( query ).explain( true );
print( "explain for " + tojson( query , "" , true ) + " = " + tojson( explain ) );
-assert.eq( 1 , explain.n , "D1" );
-assert.eq( 1 , explain.nscanned , "D2" );
-assert.neq( undefined , explain.cursor , "D3" );
-assert.neq( "" , explain.cursor , "D4" );
+assert.eq( 1 , explain.executionStats.nReturned , "D1" );
+assert.eq( 1 , explain.executionStats.totalKeysExamined , "D2" );
+assert( isIdhack(explain.queryPlanner.winningPlan), "D3" );
// ID hack cannot be used with hint().
-var query = { _id : { x : 2 } };
-var explain = t.find( query ).explain();
t.ensureIndex( { _id : 1 , a : 1 } );
var hintExplain = t.find( query ).hint( { _id : 1 , a : 1 } ).explain();
print( "explain for hinted query = " + tojson( hintExplain ) );
-assert.neq( explain.cursor, hintExplain.cursor, "E1" );
+assert( !isIdhack(hintExplain.queryPlanner.winningPlan), "E1" );
// ID hack cannot be used with skip().
var skipExplain = t.find( query ).skip(1).explain();
print( "explain for skip query = " + tojson( skipExplain ) );
-assert.neq( explain.cursor, skipExplain.cursor, "F1" );
+assert( !isIdhack(skipExplain.queryPlanner.winningPlan), "F1" );
// Covered query returning _id field only can be handled by ID hack.
var coveredExplain = t.find( query, { _id : 1 } ).explain();
print( "explain for covered query = " + tojson( coveredExplain ) );
-assert.eq( explain.cursor, coveredExplain.cursor, "G1" );
+assert( isIdhack(coveredExplain.queryPlanner.winningPlan), "G1" );
// Check doc from covered ID hack query.
assert.eq( { _id : { x: 2 } }, t.findOne( query, { _id : 1 } ), "G2" );
diff --git a/jstests/core/in3.js b/jstests/core/in3.js
index b0a8bb7b81f..5e7e587629f 100644
--- a/jstests/core/in3.js
+++ b/jstests/core/in3.js
@@ -1,11 +1,23 @@
-t = db.jstests_in3;
+// SERVER-2829 Test arrays matching themselves within a $in expression.
-t.drop();
-t.ensureIndex( {i:1} );
-assert.eq( {i:[[3,3]]}, t.find( {i:{$in:[3]}} ).explain().indexBounds , "A1" );
-assert.eq( {i:[[3,3],[6,6]]}, t.find( {i:{$in:[3,6]}} ).explain().indexBounds , "A2" );
+t = db.jstests_in8;
+t.drop();
-for ( var i=0; i<20; i++ )
- t.insert( { i : i } );
+t.save( {key: [1]} );
+t.save( {key: ['1']} );
+t.save( {key: [[2]]} );
-assert.eq( 3 , t.find( {i:{$in:[3,6]}} ).explain().nscanned , "B1" )
+function doTest() {
+ assert.eq( 1, t.count( {key:[1]} ) );
+ assert.eq( 1, t.count( {key:{$in:[[1]]}} ) );
+ assert.eq( 1, t.count( {key:{$in:[[1]],$ne:[2]}} ) );
+ assert.eq( 1, t.count( {key:{$in:[['1']],$type:2}} ) );
+ assert.eq( 1, t.count( {key:['1']} ) );
+ assert.eq( 1, t.count( {key:{$in:[['1']]}} ) );
+ assert.eq( 1, t.count( {key:[2]} ) );
+ assert.eq( 1, t.count( {key:{$in:[[2]]}} ) );
+}
+
+doTest();
+t.ensureIndex( {key:1} );
+doTest();
diff --git a/jstests/core/in4.js b/jstests/core/in4.js
index 3e3dca29528..cbe28e2e2df 100644
--- a/jstests/core/in4.js
+++ b/jstests/core/in4.js
@@ -1,42 +1,35 @@
-t = db.jstests_in4;
+// SERVER-2343 Test $in empty array matching.
-function checkRanges( a, b ) {
- assert.eq( a, b );
-}
+t = db.jstests_in9;
+t.drop();
-t.drop();
-t.ensureIndex( {a:1,b:1} );
-checkRanges( {a:[[2,2]],b:[[3,3]]}, t.find( {a:2,b:3} ).explain().indexBounds );
-checkRanges( {a:[[2,2],[3,3]],b:[[4,4]]}, t.find( {a:{$in:[2,3]},b:4} ).explain().indexBounds );
-checkRanges( {a:[[2,2]],b:[[3,3],[4,4]]}, t.find( {a:2,b:{$in:[3,4]}} ).explain().indexBounds );
-checkRanges( {a:[[2,2],[3,3]],b:[[4,4],[5,5]]}, t.find( {a:{$in:[2,3]},b:{$in:[4,5]}} ).explain().indexBounds );
+function someData() {
+ t.remove({});
+ t.save( {key: []} );
+}
-checkRanges( {a:[[2,2],[3,3]],b:[[4,10]]}, t.find( {a:{$in:[2,3]},b:{$gt:4,$lt:10}} ).explain().indexBounds );
+function moreData() {
+ someData();
+ t.save( {key: [1]} );
+ t.save( {key: ['1']} );
+ t.save( {key: null} );
+ t.save( {} );
+}
-t.save( {a:1,b:1} );
-t.save( {a:2,b:4.5} );
-t.save( {a:2,b:4} );
-assert.eq( 2, t.find( {a:{$in:[2,3]},b:{$in:[4,5]}} ).hint( {a:1,b:1} ).explain().nscanned );
-assert.eq( 2, t.findOne( {a:{$in:[2,3]},b:{$in:[4,5]}} ).a );
-assert.eq( 4, t.findOne( {a:{$in:[2,3]},b:{$in:[4,5]}} ).b );
+function check() {
+ assert.eq( 1, t.count( {key:[]} ) );
+ assert.eq( 1, t.count( {key:{$in:[[]]}} ) );
+}
-t.drop();
-t.ensureIndex( {a:1,b:1,c:1} );
-checkRanges( {a:[[2,2]],b:[[3,3],[4,4]],c:[[5,5]]}, t.find( {a:2,b:{$in:[3,4]},c:5} ).explain().indexBounds );
+function doTest() {
+ someData();
+ check();
+ moreData();
+ check();
+}
-t.save( {a:2,b:3,c:5} );
-t.save( {a:2,b:3,c:4} );
-assert.eq( 1, t.find( {a:2,b:{$in:[3,4]},c:5} ).hint( {a:1,b:1,c:1} ).explain().nscanned );
-t.remove({});
-t.save( {a:2,b:4,c:5} );
-t.save( {a:2,b:4,c:4} );
-assert.eq( 2, t.find( {a:2,b:{$in:[3,4]},c:5} ).hint( {a:1,b:1,c:1} ).explain().nscanned );
+doTest();
-t.drop();
-t.ensureIndex( {a:1,b:-1} );
-ib = t.find( {a:2,b:{$in:[3,4]}} ).explain().indexBounds;
-checkRanges( {a:[[2,2]],b:[[4,4],[3,3]]}, ib );
-assert( ib.b[ 0 ][ 0 ] > ib.b[ 1 ][ 0 ] );
-ib = t.find( {a:2,b:{$in:[3,4]}} ).sort( {a:-1,b:1} ).explain().indexBounds;
-checkRanges( {a:[[2,2]],b:[[3,3],[4,4]]}, ib );
-assert( ib.b[ 0 ][ 0 ] < ib.b[ 1 ][ 0 ] );
+// SERVER-1943 not fixed yet
+t.ensureIndex( {key:1} );
+doTest();
diff --git a/jstests/core/ina.js b/jstests/core/in7.js
index cf614ab994d..cf614ab994d 100644
--- a/jstests/core/ina.js
+++ b/jstests/core/in7.js
diff --git a/jstests/core/in8.js b/jstests/core/in8.js
index 5e7e587629f..be2a696f7c3 100644
--- a/jstests/core/in8.js
+++ b/jstests/core/in8.js
@@ -1,23 +1,18 @@
-// SERVER-2829 Test arrays matching themselves within a $in expression.
+// Test $in regular expressions with overlapping index bounds. SERVER-4677
-t = db.jstests_in8;
-t.drop();
+t = db.jstests_inb;
+t.drop();
-t.save( {key: [1]} );
-t.save( {key: ['1']} );
-t.save( {key: [[2]]} );
+function checkResults( query ) {
+ assert.eq( 4, t.count( query ) );
+ assert.eq( 4, t.find( query ).itcount() );
+}
-function doTest() {
- assert.eq( 1, t.count( {key:[1]} ) );
- assert.eq( 1, t.count( {key:{$in:[[1]]}} ) );
- assert.eq( 1, t.count( {key:{$in:[[1]],$ne:[2]}} ) );
- assert.eq( 1, t.count( {key:{$in:[['1']],$type:2}} ) );
- assert.eq( 1, t.count( {key:['1']} ) );
- assert.eq( 1, t.count( {key:{$in:[['1']]}} ) );
- assert.eq( 1, t.count( {key:[2]} ) );
- assert.eq( 1, t.count( {key:{$in:[[2]]}} ) );
-}
+t.ensureIndex( {x:1} );
+t.save( {x:'aa'} );
+t.save( {x:'ab'} );
+t.save( {x:'ac'} );
+t.save( {x:'ad'} );
-doTest();
-t.ensureIndex( {key:1} );
-doTest();
+checkResults( {x:{$in:[/^a/,/^ab/]}} );
+checkResults( {x:{$in:[/^ab/,/^a/]}} );
diff --git a/jstests/core/in9.js b/jstests/core/in9.js
deleted file mode 100644
index cbe28e2e2df..00000000000
--- a/jstests/core/in9.js
+++ /dev/null
@@ -1,35 +0,0 @@
-// SERVER-2343 Test $in empty array matching.
-
-t = db.jstests_in9;
-t.drop();
-
-function someData() {
- t.remove({});
- t.save( {key: []} );
-}
-
-function moreData() {
- someData();
- t.save( {key: [1]} );
- t.save( {key: ['1']} );
- t.save( {key: null} );
- t.save( {} );
-}
-
-function check() {
- assert.eq( 1, t.count( {key:[]} ) );
- assert.eq( 1, t.count( {key:{$in:[[]]}} ) );
-}
-
-function doTest() {
- someData();
- check();
- moreData();
- check();
-}
-
-doTest();
-
-// SERVER-1943 not fixed yet
-t.ensureIndex( {key:1} );
-doTest();
diff --git a/jstests/core/inb.js b/jstests/core/inb.js
deleted file mode 100644
index 34ec843d36c..00000000000
--- a/jstests/core/inb.js
+++ /dev/null
@@ -1,19 +0,0 @@
-// Test $in regular expressions with overlapping index bounds. SERVER-4677
-
-t = db.jstests_inb;
-t.drop();
-
-function checkBoundsAndResults( query ) {
- assert.eq( [ 'a', 'b' ], t.find( query ).explain().indexBounds.x[0] );
- assert.eq( 4, t.count( query ) );
- assert.eq( 4, t.find( query ).itcount() );
-}
-
-t.ensureIndex( {x:1} );
-t.save( {x:'aa'} );
-t.save( {x:'ab'} );
-t.save( {x:'ac'} );
-t.save( {x:'ad'} );
-
-checkBoundsAndResults( {x:{$in:[/^a/,/^ab/]}} );
-checkBoundsAndResults( {x:{$in:[/^ab/,/^a/]}} );
diff --git a/jstests/core/index7.js b/jstests/core/index7.js
index 9e3a6c66d11..bd7c75b8b08 100644
--- a/jstests/core/index7.js
+++ b/jstests/core/index7.js
@@ -1,67 +1,15 @@
-// index7.js Test that we use an index when and only when we expect to.
+// Check that v0 keys are generated for v0 indexes SERVER-3375
-function index( q ) {
- assert( q.explain().cursor.match( /^BtreeCursor/ ) , "index assert" );
-}
+t = db.jstests_indexw;
+t.drop();
-function noIndex( q ) {
- assert( q.explain().cursor.match( /^BasicCursor/ ) , "noIndex assert" );
-}
+t.save( {a:[]} );
+assert.eq( 1, t.count( {a:[]} ) );
+t.ensureIndex( {a:1} );
+assert.eq( 1, t.count( {a:[]} ) );
+t.dropIndexes();
-function start( k, q, rev) {
- var exp = q.explain().indexBounds;
- var s = {a:exp.a[rev?1:0][0],b:exp.b[0][0]};
- assert.eq( k.a, s.a );
- assert.eq( k.b, s.b );
-}
-function end( k, q, rev) {
- var exp = q.explain().indexBounds
- var e = {a:exp.a[rev?1:0][1],b:exp.b[0][1]};
- assert.eq( k.a, e.a );
- assert.eq( k.b, e.b );
-}
-function both( k, q ) {
- start( k, q );
- end( k, q );
-}
-
-f = db.ed_db_index7;
-f.drop();
-
-f.save( { a : 5 } )
-f.ensureIndex( { a: 1 } );
-index( f.find( { a: 5 } ).sort( { a: 1 } ).hint( { a: 1 } ) );
-noIndex( f.find( { a: 5 } ).sort( { a: 1 } ).hint( { $natural: 1 } ) );
-f.drop();
-
-f.ensureIndex( { a: 1, b: 1 } );
-assert.eq( 1, f.find( { a: 1 } ).hint( { a: 1, b: 1 } ).explain().indexBounds.a[0][0] );
-assert.eq( 1, f.find( { a: 1 } ).hint( { a: 1, b: 1 } ).explain().indexBounds.a[0][1] );
-assert.eq( 1, f.find( { a: 1, c: 1 } ).hint( { a: 1, b: 1 } ).explain().indexBounds.a[0][0] );
-assert.eq( 1, f.find( { a: 1, c: 1 } ).hint( { a: 1, b: 1 } ).explain().indexBounds.a[0][1] );
-assert.eq( null, f.find( { a: 1, c: 1 } ).hint( { a: 1, b: 1 } ).explain().indexBounds.c );
-assert.eq( null, f.find( { a: 1, c: 1 } ).hint( { a: 1, b: 1 } ).explain().indexBounds.c );
-
-start( { a: "a", b: 1 }, f.find( { a: /^a/, b: 1 } ).hint( { a: 1, b: 1 } ) );
-start( { a: "a", b: 1 }, f.find( { a: /^a/, b: 1 } ).sort( { a: 1, b: 1 } ).hint( { a: 1, b: 1 } ) );
-start( { a: "b", b: 1 }, f.find( { a: /^a/, b: 1 } ).sort( { a: -1, b: -1 } ).hint( { a: 1, b: 1 } ), true );
-start( { a: "a", b: 1 }, f.find( { b: 1, a: /^a/ } ).hint( { a: 1, b: 1 } ) );
-end( { a: "b", b: 1 }, f.find( { a: /^a/, b: 1 } ).hint( { a: 1, b: 1 } ) );
-end( { a: "b", b: 1 }, f.find( { a: /^a/, b: 1 } ).sort( { a: 1, b: 1 } ).hint( { a: 1, b: 1 } ) );
-end( { a: "a", b: 1 }, f.find( { a: /^a/, b: 1 } ).sort( { a: -1, b: -1 } ).hint( { a: 1, b: 1 } ), true );
-end( { a: "b", b: 1 }, f.find( { b: 1, a: /^a/ } ).hint( { a: 1, b: 1 } ) );
-
-start( { a: "z", b: 1 }, f.find( { a: /^z/, b: 1 } ).hint( { a: 1, b: 1 } ) );
-end( { a: "{", b: 1 }, f.find( { a: /^z/, b: 1 } ).hint( { a: 1, b: 1 } ) );
-
-start( { a: "az", b: 1 }, f.find( { a: /^az/, b: 1 } ).hint( { a: 1, b: 1 } ) );
-end( { a: "a{", b: 1 }, f.find( { a: /^az/, b: 1 } ).hint( { a: 1, b: 1 } ) );
-
-both( { a: 1, b: 3 }, f.find( { a: 1, b: 3 } ).hint( { a: 1, b: 1 } ) );
-
-both( { a: 1, b: 2 }, f.find( { a: { $gte: 1, $lte: 1 }, b: 2 } ).hint( { a: 1, b: 1 } ) );
-both( { a: 1, b: 2 }, f.find( { a: { $gte: 1, $lte: 1 }, b: 2 } ).sort( { a: 1, b: 1 } ).hint( { a: 1, b: 1 } ) );
-
-f.drop();
-f.ensureIndex( { b: 1, a: 1 } );
-both( { a: 1, b: 3 }, f.find( { a: 1, b: 3 } ).hint( { b: 1, a: 1 } ) );
+// The count result is incorrect - just checking here that v0 key generation is used.
+t.ensureIndex( {a:1}, {v:0} );
+// QUERY_MIGRATION: WE GET THIS RIGHT...BY CHANCE?
+// assert.eq( 0, t.count( {a:[]} ) );
diff --git a/jstests/core/indexOtherNamespace.js b/jstests/core/indexOtherNamespace.js
index f71e6d36558..9876fb841f9 100644
--- a/jstests/core/indexOtherNamespace.js
+++ b/jstests/core/indexOtherNamespace.js
@@ -1,16 +1,19 @@
// SERVER-8814: Test that only the system.indexes namespace can be used to build indexes.
+// Include helpers for analyzing explain output.
+load("jstests/libs/analyze_plan.js");
+
var otherDB = db.getSiblingDB("indexOtherNS");
otherDB.dropDatabase();
otherDB.foo.insert({a:1})
assert.eq(1, otherDB.foo.getIndexes().length);
-assert.eq("BasicCursor", otherDB.foo.find({a:1}).explain().cursor);
+assert(isCollscan(otherDB.foo.find({a:1}).explain().queryPlanner.winningPlan));
assert.writeError(otherDB.randomNS.system.indexes.insert({ ns: "indexOtherNS.foo",
key: { a: 1 }, name: "a_1"}));
// Assert that index didn't actually get built
assert.eq(1, otherDB.foo.getIndexes().length);
-assert.eq("BasicCursor", otherDB.foo.find({a:1}).explain().cursor);
+assert(isCollscan(otherDB.foo.find({a:1}).explain().queryPlanner.winningPlan));
otherDB.dropDatabase();
diff --git a/jstests/core/index_check2.js b/jstests/core/index_check2.js
index eed3b8e42b7..9eade5a68fa 100644
--- a/jstests/core/index_check2.js
+++ b/jstests/core/index_check2.js
@@ -2,6 +2,9 @@
t = db.index_check2;
t.drop();
+// Include helpers for analyzing explain output.
+load("jstests/libs/analyze_plan.js");
+
for ( var i=0; i<1000; i++ ){
var a = [];
for ( var j=1; j<5; j++ ){
@@ -24,18 +27,16 @@ assert.eq( 120 , t.find( q1 ).itcount() , "q1 a");
assert.eq( 120 , t.find( q2 ).itcount() , "q2 a" );
assert.eq( 60 , t.find( q3 ).itcount() , "q3 a");
-assert.eq( "BtreeCursor tags_1" , t.find( q1 ).explain().cursor , "e1" );
-assert.eq( "BtreeCursor tags_1" , t.find( q2 ).explain().cursor , "e2" );
-assert.eq( "BtreeCursor tags_1" , t.find( q3 ).explain().cursor , "e3" );
+// We expect these queries to use index scans over { tags: 1 }.
+assert( isIxscan(t.find(q1).explain().queryPlanner.winningPlan) , "e1" );
+assert( isIxscan(t.find(q2).explain().queryPlanner.winningPlan) , "e2" );
+assert( isIxscan(t.find(q3).explain().queryPlanner.winningPlan) , "e3" );
-scanned1 = t.find(q1).explain().nscanned;
-scanned2 = t.find(q2).explain().nscanned;
-scanned3 = t.find(q3).explain().nscanned;
+scanned1 = t.find(q1).explain().executionStats.totalKeysExamined;
+scanned2 = t.find(q2).explain().executionStats.totalKeysExamined;
+scanned3 = t.find(q3).explain().executionStats.totalKeysExamined;
//print( "scanned1: " + scanned1 + " scanned2: " + scanned2 + " scanned3: " + scanned3 );
// $all should just iterate either of the words
assert( scanned3 <= Math.max( scanned1 , scanned2 ) , "$all makes query optimizer not work well" );
-
-exp3 = t.find( q3 ).explain();
-assert.eq( exp3.indexBounds.tags[0][0], exp3.indexBounds.tags[0][1], "$all range not a single key" );
diff --git a/jstests/core/index_check3.js b/jstests/core/index_check3.js
index 55515aff3f5..bef79fd650d 100644
--- a/jstests/core/index_check3.js
+++ b/jstests/core/index_check3.js
@@ -29,10 +29,10 @@ for ( var i=0; i<100; i++ ){
t.ensureIndex( { foo : 1 } );
-//printjson( t.find( { foo : { $lt : 50 } } ).explain() );
-assert.gt( 30 , t.find( { foo : { $lt : 50 } } ).explain().nscanned , "lt" );
-//printjson( t.find( { foo : { $gt : 50 } } ).explain() );
-assert.gt( 30 , t.find( { foo : { $gt : 50 } } ).explain().nscanned , "gt" );
+var explain = t.find( { foo : { $lt : 50 } } ).explain();
+assert.gt( 30 , explain.executionStats.totalKeysExamined , "lt" );
+var explain = t.find( { foo : { $gt : 50 } } ).explain();
+assert.gt( 30 , explain.executionStats.totalKeysExamined , "gt" );
t.drop();
@@ -43,11 +43,12 @@ for( var i=0; i < 10; ++i ) {
t.ensureIndex( { i : 1 } );
-//printjson( t.find( { i : { $lte : 'a' } } ).explain() );
-assert.gt( 3 , t.find( { i : { $lte : 'a' } } ).explain().nscanned , "lte" );
+var explain = t.find( { i : { $lte : 'a' } } ).explain();
+assert.gt( 3 , explain.executionStats.totalKeysExamined , "lte" );
//printjson( t.find( { i : { $gte : 'a' } } ).explain() );
// bug SERVER-99
-assert.gt( 3 , t.find( { i : { $gte : 'a' } } ).explain().nscanned , "gte" );
+var explain = t.find( { i : { $gte : 'a' } } ).explain();
+assert.gt( 3 , explain.executionStats.totalKeysExamined , "gte" );
assert.eq( 1 , t.find( { i : { $gte : 'a' } } ).count() , "gte a" );
assert.eq( 1 , t.find( { i : { $gte : 'a' } } ).itcount() , "gte b" );
assert.eq( 1 , t.find( { i : { $gte : 'a' } } ).sort( { i : 1 } ).count() , "gte c" );
@@ -55,7 +56,8 @@ assert.eq( 1 , t.find( { i : { $gte : 'a' } } ).sort( { i : 1 } ).itcount() , "g
t.save( { i : "b" } );
-assert.gt( 3 , t.find( { i : { $gte : 'a' } } ).explain().nscanned , "gte" );
+var explain = t.find( { i : { $gte : 'a' } } ).explain();
+assert.gt( 3 , explain.executionStats.totalKeysExamined , "gte" );
assert.eq( 2 , t.find( { i : { $gte : 'a' } } ).count() , "gte a2" );
assert.eq( 2 , t.find( { i : { $gte : 'a' } } ).itcount() , "gte b2" );
assert.eq( 2 , t.find( { i : { $gte : 'a' , $lt : MaxKey } } ).itcount() , "gte c2" );
diff --git a/jstests/core/index_check6.js b/jstests/core/index_check6.js
index be395fb3d2e..090fd27d4c3 100644
--- a/jstests/core/index_check6.js
+++ b/jstests/core/index_check6.js
@@ -2,6 +2,11 @@
t = db.index_check6;
t.drop();
+function keysExamined(query, hint) {
+ var explain = t.find(query).hint(hint).explain();
+ return explain.executionStats.totalKeysExamined;
+}
+
t.ensureIndex( { age : 1 , rating : 1 } );
for ( var age=10; age<50; age++ ){
@@ -10,16 +15,22 @@ for ( var age=10; age<50; age++ ){
}
}
-assert.eq( 10 , t.find( { age : 30 } ).explain().nscanned , "A" );
-assert.eq( 20 , t.find( { age : { $gte : 29 , $lte : 30 } } ).explain().nscanned , "B" );
-assert.eq( 18 , t.find( { age : { $gte : 25 , $lte : 30 }, rating: {$in: [0,9] } } ).hint( {age:1,rating:1} ).explain().nscanned , "C1" );
-assert.eq( 23 , t.find( { age : { $gte : 25 , $lte : 30 }, rating: {$in: [0,8] } } ).hint( {age:1,rating:1} ).explain().nscanned , "C2" );
-assert.eq( 28 , t.find( { age : { $gte : 25 , $lte : 30 }, rating: {$in: [1,8] } } ).hint( {age:1,rating:1} ).explain().nscanned , "C3" );
+assert.eq( 10 , keysExamined( { age : 30 }, {} ) , "A" );
+assert.eq( 20 , keysExamined( { age : { $gte : 29 , $lte : 30 } }, {} ) , "B" );
+assert.eq( 18 , keysExamined( { age : { $gte : 25 , $lte : 30 }, rating: {$in: [0,9] } },
+ {age:1,rating:1} ) , "C1" );
+assert.eq( 23 , keysExamined( { age : { $gte : 25 , $lte : 30 }, rating: {$in: [0,8] } },
+ {age:1,rating:1} ) , "C2" );
+assert.eq( 28 , keysExamined( { age : { $gte : 25 , $lte : 30 }, rating: {$in: [1,8] } },
+ {age:1,rating:1} ) , "C3" );
-assert.eq( 4 , t.find( { age : { $gte : 29 , $lte : 30 } , rating : 5 } ).hint( {age:1,rating:1} ).explain().nscanned , "C" ); // SERVER-371
-assert.eq( 6 , t.find( { age : { $gte : 29 , $lte : 30 } , rating : { $gte : 4 , $lte : 5 } } ).hint( {age:1,rating:1} ).explain().nscanned , "D" ); // SERVER-371
+assert.eq( 4 , keysExamined( { age : { $gte : 29 , $lte : 30 } , rating : 5 },
+ {age:1,rating:1} ) , "C" ); // SERVER-371
+assert.eq( 6 , keysExamined( { age : { $gte : 29 , $lte : 30 } , rating : { $gte : 4 , $lte : 5 } },
+ {age:1,rating:1} ) , "D" ); // SERVER-371
-assert.eq.automsg( "2", "t.find( { age:30, rating:{ $gte:4, $lte:5} } ).explain().nscanned" );
+assert.eq.automsg( "2", "t.find( { age:30, rating:{ $gte:4, $lte:5} } ).explain()" +
+ ".executionStats.totalKeysExamined" );
t.drop();
@@ -32,7 +43,8 @@ for ( var a=1; a<10; a++ ){
}
function doQuery( count, query, sort, index ) {
- var nscanned = t.find( query ).hint( index ).sort( sort ).explain().nscanned;
+ var explain = t.find( query ).hint( index ).sort( sort ).explain();
+ var nscanned = explain.executionStats.totalKeysExamined;
assert(Math.abs(count - nscanned) <= 2);
}
diff --git a/jstests/core/index_check7.js b/jstests/core/index_check7.js
index 1d0aaebba35..c23ef4eda1e 100644
--- a/jstests/core/index_check7.js
+++ b/jstests/core/index_check7.js
@@ -6,10 +6,10 @@ for ( var i=0; i<100; i++ )
t.save( { x : i } )
t.ensureIndex( { x : 1 } )
-assert.eq( 1 , t.find( { x : 27 } ).explain().nscanned , "A" )
+assert.eq( 1 , t.find( { x : 27 } ).explain().executionStats.totalKeysExamined , "A" )
t.ensureIndex( { x : -1 } )
-assert.eq( 1 , t.find( { x : 27 } ).explain().nscanned , "B" )
+assert.eq( 1 , t.find( { x : 27 } ).explain().executionStats.totalKeysExamined , "B" )
-assert.eq( 40 , t.find( { x : { $gt : 59 } } ).explain().nscanned , "C" );
+assert.eq( 40 , t.find( { x : { $gt : 59 } } ).explain().executionStats.totalKeysExamined , "C" );
diff --git a/jstests/core/index_check8.js b/jstests/core/index_check8.js
deleted file mode 100644
index 1964ecbe7fc..00000000000
--- a/jstests/core/index_check8.js
+++ /dev/null
@@ -1,21 +0,0 @@
-
-t = db.index_check8
-t.drop();
-
-t.insert( { a : 1 , b : 1 , c : 1 , d : 1 , e : 1 } )
-t.ensureIndex( { a : 1 , b : 1 , c : 1 } )
-t.ensureIndex({ a: 1, b: 1, d: 1, e: 1 })
-
-// this block could be added to many tests in theory...
-if ((new Date()) % 10 == 0) {
- var coll = t.toString().substring(db.toString().length + 1);
- print("compacting " + coll + " before continuing testing");
- // don't check return code - false for mongos
- print("ok: " + db.runCommand({ compact: coll, dev: true }));
-}
-
-x = t.find( { a : 1 , b : 1 , d : 1 } ).sort( { e : 1 } ).explain()
-assert( ! x.scanAndOrder , "A : " + tojson( x ) )
-
-x = t.find( { a : 1 , b : 1 , c : 1 , d : 1 } ).sort( { e : 1 } ).explain()
-//assert( ! x.scanAndOrder , "B : " + tojson( x ) )
diff --git a/jstests/core/index_elemmatch1.js b/jstests/core/index_elemmatch1.js
index 263eb252364..99418e83839 100644
--- a/jstests/core/index_elemmatch1.js
+++ b/jstests/core/index_elemmatch1.js
@@ -34,8 +34,8 @@ function nscannedForCursor( explain, cursor ) {
return -1;
}
-assert.eq( t.find(q).itcount(),
- nscannedForCursor( t.find(q).explain(true), 'BtreeCursor arr.x_1_a_1' ), "A5" );
+var explain = t.find(q).hint( { "arr.x" : 1 , a : 1 } ).explain();
+assert.eq( t.find(q).itcount(), explain.executionStats.totalKeysExamined );
printjson(t.find(q).explain());
print("Num results:");
diff --git a/jstests/core/index_many.js b/jstests/core/index_many.js
index 70f39c4719c..a9eddbb32f1 100644
--- a/jstests/core/index_many.js
+++ b/jstests/core/index_many.js
@@ -34,10 +34,8 @@ function f() {
assert(lim == 64, "not 64 indexes");
assert(t.find({ x: 9 }).length() == 1, "b");
- assert(t.find({ x: 9 }).explain().cursor.match(/Btree/), "not using index?");
assert(t.find({ y: 99 }).length() == 2, "y idx");
- assert(t.find({ y: 99 }).explain().cursor.match(/Btree/), "not using y index?");
/* check that renamecollection remaps all the indexes right */
assert(t.renameCollection("many2").ok, "rename failed");
diff --git a/jstests/core/indexj.js b/jstests/core/indexj.js
index 6d8ac85c972..e58104d75c6 100644
--- a/jstests/core/indexj.js
+++ b/jstests/core/indexj.js
@@ -3,23 +3,34 @@
t = db.jstests_indexj;
t.drop();
+function keysExamined(query, hint, sort) {
+ if (!hint) {
+ hint = {};
+ }
+ if (!sort) {
+ sort = {};
+ }
+ var explain = t.find(query).sort(sort).hint(hint).explain();
+ return explain.executionStats.totalKeysExamined;
+}
+
t.ensureIndex( {a:1} );
t.save( {a:5} );
-assert.eq( 0, t.find( { a: { $gt:4, $lt:5 } } ).explain().nscanned, "A" );
+assert.eq( 0, keysExamined( { a: { $gt:4, $lt:5 } } ), "A" );
t.drop();
t.ensureIndex( {a:1} );
t.save( {a:4} );
-assert.eq( 0, t.find( { a: { $gt:4, $lt:5 } } ).explain().nscanned, "B" );
+assert.eq( 0, keysExamined( { a: { $gt:4, $lt:5 } } ), "B" );
t.save( {a:5} );
-assert.eq( 0, t.find( { a: { $gt:4, $lt:5 } } ).explain().nscanned, "D" );
+assert.eq( 0, keysExamined( { a: { $gt:4, $lt:5 } } ), "D" );
t.save( {a:4} );
-assert.eq( 0, t.find( { a: { $gt:4, $lt:5 } } ).explain().nscanned, "C" );
+assert.eq( 0, keysExamined( { a: { $gt:4, $lt:5 } } ), "C" );
t.save( {a:5} );
-assert.eq( 0, t.find( { a: { $gt:4, $lt:5 } } ).explain().nscanned, "D" );
+assert.eq( 0, keysExamined( { a: { $gt:4, $lt:5 } } ), "D" );
t.drop();
t.ensureIndex( {a:1,b:1} );
@@ -28,17 +39,17 @@ t.save( { a:1,b:2 } );
t.save( { a:2,b:1 } );
t.save( { a:2,b:2 } );
-assert.eq( 2, t.find( { a:{$in:[1,2]}, b:{$gt:1,$lt:2} } ).hint( {a:1,b:1} ).explain().nscanned );
-assert.eq( 2, t.find( { a:{$in:[1,2]}, b:{$gt:1,$lt:2} } ).hint( {a:1,b:1} ).sort( {a:-1,b:-1} ).explain().nscanned );
+assert.eq( 2, keysExamined( { a:{$in:[1,2]}, b:{$gt:1,$lt:2} }, {a:1,b:1} ) );
+assert.eq( 2, keysExamined( { a:{$in:[1,2]}, b:{$gt:1,$lt:2} }, {a:1,b:1}, {a:-1,b:-1} ) );
t.save( {a:1,b:1} );
t.save( {a:1,b:1} );
-assert.eq( 2, t.find( { a:{$in:[1,2]}, b:{$gt:1,$lt:2} } ).hint( {a:1,b:1} ).explain().nscanned );
-assert.eq( 2, t.find( { a:{$in:[1,2]}, b:{$gt:1,$lt:2} } ).hint( {a:1,b:1} ).explain().nscanned );
-assert.eq( 2, t.find( { a:{$in:[1,2]}, b:{$gt:1,$lt:2} } ).hint( {a:1,b:1} ).sort( {a:-1,b:-1} ).explain().nscanned );
+assert.eq( 2, keysExamined( { a:{$in:[1,2]}, b:{$gt:1,$lt:2} }, {a:1,b:1} ) );
+assert.eq( 2, keysExamined( { a:{$in:[1,2]}, b:{$gt:1,$lt:2} }, {a:1,b:1} ) );
+assert.eq( 2, keysExamined( { a:{$in:[1,2]}, b:{$gt:1,$lt:2} }, {a:1,b:1}, {a:-1,b:-1} ) );
-assert.eq( 1, t.find( { a:{$in:[1,1.9]}, b:{$gt:1,$lt:2} } ).hint( {a:1,b:1} ).explain().nscanned );
-assert.eq( 1, t.find( { a:{$in:[1.1,2]}, b:{$gt:1,$lt:2} } ).hint( {a:1,b:1} ).sort( {a:-1,b:-1} ).explain().nscanned );
+assert.eq( 1, keysExamined( { a:{$in:[1,1.9]}, b:{$gt:1,$lt:2} }, {a:1,b:1} ) );
+assert.eq( 1, keysExamined( { a:{$in:[1.1,2]}, b:{$gt:1,$lt:2} }, {a:1,b:1}, {a:-1,b:-1} ) );
t.save( { a:1,b:1.5} );
-assert.eq( 3, t.find( { a:{$in:[1,2]}, b:{$gt:1,$lt:2} } ).hint( {a:1,b:1} ).explain().nscanned, "F" );
+assert.eq( 3, keysExamined( { a:{$in:[1,2]}, b:{$gt:1,$lt:2} }, {a:1,b:1} ), "F" );
diff --git a/jstests/core/indexm.js b/jstests/core/indexm.js
index 6b31ea628cd..045c3bd2053 100644
--- a/jstests/core/indexm.js
+++ b/jstests/core/indexm.js
@@ -35,4 +35,4 @@ test();
// Drop the indexes.
t.dropIndexes();
-test(); \ No newline at end of file
+test();
diff --git a/jstests/core/indexn.js b/jstests/core/indexn.js
index 9abb001eed9..e868583257e 100644
--- a/jstests/core/indexn.js
+++ b/jstests/core/indexn.js
@@ -4,11 +4,6 @@
t = db.jstests_indexn;
t.drop();
-function checkImpossibleMatch( explain ) {
- printjson(explain);
- assert.eq( 0, explain.n );
-}
-
t.save( {a:1,b:[1,2]} );
t.ensureIndex( {a:1} );
@@ -16,34 +11,19 @@ t.ensureIndex( {b:1} );
// {a:1} is a single key index, so no matches are possible for this query
assert.eq( 0, t.count( {a:{$gt:5,$lt:0}} ) );
-checkImpossibleMatch( t.find( {a:{$gt:5,$lt:0}} ).explain() );
assert.eq( 0, t.count( {a:{$gt:5,$lt:0},b:2} ) );
-checkImpossibleMatch( t.find( {a:{$gt:5,$lt:0},b:2} ).explain() );
assert.eq( 0, t.count( {a:{$gt:5,$lt:0},b:{$gt:0,$lt:5}} ) );
-checkImpossibleMatch( t.find( {a:{$gt:5,$lt:0},b:{$gt:0,$lt:5}} ).explain() );
// One clause of an $or is an "impossible match"
printjson( t.find( {$or:[{a:{$gt:5,$lt:0}},{a:1}]} ).explain() )
assert.eq( 1, t.count( {$or:[{a:{$gt:5,$lt:0}},{a:1}]} ) );
-checkImpossibleMatch( t.find( {$or:[{a:{$gt:5,$lt:0}},{a:1}]} ).explain().clauses[ 0 ] );
// One clause of an $or is an "impossible match"; original order of the $or
// does not matter.
printjson( t.find( {$or:[{a:1},{a:{$gt:5,$lt:0}}]} ).explain() )
assert.eq( 1, t.count( {$or:[{a:1},{a:{$gt:5,$lt:0}}]} ) );
-checkImpossibleMatch( t.find( {$or:[{a:1},{a:{$gt:5,$lt:0}}]} ).explain().clauses[ 0 ] );
t.save( {a:2} );
-
-// Descriptive test: query system sees this query as an $or where
-// one clause of the $or is an $and. The $and bounds get intersected
-// forming a clause with empty index bounds. The union of the $or bounds
-// produces the two point intervals [1, 1] and [2, 2].
assert.eq( 2, t.count( {$or:[{a:1},{a:{$gt:5,$lt:0}},{a:2}]} ) );
-explain = t.find( {$or:[{a:1},{a:{$gt:5,$lt:0}},{a:2}]} ).explain();
-printjson( explain )
-assert.eq( 2, explain.clauses.length );
-checkImpossibleMatch( explain.clauses[ 0 ] );
-assert.eq( [[1, 1], [2,2]], explain.clauses[ 1 ].indexBounds.a );
diff --git a/jstests/core/indexq.js b/jstests/core/indexq.js
deleted file mode 100644
index 38cd27b8798..00000000000
--- a/jstests/core/indexq.js
+++ /dev/null
@@ -1,20 +0,0 @@
-// Test multikey range preference for a fully included range SERVER-958.
-
-t = db.jstests_indexq;
-t.drop();
-
-t.ensureIndex( {a:1} );
-// Single key index
-assert.eq( 5, t.find( {a:{$gt:4,$gte:5}} ).explain().indexBounds.a[ 0 ][ 0 ] );
-assert.eq( [[1,1],[2,2]], t.find( {a:{$in:[1,2,3]},$or:[{a:{$in:[1,2]}}]} ).explain().indexBounds.a );
-
-t.save( {a:[1,3]} );
-// Now with multi key index.
-
-// SERVER-12281: We should know that >4 is worse than >5
-// assert.eq( 5, t.find( {a:{$gt:4,$gte:5}} ).explain().indexBounds.a[ 0 ][ 0 ] );
-
-printjson(t.find( {a:{$in:[1,2,3]},$or:[{a:{$in:[1,2]}}]} ).explain())
-
-// SERVER-12281: We should know that in[1,2] is better than in[1,2,3].
-// assert.eq( [[1,1],[2,2]], t.find( {a:{$in:[1,2,3]},$or:[{a:{$in:[1,2]}}]} ).explain().indexBounds.a );
diff --git a/jstests/core/indexr.js b/jstests/core/indexr.js
index c3eecd045c8..1f7b75bbcf7 100644
--- a/jstests/core/indexr.js
+++ b/jstests/core/indexr.js
@@ -14,14 +14,10 @@ t.ensureIndex( {'a.b':1,'a.c':1} );
t.ensureIndex( {a:1,'a.c':1} );
assert.eq( 0, t.count( { 'a.b':{ $gt:2 }, 'a.c': { $lt:4 } } ) );
assert.eq( 0, t.count( { a:{ b:3, c:6 }, 'a.c': { $lt:4 } } ) );
-assert.eq( 4, t.find( { 'a.b':{ $gt:2 }, 'a.c': { $lt:4 } } ).explain().indexBounds['a.c'][0][1] );
-assert.eq( 4, t.find( { a:{ b:3, c:6 }, 'a.c': { $lt:4 } } ).explain().indexBounds['a.c'][0][1] );
t.save( { a: { b: 3, c: 3 } } );
assert.eq( 1, t.count( { 'a.b':{ $gt:2 }, 'a.c': { $lt:4 } } ) );
assert.eq( 1, t.count( { a:{ b:3, c:3 }, 'a.c': { $lt:4 } } ) );
-assert.eq( 4, t.find( { 'a.b':{ $gt:2 }, 'a.c': { $lt:4 } } ).explain().indexBounds['a.c'][0][1] );
-assert.eq( 4, t.find( { a:{ b:3, c:3 }, 'a.c': { $lt:4 } } ).explain().indexBounds['a.c'][0][1] );
// Check with multikey indexes.
t.remove({});
@@ -29,16 +25,11 @@ t.save( { a: [ { b: 3, c: 6 }, { b: 1, c: 1 } ] } );
assert.eq( 1, t.count( { 'a.b':{ $gt:2 }, 'a.c': { $lt:4 } } ) );
assert.eq( 1, t.count( { a:{ b:3, c:6 }, 'a.c': { $lt:4 } } ) );
-assert.eq( [[{$minElement:1},{$maxElement:1}]], t.find( { 'a.b':{ $gt:2 }, 'a.c': { $lt:4 } } ).explain().indexBounds['a.c'] );
-assert.eq( [[{$minElement:1},{$maxElement:1}]], t.find( { a:{ b:3, c:6 }, 'a.c': { $lt:4 } } ).explain().indexBounds['a.c'] );
// Check reverse direction.
assert.eq( 1, t.find( { 'a.b':{ $gt:2 }, 'a.c': { $lt:4 } } ).sort( {'a.b':-1} ).itcount() );
assert.eq( 1, t.find( { a:{ b:3, c:6 }, 'a.c': { $lt:4 } } ).sort( {a:-1} ).itcount() );
-assert.eq( [[{$maxElement:1},{$minElement:1}]], t.find( { 'a.b':{ $gt:2 }, 'a.c': { $lt:4 } } ).sort( {'a.b':-1} ).explain().indexBounds['a.c'] );
-assert.eq( [[{$maxElement:1},{$minElement:1}]], t.find( { a:{ b:3, c:6 }, 'a.c': { $lt:4 } } ).sort( {a:-1} ).explain().indexBounds['a.c'] );
-
// Check second field is constrained if first is not.
assert.eq( 1, t.find( { 'a.c': { $lt:4 } } ).hint( {'a.b':1,'a.c':1} ).itcount() );
assert.eq( 1, t.find( { 'a.c': { $lt:4 } } ).hint( {a:1,'a.c':1} ).itcount() );
diff --git a/jstests/core/indexs.js b/jstests/core/indexs.js
index 609f912affe..0b7bfe412c4 100644
--- a/jstests/core/indexs.js
+++ b/jstests/core/indexs.js
@@ -11,11 +11,8 @@ t.drop();
t.ensureIndex( {a:1,'a.b':1} );
t.save( { a: { b: 3 } } );
assert.eq( 1, t.count( { a:{ b:3 } } ) );
-ib = t.find( { a:{ b:3 } } ).explain().indexBounds;
t.drop();
t.ensureIndex( {a:1,'a.b':1} );
t.save( { a: [ { b: 3 } ] } );
-assert.eq( ib, t.find( { a:{ b:3 } } ).explain().indexBounds );
-assert.eq( 1, t.find( { a:{ b:3 } } ).explain().nscanned );
assert.eq( 1, t.count( { a:{ b:3 } } ) );
diff --git a/jstests/core/indext.js b/jstests/core/indext.js
index e418dc2e959..134e81acdeb 100644
--- a/jstests/core/indext.js
+++ b/jstests/core/indext.js
@@ -7,15 +7,12 @@ t.ensureIndex( {'a.b':1}, {sparse:true} );
t.save( {a:[]} );
t.save( {a:1} );
assert.eq( 0, t.find().hint( {'a.b':1} ).itcount() );
-assert.eq( 0, t.find().hint( {'a.b':1} ).explain().nscanned );
t.ensureIndex( {'a.b':1,'a.c':1}, {sparse:true} );
t.save( {a:[]} );
t.save( {a:1} );
assert.eq( 0, t.find().hint( {'a.b':1,'a.c':1} ).itcount() );
-assert.eq( 0, t.find().hint( {'a.b':1,'a.c':1} ).explain().nscanned );
t.save( {a:[{b:1}]} );
t.save( {a:1} );
assert.eq( 1, t.find().hint( {'a.b':1,'a.c':1} ).itcount() );
-assert.eq( 1, t.find().hint( {'a.b':1,'a.c':1} ).explain().nscanned );
diff --git a/jstests/core/indexv.js b/jstests/core/indexv.js
index 334ec432d74..a30541de36c 100644
--- a/jstests/core/indexv.js
+++ b/jstests/core/indexv.js
@@ -7,12 +7,12 @@ t.ensureIndex( {'a.b':1} );
t.save( {a:[{},{b:1}]} );
var e = t.find( {'a.b':null} ).explain();
-assert.eq( 1, e.n );
-assert.eq( 1, e.nscanned );
+assert.eq( 1, e.executionStats.nReturned );
+assert.eq( 1, e.executionStats.totalKeysExamined );
t.drop();
t.ensureIndex( {'a.b.c':1} );
t.save( {a:[{b:[]},{b:{c:1}}]} );
var e = t.find( {'a.b.c':null} ).explain();
-assert.eq( 0, e.n );
-assert.eq( 1, e.nscanned );
+assert.eq( 0, e.executionStats.nReturned );
+assert.eq( 1, e.executionStats.totalKeysExamined );
diff --git a/jstests/core/indexw.js b/jstests/core/indexw.js
deleted file mode 100644
index bd7c75b8b08..00000000000
--- a/jstests/core/indexw.js
+++ /dev/null
@@ -1,15 +0,0 @@
-// Check that v0 keys are generated for v0 indexes SERVER-3375
-
-t = db.jstests_indexw;
-t.drop();
-
-t.save( {a:[]} );
-assert.eq( 1, t.count( {a:[]} ) );
-t.ensureIndex( {a:1} );
-assert.eq( 1, t.count( {a:[]} ) );
-t.dropIndexes();
-
-// The count result is incorrect - just checking here that v0 key generation is used.
-t.ensureIndex( {a:1}, {v:0} );
-// QUERY_MIGRATION: WE GET THIS RIGHT...BY CHANCE?
-// assert.eq( 0, t.count( {a:[]} ) );
diff --git a/jstests/core/mod1.js b/jstests/core/mod1.js
index 46e3482bc72..d578190737f 100644
--- a/jstests/core/mod1.js
+++ b/jstests/core/mod1.js
@@ -11,7 +11,8 @@ t.save( { a : "adasdas" } );
assert.eq( 2 , t.find( "this.a % 10 == 1" ).itcount() , "A1" );
assert.eq( 2 , t.find( { a : { $mod : [ 10 , 1 ] } } ).itcount() , "A2" );
-assert.eq( 6 , t.find( { a : { $mod : [ 10 , 1 ] } } ).explain().nscanned , "A3" );
+assert.eq( 0 , t.find( { a : { $mod : [ 10 , 1 ] } } ).explain()
+ .executionStats.totalKeysExamined , "A3" );
t.ensureIndex( { a : 1 } );
@@ -20,6 +21,7 @@ assert.eq( 2 , t.find( { a : { $mod : [ 10 , 1 ] } } ).itcount() , "B2" );
assert.eq( 1 , t.find( "this.a % 10 == 0" ).itcount() , "B3" );
assert.eq( 1 , t.find( { a : { $mod : [ 10 , 0 ] } } ).itcount() , "B4" );
-assert.eq( 4 , t.find( { a : { $mod : [ 10 , 1 ] } } ).explain().nscanned , "B5" );
+assert.eq( 4 , t.find( { a : { $mod : [ 10 , 1 ] } } ).explain()
+ .executionStats.totalKeysExamined, "B5" );
-assert.eq( 1, t.find( { a: { $gt: 5, $mod : [ 10, 1 ] } } ).itcount() ); \ No newline at end of file
+assert.eq( 1, t.find( { a: { $gt: 5, $mod : [ 10, 1 ] } } ).itcount() );
diff --git a/jstests/core/mr_index.js b/jstests/core/mr_index.js
index 521d44d29f0..394ecc3f650 100644
--- a/jstests/core/mr_index.js
+++ b/jstests/core/mr_index.js
@@ -28,16 +28,12 @@ ex = function(){
}
res = t.mapReduce( m , r , { out : outName } )
-
-assert.eq( "BasicCursor" , ex().cursor , "A1" )
+
+assert.eq( 3 , ex().executionStats.nReturned , "A1" )
out.ensureIndex( { value : 1 } )
-assert.eq( "BtreeCursor value_1" , ex().cursor , "A2" )
-assert.eq( 3 , ex().n , "A3" )
+assert.eq( 3 , ex().executionStats.nReturned , "A2" )
res = t.mapReduce( m , r , { out : outName } )
-
-assert.eq( "BtreeCursor value_1" , ex().cursor , "B1" )
-assert.eq( 3 , ex().n , "B2" )
-res.drop()
-
+assert.eq( 3 , ex().executionStats.nReturned , "B1" )
+res.drop()
diff --git a/jstests/core/ne2.js b/jstests/core/ne2.js
index a69bfd6a114..c34f482a389 100644
--- a/jstests/core/ne2.js
+++ b/jstests/core/ne2.js
@@ -10,7 +10,7 @@ t.save( { a:0 } );
t.save( { a:0.5 } );
e = t.find( { a: { $ne: 0 } } ).explain( true );
-assert.eq( 2, e.n, 'A' );
+assert.eq( 2, e.executionStats.nReturned, 'A' );
e = t.find( { a: { $gt: -1, $lt: 1, $ne: 0 } } ).explain();
-assert.eq( 2, e.n, 'B' );
+assert.eq( 2, e.executionStats.nReturned, 'B' );
diff --git a/jstests/core/or2.js b/jstests/core/or2.js
index 00e9f68decf..f8de6c42ef9 100644
--- a/jstests/core/or2.js
+++ b/jstests/core/or2.js
@@ -1,6 +1,9 @@
t = db.jstests_or2;
t.drop();
+// Include helpers for analyzing explain output.
+load("jstests/libs/analyze_plan.js");
+
checkArrs = function( a, b, m ) {
assert.eq( a.length, b.length, m );
aStr = [];
@@ -16,7 +19,7 @@ doTest = function( index ) {
if ( index == null ) {
index = true;
}
-
+
t.save( {_id:0,x:0,a:1} );
t.save( {_id:1,x:0,a:2} );
t.save( {_id:2,x:0,b:1} );
@@ -25,23 +28,25 @@ doTest = function( index ) {
t.save( {_id:5,x:1,a:1,b:2} );
t.save( {_id:6,x:1,a:2,b:1} );
t.save( {_id:7,x:1,a:2,b:2} );
-
+
assert.throws( function() { t.find( { x:0,$or:"a" } ).toArray(); } );
assert.throws( function() { t.find( { x:0,$or:[] } ).toArray(); } );
assert.throws( function() { t.find( { x:0,$or:[ "a" ] } ).toArray(); } );
-
+
a1 = t.find( { x:0, $or: [ { a : 1 } ] } ).toArray();
checkArrs( [ { _id:0, x:0, a:1 } ], a1 );
if ( index ) {
- assert( t.find( { x:0,$or: [ { a : 1 } ] } ).explain().cursor.match( /Btree/ ) );
+ var explain = t.find( { x:0,$or: [ { a : 1 } ] } ).explain();
+ assert( isIxscan(explain.queryPlanner.winningPlan) );
}
-
+
a1b2 = t.find( { x:1, $or: [ { a : 1 }, { b : 2 } ] } ).toArray();
checkArrs( [ { _id:4, x:1, a:1, b:1 }, { _id:5, x:1, a:1, b:2 }, { _id:7, x:1, a:2, b:2 } ], a1b2 );
if ( index ) {
- assert( t.find( { x:0,$or: [ { a : 1 } ] } ).explain().cursor.match( /Btree/ ) );
+ var explain = t.find( { x:0,$or: [ { a : 1 } ] } ).explain();
+ assert( isIxscan(explain.queryPlanner.winningPlan) );
}
-
+
/*
t.drop();
obj = {_id:0,x:10,a:[1,2,3]};
diff --git a/jstests/core/or3.js b/jstests/core/or3.js
index 7759e689f84..1dab4d55ecd 100644
--- a/jstests/core/or3.js
+++ b/jstests/core/or3.js
@@ -1,6 +1,9 @@
t = db.jstests_or3;
t.drop();
+// Include helpers for analyzing explain output.
+load("jstests/libs/analyze_plan.js");
+
checkArrs = function( a, b, m ) {
assert.eq( a.length, b.length, m );
aStr = [];
@@ -37,7 +40,8 @@ doTest = function( index ) {
checkArrs( [ { _id:6, x:1, a:2, b:1 } ], an1bn2 );
checkArrs( t.find( { x:1, a:{$ne:1}, b:{$ne:2} } ).toArray(), an1bn2 );
if ( index ) {
- assert( t.find( { x:1, $nor: [ { a : 1 }, { b : 2 } ] } ).explain().cursor.match( /Btree/ ) );
+ var explain = t.find( { x:1, $nor: [ { a : 1 }, { b : 2 } ] } ).explain();
+ assert( isIxscan(explain.queryPlanner.winningPlan) );
}
an1b2 = t.find( { $nor: [ { a : 1 } ], $or: [ { b : 2 } ] } ).toArray();
diff --git a/jstests/core/or4.js b/jstests/core/or4.js
index 23c10bba8e2..a47884364c3 100644
--- a/jstests/core/or4.js
+++ b/jstests/core/or4.js
@@ -78,8 +78,6 @@ assert.eq.automsg( "[1,2]", "Array.sort( t.distinct( 'a', {$or:[{a:2},{b:3}]} )
assert.eq.automsg( "[{a:2},{a:null},{a:1}]", "t.group( {key:{a:1}, cond:{$or:[{a:2},{b:3}]}, reduce:function( x, y ) { }, initial:{} } )" );
assert.eq.automsg( "5", "t.mapReduce( function() { emit( 'a', this.a ); }, function( key, vals ) { return vals.length; }, {out:{inline:true},query:{$or:[{a:2},{b:3}]}} ).counts.input" );
-explain = t.find( {$or:[{a:2},{b:3}]} ).explain();
-
t.remove( {} );
t.save( {a:[1,2]} );
@@ -89,11 +87,3 @@ assert.eq.automsg( "1", "t.find( {$or:[{a:2},{a:1}]} ).toArray().length" );
assert.eq.automsg( "1", "t.count( {$or:[{a:2},{a:1}]} )" );
t.remove({});
-
-assert.eq.automsg( "'BtreeCursor b_1'", "t.find( {$or:[{a:1}]} ).sort( {b:1} ).explain().cursor" );
-assert.eq.automsg( "'BtreeCursor b_1'", "t.find( {$or:[{}]} ).sort( {b:1} ).explain().cursor" );
-assert.eq.automsg( "'BtreeCursor b_1'", "t.find( {$or:[{b:1}]} ).sort( {b:1} ).explain().cursor" );
-
-assert.eq.automsg( "'BtreeCursor b_1'", "t.find( {$or:[{a:1}]} ).hint( {b:1} ).explain().cursor" );
-assert.eq.automsg( "'BtreeCursor b_1'", "t.find( {$or:[{}]} ).hint( {b:1} ).explain().cursor" );
-assert.eq.automsg( "1", "t.find( {$or:[{b:1}]} ).hint( {b:1} ).explain().indexBounds.b[ 0 ][ 0 ]" );
diff --git a/jstests/core/or5.js b/jstests/core/or5.js
index 6a7316787d4..8d9d8802860 100644
--- a/jstests/core/or5.js
+++ b/jstests/core/or5.js
@@ -4,9 +4,6 @@ t.drop();
t.ensureIndex( {a:1} );
t.ensureIndex( {b:1} );
-assert.eq.automsg( "'BasicCursor'", "t.find( {$or:[{a:2},{b:3},{}]} ).explain().cursor" );
-assert.eq.automsg( "'BasicCursor'", "t.find( {$or:[{a:2},{b:3},{c:4}]} ).explain().cursor" );
-
t.ensureIndex( {c:1} );
t.save( {a:2} );
@@ -36,10 +33,6 @@ assert.eq.automsg( "6", "t.find( {$or:[{a:2},{b:3},{c:6}]} ).batchSize( i ).toAr
t.ensureIndex( {z:"2d"} );
-assert.eq.automsg( "'GeoSearchCursor'", "t.find( {z:{$near:[50,50]},a:2} ).explain().cursor" );
-assert.eq.automsg( "'GeoSearchCursor'", "t.find( {z:{$near:[50,50]},$or:[{a:2}]} ).explain().cursor" );
-assert.eq.automsg( "'GeoSearchCursor'", "t.find( {$or:[{a:2}],z:{$near:[50,50]}} ).explain().cursor" );
-assert.eq.automsg( "'GeoSearchCursor'", "t.find( {$or:[{a:2},{b:3}],z:{$near:[50,50]}} ).explain().cursor" );
assert.throws.automsg( function() { return t.find( {$or:[{z:{$near:[50,50]}},{a:2}]} ).toArray(); } );
function reset() {
diff --git a/jstests/core/or6.js b/jstests/core/or6.js
index 43b75f467aa..2a8263e298f 100644
--- a/jstests/core/or6.js
+++ b/jstests/core/or6.js
@@ -1,23 +1,22 @@
-t = db.jstests_or6;
-t.drop();
-
-t.ensureIndex( {a:1} );
+// A few rooted $or cases.
-assert.eq.automsg( "null", "t.find( {$or:[{a:1},{b:2}]} ).hint( {a:1} ).explain().clauses" );
+var t = db.jstests_orq;
+t.drop();
-assert.eq.automsg( "'BasicCursor'", "t.find( {$or:[{a:1},{a:3}]} ).hint( {$natural:1} ).explain().cursor" );
+t.ensureIndex({a: 1, c: 1});
+t.ensureIndex({b: 1, c: 1});
-t.ensureIndex( {b:1} );
-assert.eq.automsg( "2", "t.find( {$or:[{a:1,b:5},{a:3,b:5}]} ).hint( {a:1} ).explain().clauses.length" );
+t.save({a: 1, c: 9});
+t.save({a: 1, c: 10});
+t.save({b: 2, c: 8});
+t.save({b: 2, c: 7});
-t.drop();
+// This can be answered using a merge sort. See SERVER-13715.
+var cursor = t.find({$or: [{a: 1}, {b: 2}]}).sort({c: 1});
+for (var i = 7; i < 11; i++) {
+ assert.eq(i, cursor.next()["c"]);
+}
+assert(!cursor.hasNext());
-t.ensureIndex( {a:1,b:1} );
-assert.eq.automsg( "2", "t.find( {$or:[{a:{$in:[1,2]},b:5}, {a:2,b:6}]} )" +
- ".hint({a:1,b:1}).explain().clauses.length" );
-assert.eq.automsg( "2", "t.find( {$or:[{a:{$gt:1,$lte:2},b:5}, {a:2,b:6}]} )" +
- ".hint({a:1,b:1}).explain().clauses.length" );
-assert.eq.automsg( "2", "t.find( {$or:[{a:{$gt:1,$lte:3},b:5}, {a:2,b:6}]} )" +
- ".hint({a:1,b:1}).explain().clauses.length" );
-assert.eq.automsg( "null", "t.find( {$or:[{a:{$in:[1,2]}}, {a:2}]} )" +
- ".hint({a:1,b:1}).explain().clauses" );
+// SERVER-13715
+assert.eq(4, t.find({$or: [{a: 1}, {b: 2}]}).sort({a: 1}).itcount());
diff --git a/jstests/core/or9.js b/jstests/core/or9.js
index 7318a532af4..c76c5407b6f 100644
--- a/jstests/core/or9.js
+++ b/jstests/core/or9.js
@@ -7,49 +7,43 @@ t.ensureIndex( {a:1,b:1} );
t.save( {a:2,b:2} );
-function check( a, b, q ) {
+function check( a, q ) {
count = a;
- clauses = b;
query = q;
assert.eq.automsg( "count", "t.count( query )" );
- if ( clauses == 1 ) {
- assert.eq.automsg( "undefined", "t.find( query ).explain().clauses" );
- } else {
- assert.eq.automsg( "clauses", "t.find( query ).hint({a:1, b:1}).explain().clauses.length" );
- }
}
// SERVER-12594: there are two clauses in this case, because we do
// not yet collapse OR of ANDs to a single ixscan.
-check( 1, 2, { $or: [ { a: { $gte:1,$lte:3 } }, { a: 2 } ] } );
+check( 1, { $or: [ { a: { $gte:1,$lte:3 } }, { a: 2 } ] } );
-check( 1, 2, { $or: [ { a: { $gt:2,$lte:3 } }, { a: 2 } ] } );
+check( 1, { $or: [ { a: { $gt:2,$lte:3 } }, { a: 2 } ] } );
-check( 1, 1, { $or: [ { b: { $gte:1,$lte:3 } }, { b: 2 } ] } );
-check( 1, 1, { $or: [ { b: { $gte:2,$lte:3 } }, { b: 2 } ] } );
-check( 1, 1, { $or: [ { b: { $gt:2,$lte:3 } }, { b: 2 } ] } );
+check( 1, { $or: [ { b: { $gte:1,$lte:3 } }, { b: 2 } ] } );
+check( 1, { $or: [ { b: { $gte:2,$lte:3 } }, { b: 2 } ] } );
+check( 1, { $or: [ { b: { $gt:2,$lte:3 } }, { b: 2 } ] } );
// SERVER-12594: there are two clauses in this case, because we do
// not yet collapse OR of ANDs to a single ixscan.
-check( 1, 2, { $or: [ { a: { $gte:1,$lte:3 } }, { a: 2, b: 2 } ] } );
+check( 1, { $or: [ { a: { $gte:1,$lte:3 } }, { a: 2, b: 2 } ] } );
-check( 1, 2, { $or: [ { a: { $gte:1,$lte:3 }, b:3 }, { a: 2 } ] } );
+check( 1, { $or: [ { a: { $gte:1,$lte:3 }, b:3 }, { a: 2 } ] } );
-check( 1, 1, { $or: [ { b: { $gte:1,$lte:3 } }, { b: 2, a: 2 } ] } );
+check( 1, { $or: [ { b: { $gte:1,$lte:3 } }, { b: 2, a: 2 } ] } );
-check( 1, 1, { $or: [ { b: { $gte:1,$lte:3 }, a:3 }, { b: 2 } ] } );
+check( 1, { $or: [ { b: { $gte:1,$lte:3 }, a:3 }, { b: 2 } ] } );
-check( 1, 2, { $or: [ { a: { $gte:1,$lte:3 }, b: 3 }, { a: 2, b: 2 } ] } );
-check( 1, 2, { $or: [ { a: { $gte:2,$lte:3 }, b: 3 }, { a: 2, b: 2 } ] } );
+check( 1, { $or: [ { a: { $gte:1,$lte:3 }, b: 3 }, { a: 2, b: 2 } ] } );
+check( 1, { $or: [ { a: { $gte:2,$lte:3 }, b: 3 }, { a: 2, b: 2 } ] } );
// SERVER-12594: there are two clauses in this case, because we do
// not yet collapse OR of ANDs to a single ixscan.
-check( 1, 2, { $or: [ { a: { $gte:1,$lte:3 }, b: 2 }, { a: 2, b: 2 } ] } );
+check( 1, { $or: [ { a: { $gte:1,$lte:3 }, b: 2 }, { a: 2, b: 2 } ] } );
-check( 1, 2, { $or: [ { b: { $gte:1,$lte:3 }, a: 3 }, { a: 2, b: 2 } ] } );
-check( 1, 2, { $or: [ { b: { $gte:2,$lte:3 }, a: 3 }, { a: 2, b: 2 } ] } );
+check( 1, { $or: [ { b: { $gte:1,$lte:3 }, a: 3 }, { a: 2, b: 2 } ] } );
+check( 1, { $or: [ { b: { $gte:2,$lte:3 }, a: 3 }, { a: 2, b: 2 } ] } );
// SERVER-12594: there are two clauses in this case, because we do
// not yet collapse OR of ANDs to a single ixscan.
-check( 1, 2, { $or: [ { b: { $gte:1,$lte:3 }, a: 2 }, { a: 2, b: 2 } ] } );
+check( 1, { $or: [ { b: { $gte:1,$lte:3 }, a: 2 }, { a: 2, b: 2 } ] } );
t.remove({});
@@ -58,7 +52,7 @@ t.save( {a:5,b:1} );
// SERVER-12594: there are two clauses in the case below, because we do
// not yet collapse OR of ANDs to a single ixscan.
-check( 2, 2, { $or: [ { a: { $in:[1,5] }, b: { $in:[1,5] } }, { a: { $in:[1,5] }, b: { $in:[1,5] } } ] } );
+check( 2, { $or: [ { a: { $in:[1,5] }, b: { $in:[1,5] } }, { a: { $in:[1,5] }, b: { $in:[1,5] } } ] } );
-check( 2, 2, { $or: [ { a: { $in:[1] }, b: { $in:[1,5] } }, { a: { $in:[1,5] }, b: { $in:[1,5] } } ] } );
-check( 2, 2, { $or: [ { a: { $in:[1] }, b: { $in:[1] } }, { a: { $in:[1,5] }, b: { $in:[1,5] } } ] } );
+check( 2, { $or: [ { a: { $in:[1] }, b: { $in:[1,5] } }, { a: { $in:[1,5] }, b: { $in:[1,5] } } ] } );
+check( 2, { $or: [ { a: { $in:[1] }, b: { $in:[1] } }, { a: { $in:[1,5] }, b: { $in:[1,5] } } ] } );
diff --git a/jstests/core/orf.js b/jstests/core/orf.js
index 720b5b31f0c..bae8c61f89a 100644
--- a/jstests/core/orf.js
+++ b/jstests/core/orf.js
@@ -15,13 +15,8 @@ a.forEach( function( x ) { t.save( x ); } );
// a series of _id index point intervals.
explain = t.find( {$or:a} ).hint( {_id: 1} ).explain( true );
printjson( explain );
-assert.eq( 'BtreeCursor _id_', explain.cursor, 'cursor' );
-assert.eq( expectBounds, explain.indexBounds['_id'], 'indexBounds' );
-assert.eq( 200, explain.n, 'n' );
-assert.eq( 200, explain.nscanned, 'nscanned' );
-assert.eq( 200, explain.nscannedObjects, 'nscannedObjects' );
-assert.eq( false, explain.isMultiKey, 'isMultiKey' );
-assert.eq( false, explain.scanAndOrder, 'scanAndOrder' );
-assert.eq( false, explain.indexOnly, 'indexOnly' );
+assert.eq( 200, explain.executionStats.nReturned, 'n' );
+assert.eq( 200, explain.executionStats.totalKeysExamined, 'keys examined' );
+assert.eq( 200, explain.executionStats.totalDocsExamined, 'docs examined' );
assert.eq( 200, t.count( {$or:a} ) );
diff --git a/jstests/core/orl.js b/jstests/core/orl.js
index 2726975d5aa..f76409d0d04 100644
--- a/jstests/core/orl.js
+++ b/jstests/core/orl.js
@@ -10,4 +10,4 @@ t.save( {a:{b:[1,2]}} );
// SERVER-3445
if ( 0 ) {
assert( !t.find( {$or:[{'a.b':2,'a.c':3},{'a.b':2,'a.c':4}]} ).explain().clauses );
-} \ No newline at end of file
+}
diff --git a/jstests/core/orq.js b/jstests/core/orq.js
deleted file mode 100644
index 2a8263e298f..00000000000
--- a/jstests/core/orq.js
+++ /dev/null
@@ -1,22 +0,0 @@
-// A few rooted $or cases.
-
-var t = db.jstests_orq;
-t.drop();
-
-t.ensureIndex({a: 1, c: 1});
-t.ensureIndex({b: 1, c: 1});
-
-t.save({a: 1, c: 9});
-t.save({a: 1, c: 10});
-t.save({b: 2, c: 8});
-t.save({b: 2, c: 7});
-
-// This can be answered using a merge sort. See SERVER-13715.
-var cursor = t.find({$or: [{a: 1}, {b: 2}]}).sort({c: 1});
-for (var i = 7; i < 11; i++) {
- assert.eq(i, cursor.next()["c"]);
-}
-assert(!cursor.hasNext());
-
-// SERVER-13715
-assert.eq(4, t.find({$or: [{a: 1}, {b: 2}]}).sort({a: 1}).itcount());
diff --git a/jstests/core/profile4.js b/jstests/core/profile4.js
index ca27fd2e3ca..fa3fdaecc32 100644
--- a/jstests/core/profile4.js
+++ b/jstests/core/profile4.js
@@ -75,7 +75,7 @@ try {
t.find().skip( 1 ).limit( 4 ).itcount();
checkLastOp( [ [ "ntoreturn", 4 ],
[ "ntoskip", 1 ],
- [ "nscanned", 3 ],
+ [ "nscannedObjects", 3 ],
[ "nreturned", 2 ] ] );
t.find().batchSize( 2 ).next();
diff --git a/jstests/core/proj_key1.js b/jstests/core/proj_key1.js
index ad944f71827..c0720ed0fcb 100644
--- a/jstests/core/proj_key1.js
+++ b/jstests/core/proj_key1.js
@@ -9,20 +9,9 @@ for ( i=0; i<10; i++ ){
t.insert( { a : i , b : i } );
}
-assert( ! t.find( {} , { a : 1 } ).explain().indexOnly , "A1" )
-
t.ensureIndex( { a : 1 } )
-assert( t.find( { a : { $gte : 0 } } , { a : 1 , _id : 0 } ).explain().indexOnly , "A2" )
-
-assert( ! t.find( { a : { $gte : 0 } } , { a : 1 } ).explain().indexOnly , "A3" ) // because id _id
-
// assert( t.find( {} , { a : 1 , _id : 0 } ).explain().indexOnly , "A4" ); // TODO: need to modify query optimier SERVER-2109
assert.eq( as , t.find( { a : { $gte : 0 } } , { a : 1 , _id : 0 } ).toArray() , "B1" )
assert.eq( as , t.find( { a : { $gte : 0 } } , { a : 1 , _id : 0 } ).batchSize(2).toArray() , "B1" )
-
-
-
-
-
diff --git a/jstests/core/regex3.js b/jstests/core/regex3.js
index 5ac8fab4c40..418492ce7f5 100644
--- a/jstests/core/regex3.js
+++ b/jstests/core/regex3.js
@@ -8,10 +8,12 @@ t.save( { name : "bob" } );
t.save( { name : "aaron" } );
assert.eq( 2 , t.find( { name : /^e.*/ } ).itcount() , "no index count" );
-assert.eq( 4 , t.find( { name : /^e.*/ } ).explain().nscanned , "no index explain" );
+assert.eq( 4 , t.find( { name : /^e.*/ } ).explain().executionStats.totalDocsExamined ,
+ "no index explain" );
t.ensureIndex( { name : 1 } );
assert.eq( 2 , t.find( { name : /^e.*/ } ).itcount() , "index count" );
-assert.eq( 2 , t.find( { name : /^e.*/ } ).explain().nscanned , "index explain" ); // SERVER-239
+assert.eq( 2 , t.find( { name : /^e.*/ } ).explain().executionStats.totalKeysExamined ,
+ "index explain" ); // SERVER-239
t.drop();
@@ -23,7 +25,8 @@ t.save( { name : "c" } );
assert.eq( 3 , t.find( { name : /^aa*/ } ).itcount() , "B ni" );
t.ensureIndex( { name : 1 } );
assert.eq( 3 , t.find( { name : /^aa*/ } ).itcount() , "B i 1" );
-assert.eq( 4 , t.find( { name : /^aa*/ } ).explain().nscanned , "B i 1 e" );
+assert.eq( 4 , t.find( { name : /^aa*/ } ).explain().executionStats.totalKeysExamined ,
+ "B i 1 e" );
assert.eq( 2 , t.find( { name : /^a[ab]/ } ).itcount() , "B i 2" );
assert.eq( 2 , t.find( { name : /^a[bc]/ } ).itcount() , "B i 3" );
diff --git a/jstests/core/regex4.js b/jstests/core/regex4.js
index fc26d691c91..e95daeafe7c 100644
--- a/jstests/core/regex4.js
+++ b/jstests/core/regex4.js
@@ -8,11 +8,13 @@ t.save( { name : "bob" } );
t.save( { name : "aaron" } );
assert.eq( 2 , t.find( { name : /^e.*/ } ).count() , "no index count" );
-assert.eq( 4 , t.find( { name : /^e.*/ } ).explain().nscanned , "no index explain" );
+assert.eq( 4 , t.find( { name : /^e.*/ } ).explain().executionStats.totalDocsExamined ,
+ "no index explain" );
//assert.eq( 2 , t.find( { name : { $ne : /^e.*/ } } ).count() , "no index count ne" ); // SERVER-251
t.ensureIndex( { name : 1 } );
assert.eq( 2 , t.find( { name : /^e.*/ } ).count() , "index count" );
-assert.eq( 2 , t.find( { name : /^e.*/ } ).explain().nscanned , "index explain" ); // SERVER-239
+assert.eq( 2 , t.find( { name : /^e.*/ } ).explain().executionStats.totalKeysExamined ,
+ "index explain" ); // SERVER-239
//assert.eq( 2 , t.find( { name : { $ne : /^e.*/ } } ).count() , "index count ne" ); // SERVER-251
diff --git a/jstests/core/regex5.js b/jstests/core/regex5.js
index 9f2549d7146..fab3eb9973c 100644
--- a/jstests/core/regex5.js
+++ b/jstests/core/regex5.js
@@ -36,18 +36,6 @@ t.ensureIndex( {x:1} );
print( "now indexed" );
doit();
-// check bound unions SERVER-322
-assert.eq( {
- x:[[1,1],
- [2.5,2.5],
- ["a","a"],
- ["b","e"],
- [/^b/,/^b/],
- [/^c/,/^c/],
- [/^d/,/^d/]]
- },
- t.find( { x : { $in: [ 1, 2.5, "a", "b", /^b/, /^c/, /^d/ ] } } ).explain().indexBounds );
-
// SERVER-505
assert.eq( 0, t.find( { x : { $all: [ "a", /^a/ ] } } ).itcount());
assert.eq( 2, t.find( { x : { $all: [ /^a/ ] } } ).itcount());
diff --git a/jstests/core/regex6.js b/jstests/core/regex6.js
index 54143248398..9ffa7499deb 100644
--- a/jstests/core/regex6.js
+++ b/jstests/core/regex6.js
@@ -11,19 +11,31 @@ t.save( { name : "[with]some?symbols" } );
t.ensureIndex( { name : 1 } );
assert.eq( 0 , t.find( { name : /^\// } ).count() , "index count" );
-assert.eq( 1 , t.find( { name : /^\// } ).explain().nscanned , "index explain 1" );
-assert.eq( 0 , t.find( { name : /^é/ } ).explain().nscanned , "index explain 2" );
-assert.eq( 0 , t.find( { name : /^\é/ } ).explain().nscanned , "index explain 3" );
-assert.eq( 1 , t.find( { name : /^\./ } ).explain().nscanned , "index explain 4" );
-assert.eq( 5 , t.find( { name : /^./ } ).explain().nscanned , "index explain 5" );
+assert.eq( 1 , t.find( { name : /^\// } ).explain().executionStats.totalKeysExamined ,
+ "index explain 1" );
+assert.eq( 0 , t.find( { name : /^é/ } ).explain().executionStats.totalKeysExamined ,
+ "index explain 2" );
+assert.eq( 0 , t.find( { name : /^\é/ } ).explain().executionStats.totalKeysExamined ,
+ "index explain 3" );
+assert.eq( 1 , t.find( { name : /^\./ } ).explain().executionStats.totalKeysExamined ,
+ "index explain 4" );
+assert.eq( 5 , t.find( { name : /^./ } ).explain().executionStats.totalKeysExamined ,
+ "index explain 5" );
// SERVER-2862
assert.eq( 0 , t.find( { name : /^\Qblah\E/ } ).count() , "index explain 6" );
-assert.eq( 1 , t.find( { name : /^\Qblah\E/ } ).explain().nscanned , "index explain 6" );
-assert.eq( 1 , t.find( { name : /^blah/ } ).explain().nscanned , "index explain 6" );
-assert.eq( 1 , t.find( { name : /^\Q[\Ewi\Qth]some?s\Eym/ } ).count() , "index explain 6" );
-assert.eq( 2 , t.find( { name : /^\Q[\Ewi\Qth]some?s\Eym/ } ).explain().nscanned , "index explain 6" );
-assert.eq( 2 , t.find( { name : /^bob/ } ).explain().nscanned , "index explain 6" ); // proof nscanned == count+1
+assert.eq( 1 , t.find( { name : /^\Qblah\E/ } ).explain().executionStats.totalKeysExamined ,
+ "index explain 6" );
+assert.eq( 1 , t.find( { name : /^blah/ } ).explain().executionStats.totalKeysExamined ,
+ "index explain 6" );
+assert.eq( 1 , t.find( { name : /^\Q[\Ewi\Qth]some?s\Eym/ } ).count() , "index count 2" );
+assert.eq( 2 , t.find( { name : /^\Q[\Ewi\Qth]some?s\Eym/ } ).explain()
+ .executionStats.totalKeysExamined ,
+ "index explain 6" );
+assert.eq( 2 , t.find( { name : /^bob/ } ).explain().executionStats.totalKeysExamined ,
+ "index explain 6" ); // proof executionStats.totalKeysExamined == count+1
-assert.eq( 1, t.find( { name : { $regex : "^e", $gte: "emily" } } ).explain().nscanned , "ie7" );
-assert.eq( 1, t.find( { name : { $gt : "a", $regex: "^emily" } } ).explain().nscanned , "ie7" );
+assert.eq( 1, t.find( { name : { $regex : "^e", $gte: "emily" } } ).explain()
+ .executionStats.totalKeysExamined , "ie7" );
+assert.eq( 1, t.find( { name : { $gt : "a", $regex: "^emily" } } ).explain()
+ .executionStats.totalKeysExamined , "ie7" );
diff --git a/jstests/core/rename.js b/jstests/core/rename.js
index a8eb6c911c4..9c3ce2c18b2 100644
--- a/jstests/core/rename.js
+++ b/jstests/core/rename.js
@@ -24,7 +24,6 @@ assert( db.getCollectionNames().indexOf( "jstests_rename_b" ) >= 0 );
assert( db.getCollectionNames().indexOf( "jstests_rename_a" ) < 0 );
assert.eq( 3, db.jstests_rename_b.getIndexes().length );
assert.eq( 0, db.jstests_rename_a.getIndexes().length );
-assert( b.find( {a:1} ).explain().cursor.match( /^BtreeCursor/ ) );
// now try renaming a capped collection
diff --git a/jstests/core/rename7.js b/jstests/core/rename7.js
index df4267a7583..ed64d2b8ec7 100644
--- a/jstests/core/rename7.js
+++ b/jstests/core/rename7.js
@@ -27,7 +27,6 @@ assert( db_a.getCollectionNames().indexOf( "rename7" ) < 0 );
assert.eq( 3, b.find().count() );
assert( db_b.getCollectionNames().indexOf( "rename7" ) >= 0 );
-assert( b.find( {a: 1} ).explain().cursor.match( /^BtreeCursor/ ) );
a.drop();
b.drop();
diff --git a/jstests/core/repair.js b/jstests/core/repair.js
deleted file mode 100644
index 52dcf3ceafc..00000000000
--- a/jstests/core/repair.js
+++ /dev/null
@@ -1,30 +0,0 @@
-mydb = db.getSisterDB( "repair_test1" )
-
-t = mydb.jstests_repair;
-t.drop();
-
-t.save( { i:1 } );
-doc = t.findOne();
-t.ensureIndex( { i : 1 } );
-assert.eq( 2, t.getIndexes().length );
-ex = t.find( { i : 1 } ).explain();
-
-assert.commandWorked( mydb.repairDatabase() );
-
-v = t.validate();
-assert( v.valid , "not valid! " + tojson( v ) );
-
-assert.eq( 1, t.count() );
-assert.eq( doc, t.findOne() );
-
-assert.eq( 2, t.getIndexes().length, tojson( t.getIndexes() ) );
-var explainAfterRepair = t.find( { i : 1 } ).explain();
-
-// Remove "millis" and "nYields" fields. We're interested in the other fields.
-// It's not relevant for both explain() operations to have
-// the same execution time.
-delete ex[ "millis" ];
-delete ex[ "nYields" ];
-delete explainAfterRepair[ "millis" ];
-delete explainAfterRepair[ "nYields" ];
-assert.eq( ex, explainAfterRepair );
diff --git a/jstests/core/sortg.js b/jstests/core/sortg.js
index bde4ad70061..52b5129f870 100644
--- a/jstests/core/sortg.js
+++ b/jstests/core/sortg.js
@@ -19,16 +19,11 @@ function memoryException( sortSpec, querySpec ) {
t.find( querySpec ).sort( sortSpec ).batchSize( 1000 ).itcount()
} );
assert( ex.toString().match( /sort/ ) );
- assert.throws( function() {
- t.find( querySpec ).sort( sortSpec ).batchSize( 1000 ).explain( true )
- } );
- assert( ex.toString().match( /sort/ ) );
}
function noMemoryException( sortSpec, querySpec ) {
querySpec = querySpec || {};
t.find( querySpec ).sort( sortSpec ).batchSize( 1000 ).itcount();
- t.find( querySpec ).sort( sortSpec ).batchSize( 1000 ).explain( true );
}
// Unindexed sorts.
diff --git a/jstests/core/sorth.js b/jstests/core/sorth.js
index 1072975a3ec..e520ee50454 100644
--- a/jstests/core/sorth.js
+++ b/jstests/core/sorth.js
@@ -33,8 +33,8 @@ function find( query ) {
function checkMatches( expectedMatch, query ) {
result = find( query ).toArray();
assertMatches( expectedMatch, result );
- explain = find( query ).explain();
- assert.eq( expectedMatch.length || 1, explain.n );
+ var count = find( query ).itcount();
+ assert.eq( expectedMatch.length || 1, count );
}
/** Reset data, index, and _sort and _hint globals. */
diff --git a/jstests/core/sortk.js b/jstests/core/sortk.js
index 3895a34c3ac..20ef08f7cca 100644
--- a/jstests/core/sortk.js
+++ b/jstests/core/sortk.js
@@ -40,7 +40,7 @@ assert.eq( 1, simpleQueryWithLimit( -1 ).skip( 1 )[ 0 ].b );
// No limit is applied.
assert.eq( 6, simpleQueryWithLimit( 0 ).itcount() );
-assert.eq( 6, simpleQueryWithLimit( 0 ).explain().nscanned );
+assert.eq( 6, simpleQueryWithLimit( 0 ).explain().executionStats.totalKeysExamined );
assert.eq( 5, simpleQueryWithLimit( 0 ).skip( 1 ).itcount() );
// The query has additional constriants, preventing limit optimization.
@@ -55,7 +55,7 @@ assert.eq( 0, simpleQuery( {}, { a:-1, b:1 } ).limit( -1 )[ 0 ].b );
// Without a hint, multiple cursors are attempted.
assert.eq( 0, t.find( { a:{ $in:[ 1, 2 ] } } ).sort( { b:1 } ).limit( -1 )[ 0 ].b );
explain = t.find( { a:{ $in:[ 1, 2 ] } } ).sort( { b:1 } ).limit( -1 ).explain( true );
-assert.eq( 1, explain.n );
+assert.eq( 1, explain.executionStats.nReturned );
// The expected first result now comes from the first interval.
t.remove( { b:0 } );
diff --git a/jstests/core/type1.js b/jstests/core/type1.js
index 518e36728e7..7f101a2c027 100644
--- a/jstests/core/type1.js
+++ b/jstests/core/type1.js
@@ -11,7 +11,6 @@ assert.eq( 4 , t.find().count() , "A1" );
assert.eq( 1 , t.find( { x : { $type : 1 } } ).count() , "A2" );
assert.eq( 3 , t.find( { x : { $type : 2 } } ).count() , "A3" );
assert.eq( 0 , t.find( { x : { $type : 3 } } ).count() , "A4" );
-assert.eq( 4 , t.find( { x : { $type : 1 } } ).explain().nscanned , "A5" );
t.ensureIndex( { x : 1 } );
@@ -20,5 +19,4 @@ assert.eq( 4 , t.find().count() , "B1" );
assert.eq( 1 , t.find( { x : { $type : 1 } } ).count() , "B2" );
assert.eq( 3 , t.find( { x : { $type : 2 } } ).count() , "B3" );
assert.eq( 0 , t.find( { x : { $type : 3 } } ).count() , "B4" );
-assert.eq( 1 , t.find( { x : { $type : 1 } } ).explain().nscanned , "B5" );
-assert.eq( 1 , t.find( { x : { $regex:"f", $type : 2 } } ).count() , "B3" ); \ No newline at end of file
+assert.eq( 1 , t.find( { x : { $regex:"f", $type : 2 } } ).count() , "B3" );
diff --git a/jstests/core/type3.js b/jstests/core/type3.js
index 82a8b8ae7fc..59eb89f642e 100644
--- a/jstests/core/type3.js
+++ b/jstests/core/type3.js
@@ -21,13 +21,11 @@ assert.eq( 1, t.find( {a:{$type:11}} ).hint( {a:1} ).itcount() );
// Type jstNULL
t.remove({});
-assert.eq( [[null,null]], t.find( {a:{$type:10}} ).hint( {a:1} ).explain().indexBounds.a );
+t.save( {a:null} );
+assert.eq( 1, t.find( {a:{$type:10}} ).hint( {a:1} ).itcount() );
// Type Undefined
t.remove({});
-// 'null' is the client friendly version of undefined.
-assert.eq( [[null,null]], t.find( {a:{$type:6}} ).hint( {a:1} ).explain().indexBounds.a );
-
t.save( {a:undefined} );
assert.eq( 1, t.find( {a:{$type:6}} ).hint( {a:1} ).itcount() );
@@ -35,12 +33,6 @@ assert.eq( 1, t.find( {a:{$type:6}} ).hint( {a:1} ).itcount() );
t.save( {a:null} );
assert.eq( 1, t.find( {a:{$type:6}} ).hint( {a:1} ).itcount() );
-t.remove({});
-// Type MinKey
-assert.eq( [[{$minElement:1},{$minElement:1}]], t.find( {a:{$type:-1}} ).hint( {a:1} ).explain().indexBounds.a );
-// Type MaxKey
-assert.eq( [[{$maxElement:1},{$maxElement:1}]], t.find( {a:{$type:127}} ).hint( {a:1} ).explain().indexBounds.a );
-
// Type Timestamp
t.remove({});
t.save( {a:new Timestamp()} );
diff --git a/jstests/core/useindexonobjgtlt.js b/jstests/core/useindexonobjgtlt.js
index 06e94a812f6..e407ad7cf0a 100755
--- a/jstests/core/useindexonobjgtlt.js
+++ b/jstests/core/useindexonobjgtlt.js
@@ -5,11 +5,10 @@ t.ensureIndex( { metro : 1 } )
assert( db.factories.find().count() )
-assert( db.factories.find( { metro: { city: "New York", state: "NY" } } ).count() )
-
-assert( db.factories.find( { metro: { city: "New York", state: "NY" } } ).explain().cursor == "BtreeCursor metro_1" )
-
-assert( db.factories.find( { metro: { $gte : { city: "New York" } } } ).explain().cursor == "BtreeCursor metro_1" )
-
-assert( db.factories.find( { metro: { $gte : { city: "New York" } } } ).count() == 1 )
+assert.eq( 1, db.factories.find( { metro: { city: "New York", state: "NY" } } )
+ .hint({metro: 1})
+ .count() )
+assert.eq( 1, db.factories.find( { metro: { $gte : { city: "New York" } } } )
+ .hint({metro: 1})
+ .count() )
diff --git a/jstests/libs/analyze_plan.js b/jstests/libs/analyze_plan.js
new file mode 100644
index 00000000000..9c2ebffd890
--- /dev/null
+++ b/jstests/libs/analyze_plan.js
@@ -0,0 +1,80 @@
+// Contains helpers for checking, based on the explain output, properties of a
+// plan. For instance, there are helpers for checking whether a plan is a collection
+// scan or whether the plan is covered (index only).
+
+/**
+ * Given the root stage of explain's BSON representation of a query plan ('root'),
+ * returns true if the plan has a stage called 'stage'.
+ */
+function planHasStage(root, stage) {
+ if (root.stage === stage) {
+ return true;
+ }
+ else if ("inputStage" in root) {
+ return planHasStage(root.inputStage, stage);
+ }
+ else if ("inputStages" in root) {
+ for (var i = 0; i < root.inputStages.length; i++) {
+ if (planHasStage(root.inputStages[i], stage)) {
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
+/**
+ * A query is covered iff it does *not* have a FETCH stage or a COLLSCAN.
+ *
+ * Given the root stage of explain's BSON representation of a query plan ('root'),
+ * returns true if the plan is index only. Otherwise returns false.
+ */
+function isIndexOnly(root) {
+ return !planHasStage(root, "FETCH") && !planHasStage(root, "COLLSCAN");
+}
+
+/**
+ * Returns true if the BSON representation of a plan rooted at 'root' is using
+ * an index scan, and false otherwise.
+ */
+function isIxscan(root) {
+ return planHasStage(root, "IXSCAN");
+}
+
+/**
+ * Returns true if the BSON representation of a plan rooted at 'root' is using
+ * the idhack fast path, and false otherwise.
+ */
+function isIdhack(root) {
+ return planHasStage(root, "IDHACK");
+}
+
+/**
+ * Returns true if the BSON representation of a plan rooted at 'root' is using
+ * a collection scan, and false otherwise.
+ */
+function isCollscan(root) {
+ return planHasStage(root, "COLLSCAN");
+}
+
+/**
+ * Get the number of chunk skips for the BSON exec stats tree rooted at 'root'.
+ */
+function getChunkSkips(root) {
+ if (root.stage === "SHARDING_FILTER") {
+ return root.chunkSkips;
+ }
+ else if ("inputStage" in root) {
+ return getChunkSkips(root.inputStage);
+ }
+ else if ("inputStages" in root) {
+ var skips = 0;
+ for (var i = 0; i < root.inputStages.length; i++) {
+ skips += getChunkSkips(root.inputStages[i]);
+ }
+ return skips;
+ }
+
+ return 0;
+}
diff --git a/jstests/noPassthrough/indexbg1.js b/jstests/noPassthrough/indexbg1.js
index 7118fc64a6c..640e703d1aa 100644
--- a/jstests/noPassthrough/indexbg1.js
+++ b/jstests/noPassthrough/indexbg1.js
@@ -61,8 +61,8 @@ while( 1 ) { // if indexing finishes before we can run checks, try indexing w/ m
}
var ex = t.find( {i:100} ).limit(-1).explain()
printjson(ex)
- assert.eq( "BasicCursor", ex.cursor, "used btree cursor" );
- assert( ex.nscanned < 1000 , "took too long to find 100: " + tojson( ex ) );
+ assert( ex.executionStats.totalKeysExamined < 1000 ,
+ "took too long to find 100: " + tojson( ex ) );
assert.writeOK(t.remove({ i: 40 }, true )); // table scan
@@ -101,7 +101,6 @@ print("our tests done, waiting for parallel to finish");
waitParallel();
print("finished");
-assert.eq( "BtreeCursor i_1", t.find( {i:100} ).explain().cursor );
assert.eq( 1, t.count( {i:-10} ) );
assert.eq( 1, t.count( {i:-2} ) );
assert.eq( 1, t.count( {i:-50} ) );
diff --git a/jstests/noPassthroughWithMongod/clonecollection.js b/jstests/noPassthroughWithMongod/clonecollection.js
index c1f11652ce3..e352554e0ac 100644
--- a/jstests/noPassthroughWithMongod/clonecollection.js
+++ b/jstests/noPassthroughWithMongod/clonecollection.js
@@ -35,7 +35,7 @@ assert.eq( 2, t.system.indexes.find().count(), "expected index missing" );
// Verify index works
x = t.a.find( { i: 50 } ).hint( { i: 1 } ).explain()
printjson( x )
-assert.eq( 50, x.indexBounds.i[0][0] , "verify 1" );
+assert.eq( 1, x.executionStats.nReturned , "verify 1" );
assert.eq( 1, t.a.find( { i: 50 } ).hint( { i: 1 } ).toArray().length, "match length did not match expected" );
// Check that capped-ness is preserved on clone
diff --git a/jstests/noPassthroughWithMongod/index_check9.js b/jstests/noPassthroughWithMongod/index_check9.js
index fd1b1d5eaa1..51840645fe1 100644
--- a/jstests/noPassthroughWithMongod/index_check9.js
+++ b/jstests/noPassthroughWithMongod/index_check9.js
@@ -97,13 +97,10 @@ function check() {
}
s = sort();
c1 = t.find( spec, { _id:null } ).sort( s ).hint( idx ).toArray();
- c2 = t.find( spec ).sort( s ).explain().nscanned;
- c3 = t.find( spec, { _id:null } ).sort( s ).hint( {$natural:1} ).toArray();
+ c2 = t.find( spec, { _id:null } ).sort( s ).hint( {$natural:1} ).toArray();
count = t.count( spec );
- // assert.eq( c1, c3, "spec: " + tojson( spec ) + ", sort: " + tojson( s ) );
- // assert.eq( c1.length, c2 );
- assert.eq( c1, c3 );
- assert.eq( c3.length, count );
+ assert.eq( c1, c2 );
+ assert.eq( c2.length, count );
}
var bulk = t.initializeUnorderedBulkOp();
diff --git a/jstests/noPassthroughWithMongod/index_multi.js b/jstests/noPassthroughWithMongod/index_multi.js
index 2f541c9641d..1e6b349dd31 100644
--- a/jstests/noPassthroughWithMongod/index_multi.js
+++ b/jstests/noPassthroughWithMongod/index_multi.js
@@ -119,7 +119,7 @@ print("Make sure we end up with 64 indexes");
for (var i in specs) {
print("trying to hint on "+tojson(specs[i]));
var explain = coll.find().hint(specs[i]).explain();
- assert.eq(multikey[i], explain.isMultiKey, tojson(explain));
+ assert("queryPlanner" in explain, tojson(explain));
}
print("SUCCESS!");
diff --git a/jstests/sharding/auth_repl.js b/jstests/sharding/auth_repl.js
index f4deba6f195..568cbc4a5ac 100644
--- a/jstests/sharding/auth_repl.js
+++ b/jstests/sharding/auth_repl.js
@@ -102,9 +102,9 @@ assert.eq(1, testDB.auth('a', 'a'));
// Find out the current cached secondary in the repl connection
conn.setSlaveOk(true);
-var secHost = testColl.find().readPref('secondary').explain().server;
+var serverInfo = testColl.find().readPref('secondary').explain().serverInfo;
var secNodeIdx = -1;
-var secPortStr = secHost.split(':')[1];
+var secPortStr = serverInfo.port.toString();
for (var x = 0; x < nodeCount; x++) {
var nodePortStr = replTest.nodes[x].host.split(':')[1];
diff --git a/jstests/sharding/auth_slaveok_routing.js b/jstests/sharding/auth_slaveok_routing.js
index f89a6da086a..a6552590351 100644
--- a/jstests/sharding/auth_slaveok_routing.js
+++ b/jstests/sharding/auth_slaveok_routing.js
@@ -13,8 +13,8 @@
* @return {boolean} true if query was routed to a secondary node.
*/
function doesRouteToSec( coll, query ) {
- var explain = coll.find( query ).explain();
- var conn = new Mongo( explain.server );
+ var serverInfo = coll.find( query ).explain().serverInfo;
+ var conn = new Mongo( serverInfo.host + ":" + serverInfo.port.toString());
var cmdRes = conn.getDB( 'admin' ).runCommand({ isMaster: 1 });
jsTest.log('isMaster: ' + tojson(cmdRes));
diff --git a/jstests/sharding/covered_shard_key_indexes.js b/jstests/sharding/covered_shard_key_indexes.js
index 989d3a80198..22e3aebd984 100644
--- a/jstests/sharding/covered_shard_key_indexes.js
+++ b/jstests/sharding/covered_shard_key_indexes.js
@@ -3,6 +3,9 @@
// particular queries
//
+// Include helpers for analyzing explain output.
+load("jstests/libs/analyze_plan.js");
+
var options = { separateConfig : true };
var st = new ShardingTest({ shards : 1, other : options });
@@ -28,7 +31,7 @@ assert.writeOK(coll.insert({ _id : true, a : true, b : true }));
var shardExplain = function(mongosExplainDoc) {
var explainDoc = mongosExplainDoc.shards[shards[0].host][0];
printjson(explainDoc);
- return explainDoc;
+ return explainDoc.executionStats;
};
assert.commandWorked(st.shard0.adminCommand({ setParameter: 1,
@@ -37,23 +40,23 @@ assert.commandWorked(st.shard0.adminCommand({ setParameter: 1,
//
// Index without shard key query - not covered
assert.commandWorked(coll.ensureIndex({ a : 1 }));
-assert.eq(1, shardExplain(coll.find({ a : true }).explain()).nscannedObjects);
-assert.eq(1, shardExplain(coll.find({ a : true }, { _id : 1, a : 1 }).explain()).nscannedObjects);
+assert.eq(1, shardExplain(coll.find({ a : true }).explain()).totalDocsExamined);
+assert.eq(1, shardExplain(coll.find({ a : true }, { _id : 1, a : 1 }).explain()).totalDocsExamined);
//
// Index with shard key query - covered when projecting
assert.commandWorked(coll.dropIndexes());
assert.commandWorked(coll.ensureIndex({ a : 1, _id : 1 }));
-assert.eq(1, shardExplain(coll.find({ a : true }).explain()).nscannedObjects);
-assert.eq(0, shardExplain(coll.find({ a : true }, { _id : 1, a : 1 }).explain()).nscannedObjects);
+assert.eq(1, shardExplain(coll.find({ a : true }).explain()).totalDocsExamined);
+assert.eq(0, shardExplain(coll.find({ a : true }, { _id : 1, a : 1 }).explain()).totalDocsExamined);
//
// Compound index with shard key query - covered when projecting
assert.commandWorked(coll.dropIndexes());
assert.commandWorked(coll.ensureIndex({ a : 1, b : 1, _id : 1 }));
-assert.eq(1, shardExplain(coll.find({ a : true, b : true }).explain()).nscannedObjects);
+assert.eq(1, shardExplain(coll.find({ a : true, b : true }).explain()).totalDocsExamined);
assert.eq(0, shardExplain(coll.find({ a : true, b : true }, { _id : 1, a : 1 })
- .explain()).nscannedObjects);
+ .explain()).totalDocsExamined);
//
//
@@ -68,14 +71,14 @@ assert.writeOK(coll.insert({ _id : true, a : true, b : true }));
//
// Index without shard key query - not covered
assert.commandWorked(coll.ensureIndex({ a : 1 }));
-assert.eq(1, shardExplain(coll.find({ a : true }).explain()).nscannedObjects);
-assert.eq(1, shardExplain(coll.find({ a : true }, { _id : 0, a : 1 }).explain()).nscannedObjects);
+assert.eq(1, shardExplain(coll.find({ a : true }).explain()).totalDocsExamined);
+assert.eq(1, shardExplain(coll.find({ a : true }, { _id : 0, a : 1 }).explain()).totalDocsExamined);
//
// Index with shard key query - can't be covered since hashed index
assert.commandWorked(coll.dropIndex({ a : 1 }));
-assert.eq(1, shardExplain(coll.find({ _id : true }).explain()).nscannedObjects);
-assert.eq(1, shardExplain(coll.find({ _id : true }, { _id : 0 }).explain()).nscannedObjects);
+assert.eq(1, shardExplain(coll.find({ _id : true }).explain()).totalDocsExamined);
+assert.eq(1, shardExplain(coll.find({ _id : true }, { _id : 0 }).explain()).totalDocsExamined);
//
//
@@ -90,25 +93,25 @@ assert.writeOK(coll.insert({ _id : true, a : true, b : true, c : true, d : true
//
// Index without shard key query - not covered
assert.commandWorked(coll.ensureIndex({ c : 1 }));
-assert.eq(1, shardExplain(coll.find({ c : true }).explain()).nscannedObjects);
+assert.eq(1, shardExplain(coll.find({ c : true }).explain()).totalDocsExamined);
assert.eq(1, shardExplain(coll.find({ c : true }, { _id : 0, a : 1, b : 1, c : 1 })
- .explain()).nscannedObjects);
+ .explain()).totalDocsExamined);
//
// Index with shard key query - covered when projecting
assert.commandWorked(coll.dropIndex({ c : 1 }));
assert.commandWorked(coll.ensureIndex({ c : 1, b : 1, a : 1 }));
-assert.eq(1, shardExplain(coll.find({ c : true }).explain()).nscannedObjects);
+assert.eq(1, shardExplain(coll.find({ c : true }).explain()).totalDocsExamined);
assert.eq(0, shardExplain(coll.find({ c : true }, { _id : 0, a : 1, b : 1, c : 1 })
- .explain()).nscannedObjects);
+ .explain()).totalDocsExamined);
//
// Compound index with shard key query - covered when projecting
assert.commandWorked(coll.dropIndex({ c : 1, b : 1, a : 1 }));
assert.commandWorked(coll.ensureIndex({ c : 1, d : 1, a : 1, b : 1, _id : 1 }));
-assert.eq(1, shardExplain(coll.find({ c : true, d : true }).explain()).nscannedObjects);
+assert.eq(1, shardExplain(coll.find({ c : true, d : true }).explain()).totalDocsExamined);
assert.eq(0, shardExplain(coll.find({ c : true, d : true }, { a : 1, b : 1, c : 1, d : 1 })
- .explain()).nscannedObjects);
+ .explain()).totalDocsExamined);
//
//
@@ -123,17 +126,17 @@ assert.writeOK(coll.insert({ _id : true, a : { b : true }, c : true }));
//
// Index without shard key query - not covered
assert.commandWorked(coll.ensureIndex({ c : 1 }));
-assert.eq(1, shardExplain(coll.find({ c : true }).explain()).nscannedObjects);
+assert.eq(1, shardExplain(coll.find({ c : true }).explain()).totalDocsExamined);
assert.eq(1, shardExplain(coll.find({ c : true }, { _id : 0, 'a.b' : 1, c : 1 })
- .explain()).nscannedObjects);
+ .explain()).totalDocsExamined);
//
// Index with shard key query - nested query not covered even when projecting
assert.commandWorked(coll.dropIndex({ c : 1 }));
assert.commandWorked(coll.ensureIndex({ c : 1, 'a.b' : 1 }));
-assert.eq(1, shardExplain(coll.find({ c : true }).explain()).nscannedObjects);
+assert.eq(1, shardExplain(coll.find({ c : true }).explain()).totalDocsExamined);
assert.eq(1, shardExplain(coll.find({ c : true }, { _id : 0, 'a.b' : 1, c : 1 })
- .explain()).nscannedObjects);
+ .explain()).totalDocsExamined);
//
//
@@ -149,9 +152,9 @@ assert.writeOK(st.shard0.getCollection(coll.toString()).insert({ _id : "bad data
// Index without shard key query - not covered but succeeds
assert.commandWorked(coll.ensureIndex({ c : 1 }));
var explain = shardExplain(coll.find({ c : true }).explain());
-assert.eq(0, explain.n);
-assert.eq(1, explain.nscannedObjects);
-assert.eq(1, explain.nChunkSkips);
+assert.eq(0, explain.nReturned);
+assert.eq(1, explain.totalDocsExamined);
+assert.eq(1, getChunkSkips(explain.executionStages));
//
// Index with shard key query - covered and succeeds and returns result
@@ -160,9 +163,9 @@ assert.eq(1, explain.nChunkSkips);
assert.commandWorked(coll.ensureIndex({ c : 1, a : 1 }));
jsTest.log(tojson(coll.find({ c : true }, { _id : 0, a : 1, c : 1 }).toArray()));
var explain = shardExplain(coll.find({ c : true }, { _id : 0, a : 1, c : 1 }).explain());
-assert.eq(1, explain.n);
-assert.eq(0, explain.nscannedObjects);
-assert.eq(0, explain.nChunkSkips);
+assert.eq(1, explain.nReturned);
+assert.eq(0, explain.totalDocsExamined);
+assert.eq(0, getChunkSkips(explain.executionStages));
jsTest.log("DONE!");
st.stop();
diff --git a/jstests/sharding/explain1.js b/jstests/sharding/explain1.js
deleted file mode 100644
index 68e523af1aa..00000000000
--- a/jstests/sharding/explain1.js
+++ /dev/null
@@ -1,42 +0,0 @@
-// Check explain() results reported for a sharded cluster, in particular nscannedObjects.
-// SERVER-4161
-
-s = new ShardingTest( "explain1" , 2 , 2 );
-
-// Tests can be invalidated by the balancer.
-s.stopBalancer()
-
-db = s.getDB( "test" );
-
-s.adminCommand( { enablesharding : "test" } );
-s.adminCommand( { shardcollection : "test.foo" , key : { _id : 1 } } );
-
-t = db.foo;
-for( i = 0; i < 10; ++i ) {
- t.save( { a:i } );
-}
-
-// Without an index.
-explain = t.find( { a:{ $gte:5 } } ).explain();
-assert.eq( explain.cursor, 'BasicCursor' );
-assert.eq( explain.n, 5 );
-assert.eq( explain.nscanned, 10 );
-assert.eq( explain.nscannedObjects, 10 );
-
-// With an index.
-t.ensureIndex( { a:1 } );
-explain = t.find( { a:{ $gte:5 } } ).explain();
-assert.eq( explain.cursor, 'BtreeCursor a_1' );
-assert.eq( explain.n, 5 );
-assert.eq( explain.nscanned, 5 );
-assert.eq( explain.nscannedObjects, 5 );
-
-// With a covered index.
-t.ensureIndex( { a:1 } );
-explain = t.find( { a:{ $gte:5 } }, { _id:0, a:1 } ).explain();
-assert.eq( explain.cursor, 'BtreeCursor a_1' );
-assert.eq( explain.n, 5 );
-assert.eq( explain.nscanned, 5 );
-assert.eq( explain.nscannedObjects, 5 ); // Queries against sharded collections are never covered.
-
-s.stop();
diff --git a/jstests/sharding/large_skip_one_shard.js b/jstests/sharding/large_skip_one_shard.js
index 0de8615b75a..ec8f250de03 100644
--- a/jstests/sharding/large_skip_one_shard.js
+++ b/jstests/sharding/large_skip_one_shard.js
@@ -42,7 +42,7 @@ function testSelectWithSkip(coll){
}
// What we're actually testing
- assert.lt(explain.n, 90);
+ assert.lt(explain.executionStats.nReturned, 90);
}
testSelectWithSkip(collSharded);
diff --git a/jstests/sharding/limit_push.js b/jstests/sharding/limit_push.js
index b508e307eb7..91754ee9682 100644
--- a/jstests/sharding/limit_push.js
+++ b/jstests/sharding/limit_push.js
@@ -42,7 +42,8 @@ assert.eq("ParallelSort", exp.clusteredType, "Not a ParallelSort");
var k = 0;
for (var j in exp.shards) {
- assert.eq( 1 , exp.shards[j][0].n, "'n' is not 1 from shard000" + k.toString());
+ assert.eq( 1 , exp.shards[j][0].executionStats.nReturned,
+ "'n' is not 1 from shard000" + k.toString());
k++
}
diff --git a/jstests/sharding/read_pref.js b/jstests/sharding/read_pref.js
index b930678a2b5..431fe0aa6d5 100755
--- a/jstests/sharding/read_pref.js
+++ b/jstests/sharding/read_pref.js
@@ -114,20 +114,23 @@ var doTest = function(useDollarQuerySyntax) {
// Read pref should work without slaveOk
var explain = getExplain("secondary");
- assert.neq( primaryNode.name, explain.server );
+ var explainServer = explain.serverInfo.host + ":" + explain.serverInfo.port.toString();
+ assert.neq( primaryNode.name, explainServer );
conn.setSlaveOk();
// It should also work with slaveOk
explain = getExplain("secondary");
- assert.neq( primaryNode.name, explain.server );
+ explainServer = explain.serverInfo.host + ":" + explain.serverInfo.port.toString();
+ assert.neq( primaryNode.name, explainServer );
// Check that $readPreference does not influence the actual query
- assert.eq( 1, explain.n );
+ assert.eq( 1, explain.executionStats.nReturned );
explain = getExplain("secondaryPreferred", [{ s: "2" }]);
- checkTag( explain.server, { s: "2" });
- assert.eq( 1, explain.n );
+ explainServer = explain.serverInfo.host + ":" + explain.serverInfo.port.toString();
+ checkTag( explainServer, { s: "2" });
+ assert.eq( 1, explain.executionStats.nReturned );
// Cannot use tags with primaryOnly
assert.throws( function() {
@@ -136,21 +139,25 @@ var doTest = function(useDollarQuerySyntax) {
// Ok to use empty tags on primaryOnly
explain = coll.find().readPref("primary", [{}]).explain();
- assert.eq(primaryNode.name, explain.server);
+ explainServer = explain.serverInfo.host + ":" + explain.serverInfo.port.toString();
+ assert.eq(primaryNode.name, explainServer);
explain = coll.find().readPref("primary", []).explain();
- assert.eq(primaryNode.name, explain.server);
+ explainServer = explain.serverInfo.host + ":" + explain.serverInfo.port.toString();
+ assert.eq(primaryNode.name, explainServer);
// Check that mongos will try the next tag if nothing matches the first
explain = getExplain("secondary", [{ z: "3" }, { dc: "jp" }]);
- checkTag( explain.server, { dc: "jp" });
- assert.eq( 1, explain.n );
+ explainServer = explain.serverInfo.host + ":" + explain.serverInfo.port.toString();
+ checkTag( explainServer, { dc: "jp" });
+ assert.eq( 1, explain.executionStats.nReturned );
// Check that mongos will fallback to primary if none of tags given matches
explain = getExplain("secondaryPreferred", [{ z: "3" }, { dc: "ph" }]);
+ explainServer = explain.serverInfo.host + ":" + explain.serverInfo.port.toString();
// Call getPrimary again since the primary could have changed after the restart.
- assert.eq(replTest.getPrimary().name, explain.server);
- assert.eq( 1, explain.n );
+ assert.eq(replTest.getPrimary().name, explainServer);
+ assert.eq( 1, explain.executionStats.nReturned );
// Kill all members except one
var stoppedNodes = [];
@@ -171,8 +178,9 @@ var doTest = function(useDollarQuerySyntax) {
// Test to make sure that connection is ok, in prep for priOnly test
explain = getExplain("nearest");
- assert.eq( explain.server, replTest.nodes[NODES - 1].name );
- assert.eq( 1, explain.n );
+ explainServer = explain.serverInfo.host + ":" + explain.serverInfo.port.toString();
+ assert.eq( explainServer, replTest.nodes[NODES - 1].name );
+ assert.eq( 1, explain.executionStats.nReturned );
// Should assert if request with priOnly but no primary
assert.throws( function(){
diff --git a/jstests/sharding/shard2.js b/jstests/sharding/shard2.js
index 006a9340682..a229c4dc4b1 100644
--- a/jstests/sharding/shard2.js
+++ b/jstests/sharding/shard2.js
@@ -141,12 +141,14 @@ placeCheck( 7 );
db.foo.find().sort( { _id : 1 } ).forEach( function(z){ print( z._id ); } )
zzz = db.foo.find().explain();
-assert.eq( 6 , zzz.nscanned , "EX1a" )
-assert.eq( 6 , zzz.n , "EX1b" )
-
-zzz = db.foo.find().sort( { _id : 1 } ).explain();
-assert.eq( 6 , zzz.nscanned , "EX2a" )
-assert.eq( 6 , zzz.n , "EX2a" )
+assert.eq( 0 , zzz.totalKeysExamined , "EX1a" )
+assert.eq( 6 , zzz.nReturned , "EX1b" )
+assert.eq( 6 , zzz.totalDocsExamined , "EX1c" )
+
+zzz = db.foo.find().hint( { _id : 1 } ).sort( { _id : 1 } ).explain();
+assert.eq( 6 , zzz.totalKeysExamined , "EX2a" )
+assert.eq( 6 , zzz.nReturned , "EX2b" )
+assert.eq( 6 , zzz.totalDocsExamined , "EX2c" )
// getMore
assert.eq( 4 , db.foo.find().limit(-4).toArray().length , "getMore 1" );
diff --git a/jstests/sharding/shard3.js b/jstests/sharding/shard3.js
index 8b36f809686..5ecf1fb8140 100644
--- a/jstests/sharding/shard3.js
+++ b/jstests/sharding/shard3.js
@@ -1,5 +1,8 @@
// shard3.js
+// Include helpers for analyzing explain output.
+load("jstests/libs/analyze_plan.js");
+
s = new ShardingTest( "shard3" , 2 , 1 , 2 , { enableBalancer : 1 } );
s2 = s._mongos[1];
@@ -64,10 +67,16 @@ var total = doCounts( "before wrong save" )
assert.writeOK(secondary.insert( { _id : 111 , num : -3 } ));
doCounts( "after wrong save" , total , true )
e = a.find().explain();
-assert.eq( 3 , e.n , "ex1" )
-assert.eq( 4 , e.nscanned , "ex2" )
-assert.eq( 4 , e.nscannedObjects , "ex3" )
-assert.eq( 1 , e.nChunkSkips , "ex4" )
+assert.eq( 3 , e.nReturned , "ex1" )
+assert.eq( 0 , e.totalKeysExamined , "ex2" )
+assert.eq( 4 , e.totalDocsExamined , "ex3" )
+
+var chunkSkips = 0;
+for (var shard in e.shards) {
+ var theShard = e.shards[shard][0];
+ chunkSkips += getChunkSkips(theShard.executionStats.executionStages);
+}
+assert.eq( 1 , chunkSkips , "ex4" )
// SERVER-4612
// make sure idhack obeys chunks
diff --git a/src/mongo/client/parallel.cpp b/src/mongo/client/parallel.cpp
index 3b9acd836e1..9d385823081 100644
--- a/src/mongo/client/parallel.cpp
+++ b/src/mongo/client/parallel.cpp
@@ -121,11 +121,13 @@ namespace mongo {
string cursorType;
BSONObj indexBounds;
BSONObj oldPlan;
-
+
long long millis = 0;
double numExplains = 0;
- map<string,long long> counters;
+ long long nReturned = 0;
+ long long keysExamined = 0;
+ long long docsExamined = 0;
map<string,list<BSONObj> > out;
{
@@ -151,31 +153,34 @@ namespace mongo {
y.append( temp );
- BSONObjIterator k( temp );
- while ( k.more() ) {
- BSONElement z = k.next();
- if ( z.fieldName()[0] != 'n' )
- continue;
- long long& c = counters[z.fieldName()];
- c += z.numberLong();
+ if (temp.hasField("executionStats")) {
+ // Here we assume that the shard gave us back explain 2.0 style output.
+ BSONObj execStats = temp["executionStats"].Obj();
+ if (execStats.hasField("nReturned")) {
+ nReturned += execStats["nReturned"].numberLong();
+ }
+ if (execStats.hasField("totalKeysExamined")) {
+ keysExamined += execStats["totalKeysExamined"].numberLong();
+ }
+ if (execStats.hasField("totalDocsExamined")) {
+ docsExamined += execStats["totalDocsExamined"].numberLong();
+ }
}
-
- millis += temp["millis"].numberLong();
- numExplains++;
-
- if ( temp["cursor"].type() == String ) {
- if ( cursorType.size() == 0 )
- cursorType = temp["cursor"].String();
- else if ( cursorType != temp["cursor"].String() )
- cursorType = "multiple";
+ else {
+ // Here we assume that the shard gave us back explain 1.0 style output.
+ if (temp.hasField("n")) {
+ nReturned += temp["n"].numberLong();
+ }
+ if (temp.hasField("nscanned")) {
+ keysExamined += temp["nscanned"].numberLong();
+ }
+ if (temp.hasField("nscannedObjects")) {
+ docsExamined += temp["nscannedObjects"].numberLong();
+ }
}
- if ( temp["indexBounds"].type() == Object )
- indexBounds = temp["indexBounds"].Obj();
-
- if ( temp["oldPlan"].type() == Object )
- oldPlan = temp["oldPlan"].Obj();
-
+ millis += temp["executionStats"]["executionTimeMillis"].numberLong();
+ numExplains++;
}
y.done();
}
@@ -183,8 +188,10 @@ namespace mongo {
}
b.append( "cursor" , cursorType );
- for ( map<string,long long>::iterator i=counters.begin(); i!=counters.end(); ++i )
- b.appendNumber( i->first , i->second );
+
+ b.appendNumber( "nReturned" , nReturned );
+ b.appendNumber( "totalKeysExamined" , keysExamined );
+ b.appendNumber( "totalDocsExamined" , docsExamined );
b.appendNumber( "millisShardTotal" , millis );
b.append( "millisShardAvg" ,
diff --git a/src/mongo/db/commands/count.cpp b/src/mongo/db/commands/count.cpp
index 89c0de4940b..df0ec5fe62c 100644
--- a/src/mongo/db/commands/count.cpp
+++ b/src/mongo/db/commands/count.cpp
@@ -35,7 +35,6 @@
#include "mongo/db/curop.h"
#include "mongo/db/query/get_executor.h"
#include "mongo/db/query/explain.h"
-#include "mongo/db/query/type_explain.h"
#include "mongo/util/log.h"
namespace mongo {
diff --git a/src/mongo/db/exec/index_scan.cpp b/src/mongo/db/exec/index_scan.cpp
index e5a8cfe2019..9bedd8db6c7 100644
--- a/src/mongo/db/exec/index_scan.cpp
+++ b/src/mongo/db/exec/index_scan.cpp
@@ -33,7 +33,6 @@
#include "mongo/db/index/index_access_method.h"
#include "mongo/db/index/index_cursor.h"
#include "mongo/db/index/index_descriptor.h"
-#include "mongo/db/query/explain.h"
#include "mongo/util/log.h"
namespace {
@@ -381,14 +380,7 @@ namespace mongo {
if (_specificStats.indexType.empty()) {
_specificStats.indexType = "BtreeCursor"; // TODO amName;
- // TODO this can be simplified once the new explain format is
- // the default. Probably won't need to include explain.h here either.
- if (enableNewExplain) {
- _specificStats.indexBounds = _params.bounds.toBSON();
- }
- else {
- _specificStats.indexBounds = _params.bounds.toLegacyBSON();
- }
+ _specificStats.indexBounds = _params.bounds.toBSON();
_specificStats.indexBoundsVerbose = _params.bounds.toString();
_specificStats.direction = _params.direction;
diff --git a/src/mongo/db/exec/multi_plan.cpp b/src/mongo/db/exec/multi_plan.cpp
index 2dd765d9553..19c16bbc7d3 100644
--- a/src/mongo/db/exec/multi_plan.cpp
+++ b/src/mongo/db/exec/multi_plan.cpp
@@ -110,6 +110,7 @@ namespace mongo {
if (!bestPlan.results.empty()) {
*out = bestPlan.results.front();
bestPlan.results.pop_front();
+ _commonStats.advanced++;
return PlanStage::ADVANCED;
}
@@ -140,10 +141,23 @@ namespace mongo {
_backupPlanIdx = kNoSuchPlan;
}
+ // Increment stats.
+ if (PlanStage::ADVANCED == state) {
+ _commonStats.advanced++;
+ }
+ else if (PlanStage::NEED_TIME == state) {
+ _commonStats.needTime++;
+ }
+
return state;
}
void MultiPlanStage::pickBestPlan() {
+ // Adds the amount of time taken by pickBestPlan() to executionTimeMillis. There's lots of
+ // execution work that happens here, so this is needed for the time accounting to
+ // make sense.
+ ScopedTimer timer(&_commonStats.executionTimeMillis);
+
// Run each plan some number of times. This number is at least as great as
// 'internalQueryPlanEvaluationWorks', but may be larger for big collections.
size_t numWorks = internalQueryPlanEvaluationWorks;
diff --git a/src/mongo/db/exec/subplan.cpp b/src/mongo/db/exec/subplan.cpp
index 5d4bcbd2dc0..281c0baafae 100644
--- a/src/mongo/db/exec/subplan.cpp
+++ b/src/mongo/db/exec/subplan.cpp
@@ -148,6 +148,10 @@ namespace mongo {
}
Status SubplanStage::planSubqueries() {
+ // Adds the amount of time taken by planSubqueries() to executionTimeMillis. There's lots of
+ // work that happens here, so this is needed for the time accounting to make sense.
+ ScopedTimer timer(&_commonStats.executionTimeMillis);
+
MatchExpression* theOr = _query->root();
for (size_t i = 0; i < _plannerParams.indices.size(); ++i) {
@@ -209,6 +213,10 @@ namespace mongo {
}
Status SubplanStage::pickBestPlan() {
+ // Adds the amount of time taken by pickBestPlan() to executionTimeMillis. There's lots of
+ // work that happens here, so this is needed for the time accounting to make sense.
+ ScopedTimer timer(&_commonStats.executionTimeMillis);
+
// This is what we annotate with the index selections and then turn into a solution.
auto_ptr<OrMatchExpression> theOr(
static_cast<OrMatchExpression*>(_query->root()->shallowClone()));
diff --git a/src/mongo/db/pipeline/document_source_cursor.cpp b/src/mongo/db/pipeline/document_source_cursor.cpp
index bae0e53a680..5e96f5e7cf2 100644
--- a/src/mongo/db/pipeline/document_source_cursor.cpp
+++ b/src/mongo/db/pipeline/document_source_cursor.cpp
@@ -35,7 +35,6 @@
#include "mongo/db/pipeline/document.h"
#include "mongo/db/query/explain.h"
#include "mongo/db/query/find_constants.h"
-#include "mongo/db/query/type_explain.h"
#include "mongo/db/storage_options.h"
#include "mongo/s/d_state.h"
#include "mongo/s/stale_exception.h" // for SendStaleConfigException
diff --git a/src/mongo/db/query/SConscript b/src/mongo/db/query/SConscript
index 96339abc388..bf0a38ee291 100644
--- a/src/mongo/db/query/SConscript
+++ b/src/mongo/db/query/SConscript
@@ -34,13 +34,11 @@ env.Library(
target='query',
source=[
"explain.cpp",
- "explain_plan.cpp",
"get_executor.cpp",
"new_find.cpp",
"plan_executor.cpp",
"plan_ranker.cpp",
"stage_builder.cpp",
- "type_explain.cpp",
],
LIBDEPS=[
"query_planner",
diff --git a/src/mongo/db/query/explain.cpp b/src/mongo/db/query/explain.cpp
index 12ae7b3884d..4a14debc1f6 100644
--- a/src/mongo/db/query/explain.cpp
+++ b/src/mongo/db/query/explain.cpp
@@ -31,7 +31,6 @@
#include "mongo/db/query/explain.h"
#include "mongo/db/exec/multi_plan.h"
-#include "mongo/db/query/explain_plan.h"
#include "mongo/db/query/get_executor.h"
#include "mongo/db/query/plan_executor.h"
#include "mongo/db/query/query_planner.h"
@@ -195,8 +194,6 @@ namespace mongo {
using mongoutils::str::stream;
- MONGO_EXPORT_SERVER_PARAMETER(enableNewExplain, bool, false);
-
// static
void Explain::statsToBSON(const PlanStageStats& stats,
Explain::Verbosity verbosity,
@@ -545,7 +542,7 @@ namespace mongo {
MultiPlanStage* mps = getMultiPlanStage(exec->getRootStage());
// The executionStats verbosity level requires that we run the winning plan
- // until if finishes.
+ // until it finishes.
if (verbosity >= Explain::EXEC_STATS) {
Status s = exec->executePlan();
if (!s.isOK()) {
@@ -672,89 +669,4 @@ namespace mongo {
}
}
- // TODO: This is temporary and should get deleted. There are a few small ways in which
- // this differs from 2.6 explain, but I'm not too worried because this entire format is
- // going away soon:
- // 1) 'indexBounds' field excluded from idhack explain.
- // 2) 'filterSet' field (for index filters) excluded.
- Status Explain::legacyExplain(PlanExecutor* exec, TypeExplain** explain) {
- invariant(exec);
- invariant(explain);
-
- scoped_ptr<PlanStageStats> stats(exec->getStats());
- if (NULL == stats.get()) {
- return Status(ErrorCodes::InternalError, "no stats available to explain plan");
- }
-
- // Special explain format for EOF.
- if (STAGE_EOF == stats->stageType) {
- *explain = new TypeExplain();
-
- // Fill in mandatory fields.
- (*explain)->setN(0);
- (*explain)->setNScannedObjects(0);
- (*explain)->setNScanned(0);
-
- // Fill in all the main fields that don't have a default in the explain data structure.
- (*explain)->setCursor("BasicCursor");
- (*explain)->setScanAndOrder(false);
- (*explain)->setIsMultiKey(false);
- (*explain)->setIndexOnly(false);
- (*explain)->setNYields(0);
- (*explain)->setNChunkSkips(0);
-
- TypeExplain* allPlans = new TypeExplain;
- allPlans->setCursor("BasicCursor");
- (*explain)->addToAllPlans(allPlans); // ownership xfer
-
- (*explain)->setNScannedObjectsAllPlans(0);
- (*explain)->setNScannedAllPlans(0);
-
- return Status::OK();
- }
-
- // Special explain format for idhack.
- vector<PlanStageStats*> statNodes;
- flattenStatsTree(stats.get(), &statNodes);
- PlanStageStats* idhack = NULL;
- for (size_t i = 0; i < statNodes.size(); i++) {
- if (STAGE_IDHACK == statNodes[i]->stageType) {
- idhack = statNodes[i];
- break;
- }
- }
-
- if (NULL != idhack) {
- // Explain format does not match 2.4 and is intended
- // to indicate clearly that the ID hack has been applied.
- *explain = new TypeExplain();
-
- IDHackStats* idhackStats = static_cast<IDHackStats*>(idhack->specific.get());
-
- (*explain)->setCursor("IDCursor");
- (*explain)->setIDHack(true);
- (*explain)->setN(stats->common.advanced);
- (*explain)->setNScanned(idhackStats->keysExamined);
- (*explain)->setNScannedObjects(idhackStats->docsExamined);
-
- return Status::OK();
- }
-
- Status status = explainPlan(*stats, explain, true /* full details */);
- if (!status.isOK()) {
- return status;
- }
-
- // Fill in explain fields that are accounted by on the runner level.
- TypeExplain* chosenPlan = NULL;
- explainPlan(*stats, &chosenPlan, false /* no full details */);
- if (chosenPlan) {
- (*explain)->addToAllPlans(chosenPlan);
- }
- (*explain)->setNScannedObjectsAllPlans((*explain)->getNScannedObjects());
- (*explain)->setNScannedAllPlans((*explain)->getNScanned());
-
- return Status::OK();
- }
-
} // namespace mongo
diff --git a/src/mongo/db/query/explain.h b/src/mongo/db/query/explain.h
index ec018f6af7a..f617809a34c 100644
--- a/src/mongo/db/query/explain.h
+++ b/src/mongo/db/query/explain.h
@@ -34,17 +34,12 @@
#include "mongo/db/query/plan_executor.h"
#include "mongo/db/query/query_planner_params.h"
#include "mongo/db/query/query_solution.h"
-#include "mongo/db/query/type_explain.h"
namespace mongo {
class Collection;
class OperationContext;
- // Temporarily hide the new explain implementation behind a setParameter.
- // TODO: take this out, and make the new implementation the default.
- extern bool enableNewExplain;
-
/**
* A container for the summary statistics that the profiler, slow query log, and
* other non-explain debug mechanisms may want to collect.
@@ -181,16 +176,6 @@ namespace mongo {
*/
static void explainCountEmptyQuery(BSONObjBuilder* out);
- /**
- * Generate the legacy explain format from a PlanExecutor.
- *
- * On success, the caller owns 'explain'.
- *
- * TODO: THIS IS TEMPORARY. Once the legacy explain code is deleted, we won't
- * need this anymore.
- */
- static Status legacyExplain(PlanExecutor* exec, TypeExplain** explain);
-
private:
/**
* Private helper that does the heavy-lifting for the public statsToBSON(...) functions
diff --git a/src/mongo/db/query/explain_plan.cpp b/src/mongo/db/query/explain_plan.cpp
deleted file mode 100644
index 4c3a9fb06f8..00000000000
--- a/src/mongo/db/query/explain_plan.cpp
+++ /dev/null
@@ -1,322 +0,0 @@
-/**
- * Copyright (C) 2013-2014 MongoDB Inc.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License, version 3,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * As a special exception, the copyright holders give permission to link the
- * code of portions of this program with the OpenSSL library under certain
- * conditions as described in each individual source file and distribute
- * linked combinations including the program with the OpenSSL library. You
- * must comply with the GNU Affero General Public License in all respects for
- * all of the code used other than as permitted herein. If you modify file(s)
- * with this exception, you may extend this exception to your version of the
- * file(s), but you are not obligated to do so. If you do not wish to do so,
- * delete this exception statement from your version. If you delete this
- * exception statement from all source files in the program, then also delete
- * it in the license file.
- */
-
-// THIS FILE IS DEPRECATED -- the old explain implementation is being replaced
-
-#include "mongo/db/query/explain_plan.h"
-
-#include "mongo/db/query/explain.h"
-#include "mongo/db/query/stage_types.h"
-#include "mongo/db/query/type_explain.h"
-#include "mongo/util/mongoutils/str.h"
-
-namespace mongo {
-
- using mongoutils::str::stream;
-
- namespace {
-
- bool isOrStage(StageType stageType) {
- return stageType == STAGE_OR || stageType == STAGE_SORT_MERGE;
- }
-
- bool isNearStage(StageType stageType) {
- return stageType == STAGE_GEO_NEAR_2D || stageType == STAGE_GEO_NEAR_2DSPHERE;
- }
-
- bool isIntersectPlan(const PlanStageStats& stats) {
- if (stats.stageType == STAGE_AND_HASH || stats.stageType == STAGE_AND_SORTED) {
- return true;
- }
- for (size_t i = 0; i < stats.children.size(); ++i) {
- if (isIntersectPlan(*stats.children[i])) {
- return true;
- }
- }
- return false;
- }
-
- void getLeafNodes(const PlanStageStats& stats, vector<const PlanStageStats*>* leafNodesOut) {
- if (0 == stats.children.size()) {
- leafNodesOut->push_back(&stats);
- }
- for (size_t i = 0; i < stats.children.size(); ++i) {
- getLeafNodes(*stats.children[i], leafNodesOut);
- }
- }
-
- const PlanStageStats* findNode(const PlanStageStats* root, StageType type) {
- if (root->stageType == type) {
- return root;
- }
- for (size_t i = 0; i < root->children.size(); ++i) {
- const PlanStageStats* ret = findNode(root->children[i], type);
- if (NULL != ret) {
- return ret;
- }
- }
- return NULL;
- }
-
- } // namespace
-
- Status explainIntersectPlan(const PlanStageStats& stats, TypeExplain** explainOut, bool fullDetails) {
- auto_ptr<TypeExplain> res(new TypeExplain);
- res->setCursor("Complex Plan");
- res->setN(stats.common.advanced);
-
- // Sum the various counters at the leaves.
- vector<const PlanStageStats*> leaves;
- getLeafNodes(stats, &leaves);
-
- long long nScanned = 0;
- long long nScannedObjects = 0;
- for (size_t i = 0; i < leaves.size(); ++i) {
- TypeExplain* leafExplain;
- explainPlan(*leaves[i], &leafExplain, false);
- nScanned += leafExplain->getNScanned();
- nScannedObjects += leafExplain->getNScannedObjects();
- delete leafExplain;
- }
-
- res->setNScanned(nScanned);
- // XXX: this isn't exactly "correct" -- for ixscans we have to find out if it's part of a
- // subtree rooted at a fetch, etc. etc. do we want to just add the # of advances of a
- // fetch node minus the number of alreadyHasObj for those nodes?
- res->setNScannedObjects(nScannedObjects);
-
- uint64_t chunkSkips = 0;
- const PlanStageStats* shardFilter = findNode(&stats, STAGE_SHARDING_FILTER);
- if (NULL != shardFilter) {
- const ShardingFilterStats* sfs
- = static_cast<const ShardingFilterStats*>(shardFilter->specific.get());
- chunkSkips = sfs->chunkSkips;
- }
-
- res->setNChunkSkips(chunkSkips);
-
- if (fullDetails) {
- res->setNYields(stats.common.yields);
- BSONObjBuilder bob;
- Explain::statsToBSON(stats, &bob);
- res->stats = bob.obj();
- }
-
- *explainOut = res.release();
- return Status::OK();
- }
-
- namespace {
-
- Status explainPlan(const PlanStageStats& stats, TypeExplain** explainOut,
- bool fullDetails, bool covered) {
- //
- // Temporary explain for index intersection
- //
-
- if (isIntersectPlan(stats)) {
- return explainIntersectPlan(stats, explainOut, fullDetails);
- }
-
- //
- // Legacy explain implementation
- //
-
- // Descend the plan looking for structural properties:
- // + Are there any OR clauses? If so, explain each branch.
- // + What type(s) are the leaf nodes and what are their properties?
- // + Did we need a sort?
-
- bool sortPresent = false;
- size_t chunkSkips = 0;
-
-
- // XXX: TEMPORARY HACK - GEONEAR explains like OR queries (both have children) until the
- // new explain framework makes this file go away.
- const PlanStageStats* orStage = NULL;
- const PlanStageStats* root = &stats;
- const PlanStageStats* leaf = root;
-
- while (leaf->children.size() > 0) {
- // We shouldn't be here if there are any ANDs
- if (leaf->children.size() > 1) {
- verify(isOrStage(leaf->stageType) || isNearStage(leaf->stageType));
- }
-
- if (isOrStage(leaf->stageType) || isNearStage(leaf->stageType)) {
- orStage = leaf;
- break;
- }
-
- if (leaf->stageType == STAGE_FETCH) {
- covered = false;
- }
-
- if (leaf->stageType == STAGE_SORT) {
- sortPresent = true;
- }
-
- if (STAGE_SHARDING_FILTER == leaf->stageType) {
- const ShardingFilterStats* sfs
- = static_cast<const ShardingFilterStats*>(leaf->specific.get());
- chunkSkips = sfs->chunkSkips;
- }
-
- leaf = leaf->children[0];
- }
-
- auto_ptr<TypeExplain> res(new TypeExplain);
-
- // Accounting for 'nscanned' and 'nscannedObjects' is specific to the kind of leaf:
- //
- // + on collection scan, both are the same; all the documents retrieved were
- // fetched in practice. To get how many documents were retrieved, one simply
- // looks at the number of 'advanced' in the stats.
- //
- // + on an index scan, we'd neeed to look into the index scan cursor to extract the
- // number of keys that cursor retrieved, and into the stage's stats 'advanced' for
- // nscannedObjects', which would be the number of keys that survived the IXSCAN
- // filter. Those keys would have been FETCH-ed, if a fetch is present.
-
- if (orStage != NULL) {
- size_t nScanned = 0;
- size_t nScannedObjects = 0;
- const std::vector<PlanStageStats*>& children = orStage->children;
- for (std::vector<PlanStageStats*>::const_iterator it = children.begin();
- it != children.end();
- ++it) {
- TypeExplain* childExplain = NULL;
- explainPlan(**it, &childExplain, false /* no full details */, covered);
- if (childExplain) {
- // Override child's indexOnly value if we have a non-covered
- // query (implied by a FETCH stage).
- //
- // As we run explain on each child, explainPlan() sets indexOnly
- // based only on the information in each child. This does not
- // consider the possibility of a FETCH stage above the OR/MERGE_SORT
- // stage, in which case the child's indexOnly may be erroneously set
- // to true.
- if (!covered && childExplain->isIndexOnlySet()) {
- childExplain->setIndexOnly(false);
- }
-
- // 'res' takes ownership of 'childExplain'.
- res->addToClauses(childExplain);
- nScanned += childExplain->getNScanned();
- nScannedObjects += childExplain->getNScannedObjects();
- }
- }
- // We set the cursor name for backwards compatibility with 2.4.
- if (isOrStage(leaf->stageType)) {
- res->setCursor("QueryOptimizerCursor");
- }
- else {
- if (leaf->stageType == STAGE_GEO_NEAR_2D)
- res->setCursor("GeoSearchCursor");
- else
- res->setCursor("S2NearCursor");
-
- res->setIndexOnly(false);
- res->setIsMultiKey(false);
- }
- res->setNScanned(nScanned);
- res->setNScannedObjects(nScannedObjects);
- }
- else if (leaf->stageType == STAGE_COLLSCAN) {
- CollectionScanStats* csStats = static_cast<CollectionScanStats*>(leaf->specific.get());
- res->setCursor("BasicCursor");
- res->setNScanned(csStats->docsTested);
- res->setNScannedObjects(csStats->docsTested);
- res->setIndexOnly(false);
- res->setIsMultiKey(false);
- }
- else if (leaf->stageType == STAGE_TEXT) {
- TextStats* tStats = static_cast<TextStats*>(leaf->specific.get());
- res->setCursor("TextCursor");
- res->setNScanned(tStats->keysExamined);
- res->setNScannedObjects(tStats->fetches);
- }
- else if (leaf->stageType == STAGE_IXSCAN) {
- IndexScanStats* indexStats = static_cast<IndexScanStats*>(leaf->specific.get());
- verify(indexStats);
- string direction = indexStats->direction > 0 ? "" : " reverse";
- res->setCursor(indexStats->indexType + " " + indexStats->indexName + direction);
- res->setNScanned(indexStats->keysExamined);
-
- // If we're covered, that is, no FETCH is present, then, by definition,
- // nScannedObject would be zero because no full document would have been fetched
- // from disk.
- res->setNScannedObjects(covered ? 0 : leaf->common.advanced);
-
- res->setIndexBounds(indexStats->indexBounds);
- res->setIsMultiKey(indexStats->isMultiKey);
- res->setIndexOnly(covered);
- }
- else if (leaf->stageType == STAGE_DISTINCT) {
- DistinctScanStats* dss = static_cast<DistinctScanStats*>(leaf->specific.get());
- verify(dss);
- res->setCursor("DistinctCursor");
- res->setN(dss->keysExamined);
- res->setNScanned(dss->keysExamined);
- // Distinct hack stage is fully covered.
- res->setNScannedObjects(0);
- }
- else {
- return Status(ErrorCodes::InternalError, "cannot interpret execution plan");
- }
-
- // How many documents did the query return?
- res->setN(root->common.advanced);
- res->setScanAndOrder(sortPresent);
- res->setNChunkSkips(chunkSkips);
-
- // Statistics for the plan (appear only in a detailed mode)
- // TODO: if we can get this from the runner, we can kill "detailed mode"
- if (fullDetails) {
- res->setNYields(root->common.yields);
- BSONObjBuilder bob;
- Explain::statsToBSON(*root, &bob);
- res->stats = bob.obj();
- }
-
- *explainOut = res.release();
- return Status::OK();
- }
-
- } // namespace
-
- Status explainPlan(const PlanStageStats& stats, TypeExplain** explainOut, bool fullDetails) {
- // This function merely calls a recursive helper of the same name. The boolean "covered" is
- // used to determine the value of nscannedObjects for subtrees along the way. Recursive
- // calls will pass false for "covered" if a fetch stage has been seen at that point in the
- // traversal.
- const bool covered = true;
- return explainPlan(stats, explainOut, fullDetails, covered);
- }
-
-} // namespace mongo
diff --git a/src/mongo/db/query/explain_plan.h b/src/mongo/db/query/explain_plan.h
deleted file mode 100644
index 4313c2c4a51..00000000000
--- a/src/mongo/db/query/explain_plan.h
+++ /dev/null
@@ -1,60 +0,0 @@
-/**
- * Copyright (C) 2013 10gen Inc.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License, version 3,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * As a special exception, the copyright holders give permission to link the
- * code of portions of this program with the OpenSSL library under certain
- * conditions as described in each individual source file and distribute
- * linked combinations including the program with the OpenSSL library. You
- * must comply with the GNU Affero General Public License in all respects for
- * all of the code used other than as permitted herein. If you modify file(s)
- * with this exception, you may extend this exception to your version of the
- * file(s), but you are not obligated to do so. If you do not wish to do so,
- * delete this exception statement from your version. If you delete this
- * exception statement from all source files in the program, then also delete
- * it in the license file.
- */
-
-// THIS FILE IS DEPRECATED -- the old explain implementation is being replaced
-
-#pragma once
-
-#include "mongo/base/status.h"
-#include "mongo/db/exec/plan_stats.h"
-#include "mongo/db/query/query_solution.h"
-
-namespace mongo {
-
- class TypeExplain;
- struct PlanInfo;
-
- /**
- * Returns OK, allocating and filling in '*explainOut' describing the access paths used in
- * the 'stats' tree of a given query solution. The caller has the ownership of
- * '*explainOut', on success. Otherwise return an error status describing the problem.
- *
- * If 'fullDetails' was requested, the explain will return all available information about
- * the plan, otherwise, just a summary. The fields in the summary are: 'cursor', 'n',
- * 'nscannedObjects', 'nscanned', and 'indexBounds'. The remaining fields are: 'isMultKey',
- * 'nscannedObjectsAllPlans', 'nscannedAllPlans', 'scanAndOrder', 'indexOnly', 'nYields',
- * 'nChunkSkips', 'millis', 'allPlans', and 'oldPlan'.
- *
- * All these fields are documented in type_explain.h
- *
- * TODO: This is temporarily in place to support the legacy explain format. Once legacy
- * explain is removed, this function should be deleted.
- */
- Status explainPlan(const PlanStageStats& stats, TypeExplain** explainOut, bool fullDetails);
-
-} // namespace mongo
diff --git a/src/mongo/db/query/index_bounds_builder_test.cpp b/src/mongo/db/query/index_bounds_builder_test.cpp
index 0a89cef23fb..84b3b96ea51 100644
--- a/src/mongo/db/query/index_bounds_builder_test.cpp
+++ b/src/mongo/db/query/index_bounds_builder_test.cpp
@@ -1119,4 +1119,54 @@ namespace {
ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
}
+ // Test $type bounds for Code BSON type.
+ TEST(IndexBoundsBuilderTest, CodeTypeBounds) {
+ IndexEntry testIndex = IndexEntry(BSONObj());
+ BSONObj obj = fromjson("{a: {$type: 13}}");
+ auto_ptr<MatchExpression> expr(parseMatchExpression(obj));
+ BSONElement elt = obj.firstElement();
+
+ OrderedIntervalList oil;
+ IndexBoundsBuilder::BoundsTightness tightness;
+ IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
+
+ // Build the expected interval.
+ BSONObjBuilder bob;
+ bob.appendCode("", "");
+ bob.appendCodeWScope("", "", BSONObj());
+ BSONObj expectedInterval = bob.obj();
+
+ // Check the output of translate().
+ ASSERT_EQUALS(oil.name, "a");
+ ASSERT_EQUALS(oil.intervals.size(), 1U);
+ ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[0].compare(
+ Interval(expectedInterval, true, true)));
+ ASSERT(tightness == IndexBoundsBuilder::INEXACT_FETCH);
+ }
+
+ // Test $type bounds for Code With Scope BSON type.
+ TEST(IndexBoundsBuilderTest, CodeWithScopeTypeBounds) {
+ IndexEntry testIndex = IndexEntry(BSONObj());
+ BSONObj obj = fromjson("{a: {$type: 15}}");
+ auto_ptr<MatchExpression> expr(parseMatchExpression(obj));
+ BSONElement elt = obj.firstElement();
+
+ OrderedIntervalList oil;
+ IndexBoundsBuilder::BoundsTightness tightness;
+ IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
+
+ // Build the expected interval.
+ BSONObjBuilder bob;
+ bob.appendCodeWScope("", "", BSONObj());
+ bob.appendMaxKey("");
+ BSONObj expectedInterval = bob.obj();
+
+ // Check the output of translate().
+ ASSERT_EQUALS(oil.name, "a");
+ ASSERT_EQUALS(oil.intervals.size(), 1U);
+ ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[0].compare(
+ Interval(expectedInterval, true, true)));
+ ASSERT(tightness == IndexBoundsBuilder::INEXACT_FETCH);
+ }
+
} // namespace
diff --git a/src/mongo/db/query/new_find.cpp b/src/mongo/db/query/new_find.cpp
index 6b7b8af08e1..b29aca68c25 100644
--- a/src/mongo/db/query/new_find.cpp
+++ b/src/mongo/db/query/new_find.cpp
@@ -90,11 +90,6 @@ namespace {
return n >= pq.getNumToReturn();
}
- bool enoughForExplain(const mongo::LiteParsedQuery& pq, long long n) {
- if (pq.wantMore() || 0 == pq.getNumToReturn()) { return false; }
- return n >= pq.getNumToReturn();
- }
-
/**
* Returns true if 'me' is a GTE or GE predicate over the "ts" field.
* Such predicates can be used for the oplog start hack.
@@ -511,60 +506,6 @@ namespace mongo {
// We use this a lot below.
const LiteParsedQuery& pq = cq->getParsed();
- // set this outside loop. we will need to use this both within loop and when deciding
- // to fill in explain information
- const bool isExplain = pq.isExplain();
-
- // New-style explains get diverted through a separate path which calls back into the
- // query planner and query execution mechanisms.
- //
- // TODO temporary until find() becomes a real command.
- if (isExplain && enableNewExplain) {
- size_t options = QueryPlannerParams::DEFAULT;
- if (shardingState.needCollectionMetadata(pq.ns())) {
- options |= QueryPlannerParams::INCLUDE_SHARD_FILTER;
- }
-
- BufBuilder bb;
- bb.skip(sizeof(QueryResult::Value));
-
- PlanExecutor* rawExec;
- // Takes ownership of 'cq'.
- Status execStatus = getExecutor(txn, collection, cq, &rawExec, options);
- if (!execStatus.isOK()) {
- uasserted(17510, "Explain error: " + execStatus.reason());
- }
-
- scoped_ptr<PlanExecutor> exec(rawExec);
- BSONObjBuilder explainBob;
- Status explainStatus = Explain::explainStages(exec.get(), Explain::EXEC_ALL_PLANS,
- &explainBob);
- if (!explainStatus.isOK()) {
- uasserted(18521, "Explain error: " + explainStatus.reason());
- }
-
- // Add the resulting object to the return buffer.
- BSONObj explainObj = explainBob.obj();
- bb.appendBuf((void*)explainObj.objdata(), explainObj.objsize());
-
- curop.debug().iscommand = true;
- // TODO: Does this get overwritten/do we really need to set this twice?
- curop.debug().query = q.query;
-
- // Set query result fields.
- QueryResult::View qr = bb.buf();
- bb.decouple();
- qr.setResultFlagsToOk();
- qr.msgdata().setLen(bb.len());
- curop.debug().responseLength = bb.len();
- qr.msgdata().setOperation(opReply);
- qr.setCursorId(0);
- qr.setStartingFrom(0);
- qr.setNReturned(1);
- result.setData(qr.view2ptr(), true);
- return "";
- }
-
// We'll now try to get the query executor that will execute this query for us. There
// are a few cases in which we know upfront which executor we should get and, therefore,
// we shortcut the selection process here.
@@ -606,6 +547,41 @@ namespace mongo {
verify(NULL != rawExec);
auto_ptr<PlanExecutor> exec(rawExec);
+ // If it's actually an explain, do the explain and return rather than falling through
+ // to the normal query execution loop.
+ if (pq.isExplain()) {
+ BufBuilder bb;
+ bb.skip(sizeof(QueryResult::Value));
+
+ BSONObjBuilder explainBob;
+ Status explainStatus = Explain::explainStages(exec.get(), Explain::EXEC_ALL_PLANS,
+ &explainBob);
+ if (!explainStatus.isOK()) {
+ uasserted(18521, "Explain error: " + explainStatus.reason());
+ }
+
+ // Add the resulting object to the return buffer.
+ BSONObj explainObj = explainBob.obj();
+ bb.appendBuf((void*)explainObj.objdata(), explainObj.objsize());
+
+ curop.debug().iscommand = true;
+ // TODO: Does this get overwritten/do we really need to set this twice?
+ curop.debug().query = q.query;
+
+ // Set query result fields.
+ QueryResult::View qr = bb.buf();
+ bb.decouple();
+ qr.setResultFlagsToOk();
+ qr.msgdata().setLen(bb.len());
+ curop.debug().responseLength = bb.len();
+ qr.msgdata().setOperation(opReply);
+ qr.setCursorId(0);
+ qr.setStartingFrom(0);
+ qr.setNReturned(1);
+ result.setData(qr.view2ptr(), true);
+ return "";
+ }
+
// We freak out later if this changes before we're done with the query.
const ChunkVersion shardingVersionAtStart = shardingState.getVersion(cq->ns());
@@ -662,10 +638,8 @@ namespace mongo {
curop.debug().planSummary = stats.summaryStr.c_str();
while (PlanExecutor::ADVANCED == (state = exec->getNext(&obj, NULL))) {
- // Add result to output buffer. This is unnecessary if explain info is requested
- if (!isExplain) {
- bb.appendBuf((void*)obj.objdata(), obj.objsize());
- }
+ // Add result to output buffer.
+ bb.appendBuf((void*)obj.objdata(), obj.objsize());
// Count the result.
++numResults;
@@ -681,13 +655,8 @@ namespace mongo {
// TODO: only one type of 2d search doesn't support this. We need a way to pull it out
// of CanonicalQuery. :(
const bool supportsGetMore = true;
- if (isExplain) {
- if (enoughForExplain(pq, numResults)) {
- break;
- }
- }
- else if (!supportsGetMore && (enough(pq, numResults)
- || bb.len() >= MaxBytesToReturnToClientAtOnce)) {
+ if (!supportsGetMore && (enough(pq, numResults)
+ || bb.len() >= MaxBytesToReturnToClientAtOnce)) {
break;
}
else if (enoughForFirstBatch(pq, numResults, bb.len())) {
@@ -743,59 +712,33 @@ namespace mongo {
shardingState.getVersion(pq.ns()));
}
- // Used to fill in explain and to determine if the query is slow enough to be logged.
- int elapsedMillis = curop.elapsedMillis();
-
- // Get explain information if:
- // 1) it is needed by an explain query;
- // 2) profiling is enabled; or
- // 3) profiling is disabled but we still need explain details to log a "slow" query.
- // Producing explain information is expensive and should be done only if we are certain
- // the information will be used.
- boost::scoped_ptr<TypeExplain> explain(NULL);
- if (isExplain ||
- ctx.ctx().db()->getProfilingLevel() > 0 ||
- elapsedMillis > serverGlobalParams.slowMS) {
- // Ask the executor to produce explain information.
- TypeExplain* bareExplain;
- Status res = Explain::legacyExplain(exec.get(), &bareExplain);
- if (res.isOK()) {
- explain.reset(bareExplain);
- }
- else if (isExplain) {
- error() << "could not produce explain of query '" << pq.getFilter()
- << "', error: " << res.reason();
- // If numResults and the data in bb don't correspond, we'll crash later when rooting
- // through the reply msg.
- BSONObj emptyObj;
- bb.appendBuf((void*)emptyObj.objdata(), emptyObj.objsize());
- // The explain output is actually a result.
- numResults = 1;
- // TODO: we can fill out millis etc. here just fine even if the plan screwed up.
+ // Set debug information for consumption by the profiler.
+ if (ctx.ctx().db()->getProfilingLevel() > 0 ||
+ curop.elapsedMillis() > serverGlobalParams.slowMS) {
+ PlanSummaryStats newStats;
+ Explain::getSummaryStats(exec.get(), &newStats);
+
+ curop.debug().ntoskip = pq.getSkip();
+ curop.debug().nreturned = numResults;
+ curop.debug().scanAndOrder = newStats.hasSortStage;
+ curop.debug().nscanned = newStats.totalKeysExamined;
+ curop.debug().nscannedObjects = newStats.totalDocsExamined;
+ curop.debug().idhack = newStats.isIdhack;
+
+ // Get BSON stats.
+ scoped_ptr<PlanStageStats> execStats(exec->getStats());
+ BSONObjBuilder statsBob;
+ Explain::statsToBSON(*execStats, &statsBob);
+ curop.debug().execStats.set(statsBob.obj());
+
+ // Replace exec stats with plan summary if stats cannot fit into CachedBSONObj.
+ if (curop.debug().execStats.tooBig() && !curop.debug().planSummary.empty()) {
+ BSONObjBuilder bob;
+ bob.append("summary", curop.debug().planSummary.toString());
+ curop.debug().execStats.set(bob.done());
}
}
- // Fill in the missing run-time fields in explain, starting with propeties of
- // the process running the query.
- if (isExplain && NULL != explain.get()) {
- std::string server = mongoutils::str::stream()
- << getHostNameCached() << ":" << serverGlobalParams.port;
- explain->setServer(server);
-
- // We might have skipped some results due to chunk migration etc. so our count is
- // correct.
- explain->setN(numResults);
-
- // Clock the whole operation.
- explain->setMillis(elapsedMillis);
-
- BSONObj explainObj = explain->toBSON();
- bb.appendBuf((void*)explainObj.objdata(), explainObj.objsize());
-
- // The explain output is actually a result.
- numResults = 1;
- }
-
long long ccId = 0;
if (saveClientCursor) {
// We won't use the executor until it's getMore'd.
@@ -849,44 +792,6 @@ namespace mongo {
qr.setStartingFrom(0);
qr.setNReturned(numResults);
- // Set debug information for consumption by the profiler.
- curop.debug().ntoskip = pq.getSkip();
- curop.debug().nreturned = numResults;
- if (NULL != explain.get()) {
- if (explain->isScanAndOrderSet()) {
- curop.debug().scanAndOrder = explain->getScanAndOrder();
- }
- else {
- curop.debug().scanAndOrder = false;
- }
-
- if (explain->isNScannedSet()) {
- curop.debug().nscanned = explain->getNScanned();
- }
-
- if (explain->isNScannedObjectsSet()) {
- curop.debug().nscannedObjects = explain->getNScannedObjects();
- }
-
- if (explain->isIDHackSet()) {
- curop.debug().idhack = explain->getIDHack();
- }
-
- if (!explain->stats.isEmpty()) {
- // execStats is a CachedBSONObj because it lives in the race-prone
- // curop.
- curop.debug().execStats.set(explain->stats);
-
- // Replace exec stats with plan summary if stats cannot fit into CachedBSONObj.
- if (curop.debug().execStats.tooBig() && !curop.debug().planSummary.empty()) {
- BSONObjBuilder bob;
- bob.append("summary", curop.debug().planSummary.toString());
- curop.debug().execStats.set(bob.done());
- }
-
- }
- }
-
// curop.debug().exhaust is set above.
return curop.debug().exhaust ? pq.ns() : "";
}
diff --git a/src/mongo/db/query/planner_analysis.cpp b/src/mongo/db/query/planner_analysis.cpp
index 233e3b3e4b8..9c723ab46c2 100644
--- a/src/mongo/db/query/planner_analysis.cpp
+++ b/src/mongo/db/query/planner_analysis.cpp
@@ -448,6 +448,11 @@ namespace mongo {
// We have no idea what the client intended. One way to handle the ambiguity
// of a limited OR stage is to use the SPLIT_LIMITED_SORT hack.
//
+ // If wantMore is false (meaning that 'ntoreturn' was initially passed to
+ // the server as a negative value), then we treat numToReturn as a limit.
+ // Since there is no limit-batchSize ambiguity in this case, we do not use the
+ // SPLIT_LIMITED_SORT hack.
+ //
// If numToReturn is really a limit, then we want to add a limit to this
// SORT stage, and hence perform a topK.
//
@@ -458,7 +463,8 @@ namespace mongo {
// with the topK first. If the client wants a limit, they'll get the efficiency
// of topK. If they want a batchSize, the other OR branch will deliver the missing
// results. The OR stage handles deduping.
- if (params.options & QueryPlannerParams::SPLIT_LIMITED_SORT
+ if (query.getParsed().wantMore()
+ && params.options & QueryPlannerParams::SPLIT_LIMITED_SORT
&& !QueryPlannerCommon::hasNode(query.root(), MatchExpression::TEXT)
&& !QueryPlannerCommon::hasNode(query.root(), MatchExpression::GEO)
&& !QueryPlannerCommon::hasNode(query.root(), MatchExpression::GEO_NEAR)) {
diff --git a/src/mongo/db/query/query_planner_test.cpp b/src/mongo/db/query/query_planner_test.cpp
index 4e4c92f930a..19faeb6d1c1 100644
--- a/src/mongo/db/query/query_planner_test.cpp
+++ b/src/mongo/db/query/query_planner_test.cpp
@@ -535,6 +535,68 @@ namespace {
assertSolutionExists("{fetch: {node: {ixscan: {pattern: {x: 1}}}}}");
}
+ TEST_F(QueryPlannerTest, ExistsBounds) {
+ addIndex(BSON("b" << 1));
+
+ runQuery(fromjson("{b: {$exists: true}}"));
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists("{fetch: {filter: {b: {$exists: true}}, node: "
+ "{ixscan: {pattern: {b: 1}, bounds: "
+ "{b: [['MinKey', 'MaxKey', true, true]]}}}}}");
+
+ // This ends up being a double negation, which we currently don't index.
+ runQuery(fromjson("{b: {$not: {$exists: false}}}"));
+ assertNumSolutions(1U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+
+ runQuery(fromjson("{b: {$exists: false}}"));
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists("{fetch: {filter: {b: {$exists: false}}, node: "
+ "{ixscan: {pattern: {b: 1}, bounds: "
+ "{b: [[null, null, true, true]]}}}}}");
+
+ runQuery(fromjson("{b: {$not: {$exists: true}}}"));
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists("{fetch: {filter: {b: {$exists: false}}, node: "
+ "{ixscan: {pattern: {b: 1}, bounds: "
+ "{b: [[null, null, true, true]]}}}}}");
+ }
+
+ TEST_F(QueryPlannerTest, ExistsBoundsCompound) {
+ addIndex(BSON("a" << 1 << "b" << 1));
+
+ runQuery(fromjson("{a: 1, b: {$exists: true}}"));
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists("{fetch: {filter: {b: {$exists: true}}, node: "
+ "{ixscan: {pattern: {a: 1, b: 1}, bounds: "
+ "{a: [[1,1,true,true]], b: [['MinKey','MaxKey',true,true]]}}}}}");
+
+ // This ends up being a double negation, which we currently don't index.
+ runQuery(fromjson("{a: 1, b: {$not: {$exists: false}}}"));
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists("{fetch: {node: {ixscan: {pattern: {a: 1, b: 1}, bounds: "
+ "{a: [[1,1,true,true]], b: [['MinKey','MaxKey',true,true]]}}}}}");
+
+ runQuery(fromjson("{a: 1, b: {$exists: false}}"));
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists("{fetch: {filter: {b: {$exists: false}}, node: "
+ "{ixscan: {pattern: {a: 1, b: 1}, bounds: "
+ "{a: [[1,1,true,true]], b: [[null,null,true,true]]}}}}}");
+
+ runQuery(fromjson("{a: 1, b: {$not: {$exists: true}}}"));
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists("{fetch: {filter: {b: {$exists: false}}, node: "
+ "{ixscan: {pattern: {a: 1, b: 1}, bounds: "
+ "{a: [[1,1,true,true]], b: [[null,null,true,true]]}}}}}");
+ }
+
//
// skip and limit
//
@@ -1525,6 +1587,19 @@ namespace {
"node: {ixscan: {filter: null, pattern: {'foo.b': 1}}}}}");*/
}
+ TEST_F(QueryPlannerTest, BasicAllElemMatch2) {
+ // true means multikey
+ addIndex(BSON("a.x" << 1), true);
+
+ runQuery(fromjson("{a: {$all: [{$elemMatch: {x: 3}}, {$elemMatch: {y: 5}}]}}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists("{fetch: {filter: {a:{$all:[{$elemMatch:{x:3}},{$elemMatch:{y:5}}]}},"
+ "node: {ixscan: {pattern: {'a.x': 1},"
+ "bounds: {'a.x': [[3,3,true,true]]}}}}}");
+ }
+
// SERVER-13677
TEST_F(QueryPlannerTest, ElemMatchWithAllElemMatchChild) {
addIndex(BSON("a.b.c.d" << 1));
@@ -3535,6 +3610,21 @@ namespace {
"bounds: {a: [['MinKey','MaxKey',true,true]]}}}}}");
}
+ TEST_F(QueryPlannerTest, BoundsTypeMinKeyMaxKey) {
+ params.options = QueryPlannerParams::NO_TABLE_SCAN;
+ addIndex(BSON("a" << 1));
+
+ runQuery(fromjson("{a: {$type: -1}}"));
+ assertNumSolutions(1U);
+ assertSolutionExists("{fetch: {node: {ixscan: {pattern: {a: 1}, bounds:"
+ "{a: [['MinKey','MinKey',true,true]]}}}}}");
+
+ runQuery(fromjson("{a: {$type: 127}}"));
+ assertNumSolutions(1U);
+ assertSolutionExists("{fetch: {node: {ixscan: {pattern: {a: 1}, bounds:"
+ "{a: [['MaxKey','MaxKey',true,true]]}}}}}");
+ }
+
//
// Tests related to building index bounds for multikey
// indices, combined with compound and $elemMatch
diff --git a/src/mongo/db/query/type_explain.cpp b/src/mongo/db/query/type_explain.cpp
deleted file mode 100644
index 329eb1dcb28..00000000000
--- a/src/mongo/db/query/type_explain.cpp
+++ /dev/null
@@ -1,807 +0,0 @@
-/**
- * Copyright (C) 2013 10gen Inc.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License, version 3,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * As a special exception, the copyright holders give permission to link the
- * code of portions of this program with the OpenSSL library under certain
- * conditions as described in each individual source file and distribute
- * linked combinations including the program with the OpenSSL library. You
- * must comply with the GNU Affero General Public License in all respects
- * for all of the code used other than as permitted herein. If you modify
- * file(s) with this exception, you may extend this exception to your
- * version of the file(s), but you are not obligated to do so. If you do not
- * wish to do so, delete this exception statement from your version. If you
- * delete this exception statement from all source files in the program,
- * then also delete it in the license file.
- */
-
-// THIS FILE IS DEPRECATED -- the old explain implementation is being replaced
-
-#include "mongo/db/query/type_explain.h"
-
-#include "mongo/db/field_parser.h"
-#include "mongo/util/mongoutils/str.h"
-
-namespace mongo {
-
- // TODO: This doesn't need to be so complicated or serializable. Let's throw this out when we
- // move to explain V2
-
- using mongoutils::str::stream;
-
- const BSONField<std::vector<TypeExplain*> > TypeExplain::clauses("clauses");
- const BSONField<std::string> TypeExplain::cursor("cursor");
- const BSONField<bool> TypeExplain::isMultiKey("isMultiKey");
- const BSONField<long long> TypeExplain::n("n", 0);
- const BSONField<long long> TypeExplain::nScannedObjects("nscannedObjects", 0);
- const BSONField<long long> TypeExplain::nScanned("nscanned", 0);
- const BSONField<long long> TypeExplain::nScannedObjectsAllPlans("nscannedObjectsAllPlans");
- const BSONField<long long> TypeExplain::nScannedAllPlans("nscannedAllPlans");
- const BSONField<bool> TypeExplain::scanAndOrder("scanAndOrder");
- const BSONField<bool> TypeExplain::indexOnly("indexOnly");
- const BSONField<long long> TypeExplain::nYields("nYields");
- const BSONField<long long> TypeExplain::nChunkSkips("nChunkSkips");
- const BSONField<long long> TypeExplain::millis("millis");
- const BSONField<BSONObj> TypeExplain::indexBounds("indexBounds");
- const BSONField<std::vector<TypeExplain*> > TypeExplain::allPlans("allPlans");
- const BSONField<TypeExplain*> TypeExplain::oldPlan("oldPlan");
- const BSONField<bool> TypeExplain::indexFilterApplied("filterSet");
- const BSONField<std::string> TypeExplain::server("server");
-
- TypeExplain::TypeExplain() {
- clear();
- }
-
- TypeExplain::~TypeExplain() {
- unsetClauses();
- unsetAllPlans();
- }
-
- bool TypeExplain::isValid(std::string* errMsg) const {
- std::string dummy;
- if (errMsg == NULL) {
- errMsg = &dummy;
- }
-
- // All the mandatory fields must be present.
- if (!_isNSet) {
- *errMsg = stream() << "missing " << n.name() << " field";
- return false;
- }
-
- if (!_isNScannedObjectsSet) {
- *errMsg = stream() << "missing " << nScannedObjects.name() << " field";
- return false;
- }
-
- if (!_isNScannedSet) {
- *errMsg = stream() << "missing " << nScanned.name() << " field";
- return false;
- }
-
- return true;
- }
-
- BSONObj TypeExplain::toBSON() const {
- BSONObjBuilder builder;
-
- if (_clauses.get()) {
- BSONArrayBuilder clausesBuilder(builder.subarrayStart(clauses()));
- for (std::vector<TypeExplain*>::const_iterator it = _clauses->begin();
- it != _clauses->end();
- ++it) {
- BSONObj clausesDocument = (*it)->toBSON();
- clausesBuilder.append(clausesDocument);
- }
- clausesBuilder.done();
- }
-
- if (_isCursorSet) builder.append(cursor(), _cursor);
-
- if (_isIsMultiKeySet) builder.append(isMultiKey(), _isMultiKey);
-
- if (_isNSet) {
- builder.appendNumber(n(), _n);
- }
- else {
- builder.appendNumber(n(), n.getDefault());
- }
-
- if (_isNScannedObjectsSet) {
- builder.appendNumber(nScannedObjects(), _nScannedObjects);
- }
- else {
- builder.appendNumber(nScannedObjects(), nScannedObjects.getDefault());
- }
-
- if (_isNScannedSet) {
- builder.appendNumber(nScanned(), _nScanned);
- }
- else {
- builder.appendNumber(nScanned(), nScanned.getDefault());
- }
-
- if (_isNScannedObjectsAllPlansSet)
- builder.appendNumber(nScannedObjectsAllPlans(), _nScannedObjectsAllPlans);
-
- if (_isNScannedAllPlansSet) builder.appendNumber(nScannedAllPlans(), _nScannedAllPlans);
-
- if (_isScanAndOrderSet) builder.append(scanAndOrder(), _scanAndOrder);
-
- if (_isIndexOnlySet) builder.append(indexOnly(), _indexOnly);
-
- if (_isNYieldsSet) builder.appendNumber(nYields(), _nYields);
-
- if (_isNChunkSkipsSet) builder.appendNumber(nChunkSkips(), _nChunkSkips);
-
- if (_isMillisSet) builder.appendNumber(millis(), _millis);
-
- if (_isIndexBoundsSet) builder.append(indexBounds(), _indexBounds);
-
- if (_allPlans.get()) {
- BSONArrayBuilder allPlansBuilder(builder.subarrayStart(allPlans()));
- for (std::vector<TypeExplain*>::const_iterator it = _allPlans->begin();
- it != _allPlans->end();
- ++it) {
- BSONObj allPlansObject = (*it)->toBSON();
- allPlansBuilder.append(allPlansObject);
- }
- allPlansBuilder.done();
- }
-
- if (_oldPlan.get()) builder.append(oldPlan(), _oldPlan->toBSON());
-
- if (_isServerSet) builder.append(server(), _server);
-
- if (_isIndexFilterAppliedSet) builder.append(indexFilterApplied(), _indexFilterApplied);
-
- // Add this at the end as it can be huge
- if (!stats.isEmpty()) {
- builder.append("stats", stats);
- }
-
- return builder.obj();
- }
-
- bool TypeExplain::parseBSON(const BSONObj& source, string* errMsg) {
- clear();
-
- std::string dummy;
- if (!errMsg) errMsg = &dummy;
-
- FieldParser::FieldState fieldState;
-
- std::vector<TypeExplain*>* bareClauses = NULL;
- fieldState = FieldParser::extract(source, clauses, &bareClauses, errMsg);
- if (fieldState == FieldParser::FIELD_INVALID) return false;
- if (fieldState == FieldParser::FIELD_SET) _clauses.reset(bareClauses);
-
- fieldState = FieldParser::extract(source, cursor, &_cursor, errMsg);
- if (fieldState == FieldParser::FIELD_INVALID) return false;
- _isCursorSet = fieldState == FieldParser::FIELD_SET;
-
- fieldState = FieldParser::extract(source, isMultiKey, &_isMultiKey, errMsg);
- if (fieldState == FieldParser::FIELD_INVALID) return false;
- _isIsMultiKeySet = fieldState == FieldParser::FIELD_SET;
-
- fieldState = FieldParser::extract(source, n, &_n, errMsg);
- if (fieldState == FieldParser::FIELD_INVALID) return false;
- _isNSet = fieldState == FieldParser::FIELD_SET;
-
- fieldState = FieldParser::extract(source, nScannedObjects, &_nScannedObjects, errMsg);
- if (fieldState == FieldParser::FIELD_INVALID) return false;
- _isNScannedObjectsSet = fieldState == FieldParser::FIELD_SET;
-
- fieldState = FieldParser::extract(source, nScanned, &_nScanned, errMsg);
- if (fieldState == FieldParser::FIELD_INVALID) return false;
- _isNScannedSet = fieldState == FieldParser::FIELD_SET;
-
- fieldState = FieldParser::extract(source,
- nScannedObjectsAllPlans,
- &_nScannedObjectsAllPlans,
- errMsg);
- if (fieldState == FieldParser::FIELD_INVALID) return false;
- _isNScannedObjectsAllPlansSet = fieldState == FieldParser::FIELD_SET;
-
- fieldState = FieldParser::extract(source, nScannedAllPlans, &_nScannedAllPlans, errMsg);
- if (fieldState == FieldParser::FIELD_INVALID) return false;
- _isNScannedAllPlansSet = fieldState == FieldParser::FIELD_SET;
-
- fieldState = FieldParser::extract(source, scanAndOrder, &_scanAndOrder, errMsg);
- if (fieldState == FieldParser::FIELD_INVALID) return false;
- _isScanAndOrderSet = fieldState == FieldParser::FIELD_SET;
-
- fieldState = FieldParser::extract(source, indexOnly, &_indexOnly, errMsg);
- if (fieldState == FieldParser::FIELD_INVALID) return false;
- _isIndexOnlySet = fieldState == FieldParser::FIELD_SET;
-
- fieldState = FieldParser::extract(source, nYields, &_nYields, errMsg);
- if (fieldState == FieldParser::FIELD_INVALID) return false;
- _isNYieldsSet = fieldState == FieldParser::FIELD_SET;
-
- fieldState = FieldParser::extract(source, nChunkSkips, &_nChunkSkips, errMsg);
- if (fieldState == FieldParser::FIELD_INVALID) return false;
- _isNChunkSkipsSet = fieldState == FieldParser::FIELD_SET;
-
- fieldState = FieldParser::extract(source, millis, &_millis, errMsg);
- if (fieldState == FieldParser::FIELD_INVALID) return false;
- _isMillisSet = fieldState == FieldParser::FIELD_SET;
-
- fieldState = FieldParser::extract(source, indexBounds, &_indexBounds, errMsg);
- if (fieldState == FieldParser::FIELD_INVALID) return false;
- _isIndexBoundsSet = fieldState == FieldParser::FIELD_SET;
-
- std::vector<TypeExplain*>* bareAllPlans = NULL;
- fieldState = FieldParser::extract(source, allPlans, &bareAllPlans, errMsg);
- if (fieldState == FieldParser::FIELD_INVALID) return false;
- if (fieldState == FieldParser::FIELD_SET) _allPlans.reset(bareAllPlans);
-
- TypeExplain* bareOldPlan = NULL;
- fieldState = FieldParser::extract(source, oldPlan, &bareOldPlan, errMsg);
- if (fieldState == FieldParser::FIELD_INVALID) return false;
- if (fieldState == FieldParser::FIELD_SET) _oldPlan.reset(bareOldPlan);
-
- fieldState = FieldParser::extract(source, server, &_server, errMsg);
- if (fieldState == FieldParser::FIELD_INVALID) return false;
- _isServerSet = fieldState == FieldParser::FIELD_SET;
-
- return true;
- }
-
- void TypeExplain::clear() {
- unsetClauses();
-
- _cursor.clear();
- _isCursorSet = false;
-
- _isMultiKey = false;
- _isIsMultiKeySet = false;
-
- _n = 0;
- _isNSet = false;
-
- _nScannedObjects = 0;
- _isNScannedObjectsSet = false;
-
- _nScanned = 0;
- _isNScannedSet = false;
-
- _nScannedObjectsAllPlans = 0;
- _isNScannedObjectsAllPlansSet = false;
-
- _nScannedAllPlans = 0;
- _isNScannedAllPlansSet = false;
-
- _scanAndOrder = false;
- _isScanAndOrderSet = false;
-
- _indexOnly = false;
- _isIndexOnlySet = false;
-
- _idHack = false;
- _isIDHackSet = false;
-
- _indexFilterApplied = false;
- _isIndexFilterAppliedSet = false;
-
- _nYields = 0;
- _isNYieldsSet = false;
-
- _nChunkSkips = 0;
- _isNChunkSkipsSet = false;
-
- _millis = 0;
- _isMillisSet = false;
-
- _indexBounds = BSONObj();
- _isIndexBoundsSet = false;
-
- unsetAllPlans();
-
- unsetOldPlan();
-
- _server.clear();
- _isServerSet = false;
-
- }
-
- void TypeExplain::cloneTo(TypeExplain* other) const {
- other->clear();
-
- other->unsetClauses();
- if (_clauses.get()) {
- for(std::vector<TypeExplain*>::const_iterator it = _clauses->begin();
- it != _clauses->end();
- ++it) {
- TypeExplain* clausesItem = new TypeExplain;
- (*it)->cloneTo(clausesItem);
- other->addToClauses(clausesItem);
- }
- }
-
- other->_cursor = _cursor;
- other->_isCursorSet = _isCursorSet;
-
- other->_isMultiKey = _isMultiKey;
- other->_isIsMultiKeySet = _isIsMultiKeySet;
-
- other->_n = _n;
- other->_isNSet = _isNSet;
-
- other->_nScannedObjects = _nScannedObjects;
- other->_isNScannedObjectsSet = _isNScannedObjectsSet;
-
- other->_nScanned = _nScanned;
- other->_isNScannedSet = _isNScannedSet;
-
- other->_nScannedObjectsAllPlans = _nScannedObjectsAllPlans;
- other->_isNScannedObjectsAllPlansSet = _isNScannedObjectsAllPlansSet;
-
- other->_nScannedAllPlans = _nScannedAllPlans;
- other->_isNScannedAllPlansSet = _isNScannedAllPlansSet;
-
- other->_scanAndOrder = _scanAndOrder;
- other->_isScanAndOrderSet = _isScanAndOrderSet;
-
- other->_indexOnly = _indexOnly;
- other->_isIndexOnlySet = _isIndexOnlySet;
-
- other->_idHack = _idHack;
- other->_isIDHackSet = _isIDHackSet;
-
- other->_indexFilterApplied = _indexFilterApplied;
- other->_isIndexFilterAppliedSet = _isIndexFilterAppliedSet;
-
- other->_nYields = _nYields;
- other->_isNYieldsSet = _isNYieldsSet;
-
- other->_nChunkSkips = _nChunkSkips;
- other->_isNChunkSkipsSet = _isNChunkSkipsSet;
-
- other->_millis = _millis;
- other->_isMillisSet = _isMillisSet;
-
- other->_indexBounds = _indexBounds;
- other->_isIndexBoundsSet = _isIndexBoundsSet;
-
- other->unsetAllPlans();
- if (_allPlans.get()) {
- for(std::vector<TypeExplain*>::const_iterator it = _allPlans->begin();
- it != _allPlans->end();
- ++it) {
- TypeExplain* allPlansItem = new TypeExplain;
- (*it)->cloneTo(allPlansItem);
- other->addToAllPlans(allPlansItem);
- }
- }
-
- other->unsetOldPlan();
- if (_oldPlan.get()) {
- TypeExplain* oldPlanCopy = new TypeExplain;
- _oldPlan->cloneTo(oldPlanCopy);
- other->setOldPlan(oldPlanCopy);
- }
-
- other->_server = _server;
- other->_isServerSet = _isServerSet;
- }
-
- std::string TypeExplain::toString() const {
- return toBSON().toString();
- }
-
- void TypeExplain::setClauses(const std::vector<TypeExplain*>& clauses) {
- unsetClauses();
- for(std::vector<TypeExplain*>::const_iterator it = clauses.begin();
- it != clauses.end();
- ++it) {
- TypeExplain* clausesItem = new TypeExplain;
- (*it)->cloneTo(clausesItem);
- addToClauses(clausesItem);
- }
- }
-
- void TypeExplain::addToClauses(TypeExplain* clauses) {
- if (_clauses.get() == NULL) {
- _clauses.reset(new std::vector<TypeExplain*>);
- }
- _clauses->push_back(clauses);
- }
-
- void TypeExplain::unsetClauses() {
- if (_clauses.get()) {
- for(std::vector<TypeExplain*>::const_iterator it = _clauses->begin();
- it != _clauses->end();
- ++it) {
- delete *it;
- }
- }
- _clauses.reset();
- }
-
- bool TypeExplain::isClausesSet() const {
- return _clauses.get() != NULL;
- }
-
- size_t TypeExplain::sizeClauses() const {
- verify(_clauses.get());
- return _clauses->size();
- }
-
- const std::vector<TypeExplain*>& TypeExplain::getClauses() const {
- verify(_clauses.get());
- return *_clauses;
- }
-
- const TypeExplain* TypeExplain::getClausesAt(size_t pos) const {
- verify(_clauses.get());
- verify(_clauses->size() > pos);
- return _clauses->at(pos);
- }
-
- void TypeExplain::setCursor(const StringData& cursor) {
- _cursor = cursor.toString();
- _isCursorSet = true;
- }
-
- void TypeExplain::unsetCursor() {
- _isCursorSet = false;
- }
-
- bool TypeExplain::isCursorSet() const {
- return _isCursorSet;
- }
-
- const std::string& TypeExplain::getCursor() const {
- verify(_isCursorSet);
- return _cursor;
- }
-
- void TypeExplain::setIsMultiKey(bool isMultiKey) {
- _isMultiKey = isMultiKey;
- _isIsMultiKeySet = true;
- }
-
- void TypeExplain::unsetIsMultiKey() {
- _isIsMultiKeySet = false;
- }
-
- bool TypeExplain::isIsMultiKeySet() const {
- return _isIsMultiKeySet;
- }
-
- bool TypeExplain::getIsMultiKey() const {
- verify(_isIsMultiKeySet);
- return _isMultiKey;
- }
-
- void TypeExplain::setN(long long n) {
- _n = n;
- _isNSet = true;
- }
-
- void TypeExplain::unsetN() {
- _isNSet = false;
- }
-
- bool TypeExplain::isNSet() const {
- return _isNSet;
- }
-
- long long TypeExplain::getN() const {
- verify(_isNSet);
- return _n;
- }
-
- void TypeExplain::setNScannedObjects(long long nScannedObjects) {
- _nScannedObjects = nScannedObjects;
- _isNScannedObjectsSet = true;
- }
-
- void TypeExplain::unsetNScannedObjects() {
- _isNScannedObjectsSet = false;
- }
-
- bool TypeExplain::isNScannedObjectsSet() const {
- return _isNScannedObjectsSet;
- }
-
- long long TypeExplain::getNScannedObjects() const {
- verify(_isNScannedObjectsSet);
- return _nScannedObjects;
- }
-
- void TypeExplain::setNScanned(long long nScanned) {
- _nScanned = nScanned;
- _isNScannedSet = true;
- }
-
- void TypeExplain::unsetNScanned() {
- _isNScannedSet = false;
- }
-
- bool TypeExplain::isNScannedSet() const {
- return _isNScannedSet;
- }
-
- long long TypeExplain::getNScanned() const {
- verify(_isNScannedSet);
- return _nScanned;
- }
-
- void TypeExplain::setNScannedObjectsAllPlans(long long nScannedObjectsAllPlans) {
- _nScannedObjectsAllPlans = nScannedObjectsAllPlans;
- _isNScannedObjectsAllPlansSet = true;
- }
-
- void TypeExplain::unsetNScannedObjectsAllPlans() {
- _isNScannedObjectsAllPlansSet = false;
- }
-
- bool TypeExplain::isNScannedObjectsAllPlansSet() const {
- return _isNScannedObjectsAllPlansSet;
- }
-
- long long TypeExplain::getNScannedObjectsAllPlans() const {
- verify(_isNScannedObjectsAllPlansSet);
- return _nScannedObjectsAllPlans;
- }
-
- void TypeExplain::setNScannedAllPlans(long long nScannedAllPlans) {
- _nScannedAllPlans = nScannedAllPlans;
- _isNScannedAllPlansSet = true;
- }
-
- void TypeExplain::unsetNScannedAllPlans() {
- _isNScannedAllPlansSet = false;
- }
-
- bool TypeExplain::isNScannedAllPlansSet() const {
- return _isNScannedAllPlansSet;
- }
-
- long long TypeExplain::getNScannedAllPlans() const {
- verify(_isNScannedAllPlansSet);
- return _nScannedAllPlans;
- }
-
- void TypeExplain::setScanAndOrder(bool scanAndOrder) {
- _scanAndOrder = scanAndOrder;
- _isScanAndOrderSet = true;
- }
-
- void TypeExplain::unsetScanAndOrder() {
- _isScanAndOrderSet = false;
- }
-
- bool TypeExplain::isScanAndOrderSet() const {
- return _isScanAndOrderSet;
- }
-
- bool TypeExplain::getScanAndOrder() const {
- verify(_isScanAndOrderSet);
- return _scanAndOrder;
- }
-
- void TypeExplain::setIndexOnly(bool indexOnly) {
- _indexOnly = indexOnly;
- _isIndexOnlySet = true;
- }
-
- void TypeExplain::unsetIndexOnly() {
- _isIndexOnlySet = false;
- }
-
- bool TypeExplain::isIndexOnlySet() const {
- return _isIndexOnlySet;
- }
-
- bool TypeExplain::getIndexOnly() const {
- verify(_isIndexOnlySet);
- return _indexOnly;
- }
-
- void TypeExplain::setIDHack(bool idhack) {
- _idHack = idhack;
- _isIDHackSet = true;
- }
-
- void TypeExplain::unsetIDHack() {
- _isIDHackSet = false;
- }
-
- bool TypeExplain::isIDHackSet() const {
- return _isIDHackSet;
- }
-
- bool TypeExplain::getIDHack() const {
- verify(_isIDHackSet);
- return _idHack;
- }
-
- void TypeExplain::setIndexFilterApplied(bool indexFilterApplied) {
- _indexFilterApplied = indexFilterApplied;
- _isIndexFilterAppliedSet = true;
- }
-
- void TypeExplain::unsetIndexFilterApplied() {
- _isIndexFilterAppliedSet = false;
- }
-
- bool TypeExplain::isIndexFilterAppliedSet() const {
- return _isIndexFilterAppliedSet;
- }
-
- bool TypeExplain::getIndexFilterApplied() const {
- verify(_isIndexFilterAppliedSet);
- return _indexFilterApplied;
- }
-
- void TypeExplain::setNYields(long long nYields) {
- _nYields = nYields;
- _isNYieldsSet = true;
- }
-
- void TypeExplain::unsetNYields() {
- _isNYieldsSet = false;
- }
-
- bool TypeExplain::isNYieldsSet() const {
- return _isNYieldsSet;
- }
-
- long long TypeExplain::getNYields() const {
- verify(_isNYieldsSet);
- return _nYields;
- }
-
- void TypeExplain::setNChunkSkips(long long nChunkSkips) {
- _nChunkSkips = nChunkSkips;
- _isNChunkSkipsSet = true;
- }
-
- void TypeExplain::unsetNChunkSkips() {
- _isNChunkSkipsSet = false;
- }
-
- bool TypeExplain::isNChunkSkipsSet() const {
- return _isNChunkSkipsSet;
- }
-
- long long TypeExplain::getNChunkSkips() const {
- verify(_isNChunkSkipsSet);
- return _nChunkSkips;
- }
-
- void TypeExplain::setMillis(long long millis) {
- _millis = millis;
- _isMillisSet = true;
- }
-
- void TypeExplain::unsetMillis() {
- _isMillisSet = false;
- }
-
- bool TypeExplain::isMillisSet() const {
- return _isMillisSet;
- }
-
- long long TypeExplain::getMillis() const {
- verify(_isMillisSet);
- return _millis;
- }
-
- void TypeExplain::setIndexBounds(const BSONObj& indexBounds) {
- _indexBounds = indexBounds.getOwned();
- _isIndexBoundsSet = true;
- }
-
- void TypeExplain::unsetIndexBounds() {
- _isIndexBoundsSet = false;
- }
-
- bool TypeExplain::isIndexBoundsSet() const {
- return _isIndexBoundsSet;
- }
-
- const BSONObj& TypeExplain::getIndexBounds() const {
- verify(_isIndexBoundsSet);
- return _indexBounds;
- }
-
- void TypeExplain::setAllPlans(const std::vector<TypeExplain*>& allPlans) {
- unsetAllPlans();
- for (std::vector<TypeExplain*>::const_iterator it = allPlans.begin();
- it != allPlans.end();
- ++it) {
- TypeExplain* allPlansItem = new TypeExplain;
- (*it)->cloneTo(allPlansItem);
- addToClauses(allPlansItem);
- }
- }
-
- void TypeExplain::addToAllPlans(TypeExplain* allPlans) {
- if (_allPlans.get() == NULL) {
- _allPlans.reset(new std::vector<TypeExplain*>);
- }
- _allPlans->push_back(allPlans);
- }
-
- void TypeExplain::unsetAllPlans() {
- if (_allPlans.get()) {
- for (std::vector<TypeExplain*>::const_iterator it = _allPlans->begin();
- it != _allPlans->end();
- ++it) {
- delete *it;
- }
- _allPlans.reset();
- }
- }
-
- bool TypeExplain::isAllPlansSet() const {
- return _allPlans.get() != NULL;
- }
-
- size_t TypeExplain::sizeAllPlans() const {
- verify(_allPlans.get());
- return _allPlans->size();
- }
-
- const std::vector<TypeExplain*>& TypeExplain::getAllPlans() const {
- verify(_allPlans.get());
- return *_allPlans;
- }
-
- const TypeExplain* TypeExplain::getAllPlansAt(size_t pos) const {
- verify(_allPlans.get());
- verify(_allPlans->size() > pos);
- return _allPlans->at(pos);
- }
-
- void TypeExplain::setOldPlan(TypeExplain* oldPlan) {
- _oldPlan.reset(oldPlan);
- }
-
- void TypeExplain::unsetOldPlan() {
- _oldPlan.reset();
- }
-
- bool TypeExplain::isOldPlanSet() const {
- return _oldPlan.get() != NULL;
- }
-
- const TypeExplain* TypeExplain::getOldPlan() const {
- verify(_oldPlan.get());
- return _oldPlan.get();
- }
-
- void TypeExplain::setServer(const StringData& server) {
- _server = server.toString();
- _isServerSet = true;
- }
-
- void TypeExplain::unsetServer() {
- _isServerSet = false;
- }
-
- bool TypeExplain::isServerSet() const {
- return _isServerSet;
- }
-
- const std::string& TypeExplain::getServer() const {
- verify(_isServerSet);
- return _server;
- }
-
-} // namespace mongo
diff --git a/src/mongo/db/query/type_explain.h b/src/mongo/db/query/type_explain.h
deleted file mode 100644
index 6cca71f2bf0..00000000000
--- a/src/mongo/db/query/type_explain.h
+++ /dev/null
@@ -1,292 +0,0 @@
-/**
- * Copyright (C) 2013 10gen Inc.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License, version 3,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * As a special exception, the copyright holders give permission to link the
- * code of portions of this program with the OpenSSL library under certain
- * conditions as described in each individual source file and distribute
- * linked combinations including the program with the OpenSSL library. You
- * must comply with the GNU Affero General Public License in all respects
- * for all of the code used other than as permitted herein. If you modify
- * file(s) with this exception, you may extend this exception to your
- * version of the file(s), but you are not obligated to do so. If you do not
- * wish to do so, delete this exception statement from your version. If you
- * delete this exception statement from all source files in the program,
- * then also delete it in the license file.
- */
-
-// THIS FILE IS DEPRECATED -- the old explain implementation is being replaced
-
-#pragma once
-
-#include <string>
-
-#include "mongo/base/string_data.h"
-#include "mongo/bson/bson_field.h"
-#include "mongo/s/bson_serializable.h"
-
-namespace mongo {
-
- /**
- * Contains query debug information that describes the
- * query plan. Generally this information depends only on
- * the planning process that happens without running the
- * query. The exception is the multi plan runner, in which
- * case plan selection depends on actually running the query.
- *
- * Currently, just a summary std::string describing the plan
- * used to run the query.
- */
- struct PlanInfo {
- PlanInfo() : planSummary("") { }
- std::string planSummary;
- };
-
- /**
- * This class represents the layout and content of a TypeExplain runCommand,
- * the response side.
- */
- class TypeExplain : public BSONSerializable {
- MONGO_DISALLOW_COPYING(TypeExplain);
- public:
-
- //
- // schema declarations
- //
-
- static const BSONField<std::vector<TypeExplain*> > clauses;
- static const BSONField<std::string> cursor;
- static const BSONField<bool> isMultiKey;
- static const BSONField<long long> n;
- static const BSONField<long long> nScannedObjects;
- static const BSONField<long long> nScanned;
- static const BSONField<long long> nScannedObjectsAllPlans;
- static const BSONField<long long> nScannedAllPlans;
- static const BSONField<bool> scanAndOrder;
- static const BSONField<bool> indexOnly;
- static const BSONField<long long> nYields;
- static const BSONField<long long> nChunkSkips;
- static const BSONField<long long> millis;
- static const BSONField<BSONObj> indexBounds;
- static const BSONField<std::vector<TypeExplain*> > allPlans;
- static const BSONField<TypeExplain*> oldPlan;
- static const BSONField<bool> indexFilterApplied;
- static const BSONField<std::string> server;
-
- //
- // construction / destruction
- //
-
- TypeExplain();
- virtual ~TypeExplain();
-
- /** Copies all the fields present in 'this' to 'other'. */
- void cloneTo(TypeExplain* other) const;
-
- //
- // bson serializable interface implementation
- //
-
- virtual bool isValid(std::string* errMsg) const;
- virtual BSONObj toBSON() const;
- virtual bool parseBSON(const BSONObj& source, std::string* errMsg);
- virtual void clear();
- virtual std::string toString() const;
-
- //
- // individual field accessors
- //
-
- void setClauses(const std::vector<TypeExplain*>& clauses);
- void addToClauses(TypeExplain* clauses);
- void unsetClauses();
- bool isClausesSet() const;
- size_t sizeClauses() const;
- const std::vector<TypeExplain*>& getClauses() const;
- const TypeExplain* getClausesAt(size_t pos) const;
-
- void setCursor(const StringData& cursor);
- void unsetCursor();
- bool isCursorSet() const;
- const std::string& getCursor() const;
-
- void setIsMultiKey(bool isMultiKey);
- void unsetIsMultiKey();
- bool isIsMultiKeySet() const;
- bool getIsMultiKey() const;
-
- void setN(long long n);
- void unsetN();
- bool isNSet() const;
- long long getN() const;
-
- void setNScannedObjects(long long nScannedObjects);
- void unsetNScannedObjects();
- bool isNScannedObjectsSet() const;
- long long getNScannedObjects() const;
-
- void setNScanned(long long nScanned);
- void unsetNScanned();
- bool isNScannedSet() const;
- long long getNScanned() const;
-
- void setNScannedObjectsAllPlans(long long nScannedObjectsAllPlans);
- void unsetNScannedObjectsAllPlans();
- bool isNScannedObjectsAllPlansSet() const;
- long long getNScannedObjectsAllPlans() const;
-
- void setNScannedAllPlans(long long nScannedAllPlans);
- void unsetNScannedAllPlans();
- bool isNScannedAllPlansSet() const;
- long long getNScannedAllPlans() const;
-
- void setScanAndOrder(bool scanAndOrder);
- void unsetScanAndOrder();
- bool isScanAndOrderSet() const;
- bool getScanAndOrder() const;
-
- void setIndexOnly(bool indexOnly);
- void unsetIndexOnly();
- bool isIndexOnlySet() const;
- bool getIndexOnly() const;
-
- void setIDHack(bool idhack);
- void unsetIDHack();
- bool isIDHackSet() const;
- bool getIDHack() const;
-
- void setIndexFilterApplied(bool indexFilterApplied);
- void unsetIndexFilterApplied();
- bool isIndexFilterAppliedSet() const;
- bool getIndexFilterApplied() const;
-
- void setNYields(long long nYields);
- void unsetNYields();
- bool isNYieldsSet() const;
- long long getNYields() const;
-
- void setNChunkSkips(long long nChunkSkips);
- void unsetNChunkSkips();
- bool isNChunkSkipsSet() const;
- long long getNChunkSkips() const;
-
- void setMillis(long long millis);
- void unsetMillis();
- bool isMillisSet() const;
- long long getMillis() const;
-
- void setIndexBounds(const BSONObj& indexBounds);
- void unsetIndexBounds();
- bool isIndexBoundsSet() const;
- const BSONObj& getIndexBounds() const;
-
- void setAllPlans(const std::vector<TypeExplain*>& allPlans);
- void addToAllPlans(TypeExplain* allPlans);
- void unsetAllPlans();
- bool isAllPlansSet() const;
- size_t sizeAllPlans() const;
- const std::vector<TypeExplain*>& getAllPlans() const;
- const TypeExplain* getAllPlansAt(size_t pos) const;
-
- void setOldPlan(TypeExplain* oldPlan);
- void unsetOldPlan();
- bool isOldPlanSet() const;
- const TypeExplain* getOldPlan() const;
-
- void setServer(const StringData& server);
- void unsetServer();
- bool isServerSet() const;
- const std::string& getServer() const;
-
- // Opaque stats object
- BSONObj stats;
-
- private:
- // Convention: (M)andatory, (O)ptional
-
- // (O) explain for branches on a $or query
- boost::scoped_ptr<std::vector<TypeExplain*> >_clauses;
-
- // (O) type and name of the cursor used on the leaf stage
- std::string _cursor;
- bool _isCursorSet;
-
- // (O) type and name of the cursor used on the leaf stage
- bool _isMultiKey;
- bool _isIsMultiKeySet;
-
- // (M) number of documents returned by the query
- long long _n;
- bool _isNSet;
-
- // (M) number of documents fetched entirely from the disk
- long long _nScannedObjects;
- bool _isNScannedObjectsSet;
-
- // (M) number of entries retrieved either from an index or collection
- long long _nScanned;
- bool _isNScannedSet;
-
- // (O) number of documents fetched entirely from the disk across all plans
- long long _nScannedObjectsAllPlans;
- bool _isNScannedObjectsAllPlansSet;
-
- // (O) number of entries retrieved either from an index or collection across all plans
- long long _nScannedAllPlans;
- bool _isNScannedAllPlansSet;
-
- // (O) whether this plan involved sorting
- bool _scanAndOrder;
- bool _isScanAndOrderSet;
-
- // (O) number of entries retrieved either from an index or collection across all plans
- bool _indexOnly;
- bool _isIndexOnlySet;
-
- // (O) whether the idhack was used to answer this query
- bool _idHack;
- bool _isIDHackSet;
-
- // (O) whether index filters were used in planning this query
- bool _indexFilterApplied;
- bool _isIndexFilterAppliedSet;
-
- // (O) number times this plan released and reacquired its lock
- long long _nYields;
- bool _isNYieldsSet;
-
- // (O) number times this plan skipped over migrated data
- long long _nChunkSkips;
- bool _isNChunkSkipsSet;
-
- // (O) elapsed time this plan took running, in milliseconds
- long long _millis;
- bool _isMillisSet;
-
- // (O) keys used to seek in and out of an index
- BSONObj _indexBounds;
- bool _isIndexBoundsSet;
-
- // (O) alternative plans considered
- boost::scoped_ptr<std::vector<TypeExplain*> > _allPlans;
-
- // (O) cached plan for this query
- boost::scoped_ptr<TypeExplain> _oldPlan;
-
- // (O) server's host:port against which the query ran
- std::string _server;
- bool _isServerSet;
- };
-
-} // namespace mongo
diff --git a/src/mongo/dbtests/querytests.cpp b/src/mongo/dbtests/querytests.cpp
index 48686d3c621..a14d3e620b3 100644
--- a/src/mongo/dbtests/querytests.cpp
+++ b/src/mongo/dbtests/querytests.cpp
@@ -632,8 +632,9 @@ namespace QueryTests {
// Check number of results and filterSet flag in explain.
// filterSet is not available in oplog replay mode.
BSONObj explainObj = c->next();
- ASSERT_EQUALS( 1, explainObj.getIntField( "n" ) );
- ASSERT_FALSE( explainObj.hasField( "filterSet" ) );
+ ASSERT( explainObj.hasField("executionStats") );
+ BSONObj execStats = explainObj["executionStats"].Obj();
+ ASSERT_EQUALS( 1, execStats.getIntField( "nReturned" ) );
ASSERT( !c->more() );
}
@@ -973,26 +974,6 @@ namespace QueryTests {
checkMatch();
_client.ensureIndex( _ns, BSON( "a" << 1 ) );
checkMatch();
- // Use explain queries to check index bounds.
- {
- BSONObj explain = _client.findOne( _ns, QUERY( "a" << BSON( "$type" << (int)Code ) ).explain() );
- BSONObjBuilder lower;
- lower.appendCode( "", "" );
- BSONObjBuilder upper;
- upper.appendCodeWScope( "", "", BSONObj() );
- ASSERT( lower.done().firstElement().valuesEqual( explain[ "indexBounds" ].Obj()[ "a" ].Array()[ 0 ].Array()[ 0 ] ) );
- ASSERT( upper.done().firstElement().valuesEqual( explain[ "indexBounds" ].Obj()[ "a" ].Array()[ 0 ].Array()[ 1 ] ) );
- }
- {
- BSONObj explain = _client.findOne( _ns, QUERY( "a" << BSON( "$type" << (int)CodeWScope ) ).explain() );
- BSONObjBuilder lower;
- lower.appendCodeWScope( "", "", BSONObj() );
- // This upper bound may change if a new bson type is added.
- BSONObjBuilder upper;
- upper << "" << BSON( "$maxElement" << 1 );
- ASSERT( lower.done().firstElement().valuesEqual( explain[ "indexBounds" ].Obj()[ "a" ].Array()[ 0 ].Array()[ 0 ] ) );
- ASSERT( upper.done().firstElement().valuesEqual( explain[ "indexBounds" ].Obj()[ "a" ].Array()[ 0 ].Array()[ 1 ] ) );
- }
}
private:
void checkMatch() {
diff --git a/src/mongo/s/strategy.cpp b/src/mongo/s/strategy.cpp
index 7ef2cf0c40b..34d05825598 100644
--- a/src/mongo/s/strategy.cpp
+++ b/src/mongo/s/strategy.cpp
@@ -171,7 +171,7 @@ namespace mongo {
if ( qSpec.isExplain() ) {
BSONObjBuilder explain_builder;
cursor->explain( explain_builder );
- explain_builder.appendNumber( "millis",
+ explain_builder.appendNumber( "executionTimeMillis",
static_cast<long long>(queryTimer.millis()) );
BSONObj b = explain_builder.obj();
diff --git a/src/mongo/shell/shardingtest.js b/src/mongo/shell/shardingtest.js
index b3c0a1171ca..1c8241ec392 100644
--- a/src/mongo/shell/shardingtest.js
+++ b/src/mongo/shell/shardingtest.js
@@ -874,21 +874,21 @@ ShardingTest.prototype.getShard = function( coll, query, includeEmpty ){
ShardingTest.prototype.getShards = function( coll, query, includeEmpty ){
if( ! coll.getDB )
coll = this.s.getCollection( coll )
-
+
var explain = coll.find( query ).explain()
var shards = []
-
+
if( explain.shards ){
-
- for( var shardName in explain.shards ){
+ for( var shardName in explain.shards ){
for( var i = 0; i < explain.shards[shardName].length; i++ ){
- if( includeEmpty || ( explain.shards[shardName][i].n && explain.shards[shardName][i].n > 0 ) )
+ var hasResults = explain.shards[shardName][i].executionStats.nReturned &&
+ explain.shards[shardName][i].executionStats.nReturned > 0;
+ if( includeEmpty || hasResults )
shards.push( shardName )
}
}
-
}
-
+
for( var i = 0; i < shards.length; i++ ){
for( var j = 0; j < this._connections.length; j++ ){
if ( connectionURLTheSame( this._connections[j] , shards[i] ) ){
@@ -897,7 +897,7 @@ ShardingTest.prototype.getShards = function( coll, query, includeEmpty ){
}
}
}
-
+
return shards
}