Diffstat (limited to 'src')
-rw-r--r--  src/mongo/db/catalog/index_catalog.cpp | 6
-rw-r--r--  src/mongo/db/catalog/index_create.cpp | 2
-rw-r--r--  src/mongo/db/commands/collection_to_capped.cpp | 10
-rw-r--r--  src/mongo/db/commands/count.cpp | 4
-rw-r--r--  src/mongo/db/commands/dbhash.cpp | 6
-rw-r--r--  src/mongo/db/commands/distinct.cpp | 4
-rw-r--r--  src/mongo/db/commands/find_and_modify.cpp | 4
-rw-r--r--  src/mongo/db/commands/geo_near_cmd.cpp | 2
-rw-r--r--  src/mongo/db/commands/group.cpp | 4
-rw-r--r--  src/mongo/db/commands/mr.cpp | 6
-rw-r--r--  src/mongo/db/commands/pipeline_command.cpp | 2
-rw-r--r--  src/mongo/db/commands/test_commands.cpp | 4
-rw-r--r--  src/mongo/db/commands/validate.cpp | 1
-rw-r--r--  src/mongo/db/db.cpp | 6
-rw-r--r--  src/mongo/db/dbcommands.cpp | 10
-rw-r--r--  src/mongo/db/dbhelpers.cpp | 26
-rw-r--r--  src/mongo/db/exec/plan_stage.h | 2
-rw-r--r--  src/mongo/db/exec/stagedebug_cmd.cpp | 2
-rw-r--r--  src/mongo/db/fts/fts_command_mongod.cpp | 2
-rw-r--r--  src/mongo/db/index/haystack_access_method.cpp | 4
-rw-r--r--  src/mongo/db/ops/delete_executor.cpp | 4
-rw-r--r--  src/mongo/db/ops/update.cpp | 8
-rw-r--r--  src/mongo/db/pipeline/document_source.h | 2
-rw-r--r--  src/mongo/db/pipeline/document_source_cursor.cpp | 10
-rw-r--r--  src/mongo/db/query/explain_plan.cpp | 2
-rw-r--r--  src/mongo/db/query/explain_plan.h | 2
-rw-r--r--  src/mongo/db/query/new_find.cpp | 32
-rw-r--r--  src/mongo/db/query/new_find.h | 1
-rw-r--r--  src/mongo/db/query/plan_cache.h | 8
-rw-r--r--  src/mongo/db/query/plan_executor.cpp | 14
-rw-r--r--  src/mongo/db/query/plan_executor.h | 52
-rw-r--r--  src/mongo/db/query/query_planner_params.h | 6
-rw-r--r--  src/mongo/db/query/runner.h | 179
-rw-r--r--  src/mongo/db/query/type_explain.cpp | 2
-rw-r--r--  src/mongo/db/query/type_explain.h | 2
-rw-r--r--  src/mongo/db/repl/master_slave.cpp | 12
-rw-r--r--  src/mongo/db/repl/repl_info.cpp | 4
-rw-r--r--  src/mongo/db/repl/rs_rollback.cpp | 6
-rw-r--r--  src/mongo/dbtests/executor_registry.cpp | 26
-rw-r--r--  src/mongo/dbtests/query_multi_plan_runner.cpp | 2
-rw-r--r--  src/mongo/dbtests/query_plan_executor.cpp | 16
-rw-r--r--  src/mongo/dbtests/query_stage_collscan.cpp | 6
-rw-r--r--  src/mongo/dbtests/query_stage_merge_sort.cpp | 32
-rw-r--r--  src/mongo/dbtests/query_stage_sort.cpp | 8
-rw-r--r--  src/mongo/dbtests/query_stage_tests.cpp | 2
-rw-r--r--  src/mongo/s/d_migrate.cpp | 2
-rw-r--r--  src/mongo/s/d_split.cpp | 12
47 files changed, 213 insertions, 346 deletions
diff --git a/src/mongo/db/catalog/index_catalog.cpp b/src/mongo/db/catalog/index_catalog.cpp
index edb6219f607..a0587ef0c9d 100644
--- a/src/mongo/db/catalog/index_catalog.cpp
+++ b/src/mongo/db/catalog/index_catalog.cpp
@@ -227,8 +227,8 @@ namespace mongo {
db->getCollection(txn, db->_indexesName)));
BSONObj index;
- Runner::RunnerState state;
- while ( Runner::RUNNER_ADVANCED == (state = exec->getNext(&index, NULL)) ) {
+ PlanExecutor::ExecState state;
+ while ( PlanExecutor::ADVANCED == (state = exec->getNext(&index, NULL)) ) {
const BSONObj key = index.getObjectField("key");
const string plugin = IndexNames::findPluginName(key);
if ( IndexNames::existedBefore24(plugin) )
@@ -243,7 +243,7 @@ namespace mongo {
return Status( ErrorCodes::CannotCreateIndex, errmsg );
}
- if ( Runner::RUNNER_EOF != state ) {
+ if ( PlanExecutor::IS_EOF != state ) {
warning() << "Internal error while reading system.indexes collection";
}
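For reference, the iteration idiom that call sites are converted to throughout this commit looks roughly as follows. This is a minimal sketch built only from identifiers visible in the hunks; the surrounding setup of txn, ns, coll and the mongo headers is assumed.

    // Poll getNext() until it stops returning ADVANCED, then inspect the
    // terminal state; anything other than IS_EOF means the scan was cut short.
    auto_ptr<PlanExecutor> exec(InternalPlanner::collectionScan(txn, ns, coll));
    BSONObj obj;
    PlanExecutor::ExecState state;
    while (PlanExecutor::ADVANCED == (state = exec->getNext(&obj, NULL))) {
        // consume 'obj' here
    }
    if (PlanExecutor::IS_EOF != state) {
        warning() << "Internal error while reading " << ns;
    }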
diff --git a/src/mongo/db/catalog/index_create.cpp b/src/mongo/db/catalog/index_create.cpp
index 96bf25f1c94..b369b98ca61 100644
--- a/src/mongo/db/catalog/index_create.cpp
+++ b/src/mongo/db/catalog/index_create.cpp
@@ -114,7 +114,7 @@ namespace mongo {
BSONObj js;
DiskLoc loc;
- while (Runner::RUNNER_ADVANCED == exec->getNext(&js, &loc)) {
+ while (PlanExecutor::ADVANCED == exec->getNext(&js, &loc)) {
try {
if ( !dupsAllowed && dropDups ) {
LastError::Disabled led( lastError.get() );
diff --git a/src/mongo/db/commands/collection_to_capped.cpp b/src/mongo/db/commands/collection_to_capped.cpp
index aaa1674a40a..969326141db 100644
--- a/src/mongo/db/commands/collection_to_capped.cpp
+++ b/src/mongo/db/commands/collection_to_capped.cpp
@@ -91,17 +91,17 @@ namespace mongo {
while ( true ) {
BSONObj obj;
- Runner::RunnerState state = exec->getNext(&obj, NULL);
+ PlanExecutor::ExecState state = exec->getNext(&obj, NULL);
switch( state ) {
- case Runner::RUNNER_EOF:
+ case PlanExecutor::IS_EOF:
return Status::OK();
- case Runner::RUNNER_DEAD:
+ case PlanExecutor::DEAD:
db->dropCollection( txn, toNs );
return Status( ErrorCodes::InternalError, "executor turned dead while iterating" );
- case Runner::RUNNER_ERROR:
+ case PlanExecutor::EXEC_ERROR:
return Status( ErrorCodes::InternalError, "executor error while iterating" );
- case Runner::RUNNER_ADVANCED:
+ case PlanExecutor::ADVANCED:
if ( excessSize > 0 ) {
excessSize -= ( 4 * obj.objsize() ); // 4x is for padding, power of 2, etc...
continue;
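The switch above is the exhaustive form of the state check. Condensed, the dispatch the renamed enum supports looks like this (a sketch, with the surrounding loop and cleanup elided):

    PlanExecutor::ExecState state = exec->getNext(&obj, NULL);
    switch (state) {
    case PlanExecutor::ADVANCED:
        break;                  // got a result in 'obj', keep iterating
    case PlanExecutor::IS_EOF:
        return Status::OK();    // normal termination
    case PlanExecutor::DEAD:
        // executor was killed, e.g. the collection was dropped during a yield
        return Status(ErrorCodes::InternalError, "executor turned dead while iterating");
    case PlanExecutor::EXEC_ERROR:
        // unrecoverable failure reported by the underlying PlanStage tree
        return Status(ErrorCodes::InternalError, "executor error while iterating");
    }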
diff --git a/src/mongo/db/commands/count.cpp b/src/mongo/db/commands/count.cpp
index 306af80f311..d49203cedfb 100644
--- a/src/mongo/db/commands/count.cpp
+++ b/src/mongo/db/commands/count.cpp
@@ -116,8 +116,8 @@ namespace mongo {
ScopedExecutorRegistration safety(exec.get());
long long count = 0;
- Runner::RunnerState state;
- while (Runner::RUNNER_ADVANCED == (state = exec->getNext(NULL, NULL))) {
+ PlanExecutor::ExecState state;
+ while (PlanExecutor::ADVANCED == (state = exec->getNext(NULL, NULL))) {
if (skip > 0) {
--skip;
}
diff --git a/src/mongo/db/commands/dbhash.cpp b/src/mongo/db/commands/dbhash.cpp
index 2c5e5cee6f9..945a9e8ebb2 100644
--- a/src/mongo/db/commands/dbhash.cpp
+++ b/src/mongo/db/commands/dbhash.cpp
@@ -107,14 +107,14 @@ namespace mongo {
md5_init(&st);
long long n = 0;
- Runner::RunnerState state;
+ PlanExecutor::ExecState state;
BSONObj c;
verify(NULL != exec.get());
- while (Runner::RUNNER_ADVANCED == (state = exec->getNext(&c, NULL))) {
+ while (PlanExecutor::ADVANCED == (state = exec->getNext(&c, NULL))) {
md5_append( &st , (const md5_byte_t*)c.objdata() , c.objsize() );
n++;
}
- if (Runner::RUNNER_EOF != state) {
+ if (PlanExecutor::IS_EOF != state) {
warning() << "error while hashing, db dropped? ns=" << fullCollectionName << endl;
}
md5digest d;
diff --git a/src/mongo/db/commands/distinct.cpp b/src/mongo/db/commands/distinct.cpp
index 27769ffcfee..de6ce85b6c3 100644
--- a/src/mongo/db/commands/distinct.cpp
+++ b/src/mongo/db/commands/distinct.cpp
@@ -122,8 +122,8 @@ namespace mongo {
const ScopedExecutorRegistration safety(exec.get());
BSONObj obj;
- Runner::RunnerState state;
- while (Runner::RUNNER_ADVANCED == (state = exec->getNext(&obj, NULL))) {
+ PlanExecutor::ExecState state;
+ while (PlanExecutor::ADVANCED == (state = exec->getNext(&obj, NULL))) {
// Distinct expands arrays.
//
// If our query is covered, each value of the key should be in the index key and
diff --git a/src/mongo/db/commands/find_and_modify.cpp b/src/mongo/db/commands/find_and_modify.cpp
index ea90af2cb66..13ec1680ee2 100644
--- a/src/mongo/db/commands/find_and_modify.cpp
+++ b/src/mongo/db/commands/find_and_modify.cpp
@@ -161,8 +161,8 @@ namespace mongo {
// state and may continue doing that with document-level locking (approach is TBD).
const ScopedExecutorRegistration safety(exec.get());
- Runner::RunnerState state;
- if (Runner::RUNNER_ADVANCED == (state = exec->getNext(&doc, NULL))) {
+ PlanExecutor::ExecState state;
+ if (PlanExecutor::ADVANCED == (state = exec->getNext(&doc, NULL))) {
found = true;
}
}
diff --git a/src/mongo/db/commands/geo_near_cmd.cpp b/src/mongo/db/commands/geo_near_cmd.cpp
index ddb89a4f48a..28e9e802e23 100644
--- a/src/mongo/db/commands/geo_near_cmd.cpp
+++ b/src/mongo/db/commands/geo_near_cmd.cpp
@@ -201,7 +201,7 @@ namespace mongo {
BSONObj currObj;
int results = 0;
- while ((results < numWanted) && Runner::RUNNER_ADVANCED == exec->getNext(&currObj, NULL)) {
+ while ((results < numWanted) && PlanExecutor::ADVANCED == exec->getNext(&currObj, NULL)) {
// Come up with the correct distance.
double dist = currObj["$dis"].number() * distanceMultiplier;
diff --git a/src/mongo/db/commands/group.cpp b/src/mongo/db/commands/group.cpp
index 9bfa3259a6f..cd9f98fce18 100644
--- a/src/mongo/db/commands/group.cpp
+++ b/src/mongo/db/commands/group.cpp
@@ -156,8 +156,8 @@ namespace mongo {
const ScopedExecutorRegistration safety(exec.get());
BSONObj obj;
- Runner::RunnerState state;
- while (Runner::RUNNER_ADVANCED == (state = exec->getNext(&obj, NULL))) {
+ PlanExecutor::ExecState state;
+ while (PlanExecutor::ADVANCED == (state = exec->getNext(&obj, NULL))) {
BSONObj key = getKey(obj , keyPattern , keyFunction , keysize / keynum,
s.get() );
keysize += key.objsize();
diff --git a/src/mongo/db/commands/mr.cpp b/src/mongo/db/commands/mr.cpp
index f2501f99e80..8a902824482 100644
--- a/src/mongo/db/commands/mr.cpp
+++ b/src/mongo/db/commands/mr.cpp
@@ -1002,8 +1002,8 @@ namespace mongo {
// iterate over all sorted objects
BSONObj o;
- Runner::RunnerState state;
- while (Runner::RUNNER_ADVANCED == (state = exec->getNext(&o, NULL))) {
+ PlanExecutor::ExecState state;
+ while (PlanExecutor::ADVANCED == (state = exec->getNext(&o, NULL))) {
pm.hit();
if ( o.woSortOrder( prev , sortKey ) == 0 ) {
@@ -1353,7 +1353,7 @@ namespace mongo {
// go through each doc
BSONObj o;
- while (Runner::RUNNER_ADVANCED == exec->getNext(&o, NULL)) {
+ while (PlanExecutor::ADVANCED == exec->getNext(&o, NULL)) {
// check to see if this is a new object we don't own yet
// because of a chunk migration
if ( collMetadata ) {
diff --git a/src/mongo/db/commands/pipeline_command.cpp b/src/mongo/db/commands/pipeline_command.cpp
index de49bc61d1c..36a9a7c75a8 100644
--- a/src/mongo/db/commands/pipeline_command.cpp
+++ b/src/mongo/db/commands/pipeline_command.cpp
@@ -224,7 +224,7 @@ namespace {
for (int objCount = 0; objCount < batchSize; objCount++) {
// The initial getNext() on a PipelineProxyStage may be very expensive so we don't
// do it when batchSize is 0 since that indicates a desire for a fast return.
- if (exec->getNext(&next, NULL) != Runner::RUNNER_ADVANCED) {
+ if (exec->getNext(&next, NULL) != PlanExecutor::ADVANCED) {
if (pin) pin->deleteUnderlying();
// make it an obvious error to use cursor or executor after this point
cursor = NULL;
diff --git a/src/mongo/db/commands/test_commands.cpp b/src/mongo/db/commands/test_commands.cpp
index fb5f38c7897..97a61695193 100644
--- a/src/mongo/db/commands/test_commands.cpp
+++ b/src/mongo/db/commands/test_commands.cpp
@@ -157,8 +157,8 @@ namespace mongo {
DiskLoc end;
// We remove 'n' elements so the start is one past that
for( int i = 0; i < n + 1; ++i ) {
- Runner::RunnerState state = exec->getNext(NULL, &end);
- massert( 13418, "captrunc invalid n", Runner::RUNNER_ADVANCED == state);
+ PlanExecutor::ExecState state = exec->getNext(NULL, &end);
+ massert( 13418, "captrunc invalid n", PlanExecutor::ADVANCED == state);
}
collection->temp_cappedTruncateAfter( txn, end, inc );
ctx.commit();
diff --git a/src/mongo/db/commands/validate.cpp b/src/mongo/db/commands/validate.cpp
index 7f6a36f2ff5..91c44de6424 100644
--- a/src/mongo/db/commands/validate.cpp
+++ b/src/mongo/db/commands/validate.cpp
@@ -32,7 +32,6 @@
#include "mongo/db/commands.h"
#include "mongo/db/query/internal_plans.h"
-#include "mongo/db/query/runner.h"
#include "mongo/db/operation_context_impl.h"
#include "mongo/db/catalog/collection.h"
#include "mongo/util/log.h"
diff --git a/src/mongo/db/db.cpp b/src/mongo/db/db.cpp
index 07f7417fada..62d4c37d861 100644
--- a/src/mongo/db/db.cpp
+++ b/src/mongo/db/db.cpp
@@ -368,8 +368,8 @@ namespace mongo {
auto_ptr<PlanExecutor> exec(
InternalPlanner::collectionScan(&txn, systemIndexes,coll));
BSONObj index;
- Runner::RunnerState state;
- while (Runner::RUNNER_ADVANCED == (state = exec->getNext(&index, NULL))) {
+ PlanExecutor::ExecState state;
+ while (PlanExecutor::ADVANCED == (state = exec->getNext(&index, NULL))) {
const BSONObj key = index.getObjectField("key");
const string plugin = IndexNames::findPluginName(key);
@@ -394,7 +394,7 @@ namespace mongo {
}
}
- if (Runner::RUNNER_EOF != state) {
+ if (PlanExecutor::IS_EOF != state) {
warning() << "Internal error while reading collection " << systemIndexes;
}
diff --git a/src/mongo/db/dbcommands.cpp b/src/mongo/db/dbcommands.cpp
index 27105525e91..8ff1931a8a2 100644
--- a/src/mongo/db/dbcommands.cpp
+++ b/src/mongo/db/dbcommands.cpp
@@ -689,8 +689,8 @@ namespace mongo {
const ChunkVersion shardVersionAtStart = shardingState.getVersion(ns);
BSONObj obj;
- Runner::RunnerState state;
- while (Runner::RUNNER_ADVANCED == (state = exec->getNext(&obj, NULL))) {
+ PlanExecutor::ExecState state;
+ while (PlanExecutor::ADVANCED == (state = exec->getNext(&obj, NULL))) {
BSONElement ne = obj["n"];
verify(ne.isNumber());
int myn = ne.numberInt();
@@ -835,8 +835,8 @@ namespace mongo {
long long numObjects = 0;
DiskLoc loc;
- Runner::RunnerState state;
- while (Runner::RUNNER_ADVANCED == (state = exec->getNext(NULL, &loc))) {
+ PlanExecutor::ExecState state;
+ while (PlanExecutor::ADVANCED == (state = exec->getNext(NULL, &loc))) {
if ( estimate )
size += avgObjSize;
else
@@ -851,7 +851,7 @@ namespace mongo {
}
}
- if (Runner::RUNNER_EOF != state) {
+ if (PlanExecutor::IS_EOF != state) {
warning() << "Internal error while reading " << ns << endl;
}
diff --git a/src/mongo/db/dbhelpers.cpp b/src/mongo/db/dbhelpers.cpp
index db0132d5aa0..d9923e9c1b1 100644
--- a/src/mongo/db/dbhelpers.cpp
+++ b/src/mongo/db/dbhelpers.cpp
@@ -118,9 +118,9 @@ namespace mongo {
getExecutor(txn, collection, cq, &rawExec, options).isOK());
auto_ptr<PlanExecutor> exec(rawExec);
- Runner::RunnerState state;
+ PlanExecutor::ExecState state;
DiskLoc loc;
- if (Runner::RUNNER_ADVANCED == (state = exec->getNext(NULL, &loc))) {
+ if (PlanExecutor::ADVANCED == (state = exec->getNext(NULL, &loc))) {
return loc;
}
return DiskLoc();
@@ -187,9 +187,9 @@ namespace mongo {
auto_ptr<PlanExecutor> exec(
InternalPlanner::collectionScan(txn, ns, context.db()->getCollection(txn, ns)));
- Runner::RunnerState state = exec->getNext(&result, NULL);
+ PlanExecutor::ExecState state = exec->getNext(&result, NULL);
context.getClient()->curop()->done();
- return Runner::RUNNER_ADVANCED == state;
+ return PlanExecutor::ADVANCED == state;
}
bool Helpers::getLast(OperationContext* txn, const char *ns, BSONObj& result) {
@@ -198,8 +198,8 @@ namespace mongo {
auto_ptr<PlanExecutor> exec(
InternalPlanner::collectionScan(txn, ns, coll, InternalPlanner::BACKWARD));
- Runner::RunnerState state = exec->getNext(&result, NULL);
- return Runner::RUNNER_ADVANCED == state;
+ PlanExecutor::ExecState state = exec->getNext(&result, NULL);
+ return PlanExecutor::ADVANCED == state;
}
void Helpers::upsert( OperationContext* txn,
@@ -368,20 +368,20 @@ namespace mongo {
DiskLoc rloc;
BSONObj obj;
- Runner::RunnerState state;
+ PlanExecutor::ExecState state;
// This may yield so we cannot touch nsd after this.
state = exec->getNext(&obj, &rloc);
exec.reset();
- if (Runner::RUNNER_EOF == state) { break; }
+ if (PlanExecutor::IS_EOF == state) { break; }
- if (Runner::RUNNER_DEAD == state) {
+ if (PlanExecutor::DEAD == state) {
warning() << "cursor died: aborting deletion for "
<< min << " to " << max << " in " << ns
<< endl;
break;
}
- if (Runner::RUNNER_ERROR == state) {
+ if (PlanExecutor::EXEC_ERROR == state) {
warning() << "cursor error while trying to delete "
<< min << " to " << max
<< " in " << ns << ": "
@@ -389,7 +389,7 @@ namespace mongo {
break;
}
- verify(Runner::RUNNER_ADVANCED == state);
+ verify(PlanExecutor::ADVANCED == state);
if ( onlyRemoveOrphanedDocs ) {
// Do a final check in the write lock to make absolutely sure that our
@@ -525,8 +525,8 @@ namespace mongo {
// already being queued and will be migrated in the 'transferMods' stage
DiskLoc loc;
- Runner::RunnerState state;
- while (Runner::RUNNER_ADVANCED == (state = exec->getNext(NULL, &loc))) {
+ PlanExecutor::ExecState state;
+ while (PlanExecutor::ADVANCED == (state = exec->getNext(NULL, &loc))) {
if ( !isLargeChunk ) {
locs->insert( loc );
}
diff --git a/src/mongo/db/exec/plan_stage.h b/src/mongo/db/exec/plan_stage.h
index 4c8f27d3a08..bc5d701587d 100644
--- a/src/mongo/db/exec/plan_stage.h
+++ b/src/mongo/db/exec/plan_stage.h
@@ -46,7 +46,7 @@ namespace mongo {
*
* Stages have zero or more input streams but only one output stream. Data-accessing stages are
* leaves and data-transforming stages have children. Stages can be connected together to form
- * a tree which is then executed (see plan_runner.h) to solve a query.
+ * a tree which is then executed (see plan_executor.h) to solve a query.
*
* A stage's input and output are each typed. Only stages with compatible types can be
* connected.
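As the updated comment says, a stage tree is now executed by a PlanExecutor (plan_executor.h) rather than a "runner". The dbtests further down construct executors directly over a hand-built tree; a condensed sketch of that wiring, with the stage construction itself elided since it varies by stage type:

    WorkingSet* ws = new WorkingSet();
    PlanStage* root = NULL;  // build the stage tree here, e.g. a CollectionScan or FetchStage

    // The executor drives the tree by calling work() on the root stage until
    // the plan reaches EOF or errors (see the class comment in plan_executor.h).
    PlanExecutor exec(ws, root, collection);

    BSONObj obj;
    while (PlanExecutor::ADVANCED == exec.getNext(&obj, NULL)) {
        // process 'obj'
    }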
diff --git a/src/mongo/db/exec/stagedebug_cmd.cpp b/src/mongo/db/exec/stagedebug_cmd.cpp
index 952b3b1e104..ea8fff78edd 100644
--- a/src/mongo/db/exec/stagedebug_cmd.cpp
+++ b/src/mongo/db/exec/stagedebug_cmd.cpp
@@ -145,7 +145,7 @@ namespace mongo {
BSONArrayBuilder resultBuilder(result.subarrayStart("results"));
- for (BSONObj obj; Runner::RUNNER_ADVANCED == runner.getNext(&obj, NULL); ) {
+ for (BSONObj obj; PlanExecutor::ADVANCED == runner.getNext(&obj, NULL); ) {
resultBuilder.append(obj);
}
diff --git a/src/mongo/db/fts/fts_command_mongod.cpp b/src/mongo/db/fts/fts_command_mongod.cpp
index 146d0159faf..610f2438edb 100644
--- a/src/mongo/db/fts/fts_command_mongod.cpp
+++ b/src/mongo/db/fts/fts_command_mongod.cpp
@@ -130,7 +130,7 @@ namespace mongo {
int numReturned = 0;
BSONObj obj;
- while (Runner::RUNNER_ADVANCED == exec->getNext(&obj, NULL)) {
+ while (PlanExecutor::ADVANCED == exec->getNext(&obj, NULL)) {
if ((resultSize + obj.objsize()) >= BSONObjMaxUserSize) {
break;
}
diff --git a/src/mongo/db/index/haystack_access_method.cpp b/src/mongo/db/index/haystack_access_method.cpp
index 8d2e72a474f..57a0ac6d4f0 100644
--- a/src/mongo/db/index/haystack_access_method.cpp
+++ b/src/mongo/db/index/haystack_access_method.cpp
@@ -102,9 +102,9 @@ namespace mongo {
scoped_ptr<PlanExecutor> exec(InternalPlanner::indexScan(txn, collection,
_descriptor, key, key, true));
- Runner::RunnerState state;
+ PlanExecutor::ExecState state;
DiskLoc loc;
- while (Runner::RUNNER_ADVANCED == (state = exec->getNext(NULL, &loc))) {
+ while (PlanExecutor::ADVANCED == (state = exec->getNext(NULL, &loc))) {
if (hopper.limitReached()) { break; }
pair<unordered_set<DiskLoc, DiskLoc::Hasher>::iterator, bool> p
= thisPass.insert(loc);
diff --git a/src/mongo/db/ops/delete_executor.cpp b/src/mongo/db/ops/delete_executor.cpp
index 17b2123c8f1..3fdbf6c77f9 100644
--- a/src/mongo/db/ops/delete_executor.cpp
+++ b/src/mongo/db/ops/delete_executor.cpp
@@ -138,10 +138,10 @@ namespace mongo {
ScopedExecutorRegistration safety(exec.get());
DiskLoc rloc;
- Runner::RunnerState state;
+ PlanExecutor::ExecState state;
CurOp* curOp = _request->getOpCtx()->getCurOp();
int oldYieldCount = curOp->numYields();
- while (Runner::RUNNER_ADVANCED == (state = exec->getNext(NULL, &rloc))) {
+ while (PlanExecutor::ADVANCED == (state = exec->getNext(NULL, &rloc))) {
if (oldYieldCount != curOp->numYields()) {
uassert(ErrorCodes::NotMaster,
str::stream() << "No longer primary while removing from " << ns.ns(),
diff --git a/src/mongo/db/ops/update.cpp b/src/mongo/db/ops/update.cpp
index 4ca1367465c..e0269ecd146 100644
--- a/src/mongo/db/ops/update.cpp
+++ b/src/mongo/db/ops/update.cpp
@@ -517,7 +517,7 @@ namespace mongo {
BSONObj oldObj;
// Get first doc, and location
- Runner::RunnerState state = Runner::RUNNER_ADVANCED;
+ PlanExecutor::ExecState state = PlanExecutor::ADVANCED;
uassert(ErrorCodes::NotMaster,
mongoutils::str::stream() << "Not primary while updating " << nsString.ns(),
@@ -530,15 +530,15 @@ namespace mongo {
DiskLoc loc;
state = exec->getNext(&oldObj, &loc);
- if (state != Runner::RUNNER_ADVANCED) {
- if (state == Runner::RUNNER_EOF) {
+ if (state != PlanExecutor::ADVANCED) {
+ if (state == PlanExecutor::IS_EOF) {
// We have reached the logical end of the loop, so do yielding recovery
break;
}
else {
uassertStatusOK(Status(ErrorCodes::InternalError,
str::stream() << " Update query failed -- "
- << Runner::statestr(state)));
+ << PlanExecutor::statestr(state)));
}
}
diff --git a/src/mongo/db/pipeline/document_source.h b/src/mongo/db/pipeline/document_source.h
index 5c34eae6fd5..4b6a8bf0291 100644
--- a/src/mongo/db/pipeline/document_source.h
+++ b/src/mongo/db/pipeline/document_source.h
@@ -420,7 +420,7 @@ namespace mongo {
long long _docsAddedToBatches; // for _limit enforcement
const std::string _ns;
- boost::shared_ptr<PlanExecutor> _exec; // PipelineRunner holds a weak_ptr to this.
+ boost::shared_ptr<PlanExecutor> _exec; // PipelineProxyStage holds a weak_ptr to this.
};
diff --git a/src/mongo/db/pipeline/document_source_cursor.cpp b/src/mongo/db/pipeline/document_source_cursor.cpp
index b037e9b7f06..ee44d7c2c0a 100644
--- a/src/mongo/db/pipeline/document_source_cursor.cpp
+++ b/src/mongo/db/pipeline/document_source_cursor.cpp
@@ -87,8 +87,8 @@ namespace mongo {
int memUsageBytes = 0;
BSONObj obj;
- Runner::RunnerState state;
- while ((state = _exec->getNext(&obj, NULL)) == Runner::RUNNER_ADVANCED) {
+ PlanExecutor::ExecState state;
+ while ((state = _exec->getNext(&obj, NULL)) == PlanExecutor::ADVANCED) {
if (_dependencies) {
_currentBatch.push_back(_dependencies->extractFields(obj));
}
@@ -117,13 +117,13 @@ namespace mongo {
_exec.reset();
uassert(16028, "collection or index disappeared when cursor yielded",
- state != Runner::RUNNER_DEAD);
+ state != PlanExecutor::DEAD);
uassert(17285, "cursor encountered an error: " + WorkingSetCommon::toStatusString(obj),
- state != Runner::RUNNER_ERROR);
+ state != PlanExecutor::EXEC_ERROR);
massert(17286, str::stream() << "Unexpected return from PlanExecutor::getNext: " << state,
- state == Runner::RUNNER_EOF || state == Runner::RUNNER_ADVANCED);
+ state == PlanExecutor::IS_EOF || state == PlanExecutor::ADVANCED);
}
void DocumentSourceCursor::setSource(DocumentSource *pSource) {
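When getNext() returns EXEC_ERROR and objOut was non-NULL, the out object carries the error details; the uassert above retrieves them with WorkingSetCommon::toStatusString(). A stripped-down sketch of that check:

    BSONObj obj;
    PlanExecutor::ExecState state = exec->getNext(&obj, NULL);

    // On EXEC_ERROR the out-parameter is a status object describing the failure.
    uassert(17285, "cursor encountered an error: " + WorkingSetCommon::toStatusString(obj),
            state != PlanExecutor::EXEC_ERROR);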
diff --git a/src/mongo/db/query/explain_plan.cpp b/src/mongo/db/query/explain_plan.cpp
index db9f7b82f40..0da9d51be53 100644
--- a/src/mongo/db/query/explain_plan.cpp
+++ b/src/mongo/db/query/explain_plan.cpp
@@ -26,6 +26,8 @@
* it in the license file.
*/
+// THIS FILE IS DEPRECATED -- the old explain implementation is being replaced
+
#include "mongo/db/query/explain_plan.h"
#include "mongo/db/query/stage_types.h"
diff --git a/src/mongo/db/query/explain_plan.h b/src/mongo/db/query/explain_plan.h
index 657ec3610a2..9353a610114 100644
--- a/src/mongo/db/query/explain_plan.h
+++ b/src/mongo/db/query/explain_plan.h
@@ -26,6 +26,8 @@
* it in the license file.
*/
+// THIS FILE IS DEPRECATED -- the old explain implementation is being replaced
+
#pragma once
#include "mongo/base/status.h"
diff --git a/src/mongo/db/query/new_find.cpp b/src/mongo/db/query/new_find.cpp
index 83efa22a5ab..6781ef3702f 100644
--- a/src/mongo/db/query/new_find.cpp
+++ b/src/mongo/db/query/new_find.cpp
@@ -242,8 +242,8 @@ namespace mongo {
exec->restoreState(txn);
BSONObj obj;
- Runner::RunnerState state;
- while (Runner::RUNNER_ADVANCED == (state = exec->getNext(&obj, NULL))) {
+ PlanExecutor::ExecState state;
+ while (PlanExecutor::ADVANCED == (state = exec->getNext(&obj, NULL))) {
// Add result to output buffer.
bb.appendBuf((void*)obj.objdata(), obj.objsize());
@@ -264,7 +264,7 @@ namespace mongo {
}
}
- if (Runner::RUNNER_EOF == state && 0 == numResults
+ if (PlanExecutor::IS_EOF == state && 0 == numResults
&& (queryOptions & QueryOption_CursorTailable)
&& (queryOptions & QueryOption_AwaitData) && (pass < 1000)) {
// If the cursor is tailable we don't kill it if it's eof. We let it try to get
@@ -283,9 +283,9 @@ namespace mongo {
// to getNext(...) might just return EOF).
bool saveClientCursor = false;
- if (Runner::RUNNER_DEAD == state || Runner::RUNNER_ERROR == state) {
+ if (PlanExecutor::DEAD == state || PlanExecutor::EXEC_ERROR == state) {
// Propagate this error to caller.
- if (Runner::RUNNER_ERROR == state) {
+ if (PlanExecutor::EXEC_ERROR == state) {
scoped_ptr<PlanStageStats> stats(exec->getStats());
error() << "Plan executor error, stats: "
<< statsToBSON(*stats);
@@ -306,12 +306,12 @@ namespace mongo {
resultFlags = ResultFlag_CursorNotFound;
}
}
- else if (Runner::RUNNER_EOF == state) {
+ else if (PlanExecutor::IS_EOF == state) {
// EOF is also end of the line unless it's tailable.
saveClientCursor = queryOptions & QueryOption_CursorTailable;
}
else {
- verify(Runner::RUNNER_ADVANCED == state);
+ verify(PlanExecutor::ADVANCED == state);
saveClientCursor = true;
}
@@ -321,7 +321,7 @@ namespace mongo {
cursorid = 0;
cc = NULL;
QLOG() << "getMore NOT saving client cursor, ended with state "
- << Runner::statestr(state)
+ << PlanExecutor::statestr(state)
<< endl;
}
else {
@@ -329,7 +329,7 @@ namespace mongo {
cc->incPos(numResults);
exec->saveState();
QLOG() << "getMore saving client cursor ended with state "
- << Runner::statestr(state)
+ << PlanExecutor::statestr(state)
<< endl;
// Possibly note slave's position in the oplog.
@@ -404,15 +404,15 @@ namespace mongo {
// The stage returns a DiskLoc of where to start.
DiskLoc startLoc;
- Runner::RunnerState state = exec->getNext(NULL, &startLoc);
+ PlanExecutor::ExecState state = exec->getNext(NULL, &startLoc);
// This is normal. The start of the oplog is the beginning of the collection.
- if (Runner::RUNNER_EOF == state) {
+ if (PlanExecutor::IS_EOF == state) {
return getExecutor(txn, collection, autoCq.release(), execOut);
}
// This is not normal. An error was encountered.
- if (Runner::RUNNER_ADVANCED != state) {
+ if (PlanExecutor::ADVANCED != state) {
return Status(ErrorCodes::InternalError,
"quick oplog start location had error...?");
}
@@ -650,7 +650,7 @@ namespace mongo {
auto_ptr<ScopedExecutorRegistration> safety(new ScopedExecutorRegistration(exec.get()));
BSONObj obj;
- Runner::RunnerState state;
+ PlanExecutor::ExecState state;
// uint64_t numMisplacedDocs = 0;
// Get summary info about which plan the executor is using.
@@ -658,7 +658,7 @@ namespace mongo {
Explain::getSummaryStats(exec.get(), &stats);
curop.debug().planSummary = stats.summaryStr.c_str();
- while (Runner::RUNNER_ADVANCED == (state = exec->getNext(&obj, NULL))) {
+ while (PlanExecutor::ADVANCED == (state = exec->getNext(&obj, NULL))) {
// Add result to output buffer. This is unnecessary if explain info is requested
if (!isExplain) {
bb.appendBuf((void*)obj.objdata(), obj.objsize());
@@ -710,7 +710,7 @@ namespace mongo {
safety.reset();
// Caller expects exceptions thrown in certain cases.
- if (Runner::RUNNER_ERROR == state) {
+ if (PlanExecutor::EXEC_ERROR == state) {
scoped_ptr<PlanStageStats> stats(exec->getStats());
error() << "Plan executor error, stats: "
<< statsToBSON(*stats);
@@ -718,7 +718,7 @@ namespace mongo {
}
// Why save a dead executor?
- if (Runner::RUNNER_DEAD == state) {
+ if (PlanExecutor::DEAD == state) {
saveClientCursor = false;
}
else if (pq.hasOption(QueryOption_CursorTailable)) {
diff --git a/src/mongo/db/query/new_find.h b/src/mongo/db/query/new_find.h
index 6050d90d633..c136cee8981 100644
--- a/src/mongo/db/query/new_find.h
+++ b/src/mongo/db/query/new_find.h
@@ -34,7 +34,6 @@
#include "mongo/db/curop.h"
#include "mongo/db/dbmessage.h"
#include "mongo/db/query/canonical_query.h"
-#include "mongo/db/query/runner.h"
#include "mongo/util/net/message.h"
namespace mongo {
diff --git a/src/mongo/db/query/plan_cache.h b/src/mongo/db/query/plan_cache.h
index 84dbab1fa34..bc89e9aa7da 100644
--- a/src/mongo/db/query/plan_cache.h
+++ b/src/mongo/db/query/plan_cache.h
@@ -46,7 +46,7 @@ namespace mongo {
struct QuerySolutionNode;
/**
- * When the CachedPlanRunner runs a cached query, it can provide feedback to the cache. This
+ * When the CachedPlanStage runs a cached query, it can provide feedback to the cache. This
* feedback is available to anyone who retrieves that query in the future.
*/
struct PlanCacheEntryFeedback {
@@ -253,7 +253,7 @@ namespace mongo {
// the other plans lost.
boost::scoped_ptr<PlanRankingDecision> decision;
- // Annotations from cached runs. The CachedSolutionRunner provides these stats about its
+ // Annotations from cached runs. The CachedPlanStage provides these stats about its
// runs when they complete.
std::vector<PlanCacheEntryFeedback*> feedback;
@@ -321,8 +321,8 @@ namespace mongo {
Status get(const CanonicalQuery& query, CachedSolution** crOut) const;
/**
- * When the CachedPlanRunner runs a plan out of the cache, we want to record data about the
- * plan's performance. The CachedPlanRunner calls feedback(...) at the end of query
+ * When the CachedPlanStage runs a plan out of the cache, we want to record data about the
+ * plan's performance. The CachedPlanStage calls feedback(...) at the end of query
* execution in order to do this.
*
* Cache takes ownership of 'feedback'.
diff --git a/src/mongo/db/query/plan_executor.cpp b/src/mongo/db/query/plan_executor.cpp
index a449dc52372..9df91a83b77 100644
--- a/src/mongo/db/query/plan_executor.cpp
+++ b/src/mongo/db/query/plan_executor.cpp
@@ -124,8 +124,8 @@ namespace mongo {
if (!_killed) { _root->invalidate(dl, type); }
}
- Runner::RunnerState PlanExecutor::getNext(BSONObj* objOut, DiskLoc* dlOut) {
- if (_killed) { return Runner::RUNNER_DEAD; }
+ PlanExecutor::ExecState PlanExecutor::getNext(BSONObj* objOut, DiskLoc* dlOut) {
+ if (_killed) { return PlanExecutor::DEAD; }
for (;;) {
WorkingSetID id = WorkingSet::INVALID_ID;
@@ -136,7 +136,7 @@ namespace mongo {
if (WorkingSet::INVALID_ID == id) {
invariant(NULL == objOut);
invariant(NULL == dlOut);
- return Runner::RUNNER_ADVANCED;
+ return PlanExecutor::ADVANCED;
}
WorkingSetMember* member = _workingSet->get(id);
@@ -173,7 +173,7 @@ namespace mongo {
if (hasRequestedData) {
_workingSet->free(id);
- return Runner::RUNNER_ADVANCED;
+ return PlanExecutor::ADVANCED;
}
// This result didn't have the data the caller wanted, try again.
}
@@ -181,17 +181,17 @@ namespace mongo {
// Fall through to yield check at end of large conditional.
}
else if (PlanStage::IS_EOF == code) {
- return Runner::RUNNER_EOF;
+ return PlanExecutor::IS_EOF;
}
else if (PlanStage::DEAD == code) {
- return Runner::RUNNER_DEAD;
+ return PlanExecutor::DEAD;
}
else {
verify(PlanStage::FAILURE == code);
if (NULL != objOut) {
WorkingSetCommon::getStatusMemberObject(*_workingSet, id, objOut);
}
- return Runner::RUNNER_ERROR;
+ return PlanExecutor::EXEC_ERROR;
}
}
}
diff --git a/src/mongo/db/query/plan_executor.h b/src/mongo/db/query/plan_executor.h
index 87c8aaa285f..8a6104edf79 100644
--- a/src/mongo/db/query/plan_executor.h
+++ b/src/mongo/db/query/plan_executor.h
@@ -31,17 +31,18 @@
#include <boost/scoped_ptr.hpp>
#include "mongo/base/status.h"
-#include "mongo/db/query/runner.h"
+#include "mongo/db/invalidation_type.h"
#include "mongo/db/query/query_solution.h"
namespace mongo {
class BSONObj;
+ class Collection;
class DiskLoc;
class PlanStage;
+ class PlanExecutor;
struct PlanStageStats;
class WorkingSet;
- class PlanExecutor;
/**
* RAII approach to ensuring that plan executors are deregistered.
@@ -66,11 +67,50 @@ namespace mongo {
* The executor is usually part of a larger abstraction that is interacting with the cache
* and/or the query optimizer.
*
- * Executes a plan. Used by a runner. Calls work() on a plan until a result is produced.
- * Stops when the plan is EOF or if the plan errors.
+ * Executes a plan. Calls work() on a plan until a result is produced. Stops when the plan is
+ * EOF or if the plan errors.
*/
class PlanExecutor {
public:
+
+ enum ExecState {
+ // We successfully populated the out parameter.
+ ADVANCED,
+
+ // We're EOF. We won't return any more results (edge case exception: capped+tailable).
+ IS_EOF,
+
+ // We were killed or had an error.
+ DEAD,
+
+ // getNext was asked for data it cannot provide, or the underlying PlanStage had an
+ // unrecoverable error.
+ // If the underlying PlanStage has any information on the error, it will be available in
+ // the objOut parameter. Call WorkingSetCommon::toStatusString() to retrieve the error
+ // details from the output BSON object.
+ EXEC_ERROR,
+ };
+
+ static std::string statestr(ExecState s) {
+ if (PlanExecutor::ADVANCED == s) {
+ return "ADVANCED";
+ }
+ else if (PlanExecutor::IS_EOF == s) {
+ return "IS_EOF";
+ }
+ else if (PlanExecutor::DEAD == s) {
+ return "DEAD";
+ }
+ else {
+ verify(PlanExecutor::EXEC_ERROR == s);
+ return "EXEC_ERROR";
+ }
+ }
+
+ //
+ // Constructors / destructor.
+ //
+
/**
* Used when there is no canonical query and no query solution.
*
@@ -151,7 +191,7 @@ namespace mongo {
//
/** TODO document me */
- Runner::RunnerState getNext(BSONObj* objOut, DiskLoc* dlOut);
+ ExecState getNext(BSONObj* objOut, DiskLoc* dlOut);
/** TOOD document me */
bool isEOF();
@@ -174,7 +214,7 @@ namespace mongo {
/**
* During the yield, the database we're operating over or any collection we're relying on
- * may be dropped. When this happens all cursors and runners on that database and
+ * may be dropped. When this happens all cursors and plan executors on that database and
* collection are killed or deleted in some fashion. (This is how the _killed gets set.)
*/
void kill();
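The statestr() helper added to this header replaces Runner::statestr() at the logging and assertion call sites converted elsewhere in this commit (update.cpp, new_find.cpp). A small usage sketch:

    PlanExecutor::ExecState state = exec->getNext(&obj, NULL);
    if (PlanExecutor::ADVANCED != state) {
        // statestr() maps the enum to "ADVANCED", "IS_EOF", "DEAD" or "EXEC_ERROR",
        // which is useful in diagnostics such as the update path's error message.
        log() << "executor stopped with state " << PlanExecutor::statestr(state);
    }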
diff --git a/src/mongo/db/query/query_planner_params.h b/src/mongo/db/query/query_planner_params.h
index b2970aa59ce..e7be3aec265 100644
--- a/src/mongo/db/query/query_planner_params.h
+++ b/src/mongo/db/query/query_planner_params.h
@@ -59,8 +59,8 @@ namespace mongo {
//
// In order to set this, you must check
// shardingState.needCollectionMetadata(current_namespace) in the same lock that you use
- // to build the query runner. You must also wrap the Runner in a ClientCursor within the
- // same lock. See the comment on ShardFilterStage for details.
+ // to build the query executor. You must also wrap the PlanExecutor in a ClientCursor
+ // within the same lock. See the comment on ShardFilterStage for details.
INCLUDE_SHARD_FILTER = 1 << 2,
// Set this if you don't want any plans with a blocking sort stage. All sorts must be
@@ -99,7 +99,7 @@ namespace mongo {
bool indexFiltersApplied;
// What's the max number of indexed solutions we want to output? It's expensive to compare
- // plans via the MultiPlanRunner, and the set of possible plans is very large for certain
+ // plans via the MultiPlanStage, and the set of possible plans is very large for certain
// index+query combinations.
size_t maxIndexedSolutions;
};
diff --git a/src/mongo/db/query/runner.h b/src/mongo/db/query/runner.h
deleted file mode 100644
index db7a642552f..00000000000
--- a/src/mongo/db/query/runner.h
+++ /dev/null
@@ -1,179 +0,0 @@
-/**
- * Copyright (C) 2013 10gen Inc.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License, version 3,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * As a special exception, the copyright holders give permission to link the
- * code of portions of this program with the OpenSSL library under certain
- * conditions as described in each individual source file and distribute
- * linked combinations including the program with the OpenSSL library. You
- * must comply with the GNU Affero General Public License in all respects for
- * all of the code used other than as permitted herein. If you modify file(s)
- * with this exception, you may extend this exception to your version of the
- * file(s), but you are not obligated to do so. If you do not wish to do so,
- * delete this exception statement from your version. If you delete this
- * exception statement from all source files in the program, then also delete
- * it in the license file.
- */
-
-// THIS FILE IS DEPRECATED -- Runner to be replaced with PlanExecutor.
-
-#pragma once
-
-#include "mongo/base/status.h"
-#include "mongo/db/query/canonical_query.h"
-#include "mongo/db/invalidation_type.h"
-
-namespace mongo {
-
- class Collection;
- class OperationContext;
- class DiskLoc;
- class TypeExplain;
- struct PlanInfo;
-
- /**
- * A runner runs a query.
- */
- class Runner {
- public:
- virtual ~Runner() { }
-
- enum RunnerState {
- // We successfully populated the out parameter.
- RUNNER_ADVANCED,
-
- // We're EOF. We won't return any more results (edge case exception: capped+tailable).
- RUNNER_EOF,
-
- // We were killed or had an error.
- RUNNER_DEAD,
-
- // getNext was asked for data it cannot provide, or the underlying PlanStage had an
- // unrecoverable error.
- // If the underlying PlanStage has any information on the error, it will be available in
- // the objOut parameter. Call WorkingSetCommon::toStatusString() to retrieve the error
- // details from the output BSON object.
- RUNNER_ERROR,
- };
-
- static std::string statestr(RunnerState s) {
- if (RUNNER_ADVANCED == s) {
- return "RUNNER_ADVANCED";
- }
- else if (RUNNER_EOF == s) {
- return "RUNNER_EOF";
- }
- else if (RUNNER_DEAD == s) {
- return "RUNNER_DEAD";
- }
- else {
- verify(RUNNER_ERROR == s);
- return "RUNNER_ERROR";
- }
- }
-
- /**
- * Get the next result from the query.
- *
- * If objOut is not NULL, only results that have a BSONObj are returned. The BSONObj may
- * point to on-disk data (isOwned will be false) and must be copied by the caller before
- * yielding.
- *
- * If dlOut is not NULL, only results that have a valid DiskLoc are returned.
- *
- * If both objOut and dlOut are not NULL, only results with both a valid BSONObj and DiskLoc
- * will be returned. The BSONObj is the object located at the DiskLoc provided.
- *
- * If the underlying query machinery produces a result that does not have the data requested
- * by the user, it will be silently dropped.
- *
- * If the caller is running a query, they probably only care about the object.
- * If the caller is an internal client, they may only care about DiskLocs (index scan), or
- * about object + DiskLocs (collection scan).
- *
- * Some notes on objOut and ownership:
- *
- * objOut may be an owned object in certain cases: invalidation of the underlying DiskLoc,
- * the object is created from covered index key data, the object is projected or otherwise
- * the result of a computation.
- *
- * objOut will also be owned when the underlying PlanStage has provided error details in the
- * event of a RUNNER_ERROR. Call WorkingSetCommon::toStatusString() to convert the object
- * to a loggable format.
- *
- * objOut will be unowned if it's the result of a fetch or a collection scan.
- */
- virtual RunnerState getNext(BSONObj* objOut, DiskLoc* dlOut) = 0;
-
- /**
- * Will the next call to getNext() return EOF? It's useful to know if the runner is done
- * without having to take responsibility for a result.
- */
- virtual bool isEOF() = 0;
-
- /**
- * Inform the runner about changes to DiskLoc(s) that occur while the runner is yielded.
- * The runner must take any actions required to continue operating correctly, including
- * broadcasting the invalidation request to the PlanStage tree being run.
- *
- * Called from CollectionCursorCache::invalidateDocument.
- *
- * See db/invalidation_type.h for InvalidationType.
- */
- virtual void invalidate(const DiskLoc& dl, InvalidationType type) = 0;
-
- /**
- * Mark the Runner as no longer valid. Can happen when a runner yields and the underlying
- * database is dropped/indexes removed/etc. All future to calls to getNext return
- * RUNNER_DEAD. Every other call is a NOOP.
- *
- * The runner must guarantee as a postcondition that future calls to collection() will
- * return NULL.
- */
- virtual void kill() = 0;
-
- /**
- * Save any state required to yield.
- */
- virtual void saveState() = 0;
-
- /**
- * Restore saved state, possibly after a yield. Return true if the runner is OK, false if
- * it was killed.
- */
- virtual bool restoreState(OperationContext* opCtx) = 0;
-
- /**
- * Return the NS that the query is running over.
- */
- virtual const std::string& ns() = 0;
-
- /**
- * Return the Collection that the query is running over.
- */
- virtual const Collection* collection() = 0;
-
- /**
- * Returns OK, allocating and filling '*explain' or '*planInfo' with a description of the
- * chosen plan, depending on which is non-NULL (one of the two should be NULL). Caller
- * takes onwership of either '*explain' and '*planInfo'. Otherwise, returns false
- * a detailed error status.
- *
- * If 'explain' is NULL, then this out-parameter is ignored. Similarly, if 'staticInfo'
- * is NULL, then no static debug information is produced.
- */
- virtual Status getInfo(TypeExplain** explain, PlanInfo** planInfo) const = 0;
- };
-
-} // namespace mongo
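The getNext() contract spelled out in the deleted runner.h (results filtered by whether objOut, dlOut, or both are non-NULL, plus the ownership caveats on objOut) carries over unchanged to PlanExecutor::getNext(). The three calling modes, as they appear at the converted call sites in this commit:

    BSONObj obj;
    DiskLoc loc;

    exec->getNext(&obj, NULL);   // query path: only the document matters
    exec->getNext(NULL, &loc);   // internal clients, e.g. index scans: only the DiskLoc matters
    exec->getNext(&obj, &loc);   // collection scans: the document plus its location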
diff --git a/src/mongo/db/query/type_explain.cpp b/src/mongo/db/query/type_explain.cpp
index b6415b4db41..329eb1dcb28 100644
--- a/src/mongo/db/query/type_explain.cpp
+++ b/src/mongo/db/query/type_explain.cpp
@@ -26,6 +26,8 @@
* then also delete it in the license file.
*/
+// THIS FILE IS DEPRECATED -- the old explain implementation is being replaced
+
#include "mongo/db/query/type_explain.h"
#include "mongo/db/field_parser.h"
diff --git a/src/mongo/db/query/type_explain.h b/src/mongo/db/query/type_explain.h
index f19b8ca107a..6cca71f2bf0 100644
--- a/src/mongo/db/query/type_explain.h
+++ b/src/mongo/db/query/type_explain.h
@@ -26,6 +26,8 @@
* then also delete it in the license file.
*/
+// THIS FILE IS DEPRECATED -- the old explain implementation is being replaced
+
#pragma once
#include <string>
diff --git a/src/mongo/db/repl/master_slave.cpp b/src/mongo/db/repl/master_slave.cpp
index 27e446765bb..46ba55feaff 100644
--- a/src/mongo/db/repl/master_slave.cpp
+++ b/src/mongo/db/repl/master_slave.cpp
@@ -258,8 +258,8 @@ namespace repl {
localSources,
ctx.db()->getCollection(txn, localSources)));
BSONObj obj;
- Runner::RunnerState state;
- while (Runner::RUNNER_ADVANCED == (state = exec->getNext(&obj, NULL))) {
+ PlanExecutor::ExecState state;
+ while (PlanExecutor::ADVANCED == (state = exec->getNext(&obj, NULL))) {
n++;
ReplSource tmp(txn, obj);
if (tmp.hostName != replSettings.source) {
@@ -279,7 +279,7 @@ namespace repl {
dbexit( EXIT_REPLICATION_ERROR );
}
}
- uassert(17065, "Internal error reading from local.sources", Runner::RUNNER_EOF == state);
+ uassert(17065, "Internal error reading from local.sources", PlanExecutor::IS_EOF == state);
uassert( 10002 , "local.sources collection corrupt?", n<2 );
if ( n == 0 ) {
// source missing. add.
@@ -303,8 +303,8 @@ namespace repl {
localSources,
ctx.db()->getCollection(txn, localSources)));
BSONObj obj;
- Runner::RunnerState state;
- while (Runner::RUNNER_ADVANCED == (state = exec->getNext(&obj, NULL))) {
+ PlanExecutor::ExecState state;
+ while (PlanExecutor::ADVANCED == (state = exec->getNext(&obj, NULL))) {
ReplSource tmp(txn, obj);
if ( tmp.syncedTo.isNull() ) {
DBDirectClient c(txn);
@@ -317,7 +317,7 @@ namespace repl {
}
addSourceToList(txn, v, tmp, old);
}
- uassert(17066, "Internal error reading from local.sources", Runner::RUNNER_EOF == state);
+ uassert(17066, "Internal error reading from local.sources", PlanExecutor::IS_EOF == state);
}
bool ReplSource::throttledForceResyncDead( OperationContext* txn, const char *requester ) {
diff --git a/src/mongo/db/repl/repl_info.cpp b/src/mongo/db/repl/repl_info.cpp
index 9d3b1e02436..77aa8d7ffc6 100644
--- a/src/mongo/db/repl/repl_info.cpp
+++ b/src/mongo/db/repl/repl_info.cpp
@@ -89,8 +89,8 @@ namespace repl {
ctx.ctx().db()->getCollection(txn,
localSources)));
BSONObj obj;
- Runner::RunnerState state;
- while (Runner::RUNNER_ADVANCED == (state = exec->getNext(&obj, NULL))) {
+ PlanExecutor::ExecState state;
+ while (PlanExecutor::ADVANCED == (state = exec->getNext(&obj, NULL))) {
src.push_back(obj);
}
}
diff --git a/src/mongo/db/repl/rs_rollback.cpp b/src/mongo/db/repl/rs_rollback.cpp
index 7aab1a71bc0..a64af7a597a 100644
--- a/src/mongo/db/repl/rs_rollback.cpp
+++ b/src/mongo/db/repl/rs_rollback.cpp
@@ -243,7 +243,7 @@ namespace repl {
BSONObj ourObj;
DiskLoc ourLoc;
- if (Runner::RUNNER_ADVANCED != exec->getNext(&ourObj, &ourLoc)) {
+ if (PlanExecutor::ADVANCED != exec->getNext(&ourObj, &ourLoc)) {
throw RSFatalException("our oplog empty or unreadable");
}
@@ -304,7 +304,7 @@ namespace repl {
theirObj = oplogCursor->nextSafe();
theirTime = theirObj["ts"]._opTime();
- if (Runner::RUNNER_ADVANCED != exec->getNext(&ourObj, &ourLoc)) {
+ if (PlanExecutor::ADVANCED != exec->getNext(&ourObj, &ourLoc)) {
log() << "replSet rollback error RS101 reached beginning of local oplog"
<< rsLog;
log() << "replSet them: " << them->toString() << " scanned: "
@@ -331,7 +331,7 @@ namespace repl {
else {
// theirTime < ourTime
refetch(fixUpInfo, ourObj);
- if (Runner::RUNNER_ADVANCED != exec->getNext(&ourObj, &ourLoc)) {
+ if (PlanExecutor::ADVANCED != exec->getNext(&ourObj, &ourLoc)) {
log() << "replSet rollback error RS101 reached beginning of local oplog"
<< rsLog;
log() << "replSet them: " << them->toString() << " scanned: "
diff --git a/src/mongo/dbtests/executor_registry.cpp b/src/mongo/dbtests/executor_registry.cpp
index 71a6a3bcb13..ea9dfac1389 100644
--- a/src/mongo/dbtests/executor_registry.cpp
+++ b/src/mongo/dbtests/executor_registry.cpp
@@ -116,7 +116,7 @@ namespace ExecutorRegistry {
// Read some of it.
for (int i = 0; i < 10; ++i) {
- ASSERT_EQUALS(Runner::RUNNER_ADVANCED, run->getNext(&obj, NULL));
+ ASSERT_EQUALS(PlanExecutor::ADVANCED, run->getNext(&obj, NULL));
ASSERT_EQUALS(i, obj["foo"].numberInt());
}
@@ -141,11 +141,11 @@ namespace ExecutorRegistry {
// Make sure that the runner moved forward over the deleted data. We don't see foo==10
// or foo==11.
for (int i = 12; i < N(); ++i) {
- ASSERT_EQUALS(Runner::RUNNER_ADVANCED, run->getNext(&obj, NULL));
+ ASSERT_EQUALS(PlanExecutor::ADVANCED, run->getNext(&obj, NULL));
ASSERT_EQUALS(i, obj["foo"].numberInt());
}
- ASSERT_EQUALS(Runner::RUNNER_EOF, run->getNext(&obj, NULL));
+ ASSERT_EQUALS(PlanExecutor::IS_EOF, run->getNext(&obj, NULL));
}
};
@@ -158,7 +158,7 @@ namespace ExecutorRegistry {
// Read some of it.
for (int i = 0; i < 10; ++i) {
- ASSERT_EQUALS(Runner::RUNNER_ADVANCED, run->getNext(&obj, NULL));
+ ASSERT_EQUALS(PlanExecutor::ADVANCED, run->getNext(&obj, NULL));
ASSERT_EQUALS(i, obj["foo"].numberInt());
}
@@ -173,7 +173,7 @@ namespace ExecutorRegistry {
deregisterExecutor(run.get());
run->restoreState(&_opCtx);
- ASSERT_EQUALS(Runner::RUNNER_ADVANCED, run->getNext(&obj, NULL));
+ ASSERT_EQUALS(PlanExecutor::ADVANCED, run->getNext(&obj, NULL));
ASSERT_EQUALS(10, obj["foo"].numberInt());
// Save state and register.
@@ -188,7 +188,7 @@ namespace ExecutorRegistry {
run->restoreState(&_opCtx);
// PlanExecutor was killed.
- ASSERT_EQUALS(Runner::RUNNER_DEAD, run->getNext(&obj, NULL));
+ ASSERT_EQUALS(PlanExecutor::DEAD, run->getNext(&obj, NULL));
}
};
@@ -203,7 +203,7 @@ namespace ExecutorRegistry {
// Read some of it.
for (int i = 0; i < 10; ++i) {
- ASSERT_EQUALS(Runner::RUNNER_ADVANCED, run->getNext(&obj, NULL));
+ ASSERT_EQUALS(PlanExecutor::ADVANCED, run->getNext(&obj, NULL));
ASSERT_EQUALS(i, obj["foo"].numberInt());
}
@@ -219,7 +219,7 @@ namespace ExecutorRegistry {
run->restoreState(&_opCtx);
// PlanExecutor was killed.
- ASSERT_EQUALS(Runner::RUNNER_DEAD, run->getNext(&obj, NULL));
+ ASSERT_EQUALS(PlanExecutor::DEAD, run->getNext(&obj, NULL));
}
};
@@ -234,7 +234,7 @@ namespace ExecutorRegistry {
// Read some of it.
for (int i = 0; i < 10; ++i) {
- ASSERT_EQUALS(Runner::RUNNER_ADVANCED, run->getNext(&obj, NULL));
+ ASSERT_EQUALS(PlanExecutor::ADVANCED, run->getNext(&obj, NULL));
ASSERT_EQUALS(i, obj["foo"].numberInt());
}
@@ -250,7 +250,7 @@ namespace ExecutorRegistry {
run->restoreState(&_opCtx);
// PlanExecutor was killed.
- ASSERT_EQUALS(Runner::RUNNER_DEAD, run->getNext(&obj, NULL));
+ ASSERT_EQUALS(PlanExecutor::DEAD, run->getNext(&obj, NULL));
}
};
@@ -263,7 +263,7 @@ namespace ExecutorRegistry {
// Read some of it.
for (int i = 0; i < 10; ++i) {
- ASSERT_EQUALS(Runner::RUNNER_ADVANCED, run->getNext(&obj, NULL));
+ ASSERT_EQUALS(PlanExecutor::ADVANCED, run->getNext(&obj, NULL));
ASSERT_EQUALS(i, obj["foo"].numberInt());
}
@@ -282,7 +282,7 @@ namespace ExecutorRegistry {
deregisterExecutor(run.get());
run->restoreState(&_opCtx);
- ASSERT_EQUALS(Runner::RUNNER_ADVANCED, run->getNext(&obj, NULL));
+ ASSERT_EQUALS(PlanExecutor::ADVANCED, run->getNext(&obj, NULL));
ASSERT_EQUALS(10, obj["foo"].numberInt());
// Save state and register.
@@ -302,7 +302,7 @@ namespace ExecutorRegistry {
_ctx.reset();
// PlanExecutor was killed.
- ASSERT_EQUALS(Runner::RUNNER_DEAD, run->getNext(&obj, NULL));
+ ASSERT_EQUALS(PlanExecutor::DEAD, run->getNext(&obj, NULL));
}
};
diff --git a/src/mongo/dbtests/query_multi_plan_runner.cpp b/src/mongo/dbtests/query_multi_plan_runner.cpp
index b3cab9fb910..ed277670d00 100644
--- a/src/mongo/dbtests/query_multi_plan_runner.cpp
+++ b/src/mongo/dbtests/query_multi_plan_runner.cpp
@@ -177,7 +177,7 @@ namespace QueryMultiPlanRunner {
// Get all our results out.
int results = 0;
BSONObj obj;
- while (Runner::RUNNER_ADVANCED == exec.getNext(&obj, NULL)) {
+ while (PlanExecutor::ADVANCED == exec.getNext(&obj, NULL)) {
ASSERT_EQUALS(obj["foo"].numberInt(), 7);
++results;
}
diff --git a/src/mongo/dbtests/query_plan_executor.cpp b/src/mongo/dbtests/query_plan_executor.cpp
index 42977eaefde..5074ca27d18 100644
--- a/src/mongo/dbtests/query_plan_executor.cpp
+++ b/src/mongo/dbtests/query_plan_executor.cpp
@@ -190,13 +190,13 @@ namespace QueryPlanExecutor {
registerExec(exec.get());
BSONObj objOut;
- ASSERT_EQUALS(Runner::RUNNER_ADVANCED, exec->getNext(&objOut, NULL));
+ ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&objOut, NULL));
ASSERT_EQUALS(1, objOut["_id"].numberInt());
// After dropping the collection, the runner
// should be dead.
dropCollection();
- ASSERT_EQUALS(Runner::RUNNER_DEAD, exec->getNext(&objOut, NULL));
+ ASSERT_EQUALS(PlanExecutor::DEAD, exec->getNext(&objOut, NULL));
deregisterExec(exec.get());
ctx.commit();
@@ -220,13 +220,13 @@ namespace QueryPlanExecutor {
registerExec(exec.get());
BSONObj objOut;
- ASSERT_EQUALS(Runner::RUNNER_ADVANCED, exec->getNext(&objOut, NULL));
+ ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&objOut, NULL));
ASSERT_EQUALS(7, objOut["a"].numberInt());
// After dropping the collection, the runner
// should be dead.
dropCollection();
- ASSERT_EQUALS(Runner::RUNNER_DEAD, exec->getNext(&objOut, NULL));
+ ASSERT_EQUALS(PlanExecutor::DEAD, exec->getNext(&objOut, NULL));
deregisterExec(exec.get());
ctx.commit();
@@ -266,7 +266,7 @@ namespace QueryPlanExecutor {
void checkIds(int* expectedIds, PlanExecutor* exec) {
BSONObj objOut;
int idcount = 0;
- while (Runner::RUNNER_ADVANCED == exec->getNext(&objOut, NULL)) {
+ while (PlanExecutor::ADVANCED == exec->getNext(&objOut, NULL)) {
ASSERT_EQUALS(expectedIds[idcount], objOut["_id"].numberInt());
++idcount;
}
@@ -288,7 +288,7 @@ namespace QueryPlanExecutor {
scoped_ptr<PlanExecutor> exec(makeCollScanExec(ctx.ctx(),filterObj));
BSONObj objOut;
- ASSERT_EQUALS(Runner::RUNNER_ADVANCED, exec->getNext(&objOut, NULL));
+ ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&objOut, NULL));
ASSERT_EQUALS(2, objOut["a"].numberInt());
forceDocumentMove();
@@ -316,7 +316,7 @@ namespace QueryPlanExecutor {
scoped_ptr<PlanExecutor> exec(makeIndexScanExec(ctx.ctx(), indexSpec, 2, 5));
BSONObj objOut;
- ASSERT_EQUALS(Runner::RUNNER_ADVANCED, exec->getNext(&objOut, NULL));
+ ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&objOut, NULL));
ASSERT_EQUALS(2, objOut["a"].numberInt());
forceDocumentMove();
@@ -386,7 +386,7 @@ namespace QueryPlanExecutor {
// The invalidation should have killed the runner.
BSONObj objOut;
- ASSERT_EQUALS(Runner::RUNNER_DEAD, exec->getNext(&objOut, NULL));
+ ASSERT_EQUALS(PlanExecutor::DEAD, exec->getNext(&objOut, NULL));
// Deleting the underlying cursor should cause the
// number of cursors to return to 0.
diff --git a/src/mongo/dbtests/query_stage_collscan.cpp b/src/mongo/dbtests/query_stage_collscan.cpp
index 797d0af3d28..4eecd3eacf3 100644
--- a/src/mongo/dbtests/query_stage_collscan.cpp
+++ b/src/mongo/dbtests/query_stage_collscan.cpp
@@ -98,7 +98,7 @@ namespace QueryStageCollectionScan {
// Use the runner to count the number of objects scanned.
int count = 0;
- for (BSONObj obj; Runner::RUNNER_ADVANCED == runner.getNext(&obj, NULL); ) { ++count; }
+ for (BSONObj obj; PlanExecutor::ADVANCED == runner.getNext(&obj, NULL); ) { ++count; }
return count;
}
@@ -202,7 +202,7 @@ namespace QueryStageCollectionScan {
PlanExecutor runner(ws, ps, params.collection);
int count = 0;
- for (BSONObj obj; Runner::RUNNER_ADVANCED == runner.getNext(&obj, NULL); ) {
+ for (BSONObj obj; PlanExecutor::ADVANCED == runner.getNext(&obj, NULL); ) {
// Make sure we get the objects in the order we want
ASSERT_EQUALS(count, obj["foo"].numberInt());
++count;
@@ -231,7 +231,7 @@ namespace QueryStageCollectionScan {
PlanExecutor runner(ws, ps, params.collection);
int count = 0;
- for (BSONObj obj; Runner::RUNNER_ADVANCED == runner.getNext(&obj, NULL); ) {
+ for (BSONObj obj; PlanExecutor::ADVANCED == runner.getNext(&obj, NULL); ) {
++count;
ASSERT_EQUALS(numObj() - count, obj["foo"].numberInt());
}
diff --git a/src/mongo/dbtests/query_stage_merge_sort.cpp b/src/mongo/dbtests/query_stage_merge_sort.cpp
index 1358315a5ca..f04fa8183fd 100644
--- a/src/mongo/dbtests/query_stage_merge_sort.cpp
+++ b/src/mongo/dbtests/query_stage_merge_sort.cpp
@@ -159,8 +159,8 @@ namespace QueryStageMergeSortTests {
for (int i = 0; i < N; ++i) {
BSONObj first, second;
- ASSERT_EQUALS(Runner::RUNNER_ADVANCED, runner.getNext(&first, NULL));
- ASSERT_EQUALS(Runner::RUNNER_ADVANCED, runner.getNext(&second, NULL));
+ ASSERT_EQUALS(PlanExecutor::ADVANCED, runner.getNext(&first, NULL));
+ ASSERT_EQUALS(PlanExecutor::ADVANCED, runner.getNext(&second, NULL));
ASSERT_EQUALS(first["c"].numberInt(), second["c"].numberInt());
ASSERT_EQUALS(i, first["c"].numberInt());
ASSERT((first.hasField("a") && second.hasField("b"))
@@ -169,7 +169,7 @@ namespace QueryStageMergeSortTests {
// Should be done now.
BSONObj foo;
- ASSERT_NOT_EQUALS(Runner::RUNNER_ADVANCED, runner.getNext(&foo, NULL));
+ ASSERT_NOT_EQUALS(PlanExecutor::ADVANCED, runner.getNext(&foo, NULL));
}
};
@@ -222,8 +222,8 @@ namespace QueryStageMergeSortTests {
for (int i = 0; i < N; ++i) {
BSONObj first, second;
- ASSERT_EQUALS(Runner::RUNNER_ADVANCED, runner.getNext(&first, NULL));
- ASSERT_EQUALS(Runner::RUNNER_ADVANCED, runner.getNext(&second, NULL));
+ ASSERT_EQUALS(PlanExecutor::ADVANCED, runner.getNext(&first, NULL));
+ ASSERT_EQUALS(PlanExecutor::ADVANCED, runner.getNext(&second, NULL));
ASSERT_EQUALS(first["c"].numberInt(), second["c"].numberInt());
ASSERT_EQUALS(i, first["c"].numberInt());
ASSERT((first.hasField("a") && second.hasField("b"))
@@ -232,7 +232,7 @@ namespace QueryStageMergeSortTests {
// Should be done now.
BSONObj foo;
- ASSERT_EQUALS(Runner::RUNNER_EOF, runner.getNext(&foo, NULL));
+ ASSERT_EQUALS(PlanExecutor::IS_EOF, runner.getNext(&foo, NULL));
}
};
@@ -286,8 +286,8 @@ namespace QueryStageMergeSortTests {
for (int i = 0; i < N; ++i) {
BSONObj first, second;
// We inserted N objects but we get 2 * N from the runner because of dups.
- ASSERT_EQUALS(Runner::RUNNER_ADVANCED, runner.getNext(&first, NULL));
- ASSERT_EQUALS(Runner::RUNNER_ADVANCED, runner.getNext(&second, NULL));
+ ASSERT_EQUALS(PlanExecutor::ADVANCED, runner.getNext(&first, NULL));
+ ASSERT_EQUALS(PlanExecutor::ADVANCED, runner.getNext(&second, NULL));
ASSERT_EQUALS(first["c"].numberInt(), second["c"].numberInt());
ASSERT_EQUALS(i, first["c"].numberInt());
ASSERT((first.hasField("a") && second.hasField("b"))
@@ -296,7 +296,7 @@ namespace QueryStageMergeSortTests {
// Should be done now.
BSONObj foo;
- ASSERT_EQUALS(Runner::RUNNER_EOF, runner.getNext(&foo, NULL));
+ ASSERT_EQUALS(PlanExecutor::IS_EOF, runner.getNext(&foo, NULL));
}
};
@@ -351,8 +351,8 @@ namespace QueryStageMergeSortTests {
for (int i = 0; i < N; ++i) {
BSONObj first, second;
- ASSERT_EQUALS(Runner::RUNNER_ADVANCED, runner.getNext(&first, NULL));
- ASSERT_EQUALS(Runner::RUNNER_ADVANCED, runner.getNext(&second, NULL));
+ ASSERT_EQUALS(PlanExecutor::ADVANCED, runner.getNext(&first, NULL));
+ ASSERT_EQUALS(PlanExecutor::ADVANCED, runner.getNext(&second, NULL));
ASSERT_EQUALS(first["c"].numberInt(), second["c"].numberInt());
ASSERT_EQUALS(N - i - 1, first["c"].numberInt());
ASSERT((first.hasField("a") && second.hasField("b"))
@@ -361,7 +361,7 @@ namespace QueryStageMergeSortTests {
// Should be done now.
BSONObj foo;
- ASSERT_EQUALS(Runner::RUNNER_EOF, runner.getNext(&foo, NULL));
+ ASSERT_EQUALS(PlanExecutor::IS_EOF, runner.getNext(&foo, NULL));
}
};
@@ -417,14 +417,14 @@ namespace QueryStageMergeSortTests {
// Only getting results from the a:1 index scan.
for (int i = 0; i < N; ++i) {
BSONObj obj;
- ASSERT_EQUALS(Runner::RUNNER_ADVANCED, runner.getNext(&obj, NULL));
+ ASSERT_EQUALS(PlanExecutor::ADVANCED, runner.getNext(&obj, NULL));
ASSERT_EQUALS(i, obj["c"].numberInt());
ASSERT_EQUALS(1, obj["a"].numberInt());
}
// Should be done now.
BSONObj foo;
- ASSERT_EQUALS(Runner::RUNNER_EOF, runner.getNext(&foo, NULL));
+ ASSERT_EQUALS(PlanExecutor::IS_EOF, runner.getNext(&foo, NULL));
}
};
@@ -469,7 +469,7 @@ namespace QueryStageMergeSortTests {
for (int i = 0; i < numIndices; ++i) {
BSONObj obj;
- ASSERT_EQUALS(Runner::RUNNER_ADVANCED, runner.getNext(&obj, NULL));
+ ASSERT_EQUALS(PlanExecutor::ADVANCED, runner.getNext(&obj, NULL));
ASSERT_EQUALS(i, obj["foo"].numberInt());
string index(1, 'a' + i);
ASSERT_EQUALS(1, obj[index].numberInt());
@@ -477,7 +477,7 @@ namespace QueryStageMergeSortTests {
// Should be done now.
BSONObj foo;
- ASSERT_EQUALS(Runner::RUNNER_EOF, runner.getNext(&foo, NULL));
+ ASSERT_EQUALS(PlanExecutor::IS_EOF, runner.getNext(&foo, NULL));
}
};
diff --git a/src/mongo/dbtests/query_stage_sort.cpp b/src/mongo/dbtests/query_stage_sort.cpp
index fe04b977f16..c071037c494 100644
--- a/src/mongo/dbtests/query_stage_sort.cpp
+++ b/src/mongo/dbtests/query_stage_sort.cpp
@@ -130,13 +130,13 @@ namespace QueryStageSortTests {
// Look at pairs of objects to make sure that the sort order is pairwise (and therefore
// totally) correct.
BSONObj last;
- ASSERT_EQUALS(Runner::RUNNER_ADVANCED, runner.getNext(&last, NULL));
+ ASSERT_EQUALS(PlanExecutor::ADVANCED, runner.getNext(&last, NULL));
// Count 'last'.
int count = 1;
BSONObj current;
- while (Runner::RUNNER_ADVANCED == runner.getNext(&current, NULL)) {
+ while (PlanExecutor::ADVANCED == runner.getNext(&current, NULL)) {
int cmp = sgn(current.woSortOrder(last, params.pattern));
// The next object should be equal to the previous or oriented according to the sort
// pattern.
@@ -373,8 +373,8 @@ namespace QueryStageSortTests {
// We don't get results back since we're sorting some parallel arrays.
PlanExecutor runner(
ws, new FetchStage(ws, new SortStage(params, ws, ms), NULL, coll), coll);
- Runner::RunnerState runnerState = runner.getNext(NULL, NULL);
- ASSERT_EQUALS(Runner::RUNNER_ERROR, runnerState);
+ PlanExecutor::ExecState runnerState = runner.getNext(NULL, NULL);
+ ASSERT_EQUALS(PlanExecutor::EXEC_ERROR, runnerState);
ctx.commit();
}
};
diff --git a/src/mongo/dbtests/query_stage_tests.cpp b/src/mongo/dbtests/query_stage_tests.cpp
index 24b0ef3db1e..d644d0a31a7 100644
--- a/src/mongo/dbtests/query_stage_tests.cpp
+++ b/src/mongo/dbtests/query_stage_tests.cpp
@@ -87,7 +87,7 @@ namespace QueryStageTests {
ctx.ctx().db()->getCollection(&_txn, ns()));
int count = 0;
- for (DiskLoc dl; Runner::RUNNER_ADVANCED == runner.getNext(NULL, &dl); ) {
+ for (DiskLoc dl; PlanExecutor::ADVANCED == runner.getNext(NULL, &dl); ) {
++count;
}
diff --git a/src/mongo/s/d_migrate.cpp b/src/mongo/s/d_migrate.cpp
index 5c7e1061dbe..4eb7cc01ee1 100644
--- a/src/mongo/s/d_migrate.cpp
+++ b/src/mongo/s/d_migrate.cpp
@@ -472,7 +472,7 @@ namespace mongo {
bool isLargeChunk = false;
unsigned long long recCount = 0;;
DiskLoc dl;
- while (Runner::RUNNER_ADVANCED == exec->getNext(NULL, &dl)) {
+ while (PlanExecutor::ADVANCED == exec->getNext(NULL, &dl)) {
if ( ! isLargeChunk ) {
scoped_spinlock lk( _trackerLocks );
_cloneLocs.insert( dl );
diff --git a/src/mongo/s/d_split.cpp b/src/mongo/s/d_split.cpp
index 3c7a79a207c..ac8280a6d91 100644
--- a/src/mongo/s/d_split.cpp
+++ b/src/mongo/s/d_split.cpp
@@ -164,7 +164,7 @@ namespace mongo {
DiskLoc loc;
BSONObj currKey;
- while (Runner::RUNNER_ADVANCED == exec->getNext(&currKey, &loc)) {
+ while (PlanExecutor::ADVANCED == exec->getNext(&currKey, &loc)) {
//check that current key contains non missing elements for all fields in keyPattern
BSONObjIterator i( currKey );
for( int k = 0; k < keyPatternLength ; k++ ) {
@@ -383,8 +383,8 @@ namespace mongo {
false, InternalPlanner::FORWARD));
BSONObj currKey;
- Runner::RunnerState state = exec->getNext(&currKey, NULL);
- if (Runner::RUNNER_ADVANCED != state) {
+ PlanExecutor::ExecState state = exec->getNext(&currKey, NULL);
+ if (PlanExecutor::ADVANCED != state) {
errmsg = "can't open a cursor for splitting (desired range is possibly empty)";
return false;
}
@@ -396,7 +396,7 @@ namespace mongo {
splitKeys.push_back(prettyKey(idx->keyPattern(), currKey.getOwned()).extractFields( keyPattern ) );
while ( 1 ) {
- while (Runner::RUNNER_ADVANCED == state) {
+ while (PlanExecutor::ADVANCED == state) {
currCount++;
if ( currCount > keyCount && !forceMedianSplit ) {
@@ -885,8 +885,8 @@ namespace mongo {
newmin, newmax, false));
// check if exactly one document found
- if (Runner::RUNNER_ADVANCED == exec->getNext(NULL, NULL)) {
- if (Runner::RUNNER_EOF == exec->getNext(NULL, NULL)) {
+ if (PlanExecutor::ADVANCED == exec->getNext(NULL, NULL)) {
+ if (PlanExecutor::IS_EOF == exec->getNext(NULL, NULL)) {
result.append( "shouldMigrate",
BSON("min" << chunk.min << "max" << chunk.max) );
break;