author      David Storch <david.storch@10gen.com>   2014-07-22 15:50:23 -0400
committer   David Storch <david.storch@10gen.com>   2014-07-23 12:40:20 -0400
commit      5e4f3fef24ca76b6a6516e7379b72ae3310029fd (patch)
tree        52ef6dddd9ffd1eb4d832d4b224a70caefd5c220 /src/mongo/db
parent      26aff1dcc9a45e72a91a206119c12cc280148175 (diff)
download    mongo-5e4f3fef24ca76b6a6516e7379b72ae3310029fd.tar.gz
SERVER-14407 delete runner.h
Replaces Runner::RunnerState with PlanExecutor::ExecState.
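
The rename is mechanical; every call site in the diff below follows the same substitution (this summary is drawn from the diff itself and is not part of the original commit message):

    Runner::RunnerState      ->  PlanExecutor::ExecState
    Runner::RUNNER_ADVANCED  ->  PlanExecutor::ADVANCED
    Runner::RUNNER_EOF       ->  PlanExecutor::IS_EOF
    Runner::RUNNER_DEAD      ->  PlanExecutor::DEAD
    Runner::RUNNER_ERROR     ->  PlanExecutor::EXEC_ERROR
    Runner::statestr(s)      ->  PlanExecutor::statestr(s)
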
Diffstat (limited to 'src/mongo/db')
-rw-r--r--  src/mongo/db/catalog/index_catalog.cpp | 6
-rw-r--r--  src/mongo/db/catalog/index_create.cpp | 2
-rw-r--r--  src/mongo/db/commands/collection_to_capped.cpp | 10
-rw-r--r--  src/mongo/db/commands/count.cpp | 4
-rw-r--r--  src/mongo/db/commands/dbhash.cpp | 6
-rw-r--r--  src/mongo/db/commands/distinct.cpp | 4
-rw-r--r--  src/mongo/db/commands/find_and_modify.cpp | 4
-rw-r--r--  src/mongo/db/commands/geo_near_cmd.cpp | 2
-rw-r--r--  src/mongo/db/commands/group.cpp | 4
-rw-r--r--  src/mongo/db/commands/mr.cpp | 6
-rw-r--r--  src/mongo/db/commands/pipeline_command.cpp | 2
-rw-r--r--  src/mongo/db/commands/test_commands.cpp | 4
-rw-r--r--  src/mongo/db/commands/validate.cpp | 1
-rw-r--r--  src/mongo/db/db.cpp | 6
-rw-r--r--  src/mongo/db/dbcommands.cpp | 10
-rw-r--r--  src/mongo/db/dbhelpers.cpp | 26
-rw-r--r--  src/mongo/db/exec/plan_stage.h | 2
-rw-r--r--  src/mongo/db/exec/stagedebug_cmd.cpp | 2
-rw-r--r--  src/mongo/db/fts/fts_command_mongod.cpp | 2
-rw-r--r--  src/mongo/db/index/haystack_access_method.cpp | 4
-rw-r--r--  src/mongo/db/ops/delete_executor.cpp | 4
-rw-r--r--  src/mongo/db/ops/update.cpp | 8
-rw-r--r--  src/mongo/db/pipeline/document_source.h | 2
-rw-r--r--  src/mongo/db/pipeline/document_source_cursor.cpp | 10
-rw-r--r--  src/mongo/db/query/explain_plan.cpp | 2
-rw-r--r--  src/mongo/db/query/explain_plan.h | 2
-rw-r--r--  src/mongo/db/query/new_find.cpp | 32
-rw-r--r--  src/mongo/db/query/new_find.h | 1
-rw-r--r--  src/mongo/db/query/plan_cache.h | 8
-rw-r--r--  src/mongo/db/query/plan_executor.cpp | 14
-rw-r--r--  src/mongo/db/query/plan_executor.h | 52
-rw-r--r--  src/mongo/db/query/query_planner_params.h | 6
-rw-r--r--  src/mongo/db/query/runner.h | 179
-rw-r--r--  src/mongo/db/query/type_explain.cpp | 2
-rw-r--r--  src/mongo/db/query/type_explain.h | 2
-rw-r--r--  src/mongo/db/repl/master_slave.cpp | 12
-rw-r--r--  src/mongo/db/repl/repl_info.cpp | 4
-rw-r--r--  src/mongo/db/repl/rs_rollback.cpp | 6
38 files changed, 160 insertions, 293 deletions
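
For orientation before the per-file hunks, here is a minimal sketch of the iteration pattern after this change, modeled on the index_catalog.cpp and dbhash.cpp hunks below. The executor setup line is illustrative only (InternalPlanner::collectionScan is what several call sites in this diff use); it is not code added by this commit.

    // Sketch: driving a PlanExecutor with the new ExecState enum.
    // Assumes 'txn', 'ns', and 'collection' are in scope, as at the call sites below.
    auto_ptr<PlanExecutor> exec(InternalPlanner::collectionScan(txn, ns, collection));

    BSONObj obj;
    PlanExecutor::ExecState state;
    while (PlanExecutor::ADVANCED == (state = exec->getNext(&obj, NULL))) {
        // ... consume 'obj' ...
    }

    if (PlanExecutor::IS_EOF != state) {
        // DEAD or EXEC_ERROR: the executor was killed or hit an unrecoverable error.
        warning() << "executor stopped early: " << PlanExecutor::statestr(state);
    }
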
diff --git a/src/mongo/db/catalog/index_catalog.cpp b/src/mongo/db/catalog/index_catalog.cpp
index edb6219f607..a0587ef0c9d 100644
--- a/src/mongo/db/catalog/index_catalog.cpp
+++ b/src/mongo/db/catalog/index_catalog.cpp
@@ -227,8 +227,8 @@ namespace mongo {
db->getCollection(txn, db->_indexesName)));
BSONObj index;
- Runner::RunnerState state;
- while ( Runner::RUNNER_ADVANCED == (state = exec->getNext(&index, NULL)) ) {
+ PlanExecutor::ExecState state;
+ while ( PlanExecutor::ADVANCED == (state = exec->getNext(&index, NULL)) ) {
const BSONObj key = index.getObjectField("key");
const string plugin = IndexNames::findPluginName(key);
if ( IndexNames::existedBefore24(plugin) )
@@ -243,7 +243,7 @@ namespace mongo {
return Status( ErrorCodes::CannotCreateIndex, errmsg );
}
- if ( Runner::RUNNER_EOF != state ) {
+ if ( PlanExecutor::IS_EOF != state ) {
warning() << "Internal error while reading system.indexes collection";
}
diff --git a/src/mongo/db/catalog/index_create.cpp b/src/mongo/db/catalog/index_create.cpp
index 96bf25f1c94..b369b98ca61 100644
--- a/src/mongo/db/catalog/index_create.cpp
+++ b/src/mongo/db/catalog/index_create.cpp
@@ -114,7 +114,7 @@ namespace mongo {
BSONObj js;
DiskLoc loc;
- while (Runner::RUNNER_ADVANCED == exec->getNext(&js, &loc)) {
+ while (PlanExecutor::ADVANCED == exec->getNext(&js, &loc)) {
try {
if ( !dupsAllowed && dropDups ) {
LastError::Disabled led( lastError.get() );
diff --git a/src/mongo/db/commands/collection_to_capped.cpp b/src/mongo/db/commands/collection_to_capped.cpp
index aaa1674a40a..969326141db 100644
--- a/src/mongo/db/commands/collection_to_capped.cpp
+++ b/src/mongo/db/commands/collection_to_capped.cpp
@@ -91,17 +91,17 @@ namespace mongo {
while ( true ) {
BSONObj obj;
- Runner::RunnerState state = exec->getNext(&obj, NULL);
+ PlanExecutor::ExecState state = exec->getNext(&obj, NULL);
switch( state ) {
- case Runner::RUNNER_EOF:
+ case PlanExecutor::IS_EOF:
return Status::OK();
- case Runner::RUNNER_DEAD:
+ case PlanExecutor::DEAD:
db->dropCollection( txn, toNs );
return Status( ErrorCodes::InternalError, "executor turned dead while iterating" );
- case Runner::RUNNER_ERROR:
+ case PlanExecutor::EXEC_ERROR:
return Status( ErrorCodes::InternalError, "executor error while iterating" );
- case Runner::RUNNER_ADVANCED:
+ case PlanExecutor::ADVANCED:
if ( excessSize > 0 ) {
excessSize -= ( 4 * obj.objsize() ); // 4x is for padding, power of 2, etc...
continue;
diff --git a/src/mongo/db/commands/count.cpp b/src/mongo/db/commands/count.cpp
index 306af80f311..d49203cedfb 100644
--- a/src/mongo/db/commands/count.cpp
+++ b/src/mongo/db/commands/count.cpp
@@ -116,8 +116,8 @@ namespace mongo {
ScopedExecutorRegistration safety(exec.get());
long long count = 0;
- Runner::RunnerState state;
- while (Runner::RUNNER_ADVANCED == (state = exec->getNext(NULL, NULL))) {
+ PlanExecutor::ExecState state;
+ while (PlanExecutor::ADVANCED == (state = exec->getNext(NULL, NULL))) {
if (skip > 0) {
--skip;
}
diff --git a/src/mongo/db/commands/dbhash.cpp b/src/mongo/db/commands/dbhash.cpp
index 2c5e5cee6f9..945a9e8ebb2 100644
--- a/src/mongo/db/commands/dbhash.cpp
+++ b/src/mongo/db/commands/dbhash.cpp
@@ -107,14 +107,14 @@ namespace mongo {
md5_init(&st);
long long n = 0;
- Runner::RunnerState state;
+ PlanExecutor::ExecState state;
BSONObj c;
verify(NULL != exec.get());
- while (Runner::RUNNER_ADVANCED == (state = exec->getNext(&c, NULL))) {
+ while (PlanExecutor::ADVANCED == (state = exec->getNext(&c, NULL))) {
md5_append( &st , (const md5_byte_t*)c.objdata() , c.objsize() );
n++;
}
- if (Runner::RUNNER_EOF != state) {
+ if (PlanExecutor::IS_EOF != state) {
warning() << "error while hashing, db dropped? ns=" << fullCollectionName << endl;
}
md5digest d;
diff --git a/src/mongo/db/commands/distinct.cpp b/src/mongo/db/commands/distinct.cpp
index 27769ffcfee..de6ce85b6c3 100644
--- a/src/mongo/db/commands/distinct.cpp
+++ b/src/mongo/db/commands/distinct.cpp
@@ -122,8 +122,8 @@ namespace mongo {
const ScopedExecutorRegistration safety(exec.get());
BSONObj obj;
- Runner::RunnerState state;
- while (Runner::RUNNER_ADVANCED == (state = exec->getNext(&obj, NULL))) {
+ PlanExecutor::ExecState state;
+ while (PlanExecutor::ADVANCED == (state = exec->getNext(&obj, NULL))) {
// Distinct expands arrays.
//
// If our query is covered, each value of the key should be in the index key and
diff --git a/src/mongo/db/commands/find_and_modify.cpp b/src/mongo/db/commands/find_and_modify.cpp
index ea90af2cb66..13ec1680ee2 100644
--- a/src/mongo/db/commands/find_and_modify.cpp
+++ b/src/mongo/db/commands/find_and_modify.cpp
@@ -161,8 +161,8 @@ namespace mongo {
// state and may continue doing that with document-level locking (approach is TBD).
const ScopedExecutorRegistration safety(exec.get());
- Runner::RunnerState state;
- if (Runner::RUNNER_ADVANCED == (state = exec->getNext(&doc, NULL))) {
+ PlanExecutor::ExecState state;
+ if (PlanExecutor::ADVANCED == (state = exec->getNext(&doc, NULL))) {
found = true;
}
}
diff --git a/src/mongo/db/commands/geo_near_cmd.cpp b/src/mongo/db/commands/geo_near_cmd.cpp
index ddb89a4f48a..28e9e802e23 100644
--- a/src/mongo/db/commands/geo_near_cmd.cpp
+++ b/src/mongo/db/commands/geo_near_cmd.cpp
@@ -201,7 +201,7 @@ namespace mongo {
BSONObj currObj;
int results = 0;
- while ((results < numWanted) && Runner::RUNNER_ADVANCED == exec->getNext(&currObj, NULL)) {
+ while ((results < numWanted) && PlanExecutor::ADVANCED == exec->getNext(&currObj, NULL)) {
// Come up with the correct distance.
double dist = currObj["$dis"].number() * distanceMultiplier;
diff --git a/src/mongo/db/commands/group.cpp b/src/mongo/db/commands/group.cpp
index 9bfa3259a6f..cd9f98fce18 100644
--- a/src/mongo/db/commands/group.cpp
+++ b/src/mongo/db/commands/group.cpp
@@ -156,8 +156,8 @@ namespace mongo {
const ScopedExecutorRegistration safety(exec.get());
BSONObj obj;
- Runner::RunnerState state;
- while (Runner::RUNNER_ADVANCED == (state = exec->getNext(&obj, NULL))) {
+ PlanExecutor::ExecState state;
+ while (PlanExecutor::ADVANCED == (state = exec->getNext(&obj, NULL))) {
BSONObj key = getKey(obj , keyPattern , keyFunction , keysize / keynum,
s.get() );
keysize += key.objsize();
diff --git a/src/mongo/db/commands/mr.cpp b/src/mongo/db/commands/mr.cpp
index f2501f99e80..8a902824482 100644
--- a/src/mongo/db/commands/mr.cpp
+++ b/src/mongo/db/commands/mr.cpp
@@ -1002,8 +1002,8 @@ namespace mongo {
// iterate over all sorted objects
BSONObj o;
- Runner::RunnerState state;
- while (Runner::RUNNER_ADVANCED == (state = exec->getNext(&o, NULL))) {
+ PlanExecutor::ExecState state;
+ while (PlanExecutor::ADVANCED == (state = exec->getNext(&o, NULL))) {
pm.hit();
if ( o.woSortOrder( prev , sortKey ) == 0 ) {
@@ -1353,7 +1353,7 @@ namespace mongo {
// go through each doc
BSONObj o;
- while (Runner::RUNNER_ADVANCED == exec->getNext(&o, NULL)) {
+ while (PlanExecutor::ADVANCED == exec->getNext(&o, NULL)) {
// check to see if this is a new object we don't own yet
// because of a chunk migration
if ( collMetadata ) {
diff --git a/src/mongo/db/commands/pipeline_command.cpp b/src/mongo/db/commands/pipeline_command.cpp
index de49bc61d1c..36a9a7c75a8 100644
--- a/src/mongo/db/commands/pipeline_command.cpp
+++ b/src/mongo/db/commands/pipeline_command.cpp
@@ -224,7 +224,7 @@ namespace {
for (int objCount = 0; objCount < batchSize; objCount++) {
// The initial getNext() on a PipelineProxyStage may be very expensive so we don't
// do it when batchSize is 0 since that indicates a desire for a fast return.
- if (exec->getNext(&next, NULL) != Runner::RUNNER_ADVANCED) {
+ if (exec->getNext(&next, NULL) != PlanExecutor::ADVANCED) {
if (pin) pin->deleteUnderlying();
// make it an obvious error to use cursor or executor after this point
cursor = NULL;
diff --git a/src/mongo/db/commands/test_commands.cpp b/src/mongo/db/commands/test_commands.cpp
index fb5f38c7897..97a61695193 100644
--- a/src/mongo/db/commands/test_commands.cpp
+++ b/src/mongo/db/commands/test_commands.cpp
@@ -157,8 +157,8 @@ namespace mongo {
DiskLoc end;
// We remove 'n' elements so the start is one past that
for( int i = 0; i < n + 1; ++i ) {
- Runner::RunnerState state = exec->getNext(NULL, &end);
- massert( 13418, "captrunc invalid n", Runner::RUNNER_ADVANCED == state);
+ PlanExecutor::ExecState state = exec->getNext(NULL, &end);
+ massert( 13418, "captrunc invalid n", PlanExecutor::ADVANCED == state);
}
collection->temp_cappedTruncateAfter( txn, end, inc );
ctx.commit();
diff --git a/src/mongo/db/commands/validate.cpp b/src/mongo/db/commands/validate.cpp
index 7f6a36f2ff5..91c44de6424 100644
--- a/src/mongo/db/commands/validate.cpp
+++ b/src/mongo/db/commands/validate.cpp
@@ -32,7 +32,6 @@
#include "mongo/db/commands.h"
#include "mongo/db/query/internal_plans.h"
-#include "mongo/db/query/runner.h"
#include "mongo/db/operation_context_impl.h"
#include "mongo/db/catalog/collection.h"
#include "mongo/util/log.h"
diff --git a/src/mongo/db/db.cpp b/src/mongo/db/db.cpp
index 07f7417fada..62d4c37d861 100644
--- a/src/mongo/db/db.cpp
+++ b/src/mongo/db/db.cpp
@@ -368,8 +368,8 @@ namespace mongo {
auto_ptr<PlanExecutor> exec(
InternalPlanner::collectionScan(&txn, systemIndexes,coll));
BSONObj index;
- Runner::RunnerState state;
- while (Runner::RUNNER_ADVANCED == (state = exec->getNext(&index, NULL))) {
+ PlanExecutor::ExecState state;
+ while (PlanExecutor::ADVANCED == (state = exec->getNext(&index, NULL))) {
const BSONObj key = index.getObjectField("key");
const string plugin = IndexNames::findPluginName(key);
@@ -394,7 +394,7 @@ namespace mongo {
}
}
- if (Runner::RUNNER_EOF != state) {
+ if (PlanExecutor::IS_EOF != state) {
warning() << "Internal error while reading collection " << systemIndexes;
}
diff --git a/src/mongo/db/dbcommands.cpp b/src/mongo/db/dbcommands.cpp
index 27105525e91..8ff1931a8a2 100644
--- a/src/mongo/db/dbcommands.cpp
+++ b/src/mongo/db/dbcommands.cpp
@@ -689,8 +689,8 @@ namespace mongo {
const ChunkVersion shardVersionAtStart = shardingState.getVersion(ns);
BSONObj obj;
- Runner::RunnerState state;
- while (Runner::RUNNER_ADVANCED == (state = exec->getNext(&obj, NULL))) {
+ PlanExecutor::ExecState state;
+ while (PlanExecutor::ADVANCED == (state = exec->getNext(&obj, NULL))) {
BSONElement ne = obj["n"];
verify(ne.isNumber());
int myn = ne.numberInt();
@@ -835,8 +835,8 @@ namespace mongo {
long long numObjects = 0;
DiskLoc loc;
- Runner::RunnerState state;
- while (Runner::RUNNER_ADVANCED == (state = exec->getNext(NULL, &loc))) {
+ PlanExecutor::ExecState state;
+ while (PlanExecutor::ADVANCED == (state = exec->getNext(NULL, &loc))) {
if ( estimate )
size += avgObjSize;
else
@@ -851,7 +851,7 @@ namespace mongo {
}
}
- if (Runner::RUNNER_EOF != state) {
+ if (PlanExecutor::IS_EOF != state) {
warning() << "Internal error while reading " << ns << endl;
}
diff --git a/src/mongo/db/dbhelpers.cpp b/src/mongo/db/dbhelpers.cpp
index db0132d5aa0..d9923e9c1b1 100644
--- a/src/mongo/db/dbhelpers.cpp
+++ b/src/mongo/db/dbhelpers.cpp
@@ -118,9 +118,9 @@ namespace mongo {
getExecutor(txn, collection, cq, &rawExec, options).isOK());
auto_ptr<PlanExecutor> exec(rawExec);
- Runner::RunnerState state;
+ PlanExecutor::ExecState state;
DiskLoc loc;
- if (Runner::RUNNER_ADVANCED == (state = exec->getNext(NULL, &loc))) {
+ if (PlanExecutor::ADVANCED == (state = exec->getNext(NULL, &loc))) {
return loc;
}
return DiskLoc();
@@ -187,9 +187,9 @@ namespace mongo {
auto_ptr<PlanExecutor> exec(
InternalPlanner::collectionScan(txn, ns, context.db()->getCollection(txn, ns)));
- Runner::RunnerState state = exec->getNext(&result, NULL);
+ PlanExecutor::ExecState state = exec->getNext(&result, NULL);
context.getClient()->curop()->done();
- return Runner::RUNNER_ADVANCED == state;
+ return PlanExecutor::ADVANCED == state;
}
bool Helpers::getLast(OperationContext* txn, const char *ns, BSONObj& result) {
@@ -198,8 +198,8 @@ namespace mongo {
auto_ptr<PlanExecutor> exec(
InternalPlanner::collectionScan(txn, ns, coll, InternalPlanner::BACKWARD));
- Runner::RunnerState state = exec->getNext(&result, NULL);
- return Runner::RUNNER_ADVANCED == state;
+ PlanExecutor::ExecState state = exec->getNext(&result, NULL);
+ return PlanExecutor::ADVANCED == state;
}
void Helpers::upsert( OperationContext* txn,
@@ -368,20 +368,20 @@ namespace mongo {
DiskLoc rloc;
BSONObj obj;
- Runner::RunnerState state;
+ PlanExecutor::ExecState state;
// This may yield so we cannot touch nsd after this.
state = exec->getNext(&obj, &rloc);
exec.reset();
- if (Runner::RUNNER_EOF == state) { break; }
+ if (PlanExecutor::IS_EOF == state) { break; }
- if (Runner::RUNNER_DEAD == state) {
+ if (PlanExecutor::DEAD == state) {
warning() << "cursor died: aborting deletion for "
<< min << " to " << max << " in " << ns
<< endl;
break;
}
- if (Runner::RUNNER_ERROR == state) {
+ if (PlanExecutor::EXEC_ERROR == state) {
warning() << "cursor error while trying to delete "
<< min << " to " << max
<< " in " << ns << ": "
@@ -389,7 +389,7 @@ namespace mongo {
break;
}
- verify(Runner::RUNNER_ADVANCED == state);
+ verify(PlanExecutor::ADVANCED == state);
if ( onlyRemoveOrphanedDocs ) {
// Do a final check in the write lock to make absolutely sure that our
@@ -525,8 +525,8 @@ namespace mongo {
// already being queued and will be migrated in the 'transferMods' stage
DiskLoc loc;
- Runner::RunnerState state;
- while (Runner::RUNNER_ADVANCED == (state = exec->getNext(NULL, &loc))) {
+ PlanExecutor::ExecState state;
+ while (PlanExecutor::ADVANCED == (state = exec->getNext(NULL, &loc))) {
if ( !isLargeChunk ) {
locs->insert( loc );
}
diff --git a/src/mongo/db/exec/plan_stage.h b/src/mongo/db/exec/plan_stage.h
index 4c8f27d3a08..bc5d701587d 100644
--- a/src/mongo/db/exec/plan_stage.h
+++ b/src/mongo/db/exec/plan_stage.h
@@ -46,7 +46,7 @@ namespace mongo {
*
* Stages have zero or more input streams but only one output stream. Data-accessing stages are
* leaves and data-transforming stages have children. Stages can be connected together to form
- * a tree which is then executed (see plan_runner.h) to solve a query.
+ * a tree which is then executed (see plan_executor.h) to solve a query.
*
* A stage's input and output are each typed. Only stages with compatible types can be
* connected.
diff --git a/src/mongo/db/exec/stagedebug_cmd.cpp b/src/mongo/db/exec/stagedebug_cmd.cpp
index 952b3b1e104..ea8fff78edd 100644
--- a/src/mongo/db/exec/stagedebug_cmd.cpp
+++ b/src/mongo/db/exec/stagedebug_cmd.cpp
@@ -145,7 +145,7 @@ namespace mongo {
BSONArrayBuilder resultBuilder(result.subarrayStart("results"));
- for (BSONObj obj; Runner::RUNNER_ADVANCED == runner.getNext(&obj, NULL); ) {
+ for (BSONObj obj; PlanExecutor::ADVANCED == runner.getNext(&obj, NULL); ) {
resultBuilder.append(obj);
}
diff --git a/src/mongo/db/fts/fts_command_mongod.cpp b/src/mongo/db/fts/fts_command_mongod.cpp
index 146d0159faf..610f2438edb 100644
--- a/src/mongo/db/fts/fts_command_mongod.cpp
+++ b/src/mongo/db/fts/fts_command_mongod.cpp
@@ -130,7 +130,7 @@ namespace mongo {
int numReturned = 0;
BSONObj obj;
- while (Runner::RUNNER_ADVANCED == exec->getNext(&obj, NULL)) {
+ while (PlanExecutor::ADVANCED == exec->getNext(&obj, NULL)) {
if ((resultSize + obj.objsize()) >= BSONObjMaxUserSize) {
break;
}
diff --git a/src/mongo/db/index/haystack_access_method.cpp b/src/mongo/db/index/haystack_access_method.cpp
index 8d2e72a474f..57a0ac6d4f0 100644
--- a/src/mongo/db/index/haystack_access_method.cpp
+++ b/src/mongo/db/index/haystack_access_method.cpp
@@ -102,9 +102,9 @@ namespace mongo {
scoped_ptr<PlanExecutor> exec(InternalPlanner::indexScan(txn, collection,
_descriptor, key, key, true));
- Runner::RunnerState state;
+ PlanExecutor::ExecState state;
DiskLoc loc;
- while (Runner::RUNNER_ADVANCED == (state = exec->getNext(NULL, &loc))) {
+ while (PlanExecutor::ADVANCED == (state = exec->getNext(NULL, &loc))) {
if (hopper.limitReached()) { break; }
pair<unordered_set<DiskLoc, DiskLoc::Hasher>::iterator, bool> p
= thisPass.insert(loc);
diff --git a/src/mongo/db/ops/delete_executor.cpp b/src/mongo/db/ops/delete_executor.cpp
index 17b2123c8f1..3fdbf6c77f9 100644
--- a/src/mongo/db/ops/delete_executor.cpp
+++ b/src/mongo/db/ops/delete_executor.cpp
@@ -138,10 +138,10 @@ namespace mongo {
ScopedExecutorRegistration safety(exec.get());
DiskLoc rloc;
- Runner::RunnerState state;
+ PlanExecutor::ExecState state;
CurOp* curOp = _request->getOpCtx()->getCurOp();
int oldYieldCount = curOp->numYields();
- while (Runner::RUNNER_ADVANCED == (state = exec->getNext(NULL, &rloc))) {
+ while (PlanExecutor::ADVANCED == (state = exec->getNext(NULL, &rloc))) {
if (oldYieldCount != curOp->numYields()) {
uassert(ErrorCodes::NotMaster,
str::stream() << "No longer primary while removing from " << ns.ns(),
diff --git a/src/mongo/db/ops/update.cpp b/src/mongo/db/ops/update.cpp
index 4ca1367465c..e0269ecd146 100644
--- a/src/mongo/db/ops/update.cpp
+++ b/src/mongo/db/ops/update.cpp
@@ -517,7 +517,7 @@ namespace mongo {
BSONObj oldObj;
// Get first doc, and location
- Runner::RunnerState state = Runner::RUNNER_ADVANCED;
+ PlanExecutor::ExecState state = PlanExecutor::ADVANCED;
uassert(ErrorCodes::NotMaster,
mongoutils::str::stream() << "Not primary while updating " << nsString.ns(),
@@ -530,15 +530,15 @@ namespace mongo {
DiskLoc loc;
state = exec->getNext(&oldObj, &loc);
- if (state != Runner::RUNNER_ADVANCED) {
- if (state == Runner::RUNNER_EOF) {
+ if (state != PlanExecutor::ADVANCED) {
+ if (state == PlanExecutor::IS_EOF) {
// We have reached the logical end of the loop, so do yielding recovery
break;
}
else {
uassertStatusOK(Status(ErrorCodes::InternalError,
str::stream() << " Update query failed -- "
- << Runner::statestr(state)));
+ << PlanExecutor::statestr(state)));
}
}
diff --git a/src/mongo/db/pipeline/document_source.h b/src/mongo/db/pipeline/document_source.h
index 5c34eae6fd5..4b6a8bf0291 100644
--- a/src/mongo/db/pipeline/document_source.h
+++ b/src/mongo/db/pipeline/document_source.h
@@ -420,7 +420,7 @@ namespace mongo {
long long _docsAddedToBatches; // for _limit enforcement
const std::string _ns;
- boost::shared_ptr<PlanExecutor> _exec; // PipelineRunner holds a weak_ptr to this.
+ boost::shared_ptr<PlanExecutor> _exec; // PipelineProxyStage holds a weak_ptr to this.
};
diff --git a/src/mongo/db/pipeline/document_source_cursor.cpp b/src/mongo/db/pipeline/document_source_cursor.cpp
index b037e9b7f06..ee44d7c2c0a 100644
--- a/src/mongo/db/pipeline/document_source_cursor.cpp
+++ b/src/mongo/db/pipeline/document_source_cursor.cpp
@@ -87,8 +87,8 @@ namespace mongo {
int memUsageBytes = 0;
BSONObj obj;
- Runner::RunnerState state;
- while ((state = _exec->getNext(&obj, NULL)) == Runner::RUNNER_ADVANCED) {
+ PlanExecutor::ExecState state;
+ while ((state = _exec->getNext(&obj, NULL)) == PlanExecutor::ADVANCED) {
if (_dependencies) {
_currentBatch.push_back(_dependencies->extractFields(obj));
}
@@ -117,13 +117,13 @@ namespace mongo {
_exec.reset();
uassert(16028, "collection or index disappeared when cursor yielded",
- state != Runner::RUNNER_DEAD);
+ state != PlanExecutor::DEAD);
uassert(17285, "cursor encountered an error: " + WorkingSetCommon::toStatusString(obj),
- state != Runner::RUNNER_ERROR);
+ state != PlanExecutor::EXEC_ERROR);
massert(17286, str::stream() << "Unexpected return from PlanExecutor::getNext: " << state,
- state == Runner::RUNNER_EOF || state == Runner::RUNNER_ADVANCED);
+ state == PlanExecutor::IS_EOF || state == PlanExecutor::ADVANCED);
}
void DocumentSourceCursor::setSource(DocumentSource *pSource) {
diff --git a/src/mongo/db/query/explain_plan.cpp b/src/mongo/db/query/explain_plan.cpp
index db9f7b82f40..0da9d51be53 100644
--- a/src/mongo/db/query/explain_plan.cpp
+++ b/src/mongo/db/query/explain_plan.cpp
@@ -26,6 +26,8 @@
* it in the license file.
*/
+// THIS FILE IS DEPRECATED -- the old explain implementation is being replaced
+
#include "mongo/db/query/explain_plan.h"
#include "mongo/db/query/stage_types.h"
diff --git a/src/mongo/db/query/explain_plan.h b/src/mongo/db/query/explain_plan.h
index 657ec3610a2..9353a610114 100644
--- a/src/mongo/db/query/explain_plan.h
+++ b/src/mongo/db/query/explain_plan.h
@@ -26,6 +26,8 @@
* it in the license file.
*/
+// THIS FILE IS DEPRECATED -- the old explain implementation is being replaced
+
#pragma once
#include "mongo/base/status.h"
diff --git a/src/mongo/db/query/new_find.cpp b/src/mongo/db/query/new_find.cpp
index 83efa22a5ab..6781ef3702f 100644
--- a/src/mongo/db/query/new_find.cpp
+++ b/src/mongo/db/query/new_find.cpp
@@ -242,8 +242,8 @@ namespace mongo {
exec->restoreState(txn);
BSONObj obj;
- Runner::RunnerState state;
- while (Runner::RUNNER_ADVANCED == (state = exec->getNext(&obj, NULL))) {
+ PlanExecutor::ExecState state;
+ while (PlanExecutor::ADVANCED == (state = exec->getNext(&obj, NULL))) {
// Add result to output buffer.
bb.appendBuf((void*)obj.objdata(), obj.objsize());
@@ -264,7 +264,7 @@ namespace mongo {
}
}
- if (Runner::RUNNER_EOF == state && 0 == numResults
+ if (PlanExecutor::IS_EOF == state && 0 == numResults
&& (queryOptions & QueryOption_CursorTailable)
&& (queryOptions & QueryOption_AwaitData) && (pass < 1000)) {
// If the cursor is tailable we don't kill it if it's eof. We let it try to get
@@ -283,9 +283,9 @@ namespace mongo {
// to getNext(...) might just return EOF).
bool saveClientCursor = false;
- if (Runner::RUNNER_DEAD == state || Runner::RUNNER_ERROR == state) {
+ if (PlanExecutor::DEAD == state || PlanExecutor::EXEC_ERROR == state) {
// Propagate this error to caller.
- if (Runner::RUNNER_ERROR == state) {
+ if (PlanExecutor::EXEC_ERROR == state) {
scoped_ptr<PlanStageStats> stats(exec->getStats());
error() << "Plan executor error, stats: "
<< statsToBSON(*stats);
@@ -306,12 +306,12 @@ namespace mongo {
resultFlags = ResultFlag_CursorNotFound;
}
}
- else if (Runner::RUNNER_EOF == state) {
+ else if (PlanExecutor::IS_EOF == state) {
// EOF is also end of the line unless it's tailable.
saveClientCursor = queryOptions & QueryOption_CursorTailable;
}
else {
- verify(Runner::RUNNER_ADVANCED == state);
+ verify(PlanExecutor::ADVANCED == state);
saveClientCursor = true;
}
@@ -321,7 +321,7 @@ namespace mongo {
cursorid = 0;
cc = NULL;
QLOG() << "getMore NOT saving client cursor, ended with state "
- << Runner::statestr(state)
+ << PlanExecutor::statestr(state)
<< endl;
}
else {
@@ -329,7 +329,7 @@ namespace mongo {
cc->incPos(numResults);
exec->saveState();
QLOG() << "getMore saving client cursor ended with state "
- << Runner::statestr(state)
+ << PlanExecutor::statestr(state)
<< endl;
// Possibly note slave's position in the oplog.
@@ -404,15 +404,15 @@ namespace mongo {
// The stage returns a DiskLoc of where to start.
DiskLoc startLoc;
- Runner::RunnerState state = exec->getNext(NULL, &startLoc);
+ PlanExecutor::ExecState state = exec->getNext(NULL, &startLoc);
// This is normal. The start of the oplog is the beginning of the collection.
- if (Runner::RUNNER_EOF == state) {
+ if (PlanExecutor::IS_EOF == state) {
return getExecutor(txn, collection, autoCq.release(), execOut);
}
// This is not normal. An error was encountered.
- if (Runner::RUNNER_ADVANCED != state) {
+ if (PlanExecutor::ADVANCED != state) {
return Status(ErrorCodes::InternalError,
"quick oplog start location had error...?");
}
@@ -650,7 +650,7 @@ namespace mongo {
auto_ptr<ScopedExecutorRegistration> safety(new ScopedExecutorRegistration(exec.get()));
BSONObj obj;
- Runner::RunnerState state;
+ PlanExecutor::ExecState state;
// uint64_t numMisplacedDocs = 0;
// Get summary info about which plan the executor is using.
@@ -658,7 +658,7 @@ namespace mongo {
Explain::getSummaryStats(exec.get(), &stats);
curop.debug().planSummary = stats.summaryStr.c_str();
- while (Runner::RUNNER_ADVANCED == (state = exec->getNext(&obj, NULL))) {
+ while (PlanExecutor::ADVANCED == (state = exec->getNext(&obj, NULL))) {
// Add result to output buffer. This is unnecessary if explain info is requested
if (!isExplain) {
bb.appendBuf((void*)obj.objdata(), obj.objsize());
@@ -710,7 +710,7 @@ namespace mongo {
safety.reset();
// Caller expects exceptions thrown in certain cases.
- if (Runner::RUNNER_ERROR == state) {
+ if (PlanExecutor::EXEC_ERROR == state) {
scoped_ptr<PlanStageStats> stats(exec->getStats());
error() << "Plan executor error, stats: "
<< statsToBSON(*stats);
@@ -718,7 +718,7 @@ namespace mongo {
}
// Why save a dead executor?
- if (Runner::RUNNER_DEAD == state) {
+ if (PlanExecutor::DEAD == state) {
saveClientCursor = false;
}
else if (pq.hasOption(QueryOption_CursorTailable)) {
diff --git a/src/mongo/db/query/new_find.h b/src/mongo/db/query/new_find.h
index 6050d90d633..c136cee8981 100644
--- a/src/mongo/db/query/new_find.h
+++ b/src/mongo/db/query/new_find.h
@@ -34,7 +34,6 @@
#include "mongo/db/curop.h"
#include "mongo/db/dbmessage.h"
#include "mongo/db/query/canonical_query.h"
-#include "mongo/db/query/runner.h"
#include "mongo/util/net/message.h"
namespace mongo {
diff --git a/src/mongo/db/query/plan_cache.h b/src/mongo/db/query/plan_cache.h
index 84dbab1fa34..bc89e9aa7da 100644
--- a/src/mongo/db/query/plan_cache.h
+++ b/src/mongo/db/query/plan_cache.h
@@ -46,7 +46,7 @@ namespace mongo {
struct QuerySolutionNode;
/**
- * When the CachedPlanRunner runs a cached query, it can provide feedback to the cache. This
+ * When the CachedPlanStage runs a cached query, it can provide feedback to the cache. This
* feedback is available to anyone who retrieves that query in the future.
*/
struct PlanCacheEntryFeedback {
@@ -253,7 +253,7 @@ namespace mongo {
// the other plans lost.
boost::scoped_ptr<PlanRankingDecision> decision;
- // Annotations from cached runs. The CachedSolutionRunner provides these stats about its
+ // Annotations from cached runs. The CachedPlanStage provides these stats about its
// runs when they complete.
std::vector<PlanCacheEntryFeedback*> feedback;
@@ -321,8 +321,8 @@ namespace mongo {
Status get(const CanonicalQuery& query, CachedSolution** crOut) const;
/**
- * When the CachedPlanRunner runs a plan out of the cache, we want to record data about the
- * plan's performance. The CachedPlanRunner calls feedback(...) at the end of query
+ * When the CachedPlanStage runs a plan out of the cache, we want to record data about the
+ * plan's performance. The CachedPlanStage calls feedback(...) at the end of query
* execution in order to do this.
*
* Cache takes ownership of 'feedback'.
diff --git a/src/mongo/db/query/plan_executor.cpp b/src/mongo/db/query/plan_executor.cpp
index a449dc52372..9df91a83b77 100644
--- a/src/mongo/db/query/plan_executor.cpp
+++ b/src/mongo/db/query/plan_executor.cpp
@@ -124,8 +124,8 @@ namespace mongo {
if (!_killed) { _root->invalidate(dl, type); }
}
- Runner::RunnerState PlanExecutor::getNext(BSONObj* objOut, DiskLoc* dlOut) {
- if (_killed) { return Runner::RUNNER_DEAD; }
+ PlanExecutor::ExecState PlanExecutor::getNext(BSONObj* objOut, DiskLoc* dlOut) {
+ if (_killed) { return PlanExecutor::DEAD; }
for (;;) {
WorkingSetID id = WorkingSet::INVALID_ID;
@@ -136,7 +136,7 @@ namespace mongo {
if (WorkingSet::INVALID_ID == id) {
invariant(NULL == objOut);
invariant(NULL == dlOut);
- return Runner::RUNNER_ADVANCED;
+ return PlanExecutor::ADVANCED;
}
WorkingSetMember* member = _workingSet->get(id);
@@ -173,7 +173,7 @@ namespace mongo {
if (hasRequestedData) {
_workingSet->free(id);
- return Runner::RUNNER_ADVANCED;
+ return PlanExecutor::ADVANCED;
}
// This result didn't have the data the caller wanted, try again.
}
@@ -181,17 +181,17 @@ namespace mongo {
// Fall through to yield check at end of large conditional.
}
else if (PlanStage::IS_EOF == code) {
- return Runner::RUNNER_EOF;
+ return PlanExecutor::IS_EOF;
}
else if (PlanStage::DEAD == code) {
- return Runner::RUNNER_DEAD;
+ return PlanExecutor::DEAD;
}
else {
verify(PlanStage::FAILURE == code);
if (NULL != objOut) {
WorkingSetCommon::getStatusMemberObject(*_workingSet, id, objOut);
}
- return Runner::RUNNER_ERROR;
+ return PlanExecutor::EXEC_ERROR;
}
}
}
diff --git a/src/mongo/db/query/plan_executor.h b/src/mongo/db/query/plan_executor.h
index 87c8aaa285f..8a6104edf79 100644
--- a/src/mongo/db/query/plan_executor.h
+++ b/src/mongo/db/query/plan_executor.h
@@ -31,17 +31,18 @@
#include <boost/scoped_ptr.hpp>
#include "mongo/base/status.h"
-#include "mongo/db/query/runner.h"
+#include "mongo/db/invalidation_type.h"
#include "mongo/db/query/query_solution.h"
namespace mongo {
class BSONObj;
+ class Collection;
class DiskLoc;
class PlanStage;
+ class PlanExecutor;
struct PlanStageStats;
class WorkingSet;
- class PlanExecutor;
/**
* RAII approach to ensuring that plan executors are deregistered.
@@ -66,11 +67,50 @@ namespace mongo {
* The executor is usually part of a larger abstraction that is interacting with the cache
* and/or the query optimizer.
*
- * Executes a plan. Used by a runner. Calls work() on a plan until a result is produced.
- * Stops when the plan is EOF or if the plan errors.
+ * Executes a plan. Calls work() on a plan until a result is produced. Stops when the plan is
+ * EOF or if the plan errors.
*/
class PlanExecutor {
public:
+
+ enum ExecState {
+ // We successfully populated the out parameter.
+ ADVANCED,
+
+ // We're EOF. We won't return any more results (edge case exception: capped+tailable).
+ IS_EOF,
+
+ // We were killed or had an error.
+ DEAD,
+
+ // getNext was asked for data it cannot provide, or the underlying PlanStage had an
+ // unrecoverable error.
+ // If the underlying PlanStage has any information on the error, it will be available in
+ // the objOut parameter. Call WorkingSetCommon::toStatusString() to retrieve the error
+ // details from the output BSON object.
+ EXEC_ERROR,
+ };
+
+ static std::string statestr(ExecState s) {
+ if (PlanExecutor::ADVANCED == s) {
+ return "ADVANCED";
+ }
+ else if (PlanExecutor::IS_EOF == s) {
+ return "IS_EOF";
+ }
+ else if (PlanExecutor::DEAD == s) {
+ return "DEAD";
+ }
+ else {
+ verify(PlanExecutor::EXEC_ERROR == s);
+ return "EXEC_ERROR";
+ }
+ }
+
+ //
+ // Constructors / destructor.
+ //
+
/**
* Used when there is no canonical query and no query solution.
*
@@ -151,7 +191,7 @@ namespace mongo {
//
/** TODO document me */
- Runner::RunnerState getNext(BSONObj* objOut, DiskLoc* dlOut);
+ ExecState getNext(BSONObj* objOut, DiskLoc* dlOut);
/** TOOD document me */
bool isEOF();
@@ -174,7 +214,7 @@ namespace mongo {
/**
* During the yield, the database we're operating over or any collection we're relying on
- * may be dropped. When this happens all cursors and runners on that database and
+ * may be dropped. When this happens all cursors and plan executors on that database and
* collection are killed or deleted in some fashion. (This is how the _killed gets set.)
*/
void kill();
diff --git a/src/mongo/db/query/query_planner_params.h b/src/mongo/db/query/query_planner_params.h
index b2970aa59ce..e7be3aec265 100644
--- a/src/mongo/db/query/query_planner_params.h
+++ b/src/mongo/db/query/query_planner_params.h
@@ -59,8 +59,8 @@ namespace mongo {
//
// In order to set this, you must check
// shardingState.needCollectionMetadata(current_namespace) in the same lock that you use
- // to build the query runner. You must also wrap the Runner in a ClientCursor within the
- // same lock. See the comment on ShardFilterStage for details.
+ // to build the query executor. You must also wrap the PlanExecutor in a ClientCursor
+ // within the same lock. See the comment on ShardFilterStage for details.
INCLUDE_SHARD_FILTER = 1 << 2,
// Set this if you don't want any plans with a blocking sort stage. All sorts must be
@@ -99,7 +99,7 @@ namespace mongo {
bool indexFiltersApplied;
// What's the max number of indexed solutions we want to output? It's expensive to compare
- // plans via the MultiPlanRunner, and the set of possible plans is very large for certain
+ // plans via the MultiPlanStage, and the set of possible plans is very large for certain
// index+query combinations.
size_t maxIndexedSolutions;
};
diff --git a/src/mongo/db/query/runner.h b/src/mongo/db/query/runner.h
deleted file mode 100644
index db7a642552f..00000000000
--- a/src/mongo/db/query/runner.h
+++ /dev/null
@@ -1,179 +0,0 @@
-/**
- * Copyright (C) 2013 10gen Inc.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License, version 3,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * As a special exception, the copyright holders give permission to link the
- * code of portions of this program with the OpenSSL library under certain
- * conditions as described in each individual source file and distribute
- * linked combinations including the program with the OpenSSL library. You
- * must comply with the GNU Affero General Public License in all respects for
- * all of the code used other than as permitted herein. If you modify file(s)
- * with this exception, you may extend this exception to your version of the
- * file(s), but you are not obligated to do so. If you do not wish to do so,
- * delete this exception statement from your version. If you delete this
- * exception statement from all source files in the program, then also delete
- * it in the license file.
- */
-
-// THIS FILE IS DEPRECATED -- Runner to be replaced with PlanExecutor.
-
-#pragma once
-
-#include "mongo/base/status.h"
-#include "mongo/db/query/canonical_query.h"
-#include "mongo/db/invalidation_type.h"
-
-namespace mongo {
-
- class Collection;
- class OperationContext;
- class DiskLoc;
- class TypeExplain;
- struct PlanInfo;
-
- /**
- * A runner runs a query.
- */
- class Runner {
- public:
- virtual ~Runner() { }
-
- enum RunnerState {
- // We successfully populated the out parameter.
- RUNNER_ADVANCED,
-
- // We're EOF. We won't return any more results (edge case exception: capped+tailable).
- RUNNER_EOF,
-
- // We were killed or had an error.
- RUNNER_DEAD,
-
- // getNext was asked for data it cannot provide, or the underlying PlanStage had an
- // unrecoverable error.
- // If the underlying PlanStage has any information on the error, it will be available in
- // the objOut parameter. Call WorkingSetCommon::toStatusString() to retrieve the error
- // details from the output BSON object.
- RUNNER_ERROR,
- };
-
- static std::string statestr(RunnerState s) {
- if (RUNNER_ADVANCED == s) {
- return "RUNNER_ADVANCED";
- }
- else if (RUNNER_EOF == s) {
- return "RUNNER_EOF";
- }
- else if (RUNNER_DEAD == s) {
- return "RUNNER_DEAD";
- }
- else {
- verify(RUNNER_ERROR == s);
- return "RUNNER_ERROR";
- }
- }
-
- /**
- * Get the next result from the query.
- *
- * If objOut is not NULL, only results that have a BSONObj are returned. The BSONObj may
- * point to on-disk data (isOwned will be false) and must be copied by the caller before
- * yielding.
- *
- * If dlOut is not NULL, only results that have a valid DiskLoc are returned.
- *
- * If both objOut and dlOut are not NULL, only results with both a valid BSONObj and DiskLoc
- * will be returned. The BSONObj is the object located at the DiskLoc provided.
- *
- * If the underlying query machinery produces a result that does not have the data requested
- * by the user, it will be silently dropped.
- *
- * If the caller is running a query, they probably only care about the object.
- * If the caller is an internal client, they may only care about DiskLocs (index scan), or
- * about object + DiskLocs (collection scan).
- *
- * Some notes on objOut and ownership:
- *
- * objOut may be an owned object in certain cases: invalidation of the underlying DiskLoc,
- * the object is created from covered index key data, the object is projected or otherwise
- * the result of a computation.
- *
- * objOut will also be owned when the underlying PlanStage has provided error details in the
- * event of a RUNNER_ERROR. Call WorkingSetCommon::toStatusString() to convert the object
- * to a loggable format.
- *
- * objOut will be unowned if it's the result of a fetch or a collection scan.
- */
- virtual RunnerState getNext(BSONObj* objOut, DiskLoc* dlOut) = 0;
-
- /**
- * Will the next call to getNext() return EOF? It's useful to know if the runner is done
- * without having to take responsibility for a result.
- */
- virtual bool isEOF() = 0;
-
- /**
- * Inform the runner about changes to DiskLoc(s) that occur while the runner is yielded.
- * The runner must take any actions required to continue operating correctly, including
- * broadcasting the invalidation request to the PlanStage tree being run.
- *
- * Called from CollectionCursorCache::invalidateDocument.
- *
- * See db/invalidation_type.h for InvalidationType.
- */
- virtual void invalidate(const DiskLoc& dl, InvalidationType type) = 0;
-
- /**
- * Mark the Runner as no longer valid. Can happen when a runner yields and the underlying
- * database is dropped/indexes removed/etc. All future to calls to getNext return
- * RUNNER_DEAD. Every other call is a NOOP.
- *
- * The runner must guarantee as a postcondition that future calls to collection() will
- * return NULL.
- */
- virtual void kill() = 0;
-
- /**
- * Save any state required to yield.
- */
- virtual void saveState() = 0;
-
- /**
- * Restore saved state, possibly after a yield. Return true if the runner is OK, false if
- * it was killed.
- */
- virtual bool restoreState(OperationContext* opCtx) = 0;
-
- /**
- * Return the NS that the query is running over.
- */
- virtual const std::string& ns() = 0;
-
- /**
- * Return the Collection that the query is running over.
- */
- virtual const Collection* collection() = 0;
-
- /**
- * Returns OK, allocating and filling '*explain' or '*planInfo' with a description of the
- * chosen plan, depending on which is non-NULL (one of the two should be NULL). Caller
- * takes onwership of either '*explain' and '*planInfo'. Otherwise, returns false
- * a detailed error status.
- *
- * If 'explain' is NULL, then this out-parameter is ignored. Similarly, if 'staticInfo'
- * is NULL, then no static debug information is produced.
- */
- virtual Status getInfo(TypeExplain** explain, PlanInfo** planInfo) const = 0;
- };
-
-} // namespace mongo
diff --git a/src/mongo/db/query/type_explain.cpp b/src/mongo/db/query/type_explain.cpp
index b6415b4db41..329eb1dcb28 100644
--- a/src/mongo/db/query/type_explain.cpp
+++ b/src/mongo/db/query/type_explain.cpp
@@ -26,6 +26,8 @@
* then also delete it in the license file.
*/
+// THIS FILE IS DEPRECATED -- the old explain implementation is being replaced
+
#include "mongo/db/query/type_explain.h"
#include "mongo/db/field_parser.h"
diff --git a/src/mongo/db/query/type_explain.h b/src/mongo/db/query/type_explain.h
index f19b8ca107a..6cca71f2bf0 100644
--- a/src/mongo/db/query/type_explain.h
+++ b/src/mongo/db/query/type_explain.h
@@ -26,6 +26,8 @@
* then also delete it in the license file.
*/
+// THIS FILE IS DEPRECATED -- the old explain implementation is being replaced
+
#pragma once
#include <string>
diff --git a/src/mongo/db/repl/master_slave.cpp b/src/mongo/db/repl/master_slave.cpp
index 27e446765bb..46ba55feaff 100644
--- a/src/mongo/db/repl/master_slave.cpp
+++ b/src/mongo/db/repl/master_slave.cpp
@@ -258,8 +258,8 @@ namespace repl {
localSources,
ctx.db()->getCollection(txn, localSources)));
BSONObj obj;
- Runner::RunnerState state;
- while (Runner::RUNNER_ADVANCED == (state = exec->getNext(&obj, NULL))) {
+ PlanExecutor::ExecState state;
+ while (PlanExecutor::ADVANCED == (state = exec->getNext(&obj, NULL))) {
n++;
ReplSource tmp(txn, obj);
if (tmp.hostName != replSettings.source) {
@@ -279,7 +279,7 @@ namespace repl {
dbexit( EXIT_REPLICATION_ERROR );
}
}
- uassert(17065, "Internal error reading from local.sources", Runner::RUNNER_EOF == state);
+ uassert(17065, "Internal error reading from local.sources", PlanExecutor::IS_EOF == state);
uassert( 10002 , "local.sources collection corrupt?", n<2 );
if ( n == 0 ) {
// source missing. add.
@@ -303,8 +303,8 @@ namespace repl {
localSources,
ctx.db()->getCollection(txn, localSources)));
BSONObj obj;
- Runner::RunnerState state;
- while (Runner::RUNNER_ADVANCED == (state = exec->getNext(&obj, NULL))) {
+ PlanExecutor::ExecState state;
+ while (PlanExecutor::ADVANCED == (state = exec->getNext(&obj, NULL))) {
ReplSource tmp(txn, obj);
if ( tmp.syncedTo.isNull() ) {
DBDirectClient c(txn);
@@ -317,7 +317,7 @@ namespace repl {
}
addSourceToList(txn, v, tmp, old);
}
- uassert(17066, "Internal error reading from local.sources", Runner::RUNNER_EOF == state);
+ uassert(17066, "Internal error reading from local.sources", PlanExecutor::IS_EOF == state);
}
bool ReplSource::throttledForceResyncDead( OperationContext* txn, const char *requester ) {
diff --git a/src/mongo/db/repl/repl_info.cpp b/src/mongo/db/repl/repl_info.cpp
index 9d3b1e02436..77aa8d7ffc6 100644
--- a/src/mongo/db/repl/repl_info.cpp
+++ b/src/mongo/db/repl/repl_info.cpp
@@ -89,8 +89,8 @@ namespace repl {
ctx.ctx().db()->getCollection(txn,
localSources)));
BSONObj obj;
- Runner::RunnerState state;
- while (Runner::RUNNER_ADVANCED == (state = exec->getNext(&obj, NULL))) {
+ PlanExecutor::ExecState state;
+ while (PlanExecutor::ADVANCED == (state = exec->getNext(&obj, NULL))) {
src.push_back(obj);
}
}
diff --git a/src/mongo/db/repl/rs_rollback.cpp b/src/mongo/db/repl/rs_rollback.cpp
index 7aab1a71bc0..a64af7a597a 100644
--- a/src/mongo/db/repl/rs_rollback.cpp
+++ b/src/mongo/db/repl/rs_rollback.cpp
@@ -243,7 +243,7 @@ namespace repl {
BSONObj ourObj;
DiskLoc ourLoc;
- if (Runner::RUNNER_ADVANCED != exec->getNext(&ourObj, &ourLoc)) {
+ if (PlanExecutor::ADVANCED != exec->getNext(&ourObj, &ourLoc)) {
throw RSFatalException("our oplog empty or unreadable");
}
@@ -304,7 +304,7 @@ namespace repl {
theirObj = oplogCursor->nextSafe();
theirTime = theirObj["ts"]._opTime();
- if (Runner::RUNNER_ADVANCED != exec->getNext(&ourObj, &ourLoc)) {
+ if (PlanExecutor::ADVANCED != exec->getNext(&ourObj, &ourLoc)) {
log() << "replSet rollback error RS101 reached beginning of local oplog"
<< rsLog;
log() << "replSet them: " << them->toString() << " scanned: "
@@ -331,7 +331,7 @@ namespace repl {
else {
// theirTime < ourTime
refetch(fixUpInfo, ourObj);
- if (Runner::RUNNER_ADVANCED != exec->getNext(&ourObj, &ourLoc)) {
+ if (PlanExecutor::ADVANCED != exec->getNext(&ourObj, &ourLoc)) {
log() << "replSet rollback error RS101 reached beginning of local oplog"
<< rsLog;
log() << "replSet them: " << them->toString() << " scanned: "