author     Qingyang Chen <qingyang.chen@10gen.com>   2015-06-19 17:20:36 -0400
committer  Qingyang Chen <qingyang.chen@10gen.com>   2015-06-26 10:10:34 -0400
commit     159f54fcb550d6ff660efd2832ac5ae8b6fced56 (patch)
tree       2b6ac085b3375ce151d92fa1db9b4a38d92da25f
parent     2931e33f4d6efb3aa176eaa951be6c91abce2b43 (diff)
SERVER-16889 Modernize getExecutor*(), PlanExecutor::make() signatures
-rw-r--r--  src/mongo/db/commands/count_cmd.cpp                       36
-rw-r--r--  src/mongo/db/commands/dbhash.cpp                          18
-rw-r--r--  src/mongo/db/commands/distinct.cpp                        13
-rw-r--r--  src/mongo/db/commands/find_and_modify.cpp                 42
-rw-r--r--  src/mongo/db/commands/find_cmd.cpp                        52
-rw-r--r--  src/mongo/db/commands/geo_near_cmd.cpp                    10
-rw-r--r--  src/mongo/db/commands/group.cpp                           22
-rw-r--r--  src/mongo/db/commands/list_collections.cpp                15
-rw-r--r--  src/mongo/db/commands/list_indexes.cpp                    15
-rw-r--r--  src/mongo/db/commands/mr.cpp                              20
-rw-r--r--  src/mongo/db/commands/parallel_collection_scan.cpp        16
-rw-r--r--  src/mongo/db/commands/pipeline_command.cpp                42
-rw-r--r--  src/mongo/db/commands/repair_cursor.cpp                    9
-rw-r--r--  src/mongo/db/commands/write_commands/batch_executor.cpp   15
-rw-r--r--  src/mongo/db/commands/write_commands/write_commands.cpp   10
-rw-r--r--  src/mongo/db/dbcommands.cpp                               19
-rw-r--r--  src/mongo/db/dbhelpers.cpp                                13
-rw-r--r--  src/mongo/db/exec/delete.cpp                               8
-rw-r--r--  src/mongo/db/exec/delete.h                                 2
-rw-r--r--  src/mongo/db/exec/stagedebug_cmd.cpp                      14
-rw-r--r--  src/mongo/db/exec/update.cpp                               8
-rw-r--r--  src/mongo/db/exec/update.h                                 2
-rw-r--r--  src/mongo/db/instance.cpp                                 26
-rw-r--r--  src/mongo/db/ops/delete.cpp                                7
-rw-r--r--  src/mongo/db/ops/update.cpp                                7
-rw-r--r--  src/mongo/db/pipeline/pipeline_d.cpp                      42
-rw-r--r--  src/mongo/db/query/explain.cpp                             4
-rw-r--r--  src/mongo/db/query/explain.h                               2
-rw-r--r--  src/mongo/db/query/find.cpp                               22
-rw-r--r--  src/mongo/db/query/find.h                                 11
-rw-r--r--  src/mongo/db/query/get_executor.cpp                      392
-rw-r--r--  src/mongo/db/query/get_executor.h                        118
-rw-r--r--  src/mongo/db/query/internal_plans.cpp                     68
-rw-r--r--  src/mongo/db/query/internal_plans.h                       26
-rw-r--r--  src/mongo/db/query/plan_executor.cpp                     112
-rw-r--r--  src/mongo/db/query/plan_executor.h                        77
-rw-r--r--  src/mongo/dbtests/documentsourcetests.cpp                  6
-rw-r--r--  src/mongo/dbtests/executor_registry.cpp                   18
-rw-r--r--  src/mongo/dbtests/query_multi_plan_runner.cpp             23
-rw-r--r--  src/mongo/dbtests/query_plan_executor.cpp                 37
-rw-r--r--  src/mongo/dbtests/query_stage_collscan.cpp                42
-rw-r--r--  src/mongo/dbtests/query_stage_merge_sort.cpp             148
-rw-r--r--  src/mongo/dbtests/query_stage_sort.cpp                    58
-rw-r--r--  src/mongo/dbtests/query_stage_tests.cpp                   20
-rw-r--r--  src/mongo/s/d_migrate.cpp                                 18
-rw-r--r--  src/mongo/s/d_split.cpp                                    4
46 files changed, 801 insertions, 888 deletions
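
Before the per-file diffs, here is a condensed sketch of the calling convention this commit modernizes. It is assembled from the count_cmd.cpp hunks below (the surrounding names txn, collection, request, and result come from that command), so treat it as an illustration of the pattern rather than a verbatim excerpt.

    // Old convention (removed by this commit): return a Status and fill in a raw
    // out-parameter; the caller had to remember to adopt the pointer afterwards.
    //
    //   PlanExecutor* rawExec;
    //   Status getExecStatus = getExecutorCount(txn,
    //                                           collection,
    //                                           request.getValue(),
    //                                           false,  // !explain
    //                                           PlanExecutor::YIELD_AUTO,
    //                                           &rawExec);
    //   if (!getExecStatus.isOK()) {
    //       return appendCommandStatus(result, getExecStatus);
    //   }
    //   unique_ptr<PlanExecutor> exec(rawExec);
    //
    // New convention: StatusWith<unique_ptr<PlanExecutor>> carries the error and the
    // owning pointer in a single return value.
    auto statusWithPlanExecutor = getExecutorCount(txn,
                                                   collection,
                                                   request.getValue(),
                                                   false,  // !explain
                                                   PlanExecutor::YIELD_AUTO);
    if (!statusWithPlanExecutor.isOK()) {
        return appendCommandStatus(result, statusWithPlanExecutor.getStatus());
    }
    std::unique_ptr<PlanExecutor> exec = std::move(statusWithPlanExecutor.getValue());
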
diff --git a/src/mongo/db/commands/count_cmd.cpp b/src/mongo/db/commands/count_cmd.cpp
index 91838325936..d44feb3d58c 100644
--- a/src/mongo/db/commands/count_cmd.cpp
+++ b/src/mongo/db/commands/count_cmd.cpp
@@ -99,18 +99,16 @@ public:
// version on initial entry into count.
RangePreserver preserver(collection);
- PlanExecutor* rawExec;
- Status getExecStatus = getExecutorCount(txn,
- collection,
- request.getValue(),
- true, // explain
- PlanExecutor::YIELD_AUTO,
- &rawExec);
- if (!getExecStatus.isOK()) {
- return getExecStatus;
+ auto statusWithPlanExecutor = getExecutorCount(txn,
+ collection,
+ request.getValue(),
+ true, // explain
+ PlanExecutor::YIELD_AUTO);
+ if (!statusWithPlanExecutor.isOK()) {
+ return statusWithPlanExecutor.getStatus();
}
- unique_ptr<PlanExecutor> exec(rawExec);
+ unique_ptr<PlanExecutor> exec = std::move(statusWithPlanExecutor.getValue());
Explain::explainStages(exec.get(), verbosity, out);
return Status::OK();
@@ -134,18 +132,16 @@ public:
// version on initial entry into count.
RangePreserver preserver(collection);
- PlanExecutor* rawExec;
- Status getExecStatus = getExecutorCount(txn,
- collection,
- request.getValue(),
- false, // !explain
- PlanExecutor::YIELD_AUTO,
- &rawExec);
- if (!getExecStatus.isOK()) {
- return appendCommandStatus(result, getExecStatus);
+ auto statusWithPlanExecutor = getExecutorCount(txn,
+ collection,
+ request.getValue(),
+ false, // !explain
+ PlanExecutor::YIELD_AUTO);
+ if (!statusWithPlanExecutor.isOK()) {
+ return appendCommandStatus(result, statusWithPlanExecutor.getStatus());
}
- unique_ptr<PlanExecutor> exec(rawExec);
+ unique_ptr<PlanExecutor> exec = std::move(statusWithPlanExecutor.getValue());
// Store the plan summary string in CurOp.
if (NULL != CurOp::get(txn)) {
diff --git a/src/mongo/db/commands/dbhash.cpp b/src/mongo/db/commands/dbhash.cpp
index dd9db449300..c8b3cd4e179 100644
--- a/src/mongo/db/commands/dbhash.cpp
+++ b/src/mongo/db/commands/dbhash.cpp
@@ -96,16 +96,16 @@ std::string DBHashCmd::hashCollection(OperationContext* opCtx,
unique_ptr<PlanExecutor> exec;
if (desc) {
- exec.reset(InternalPlanner::indexScan(opCtx,
- collection,
- desc,
- BSONObj(),
- BSONObj(),
- false,
- InternalPlanner::FORWARD,
- InternalPlanner::IXSCAN_FETCH));
+ exec = InternalPlanner::indexScan(opCtx,
+ collection,
+ desc,
+ BSONObj(),
+ BSONObj(),
+ false,
+ InternalPlanner::FORWARD,
+ InternalPlanner::IXSCAN_FETCH);
} else if (collection->isCapped()) {
- exec.reset(InternalPlanner::collectionScan(opCtx, fullCollectionName, collection));
+ exec = InternalPlanner::collectionScan(opCtx, fullCollectionName, collection);
} else {
log() << "can't find _id index for: " << fullCollectionName << endl;
return "no _id _index";
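
The dbhash.cpp hunk above implies that the InternalPlanner factory functions now return std::unique_ptr<PlanExecutor> rather than a raw pointer; the actual signature change lives in internal_plans.h/.cpp, which appear in the diffstat but not in this excerpt. A minimal caller-side sketch under that assumption:

    // Before: the caller adopted a raw pointer returned by the factory.
    //   exec.reset(InternalPlanner::collectionScan(opCtx, fullCollectionName, collection));
    //
    // After: ownership arrives already wrapped, so plain assignment suffices.
    std::unique_ptr<PlanExecutor> exec =
        InternalPlanner::collectionScan(opCtx, fullCollectionName, collection);
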
diff --git a/src/mongo/db/commands/distinct.cpp b/src/mongo/db/commands/distinct.cpp
index 2c281140c63..9fb1214234c 100644
--- a/src/mongo/db/commands/distinct.cpp
+++ b/src/mongo/db/commands/distinct.cpp
@@ -123,17 +123,16 @@ public:
return true;
}
- PlanExecutor* rawExec;
- Status status =
- getExecutorDistinct(txn, collection, query, key, PlanExecutor::YIELD_AUTO, &rawExec);
- if (!status.isOK()) {
+ auto statusWithPlanExecutor =
+ getExecutorDistinct(txn, collection, query, key, PlanExecutor::YIELD_AUTO);
+ if (!statusWithPlanExecutor.isOK()) {
uasserted(17216,
mongoutils::str::stream() << "Can't get executor for query " << query << ": "
- << status.toString());
+ << statusWithPlanExecutor.getStatus().toString());
return 0;
}
- unique_ptr<PlanExecutor> exec(rawExec);
+ unique_ptr<PlanExecutor> exec = std::move(statusWithPlanExecutor.getValue());
BSONObj obj;
PlanExecutor::ExecState state;
@@ -165,7 +164,7 @@ public:
// Get summary information about the plan.
PlanSummaryStats stats;
- Explain::getSummaryStats(exec.get(), &stats);
+ Explain::getSummaryStats(*exec, &stats);
verify(start == bb.buf());
diff --git a/src/mongo/db/commands/find_and_modify.cpp b/src/mongo/db/commands/find_and_modify.cpp
index ee7c2544ac3..2cc0887ae9d 100644
--- a/src/mongo/db/commands/find_and_modify.cpp
+++ b/src/mongo/db/commands/find_and_modify.cpp
@@ -261,12 +261,11 @@ public:
str::stream() << "database " << dbName << " does not exist."};
}
- PlanExecutor* rawExec;
- Status execStatus = getExecutorDelete(txn, collection, &parsedDelete, &rawExec);
- if (!execStatus.isOK()) {
- return execStatus;
+ auto statusWithPlanExecutor = getExecutorDelete(txn, collection, &parsedDelete);
+ if (!statusWithPlanExecutor.isOK()) {
+ return statusWithPlanExecutor.getStatus();
}
- const std::unique_ptr<PlanExecutor> exec(rawExec);
+ const std::unique_ptr<PlanExecutor> exec = std::move(statusWithPlanExecutor.getValue());
Explain::explainStages(exec.get(), verbosity, out);
} else {
UpdateRequest request(nsString);
@@ -298,13 +297,12 @@ public:
str::stream() << "database " << dbName << " does not exist."};
}
- PlanExecutor* rawExec;
- Status execStatus =
- getExecutorUpdate(txn, collection, &parsedUpdate, opDebug, &rawExec);
- if (!execStatus.isOK()) {
- return execStatus;
+ auto statusWithPlanExecutor =
+ getExecutorUpdate(txn, collection, &parsedUpdate, opDebug);
+ if (!statusWithPlanExecutor.isOK()) {
+ return statusWithPlanExecutor.getStatus();
}
- const std::unique_ptr<PlanExecutor> exec(rawExec);
+ const std::unique_ptr<PlanExecutor> exec = std::move(statusWithPlanExecutor.getValue());
Explain::explainStages(exec.get(), verbosity, out);
}
@@ -378,12 +376,12 @@ public:
return appendCommandStatus(result, isPrimary);
}
- PlanExecutor* rawExec;
- Status execStatus = getExecutorDelete(txn, collection, &parsedDelete, &rawExec);
- if (!execStatus.isOK()) {
- return appendCommandStatus(result, execStatus);
+ auto statusWithPlanExecutor = getExecutorDelete(txn, collection, &parsedDelete);
+ if (!statusWithPlanExecutor.isOK()) {
+ return appendCommandStatus(result, statusWithPlanExecutor.getStatus());
}
- const std::unique_ptr<PlanExecutor> exec(rawExec);
+ const std::unique_ptr<PlanExecutor> exec =
+ std::move(statusWithPlanExecutor.getValue());
StatusWith<boost::optional<BSONObj>> advanceStatus =
advanceExecutor(exec.get(), args.isRemove());
@@ -447,13 +445,13 @@ public:
}
}
- PlanExecutor* rawExec;
- Status execStatus =
- getExecutorUpdate(txn, collection, &parsedUpdate, opDebug, &rawExec);
- if (!execStatus.isOK()) {
- return appendCommandStatus(result, execStatus);
+ auto statusWithPlanExecutor =
+ getExecutorUpdate(txn, collection, &parsedUpdate, opDebug);
+ if (!statusWithPlanExecutor.isOK()) {
+ return appendCommandStatus(result, statusWithPlanExecutor.getStatus());
}
- const std::unique_ptr<PlanExecutor> exec(rawExec);
+ const std::unique_ptr<PlanExecutor> exec =
+ std::move(statusWithPlanExecutor.getValue());
StatusWith<boost::optional<BSONObj>> advanceStatus =
advanceExecutor(exec.get(), args.isRemove());
diff --git a/src/mongo/db/commands/find_cmd.cpp b/src/mongo/db/commands/find_cmd.cpp
index f86fbbe8b34..8b7979840bd 100644
--- a/src/mongo/db/commands/find_cmd.cpp
+++ b/src/mongo/db/commands/find_cmd.cpp
@@ -144,16 +144,12 @@ public:
Collection* collection = ctx.getCollection();
// We have a parsed query. Time to get the execution plan for it.
- std::unique_ptr<PlanExecutor> exec;
- {
- PlanExecutor* rawExec;
- Status execStatus = getExecutorFind(
- txn, collection, nss, cq.release(), PlanExecutor::YIELD_AUTO, &rawExec);
- if (!execStatus.isOK()) {
- return execStatus;
- }
- exec.reset(rawExec);
+ auto statusWithPlanExecutor =
+ getExecutorFind(txn, collection, nss, std::move(cq), PlanExecutor::YIELD_AUTO);
+ if (!statusWithPlanExecutor.isOK()) {
+ return statusWithPlanExecutor.getStatus();
}
+ std::unique_ptr<PlanExecutor> exec = std::move(statusWithPlanExecutor.getValue());
// Got the execution tree. Explain it.
Explain::explainStages(exec.get(), verbosity, out);
@@ -231,16 +227,12 @@ public:
const ChunkVersion shardingVersionAtStart = shardingState.getVersion(nss.ns());
// 3) Get the execution plan for the query.
- std::unique_ptr<PlanExecutor> execHolder;
- {
- PlanExecutor* rawExec;
- Status execStatus = getExecutorFind(
- txn, collection, nss, cq.release(), PlanExecutor::YIELD_AUTO, &rawExec);
- if (!execStatus.isOK()) {
- return appendCommandStatus(result, execStatus);
- }
- execHolder.reset(rawExec);
+ auto statusWithPlanExecutor =
+ getExecutorFind(txn, collection, nss, std::move(cq), PlanExecutor::YIELD_AUTO);
+ if (!statusWithPlanExecutor.isOK()) {
+ return appendCommandStatus(result, statusWithPlanExecutor.getStatus());
}
+ std::unique_ptr<PlanExecutor> exec = std::move(statusWithPlanExecutor.getValue());
// TODO: Currently, chunk ranges are kept around until all ClientCursors created while
// the chunk belonged on this node are gone. Separating chunk lifetime management from
@@ -259,24 +251,24 @@ public:
// there is no ClientCursor id, and then return.
const int numResults = 0;
const CursorId cursorId = 0;
- endQueryOp(txn, execHolder.get(), dbProfilingLevel, numResults, cursorId);
+ endQueryOp(txn, *exec, dbProfilingLevel, numResults, cursorId);
appendCursorResponseObject(cursorId, nss.ns(), BSONArray(), &result);
return true;
}
- const LiteParsedQuery& pq = execHolder->getCanonicalQuery()->getParsed();
+ const LiteParsedQuery& pq = exec->getCanonicalQuery()->getParsed();
// 4) If possible, register the execution plan inside a ClientCursor, and pin that
// cursor. In this case, ownership of the PlanExecutor is transferred to the
// ClientCursor, and 'exec' becomes null.
//
// First unregister the PlanExecutor so it can be re-registered with ClientCursor.
- execHolder->deregisterExec();
+ exec->deregisterExec();
// Create a ClientCursor containing this plan executor. We don't have to worry
// about leaking it as it's inserted into a global map by its ctor.
ClientCursor* cursor = new ClientCursor(collection->getCursorManager(),
- execHolder.release(),
+ exec.release(),
nss.ns(),
pq.getOptions(),
pq.getFilter());
@@ -286,8 +278,8 @@ public:
// On early return, get rid of the cursor.
ScopeGuard cursorFreer = MakeGuard(&ClientCursorPin::deleteUnderlying, ccPin);
- invariant(!execHolder);
- PlanExecutor* exec = cursor->getExecutor();
+ invariant(!exec);
+ PlanExecutor* cursorExec = cursor->getExecutor();
// 5) Stream query results, adding them to a BSONArray as we go.
BSONArrayBuilder firstBatch;
@@ -295,11 +287,11 @@ public:
PlanExecutor::ExecState state;
int numResults = 0;
while (!enoughForFirstBatch(pq, numResults, firstBatch.len()) &&
- PlanExecutor::ADVANCED == (state = exec->getNext(&obj, NULL))) {
+ PlanExecutor::ADVANCED == (state = cursorExec->getNext(&obj, NULL))) {
// If adding this object will cause us to exceed the BSON size limit, then we stash
// it for later.
if (firstBatch.len() + obj.objsize() > BSONObjMaxUserSize && numResults > 0) {
- exec->enqueue(obj);
+ cursorExec->enqueue(obj);
break;
}
@@ -310,7 +302,7 @@ public:
// Throw an assertion if query execution fails for any reason.
if (PlanExecutor::FAILURE == state || PlanExecutor::DEAD == state) {
- const std::unique_ptr<PlanStageStats> stats(exec->getStats());
+ const std::unique_ptr<PlanStageStats> stats(cursorExec->getStats());
error() << "Plan executor error during find command: " << PlanExecutor::statestr(state)
<< ", stats: " << Explain::statsToBSON(*stats);
@@ -322,9 +314,9 @@ public:
}
// 6) Set up the cursor for getMore.
- if (shouldSaveCursor(txn, collection, state, exec)) {
+ if (shouldSaveCursor(txn, collection, state, cursorExec)) {
// State will be restored on getMore.
- exec->saveState();
+ cursorExec->saveState();
cursor->setLeftoverMaxTimeMicros(CurOp::get(txn)->getRemainingMaxTimeMicros());
cursor->setPos(numResults);
@@ -344,7 +336,7 @@ public:
}
// Fill out curop based on the results.
- endQueryOp(txn, exec, dbProfilingLevel, numResults, cursorId);
+ endQueryOp(txn, *cursorExec, dbProfilingLevel, numResults, cursorId);
// 7) Generate the response object to send to the client.
appendCursorResponseObject(cursorId, nss.ns(), firstBatch.arr(), &result);
diff --git a/src/mongo/db/commands/geo_near_cmd.cpp b/src/mongo/db/commands/geo_near_cmd.cpp
index df8fd3bdee4..e0fba6daa1a 100644
--- a/src/mongo/db/commands/geo_near_cmd.cpp
+++ b/src/mongo/db/commands/geo_near_cmd.cpp
@@ -194,14 +194,14 @@ public:
// version on initial entry into geoNear.
RangePreserver preserver(collection);
- PlanExecutor* rawExec;
- if (!getExecutor(txn, collection, cq.release(), PlanExecutor::YIELD_AUTO, &rawExec, 0)
- .isOK()) {
+ auto statusWithPlanExecutor =
+ getExecutor(txn, collection, std::move(cq), PlanExecutor::YIELD_AUTO, 0);
+ if (!statusWithPlanExecutor.isOK()) {
errmsg = "can't get query executor";
return false;
}
- unique_ptr<PlanExecutor> exec(rawExec);
+ unique_ptr<PlanExecutor> exec = std::move(statusWithPlanExecutor.getValue());
double totalDistance = 0;
BSONObjBuilder resultBuilder(result.subarrayStart("results"));
@@ -256,7 +256,7 @@ public:
// Fill in nscanned from the explain.
PlanSummaryStats summary;
- Explain::getSummaryStats(exec.get(), &summary);
+ Explain::getSummaryStats(*exec, &summary);
stats.appendNumber("nscanned", summary.totalKeysExamined);
stats.appendNumber("objectsLoaded", summary.totalDocsExamined);
diff --git a/src/mongo/db/commands/group.cpp b/src/mongo/db/commands/group.cpp
index cdfb9ca0d5a..b4e14aa9200 100644
--- a/src/mongo/db/commands/group.cpp
+++ b/src/mongo/db/commands/group.cpp
@@ -136,14 +136,13 @@ bool GroupCommand::run(OperationContext* txn,
AutoGetCollectionForRead ctx(txn, groupRequest.ns);
Collection* coll = ctx.getCollection();
- PlanExecutor* rawPlanExecutor;
- Status getExecStatus =
- getExecutorGroup(txn, coll, groupRequest, PlanExecutor::YIELD_AUTO, &rawPlanExecutor);
- if (!getExecStatus.isOK()) {
- return appendCommandStatus(out, getExecStatus);
+ auto statusWithPlanExecutor =
+ getExecutorGroup(txn, coll, groupRequest, PlanExecutor::YIELD_AUTO);
+ if (!statusWithPlanExecutor.isOK()) {
+ return appendCommandStatus(out, statusWithPlanExecutor.getStatus());
}
- unique_ptr<PlanExecutor> planExecutor(rawPlanExecutor);
+ unique_ptr<PlanExecutor> planExecutor = std::move(statusWithPlanExecutor.getValue());
// Group executors return ADVANCED exactly once, with the entire group result.
BSONObj retval;
@@ -191,14 +190,13 @@ Status GroupCommand::explain(OperationContext* txn,
AutoGetCollectionForRead ctx(txn, groupRequest.ns);
Collection* coll = ctx.getCollection();
- PlanExecutor* rawPlanExecutor;
- Status getExecStatus =
- getExecutorGroup(txn, coll, groupRequest, PlanExecutor::YIELD_AUTO, &rawPlanExecutor);
- if (!getExecStatus.isOK()) {
- return getExecStatus;
+ auto statusWithPlanExecutor =
+ getExecutorGroup(txn, coll, groupRequest, PlanExecutor::YIELD_AUTO);
+ if (!statusWithPlanExecutor.isOK()) {
+ return statusWithPlanExecutor.getStatus();
}
- unique_ptr<PlanExecutor> planExecutor(rawPlanExecutor);
+ unique_ptr<PlanExecutor> planExecutor = std::move(statusWithPlanExecutor.getValue());
Explain::explainStages(planExecutor.get(), verbosity, out);
return Status::OK();
diff --git a/src/mongo/db/commands/list_collections.cpp b/src/mongo/db/commands/list_collections.cpp
index ac871b0f078..a97bdbc202a 100644
--- a/src/mongo/db/commands/list_collections.cpp
+++ b/src/mongo/db/commands/list_collections.cpp
@@ -169,17 +169,12 @@ public:
dassert(NamespaceString(cursorNamespace).isValid());
dassert(NamespaceString(cursorNamespace).isListCollectionsCursorNS());
- PlanExecutor* rawExec;
- Status makeStatus = PlanExecutor::make(txn,
- ws.release(),
- root.release(),
- cursorNamespace,
- PlanExecutor::YIELD_MANUAL,
- &rawExec);
- std::unique_ptr<PlanExecutor> exec(rawExec);
- if (!makeStatus.isOK()) {
- return appendCommandStatus(result, makeStatus);
+ auto statusWithPlanExecutor = PlanExecutor::make(
+ txn, std::move(ws), std::move(root), cursorNamespace, PlanExecutor::YIELD_MANUAL);
+ if (!statusWithPlanExecutor.isOK()) {
+ return appendCommandStatus(result, statusWithPlanExecutor.getStatus());
}
+ std::unique_ptr<PlanExecutor> exec = std::move(statusWithPlanExecutor.getValue());
BSONArrayBuilder firstBatch;
diff --git a/src/mongo/db/commands/list_indexes.cpp b/src/mongo/db/commands/list_indexes.cpp
index 9b84c27d6d8..a22d3b43f62 100644
--- a/src/mongo/db/commands/list_indexes.cpp
+++ b/src/mongo/db/commands/list_indexes.cpp
@@ -167,17 +167,12 @@ public:
dassert(NamespaceString(cursorNamespace).isListIndexesCursorNS());
dassert(ns == NamespaceString(cursorNamespace).getTargetNSForListIndexes());
- PlanExecutor* rawExec;
- Status makeStatus = PlanExecutor::make(txn,
- ws.release(),
- root.release(),
- cursorNamespace,
- PlanExecutor::YIELD_MANUAL,
- &rawExec);
- std::unique_ptr<PlanExecutor> exec(rawExec);
- if (!makeStatus.isOK()) {
- return appendCommandStatus(result, makeStatus);
+ auto statusWithPlanExecutor = PlanExecutor::make(
+ txn, std::move(ws), std::move(root), cursorNamespace, PlanExecutor::YIELD_MANUAL);
+ if (!statusWithPlanExecutor.isOK()) {
+ return appendCommandStatus(result, statusWithPlanExecutor.getStatus());
}
+ std::unique_ptr<PlanExecutor> exec = std::move(statusWithPlanExecutor.getValue());
BSONArrayBuilder firstBatch;
diff --git a/src/mongo/db/commands/mr.cpp b/src/mongo/db/commands/mr.cpp
index fa4add10df0..771271852fb 100644
--- a/src/mongo/db/commands/mr.cpp
+++ b/src/mongo/db/commands/mr.cpp
@@ -1012,15 +1012,11 @@ void State::finalReduce(CurOp* op, ProgressMeterHolder& pm) {
Collection* coll = getCollectionOrUassert(ctx->getDb(), _config.incLong);
invariant(coll);
- PlanExecutor* rawExec;
- verify(getExecutor(_txn,
- coll,
- cq.release(),
- PlanExecutor::YIELD_AUTO,
- &rawExec,
- QueryPlannerParams::NO_TABLE_SCAN).isOK());
+ auto statusWithPlanExecutor = getExecutor(
+ _txn, coll, std::move(cq), PlanExecutor::YIELD_AUTO, QueryPlannerParams::NO_TABLE_SCAN);
+ verify(statusWithPlanExecutor.isOK());
- unique_ptr<PlanExecutor> exec(rawExec);
+ unique_ptr<PlanExecutor> exec = std::move(statusWithPlanExecutor.getValue());
// iterate over all sorted objects
BSONObj o;
@@ -1375,14 +1371,14 @@ public:
Collection* coll = state.getCollectionOrUassert(db, config.ns);
invariant(coll);
- PlanExecutor* rawExec;
- if (!getExecutor(txn, coll, cq.release(), PlanExecutor::YIELD_AUTO, &rawExec)
- .isOK()) {
+ auto statusWithPlanExecutor =
+ getExecutor(txn, coll, std::move(cq), PlanExecutor::YIELD_AUTO);
+ if (!statusWithPlanExecutor.isOK()) {
uasserted(17239, "Can't get executor for query " + config.filter.toString());
return 0;
}
- unique_ptr<PlanExecutor> exec(rawExec);
+ unique_ptr<PlanExecutor> exec = std::move(statusWithPlanExecutor.getValue());
Timer mt;
diff --git a/src/mongo/db/commands/parallel_collection_scan.cpp b/src/mongo/db/commands/parallel_collection_scan.cpp
index 33e24b6648e..db4dc3f0170 100644
--- a/src/mongo/db/commands/parallel_collection_scan.cpp
+++ b/src/mongo/db/commands/parallel_collection_scan.cpp
@@ -37,12 +37,14 @@
#include "mongo/db/db_raii.h"
#include "mongo/db/exec/multi_iterator.h"
#include "mongo/db/query/cursor_responses.h"
+#include "mongo/stdx/memory.h"
#include "mongo/util/touch_pages.h"
namespace mongo {
using std::unique_ptr;
using std::string;
+using stdx::make_unique;
class ParallelCollectionScanCmd : public Command {
public:
@@ -106,15 +108,15 @@ public:
OwnedPointerVector<PlanExecutor> execs;
for (size_t i = 0; i < numCursors; i++) {
- WorkingSet* ws = new WorkingSet();
- MultiIteratorStage* mis = new MultiIteratorStage(txn, ws, collection);
+ unique_ptr<WorkingSet> ws = make_unique<WorkingSet>();
+ unique_ptr<MultiIteratorStage> mis =
+ make_unique<MultiIteratorStage>(txn, ws.get(), collection);
- PlanExecutor* rawExec;
// Takes ownership of 'ws' and 'mis'.
- Status execStatus =
- PlanExecutor::make(txn, ws, mis, collection, PlanExecutor::YIELD_AUTO, &rawExec);
- invariant(execStatus.isOK());
- unique_ptr<PlanExecutor> curExec(rawExec);
+ auto statusWithPlanExecutor = PlanExecutor::make(
+ txn, std::move(ws), std::move(mis), collection, PlanExecutor::YIELD_AUTO);
+ invariant(statusWithPlanExecutor.isOK());
+ unique_ptr<PlanExecutor> curExec = std::move(statusWithPlanExecutor.getValue());
// The PlanExecutor was registered on construction due to the YIELD_AUTO policy.
// We have to deregister it, as it will be registered with ClientCursor.
diff --git a/src/mongo/db/commands/pipeline_command.cpp b/src/mongo/db/commands/pipeline_command.cpp
index 95423cf2e7b..c90168b9d8e 100644
--- a/src/mongo/db/commands/pipeline_command.cpp
+++ b/src/mongo/db/commands/pipeline_command.cpp
@@ -217,9 +217,8 @@ public:
verify(pPipeline);
}
- PlanExecutor* exec = NULL;
- unique_ptr<ClientCursorPin> pin; // either this OR the execHolder will be non-null
- unique_ptr<PlanExecutor> execHolder;
+ unique_ptr<ClientCursorPin> pin; // either this OR the exec will be non-null
+ unique_ptr<PlanExecutor> exec;
{
// This will throw if the sharding version for this connection is out of date. The
// lock must be held continuously from now until we have created both the output
@@ -243,24 +242,14 @@ public:
unique_ptr<WorkingSet> ws(new WorkingSet());
unique_ptr<PipelineProxyStage> proxy(
new PipelineProxyStage(pPipeline, input, ws.get()));
- Status execStatus = Status::OK();
- if (NULL == collection) {
- execStatus = PlanExecutor::make(txn,
- ws.release(),
- proxy.release(),
- nss.ns(),
- PlanExecutor::YIELD_MANUAL,
- &exec);
- } else {
- execStatus = PlanExecutor::make(txn,
- ws.release(),
- proxy.release(),
- collection,
- PlanExecutor::YIELD_MANUAL,
- &exec);
- }
- invariant(execStatus.isOK());
- execHolder.reset(exec);
+
+ auto statusWithPlanExecutor = (NULL == collection)
+ ? PlanExecutor::make(
+ txn, std::move(ws), std::move(proxy), nss.ns(), PlanExecutor::YIELD_MANUAL)
+ : PlanExecutor::make(
+ txn, std::move(ws), std::move(proxy), collection, PlanExecutor::YIELD_MANUAL);
+ invariant(statusWithPlanExecutor.isOK());
+ exec = std::move(statusWithPlanExecutor.getValue());
if (!collection && input) {
// If we don't have a collection, we won't be able to register any executors, so
@@ -272,7 +261,7 @@ public:
if (collection) {
const bool isAggCursor = true; // enable special locking behavior
ClientCursor* cursor = new ClientCursor(collection->getCursorManager(),
- execHolder.release(),
+ exec.release(),
nss.ns(),
0,
cmdObj.getOwned(),
@@ -286,7 +275,7 @@ public:
// collection lock later when cleaning up our ClientCursorPin.
// - In the case where we don't have a collection: our PlanExecutor won't be
// registered, so it will be safe to clean it up outside the lock.
- invariant(NULL == execHolder.get() || NULL == execHolder->collection());
+ invariant(NULL == exec.get() || NULL == exec->collection());
}
try {
@@ -299,7 +288,12 @@ public:
if (pPipeline->isExplain()) {
result << "stages" << Value(pPipeline->writeExplainOps());
} else if (isCursorCommand) {
- keepCursor = handleCursorCommand(txn, nss.ns(), pin.get(), exec, cmdObj, result);
+ keepCursor = handleCursorCommand(txn,
+ nss.ns(),
+ pin.get(),
+ pin ? pin->c()->getExecutor() : exec.get(),
+ cmdObj,
+ result);
} else {
pPipeline->run(result);
}
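
The pipeline_command.cpp hunk above also shows the reworked PlanExecutor::make(): it now takes the WorkingSet and root stage as unique_ptr arguments and reports failure through its StatusWith return value, so the old "Takes ownership of 'ws' and 'proxy'" comments become enforceable by the type system. A condensed sketch of that call shape, reusing the names from the hunk (pPipeline, input, collection, nss, txn); an illustration, not a verbatim excerpt:

    auto ws = stdx::make_unique<WorkingSet>();
    std::unique_ptr<PipelineProxyStage> proxy(
        new PipelineProxyStage(pPipeline, input, ws.get()));

    // Pick the overload keyed on a Collection* when one exists, otherwise on the namespace.
    auto statusWithPlanExecutor = (NULL == collection)
        ? PlanExecutor::make(
              txn, std::move(ws), std::move(proxy), nss.ns(), PlanExecutor::YIELD_MANUAL)
        : PlanExecutor::make(
              txn, std::move(ws), std::move(proxy), collection, PlanExecutor::YIELD_MANUAL);
    invariant(statusWithPlanExecutor.isOK());
    std::unique_ptr<PlanExecutor> exec = std::move(statusWithPlanExecutor.getValue());
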
diff --git a/src/mongo/db/commands/repair_cursor.cpp b/src/mongo/db/commands/repair_cursor.cpp
index 5cf096fc511..eef18cfcfeb 100644
--- a/src/mongo/db/commands/repair_cursor.cpp
+++ b/src/mongo/db/commands/repair_cursor.cpp
@@ -93,11 +93,10 @@ public:
new MultiIteratorStage(txn, ws.get(), collection));
stage->addIterator(std::move(cursor));
- PlanExecutor* rawExec;
- Status execStatus = PlanExecutor::make(
- txn, ws.release(), stage.release(), collection, PlanExecutor::YIELD_AUTO, &rawExec);
- invariant(execStatus.isOK());
- std::unique_ptr<PlanExecutor> exec(rawExec);
+ auto statusWithPlanExecutor = PlanExecutor::make(
+ txn, std::move(ws), std::move(stage), collection, PlanExecutor::YIELD_AUTO);
+ invariant(statusWithPlanExecutor.isOK());
+ std::unique_ptr<PlanExecutor> exec = std::move(statusWithPlanExecutor.getValue());
// 'exec' will be used in getMore(). It was automatically registered on construction
// due to the auto yield policy, so it could yield during plan selection. We deregister
diff --git a/src/mongo/db/commands/write_commands/batch_executor.cpp b/src/mongo/db/commands/write_commands/batch_executor.cpp
index 2087b5bd2f4..0087f7b77f4 100644
--- a/src/mongo/db/commands/write_commands/batch_executor.cpp
+++ b/src/mongo/db/commands/write_commands/batch_executor.cpp
@@ -1237,12 +1237,11 @@ static void multiUpdate(OperationContext* txn,
try {
invariant(collection);
- PlanExecutor* rawExec;
- uassertStatusOK(getExecutorUpdate(txn, collection, &parsedUpdate, debug, &rawExec));
- std::unique_ptr<PlanExecutor> exec(rawExec);
+ std::unique_ptr<PlanExecutor> exec =
+ uassertStatusOK(getExecutorUpdate(txn, collection, &parsedUpdate, debug));
uassertStatusOK(exec->executePlan());
- UpdateResult res = UpdateStage::makeUpdateResult(exec.get(), debug);
+ UpdateResult res = UpdateStage::makeUpdateResult(*exec, debug);
const long long numDocsModified = res.numDocsModified;
const long long numMatched = res.numMatched;
@@ -1331,14 +1330,12 @@ static void multiRemove(OperationContext* txn,
return;
}
- PlanExecutor* rawExec;
- uassertStatusOK(getExecutorDelete(
- txn, autoDb.getDb()->getCollection(nss), &parsedDelete, &rawExec));
- std::unique_ptr<PlanExecutor> exec(rawExec);
+ std::unique_ptr<PlanExecutor> exec = uassertStatusOK(
+ getExecutorDelete(txn, autoDb.getDb()->getCollection(nss), &parsedDelete));
// Execute the delete and retrieve the number deleted.
uassertStatusOK(exec->executePlan());
- result->getStats().n = DeleteStage::getNumDeleted(exec.get());
+ result->getStats().n = DeleteStage::getNumDeleted(*exec);
break;
} catch (const WriteConflictException& dle) {
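
A side effect of returning StatusWith is visible in the batch_executor.cpp hunks above: uassertStatusOK() can unwrap the value in one step, so the raw out-parameter and the separate reset disappear. A minimal sketch of that idiom, following the update path above:

    // Before:
    //   PlanExecutor* rawExec;
    //   uassertStatusOK(getExecutorUpdate(txn, collection, &parsedUpdate, debug, &rawExec));
    //   std::unique_ptr<PlanExecutor> exec(rawExec);
    //
    // After: uassertStatusOK() throws on error and returns the contained unique_ptr on success.
    std::unique_ptr<PlanExecutor> exec =
        uassertStatusOK(getExecutorUpdate(txn, collection, &parsedUpdate, debug));
    uassertStatusOK(exec->executePlan());
    UpdateResult res = UpdateStage::makeUpdateResult(*exec, debug);
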
diff --git a/src/mongo/db/commands/write_commands/write_commands.cpp b/src/mongo/db/commands/write_commands/write_commands.cpp
index fe6bb9a2ff9..b310903918a 100644
--- a/src/mongo/db/commands/write_commands/write_commands.cpp
+++ b/src/mongo/db/commands/write_commands/write_commands.cpp
@@ -235,9 +235,8 @@ Status WriteCmd::explain(OperationContext* txn,
collection = autoDb.getDb()->getCollection(nsString.ns());
}
- PlanExecutor* rawExec;
- uassertStatusOK(getExecutorUpdate(txn, collection, &parsedUpdate, debug, &rawExec));
- std::unique_ptr<PlanExecutor> exec(rawExec);
+ std::unique_ptr<PlanExecutor> exec =
+ uassertStatusOK(getExecutorUpdate(txn, collection, &parsedUpdate, debug));
// Explain the plan tree.
Explain::explainStages(exec.get(), verbosity, out);
@@ -274,9 +273,8 @@ Status WriteCmd::explain(OperationContext* txn,
collection = autoDb.getDb()->getCollection(nsString.ns());
}
- PlanExecutor* rawExec;
- uassertStatusOK(getExecutorDelete(txn, collection, &parsedDelete, &rawExec));
- std::unique_ptr<PlanExecutor> exec(rawExec);
+ std::unique_ptr<PlanExecutor> exec =
+ uassertStatusOK(getExecutorDelete(txn, collection, &parsedDelete));
// Explain the plan tree.
Explain::explainStages(exec.get(), verbosity, out);
diff --git a/src/mongo/db/dbcommands.cpp b/src/mongo/db/dbcommands.cpp
index ba7e84b6e31..b951d2832e8 100644
--- a/src/mongo/db/dbcommands.cpp
+++ b/src/mongo/db/dbcommands.cpp
@@ -600,18 +600,17 @@ public:
unique_ptr<AutoGetCollectionForRead> ctx(new AutoGetCollectionForRead(txn, ns));
Collection* coll = ctx->getCollection();
- PlanExecutor* rawExec;
- if (!getExecutor(txn,
- coll,
- cq.release(),
- PlanExecutor::YIELD_MANUAL,
- &rawExec,
- QueryPlannerParams::NO_TABLE_SCAN).isOK()) {
+ auto statusWithPlanExecutor = getExecutor(txn,
+ coll,
+ std::move(cq),
+ PlanExecutor::YIELD_MANUAL,
+ QueryPlannerParams::NO_TABLE_SCAN);
+ if (!statusWithPlanExecutor.isOK()) {
uasserted(17241, "Can't get executor for query " + query.toString());
return 0;
}
- unique_ptr<PlanExecutor> exec(rawExec);
+ unique_ptr<PlanExecutor> exec = std::move(statusWithPlanExecutor.getValue());
// Process notifications when the lock is released/reacquired in the loop below
exec->registerExec();
@@ -756,7 +755,7 @@ public:
result.append("millis", timer.millis());
return 1;
}
- exec.reset(InternalPlanner::collectionScan(txn, ns, collection));
+ exec = InternalPlanner::collectionScan(txn, ns, collection);
} else if (min.isEmpty() || max.isEmpty()) {
errmsg = "only one of min or max specified";
return false;
@@ -780,7 +779,7 @@ public:
min = Helpers::toKeyFormat(kp.extendRangeBound(min, false));
max = Helpers::toKeyFormat(kp.extendRangeBound(max, false));
- exec.reset(InternalPlanner::indexScan(txn, collection, idx, min, max, false));
+ exec = InternalPlanner::indexScan(txn, collection, idx, min, max, false);
}
long long avgObjSize = collection->dataSize(txn) / collection->numRecords(txn);
diff --git a/src/mongo/db/dbhelpers.cpp b/src/mongo/db/dbhelpers.cpp
index 0517552c7c2..b4e76483a1f 100644
--- a/src/mongo/db/dbhelpers.cpp
+++ b/src/mongo/db/dbhelpers.cpp
@@ -135,15 +135,14 @@ RecordId Helpers::findOne(OperationContext* txn,
massert(17244, "Could not canonicalize " + query.toString(), statusWithCQ.isOK());
unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
- PlanExecutor* rawExec;
size_t options = requireIndex ? QueryPlannerParams::NO_TABLE_SCAN : QueryPlannerParams::DEFAULT;
- massert(
- 17245,
- "Could not get executor for query " + query.toString(),
- getExecutor(txn, collection, cq.release(), PlanExecutor::YIELD_MANUAL, &rawExec, options)
- .isOK());
+ auto statusWithPlanExecutor =
+ getExecutor(txn, collection, std::move(cq), PlanExecutor::YIELD_MANUAL, options);
+ massert(17245,
+ "Could not get executor for query " + query.toString(),
+ statusWithPlanExecutor.isOK());
- unique_ptr<PlanExecutor> exec(rawExec);
+ unique_ptr<PlanExecutor> exec = std::move(statusWithPlanExecutor.getValue());
PlanExecutor::ExecState state;
RecordId loc;
if (PlanExecutor::ADVANCED == (state = exec->getNext(NULL, &loc))) {
diff --git a/src/mongo/db/exec/delete.cpp b/src/mongo/db/exec/delete.cpp
index 5831b44e86a..31bb3aaf66f 100644
--- a/src/mongo/db/exec/delete.cpp
+++ b/src/mongo/db/exec/delete.cpp
@@ -303,10 +303,10 @@ const SpecificStats* DeleteStage::getSpecificStats() const {
}
// static
-long long DeleteStage::getNumDeleted(PlanExecutor* exec) {
- invariant(exec->getRootStage()->isEOF());
- invariant(exec->getRootStage()->stageType() == STAGE_DELETE);
- DeleteStage* deleteStage = static_cast<DeleteStage*>(exec->getRootStage());
+long long DeleteStage::getNumDeleted(const PlanExecutor& exec) {
+ invariant(exec.getRootStage()->isEOF());
+ invariant(exec.getRootStage()->stageType() == STAGE_DELETE);
+ DeleteStage* deleteStage = static_cast<DeleteStage*>(exec.getRootStage());
const DeleteStats* deleteStats =
static_cast<const DeleteStats*>(deleteStage->getSpecificStats());
return deleteStats->docsDeleted;
diff --git a/src/mongo/db/exec/delete.h b/src/mongo/db/exec/delete.h
index 75556152680..ef823781ef2 100644
--- a/src/mongo/db/exec/delete.h
+++ b/src/mongo/db/exec/delete.h
@@ -113,7 +113,7 @@ public:
*
* Should only be called if the root plan stage of 'exec' is DELETE and if 'exec' is EOF.
*/
- static long long getNumDeleted(PlanExecutor* exec);
+ static long long getNumDeleted(const PlanExecutor& exec);
private:
// Transactional context. Not owned by us.
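
The delete.h change above (and the matching update.h change further down) switches these static helpers from a PlanExecutor* they never owned to a const PlanExecutor&, which documents the borrow at the call site. A usage sketch mirroring the ops/delete.cpp hunk later in this diff:

    std::unique_ptr<PlanExecutor> exec =
        uassertStatusOK(getExecutorDelete(txn, collection, &parsedDelete));
    uassertStatusOK(exec->executePlan());
    long long numDeleted = DeleteStage::getNumDeleted(*exec);  // borrowed by const reference
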
diff --git a/src/mongo/db/exec/stagedebug_cmd.cpp b/src/mongo/db/exec/stagedebug_cmd.cpp
index 3dc0aa8549b..a0188289c29 100644
--- a/src/mongo/db/exec/stagedebug_cmd.cpp
+++ b/src/mongo/db/exec/stagedebug_cmd.cpp
@@ -55,6 +55,7 @@
#include "mongo/db/jsobj.h"
#include "mongo/db/matcher/expression_parser.h"
#include "mongo/db/query/plan_executor.h"
+#include "mongo/stdx/memory.h"
#include "mongo/util/log.h"
namespace mongo {
@@ -62,6 +63,7 @@ namespace mongo {
using std::unique_ptr;
using std::string;
using std::vector;
+using stdx::make_unique;
/**
* A command for manually constructing a query tree and running it.
@@ -168,13 +170,13 @@ public:
// Add a fetch at the top for the user so we can get obj back for sure.
// TODO: Do we want to do this for the user? I think so.
- PlanStage* rootFetch = new FetchStage(txn, ws.get(), userRoot, NULL, collection);
+ unique_ptr<PlanStage> rootFetch =
+ make_unique<FetchStage>(txn, ws.get(), userRoot, nullptr, collection);
- PlanExecutor* rawExec;
- Status execStatus = PlanExecutor::make(
- txn, ws.release(), rootFetch, collection, PlanExecutor::YIELD_AUTO, &rawExec);
- fassert(28536, execStatus);
- std::unique_ptr<PlanExecutor> exec(rawExec);
+ auto statusWithPlanExecutor = PlanExecutor::make(
+ txn, std::move(ws), std::move(rootFetch), collection, PlanExecutor::YIELD_AUTO);
+ fassert(28536, statusWithPlanExecutor.getStatus());
+ std::unique_ptr<PlanExecutor> exec = std::move(statusWithPlanExecutor.getValue());
BSONArrayBuilder resultBuilder(result.subarrayStart("results"));
diff --git a/src/mongo/db/exec/update.cpp b/src/mongo/db/exec/update.cpp
index e37d716d338..021e074e197 100644
--- a/src/mongo/db/exec/update.cpp
+++ b/src/mongo/db/exec/update.cpp
@@ -1030,11 +1030,11 @@ const SpecificStats* UpdateStage::getSpecificStats() const {
}
// static
-UpdateResult UpdateStage::makeUpdateResult(PlanExecutor* exec, OpDebug* opDebug) {
+UpdateResult UpdateStage::makeUpdateResult(const PlanExecutor& exec, OpDebug* opDebug) {
// Get stats from the root stage.
- invariant(exec->getRootStage()->isEOF());
- invariant(exec->getRootStage()->stageType() == STAGE_UPDATE);
- UpdateStage* updateStage = static_cast<UpdateStage*>(exec->getRootStage());
+ invariant(exec.getRootStage()->isEOF());
+ invariant(exec.getRootStage()->stageType() == STAGE_UPDATE);
+ UpdateStage* updateStage = static_cast<UpdateStage*>(exec.getRootStage());
const UpdateStats* updateStats =
static_cast<const UpdateStats*>(updateStage->getSpecificStats());
diff --git a/src/mongo/db/exec/update.h b/src/mongo/db/exec/update.h
index 28ff014b232..a891e2df9d8 100644
--- a/src/mongo/db/exec/update.h
+++ b/src/mongo/db/exec/update.h
@@ -111,7 +111,7 @@ public:
*
* Should only be called once this stage is EOF.
*/
- static UpdateResult makeUpdateResult(PlanExecutor* exec, OpDebug* opDebug);
+ static UpdateResult makeUpdateResult(const PlanExecutor& exec, OpDebug* opDebug);
/**
* Computes the document to insert if the upsert flag is set to true and no matching
diff --git a/src/mongo/db/instance.cpp b/src/mongo/db/instance.cpp
index a445171a32a..14936651d40 100644
--- a/src/mongo/db/instance.cpp
+++ b/src/mongo/db/instance.cpp
@@ -151,7 +151,7 @@ MONGO_FP_DECLARE(rsStopGetMore);
namespace {
-std::unique_ptr<AuthzManagerExternalState> createAuthzManagerExternalStateMongod() {
+unique_ptr<AuthzManagerExternalState> createAuthzManagerExternalStateMongod() {
return stdx::make_unique<AuthzManagerExternalStateMongod>();
}
@@ -689,14 +689,12 @@ void receivedUpdate(OperationContext* txn, const NamespaceString& nsString, Mess
// The common case: no implicit collection creation
if (!upsert || ctx.db()->getCollection(nsString) != NULL) {
- PlanExecutor* rawExec;
- uassertStatusOK(getExecutorUpdate(
- txn, ctx.db()->getCollection(nsString), &parsedUpdate, &op.debug(), &rawExec));
- std::unique_ptr<PlanExecutor> exec(rawExec);
+ unique_ptr<PlanExecutor> exec = uassertStatusOK(getExecutorUpdate(
+ txn, ctx.db()->getCollection(nsString), &parsedUpdate, &op.debug()));
// Run the plan and get stats out.
uassertStatusOK(exec->executePlan());
- UpdateResult res = UpdateStage::makeUpdateResult(exec.get(), &op.debug());
+ UpdateResult res = UpdateStage::makeUpdateResult(*exec, &op.debug());
// for getlasterror
LastError::get(txn->getClient())
@@ -738,14 +736,12 @@ void receivedUpdate(OperationContext* txn, const NamespaceString& nsString, Mess
wuow.commit();
}
- PlanExecutor* rawExec;
- uassertStatusOK(getExecutorUpdate(
- txn, ctx.db()->getCollection(nsString), &parsedUpdate, &op.debug(), &rawExec));
- std::unique_ptr<PlanExecutor> exec(rawExec);
+ unique_ptr<PlanExecutor> exec = uassertStatusOK(
+ getExecutorUpdate(txn, ctx.db()->getCollection(nsString), &parsedUpdate, &op.debug()));
// Run the plan and get stats out.
uassertStatusOK(exec->executePlan());
- UpdateResult res = UpdateStage::makeUpdateResult(exec.get(), &op.debug());
+ UpdateResult res = UpdateStage::makeUpdateResult(*exec, &op.debug());
LastError::get(txn->getClient()).recordUpdate(res.existing, res.numMatched, res.upserted);
}
@@ -795,14 +791,12 @@ void receivedDelete(OperationContext* txn, const NamespaceString& nsString, Mess
txn->lockState(), nsString.ns(), parsedDelete.isIsolated() ? MODE_X : MODE_IX);
OldClientContext ctx(txn, nsString);
- PlanExecutor* rawExec;
- uassertStatusOK(
- getExecutorDelete(txn, ctx.db()->getCollection(nsString), &parsedDelete, &rawExec));
- std::unique_ptr<PlanExecutor> exec(rawExec);
+ unique_ptr<PlanExecutor> exec = uassertStatusOK(
+ getExecutorDelete(txn, ctx.db()->getCollection(nsString), &parsedDelete));
// Run the plan and get the number of docs deleted.
uassertStatusOK(exec->executePlan());
- long long n = DeleteStage::getNumDeleted(exec.get());
+ long long n = DeleteStage::getNumDeleted(*exec);
LastError::get(txn->getClient()).recordDelete(n);
op.debug().ndeleted = n;
diff --git a/src/mongo/db/ops/delete.cpp b/src/mongo/db/ops/delete.cpp
index 86ed88395b5..5cf5e113cd4 100644
--- a/src/mongo/db/ops/delete.cpp
+++ b/src/mongo/db/ops/delete.cpp
@@ -67,12 +67,11 @@ long long deleteObjects(OperationContext* txn,
ParsedDelete parsedDelete(txn, &request);
uassertStatusOK(parsedDelete.parseRequest());
- PlanExecutor* rawExec;
- uassertStatusOK(getExecutorDelete(txn, collection, &parsedDelete, &rawExec));
- std::unique_ptr<PlanExecutor> exec(rawExec);
+ std::unique_ptr<PlanExecutor> exec =
+ uassertStatusOK(getExecutorDelete(txn, collection, &parsedDelete));
uassertStatusOK(exec->executePlan());
- return DeleteStage::getNumDeleted(exec.get());
+ return DeleteStage::getNumDeleted(*exec);
}
} // namespace mongo
diff --git a/src/mongo/db/ops/update.cpp b/src/mongo/db/ops/update.cpp
index 43c9be211ce..80c20fecd91 100644
--- a/src/mongo/db/ops/update.cpp
+++ b/src/mongo/db/ops/update.cpp
@@ -99,12 +99,11 @@ UpdateResult update(OperationContext* txn,
ParsedUpdate parsedUpdate(txn, &request);
uassertStatusOK(parsedUpdate.parseRequest());
- PlanExecutor* rawExec;
- uassertStatusOK(getExecutorUpdate(txn, collection, &parsedUpdate, opDebug, &rawExec));
- std::unique_ptr<PlanExecutor> exec(rawExec);
+ std::unique_ptr<PlanExecutor> exec =
+ uassertStatusOK(getExecutorUpdate(txn, collection, &parsedUpdate, opDebug));
uassertStatusOK(exec->executePlan());
- return UpdateStage::makeUpdateResult(exec.get(), opDebug);
+ return UpdateStage::makeUpdateResult(*exec, opDebug);
}
BSONObj applyUpdateOperators(const BSONObj& from, const BSONObj& operators) {
diff --git a/src/mongo/db/pipeline/pipeline_d.cpp b/src/mongo/db/pipeline/pipeline_d.cpp
index 0a10cfc5f3d..6281b84bdf4 100644
--- a/src/mongo/db/pipeline/pipeline_d.cpp
+++ b/src/mongo/db/pipeline/pipeline_d.cpp
@@ -184,22 +184,22 @@ shared_ptr<PlanExecutor> PipelineD::prepareCursorSource(
auto statusWithCQ = CanonicalQuery::canonicalize(
pExpCtx->ns, queryObj, sortObj, projectionForQuery, whereCallback);
- PlanExecutor* rawExec;
- if (statusWithCQ.isOK() &&
- getExecutor(txn,
- collection,
- statusWithCQ.getValue().release(),
- PlanExecutor::YIELD_AUTO,
- &rawExec,
- runnerOptions).isOK()) {
- // success: The PlanExecutor will handle sorting for us using an index.
- exec.reset(rawExec);
- sortInRunner = true;
-
- sources.pop_front();
- if (sortStage->getLimitSrc()) {
- // need to reinsert coalesced $limit after removing $sort
- sources.push_front(sortStage->getLimitSrc());
+ if (statusWithCQ.isOK()) {
+ auto statusWithPlanExecutor = getExecutor(txn,
+ collection,
+ std::move(statusWithCQ.getValue()),
+ PlanExecutor::YIELD_AUTO,
+ runnerOptions);
+ if (statusWithPlanExecutor.isOK()) {
+ // success: The PlanExecutor will handle sorting for us using an index.
+ exec = std::move(statusWithPlanExecutor.getValue());
+ sortInRunner = true;
+
+ sources.pop_front();
+ if (sortStage->getLimitSrc()) {
+ // need to reinsert coalesced $limit after removing $sort
+ sources.push_front(sortStage->getLimitSrc());
+ }
}
}
}
@@ -209,12 +209,12 @@ shared_ptr<PlanExecutor> PipelineD::prepareCursorSource(
auto statusWithCQ = CanonicalQuery::canonicalize(
pExpCtx->ns, queryObj, noSort, projectionForQuery, whereCallback);
uassertStatusOK(statusWithCQ.getStatus());
- unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
- PlanExecutor* rawExec;
- uassertStatusOK(getExecutor(
- txn, collection, cq.release(), PlanExecutor::YIELD_AUTO, &rawExec, runnerOptions));
- exec.reset(rawExec);
+ exec = uassertStatusOK(getExecutor(txn,
+ collection,
+ std::move(statusWithCQ.getValue()),
+ PlanExecutor::YIELD_AUTO,
+ runnerOptions));
}
diff --git a/src/mongo/db/query/explain.cpp b/src/mongo/db/query/explain.cpp
index 748e8af3b63..72c149f162b 100644
--- a/src/mongo/db/query/explain.cpp
+++ b/src/mongo/db/query/explain.cpp
@@ -670,10 +670,10 @@ std::string Explain::getPlanSummary(const PlanStage* root) {
}
// static
-void Explain::getSummaryStats(const PlanExecutor* exec, PlanSummaryStats* statsOut) {
+void Explain::getSummaryStats(const PlanExecutor& exec, PlanSummaryStats* statsOut) {
invariant(NULL != statsOut);
- PlanStage* root = exec->getRootStage();
+ PlanStage* root = exec.getRootStage();
// We can get some of the fields we need from the common stats stored in the
// root stage of the plan tree.
diff --git a/src/mongo/db/query/explain.h b/src/mongo/db/query/explain.h
index 7a0013294a0..85587811c2d 100644
--- a/src/mongo/db/query/explain.h
+++ b/src/mongo/db/query/explain.h
@@ -137,7 +137,7 @@ public:
*
* Does not take ownership of its arguments.
*/
- static void getSummaryStats(const PlanExecutor* exec, PlanSummaryStats* statsOut);
+ static void getSummaryStats(const PlanExecutor& exec, PlanSummaryStats* statsOut);
private:
/**
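
With Explain::getSummaryStats() now taking the executor by const reference, callers dereference the smart pointer instead of passing exec.get(); the distinct.cpp and geo_near_cmd.cpp hunks above show the new shape. A short sketch:

    // 'exec' is a std::unique_ptr<PlanExecutor> obtained from one of the getExecutor*() factories.
    PlanSummaryStats summary;
    Explain::getSummaryStats(*exec, &summary);
    // Fields such as summary.totalKeysExamined and summary.totalDocsExamined are then
    // available for reporting, as geo_near_cmd.cpp does above.
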
diff --git a/src/mongo/db/query/find.cpp b/src/mongo/db/query/find.cpp
index a142377bab7..33b95a1cf77 100644
--- a/src/mongo/db/query/find.cpp
+++ b/src/mongo/db/query/find.cpp
@@ -190,12 +190,11 @@ void beginQueryOp(OperationContext* txn,
}
void endQueryOp(OperationContext* txn,
- PlanExecutor* exec,
+ const PlanExecutor& exec,
int dbProfilingLevel,
int numResults,
CursorId cursorId) {
auto curop = CurOp::get(txn);
- invariant(exec);
// Fill out basic curop query exec properties.
curop->debug().nreturned = numResults;
@@ -217,13 +216,13 @@ void endQueryOp(OperationContext* txn,
if (dbProfilingLevel > 0 || curop->elapsedMillis() > serverGlobalParams.slowMS ||
logger::globalLogDomain()->shouldLog(queryLogComponent, logLevelOne)) {
// Generate plan summary string.
- curop->debug().planSummary = Explain::getPlanSummary(exec);
+ curop->debug().planSummary = Explain::getPlanSummary(&exec);
}
// Set debug information for consumption by the profiler only.
if (dbProfilingLevel > 0) {
// Get BSON stats.
- unique_ptr<PlanStageStats> execStats(exec->getStats());
+ unique_ptr<PlanStageStats> execStats(exec.getStats());
BSONObjBuilder statsBob;
Explain::statsToBSON(*execStats, &statsBob);
curop->debug().execStats.set(statsBob.obj());
@@ -531,14 +530,9 @@ std::string runQuery(OperationContext* txn,
ctx.getDb() ? ctx.getDb()->getProfilingLevel() : serverGlobalParams.defaultProfile;
// We have a parsed query. Time to get the execution plan for it.
- unique_ptr<PlanExecutor> exec;
- {
- PlanExecutor* rawExec;
- Status execStatus =
- getExecutorFind(txn, collection, nss, cq.release(), PlanExecutor::YIELD_AUTO, &rawExec);
- uassertStatusOK(execStatus);
- exec.reset(rawExec);
- }
+ std::unique_ptr<PlanExecutor> exec = uassertStatusOK(
+ getExecutorFind(txn, collection, nss, std::move(cq), PlanExecutor::YIELD_AUTO));
+
const LiteParsedQuery& pq = exec->getCanonicalQuery()->getParsed();
// If it's actually an explain, do the explain and return rather than falling through
@@ -707,10 +701,10 @@ std::string runQuery(OperationContext* txn,
// use by future getmore ops).
cc->setLeftoverMaxTimeMicros(curop.getRemainingMaxTimeMicros());
- endQueryOp(txn, cc->getExecutor(), dbProfilingLevel, numResults, ccId);
+ endQueryOp(txn, *cc->getExecutor(), dbProfilingLevel, numResults, ccId);
} else {
LOG(5) << "Not caching executor but returning " << numResults << " results.\n";
- endQueryOp(txn, exec.get(), dbProfilingLevel, numResults, ccId);
+ endQueryOp(txn, *exec, dbProfilingLevel, numResults, ccId);
}
// Add the results from the query into the output buffer.
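
The find.cpp hunk above condenses into the caller-side ownership idiom this commit standardizes: instead of cq.release() handing a raw CanonicalQuery to the factory and a raw executor coming back through an out-parameter, the unique_ptr is std::move()d in and the executor is unwrapped from the StatusWith. A sketch, assuming the surrounding runQuery() context:

    // Before:
    //   PlanExecutor* rawExec;
    //   Status execStatus = getExecutorFind(txn, collection, nss, cq.release(),
    //                                       PlanExecutor::YIELD_AUTO, &rawExec);
    //   uassertStatusOK(execStatus);
    //   exec.reset(rawExec);
    //
    // After:
    std::unique_ptr<PlanExecutor> exec = uassertStatusOK(
        getExecutorFind(txn, collection, nss, std::move(cq), PlanExecutor::YIELD_AUTO));
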
diff --git a/src/mongo/db/query/find.h b/src/mongo/db/query/find.h
index 330afc76b90..a3d1a246ab6 100644
--- a/src/mongo/db/query/find.h
+++ b/src/mongo/db/query/find.h
@@ -127,7 +127,7 @@ void beginQueryOp(OperationContext* txn,
* do expensive stats gathering.
*/
void endQueryOp(OperationContext* txn,
- PlanExecutor* exec,
+ const PlanExecutor& exec,
int dbProfilingLevel,
int numResults,
CursorId cursorId);
@@ -139,13 +139,10 @@ void endQueryOp(OperationContext* txn,
*
* The oplog start finding hack requires that 'cq' has a $gt or $gte predicate over
* a field named 'ts'.
- *
- * On success, caller takes ownership of *execOut.
*/
-Status getOplogStartHack(OperationContext* txn,
- Collection* collection,
- CanonicalQuery* cq,
- PlanExecutor** execOut);
+StatusWith<std::unique_ptr<PlanExecutor>> getOplogStartHack(OperationContext* txn,
+ Collection* collection,
+ std::unique_ptr<CanonicalQuery> cq);
/**
* Called from the getMore entry point in ops/query.cpp.
diff --git a/src/mongo/db/query/get_executor.cpp b/src/mongo/db/query/get_executor.cpp
index 765da9bd893..c87b17859e9 100644
--- a/src/mongo/db/query/get_executor.cpp
+++ b/src/mongo/db/query/get_executor.cpp
@@ -83,6 +83,7 @@ using std::unique_ptr;
using std::endl;
using std::string;
using std::vector;
+using stdx::make_unique;
// static
void filterAllowedIndexEntries(const AllowedIndices& allowedIndices,
@@ -147,7 +148,7 @@ void fillOutPlannerParams(OperationContext* txn,
// Filter index catalog if index filters are specified for query.
// Also, signal to planner that application hint should be ignored.
if (querySettings->getAllowedIndices(planCacheKey, &allowedIndicesRaw)) {
- std::unique_ptr<AllowedIndices> allowedIndices(allowedIndicesRaw);
+ unique_ptr<AllowedIndices> allowedIndices(allowedIndicesRaw);
filterAllowedIndexEntries(*allowedIndices, &plannerParams->indices);
plannerParams->indexFiltersApplied = true;
}
@@ -294,7 +295,7 @@ Status prepareExecution(OperationContext* opCtx,
if (PlanCache::shouldCacheQuery(*canonicalQuery) &&
collection->infoCache()->getPlanCache()->get(*canonicalQuery, &rawCS).isOK()) {
// We have a CachedSolution. Have the planner turn it into a QuerySolution.
- std::unique_ptr<CachedSolution> cs(rawCS);
+ unique_ptr<CachedSolution> cs(rawCS);
QuerySolution* qs;
Status status = QueryPlanner::planFromCache(*canonicalQuery, plannerParams, *cs, &qs);
@@ -401,47 +402,50 @@ Status prepareExecution(OperationContext* opCtx,
} // namespace
-Status getExecutor(OperationContext* txn,
- Collection* collection,
- CanonicalQuery* rawCanonicalQuery,
- PlanExecutor::YieldPolicy yieldPolicy,
- PlanExecutor** out,
- size_t plannerOptions) {
- unique_ptr<CanonicalQuery> canonicalQuery(rawCanonicalQuery);
- unique_ptr<WorkingSet> ws(new WorkingSet());
- PlanStage* root;
- QuerySolution* querySolution;
- Status status = prepareExecution(
- txn, collection, ws.get(), canonicalQuery.get(), plannerOptions, &root, &querySolution);
+StatusWith<unique_ptr<PlanExecutor>> getExecutor(OperationContext* txn,
+ Collection* collection,
+ unique_ptr<CanonicalQuery> canonicalQuery,
+ PlanExecutor::YieldPolicy yieldPolicy,
+ size_t plannerOptions) {
+ unique_ptr<WorkingSet> ws = make_unique<WorkingSet>();
+ PlanStage* rawRoot;
+ QuerySolution* rawQuerySolution;
+ Status status = prepareExecution(txn,
+ collection,
+ ws.get(),
+ canonicalQuery.get(),
+ plannerOptions,
+ &rawRoot,
+ &rawQuerySolution);
if (!status.isOK()) {
return status;
}
- invariant(root);
+ invariant(rawRoot);
+ unique_ptr<PlanStage> root(rawRoot);
+ unique_ptr<QuerySolution> querySolution(rawQuerySolution);
// We must have a tree of stages in order to have a valid plan executor, but the query
// solution may be null.
return PlanExecutor::make(txn,
- ws.release(),
- root,
- querySolution,
- canonicalQuery.release(),
+ std::move(ws),
+ std::move(root),
+ std::move(querySolution),
+ std::move(canonicalQuery),
collection,
- yieldPolicy,
- out);
+ yieldPolicy);
}
-Status getExecutor(OperationContext* txn,
- Collection* collection,
- const std::string& ns,
- const BSONObj& unparsedQuery,
- PlanExecutor::YieldPolicy yieldPolicy,
- PlanExecutor** out,
- size_t plannerOptions) {
+StatusWith<unique_ptr<PlanExecutor>> getExecutor(OperationContext* txn,
+ Collection* collection,
+ const std::string& ns,
+ const BSONObj& unparsedQuery,
+ PlanExecutor::YieldPolicy yieldPolicy,
+ size_t plannerOptions) {
if (!collection) {
LOG(2) << "Collection " << ns << " does not exist."
<< " Using EOF stage: " << unparsedQuery.toString();
- EOFStage* eofStage = new EOFStage();
- WorkingSet* ws = new WorkingSet();
- return PlanExecutor::make(txn, ws, eofStage, ns, yieldPolicy, out);
+ unique_ptr<EOFStage> eofStage = make_unique<EOFStage>();
+ unique_ptr<WorkingSet> ws = make_unique<WorkingSet>();
+ return PlanExecutor::make(txn, std::move(ws), std::move(eofStage), ns, yieldPolicy);
}
if (!CanonicalQuery::isSimpleIdQuery(unparsedQuery) ||
@@ -452,24 +456,25 @@ Status getExecutor(OperationContext* txn,
if (!statusWithCQ.isOK()) {
return statusWithCQ.getStatus();
}
- std::unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
+ unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
// Takes ownership of 'cq'.
- return getExecutor(txn, collection, cq.release(), yieldPolicy, out, plannerOptions);
+ return getExecutor(txn, collection, std::move(cq), yieldPolicy, plannerOptions);
}
LOG(2) << "Using idhack: " << unparsedQuery.toString();
- WorkingSet* ws = new WorkingSet();
- PlanStage* root = new IDHackStage(txn, collection, unparsedQuery["_id"].wrap(), ws);
+ unique_ptr<WorkingSet> ws = make_unique<WorkingSet>();
+ unique_ptr<PlanStage> root =
+ make_unique<IDHackStage>(txn, collection, unparsedQuery["_id"].wrap(), ws.get());
// Might have to filter out orphaned docs.
if (plannerOptions & QueryPlannerParams::INCLUDE_SHARD_FILTER) {
- root =
- new ShardFilterStage(shardingState.getCollectionMetadata(collection->ns()), ws, root);
+ root = make_unique<ShardFilterStage>(
+ shardingState.getCollectionMetadata(collection->ns()), ws.get(), root.release());
}
- return PlanExecutor::make(txn, ws, root, collection, yieldPolicy, out);
+ return PlanExecutor::make(txn, std::move(ws), std::move(root), collection, yieldPolicy);
}
//
@@ -496,13 +501,11 @@ mongo::BSONElement extractOplogTsOptime(const mongo::MatchExpression* me) {
return static_cast<const mongo::ComparisonMatchExpression*>(me)->getData();
}
-Status getOplogStartHack(OperationContext* txn,
- Collection* collection,
- CanonicalQuery* cq,
- PlanExecutor** execOut) {
+StatusWith<unique_ptr<PlanExecutor>> getOplogStartHack(OperationContext* txn,
+ Collection* collection,
+ unique_ptr<CanonicalQuery> cq) {
invariant(collection);
- invariant(cq);
- unique_ptr<CanonicalQuery> autoCq(cq);
+ invariant(cq.get());
// A query can only do oplog start finding if it has a top-level $gt or $gte predicate over
// the "ts" field (the operation's timestamp). Find that predicate and pass it to
@@ -546,15 +549,14 @@ Status getOplogStartHack(OperationContext* txn,
LOG(3) << "Using OplogStart stage";
// Fallback to trying the OplogStart stage.
- WorkingSet* oplogws = new WorkingSet();
- OplogStart* stage = new OplogStart(txn, collection, tsExpr, oplogws);
- PlanExecutor* rawExec;
-
+ unique_ptr<WorkingSet> oplogws = make_unique<WorkingSet>();
+ unique_ptr<OplogStart> stage =
+ make_unique<OplogStart>(txn, collection, tsExpr, oplogws.get());
// Takes ownership of oplogws and stage.
- Status execStatus =
- PlanExecutor::make(txn, oplogws, stage, collection, PlanExecutor::YIELD_AUTO, &rawExec);
- invariant(execStatus.isOK());
- std::unique_ptr<PlanExecutor> exec(rawExec);
+ auto statusWithPlanExecutor = PlanExecutor::make(
+ txn, std::move(oplogws), std::move(stage), collection, PlanExecutor::YIELD_AUTO);
+ invariant(statusWithPlanExecutor.isOK());
+ unique_ptr<PlanExecutor> exec = std::move(statusWithPlanExecutor.getValue());
// The stage returns a RecordId of where to start.
startLoc = RecordId();
@@ -562,8 +564,7 @@ Status getOplogStartHack(OperationContext* txn,
// This is normal. The start of the oplog is the beginning of the collection.
if (PlanExecutor::IS_EOF == state) {
- return getExecutor(
- txn, collection, autoCq.release(), PlanExecutor::YIELD_AUTO, execOut);
+ return getExecutor(txn, collection, std::move(cq), PlanExecutor::YIELD_AUTO);
}
// This is not normal. An error was encountered.
@@ -579,31 +580,30 @@ Status getOplogStartHack(OperationContext* txn,
params.direction = CollectionScanParams::FORWARD;
params.tailable = cq->getParsed().isTailable();
- WorkingSet* ws = new WorkingSet();
- CollectionScan* cs = new CollectionScan(txn, params, ws, cq->root());
+ unique_ptr<WorkingSet> ws = make_unique<WorkingSet>();
+ unique_ptr<CollectionScan> cs = make_unique<CollectionScan>(txn, params, ws.get(), cq->root());
// Takes ownership of 'ws', 'cs', and 'cq'.
return PlanExecutor::make(
- txn, ws, cs, autoCq.release(), collection, PlanExecutor::YIELD_AUTO, execOut);
+ txn, std::move(ws), std::move(cs), std::move(cq), collection, PlanExecutor::YIELD_AUTO);
}
} // namespace
-Status getExecutorFind(OperationContext* txn,
- Collection* collection,
- const NamespaceString& nss,
- CanonicalQuery* rawCanonicalQuery,
- PlanExecutor::YieldPolicy yieldPolicy,
- PlanExecutor** out) {
- std::unique_ptr<CanonicalQuery> cq(rawCanonicalQuery);
- if (NULL != collection && cq->getParsed().isOplogReplay()) {
- return getOplogStartHack(txn, collection, cq.release(), out);
+StatusWith<unique_ptr<PlanExecutor>> getExecutorFind(OperationContext* txn,
+ Collection* collection,
+ const NamespaceString& nss,
+ unique_ptr<CanonicalQuery> canonicalQuery,
+ PlanExecutor::YieldPolicy yieldPolicy) {
+ if (NULL != collection && canonicalQuery->getParsed().isOplogReplay()) {
+ return getOplogStartHack(txn, collection, std::move(canonicalQuery));
}
size_t options = QueryPlannerParams::DEFAULT;
if (shardingState.needCollectionMetadata(txn->getClient(), nss.ns())) {
options |= QueryPlannerParams::INCLUDE_SHARD_FILTER;
}
- return getExecutor(txn, collection, cq.release(), PlanExecutor::YIELD_AUTO, out, options);
+ return getExecutor(
+ txn, collection, std::move(canonicalQuery), PlanExecutor::YIELD_AUTO, options);
}
namespace {
@@ -615,13 +615,13 @@ namespace {
* If the projection was valid, then return Status::OK() with a pointer to the newly created
* ProjectionStage. Otherwise, return a status indicating the error reason.
*/
-StatusWith<std::unique_ptr<PlanStage>> applyProjection(OperationContext* txn,
- const NamespaceString& nsString,
- CanonicalQuery* cq,
- const BSONObj& proj,
- bool allowPositional,
- WorkingSet* ws,
- std::unique_ptr<PlanStage> root) {
+StatusWith<unique_ptr<PlanStage>> applyProjection(OperationContext* txn,
+ const NamespaceString& nsString,
+ CanonicalQuery* cq,
+ const BSONObj& proj,
+ bool allowPositional,
+ WorkingSet* ws,
+ unique_ptr<PlanStage> root) {
invariant(!proj.isEmpty());
ParsedProjection* rawParsedProj;
@@ -629,7 +629,7 @@ StatusWith<std::unique_ptr<PlanStage>> applyProjection(OperationContext* txn,
if (!ppStatus.isOK()) {
return ppStatus;
}
- std::unique_ptr<ParsedProjection> pp(rawParsedProj);
+ unique_ptr<ParsedProjection> pp(rawParsedProj);
// ProjectionExec requires the MatchDetails from the query expression when the projection
// uses the positional operator. Since the query may no longer match the newly-updated
@@ -642,7 +642,7 @@ StatusWith<std::unique_ptr<PlanStage>> applyProjection(OperationContext* txn,
ProjectionStageParams params(WhereCallbackReal(txn, nsString.db()));
params.projObj = proj;
params.fullExpression = cq->root();
- return {stdx::make_unique<ProjectionStage>(params, ws, root.release())};
+ return {make_unique<ProjectionStage>(params, ws, root.release())};
}
} // namespace
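applyProjection() sets the template for fallible helpers in this file: the current root comes in as a unique_ptr (so the subtree is consumed whether or not validation succeeds) and the new root goes back out inside a StatusWith. A compilable approximation of that shape, with a deliberately simplified Result type standing in for StatusWith and made-up node types (none of these are the server's classes):

    #include <memory>
    #include <string>
    #include <utility>

    struct Node {
        virtual ~Node() = default;
    };
    struct ProjectionNode : Node {
        explicit ProjectionNode(std::unique_ptr<Node> child) : child(std::move(child)) {}
        std::unique_ptr<Node> child;  // the new root owns the old one
    };

    // Simplified stand-in for StatusWith<std::unique_ptr<Node>>.
    struct Result {
        std::string error;            // empty means OK
        std::unique_ptr<Node> value;
    };

    Result applyProjectionLike(const std::string& projSpec, std::unique_ptr<Node> root) {
        if (projSpec.empty()) {
            // 'root' is destroyed here; the caller has given up ownership either way.
            return {"projection must be non-empty", nullptr};
        }
        return {"", std::make_unique<ProjectionNode>(std::move(root))};
    }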
@@ -651,10 +651,9 @@ StatusWith<std::unique_ptr<PlanStage>> applyProjection(OperationContext* txn,
// Delete
//
-Status getExecutorDelete(OperationContext* txn,
- Collection* collection,
- ParsedDelete* parsedDelete,
- PlanExecutor** execOut) {
+StatusWith<unique_ptr<PlanExecutor>> getExecutorDelete(OperationContext* txn,
+ Collection* collection,
+ ParsedDelete* parsedDelete) {
const DeleteRequest* request = parsedDelete->getRequest();
const NamespaceString& nss(request->getNamespaceString());
@@ -688,7 +687,7 @@ Status getExecutorDelete(OperationContext* txn,
deleteStageParams.isExplain = request->isExplain();
deleteStageParams.returnDeleted = request->shouldReturnDeleted();
- unique_ptr<WorkingSet> ws(new WorkingSet());
+ unique_ptr<WorkingSet> ws = make_unique<WorkingSet>();
PlanExecutor::YieldPolicy policy =
parsedDelete->canYield() ? PlanExecutor::YIELD_AUTO : PlanExecutor::YIELD_MANUAL;
@@ -703,9 +702,9 @@ Status getExecutorDelete(OperationContext* txn,
// a DeleteStage, so in this case we put a DeleteStage on top of an EOFStage.
LOG(2) << "Collection " << nss.ns() << " does not exist."
<< " Using EOF stage: " << unparsedQuery.toString();
- DeleteStage* deleteStage =
- new DeleteStage(txn, deleteStageParams, ws.get(), NULL, new EOFStage());
- return PlanExecutor::make(txn, ws.release(), deleteStage, nss.ns(), policy, execOut);
+ unique_ptr<DeleteStage> deleteStage =
+ make_unique<DeleteStage>(txn, deleteStageParams, ws.get(), nullptr, new EOFStage());
+ return PlanExecutor::make(txn, std::move(ws), std::move(deleteStage), nss.ns(), policy);
}
if (CanonicalQuery::isSimpleIdQuery(unparsedQuery) &&
@@ -714,9 +713,9 @@ Status getExecutorDelete(OperationContext* txn,
PlanStage* idHackStage =
new IDHackStage(txn, collection, unparsedQuery["_id"].wrap(), ws.get());
- DeleteStage* root =
- new DeleteStage(txn, deleteStageParams, ws.get(), collection, idHackStage);
- return PlanExecutor::make(txn, ws.release(), root, collection, policy, execOut);
+ unique_ptr<DeleteStage> root =
+ make_unique<DeleteStage>(txn, deleteStageParams, ws.get(), collection, idHackStage);
+ return PlanExecutor::make(txn, std::move(ws), std::move(root), collection, policy);
}
// If we're here then we don't have a parsed query, but we're also not eligible for
@@ -728,7 +727,7 @@ Status getExecutorDelete(OperationContext* txn,
}
// This is the regular path for when we have a CanonicalQuery.
- std::unique_ptr<CanonicalQuery> cq(parsedDelete->releaseParsedQuery());
+ unique_ptr<CanonicalQuery> cq(parsedDelete->releaseParsedQuery());
PlanStage* rawRoot;
QuerySolution* rawQuerySolution;
@@ -739,17 +738,17 @@ Status getExecutorDelete(OperationContext* txn,
return status;
}
invariant(rawRoot);
- std::unique_ptr<QuerySolution> querySolution(rawQuerySolution);
+ unique_ptr<QuerySolution> querySolution(rawQuerySolution);
deleteStageParams.canonicalQuery = cq.get();
rawRoot = new DeleteStage(txn, deleteStageParams, ws.get(), collection, rawRoot);
- std::unique_ptr<PlanStage> root(rawRoot);
+ unique_ptr<PlanStage> root(rawRoot);
if (!request->getProj().isEmpty()) {
invariant(request->shouldReturnDeleted());
const bool allowPositional = true;
- StatusWith<std::unique_ptr<PlanStage>> projStatus = applyProjection(
+ StatusWith<unique_ptr<PlanStage>> projStatus = applyProjection(
txn, nss, cq.get(), request->getProj(), allowPositional, ws.get(), std::move(root));
if (!projStatus.isOK()) {
return projStatus.getStatus();
@@ -760,13 +759,12 @@ Status getExecutorDelete(OperationContext* txn,
// We must have a tree of stages in order to have a valid plan executor, but the query
// solution may be null.
return PlanExecutor::make(txn,
- ws.release(),
- root.release(),
- querySolution.release(),
- cq.release(),
+ std::move(ws),
+ std::move(root),
+ std::move(querySolution),
+ std::move(cq),
collection,
- policy,
- execOut);
+ policy);
}
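prepareExecution() still reports its plan stage and query solution through raw out-parameters, so the rewritten callers adopt those pointers into unique_ptrs immediately, before any later early return could leak them. A standalone sketch of that adoption step around a made-up legacy builder (names are illustrative):

    #include <memory>

    struct StageNode {};
    struct Solution {};

    // Legacy-style builder that hands ownership back through raw out-parameters.
    void legacyPrepare(StageNode** rootOut, Solution** solnOut) {
        *rootOut = new StageNode();
        *solnOut = new Solution();
    }

    void buildAndAdopt() {
        StageNode* rawRoot;
        Solution* rawSoln;
        legacyPrepare(&rawRoot, &rawSoln);
        // Adopt right away so any subsequent early return still frees both objects.
        std::unique_ptr<StageNode> root(rawRoot);
        std::unique_ptr<Solution> soln(rawSoln);
    }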
//
@@ -790,11 +788,10 @@ inline void validateUpdate(const char* ns, const BSONObj& updateobj, const BSONO
} // namespace
-Status getExecutorUpdate(OperationContext* txn,
- Collection* collection,
- ParsedUpdate* parsedUpdate,
- OpDebug* opDebug,
- PlanExecutor** execOut) {
+StatusWith<unique_ptr<PlanExecutor>> getExecutorUpdate(OperationContext* txn,
+ Collection* collection,
+ ParsedUpdate* parsedUpdate,
+ OpDebug* opDebug) {
const UpdateRequest* request = parsedUpdate->getRequest();
UpdateDriver* driver = parsedUpdate->getDriver();
@@ -833,7 +830,7 @@ Status getExecutorUpdate(OperationContext* txn,
PlanExecutor::YieldPolicy policy =
parsedUpdate->canYield() ? PlanExecutor::YIELD_AUTO : PlanExecutor::YIELD_MANUAL;
- unique_ptr<WorkingSet> ws(new WorkingSet());
+ unique_ptr<WorkingSet> ws = make_unique<WorkingSet>();
UpdateStageParams updateStageParams(request, driver, opDebug);
if (!parsedUpdate->hasParsedQuery()) {
@@ -847,10 +844,10 @@ Status getExecutorUpdate(OperationContext* txn,
// an UpdateStage, so in this case we put an UpdateStage on top of an EOFStage.
LOG(2) << "Collection " << nsString.ns() << " does not exist."
<< " Using EOF stage: " << unparsedQuery.toString();
- UpdateStage* updateStage =
- new UpdateStage(txn, updateStageParams, ws.get(), collection, new EOFStage());
+ unique_ptr<UpdateStage> updateStage = make_unique<UpdateStage>(
+ txn, updateStageParams, ws.get(), collection, new EOFStage());
return PlanExecutor::make(
- txn, ws.release(), updateStage, nsString.ns(), policy, execOut);
+ txn, std::move(ws), std::move(updateStage), nsString.ns(), policy);
}
if (CanonicalQuery::isSimpleIdQuery(unparsedQuery) &&
@@ -859,9 +856,9 @@ Status getExecutorUpdate(OperationContext* txn,
PlanStage* idHackStage =
new IDHackStage(txn, collection, unparsedQuery["_id"].wrap(), ws.get());
- UpdateStage* root =
- new UpdateStage(txn, updateStageParams, ws.get(), collection, idHackStage);
- return PlanExecutor::make(txn, ws.release(), root, collection, policy, execOut);
+ unique_ptr<UpdateStage> root =
+ make_unique<UpdateStage>(txn, updateStageParams, ws.get(), collection, idHackStage);
+ return PlanExecutor::make(txn, std::move(ws), std::move(root), collection, policy);
}
// If we're here then we don't have a parsed query, but we're also not eligible for
@@ -873,7 +870,7 @@ Status getExecutorUpdate(OperationContext* txn,
}
// This is the regular path for when we have a CanonicalQuery.
- std::unique_ptr<CanonicalQuery> cq(parsedUpdate->releaseParsedQuery());
+ unique_ptr<CanonicalQuery> cq(parsedUpdate->releaseParsedQuery());
PlanStage* rawRoot;
QuerySolution* rawQuerySolution;
@@ -884,11 +881,11 @@ Status getExecutorUpdate(OperationContext* txn,
return status;
}
invariant(rawRoot);
- std::unique_ptr<QuerySolution> querySolution(rawQuerySolution);
+ unique_ptr<QuerySolution> querySolution(rawQuerySolution);
updateStageParams.canonicalQuery = cq.get();
rawRoot = new UpdateStage(txn, updateStageParams, ws.get(), collection, rawRoot);
- std::unique_ptr<PlanStage> root(rawRoot);
+ unique_ptr<PlanStage> root(rawRoot);
if (!request->getProj().isEmpty()) {
invariant(request->shouldReturnAnyDocs());
@@ -897,13 +894,13 @@ Status getExecutorUpdate(OperationContext* txn,
// is invalid to use a positional projection because the query expression need not
// match the array element after the update has been applied.
const bool allowPositional = request->shouldReturnOldDocs();
- StatusWith<std::unique_ptr<PlanStage>> projStatus = applyProjection(txn,
- nsString,
- cq.get(),
- request->getProj(),
- allowPositional,
- ws.get(),
- std::move(root));
+ StatusWith<unique_ptr<PlanStage>> projStatus = applyProjection(txn,
+ nsString,
+ cq.get(),
+ request->getProj(),
+ allowPositional,
+ ws.get(),
+ std::move(root));
if (!projStatus.isOK()) {
return projStatus.getStatus();
}
@@ -913,38 +910,36 @@ Status getExecutorUpdate(OperationContext* txn,
// We must have a tree of stages in order to have a valid plan executor, but the query
// solution may be null. Takes ownership of all args other than 'collection' and 'txn'
return PlanExecutor::make(txn,
- ws.release(),
- root.release(),
- querySolution.release(),
- cq.release(),
+ std::move(ws),
+ std::move(root),
+ std::move(querySolution),
+ std::move(cq),
collection,
- policy,
- execOut);
+ policy);
}
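Both the update and delete paths pull the CanonicalQuery out of its Parsed* holder with releaseParsedQuery() and park it in a local unique_ptr, which is what later lets PlanExecutor::make() receive it by std::move. A standalone sketch of that release-then-adopt step (Holder and Query are stand-ins, not the server's types):

    #include <memory>
    #include <utility>

    struct Query {};

    class Holder {
    public:
        explicit Holder(std::unique_ptr<Query> q) : _q(std::move(q)) {}
        // Relinquishes ownership to the caller; the holder keeps nothing behind.
        Query* releaseQuery() { return _q.release(); }

    private:
        std::unique_ptr<Query> _q;
    };

    void consume(Holder* holder) {
        std::unique_ptr<Query> q(holder->releaseQuery());  // adopt the released pointer
        // ... 'q' can now be std::move()d into whatever object finally owns it ...
    }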
//
// Group
//
-Status getExecutorGroup(OperationContext* txn,
- Collection* collection,
- const GroupRequest& request,
- PlanExecutor::YieldPolicy yieldPolicy,
- PlanExecutor** execOut) {
+StatusWith<unique_ptr<PlanExecutor>> getExecutorGroup(OperationContext* txn,
+ Collection* collection,
+ const GroupRequest& request,
+ PlanExecutor::YieldPolicy yieldPolicy) {
if (!globalScriptEngine) {
return Status(ErrorCodes::BadValue, "server-side JavaScript execution is disabled");
}
- unique_ptr<WorkingSet> ws(new WorkingSet());
- PlanStage* root;
- QuerySolution* querySolution;
+ unique_ptr<WorkingSet> ws = make_unique<WorkingSet>();
if (!collection) {
// Treat collections that do not exist as empty collections. Note that the explain
// reporting machinery always assumes that the root stage for a group operation is a
// GroupStage, so in this case we put a GroupStage on top of an EOFStage.
- root = new GroupStage(txn, request, ws.get(), new EOFStage());
- return PlanExecutor::make(txn, ws.release(), root, request.ns, yieldPolicy, execOut);
+ unique_ptr<PlanStage> root =
+ make_unique<GroupStage>(txn, request, ws.get(), new EOFStage());
+
+ return PlanExecutor::make(txn, std::move(ws), std::move(root), request.ns, yieldPolicy);
}
const NamespaceString nss(request.ns);
@@ -958,29 +953,31 @@ Status getExecutorGroup(OperationContext* txn,
unique_ptr<CanonicalQuery> canonicalQuery = std::move(statusWithCQ.getValue());
const size_t defaultPlannerOptions = 0;
+ PlanStage* child;
+ QuerySolution* rawQuerySolution;
Status status = prepareExecution(txn,
collection,
ws.get(),
canonicalQuery.get(),
defaultPlannerOptions,
- &root,
- &querySolution);
+ &child,
+ &rawQuerySolution);
if (!status.isOK()) {
return status;
}
- invariant(root);
+ invariant(child);
- root = new GroupStage(txn, request, ws.get(), root);
+ unique_ptr<PlanStage> root = make_unique<GroupStage>(txn, request, ws.get(), child);
+ unique_ptr<QuerySolution> querySolution(rawQuerySolution);
// We must have a tree of stages in order to have a valid plan executor, but the query
// solution may be null. Takes ownership of all args other than 'collection'.
return PlanExecutor::make(txn,
- ws.release(),
- root,
- querySolution,
- canonicalQuery.release(),
+ std::move(ws),
+ std::move(root),
+ std::move(querySolution),
+ std::move(canonicalQuery),
collection,
- yieldPolicy,
- execOut);
+ yieldPolicy);
}
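A convention worth noting in these rewrites: the WorkingSet stays owned by a local unique_ptr and is only lent to stages through ws.get(), while a child stage is handed to its parent as a raw pointer that the parent adopts. A standalone sketch of those two pointer roles, with stand-in types rather than the real WorkingSet and PlanStage:

    #include <memory>
    #include <utility>

    struct WorkSet {};  // lent to every stage; owned by the caller until the executor takes it

    struct StageNode {
        // 'ws' is only observed; 'child' is adopted and owned by this stage.
        StageNode(WorkSet* ws, StageNode* child) : ws(ws), child(child) {}
        WorkSet* ws;                       // non-owning
        std::unique_ptr<StageNode> child;  // owning
    };

    struct PlanParts {
        std::unique_ptr<WorkSet> ws;
        std::unique_ptr<StageNode> root;
    };

    PlanParts buildTree() {
        auto ws = std::make_unique<WorkSet>();
        StageNode* leaf = new StageNode(ws.get(), nullptr);       // adopted by the root below
        auto root = std::make_unique<StageNode>(ws.get(), leaf);  // root now owns 'leaf'
        // Both pieces travel together; in the patch they are std::move()d into PlanExecutor::make().
        return {std::move(ws), std::move(root)};
    }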
//
@@ -1157,15 +1154,12 @@ BSONObj getDistinctProjection(const std::string& field) {
} // namespace
-Status getExecutorCount(OperationContext* txn,
- Collection* collection,
- const CountRequest& request,
- bool explain,
- PlanExecutor::YieldPolicy yieldPolicy,
- PlanExecutor** execOut) {
- unique_ptr<WorkingSet> ws(new WorkingSet());
- PlanStage* root;
- QuerySolution* querySolution;
+StatusWith<unique_ptr<PlanExecutor>> getExecutorCount(OperationContext* txn,
+ Collection* collection,
+ const CountRequest& request,
+ bool explain,
+ PlanExecutor::YieldPolicy yieldPolicy) {
+ unique_ptr<WorkingSet> ws = make_unique<WorkingSet>();
// If collection exists and the query is empty, no additional canonicalization is needed.
// If the query is empty, then we can determine the count by just asking the collection
@@ -1174,8 +1168,10 @@ Status getExecutorCount(OperationContext* txn,
//
// If there is a hint, then we can't use a trivial count plan as described above.
if (collection && request.getQuery().isEmpty() && request.getHint().isEmpty()) {
- root = new CountStage(txn, collection, request, ws.get(), NULL);
- return PlanExecutor::make(txn, ws.release(), root, request.getNs(), yieldPolicy, execOut);
+ unique_ptr<PlanStage> root =
+ make_unique<CountStage>(txn, collection, request, ws.get(), nullptr);
+ return PlanExecutor::make(
+ txn, std::move(ws), std::move(root), request.getNs(), yieldPolicy);
}
unique_ptr<CanonicalQuery> cq;
@@ -1207,26 +1203,36 @@ Status getExecutorCount(OperationContext* txn,
// Treat collections that do not exist as empty collections. Note that the explain
// reporting machinery always assumes that the root stage for a count operation is
// a CountStage, so in this case we put a CountStage on top of an EOFStage.
- root = new CountStage(txn, collection, request, ws.get(), new EOFStage());
- return PlanExecutor::make(txn, ws.release(), root, request.getNs(), yieldPolicy, execOut);
+ unique_ptr<PlanStage> root =
+ make_unique<CountStage>(txn, collection, request, ws.get(), new EOFStage());
+ return PlanExecutor::make(
+ txn, std::move(ws), std::move(root), request.getNs(), yieldPolicy);
}
invariant(cq.get());
const size_t plannerOptions = QueryPlannerParams::PRIVATE_IS_COUNT;
+ PlanStage* child;
+ QuerySolution* rawQuerySolution;
Status prepStatus = prepareExecution(
- txn, collection, ws.get(), cq.get(), plannerOptions, &root, &querySolution);
+ txn, collection, ws.get(), cq.get(), plannerOptions, &child, &rawQuerySolution);
if (!prepStatus.isOK()) {
return prepStatus;
}
- invariant(root);
+ invariant(child);
// Make a CountStage to be the new root.
- root = new CountStage(txn, collection, request, ws.get(), root);
+ unique_ptr<PlanStage> root = make_unique<CountStage>(txn, collection, request, ws.get(), child);
+ unique_ptr<QuerySolution> querySolution(rawQuerySolution);
// We must have a tree of stages in order to have a valid plan executor, but the query
// solution may be NULL. Takes ownership of all args other than 'collection' and 'txn'
- return PlanExecutor::make(
- txn, ws.release(), root, querySolution, cq.release(), collection, yieldPolicy, execOut);
+ return PlanExecutor::make(txn,
+ std::move(ws),
+ std::move(root),
+ std::move(querySolution),
+ std::move(cq),
+ collection,
+ yieldPolicy);
}
//
@@ -1282,12 +1288,11 @@ bool turnIxscanIntoDistinctIxscan(QuerySolution* soln, const string& field) {
return false;
}
-Status getExecutorDistinct(OperationContext* txn,
- Collection* collection,
- const BSONObj& query,
- const std::string& field,
- PlanExecutor::YieldPolicy yieldPolicy,
- PlanExecutor** out) {
+StatusWith<unique_ptr<PlanExecutor>> getExecutorDistinct(OperationContext* txn,
+ Collection* collection,
+ const BSONObj& query,
+ const std::string& field,
+ PlanExecutor::YieldPolicy yieldPolicy) {
// This should have been checked by the distinct command.
invariant(collection);
@@ -1333,10 +1338,10 @@ Status getExecutorDistinct(OperationContext* txn,
if (!statusWithCQ.isOK()) {
return statusWithCQ.getStatus();
}
- std::unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
+ unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
// Takes ownership of 'cq'.
- return getExecutor(txn, collection, cq.release(), yieldPolicy, out);
+ return getExecutor(txn, collection, std::move(cq), yieldPolicy);
}
//
@@ -1371,25 +1376,32 @@ Status getExecutorDistinct(OperationContext* txn,
QueryPlannerParams params;
// Takes ownership of 'dn'.
- QuerySolution* soln = QueryPlannerAnalysis::analyzeDataAccess(*cq, params, dn);
+ unique_ptr<QuerySolution> soln(QueryPlannerAnalysis::analyzeDataAccess(*cq, params, dn));
invariant(soln);
- WorkingSet* ws = new WorkingSet();
- PlanStage* root;
- verify(StageBuilder::build(txn, collection, *soln, ws, &root));
+ unique_ptr<WorkingSet> ws = make_unique<WorkingSet>();
+ PlanStage* rawRoot;
+ verify(StageBuilder::build(txn, collection, *soln, ws.get(), &rawRoot));
+ unique_ptr<PlanStage> root(rawRoot);
LOG(2) << "Using fast distinct: " << cq->toStringShort()
- << ", planSummary: " << Explain::getPlanSummary(root);
+ << ", planSummary: " << Explain::getPlanSummary(root.get());
// Takes ownership of its arguments (except for 'collection').
- return PlanExecutor::make(txn, ws, root, soln, cq.release(), collection, yieldPolicy, out);
+ return PlanExecutor::make(txn,
+ std::move(ws),
+ std::move(root),
+ std::move(soln),
+ std::move(cq),
+ collection,
+ yieldPolicy);
}
// See if we can answer the query in a fast-distinct compatible fashion.
vector<QuerySolution*> solutions;
Status status = QueryPlanner::plan(*cq, plannerParams, &solutions);
if (!status.isOK()) {
- return getExecutor(txn, collection, cq.release(), yieldPolicy, out);
+ return getExecutor(txn, collection, std::move(cq), yieldPolicy);
}
// We look for a solution that has an ixscan we can turn into a distinct ixscan
@@ -1403,16 +1415,22 @@ Status getExecutorDistinct(OperationContext* txn,
}
// Build and return the SSR over solutions[i].
- WorkingSet* ws = new WorkingSet();
- PlanStage* root;
- verify(StageBuilder::build(txn, collection, *solutions[i], ws, &root));
+ unique_ptr<WorkingSet> ws = make_unique<WorkingSet>();
+ unique_ptr<QuerySolution> currentSolution(solutions[i]);
+ PlanStage* rawRoot;
+ verify(StageBuilder::build(txn, collection, *currentSolution, ws.get(), &rawRoot));
+ unique_ptr<PlanStage> root(rawRoot);
LOG(2) << "Using fast distinct: " << cq->toStringShort()
- << ", planSummary: " << Explain::getPlanSummary(root);
-
- // Takes ownership of 'ws', 'root', 'solutions[i]', and 'cq'.
- return PlanExecutor::make(
- txn, ws, root, solutions[i], cq.release(), collection, yieldPolicy, out);
+ << ", planSummary: " << Explain::getPlanSummary(root.get());
+
+ return PlanExecutor::make(txn,
+ std::move(ws),
+ std::move(root),
+ std::move(currentSolution),
+ std::move(cq),
+ collection,
+ yieldPolicy);
}
}
@@ -1432,7 +1450,7 @@ Status getExecutorDistinct(OperationContext* txn,
cq = std::move(statusWithCQ.getValue());
// Takes ownership of 'cq'.
- return getExecutor(txn, collection, cq.release(), yieldPolicy, out);
+ return getExecutor(txn, collection, std::move(cq), yieldPolicy);
}
} // namespace mongo
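One more ownership wrinkle in the fast-distinct path above: QueryPlanner::plan() still fills a vector<QuerySolution*> with raw pointers, and only the element that is actually used gets adopted into a unique_ptr before the stage tree is built. A standalone sketch of adopting a single element out of such a vector (stand-in types; what happens to the remaining candidates is left to the surrounding code, as it is in the patch):

    #include <cstddef>
    #include <memory>
    #include <vector>

    struct Solution {};

    std::unique_ptr<Solution> adoptChosen(std::vector<Solution*>& candidates, std::size_t i) {
        std::unique_ptr<Solution> chosen(candidates[i]);  // this element is now owned here
        candidates[i] = nullptr;                          // the vector no longer looks like an owner
        return chosen;
    }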
diff --git a/src/mongo/db/query/get_executor.h b/src/mongo/db/query/get_executor.h
index 24d99ecc791..e157b735638 100644
--- a/src/mongo/db/query/get_executor.h
+++ b/src/mongo/db/query/get_executor.h
@@ -63,19 +63,19 @@ void fillOutPlannerParams(OperationContext* txn,
QueryPlannerParams* plannerParams);
/**
- * Get a plan executor for a query. Takes ownership of 'rawCanonicalQuery'.
+ * Get a plan executor for a query.
*
- * If the query is valid and an executor could be created, returns Status::OK()
- * and populates *out with the PlanExecutor.
+ * If the query is valid and an executor could be created, returns a StatusWith with the
+ * PlanExecutor.
*
* If the query cannot be executed, returns a Status indicating why.
*/
-Status getExecutor(OperationContext* txn,
- Collection* collection,
- CanonicalQuery* rawCanonicalQuery,
- PlanExecutor::YieldPolicy yieldPolicy,
- PlanExecutor** out,
- size_t plannerOptions = 0);
+StatusWith<std::unique_ptr<PlanExecutor>> getExecutor(
+ OperationContext* txn,
+ Collection* collection,
+ std::unique_ptr<CanonicalQuery> canonicalQuery,
+ PlanExecutor::YieldPolicy yieldPolicy,
+ size_t plannerOptions = 0);
/**
 * Get a plan executor for a query. This differs from the getExecutor(...) function
@@ -84,33 +84,32 @@ Status getExecutor(OperationContext* txn,
*
* Used to support idhack updates that do not create a canonical query.
*
- * If the query is valid and an executor could be created, returns Status::OK()
- * and populates *out with the PlanExecutor.
+ * If the query is valid and an executor could be created, returns a StatusWith with the
+ * PlanExecutor.
*
* If the query cannot be executed, returns a Status indicating why.
*/
-Status getExecutor(OperationContext* txn,
- Collection* collection,
- const std::string& ns,
- const BSONObj& unparsedQuery,
- PlanExecutor::YieldPolicy yieldPolicy,
- PlanExecutor** out,
- size_t plannerOptions = 0);
+StatusWith<std::unique_ptr<PlanExecutor>> getExecutor(OperationContext* txn,
+ Collection* collection,
+ const std::string& ns,
+ const BSONObj& unparsedQuery,
+ PlanExecutor::YieldPolicy yieldPolicy,
+ size_t plannerOptions = 0);
/**
- * Get a plan executor for a .find() operation. Takes ownership of 'rawCanonicalQuery'.
+ * Get a plan executor for a .find() operation.
*
- * If the query is valid and an executor could be created, returns Status::OK()
- * and populates *out with the PlanExecutor.
+ * If the query is valid and an executor could be created, returns a StatusWith with the
+ * PlanExecutor.
*
* If the query cannot be executed, returns a Status indicating why.
*/
-Status getExecutorFind(OperationContext* txn,
- Collection* collection,
- const NamespaceString& nss,
- CanonicalQuery* rawCanonicalQuery,
- PlanExecutor::YieldPolicy yieldPolicy,
- PlanExecutor** out);
+StatusWith<std::unique_ptr<PlanExecutor>> getExecutorFind(
+ OperationContext* txn,
+ Collection* collection,
+ const NamespaceString& nss,
+ std::unique_ptr<CanonicalQuery> canonicalQuery,
+ PlanExecutor::YieldPolicy yieldPolicy);
/**
* If possible, turn the provided QuerySolution into a QuerySolution that uses a DistinctNode
@@ -128,12 +127,12 @@ bool turnIxscanIntoDistinctIxscan(QuerySolution* soln, const std::string& field)
* possible values of a certain field. As such, we can skip lots of data in certain cases (see
* body of method for detail).
*/
-Status getExecutorDistinct(OperationContext* txn,
- Collection* collection,
- const BSONObj& query,
- const std::string& field,
- PlanExecutor::YieldPolicy yieldPolicy,
- PlanExecutor** out);
+StatusWith<std::unique_ptr<PlanExecutor>> getExecutorDistinct(
+ OperationContext* txn,
+ Collection* collection,
+ const BSONObj& query,
+ const std::string& field,
+ PlanExecutor::YieldPolicy yieldPolicy);
/*
* Get a PlanExecutor for a query executing as part of a count command.
@@ -142,12 +141,11 @@ Status getExecutorDistinct(OperationContext* txn,
* As such, with certain covered queries, we can skip the overhead of fetching etc. when
* executing a count.
*/
-Status getExecutorCount(OperationContext* txn,
- Collection* collection,
- const CountRequest& request,
- bool explain,
- PlanExecutor::YieldPolicy yieldPolicy,
- PlanExecutor** execOut);
+StatusWith<std::unique_ptr<PlanExecutor>> getExecutorCount(OperationContext* txn,
+ Collection* collection,
+ const CountRequest& request,
+ bool explain,
+ PlanExecutor::YieldPolicy yieldPolicy);
/**
* Get a PlanExecutor for a delete operation. 'parsedDelete' describes the query predicate
@@ -158,15 +156,14 @@ Status getExecutorCount(OperationContext* txn,
*
* Does not take ownership of its arguments.
*
- * If the query is valid and an executor could be created, returns Status::OK() and populates
- * *execOut with the PlanExecutor. The caller takes ownership of *execOut.
+ * If the query is valid and an executor could be created, returns a StatusWith with the
+ * PlanExecutor.
*
* If the query cannot be executed, returns a Status indicating why.
*/
-Status getExecutorDelete(OperationContext* txn,
- Collection* collection,
- ParsedDelete* parsedDelete,
- PlanExecutor** execOut);
+StatusWith<std::unique_ptr<PlanExecutor>> getExecutorDelete(OperationContext* txn,
+ Collection* collection,
+ ParsedDelete* parsedDelete);
/**
* Get a PlanExecutor for an update operation. 'parsedUpdate' describes the query predicate
@@ -178,32 +175,27 @@ Status getExecutorDelete(OperationContext* txn,
*
* Does not take ownership of its arguments.
*
- * If the query is valid and an executor could be created, returns Status::OK() and populates
- * *out with the PlanExecutor. The caller takes ownership of *execOut.
+ * If the query is valid and an executor could be created, returns a StatusWith with the
+ * PlanExecutor.
*
* If the query cannot be executed, returns a Status indicating why.
*/
-Status getExecutorUpdate(OperationContext* txn,
- Collection* collection,
- ParsedUpdate* parsedUpdate,
- OpDebug* opDebug,
- PlanExecutor** execOut);
+StatusWith<std::unique_ptr<PlanExecutor>> getExecutorUpdate(OperationContext* txn,
+ Collection* collection,
+ ParsedUpdate* parsedUpdate,
+ OpDebug* opDebug);
/**
- * Get a PlanExecutor for a group operation. 'rawCanonicalQuery' describes the predicate for
- * the documents to be grouped.
+ * Get a PlanExecutor for a group operation.
*
- * Takes ownership of 'rawCanonicalQuery'. Does not take ownership of other args.
- *
- * If the query is valid and an executor could be created, returns Status::OK() and populates
- * *out with the PlanExecutor.
+ * If the query is valid and an executor could be created, returns a StatusWith with the
+ * PlanExecutor.
*
* If an executor could not be created, returns a Status indicating why.
*/
-Status getExecutorGroup(OperationContext* txn,
- Collection* collection,
- const GroupRequest& request,
- PlanExecutor::YieldPolicy yieldPolicy,
- PlanExecutor** execOut);
+StatusWith<std::unique_ptr<PlanExecutor>> getExecutorGroup(OperationContext* txn,
+ Collection* collection,
+ const GroupRequest& request,
+ PlanExecutor::YieldPolicy yieldPolicy);
} // namespace mongo
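With every factory in this header returning StatusWith<std::unique_ptr<PlanExecutor>>, a consumer checks for failure and takes ownership from the same return value. The sketch below is a hypothetical helper written against these declarations, not a function added by the patch; it relies only on calls visible in this diff (getExecutorFind(), PlanExecutor::getNext(), PlanExecutor::ADVANCED) and ignores getNext() error states for brevity:

    StatusWith<long long> countResults(OperationContext* txn,
                                       Collection* collection,
                                       const NamespaceString& nss,
                                       std::unique_ptr<CanonicalQuery> cq) {
        auto statusWithPlanExecutor =
            getExecutorFind(txn, collection, nss, std::move(cq), PlanExecutor::YIELD_AUTO);
        if (!statusWithPlanExecutor.isOK()) {
            return statusWithPlanExecutor.getStatus();  // the Status converts into the StatusWith return
        }
        std::unique_ptr<PlanExecutor> exec = std::move(statusWithPlanExecutor.getValue());
        long long n = 0;
        for (BSONObj obj; PlanExecutor::ADVANCED == exec->getNext(&obj, nullptr);) {
            ++n;
        }
        return StatusWith<long long>(n);
    }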
diff --git a/src/mongo/db/query/internal_plans.cpp b/src/mongo/db/query/internal_plans.cpp
index fe44395f021..f85b8723b3b 100644
--- a/src/mongo/db/query/internal_plans.cpp
+++ b/src/mongo/db/query/internal_plans.cpp
@@ -37,25 +37,25 @@
#include "mongo/db/exec/fetch.h"
#include "mongo/db/exec/index_scan.h"
#include "mongo/db/query/plan_executor.h"
+#include "mongo/stdx/memory.h"
namespace mongo {
// static
-PlanExecutor* InternalPlanner::collectionScan(OperationContext* txn,
- StringData ns,
- Collection* collection,
- const Direction direction,
- const RecordId startLoc) {
- WorkingSet* ws = new WorkingSet();
+std::unique_ptr<PlanExecutor> InternalPlanner::collectionScan(OperationContext* txn,
+ StringData ns,
+ Collection* collection,
+ const Direction direction,
+ const RecordId startLoc) {
+ std::unique_ptr<WorkingSet> ws = stdx::make_unique<WorkingSet>();
if (NULL == collection) {
- EOFStage* eof = new EOFStage();
- PlanExecutor* exec;
+ std::unique_ptr<EOFStage> eof = stdx::make_unique<EOFStage>();
// Takes ownership of 'ws' and 'eof'.
- Status execStatus =
- PlanExecutor::make(txn, ws, eof, ns.toString(), PlanExecutor::YIELD_MANUAL, &exec);
- invariant(execStatus.isOK());
- return exec;
+ auto statusWithPlanExecutor = PlanExecutor::make(
+ txn, std::move(ws), std::move(eof), ns.toString(), PlanExecutor::YIELD_MANUAL);
+ invariant(statusWithPlanExecutor.isOK());
+ return std::move(statusWithPlanExecutor.getValue());
}
invariant(ns == collection->ns().ns());
@@ -70,24 +70,24 @@ PlanExecutor* InternalPlanner::collectionScan(OperationContext* txn,
params.direction = CollectionScanParams::BACKWARD;
}
- CollectionScan* cs = new CollectionScan(txn, params, ws, NULL);
- PlanExecutor* exec;
+ std::unique_ptr<CollectionScan> cs =
+ stdx::make_unique<CollectionScan>(txn, params, ws.get(), nullptr);
// Takes ownership of 'ws' and 'cs'.
- Status execStatus =
- PlanExecutor::make(txn, ws, cs, collection, PlanExecutor::YIELD_MANUAL, &exec);
- invariant(execStatus.isOK());
- return exec;
+ auto statusWithPlanExecutor = PlanExecutor::make(
+ txn, std::move(ws), std::move(cs), collection, PlanExecutor::YIELD_MANUAL);
+ invariant(statusWithPlanExecutor.isOK());
+ return std::move(statusWithPlanExecutor.getValue());
}
// static
-PlanExecutor* InternalPlanner::indexScan(OperationContext* txn,
- const Collection* collection,
- const IndexDescriptor* descriptor,
- const BSONObj& startKey,
- const BSONObj& endKey,
- bool endKeyInclusive,
- Direction direction,
- int options) {
+std::unique_ptr<PlanExecutor> InternalPlanner::indexScan(OperationContext* txn,
+ const Collection* collection,
+ const IndexDescriptor* descriptor,
+ const BSONObj& startKey,
+ const BSONObj& endKey,
+ bool endKeyInclusive,
+ Direction direction,
+ int options) {
invariant(collection);
invariant(descriptor);
@@ -99,21 +99,19 @@ PlanExecutor* InternalPlanner::indexScan(OperationContext* txn,
params.bounds.endKey = endKey;
params.bounds.endKeyInclusive = endKeyInclusive;
- WorkingSet* ws = new WorkingSet();
- IndexScan* ix = new IndexScan(txn, params, ws, NULL);
+ std::unique_ptr<WorkingSet> ws = stdx::make_unique<WorkingSet>();
- PlanStage* root = ix;
+ std::unique_ptr<PlanStage> root = stdx::make_unique<IndexScan>(txn, params, ws.get(), nullptr);
if (IXSCAN_FETCH & options) {
- root = new FetchStage(txn, ws, root, NULL, collection);
+ root = stdx::make_unique<FetchStage>(txn, ws.get(), root.release(), nullptr, collection);
}
- PlanExecutor* exec;
// Takes ownership of 'ws' and 'root'.
- Status execStatus =
- PlanExecutor::make(txn, ws, root, collection, PlanExecutor::YIELD_MANUAL, &exec);
- invariant(execStatus.isOK());
- return exec;
+ auto statusWithPlanExecutor = PlanExecutor::make(
+ txn, std::move(ws), std::move(root), collection, PlanExecutor::YIELD_MANUAL);
+ invariant(statusWithPlanExecutor.isOK());
+ return std::move(statusWithPlanExecutor.getValue());
}
} // namespace mongo
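InternalPlanner now hands ownership back by value. Two details of that style are easy to trip over: a local unique_ptr is moved implicitly when returned, but moving out of an lvalue expression such as a getValue() that returns a reference needs an explicit std::move, which is the pattern the bodies above follow with std::move(statusWithPlanExecutor.getValue()). A minimal standalone illustration using standard-library types only:

    #include <memory>
    #include <utility>

    std::unique_ptr<int> makeOwned() {
        auto p = std::make_unique<int>(42);
        return p;  // a local variable is moved implicitly on return
    }

    struct Wrapper {
        std::unique_ptr<int> value;
        std::unique_ptr<int>& getValue() { return value; }
    };

    std::unique_ptr<int> takeFrom(Wrapper& w) {
        return std::move(w.getValue());  // getValue() is an lvalue, so the move must be explicit
    }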
diff --git a/src/mongo/db/query/internal_plans.h b/src/mongo/db/query/internal_plans.h
index 3b21e3a4f1e..c0a33baceed 100644
--- a/src/mongo/db/query/internal_plans.h
+++ b/src/mongo/db/query/internal_plans.h
@@ -63,23 +63,23 @@ public:
/**
* Return a collection scan. Caller owns pointer.
*/
- static PlanExecutor* collectionScan(OperationContext* txn,
- StringData ns,
- Collection* collection,
- const Direction direction = FORWARD,
- const RecordId startLoc = RecordId());
+ static std::unique_ptr<PlanExecutor> collectionScan(OperationContext* txn,
+ StringData ns,
+ Collection* collection,
+ const Direction direction = FORWARD,
+ const RecordId startLoc = RecordId());
/**
* Return an index scan. Caller owns returned pointer.
*/
- static PlanExecutor* indexScan(OperationContext* txn,
- const Collection* collection,
- const IndexDescriptor* descriptor,
- const BSONObj& startKey,
- const BSONObj& endKey,
- bool endKeyInclusive,
- Direction direction = FORWARD,
- int options = 0);
+ static std::unique_ptr<PlanExecutor> indexScan(OperationContext* txn,
+ const Collection* collection,
+ const IndexDescriptor* descriptor,
+ const BSONObj& startKey,
+ const BSONObj& endKey,
+ bool endKeyInclusive,
+ Direction direction = FORWARD,
+ int options = 0);
};
} // namespace mongo
diff --git a/src/mongo/db/query/plan_executor.cpp b/src/mongo/db/query/plan_executor.cpp
index f234948fe50..4290830709c 100644
--- a/src/mongo/db/query/plan_executor.cpp
+++ b/src/mongo/db/query/plan_executor.cpp
@@ -43,6 +43,7 @@
#include "mongo/db/service_context.h"
#include "mongo/db/query/plan_yield_policy.h"
#include "mongo/db/storage/record_fetcher.h"
+#include "mongo/stdx/memory.h"
#include "mongo/util/stacktrace.h"
@@ -50,7 +51,9 @@ namespace mongo {
using std::shared_ptr;
using std::string;
+using std::unique_ptr;
using std::vector;
+using stdx::make_unique;
namespace {
@@ -76,59 +79,65 @@ PlanStage* getStageByType(PlanStage* root, StageType type) {
}
// static
-Status PlanExecutor::make(OperationContext* opCtx,
- WorkingSet* ws,
- PlanStage* rt,
- const Collection* collection,
- YieldPolicy yieldPolicy,
- PlanExecutor** out) {
- return PlanExecutor::make(opCtx, ws, rt, NULL, NULL, collection, "", yieldPolicy, out);
+StatusWith<unique_ptr<PlanExecutor>> PlanExecutor::make(OperationContext* opCtx,
+ unique_ptr<WorkingSet> ws,
+ unique_ptr<PlanStage> rt,
+ const Collection* collection,
+ YieldPolicy yieldPolicy) {
+ return PlanExecutor::make(
+ opCtx, std::move(ws), std::move(rt), nullptr, nullptr, collection, "", yieldPolicy);
}
// static
-Status PlanExecutor::make(OperationContext* opCtx,
- WorkingSet* ws,
- PlanStage* rt,
- const std::string& ns,
- YieldPolicy yieldPolicy,
- PlanExecutor** out) {
- return PlanExecutor::make(opCtx, ws, rt, NULL, NULL, NULL, ns, yieldPolicy, out);
+StatusWith<unique_ptr<PlanExecutor>> PlanExecutor::make(OperationContext* opCtx,
+ unique_ptr<WorkingSet> ws,
+ unique_ptr<PlanStage> rt,
+ const std::string& ns,
+ YieldPolicy yieldPolicy) {
+ return PlanExecutor::make(
+ opCtx, std::move(ws), std::move(rt), nullptr, nullptr, nullptr, ns, yieldPolicy);
}
// static
-Status PlanExecutor::make(OperationContext* opCtx,
- WorkingSet* ws,
- PlanStage* rt,
- CanonicalQuery* cq,
- const Collection* collection,
- YieldPolicy yieldPolicy,
- PlanExecutor** out) {
- return PlanExecutor::make(opCtx, ws, rt, NULL, cq, collection, "", yieldPolicy, out);
+StatusWith<unique_ptr<PlanExecutor>> PlanExecutor::make(OperationContext* opCtx,
+ unique_ptr<WorkingSet> ws,
+ unique_ptr<PlanStage> rt,
+ unique_ptr<CanonicalQuery> cq,
+ const Collection* collection,
+ YieldPolicy yieldPolicy) {
+ return PlanExecutor::make(
+ opCtx, std::move(ws), std::move(rt), nullptr, std::move(cq), collection, "", yieldPolicy);
}
// static
-Status PlanExecutor::make(OperationContext* opCtx,
- WorkingSet* ws,
- PlanStage* rt,
- QuerySolution* qs,
- CanonicalQuery* cq,
- const Collection* collection,
- YieldPolicy yieldPolicy,
- PlanExecutor** out) {
- return PlanExecutor::make(opCtx, ws, rt, qs, cq, collection, "", yieldPolicy, out);
+StatusWith<unique_ptr<PlanExecutor>> PlanExecutor::make(OperationContext* opCtx,
+ unique_ptr<WorkingSet> ws,
+ unique_ptr<PlanStage> rt,
+ unique_ptr<QuerySolution> qs,
+ unique_ptr<CanonicalQuery> cq,
+ const Collection* collection,
+ YieldPolicy yieldPolicy) {
+ return PlanExecutor::make(opCtx,
+ std::move(ws),
+ std::move(rt),
+ std::move(qs),
+ std::move(cq),
+ collection,
+ "",
+ yieldPolicy);
}
// static
-Status PlanExecutor::make(OperationContext* opCtx,
- WorkingSet* ws,
- PlanStage* rt,
- QuerySolution* qs,
- CanonicalQuery* cq,
- const Collection* collection,
- const std::string& ns,
- YieldPolicy yieldPolicy,
- PlanExecutor** out) {
- std::unique_ptr<PlanExecutor> exec(new PlanExecutor(opCtx, ws, rt, qs, cq, collection, ns));
+StatusWith<unique_ptr<PlanExecutor>> PlanExecutor::make(OperationContext* txn,
+ unique_ptr<WorkingSet> ws,
+ unique_ptr<PlanStage> rt,
+ unique_ptr<QuerySolution> qs,
+ unique_ptr<CanonicalQuery> cq,
+ const Collection* collection,
+ const std::string& ns,
+ YieldPolicy yieldPolicy) {
+ unique_ptr<PlanExecutor> exec(new PlanExecutor(
+ txn, std::move(ws), std::move(rt), std::move(qs), std::move(cq), collection, ns));
// Perform plan selection, if necessary.
Status status = exec->pickBestPlan(yieldPolicy);
@@ -136,23 +145,22 @@ Status PlanExecutor::make(OperationContext* opCtx,
return status;
}
- *out = exec.release();
- return Status::OK();
+ return std::move(exec);
}
PlanExecutor::PlanExecutor(OperationContext* opCtx,
- WorkingSet* ws,
- PlanStage* rt,
- QuerySolution* qs,
- CanonicalQuery* cq,
+ unique_ptr<WorkingSet> ws,
+ unique_ptr<PlanStage> rt,
+ unique_ptr<QuerySolution> qs,
+ unique_ptr<CanonicalQuery> cq,
const Collection* collection,
const std::string& ns)
: _opCtx(opCtx),
_collection(collection),
- _cq(cq),
- _workingSet(ws),
- _qs(qs),
- _root(rt),
+ _cq(std::move(cq)),
+ _workingSet(std::move(ws)),
+ _qs(std::move(qs)),
+ _root(std::move(rt)),
_ns(ns),
_yieldPolicy(new PlanYieldPolicy(this, YIELD_MANUAL)) {
// We may still need to initialize _ns from either _collection or _cq.
@@ -328,7 +336,7 @@ PlanExecutor::ExecState PlanExecutor::getNextSnapshotted(Snapshotted<BSONObj>* o
// to use to pull the record into memory. We take ownership of the RecordFetcher here,
// deleting it after we've had a chance to do the fetch. For timing-based yields, we
// just pass a NULL fetcher.
- std::unique_ptr<RecordFetcher> fetcher;
+ unique_ptr<RecordFetcher> fetcher;
// Incremented on every writeConflict, reset to 0 on any successful call to _root->work.
size_t writeConflictsInARow = 0;
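The private factory is the only place a PlanExecutor is actually constructed: the constructor takes each owned part as a unique_ptr and moves it into the corresponding member, and because the constructor is private the factory uses an explicit new rather than make_unique. A standalone sketch of that arrangement (Executor and its plan member are stand-ins, not the real classes):

    #include <memory>
    #include <string>
    #include <utility>

    class Executor {
    public:
        static std::unique_ptr<Executor> make(std::unique_ptr<std::string> plan) {
            // make_unique cannot reach a private constructor, hence the explicit 'new'.
            std::unique_ptr<Executor> exec(new Executor(std::move(plan)));
            return exec;  // when the return type is StatusWith<...>, the patch writes
                          // 'return std::move(exec);' so the unique_ptr converts into it
        }

    private:
        explicit Executor(std::unique_ptr<std::string> plan) : _plan(std::move(plan)) {}
        std::unique_ptr<std::string> _plan;  // owned for the executor's lifetime
    };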
diff --git a/src/mongo/db/query/plan_executor.h b/src/mongo/db/query/plan_executor.h
index 34611eba38f..b0000cee181 100644
--- a/src/mongo/db/query/plan_executor.h
+++ b/src/mongo/db/query/plan_executor.h
@@ -140,48 +140,44 @@ public:
* Right now this is only for idhack updates which neither canonicalize
* nor go through normal planning.
*/
- static Status make(OperationContext* opCtx,
- WorkingSet* ws,
- PlanStage* rt,
- const Collection* collection,
- YieldPolicy yieldPolicy,
- PlanExecutor** out);
+ static StatusWith<std::unique_ptr<PlanExecutor>> make(OperationContext* opCtx,
+ std::unique_ptr<WorkingSet> ws,
+ std::unique_ptr<PlanStage> rt,
+ const Collection* collection,
+ YieldPolicy yieldPolicy);
/**
* Used when we have a NULL collection and no canonical query. In this case,
* we need to explicitly pass a namespace to the plan executor.
*/
- static Status make(OperationContext* opCtx,
- WorkingSet* ws,
- PlanStage* rt,
- const std::string& ns,
- YieldPolicy yieldPolicy,
- PlanExecutor** out);
+ static StatusWith<std::unique_ptr<PlanExecutor>> make(OperationContext* opCtx,
+ std::unique_ptr<WorkingSet> ws,
+ std::unique_ptr<PlanStage> rt,
+ const std::string& ns,
+ YieldPolicy yieldPolicy);
/**
* Used when there is a canonical query but no query solution (e.g. idhack
* queries, queries against a NULL collection, queries using the subplan stage).
*/
- static Status make(OperationContext* opCtx,
- WorkingSet* ws,
- PlanStage* rt,
- CanonicalQuery* cq,
- const Collection* collection,
- YieldPolicy yieldPolicy,
- PlanExecutor** out);
+ static StatusWith<std::unique_ptr<PlanExecutor>> make(OperationContext* opCtx,
+ std::unique_ptr<WorkingSet> ws,
+ std::unique_ptr<PlanStage> rt,
+ std::unique_ptr<CanonicalQuery> cq,
+ const Collection* collection,
+ YieldPolicy yieldPolicy);
/**
- * The constructor for the normal case, when you have both a canonical query
+ * The constructor for the normal case, when you have a collection, a canonical query,
* and a query solution.
*/
- static Status make(OperationContext* opCtx,
- WorkingSet* ws,
- PlanStage* rt,
- QuerySolution* qs,
- CanonicalQuery* cq,
- const Collection* collection,
- YieldPolicy yieldPolicy,
- PlanExecutor** out);
+ static StatusWith<std::unique_ptr<PlanExecutor>> make(OperationContext* opCtx,
+ std::unique_ptr<WorkingSet> ws,
+ std::unique_ptr<PlanStage> rt,
+ std::unique_ptr<QuerySolution> qs,
+ std::unique_ptr<CanonicalQuery> cq,
+ const Collection* collection,
+ YieldPolicy yieldPolicy);
~PlanExecutor();
@@ -377,25 +373,24 @@ private:
* New PlanExecutor instances are created with the static make() methods above.
*/
PlanExecutor(OperationContext* opCtx,
- WorkingSet* ws,
- PlanStage* rt,
- QuerySolution* qs,
- CanonicalQuery* cq,
+ std::unique_ptr<WorkingSet> ws,
+ std::unique_ptr<PlanStage> rt,
+ std::unique_ptr<QuerySolution> qs,
+ std::unique_ptr<CanonicalQuery> cq,
const Collection* collection,
const std::string& ns);
/**
* Public factory methods delegate to this private factory to do their work.
*/
- static Status make(OperationContext* opCtx,
- WorkingSet* ws,
- PlanStage* rt,
- QuerySolution* qs,
- CanonicalQuery* cq,
- const Collection* collection,
- const std::string& ns,
- YieldPolicy yieldPolicy,
- PlanExecutor** out);
+ static StatusWith<std::unique_ptr<PlanExecutor>> make(OperationContext* txn,
+ std::unique_ptr<WorkingSet> ws,
+ std::unique_ptr<PlanStage> rt,
+ std::unique_ptr<QuerySolution> qs,
+ std::unique_ptr<CanonicalQuery> cq,
+ const Collection* collection,
+ const std::string& ns,
+ YieldPolicy yieldPolicy);
/**
* Clients of PlanExecutor expect that on receiving a new instance from one of the make()
diff --git a/src/mongo/dbtests/documentsourcetests.cpp b/src/mongo/dbtests/documentsourcetests.cpp
index 971acb64317..f9f644cc03b 100644
--- a/src/mongo/dbtests/documentsourcetests.cpp
+++ b/src/mongo/dbtests/documentsourcetests.cpp
@@ -182,11 +182,9 @@ protected:
auto statusWithCQ = CanonicalQuery::canonicalize(ns, /*query=*/BSONObj());
uassertStatusOK(statusWithCQ.getStatus());
unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
- PlanExecutor* execBare;
- uassertStatusOK(getExecutor(
- &_opCtx, ctx.getCollection(), cq.release(), PlanExecutor::YIELD_MANUAL, &execBare));
+ _exec = uassertStatusOK(
+ getExecutor(&_opCtx, ctx.getCollection(), std::move(cq), PlanExecutor::YIELD_MANUAL));
- _exec.reset(execBare);
_exec->saveState();
_exec->registerExec();
diff --git a/src/mongo/dbtests/executor_registry.cpp b/src/mongo/dbtests/executor_registry.cpp
index 3601ecc80e0..dc02f95c58f 100644
--- a/src/mongo/dbtests/executor_registry.cpp
+++ b/src/mongo/dbtests/executor_registry.cpp
@@ -76,17 +76,15 @@ public:
ASSERT_OK(statusWithCQ.getStatus());
std::unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
- PlanExecutor* exec;
// Takes ownership of 'ws', 'scan', and 'cq'.
- Status status = PlanExecutor::make(&_opCtx,
- ws.release(),
- scan.release(),
- cq.release(),
- _ctx->db()->getCollection(ns()),
- PlanExecutor::YIELD_MANUAL,
- &exec);
- ASSERT_OK(status);
- return exec;
+ auto statusWithPlanExecutor = PlanExecutor::make(&_opCtx,
+ std::move(ws),
+ std::move(scan),
+ std::move(cq),
+ _ctx->db()->getCollection(ns()),
+ PlanExecutor::YIELD_MANUAL);
+ ASSERT_OK(statusWithPlanExecutor.getStatus());
+ return statusWithPlanExecutor.getValue().release();
}
void registerExecutor(PlanExecutor* exec) {
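The dbtests fixtures still pass PlanExecutor around as a raw pointer, so after building through the StatusWith/unique_ptr API they hand ownership back out with .release(), as getValue().release() does above. A standalone sketch of that bridge (the fixture types are stand-ins):

    #include <memory>

    struct Executor {};

    // Builds with unique_ptr internally, but the legacy fixture interface expects a raw pointer.
    Executor* makeExecutorForFixture() {
        std::unique_ptr<Executor> exec = std::make_unique<Executor>();
        // ... any early return during set-up would still destroy 'exec' safely ...
        return exec.release();  // from here on the caller is responsible for deleting it
    }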
diff --git a/src/mongo/dbtests/query_multi_plan_runner.cpp b/src/mongo/dbtests/query_multi_plan_runner.cpp
index 02e66609dc2..bd9efe25fd0 100644
--- a/src/mongo/dbtests/query_multi_plan_runner.cpp
+++ b/src/mongo/dbtests/query_multi_plan_runner.cpp
@@ -46,6 +46,7 @@
#include "mongo/db/query/query_planner_test_lib.h"
#include "mongo/db/query/stage_builder.h"
#include "mongo/dbtests/dbtests.h"
+#include "mongo/stdx/memory.h"
namespace mongo {
@@ -58,6 +59,7 @@ namespace QueryMultiPlanRunner {
using std::unique_ptr;
using std::vector;
+using stdx::make_unique;
/**
* Create query solution.
@@ -157,7 +159,8 @@ public:
unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
verify(NULL != cq.get());
- MultiPlanStage* mps = new MultiPlanStage(&_txn, ctx.getCollection(), cq.get());
+ unique_ptr<MultiPlanStage> mps =
+ make_unique<MultiPlanStage>(&_txn, ctx.getCollection(), cq.get());
mps->addPlan(createQuerySolution(), firstRoot.release(), sharedWs.get());
mps->addPlan(createQuerySolution(), secondRoot.release(), sharedWs.get());
@@ -168,16 +171,14 @@ public:
ASSERT_EQUALS(0, mps->bestPlanIdx());
// Takes ownership of arguments other than 'collection'.
- PlanExecutor* rawExec;
- Status status = PlanExecutor::make(&_txn,
- sharedWs.release(),
- mps,
- cq.release(),
- coll,
- PlanExecutor::YIELD_MANUAL,
- &rawExec);
- ASSERT_OK(status);
- unique_ptr<PlanExecutor> exec(rawExec);
+ auto statusWithPlanExecutor = PlanExecutor::make(&_txn,
+ std::move(sharedWs),
+ std::move(mps),
+ std::move(cq),
+ coll,
+ PlanExecutor::YIELD_MANUAL);
+ ASSERT_OK(statusWithPlanExecutor.getStatus());
+ std::unique_ptr<PlanExecutor> exec = std::move(statusWithPlanExecutor.getValue());
// Get all our results out.
int results = 0;
diff --git a/src/mongo/dbtests/query_plan_executor.cpp b/src/mongo/dbtests/query_plan_executor.cpp
index 7cbd86ab9cb..e24638ee5c7 100644
--- a/src/mongo/dbtests/query_plan_executor.cpp
+++ b/src/mongo/dbtests/query_plan_executor.cpp
@@ -103,17 +103,11 @@ public:
// Make the stage.
unique_ptr<PlanStage> root(new CollectionScan(&_txn, csparams, ws.get(), cq.get()->root()));
- PlanExecutor* exec;
// Hand the plan off to the executor.
- Status stat = PlanExecutor::make(&_txn,
- ws.release(),
- root.release(),
- cq.release(),
- coll,
- PlanExecutor::YIELD_MANUAL,
- &exec);
- ASSERT_OK(stat);
- return exec;
+ auto statusWithPlanExecutor = PlanExecutor::make(
+ &_txn, std::move(ws), std::move(root), std::move(cq), coll, PlanExecutor::YIELD_MANUAL);
+ ASSERT_OK(statusWithPlanExecutor.getStatus());
+ return statusWithPlanExecutor.getValue().release();
}
/**
@@ -150,17 +144,11 @@ public:
unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
verify(NULL != cq.get());
- PlanExecutor* exec;
// Hand the plan off to the executor.
- Status stat = PlanExecutor::make(&_txn,
- ws.release(),
- root.release(),
- cq.release(),
- coll,
- PlanExecutor::YIELD_MANUAL,
- &exec);
- ASSERT_OK(stat);
- return exec;
+ auto statusWithPlanExecutor = PlanExecutor::make(
+ &_txn, std::move(ws), std::move(root), std::move(cq), coll, PlanExecutor::YIELD_MANUAL);
+ ASSERT_OK(statusWithPlanExecutor.getStatus());
+ return statusWithPlanExecutor.getValue().release();
}
static const char* ns() {
@@ -296,11 +284,10 @@ public:
new PipelineProxyStage(pipeline, innerExec, ws.get()));
Collection* collection = ctx.getCollection();
- PlanExecutor* rawExec;
- Status status = PlanExecutor::make(
- &_txn, ws.release(), proxy.release(), collection, PlanExecutor::YIELD_MANUAL, &rawExec);
- ASSERT_OK(status);
- std::unique_ptr<PlanExecutor> outerExec(rawExec);
+ auto statusWithPlanExecutor = PlanExecutor::make(
+ &_txn, std::move(ws), std::move(proxy), collection, PlanExecutor::YIELD_MANUAL);
+ ASSERT_OK(statusWithPlanExecutor.getStatus());
+ std::unique_ptr<PlanExecutor> outerExec = std::move(statusWithPlanExecutor.getValue());
// Only the outer executor gets registered.
registerExec(outerExec.get());
diff --git a/src/mongo/dbtests/query_stage_collscan.cpp b/src/mongo/dbtests/query_stage_collscan.cpp
index e7af7378099..84593bc62f0 100644
--- a/src/mongo/dbtests/query_stage_collscan.cpp
+++ b/src/mongo/dbtests/query_stage_collscan.cpp
@@ -44,12 +44,14 @@
#include "mongo/db/query/plan_executor.h"
#include "mongo/db/storage/record_store.h"
#include "mongo/dbtests/dbtests.h"
+#include "mongo/stdx/memory.h"
#include "mongo/util/fail_point_service.h"
namespace QueryStageCollectionScan {
using std::unique_ptr;
using std::vector;
+using stdx::make_unique;
//
// Stage-specific tests.
@@ -91,14 +93,14 @@ public:
unique_ptr<MatchExpression> filterExpr(swme.getValue());
// Make a scan and have the runner own it.
- WorkingSet* ws = new WorkingSet();
- PlanStage* ps = new CollectionScan(&_txn, params, ws, filterExpr.get());
+ unique_ptr<WorkingSet> ws = make_unique<WorkingSet>();
+ unique_ptr<PlanStage> ps =
+ make_unique<CollectionScan>(&_txn, params, ws.get(), filterExpr.get());
- PlanExecutor* rawExec;
- Status status = PlanExecutor::make(
- &_txn, ws, ps, params.collection, PlanExecutor::YIELD_MANUAL, &rawExec);
- ASSERT_OK(status);
- std::unique_ptr<PlanExecutor> exec(rawExec);
+ auto statusWithPlanExecutor = PlanExecutor::make(
+ &_txn, std::move(ws), std::move(ps), params.collection, PlanExecutor::YIELD_MANUAL);
+ ASSERT_OK(statusWithPlanExecutor.getStatus());
+ unique_ptr<PlanExecutor> exec = std::move(statusWithPlanExecutor.getValue());
// Use the runner to count the number of objects scanned.
int count = 0;
@@ -207,14 +209,13 @@ public:
params.tailable = false;
// Make a scan and have the runner own it.
- WorkingSet* ws = new WorkingSet();
- PlanStage* ps = new CollectionScan(&_txn, params, ws, NULL);
+ unique_ptr<WorkingSet> ws = make_unique<WorkingSet>();
+ unique_ptr<PlanStage> ps = make_unique<CollectionScan>(&_txn, params, ws.get(), nullptr);
- PlanExecutor* rawExec;
- Status status = PlanExecutor::make(
- &_txn, ws, ps, params.collection, PlanExecutor::YIELD_MANUAL, &rawExec);
- ASSERT_OK(status);
- std::unique_ptr<PlanExecutor> exec(rawExec);
+ auto statusWithPlanExecutor = PlanExecutor::make(
+ &_txn, std::move(ws), std::move(ps), params.collection, PlanExecutor::YIELD_MANUAL);
+ ASSERT_OK(statusWithPlanExecutor.getStatus());
+ unique_ptr<PlanExecutor> exec = std::move(statusWithPlanExecutor.getValue());
int count = 0;
for (BSONObj obj; PlanExecutor::ADVANCED == exec->getNext(&obj, NULL);) {
@@ -241,14 +242,13 @@ public:
params.direction = CollectionScanParams::BACKWARD;
params.tailable = false;
- WorkingSet* ws = new WorkingSet();
- PlanStage* ps = new CollectionScan(&_txn, params, ws, NULL);
+ unique_ptr<WorkingSet> ws = make_unique<WorkingSet>();
+ unique_ptr<PlanStage> ps = make_unique<CollectionScan>(&_txn, params, ws.get(), nullptr);
- PlanExecutor* rawExec;
- Status status = PlanExecutor::make(
- &_txn, ws, ps, params.collection, PlanExecutor::YIELD_MANUAL, &rawExec);
- ASSERT_OK(status);
- std::unique_ptr<PlanExecutor> exec(rawExec);
+ auto statusWithPlanExecutor = PlanExecutor::make(
+ &_txn, std::move(ws), std::move(ps), params.collection, PlanExecutor::YIELD_MANUAL);
+ ASSERT_OK(statusWithPlanExecutor.getStatus());
+ unique_ptr<PlanExecutor> exec = std::move(statusWithPlanExecutor.getValue());
int count = 0;
for (BSONObj obj; PlanExecutor::ADVANCED == exec->getNext(&obj, NULL);) {
diff --git a/src/mongo/dbtests/query_stage_merge_sort.cpp b/src/mongo/dbtests/query_stage_merge_sort.cpp
index 884ec1bda55..55f27b3059e 100644
--- a/src/mongo/dbtests/query_stage_merge_sort.cpp
+++ b/src/mongo/dbtests/query_stage_merge_sort.cpp
@@ -39,6 +39,7 @@
#include "mongo/db/operation_context_impl.h"
#include "mongo/db/query/plan_executor.h"
#include "mongo/dbtests/dbtests.h"
+#include "mongo/stdx/memory.h"
/**
* This file tests db/exec/merge_sort.cpp
@@ -46,9 +47,10 @@
namespace QueryStageMergeSortTests {
-using std::unique_ptr;
using std::set;
using std::string;
+using std::unique_ptr;
+using stdx::make_unique;
class QueryStageMergeSortTestBase {
public:
@@ -134,11 +136,11 @@ public:
addIndex(firstIndex);
addIndex(secondIndex);
- WorkingSet* ws = new WorkingSet();
+ unique_ptr<WorkingSet> ws = make_unique<WorkingSet>();
// Sort by c:1
MergeSortStageParams msparams;
msparams.pattern = BSON("c" << 1);
- MergeSortStage* ms = new MergeSortStage(msparams, ws, coll);
+ MergeSortStage* ms = new MergeSortStage(msparams, ws.get(), coll);
// a:1
IndexScanParams params;
@@ -148,22 +150,19 @@ public:
params.bounds.endKey = objWithMaxKey(1);
params.bounds.endKeyInclusive = true;
params.direction = 1;
- ms->addChild(new IndexScan(&_txn, params, ws, NULL));
+ ms->addChild(new IndexScan(&_txn, params, ws.get(), NULL));
// b:1
params.descriptor = getIndex(secondIndex, coll);
- ms->addChild(new IndexScan(&_txn, params, ws, NULL));
+ ms->addChild(new IndexScan(&_txn, params, ws.get(), NULL));
+ unique_ptr<FetchStage> fetchStage =
+ make_unique<FetchStage>(&_txn, ws.get(), ms, nullptr, coll);
// Must fetch if we want to easily pull out an obj.
- PlanExecutor* rawExec;
- Status status = PlanExecutor::make(&_txn,
- ws,
- new FetchStage(&_txn, ws, ms, NULL, coll),
- coll,
- PlanExecutor::YIELD_MANUAL,
- &rawExec);
- ASSERT_OK(status);
- std::unique_ptr<PlanExecutor> exec(rawExec);
+ auto statusWithPlanExecutor = PlanExecutor::make(
+ &_txn, std::move(ws), std::move(fetchStage), coll, PlanExecutor::YIELD_MANUAL);
+ ASSERT_OK(statusWithPlanExecutor.getStatus());
+ unique_ptr<PlanExecutor> exec = std::move(statusWithPlanExecutor.getValue());
for (int i = 0; i < N; ++i) {
BSONObj first, second;
@@ -207,11 +206,11 @@ public:
addIndex(firstIndex);
addIndex(secondIndex);
- WorkingSet* ws = new WorkingSet();
+ unique_ptr<WorkingSet> ws = make_unique<WorkingSet>();
// Sort by c:1
MergeSortStageParams msparams;
msparams.pattern = BSON("c" << 1);
- MergeSortStage* ms = new MergeSortStage(msparams, ws, coll);
+ MergeSortStage* ms = new MergeSortStage(msparams, ws.get(), coll);
// a:1
IndexScanParams params;
@@ -221,21 +220,18 @@ public:
params.bounds.endKey = objWithMaxKey(1);
params.bounds.endKeyInclusive = true;
params.direction = 1;
- ms->addChild(new IndexScan(&_txn, params, ws, NULL));
+ ms->addChild(new IndexScan(&_txn, params, ws.get(), NULL));
// b:1
params.descriptor = getIndex(secondIndex, coll);
- ms->addChild(new IndexScan(&_txn, params, ws, NULL));
-
- PlanExecutor* rawExec;
- Status status = PlanExecutor::make(&_txn,
- ws,
- new FetchStage(&_txn, ws, ms, NULL, coll),
- coll,
- PlanExecutor::YIELD_MANUAL,
- &rawExec);
- ASSERT_OK(status);
- std::unique_ptr<PlanExecutor> exec(rawExec);
+ ms->addChild(new IndexScan(&_txn, params, ws.get(), NULL));
+ unique_ptr<FetchStage> fetchStage =
+ make_unique<FetchStage>(&_txn, ws.get(), ms, nullptr, coll);
+
+ auto statusWithPlanExecutor = PlanExecutor::make(
+ &_txn, std::move(ws), std::move(fetchStage), coll, PlanExecutor::YIELD_MANUAL);
+ ASSERT_OK(statusWithPlanExecutor.getStatus());
+ unique_ptr<PlanExecutor> exec = std::move(statusWithPlanExecutor.getValue());
for (int i = 0; i < N; ++i) {
BSONObj first, second;
@@ -278,12 +274,12 @@ public:
addIndex(firstIndex);
addIndex(secondIndex);
- WorkingSet* ws = new WorkingSet();
+ unique_ptr<WorkingSet> ws = make_unique<WorkingSet>();
// Sort by c:1
MergeSortStageParams msparams;
msparams.dedup = false;
msparams.pattern = BSON("c" << 1);
- MergeSortStage* ms = new MergeSortStage(msparams, ws, coll);
+ MergeSortStage* ms = new MergeSortStage(msparams, ws.get(), coll);
// a:1
IndexScanParams params;
@@ -293,21 +289,18 @@ public:
params.bounds.endKey = objWithMaxKey(1);
params.bounds.endKeyInclusive = true;
params.direction = 1;
- ms->addChild(new IndexScan(&_txn, params, ws, NULL));
+ ms->addChild(new IndexScan(&_txn, params, ws.get(), NULL));
// b:1
params.descriptor = getIndex(secondIndex, coll);
- ms->addChild(new IndexScan(&_txn, params, ws, NULL));
-
- PlanExecutor* rawExec;
- Status status = PlanExecutor::make(&_txn,
- ws,
- new FetchStage(&_txn, ws, ms, NULL, coll),
- coll,
- PlanExecutor::YIELD_MANUAL,
- &rawExec);
- ASSERT_OK(status);
- std::unique_ptr<PlanExecutor> exec(rawExec);
+ ms->addChild(new IndexScan(&_txn, params, ws.get(), NULL));
+ unique_ptr<FetchStage> fetchStage =
+ make_unique<FetchStage>(&_txn, ws.get(), ms, nullptr, coll);
+
+ auto statusWithPlanExecutor = PlanExecutor::make(
+ &_txn, std::move(ws), std::move(fetchStage), coll, PlanExecutor::YIELD_MANUAL);
+ ASSERT_OK(statusWithPlanExecutor.getStatus());
+ unique_ptr<PlanExecutor> exec = std::move(statusWithPlanExecutor.getValue());
for (int i = 0; i < N; ++i) {
BSONObj first, second;
@@ -353,11 +346,11 @@ public:
addIndex(firstIndex);
addIndex(secondIndex);
- WorkingSet* ws = new WorkingSet();
+ unique_ptr<WorkingSet> ws = make_unique<WorkingSet>();
// Sort by c:-1
MergeSortStageParams msparams;
msparams.pattern = BSON("c" << -1);
- MergeSortStage* ms = new MergeSortStage(msparams, ws, coll);
+ MergeSortStage* ms = new MergeSortStage(msparams, ws.get(), coll);
// a:1
IndexScanParams params;
@@ -368,21 +361,18 @@ public:
params.bounds.endKeyInclusive = true;
// This is the direction along the index.
params.direction = 1;
- ms->addChild(new IndexScan(&_txn, params, ws, NULL));
+ ms->addChild(new IndexScan(&_txn, params, ws.get(), NULL));
// b:1
params.descriptor = getIndex(secondIndex, coll);
- ms->addChild(new IndexScan(&_txn, params, ws, NULL));
-
- PlanExecutor* rawExec;
- Status status = PlanExecutor::make(&_txn,
- ws,
- new FetchStage(&_txn, ws, ms, NULL, coll),
- coll,
- PlanExecutor::YIELD_MANUAL,
- &rawExec);
- ASSERT_OK(status);
- std::unique_ptr<PlanExecutor> exec(rawExec);
+ ms->addChild(new IndexScan(&_txn, params, ws.get(), NULL));
+ unique_ptr<FetchStage> fetchStage =
+ make_unique<FetchStage>(&_txn, ws.get(), ms, nullptr, coll);
+
+ auto statusWithPlanExecutor = PlanExecutor::make(
+ &_txn, std::move(ws), std::move(fetchStage), coll, PlanExecutor::YIELD_MANUAL);
+ ASSERT_OK(statusWithPlanExecutor.getStatus());
+ unique_ptr<PlanExecutor> exec = std::move(statusWithPlanExecutor.getValue());
for (int i = 0; i < N; ++i) {
BSONObj first, second;
@@ -426,11 +416,11 @@ public:
addIndex(firstIndex);
addIndex(secondIndex);
- WorkingSet* ws = new WorkingSet();
+ unique_ptr<WorkingSet> ws = make_unique<WorkingSet>();
// Sort by c:1
MergeSortStageParams msparams;
msparams.pattern = BSON("c" << 1);
- MergeSortStage* ms = new MergeSortStage(msparams, ws, coll);
+ MergeSortStage* ms = new MergeSortStage(msparams, ws.get(), coll);
// a:1
IndexScanParams params;
@@ -440,23 +430,20 @@ public:
params.bounds.endKey = objWithMaxKey(1);
params.bounds.endKeyInclusive = true;
params.direction = 1;
- ms->addChild(new IndexScan(&_txn, params, ws, NULL));
+ ms->addChild(new IndexScan(&_txn, params, ws.get(), NULL));
// b:51 (EOF)
params.descriptor = getIndex(secondIndex, coll);
params.bounds.startKey = BSON("" << 51 << "" << MinKey);
params.bounds.endKey = BSON("" << 51 << "" << MaxKey);
- ms->addChild(new IndexScan(&_txn, params, ws, NULL));
-
- PlanExecutor* rawExec;
- Status status = PlanExecutor::make(&_txn,
- ws,
- new FetchStage(&_txn, ws, ms, NULL, coll),
- coll,
- PlanExecutor::YIELD_MANUAL,
- &rawExec);
- ASSERT_OK(status);
- std::unique_ptr<PlanExecutor> exec(rawExec);
+ ms->addChild(new IndexScan(&_txn, params, ws.get(), NULL));
+ unique_ptr<FetchStage> fetchStage =
+ make_unique<FetchStage>(&_txn, ws.get(), ms, nullptr, coll);
+
+ auto statusWithPlanExecutor = PlanExecutor::make(
+ &_txn, std::move(ws), std::move(fetchStage), coll, PlanExecutor::YIELD_MANUAL);
+ ASSERT_OK(statusWithPlanExecutor.getStatus());
+ unique_ptr<PlanExecutor> exec = std::move(statusWithPlanExecutor.getValue());
// Only getting results from the a:1 index scan.
for (int i = 0; i < N; ++i) {
@@ -485,11 +472,11 @@ public:
wuow.commit();
}
- WorkingSet* ws = new WorkingSet();
+ unique_ptr<WorkingSet> ws = make_unique<WorkingSet>();
// Sort by foo:1
MergeSortStageParams msparams;
msparams.pattern = BSON("foo" << 1);
- MergeSortStage* ms = new MergeSortStage(msparams, ws, coll);
+ MergeSortStage* ms = new MergeSortStage(msparams, ws.get(), coll);
IndexScanParams params;
params.bounds.isSimpleRange = true;
@@ -507,18 +494,15 @@ public:
BSONObj indexSpec = BSON(index << 1 << "foo" << 1);
addIndex(indexSpec);
params.descriptor = getIndex(indexSpec, coll);
- ms->addChild(new IndexScan(&_txn, params, ws, NULL));
+ ms->addChild(new IndexScan(&_txn, params, ws.get(), NULL));
}
+ unique_ptr<FetchStage> fetchStage =
+ make_unique<FetchStage>(&_txn, ws.get(), ms, nullptr, coll);
- PlanExecutor* rawExec;
- Status status = PlanExecutor::make(&_txn,
- ws,
- new FetchStage(&_txn, ws, ms, NULL, coll),
- coll,
- PlanExecutor::YIELD_MANUAL,
- &rawExec);
- ASSERT_OK(status);
- std::unique_ptr<PlanExecutor> exec(rawExec);
+ auto statusWithPlanExecutor = PlanExecutor::make(
+ &_txn, std::move(ws), std::move(fetchStage), coll, PlanExecutor::YIELD_MANUAL);
+ ASSERT_OK(statusWithPlanExecutor.getStatus());
+ unique_ptr<PlanExecutor> exec = std::move(statusWithPlanExecutor.getValue());
for (int i = 0; i < numIndices; ++i) {
BSONObj obj;
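The merge-sort hunks above all follow the same shape: stages owned by the tree are still built with raw new, while the WorkingSet and the root FetchStage are wrapped in unique_ptr and handed to PlanExecutor::make, which now reports failure through the returned StatusWith instead of an out-parameter. As a rough standalone sketch of that ownership flow (plain C++14 standard library only; Executor, WorkingSet, StatusWithExec and makeExecutor are invented stand-ins, not MongoDB's actual types):

#include <cassert>
#include <memory>
#include <string>
#include <utility>

// Stand-ins for the real types; only the ownership flow matters here.
struct Stage { virtual ~Stage() = default; };
struct WorkingSet {};
struct Executor {
    std::unique_ptr<WorkingSet> ws;
    std::unique_ptr<Stage> root;
};

// Minimal StatusWith-like wrapper: either an error string or an owned value.
struct StatusWithExec {
    std::string error;                  // empty means OK
    std::unique_ptr<Executor> value;
    bool isOK() const { return error.empty(); }
};

// Factory in the style of the new PlanExecutor::make(): takes ownership of its
// inputs by unique_ptr and returns ownership of the result, or an error.
StatusWithExec makeExecutor(std::unique_ptr<WorkingSet> ws, std::unique_ptr<Stage> root) {
    if (!root) {
        return {"no root stage", nullptr};
    }
    auto exec = std::make_unique<Executor>();
    exec->ws = std::move(ws);
    exec->root = std::move(root);
    return {"", std::move(exec)};
}

int main() {
    auto ws = std::make_unique<WorkingSet>();
    auto root = std::make_unique<Stage>();

    // Same consumption pattern as the test hunks: check the status, then
    // move the owned executor out of the wrapper.
    auto statusWithExec = makeExecutor(std::move(ws), std::move(root));
    assert(statusWithExec.isOK());
    std::unique_ptr<Executor> exec = std::move(statusWithExec.value);
    return exec ? 0 : 1;
}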
diff --git a/src/mongo/dbtests/query_stage_sort.cpp b/src/mongo/dbtests/query_stage_sort.cpp
index c2b30c1d084..0de6c1a3d9e 100644
--- a/src/mongo/dbtests/query_stage_sort.cpp
+++ b/src/mongo/dbtests/query_stage_sort.cpp
@@ -39,6 +39,7 @@
#include "mongo/db/operation_context_impl.h"
#include "mongo/db/query/plan_executor.h"
#include "mongo/dbtests/dbtests.h"
+#include "mongo/stdx/memory.h"
/**
* This file tests db/exec/sort.cpp
@@ -46,8 +47,9 @@
namespace QueryStageSortTests {
-using std::unique_ptr;
using std::set;
+using std::unique_ptr;
+using stdx::make_unique;
class QueryStageSortTestBase {
public:
@@ -100,9 +102,8 @@ public:
* which is owned by the caller.
*/
PlanExecutor* makePlanExecutorWithSortStage(Collection* coll) {
- PlanExecutor* exec;
// Build the mock scan stage which feeds the data.
- std::unique_ptr<WorkingSet> ws(new WorkingSet());
+ unique_ptr<WorkingSet> ws(new WorkingSet());
unique_ptr<QueuedDataStage> ms(new QueuedDataStage(ws.get()));
insertVarietyOfObjects(ms.get(), coll);
@@ -114,10 +115,10 @@ public:
// The PlanExecutor will be automatically registered on construction due to the auto
// yield policy, so it can receive invalidations when we remove documents later.
- Status execStatus = PlanExecutor::make(
- &_txn, ws.release(), ss.release(), coll, PlanExecutor::YIELD_AUTO, &exec);
- invariant(execStatus.isOK());
- return exec;
+ auto statusWithPlanExecutor =
+ PlanExecutor::make(&_txn, std::move(ws), std::move(ss), coll, PlanExecutor::YIELD_AUTO);
+ invariant(statusWithPlanExecutor.isOK());
+ return statusWithPlanExecutor.getValue().release();
}
// Return a value in the set {-1, 0, 1} to represent the sign of parameter i. Used to
@@ -135,8 +136,8 @@ public:
* If limit is not zero, we limit the output of the sort stage to 'limit' results.
*/
void sortAndCheck(int direction, Collection* coll) {
- WorkingSet* ws = new WorkingSet();
- QueuedDataStage* ms = new QueuedDataStage(ws);
+ unique_ptr<WorkingSet> ws = make_unique<WorkingSet>();
+ QueuedDataStage* ms = new QueuedDataStage(ws.get());
// Insert a mix of the various types of data.
insertVarietyOfObjects(ms, coll);
@@ -146,17 +147,14 @@ public:
params.pattern = BSON("foo" << direction);
params.limit = limit();
+ unique_ptr<FetchStage> fetchStage = make_unique<FetchStage>(
+ &_txn, ws.get(), new SortStage(params, ws.get(), ms), nullptr, coll);
+
// Must fetch so we can look at the doc as a BSONObj.
- PlanExecutor* rawExec;
- Status status =
- PlanExecutor::make(&_txn,
- ws,
- new FetchStage(&_txn, ws, new SortStage(params, ws, ms), NULL, coll),
- coll,
- PlanExecutor::YIELD_MANUAL,
- &rawExec);
- ASSERT_OK(status);
- std::unique_ptr<PlanExecutor> exec(rawExec);
+ auto statusWithPlanExecutor = PlanExecutor::make(
+ &_txn, std::move(ws), std::move(fetchStage), coll, PlanExecutor::YIELD_MANUAL);
+ ASSERT_OK(statusWithPlanExecutor.getStatus());
+ unique_ptr<PlanExecutor> exec = std::move(statusWithPlanExecutor.getValue());
// Look at pairs of objects to make sure that the sort order is pairwise (and therefore
// totally) correct.
@@ -316,7 +314,7 @@ public:
set<RecordId> locs;
getLocs(&locs, coll);
- std::unique_ptr<PlanExecutor> exec(makePlanExecutorWithSortStage(coll));
+ unique_ptr<PlanExecutor> exec(makePlanExecutorWithSortStage(coll));
SortStage* ss = static_cast<SortStage*>(exec->getRootStage());
QueuedDataStage* ms = static_cast<QueuedDataStage*>(ss->getChildren()[0]);
@@ -425,7 +423,7 @@ public:
set<RecordId> locs;
getLocs(&locs, coll);
- std::unique_ptr<PlanExecutor> exec(makePlanExecutorWithSortStage(coll));
+ unique_ptr<PlanExecutor> exec(makePlanExecutorWithSortStage(coll));
SortStage* ss = static_cast<SortStage*>(exec->getRootStage());
QueuedDataStage* ms = static_cast<QueuedDataStage*>(ss->getChildren()[0]);
@@ -514,8 +512,8 @@ public:
wuow.commit();
}
- WorkingSet* ws = new WorkingSet();
- QueuedDataStage* ms = new QueuedDataStage(ws);
+ unique_ptr<WorkingSet> ws = make_unique<WorkingSet>();
+ QueuedDataStage* ms = new QueuedDataStage(ws.get());
for (int i = 0; i < numObj(); ++i) {
WorkingSetMember member;
@@ -534,16 +532,12 @@ public:
params.pattern = BSON("b" << -1 << "c" << 1 << "a" << 1);
params.limit = 0;
+ unique_ptr<FetchStage> fetchStage = make_unique<FetchStage>(
+ &_txn, ws.get(), new SortStage(params, ws.get(), ms), nullptr, coll);
// We don't get results back since we're sorting some parallel arrays.
- PlanExecutor* rawExec;
- Status status =
- PlanExecutor::make(&_txn,
- ws,
- new FetchStage(&_txn, ws, new SortStage(params, ws, ms), NULL, coll),
- coll,
- PlanExecutor::YIELD_MANUAL,
- &rawExec);
- std::unique_ptr<PlanExecutor> exec(rawExec);
+ auto statusWithPlanExecutor = PlanExecutor::make(
+ &_txn, std::move(ws), std::move(fetchStage), coll, PlanExecutor::YIELD_MANUAL);
+ unique_ptr<PlanExecutor> exec = std::move(statusWithPlanExecutor.getValue());
PlanExecutor::ExecState runnerState = exec->getNext(NULL, NULL);
ASSERT_EQUALS(PlanExecutor::FAILURE, runnerState);
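One wrinkle in the sort-stage hunks: makePlanExecutorWithSortStage() documents that its return value "is owned by the caller", so the modernized body calls release() on the StatusWith value to hand a raw pointer across that legacy boundary, and each caller immediately re-wraps it in a unique_ptr. A minimal sketch of that release-then-readopt pattern, with Executor, makeExecutor and makeExecutorLegacy as invented illustration names:

#include <memory>

struct Executor {
    int state = 0;
};

// Modern factory: returns ownership via unique_ptr.
std::unique_ptr<Executor> makeExecutor() {
    return std::make_unique<Executor>();
}

// Legacy helper in the style of makePlanExecutorWithSortStage(): its contract
// is "returns a raw pointer owned by the caller", so it releases the unique_ptr.
Executor* makeExecutorLegacy() {
    return makeExecutor().release();
}

int main() {
    // The caller immediately re-adopts ownership, as the tests above do with
    // unique_ptr<PlanExecutor> exec(makePlanExecutorWithSortStage(coll)).
    std::unique_ptr<Executor> exec(makeExecutorLegacy());
    return exec->state;
}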
diff --git a/src/mongo/dbtests/query_stage_tests.cpp b/src/mongo/dbtests/query_stage_tests.cpp
index 2f7d59b3aa6..73dc9e9f23e 100644
--- a/src/mongo/dbtests/query_stage_tests.cpp
+++ b/src/mongo/dbtests/query_stage_tests.cpp
@@ -38,6 +38,7 @@
#include "mongo/db/operation_context_impl.h"
#include "mongo/db/query/plan_executor.h"
#include "mongo/dbtests/dbtests.h"
+#include "mongo/stdx/memory.h"
/**
* This file tests db/exec/index_scan.cpp
@@ -80,17 +81,14 @@ public:
verify(swme.isOK());
unique_ptr<MatchExpression> filterExpr(swme.getValue());
- WorkingSet* ws = new WorkingSet();
-
- PlanExecutor* rawExec;
- Status status = PlanExecutor::make(&_txn,
- ws,
- new IndexScan(&_txn, params, ws, filterExpr.get()),
- ctx.getCollection(),
- PlanExecutor::YIELD_MANUAL,
- &rawExec);
- ASSERT_OK(status);
- std::unique_ptr<PlanExecutor> exec(rawExec);
+ unique_ptr<WorkingSet> ws = stdx::make_unique<WorkingSet>();
+ unique_ptr<IndexScan> ix =
+ stdx::make_unique<IndexScan>(&_txn, params, ws.get(), filterExpr.get());
+
+ auto statusWithPlanExecutor = PlanExecutor::make(
+ &_txn, std::move(ws), std::move(ix), ctx.getCollection(), PlanExecutor::YIELD_MANUAL);
+ ASSERT_OK(statusWithPlanExecutor.getStatus());
+ unique_ptr<PlanExecutor> exec = std::move(statusWithPlanExecutor.getValue());
int count = 0;
for (RecordId dl; PlanExecutor::ADVANCED == exec->getNext(NULL, &dl);) {
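In the index-scan hunk just above, the IndexScan keeps a non-owning ws.get() pointer while the WorkingSet itself is moved into PlanExecutor::make on the next statement; the borrow stays valid because the executor becomes the new owner of the same object and outlives the stage. A small standalone sketch of that "borrow with get(), then transfer ownership to the aggregate" ordering; Scanner, Runner and WorkingSet here are invented stand-ins:

#include <memory>

struct WorkingSet {
    int entries = 0;
};

// Non-owning user of the working set (in the role of IndexScan).
struct Scanner {
    WorkingSet* ws;                     // borrowed, never deleted here
    explicit Scanner(WorkingSet* w) : ws(w) {}
    void work() { ++ws->entries; }
};

// Owning aggregate (in the role of PlanExecutor): it owns both pieces, so the
// Scanner's borrowed pointer stays valid for the aggregate's lifetime.
struct Runner {
    std::unique_ptr<WorkingSet> ws;
    std::unique_ptr<Scanner> scanner;
};

int main() {
    auto ws = std::make_unique<WorkingSet>();
    // Take the non-owning pointer *before* ws is moved away.
    auto scanner = std::make_unique<Scanner>(ws.get());

    Runner runner{std::move(ws), std::move(scanner)};
    runner.scanner->work();
    return runner.ws->entries == 1 ? 0 : 1;
}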
diff --git a/src/mongo/s/d_migrate.cpp b/src/mongo/s/d_migrate.cpp
index 692a0890b99..84b9374a58a 100644
--- a/src/mongo/s/d_migrate.cpp
+++ b/src/mongo/s/d_migrate.cpp
@@ -81,6 +81,7 @@
#include "mongo/s/client/shard_registry.h"
#include "mongo/s/grid.h"
#include "mongo/s/shard_key_pattern.h"
+#include "mongo/stdx/memory.h"
#include "mongo/util/assert_util.h"
#include "mongo/util/elapsed_tracker.h"
#include "mongo/util/exit.h"
@@ -491,15 +492,15 @@ public:
std::lock_guard<std::mutex> sl(_mutex);
invariant(_deleteNotifyExec.get() == NULL);
- WorkingSet* ws = new WorkingSet();
- DeleteNotificationStage* dns = new DeleteNotificationStage();
- PlanExecutor* deleteNotifyExec;
+ std::unique_ptr<WorkingSet> ws = stdx::make_unique<WorkingSet>();
+ std::unique_ptr<DeleteNotificationStage> dns =
+ stdx::make_unique<DeleteNotificationStage>();
// Takes ownership of 'ws' and 'dns'.
- Status execStatus = PlanExecutor::make(
- txn, ws, dns, collection, PlanExecutor::YIELD_MANUAL, &deleteNotifyExec);
- invariant(execStatus.isOK());
- deleteNotifyExec->registerExec();
- _deleteNotifyExec.reset(deleteNotifyExec);
+ auto statusWithPlanExecutor = PlanExecutor::make(
+ txn, std::move(ws), std::move(dns), collection, PlanExecutor::YIELD_MANUAL);
+ invariant(statusWithPlanExecutor.isOK());
+ _deleteNotifyExec = std::move(statusWithPlanExecutor.getValue());
+ _deleteNotifyExec->registerExec();
min = Helpers::toKeyFormat(kp.extendRangeBound(_min, false));
max = Helpers::toKeyFormat(kp.extendRangeBound(_max, false));
@@ -1661,7 +1662,6 @@ public:
//
// if the commit made it to the config, we'll see the chunk in the new shard and
// there's no action
- //
// if the commit did not make it, currently the only way to fix this state is to
// bounce the mongod so that the old state (before migrating) can be brought in
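The d_migrate.cpp hunk moves the freshly built executor straight into the _deleteNotifyExec member and only then calls registerExec() through that member, rather than registering via a local raw pointer and reset()-ing it into place afterwards. A minimal sketch of that assign-the-member-first ordering, assuming an always-succeeding factory; Notifier, makeNotifier and MigrateSource are invented names:

#include <cassert>
#include <memory>

struct Notifier {
    bool registered = false;
    void registerExec() { registered = true; }
};

// Factory stand-in: in the real code this is the StatusWith-returning
// PlanExecutor::make(); here it always succeeds.
std::unique_ptr<Notifier> makeNotifier() {
    return std::make_unique<Notifier>();
}

class MigrateSource {
public:
    void start() {
        assert(_deleteNotify == nullptr);
        // Move ownership into the member first, then use the member, mirroring
        // _deleteNotifyExec = std::move(...); _deleteNotifyExec->registerExec();
        _deleteNotify = makeNotifier();
        _deleteNotify->registerExec();
    }
    bool started() const { return _deleteNotify && _deleteNotify->registered; }

private:
    std::unique_ptr<Notifier> _deleteNotify;
};

int main() {
    MigrateSource source;
    source.start();
    return source.started() ? 0 : 1;
}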
diff --git a/src/mongo/s/d_split.cpp b/src/mongo/s/d_split.cpp
index 5501cd3795d..13a0e93ec8e 100644
--- a/src/mongo/s/d_split.cpp
+++ b/src/mongo/s/d_split.cpp
@@ -487,8 +487,8 @@ public:
log() << "splitVector doing another cycle because of force, keyCount now: "
<< keyCount << endl;
- exec.reset(InternalPlanner::indexScan(
- txn, collection, idx, min, max, false, InternalPlanner::FORWARD));
+ exec = InternalPlanner::indexScan(
+ txn, collection, idx, min, max, false, InternalPlanner::FORWARD);
exec->setYieldPolicy(PlanExecutor::YIELD_AUTO);
state = exec->getNext(&currKey, NULL);
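The final d_split.cpp hunk leans on InternalPlanner::indexScan() now returning a unique_ptr directly, so exec.reset(rawPointer) becomes a plain move assignment that destroys the previous executor and adopts the new one in one step. A tiny standalone sketch of reassigning a unique_ptr from such a factory; Cursor and openCursor are invented names:

#include <memory>

struct Cursor {
    int position = 0;
};

// Factory that already returns ownership, like the modernized
// InternalPlanner::indexScan().
std::unique_ptr<Cursor> openCursor() {
    return std::make_unique<Cursor>();
}

int main() {
    std::unique_ptr<Cursor> exec = openCursor();
    exec->position = 42;

    // Re-run the scan: plain move assignment replaces the old
    // exec.reset(rawPointerFactory()) call; the previous Cursor is
    // destroyed automatically before the new one is adopted.
    exec = openCursor();
    return exec->position;   // 0 again
}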