-rw-r--r--  jstests/core/expr.js  16
-rw-r--r--  jstests/noPassthrough/commands_preserve_exec_error_code.js  28
-rw-r--r--  jstests/serial_run/memory.js  2
-rw-r--r--  jstests/sharding/error_propagation.js  3
-rw-r--r--  src/mongo/db/SConscript  1
-rw-r--r--  src/mongo/db/catalog/capped_utils.cpp  5
-rw-r--r--  src/mongo/db/catalog/multi_index_block.cpp  56
-rw-r--r--  src/mongo/db/commands/count_cmd.cpp  3
-rw-r--r--  src/mongo/db/commands/dbcommands.cpp  36
-rw-r--r--  src/mongo/db/commands/dbcommands_d.cpp  117
-rw-r--r--  src/mongo/db/commands/dbhash.cpp  24
-rw-r--r--  src/mongo/db/commands/distinct.cpp  72
-rw-r--r--  src/mongo/db/commands/find_and_modify.cpp  37
-rw-r--r--  src/mongo/db/commands/find_cmd.cpp  44
-rw-r--r--  src/mongo/db/commands/getmore_cmd.cpp  80
-rw-r--r--  src/mongo/db/commands/run_aggregate.cpp  31
-rw-r--r--  src/mongo/db/dbhelpers.cpp  3
-rw-r--r--  src/mongo/db/exec/and_hash.cpp  28
-rw-r--r--  src/mongo/db/exec/and_sorted.cpp  21
-rw-r--r--  src/mongo/db/exec/cached_plan.cpp  43
-rw-r--r--  src/mongo/db/exec/collection_scan.cpp  35
-rw-r--r--  src/mongo/db/exec/collection_scan.h  4
-rw-r--r--  src/mongo/db/exec/count.cpp  6
-rw-r--r--  src/mongo/db/exec/delete.cpp  7
-rw-r--r--  src/mongo/db/exec/fetch.cpp  6
-rw-r--r--  src/mongo/db/exec/geo_near.cpp  35
-rw-r--r--  src/mongo/db/exec/geo_near.h  16
-rw-r--r--  src/mongo/db/exec/limit.cpp  5
-rw-r--r--  src/mongo/db/exec/merge_sort.cpp  6
-rw-r--r--  src/mongo/db/exec/mock_stage.cpp  72
-rw-r--r--  src/mongo/db/exec/mock_stage.h  109
-rw-r--r--  src/mongo/db/exec/multi_plan.cpp  110
-rw-r--r--  src/mongo/db/exec/multi_plan.h  29
-rw-r--r--  src/mongo/db/exec/near.cpp  36
-rw-r--r--  src/mongo/db/exec/near.h  20
-rw-r--r--  src/mongo/db/exec/or.cpp  6
-rw-r--r--  src/mongo/db/exec/pipeline_proxy.cpp  4
-rw-r--r--  src/mongo/db/exec/plan_stage.h  47
-rw-r--r--  src/mongo/db/exec/projection.cpp  25
-rw-r--r--  src/mongo/db/exec/projection.h  8
-rw-r--r--  src/mongo/db/exec/queued_data_stage.cpp  32
-rw-r--r--  src/mongo/db/exec/queued_data_stage.h  12
-rw-r--r--  src/mongo/db/exec/queued_data_stage_test.cpp  11
-rw-r--r--  src/mongo/db/exec/return_key.cpp  21
-rw-r--r--  src/mongo/db/exec/return_key.h  2
-rw-r--r--  src/mongo/db/exec/skip.cpp  6
-rw-r--r--  src/mongo/db/exec/sort.cpp  19
-rw-r--r--  src/mongo/db/exec/sort_key_generator.cpp  12
-rw-r--r--  src/mongo/db/exec/stagedebug_cmd.cpp  12
-rw-r--r--  src/mongo/db/exec/text_match.cpp  10
-rw-r--r--  src/mongo/db/exec/text_or.cpp  13
-rw-r--r--  src/mongo/db/exec/trial_stage.cpp  12
-rw-r--r--  src/mongo/db/exec/update_stage.cpp  11
-rw-r--r--  src/mongo/db/exec/working_set_common.cpp  73
-rw-r--r--  src/mongo/db/exec/working_set_common.h  50
-rw-r--r--  src/mongo/db/ops/delete.cpp  2
-rw-r--r--  src/mongo/db/ops/update.cpp  2
-rw-r--r--  src/mongo/db/ops/write_ops_exec.cpp  4
-rw-r--r--  src/mongo/db/pipeline/document_source_cursor.cpp  77
-rw-r--r--  src/mongo/db/query/explain.cpp  8
-rw-r--r--  src/mongo/db/query/find.cpp  152
-rw-r--r--  src/mongo/db/query/find.h  12
-rw-r--r--  src/mongo/db/query/plan_executor.h  72
-rw-r--r--  src/mongo/db/query/plan_executor_impl.cpp  96
-rw-r--r--  src/mongo/db/query/plan_executor_impl.h  20
-rw-r--r--  src/mongo/db/query/stage_builder.cpp  1
-rw-r--r--  src/mongo/db/query/stage_types.h  2
-rw-r--r--  src/mongo/db/repl/rs_rollback.cpp  76
-rw-r--r--  src/mongo/db/repl/storage_interface_impl.cpp  63
-rw-r--r--  src/mongo/db/s/chunk_splitter.cpp  18
-rw-r--r--  src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp  111
-rw-r--r--  src/mongo/db/s/range_deletion_util.cpp  23
-rw-r--r--  src/mongo/db/s/split_vector.cpp  54
-rw-r--r--  src/mongo/db/s/split_vector.h  18
-rw-r--r--  src/mongo/db/s/split_vector_command.cpp  23
-rw-r--r--  src/mongo/db/s/split_vector_test.cpp  228
-rw-r--r--  src/mongo/db/transaction_history_iterator.cpp  13
-rw-r--r--  src/mongo/db/ttl.cpp  7
-rw-r--r--  src/mongo/dbtests/query_plan_executor.cpp  24
-rw-r--r--  src/mongo/dbtests/query_stage_and.cpp  91
-rw-r--r--  src/mongo/dbtests/query_stage_cached_plan.cpp  29
-rw-r--r--  src/mongo/dbtests/query_stage_collscan.cpp  2
-rw-r--r--  src/mongo/dbtests/query_stage_count.cpp  23
-rw-r--r--  src/mongo/dbtests/query_stage_distinct.cpp  1
-rw-r--r--  src/mongo/dbtests/query_stage_ensure_sorted.cpp  1
-rw-r--r--  src/mongo/dbtests/query_stage_ixscan.cpp  3
-rw-r--r--  src/mongo/dbtests/query_stage_limit_skip.cpp  18
-rw-r--r--  src/mongo/dbtests/query_stage_multiplan.cpp  32
-rw-r--r--  src/mongo/dbtests/query_stage_near.cpp  16
-rw-r--r--  src/mongo/dbtests/query_stage_sort.cpp  8
-rw-r--r--  src/mongo/dbtests/query_stage_subplan.cpp  1
-rw-r--r--  src/mongo/dbtests/query_stage_trial.cpp  62
92 files changed, 1224 insertions, 1701 deletions
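Taken together, the diffs below replace the PlanExecutor::FAILURE return state, and the status-bearing WorkingSetMember that traveled with it, with C++ exceptions thrown directly from getNext() and doWork(). The following is a minimal stand-alone sketch of the before/after caller shape; Executor and its API are toy stand-ins for illustration, not the real PlanExecutor interface.

#include <cstddef>
#include <iostream>
#include <optional>
#include <stdexcept>
#include <string>
#include <vector>

// Toy executor: before this commit, callers inspected a FAILURE ExecState and
// fished the error out of a WorkingSetMember; after it, getNext() simply
// throws on error and returns nullopt at EOF.
class Executor {
public:
    explicit Executor(std::vector<int> docs) : _docs(std::move(docs)) {}

    std::optional<int> getNext() {
        if (_pos == _docs.size())
            return std::nullopt;  // EOF
        if (_docs[_pos] < 0)      // simulate a runtime execution error
            throw std::runtime_error("executor error at doc " + std::to_string(_pos));
        return _docs[_pos++];
    }

private:
    std::vector<int> _docs;
    std::size_t _pos = 0;
};

int main() {
    Executor exec({1, 2, -1});
    try {
        while (auto doc = exec.getNext())
            std::cout << "got " << *doc << "\n";
        std::cout << "EOF\n";
    } catch (const std::exception& ex) {
        // This branch replaces the old 'if (PlanExecutor::FAILURE == state)' checks.
        std::cout << "executor failed: " << ex.what() << "\n";
    }
}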
diff --git a/jstests/core/expr.js b/jstests/core/expr.js
index 4b39d05bc94..0810808ab00 100644
--- a/jstests/core/expr.js
+++ b/jstests/core/expr.js
@@ -2,6 +2,9 @@
// does_not_support_stepdowns,
// requires_getmore,
// requires_non_retryable_writes,
+// # Explain reports errors from $expr differently in 4.4 and older, so this test assumes that all
+// # nodes are at least binary version 4.6.
+// requires_fcv_46,
// ]
// Tests for $expr in the CRUD commands.
@@ -113,10 +116,15 @@ assert.throws(function() {
coll.find({$expr: {$eq: ["$a", "$$unbound"]}}).explain();
});
-// $expr with division by zero in find with explain with executionStats throws.
-assert.throws(function() {
- coll.find({$expr: {$divide: [1, "$a"]}}).explain("executionStats");
-});
+// $expr which causes a runtime error should be caught by explain and reported as an error in the
+// 'executionSuccess' field.
+let explain = coll.find({$expr: {$divide: [1, "$a"]}}).explain("executionStats");
+// Accommodate format differences between explain via mongos and explain directly on a mongod.
+if (!isMongos) {
+ assert(explain.hasOwnProperty("executionStats"), explain);
+ assert.eq(explain.executionStats.executionSuccess, false, explain);
+ assert.eq(explain.executionStats.errorCode, 16609, explain);
+}
// $expr is not allowed in $elemMatch projection.
coll.drop();
diff --git a/jstests/noPassthrough/commands_preserve_exec_error_code.js b/jstests/noPassthrough/commands_preserve_exec_error_code.js
index 3d0d1136f92..1c7abf5bcb0 100644
--- a/jstests/noPassthrough/commands_preserve_exec_error_code.js
+++ b/jstests/noPassthrough/commands_preserve_exec_error_code.js
@@ -5,6 +5,8 @@
(function() {
"use strict";
+const kPlanExecAlwaysFailsCode = 4382101;
+
const mongod = MongoRunner.runMongod({});
assert.neq(mongod, null, "mongod failed to start up");
const db = mongod.getDB("test");
@@ -17,35 +19,35 @@ assert.commandWorked(coll.createIndex({geo: "2d"}));
assert.commandWorked(
db.adminCommand({configureFailPoint: "planExecutorAlwaysFails", mode: "alwaysOn"}));
-function assertFailsWithInternalError(fn) {
+function assertFailsWithExpectedError(fn) {
const error = assert.throws(fn);
- assert.eq(error.code, ErrorCodes.InternalError, tojson(error));
+ assert.eq(error.code, kPlanExecAlwaysFailsCode, tojson(error));
assert.neq(-1,
error.message.indexOf("planExecutorAlwaysFails"),
"Expected error message to be preserved");
}
-function assertCmdFailsWithInternalError(cmd) {
+function assertCmdFailsWithExpectedError(cmd) {
const res =
- assert.commandFailedWithCode(db.runCommand(cmd), ErrorCodes.InternalError, tojson(cmd));
+ assert.commandFailedWithCode(db.runCommand(cmd), kPlanExecAlwaysFailsCode, tojson(cmd));
assert.neq(-1,
res.errmsg.indexOf("planExecutorAlwaysFails"),
"Expected error message to be preserved");
}
-assertFailsWithInternalError(() => coll.find().itcount());
-assertFailsWithInternalError(() => coll.updateOne({_id: 1}, {$set: {x: 2}}));
-assertFailsWithInternalError(() => coll.deleteOne({_id: 1}));
-assertFailsWithInternalError(() => coll.count({_id: 1}));
-assertFailsWithInternalError(() => coll.aggregate([]).itcount());
-assertFailsWithInternalError(
+assertFailsWithExpectedError(() => coll.find().itcount());
+assertFailsWithExpectedError(() => coll.updateOne({_id: 1}, {$set: {x: 2}}));
+assertFailsWithExpectedError(() => coll.deleteOne({_id: 1}));
+assertFailsWithExpectedError(() => coll.count({_id: 1}));
+assertFailsWithExpectedError(() => coll.aggregate([]).itcount());
+assertFailsWithExpectedError(
() => coll.aggregate([{$geoNear: {near: [0, 0], distanceField: "d"}}]).itcount());
-assertCmdFailsWithInternalError({distinct: coll.getName(), key: "_id"});
-assertCmdFailsWithInternalError(
+assertCmdFailsWithExpectedError({distinct: coll.getName(), key: "_id"});
+assertCmdFailsWithExpectedError(
{findAndModify: coll.getName(), query: {_id: 1}, update: {$set: {x: 2}}});
const cmdRes = db.runCommand({find: coll.getName(), batchSize: 0});
assert.commandWorked(cmdRes);
-assertCmdFailsWithInternalError(
+assertCmdFailsWithExpectedError(
{getMore: cmdRes.cursor.id, collection: coll.getName(), batchSize: 1});
assert.commandWorked(db.adminCommand({configureFailPoint: "planExecutorAlwaysFails", mode: "off"}));
diff --git a/jstests/serial_run/memory.js b/jstests/serial_run/memory.js
index dedf8f5fadb..05be9901151 100644
--- a/jstests/serial_run/memory.js
+++ b/jstests/serial_run/memory.js
@@ -38,7 +38,7 @@ function assertMemoryError(func) {
try {
func();
} catch (e) {
- if (e.message.includes('"errmsg" : "Out of memory"')) {
+ if (e.message.includes("Out of memory")) {
return;
}
throw e;
diff --git a/jstests/sharding/error_propagation.js b/jstests/sharding/error_propagation.js
index 5845581a5f1..6f47075f753 100644
--- a/jstests/sharding/error_propagation.js
+++ b/jstests/sharding/error_propagation.js
@@ -18,7 +18,6 @@ assert.commandWorked(db.foo.insert({a: [1, 2]}, {writeConcern: {w: 3}}));
var res = db.runCommand(
{aggregate: 'foo', pipeline: [{$project: {total: {'$add': ['$a', 1]}}}], cursor: {}});
-assert.commandFailed(res);
-assert.eq("$add only supports numeric or date types, not array", res.errmsg, printjson(res));
+assert.commandFailedWithCode(res, 16554);
st.stop();
}());
diff --git a/src/mongo/db/SConscript b/src/mongo/db/SConscript
index a21e77b47cc..5ec20fc2915 100644
--- a/src/mongo/db/SConscript
+++ b/src/mongo/db/SConscript
@@ -1072,6 +1072,7 @@ env.Library(
'exec/index_scan.cpp',
'exec/limit.cpp',
'exec/merge_sort.cpp',
+ 'exec/mock_stage.cpp',
'exec/multi_iterator.cpp',
'exec/multi_plan.cpp',
'exec/near.cpp',
diff --git a/src/mongo/db/catalog/capped_utils.cpp b/src/mongo/db/catalog/capped_utils.cpp
index 0cd1de8b507..182c7eaa7d0 100644
--- a/src/mongo/db/catalog/capped_utils.cpp
+++ b/src/mongo/db/catalog/capped_utils.cpp
@@ -174,12 +174,12 @@ void cloneCollectionAsCapped(OperationContext* opCtx,
Snapshotted<BSONObj> objToClone;
RecordId loc;
- PlanExecutor::ExecState state = PlanExecutor::FAILURE; // suppress uninitialized warnings
DisableDocumentValidation validationDisabler(opCtx);
int retries = 0; // non-zero when retrying our last document.
while (true) {
+ PlanExecutor::ExecState state = PlanExecutor::IS_EOF;
if (!retries) {
state = exec->getNextSnapshotted(&objToClone, &loc);
}
@@ -195,9 +195,6 @@ void cloneCollectionAsCapped(OperationContext* opCtx,
}
break;
}
- default:
- // A collection scan plan which does not yield should never fail.
- MONGO_UNREACHABLE;
}
try {
diff --git a/src/mongo/db/catalog/multi_index_block.cpp b/src/mongo/db/catalog/multi_index_block.cpp
index 5493ba950a3..2f54d71f7ed 100644
--- a/src/mongo/db/catalog/multi_index_block.cpp
+++ b/src/mongo/db/catalog/multi_index_block.cpp
@@ -386,39 +386,39 @@ Status MultiIndexBlock::insertAllDocumentsInCollection(OperationContext* opCtx,
bool readOnce = useReadOnceCursorsForIndexBuilds.load();
opCtx->recoveryUnit()->setReadOnce(readOnce);
- BSONObj objToIndex;
- RecordId loc;
- PlanExecutor::ExecState state;
- while (PlanExecutor::ADVANCED == (state = exec->getNext(&objToIndex, &loc)) ||
- MONGO_unlikely(hangAfterStartingIndexBuild.shouldFail())) {
- auto interruptStatus = opCtx->checkForInterruptNoAssert();
- if (!interruptStatus.isOK())
- return opCtx->checkForInterruptNoAssert();
-
- if (PlanExecutor::ADVANCED != state) {
- continue;
- }
+ try {
+ BSONObj objToIndex;
+ RecordId loc;
+ PlanExecutor::ExecState state;
+ while (PlanExecutor::ADVANCED == (state = exec->getNext(&objToIndex, &loc)) ||
+ MONGO_unlikely(hangAfterStartingIndexBuild.shouldFail())) {
+ auto interruptStatus = opCtx->checkForInterruptNoAssert();
+ if (!interruptStatus.isOK())
+ return opCtx->checkForInterruptNoAssert();
+
+ if (PlanExecutor::ADVANCED != state) {
+ continue;
+ }
- progress->setTotalWhileRunning(collection->numRecords(opCtx));
+ progress->setTotalWhileRunning(collection->numRecords(opCtx));
- failPointHangDuringBuild(&hangBeforeIndexBuildOf, "before", objToIndex);
+ failPointHangDuringBuild(&hangBeforeIndexBuildOf, "before", objToIndex);
- // The external sorter is not part of the storage engine and therefore does not need a
- // WriteUnitOfWork to write keys.
- Status ret = insert(opCtx, objToIndex, loc);
- if (!ret.isOK()) {
- return ret;
- }
-
- failPointHangDuringBuild(&hangAfterIndexBuildOf, "after", objToIndex);
+ // The external sorter is not part of the storage engine and therefore does not need a
+ // WriteUnitOfWork to write keys.
+ Status ret = insert(opCtx, objToIndex, loc);
+ if (!ret.isOK()) {
+ return ret;
+ }
- // Go to the next document.
- progress->hit();
- n++;
- }
+ failPointHangDuringBuild(&hangAfterIndexBuildOf, "after", objToIndex);
- if (state != PlanExecutor::IS_EOF) {
- return exec->getMemberObjectStatus(objToIndex);
+ // Go to the next document.
+ progress->hit();
+ n++;
+ }
+ } catch (...) {
+ return exceptionToStatus();
}
if (MONGO_unlikely(leaveIndexBuildUnfinishedForShutdown.shouldFail())) {
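The multi_index_block.cpp hunk above shows the conversion pattern for callers that must keep returning a Status: wrap the whole drain loop in try/catch and convert at the boundary with exceptionToStatus(). A rough self-contained sketch of that shape follows; the Status type here is a toy stand-in for mongo::Status.

#include <exception>
#include <iostream>
#include <string>

// Toy stand-in for mongo::Status.
struct Status {
    bool ok;
    std::string reason;
    static Status OK() { return {true, ""}; }
};

// Rough analogue of mongo::exceptionToStatus(): capture the exception
// currently being handled and turn it into a Status at the boundary.
Status exceptionToStatus() {
    try {
        throw;  // rethrow the active exception so we can inspect its type
    } catch (const std::exception& ex) {
        return {false, ex.what()};
    } catch (...) {
        return {false, "unknown error"};
    }
}

Status drainExecutor() {
    try {
        for (int i = 0; i < 3; ++i) {
            if (i == 2)
                throw std::runtime_error("plan executor error");
            std::cout << "indexed doc " << i << "\n";
        }
    } catch (...) {
        return exceptionToStatus();  // same shape as the new catch block above
    }
    return Status::OK();
}

int main() {
    Status s = drainExecutor();
    std::cout << (s.ok ? "OK" : "failed: " + s.reason) << "\n";
}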
diff --git a/src/mongo/db/commands/count_cmd.cpp b/src/mongo/db/commands/count_cmd.cpp
index 7b0ec4e8fad..56a07deb865 100644
--- a/src/mongo/db/commands/count_cmd.cpp
+++ b/src/mongo/db/commands/count_cmd.cpp
@@ -259,8 +259,7 @@ public:
curOp->setPlanSummary_inlock(Explain::getPlanSummary(exec.get()));
}
- Status execPlanStatus = exec->executePlan();
- uassertStatusOK(execPlanStatus);
+ exec->executePlan();
PlanSummaryStats summaryStats;
Explain::getSummaryStats(*exec, &summaryStats);
diff --git a/src/mongo/db/commands/dbcommands.cpp b/src/mongo/db/commands/dbcommands.cpp
index e543e543de3..8d9d6b9d9c7 100644
--- a/src/mongo/db/commands/dbcommands.cpp
+++ b/src/mongo/db/commands/dbcommands.cpp
@@ -542,30 +542,28 @@ public:
long long size = 0;
long long numObjects = 0;
- RecordId loc;
- BSONObj obj;
- PlanExecutor::ExecState state;
- while (PlanExecutor::ADVANCED == (state = exec->getNext(&obj, &loc))) {
- if (estimate)
- size += avgObjSize;
- else
- size += collection->getRecordStore()->dataFor(opCtx, loc).size();
-
- numObjects++;
-
- if ((maxSize && size > maxSize) || (maxObjects && numObjects > maxObjects)) {
- result.appendBool("maxReached", true);
- break;
+ try {
+ RecordId loc;
+ while (PlanExecutor::ADVANCED == exec->getNext(static_cast<BSONObj*>(nullptr), &loc)) {
+ if (estimate)
+ size += avgObjSize;
+ else
+ size += collection->getRecordStore()->dataFor(opCtx, loc).size();
+
+ numObjects++;
+
+ if ((maxSize && size > maxSize) || (maxObjects && numObjects > maxObjects)) {
+ result.appendBool("maxReached", true);
+ break;
+ }
}
- }
-
- if (PlanExecutor::FAILURE == state) {
+ } catch (DBException& exception) {
LOGV2_WARNING(23801,
"Internal error while reading {namespace}",
"Internal error while reading",
"namespace"_attr = ns);
- uassertStatusOK(WorkingSetCommon::getMemberObjectStatus(obj).withContext(
- "Executor error while reading during dataSize command"));
+ exception.addContext("Executor error while reading during dataSize command");
+ throw;
}
ostringstream os;
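The dataSize hunk introduces the addContext-and-rethrow idiom used throughout this commit: annotate the in-flight exception with command context, then rethrow the same object so the original error code survives (this is exactly what commands_preserve_exec_error_code.js verifies). A toy sketch of the idiom, with a simplified struct standing in for DBException:

#include <iostream>
#include <string>

// Toy analogue of DBException: a numeric code plus a message that can
// accumulate context as the exception propagates up the stack.
struct DBLikeException {
    int code;
    std::string message;

    void addContext(const std::string& ctx) {
        message = ctx + " :: " + message;  // prepend context, keep the code
    }
};

void readLoop() {
    throw DBLikeException{4382101, "planExecutorAlwaysFails"};
}

void dataSizeCommand() {
    try {
        readLoop();
    } catch (DBLikeException& ex) {
        // Same shape as the new catch block: annotate and rethrow the *same*
        // exception object, so the original error code is preserved.
        ex.addContext("Executor error while reading during dataSize command");
        throw;
    }
}

int main() {
    try {
        dataSizeCommand();
    } catch (const DBLikeException& ex) {
        std::cout << "code " << ex.code << ": " << ex.message << "\n";
    }
}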
diff --git a/src/mongo/db/commands/dbcommands_d.cpp b/src/mongo/db/commands/dbcommands_d.cpp
index 78eb49487c7..65f6862b318 100644
--- a/src/mongo/db/commands/dbcommands_d.cpp
+++ b/src/mongo/db/commands/dbcommands_d.cpp
@@ -307,69 +307,68 @@ public:
exec.reset();
});
- BSONObj obj;
- PlanExecutor::ExecState state;
- while (PlanExecutor::ADVANCED == (state = exec->getNext(&obj, nullptr))) {
- BSONElement ne = obj["n"];
- verify(ne.isNumber());
- int myn = ne.numberInt();
- if (n != myn) {
- if (partialOk) {
- break; // skipped chunk is probably on another shard
+ try {
+ BSONObj obj;
+ while (PlanExecutor::ADVANCED == exec->getNext(&obj, nullptr)) {
+ BSONElement ne = obj["n"];
+ verify(ne.isNumber());
+ int myn = ne.numberInt();
+ if (n != myn) {
+ if (partialOk) {
+ break; // skipped chunk is probably on another shard
+ }
+ LOGV2(20452,
+ "Should have chunk: {expected} have: {observed}",
+ "Unexpected chunk",
+ "expected"_attr = n,
+ "observed"_attr = myn);
+ dumpChunks(opCtx, nss.ns(), query, sort);
+ uassert(10040, "chunks out of order", n == myn);
}
- LOGV2(20452,
- "Should have chunk: {expected} have: {observed}",
- "Unexpected chunk",
- "expected"_attr = n,
- "observed"_attr = myn);
- dumpChunks(opCtx, nss.ns(), query, sort);
- uassert(10040, "chunks out of order", n == myn);
- }
- // make a copy of obj since we access data in it while yielding locks
- BSONObj owned = obj.getOwned();
- uassert(50848,
- str::stream() << "The element that calls binDataClean() must be type "
- "of BinData, but type of misisng found. Field name is "
- "required",
- owned["data"]);
- uassert(50849,
- str::stream() << "The element that calls binDataClean() must be type "
- "of BinData, but type of "
- << owned["data"].type() << " found.",
- owned["data"].type() == BSONType::BinData);
-
- exec->saveState();
- // UNLOCKED
- ctx.reset();
+ // make a copy of obj since we access data in it while yielding locks
+ BSONObj owned = obj.getOwned();
+ uassert(50848,
+ str::stream() << "The element that calls binDataClean() must be type "
+ "of BinData, but type of misisng found. Field name is "
+ "required",
+ owned["data"]);
+ uassert(50849,
+ str::stream() << "The element that calls binDataClean() must be type "
+ "of BinData, but type of "
+ << owned["data"].type() << " found.",
+ owned["data"].type() == BSONType::BinData);
+
+ exec->saveState();
+ // UNLOCKED
+ ctx.reset();
+
+ int len;
+ const char* data = owned["data"].binDataClean(len);
+ // This is potentially an expensive operation, so do it out of the lock
+ md5_append(&st, (const md5_byte_t*)(data), len);
+ n++;
+
+ CurOpFailpointHelpers::waitWhileFailPointEnabled(
+ &waitInFilemd5DuringManualYield, opCtx, "waitInFilemd5DuringManualYield");
+
+ try {
+ // RELOCKED
+ ctx.reset(new AutoGetCollectionForReadCommand(opCtx, nss));
+ } catch (const StaleConfigException&) {
+ LOGV2_DEBUG(
+ 20453,
+ 1,
+ "Chunk metadata changed during filemd5, will retarget and continue");
+ break;
+ }
- int len;
- const char* data = owned["data"].binDataClean(len);
- // This is potentially an expensive operation, so do it out of the lock
- md5_append(&st, (const md5_byte_t*)(data), len);
- n++;
-
- CurOpFailpointHelpers::waitWhileFailPointEnabled(
- &waitInFilemd5DuringManualYield, opCtx, "waitInFilemd5DuringManualYield");
-
- try {
- // RELOCKED
- ctx.reset(new AutoGetCollectionForReadCommand(opCtx, nss));
- } catch (const StaleConfigException&) {
- LOGV2_DEBUG(
- 20453,
- 1,
- "Chunk metadata changed during filemd5, will retarget and continue");
- break;
+ // Now that we have the lock again, we can restore the PlanExecutor.
+ exec->restoreState();
}
-
- // Now that we have the lock again, we can restore the PlanExecutor.
- exec->restoreState();
- }
-
- if (PlanExecutor::FAILURE == state) {
- uassertStatusOK(WorkingSetCommon::getMemberObjectStatus(obj).withContext(
- "Executor error during filemd5 command"));
+ } catch (DBException& exception) {
+ exception.addContext("Executor error during filemd5 command");
+ throw;
}
if (partialOk)
diff --git a/src/mongo/db/commands/dbhash.cpp b/src/mongo/db/commands/dbhash.cpp
index 2d686835f81..b8eaf8fb438 100644
--- a/src/mongo/db/commands/dbhash.cpp
+++ b/src/mongo/db/commands/dbhash.cpp
@@ -360,21 +360,21 @@ private:
md5_state_t st;
md5_init(&st);
- long long n = 0;
- PlanExecutor::ExecState state;
- BSONObj c;
- verify(nullptr != exec.get());
- while (PlanExecutor::ADVANCED == (state = exec->getNext(&c, nullptr))) {
- md5_append(&st, (const md5_byte_t*)c.objdata(), c.objsize());
- n++;
- }
- if (PlanExecutor::IS_EOF != state) {
+ try {
+ long long n = 0;
+ BSONObj c;
+ verify(nullptr != exec.get());
+ while (exec->getNext(&c, nullptr) == PlanExecutor::ADVANCED) {
+ md5_append(&st, (const md5_byte_t*)c.objdata(), c.objsize());
+ n++;
+ }
+ } catch (DBException& exception) {
LOGV2_WARNING(
20456, "Error while hashing, db possibly dropped", "namespace"_attr = nss);
- uasserted(34371,
- "Plan executor error while running dbHash command: " +
- WorkingSetCommon::toStatusString(c));
+ exception.addContext("Plan executor error while running dbHash command");
+ throw;
}
+
md5digest d;
md5_finish(&st, d);
std::string hash = digestToString(d);
diff --git a/src/mongo/db/commands/distinct.cpp b/src/mongo/db/commands/distinct.cpp
index 09451fa653d..52432518498 100644
--- a/src/mongo/db/commands/distinct.cpp
+++ b/src/mongo/db/commands/distinct.cpp
@@ -246,54 +246,50 @@ public:
BSONElementSet values(executor.getValue()->getCanonicalQuery()->getCollator());
const int kMaxResponseSize = BSONObjMaxUserSize - 4096;
- size_t listApproxBytes = 0;
- BSONObj obj;
- PlanExecutor::ExecState state;
- while (PlanExecutor::ADVANCED == (state = executor.getValue()->getNext(&obj, nullptr))) {
- // Distinct expands arrays.
- //
- // If our query is covered, each value of the key should be in the index key and
- // available to us without this. If a collection scan is providing the data, we may
- // have to expand an array.
- BSONElementSet elts;
- dps::extractAllElementsAlongPath(obj, key, elts);
-
- for (BSONElementSet::iterator it = elts.begin(); it != elts.end(); ++it) {
- BSONElement elt = *it;
- if (values.count(elt)) {
- continue;
- }
-
- // This is an approximate size check which safeguards against use of unbounded
- // memory by the distinct command. We perform a more precise check at the end of
- // this method to confirm that the response size is less than 16MB.
- listApproxBytes += elt.size();
- uassert(17217, "distinct too big, 16mb cap", listApproxBytes < kMaxResponseSize);
- auto distinctObj = elt.wrap();
- values.insert(distinctObj.firstElement());
- distinctValueHolder.push_back(std::move(distinctObj));
+ try {
+ size_t listApproxBytes = 0;
+ BSONObj obj;
+ while (PlanExecutor::ADVANCED == executor.getValue()->getNext(&obj, nullptr)) {
+ // Distinct expands arrays.
+ //
+ // If our query is covered, each value of the key should be in the index key and
+ // available to us without this. If a collection scan is providing the data, we may
+ // have to expand an array.
+ BSONElementSet elts;
+ dps::extractAllElementsAlongPath(obj, key, elts);
+
+ for (BSONElementSet::iterator it = elts.begin(); it != elts.end(); ++it) {
+ BSONElement elt = *it;
+ if (values.count(elt)) {
+ continue;
+ }
+
+ // This is an approximate size check which safeguards against use of unbounded
+ // memory by the distinct command. We perform a more precise check at the end of
+ // this method to confirm that the response size is less than 16MB.
+ listApproxBytes += elt.size();
+ uassert(
+ 17217, "distinct too big, 16mb cap", listApproxBytes < kMaxResponseSize);
+
+ auto distinctObj = elt.wrap();
+ values.insert(distinctObj.firstElement());
+ distinctValueHolder.push_back(std::move(distinctObj));
+ }
}
- }
-
- // Return an error if execution fails for any reason.
- if (PlanExecutor::FAILURE == state) {
- // We should always have a valid status member object at this point.
- auto status = WorkingSetCommon::getMemberObjectStatus(obj);
- invariant(!status.isOK());
+ } catch (DBException& exception) {
LOGV2_WARNING(23797,
- "Plan executor error during distinct command: {state}, status: {error}, "
+ "Plan executor error during distinct command: {error}, "
"stats: {stats}",
"Plan executor error during distinct command",
- "state"_attr = redact(PlanExecutor::statestr(state)),
- "error"_attr = status,
+ "error"_attr = exception.toStatus(),
"stats"_attr =
redact(Explain::getWinningPlanStats(executor.getValue().get())));
- uassertStatusOK(status.withContext("Executor error during distinct command"));
+ exception.addContext("Executor error during distinct command");
+ throw;
}
-
auto curOp = CurOp::get(opCtx);
// Get summary information about the plan.
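The distinct loop keeps its approximate response-size guard, now inside the try block. A small stand-alone sketch of the guard's logic follows; a tiny pretend cap is used in place of the real BSONObjMaxUserSize-derived limit so the example actually trips it.

#include <iostream>
#include <set>
#include <stdexcept>
#include <string>
#include <vector>

constexpr std::size_t kMaxResponseSize = 24;  // stands in for BSONObjMaxUserSize - 4096

int main() {
    std::vector<std::string> docs = {"red", "green", "green", "blue", "aquamarine", "chartreuse"};
    std::set<std::string> values;

    std::size_t listApproxBytes = 0;
    try {
        for (const auto& v : docs) {
            if (values.count(v))
                continue;  // this distinct value was already collected
            // Approximate size check; the real command does a precise 16MB
            // check when it serializes the response.
            listApproxBytes += v.size();
            if (listApproxBytes >= kMaxResponseSize)  // uassert(17217, ...) in the real code
                throw std::runtime_error("distinct too big, 16mb cap");
            values.insert(v);
        }
        std::cout << values.size() << " distinct values\n";
    } catch (const std::exception& ex) {
        std::cout << "error: " << ex.what() << "\n";
    }
}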
diff --git a/src/mongo/db/commands/find_and_modify.cpp b/src/mongo/db/commands/find_and_modify.cpp
index 0df18451973..1b30ce9d5a5 100644
--- a/src/mongo/db/commands/find_and_modify.cpp
+++ b/src/mongo/db/commands/find_and_modify.cpp
@@ -83,38 +83,31 @@ MONGO_FAIL_POINT_DEFINE(hangBeforeFindAndModifyPerformsUpdate);
namespace {
/**
- * If the operation succeeded, then Status::OK() is returned, possibly with a document value
- * to return to the client. If no matching document to update or remove was found, then none
- * is returned. Otherwise, the updated or deleted document is returned.
- *
- * If the operation failed, throws.
+ * If the operation succeeded, then returns either a document to return to the client, or
+ * boost::none if no matching document to update/remove was found. If the operation failed, throws.
*/
boost::optional<BSONObj> advanceExecutor(OperationContext* opCtx,
PlanExecutor* exec,
bool isRemove) {
BSONObj value;
- PlanExecutor::ExecState state = exec->getNext(&value, nullptr);
+ PlanExecutor::ExecState state;
+ try {
+ state = exec->getNext(&value, nullptr);
+ } catch (DBException& exception) {
+ LOGV2_WARNING(23802,
+ "Plan executor error during findAndModify: {error}, stats: {stats}",
+ "Plan executor error during findAndModify",
+ "error"_attr = exception.toStatus(),
+ "stats"_attr = redact(Explain::getWinningPlanStats(exec)));
+
+ exception.addContext("Plan executor error during findAndModify");
+ throw;
+ }
if (PlanExecutor::ADVANCED == state) {
return {std::move(value)};
}
- if (PlanExecutor::FAILURE == state) {
- // We should always have a valid status member object at this point.
- auto status = WorkingSetCommon::getMemberObjectStatus(value);
- invariant(!status.isOK());
- LOGV2_WARNING(
- 23802,
- "Plan executor error during findAndModify: {state}, status: {error}, stats: {stats}",
- "Plan executor error during findAndModify",
- "state"_attr = PlanExecutor::statestr(state),
- "error"_attr = status,
- "stats"_attr = redact(Explain::getWinningPlanStats(exec)));
-
- uassertStatusOKWithContext(status, "Plan executor error during findAndModify");
- MONGO_UNREACHABLE;
- }
-
invariant(state == PlanExecutor::IS_EOF);
return boost::none;
}
diff --git a/src/mongo/db/commands/find_cmd.cpp b/src/mongo/db/commands/find_cmd.cpp
index 2c54701fcac..37590d4c13c 100644
--- a/src/mongo/db/commands/find_cmd.cpp
+++ b/src/mongo/db/commands/find_cmd.cpp
@@ -507,39 +507,37 @@ public:
Document doc;
PlanExecutor::ExecState state = PlanExecutor::ADVANCED;
std::uint64_t numResults = 0;
- while (!FindCommon::enoughForFirstBatch(originalQR, numResults) &&
- PlanExecutor::ADVANCED == (state = exec->getNext(&doc, nullptr))) {
- // If we can't fit this result inside the current batch, then we stash it for later.
- BSONObj obj = doc.toBson();
- if (!FindCommon::haveSpaceForNext(obj, numResults, firstBatch.bytesUsed())) {
- exec->enqueue(obj);
- break;
- }
- // If this executor produces a postBatchResumeToken, add it to the response.
- firstBatch.setPostBatchResumeToken(exec->getPostBatchResumeToken());
+ try {
+ while (!FindCommon::enoughForFirstBatch(originalQR, numResults) &&
+ PlanExecutor::ADVANCED == (state = exec->getNext(&doc, nullptr))) {
+ // If we can't fit this result inside the current batch, then we stash it for
+ // later.
+ BSONObj obj = doc.toBson();
+ if (!FindCommon::haveSpaceForNext(obj, numResults, firstBatch.bytesUsed())) {
+ exec->enqueue(obj);
+ break;
+ }
- // Add result to output buffer.
- firstBatch.append(obj);
- numResults++;
- }
+ // If this executor produces a postBatchResumeToken, add it to the response.
+ firstBatch.setPostBatchResumeToken(exec->getPostBatchResumeToken());
- // Throw an assertion if query execution fails for any reason.
- if (PlanExecutor::FAILURE == state) {
+ // Add result to output buffer.
+ firstBatch.append(obj);
+ numResults++;
+ }
+ } catch (DBException& exception) {
firstBatch.abandon();
- // We should always have a valid status member object at this point.
- auto status = WorkingSetCommon::getMemberObjectStatus(doc);
- invariant(!status.isOK());
LOGV2_WARNING(23798,
- "Plan executor error during find command: {state}, status: {error}, "
+ "Plan executor error during find command: {error}, "
"stats: {stats}",
"Plan executor error during find command",
- "state"_attr = PlanExecutor::statestr(state),
- "error"_attr = status,
+ "error"_attr = exception.toStatus(),
"stats"_attr = redact(Explain::getWinningPlanStats(exec.get())));
- uassertStatusOK(status.withContext("Executor error during find command"));
+ exception.addContext("Executor error during find command");
+ throw;
}
// Set up the cursor for getMore.
diff --git a/src/mongo/db/commands/getmore_cmd.cpp b/src/mongo/db/commands/getmore_cmd.cpp
index 133ecf102a9..47172236cad 100644
--- a/src/mongo/db/commands/getmore_cmd.cpp
+++ b/src/mongo/db/commands/getmore_cmd.cpp
@@ -285,27 +285,28 @@ public:
* Uses 'cursor' and 'request' to fill out 'nextBatch' with the batch of result documents to
* be returned by this getMore.
*
- * Returns the number of documents in the batch in *numResults, which must be initialized to
- * zero by the caller. Returns the final ExecState returned by the cursor in *state.
+ * Returns true if the cursor should be saved for subsequent getMores, and false otherwise.
+ * Fills out *numResults with the number of documents in the batch, which must be
+ * initialized to zero by the caller.
*
- * Returns an OK status if the batch was successfully generated, and a non-OK status if the
- * PlanExecutor encounters a failure.
+ * Throws an exception on failure.
*/
- Status generateBatch(OperationContext* opCtx,
- ClientCursor* cursor,
- const GetMoreRequest& request,
- CursorResponseBuilder* nextBatch,
- PlanExecutor::ExecState* state,
- std::uint64_t* numResults) {
+ bool generateBatch(OperationContext* opCtx,
+ ClientCursor* cursor,
+ const GetMoreRequest& request,
+ const bool isTailable,
+ CursorResponseBuilder* nextBatch,
+ std::uint64_t* numResults) {
PlanExecutor* exec = cursor->getExecutor();
// If an awaitData getMore is killed during this process due to our max time expiring at
// an interrupt point, we just continue as normal and return rather than reporting a
// timeout to the user.
Document doc;
+ PlanExecutor::ExecState state;
try {
while (!FindCommon::enoughForGetMore(request.batchSize.value_or(0), *numResults) &&
- PlanExecutor::ADVANCED == (*state = exec->getNext(&doc, nullptr))) {
+ PlanExecutor::ADVANCED == (state = exec->getNext(&doc, nullptr))) {
// Note that "needsMerge" implies a find or aggregate operation, which should
// always have a non-NULL 'expCtx' value.
BSONObj obj = cursor->needsMerge() ? doc.toBsonWithMetaData() : doc.toBson();
@@ -326,37 +327,29 @@ public:
(*numResults)++;
}
} catch (const ExceptionFor<ErrorCodes::CloseChangeStream>&) {
- // FAILURE state will make getMore command close the cursor even if it's tailable.
- *state = PlanExecutor::FAILURE;
- return Status::OK();
+ // This exception indicates that we should close the cursor without reporting an
+ // error.
+ return false;
+ } catch (DBException& exception) {
+ nextBatch->abandon();
+
+ LOGV2_WARNING(20478,
+ "getMore command executor error: {error}, stats: {stats}",
+ "getMore command executor error",
+ "error"_attr = exception.toStatus(),
+ "stats"_attr = redact(Explain::getWinningPlanStats(exec)));
+
+ exception.addContext("Executor error during getMore");
+ throw;
}
- switch (*state) {
- case PlanExecutor::FAILURE: {
- // We should always have a valid status member object at this point.
- auto status = WorkingSetCommon::getMemberObjectStatus(doc);
- invariant(!status.isOK());
- // Log an error message and then perform the cleanup.
- LOGV2_WARNING(
- 20478,
- "getMore command executor error: {state}, status: {error}, stats: {stats}",
- "getMore command executor error",
- "state"_attr = PlanExecutor::statestr(*state),
- "error"_attr = status,
- "stats"_attr = redact(Explain::getWinningPlanStats(exec)));
-
- nextBatch->abandon();
- return status;
- }
- case PlanExecutor::IS_EOF:
- // The latest oplog timestamp may advance even when there are no results. Ensure
- // that we have the latest postBatchResumeToken produced by the plan executor.
- nextBatch->setPostBatchResumeToken(exec->getPostBatchResumeToken());
- default:
- return Status::OK();
+ if (state == PlanExecutor::IS_EOF) {
+ // The latest oplog timestamp may advance even when there are no results. Ensure
+ // that we have the latest postBatchResumeToken produced by the plan executor.
+ nextBatch->setPostBatchResumeToken(exec->getPostBatchResumeToken());
}
- MONGO_UNREACHABLE;
+ return shouldSaveCursorGetMore(exec, isTailable);
}
void acquireLocksAndIterateCursor(OperationContext* opCtx,
@@ -582,7 +575,6 @@ public:
options.atClusterTime = repl::ReadConcernArgs::get(opCtx).getArgsAtClusterTime();
CursorResponseBuilder nextBatch(reply, options);
BSONObj obj;
- PlanExecutor::ExecState state = PlanExecutor::ADVANCED;
std::uint64_t numResults = 0;
// We report keysExamined and docsExamined to OpDebug for a given getMore operation. To
@@ -628,8 +620,12 @@ public:
_request.nss);
});
- uassertStatusOK(generateBatch(
- opCtx, cursorPin.getCursor(), _request, &nextBatch, &state, &numResults));
+ const auto shouldSaveCursor = generateBatch(opCtx,
+ cursorPin.getCursor(),
+ _request,
+ cursorPin->isTailable(),
+ &nextBatch,
+ &numResults);
PlanSummaryStats postExecutionStats;
Explain::getSummaryStats(*exec, &postExecutionStats);
@@ -649,7 +645,7 @@ public:
curOp->debug().execStats = execStatsBob.obj();
}
- if (shouldSaveCursorGetMore(state, exec, cursorPin->isTailable())) {
+ if (shouldSaveCursor) {
respondWithId = _request.cursorid;
exec->saveState();
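After this change, generateBatch() communicates entirely through its return value: the caller only needs to know whether to save the cursor, and failures arrive as exceptions rather than through a Status plus an ExecState out-parameter. A simplified sketch of the new signature's shape, with a std::queue standing in for the real cursor and executor:

#include <cstddef>
#include <deque>
#include <iostream>
#include <queue>

bool generateBatch(std::queue<int>& cursor, bool isTailable, std::size_t batchSize) {
    std::size_t numResults = 0;
    while (numResults < batchSize && !cursor.empty()) {
        std::cout << "doc " << cursor.front() << "\n";
        cursor.pop();
        ++numResults;
    }
    const bool hitEOF = cursor.empty();
    // Tailable cursors survive EOF; otherwise keep the cursor only while
    // results may remain. Errors would propagate out as exceptions.
    return !hitEOF || isTailable;
}

int main() {
    std::queue<int> cursor(std::deque<int>{1, 2, 3});
    while (generateBatch(cursor, /*isTailable=*/false, /*batchSize=*/2)) {
        std::cout << "-- cursor saved, next getMore --\n";
    }
    std::cout << "cursor closed\n";
}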
diff --git a/src/mongo/db/commands/run_aggregate.cpp b/src/mongo/db/commands/run_aggregate.cpp
index c8805eee8c3..242846a3c03 100644
--- a/src/mongo/db/commands/run_aggregate.cpp
+++ b/src/mongo/db/commands/run_aggregate.cpp
@@ -181,37 +181,32 @@ bool handleCursorCommand(OperationContext* opCtx,
try {
state = exec->getNext(&nextDoc, nullptr);
} catch (const ExceptionFor<ErrorCodes::CloseChangeStream>&) {
- // This exception is thrown when a $changeStream stage encounters an event
- // that invalidates the cursor. We should close the cursor and return without
- // error.
+ // This exception is thrown when a $changeStream stage encounters an event that
+ // invalidates the cursor. We should close the cursor and return without error.
cursor = nullptr;
exec = nullptr;
break;
+ } catch (DBException& exception) {
+ LOGV2_WARNING(23799,
+ "Aggregate command executor error: {error}, stats: {stats}",
+ "Aggregate command executor error",
+ "error"_attr = exception.toStatus(),
+ "stats"_attr = redact(Explain::getWinningPlanStats(exec)));
+
+ exception.addContext("PlanExecutor error during aggregation");
+ throw;
}
if (state == PlanExecutor::IS_EOF) {
if (!cursor->isTailable()) {
- // make it an obvious error to use cursor or executor after this point
+ // Make it an obvious error to use cursor or executor after this point.
cursor = nullptr;
exec = nullptr;
}
break;
}
- if (PlanExecutor::ADVANCED != state) {
- // We should always have a valid status member object at this point.
- auto status = WorkingSetCommon::getMemberObjectStatus(nextDoc);
- invariant(!status.isOK());
- LOGV2_WARNING(
- 23799,
- "Aggregate command executor error: {state}, status: {error}, stats: {stats}",
- "Aggregate command executor error",
- "state"_attr = PlanExecutor::statestr(state),
- "error"_attr = status,
- "stats"_attr = redact(Explain::getWinningPlanStats(exec)));
-
- uassertStatusOK(status.withContext("PlanExecutor error during aggregation"));
- }
+ invariant(state == PlanExecutor::ADVANCED);
// If adding this object will cause us to exceed the message size limit, then we stash it
// for later.
diff --git a/src/mongo/db/dbhelpers.cpp b/src/mongo/db/dbhelpers.cpp
index 4864a941b6d..9e9c0ada8f9 100644
--- a/src/mongo/db/dbhelpers.cpp
+++ b/src/mongo/db/dbhelpers.cpp
@@ -117,9 +117,6 @@ RecordId Helpers::findOne(OperationContext* opCtx,
if (PlanExecutor::ADVANCED == (state = exec->getNext(&obj, &loc))) {
return loc;
}
- massert(34427,
- "Plan executor error: " + WorkingSetCommon::toStatusString(obj),
- PlanExecutor::IS_EOF == state);
return RecordId();
}
diff --git a/src/mongo/db/exec/and_hash.cpp b/src/mongo/db/exec/and_hash.cpp
index d2666c905e6..f620db00faf 100644
--- a/src/mongo/db/exec/and_hash.cpp
+++ b/src/mongo/db/exec/and_hash.cpp
@@ -140,14 +140,6 @@ PlanStage::StageState AndHashStage::doWork(WorkingSetID* out) {
// yield.
_ws->get(_lookAheadResults[i])->makeObjOwnedIfNeeded();
break; // Stop looking at this child.
- } else if (PlanStage::FAILURE == childStatus) {
- // The stage which produces a failure is responsible for allocating a working
- // set member with error details.
- invariant(WorkingSet::INVALID_ID != _lookAheadResults[i]);
- *out = _lookAheadResults[i];
- _hashingChildren = false;
- _dataMap.clear();
- return childStatus;
}
// We ignore NEED_TIME. TODO: what do we want to do if we get NEED_YIELD here?
}
@@ -165,12 +157,10 @@ PlanStage::StageState AndHashStage::doWork(WorkingSetID* out) {
if (_hashingChildren) {
// Check memory usage of previously hashed results.
if (_memUsage > _maxMemUsage) {
- str::stream ss;
- ss << "hashed AND stage buffered data usage of " << _memUsage
+ StringBuilder sb;
+ sb << "hashed AND stage buffered data usage of " << _memUsage
<< " bytes exceeds internal limit of " << kDefaultMaxMemUsageBytes << " bytes";
- Status status(ErrorCodes::Overflow, ss);
- *out = WorkingSetCommon::allocateStatusMember(_ws, status);
- return PlanStage::FAILURE;
+ uasserted(ErrorCodes::QueryExceededMemoryLimitNoDiskUseAllowed, sb.str());
}
if (0 == _currentChild) {
@@ -279,12 +269,6 @@ PlanStage::StageState AndHashStage::readFirstChild(WorkingSetID* out) {
_specificStats.mapAfterChild.push_back(_dataMap.size());
return PlanStage::NEED_TIME;
- } else if (PlanStage::FAILURE == childStatus) {
- // The stage which produces a failure is responsible for allocating a working set member
- // with error details.
- invariant(WorkingSet::INVALID_ID != id);
- *out = id;
- return childStatus;
} else {
if (PlanStage::NEED_YIELD == childStatus) {
*out = id;
@@ -364,12 +348,6 @@ PlanStage::StageState AndHashStage::hashOtherChildren(WorkingSetID* out) {
}
return PlanStage::NEED_TIME;
- } else if (PlanStage::FAILURE == childStatus) {
- // The stage which produces a failure is responsible for allocating a working set member
- // with error details.
- invariant(WorkingSet::INVALID_ID != id);
- *out = id;
- return childStatus;
} else {
if (PlanStage::NEED_YIELD == childStatus) {
*out = id;
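The and_hash memory guard above now throws as soon as the buffered data exceeds the limit, instead of allocating a status-bearing WorkingSetMember and returning FAILURE. A stand-alone sketch of that guard, with std::runtime_error standing in for uasserted(ErrorCodes::QueryExceededMemoryLimitNoDiskUseAllowed, ...):

#include <iostream>
#include <sstream>
#include <stdexcept>

void checkMemUsage(long long memUsage, long long maxMemUsage) {
    if (memUsage > maxMemUsage) {
        std::ostringstream sb;
        sb << "hashed AND stage buffered data usage of " << memUsage
           << " bytes exceeds internal limit of " << maxMemUsage << " bytes";
        throw std::runtime_error(sb.str());
    }
}

int main() {
    try {
        checkMemUsage(100, 200);  // within the limit: no effect
        checkMemUsage(300, 200);  // over the limit: throws
    } catch (const std::exception& ex) {
        std::cout << "error: " << ex.what() << "\n";
    }
}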
diff --git a/src/mongo/db/exec/and_sorted.cpp b/src/mongo/db/exec/and_sorted.cpp
index 57d2eb08b52..5d35a7a1bd2 100644
--- a/src/mongo/db/exec/and_sorted.cpp
+++ b/src/mongo/db/exec/and_sorted.cpp
@@ -116,19 +116,6 @@ PlanStage::StageState AndSortedStage::getTargetRecordId(WorkingSetID* out) {
} else if (PlanStage::IS_EOF == state) {
_isEOF = true;
return state;
- } else if (PlanStage::FAILURE == state) {
- *out = id;
- // If a stage fails, it may create a status WSM to indicate why it
- // failed, in which case 'id' is valid. If ID is invalid, we
- // create our own error message.
- if (WorkingSet::INVALID_ID == id) {
- str::stream ss;
- ss << "sorted AND stage failed to read in results from first child";
- Status status(ErrorCodes::InternalError, ss);
- *out = WorkingSetCommon::allocateStatusMember(_ws, status);
- }
- _isEOF = true;
- return state;
} else {
if (PlanStage::NEED_YIELD == state) {
*out = id;
@@ -208,14 +195,6 @@ PlanStage::StageState AndSortedStage::moveTowardTargetRecordId(WorkingSetID* out
_isEOF = true;
_ws->free(_targetId);
return state;
- } else if (PlanStage::FAILURE == state) {
- // The stage which produces a failure is responsible for allocating a working set member
- // with error details.
- invariant(WorkingSet::INVALID_ID != id);
- *out = id;
- _isEOF = true;
- _ws->free(_targetId);
- return state;
} else {
if (PlanStage::NEED_YIELD == state) {
*out = id;
diff --git a/src/mongo/db/exec/cached_plan.cpp b/src/mongo/db/exec/cached_plan.cpp
index 99ce35a7d5e..a65e0639776 100644
--- a/src/mongo/db/exec/cached_plan.cpp
+++ b/src/mongo/db/exec/cached_plan.cpp
@@ -102,7 +102,28 @@ Status CachedPlanStage::pickBestPlan(PlanYieldPolicy* yieldPolicy) {
}
WorkingSetID id = WorkingSet::INVALID_ID;
- PlanStage::StageState state = child()->work(&id);
+ PlanStage::StageState state;
+ try {
+ state = child()->work(&id);
+ } catch (const ExceptionFor<ErrorCodes::QueryExceededMemoryLimitNoDiskUseAllowed>& ex) {
+ // The plan failed by hitting the limit we impose on memory consumption. It's possible
+ // that a different plan is less resource-intensive, so we fall back to replanning the
+ // whole query. We neither evict the existing cache entry nor cache the result of
+ // replanning.
+ LOGV2_DEBUG(20579,
+ 1,
+ "Execution of cached plan failed, falling back to replan. query: "
+ "{query} planSummary: {planSummary} status: {status}",
+                      "Execution of cached plan failed, falling back to replan",
+ "query"_attr = redact(_canonicalQuery->toStringShort()),
+ "planSummary"_attr = Explain::getPlanSummary(child().get()),
+ "status"_attr = redact(ex.toStatus()));
+
+ const bool shouldCache = false;
+ return replan(yieldPolicy,
+ shouldCache,
+ str::stream() << "cached plan returned: " << ex.toStatus());
+ }
if (PlanStage::ADVANCED == state) {
// Save result for later.
@@ -136,26 +157,6 @@ Status CachedPlanStage::pickBestPlan(PlanYieldPolicy* yieldPolicy) {
if (!yieldStatus.isOK()) {
return yieldStatus;
}
- } else if (PlanStage::FAILURE == state) {
- // On failure, fall back to replanning the whole query. We neither evict the
- // existing cache entry nor cache the result of replanning.
- BSONObj statusObj = WorkingSetCommon::getStatusMemberDocument(*_ws, id)->toBson();
-
- LOGV2_DEBUG(20579,
- 1,
- "Execution of cached plan failed, falling back to replan. query: "
- "{canonicalQuery_Short} planSummary: {Explain_getPlanSummary_child_get} "
- "status: {statusObj}",
- "canonicalQuery_Short"_attr = redact(_canonicalQuery->toStringShort()),
- "Explain_getPlanSummary_child_get"_attr =
- Explain::getPlanSummary(child().get()),
- "statusObj"_attr = redact(statusObj));
-
- const bool shouldCache = false;
- return replan(yieldPolicy,
- shouldCache,
- str::stream() << "cached plan returned: "
- << WorkingSetCommon::toStatusString(statusObj));
} else {
invariant(PlanStage::NEED_TIME == state);
}
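Because the memory-limit failure is now a typed exception, CachedPlanStage can catch exactly that case and fall back to replanning, while every other executor error keeps propagating. A self-contained sketch of the catch-and-replan shape (toy exception type, not the real DBException hierarchy):

#include <iostream>
#include <stdexcept>

struct QueryExceededMemoryLimit : std::runtime_error {
    using std::runtime_error::runtime_error;
};

void trialCachedPlan(bool blowsMemoryLimit) {
    if (blowsMemoryLimit)
        throw QueryExceededMemoryLimit("hashed AND stage buffered data usage exceeds limit");
    std::cout << "cached plan succeeded\n";
}

void pickBestPlan(bool blowsMemoryLimit) {
    try {
        trialCachedPlan(blowsMemoryLimit);
    } catch (const QueryExceededMemoryLimit& ex) {
        // Neither evict the existing cache entry nor cache the replan result.
        const bool shouldCache = false;
        std::cout << "replanning (cache=" << shouldCache << "): " << ex.what() << "\n";
    }
    // Any other exception type falls through to the caller unchanged.
}

int main() {
    pickBestPlan(false);
    pickBestPlan(true);
}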
diff --git a/src/mongo/db/exec/collection_scan.cpp b/src/mongo/db/exec/collection_scan.cpp
index ab39d7d54b4..00547983fc8 100644
--- a/src/mongo/db/exec/collection_scan.cpp
+++ b/src/mongo/db/exec/collection_scan.cpp
@@ -133,12 +133,10 @@ PlanStage::StageState CollectionScan::doWork(WorkingSetID* out) {
// only time we'd need to create a cursor after already getting a record out of it
// and updating our _lastSeenId.
if (!_cursor->seekExact(_lastSeenId)) {
- Status status(ErrorCodes::CappedPositionLost,
- str::stream() << "CollectionScan died due to failure to restore "
- << "tailable cursor position. "
- << "Last seen record id: " << _lastSeenId);
- *out = WorkingSetCommon::allocateStatusMember(_workingSet, status);
- return PlanStage::FAILURE;
+ uasserted(ErrorCodes::CappedPositionLost,
+ str::stream() << "CollectionScan died due to failure to restore "
+ << "tailable cursor position. "
+ << "Last seen record id: " << _lastSeenId);
}
}
@@ -152,14 +150,12 @@ PlanStage::StageState CollectionScan::doWork(WorkingSetID* out) {
// returned this one prior to the resume.
auto recordIdToSeek = *_params.resumeAfterRecordId;
if (!_cursor->seekExact(recordIdToSeek)) {
- Status status(
+ uasserted(
ErrorCodes::KeyNotFound,
str::stream()
<< "Failed to resume collection scan: the recordId from which we are "
<< "attempting to resume no longer exists in the collection. "
<< "recordId: " << recordIdToSeek);
- *out = WorkingSetCommon::allocateStatusMember(_workingSet, status);
- return PlanStage::FAILURE;
}
}
@@ -205,11 +201,7 @@ PlanStage::StageState CollectionScan::doWork(WorkingSetID* out) {
_lastSeenId = record->id;
if (_params.shouldTrackLatestOplogTimestamp) {
- auto status = setLatestOplogEntryTimestamp(*record);
- if (!status.isOK()) {
- *out = WorkingSetCommon::allocateStatusMember(_workingSet, status);
- return PlanStage::FAILURE;
- }
+ setLatestOplogEntryTimestamp(*record);
}
WorkingSetID id = _workingSet->allocate();
@@ -221,17 +213,14 @@ PlanStage::StageState CollectionScan::doWork(WorkingSetID* out) {
return returnIfMatches(member, id, out);
}
-Status CollectionScan::setLatestOplogEntryTimestamp(const Record& record) {
+void CollectionScan::setLatestOplogEntryTimestamp(const Record& record) {
auto tsElem = record.data.toBson()[repl::OpTime::kTimestampFieldName];
- if (tsElem.type() != BSONType::bsonTimestamp) {
- Status status(ErrorCodes::InternalError,
- str::stream() << "CollectionScan was asked to track latest operation time, "
- "but found a result without a valid 'ts' field: "
- << record.data.toBson().toString());
- return status;
- }
+ uassert(ErrorCodes::Error(4382100),
+ str::stream() << "CollectionScan was asked to track latest operation time, "
+ "but found a result without a valid 'ts' field: "
+ << record.data.toBson().toString(),
+ tsElem.type() == BSONType::bsonTimestamp);
_latestOplogEntryTimestamp = std::max(_latestOplogEntryTimestamp, tsElem.timestamp());
- return Status::OK();
}
PlanStage::StageState CollectionScan::returnIfMatches(WorkingSetMember* member,
diff --git a/src/mongo/db/exec/collection_scan.h b/src/mongo/db/exec/collection_scan.h
index b19915bb2c5..74d9d8ddb29 100644
--- a/src/mongo/db/exec/collection_scan.h
+++ b/src/mongo/db/exec/collection_scan.h
@@ -95,10 +95,10 @@ private:
/**
* Extracts the timestamp from the 'ts' field of 'record', and sets '_latestOplogEntryTimestamp'
- * to that time if it isn't already greater. Returns an error if the 'ts' field cannot be
+ * to that time if it isn't already greater. Throws an exception if the 'ts' field cannot be
* extracted.
*/
- Status setLatestOplogEntryTimestamp(const Record& record);
+ void setLatestOplogEntryTimestamp(const Record& record);
// WorkingSet is not owned by us.
WorkingSet* _workingSet;
diff --git a/src/mongo/db/exec/count.cpp b/src/mongo/db/exec/count.cpp
index c646e172f29..07158d20b08 100644
--- a/src/mongo/db/exec/count.cpp
+++ b/src/mongo/db/exec/count.cpp
@@ -84,12 +84,6 @@ PlanStage::StageState CountStage::doWork(WorkingSetID* out) {
if (PlanStage::IS_EOF == state) {
_commonStats.isEOF = true;
return PlanStage::IS_EOF;
- } else if (PlanStage::FAILURE == state) {
- // The stage which produces a failure is responsible for allocating a working set member
- // with error details.
- invariant(WorkingSet::INVALID_ID != id);
- *out = id;
- return state;
} else if (PlanStage::ADVANCED == state) {
// We got a result. If we're still skipping, then decrement the number left to skip.
// Otherwise increment the count until we hit the limit.
diff --git a/src/mongo/db/exec/delete.cpp b/src/mongo/db/exec/delete.cpp
index 3911a3087b5..a53447e9f03 100644
--- a/src/mongo/db/exec/delete.cpp
+++ b/src/mongo/db/exec/delete.cpp
@@ -123,13 +123,6 @@ PlanStage::StageState DeleteStage::doWork(WorkingSetID* out) {
case PlanStage::ADVANCED:
break;
- case PlanStage::FAILURE:
- // The stage which produces a failure is responsible for allocating a working set
- // member with error details.
- invariant(WorkingSet::INVALID_ID != id);
- *out = id;
- return status;
-
case PlanStage::NEED_TIME:
return status;
diff --git a/src/mongo/db/exec/fetch.cpp b/src/mongo/db/exec/fetch.cpp
index 914158d3191..9528e9dc085 100644
--- a/src/mongo/db/exec/fetch.cpp
+++ b/src/mongo/db/exec/fetch.cpp
@@ -118,12 +118,6 @@ PlanStage::StageState FetchStage::doWork(WorkingSetID* out) {
}
return returnIfMatches(member, id, out);
- } else if (PlanStage::FAILURE == status) {
- // The stage which produces a failure is responsible for allocating a working set member
- // with error details.
- invariant(WorkingSet::INVALID_ID != id);
- *out = id;
- return status;
} else if (PlanStage::NEED_YIELD == status) {
*out = id;
}
diff --git a/src/mongo/db/exec/geo_near.cpp b/src/mongo/db/exec/geo_near.cpp
index c12bd3e11ed..6d363303963 100644
--- a/src/mongo/db/exec/geo_near.cpp
+++ b/src/mongo/db/exec/geo_near.cpp
@@ -138,8 +138,7 @@ static void extractGeometries(const BSONObj& doc,
}
}
-static StatusWith<double> computeGeoNearDistance(const GeoNearParams& nearParams,
- WorkingSetMember* member) {
+static double computeGeoNearDistance(const GeoNearParams& nearParams, WorkingSetMember* member) {
//
// Generic GeoNear distance computation
// Distances are computed by projecting the stored geometry into the query CRS, and
@@ -183,7 +182,7 @@ static StatusWith<double> computeGeoNearDistance(const GeoNearParams& nearParams
if (minDistance < 0) {
// No distance to report
- return StatusWith<double>(-1);
+ return -1;
}
if (nearParams.addDistMeta) {
@@ -201,7 +200,7 @@ static StatusWith<double> computeGeoNearDistance(const GeoNearParams& nearParams
member->metadata().setGeoNearPoint(minDistanceMetadata);
}
- return StatusWith<double>(minDistance);
+ return minDistance;
}
static R2Annulus geoNearDistanceBounds(const GeoNearExpression& query) {
@@ -565,13 +564,11 @@ static R2Annulus projectBoundsToTwoDDegrees(R2Annulus sphereBounds) {
outerDegrees + maxErrorDegrees);
}
-StatusWith<NearStage::CoveredInterval*> //
-GeoNear2DStage::nextInterval(OperationContext* opCtx,
- WorkingSet* workingSet,
- const Collection* collection) {
+std::unique_ptr<NearStage::CoveredInterval> GeoNear2DStage::nextInterval(
+ OperationContext* opCtx, WorkingSet* workingSet, const Collection* collection) {
// The search is finished if we searched at least once and all the way to the edge
if (_currBounds.getInner() >= 0 && _currBounds.getOuter() == _fullBounds.getOuter()) {
- return StatusWith<CoveredInterval*>(nullptr);
+ return nullptr;
}
//
@@ -710,11 +707,11 @@ GeoNear2DStage::nextInterval(OperationContext* opCtx,
_children.emplace_back(std::make_unique<FetchStageWithMatch>(
expCtx(), workingSet, std::move(scan), docMatcher, collection));
- return StatusWith<CoveredInterval*>(new CoveredInterval(
- _children.back().get(), nextBounds.getInner(), nextBounds.getOuter(), isLastInterval));
+ return std::make_unique<CoveredInterval>(
+ _children.back().get(), nextBounds.getInner(), nextBounds.getOuter(), isLastInterval);
}
-StatusWith<double> GeoNear2DStage::computeDistance(WorkingSetMember* member) {
+double GeoNear2DStage::computeDistance(WorkingSetMember* member) {
return computeGeoNearDistance(_nearParams, member);
}
@@ -959,13 +956,11 @@ PlanStage::StageState GeoNear2DSphereStage::initialize(OperationContext* opCtx,
return state;
}
-StatusWith<NearStage::CoveredInterval*> //
-GeoNear2DSphereStage::nextInterval(OperationContext* opCtx,
- WorkingSet* workingSet,
- const Collection* collection) {
+std::unique_ptr<NearStage::CoveredInterval> GeoNear2DSphereStage::nextInterval(
+ OperationContext* opCtx, WorkingSet* workingSet, const Collection* collection) {
// The search is finished if we searched at least once and all the way to the edge
if (_currBounds.getInner() >= 0 && _currBounds.getOuter() == _fullBounds.getOuter()) {
- return StatusWith<CoveredInterval*>(nullptr);
+ return nullptr;
}
//
@@ -1033,11 +1028,11 @@ GeoNear2DSphereStage::nextInterval(OperationContext* opCtx,
_children.emplace_back(std::make_unique<FetchStage>(
expCtx(), workingSet, std::move(scan), _nearParams.filter, collection));
- return StatusWith<CoveredInterval*>(new CoveredInterval(
- _children.back().get(), nextBounds.getInner(), nextBounds.getOuter(), isLastInterval));
+ return std::make_unique<CoveredInterval>(
+ _children.back().get(), nextBounds.getInner(), nextBounds.getOuter(), isLastInterval);
}
-StatusWith<double> GeoNear2DSphereStage::computeDistance(WorkingSetMember* member) {
+double GeoNear2DSphereStage::computeDistance(WorkingSetMember* member) {
return computeGeoNearDistance(_nearParams, member);
}
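The nextInterval() signature change swaps StatusWith<CoveredInterval*> (where nullptr meant "no more intervals" and a non-OK status meant failure) for a plain std::unique_ptr, with failures reported as exceptions. A small sketch of the resulting ownership and loop shape, using toy types rather than the real NearStage interface:

#include <algorithm>
#include <iostream>
#include <memory>

struct CoveredInterval {
    double inner;
    double outer;
};

std::unique_ptr<CoveredInterval> nextInterval(double current, double fullOuter) {
    if (current >= fullOuter)
        return nullptr;  // search finished; no error involved
    return std::make_unique<CoveredInterval>(
        CoveredInterval{current, std::min(current + 10.0, fullOuter)});
}

int main() {
    double current = 0;
    while (auto interval = nextInterval(current, 25.0)) {
        std::cout << "searching annulus [" << interval->inner << ", " << interval->outer << ")\n";
        current = interval->outer;
    }
    std::cout << "done\n";
}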
diff --git a/src/mongo/db/exec/geo_near.h b/src/mongo/db/exec/geo_near.h
index eb096064d53..dd3d33be97d 100644
--- a/src/mongo/db/exec/geo_near.h
+++ b/src/mongo/db/exec/geo_near.h
@@ -74,11 +74,11 @@ public:
const IndexDescriptor* twoDIndex);
protected:
- StatusWith<CoveredInterval*> nextInterval(OperationContext* opCtx,
- WorkingSet* workingSet,
- const Collection* collection) final;
+ std::unique_ptr<CoveredInterval> nextInterval(OperationContext* opCtx,
+ WorkingSet* workingSet,
+ const Collection* collection) final;
- StatusWith<double> computeDistance(WorkingSetMember* member) final;
+ double computeDistance(WorkingSetMember* member) final;
PlanStage::StageState initialize(OperationContext* opCtx,
WorkingSet* workingSet,
@@ -142,11 +142,11 @@ public:
~GeoNear2DSphereStage();
protected:
- StatusWith<CoveredInterval*> nextInterval(OperationContext* opCtx,
- WorkingSet* workingSet,
- const Collection* collection) final;
+ std::unique_ptr<CoveredInterval> nextInterval(OperationContext* opCtx,
+ WorkingSet* workingSet,
+ const Collection* collection) final;
- StatusWith<double> computeDistance(WorkingSetMember* member) final;
+ double computeDistance(WorkingSetMember* member) final;
PlanStage::StageState initialize(OperationContext* opCtx,
WorkingSet* workingSet,
diff --git a/src/mongo/db/exec/limit.cpp b/src/mongo/db/exec/limit.cpp
index 41505be622f..220d86d31be 100644
--- a/src/mongo/db/exec/limit.cpp
+++ b/src/mongo/db/exec/limit.cpp
@@ -70,11 +70,6 @@ PlanStage::StageState LimitStage::doWork(WorkingSetID* out) {
if (PlanStage::ADVANCED == status) {
*out = id;
--_numToReturn;
- } else if (PlanStage::FAILURE == status) {
- // The stage which produces a failure is responsible for allocating a working set member
- // with error details.
- invariant(WorkingSet::INVALID_ID != id);
- *out = id;
} else if (PlanStage::NEED_YIELD == status) {
*out = id;
}
diff --git a/src/mongo/db/exec/merge_sort.cpp b/src/mongo/db/exec/merge_sort.cpp
index 58a3e33f241..4af01272d05 100644
--- a/src/mongo/db/exec/merge_sort.cpp
+++ b/src/mongo/db/exec/merge_sort.cpp
@@ -130,12 +130,6 @@ PlanStage::StageState MergeSortStage::doWork(WorkingSetID* out) {
// anymore.
_noResultToMerge.pop();
return PlanStage::NEED_TIME;
- } else if (PlanStage::FAILURE == code) {
- // The stage which produces a failure is responsible for allocating a working set member
- // with error details.
- invariant(WorkingSet::INVALID_ID != id);
- *out = id;
- return code;
} else if (PlanStage::NEED_YIELD == code) {
*out = id;
return code;
diff --git a/src/mongo/db/exec/mock_stage.cpp b/src/mongo/db/exec/mock_stage.cpp
new file mode 100644
index 00000000000..a84c7995408
--- /dev/null
+++ b/src/mongo/db/exec/mock_stage.cpp
@@ -0,0 +1,72 @@
+/**
+ * Copyright (C) 2020-present MongoDB, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the Server Side Public License, version 1,
+ * as published by MongoDB, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * Server Side Public License for more details.
+ *
+ * You should have received a copy of the Server Side Public License
+ * along with this program. If not, see
+ * <http://www.mongodb.com/licensing/server-side-public-license>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the Server Side Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#include "mongo/platform/basic.h"
+
+#include "mongo/db/exec/mock_stage.h"
+
+#include "mongo/util/visit_helper.h"
+
+namespace mongo {
+
+MockStage::MockStage(ExpressionContext* expCtx, WorkingSet* ws)
+ : PlanStage(kStageType.rawData(), expCtx), _ws(ws) {}
+
+std::unique_ptr<PlanStageStats> MockStage::getStats() {
+ _commonStats.isEOF = isEOF();
+ std::unique_ptr<PlanStageStats> ret =
+ std::make_unique<PlanStageStats>(_commonStats, StageType::STAGE_MOCK);
+ ret->specific = std::make_unique<MockStats>(_specificStats);
+ return ret;
+}
+
+PlanStage::StageState MockStage::doWork(WorkingSetID* out) {
+ if (isEOF()) {
+ return PlanStage::IS_EOF;
+ }
+
+ auto nextResult = _results.front();
+ _results.pop();
+
+ auto returnState = stdx::visit(
+ visit_helper::Overloaded{
+ [](WorkingSetID wsid) -> PlanStage::StageState { return PlanStage::ADVANCED; },
+ [](PlanStage::StageState state) -> PlanStage::StageState { return state; },
+ [](Status status) -> PlanStage::StageState {
+ uassertStatusOK(status);
+ MONGO_UNREACHABLE;
+ }},
+ nextResult);
+ if (returnState == PlanStage::ADVANCED) {
+ *out = stdx::get<WorkingSetID>(nextResult);
+ }
+ return returnState;
+}
+
+} // namespace mongo
diff --git a/src/mongo/db/exec/mock_stage.h b/src/mongo/db/exec/mock_stage.h
new file mode 100644
index 00000000000..02e75a52bd4
--- /dev/null
+++ b/src/mongo/db/exec/mock_stage.h
@@ -0,0 +1,109 @@
+/**
+ * Copyright (C) 2020-present MongoDB, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the Server Side Public License, version 1,
+ * as published by MongoDB, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * Server Side Public License for more details.
+ *
+ * You should have received a copy of the Server Side Public License
+ * along with this program. If not, see
+ * <http://www.mongodb.com/licensing/server-side-public-license>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the Server Side Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#pragma once
+
+#include <queue>
+
+#include "mongo/db/exec/plan_stage.h"
+
+namespace mongo {
+
+/**
+ * A stage designed for use in unit tests. The test can queue a sequence of results using the
+ * 'enqueue*()' methods; the stage then returns them to its parent in order.
+ */
+class MockStage final : public PlanStage {
+public:
+ static constexpr StringData kStageType = "MOCK"_sd;
+
+ MockStage(ExpressionContext* expCtx, WorkingSet* ws);
+
+ StageState doWork(WorkingSetID* out) final;
+
+ bool isEOF() final {
+ return _results.empty();
+ }
+
+ StageType stageType() const final {
+ return STAGE_MOCK;
+ }
+
+ std::unique_ptr<PlanStageStats> getStats() final;
+
+ const SpecificStats* getSpecificStats() const final {
+ return &_specificStats;
+ }
+
+ /**
+ * Adds a WorkingSetMember to the back of the queue.
+ *
+ * The caller is responsible for allocating 'id' and filling out the WSM keyed by 'id'
+ * appropriately.
+ *
+     * The MockStage takes ownership of 'id', so the caller should not call WorkingSet::free()
+     * on it.
+ */
+ void enqueueAdvanced(WorkingSetID wsid) {
+ _results.push(wsid);
+ }
+
+ /**
+ * Adds a StageState code such as 'NEED_TIME' or 'NEED_YIELD' to the back of the queue. Illegal
+ * to call with 'ADVANCED' -- 'enqueueAdvanced()' should be used instead. Also illegal to call
+ * with 'IS_EOF', since EOF is implied when the mock stage's queue is emptied.
+ */
+ void enqueueStateCode(StageState stageState) {
+ invariant(stageState != PlanStage::ADVANCED);
+ invariant(stageState != PlanStage::IS_EOF);
+ _results.push(stageState);
+ }
+
+ /**
+ * Adds 'status' to the queue. When the 'status' is dequeued, it will be thrown from 'work()' as
+ * an exception.
+ */
+ void enqueueError(Status status) {
+ invariant(!status.isOK());
+ _results.push(status);
+ }
+
+private:
+    // The mock stage holds a queue of objects of this type. Each element in the queue can be a
+    // document to return, a StageState code, or a Status representing an error.
+ using MockResult = stdx::variant<WorkingSetID, PlanStage::StageState, Status>;
+
+ WorkingSet* _ws;
+
+ std::queue<MockResult> _results;
+
+ MockStats _specificStats;
+};
+
+} // namespace mongo
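
[Editorial sketch] To make the dequeue semantics above concrete, here is a self-contained model of MockStage::doWork() using plain std:: types in place of the mongo:: ones (std::variant for stdx::variant, a std::string message and std::runtime_error standing in for Status and uassertStatusOK):

    #include <cassert>
    #include <queue>
    #include <stdexcept>
    #include <string>
    #include <variant>

    using WorkingSetID = unsigned;
    enum class StageState { ADVANCED, IS_EOF, NEED_TIME, NEED_YIELD };

    // Each queued element is a result id, a state code, or an error message.
    using MockResult = std::variant<WorkingSetID, StageState, std::string>;

    StageState mockDoWork(std::queue<MockResult>& results, WorkingSetID* out) {
        if (results.empty())
            return StageState::IS_EOF;  // EOF is implied once the queue drains
        MockResult next = results.front();
        results.pop();
        if (auto* wsid = std::get_if<WorkingSetID>(&next)) {
            *out = *wsid;  // queued via enqueueAdvanced()
            return StageState::ADVANCED;
        }
        if (auto* state = std::get_if<StageState>(&next))
            return *state;  // queued via enqueueStateCode()
        throw std::runtime_error(std::get<std::string>(next));  // enqueueError()
    }

    int main() {
        std::queue<MockResult> q;
        q.push(WorkingSetID{7});
        q.push(StageState::NEED_TIME);
        WorkingSetID id = 0;
        assert(mockDoWork(q, &id) == StageState::ADVANCED && id == 7);
        assert(mockDoWork(q, &id) == StageState::NEED_TIME);
        assert(mockDoWork(q, &id) == StageState::IS_EOF);
        return 0;
    }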
diff --git a/src/mongo/db/exec/multi_plan.cpp b/src/mongo/db/exec/multi_plan.cpp
index 97c16a8a009..2c0a28b92bd 100644
--- a/src/mongo/db/exec/multi_plan.cpp
+++ b/src/mongo/db/exec/multi_plan.cpp
@@ -78,10 +78,7 @@ MultiPlanStage::MultiPlanStage(ExpressionContext* expCtx,
_cachingMode(cachingMode),
_query(cq),
_bestPlanIdx(kNoSuchPlan),
- _backupPlanIdx(kNoSuchPlan),
- _failure(false),
- _failureCount(0),
- _statusMemberId(WorkingSet::INVALID_ID) {}
+ _backupPlanIdx(kNoSuchPlan) {}
void MultiPlanStage::addPlan(std::unique_ptr<QuerySolution> solution,
std::unique_ptr<PlanStage> root,
@@ -96,10 +93,6 @@ void MultiPlanStage::addPlan(std::unique_ptr<QuerySolution> solution,
}
bool MultiPlanStage::isEOF() {
- if (_failure) {
- return true;
- }
-
// If _bestPlanIdx hasn't been found, can't be at EOF
if (!bestPlanChosen()) {
return false;
@@ -112,11 +105,6 @@ bool MultiPlanStage::isEOF() {
}
PlanStage::StageState MultiPlanStage::doWork(WorkingSetID* out) {
- if (_failure) {
- *out = _statusMemberId;
- return PlanStage::FAILURE;
- }
-
CandidatePlan& bestPlan = _candidates[_bestPlanIdx];
// Look for an already produced result that provides the data the caller wants.
@@ -128,26 +116,24 @@ PlanStage::StageState MultiPlanStage::doWork(WorkingSetID* out) {
// best plan had no (or has no more) cached results
- StageState state = bestPlan.root->work(out);
+ StageState state;
+ try {
+ state = bestPlan.root->work(out);
+ } catch (const ExceptionFor<ErrorCodes::QueryExceededMemoryLimitNoDiskUseAllowed>&) {
+        // The winning plan ran out of memory. If we have a backup plan with no blocking stages,
+ // then switch to it.
+ if (!hasBackupPlan()) {
+ throw;
+ }
- if (PlanStage::FAILURE == state && hasBackupPlan()) {
- LOGV2_DEBUG(20588, 5, "Best plan errored out switching to backup");
- // Uncache the bad solution if we fall back
- // on the backup solution.
- //
- // XXX: Instead of uncaching we should find a way for the
- // cached plan runner to fall back on a different solution
- // if the best solution fails. Alternatively we could try to
- // defer cache insertion to be after the first produced result.
+ LOGV2_DEBUG(20588, 5, "Best plan errored, switching to backup plan");
- CollectionQueryInfo::get(collection())
- .getPlanCache()
- ->remove(*_query)
- .transitional_ignore();
+ // Attempt to remove the plan from the cache. This will fail if the plan has already been
+ // removed, and we intentionally ignore such errors.
+ CollectionQueryInfo::get(collection()).getPlanCache()->remove(*_query).ignore();
_bestPlanIdx = _backupPlanIdx;
_backupPlanIdx = kNoSuchPlan;
-
return _candidates[_bestPlanIdx].root->work(out);
}
@@ -159,24 +145,15 @@ PlanStage::StageState MultiPlanStage::doWork(WorkingSetID* out) {
return state;
}
-Status MultiPlanStage::tryYield(PlanYieldPolicy* yieldPolicy) {
+void MultiPlanStage::tryYield(PlanYieldPolicy* yieldPolicy) {
// These are the conditions which can cause us to yield:
// 1) The yield policy's timer elapsed, or
// 2) some stage requested a yield, or
// 3) we need to yield and retry due to a WriteConflictException.
// In all cases, the actual yielding happens here.
if (yieldPolicy->shouldYieldOrInterrupt()) {
- auto yieldStatus = yieldPolicy->yieldOrInterrupt();
-
- if (!yieldStatus.isOK()) {
- _failure = true;
- _statusMemberId =
- WorkingSetCommon::allocateStatusMember(_candidates[0].ws, yieldStatus);
- return yieldStatus;
- }
+ uassertStatusOK(yieldPolicy->yieldOrInterrupt());
}
-
- return Status::OK();
}
// static
@@ -229,15 +206,7 @@ Status MultiPlanStage::pickBestPlan(PlanYieldPolicy* yieldPolicy) {
}
}
} catch (DBException& e) {
- e.addContext("exception thrown while multiplanner was selecting best plan");
- throw;
- }
-
- if (_failure) {
- invariant(WorkingSet::INVALID_ID != _statusMemberId);
- WorkingSetMember* member = _candidates[0].ws->get(_statusMemberId);
- return WorkingSetCommon::getMemberStatus(*member).withContext(
- "multiplanner encountered a failure while selecting best plan");
+ return e.toStatus().withContext("error while multiplanner was selecting best plan");
}
// After picking best plan, ranking will own plan stats from
@@ -397,12 +366,28 @@ bool MultiPlanStage::workAllPlans(size_t numResults, PlanYieldPolicy* yieldPolic
}
// Might need to yield between calls to work due to the timer elapsing.
- if (!(tryYield(yieldPolicy)).isOK()) {
- return false;
- }
+ tryYield(yieldPolicy);
WorkingSetID id = WorkingSet::INVALID_ID;
- PlanStage::StageState state = candidate.root->work(&id);
+ PlanStage::StageState state;
+ try {
+ state = candidate.root->work(&id);
+ } catch (const ExceptionFor<ErrorCodes::QueryExceededMemoryLimitNoDiskUseAllowed>&) {
+ // If a candidate fails due to exceeding allowed resource consumption, then mark the
+ // candidate as failed but proceed with the multi-plan trial period. The MultiPlanStage
+ // as a whole only fails if _all_ candidates hit their resource consumption limit, or if
+ // a different, query-fatal error code is thrown.
+ candidate.failed = true;
+ ++_failureCount;
+
+            // If all children have failed, then rethrow. Otherwise, swallow the error and move on
+            // to the next candidate plan.
+ if (_failureCount == _candidates.size()) {
+ throw;
+ }
+
+ continue;
+ }
if (PlanStage::ADVANCED == state) {
// Save result for later.
@@ -430,26 +415,7 @@ bool MultiPlanStage::workAllPlans(size_t numResults, PlanYieldPolicy* yieldPolic
yieldPolicy->forceYield();
}
- if (!(tryYield(yieldPolicy)).isOK()) {
- return false;
- }
- } else if (PlanStage::NEED_TIME != state) {
- // On FAILURE, mark this candidate as failed, but keep executing the other
- // candidates. The MultiPlanStage as a whole only fails when every candidate
- // plan fails.
-
- candidate.failed = true;
- ++_failureCount;
-
- // Propagate most recent seen failure to parent.
- invariant(state == PlanStage::FAILURE);
- _statusMemberId = id;
-
-
- if (_failureCount == _candidates.size()) {
- _failure = true;
- return false;
- }
+ tryYield(yieldPolicy);
}
}
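
[Editorial sketch] The per-candidate handling above boils down to: only the memory-limit error is swallowed, and only while at least one candidate is still alive. A toy model (names illustrative, not MongoDB's; the real trial period round-robins over the candidates many times, this shows one pass):

    #include <functional>
    #include <stdexcept>
    #include <vector>

    struct MemoryLimitError : std::runtime_error {
        using std::runtime_error::runtime_error;
    };

    // Swallow the memory-limit error per candidate; the stage as a whole only
    // rethrows once every candidate has failed.
    void workCandidatesOnce(std::vector<std::function<void()>>& candidates,
                            std::vector<bool>& failed,
                            size_t& failureCount) {
        for (size_t i = 0; i < candidates.size(); ++i) {
            if (failed[i])
                continue;
            try {
                candidates[i]();
            } catch (const MemoryLimitError&) {
                failed[i] = true;
                if (++failureCount == candidates.size())
                    throw;  // all plans exhausted their budget: query-fatal
            }
            // Any other exception type is query-fatal and propagates immediately.
        }
    }

    int main() {
        std::vector<std::function<void()>> plans{
            [] { throw MemoryLimitError("blocking sort exceeded memory limit"); },
            [] { /* ixscan-backed plan keeps producing results */ }};
        std::vector<bool> failed(plans.size(), false);
        size_t failureCount = 0;
        workCandidatesOnce(plans, failed, failureCount);  // first failure is swallowed
        return failureCount == 1 ? 0 : 1;
    }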
diff --git a/src/mongo/db/exec/multi_plan.h b/src/mongo/db/exec/multi_plan.h
index 1c33885d245..4b9b64bf863 100644
--- a/src/mongo/db/exec/multi_plan.h
+++ b/src/mongo/db/exec/multi_plan.h
@@ -178,9 +178,9 @@ private:
* Checks whether we need to perform either a timing-based yield or a yield for a document
* fetch. If so, then uses 'yieldPolicy' to actually perform the yield.
*
- * Returns a non-OK status if killed during a yield or if the query has exceeded its time limit.
+ * Throws an exception if yield recovery fails.
*/
- Status tryYield(PlanYieldPolicy* yieldPolicy);
+ void tryYield(PlanYieldPolicy* yieldPolicy);
static const int kNoSuchPlan = -1;
@@ -205,25 +205,14 @@ private:
// uses -1 / kNoSuchPlan when best plan is not (yet) known
int _backupPlanIdx;
- // Set if this MultiPlanStage cannot continue, and the query must fail. This can happen in
- // two ways. The first is that all candidate plans fail. Note that one plan can fail
- // during normal execution of the plan competition. Here is an example:
+    // The number of candidate plans that have failed during the trial period. The multi-planner
+    // swallows resource exhaustion errors (QueryExceededMemoryLimitNoDiskUseAllowed). This means
+    // that if one candidate involves a blocking sort and another does not, the entire query need
+    // not fail just because the blocking sort hits the limit on its allowed memory footprint.
//
- // Plan 1: collection scan with sort. Sort runs out of memory.
- // Plan 2: ixscan that provides sort. Won't run out of memory.
- //
- // We want to choose plan 2 even if plan 1 fails.
- //
- // The second way for failure to occur is that the execution of this query is killed during
- // a yield, by some concurrent event such as a collection drop.
- bool _failure;
-
- // If everything fails during the plan competition, we can't pick one.
- size_t _failureCount;
-
- // if pickBestPlan fails, this is set to the wsid of the statusMember
- // returned by ::work()
- WorkingSetID _statusMemberId;
+    // Arbitrary error codes are not swallowed by the multi-planner, since it is not known whether
+    // it is safe for the query to continue executing.
+ size_t _failureCount = 0u;
// Stats
MultiPlanStats _specificStats;
diff --git a/src/mongo/db/exec/near.cpp b/src/mongo/db/exec/near.cpp
index 30bbb894881..f5bd5a7c157 100644
--- a/src/mongo/db/exec/near.cpp
+++ b/src/mongo/db/exec/near.cpp
@@ -81,7 +81,6 @@ PlanStage::StageState NearStage::initNext(WorkingSetID* out) {
PlanStage::StageState NearStage::doWork(WorkingSetID* out) {
WorkingSetID toReturn = WorkingSet::INVALID_ID;
- Status error = Status::OK();
PlanStage::StageState nextState = PlanStage::NEED_TIME;
//
@@ -91,7 +90,7 @@ PlanStage::StageState NearStage::doWork(WorkingSetID* out) {
if (SearchState_Initializing == _searchState) {
nextState = initNext(&toReturn);
} else if (SearchState_Buffering == _searchState) {
- nextState = bufferNext(&toReturn, &error);
+ nextState = bufferNext(&toReturn);
} else if (SearchState_Advancing == _searchState) {
nextState = advanceNext(&toReturn);
} else {
@@ -103,9 +102,7 @@ PlanStage::StageState NearStage::doWork(WorkingSetID* out) {
// Handle the results
//
- if (PlanStage::FAILURE == nextState) {
- *out = WorkingSetCommon::allocateStatusMember(_workingSet, error);
- } else if (PlanStage::ADVANCED == nextState) {
+ if (PlanStage::ADVANCED == nextState) {
*out = toReturn;
} else if (PlanStage::NEED_YIELD == nextState) {
*out = toReturn;
@@ -132,28 +129,20 @@ struct NearStage::SearchResult {
};
// Set "toReturn" when NEED_YIELD.
-PlanStage::StageState NearStage::bufferNext(WorkingSetID* toReturn, Status* error) {
+PlanStage::StageState NearStage::bufferNext(WorkingSetID* toReturn) {
//
// Try to retrieve the next covered member
//
if (!_nextInterval) {
- StatusWith<CoveredInterval*> intervalStatus =
- nextInterval(opCtx(), _workingSet, collection());
- if (!intervalStatus.isOK()) {
- _searchState = SearchState_Finished;
- *error = intervalStatus.getStatus();
- return PlanStage::FAILURE;
- }
-
- if (nullptr == intervalStatus.getValue()) {
+ auto interval = nextInterval(opCtx(), _workingSet, collection());
+ if (!interval) {
_searchState = SearchState_Finished;
return PlanStage::IS_EOF;
}
// CoveredInterval and its child stage are owned by _childrenIntervals
- _childrenIntervals.push_back(
- std::unique_ptr<NearStage::CoveredInterval>{intervalStatus.getValue()});
+ _childrenIntervals.push_back(std::move(interval));
_nextInterval = _childrenIntervals.back().get();
_specificStats.intervalStats.emplace_back();
_nextIntervalStats = &_specificStats.intervalStats.back();
@@ -168,9 +157,6 @@ PlanStage::StageState NearStage::bufferNext(WorkingSetID* toReturn, Status* erro
if (PlanStage::IS_EOF == intervalState) {
_searchState = SearchState_Advancing;
return PlanStage::NEED_TIME;
- } else if (PlanStage::FAILURE == intervalState) {
- *error = WorkingSetCommon::getMemberStatus(*_workingSet->get(nextMemberID));
- return intervalState;
} else if (PlanStage::NEED_YIELD == intervalState) {
*toReturn = nextMemberID;
return intervalState;
@@ -194,17 +180,9 @@ PlanStage::StageState NearStage::bufferNext(WorkingSetID* toReturn, Status* erro
++_nextIntervalStats->numResultsBuffered;
- StatusWith<double> distanceStatus = computeDistance(nextMember);
-
- if (!distanceStatus.isOK()) {
- _searchState = SearchState_Finished;
- *error = distanceStatus.getStatus();
- return PlanStage::FAILURE;
- }
-
// If the member's distance is in the current distance interval, add it to our buffered
// results.
- double memberDistance = distanceStatus.getValue();
+ auto memberDistance = computeDistance(nextMember);
// Ensure that the BSONObj underlying the WorkingSetMember is owned in case we yield.
nextMember->makeObjOwnedIfNeeded();
diff --git a/src/mongo/db/exec/near.h b/src/mongo/db/exec/near.h
index 8f55c777494..bbbbee686e3 100644
--- a/src/mongo/db/exec/near.h
+++ b/src/mongo/db/exec/near.h
@@ -115,23 +115,19 @@ protected:
//
/**
- * Constructs the next covering over the next interval to buffer results from, or NULL
- * if the full range has been searched. Use the provided working set as the working
- * set for the covering stage if required.
- *
- * Returns !OK on failure to create next stage.
+ * Constructs the next covering over the next interval to buffer results from, or nullptr if the
+ * full range has been searched. Use the provided working set as the working set for the
+ * covering stage if required.
*/
- virtual StatusWith<CoveredInterval*> nextInterval(OperationContext* opCtx,
- WorkingSet* workingSet,
- const Collection* collection) = 0;
+ virtual std::unique_ptr<CoveredInterval> nextInterval(OperationContext* opCtx,
+ WorkingSet* workingSet,
+ const Collection* collection) = 0;
/**
* Computes the distance value for the given member data, or -1 if the member should not be
* returned in the sorted results.
- *
- * Returns !OK on invalid member data.
*/
- virtual StatusWith<double> computeDistance(WorkingSetMember* member) = 0;
+ virtual double computeDistance(WorkingSetMember* member) = 0;
/*
* Initialize near stage before buffering the data.
@@ -157,7 +153,7 @@ private:
//
StageState initNext(WorkingSetID* out);
- StageState bufferNext(WorkingSetID* toReturn, Status* error);
+ StageState bufferNext(WorkingSetID* toReturn);
StageState advanceNext(WorkingSetID* toReturn);
//
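
[Editorial sketch] The new nextInterval()/computeDistance() contracts push error reporting into exceptions. A sketch of what a derived stage's computeDistance() now looks like, with a hypothetical geometry type (not the real WorkingSetMember plumbing), assuming invalid member data is reported by throwing:

    #include <cmath>
    #include <stdexcept>

    struct Point {
        double x = 0, y = 0;
        bool valid = true;
    };

    // Returns the distance directly; invalid member data is reported by
    // throwing rather than by returning a non-OK StatusWith<double>.
    double computeDistanceSketch(const Point& p) {
        if (!p.valid)
            throw std::invalid_argument("geo near: invalid geometry in document");
        return std::hypot(p.x, p.y);
    }

    int main() {
        return std::abs(computeDistanceSketch(Point{3, 4, true}) - 5.0) < 1e-9 ? 0 : 1;
    }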
diff --git a/src/mongo/db/exec/or.cpp b/src/mongo/db/exec/or.cpp
index c50a4981e94..ec0d680ac37 100644
--- a/src/mongo/db/exec/or.cpp
+++ b/src/mongo/db/exec/or.cpp
@@ -110,12 +110,6 @@ PlanStage::StageState OrStage::doWork(WorkingSetID* out) {
} else {
return PlanStage::NEED_TIME;
}
- } else if (PlanStage::FAILURE == childStatus) {
- // The stage which produces a failure is responsible for allocating a working set member
- // with error details.
- invariant(WorkingSet::INVALID_ID != id);
- *out = id;
- return childStatus;
} else if (PlanStage::NEED_YIELD == childStatus) {
*out = id;
}
diff --git a/src/mongo/db/exec/pipeline_proxy.cpp b/src/mongo/db/exec/pipeline_proxy.cpp
index bdfaaa746b8..c3014ce34a5 100644
--- a/src/mongo/db/exec/pipeline_proxy.cpp
+++ b/src/mongo/db/exec/pipeline_proxy.cpp
@@ -65,9 +65,7 @@ PipelineProxyStage::PipelineProxyStage(ExpressionContext* expCtx,
}
PlanStage::StageState PipelineProxyStage::doWork(WorkingSetID* out) {
- if (!out) {
- return PlanStage::FAILURE;
- }
+ invariant(out);
if (!_stash.empty()) {
*out = _ws->allocate();
diff --git a/src/mongo/db/exec/plan_stage.h b/src/mongo/db/exec/plan_stage.h
index 652bc62d051..a601ddb4f1c 100644
--- a/src/mongo/db/exec/plan_stage.h
+++ b/src/mongo/db/exec/plan_stage.h
@@ -74,6 +74,10 @@ class RecordId;
* saveState() if any underlying database state changes. If saveState() is called,
* restoreState() must be called again before any work() is done.
*
+ * If an error occurs at runtime (e.g. we reach resource limits for the request), then work() throws
+ * an exception. At this point, statistics may be extracted from the execution plan, but the
+ * execution tree is otherwise unusable and the plan must be discarded.
+ *
* Here is a very simple usage example:
*
* WorkingSet workingSet;
@@ -92,9 +96,6 @@ class RecordId;
* case PlanStage::NEED_TIME:
* // Need more time.
* break;
- * case PlanStage::FAILURE:
- * // Throw exception or return error
- * break;
* }
*
* if (shouldYield) {
@@ -170,28 +171,20 @@ public:
// wants fetched. On the next call to work() that stage can assume a fetch was performed
// on the WSM that the held WSID refers to.
NEED_YIELD,
-
- // Something has gone unrecoverably wrong. Stop running this query.
- // If the out parameter does not refer to an invalid working set member,
- // call WorkingSetCommon::getStatusMemberObject() to get details on the failure.
- // Any class implementing this interface must set the WSID out parameter to
- // INVALID_ID or a valid WSM ID if FAILURE is returned.
- FAILURE,
};
static std::string stateStr(const StageState& state) {
- if (ADVANCED == state) {
- return "ADVANCED";
- } else if (IS_EOF == state) {
- return "IS_EOF";
- } else if (NEED_TIME == state) {
- return "NEED_TIME";
- } else if (NEED_YIELD == state) {
- return "NEED_YIELD";
- } else {
- verify(FAILURE == state);
- return "FAILURE";
+ switch (state) {
+ case PlanStage::ADVANCED:
+ return "ADVANCED";
+ case PlanStage::IS_EOF:
+ return "IS_EOF";
+ case PlanStage::NEED_TIME:
+ return "NEED_TIME";
+ case PlanStage::NEED_YIELD:
+ return "NEED_YIELD";
}
+ MONGO_UNREACHABLE;
}
@@ -199,13 +192,21 @@ public:
* Perform a unit of work on the query. Ask the stage to produce the next unit of output.
* Stage returns StageState::ADVANCED if *out is set to the next unit of output. Otherwise,
* returns another value of StageState to indicate the stage's status.
+ *
+ * Throws an exception if an error is encountered while executing the query.
*/
StageState work(WorkingSetID* out) {
auto optTimer(getOptTimer());
++_commonStats.works;
- StageState workResult = doWork(out);
+ StageState workResult;
+ try {
+ workResult = doWork(out);
+ } catch (...) {
+ _commonStats.failed = true;
+ throw;
+ }
if (StageState::ADVANCED == workResult) {
++_commonStats.advanced;
@@ -213,8 +214,6 @@ public:
++_commonStats.needTime;
} else if (StageState::NEED_YIELD == workResult) {
++_commonStats.needYield;
- } else if (StageState::FAILURE == workResult) {
- _commonStats.failed = true;
}
return workResult;
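
[Editorial sketch] The catch/mark/rethrow in work() is now the one place where stats learn about failures. A minimal generic model of that wrapper (stand-in stats struct, not the real CommonStats):

    #include <utility>

    struct CommonStatsSketch {
        long long works = 0;
        bool failed = false;
    };

    // Increment 'works', and on any exception record the failure for explain
    // output before letting the error keep unwinding.
    template <typename DoWorkFn>
    auto workWithStats(CommonStatsSketch& stats, DoWorkFn&& doWork) {
        ++stats.works;
        try {
            return std::forward<DoWorkFn>(doWork)();
        } catch (...) {
            stats.failed = true;  // the tree is now unusable, but its stats survive
            throw;
        }
    }

    int main() {
        CommonStatsSketch stats;
        int result = workWithStats(stats, [] { return 42; });
        return (stats.works == 1 && result == 42) ? 0 : 1;
    }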
diff --git a/src/mongo/db/exec/projection.cpp b/src/mongo/db/exec/projection.cpp
index 45abe1ec4ce..4b205739032 100644
--- a/src/mongo/db/exec/projection.cpp
+++ b/src/mongo/db/exec/projection.cpp
@@ -139,20 +139,7 @@ PlanStage::StageState ProjectionStage::doWork(WorkingSetID* out) {
if (PlanStage::ADVANCED == status) {
WorkingSetMember* member = _ws.get(id);
// Punt to our specific projection impl.
- Status projStatus = transform(member);
- if (!projStatus.isOK()) {
- LOGV2_WARNING(23827,
- "Couldn't execute projection, status = {projStatus}",
- "projStatus"_attr = redact(projStatus));
- *out = WorkingSetCommon::allocateStatusMember(&_ws, projStatus);
- return PlanStage::FAILURE;
- }
-
- *out = id;
- } else if (PlanStage::FAILURE == status) {
- // The stage which produces a failure is responsible for allocating a working set member
- // with error details.
- invariant(WorkingSet::INVALID_ID != id);
+ transform(member);
*out = id;
} else if (PlanStage::NEED_YIELD == status) {
*out = id;
@@ -184,7 +171,7 @@ ProjectionStageDefault::ProjectionStageDefault(boost::intrusive_ptr<ExpressionCo
_executor{projection_executor::buildProjectionExecutor(
expCtx, projection, {}, projection_executor::kDefaultBuilderParams)} {}
-Status ProjectionStageDefault::transform(WorkingSetMember* member) const {
+void ProjectionStageDefault::transform(WorkingSetMember* member) const {
Document input;
// Most metadata should have already been stored within the WSM when we project out a document.
@@ -226,8 +213,6 @@ Status ProjectionStageDefault::transform(WorkingSetMember* member) const {
// constructed from the input one backed by BSON which is owned by the storage system, so we
// need to make sure we transition an owned document.
transitionMemberToOwnedObj(projected.getOwned(), member);
-
- return Status::OK();
}
ProjectionStageCovered::ProjectionStageCovered(ExpressionContext* expCtx,
@@ -265,7 +250,7 @@ ProjectionStageCovered::ProjectionStageCovered(ExpressionContext* expCtx,
}
}
-Status ProjectionStageCovered::transform(WorkingSetMember* member) const {
+void ProjectionStageCovered::transform(WorkingSetMember* member) const {
BSONObjBuilder bob;
// We're pulling data out of the key.
@@ -284,7 +269,6 @@ Status ProjectionStageCovered::transform(WorkingSetMember* member) const {
++keyIndex;
}
transitionMemberToOwnedObj(bob.obj(), member);
- return Status::OK();
}
ProjectionStageSimple::ProjectionStageSimple(ExpressionContext* expCtx,
@@ -298,7 +282,7 @@ ProjectionStageSimple::ProjectionStageSimple(ExpressionContext* expCtx,
projection->getRequiredFields().end()};
}
-Status ProjectionStageSimple::transform(WorkingSetMember* member) const {
+void ProjectionStageSimple::transform(WorkingSetMember* member) const {
BSONObjBuilder bob;
// SIMPLE_DOC implies that we expect an object so it's kind of redundant.
// If we got here because of SIMPLE_DOC the planner shouldn't have messed up.
@@ -320,7 +304,6 @@ Status ProjectionStageSimple::transform(WorkingSetMember* member) const {
}
transitionMemberToOwnedObj(bob.obj(), member);
- return Status::OK();
}
} // namespace mongo
diff --git a/src/mongo/db/exec/projection.h b/src/mongo/db/exec/projection.h
index 5a5d525cc0a..00e7fb33dbc 100644
--- a/src/mongo/db/exec/projection.h
+++ b/src/mongo/db/exec/projection.h
@@ -71,7 +71,7 @@ private:
* Runs either the default complete implementation or a fast path depending on how this was
* constructed.
*/
- virtual Status transform(WorkingSetMember* member) const = 0;
+ virtual void transform(WorkingSetMember* member) const = 0;
// Used to retrieve a WorkingSetMember as part of 'doWork()'.
WorkingSet& _ws;
@@ -99,7 +99,7 @@ public:
}
private:
- Status transform(WorkingSetMember* member) const final;
+ void transform(WorkingSetMember* member) const final;
// Represents all metadata used in the projection.
const QueryMetadataBitSet _requestedMetadata;
@@ -129,7 +129,7 @@ public:
}
private:
- Status transform(WorkingSetMember* member) const final;
+ void transform(WorkingSetMember* member) const final;
// Field names present in the simple projection.
FieldSet _includedFields;
@@ -167,7 +167,7 @@ public:
}
private:
- Status transform(WorkingSetMember* member) const final;
+ void transform(WorkingSetMember* member) const final;
// Has the field names present in the simple projection.
stdx::unordered_set<std::string> _includedFields;
diff --git a/src/mongo/db/exec/queued_data_stage.cpp b/src/mongo/db/exec/queued_data_stage.cpp
index c5f6339dfaa..8ecb541d627 100644
--- a/src/mongo/db/exec/queued_data_stage.cpp
+++ b/src/mongo/db/exec/queued_data_stage.cpp
@@ -49,29 +49,13 @@ PlanStage::StageState QueuedDataStage::doWork(WorkingSetID* out) {
return PlanStage::IS_EOF;
}
- StageState state = _results.front();
- _results.pop();
-
- switch (state) {
- case PlanStage::ADVANCED:
- *out = _members.front();
- _members.pop();
- break;
- case PlanStage::FAILURE:
- // On FAILURE, this stage is reponsible for allocating the WorkingSetMember with
- // the error details.
- *out = WorkingSetCommon::allocateStatusMember(
- _ws, Status(ErrorCodes::InternalError, "Queued data stage failure"));
- break;
- default:
- break;
- }
-
- return state;
+ *out = _members.front();
+ _members.pop();
+ return PlanStage::ADVANCED;
}
bool QueuedDataStage::isEOF() {
- return _results.empty();
+ return _members.empty();
}
unique_ptr<PlanStageStats> QueuedDataStage::getStats() {
@@ -87,15 +71,7 @@ const SpecificStats* QueuedDataStage::getSpecificStats() const {
return &_specificStats;
}
-void QueuedDataStage::pushBack(const PlanStage::StageState state) {
- invariant(PlanStage::ADVANCED != state);
- _results.push(state);
-}
-
void QueuedDataStage::pushBack(const WorkingSetID& id) {
- _results.push(PlanStage::ADVANCED);
-
- // member lives in _ws. We'll return it when _results hits ADVANCED.
_members.push(id);
}
diff --git a/src/mongo/db/exec/queued_data_stage.h b/src/mongo/db/exec/queued_data_stage.h
index b952062803e..bc13988cef0 100644
--- a/src/mongo/db/exec/queued_data_stage.h
+++ b/src/mongo/db/exec/queued_data_stage.h
@@ -69,17 +69,6 @@ public:
/**
* Add a result to the back of the queue.
*
- * Note: do not add PlanStage::ADVANCED with this method, ADVANCED can
- * only be added with a data member.
- *
- * Work() goes through the queue.
- * Either no data is returned (just a state), or...
- */
- void pushBack(const PlanStage::StageState state);
-
- /**
- * ...data is returned (and we ADVANCED)
- *
* The caller is responsible for allocating 'id' and filling out the WSM keyed by 'id'
* appropriately.
*
@@ -95,7 +84,6 @@ private:
WorkingSet* _ws;
// The data we return.
- std::queue<PlanStage::StageState> _results;
std::queue<WorkingSetID> _members;
// Stats
diff --git a/src/mongo/db/exec/queued_data_stage_test.cpp b/src/mongo/db/exec/queued_data_stage_test.cpp
index 46ef8d371e2..264e57b387d 100644
--- a/src/mongo/db/exec/queued_data_stage_test.cpp
+++ b/src/mongo/db/exec/queued_data_stage_test.cpp
@@ -85,7 +85,7 @@ TEST_F(QueuedDataStageTest, getValidStats) {
//
// Test that our stats are updated as we perform operations.
//
-TEST_F(QueuedDataStageTest, validateStats) {
+TEST_F(QueuedDataStageTest, ValidateStats) {
WorkingSet ws;
WorkingSetID wsID;
auto expCtx = make_intrusive<ExpressionContext>(opCtx(), nullptr, kNss);
@@ -100,18 +100,11 @@ TEST_F(QueuedDataStageTest, validateStats) {
ASSERT_EQUALS(stats->advanced, 0U);
ASSERT_FALSE(stats->isEOF);
- // 'perform' some operations, validate stats
- // needTime
- mock->pushBack(PlanStage::NEED_TIME);
- mock->work(&wsID);
- ASSERT_EQUALS(stats->works, 1U);
- ASSERT_EQUALS(stats->needTime, 1U);
-
// advanced, with pushed data
WorkingSetID id = ws.allocate();
mock->pushBack(id);
mock->work(&wsID);
- ASSERT_EQUALS(stats->works, 2U);
+ ASSERT_EQUALS(stats->works, 1U);
ASSERT_EQUALS(stats->advanced, 1U);
// yields
diff --git a/src/mongo/db/exec/return_key.cpp b/src/mongo/db/exec/return_key.cpp
index 8691f336b0e..ed8c0a4d3b2 100644
--- a/src/mongo/db/exec/return_key.cpp
+++ b/src/mongo/db/exec/return_key.cpp
@@ -45,22 +45,7 @@ PlanStage::StageState ReturnKeyStage::doWork(WorkingSetID* out) {
if (PlanStage::ADVANCED == status) {
WorkingSetMember* member = _ws.get(id);
- Status indexKeyStatus = _extractIndexKey(member);
-
- if (!indexKeyStatus.isOK()) {
- LOGV2_WARNING(4615602,
- "Couldn't execute {stage}, status = {indexKeyStatus}",
- "stage"_attr = kStageName,
- "indexKeyStatus"_attr = redact(indexKeyStatus));
- *out = WorkingSetCommon::allocateStatusMember(&_ws, indexKeyStatus);
- return PlanStage::FAILURE;
- }
-
- *out = id;
- } else if (PlanStage::FAILURE == status) {
- // The stage which produces a failure is responsible for allocating a working set member
- // with error details.
- invariant(WorkingSet::INVALID_ID != id);
+ _extractIndexKey(member);
*out = id;
} else if (PlanStage::NEED_YIELD == status) {
*out = id;
@@ -78,7 +63,7 @@ std::unique_ptr<PlanStageStats> ReturnKeyStage::getStats() {
return ret;
}
-Status ReturnKeyStage::_extractIndexKey(WorkingSetMember* member) {
+void ReturnKeyStage::_extractIndexKey(WorkingSetMember* member) {
if (!_sortKeyMetaFields.empty()) {
invariant(member->metadata().hasSortKey());
}
@@ -107,7 +92,5 @@ Status ReturnKeyStage::_extractIndexKey(WorkingSetMember* member) {
member->recordId = {};
member->doc = {{}, md.freeze()};
member->transitionToOwnedObj();
-
- return Status::OK();
}
} // namespace mongo
diff --git a/src/mongo/db/exec/return_key.h b/src/mongo/db/exec/return_key.h
index b33a9cd0c31..2e6db5482c0 100644
--- a/src/mongo/db/exec/return_key.h
+++ b/src/mongo/db/exec/return_key.h
@@ -71,7 +71,7 @@ public:
}
private:
- Status _extractIndexKey(WorkingSetMember* member);
+ void _extractIndexKey(WorkingSetMember* member);
WorkingSet& _ws;
ReturnKeyStats _specificStats;
diff --git a/src/mongo/db/exec/skip.cpp b/src/mongo/db/exec/skip.cpp
index 94bb81153e4..d3d0fc48afd 100644
--- a/src/mongo/db/exec/skip.cpp
+++ b/src/mongo/db/exec/skip.cpp
@@ -72,12 +72,6 @@ PlanStage::StageState SkipStage::doWork(WorkingSetID* out) {
*out = id;
return PlanStage::ADVANCED;
- } else if (PlanStage::FAILURE == status) {
- // The stage which produces a failure is responsible for allocating a working set member
- // with error details.
- invariant(WorkingSet::INVALID_ID != id);
- *out = id;
- return status;
} else if (PlanStage::NEED_YIELD == status) {
*out = id;
}
diff --git a/src/mongo/db/exec/sort.cpp b/src/mongo/db/exec/sort.cpp
index 6b03db5b26f..aee2c1fa348 100644
--- a/src/mongo/db/exec/sort.cpp
+++ b/src/mongo/db/exec/sort.cpp
@@ -59,28 +59,13 @@ PlanStage::StageState SortStage::doWork(WorkingSetID* out) {
if (code == PlanStage::ADVANCED) {
// The plan must be structured such that a previous stage has attached the sort key
// metadata.
- try {
- spool(id);
- } catch (const AssertionException&) {
- // Propagate runtime errors using the FAILED status code.
- *out = WorkingSetCommon::allocateStatusMember(_ws, exceptionToStatus());
- return PlanStage::FAILURE;
- }
-
+ spool(id);
return PlanStage::NEED_TIME;
} else if (code == PlanStage::IS_EOF) {
// The child has returned all of its results. Record this fact so that subsequent calls
// to 'doWork()' will perform sorting and unspool the sorted results.
_populated = true;
-
- try {
- loadingDone();
- } catch (const AssertionException&) {
- // Propagate runtime errors using the FAILED status code.
- *out = WorkingSetCommon::allocateStatusMember(_ws, exceptionToStatus());
- return PlanStage::FAILURE;
- }
-
+ loadingDone();
return PlanStage::NEED_TIME;
} else {
*out = id;
diff --git a/src/mongo/db/exec/sort_key_generator.cpp b/src/mongo/db/exec/sort_key_generator.cpp
index e1ea1e9847c..b62a0c93d9e 100644
--- a/src/mongo/db/exec/sort_key_generator.cpp
+++ b/src/mongo/db/exec/sort_key_generator.cpp
@@ -67,16 +67,10 @@ PlanStage::StageState SortKeyGeneratorStage::doWork(WorkingSetID* out) {
if (stageState == PlanStage::ADVANCED) {
WorkingSetMember* member = _ws->get(*out);
- try {
- auto sortKey = _sortKeyGen.computeSortKey(*member);
-
- // Add the sort key to the WSM as metadata.
- member->metadata().setSortKey(std::move(sortKey), _sortKeyGen.isSingleElementKey());
- } catch (const DBException& computeSortKeyException) {
- *out = WorkingSetCommon::allocateStatusMember(_ws, computeSortKeyException.toStatus());
- return PlanStage::FAILURE;
- }
+ auto sortKey = _sortKeyGen.computeSortKey(*member);
+ // Add the sort key to the WSM as metadata.
+ member->metadata().setSortKey(std::move(sortKey), _sortKeyGen.isSingleElementKey());
return PlanStage::ADVANCED;
}
diff --git a/src/mongo/db/exec/stagedebug_cmd.cpp b/src/mongo/db/exec/stagedebug_cmd.cpp
index 8f4b837f5c0..6d1ffe1ea11 100644
--- a/src/mongo/db/exec/stagedebug_cmd.cpp
+++ b/src/mongo/db/exec/stagedebug_cmd.cpp
@@ -196,18 +196,6 @@ public:
}
resultBuilder.done();
-
- if (PlanExecutor::FAILURE == state) {
- LOGV2_ERROR(23795,
- "Plan executor error during StageDebug command: FAILURE, stats: "
- "{Explain_getWinningPlanStats_exec_get}",
- "Explain_getWinningPlanStats_exec_get"_attr =
- redact(Explain::getWinningPlanStats(exec.get())));
-
- uassertStatusOK(WorkingSetCommon::getMemberObjectStatus(obj).withContext(
- "Executor error during StageDebug command"));
- }
-
return true;
}
diff --git a/src/mongo/db/exec/text_match.cpp b/src/mongo/db/exec/text_match.cpp
index c0d15c0e2fb..67b5ccf762d 100644
--- a/src/mongo/db/exec/text_match.cpp
+++ b/src/mongo/db/exec/text_match.cpp
@@ -94,16 +94,6 @@ PlanStage::StageState TextMatchStage::doWork(WorkingSetID* out) {
++_specificStats.docsRejected;
stageState = PlanStage::NEED_TIME;
}
- } else if (stageState == PlanStage::FAILURE) {
- // If a stage fails, it may create a status WSM to indicate why it
- // failed, in which case '*out' is valid. If ID is invalid, we
- // create our own error message.
- if (WorkingSet::INVALID_ID == *out) {
- str::stream ss;
- ss << "TEXT_MATCH stage failed to read in results from child";
- Status status(ErrorCodes::InternalError, ss);
- *out = WorkingSetCommon::allocateStatusMember(_ws, status);
- }
}
return stageState;
diff --git a/src/mongo/db/exec/text_or.cpp b/src/mongo/db/exec/text_or.cpp
index 4f92025575a..75e90e278cc 100644
--- a/src/mongo/db/exec/text_or.cpp
+++ b/src/mongo/db/exec/text_or.cpp
@@ -197,19 +197,6 @@ PlanStage::StageState TextOrStage::readFromChildren(WorkingSetID* out) {
_internalState = State::kReturningResults;
return PlanStage::NEED_TIME;
- } else if (PlanStage::FAILURE == childState) {
- // If a stage fails, it may create a status WSM to indicate why it
- // failed, in which case 'id' is valid. If ID is invalid, we
- // create our own error message.
- if (WorkingSet::INVALID_ID == id) {
- str::stream ss;
- ss << "TEXT_OR stage failed to read in results from child";
- Status status(ErrorCodes::InternalError, ss);
- *out = WorkingSetCommon::allocateStatusMember(_ws, status);
- } else {
- *out = id;
- }
- return PlanStage::FAILURE;
} else {
// Propagate WSID from below.
*out = id;
diff --git a/src/mongo/db/exec/trial_stage.cpp b/src/mongo/db/exec/trial_stage.cpp
index d10632c4427..0b7ef73d4bb 100644
--- a/src/mongo/db/exec/trial_stage.cpp
+++ b/src/mongo/db/exec/trial_stage.cpp
@@ -141,18 +141,6 @@ PlanStage::StageState TrialStage::_workTrialPlan(WorkingSetID* out) {
_specificStats.trialCompleted = _specificStats.trialSucceeded = true;
_replaceCurrentPlan(_queuedData);
return NEED_TIME;
- case PlanStage::FAILURE:
- // Either of these cause us to immediately end the trial phase and switch to the backup.
- auto statusDoc = WorkingSetCommon::getStatusMemberDocument(*_ws, *out);
- BSONObj statusObj = statusDoc ? statusDoc->toBson() : BSONObj();
- LOGV2_DEBUG(20604,
- 1,
- "Trial plan failed; switching to backup plan. Status: {statusObj}",
- "statusObj"_attr = redact(statusObj));
- _specificStats.trialCompleted = true;
- _replaceCurrentPlan(_backupPlan);
- *out = WorkingSet::INVALID_ID;
- return NEED_TIME;
}
MONGO_UNREACHABLE;
diff --git a/src/mongo/db/exec/update_stage.cpp b/src/mongo/db/exec/update_stage.cpp
index 3a5b4399c86..f7ab7e84da6 100644
--- a/src/mongo/db/exec/update_stage.cpp
+++ b/src/mongo/db/exec/update_stage.cpp
@@ -587,17 +587,6 @@ PlanStage::StageState UpdateStage::doWork(WorkingSetID* out) {
} else if (PlanStage::IS_EOF == status) {
// The child is out of results, and therefore so are we.
return PlanStage::IS_EOF;
- } else if (PlanStage::FAILURE == status) {
- *out = id;
- // If a stage fails, it may create a status WSM to indicate why it failed, in which case
- // 'id' is valid. If ID is invalid, we create our own error message.
- if (WorkingSet::INVALID_ID == id) {
- const std::string errmsg = "update stage failed to read in results from child";
- *out = WorkingSetCommon::allocateStatusMember(
- _ws, Status(ErrorCodes::InternalError, errmsg));
- return PlanStage::FAILURE;
- }
- return status;
} else if (PlanStage::NEED_YIELD == status) {
*out = id;
}
diff --git a/src/mongo/db/exec/working_set_common.cpp b/src/mongo/db/exec/working_set_common.cpp
index 4025a1e6627..0a6d99724e3 100644
--- a/src/mongo/db/exec/working_set_common.cpp
+++ b/src/mongo/db/exec/working_set_common.cpp
@@ -162,77 +162,4 @@ bool WorkingSetCommon::fetch(OperationContext* opCtx,
return true;
}
-Document WorkingSetCommon::buildMemberStatusObject(const Status& status) {
- BSONObjBuilder bob;
- bob.append("ok", status.isOK() ? 1.0 : 0.0);
- bob.append("code", status.code());
- bob.append("errmsg", status.reason());
- if (auto extraInfo = status.extraInfo()) {
- extraInfo->serialize(&bob);
- }
-
- return Document{bob.obj()};
-}
-
-WorkingSetID WorkingSetCommon::allocateStatusMember(WorkingSet* ws, const Status& status) {
- invariant(ws);
-
- WorkingSetID wsid = ws->allocate();
- WorkingSetMember* member = ws->get(wsid);
- member->doc = {SnapshotId(), buildMemberStatusObject(status)};
- member->transitionToOwnedObj();
-
- return wsid;
-}
-
-bool WorkingSetCommon::isValidStatusMemberObject(const Document& obj) {
- return !obj["ok"].missing() && obj["code"].getType() == BSONType::NumberInt &&
- obj["errmsg"].getType() == BSONType::String;
-}
-
-bool WorkingSetCommon::isValidStatusMemberObject(const BSONObj& obj) {
- return isValidStatusMemberObject(Document{obj});
-}
-
-boost::optional<Document> WorkingSetCommon::getStatusMemberDocument(const WorkingSet& ws,
- WorkingSetID wsid) {
- if (WorkingSet::INVALID_ID == wsid) {
- return boost::none;
- }
- auto member = ws.get(wsid);
- if (!member->hasOwnedObj()) {
- return boost::none;
- }
-
- if (!isValidStatusMemberObject(member->doc.value())) {
- return boost::none;
- }
- return member->doc.value();
-}
-
-Status WorkingSetCommon::getMemberObjectStatus(const BSONObj& memberObj) {
- invariant(WorkingSetCommon::isValidStatusMemberObject(memberObj));
- return Status(ErrorCodes::Error(memberObj["code"].numberInt()),
- memberObj["errmsg"].valueStringData(),
- memberObj);
-}
-
-Status WorkingSetCommon::getMemberObjectStatus(const Document& doc) {
- return getMemberObjectStatus(doc.toBson());
-}
-
-Status WorkingSetCommon::getMemberStatus(const WorkingSetMember& member) {
- invariant(member.hasObj());
- return getMemberObjectStatus(member.doc.value().toBson());
-}
-
-std::string WorkingSetCommon::toStatusString(const BSONObj& obj) {
- Document doc{obj};
- if (!isValidStatusMemberObject(doc)) {
- Status unknownStatus(ErrorCodes::UnknownError, "no details available");
- return unknownStatus.toString();
- }
- return getMemberObjectStatus(doc).toString();
-}
-
} // namespace mongo
diff --git a/src/mongo/db/exec/working_set_common.h b/src/mongo/db/exec/working_set_common.h
index f62d861142f..b03b29a9682 100644
--- a/src/mongo/db/exec/working_set_common.h
+++ b/src/mongo/db/exec/working_set_common.h
@@ -35,8 +35,6 @@
namespace mongo {
-class CanonicalQuery;
-class Collection;
class OperationContext;
class SeekableRecordCursor;
@@ -56,54 +54,6 @@ public:
WorkingSetID id,
unowned_ptr<SeekableRecordCursor> cursor,
const NamespaceString& ns);
-
- /**
- * Build a Document which represents a Status to return in a WorkingSet.
- */
- static Document buildMemberStatusObject(const Status& status);
-
- /**
- * Allocate a new WSM and initialize it with
- * the code and reason from the status.
- * Owned BSON object will have the following layout:
- * {
- * ok: <ok>, // 1 for OK; 0 otherwise.
- * code: <code>, // Status::code()
- * errmsg: <errmsg> // Status::reason()
- * }
- */
- static WorkingSetID allocateStatusMember(WorkingSet* ws, const Status& status);
-
- /**
- * Returns true if object was created by allocateStatusMember().
- */
- static bool isValidStatusMemberObject(const Document& obj);
- static bool isValidStatusMemberObject(const BSONObj& obj);
-
- /**
- * If the working set member represents an error status, returns it as a Document (which can
- * subsequently be converted to Status). Otherwise returns boost::none.
- */
- static boost::optional<Document> getStatusMemberDocument(const WorkingSet& ws,
- WorkingSetID wsid);
-
- /**
- * Returns status from working set member object.
- * Assumes isValidStatusMemberObject().
- */
- static Status getMemberObjectStatus(const BSONObj& memberObj);
- static Status getMemberObjectStatus(const Document& memberObj);
-
- /**
- * Returns status from working set member created with allocateStatusMember().
- * Assumes isValidStatusMemberObject().
- */
- static Status getMemberStatus(const WorkingSetMember& member);
-
- /**
- * Formats working set member object created with allocateStatusMember().
- */
- static std::string toStatusString(const BSONObj& obj);
};
} // namespace mongo
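
[Editorial sketch] With the status-member machinery deleted, a caller that previously rebuilt a Status from a special WSM now gets the error directly from the thrown exception. A stand-in illustration (local exception type, not the real mongo::DBException):

    #include <functional>
    #include <stdexcept>
    #include <string>

    struct DBExceptionLike : std::runtime_error {
        using std::runtime_error::runtime_error;
    };

    // The error's reason travels inside the exception itself; there is no
    // status WSM to locate and decode.
    std::string describeOutcome(const std::function<void()>& runPlan) {
        try {
            runPlan();
            return "ok";
        } catch (const DBExceptionLike& ex) {
            return std::string("plan failed: ") + ex.what();
        }
    }

    int main() {
        std::string msg = describeOutcome([] { throw DBExceptionLike("simulated executor error"); });
        return msg == "ok" ? 1 : 0;  // the failure path is the expected one here
    }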
diff --git a/src/mongo/db/ops/delete.cpp b/src/mongo/db/ops/delete.cpp
index 17d4a41e9ad..9b2720e681f 100644
--- a/src/mongo/db/ops/delete.cpp
+++ b/src/mongo/db/ops/delete.cpp
@@ -60,7 +60,7 @@ long long deleteObjects(OperationContext* opCtx,
auto exec = uassertStatusOK(getExecutorDelete(
&CurOp::get(opCtx)->debug(), collection, &parsedDelete, boost::none /* verbosity */));
- uassertStatusOK(exec->executePlan());
+ exec->executePlan();
return DeleteStage::getNumDeleted(*exec);
}
diff --git a/src/mongo/db/ops/update.cpp b/src/mongo/db/ops/update.cpp
index c065f670167..117fba38214 100644
--- a/src/mongo/db/ops/update.cpp
+++ b/src/mongo/db/ops/update.cpp
@@ -95,7 +95,7 @@ UpdateResult update(OperationContext* opCtx, Database* db, const UpdateRequest&
auto exec = uassertStatusOK(
getExecutorUpdate(nullOpDebug, collection, &parsedUpdate, boost::none /* verbosity */));
- uassertStatusOK(exec->executePlan());
+ exec->executePlan();
const UpdateStats* updateStats = UpdateStage::getUpdateStats(exec.get());
diff --git a/src/mongo/db/ops/write_ops_exec.cpp b/src/mongo/db/ops/write_ops_exec.cpp
index 19345d8f952..baa7adeee47 100644
--- a/src/mongo/db/ops/write_ops_exec.cpp
+++ b/src/mongo/db/ops/write_ops_exec.cpp
@@ -675,7 +675,7 @@ static SingleWriteResult performSingleUpdateOp(OperationContext* opCtx,
CurOp::get(opCtx)->setPlanSummary_inlock(Explain::getPlanSummary(exec.get()));
}
- uassertStatusOK(exec->executePlan());
+ exec->executePlan();
PlanSummaryStats summary;
Explain::getSummaryStats(*exec, &summary);
@@ -913,7 +913,7 @@ static SingleWriteResult performSingleDeleteOp(OperationContext* opCtx,
CurOp::get(opCtx)->setPlanSummary_inlock(Explain::getPlanSummary(exec.get()));
}
- uassertStatusOK(exec->executePlan());
+ exec->executePlan();
long long n = DeleteStage::getNumDeleted(*exec);
curOp.debug().additiveMetrics.ndeleted = n;
diff --git a/src/mongo/db/pipeline/document_source_cursor.cpp b/src/mongo/db/pipeline/document_source_cursor.cpp
index 6125fc6746b..d4e75b1d2b3 100644
--- a/src/mongo/db/pipeline/document_source_cursor.cpp
+++ b/src/mongo/db/pipeline/document_source_cursor.cpp
@@ -134,58 +134,47 @@ void DocumentSourceCursor::loadBatch() {
PlanExecutor::ExecState state;
Document resultObj;
- {
- AutoGetCollectionForRead autoColl(pExpCtx->opCtx, _exec->nss());
- uassertStatusOK(repl::ReplicationCoordinator::get(pExpCtx->opCtx)
- ->checkCanServeReadsFor(pExpCtx->opCtx, _exec->nss(), true));
-
- _exec->restoreState();
-
- {
- ON_BLOCK_EXIT([this] { recordPlanSummaryStats(); });
-
- while ((state = _exec->getNext(&resultObj, nullptr)) == PlanExecutor::ADVANCED) {
- _currentBatch.enqueue(transformDoc(std::move(resultObj)));
-
- // As long as we're waiting for inserts, we shouldn't do any batching at this level
- // we need the whole pipeline to see each document to see if we should stop waiting.
- if (awaitDataState(pExpCtx->opCtx).shouldWaitForInserts ||
- static_cast<long long>(_currentBatch.memUsageBytes()) >
- internalDocumentSourceCursorBatchSizeBytes.load()) {
- // End this batch and prepare PlanExecutor for yielding.
- _exec->saveState();
- return;
- }
- }
- // Special case for tailable cursor -- EOF doesn't preclude more results, so keep
- // the PlanExecutor alive.
- if (state == PlanExecutor::IS_EOF && pExpCtx->isTailableAwaitData()) {
+
+ AutoGetCollectionForRead autoColl(pExpCtx->opCtx, _exec->nss());
+ uassertStatusOK(repl::ReplicationCoordinator::get(pExpCtx->opCtx)
+ ->checkCanServeReadsFor(pExpCtx->opCtx, _exec->nss(), true));
+
+ _exec->restoreState();
+
+ try {
+ ON_BLOCK_EXIT([this] { recordPlanSummaryStats(); });
+
+ while ((state = _exec->getNext(&resultObj, nullptr)) == PlanExecutor::ADVANCED) {
+ _currentBatch.enqueue(transformDoc(std::move(resultObj)));
+
+        // As long as we're waiting for inserts, we shouldn't do any batching at this level; we
+        // need the whole pipeline to see each document to decide whether we should stop waiting.
+ if (awaitDataState(pExpCtx->opCtx).shouldWaitForInserts ||
+ static_cast<long long>(_currentBatch.memUsageBytes()) >
+ internalDocumentSourceCursorBatchSizeBytes.load()) {
+ // End this batch and prepare PlanExecutor for yielding.
_exec->saveState();
return;
}
}
- // If we got here, there won't be any more documents, so destroy our PlanExecutor. Note we
- // must hold a collection lock to destroy '_exec', but we can only assume that our locks are
- // still held if '_exec' did not end in an error. If '_exec' encountered an error during a
- // yield, the locks might be yielded.
- if (state != PlanExecutor::FAILURE) {
- cleanupExecutor();
- }
- }
+ invariant(state == PlanExecutor::IS_EOF);
- switch (state) {
- case PlanExecutor::ADVANCED:
- case PlanExecutor::IS_EOF:
- return; // We've reached our limit or exhausted the cursor.
- case PlanExecutor::FAILURE: {
- _execStatus = WorkingSetCommon::getMemberObjectStatus(resultObj).withContext(
- "Error in $cursor stage");
- uassertStatusOK(_execStatus);
+ // Special case for tailable cursor -- EOF doesn't preclude more results, so keep the
+ // PlanExecutor alive.
+ if (pExpCtx->isTailableAwaitData()) {
+ _exec->saveState();
+ return;
}
- default:
- MONGO_UNREACHABLE;
+ } catch (...) {
+ // Record error details before re-throwing the exception.
+ _execStatus = exceptionToStatus().withContext("Error in $cursor stage");
+ throw;
}
+
+ // If we got here, there won't be any more documents, so destroy our PlanExecutor. Note we must
+ // hold a collection lock to destroy '_exec'.
+ cleanupExecutor();
}
void DocumentSourceCursor::_updateOplogTimestamp() {
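
[Editorial sketch] loadBatch() now follows a record-and-rethrow pattern: remember why the executor died, so later calls on the source can report it, then let the exception keep unwinding. A compact stand-in version (plain std:: types, not the real DocumentSourceCursor):

    #include <functional>
    #include <optional>
    #include <stdexcept>
    #include <string>

    struct CursorSourceLike {
        std::optional<std::string> execStatus;  // sticky failure for later calls to report

        void loadBatch(const std::function<void()>& pullFromExecutor) {
            try {
                pullFromExecutor();
            } catch (const std::exception& ex) {
                // Remember why the executor died, then keep unwinding so the
                // current caller also observes the original failure.
                execStatus = std::string("Error in $cursor stage :: ") + ex.what();
                throw;
            }
        }
    };

    int main() {
        CursorSourceLike src;
        try {
            src.loadBatch([] { throw std::runtime_error("executor killed during yield"); });
        } catch (const std::exception&) {
            // expected: the exception still reaches the caller
        }
        return src.execStatus ? 0 : 1;
    }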
diff --git a/src/mongo/db/query/explain.cpp b/src/mongo/db/query/explain.cpp
index 1287315005d..96fae9342db 100644
--- a/src/mongo/db/query/explain.cpp
+++ b/src/mongo/db/query/explain.cpp
@@ -868,7 +868,7 @@ void Explain::explainPipelineExecutor(PlanExecutor* exec,
if (verbosity >= ExplainOptions::Verbosity::kExecStats) {
// TODO SERVER-32732: An execution error should be reported in explain, but should not
// cause the explain itself to fail.
- uassertStatusOK(exec->executePlan());
+ exec->executePlan();
}
*out << "stages" << Value(pps->writeExplainOps(verbosity));
@@ -888,7 +888,11 @@ void Explain::explainStages(PlanExecutor* exec,
// If we need execution stats, then run the plan in order to gather the stats.
if (verbosity >= ExplainOptions::Verbosity::kExecStats) {
- executePlanStatus = exec->executePlan();
+ try {
+ exec->executePlan();
+ } catch (const DBException&) {
+ executePlanStatus = exceptionToStatus();
+ }
// If executing the query failed, for any number of reasons other than a planning failure,
// then the collection may no longer be valid. We conservatively set our collection pointer
diff --git a/src/mongo/db/query/find.cpp b/src/mongo/db/query/find.cpp
index 5e28db4d8c8..e1292e21306 100644
--- a/src/mongo/db/query/find.cpp
+++ b/src/mongo/db/query/find.cpp
@@ -84,10 +84,6 @@ bool shouldSaveCursor(OperationContext* opCtx,
const Collection* collection,
PlanExecutor::ExecState finalState,
PlanExecutor* exec) {
- if (PlanExecutor::FAILURE == finalState) {
- return false;
- }
-
const QueryRequest& qr = exec->getCanonicalQuery()->getQueryRequest();
if (!qr.wantMore()) {
return false;
@@ -106,18 +102,8 @@ bool shouldSaveCursor(OperationContext* opCtx,
return !exec->isEOF();
}
-bool shouldSaveCursorGetMore(PlanExecutor::ExecState finalState,
- PlanExecutor* exec,
- bool isTailable) {
- if (PlanExecutor::FAILURE == finalState) {
- return false;
- }
-
- if (isTailable) {
- return true;
- }
-
- return !exec->isEOF();
+bool shouldSaveCursorGetMore(PlanExecutor* exec, bool isTailable) {
+ return isTailable || !exec->isEOF();
}
void beginQueryOp(OperationContext* opCtx,
@@ -164,15 +150,12 @@ void endQueryOp(OperationContext* opCtx,
namespace {
/**
- * Uses 'cursor' to fill out 'bb' with the batch of result documents to
- * be returned by this getMore.
+ * Uses 'cursor' to fill out 'bb' with the batch of result documents to be returned by this getMore.
*
* Returns the number of documents in the batch in 'numResults', which must be initialized to
- * zero by the caller. Returns the final ExecState returned by the cursor in *state. Returns
- * whether or not to save the ClientCursor in 'shouldSaveCursor'.
+ * zero by the caller. Returns the final ExecState returned by the cursor in *state.
*
- * Returns an OK status if the batch was successfully generated, and a non-OK status if the
- * PlanExecutor encounters a failure.
+ * Throws an exception if the PlanExecutor encounters a failure.
*/
void generateBatch(int ntoreturn,
ClientCursor* cursor,
@@ -181,42 +164,31 @@ void generateBatch(int ntoreturn,
PlanExecutor::ExecState* state) {
PlanExecutor* exec = cursor->getExecutor();
- Document doc;
- while (!FindCommon::enoughForGetMore(ntoreturn, *numResults) &&
- PlanExecutor::ADVANCED == (*state = exec->getNext(&doc, nullptr))) {
- BSONObj obj = doc.toBson();
-
- // If we can't fit this result inside the current batch, then we stash it for later.
- if (!FindCommon::haveSpaceForNext(obj, *numResults, bb->len())) {
- exec->enqueue(obj);
- break;
- }
+ try {
+ Document doc;
+ while (!FindCommon::enoughForGetMore(ntoreturn, *numResults) &&
+ PlanExecutor::ADVANCED == (*state = exec->getNext(&doc, nullptr))) {
+ BSONObj obj = doc.toBson();
- // Add result to output buffer.
- bb->appendBuf((void*)obj.objdata(), obj.objsize());
+ // If we can't fit this result inside the current batch, then we stash it for later.
+ if (!FindCommon::haveSpaceForNext(obj, *numResults, bb->len())) {
+ exec->enqueue(obj);
+ break;
+ }
- // Count the result.
- (*numResults)++;
- }
+ // Add result to output buffer.
+ bb->appendBuf((void*)obj.objdata(), obj.objsize());
- // Propagate any errors to the caller.
- switch (*state) {
- // Log an error message and then perform the cleanup.
- case PlanExecutor::FAILURE: {
- LOGV2_ERROR(20918,
- "getMore executor error, stats: {Explain_getWinningPlanStats_exec}",
- "Explain_getWinningPlanStats_exec"_attr =
- redact(Explain::getWinningPlanStats(exec)));
- // We should always have a valid status object by this point.
- auto status = WorkingSetCommon::getMemberObjectStatus(doc);
- invariant(!status.isOK());
- uassertStatusOK(status);
+ // Count the result.
+ (*numResults)++;
}
- default:
- return;
+ } catch (DBException& exception) {
+ LOGV2_ERROR(20918,
+ "getMore executor error, stats: {stats}",
+ "stats"_attr = redact(Explain::getWinningPlanStats(exec)));
+ exception.addContext("Executor error during OP_GET_MORE");
+ throw;
}
-
- MONGO_UNREACHABLE;
}
Message makeCursorNotFoundResponse() {
@@ -533,7 +505,7 @@ Message getMore(OperationContext* opCtx,
// the pin's destructor will be invoked, which will call release() on the pin. Because our
// ClientCursorPin is declared after our lock is declared, this will happen under the lock if
// any locking was necessary.
- if (!shouldSaveCursorGetMore(state, exec, cursorPin->isTailable())) {
+ if (!shouldSaveCursorGetMore(exec, cursorPin->isTailable())) {
// cc is now invalid, as is the executor
cursorid = 0;
curOp.debug().cursorExhausted = true;
@@ -708,45 +680,43 @@ bool runQuery(OperationContext* opCtx,
curOp.setPlanSummary_inlock(Explain::getPlanSummary(exec.get()));
}
- Document doc;
- while (PlanExecutor::ADVANCED == (state = exec->getNext(&doc, nullptr))) {
- obj = doc.toBson();
-
- // If we can't fit this result inside the current batch, then we stash it for later.
- if (!FindCommon::haveSpaceForNext(obj, numResults, bb.len())) {
- exec->enqueue(obj);
- break;
- }
-
- // Add result to output buffer.
- bb.appendBuf((void*)obj.objdata(), obj.objsize());
-
- // Count the result.
- ++numResults;
-
- if (FindCommon::enoughForFirstBatch(qr, numResults)) {
- LOGV2_DEBUG(20915,
- 5,
- "Enough for first batch, wantMore={qr_wantMore} "
- "ntoreturn={qr_getNToReturn_value_or_0} numResults={numResults}",
- "qr_wantMore"_attr = qr.wantMore(),
- "qr_getNToReturn_value_or_0"_attr = qr.getNToReturn().value_or(0),
- "numResults"_attr = numResults);
- break;
+ try {
+ Document doc;
+ while (PlanExecutor::ADVANCED == (state = exec->getNext(&doc, nullptr))) {
+ obj = doc.toBson();
+
+ // If we can't fit this result inside the current batch, then we stash it for later.
+ if (!FindCommon::haveSpaceForNext(obj, numResults, bb.len())) {
+ exec->enqueue(obj);
+ break;
+ }
+
+ // Add result to output buffer.
+ bb.appendBuf((void*)obj.objdata(), obj.objsize());
+
+ // Count the result.
+ ++numResults;
+
+ if (FindCommon::enoughForFirstBatch(qr, numResults)) {
+ LOGV2_DEBUG(20915,
+ 5,
+ "Enough for first batch, wantMore={qr_wantMore} "
+ "ntoreturn={qr_getNToReturn_value_or_0} numResults={numResults}",
+ "qr_wantMore"_attr = qr.wantMore(),
+ "qr_getNToReturn_value_or_0"_attr = qr.getNToReturn().value_or(0),
+ "numResults"_attr = numResults);
+ break;
+ }
}
- }
-
- // Caller expects exceptions thrown in certain cases.
- if (PlanExecutor::FAILURE == state) {
+ } catch (DBException& exception) {
LOGV2_ERROR(20919,
- "Plan executor error during find: {PlanExecutor_statestr_state}, stats: "
- "{Explain_getWinningPlanStats_exec_get}",
- "PlanExecutor_statestr_state"_attr = PlanExecutor::statestr(state),
- "Explain_getWinningPlanStats_exec_get"_attr =
- redact(Explain::getWinningPlanStats(exec.get())));
- uassertStatusOKWithContext(WorkingSetCommon::getMemberObjectStatus(doc),
- "Executor error during OP_QUERY find");
- MONGO_UNREACHABLE;
+ "Plan executor error during find: {error}, stats: {stats}",
+ "Plan executor error during find",
+ "error"_attr = redact(exception.toStatus()),
+ "stats"_attr = redact(Explain::getWinningPlanStats(exec.get())));
+
+ exception.addContext("Executor error during find");
+ throw;
}
// Fill out CurOp based on query results. If we have a cursorid, we will fill out CurOp with
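The hunk above shows the error-handling contract this patch adopts everywhere: results are drained inside a try block, and failures surface as exceptions instead of a FAILURE return state. A minimal sketch of the pattern, assuming only a PlanExecutor 'exec' whose getNext() throws DBException on a query-fatal error:

    try {
        Document doc;
        while (PlanExecutor::ADVANCED == exec->getNext(&doc, nullptr)) {
            // ... append doc.toBson() to the outgoing batch ...
        }
    } catch (DBException& exception) {
        // Attach call-site context, then let the error propagate; the command
        // layer converts the exception into an error response.
        exception.addContext("Executor error during find");
        throw;
    }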
diff --git a/src/mongo/db/query/find.h b/src/mongo/db/query/find.h
index 2522ae338c5..0349bf60931 100644
--- a/src/mongo/db/query/find.h
+++ b/src/mongo/db/query/find.h
@@ -55,15 +55,13 @@ bool shouldSaveCursor(OperationContext* opCtx,
PlanExecutor* exec);
/**
- * Similar to shouldSaveCursor(), but used in getMore to determine whether we should keep
- * the cursor around for additional getMores().
+ * Similar to shouldSaveCursor(), but used in getMore to determine whether we should keep the cursor
+ * around for additional getMores().
*
- * If false, the caller should close the cursor and indicate this to the client by sending back
- * a cursor ID of 0.
+ * If false, the caller should close the cursor and indicate this to the client by sending back a
+ * cursor ID of 0.
*/
-bool shouldSaveCursorGetMore(PlanExecutor::ExecState finalState,
- PlanExecutor* exec,
- bool isTailable);
+bool shouldSaveCursorGetMore(PlanExecutor* exec, bool isTailable);
/**
* Fills out the CurOp for "opCtx" with information about this query.
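Because a failing executor now throws before control ever returns, shouldSaveCursorGetMore() no longer needs the final ExecState. A sketch of the updated call site, matching the getMore() hunk shown earlier:

    // Post-change: no ExecState argument is passed in.
    if (!shouldSaveCursorGetMore(exec, cursorPin->isTailable())) {
        cursorid = 0;  // Tell the client the cursor is exhausted.
        curOp.debug().cursorExhausted = true;
    }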
diff --git a/src/mongo/db/query/plan_executor.h b/src/mongo/db/query/plan_executor.h
index bef0a4d4d27..7c241a03b83 100644
--- a/src/mongo/db/query/plan_executor.h
+++ b/src/mongo/db/query/plan_executor.h
@@ -71,29 +71,11 @@ extern const OperationContext::Decoration<repl::OpTime> clientsLastKnownCommitte
class PlanExecutor {
public:
enum ExecState {
- // We successfully populated the out parameter.
+ // Successfully returned the next document and/or record id.
ADVANCED,
- // We're EOF. We won't return any more results (edge case exception: capped+tailable).
+ // Execution is complete. There is no next document to return.
IS_EOF,
-
- // getNext() was asked for data it cannot provide, or the underlying PlanStage had an
- // unrecoverable error, or the executor died, usually due to a concurrent catalog event
- // such as a collection drop.
- //
- // If the underlying PlanStage has any information on the error, it will be available in
- // the objOut parameter. Call WorkingSetCommon::toStatusString() to retrieve the error
- // details from the output BSON object.
- //
- // The PlanExecutor is no longer capable of executing. The caller may extract stats from the
- // underlying plan stages, but should not attempt to do anything else with the executor
- // other than dispose() and destroy it.
- //
- // N.B.: If the plan's YieldPolicy allows yielding, FAILURE can be returned on interrupt,
- // and any locks acquired might possibly be released, regardless of the use of any RAII
- // locking helpers such as AutoGetCollection. Code must be written to expect this
- // situation.
- FAILURE,
};
/**
@@ -103,10 +85,11 @@ public:
enum YieldPolicy {
// Any call to getNext() may yield. In particular, the executor may die on any call to
// getNext() due to a required index or collection becoming invalid during yield. If this
- // occurs, getNext() will produce an error during yield recovery and will return FAILURE.
- // Additionally, this will handle all WriteConflictExceptions that occur while processing
- // the query. With this yield policy, it is possible for getNext() to return FAILURE with
- // locks released, if the operation is killed while yielding.
+ // occurs, getNext() will produce an error during yield recovery and will throw an
+ // exception. Additionally, this will handle all WriteConflictExceptions that occur while
+    // processing the query. With this yield policy, it is possible for getNext() to throw
+    // with locks released. Cleanup that happens while the stack unwinds cannot assume
+ // locks are held.
YIELD_AUTO,
// This will handle WriteConflictExceptions that occur while processing the query, but will
@@ -136,13 +119,11 @@ public:
INTERRUPT_ONLY,
// Used for testing, this yield policy will cause the PlanExecutor to time out on the first
- // yield, returning FAILURE with an error object encoding a ErrorCodes::ExceededTimeLimit
- // message.
+ // yield, throwing an ErrorCodes::ExceededTimeLimit error.
ALWAYS_TIME_OUT,
// Used for testing, this yield policy will cause the PlanExecutor to be marked as killed on
- // the first yield, returning FAILURE with an error object encoding a
- // ErrorCodes::QueryPlanKilled message.
+ // the first yield, throwing an ErrorCodes::QueryPlanKilled error.
ALWAYS_MARK_KILLED,
};
@@ -397,6 +378,22 @@ public:
virtual ExecState getNextSnapshotted(Snapshotted<Document>* objOut, RecordId* dlOut) = 0;
virtual ExecState getNextSnapshotted(Snapshotted<BSONObj>* objOut, RecordId* dlOut) = 0;
+ /**
+ * Produces the next document from the query execution plan. The caller can request that the
+     * executor return documents by passing a non-null pointer for the 'objOut' output parameter,
+ * and similarly can request the RecordId by passing a non-null pointer for 'dlOut'.
+ *
+ * If a query-fatal error occurs, this method will throw an exception. If an exception is
+ * thrown, then the PlanExecutor is no longer capable of executing. The caller may extract stats
+ * from the underlying plan stages, but should not attempt to do anything else with the executor
+ * other than dispose() and destroy it.
+ *
+ * If the plan's YieldPolicy allows yielding, then any call to this method can result in a
+ * yield. This relinquishes any locks that were previously acquired, regardless of the use of
+ * any RAII locking helpers such as 'AutoGetCollection'. Furthermore, if an error is encountered
+ * during yield recovery, an exception can be thrown while locks are not held. Callers cannot
+ * expect locks to be held when this method throws an exception.
+ */
virtual ExecState getNext(Document* objOut, RecordId* dlOut) = 0;
/**
@@ -413,16 +410,14 @@ public:
virtual bool isEOF() = 0;
/**
- * Execute the plan to completion, throwing out the results. Used when you want to work the
+ * Execute the plan to completion, throwing out the results. Used when you want to work the
* underlying tree without getting results back.
*
* If a YIELD_AUTO policy is set on this executor, then this will automatically yield.
*
- * Returns ErrorCodes::QueryPlanKilled if the plan executor was killed during a yield. If this
- * error occurs, it is illegal to subsequently access the collection, since it may have been
- * dropped.
+ * Throws an exception if this plan results in a runtime error or is killed.
*/
- virtual Status executePlan() = 0;
+ virtual void executePlan() = 0;
//
// Concurrency-related methods.
@@ -430,10 +425,9 @@ public:
/**
* Notifies a PlanExecutor that it should die. Callers must specify the reason for why this
- * executor is being killed. Subsequent calls to getNext() will return FAILURE, and fill
- * 'objOut'
+ * executor is being killed. Subsequent calls to getNext() will throw a query-fatal exception
* with an error reflecting 'killStatus'. If this method is called multiple times, only the
- * first 'killStatus' will be retained. It is an error to call this method with Status::OK.
+ * first 'killStatus' will be retained. It is illegal to call this method with Status::OK.
*/
virtual void markAsKilled(Status killStatus) = 0;
@@ -488,12 +482,6 @@ public:
* for the batch that is currently being built. Otherwise, return an empty object.
*/
virtual BSONObj getPostBatchResumeToken() const = 0;
-
- /**
- * Turns a Document representing an error status produced by getNext() into a Status.
- */
- virtual Status getMemberObjectStatus(const Document& memberObj) const = 0;
- virtual Status getMemberObjectStatus(const BSONObj& memberObj) const = 0;
};
} // namespace mongo
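With ExecState reduced to two values, every consumer of getNext() collapses to a plain loop. A minimal sketch of the new caller contract; 'process' is a hypothetical stand-in for whatever the caller does per document:

    Document doc;
    while (PlanExecutor::ADVANCED == exec->getNext(&doc, nullptr)) {
        process(doc);  // Hypothetical per-document work.
    }
    // Falling out of the loop means IS_EOF. A query-fatal error would have
    // thrown a DBException before this line is reached.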
diff --git a/src/mongo/db/query/plan_executor_impl.cpp b/src/mongo/db/query/plan_executor_impl.cpp
index f642f5be99b..959165ccd85 100644
--- a/src/mongo/db/query/plan_executor_impl.cpp
+++ b/src/mongo/db/query/plan_executor_impl.cpp
@@ -285,15 +285,14 @@ PlanExecutorImpl::~PlanExecutorImpl() {
invariant(_currentState == kDisposed);
}
-string PlanExecutor::statestr(ExecState s) {
- if (PlanExecutor::ADVANCED == s) {
- return "ADVANCED";
- } else if (PlanExecutor::IS_EOF == s) {
- return "IS_EOF";
- } else {
- verify(PlanExecutor::FAILURE == s);
- return "FAILURE";
+std::string PlanExecutor::statestr(ExecState execState) {
+ switch (execState) {
+ case PlanExecutor::ADVANCED:
+ return "ADVANCED";
+ case PlanExecutor::IS_EOF:
+ return "IS_EOF";
}
+ MONGO_UNREACHABLE;
}
WorkingSet* PlanExecutorImpl::getWorkingSet() const {
@@ -464,8 +463,7 @@ std::shared_ptr<CappedInsertNotifier> PlanExecutorImpl::_getCappedInsertNotifier
return collection->getCappedInsertNotifier();
}
-PlanExecutor::ExecState PlanExecutorImpl::_waitForInserts(CappedInsertNotifierData* notifierData,
- Snapshotted<Document>* errorObj) {
+void PlanExecutorImpl::_waitForInserts(CappedInsertNotifierData* notifierData) {
invariant(notifierData->notifier);
// The notifier wait() method will not wait unless the version passed to it matches the
@@ -490,36 +488,19 @@ PlanExecutor::ExecState PlanExecutorImpl::_waitForInserts(CappedInsertNotifierDa
});
notifierData->lastEOFVersion = currentNotifierVersion;
- if (yieldResult.isOK()) {
- // There may be more results, try to get more data.
- return ADVANCED;
- }
-
- if (errorObj) {
- *errorObj = Snapshotted<Document>(SnapshotId(),
- WorkingSetCommon::buildMemberStatusObject(yieldResult));
- }
- return FAILURE;
+ uassertStatusOK(yieldResult);
}
PlanExecutor::ExecState PlanExecutorImpl::_getNextImpl(Snapshotted<Document>* objOut,
RecordId* dlOut) {
if (MONGO_unlikely(planExecutorAlwaysFails.shouldFail())) {
- Status status(ErrorCodes::InternalError,
- str::stream() << "PlanExecutor hit planExecutorAlwaysFails fail point");
- *objOut =
- Snapshotted<Document>(SnapshotId(), WorkingSetCommon::buildMemberStatusObject(status));
-
- return PlanExecutor::FAILURE;
+ uasserted(ErrorCodes::Error(4382101),
+ "PlanExecutor hit planExecutorAlwaysFails fail point");
}
invariant(_currentState == kUsable);
if (isMarkedAsKilled()) {
- if (nullptr != objOut) {
- *objOut = Snapshotted<Document>(SnapshotId(),
- WorkingSetCommon::buildMemberStatusObject(_killStatus));
- }
- return PlanExecutor::FAILURE;
+ uassertStatusOK(_killStatus);
}
if (!_stash.empty()) {
@@ -547,14 +528,7 @@ PlanExecutor::ExecState PlanExecutorImpl::_getNextImpl(Snapshotted<Document>* ob
// 3) we need to yield and retry due to a WriteConflictException.
// In all cases, the actual yielding happens here.
if (_yieldPolicy->shouldYieldOrInterrupt()) {
- auto yieldStatus = _yieldPolicy->yieldOrInterrupt();
- if (!yieldStatus.isOK()) {
- if (objOut) {
- *objOut = Snapshotted<Document>(
- SnapshotId(), WorkingSetCommon::buildMemberStatusObject(yieldStatus));
- }
- return PlanExecutor::FAILURE;
- }
+ uassertStatusOK(_yieldPolicy->yieldOrInterrupt());
}
WorkingSetID id = WorkingSet::INVALID_ID;
@@ -624,7 +598,8 @@ PlanExecutor::ExecState PlanExecutorImpl::_getNextImpl(Snapshotted<Document>* ob
}
} else if (PlanStage::NEED_TIME == code) {
// Fall through to yield check at end of large conditional.
- } else if (PlanStage::IS_EOF == code) {
+ } else {
+ invariant(PlanStage::IS_EOF == code);
if (MONGO_unlikely(planExecutorHangBeforeShouldWaitForInserts.shouldFail(
[this](const BSONObj& data) {
if (data.hasField("namespace") &&
@@ -641,22 +616,9 @@ PlanExecutor::ExecState PlanExecutorImpl::_getNextImpl(Snapshotted<Document>* ob
if (!_shouldWaitForInserts()) {
return PlanExecutor::IS_EOF;
}
- const ExecState waitResult = _waitForInserts(&cappedInsertNotifierData, objOut);
- if (waitResult == PlanExecutor::ADVANCED) {
- // There may be more results, keep going.
- continue;
- }
- return waitResult;
- } else {
- invariant(PlanStage::FAILURE == code);
-
- if (nullptr != objOut) {
- invariant(WorkingSet::INVALID_ID != id);
- auto statusObj = WorkingSetCommon::getStatusMemberDocument(*_workingSet, id);
- *objOut = Snapshotted<Document>(SnapshotId(), *statusObj);
- }
-
- return PlanExecutor::FAILURE;
+ _waitForInserts(&cappedInsertNotifierData);
+ // There may be more results, keep going.
+ continue;
}
}
}
@@ -683,7 +645,7 @@ void PlanExecutorImpl::dispose(OperationContext* opCtx) {
_currentState = kDisposed;
}
-Status PlanExecutorImpl::executePlan() {
+void PlanExecutorImpl::executePlan() {
invariant(_currentState == kUsable);
Document obj;
PlanExecutor::ExecState state = PlanExecutor::ADVANCED;
@@ -691,23 +653,14 @@ Status PlanExecutorImpl::executePlan() {
state = this->getNext(&obj, nullptr);
}
- if (PlanExecutor::FAILURE == state) {
- if (isMarkedAsKilled()) {
- return _killStatus;
- }
-
- auto errorStatus = getMemberObjectStatus(obj);
- invariant(!errorStatus.isOK());
- return errorStatus.withContext(str::stream() << "Exec error resulting in state "
- << PlanExecutor::statestr(state));
+ if (isMarkedAsKilled()) {
+ uassertStatusOK(_killStatus);
}
invariant(!isMarkedAsKilled());
invariant(PlanExecutor::IS_EOF == state);
- return Status::OK();
}
-
void PlanExecutorImpl::enqueue(const Document& obj) {
_stash.push(obj.getOwned());
}
@@ -764,11 +717,4 @@ BSONObj PlanExecutorImpl::getPostBatchResumeToken() const {
}
}
-Status PlanExecutorImpl::getMemberObjectStatus(const Document& memberObj) const {
- return WorkingSetCommon::getMemberObjectStatus(memberObj);
-}
-
-Status PlanExecutorImpl::getMemberObjectStatus(const BSONObj& memberObj) const {
- return WorkingSetCommon::getMemberObjectStatus(memberObj);
-}
} // namespace mongo
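Since executePlan() now returns void and reports failure by throwing, callers that still expose a Status-based API convert at the boundary. A sketch of that conversion, mirroring the storage_interface_impl.cpp hunks below:

    try {
        exec->executePlan();
    } catch (const DBException& ex) {
        return ex.toStatus();  // Preserves the error code and reason string.
    }
    return Status::OK();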
diff --git a/src/mongo/db/query/plan_executor_impl.h b/src/mongo/db/query/plan_executor_impl.h
index cffddab0b3e..0b4e3ca8b24 100644
--- a/src/mongo/db/query/plan_executor_impl.h
+++ b/src/mongo/db/query/plan_executor_impl.h
@@ -72,7 +72,7 @@ public:
ExecState getNext(Document* objOut, RecordId* dlOut) final;
ExecState getNext(BSONObj* out, RecordId* dlOut) final;
bool isEOF() final;
- Status executePlan() final;
+ void executePlan() final;
void markAsKilled(Status killStatus) final;
void dispose(OperationContext* opCtx) final;
void enqueue(const Document& obj) final;
@@ -84,9 +84,6 @@ public:
Timestamp getLatestOplogTimestamp() const final;
BSONObj getPostBatchResumeToken() const final;
- Status getMemberObjectStatus(const Document& memberObj) const final;
- Status getMemberObjectStatus(const BSONObj& memberObj) const final;
-
private:
/**
* New PlanExecutor instances are created with the static make() method above.
@@ -138,17 +135,12 @@ private:
std::shared_ptr<CappedInsertNotifier> _getCappedInsertNotifier();
/**
- * Yields locks and waits for inserts to the collection. Returns ADVANCED if there has been an
- * insertion and there may be new results. Returns FAILURE if the PlanExecutor was killed during
- * a yield. This method is only to be used for tailable and awaitData cursors, so rather than
- * returning FAILURE if the operation has exceeded its time limit, we return IS_EOF to preserve
- * this PlanExecutor for future use.
- *
- * If an error is encountered and 'errorObj' is provided, it is populated with an object
- * describing the error.
+     * Called for tailable and awaitData cursors in order to yield locks and wait for inserts to
+ * the collection being tailed. Returns control to the caller once there has been an insertion
+ * and there may be new results. If the PlanExecutor was killed during a yield, throws an
+ * exception.
*/
- ExecState _waitForInserts(CappedInsertNotifierData* notifierData,
- Snapshotted<Document>* errorObj);
+ void _waitForInserts(CappedInsertNotifierData* notifierData);
/**
* Common implementation for getNext() and getNextSnapshotted().
diff --git a/src/mongo/db/query/stage_builder.cpp b/src/mongo/db/query/stage_builder.cpp
index 4434c97d928..ad6c2ffb9ca 100644
--- a/src/mongo/db/query/stage_builder.cpp
+++ b/src/mongo/db/query/stage_builder.cpp
@@ -358,6 +358,7 @@ std::unique_ptr<PlanStage> buildStages(OperationContext* opCtx,
case STAGE_DELETE:
case STAGE_EOF:
case STAGE_IDHACK:
+ case STAGE_MOCK:
case STAGE_MULTI_ITERATOR:
case STAGE_MULTI_PLAN:
case STAGE_PIPELINE_PROXY:
diff --git a/src/mongo/db/query/stage_types.h b/src/mongo/db/query/stage_types.h
index 1deb70d4cff..c45c3e01dd8 100644
--- a/src/mongo/db/query/stage_types.h
+++ b/src/mongo/db/query/stage_types.h
@@ -70,6 +70,8 @@ enum StageType {
STAGE_IXSCAN,
STAGE_LIMIT,
+ STAGE_MOCK,
+
// Implements iterating over one or more RecordStore::Cursor.
STAGE_MULTI_ITERATOR,
diff --git a/src/mongo/db/repl/rs_rollback.cpp b/src/mongo/db/repl/rs_rollback.cpp
index 0b798eadf33..20c2d06f699 100644
--- a/src/mongo/db/repl/rs_rollback.cpp
+++ b/src/mongo/db/repl/rs_rollback.cpp
@@ -1031,61 +1031,43 @@ void dropCollection(OperationContext* opCtx,
// in order to keep an archive of items that were rolled back.
auto exec = InternalPlanner::collectionScan(
opCtx, nss.toString(), collection, PlanExecutor::YIELD_AUTO);
- BSONObj curObj;
+
PlanExecutor::ExecState execState;
- while (PlanExecutor::ADVANCED == (execState = exec->getNext(&curObj, nullptr))) {
- auto status = removeSaver.goingToDelete(curObj);
- if (!status.isOK()) {
- LOGV2_FATAL_CONTINUE(
- 21740,
- "Rolling back createCollection on {namespace} failed to write document to "
- "remove saver file: {error}",
- "Rolling back createCollection failed to write document to remove saver file",
- "namespace"_attr = nss,
- "error"_attr = redact(status));
- throw RSFatalException(
- "Rolling back createCollection. Failed to write document to remove saver "
- "file.");
+ try {
+ BSONObj curObj;
+ while (PlanExecutor::ADVANCED == (execState = exec->getNext(&curObj, nullptr))) {
+ auto status = removeSaver.goingToDelete(curObj);
+ if (!status.isOK()) {
+ LOGV2_FATAL_CONTINUE(21740,
+ "Rolling back createCollection on {namespace} failed to "
+ "write document to remove saver file: {error}",
+ "Rolling back createCollection failed to write document "
+ "to remove saver file",
+ "namespace"_attr = nss,
+ "error"_attr = redact(status));
+ throw RSFatalException(
+ "Rolling back createCollection. Failed to write document to remove saver "
+ "file.");
+ }
}
+ } catch (const DBException&) {
+ LOGV2_FATAL_CONTINUE(21741,
+ "Rolling back createCollection on {namespace} failed with "
+ "{error}. A full resync is necessary",
+ "Rolling back createCollection failed. A full resync is necessary",
+ "namespace"_attr = nss,
+ "error"_attr = redact(exceptionToStatus()));
+ throw RSFatalException(
+ "Rolling back createCollection failed. A full resync is necessary.");
}
- // If we exited the above for loop with any other execState than IS_EOF, this means that
- // a FAILURE state was returned. If a FAILURE state was returned, either an unrecoverable
- // error was thrown by exec, or we attempted to retrieve data that could not be provided
- // by the PlanExecutor. In both of these cases it is necessary for a full resync of the
- // server.
-
- if (execState != PlanExecutor::IS_EOF) {
- if (execState == PlanExecutor::FAILURE &&
- WorkingSetCommon::isValidStatusMemberObject(curObj)) {
- Status errorStatus = WorkingSetCommon::getMemberObjectStatus(curObj);
- LOGV2_FATAL_CONTINUE(
- 21741,
- "Rolling back createCollection on {namespace} failed with {error}. A "
- "full resync is necessary.",
- "Rolling back createCollection failed. A full resync is necessary",
- "namespace"_attr = nss,
- "error"_attr = redact(errorStatus));
- throw RSFatalException(
- "Rolling back createCollection failed. A full resync is necessary.");
- } else {
- LOGV2_FATAL_CONTINUE(
- 21742,
- "Rolling back createCollection on {namespace} failed. A full resync is "
- "necessary.",
- "Rolling back createCollection failed. A full resync is necessary",
- "namespace"_attr = nss);
- throw RSFatalException(
- "Rolling back createCollection failed. A full resync is necessary.");
- }
- }
+ invariant(execState == PlanExecutor::IS_EOF);
}
WriteUnitOfWork wunit(opCtx);
- // We permanently drop the collection rather than 2-phase drop the collection
- // here. By not passing in an opTime to dropCollectionEvenIfSystem() the collection
- // is immediately dropped.
+ // We permanently drop the collection rather than 2-phase drop the collection here. By not
+ // passing in an opTime to dropCollectionEvenIfSystem() the collection is immediately dropped.
fassert(40504, db->dropCollectionEvenIfSystem(opCtx, nss));
wunit.commit();
}
diff --git a/src/mongo/db/repl/storage_interface_impl.cpp b/src/mongo/db/repl/storage_interface_impl.cpp
index 0b0e2e4e065..f09894da4c8 100644
--- a/src/mongo/db/repl/storage_interface_impl.cpp
+++ b/src/mongo/db/repl/storage_interface_impl.cpp
@@ -116,7 +116,7 @@ StatusWith<int> StorageInterfaceImpl::getRollbackID(OperationContext* opCtx) {
auto rbid = RollbackID::parse(IDLParserErrorContext("RollbackID"), rbidDoc.getValue());
invariant(rbid.get_id() == kRollbackIdDocumentId);
return rbid.getRollbackId();
- } catch (...) {
+ } catch (const DBException&) {
return exceptionToStatus();
}
@@ -721,24 +721,24 @@ StatusWith<std::vector<BSONObj>> _findOrDeleteDocuments(
}
std::vector<BSONObj> docs;
- BSONObj out;
- PlanExecutor::ExecState state = PlanExecutor::ExecState::ADVANCED;
- while (state == PlanExecutor::ExecState::ADVANCED && docs.size() < limit) {
- state = planExecutor->getNext(&out, nullptr);
- if (state == PlanExecutor::ExecState::ADVANCED) {
- docs.push_back(out.getOwned());
+
+ try {
+ BSONObj out;
+ PlanExecutor::ExecState state = PlanExecutor::ExecState::ADVANCED;
+ while (state == PlanExecutor::ExecState::ADVANCED && docs.size() < limit) {
+ state = planExecutor->getNext(&out, nullptr);
+ if (state == PlanExecutor::ExecState::ADVANCED) {
+ docs.push_back(out.getOwned());
+ }
}
+ } catch (const WriteConflictException&) {
+        // Re-throw the WCE, since it will get caught by a retry loop at a higher level.
+ throw;
+ } catch (const DBException&) {
+ return exceptionToStatus();
}
- switch (state) {
- case PlanExecutor::ADVANCED:
- case PlanExecutor::IS_EOF:
- return Result(docs);
- case PlanExecutor::FAILURE:
- return WorkingSetCommon::getMemberObjectStatus(out);
- default:
- MONGO_UNREACHABLE;
- }
+ return Result{docs};
});
}
@@ -906,9 +906,16 @@ Status _updateWithQuery(OperationContext* opCtx,
}
auto planExecutor = std::move(planExecutorResult.getValue());
- auto ret = planExecutor->executePlan();
+ try {
+ planExecutor->executePlan();
+ } catch (const WriteConflictException&) {
+ // Re-throw the WCE, since it will get caught and retried at a higher level.
+ throw;
+ } catch (const DBException&) {
+ return exceptionToStatus();
+ }
wuow.commit();
- return ret;
+ return Status::OK();
});
}
@@ -971,7 +978,15 @@ Status StorageInterfaceImpl::upsertById(OperationContext* opCtx,
idKey.wrap(""),
parsedUpdate.yieldPolicy());
- return planExecutor->executePlan();
+ try {
+ planExecutor->executePlan();
+ } catch (const WriteConflictException&) {
+ // Re-throw the WCE, since it will get caught and retried at a higher level.
+ throw;
+ } catch (const DBException&) {
+ return exceptionToStatus();
+ }
+ return Status::OK();
});
}
@@ -1039,7 +1054,15 @@ Status StorageInterfaceImpl::deleteByFilter(OperationContext* opCtx,
}
auto planExecutor = std::move(planExecutorResult.getValue());
- return planExecutor->executePlan();
+ try {
+ planExecutor->executePlan();
+ } catch (const WriteConflictException&) {
+ // Re-throw the WCE, since it will get caught and retried at a higher level.
+ throw;
+ } catch (const DBException&) {
+ return exceptionToStatus();
+ }
+ return Status::OK();
});
}
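The same try/catch sequence now appears verbatim at three call sites in this file (_updateWithQuery, upsertById, deleteByFilter). A hypothetical helper, not part of this patch, could factor it out:

    // Hypothetical: run a plan to completion, letting WriteConflictException
    // escape to the writeConflictRetry loop while converting any other
    // DBException into a Status for the storage-interface API.
    Status executePlanAsStatus(PlanExecutor* planExecutor) {
        try {
            planExecutor->executePlan();
        } catch (const WriteConflictException&) {
            throw;  // Caught and retried at a higher level.
        } catch (const DBException&) {
            return exceptionToStatus();
        }
        return Status::OK();
    }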
diff --git a/src/mongo/db/s/chunk_splitter.cpp b/src/mongo/db/s/chunk_splitter.cpp
index aedbdfdebff..6aec767f8f1 100644
--- a/src/mongo/db/s/chunk_splitter.cpp
+++ b/src/mongo/db/s/chunk_splitter.cpp
@@ -326,15 +326,15 @@ void ChunkSplitter::_runAutosplit(std::shared_ptr<ChunkSplitStateDriver> chunkSp
"maxChunkSizeBytes"_attr = maxChunkSizeBytes);
chunkSplitStateDriver->prepareSplit();
- auto splitPoints = uassertStatusOK(splitVector(opCtx.get(),
- nss,
- shardKeyPattern.toBSON(),
- chunk.getMin(),
- chunk.getMax(),
- false,
- boost::none,
- boost::none,
- maxChunkSizeBytes));
+ auto splitPoints = splitVector(opCtx.get(),
+ nss,
+ shardKeyPattern.toBSON(),
+ chunk.getMin(),
+ chunk.getMax(),
+ false,
+ boost::none,
+ boost::none,
+ maxChunkSizeBytes);
if (splitPoints.empty()) {
LOGV2_DEBUG(21907,
diff --git a/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp b/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
index 40a1703318b..2c311d481b9 100644
--- a/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
+++ b/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
@@ -600,46 +600,50 @@ void MigrationChunkClonerSourceLegacy::_nextCloneBatchFromIndexScan(OperationCon
_jumboChunkCloneState->clonerExec->restoreState();
}
- BSONObj obj;
- RecordId recordId;
PlanExecutor::ExecState execState;
+ try {
+ BSONObj obj;
+ RecordId recordId;
+ while (PlanExecutor::ADVANCED ==
+ (execState = _jumboChunkCloneState->clonerExec->getNext(
+ &obj, _jumboChunkCloneState->stashedRecordId ? nullptr : &recordId))) {
- while (PlanExecutor::ADVANCED ==
- (execState = _jumboChunkCloneState->clonerExec->getNext(
- &obj, _jumboChunkCloneState->stashedRecordId ? nullptr : &recordId))) {
+ stdx::unique_lock<Latch> lk(_mutex);
+ _jumboChunkCloneState->clonerState = execState;
+ lk.unlock();
- stdx::unique_lock<Latch> lk(_mutex);
- _jumboChunkCloneState->clonerState = execState;
- lk.unlock();
+ opCtx->checkForInterrupt();
- opCtx->checkForInterrupt();
+ // Use the builder size instead of accumulating the document sizes directly so
+ // that we take into consideration the overhead of BSONArray indices.
+ if (arrBuilder->arrSize() &&
+ (arrBuilder->len() + obj.objsize() + 1024) > BSONObjMaxUserSize) {
+ _jumboChunkCloneState->clonerExec->enqueue(obj);
- // Use the builder size instead of accumulating the document sizes directly so
- // that we take into consideration the overhead of BSONArray indices.
- if (arrBuilder->arrSize() &&
- (arrBuilder->len() + obj.objsize() + 1024) > BSONObjMaxUserSize) {
- _jumboChunkCloneState->clonerExec->enqueue(obj);
+ // Stash the recordId we just read to add to the next batch.
+ if (!recordId.isNull()) {
+ invariant(!_jumboChunkCloneState->stashedRecordId);
+ _jumboChunkCloneState->stashedRecordId = std::move(recordId);
+ }
- // Stash the recordId we just read to add to the next batch.
- if (!recordId.isNull()) {
- invariant(!_jumboChunkCloneState->stashedRecordId);
- _jumboChunkCloneState->stashedRecordId = std::move(recordId);
+ break;
}
- break;
- }
-
- Snapshotted<BSONObj> doc;
- invariant(collection->findDoc(
- opCtx, _jumboChunkCloneState->stashedRecordId.value_or(recordId), &doc));
- arrBuilder->append(doc.value());
- _jumboChunkCloneState->stashedRecordId = boost::none;
+ Snapshotted<BSONObj> doc;
+ invariant(collection->findDoc(
+ opCtx, _jumboChunkCloneState->stashedRecordId.value_or(recordId), &doc));
+ arrBuilder->append(doc.value());
+ _jumboChunkCloneState->stashedRecordId = boost::none;
- lk.lock();
- _jumboChunkCloneState->docsCloned++;
- lk.unlock();
+ lk.lock();
+ _jumboChunkCloneState->docsCloned++;
+ lk.unlock();
- ShardingStatistics::get(opCtx).countDocsClonedOnDonor.addAndFetch(1);
+ ShardingStatistics::get(opCtx).countDocsClonedOnDonor.addAndFetch(1);
+ }
+ } catch (DBException& exception) {
+ exception.addContext("Executor error while scanning for documents belonging to chunk");
+ throw;
}
stdx::unique_lock<Latch> lk(_mutex);
@@ -648,10 +652,6 @@ void MigrationChunkClonerSourceLegacy::_nextCloneBatchFromIndexScan(OperationCon
_jumboChunkCloneState->clonerExec->saveState();
_jumboChunkCloneState->clonerExec->detachFromOperationContext();
-
- if (PlanExecutor::FAILURE == execState)
- uassertStatusOK(WorkingSetCommon::getMemberObjectStatus(obj).withContext(
- "Executor error while scanning for documents belonging to chunk"));
}
void MigrationChunkClonerSourceLegacy::_nextCloneBatchFromCloneLocs(OperationContext* opCtx,
@@ -874,33 +874,32 @@ Status MigrationChunkClonerSourceLegacy::_storeCurrentLocs(OperationContext* opC
bool isLargeChunk = false;
unsigned long long recCount = 0;
- BSONObj obj;
- RecordId recordId;
- PlanExecutor::ExecState state;
- while (PlanExecutor::ADVANCED == (state = exec->getNext(&obj, &recordId))) {
- Status interruptStatus = opCtx->checkForInterruptNoAssert();
- if (!interruptStatus.isOK()) {
- return interruptStatus;
- }
+ try {
+ BSONObj obj;
+ RecordId recordId;
+ while (PlanExecutor::ADVANCED == exec->getNext(&obj, &recordId)) {
+ Status interruptStatus = opCtx->checkForInterruptNoAssert();
+ if (!interruptStatus.isOK()) {
+ return interruptStatus;
+ }
- if (!isLargeChunk) {
- stdx::lock_guard<Latch> lk(_mutex);
- _cloneLocs.insert(recordId);
- }
+ if (!isLargeChunk) {
+ stdx::lock_guard<Latch> lk(_mutex);
+ _cloneLocs.insert(recordId);
+ }
- if (++recCount > maxRecsWhenFull) {
- isLargeChunk = true;
+ if (++recCount > maxRecsWhenFull) {
+ isLargeChunk = true;
- if (_forceJumbo) {
- _cloneLocs.clear();
- break;
+ if (_forceJumbo) {
+ _cloneLocs.clear();
+ break;
+ }
}
}
- }
-
- if (PlanExecutor::FAILURE == state) {
- return WorkingSetCommon::getMemberObjectStatus(obj).withContext(
- "Executor error while scanning for documents belonging to chunk");
+ } catch (DBException& exception) {
+ exception.addContext("Executor error while scanning for documents belonging to chunk");
+ throw;
}
const uint64_t collectionAverageObjectSize = collection->averageObjectSize(opCtx);
diff --git a/src/mongo/db/s/range_deletion_util.cpp b/src/mongo/db/s/range_deletion_util.cpp
index 42dccae3a6f..6149818a332 100644
--- a/src/mongo/db/s/range_deletion_util.cpp
+++ b/src/mongo/db/s/range_deletion_util.cpp
@@ -204,23 +204,22 @@ StatusWith<int> deleteNextBatch(OperationContext* opCtx,
uasserted(ErrorCodes::InternalError, "Failing for test");
}
- PlanExecutor::ExecState state = exec->getNext(&deletedObj, nullptr);
-
- if (state == PlanExecutor::IS_EOF) {
- break;
- }
-
- if (state == PlanExecutor::FAILURE) {
+ PlanExecutor::ExecState state;
+ try {
+ state = exec->getNext(&deletedObj, nullptr);
+ } catch (...) {
LOGV2_WARNING(
23776,
- "{PlanExecutor_statestr_state} - cursor error while trying to delete {min} to "
- "{max} in {nss}: FAILURE, stats: {Explain_getWinningPlanStats_exec_get}",
- "PlanExecutor_statestr_state"_attr = PlanExecutor::statestr(state),
+ "cursor error while trying to delete {min} to {max} in {nss}, stats: {stats}",
+ "cursor error while trying to delete range",
"min"_attr = redact(min),
"max"_attr = redact(max),
"nss"_attr = nss,
- "Explain_getWinningPlanStats_exec_get"_attr =
- Explain::getWinningPlanStats(exec.get()));
+ "stats"_attr = Explain::getWinningPlanStats(exec.get()));
+ break;
+ }
+
+ if (state == PlanExecutor::IS_EOF) {
break;
}
diff --git a/src/mongo/db/s/split_vector.cpp b/src/mongo/db/s/split_vector.cpp
index cdccbebb88b..9eae8191854 100644
--- a/src/mongo/db/s/split_vector.cpp
+++ b/src/mongo/db/s/split_vector.cpp
@@ -58,15 +58,15 @@ BSONObj prettyKey(const BSONObj& keyPattern, const BSONObj& key) {
} // namespace
-StatusWith<std::vector<BSONObj>> splitVector(OperationContext* opCtx,
- const NamespaceString& nss,
- const BSONObj& keyPattern,
- const BSONObj& min,
- const BSONObj& max,
- bool force,
- boost::optional<long long> maxSplitPoints,
- boost::optional<long long> maxChunkObjects,
- boost::optional<long long> maxChunkSizeBytes) {
+std::vector<BSONObj> splitVector(OperationContext* opCtx,
+ const NamespaceString& nss,
+ const BSONObj& keyPattern,
+ const BSONObj& min,
+ const BSONObj& max,
+ bool force,
+ boost::optional<long long> maxSplitPoints,
+ boost::optional<long long> maxChunkObjects,
+ boost::optional<long long> maxChunkSizeBytes) {
std::vector<BSONObj> splitKeys;
std::size_t splitVectorResponseSize = 0;
@@ -79,19 +79,16 @@ StatusWith<std::vector<BSONObj>> splitVector(OperationContext* opCtx,
AutoGetCollection autoColl(opCtx, nss, MODE_IS);
Collection* const collection = autoColl.getCollection();
- if (!collection) {
- return {ErrorCodes::NamespaceNotFound, "ns not found"};
- }
+ uassert(ErrorCodes::NamespaceNotFound, "ns not found", collection);
// Allow multiKey based on the invariant that shard keys must be single-valued. Therefore,
// any multi-key index prefixed by shard key cannot be multikey over the shard key fields.
const IndexDescriptor* idx =
collection->getIndexCatalog()->findShardKeyPrefixedIndex(opCtx, keyPattern, false);
- if (idx == nullptr) {
- return {ErrorCodes::IndexNotFound,
- "couldn't find index over splitting key " +
- keyPattern.clientReadable().toString()};
- }
+ uassert(ErrorCodes::IndexNotFound,
+ str::stream() << "couldn't find index over splitting key "
+ << keyPattern.clientReadable().toString(),
+ idx);
// extend min to get (min, MinKey, MinKey, ....)
KeyPattern kp(idx->keyPattern());
@@ -121,7 +118,7 @@ StatusWith<std::vector<BSONObj>> splitVector(OperationContext* opCtx,
// We need a maximum size for the chunk.
if (!maxChunkSizeBytes || maxChunkSizeBytes.get() <= 0) {
- return {ErrorCodes::InvalidOptions, "need to specify the desired max chunk size"};
+ uasserted(ErrorCodes::InvalidOptions, "need to specify the desired max chunk size");
}
// If there's not enough data for more than one chunk, no point continuing.
@@ -177,10 +174,9 @@ StatusWith<std::vector<BSONObj>> splitVector(OperationContext* opCtx,
BSONObj currKey;
PlanExecutor::ExecState state = exec->getNext(&currKey, nullptr);
- if (PlanExecutor::ADVANCED != state) {
- return {ErrorCodes::OperationFailed,
- "can't open a cursor to scan the range (desired range is possibly empty)"};
- }
+ uassert(ErrorCodes::OperationFailed,
+ "can't open a cursor to scan the range (desired range is possibly empty)",
+ state == PlanExecutor::ADVANCED);
// Get the final key in the range, and see if it's the same as the first key.
BSONObj maxKeyInChunk;
@@ -195,11 +191,10 @@ StatusWith<std::vector<BSONObj>> splitVector(OperationContext* opCtx,
InternalPlanner::BACKWARD);
PlanExecutor::ExecState state = exec->getNext(&maxKeyInChunk, nullptr);
- if (PlanExecutor::ADVANCED != state) {
- return {ErrorCodes::OperationFailed,
- "can't open a cursor to find final key in range (desired range is possibly "
- "empty)"};
- }
+ uassert(
+ ErrorCodes::OperationFailed,
+ "can't open a cursor to find final key in range (desired range is possibly empty)",
+ state == PlanExecutor::ADVANCED);
}
if (currKey.woCompare(maxKeyInChunk) == 0) {
@@ -287,11 +282,6 @@ StatusWith<std::vector<BSONObj>> splitVector(OperationContext* opCtx,
state = exec->getNext(&currKey, nullptr);
}
- if (PlanExecutor::FAILURE == state) {
- return WorkingSetCommon::getMemberObjectStatus(currKey).withContext(
- "Executor error during splitVector command");
- }
-
if (!force)
break;
diff --git a/src/mongo/db/s/split_vector.h b/src/mongo/db/s/split_vector.h
index 9429dff6a8b..f7b28c8e648 100644
--- a/src/mongo/db/s/split_vector.h
+++ b/src/mongo/db/s/split_vector.h
@@ -56,14 +56,14 @@ class StatusWith;
* If force is set, split at the halfway point of the chunk. This also effectively
* makes maxChunkSize equal the size of the chunk.
*/
-StatusWith<std::vector<BSONObj>> splitVector(OperationContext* opCtx,
- const NamespaceString& nss,
- const BSONObj& keyPattern,
- const BSONObj& min,
- const BSONObj& max,
- bool force,
- boost::optional<long long> maxSplitPoints,
- boost::optional<long long> maxChunkObjects,
- boost::optional<long long> maxChunkSizeBytes);
+std::vector<BSONObj> splitVector(OperationContext* opCtx,
+ const NamespaceString& nss,
+ const BSONObj& keyPattern,
+ const BSONObj& min,
+ const BSONObj& max,
+ bool force,
+ boost::optional<long long> maxSplitPoints,
+ boost::optional<long long> maxChunkObjects,
+ boost::optional<long long> maxChunkSizeBytes);
} // namespace mongo
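Callers of splitVector() no longer unwrap a StatusWith; the error codes previously returned now arrive as exceptions. A minimal sketch of the updated calling convention, assuming the ExceptionFor alias from mongo/util/assert_util.h:

    try {
        auto splitKeys = splitVector(opCtx, nss, keyPattern, min, max,
                                     /*force=*/false, boost::none, boost::none,
                                     maxChunkSizeBytes);
        // ... use splitKeys ...
    } catch (const ExceptionFor<ErrorCodes::NamespaceNotFound>&) {
        // The collection was dropped out from under us; handle as needed.
    }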
diff --git a/src/mongo/db/s/split_vector_command.cpp b/src/mongo/db/s/split_vector_command.cpp
index 9af279ce4ef..6efe68ca5f2 100644
--- a/src/mongo/db/s/split_vector_command.cpp
+++ b/src/mongo/db/s/split_vector_command.cpp
@@ -133,18 +133,17 @@ public:
maxChunkSizeBytes = maxSizeBytesElem.numberLong();
}
- auto statusWithSplitKeys = splitVector(opCtx,
- nss,
- keyPattern,
- min,
- max,
- force,
- maxSplitPoints,
- maxChunkObjects,
- maxChunkSizeBytes);
- uassertStatusOK(statusWithSplitKeys.getStatus());
-
- result.append("splitKeys", statusWithSplitKeys.getValue());
+ auto splitKeys = splitVector(opCtx,
+ nss,
+ keyPattern,
+ min,
+ max,
+ force,
+ maxSplitPoints,
+ maxChunkObjects,
+ maxChunkSizeBytes);
+
+ result.append("splitKeys", splitKeys);
return true;
}
diff --git a/src/mongo/db/s/split_vector_test.cpp b/src/mongo/db/s/split_vector_test.cpp
index 2325884de11..7350527d8b3 100644
--- a/src/mongo/db/s/split_vector_test.cpp
+++ b/src/mongo/db/s/split_vector_test.cpp
@@ -84,15 +84,15 @@ private:
};
TEST_F(SplitVectorTest, SplitVectorInHalf) {
- std::vector<BSONObj> splitKeys = unittest::assertGet(splitVector(operationContext(),
- kNss,
- BSON(kPattern << 1),
- BSON(kPattern << 0),
- BSON(kPattern << 100),
- false,
- boost::none,
- boost::none,
- getDocSizeBytes() * 100LL));
+ std::vector<BSONObj> splitKeys = splitVector(operationContext(),
+ kNss,
+ BSON(kPattern << 1),
+ BSON(kPattern << 0),
+ BSON(kPattern << 100),
+ false,
+ boost::none,
+ boost::none,
+ getDocSizeBytes() * 100LL);
std::vector<BSONObj> expected = {BSON(kPattern << 50)};
ASSERT_EQ(splitKeys.size(), expected.size());
@@ -104,15 +104,15 @@ TEST_F(SplitVectorTest, SplitVectorInHalf) {
}
TEST_F(SplitVectorTest, ForceSplit) {
- std::vector<BSONObj> splitKeys = unittest::assertGet(splitVector(operationContext(),
- kNss,
- BSON(kPattern << 1),
- BSON(kPattern << 0),
- BSON(kPattern << 100),
- true,
- boost::none,
- boost::none,
- getDocSizeBytes() * 6LL));
+ std::vector<BSONObj> splitKeys = splitVector(operationContext(),
+ kNss,
+ BSON(kPattern << 1),
+ BSON(kPattern << 0),
+ BSON(kPattern << 100),
+ true,
+ boost::none,
+ boost::none,
+ getDocSizeBytes() * 6LL);
std::vector<BSONObj> expected = {BSON(kPattern << 50)};
ASSERT_EQ(splitKeys.size(), expected.size());
@@ -124,15 +124,15 @@ TEST_F(SplitVectorTest, ForceSplit) {
}
TEST_F(SplitVectorTest, MaxChunkObjectsSet) {
- std::vector<BSONObj> splitKeys = unittest::assertGet(splitVector(operationContext(),
- kNss,
- BSON(kPattern << 1),
- BSON(kPattern << 0),
- BSON(kPattern << 100),
- false,
- boost::none,
- 10,
- getDocSizeBytes() * 100LL));
+ std::vector<BSONObj> splitKeys = splitVector(operationContext(),
+ kNss,
+ BSON(kPattern << 1),
+ BSON(kPattern << 0),
+ BSON(kPattern << 100),
+ false,
+ boost::none,
+ 10,
+ getDocSizeBytes() * 100LL);
// Unlike the SplitVectorInHalf test, should split at every 10th key.
std::vector<BSONObj> expected = {BSON(kPattern << 10),
BSON(kPattern << 21),
@@ -153,15 +153,15 @@ TEST_F(SplitVectorTest, MaxChunkObjectsSet) {
}
TEST_F(SplitVectorTest, SplitEveryThird) {
- std::vector<BSONObj> splitKeys = unittest::assertGet(splitVector(operationContext(),
- kNss,
- BSON(kPattern << 1),
- BSON(kPattern << 0),
- BSON(kPattern << 100),
- false,
- boost::none,
- boost::none,
- getDocSizeBytes() * 6LL));
+ std::vector<BSONObj> splitKeys = splitVector(operationContext(),
+ kNss,
+ BSON(kPattern << 1),
+ BSON(kPattern << 0),
+ BSON(kPattern << 100),
+ false,
+ boost::none,
+ boost::none,
+ getDocSizeBytes() * 6LL);
std::vector<BSONObj> expected = {
BSON(kPattern << 3), BSON(kPattern << 7), BSON(kPattern << 11), BSON(kPattern << 15),
BSON(kPattern << 19), BSON(kPattern << 23), BSON(kPattern << 27), BSON(kPattern << 31),
@@ -180,15 +180,15 @@ TEST_F(SplitVectorTest, SplitEveryThird) {
}
TEST_F(SplitVectorTest, MaxSplitPointsSet) {
- std::vector<BSONObj> splitKeys = unittest::assertGet(splitVector(operationContext(),
- kNss,
- BSON(kPattern << 1),
- BSON(kPattern << 0),
- BSON(kPattern << 100),
- false,
- 3,
- boost::none,
- getDocSizeBytes() * 6LL));
+ std::vector<BSONObj> splitKeys = splitVector(operationContext(),
+ kNss,
+ BSON(kPattern << 1),
+ BSON(kPattern << 0),
+ BSON(kPattern << 100),
+ false,
+ 3,
+ boost::none,
+ getDocSizeBytes() * 6LL);
// Unlike the SplitEveryThird test, should only return the first 3 split points since
// maxSplitPoints is 3.
std::vector<BSONObj> expected = {
@@ -203,15 +203,15 @@ TEST_F(SplitVectorTest, MaxSplitPointsSet) {
}
TEST_F(SplitVectorTest, IgnoreMaxChunkObjects) {
- std::vector<BSONObj> splitKeys = unittest::assertGet(splitVector(operationContext(),
- kNss,
- BSON(kPattern << 1),
- BSON(kPattern << 0),
- BSON(kPattern << 100),
- false,
- boost::none,
- 10,
- getDocSizeBytes() * 6LL));
+ std::vector<BSONObj> splitKeys = splitVector(operationContext(),
+ kNss,
+ BSON(kPattern << 1),
+ BSON(kPattern << 0),
+ BSON(kPattern << 100),
+ false,
+ boost::none,
+ 10,
+ getDocSizeBytes() * 6LL);
// The "maxChunkObjects"th key (10) is larger than the key count at half the maxChunkSize (3),
// so it should be ignored.
std::vector<BSONObj> expected = {
@@ -232,59 +232,59 @@ TEST_F(SplitVectorTest, IgnoreMaxChunkObjects) {
}
TEST_F(SplitVectorTest, NoSplit) {
- std::vector<BSONObj> splitKeys = unittest::assertGet(splitVector(operationContext(),
- kNss,
- BSON(kPattern << 1),
- BSON(kPattern << 0),
- BSON(kPattern << 100),
- false,
- boost::none,
- boost::none,
- getDocSizeBytes() * 1000LL));
+ std::vector<BSONObj> splitKeys = splitVector(operationContext(),
+ kNss,
+ BSON(kPattern << 1),
+ BSON(kPattern << 0),
+ BSON(kPattern << 100),
+ false,
+ boost::none,
+ boost::none,
+ getDocSizeBytes() * 1000LL);
ASSERT_EQUALS(splitKeys.size(), 0UL);
}
TEST_F(SplitVectorTest, NoCollection) {
- auto status = splitVector(operationContext(),
- NamespaceString("dummy", "collection"),
- BSON(kPattern << 1),
- BSON(kPattern << 0),
- BSON(kPattern << 100),
- false,
- boost::none,
- boost::none,
- boost::none)
- .getStatus();
- ASSERT_EQUALS(status.code(), ErrorCodes::NamespaceNotFound);
+ ASSERT_THROWS_CODE(splitVector(operationContext(),
+ NamespaceString("dummy", "collection"),
+ BSON(kPattern << 1),
+ BSON(kPattern << 0),
+ BSON(kPattern << 100),
+ false,
+ boost::none,
+ boost::none,
+ boost::none),
+ DBException,
+ ErrorCodes::NamespaceNotFound);
}
TEST_F(SplitVectorTest, NoIndex) {
- auto status = splitVector(operationContext(),
- kNss,
- BSON("foo" << 1),
- BSON(kPattern << 0),
- BSON(kPattern << 100),
- false,
- boost::none,
- boost::none,
- boost::none)
- .getStatus();
- ASSERT_EQUALS(status.code(), ErrorCodes::IndexNotFound);
+ ASSERT_THROWS_CODE(splitVector(operationContext(),
+ kNss,
+ BSON("foo" << 1),
+ BSON(kPattern << 0),
+ BSON(kPattern << 100),
+ false,
+ boost::none,
+ boost::none,
+ boost::none),
+ DBException,
+ ErrorCodes::IndexNotFound);
}
TEST_F(SplitVectorTest, NoMaxChunkSize) {
- auto status = splitVector(operationContext(),
- kNss,
- BSON(kPattern << 1),
- BSON(kPattern << 0),
- BSON(kPattern << 100),
- false,
- boost::none,
- boost::none,
- boost::none)
- .getStatus();
- ASSERT_EQUALS(status.code(), ErrorCodes::InvalidOptions);
+ ASSERT_THROWS_CODE(splitVector(operationContext(),
+ kNss,
+ BSON(kPattern << 1),
+ BSON(kPattern << 0),
+ BSON(kPattern << 100),
+ false,
+ boost::none,
+ boost::none,
+ boost::none),
+ DBException,
+ ErrorCodes::InvalidOptions);
}
const NamespaceString kJumboNss = NamespaceString("foo", "bar2");
@@ -323,15 +323,15 @@ private:
};
TEST_F(SplitVectorJumboTest, JumboChunk) {
- std::vector<BSONObj> splitKeys = unittest::assertGet(splitVector(operationContext(),
- kJumboNss,
- BSON(kJumboPattern << 1),
- BSON(kJumboPattern << 1),
- BSON(kJumboPattern << 2),
- false,
- boost::none,
- boost::none,
- getDocSizeBytes() * 1LL));
+ std::vector<BSONObj> splitKeys = splitVector(operationContext(),
+ kJumboNss,
+ BSON(kJumboPattern << 1),
+ BSON(kJumboPattern << 1),
+ BSON(kJumboPattern << 2),
+ false,
+ boost::none,
+ boost::none,
+ getDocSizeBytes() * 1LL);
ASSERT_EQUALS(splitKeys.size(), 0UL);
}
@@ -383,15 +383,15 @@ public:
};
TEST_F(SplitVectorMaxResponseSizeTest, MaxResponseSize) {
- std::vector<BSONObj> splitKeys = unittest::assertGet(splitVector(operationContext(),
- kMaxResponseNss,
- BSON("a" << 1),
- {},
- {},
- false,
- boost::none,
- boost::none,
- 1LL));
+ std::vector<BSONObj> splitKeys = splitVector(operationContext(),
+ kMaxResponseNss,
+ BSON("a" << 1),
+ {},
+ {},
+ false,
+ boost::none,
+ boost::none,
+ 1LL);
ASSERT_EQUALS((int)splitKeys.size(), numDocs - 2);
diff --git a/src/mongo/db/transaction_history_iterator.cpp b/src/mongo/db/transaction_history_iterator.cpp
index 7fae8ea2da1..fae77f37d7b 100644
--- a/src/mongo/db/transaction_history_iterator.cpp
+++ b/src/mongo/db/transaction_history_iterator.cpp
@@ -90,16 +90,19 @@ BSONObj findOneOplogEntry(OperationContext* opCtx,
auto exec = uassertStatusOK(
getExecutorFind(opCtx, oplogRead.getCollection(), std::move(cq), permitYield));
- auto getNextResult = exec->getNext(&oplogBSON, nullptr);
+ PlanExecutor::ExecState getNextResult;
+ try {
+ getNextResult = exec->getNext(&oplogBSON, nullptr);
+ } catch (DBException& exception) {
+ exception.addContext("PlanExecutor error in TransactionHistoryIterator");
+ throw;
+ }
+
uassert(ErrorCodes::IncompleteTransactionHistory,
str::stream() << "oplog no longer contains the complete write history of this "
"transaction, log with opTime "
<< opTime.toBSON() << " cannot be found",
getNextResult != PlanExecutor::IS_EOF);
- if (getNextResult != PlanExecutor::ADVANCED) {
- uassertStatusOKWithContext(WorkingSetCommon::getMemberObjectStatus(oplogBSON),
- "PlanExecutor error in TransactionHistoryIterator");
- }
return oplogBSON.getOwned();
}
diff --git a/src/mongo/db/ttl.cpp b/src/mongo/db/ttl.cpp
index 1e5be6b1900..711bafcc8d4 100644
--- a/src/mongo/db/ttl.cpp
+++ b/src/mongo/db/ttl.cpp
@@ -378,13 +378,14 @@ private:
PlanExecutor::YIELD_AUTO,
direction);
- Status result = exec->executePlan();
- if (!result.isOK()) {
+ try {
+ exec->executePlan();
+ } catch (const DBException& exception) {
LOGV2_ERROR(22543,
"ttl query execution for index {index} failed with status: {error}",
"TTL query execution failed",
"index"_attr = idx,
- "error"_attr = redact(result));
+ "error"_attr = redact(exception.toStatus()));
return;
}
diff --git a/src/mongo/dbtests/query_plan_executor.cpp b/src/mongo/dbtests/query_plan_executor.cpp
index 15c069c04d8..32c61a80eb5 100644
--- a/src/mongo/dbtests/query_plan_executor.cpp
+++ b/src/mongo/dbtests/query_plan_executor.cpp
@@ -248,8 +248,10 @@ TEST_F(PlanExecutorTest, ShouldReportErrorIfExceedsTimeLimitDuringYield) {
auto exec = makeCollScanExec(coll, filterObj, PlanExecutor::YieldPolicy::ALWAYS_TIME_OUT);
BSONObj resultObj;
- ASSERT_EQ(PlanExecutor::FAILURE, exec->getNext(&resultObj, nullptr));
- ASSERT_EQ(ErrorCodes::ExceededTimeLimit, WorkingSetCommon::getMemberObjectStatus(resultObj));
+ ASSERT_THROWS_CODE_AND_WHAT(exec->getNext(&resultObj, nullptr),
+ DBException,
+ ErrorCodes::ExceededTimeLimit,
+ "Using AlwaysTimeOutYieldPolicy");
}
TEST_F(PlanExecutorTest, ShouldReportErrorIfKilledDuringYieldButIsTailableAndAwaitData) {
@@ -266,8 +268,10 @@ TEST_F(PlanExecutorTest, ShouldReportErrorIfKilledDuringYieldButIsTailableAndAwa
TailableModeEnum::kTailableAndAwaitData);
BSONObj resultObj;
- ASSERT_EQ(PlanExecutor::FAILURE, exec->getNext(&resultObj, nullptr));
- ASSERT_EQ(ErrorCodes::ExceededTimeLimit, WorkingSetCommon::getMemberObjectStatus(resultObj));
+ ASSERT_THROWS_CODE_AND_WHAT(exec->getNext(&resultObj, nullptr),
+ DBException,
+ ErrorCodes::ExceededTimeLimit,
+ "Using AlwaysTimeOutYieldPolicy");
}
TEST_F(PlanExecutorTest, ShouldNotSwallowExceedsTimeLimitDuringYieldButIsTailableButNotAwaitData) {
@@ -282,8 +286,10 @@ TEST_F(PlanExecutorTest, ShouldNotSwallowExceedsTimeLimitDuringYieldButIsTailabl
coll, filterObj, PlanExecutor::YieldPolicy::ALWAYS_TIME_OUT, TailableModeEnum::kTailable);
BSONObj resultObj;
- ASSERT_EQ(PlanExecutor::FAILURE, exec->getNext(&resultObj, nullptr));
- ASSERT_EQ(ErrorCodes::ExceededTimeLimit, WorkingSetCommon::getMemberObjectStatus(resultObj));
+ ASSERT_THROWS_CODE_AND_WHAT(exec->getNext(&resultObj, nullptr),
+ DBException,
+ ErrorCodes::ExceededTimeLimit,
+ "Using AlwaysTimeOutYieldPolicy");
}
TEST_F(PlanExecutorTest, ShouldReportErrorIfKilledDuringYield) {
@@ -297,8 +303,10 @@ TEST_F(PlanExecutorTest, ShouldReportErrorIfKilledDuringYield) {
auto exec = makeCollScanExec(coll, filterObj, PlanExecutor::YieldPolicy::ALWAYS_MARK_KILLED);
BSONObj resultObj;
- ASSERT_EQ(PlanExecutor::FAILURE, exec->getNext(&resultObj, nullptr));
- ASSERT_EQ(ErrorCodes::QueryPlanKilled, WorkingSetCommon::getMemberObjectStatus(resultObj));
+ ASSERT_THROWS_CODE_AND_WHAT(exec->getNext(&resultObj, nullptr),
+ DBException,
+ ErrorCodes::QueryPlanKilled,
+ "Using AlwaysPlanKilledYieldPolicy");
}
class PlanExecutorSnapshotTest : public PlanExecutorTest {
diff --git a/src/mongo/dbtests/query_stage_and.cpp b/src/mongo/dbtests/query_stage_and.cpp
index 9750b9af706..9070b5f5621 100644
--- a/src/mongo/dbtests/query_stage_and.cpp
+++ b/src/mongo/dbtests/query_stage_and.cpp
@@ -48,8 +48,8 @@
#include "mongo/db/exec/and_sorted.h"
#include "mongo/db/exec/fetch.h"
#include "mongo/db/exec/index_scan.h"
+#include "mongo/db/exec/mock_stage.h"
#include "mongo/db/exec/plan_stage.h"
-#include "mongo/db/exec/queued_data_stage.h"
#include "mongo/db/json.h"
#include "mongo/db/matcher/expression_parser.h"
#include "mongo/dbtests/dbtests.h"
@@ -108,18 +108,14 @@ public:
}
/**
- * Executes plan stage until EOF.
- * Returns number of results seen if execution reaches EOF successfully.
- * Otherwise, returns -1 on stage failure.
+ * Executes plan stage until EOF. Returns number of results seen if execution reaches EOF
+ * successfully. Throws on stage failure.
*/
int countResults(PlanStage* stage) {
int count = 0;
while (!stage->isEOF()) {
WorkingSetID id = WorkingSet::INVALID_ID;
PlanStage::StageState status = stage->work(&id);
- if (PlanStage::FAILURE == status) {
- return -1;
- }
if (PlanStage::ADVANCED != status) {
continue;
}
@@ -129,19 +125,14 @@ public:
}
/**
- * Gets the next result from 'stage'.
- *
- * Fails if the stage fails or returns FAILURE, if the returned working
- * set member is not fetched, or if there are no more results.
+ * Gets the next result from 'stage'. Asserts that the returned working set member is fetched,
+ * and that there are more results.
*/
BSONObj getNext(PlanStage* stage, WorkingSet* ws) {
while (!stage->isEOF()) {
WorkingSetID id = WorkingSet::INVALID_ID;
PlanStage::StageState status = stage->work(&id);
- // We shouldn't fail or be dead.
- ASSERT(PlanStage::FAILURE != status);
-
if (PlanStage::ADVANCED != status) {
continue;
}
@@ -432,8 +423,9 @@ public:
params.direction = -1;
ah->addChild(std::make_unique<IndexScan>(_expCtx.get(), params, &ws, nullptr));
- // Stage execution should fail.
- ASSERT_EQUALS(-1, countResults(ah.get()));
+ ASSERT_THROWS_CODE(countResults(ah.get()),
+ DBException,
+ ErrorCodes::QueryExceededMemoryLimitNoDiskUseAllowed);
}
};
@@ -585,7 +577,9 @@ public:
ah->addChild(std::make_unique<IndexScan>(_expCtx.get(), params, &ws, nullptr));
// Stage execution should fail.
- ASSERT_EQUALS(-1, countResults(ah.get()));
+ ASSERT_THROWS_CODE(countResults(ah.get()),
+ DBException,
+ ErrorCodes::QueryExceededMemoryLimitNoDiskUseAllowed);
}
};
@@ -811,47 +805,42 @@ public:
const BSONObj dataObj = fromjson("{'foo': 'bar'}");
- // Confirm PlanStage::FAILURE when children contain the following WorkingSetMembers:
+        // Confirm an exception is thrown when children contain the following WorkingSetMembers:
// Child1: Data
// Child2: NEED_TIME, FAILURE
{
WorkingSet ws;
const auto andHashStage = std::make_unique<AndHashStage>(_expCtx.get(), &ws);
- auto childStage1 = std::make_unique<QueuedDataStage>(_expCtx.get(), &ws);
+ auto childStage1 = std::make_unique<MockStage>(_expCtx.get(), &ws);
{
WorkingSetID id = ws.allocate();
WorkingSetMember* wsm = ws.get(id);
wsm->recordId = RecordId(1);
wsm->doc = {SnapshotId(), Document{dataObj}};
ws.transitionToRecordIdAndObj(id);
- childStage1->pushBack(id);
+ childStage1->enqueueAdvanced(id);
}
- auto childStage2 = std::make_unique<QueuedDataStage>(_expCtx.get(), &ws);
- childStage2->pushBack(PlanStage::NEED_TIME);
- childStage2->pushBack(PlanStage::FAILURE);
+ auto childStage2 = std::make_unique<MockStage>(_expCtx.get(), &ws);
+ childStage2->enqueueStateCode(PlanStage::NEED_TIME);
+ childStage2->enqueueError(Status{ErrorCodes::InternalError, "mock error"});
andHashStage->addChild(std::move(childStage1));
andHashStage->addChild(std::move(childStage2));
- WorkingSetID id = WorkingSet::INVALID_ID;
- PlanStage::StageState state = PlanStage::NEED_TIME;
- while (PlanStage::NEED_TIME == state) {
- state = andHashStage->work(&id);
- }
-
- ASSERT_EQ(PlanStage::FAILURE, state);
+ ASSERT_THROWS_CODE(
+ getNext(andHashStage.get(), &ws), DBException, ErrorCodes::InternalError);
}
- // Confirm PlanStage::FAILURE when children contain the following WorkingSetMembers:
+        // Confirm an exception is thrown when children contain the following WorkingSetMembers:
// Child1: Data, FAILURE
// Child2: Data
{
WorkingSet ws;
const auto andHashStage = std::make_unique<AndHashStage>(_expCtx.get(), &ws);
- auto childStage1 = std::make_unique<QueuedDataStage>(_expCtx.get(), &ws);
+ auto childStage1 = std::make_unique<MockStage>(_expCtx.get(), &ws);
{
WorkingSetID id = ws.allocate();
@@ -859,70 +848,60 @@ public:
wsm->recordId = RecordId(1);
wsm->doc = {SnapshotId(), Document{dataObj}};
ws.transitionToRecordIdAndObj(id);
- childStage1->pushBack(id);
+ childStage1->enqueueAdvanced(id);
}
- childStage1->pushBack(PlanStage::FAILURE);
+ childStage1->enqueueError(Status{ErrorCodes::InternalError, "mock error"});
- auto childStage2 = std::make_unique<QueuedDataStage>(_expCtx.get(), &ws);
+ auto childStage2 = std::make_unique<MockStage>(_expCtx.get(), &ws);
{
WorkingSetID id = ws.allocate();
WorkingSetMember* wsm = ws.get(id);
wsm->recordId = RecordId(2);
wsm->doc = {SnapshotId(), Document{dataObj}};
ws.transitionToRecordIdAndObj(id);
- childStage2->pushBack(id);
+ childStage2->enqueueAdvanced(id);
}
andHashStage->addChild(std::move(childStage1));
andHashStage->addChild(std::move(childStage2));
- WorkingSetID id = WorkingSet::INVALID_ID;
- PlanStage::StageState state = PlanStage::NEED_TIME;
- while (PlanStage::NEED_TIME == state) {
- state = andHashStage->work(&id);
- }
-
- ASSERT_EQ(PlanStage::FAILURE, state);
+ ASSERT_THROWS_CODE(
+ getNext(andHashStage.get(), &ws), DBException, ErrorCodes::InternalError);
}
- // Confirm PlanStage::FAILURE when children contain the following WorkingSetMembers:
+        // Confirm an exception is thrown when children contain the following WorkingSetMembers:
// Child1: Data
// Child2: Data, FAILURE
{
WorkingSet ws;
const auto andHashStage = std::make_unique<AndHashStage>(_expCtx.get(), &ws);
- auto childStage1 = std::make_unique<QueuedDataStage>(_expCtx.get(), &ws);
+ auto childStage1 = std::make_unique<MockStage>(_expCtx.get(), &ws);
{
WorkingSetID id = ws.allocate();
WorkingSetMember* wsm = ws.get(id);
wsm->recordId = RecordId(1);
wsm->doc = {SnapshotId(), Document{dataObj}};
ws.transitionToRecordIdAndObj(id);
- childStage1->pushBack(id);
+ childStage1->enqueueAdvanced(id);
}
- auto childStage2 = std::make_unique<QueuedDataStage>(_expCtx.get(), &ws);
+ auto childStage2 = std::make_unique<MockStage>(_expCtx.get(), &ws);
{
WorkingSetID id = ws.allocate();
WorkingSetMember* wsm = ws.get(id);
wsm->recordId = RecordId(2);
wsm->doc = {SnapshotId(), Document{dataObj}};
ws.transitionToRecordIdAndObj(id);
- childStage2->pushBack(id);
+ childStage2->enqueueAdvanced(id);
}
- childStage2->pushBack(PlanStage::FAILURE);
+ childStage2->enqueueError(Status{ErrorCodes::InternalError, "internal error"});
andHashStage->addChild(std::move(childStage1));
andHashStage->addChild(std::move(childStage2));
- WorkingSetID id = WorkingSet::INVALID_ID;
- PlanStage::StageState state = PlanStage::NEED_TIME;
- while (PlanStage::NEED_TIME == state) {
- state = andHashStage->work(&id);
- }
-
- ASSERT_EQ(PlanStage::FAILURE, state);
+ ASSERT_THROWS_CODE(
+ getNext(andHashStage.get(), &ws), DBException, ErrorCodes::InternalError);
}
}
};
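The tests above swap QueuedDataStage for the new MockStage. Its queueing interface, as exercised here: enqueueAdvanced(id) hands back a working set member, enqueueStateCode(code) replays a bare state such as NEED_TIME, and enqueueError(status) makes the next work() call throw. A condensed sketch under those assumptions:

    WorkingSet ws;
    auto mock = std::make_unique<MockStage>(_expCtx.get(), &ws);
    mock->enqueueStateCode(PlanStage::NEED_TIME);
    mock->enqueueError(Status{ErrorCodes::InternalError, "mock error"});

    WorkingSetID id = WorkingSet::INVALID_ID;
    ASSERT_EQ(PlanStage::NEED_TIME, mock->work(&id));
    ASSERT_THROWS_CODE(mock->work(&id), DBException, ErrorCodes::InternalError);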
diff --git a/src/mongo/dbtests/query_stage_cached_plan.cpp b/src/mongo/dbtests/query_stage_cached_plan.cpp
index 3811a73e3d0..87ea3b7e3bd 100644
--- a/src/mongo/dbtests/query_stage_cached_plan.cpp
+++ b/src/mongo/dbtests/query_stage_cached_plan.cpp
@@ -39,7 +39,7 @@
#include "mongo/db/db_raii.h"
#include "mongo/db/dbdirectclient.h"
#include "mongo/db/exec/cached_plan.h"
-#include "mongo/db/exec/queued_data_stage.h"
+#include "mongo/db/exec/mock_stage.h"
#include "mongo/db/jsobj.h"
#include "mongo/db/json.h"
#include "mongo/db/namespace_string.h"
@@ -132,8 +132,6 @@ public:
WorkingSetID id = WorkingSet::INVALID_ID;
state = cachedPlanStage->work(&id);
- ASSERT_NE(state, PlanStage::FAILURE);
-
if (state == PlanStage::ADVANCED) {
auto member = ws.get(id);
ASSERT(cq->root()->matchesBSON(member->doc.value().toBson()));
@@ -152,9 +150,9 @@ public:
const size_t decisionWorks = 10;
const size_t mockWorks =
1U + static_cast<size_t>(internalQueryCacheEvictionRatio * decisionWorks);
- auto mockChild = std::make_unique<QueuedDataStage>(_expCtx.get(), &_ws);
+ auto mockChild = std::make_unique<MockStage>(_expCtx.get(), &_ws);
for (size_t i = 0; i < mockWorks; i++) {
- mockChild->pushBack(PlanStage::NEED_TIME);
+ mockChild->enqueueStateCode(PlanStage::NEED_TIME);
}
CachedPlanStage cachedPlanStage(_expCtx.get(),
@@ -182,10 +180,10 @@ protected:
};
/**
- * Test that on failure, the cached plan stage replans the query but does not create a new cache
- * entry.
+ * Test that on a memory limit exceeded failure, the cached plan stage replans the query but does
+ * not create a new cache entry.
*/
-TEST_F(QueryStageCachedPlan, QueryStageCachedPlanFailure) {
+TEST_F(QueryStageCachedPlan, QueryStageCachedPlanFailureMemoryLimitExceeded) {
AutoGetCollectionForReadCommand ctx(&_opCtx, nss);
Collection* collection = ctx.getCollection();
ASSERT(collection);
@@ -206,9 +204,10 @@ TEST_F(QueryStageCachedPlan, QueryStageCachedPlanFailure) {
QueryPlannerParams plannerParams;
fillOutPlannerParams(&_opCtx, collection, cq.get(), &plannerParams);
- // Queued data stage will return a failure during the cached plan trial period.
- auto mockChild = std::make_unique<QueuedDataStage>(_expCtx.get(), &_ws);
- mockChild->pushBack(PlanStage::FAILURE);
+    // The mock stage will throw an error during the cached plan trial period.
+ auto mockChild = std::make_unique<MockStage>(_expCtx.get(), &_ws);
+ mockChild->enqueueError(
+ Status{ErrorCodes::QueryExceededMemoryLimitNoDiskUseAllowed, "mock error"});
// High enough so that we shouldn't trigger a replan based on works.
const size_t decisionWorks = 50;
@@ -262,9 +261,9 @@ TEST_F(QueryStageCachedPlan, QueryStageCachedPlanHitMaxWorks) {
const size_t decisionWorks = 10;
const size_t mockWorks =
1U + static_cast<size_t>(internalQueryCacheEvictionRatio * decisionWorks);
- auto mockChild = std::make_unique<QueuedDataStage>(_expCtx.get(), &_ws);
+ auto mockChild = std::make_unique<MockStage>(_expCtx.get(), &_ws);
for (size_t i = 0; i < mockWorks; i++) {
- mockChild->pushBack(PlanStage::NEED_TIME);
+ mockChild->enqueueStateCode(PlanStage::NEED_TIME);
}
CachedPlanStage cachedPlanStage(_expCtx.get(),
@@ -477,7 +476,7 @@ TEST_F(QueryStageCachedPlan, ThrowsOnYieldRecoveryWhenIndexIsDroppedBeforePlanSe
cq.get(),
plannerParams,
decisionWorks,
- std::make_unique<QueuedDataStage>(_expCtx.get(), &_ws));
+ std::make_unique<MockStage>(_expCtx.get(), &_ws));
// Drop an index while the CachedPlanStage is in a saved state. Restoring should fail, since we
// may still need the dropped index for plan selection.
@@ -519,7 +518,7 @@ TEST_F(QueryStageCachedPlan, DoesNotThrowOnYieldRecoveryWhenIndexIsDroppedAferPl
cq.get(),
plannerParams,
decisionWorks,
- std::make_unique<QueuedDataStage>(_expCtx.get(), &_ws));
+ std::make_unique<MockStage>(_expCtx.get(), &_ws));
PlanYieldPolicy yieldPolicy(PlanExecutor::YIELD_MANUAL,
_opCtx.getServiceContext()->getFastClockSource());
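The behavior exercised by these tests can be pictured with the following simplified control flow; this is a hedged sketch (replanSketch() and the loop budget are hypothetical), not the actual CachedPlanStage code.

    // Hypothetical sketch: a memory-limit error during the trial period
    // triggers a replan without writing a new plan-cache entry.
    Status runTrialPeriodSketch(PlanStage* cachedRoot, size_t maxWorks) {
        try {
            WorkingSetID id = WorkingSet::INVALID_ID;
            for (size_t works = 0; works < maxWorks; ++works) {
                if (cachedRoot->work(&id) == PlanStage::IS_EOF) {
                    return Status::OK();  // Cached plan finished within budget.
                }
            }
        } catch (const ExceptionFor<ErrorCodes::QueryExceededMemoryLimitNoDiskUseAllowed>&) {
            // Trial failed: replan from scratch, leaving the cache entry alone.
            return replanSketch(/*shouldCache=*/false);
        }
        // Works budget exhausted without reaching EOF: replan the query.
        return replanSketch(/*shouldCache=*/true);
    }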
diff --git a/src/mongo/dbtests/query_stage_collscan.cpp b/src/mongo/dbtests/query_stage_collscan.cpp
index f17052e3e03..c9b026119bf 100644
--- a/src/mongo/dbtests/query_stage_collscan.cpp
+++ b/src/mongo/dbtests/query_stage_collscan.cpp
@@ -422,7 +422,7 @@ TEST_F(QueryStageCollectionScanTest, QueryTestCollscanResumeAfterRecordIdSeekFai
WorkingSetID id = WorkingSet::INVALID_ID;
// Check that failed seek causes the entire resume to fail.
- ASSERT_EQUALS(PlanStage::FAILURE, ps->work(&id));
+ ASSERT_THROWS_CODE(ps->work(&id), DBException, ErrorCodes::KeyNotFound);
}
} // namespace query_stage_collection_scan
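The KeyNotFound expectation corresponds to a seek-failure check of roughly the following shape inside the collection scan; the message text and the seekSucceeded flag are illustrative, not the actual source.

    // Illustrative only: resuming from a RecordId that no longer exists now
    // raises an exception instead of returning PlanStage::FAILURE.
    uassert(ErrorCodes::KeyNotFound,
            "Failed to resume collection scan: the RecordId to resume from no longer exists",
            seekSucceeded);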
diff --git a/src/mongo/dbtests/query_stage_count.cpp b/src/mongo/dbtests/query_stage_count.cpp
index 304297724be..495105bad9c 100644
--- a/src/mongo/dbtests/query_stage_count.cpp
+++ b/src/mongo/dbtests/query_stage_count.cpp
@@ -175,28 +175,25 @@ public:
    // Runs the given count stage to completion, calling interject() between units of
    // work. Assumes that each unit of work counts a single document.
- const CountStats* runCount(CountStage& count_stage) {
+ const CountStats* runCount(CountStage& countStage) {
int interjection = 0;
WorkingSetID wsid;
- while (!count_stage.isEOF()) {
- // do some work -- assumes that one work unit counts a single doc
- PlanStage::StageState state = count_stage.work(&wsid);
- ASSERT_NOT_EQUALS(state, PlanStage::FAILURE);
+ while (!countStage.isEOF()) {
+ countStage.work(&wsid);
+ // Prepare for yield.
+ countStage.saveState();
- // prepare for yield
- count_stage.saveState();
-
- // interject in some way kInterjection times
+            // Interject in some way kInterjections times.
if (interjection < kInterjections) {
- interject(count_stage, interjection++);
+ interject(countStage, interjection++);
}
- // resume from yield
- count_stage.restoreState();
+ // Resume from yield.
+ countStage.restoreState();
}
- return static_cast<const CountStats*>(count_stage.getSpecificStats());
+ return static_cast<const CountStats*>(countStage.getSpecificStats());
}
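The saveState()/restoreState() calls follow the standard yield protocol; a minimal sketch of one iteration, where the interjection (a hypothetical mutation of the collection) runs while the stage holds no storage resources:

    countStage.saveState();     // Detach from storage; release any cursors.
    mutateCollectionSomehow();  // Hypothetical interjection (insert/update/delete).
    countStage.restoreState();  // Reacquire state; must tolerate the mutation.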
IndexScan* createIndexScan(MatchExpression* expr, WorkingSet* ws) {
diff --git a/src/mongo/dbtests/query_stage_distinct.cpp b/src/mongo/dbtests/query_stage_distinct.cpp
index 2dd15cd8f6c..ee369386069 100644
--- a/src/mongo/dbtests/query_stage_distinct.cpp
+++ b/src/mongo/dbtests/query_stage_distinct.cpp
@@ -289,7 +289,6 @@ public:
std::vector<int> seen;
while (PlanStage::IS_EOF != (state = distinct.work(&wsid))) {
- ASSERT_NE(PlanStage::FAILURE, state);
if (PlanStage::ADVANCED == state) {
seen.push_back(getIntFieldDotted(ws, wsid, "b"));
}
diff --git a/src/mongo/dbtests/query_stage_ensure_sorted.cpp b/src/mongo/dbtests/query_stage_ensure_sorted.cpp
index cccca5ac8d4..cddb3586d60 100644
--- a/src/mongo/dbtests/query_stage_ensure_sorted.cpp
+++ b/src/mongo/dbtests/query_stage_ensure_sorted.cpp
@@ -98,7 +98,6 @@ public:
BSONArrayBuilder arr(bob.subarrayStart("output"));
while (state != PlanStage::IS_EOF) {
state = ess.work(&id);
- ASSERT_NE(state, PlanStage::FAILURE);
if (state == PlanStage::ADVANCED) {
WorkingSetMember* member = ws.get(id);
auto obj = member->doc.value().toBson();
diff --git a/src/mongo/dbtests/query_stage_ixscan.cpp b/src/mongo/dbtests/query_stage_ixscan.cpp
index c7f3d0aa5fd..0a46f2a34ae 100644
--- a/src/mongo/dbtests/query_stage_ixscan.cpp
+++ b/src/mongo/dbtests/query_stage_ixscan.cpp
@@ -85,10 +85,7 @@ public:
PlanStage::StageState state = PlanStage::NEED_TIME;
while (PlanStage::ADVANCED != state) {
state = ixscan->work(&out);
-
- // There are certain states we shouldn't get.
ASSERT_NE(PlanStage::IS_EOF, state);
- ASSERT_NE(PlanStage::FAILURE, state);
}
return _ws.get(out);
diff --git a/src/mongo/dbtests/query_stage_limit_skip.cpp b/src/mongo/dbtests/query_stage_limit_skip.cpp
index 41050b305dd..8b1656a2c5b 100644
--- a/src/mongo/dbtests/query_stage_limit_skip.cpp
+++ b/src/mongo/dbtests/query_stage_limit_skip.cpp
@@ -39,8 +39,8 @@
#include "mongo/client/dbclient_cursor.h"
#include "mongo/db/client.h"
#include "mongo/db/exec/limit.h"
+#include "mongo/db/exec/mock_stage.h"
#include "mongo/db/exec/plan_stage.h"
-#include "mongo/db/exec/queued_data_stage.h"
#include "mongo/db/exec/skip.h"
#include "mongo/db/json.h"
#include "mongo/dbtests/dbtests.h"
@@ -55,22 +55,24 @@ using std::unique_ptr;
static const int N = 50;
-/* Populate a QueuedDataStage and return it. Caller owns it. */
-std::unique_ptr<QueuedDataStage> getMS(const boost::intrusive_ptr<ExpressionContext>& expCtx,
- WorkingSet* ws) {
- auto ms = std::make_unique<QueuedDataStage>(expCtx.get(), ws);
+/**
+ * Populates a 'MockStage' and returns it.
+ */
+std::unique_ptr<MockStage> getMS(const boost::intrusive_ptr<ExpressionContext>& expCtx,
+ WorkingSet* ws) {
+ auto ms = std::make_unique<MockStage>(expCtx.get(), ws);
    // Put N ADVANCED results into the mock stage, interleaved with NEED_TIME stalls.
for (int i = 0; i < N; ++i) {
- ms->pushBack(PlanStage::NEED_TIME);
+ ms->enqueueStateCode(PlanStage::NEED_TIME);
WorkingSetID id = ws->allocate();
WorkingSetMember* wsm = ws->get(id);
wsm->doc = {SnapshotId(), Document{BSON("x" << i)}};
wsm->transitionToOwnedObj();
- ms->pushBack(id);
+ ms->enqueueAdvanced(id);
- ms->pushBack(PlanStage::NEED_TIME);
+ ms->enqueueStateCode(PlanStage::NEED_TIME);
}
return ms;
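Each loop iteration enqueues NEED_TIME, an ADVANCED result, then another NEED_TIME, so a consumer sees two stalls per document. A sketch of such a consumer (countResults() is a hypothetical name, but the loop mirrors the work loops removed elsewhere in this change):

    // Counts ADVANCED results while absorbing the interleaved NEED_TIME stalls.
    int countResults(PlanStage* stage) {
        int count = 0;
        WorkingSetID id = WorkingSet::INVALID_ID;
        PlanStage::StageState state;
        while ((state = stage->work(&id)) != PlanStage::IS_EOF) {
            if (state == PlanStage::ADVANCED)
                ++count;
        }
        return count;
    }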
diff --git a/src/mongo/dbtests/query_stage_multiplan.cpp b/src/mongo/dbtests/query_stage_multiplan.cpp
index 59520a3b3aa..2236b34ac35 100644
--- a/src/mongo/dbtests/query_stage_multiplan.cpp
+++ b/src/mongo/dbtests/query_stage_multiplan.cpp
@@ -40,9 +40,9 @@
#include "mongo/db/exec/collection_scan.h"
#include "mongo/db/exec/fetch.h"
#include "mongo/db/exec/index_scan.h"
+#include "mongo/db/exec/mock_stage.h"
#include "mongo/db/exec/multi_plan.h"
#include "mongo/db/exec/plan_stage.h"
-#include "mongo/db/exec/queued_data_stage.h"
#include "mongo/db/json.h"
#include "mongo/db/matcher/expression_parser.h"
#include "mongo/db/namespace_string.h"
@@ -451,12 +451,12 @@ TEST_F(QueryStageMultiPlanTest, MPSBackupPlan) {
* Allocates a new WorkingSetMember with data 'dataObj' in 'ws', and adds the WorkingSetMember
- * to 'qds'.
+ * to 'mockStage'.
*/
-void addMember(QueuedDataStage* qds, WorkingSet* ws, BSONObj dataObj) {
+void addMember(MockStage* mockStage, WorkingSet* ws, BSONObj dataObj) {
WorkingSetID id = ws->allocate();
WorkingSetMember* wsm = ws->get(id);
-    wsm->doc = {SnapshotId(), Document{BSON("x" << 1)}};
+    wsm->doc = {SnapshotId(), Document{dataObj}};
wsm->transitionToOwnedObj();
- qds->pushBack(id);
+ mockStage->enqueueAdvanced(id);
}
// Test the structure and values of the explain output.
@@ -467,15 +467,15 @@ TEST_F(QueryStageMultiPlanTest, MPSExplainAllPlans) {
const int nDocs = 500;
auto ws = std::make_unique<WorkingSet>();
- auto firstPlan = std::make_unique<QueuedDataStage>(_expCtx.get(), ws.get());
- auto secondPlan = std::make_unique<QueuedDataStage>(_expCtx.get(), ws.get());
+ auto firstPlan = std::make_unique<MockStage>(_expCtx.get(), ws.get());
+ auto secondPlan = std::make_unique<MockStage>(_expCtx.get(), ws.get());
for (int i = 0; i < nDocs; ++i) {
addMember(firstPlan.get(), ws.get(), BSON("x" << 1));
// Make the second plan slower by inserting a NEED_TIME between every result.
addMember(secondPlan.get(), ws.get(), BSON("x" << 1));
- secondPlan->pushBack(PlanStage::NEED_TIME);
+ secondPlan->enqueueStateCode(PlanStage::NEED_TIME);
}
AutoGetCollectionForReadCommand ctx(_opCtx.get(), nss);
@@ -496,7 +496,7 @@ TEST_F(QueryStageMultiPlanTest, MPSExplainAllPlans) {
auto root = static_cast<MultiPlanStage*>(exec->getRootStage());
ASSERT_TRUE(root->bestPlanChosen());
- // The first QueuedDataStage should have won.
+ // The first candidate plan should have won.
ASSERT_EQ(root->bestPlanIdx(), 0);
BSONObjBuilder bob;
@@ -510,7 +510,7 @@ TEST_F(QueryStageMultiPlanTest, MPSExplainAllPlans) {
ASSERT_EQ(allPlansStats.size(), 2UL);
for (auto&& planStats : allPlansStats) {
int maxEvaluationResults = internalQueryPlanEvaluationMaxResults.load();
- ASSERT_EQ(planStats["executionStages"]["stage"].String(), "QUEUED_DATA");
+ ASSERT_EQ(planStats["executionStages"]["stage"].String(), "MOCK");
if (planStats["executionStages"]["needTime"].Int() > 0) {
// This is the losing plan. Should only have advanced about half the time.
ASSERT_LT(planStats["nReturned"].Int(), maxEvaluationResults);
@@ -545,7 +545,7 @@ TEST_F(QueryStageMultiPlanTest, MPSSummaryStats) {
uassertStatusOK(getExecutor(opCtx(), coll, std::move(cq), PlanExecutor::NO_YIELD, 0));
ASSERT_EQ(exec->getRootStage()->stageType(), STAGE_MULTI_PLAN);
- ASSERT_OK(exec->executePlan());
+ exec->executePlan();
PlanSummaryStats stats;
Explain::getSummaryStats(*exec, &stats);
@@ -596,8 +596,7 @@ TEST_F(QueryStageMultiPlanTest, ShouldReportErrorIfExceedsTimeLimitDuringPlannin
AlwaysTimeOutYieldPolicy alwaysTimeOutPolicy(serviceContext()->getFastClockSource());
const auto status = multiPlanStage.pickBestPlan(&alwaysTimeOutPolicy);
ASSERT_EQ(ErrorCodes::ExceededTimeLimit, status);
- ASSERT_STRING_CONTAINS(status.reason(),
- "multiplanner encountered a failure while selecting best plan");
+ ASSERT_STRING_CONTAINS(status.reason(), "error while multiplanner was selecting best plan");
}
TEST_F(QueryStageMultiPlanTest, ShouldReportErrorIfKilledDuringPlanning) {
@@ -686,14 +685,9 @@ TEST_F(QueryStageMultiPlanTest, AddsContextDuringException) {
createQuerySolution(), std::make_unique<ThrowyPlanStage>(_expCtx.get()), sharedWs.get());
PlanYieldPolicy yieldPolicy(PlanExecutor::NO_YIELD, _clock);
- ASSERT_THROWS_WITH_CHECK(multiPlanStage.pickBestPlan(&yieldPolicy),
- AssertionException,
- [](const AssertionException& ex) {
- ASSERT_EQ(ex.toStatus().code(), ErrorCodes::InternalError);
- ASSERT_STRING_CONTAINS(
- ex.what(),
- "exception thrown while multiplanner was selecting best plan");
- });
+ auto status = multiPlanStage.pickBestPlan(&yieldPolicy);
+ ASSERT_EQ(ErrorCodes::InternalError, status);
+ ASSERT_STRING_CONTAINS(status.reason(), "error while multiplanner was selecting best plan");
}
} // namespace
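These assertions reflect pickBestPlan() converting a child's exception into a returned Status with added context. A hedged sketch of that conversion, using toStatus() and withContext() as they exist in the server's error machinery (the real MultiPlanStage differs in structure):

    Status pickBestPlanSketch(PlanStage* candidate) {
        try {
            WorkingSetID id = WorkingSet::INVALID_ID;
            candidate->work(&id);
            return Status::OK();
        } catch (const DBException& ex) {
            return ex.toStatus().withContext(
                "error while multiplanner was selecting best plan");
        }
    }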
diff --git a/src/mongo/dbtests/query_stage_near.cpp b/src/mongo/dbtests/query_stage_near.cpp
index 2976c4cfeb1..b9c372c1dd2 100644
--- a/src/mongo/dbtests/query_stage_near.cpp
+++ b/src/mongo/dbtests/query_stage_near.cpp
@@ -120,11 +120,11 @@ public:
_intervals.push_back(std::make_unique<MockInterval>(data, min, max));
}
- virtual StatusWith<CoveredInterval*> nextInterval(OperationContext* opCtx,
- WorkingSet* workingSet,
- const Collection* collection) {
+ std::unique_ptr<CoveredInterval> nextInterval(OperationContext* opCtx,
+ WorkingSet* workingSet,
+ const Collection* collection) final {
if (_pos == static_cast<int>(_intervals.size()))
- return StatusWith<CoveredInterval*>(nullptr);
+ return nullptr;
const MockInterval& interval = *_intervals[_pos++];
@@ -142,13 +142,13 @@ public:
}
_children.push_back(std::move(queuedStage));
- return StatusWith<CoveredInterval*>(
- new CoveredInterval(_children.back().get(), interval.min, interval.max, lastInterval));
+ return std::make_unique<CoveredInterval>(
+ _children.back().get(), interval.min, interval.max, lastInterval);
}
- StatusWith<double> computeDistance(WorkingSetMember* member) final {
+ double computeDistance(WorkingSetMember* member) final {
ASSERT(member->hasObj());
- return StatusWith<double>(member->doc.value()["distance"].getDouble());
+ return member->doc.value()["distance"].getDouble();
}
virtual StageState initialize(OperationContext* opCtx,
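Under the revised interface, nextInterval() signals exhaustion by returning nullptr and reports failures by throwing, so callers no longer unwrap a StatusWith. A sketch of the new calling convention (processInterval() is a stand-in name):

    while (auto interval = nextInterval(opCtx, workingSet, collection)) {
        processInterval(*interval);  // Stand-in for the near stage's real work.
    }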
diff --git a/src/mongo/dbtests/query_stage_sort.cpp b/src/mongo/dbtests/query_stage_sort.cpp
index a81702662d8..dc49b5c49f5 100644
--- a/src/mongo/dbtests/query_stage_sort.cpp
+++ b/src/mongo/dbtests/query_stage_sort.cpp
@@ -412,7 +412,6 @@ public:
WorkingSetID id = WorkingSet::INVALID_ID;
PlanStage::StageState status = ss->work(&id);
if (PlanStage::ADVANCED != status) {
- ASSERT_NE(status, PlanStage::FAILURE);
continue;
}
WorkingSetMember* member = exec->getWorkingSet()->get(id);
@@ -506,7 +505,6 @@ public:
WorkingSetID id = WorkingSet::INVALID_ID;
PlanStage::StageState status = ss->work(&id);
if (PlanStage::ADVANCED != status) {
- ASSERT_NE(status, PlanStage::FAILURE);
continue;
}
WorkingSetMember* member = exec->getWorkingSet()->get(id);
@@ -593,9 +591,9 @@ public:
_expCtx, std::move(ws), std::move(fetchStage), coll, PlanExecutor::NO_YIELD);
auto exec = std::move(statusWithPlanExecutor.getValue());
- PlanExecutor::ExecState runnerState =
- exec->getNext(static_cast<BSONObj*>(nullptr), nullptr);
- ASSERT_EQUALS(PlanExecutor::FAILURE, runnerState);
+ ASSERT_THROWS_CODE(exec->getNext(static_cast<BSONObj*>(nullptr), nullptr),
+ DBException,
+ ErrorCodes::BadValue);
}
};
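The same convention now applies at the PlanExecutor level: getNext() throws on runtime errors instead of returning PlanExecutor::FAILURE. A sketch of the resulting consumption pattern (process() is a stand-in):

    BSONObj obj;
    try {
        while (PlanExecutor::ADVANCED == exec->getNext(&obj, nullptr)) {
            process(obj);  // Stand-in consumer.
        }
    } catch (const DBException& ex) {
        // Runtime errors land here, where callers previously checked FAILURE.
    }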
diff --git a/src/mongo/dbtests/query_stage_subplan.cpp b/src/mongo/dbtests/query_stage_subplan.cpp
index 47a67d8804f..f1b2ec000df 100644
--- a/src/mongo/dbtests/query_stage_subplan.cpp
+++ b/src/mongo/dbtests/query_stage_subplan.cpp
@@ -505,7 +505,6 @@ TEST_F(QueryStageSubplanTest, QueryStageSubplanPlanRootedOrNE) {
while (stageState != PlanStage::IS_EOF) {
WorkingSetID id = WorkingSet::INVALID_ID;
stageState = subplan->work(&id);
- ASSERT_NE(stageState, PlanStage::FAILURE);
if (stageState == PlanStage::ADVANCED) {
++numResults;
}
diff --git a/src/mongo/dbtests/query_stage_trial.cpp b/src/mongo/dbtests/query_stage_trial.cpp
index 5b843c7d1ba..72bb16ba34f 100644
--- a/src/mongo/dbtests/query_stage_trial.cpp
+++ b/src/mongo/dbtests/query_stage_trial.cpp
@@ -32,7 +32,7 @@
#include <boost/optional.hpp>
#include <memory>
-#include "mongo/db/exec/queued_data_stage.h"
+#include "mongo/db/exec/mock_stage.h"
#include "mongo/db/exec/trial_stage.h"
#include "mongo/db/exec/working_set.h"
#include "mongo/db/operation_context.h"
@@ -53,19 +53,19 @@ public:
_expCtx(make_intrusive<ExpressionContext>(_opCtx.get(), nullptr, kTestNss)) {}
protected:
- // Pushes BSONObjs from the given vector into the given QueuedDataStage. Each empty BSONObj in
+ // Pushes BSONObjs from the given vector into the given MockStage. Each empty BSONObj in
// the vector causes a NEED_TIME to be queued up at that point instead of a result.
- void queueData(const std::vector<BSONObj>& results, QueuedDataStage* queuedData) {
+ void queueData(const std::vector<BSONObj>& results, MockStage* mockStage) {
for (auto result : results) {
if (result.isEmpty()) {
- queuedData->pushBack(PlanStage::NEED_TIME);
+ mockStage->enqueueStateCode(PlanStage::NEED_TIME);
continue;
}
const auto id = _ws.allocate();
auto* member = _ws.get(id);
member->doc.setValue(Document{result});
_ws.transitionToOwnedObj(id);
- queuedData->pushBack(id);
+ mockStage->enqueueAdvanced(id);
}
}
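As the comment notes, an empty BSONObj in the input vector becomes a NEED_TIME. For example, this hypothetical call enqueues two ADVANCED results separated by one stall:

    queueData({BSON("_id" << 0), BSONObj(), BSON("_id" << 1)}, mockStage.get());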
@@ -108,8 +108,8 @@ protected:
};
TEST_F(TrialStageTest, AdoptsTrialPlanIfTrialSucceeds) {
- auto trialPlan = std::make_unique<QueuedDataStage>(_expCtx.get(), ws());
- auto backupPlan = std::make_unique<QueuedDataStage>(_expCtx.get(), ws());
+ auto trialPlan = std::make_unique<MockStage>(_expCtx.get(), ws());
+ auto backupPlan = std::make_unique<MockStage>(_expCtx.get(), ws());
// Seed the trial plan with 20 results and no NEED_TIMEs.
std::vector<BSONObj> trialResults;
@@ -138,8 +138,8 @@ TEST_F(TrialStageTest, AdoptsTrialPlanIfTrialSucceeds) {
}
TEST_F(TrialStageTest, AdoptsTrialPlanIfTrialPlanHitsEOF) {
- auto trialPlan = std::make_unique<QueuedDataStage>(_expCtx.get(), ws());
- auto backupPlan = std::make_unique<QueuedDataStage>(_expCtx.get(), ws());
+ auto trialPlan = std::make_unique<MockStage>(_expCtx.get(), ws());
+ auto backupPlan = std::make_unique<MockStage>(_expCtx.get(), ws());
// Seed the trial plan with 5 results and no NEED_TIMEs.
std::vector<BSONObj> trialResults;
@@ -173,8 +173,8 @@ TEST_F(TrialStageTest, AdoptsTrialPlanIfTrialPlanHitsEOF) {
}
TEST_F(TrialStageTest, AdoptsBackupPlanIfTrialDoesNotSucceed) {
- auto trialPlan = std::make_unique<QueuedDataStage>(_expCtx.get(), ws());
- auto backupPlan = std::make_unique<QueuedDataStage>(_expCtx.get(), ws());
+ auto trialPlan = std::make_unique<MockStage>(_expCtx.get(), ws());
+ auto backupPlan = std::make_unique<MockStage>(_expCtx.get(), ws());
// Seed the trial plan with 20 results. Every second result will produce a NEED_TIME.
std::vector<BSONObj> trialResults;
@@ -210,45 +210,5 @@ TEST_F(TrialStageTest, AdoptsBackupPlanIfTrialDoesNotSucceed) {
ASSERT_TRUE(trialStage->isEOF());
}
-TEST_F(TrialStageTest, AdoptsBackupPlanIfTrialPlanDies) {
- auto trialPlan = std::make_unique<QueuedDataStage>(_expCtx.get(), ws());
- auto backupPlan = std::make_unique<QueuedDataStage>(_expCtx.get(), ws());
-
- // Seed the trial plan with 2 results followed by a PlanStage::FAILURE.
- queueData({BSON("_id" << 0), BSON("_id" << 1)}, trialPlan.get());
- trialPlan->pushBack(PlanStage::FAILURE);
-
- // Seed the backup plan with 20 different results, so that we can validate that we see the
- // correct dataset once the trial phase is complete.
- std::vector<BSONObj> backupResults;
- for (auto i = 0; i < 20; ++i) {
- backupResults.push_back(BSON("_id" << (-i)));
- }
- queueData(backupResults, backupPlan.get());
-
- // We schedule the trial to run for 10 works. Because we will encounter a PlanStage::FAILURE
- // before this point, the trial will complete early and the backup plan will be adopted.
- auto trialStage = std::make_unique<TrialStage>(
- _expCtx.get(), ws(), std::move(trialPlan), std::move(backupPlan), 10, 0.75);
-
- ASSERT_OK(trialStage->pickBestPlan(yieldPolicy().get()));
-
- // The trial phase completed and we picked the backup plan.
- ASSERT_TRUE(trialStage->isTrialPhaseComplete());
- ASSERT_TRUE(trialStage->pickedBackupPlan());
-
- // Get the specific stats for the stage and confirm that the trial completed early.
- auto* stats = static_cast<const TrialStats*>(trialStage->getSpecificStats());
- ASSERT_EQ(stats->trialPeriodMaxWorks, 10U);
- ASSERT_EQ(stats->trialWorks, 2U);
-
- // Confirm that we see the full backupPlan results when we iterate the trialStage.
- for (auto result : backupResults) {
- ASSERT_BSONOBJ_EQ(result, *nextResult(trialStage.get()));
- }
- ASSERT_FALSE(nextResult(trialStage.get()));
- ASSERT_TRUE(trialStage->isEOF());
-}
-
} // namespace
} // namespace mongo