summaryrefslogtreecommitdiff
path: root/src/mongo/db/exec
diff options
context:
space:
mode:
authorDavid Storch <david.storch@mongodb.com>2020-05-18 18:20:48 -0400
committerEvergreen Agent <no-reply@evergreen.mongodb.com>2020-05-29 18:44:40 +0000
commitd295b6646fcc815e73ad3085b212ad14c8c6de01 (patch)
tree3b17d6dcf49643018e5c1220fe61cb5808978ef0 /src/mongo/db/exec
parent84a7b81a73c7abfff42823b87612c9d50ea50e67 (diff)
downloadmongo-d295b6646fcc815e73ad3085b212ad14c8c6de01.tar.gz
SERVER-43821 Make PlanStage and PlanExecutor return errors by throwing
This eliminates the need for the FAILURE status codes in PlanStage and PlanExecutor, and brings query execution's error reporting more in line with that of the rest of the server. It also makes it easier for future implementations of PlanExecutor to comply with the interface.
Diffstat (limited to 'src/mongo/db/exec')
-rw-r--r--src/mongo/db/exec/and_hash.cpp28
-rw-r--r--src/mongo/db/exec/and_sorted.cpp21
-rw-r--r--src/mongo/db/exec/cached_plan.cpp43
-rw-r--r--src/mongo/db/exec/collection_scan.cpp35
-rw-r--r--src/mongo/db/exec/collection_scan.h4
-rw-r--r--src/mongo/db/exec/count.cpp6
-rw-r--r--src/mongo/db/exec/delete.cpp7
-rw-r--r--src/mongo/db/exec/fetch.cpp6
-rw-r--r--src/mongo/db/exec/geo_near.cpp35
-rw-r--r--src/mongo/db/exec/geo_near.h16
-rw-r--r--src/mongo/db/exec/limit.cpp5
-rw-r--r--src/mongo/db/exec/merge_sort.cpp6
-rw-r--r--src/mongo/db/exec/mock_stage.cpp72
-rw-r--r--src/mongo/db/exec/mock_stage.h109
-rw-r--r--src/mongo/db/exec/multi_plan.cpp110
-rw-r--r--src/mongo/db/exec/multi_plan.h29
-rw-r--r--src/mongo/db/exec/near.cpp36
-rw-r--r--src/mongo/db/exec/near.h20
-rw-r--r--src/mongo/db/exec/or.cpp6
-rw-r--r--src/mongo/db/exec/pipeline_proxy.cpp4
-rw-r--r--src/mongo/db/exec/plan_stage.h47
-rw-r--r--src/mongo/db/exec/projection.cpp25
-rw-r--r--src/mongo/db/exec/projection.h8
-rw-r--r--src/mongo/db/exec/queued_data_stage.cpp32
-rw-r--r--src/mongo/db/exec/queued_data_stage.h12
-rw-r--r--src/mongo/db/exec/queued_data_stage_test.cpp11
-rw-r--r--src/mongo/db/exec/return_key.cpp21
-rw-r--r--src/mongo/db/exec/return_key.h2
-rw-r--r--src/mongo/db/exec/skip.cpp6
-rw-r--r--src/mongo/db/exec/sort.cpp19
-rw-r--r--src/mongo/db/exec/sort_key_generator.cpp12
-rw-r--r--src/mongo/db/exec/stagedebug_cmd.cpp12
-rw-r--r--src/mongo/db/exec/text_match.cpp10
-rw-r--r--src/mongo/db/exec/text_or.cpp13
-rw-r--r--src/mongo/db/exec/trial_stage.cpp12
-rw-r--r--src/mongo/db/exec/update_stage.cpp11
-rw-r--r--src/mongo/db/exec/working_set_common.cpp73
-rw-r--r--src/mongo/db/exec/working_set_common.h50
38 files changed, 351 insertions, 623 deletions
diff --git a/src/mongo/db/exec/and_hash.cpp b/src/mongo/db/exec/and_hash.cpp
index d2666c905e6..f620db00faf 100644
--- a/src/mongo/db/exec/and_hash.cpp
+++ b/src/mongo/db/exec/and_hash.cpp
@@ -140,14 +140,6 @@ PlanStage::StageState AndHashStage::doWork(WorkingSetID* out) {
// yield.
_ws->get(_lookAheadResults[i])->makeObjOwnedIfNeeded();
break; // Stop looking at this child.
- } else if (PlanStage::FAILURE == childStatus) {
- // The stage which produces a failure is responsible for allocating a working
- // set member with error details.
- invariant(WorkingSet::INVALID_ID != _lookAheadResults[i]);
- *out = _lookAheadResults[i];
- _hashingChildren = false;
- _dataMap.clear();
- return childStatus;
}
// We ignore NEED_TIME. TODO: what do we want to do if we get NEED_YIELD here?
}
@@ -165,12 +157,10 @@ PlanStage::StageState AndHashStage::doWork(WorkingSetID* out) {
if (_hashingChildren) {
// Check memory usage of previously hashed results.
if (_memUsage > _maxMemUsage) {
- str::stream ss;
- ss << "hashed AND stage buffered data usage of " << _memUsage
+ StringBuilder sb;
+ sb << "hashed AND stage buffered data usage of " << _memUsage
<< " bytes exceeds internal limit of " << kDefaultMaxMemUsageBytes << " bytes";
- Status status(ErrorCodes::Overflow, ss);
- *out = WorkingSetCommon::allocateStatusMember(_ws, status);
- return PlanStage::FAILURE;
+ uasserted(ErrorCodes::QueryExceededMemoryLimitNoDiskUseAllowed, sb.str());
}
if (0 == _currentChild) {
@@ -279,12 +269,6 @@ PlanStage::StageState AndHashStage::readFirstChild(WorkingSetID* out) {
_specificStats.mapAfterChild.push_back(_dataMap.size());
return PlanStage::NEED_TIME;
- } else if (PlanStage::FAILURE == childStatus) {
- // The stage which produces a failure is responsible for allocating a working set member
- // with error details.
- invariant(WorkingSet::INVALID_ID != id);
- *out = id;
- return childStatus;
} else {
if (PlanStage::NEED_YIELD == childStatus) {
*out = id;
@@ -364,12 +348,6 @@ PlanStage::StageState AndHashStage::hashOtherChildren(WorkingSetID* out) {
}
return PlanStage::NEED_TIME;
- } else if (PlanStage::FAILURE == childStatus) {
- // The stage which produces a failure is responsible for allocating a working set member
- // with error details.
- invariant(WorkingSet::INVALID_ID != id);
- *out = id;
- return childStatus;
} else {
if (PlanStage::NEED_YIELD == childStatus) {
*out = id;
diff --git a/src/mongo/db/exec/and_sorted.cpp b/src/mongo/db/exec/and_sorted.cpp
index 57d2eb08b52..5d35a7a1bd2 100644
--- a/src/mongo/db/exec/and_sorted.cpp
+++ b/src/mongo/db/exec/and_sorted.cpp
@@ -116,19 +116,6 @@ PlanStage::StageState AndSortedStage::getTargetRecordId(WorkingSetID* out) {
} else if (PlanStage::IS_EOF == state) {
_isEOF = true;
return state;
- } else if (PlanStage::FAILURE == state) {
- *out = id;
- // If a stage fails, it may create a status WSM to indicate why it
- // failed, in which case 'id' is valid. If ID is invalid, we
- // create our own error message.
- if (WorkingSet::INVALID_ID == id) {
- str::stream ss;
- ss << "sorted AND stage failed to read in results from first child";
- Status status(ErrorCodes::InternalError, ss);
- *out = WorkingSetCommon::allocateStatusMember(_ws, status);
- }
- _isEOF = true;
- return state;
} else {
if (PlanStage::NEED_YIELD == state) {
*out = id;
@@ -208,14 +195,6 @@ PlanStage::StageState AndSortedStage::moveTowardTargetRecordId(WorkingSetID* out
_isEOF = true;
_ws->free(_targetId);
return state;
- } else if (PlanStage::FAILURE == state) {
- // The stage which produces a failure is responsible for allocating a working set member
- // with error details.
- invariant(WorkingSet::INVALID_ID != id);
- *out = id;
- _isEOF = true;
- _ws->free(_targetId);
- return state;
} else {
if (PlanStage::NEED_YIELD == state) {
*out = id;
diff --git a/src/mongo/db/exec/cached_plan.cpp b/src/mongo/db/exec/cached_plan.cpp
index 99ce35a7d5e..a65e0639776 100644
--- a/src/mongo/db/exec/cached_plan.cpp
+++ b/src/mongo/db/exec/cached_plan.cpp
@@ -102,7 +102,28 @@ Status CachedPlanStage::pickBestPlan(PlanYieldPolicy* yieldPolicy) {
}
WorkingSetID id = WorkingSet::INVALID_ID;
- PlanStage::StageState state = child()->work(&id);
+ PlanStage::StageState state;
+ try {
+ state = child()->work(&id);
+ } catch (const ExceptionFor<ErrorCodes::QueryExceededMemoryLimitNoDiskUseAllowed>& ex) {
+ // The plan failed by hitting the limit we impose on memory consumption. It's possible
+ // that a different plan is less resource-intensive, so we fall back to replanning the
+ // whole query. We neither evict the existing cache entry nor cache the result of
+ // replanning.
+ LOGV2_DEBUG(20579,
+ 1,
+ "Execution of cached plan failed, falling back to replan. query: "
+ "{query} planSummary: {planSummary} status: {status}",
+                    "Execution of cached plan failed, falling back to replan",
+ "query"_attr = redact(_canonicalQuery->toStringShort()),
+ "planSummary"_attr = Explain::getPlanSummary(child().get()),
+ "status"_attr = redact(ex.toStatus()));
+
+ const bool shouldCache = false;
+ return replan(yieldPolicy,
+ shouldCache,
+ str::stream() << "cached plan returned: " << ex.toStatus());
+ }
if (PlanStage::ADVANCED == state) {
// Save result for later.
@@ -136,26 +157,6 @@ Status CachedPlanStage::pickBestPlan(PlanYieldPolicy* yieldPolicy) {
if (!yieldStatus.isOK()) {
return yieldStatus;
}
- } else if (PlanStage::FAILURE == state) {
- // On failure, fall back to replanning the whole query. We neither evict the
- // existing cache entry nor cache the result of replanning.
- BSONObj statusObj = WorkingSetCommon::getStatusMemberDocument(*_ws, id)->toBson();
-
- LOGV2_DEBUG(20579,
- 1,
- "Execution of cached plan failed, falling back to replan. query: "
- "{canonicalQuery_Short} planSummary: {Explain_getPlanSummary_child_get} "
- "status: {statusObj}",
- "canonicalQuery_Short"_attr = redact(_canonicalQuery->toStringShort()),
- "Explain_getPlanSummary_child_get"_attr =
- Explain::getPlanSummary(child().get()),
- "statusObj"_attr = redact(statusObj));
-
- const bool shouldCache = false;
- return replan(yieldPolicy,
- shouldCache,
- str::stream() << "cached plan returned: "
- << WorkingSetCommon::toStatusString(statusObj));
} else {
invariant(PlanStage::NEED_TIME == state);
}
diff --git a/src/mongo/db/exec/collection_scan.cpp b/src/mongo/db/exec/collection_scan.cpp
index ab39d7d54b4..00547983fc8 100644
--- a/src/mongo/db/exec/collection_scan.cpp
+++ b/src/mongo/db/exec/collection_scan.cpp
@@ -133,12 +133,10 @@ PlanStage::StageState CollectionScan::doWork(WorkingSetID* out) {
// only time we'd need to create a cursor after already getting a record out of it
// and updating our _lastSeenId.
if (!_cursor->seekExact(_lastSeenId)) {
- Status status(ErrorCodes::CappedPositionLost,
- str::stream() << "CollectionScan died due to failure to restore "
- << "tailable cursor position. "
- << "Last seen record id: " << _lastSeenId);
- *out = WorkingSetCommon::allocateStatusMember(_workingSet, status);
- return PlanStage::FAILURE;
+ uasserted(ErrorCodes::CappedPositionLost,
+ str::stream() << "CollectionScan died due to failure to restore "
+ << "tailable cursor position. "
+ << "Last seen record id: " << _lastSeenId);
}
}
@@ -152,14 +150,12 @@ PlanStage::StageState CollectionScan::doWork(WorkingSetID* out) {
// returned this one prior to the resume.
auto recordIdToSeek = *_params.resumeAfterRecordId;
if (!_cursor->seekExact(recordIdToSeek)) {
- Status status(
+ uasserted(
ErrorCodes::KeyNotFound,
str::stream()
<< "Failed to resume collection scan: the recordId from which we are "
<< "attempting to resume no longer exists in the collection. "
<< "recordId: " << recordIdToSeek);
- *out = WorkingSetCommon::allocateStatusMember(_workingSet, status);
- return PlanStage::FAILURE;
}
}
@@ -205,11 +201,7 @@ PlanStage::StageState CollectionScan::doWork(WorkingSetID* out) {
_lastSeenId = record->id;
if (_params.shouldTrackLatestOplogTimestamp) {
- auto status = setLatestOplogEntryTimestamp(*record);
- if (!status.isOK()) {
- *out = WorkingSetCommon::allocateStatusMember(_workingSet, status);
- return PlanStage::FAILURE;
- }
+ setLatestOplogEntryTimestamp(*record);
}
WorkingSetID id = _workingSet->allocate();
@@ -221,17 +213,14 @@ PlanStage::StageState CollectionScan::doWork(WorkingSetID* out) {
return returnIfMatches(member, id, out);
}
-Status CollectionScan::setLatestOplogEntryTimestamp(const Record& record) {
+void CollectionScan::setLatestOplogEntryTimestamp(const Record& record) {
auto tsElem = record.data.toBson()[repl::OpTime::kTimestampFieldName];
- if (tsElem.type() != BSONType::bsonTimestamp) {
- Status status(ErrorCodes::InternalError,
- str::stream() << "CollectionScan was asked to track latest operation time, "
- "but found a result without a valid 'ts' field: "
- << record.data.toBson().toString());
- return status;
- }
+ uassert(ErrorCodes::Error(4382100),
+ str::stream() << "CollectionScan was asked to track latest operation time, "
+ "but found a result without a valid 'ts' field: "
+ << record.data.toBson().toString(),
+ tsElem.type() == BSONType::bsonTimestamp);
_latestOplogEntryTimestamp = std::max(_latestOplogEntryTimestamp, tsElem.timestamp());
- return Status::OK();
}
PlanStage::StageState CollectionScan::returnIfMatches(WorkingSetMember* member,
diff --git a/src/mongo/db/exec/collection_scan.h b/src/mongo/db/exec/collection_scan.h
index b19915bb2c5..74d9d8ddb29 100644
--- a/src/mongo/db/exec/collection_scan.h
+++ b/src/mongo/db/exec/collection_scan.h
@@ -95,10 +95,10 @@ private:
/**
* Extracts the timestamp from the 'ts' field of 'record', and sets '_latestOplogEntryTimestamp'
- * to that time if it isn't already greater. Returns an error if the 'ts' field cannot be
+ * to that time if it isn't already greater. Throws an exception if the 'ts' field cannot be
* extracted.
*/
- Status setLatestOplogEntryTimestamp(const Record& record);
+ void setLatestOplogEntryTimestamp(const Record& record);
// WorkingSet is not owned by us.
WorkingSet* _workingSet;
diff --git a/src/mongo/db/exec/count.cpp b/src/mongo/db/exec/count.cpp
index c646e172f29..07158d20b08 100644
--- a/src/mongo/db/exec/count.cpp
+++ b/src/mongo/db/exec/count.cpp
@@ -84,12 +84,6 @@ PlanStage::StageState CountStage::doWork(WorkingSetID* out) {
if (PlanStage::IS_EOF == state) {
_commonStats.isEOF = true;
return PlanStage::IS_EOF;
- } else if (PlanStage::FAILURE == state) {
- // The stage which produces a failure is responsible for allocating a working set member
- // with error details.
- invariant(WorkingSet::INVALID_ID != id);
- *out = id;
- return state;
} else if (PlanStage::ADVANCED == state) {
// We got a result. If we're still skipping, then decrement the number left to skip.
// Otherwise increment the count until we hit the limit.
diff --git a/src/mongo/db/exec/delete.cpp b/src/mongo/db/exec/delete.cpp
index 3911a3087b5..a53447e9f03 100644
--- a/src/mongo/db/exec/delete.cpp
+++ b/src/mongo/db/exec/delete.cpp
@@ -123,13 +123,6 @@ PlanStage::StageState DeleteStage::doWork(WorkingSetID* out) {
case PlanStage::ADVANCED:
break;
- case PlanStage::FAILURE:
- // The stage which produces a failure is responsible for allocating a working set
- // member with error details.
- invariant(WorkingSet::INVALID_ID != id);
- *out = id;
- return status;
-
case PlanStage::NEED_TIME:
return status;
diff --git a/src/mongo/db/exec/fetch.cpp b/src/mongo/db/exec/fetch.cpp
index 914158d3191..9528e9dc085 100644
--- a/src/mongo/db/exec/fetch.cpp
+++ b/src/mongo/db/exec/fetch.cpp
@@ -118,12 +118,6 @@ PlanStage::StageState FetchStage::doWork(WorkingSetID* out) {
}
return returnIfMatches(member, id, out);
- } else if (PlanStage::FAILURE == status) {
- // The stage which produces a failure is responsible for allocating a working set member
- // with error details.
- invariant(WorkingSet::INVALID_ID != id);
- *out = id;
- return status;
} else if (PlanStage::NEED_YIELD == status) {
*out = id;
}
diff --git a/src/mongo/db/exec/geo_near.cpp b/src/mongo/db/exec/geo_near.cpp
index c12bd3e11ed..6d363303963 100644
--- a/src/mongo/db/exec/geo_near.cpp
+++ b/src/mongo/db/exec/geo_near.cpp
@@ -138,8 +138,7 @@ static void extractGeometries(const BSONObj& doc,
}
}
-static StatusWith<double> computeGeoNearDistance(const GeoNearParams& nearParams,
- WorkingSetMember* member) {
+static double computeGeoNearDistance(const GeoNearParams& nearParams, WorkingSetMember* member) {
//
// Generic GeoNear distance computation
// Distances are computed by projecting the stored geometry into the query CRS, and
@@ -183,7 +182,7 @@ static StatusWith<double> computeGeoNearDistance(const GeoNearParams& nearParams
if (minDistance < 0) {
// No distance to report
- return StatusWith<double>(-1);
+ return -1;
}
if (nearParams.addDistMeta) {
@@ -201,7 +200,7 @@ static StatusWith<double> computeGeoNearDistance(const GeoNearParams& nearParams
member->metadata().setGeoNearPoint(minDistanceMetadata);
}
- return StatusWith<double>(minDistance);
+ return minDistance;
}
static R2Annulus geoNearDistanceBounds(const GeoNearExpression& query) {
@@ -565,13 +564,11 @@ static R2Annulus projectBoundsToTwoDDegrees(R2Annulus sphereBounds) {
outerDegrees + maxErrorDegrees);
}
-StatusWith<NearStage::CoveredInterval*> //
-GeoNear2DStage::nextInterval(OperationContext* opCtx,
- WorkingSet* workingSet,
- const Collection* collection) {
+std::unique_ptr<NearStage::CoveredInterval> GeoNear2DStage::nextInterval(
+ OperationContext* opCtx, WorkingSet* workingSet, const Collection* collection) {
// The search is finished if we searched at least once and all the way to the edge
if (_currBounds.getInner() >= 0 && _currBounds.getOuter() == _fullBounds.getOuter()) {
- return StatusWith<CoveredInterval*>(nullptr);
+ return nullptr;
}
//
@@ -710,11 +707,11 @@ GeoNear2DStage::nextInterval(OperationContext* opCtx,
_children.emplace_back(std::make_unique<FetchStageWithMatch>(
expCtx(), workingSet, std::move(scan), docMatcher, collection));
- return StatusWith<CoveredInterval*>(new CoveredInterval(
- _children.back().get(), nextBounds.getInner(), nextBounds.getOuter(), isLastInterval));
+ return std::make_unique<CoveredInterval>(
+ _children.back().get(), nextBounds.getInner(), nextBounds.getOuter(), isLastInterval);
}
-StatusWith<double> GeoNear2DStage::computeDistance(WorkingSetMember* member) {
+double GeoNear2DStage::computeDistance(WorkingSetMember* member) {
return computeGeoNearDistance(_nearParams, member);
}
@@ -959,13 +956,11 @@ PlanStage::StageState GeoNear2DSphereStage::initialize(OperationContext* opCtx,
return state;
}
-StatusWith<NearStage::CoveredInterval*> //
-GeoNear2DSphereStage::nextInterval(OperationContext* opCtx,
- WorkingSet* workingSet,
- const Collection* collection) {
+std::unique_ptr<NearStage::CoveredInterval> GeoNear2DSphereStage::nextInterval(
+ OperationContext* opCtx, WorkingSet* workingSet, const Collection* collection) {
// The search is finished if we searched at least once and all the way to the edge
if (_currBounds.getInner() >= 0 && _currBounds.getOuter() == _fullBounds.getOuter()) {
- return StatusWith<CoveredInterval*>(nullptr);
+ return nullptr;
}
//
@@ -1033,11 +1028,11 @@ GeoNear2DSphereStage::nextInterval(OperationContext* opCtx,
_children.emplace_back(std::make_unique<FetchStage>(
expCtx(), workingSet, std::move(scan), _nearParams.filter, collection));
- return StatusWith<CoveredInterval*>(new CoveredInterval(
- _children.back().get(), nextBounds.getInner(), nextBounds.getOuter(), isLastInterval));
+ return std::make_unique<CoveredInterval>(
+ _children.back().get(), nextBounds.getInner(), nextBounds.getOuter(), isLastInterval);
}
-StatusWith<double> GeoNear2DSphereStage::computeDistance(WorkingSetMember* member) {
+double GeoNear2DSphereStage::computeDistance(WorkingSetMember* member) {
return computeGeoNearDistance(_nearParams, member);
}
diff --git a/src/mongo/db/exec/geo_near.h b/src/mongo/db/exec/geo_near.h
index eb096064d53..dd3d33be97d 100644
--- a/src/mongo/db/exec/geo_near.h
+++ b/src/mongo/db/exec/geo_near.h
@@ -74,11 +74,11 @@ public:
const IndexDescriptor* twoDIndex);
protected:
- StatusWith<CoveredInterval*> nextInterval(OperationContext* opCtx,
- WorkingSet* workingSet,
- const Collection* collection) final;
+ std::unique_ptr<CoveredInterval> nextInterval(OperationContext* opCtx,
+ WorkingSet* workingSet,
+ const Collection* collection) final;
- StatusWith<double> computeDistance(WorkingSetMember* member) final;
+ double computeDistance(WorkingSetMember* member) final;
PlanStage::StageState initialize(OperationContext* opCtx,
WorkingSet* workingSet,
@@ -142,11 +142,11 @@ public:
~GeoNear2DSphereStage();
protected:
- StatusWith<CoveredInterval*> nextInterval(OperationContext* opCtx,
- WorkingSet* workingSet,
- const Collection* collection) final;
+ std::unique_ptr<CoveredInterval> nextInterval(OperationContext* opCtx,
+ WorkingSet* workingSet,
+ const Collection* collection) final;
- StatusWith<double> computeDistance(WorkingSetMember* member) final;
+ double computeDistance(WorkingSetMember* member) final;
PlanStage::StageState initialize(OperationContext* opCtx,
WorkingSet* workingSet,
diff --git a/src/mongo/db/exec/limit.cpp b/src/mongo/db/exec/limit.cpp
index 41505be622f..220d86d31be 100644
--- a/src/mongo/db/exec/limit.cpp
+++ b/src/mongo/db/exec/limit.cpp
@@ -70,11 +70,6 @@ PlanStage::StageState LimitStage::doWork(WorkingSetID* out) {
if (PlanStage::ADVANCED == status) {
*out = id;
--_numToReturn;
- } else if (PlanStage::FAILURE == status) {
- // The stage which produces a failure is responsible for allocating a working set member
- // with error details.
- invariant(WorkingSet::INVALID_ID != id);
- *out = id;
} else if (PlanStage::NEED_YIELD == status) {
*out = id;
}
diff --git a/src/mongo/db/exec/merge_sort.cpp b/src/mongo/db/exec/merge_sort.cpp
index 58a3e33f241..4af01272d05 100644
--- a/src/mongo/db/exec/merge_sort.cpp
+++ b/src/mongo/db/exec/merge_sort.cpp
@@ -130,12 +130,6 @@ PlanStage::StageState MergeSortStage::doWork(WorkingSetID* out) {
// anymore.
_noResultToMerge.pop();
return PlanStage::NEED_TIME;
- } else if (PlanStage::FAILURE == code) {
- // The stage which produces a failure is responsible for allocating a working set member
- // with error details.
- invariant(WorkingSet::INVALID_ID != id);
- *out = id;
- return code;
} else if (PlanStage::NEED_YIELD == code) {
*out = id;
return code;
diff --git a/src/mongo/db/exec/mock_stage.cpp b/src/mongo/db/exec/mock_stage.cpp
new file mode 100644
index 00000000000..a84c7995408
--- /dev/null
+++ b/src/mongo/db/exec/mock_stage.cpp
@@ -0,0 +1,72 @@
+/**
+ * Copyright (C) 2020-present MongoDB, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the Server Side Public License, version 1,
+ * as published by MongoDB, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * Server Side Public License for more details.
+ *
+ * You should have received a copy of the Server Side Public License
+ * along with this program. If not, see
+ * <http://www.mongodb.com/licensing/server-side-public-license>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the Server Side Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#include "mongo/platform/basic.h"
+
+#include "mongo/db/exec/mock_stage.h"
+
+#include "mongo/util/visit_helper.h"
+
+namespace mongo {
+
+MockStage::MockStage(ExpressionContext* expCtx, WorkingSet* ws)
+ : PlanStage(kStageType.rawData(), expCtx), _ws(ws) {}
+
+std::unique_ptr<PlanStageStats> MockStage::getStats() {
+ _commonStats.isEOF = isEOF();
+ std::unique_ptr<PlanStageStats> ret =
+ std::make_unique<PlanStageStats>(_commonStats, StageType::STAGE_MOCK);
+ ret->specific = std::make_unique<MockStats>(_specificStats);
+ return ret;
+}
+
+PlanStage::StageState MockStage::doWork(WorkingSetID* out) {
+ if (isEOF()) {
+ return PlanStage::IS_EOF;
+ }
+
+ auto nextResult = _results.front();
+ _results.pop();
+
+ auto returnState = stdx::visit(
+ visit_helper::Overloaded{
+ [](WorkingSetID wsid) -> PlanStage::StageState { return PlanStage::ADVANCED; },
+ [](PlanStage::StageState state) -> PlanStage::StageState { return state; },
+ [](Status status) -> PlanStage::StageState {
+ uassertStatusOK(status);
+ MONGO_UNREACHABLE;
+ }},
+ nextResult);
+ if (returnState == PlanStage::ADVANCED) {
+ *out = stdx::get<WorkingSetID>(nextResult);
+ }
+ return returnState;
+}
+
+} // namespace mongo
diff --git a/src/mongo/db/exec/mock_stage.h b/src/mongo/db/exec/mock_stage.h
new file mode 100644
index 00000000000..02e75a52bd4
--- /dev/null
+++ b/src/mongo/db/exec/mock_stage.h
@@ -0,0 +1,109 @@
+/**
+ * Copyright (C) 2020-present MongoDB, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the Server Side Public License, version 1,
+ * as published by MongoDB, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * Server Side Public License for more details.
+ *
+ * You should have received a copy of the Server Side Public License
+ * along with this program. If not, see
+ * <http://www.mongodb.com/licensing/server-side-public-license>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the Server Side Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#pragma once
+
+#include <queue>
+
+#include "mongo/db/exec/plan_stage.h"
+
+namespace mongo {
+
+/**
+ * A stage designed for use in unit tests. The test can queue a sequence of results which will be
+ * returned to the parent stage using the 'enqueue*()' methods.
+ */
+class MockStage final : public PlanStage {
+public:
+ static constexpr StringData kStageType = "MOCK"_sd;
+
+ MockStage(ExpressionContext* expCtx, WorkingSet* ws);
+
+ StageState doWork(WorkingSetID* out) final;
+
+ bool isEOF() final {
+ return _results.empty();
+ }
+
+ StageType stageType() const final {
+ return STAGE_MOCK;
+ }
+
+ std::unique_ptr<PlanStageStats> getStats() final;
+
+ const SpecificStats* getSpecificStats() const final {
+ return &_specificStats;
+ }
+
+ /**
+ * Adds a WorkingSetMember to the back of the queue.
+ *
+ * The caller is responsible for allocating 'id' and filling out the WSM keyed by 'id'
+ * appropriately.
+ *
+     * The MockStage takes ownership of 'id', so the caller should not call WorkingSet::free()
+ * on it.
+ */
+ void enqueueAdvanced(WorkingSetID wsid) {
+ _results.push(wsid);
+ }
+
+ /**
+ * Adds a StageState code such as 'NEED_TIME' or 'NEED_YIELD' to the back of the queue. Illegal
+ * to call with 'ADVANCED' -- 'enqueueAdvanced()' should be used instead. Also illegal to call
+ * with 'IS_EOF', since EOF is implied when the mock stage's queue is emptied.
+ */
+ void enqueueStateCode(StageState stageState) {
+ invariant(stageState != PlanStage::ADVANCED);
+ invariant(stageState != PlanStage::IS_EOF);
+ _results.push(stageState);
+ }
+
+ /**
+ * Adds 'status' to the queue. When the 'status' is dequeued, it will be thrown from 'work()' as
+ * an exception.
+ */
+ void enqueueError(Status status) {
+ invariant(!status.isOK());
+ _results.push(status);
+ }
+
+private:
+ // The mock stage holds a queue of objects of this type. Each element in the queue can either be
+ // a document to return, a StageState code, or a Status representing an error.
+ using MockResult = stdx::variant<WorkingSetID, PlanStage::StageState, Status>;
+
+ WorkingSet* _ws;
+
+ std::queue<MockResult> _results;
+
+ MockStats _specificStats;
+};
+
+} // namespace mongo
diff --git a/src/mongo/db/exec/multi_plan.cpp b/src/mongo/db/exec/multi_plan.cpp
index 97c16a8a009..2c0a28b92bd 100644
--- a/src/mongo/db/exec/multi_plan.cpp
+++ b/src/mongo/db/exec/multi_plan.cpp
@@ -78,10 +78,7 @@ MultiPlanStage::MultiPlanStage(ExpressionContext* expCtx,
_cachingMode(cachingMode),
_query(cq),
_bestPlanIdx(kNoSuchPlan),
- _backupPlanIdx(kNoSuchPlan),
- _failure(false),
- _failureCount(0),
- _statusMemberId(WorkingSet::INVALID_ID) {}
+ _backupPlanIdx(kNoSuchPlan) {}
void MultiPlanStage::addPlan(std::unique_ptr<QuerySolution> solution,
std::unique_ptr<PlanStage> root,
@@ -96,10 +93,6 @@ void MultiPlanStage::addPlan(std::unique_ptr<QuerySolution> solution,
}
bool MultiPlanStage::isEOF() {
- if (_failure) {
- return true;
- }
-
// If _bestPlanIdx hasn't been found, can't be at EOF
if (!bestPlanChosen()) {
return false;
@@ -112,11 +105,6 @@ bool MultiPlanStage::isEOF() {
}
PlanStage::StageState MultiPlanStage::doWork(WorkingSetID* out) {
- if (_failure) {
- *out = _statusMemberId;
- return PlanStage::FAILURE;
- }
-
CandidatePlan& bestPlan = _candidates[_bestPlanIdx];
// Look for an already produced result that provides the data the caller wants.
@@ -128,26 +116,24 @@ PlanStage::StageState MultiPlanStage::doWork(WorkingSetID* out) {
// best plan had no (or has no more) cached results
- StageState state = bestPlan.root->work(out);
+ StageState state;
+ try {
+ state = bestPlan.root->work(out);
+ } catch (const ExceptionFor<ErrorCodes::QueryExceededMemoryLimitNoDiskUseAllowed>&) {
+ // The winning plan ran out of memory. If we have a backup plan with no blocking states,
+ // then switch to it.
+ if (!hasBackupPlan()) {
+ throw;
+ }
- if (PlanStage::FAILURE == state && hasBackupPlan()) {
- LOGV2_DEBUG(20588, 5, "Best plan errored out switching to backup");
- // Uncache the bad solution if we fall back
- // on the backup solution.
- //
- // XXX: Instead of uncaching we should find a way for the
- // cached plan runner to fall back on a different solution
- // if the best solution fails. Alternatively we could try to
- // defer cache insertion to be after the first produced result.
+ LOGV2_DEBUG(20588, 5, "Best plan errored, switching to backup plan");
- CollectionQueryInfo::get(collection())
- .getPlanCache()
- ->remove(*_query)
- .transitional_ignore();
+ // Attempt to remove the plan from the cache. This will fail if the plan has already been
+ // removed, and we intentionally ignore such errors.
+ CollectionQueryInfo::get(collection()).getPlanCache()->remove(*_query).ignore();
_bestPlanIdx = _backupPlanIdx;
_backupPlanIdx = kNoSuchPlan;
-
return _candidates[_bestPlanIdx].root->work(out);
}
@@ -159,24 +145,15 @@ PlanStage::StageState MultiPlanStage::doWork(WorkingSetID* out) {
return state;
}
-Status MultiPlanStage::tryYield(PlanYieldPolicy* yieldPolicy) {
+void MultiPlanStage::tryYield(PlanYieldPolicy* yieldPolicy) {
// These are the conditions which can cause us to yield:
// 1) The yield policy's timer elapsed, or
// 2) some stage requested a yield, or
// 3) we need to yield and retry due to a WriteConflictException.
// In all cases, the actual yielding happens here.
if (yieldPolicy->shouldYieldOrInterrupt()) {
- auto yieldStatus = yieldPolicy->yieldOrInterrupt();
-
- if (!yieldStatus.isOK()) {
- _failure = true;
- _statusMemberId =
- WorkingSetCommon::allocateStatusMember(_candidates[0].ws, yieldStatus);
- return yieldStatus;
- }
+ uassertStatusOK(yieldPolicy->yieldOrInterrupt());
}
-
- return Status::OK();
}
// static
@@ -229,15 +206,7 @@ Status MultiPlanStage::pickBestPlan(PlanYieldPolicy* yieldPolicy) {
}
}
} catch (DBException& e) {
- e.addContext("exception thrown while multiplanner was selecting best plan");
- throw;
- }
-
- if (_failure) {
- invariant(WorkingSet::INVALID_ID != _statusMemberId);
- WorkingSetMember* member = _candidates[0].ws->get(_statusMemberId);
- return WorkingSetCommon::getMemberStatus(*member).withContext(
- "multiplanner encountered a failure while selecting best plan");
+ return e.toStatus().withContext("error while multiplanner was selecting best plan");
}
// After picking best plan, ranking will own plan stats from
@@ -397,12 +366,28 @@ bool MultiPlanStage::workAllPlans(size_t numResults, PlanYieldPolicy* yieldPolic
}
// Might need to yield between calls to work due to the timer elapsing.
- if (!(tryYield(yieldPolicy)).isOK()) {
- return false;
- }
+ tryYield(yieldPolicy);
WorkingSetID id = WorkingSet::INVALID_ID;
- PlanStage::StageState state = candidate.root->work(&id);
+ PlanStage::StageState state;
+ try {
+ state = candidate.root->work(&id);
+ } catch (const ExceptionFor<ErrorCodes::QueryExceededMemoryLimitNoDiskUseAllowed>&) {
+ // If a candidate fails due to exceeding allowed resource consumption, then mark the
+ // candidate as failed but proceed with the multi-plan trial period. The MultiPlanStage
+ // as a whole only fails if _all_ candidates hit their resource consumption limit, or if
+ // a different, query-fatal error code is thrown.
+ candidate.failed = true;
+ ++_failureCount;
+
+ // If all children have failed, then rethrow. Otherwise, swallow the error and move onto
+ // the next candidate plan.
+ if (_failureCount == _candidates.size()) {
+ throw;
+ }
+
+ continue;
+ }
if (PlanStage::ADVANCED == state) {
// Save result for later.
@@ -430,26 +415,7 @@ bool MultiPlanStage::workAllPlans(size_t numResults, PlanYieldPolicy* yieldPolic
yieldPolicy->forceYield();
}
- if (!(tryYield(yieldPolicy)).isOK()) {
- return false;
- }
- } else if (PlanStage::NEED_TIME != state) {
- // On FAILURE, mark this candidate as failed, but keep executing the other
- // candidates. The MultiPlanStage as a whole only fails when every candidate
- // plan fails.
-
- candidate.failed = true;
- ++_failureCount;
-
- // Propagate most recent seen failure to parent.
- invariant(state == PlanStage::FAILURE);
- _statusMemberId = id;
-
-
- if (_failureCount == _candidates.size()) {
- _failure = true;
- return false;
- }
+ tryYield(yieldPolicy);
}
}
diff --git a/src/mongo/db/exec/multi_plan.h b/src/mongo/db/exec/multi_plan.h
index 1c33885d245..4b9b64bf863 100644
--- a/src/mongo/db/exec/multi_plan.h
+++ b/src/mongo/db/exec/multi_plan.h
@@ -178,9 +178,9 @@ private:
* Checks whether we need to perform either a timing-based yield or a yield for a document
* fetch. If so, then uses 'yieldPolicy' to actually perform the yield.
*
- * Returns a non-OK status if killed during a yield or if the query has exceeded its time limit.
+ * Throws an exception if yield recovery fails.
*/
- Status tryYield(PlanYieldPolicy* yieldPolicy);
+ void tryYield(PlanYieldPolicy* yieldPolicy);
static const int kNoSuchPlan = -1;
@@ -205,25 +205,14 @@ private:
// uses -1 / kNoSuchPlan when best plan is not (yet) known
int _backupPlanIdx;
- // Set if this MultiPlanStage cannot continue, and the query must fail. This can happen in
- // two ways. The first is that all candidate plans fail. Note that one plan can fail
- // during normal execution of the plan competition. Here is an example:
+ // Count of the number of candidate plans that have failed during the trial period. The
+ // multi-planner swallows resource exhaustion errors (QueryExceededMemoryLimitNoDiskUseAllowed).
+ // This means that if one candidate involves a blocking sort, and the other does not, the entire
+ // query will not fail if the blocking sort hits the limit on its allowed memory footprint.
//
- // Plan 1: collection scan with sort. Sort runs out of memory.
- // Plan 2: ixscan that provides sort. Won't run out of memory.
- //
- // We want to choose plan 2 even if plan 1 fails.
- //
- // The second way for failure to occur is that the execution of this query is killed during
- // a yield, by some concurrent event such as a collection drop.
- bool _failure;
-
- // If everything fails during the plan competition, we can't pick one.
- size_t _failureCount;
-
- // if pickBestPlan fails, this is set to the wsid of the statusMember
- // returned by ::work()
- WorkingSetID _statusMemberId;
+ // Arbitrary error codes are not swallowed by the multi-planner, since it is not known whether it
+ // is safe for the query to continue executing.
+ size_t _failureCount = 0u;
// Stats
MultiPlanStats _specificStats;
diff --git a/src/mongo/db/exec/near.cpp b/src/mongo/db/exec/near.cpp
index 30bbb894881..f5bd5a7c157 100644
--- a/src/mongo/db/exec/near.cpp
+++ b/src/mongo/db/exec/near.cpp
@@ -81,7 +81,6 @@ PlanStage::StageState NearStage::initNext(WorkingSetID* out) {
PlanStage::StageState NearStage::doWork(WorkingSetID* out) {
WorkingSetID toReturn = WorkingSet::INVALID_ID;
- Status error = Status::OK();
PlanStage::StageState nextState = PlanStage::NEED_TIME;
//
@@ -91,7 +90,7 @@ PlanStage::StageState NearStage::doWork(WorkingSetID* out) {
if (SearchState_Initializing == _searchState) {
nextState = initNext(&toReturn);
} else if (SearchState_Buffering == _searchState) {
- nextState = bufferNext(&toReturn, &error);
+ nextState = bufferNext(&toReturn);
} else if (SearchState_Advancing == _searchState) {
nextState = advanceNext(&toReturn);
} else {
@@ -103,9 +102,7 @@ PlanStage::StageState NearStage::doWork(WorkingSetID* out) {
// Handle the results
//
- if (PlanStage::FAILURE == nextState) {
- *out = WorkingSetCommon::allocateStatusMember(_workingSet, error);
- } else if (PlanStage::ADVANCED == nextState) {
+ if (PlanStage::ADVANCED == nextState) {
*out = toReturn;
} else if (PlanStage::NEED_YIELD == nextState) {
*out = toReturn;
@@ -132,28 +129,20 @@ struct NearStage::SearchResult {
};
// Set "toReturn" when NEED_YIELD.
-PlanStage::StageState NearStage::bufferNext(WorkingSetID* toReturn, Status* error) {
+PlanStage::StageState NearStage::bufferNext(WorkingSetID* toReturn) {
//
// Try to retrieve the next covered member
//
if (!_nextInterval) {
- StatusWith<CoveredInterval*> intervalStatus =
- nextInterval(opCtx(), _workingSet, collection());
- if (!intervalStatus.isOK()) {
- _searchState = SearchState_Finished;
- *error = intervalStatus.getStatus();
- return PlanStage::FAILURE;
- }
-
- if (nullptr == intervalStatus.getValue()) {
+ auto interval = nextInterval(opCtx(), _workingSet, collection());
+ if (!interval) {
_searchState = SearchState_Finished;
return PlanStage::IS_EOF;
}
// CoveredInterval and its child stage are owned by _childrenIntervals
- _childrenIntervals.push_back(
- std::unique_ptr<NearStage::CoveredInterval>{intervalStatus.getValue()});
+ _childrenIntervals.push_back(std::move(interval));
_nextInterval = _childrenIntervals.back().get();
_specificStats.intervalStats.emplace_back();
_nextIntervalStats = &_specificStats.intervalStats.back();
@@ -168,9 +157,6 @@ PlanStage::StageState NearStage::bufferNext(WorkingSetID* toReturn, Status* erro
if (PlanStage::IS_EOF == intervalState) {
_searchState = SearchState_Advancing;
return PlanStage::NEED_TIME;
- } else if (PlanStage::FAILURE == intervalState) {
- *error = WorkingSetCommon::getMemberStatus(*_workingSet->get(nextMemberID));
- return intervalState;
} else if (PlanStage::NEED_YIELD == intervalState) {
*toReturn = nextMemberID;
return intervalState;
@@ -194,17 +180,9 @@ PlanStage::StageState NearStage::bufferNext(WorkingSetID* toReturn, Status* erro
++_nextIntervalStats->numResultsBuffered;
- StatusWith<double> distanceStatus = computeDistance(nextMember);
-
- if (!distanceStatus.isOK()) {
- _searchState = SearchState_Finished;
- *error = distanceStatus.getStatus();
- return PlanStage::FAILURE;
- }
-
// If the member's distance is in the current distance interval, add it to our buffered
// results.
- double memberDistance = distanceStatus.getValue();
+ auto memberDistance = computeDistance(nextMember);
// Ensure that the BSONObj underlying the WorkingSetMember is owned in case we yield.
nextMember->makeObjOwnedIfNeeded();
diff --git a/src/mongo/db/exec/near.h b/src/mongo/db/exec/near.h
index 8f55c777494..bbbbee686e3 100644
--- a/src/mongo/db/exec/near.h
+++ b/src/mongo/db/exec/near.h
@@ -115,23 +115,19 @@ protected:
//
/**
- * Constructs the next covering over the next interval to buffer results from, or NULL
- * if the full range has been searched. Use the provided working set as the working
- * set for the covering stage if required.
- *
- * Returns !OK on failure to create next stage.
+ * Constructs the next covering over the next interval to buffer results from, or nullptr if the
+ * full range has been searched. Use the provided working set as the working set for the
+ * covering stage if required.
*/
- virtual StatusWith<CoveredInterval*> nextInterval(OperationContext* opCtx,
- WorkingSet* workingSet,
- const Collection* collection) = 0;
+ virtual std::unique_ptr<CoveredInterval> nextInterval(OperationContext* opCtx,
+ WorkingSet* workingSet,
+ const Collection* collection) = 0;
/**
* Computes the distance value for the given member data, or -1 if the member should not be
* returned in the sorted results.
- *
- * Returns !OK on invalid member data.
*/
- virtual StatusWith<double> computeDistance(WorkingSetMember* member) = 0;
+ virtual double computeDistance(WorkingSetMember* member) = 0;
/*
* Initialize near stage before buffering the data.
@@ -157,7 +153,7 @@ private:
//
StageState initNext(WorkingSetID* out);
- StageState bufferNext(WorkingSetID* toReturn, Status* error);
+ StageState bufferNext(WorkingSetID* toReturn);
StageState advanceNext(WorkingSetID* toReturn);
//
diff --git a/src/mongo/db/exec/or.cpp b/src/mongo/db/exec/or.cpp
index c50a4981e94..ec0d680ac37 100644
--- a/src/mongo/db/exec/or.cpp
+++ b/src/mongo/db/exec/or.cpp
@@ -110,12 +110,6 @@ PlanStage::StageState OrStage::doWork(WorkingSetID* out) {
} else {
return PlanStage::NEED_TIME;
}
- } else if (PlanStage::FAILURE == childStatus) {
- // The stage which produces a failure is responsible for allocating a working set member
- // with error details.
- invariant(WorkingSet::INVALID_ID != id);
- *out = id;
- return childStatus;
} else if (PlanStage::NEED_YIELD == childStatus) {
*out = id;
}
diff --git a/src/mongo/db/exec/pipeline_proxy.cpp b/src/mongo/db/exec/pipeline_proxy.cpp
index bdfaaa746b8..c3014ce34a5 100644
--- a/src/mongo/db/exec/pipeline_proxy.cpp
+++ b/src/mongo/db/exec/pipeline_proxy.cpp
@@ -65,9 +65,7 @@ PipelineProxyStage::PipelineProxyStage(ExpressionContext* expCtx,
}
PlanStage::StageState PipelineProxyStage::doWork(WorkingSetID* out) {
- if (!out) {
- return PlanStage::FAILURE;
- }
+ invariant(out);
if (!_stash.empty()) {
*out = _ws->allocate();
diff --git a/src/mongo/db/exec/plan_stage.h b/src/mongo/db/exec/plan_stage.h
index 652bc62d051..a601ddb4f1c 100644
--- a/src/mongo/db/exec/plan_stage.h
+++ b/src/mongo/db/exec/plan_stage.h
@@ -74,6 +74,10 @@ class RecordId;
* saveState() if any underlying database state changes. If saveState() is called,
* restoreState() must be called again before any work() is done.
*
+ * If an error occurs at runtime (e.g. we reach resource limits for the request), then work() throws
+ * an exception. At this point, statistics may be extracted from the execution plan, but the
+ * execution tree is otherwise unusable and the plan must be discarded.
+ *
* Here is a very simple usage example:
*
* WorkingSet workingSet;
@@ -92,9 +96,6 @@ class RecordId;
* case PlanStage::NEED_TIME:
* // Need more time.
* break;
- * case PlanStage::FAILURE:
- * // Throw exception or return error
- * break;
* }
*
* if (shouldYield) {
@@ -170,28 +171,20 @@ public:
// wants fetched. On the next call to work() that stage can assume a fetch was performed
// on the WSM that the held WSID refers to.
NEED_YIELD,
-
- // Something has gone unrecoverably wrong. Stop running this query.
- // If the out parameter does not refer to an invalid working set member,
- // call WorkingSetCommon::getStatusMemberObject() to get details on the failure.
- // Any class implementing this interface must set the WSID out parameter to
- // INVALID_ID or a valid WSM ID if FAILURE is returned.
- FAILURE,
};
static std::string stateStr(const StageState& state) {
- if (ADVANCED == state) {
- return "ADVANCED";
- } else if (IS_EOF == state) {
- return "IS_EOF";
- } else if (NEED_TIME == state) {
- return "NEED_TIME";
- } else if (NEED_YIELD == state) {
- return "NEED_YIELD";
- } else {
- verify(FAILURE == state);
- return "FAILURE";
+ switch (state) {
+ case PlanStage::ADVANCED:
+ return "ADVANCED";
+ case PlanStage::IS_EOF:
+ return "IS_EOF";
+ case PlanStage::NEED_TIME:
+ return "NEED_TIME";
+ case PlanStage::NEED_YIELD:
+ return "NEED_YIELD";
}
+ MONGO_UNREACHABLE;
}
@@ -199,13 +192,21 @@ public:
* Perform a unit of work on the query. Ask the stage to produce the next unit of output.
* Stage returns StageState::ADVANCED if *out is set to the next unit of output. Otherwise,
* returns another value of StageState to indicate the stage's status.
+ *
+ * Throws an exception if an error is encountered while executing the query.
*/
StageState work(WorkingSetID* out) {
auto optTimer(getOptTimer());
++_commonStats.works;
- StageState workResult = doWork(out);
+ StageState workResult;
+ try {
+ workResult = doWork(out);
+ } catch (...) {
+ _commonStats.failed = true;
+ throw;
+ }
if (StageState::ADVANCED == workResult) {
++_commonStats.advanced;
@@ -213,8 +214,6 @@ public:
++_commonStats.needTime;
} else if (StageState::NEED_YIELD == workResult) {
++_commonStats.needYield;
- } else if (StageState::FAILURE == workResult) {
- _commonStats.failed = true;
}
return workResult;
diff --git a/src/mongo/db/exec/projection.cpp b/src/mongo/db/exec/projection.cpp
index 45abe1ec4ce..4b205739032 100644
--- a/src/mongo/db/exec/projection.cpp
+++ b/src/mongo/db/exec/projection.cpp
@@ -139,20 +139,7 @@ PlanStage::StageState ProjectionStage::doWork(WorkingSetID* out) {
if (PlanStage::ADVANCED == status) {
WorkingSetMember* member = _ws.get(id);
// Punt to our specific projection impl.
- Status projStatus = transform(member);
- if (!projStatus.isOK()) {
- LOGV2_WARNING(23827,
- "Couldn't execute projection, status = {projStatus}",
- "projStatus"_attr = redact(projStatus));
- *out = WorkingSetCommon::allocateStatusMember(&_ws, projStatus);
- return PlanStage::FAILURE;
- }
-
- *out = id;
- } else if (PlanStage::FAILURE == status) {
- // The stage which produces a failure is responsible for allocating a working set member
- // with error details.
- invariant(WorkingSet::INVALID_ID != id);
+ transform(member);
*out = id;
} else if (PlanStage::NEED_YIELD == status) {
*out = id;
@@ -184,7 +171,7 @@ ProjectionStageDefault::ProjectionStageDefault(boost::intrusive_ptr<ExpressionCo
_executor{projection_executor::buildProjectionExecutor(
expCtx, projection, {}, projection_executor::kDefaultBuilderParams)} {}
-Status ProjectionStageDefault::transform(WorkingSetMember* member) const {
+void ProjectionStageDefault::transform(WorkingSetMember* member) const {
Document input;
// Most metadata should have already been stored within the WSM when we project out a document.
@@ -226,8 +213,6 @@ Status ProjectionStageDefault::transform(WorkingSetMember* member) const {
// constructed from the input one backed by BSON which is owned by the storage system, so we
// need to make sure we transition an owned document.
transitionMemberToOwnedObj(projected.getOwned(), member);
-
- return Status::OK();
}
ProjectionStageCovered::ProjectionStageCovered(ExpressionContext* expCtx,
@@ -265,7 +250,7 @@ ProjectionStageCovered::ProjectionStageCovered(ExpressionContext* expCtx,
}
}
-Status ProjectionStageCovered::transform(WorkingSetMember* member) const {
+void ProjectionStageCovered::transform(WorkingSetMember* member) const {
BSONObjBuilder bob;
// We're pulling data out of the key.
@@ -284,7 +269,6 @@ Status ProjectionStageCovered::transform(WorkingSetMember* member) const {
++keyIndex;
}
transitionMemberToOwnedObj(bob.obj(), member);
- return Status::OK();
}
ProjectionStageSimple::ProjectionStageSimple(ExpressionContext* expCtx,
@@ -298,7 +282,7 @@ ProjectionStageSimple::ProjectionStageSimple(ExpressionContext* expCtx,
projection->getRequiredFields().end()};
}
-Status ProjectionStageSimple::transform(WorkingSetMember* member) const {
+void ProjectionStageSimple::transform(WorkingSetMember* member) const {
BSONObjBuilder bob;
// SIMPLE_DOC implies that we expect an object so it's kind of redundant.
// If we got here because of SIMPLE_DOC the planner shouldn't have messed up.
@@ -320,7 +304,6 @@ Status ProjectionStageSimple::transform(WorkingSetMember* member) const {
}
transitionMemberToOwnedObj(bob.obj(), member);
- return Status::OK();
}
} // namespace mongo
diff --git a/src/mongo/db/exec/projection.h b/src/mongo/db/exec/projection.h
index 5a5d525cc0a..00e7fb33dbc 100644
--- a/src/mongo/db/exec/projection.h
+++ b/src/mongo/db/exec/projection.h
@@ -71,7 +71,7 @@ private:
* Runs either the default complete implementation or a fast path depending on how this was
* constructed.
*/
- virtual Status transform(WorkingSetMember* member) const = 0;
+ virtual void transform(WorkingSetMember* member) const = 0;
// Used to retrieve a WorkingSetMember as part of 'doWork()'.
WorkingSet& _ws;
@@ -99,7 +99,7 @@ public:
}
private:
- Status transform(WorkingSetMember* member) const final;
+ void transform(WorkingSetMember* member) const final;
// Represents all metadata used in the projection.
const QueryMetadataBitSet _requestedMetadata;
@@ -129,7 +129,7 @@ public:
}
private:
- Status transform(WorkingSetMember* member) const final;
+ void transform(WorkingSetMember* member) const final;
// Field names present in the simple projection.
FieldSet _includedFields;
@@ -167,7 +167,7 @@ public:
}
private:
- Status transform(WorkingSetMember* member) const final;
+ void transform(WorkingSetMember* member) const final;
// Has the field names present in the simple projection.
stdx::unordered_set<std::string> _includedFields;
diff --git a/src/mongo/db/exec/queued_data_stage.cpp b/src/mongo/db/exec/queued_data_stage.cpp
index c5f6339dfaa..8ecb541d627 100644
--- a/src/mongo/db/exec/queued_data_stage.cpp
+++ b/src/mongo/db/exec/queued_data_stage.cpp
@@ -49,29 +49,13 @@ PlanStage::StageState QueuedDataStage::doWork(WorkingSetID* out) {
return PlanStage::IS_EOF;
}
- StageState state = _results.front();
- _results.pop();
-
- switch (state) {
- case PlanStage::ADVANCED:
- *out = _members.front();
- _members.pop();
- break;
- case PlanStage::FAILURE:
- // On FAILURE, this stage is reponsible for allocating the WorkingSetMember with
- // the error details.
- *out = WorkingSetCommon::allocateStatusMember(
- _ws, Status(ErrorCodes::InternalError, "Queued data stage failure"));
- break;
- default:
- break;
- }
-
- return state;
+ *out = _members.front();
+ _members.pop();
+ return PlanStage::ADVANCED;
}
bool QueuedDataStage::isEOF() {
- return _results.empty();
+ return _members.empty();
}
unique_ptr<PlanStageStats> QueuedDataStage::getStats() {
@@ -87,15 +71,7 @@ const SpecificStats* QueuedDataStage::getSpecificStats() const {
return &_specificStats;
}
-void QueuedDataStage::pushBack(const PlanStage::StageState state) {
- invariant(PlanStage::ADVANCED != state);
- _results.push(state);
-}
-
void QueuedDataStage::pushBack(const WorkingSetID& id) {
- _results.push(PlanStage::ADVANCED);
-
- // member lives in _ws. We'll return it when _results hits ADVANCED.
_members.push(id);
}
diff --git a/src/mongo/db/exec/queued_data_stage.h b/src/mongo/db/exec/queued_data_stage.h
index b952062803e..bc13988cef0 100644
--- a/src/mongo/db/exec/queued_data_stage.h
+++ b/src/mongo/db/exec/queued_data_stage.h
@@ -69,17 +69,6 @@ public:
/**
* Add a result to the back of the queue.
*
- * Note: do not add PlanStage::ADVANCED with this method, ADVANCED can
- * only be added with a data member.
- *
- * Work() goes through the queue.
- * Either no data is returned (just a state), or...
- */
- void pushBack(const PlanStage::StageState state);
-
- /**
- * ...data is returned (and we ADVANCED)
- *
* The caller is responsible for allocating 'id' and filling out the WSM keyed by 'id'
* appropriately.
*
@@ -95,7 +84,6 @@ private:
WorkingSet* _ws;
// The data we return.
- std::queue<PlanStage::StageState> _results;
std::queue<WorkingSetID> _members;
// Stats
diff --git a/src/mongo/db/exec/queued_data_stage_test.cpp b/src/mongo/db/exec/queued_data_stage_test.cpp
index 46ef8d371e2..264e57b387d 100644
--- a/src/mongo/db/exec/queued_data_stage_test.cpp
+++ b/src/mongo/db/exec/queued_data_stage_test.cpp
@@ -85,7 +85,7 @@ TEST_F(QueuedDataStageTest, getValidStats) {
//
// Test that our stats are updated as we perform operations.
//
-TEST_F(QueuedDataStageTest, validateStats) {
+TEST_F(QueuedDataStageTest, ValidateStats) {
WorkingSet ws;
WorkingSetID wsID;
auto expCtx = make_intrusive<ExpressionContext>(opCtx(), nullptr, kNss);
@@ -100,18 +100,11 @@ TEST_F(QueuedDataStageTest, validateStats) {
ASSERT_EQUALS(stats->advanced, 0U);
ASSERT_FALSE(stats->isEOF);
- // 'perform' some operations, validate stats
- // needTime
- mock->pushBack(PlanStage::NEED_TIME);
- mock->work(&wsID);
- ASSERT_EQUALS(stats->works, 1U);
- ASSERT_EQUALS(stats->needTime, 1U);
-
// advanced, with pushed data
WorkingSetID id = ws.allocate();
mock->pushBack(id);
mock->work(&wsID);
- ASSERT_EQUALS(stats->works, 2U);
+ ASSERT_EQUALS(stats->works, 1U);
ASSERT_EQUALS(stats->advanced, 1U);
// yields
diff --git a/src/mongo/db/exec/return_key.cpp b/src/mongo/db/exec/return_key.cpp
index 8691f336b0e..ed8c0a4d3b2 100644
--- a/src/mongo/db/exec/return_key.cpp
+++ b/src/mongo/db/exec/return_key.cpp
@@ -45,22 +45,7 @@ PlanStage::StageState ReturnKeyStage::doWork(WorkingSetID* out) {
if (PlanStage::ADVANCED == status) {
WorkingSetMember* member = _ws.get(id);
- Status indexKeyStatus = _extractIndexKey(member);
-
- if (!indexKeyStatus.isOK()) {
- LOGV2_WARNING(4615602,
- "Couldn't execute {stage}, status = {indexKeyStatus}",
- "stage"_attr = kStageName,
- "indexKeyStatus"_attr = redact(indexKeyStatus));
- *out = WorkingSetCommon::allocateStatusMember(&_ws, indexKeyStatus);
- return PlanStage::FAILURE;
- }
-
- *out = id;
- } else if (PlanStage::FAILURE == status) {
- // The stage which produces a failure is responsible for allocating a working set member
- // with error details.
- invariant(WorkingSet::INVALID_ID != id);
+ _extractIndexKey(member);
*out = id;
} else if (PlanStage::NEED_YIELD == status) {
*out = id;
@@ -78,7 +63,7 @@ std::unique_ptr<PlanStageStats> ReturnKeyStage::getStats() {
return ret;
}
-Status ReturnKeyStage::_extractIndexKey(WorkingSetMember* member) {
+void ReturnKeyStage::_extractIndexKey(WorkingSetMember* member) {
if (!_sortKeyMetaFields.empty()) {
invariant(member->metadata().hasSortKey());
}
@@ -107,7 +92,5 @@ Status ReturnKeyStage::_extractIndexKey(WorkingSetMember* member) {
member->recordId = {};
member->doc = {{}, md.freeze()};
member->transitionToOwnedObj();
-
- return Status::OK();
}
} // namespace mongo
diff --git a/src/mongo/db/exec/return_key.h b/src/mongo/db/exec/return_key.h
index b33a9cd0c31..2e6db5482c0 100644
--- a/src/mongo/db/exec/return_key.h
+++ b/src/mongo/db/exec/return_key.h
@@ -71,7 +71,7 @@ public:
}
private:
- Status _extractIndexKey(WorkingSetMember* member);
+ void _extractIndexKey(WorkingSetMember* member);
WorkingSet& _ws;
ReturnKeyStats _specificStats;
diff --git a/src/mongo/db/exec/skip.cpp b/src/mongo/db/exec/skip.cpp
index 94bb81153e4..d3d0fc48afd 100644
--- a/src/mongo/db/exec/skip.cpp
+++ b/src/mongo/db/exec/skip.cpp
@@ -72,12 +72,6 @@ PlanStage::StageState SkipStage::doWork(WorkingSetID* out) {
*out = id;
return PlanStage::ADVANCED;
- } else if (PlanStage::FAILURE == status) {
- // The stage which produces a failure is responsible for allocating a working set member
- // with error details.
- invariant(WorkingSet::INVALID_ID != id);
- *out = id;
- return status;
} else if (PlanStage::NEED_YIELD == status) {
*out = id;
}
diff --git a/src/mongo/db/exec/sort.cpp b/src/mongo/db/exec/sort.cpp
index 6b03db5b26f..aee2c1fa348 100644
--- a/src/mongo/db/exec/sort.cpp
+++ b/src/mongo/db/exec/sort.cpp
@@ -59,28 +59,13 @@ PlanStage::StageState SortStage::doWork(WorkingSetID* out) {
if (code == PlanStage::ADVANCED) {
// The plan must be structured such that a previous stage has attached the sort key
// metadata.
- try {
- spool(id);
- } catch (const AssertionException&) {
- // Propagate runtime errors using the FAILED status code.
- *out = WorkingSetCommon::allocateStatusMember(_ws, exceptionToStatus());
- return PlanStage::FAILURE;
- }
-
+ spool(id);
return PlanStage::NEED_TIME;
} else if (code == PlanStage::IS_EOF) {
// The child has returned all of its results. Record this fact so that subsequent calls
// to 'doWork()' will perform sorting and unspool the sorted results.
_populated = true;
-
- try {
- loadingDone();
- } catch (const AssertionException&) {
- // Propagate runtime errors using the FAILED status code.
- *out = WorkingSetCommon::allocateStatusMember(_ws, exceptionToStatus());
- return PlanStage::FAILURE;
- }
-
+ loadingDone();
return PlanStage::NEED_TIME;
} else {
*out = id;
diff --git a/src/mongo/db/exec/sort_key_generator.cpp b/src/mongo/db/exec/sort_key_generator.cpp
index e1ea1e9847c..b62a0c93d9e 100644
--- a/src/mongo/db/exec/sort_key_generator.cpp
+++ b/src/mongo/db/exec/sort_key_generator.cpp
@@ -67,16 +67,10 @@ PlanStage::StageState SortKeyGeneratorStage::doWork(WorkingSetID* out) {
if (stageState == PlanStage::ADVANCED) {
WorkingSetMember* member = _ws->get(*out);
- try {
- auto sortKey = _sortKeyGen.computeSortKey(*member);
-
- // Add the sort key to the WSM as metadata.
- member->metadata().setSortKey(std::move(sortKey), _sortKeyGen.isSingleElementKey());
- } catch (const DBException& computeSortKeyException) {
- *out = WorkingSetCommon::allocateStatusMember(_ws, computeSortKeyException.toStatus());
- return PlanStage::FAILURE;
- }
+ auto sortKey = _sortKeyGen.computeSortKey(*member);
+ // Add the sort key to the WSM as metadata.
+ member->metadata().setSortKey(std::move(sortKey), _sortKeyGen.isSingleElementKey());
return PlanStage::ADVANCED;
}
diff --git a/src/mongo/db/exec/stagedebug_cmd.cpp b/src/mongo/db/exec/stagedebug_cmd.cpp
index 8f4b837f5c0..6d1ffe1ea11 100644
--- a/src/mongo/db/exec/stagedebug_cmd.cpp
+++ b/src/mongo/db/exec/stagedebug_cmd.cpp
@@ -196,18 +196,6 @@ public:
}
resultBuilder.done();
-
- if (PlanExecutor::FAILURE == state) {
- LOGV2_ERROR(23795,
- "Plan executor error during StageDebug command: FAILURE, stats: "
- "{Explain_getWinningPlanStats_exec_get}",
- "Explain_getWinningPlanStats_exec_get"_attr =
- redact(Explain::getWinningPlanStats(exec.get())));
-
- uassertStatusOK(WorkingSetCommon::getMemberObjectStatus(obj).withContext(
- "Executor error during StageDebug command"));
- }
-
return true;
}
diff --git a/src/mongo/db/exec/text_match.cpp b/src/mongo/db/exec/text_match.cpp
index c0d15c0e2fb..67b5ccf762d 100644
--- a/src/mongo/db/exec/text_match.cpp
+++ b/src/mongo/db/exec/text_match.cpp
@@ -94,16 +94,6 @@ PlanStage::StageState TextMatchStage::doWork(WorkingSetID* out) {
++_specificStats.docsRejected;
stageState = PlanStage::NEED_TIME;
}
- } else if (stageState == PlanStage::FAILURE) {
- // If a stage fails, it may create a status WSM to indicate why it
- // failed, in which case '*out' is valid. If ID is invalid, we
- // create our own error message.
- if (WorkingSet::INVALID_ID == *out) {
- str::stream ss;
- ss << "TEXT_MATCH stage failed to read in results from child";
- Status status(ErrorCodes::InternalError, ss);
- *out = WorkingSetCommon::allocateStatusMember(_ws, status);
- }
}
return stageState;
diff --git a/src/mongo/db/exec/text_or.cpp b/src/mongo/db/exec/text_or.cpp
index 4f92025575a..75e90e278cc 100644
--- a/src/mongo/db/exec/text_or.cpp
+++ b/src/mongo/db/exec/text_or.cpp
@@ -197,19 +197,6 @@ PlanStage::StageState TextOrStage::readFromChildren(WorkingSetID* out) {
_internalState = State::kReturningResults;
return PlanStage::NEED_TIME;
- } else if (PlanStage::FAILURE == childState) {
- // If a stage fails, it may create a status WSM to indicate why it
- // failed, in which case 'id' is valid. If ID is invalid, we
- // create our own error message.
- if (WorkingSet::INVALID_ID == id) {
- str::stream ss;
- ss << "TEXT_OR stage failed to read in results from child";
- Status status(ErrorCodes::InternalError, ss);
- *out = WorkingSetCommon::allocateStatusMember(_ws, status);
- } else {
- *out = id;
- }
- return PlanStage::FAILURE;
} else {
// Propagate WSID from below.
*out = id;
diff --git a/src/mongo/db/exec/trial_stage.cpp b/src/mongo/db/exec/trial_stage.cpp
index d10632c4427..0b7ef73d4bb 100644
--- a/src/mongo/db/exec/trial_stage.cpp
+++ b/src/mongo/db/exec/trial_stage.cpp
@@ -141,18 +141,6 @@ PlanStage::StageState TrialStage::_workTrialPlan(WorkingSetID* out) {
_specificStats.trialCompleted = _specificStats.trialSucceeded = true;
_replaceCurrentPlan(_queuedData);
return NEED_TIME;
- case PlanStage::FAILURE:
- // Either of these cause us to immediately end the trial phase and switch to the backup.
- auto statusDoc = WorkingSetCommon::getStatusMemberDocument(*_ws, *out);
- BSONObj statusObj = statusDoc ? statusDoc->toBson() : BSONObj();
- LOGV2_DEBUG(20604,
- 1,
- "Trial plan failed; switching to backup plan. Status: {statusObj}",
- "statusObj"_attr = redact(statusObj));
- _specificStats.trialCompleted = true;
- _replaceCurrentPlan(_backupPlan);
- *out = WorkingSet::INVALID_ID;
- return NEED_TIME;
}
MONGO_UNREACHABLE;
diff --git a/src/mongo/db/exec/update_stage.cpp b/src/mongo/db/exec/update_stage.cpp
index 3a5b4399c86..f7ab7e84da6 100644
--- a/src/mongo/db/exec/update_stage.cpp
+++ b/src/mongo/db/exec/update_stage.cpp
@@ -587,17 +587,6 @@ PlanStage::StageState UpdateStage::doWork(WorkingSetID* out) {
} else if (PlanStage::IS_EOF == status) {
// The child is out of results, and therefore so are we.
return PlanStage::IS_EOF;
- } else if (PlanStage::FAILURE == status) {
- *out = id;
- // If a stage fails, it may create a status WSM to indicate why it failed, in which case
- // 'id' is valid. If ID is invalid, we create our own error message.
- if (WorkingSet::INVALID_ID == id) {
- const std::string errmsg = "update stage failed to read in results from child";
- *out = WorkingSetCommon::allocateStatusMember(
- _ws, Status(ErrorCodes::InternalError, errmsg));
- return PlanStage::FAILURE;
- }
- return status;
} else if (PlanStage::NEED_YIELD == status) {
*out = id;
}
diff --git a/src/mongo/db/exec/working_set_common.cpp b/src/mongo/db/exec/working_set_common.cpp
index 4025a1e6627..0a6d99724e3 100644
--- a/src/mongo/db/exec/working_set_common.cpp
+++ b/src/mongo/db/exec/working_set_common.cpp
@@ -162,77 +162,4 @@ bool WorkingSetCommon::fetch(OperationContext* opCtx,
return true;
}
-Document WorkingSetCommon::buildMemberStatusObject(const Status& status) {
- BSONObjBuilder bob;
- bob.append("ok", status.isOK() ? 1.0 : 0.0);
- bob.append("code", status.code());
- bob.append("errmsg", status.reason());
- if (auto extraInfo = status.extraInfo()) {
- extraInfo->serialize(&bob);
- }
-
- return Document{bob.obj()};
-}
-
-WorkingSetID WorkingSetCommon::allocateStatusMember(WorkingSet* ws, const Status& status) {
- invariant(ws);
-
- WorkingSetID wsid = ws->allocate();
- WorkingSetMember* member = ws->get(wsid);
- member->doc = {SnapshotId(), buildMemberStatusObject(status)};
- member->transitionToOwnedObj();
-
- return wsid;
-}
-
-bool WorkingSetCommon::isValidStatusMemberObject(const Document& obj) {
- return !obj["ok"].missing() && obj["code"].getType() == BSONType::NumberInt &&
- obj["errmsg"].getType() == BSONType::String;
-}
-
-bool WorkingSetCommon::isValidStatusMemberObject(const BSONObj& obj) {
- return isValidStatusMemberObject(Document{obj});
-}
-
-boost::optional<Document> WorkingSetCommon::getStatusMemberDocument(const WorkingSet& ws,
- WorkingSetID wsid) {
- if (WorkingSet::INVALID_ID == wsid) {
- return boost::none;
- }
- auto member = ws.get(wsid);
- if (!member->hasOwnedObj()) {
- return boost::none;
- }
-
- if (!isValidStatusMemberObject(member->doc.value())) {
- return boost::none;
- }
- return member->doc.value();
-}
-
-Status WorkingSetCommon::getMemberObjectStatus(const BSONObj& memberObj) {
- invariant(WorkingSetCommon::isValidStatusMemberObject(memberObj));
- return Status(ErrorCodes::Error(memberObj["code"].numberInt()),
- memberObj["errmsg"].valueStringData(),
- memberObj);
-}
-
-Status WorkingSetCommon::getMemberObjectStatus(const Document& doc) {
- return getMemberObjectStatus(doc.toBson());
-}
-
-Status WorkingSetCommon::getMemberStatus(const WorkingSetMember& member) {
- invariant(member.hasObj());
- return getMemberObjectStatus(member.doc.value().toBson());
-}
-
-std::string WorkingSetCommon::toStatusString(const BSONObj& obj) {
- Document doc{obj};
- if (!isValidStatusMemberObject(doc)) {
- Status unknownStatus(ErrorCodes::UnknownError, "no details available");
- return unknownStatus.toString();
- }
- return getMemberObjectStatus(doc).toString();
-}
-
} // namespace mongo
diff --git a/src/mongo/db/exec/working_set_common.h b/src/mongo/db/exec/working_set_common.h
index f62d861142f..b03b29a9682 100644
--- a/src/mongo/db/exec/working_set_common.h
+++ b/src/mongo/db/exec/working_set_common.h
@@ -35,8 +35,6 @@
namespace mongo {
-class CanonicalQuery;
-class Collection;
class OperationContext;
class SeekableRecordCursor;
@@ -56,54 +54,6 @@ public:
WorkingSetID id,
unowned_ptr<SeekableRecordCursor> cursor,
const NamespaceString& ns);
-
- /**
- * Build a Document which represents a Status to return in a WorkingSet.
- */
- static Document buildMemberStatusObject(const Status& status);
-
- /**
- * Allocate a new WSM and initialize it with
- * the code and reason from the status.
- * Owned BSON object will have the following layout:
- * {
- * ok: <ok>, // 1 for OK; 0 otherwise.
- * code: <code>, // Status::code()
- * errmsg: <errmsg> // Status::reason()
- * }
- */
- static WorkingSetID allocateStatusMember(WorkingSet* ws, const Status& status);
-
- /**
- * Returns true if object was created by allocateStatusMember().
- */
- static bool isValidStatusMemberObject(const Document& obj);
- static bool isValidStatusMemberObject(const BSONObj& obj);
-
- /**
- * If the working set member represents an error status, returns it as a Document (which can
- * subsequently be converted to Status). Otherwise returns boost::none.
- */
- static boost::optional<Document> getStatusMemberDocument(const WorkingSet& ws,
- WorkingSetID wsid);
-
- /**
- * Returns status from working set member object.
- * Assumes isValidStatusMemberObject().
- */
- static Status getMemberObjectStatus(const BSONObj& memberObj);
- static Status getMemberObjectStatus(const Document& memberObj);
-
- /**
- * Returns status from working set member created with allocateStatusMember().
- * Assumes isValidStatusMemberObject().
- */
- static Status getMemberStatus(const WorkingSetMember& member);
-
- /**
- * Formats working set member object created with allocateStatusMember().
- */
- static std::string toStatusString(const BSONObj& obj);
};
} // namespace mongo