summaryrefslogtreecommitdiff
path: root/src/mongo
diff options
context:
space:
mode:
authorPawel Terlecki <pawel.terlecki@mongodb.com>2019-01-29 14:25:20 -0500
committerPawel Terlecki <pawel.terlecki@mongodb.com>2019-01-30 12:59:42 -0500
commitb7df0530e35a23bc1139f22a84ff4ba8b7688b4a (patch)
treeb8d3967252d3b207ee1925227cfd31266adf2130 /src/mongo
parentd1771e696b6df883af70eedaaa0733548c573fec (diff)
downloadmongo-b7df0530e35a23bc1139f22a84ff4ba8b7688b4a.tar.gz
SERVER-38316 Consolidate PlanExecutor::DEAD and PlanExecutor::FAILURE
Replaced PlanStage::DEAD with PlanStage::FAILURE. PlanExecutor::DEAD will be taken care of in a subsequent commit.
Diffstat (limited to 'src/mongo')
-rw-r--r--src/mongo/db/exec/and_hash.cpp6
-rw-r--r--src/mongo/db/exec/and_sorted.cpp2
-rw-r--r--src/mongo/db/exec/cached_plan.cpp11
-rw-r--r--src/mongo/db/exec/collection_scan.cpp2
-rw-r--r--src/mongo/db/exec/count.cpp2
-rw-r--r--src/mongo/db/exec/delete.cpp1
-rw-r--r--src/mongo/db/exec/fetch.cpp2
-rw-r--r--src/mongo/db/exec/limit.cpp2
-rw-r--r--src/mongo/db/exec/merge_sort.cpp2
-rw-r--r--src/mongo/db/exec/or.cpp2
-rw-r--r--src/mongo/db/exec/plan_stage.h6
-rw-r--r--src/mongo/db/exec/projection.cpp2
-rw-r--r--src/mongo/db/exec/queued_data_stage.cpp3
-rw-r--r--src/mongo/db/exec/skip.cpp2
-rw-r--r--src/mongo/db/exec/sort.cpp2
-rw-r--r--src/mongo/db/exec/trial_stage.cpp1
-rw-r--r--src/mongo/db/query/plan_executor_impl.cpp4
-rw-r--r--src/mongo/dbtests/query_stage_and.cpp25
-rw-r--r--src/mongo/dbtests/query_stage_cached_plan.cpp1
-rw-r--r--src/mongo/dbtests/query_stage_count.cpp1
-rw-r--r--src/mongo/dbtests/query_stage_distinct.cpp1
-rw-r--r--src/mongo/dbtests/query_stage_ensure_sorted.cpp1
-rw-r--r--src/mongo/dbtests/query_stage_ixscan.cpp1
-rw-r--r--src/mongo/dbtests/query_stage_sort.cpp2
-rw-r--r--src/mongo/dbtests/query_stage_subplan.cpp1
-rw-r--r--src/mongo/dbtests/query_stage_trial.cpp8
26 files changed, 32 insertions, 61 deletions
diff --git a/src/mongo/db/exec/and_hash.cpp b/src/mongo/db/exec/and_hash.cpp
index fdce9a79066..07c531bc105 100644
--- a/src/mongo/db/exec/and_hash.cpp
+++ b/src/mongo/db/exec/and_hash.cpp
@@ -141,7 +141,7 @@ PlanStage::StageState AndHashStage::doWork(WorkingSetID* out) {
// yield.
_ws->get(_lookAheadResults[i])->makeObjOwnedIfNeeded();
break; // Stop looking at this child.
- } else if (PlanStage::FAILURE == childStatus || PlanStage::DEAD == childStatus) {
+ } else if (PlanStage::FAILURE == childStatus) {
// The stage which produces a failure is responsible for allocating a working
// set member with error details.
invariant(WorkingSet::INVALID_ID != _lookAheadResults[i]);
@@ -280,7 +280,7 @@ PlanStage::StageState AndHashStage::readFirstChild(WorkingSetID* out) {
_specificStats.mapAfterChild.push_back(_dataMap.size());
return PlanStage::NEED_TIME;
- } else if (PlanStage::FAILURE == childStatus || PlanStage::DEAD == childStatus) {
+ } else if (PlanStage::FAILURE == childStatus) {
// The stage which produces a failure is responsible for allocating a working set member
// with error details.
invariant(WorkingSet::INVALID_ID != id);
@@ -365,7 +365,7 @@ PlanStage::StageState AndHashStage::hashOtherChildren(WorkingSetID* out) {
}
return PlanStage::NEED_TIME;
- } else if (PlanStage::FAILURE == childStatus || PlanStage::DEAD == childStatus) {
+ } else if (PlanStage::FAILURE == childStatus) {
// The stage which produces a failure is responsible for allocating a working set member
// with error details.
invariant(WorkingSet::INVALID_ID != id);
diff --git a/src/mongo/db/exec/and_sorted.cpp b/src/mongo/db/exec/and_sorted.cpp
index a9e57bf8ea7..ae16646c6cc 100644
--- a/src/mongo/db/exec/and_sorted.cpp
+++ b/src/mongo/db/exec/and_sorted.cpp
@@ -209,7 +209,7 @@ PlanStage::StageState AndSortedStage::moveTowardTargetRecordId(WorkingSetID* out
_isEOF = true;
_ws->free(_targetId);
return state;
- } else if (PlanStage::FAILURE == state || PlanStage::DEAD == state) {
+ } else if (PlanStage::FAILURE == state) {
// The stage which produces a failure is responsible for allocating a working set member
// with error details.
invariant(WorkingSet::INVALID_ID != id);
diff --git a/src/mongo/db/exec/cached_plan.cpp b/src/mongo/db/exec/cached_plan.cpp
index 5f348ae3533..ec7d1d3608f 100644
--- a/src/mongo/db/exec/cached_plan.cpp
+++ b/src/mongo/db/exec/cached_plan.cpp
@@ -148,17 +148,6 @@ Status CachedPlanStage::pickBestPlan(PlanYieldPolicy* yieldPolicy) {
const bool shouldCache = false;
return replan(yieldPolicy, shouldCache);
- } else if (PlanStage::DEAD == state) {
- BSONObj statusObj;
- invariant(WorkingSet::INVALID_ID != id);
- WorkingSetCommon::getStatusMemberObject(*_ws, id, &statusObj);
-
- LOG(1) << "Execution of cached plan failed: PlanStage died"
- << ", query: " << redact(_canonicalQuery->toStringShort())
- << " planSummary: " << Explain::getPlanSummary(child().get())
- << " status: " << redact(statusObj);
-
- return WorkingSetCommon::getMemberObjectStatus(statusObj);
} else {
invariant(PlanStage::NEED_TIME == state);
}
diff --git a/src/mongo/db/exec/collection_scan.cpp b/src/mongo/db/exec/collection_scan.cpp
index f507a24dba4..b789a39f442 100644
--- a/src/mongo/db/exec/collection_scan.cpp
+++ b/src/mongo/db/exec/collection_scan.cpp
@@ -122,7 +122,7 @@ PlanStage::StageState CollectionScan::doWork(WorkingSetID* out) {
<< "Last seen record id: "
<< _lastSeenId);
*out = WorkingSetCommon::allocateStatusMember(_workingSet, status);
- return PlanStage::DEAD;
+ return PlanStage::FAILURE;
}
}
diff --git a/src/mongo/db/exec/count.cpp b/src/mongo/db/exec/count.cpp
index 15d6ec5baff..3440e843780 100644
--- a/src/mongo/db/exec/count.cpp
+++ b/src/mongo/db/exec/count.cpp
@@ -82,7 +82,7 @@ PlanStage::StageState CountStage::doWork(WorkingSetID* out) {
if (PlanStage::IS_EOF == state) {
_commonStats.isEOF = true;
return PlanStage::IS_EOF;
- } else if (PlanStage::FAILURE == state || PlanStage::DEAD == state) {
+ } else if (PlanStage::FAILURE == state) {
// The stage which produces a failure is responsible for allocating a working set member
// with error details.
invariant(WorkingSet::INVALID_ID != id);
diff --git a/src/mongo/db/exec/delete.cpp b/src/mongo/db/exec/delete.cpp
index e71ec2adcaf..d59a16be27d 100644
--- a/src/mongo/db/exec/delete.cpp
+++ b/src/mongo/db/exec/delete.cpp
@@ -126,7 +126,6 @@ PlanStage::StageState DeleteStage::doWork(WorkingSetID* out) {
break;
case PlanStage::FAILURE:
- case PlanStage::DEAD:
// The stage which produces a failure is responsible for allocating a working set
// member with error details.
invariant(WorkingSet::INVALID_ID != id);
diff --git a/src/mongo/db/exec/fetch.cpp b/src/mongo/db/exec/fetch.cpp
index 1a41badc768..eccdd920abe 100644
--- a/src/mongo/db/exec/fetch.cpp
+++ b/src/mongo/db/exec/fetch.cpp
@@ -119,7 +119,7 @@ PlanStage::StageState FetchStage::doWork(WorkingSetID* out) {
}
return returnIfMatches(member, id, out);
- } else if (PlanStage::FAILURE == status || PlanStage::DEAD == status) {
+ } else if (PlanStage::FAILURE == status) {
// The stage which produces a failure is responsible for allocating a working set member
// with error details.
invariant(WorkingSet::INVALID_ID != id);
diff --git a/src/mongo/db/exec/limit.cpp b/src/mongo/db/exec/limit.cpp
index 2f1466752f0..506b99bba2d 100644
--- a/src/mongo/db/exec/limit.cpp
+++ b/src/mongo/db/exec/limit.cpp
@@ -68,7 +68,7 @@ PlanStage::StageState LimitStage::doWork(WorkingSetID* out) {
if (PlanStage::ADVANCED == status) {
*out = id;
--_numToReturn;
- } else if (PlanStage::FAILURE == status || PlanStage::DEAD == status) {
+ } else if (PlanStage::FAILURE == status) {
// The stage which produces a failure is responsible for allocating a working set member
// with error details.
invariant(WorkingSet::INVALID_ID != id);
diff --git a/src/mongo/db/exec/merge_sort.cpp b/src/mongo/db/exec/merge_sort.cpp
index 79c37bb9449..9e1f3877d9e 100644
--- a/src/mongo/db/exec/merge_sort.cpp
+++ b/src/mongo/db/exec/merge_sort.cpp
@@ -131,7 +131,7 @@ PlanStage::StageState MergeSortStage::doWork(WorkingSetID* out) {
// anymore.
_noResultToMerge.pop();
return PlanStage::NEED_TIME;
- } else if (PlanStage::FAILURE == code || PlanStage::DEAD == code) {
+ } else if (PlanStage::FAILURE == code) {
// The stage which produces a failure is responsible for allocating a working set member
// with error details.
invariant(WorkingSet::INVALID_ID != id);
diff --git a/src/mongo/db/exec/or.cpp b/src/mongo/db/exec/or.cpp
index d98ae0228fd..cd837084f59 100644
--- a/src/mongo/db/exec/or.cpp
+++ b/src/mongo/db/exec/or.cpp
@@ -108,7 +108,7 @@ PlanStage::StageState OrStage::doWork(WorkingSetID* out) {
} else {
return PlanStage::NEED_TIME;
}
- } else if (PlanStage::FAILURE == childStatus || PlanStage::DEAD == childStatus) {
+ } else if (PlanStage::FAILURE == childStatus) {
// The stage which produces a failure is responsible for allocating a working set member
// with error details.
invariant(WorkingSet::INVALID_ID != id);
diff --git a/src/mongo/db/exec/plan_stage.h b/src/mongo/db/exec/plan_stage.h
index 7aa5f725f85..37e8d471780 100644
--- a/src/mongo/db/exec/plan_stage.h
+++ b/src/mongo/db/exec/plan_stage.h
@@ -163,10 +163,6 @@ public:
// on the WSM that the held WSID refers to.
NEED_YIELD,
- // Something went wrong but it's not an internal error. Perhaps our collection was
- // dropped or state deleted.
- DEAD,
-
// Something has gone unrecoverably wrong. Stop running this query.
// If the out parameter does not refer to an invalid working set member,
// call WorkingSetCommon::getStatusMemberObject() to get details on the failure.
@@ -184,8 +180,6 @@ public:
return "NEED_TIME";
} else if (NEED_YIELD == state) {
return "NEED_YIELD";
- } else if (DEAD == state) {
- return "DEAD";
} else {
verify(FAILURE == state);
return "FAILURE";
diff --git a/src/mongo/db/exec/projection.cpp b/src/mongo/db/exec/projection.cpp
index 0a559fb2a1c..38b32564798 100644
--- a/src/mongo/db/exec/projection.cpp
+++ b/src/mongo/db/exec/projection.cpp
@@ -170,7 +170,7 @@ PlanStage::StageState ProjectionStage::doWork(WorkingSetID* out) {
}
*out = id;
- } else if (PlanStage::FAILURE == status || PlanStage::DEAD == status) {
+ } else if (PlanStage::FAILURE == status) {
// The stage which produces a failure is responsible for allocating a working set member
// with error details.
invariant(WorkingSet::INVALID_ID != id);
diff --git a/src/mongo/db/exec/queued_data_stage.cpp b/src/mongo/db/exec/queued_data_stage.cpp
index 86fba94a4fb..30b07c51af4 100644
--- a/src/mongo/db/exec/queued_data_stage.cpp
+++ b/src/mongo/db/exec/queued_data_stage.cpp
@@ -58,9 +58,8 @@ PlanStage::StageState QueuedDataStage::doWork(WorkingSetID* out) {
*out = _members.front();
_members.pop();
break;
- case PlanStage::DEAD:
case PlanStage::FAILURE:
- // On DEAD or FAILURE, this stage is reponsible for allocating the WorkingSetMember with
+ // On FAILURE, this stage is reponsible for allocating the WorkingSetMember with
// the error details.
*out = WorkingSetCommon::allocateStatusMember(
_ws, Status(ErrorCodes::InternalError, "Queued data stage failure"));
diff --git a/src/mongo/db/exec/skip.cpp b/src/mongo/db/exec/skip.cpp
index f17fdee7b81..f2c38003098 100644
--- a/src/mongo/db/exec/skip.cpp
+++ b/src/mongo/db/exec/skip.cpp
@@ -69,7 +69,7 @@ PlanStage::StageState SkipStage::doWork(WorkingSetID* out) {
*out = id;
return PlanStage::ADVANCED;
- } else if (PlanStage::FAILURE == status || PlanStage::DEAD == status) {
+ } else if (PlanStage::FAILURE == status) {
// The stage which produces a failure is responsible for allocating a working set member
// with error details.
invariant(WorkingSet::INVALID_ID != id);
diff --git a/src/mongo/db/exec/sort.cpp b/src/mongo/db/exec/sort.cpp
index 918f2a873a1..6ddf5b4078e 100644
--- a/src/mongo/db/exec/sort.cpp
+++ b/src/mongo/db/exec/sort.cpp
@@ -151,7 +151,7 @@ PlanStage::StageState SortStage::doWork(WorkingSetID* out) {
_resultIterator = _data.begin();
_sorted = true;
return PlanStage::NEED_TIME;
- } else if (PlanStage::FAILURE == code || PlanStage::DEAD == code) {
+ } else if (PlanStage::FAILURE == code) {
// The stage which produces a failure is responsible for allocating a working set member
// with error details.
invariant(WorkingSet::INVALID_ID != id);
diff --git a/src/mongo/db/exec/trial_stage.cpp b/src/mongo/db/exec/trial_stage.cpp
index 8053fd636df..a126d5c2bc7 100644
--- a/src/mongo/db/exec/trial_stage.cpp
+++ b/src/mongo/db/exec/trial_stage.cpp
@@ -142,7 +142,6 @@ PlanStage::StageState TrialStage::_workTrialPlan(WorkingSetID* out) {
_replaceCurrentPlan(_queuedData);
return NEED_TIME;
case PlanStage::FAILURE:
- case PlanStage::DEAD:
// Either of these cause us to immediately end the trial phase and switch to the backup.
BSONObj statusObj;
WorkingSetCommon::getStatusMemberObject(*_ws, *out, &statusObj);
diff --git a/src/mongo/db/query/plan_executor_impl.cpp b/src/mongo/db/query/plan_executor_impl.cpp
index 2b5aad16259..d9641bc540c 100644
--- a/src/mongo/db/query/plan_executor_impl.cpp
+++ b/src/mongo/db/query/plan_executor_impl.cpp
@@ -626,7 +626,7 @@ PlanExecutor::ExecState PlanExecutorImpl::_getNextImpl(Snapshotted<BSONObj>* obj
}
return waitResult;
} else {
- invariant(PlanStage::DEAD == code || PlanStage::FAILURE == code);
+ invariant(PlanStage::FAILURE == code);
if (NULL != objOut) {
BSONObj statusObj;
@@ -635,7 +635,7 @@ PlanExecutor::ExecState PlanExecutorImpl::_getNextImpl(Snapshotted<BSONObj>* obj
*objOut = Snapshotted<BSONObj>(SnapshotId(), statusObj);
}
- return (PlanStage::DEAD == code) ? PlanExecutor::DEAD : PlanExecutor::FAILURE;
+ return PlanExecutor::FAILURE;
}
}
}
diff --git a/src/mongo/dbtests/query_stage_and.cpp b/src/mongo/dbtests/query_stage_and.cpp
index e3478d4bf37..bb502de5705 100644
--- a/src/mongo/dbtests/query_stage_and.cpp
+++ b/src/mongo/dbtests/query_stage_and.cpp
@@ -142,7 +142,6 @@ public:
// We shouldn't fail or be dead.
ASSERT(PlanStage::FAILURE != status);
- ASSERT(PlanStage::DEAD != status);
if (PlanStage::ADVANCED != status) {
continue;
@@ -805,9 +804,9 @@ public:
const BSONObj dataObj = fromjson("{'foo': 'bar'}");
- // Confirm PlanStage::DEAD when children contain the following WorkingSetMembers:
+ // Confirm PlanStage::FAILURE when children contain the following WorkingSetMembers:
// Child1: Data
- // Child2: NEED_TIME, DEAD
+ // Child2: NEED_TIME, FAILURE
{
WorkingSet ws;
const auto andHashStage = make_unique<AndHashStage>(&_opCtx, &ws);
@@ -824,7 +823,7 @@ public:
auto childStage2 = make_unique<QueuedDataStage>(&_opCtx, &ws);
childStage2->pushBack(PlanStage::NEED_TIME);
- childStage2->pushBack(PlanStage::DEAD);
+ childStage2->pushBack(PlanStage::FAILURE);
andHashStage->addChild(childStage1.release());
andHashStage->addChild(childStage2.release());
@@ -835,11 +834,11 @@ public:
state = andHashStage->work(&id);
}
- ASSERT_EQ(PlanStage::DEAD, state);
+ ASSERT_EQ(PlanStage::FAILURE, state);
}
- // Confirm PlanStage::DEAD when children contain the following WorkingSetMembers:
- // Child1: Data, DEAD
+ // Confirm PlanStage::FAILURE when children contain the following WorkingSetMembers:
+ // Child1: Data, FAILURE
// Child2: Data
{
WorkingSet ws;
@@ -855,7 +854,7 @@ public:
ws.transitionToRecordIdAndObj(id);
childStage1->pushBack(id);
}
- childStage1->pushBack(PlanStage::DEAD);
+ childStage1->pushBack(PlanStage::FAILURE);
auto childStage2 = make_unique<QueuedDataStage>(&_opCtx, &ws);
{
@@ -876,12 +875,12 @@ public:
state = andHashStage->work(&id);
}
- ASSERT_EQ(PlanStage::DEAD, state);
+ ASSERT_EQ(PlanStage::FAILURE, state);
}
- // Confirm PlanStage::DEAD when children contain the following WorkingSetMembers:
+ // Confirm PlanStage::FAILURE when children contain the following WorkingSetMembers:
// Child1: Data
- // Child2: Data, DEAD
+ // Child2: Data, FAILURE
{
WorkingSet ws;
const auto andHashStage = make_unique<AndHashStage>(&_opCtx, &ws);
@@ -905,7 +904,7 @@ public:
ws.transitionToRecordIdAndObj(id);
childStage2->pushBack(id);
}
- childStage2->pushBack(PlanStage::DEAD);
+ childStage2->pushBack(PlanStage::FAILURE);
andHashStage->addChild(childStage1.release());
andHashStage->addChild(childStage2.release());
@@ -916,7 +915,7 @@ public:
state = andHashStage->work(&id);
}
- ASSERT_EQ(PlanStage::DEAD, state);
+ ASSERT_EQ(PlanStage::FAILURE, state);
}
}
};
diff --git a/src/mongo/dbtests/query_stage_cached_plan.cpp b/src/mongo/dbtests/query_stage_cached_plan.cpp
index bd2dc068672..94e20f10f98 100644
--- a/src/mongo/dbtests/query_stage_cached_plan.cpp
+++ b/src/mongo/dbtests/query_stage_cached_plan.cpp
@@ -132,7 +132,6 @@ public:
state = cachedPlanStage->work(&id);
ASSERT_NE(state, PlanStage::FAILURE);
- ASSERT_NE(state, PlanStage::DEAD);
if (state == PlanStage::ADVANCED) {
WorkingSetMember* member = ws.get(id);
diff --git a/src/mongo/dbtests/query_stage_count.cpp b/src/mongo/dbtests/query_stage_count.cpp
index 9438576b614..de40aa79b84 100644
--- a/src/mongo/dbtests/query_stage_count.cpp
+++ b/src/mongo/dbtests/query_stage_count.cpp
@@ -180,7 +180,6 @@ public:
// do some work -- assumes that one work unit counts a single doc
PlanStage::StageState state = count_stage.work(&wsid);
ASSERT_NOT_EQUALS(state, PlanStage::FAILURE);
- ASSERT_NOT_EQUALS(state, PlanStage::DEAD);
// prepare for yield
count_stage.saveState();
diff --git a/src/mongo/dbtests/query_stage_distinct.cpp b/src/mongo/dbtests/query_stage_distinct.cpp
index 32fb05dbcde..76eed91d300 100644
--- a/src/mongo/dbtests/query_stage_distinct.cpp
+++ b/src/mongo/dbtests/query_stage_distinct.cpp
@@ -289,7 +289,6 @@ public:
while (PlanStage::IS_EOF != (state = distinct.work(&wsid))) {
ASSERT_NE(PlanStage::FAILURE, state);
- ASSERT_NE(PlanStage::DEAD, state);
if (PlanStage::ADVANCED == state) {
seen.push_back(getIntFieldDotted(ws, wsid, "b"));
}
diff --git a/src/mongo/dbtests/query_stage_ensure_sorted.cpp b/src/mongo/dbtests/query_stage_ensure_sorted.cpp
index 3093a053ce2..3351b1f3eab 100644
--- a/src/mongo/dbtests/query_stage_ensure_sorted.cpp
+++ b/src/mongo/dbtests/query_stage_ensure_sorted.cpp
@@ -92,7 +92,6 @@ public:
BSONArrayBuilder arr(bob.subarrayStart("output"));
while (state != PlanStage::IS_EOF) {
state = ess.work(&id);
- ASSERT_NE(state, PlanStage::DEAD);
ASSERT_NE(state, PlanStage::FAILURE);
if (state == PlanStage::ADVANCED) {
WorkingSetMember* member = ws.get(id);
diff --git a/src/mongo/dbtests/query_stage_ixscan.cpp b/src/mongo/dbtests/query_stage_ixscan.cpp
index c6f87f48169..0f8e3b16391 100644
--- a/src/mongo/dbtests/query_stage_ixscan.cpp
+++ b/src/mongo/dbtests/query_stage_ixscan.cpp
@@ -88,7 +88,6 @@ public:
// There are certain states we shouldn't get.
ASSERT_NE(PlanStage::IS_EOF, state);
- ASSERT_NE(PlanStage::DEAD, state);
ASSERT_NE(PlanStage::FAILURE, state);
}
diff --git a/src/mongo/dbtests/query_stage_sort.cpp b/src/mongo/dbtests/query_stage_sort.cpp
index fea7abfc6a0..1acecaf4316 100644
--- a/src/mongo/dbtests/query_stage_sort.cpp
+++ b/src/mongo/dbtests/query_stage_sort.cpp
@@ -395,7 +395,6 @@ public:
PlanStage::StageState status = ss->work(&id);
if (PlanStage::ADVANCED != status) {
ASSERT_NE(status, PlanStage::FAILURE);
- ASSERT_NE(status, PlanStage::DEAD);
continue;
}
WorkingSetMember* member = exec->getWorkingSet()->get(id);
@@ -489,7 +488,6 @@ public:
PlanStage::StageState status = ss->work(&id);
if (PlanStage::ADVANCED != status) {
ASSERT_NE(status, PlanStage::FAILURE);
- ASSERT_NE(status, PlanStage::DEAD);
continue;
}
WorkingSetMember* member = exec->getWorkingSet()->get(id);
diff --git a/src/mongo/dbtests/query_stage_subplan.cpp b/src/mongo/dbtests/query_stage_subplan.cpp
index 4b550ed7b9c..e8a119c3543 100644
--- a/src/mongo/dbtests/query_stage_subplan.cpp
+++ b/src/mongo/dbtests/query_stage_subplan.cpp
@@ -502,7 +502,6 @@ TEST_F(QueryStageSubplanTest, QueryStageSubplanPlanRootedOrNE) {
while (stageState != PlanStage::IS_EOF) {
WorkingSetID id = WorkingSet::INVALID_ID;
stageState = subplan->work(&id);
- ASSERT_NE(stageState, PlanStage::DEAD);
ASSERT_NE(stageState, PlanStage::FAILURE);
if (stageState == PlanStage::ADVANCED) {
++numResults;
diff --git a/src/mongo/dbtests/query_stage_trial.cpp b/src/mongo/dbtests/query_stage_trial.cpp
index d83380c9e51..aa5cdf6ff08 100644
--- a/src/mongo/dbtests/query_stage_trial.cpp
+++ b/src/mongo/dbtests/query_stage_trial.cpp
@@ -208,9 +208,9 @@ TEST_F(TrialStageTest, AdoptsBackupPlanIfTrialPlanDies) {
auto trialPlan = std::make_unique<QueuedDataStage>(opCtx(), ws());
auto backupPlan = std::make_unique<QueuedDataStage>(opCtx(), ws());
- // Seed the trial plan with 2 results followed by a PlanStage::DEAD.
+ // Seed the trial plan with 2 results followed by a PlanStage::FAILURE.
queueData({BSON("_id" << 0), BSON("_id" << 1)}, trialPlan.get());
- trialPlan->pushBack(PlanStage::DEAD);
+ trialPlan->pushBack(PlanStage::FAILURE);
// Seed the backup plan with 20 different results, so that we can validate that we see the
// correct dataset once the trial phase is complete.
@@ -220,8 +220,8 @@ TEST_F(TrialStageTest, AdoptsBackupPlanIfTrialPlanDies) {
}
queueData(backupResults, backupPlan.get());
- // We schedule the trial to run for 10 works. Because we will encounter a PlanStage::DEAD before
- // this point, the trial will complete early and the backup plan will be adopted.
+ // We schedule the trial to run for 10 works. Because we will encounter a PlanStage::FAILURE
+ // before this point, the trial will complete early and the backup plan will be adopted.
auto trialStage = std::make_unique<TrialStage>(
opCtx(), ws(), std::move(trialPlan), std::move(backupPlan), 10, 0.75);