From bd6d9a07a669891f18d0fb6a0f9b1535a6f13524 Mon Sep 17 00:00:00 2001
From: Pawel Terlecki
Date: Fri, 1 Feb 2019 18:43:03 -0500
Subject: SERVER-38316 Consolidate PlanExecutor::DEAD and PlanExecutor::FAILURE

Takes care of the PlanExecutor state handling in all call sites. In some
cases we now log more information for scenarios that formerly returned
the DEAD state.
---
 src/mongo/db/catalog/capped_utils.cpp              |  7 +------
 src/mongo/db/commands/dbcommands.cpp               |  2 +-
 src/mongo/db/commands/dbcommands_d.cpp             |  2 +-
 src/mongo/db/commands/distinct.cpp                 |  2 +-
 src/mongo/db/commands/find_and_modify.cpp          |  2 +-
 src/mongo/db/commands/find_cmd.cpp                 |  2 +-
 src/mongo/db/commands/getmore_cmd.cpp              | 10 ++++-----
 src/mongo/db/commands/mr.cpp                       |  2 +-
 src/mongo/db/exec/multi_plan.cpp                   | 11 +++++-----
 src/mongo/db/exec/stagedebug_cmd.cpp               |  7 +++----
 src/mongo/db/pipeline/document_source_cursor.cpp   |  3 +--
 src/mongo/db/query/find.cpp                        | 11 +++++-----
 src/mongo/db/query/plan_executor.h                 | 24 +++++++---------------
 src/mongo/db/query/plan_executor_impl.cpp          | 10 ++++-----
 src/mongo/db/query/plan_executor_impl.h            |  6 +++---
 src/mongo/db/repl/rs_rollback.cpp                  |  9 ++++----
 src/mongo/db/repl/storage_interface_impl.cpp       |  1 -
 src/mongo/db/s/check_sharding_index_command.cpp    |  2 +-
 src/mongo/db/s/collection_range_deleter.cpp        |  7 +++----
 .../db/s/migration_chunk_cloner_source_legacy.cpp  |  2 +-
 src/mongo/db/s/split_vector.cpp                    |  2 +-
 src/mongo/dbtests/query_plan_executor.cpp          |  8 ++++----
 src/mongo/dbtests/query_stage_and.cpp              |  2 +-
 23 files changed, 56 insertions(+), 78 deletions(-)

diff --git a/src/mongo/db/catalog/capped_utils.cpp b/src/mongo/db/catalog/capped_utils.cpp
index 6e34677f61d..cf244eddeb9 100644
--- a/src/mongo/db/catalog/capped_utils.cpp
+++ b/src/mongo/db/catalog/capped_utils.cpp
@@ -199,12 +199,7 @@ void mongo::cloneCollectionAsCapped(OperationContext* opCtx,
                 break;
             }
             default:
-                // Unreachable as:
-                //   1) We require a read lock (at a minimum) on the "from" collection
-                //      and won't yield, preventing collection drop and PlanExecutor::DEAD
-                //   2) PlanExecutor::FAILURE is only returned on PlanStage::FAILURE. The
-                //      CollectionScan PlanStage does not have a FAILURE scenario.
-                //   3) All other PlanExecutor states are handled above
+                // A collection scan plan which does not yield should never fail.
                 MONGO_UNREACHABLE;
         }

diff --git a/src/mongo/db/commands/dbcommands.cpp b/src/mongo/db/commands/dbcommands.cpp
index 2190cdc3910..6c8b86af81a 100644
--- a/src/mongo/db/commands/dbcommands.cpp
+++ b/src/mongo/db/commands/dbcommands.cpp
@@ -496,7 +496,7 @@ public:
             }
         }

-        if (PlanExecutor::FAILURE == state || PlanExecutor::DEAD == state) {
+        if (PlanExecutor::FAILURE == state) {
             warning() << "Internal error while reading " << ns;
             uassertStatusOK(WorkingSetCommon::getMemberObjectStatus(obj).withContext(
                 "Executor error while reading during dataSize command"));

diff --git a/src/mongo/db/commands/dbcommands_d.cpp b/src/mongo/db/commands/dbcommands_d.cpp
index 5151c566467..ad0b2096050 100644
--- a/src/mongo/db/commands/dbcommands_d.cpp
+++ b/src/mongo/db/commands/dbcommands_d.cpp
@@ -320,7 +320,7 @@ public:
                 exec->restoreState();
             }

-            if (PlanExecutor::DEAD == state || PlanExecutor::FAILURE == state) {
+            if (PlanExecutor::FAILURE == state) {
                 uassertStatusOK(WorkingSetCommon::getMemberObjectStatus(obj).withContext(
                     "Executor error during filemd5 command"));
             }

diff --git a/src/mongo/db/commands/distinct.cpp b/src/mongo/db/commands/distinct.cpp
index e6c9227ee26..4d0f1dd70ea 100644
--- a/src/mongo/db/commands/distinct.cpp
+++ b/src/mongo/db/commands/distinct.cpp
@@ -242,7 +242,7 @@ public:
         }

         // Return an error if execution fails for any reason.
-        if (PlanExecutor::FAILURE == state || PlanExecutor::DEAD == state) {
+        if (PlanExecutor::FAILURE == state) {
             log() << "Plan executor error during distinct command: "
                   << redact(PlanExecutor::statestr(state))
                   << ", stats: " << redact(Explain::getWinningPlanStats(executor.getValue().get()));

diff --git a/src/mongo/db/commands/find_and_modify.cpp b/src/mongo/db/commands/find_and_modify.cpp
index b3920af779d..6de5962ad71 100644
--- a/src/mongo/db/commands/find_and_modify.cpp
+++ b/src/mongo/db/commands/find_and_modify.cpp
@@ -92,7 +92,7 @@ boost::optional<BSONObj> advanceExecutor(OperationContext* opCtx,
         return {std::move(value)};
     }

-    if (PlanExecutor::FAILURE == state || PlanExecutor::DEAD == state) {
+    if (PlanExecutor::FAILURE == state) {
         error() << "Plan executor error during findAndModify: " << PlanExecutor::statestr(state)
                 << ", stats: " << redact(Explain::getWinningPlanStats(exec));

diff --git a/src/mongo/db/commands/find_cmd.cpp b/src/mongo/db/commands/find_cmd.cpp
index ad5b9c4d1fd..377e7ccbe15 100644
--- a/src/mongo/db/commands/find_cmd.cpp
+++ b/src/mongo/db/commands/find_cmd.cpp
@@ -372,7 +372,7 @@ public:
         }

         // Throw an assertion if query execution fails for any reason.
-        if (PlanExecutor::FAILURE == state || PlanExecutor::DEAD == state) {
+        if (PlanExecutor::FAILURE == state) {
             firstBatch.abandon();

             LOG(1) << "Plan executor error during find command: "
                    << PlanExecutor::statestr(state)

diff --git a/src/mongo/db/commands/getmore_cmd.cpp b/src/mongo/db/commands/getmore_cmd.cpp
index b0a34a2bbf5..d3036bb6fe3 100644
--- a/src/mongo/db/commands/getmore_cmd.cpp
+++ b/src/mongo/db/commands/getmore_cmd.cpp
@@ -247,11 +247,11 @@ public:
         }

         switch (*state) {
-            case PlanExecutor::FAILURE:
-                // Log an error message and then perform the same cleanup as DEAD.
-                error() << "GetMore command executor error: " << PlanExecutor::statestr(*state)
-                        << ", stats: " << redact(Explain::getWinningPlanStats(exec));
-            case PlanExecutor::DEAD: {
+            case PlanExecutor::FAILURE: {
+                // Log an error message and then perform the cleanup.
+ error() << "GetMore command executor error: FAILURE, stats: " + << redact(Explain::getWinningPlanStats(exec)); + nextBatch->abandon(); // We should always have a valid status member object at this point. auto status = WorkingSetCommon::getMemberObjectStatus(obj); diff --git a/src/mongo/db/commands/mr.cpp b/src/mongo/db/commands/mr.cpp index 716ffce1d7b..450fe0c53dc 100644 --- a/src/mongo/db/commands/mr.cpp +++ b/src/mongo/db/commands/mr.cpp @@ -1563,7 +1563,7 @@ public: break; } - if (PlanExecutor::DEAD == execState || PlanExecutor::FAILURE == execState) { + if (PlanExecutor::FAILURE == execState) { uasserted(ErrorCodes::OperationFailed, str::stream() << "Executor error during mapReduce command: " << WorkingSetCommon::toStatusString(o)); diff --git a/src/mongo/db/exec/multi_plan.cpp b/src/mongo/db/exec/multi_plan.cpp index c670dddc02a..2c5bed6c253 100644 --- a/src/mongo/db/exec/multi_plan.cpp +++ b/src/mongo/db/exec/multi_plan.cpp @@ -379,16 +379,17 @@ bool MultiPlanStage::workAllPlans(size_t numResults, PlanYieldPolicy* yieldPolic return false; } } else if (PlanStage::NEED_TIME != state) { - // FAILURE or DEAD. Do we want to just tank that plan and try the rest? We - // probably want to fail globally as this shouldn't happen anyway. + // On FAILURE, mark this candidate as failed, but keep executing the other + // candidates. The MultiPlanStage as a whole only fails when every candidate + // plan fails. candidate.failed = true; ++_failureCount; // Propagate most recent seen failure to parent. - if (PlanStage::FAILURE == state) { - _statusMemberId = id; - } + invariant(state == PlanStage::FAILURE); + _statusMemberId = id; + if (_failureCount == _candidates.size()) { _failure = true; diff --git a/src/mongo/db/exec/stagedebug_cmd.cpp b/src/mongo/db/exec/stagedebug_cmd.cpp index 12a5dd29745..fb3c5d8929f 100644 --- a/src/mongo/db/exec/stagedebug_cmd.cpp +++ b/src/mongo/db/exec/stagedebug_cmd.cpp @@ -207,10 +207,9 @@ public: resultBuilder.done(); - if (PlanExecutor::FAILURE == state || PlanExecutor::DEAD == state) { - error() << "Plan executor error during StageDebug command: " - << PlanExecutor::statestr(state) - << ", stats: " << redact(Explain::getWinningPlanStats(exec.get())); + if (PlanExecutor::FAILURE == state) { + error() << "Plan executor error during StageDebug command: FAILURE, stats: " + << redact(Explain::getWinningPlanStats(exec.get())); uassertStatusOK(WorkingSetCommon::getMemberObjectStatus(obj).withContext( "Executor error during StageDebug command")); diff --git a/src/mongo/db/pipeline/document_source_cursor.cpp b/src/mongo/db/pipeline/document_source_cursor.cpp index 978f62e4f2b..fe78bda703c 100644 --- a/src/mongo/db/pipeline/document_source_cursor.cpp +++ b/src/mongo/db/pipeline/document_source_cursor.cpp @@ -139,7 +139,7 @@ void DocumentSourceCursor::loadBatch() { // must hold a collection lock to destroy '_exec', but we can only assume that our locks are // still held if '_exec' did not end in an error. If '_exec' encountered an error during a // yield, the locks might be yielded. - if (state != PlanExecutor::DEAD && state != PlanExecutor::FAILURE) { + if (state != PlanExecutor::FAILURE) { cleanupExecutor(); } } @@ -148,7 +148,6 @@ void DocumentSourceCursor::loadBatch() { case PlanExecutor::ADVANCED: case PlanExecutor::IS_EOF: return; // We've reached our limit or exhausted the cursor. 
-        case PlanExecutor::DEAD:
         case PlanExecutor::FAILURE: {
             _execStatus = WorkingSetCommon::getMemberObjectStatus(resultObj).withContext(
                 "Error in $cursor stage");

diff --git a/src/mongo/db/query/find.cpp b/src/mongo/db/query/find.cpp
index d6e4ce7a1e0..da3ff98fb0c 100644
--- a/src/mongo/db/query/find.cpp
+++ b/src/mongo/db/query/find.cpp
@@ -84,7 +84,7 @@ bool shouldSaveCursor(OperationContext* opCtx,
                       const Collection* collection,
                       PlanExecutor::ExecState finalState,
                       PlanExecutor* exec) {
-    if (PlanExecutor::FAILURE == finalState || PlanExecutor::DEAD == finalState) {
+    if (PlanExecutor::FAILURE == finalState) {
         return false;
     }

@@ -109,7 +109,7 @@ bool shouldSaveCursor(OperationContext* opCtx,
 bool shouldSaveCursorGetMore(PlanExecutor::ExecState finalState,
                              PlanExecutor* exec,
                              bool isTailable) {
-    if (PlanExecutor::FAILURE == finalState || PlanExecutor::DEAD == finalState) {
+    if (PlanExecutor::FAILURE == finalState) {
         return false;
     }

@@ -199,11 +199,10 @@ void generateBatch(int ntoreturn,

     // Propagate any errors to the caller.
     switch (*state) {
-        // Log an error message and then perform the same cleanup as DEAD.
-        case PlanExecutor::FAILURE:
+        // Log an error message and then perform the cleanup.
+        case PlanExecutor::FAILURE: {
             error() << "getMore executor error, stats: "
                     << redact(Explain::getWinningPlanStats(exec));
-        case PlanExecutor::DEAD: {
             // We should always have a valid status object by this point.
             auto status = WorkingSetCommon::getMemberObjectStatus(obj);
             invariant(!status.isOK());

@@ -646,7 +645,7 @@ std::string runQuery(OperationContext* opCtx,
     }

     // Caller expects exceptions thrown in certain cases.
-    if (PlanExecutor::FAILURE == state || PlanExecutor::DEAD == state) {
+    if (PlanExecutor::FAILURE == state) {
         error() << "Plan executor error during find: " << PlanExecutor::statestr(state)
                 << ", stats: " << redact(Explain::getWinningPlanStats(exec.get()));
         uassertStatusOKWithContext(WorkingSetCommon::getMemberObjectStatus(obj),

diff --git a/src/mongo/db/query/plan_executor.h b/src/mongo/db/query/plan_executor.h
index 3ecee6edb5a..aafafd6cc16 100644
--- a/src/mongo/db/query/plan_executor.h
+++ b/src/mongo/db/query/plan_executor.h
@@ -78,20 +78,9 @@ public:
         // We're EOF. We won't return any more results (edge case exception: capped+tailable).
         IS_EOF,

-        // The plan executor died, usually due to a concurrent catalog event such as a collection
-        // drop.
-        //
-        // If the underlying PlanStage has any information on the error, it will be available in
-        // the objOut parameter. Call WorkingSetCommon::toStatusString() to retrieve the error
-        // details from the output BSON object.
-        //
-        // The PlanExecutor is no longer capable of executing. The caller may extract stats from the
-        // underlying plan stages, but should not attempt to do anything else with the executor
-        // other than dispose() and destroy it.
-        DEAD,
-
         // getNext() was asked for data it cannot provide, or the underlying PlanStage had an
-        // unrecoverable error.
+        // unrecoverable error, or the executor died, usually due to a concurrent catalog event
+        // such as a collection drop.
         //
         // If the underlying PlanStage has any information on the error, it will be available in
         // the objOut parameter. Call WorkingSetCommon::toStatusString() to retrieve the error

@@ -110,7 +99,7 @@ public:
     enum YieldPolicy {
         // Any call to getNext() may yield. In particular, the executor may die on any call to
         // getNext() due to a required index or collection becoming invalid during yield. If this
-        // occurs, getNext() will produce an error during yield recovery and will return DEAD.
+        // occurs, getNext() will produce an error during yield recovery and will return FAILURE.
         // Additionally, this will handle all WriteConflictExceptions that occur while processing
         // the query.
         YIELD_AUTO,

@@ -142,12 +131,12 @@ public:
         INTERRUPT_ONLY,

         // Used for testing, this yield policy will cause the PlanExecutor to time out on the first
-        // yield, returning DEAD with an error object encoding a ErrorCodes::ExceededTimeLimit
+        // yield, returning FAILURE with an error object encoding a ErrorCodes::ExceededTimeLimit
         // message.
         ALWAYS_TIME_OUT,

         // Used for testing, this yield policy will cause the PlanExecutor to be marked as killed on
-        // the first yield, returning DEAD with an error object encoding a
+        // the first yield, returning FAILURE with an error object encoding a
         // ErrorCodes::QueryPlanKilled message.
         ALWAYS_MARK_KILLED,
     };

@@ -393,7 +382,8 @@ public:
     /**
      * Notifies a PlanExecutor that it should die. Callers must specify the reason for why this
-     * executor is being killed. Subsequent calls to getNext() will return DEAD, and fill 'objOut'
+     * executor is being killed. Subsequent calls to getNext() will return FAILURE, and fill
+     * 'objOut'
      * with an error reflecting 'killStatus'. If this method is called multiple times, only the
      * first 'killStatus' will be retained. It is an error to call this method with Status::OK.
      */

diff --git a/src/mongo/db/query/plan_executor_impl.cpp b/src/mongo/db/query/plan_executor_impl.cpp
index d9641bc540c..88b0da30c31 100644
--- a/src/mongo/db/query/plan_executor_impl.cpp
+++ b/src/mongo/db/query/plan_executor_impl.cpp
@@ -293,8 +293,6 @@ string PlanExecutor::statestr(ExecState s) {
         return "ADVANCED";
     } else if (PlanExecutor::IS_EOF == s) {
         return "IS_EOF";
-    } else if (PlanExecutor::DEAD == s) {
-        return "DEAD";
     } else {
         verify(PlanExecutor::FAILURE == s);
         return "FAILURE";

@@ -493,7 +491,7 @@ PlanExecutor::ExecState PlanExecutorImpl::_waitForInserts(CappedInsertNotifierData* notifierData,
         *errorObj = Snapshotted<BSONObj>(SnapshotId(),
                                          WorkingSetCommon::buildMemberStatusObject(yieldResult));
     }
-    return DEAD;
+    return FAILURE;
 }

@@ -513,7 +511,7 @@ PlanExecutor::ExecState PlanExecutorImpl::_getNextImpl(Snapshotted<BSONObj>* objOut,
             *objOut = Snapshotted<BSONObj>(SnapshotId(),
                                            WorkingSetCommon::buildMemberStatusObject(_killStatus));
         }
-        return PlanExecutor::DEAD;
+        return PlanExecutor::FAILURE;
     }

     if (!_stash.empty()) {

@@ -547,7 +545,7 @@ PlanExecutor::ExecState PlanExecutorImpl::_getNextImpl(Snapshotted<BSONObj>* objOut,
                 *objOut = Snapshotted<BSONObj>(
                     SnapshotId(), WorkingSetCommon::buildMemberStatusObject(yieldStatus));
             }
-            return PlanExecutor::DEAD;
+            return PlanExecutor::FAILURE;
         }
     }

@@ -670,7 +668,7 @@ Status PlanExecutorImpl::executePlan() {
         state = this->getNext(&obj, NULL);
     }

-    if (PlanExecutor::DEAD == state || PlanExecutor::FAILURE == state) {
+    if (PlanExecutor::FAILURE == state) {
         if (isMarkedAsKilled()) {
             return _killStatus;
         }

diff --git a/src/mongo/db/query/plan_executor_impl.h b/src/mongo/db/query/plan_executor_impl.h
index 1fc8e52f94d..7688a3cdba3 100644
--- a/src/mongo/db/query/plan_executor_impl.h
+++ b/src/mongo/db/query/plan_executor_impl.h
@@ -133,9 +133,9 @@ private:
     /**
      * Yields locks and waits for inserts to the collection. Returns ADVANCED if there has been an
-     * insertion and there may be new results. Returns DEAD if the PlanExecutor was killed during a
-     * yield. This method is only to be used for tailable and awaitData cursors, so rather than
-     * returning DEAD if the operation has exceeded its time limit, we return IS_EOF to preserve
+     * insertion and there may be new results. Returns FAILURE if the PlanExecutor was killed during
+     * a yield. This method is only to be used for tailable and awaitData cursors, so rather than
+     * returning FAILURE if the operation has exceeded its time limit, we return IS_EOF to preserve
      * this PlanExecutor for future use.
      *
      * If an error is encountered and 'errorObj' is provided, it is populated with an object

diff --git a/src/mongo/db/repl/rs_rollback.cpp b/src/mongo/db/repl/rs_rollback.cpp
index 12a2268a824..61f3c91b5df 100644
--- a/src/mongo/db/repl/rs_rollback.cpp
+++ b/src/mongo/db/repl/rs_rollback.cpp
@@ -763,11 +763,10 @@ void dropCollection(OperationContext* opCtx,
     }

     // If we exited the above for loop with any other execState than IS_EOF, this means that
-    // a FAILURE or DEAD state was returned. If a DEAD state occurred, the collection or
-    // database that we are attempting to save may no longer be valid. If a FAILURE state
-    // was returned, either an unrecoverable error was thrown by exec, or we attempted to
-    // retrieve data that could not be provided by the PlanExecutor. In both of these cases
-    // it is necessary for a full resync of the server.
+    // a FAILURE state was returned, meaning that either an unrecoverable error was
+    // thrown by exec, or we attempted to retrieve data that could not be provided by
+    // the PlanExecutor. In both of these cases a full resync of the server is
+    // necessary.

     if (execState != PlanExecutor::IS_EOF) {
         if (execState == PlanExecutor::FAILURE &&

diff --git a/src/mongo/db/repl/storage_interface_impl.cpp b/src/mongo/db/repl/storage_interface_impl.cpp
index 28626a54c5e..b473d08aa62 100644
--- a/src/mongo/db/repl/storage_interface_impl.cpp
+++ b/src/mongo/db/repl/storage_interface_impl.cpp
@@ -702,7 +702,6 @@ StatusWith<std::vector<BSONObj>> _findOrDeleteDocuments(
             case PlanExecutor::IS_EOF:
                 return Result(docs);
             case PlanExecutor::FAILURE:
-            case PlanExecutor::DEAD:
                 return WorkingSetCommon::getMemberObjectStatus(out);
             default:
                 MONGO_UNREACHABLE;

diff --git a/src/mongo/db/s/check_sharding_index_command.cpp b/src/mongo/db/s/check_sharding_index_command.cpp
index cf25be9b60e..6ff43a68805 100644
--- a/src/mongo/db/s/check_sharding_index_command.cpp
+++ b/src/mongo/db/s/check_sharding_index_command.cpp
@@ -202,7 +202,7 @@ public:
             }
         }

-        if (PlanExecutor::DEAD == state || PlanExecutor::FAILURE == state) {
+        if (PlanExecutor::FAILURE == state) {
             uassertStatusOK(WorkingSetCommon::getMemberObjectStatus(currKey).withContext(
                 "Executor error while checking sharding index"));
         }

diff --git a/src/mongo/db/s/collection_range_deleter.cpp b/src/mongo/db/s/collection_range_deleter.cpp
index 4ba855355e8..42d9e5cca23 100644
--- a/src/mongo/db/s/collection_range_deleter.cpp
+++ b/src/mongo/db/s/collection_range_deleter.cpp
@@ -420,11 +420,10 @@ StatusWith<int> CollectionRangeDeleter::_doDeletion(OperationContext* opCtx,
             break;
         }

-        if (state == PlanExecutor::FAILURE || state == PlanExecutor::DEAD) {
+        if (state == PlanExecutor::FAILURE) {
             warning() << PlanExecutor::statestr(state) << " - cursor error while trying to delete "
-                      << redact(min) << " to " << redact(max) << " in " << nss << ": "
-                      << redact(WorkingSetCommon::toStatusString(deletedObj))
-                      << ", stats: " << Explain::getWinningPlanStats(exec.get());
+                      << redact(min) << " to " << redact(max) << " in " << nss
+                      << ": FAILURE, stats: " << Explain::getWinningPlanStats(exec.get());
" << Explain::getWinningPlanStats(exec.get()); break; } diff --git a/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp b/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp index 716083aed25..04917e6aa75 100644 --- a/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp +++ b/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp @@ -617,7 +617,7 @@ Status MigrationChunkClonerSourceLegacy::_storeCurrentLocs(OperationContext* opC } } - if (PlanExecutor::DEAD == state || PlanExecutor::FAILURE == state) { + if (PlanExecutor::FAILURE == state) { return WorkingSetCommon::getMemberObjectStatus(obj).withContext( "Executor error while scanning for documents belonging to chunk"); } diff --git a/src/mongo/db/s/split_vector.cpp b/src/mongo/db/s/split_vector.cpp index 3ec13d27614..35d82e1cc3e 100644 --- a/src/mongo/db/s/split_vector.cpp +++ b/src/mongo/db/s/split_vector.cpp @@ -215,7 +215,7 @@ StatusWith> splitVector(OperationContext* opCtx, state = exec->getNext(&currKey, NULL); } - if (PlanExecutor::DEAD == state || PlanExecutor::FAILURE == state) { + if (PlanExecutor::FAILURE == state) { return WorkingSetCommon::getMemberObjectStatus(currKey).withContext( "Executor error during splitVector command"); } diff --git a/src/mongo/dbtests/query_plan_executor.cpp b/src/mongo/dbtests/query_plan_executor.cpp index 12f1cda5a3f..faf37a9dd04 100644 --- a/src/mongo/dbtests/query_plan_executor.cpp +++ b/src/mongo/dbtests/query_plan_executor.cpp @@ -247,7 +247,7 @@ TEST_F(PlanExecutorTest, ShouldReportErrorIfExceedsTimeLimitDuringYield) { auto exec = makeCollScanExec(coll, filterObj, PlanExecutor::YieldPolicy::ALWAYS_TIME_OUT); BSONObj resultObj; - ASSERT_EQ(PlanExecutor::DEAD, exec->getNext(&resultObj, nullptr)); + ASSERT_EQ(PlanExecutor::FAILURE, exec->getNext(&resultObj, nullptr)); ASSERT_EQ(ErrorCodes::ExceededTimeLimit, WorkingSetCommon::getMemberObjectStatus(resultObj)); } @@ -265,7 +265,7 @@ TEST_F(PlanExecutorTest, ShouldReportErrorIfKilledDuringYieldButIsTailableAndAwa TailableModeEnum::kTailableAndAwaitData); BSONObj resultObj; - ASSERT_EQ(PlanExecutor::DEAD, exec->getNext(&resultObj, nullptr)); + ASSERT_EQ(PlanExecutor::FAILURE, exec->getNext(&resultObj, nullptr)); ASSERT_EQ(ErrorCodes::ExceededTimeLimit, WorkingSetCommon::getMemberObjectStatus(resultObj)); } @@ -281,7 +281,7 @@ TEST_F(PlanExecutorTest, ShouldNotSwallowExceedsTimeLimitDuringYieldButIsTailabl coll, filterObj, PlanExecutor::YieldPolicy::ALWAYS_TIME_OUT, TailableModeEnum::kTailable); BSONObj resultObj; - ASSERT_EQ(PlanExecutor::DEAD, exec->getNext(&resultObj, nullptr)); + ASSERT_EQ(PlanExecutor::FAILURE, exec->getNext(&resultObj, nullptr)); ASSERT_EQ(ErrorCodes::ExceededTimeLimit, WorkingSetCommon::getMemberObjectStatus(resultObj)); } @@ -296,7 +296,7 @@ TEST_F(PlanExecutorTest, ShouldReportErrorIfKilledDuringYield) { auto exec = makeCollScanExec(coll, filterObj, PlanExecutor::YieldPolicy::ALWAYS_MARK_KILLED); BSONObj resultObj; - ASSERT_EQ(PlanExecutor::DEAD, exec->getNext(&resultObj, nullptr)); + ASSERT_EQ(PlanExecutor::FAILURE, exec->getNext(&resultObj, nullptr)); ASSERT_EQ(ErrorCodes::QueryPlanKilled, WorkingSetCommon::getMemberObjectStatus(resultObj)); } diff --git a/src/mongo/dbtests/query_stage_and.cpp b/src/mongo/dbtests/query_stage_and.cpp index bb502de5705..bb1a3580797 100644 --- a/src/mongo/dbtests/query_stage_and.cpp +++ b/src/mongo/dbtests/query_stage_and.cpp @@ -132,7 +132,7 @@ public: /** * Gets the next result from 'stage'. 
      *
-     * Fails if the stage fails or returns DEAD, if the returned working
+     * Fails if the stage fails or returns FAILURE, if the returned working
      * set member is not fetched, or if there are no more results.
      */
     BSONObj getNext(PlanStage* stage, WorkingSet* ws) {
--
cgit v1.2.1
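
For illustration, below is a minimal standalone C++ sketch of the caller
contract this patch converges on. The types here (ExecState, ToyExecutor) are
hypothetical stand-ins, not MongoDB source: getNext() loops distinguish only
ADVANCED, IS_EOF, and FAILURE, and every error path -- including the former
DEAD "killed during yield" case -- reports FAILURE with the error details
carried in the out-parameter.

    #include <iostream>
    #include <string>

    // Toy stand-ins for the post-patch PlanExecutor states: DEAD is gone, so
    // every terminal error is FAILURE.
    enum class ExecState { ADVANCED, IS_EOF, FAILURE };

    struct ToyExecutor {
        bool markedKilled = false;  // the scenario that used to return DEAD
        int remaining = 2;

        ExecState getNext(std::string* out) {
            if (markedKilled) {
                *out = "QueryPlanKilled";  // error details travel via the out-param
                return ExecState::FAILURE;
            }
            if (remaining == 0) {
                return ExecState::IS_EOF;
            }
            --remaining;
            *out = "doc";
            return ExecState::ADVANCED;
        }
    };

    int main() {
        ToyExecutor exec;
        std::string obj;
        ExecState state;
        while ((state = exec.getNext(&obj)) == ExecState::ADVANCED) {
            std::cout << "got: " << obj << '\n';
        }
        // Callers now test a single terminal error state instead of
        // (FAILURE == state || DEAD == state).
        if (state == ExecState::FAILURE) {
            std::cerr << "executor error: " << obj << '\n';
            return 1;
        }
        return 0;
    }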