author     Charlie Swanson <charlie.swanson@mongodb.com>  2018-03-07 12:29:22 -0500
committer  Charlie Swanson <charlie.swanson@mongodb.com>  2018-03-15 16:53:24 -0400
commit     332f23a46c9f28e2c60cac7c19a93abac0c8359c (patch)
tree       5d8b688974ac237946535560cd8b21d23a243ea7
parent     aa78106c9e096b74d1a2247878ca6bd06fb29bed (diff)
download   mongo-332f23a46c9f28e2c60cac7c19a93abac0c8359c.tar.gz
SERVER-33542 Avoid swallowing error codes from PlanExecutor errors
(cherry picked from commit 3f723beb10caf6bd809b7df05c34b6a97402dadb)
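
The recurring change across these files is to return the Status already recorded in the failed WorkingSet member object (retrieved with WorkingSetCommon::getMemberObjectStatus() and annotated via Status::withContext()) instead of constructing a fresh Status(ErrorCodes::OperationFailed, ...) that discards the original error code. A minimal standalone sketch of the idea follows; the simplified Status struct and numeric codes are illustrative stand-ins, not the real mongo::Status API.

    #include <iostream>
    #include <string>

    // Simplified stand-in for mongo::Status; fields and codes are illustrative only.
    struct Status {
        int code;            // e.g. 50 for ExceededTimeLimit
        std::string reason;

        // Mirrors the idea of Status::withContext(): keep the original code,
        // prepend context to the message.
        Status withContext(const std::string& context) const {
            return Status{code, context + " :: caused by :: " + reason};
        }
    };

    // Hypothetical error that a plan stage might have recorded during execution.
    Status executorError() {
        return Status{50, "operation exceeded time limit"};
    }

    int main() {
        Status original = executorError();

        // Old pattern: wrap in a new generic code, losing the original (possibly
        // retryable) code and keeping only a stringified message.
        Status swallowed{96, "Executor error during find command: " + original.reason};

        // New pattern: propagate the original code, only adding context.
        Status preserved = original.withContext("Executor error during find command");

        std::cout << "old code: " << swallowed.code << "\n";  // 96 (original code lost)
        std::cout << "new code: " << preserved.code << "\n";  // 50 (original code kept)
        std::cout << preserved.reason << "\n";
        return 0;
    }
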
-rw-r--r--  jstests/core/max_time_ms.js                                 18
-rw-r--r--  jstests/noPassthrough/aggregation_cursor_invalidations.js    2
-rw-r--r--  jstests/noPassthrough/commands_preserve_exec_error_code.js  57
-rw-r--r--  src/mongo/client/dbclientcursor.cpp                          2
-rw-r--r--  src/mongo/db/commands/dbcommands.cpp                        14
-rw-r--r--  src/mongo/db/commands/distinct.cpp                           6
-rw-r--r--  src/mongo/db/commands/find_and_modify.cpp                   13
-rw-r--r--  src/mongo/db/commands/find_cmd.cpp                           6
-rw-r--r--  src/mongo/db/commands/geo_near_cmd.cpp                       6
-rw-r--r--  src/mongo/db/commands/getmore_cmd.cpp                       39
-rw-r--r--  src/mongo/db/commands/group_cmd.cpp                          9
-rw-r--r--  src/mongo/db/exec/stagedebug_cmd.cpp                         7
-rw-r--r--  src/mongo/db/pipeline/document_source_cursor.cpp            12
-rw-r--r--  src/mongo/db/query/find.cpp                                 27
-rw-r--r--  src/mongo/db/query/plan_executor.cpp                         2
-rw-r--r--  src/mongo/db/s/check_sharding_index_command.cpp              8
-rw-r--r--  src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp      5
-rw-r--r--  src/mongo/db/s/split_vector.cpp                              5
-rw-r--r--  src/mongo/dbtests/documentsourcetests.cpp                    6
19 files changed, 140 insertions, 104 deletions
diff --git a/jstests/core/max_time_ms.js b/jstests/core/max_time_ms.js
index f10ffb0bce1..9b9b11ed7d5 100644
--- a/jstests/core/max_time_ms.js
+++ b/jstests/core/max_time_ms.js
@@ -9,9 +9,9 @@
// ]
var t = db.max_time_ms;
-var exceededTimeLimit = 50; // ErrorCodes::ExceededTimeLimit
var cursor;
var res;
+var error;
//
// Simple positive test for query: a ~300ms query with a 100ms time limit should be aborted.
@@ -26,9 +26,10 @@ cursor = t.find({
}
});
cursor.maxTimeMS(100);
-assert.throws(function() {
+error = assert.throws(function() {
cursor.itcount();
}, [], "expected query to abort due to time limit");
+assert.eq(ErrorCodes.ExceededTimeLimit, error.code);
//
// Simple negative test for query: a ~300ms query with a 10s time limit should not hit the time
@@ -73,11 +74,12 @@ assert.doesNotThrow(function() {
cursor.next();
cursor.next();
}, [], "expected batch 1 (query) to not hit the time limit");
-assert.throws(function() {
+error = assert.throws(function() {
cursor.next();
cursor.next();
cursor.next();
}, [], "expected batch 2 (getmore) to abort due to time limit");
+assert.eq(ErrorCodes.ExceededTimeLimit, error.code);
//
// Simple negative test for getmore:
@@ -130,9 +132,10 @@ cursor = t.find({
}).sort({_id: 1});
cursor.batchSize(3);
cursor.maxTimeMS(6 * 1000);
-assert.throws(function() {
+error = assert.throws(function() {
cursor.itcount();
}, [], "expected find() to abort due to time limit");
+assert.eq(ErrorCodes.ExceededTimeLimit, error.code);
//
// Many-batch negative test for getmore:
@@ -164,7 +167,7 @@ assert.doesNotThrow(function() {
t.drop();
res = t.getDB().adminCommand({sleep: 1, millis: 300, maxTimeMS: 100});
-assert(res.ok == 0 && res.code == exceededTimeLimit,
+assert(res.ok == 0 && res.code == ErrorCodes.ExceededTimeLimit,
"expected sleep command to abort due to time limit, ok=" + res.ok + ", code=" + res.code);
//
@@ -292,7 +295,7 @@ t.drop();
assert.eq(
1, t.getDB().adminCommand({configureFailPoint: "maxTimeAlwaysTimeOut", mode: "alwaysOn"}).ok);
res = t.getDB().runCommand({ping: 1, maxTimeMS: 10 * 1000});
-assert(res.ok == 0 && res.code == exceededTimeLimit,
+assert(res.ok == 0 && res.code == ErrorCodes.ExceededTimeLimit,
"expected command to trigger maxTimeAlwaysTimeOut fail point, ok=" + res.ok + ", code=" +
res.code);
assert.eq(1, t.getDB().adminCommand({configureFailPoint: "maxTimeAlwaysTimeOut", mode: "off"}).ok);
@@ -402,9 +405,10 @@ assert.commandWorked(res);
var cursor = new DBCommandCursor(t.getDB(), res.cursors[0], 5);
assert.commandWorked(
t.getDB().adminCommand({configureFailPoint: "maxTimeAlwaysTimeOut", mode: "alwaysOn"}));
-assert.throws(function() {
+error = assert.throws(function() {
cursor.itcount();
}, [], "expected query to abort due to time limit");
+assert.eq(ErrorCodes.ExceededTimeLimit, error.code);
assert.commandWorked(
t.getDB().adminCommand({configureFailPoint: "maxTimeAlwaysTimeOut", mode: "off"}));
diff --git a/jstests/noPassthrough/aggregation_cursor_invalidations.js b/jstests/noPassthrough/aggregation_cursor_invalidations.js
index 9fdee5812fe..887f97b567d 100644
--- a/jstests/noPassthrough/aggregation_cursor_invalidations.js
+++ b/jstests/noPassthrough/aggregation_cursor_invalidations.js
@@ -282,7 +282,7 @@
assert.eq(maxNumDocs, sourceCollection.count());
assert.commandFailedWithCode(
testDB.runCommand({getMore: res.cursor.id, collection: getMoreCollName}),
- ErrorCodes.QueryPlanKilled,
+ ErrorCodes.CappedPositionLost,
'expected getMore to fail because the capped collection was truncated');
// Test that killing an aggregation's cursor via the killCursors command will cause a subsequent
diff --git a/jstests/noPassthrough/commands_preserve_exec_error_code.js b/jstests/noPassthrough/commands_preserve_exec_error_code.js
new file mode 100644
index 00000000000..b8bffb260ce
--- /dev/null
+++ b/jstests/noPassthrough/commands_preserve_exec_error_code.js
@@ -0,0 +1,57 @@
+// Tests that an error encountered during PlanExecutor execution will be propagated back to the user
+// with the original error code. This is important for retryable errors like
+// 'InterruptedDueToReplStateChange',
+// and also to ensure that the error is not swallowed and the diagnostic info is not lost.
+(function() {
+ "use strict";
+
+ const mongod = MongoRunner.runMongod({});
+ assert.neq(mongod, null, "mongod failed to start up");
+ const db = mongod.getDB("test");
+ const coll = db.commands_preserve_exec_error_code;
+ coll.drop();
+
+ assert.writeOK(coll.insert([{_id: 0}, {_id: 1}, {_id: 2}]));
+ assert.commandWorked(coll.createIndex({geo: "2d"}));
+
+ assert.commandWorked(
+ db.adminCommand({configureFailPoint: "planExecutorAlwaysFails", mode: "alwaysOn"}));
+
+ function assertFailsWithInternalError(fn) {
+ const error = assert.throws(fn);
+ assert.eq(error.code, ErrorCodes.InternalError, tojson(error));
+ assert.neq(-1,
+ error.message.indexOf("planExecutorAlwaysFails"),
+ "Expected error message to be preserved");
+ }
+ function assertCmdFailsWithInternalError(cmd) {
+ const res =
+ assert.commandFailedWithCode(db.runCommand(cmd), ErrorCodes.InternalError, tojson(cmd));
+ assert.neq(-1,
+ res.errmsg.indexOf("planExecutorAlwaysFails"),
+ "Expected error message to be preserved");
+ }
+
+ assertFailsWithInternalError(() => coll.find().itcount());
+ assertFailsWithInternalError(() => coll.updateOne({_id: 1}, {$set: {x: 2}}));
+ assertFailsWithInternalError(() => coll.deleteOne({_id: 1}));
+ assertFailsWithInternalError(() => coll.count({_id: 1}));
+ assertFailsWithInternalError(() => coll.group({
+ key: "_id",
+ cond: {},
+ reduce: () => {
+ result.total += 1;
+ },
+ initial: {total: 0}
+ })
+ .itcount());
+ assertFailsWithInternalError(() => coll.aggregate([]).itcount());
+ assertCmdFailsWithInternalError({distinct: coll.getName(), key: "_id"});
+ assertCmdFailsWithInternalError({geoNear: coll.getName(), near: [0, 0]});
+ assertCmdFailsWithInternalError(
+ {findAndModify: coll.getName(), query: {_id: 1}, update: {$set: {x: 2}}});
+
+ assert.commandWorked(
+ db.adminCommand({configureFailPoint: "planExecutorAlwaysFails", mode: "off"}));
+ MongoRunner.stopMongod(mongod);
+}());
diff --git a/src/mongo/client/dbclientcursor.cpp b/src/mongo/client/dbclientcursor.cpp
index 3bae83d91b2..111ad613886 100644
--- a/src/mongo/client/dbclientcursor.cpp
+++ b/src/mongo/client/dbclientcursor.cpp
@@ -305,7 +305,7 @@ void DBClientCursor::dataReceived(const Message& reply, bool& retry, string& hos
invariant(qr.getCursorId() == 0);
if (!(opts & QueryOption_CursorTailable)) {
- uasserted(13127,
+ uasserted(ErrorCodes::CursorNotFound,
str::stream() << "cursor id " << cursorId << " didn't exist on server.");
}
diff --git a/src/mongo/db/commands/dbcommands.cpp b/src/mongo/db/commands/dbcommands.cpp
index 5eb2e77fb60..ec0a6d58d3d 100644
--- a/src/mongo/db/commands/dbcommands.cpp
+++ b/src/mongo/db/commands/dbcommands.cpp
@@ -708,10 +708,8 @@ public:
if (PlanExecutor::DEAD == state || PlanExecutor::FAILURE == state) {
return appendCommandStatus(result,
- Status(ErrorCodes::OperationFailed,
- str::stream()
- << "Executor error during filemd5 command: "
- << WorkingSetCommon::toStatusString(obj)));
+ WorkingSetCommon::getMemberObjectStatus(obj).withContext(
+ "Executor error during filemd5 command"));
}
if (partialOk)
@@ -876,11 +874,9 @@ public:
if (PlanExecutor::FAILURE == state || PlanExecutor::DEAD == state) {
warning() << "Internal error while reading " << ns;
- return appendCommandStatus(
- result,
- Status(ErrorCodes::OperationFailed,
- str::stream() << "Executor error while reading during dataSize command: "
- << WorkingSetCommon::toStatusString(obj)));
+ return appendCommandStatus(result,
+ WorkingSetCommon::getMemberObjectStatus(obj).withContext(
+ "Executor error while reading during dataSize command"));
}
ostringstream os;
diff --git a/src/mongo/db/commands/distinct.cpp b/src/mongo/db/commands/distinct.cpp
index 96f08dc80ed..7187341ecce 100644
--- a/src/mongo/db/commands/distinct.cpp
+++ b/src/mongo/db/commands/distinct.cpp
@@ -250,10 +250,8 @@ public:
<< ", stats: " << redact(Explain::getWinningPlanStats(executor.getValue().get()));
return appendCommandStatus(result,
- Status(ErrorCodes::OperationFailed,
- str::stream()
- << "Executor error during distinct command: "
- << WorkingSetCommon::toStatusString(obj)));
+ WorkingSetCommon::getMemberObjectStatus(obj).withContext(
+ "Executor error during distinct command"));
}
diff --git a/src/mongo/db/commands/find_and_modify.cpp b/src/mongo/db/commands/find_and_modify.cpp
index cc4d291cc62..5d83aee5f1d 100644
--- a/src/mongo/db/commands/find_and_modify.cpp
+++ b/src/mongo/db/commands/find_and_modify.cpp
@@ -120,16 +120,9 @@ StatusWith<boost::optional<BSONObj>> advanceExecutor(OperationContext* opCtx,
error() << "Plan executor error during findAndModify: " << PlanExecutor::statestr(state)
<< ", stats: " << redact(Explain::getWinningPlanStats(exec));
- if (WorkingSetCommon::isValidStatusMemberObject(value)) {
- const Status errorStatus = WorkingSetCommon::getMemberObjectStatus(value);
- invariant(!errorStatus.isOK());
- return {errorStatus.code(), errorStatus.reason()};
- }
- const std::string opstr = isRemove ? "delete" : "update";
- return {ErrorCodes::OperationFailed,
- str::stream() << "executor returned " << PlanExecutor::statestr(state)
- << " while executing "
- << opstr};
+ uassertStatusOK(WorkingSetCommon::getMemberObjectStatus(value).withContext(
+ "Plan executor error during findAndModify"));
+ MONGO_UNREACHABLE;
}
invariant(state == PlanExecutor::IS_EOF);
diff --git a/src/mongo/db/commands/find_cmd.cpp b/src/mongo/db/commands/find_cmd.cpp
index 9d73ecc39fa..be100fd54f0 100644
--- a/src/mongo/db/commands/find_cmd.cpp
+++ b/src/mongo/db/commands/find_cmd.cpp
@@ -357,10 +357,8 @@ public:
<< ", stats: " << redact(Explain::getWinningPlanStats(exec.get()));
return appendCommandStatus(result,
- Status(ErrorCodes::OperationFailed,
- str::stream()
- << "Executor error during find command: "
- << WorkingSetCommon::toStatusString(obj)));
+ WorkingSetCommon::getMemberObjectStatus(obj).withContext(
+ "Executor error during find command"));
}
// Before saving the cursor, ensure that whatever plan we established happened with the
diff --git a/src/mongo/db/commands/geo_near_cmd.cpp b/src/mongo/db/commands/geo_near_cmd.cpp
index 0389d550618..80253404762 100644
--- a/src/mongo/db/commands/geo_near_cmd.cpp
+++ b/src/mongo/db/commands/geo_near_cmd.cpp
@@ -303,10 +303,8 @@ public:
<< ", stats: " << redact(Explain::getWinningPlanStats(exec.get()));
return appendCommandStatus(result,
- Status(ErrorCodes::OperationFailed,
- str::stream()
- << "Executor error during geoNear command: "
- << WorkingSetCommon::toStatusString(currObj)));
+ WorkingSetCommon::getMemberObjectStatus(currObj).withContext(
+ "Executor error during geoNear command"));
}
PlanSummaryStats summary;
diff --git a/src/mongo/db/commands/getmore_cmd.cpp b/src/mongo/db/commands/getmore_cmd.cpp
index 4bab002396f..74c0db96790 100644
--- a/src/mongo/db/commands/getmore_cmd.cpp
+++ b/src/mongo/db/commands/getmore_cmd.cpp
@@ -448,28 +448,27 @@ public:
return Status::OK();
}
- if (PlanExecutor::FAILURE == *state) {
- nextBatch->abandon();
-
- error() << "GetMore command executor error: " << PlanExecutor::statestr(*state)
- << ", stats: " << redact(Explain::getWinningPlanStats(exec));
-
- return Status(ErrorCodes::OperationFailed,
- str::stream() << "GetMore command executor error: "
- << WorkingSetCommon::toStatusString(obj));
- } else if (PlanExecutor::DEAD == *state) {
- nextBatch->abandon();
-
- return Status(ErrorCodes::QueryPlanKilled,
- str::stream() << "PlanExecutor killed: "
- << WorkingSetCommon::toStatusString(obj));
- } else if (PlanExecutor::IS_EOF == *state) {
- // This causes the reported latest oplog timestamp to advance even when there are
- // no results for this particular query.
- nextBatch->setLatestOplogTimestamp(exec->getLatestOplogTimestamp());
+ switch (*state) {
+ case PlanExecutor::FAILURE:
+ // Log an error message and then perform the same cleanup as DEAD.
+ error() << "GetMore command executor error: " << PlanExecutor::statestr(*state)
+ << ", stats: " << redact(Explain::getWinningPlanStats(exec));
+ case PlanExecutor::DEAD: {
+ nextBatch->abandon();
+ // We should always have a valid status member object at this point.
+ auto status = WorkingSetCommon::getMemberObjectStatus(obj);
+ invariant(!status.isOK());
+ return status;
+ }
+ case PlanExecutor::IS_EOF:
+ // This causes the reported latest oplog timestamp to advance even when there are
+ // no results for this particular query.
+ nextBatch->setLatestOplogTimestamp(exec->getLatestOplogTimestamp());
+ default:
+ return Status::OK();
}
- return Status::OK();
+ MONGO_UNREACHABLE;
}
} getMoreCmd;
diff --git a/src/mongo/db/commands/group_cmd.cpp b/src/mongo/db/commands/group_cmd.cpp
index 1bdcb3e70a9..703cc60573e 100644
--- a/src/mongo/db/commands/group_cmd.cpp
+++ b/src/mongo/db/commands/group_cmd.cpp
@@ -185,14 +185,9 @@ private:
if (PlanExecutor::ADVANCED != state) {
invariant(PlanExecutor::FAILURE == state || PlanExecutor::DEAD == state);
- if (WorkingSetCommon::isValidStatusMemberObject(retval)) {
- return appendCommandStatus(result, WorkingSetCommon::getMemberObjectStatus(retval));
- }
return appendCommandStatus(result,
- Status(ErrorCodes::BadValue,
- str::stream() << "error encountered during group "
- << "operation, executor returned "
- << PlanExecutor::statestr(state)));
+ WorkingSetCommon::getMemberObjectStatus(retval).withContext(
+ "Plan executor error during group command"));
}
invariant(planExecutor->isEOF());
diff --git a/src/mongo/db/exec/stagedebug_cmd.cpp b/src/mongo/db/exec/stagedebug_cmd.cpp
index 6e65094764c..55da162a5b2 100644
--- a/src/mongo/db/exec/stagedebug_cmd.cpp
+++ b/src/mongo/db/exec/stagedebug_cmd.cpp
@@ -211,11 +211,8 @@ public:
<< ", stats: " << redact(Explain::getWinningPlanStats(exec.get()));
return appendCommandStatus(result,
- Status(ErrorCodes::OperationFailed,
- str::stream()
- << "Executor error during "
- << "StageDebug command: "
- << WorkingSetCommon::toStatusString(obj)));
+ WorkingSetCommon::getMemberObjectStatus(obj).withContext(
+ "Executor error during StageDebug command"));
}
return true;
diff --git a/src/mongo/db/pipeline/document_source_cursor.cpp b/src/mongo/db/pipeline/document_source_cursor.cpp
index a25926f97e0..da2dd818a74 100644
--- a/src/mongo/db/pipeline/document_source_cursor.cpp
+++ b/src/mongo/db/pipeline/document_source_cursor.cpp
@@ -134,17 +134,11 @@ void DocumentSourceCursor::loadBatch() {
case PlanExecutor::ADVANCED:
case PlanExecutor::IS_EOF:
return; // We've reached our limit or exhausted the cursor.
- case PlanExecutor::DEAD: {
- cleanupExecutor();
- uasserted(ErrorCodes::QueryPlanKilled,
- str::stream() << "collection or index disappeared when cursor yielded: "
- << WorkingSetCommon::toStatusString(resultObj));
- }
+ case PlanExecutor::DEAD:
case PlanExecutor::FAILURE: {
cleanupExecutor();
- uasserted(17285,
- str::stream() << "cursor encountered an error: "
- << WorkingSetCommon::toStatusString(resultObj));
+ uassertStatusOK(WorkingSetCommon::getMemberObjectStatus(resultObj).withContext(
+ "Error in $cursor stage"));
}
default:
MONGO_UNREACHABLE;
diff --git a/src/mongo/db/query/find.cpp b/src/mongo/db/query/find.cpp
index 6038193508e..3dfbee99390 100644
--- a/src/mongo/db/query/find.cpp
+++ b/src/mongo/db/query/find.cpp
@@ -201,15 +201,22 @@ void generateBatch(int ntoreturn,
}
}
- // Propagate any errors to the caller.
- if (PlanExecutor::FAILURE == *state) {
- error() << "getMore executor error, stats: " << redact(Explain::getWinningPlanStats(exec));
- uasserted(17406, "getMore executor error: " + WorkingSetCommon::toStatusString(obj));
- } else if (PlanExecutor::DEAD == *state) {
- uasserted(ErrorCodes::QueryPlanKilled,
- str::stream() << "PlanExecutor killed: "
- << WorkingSetCommon::toStatusString(obj));
+ switch (*state) {
+ // Log an error message and then perform the same cleanup as DEAD.
+ case PlanExecutor::FAILURE:
+ error() << "getMore executor error, stats: "
+ << redact(Explain::getWinningPlanStats(exec));
+ case PlanExecutor::DEAD: {
+ // We should always have a valid status object by this point.
+ auto status = WorkingSetCommon::getMemberObjectStatus(obj);
+ invariant(!status.isOK());
+ uassertStatusOK(status);
+ }
+ default:
+ return;
}
+
+ MONGO_UNREACHABLE;
}
} // namespace
@@ -669,7 +676,9 @@ std::string runQuery(OperationContext* opCtx,
if (PlanExecutor::FAILURE == state || PlanExecutor::DEAD == state) {
error() << "Plan executor error during find: " << PlanExecutor::statestr(state)
<< ", stats: " << redact(Explain::getWinningPlanStats(exec.get()));
- uasserted(17144, "Executor error: " + WorkingSetCommon::toStatusString(obj));
+ uassertStatusOK(WorkingSetCommon::getMemberObjectStatus(obj).withContext(
+ "Executor error during OP_QUERY find"));
+ MONGO_UNREACHABLE;
}
// Before saving the cursor, ensure that whatever plan we established happened with the expected
diff --git a/src/mongo/db/query/plan_executor.cpp b/src/mongo/db/query/plan_executor.cpp
index b2619986a46..8e3ab97971c 100644
--- a/src/mongo/db/query/plan_executor.cpp
+++ b/src/mongo/db/query/plan_executor.cpp
@@ -489,7 +489,7 @@ PlanExecutor::ExecState PlanExecutor::waitForInserts(CappedInsertNotifierData* n
PlanExecutor::ExecState PlanExecutor::getNextImpl(Snapshotted<BSONObj>* objOut, RecordId* dlOut) {
if (MONGO_FAIL_POINT(planExecutorAlwaysFails)) {
- Status status(ErrorCodes::OperationFailed,
+ Status status(ErrorCodes::InternalError,
str::stream() << "PlanExecutor hit planExecutorAlwaysFails fail point");
*objOut =
Snapshotted<BSONObj>(SnapshotId(), WorkingSetCommon::buildMemberStatusObject(status));
diff --git a/src/mongo/db/s/check_sharding_index_command.cpp b/src/mongo/db/s/check_sharding_index_command.cpp
index 6955d93db83..c8989fec460 100644
--- a/src/mongo/db/s/check_sharding_index_command.cpp
+++ b/src/mongo/db/s/check_sharding_index_command.cpp
@@ -200,11 +200,9 @@ public:
}
if (PlanExecutor::DEAD == state || PlanExecutor::FAILURE == state) {
- return appendCommandStatus(
- result,
- Status(ErrorCodes::OperationFailed,
- str::stream() << "Executor error while checking sharding index: "
- << WorkingSetCommon::toStatusString(currKey)));
+ return appendCommandStatus(result,
+ WorkingSetCommon::getMemberObjectStatus(currKey).withContext(
+ "Executor error while checking sharding index"));
}
return true;
diff --git a/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp b/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
index b2cb362263d..b00c3875c44 100644
--- a/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
+++ b/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
@@ -667,9 +667,8 @@ Status MigrationChunkClonerSourceLegacy::_storeCurrentLocs(OperationContext* opC
}
if (PlanExecutor::DEAD == state || PlanExecutor::FAILURE == state) {
- return {ErrorCodes::InternalError,
- str::stream() << "Executor error while scanning for documents belonging to chunk: "
- << WorkingSetCommon::toStatusString(obj)};
+ return WorkingSetCommon::getMemberObjectStatus(obj).withContext(
+ "Executor error while scanning for documents belonging to chunk");
}
const uint64_t collectionAverageObjectSize = collection->averageObjectSize(opCtx);
diff --git a/src/mongo/db/s/split_vector.cpp b/src/mongo/db/s/split_vector.cpp
index 3e917524aaf..7d24d69d2c2 100644
--- a/src/mongo/db/s/split_vector.cpp
+++ b/src/mongo/db/s/split_vector.cpp
@@ -215,9 +215,8 @@ StatusWith<std::vector<BSONObj>> splitVector(OperationContext* opCtx,
}
if (PlanExecutor::DEAD == state || PlanExecutor::FAILURE == state) {
- return {ErrorCodes::OperationFailed,
- "Executor error during splitVector command: " +
- WorkingSetCommon::toStatusString(currKey)};
+ return WorkingSetCommon::getMemberObjectStatus(currKey).withContext(
+ "Executor error during splitVector command");
}
if (!force)
diff --git a/src/mongo/dbtests/documentsourcetests.cpp b/src/mongo/dbtests/documentsourcetests.cpp
index db9ac3f47a6..d6d512f4bc7 100644
--- a/src/mongo/dbtests/documentsourcetests.cpp
+++ b/src/mongo/dbtests/documentsourcetests.cpp
@@ -349,7 +349,8 @@ TEST_F(DocumentSourceCursorTest, TailableAwaitDataCursorShouldErrorAfterTimeout)
DocumentSourceCursor::create(readLock.getCollection(), std::move(planExecutor), ctx());
ON_BLOCK_EXIT([cursor]() { cursor->dispose(); });
- ASSERT_THROWS_CODE(cursor->getNext().isEOF(), AssertionException, ErrorCodes::QueryPlanKilled);
+ ASSERT_THROWS_CODE(
+ cursor->getNext().isEOF(), AssertionException, ErrorCodes::ExceededTimeLimit);
}
TEST_F(DocumentSourceCursorTest, NonAwaitDataCursorShouldErrorAfterTimeout) {
@@ -386,7 +387,8 @@ TEST_F(DocumentSourceCursorTest, NonAwaitDataCursorShouldErrorAfterTimeout) {
DocumentSourceCursor::create(readLock.getCollection(), std::move(planExecutor), ctx());
ON_BLOCK_EXIT([cursor]() { cursor->dispose(); });
- ASSERT_THROWS_CODE(cursor->getNext().isEOF(), AssertionException, ErrorCodes::QueryPlanKilled);
+ ASSERT_THROWS_CODE(
+ cursor->getNext().isEOF(), AssertionException, ErrorCodes::ExceededTimeLimit);
}
TEST_F(DocumentSourceCursorTest, TailableAwaitDataCursorShouldErrorAfterBeingKilled) {