-rw-r--r--  jstests/core/update_arrayFilters.js  11
-rw-r--r--  src/mongo/client/dbclient_rs.cpp  17
-rw-r--r--  src/mongo/client/dbclientcursor.cpp  6
-rw-r--r--  src/mongo/client/sasl_client_authenticate.cpp  2
-rw-r--r--  src/mongo/client/sasl_client_authenticate.h  6
-rw-r--r--  src/mongo/client/sasl_client_authenticate_impl.cpp  12
-rw-r--r--  src/mongo/db/catalog/capped_utils.cpp  9
-rw-r--r--  src/mongo/db/catalog/collection_options.cpp  3
-rw-r--r--  src/mongo/db/catalog/create_collection.cpp  12
-rw-r--r--  src/mongo/db/catalog/rename_collection.cpp  11
-rw-r--r--  src/mongo/db/commands/apply_ops_cmd.cpp  7
-rw-r--r--  src/mongo/db/commands/user_management_commands.cpp  67
-rw-r--r--  src/mongo/db/namespace_string.cpp  18
-rw-r--r--  src/mongo/db/ops/parsed_update.cpp  9
-rw-r--r--  src/mongo/db/pipeline/document_source_merge_cursors.cpp  9
-rw-r--r--  src/mongo/db/pipeline/pipeline.cpp  7
-rw-r--r--  src/mongo/db/repl/SConscript  1
-rw-r--r--  src/mongo/db/repl/abstract_async_component.cpp  16
-rw-r--r--  src/mongo/db/repl/abstract_async_component_test.cpp  4
-rw-r--r--  src/mongo/db/repl/collection_cloner.cpp  79
-rw-r--r--  src/mongo/db/repl/database_cloner.cpp  18
-rw-r--r--  src/mongo/db/repl/initial_syncer.cpp  10
-rw-r--r--  src/mongo/db/repl/repl_set_config_checks.cpp  12
-rw-r--r--  src/mongo/db/repl/repl_set_heartbeat_response.cpp  18
-rw-r--r--  src/mongo/db/repl/replication_coordinator_external_state_impl.cpp  10
-rw-r--r--  src/mongo/db/repl/resync.cpp  6
-rw-r--r--  src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp  6
-rw-r--r--  src/mongo/db/s/balancer/cluster_statistics_impl.cpp  8
-rw-r--r--  src/mongo/db/s/balancer/migration_manager.cpp  8
-rw-r--r--  src/mongo/db/s/balancer/scoped_migration_request.cpp  27
-rw-r--r--  src/mongo/db/s/chunk_splitter.cpp  7
-rw-r--r--  src/mongo/db/s/collection_range_deleter.cpp  6
-rw-r--r--  src/mongo/db/s/collection_sharding_state.cpp  8
-rw-r--r--  src/mongo/db/s/config/configsvr_drop_database_command.cpp  8
-rw-r--r--  src/mongo/db/s/config/configsvr_move_primary_command.cpp  10
-rw-r--r--  src/mongo/db/s/merge_chunks_command.cpp  37
-rw-r--r--  src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp  6
-rw-r--r--  src/mongo/db/s/migration_destination_manager.cpp  7
-rw-r--r--  src/mongo/db/s/migration_source_manager.cpp  50
-rw-r--r--  src/mongo/db/s/shard_metadata_util.cpp  12
-rw-r--r--  src/mongo/db/s/shard_server_catalog_cache_loader.cpp  42
-rw-r--r--  src/mongo/db/s/sharding_state.cpp  7
-rw-r--r--  src/mongo/db/s/split_chunk.cpp  10
-rw-r--r--  src/mongo/db/session_catalog.cpp  10
-rw-r--r--  src/mongo/db/storage/wiredtiger/wiredtiger_util.cpp  2
-rw-r--r--  src/mongo/db/views/view_catalog.cpp  9
-rw-r--r--  src/mongo/executor/async_secure_stream.cpp  8
-rw-r--r--  src/mongo/rpc/get_status_from_command_result.cpp  3
-rw-r--r--  src/mongo/s/balancer_configuration.cpp  16
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_client_impl.cpp  63
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_manager.cpp  40
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_manager_collection_operations.cpp  5
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_manager_shard_operations.cpp  50
-rw-r--r--  src/mongo/s/catalog/type_chunk.cpp  5
-rw-r--r--  src/mongo/s/catalog/type_collection.cpp  2
-rw-r--r--  src/mongo/s/chunk_manager.cpp  4
-rw-r--r--  src/mongo/s/client/parallel.cpp  3
-rw-r--r--  src/mongo/s/client/shard_registry.cpp  15
-rw-r--r--  src/mongo/s/cluster_identity_loader.cpp  4
-rw-r--r--  src/mongo/s/commands/chunk_manager_targeter.cpp  10
-rw-r--r--  src/mongo/s/commands/cluster_aggregate.cpp  8
-rw-r--r--  src/mongo/s/commands/cluster_commands_helpers.cpp  10
-rw-r--r--  src/mongo/s/commands/cluster_count_cmd.cpp  6
-rw-r--r--  src/mongo/s/commands/cluster_explain.cpp  17
-rw-r--r--  src/mongo/s/commands/cluster_get_last_error_cmd.cpp  8
-rw-r--r--  src/mongo/s/commands/cluster_write.cpp  22
-rw-r--r--  src/mongo/s/shard_util.cpp  2
-rw-r--r--  src/mongo/s/write_ops/batch_write_exec.cpp  7
-rw-r--r--  src/mongo/scripting/mozjs/exception.cpp  4
-rw-r--r--  src/mongo/scripting/mozjs/implscope.cpp  7
-rw-r--r--  src/mongo/scripting/mozjs/mongo.cpp  8
-rw-r--r--  src/mongo/util/assert_util.h  29
72 files changed, 390 insertions, 626 deletions
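
The recurring change in the hunks below is mechanical: hand-built errors of the form Status(status.code(), str::stream() << "context: " << status.reason()) are replaced with status.withContext("context") (or uassertStatusOKWithContext / ex.toStatus("context")), which keeps the error code and prepends context to the reason. The sketch below is a minimal, self-contained model of that pattern, not the real mongo::Status class; the simplified struct, its fields, and the " :: caused by :: " separator (taken from the updated expectations in abstract_async_component_test.cpp) are illustrative assumptions.

#include <iostream>
#include <string>

// Simplified stand-in for mongo::Status, for illustration only.
struct Status {
    int code;            // stand-in for ErrorCodes::Error
    std::string reason;

    bool isOK() const { return code == 0; }

    // Keeps the code and prepends context to the reason, mirroring the
    // separator the updated unit tests expect.
    Status withContext(const std::string& context) const {
        return Status{code, context + " :: caused by :: " + reason};
    }
};

int main() {
    Status parseError{2 /* BadValue */, "unexpected field 'foo'"};

    // Old pattern (the removed lines): rebuild the Status by hand.
    Status oldStyle{parseError.code, "Error parsing array filter: " + parseError.reason};

    // New pattern (the added lines): keep the code, add context.
    Status newStyle = parseError.withContext("Error parsing array filter");

    std::cout << oldStyle.reason << "\n" << newStyle.reason << "\n";
    return 0;
}
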
diff --git a/jstests/core/update_arrayFilters.js b/jstests/core/update_arrayFilters.js
index e1457f7f809..eb63f2d0ffa 100644
--- a/jstests/core/update_arrayFilters.js
+++ b/jstests/core/update_arrayFilters.js
@@ -644,11 +644,12 @@
res = coll.update({_id: 0}, {$set: {"a.$[I]": 1}}, {arrayFilters: [{"I": 0}]});
assert.writeErrorWithCode(res, ErrorCodes.BadValue);
- assert.neq(
- -1,
- res.getWriteError().errmsg.indexOf(
- "Error parsing array filter: The top-level field name must be an alphanumeric string beginning with a lowercase letter, found 'I'"),
- "update failed for a reason other than bad array filter identifier");
+ assert(res.getWriteError().errmsg.startsWith("Error parsing array filter") &&
+ res.getWriteError().errmsg.endsWith(
+ "The top-level field name must be an alphanumeric " +
+ "string beginning with a lowercase letter, found 'I'"),
+ "update failed for a reason other than bad array filter identifier: " +
+ tojson(res.getWriteError()));
assert.writeOK(coll.insert({_id: 0, a: [0], b: [{j: 0}]}));
res = coll.update(
diff --git a/src/mongo/client/dbclient_rs.cpp b/src/mongo/client/dbclient_rs.cpp
index 10547fc66c1..dec27398071 100644
--- a/src/mongo/client/dbclient_rs.cpp
+++ b/src/mongo/client/dbclient_rs.cpp
@@ -449,12 +449,9 @@ void DBClientReplicaSet::_auth(const BSONObj& params) {
throw;
}
- const Status status = ex.toStatus();
- lastNodeStatus = {status.code(),
- str::stream() << "can't authenticate against replica set node "
- << _lastSlaveOkHost
- << ": "
- << status.reason()};
+ lastNodeStatus =
+ ex.toStatus(str::stream() << "can't authenticate against replica set node "
+ << _lastSlaveOkHost);
_invalidateLastSlaveOkCache(lastNodeStatus);
}
}
@@ -464,7 +461,7 @@ void DBClientReplicaSet::_auth(const BSONObj& params) {
assertMsgB << "Failed to authenticate, no good nodes in " << _getMonitor()->getName();
uasserted(ErrorCodes::NodeNotFound, assertMsgB.str());
} else {
- uasserted(lastNodeStatus.code(), lastNodeStatus.reason());
+ uassertStatusOK(lastNodeStatus);
}
}
@@ -1006,10 +1003,8 @@ bool DBClientReplicaSet::call(Message& toSend,
*actualServer = "";
const Status status = ex.toStatus();
- _invalidateLastSlaveOkCache(
- {status.code(),
- str::stream() << "can't call replica set node " << _lastSlaveOkHost << ": "
- << status.reason()});
+ _invalidateLastSlaveOkCache(status.withContext(
+ str::stream() << "can't call replica set node " << _lastSlaveOkHost));
}
}
diff --git a/src/mongo/client/dbclientcursor.cpp b/src/mongo/client/dbclientcursor.cpp
index b2b38566479..a275f984ae5 100644
--- a/src/mongo/client/dbclientcursor.cpp
+++ b/src/mongo/client/dbclientcursor.cpp
@@ -392,11 +392,7 @@ BSONObj DBClientCursor::nextSafe() {
// Only convert legacy errors ($err) to exceptions. Otherwise, just return the response and the
// caller will interpret it as a command error.
if (wasError && strcmp(o.firstElementFieldName(), "$err") == 0) {
- auto code = o["code"].numberInt();
- if (!code) {
- code = ErrorCodes::UnknownError;
- }
- uasserted(code, o.firstElement().str());
+ uassertStatusOK(getStatusFromCommandResult(o));
}
return o;
diff --git a/src/mongo/client/sasl_client_authenticate.cpp b/src/mongo/client/sasl_client_authenticate.cpp
index 13e0b4ee3fa..00525ad75eb 100644
--- a/src/mongo/client/sasl_client_authenticate.cpp
+++ b/src/mongo/client/sasl_client_authenticate.cpp
@@ -46,10 +46,8 @@ void (*saslClientAuthenticate)(auth::RunCommandHook runCommand,
const char* const saslStartCommandName = "saslStart";
const char* const saslContinueCommandName = "saslContinue";
const char* const saslCommandAutoAuthorizeFieldName = "autoAuthorize";
-const char* const saslCommandCodeFieldName = "code";
const char* const saslCommandConversationIdFieldName = "conversationId";
const char* const saslCommandDoneFieldName = "done";
-const char* const saslCommandErrmsgFieldName = "errmsg";
const char* const saslCommandMechanismFieldName = "mechanism";
const char* const saslCommandMechanismListFieldName = "supportedMechanisms";
const char* const saslCommandPasswordFieldName = "pwd";
diff --git a/src/mongo/client/sasl_client_authenticate.h b/src/mongo/client/sasl_client_authenticate.h
index cde16cd3afe..fac72f2712e 100644
--- a/src/mongo/client/sasl_client_authenticate.h
+++ b/src/mongo/client/sasl_client_authenticate.h
@@ -95,9 +95,6 @@ extern const char* const saslContinueCommandName;
/// connection all privileges associated with the user after successful authentication.
extern const char* const saslCommandAutoAuthorizeFieldName;
-/// Name of the field contain the status code in responses from the server.
-extern const char* const saslCommandCodeFieldName;
-
/// Name of the field containing the conversation identifier in server respones and saslContinue
/// commands.
extern const char* const saslCommandConversationIdFieldName;
@@ -106,9 +103,6 @@ extern const char* const saslCommandConversationIdFieldName;
/// completed successfully.
extern const char* const saslCommandDoneFieldName;
-/// Field in which to store error messages associated with non-success return codes.
-extern const char* const saslCommandErrmsgFieldName;
-
/// Name of parameter to saslStart command indiciating the client's desired sasl mechanism.
extern const char* const saslCommandMechanismFieldName;
diff --git a/src/mongo/client/sasl_client_authenticate_impl.cpp b/src/mongo/client/sasl_client_authenticate_impl.cpp
index d4755a22775..8ea87b55794 100644
--- a/src/mongo/client/sasl_client_authenticate_impl.cpp
+++ b/src/mongo/client/sasl_client_authenticate_impl.cpp
@@ -212,15 +212,9 @@ void asyncSaslConversation(auth::RunCommandHook runCommand,
}
auto serverResponse = response.data.getOwned();
- auto code = getStatusFromCommandResult(serverResponse).code();
-
- // Server versions 2.3.2 and earlier may return "ok: 1" with a non-zero
- // "code" field, indicating a failure. Subsequent versions should
- // return "ok: 0" on failure with a non-zero "code" field to indicate specific
- // failure. In all versions, either (ok: 1, code: > 0) or (ok: 0, code optional)
- // indicate failure.
- if (code != ErrorCodes::OK) {
- return handler({code, serverResponse[saslCommandErrmsgFieldName].str()});
+ auto status = getStatusFromCommandResult(serverResponse);
+ if (!status.isOK()) {
+ return handler(status);
}
// Exit if we have finished
diff --git a/src/mongo/db/catalog/capped_utils.cpp b/src/mongo/db/catalog/capped_utils.cpp
index 058acad2684..d38983423cf 100644
--- a/src/mongo/db/catalog/capped_utils.cpp
+++ b/src/mongo/db/catalog/capped_utils.cpp
@@ -278,11 +278,10 @@ mongo::Status mongo::convertToCapped(OperationContext* opCtx,
auto tmpNameResult =
db->makeUniqueCollectionNamespace(opCtx, "tmp%%%%%.convertToCapped." + shortSource);
if (!tmpNameResult.isOK()) {
- return Status(tmpNameResult.getStatus().code(),
- str::stream() << "Cannot generate temporary collection namespace to convert "
- << collectionName.ns()
- << " to a capped collection: "
- << tmpNameResult.getStatus().reason());
+ return tmpNameResult.getStatus().withContext(
+ str::stream() << "Cannot generate temporary collection namespace to convert "
+ << collectionName.ns()
+ << " to a capped collection");
}
const auto& longTmpName = tmpNameResult.getValue();
const auto shortTmpName = longTmpName.coll().toString();
diff --git a/src/mongo/db/catalog/collection_options.cpp b/src/mongo/db/catalog/collection_options.cpp
index 9ca8ab1b584..b3c1ead20c4 100644
--- a/src/mongo/db/catalog/collection_options.cpp
+++ b/src/mongo/db/catalog/collection_options.cpp
@@ -187,8 +187,7 @@ Status CollectionOptions::parse(const BSONObj& options, ParseKind kind) {
if (option.fieldNameStringData() == "storageEngine") {
Status status = checkStorageEngineOptions(option);
if (!status.isOK()) {
- return {status.code(),
- str::stream() << "In indexOptionDefaults: " << status.reason()};
+ return status.withContext("Error in indexOptionDefaults");
}
} else {
// Return an error on first unrecognized field.
diff --git a/src/mongo/db/catalog/create_collection.cpp b/src/mongo/db/catalog/create_collection.cpp
index 3b5ed3f50dc..ebd43cc24b9 100644
--- a/src/mongo/db/catalog/create_collection.cpp
+++ b/src/mongo/db/catalog/create_collection.cpp
@@ -188,13 +188,11 @@ Status createCollectionForApplyOps(OperationContext* opCtx,
auto tmpNameResult =
db->makeUniqueCollectionNamespace(opCtx, "tmp%%%%%.create");
if (!tmpNameResult.isOK()) {
- return Result(Status(tmpNameResult.getStatus().code(),
- str::stream() << "Cannot generate temporary "
- "collection namespace for applyOps "
- "create command: collection: "
- << newCollName.ns()
- << ". error: "
- << tmpNameResult.getStatus().reason()));
+ return Result(tmpNameResult.getStatus().withContext(
+ str::stream() << "Cannot generate temporary "
+ "collection namespace for applyOps "
+ "create command: collection: "
+ << newCollName.ns()));
}
const auto& tmpName = tmpNameResult.getValue();
// It is ok to log this because this doesn't happen very frequently.
diff --git a/src/mongo/db/catalog/rename_collection.cpp b/src/mongo/db/catalog/rename_collection.cpp
index d63f6674aa6..d9f80ccfd5d 100644
--- a/src/mongo/db/catalog/rename_collection.cpp
+++ b/src/mongo/db/catalog/rename_collection.cpp
@@ -236,13 +236,10 @@ Status renameCollectionCommon(OperationContext* opCtx,
auto tmpNameResult =
targetDB->makeUniqueCollectionNamespace(opCtx, "tmp%%%%%.renameCollection");
if (!tmpNameResult.isOK()) {
- return Status(tmpNameResult.getStatus().code(),
- str::stream() << "Cannot generate temporary collection name to rename "
- << source.ns()
- << " to "
- << target.ns()
- << ": "
- << tmpNameResult.getStatus().reason());
+ return tmpNameResult.getStatus().withContext(
+ str::stream() << "Cannot generate temporary collection name to rename " << source.ns()
+ << " to "
+ << target.ns());
}
const auto& tmpName = tmpNameResult.getValue();
diff --git a/src/mongo/db/commands/apply_ops_cmd.cpp b/src/mongo/db/commands/apply_ops_cmd.cpp
index 344626f11ab..81f4aa49f34 100644
--- a/src/mongo/db/commands/apply_ops_cmd.cpp
+++ b/src/mongo/db/commands/apply_ops_cmd.cpp
@@ -269,11 +269,8 @@ public:
// NoSuchKey means the user did not supply a mode.
return CommandHelpers::appendCommandStatus(
result,
- Status(status.code(),
- str::stream() << "Could not parse out "
- << ApplyOps::kOplogApplicationModeFieldName
- << ": "
- << status.reason()));
+ status.withContext(str::stream() << "Could not parse out "
+ << ApplyOps::kOplogApplicationModeFieldName));
}
auto applyOpsStatus = CommandHelpers::appendCommandStatus(
diff --git a/src/mongo/db/commands/user_management_commands.cpp b/src/mongo/db/commands/user_management_commands.cpp
index 0ad3dff7fc2..97255194138 100644
--- a/src/mongo/db/commands/user_management_commands.cpp
+++ b/src/mongo/db/commands/user_management_commands.cpp
@@ -87,6 +87,12 @@ namespace {
// Used to obtain mutex that guards modifications to persistent authorization data
const auto getAuthzDataMutex = ServiceContext::declareDecoration<stdx::mutex>();
+Status useDefaultCode(const Status& status, ErrorCodes::Error defaultCode) {
+ if (status.code() != ErrorCodes::UnknownError)
+ return status;
+ return Status(defaultCode, status.reason());
+}
+
BSONArray roleSetToBSONArray(const unordered_set<RoleName>& roles) {
BSONArrayBuilder rolesArrayBuilder;
for (unordered_set<RoleName>::const_iterator it = roles.begin(); it != roles.end(); ++it) {
@@ -1986,15 +1992,11 @@ public:
// Must invalidate even on bad status - what if the write succeeded but the GLE failed?
authzManager->invalidateUserCache();
if (!status.isOK()) {
- ErrorCodes::Error code = status.code() == ErrorCodes::UnknownError
- ? ErrorCodes::UserModificationFailed
- : status.code();
return CommandHelpers::appendCommandStatus(
result,
- Status(code,
- str::stream() << "Failed to remove role " << roleName.getFullName()
- << " from all users: "
- << status.reason()));
+ useDefaultCode(status, ErrorCodes::UserModificationFailed)
+ .withContext(str::stream() << "Failed to remove role " << roleName.getFullName()
+ << " from all users"));
}
// Remove this role from all other roles
@@ -2015,15 +2017,12 @@ public:
// Must invalidate even on bad status - what if the write succeeded but the GLE failed?
authzManager->invalidateUserCache();
if (!status.isOK()) {
- ErrorCodes::Error code = status.code() == ErrorCodes::UnknownError
- ? ErrorCodes::RoleModificationFailed
- : status.code();
return CommandHelpers::appendCommandStatus(
result,
- Status(code,
- str::stream() << "Removed role " << roleName.getFullName()
- << " from all users but failed to remove from all roles: "
- << status.reason()));
+ useDefaultCode(status, ErrorCodes::RoleModificationFailed)
+ .withContext(
+ str::stream() << "Removed role " << roleName.getFullName()
+ << " from all users but failed to remove from all roles"));
}
audit::logDropRole(Client::getCurrent(), roleName);
@@ -2039,11 +2038,10 @@ public:
if (!status.isOK()) {
return CommandHelpers::appendCommandStatus(
result,
- Status(status.code(),
- str::stream() << "Removed role " << roleName.getFullName()
- << " from all users and roles but failed to actually delete"
- " the role itself: "
- << status.reason()));
+ status.withContext(
+ str::stream() << "Removed role " << roleName.getFullName()
+ << " from all users and roles but failed to actually delete"
+ " the role itself"));
}
dassert(nMatched == 0 || nMatched == 1);
@@ -2118,15 +2116,11 @@ public:
// Must invalidate even on bad status - what if the write succeeded but the GLE failed?
authzManager->invalidateUserCache();
if (!status.isOK()) {
- ErrorCodes::Error code = status.code() == ErrorCodes::UnknownError
- ? ErrorCodes::UserModificationFailed
- : status.code();
return CommandHelpers::appendCommandStatus(
result,
- Status(code,
- str::stream() << "Failed to remove roles from \"" << dbname
- << "\" db from all users: "
- << status.reason()));
+ useDefaultCode(status, ErrorCodes::UserModificationFailed)
+ .withContext(str::stream() << "Failed to remove roles from \"" << dbname
+ << "\" db from all users"));
}
// Remove these roles from all other roles
@@ -2144,15 +2138,11 @@ public:
// Must invalidate even on bad status - what if the write succeeded but the GLE failed?
authzManager->invalidateUserCache();
if (!status.isOK()) {
- ErrorCodes::Error code = status.code() == ErrorCodes::UnknownError
- ? ErrorCodes::RoleModificationFailed
- : status.code();
return CommandHelpers::appendCommandStatus(
result,
- Status(code,
- str::stream() << "Failed to remove roles from \"" << dbname
- << "\" db from all roles: "
- << status.reason()));
+ useDefaultCode(status, ErrorCodes::RoleModificationFailed)
+ .withContext(str::stream() << "Failed to remove roles from \"" << dbname
+ << "\" db from all roles"));
}
audit::logDropAllRolesFromDatabase(Client::getCurrent(), dbname);
@@ -2164,12 +2154,11 @@ public:
if (!status.isOK()) {
return CommandHelpers::appendCommandStatus(
result,
- Status(status.code(),
- str::stream() << "Removed roles from \"" << dbname
- << "\" db "
- " from all users and roles but failed to actually delete"
- " those roles themselves: "
- << status.reason()));
+ status.withContext(
+ str::stream() << "Removed roles from \"" << dbname
+ << "\" db "
+ " from all users and roles but failed to actually delete"
+ " those roles themselves"));
}
result.append("n", nMatched);
diff --git a/src/mongo/db/namespace_string.cpp b/src/mongo/db/namespace_string.cpp
index 970b0be8e2a..a6ae64728e9 100644
--- a/src/mongo/db/namespace_string.cpp
+++ b/src/mongo/db/namespace_string.cpp
@@ -210,10 +210,8 @@ StatusWith<repl::OpTime> NamespaceString::getDropPendingNamespaceOpTime() const
long long seconds;
auto status = parseNumberFromString(opTimeStr.substr(0, incrementSeparatorIndex), &seconds);
if (!status.isOK()) {
- return Status(
- status.code(),
- str::stream() << "Invalid timestamp seconds in drop-pending namespace: " << _ns << ": "
- << status.reason());
+ return status.withContext(
+ str::stream() << "Invalid timestamp seconds in drop-pending namespace: " << _ns);
}
unsigned int increment;
@@ -222,19 +220,15 @@ StatusWith<repl::OpTime> NamespaceString::getDropPendingNamespaceOpTime() const
termSeparatorIndex - (incrementSeparatorIndex + 1)),
&increment);
if (!status.isOK()) {
- return Status(status.code(),
- str::stream() << "Invalid timestamp increment in drop-pending namespace: "
- << _ns
- << ": "
- << status.reason());
+ return status.withContext(
+ str::stream() << "Invalid timestamp increment in drop-pending namespace: " << _ns);
}
long long term;
status = mongo::parseNumberFromString(opTimeStr.substr(termSeparatorIndex + 1), &term);
if (!status.isOK()) {
- return Status(status.code(),
- str::stream() << "Invalid term in drop-pending namespace: " << _ns << ": "
- << status.reason());
+ return status.withContext(str::stream() << "Invalid term in drop-pending namespace: "
+ << _ns);
}
return repl::OpTime(Timestamp(Seconds(seconds), increment), term);
diff --git a/src/mongo/db/ops/parsed_update.cpp b/src/mongo/db/ops/parsed_update.cpp
index 1b7d6fbec6f..ec177e7cc45 100644
--- a/src/mongo/db/ops/parsed_update.cpp
+++ b/src/mongo/db/ops/parsed_update.cpp
@@ -168,16 +168,13 @@ Status ParsedUpdate::parseArrayFilters() {
ExtensionsCallbackNoop(),
MatchExpressionParser::kBanAllSpecialFeatures);
if (!parsedArrayFilter.isOK()) {
- return Status(parsedArrayFilter.getStatus().code(),
- str::stream() << "Error parsing array filter: "
- << parsedArrayFilter.getStatus().reason());
+ return parsedArrayFilter.getStatus().withContext("Error parsing array filter");
}
auto parsedArrayFilterWithPlaceholder =
ExpressionWithPlaceholder::make(std::move(parsedArrayFilter.getValue()));
if (!parsedArrayFilterWithPlaceholder.isOK()) {
- return Status(parsedArrayFilterWithPlaceholder.getStatus().code(),
- str::stream() << "Error parsing array filter: "
- << parsedArrayFilterWithPlaceholder.getStatus().reason());
+ return parsedArrayFilterWithPlaceholder.getStatus().withContext(
+ "Error parsing array filter");
}
auto finalArrayFilter = std::move(parsedArrayFilterWithPlaceholder.getValue());
auto fieldName = finalArrayFilter->getPlaceholder();
diff --git a/src/mongo/db/pipeline/document_source_merge_cursors.cpp b/src/mongo/db/pipeline/document_source_merge_cursors.cpp
index 23d413d6b60..539e15c0ccc 100644
--- a/src/mongo/db/pipeline/document_source_merge_cursors.cpp
+++ b/src/mongo/db/pipeline/document_source_merge_cursors.cpp
@@ -31,6 +31,7 @@
#include "mongo/db/pipeline/document_source_merge_cursors.h"
#include "mongo/db/pipeline/lite_parsed_document_source.h"
+#include "mongo/rpc/get_status_from_command_result.h"
namespace mongo {
@@ -140,11 +141,9 @@ void DocumentSourceMergeCursors::start() {
Document DocumentSourceMergeCursors::nextSafeFrom(DBClientCursor* cursor) {
const BSONObj next = cursor->next();
if (next.hasField("$err")) {
- const int code = next.hasField("code") ? next["code"].numberInt() : 17029;
- uasserted(code,
- str::stream() << "Received error in response from " << cursor->originalHost()
- << ": "
- << next);
+ uassertStatusOKWithContext(getStatusFromCommandResult(next),
+ str::stream() << "Received error in response from "
+ << cursor->originalHost());
}
return Document::fromBsonWithMetaData(next);
}
diff --git a/src/mongo/db/pipeline/pipeline.cpp b/src/mongo/db/pipeline/pipeline.cpp
index 4e12ba4358d..499d8c3d6e5 100644
--- a/src/mongo/db/pipeline/pipeline.cpp
+++ b/src/mongo/db/pipeline/pipeline.cpp
@@ -503,10 +503,9 @@ bool Pipeline::requiredToRunOnMongos() const {
// Verify that the remainder of this pipeline can run on mongoS.
auto mongosRunStatus = _pipelineCanRunOnMongoS();
- uassert(mongosRunStatus.code(),
- str::stream() << stage->getSourceName() << " must run on mongoS, but "
- << mongosRunStatus.reason(),
- mongosRunStatus.isOK());
+ uassertStatusOKWithContext(mongosRunStatus,
+ str::stream() << stage->getSourceName()
+ << " must run on mongoS, but cannot");
return true;
}
diff --git a/src/mongo/db/repl/SConscript b/src/mongo/db/repl/SConscript
index 718f70a324b..15890c6cf0e 100644
--- a/src/mongo/db/repl/SConscript
+++ b/src/mongo/db/repl/SConscript
@@ -1047,6 +1047,7 @@ env.Library('replica_set_messages',
'$BUILD_DIR/mongo/db/server_options_core',
'$BUILD_DIR/mongo/transport/transport_layer_common',
'$BUILD_DIR/mongo/util/net/network',
+ '$BUILD_DIR/mongo/rpc/command_status',
'optime',
'read_concern_args',
])
diff --git a/src/mongo/db/repl/abstract_async_component.cpp b/src/mongo/db/repl/abstract_async_component.cpp
index 597fb2001cb..2933d7e24dd 100644
--- a/src/mongo/db/repl/abstract_async_component.cpp
+++ b/src/mongo/db/repl/abstract_async_component.cpp
@@ -160,11 +160,7 @@ Status AbstractAsyncComponent::_checkForShutdownAndConvertStatus_inlock(
str::stream() << message << ": " << _componentName << " is shutting down");
}
- if (!status.isOK()) {
- return Status(status.code(), message + ": " + status.reason());
- }
-
- return Status::OK();
+ return status.withContext(message);
}
Status AbstractAsyncComponent::_scheduleWorkAndSaveHandle_inlock(
@@ -179,9 +175,7 @@ Status AbstractAsyncComponent::_scheduleWorkAndSaveHandle_inlock(
}
auto result = _executor->scheduleWork(work);
if (!result.isOK()) {
- return Status(result.getStatus().code(),
- str::stream() << "failed to schedule work " << name << ": "
- << result.getStatus().reason());
+ return result.getStatus().withContext(str::stream() << "failed to schedule work " << name);
}
*handle = result.getValue();
return Status::OK();
@@ -202,10 +196,8 @@ Status AbstractAsyncComponent::_scheduleWorkAtAndSaveHandle_inlock(
}
auto result = _executor->scheduleWorkAt(when, work);
if (!result.isOK()) {
- return Status(
- result.getStatus().code(),
- str::stream() << "failed to schedule work " << name << " at " << when.toString() << ": "
- << result.getStatus().reason());
+ return result.getStatus().withContext(
+ str::stream() << "failed to schedule work " << name << " at " << when.toString());
}
*handle = result.getValue();
return Status::OK();
diff --git a/src/mongo/db/repl/abstract_async_component_test.cpp b/src/mongo/db/repl/abstract_async_component_test.cpp
index db0571fc940..5e762cc27b2 100644
--- a/src/mongo/db/repl/abstract_async_component_test.cpp
+++ b/src/mongo/db/repl/abstract_async_component_test.cpp
@@ -323,11 +323,11 @@ TEST_F(AbstractAsyncComponentTest,
auto newStatus =
component.checkForShutdownAndConvertStatus_forTest(statusToCallbackArgs(status), "mytask");
ASSERT_EQUALS(status.code(), newStatus.code());
- ASSERT_EQUALS("mytask: " + status.reason(), newStatus.reason());
+ ASSERT_EQUALS("mytask :: caused by :: " + status.reason(), newStatus.reason());
newStatus = component.checkForShutdownAndConvertStatus_forTest(status, "mytask");
ASSERT_EQUALS(status.code(), newStatus.code());
- ASSERT_EQUALS("mytask: " + status.reason(), newStatus.reason());
+ ASSERT_EQUALS("mytask :: caused by :: " + status.reason(), newStatus.reason());
}
TEST_F(AbstractAsyncComponentTest, CheckForShutdownAndConvertStatusPassesThroughSuccessfulStatus) {
diff --git a/src/mongo/db/repl/collection_cloner.cpp b/src/mongo/db/repl/collection_cloner.cpp
index 97e3ccf4f21..a24d728afc2 100644
--- a/src/mongo/db/repl/collection_cloner.cpp
+++ b/src/mongo/db/repl/collection_cloner.cpp
@@ -304,13 +304,9 @@ void CollectionCloner::_countCallback(
}
if (!args.response.status.isOK()) {
- _finishCallback({args.response.status.code(),
- str::stream() << "During count call on collection '" << _sourceNss.ns()
- << "' from "
- << _source.toString()
- << ", there was an error '"
- << args.response.status.reason()
- << "'"});
+ _finishCallback(args.response.status.withContext(
+ str::stream() << "Count call failed on collection '" << _sourceNss.ns() << "' from "
+ << _source.toString()));
return;
}
@@ -320,27 +316,20 @@ void CollectionCloner::_countCallback(
// Querying by a non-existing collection by UUID returns an error. Treat same as
// behavior of find by namespace and use count == 0.
} else if (!commandStatus.isOK()) {
- _finishCallback({commandStatus.code(),
- str::stream() << "During count call on collection '" << _sourceNss.ns()
- << "' from "
- << _source.toString()
- << ", there was a command error '"
- << commandStatus.reason()
- << "'"});
+ _finishCallback(commandStatus.withContext(
+ str::stream() << "Count call failed on collection '" << _sourceNss.ns() << "' from "
+ << _source.toString()));
return;
} else {
auto countStatus = bsonExtractIntegerField(
args.response.data, kCountResponseDocumentCountFieldName, &count);
if (!countStatus.isOK()) {
- _finishCallback({countStatus.code(),
- str::stream()
- << "There was an error parsing document count from count "
- "command result on collection "
- << _sourceNss.ns()
- << " from "
- << _source.toString()
- << ": "
- << countStatus.reason()});
+ _finishCallback(countStatus.withContext(
+ str::stream() << "There was an error parsing document count from count "
+ "command result on collection "
+ << _sourceNss.ns()
+ << " from "
+ << _source.toString()));
return;
}
}
@@ -391,14 +380,8 @@ void CollectionCloner::_listIndexesCallback(const Fetcher::QueryResponseStatus&
return;
};
if (!fetchResult.isOK()) {
- Status newStatus{fetchResult.getStatus().code(),
- str::stream() << "During listIndexes call on collection '"
- << _sourceNss.ns()
- << "' there was an error '"
- << fetchResult.getStatus().reason()
- << "'"};
-
- _finishCallback(newStatus);
+ _finishCallback(fetchResult.getStatus().withContext(
+ str::stream() << "listIndexes call failed on collection '" << _sourceNss.ns() << "'"));
return;
}
@@ -553,14 +536,10 @@ Status CollectionCloner::_parseCursorResponse(BSONObj response,
case Find: {
StatusWith<CursorResponse> findResponse = CursorResponse::parseFromBSON(response);
if (!findResponse.isOK()) {
- Status errorStatus{findResponse.getStatus().code(),
- str::stream()
- << "While parsing the 'find' query against collection '"
- << _sourceNss.ns()
- << "' there was an error '"
- << findResponse.getStatus().reason()
- << "'"};
- return errorStatus;
+ return findResponse.getStatus().withContext(
+ str::stream() << "Error parsing the 'find' query against collection '"
+ << _sourceNss.ns()
+ << "'");
}
cursors->push_back(std::move(findResponse.getValue()));
break;
@@ -619,12 +598,8 @@ void CollectionCloner::_establishCollectionCursorsCallback(const RemoteCommandCa
return;
}
if (!commandStatus.isOK()) {
- Status newStatus{commandStatus.code(),
- str::stream() << "While querying collection '" << _sourceNss.ns()
- << "' there was an error '"
- << commandStatus.reason()
- << "'"};
- _finishCallback(commandStatus);
+ _finishCallback(commandStatus.withContext(
+ str::stream() << "Error querying collection '" << _sourceNss.ns() << "'"));
return;
}
@@ -743,11 +718,8 @@ void CollectionCloner::_handleARMResultsCallback(
if (!cbd.status.isOK()) {
// Wait for active inserts to complete.
waitForDbWorker();
- Status newStatus{cbd.status.code(),
- str::stream() << "While querying collection '" << _sourceNss.ns()
- << "' there was an error '"
- << cbd.status.reason()
- << "'"};
+ Status newStatus = cbd.status.withContext(str::stream() << "Error querying collection '"
+ << _sourceNss.ns());
setResultAndCancelRemainingWork(onCompletionGuard, cbd.status);
return;
}
@@ -781,11 +753,8 @@ void CollectionCloner::_handleARMResultsCallback(
_insertDocumentsCallback(cbd, lastBatch, onCompletionGuard);
});
if (!scheduleResult.isOK()) {
- Status newStatus{scheduleResult.getStatus().code(),
- str::stream() << "While cloning collection '" << _sourceNss.ns()
- << "' there was an error '"
- << scheduleResult.getStatus().reason()
- << "'"};
+ Status newStatus = scheduleResult.getStatus().withContext(
+ str::stream() << "Error cloning collection '" << _sourceNss.ns() << "'");
setResultAndCancelRemainingWork(onCompletionGuard, scheduleResult.getStatus());
return;
}
diff --git a/src/mongo/db/repl/database_cloner.cpp b/src/mongo/db/repl/database_cloner.cpp
index 7cdb4550002..71d11c1436a 100644
--- a/src/mongo/db/repl/database_cloner.cpp
+++ b/src/mongo/db/repl/database_cloner.cpp
@@ -271,13 +271,10 @@ void DatabaseCloner::_listCollectionsCallback(const StatusWith<Fetcher::QueryRes
Fetcher::NextAction* nextAction,
BSONObjBuilder* getMoreBob) {
if (!result.isOK()) {
- _finishCallback({result.getStatus().code(),
- str::stream() << "While issuing listCollections on db '" << _dbname
- << "' (host:"
- << _source.toString()
- << ") there was an error '"
- << result.getStatus().reason()
- << "'"});
+ _finishCallback(result.getStatus().withContext(
+ str::stream() << "Error issuing listCollections on db '" << _dbname << "' (host:"
+ << _source.toString()
+ << ")"));
return;
}
@@ -438,11 +435,8 @@ void DatabaseCloner::_collectionClonerCallback(const Status& status, const Names
UniqueLock lk(_mutex);
if (!status.isOK()) {
- newStatus = {status.code(),
- str::stream() << "While cloning collection '" << nss.toString()
- << "' there was an error '"
- << status.reason()
- << "'"};
+ newStatus = status.withContext(
+ str::stream() << "Error cloning collection '" << nss.toString() << "'");
_failedNamespaces.push_back({newStatus, nss});
}
++_stats.clonedCollections;
diff --git a/src/mongo/db/repl/initial_syncer.cpp b/src/mongo/db/repl/initial_syncer.cpp
index 59e7908c916..4ecc0887078 100644
--- a/src/mongo/db/repl/initial_syncer.cpp
+++ b/src/mongo/db/repl/initial_syncer.cpp
@@ -1366,9 +1366,7 @@ Status InitialSyncer::_scheduleWorkAndSaveHandle_inlock(
}
auto result = _exec->scheduleWork(work);
if (!result.isOK()) {
- return Status(result.getStatus().code(),
- str::stream() << "failed to schedule work " << name << ": "
- << result.getStatus().reason());
+ return result.getStatus().withContext(str::stream() << "failed to schedule work " << name);
}
*handle = result.getValue();
return Status::OK();
@@ -1388,10 +1386,8 @@ Status InitialSyncer::_scheduleWorkAtAndSaveHandle_inlock(
}
auto result = _exec->scheduleWorkAt(when, work);
if (!result.isOK()) {
- return Status(
- result.getStatus().code(),
- str::stream() << "failed to schedule work " << name << " at " << when.toString() << ": "
- << result.getStatus().reason());
+ return result.getStatus().withContext(
+ str::stream() << "failed to schedule work " << name << " at " << when.toString());
}
*handle = result.getValue();
return Status::OK();
diff --git a/src/mongo/db/repl/repl_set_config_checks.cpp b/src/mongo/db/repl/repl_set_config_checks.cpp
index e98689a8190..4e535d12313 100644
--- a/src/mongo/db/repl/repl_set_config_checks.cpp
+++ b/src/mongo/db/repl/repl_set_config_checks.cpp
@@ -275,10 +275,8 @@ StatusWith<int> validateConfigForInitiate(ReplicationCoordinatorExternalState* e
status = newConfig.checkIfWriteConcernCanBeSatisfied(newConfig.getDefaultWriteConcern());
if (!status.isOK()) {
- return StatusWith<int>(
- status.code(),
- str::stream() << "Found invalid default write concern in 'getLastErrorDefaults' field"
- << causedBy(status.reason()));
+ return status.withContext(
+ "Found invalid default write concern in 'getLastErrorDefaults' field");
}
status = validateArbiterPriorities(newConfig);
@@ -307,10 +305,8 @@ StatusWith<int> validateConfigForReconfig(ReplicationCoordinatorExternalState* e
status = newConfig.checkIfWriteConcernCanBeSatisfied(newConfig.getDefaultWriteConcern());
if (!status.isOK()) {
- return StatusWith<int>(
- status.code(),
- str::stream() << "Found invalid default write concern in 'getLastErrorDefaults' field"
- << causedBy(status.reason()));
+ return status.withContext(
+ "Found invalid default write concern in 'getLastErrorDefaults' field");
}
status = validateOldAndNewConfigsCompatible(oldConfig, newConfig);
diff --git a/src/mongo/db/repl/repl_set_heartbeat_response.cpp b/src/mongo/db/repl/repl_set_heartbeat_response.cpp
index dd99ffdc61b..6aae6929de6 100644
--- a/src/mongo/db/repl/repl_set_heartbeat_response.cpp
+++ b/src/mongo/db/repl/repl_set_heartbeat_response.cpp
@@ -38,6 +38,7 @@
#include "mongo/bson/util/bson_extract.h"
#include "mongo/db/jsobj.h"
#include "mongo/db/repl/bson_extract_optime.h"
+#include "mongo/rpc/get_status_from_command_result.h"
#include "mongo/util/assert_util.h"
#include "mongo/util/log.h"
#include "mongo/util/mongoutils/str.h"
@@ -49,8 +50,6 @@ namespace {
const std::string kConfigFieldName = "config";
const std::string kConfigVersionFieldName = "v";
const std::string kElectionTimeFieldName = "electionTime";
-const std::string kErrMsgFieldName = "errmsg";
-const std::string kErrorCodeFieldName = "code";
const std::string kHasDataFieldName = "hasData";
const std::string kHasStateDisagreementFieldName = "stateDisagreement";
const std::string kHbMessageFieldName = "hbmsg";
@@ -158,18 +157,11 @@ Status ReplSetHeartbeatResponse::initialize(const BSONObj& doc, long long term)
_setName = replSetNameElement.String();
}
- if (_setName.empty() && !doc[kOkFieldName].trueValue()) {
- std::string errMsg = doc[kErrMsgFieldName].str();
-
- BSONElement errCodeElem = doc[kErrorCodeFieldName];
- if (errCodeElem.ok()) {
- if (!errCodeElem.isNumber())
- return Status(ErrorCodes::BadValue, "Error code is not a number!");
-
- int errorCode = errCodeElem.numberInt();
- return Status(ErrorCodes::Error(errorCode), errMsg);
+ if (_setName.empty()) {
+ auto status = getStatusFromCommandResult(doc);
+ if (!status.isOK()) {
+ return status;
}
- return Status(ErrorCodes::UnknownError, errMsg);
}
const BSONElement hasDataElement = doc[kHasDataFieldName];
diff --git a/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp b/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
index 0f24f4b2a2c..92cf503c0c3 100644
--- a/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
+++ b/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
@@ -716,12 +716,10 @@ void ReplicationCoordinatorExternalStateImpl::_shardingOnTransitionToPrimaryHook
return;
}
- fassertFailedWithStatus(40184,
- Status(status.code(),
- str::stream()
- << "Failed to initialize config database on config "
- "server's first transition to primary"
- << causedBy(status)));
+ fassertFailedWithStatus(
+ 40184,
+ status.withContext("Failed to initialize config database on config server's "
+ "first transition to primary"));
}
if (status.isOK()) {
diff --git a/src/mongo/db/repl/resync.cpp b/src/mongo/db/repl/resync.cpp
index 2b7ba1b90b7..13cc0ffbbd9 100644
--- a/src/mongo/db/repl/resync.cpp
+++ b/src/mongo/db/repl/resync.cpp
@@ -107,10 +107,8 @@ public:
if (!status.isOK()) {
return CommandHelpers::appendCommandStatus(
result,
- Status(status.code(),
- str::stream()
- << "Failed to transition to STARTUP2 state to perform resync: "
- << status.reason()));
+ status.withContext(
+ "Failed to transition to STARTUP2 state to perform resync"));
}
}
uassertStatusOKWithLocation(replCoord->resyncData(opCtx, waitForResync), "resync", 0);
diff --git a/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp b/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp
index 0bb177b8a47..c00455f05cd 100644
--- a/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp
+++ b/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp
@@ -85,10 +85,8 @@ StatusWith<DistributionStatus> createCollectionDistributionStatus(
const auto swCollectionTags =
Grid::get(opCtx)->catalogClient()->getTagsForCollection(opCtx, chunkMgr->getns());
if (!swCollectionTags.isOK()) {
- return {swCollectionTags.getStatus().code(),
- str::stream() << "Unable to load tags for collection " << chunkMgr->getns()
- << " due to "
- << swCollectionTags.getStatus().toString()};
+ return swCollectionTags.getStatus().withContext(
+ str::stream() << "Unable to load tags for collection " << chunkMgr->getns());
}
const auto& collectionTags = swCollectionTags.getValue();
diff --git a/src/mongo/db/s/balancer/cluster_statistics_impl.cpp b/src/mongo/db/s/balancer/cluster_statistics_impl.cpp
index af42b546d61..f0f50c1c09f 100644
--- a/src/mongo/db/s/balancer/cluster_statistics_impl.cpp
+++ b/src/mongo/db/s/balancer/cluster_statistics_impl.cpp
@@ -128,11 +128,9 @@ StatusWith<vector<ShardStatistics>> ClusterStatisticsImpl::getStats(OperationCon
if (!shardSizeStatus.isOK()) {
const auto& status = shardSizeStatus.getStatus();
- return {status.code(),
- str::stream() << "Unable to obtain shard utilization information for "
- << shard.getName()
- << " due to "
- << status.reason()};
+ return status.withContext(str::stream()
+ << "Unable to obtain shard utilization information for "
+ << shard.getName());
}
string mongoDVersion;
diff --git a/src/mongo/db/s/balancer/migration_manager.cpp b/src/mongo/db/s/balancer/migration_manager.cpp
index 071e7363c47..5eb1d625b3e 100644
--- a/src/mongo/db/s/balancer/migration_manager.cpp
+++ b/src/mongo/db/s/balancer/migration_manager.cpp
@@ -497,11 +497,9 @@ void MigrationManager::_schedule(WithLock lock,
DistLockManager::kSingleLockAttemptTimeout);
if (!statusWithDistLockHandle.isOK()) {
- migration.completionNotification->set(
- Status(statusWithDistLockHandle.getStatus().code(),
- stream() << "Could not acquire collection lock for " << nss.ns()
- << " to migrate chunks, due to "
- << statusWithDistLockHandle.getStatus().reason()));
+ migration.completionNotification->set(statusWithDistLockHandle.getStatus().withContext(
+ stream() << "Could not acquire collection lock for " << nss.ns()
+ << " to migrate chunks"));
return;
}
diff --git a/src/mongo/db/s/balancer/scoped_migration_request.cpp b/src/mongo/db/s/balancer/scoped_migration_request.cpp
index fda4b5dbbfc..d55855b14b8 100644
--- a/src/mongo/db/s/balancer/scoped_migration_request.cpp
+++ b/src/mongo/db/s/balancer/scoped_migration_request.cpp
@@ -115,13 +115,11 @@ StatusWith<ScopedMigrationRequest> ScopedMigrationRequest::writeMigration(
BSONObj(),
boost::none);
if (!statusWithMigrationQueryResult.isOK()) {
- return {statusWithMigrationQueryResult.getStatus().code(),
- str::stream()
- << "Failed to verify whether conflicting migration is in "
- << "progress for migration '"
- << redact(migrateInfo.toString())
- << "' while trying to query config.migrations."
- << causedBy(redact(statusWithMigrationQueryResult.getStatus()))};
+ return statusWithMigrationQueryResult.getStatus().withContext(
+ str::stream() << "Failed to verify whether conflicting migration is in "
+ << "progress for migration '"
+ << redact(migrateInfo.toString())
+ << "' while trying to query config.migrations.");
}
if (statusWithMigrationQueryResult.getValue().docs.empty()) {
// The document that caused the DuplicateKey error is no longer in the collection,
@@ -133,14 +131,13 @@ StatusWith<ScopedMigrationRequest> ScopedMigrationRequest::writeMigration(
BSONObj activeMigrationBSON = statusWithMigrationQueryResult.getValue().docs.front();
auto statusWithActiveMigration = MigrationType::fromBSON(activeMigrationBSON);
if (!statusWithActiveMigration.isOK()) {
- return {statusWithActiveMigration.getStatus().code(),
- str::stream() << "Failed to verify whether conflicting migration is in "
- << "progress for migration '"
- << redact(migrateInfo.toString())
- << "' while trying to parse active migration document '"
- << redact(activeMigrationBSON.toString())
- << "'."
- << causedBy(redact(statusWithActiveMigration.getStatus()))};
+ return statusWithActiveMigration.getStatus().withContext(
+ str::stream() << "Failed to verify whether conflicting migration is in "
+ << "progress for migration '"
+ << redact(migrateInfo.toString())
+ << "' while trying to parse active migration document '"
+ << redact(activeMigrationBSON.toString())
+ << "'.");
}
MigrateInfo activeMigrateInfo = statusWithActiveMigration.getValue().toMigrateInfo();
diff --git a/src/mongo/db/s/chunk_splitter.cpp b/src/mongo/db/s/chunk_splitter.cpp
index 8c4e0a859e8..caea3e11fed 100644
--- a/src/mongo/db/s/chunk_splitter.cpp
+++ b/src/mongo/db/s/chunk_splitter.cpp
@@ -99,12 +99,7 @@ Status splitChunkAtMultiplePoints(OperationContext* opCtx,
shardId.toString(),
collectionVersion.epoch());
- if (!status.isOK()) {
- return {status.getStatus().code(),
- str::stream() << "split failed due to " << status.getStatus().reason()};
- }
-
- return status.getStatus();
+ return status.getStatus().withContext("split failed");
}
/**
diff --git a/src/mongo/db/s/collection_range_deleter.cpp b/src/mongo/db/s/collection_range_deleter.cpp
index c8afafcf431..8de246d264a 100644
--- a/src/mongo/db/s/collection_range_deleter.cpp
+++ b/src/mongo/db/s/collection_range_deleter.cpp
@@ -188,10 +188,8 @@ boost::optional<Date_t> CollectionRangeDeleter::cleanUpNextRange(
stdx::lock_guard<stdx::mutex> scopedLock(css->_metadataManager->_managerLock);
css->_metadataManager->_clearAllCleanups(
scopedLock,
- {e.code(),
- str::stream() << "cannot push startRangeDeletion record to Op Log,"
- " abandoning scheduled range deletions: "
- << e.what()});
+ e.toStatus("cannot push startRangeDeletion record to Op Log,"
+ " abandoning scheduled range deletions"));
return boost::none;
}
}
diff --git a/src/mongo/db/s/collection_sharding_state.cpp b/src/mongo/db/s/collection_sharding_state.cpp
index 6f0f3cf4a39..9b554195cd7 100644
--- a/src/mongo/db/s/collection_sharding_state.cpp
+++ b/src/mongo/db/s/collection_sharding_state.cpp
@@ -263,11 +263,9 @@ Status CollectionShardingState::waitForClean(OperationContext* opCtx,
Status result = stillScheduled->waitStatus(opCtx);
if (!result.isOK()) {
- return {result.code(),
- str::stream() << "Failed to delete orphaned " << nss.ns() << " range "
- << orphanRange.toString()
- << " due to "
- << result.reason()};
+ return result.withContext(str::stream() << "Failed to delete orphaned " << nss.ns()
+ << " range "
+ << orphanRange.toString());
}
}
diff --git a/src/mongo/db/s/config/configsvr_drop_database_command.cpp b/src/mongo/db/s/config/configsvr_drop_database_command.cpp
index e024edb997a..7f1db2ca2f8 100644
--- a/src/mongo/db/s/config/configsvr_drop_database_command.cpp
+++ b/src/mongo/db/s/config/configsvr_drop_database_command.cpp
@@ -168,12 +168,8 @@ public:
DatabaseType::ConfigNS,
BSON(DatabaseType::name(dbname)),
ShardingCatalogClient::kMajorityWriteConcern);
- if (!status.isOK()) {
- uassertStatusOK({status.code(),
- str::stream() << "Could not remove database '" << dbname
- << "' from metadata due to "
- << status.reason()});
- }
+ uassertStatusOKWithContext(
+ status, str::stream() << "Could not remove database '" << dbname << "' from metadata");
catalogClient
->logChange(opCtx,
diff --git a/src/mongo/db/s/config/configsvr_move_primary_command.cpp b/src/mongo/db/s/config/configsvr_move_primary_command.cpp
index 3ccde97a393..e9042865c64 100644
--- a/src/mongo/db/s/config/configsvr_move_primary_command.cpp
+++ b/src/mongo/db/s/config/configsvr_move_primary_command.cpp
@@ -170,12 +170,12 @@ public:
const auto toShard = [&]() {
auto toShardStatus = shardRegistry->getShard(opCtx, to);
if (!toShardStatus.isOK()) {
- const std::string msg(
+ log() << "Could not move database '" << dbname << "' to shard '" << to
+ << causedBy(toShardStatus.getStatus());
+ uassertStatusOKWithContext(
+ toShardStatus.getStatus(),
str::stream() << "Could not move database '" << dbname << "' to shard '" << to
- << "' due to "
- << toShardStatus.getStatus().reason());
- log() << msg;
- uasserted(toShardStatus.getStatus().code(), msg);
+ << "'");
}
return toShardStatus.getValue();
diff --git a/src/mongo/db/s/merge_chunks_command.cpp b/src/mongo/db/s/merge_chunks_command.cpp
index aa36b6dfdf9..da9755d5383 100644
--- a/src/mongo/db/s/merge_chunks_command.cpp
+++ b/src/mongo/db/s/merge_chunks_command.cpp
@@ -89,13 +89,12 @@ Status mergeChunks(OperationContext* opCtx,
opCtx, nss.ns(), whyMessage, DistLockManager::kSingleLockAttemptTimeout);
if (!scopedDistLock.isOK()) {
- std::string errmsg = stream() << "could not acquire collection lock for " << nss.ns()
- << " to merge chunks in [" << redact(minKey) << ", "
- << redact(maxKey) << ")"
- << causedBy(scopedDistLock.getStatus());
+ std::string context = stream() << "could not acquire collection lock for " << nss.ns()
+ << " to merge chunks in [" << redact(minKey) << ", "
+ << redact(maxKey) << ")";
- warning() << errmsg;
- return Status(scopedDistLock.getStatus().code(), errmsg);
+ warning() << context << causedBy(scopedDistLock.getStatus());
+ return scopedDistLock.getStatus().withContext(context);
}
auto const shardingState = ShardingState::get(opCtx);
@@ -107,12 +106,11 @@ Status mergeChunks(OperationContext* opCtx,
ChunkVersion unusedShardVersion;
Status refreshStatus = shardingState->refreshMetadataNow(opCtx, nss, &unusedShardVersion);
if (!refreshStatus.isOK()) {
- std::string errmsg = str::stream()
- << "could not merge chunks, failed to refresh metadata for " << nss.ns()
- << causedBy(redact(refreshStatus));
+ std::string context = str::stream()
+ << "could not merge chunks, failed to refresh metadata for " << nss.ns();
- warning() << errmsg;
- return Status(refreshStatus.code(), errmsg);
+ warning() << context << causedBy(redact(refreshStatus));
+ return refreshStatus.withContext(context);
}
const auto metadata = [&] {
@@ -272,12 +270,11 @@ Status mergeChunks(OperationContext* opCtx,
refreshStatus = shardingState->refreshMetadataNow(opCtx, nss, &unusedShardVersion);
if (!refreshStatus.isOK()) {
- std::string errmsg = str::stream() << "failed to refresh metadata for merge chunk ["
- << redact(minKey) << "," << redact(maxKey) << ") "
- << redact(refreshStatus);
+ std::string context = str::stream() << "failed to refresh metadata for merge chunk ["
+ << redact(minKey) << "," << redact(maxKey) << ") ";
- warning() << errmsg;
- return Status(refreshStatus.code(), errmsg);
+ warning() << context << redact(refreshStatus);
+ return refreshStatus.withContext(context);
}
}
@@ -300,13 +297,9 @@ Status mergeChunks(OperationContext* opCtx,
LOG(1) << "mergeChunk [" << redact(minKey) << "," << redact(maxKey)
<< ") has already been committed.";
} else if (!commandStatus.isOK()) {
- std::string errmsg = str::stream() << "Failed to commit chunk merge"
- << causedBy(redact(commandStatus));
- return Status(commandStatus.code(), errmsg);
+ return commandStatus.withContext("Failed to commit chunk merge");
} else if (!writeConcernStatus.isOK()) {
- std::string errmsg = str::stream() << "Failed to commit chunk merge"
- << causedBy(redact(writeConcernStatus));
- return Status(writeConcernStatus.code(), errmsg);
+ return writeConcernStatus.withContext("Failed to commit chunk merge");
}
return Status::OK();
diff --git a/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp b/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
index 42bb184d83a..95914f9c4b0 100644
--- a/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
+++ b/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
@@ -266,10 +266,8 @@ Status MigrationChunkClonerSourceLegacy::awaitUntilCriticalSectionIsAppropriate(
auto responseStatus = _callRecipient(
createRequestWithSessionId(kRecvChunkStatus, _args.getNss(), _sessionId));
if (!responseStatus.isOK()) {
- return {responseStatus.getStatus().code(),
- str::stream()
- << "Failed to contact recipient shard to monitor data transfer due to "
- << responseStatus.getStatus().toString()};
+ return responseStatus.getStatus().withContext(
+ "Failed to contact recipient shard to monitor data transfer");
}
const BSONObj& res = responseStatus.getValue();
diff --git a/src/mongo/db/s/migration_destination_manager.cpp b/src/mongo/db/s/migration_destination_manager.cpp
index 7c06445204e..669a3678862 100644
--- a/src/mongo/db/s/migration_destination_manager.cpp
+++ b/src/mongo/db/s/migration_destination_manager.cpp
@@ -1071,10 +1071,9 @@ CollectionShardingState::CleanupNotification MigrationDestinationManager::_noteP
// Start clearing any leftovers that would be in the new chunk
auto notification = css->beginReceive(range);
if (notification.ready() && !notification.waitStatus(opCtx).isOK()) {
- return Status{notification.waitStatus(opCtx).code(),
- str::stream() << "Collection " << nss.ns() << " range " << range.toString()
- << " migration aborted: "
- << notification.waitStatus(opCtx).reason()};
+ return notification.waitStatus(opCtx).withContext(
+ str::stream() << "Collection " << nss.ns() << " range " << range.toString()
+ << " migration aborted");
}
return notification;
}
diff --git a/src/mongo/db/s/migration_source_manager.cpp b/src/mongo/db/s/migration_source_manager.cpp
index 50d06dd59af..73ca446dc08 100644
--- a/src/mongo/db/s/migration_source_manager.cpp
+++ b/src/mongo/db/s/migration_source_manager.cpp
@@ -149,12 +149,9 @@ MigrationSourceManager::MigrationSourceManager(OperationContext* opCtx,
auto const shardingState = ShardingState::get(opCtx);
ChunkVersion unusedShardVersion;
- Status refreshStatus =
- shardingState->refreshMetadataNow(opCtx, getNss(), &unusedShardVersion);
- uassert(refreshStatus.code(),
- str::stream() << "cannot start migrate of chunk " << _args.toString() << " due to "
- << refreshStatus.reason(),
- refreshStatus.isOK());
+ uassertStatusOKWithContext(
+ shardingState->refreshMetadataNow(opCtx, getNss(), &unusedShardVersion),
+ str::stream() << "cannot start migrate of chunk " << _args.toString());
}
// Snapshot the committed metadata from the time the migration starts
@@ -201,12 +198,9 @@ MigrationSourceManager::MigrationSourceManager(OperationContext* opCtx,
chunkToMove.setMin(_args.getMinKey());
chunkToMove.setMax(_args.getMaxKey());
- Status chunkValidateStatus = collectionMetadata->checkChunkIsValid(chunkToMove);
- uassert(chunkValidateStatus.code(),
- str::stream() << "Unable to move chunk with arguments '" << redact(_args.toString())
- << "' due to error "
- << redact(chunkValidateStatus.reason()),
- chunkValidateStatus.isOK());
+ uassertStatusOKWithContext(collectionMetadata->checkChunkIsValid(chunkToMove),
+ str::stream() << "Unable to move chunk with arguments '"
+ << redact(_args.toString()));
_collectionEpoch = collectionVersion.epoch();
_collectionUuid = std::get<1>(collectionMetadataAndUUID);
@@ -363,8 +357,7 @@ Status MigrationSourceManager::commitChunkOnRecipient(OperationContext* opCtx) {
}
if (!commitCloneStatus.isOK()) {
- return {commitCloneStatus.code(),
- str::stream() << "commit clone failed due to " << commitCloneStatus.toString()};
+ return commitCloneStatus.withContext("commit clone failed");
}
_state = kCloneCompleted;
@@ -474,13 +467,12 @@ Status MigrationSourceManager::commitChunkMetadataOnConfig(OperationContext* opC
fassertStatusOK(
40137,
- {status.code(),
- str::stream() << "Failed to commit migration for chunk " << _args.toString()
- << " due to "
- << redact(migrationCommitStatus)
- << ". Updating the optime with a write before refreshing the "
- << "metadata also failed with "
- << redact(status)});
+ status.withContext(
+ str::stream() << "Failed to commit migration for chunk " << _args.toString()
+ << " due to "
+ << redact(migrationCommitStatus)
+ << ". Updating the optime with a write before refreshing the "
+ << "metadata also failed"));
}
// Do a best effort attempt to incrementally refresh the metadata before leaving the critical
@@ -503,13 +495,11 @@ Status MigrationSourceManager::commitChunkMetadataOnConfig(OperationContext* opC
// migrationCommitStatus may be OK or an error. The migration is considered a success at
// this point if the commit succeeded. The metadata refresh either occurred or the metadata
// was safely cleared.
- return {migrationCommitStatus.code(),
- str::stream() << "Orphaned range not cleaned up. Failed to refresh metadata after"
- " migration commit due to '"
- << refreshStatus.toString()
- << "', and commit failed due to '"
- << migrationCommitStatus.toString()
- << "'"};
+ return migrationCommitStatus.withContext(
+ str::stream() << "Orphaned range not cleaned up. Failed to refresh metadata after"
+ " migration commit due to '"
+ << refreshStatus.toString()
+ << "' after commit failed");
}
auto refreshedMetadata = [&] {
@@ -526,9 +516,7 @@ Status MigrationSourceManager::commitChunkMetadataOnConfig(OperationContext* opC
if (refreshedMetadata->keyBelongsToMe(_args.getMinKey())) {
// The chunk modification was not applied, so report the original error
- return {migrationCommitStatus.code(),
- str::stream() << "Chunk move was not successful due to "
- << migrationCommitStatus.reason()};
+ return migrationCommitStatus.withContext("Chunk move was not successful");
}
// Migration succeeded
diff --git a/src/mongo/db/s/shard_metadata_util.cpp b/src/mongo/db/s/shard_metadata_util.cpp
index 6d929554f30..88a3d761cee 100644
--- a/src/mongo/db/s/shard_metadata_util.cpp
+++ b/src/mongo/db/s/shard_metadata_util.cpp
@@ -170,10 +170,8 @@ StatusWith<ShardCollectionType> readShardCollectionsEntry(OperationContext* opCt
return statusWithCollectionEntry.getValue();
} catch (const DBException& ex) {
- return {ex.toStatus().code(),
- str::stream() << "Failed to read the '" << nss.ns()
- << "' entry locally from config.collections"
- << causedBy(ex.toStatus())};
+ return ex.toStatus(str::stream() << "Failed to read the '" << nss.ns()
+ << "' entry locally from config.collections");
}
}
@@ -248,10 +246,8 @@ StatusWith<std::vector<ChunkType>> readShardChunks(OperationContext* opCtx,
BSONObj document = cursor->nextSafe().getOwned();
auto statusWithChunk = ChunkType::fromShardBSON(document, epoch);
if (!statusWithChunk.isOK()) {
- return {statusWithChunk.getStatus().code(),
- str::stream() << "Failed to parse chunk '" << document.toString()
- << "' due to "
- << statusWithChunk.getStatus().reason()};
+ return statusWithChunk.getStatus().withContext(
+ str::stream() << "Failed to parse chunk '" << document.toString() << "'");
}
chunks.push_back(std::move(statusWithChunk.getValue()));
diff --git a/src/mongo/db/s/shard_server_catalog_cache_loader.cpp b/src/mongo/db/s/shard_server_catalog_cache_loader.cpp
index 4d1abdf658a..4f831c4382f 100644
--- a/src/mongo/db/s/shard_server_catalog_cache_loader.cpp
+++ b/src/mongo/db/s/shard_server_catalog_cache_loader.cpp
@@ -660,10 +660,9 @@ Status ShardServerCatalogCacheLoader::_ensureMajorityPrimaryAndScheduleTask(
OperationContext* opCtx, const NamespaceString& nss, Task task) {
Status linearizableReadStatus = waitForLinearizableReadConcern(opCtx);
if (!linearizableReadStatus.isOK()) {
- return {linearizableReadStatus.code(),
- str::stream() << "Unable to schedule routing table update because this is not the"
- << " majority primary and may not have the latest data. Error: "
- << linearizableReadStatus.reason()};
+ return linearizableReadStatus.withContext(
+ "Unable to schedule routing table update because this is not the majority primary and "
+ "may not have the latest data.");
}
stdx::lock_guard<stdx::mutex> lock(_mutex);
@@ -747,31 +746,22 @@ void ShardServerCatalogCacheLoader::_updatePersistedMetadata(OperationContext* o
// Check if this is a drop task
if (task.dropped) {
// The namespace was dropped. The persisted metadata for the collection must be cleared.
- Status status = dropChunksAndDeleteCollectionsEntry(opCtx, nss);
- uassert(status.code(),
- str::stream() << "Failed to clear persisted chunk metadata for collection '"
- << nss.ns()
- << "' due to '"
- << status.reason()
- << "'. Will be retried.",
- status.isOK());
+ uassertStatusOKWithContext(
+ dropChunksAndDeleteCollectionsEntry(opCtx, nss),
+ str::stream() << "Failed to clear persisted chunk metadata for collection '" << nss.ns()
+ << "'. Will be retried.");
return;
}
- Status status =
- persistCollectionAndChangedChunks(opCtx, nss, task.collectionAndChangedChunks.get());
-
- uassert(status.code(),
- str::stream() << "Failed to update the persisted chunk metadata for collection '"
- << nss.ns()
- << "' from '"
- << task.minQueryVersion.toString()
- << "' to '"
- << task.maxQueryVersion.toString()
- << "' due to '"
- << status.reason()
- << "'. Will be retried.",
- status.isOK());
+ uassertStatusOKWithContext(
+ persistCollectionAndChangedChunks(opCtx, nss, task.collectionAndChangedChunks.get()),
+ str::stream() << "Failed to update the persisted chunk metadata for collection '"
+ << nss.ns()
+ << "' from '"
+ << task.minQueryVersion.toString()
+ << "' to '"
+ << task.maxQueryVersion.toString()
+ << "'. Will be retried.");
LOG(1) << "Successfully updated persisted chunk metadata for collection '" << nss << "' from '"
<< task.minQueryVersion << "' to collection version '" << task.maxQueryVersion << "'.";
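
Both call sites above follow the pattern applied throughout this patch: rather than rebuilding a Status from status.code() plus a hand-assembled reason string, the original Status is handed to uassertStatusOKWithContext() (defined in the assert_util.h hunk at the end of this diff), which evaluates the context expression and prepends it only when the status is not OK. A minimal sketch of the before/after shape, assuming a hypothetical doWork() helper that returns a Status:

    // doWork() is hypothetical, for illustration only.
    Status doWork();

    // Old pattern: re-wrap the code and reason by hand.
    Status status = doWork();
    uassert(status.code(),
            str::stream() << "work failed due to " << status.reason(),
            status.isOK());

    // New pattern: keep the original code, reason and extra info, and prepend context lazily.
    uassertStatusOKWithContext(doWork(), "work failed");
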
diff --git a/src/mongo/db/s/sharding_state.cpp b/src/mongo/db/s/sharding_state.cpp
index d4025ea9e11..81a60c2be60 100644
--- a/src/mongo/db/s/sharding_state.cpp
+++ b/src/mongo/db/s/sharding_state.cpp
@@ -277,11 +277,8 @@ Status ShardingState::initializeFromShardIdentity(OperationContext* opCtx,
Status validationStatus = shardIdentity.validate();
if (!validationStatus.isOK()) {
- return Status(
- validationStatus.code(),
- str::stream()
- << "Invalid shard identity document found when initializing sharding state: "
- << validationStatus.reason());
+ return validationStatus.withContext(
+ "Invalid shard identity document found when initializing sharding state");
}
log() << "initializing sharding state with: " << shardIdentity;
diff --git a/src/mongo/db/s/split_chunk.cpp b/src/mongo/db/s/split_chunk.cpp
index 7a34132f7ad..181fa43c1fa 100644
--- a/src/mongo/db/s/split_chunk.cpp
+++ b/src/mongo/db/s/split_chunk.cpp
@@ -144,10 +144,10 @@ StatusWith<boost::optional<ChunkRange>> splitChunk(OperationContext* opCtx,
auto scopedDistLock = Grid::get(opCtx)->catalogClient()->getDistLockManager()->lock(
opCtx, nss.ns(), whyMessage, DistLockManager::kSingleLockAttemptTimeout);
if (!scopedDistLock.isOK()) {
- errmsg = str::stream() << "could not acquire collection lock for " << nss.toString()
- << " to split chunk " << chunkRange.toString() << " "
- << causedBy(scopedDistLock.getStatus());
- return {scopedDistLock.getStatus().code(), errmsg};
+ return scopedDistLock.getStatus().withContext(
+ str::stream() << "could not acquire collection lock for " << nss.toString()
+ << " to split chunk "
+ << chunkRange.toString());
}
// If the shard key is hashed, then we must make sure that the split points are of type
@@ -220,7 +220,7 @@ StatusWith<boost::optional<ChunkRange>> splitChunk(OperationContext* opCtx,
<< refreshStatus.toString();
warning() << redact(errmsg);
- return {errorStatus.code(), errmsg};
+ return errorStatus.withReason(errmsg);
}
}
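
split_chunk.cpp uses both of the new Status helpers in one file: withContext() keeps the original reason and prepends the new text, while withReason() keeps only the error code and replaces the message outright (appropriate above, since errmsg already embeds refreshStatus). A minimal sketch of the difference, using a hypothetical input status:

    // Hypothetical input status, for illustration only.
    Status base(ErrorCodes::OperationFailed, "metadata refresh failed");

    Status withCtx = base.withContext("could not split chunk");
    // withCtx.code()   == ErrorCodes::OperationFailed
    // withCtx.reason() == "could not split chunk :: caused by :: metadata refresh failed" (roughly)

    Status withMsg = base.withReason("could not split chunk");
    // withMsg.code()   == ErrorCodes::OperationFailed
    // withMsg.reason() == "could not split chunk"
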
diff --git a/src/mongo/db/session_catalog.cpp b/src/mongo/db/session_catalog.cpp
index 30b1ddfeb8a..0716dd959f9 100644
--- a/src/mongo/db/session_catalog.cpp
+++ b/src/mongo/db/session_catalog.cpp
@@ -134,11 +134,11 @@ void SessionCatalog::onStepUp(OperationContext* opCtx) {
return;
}
- uasserted(status.code(),
- str::stream() << "Failed to create the "
- << NamespaceString::kSessionTransactionsTableNamespace.ns()
- << " collection due to "
- << status.reason());
+ uassertStatusOKWithContext(status,
+ str::stream()
+ << "Failed to create the "
+ << NamespaceString::kSessionTransactionsTableNamespace.ns()
+ << " collection");
}
ScopedCheckedOutSession SessionCatalog::checkOutSession(OperationContext* opCtx) {
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_util.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_util.cpp
index 941c98fe30e..0c4dc874e84 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_util.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_util.cpp
@@ -271,7 +271,7 @@ Status WiredTigerUtil::checkTableCreationOptions(const BSONElement& configElem)
errorMsg << ". " << error;
}
errorMsg << ".";
- return {status.code(), errorMsg.str()};
+ return status.withReason(errorMsg.stringData());
}
return Status::OK();
}
diff --git a/src/mongo/db/views/view_catalog.cpp b/src/mongo/db/views/view_catalog.cpp
index 3809ca0b94c..8c59710941e 100644
--- a/src/mongo/db/views/view_catalog.cpp
+++ b/src/mongo/db/views/view_catalog.cpp
@@ -177,10 +177,11 @@ Status ViewCatalog::_upsertIntoGraph(OperationContext* opCtx, const ViewDefiniti
// will also return the set of involved namespaces.
auto pipelineStatus = _validatePipeline_inlock(opCtx, viewDef);
if (!pipelineStatus.isOK()) {
- uassert(pipelineStatus.getStatus().code(),
- str::stream() << "Invalid pipeline for view " << viewDef.name().ns() << "; "
- << pipelineStatus.getStatus().reason(),
- !needsValidation);
+ if (needsValidation) {
+ uassertStatusOKWithContext(pipelineStatus.getStatus(),
+ str::stream() << "Invalid pipeline for view "
+ << viewDef.name().ns());
+ }
return pipelineStatus.getStatus();
}
diff --git a/src/mongo/executor/async_secure_stream.cpp b/src/mongo/executor/async_secure_stream.cpp
index 0c7969024f7..2c9dc268205 100644
--- a/src/mongo/executor/async_secure_stream.cpp
+++ b/src/mongo/executor/async_secure_stream.cpp
@@ -100,11 +100,15 @@ void AsyncSecureStream::_handleConnect(asio::ip::tcp::resolver::iterator iter) {
void AsyncSecureStream::_handleHandshake(std::error_code ec, const std::string& hostName) {
auto certStatus =
getSSLManager()->parseAndValidatePeerCertificate(_stream.native_handle(), hostName);
- if (!certStatus.isOK()) {
+ if (certStatus.isOK()) {
+ _userHandler(make_error_code(ErrorCodes::OK));
+ } else {
warning() << "Failed to validate peer certificate during SSL handshake: "
<< certStatus.getStatus();
+ // We aren't able to propagate error extra info through here so make sure we only use a code
+ // that won't have any.
+ _userHandler(make_error_code(ErrorCodes::SSLHandshakeFailed));
}
- _userHandler(make_error_code(certStatus.getStatus().code()));
}
void AsyncSecureStream::cancel() {
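
The new comment is the reason the handler now reports a fixed SSLHandshakeFailed code instead of certStatus.getStatus().code(): a std::error_code carries only an integer value and a category, so the reason string and any ErrorExtraInfo attached to the original Status cannot survive the conversion. A rough sketch of what actually crosses that callback boundary, assuming the same make_error_code(ErrorCodes::Error) overload used above:

    // Only the numeric code (and its category) reach the user handler.
    std::error_code ec = make_error_code(ErrorCodes::SSLHandshakeFailed);
    // ec.value() identifies the error; the certificate details logged above and any
    // extra info carried by certStatus.getStatus() are intentionally left behind.
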
diff --git a/src/mongo/rpc/get_status_from_command_result.cpp b/src/mongo/rpc/get_status_from_command_result.cpp
index ed0ad38824d..e63ed87100d 100644
--- a/src/mongo/rpc/get_status_from_command_result.cpp
+++ b/src/mongo/rpc/get_status_from_command_result.cpp
@@ -135,7 +135,8 @@ Status getFirstWriteErrorStatusFromCommandResult(const BSONObj& cmdResponse) {
auto firstWriteErrorObj = firstWriteErrorElem.Obj();
return Status(ErrorCodes::Error(firstWriteErrorObj["code"].Int()),
- firstWriteErrorObj["errmsg"].String());
+ firstWriteErrorObj["errmsg"].String(),
+ firstWriteErrorObj);
}
Status getStatusFromWriteCommandReply(const BSONObj& cmdResponse) {
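
Passing firstWriteErrorObj as a third constructor argument keeps the raw write-error document attached to the Status, which is what lets later code (for example the appendRawResponses() change further down) recover structured details through extraInfo(). A small sketch of how such a Status might be built and consumed, with a hypothetical writeError document:

    // writeError is a hypothetical BSON write-error document such as
    //   { code: 11000, errmsg: "duplicate key error", ... }
    Status status(ErrorCodes::Error(writeError["code"].Int()),
                  writeError["errmsg"].String(),
                  writeError);  // extra fields stay available for ErrorExtraInfo parsing

    BSONObjBuilder reply;
    if (auto extraInfo = status.extraInfo()) {
        extraInfo->serialize(&reply);  // copy any structured details into the reply
    }
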
diff --git a/src/mongo/s/balancer_configuration.cpp b/src/mongo/s/balancer_configuration.cpp
index 413a39af5b1..50d7c219b11 100644
--- a/src/mongo/s/balancer_configuration.cpp
+++ b/src/mongo/s/balancer_configuration.cpp
@@ -94,9 +94,7 @@ Status BalancerConfiguration::setBalancerMode(OperationContext* opCtx,
}
if (!updateStatus.isOK() && (getBalancerMode() != mode)) {
- return {updateStatus.getStatus().code(),
- str::stream() << "Failed to update balancer configuration due to "
- << updateStatus.getStatus().reason()};
+ return updateStatus.getStatus().withContext("Failed to update balancer configuration");
}
return Status::OK();
@@ -135,25 +133,19 @@ Status BalancerConfiguration::refreshAndCheck(OperationContext* opCtx) {
// Balancer configuration
Status balancerSettingsStatus = _refreshBalancerSettings(opCtx);
if (!balancerSettingsStatus.isOK()) {
- return {balancerSettingsStatus.code(),
- str::stream() << "Failed to refresh the balancer settings due to "
- << balancerSettingsStatus.toString()};
+ return balancerSettingsStatus.withContext("Failed to refresh the balancer settings");
}
// Chunk size settings
Status chunkSizeStatus = _refreshChunkSizeSettings(opCtx);
if (!chunkSizeStatus.isOK()) {
- return {chunkSizeStatus.code(),
- str::stream() << "Failed to refresh the chunk sizes settings due to "
- << chunkSizeStatus.toString()};
+ return chunkSizeStatus.withContext("Failed to refresh the chunk sizes settings");
}
// AutoSplit settings
Status autoSplitStatus = _refreshAutoSplitSettings(opCtx);
if (!autoSplitStatus.isOK()) {
- return {autoSplitStatus.code(),
- str::stream() << "Failed to refresh the autoSplit settings due to "
- << autoSplitStatus.toString()};
+ return autoSplitStatus.withContext("Failed to refresh the autoSplit settings");
}
return Status::OK();
diff --git a/src/mongo/s/catalog/sharding_catalog_client_impl.cpp b/src/mongo/s/catalog/sharding_catalog_client_impl.cpp
index 4a3bc273eaa..0d2633627c2 100644
--- a/src/mongo/s/catalog/sharding_catalog_client_impl.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_client_impl.cpp
@@ -155,13 +155,7 @@ Status ShardingCatalogClientImpl::updateShardingCatalogEntryForCollection(
coll.toBSON(),
upsert,
ShardingCatalogClient::kMajorityWriteConcern);
- if (!status.isOK()) {
- return {status.getStatus().code(),
- str::stream() << "Collection metadata write failed due to "
- << status.getStatus().reason()};
- }
-
- return Status::OK();
+ return status.getStatus().withContext(str::stream() << "Collection metadata write failed");
}
Status ShardingCatalogClientImpl::updateDatabase(OperationContext* opCtx,
@@ -175,13 +169,7 @@ Status ShardingCatalogClientImpl::updateDatabase(OperationContext* opCtx,
db.toBSON(),
true,
ShardingCatalogClient::kMajorityWriteConcern);
- if (!status.isOK()) {
- return {status.getStatus().code(),
- str::stream() << "Database metadata write failed due to "
- << status.getStatus().reason()};
- }
-
- return Status::OK();
+ return status.getStatus().withContext(str::stream() << "Database metadata write failed");
}
Status ShardingCatalogClientImpl::logAction(OperationContext* opCtx,
@@ -296,10 +284,8 @@ StatusWith<repl::OpTimeWith<DatabaseType>> ShardingCatalogClientImpl::getDatabas
result = _fetchDatabaseMetadata(
opCtx, dbName, ReadPreferenceSetting{ReadPreference::PrimaryOnly}, readConcernLevel);
if (!result.isOK() && (result != ErrorCodes::NamespaceNotFound)) {
- return {result.getStatus().code(),
- str::stream() << "Could not confirm non-existence of database " << dbName
- << " due to "
- << result.getStatus().reason()};
+ return result.getStatus().withContext(
+ str::stream() << "Could not confirm non-existence of database " << dbName);
}
}
@@ -477,18 +463,14 @@ StatusWith<VersionType> ShardingCatalogClientImpl::getConfigVersion(
BSONObj versionDoc = queryResults.front();
auto versionTypeResult = VersionType::fromBSON(versionDoc);
if (!versionTypeResult.isOK()) {
- return {versionTypeResult.getStatus().code(),
- str::stream() << "Unable to parse config.version document " << versionDoc
- << " due to "
- << versionTypeResult.getStatus().reason()};
+ return versionTypeResult.getStatus().withContext(
+ str::stream() << "Unable to parse config.version document " << versionDoc);
}
auto validationStatus = versionTypeResult.getValue().validate();
if (!validationStatus.isOK()) {
- return Status(validationStatus.code(),
- str::stream() << "Unable to validate config.version document " << versionDoc
- << " due to "
- << validationStatus.reason());
+ return Status(validationStatus.withContext(
+ str::stream() << "Unable to validate config.version document " << versionDoc));
}
return versionTypeResult.getValue();
@@ -541,9 +523,7 @@ StatusWith<std::vector<ChunkType>> ShardingCatalogClientImpl::getChunks(
sort,
longLimit);
if (!findStatus.isOK()) {
- return {findStatus.getStatus().code(),
- str::stream() << "Failed to load chunks due to "
- << findStatus.getStatus().reason()};
+ return findStatus.getStatus().withContext("Failed to load chunks");
}
const auto& chunkDocsOpTimePair = findStatus.getValue();
@@ -552,10 +532,8 @@ StatusWith<std::vector<ChunkType>> ShardingCatalogClientImpl::getChunks(
for (const BSONObj& obj : chunkDocsOpTimePair.value) {
auto chunkRes = ChunkType::fromConfigBSON(obj);
if (!chunkRes.isOK()) {
- return {chunkRes.getStatus().code(),
- stream() << "Failed to parse chunk with id " << obj[ChunkType::name()]
- << " due to "
- << chunkRes.getStatus().reason()};
+ return chunkRes.getStatus().withContext(stream() << "Failed to parse chunk with id "
+ << obj[ChunkType::name()]);
}
chunks.push_back(chunkRes.getValue());
@@ -578,8 +556,7 @@ StatusWith<std::vector<TagsType>> ShardingCatalogClientImpl::getTagsForCollectio
BSON(TagsType::min() << 1),
boost::none); // no limit
if (!findStatus.isOK()) {
- return {findStatus.getStatus().code(),
- str::stream() << "Failed to load tags due to " << findStatus.getStatus().reason()};
+ return findStatus.getStatus().withContext("Failed to load tags");
}
const auto& tagDocsOpTimePair = findStatus.getValue();
@@ -588,10 +565,8 @@ StatusWith<std::vector<TagsType>> ShardingCatalogClientImpl::getTagsForCollectio
for (const BSONObj& obj : tagDocsOpTimePair.value) {
auto tagRes = TagsType::fromBSON(obj);
if (!tagRes.isOK()) {
- return {tagRes.getStatus().code(),
- str::stream() << "Failed to parse tag with id " << obj[TagsType::tag()]
- << " due to "
- << tagRes.getStatus().toString()};
+ return tagRes.getStatus().withContext(str::stream() << "Failed to parse tag with id "
+ << obj[TagsType::tag()]);
}
tags.push_back(tagRes.getValue());
@@ -617,16 +592,14 @@ StatusWith<repl::OpTimeWith<std::vector<ShardType>>> ShardingCatalogClientImpl::
for (const BSONObj& doc : findStatus.getValue().value) {
auto shardRes = ShardType::fromBSON(doc);
if (!shardRes.isOK()) {
- return {shardRes.getStatus().code(),
- stream() << "Failed to parse shard document " << doc << " due to "
- << shardRes.getStatus().reason()};
+ return shardRes.getStatus().withContext(stream() << "Failed to parse shard document "
+ << doc);
}
Status validateStatus = shardRes.getValue().validate();
if (!validateStatus.isOK()) {
- return {validateStatus.code(),
- stream() << "Failed to validate shard document " << doc << " due to "
- << validateStatus.reason()};
+ return validateStatus.withContext(stream() << "Failed to validate shard document "
+ << doc);
}
shards.push_back(shardRes.getValue());
diff --git a/src/mongo/s/catalog/sharding_catalog_manager.cpp b/src/mongo/s/catalog/sharding_catalog_manager.cpp
index 5331a45cb5c..b2fb3521f1d 100644
--- a/src/mongo/s/catalog/sharding_catalog_manager.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_manager.cpp
@@ -216,9 +216,7 @@ Status ShardingCatalogManager::_initConfigIndexes(OperationContext* opCtx) {
BSON(ChunkType::ns() << 1 << ChunkType::min() << 1),
unique);
if (!result.isOK()) {
- return Status(result.code(),
- str::stream() << "couldn't create ns_1_min_1 index on config db"
- << causedBy(result));
+ return result.withContext("couldn't create ns_1_min_1 index on config db");
}
result = configShard->createIndexOnConfig(
@@ -227,9 +225,7 @@ Status ShardingCatalogManager::_initConfigIndexes(OperationContext* opCtx) {
BSON(ChunkType::ns() << 1 << ChunkType::shard() << 1 << ChunkType::min() << 1),
unique);
if (!result.isOK()) {
- return Status(result.code(),
- str::stream() << "couldn't create ns_1_shard_1_min_1 index on config db"
- << causedBy(result));
+ return result.withContext("couldn't create ns_1_shard_1_min_1 index on config db");
}
result =
@@ -238,9 +234,7 @@ Status ShardingCatalogManager::_initConfigIndexes(OperationContext* opCtx) {
BSON(ChunkType::ns() << 1 << ChunkType::lastmod() << 1),
unique);
if (!result.isOK()) {
- return Status(result.code(),
- str::stream() << "couldn't create ns_1_lastmod_1 index on config db"
- << causedBy(result));
+ return result.withContext("couldn't create ns_1_lastmod_1 index on config db");
}
result = configShard->createIndexOnConfig(
@@ -249,25 +243,19 @@ Status ShardingCatalogManager::_initConfigIndexes(OperationContext* opCtx) {
BSON(MigrationType::ns() << 1 << MigrationType::min() << 1),
unique);
if (!result.isOK()) {
- return Status(result.code(),
- str::stream() << "couldn't create ns_1_min_1 index on config.migrations"
- << causedBy(result));
+ return result.withContext("couldn't create ns_1_min_1 index on config.migrations");
}
result = configShard->createIndexOnConfig(
opCtx, NamespaceString(ShardType::ConfigNS), BSON(ShardType::host() << 1), unique);
if (!result.isOK()) {
- return Status(result.code(),
- str::stream() << "couldn't create host_1 index on config db"
- << causedBy(result));
+ return result.withContext("couldn't create host_1 index on config db");
}
result = configShard->createIndexOnConfig(
opCtx, NamespaceString(LocksType::ConfigNS), BSON(LocksType::lockID() << 1), !unique);
if (!result.isOK()) {
- return Status(result.code(),
- str::stream() << "couldn't create lock id index on config db"
- << causedBy(result));
+ return result.withContext("couldn't create lock id index on config db");
}
result =
@@ -276,17 +264,13 @@ Status ShardingCatalogManager::_initConfigIndexes(OperationContext* opCtx) {
BSON(LocksType::state() << 1 << LocksType::process() << 1),
!unique);
if (!result.isOK()) {
- return Status(result.code(),
- str::stream() << "couldn't create state and process id index on config db"
- << causedBy(result));
+ return result.withContext("couldn't create state and process id index on config db");
}
result = configShard->createIndexOnConfig(
opCtx, NamespaceString(LockpingsType::ConfigNS), BSON(LockpingsType::ping() << 1), !unique);
if (!result.isOK()) {
- return Status(result.code(),
- str::stream() << "couldn't create lockping ping time index on config db"
- << causedBy(result));
+ return result.withContext("couldn't create lockping ping time index on config db");
}
result = configShard->createIndexOnConfig(opCtx,
@@ -294,9 +278,7 @@ Status ShardingCatalogManager::_initConfigIndexes(OperationContext* opCtx) {
BSON(TagsType::ns() << 1 << TagsType::min() << 1),
unique);
if (!result.isOK()) {
- return Status(result.code(),
- str::stream() << "couldn't create ns_1_min_1 index on config db"
- << causedBy(result));
+ return result.withContext("couldn't create ns_1_min_1 index on config db");
}
result = configShard->createIndexOnConfig(opCtx,
@@ -304,9 +286,7 @@ Status ShardingCatalogManager::_initConfigIndexes(OperationContext* opCtx) {
BSON(TagsType::ns() << 1 << TagsType::tag() << 1),
!unique);
if (!result.isOK()) {
- return Status(result.code(),
- str::stream() << "couldn't create ns_1_tag_1 index on config db"
- << causedBy(result));
+ return result.withContext("couldn't create ns_1_tag_1 index on config db");
}
return Status::OK();
diff --git a/src/mongo/s/catalog/sharding_catalog_manager_collection_operations.cpp b/src/mongo/s/catalog/sharding_catalog_manager_collection_operations.cpp
index 8c7c872ba5a..9439fd7f85a 100644
--- a/src/mongo/s/catalog/sharding_catalog_manager_collection_operations.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_manager_collection_operations.cpp
@@ -272,9 +272,8 @@ Status ShardingCatalogManager::dropCollection(OperationContext* opCtx, const Nam
Shard::RetryPolicy::kIdempotent);
if (!swDropResult.isOK()) {
- return {swDropResult.getStatus().code(),
- str::stream() << swDropResult.getStatus().reason() << " at "
- << shardEntry.getName()};
+ return swDropResult.getStatus().withContext(
+ str::stream() << "Error dropping collection on shard " << shardEntry.getName());
}
auto& dropResult = swDropResult.getValue();
diff --git a/src/mongo/s/catalog/sharding_catalog_manager_shard_operations.cpp b/src/mongo/s/catalog/sharding_catalog_manager_shard_operations.cpp
index ac28fb3b82c..0b511748104 100644
--- a/src/mongo/s/catalog/sharding_catalog_manager_shard_operations.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_manager_shard_operations.cpp
@@ -213,9 +213,8 @@ StatusWith<boost::optional<ShardType>> ShardingCatalogManager::_checkIfShardExis
const auto existingShards = Grid::get(opCtx)->catalogClient()->getAllShards(
opCtx, repl::ReadConcernLevel::kLocalReadConcern);
if (!existingShards.isOK()) {
- return Status(existingShards.getStatus().code(),
- str::stream() << "Failed to load existing shards during addShard"
- << causedBy(existingShards.getStatus().reason()));
+ return existingShards.getStatus().withContext(
+ "Failed to load existing shards during addShard");
}
// Now check if this shard already exists - if it already exists *with the same options* then
@@ -307,10 +306,10 @@ StatusWith<ShardType> ShardingCatalogManager::_validateHostAsShard(
auto swCommandResponse =
_runCommandForAddShard(opCtx, targeter.get(), "admin", BSON("isMaster" << 1));
if (swCommandResponse.getStatus() == ErrorCodes::IncompatibleServerVersion) {
- return {swCommandResponse.getStatus().code(),
- str::stream() << "Cannot add " << connectionString.toString()
- << " as a shard because its binary version is not compatible with "
- "the cluster's featureCompatibilityVersion."};
+ return swCommandResponse.getStatus().withReason(
+ str::stream() << "Cannot add " << connectionString.toString()
+ << " as a shard because its binary version is not compatible with "
+ "the cluster's featureCompatibilityVersion.");
} else if (!swCommandResponse.isOK()) {
return swCommandResponse.getStatus();
}
@@ -318,11 +317,9 @@ StatusWith<ShardType> ShardingCatalogManager::_validateHostAsShard(
// Check for a command response error
auto resIsMasterStatus = std::move(swCommandResponse.getValue().commandStatus);
if (!resIsMasterStatus.isOK()) {
- return {resIsMasterStatus.code(),
- str::stream() << "Error running isMaster against "
- << targeter->connectionString().toString()
- << ": "
- << causedBy(resIsMasterStatus)};
+ return resIsMasterStatus.withContext(str::stream()
+ << "Error running isMaster against "
+ << targeter->connectionString().toString());
}
auto resIsMaster = std::move(swCommandResponse.getValue().response);
@@ -341,12 +338,10 @@ StatusWith<ShardType> ShardingCatalogManager::_validateHostAsShard(
long long maxWireVersion;
Status status = bsonExtractIntegerField(resIsMaster, "maxWireVersion", &maxWireVersion);
if (!status.isOK()) {
- return Status(status.code(),
- str::stream() << "isMaster returned invalid 'maxWireVersion' "
- << "field when attempting to add "
- << connectionString.toString()
- << " as a shard: "
- << status.reason());
+ return status.withContext(str::stream() << "isMaster returned invalid 'maxWireVersion' "
+ << "field when attempting to add "
+ << connectionString.toString()
+ << " as a shard");
}
if (serverGlobalParams.featureCompatibility.getVersion() ==
ServerGlobalParams::FeatureCompatibility::Version::kFullyDowngradedTo34) {
@@ -360,12 +355,10 @@ StatusWith<ShardType> ShardingCatalogManager::_validateHostAsShard(
bool isMaster;
status = bsonExtractBooleanField(resIsMaster, "ismaster", &isMaster);
if (!status.isOK()) {
- return Status(status.code(),
- str::stream() << "isMaster returned invalid 'ismaster' "
- << "field when attempting to add "
- << connectionString.toString()
- << " as a shard: "
- << status.reason());
+ return status.withContext(str::stream() << "isMaster returned invalid 'ismaster' "
+ << "field when attempting to add "
+ << connectionString.toString()
+ << " as a shard");
}
if (!isMaster) {
return {ErrorCodes::NotMaster,
@@ -626,12 +619,9 @@ StatusWith<std::string> ShardingCatalogManager::addShard(
auto res = _dropSessionsCollection(opCtx, targeter);
if (!res.isOK()) {
- return Status(
- res.code(),
- str::stream()
- << "can't add shard with a local copy of config.system.sessions due to "
- << res.reason()
- << ", please drop this collection from the shard manually and try again.");
+ return res.withContext(
+ "can't add shard with a local copy of config.system.sessions, please drop this "
+ "collection from the shard manually and try again.");
}
// If a name for a shard wasn't provided, generate one
diff --git a/src/mongo/s/catalog/type_chunk.cpp b/src/mongo/s/catalog/type_chunk.cpp
index 2ea99e909e9..456771f4aff 100644
--- a/src/mongo/s/catalog/type_chunk.cpp
+++ b/src/mongo/s/catalog/type_chunk.cpp
@@ -66,9 +66,8 @@ const char kMaxKey[] = "max";
Status extractObject(const BSONObj& obj, const std::string& fieldName, BSONElement* bsonElement) {
Status elementStatus = bsonExtractTypedField(obj, fieldName, Object, bsonElement);
if (!elementStatus.isOK()) {
- return {elementStatus.code(),
- str::stream() << "The field '" << fieldName << "' cannot be parsed due to "
- << elementStatus.reason()};
+ return elementStatus.withContext(str::stream() << "The field '" << fieldName
+ << "' cannot be parsed");
}
if (bsonElement->Obj().isEmpty()) {
diff --git a/src/mongo/s/catalog/type_collection.cpp b/src/mongo/s/catalog/type_collection.cpp
index ef35628a148..f071f2f42c9 100644
--- a/src/mongo/s/catalog/type_collection.cpp
+++ b/src/mongo/s/catalog/type_collection.cpp
@@ -110,7 +110,7 @@ StatusWith<CollectionType> CollectionType::fromBSON(const BSONObj& source) {
} else if (status == ErrorCodes::NoSuchKey) {
// Sharding key can only be missing if the collection is dropped
if (!coll.getDropped()) {
- return {status.code(),
+ return {ErrorCodes::NoSuchKey,
str::stream() << "Shard key for collection " << coll._fullNs->ns()
<< " is missing, but the collection is not marked as "
"dropped. This is an indication of corrupted sharding "
diff --git a/src/mongo/s/chunk_manager.cpp b/src/mongo/s/chunk_manager.cpp
index fdcb35984b5..a3d348c8a07 100644
--- a/src/mongo/s/chunk_manager.cpp
+++ b/src/mongo/s/chunk_manager.cpp
@@ -272,9 +272,7 @@ IndexBounds ChunkManager::getIndexBoundsForQuery(const BSONObj& key,
NULL /* collator */);
plannerParams.indices.push_back(indexEntry);
- auto statusWithSolutions = QueryPlanner::plan(canonicalQuery, plannerParams);
- uassertStatusOK(statusWithSolutions.getStatus());
- auto solutions = std::move(statusWithSolutions.getValue());
+ auto solutions = uassertStatusOK(QueryPlanner::plan(canonicalQuery, plannerParams));
IndexBounds bounds;
diff --git a/src/mongo/s/client/parallel.cpp b/src/mongo/s/client/parallel.cpp
index 2505c039cb3..9a3756088fd 100644
--- a/src/mongo/s/client/parallel.cpp
+++ b/src/mongo/s/client/parallel.cpp
@@ -68,8 +68,7 @@ void throwCursorError(DBClientCursor* cursor) {
verify(cursor);
if (cursor->hasResultFlag(ResultFlag_ErrSet)) {
- BSONObj o = cursor->next();
- uasserted(o["code"].numberInt(), o["$err"].str());
+ uassertStatusOK(getStatusFromCommandResult(cursor->next()));
}
}
diff --git a/src/mongo/s/client/shard_registry.cpp b/src/mongo/s/client/shard_registry.cpp
index 82c5471def1..0ef7d72a497 100644
--- a/src/mongo/s/client/shard_registry.cpp
+++ b/src/mongo/s/client/shard_registry.cpp
@@ -377,17 +377,12 @@ void ShardRegistry::replicaSetChangeConfigServerUpdateHook(const std::string& se
////////////// ShardRegistryData //////////////////
ShardRegistryData::ShardRegistryData(OperationContext* opCtx, ShardFactory* shardFactory) {
- auto shardsStatus =
- grid.catalogClient()->getAllShards(opCtx, repl::ReadConcernLevel::kMajorityReadConcern);
+ auto shardsAndOpTime = uassertStatusOKWithContext(
+ grid.catalogClient()->getAllShards(opCtx, repl::ReadConcernLevel::kMajorityReadConcern),
+ "could not get updated shard list from config server");
- if (!shardsStatus.isOK()) {
- uasserted(shardsStatus.getStatus().code(),
- str::stream() << "could not get updated shard list from config server due to "
- << shardsStatus.getStatus().reason());
- }
-
- auto shards = std::move(shardsStatus.getValue().value);
- auto reloadOpTime = std::move(shardsStatus.getValue().opTime);
+ auto shards = std::move(shardsAndOpTime.value);
+ auto reloadOpTime = std::move(shardsAndOpTime.opTime);
LOG(1) << "found " << shards.size()
<< " shards listed on config server(s) with lastVisibleOpTime: "
diff --git a/src/mongo/s/cluster_identity_loader.cpp b/src/mongo/s/cluster_identity_loader.cpp
index d3cad97c4d7..8f1fa8adb55 100644
--- a/src/mongo/s/cluster_identity_loader.cpp
+++ b/src/mongo/s/cluster_identity_loader.cpp
@@ -98,9 +98,7 @@ StatusWith<OID> ClusterIdentityLoader::_fetchClusterIdFromConfig(
auto catalogClient = Grid::get(opCtx)->catalogClient();
auto loadResult = catalogClient->getConfigVersion(opCtx, readConcernLevel);
if (!loadResult.isOK()) {
- return Status(loadResult.getStatus().code(),
- str::stream() << "Error loading clusterID"
- << causedBy(loadResult.getStatus().reason()));
+ return loadResult.getStatus().withContext("Error loading clusterID");
}
return loadResult.getValue().getClusterId();
}
diff --git a/src/mongo/s/commands/chunk_manager_targeter.cpp b/src/mongo/s/commands/chunk_manager_targeter.cpp
index 6388c8b6626..8cde8fa627c 100644
--- a/src/mongo/s/commands/chunk_manager_targeter.cpp
+++ b/src/mongo/s/commands/chunk_manager_targeter.cpp
@@ -424,9 +424,8 @@ StatusWith<std::vector<ShardEndpoint>> ChunkManagerTargeter::targetUpdate(
"$expr is not allowed in the query predicate for an upsert"};
}
if (!cq.isOK()) {
- return {cq.getStatus().code(),
- str::stream() << "Could not parse update query " << updateDoc.getQ()
- << causedBy(cq.getStatus())};
+ return cq.getStatus().withContext(str::stream() << "Could not parse update query "
+ << updateDoc.getQ());
}
// Single (non-multi) updates must target a single shard or be exact-ID.
@@ -498,9 +497,8 @@ StatusWith<std::vector<ShardEndpoint>> ChunkManagerTargeter::targetDelete(
ExtensionsCallbackNoop(),
MatchExpressionParser::kAllowAllSpecialFeatures);
if (!cq.isOK()) {
- return Status(cq.getStatus().code(),
- str::stream() << "Could not parse delete query " << deleteDoc.getQ()
- << causedBy(cq.getStatus()));
+ return cq.getStatus().withContext(str::stream() << "Could not parse delete query "
+ << deleteDoc.getQ());
}
// Single deletes must target a single shard or be exact-ID.
diff --git a/src/mongo/s/commands/cluster_aggregate.cpp b/src/mongo/s/commands/cluster_aggregate.cpp
index 4d80eb95b18..f187d95d677 100644
--- a/src/mongo/s/commands/cluster_aggregate.cpp
+++ b/src/mongo/s/commands/cluster_aggregate.cpp
@@ -655,10 +655,10 @@ Status ClusterAggregate::runAggregate(OperationContext* opCtx,
// Standard aggregations swallow 'NamespaceNotFound' and return an empty cursor with id 0 in
// the event that the database does not exist. For $changeStream aggregations, however, we
// throw the exception in all error cases, including that of a non-existent database.
- uassert(executionNsRoutingInfoStatus.getStatus().code(),
- str::stream() << "failed to open $changeStream: "
- << executionNsRoutingInfoStatus.getStatus().reason(),
- !liteParsedPipeline.hasChangeStream());
+ if (liteParsedPipeline.hasChangeStream()) {
+ uassertStatusOKWithContext(executionNsRoutingInfoStatus.getStatus(),
+ "failed to open $changeStream");
+ }
appendEmptyResultSet(
*result, executionNsRoutingInfoStatus.getStatus(), namespaces.requestedNss.ns());
return Status::OK();
diff --git a/src/mongo/s/commands/cluster_commands_helpers.cpp b/src/mongo/s/commands/cluster_commands_helpers.cpp
index 263bd3bc281..7d4312ecd53 100644
--- a/src/mongo/s/commands/cluster_commands_helpers.cpp
+++ b/src/mongo/s/commands/cluster_commands_helpers.cpp
@@ -338,6 +338,12 @@ bool appendRawResponses(OperationContext* opCtx,
if (commonErrCode > 0) {
output->append("code", commonErrCode);
output->append("codeName", ErrorCodes::errorString(ErrorCodes::Error(commonErrCode)));
+ if (errors.size() == 1) {
+ // Only propagate extra info if there was a single error object.
+ if (auto extraInfo = errors.begin()->second.extraInfo()) {
+ extraInfo->serialize(output);
+ }
+ }
}
return false;
}
@@ -427,9 +433,7 @@ StatusWith<CachedDatabaseInfo> createShardDatabase(OperationContext* opCtx, Stri
return dbStatus;
}
- return {dbStatus.getStatus().code(),
- str::stream() << "Database " << dbName << " not found due to "
- << dbStatus.getStatus().reason()};
+ return dbStatus.getStatus().withContext(str::stream() << "Database " << dbName << " not found");
}
} // namespace mongo
diff --git a/src/mongo/s/commands/cluster_count_cmd.cpp b/src/mongo/s/commands/cluster_count_cmd.cpp
index 1fac70826e8..f05f349982c 100644
--- a/src/mongo/s/commands/cluster_count_cmd.cpp
+++ b/src/mongo/s/commands/cluster_count_cmd.cpp
@@ -207,10 +207,8 @@ public:
shardSubTotal.doneFast();
// Add error context so that you can see which shard failed, as well as details
// about that error.
- auto errorWithContext = Status(status.code(),
- str::stream() << "failed on: " << response.shardId
- << causedBy(status.reason()));
- return CommandHelpers::appendCommandStatus(result, errorWithContext);
+ return CommandHelpers::appendCommandStatus(
+ result, status.withContext(str::stream() << "failed on: " << response.shardId));
}
shardSubTotal.doneFast();
diff --git a/src/mongo/s/commands/cluster_explain.cpp b/src/mongo/s/commands/cluster_explain.cpp
index c18ba90aedc..3b455954ca9 100644
--- a/src/mongo/s/commands/cluster_explain.cpp
+++ b/src/mongo/s/commands/cluster_explain.cpp
@@ -161,18 +161,11 @@ Status ClusterExplain::validateShardResults(const vector<Strategy::CommandResult
// Check that the result from each shard has a true value for "ok" and has
// the expected "queryPlanner" field.
for (size_t i = 0; i < shardResults.size(); i++) {
- if (!shardResults[i].result["ok"].trueValue()) {
- // Try to pass up the error code from the shard.
- ErrorCodes::Error error = ErrorCodes::OperationFailed;
- if (shardResults[i].result["code"].isNumber()) {
- error = ErrorCodes::Error(shardResults[i].result["code"].numberInt());
- }
-
- return Status(error,
- str::stream() << "Explain command on shard "
- << shardResults[i].target.toString()
- << " failed, caused by: "
- << shardResults[i].result);
+ auto status = getStatusFromCommandResult(shardResults[i].result);
+ if (!status.isOK()) {
+ return status.withContext(str::stream() << "Explain command on shard "
+ << shardResults[i].target.toString()
+ << " failed");
}
if (Object != shardResults[i].result["queryPlanner"].type()) {
diff --git a/src/mongo/s/commands/cluster_get_last_error_cmd.cpp b/src/mongo/s/commands/cluster_get_last_error_cmd.cpp
index cc51e18c453..ebbaab0cc36 100644
--- a/src/mongo/s/commands/cluster_get_last_error_cmd.cpp
+++ b/src/mongo/s/commands/cluster_get_last_error_cmd.cpp
@@ -179,9 +179,11 @@ Status enforceLegacyWriteConcern(OperationContext* opCtx,
}
}
- return Status(failedStatuses.size() == 1u ? failedStatuses.front().code()
- : ErrorCodes::MultipleErrorsOccurred,
- builder.str());
+ if (failedStatuses.size() == 1u) {
+ return failedStatuses.front();
+ } else {
+ return Status(ErrorCodes::MultipleErrorsOccurred, builder.str());
+ }
}
diff --git a/src/mongo/s/commands/cluster_write.cpp b/src/mongo/s/commands/cluster_write.cpp
index 53091e86b6f..14a0482a268 100644
--- a/src/mongo/s/commands/cluster_write.cpp
+++ b/src/mongo/s/commands/cluster_write.cpp
@@ -193,24 +193,22 @@ void ClusterWriter::write(OperationContext* opCtx,
Status targetInitStatus = targeter.init(opCtx);
if (!targetInitStatus.isOK()) {
- toBatchError({targetInitStatus.code(),
- str::stream() << "unable to initialize targeter for"
- << (request.isInsertIndexRequest() ? " index" : "")
- << " write op for collection "
- << request.getTargetingNS().ns()
- << causedBy(targetInitStatus)},
+ toBatchError(targetInitStatus.withContext(
+ str::stream() << "unable to initialize targeter for"
+ << (request.isInsertIndexRequest() ? " index" : "")
+ << " write op for collection "
+ << request.getTargetingNS().ns()),
response);
return;
}
auto swEndpoints = targeter.targetCollection();
if (!swEndpoints.isOK()) {
- toBatchError({swEndpoints.getStatus().code(),
- str::stream() << "unable to target"
- << (request.isInsertIndexRequest() ? " index" : "")
- << " write op for collection "
- << request.getTargetingNS().ns()
- << causedBy(swEndpoints.getStatus())},
+ toBatchError(swEndpoints.getStatus().withContext(
+ str::stream() << "unable to target"
+ << (request.isInsertIndexRequest() ? " index" : "")
+ << " write op for collection "
+ << request.getTargetingNS().ns()),
response);
return;
}
diff --git a/src/mongo/s/shard_util.cpp b/src/mongo/s/shard_util.cpp
index b66cfc82ce9..b43aee32537 100644
--- a/src/mongo/s/shard_util.cpp
+++ b/src/mongo/s/shard_util.cpp
@@ -204,7 +204,7 @@ StatusWith<boost::optional<ChunkRange>> splitChunkAtMultiplePoints(
if (!status.isOK()) {
log() << "Split chunk " << redact(cmdObj) << " failed" << causedBy(redact(status));
- return {status.code(), str::stream() << "split failed due to " << status.toString()};
+ return status.withContext("split failed");
}
BSONElement shouldMigrateElement;
diff --git a/src/mongo/s/write_ops/batch_write_exec.cpp b/src/mongo/s/write_ops/batch_write_exec.cpp
index b2471a6d30a..9ee10f364b3 100644
--- a/src/mongo/s/write_ops/batch_write_exec.cpp
+++ b/src/mongo/s/write_ops/batch_write_exec.cpp
@@ -288,11 +288,8 @@ void BatchWriteExec::executeBatch(OperationContext* opCtx,
: OID());
} else {
// Error occurred dispatching, note it
- const Status status(responseStatus.code(),
- str::stream() << "Write results unavailable from "
- << shardHost
- << " due to "
- << responseStatus.reason());
+ const Status status = responseStatus.withContext(
+ str::stream() << "Write results unavailable from " << shardHost);
batchOp.noteBatchError(*batch, errorFromStatus(status));
diff --git a/src/mongo/scripting/mozjs/exception.cpp b/src/mongo/scripting/mozjs/exception.cpp
index e5af3cd729d..410fce55b3d 100644
--- a/src/mongo/scripting/mozjs/exception.cpp
+++ b/src/mongo/scripting/mozjs/exception.cpp
@@ -131,8 +131,8 @@ Status JSErrorReportToStatus(JSContext* cx,
}
void throwCurrentJSException(JSContext* cx, ErrorCodes::Error altCode, StringData altReason) {
- auto status = currentJSExceptionToStatus(cx, altCode, altReason);
- uasserted(status.code(), status.reason());
+ uassertStatusOK(currentJSExceptionToStatus(cx, altCode, altReason));
+ MONGO_UNREACHABLE;
}
} // namespace mozjs
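
throwCurrentJSException() is expected to always end up with a non-OK Status (currentJSExceptionToStatus falls back to altCode/altReason), so the uassertStatusOK() call always throws; the MONGO_UNREACHABLE that follows documents this for the compiler, because uassertStatusOK is not itself a noreturn function. A tiny sketch of the same shape, with hypothetical names:

    // Hypothetical wrapper, for illustration only.
    [[noreturn]] void throwNotOK(Status status) {
        invariant(!status.isOK());
        uassertStatusOK(status);  // always throws, since status is never OK here
        MONGO_UNREACHABLE;        // tells the compiler control cannot fall out of a [[noreturn]] function
    }
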
diff --git a/src/mongo/scripting/mozjs/implscope.cpp b/src/mongo/scripting/mozjs/implscope.cpp
index 55c376c1a19..b21fd817fad 100644
--- a/src/mongo/scripting/mozjs/implscope.cpp
+++ b/src/mongo/scripting/mozjs/implscope.cpp
@@ -151,9 +151,10 @@ void MozJSImplScope::_reportError(JSContext* cx, const char* message, JSErrorRep
log() << exceptionMsg << ":" << dbe.toString() << ":" << message;
}
- scope->_status = Status(
- JSErrorReportToStatus(cx, report, ErrorCodes::JSInterpreterFailure, message).code(),
- exceptionMsg);
+ // TODO SERVER-32239 is this right?
+ scope->_status =
+ JSErrorReportToStatus(cx, report, ErrorCodes::JSInterpreterFailure, message)
+ .withReason(exceptionMsg);
}
}
diff --git a/src/mongo/scripting/mozjs/mongo.cpp b/src/mongo/scripting/mozjs/mongo.cpp
index 995bcf5515b..24582b17179 100644
--- a/src/mongo/scripting/mozjs/mongo.cpp
+++ b/src/mongo/scripting/mozjs/mongo.cpp
@@ -594,13 +594,7 @@ void MongoBase::Functions::copyDatabaseWithSCRAM::call(JSContext* cx, JS::CallAr
BSONObj command = commandBuilder.obj();
bool ok = conn->runCommand("admin", command, inputObj);
-
- ErrorCodes::Error code = ErrorCodes::Error(inputObj[saslCommandCodeFieldName].numberInt());
-
- if (!ok || code != ErrorCodes::OK) {
- if (code == ErrorCodes::OK)
- code = ErrorCodes::UnknownError;
-
+ if (!ok) {
ValueReader(cx, args.rval()).fromBSON(inputObj, nullptr, true);
return;
}
diff --git a/src/mongo/util/assert_util.h b/src/mongo/util/assert_util.h
index 1907d9446da..1ed24cec710 100644
--- a/src/mongo/util/assert_util.h
+++ b/src/mongo/util/assert_util.h
@@ -362,6 +362,35 @@ inline T uassertStatusOKWithLocation(StatusWith<T> sw, const char* file, unsigne
}
/**
+ * Like uassertStatusOK(status), but also takes an expression that evaluates to something
+ * convertible to std::string to add more context to error messages. This contextExpr is only
+ * evaluated if the status is not OK.
+ */
+#define uassertStatusOKWithContext(status, contextExpr) \
+ ::mongo::uassertStatusOKWithContextAndLocation( \
+ status, [&]() -> std::string { return (contextExpr); }, __FILE__, __LINE__)
+template <typename ContextExpr>
+inline void uassertStatusOKWithContextAndLocation(const Status& status,
+ ContextExpr&& contextExpr,
+ const char* file,
+ unsigned line) {
+ if (MONGO_unlikely(!status.isOK())) {
+ uassertedWithLocation(
+ status.withContext(std::forward<ContextExpr>(contextExpr)()), file, line);
+ }
+}
+
+template <typename T, typename ContextExpr>
+inline T uassertStatusOKWithContextAndLocation(StatusWith<T> sw,
+ ContextExpr&& contextExpr,
+ const char* file,
+ unsigned line) {
+ uassertStatusOKWithContextAndLocation(
+ sw.getStatus(), std::forward<ContextExpr>(contextExpr), file, line);
+ return std::move(sw.getValue());
+}
+
+/**
* massert is like uassert but it logs the message before throwing.
*/
#define massert(msgid, msg, expr) \