summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorMathias Stearn <mathias@10gen.com>2018-05-02 17:32:34 -0400
committerMathias Stearn <mathias@10gen.com>2018-05-08 14:57:37 -0400
commitdb41862c5380ab33cf28db99726cdac252df0872 (patch)
treea0fefd08ea9cc063456abe796390faaaa07ec272
parent2d35461cb54e35afea223714fab1a184a9b381e2 (diff)
downloadmongo-db41862c5380ab33cf28db99726cdac252df0872.tar.gz
SERVER-34628 Really remove appendCommandStatus
All remaining callers are transitioned to some form of uassert. This was done with an elaborate set of vim macros to make this tractable. Therefore it should not be considered an example of the best way to write new code, just as an improvement on what was there before. In particular, I couldn't easily remove Statuses that are named and then only used once in uassertStatusOK, nor could I convert the pattern of checking a StatusWith<T>'s getStatus() and then calling getValue() to just call uassertStatusOK(returnsStatusWith()).
-rw-r--r--src/mongo/client/embedded/embedded_commands.cpp9
-rw-r--r--src/mongo/client/embedded/embedded_ismaster.cpp12
-rw-r--r--src/mongo/db/auth/sasl_commands.cpp14
-rw-r--r--src/mongo/db/commands.cpp6
-rw-r--r--src/mongo/db/commands.h3
-rw-r--r--src/mongo/db/commands/apply_ops_cmd.cpp20
-rw-r--r--src/mongo/db/commands/authentication_commands.cpp11
-rw-r--r--src/mongo/db/commands/clone.cpp3
-rw-r--r--src/mongo/db/commands/clone_collection.cpp8
-rw-r--r--src/mongo/db/commands/collection_to_capped.cpp22
-rw-r--r--src/mongo/db/commands/compact.cpp9
-rw-r--r--src/mongo/db/commands/connection_status.cpp4
-rw-r--r--src/mongo/db/commands/copydb_start_commands.cpp6
-rw-r--r--src/mongo/db/commands/count_cmd.cpp16
-rw-r--r--src/mongo/db/commands/create_indexes.cpp37
-rw-r--r--src/mongo/db/commands/current_op.cpp2
-rw-r--r--src/mongo/db/commands/current_op_common.cpp2
-rw-r--r--src/mongo/db/commands/dbcommands.cpp94
-rw-r--r--src/mongo/db/commands/dbcommands_d.cpp6
-rw-r--r--src/mongo/db/commands/distinct.cpp14
-rw-r--r--src/mongo/db/commands/do_txn_cmd.cpp4
-rw-r--r--src/mongo/db/commands/drop_indexes.cpp17
-rw-r--r--src/mongo/db/commands/find_cmd.cpp32
-rw-r--r--src/mongo/db/commands/fsync.cpp2
-rw-r--r--src/mongo/db/commands/generic_servers.cpp19
-rw-r--r--src/mongo/db/commands/geo_near_cmd.cpp8
-rw-r--r--src/mongo/db/commands/get_last_error.cpp10
-rw-r--r--src/mongo/db/commands/getmore_cmd.cpp49
-rw-r--r--src/mongo/db/commands/group_cmd.cpp14
-rw-r--r--src/mongo/db/commands/index_filter_commands.cpp3
-rw-r--r--src/mongo/db/commands/kill_all_sessions_by_pattern_command.cpp10
-rw-r--r--src/mongo/db/commands/kill_all_sessions_command.cpp4
-rw-r--r--src/mongo/db/commands/kill_sessions_command.cpp4
-rw-r--r--src/mongo/db/commands/killcursors_common.cpp4
-rw-r--r--src/mongo/db/commands/list_collections.cpp20
-rw-r--r--src/mongo/db/commands/list_databases.cpp13
-rw-r--r--src/mongo/db/commands/list_indexes.cpp4
-rw-r--r--src/mongo/db/commands/mr.cpp35
-rw-r--r--src/mongo/db/commands/oplog_note.cpp23
-rw-r--r--src/mongo/db/commands/pipeline_command.cpp10
-rw-r--r--src/mongo/db/commands/plan_cache_commands.cpp3
-rw-r--r--src/mongo/db/commands/reap_logical_session_cache_now.cpp4
-rw-r--r--src/mongo/db/commands/refresh_logical_session_cache_now.cpp4
-rw-r--r--src/mongo/db/commands/refresh_sessions_command.cpp4
-rw-r--r--src/mongo/db/commands/refresh_sessions_command_internal.cpp4
-rw-r--r--src/mongo/db/commands/rename_collection_cmd.cpp14
-rw-r--r--src/mongo/db/commands/repair_cursor.cpp6
-rw-r--r--src/mongo/db/commands/resize_oplog.cpp22
-rw-r--r--src/mongo/db/commands/restart_catalog_command.cpp8
-rw-r--r--src/mongo/db/commands/shutdown_d.cpp2
-rw-r--r--src/mongo/db/commands/snapshot_management.cpp8
-rw-r--r--src/mongo/db/commands/start_session_command.cpp12
-rw-r--r--src/mongo/db/commands/test_commands.cpp32
-rw-r--r--src/mongo/db/commands/touch.cpp4
-rw-r--r--src/mongo/db/commands/user_management_commands.cpp552
-rw-r--r--src/mongo/db/commands/validate.cpp26
-rw-r--r--src/mongo/db/exec/stagedebug_cmd.cpp6
-rw-r--r--src/mongo/db/free_mon/free_mon_commands.cpp4
-rw-r--r--src/mongo/db/free_mon/free_mon_mongod.cpp6
-rw-r--r--src/mongo/db/repl/repl_set_commands.cpp135
-rw-r--r--src/mongo/db/repl/repl_set_get_status_cmd.cpp10
-rw-r--r--src/mongo/db/repl/repl_set_request_votes.cpp8
-rw-r--r--src/mongo/db/repl/replication_info.cpp11
-rw-r--r--src/mongo/db/s/check_sharding_index_command.cpp6
-rw-r--r--src/mongo/db/s/config/configsvr_add_shard_command.cpp16
-rw-r--r--src/mongo/db/s/config/configsvr_commit_chunk_migration_command.cpp4
-rw-r--r--src/mongo/db/s/config/configsvr_commit_move_primary_command.cpp6
-rw-r--r--src/mongo/db/s/config/configsvr_drop_database_command.cpp6
-rw-r--r--src/mongo/db/s/config/configsvr_enable_sharding_command.cpp12
-rw-r--r--src/mongo/db/s/config/configsvr_merge_chunk_command.cpp5
-rw-r--r--src/mongo/db/s/config/configsvr_move_primary_command.cpp21
-rw-r--r--src/mongo/db/s/config/configsvr_remove_shard_command.cpp7
-rw-r--r--src/mongo/db/s/merge_chunks_command.cpp3
-rw-r--r--src/mongo/db/s/migration_destination_manager_legacy_commands.cpp4
-rw-r--r--src/mongo/db/s/move_chunk_command.cpp10
-rw-r--r--src/mongo/db/s/split_chunk_command.cpp3
-rw-r--r--src/mongo/db/s/split_vector_command.cpp4
-rw-r--r--src/mongo/db/service_entry_point_mongod.cpp2
-rw-r--r--src/mongo/rpc/reply_builder_interface.cpp2
-rw-r--r--src/mongo/s/commands/cluster_aggregate.cpp2
-rw-r--r--src/mongo/s/commands/cluster_commands_helpers.cpp3
-rw-r--r--src/mongo/s/commands/cluster_count_cmd.cpp21
-rw-r--r--src/mongo/s/commands/cluster_current_op.cpp2
-rw-r--r--src/mongo/s/commands/cluster_distinct_cmd.cpp17
-rw-r--r--src/mongo/s/commands/cluster_find_cmd.cpp22
-rw-r--r--src/mongo/s/commands/cluster_getmore_cmd.cpp8
-rw-r--r--src/mongo/s/commands/cluster_is_master_cmd.cpp12
-rw-r--r--src/mongo/s/commands/cluster_kill_op.cpp4
-rw-r--r--src/mongo/s/commands/cluster_map_reduce_cmd.cpp4
-rw-r--r--src/mongo/s/commands/cluster_move_chunk_cmd.cpp3
-rw-r--r--src/mongo/s/commands/cluster_pipeline_cmd.cpp4
-rw-r--r--src/mongo/s/commands/cluster_set_free_monitoring.cpp3
-rw-r--r--src/mongo/s/commands/cluster_user_management_commands.cpp20
-rw-r--r--src/mongo/s/commands/cluster_validate_cmd.cpp3
-rw-r--r--src/mongo/s/write_ops/batched_command_response.cpp2
-rw-r--r--src/mongo/s/write_ops/batched_command_response_test.cpp15
96 files changed, 591 insertions, 1133 deletions
diff --git a/src/mongo/client/embedded/embedded_commands.cpp b/src/mongo/client/embedded/embedded_commands.cpp
index 5ce6e74dc06..ff288428e73 100644
--- a/src/mongo/client/embedded/embedded_commands.cpp
+++ b/src/mongo/client/embedded/embedded_commands.cpp
@@ -73,10 +73,8 @@ public:
} else if (mode == "conservative") {
} else {
- return CommandHelpers::appendCommandStatus(
- result,
- Status(ErrorCodes::InvalidOptions,
- "Only conservative|moderate|aggressive are valid options."));
+ uasserted(ErrorCodes::InvalidOptions,
+ "Only conservative|moderate|aggressive are valid options.");
}
return true;
}
@@ -122,8 +120,7 @@ public:
} else if (mode == "normal") {
} else {
- return CommandHelpers::appendCommandStatus(
- result, Status(ErrorCodes::InvalidOptions, "Only low|normal are valid options."));
+ uasserted(ErrorCodes::InvalidOptions, "Only low|normal are valid options.");
}
return true;
}
diff --git a/src/mongo/client/embedded/embedded_ismaster.cpp b/src/mongo/client/embedded/embedded_ismaster.cpp
index 6631b0d1537..eb45ed178b4 100644
--- a/src/mongo/client/embedded/embedded_ismaster.cpp
+++ b/src/mongo/client/embedded/embedded_ismaster.cpp
@@ -76,18 +76,12 @@ public:
BSONElement element = cmdObj[kMetadataDocumentName];
if (!element.eoo()) {
if (seenIsMaster) {
- return CommandHelpers::appendCommandStatus(
- result,
- Status(ErrorCodes::ClientMetadataCannotBeMutated,
- "The client metadata document may only be sent in the first isMaster"));
+ uasserted(ErrorCodes::ClientMetadataCannotBeMutated,
+ "The client metadata document may only be sent in the first isMaster");
}
auto swParseClientMetadata = ClientMetadata::parse(element);
-
- if (!swParseClientMetadata.getStatus().isOK()) {
- return CommandHelpers::appendCommandStatus(result,
- swParseClientMetadata.getStatus());
- }
+ uassertStatusOK(swParseClientMetadata.getStatus());
invariant(swParseClientMetadata.getValue());
diff --git a/src/mongo/db/auth/sasl_commands.cpp b/src/mongo/db/auth/sasl_commands.cpp
index 00cb221a6cc..aa6a19b840c 100644
--- a/src/mongo/db/auth/sasl_commands.cpp
+++ b/src/mongo/db/auth/sasl_commands.cpp
@@ -283,10 +283,7 @@ bool CmdSaslStart::run(OperationContext* opCtx,
StatusWith<std::unique_ptr<AuthenticationSession>> swSession =
doSaslStart(opCtx, db, cmdObj, &result);
- CommandHelpers::appendCommandStatus(result, swSession.getStatus());
- if (!swSession.isOK()) {
- return false;
- }
+ uassertStatusOK(swSession.getStatus());
auto session = std::move(swSession.getValue());
auto& mechanism = session->getMechanism();
@@ -317,8 +314,7 @@ bool CmdSaslContinue::run(OperationContext* opCtx,
AuthenticationSession::swap(client, sessionGuard);
if (!sessionGuard) {
- return CommandHelpers::appendCommandStatus(
- result, Status(ErrorCodes::ProtocolError, "No SASL session state found"));
+ uasserted(ErrorCodes::ProtocolError, "No SASL session state found");
}
AuthenticationSession* session = static_cast<AuthenticationSession*>(sessionGuard.get());
@@ -327,10 +323,8 @@ bool CmdSaslContinue::run(OperationContext* opCtx,
// Authenticating the __system@local user to the admin database on mongos is required
// by the auth passthrough test suite.
if (mechanism.getAuthenticationDatabase() != db && !getTestCommandsEnabled()) {
- return CommandHelpers::appendCommandStatus(
- result,
- Status(ErrorCodes::ProtocolError,
- "Attempt to switch database target during SASL authentication."));
+ uasserted(ErrorCodes::ProtocolError,
+ "Attempt to switch database target during SASL authentication.");
}
Status status = doSaslContinue(opCtx, session, cmdObj, &result);
diff --git a/src/mongo/db/commands.cpp b/src/mongo/db/commands.cpp
index b24813e9b83..d84a387d7b7 100644
--- a/src/mongo/db/commands.cpp
+++ b/src/mongo/db/commands.cpp
@@ -185,12 +185,6 @@ Command* CommandHelpers::findCommand(StringData name) {
return globalCommandRegistry()->findCommand(name);
}
-bool CommandHelpers::appendCommandStatus(BSONObjBuilder& result, const Status& status) {
- uassertStatusOK(status);
- appendSimpleCommandStatus(result, true);
- return true;
-}
-
bool CommandHelpers::appendCommandStatusNoThrow(BSONObjBuilder& result, const Status& status) {
appendSimpleCommandStatus(result, status.isOK(), status.reason());
BSONObj tmp = result.asTempObj();
diff --git a/src/mongo/db/commands.h b/src/mongo/db/commands.h
index 7b0c2e26a36..8d467747a28 100644
--- a/src/mongo/db/commands.h
+++ b/src/mongo/db/commands.h
@@ -108,9 +108,6 @@ struct CommandHelpers {
*/
static bool appendCommandStatusNoThrow(BSONObjBuilder& result, const Status& status);
- // About to be deleted
- static bool appendCommandStatus(BSONObjBuilder& result, const Status& status);
-
/**
* If "ok" field is present in `reply`, uses its truthiness.
* Otherwise, the absence of failure is considered success, `reply` is patched to indicate it.
diff --git a/src/mongo/db/commands/apply_ops_cmd.cpp b/src/mongo/db/commands/apply_ops_cmd.cpp
index d78414e95ea..599f3a52902 100644
--- a/src/mongo/db/commands/apply_ops_cmd.cpp
+++ b/src/mongo/db/commands/apply_ops_cmd.cpp
@@ -225,9 +225,7 @@ public:
maybeDisableValidation.emplace(opCtx);
auto status = OplogApplicationChecks::checkOperationArray(cmdObj.firstElement());
- if (!status.isOK()) {
- return CommandHelpers::appendCommandStatus(result, status);
- }
+ uassertStatusOK(status);
// TODO (SERVER-30217): When a write concern is provided to the applyOps command, we
// normally wait on the OpTime of whichever operation successfully completed last. This is
@@ -251,20 +249,16 @@ public:
auto modeSW = repl::OplogApplication::parseMode(oplogApplicationModeString);
if (!modeSW.isOK()) {
// Unable to parse the mode argument.
- return CommandHelpers::appendCommandStatus(
- result,
- modeSW.getStatus().withContext(
- str::stream()
- << "Could not parse " + repl::ApplyOps::kOplogApplicationModeFieldName));
+ uassertStatusOK(modeSW.getStatus().withContext(
+ str::stream() << "Could not parse " +
+ repl::ApplyOps::kOplogApplicationModeFieldName));
}
oplogApplicationMode = modeSW.getValue();
} else if (status != ErrorCodes::NoSuchKey) {
// NoSuchKey means the user did not supply a mode.
- return CommandHelpers::appendCommandStatus(
- result,
- status.withContext(str::stream()
- << "Could not parse out "
- << repl::ApplyOps::kOplogApplicationModeFieldName));
+ uassertStatusOK(status.withContext(str::stream()
+ << "Could not parse out "
+ << repl::ApplyOps::kOplogApplicationModeFieldName));
}
auto applyOpsStatus = CommandHelpers::appendCommandStatusNoThrow(
diff --git a/src/mongo/db/commands/authentication_commands.cpp b/src/mongo/db/commands/authentication_commands.cpp
index 189416ee125..8c6f414661c 100644
--- a/src/mongo/db/commands/authentication_commands.cpp
+++ b/src/mongo/db/commands/authentication_commands.cpp
@@ -231,9 +231,7 @@ bool CmdAuthenticate::run(OperationContext* opCtx,
}
std::string mechanism = cmdObj.getStringField("mechanism");
if (mechanism.empty()) {
- CommandHelpers::appendCommandStatus(result,
- {ErrorCodes::BadValue, "Auth mechanism not specified"});
- return false;
+ uasserted(ErrorCodes::BadValue, "Auth mechanism not specified");
}
UserName user;
auto& sslPeerInfo = SSLPeerInfo::forSession(opCtx->getClient()->session());
@@ -260,15 +258,14 @@ bool CmdAuthenticate::run(OperationContext* opCtx,
<< (client->hasRemote() ? (" from client " + client->getRemote().toString()) : "")
<< " with mechanism " << mechanism << ": " << status;
}
+ sleepmillis(saslGlobalParams.authFailedDelay.load());
if (status.code() == ErrorCodes::AuthenticationFailed) {
// Statuses with code AuthenticationFailed may contain messages we do not wish to
// reveal to the user, so we return a status with the message "auth failed".
- CommandHelpers::appendCommandStatus(
- result, Status(ErrorCodes::AuthenticationFailed, "auth failed"));
+ uasserted(ErrorCodes::AuthenticationFailed, "auth failed");
} else {
- CommandHelpers::appendCommandStatus(result, status);
+ uassertStatusOK(status);
}
- sleepmillis(saslGlobalParams.authFailedDelay.load());
return false;
}
result.append("dbname", user.getDB());
diff --git a/src/mongo/db/commands/clone.cpp b/src/mongo/db/commands/clone.cpp
index 9bb2624e163..2bc47480310 100644
--- a/src/mongo/db/commands/clone.cpp
+++ b/src/mongo/db/commands/clone.cpp
@@ -127,7 +127,8 @@ public:
barr.append(clonedColls);
result.append("clonedColls", barr.arr());
- return CommandHelpers::appendCommandStatus(result, status);
+ uassertStatusOK(status);
+ return true;
}
} cmdClone;
diff --git a/src/mongo/db/commands/clone_collection.cpp b/src/mongo/db/commands/clone_collection.cpp
index 96e99496b90..f04d9bac576 100644
--- a/src/mongo/db/commands/clone_collection.cpp
+++ b/src/mongo/db/commands/clone_collection.cpp
@@ -133,13 +133,9 @@ public:
// (e.g. system.indexes), so we must check that it is legal to both create and write to the
// namespace.
auto allowedCreateStatus = userAllowedCreateNS(dbname, nsToCollectionSubstring(ns));
- if (!allowedCreateStatus.isOK()) {
- return CommandHelpers::appendCommandStatus(result, allowedCreateStatus);
- }
+ uassertStatusOK(allowedCreateStatus);
auto allowedWriteStatus = userAllowedWriteNS(dbname, nsToCollectionSubstring(ns));
- if (!allowedWriteStatus.isOK()) {
- return CommandHelpers::appendCommandStatus(result, allowedWriteStatus);
- }
+ uassertStatusOK(allowedWriteStatus);
BSONObj query = cmdObj.getObjectField("query");
if (query.isEmpty())
diff --git a/src/mongo/db/commands/collection_to_capped.cpp b/src/mongo/db/commands/collection_to_capped.cpp
index 07da52d1bca..4820dc1608f 100644
--- a/src/mongo/db/commands/collection_to_capped.cpp
+++ b/src/mongo/db/commands/collection_to_capped.cpp
@@ -121,25 +121,22 @@ public:
NamespaceString nss(dbname, to);
if (!repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesFor(opCtx, nss)) {
- return CommandHelpers::appendCommandStatus(
- result,
- Status(ErrorCodes::NotMaster,
- str::stream() << "Not primary while cloning collection " << from << " to "
- << to
- << " (as capped)"));
+ uasserted(ErrorCodes::NotMaster,
+ str::stream() << "Not primary while cloning collection " << from << " to "
+ << to
+ << " (as capped)");
}
Database* const db = autoDb.getDb();
if (!db) {
- return CommandHelpers::appendCommandStatus(
- result,
- Status(ErrorCodes::NamespaceNotFound,
- str::stream() << "database " << dbname << " not found"));
+ uasserted(ErrorCodes::NamespaceNotFound,
+ str::stream() << "database " << dbname << " not found");
}
Status status =
cloneCollectionAsCapped(opCtx, db, from.toString(), to.toString(), size, temp);
- return CommandHelpers::appendCommandStatus(result, status);
+ uassertStatusOK(status);
+ return true;
}
} cmdCloneCollectionAsCapped;
@@ -181,7 +178,8 @@ public:
return false;
}
- return CommandHelpers::appendCommandStatus(result, convertToCapped(opCtx, nss, size));
+ uassertStatusOK(convertToCapped(opCtx, nss, size));
+ return true;
}
} cmdConvertToCapped;
diff --git a/src/mongo/db/commands/compact.cpp b/src/mongo/db/commands/compact.cpp
index 83ddab63ce1..bffe66dd8c0 100644
--- a/src/mongo/db/commands/compact.cpp
+++ b/src/mongo/db/commands/compact.cpp
@@ -153,11 +153,9 @@ public:
// If db/collection does not exist, short circuit and return.
if (!collDB || !collection) {
if (view)
- return CommandHelpers::appendCommandStatus(
- result, {ErrorCodes::CommandNotSupportedOnView, "can't compact a view"});
+ uasserted(ErrorCodes::CommandNotSupportedOnView, "can't compact a view");
else
- return CommandHelpers::appendCommandStatus(
- result, {ErrorCodes::NamespaceNotFound, "collection does not exist"});
+ uasserted(ErrorCodes::NamespaceNotFound, "collection does not exist");
}
OldClientContext ctx(opCtx, nss.ns());
@@ -166,8 +164,7 @@ public:
log() << "compact " << nss.ns() << " begin, options: " << compactOptions;
StatusWith<CompactStats> status = collection->compact(opCtx, &compactOptions);
- if (!status.isOK())
- return CommandHelpers::appendCommandStatus(result, status.getStatus());
+ uassertStatusOK(status.getStatus());
if (status.getValue().corruptDocuments > 0)
result.append("invalidObjects", status.getValue().corruptDocuments);
diff --git a/src/mongo/db/commands/connection_status.cpp b/src/mongo/db/commands/connection_status.cpp
index 0d5a3f63980..bd71bcdbe0f 100644
--- a/src/mongo/db/commands/connection_status.cpp
+++ b/src/mongo/db/commands/connection_status.cpp
@@ -64,9 +64,7 @@ public:
bool showPrivileges;
Status status =
bsonExtractBooleanFieldWithDefault(cmdObj, "showPrivileges", false, &showPrivileges);
- if (!status.isOK()) {
- return CommandHelpers::appendCommandStatus(result, status);
- }
+ uassertStatusOK(status);
BSONObjBuilder authInfo(result.subobjStart("authInfo"));
{
diff --git a/src/mongo/db/commands/copydb_start_commands.cpp b/src/mongo/db/commands/copydb_start_commands.cpp
index 00105a60a9f..fd3cb1c272a 100644
--- a/src/mongo/db/commands/copydb_start_commands.cpp
+++ b/src/mongo/db/commands/copydb_start_commands.cpp
@@ -125,9 +125,7 @@ public:
BSONElement mechanismElement;
Status status = bsonExtractField(cmdObj, saslCommandMechanismFieldName, &mechanismElement);
- if (!status.isOK()) {
- return CommandHelpers::appendCommandStatus(result, status);
- }
+ uassertStatusOK(status);
BSONElement payloadElement;
status = bsonExtractField(cmdObj, saslCommandPayloadFieldName, &payloadElement);
@@ -146,7 +144,7 @@ public:
if (!authConn->runCommand(
fromDb, BSON("saslStart" << 1 << mechanismElement << payloadElement), ret)) {
authConn.reset();
- return CommandHelpers::appendCommandStatus(result, getStatusFromCommandResult(ret));
+ uassertStatusOK(getStatusFromCommandResult(ret));
}
CommandHelpers::filterCommandReplyForPassthrough(ret, &result);
diff --git a/src/mongo/db/commands/count_cmd.cpp b/src/mongo/db/commands/count_cmd.cpp
index 76a381d239e..45ad89b47b6 100644
--- a/src/mongo/db/commands/count_cmd.cpp
+++ b/src/mongo/db/commands/count_cmd.cpp
@@ -178,9 +178,7 @@ public:
const bool isExplain = false;
auto request = CountRequest::parseFromBSON(nss, cmdObj, isExplain);
- if (!request.isOK()) {
- return CommandHelpers::appendCommandStatus(result, request.getStatus());
- }
+ uassertStatusOK(request.getStatus());
// Check whether we are allowed to read from this node after acquiring our locks.
auto replCoord = repl::ReplicationCoordinator::get(opCtx);
@@ -192,9 +190,7 @@ public:
ctx.reset();
auto viewAggregation = request.getValue().asAggregationCommand();
- if (!viewAggregation.isOK()) {
- return CommandHelpers::appendCommandStatus(result, viewAggregation.getStatus());
- }
+ uassertStatusOK(viewAggregation.getStatus());
BSONObj aggResult = CommandHelpers::runCommandDirectly(
opCtx, OpMsgRequest::fromDBAndBody(dbname, std::move(viewAggregation.getValue())));
@@ -211,9 +207,7 @@ public:
auto statusWithPlanExecutor =
getExecutorCount(opCtx, collection, request.getValue(), false /*explain*/);
- if (!statusWithPlanExecutor.isOK()) {
- return CommandHelpers::appendCommandStatus(result, statusWithPlanExecutor.getStatus());
- }
+ uassertStatusOK(statusWithPlanExecutor.getStatus());
auto exec = std::move(statusWithPlanExecutor.getValue());
@@ -225,9 +219,7 @@ public:
}
Status execPlanStatus = exec->executePlan();
- if (!execPlanStatus.isOK()) {
- return CommandHelpers::appendCommandStatus(result, execPlanStatus);
- }
+ uassertStatusOK(execPlanStatus);
PlanSummaryStats summaryStats;
Explain::getSummaryStats(*exec, &summaryStats);
diff --git a/src/mongo/db/commands/create_indexes.cpp b/src/mongo/db/commands/create_indexes.cpp
index 0e08effe965..e36545cddba 100644
--- a/src/mongo/db/commands/create_indexes.cpp
+++ b/src/mongo/db/commands/create_indexes.cpp
@@ -240,8 +240,7 @@ public:
const NamespaceString ns(CommandHelpers::parseNsCollectionRequired(dbname, cmdObj));
Status status = userAllowedWriteNS(ns);
- if (!status.isOK())
- return CommandHelpers::appendCommandStatus(result, status);
+ uassertStatusOK(status);
// Disallow users from creating new indexes on config.transactions since the sessions
// code was optimized to not update indexes.
@@ -251,9 +250,7 @@ public:
auto specsWithStatus =
parseAndValidateIndexSpecs(opCtx, ns, cmdObj, serverGlobalParams.featureCompatibility);
- if (!specsWithStatus.isOK()) {
- return CommandHelpers::appendCommandStatus(result, specsWithStatus.getStatus());
- }
+ uassertStatusOK(specsWithStatus.getStatus());
auto specs = std::move(specsWithStatus.getValue());
// Index builds cannot currently handle lock interruption.
@@ -263,10 +260,8 @@ public:
// Do not use AutoGetOrCreateDb because we may relock the DbLock in mode IX.
Lock::DBLock dbLock(opCtx, ns.db(), MODE_X);
if (!repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesFor(opCtx, ns)) {
- return CommandHelpers::appendCommandStatus(
- result,
- Status(ErrorCodes::NotMaster,
- str::stream() << "Not primary while creating indexes in " << ns.ns()));
+ uasserted(ErrorCodes::NotMaster,
+ str::stream() << "Not primary while creating indexes in " << ns.ns());
}
Database* db = DatabaseHolder::getDatabaseHolder().get(opCtx, ns.db());
@@ -281,14 +276,11 @@ public:
} else {
if (db->getViewCatalog()->lookup(opCtx, ns.ns())) {
errmsg = "Cannot create indexes on a view";
- return CommandHelpers::appendCommandStatus(
- result, {ErrorCodes::CommandNotSupportedOnView, errmsg});
+ uasserted(ErrorCodes::CommandNotSupportedOnView, errmsg);
}
status = userAllowedCreateNS(ns.db(), ns.coll());
- if (!status.isOK()) {
- return CommandHelpers::appendCommandStatus(result, status);
- }
+ uassertStatusOK(status);
writeConflictRetry(opCtx, kCommandName, ns.ns(), [&] {
WriteUnitOfWork wunit(opCtx);
@@ -301,9 +293,7 @@ public:
auto indexSpecsWithDefaults =
resolveCollectionDefaultProperties(opCtx, collection, std::move(specs));
- if (!indexSpecsWithDefaults.isOK()) {
- return CommandHelpers::appendCommandStatus(result, indexSpecsWithDefaults.getStatus());
- }
+ uassertStatusOK(indexSpecsWithDefaults.getStatus());
specs = std::move(indexSpecsWithDefaults.getValue());
const int numIndexesBefore = collection->getIndexCatalog()->numIndexesTotal(opCtx);
@@ -330,10 +320,7 @@ public:
const BSONObj& spec = specs[i];
if (spec["unique"].trueValue()) {
status = checkUniqueIndexConstraints(opCtx, ns, spec["key"].Obj());
-
- if (!status.isOK()) {
- return CommandHelpers::appendCommandStatus(result, status);
- }
+ uassertStatusOK(status);
}
}
@@ -348,11 +335,9 @@ public:
opCtx->recoveryUnit()->abandonSnapshot();
dbLock.relockWithMode(MODE_IX);
if (!repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesFor(opCtx, ns)) {
- return CommandHelpers::appendCommandStatus(
- result,
- Status(ErrorCodes::NotMaster,
- str::stream() << "Not primary while creating background indexes in "
- << ns.ns()));
+ uasserted(ErrorCodes::NotMaster,
+ str::stream() << "Not primary while creating background indexes in "
+ << ns.ns());
}
}
diff --git a/src/mongo/db/commands/current_op.cpp b/src/mongo/db/commands/current_op.cpp
index a53d7876c2c..47e22ea8296 100644
--- a/src/mongo/db/commands/current_op.cpp
+++ b/src/mongo/db/commands/current_op.cpp
@@ -75,7 +75,7 @@ public:
return status;
}
- CommandHelpers::appendCommandStatus(responseBuilder, Status::OK());
+ CommandHelpers::appendSimpleCommandStatus(responseBuilder, true);
return CursorResponse::parseFromBSON(responseBuilder.obj());
}
diff --git a/src/mongo/db/commands/current_op_common.cpp b/src/mongo/db/commands/current_op_common.cpp
index 35bbadb32a6..de5af29560c 100644
--- a/src/mongo/db/commands/current_op_common.cpp
+++ b/src/mongo/db/commands/current_op_common.cpp
@@ -112,7 +112,7 @@ bool CurrentOpCommandBase::run(OperationContext* opCtx,
// Make any final custom additions to the response object.
appendToResponse(&result);
- return CommandHelpers::appendCommandStatus(result, Status::OK());
+ return true;
}
} // namespace mongo
diff --git a/src/mongo/db/commands/dbcommands.cpp b/src/mongo/db/commands/dbcommands.cpp
index 02a279e4653..9fdff86d86a 100644
--- a/src/mongo/db/commands/dbcommands.cpp
+++ b/src/mongo/db/commands/dbcommands.cpp
@@ -137,37 +137,33 @@ public:
// disallow dropping the config database
if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer &&
(dbname == NamespaceString::kConfigDb)) {
- return CommandHelpers::appendCommandStatus(
- result,
- Status(ErrorCodes::IllegalOperation,
- "Cannot drop 'config' database if mongod started "
- "with --configsvr"));
+ uasserted(ErrorCodes::IllegalOperation,
+ "Cannot drop 'config' database if mongod started "
+ "with --configsvr");
}
if ((repl::ReplicationCoordinator::get(opCtx)->getReplicationMode() !=
repl::ReplicationCoordinator::modeNone) &&
(dbname == NamespaceString::kLocalDb)) {
- return CommandHelpers::appendCommandStatus(
- result,
- Status(ErrorCodes::IllegalOperation,
- str::stream() << "Cannot drop '" << dbname
- << "' database while replication is active"));
+ uasserted(ErrorCodes::IllegalOperation,
+ str::stream() << "Cannot drop '" << dbname
+ << "' database while replication is active");
}
BSONElement e = cmdObj.firstElement();
int p = (int)e.number();
if (p != 1) {
- return CommandHelpers::appendCommandStatus(
- result, Status(ErrorCodes::IllegalOperation, "have to pass 1 as db parameter"));
+ uasserted(ErrorCodes::IllegalOperation, "have to pass 1 as db parameter");
}
Status status = dropDatabase(opCtx, dbname);
if (status == ErrorCodes::NamespaceNotFound) {
- return CommandHelpers::appendCommandStatus(result, Status::OK());
+ return true;
}
if (status.isOK()) {
result.append("dropped", dbname);
}
- return CommandHelpers::appendCommandStatus(result, status);
+ uassertStatusOK(status);
+ return true;
}
} cmdDropDatabase;
@@ -215,11 +211,9 @@ public:
auto db = DatabaseHolder::getDatabaseHolder().get(opCtx, dbname);
if (db) {
if (db->isDropPending(opCtx)) {
- return CommandHelpers::appendCommandStatus(
- result,
- Status(ErrorCodes::DatabaseDropPending,
- str::stream() << "Cannot repair database " << dbname
- << " since it is pending being dropped."));
+ uasserted(ErrorCodes::DatabaseDropPending,
+ str::stream() << "Cannot repair database " << dbname
+ << " since it is pending being dropped.");
}
} else {
// If the name doesn't make an exact match, check for a case insensitive match.
@@ -259,7 +253,8 @@ public:
// Open database before returning
DatabaseHolder::getDatabaseHolder().openDb(opCtx, dbname);
- return CommandHelpers::appendCommandStatus(result, status);
+ uassertStatusOK(status);
+ return true;
}
} cmdRepairDatabase;
@@ -308,13 +303,13 @@ public:
return false;
}
- return CommandHelpers::appendCommandStatus(
- result,
+ uassertStatusOK(
dropCollection(opCtx,
nsToDrop,
result,
{},
DropCollectionSystemCollectionMode::kDisallowSystemCollectionDrops));
+ return true;
}
} cmdDrop;
@@ -362,24 +357,19 @@ public:
// Validate _id index spec and fill in missing fields.
if (auto idIndexElem = cmdObj["idIndex"]) {
if (cmdObj["viewOn"]) {
- return CommandHelpers::appendCommandStatus(
- result,
- {ErrorCodes::InvalidOptions,
- str::stream() << "'idIndex' is not allowed with 'viewOn': " << idIndexElem});
+ uasserted(ErrorCodes::InvalidOptions,
+ str::stream() << "'idIndex' is not allowed with 'viewOn': "
+ << idIndexElem);
}
if (cmdObj["autoIndexId"]) {
- return CommandHelpers::appendCommandStatus(
- result,
- {ErrorCodes::InvalidOptions,
- str::stream() << "'idIndex' is not allowed with 'autoIndexId': "
- << idIndexElem});
+ uasserted(ErrorCodes::InvalidOptions,
+ str::stream() << "'idIndex' is not allowed with 'autoIndexId': "
+ << idIndexElem);
}
if (idIndexElem.type() != BSONType::Object) {
- return CommandHelpers::appendCommandStatus(
- result,
- {ErrorCodes::TypeMismatch,
- str::stream() << "'idIndex' has to be a document: " << idIndexElem});
+ uasserted(ErrorCodes::TypeMismatch,
+ str::stream() << "'idIndex' has to be a document: " << idIndexElem);
}
auto idIndexSpec = idIndexElem.Obj();
@@ -393,16 +383,13 @@ public:
std::unique_ptr<CollatorInterface> defaultCollator;
if (auto collationElem = cmdObj["collation"]) {
if (collationElem.type() != BSONType::Object) {
- return CommandHelpers::appendCommandStatus(
- result,
- {ErrorCodes::TypeMismatch,
- str::stream() << "'collation' has to be a document: " << collationElem});
+ uasserted(ErrorCodes::TypeMismatch,
+ str::stream() << "'collation' has to be a document: "
+ << collationElem);
}
auto collatorStatus = CollatorFactoryInterface::get(opCtx->getServiceContext())
->makeFromBSON(collationElem.Obj());
- if (!collatorStatus.isOK()) {
- return CommandHelpers::appendCommandStatus(result, collatorStatus.getStatus());
- }
+ uassertStatusOK(collatorStatus.getStatus());
defaultCollator = std::move(collatorStatus.getValue());
}
idIndexSpec = uassertStatusOK(index_key_validate::validateIndexSpecCollation(
@@ -417,22 +404,20 @@ public:
idIndexCollator = std::move(collatorStatus.getValue());
}
if (!CollatorInterface::collatorsMatch(defaultCollator.get(), idIndexCollator.get())) {
- return CommandHelpers::appendCommandStatus(
- result,
- {ErrorCodes::BadValue,
- "'idIndex' must have the same collation as the collection."});
+ uasserted(ErrorCodes::BadValue,
+ "'idIndex' must have the same collation as the collection.");
}
// Remove "idIndex" field from command.
auto resolvedCmdObj = cmdObj.removeField("idIndex");
- return CommandHelpers::appendCommandStatus(
- result, createCollection(opCtx, dbname, resolvedCmdObj, idIndexSpec));
+ uassertStatusOK(createCollection(opCtx, dbname, resolvedCmdObj, idIndexSpec));
+ return true;
}
BSONObj idIndexSpec;
- return CommandHelpers::appendCommandStatus(
- result, createCollection(opCtx, dbname, cmdObj, idIndexSpec));
+ uassertStatusOK(createCollection(opCtx, dbname, cmdObj, idIndexSpec));
+ return true;
}
} cmdCreate;
@@ -569,10 +554,8 @@ public:
if (PlanExecutor::FAILURE == state || PlanExecutor::DEAD == state) {
warning() << "Internal error while reading " << ns;
- return CommandHelpers::appendCommandStatus(
- result,
- WorkingSetCommon::getMemberObjectStatus(obj).withContext(
- "Executor error while reading during dataSize command"));
+ uassertStatusOK(WorkingSetCommon::getMemberObjectStatus(obj).withContext(
+ "Executor error while reading during dataSize command"));
}
ostringstream os;
@@ -665,7 +648,8 @@ public:
const BSONObj& jsobj,
BSONObjBuilder& result) {
const NamespaceString nss(CommandHelpers::parseNsCollectionRequired(dbname, jsobj));
- return CommandHelpers::appendCommandStatus(result, collMod(opCtx, nss, jsobj, &result));
+ uassertStatusOK(collMod(opCtx, nss, jsobj, &result));
+ return true;
}
} collectionModCommand;
diff --git a/src/mongo/db/commands/dbcommands_d.cpp b/src/mongo/db/commands/dbcommands_d.cpp
index 90ab6c70bfa..a323bd2b3d0 100644
--- a/src/mongo/db/commands/dbcommands_d.cpp
+++ b/src/mongo/db/commands/dbcommands_d.cpp
@@ -275,10 +275,8 @@ public:
}
if (PlanExecutor::DEAD == state || PlanExecutor::FAILURE == state) {
- return CommandHelpers::appendCommandStatus(
- result,
- WorkingSetCommon::getMemberObjectStatus(obj).withContext(
- "Executor error during filemd5 command"));
+ uassertStatusOK(WorkingSetCommon::getMemberObjectStatus(obj).withContext(
+ "Executor error during filemd5 command"));
}
if (partialOk)
diff --git a/src/mongo/db/commands/distinct.cpp b/src/mongo/db/commands/distinct.cpp
index 2f47166c6e4..8802e4875cb 100644
--- a/src/mongo/db/commands/distinct.cpp
+++ b/src/mongo/db/commands/distinct.cpp
@@ -181,9 +181,7 @@ public:
ctx.reset();
auto viewAggregation = parsedDistinct.asAggregationCommand();
- if (!viewAggregation.isOK()) {
- return CommandHelpers::appendCommandStatus(result, viewAggregation.getStatus());
- }
+ uassertStatusOK(viewAggregation.getStatus());
BSONObj aggResult = CommandHelpers::runCommandDirectly(
opCtx, OpMsgRequest::fromDBAndBody(dbname, std::move(viewAggregation.getValue())));
@@ -194,9 +192,7 @@ public:
Collection* const collection = ctx->getCollection();
auto executor = getExecutorDistinct(opCtx, collection, nss.ns(), &parsedDistinct);
- if (!executor.isOK()) {
- return CommandHelpers::appendCommandStatus(result, executor.getStatus());
- }
+ uassertStatusOK(executor.getStatus());
{
stdx::lock_guard<Client> lk(*opCtx->getClient());
@@ -247,10 +243,8 @@ public:
<< redact(PlanExecutor::statestr(state))
<< ", stats: " << redact(Explain::getWinningPlanStats(executor.getValue().get()));
- return CommandHelpers::appendCommandStatus(
- result,
- WorkingSetCommon::getMemberObjectStatus(obj).withContext(
- "Executor error during distinct command"));
+ uassertStatusOK(WorkingSetCommon::getMemberObjectStatus(obj).withContext(
+ "Executor error during distinct command"));
}
diff --git a/src/mongo/db/commands/do_txn_cmd.cpp b/src/mongo/db/commands/do_txn_cmd.cpp
index 15183f8524c..d9ab5c0df9e 100644
--- a/src/mongo/db/commands/do_txn_cmd.cpp
+++ b/src/mongo/db/commands/do_txn_cmd.cpp
@@ -145,9 +145,7 @@ public:
maybeDisableValidation.emplace(opCtx);
auto status = OplogApplicationChecks::checkOperationArray(cmdObj.firstElement());
- if (!status.isOK()) {
- return CommandHelpers::appendCommandStatus(result, status);
- }
+ uassertStatusOK(status);
// TODO (SERVER-30217): When a write concern is provided to the doTxn command, we
// normally wait on the OpTime of whichever operation successfully completed last. This is
diff --git a/src/mongo/db/commands/drop_indexes.cpp b/src/mongo/db/commands/drop_indexes.cpp
index 8d8fc318eff..269319f0c7a 100644
--- a/src/mongo/db/commands/drop_indexes.cpp
+++ b/src/mongo/db/commands/drop_indexes.cpp
@@ -89,7 +89,8 @@ public:
const BSONObj& jsobj,
BSONObjBuilder& result) {
const NamespaceString nss = CommandHelpers::parseNsCollectionRequired(dbname, jsobj);
- return CommandHelpers::appendCommandStatus(result, dropIndexes(opCtx, nss, jsobj, &result));
+ uassertStatusOK(dropIndexes(opCtx, nss, jsobj, &result));
+ return true;
}
} cmdDropIndexes;
@@ -130,11 +131,9 @@ public:
Collection* collection = autoDb.getDb()->getCollection(opCtx, toReIndexNss);
if (!collection) {
if (autoDb.getDb()->getViewCatalog()->lookup(opCtx, toReIndexNss.ns()))
- return CommandHelpers::appendCommandStatus(
- result, {ErrorCodes::CommandNotSupportedOnView, "can't re-index a view"});
+ uasserted(ErrorCodes::CommandNotSupportedOnView, "can't re-index a view");
else
- return CommandHelpers::appendCommandStatus(
- result, {ErrorCodes::NamespaceNotFound, "collection does not exist"});
+ uasserted(ErrorCodes::NamespaceNotFound, "collection does not exist");
}
BackgroundOperation::assertNoBgOpInProgForNs(toReIndexNss.ns());
@@ -197,16 +196,12 @@ public:
indexer = stdx::make_unique<MultiIndexBlock>(opCtx, collection);
swIndexesToRebuild = indexer->init(all);
- if (!swIndexesToRebuild.isOK()) {
- return CommandHelpers::appendCommandStatus(result, swIndexesToRebuild.getStatus());
- }
+ uassertStatusOK(swIndexesToRebuild.getStatus());
wunit.commit();
}
auto status = indexer->insertAllDocumentsInCollection();
- if (!status.isOK()) {
- return CommandHelpers::appendCommandStatus(result, status);
- }
+ uassertStatusOK(status);
{
WriteUnitOfWork wunit(opCtx);
diff --git a/src/mongo/db/commands/find_cmd.cpp b/src/mongo/db/commands/find_cmd.cpp
index 17dce3de227..2acf75442f6 100644
--- a/src/mongo/db/commands/find_cmd.cpp
+++ b/src/mongo/db/commands/find_cmd.cpp
@@ -229,9 +229,7 @@ public:
// Pass parseNs to makeFromFindCommand in case cmdObj does not have a UUID.
auto qrStatus = QueryRequest::makeFromFindCommand(
NamespaceString(parseNs(dbname, cmdObj)), cmdObj, isExplain);
- if (!qrStatus.isOK()) {
- return CommandHelpers::appendCommandStatus(result, qrStatus.getStatus());
- }
+ uassertStatusOK(qrStatus.getStatus());
auto replCoord = repl::ReplicationCoordinator::get(opCtx);
auto& qr = qrStatus.getValue();
@@ -240,9 +238,7 @@ public:
if (auto term = qr->getReplicationTerm()) {
Status status = replCoord->updateTerm(opCtx, *term);
// Note: updateTerm returns ok if term stayed the same.
- if (!status.isOK()) {
- return CommandHelpers::appendCommandStatus(result, status);
- }
+ uassertStatusOK(status);
}
// Acquire locks. If the query is on a view, we release our locks and convert the query
@@ -277,9 +273,7 @@ public:
expCtx,
extensionsCallback,
MatchExpressionParser::kAllowAllSpecialFeatures);
- if (!statusWithCQ.isOK()) {
- return CommandHelpers::appendCommandStatus(result, statusWithCQ.getStatus());
- }
+ uassertStatusOK(statusWithCQ.getStatus());
std::unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
if (ctx->getView()) {
@@ -290,19 +284,15 @@ public:
// necessary), if possible.
const auto& qr = cq->getQueryRequest();
auto viewAggregationCommand = qr.asAggregationCommand();
- if (!viewAggregationCommand.isOK())
- return CommandHelpers::appendCommandStatus(result,
- viewAggregationCommand.getStatus());
+ uassertStatusOK(viewAggregationCommand.getStatus());
BSONObj aggResult = CommandHelpers::runCommandDirectly(
opCtx,
OpMsgRequest::fromDBAndBody(dbname, std::move(viewAggregationCommand.getValue())));
auto status = getStatusFromCommandResult(aggResult);
if (status.code() == ErrorCodes::InvalidPipelineOperator) {
- return CommandHelpers::appendCommandStatus(
- result,
- {ErrorCodes::InvalidPipelineOperator,
- str::stream() << "Unsupported in view pipeline: " << status.reason()});
+ uasserted(ErrorCodes::InvalidPipelineOperator,
+ str::stream() << "Unsupported in view pipeline: " << status.reason());
}
result.resetToEmpty();
result.appendElements(aggResult);
@@ -313,9 +303,7 @@ public:
// Get the execution plan for the query.
auto statusWithPlanExecutor = getExecutorFind(opCtx, collection, nss, std::move(cq));
- if (!statusWithPlanExecutor.isOK()) {
- return CommandHelpers::appendCommandStatus(result, statusWithPlanExecutor.getStatus());
- }
+ uassertStatusOK(statusWithPlanExecutor.getStatus());
auto exec = std::move(statusWithPlanExecutor.getValue());
@@ -363,10 +351,8 @@ public:
error() << "Plan executor error during find command: " << PlanExecutor::statestr(state)
<< ", stats: " << redact(Explain::getWinningPlanStats(exec.get()));
- return CommandHelpers::appendCommandStatus(
- result,
- WorkingSetCommon::getMemberObjectStatus(obj).withContext(
- "Executor error during find command"));
+ uassertStatusOK(WorkingSetCommon::getMemberObjectStatus(obj).withContext(
+ "Executor error during find command"));
}
// Before saving the cursor, ensure that whatever plan we established happened with the
diff --git a/src/mongo/db/commands/fsync.cpp b/src/mongo/db/commands/fsync.cpp
index 62d25856e38..1af900475e7 100644
--- a/src/mongo/db/commands/fsync.cpp
+++ b/src/mongo/db/commands/fsync.cpp
@@ -185,7 +185,7 @@ public:
if (!status.isOK()) {
releaseLock();
warning() << "fsyncLock failed. Lock count reset to 0. Status: " << status;
- return CommandHelpers::appendCommandStatus(result, status);
+ uassertStatusOK(status);
}
}
diff --git a/src/mongo/db/commands/generic_servers.cpp b/src/mongo/db/commands/generic_servers.cpp
index c3612a49a26..40f47e42465 100644
--- a/src/mongo/db/commands/generic_servers.cpp
+++ b/src/mongo/db/commands/generic_servers.cpp
@@ -236,13 +236,11 @@ public:
BSONObjBuilder& result) {
BSONElement val = cmdObj.firstElement();
if (val.type() != String) {
- return CommandHelpers::appendCommandStatus(
- result,
- Status(ErrorCodes::TypeMismatch,
- str::stream() << "Argument to getLog must be of type String; found "
- << val.toString(false)
- << " of type "
- << typeName(val.type())));
+ uasserted(ErrorCodes::TypeMismatch,
+ str::stream() << "Argument to getLog must be of type String; found "
+ << val.toString(false)
+ << " of type "
+ << typeName(val.type()));
}
string p = val.String();
@@ -306,13 +304,10 @@ public:
BSONObjBuilder& result) {
std::string logName;
Status status = bsonExtractStringField(cmdObj, "clearLog", &logName);
- if (!status.isOK()) {
- return CommandHelpers::appendCommandStatus(result, status);
- }
+ uassertStatusOK(status);
if (logName != "global") {
- return CommandHelpers::appendCommandStatus(
- result, Status(ErrorCodes::InvalidOptions, "Only the 'global' log can be cleared"));
+ uasserted(ErrorCodes::InvalidOptions, "Only the 'global' log can be cleared");
}
RamLog* ramlog = RamLog::getIfExists(logName);
invariant(ramlog);
diff --git a/src/mongo/db/commands/geo_near_cmd.cpp b/src/mongo/db/commands/geo_near_cmd.cpp
index f76dbef2a67..7385f6645f5 100644
--- a/src/mongo/db/commands/geo_near_cmd.cpp
+++ b/src/mongo/db/commands/geo_near_cmd.cpp
@@ -176,7 +176,7 @@ public:
Status collationEltStatus =
bsonExtractTypedField(cmdObj, "collation", BSONType::Object, &collationElt);
if (!collationEltStatus.isOK() && (collationEltStatus != ErrorCodes::NoSuchKey)) {
- return CommandHelpers::appendCommandStatus(result, collationEltStatus);
+ uassertStatusOK(collationEltStatus);
}
if (collationEltStatus.isOK()) {
collation = collationElt.Obj();
@@ -304,10 +304,8 @@ public:
log() << "Plan executor error during geoNear command: " << PlanExecutor::statestr(state)
<< ", stats: " << redact(Explain::getWinningPlanStats(exec.get()));
- return CommandHelpers::appendCommandStatus(
- result,
- WorkingSetCommon::getMemberObjectStatus(currObj).withContext(
- "Executor error during geoNear command"));
+ uassertStatusOK(WorkingSetCommon::getMemberObjectStatus(currObj).withContext(
+ "Executor error during geoNear command"));
}
PlanSummaryStats summary;
diff --git a/src/mongo/db/commands/get_last_error.cpp b/src/mongo/db/commands/get_last_error.cpp
index 2cb45c3b445..abeb8ed2220 100644
--- a/src/mongo/db/commands/get_last_error.cpp
+++ b/src/mongo/db/commands/get_last_error.cpp
@@ -180,12 +180,10 @@ public:
return CommandHelpers::appendCommandStatusNoThrow(result, status);
}
} else {
- return CommandHelpers::appendCommandStatus(
- result,
- Status(ErrorCodes::TypeMismatch,
- str::stream() << "Expected \"wOpTime\" field in getLastError to "
- "have type Date, Timestamp, or OpTime but found type "
- << typeName(opTimeElement.type())));
+ uasserted(ErrorCodes::TypeMismatch,
+ str::stream() << "Expected \"wOpTime\" field in getLastError to "
+ "have type Date, Timestamp, or OpTime but found type "
+ << typeName(opTimeElement.type()));
}
diff --git a/src/mongo/db/commands/getmore_cmd.cpp b/src/mongo/db/commands/getmore_cmd.cpp
index 9f2904a971d..6047649475f 100644
--- a/src/mongo/db/commands/getmore_cmd.cpp
+++ b/src/mongo/db/commands/getmore_cmd.cpp
@@ -225,9 +225,7 @@ public:
auto replCoord = repl::ReplicationCoordinator::get(opCtx);
Status status = replCoord->updateTerm(opCtx, *request.term);
// Note: updateTerm returns ok if term stayed the same.
- if (!status.isOK()) {
- return CommandHelpers::appendCommandStatus(result, status);
- }
+ uassertStatusOK(status);
}
// Cursors come in one of two flavors:
@@ -273,18 +271,13 @@ public:
Collection* collection = readLock->getCollection();
if (!collection) {
- return CommandHelpers::appendCommandStatus(
- result,
- Status(ErrorCodes::OperationFailed,
- "collection dropped between getMore calls"));
+ uasserted(ErrorCodes::OperationFailed, "collection dropped between getMore calls");
}
cursorManager = collection->getCursorManager();
}
auto ccPin = cursorManager->pinCursor(opCtx, request.cursorid);
- if (!ccPin.isOK()) {
- return CommandHelpers::appendCommandStatus(result, ccPin.getStatus());
- }
+ uassertStatusOK(ccPin.getStatus());
ClientCursor* cursor = ccPin.getValue().getCursor();
@@ -315,20 +308,16 @@ public:
// authenticated in order to run getMore on the cursor.
if (!AuthorizationSession::get(opCtx->getClient())
->isCoauthorizedWith(cursor->getAuthenticatedUsers())) {
- return CommandHelpers::appendCommandStatus(
- result,
- Status(ErrorCodes::Unauthorized,
- str::stream() << "cursor id " << request.cursorid
- << " was not created by the authenticated user"));
+ uasserted(ErrorCodes::Unauthorized,
+ str::stream() << "cursor id " << request.cursorid
+ << " was not created by the authenticated user");
}
if (request.nss != cursor->nss()) {
- return CommandHelpers::appendCommandStatus(
- result,
- Status(ErrorCodes::Unauthorized,
- str::stream() << "Requested getMore on namespace '" << request.nss.ns()
- << "', but cursor belongs to a different namespace "
- << cursor->nss().ns()));
+ uasserted(ErrorCodes::Unauthorized,
+ str::stream() << "Requested getMore on namespace '" << request.nss.ns()
+ << "', but cursor belongs to a different namespace "
+ << cursor->nss().ns());
}
// Ensure the lsid and txnNumber of the getMore match that of the originating command.
@@ -336,11 +325,9 @@ public:
validateTxnNumber(opCtx, request, cursor);
if (request.nss.isOplog() && MONGO_FAIL_POINT(rsStopGetMoreCmd)) {
- return CommandHelpers::appendCommandStatus(
- result,
- Status(ErrorCodes::CommandFailed,
- str::stream() << "getMore on " << request.nss.ns()
- << " rejected due to active fail point rsStopGetMoreCmd"));
+ uasserted(ErrorCodes::CommandFailed,
+ str::stream() << "getMore on " << request.nss.ns()
+ << " rejected due to active fail point rsStopGetMoreCmd");
}
// Validation related to awaitData.
@@ -351,7 +338,7 @@ public:
if (request.awaitDataTimeout && !cursor->isAwaitData()) {
Status status(ErrorCodes::BadValue,
"cannot set maxTimeMS on getMore command for a non-awaitData cursor");
- return CommandHelpers::appendCommandStatus(result, status);
+ uassertStatusOK(status);
}
// On early return, get rid of the cursor.
@@ -441,9 +428,7 @@ public:
}
Status batchStatus = generateBatch(opCtx, cursor, request, &nextBatch, &state, &numResults);
- if (!batchStatus.isOK()) {
- return CommandHelpers::appendCommandStatus(result, batchStatus);
- }
+ uassertStatusOK(batchStatus);
PlanSummaryStats postExecutionStats;
Explain::getSummaryStats(*exec, &postExecutionStats);
@@ -506,9 +491,7 @@ public:
globalOpCounters.gotGetMore();
StatusWith<GetMoreRequest> parsedRequest = GetMoreRequest::parseFromBSON(dbname, cmdObj);
- if (!parsedRequest.isOK()) {
- return CommandHelpers::appendCommandStatus(result, parsedRequest.getStatus());
- }
+ uassertStatusOK(parsedRequest.getStatus());
auto request = parsedRequest.getValue();
return runParsed(opCtx, request.nss, request, cmdObj, result);
}
diff --git a/src/mongo/db/commands/group_cmd.cpp b/src/mongo/db/commands/group_cmd.cpp
index de3811cc6fa..77e5e8e62b5 100644
--- a/src/mongo/db/commands/group_cmd.cpp
+++ b/src/mongo/db/commands/group_cmd.cpp
@@ -156,17 +156,13 @@ private:
GroupRequest groupRequest;
Status parseRequestStatus = _parseRequest(dbname, cmdObj, &groupRequest);
- if (!parseRequestStatus.isOK()) {
- return CommandHelpers::appendCommandStatus(result, parseRequestStatus);
- }
+ uassertStatusOK(parseRequestStatus);
AutoGetCollectionForReadCommand ctx(opCtx, groupRequest.ns);
Collection* coll = ctx.getCollection();
auto statusWithPlanExecutor = getExecutorGroup(opCtx, coll, groupRequest);
- if (!statusWithPlanExecutor.isOK()) {
- return CommandHelpers::appendCommandStatus(result, statusWithPlanExecutor.getStatus());
- }
+ uassertStatusOK(statusWithPlanExecutor.getStatus());
auto planExecutor = std::move(statusWithPlanExecutor.getValue());
@@ -182,10 +178,8 @@ private:
if (PlanExecutor::ADVANCED != state) {
invariant(PlanExecutor::FAILURE == state || PlanExecutor::DEAD == state);
- return CommandHelpers::appendCommandStatus(
- result,
- WorkingSetCommon::getMemberObjectStatus(retval).withContext(
- "Plan executor error during group command"));
+ uassertStatusOK(WorkingSetCommon::getMemberObjectStatus(retval).withContext(
+ "Plan executor error during group command"));
}
invariant(planExecutor->isEOF());
diff --git a/src/mongo/db/commands/index_filter_commands.cpp b/src/mongo/db/commands/index_filter_commands.cpp
index 203152d5fec..6b1407d29ac 100644
--- a/src/mongo/db/commands/index_filter_commands.cpp
+++ b/src/mongo/db/commands/index_filter_commands.cpp
@@ -121,7 +121,8 @@ bool IndexFilterCommand::run(OperationContext* opCtx,
BSONObjBuilder& result) {
const NamespaceString nss(CommandHelpers::parseNsCollectionRequired(dbname, cmdObj));
Status status = runIndexFilterCommand(opCtx, nss.ns(), cmdObj, &result);
- return CommandHelpers::appendCommandStatus(result, status);
+ uassertStatusOK(status);
+ return true;
}
diff --git a/src/mongo/db/commands/kill_all_sessions_by_pattern_command.cpp b/src/mongo/db/commands/kill_all_sessions_by_pattern_command.cpp
index 925695e9b2d..cebc66d0cc4 100644
--- a/src/mongo/db/commands/kill_all_sessions_by_pattern_command.cpp
+++ b/src/mongo/db/commands/kill_all_sessions_by_pattern_command.cpp
@@ -101,10 +101,8 @@ public:
for (const auto& pattern : ksc.getKillAllSessionsByPattern()) {
if (pattern.getUsers() || pattern.getRoles()) {
- return CommandHelpers::appendCommandStatus(
- result,
- Status(ErrorCodes::Unauthorized,
- "Not authorized to impersonate in killAllSessionsByPattern"));
+ uasserted(ErrorCodes::Unauthorized,
+ "Not authorized to impersonate in killAllSessionsByPattern");
}
}
}
@@ -113,8 +111,8 @@ public:
KillAllSessionsByPatternSet patterns{ksc.getKillAllSessionsByPattern().begin(),
ksc.getKillAllSessionsByPattern().end()};
- return CommandHelpers::appendCommandStatus(result,
- killSessionsCmdHelper(opCtx, result, patterns));
+ uassertStatusOK(killSessionsCmdHelper(opCtx, result, patterns));
+ return true;
}
} killAllSessionsByPatternCommand;
diff --git a/src/mongo/db/commands/kill_all_sessions_command.cpp b/src/mongo/db/commands/kill_all_sessions_command.cpp
index 624d3192968..80b37071e24 100644
--- a/src/mongo/db/commands/kill_all_sessions_command.cpp
+++ b/src/mongo/db/commands/kill_all_sessions_command.cpp
@@ -101,8 +101,8 @@ public:
}
}
- return CommandHelpers::appendCommandStatus(result,
- killSessionsCmdHelper(opCtx, result, patterns));
+ uassertStatusOK(killSessionsCmdHelper(opCtx, result, patterns));
+ return true;
}
} killAllSessionsCommand;
diff --git a/src/mongo/db/commands/kill_sessions_command.cpp b/src/mongo/db/commands/kill_sessions_command.cpp
index 2afcc1588d4..18623529346 100644
--- a/src/mongo/db/commands/kill_sessions_command.cpp
+++ b/src/mongo/db/commands/kill_sessions_command.cpp
@@ -127,8 +127,8 @@ public:
}
}
- return CommandHelpers::appendCommandStatus(result,
- killSessionsCmdHelper(opCtx, result, patterns));
+ uassertStatusOK(killSessionsCmdHelper(opCtx, result, patterns));
+ return true;
}
} killSessionsCommand;
diff --git a/src/mongo/db/commands/killcursors_common.cpp b/src/mongo/db/commands/killcursors_common.cpp
index 17933cf2cad..06928f53eab 100644
--- a/src/mongo/db/commands/killcursors_common.cpp
+++ b/src/mongo/db/commands/killcursors_common.cpp
@@ -70,9 +70,7 @@ bool KillCursorsCmdBase::run(OperationContext* opCtx,
const BSONObj& cmdObj,
BSONObjBuilder& result) {
auto statusWithRequest = KillCursorsRequest::parseFromBSON(dbname, cmdObj);
- if (!statusWithRequest.isOK()) {
- return CommandHelpers::appendCommandStatus(result, statusWithRequest.getStatus());
- }
+ uassertStatusOK(statusWithRequest.getStatus());
auto killCursorsRequest = std::move(statusWithRequest.getValue());
std::vector<CursorId> cursorsKilled;
diff --git a/src/mongo/db/commands/list_collections.cpp b/src/mongo/db/commands/list_collections.cpp
index d943086439e..dce39421301 100644
--- a/src/mongo/db/commands/list_collections.cpp
+++ b/src/mongo/db/commands/list_collections.cpp
@@ -253,17 +253,14 @@ public:
BSONElement filterElt = jsobj["filter"];
if (!filterElt.eoo()) {
if (filterElt.type() != mongo::Object) {
- return CommandHelpers::appendCommandStatus(
- result, Status(ErrorCodes::BadValue, "\"filter\" must be an object"));
+ uasserted(ErrorCodes::BadValue, "\"filter\" must be an object");
}
// The collator is null because collection objects are compared using binary comparison.
const CollatorInterface* collator = nullptr;
boost::intrusive_ptr<ExpressionContext> expCtx(new ExpressionContext(opCtx, collator));
StatusWithMatchExpression statusWithMatcher =
MatchExpressionParser::parse(filterElt.Obj(), std::move(expCtx));
- if (!statusWithMatcher.isOK()) {
- return CommandHelpers::appendCommandStatus(result, statusWithMatcher.getStatus());
- }
+ uassertStatusOK(statusWithMatcher.getStatus());
matcher = std::move(statusWithMatcher.getValue());
}
@@ -271,19 +268,14 @@ public:
long long batchSize;
Status parseCursorStatus =
CursorRequest::parseCommandCursorOptions(jsobj, defaultBatchSize, &batchSize);
- if (!parseCursorStatus.isOK()) {
- return CommandHelpers::appendCommandStatus(result, parseCursorStatus);
- }
+ uassertStatusOK(parseCursorStatus);
// Check for 'includePendingDrops' flag. The default is to not include drop-pending
// collections.
bool includePendingDrops;
Status status = bsonExtractBooleanFieldWithDefault(
jsobj, "includePendingDrops", false, &includePendingDrops);
-
- if (!status.isOK()) {
- return CommandHelpers::appendCommandStatus(result, status);
- }
+ uassertStatusOK(status);
AutoGetDb autoDb(opCtx, dbname, MODE_IS);
@@ -331,9 +323,7 @@ public:
auto statusWithPlanExecutor = PlanExecutor::make(
opCtx, std::move(ws), std::move(root), cursorNss, PlanExecutor::NO_YIELD);
- if (!statusWithPlanExecutor.isOK()) {
- return CommandHelpers::appendCommandStatus(result, statusWithPlanExecutor.getStatus());
- }
+ uassertStatusOK(statusWithPlanExecutor.getStatus());
auto exec = std::move(statusWithPlanExecutor.getValue());
BSONArrayBuilder firstBatch;
diff --git a/src/mongo/db/commands/list_databases.cpp b/src/mongo/db/commands/list_databases.cpp
index 3c93304a370..5643466a32b 100644
--- a/src/mongo/db/commands/list_databases.cpp
+++ b/src/mongo/db/commands/list_databases.cpp
@@ -92,11 +92,10 @@ public:
std::unique_ptr<MatchExpression> filter;
if (auto filterElt = jsobj[kFilterField]) {
if (filterElt.type() != BSONType::Object) {
- return CommandHelpers::appendCommandStatus(
- result,
- {ErrorCodes::TypeMismatch,
- str::stream() << "Field '" << kFilterField << "' must be of type Object in: "
- << jsobj});
+ uasserted(ErrorCodes::TypeMismatch,
+ str::stream() << "Field '" << kFilterField
+ << "' must be of type Object in: "
+ << jsobj);
}
// The collator is null because database metadata objects are compared using simple
// binary comparison.
@@ -104,9 +103,7 @@ public:
boost::intrusive_ptr<ExpressionContext> expCtx(new ExpressionContext(opCtx, collator));
auto statusWithMatcher =
MatchExpressionParser::parse(filterElt.Obj(), std::move(expCtx));
- if (!statusWithMatcher.isOK()) {
- return CommandHelpers::appendCommandStatus(result, statusWithMatcher.getStatus());
- }
+ uassertStatusOK(statusWithMatcher.getStatus());
filter = std::move(statusWithMatcher.getValue());
}
bool nameOnly = jsobj[kNameOnlyField].trueValue();
diff --git a/src/mongo/db/commands/list_indexes.cpp b/src/mongo/db/commands/list_indexes.cpp
index 19993d6084b..dce38f51fa5 100644
--- a/src/mongo/db/commands/list_indexes.cpp
+++ b/src/mongo/db/commands/list_indexes.cpp
@@ -165,9 +165,7 @@ public:
auto statusWithPlanExecutor = PlanExecutor::make(
opCtx, std::move(ws), std::move(root), cursorNss, PlanExecutor::NO_YIELD);
- if (!statusWithPlanExecutor.isOK()) {
- return CommandHelpers::appendCommandStatus(result, statusWithPlanExecutor.getStatus());
- }
+ uassertStatusOK(statusWithPlanExecutor.getStatus());
auto exec = std::move(statusWithPlanExecutor.getValue());
BSONArrayBuilder firstBatch;
diff --git a/src/mongo/db/commands/mr.cpp b/src/mongo/db/commands/mr.cpp
index c743a951ba5..36345d98854 100644
--- a/src/mongo/db/commands/mr.cpp
+++ b/src/mongo/db/commands/mr.cpp
@@ -1414,9 +1414,7 @@ public:
auto client = opCtx->getClient();
if (client->isInDirectClient()) {
- return CommandHelpers::appendCommandStatus(
- result,
- Status(ErrorCodes::IllegalOperation, "Cannot run mapReduce command from eval()"));
+ uasserted(ErrorCodes::IllegalOperation, "Cannot run mapReduce command from eval()");
}
auto curOp = CurOp::get(opCtx);
@@ -1442,10 +1440,8 @@ public:
try {
State state(opCtx, config);
if (!state.sourceExists()) {
- return CommandHelpers::appendCommandStatus(
- result,
- Status(ErrorCodes::NamespaceNotFound,
- str::stream() << "namespace does not exist: " << config.nss.ns()));
+ uasserted(ErrorCodes::NamespaceNotFound,
+ str::stream() << "namespace does not exist: " << config.nss.ns());
}
state.init();
@@ -1580,9 +1576,7 @@ public:
scopedAutoDb.reset(new AutoGetDb(opCtx, config.nss.db(), MODE_S));
auto restoreStatus = exec->restoreState();
- if (!restoreStatus.isOK()) {
- return CommandHelpers::appendCommandStatus(result, restoreStatus);
- }
+ uassertStatusOK(restoreStatus);
reduceTime += t.micros();
@@ -1596,11 +1590,9 @@ public:
}
if (PlanExecutor::DEAD == execState || PlanExecutor::FAILURE == execState) {
- return CommandHelpers::appendCommandStatus(
- result,
- Status(ErrorCodes::OperationFailed,
- str::stream() << "Executor error during mapReduce command: "
- << WorkingSetCommon::toStatusString(o)));
+ uasserted(ErrorCodes::OperationFailed,
+ str::stream() << "Executor error during mapReduce command: "
+ << WorkingSetCommon::toStatusString(o));
}
// Record the indexes used by the PlanExecutor.
@@ -1730,11 +1722,9 @@ public:
const BSONObj& cmdObj,
BSONObjBuilder& result) {
if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer) {
- return CommandHelpers::appendCommandStatus(
- result,
- Status(ErrorCodes::CommandNotSupported,
- str::stream() << "Can not execute mapReduce with output database " << dbname
- << " which lives on config servers"));
+ uasserted(ErrorCodes::CommandNotSupported,
+ str::stream() << "Can not execute mapReduce with output database " << dbname
+ << " which lives on config servers");
}
// Don't let any lock acquisitions get interrupted.
@@ -1809,10 +1799,7 @@ public:
if (config.outputOptions.outType != Config::OutputType::INMEMORY) {
auto outRoutingInfoStatus = Grid::get(opCtx)->catalogCache()->getCollectionRoutingInfo(
opCtx, config.outputOptions.finalNamespace);
- if (!outRoutingInfoStatus.isOK()) {
- return CommandHelpers::appendCommandStatus(result,
- outRoutingInfoStatus.getStatus());
- }
+ uassertStatusOK(outRoutingInfoStatus.getStatus());
if (auto cm = outRoutingInfoStatus.getValue().cm()) {
// Fetch result from other shards 1 chunk at a time. It would be better to do just
diff --git a/src/mongo/db/commands/oplog_note.cpp b/src/mongo/db/commands/oplog_note.cpp
index 8cd038204d4..09a32cc0f6e 100644
--- a/src/mongo/db/commands/oplog_note.cpp
+++ b/src/mongo/db/commands/oplog_note.cpp
@@ -119,17 +119,13 @@ public:
BSONObjBuilder& result) {
auto replCoord = repl::ReplicationCoordinator::get(opCtx);
if (!replCoord->isReplEnabled()) {
- return CommandHelpers::appendCommandStatus(
- result,
- {ErrorCodes::NoReplicationEnabled,
- "Must have replication set up to run \"appendOplogNote\""});
+ uasserted(ErrorCodes::NoReplicationEnabled,
+ "Must have replication set up to run \"appendOplogNote\"");
}
BSONElement dataElement;
auto dataStatus = bsonExtractTypedField(cmdObj, "data", Object, &dataElement);
- if (!dataStatus.isOK()) {
- return CommandHelpers::appendCommandStatus(result, dataStatus);
- }
+ uassertStatusOK(dataStatus);
Timestamp maxClusterTime;
auto maxClusterTimeStatus =
@@ -137,24 +133,23 @@ public:
if (!maxClusterTimeStatus.isOK()) {
if (maxClusterTimeStatus == ErrorCodes::NoSuchKey) { // no need to use maxClusterTime
- return CommandHelpers::appendCommandStatus(
- result, _performNoopWrite(opCtx, dataElement.Obj(), "appendOpLogNote"));
+ uassertStatusOK(_performNoopWrite(opCtx, dataElement.Obj(), "appendOpLogNote"));
+ return true;
}
- return CommandHelpers::appendCommandStatus(result, maxClusterTimeStatus);
+ uassertStatusOK(maxClusterTimeStatus);
}
auto lastAppliedOpTime = replCoord->getMyLastAppliedOpTime().getTimestamp();
if (maxClusterTime > lastAppliedOpTime) {
- return CommandHelpers::appendCommandStatus(
- result, _performNoopWrite(opCtx, dataElement.Obj(), "appendOpLogNote"));
+ uassertStatusOK(_performNoopWrite(opCtx, dataElement.Obj(), "appendOpLogNote"));
} else {
std::stringstream ss;
ss << "Requested maxClusterTime " << LogicalTime(maxClusterTime).toString()
<< " is less or equal to the last primary OpTime: "
<< LogicalTime(lastAppliedOpTime).toString();
- return CommandHelpers::appendCommandStatus(result,
- {ErrorCodes::StaleClusterTime, ss.str()});
+ uasserted(ErrorCodes::StaleClusterTime, ss.str());
}
+ return true;
}
};
diff --git a/src/mongo/db/commands/pipeline_command.cpp b/src/mongo/db/commands/pipeline_command.cpp
index aac860e8855..24142d7da5d 100644
--- a/src/mongo/db/commands/pipeline_command.cpp
+++ b/src/mongo/db/commands/pipeline_command.cpp
@@ -92,13 +92,9 @@ public:
const auto aggregationRequest =
uassertStatusOK(AggregationRequest::parseFromBSON(dbname, cmdObj, boost::none));
- return CommandHelpers::appendCommandStatus(
- result,
- runAggregate(opCtx,
- aggregationRequest.getNamespaceString(),
- aggregationRequest,
- cmdObj,
- result));
+ uassertStatusOK(runAggregate(
+ opCtx, aggregationRequest.getNamespaceString(), aggregationRequest, cmdObj, result));
+ return true;
}
Status explain(OperationContext* opCtx,
diff --git a/src/mongo/db/commands/plan_cache_commands.cpp b/src/mongo/db/commands/plan_cache_commands.cpp
index e5a5c193368..703526343e7 100644
--- a/src/mongo/db/commands/plan_cache_commands.cpp
+++ b/src/mongo/db/commands/plan_cache_commands.cpp
@@ -116,7 +116,8 @@ bool PlanCacheCommand::run(OperationContext* opCtx,
BSONObjBuilder& result) {
const NamespaceString nss(CommandHelpers::parseNsCollectionRequired(dbname, cmdObj));
Status status = runPlanCacheCommand(opCtx, nss.ns(), cmdObj, &result);
- return CommandHelpers::appendCommandStatus(result, status);
+ uassertStatusOK(status);
+ return true;
}
diff --git a/src/mongo/db/commands/reap_logical_session_cache_now.cpp b/src/mongo/db/commands/reap_logical_session_cache_now.cpp
index adb9b15e897..05b1ad665e1 100644
--- a/src/mongo/db/commands/reap_logical_session_cache_now.cpp
+++ b/src/mongo/db/commands/reap_logical_session_cache_now.cpp
@@ -76,9 +76,7 @@ public:
auto client = opCtx->getClient();
auto res = cache->reapNow(client);
- if (!res.isOK()) {
- return CommandHelpers::appendCommandStatus(result, res);
- }
+ uassertStatusOK(res);
return true;
}
diff --git a/src/mongo/db/commands/refresh_logical_session_cache_now.cpp b/src/mongo/db/commands/refresh_logical_session_cache_now.cpp
index f00adb6afb0..4cb54b0c823 100644
--- a/src/mongo/db/commands/refresh_logical_session_cache_now.cpp
+++ b/src/mongo/db/commands/refresh_logical_session_cache_now.cpp
@@ -77,9 +77,7 @@ public:
auto client = opCtx->getClient();
auto res = cache->refreshNow(client);
- if (!res.isOK()) {
- return CommandHelpers::appendCommandStatus(result, res);
- }
+ uassertStatusOK(res);
return true;
}
diff --git a/src/mongo/db/commands/refresh_sessions_command.cpp b/src/mongo/db/commands/refresh_sessions_command.cpp
index 09882f10fe7..c05850eff79 100644
--- a/src/mongo/db/commands/refresh_sessions_command.cpp
+++ b/src/mongo/db/commands/refresh_sessions_command.cpp
@@ -81,9 +81,7 @@ public:
auto cmd = RefreshSessionsCmdFromClient::parse(ctx, cmdObj);
auto res =
LogicalSessionCache::get(opCtx->getServiceContext())->refreshSessions(opCtx, cmd);
- if (!res.isOK()) {
- return CommandHelpers::appendCommandStatus(result, res);
- }
+ uassertStatusOK(res);
return true;
}
diff --git a/src/mongo/db/commands/refresh_sessions_command_internal.cpp b/src/mongo/db/commands/refresh_sessions_command_internal.cpp
index 8f942feb1c4..e5529754fa9 100644
--- a/src/mongo/db/commands/refresh_sessions_command_internal.cpp
+++ b/src/mongo/db/commands/refresh_sessions_command_internal.cpp
@@ -78,9 +78,7 @@ public:
auto cmd = RefreshSessionsCmdFromClusterMember::parse(ctx, cmdObj);
auto res =
LogicalSessionCache::get(opCtx->getServiceContext())->refreshSessions(opCtx, cmd);
- if (!res.isOK()) {
- return CommandHelpers::appendCommandStatus(result, res);
- }
+ uassertStatusOK(res);
return true;
}
diff --git a/src/mongo/db/commands/rename_collection_cmd.cpp b/src/mongo/db/commands/rename_collection_cmd.cpp
index e152a1ae7b4..2232104c44e 100644
--- a/src/mongo/db/commands/rename_collection_cmd.cpp
+++ b/src/mongo/db/commands/rename_collection_cmd.cpp
@@ -149,19 +149,17 @@ public:
}
if (source.isServerConfigurationCollection()) {
- CommandHelpers::appendCommandStatus(result,
- Status(ErrorCodes::IllegalOperation,
- "renaming the server configuration "
- "collection (admin.system.version) is not "
- "allowed"));
- return false;
+ uasserted(ErrorCodes::IllegalOperation,
+ "renaming the server configuration "
+ "collection (admin.system.version) is not "
+ "allowed");
}
RenameCollectionOptions options;
options.dropTarget = cmdObj["dropTarget"].trueValue();
options.stayTemp = cmdObj["stayTemp"].trueValue();
- return CommandHelpers::appendCommandStatus(
- result, renameCollection(opCtx, source, target, options));
+ uassertStatusOK(renameCollection(opCtx, source, target, options));
+ return true;
}
} cmdrenamecollection;
diff --git a/src/mongo/db/commands/repair_cursor.cpp b/src/mongo/db/commands/repair_cursor.cpp
index f8d3b058d2d..8df85a45c19 100644
--- a/src/mongo/db/commands/repair_cursor.cpp
+++ b/src/mongo/db/commands/repair_cursor.cpp
@@ -77,14 +77,12 @@ public:
Collection* collection = ctx.getCollection();
if (!collection) {
- return CommandHelpers::appendCommandStatus(
- result, Status(ErrorCodes::NamespaceNotFound, "ns does not exist: " + ns.ns()));
+ uasserted(ErrorCodes::NamespaceNotFound, "ns does not exist: " + ns.ns());
}
auto cursor = collection->getRecordStore()->getCursorForRepair(opCtx);
if (!cursor) {
- return CommandHelpers::appendCommandStatus(
- result, Status(ErrorCodes::CommandNotSupported, "repair iterator not supported"));
+ uasserted(ErrorCodes::CommandNotSupported, "repair iterator not supported");
}
std::unique_ptr<WorkingSet> ws(new WorkingSet());
diff --git a/src/mongo/db/commands/resize_oplog.cpp b/src/mongo/db/commands/resize_oplog.cpp
index 726cf043b5d..d743e57cac3 100644
--- a/src/mongo/db/commands/resize_oplog.cpp
+++ b/src/mongo/db/commands/resize_oplog.cpp
@@ -89,40 +89,32 @@ public:
Lock::GlobalWrite global(opCtx);
Database* database = DatabaseHolder::getDatabaseHolder().get(opCtx, nss.db());
if (!database) {
- return CommandHelpers::appendCommandStatus(
- result, Status(ErrorCodes::NamespaceNotFound, "database local does not exist"));
+ uasserted(ErrorCodes::NamespaceNotFound, "database local does not exist");
}
Collection* coll = database->getCollection(opCtx, nss);
if (!coll) {
- return CommandHelpers::appendCommandStatus(
- result, Status(ErrorCodes::NamespaceNotFound, "oplog does not exist"));
+ uasserted(ErrorCodes::NamespaceNotFound, "oplog does not exist");
}
if (!coll->isCapped()) {
- return CommandHelpers::appendCommandStatus(
- result, Status(ErrorCodes::IllegalOperation, "oplog isn't capped"));
+ uasserted(ErrorCodes::IllegalOperation, "oplog isn't capped");
}
if (!jsobj["size"].isNumber()) {
- return CommandHelpers::appendCommandStatus(
- result,
- Status(ErrorCodes::InvalidOptions, "invalid size field, size should be a number"));
+ uasserted(ErrorCodes::InvalidOptions, "invalid size field, size should be a number");
}
long long sizeMb = jsobj["size"].numberLong();
long long size = sizeMb * 1024 * 1024;
if (sizeMb < 990L) {
- return CommandHelpers::appendCommandStatus(
- result, Status(ErrorCodes::InvalidOptions, "oplog size should be 990MB at least"));
+ uasserted(ErrorCodes::InvalidOptions, "oplog size should be 990MB at least");
}
WriteUnitOfWork wunit(opCtx);
Status status = coll->getRecordStore()->updateCappedSize(opCtx, size);
- if (!status.isOK()) {
- return CommandHelpers::appendCommandStatus(result, status);
- }
+ uassertStatusOK(status);
CollectionCatalogEntry* entry = coll->getCatalogEntry();
entry->updateCappedSize(opCtx, size);
wunit.commit();
LOG(0) << "replSetResizeOplog success, currentSize:" << size;
- return CommandHelpers::appendCommandStatus(result, Status::OK());
+ return true;
}
} cmdReplSetResizeOplog;
}
diff --git a/src/mongo/db/commands/restart_catalog_command.cpp b/src/mongo/db/commands/restart_catalog_command.cpp
index bfa9a27b22c..c1671eaded5 100644
--- a/src/mongo/db/commands/restart_catalog_command.cpp
+++ b/src/mongo/db/commands/restart_catalog_command.cpp
@@ -97,11 +97,9 @@ public:
for (auto&& dbName : allDbs) {
const auto db = DatabaseHolder::getDatabaseHolder().get(opCtx, dbName);
if (db->isDropPending(opCtx)) {
- return CommandHelpers::appendCommandStatus(
- result,
- {ErrorCodes::DatabaseDropPending,
- str::stream() << "cannot restart the catalog because database " << dbName
- << " is pending removal"});
+ uasserted(ErrorCodes::DatabaseDropPending,
+ str::stream() << "cannot restart the catalog because database " << dbName
+ << " is pending removal");
}
}
diff --git a/src/mongo/db/commands/shutdown_d.cpp b/src/mongo/db/commands/shutdown_d.cpp
index 77806c0d368..ee2de23a67e 100644
--- a/src/mongo/db/commands/shutdown_d.cpp
+++ b/src/mongo/db/commands/shutdown_d.cpp
@@ -63,7 +63,7 @@ public:
Status status = repl::ReplicationCoordinator::get(opCtx)->stepDown(
opCtx, force, Seconds(timeoutSecs), Seconds(120));
if (!status.isOK() && status.code() != ErrorCodes::NotMaster) { // ignore not master
- return CommandHelpers::appendCommandStatus(result, status);
+ uassertStatusOK(status);
}
// Never returns
diff --git a/src/mongo/db/commands/snapshot_management.cpp b/src/mongo/db/commands/snapshot_management.cpp
index 84c57af241b..b0b9896eb9b 100644
--- a/src/mongo/db/commands/snapshot_management.cpp
+++ b/src/mongo/db/commands/snapshot_management.cpp
@@ -73,8 +73,7 @@ public:
BSONObjBuilder& result) {
auto snapshotManager = getGlobalServiceContext()->getStorageEngine()->getSnapshotManager();
if (!snapshotManager) {
- return CommandHelpers::appendCommandStatus(result,
- {ErrorCodes::CommandNotSupported, ""});
+ uasserted(ErrorCodes::CommandNotSupported, "");
}
Lock::GlobalLock lk(opCtx, MODE_IX);
@@ -82,7 +81,7 @@ public:
auto name = LogicalClock::getClusterTimeForReplicaSet(opCtx).asTimestamp();
result.append("name", static_cast<long long>(name.asULL()));
- return CommandHelpers::appendCommandStatus(result, Status::OK());
+ return true;
}
};
@@ -117,8 +116,7 @@ public:
BSONObjBuilder& result) {
auto snapshotManager = getGlobalServiceContext()->getStorageEngine()->getSnapshotManager();
if (!snapshotManager) {
- return CommandHelpers::appendCommandStatus(result,
- {ErrorCodes::CommandNotSupported, ""});
+ uasserted(ErrorCodes::CommandNotSupported, "");
}
Lock::GlobalLock lk(opCtx, MODE_IX);
diff --git a/src/mongo/db/commands/start_session_command.cpp b/src/mongo/db/commands/start_session_command.cpp
index 710a01d1fb7..01ce7e0be6d 100644
--- a/src/mongo/db/commands/start_session_command.cpp
+++ b/src/mongo/db/commands/start_session_command.cpp
@@ -77,16 +77,8 @@ public:
ServiceContext* serviceContext = client->getServiceContext();
auto lsCache = LogicalSessionCache::get(serviceContext);
- boost::optional<LogicalSessionRecord> record;
-
- try {
- record = makeLogicalSessionRecord(opCtx, lsCache->now());
- } catch (...) {
- auto status = exceptionToStatus();
-
- return CommandHelpers::appendCommandStatus(result, status);
- }
-
+ boost::optional<LogicalSessionRecord> record =
+ makeLogicalSessionRecord(opCtx, lsCache->now());
lsCache->startSession(opCtx, record.get());
makeLogicalSessionToClient(record->getId()).serialize(&result);
diff --git a/src/mongo/db/commands/test_commands.cpp b/src/mongo/db/commands/test_commands.cpp
index 153fb7d775f..8d8982f3b20 100644
--- a/src/mongo/db/commands/test_commands.cpp
+++ b/src/mongo/db/commands/test_commands.cpp
@@ -103,7 +103,8 @@ public:
if (status.isOK()) {
wunit.commit();
}
- return CommandHelpers::appendCommandStatus(result, status);
+ uassertStatusOK(status);
+ return true;
}
};
@@ -217,33 +218,27 @@ public:
BSONObjBuilder& result) {
const NamespaceString fullNs = CommandHelpers::parseNsCollectionRequired(dbname, cmdObj);
if (!fullNs.isValid()) {
- return CommandHelpers::appendCommandStatus(
- result,
- {ErrorCodes::InvalidNamespace,
- str::stream() << "collection name " << fullNs.ns() << " is not valid"});
+ uasserted(ErrorCodes::InvalidNamespace,
+ str::stream() << "collection name " << fullNs.ns() << " is not valid");
}
int n = cmdObj.getIntField("n");
bool inc = cmdObj.getBoolField("inc"); // inclusive range?
if (n <= 0) {
- return CommandHelpers::appendCommandStatus(
- result, {ErrorCodes::BadValue, "n must be a positive integer"});
+ uasserted(ErrorCodes::BadValue, "n must be a positive integer");
}
// Lock the database in mode IX and lock the collection exclusively.
AutoGetCollection autoColl(opCtx, fullNs, MODE_IX, MODE_X);
Collection* collection = autoColl.getCollection();
if (!collection) {
- return CommandHelpers::appendCommandStatus(
- result,
- {ErrorCodes::NamespaceNotFound,
- str::stream() << "collection " << fullNs.ns() << " does not exist"});
+ uasserted(ErrorCodes::NamespaceNotFound,
+ str::stream() << "collection " << fullNs.ns() << " does not exist");
}
if (!collection->isCapped()) {
- return CommandHelpers::appendCommandStatus(
- result, {ErrorCodes::IllegalOperation, "collection must be capped"});
+ uasserted(ErrorCodes::IllegalOperation, "collection must be capped");
}
RecordId end;
@@ -257,11 +252,9 @@ public:
for (int i = 0; i < n + 1; ++i) {
PlanExecutor::ExecState state = exec->getNext(nullptr, &end);
if (PlanExecutor::ADVANCED != state) {
- return CommandHelpers::appendCommandStatus(
- result,
- {ErrorCodes::IllegalOperation,
- str::stream() << "invalid n, collection contains fewer than " << n
- << " documents"});
+ uasserted(ErrorCodes::IllegalOperation,
+ str::stream() << "invalid n, collection contains fewer than " << n
+ << " documents");
}
}
}
@@ -293,7 +286,8 @@ public:
BSONObjBuilder& result) {
const NamespaceString nss = CommandHelpers::parseNsCollectionRequired(dbname, cmdObj);
- return CommandHelpers::appendCommandStatus(result, emptyCapped(opCtx, nss));
+ uassertStatusOK(emptyCapped(opCtx, nss));
+ return true;
}
};
diff --git a/src/mongo/db/commands/touch.cpp b/src/mongo/db/commands/touch.cpp
index bde76e8ceff..4a2d1a7bc5f 100644
--- a/src/mongo/db/commands/touch.cpp
+++ b/src/mongo/db/commands/touch.cpp
@@ -109,8 +109,8 @@ public:
return false;
}
- return CommandHelpers::appendCommandStatus(
- result, collection->touch(opCtx, touch_data, touch_indexes, &result));
+ uassertStatusOK(collection->touch(opCtx, touch_data, touch_indexes, &result));
+ return true;
}
};
static TouchCmd touchCmd;
diff --git a/src/mongo/db/commands/user_management_commands.cpp b/src/mongo/db/commands/user_management_commands.cpp
index 1fb2582eee5..f2c4f3ad6cf 100644
--- a/src/mongo/db/commands/user_management_commands.cpp
+++ b/src/mongo/db/commands/user_management_commands.cpp
@@ -772,46 +772,35 @@ public:
BSONObjBuilder& result) {
auth::CreateOrUpdateUserArgs args;
Status status = auth::parseCreateOrUpdateUserCommands(cmdObj, "createUser", dbname, &args);
- if (!status.isOK()) {
- return CommandHelpers::appendCommandStatus(result, status);
- }
+ uassertStatusOK(status);
if (args.userName.getDB() == "local") {
- return CommandHelpers::appendCommandStatus(
- result, Status(ErrorCodes::BadValue, "Cannot create users in the local database"));
+ uasserted(ErrorCodes::BadValue, "Cannot create users in the local database");
}
if (!args.hasPassword && args.userName.getDB() != "$external") {
- return CommandHelpers::appendCommandStatus(
- result,
- Status(ErrorCodes::BadValue,
- "Must provide a 'pwd' field for all user documents, except those"
- " with '$external' as the user's source db"));
+ uasserted(ErrorCodes::BadValue,
+ "Must provide a 'pwd' field for all user documents, except those"
+ " with '$external' as the user's source db");
}
if ((args.hasPassword) && args.userName.getDB() == "$external") {
- return CommandHelpers::appendCommandStatus(
- result,
- Status(ErrorCodes::BadValue,
- "Cannot set the password for users defined on the '$external' "
- "database"));
+ uasserted(ErrorCodes::BadValue,
+ "Cannot set the password for users defined on the '$external' "
+ "database");
}
if (!args.hasRoles) {
- return CommandHelpers::appendCommandStatus(
- result,
- Status(ErrorCodes::BadValue, "\"createUser\" command requires a \"roles\" array"));
+ uasserted(ErrorCodes::BadValue, "\"createUser\" command requires a \"roles\" array");
}
#ifdef MONGO_CONFIG_SSL
if (args.userName.getDB() == "$external" && getSSLManager() &&
getSSLManager()->getSSLConfiguration().isClusterMember(args.userName.getUser())) {
- return CommandHelpers::appendCommandStatus(
- result,
- Status(ErrorCodes::BadValue,
- "Cannot create an x.509 user with a subjectname "
- "that would be recognized as an internal "
- "cluster member."));
+ uasserted(ErrorCodes::BadValue,
+ "Cannot create an x.509 user with a subjectname "
+ "that would be recognized as an internal "
+ "cluster member.");
}
#endif
@@ -825,15 +814,11 @@ public:
AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
int authzVersion;
status = authzManager->getAuthorizationVersion(opCtx, &authzVersion);
- if (!status.isOK()) {
- return CommandHelpers::appendCommandStatus(result, status);
- }
+ uassertStatusOK(status);
BSONObjBuilder credentialsBuilder(userObjBuilder.subobjStart("credentials"));
status = buildCredentials(&credentialsBuilder, args);
- if (!status.isOK()) {
- return CommandHelpers::appendCommandStatus(result, status);
- }
+ uassertStatusOK(status);
credentialsBuilder.done();
if (args.authenticationRestrictions && !args.authenticationRestrictions->isEmpty()) {
@@ -850,24 +835,18 @@ public:
BSONObj userObj = userObjBuilder.obj();
V2UserDocumentParser parser;
status = parser.checkValidUserDocument(userObj);
- if (!status.isOK()) {
- return CommandHelpers::appendCommandStatus(result, status);
- }
+ uassertStatusOK(status);
stdx::lock_guard<stdx::mutex> lk(getAuthzDataMutex(serviceContext));
status = requireWritableAuthSchema28SCRAM(opCtx, authzManager);
- if (!status.isOK()) {
- return CommandHelpers::appendCommandStatus(result, status);
- }
+ uassertStatusOK(status);
// Role existence has to be checked after acquiring the update lock
for (size_t i = 0; i < args.roles.size(); ++i) {
BSONObj ignored;
status = authzManager->getRoleDescription(opCtx, args.roles[i], &ignored);
- if (!status.isOK()) {
- return CommandHelpers::appendCommandStatus(result, status);
- }
+ uassertStatusOK(status);
}
audit::logCreateUser(Client::getCurrent(),
@@ -877,7 +856,8 @@ public:
args.roles,
args.authenticationRestrictions);
status = insertPrivilegeDocument(opCtx, userObj);
- return CommandHelpers::appendCommandStatus(result, status);
+ uassertStatusOK(status);
+ return true;
}
void redactForLogging(mutablebson::Document* cmdObj) const override {
@@ -918,24 +898,18 @@ public:
BSONObjBuilder& result) {
auth::CreateOrUpdateUserArgs args;
Status status = auth::parseCreateOrUpdateUserCommands(cmdObj, "updateUser", dbname, &args);
- if (!status.isOK()) {
- return CommandHelpers::appendCommandStatus(result, status);
- }
+ uassertStatusOK(status);
if (!args.hasPassword && !args.hasCustomData && !args.hasRoles &&
!args.authenticationRestrictions && args.mechanisms.empty()) {
- return CommandHelpers::appendCommandStatus(
- result,
- Status(ErrorCodes::BadValue,
- "Must specify at least one field to update in updateUser"));
+ uasserted(ErrorCodes::BadValue,
+ "Must specify at least one field to update in updateUser");
}
if (args.hasPassword && args.userName.getDB() == "$external") {
- return CommandHelpers::appendCommandStatus(
- result,
- Status(ErrorCodes::BadValue,
- "Cannot set the password for users defined on the '$external' "
- "database"));
+ uasserted(ErrorCodes::BadValue,
+ "Cannot set the password for users defined on the '$external' "
+ "database");
}
BSONObjBuilder queryBuilder;
@@ -947,15 +921,11 @@ public:
if (args.hasPassword) {
BSONObjBuilder credentialsBuilder(updateSetBuilder.subobjStart("credentials"));
status = buildCredentials(&credentialsBuilder, args);
- if (!status.isOK()) {
- return CommandHelpers::appendCommandStatus(result, status);
- }
+ uassertStatusOK(status);
credentialsBuilder.done();
} else if (!args.mechanisms.empty()) {
status = trimCredentials(opCtx, &queryBuilder, &updateUnsetBuilder, args);
- if (!status.isOK()) {
- return CommandHelpers::appendCommandStatus(result, status);
- }
+ uassertStatusOK(status);
}
if (args.hasCustomData) {
@@ -968,10 +938,7 @@ public:
} else {
auto swParsedRestrictions =
parseAuthenticationRestriction(*args.authenticationRestrictions);
- if (!swParsedRestrictions.isOK()) {
- return CommandHelpers::appendCommandStatus(result,
- swParsedRestrictions.getStatus());
- }
+ uassertStatusOK(swParsedRestrictions.getStatus());
updateSetBuilder.append("authenticationRestrictions",
*args.authenticationRestrictions);
@@ -997,18 +964,14 @@ public:
AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
status = requireWritableAuthSchema28SCRAM(opCtx, authzManager);
- if (!status.isOK()) {
- return CommandHelpers::appendCommandStatus(result, status);
- }
+ uassertStatusOK(status);
// Role existence has to be checked after acquiring the update lock
if (args.hasRoles) {
for (size_t i = 0; i < args.roles.size(); ++i) {
BSONObj ignored;
status = authzManager->getRoleDescription(opCtx, args.roles[i], &ignored);
- if (!status.isOK()) {
- return CommandHelpers::appendCommandStatus(result, status);
- }
+ uassertStatusOK(status);
}
}
@@ -1023,7 +986,8 @@ public:
opCtx, args.userName, queryBuilder.done(), updateDocumentBuilder.done());
// Must invalidate even on bad status - what if the write succeeded but the GLE failed?
authzManager->invalidateUserByName(args.userName);
- return CommandHelpers::appendCommandStatus(result, status);
+ uassertStatusOK(status);
+ return true;
}
void redactForLogging(mutablebson::Document* cmdObj) const override {
@@ -1064,17 +1028,13 @@ public:
BSONObjBuilder& result) {
UserName userName;
Status status = auth::parseAndValidateDropUserCommand(cmdObj, dbname, &userName);
- if (!status.isOK()) {
- return CommandHelpers::appendCommandStatus(result, status);
- }
+ uassertStatusOK(status);
ServiceContext* serviceContext = opCtx->getClient()->getServiceContext();
stdx::lock_guard<stdx::mutex> lk(getAuthzDataMutex(serviceContext));
AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
status = requireWritableAuthSchema28SCRAM(opCtx, authzManager);
- if (!status.isOK()) {
- return CommandHelpers::appendCommandStatus(result, status);
- }
+ uassertStatusOK(status);
audit::logDropUser(Client::getCurrent(), userName);
@@ -1087,15 +1047,11 @@ public:
&nMatched);
// Must invalidate even on bad status - what if the write succeeded but the GLE failed?
authzManager->invalidateUserByName(userName);
- if (!status.isOK()) {
- return CommandHelpers::appendCommandStatus(result, status);
- }
+ uassertStatusOK(status);
if (nMatched == 0) {
- return CommandHelpers::appendCommandStatus(
- result,
- Status(ErrorCodes::UserNotFound,
- str::stream() << "User '" << userName.getFullName() << "' not found"));
+ uasserted(ErrorCodes::UserNotFound,
+ str::stream() << "User '" << userName.getFullName() << "' not found");
}
return true;
@@ -1134,17 +1090,13 @@ public:
const BSONObj& cmdObj,
BSONObjBuilder& result) {
Status status = auth::parseAndValidateDropAllUsersFromDatabaseCommand(cmdObj, dbname);
- if (!status.isOK()) {
- return CommandHelpers::appendCommandStatus(result, status);
- }
+ uassertStatusOK(status);
ServiceContext* serviceContext = opCtx->getClient()->getServiceContext();
stdx::lock_guard<stdx::mutex> lk(getAuthzDataMutex(serviceContext));
AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
status = requireWritableAuthSchema28SCRAM(opCtx, authzManager);
- if (!status.isOK()) {
- return CommandHelpers::appendCommandStatus(result, status);
- }
+ uassertStatusOK(status);
audit::logDropAllUsersFromDatabase(Client::getCurrent(), dbname);
@@ -1153,9 +1105,7 @@ public:
opCtx, BSON(AuthorizationManager::USER_DB_FIELD_NAME << dbname), &numRemoved);
// Must invalidate even on bad status - what if the write succeeded but the GLE failed?
authzManager->invalidateUsersFromDB(dbname);
- if (!status.isOK()) {
- return CommandHelpers::appendCommandStatus(result, status);
- }
+ uassertStatusOK(status);
result.append("n", numRemoved);
return true;
@@ -1197,33 +1147,25 @@ public:
std::vector<RoleName> roles;
Status status = auth::parseRolePossessionManipulationCommands(
cmdObj, "grantRolesToUser", dbname, &userNameString, &roles);
- if (!status.isOK()) {
- return CommandHelpers::appendCommandStatus(result, status);
- }
+ uassertStatusOK(status);
ServiceContext* serviceContext = opCtx->getClient()->getServiceContext();
stdx::lock_guard<stdx::mutex> lk(getAuthzDataMutex(serviceContext));
AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
status = requireWritableAuthSchema28SCRAM(opCtx, authzManager);
- if (!status.isOK()) {
- return CommandHelpers::appendCommandStatus(result, status);
- }
+ uassertStatusOK(status);
UserName userName(userNameString, dbname);
stdx::unordered_set<RoleName> userRoles;
status = getCurrentUserRoles(opCtx, authzManager, userName, &userRoles);
- if (!status.isOK()) {
- return CommandHelpers::appendCommandStatus(result, status);
- }
+ uassertStatusOK(status);
for (vector<RoleName>::iterator it = roles.begin(); it != roles.end(); ++it) {
RoleName& roleName = *it;
BSONObj roleDoc;
status = authzManager->getRoleDescription(opCtx, roleName, &roleDoc);
- if (!status.isOK()) {
- return CommandHelpers::appendCommandStatus(result, status);
- }
+ uassertStatusOK(status);
userRoles.insert(roleName);
}
@@ -1234,7 +1176,8 @@ public:
opCtx, userName, BSON("$set" << BSON("roles" << newRolesBSONArray)));
// Must invalidate even on bad status - what if the write succeeded but the GLE failed?
authzManager->invalidateUserByName(userName);
- return CommandHelpers::appendCommandStatus(result, status);
+ uassertStatusOK(status);
+ return true;
}
} cmdGrantRolesToUser;
@@ -1273,33 +1216,25 @@ public:
std::vector<RoleName> roles;
Status status = auth::parseRolePossessionManipulationCommands(
cmdObj, "revokeRolesFromUser", dbname, &userNameString, &roles);
- if (!status.isOK()) {
- return CommandHelpers::appendCommandStatus(result, status);
- }
+ uassertStatusOK(status);
ServiceContext* serviceContext = opCtx->getClient()->getServiceContext();
stdx::lock_guard<stdx::mutex> lk(getAuthzDataMutex(serviceContext));
AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
status = requireWritableAuthSchema28SCRAM(opCtx, authzManager);
- if (!status.isOK()) {
- return CommandHelpers::appendCommandStatus(result, status);
- }
+ uassertStatusOK(status);
UserName userName(userNameString, dbname);
stdx::unordered_set<RoleName> userRoles;
status = getCurrentUserRoles(opCtx, authzManager, userName, &userRoles);
- if (!status.isOK()) {
- return CommandHelpers::appendCommandStatus(result, status);
- }
+ uassertStatusOK(status);
for (vector<RoleName>::iterator it = roles.begin(); it != roles.end(); ++it) {
RoleName& roleName = *it;
BSONObj roleDoc;
status = authzManager->getRoleDescription(opCtx, roleName, &roleDoc);
- if (!status.isOK()) {
- return CommandHelpers::appendCommandStatus(result, status);
- }
+ uassertStatusOK(status);
userRoles.erase(roleName);
}
@@ -1310,7 +1245,8 @@ public:
opCtx, userName, BSON("$set" << BSON("roles" << newRolesBSONArray)));
// Must invalidate even on bad status - what if the write succeeded but the GLE failed?
authzManager->invalidateUserByName(userName);
- return CommandHelpers::appendCommandStatus(result, status);
+ uassertStatusOK(status);
+ return true;
}
} cmdRevokeRolesFromUser;
@@ -1343,23 +1279,17 @@ public:
BSONObjBuilder& result) {
auth::UsersInfoArgs args;
Status status = auth::parseUsersInfoCommand(cmdObj, dbname, &args);
- if (!status.isOK()) {
- return CommandHelpers::appendCommandStatus(result, status);
- }
+ uassertStatusOK(status);
status = requireReadableAuthSchema26Upgrade(opCtx, getGlobalAuthorizationManager());
- if (!status.isOK()) {
- return CommandHelpers::appendCommandStatus(result, status);
- }
+ uassertStatusOK(status);
if ((args.target != auth::UsersInfoArgs::Target::kExplicitUsers || args.filter) &&
(args.showPrivileges ||
args.authenticationRestrictionsFormat == AuthenticationRestrictionsFormat::kShow)) {
- return CommandHelpers::appendCommandStatus(
- result,
- Status(ErrorCodes::IllegalOperation,
- "Privilege or restriction details require exact-match usersInfo "
- "queries."));
+ uasserted(ErrorCodes::IllegalOperation,
+ "Privilege or restriction details require exact-match usersInfo "
+ "queries.");
}
BSONArrayBuilder usersArrayBuilder;
@@ -1375,9 +1305,7 @@ public:
if (status.code() == ErrorCodes::UserNotFound) {
continue;
}
- if (!status.isOK()) {
- return CommandHelpers::appendCommandStatus(result, status);
- }
+ uassertStatusOK(status);
// getUserDescription always includes credentials and restrictions, which may need
// to be stripped out
@@ -1459,15 +1387,11 @@ public:
aggRequest,
aggRequest.serializeToCommandObj().toBson(),
responseBuilder);
- if (!status.isOK()) {
- return CommandHelpers::appendCommandStatus(result, status);
- }
+ uassertStatusOK(status);
CommandHelpers::appendSimpleCommandStatus(responseBuilder, true);
auto swResponse = CursorResponse::parseFromBSON(responseBuilder.obj());
- if (!swResponse.isOK()) {
- return CommandHelpers::appendCommandStatus(result, swResponse.getStatus());
- }
+ uassertStatusOK(swResponse.getStatus());
for (const BSONObj& obj : swResponse.getValue().getBatch()) {
usersArrayBuilder.append(obj);
}
@@ -1510,44 +1434,32 @@ public:
BSONObjBuilder& result) {
auth::CreateOrUpdateRoleArgs args;
Status status = auth::parseCreateOrUpdateRoleCommands(cmdObj, "createRole", dbname, &args);
- if (!status.isOK()) {
- return CommandHelpers::appendCommandStatus(result, status);
- }
+ uassertStatusOK(status);
if (args.roleName.getRole().empty()) {
- return CommandHelpers::appendCommandStatus(
- result, Status(ErrorCodes::BadValue, "Role name must be non-empty"));
+ uasserted(ErrorCodes::BadValue, "Role name must be non-empty");
}
if (args.roleName.getDB() == "local") {
- return CommandHelpers::appendCommandStatus(
- result, Status(ErrorCodes::BadValue, "Cannot create roles in the local database"));
+ uasserted(ErrorCodes::BadValue, "Cannot create roles in the local database");
}
if (args.roleName.getDB() == "$external") {
- return CommandHelpers::appendCommandStatus(
- result,
- Status(ErrorCodes::BadValue, "Cannot create roles in the $external database"));
+ uasserted(ErrorCodes::BadValue, "Cannot create roles in the $external database");
}
if (RoleGraph::isBuiltinRole(args.roleName)) {
- return CommandHelpers::appendCommandStatus(
- result,
- Status(ErrorCodes::BadValue,
- "Cannot create roles with the same name as a built-in role"));
+ uasserted(ErrorCodes::BadValue,
+ "Cannot create roles with the same name as a built-in role");
}
if (!args.hasRoles) {
- return CommandHelpers::appendCommandStatus(
- result,
- Status(ErrorCodes::BadValue, "\"createRole\" command requires a \"roles\" array"));
+ uasserted(ErrorCodes::BadValue, "\"createRole\" command requires a \"roles\" array");
}
if (!args.hasPrivileges) {
- return CommandHelpers::appendCommandStatus(
- result,
- Status(ErrorCodes::BadValue,
- "\"createRole\" command requires a \"privileges\" array"));
+ uasserted(ErrorCodes::BadValue,
+ "\"createRole\" command requires a \"privileges\" array");
}
BSONObjBuilder roleObjBuilder;
@@ -1559,9 +1471,7 @@ public:
BSONArray privileges;
status = privilegeVectorToBSONArray(args.privileges, &privileges);
- if (!status.isOK()) {
- return CommandHelpers::appendCommandStatus(result, status);
- }
+ uassertStatusOK(status);
roleObjBuilder.append("privileges", privileges);
roleObjBuilder.append("roles", rolesVectorToBSONArray(args.roles));
@@ -1576,20 +1486,14 @@ public:
AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
status = requireWritableAuthSchema28SCRAM(opCtx, authzManager);
- if (!status.isOK()) {
- return CommandHelpers::appendCommandStatus(result, status);
- }
+ uassertStatusOK(status);
// Role existence has to be checked after acquiring the update lock
status = checkOkayToGrantRolesToRole(opCtx, args.roleName, args.roles, authzManager);
- if (!status.isOK()) {
- return CommandHelpers::appendCommandStatus(result, status);
- }
+ uassertStatusOK(status);
status = checkOkayToGrantPrivilegesToRole(args.roleName, args.privileges);
- if (!status.isOK()) {
- return CommandHelpers::appendCommandStatus(result, status);
- }
+ uassertStatusOK(status);
audit::logCreateRole(Client::getCurrent(),
args.roleName,
@@ -1598,7 +1502,8 @@ public:
args.authenticationRestrictions);
status = insertRoleDocument(opCtx, roleObjBuilder.done());
- return CommandHelpers::appendCommandStatus(result, status);
+ uassertStatusOK(status);
+ return true;
}
} cmdCreateRole;
@@ -1635,15 +1540,11 @@ public:
BSONObjBuilder& result) {
auth::CreateOrUpdateRoleArgs args;
Status status = auth::parseCreateOrUpdateRoleCommands(cmdObj, "updateRole", dbname, &args);
- if (!status.isOK()) {
- return CommandHelpers::appendCommandStatus(result, status);
- }
+ uassertStatusOK(status);
if (!args.hasPrivileges && !args.hasRoles && !args.authenticationRestrictions) {
- return CommandHelpers::appendCommandStatus(
- result,
- Status(ErrorCodes::BadValue,
- "Must specify at least one field to update in updateRole"));
+ uasserted(ErrorCodes::BadValue,
+ "Must specify at least one field to update in updateRole");
}
BSONObjBuilder updateSetBuilder;
@@ -1652,9 +1553,7 @@ public:
if (args.hasPrivileges) {
BSONArray privileges;
status = privilegeVectorToBSONArray(args.privileges, &privileges);
- if (!status.isOK()) {
- return CommandHelpers::appendCommandStatus(result, status);
- }
+ uassertStatusOK(status);
updateSetBuilder.append("privileges", privileges);
}
@@ -1676,29 +1575,21 @@ public:
AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
status = requireWritableAuthSchema28SCRAM(opCtx, authzManager);
- if (!status.isOK()) {
- return CommandHelpers::appendCommandStatus(result, status);
- }
+ uassertStatusOK(status);
// Role existence has to be checked after acquiring the update lock
BSONObj ignored;
status = authzManager->getRoleDescription(opCtx, args.roleName, &ignored);
- if (!status.isOK()) {
- return CommandHelpers::appendCommandStatus(result, status);
- }
+ uassertStatusOK(status);
if (args.hasRoles) {
status = checkOkayToGrantRolesToRole(opCtx, args.roleName, args.roles, authzManager);
- if (!status.isOK()) {
- return CommandHelpers::appendCommandStatus(result, status);
- }
+ uassertStatusOK(status);
}
if (args.hasPrivileges) {
status = checkOkayToGrantPrivilegesToRole(args.roleName, args.privileges);
- if (!status.isOK()) {
- return CommandHelpers::appendCommandStatus(result, status);
- }
+ uassertStatusOK(status);
}
audit::logUpdateRole(Client::getCurrent(),
@@ -1720,7 +1611,8 @@ public:
status = updateRoleDocument(opCtx, args.roleName, updateDocumentBuilder.obj());
// Must invalidate even on bad status - what if the write succeeded but the GLE failed?
authzManager->invalidateUserCache();
- return CommandHelpers::appendCommandStatus(result, status);
+ uassertStatusOK(status);
+ return true;
}
} cmdUpdateRole;
@@ -1759,31 +1651,23 @@ public:
PrivilegeVector privilegesToAdd;
Status status = auth::parseAndValidateRolePrivilegeManipulationCommands(
cmdObj, "grantPrivilegesToRole", dbname, &roleName, &privilegesToAdd);
- if (!status.isOK()) {
- return CommandHelpers::appendCommandStatus(result, status);
- }
+ uassertStatusOK(status);
ServiceContext* serviceContext = opCtx->getClient()->getServiceContext();
stdx::lock_guard<stdx::mutex> lk(getAuthzDataMutex(serviceContext));
AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
status = requireWritableAuthSchema28SCRAM(opCtx, authzManager);
- if (!status.isOK()) {
- return CommandHelpers::appendCommandStatus(result, status);
- }
+ uassertStatusOK(status);
if (RoleGraph::isBuiltinRole(roleName)) {
- return CommandHelpers::appendCommandStatus(
- result,
- Status(ErrorCodes::InvalidRoleModification,
- str::stream() << roleName.getFullName()
- << " is a built-in role and cannot be modified."));
+ uasserted(ErrorCodes::InvalidRoleModification,
+ str::stream() << roleName.getFullName()
+ << " is a built-in role and cannot be modified.");
}
status = checkOkayToGrantPrivilegesToRole(roleName, privilegesToAdd);
- if (!status.isOK()) {
- return CommandHelpers::appendCommandStatus(result, status);
- }
+ uassertStatusOK(status);
BSONObj roleDoc;
status = authzManager->getRoleDescription(opCtx,
@@ -1791,17 +1675,13 @@ public:
PrivilegeFormat::kShowSeparate,
AuthenticationRestrictionsFormat::kOmit,
&roleDoc);
- if (!status.isOK()) {
- return CommandHelpers::appendCommandStatus(result, status);
- }
+ uassertStatusOK(status);
PrivilegeVector privileges;
status = auth::parseAndValidatePrivilegeArray(BSONArray(roleDoc["privileges"].Obj()),
&privileges);
- if (!status.isOK()) {
- return CommandHelpers::appendCommandStatus(result, status);
- }
+ uassertStatusOK(status);
for (PrivilegeVector::iterator it = privilegesToAdd.begin(); it != privilegesToAdd.end();
++it) {
@@ -1812,18 +1692,12 @@ public:
mutablebson::Document updateObj;
mutablebson::Element setElement = updateObj.makeElementObject("$set");
status = updateObj.root().pushBack(setElement);
- if (!status.isOK()) {
- return CommandHelpers::appendCommandStatus(result, status);
- }
+ uassertStatusOK(status);
mutablebson::Element privilegesElement = updateObj.makeElementArray("privileges");
status = setElement.pushBack(privilegesElement);
- if (!status.isOK()) {
- return CommandHelpers::appendCommandStatus(result, status);
- }
+ uassertStatusOK(status);
status = authzManager->getBSONForPrivileges(privileges, privilegesElement);
- if (!status.isOK()) {
- return CommandHelpers::appendCommandStatus(result, status);
- }
+ uassertStatusOK(status);
BSONObjBuilder updateBSONBuilder;
updateObj.writeTo(&updateBSONBuilder);
@@ -1833,7 +1707,8 @@ public:
status = updateRoleDocument(opCtx, roleName, updateBSONBuilder.done());
// Must invalidate even on bad status - what if the write succeeded but the GLE failed?
authzManager->invalidateUserCache();
- return CommandHelpers::appendCommandStatus(result, status);
+ uassertStatusOK(status);
+ return true;
}
} cmdGrantPrivilegesToRole;
@@ -1872,25 +1747,19 @@ public:
PrivilegeVector privilegesToRemove;
Status status = auth::parseAndValidateRolePrivilegeManipulationCommands(
cmdObj, "revokePrivilegesFromRole", dbname, &roleName, &privilegesToRemove);
- if (!status.isOK()) {
- return CommandHelpers::appendCommandStatus(result, status);
- }
+ uassertStatusOK(status);
ServiceContext* serviceContext = opCtx->getClient()->getServiceContext();
stdx::lock_guard<stdx::mutex> lk(getAuthzDataMutex(serviceContext));
AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
status = requireWritableAuthSchema28SCRAM(opCtx, authzManager);
- if (!status.isOK()) {
- return CommandHelpers::appendCommandStatus(result, status);
- }
+ uassertStatusOK(status);
if (RoleGraph::isBuiltinRole(roleName)) {
- return CommandHelpers::appendCommandStatus(
- result,
- Status(ErrorCodes::InvalidRoleModification,
- str::stream() << roleName.getFullName()
- << " is a built-in role and cannot be modified."));
+ uasserted(ErrorCodes::InvalidRoleModification,
+ str::stream() << roleName.getFullName()
+ << " is a built-in role and cannot be modified.");
}
BSONObj roleDoc;
@@ -1899,16 +1768,12 @@ public:
PrivilegeFormat::kShowSeparate,
AuthenticationRestrictionsFormat::kOmit,
&roleDoc);
- if (!status.isOK()) {
- return CommandHelpers::appendCommandStatus(result, status);
- }
+ uassertStatusOK(status);
PrivilegeVector privileges;
status = auth::parseAndValidatePrivilegeArray(BSONArray(roleDoc["privileges"].Obj()),
&privileges);
- if (!status.isOK()) {
- return CommandHelpers::appendCommandStatus(result, status);
- }
+ uassertStatusOK(status);
for (PrivilegeVector::iterator itToRm = privilegesToRemove.begin();
itToRm != privilegesToRemove.end();
@@ -1929,18 +1794,12 @@ public:
mutablebson::Document updateObj;
mutablebson::Element setElement = updateObj.makeElementObject("$set");
status = updateObj.root().pushBack(setElement);
- if (!status.isOK()) {
- return CommandHelpers::appendCommandStatus(result, status);
- }
+ uassertStatusOK(status);
mutablebson::Element privilegesElement = updateObj.makeElementArray("privileges");
status = setElement.pushBack(privilegesElement);
- if (!status.isOK()) {
- return CommandHelpers::appendCommandStatus(result, status);
- }
+ uassertStatusOK(status);
status = authzManager->getBSONForPrivileges(privileges, privilegesElement);
- if (!status.isOK()) {
- return CommandHelpers::appendCommandStatus(result, status);
- }
+ uassertStatusOK(status);
audit::logRevokePrivilegesFromRole(Client::getCurrent(), roleName, privilegesToRemove);
@@ -1949,7 +1808,8 @@ public:
status = updateRoleDocument(opCtx, roleName, updateBSONBuilder.done());
// Must invalidate even on bad status - what if the write succeeded but the GLE failed?
authzManager->invalidateUserCache();
- return CommandHelpers::appendCommandStatus(result, status);
+ uassertStatusOK(status);
+ return true;
}
} cmdRevokePrivilegesFromRole;
@@ -1988,17 +1848,13 @@ public:
std::vector<RoleName> rolesToAdd;
Status status = auth::parseRolePossessionManipulationCommands(
cmdObj, "grantRolesToRole", dbname, &roleNameString, &rolesToAdd);
- if (!status.isOK()) {
- return CommandHelpers::appendCommandStatus(result, status);
- }
+ uassertStatusOK(status);
RoleName roleName(roleNameString, dbname);
if (RoleGraph::isBuiltinRole(roleName)) {
- return CommandHelpers::appendCommandStatus(
- result,
- Status(ErrorCodes::InvalidRoleModification,
- str::stream() << roleName.getFullName()
- << " is a built-in role and cannot be modified."));
+ uasserted(ErrorCodes::InvalidRoleModification,
+ str::stream() << roleName.getFullName()
+ << " is a built-in role and cannot be modified.");
}
ServiceContext* serviceContext = opCtx->getClient()->getServiceContext();
@@ -2006,30 +1862,22 @@ public:
AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
status = requireWritableAuthSchema28SCRAM(opCtx, authzManager);
- if (!status.isOK()) {
- return CommandHelpers::appendCommandStatus(result, status);
- }
+ uassertStatusOK(status);
// Role existence has to be checked after acquiring the update lock
BSONObj roleDoc;
status = authzManager->getRoleDescription(opCtx, roleName, &roleDoc);
- if (!status.isOK()) {
- return CommandHelpers::appendCommandStatus(result, status);
- }
+ uassertStatusOK(status);
// Check for cycles
status = checkOkayToGrantRolesToRole(opCtx, roleName, rolesToAdd, authzManager);
- if (!status.isOK()) {
- return CommandHelpers::appendCommandStatus(result, status);
- }
+ uassertStatusOK(status);
// Add new roles to existing roles
std::vector<RoleName> directRoles;
status = auth::parseRoleNamesFromBSONArray(
BSONArray(roleDoc["roles"].Obj()), roleName.getDB(), &directRoles);
- if (!status.isOK()) {
- return CommandHelpers::appendCommandStatus(result, status);
- }
+ uassertStatusOK(status);
for (vector<RoleName>::iterator it = rolesToAdd.begin(); it != rolesToAdd.end(); ++it) {
const RoleName& roleToAdd = *it;
if (!sequenceContains(directRoles, roleToAdd)) // Don't double-add role
@@ -2042,7 +1890,8 @@ public:
opCtx, roleName, BSON("$set" << BSON("roles" << rolesVectorToBSONArray(directRoles))));
// Must invalidate even on bad status - what if the write succeeded but the GLE failed?
authzManager->invalidateUserCache();
- return CommandHelpers::appendCommandStatus(result, status);
+ uassertStatusOK(status);
+ return true;
}
} cmdGrantRolesToRole;
@@ -2081,40 +1930,30 @@ public:
std::vector<RoleName> rolesToRemove;
Status status = auth::parseRolePossessionManipulationCommands(
cmdObj, "revokeRolesFromRole", dbname, &roleNameString, &rolesToRemove);
- if (!status.isOK()) {
- return CommandHelpers::appendCommandStatus(result, status);
- }
+ uassertStatusOK(status);
ServiceContext* serviceContext = opCtx->getClient()->getServiceContext();
stdx::lock_guard<stdx::mutex> lk(getAuthzDataMutex(serviceContext));
AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
status = requireWritableAuthSchema28SCRAM(opCtx, authzManager);
- if (!status.isOK()) {
- return CommandHelpers::appendCommandStatus(result, status);
- }
+ uassertStatusOK(status);
RoleName roleName(roleNameString, dbname);
if (RoleGraph::isBuiltinRole(roleName)) {
- return CommandHelpers::appendCommandStatus(
- result,
- Status(ErrorCodes::InvalidRoleModification,
- str::stream() << roleName.getFullName()
- << " is a built-in role and cannot be modified."));
+ uasserted(ErrorCodes::InvalidRoleModification,
+ str::stream() << roleName.getFullName()
+ << " is a built-in role and cannot be modified.");
}
BSONObj roleDoc;
status = authzManager->getRoleDescription(opCtx, roleName, &roleDoc);
- if (!status.isOK()) {
- return CommandHelpers::appendCommandStatus(result, status);
- }
+ uassertStatusOK(status);
std::vector<RoleName> roles;
status = auth::parseRoleNamesFromBSONArray(
BSONArray(roleDoc["roles"].Obj()), roleName.getDB(), &roles);
- if (!status.isOK()) {
- return CommandHelpers::appendCommandStatus(result, status);
- }
+ uassertStatusOK(status);
for (vector<RoleName>::const_iterator it = rolesToRemove.begin(); it != rolesToRemove.end();
++it) {
@@ -2130,7 +1969,8 @@ public:
opCtx, roleName, BSON("$set" << BSON("roles" << rolesVectorToBSONArray(roles))));
// Must invalidate even on bad status - what if the write succeeded but the GLE failed?
authzManager->invalidateUserCache();
- return CommandHelpers::appendCommandStatus(result, status);
+ uassertStatusOK(status);
+ return true;
}
} cmdRevokeRolesFromRole;
@@ -2170,32 +2010,24 @@ public:
BSONObjBuilder& result) {
RoleName roleName;
Status status = auth::parseDropRoleCommand(cmdObj, dbname, &roleName);
- if (!status.isOK()) {
- return CommandHelpers::appendCommandStatus(result, status);
- }
+ uassertStatusOK(status);
ServiceContext* serviceContext = opCtx->getClient()->getServiceContext();
stdx::lock_guard<stdx::mutex> lk(getAuthzDataMutex(serviceContext));
AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
status = requireWritableAuthSchema28SCRAM(opCtx, authzManager);
- if (!status.isOK()) {
- return CommandHelpers::appendCommandStatus(result, status);
- }
+ uassertStatusOK(status);
if (RoleGraph::isBuiltinRole(roleName)) {
- return CommandHelpers::appendCommandStatus(
- result,
- Status(ErrorCodes::InvalidRoleModification,
- str::stream() << roleName.getFullName()
- << " is a built-in role and cannot be modified."));
+ uasserted(ErrorCodes::InvalidRoleModification,
+ str::stream() << roleName.getFullName()
+ << " is a built-in role and cannot be modified.");
}
BSONObj roleDoc;
status = authzManager->getRoleDescription(opCtx, roleName, &roleDoc);
- if (!status.isOK()) {
- return CommandHelpers::appendCommandStatus(result, status);
- }
+ uassertStatusOK(status);
// Remove this role from all users
long long nMatched;
@@ -2216,11 +2048,10 @@ public:
// Must invalidate even on bad status - what if the write succeeded but the GLE failed?
authzManager->invalidateUserCache();
if (!status.isOK()) {
- return CommandHelpers::appendCommandStatus(
- result,
- useDefaultCode(status, ErrorCodes::UserModificationFailed)
- .withContext(str::stream() << "Failed to remove role " << roleName.getFullName()
- << " from all users"));
+ uassertStatusOK(useDefaultCode(status, ErrorCodes::UserModificationFailed)
+ .withContext(str::stream() << "Failed to remove role "
+ << roleName.getFullName()
+ << " from all users"));
}
// Remove this role from all other roles
@@ -2241,8 +2072,7 @@ public:
// Must invalidate even on bad status - what if the write succeeded but the GLE failed?
authzManager->invalidateUserCache();
if (!status.isOK()) {
- return CommandHelpers::appendCommandStatus(
- result,
+ uassertStatusOK(
useDefaultCode(status, ErrorCodes::RoleModificationFailed)
.withContext(
str::stream() << "Removed role " << roleName.getFullName()
@@ -2260,20 +2090,16 @@ public:
// Must invalidate even on bad status - what if the write succeeded but the GLE failed?
authzManager->invalidateUserCache();
if (!status.isOK()) {
- return CommandHelpers::appendCommandStatus(
- result,
- status.withContext(
- str::stream() << "Removed role " << roleName.getFullName()
- << " from all users and roles but failed to actually delete"
- " the role itself"));
+ uassertStatusOK(status.withContext(
+ str::stream() << "Removed role " << roleName.getFullName()
+ << " from all users and roles but failed to actually delete"
+ " the role itself"));
}
dassert(nMatched == 0 || nMatched == 1);
if (nMatched == 0) {
- return CommandHelpers::appendCommandStatus(
- result,
- Status(ErrorCodes::RoleNotFound,
- str::stream() << "Role '" << roleName.getFullName() << "' not found"));
+ uasserted(ErrorCodes::RoleNotFound,
+ str::stream() << "Role '" << roleName.getFullName() << "' not found");
}
return true;
@@ -2316,18 +2142,14 @@ public:
const BSONObj& cmdObj,
BSONObjBuilder& result) {
Status status = auth::parseDropAllRolesFromDatabaseCommand(cmdObj, dbname);
- if (!status.isOK()) {
- return CommandHelpers::appendCommandStatus(result, status);
- }
+ uassertStatusOK(status);
ServiceContext* serviceContext = opCtx->getClient()->getServiceContext();
stdx::lock_guard<stdx::mutex> lk(getAuthzDataMutex(serviceContext));
AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
status = requireWritableAuthSchema28SCRAM(opCtx, authzManager);
- if (!status.isOK()) {
- return CommandHelpers::appendCommandStatus(result, status);
- }
+ uassertStatusOK(status);
// Remove these roles from all users
long long nMatched;
@@ -2343,11 +2165,10 @@ public:
// Must invalidate even on bad status - what if the write succeeded but the GLE failed?
authzManager->invalidateUserCache();
if (!status.isOK()) {
- return CommandHelpers::appendCommandStatus(
- result,
- useDefaultCode(status, ErrorCodes::UserModificationFailed)
- .withContext(str::stream() << "Failed to remove roles from \"" << dbname
- << "\" db from all users"));
+ uassertStatusOK(useDefaultCode(status, ErrorCodes::UserModificationFailed)
+ .withContext(str::stream() << "Failed to remove roles from \""
+ << dbname
+ << "\" db from all users"));
}
// Remove these roles from all other roles
@@ -2365,11 +2186,10 @@ public:
// Must invalidate even on bad status - what if the write succeeded but the GLE failed?
authzManager->invalidateUserCache();
if (!status.isOK()) {
- return CommandHelpers::appendCommandStatus(
- result,
- useDefaultCode(status, ErrorCodes::RoleModificationFailed)
- .withContext(str::stream() << "Failed to remove roles from \"" << dbname
- << "\" db from all roles"));
+ uassertStatusOK(useDefaultCode(status, ErrorCodes::RoleModificationFailed)
+ .withContext(str::stream() << "Failed to remove roles from \""
+ << dbname
+ << "\" db from all roles"));
}
audit::logDropAllRolesFromDatabase(Client::getCurrent(), dbname);
@@ -2379,13 +2199,11 @@ public:
// Must invalidate even on bad status - what if the write succeeded but the GLE failed?
authzManager->invalidateUserCache();
if (!status.isOK()) {
- return CommandHelpers::appendCommandStatus(
- result,
- status.withContext(
- str::stream() << "Removed roles from \"" << dbname
- << "\" db "
- " from all users and roles but failed to actually delete"
- " those roles themselves"));
+ uassertStatusOK(status.withContext(
+ str::stream() << "Removed roles from \"" << dbname
+ << "\" db "
+ " from all users and roles but failed to actually delete"
+ " those roles themselves"));
}
result.append("n", nMatched);
@@ -2446,14 +2264,10 @@ public:
BSONObjBuilder& result) {
auth::RolesInfoArgs args;
Status status = auth::parseRolesInfoCommand(cmdObj, dbname, &args);
- if (!status.isOK()) {
- return CommandHelpers::appendCommandStatus(result, status);
- }
+ uassertStatusOK(status);
status = requireReadableAuthSchema26Upgrade(opCtx, getGlobalAuthorizationManager());
- if (!status.isOK()) {
- return CommandHelpers::appendCommandStatus(result, status);
- }
+ uassertStatusOK(status);
if (args.allForDB) {
std::vector<BSONObj> rolesDocs;
@@ -2464,15 +2278,11 @@ public:
args.authenticationRestrictionsFormat,
args.showBuiltinRoles,
&rolesDocs);
- if (!status.isOK()) {
- return CommandHelpers::appendCommandStatus(result, status);
- }
+ uassertStatusOK(status);
if (args.privilegeFormat == PrivilegeFormat::kShowAsUserFragment) {
- return CommandHelpers::appendCommandStatus(
- result,
- Status(ErrorCodes::IllegalOperation,
- "Cannot get user fragment for all roles in a database"));
+ uasserted(ErrorCodes::IllegalOperation,
+ "Cannot get user fragment for all roles in a database");
}
BSONArrayBuilder rolesArrayBuilder;
for (size_t i = 0; i < rolesDocs.size(); ++i) {
@@ -2487,9 +2297,7 @@ public:
args.privilegeFormat,
args.authenticationRestrictionsFormat,
&roleDetails);
- if (!status.isOK()) {
- return CommandHelpers::appendCommandStatus(result, status);
- }
+ uassertStatusOK(status);
if (args.privilegeFormat == PrivilegeFormat::kShowAsUserFragment) {
result.append("userFragment", roleDetails);
@@ -2940,16 +2748,12 @@ public:
BSONObjBuilder& result) {
auth::MergeAuthzCollectionsArgs args;
Status status = auth::parseMergeAuthzCollectionsCommand(cmdObj, &args);
- if (!status.isOK()) {
- return CommandHelpers::appendCommandStatus(result, status);
- }
+ uassertStatusOK(status);
if (args.usersCollName.empty() && args.rolesCollName.empty()) {
- return CommandHelpers::appendCommandStatus(
- result,
- Status(ErrorCodes::BadValue,
- "Must provide at least one of \"tempUsersCollection\" and "
- "\"tempRolescollection\""));
+ uasserted(ErrorCodes::BadValue,
+ "Must provide at least one of \"tempUsersCollection\" and "
+ "\"tempRolescollection\"");
}
ServiceContext* serviceContext = opCtx->getClient()->getServiceContext();
@@ -2957,24 +2761,18 @@ public:
AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
status = requireWritableAuthSchema28SCRAM(opCtx, authzManager);
- if (!status.isOK()) {
- return CommandHelpers::appendCommandStatus(result, status);
- }
+ uassertStatusOK(status);
if (!args.usersCollName.empty()) {
Status status =
processUsers(opCtx, authzManager, args.usersCollName, args.db, args.drop);
- if (!status.isOK()) {
- return CommandHelpers::appendCommandStatus(result, status);
- }
+ uassertStatusOK(status);
}
if (!args.rolesCollName.empty()) {
Status status =
processRoles(opCtx, authzManager, args.rolesCollName, args.db, args.drop);
- if (!status.isOK()) {
- return CommandHelpers::appendCommandStatus(result, status);
- }
+ uassertStatusOK(status);
}
return true;
diff --git a/src/mongo/db/commands/validate.cpp b/src/mongo/db/commands/validate.cpp
index c98c02d629d..4ca9fe21135 100644
--- a/src/mongo/db/commands/validate.cpp
+++ b/src/mongo/db/commands/validate.cpp
@@ -114,10 +114,8 @@ public:
}
if (!nss.isNormal() && full) {
- CommandHelpers::appendCommandStatus(
- result,
- {ErrorCodes::CommandFailed, "Can only run full validate on a regular collection"});
- return false;
+ uasserted(ErrorCodes::CommandFailed,
+ "Can only run full validate on a regular collection");
}
if (!serverGlobalParams.quiet.load()) {
@@ -129,13 +127,10 @@ public:
Collection* collection = ctx.getDb() ? ctx.getDb()->getCollection(opCtx, nss) : NULL;
if (!collection) {
if (ctx.getDb() && ctx.getDb()->getViewCatalog()->lookup(opCtx, nss.ns())) {
- return CommandHelpers::appendCommandStatus(
- result, {ErrorCodes::CommandNotSupportedOnView, "Cannot validate a view"});
+ uasserted(ErrorCodes::CommandNotSupportedOnView, "Cannot validate a view");
}
- CommandHelpers::appendCommandStatus(result,
- {ErrorCodes::NamespaceNotFound, "ns not found"});
- return false;
+ uasserted(ErrorCodes::NamespaceNotFound, "ns not found");
}
// Omit background validation logic until it is fully implemented and vetted.
@@ -151,18 +146,15 @@ public:
}
if (!isInRecordIdOrder && background) {
- appendCommandStatus(result,
- {ErrorCodes::CommandFailed,
- "This storage engine does not support the background option, use "
- "background:false"});
+ uasserted(ErrorCodes::CommandFailed,
+ "This storage engine does not support the background option, use "
+ "background:false");
return false;
}
if (full && background) {
- appendCommandStatus(result,
- {ErrorCodes::CommandFailed,
- "A full validate cannot run in the background, use full:false"});
- return false;
+ uasserted(ErrorCodes::CommandFailed,
+ "A full validate cannot run in the background, use full:false");
}
*/
diff --git a/src/mongo/db/exec/stagedebug_cmd.cpp b/src/mongo/db/exec/stagedebug_cmd.cpp
index c6056f5f3a4..a7e0dac8a2d 100644
--- a/src/mongo/db/exec/stagedebug_cmd.cpp
+++ b/src/mongo/db/exec/stagedebug_cmd.cpp
@@ -210,10 +210,8 @@ public:
<< PlanExecutor::statestr(state)
<< ", stats: " << redact(Explain::getWinningPlanStats(exec.get()));
- return CommandHelpers::appendCommandStatus(
- result,
- WorkingSetCommon::getMemberObjectStatus(obj).withContext(
- "Executor error during StageDebug command"));
+ uassertStatusOK(WorkingSetCommon::getMemberObjectStatus(obj).withContext(
+ "Executor error during StageDebug command"));
}
return true;
diff --git a/src/mongo/db/free_mon/free_mon_commands.cpp b/src/mongo/db/free_mon/free_mon_commands.cpp
index 459559cda38..5f8792fec49 100644
--- a/src/mongo/db/free_mon/free_mon_commands.cpp
+++ b/src/mongo/db/free_mon/free_mon_commands.cpp
@@ -153,11 +153,11 @@ public:
if (optStatus) {
// Completed within timeout.
- return CommandHelpers::appendCommandStatus(result, *optStatus);
+ uassertStatusOK(*optStatus);
} else {
// Pending operation.
- return CommandHelpers::appendCommandStatus(result, Status::OK());
}
+ return true;
}
} setFreeMonitoringCmd;
diff --git a/src/mongo/db/free_mon/free_mon_mongod.cpp b/src/mongo/db/free_mon/free_mon_mongod.cpp
index d90f0c37182..17d9b09f559 100644
--- a/src/mongo/db/free_mon/free_mon_mongod.cpp
+++ b/src/mongo/db/free_mon/free_mon_mongod.cpp
@@ -107,8 +107,7 @@ public:
.then([](std::vector<uint8_t> blob) {
if (blob.empty()) {
- uassertStatusOK(
- Status(ErrorCodes::FreeMonHttpTemporaryFailure, "Empty response received"));
+ uasserted(ErrorCodes::FreeMonHttpTemporaryFailure, "Empty response received");
}
ConstDataRange cdr(reinterpret_cast<char*>(blob.data()), blob.size());
@@ -133,8 +132,7 @@ public:
.then([](std::vector<uint8_t> blob) {
if (blob.empty()) {
- uassertStatusOK(
- Status(ErrorCodes::FreeMonHttpTemporaryFailure, "Empty response received"));
+ uasserted(ErrorCodes::FreeMonHttpTemporaryFailure, "Empty response received");
}
ConstDataRange cdr(reinterpret_cast<char*>(blob.data()), blob.size());
diff --git a/src/mongo/db/repl/repl_set_commands.cpp b/src/mongo/db/repl/repl_set_commands.cpp
index a6a97ceaa48..ded7149946a 100644
--- a/src/mongo/db/repl/repl_set_commands.cpp
+++ b/src/mongo/db/repl/repl_set_commands.cpp
@@ -107,44 +107,39 @@ public:
if (cmdObj.hasElement("waitForMemberState")) {
long long stateVal;
auto status = bsonExtractIntegerField(cmdObj, "waitForMemberState", &stateVal);
- if (!status.isOK()) {
- return CommandHelpers::appendCommandStatus(result, status);
- }
+ uassertStatusOK(status);
const auto swMemberState = MemberState::create(stateVal);
- if (!swMemberState.isOK()) {
- return CommandHelpers::appendCommandStatus(result, swMemberState.getStatus());
- }
+ uassertStatusOK(swMemberState.getStatus());
const auto expectedState = swMemberState.getValue();
long long timeoutMillis;
status = bsonExtractIntegerField(cmdObj, "timeoutMillis", &timeoutMillis);
- if (!status.isOK()) {
- return CommandHelpers::appendCommandStatus(result, status);
- }
+ uassertStatusOK(status);
Milliseconds timeout(timeoutMillis);
log() << "replSetTest: waiting " << timeout << " for member state to become "
<< expectedState;
status = replCoord->waitForMemberState(expectedState, timeout);
- return CommandHelpers::appendCommandStatus(result, status);
+ uassertStatusOK(status);
+ return true;
} else if (cmdObj.hasElement("waitForDrainFinish")) {
long long timeoutMillis;
auto status = bsonExtractIntegerField(cmdObj, "waitForDrainFinish", &timeoutMillis);
- if (!status.isOK()) {
- return CommandHelpers::appendCommandStatus(result, status);
- }
+ uassertStatusOK(status);
Milliseconds timeout(timeoutMillis);
log() << "replSetTest: waiting " << timeout << " for applier buffer to finish draining";
status = replCoord->waitForDrainFinish(timeout);
- return CommandHelpers::appendCommandStatus(result, status);
+ uassertStatusOK(status);
+ return true;
}
Status status = replCoord->checkReplEnabledForCommand(&result);
- return CommandHelpers::appendCommandStatus(result, status);
+ uassertStatusOK(status);
+ return true;
}
};
@@ -168,11 +163,10 @@ public:
const BSONObj& cmdObj,
BSONObjBuilder& result) {
Status status = ReplicationCoordinator::get(opCtx)->checkReplEnabledForCommand(&result);
- if (!status.isOK())
- return CommandHelpers::appendCommandStatus(result, status);
+ uassertStatusOK(status);
result.append("rbid", ReplicationProcess::get(opCtx)->getRollbackID());
- return CommandHelpers::appendCommandStatus(result, Status::OK());
+ return true;
}
} cmdReplSetRBID;
@@ -189,8 +183,7 @@ public:
const BSONObj& cmdObj,
BSONObjBuilder& result) {
Status status = ReplicationCoordinator::get(opCtx)->checkReplEnabledForCommand(&result);
- if (!status.isOK())
- return CommandHelpers::appendCommandStatus(result, status);
+ uassertStatusOK(status);
ReplicationCoordinator::get(opCtx)->processReplSetGetConfig(&result);
return true;
@@ -312,10 +305,8 @@ public:
std::string replSetString =
ReplicationCoordinator::get(opCtx)->getSettings().getReplSetString();
if (replSetString.empty()) {
- return CommandHelpers::appendCommandStatus(
- result,
- Status(ErrorCodes::NoReplicationEnabled,
- "This node was not started with the replSet option"));
+ uasserted(ErrorCodes::NoReplicationEnabled,
+ "This node was not started with the replSet option");
}
if (configObj.isEmpty()) {
@@ -359,7 +350,8 @@ public:
Status status =
ReplicationCoordinator::get(opCtx)->processReplSetInitiate(opCtx, configObj, &result);
- return CommandHelpers::appendCommandStatus(result, status);
+ uassertStatusOK(status);
+ return true;
}
private:
@@ -381,9 +373,7 @@ public:
const BSONObj& cmdObj,
BSONObjBuilder& result) {
Status status = ReplicationCoordinator::get(opCtx)->checkReplEnabledForCommand(&result);
- if (!status.isOK()) {
- return CommandHelpers::appendCommandStatus(result, status);
- }
+ uassertStatusOK(status);
if (cmdObj["replSetReconfig"].type() != Object) {
result.append("errmsg", "no configuration specified");
@@ -411,7 +401,8 @@ public:
}
wuow.commit();
- return CommandHelpers::appendCommandStatus(result, status);
+ uassertStatusOK(status);
+ return true;
}
private:
@@ -438,12 +429,11 @@ public:
const BSONObj& cmdObj,
BSONObjBuilder& result) {
Status status = ReplicationCoordinator::get(opCtx)->checkReplEnabledForCommand(&result);
- if (!status.isOK())
- return CommandHelpers::appendCommandStatus(result, status);
+ uassertStatusOK(status);
int secs = (int)cmdObj.firstElement().numberInt();
- return CommandHelpers::appendCommandStatus(
- result, ReplicationCoordinator::get(opCtx)->processReplSetFreeze(secs, &result));
+ uassertStatusOK(ReplicationCoordinator::get(opCtx)->processReplSetFreeze(secs, &result));
+ return true;
}
private:
@@ -468,8 +458,7 @@ public:
const BSONObj& cmdObj,
BSONObjBuilder& result) {
Status status = ReplicationCoordinator::get(opCtx)->checkReplEnabledForCommand(&result);
- if (!status.isOK())
- return CommandHelpers::appendCommandStatus(result, status);
+ uassertStatusOK(status);
const bool force = cmdObj["force"].trueValue();
@@ -478,7 +467,7 @@ public:
stepDownForSecs = 60;
} else if (stepDownForSecs < 0) {
status = Status(ErrorCodes::BadValue, "stepdown period must be a positive integer");
- return CommandHelpers::appendCommandStatus(result, status);
+ uassertStatusOK(status);
}
long long secondaryCatchUpPeriodSecs;
@@ -492,26 +481,27 @@ public:
secondaryCatchUpPeriodSecs = 10;
}
} else if (!status.isOK()) {
- return CommandHelpers::appendCommandStatus(result, status);
+ uassertStatusOK(status);
}
if (secondaryCatchUpPeriodSecs < 0) {
status = Status(ErrorCodes::BadValue,
"secondaryCatchUpPeriodSecs period must be a positive or absent");
- return CommandHelpers::appendCommandStatus(result, status);
+ uassertStatusOK(status);
}
if (stepDownForSecs < secondaryCatchUpPeriodSecs) {
status = Status(ErrorCodes::BadValue,
"stepdown period must be longer than secondaryCatchUpPeriodSecs");
- return CommandHelpers::appendCommandStatus(result, status);
+ uassertStatusOK(status);
}
log() << "Attempting to step down in response to replSetStepDown command";
status = ReplicationCoordinator::get(opCtx)->stepDown(
opCtx, force, Seconds(secondaryCatchUpPeriodSecs), Seconds(stepDownForSecs));
- return CommandHelpers::appendCommandStatus(result, status);
+ uassertStatusOK(status);
+ return true;
}
private:
@@ -532,13 +522,11 @@ public:
const BSONObj& cmdObj,
BSONObjBuilder& result) {
Status status = ReplicationCoordinator::get(opCtx)->checkReplEnabledForCommand(&result);
- if (!status.isOK())
- return CommandHelpers::appendCommandStatus(result, status);
+ uassertStatusOK(status);
- return CommandHelpers::appendCommandStatus(
- result,
- ReplicationCoordinator::get(opCtx)->setMaintenanceMode(
- cmdObj["replSetMaintenance"].trueValue()));
+ uassertStatusOK(ReplicationCoordinator::get(opCtx)->setMaintenanceMode(
+ cmdObj["replSetMaintenance"].trueValue()));
+ return true;
}
private:
@@ -560,18 +548,15 @@ public:
const BSONObj& cmdObj,
BSONObjBuilder& result) {
Status status = ReplicationCoordinator::get(opCtx)->checkReplEnabledForCommand(&result);
- if (!status.isOK())
- return CommandHelpers::appendCommandStatus(result, status);
+ uassertStatusOK(status);
HostAndPort targetHostAndPort;
status = targetHostAndPort.initialize(cmdObj["replSetSyncFrom"].valuestrsafe());
- if (!status.isOK())
- return CommandHelpers::appendCommandStatus(result, status);
+ uassertStatusOK(status);
- return CommandHelpers::appendCommandStatus(
- result,
- ReplicationCoordinator::get(opCtx)->processReplSetSyncFrom(
- opCtx, targetHostAndPort, &result));
+ uassertStatusOK(ReplicationCoordinator::get(opCtx)->processReplSetSyncFrom(
+ opCtx, targetHostAndPort, &result));
+ return true;
}
private:
@@ -590,8 +575,7 @@ public:
auto replCoord = repl::ReplicationCoordinator::get(opCtx->getClient()->getServiceContext());
Status status = replCoord->checkReplEnabledForCommand(&result);
- if (!status.isOK())
- return CommandHelpers::appendCommandStatus(result, status);
+ uassertStatusOK(status);
// accept and ignore handshakes sent from old (3.0-series) nodes without erroring to
// enable mixed-version operation, since we no longer use the handshakes
@@ -623,8 +607,9 @@ public:
return CommandHelpers::appendCommandStatusNoThrow(result, status);
} else {
// Parsing error from UpdatePositionArgs.
- return CommandHelpers::appendCommandStatus(result, status);
+ uassertStatusOK(status);
}
+ return true;
}
} cmdReplSetUpdatePosition;
@@ -685,7 +670,7 @@ public:
checks many things that are pre-initialization. */
if (!ReplicationCoordinator::get(opCtx)->getSettings().usingReplSets()) {
status = Status(ErrorCodes::NoReplicationEnabled, "not running with --replSet");
- return CommandHelpers::appendCommandStatus(result, status);
+ uassertStatusOK(status);
}
// Process heartbeat based on the version of request. The missing fields in mismatched
@@ -702,7 +687,8 @@ public:
LOG_FOR_HEARTBEATS(2) << "Processed heartbeat from "
<< cmdObj.getStringField("from")
<< " and generated response, " << response;
- return CommandHelpers::appendCommandStatus(result, status);
+ uassertStatusOK(status);
+ return true;
}
// else: fall through to old heartbeat protocol as it is likely that
// a new node just joined the set
@@ -710,9 +696,7 @@ public:
ReplSetHeartbeatArgs args;
status = args.initialize(cmdObj);
- if (!status.isOK()) {
- return CommandHelpers::appendCommandStatus(result, status);
- }
+ uassertStatusOK(status);
// ugh.
if (args.getCheckEmpty()) {
@@ -726,7 +710,8 @@ public:
LOG_FOR_HEARTBEATS(2) << "Processed heartbeat from " << cmdObj.getStringField("from")
<< " and generated response, " << response;
- return CommandHelpers::appendCommandStatus(result, status);
+ uassertStatusOK(status);
+ return true;
}
} cmdReplSetHeartbeat;
@@ -742,8 +727,7 @@ public:
const BSONObj& cmdObj,
BSONObjBuilder& result) {
Status status = ReplicationCoordinator::get(opCtx)->checkReplEnabledForCommand(&result);
- if (!status.isOK())
- return CommandHelpers::appendCommandStatus(result, status);
+ uassertStatusOK(status);
ReplicationCoordinator::ReplSetFreshArgs parsedArgs;
parsedArgs.id = cmdObj["id"].Int();
@@ -759,7 +743,8 @@ public:
parsedArgs.opTime = Timestamp(cmdObj["opTime"].Date());
status = ReplicationCoordinator::get(opCtx)->processReplSetFresh(parsedArgs, &result);
- return CommandHelpers::appendCommandStatus(result, status);
+ uassertStatusOK(status);
+ return true;
}
} cmdReplSetFresh;
@@ -776,8 +761,7 @@ private:
else LOG(2) << "received elect msg " << cmdObj.toString();
Status status = ReplicationCoordinator::get(opCtx)->checkReplEnabledForCommand(&result);
- if (!status.isOK())
- return CommandHelpers::appendCommandStatus(result, status);
+ uassertStatusOK(status);
ReplicationCoordinator::ReplSetElectArgs parsedArgs;
parsedArgs.set = cmdObj["set"].String();
@@ -792,7 +776,8 @@ private:
parsedArgs.round = cmdObj["round"].OID();
status = ReplicationCoordinator::get(opCtx)->processReplSetElect(parsedArgs, &result);
- return CommandHelpers::appendCommandStatus(result, status);
+ uassertStatusOK(status);
+ return true;
}
} cmdReplSetElect;
@@ -805,8 +790,7 @@ public:
const BSONObj& cmdObj,
BSONObjBuilder& result) {
Status status = ReplicationCoordinator::get(opCtx)->checkReplEnabledForCommand(&result);
- if (!status.isOK())
- return CommandHelpers::appendCommandStatus(result, status);
+ uassertStatusOK(status);
log() << "Received replSetStepUp request";
@@ -816,7 +800,8 @@ public:
log() << "replSetStepUp request failed" << causedBy(status);
}
- return CommandHelpers::appendCommandStatus(result, status);
+ uassertStatusOK(status);
+ return true;
}
private:
@@ -840,15 +825,15 @@ public:
const BSONObj& cmdObj,
BSONObjBuilder& result) override {
Status status = ReplicationCoordinator::get(opCtx)->checkReplEnabledForCommand(&result);
- if (!status.isOK())
- return CommandHelpers::appendCommandStatus(result, status);
+ uassertStatusOK(status);
log() << "Received replSetAbortPrimaryCatchUp request";
status = ReplicationCoordinator::get(opCtx)->abortCatchupIfNeeded();
if (!status.isOK()) {
log() << "replSetAbortPrimaryCatchUp request failed" << causedBy(status);
}
- return CommandHelpers::appendCommandStatus(result, status);
+ uassertStatusOK(status);
+ return true;
}
private:
diff --git a/src/mongo/db/repl/repl_set_get_status_cmd.cpp b/src/mongo/db/repl/repl_set_get_status_cmd.cpp
index fb6e7fd7006..75b28031c27 100644
--- a/src/mongo/db/repl/repl_set_get_status_cmd.cpp
+++ b/src/mongo/db/repl/repl_set_get_status_cmd.cpp
@@ -51,15 +51,12 @@ public:
LastError::get(opCtx->getClient()).disable();
Status status = ReplicationCoordinator::get(opCtx)->checkReplEnabledForCommand(&result);
- if (!status.isOK())
- return CommandHelpers::appendCommandStatus(result, status);
+ uassertStatusOK(status);
bool includeInitialSync = false;
Status initialSyncStatus =
bsonExtractBooleanFieldWithDefault(cmdObj, "initialSync", false, &includeInitialSync);
- if (!initialSyncStatus.isOK()) {
- return CommandHelpers::appendCommandStatus(result, initialSyncStatus);
- }
+ uassertStatusOK(initialSyncStatus);
auto responseStyle = ReplicationCoordinator::ReplSetGetStatusResponseStyle::kBasic;
if (includeInitialSync) {
@@ -67,7 +64,8 @@ public:
}
status =
ReplicationCoordinator::get(opCtx)->processReplSetGetStatus(&result, responseStyle);
- return CommandHelpers::appendCommandStatus(result, status);
+ uassertStatusOK(status);
+ return true;
}
private:
diff --git a/src/mongo/db/repl/repl_set_request_votes.cpp b/src/mongo/db/repl/repl_set_request_votes.cpp
index 7f7682831ef..6a62caad7ee 100644
--- a/src/mongo/db/repl/repl_set_request_votes.cpp
+++ b/src/mongo/db/repl/repl_set_request_votes.cpp
@@ -52,15 +52,11 @@ private:
const BSONObj& cmdObj,
BSONObjBuilder& result) final {
Status status = ReplicationCoordinator::get(opCtx)->checkReplEnabledForCommand(&result);
- if (!status.isOK()) {
- return CommandHelpers::appendCommandStatus(result, status);
- }
+ uassertStatusOK(status);
ReplSetRequestVotesArgs parsedArgs;
status = parsedArgs.initialize(cmdObj);
- if (!status.isOK()) {
- return CommandHelpers::appendCommandStatus(result, status);
- }
+ uassertStatusOK(status);
ReplSetRequestVotesResponse response;
status = ReplicationCoordinator::get(opCtx)->processReplSetRequestVotes(
diff --git a/src/mongo/db/repl/replication_info.cpp b/src/mongo/db/repl/replication_info.cpp
index bed3b9aeca2..0b4ea7df501 100644
--- a/src/mongo/db/repl/replication_info.cpp
+++ b/src/mongo/db/repl/replication_info.cpp
@@ -255,18 +255,13 @@ public:
BSONElement element = cmdObj[kMetadataDocumentName];
if (!element.eoo()) {
if (seenIsMaster) {
- return CommandHelpers::appendCommandStatus(
- result,
- Status(ErrorCodes::ClientMetadataCannotBeMutated,
- "The client metadata document may only be sent in the first isMaster"));
+ uasserted(ErrorCodes::ClientMetadataCannotBeMutated,
+ "The client metadata document may only be sent in the first isMaster");
}
auto swParseClientMetadata = ClientMetadata::parse(element);
- if (!swParseClientMetadata.getStatus().isOK()) {
- return CommandHelpers::appendCommandStatus(result,
- swParseClientMetadata.getStatus());
- }
+ uassertStatusOK(swParseClientMetadata.getStatus());
invariant(swParseClientMetadata.getValue());
diff --git a/src/mongo/db/s/check_sharding_index_command.cpp b/src/mongo/db/s/check_sharding_index_command.cpp
index f87b527ec28..804cf285001 100644
--- a/src/mongo/db/s/check_sharding_index_command.cpp
+++ b/src/mongo/db/s/check_sharding_index_command.cpp
@@ -200,10 +200,8 @@ public:
}
if (PlanExecutor::DEAD == state || PlanExecutor::FAILURE == state) {
- return CommandHelpers::appendCommandStatus(
- result,
- WorkingSetCommon::getMemberObjectStatus(currKey).withContext(
- "Executor error while checking sharding index"));
+ uassertStatusOK(WorkingSetCommon::getMemberObjectStatus(currKey).withContext(
+ "Executor error while checking sharding index"));
}
return true;
diff --git a/src/mongo/db/s/config/configsvr_add_shard_command.cpp b/src/mongo/db/s/config/configsvr_add_shard_command.cpp
index 0b8b39bab93..75fcba534a5 100644
--- a/src/mongo/db/s/config/configsvr_add_shard_command.cpp
+++ b/src/mongo/db/s/config/configsvr_add_shard_command.cpp
@@ -92,10 +92,8 @@ public:
const BSONObj& cmdObj,
BSONObjBuilder& result) override {
if (serverGlobalParams.clusterRole != ClusterRole::ConfigServer) {
- return CommandHelpers::appendCommandStatus(
- result,
- Status(ErrorCodes::IllegalOperation,
- "_configsvrAddShard can only be run on config servers"));
+ uasserted(ErrorCodes::IllegalOperation,
+ "_configsvrAddShard can only be run on config servers");
}
// Do not allow adding shards while a featureCompatibilityVersion upgrade or downgrade is in
@@ -104,18 +102,14 @@ public:
Lock::SharedLock lk(opCtx->lockState(), FeatureCompatibilityVersion::fcvLock);
auto swParsedRequest = AddShardRequest::parseFromConfigCommand(cmdObj);
- if (!swParsedRequest.isOK()) {
- return CommandHelpers::appendCommandStatus(result, swParsedRequest.getStatus());
- }
+ uassertStatusOK(swParsedRequest.getStatus());
auto parsedRequest = std::move(swParsedRequest.getValue());
auto replCoord = repl::ReplicationCoordinator::get(opCtx);
auto rsConfig = replCoord->getConfig();
auto validationStatus = parsedRequest.validate(rsConfig.isLocalHostAllowed());
- if (!validationStatus.isOK()) {
- return CommandHelpers::appendCommandStatus(result, validationStatus);
- }
+ uassertStatusOK(validationStatus);
uassert(ErrorCodes::InvalidOptions,
str::stream() << "addShard must be called with majority writeConcern, got "
@@ -137,7 +131,7 @@ public:
if (!addShardResult.isOK()) {
log() << "addShard request '" << parsedRequest << "'"
<< "failed" << causedBy(addShardResult.getStatus());
- return CommandHelpers::appendCommandStatus(result, addShardResult.getStatus());
+ uassertStatusOK(addShardResult.getStatus());
}
result << "shardAdded" << addShardResult.getValue();
diff --git a/src/mongo/db/s/config/configsvr_commit_chunk_migration_command.cpp b/src/mongo/db/s/config/configsvr_commit_chunk_migration_command.cpp
index a3881e2626b..c80f4bb02a3 100644
--- a/src/mongo/db/s/config/configsvr_commit_chunk_migration_command.cpp
+++ b/src/mongo/db/s/config/configsvr_commit_chunk_migration_command.cpp
@@ -136,9 +136,7 @@ public:
commitRequest.getFromShard(),
commitRequest.getToShard(),
commitRequest.getValidAfter());
- if (!response.isOK()) {
- return CommandHelpers::appendCommandStatus(result, response.getStatus());
- }
+ uassertStatusOK(response.getStatus());
result.appendElements(response.getValue());
return true;
}
diff --git a/src/mongo/db/s/config/configsvr_commit_move_primary_command.cpp b/src/mongo/db/s/config/configsvr_commit_move_primary_command.cpp
index ecb74cbfa48..a01327befca 100644
--- a/src/mongo/db/s/config/configsvr_commit_move_primary_command.cpp
+++ b/src/mongo/db/s/config/configsvr_commit_move_primary_command.cpp
@@ -75,10 +75,8 @@ public:
BSONObjBuilder& result) override {
if (serverGlobalParams.clusterRole != ClusterRole::ConfigServer) {
- return CommandHelpers::appendCommandStatus(
- result,
- Status(ErrorCodes::IllegalOperation,
- "_configsvrCommitMovePrimary can only be run on config servers"));
+ uasserted(ErrorCodes::IllegalOperation,
+ "_configsvrCommitMovePrimary can only be run on config servers");
}
uassert(ErrorCodes::InvalidOptions,
diff --git a/src/mongo/db/s/config/configsvr_drop_database_command.cpp b/src/mongo/db/s/config/configsvr_drop_database_command.cpp
index 7d4cfcbd5a4..9e8f2d092c7 100644
--- a/src/mongo/db/s/config/configsvr_drop_database_command.cpp
+++ b/src/mongo/db/s/config/configsvr_drop_database_command.cpp
@@ -89,10 +89,8 @@ public:
const BSONObj& cmdObj,
BSONObjBuilder& result) {
if (serverGlobalParams.clusterRole != ClusterRole::ConfigServer) {
- return CommandHelpers::appendCommandStatus(
- result,
- Status(ErrorCodes::IllegalOperation,
- "_configsvrDropDatabase can only be run on config servers"));
+ uasserted(ErrorCodes::IllegalOperation,
+ "_configsvrDropDatabase can only be run on config servers");
}
const std::string dbname = parseNs("", cmdObj);
diff --git a/src/mongo/db/s/config/configsvr_enable_sharding_command.cpp b/src/mongo/db/s/config/configsvr_enable_sharding_command.cpp
index ba51e302969..5401fd64a1e 100644
--- a/src/mongo/db/s/config/configsvr_enable_sharding_command.cpp
+++ b/src/mongo/db/s/config/configsvr_enable_sharding_command.cpp
@@ -99,10 +99,8 @@ public:
BSONObjBuilder& result) {
if (serverGlobalParams.clusterRole != ClusterRole::ConfigServer) {
- return CommandHelpers::appendCommandStatus(
- result,
- Status(ErrorCodes::IllegalOperation,
- "_configsvrEnableSharding can only be run on config servers"));
+ uasserted(ErrorCodes::IllegalOperation,
+ "_configsvrEnableSharding can only be run on config servers");
}
const std::string dbname = parseNs("", cmdObj);
@@ -113,10 +111,8 @@ public:
NamespaceString::validDBName(dbname, NamespaceString::DollarInDbNameBehavior::Allow));
if (dbname == NamespaceString::kAdminDb || dbname == NamespaceString::kLocalDb) {
- return CommandHelpers::appendCommandStatus(
- result,
- {ErrorCodes::InvalidOptions,
- str::stream() << "can't shard " + dbname + " database"});
+ uasserted(ErrorCodes::InvalidOptions,
+ str::stream() << "can't shard " + dbname + " database");
}
uassert(ErrorCodes::InvalidOptions,
diff --git a/src/mongo/db/s/config/configsvr_merge_chunk_command.cpp b/src/mongo/db/s/config/configsvr_merge_chunk_command.cpp
index 3de90801559..6e5853fd14e 100644
--- a/src/mongo/db/s/config/configsvr_merge_chunk_command.cpp
+++ b/src/mongo/db/s/config/configsvr_merge_chunk_command.cpp
@@ -117,10 +117,7 @@ public:
parsedRequest.getShardName(),
parsedRequest.getValidAfter());
- if (!mergeChunkResult.isOK()) {
- return CommandHelpers::appendCommandStatus(result, mergeChunkResult);
- }
-
+ uassertStatusOK(mergeChunkResult);
return true;
}
} configsvrMergeChunkCmd;
diff --git a/src/mongo/db/s/config/configsvr_move_primary_command.cpp b/src/mongo/db/s/config/configsvr_move_primary_command.cpp
index b8d4e39f108..f84d976283c 100644
--- a/src/mongo/db/s/config/configsvr_move_primary_command.cpp
+++ b/src/mongo/db/s/config/configsvr_move_primary_command.cpp
@@ -108,10 +108,8 @@ public:
BSONObjBuilder& result) override {
if (serverGlobalParams.clusterRole != ClusterRole::ConfigServer) {
- return CommandHelpers::appendCommandStatus(
- result,
- Status(ErrorCodes::IllegalOperation,
- "_configsvrMovePrimary can only be run on config servers"));
+ uasserted(ErrorCodes::IllegalOperation,
+ "_configsvrMovePrimary can only be run on config servers");
}
auto movePrimaryRequest =
@@ -125,10 +123,8 @@ public:
if (dbname == NamespaceString::kAdminDb || dbname == NamespaceString::kConfigDb ||
dbname == NamespaceString::kLocalDb) {
- return CommandHelpers::appendCommandStatus(
- result,
- {ErrorCodes::InvalidOptions,
- str::stream() << "Can't move primary for " << dbname << " database"});
+ uasserted(ErrorCodes::InvalidOptions,
+ str::stream() << "Can't move primary for " << dbname << " database");
}
uassert(ErrorCodes::InvalidOptions,
@@ -139,10 +135,8 @@ public:
const std::string to = movePrimaryRequest.getTo().toString();
if (to.empty()) {
- return CommandHelpers::appendCommandStatus(
- result,
- {ErrorCodes::InvalidOptions,
- str::stream() << "you have to specify where you want to move it"});
+ uasserted(ErrorCodes::InvalidOptions,
+ str::stream() << "you have to specify where you want to move it");
}
auto const catalogClient = Grid::get(opCtx)->catalogClient();
@@ -249,8 +243,7 @@ public:
if (!worked) {
log() << "clone failed" << redact(cloneRes);
- return CommandHelpers::appendCommandStatus(
- result, {ErrorCodes::OperationFailed, str::stream() << "clone failed"});
+ uasserted(ErrorCodes::OperationFailed, str::stream() << "clone failed");
}
if (auto wcErrorElem = cloneRes["writeConcernError"]) {
diff --git a/src/mongo/db/s/config/configsvr_remove_shard_command.cpp b/src/mongo/db/s/config/configsvr_remove_shard_command.cpp
index 2ce5145b355..b77554f419d 100644
--- a/src/mongo/db/s/config/configsvr_remove_shard_command.cpp
+++ b/src/mongo/db/s/config/configsvr_remove_shard_command.cpp
@@ -109,8 +109,7 @@ public:
std::string msg(str::stream() << "Could not drop shard '" << target
<< "' because it does not exist");
log() << msg;
- return CommandHelpers::appendCommandStatus(result,
- Status(ErrorCodes::ShardNotFound, msg));
+ uasserted(ErrorCodes::ShardNotFound, msg);
}
const auto& shard = shardStatus.getValue();
@@ -156,9 +155,7 @@ public:
boost::none, // return all
nullptr,
repl::ReadConcernLevel::kMajorityReadConcern);
- if (!swChunks.isOK()) {
- return CommandHelpers::appendCommandStatus(result, swChunks.getStatus());
- }
+ uassertStatusOK(swChunks.getStatus());
const auto& chunks = swChunks.getValue();
result.append("msg", "draining ongoing");
diff --git a/src/mongo/db/s/merge_chunks_command.cpp b/src/mongo/db/s/merge_chunks_command.cpp
index 97ed3c87c14..a1fab846dc3 100644
--- a/src/mongo/db/s/merge_chunks_command.cpp
+++ b/src/mongo/db/s/merge_chunks_command.cpp
@@ -373,7 +373,8 @@ public:
}
auto mergeStatus = mergeChunks(opCtx, nss, minKey, maxKey, epoch);
- return CommandHelpers::appendCommandStatus(result, mergeStatus);
+ uassertStatusOK(mergeStatus);
+ return true;
}
} mergeChunksCmd;
diff --git a/src/mongo/db/s/migration_destination_manager_legacy_commands.cpp b/src/mongo/db/s/migration_destination_manager_legacy_commands.cpp
index c2d70d4476a..4ff6789ec65 100644
--- a/src/mongo/db/s/migration_destination_manager_legacy_commands.cpp
+++ b/src/mongo/db/s/migration_destination_manager_legacy_commands.cpp
@@ -218,7 +218,7 @@ public:
mdm->report(result, opCtx, false);
if (!status.isOK()) {
log() << status.reason();
- return CommandHelpers::appendCommandStatus(result, status);
+ uassertStatusOK(status);
}
return true;
}
@@ -266,7 +266,7 @@ public:
mdm->report(result, opCtx, false);
if (!status.isOK()) {
log() << status.reason();
- return CommandHelpers::appendCommandStatus(result, status);
+ uassertStatusOK(status);
}
} else if (migrationSessionIdStatus == ErrorCodes::NoSuchKey) {
mdm->abortWithoutSessionIdCheck();
diff --git a/src/mongo/db/s/move_chunk_command.cpp b/src/mongo/db/s/move_chunk_command.cpp
index cb1fa99fc2c..80b31ba76c7 100644
--- a/src/mongo/db/s/move_chunk_command.cpp
+++ b/src/mongo/db/s/move_chunk_command.cpp
@@ -145,16 +145,6 @@ public:
} else {
status = scopedMigration.waitForCompletion(opCtx);
}
-
- if (status == ErrorCodes::ChunkTooBig) {
- // This code is for compatibility with pre-3.2 balancer, which does not recognize the
- // ChunkTooBig error code and instead uses the "chunkTooBig" field in the response,
- // and the 3.4 shard, which failed to set the ChunkTooBig status code.
- // TODO: Remove after 3.6 is released.
- result.appendBool("chunkTooBig", true);
- return CommandHelpers::appendCommandStatus(result, status);
- }
-
uassertStatusOK(status);
if (moveChunkRequest.getWaitForDelete()) {
diff --git a/src/mongo/db/s/split_chunk_command.cpp b/src/mongo/db/s/split_chunk_command.cpp
index b69b33173e0..77b0cbacaf1 100644
--- a/src/mongo/db/s/split_chunk_command.cpp
+++ b/src/mongo/db/s/split_chunk_command.cpp
@@ -118,8 +118,7 @@ public:
string shardName;
auto parseShardNameStatus = bsonExtractStringField(cmdObj, "from", &shardName);
- if (!parseShardNameStatus.isOK())
- return CommandHelpers::appendCommandStatus(result, parseShardNameStatus);
+ uassertStatusOK(parseShardNameStatus);
log() << "received splitChunk request: " << redact(cmdObj);
diff --git a/src/mongo/db/s/split_vector_command.cpp b/src/mongo/db/s/split_vector_command.cpp
index a3f275f046e..207358a23dd 100644
--- a/src/mongo/db/s/split_vector_command.cpp
+++ b/src/mongo/db/s/split_vector_command.cpp
@@ -144,9 +144,7 @@ public:
maxChunkObjects,
maxChunkSize,
maxChunkSizeBytes);
- if (!statusWithSplitKeys.isOK()) {
- return CommandHelpers::appendCommandStatus(result, statusWithSplitKeys.getStatus());
- }
+ uassertStatusOK(statusWithSplitKeys.getStatus());
result.append("splitKeys", statusWithSplitKeys.getValue());
return true;
diff --git a/src/mongo/db/service_entry_point_mongod.cpp b/src/mongo/db/service_entry_point_mongod.cpp
index 4e9cdb89d9c..4ba450faa74 100644
--- a/src/mongo/db/service_entry_point_mongod.cpp
+++ b/src/mongo/db/service_entry_point_mongod.cpp
@@ -110,7 +110,7 @@ public:
void uassertCommandDoesNotSpecifyWriteConcern(const BSONObj& cmd) const override {
if (commandSpecifiesWriteConcern(cmd)) {
- uassertStatusOK({ErrorCodes::InvalidOptions, "Command does not support writeConcern"});
+ uasserted(ErrorCodes::InvalidOptions, "Command does not support writeConcern");
}
}
diff --git a/src/mongo/rpc/reply_builder_interface.cpp b/src/mongo/rpc/reply_builder_interface.cpp
index 461e3edc6a3..8a4460d7247 100644
--- a/src/mongo/rpc/reply_builder_interface.cpp
+++ b/src/mongo/rpc/reply_builder_interface.cpp
@@ -44,7 +44,7 @@ const char kCodeField[] = "code";
const char kCodeNameField[] = "codeName";
const char kErrorField[] = "errmsg";
-// similar to appendCommandStatus (duplicating logic here to avoid cyclic library
+// similar to appendCommandStatusNoThrow (duplicating logic here to avoid cyclic library
// dependency)
BSONObj augmentReplyWithStatus(const Status& status, BSONObj reply) {
auto okField = reply.getField(kOKField);
diff --git a/src/mongo/s/commands/cluster_aggregate.cpp b/src/mongo/s/commands/cluster_aggregate.cpp
index a5cc67049b7..46badd74eb5 100644
--- a/src/mongo/s/commands/cluster_aggregate.cpp
+++ b/src/mongo/s/commands/cluster_aggregate.cpp
@@ -612,7 +612,7 @@ BSONObj establishMergingMongosCursor(OperationContext* opCtx,
responseBuilder.done(clusterCursorId, requestedNss.ns());
- CommandHelpers::appendCommandStatus(cursorResponse, Status::OK());
+ CommandHelpers::appendSimpleCommandStatus(cursorResponse, true);
return cursorResponse.obj();
}
diff --git a/src/mongo/s/commands/cluster_commands_helpers.cpp b/src/mongo/s/commands/cluster_commands_helpers.cpp
index 777cbc141b8..d420afcdfa1 100644
--- a/src/mongo/s/commands/cluster_commands_helpers.cpp
+++ b/src/mongo/s/commands/cluster_commands_helpers.cpp
@@ -495,7 +495,8 @@ bool appendEmptyResultSet(OperationContext* opCtx,
return true;
}
- return CommandHelpers::appendCommandStatus(result, status);
+ uassertStatusOK(status);
+ return true;
}
StatusWith<CachedDatabaseInfo> createShardDatabase(OperationContext* opCtx, StringData dbName) {
diff --git a/src/mongo/s/commands/cluster_count_cmd.cpp b/src/mongo/s/commands/cluster_count_cmd.cpp
index ef0a753359a..f443c297905 100644
--- a/src/mongo/s/commands/cluster_count_cmd.cpp
+++ b/src/mongo/s/commands/cluster_count_cmd.cpp
@@ -110,7 +110,7 @@ public:
if (status.isOK()) {
collation = collationElement.Obj();
} else if (status != ErrorCodes::NoSuchKey) {
- return CommandHelpers::appendCommandStatus(result, status);
+ uassertStatusOK(status);
}
if (cmdObj["limit"].isNumber()) {
@@ -158,19 +158,13 @@ public:
// Rewrite the count command as an aggregation.
auto countRequest = CountRequest::parseFromBSON(nss, cmdObj, false);
- if (!countRequest.isOK()) {
- return CommandHelpers::appendCommandStatus(result, countRequest.getStatus());
- }
+ uassertStatusOK(countRequest.getStatus());
auto aggCmdOnView = countRequest.getValue().asAggregationCommand();
- if (!aggCmdOnView.isOK()) {
- return CommandHelpers::appendCommandStatus(result, aggCmdOnView.getStatus());
- }
+ uassertStatusOK(aggCmdOnView.getStatus());
auto aggRequestOnView = AggregationRequest::parseFromBSON(nss, aggCmdOnView.getValue());
- if (!aggRequestOnView.isOK()) {
- return CommandHelpers::appendCommandStatus(result, aggRequestOnView.getStatus());
- }
+ uassertStatusOK(aggRequestOnView.getStatus());
auto resolvedAggRequest = ex->asExpandedViewAggregation(aggRequestOnView.getValue());
auto resolvedAggCmd = resolvedAggRequest.serializeToCommandObj().toBson();
@@ -181,9 +175,7 @@ public:
result.resetToEmpty();
ViewResponseFormatter formatter(aggResult);
auto formatStatus = formatter.appendAsCountResponse(&result);
- if (!formatStatus.isOK()) {
- return CommandHelpers::appendCommandStatus(result, formatStatus);
- }
+ uassertStatusOK(formatStatus);
return true;
} catch (const ExceptionFor<ErrorCodes::NamespaceNotFound>&) {
@@ -210,8 +202,7 @@ public:
shardSubTotal.doneFast();
// Add error context so that you can see on which shard failed as well as details
// about that error.
- return CommandHelpers::appendCommandStatus(
- result, status.withContext(str::stream() << "failed on: " << response.shardId));
+ uassertStatusOK(status.withContext(str::stream() << "failed on: " << response.shardId));
}
shardSubTotal.doneFast();
diff --git a/src/mongo/s/commands/cluster_current_op.cpp b/src/mongo/s/commands/cluster_current_op.cpp
index 9c9a1a08730..8d46da6d193 100644
--- a/src/mongo/s/commands/cluster_current_op.cpp
+++ b/src/mongo/s/commands/cluster_current_op.cpp
@@ -85,7 +85,7 @@ private:
return status;
}
- CommandHelpers::appendCommandStatus(responseBuilder, Status::OK());
+ CommandHelpers::appendSimpleCommandStatus(responseBuilder, true);
return CursorResponse::parseFromBSON(responseBuilder.obj());
}
diff --git a/src/mongo/s/commands/cluster_distinct_cmd.cpp b/src/mongo/s/commands/cluster_distinct_cmd.cpp
index 944ab5befce..284e66398ed 100644
--- a/src/mongo/s/commands/cluster_distinct_cmd.cpp
+++ b/src/mongo/s/commands/cluster_distinct_cmd.cpp
@@ -183,19 +183,13 @@ public:
} catch (const ExceptionFor<ErrorCodes::CommandOnShardedViewNotSupportedOnMongod>& ex) {
auto parsedDistinct = ParsedDistinct::parse(
opCtx, ex->getNamespace(), cmdObj, ExtensionsCallbackNoop(), true);
- if (!parsedDistinct.isOK()) {
- return CommandHelpers::appendCommandStatus(result, parsedDistinct.getStatus());
- }
+ uassertStatusOK(parsedDistinct.getStatus());
auto aggCmdOnView = parsedDistinct.getValue().asAggregationCommand();
- if (!aggCmdOnView.isOK()) {
- return CommandHelpers::appendCommandStatus(result, aggCmdOnView.getStatus());
- }
+ uassertStatusOK(aggCmdOnView.getStatus());
auto aggRequestOnView = AggregationRequest::parseFromBSON(nss, aggCmdOnView.getValue());
- if (!aggRequestOnView.isOK()) {
- return CommandHelpers::appendCommandStatus(result, aggRequestOnView.getStatus());
- }
+ uassertStatusOK(aggRequestOnView.getStatus());
auto resolvedAggRequest = ex->asExpandedViewAggregation(aggRequestOnView.getValue());
auto resolvedAggCmd = resolvedAggRequest.serializeToCommandObj().toBson();
@@ -205,9 +199,8 @@ public:
ViewResponseFormatter formatter(aggResult);
auto formatStatus = formatter.appendAsDistinctResponse(&result);
- if (!formatStatus.isOK()) {
- return CommandHelpers::appendCommandStatus(result, formatStatus);
- }
+ uassertStatusOK(formatStatus);
+
return true;
}
diff --git a/src/mongo/s/commands/cluster_find_cmd.cpp b/src/mongo/s/commands/cluster_find_cmd.cpp
index 50050ad9778..c5fbc7e684f 100644
--- a/src/mongo/s/commands/cluster_find_cmd.cpp
+++ b/src/mongo/s/commands/cluster_find_cmd.cpp
@@ -176,7 +176,7 @@ public:
auto status = ClusterAggregate::runAggregate(
opCtx, nsStruct, resolvedAggRequest, resolvedAggCmd, out);
- CommandHelpers::appendCommandStatus(*out, status);
+ uassertStatusOK(status);
return status;
}
}
@@ -192,9 +192,7 @@ public:
const bool isExplain = false;
auto qr = QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain);
- if (!qr.isOK()) {
- return CommandHelpers::appendCommandStatus(result, qr.getStatus());
- }
+ uassertStatusOK(qr.getStatus());
const boost::intrusive_ptr<ExpressionContext> expCtx;
auto cq = CanonicalQuery::canonicalize(opCtx,
@@ -202,9 +200,7 @@ public:
expCtx,
ExtensionsCallbackNoop(),
MatchExpressionParser::kAllowAllSpecialFeatures);
- if (!cq.isOK()) {
- return CommandHelpers::appendCommandStatus(result, cq.getStatus());
- }
+ uassertStatusOK(cq.getStatus());
try {
// Do the work to generate the first batch of results. This blocks waiting to get
@@ -221,14 +217,10 @@ public:
return true;
} catch (const ExceptionFor<ErrorCodes::CommandOnShardedViewNotSupportedOnMongod>& ex) {
auto aggCmdOnView = cq.getValue()->getQueryRequest().asAggregationCommand();
- if (!aggCmdOnView.isOK()) {
- return CommandHelpers::appendCommandStatus(result, aggCmdOnView.getStatus());
- }
+ uassertStatusOK(aggCmdOnView.getStatus());
auto aggRequestOnView = AggregationRequest::parseFromBSON(nss, aggCmdOnView.getValue());
- if (!aggRequestOnView.isOK()) {
- return CommandHelpers::appendCommandStatus(result, aggRequestOnView.getStatus());
- }
+ uassertStatusOK(aggRequestOnView.getStatus());
auto resolvedAggRequest = ex->asExpandedViewAggregation(aggRequestOnView.getValue());
auto resolvedAggCmd = resolvedAggRequest.serializeToCommandObj().toBson();
@@ -243,8 +235,8 @@ public:
auto status = ClusterAggregate::runAggregate(
opCtx, nsStruct, resolvedAggRequest, resolvedAggCmd, &result);
- CommandHelpers::appendCommandStatus(result, status);
- return status.isOK();
+ uassertStatusOK(status);
+ return true;
}
}
diff --git a/src/mongo/s/commands/cluster_getmore_cmd.cpp b/src/mongo/s/commands/cluster_getmore_cmd.cpp
index 5455eb0f1ec..688cacd28ef 100644
--- a/src/mongo/s/commands/cluster_getmore_cmd.cpp
+++ b/src/mongo/s/commands/cluster_getmore_cmd.cpp
@@ -106,15 +106,11 @@ public:
globalOpCounters.gotGetMore();
StatusWith<GetMoreRequest> parseStatus = GetMoreRequest::parseFromBSON(dbname, cmdObj);
- if (!parseStatus.isOK()) {
- return CommandHelpers::appendCommandStatus(result, parseStatus.getStatus());
- }
+ uassertStatusOK(parseStatus.getStatus());
const GetMoreRequest& request = parseStatus.getValue();
auto response = ClusterFind::runGetMore(opCtx, request);
- if (!response.isOK()) {
- return CommandHelpers::appendCommandStatus(result, response.getStatus());
- }
+ uassertStatusOK(response.getStatus());
response.getValue().addToBSON(CursorResponse::ResponseType::SubsequentResponse, &result);
return true;
diff --git a/src/mongo/s/commands/cluster_is_master_cmd.cpp b/src/mongo/s/commands/cluster_is_master_cmd.cpp
index d94f6abb182..9b1220801cc 100644
--- a/src/mongo/s/commands/cluster_is_master_cmd.cpp
+++ b/src/mongo/s/commands/cluster_is_master_cmd.cpp
@@ -85,18 +85,12 @@ public:
BSONElement element = cmdObj[kMetadataDocumentName];
if (!element.eoo()) {
if (seenIsMaster) {
- return CommandHelpers::appendCommandStatus(
- result,
- Status(ErrorCodes::ClientMetadataCannotBeMutated,
- "The client metadata document may only be sent in the first isMaster"));
+ uasserted(ErrorCodes::ClientMetadataCannotBeMutated,
+ "The client metadata document may only be sent in the first isMaster");
}
auto swParseClientMetadata = ClientMetadata::parse(element);
-
- if (!swParseClientMetadata.getStatus().isOK()) {
- return CommandHelpers::appendCommandStatus(result,
- swParseClientMetadata.getStatus());
- }
+ uassertStatusOK(swParseClientMetadata.getStatus());
invariant(swParseClientMetadata.getValue());
diff --git a/src/mongo/s/commands/cluster_kill_op.cpp b/src/mongo/s/commands/cluster_kill_op.cpp
index 3e4b38d5bd8..232380d5dfc 100644
--- a/src/mongo/s/commands/cluster_kill_op.cpp
+++ b/src/mongo/s/commands/cluster_kill_op.cpp
@@ -99,9 +99,7 @@ private:
// Will throw if shard id is not found
auto shardStatus = Grid::get(opCtx)->shardRegistry()->getShard(opCtx, shardIdent);
- if (!shardStatus.isOK()) {
- return CommandHelpers::appendCommandStatus(result, shardStatus.getStatus());
- }
+ uassertStatusOK(shardStatus.getStatus());
auto shard = shardStatus.getValue();
int opId;
diff --git a/src/mongo/s/commands/cluster_map_reduce_cmd.cpp b/src/mongo/s/commands/cluster_map_reduce_cmd.cpp
index 3e01b229262..c98f8d53a52 100644
--- a/src/mongo/s/commands/cluster_map_reduce_cmd.cpp
+++ b/src/mongo/s/commands/cluster_map_reduce_cmd.cpp
@@ -500,9 +500,7 @@ public:
// Take distributed lock to prevent split / migration.
auto scopedDistLock = catalogClient->getDistLockManager()->lock(
opCtx, outputCollNss.ns(), "mr-post-process", kNoDistLockTimeout);
- if (!scopedDistLock.isOK()) {
- return CommandHelpers::appendCommandStatus(result, scopedDistLock.getStatus());
- }
+ uassertStatusOK(scopedDistLock.getStatus());
BSONObj finalCmdObj = appendAllowImplicitCreate(finalCmd.obj(), true);
mrCommandResults.clear();
diff --git a/src/mongo/s/commands/cluster_move_chunk_cmd.cpp b/src/mongo/s/commands/cluster_move_chunk_cmd.cpp
index bbb6ca5caa2..d6b53ca5748 100644
--- a/src/mongo/s/commands/cluster_move_chunk_cmd.cpp
+++ b/src/mongo/s/commands/cluster_move_chunk_cmd.cpp
@@ -121,8 +121,7 @@ public:
<< toString
<< "' because that shard does not exist");
log() << msg;
- return CommandHelpers::appendCommandStatus(result,
- Status(ErrorCodes::ShardNotFound, msg));
+ uasserted(ErrorCodes::ShardNotFound, msg);
}
const auto to = toStatus.getValue();
diff --git a/src/mongo/s/commands/cluster_pipeline_cmd.cpp b/src/mongo/s/commands/cluster_pipeline_cmd.cpp
index bb4c6400ec5..576fbb3460f 100644
--- a/src/mongo/s/commands/cluster_pipeline_cmd.cpp
+++ b/src/mongo/s/commands/cluster_pipeline_cmd.cpp
@@ -80,8 +80,8 @@ public:
const std::string& dbname,
const BSONObj& cmdObj,
BSONObjBuilder& result) override {
- return CommandHelpers::appendCommandStatus(
- result, _runAggCommand(opCtx, dbname, cmdObj, boost::none, &result));
+ uassertStatusOK(_runAggCommand(opCtx, dbname, cmdObj, boost::none, &result));
+ return true;
}
Status explain(OperationContext* opCtx,
diff --git a/src/mongo/s/commands/cluster_set_free_monitoring.cpp b/src/mongo/s/commands/cluster_set_free_monitoring.cpp
index 6a92763d81a..7f664a43aaa 100644
--- a/src/mongo/s/commands/cluster_set_free_monitoring.cpp
+++ b/src/mongo/s/commands/cluster_set_free_monitoring.cpp
@@ -64,7 +64,8 @@ public:
const std::string& dbname,
const BSONObj& cmdObj,
BSONObjBuilder& result) final {
- return CommandHelpers::appendCommandStatus(result, {ErrorCodes::CommandFailed, help()});
+ uasserted(ErrorCodes::CommandFailed, help());
+ return true;
}
} clusterSetFreeMonitoring;
diff --git a/src/mongo/s/commands/cluster_user_management_commands.cpp b/src/mongo/s/commands/cluster_user_management_commands.cpp
index 9d6bf19d73f..f899393c2f5 100644
--- a/src/mongo/s/commands/cluster_user_management_commands.cpp
+++ b/src/mongo/s/commands/cluster_user_management_commands.cpp
@@ -129,9 +129,8 @@ public:
BSONObjBuilder& result) {
auth::CreateOrUpdateUserArgs args;
Status status = auth::parseCreateOrUpdateUserCommands(cmdObj, getName(), dbname, &args);
- if (!status.isOK()) {
- return CommandHelpers::appendCommandStatus(result, status);
- }
+ uassertStatusOK(status);
+
const bool ok = Grid::get(opCtx)->catalogClient()->runUserManagementWriteCommand(
opCtx,
getName(),
@@ -181,9 +180,8 @@ public:
BSONObjBuilder& result) {
UserName userName;
Status status = auth::parseAndValidateDropUserCommand(cmdObj, dbname, &userName);
- if (!status.isOK()) {
- return CommandHelpers::appendCommandStatus(result, status);
- }
+ uassertStatusOK(status);
+
const bool ok = Grid::get(opCtx)->catalogClient()->runUserManagementWriteCommand(
opCtx,
getName(),
@@ -273,9 +271,8 @@ public:
vector<RoleName> roles;
Status status = auth::parseRolePossessionManipulationCommands(
cmdObj, getName(), dbname, &userNameString, &roles);
- if (!status.isOK()) {
- return CommandHelpers::appendCommandStatus(result, status);
- }
+ uassertStatusOK(status);
+
const bool ok = Grid::get(opCtx)->catalogClient()->runUserManagementWriteCommand(
opCtx,
getName(),
@@ -323,9 +320,8 @@ public:
vector<RoleName> unusedRoles;
Status status = auth::parseRolePossessionManipulationCommands(
cmdObj, getName(), dbname, &userNameString, &unusedRoles);
- if (!status.isOK()) {
- return CommandHelpers::appendCommandStatus(result, status);
- }
+ uassertStatusOK(status);
+
const bool ok = Grid::get(opCtx)->catalogClient()->runUserManagementWriteCommand(
opCtx,
getName(),
diff --git a/src/mongo/s/commands/cluster_validate_cmd.cpp b/src/mongo/s/commands/cluster_validate_cmd.cpp
index e73ab7bdd78..505c191c2c0 100644
--- a/src/mongo/s/commands/cluster_validate_cmd.cpp
+++ b/src/mongo/s/commands/cluster_validate_cmd.cpp
@@ -128,7 +128,8 @@ public:
if (firstFailedShardStatus.isOK())
output.appendBool("valid", isValid);
- return CommandHelpers::appendCommandStatus(output, firstFailedShardStatus);
+ uassertStatusOK(firstFailedShardStatus);
+ return true;
}
} validateCmd;
diff --git a/src/mongo/s/write_ops/batched_command_response.cpp b/src/mongo/s/write_ops/batched_command_response.cpp
index 5931afc36be..d08496135bd 100644
--- a/src/mongo/s/write_ops/batched_command_response.cpp
+++ b/src/mongo/s/write_ops/batched_command_response.cpp
@@ -81,7 +81,7 @@ BSONObj BatchedCommandResponse::toBSON() const {
BSONObjBuilder builder;
invariant(_isStatusSet);
- CommandHelpers::appendCommandStatus(builder, _status);
+ uassertStatusOK(_status);
if (_isNModifiedSet)
builder.appendNumber(nModified(), _nModified);
diff --git a/src/mongo/s/write_ops/batched_command_response_test.cpp b/src/mongo/s/write_ops/batched_command_response_test.cpp
index 8f37da45cb2..32dd9ad8e10 100644
--- a/src/mongo/s/write_ops/batched_command_response_test.cpp
+++ b/src/mongo/s/write_ops/batched_command_response_test.cpp
@@ -61,18 +61,21 @@ TEST(BatchedCommandResponse, Basic) {
<< BSON("a" << 1)));
BSONObj origResponseObj =
- BSON("ok" << 1.0 << BatchedCommandResponse::n(0) << "opTime" << mongo::Timestamp(1ULL)
- << BatchedCommandResponse::writeErrors()
- << writeErrorsArray
- << BatchedCommandResponse::writeConcernError()
- << writeConcernError);
+ BSON(BatchedCommandResponse::n(0) << "opTime" << mongo::Timestamp(1ULL)
+ << BatchedCommandResponse::writeErrors()
+ << writeErrorsArray
+ << BatchedCommandResponse::writeConcernError()
+ << writeConcernError
+ << "ok"
+ << 1.0);
string errMsg;
BatchedCommandResponse response;
bool ok = response.parseBSON(origResponseObj, &errMsg);
ASSERT_TRUE(ok);
- BSONObj genResponseObj = response.toBSON();
+ BSONObj genResponseObj = BSONObjBuilder(response.toBSON()).append("ok", 1.0).obj();
+
ASSERT_EQUALS(0, genResponseObj.woCompare(origResponseObj))
<< "\nparsed: " << genResponseObj //
<< "\noriginal: " << origResponseObj;