author    Billy Donahue <billy.donahue@mongodb.com>  2018-01-10 17:09:22 -0500
committer Billy Donahue <billy.donahue@mongodb.com>  2018-01-16 13:52:44 -0500
commit    a6e45f0e35724ae7958e31fc141c89afcefe4d33 (patch)
tree      5ac0bf28fc1b977078845cbff966ff3427cfaa92 /src/mongo/db
parent    1378f8ec42068913c5bdc5927bbe86d5aed08814 (diff)
download  mongo-a6e45f0e35724ae7958e31fc141c89afcefe4d33.tar.gz
SERVER-32646 CommandHelpers
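
This patch moves the static helpers that previously lived on the Command class (appendCommandStatus, appendCommandWCStatus, the parseNs* helpers, appendPassthroughFields, appendMajorityWriteConcern, isGenericArgument, isUserManagementCommand, the filterCommand*ForPassthrough functions, isHelpRequest, findCommand, and runCommandDirectly) into the CommandHelpers struct, and updates call sites throughout src/mongo/db to qualify them accordingly. A minimal sketch of the resulting call-site pattern, assuming a hypothetical command (doWork() below is illustrative, not a function in this tree):

    // Hypothetical run() body: the helpers keep their signatures; only the
    // owning type changes from Command to CommandHelpers.
    bool run(OperationContext* opCtx,
             const std::string& dbname,
             const BSONObj& cmdObj,
             BSONObjBuilder& result) {
        // was: Command::parseNsCollectionRequired(dbname, cmdObj)
        const NamespaceString nss(
            CommandHelpers::parseNsCollectionRequired(dbname, cmdObj));

        Status status = doWork(opCtx, nss, cmdObj);  // doWork() is hypothetical

        // was: appendCommandStatus(result, status), inherited from Command
        return CommandHelpers::appendCommandStatus(result, status);
    }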
Diffstat (limited to 'src/mongo/db')
-rw-r--r--  src/mongo/db/auth/sasl_commands.cpp | 8
-rw-r--r--  src/mongo/db/auth/user_management_commands_parser.cpp | 2
-rw-r--r--  src/mongo/db/catalog/coll_mod.cpp | 2
-rw-r--r--  src/mongo/db/catalog/collection_options.cpp | 2
-rw-r--r--  src/mongo/db/catalog/create_collection.cpp | 6
-rw-r--r--  src/mongo/db/commands.cpp | 381
-rw-r--r--  src/mongo/db/commands.h | 283
-rw-r--r--  src/mongo/db/commands/apply_ops_cmd.cpp | 20
-rw-r--r--  src/mongo/db/commands/authentication_commands.cpp | 8
-rw-r--r--  src/mongo/db/commands/clone.cpp | 2
-rw-r--r--  src/mongo/db/commands/clone_collection.cpp | 4
-rw-r--r--  src/mongo/db/commands/collection_to_capped.cpp | 10
-rw-r--r--  src/mongo/db/commands/compact.cpp | 8
-rw-r--r--  src/mongo/db/commands/connection_status.cpp | 2
-rw-r--r--  src/mongo/db/commands/copydb.cpp | 2
-rw-r--r--  src/mongo/db/commands/copydb_start_commands.cpp | 6
-rw-r--r--  src/mongo/db/commands/count_cmd.cpp | 18
-rw-r--r--  src/mongo/db/commands/create_indexes.cpp | 21
-rw-r--r--  src/mongo/db/commands/current_op.cpp | 2
-rw-r--r--  src/mongo/db/commands/current_op_common.cpp | 4
-rw-r--r--  src/mongo/db/commands/dbcheck.cpp | 2
-rw-r--r--  src/mongo/db/commands/dbcommands.cpp | 72
-rw-r--r--  src/mongo/db/commands/distinct.cpp | 24
-rw-r--r--  src/mongo/db/commands/do_txn_cmd.cpp | 5
-rw-r--r--  src/mongo/db/commands/drop_indexes.cpp | 15
-rw-r--r--  src/mongo/db/commands/end_sessions_command.cpp | 3
-rw-r--r--  src/mongo/db/commands/explain_cmd.cpp | 14
-rw-r--r--  src/mongo/db/commands/feature_compatibility_version_command_parser.cpp | 2
-rw-r--r--  src/mongo/db/commands/find_and_modify.cpp | 40
-rw-r--r--  src/mongo/db/commands/find_cmd.cpp | 29
-rw-r--r--  src/mongo/db/commands/fsync.cpp | 2
-rw-r--r--  src/mongo/db/commands/generic.cpp | 6
-rw-r--r--  src/mongo/db/commands/geo_near_cmd.cpp | 14
-rw-r--r--  src/mongo/db/commands/get_last_error.cpp | 12
-rw-r--r--  src/mongo/db/commands/getmore_cmd.cpp | 23
-rw-r--r--  src/mongo/db/commands/group_cmd.cpp | 18
-rw-r--r--  src/mongo/db/commands/haystack.cpp | 2
-rw-r--r--  src/mongo/db/commands/index_filter_commands.cpp | 4
-rw-r--r--  src/mongo/db/commands/kill_all_sessions_by_pattern_command.cpp | 8
-rw-r--r--  src/mongo/db/commands/kill_all_sessions_command.cpp | 6
-rw-r--r--  src/mongo/db/commands/kill_sessions_command.cpp | 6
-rw-r--r--  src/mongo/db/commands/killcursors_common.cpp | 2
-rw-r--r--  src/mongo/db/commands/list_collections.cpp | 10
-rw-r--r--  src/mongo/db/commands/list_databases.cpp | 12
-rw-r--r--  src/mongo/db/commands/list_indexes.cpp | 13
-rw-r--r--  src/mongo/db/commands/mr.cpp | 13
-rw-r--r--  src/mongo/db/commands/oplog_application_checks.cpp | 2
-rw-r--r--  src/mongo/db/commands/oplog_note.cpp | 18
-rw-r--r--  src/mongo/db/commands/parallel_collection_scan.cpp | 23
-rw-r--r--  src/mongo/db/commands/parameters.cpp | 2
-rw-r--r--  src/mongo/db/commands/pipeline_command.cpp | 13
-rw-r--r--  src/mongo/db/commands/plan_cache_commands.cpp | 4
-rw-r--r--  src/mongo/db/commands/reap_logical_session_cache_now.cpp | 2
-rw-r--r--  src/mongo/db/commands/refresh_logical_session_cache_now.cpp | 5
-rw-r--r--  src/mongo/db/commands/refresh_sessions_command.cpp | 5
-rw-r--r--  src/mongo/db/commands/refresh_sessions_command_internal.cpp | 2
-rw-r--r--  src/mongo/db/commands/rename_collection_cmd.cpp | 10
-rw-r--r--  src/mongo/db/commands/repair_cursor.cpp | 4
-rw-r--r--  src/mongo/db/commands/resize_oplog.cpp | 16
-rw-r--r--  src/mongo/db/commands/set_feature_compatibility_version_command.cpp | 18
-rw-r--r--  src/mongo/db/commands/snapshot_management.cpp | 8
-rw-r--r--  src/mongo/db/commands/start_session_command.cpp | 5
-rw-r--r--  src/mongo/db/commands/test_commands.cpp | 24
-rw-r--r--  src/mongo/db/commands/touch.cpp | 6
-rw-r--r--  src/mongo/db/commands/user_management_commands.cpp | 272
-rw-r--r--  src/mongo/db/commands/validate.cpp | 13
-rw-r--r--  src/mongo/db/commands_test.cpp | 23
-rw-r--r--  src/mongo/db/dbdirectclient.cpp | 2
-rw-r--r--  src/mongo/db/exec/stagedebug_cmd.cpp | 12
-rw-r--r--  src/mongo/db/ftdc/ftdc_server.cpp | 4
-rw-r--r--  src/mongo/db/ops/write_ops_exec.cpp | 2
-rw-r--r--  src/mongo/db/pipeline/aggregation_request.cpp | 2
-rw-r--r--  src/mongo/db/query/getmore_request.cpp | 2
-rw-r--r--  src/mongo/db/query/query_request.cpp | 2
-rw-r--r--  src/mongo/db/repl/master_slave.cpp | 4
-rw-r--r--  src/mongo/db/repl/oplog.cpp | 2
-rw-r--r--  src/mongo/db/repl/repl_set_commands.cpp | 105
-rw-r--r--  src/mongo/db/repl/repl_set_request_votes.cpp | 6
-rw-r--r--  src/mongo/db/repl/replication_info.cpp | 5
-rw-r--r--  src/mongo/db/repl/resync.cpp | 8
-rw-r--r--  src/mongo/db/repl/rs_rollback_no_uuid.cpp | 2
-rw-r--r--  src/mongo/db/s/balancer/migration_manager_test.cpp | 2
-rw-r--r--  src/mongo/db/s/check_sharding_index_command.cpp | 4
-rw-r--r--  src/mongo/db/s/config/configsvr_add_shard_command.cpp | 8
-rw-r--r--  src/mongo/db/s/config/configsvr_commit_chunk_migration_command.cpp | 4
-rw-r--r--  src/mongo/db/s/config/configsvr_create_database_command.cpp | 2
-rw-r--r--  src/mongo/db/s/config/configsvr_drop_collection_command.cpp | 4
-rw-r--r--  src/mongo/db/s/config/configsvr_drop_database_command.cpp | 2
-rw-r--r--  src/mongo/db/s/config/configsvr_enable_sharding_command.cpp | 9
-rw-r--r--  src/mongo/db/s/config/configsvr_merge_chunk_command.cpp | 4
-rw-r--r--  src/mongo/db/s/config/configsvr_move_primary_command.cpp | 8
-rw-r--r--  src/mongo/db/s/config/configsvr_remove_shard_command.cpp | 5
-rw-r--r--  src/mongo/db/s/config/configsvr_shard_collection_command.cpp | 2
-rw-r--r--  src/mongo/db/s/config/configsvr_split_chunk_command.cpp | 2
-rw-r--r--  src/mongo/db/s/flush_routing_table_cache_updates_command.cpp | 2
-rw-r--r--  src/mongo/db/s/get_shard_version_command.cpp | 2
-rw-r--r--  src/mongo/db/s/merge_chunks_command.cpp | 4
-rw-r--r--  src/mongo/db/s/migration_destination_manager_legacy_commands.cpp | 4
-rw-r--r--  src/mongo/db/s/move_chunk_command.cpp | 4
-rw-r--r--  src/mongo/db/s/split_chunk_command.cpp | 4
-rw-r--r--  src/mongo/db/s/split_vector_command.cpp | 4
-rw-r--r--  src/mongo/db/service_entry_point_mongod.cpp | 24
-rw-r--r--  src/mongo/db/sessions_collection_config_server.cpp | 2
103 files changed, 970 insertions, 917 deletions
diff --git a/src/mongo/db/auth/sasl_commands.cpp b/src/mongo/db/auth/sasl_commands.cpp
index bd4e4e04c5f..0d762e5e38c 100644
--- a/src/mongo/db/auth/sasl_commands.cpp
+++ b/src/mongo/db/auth/sasl_commands.cpp
@@ -285,7 +285,7 @@ bool CmdSaslStart::run(OperationContext* opCtx,
session->setOpCtxt(opCtx);
Status status = doSaslStart(client, session, db, cmdObj, &result);
- appendCommandStatus(result, status);
+ CommandHelpers::appendCommandStatus(result, status);
if (session->isDone()) {
audit::logAuthentication(client,
@@ -314,7 +314,7 @@ bool CmdSaslContinue::run(OperationContext* opCtx,
AuthenticationSession::swap(client, sessionGuard);
if (!sessionGuard || sessionGuard->getType() != AuthenticationSession::SESSION_TYPE_SASL) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result, Status(ErrorCodes::ProtocolError, "No SASL session state found"));
}
@@ -324,7 +324,7 @@ bool CmdSaslContinue::run(OperationContext* opCtx,
// Authenticating the __system@local user to the admin database on mongos is required
// by the auth passthrough test suite.
if (session->getAuthenticationDatabase() != db && !Command::testCommandsEnabled) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
Status(ErrorCodes::ProtocolError,
"Attempt to switch database target during SASL authentication."));
@@ -333,7 +333,7 @@ bool CmdSaslContinue::run(OperationContext* opCtx,
session->setOpCtxt(opCtx);
Status status = doSaslContinue(client, session, cmdObj, &result);
- appendCommandStatus(result, status);
+ CommandHelpers::appendCommandStatus(result, status);
if (session->isDone()) {
audit::logAuthentication(client,
diff --git a/src/mongo/db/auth/user_management_commands_parser.cpp b/src/mongo/db/auth/user_management_commands_parser.cpp
index 5d7f682579e..52d33822be0 100644
--- a/src/mongo/db/auth/user_management_commands_parser.cpp
+++ b/src/mongo/db/auth/user_management_commands_parser.cpp
@@ -62,7 +62,7 @@ Status _checkNoExtraFields(const BSONObj& cmdObj,
// ones.
for (BSONObjIterator iter(cmdObj); iter.more(); iter.next()) {
StringData fieldName = (*iter).fieldNameStringData();
- if (!Command::isGenericArgument(fieldName) &&
+ if (!CommandHelpers::isGenericArgument(fieldName) &&
!validFieldNames.count(fieldName.toString())) {
return Status(ErrorCodes::BadValue,
mongoutils::str::stream() << "\"" << fieldName << "\" is not "
diff --git a/src/mongo/db/catalog/coll_mod.cpp b/src/mongo/db/catalog/coll_mod.cpp
index b264a01da5c..d46be96b4e4 100644
--- a/src/mongo/db/catalog/coll_mod.cpp
+++ b/src/mongo/db/catalog/coll_mod.cpp
@@ -89,7 +89,7 @@ StatusWith<CollModRequest> parseCollModRequest(OperationContext* opCtx,
BSONForEach(e, cmdObj) {
const auto fieldName = e.fieldNameStringData();
- if (Command::isGenericArgument(fieldName)) {
+ if (CommandHelpers::isGenericArgument(fieldName)) {
continue; // Don't add to oplog builder.
} else if (fieldName == "collMod") {
// no-op
diff --git a/src/mongo/db/catalog/collection_options.cpp b/src/mongo/db/catalog/collection_options.cpp
index 52724289782..9ca8ab1b584 100644
--- a/src/mongo/db/catalog/collection_options.cpp
+++ b/src/mongo/db/catalog/collection_options.cpp
@@ -241,7 +241,7 @@ Status CollectionOptions::parse(const BSONObj& options, ParseKind kind) {
}
pipeline = e.Obj().getOwned();
- } else if (!createdOn24OrEarlier && !Command::isGenericArgument(fieldName)) {
+ } else if (!createdOn24OrEarlier && !CommandHelpers::isGenericArgument(fieldName)) {
return Status(ErrorCodes::InvalidOptions,
str::stream() << "The field '" << fieldName
<< "' is not a valid collection option. Options: "
diff --git a/src/mongo/db/catalog/create_collection.cpp b/src/mongo/db/catalog/create_collection.cpp
index a9bfb2ee7f5..3b5ed3f50dc 100644
--- a/src/mongo/db/catalog/create_collection.cpp
+++ b/src/mongo/db/catalog/create_collection.cpp
@@ -72,7 +72,7 @@ Status createCollection(OperationContext* opCtx,
BSONObjBuilder optionsBuilder;
while (it.more()) {
const auto elem = it.next();
- if (!Command::isGenericArgument(elem.fieldNameStringData()))
+ if (!CommandHelpers::isGenericArgument(elem.fieldNameStringData()))
optionsBuilder.append(elem);
if (elem.fieldNameStringData() == "viewOn") {
// Views don't have UUIDs so it should always be parsed for command.
@@ -119,7 +119,7 @@ Status createCollection(OperationContext* opCtx,
const BSONObj& cmdObj,
const BSONObj& idIndex) {
return createCollection(opCtx,
- Command::parseNsCollectionRequired(dbName, cmdObj),
+ CommandHelpers::parseNsCollectionRequired(dbName, cmdObj),
cmdObj,
idIndex,
CollectionOptions::parseForCommand);
@@ -132,7 +132,7 @@ Status createCollectionForApplyOps(OperationContext* opCtx,
const BSONObj& idIndex) {
invariant(opCtx->lockState()->isDbLockedForMode(dbName, MODE_X));
- const NamespaceString newCollName(Command::parseNsCollectionRequired(dbName, cmdObj));
+ const NamespaceString newCollName(CommandHelpers::parseNsCollectionRequired(dbName, cmdObj));
auto newCmd = cmdObj;
auto* const serviceContext = opCtx->getServiceContext();
diff --git a/src/mongo/db/commands.cpp b/src/mongo/db/commands.cpp
index 25ee7429189..b7b67b5092d 100644
--- a/src/mongo/db/commands.cpp
+++ b/src/mongo/db/commands.cpp
@@ -73,58 +73,30 @@ const WriteConcernOptions kMajorityWriteConcern(
} // namespace
-Command::~Command() = default;
-
-BSONObj Command::appendPassthroughFields(const BSONObj& cmdObjWithPassthroughFields,
- const BSONObj& request) {
- BSONObjBuilder b;
- b.appendElements(request);
- for (const auto& elem :
- Command::filterCommandRequestForPassthrough(cmdObjWithPassthroughFields)) {
- const auto name = elem.fieldNameStringData();
- if (Command::isGenericArgument(name) && !request.hasField(name)) {
- b.append(elem);
- }
- }
- return b.obj();
-}
-
-BSONObj Command::appendMajorityWriteConcern(const BSONObj& cmdObj) {
-
- WriteConcernOptions newWC = kMajorityWriteConcern;
-
- if (cmdObj.hasField(kWriteConcernField)) {
- auto wc = cmdObj.getField(kWriteConcernField);
- // The command has a writeConcern field and it's majority, so we can
- // return it as-is.
- if (wc["w"].ok() && wc["w"].str() == "majority") {
- return cmdObj;
- }
- if (wc["wtimeout"].ok()) {
- // They set a timeout, but aren't using majority WC. We want to use their
- // timeout along with majority WC.
- newWC = WriteConcernOptions(WriteConcernOptions::kMajority,
- WriteConcernOptions::SyncMode::UNSET,
- wc["wtimeout"].Number());
- }
- }
+//////////////////////////////////////////////////////////////
+// CommandHelpers
- // Append all original fields except the writeConcern field to the new command.
- BSONObjBuilder cmdObjWithWriteConcern;
- for (const auto& elem : cmdObj) {
- const auto name = elem.fieldNameStringData();
- if (name != "writeConcern" && !cmdObjWithWriteConcern.hasField(name)) {
- cmdObjWithWriteConcern.append(elem);
- }
+BSONObj CommandHelpers::runCommandDirectly(OperationContext* opCtx, const OpMsgRequest& request) {
+ auto command = globalCommandRegistry()->findCommand(request.getCommandName());
+ invariant(command);
+ BSONObjBuilder out;
+ try {
+ bool ok = command->publicRun(opCtx, request, out);
+ appendCommandStatus(out, ok);
+ } catch (const StaleConfigException&) {
+ // These exceptions are intended to be handled at a higher level and cannot losslessly
+ // round-trip through Status.
+ throw;
+ } catch (const DBException& ex) {
+ out.resetToEmpty();
+ appendCommandStatus(out, ex.toStatus());
}
-
- // Finally, add the new write concern.
- cmdObjWithWriteConcern.append(kWriteConcernField, newWC.toBSON());
- return cmdObjWithWriteConcern.obj();
+ return out.obj();
}
-std::string Command::parseNsFullyQualified(const std::string& dbname, const BSONObj& cmdObj) {
+std::string CommandHelpers::parseNsFullyQualified(const std::string& dbname,
+ const BSONObj& cmdObj) {
BSONElement first = cmdObj.firstElement();
uassert(ErrorCodes::BadValue,
str::stream() << "collection name has invalid type " << typeName(first.type()),
@@ -136,8 +108,8 @@ std::string Command::parseNsFullyQualified(const std::string& dbname, const BSON
return nss.ns();
}
-NamespaceString Command::parseNsCollectionRequired(const std::string& dbname,
- const BSONObj& cmdObj) {
+NamespaceString CommandHelpers::parseNsCollectionRequired(const std::string& dbname,
+ const BSONObj& cmdObj) {
// Accepts both BSON String and Symbol for collection name per SERVER-16260
// TODO(kangas) remove Symbol support in MongoDB 3.0 after Ruby driver audit
BSONElement first = cmdObj.firstElement();
@@ -151,9 +123,9 @@ NamespaceString Command::parseNsCollectionRequired(const std::string& dbname,
return nss;
}
-NamespaceString Command::parseNsOrUUID(OperationContext* opCtx,
- const std::string& dbname,
- const BSONObj& cmdObj) {
+NamespaceString CommandHelpers::parseNsOrUUID(OperationContext* opCtx,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
BSONElement first = cmdObj.firstElement();
if (first.type() == BinData && first.binDataType() == BinDataType::newUUID) {
UUIDCatalog& catalog = UUIDCatalog::get(opCtx);
@@ -178,51 +150,11 @@ NamespaceString Command::parseNsOrUUID(OperationContext* opCtx,
}
}
-std::string Command::parseNs(const std::string& dbname, const BSONObj& cmdObj) const {
- BSONElement first = cmdObj.firstElement();
- if (first.type() != mongo::String)
- return dbname;
-
- return str::stream() << dbname << '.' << cmdObj.firstElement().valueStringData();
-}
-
-ResourcePattern Command::parseResourcePattern(const std::string& dbname,
- const BSONObj& cmdObj) const {
- const std::string ns = parseNs(dbname, cmdObj);
- if (!NamespaceString::validCollectionComponent(ns)) {
- return ResourcePattern::forDatabaseName(ns);
- }
- return ResourcePattern::forExactNamespace(NamespaceString(ns));
-}
-
-Command::Command(StringData name, StringData oldName)
- : _name(name.toString()),
- _commandsExecutedMetric("commands." + _name + ".total", &_commandsExecuted),
- _commandsFailedMetric("commands." + _name + ".failed", &_commandsFailed) {
- globalCommandRegistry()->registerCommand(this, name, oldName);
-}
-
-void Command::help(std::stringstream& help) const {
- help << "no help defined";
-}
-
-Status Command::explain(OperationContext* opCtx,
- const std::string& dbname,
- const BSONObj& cmdObj,
- ExplainOptions::Verbosity verbosity,
- BSONObjBuilder* out) const {
- return {ErrorCodes::IllegalOperation, str::stream() << "Cannot explain cmd: " << getName()};
-}
-
-BSONObj Command::runCommandDirectly(OperationContext* opCtx, const OpMsgRequest& request) {
- return CommandHelpers::runCommandDirectly(opCtx, request);
-}
-
-Command* Command::findCommand(StringData name) {
+Command* CommandHelpers::findCommand(StringData name) {
return globalCommandRegistry()->findCommand(name);
}
-bool Command::appendCommandStatus(BSONObjBuilder& result, const Status& status) {
+bool CommandHelpers::appendCommandStatus(BSONObjBuilder& result, const Status& status) {
appendCommandStatus(result, status.isOK(), status.reason());
BSONObj tmp = result.asTempObj();
if (!status.isOK() && !tmp.hasField("code")) {
@@ -235,7 +167,9 @@ bool Command::appendCommandStatus(BSONObjBuilder& result, const Status& status)
return status.isOK();
}
-void Command::appendCommandStatus(BSONObjBuilder& result, bool ok, const std::string& errmsg) {
+void CommandHelpers::appendCommandStatus(BSONObjBuilder& result,
+ bool ok,
+ const std::string& errmsg) {
BSONObj tmp = result.asTempObj();
bool have_ok = tmp.hasField("ok");
bool need_errmsg = !ok && !tmp.hasField("errmsg");
@@ -248,9 +182,9 @@ void Command::appendCommandStatus(BSONObjBuilder& result, bool ok, const std::st
}
}
-void Command::appendCommandWCStatus(BSONObjBuilder& result,
- const Status& awaitReplicationStatus,
- const WriteConcernResult& wcResult) {
+void CommandHelpers::appendCommandWCStatus(BSONObjBuilder& result,
+ const Status& awaitReplicationStatus,
+ const WriteConcernResult& wcResult) {
if (!awaitReplicationStatus.isOK() && !result.hasField("writeConcernError")) {
WriteConcernErrorDetail wcError;
wcError.setErrCode(awaitReplicationStatus.code());
@@ -262,6 +196,165 @@ void Command::appendCommandWCStatus(BSONObjBuilder& result,
}
}
+BSONObj CommandHelpers::appendPassthroughFields(const BSONObj& cmdObjWithPassthroughFields,
+ const BSONObj& request) {
+ BSONObjBuilder b;
+ b.appendElements(request);
+ for (const auto& elem : filterCommandRequestForPassthrough(cmdObjWithPassthroughFields)) {
+ const auto name = elem.fieldNameStringData();
+ if (isGenericArgument(name) && !request.hasField(name)) {
+ b.append(elem);
+ }
+ }
+ return b.obj();
+}
+
+BSONObj CommandHelpers::appendMajorityWriteConcern(const BSONObj& cmdObj) {
+ WriteConcernOptions newWC = kMajorityWriteConcern;
+
+ if (cmdObj.hasField(kWriteConcernField)) {
+ auto wc = cmdObj.getField(kWriteConcernField);
+ // The command has a writeConcern field and it's majority, so we can
+ // return it as-is.
+ if (wc["w"].ok() && wc["w"].str() == "majority") {
+ return cmdObj;
+ }
+
+ if (wc["wtimeout"].ok()) {
+ // They set a timeout, but aren't using majority WC. We want to use their
+ // timeout along with majority WC.
+ newWC = WriteConcernOptions(WriteConcernOptions::kMajority,
+ WriteConcernOptions::SyncMode::UNSET,
+ wc["wtimeout"].Number());
+ }
+ }
+
+ // Append all original fields except the writeConcern field to the new command.
+ BSONObjBuilder cmdObjWithWriteConcern;
+ for (const auto& elem : cmdObj) {
+ const auto name = elem.fieldNameStringData();
+ if (name != "writeConcern" && !cmdObjWithWriteConcern.hasField(name)) {
+ cmdObjWithWriteConcern.append(elem);
+ }
+ }
+
+ // Finally, add the new write concern.
+ cmdObjWithWriteConcern.append(kWriteConcernField, newWC.toBSON());
+ return cmdObjWithWriteConcern.obj();
+}
+
+namespace {
+const stdx::unordered_set<std::string> userManagementCommands{"createUser",
+ "updateUser",
+ "dropUser",
+ "dropAllUsersFromDatabase",
+ "grantRolesToUser",
+ "revokeRolesFromUser",
+ "createRole",
+ "updateRole",
+ "dropRole",
+ "dropAllRolesFromDatabase",
+ "grantPrivilegesToRole",
+ "revokePrivilegesFromRole",
+ "grantRolesToRole",
+ "revokeRolesFromRole",
+ "_mergeAuthzCollections"};
+} // namespace
+
+bool CommandHelpers::isUserManagementCommand(const std::string& name) {
+ return userManagementCommands.count(name);
+}
+
+BSONObj CommandHelpers::filterCommandRequestForPassthrough(const BSONObj& cmdObj) {
+ BSONObjBuilder bob;
+ for (auto elem : cmdObj) {
+ const auto name = elem.fieldNameStringData();
+ if (name == "$readPreference") {
+ BSONObjBuilder(bob.subobjStart("$queryOptions")).append(elem);
+ } else if (!isGenericArgument(name) || //
+ name == "$queryOptions" || //
+ name == "maxTimeMS" || //
+ name == "readConcern" || //
+ name == "writeConcern" || //
+ name == "lsid" || //
+ name == "txnNumber") {
+ // This is the whitelist of generic arguments that commands can be trusted to blindly
+ // forward to the shards.
+ bob.append(elem);
+ }
+ }
+ return bob.obj();
+}
+
+void CommandHelpers::filterCommandReplyForPassthrough(const BSONObj& cmdObj,
+ BSONObjBuilder* output) {
+ for (auto elem : cmdObj) {
+ const auto name = elem.fieldNameStringData();
+ if (name == "$configServerState" || //
+ name == "$gleStats" || //
+ name == "$clusterTime" || //
+ name == "$oplogQueryData" || //
+ name == "$replData" || //
+ name == "operationTime") {
+ continue;
+ }
+ output->append(elem);
+ }
+}
+
+BSONObj CommandHelpers::filterCommandReplyForPassthrough(const BSONObj& cmdObj) {
+ BSONObjBuilder bob;
+ filterCommandReplyForPassthrough(cmdObj, &bob);
+ return bob.obj();
+}
+
+bool CommandHelpers::isHelpRequest(const BSONElement& helpElem) {
+ return !helpElem.eoo() && helpElem.trueValue();
+}
+
+constexpr StringData CommandHelpers::kHelpFieldName;
+
+//////////////////////////////////////////////////////////////
+// Command
+
+Command::~Command() = default;
+
+std::string Command::parseNs(const std::string& dbname, const BSONObj& cmdObj) const {
+ BSONElement first = cmdObj.firstElement();
+ if (first.type() != mongo::String)
+ return dbname;
+
+ return str::stream() << dbname << '.' << cmdObj.firstElement().valueStringData();
+}
+
+ResourcePattern Command::parseResourcePattern(const std::string& dbname,
+ const BSONObj& cmdObj) const {
+ const std::string ns = parseNs(dbname, cmdObj);
+ if (!NamespaceString::validCollectionComponent(ns)) {
+ return ResourcePattern::forDatabaseName(ns);
+ }
+ return ResourcePattern::forExactNamespace(NamespaceString(ns));
+}
+
+Command::Command(StringData name, StringData oldName)
+ : _name(name.toString()),
+ _commandsExecutedMetric("commands." + _name + ".total", &_commandsExecuted),
+ _commandsFailedMetric("commands." + _name + ".failed", &_commandsFailed) {
+ globalCommandRegistry()->registerCommand(this, name, oldName);
+}
+
+void Command::help(std::stringstream& help) const {
+ help << "no help defined";
+}
+
+Status Command::explain(OperationContext* opCtx,
+ const std::string& dbname,
+ const BSONObj& cmdObj,
+ ExplainOptions::Verbosity verbosity,
+ BSONObjBuilder* out) const {
+ return {ErrorCodes::IllegalOperation, str::stream() << "Cannot explain cmd: " << getName()};
+}
+
Status BasicCommand::checkAuthForRequest(OperationContext* opCtx, const OpMsgRequest& request) {
uassertNoDocumentSequences(request);
return checkAuthForOperation(opCtx, request.getDatabase().toString(), request.body);
@@ -372,12 +465,6 @@ bool Command::publicRun(OperationContext* opCtx,
}
}
-bool Command::isHelpRequest(const BSONElement& helpElem) {
- return !helpElem.eoo() && helpElem.trueValue();
-}
-
-const char Command::kHelpFieldName[] = "help";
-
void Command::generateHelpResponse(OperationContext* opCtx,
rpc::ReplyBuilderInterface* replyBuilder,
const Command& command) {
@@ -391,28 +478,6 @@ void Command::generateHelpResponse(OperationContext* opCtx,
replyBuilder->setMetadata(rpc::makeEmptyMetadata());
}
-namespace {
-const stdx::unordered_set<std::string> userManagementCommands{"createUser",
- "updateUser",
- "dropUser",
- "dropAllUsersFromDatabase",
- "grantRolesToUser",
- "revokeRolesFromUser",
- "createRole",
- "updateRole",
- "dropRole",
- "dropAllRolesFromDatabase",
- "grantPrivilegesToRole",
- "revokePrivilegesFromRole",
- "grantRolesToRole",
- "revokeRolesFromRole",
- "_mergeAuthzCollections"};
-} // namespace
-
-bool Command::isUserManagementCommand(const std::string& name) {
- return userManagementCommands.count(name);
-}
-
void BasicCommand::uassertNoDocumentSequences(const OpMsgRequest& request) {
uassert(40472,
str::stream() << "The " << getName() << " command does not support document sequences.",
@@ -433,52 +498,13 @@ bool ErrmsgCommandDeprecated::run(OperationContext* opCtx,
std::string errmsg;
auto ok = errmsgRun(opCtx, db, cmdObj, errmsg, result);
if (!errmsg.empty()) {
- appendCommandStatus(result, ok, errmsg);
+ CommandHelpers::appendCommandStatus(result, ok, errmsg);
}
return ok;
}
-BSONObj Command::filterCommandRequestForPassthrough(const BSONObj& cmdObj) {
- BSONObjBuilder bob;
- for (auto elem : cmdObj) {
- const auto name = elem.fieldNameStringData();
- if (name == "$readPreference") {
- BSONObjBuilder(bob.subobjStart("$queryOptions")).append(elem);
- } else if (!Command::isGenericArgument(name) || //
- name == "$queryOptions" || //
- name == "maxTimeMS" || //
- name == "readConcern" || //
- name == "writeConcern" || //
- name == "lsid" || //
- name == "txnNumber") {
- // This is the whitelist of generic arguments that commands can be trusted to blindly
- // forward to the shards.
- bob.append(elem);
- }
- }
- return bob.obj();
-}
-
-void Command::filterCommandReplyForPassthrough(const BSONObj& cmdObj, BSONObjBuilder* output) {
- for (auto elem : cmdObj) {
- const auto name = elem.fieldNameStringData();
- if (name == "$configServerState" || //
- name == "$gleStats" || //
- name == "$clusterTime" || //
- name == "$oplogQueryData" || //
- name == "$replData" || //
- name == "operationTime") {
- continue;
- }
- output->append(elem);
- }
-}
-
-BSONObj Command::filterCommandReplyForPassthrough(const BSONObj& cmdObj) {
- BSONObjBuilder bob;
- filterCommandReplyForPassthrough(cmdObj, &bob);
- return bob.obj();
-}
+//////////////////////////////////////////////////////////////
+// CommandRegistry
void CommandRegistry::registerCommand(Command* command, StringData name, StringData oldName) {
for (StringData key : {name, oldName}) {
@@ -499,25 +525,6 @@ Command* CommandRegistry::findCommand(StringData name) const {
return it->second;
}
-BSONObj CommandHelpers::runCommandDirectly(OperationContext* opCtx, const OpMsgRequest& request) {
- auto command = globalCommandRegistry()->findCommand(request.getCommandName());
- invariant(command);
-
- BSONObjBuilder out;
- try {
- bool ok = command->publicRun(opCtx, request, out);
- Command::appendCommandStatus(out, ok);
- } catch (const StaleConfigException&) {
- // These exceptions are intended to be handled at a higher level and cannot losslessly
- // round-trip through Status.
- throw;
- } catch (const DBException& ex) {
- out.resetToEmpty();
- Command::appendCommandStatus(out, ex.toStatus());
- }
- return out.obj();
-}
-
CommandRegistry* globalCommandRegistry() {
static auto reg = new CommandRegistry();
return reg;
diff --git a/src/mongo/db/commands.h b/src/mongo/db/commands.h
index 37693df5455..f6c987d03b6 100644
--- a/src/mongo/db/commands.h
+++ b/src/mongo/db/commands.h
@@ -56,6 +56,132 @@ namespace mutablebson {
class Document;
} // namespace mutablebson
+class Command;
+
+// Various helpers unrelated to any single command or to the command registry.
+// Would be a namespace, but want to keep it closed rather than open.
+// Some of these may move to the BasicCommand shim if they are only for legacy implementations.
+struct CommandHelpers {
+ // The type of the first field in 'cmdObj' must be mongo::String. The first field is
+ // interpreted as a collection name.
+ static std::string parseNsFullyQualified(const std::string& dbname, const BSONObj& cmdObj);
+
+ // The type of the first field in 'cmdObj' must be mongo::String or Symbol.
+ // The first field is interpreted as a collection name.
+ static NamespaceString parseNsCollectionRequired(const std::string& dbname,
+ const BSONObj& cmdObj);
+
+ static NamespaceString parseNsOrUUID(OperationContext* opCtx,
+ const std::string& dbname,
+ const BSONObj& cmdObj);
+
+ static Command* findCommand(StringData name);
+
+ // Helper for setting errmsg and ok field in command result object.
+ static void appendCommandStatus(BSONObjBuilder& result,
+ bool ok,
+ const std::string& errmsg = {});
+ // @return s.isOK()
+ static bool appendCommandStatus(BSONObjBuilder& result, const Status& status);
+ /**
+ * Helper for setting a writeConcernError field in the command result object if
+ * a writeConcern error occurs.
+ *
+ * @param result is the BSONObjBuilder for the command response. This function creates the
+ * writeConcernError field for the response.
+ * @param awaitReplicationStatus is the status received from awaitReplication.
+ * @param wcResult is the writeConcernResult object that holds other write concern information.
+ * This is primarily used for populating errInfo when a timeout occurs, and is populated
+ * by waitForWriteConcern.
+ */
+ static void appendCommandWCStatus(BSONObjBuilder& result,
+ const Status& awaitReplicationStatus,
+ const WriteConcernResult& wcResult = WriteConcernResult());
+ /**
+ * Appends passthrough fields from a cmdObj to a given request.
+ */
+ static BSONObj appendPassthroughFields(const BSONObj& cmdObjWithPassthroughFields,
+ const BSONObj& request);
+ /**
+ * Returns a copy of 'cmdObj' with a majority writeConcern appended.
+ */
+ static BSONObj appendMajorityWriteConcern(const BSONObj& cmdObj);
+ /**
+ * Returns true if the provided argument is one that is handled by the command processing layer
+ * and should generally be ignored by individual command implementations. In particular,
+ * commands that fail on unrecognized arguments must not fail for any of these.
+ */
+ static bool isGenericArgument(StringData arg) {
+ // Not including "help" since we don't pass help requests through to the command parser.
+ // If that changes, it should be added. When you add to this list, consider whether you
+ // should also change the filterCommandRequestForPassthrough() function.
+ return arg == "$audit" || //
+ arg == "$client" || //
+ arg == "$configServerState" || //
+ arg == "$db" || //
+ arg == "allowImplicitCollectionCreation" || //
+ arg == "$oplogQueryData" || //
+ arg == "$queryOptions" || //
+ arg == "$readPreference" || //
+ arg == "$replData" || //
+ arg == "$clusterTime" || //
+ arg == "maxTimeMS" || //
+ arg == "readConcern" || //
+ arg == "shardVersion" || //
+ arg == "tracking_info" || //
+ arg == "writeConcern" || //
+ arg == "lsid" || //
+ arg == "txnNumber" || //
+ false; // These comments tell clang-format to keep this line-oriented.
+ }
+
+ /**
+ * This function checks if a command is a user management command by name.
+ */
+ static bool isUserManagementCommand(const std::string& name);
+
+ /**
+ * Rewrites cmdObj into a format safe to blindly forward to shards.
+ *
+ * This performs 2 transformations:
+ * 1) $readPreference fields are moved into a subobject called $queryOptions. This matches the
+ * "wrapped" format historically used internally by mongos. Moving off of that format will be
+ * done as SERVER-29091.
+ *
+ * 2) Filter out generic arguments that shouldn't be blindly passed to the shards. This is
+ * necessary because many mongos implementations of Command::run() just pass cmdObj through
+ * directly to the shards. However, some of the generic arguments fields are automatically
+ * appended in the egress layer. Removing them here ensures that they don't get duplicated.
+ *
+ * Ideally this function can be deleted once mongos run() implementations are more careful about
+ * what they send to the shards.
+ */
+ static BSONObj filterCommandRequestForPassthrough(const BSONObj& cmdObj);
+ static void filterCommandReplyForPassthrough(const BSONObj& reply, BSONObjBuilder* output);
+
+ /**
+ * Rewrites reply into a format safe to blindly forward from shards to clients.
+ *
+ * Ideally this function can be deleted once mongos run() implementations are more careful about
+ * what they return from the shards.
+ */
+ static BSONObj filterCommandReplyForPassthrough(const BSONObj& reply);
+
+ /**
+ * Returns true if this a request for the 'help' information associated with the command.
+ */
+ static bool isHelpRequest(const BSONElement& helpElem);
+
+ /**
+ * Runs a command directly and returns the result. Does not do any other work normally handled
+ * by command dispatch, such as checking auth, dealing with CurOp or waiting for write concern.
+ * It is illegal to call this if the command does not exist.
+ */
+ static BSONObj runCommandDirectly(OperationContext* opCtx, const OpMsgRequest& request);
+
+ static constexpr StringData kHelpFieldName = "help"_sd;
+};
+
/**
* Serves as a base for server commands. See the constructor for more details.
*/
@@ -280,19 +406,6 @@ public:
_commandsFailed.increment();
}
- // The type of the first field in 'cmdObj' must be mongo::String. The first field is
- // interpreted as a collection name.
- static std::string parseNsFullyQualified(const std::string& dbname, const BSONObj& cmdObj);
-
- // The type of the first field in 'cmdObj' must be mongo::String or Symbol.
- // The first field is interpreted as a collection name.
- static NamespaceString parseNsCollectionRequired(const std::string& dbname,
- const BSONObj& cmdObj);
- static NamespaceString parseNsOrUUID(OperationContext* opCtx,
- const std::string& dbname,
- const BSONObj& cmdObj);
-
-
/**
* Runs the command.
*
@@ -301,36 +414,24 @@ public:
bool publicRun(OperationContext* opCtx, const OpMsgRequest& request, BSONObjBuilder& result);
/**
- * Runs a command directly and returns the result. Does not do any other work normally handled
- * by command dispatch, such as checking auth, dealing with CurOp or waiting for write concern.
- * It is illegal to call this if the command does not exist.
+ * Generates a reply from the 'help' information associated with a command. The state of
+ * the passed ReplyBuilder will be in kOutputDocs after calling this method.
*/
- static BSONObj runCommandDirectly(OperationContext* txn, const OpMsgRequest& request);
-
- static Command* findCommand(StringData name);
-
- // Helper for setting errmsg and ok field in command result object.
- static void appendCommandStatus(BSONObjBuilder& result,
- bool ok,
- const std::string& errmsg = {});
-
- // @return s.isOK()
- static bool appendCommandStatus(BSONObjBuilder& result, const Status& status);
+ static void generateHelpResponse(OperationContext* opCtx,
+ rpc::ReplyBuilderInterface* replyBuilder,
+ const Command& command);
/**
- * Helper for setting a writeConcernError field in the command result object if
- * a writeConcern error occurs.
+ * Checks to see if the client executing "opCtx" is authorized to run the given command with the
+ * given parameters on the given named database.
*
- * @param result is the BSONObjBuilder for the command response. This function creates the
- * writeConcernError field for the response.
- * @param awaitReplicationStatus is the status received from awaitReplication.
- * @param wcResult is the writeConcernResult object that holds other write concern information.
- * This is primarily used for populating errInfo when a timeout occurs, and is populated
- * by waitForWriteConcern.
+ * Returns Status::OK() if the command is authorized. Most likely returns
+ * ErrorCodes::Unauthorized otherwise, but any return other than Status::OK implies not
+ * authorized.
*/
- static void appendCommandWCStatus(BSONObjBuilder& result,
- const Status& awaitReplicationStatus,
- const WriteConcernResult& wcResult = WriteConcernResult());
+ static Status checkAuthorization(Command* c,
+ OperationContext* opCtx,
+ const OpMsgRequest& request);
/**
* If true, then testing commands are available. Defaults to false.
@@ -354,107 +455,10 @@ public:
* runGlobalInitializersOrDie(argc, argv, envp);
* ...
* }
- */
- static bool testCommandsEnabled;
-
- /**
- * Returns true if this a request for the 'help' information associated with the command.
- */
- static bool isHelpRequest(const BSONElement& helpElem);
-
- static const char kHelpFieldName[];
-
- /**
- * Generates a reply from the 'help' information associated with a command. The state of
- * the passed ReplyBuilder will be in kOutputDocs after calling this method.
- */
- static void generateHelpResponse(OperationContext* opCtx,
- rpc::ReplyBuilderInterface* replyBuilder,
- const Command& command);
-
- /**
- * This function checks if a command is a user management command by name.
- */
- static bool isUserManagementCommand(const std::string& name);
-
- /**
- * Checks to see if the client executing "opCtx" is authorized to run the given command with the
- * given parameters on the given named database.
*
- * Returns Status::OK() if the command is authorized. Most likely returns
- * ErrorCodes::Unauthorized otherwise, but any return other than Status::OK implies not
- * authorized.
- */
- static Status checkAuthorization(Command* c,
- OperationContext* opCtx,
- const OpMsgRequest& request);
-
- /**
- * Appends passthrough fields from a cmdObj to a given request.
+ * Note: variable is defined in test_commands_enabled.cpp as a dependency hack.
*/
- static BSONObj appendPassthroughFields(const BSONObj& cmdObjWithPassthroughFields,
- const BSONObj& request);
-
- /**
- * Returns a copy of 'cmdObj' with a majority writeConcern appended.
- */
- static BSONObj appendMajorityWriteConcern(const BSONObj& cmdObj);
-
- /**
- * Returns true if the provided argument is one that is handled by the command processing layer
- * and should generally be ignored by individual command implementations. In particular,
- * commands that fail on unrecognized arguments must not fail for any of these.
- */
- static bool isGenericArgument(StringData arg) {
- // Not including "help" since we don't pass help requests through to the command parser.
- // If that changes, it should be added. When you add to this list, consider whether you
- // should also change the filterCommandRequestForPassthrough() function.
- return arg == "$audit" || //
- arg == "$client" || //
- arg == "$configServerState" || //
- arg == "$db" || //
- arg == "allowImplicitCollectionCreation" || //
- arg == "$oplogQueryData" || //
- arg == "$queryOptions" || //
- arg == "$readPreference" || //
- arg == "$replData" || //
- arg == "$clusterTime" || //
- arg == "maxTimeMS" || //
- arg == "readConcern" || //
- arg == "shardVersion" || //
- arg == "tracking_info" || //
- arg == "writeConcern" || //
- arg == "lsid" || //
- arg == "txnNumber" || //
- false; // These comments tell clang-format to keep this line-oriented.
- }
-
- /**
- * Rewrites cmdObj into a format safe to blindly forward to shards.
- *
- * This performs 2 transformations:
- * 1) $readPreference fields are moved into a subobject called $queryOptions. This matches the
- * "wrapped" format historically used internally by mongos. Moving off of that format will be
- * done as SERVER-29091.
- *
- * 2) Filter out generic arguments that shouldn't be blindly passed to the shards. This is
- * necessary because many mongos implementations of Command::run() just pass cmdObj through
- * directly to the shards. However, some of the generic arguments fields are automatically
- * appended in the egress layer. Removing them here ensures that they don't get duplicated.
- *
- * Ideally this function can be deleted once mongos run() implementations are more careful about
- * what they send to the shards.
- */
- static BSONObj filterCommandRequestForPassthrough(const BSONObj& cmdObj);
-
- /**
- * Rewrites reply into a format safe to blindly forward from shards to clients.
- *
- * Ideally this function can be deleted once mongos run() implementations are more careful about
- * what they return from the shards.
- */
- static void filterCommandReplyForPassthrough(const BSONObj& reply, BSONObjBuilder* output);
- static BSONObj filterCommandReplyForPassthrough(const BSONObj& reply);
+ static bool testCommandsEnabled;
private:
/**
@@ -575,11 +579,6 @@ class ErrmsgCommandDeprecated : public BasicCommand {
BSONObjBuilder& result) = 0;
};
-// Struct as closed namespace. Nothing but statics.
-struct CommandHelpers {
- static BSONObj runCommandDirectly(OperationContext* opCtx, const OpMsgRequest& request);
-};
-
// See the 'globalCommandRegistry()' singleton accessor.
class CommandRegistry {
public:
diff --git a/src/mongo/db/commands/apply_ops_cmd.cpp b/src/mongo/db/commands/apply_ops_cmd.cpp
index 5822cf9b61b..344626f11ab 100644
--- a/src/mongo/db/commands/apply_ops_cmd.cpp
+++ b/src/mongo/db/commands/apply_ops_cmd.cpp
@@ -234,7 +234,7 @@ public:
auto status = OplogApplicationChecks::checkOperationArray(cmdObj.firstElement());
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
// TODO (SERVER-30217): When a write concern is provided to the applyOps command, we
@@ -259,7 +259,7 @@ public:
auto modeSW = repl::OplogApplication::parseMode(oplogApplicationModeString);
if (!modeSW.isOK()) {
// Unable to parse the mode argument.
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
modeSW.getStatus().withContext(str::stream() << "Could not parse " +
ApplyOps::kOplogApplicationModeFieldName));
@@ -267,16 +267,16 @@ public:
oplogApplicationMode = modeSW.getValue();
} else if (status != ErrorCodes::NoSuchKey) {
// NoSuchKey means the user did not supply a mode.
- return appendCommandStatus(result,
- Status(status.code(),
- str::stream()
- << "Could not parse out "
- << ApplyOps::kOplogApplicationModeFieldName
- << ": "
- << status.reason()));
+ return CommandHelpers::appendCommandStatus(
+ result,
+ Status(status.code(),
+ str::stream() << "Could not parse out "
+ << ApplyOps::kOplogApplicationModeFieldName
+ << ": "
+ << status.reason()));
}
- auto applyOpsStatus = appendCommandStatus(
+ auto applyOpsStatus = CommandHelpers::appendCommandStatus(
result, applyOps(opCtx, dbname, cmdObj, oplogApplicationMode, &result));
return applyOpsStatus;
diff --git a/src/mongo/db/commands/authentication_commands.cpp b/src/mongo/db/commands/authentication_commands.cpp
index 244244c3a3f..690f75ee89f 100644
--- a/src/mongo/db/commands/authentication_commands.cpp
+++ b/src/mongo/db/commands/authentication_commands.cpp
@@ -147,7 +147,8 @@ bool CmdAuthenticate::run(OperationContext* opCtx,
}
std::string mechanism = cmdObj.getStringField("mechanism");
if (mechanism.empty()) {
- appendCommandStatus(result, {ErrorCodes::BadValue, "Auth mechanism not specified"});
+ CommandHelpers::appendCommandStatus(result,
+ {ErrorCodes::BadValue, "Auth mechanism not specified"});
return false;
}
UserName user;
@@ -178,9 +179,10 @@ bool CmdAuthenticate::run(OperationContext* opCtx,
if (status.code() == ErrorCodes::AuthenticationFailed) {
// Statuses with code AuthenticationFailed may contain messages we do not wish to
// reveal to the user, so we return a status with the message "auth failed".
- appendCommandStatus(result, Status(ErrorCodes::AuthenticationFailed, "auth failed"));
+ CommandHelpers::appendCommandStatus(
+ result, Status(ErrorCodes::AuthenticationFailed, "auth failed"));
} else {
- appendCommandStatus(result, status);
+ CommandHelpers::appendCommandStatus(result, status);
}
sleepmillis(saslGlobalParams.authFailedDelay.load());
return false;
diff --git a/src/mongo/db/commands/clone.cpp b/src/mongo/db/commands/clone.cpp
index f219a374778..8ee47ce26be 100644
--- a/src/mongo/db/commands/clone.cpp
+++ b/src/mongo/db/commands/clone.cpp
@@ -128,7 +128,7 @@ public:
result.append("clonedColls", barr.arr());
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
} cmdClone;
diff --git a/src/mongo/db/commands/clone_collection.cpp b/src/mongo/db/commands/clone_collection.cpp
index 5746348dfc1..f152b97213d 100644
--- a/src/mongo/db/commands/clone_collection.cpp
+++ b/src/mongo/db/commands/clone_collection.cpp
@@ -73,7 +73,7 @@ public:
}
virtual std::string parseNs(const std::string& dbname, const BSONObj& cmdObj) const {
- return parseNsFullyQualified(dbname, cmdObj);
+ return CommandHelpers::parseNsFullyQualified(dbname, cmdObj);
}
virtual Status checkAuthForCommand(Client* client,
@@ -129,7 +129,7 @@ public:
string collection = parseNs(dbname, cmdObj);
Status allowedWriteStatus = userAllowedWriteNS(dbname, collection);
if (!allowedWriteStatus.isOK()) {
- return appendCommandStatus(result, allowedWriteStatus);
+ return CommandHelpers::appendCommandStatus(result, allowedWriteStatus);
}
BSONObj query = cmdObj.getObjectField("query");
diff --git a/src/mongo/db/commands/collection_to_capped.cpp b/src/mongo/db/commands/collection_to_capped.cpp
index 55272f941a0..5f283fa2195 100644
--- a/src/mongo/db/commands/collection_to_capped.cpp
+++ b/src/mongo/db/commands/collection_to_capped.cpp
@@ -121,7 +121,7 @@ public:
NamespaceString nss(dbname, to);
if (!repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(opCtx, nss)) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
Status(ErrorCodes::NotMaster,
str::stream() << "Not primary while cloning collection " << from << " to "
@@ -131,7 +131,7 @@ public:
Database* const db = autoDb.getDb();
if (!db) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
Status(ErrorCodes::NamespaceNotFound,
str::stream() << "database " << dbname << " not found"));
@@ -139,7 +139,7 @@ public:
Status status =
cloneCollectionAsCapped(opCtx, db, from.toString(), to.toString(), size, temp);
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
} cmdCloneCollectionAsCapped;
@@ -173,7 +173,7 @@ public:
const BSONObj& jsobj,
string& errmsg,
BSONObjBuilder& result) {
- const NamespaceString nss(parseNsCollectionRequired(dbname, jsobj));
+ const NamespaceString nss(CommandHelpers::parseNsCollectionRequired(dbname, jsobj));
double size = jsobj.getField("size").number();
if (size == 0) {
@@ -181,7 +181,7 @@ public:
return false;
}
- return appendCommandStatus(result, convertToCapped(opCtx, nss, size));
+ return CommandHelpers::appendCommandStatus(result, convertToCapped(opCtx, nss, size));
}
} cmdConvertToCapped;
diff --git a/src/mongo/db/commands/compact.cpp b/src/mongo/db/commands/compact.cpp
index 780790bb994..a4371eb129d 100644
--- a/src/mongo/db/commands/compact.cpp
+++ b/src/mongo/db/commands/compact.cpp
@@ -91,7 +91,7 @@ public:
const BSONObj& cmdObj,
string& errmsg,
BSONObjBuilder& result) {
- NamespaceString nss = parseNsCollectionRequired(db, cmdObj);
+ NamespaceString nss = CommandHelpers::parseNsCollectionRequired(db, cmdObj);
repl::ReplicationCoordinator* replCoord = repl::getGlobalReplicationCoordinator();
if (replCoord->getMemberState().primary() && !cmdObj["force"].trueValue()) {
@@ -153,10 +153,10 @@ public:
// If db/collection does not exist, short circuit and return.
if (!collDB || !collection) {
if (view)
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result, {ErrorCodes::CommandNotSupportedOnView, "can't compact a view"});
else
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result, {ErrorCodes::NamespaceNotFound, "collection does not exist"});
}
@@ -167,7 +167,7 @@ public:
StatusWith<CompactStats> status = collection->compact(opCtx, &compactOptions);
if (!status.isOK())
- return appendCommandStatus(result, status.getStatus());
+ return CommandHelpers::appendCommandStatus(result, status.getStatus());
if (status.getValue().corruptDocuments > 0)
result.append("invalidObjects", status.getValue().corruptDocuments);
diff --git a/src/mongo/db/commands/connection_status.cpp b/src/mongo/db/commands/connection_status.cpp
index b8ea2e101fd..2a9c97f453f 100644
--- a/src/mongo/db/commands/connection_status.cpp
+++ b/src/mongo/db/commands/connection_status.cpp
@@ -65,7 +65,7 @@ public:
Status status =
bsonExtractBooleanFieldWithDefault(cmdObj, "showPrivileges", false, &showPrivileges);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
BSONObjBuilder authInfo(result.subobjStart("authInfo"));
diff --git a/src/mongo/db/commands/copydb.cpp b/src/mongo/db/commands/copydb.cpp
index ad6faa61308..4f3c00fdab0 100644
--- a/src/mongo/db/commands/copydb.cpp
+++ b/src/mongo/db/commands/copydb.cpp
@@ -183,7 +183,7 @@ public:
}
if (!ret["done"].Bool()) {
- filterCommandReplyForPassthrough(ret, &result);
+ CommandHelpers::filterCommandReplyForPassthrough(ret, &result);
return true;
}
diff --git a/src/mongo/db/commands/copydb_start_commands.cpp b/src/mongo/db/commands/copydb_start_commands.cpp
index 03bd2f3b831..3d1cd9786d2 100644
--- a/src/mongo/db/commands/copydb_start_commands.cpp
+++ b/src/mongo/db/commands/copydb_start_commands.cpp
@@ -127,7 +127,7 @@ public:
BSONElement mechanismElement;
Status status = bsonExtractField(cmdObj, saslCommandMechanismFieldName, &mechanismElement);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
BSONElement payloadElement;
@@ -147,10 +147,10 @@ public:
if (!authConn->runCommand(
fromDb, BSON("saslStart" << 1 << mechanismElement << payloadElement), ret)) {
authConn.reset();
- return appendCommandStatus(result, getStatusFromCommandResult(ret));
+ return CommandHelpers::appendCommandStatus(result, getStatusFromCommandResult(ret));
}
- filterCommandReplyForPassthrough(ret, &result);
+ CommandHelpers::filterCommandReplyForPassthrough(ret, &result);
return true;
}
diff --git a/src/mongo/db/commands/count_cmd.cpp b/src/mongo/db/commands/count_cmd.cpp
index d70c9ce6c80..6cefbbccf9f 100644
--- a/src/mongo/db/commands/count_cmd.cpp
+++ b/src/mongo/db/commands/count_cmd.cpp
@@ -101,7 +101,7 @@ public:
return Status(ErrorCodes::Unauthorized, "Unauthorized");
}
- const NamespaceString nss(parseNsOrUUID(opCtx, dbname, cmdObj));
+ const NamespaceString nss(CommandHelpers::parseNsOrUUID(opCtx, dbname, cmdObj));
if (!authSession->isAuthorizedForActionsOnNamespace(nss, ActionType::find)) {
return Status(ErrorCodes::Unauthorized, "Unauthorized");
}
@@ -116,7 +116,7 @@ public:
BSONObjBuilder* out) const {
const bool isExplain = true;
Lock::DBLock dbLock(opCtx, dbname, MODE_IS);
- auto nss = parseNsOrUUID(opCtx, dbname, cmdObj);
+ auto nss = CommandHelpers::parseNsOrUUID(opCtx, dbname, cmdObj);
auto request = CountRequest::parseFromBSON(nss, cmdObj, isExplain);
if (!request.isOK()) {
return request.getStatus();
@@ -174,10 +174,10 @@ public:
BSONObjBuilder& result) {
const bool isExplain = false;
Lock::DBLock dbLock(opCtx, dbname, MODE_IS);
- auto nss = parseNsOrUUID(opCtx, dbname, cmdObj);
+ auto nss = CommandHelpers::parseNsOrUUID(opCtx, dbname, cmdObj);
auto request = CountRequest::parseFromBSON(nss, cmdObj, isExplain);
if (!request.isOK()) {
- return appendCommandStatus(result, request.getStatus());
+ return CommandHelpers::appendCommandStatus(result, request.getStatus());
}
AutoGetCollectionOrViewForReadCommand ctx(
@@ -189,10 +189,10 @@ public:
auto viewAggregation = request.getValue().asAggregationCommand();
if (!viewAggregation.isOK()) {
- return appendCommandStatus(result, viewAggregation.getStatus());
+ return CommandHelpers::appendCommandStatus(result, viewAggregation.getStatus());
}
- BSONObj aggResult = Command::runCommandDirectly(
+ BSONObj aggResult = CommandHelpers::runCommandDirectly(
opCtx, OpMsgRequest::fromDBAndBody(dbname, std::move(viewAggregation.getValue())));
if (ResolvedView::isResolvedViewErrorResponse(aggResult)) {
@@ -203,7 +203,7 @@ public:
ViewResponseFormatter formatter(aggResult);
Status formatStatus = formatter.appendAsCountResponse(&result);
if (!formatStatus.isOK()) {
- return appendCommandStatus(result, formatStatus);
+ return CommandHelpers::appendCommandStatus(result, formatStatus);
}
return true;
}
@@ -219,7 +219,7 @@ public:
false, // !explain
PlanExecutor::YIELD_AUTO);
if (!statusWithPlanExecutor.isOK()) {
- return appendCommandStatus(result, statusWithPlanExecutor.getStatus());
+ return CommandHelpers::appendCommandStatus(result, statusWithPlanExecutor.getStatus());
}
auto exec = std::move(statusWithPlanExecutor.getValue());
@@ -233,7 +233,7 @@ public:
Status execPlanStatus = exec->executePlan();
if (!execPlanStatus.isOK()) {
- return appendCommandStatus(result, execPlanStatus);
+ return CommandHelpers::appendCommandStatus(result, execPlanStatus);
}
PlanSummaryStats summaryStats;
diff --git a/src/mongo/db/commands/create_indexes.cpp b/src/mongo/db/commands/create_indexes.cpp
index 52c3d08fc1e..79c38e4424b 100644
--- a/src/mongo/db/commands/create_indexes.cpp
+++ b/src/mongo/db/commands/create_indexes.cpp
@@ -129,7 +129,7 @@ StatusWith<std::vector<BSONObj>> parseAndValidateIndexSpecs(
hasIndexesField = true;
} else if (kCommandName == cmdElemFieldName ||
- Command::isGenericArgument(cmdElemFieldName)) {
+ CommandHelpers::isGenericArgument(cmdElemFieldName)) {
continue;
} else {
return {ErrorCodes::BadValue,
@@ -236,11 +236,11 @@ public:
const BSONObj& cmdObj,
string& errmsg,
BSONObjBuilder& result) {
- const NamespaceString ns(parseNsCollectionRequired(dbname, cmdObj));
+ const NamespaceString ns(CommandHelpers::parseNsCollectionRequired(dbname, cmdObj));
Status status = userAllowedWriteNS(ns);
if (!status.isOK())
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
// Disallow users from creating new indexes on config.transactions since the sessions
// code was optimized to not update indexes.
@@ -251,7 +251,7 @@ public:
auto specsWithStatus =
parseAndValidateIndexSpecs(opCtx, ns, cmdObj, serverGlobalParams.featureCompatibility);
if (!specsWithStatus.isOK()) {
- return appendCommandStatus(result, specsWithStatus.getStatus());
+ return CommandHelpers::appendCommandStatus(result, specsWithStatus.getStatus());
}
auto specs = std::move(specsWithStatus.getValue());
@@ -259,7 +259,7 @@ public:
// Note: createIndexes command does not currently respect shard versioning.
Lock::DBLock dbLock(opCtx, ns.db(), MODE_X);
if (!repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(opCtx, ns)) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
Status(ErrorCodes::NotMaster,
str::stream() << "Not primary while creating indexes in " << ns.ns()));
@@ -276,7 +276,8 @@ public:
} else {
if (db->getViewCatalog()->lookup(opCtx, ns.ns())) {
errmsg = "Cannot create indexes on a view";
- return appendCommandStatus(result, {ErrorCodes::CommandNotSupportedOnView, errmsg});
+ return CommandHelpers::appendCommandStatus(
+ result, {ErrorCodes::CommandNotSupportedOnView, errmsg});
}
writeConflictRetry(opCtx, kCommandName, ns.ns(), [&] {
@@ -291,7 +292,7 @@ public:
auto indexSpecsWithDefaults =
resolveCollectionDefaultProperties(opCtx, collection, std::move(specs));
if (!indexSpecsWithDefaults.isOK()) {
- return appendCommandStatus(result, indexSpecsWithDefaults.getStatus());
+ return CommandHelpers::appendCommandStatus(result, indexSpecsWithDefaults.getStatus());
}
specs = std::move(indexSpecsWithDefaults.getValue());
@@ -321,7 +322,7 @@ public:
status = checkUniqueIndexConstraints(opCtx, ns.ns(), spec["key"].Obj());
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
}
}
@@ -337,7 +338,7 @@ public:
opCtx->recoveryUnit()->abandonSnapshot();
dbLock.relockWithMode(MODE_IX);
if (!repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(opCtx, ns)) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
Status(ErrorCodes::NotMaster,
str::stream() << "Not primary while creating background indexes in "
@@ -359,7 +360,7 @@ public:
opCtx->recoveryUnit()->abandonSnapshot();
dbLock.relockWithMode(MODE_X);
if (!repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(opCtx, ns)) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
Status(ErrorCodes::NotMaster,
str::stream()
diff --git a/src/mongo/db/commands/current_op.cpp b/src/mongo/db/commands/current_op.cpp
index 838f01c44f0..c742e9fba31 100644
--- a/src/mongo/db/commands/current_op.cpp
+++ b/src/mongo/db/commands/current_op.cpp
@@ -76,7 +76,7 @@ public:
return status;
}
- appendCommandStatus(responseBuilder, Status::OK());
+ CommandHelpers::appendCommandStatus(responseBuilder, Status::OK());
return CursorResponse::parseFromBSON(responseBuilder.obj());
}
diff --git a/src/mongo/db/commands/current_op_common.cpp b/src/mongo/db/commands/current_op_common.cpp
index 2e2a1ec799d..7f85aa2952d 100644
--- a/src/mongo/db/commands/current_op_common.cpp
+++ b/src/mongo/db/commands/current_op_common.cpp
@@ -66,7 +66,7 @@ bool CurrentOpCommandBase::run(OperationContext* opCtx,
const auto fieldName = elt.fieldNameStringData();
if (0 == idx++ || fieldName == "$all" || fieldName == "$ownOps" ||
- Command::isGenericArgument(fieldName)) {
+ CommandHelpers::isGenericArgument(fieldName)) {
continue;
}
@@ -111,7 +111,7 @@ bool CurrentOpCommandBase::run(OperationContext* opCtx,
// Make any final custom additions to the response object.
appendToResponse(&result);
- return appendCommandStatus(result, Status::OK());
+ return CommandHelpers::appendCommandStatus(result, Status::OK());
}
} // namespace mongo
diff --git a/src/mongo/db/commands/dbcheck.cpp b/src/mongo/db/commands/dbcheck.cpp
index 9adf39f6b5e..8642c381f1e 100644
--- a/src/mongo/db/commands/dbcheck.cpp
+++ b/src/mongo/db/commands/dbcheck.cpp
@@ -155,7 +155,7 @@ std::unique_ptr<DbCheckRun> getRun(OperationContext* opCtx,
// Get rid of generic command fields.
for (const auto& elem : obj) {
- if (!Command::isGenericArgument(elem.fieldNameStringData())) {
+ if (!CommandHelpers::isGenericArgument(elem.fieldNameStringData())) {
builder.append(elem);
}
}
diff --git a/src/mongo/db/commands/dbcommands.cpp b/src/mongo/db/commands/dbcommands.cpp
index 693bb58d5a0..c71161bcb2b 100644
--- a/src/mongo/db/commands/dbcommands.cpp
+++ b/src/mongo/db/commands/dbcommands.cpp
@@ -132,7 +132,7 @@ public:
Status status = repl::getGlobalReplicationCoordinator()->stepDown(
opCtx, force, Seconds(timeoutSecs), Seconds(120));
if (!status.isOK() && status.code() != ErrorCodes::NotMaster) { // ignore not master
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
// Never returns
@@ -173,16 +173,17 @@ public:
// disallow dropping the config database
if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer &&
(dbname == NamespaceString::kConfigDb)) {
- return appendCommandStatus(result,
- Status(ErrorCodes::IllegalOperation,
- "Cannot drop 'config' database if mongod started "
- "with --configsvr"));
+ return CommandHelpers::appendCommandStatus(
+ result,
+ Status(ErrorCodes::IllegalOperation,
+ "Cannot drop 'config' database if mongod started "
+ "with --configsvr"));
}
if ((repl::getGlobalReplicationCoordinator()->getReplicationMode() !=
repl::ReplicationCoordinator::modeNone) &&
((dbname == NamespaceString::kLocalDb) || (dbname == NamespaceString::kAdminDb))) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
Status(ErrorCodes::IllegalOperation,
str::stream() << "Cannot drop '" << dbname
@@ -191,18 +192,18 @@ public:
BSONElement e = cmdObj.firstElement();
int p = (int)e.number();
if (p != 1) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result, Status(ErrorCodes::IllegalOperation, "have to pass 1 as db parameter"));
}
Status status = dropDatabase(opCtx, dbname);
if (status == ErrorCodes::NamespaceNotFound) {
- return appendCommandStatus(result, Status::OK());
+ return CommandHelpers::appendCommandStatus(result, Status::OK());
}
if (status.isOK()) {
result.append("dropped", dbname);
}
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
} cmdDropDatabase;
@@ -284,7 +285,7 @@ public:
// Open database before returning
dbHolder().openDb(opCtx, dbname);
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
} cmdRepairDatabase;
@@ -422,7 +423,7 @@ public:
const BSONObj& cmdObj,
string& errmsg,
BSONObjBuilder& result) {
- const NamespaceString nsToDrop = parseNsCollectionRequired(dbname, cmdObj);
+ const NamespaceString nsToDrop = CommandHelpers::parseNsCollectionRequired(dbname, cmdObj);
if (NamespaceString::virtualized(nsToDrop.ns())) {
errmsg = "can't drop a virtual collection";
@@ -436,7 +437,7 @@ public:
return false;
}
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
dropCollection(opCtx,
nsToDrop,
@@ -478,7 +479,7 @@ public:
const string& dbname,
const BSONObj& cmdObj,
BSONObjBuilder& result) {
- const NamespaceString ns(parseNsCollectionRequired(dbname, cmdObj));
+ const NamespaceString ns(CommandHelpers::parseNsCollectionRequired(dbname, cmdObj));
if (cmdObj.hasField("autoIndexId")) {
const char* deprecationWarning =
@@ -490,21 +491,21 @@ public:
// Validate _id index spec and fill in missing fields.
if (auto idIndexElem = cmdObj["idIndex"]) {
if (cmdObj["viewOn"]) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
{ErrorCodes::InvalidOptions,
str::stream() << "'idIndex' is not allowed with 'viewOn': " << idIndexElem});
}
if (cmdObj["autoIndexId"]) {
- return appendCommandStatus(result,
- {ErrorCodes::InvalidOptions,
- str::stream()
- << "'idIndex' is not allowed with 'autoIndexId': "
- << idIndexElem});
+ return CommandHelpers::appendCommandStatus(
+ result,
+ {ErrorCodes::InvalidOptions,
+ str::stream() << "'idIndex' is not allowed with 'autoIndexId': "
+ << idIndexElem});
}
if (idIndexElem.type() != BSONType::Object) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
{ErrorCodes::TypeMismatch,
str::stream() << "'idIndex' has to be a document: " << idIndexElem});
@@ -521,7 +522,7 @@ public:
std::unique_ptr<CollatorInterface> defaultCollator;
if (auto collationElem = cmdObj["collation"]) {
if (collationElem.type() != BSONType::Object) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
{ErrorCodes::TypeMismatch,
str::stream() << "'collation' has to be a document: " << collationElem});
@@ -529,7 +530,7 @@ public:
auto collatorStatus = CollatorFactoryInterface::get(opCtx->getServiceContext())
->makeFromBSON(collationElem.Obj());
if (!collatorStatus.isOK()) {
- return appendCommandStatus(result, collatorStatus.getStatus());
+ return CommandHelpers::appendCommandStatus(result, collatorStatus.getStatus());
}
defaultCollator = std::move(collatorStatus.getValue());
}
@@ -545,7 +546,7 @@ public:
idIndexCollator = std::move(collatorStatus.getValue());
}
if (!CollatorInterface::collatorsMatch(defaultCollator.get(), idIndexCollator.get())) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
{ErrorCodes::BadValue,
"'idIndex' must have the same collation as the collection."});
@@ -554,12 +555,13 @@ public:
// Remove "idIndex" field from command.
auto resolvedCmdObj = cmdObj.removeField("idIndex");
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result, createCollection(opCtx, dbname, resolvedCmdObj, idIndexSpec));
}
BSONObj idIndexSpec;
- return appendCommandStatus(result, createCollection(opCtx, dbname, cmdObj, idIndexSpec));
+ return CommandHelpers::appendCommandStatus(
+ result, createCollection(opCtx, dbname, cmdObj, idIndexSpec));
}
} cmdCreate;
@@ -701,11 +703,11 @@ public:
}
if (PlanExecutor::DEAD == state || PlanExecutor::FAILURE == state) {
- return appendCommandStatus(result,
- Status(ErrorCodes::OperationFailed,
- str::stream()
- << "Executor error during filemd5 command: "
- << WorkingSetCommon::toStatusString(obj)));
+ return CommandHelpers::appendCommandStatus(
+ result,
+ Status(ErrorCodes::OperationFailed,
+ str::stream() << "Executor error during filemd5 command: "
+ << WorkingSetCommon::toStatusString(obj)));
}
if (partialOk)
@@ -739,7 +741,7 @@ public:
class CmdDatasize : public ErrmsgCommandDeprecated {
virtual string parseNs(const string& dbname, const BSONObj& cmdObj) const {
- return parseNsFullyQualified(dbname, cmdObj);
+ return CommandHelpers::parseNsFullyQualified(dbname, cmdObj);
}
public:
@@ -870,7 +872,7 @@ public:
if (PlanExecutor::FAILURE == state || PlanExecutor::DEAD == state) {
warning() << "Internal error while reading " << ns;
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
Status(ErrorCodes::OperationFailed,
str::stream() << "Executor error while reading during dataSize command: "
@@ -920,7 +922,7 @@ public:
const BSONObj& jsobj,
string& errmsg,
BSONObjBuilder& result) {
- const NamespaceString nss(parseNsCollectionRequired(dbname, jsobj));
+ const NamespaceString nss(CommandHelpers::parseNsCollectionRequired(dbname, jsobj));
if (nss.coll().empty()) {
errmsg = "No collection name specified";
@@ -967,8 +969,8 @@ public:
const string& dbname,
const BSONObj& jsobj,
BSONObjBuilder& result) {
- const NamespaceString nss(parseNsCollectionRequired(dbname, jsobj));
- return appendCommandStatus(result, collMod(opCtx, nss, jsobj, &result));
+ const NamespaceString nss(CommandHelpers::parseNsCollectionRequired(dbname, jsobj));
+ return CommandHelpers::appendCommandStatus(result, collMod(opCtx, nss, jsobj, &result));
}
} collectionModCommand;
diff --git a/src/mongo/db/commands/distinct.cpp b/src/mongo/db/commands/distinct.cpp
index 96f08dc80ed..a17c3bd3a23 100644
--- a/src/mongo/db/commands/distinct.cpp
+++ b/src/mongo/db/commands/distinct.cpp
@@ -116,7 +116,7 @@ public:
const BSONObj& cmdObj,
ExplainOptions::Verbosity verbosity,
BSONObjBuilder* out) const {
- const NamespaceString nss(parseNsCollectionRequired(dbname, cmdObj));
+ const NamespaceString nss(CommandHelpers::parseNsCollectionRequired(dbname, cmdObj));
const ExtensionsCallbackReal extensionsCallback(opCtx, &nss);
auto parsedDistinct = ParsedDistinct::parse(opCtx, nss, cmdObj, extensionsCallback, true);
@@ -159,12 +159,12 @@ public:
const string& dbname,
const BSONObj& cmdObj,
BSONObjBuilder& result) {
- const NamespaceString nss(parseNsCollectionRequired(dbname, cmdObj));
+ const NamespaceString nss(CommandHelpers::parseNsCollectionRequired(dbname, cmdObj));
const ExtensionsCallbackReal extensionsCallback(opCtx, &nss);
auto parsedDistinct = ParsedDistinct::parse(opCtx, nss, cmdObj, extensionsCallback, false);
if (!parsedDistinct.isOK()) {
- return appendCommandStatus(result, parsedDistinct.getStatus());
+ return CommandHelpers::appendCommandStatus(result, parsedDistinct.getStatus());
}
AutoGetCollectionOrViewForReadCommand ctx(opCtx, nss);
@@ -175,10 +175,10 @@ public:
auto viewAggregation = parsedDistinct.getValue().asAggregationCommand();
if (!viewAggregation.isOK()) {
- return appendCommandStatus(result, viewAggregation.getStatus());
+ return CommandHelpers::appendCommandStatus(result, viewAggregation.getStatus());
}
- BSONObj aggResult = Command::runCommandDirectly(
+ BSONObj aggResult = CommandHelpers::runCommandDirectly(
opCtx, OpMsgRequest::fromDBAndBody(dbname, std::move(viewAggregation.getValue())));
if (ResolvedView::isResolvedViewErrorResponse(aggResult)) {
@@ -189,7 +189,7 @@ public:
ViewResponseFormatter formatter(aggResult);
Status formatStatus = formatter.appendAsDistinctResponse(&result);
if (!formatStatus.isOK()) {
- return appendCommandStatus(result, formatStatus);
+ return CommandHelpers::appendCommandStatus(result, formatStatus);
}
return true;
}
@@ -197,7 +197,7 @@ public:
auto executor = getExecutorDistinct(
opCtx, collection, nss.ns(), &parsedDistinct.getValue(), PlanExecutor::YIELD_AUTO);
if (!executor.isOK()) {
- return appendCommandStatus(result, executor.getStatus());
+ return CommandHelpers::appendCommandStatus(result, executor.getStatus());
}
{
@@ -249,11 +249,11 @@ public:
<< redact(PlanExecutor::statestr(state))
<< ", stats: " << redact(Explain::getWinningPlanStats(executor.getValue().get()));
- return appendCommandStatus(result,
- Status(ErrorCodes::OperationFailed,
- str::stream()
- << "Executor error during distinct command: "
- << WorkingSetCommon::toStatusString(obj)));
+ return CommandHelpers::appendCommandStatus(
+ result,
+ Status(ErrorCodes::OperationFailed,
+ str::stream() << "Executor error during distinct command: "
+ << WorkingSetCommon::toStatusString(obj)));
}
diff --git a/src/mongo/db/commands/do_txn_cmd.cpp b/src/mongo/db/commands/do_txn_cmd.cpp
index 58965f0283f..f695470f034 100644
--- a/src/mongo/db/commands/do_txn_cmd.cpp
+++ b/src/mongo/db/commands/do_txn_cmd.cpp
@@ -163,7 +163,7 @@ public:
auto status = OplogApplicationChecks::checkOperationArray(cmdObj.firstElement());
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
// TODO (SERVER-30217): When a write concern is provided to the doTxn command, we
@@ -176,7 +176,8 @@ public:
// was acknowledged. To fix this, we should wait for replication of the node’s last applied
// OpTime if the last write operation was a no-op write.
- auto doTxnStatus = appendCommandStatus(result, doTxn(opCtx, dbname, cmdObj, &result));
+ auto doTxnStatus =
+ CommandHelpers::appendCommandStatus(result, doTxn(opCtx, dbname, cmdObj, &result));
return doTxnStatus;
}
diff --git a/src/mongo/db/commands/drop_indexes.cpp b/src/mongo/db/commands/drop_indexes.cpp
index 7f73d58557a..14a7a5d91a2 100644
--- a/src/mongo/db/commands/drop_indexes.cpp
+++ b/src/mongo/db/commands/drop_indexes.cpp
@@ -88,8 +88,8 @@ public:
const string& dbname,
const BSONObj& jsobj,
BSONObjBuilder& result) {
- const NamespaceString nss = parseNsCollectionRequired(dbname, jsobj);
- return appendCommandStatus(result, dropIndexes(opCtx, nss, jsobj, &result));
+ const NamespaceString nss = CommandHelpers::parseNsCollectionRequired(dbname, jsobj);
+ return CommandHelpers::appendCommandStatus(result, dropIndexes(opCtx, nss, jsobj, &result));
}
} cmdDropIndexes;
@@ -121,7 +121,8 @@ public:
BSONObjBuilder& result) {
DBDirectClient db(opCtx);
- const NamespaceString toReIndexNs = parseNsCollectionRequired(dbname, jsobj);
+ const NamespaceString toReIndexNs =
+ CommandHelpers::parseNsCollectionRequired(dbname, jsobj);
LOG(0) << "CMD: reIndex " << toReIndexNs;
@@ -131,10 +132,10 @@ public:
Collection* collection = ctx.db()->getCollection(opCtx, toReIndexNs);
if (!collection) {
if (ctx.db()->getViewCatalog()->lookup(opCtx, toReIndexNs.ns()))
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result, {ErrorCodes::CommandNotSupportedOnView, "can't re-index a view"});
else
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result, {ErrorCodes::NamespaceNotFound, "collection does not exist"});
}
@@ -198,12 +199,12 @@ public:
auto indexInfoObjs = indexer.init(all);
if (!indexInfoObjs.isOK()) {
- return appendCommandStatus(result, indexInfoObjs.getStatus());
+ return CommandHelpers::appendCommandStatus(result, indexInfoObjs.getStatus());
}
auto status = indexer.insertAllDocumentsInCollection();
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
{
diff --git a/src/mongo/db/commands/end_sessions_command.cpp b/src/mongo/db/commands/end_sessions_command.cpp
index fdaf5f37187..8f6bb1ded29 100644
--- a/src/mongo/db/commands/end_sessions_command.cpp
+++ b/src/mongo/db/commands/end_sessions_command.cpp
@@ -84,7 +84,8 @@ public:
if (serverGlobalParams.featureCompatibility.getVersion() !=
ServerGlobalParams::FeatureCompatibility::Version::kFullyUpgradedTo36) {
- return appendCommandStatus(result, SessionsCommandFCV34Status(getName()));
+ return CommandHelpers::appendCommandStatus(result,
+ SessionsCommandFCV34Status(getName()));
}
auto lsCache = LogicalSessionCache::get(opCtx);
diff --git a/src/mongo/db/commands/explain_cmd.cpp b/src/mongo/db/commands/explain_cmd.cpp
index d0a218a2e1a..a2adaef7d38 100644
--- a/src/mongo/db/commands/explain_cmd.cpp
+++ b/src/mongo/db/commands/explain_cmd.cpp
@@ -88,7 +88,7 @@ public:
Object == cmdObj.firstElement().type());
auto explainObj = cmdObj.firstElement().Obj();
- Command* commToExplain = Command::findCommand(explainObj.firstElementFieldName());
+ Command* commToExplain = CommandHelpers::findCommand(explainObj.firstElementFieldName());
uassert(ErrorCodes::CommandNotFound,
str::stream() << "explain failed due to unknown command: "
<< explainObj.firstElementFieldName(),
@@ -110,7 +110,7 @@ public:
BSONObj explainObj = cmdObj.firstElement().Obj();
- Command* commToExplain = Command::findCommand(explainObj.firstElementFieldName());
+ Command* commToExplain = CommandHelpers::findCommand(explainObj.firstElementFieldName());
if (NULL == commToExplain) {
mongoutils::str::stream ss;
ss << "unknown command: " << explainObj.firstElementFieldName();
@@ -127,7 +127,7 @@ public:
BSONObjBuilder& result) {
auto verbosity = ExplainOptions::parseCmdBSON(cmdObj);
if (!verbosity.isOK()) {
- return appendCommandStatus(result, verbosity.getStatus());
+ return CommandHelpers::appendCommandStatus(result, verbosity.getStatus());
}
// This is the nested command which we are explaining.
@@ -141,12 +141,12 @@ public:
innerDb.checkAndGetStringData() == dbname);
}
- Command* commToExplain = Command::findCommand(explainObj.firstElementFieldName());
+ Command* commToExplain = CommandHelpers::findCommand(explainObj.firstElementFieldName());
if (NULL == commToExplain) {
mongoutils::str::stream ss;
ss << "Explain failed due to unknown command: " << explainObj.firstElementFieldName();
Status explainStatus(ErrorCodes::CommandNotFound, ss);
- return appendCommandStatus(result, explainStatus);
+ return CommandHelpers::appendCommandStatus(result, explainStatus);
}
// Check whether the child command is allowed to run here. TODO: this logic is
@@ -167,7 +167,7 @@ public:
mongoutils::str::stream ss;
ss << "Explain's child command cannot run on this node. "
<< "Are you explaining a write command on a secondary?";
- appendCommandStatus(result, false, ss);
+ CommandHelpers::appendCommandStatus(result, false, ss);
return false;
}
@@ -175,7 +175,7 @@ public:
Status explainStatus =
commToExplain->explain(opCtx, dbname, explainObj, verbosity.getValue(), &result);
if (!explainStatus.isOK()) {
- return appendCommandStatus(result, explainStatus);
+ return CommandHelpers::appendCommandStatus(result, explainStatus);
}
return true;
diff --git a/src/mongo/db/commands/feature_compatibility_version_command_parser.cpp b/src/mongo/db/commands/feature_compatibility_version_command_parser.cpp
index e5cbe4bd137..be841f8baf8 100644
--- a/src/mongo/db/commands/feature_compatibility_version_command_parser.cpp
+++ b/src/mongo/db/commands/feature_compatibility_version_command_parser.cpp
@@ -72,7 +72,7 @@ StatusWith<std::string> FeatureCompatibilityVersionCommandParser::extractVersion
// Ensure that the command does not contain any unrecognized parameters
for (const auto& cmdElem : cmdObj) {
const auto fieldName = cmdElem.fieldNameStringData();
- if (fieldName == commandName || Command::isGenericArgument(fieldName)) {
+ if (fieldName == commandName || CommandHelpers::isGenericArgument(fieldName)) {
continue;
}
diff --git a/src/mongo/db/commands/find_and_modify.cpp b/src/mongo/db/commands/find_and_modify.cpp
index dbbd28447bf..2a9cb589c64 100644
--- a/src/mongo/db/commands/find_and_modify.cpp
+++ b/src/mongo/db/commands/find_and_modify.cpp
@@ -242,7 +242,7 @@ public:
const BSONObj& cmdObj,
ExplainOptions::Verbosity verbosity,
BSONObjBuilder* out) const override {
- const NamespaceString fullNs = parseNsCollectionRequired(dbName, cmdObj);
+ const NamespaceString fullNs = CommandHelpers::parseNsCollectionRequired(dbName, cmdObj);
Status allowedWriteStatus = userAllowedWriteNS(fullNs.ns());
if (!allowedWriteStatus.isOK()) {
return allowedWriteStatus;
@@ -330,16 +330,16 @@ public:
BSONObjBuilder& result) override {
// findAndModify command is not replicated directly.
invariant(opCtx->writesAreReplicated());
- const NamespaceString fullNs = parseNsCollectionRequired(dbName, cmdObj);
+ const NamespaceString fullNs = CommandHelpers::parseNsCollectionRequired(dbName, cmdObj);
Status allowedWriteStatus = userAllowedWriteNS(fullNs.ns());
if (!allowedWriteStatus.isOK()) {
- return appendCommandStatus(result, allowedWriteStatus);
+ return CommandHelpers::appendCommandStatus(result, allowedWriteStatus);
}
StatusWith<FindAndModifyRequest> parseStatus =
FindAndModifyRequest::parseFromBSON(NamespaceString(fullNs.ns()), cmdObj);
if (!parseStatus.isOK()) {
- return appendCommandStatus(result, parseStatus.getStatus());
+ return CommandHelpers::appendCommandStatus(result, parseStatus.getStatus());
}
const FindAndModifyRequest& args = parseStatus.getValue();
@@ -378,7 +378,7 @@ public:
ParsedDelete parsedDelete(opCtx, &request);
Status parsedDeleteStatus = parsedDelete.parseRequest();
if (!parsedDeleteStatus.isOK()) {
- appendCommandStatus(result, parsedDeleteStatus);
+ CommandHelpers::appendCommandStatus(result, parsedDeleteStatus);
return false;
}
@@ -397,21 +397,21 @@ public:
Status isPrimary = checkCanAcceptWritesForDatabase(opCtx, nsString);
if (!isPrimary.isOK()) {
- appendCommandStatus(result, isPrimary);
+ CommandHelpers::appendCommandStatus(result, isPrimary);
return false;
}
Collection* const collection = autoDb.getDb()->getCollection(opCtx, nsString);
if (!collection && autoDb.getDb()->getViewCatalog()->lookup(opCtx, nsString.ns())) {
- appendCommandStatus(result,
- {ErrorCodes::CommandNotSupportedOnView,
- "findAndModify not supported on a view"});
+ CommandHelpers::appendCommandStatus(result,
+ {ErrorCodes::CommandNotSupportedOnView,
+ "findAndModify not supported on a view"});
return false;
}
auto statusWithPlanExecutor =
getExecutorDelete(opCtx, opDebug, collection, &parsedDelete);
if (!statusWithPlanExecutor.isOK()) {
- appendCommandStatus(result, statusWithPlanExecutor.getStatus());
+ CommandHelpers::appendCommandStatus(result, statusWithPlanExecutor.getStatus());
return false;
}
const auto exec = std::move(statusWithPlanExecutor.getValue());
@@ -424,7 +424,7 @@ public:
StatusWith<boost::optional<BSONObj>> advanceStatus =
advanceExecutor(opCtx, exec.get(), args.isRemove());
if (!advanceStatus.isOK()) {
- appendCommandStatus(result, advanceStatus.getStatus());
+ CommandHelpers::appendCommandStatus(result, advanceStatus.getStatus());
return false;
}
// Nothing after advancing the plan executor should throw a WriteConflictException,
@@ -463,7 +463,7 @@ public:
ParsedUpdate parsedUpdate(opCtx, &request);
Status parsedUpdateStatus = parsedUpdate.parseRequest();
if (!parsedUpdateStatus.isOK()) {
- appendCommandStatus(result, parsedUpdateStatus);
+ CommandHelpers::appendCommandStatus(result, parsedUpdateStatus);
return false;
}
@@ -482,15 +482,15 @@ public:
Status isPrimary = checkCanAcceptWritesForDatabase(opCtx, nsString);
if (!isPrimary.isOK()) {
- appendCommandStatus(result, isPrimary);
+ CommandHelpers::appendCommandStatus(result, isPrimary);
return false;
}
Collection* collection = autoDb.getDb()->getCollection(opCtx, nsString.ns());
if (!collection && autoDb.getDb()->getViewCatalog()->lookup(opCtx, nsString.ns())) {
- appendCommandStatus(result,
- {ErrorCodes::CommandNotSupportedOnView,
- "findAndModify not supported on a view"});
+ CommandHelpers::appendCommandStatus(result,
+ {ErrorCodes::CommandNotSupportedOnView,
+ "findAndModify not supported on a view"});
return false;
}
@@ -503,7 +503,7 @@ public:
collection = autoDb.getDb()->getCollection(opCtx, nsString);
Status isPrimaryAfterRelock = checkCanAcceptWritesForDatabase(opCtx, nsString);
if (!isPrimaryAfterRelock.isOK()) {
- appendCommandStatus(result, isPrimaryAfterRelock);
+ CommandHelpers::appendCommandStatus(result, isPrimaryAfterRelock);
return false;
}
@@ -514,7 +514,7 @@ public:
Status createCollStatus =
userCreateNS(opCtx, autoDb.getDb(), nsString.ns(), BSONObj());
if (!createCollStatus.isOK()) {
- appendCommandStatus(result, createCollStatus);
+ CommandHelpers::appendCommandStatus(result, createCollStatus);
return false;
}
wuow.commit();
@@ -527,7 +527,7 @@ public:
auto statusWithPlanExecutor =
getExecutorUpdate(opCtx, opDebug, collection, &parsedUpdate);
if (!statusWithPlanExecutor.isOK()) {
- appendCommandStatus(result, statusWithPlanExecutor.getStatus());
+ CommandHelpers::appendCommandStatus(result, statusWithPlanExecutor.getStatus());
return false;
}
const auto exec = std::move(statusWithPlanExecutor.getValue());
@@ -540,7 +540,7 @@ public:
StatusWith<boost::optional<BSONObj>> advanceStatus =
advanceExecutor(opCtx, exec.get(), args.isRemove());
if (!advanceStatus.isOK()) {
- appendCommandStatus(result, advanceStatus.getStatus());
+ CommandHelpers::appendCommandStatus(result, advanceStatus.getStatus());
return false;
}
// Nothing after advancing the plan executor should throw a WriteConflictException,
diff --git a/src/mongo/db/commands/find_cmd.cpp b/src/mongo/db/commands/find_cmd.cpp
index 9d73ecc39fa..b3294bf8d6c 100644
--- a/src/mongo/db/commands/find_cmd.cpp
+++ b/src/mongo/db/commands/find_cmd.cpp
@@ -121,7 +121,7 @@ public:
if (!authSession->isAuthorizedToParseNamespaceElement(cmdObj.firstElement())) {
return Status(ErrorCodes::Unauthorized, "Unauthorized");
}
- const NamespaceString nss(parseNsOrUUID(opCtx, dbname, cmdObj));
+ const NamespaceString nss(CommandHelpers::parseNsOrUUID(opCtx, dbname, cmdObj));
auto hasTerm = cmdObj.hasField(kTermField);
return authSession->checkAuthForFind(nss, hasTerm);
}
@@ -233,7 +233,7 @@ public:
auto qrStatus = QueryRequest::makeFromFindCommand(
NamespaceString(parseNs(dbname, cmdObj)), cmdObj, isExplain);
if (!qrStatus.isOK()) {
- return appendCommandStatus(result, qrStatus.getStatus());
+ return CommandHelpers::appendCommandStatus(result, qrStatus.getStatus());
}
auto& qr = qrStatus.getValue();
@@ -244,14 +244,14 @@ public:
Status status = replCoord->updateTerm(opCtx, *term);
// Note: updateTerm returns ok if term stayed the same.
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
}
// Acquire locks. If the query is on a view, we release our locks and convert the query
// request into an aggregation command.
Lock::DBLock dbSLock(opCtx, dbname, MODE_IS);
- const NamespaceString nss(parseNsOrUUID(opCtx, dbname, cmdObj));
+ const NamespaceString nss(CommandHelpers::parseNsOrUUID(opCtx, dbname, cmdObj));
qr->refreshNSS(opCtx);
// Fill out curop information.
@@ -274,7 +274,7 @@ public:
MatchExpressionParser::kAllowAllSpecialFeatures &
~MatchExpressionParser::AllowedFeatures::kIsolated);
if (!statusWithCQ.isOK()) {
- return appendCommandStatus(result, statusWithCQ.getStatus());
+ return CommandHelpers::appendCommandStatus(result, statusWithCQ.getStatus());
}
std::unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
@@ -289,14 +289,15 @@ public:
const auto& qr = cq->getQueryRequest();
auto viewAggregationCommand = qr.asAggregationCommand();
if (!viewAggregationCommand.isOK())
- return appendCommandStatus(result, viewAggregationCommand.getStatus());
+ return CommandHelpers::appendCommandStatus(result,
+ viewAggregationCommand.getStatus());
- BSONObj aggResult = Command::runCommandDirectly(
+ BSONObj aggResult = CommandHelpers::runCommandDirectly(
opCtx,
OpMsgRequest::fromDBAndBody(dbname, std::move(viewAggregationCommand.getValue())));
auto status = getStatusFromCommandResult(aggResult);
if (status.code() == ErrorCodes::InvalidPipelineOperator) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
{ErrorCodes::InvalidPipelineOperator,
str::stream() << "Unsupported in view pipeline: " << status.reason()});
@@ -310,7 +311,7 @@ public:
auto statusWithPlanExecutor =
getExecutorFind(opCtx, collection, nss, std::move(cq), PlanExecutor::YIELD_AUTO);
if (!statusWithPlanExecutor.isOK()) {
- return appendCommandStatus(result, statusWithPlanExecutor.getStatus());
+ return CommandHelpers::appendCommandStatus(result, statusWithPlanExecutor.getStatus());
}
auto exec = std::move(statusWithPlanExecutor.getValue());
@@ -356,11 +357,11 @@ public:
error() << "Plan executor error during find command: " << PlanExecutor::statestr(state)
<< ", stats: " << redact(Explain::getWinningPlanStats(exec.get()));
- return appendCommandStatus(result,
- Status(ErrorCodes::OperationFailed,
- str::stream()
- << "Executor error during find command: "
- << WorkingSetCommon::toStatusString(obj)));
+ return CommandHelpers::appendCommandStatus(
+ result,
+ Status(ErrorCodes::OperationFailed,
+ str::stream() << "Executor error during find command: "
+ << WorkingSetCommon::toStatusString(obj)));
}
// Before saving the cursor, ensure that whatever plan we established happened with the
diff --git a/src/mongo/db/commands/fsync.cpp b/src/mongo/db/commands/fsync.cpp
index e4c3aca53bd..fe5945e4589 100644
--- a/src/mongo/db/commands/fsync.cpp
+++ b/src/mongo/db/commands/fsync.cpp
@@ -184,7 +184,7 @@ public:
if (!status.isOK()) {
releaseLock();
warning() << "fsyncLock failed. Lock count reset to 0. Status: " << status;
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
}
diff --git a/src/mongo/db/commands/generic.cpp b/src/mongo/db/commands/generic.cpp
index f047d9fd70d..5975a311fcc 100644
--- a/src/mongo/db/commands/generic.cpp
+++ b/src/mongo/db/commands/generic.cpp
@@ -358,7 +358,7 @@ public:
BSONObjBuilder& result) {
BSONElement val = cmdObj.firstElement();
if (val.type() != String) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
Status(ErrorCodes::TypeMismatch,
str::stream() << "Argument to getLog must be of type String; found "
@@ -429,11 +429,11 @@ public:
std::string logName;
Status status = bsonExtractStringField(cmdObj, "clearLog", &logName);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
if (logName != "global") {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result, Status(ErrorCodes::InvalidOptions, "Only the 'global' log can be cleared"));
}
RamLog* ramlog = RamLog::getIfExists(logName);
diff --git a/src/mongo/db/commands/geo_near_cmd.cpp b/src/mongo/db/commands/geo_near_cmd.cpp
index b6c957eb64e..970bdd6871a 100644
--- a/src/mongo/db/commands/geo_near_cmd.cpp
+++ b/src/mongo/db/commands/geo_near_cmd.cpp
@@ -123,7 +123,7 @@ public:
return false;
}
- const NamespaceString nss(parseNsCollectionRequired(dbname, cmdObj));
+ const NamespaceString nss(CommandHelpers::parseNsCollectionRequired(dbname, cmdObj));
AutoGetCollectionForReadCommand ctx(opCtx, nss);
Collection* collection = ctx.getCollection();
@@ -178,7 +178,7 @@ public:
Status collationEltStatus =
bsonExtractTypedField(cmdObj, "collation", BSONType::Object, &collationElt);
if (!collationEltStatus.isOK() && (collationEltStatus != ErrorCodes::NoSuchKey)) {
- return appendCommandStatus(result, collationEltStatus);
+ return CommandHelpers::appendCommandStatus(result, collationEltStatus);
}
if (collationEltStatus.isOK()) {
collation = collationElt.Obj();
@@ -303,11 +303,11 @@ public:
log() << "Plan executor error during geoNear command: " << PlanExecutor::statestr(state)
<< ", stats: " << redact(Explain::getWinningPlanStats(exec.get()));
- return appendCommandStatus(result,
- Status(ErrorCodes::OperationFailed,
- str::stream()
- << "Executor error during geoNear command: "
- << WorkingSetCommon::toStatusString(currObj)));
+ return CommandHelpers::appendCommandStatus(
+ result,
+ Status(ErrorCodes::OperationFailed,
+ str::stream() << "Executor error during geoNear command: "
+ << WorkingSetCommon::toStatusString(currObj)));
}
PlanSummaryStats summary;
diff --git a/src/mongo/db/commands/get_last_error.cpp b/src/mongo/db/commands/get_last_error.cpp
index bcf4387d7c9..a0b66d375ef 100644
--- a/src/mongo/db/commands/get_last_error.cpp
+++ b/src/mongo/db/commands/get_last_error.cpp
@@ -176,10 +176,10 @@ public:
Status status = bsonExtractOpTimeField(cmdObj, "wOpTime", &lastOpTime);
if (!status.isOK()) {
result.append("badGLE", cmdObj);
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
} else {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
Status(ErrorCodes::TypeMismatch,
str::stream() << "Expected \"wOpTime\" field in getLastError to "
@@ -194,7 +194,7 @@ public:
FieldParser::extract(cmdObj, wElectionIdField, &electionId, &errmsg);
if (!extracted) {
result.append("badGLE", cmdObj);
- appendCommandStatus(result, false, errmsg);
+ CommandHelpers::appendCommandStatus(result, false, errmsg);
return false;
}
@@ -213,7 +213,7 @@ public:
BSONObj writeConcernDoc = ([&] {
BSONObjBuilder bob;
for (auto&& elem : cmdObj) {
- if (!Command::isGenericArgument(elem.fieldNameStringData()))
+ if (!CommandHelpers::isGenericArgument(elem.fieldNameStringData()))
bob.append(elem);
}
return bob.obj();
@@ -244,7 +244,7 @@ public:
if (!status.isOK()) {
result.append("badGLE", writeConcernDoc);
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
// Don't wait for replication if there was an error reported - this matches 2.4 behavior
@@ -300,7 +300,7 @@ public:
return true;
}
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
} cmdGetLastError;
diff --git a/src/mongo/db/commands/getmore_cmd.cpp b/src/mongo/db/commands/getmore_cmd.cpp
index 4bab002396f..360dc810f5e 100644
--- a/src/mongo/db/commands/getmore_cmd.cpp
+++ b/src/mongo/db/commands/getmore_cmd.cpp
@@ -164,7 +164,7 @@ public:
Status status = replCoord->updateTerm(opCtx, *request.term);
// Note: updateTerm returns ok if term stayed the same.
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
}
@@ -211,16 +211,17 @@ public:
Collection* collection = readLock->getCollection();
if (!collection) {
- return appendCommandStatus(result,
- Status(ErrorCodes::OperationFailed,
- "collection dropped between getMore calls"));
+ return CommandHelpers::appendCommandStatus(
+ result,
+ Status(ErrorCodes::OperationFailed,
+ "collection dropped between getMore calls"));
}
cursorManager = collection->getCursorManager();
}
auto ccPin = cursorManager->pinCursor(opCtx, request.cursorid);
if (!ccPin.isOK()) {
- return appendCommandStatus(result, ccPin.getStatus());
+ return CommandHelpers::appendCommandStatus(result, ccPin.getStatus());
}
ClientCursor* cursor = ccPin.getValue().getCursor();
@@ -241,7 +242,7 @@ public:
// authenticated in order to run getMore on the cursor.
if (!AuthorizationSession::get(opCtx->getClient())
->isCoauthorizedWith(cursor->getAuthenticatedUsers())) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
Status(ErrorCodes::Unauthorized,
str::stream() << "cursor id " << request.cursorid
@@ -249,7 +250,7 @@ public:
}
if (request.nss != cursor->nss()) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
Status(ErrorCodes::Unauthorized,
str::stream() << "Requested getMore on namespace '" << request.nss.ns()
@@ -258,7 +259,7 @@ public:
}
if (request.nss.isOplog() && MONGO_FAIL_POINT(rsStopGetMoreCmd)) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
Status(ErrorCodes::CommandFailed,
str::stream() << "getMore on " << request.nss.ns()
@@ -273,7 +274,7 @@ public:
if (request.awaitDataTimeout && !cursor->isAwaitData()) {
Status status(ErrorCodes::BadValue,
"cannot set maxTimeMS on getMore command for a non-awaitData cursor");
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
// On early return, get rid of the cursor.
@@ -343,7 +344,7 @@ public:
Status batchStatus = generateBatch(opCtx, cursor, request, &nextBatch, &state, &numResults);
if (!batchStatus.isOK()) {
- return appendCommandStatus(result, batchStatus);
+ return CommandHelpers::appendCommandStatus(result, batchStatus);
}
PlanSummaryStats postExecutionStats;
@@ -397,7 +398,7 @@ public:
StatusWith<GetMoreRequest> parsedRequest = GetMoreRequest::parseFromBSON(dbname, cmdObj);
if (!parsedRequest.isOK()) {
- return appendCommandStatus(result, parsedRequest.getStatus());
+ return CommandHelpers::appendCommandStatus(result, parsedRequest.getStatus());
}
auto request = parsedRequest.getValue();
return runParsed(opCtx, request.nss, request, cmdObj, result);
diff --git a/src/mongo/db/commands/group_cmd.cpp b/src/mongo/db/commands/group_cmd.cpp
index 1bdcb3e70a9..5eb751a2991 100644
--- a/src/mongo/db/commands/group_cmd.cpp
+++ b/src/mongo/db/commands/group_cmd.cpp
@@ -159,7 +159,7 @@ private:
GroupRequest groupRequest;
Status parseRequestStatus = _parseRequest(dbname, cmdObj, &groupRequest);
if (!parseRequestStatus.isOK()) {
- return appendCommandStatus(result, parseRequestStatus);
+ return CommandHelpers::appendCommandStatus(result, parseRequestStatus);
}
AutoGetCollectionForReadCommand ctx(opCtx, groupRequest.ns);
@@ -168,7 +168,7 @@ private:
auto statusWithPlanExecutor =
getExecutorGroup(opCtx, coll, groupRequest, PlanExecutor::YIELD_AUTO);
if (!statusWithPlanExecutor.isOK()) {
- return appendCommandStatus(result, statusWithPlanExecutor.getStatus());
+ return CommandHelpers::appendCommandStatus(result, statusWithPlanExecutor.getStatus());
}
auto planExecutor = std::move(statusWithPlanExecutor.getValue());
@@ -186,13 +186,15 @@ private:
invariant(PlanExecutor::FAILURE == state || PlanExecutor::DEAD == state);
if (WorkingSetCommon::isValidStatusMemberObject(retval)) {
- return appendCommandStatus(result, WorkingSetCommon::getMemberObjectStatus(retval));
+ return CommandHelpers::appendCommandStatus(
+ result, WorkingSetCommon::getMemberObjectStatus(retval));
}
- return appendCommandStatus(result,
- Status(ErrorCodes::BadValue,
- str::stream() << "error encountered during group "
- << "operation, executor returned "
- << PlanExecutor::statestr(state)));
+ return CommandHelpers::appendCommandStatus(
+ result,
+ Status(ErrorCodes::BadValue,
+ str::stream() << "error encountered during group "
+ << "operation, executor returned "
+ << PlanExecutor::statestr(state)));
}
invariant(planExecutor->isEOF());
diff --git a/src/mongo/db/commands/haystack.cpp b/src/mongo/db/commands/haystack.cpp
index de048b6f4c6..1f7dd0fcdf6 100644
--- a/src/mongo/db/commands/haystack.cpp
+++ b/src/mongo/db/commands/haystack.cpp
@@ -101,7 +101,7 @@ public:
const BSONObj& cmdObj,
string& errmsg,
BSONObjBuilder& result) {
- const NamespaceString nss = parseNsCollectionRequired(dbname, cmdObj);
+ const NamespaceString nss = CommandHelpers::parseNsCollectionRequired(dbname, cmdObj);
AutoGetCollectionForReadCommand ctx(opCtx, nss);
diff --git a/src/mongo/db/commands/index_filter_commands.cpp b/src/mongo/db/commands/index_filter_commands.cpp
index 9945beb82f5..d8187c00861 100644
--- a/src/mongo/db/commands/index_filter_commands.cpp
+++ b/src/mongo/db/commands/index_filter_commands.cpp
@@ -119,9 +119,9 @@ bool IndexFilterCommand::run(OperationContext* opCtx,
const string& dbname,
const BSONObj& cmdObj,
BSONObjBuilder& result) {
- const NamespaceString nss(parseNsCollectionRequired(dbname, cmdObj));
+ const NamespaceString nss(CommandHelpers::parseNsCollectionRequired(dbname, cmdObj));
Status status = runIndexFilterCommand(opCtx, nss.ns(), cmdObj, &result);
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
diff --git a/src/mongo/db/commands/kill_all_sessions_by_pattern_command.cpp b/src/mongo/db/commands/kill_all_sessions_by_pattern_command.cpp
index d0f7b0d1368..45b01c712be 100644
--- a/src/mongo/db/commands/kill_all_sessions_by_pattern_command.cpp
+++ b/src/mongo/db/commands/kill_all_sessions_by_pattern_command.cpp
@@ -94,7 +94,8 @@ public:
if (serverGlobalParams.featureCompatibility.getVersion() !=
ServerGlobalParams::FeatureCompatibility::Version::kFullyUpgradedTo36) {
- return appendCommandStatus(result, SessionsCommandFCV34Status(getName()));
+ return CommandHelpers::appendCommandStatus(result,
+ SessionsCommandFCV34Status(getName()));
}
IDLParserErrorContext ctx("KillAllSessionsByPatternCmd");
@@ -113,7 +114,7 @@ public:
for (const auto& pattern : ksc.getKillAllSessionsByPattern()) {
if (pattern.getUsers() || pattern.getRoles()) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
Status(ErrorCodes::Unauthorized,
"Not authorized to impersonate in killAllSessionsByPattern"));
@@ -125,7 +126,8 @@ public:
KillAllSessionsByPatternSet patterns{ksc.getKillAllSessionsByPattern().begin(),
ksc.getKillAllSessionsByPattern().end()};
- return appendCommandStatus(result, killSessionsCmdHelper(opCtx, result, patterns));
+ return CommandHelpers::appendCommandStatus(result,
+ killSessionsCmdHelper(opCtx, result, patterns));
}
} killAllSessionsByPatternCommand;
diff --git a/src/mongo/db/commands/kill_all_sessions_command.cpp b/src/mongo/db/commands/kill_all_sessions_command.cpp
index d0ef93cb466..907ffbe51d1 100644
--- a/src/mongo/db/commands/kill_all_sessions_command.cpp
+++ b/src/mongo/db/commands/kill_all_sessions_command.cpp
@@ -94,7 +94,8 @@ public:
if (serverGlobalParams.featureCompatibility.getVersion() !=
ServerGlobalParams::FeatureCompatibility::Version::kFullyUpgradedTo36) {
- return appendCommandStatus(result, SessionsCommandFCV34Status(getName()));
+ return CommandHelpers::appendCommandStatus(result,
+ SessionsCommandFCV34Status(getName()));
}
IDLParserErrorContext ctx("KillAllSessionsCmd");
@@ -113,7 +114,8 @@ public:
}
}
- return appendCommandStatus(result, killSessionsCmdHelper(opCtx, result, patterns));
+ return CommandHelpers::appendCommandStatus(result,
+ killSessionsCmdHelper(opCtx, result, patterns));
}
} killAllSessionsCommand;
diff --git a/src/mongo/db/commands/kill_sessions_command.cpp b/src/mongo/db/commands/kill_sessions_command.cpp
index 9336cf1f588..7b0e5b1375a 100644
--- a/src/mongo/db/commands/kill_sessions_command.cpp
+++ b/src/mongo/db/commands/kill_sessions_command.cpp
@@ -117,7 +117,8 @@ public:
if (serverGlobalParams.featureCompatibility.getVersion() !=
ServerGlobalParams::FeatureCompatibility::Version::kFullyUpgradedTo36) {
- return appendCommandStatus(result, SessionsCommandFCV34Status(getName()));
+ return CommandHelpers::appendCommandStatus(result,
+ SessionsCommandFCV34Status(getName()));
}
IDLParserErrorContext ctx("KillSessionsCmd");
@@ -139,7 +140,8 @@ public:
}
}
- return appendCommandStatus(result, killSessionsCmdHelper(opCtx, result, patterns));
+ return CommandHelpers::appendCommandStatus(result,
+ killSessionsCmdHelper(opCtx, result, patterns));
}
} killSessionsCommand;
diff --git a/src/mongo/db/commands/killcursors_common.cpp b/src/mongo/db/commands/killcursors_common.cpp
index 39edb0a7c6f..51c7909f107 100644
--- a/src/mongo/db/commands/killcursors_common.cpp
+++ b/src/mongo/db/commands/killcursors_common.cpp
@@ -71,7 +71,7 @@ bool KillCursorsCmdBase::run(OperationContext* opCtx,
BSONObjBuilder& result) {
auto statusWithRequest = KillCursorsRequest::parseFromBSON(dbname, cmdObj);
if (!statusWithRequest.isOK()) {
- return appendCommandStatus(result, statusWithRequest.getStatus());
+ return CommandHelpers::appendCommandStatus(result, statusWithRequest.getStatus());
}
auto killCursorsRequest = std::move(statusWithRequest.getValue());
diff --git a/src/mongo/db/commands/list_collections.cpp b/src/mongo/db/commands/list_collections.cpp
index 20560d3a4c9..d596a1644fc 100644
--- a/src/mongo/db/commands/list_collections.cpp
+++ b/src/mongo/db/commands/list_collections.cpp
@@ -239,7 +239,7 @@ public:
BSONElement filterElt = jsobj["filter"];
if (!filterElt.eoo()) {
if (filterElt.type() != mongo::Object) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result, Status(ErrorCodes::BadValue, "\"filter\" must be an object"));
}
// The collator is null because collection objects are compared using binary comparison.
@@ -248,7 +248,7 @@ public:
StatusWithMatchExpression statusWithMatcher =
MatchExpressionParser::parse(filterElt.Obj(), std::move(expCtx));
if (!statusWithMatcher.isOK()) {
- return appendCommandStatus(result, statusWithMatcher.getStatus());
+ return CommandHelpers::appendCommandStatus(result, statusWithMatcher.getStatus());
}
matcher = std::move(statusWithMatcher.getValue());
}
@@ -258,7 +258,7 @@ public:
Status parseCursorStatus =
CursorRequest::parseCommandCursorOptions(jsobj, defaultBatchSize, &batchSize);
if (!parseCursorStatus.isOK()) {
- return appendCommandStatus(result, parseCursorStatus);
+ return CommandHelpers::appendCommandStatus(result, parseCursorStatus);
}
// Check for 'includePendingDrops' flag. The default is to not include drop-pending
@@ -268,7 +268,7 @@ public:
jsobj, "includePendingDrops", false, &includePendingDrops);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
AutoGetDb autoDb(opCtx, dbname, MODE_S);
@@ -316,7 +316,7 @@ public:
auto statusWithPlanExecutor = PlanExecutor::make(
opCtx, std::move(ws), std::move(root), cursorNss, PlanExecutor::NO_YIELD);
if (!statusWithPlanExecutor.isOK()) {
- return appendCommandStatus(result, statusWithPlanExecutor.getStatus());
+ return CommandHelpers::appendCommandStatus(result, statusWithPlanExecutor.getStatus());
}
auto exec = std::move(statusWithPlanExecutor.getValue());
diff --git a/src/mongo/db/commands/list_databases.cpp b/src/mongo/db/commands/list_databases.cpp
index aa5130c4410..0012e114c90 100644
--- a/src/mongo/db/commands/list_databases.cpp
+++ b/src/mongo/db/commands/list_databases.cpp
@@ -95,11 +95,11 @@ public:
std::unique_ptr<MatchExpression> filter;
if (auto filterElt = jsobj[kFilterField]) {
if (filterElt.type() != BSONType::Object) {
- return appendCommandStatus(result,
- {ErrorCodes::TypeMismatch,
- str::stream() << "Field '" << kFilterField
- << "' must be of type Object in: "
- << jsobj});
+ return CommandHelpers::appendCommandStatus(
+ result,
+ {ErrorCodes::TypeMismatch,
+ str::stream() << "Field '" << kFilterField << "' must be of type Object in: "
+ << jsobj});
}
// The collator is null because database metadata objects are compared using simple
// binary comparison.
@@ -108,7 +108,7 @@ public:
auto statusWithMatcher =
MatchExpressionParser::parse(filterElt.Obj(), std::move(expCtx));
if (!statusWithMatcher.isOK()) {
- return appendCommandStatus(result, statusWithMatcher.getStatus());
+ return CommandHelpers::appendCommandStatus(result, statusWithMatcher.getStatus());
}
filter = std::move(statusWithMatcher.getValue());
}
diff --git a/src/mongo/db/commands/list_indexes.cpp b/src/mongo/db/commands/list_indexes.cpp
index d94c798db00..10e4be8af33 100644
--- a/src/mongo/db/commands/list_indexes.cpp
+++ b/src/mongo/db/commands/list_indexes.cpp
@@ -104,7 +104,8 @@ public:
// Check for the listIndexes ActionType on the database, or find on system.indexes for pre
// 3.0 systems.
- const NamespaceString ns(parseNsOrUUID(client->getOperationContext(), dbname, cmdObj));
+ const NamespaceString ns(
+ CommandHelpers::parseNsOrUUID(client->getOperationContext(), dbname, cmdObj));
if (authzSession->isAuthorizedForActionsOnResource(ResourcePattern::forExactNamespace(ns),
ActionType::listIndexes) ||
authzSession->isAuthorizedForActionsOnResource(
@@ -125,25 +126,25 @@ public:
const BSONObj& cmdObj,
BSONObjBuilder& result) {
Lock::DBLock dbSLock(opCtx, dbname, MODE_IS);
- const NamespaceString ns(parseNsOrUUID(opCtx, dbname, cmdObj));
+ const NamespaceString ns(CommandHelpers::parseNsOrUUID(opCtx, dbname, cmdObj));
const long long defaultBatchSize = std::numeric_limits<long long>::max();
long long batchSize;
Status parseCursorStatus =
CursorRequest::parseCommandCursorOptions(cmdObj, defaultBatchSize, &batchSize);
if (!parseCursorStatus.isOK()) {
- return appendCommandStatus(result, parseCursorStatus);
+ return CommandHelpers::appendCommandStatus(result, parseCursorStatus);
}
AutoGetCollectionForReadCommand autoColl(opCtx, ns, std::move(dbSLock));
if (!autoColl.getDb()) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
Status(ErrorCodes::NamespaceNotFound, "Database " + ns.db() + " doesn't exist"));
}
const Collection* collection = autoColl.getCollection();
if (!collection) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
Status(ErrorCodes::NamespaceNotFound, "Collection " + ns.ns() + " doesn't exist"));
}
@@ -181,7 +182,7 @@ public:
auto statusWithPlanExecutor = PlanExecutor::make(
opCtx, std::move(ws), std::move(root), cursorNss, PlanExecutor::NO_YIELD);
if (!statusWithPlanExecutor.isOK()) {
- return appendCommandStatus(result, statusWithPlanExecutor.getStatus());
+ return CommandHelpers::appendCommandStatus(result, statusWithPlanExecutor.getStatus());
}
auto exec = std::move(statusWithPlanExecutor.getValue());
diff --git a/src/mongo/db/commands/mr.cpp b/src/mongo/db/commands/mr.cpp
index 90ee22d85a0..03196350ca2 100644
--- a/src/mongo/db/commands/mr.cpp
+++ b/src/mongo/db/commands/mr.cpp
@@ -1395,7 +1395,7 @@ public:
auto client = opCtx->getClient();
if (client->isInDirectClient()) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
Status(ErrorCodes::IllegalOperation, "Cannot run mapReduce command from eval()"));
}
@@ -1422,7 +1422,7 @@ public:
BSONObjBuilder timingBuilder;
State state(opCtx, config);
if (!state.sourceExists()) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
Status(ErrorCodes::NamespaceNotFound,
str::stream() << "namespace does not exist: " << config.nss.ns()));
@@ -1565,7 +1565,7 @@ public:
auto restoreStatus = exec->restoreState();
if (!restoreStatus.isOK()) {
- return appendCommandStatus(result, restoreStatus);
+ return CommandHelpers::appendCommandStatus(result, restoreStatus);
}
reduceTime += t.micros();
@@ -1580,7 +1580,7 @@ public:
}
if (PlanExecutor::DEAD == execState || PlanExecutor::FAILURE == execState) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
Status(ErrorCodes::OperationFailed,
str::stream() << "Executor error during mapReduce command: "
@@ -1712,7 +1712,7 @@ public:
const BSONObj& cmdObj,
BSONObjBuilder& result) {
if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
Status(ErrorCodes::CommandNotSupported,
str::stream() << "Can not execute mapReduce with output database " << dbname
@@ -1791,7 +1791,8 @@ public:
auto outRoutingInfoStatus = Grid::get(opCtx)->catalogCache()->getCollectionRoutingInfo(
opCtx, config.outputOptions.finalNamespace);
if (!outRoutingInfoStatus.isOK()) {
- return appendCommandStatus(result, outRoutingInfoStatus.getStatus());
+ return CommandHelpers::appendCommandStatus(result,
+ outRoutingInfoStatus.getStatus());
}
if (auto cm = outRoutingInfoStatus.getValue().cm()) {
diff --git a/src/mongo/db/commands/oplog_application_checks.cpp b/src/mongo/db/commands/oplog_application_checks.cpp
index 1fbbe738a33..4ef23c19a2c 100644
--- a/src/mongo/db/commands/oplog_application_checks.cpp
+++ b/src/mongo/db/commands/oplog_application_checks.cpp
@@ -76,7 +76,7 @@ Status OplogApplicationChecks::checkOperationAuthorization(OperationContext* opC
if (opType == "c"_sd) {
StringData commandName = o.firstElement().fieldNameStringData();
- Command* commandInOplogEntry = Command::findCommand(commandName);
+ Command* commandInOplogEntry = CommandHelpers::findCommand(commandName);
if (!commandInOplogEntry) {
return Status(ErrorCodes::FailedToParse, "Unrecognized command in op");
}
diff --git a/src/mongo/db/commands/oplog_note.cpp b/src/mongo/db/commands/oplog_note.cpp
index 914da7721ff..6ebbc1e28bd 100644
--- a/src/mongo/db/commands/oplog_note.cpp
+++ b/src/mongo/db/commands/oplog_note.cpp
@@ -119,15 +119,16 @@ public:
BSONObjBuilder& result) {
auto replCoord = repl::ReplicationCoordinator::get(opCtx);
if (!replCoord->isReplEnabled()) {
- return appendCommandStatus(result,
- {ErrorCodes::NoReplicationEnabled,
- "Must have replication set up to run \"appendOplogNote\""});
+ return CommandHelpers::appendCommandStatus(
+ result,
+ {ErrorCodes::NoReplicationEnabled,
+ "Must have replication set up to run \"appendOplogNote\""});
}
BSONElement dataElement;
auto dataStatus = bsonExtractTypedField(cmdObj, "data", Object, &dataElement);
if (!dataStatus.isOK()) {
- return appendCommandStatus(result, dataStatus);
+ return CommandHelpers::appendCommandStatus(result, dataStatus);
}
Timestamp maxClusterTime;
@@ -136,22 +137,23 @@ public:
if (!maxClusterTimeStatus.isOK()) {
if (maxClusterTimeStatus == ErrorCodes::NoSuchKey) { // no need to use maxClusterTime
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result, _performNoopWrite(opCtx, dataElement.Obj(), "appendOpLogNote"));
}
- return appendCommandStatus(result, maxClusterTimeStatus);
+ return CommandHelpers::appendCommandStatus(result, maxClusterTimeStatus);
}
auto lastAppliedOpTime = replCoord->getMyLastAppliedOpTime().getTimestamp();
if (maxClusterTime > lastAppliedOpTime) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result, _performNoopWrite(opCtx, dataElement.Obj(), "appendOpLogNote"));
} else {
std::stringstream ss;
ss << "Requested maxClusterTime " << LogicalTime(maxClusterTime).toString()
<< " is less or equal to the last primary OpTime: "
<< LogicalTime(lastAppliedOpTime).toString();
- return appendCommandStatus(result, {ErrorCodes::StaleClusterTime, ss.str()});
+ return CommandHelpers::appendCommandStatus(result,
+ {ErrorCodes::StaleClusterTime, ss.str()});
}
}
};
diff --git a/src/mongo/db/commands/parallel_collection_scan.cpp b/src/mongo/db/commands/parallel_collection_scan.cpp
index 533daa6ab94..ec72bc03324 100644
--- a/src/mongo/db/commands/parallel_collection_scan.cpp
+++ b/src/mongo/db/commands/parallel_collection_scan.cpp
@@ -83,7 +83,7 @@ public:
return Status(ErrorCodes::Unauthorized, "Unauthorized");
}
- const NamespaceString ns(parseNsOrUUID(opCtx, dbname, cmdObj));
+ const NamespaceString ns(CommandHelpers::parseNsOrUUID(opCtx, dbname, cmdObj));
if (!authSession->isAuthorizedForActionsOnNamespace(ns, ActionType::find)) {
return Status(ErrorCodes::Unauthorized, "Unauthorized");
}
@@ -96,25 +96,26 @@ public:
const BSONObj& cmdObj,
BSONObjBuilder& result) {
Lock::DBLock dbSLock(opCtx, dbname, MODE_IS);
- const NamespaceString ns(parseNsOrUUID(opCtx, dbname, cmdObj));
+ const NamespaceString ns(CommandHelpers::parseNsOrUUID(opCtx, dbname, cmdObj));
AutoGetCollectionForReadCommand ctx(opCtx, ns, std::move(dbSLock));
Collection* collection = ctx.getCollection();
if (!collection)
- return appendCommandStatus(result,
- Status(ErrorCodes::NamespaceNotFound,
- str::stream() << "ns does not exist: " << ns.ns()));
+ return CommandHelpers::appendCommandStatus(
+ result,
+ Status(ErrorCodes::NamespaceNotFound,
+ str::stream() << "ns does not exist: " << ns.ns()));
size_t numCursors = static_cast<size_t>(cmdObj["numCursors"].numberInt());
if (numCursors == 0 || numCursors > 10000)
- return appendCommandStatus(result,
- Status(ErrorCodes::BadValue,
- str::stream()
- << "numCursors has to be between 1 and 10000"
- << " was: "
- << numCursors));
+ return CommandHelpers::appendCommandStatus(
+ result,
+ Status(ErrorCodes::BadValue,
+ str::stream() << "numCursors has to be between 1 and 10000"
+ << " was: "
+ << numCursors));
std::vector<std::unique_ptr<RecordCursor>> iterators;
// Opening multiple cursors on a capped collection and reading them in parallel can produce
diff --git a/src/mongo/db/commands/parameters.cpp b/src/mongo/db/commands/parameters.cpp
index f75a440bb77..224b0cb0fcc 100644
--- a/src/mongo/db/commands/parameters.cpp
+++ b/src/mongo/db/commands/parameters.cpp
@@ -162,7 +162,7 @@ public:
while (parameterCheckIterator.more()) {
BSONElement parameter = parameterCheckIterator.next();
std::string parameterName = parameter.fieldName();
- if (Command::isGenericArgument(parameterName))
+ if (CommandHelpers::isGenericArgument(parameterName))
continue;
ServerParameter::Map::const_iterator foundParameter = parameterMap.find(parameterName);
diff --git a/src/mongo/db/commands/pipeline_command.cpp b/src/mongo/db/commands/pipeline_command.cpp
index 68c74a0e865..7f2586f5827 100644
--- a/src/mongo/db/commands/pipeline_command.cpp
+++ b/src/mongo/db/commands/pipeline_command.cpp
@@ -88,12 +88,13 @@ public:
const auto aggregationRequest =
uassertStatusOK(AggregationRequest::parseFromBSON(dbname, cmdObj, boost::none));
- return appendCommandStatus(result,
- runAggregate(opCtx,
- aggregationRequest.getNamespaceString(),
- aggregationRequest,
- cmdObj,
- result));
+ return CommandHelpers::appendCommandStatus(
+ result,
+ runAggregate(opCtx,
+ aggregationRequest.getNamespaceString(),
+ aggregationRequest,
+ cmdObj,
+ result));
}
Status explain(OperationContext* opCtx,
diff --git a/src/mongo/db/commands/plan_cache_commands.cpp b/src/mongo/db/commands/plan_cache_commands.cpp
index 002a4e9d94d..fc8ef25f39e 100644
--- a/src/mongo/db/commands/plan_cache_commands.cpp
+++ b/src/mongo/db/commands/plan_cache_commands.cpp
@@ -114,9 +114,9 @@ bool PlanCacheCommand::run(OperationContext* opCtx,
const string& dbname,
const BSONObj& cmdObj,
BSONObjBuilder& result) {
- const NamespaceString nss(parseNsCollectionRequired(dbname, cmdObj));
+ const NamespaceString nss(CommandHelpers::parseNsCollectionRequired(dbname, cmdObj));
Status status = runPlanCacheCommand(opCtx, nss.ns(), cmdObj, &result);
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
diff --git a/src/mongo/db/commands/reap_logical_session_cache_now.cpp b/src/mongo/db/commands/reap_logical_session_cache_now.cpp
index a1817d4f24e..50abcd781ad 100644
--- a/src/mongo/db/commands/reap_logical_session_cache_now.cpp
+++ b/src/mongo/db/commands/reap_logical_session_cache_now.cpp
@@ -76,7 +76,7 @@ public:
auto res = cache->reapNow(client);
if (!res.isOK()) {
- return appendCommandStatus(result, res);
+ return CommandHelpers::appendCommandStatus(result, res);
}
return true;
diff --git a/src/mongo/db/commands/refresh_logical_session_cache_now.cpp b/src/mongo/db/commands/refresh_logical_session_cache_now.cpp
index 8629d98e287..7f21f14b952 100644
--- a/src/mongo/db/commands/refresh_logical_session_cache_now.cpp
+++ b/src/mongo/db/commands/refresh_logical_session_cache_now.cpp
@@ -81,7 +81,8 @@ public:
if (serverGlobalParams.featureCompatibility.getVersion() !=
ServerGlobalParams::FeatureCompatibility::Version::kFullyUpgradedTo36) {
- return appendCommandStatus(result, SessionsCommandFCV34Status(getName()));
+ return CommandHelpers::appendCommandStatus(result,
+ SessionsCommandFCV34Status(getName()));
}
auto cache = LogicalSessionCache::get(opCtx);
@@ -89,7 +90,7 @@ public:
auto res = cache->refreshNow(client);
if (!res.isOK()) {
- return appendCommandStatus(result, res);
+ return CommandHelpers::appendCommandStatus(result, res);
}
return true;
diff --git a/src/mongo/db/commands/refresh_sessions_command.cpp b/src/mongo/db/commands/refresh_sessions_command.cpp
index 8e65c32fa9d..3f1d3d932cd 100644
--- a/src/mongo/db/commands/refresh_sessions_command.cpp
+++ b/src/mongo/db/commands/refresh_sessions_command.cpp
@@ -86,7 +86,8 @@ public:
if (serverGlobalParams.featureCompatibility.getVersion() !=
ServerGlobalParams::FeatureCompatibility::Version::kFullyUpgradedTo36) {
- return appendCommandStatus(result, SessionsCommandFCV34Status(getName()));
+ return CommandHelpers::appendCommandStatus(result,
+ SessionsCommandFCV34Status(getName()));
}
IDLParserErrorContext ctx("RefreshSessionsCmdFromClient");
@@ -94,7 +95,7 @@ public:
auto res =
LogicalSessionCache::get(opCtx->getServiceContext())->refreshSessions(opCtx, cmd);
if (!res.isOK()) {
- return appendCommandStatus(result, res);
+ return CommandHelpers::appendCommandStatus(result, res);
}
return true;
diff --git a/src/mongo/db/commands/refresh_sessions_command_internal.cpp b/src/mongo/db/commands/refresh_sessions_command_internal.cpp
index 1ce8702a1db..74e36cf5462 100644
--- a/src/mongo/db/commands/refresh_sessions_command_internal.cpp
+++ b/src/mongo/db/commands/refresh_sessions_command_internal.cpp
@@ -79,7 +79,7 @@ public:
auto res =
LogicalSessionCache::get(opCtx->getServiceContext())->refreshSessions(opCtx, cmd);
if (!res.isOK()) {
- return appendCommandStatus(result, res);
+ return CommandHelpers::appendCommandStatus(result, res);
}
return true;
diff --git a/src/mongo/db/commands/rename_collection_cmd.cpp b/src/mongo/db/commands/rename_collection_cmd.cpp
index 6385dcc5d0f..c4217c02e6e 100644
--- a/src/mongo/db/commands/rename_collection_cmd.cpp
+++ b/src/mongo/db/commands/rename_collection_cmd.cpp
@@ -145,16 +145,18 @@ public:
}
if (source.isAdminDotSystemDotVersion()) {
- appendCommandStatus(result,
- Status(ErrorCodes::IllegalOperation,
- "renaming admin.system.version is not allowed"));
+ CommandHelpers::appendCommandStatus(
+ result,
+ Status(ErrorCodes::IllegalOperation,
+ "renaming admin.system.version is not allowed"));
return false;
}
RenameCollectionOptions options;
options.dropTarget = cmdObj["dropTarget"].trueValue();
options.stayTemp = cmdObj["stayTemp"].trueValue();
- return appendCommandStatus(result, renameCollection(opCtx, source, target, options));
+ return CommandHelpers::appendCommandStatus(
+ result, renameCollection(opCtx, source, target, options));
}
} cmdrenamecollection;
diff --git a/src/mongo/db/commands/repair_cursor.cpp b/src/mongo/db/commands/repair_cursor.cpp
index 5154731e423..51567abbabc 100644
--- a/src/mongo/db/commands/repair_cursor.cpp
+++ b/src/mongo/db/commands/repair_cursor.cpp
@@ -77,13 +77,13 @@ public:
Collection* collection = ctx.getCollection();
if (!collection) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result, Status(ErrorCodes::NamespaceNotFound, "ns does not exist: " + ns.ns()));
}
auto cursor = collection->getRecordStore()->getCursorForRepair(opCtx);
if (!cursor) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result, Status(ErrorCodes::CommandNotSupported, "repair iterator not supported"));
}
diff --git a/src/mongo/db/commands/resize_oplog.cpp b/src/mongo/db/commands/resize_oplog.cpp
index e8d34e251a9..7c85812ba14 100644
--- a/src/mongo/db/commands/resize_oplog.cpp
+++ b/src/mongo/db/commands/resize_oplog.cpp
@@ -89,20 +89,20 @@ public:
Lock::GlobalWrite global(opCtx);
Database* database = dbHolder().get(opCtx, nss.db());
if (!database) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result, Status(ErrorCodes::NamespaceNotFound, "database local does not exist"));
}
Collection* coll = database->getCollection(opCtx, nss);
if (!coll) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result, Status(ErrorCodes::NamespaceNotFound, "oplog does not exist"));
}
if (!coll->isCapped()) {
- return appendCommandStatus(result,
- Status(ErrorCodes::IllegalOperation, "oplog isn't capped"));
+ return CommandHelpers::appendCommandStatus(
+ result, Status(ErrorCodes::IllegalOperation, "oplog isn't capped"));
}
if (!jsobj["size"].isNumber()) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
Status(ErrorCodes::InvalidOptions, "invalid size field, size should be a number"));
}
@@ -110,19 +110,19 @@ public:
long long sizeMb = jsobj["size"].numberLong();
long long size = sizeMb * 1024 * 1024;
if (sizeMb < 990L) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result, Status(ErrorCodes::InvalidOptions, "oplog size should be 990MB at least"));
}
WriteUnitOfWork wunit(opCtx);
Status status = coll->getRecordStore()->updateCappedSize(opCtx, size);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
CollectionCatalogEntry* entry = coll->getCatalogEntry();
entry->updateCappedSize(opCtx, size);
wunit.commit();
LOG(0) << "replSetResizeOplog success, currentSize:" << size;
- return appendCommandStatus(result, Status::OK());
+ return CommandHelpers::appendCommandStatus(result, Status::OK());
}
} cmdReplSetResizeOplog;
}
diff --git a/src/mongo/db/commands/set_feature_compatibility_version_command.cpp b/src/mongo/db/commands/set_feature_compatibility_version_command.cpp
index edf382aede7..0819e15656f 100644
--- a/src/mongo/db/commands/set_feature_compatibility_version_command.cpp
+++ b/src/mongo/db/commands/set_feature_compatibility_version_command.cpp
@@ -127,7 +127,7 @@ public:
WriteConcernOptions(
WriteConcernOptions::kMajority, WriteConcernOptions::SyncMode::UNSET, timeout),
&res);
- Command::appendCommandWCStatus(result, waitForWCStatus, res);
+ CommandHelpers::appendCommandWCStatus(result, waitForWCStatus, res);
});
// Only allow one instance of setFeatureCompatibilityVersion to run at a time.
@@ -168,9 +168,11 @@ public:
uassertStatusOK(
ShardingCatalogManager::get(opCtx)->setFeatureCompatibilityVersionOnShards(
opCtx,
- Command::appendMajorityWriteConcern(Command::appendPassthroughFields(
- cmdObj,
- BSON(FeatureCompatibilityVersion::kCommandName << requestedVersion)))));
+ CommandHelpers::appendMajorityWriteConcern(
+ CommandHelpers::appendPassthroughFields(
+ cmdObj,
+ BSON(FeatureCompatibilityVersion::kCommandName
+ << requestedVersion)))));
}
if (ShardingState::get(opCtx)->enabled()) {
@@ -216,9 +218,11 @@ public:
uassertStatusOK(
ShardingCatalogManager::get(opCtx)->setFeatureCompatibilityVersionOnShards(
opCtx,
- Command::appendMajorityWriteConcern(Command::appendPassthroughFields(
- cmdObj,
- BSON(FeatureCompatibilityVersion::kCommandName << requestedVersion)))));
+ CommandHelpers::appendMajorityWriteConcern(
+ CommandHelpers::appendPassthroughFields(
+ cmdObj,
+ BSON(FeatureCompatibilityVersion::kCommandName
+ << requestedVersion)))));
// Stop the background key generator thread from running before trying to drop the
// collection so we know the key won't just be recreated.
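
Here the command forwarded to the shards is now assembled with CommandHelpers::appendPassthroughFields and CommandHelpers::appendMajorityWriteConcern instead of the old Command statics. Below is a rough, self-contained sketch of what that composition amounts to, under the assumption that appendPassthroughFields copies the request's generic fields onto a new command body and appendMajorityWriteConcern tacks on a majority write concern; the map-based "BSON" stand-ins and the pass-through field set are purely illustrative.

    #include <iostream>
    #include <map>
    #include <string>

    using ToyObj = std::map<std::string, std::string>;  // stand-in for BSONObj

    // Assumed behaviour: copy pass-through fields from the original request onto the
    // new command body (the field set here is made up for the example).
    static ToyObj appendPassthroughFieldsSketch(const ToyObj& cmdObj, ToyObj body) {
        for (const auto& kv : cmdObj) {
            if (kv.first == "maxTimeMS" || kv.first == "lsid")
                body.insert(kv);
        }
        return body;
    }

    // Assumed behaviour: attach a majority write concern to the command.
    static ToyObj appendMajorityWriteConcernSketch(ToyObj cmd) {
        cmd["writeConcern"] = "{w: majority}";
        return cmd;
    }

    int main() {
        const ToyObj original = {{"setFeatureCompatibilityVersion", "3.6"}, {"maxTimeMS", "30000"}};
        ToyObj forwarded = appendMajorityWriteConcernSketch(
            appendPassthroughFieldsSketch(original, {{"setFeatureCompatibilityVersion", "3.6"}}));
        for (const auto& kv : forwarded)
            std::cout << kv.first << " -> " << kv.second << '\n';
        return 0;
    }
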
diff --git a/src/mongo/db/commands/snapshot_management.cpp b/src/mongo/db/commands/snapshot_management.cpp
index 5bad41d15eb..349befd56f1 100644
--- a/src/mongo/db/commands/snapshot_management.cpp
+++ b/src/mongo/db/commands/snapshot_management.cpp
@@ -72,7 +72,8 @@ public:
auto snapshotManager =
getGlobalServiceContext()->getGlobalStorageEngine()->getSnapshotManager();
if (!snapshotManager) {
- return appendCommandStatus(result, {ErrorCodes::CommandNotSupported, ""});
+ return CommandHelpers::appendCommandStatus(result,
+ {ErrorCodes::CommandNotSupported, ""});
}
Lock::GlobalLock lk(opCtx, MODE_IX, UINT_MAX);
@@ -82,7 +83,7 @@ public:
const auto name = repl::ReplicationCoordinator::get(opCtx)->reserveSnapshotName(opCtx);
result.append("name", static_cast<long long>(name.asULL()));
}
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
};
@@ -118,7 +119,8 @@ public:
auto snapshotManager =
getGlobalServiceContext()->getGlobalStorageEngine()->getSnapshotManager();
if (!snapshotManager) {
- return appendCommandStatus(result, {ErrorCodes::CommandNotSupported, ""});
+ return CommandHelpers::appendCommandStatus(result,
+ {ErrorCodes::CommandNotSupported, ""});
}
Lock::GlobalLock lk(opCtx, MODE_IX, UINT_MAX);
diff --git a/src/mongo/db/commands/start_session_command.cpp b/src/mongo/db/commands/start_session_command.cpp
index 70b153b6789..9d03da70fbe 100644
--- a/src/mongo/db/commands/start_session_command.cpp
+++ b/src/mongo/db/commands/start_session_command.cpp
@@ -82,7 +82,8 @@ public:
if (serverGlobalParams.featureCompatibility.getVersion() !=
ServerGlobalParams::FeatureCompatibility::Version::kFullyUpgradedTo36) {
- return appendCommandStatus(result, SessionsCommandFCV34Status(getName()));
+ return CommandHelpers::appendCommandStatus(result,
+ SessionsCommandFCV34Status(getName()));
}
auto client = opCtx->getClient();
@@ -96,7 +97,7 @@ public:
} catch (...) {
auto status = exceptionToStatus();
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
lsCache->startSession(opCtx, record.get());
diff --git a/src/mongo/db/commands/test_commands.cpp b/src/mongo/db/commands/test_commands.cpp
index 0b3d1e2091d..2dd96eeee8c 100644
--- a/src/mongo/db/commands/test_commands.cpp
+++ b/src/mongo/db/commands/test_commands.cpp
@@ -80,7 +80,7 @@ public:
const BSONObj& cmdObj,
string& errmsg,
BSONObjBuilder& result) {
- const NamespaceString nss(parseNsCollectionRequired(dbname, cmdObj));
+ const NamespaceString nss(CommandHelpers::parseNsCollectionRequired(dbname, cmdObj));
log() << "test only command godinsert invoked coll:" << nss.coll();
BSONObj obj = cmdObj["obj"].embeddedObjectUserCheck();
@@ -103,7 +103,7 @@ public:
if (status.isOK()) {
wunit.commit();
}
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
};
@@ -215,9 +215,9 @@ public:
const string& dbname,
const BSONObj& cmdObj,
BSONObjBuilder& result) {
- const NamespaceString fullNs = parseNsCollectionRequired(dbname, cmdObj);
+ const NamespaceString fullNs = CommandHelpers::parseNsCollectionRequired(dbname, cmdObj);
if (!fullNs.isValid()) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
{ErrorCodes::InvalidNamespace,
str::stream() << "collection name " << fullNs.ns() << " is not valid"});
@@ -227,23 +227,23 @@ public:
bool inc = cmdObj.getBoolField("inc"); // inclusive range?
if (n <= 0) {
- return appendCommandStatus(result,
- {ErrorCodes::BadValue, "n must be a positive integer"});
+ return CommandHelpers::appendCommandStatus(
+ result, {ErrorCodes::BadValue, "n must be a positive integer"});
}
// Lock the database in mode IX and lock the collection exclusively.
AutoGetCollection autoColl(opCtx, fullNs, MODE_IX, MODE_X);
Collection* collection = autoColl.getCollection();
if (!collection) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
{ErrorCodes::NamespaceNotFound,
str::stream() << "collection " << fullNs.ns() << " does not exist"});
}
if (!collection->isCapped()) {
- return appendCommandStatus(result,
- {ErrorCodes::IllegalOperation, "collection must be capped"});
+ return CommandHelpers::appendCommandStatus(
+ result, {ErrorCodes::IllegalOperation, "collection must be capped"});
}
RecordId end;
@@ -257,7 +257,7 @@ public:
for (int i = 0; i < n + 1; ++i) {
PlanExecutor::ExecState state = exec->getNext(nullptr, &end);
if (PlanExecutor::ADVANCED != state) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
{ErrorCodes::IllegalOperation,
str::stream() << "invalid n, collection contains fewer than " << n
@@ -291,9 +291,9 @@ public:
const string& dbname,
const BSONObj& cmdObj,
BSONObjBuilder& result) {
- const NamespaceString nss = parseNsCollectionRequired(dbname, cmdObj);
+ const NamespaceString nss = CommandHelpers::parseNsCollectionRequired(dbname, cmdObj);
- return appendCommandStatus(result, emptyCapped(opCtx, nss));
+ return CommandHelpers::appendCommandStatus(result, emptyCapped(opCtx, nss));
}
};
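
test_commands.cpp now resolves its target namespace through CommandHelpers::parseNsCollectionRequired. A tiny standalone approximation of that dbname-plus-command-object step is sketched below (not part of this patch); the error handling and the plain std::string are simplified stand-ins for uassert and NamespaceString.

    #include <iostream>
    #include <stdexcept>
    #include <string>

    // Sketch: combine the database name with the collection named by the command's
    // first element, and insist that a collection name is actually present.
    static std::string parseNsCollectionRequiredSketch(const std::string& dbname,
                                                       const std::string& firstElementValue) {
        if (firstElementValue.empty())
            throw std::invalid_argument("collection name required");
        return dbname + "." + firstElementValue;  // e.g. "test" + "coll" -> "test.coll"
    }

    int main() {
        // Stand-in for a command object such as {godinsert: "coll", obj: {...}}.
        std::cout << parseNsCollectionRequiredSketch("test", "coll") << '\n';  // prints test.coll
        return 0;
    }
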
diff --git a/src/mongo/db/commands/touch.cpp b/src/mongo/db/commands/touch.cpp
index fe7dd6b0cf5..2d430378475 100644
--- a/src/mongo/db/commands/touch.cpp
+++ b/src/mongo/db/commands/touch.cpp
@@ -87,7 +87,7 @@ public:
const BSONObj& cmdObj,
string& errmsg,
BSONObjBuilder& result) {
- const NamespaceString nss = parseNsCollectionRequired(dbname, cmdObj);
+ const NamespaceString nss = CommandHelpers::parseNsCollectionRequired(dbname, cmdObj);
if (!nss.isNormal()) {
errmsg = "bad namespace name";
return false;
@@ -109,8 +109,8 @@ public:
return false;
}
- return appendCommandStatus(result,
- collection->touch(opCtx, touch_data, touch_indexes, &result));
+ return CommandHelpers::appendCommandStatus(
+ result, collection->touch(opCtx, touch_data, touch_indexes, &result));
}
};
static TouchCmd touchCmd;
diff --git a/src/mongo/db/commands/user_management_commands.cpp b/src/mongo/db/commands/user_management_commands.cpp
index 5a4419bedff..0ad3dff7fc2 100644
--- a/src/mongo/db/commands/user_management_commands.cpp
+++ b/src/mongo/db/commands/user_management_commands.cpp
@@ -630,16 +630,16 @@ public:
auth::CreateOrUpdateUserArgs args;
Status status = auth::parseCreateOrUpdateUserCommands(cmdObj, "createUser", dbname, &args);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
if (args.userName.getDB() == "local") {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result, Status(ErrorCodes::BadValue, "Cannot create users in the local database"));
}
if (!args.hasHashedPassword && args.userName.getDB() != "$external") {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
Status(ErrorCodes::BadValue,
"Must provide a 'pwd' field for all user documents, except those"
@@ -647,7 +647,7 @@ public:
}
if ((args.hasHashedPassword) && args.userName.getDB() == "$external") {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
Status(ErrorCodes::BadValue,
"Cannot set the password for users defined on the '$external' "
@@ -655,7 +655,7 @@ public:
}
if (!args.hasRoles) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
Status(ErrorCodes::BadValue, "\"createUser\" command requires a \"roles\" array"));
}
@@ -663,11 +663,12 @@ public:
#ifdef MONGO_CONFIG_SSL
if (args.userName.getDB() == "$external" && getSSLManager() &&
getSSLManager()->getSSLConfiguration().isClusterMember(args.userName.getUser())) {
- return appendCommandStatus(result,
- Status(ErrorCodes::BadValue,
- "Cannot create an x.509 user with a subjectname "
- "that would be recognized as an internal "
- "cluster member."));
+ return CommandHelpers::appendCommandStatus(
+ result,
+ Status(ErrorCodes::BadValue,
+ "Cannot create an x.509 user with a subjectname "
+ "that would be recognized as an internal "
+ "cluster member."));
}
#endif
@@ -682,7 +683,7 @@ public:
int authzVersion;
status = authzManager->getAuthorizationVersion(opCtx, &authzVersion);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
BSONObjBuilder credentialsBuilder(userObjBuilder.subobjStart("credentials"));
@@ -712,14 +713,14 @@ public:
V2UserDocumentParser parser;
status = parser.checkValidUserDocument(userObj);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
stdx::lock_guard<stdx::mutex> lk(getAuthzDataMutex(serviceContext));
status = requireWritableAuthSchema28SCRAM(opCtx, authzManager);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
// Role existence has to be checked after acquiring the update lock
@@ -727,7 +728,7 @@ public:
BSONObj ignored;
status = authzManager->getRoleDescription(opCtx, args.roles[i], &ignored);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
}
@@ -738,7 +739,7 @@ public:
args.roles,
args.authenticationRestrictions);
status = insertPrivilegeDocument(opCtx, userObj);
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
virtual void redactForLogging(mutablebson::Document* cmdObj) {
@@ -776,19 +777,19 @@ public:
auth::CreateOrUpdateUserArgs args;
Status status = auth::parseCreateOrUpdateUserCommands(cmdObj, "updateUser", dbname, &args);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
if (!args.hasHashedPassword && !args.hasCustomData && !args.hasRoles &&
!args.authenticationRestrictions) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
Status(ErrorCodes::BadValue,
"Must specify at least one field to update in updateUser"));
}
if (args.hasHashedPassword && args.userName.getDB() == "$external") {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
Status(ErrorCodes::BadValue,
"Cannot set the password for users defined on the '$external' "
@@ -819,7 +820,8 @@ public:
auto swParsedRestrictions =
parseAuthenticationRestriction(*args.authenticationRestrictions);
if (!swParsedRestrictions.isOK()) {
- return appendCommandStatus(result, swParsedRestrictions.getStatus());
+ return CommandHelpers::appendCommandStatus(result,
+ swParsedRestrictions.getStatus());
}
updateSetBuilder.append("authenticationRestrictions",
@@ -847,7 +849,7 @@ public:
AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
status = requireWritableAuthSchema28SCRAM(opCtx, authzManager);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
// Role existence has to be checked after acquiring the update lock
@@ -856,7 +858,7 @@ public:
BSONObj ignored;
status = authzManager->getRoleDescription(opCtx, args.roles[i], &ignored);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
}
}
@@ -871,7 +873,7 @@ public:
status = updatePrivilegeDocument(opCtx, args.userName, updateDocumentBuilder.done());
// Must invalidate even on bad status - what if the write succeeded but the GLE failed?
authzManager->invalidateUserByName(args.userName);
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
virtual void redactForLogging(mutablebson::Document* cmdObj) {
@@ -909,7 +911,7 @@ public:
UserName userName;
Status status = auth::parseAndValidateDropUserCommand(cmdObj, dbname, &userName);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
ServiceContext* serviceContext = opCtx->getClient()->getServiceContext();
@@ -917,7 +919,7 @@ public:
AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
status = requireWritableAuthSchema28SCRAM(opCtx, authzManager);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
audit::logDropUser(Client::getCurrent(), userName);
@@ -932,11 +934,11 @@ public:
// Must invalidate even on bad status - what if the write succeeded but the GLE failed?
authzManager->invalidateUserByName(userName);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
if (nMatched == 0) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
Status(ErrorCodes::UserNotFound,
str::stream() << "User '" << userName.getFullName() << "' not found"));
@@ -975,7 +977,7 @@ public:
BSONObjBuilder& result) {
Status status = auth::parseAndValidateDropAllUsersFromDatabaseCommand(cmdObj, dbname);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
ServiceContext* serviceContext = opCtx->getClient()->getServiceContext();
stdx::lock_guard<stdx::mutex> lk(getAuthzDataMutex(serviceContext));
@@ -983,7 +985,7 @@ public:
AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
status = requireWritableAuthSchema28SCRAM(opCtx, authzManager);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
audit::logDropAllUsersFromDatabase(Client::getCurrent(), dbname);
@@ -994,7 +996,7 @@ public:
// Must invalidate even on bad status - what if the write succeeded but the GLE failed?
authzManager->invalidateUsersFromDB(dbname);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
result.append("n", numRemoved);
@@ -1034,7 +1036,7 @@ public:
Status status = auth::parseRolePossessionManipulationCommands(
cmdObj, "grantRolesToUser", dbname, &userNameString, &roles);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
ServiceContext* serviceContext = opCtx->getClient()->getServiceContext();
@@ -1043,14 +1045,14 @@ public:
AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
status = requireWritableAuthSchema28SCRAM(opCtx, authzManager);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
UserName userName(userNameString, dbname);
unordered_set<RoleName> userRoles;
status = getCurrentUserRoles(opCtx, authzManager, userName, &userRoles);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
for (vector<RoleName>::iterator it = roles.begin(); it != roles.end(); ++it) {
@@ -1058,7 +1060,7 @@ public:
BSONObj roleDoc;
status = authzManager->getRoleDescription(opCtx, roleName, &roleDoc);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
userRoles.insert(roleName);
@@ -1070,7 +1072,7 @@ public:
opCtx, userName, BSON("$set" << BSON("roles" << newRolesBSONArray)));
// Must invalidate even on bad status - what if the write succeeded but the GLE failed?
authzManager->invalidateUserByName(userName);
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
} cmdGrantRolesToUser;
@@ -1106,7 +1108,7 @@ public:
Status status = auth::parseRolePossessionManipulationCommands(
cmdObj, "revokeRolesFromUser", dbname, &userNameString, &roles);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
ServiceContext* serviceContext = opCtx->getClient()->getServiceContext();
@@ -1115,14 +1117,14 @@ public:
AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
status = requireWritableAuthSchema28SCRAM(opCtx, authzManager);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
UserName userName(userNameString, dbname);
unordered_set<RoleName> userRoles;
status = getCurrentUserRoles(opCtx, authzManager, userName, &userRoles);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
for (vector<RoleName>::iterator it = roles.begin(); it != roles.end(); ++it) {
@@ -1130,7 +1132,7 @@ public:
BSONObj roleDoc;
status = authzManager->getRoleDescription(opCtx, roleName, &roleDoc);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
userRoles.erase(roleName);
@@ -1142,7 +1144,7 @@ public:
opCtx, userName, BSON("$set" << BSON("roles" << newRolesBSONArray)));
// Must invalidate even on bad status - what if the write succeeded but the GLE failed?
authzManager->invalidateUserByName(userName);
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
} cmdRevokeRolesFromUser;
@@ -1180,18 +1182,18 @@ public:
auth::UsersInfoArgs args;
Status status = auth::parseUsersInfoCommand(cmdObj, dbname, &args);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
status = requireReadableAuthSchema26Upgrade(opCtx, getGlobalAuthorizationManager());
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
if (args.allForDB &&
(args.showPrivileges ||
args.authenticationRestrictionsFormat == AuthenticationRestrictionsFormat::kShow)) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
Status(ErrorCodes::IllegalOperation,
"Can only get privilege or restriction details on exact-match usersInfo "
@@ -1211,7 +1213,7 @@ public:
continue;
}
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
// getUserDescription always includes credentials and restrictions, which may need
@@ -1264,7 +1266,7 @@ public:
projection.done(),
[&](const BSONObj& obj) { usersArrayBuilder.append(obj); });
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
}
result.append("users", usersArrayBuilder.arr());
@@ -1302,40 +1304,40 @@ public:
auth::CreateOrUpdateRoleArgs args;
Status status = auth::parseCreateOrUpdateRoleCommands(cmdObj, "createRole", dbname, &args);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
if (args.roleName.getRole().empty()) {
- return appendCommandStatus(result,
- Status(ErrorCodes::BadValue, "Role name must be non-empty"));
+ return CommandHelpers::appendCommandStatus(
+ result, Status(ErrorCodes::BadValue, "Role name must be non-empty"));
}
if (args.roleName.getDB() == "local") {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result, Status(ErrorCodes::BadValue, "Cannot create roles in the local database"));
}
if (args.roleName.getDB() == "$external") {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
Status(ErrorCodes::BadValue, "Cannot create roles in the $external database"));
}
if (RoleGraph::isBuiltinRole(args.roleName)) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
Status(ErrorCodes::BadValue,
"Cannot create roles with the same name as a built-in role"));
}
if (!args.hasRoles) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
Status(ErrorCodes::BadValue, "\"createRole\" command requires a \"roles\" array"));
}
if (!args.hasPrivileges) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
Status(ErrorCodes::BadValue,
"\"createRole\" command requires a \"privileges\" array"));
@@ -1351,7 +1353,7 @@ public:
BSONArray privileges;
status = privilegeVectorToBSONArray(args.privileges, &privileges);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
roleObjBuilder.append("privileges", privileges);
@@ -1368,18 +1370,18 @@ public:
AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
status = requireWritableAuthSchema28SCRAM(opCtx, authzManager);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
// Role existence has to be checked after acquiring the update lock
status = checkOkayToGrantRolesToRole(opCtx, args.roleName, args.roles, authzManager);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
status = checkOkayToGrantPrivilegesToRole(args.roleName, args.privileges);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
audit::logCreateRole(Client::getCurrent(),
@@ -1389,7 +1391,7 @@ public:
args.authenticationRestrictions);
status = insertRoleDocument(opCtx, roleObjBuilder.done());
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
} cmdCreateRole;
@@ -1423,11 +1425,11 @@ public:
auth::CreateOrUpdateRoleArgs args;
Status status = auth::parseCreateOrUpdateRoleCommands(cmdObj, "updateRole", dbname, &args);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
if (!args.hasPrivileges && !args.hasRoles && !args.authenticationRestrictions) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
Status(ErrorCodes::BadValue,
"Must specify at least one field to update in updateRole"));
@@ -1440,7 +1442,7 @@ public:
BSONArray privileges;
status = privilegeVectorToBSONArray(args.privileges, &privileges);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
updateSetBuilder.append("privileges", privileges);
}
@@ -1464,27 +1466,27 @@ public:
AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
status = requireWritableAuthSchema28SCRAM(opCtx, authzManager);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
// Role existence has to be checked after acquiring the update lock
BSONObj ignored;
status = authzManager->getRoleDescription(opCtx, args.roleName, &ignored);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
if (args.hasRoles) {
status = checkOkayToGrantRolesToRole(opCtx, args.roleName, args.roles, authzManager);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
}
if (args.hasPrivileges) {
status = checkOkayToGrantPrivilegesToRole(args.roleName, args.privileges);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
}
@@ -1507,7 +1509,7 @@ public:
status = updateRoleDocument(opCtx, args.roleName, updateDocumentBuilder.obj());
// Must invalidate even on bad status - what if the write succeeded but the GLE failed?
authzManager->invalidateUserCache();
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
} cmdUpdateRole;
@@ -1543,7 +1545,7 @@ public:
Status status = auth::parseAndValidateRolePrivilegeManipulationCommands(
cmdObj, "grantPrivilegesToRole", dbname, &roleName, &privilegesToAdd);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
ServiceContext* serviceContext = opCtx->getClient()->getServiceContext();
@@ -1552,11 +1554,11 @@ public:
AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
status = requireWritableAuthSchema28SCRAM(opCtx, authzManager);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
if (RoleGraph::isBuiltinRole(roleName)) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
Status(ErrorCodes::InvalidRoleModification,
str::stream() << roleName.getFullName()
@@ -1565,7 +1567,7 @@ public:
status = checkOkayToGrantPrivilegesToRole(roleName, privilegesToAdd);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
BSONObj roleDoc;
@@ -1575,7 +1577,7 @@ public:
AuthenticationRestrictionsFormat::kOmit,
&roleDoc);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
PrivilegeVector privileges;
@@ -1583,7 +1585,7 @@ public:
&privileges);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
for (PrivilegeVector::iterator it = privilegesToAdd.begin(); it != privilegesToAdd.end();
@@ -1596,16 +1598,16 @@ public:
mutablebson::Element setElement = updateObj.makeElementObject("$set");
status = updateObj.root().pushBack(setElement);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
mutablebson::Element privilegesElement = updateObj.makeElementArray("privileges");
status = setElement.pushBack(privilegesElement);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
status = authzManager->getBSONForPrivileges(privileges, privilegesElement);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
BSONObjBuilder updateBSONBuilder;
@@ -1616,7 +1618,7 @@ public:
status = updateRoleDocument(opCtx, roleName, updateBSONBuilder.done());
// Must invalidate even on bad status - what if the write succeeded but the GLE failed?
authzManager->invalidateUserCache();
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
} cmdGrantPrivilegesToRole;
@@ -1652,7 +1654,7 @@ public:
Status status = auth::parseAndValidateRolePrivilegeManipulationCommands(
cmdObj, "revokePrivilegesFromRole", dbname, &roleName, &privilegesToRemove);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
ServiceContext* serviceContext = opCtx->getClient()->getServiceContext();
@@ -1661,11 +1663,11 @@ public:
AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
status = requireWritableAuthSchema28SCRAM(opCtx, authzManager);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
if (RoleGraph::isBuiltinRole(roleName)) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
Status(ErrorCodes::InvalidRoleModification,
str::stream() << roleName.getFullName()
@@ -1679,14 +1681,14 @@ public:
AuthenticationRestrictionsFormat::kOmit,
&roleDoc);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
PrivilegeVector privileges;
status = auth::parseAndValidatePrivilegeArray(BSONArray(roleDoc["privileges"].Obj()),
&privileges);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
for (PrivilegeVector::iterator itToRm = privilegesToRemove.begin();
@@ -1709,16 +1711,16 @@ public:
mutablebson::Element setElement = updateObj.makeElementObject("$set");
status = updateObj.root().pushBack(setElement);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
mutablebson::Element privilegesElement = updateObj.makeElementArray("privileges");
status = setElement.pushBack(privilegesElement);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
status = authzManager->getBSONForPrivileges(privileges, privilegesElement);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
audit::logRevokePrivilegesFromRole(Client::getCurrent(), roleName, privilegesToRemove);
@@ -1728,7 +1730,7 @@ public:
status = updateRoleDocument(opCtx, roleName, updateBSONBuilder.done());
// Must invalidate even on bad status - what if the write succeeded but the GLE failed?
authzManager->invalidateUserCache();
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
} cmdRevokePrivilegesFromRole;
@@ -1764,12 +1766,12 @@ public:
Status status = auth::parseRolePossessionManipulationCommands(
cmdObj, "grantRolesToRole", dbname, &roleNameString, &rolesToAdd);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
RoleName roleName(roleNameString, dbname);
if (RoleGraph::isBuiltinRole(roleName)) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
Status(ErrorCodes::InvalidRoleModification,
str::stream() << roleName.getFullName()
@@ -1782,20 +1784,20 @@ public:
AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
status = requireWritableAuthSchema28SCRAM(opCtx, authzManager);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
// Role existence has to be checked after acquiring the update lock
BSONObj roleDoc;
status = authzManager->getRoleDescription(opCtx, roleName, &roleDoc);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
// Check for cycles
status = checkOkayToGrantRolesToRole(opCtx, roleName, rolesToAdd, authzManager);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
// Add new roles to existing roles
@@ -1803,7 +1805,7 @@ public:
status = auth::parseRoleNamesFromBSONArray(
BSONArray(roleDoc["roles"].Obj()), roleName.getDB(), &directRoles);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
for (vector<RoleName>::iterator it = rolesToAdd.begin(); it != rolesToAdd.end(); ++it) {
const RoleName& roleToAdd = *it;
@@ -1817,7 +1819,7 @@ public:
opCtx, roleName, BSON("$set" << BSON("roles" << rolesVectorToBSONArray(directRoles))));
// Must invalidate even on bad status - what if the write succeeded but the GLE failed?
authzManager->invalidateUserCache();
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
} cmdGrantRolesToRole;
@@ -1853,7 +1855,7 @@ public:
Status status = auth::parseRolePossessionManipulationCommands(
cmdObj, "revokeRolesFromRole", dbname, &roleNameString, &rolesToRemove);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
ServiceContext* serviceContext = opCtx->getClient()->getServiceContext();
@@ -1862,12 +1864,12 @@ public:
AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
status = requireWritableAuthSchema28SCRAM(opCtx, authzManager);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
RoleName roleName(roleNameString, dbname);
if (RoleGraph::isBuiltinRole(roleName)) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
Status(ErrorCodes::InvalidRoleModification,
str::stream() << roleName.getFullName()
@@ -1877,14 +1879,14 @@ public:
BSONObj roleDoc;
status = authzManager->getRoleDescription(opCtx, roleName, &roleDoc);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
std::vector<RoleName> roles;
status = auth::parseRoleNamesFromBSONArray(
BSONArray(roleDoc["roles"].Obj()), roleName.getDB(), &roles);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
for (vector<RoleName>::const_iterator it = rolesToRemove.begin(); it != rolesToRemove.end();
@@ -1901,7 +1903,7 @@ public:
opCtx, roleName, BSON("$set" << BSON("roles" << rolesVectorToBSONArray(roles))));
// Must invalidate even on bad status - what if the write succeeded but the GLE failed?
authzManager->invalidateUserCache();
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
} cmdRevokeRolesFromRole;
@@ -1939,7 +1941,7 @@ public:
RoleName roleName;
Status status = auth::parseDropRoleCommand(cmdObj, dbname, &roleName);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
ServiceContext* serviceContext = opCtx->getClient()->getServiceContext();
@@ -1948,11 +1950,11 @@ public:
AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
status = requireWritableAuthSchema28SCRAM(opCtx, authzManager);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
if (RoleGraph::isBuiltinRole(roleName)) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
Status(ErrorCodes::InvalidRoleModification,
str::stream() << roleName.getFullName()
@@ -1962,7 +1964,7 @@ public:
BSONObj roleDoc;
status = authzManager->getRoleDescription(opCtx, roleName, &roleDoc);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
// Remove this role from all users
@@ -1987,12 +1989,12 @@ public:
ErrorCodes::Error code = status.code() == ErrorCodes::UnknownError
? ErrorCodes::UserModificationFailed
: status.code();
- return appendCommandStatus(result,
- Status(code,
- str::stream() << "Failed to remove role "
- << roleName.getFullName()
- << " from all users: "
- << status.reason()));
+ return CommandHelpers::appendCommandStatus(
+ result,
+ Status(code,
+ str::stream() << "Failed to remove role " << roleName.getFullName()
+ << " from all users: "
+ << status.reason()));
}
// Remove this role from all other roles
@@ -2016,7 +2018,7 @@ public:
ErrorCodes::Error code = status.code() == ErrorCodes::UnknownError
? ErrorCodes::RoleModificationFailed
: status.code();
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
Status(code,
str::stream() << "Removed role " << roleName.getFullName()
@@ -2035,7 +2037,7 @@ public:
// Must invalidate even on bad status - what if the write succeeded but the GLE failed?
authzManager->invalidateUserCache();
if (!status.isOK()) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
Status(status.code(),
str::stream() << "Removed role " << roleName.getFullName()
@@ -2046,7 +2048,7 @@ public:
dassert(nMatched == 0 || nMatched == 1);
if (nMatched == 0) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
Status(ErrorCodes::RoleNotFound,
str::stream() << "Role '" << roleName.getFullName() << "' not found"));
@@ -2090,7 +2092,7 @@ public:
BSONObjBuilder& result) {
Status status = auth::parseDropAllRolesFromDatabaseCommand(cmdObj, dbname);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
ServiceContext* serviceContext = opCtx->getClient()->getServiceContext();
@@ -2099,7 +2101,7 @@ public:
AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
status = requireWritableAuthSchema28SCRAM(opCtx, authzManager);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
// Remove these roles from all users
@@ -2119,12 +2121,12 @@ public:
ErrorCodes::Error code = status.code() == ErrorCodes::UnknownError
? ErrorCodes::UserModificationFailed
: status.code();
- return appendCommandStatus(result,
- Status(code,
- str::stream() << "Failed to remove roles from \""
- << dbname
- << "\" db from all users: "
- << status.reason()));
+ return CommandHelpers::appendCommandStatus(
+ result,
+ Status(code,
+ str::stream() << "Failed to remove roles from \"" << dbname
+ << "\" db from all users: "
+ << status.reason()));
}
// Remove these roles from all other roles
@@ -2145,12 +2147,12 @@ public:
ErrorCodes::Error code = status.code() == ErrorCodes::UnknownError
? ErrorCodes::RoleModificationFailed
: status.code();
- return appendCommandStatus(result,
- Status(code,
- str::stream() << "Failed to remove roles from \""
- << dbname
- << "\" db from all roles: "
- << status.reason()));
+ return CommandHelpers::appendCommandStatus(
+ result,
+ Status(code,
+ str::stream() << "Failed to remove roles from \"" << dbname
+ << "\" db from all roles: "
+ << status.reason()));
}
audit::logDropAllRolesFromDatabase(Client::getCurrent(), dbname);
@@ -2160,7 +2162,7 @@ public:
// Must invalidate even on bad status - what if the write succeeded but the GLE failed?
authzManager->invalidateUserCache();
if (!status.isOK()) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
Status(status.code(),
str::stream() << "Removed roles from \"" << dbname
@@ -2233,12 +2235,12 @@ public:
auth::RolesInfoArgs args;
Status status = auth::parseRolesInfoCommand(cmdObj, dbname, &args);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
status = requireReadableAuthSchema26Upgrade(opCtx, getGlobalAuthorizationManager());
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
if (args.allForDB) {
@@ -2251,11 +2253,11 @@ public:
args.showBuiltinRoles,
&rolesDocs);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
if (args.privilegeFormat == PrivilegeFormat::kShowAsUserFragment) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
Status(ErrorCodes::IllegalOperation,
"Cannot get user fragment for all roles in a database"));
@@ -2274,7 +2276,7 @@ public:
args.authenticationRestrictionsFormat,
&roleDetails);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
if (args.privilegeFormat == PrivilegeFormat::kShowAsUserFragment) {
@@ -2721,11 +2723,11 @@ public:
auth::MergeAuthzCollectionsArgs args;
Status status = auth::parseMergeAuthzCollectionsCommand(cmdObj, &args);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
if (args.usersCollName.empty() && args.rolesCollName.empty()) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
Status(ErrorCodes::BadValue,
"Must provide at least one of \"tempUsersCollection\" and "
@@ -2738,14 +2740,14 @@ public:
AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
status = requireWritableAuthSchema28SCRAM(opCtx, authzManager);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
if (!args.usersCollName.empty()) {
Status status =
processUsers(opCtx, authzManager, args.usersCollName, args.db, args.drop);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
}
@@ -2753,7 +2755,7 @@ public:
Status status =
processRoles(opCtx, authzManager, args.rolesCollName, args.db, args.drop);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
}
diff --git a/src/mongo/db/commands/validate.cpp b/src/mongo/db/commands/validate.cpp
index d181021fc84..29472a20c47 100644
--- a/src/mongo/db/commands/validate.cpp
+++ b/src/mongo/db/commands/validate.cpp
@@ -100,7 +100,7 @@ public:
return true;
}
- const NamespaceString nss(parseNsCollectionRequired(dbname, cmdObj));
+ const NamespaceString nss(CommandHelpers::parseNsCollectionRequired(dbname, cmdObj));
const bool full = cmdObj["full"].trueValue();
const bool scanData = cmdObj["scandata"].trueValue();
@@ -114,7 +114,7 @@ public:
}
if (!nss.isNormal() && full) {
- appendCommandStatus(
+ CommandHelpers::appendCommandStatus(
result,
{ErrorCodes::CommandFailed, "Can only run full validate on a regular collection"});
return false;
@@ -129,11 +129,12 @@ public:
Collection* collection = ctx.getDb() ? ctx.getDb()->getCollection(opCtx, nss) : NULL;
if (!collection) {
if (ctx.getDb() && ctx.getDb()->getViewCatalog()->lookup(opCtx, nss.ns())) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result, {ErrorCodes::CommandNotSupportedOnView, "Cannot validate a view"});
}
- appendCommandStatus(result, {ErrorCodes::NamespaceNotFound, "ns not found"});
+ CommandHelpers::appendCommandStatus(result,
+ {ErrorCodes::NamespaceNotFound, "ns not found"});
return false;
}
@@ -175,7 +176,7 @@ public:
opCtx->waitForConditionOrInterrupt(_validationNotifier, lock);
}
} catch (AssertionException& e) {
- appendCommandStatus(
+ CommandHelpers::appendCommandStatus(
result,
{ErrorCodes::CommandFailed,
str::stream() << "Exception during validation: " << e.toString()});
@@ -195,7 +196,7 @@ public:
Status status =
collection->validate(opCtx, level, background, std::move(collLk), &results, &result);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
CollectionCatalogEntry* catalogEntry = collection->getCatalogEntry();
diff --git a/src/mongo/db/commands_test.cpp b/src/mongo/db/commands_test.cpp
index 85fd263d4c2..b4ebf32a464 100644
--- a/src/mongo/db/commands_test.cpp
+++ b/src/mongo/db/commands_test.cpp
@@ -38,7 +38,7 @@ namespace mongo {
TEST(Commands, appendCommandStatusOK) {
BSONObjBuilder actualResult;
- Command::appendCommandStatus(actualResult, Status::OK());
+ CommandHelpers::appendCommandStatus(actualResult, Status::OK());
BSONObjBuilder expectedResult;
expectedResult.append("ok", 1.0);
@@ -49,7 +49,7 @@ TEST(Commands, appendCommandStatusOK) {
TEST(Commands, appendCommandStatusError) {
BSONObjBuilder actualResult;
const Status status(ErrorCodes::InvalidLength, "Response payload too long");
- Command::appendCommandStatus(actualResult, status);
+ CommandHelpers::appendCommandStatus(actualResult, status);
BSONObjBuilder expectedResult;
expectedResult.append("ok", 0.0);
@@ -66,7 +66,7 @@ TEST(Commands, appendCommandStatusNoOverwrite) {
actualResult.append("c", "d");
actualResult.append("ok", "not ok");
const Status status(ErrorCodes::InvalidLength, "Response payload too long");
- Command::appendCommandStatus(actualResult, status);
+ CommandHelpers::appendCommandStatus(actualResult, status);
BSONObjBuilder expectedResult;
expectedResult.append("a", "b");
@@ -82,7 +82,7 @@ TEST(Commands, appendCommandStatusNoOverwrite) {
TEST(Commands, appendCommandStatusErrorExtraInfo) {
BSONObjBuilder actualResult;
const Status status(ErrorExtraInfoExample(123), "not again!");
- Command::appendCommandStatus(actualResult, status);
+ CommandHelpers::appendCommandStatus(actualResult, status);
BSONObjBuilder expectedResult;
expectedResult.append("ok", 0.0);
@@ -109,26 +109,27 @@ public:
TEST_F(ParseNsOrUUID, FailWrongType) {
auto cmd = BSON("query" << BSON("a" << BSON("$gte" << 11)));
ASSERT_THROWS_CODE(
- Command::parseNsOrUUID(opCtx, "db", cmd), DBException, ErrorCodes::InvalidNamespace);
+ CommandHelpers::parseNsOrUUID(opCtx, "db", cmd), DBException, ErrorCodes::InvalidNamespace);
}
TEST_F(ParseNsOrUUID, FailEmptyDbName) {
auto cmd = BSON("query"
<< "coll");
ASSERT_THROWS_CODE(
- Command::parseNsOrUUID(opCtx, "", cmd), DBException, ErrorCodes::InvalidNamespace);
+ CommandHelpers::parseNsOrUUID(opCtx, "", cmd), DBException, ErrorCodes::InvalidNamespace);
}
TEST_F(ParseNsOrUUID, FailInvalidDbName) {
auto cmd = BSON("query"
<< "coll");
- ASSERT_THROWS_CODE(
- Command::parseNsOrUUID(opCtx, "test.coll", cmd), DBException, ErrorCodes::InvalidNamespace);
+ ASSERT_THROWS_CODE(CommandHelpers::parseNsOrUUID(opCtx, "test.coll", cmd),
+ DBException,
+ ErrorCodes::InvalidNamespace);
}
TEST_F(ParseNsOrUUID, ParseUnknownUUID) {
auto cmd = BSON("query" << UUID::gen());
- ASSERT_THROWS_CODE(Command::parseNsOrUUID(opCtx, "test.coll", cmd),
+ ASSERT_THROWS_CODE(CommandHelpers::parseNsOrUUID(opCtx, "test.coll", cmd),
DBException,
ErrorCodes::NamespaceNotFound);
}
@@ -136,7 +137,7 @@ TEST_F(ParseNsOrUUID, ParseUnknownUUID) {
TEST_F(ParseNsOrUUID, ParseValidColl) {
auto cmd = BSON("query"
<< "coll");
- auto parsedNss = Command::parseNsOrUUID(opCtx, "test", cmd);
+ auto parsedNss = CommandHelpers::parseNsOrUUID(opCtx, "test", cmd);
ASSERT_EQ(parsedNss, NamespaceString("test.coll"));
}
@@ -149,7 +150,7 @@ TEST_F(ParseNsOrUUID, ParseValidUUID) {
catalog.onCreateCollection(opCtx, &coll, uuid);
auto cmd = BSON("query" << uuid);
- auto parsedNss = Command::parseNsOrUUID(opCtx, "test", cmd);
+ auto parsedNss = CommandHelpers::parseNsOrUUID(opCtx, "test", cmd);
ASSERT_EQUALS(nss, parsedNss);
}
} // namespace mongo
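
The updated tests above pin down the contract of CommandHelpers::appendCommandStatus: success appends ok: 1.0, an error appends ok: 0.0 together with errmsg and code, and a pre-existing ok field is left alone. A toy re-implementation of that contract, using a plain std::map in place of BSONObjBuilder, is sketched here for reference only.

    #include <iostream>
    #include <map>
    #include <string>

    struct ToyStatus {
        bool ok;
        int code;            // illustrative numeric code, not a real ErrorCodes value
        std::string reason;
    };

    using ToyDoc = std::map<std::string, std::string>;  // stand-in for BSONObjBuilder

    // Mirrors the behaviour the tests assert: "ok" is only written if absent, and a
    // failing status additionally contributes "errmsg" and "code".
    static void appendCommandStatusSketch(ToyDoc& result, const ToyStatus& status) {
        if (result.find("ok") == result.end())
            result["ok"] = status.ok ? "1.0" : "0.0";
        if (!status.ok) {
            result["errmsg"] = status.reason;
            result["code"] = std::to_string(status.code);
        }
    }

    int main() {
        ToyDoc failure;
        appendCommandStatusSketch(failure, {false, 123, "Response payload too long"});
        for (const auto& kv : failure)
            std::cout << kv.first << ": " << kv.second << '\n';  // code, errmsg, ok
        return 0;
    }
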
diff --git a/src/mongo/db/dbdirectclient.cpp b/src/mongo/db/dbdirectclient.cpp
index 132a01131ce..db170ac90e8 100644
--- a/src/mongo/db/dbdirectclient.cpp
+++ b/src/mongo/db/dbdirectclient.cpp
@@ -176,7 +176,7 @@ unsigned long long DBDirectClient::count(
NamespaceString nsString(ns);
- auto result = Command::runCommandDirectly(
+ auto result = CommandHelpers::runCommandDirectly(
_opCtx, OpMsgRequest::fromDBAndBody(nsString.db(), std::move(cmdObj)));
uassertStatusOK(getStatusFromCommandResult(result));
diff --git a/src/mongo/db/exec/stagedebug_cmd.cpp b/src/mongo/db/exec/stagedebug_cmd.cpp
index 6e65094764c..d0b20203eff 100644
--- a/src/mongo/db/exec/stagedebug_cmd.cpp
+++ b/src/mongo/db/exec/stagedebug_cmd.cpp
@@ -210,12 +210,12 @@ public:
<< PlanExecutor::statestr(state)
<< ", stats: " << redact(Explain::getWinningPlanStats(exec.get()));
- return appendCommandStatus(result,
- Status(ErrorCodes::OperationFailed,
- str::stream()
- << "Executor error during "
- << "StageDebug command: "
- << WorkingSetCommon::toStatusString(obj)));
+ return CommandHelpers::appendCommandStatus(
+ result,
+ Status(ErrorCodes::OperationFailed,
+ str::stream() << "Executor error during "
+ << "StageDebug command: "
+ << WorkingSetCommon::toStatusString(obj)));
}
return true;
diff --git a/src/mongo/db/ftdc/ftdc_server.cpp b/src/mongo/db/ftdc/ftdc_server.cpp
index a4bb5572c7c..5ffb7a126e9 100644
--- a/src/mongo/db/ftdc/ftdc_server.cpp
+++ b/src/mongo/db/ftdc/ftdc_server.cpp
@@ -249,11 +249,11 @@ FTDCSimpleInternalCommandCollector::FTDCSimpleInternalCommandCollector(StringDat
BSONObj cmdObj)
: _name(name.toString()), _request(OpMsgRequest::fromDBAndBody(ns, std::move(cmdObj))) {
invariant(command == _request.getCommandName());
- invariant(Command::findCommand(command)); // Fail early if it doesn't exist.
+ invariant(CommandHelpers::findCommand(command)); // Fail early if it doesn't exist.
}
void FTDCSimpleInternalCommandCollector::collect(OperationContext* opCtx, BSONObjBuilder& builder) {
- auto result = Command::runCommandDirectly(opCtx, _request);
+ auto result = CommandHelpers::runCommandDirectly(opCtx, _request);
builder.appendElements(result);
}
diff --git a/src/mongo/db/ops/write_ops_exec.cpp b/src/mongo/db/ops/write_ops_exec.cpp
index 27e53a898c9..64464bbf189 100644
--- a/src/mongo/db/ops/write_ops_exec.cpp
+++ b/src/mongo/db/ops/write_ops_exec.cpp
@@ -255,7 +255,7 @@ SingleWriteResult createIndex(OperationContext* opCtx,
cmdBuilder << "createIndexes" << ns.coll();
cmdBuilder << "indexes" << BSON_ARRAY(spec);
- auto cmdResult = Command::runCommandDirectly(
+ auto cmdResult = CommandHelpers::runCommandDirectly(
opCtx, OpMsgRequest::fromDBAndBody(systemIndexes.db(), cmdBuilder.obj()));
uassertStatusOK(getStatusFromCommandResult(cmdResult));
diff --git a/src/mongo/db/pipeline/aggregation_request.cpp b/src/mongo/db/pipeline/aggregation_request.cpp
index e944be2f03e..8eb646a7599 100644
--- a/src/mongo/db/pipeline/aggregation_request.cpp
+++ b/src/mongo/db/pipeline/aggregation_request.cpp
@@ -214,7 +214,7 @@ StatusWith<AggregationRequest> AggregationRequest::parseFromBSON(
request.setAllowDiskUse(elem.Bool());
} else if (bypassDocumentValidationCommandOption() == fieldName) {
request.setBypassDocumentValidation(elem.trueValue());
- } else if (!Command::isGenericArgument(fieldName)) {
+ } else if (!CommandHelpers::isGenericArgument(fieldName)) {
return {ErrorCodes::FailedToParse,
str::stream() << "unrecognized field '" << elem.fieldName() << "'"};
}
diff --git a/src/mongo/db/query/getmore_request.cpp b/src/mongo/db/query/getmore_request.cpp
index 6c738c4da67..21db56e4274 100644
--- a/src/mongo/db/query/getmore_request.cpp
+++ b/src/mongo/db/query/getmore_request.cpp
@@ -158,7 +158,7 @@ StatusWith<GetMoreRequest> GetMoreRequest::parseFromBSON(const std::string& dbna
return status;
}
lastKnownCommittedOpTime = ot;
- } else if (!Command::isGenericArgument(fieldName)) {
+ } else if (!CommandHelpers::isGenericArgument(fieldName)) {
return {ErrorCodes::FailedToParse,
str::stream() << "Failed to parse: " << cmdObj << ". "
<< "Unrecognized field '"
diff --git a/src/mongo/db/query/query_request.cpp b/src/mongo/db/query/query_request.cpp
index 074bf7511fa..512a78f9cb3 100644
--- a/src/mongo/db/query/query_request.cpp
+++ b/src/mongo/db/query/query_request.cpp
@@ -382,7 +382,7 @@ StatusWith<unique_ptr<QueryRequest>> QueryRequest::parseFromFindCommand(unique_p
return status;
}
qr->_replicationTerm = el._numberLong();
- } else if (!Command::isGenericArgument(fieldName)) {
+ } else if (!CommandHelpers::isGenericArgument(fieldName)) {
return Status(ErrorCodes::FailedToParse,
str::stream() << "Failed to parse: " << cmdObj.toString() << ". "
<< "Unrecognized field '"
diff --git a/src/mongo/db/repl/master_slave.cpp b/src/mongo/db/repl/master_slave.cpp
index 5ca2ed2f0f0..5cac46ad48c 100644
--- a/src/mongo/db/repl/master_slave.cpp
+++ b/src/mongo/db/repl/master_slave.cpp
@@ -389,13 +389,13 @@ public:
HandshakeArgs handshake;
Status status = handshake.initialize(cmdObj);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
ReplClientInfo::forClient(opCtx->getClient()).setRemoteID(handshake.getRid());
status = getGlobalReplicationCoordinator()->processHandshake(opCtx, handshake);
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
} handshakeCmd;
diff --git a/src/mongo/db/repl/oplog.cpp b/src/mongo/db/repl/oplog.cpp
index 001c20af84f..91aa0e601f3 100644
--- a/src/mongo/db/repl/oplog.cpp
+++ b/src/mongo/db/repl/oplog.cpp
@@ -1616,7 +1616,7 @@ Status applyCommand_inlock(OperationContext* opCtx,
case ErrorCodes::BackgroundOperationInProgressForNamespace: {
Lock::TempRelease release(opCtx->lockState());
- Command* cmd = Command::findCommand(o.firstElement().fieldName());
+ Command* cmd = CommandHelpers::findCommand(o.firstElement().fieldName());
invariant(cmd);
BackgroundOperation::awaitNoBgOpInProgForNs(cmd->parseNs(nss.db().toString(), o));
opCtx->recoveryUnit()->abandonSnapshot();
diff --git a/src/mongo/db/repl/repl_set_commands.cpp b/src/mongo/db/repl/repl_set_commands.cpp
index ebee2285b3e..e2d28cadb6f 100644
--- a/src/mongo/db/repl/repl_set_commands.cpp
+++ b/src/mongo/db/repl/repl_set_commands.cpp
@@ -108,19 +108,19 @@ public:
long long stateVal;
auto status = bsonExtractIntegerField(cmdObj, "waitForMemberState", &stateVal);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
const auto swMemberState = MemberState::create(stateVal);
if (!swMemberState.isOK()) {
- return appendCommandStatus(result, swMemberState.getStatus());
+ return CommandHelpers::appendCommandStatus(result, swMemberState.getStatus());
}
const auto expectedState = swMemberState.getValue();
long long timeoutMillis;
status = bsonExtractIntegerField(cmdObj, "timeoutMillis", &timeoutMillis);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
Milliseconds timeout(timeoutMillis);
log() << "replSetTest: waiting " << timeout << " for member state to become "
@@ -128,23 +128,23 @@ public:
status = replCoord->waitForMemberState(expectedState, timeout);
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
} else if (cmdObj.hasElement("waitForDrainFinish")) {
long long timeoutMillis;
auto status = bsonExtractIntegerField(cmdObj, "waitForDrainFinish", &timeoutMillis);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
Milliseconds timeout(timeoutMillis);
log() << "replSetTest: waiting " << timeout << " for applier buffer to finish draining";
status = replCoord->waitForDrainFinish(timeout);
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
Status status = replCoord->checkReplEnabledForCommand(&result);
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
};
@@ -169,7 +169,7 @@ public:
BSONObjBuilder& result) {
Status status = getGlobalReplicationCoordinator()->checkReplEnabledForCommand(&result);
if (!status.isOK())
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
auto rbid = ReplicationProcess::get(opCtx)->getRollbackID(opCtx);
@@ -177,7 +177,7 @@ public:
fassertStatusOK(40426, rbid.getStatus());
result.append("rbid", rbid.getValue());
- return appendCommandStatus(result, Status::OK());
+ return CommandHelpers::appendCommandStatus(result, Status::OK());
}
} cmdReplSetRBID;
@@ -198,13 +198,13 @@ public:
Status status = getGlobalReplicationCoordinator()->checkReplEnabledForCommand(&result);
if (!status.isOK())
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
bool includeInitialSync = false;
Status initialSyncStatus =
bsonExtractBooleanFieldWithDefault(cmdObj, "initialSync", false, &includeInitialSync);
if (!initialSyncStatus.isOK()) {
- return appendCommandStatus(result, initialSyncStatus);
+ return CommandHelpers::appendCommandStatus(result, initialSyncStatus);
}
auto responseStyle = ReplicationCoordinator::ReplSetGetStatusResponseStyle::kBasic;
@@ -212,7 +212,7 @@ public:
responseStyle = ReplicationCoordinator::ReplSetGetStatusResponseStyle::kInitialSync;
}
status = getGlobalReplicationCoordinator()->processReplSetGetStatus(&result, responseStyle);
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
private:
@@ -235,7 +235,7 @@ public:
BSONObjBuilder& result) {
Status status = getGlobalReplicationCoordinator()->checkReplEnabledForCommand(&result);
if (!status.isOK())
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
getGlobalReplicationCoordinator()->processReplSetGetConfig(&result);
return true;
@@ -357,9 +357,10 @@ public:
std::string replSetString =
ReplicationCoordinator::get(opCtx)->getSettings().getReplSetString();
if (replSetString.empty()) {
- return appendCommandStatus(result,
- Status(ErrorCodes::NoReplicationEnabled,
- "This node was not started with the replSet option"));
+ return CommandHelpers::appendCommandStatus(
+ result,
+ Status(ErrorCodes::NoReplicationEnabled,
+ "This node was not started with the replSet option"));
}
if (configObj.isEmpty()) {
@@ -403,7 +404,7 @@ public:
Status status =
getGlobalReplicationCoordinator()->processReplSetInitiate(opCtx, configObj, &result);
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
private:
@@ -426,7 +427,7 @@ public:
BSONObjBuilder& result) {
Status status = getGlobalReplicationCoordinator()->checkReplEnabledForCommand(&result);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
if (cmdObj["replSetReconfig"].type() != Object) {
@@ -455,7 +456,7 @@ public:
}
wuow.commit();
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
private:
@@ -483,10 +484,10 @@ public:
BSONObjBuilder& result) {
Status status = getGlobalReplicationCoordinator()->checkReplEnabledForCommand(&result);
if (!status.isOK())
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
int secs = (int)cmdObj.firstElement().numberInt();
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result, getGlobalReplicationCoordinator()->processReplSetFreeze(secs, &result));
}
@@ -513,7 +514,7 @@ public:
BSONObjBuilder& result) {
Status status = getGlobalReplicationCoordinator()->checkReplEnabledForCommand(&result);
if (!status.isOK())
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
const bool force = cmdObj["force"].trueValue();
@@ -522,7 +523,7 @@ public:
stepDownForSecs = 60;
} else if (stepDownForSecs < 0) {
status = Status(ErrorCodes::BadValue, "stepdown period must be a positive integer");
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
long long secondaryCatchUpPeriodSecs;
@@ -536,26 +537,26 @@ public:
secondaryCatchUpPeriodSecs = 10;
}
} else if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
if (secondaryCatchUpPeriodSecs < 0) {
status = Status(ErrorCodes::BadValue,
"secondaryCatchUpPeriodSecs period must be a positive or absent");
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
if (stepDownForSecs < secondaryCatchUpPeriodSecs) {
status = Status(ErrorCodes::BadValue,
"stepdown period must be longer than secondaryCatchUpPeriodSecs");
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
log() << "Attempting to step down in response to replSetStepDown command";
status = getGlobalReplicationCoordinator()->stepDown(
opCtx, force, Seconds(secondaryCatchUpPeriodSecs), Seconds(stepDownForSecs));
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
private:
@@ -577,11 +578,12 @@ public:
BSONObjBuilder& result) {
Status status = getGlobalReplicationCoordinator()->checkReplEnabledForCommand(&result);
if (!status.isOK())
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
- return appendCommandStatus(result,
- getGlobalReplicationCoordinator()->setMaintenanceMode(
- cmdObj["replSetMaintenance"].trueValue()));
+ return CommandHelpers::appendCommandStatus(
+ result,
+ getGlobalReplicationCoordinator()->setMaintenanceMode(
+ cmdObj["replSetMaintenance"].trueValue()));
}
private:
@@ -604,16 +606,17 @@ public:
BSONObjBuilder& result) {
Status status = getGlobalReplicationCoordinator()->checkReplEnabledForCommand(&result);
if (!status.isOK())
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
HostAndPort targetHostAndPort;
status = targetHostAndPort.initialize(cmdObj["replSetSyncFrom"].valuestrsafe());
if (!status.isOK())
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
- return appendCommandStatus(result,
- getGlobalReplicationCoordinator()->processReplSetSyncFrom(
- opCtx, targetHostAndPort, &result));
+ return CommandHelpers::appendCommandStatus(
+ result,
+ getGlobalReplicationCoordinator()->processReplSetSyncFrom(
+ opCtx, targetHostAndPort, &result));
}
private:
@@ -633,7 +636,7 @@ public:
Status status = replCoord->checkReplEnabledForCommand(&result);
if (!status.isOK())
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
// accept and ignore handshakes sent from old (3.0-series) nodes without erroring to
// enable mixed-version operation, since we no longer use the handshakes
@@ -661,10 +664,10 @@ public:
if (status == ErrorCodes::InvalidReplicaSetConfig) {
result.append("configVersion", configVersion);
}
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
} else {
// Parsing error from UpdatePositionArgs.
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
}
} cmdReplSetUpdatePosition;
@@ -726,7 +729,7 @@ public:
checks many things that are pre-initialization. */
if (!getGlobalReplicationCoordinator()->getSettings().usingReplSets()) {
status = Status(ErrorCodes::NoReplicationEnabled, "not running with --replSet");
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
// Process heartbeat based on the version of request. The missing fields in mismatched
@@ -743,7 +746,7 @@ public:
LOG_FOR_HEARTBEATS(2) << "Processed heartbeat from "
<< cmdObj.getStringField("from")
<< " and generated response, " << response;
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
// else: fall through to old heartbeat protocol as it is likely that
// a new node just joined the set
@@ -752,7 +755,7 @@ public:
ReplSetHeartbeatArgs args;
status = args.initialize(cmdObj);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
// ugh.
@@ -767,7 +770,7 @@ public:
LOG_FOR_HEARTBEATS(2) << "Processed heartbeat from " << cmdObj.getStringField("from")
<< " and generated response, " << response;
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
} cmdReplSetHeartbeat;
@@ -784,7 +787,7 @@ public:
BSONObjBuilder& result) {
Status status = getGlobalReplicationCoordinator()->checkReplEnabledForCommand(&result);
if (!status.isOK())
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
ReplicationCoordinator::ReplSetFreshArgs parsedArgs;
parsedArgs.id = cmdObj["id"].Int();
@@ -800,7 +803,7 @@ public:
parsedArgs.opTime = Timestamp(cmdObj["opTime"].Date());
status = getGlobalReplicationCoordinator()->processReplSetFresh(parsedArgs, &result);
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
} cmdReplSetFresh;
@@ -818,7 +821,7 @@ private:
Status status = getGlobalReplicationCoordinator()->checkReplEnabledForCommand(&result);
if (!status.isOK())
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
ReplicationCoordinator::ReplSetElectArgs parsedArgs;
parsedArgs.set = cmdObj["set"].String();
@@ -833,7 +836,7 @@ private:
parsedArgs.round = cmdObj["round"].OID();
status = getGlobalReplicationCoordinator()->processReplSetElect(parsedArgs, &result);
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
} cmdReplSetElect;
@@ -847,7 +850,7 @@ public:
BSONObjBuilder& result) {
Status status = getGlobalReplicationCoordinator()->checkReplEnabledForCommand(&result);
if (!status.isOK())
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
log() << "Received replSetStepUp request";
@@ -857,7 +860,7 @@ public:
log() << "replSetStepUp request failed" << causedBy(status);
}
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
private:
@@ -882,14 +885,14 @@ public:
BSONObjBuilder& result) override {
Status status = getGlobalReplicationCoordinator()->checkReplEnabledForCommand(&result);
if (!status.isOK())
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
log() << "Received replSetAbortPrimaryCatchUp request";
status = getGlobalReplicationCoordinator()->abortCatchupIfNeeded();
if (!status.isOK()) {
log() << "replSetAbortPrimaryCatchUp request failed" << causedBy(status);
}
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
private:
diff --git a/src/mongo/db/repl/repl_set_request_votes.cpp b/src/mongo/db/repl/repl_set_request_votes.cpp
index 903533f703e..70b7c3d9b4b 100644
--- a/src/mongo/db/repl/repl_set_request_votes.cpp
+++ b/src/mongo/db/repl/repl_set_request_votes.cpp
@@ -53,20 +53,20 @@ private:
BSONObjBuilder& result) final {
Status status = getGlobalReplicationCoordinator()->checkReplEnabledForCommand(&result);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
ReplSetRequestVotesArgs parsedArgs;
status = parsedArgs.initialize(cmdObj);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
ReplSetRequestVotesResponse response;
status = getGlobalReplicationCoordinator()->processReplSetRequestVotes(
opCtx, parsedArgs, &response);
response.addToBSON(&result);
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
} cmdReplSetRequestVotes;
diff --git a/src/mongo/db/repl/replication_info.cpp b/src/mongo/db/repl/replication_info.cpp
index 76388589f95..7b5a5288dce 100644
--- a/src/mongo/db/repl/replication_info.cpp
+++ b/src/mongo/db/repl/replication_info.cpp
@@ -261,7 +261,7 @@ public:
BSONElement element = cmdObj[kMetadataDocumentName];
if (!element.eoo()) {
if (seenIsMaster) {
- return Command::appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
Status(ErrorCodes::ClientMetadataCannotBeMutated,
"The client metadata document may only be sent in the first isMaster"));
@@ -270,7 +270,8 @@ public:
auto swParseClientMetadata = ClientMetadata::parse(element);
if (!swParseClientMetadata.getStatus().isOK()) {
- return Command::appendCommandStatus(result, swParseClientMetadata.getStatus());
+ return CommandHelpers::appendCommandStatus(result,
+ swParseClientMetadata.getStatus());
}
invariant(swParseClientMetadata.getValue());
diff --git a/src/mongo/db/repl/resync.cpp b/src/mongo/db/repl/resync.cpp
index a1e60050652..2b7ba1b90b7 100644
--- a/src/mongo/db/repl/resync.cpp
+++ b/src/mongo/db/repl/resync.cpp
@@ -84,7 +84,7 @@ public:
if (getGlobalReplicationCoordinator()->getSettings().usingReplSets()) {
// Resync is disabled in production on replica sets until it stabilizes (SERVER-27081).
if (!Command::testCommandsEnabled) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
Status(ErrorCodes::OperationFailed,
"Replica sets do not support the resync command"));
@@ -96,16 +96,16 @@ public:
const MemberState memberState = replCoord->getMemberState();
if (memberState.startup()) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result, Status(ErrorCodes::NotYetInitialized, "no replication yet active"));
}
if (memberState.primary()) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result, Status(ErrorCodes::NotSecondary, "primaries cannot resync"));
}
auto status = replCoord->setFollowerMode(MemberState::RS_STARTUP2);
if (!status.isOK()) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
Status(status.code(),
str::stream()
diff --git a/src/mongo/db/repl/rs_rollback_no_uuid.cpp b/src/mongo/db/repl/rs_rollback_no_uuid.cpp
index 256719f04cd..7b87b8dbc1c 100644
--- a/src/mongo/db/repl/rs_rollback_no_uuid.cpp
+++ b/src/mongo/db/repl/rs_rollback_no_uuid.cpp
@@ -234,7 +234,7 @@ Status rollback_internal_no_uuid::updateFixUpInfoFromLocalOplogEntry(FixUpInfo&
// Retrieves the command name, so out of {renameCollection: "test.x"} it returns
// "renameCollection".
string cmdname = first.fieldName();
- Command* cmd = Command::findCommand(cmdname.c_str());
+ Command* cmd = CommandHelpers::findCommand(cmdname.c_str());
if (cmd == NULL) {
severe() << "Rollback no such command " << first.fieldName();
return Status(ErrorCodes::UnrecoverableRollbackError,
diff --git a/src/mongo/db/s/balancer/migration_manager_test.cpp b/src/mongo/db/s/balancer/migration_manager_test.cpp
index fb4ed4905ea..82be67e10a4 100644
--- a/src/mongo/db/s/balancer/migration_manager_test.cpp
+++ b/src/mongo/db/s/balancer/migration_manager_test.cpp
@@ -283,7 +283,7 @@ void MigrationManagerTest::expectMoveChunkCommand(const ChunkType& chunk,
const ShardId& toShardId,
const Status& returnStatus) {
BSONObjBuilder resultBuilder;
- Command::appendCommandStatus(resultBuilder, returnStatus);
+ CommandHelpers::appendCommandStatus(resultBuilder, returnStatus);
expectMoveChunkCommand(chunk, toShardId, resultBuilder.obj());
}
diff --git a/src/mongo/db/s/check_sharding_index_command.cpp b/src/mongo/db/s/check_sharding_index_command.cpp
index 6955d93db83..2d1e0e674da 100644
--- a/src/mongo/db/s/check_sharding_index_command.cpp
+++ b/src/mongo/db/s/check_sharding_index_command.cpp
@@ -80,7 +80,7 @@ public:
}
virtual std::string parseNs(const std::string& dbname, const BSONObj& cmdObj) const {
- return parseNsFullyQualified(dbname, cmdObj);
+ return CommandHelpers::parseNsFullyQualified(dbname, cmdObj);
}
bool errmsgRun(OperationContext* opCtx,
@@ -200,7 +200,7 @@ public:
}
if (PlanExecutor::DEAD == state || PlanExecutor::FAILURE == state) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
Status(ErrorCodes::OperationFailed,
str::stream() << "Executor error while checking sharding index: "
diff --git a/src/mongo/db/s/config/configsvr_add_shard_command.cpp b/src/mongo/db/s/config/configsvr_add_shard_command.cpp
index 214e109ebb3..329c24127ce 100644
--- a/src/mongo/db/s/config/configsvr_add_shard_command.cpp
+++ b/src/mongo/db/s/config/configsvr_add_shard_command.cpp
@@ -92,7 +92,7 @@ public:
const BSONObj& cmdObj,
BSONObjBuilder& result) override {
if (serverGlobalParams.clusterRole != ClusterRole::ConfigServer) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
Status(ErrorCodes::IllegalOperation,
"_configsvrAddShard can only be run on config servers"));
@@ -104,7 +104,7 @@ public:
auto swParsedRequest = AddShardRequest::parseFromConfigCommand(cmdObj);
if (!swParsedRequest.isOK()) {
- return appendCommandStatus(result, swParsedRequest.getStatus());
+ return CommandHelpers::appendCommandStatus(result, swParsedRequest.getStatus());
}
auto parsedRequest = std::move(swParsedRequest.getValue());
@@ -113,7 +113,7 @@ public:
auto validationStatus = parsedRequest.validate(rsConfig.isLocalHostAllowed());
if (!validationStatus.isOK()) {
- return appendCommandStatus(result, validationStatus);
+ return CommandHelpers::appendCommandStatus(result, validationStatus);
}
uassert(ErrorCodes::InvalidOptions,
@@ -136,7 +136,7 @@ public:
if (!addShardResult.isOK()) {
log() << "addShard request '" << parsedRequest << "'"
<< "failed" << causedBy(addShardResult.getStatus());
- return appendCommandStatus(result, addShardResult.getStatus());
+ return CommandHelpers::appendCommandStatus(result, addShardResult.getStatus());
}
result << "shardAdded" << addShardResult.getValue();
diff --git a/src/mongo/db/s/config/configsvr_commit_chunk_migration_command.cpp b/src/mongo/db/s/config/configsvr_commit_chunk_migration_command.cpp
index 7be915ca4f4..49b01f5454f 100644
--- a/src/mongo/db/s/config/configsvr_commit_chunk_migration_command.cpp
+++ b/src/mongo/db/s/config/configsvr_commit_chunk_migration_command.cpp
@@ -114,7 +114,7 @@ public:
}
std::string parseNs(const std::string& dbname, const BSONObj& cmdObj) const override {
- return parseNsFullyQualified(dbname, cmdObj);
+ return CommandHelpers::parseNsFullyQualified(dbname, cmdObj);
}
bool run(OperationContext* opCtx,
@@ -136,7 +136,7 @@ public:
commitRequest.getFromShard(),
commitRequest.getToShard());
if (!response.isOK()) {
- return appendCommandStatus(result, response.getStatus());
+ return CommandHelpers::appendCommandStatus(result, response.getStatus());
}
result.appendElements(response.getValue());
return true;
diff --git a/src/mongo/db/s/config/configsvr_create_database_command.cpp b/src/mongo/db/s/config/configsvr_create_database_command.cpp
index 86690de4e2e..a95c6748860 100644
--- a/src/mongo/db/s/config/configsvr_create_database_command.cpp
+++ b/src/mongo/db/s/config/configsvr_create_database_command.cpp
@@ -97,7 +97,7 @@ public:
BSONObjBuilder& result) {
if (serverGlobalParams.clusterRole != ClusterRole::ConfigServer) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
Status(ErrorCodes::IllegalOperation,
"_configsvrCreateDatabase can only be run on config servers"));
diff --git a/src/mongo/db/s/config/configsvr_drop_collection_command.cpp b/src/mongo/db/s/config/configsvr_drop_collection_command.cpp
index e186584a120..2b3987585bc 100644
--- a/src/mongo/db/s/config/configsvr_drop_collection_command.cpp
+++ b/src/mongo/db/s/config/configsvr_drop_collection_command.cpp
@@ -87,7 +87,7 @@ public:
}
std::string parseNs(const std::string& dbname, const BSONObj& cmdObj) const override {
- return parseNsFullyQualified(dbname, cmdObj);
+ return CommandHelpers::parseNsFullyQualified(dbname, cmdObj);
}
bool run(OperationContext* opCtx,
@@ -96,7 +96,7 @@ public:
BSONObjBuilder& result) override {
if (serverGlobalParams.clusterRole != ClusterRole::ConfigServer) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
Status(ErrorCodes::IllegalOperation,
"_configsvrDropCollection can only be run on config servers"));
diff --git a/src/mongo/db/s/config/configsvr_drop_database_command.cpp b/src/mongo/db/s/config/configsvr_drop_database_command.cpp
index 0439cf35cf2..c772c34cf97 100644
--- a/src/mongo/db/s/config/configsvr_drop_database_command.cpp
+++ b/src/mongo/db/s/config/configsvr_drop_database_command.cpp
@@ -91,7 +91,7 @@ public:
const BSONObj& cmdObj,
BSONObjBuilder& result) {
if (serverGlobalParams.clusterRole != ClusterRole::ConfigServer) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
Status(ErrorCodes::IllegalOperation,
"_configsvrDropDatabase can only be run on config servers"));
diff --git a/src/mongo/db/s/config/configsvr_enable_sharding_command.cpp b/src/mongo/db/s/config/configsvr_enable_sharding_command.cpp
index 62b1ff60606..cc60ec19b77 100644
--- a/src/mongo/db/s/config/configsvr_enable_sharding_command.cpp
+++ b/src/mongo/db/s/config/configsvr_enable_sharding_command.cpp
@@ -99,7 +99,7 @@ public:
BSONObjBuilder& result) {
if (serverGlobalParams.clusterRole != ClusterRole::ConfigServer) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
Status(ErrorCodes::IllegalOperation,
"_configsvrEnableSharding can only be run on config servers"));
@@ -113,9 +113,10 @@ public:
NamespaceString::validDBName(dbname, NamespaceString::DollarInDbNameBehavior::Allow));
if (dbname == NamespaceString::kAdminDb || dbname == NamespaceString::kLocalDb) {
- return appendCommandStatus(result,
- {ErrorCodes::InvalidOptions,
- str::stream() << "can't shard " + dbname + " database"});
+ return CommandHelpers::appendCommandStatus(
+ result,
+ {ErrorCodes::InvalidOptions,
+ str::stream() << "can't shard " + dbname + " database"});
}
uassert(ErrorCodes::InvalidOptions,
diff --git a/src/mongo/db/s/config/configsvr_merge_chunk_command.cpp b/src/mongo/db/s/config/configsvr_merge_chunk_command.cpp
index 07d4af626f3..4f7f06c5176 100644
--- a/src/mongo/db/s/config/configsvr_merge_chunk_command.cpp
+++ b/src/mongo/db/s/config/configsvr_merge_chunk_command.cpp
@@ -95,7 +95,7 @@ public:
}
std::string parseNs(const std::string& dbname, const BSONObj& cmdObj) const override {
- return parseNsFullyQualified(dbname, cmdObj);
+ return CommandHelpers::parseNsFullyQualified(dbname, cmdObj);
}
bool run(OperationContext* opCtx,
@@ -117,7 +117,7 @@ public:
parsedRequest.getShardName());
if (!mergeChunkResult.isOK()) {
- return appendCommandStatus(result, mergeChunkResult);
+ return CommandHelpers::appendCommandStatus(result, mergeChunkResult);
}
return true;
diff --git a/src/mongo/db/s/config/configsvr_move_primary_command.cpp b/src/mongo/db/s/config/configsvr_move_primary_command.cpp
index 3d1c85fa85f..3ccde97a393 100644
--- a/src/mongo/db/s/config/configsvr_move_primary_command.cpp
+++ b/src/mongo/db/s/config/configsvr_move_primary_command.cpp
@@ -110,7 +110,7 @@ public:
BSONObjBuilder& result) override {
if (serverGlobalParams.clusterRole != ClusterRole::ConfigServer) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
Status(ErrorCodes::IllegalOperation,
"_configsvrMovePrimary can only be run on config servers"));
@@ -127,7 +127,7 @@ public:
if (dbname == NamespaceString::kAdminDb || dbname == NamespaceString::kConfigDb ||
dbname == NamespaceString::kLocalDb) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
{ErrorCodes::InvalidOptions,
str::stream() << "Can't move primary for " << dbname << " database"});
@@ -159,7 +159,7 @@ public:
const std::string to = movePrimaryRequest.getTo().toString();
if (to.empty()) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
{ErrorCodes::InvalidOptions,
str::stream() << "you have to specify where you want to move it"});
@@ -231,7 +231,7 @@ public:
if (!worked) {
log() << "clone failed" << redact(cloneRes);
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result, {ErrorCodes::OperationFailed, str::stream() << "clone failed"});
}
diff --git a/src/mongo/db/s/config/configsvr_remove_shard_command.cpp b/src/mongo/db/s/config/configsvr_remove_shard_command.cpp
index d16fa7950c0..ae0b3e070aa 100644
--- a/src/mongo/db/s/config/configsvr_remove_shard_command.cpp
+++ b/src/mongo/db/s/config/configsvr_remove_shard_command.cpp
@@ -109,7 +109,8 @@ public:
std::string msg(str::stream() << "Could not drop shard '" << target
<< "' because it does not exist");
log() << msg;
- return appendCommandStatus(result, Status(ErrorCodes::ShardNotFound, msg));
+ return CommandHelpers::appendCommandStatus(result,
+ Status(ErrorCodes::ShardNotFound, msg));
}
const auto& shard = shardStatus.getValue();
@@ -156,7 +157,7 @@ public:
nullptr,
repl::ReadConcernLevel::kMajorityReadConcern);
if (!swChunks.isOK()) {
- return appendCommandStatus(result, swChunks.getStatus());
+ return CommandHelpers::appendCommandStatus(result, swChunks.getStatus());
}
const auto& chunks = swChunks.getValue();
diff --git a/src/mongo/db/s/config/configsvr_shard_collection_command.cpp b/src/mongo/db/s/config/configsvr_shard_collection_command.cpp
index 49d09e326e5..64cc9986f58 100644
--- a/src/mongo/db/s/config/configsvr_shard_collection_command.cpp
+++ b/src/mongo/db/s/config/configsvr_shard_collection_command.cpp
@@ -725,7 +725,7 @@ public:
}
std::string parseNs(const std::string& dbname, const BSONObj& cmdObj) const override {
- return parseNsFullyQualified(dbname, cmdObj);
+ return CommandHelpers::parseNsFullyQualified(dbname, cmdObj);
}
bool run(OperationContext* opCtx,
diff --git a/src/mongo/db/s/config/configsvr_split_chunk_command.cpp b/src/mongo/db/s/config/configsvr_split_chunk_command.cpp
index 23e243384dd..4740525ee23 100644
--- a/src/mongo/db/s/config/configsvr_split_chunk_command.cpp
+++ b/src/mongo/db/s/config/configsvr_split_chunk_command.cpp
@@ -93,7 +93,7 @@ public:
}
std::string parseNs(const std::string& dbname, const BSONObj& cmdObj) const override {
- return parseNsFullyQualified(dbname, cmdObj);
+ return CommandHelpers::parseNsFullyQualified(dbname, cmdObj);
}
bool run(OperationContext* opCtx,
diff --git a/src/mongo/db/s/flush_routing_table_cache_updates_command.cpp b/src/mongo/db/s/flush_routing_table_cache_updates_command.cpp
index 3bdf4d5556f..e24808b412c 100644
--- a/src/mongo/db/s/flush_routing_table_cache_updates_command.cpp
+++ b/src/mongo/db/s/flush_routing_table_cache_updates_command.cpp
@@ -79,7 +79,7 @@ public:
}
std::string parseNs(const std::string& dbname, const BSONObj& cmdObj) const override {
- return parseNsFullyQualified(dbname, cmdObj);
+ return CommandHelpers::parseNsFullyQualified(dbname, cmdObj);
}
Status checkAuthForCommand(Client* client,
diff --git a/src/mongo/db/s/get_shard_version_command.cpp b/src/mongo/db/s/get_shard_version_command.cpp
index 7cdfa33f98c..65ab0f0b652 100644
--- a/src/mongo/db/s/get_shard_version_command.cpp
+++ b/src/mongo/db/s/get_shard_version_command.cpp
@@ -78,7 +78,7 @@ public:
}
std::string parseNs(const std::string& dbname, const BSONObj& cmdObj) const override {
- return parseNsFullyQualified(dbname, cmdObj);
+ return CommandHelpers::parseNsFullyQualified(dbname, cmdObj);
}
bool run(OperationContext* opCtx,
diff --git a/src/mongo/db/s/merge_chunks_command.cpp b/src/mongo/db/s/merge_chunks_command.cpp
index aba860514cb..aa36b6dfdf9 100644
--- a/src/mongo/db/s/merge_chunks_command.cpp
+++ b/src/mongo/db/s/merge_chunks_command.cpp
@@ -333,7 +333,7 @@ public:
}
std::string parseNs(const std::string& dbname, const BSONObj& cmdObj) const override {
- return parseNsFullyQualified(dbname, cmdObj);
+ return CommandHelpers::parseNsFullyQualified(dbname, cmdObj);
}
bool adminOnly() const override {
@@ -404,7 +404,7 @@ public:
}
auto mergeStatus = mergeChunks(opCtx, NamespaceString(ns), minKey, maxKey, epoch);
- return appendCommandStatus(result, mergeStatus);
+ return CommandHelpers::appendCommandStatus(result, mergeStatus);
}
} mergeChunksCmd;
diff --git a/src/mongo/db/s/migration_destination_manager_legacy_commands.cpp b/src/mongo/db/s/migration_destination_manager_legacy_commands.cpp
index 1024433b5d6..007288ff96d 100644
--- a/src/mongo/db/s/migration_destination_manager_legacy_commands.cpp
+++ b/src/mongo/db/s/migration_destination_manager_legacy_commands.cpp
@@ -232,7 +232,7 @@ public:
mdm->report(result);
if (!status.isOK()) {
log() << status.reason();
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
return true;
}
@@ -281,7 +281,7 @@ public:
mdm->report(result);
if (!status.isOK()) {
log() << status.reason();
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
} else if (migrationSessionIdStatus == ErrorCodes::NoSuchKey) {
mdm->abortWithoutSessionIdCheck();
diff --git a/src/mongo/db/s/move_chunk_command.cpp b/src/mongo/db/s/move_chunk_command.cpp
index 235e053a3f8..f736f1b8d2e 100644
--- a/src/mongo/db/s/move_chunk_command.cpp
+++ b/src/mongo/db/s/move_chunk_command.cpp
@@ -103,7 +103,7 @@ public:
}
std::string parseNs(const std::string& dbname, const BSONObj& cmdObj) const override {
- return parseNsFullyQualified(dbname, cmdObj);
+ return CommandHelpers::parseNsFullyQualified(dbname, cmdObj);
}
bool run(OperationContext* opCtx,
@@ -151,7 +151,7 @@ public:
// and the 3.4 shard, which failed to set the ChunkTooBig status code.
// TODO: Remove after 3.6 is released.
result.appendBool("chunkTooBig", true);
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
uassertStatusOK(status);
diff --git a/src/mongo/db/s/split_chunk_command.cpp b/src/mongo/db/s/split_chunk_command.cpp
index bd783fd6b84..4d0060cb941 100644
--- a/src/mongo/db/s/split_chunk_command.cpp
+++ b/src/mongo/db/s/split_chunk_command.cpp
@@ -87,7 +87,7 @@ public:
}
std::string parseNs(const std::string& dbname, const BSONObj& cmdObj) const override {
- return parseNsFullyQualified(dbname, cmdObj);
+ return CommandHelpers::parseNsFullyQualified(dbname, cmdObj);
}
bool errmsgRun(OperationContext* opCtx,
@@ -126,7 +126,7 @@ public:
string shardName;
auto parseShardNameStatus = bsonExtractStringField(cmdObj, "from", &shardName);
if (!parseShardNameStatus.isOK())
- return appendCommandStatus(result, parseShardNameStatus);
+ return CommandHelpers::appendCommandStatus(result, parseShardNameStatus);
log() << "received splitChunk request: " << redact(cmdObj);
diff --git a/src/mongo/db/s/split_vector_command.cpp b/src/mongo/db/s/split_vector_command.cpp
index 5f2ec7da886..3a6fed519ec 100644
--- a/src/mongo/db/s/split_vector_command.cpp
+++ b/src/mongo/db/s/split_vector_command.cpp
@@ -80,7 +80,7 @@ public:
}
std::string parseNs(const string& dbname, const BSONObj& cmdObj) const override {
- return parseNsFullyQualified(dbname, cmdObj);
+        return CommandHelpers::parseNsFullyQualified(dbname, cmdObj);
}
bool errmsgRun(OperationContext* opCtx,
@@ -145,7 +145,7 @@ public:
maxChunkSize,
maxChunkSizeBytes);
if (!statusWithSplitKeys.isOK()) {
- return appendCommandStatus(result, statusWithSplitKeys.getStatus());
+ return CommandHelpers::appendCommandStatus(result, statusWithSplitKeys.getStatus());
}
result.append("splitKeys", statusWithSplitKeys.getValue());
diff --git a/src/mongo/db/service_entry_point_mongod.cpp b/src/mongo/db/service_entry_point_mongod.cpp
index c6acd32c214..d875e12e0a5 100644
--- a/src/mongo/db/service_entry_point_mongod.cpp
+++ b/src/mongo/db/service_entry_point_mongod.cpp
@@ -362,14 +362,14 @@ void _waitForWriteConcernAndAddToCommandResponse(OperationContext* opCtx,
WriteConcernResult res;
auto waitForWCStatus =
waitForWriteConcern(opCtx, lastOpAfterRun, opCtx->getWriteConcern(), &res);
- Command::appendCommandWCStatus(*commandResponseBuilder, waitForWCStatus, res);
+ CommandHelpers::appendCommandWCStatus(*commandResponseBuilder, waitForWCStatus, res);
// SERVER-22421: This code is to ensure error response backwards compatibility with the
// user management commands. This can be removed in 3.6.
- if (!waitForWCStatus.isOK() && Command::isUserManagementCommand(commandName)) {
+ if (!waitForWCStatus.isOK() && CommandHelpers::isUserManagementCommand(commandName)) {
BSONObj temp = commandResponseBuilder->asTempObj().copy();
commandResponseBuilder->resetToEmpty();
- Command::appendCommandStatus(*commandResponseBuilder, waitForWCStatus);
+ CommandHelpers::appendCommandStatus(*commandResponseBuilder, waitForWCStatus);
commandResponseBuilder->appendElementsUnique(temp);
}
}
@@ -457,7 +457,7 @@ bool runCommandImpl(OperationContext* opCtx,
<< redact(command->getRedactedCopyForLogging(request.body));
}
- auto result = Command::appendCommandStatus(inPlaceReplyBob, rcStatus);
+ auto result = CommandHelpers::appendCommandStatus(inPlaceReplyBob, rcStatus);
inPlaceReplyBob.doneFast();
BSONObjBuilder metadataBob;
appendReplyMetadataOnError(opCtx, &metadataBob);
@@ -468,7 +468,7 @@ bool runCommandImpl(OperationContext* opCtx,
bool result;
if (!command->supportsWriteConcern(cmd)) {
if (commandSpecifiesWriteConcern(cmd)) {
- auto result = Command::appendCommandStatus(
+ auto result = CommandHelpers::appendCommandStatus(
inPlaceReplyBob,
{ErrorCodes::InvalidOptions, "Command does not support writeConcern"});
inPlaceReplyBob.doneFast();
@@ -482,7 +482,8 @@ bool runCommandImpl(OperationContext* opCtx,
} else {
auto wcResult = extractWriteConcern(opCtx, cmd, db);
if (!wcResult.isOK()) {
- auto result = Command::appendCommandStatus(inPlaceReplyBob, wcResult.getStatus());
+ auto result =
+ CommandHelpers::appendCommandStatus(inPlaceReplyBob, wcResult.getStatus());
inPlaceReplyBob.doneFast();
BSONObjBuilder metadataBob;
appendReplyMetadataOnError(opCtx, &metadataBob);
@@ -519,7 +520,8 @@ bool runCommandImpl(OperationContext* opCtx,
if (!linearizableReadStatus.isOK()) {
inPlaceReplyBob.resetToEmpty();
- auto result = Command::appendCommandStatus(inPlaceReplyBob, linearizableReadStatus);
+ auto result =
+ CommandHelpers::appendCommandStatus(inPlaceReplyBob, linearizableReadStatus);
inPlaceReplyBob.doneFast();
BSONObjBuilder metadataBob;
appendReplyMetadataOnError(opCtx, &metadataBob);
@@ -528,7 +530,7 @@ bool runCommandImpl(OperationContext* opCtx,
}
}
- Command::appendCommandStatus(inPlaceReplyBob, result);
+ CommandHelpers::appendCommandStatus(inPlaceReplyBob, result);
auto operationTime = computeOperationTime(
opCtx, startOperationTime, repl::ReadConcernArgs::get(opCtx).getLevel());
@@ -607,7 +609,7 @@ void execCommandDatabase(OperationContext* opCtx,
cmdOptionMaxTimeMSField = element;
} else if (fieldName == "allowImplicitCollectionCreation") {
allowImplicitCollectionCreationField = element;
- } else if (fieldName == Command::kHelpFieldName) {
+ } else if (fieldName == CommandHelpers::kHelpFieldName) {
helpField = element;
} else if (fieldName == ChunkVersion::kShardVersionField) {
shardVersionFieldIdx = element;
@@ -621,7 +623,7 @@ void execCommandDatabase(OperationContext* opCtx,
topLevelFields[fieldName]++ == 0);
}
- if (Command::isHelpRequest(helpField)) {
+ if (CommandHelpers::isHelpRequest(helpField)) {
CurOp::get(opCtx)->ensureStarted();
// We disable last-error for help requests due to SERVER-11492, because config servers
// use help requests to determine which commands are database writes, and so must be
@@ -856,7 +858,7 @@ DbResponse runCommands(OperationContext* opCtx, const Message& message) {
// to avoid displaying potentially sensitive information in the logs,
// we restrict the log message to the name of the unrecognized command.
// However, the complete command object will still be echoed to the client.
- if (!(c = Command::findCommand(request.getCommandName()))) {
+ if (!(c = CommandHelpers::findCommand(request.getCommandName()))) {
globalCommandRegistry()->incrementUnknownCommands();
std::string msg = str::stream() << "no such command: '" << request.getCommandName()
<< "'";
diff --git a/src/mongo/db/sessions_collection_config_server.cpp b/src/mongo/db/sessions_collection_config_server.cpp
index 104e787981e..2ea43051aff 100644
--- a/src/mongo/db/sessions_collection_config_server.cpp
+++ b/src/mongo/db/sessions_collection_config_server.cpp
@@ -70,7 +70,7 @@ Status SessionsCollectionConfigServer::_shardCollectionIfNeeded(OperationContext
DBDirectClient client(opCtx);
BSONObj info;
if (!client.runCommand(
- "admin", Command::appendMajorityWriteConcern(shardCollection.toBSON()), info)) {
+ "admin", CommandHelpers::appendMajorityWriteConcern(shardCollection.toBSON()), info)) {
return getStatusFromCommandResult(info);
}