-rw-r--r--  buildscripts/idl/idl/generator.py | 2
-rw-r--r--  src/mongo/bson/util/bson_check.h | 2
-rw-r--r--  src/mongo/db/auth/sasl_commands.cpp | 8
-rw-r--r--  src/mongo/db/auth/user_management_commands_parser.cpp | 2
-rw-r--r--  src/mongo/db/catalog/coll_mod.cpp | 2
-rw-r--r--  src/mongo/db/catalog/collection_options.cpp | 2
-rw-r--r--  src/mongo/db/catalog/create_collection.cpp | 6
-rw-r--r--  src/mongo/db/commands.cpp | 381
-rw-r--r--  src/mongo/db/commands.h | 283
-rw-r--r--  src/mongo/db/commands/apply_ops_cmd.cpp | 20
-rw-r--r--  src/mongo/db/commands/authentication_commands.cpp | 8
-rw-r--r--  src/mongo/db/commands/clone.cpp | 2
-rw-r--r--  src/mongo/db/commands/clone_collection.cpp | 4
-rw-r--r--  src/mongo/db/commands/collection_to_capped.cpp | 10
-rw-r--r--  src/mongo/db/commands/compact.cpp | 8
-rw-r--r--  src/mongo/db/commands/connection_status.cpp | 2
-rw-r--r--  src/mongo/db/commands/copydb.cpp | 2
-rw-r--r--  src/mongo/db/commands/copydb_start_commands.cpp | 6
-rw-r--r--  src/mongo/db/commands/count_cmd.cpp | 18
-rw-r--r--  src/mongo/db/commands/create_indexes.cpp | 21
-rw-r--r--  src/mongo/db/commands/current_op.cpp | 2
-rw-r--r--  src/mongo/db/commands/current_op_common.cpp | 4
-rw-r--r--  src/mongo/db/commands/dbcheck.cpp | 2
-rw-r--r--  src/mongo/db/commands/dbcommands.cpp | 72
-rw-r--r--  src/mongo/db/commands/distinct.cpp | 24
-rw-r--r--  src/mongo/db/commands/do_txn_cmd.cpp | 5
-rw-r--r--  src/mongo/db/commands/drop_indexes.cpp | 15
-rw-r--r--  src/mongo/db/commands/end_sessions_command.cpp | 3
-rw-r--r--  src/mongo/db/commands/explain_cmd.cpp | 14
-rw-r--r--  src/mongo/db/commands/feature_compatibility_version_command_parser.cpp | 2
-rw-r--r--  src/mongo/db/commands/find_and_modify.cpp | 40
-rw-r--r--  src/mongo/db/commands/find_cmd.cpp | 29
-rw-r--r--  src/mongo/db/commands/fsync.cpp | 2
-rw-r--r--  src/mongo/db/commands/generic.cpp | 6
-rw-r--r--  src/mongo/db/commands/geo_near_cmd.cpp | 14
-rw-r--r--  src/mongo/db/commands/get_last_error.cpp | 12
-rw-r--r--  src/mongo/db/commands/getmore_cmd.cpp | 23
-rw-r--r--  src/mongo/db/commands/group_cmd.cpp | 18
-rw-r--r--  src/mongo/db/commands/haystack.cpp | 2
-rw-r--r--  src/mongo/db/commands/index_filter_commands.cpp | 4
-rw-r--r--  src/mongo/db/commands/kill_all_sessions_by_pattern_command.cpp | 8
-rw-r--r--  src/mongo/db/commands/kill_all_sessions_command.cpp | 6
-rw-r--r--  src/mongo/db/commands/kill_sessions_command.cpp | 6
-rw-r--r--  src/mongo/db/commands/killcursors_common.cpp | 2
-rw-r--r--  src/mongo/db/commands/list_collections.cpp | 10
-rw-r--r--  src/mongo/db/commands/list_databases.cpp | 12
-rw-r--r--  src/mongo/db/commands/list_indexes.cpp | 13
-rw-r--r--  src/mongo/db/commands/mr.cpp | 13
-rw-r--r--  src/mongo/db/commands/oplog_application_checks.cpp | 2
-rw-r--r--  src/mongo/db/commands/oplog_note.cpp | 18
-rw-r--r--  src/mongo/db/commands/parallel_collection_scan.cpp | 23
-rw-r--r--  src/mongo/db/commands/parameters.cpp | 2
-rw-r--r--  src/mongo/db/commands/pipeline_command.cpp | 13
-rw-r--r--  src/mongo/db/commands/plan_cache_commands.cpp | 4
-rw-r--r--  src/mongo/db/commands/reap_logical_session_cache_now.cpp | 2
-rw-r--r--  src/mongo/db/commands/refresh_logical_session_cache_now.cpp | 5
-rw-r--r--  src/mongo/db/commands/refresh_sessions_command.cpp | 5
-rw-r--r--  src/mongo/db/commands/refresh_sessions_command_internal.cpp | 2
-rw-r--r--  src/mongo/db/commands/rename_collection_cmd.cpp | 10
-rw-r--r--  src/mongo/db/commands/repair_cursor.cpp | 4
-rw-r--r--  src/mongo/db/commands/resize_oplog.cpp | 16
-rw-r--r--  src/mongo/db/commands/set_feature_compatibility_version_command.cpp | 18
-rw-r--r--  src/mongo/db/commands/snapshot_management.cpp | 8
-rw-r--r--  src/mongo/db/commands/start_session_command.cpp | 5
-rw-r--r--  src/mongo/db/commands/test_commands.cpp | 24
-rw-r--r--  src/mongo/db/commands/touch.cpp | 6
-rw-r--r--  src/mongo/db/commands/user_management_commands.cpp | 272
-rw-r--r--  src/mongo/db/commands/validate.cpp | 13
-rw-r--r--  src/mongo/db/commands_test.cpp | 23
-rw-r--r--  src/mongo/db/dbdirectclient.cpp | 2
-rw-r--r--  src/mongo/db/exec/stagedebug_cmd.cpp | 12
-rw-r--r--  src/mongo/db/ftdc/ftdc_server.cpp | 4
-rw-r--r--  src/mongo/db/ops/write_ops_exec.cpp | 2
-rw-r--r--  src/mongo/db/pipeline/aggregation_request.cpp | 2
-rw-r--r--  src/mongo/db/query/getmore_request.cpp | 2
-rw-r--r--  src/mongo/db/query/query_request.cpp | 2
-rw-r--r--  src/mongo/db/repl/master_slave.cpp | 4
-rw-r--r--  src/mongo/db/repl/oplog.cpp | 2
-rw-r--r--  src/mongo/db/repl/repl_set_commands.cpp | 105
-rw-r--r--  src/mongo/db/repl/repl_set_request_votes.cpp | 6
-rw-r--r--  src/mongo/db/repl/replication_info.cpp | 5
-rw-r--r--  src/mongo/db/repl/resync.cpp | 8
-rw-r--r--  src/mongo/db/repl/rs_rollback_no_uuid.cpp | 2
-rw-r--r--  src/mongo/db/s/balancer/migration_manager_test.cpp | 2
-rw-r--r--  src/mongo/db/s/check_sharding_index_command.cpp | 4
-rw-r--r--  src/mongo/db/s/config/configsvr_add_shard_command.cpp | 8
-rw-r--r--  src/mongo/db/s/config/configsvr_commit_chunk_migration_command.cpp | 4
-rw-r--r--  src/mongo/db/s/config/configsvr_create_database_command.cpp | 2
-rw-r--r--  src/mongo/db/s/config/configsvr_drop_collection_command.cpp | 4
-rw-r--r--  src/mongo/db/s/config/configsvr_drop_database_command.cpp | 2
-rw-r--r--  src/mongo/db/s/config/configsvr_enable_sharding_command.cpp | 9
-rw-r--r--  src/mongo/db/s/config/configsvr_merge_chunk_command.cpp | 4
-rw-r--r--  src/mongo/db/s/config/configsvr_move_primary_command.cpp | 8
-rw-r--r--  src/mongo/db/s/config/configsvr_remove_shard_command.cpp | 5
-rw-r--r--  src/mongo/db/s/config/configsvr_shard_collection_command.cpp | 2
-rw-r--r--  src/mongo/db/s/config/configsvr_split_chunk_command.cpp | 2
-rw-r--r--  src/mongo/db/s/flush_routing_table_cache_updates_command.cpp | 2
-rw-r--r--  src/mongo/db/s/get_shard_version_command.cpp | 2
-rw-r--r--  src/mongo/db/s/merge_chunks_command.cpp | 4
-rw-r--r--  src/mongo/db/s/migration_destination_manager_legacy_commands.cpp | 4
-rw-r--r--  src/mongo/db/s/move_chunk_command.cpp | 4
-rw-r--r--  src/mongo/db/s/split_chunk_command.cpp | 4
-rw-r--r--  src/mongo/db/s/split_vector_command.cpp | 4
-rw-r--r--  src/mongo/db/service_entry_point_mongod.cpp | 24
-rw-r--r--  src/mongo/db/sessions_collection_config_server.cpp | 2
-rw-r--r--  src/mongo/executor/network_test_env.cpp | 4
-rw-r--r--  src/mongo/idl/idl_parser.cpp | 2
-rw-r--r--  src/mongo/rpc/command_request_builder.cpp | 2
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_client_impl.cpp | 18
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_log_change_test.cpp | 8
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_shard_collection_test.cpp | 2
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_test.cpp | 23
-rw-r--r--  src/mongo/s/commands/cluster_add_shard_cmd.cpp | 6
-rw-r--r--  src/mongo/s/commands/cluster_aggregate.cpp | 10
-rw-r--r--  src/mongo/s/commands/cluster_commands_helpers.cpp | 20
-rw-r--r--  src/mongo/s/commands/cluster_control_balancer_cmd.cpp | 2
-rw-r--r--  src/mongo/s/commands/cluster_count_cmd.cpp | 16
-rw-r--r--  src/mongo/s/commands/cluster_current_op.cpp | 2
-rw-r--r--  src/mongo/s/commands/cluster_db_stats_cmd.cpp | 14
-rw-r--r--  src/mongo/s/commands/cluster_drop_cmd.cpp | 6
-rw-r--r--  src/mongo/s/commands/cluster_drop_database_cmd.cpp | 6
-rw-r--r--  src/mongo/s/commands/cluster_enable_sharding_cmd.cpp | 6
-rw-r--r--  src/mongo/s/commands/cluster_explain.cpp | 6
-rw-r--r--  src/mongo/s/commands/cluster_explain_cmd.cpp | 12
-rw-r--r--  src/mongo/s/commands/cluster_find_and_modify_cmd.cpp | 10
-rw-r--r--  src/mongo/s/commands/cluster_find_cmd.cpp | 19
-rw-r--r--  src/mongo/s/commands/cluster_get_last_error_cmd.cpp | 12
-rw-r--r--  src/mongo/s/commands/cluster_get_shard_version_cmd.cpp | 2
-rw-r--r--  src/mongo/s/commands/cluster_getmore_cmd.cpp | 4
-rw-r--r--  src/mongo/s/commands/cluster_index_filter_cmd.cpp | 4
-rw-r--r--  src/mongo/s/commands/cluster_is_master_cmd.cpp | 5
-rw-r--r--  src/mongo/s/commands/cluster_kill_op.cpp | 2
-rw-r--r--  src/mongo/s/commands/cluster_list_databases_cmd.cpp | 2
-rw-r--r--  src/mongo/s/commands/cluster_map_reduce_cmd.cpp | 11
-rw-r--r--  src/mongo/s/commands/cluster_merge_chunks_cmd.cpp | 6
-rw-r--r--  src/mongo/s/commands/cluster_move_chunk_cmd.cpp | 5
-rw-r--r--  src/mongo/s/commands/cluster_move_primary_cmd.cpp | 6
-rw-r--r--  src/mongo/s/commands/cluster_multicast.cpp | 2
-rw-r--r--  src/mongo/s/commands/cluster_pipeline_cmd.cpp | 4
-rw-r--r--  src/mongo/s/commands/cluster_plan_cache_cmd.cpp | 8
-rw-r--r--  src/mongo/s/commands/cluster_remove_shard_cmd.cpp | 6
-rw-r--r--  src/mongo/s/commands/cluster_reset_error_cmd.cpp | 3
-rw-r--r--  src/mongo/s/commands/cluster_set_feature_compatibility_version_cmd.cpp | 2
-rw-r--r--  src/mongo/s/commands/cluster_shard_collection_cmd.cpp | 8
-rw-r--r--  src/mongo/s/commands/cluster_split_cmd.cpp | 2
-rw-r--r--  src/mongo/s/commands/cluster_user_management_commands.cpp | 102
-rw-r--r--  src/mongo/s/commands/commands_public.cpp | 162
-rw-r--r--  src/mongo/s/commands/pipeline_s.cpp | 2
-rw-r--r--  src/mongo/s/commands/strategy.cpp | 22
-rw-r--r--  src/mongo/s/shard_server_test_fixture.cpp | 2
-rw-r--r--  src/mongo/s/sharding_test_fixture.cpp | 2
151 files changed, 1300 insertions, 1171 deletions
diff --git a/buildscripts/idl/idl/generator.py b/buildscripts/idl/idl/generator.py
index f1b9cb97bdb..407303ff60f 100644
--- a/buildscripts/idl/idl/generator.py
+++ b/buildscripts/idl/idl/generator.py
@@ -1052,7 +1052,7 @@ class _CppSourceFileWriter(_CppFileWriterBase):
# should ignore regardless of strict mode.
command_predicate = None
if isinstance(struct, ast.Command):
- command_predicate = "!Command::isGenericArgument(fieldName)"
+ command_predicate = "!CommandHelpers::isGenericArgument(fieldName)"
with self._predicate(command_predicate):
self._writer.write_line('ctxt.throwUnknownField(fieldName);')
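(Note: the predicate written above ends up guarding the generated unknown-field error, so IDL-generated command parsers keep skipping generic arguments. Roughly, the emitted C++ looks like the sketch below; this is illustrative, not copied from generated output.)

// Sketch of the unknown-field handling the IDL generator emits for a command
// struct after this change (illustrative only):
if (!CommandHelpers::isGenericArgument(fieldName)) {
    ctxt.throwUnknownField(fieldName);
}
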
diff --git a/src/mongo/bson/util/bson_check.h b/src/mongo/bson/util/bson_check.h
index 953279a751d..4e290024824 100644
--- a/src/mongo/bson/util/bson_check.h
+++ b/src/mongo/bson/util/bson_check.h
@@ -91,7 +91,7 @@ Status bsonCheckOnlyHasFieldsForCommand(StringData objectName,
const BSONObj& obj,
const Container& allowedFields) {
return bsonCheckOnlyHasFieldsImpl(objectName, obj, [&](StringData name) {
- return Command::isGenericArgument(name) ||
+ return CommandHelpers::isGenericArgument(name) ||
(std::find(std::begin(allowedFields), std::end(allowedFields), name) !=
std::end(allowedFields));
});
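(Note: callers of this helper still pass only their command-specific field names; generic arguments are accepted implicitly via the renamed check. A minimal usage sketch follows; the command and field names are hypothetical.)

// Hypothetical caller: accept the command's own fields plus any generic
// argument (maxTimeMS, $db, ...) and reject everything else.
const StringData allowedFields[] = {"exampleCmd"_sd, "verbose"_sd};
Status status = bsonCheckOnlyHasFieldsForCommand("exampleCmd", cmdObj, allowedFields);
if (!status.isOK()) {
    return status;  // a non-OK Status naming the unexpected field
}
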
diff --git a/src/mongo/db/auth/sasl_commands.cpp b/src/mongo/db/auth/sasl_commands.cpp
index bd4e4e04c5f..0d762e5e38c 100644
--- a/src/mongo/db/auth/sasl_commands.cpp
+++ b/src/mongo/db/auth/sasl_commands.cpp
@@ -285,7 +285,7 @@ bool CmdSaslStart::run(OperationContext* opCtx,
session->setOpCtxt(opCtx);
Status status = doSaslStart(client, session, db, cmdObj, &result);
- appendCommandStatus(result, status);
+ CommandHelpers::appendCommandStatus(result, status);
if (session->isDone()) {
audit::logAuthentication(client,
@@ -314,7 +314,7 @@ bool CmdSaslContinue::run(OperationContext* opCtx,
AuthenticationSession::swap(client, sessionGuard);
if (!sessionGuard || sessionGuard->getType() != AuthenticationSession::SESSION_TYPE_SASL) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result, Status(ErrorCodes::ProtocolError, "No SASL session state found"));
}
@@ -324,7 +324,7 @@ bool CmdSaslContinue::run(OperationContext* opCtx,
// Authenticating the __system@local user to the admin database on mongos is required
// by the auth passthrough test suite.
if (session->getAuthenticationDatabase() != db && !Command::testCommandsEnabled) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
Status(ErrorCodes::ProtocolError,
"Attempt to switch database target during SASL authentication."));
@@ -333,7 +333,7 @@ bool CmdSaslContinue::run(OperationContext* opCtx,
session->setOpCtxt(opCtx);
Status status = doSaslContinue(client, session, cmdObj, &result);
- appendCommandStatus(result, status);
+ CommandHelpers::appendCommandStatus(result, status);
if (session->isDone()) {
audit::logAuthentication(client,
diff --git a/src/mongo/db/auth/user_management_commands_parser.cpp b/src/mongo/db/auth/user_management_commands_parser.cpp
index 5d7f682579e..52d33822be0 100644
--- a/src/mongo/db/auth/user_management_commands_parser.cpp
+++ b/src/mongo/db/auth/user_management_commands_parser.cpp
@@ -62,7 +62,7 @@ Status _checkNoExtraFields(const BSONObj& cmdObj,
// ones.
for (BSONObjIterator iter(cmdObj); iter.more(); iter.next()) {
StringData fieldName = (*iter).fieldNameStringData();
- if (!Command::isGenericArgument(fieldName) &&
+ if (!CommandHelpers::isGenericArgument(fieldName) &&
!validFieldNames.count(fieldName.toString())) {
return Status(ErrorCodes::BadValue,
mongoutils::str::stream() << "\"" << fieldName << "\" is not "
diff --git a/src/mongo/db/catalog/coll_mod.cpp b/src/mongo/db/catalog/coll_mod.cpp
index b264a01da5c..d46be96b4e4 100644
--- a/src/mongo/db/catalog/coll_mod.cpp
+++ b/src/mongo/db/catalog/coll_mod.cpp
@@ -89,7 +89,7 @@ StatusWith<CollModRequest> parseCollModRequest(OperationContext* opCtx,
BSONForEach(e, cmdObj) {
const auto fieldName = e.fieldNameStringData();
- if (Command::isGenericArgument(fieldName)) {
+ if (CommandHelpers::isGenericArgument(fieldName)) {
continue; // Don't add to oplog builder.
} else if (fieldName == "collMod") {
// no-op
diff --git a/src/mongo/db/catalog/collection_options.cpp b/src/mongo/db/catalog/collection_options.cpp
index 52724289782..9ca8ab1b584 100644
--- a/src/mongo/db/catalog/collection_options.cpp
+++ b/src/mongo/db/catalog/collection_options.cpp
@@ -241,7 +241,7 @@ Status CollectionOptions::parse(const BSONObj& options, ParseKind kind) {
}
pipeline = e.Obj().getOwned();
- } else if (!createdOn24OrEarlier && !Command::isGenericArgument(fieldName)) {
+ } else if (!createdOn24OrEarlier && !CommandHelpers::isGenericArgument(fieldName)) {
return Status(ErrorCodes::InvalidOptions,
str::stream() << "The field '" << fieldName
<< "' is not a valid collection option. Options: "
diff --git a/src/mongo/db/catalog/create_collection.cpp b/src/mongo/db/catalog/create_collection.cpp
index a9bfb2ee7f5..3b5ed3f50dc 100644
--- a/src/mongo/db/catalog/create_collection.cpp
+++ b/src/mongo/db/catalog/create_collection.cpp
@@ -72,7 +72,7 @@ Status createCollection(OperationContext* opCtx,
BSONObjBuilder optionsBuilder;
while (it.more()) {
const auto elem = it.next();
- if (!Command::isGenericArgument(elem.fieldNameStringData()))
+ if (!CommandHelpers::isGenericArgument(elem.fieldNameStringData()))
optionsBuilder.append(elem);
if (elem.fieldNameStringData() == "viewOn") {
// Views don't have UUIDs so it should always be parsed for command.
@@ -119,7 +119,7 @@ Status createCollection(OperationContext* opCtx,
const BSONObj& cmdObj,
const BSONObj& idIndex) {
return createCollection(opCtx,
- Command::parseNsCollectionRequired(dbName, cmdObj),
+ CommandHelpers::parseNsCollectionRequired(dbName, cmdObj),
cmdObj,
idIndex,
CollectionOptions::parseForCommand);
@@ -132,7 +132,7 @@ Status createCollectionForApplyOps(OperationContext* opCtx,
const BSONObj& idIndex) {
invariant(opCtx->lockState()->isDbLockedForMode(dbName, MODE_X));
- const NamespaceString newCollName(Command::parseNsCollectionRequired(dbName, cmdObj));
+ const NamespaceString newCollName(CommandHelpers::parseNsCollectionRequired(dbName, cmdObj));
auto newCmd = cmdObj;
auto* const serviceContext = opCtx->getServiceContext();
diff --git a/src/mongo/db/commands.cpp b/src/mongo/db/commands.cpp
index 25ee7429189..b7b67b5092d 100644
--- a/src/mongo/db/commands.cpp
+++ b/src/mongo/db/commands.cpp
@@ -73,58 +73,30 @@ const WriteConcernOptions kMajorityWriteConcern(
} // namespace
-Command::~Command() = default;
-
-BSONObj Command::appendPassthroughFields(const BSONObj& cmdObjWithPassthroughFields,
- const BSONObj& request) {
- BSONObjBuilder b;
- b.appendElements(request);
- for (const auto& elem :
- Command::filterCommandRequestForPassthrough(cmdObjWithPassthroughFields)) {
- const auto name = elem.fieldNameStringData();
- if (Command::isGenericArgument(name) && !request.hasField(name)) {
- b.append(elem);
- }
- }
- return b.obj();
-}
-
-BSONObj Command::appendMajorityWriteConcern(const BSONObj& cmdObj) {
-
- WriteConcernOptions newWC = kMajorityWriteConcern;
-
- if (cmdObj.hasField(kWriteConcernField)) {
- auto wc = cmdObj.getField(kWriteConcernField);
- // The command has a writeConcern field and it's majority, so we can
- // return it as-is.
- if (wc["w"].ok() && wc["w"].str() == "majority") {
- return cmdObj;
- }
- if (wc["wtimeout"].ok()) {
- // They set a timeout, but aren't using majority WC. We want to use their
- // timeout along with majority WC.
- newWC = WriteConcernOptions(WriteConcernOptions::kMajority,
- WriteConcernOptions::SyncMode::UNSET,
- wc["wtimeout"].Number());
- }
- }
+//////////////////////////////////////////////////////////////
+// CommandHelpers
- // Append all original fields except the writeConcern field to the new command.
- BSONObjBuilder cmdObjWithWriteConcern;
- for (const auto& elem : cmdObj) {
- const auto name = elem.fieldNameStringData();
- if (name != "writeConcern" && !cmdObjWithWriteConcern.hasField(name)) {
- cmdObjWithWriteConcern.append(elem);
- }
+BSONObj CommandHelpers::runCommandDirectly(OperationContext* opCtx, const OpMsgRequest& request) {
+ auto command = globalCommandRegistry()->findCommand(request.getCommandName());
+ invariant(command);
+ BSONObjBuilder out;
+ try {
+ bool ok = command->publicRun(opCtx, request, out);
+ appendCommandStatus(out, ok);
+ } catch (const StaleConfigException&) {
+ // These exceptions are intended to be handled at a higher level and cannot losslessly
+ // round-trip through Status.
+ throw;
+ } catch (const DBException& ex) {
+ out.resetToEmpty();
+ appendCommandStatus(out, ex.toStatus());
}
-
- // Finally, add the new write concern.
- cmdObjWithWriteConcern.append(kWriteConcernField, newWC.toBSON());
- return cmdObjWithWriteConcern.obj();
+ return out.obj();
}
-std::string Command::parseNsFullyQualified(const std::string& dbname, const BSONObj& cmdObj) {
+std::string CommandHelpers::parseNsFullyQualified(const std::string& dbname,
+ const BSONObj& cmdObj) {
BSONElement first = cmdObj.firstElement();
uassert(ErrorCodes::BadValue,
str::stream() << "collection name has invalid type " << typeName(first.type()),
@@ -136,8 +108,8 @@ std::string Command::parseNsFullyQualified(const std::string& dbname, const BSON
return nss.ns();
}
-NamespaceString Command::parseNsCollectionRequired(const std::string& dbname,
- const BSONObj& cmdObj) {
+NamespaceString CommandHelpers::parseNsCollectionRequired(const std::string& dbname,
+ const BSONObj& cmdObj) {
// Accepts both BSON String and Symbol for collection name per SERVER-16260
// TODO(kangas) remove Symbol support in MongoDB 3.0 after Ruby driver audit
BSONElement first = cmdObj.firstElement();
@@ -151,9 +123,9 @@ NamespaceString Command::parseNsCollectionRequired(const std::string& dbname,
return nss;
}
-NamespaceString Command::parseNsOrUUID(OperationContext* opCtx,
- const std::string& dbname,
- const BSONObj& cmdObj) {
+NamespaceString CommandHelpers::parseNsOrUUID(OperationContext* opCtx,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
BSONElement first = cmdObj.firstElement();
if (first.type() == BinData && first.binDataType() == BinDataType::newUUID) {
UUIDCatalog& catalog = UUIDCatalog::get(opCtx);
@@ -178,51 +150,11 @@ NamespaceString Command::parseNsOrUUID(OperationContext* opCtx,
}
}
-std::string Command::parseNs(const std::string& dbname, const BSONObj& cmdObj) const {
- BSONElement first = cmdObj.firstElement();
- if (first.type() != mongo::String)
- return dbname;
-
- return str::stream() << dbname << '.' << cmdObj.firstElement().valueStringData();
-}
-
-ResourcePattern Command::parseResourcePattern(const std::string& dbname,
- const BSONObj& cmdObj) const {
- const std::string ns = parseNs(dbname, cmdObj);
- if (!NamespaceString::validCollectionComponent(ns)) {
- return ResourcePattern::forDatabaseName(ns);
- }
- return ResourcePattern::forExactNamespace(NamespaceString(ns));
-}
-
-Command::Command(StringData name, StringData oldName)
- : _name(name.toString()),
- _commandsExecutedMetric("commands." + _name + ".total", &_commandsExecuted),
- _commandsFailedMetric("commands." + _name + ".failed", &_commandsFailed) {
- globalCommandRegistry()->registerCommand(this, name, oldName);
-}
-
-void Command::help(std::stringstream& help) const {
- help << "no help defined";
-}
-
-Status Command::explain(OperationContext* opCtx,
- const std::string& dbname,
- const BSONObj& cmdObj,
- ExplainOptions::Verbosity verbosity,
- BSONObjBuilder* out) const {
- return {ErrorCodes::IllegalOperation, str::stream() << "Cannot explain cmd: " << getName()};
-}
-
-BSONObj Command::runCommandDirectly(OperationContext* opCtx, const OpMsgRequest& request) {
- return CommandHelpers::runCommandDirectly(opCtx, request);
-}
-
-Command* Command::findCommand(StringData name) {
+Command* CommandHelpers::findCommand(StringData name) {
return globalCommandRegistry()->findCommand(name);
}
-bool Command::appendCommandStatus(BSONObjBuilder& result, const Status& status) {
+bool CommandHelpers::appendCommandStatus(BSONObjBuilder& result, const Status& status) {
appendCommandStatus(result, status.isOK(), status.reason());
BSONObj tmp = result.asTempObj();
if (!status.isOK() && !tmp.hasField("code")) {
@@ -235,7 +167,9 @@ bool Command::appendCommandStatus(BSONObjBuilder& result, const Status& status)
return status.isOK();
}
-void Command::appendCommandStatus(BSONObjBuilder& result, bool ok, const std::string& errmsg) {
+void CommandHelpers::appendCommandStatus(BSONObjBuilder& result,
+ bool ok,
+ const std::string& errmsg) {
BSONObj tmp = result.asTempObj();
bool have_ok = tmp.hasField("ok");
bool need_errmsg = !ok && !tmp.hasField("errmsg");
@@ -248,9 +182,9 @@ void Command::appendCommandStatus(BSONObjBuilder& result, bool ok, const std::st
}
}
-void Command::appendCommandWCStatus(BSONObjBuilder& result,
- const Status& awaitReplicationStatus,
- const WriteConcernResult& wcResult) {
+void CommandHelpers::appendCommandWCStatus(BSONObjBuilder& result,
+ const Status& awaitReplicationStatus,
+ const WriteConcernResult& wcResult) {
if (!awaitReplicationStatus.isOK() && !result.hasField("writeConcernError")) {
WriteConcernErrorDetail wcError;
wcError.setErrCode(awaitReplicationStatus.code());
@@ -262,6 +196,165 @@ void Command::appendCommandWCStatus(BSONObjBuilder& result,
}
}
+BSONObj CommandHelpers::appendPassthroughFields(const BSONObj& cmdObjWithPassthroughFields,
+ const BSONObj& request) {
+ BSONObjBuilder b;
+ b.appendElements(request);
+ for (const auto& elem : filterCommandRequestForPassthrough(cmdObjWithPassthroughFields)) {
+ const auto name = elem.fieldNameStringData();
+ if (isGenericArgument(name) && !request.hasField(name)) {
+ b.append(elem);
+ }
+ }
+ return b.obj();
+}
+
+BSONObj CommandHelpers::appendMajorityWriteConcern(const BSONObj& cmdObj) {
+ WriteConcernOptions newWC = kMajorityWriteConcern;
+
+ if (cmdObj.hasField(kWriteConcernField)) {
+ auto wc = cmdObj.getField(kWriteConcernField);
+ // The command has a writeConcern field and it's majority, so we can
+ // return it as-is.
+ if (wc["w"].ok() && wc["w"].str() == "majority") {
+ return cmdObj;
+ }
+
+ if (wc["wtimeout"].ok()) {
+ // They set a timeout, but aren't using majority WC. We want to use their
+ // timeout along with majority WC.
+ newWC = WriteConcernOptions(WriteConcernOptions::kMajority,
+ WriteConcernOptions::SyncMode::UNSET,
+ wc["wtimeout"].Number());
+ }
+ }
+
+ // Append all original fields except the writeConcern field to the new command.
+ BSONObjBuilder cmdObjWithWriteConcern;
+ for (const auto& elem : cmdObj) {
+ const auto name = elem.fieldNameStringData();
+ if (name != "writeConcern" && !cmdObjWithWriteConcern.hasField(name)) {
+ cmdObjWithWriteConcern.append(elem);
+ }
+ }
+
+ // Finally, add the new write concern.
+ cmdObjWithWriteConcern.append(kWriteConcernField, newWC.toBSON());
+ return cmdObjWithWriteConcern.obj();
+}
+
+namespace {
+const stdx::unordered_set<std::string> userManagementCommands{"createUser",
+ "updateUser",
+ "dropUser",
+ "dropAllUsersFromDatabase",
+ "grantRolesToUser",
+ "revokeRolesFromUser",
+ "createRole",
+ "updateRole",
+ "dropRole",
+ "dropAllRolesFromDatabase",
+ "grantPrivilegesToRole",
+ "revokePrivilegesFromRole",
+ "grantRolesToRole",
+ "revokeRolesFromRole",
+ "_mergeAuthzCollections"};
+} // namespace
+
+bool CommandHelpers::isUserManagementCommand(const std::string& name) {
+ return userManagementCommands.count(name);
+}
+
+BSONObj CommandHelpers::filterCommandRequestForPassthrough(const BSONObj& cmdObj) {
+ BSONObjBuilder bob;
+ for (auto elem : cmdObj) {
+ const auto name = elem.fieldNameStringData();
+ if (name == "$readPreference") {
+ BSONObjBuilder(bob.subobjStart("$queryOptions")).append(elem);
+ } else if (!isGenericArgument(name) || //
+ name == "$queryOptions" || //
+ name == "maxTimeMS" || //
+ name == "readConcern" || //
+ name == "writeConcern" || //
+ name == "lsid" || //
+ name == "txnNumber") {
+ // This is the whitelist of generic arguments that commands can be trusted to blindly
+ // forward to the shards.
+ bob.append(elem);
+ }
+ }
+ return bob.obj();
+}
+
+void CommandHelpers::filterCommandReplyForPassthrough(const BSONObj& cmdObj,
+ BSONObjBuilder* output) {
+ for (auto elem : cmdObj) {
+ const auto name = elem.fieldNameStringData();
+ if (name == "$configServerState" || //
+ name == "$gleStats" || //
+ name == "$clusterTime" || //
+ name == "$oplogQueryData" || //
+ name == "$replData" || //
+ name == "operationTime") {
+ continue;
+ }
+ output->append(elem);
+ }
+}
+
+BSONObj CommandHelpers::filterCommandReplyForPassthrough(const BSONObj& cmdObj) {
+ BSONObjBuilder bob;
+ filterCommandReplyForPassthrough(cmdObj, &bob);
+ return bob.obj();
+}
+
+bool CommandHelpers::isHelpRequest(const BSONElement& helpElem) {
+ return !helpElem.eoo() && helpElem.trueValue();
+}
+
+constexpr StringData CommandHelpers::kHelpFieldName;
+
+//////////////////////////////////////////////////////////////
+// Command
+
+Command::~Command() = default;
+
+std::string Command::parseNs(const std::string& dbname, const BSONObj& cmdObj) const {
+ BSONElement first = cmdObj.firstElement();
+ if (first.type() != mongo::String)
+ return dbname;
+
+ return str::stream() << dbname << '.' << cmdObj.firstElement().valueStringData();
+}
+
+ResourcePattern Command::parseResourcePattern(const std::string& dbname,
+ const BSONObj& cmdObj) const {
+ const std::string ns = parseNs(dbname, cmdObj);
+ if (!NamespaceString::validCollectionComponent(ns)) {
+ return ResourcePattern::forDatabaseName(ns);
+ }
+ return ResourcePattern::forExactNamespace(NamespaceString(ns));
+}
+
+Command::Command(StringData name, StringData oldName)
+ : _name(name.toString()),
+ _commandsExecutedMetric("commands." + _name + ".total", &_commandsExecuted),
+ _commandsFailedMetric("commands." + _name + ".failed", &_commandsFailed) {
+ globalCommandRegistry()->registerCommand(this, name, oldName);
+}
+
+void Command::help(std::stringstream& help) const {
+ help << "no help defined";
+}
+
+Status Command::explain(OperationContext* opCtx,
+ const std::string& dbname,
+ const BSONObj& cmdObj,
+ ExplainOptions::Verbosity verbosity,
+ BSONObjBuilder* out) const {
+ return {ErrorCodes::IllegalOperation, str::stream() << "Cannot explain cmd: " << getName()};
+}
+
Status BasicCommand::checkAuthForRequest(OperationContext* opCtx, const OpMsgRequest& request) {
uassertNoDocumentSequences(request);
return checkAuthForOperation(opCtx, request.getDatabase().toString(), request.body);
@@ -372,12 +465,6 @@ bool Command::publicRun(OperationContext* opCtx,
}
}
-bool Command::isHelpRequest(const BSONElement& helpElem) {
- return !helpElem.eoo() && helpElem.trueValue();
-}
-
-const char Command::kHelpFieldName[] = "help";
-
void Command::generateHelpResponse(OperationContext* opCtx,
rpc::ReplyBuilderInterface* replyBuilder,
const Command& command) {
@@ -391,28 +478,6 @@ void Command::generateHelpResponse(OperationContext* opCtx,
replyBuilder->setMetadata(rpc::makeEmptyMetadata());
}
-namespace {
-const stdx::unordered_set<std::string> userManagementCommands{"createUser",
- "updateUser",
- "dropUser",
- "dropAllUsersFromDatabase",
- "grantRolesToUser",
- "revokeRolesFromUser",
- "createRole",
- "updateRole",
- "dropRole",
- "dropAllRolesFromDatabase",
- "grantPrivilegesToRole",
- "revokePrivilegesFromRole",
- "grantRolesToRole",
- "revokeRolesFromRole",
- "_mergeAuthzCollections"};
-} // namespace
-
-bool Command::isUserManagementCommand(const std::string& name) {
- return userManagementCommands.count(name);
-}
-
void BasicCommand::uassertNoDocumentSequences(const OpMsgRequest& request) {
uassert(40472,
str::stream() << "The " << getName() << " command does not support document sequences.",
@@ -433,52 +498,13 @@ bool ErrmsgCommandDeprecated::run(OperationContext* opCtx,
std::string errmsg;
auto ok = errmsgRun(opCtx, db, cmdObj, errmsg, result);
if (!errmsg.empty()) {
- appendCommandStatus(result, ok, errmsg);
+ CommandHelpers::appendCommandStatus(result, ok, errmsg);
}
return ok;
}
-BSONObj Command::filterCommandRequestForPassthrough(const BSONObj& cmdObj) {
- BSONObjBuilder bob;
- for (auto elem : cmdObj) {
- const auto name = elem.fieldNameStringData();
- if (name == "$readPreference") {
- BSONObjBuilder(bob.subobjStart("$queryOptions")).append(elem);
- } else if (!Command::isGenericArgument(name) || //
- name == "$queryOptions" || //
- name == "maxTimeMS" || //
- name == "readConcern" || //
- name == "writeConcern" || //
- name == "lsid" || //
- name == "txnNumber") {
- // This is the whitelist of generic arguments that commands can be trusted to blindly
- // forward to the shards.
- bob.append(elem);
- }
- }
- return bob.obj();
-}
-
-void Command::filterCommandReplyForPassthrough(const BSONObj& cmdObj, BSONObjBuilder* output) {
- for (auto elem : cmdObj) {
- const auto name = elem.fieldNameStringData();
- if (name == "$configServerState" || //
- name == "$gleStats" || //
- name == "$clusterTime" || //
- name == "$oplogQueryData" || //
- name == "$replData" || //
- name == "operationTime") {
- continue;
- }
- output->append(elem);
- }
-}
-
-BSONObj Command::filterCommandReplyForPassthrough(const BSONObj& cmdObj) {
- BSONObjBuilder bob;
- filterCommandReplyForPassthrough(cmdObj, &bob);
- return bob.obj();
-}
+//////////////////////////////////////////////////////////////
+// CommandRegistry
void CommandRegistry::registerCommand(Command* command, StringData name, StringData oldName) {
for (StringData key : {name, oldName}) {
@@ -499,25 +525,6 @@ Command* CommandRegistry::findCommand(StringData name) const {
return it->second;
}
-BSONObj CommandHelpers::runCommandDirectly(OperationContext* opCtx, const OpMsgRequest& request) {
- auto command = globalCommandRegistry()->findCommand(request.getCommandName());
- invariant(command);
-
- BSONObjBuilder out;
- try {
- bool ok = command->publicRun(opCtx, request, out);
- Command::appendCommandStatus(out, ok);
- } catch (const StaleConfigException&) {
- // These exceptions are intended to be handled at a higher level and cannot losslessly
- // round-trip through Status.
- throw;
- } catch (const DBException& ex) {
- out.resetToEmpty();
- Command::appendCommandStatus(out, ex.toStatus());
- }
- return out.obj();
-}
-
CommandRegistry* globalCommandRegistry() {
static auto reg = new CommandRegistry();
return reg;
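(Note: the relocated helpers keep their previous behavior under the new CommandHelpers spelling. Two hedged usage sketches, assuming an available OperationContext* opCtx; command names and values are illustrative.)

// 1) Run a command in-process; per the implementation above, this bypasses
//    auth checks, CurOp bookkeeping, and waiting for write concern.
auto request = OpMsgRequest::fromDBAndBody("admin", BSON("ping" << 1));
BSONObj reply = CommandHelpers::runCommandDirectly(opCtx, request);

// 2) Upgrade a request to a majority write concern before forwarding it.
//    An existing w:"majority" is returned unchanged; a caller-supplied
//    wtimeout is carried over onto the majority write concern.
BSONObj cmd = BSON("dropDatabase" << 1 << "writeConcern"
                                  << BSON("w" << 1 << "wtimeout" << 5000));
BSONObj withMajority = CommandHelpers::appendMajorityWriteConcern(cmd);
// withMajority ~ {dropDatabase: 1, writeConcern: {w: "majority", wtimeout: 5000, ...}}
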
diff --git a/src/mongo/db/commands.h b/src/mongo/db/commands.h
index 37693df5455..f6c987d03b6 100644
--- a/src/mongo/db/commands.h
+++ b/src/mongo/db/commands.h
@@ -56,6 +56,132 @@ namespace mutablebson {
class Document;
} // namespace mutablebson
+class Command;
+
+// Various helpers unrelated to any single command or to the command registry.
+// Would be a namespace, but want to keep it closed rather than open.
+// Some of these may move to the BasicCommand shim if they are only for legacy implementations.
+struct CommandHelpers {
+ // The type of the first field in 'cmdObj' must be mongo::String. The first field is
+ // interpreted as a collection name.
+ static std::string parseNsFullyQualified(const std::string& dbname, const BSONObj& cmdObj);
+
+ // The type of the first field in 'cmdObj' must be mongo::String or Symbol.
+ // The first field is interpreted as a collection name.
+ static NamespaceString parseNsCollectionRequired(const std::string& dbname,
+ const BSONObj& cmdObj);
+
+ static NamespaceString parseNsOrUUID(OperationContext* opCtx,
+ const std::string& dbname,
+ const BSONObj& cmdObj);
+
+ static Command* findCommand(StringData name);
+
+ // Helper for setting errmsg and ok field in command result object.
+ static void appendCommandStatus(BSONObjBuilder& result,
+ bool ok,
+ const std::string& errmsg = {});
+ // @return s.isOK()
+ static bool appendCommandStatus(BSONObjBuilder& result, const Status& status);
+ /**
+ * Helper for setting a writeConcernError field in the command result object if
+ * a writeConcern error occurs.
+ *
+ * @param result is the BSONObjBuilder for the command response. This function creates the
+ * writeConcernError field for the response.
+ * @param awaitReplicationStatus is the status received from awaitReplication.
+ * @param wcResult is the writeConcernResult object that holds other write concern information.
+ * This is primarily used for populating errInfo when a timeout occurs, and is populated
+ * by waitForWriteConcern.
+ */
+ static void appendCommandWCStatus(BSONObjBuilder& result,
+ const Status& awaitReplicationStatus,
+ const WriteConcernResult& wcResult = WriteConcernResult());
+ /**
+ * Appends passthrough fields from a cmdObj to a given request.
+ */
+ static BSONObj appendPassthroughFields(const BSONObj& cmdObjWithPassthroughFields,
+ const BSONObj& request);
+ /**
+ * Returns a copy of 'cmdObj' with a majority writeConcern appended.
+ */
+ static BSONObj appendMajorityWriteConcern(const BSONObj& cmdObj);
+ /**
+ * Returns true if the provided argument is one that is handled by the command processing layer
+ * and should generally be ignored by individual command implementations. In particular,
+ * commands that fail on unrecognized arguments must not fail for any of these.
+ */
+ static bool isGenericArgument(StringData arg) {
+ // Not including "help" since we don't pass help requests through to the command parser.
+ // If that changes, it should be added. When you add to this list, consider whether you
+ // should also change the filterCommandRequestForPassthrough() function.
+ return arg == "$audit" || //
+ arg == "$client" || //
+ arg == "$configServerState" || //
+ arg == "$db" || //
+ arg == "allowImplicitCollectionCreation" || //
+ arg == "$oplogQueryData" || //
+ arg == "$queryOptions" || //
+ arg == "$readPreference" || //
+ arg == "$replData" || //
+ arg == "$clusterTime" || //
+ arg == "maxTimeMS" || //
+ arg == "readConcern" || //
+ arg == "shardVersion" || //
+ arg == "tracking_info" || //
+ arg == "writeConcern" || //
+ arg == "lsid" || //
+ arg == "txnNumber" || //
+ false; // These comments tell clang-format to keep this line-oriented.
+ }
+
+ /**
+ * This function checks if a command is a user management command by name.
+ */
+ static bool isUserManagementCommand(const std::string& name);
+
+ /**
+ * Rewrites cmdObj into a format safe to blindly forward to shards.
+ *
+ * This performs 2 transformations:
+ * 1) $readPreference fields are moved into a subobject called $queryOptions. This matches the
+ * "wrapped" format historically used internally by mongos. Moving off of that format will be
+ * done as SERVER-29091.
+ *
+ * 2) Filter out generic arguments that shouldn't be blindly passed to the shards. This is
+ * necessary because many mongos implementations of Command::run() just pass cmdObj through
+ * directly to the shards. However, some of the generic arguments fields are automatically
+ * appended in the egress layer. Removing them here ensures that they don't get duplicated.
+ *
+ * Ideally this function can be deleted once mongos run() implementations are more careful about
+ * what they send to the shards.
+ */
+ static BSONObj filterCommandRequestForPassthrough(const BSONObj& cmdObj);
+ static void filterCommandReplyForPassthrough(const BSONObj& reply, BSONObjBuilder* output);
+
+ /**
+ * Rewrites reply into a format safe to blindly forward from shards to clients.
+ *
+ * Ideally this function can be deleted once mongos run() implementations are more careful about
+ * what they return from the shards.
+ */
+ static BSONObj filterCommandReplyForPassthrough(const BSONObj& reply);
+
+ /**
+ * Returns true if this a request for the 'help' information associated with the command.
+ */
+ static bool isHelpRequest(const BSONElement& helpElem);
+
+ /**
+ * Runs a command directly and returns the result. Does not do any other work normally handled
+ * by command dispatch, such as checking auth, dealing with CurOp or waiting for write concern.
+ * It is illegal to call this if the command does not exist.
+ */
+ static BSONObj runCommandDirectly(OperationContext* opCtx, const OpMsgRequest& request);
+
+ static constexpr StringData kHelpFieldName = "help"_sd;
+};
+
/**
* Serves as a base for server commands. See the constructor for more details.
*/
@@ -280,19 +406,6 @@ public:
_commandsFailed.increment();
}
- // The type of the first field in 'cmdObj' must be mongo::String. The first field is
- // interpreted as a collection name.
- static std::string parseNsFullyQualified(const std::string& dbname, const BSONObj& cmdObj);
-
- // The type of the first field in 'cmdObj' must be mongo::String or Symbol.
- // The first field is interpreted as a collection name.
- static NamespaceString parseNsCollectionRequired(const std::string& dbname,
- const BSONObj& cmdObj);
- static NamespaceString parseNsOrUUID(OperationContext* opCtx,
- const std::string& dbname,
- const BSONObj& cmdObj);
-
-
/**
* Runs the command.
*
@@ -301,36 +414,24 @@ public:
bool publicRun(OperationContext* opCtx, const OpMsgRequest& request, BSONObjBuilder& result);
/**
- * Runs a command directly and returns the result. Does not do any other work normally handled
- * by command dispatch, such as checking auth, dealing with CurOp or waiting for write concern.
- * It is illegal to call this if the command does not exist.
+ * Generates a reply from the 'help' information associated with a command. The state of
+ * the passed ReplyBuilder will be in kOutputDocs after calling this method.
*/
- static BSONObj runCommandDirectly(OperationContext* txn, const OpMsgRequest& request);
-
- static Command* findCommand(StringData name);
-
- // Helper for setting errmsg and ok field in command result object.
- static void appendCommandStatus(BSONObjBuilder& result,
- bool ok,
- const std::string& errmsg = {});
-
- // @return s.isOK()
- static bool appendCommandStatus(BSONObjBuilder& result, const Status& status);
+ static void generateHelpResponse(OperationContext* opCtx,
+ rpc::ReplyBuilderInterface* replyBuilder,
+ const Command& command);
/**
- * Helper for setting a writeConcernError field in the command result object if
- * a writeConcern error occurs.
+ * Checks to see if the client executing "opCtx" is authorized to run the given command with the
+ * given parameters on the given named database.
*
- * @param result is the BSONObjBuilder for the command response. This function creates the
- * writeConcernError field for the response.
- * @param awaitReplicationStatus is the status received from awaitReplication.
- * @param wcResult is the writeConcernResult object that holds other write concern information.
- * This is primarily used for populating errInfo when a timeout occurs, and is populated
- * by waitForWriteConcern.
+ * Returns Status::OK() if the command is authorized. Most likely returns
+ * ErrorCodes::Unauthorized otherwise, but any return other than Status::OK implies not
+ * authorized.
*/
- static void appendCommandWCStatus(BSONObjBuilder& result,
- const Status& awaitReplicationStatus,
- const WriteConcernResult& wcResult = WriteConcernResult());
+ static Status checkAuthorization(Command* c,
+ OperationContext* opCtx,
+ const OpMsgRequest& request);
/**
* If true, then testing commands are available. Defaults to false.
@@ -354,107 +455,10 @@ public:
* runGlobalInitializersOrDie(argc, argv, envp);
* ...
* }
- */
- static bool testCommandsEnabled;
-
- /**
- * Returns true if this a request for the 'help' information associated with the command.
- */
- static bool isHelpRequest(const BSONElement& helpElem);
-
- static const char kHelpFieldName[];
-
- /**
- * Generates a reply from the 'help' information associated with a command. The state of
- * the passed ReplyBuilder will be in kOutputDocs after calling this method.
- */
- static void generateHelpResponse(OperationContext* opCtx,
- rpc::ReplyBuilderInterface* replyBuilder,
- const Command& command);
-
- /**
- * This function checks if a command is a user management command by name.
- */
- static bool isUserManagementCommand(const std::string& name);
-
- /**
- * Checks to see if the client executing "opCtx" is authorized to run the given command with the
- * given parameters on the given named database.
*
- * Returns Status::OK() if the command is authorized. Most likely returns
- * ErrorCodes::Unauthorized otherwise, but any return other than Status::OK implies not
- * authorized.
- */
- static Status checkAuthorization(Command* c,
- OperationContext* opCtx,
- const OpMsgRequest& request);
-
- /**
- * Appends passthrough fields from a cmdObj to a given request.
+ * Note: variable is defined in test_commands_enabled.cpp as a dependency hack.
*/
- static BSONObj appendPassthroughFields(const BSONObj& cmdObjWithPassthroughFields,
- const BSONObj& request);
-
- /**
- * Returns a copy of 'cmdObj' with a majority writeConcern appended.
- */
- static BSONObj appendMajorityWriteConcern(const BSONObj& cmdObj);
-
- /**
- * Returns true if the provided argument is one that is handled by the command processing layer
- * and should generally be ignored by individual command implementations. In particular,
- * commands that fail on unrecognized arguments must not fail for any of these.
- */
- static bool isGenericArgument(StringData arg) {
- // Not including "help" since we don't pass help requests through to the command parser.
- // If that changes, it should be added. When you add to this list, consider whether you
- // should also change the filterCommandRequestForPassthrough() function.
- return arg == "$audit" || //
- arg == "$client" || //
- arg == "$configServerState" || //
- arg == "$db" || //
- arg == "allowImplicitCollectionCreation" || //
- arg == "$oplogQueryData" || //
- arg == "$queryOptions" || //
- arg == "$readPreference" || //
- arg == "$replData" || //
- arg == "$clusterTime" || //
- arg == "maxTimeMS" || //
- arg == "readConcern" || //
- arg == "shardVersion" || //
- arg == "tracking_info" || //
- arg == "writeConcern" || //
- arg == "lsid" || //
- arg == "txnNumber" || //
- false; // These comments tell clang-format to keep this line-oriented.
- }
-
- /**
- * Rewrites cmdObj into a format safe to blindly forward to shards.
- *
- * This performs 2 transformations:
- * 1) $readPreference fields are moved into a subobject called $queryOptions. This matches the
- * "wrapped" format historically used internally by mongos. Moving off of that format will be
- * done as SERVER-29091.
- *
- * 2) Filter out generic arguments that shouldn't be blindly passed to the shards. This is
- * necessary because many mongos implementations of Command::run() just pass cmdObj through
- * directly to the shards. However, some of the generic arguments fields are automatically
- * appended in the egress layer. Removing them here ensures that they don't get duplicated.
- *
- * Ideally this function can be deleted once mongos run() implementations are more careful about
- * what they send to the shards.
- */
- static BSONObj filterCommandRequestForPassthrough(const BSONObj& cmdObj);
-
- /**
- * Rewrites reply into a format safe to blindly forward from shards to clients.
- *
- * Ideally this function can be deleted once mongos run() implementations are more careful about
- * what they return from the shards.
- */
- static void filterCommandReplyForPassthrough(const BSONObj& reply, BSONObjBuilder* output);
- static BSONObj filterCommandReplyForPassthrough(const BSONObj& reply);
+ static bool testCommandsEnabled;
private:
/**
@@ -575,11 +579,6 @@ class ErrmsgCommandDeprecated : public BasicCommand {
BSONObjBuilder& result) = 0;
};
-// Struct as closed namespace. Nothing but statics.
-struct CommandHelpers {
- static BSONObj runCommandDirectly(OperationContext* opCtx, const OpMsgRequest& request);
-};
-
// See the 'globalCommandRegistry()' singleton accessor.
class CommandRegistry {
public:
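(Note: for command implementations, the practical effect of this header change is only the spelling of the helper call sites. A minimal hypothetical BasicCommand illustrating the relocated helpers is sketched below; the class, command name, and doWork() are stand-ins, not part of this patch.)

// Hypothetical command showing the new CommandHelpers call sites.
class CmdExample : public BasicCommand {
public:
    CmdExample() : BasicCommand("example") {}

    virtual std::string parseNs(const std::string& dbname, const BSONObj& cmdObj) const {
        // Was Command::parseNsCollectionRequired(...) before this patch.
        return CommandHelpers::parseNsCollectionRequired(dbname, cmdObj).ns();
    }

    virtual bool run(OperationContext* opCtx,
                     const std::string& dbname,
                     const BSONObj& cmdObj,
                     BSONObjBuilder& result) {
        Status status = doWork(opCtx, cmdObj);  // doWork() is a stand-in for real command logic
        // Was Command::appendCommandStatus(...) before this patch.
        return CommandHelpers::appendCommandStatus(result, status);
    }
};
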
diff --git a/src/mongo/db/commands/apply_ops_cmd.cpp b/src/mongo/db/commands/apply_ops_cmd.cpp
index 5822cf9b61b..344626f11ab 100644
--- a/src/mongo/db/commands/apply_ops_cmd.cpp
+++ b/src/mongo/db/commands/apply_ops_cmd.cpp
@@ -234,7 +234,7 @@ public:
auto status = OplogApplicationChecks::checkOperationArray(cmdObj.firstElement());
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
// TODO (SERVER-30217): When a write concern is provided to the applyOps command, we
@@ -259,7 +259,7 @@ public:
auto modeSW = repl::OplogApplication::parseMode(oplogApplicationModeString);
if (!modeSW.isOK()) {
// Unable to parse the mode argument.
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
modeSW.getStatus().withContext(str::stream() << "Could not parse " +
ApplyOps::kOplogApplicationModeFieldName));
@@ -267,16 +267,16 @@ public:
oplogApplicationMode = modeSW.getValue();
} else if (status != ErrorCodes::NoSuchKey) {
// NoSuchKey means the user did not supply a mode.
- return appendCommandStatus(result,
- Status(status.code(),
- str::stream()
- << "Could not parse out "
- << ApplyOps::kOplogApplicationModeFieldName
- << ": "
- << status.reason()));
+ return CommandHelpers::appendCommandStatus(
+ result,
+ Status(status.code(),
+ str::stream() << "Could not parse out "
+ << ApplyOps::kOplogApplicationModeFieldName
+ << ": "
+ << status.reason()));
}
- auto applyOpsStatus = appendCommandStatus(
+ auto applyOpsStatus = CommandHelpers::appendCommandStatus(
result, applyOps(opCtx, dbname, cmdObj, oplogApplicationMode, &result));
return applyOpsStatus;
diff --git a/src/mongo/db/commands/authentication_commands.cpp b/src/mongo/db/commands/authentication_commands.cpp
index 244244c3a3f..690f75ee89f 100644
--- a/src/mongo/db/commands/authentication_commands.cpp
+++ b/src/mongo/db/commands/authentication_commands.cpp
@@ -147,7 +147,8 @@ bool CmdAuthenticate::run(OperationContext* opCtx,
}
std::string mechanism = cmdObj.getStringField("mechanism");
if (mechanism.empty()) {
- appendCommandStatus(result, {ErrorCodes::BadValue, "Auth mechanism not specified"});
+ CommandHelpers::appendCommandStatus(result,
+ {ErrorCodes::BadValue, "Auth mechanism not specified"});
return false;
}
UserName user;
@@ -178,9 +179,10 @@ bool CmdAuthenticate::run(OperationContext* opCtx,
if (status.code() == ErrorCodes::AuthenticationFailed) {
// Statuses with code AuthenticationFailed may contain messages we do not wish to
// reveal to the user, so we return a status with the message "auth failed".
- appendCommandStatus(result, Status(ErrorCodes::AuthenticationFailed, "auth failed"));
+ CommandHelpers::appendCommandStatus(
+ result, Status(ErrorCodes::AuthenticationFailed, "auth failed"));
} else {
- appendCommandStatus(result, status);
+ CommandHelpers::appendCommandStatus(result, status);
}
sleepmillis(saslGlobalParams.authFailedDelay.load());
return false;
diff --git a/src/mongo/db/commands/clone.cpp b/src/mongo/db/commands/clone.cpp
index f219a374778..8ee47ce26be 100644
--- a/src/mongo/db/commands/clone.cpp
+++ b/src/mongo/db/commands/clone.cpp
@@ -128,7 +128,7 @@ public:
result.append("clonedColls", barr.arr());
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
} cmdClone;
diff --git a/src/mongo/db/commands/clone_collection.cpp b/src/mongo/db/commands/clone_collection.cpp
index 5746348dfc1..f152b97213d 100644
--- a/src/mongo/db/commands/clone_collection.cpp
+++ b/src/mongo/db/commands/clone_collection.cpp
@@ -73,7 +73,7 @@ public:
}
virtual std::string parseNs(const std::string& dbname, const BSONObj& cmdObj) const {
- return parseNsFullyQualified(dbname, cmdObj);
+ return CommandHelpers::parseNsFullyQualified(dbname, cmdObj);
}
virtual Status checkAuthForCommand(Client* client,
@@ -129,7 +129,7 @@ public:
string collection = parseNs(dbname, cmdObj);
Status allowedWriteStatus = userAllowedWriteNS(dbname, collection);
if (!allowedWriteStatus.isOK()) {
- return appendCommandStatus(result, allowedWriteStatus);
+ return CommandHelpers::appendCommandStatus(result, allowedWriteStatus);
}
BSONObj query = cmdObj.getObjectField("query");
diff --git a/src/mongo/db/commands/collection_to_capped.cpp b/src/mongo/db/commands/collection_to_capped.cpp
index 55272f941a0..5f283fa2195 100644
--- a/src/mongo/db/commands/collection_to_capped.cpp
+++ b/src/mongo/db/commands/collection_to_capped.cpp
@@ -121,7 +121,7 @@ public:
NamespaceString nss(dbname, to);
if (!repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(opCtx, nss)) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
Status(ErrorCodes::NotMaster,
str::stream() << "Not primary while cloning collection " << from << " to "
@@ -131,7 +131,7 @@ public:
Database* const db = autoDb.getDb();
if (!db) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
Status(ErrorCodes::NamespaceNotFound,
str::stream() << "database " << dbname << " not found"));
@@ -139,7 +139,7 @@ public:
Status status =
cloneCollectionAsCapped(opCtx, db, from.toString(), to.toString(), size, temp);
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
} cmdCloneCollectionAsCapped;
@@ -173,7 +173,7 @@ public:
const BSONObj& jsobj,
string& errmsg,
BSONObjBuilder& result) {
- const NamespaceString nss(parseNsCollectionRequired(dbname, jsobj));
+ const NamespaceString nss(CommandHelpers::parseNsCollectionRequired(dbname, jsobj));
double size = jsobj.getField("size").number();
if (size == 0) {
@@ -181,7 +181,7 @@ public:
return false;
}
- return appendCommandStatus(result, convertToCapped(opCtx, nss, size));
+ return CommandHelpers::appendCommandStatus(result, convertToCapped(opCtx, nss, size));
}
} cmdConvertToCapped;
diff --git a/src/mongo/db/commands/compact.cpp b/src/mongo/db/commands/compact.cpp
index 780790bb994..a4371eb129d 100644
--- a/src/mongo/db/commands/compact.cpp
+++ b/src/mongo/db/commands/compact.cpp
@@ -91,7 +91,7 @@ public:
const BSONObj& cmdObj,
string& errmsg,
BSONObjBuilder& result) {
- NamespaceString nss = parseNsCollectionRequired(db, cmdObj);
+ NamespaceString nss = CommandHelpers::parseNsCollectionRequired(db, cmdObj);
repl::ReplicationCoordinator* replCoord = repl::getGlobalReplicationCoordinator();
if (replCoord->getMemberState().primary() && !cmdObj["force"].trueValue()) {
@@ -153,10 +153,10 @@ public:
// If db/collection does not exist, short circuit and return.
if (!collDB || !collection) {
if (view)
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result, {ErrorCodes::CommandNotSupportedOnView, "can't compact a view"});
else
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result, {ErrorCodes::NamespaceNotFound, "collection does not exist"});
}
@@ -167,7 +167,7 @@ public:
StatusWith<CompactStats> status = collection->compact(opCtx, &compactOptions);
if (!status.isOK())
- return appendCommandStatus(result, status.getStatus());
+ return CommandHelpers::appendCommandStatus(result, status.getStatus());
if (status.getValue().corruptDocuments > 0)
result.append("invalidObjects", status.getValue().corruptDocuments);
diff --git a/src/mongo/db/commands/connection_status.cpp b/src/mongo/db/commands/connection_status.cpp
index b8ea2e101fd..2a9c97f453f 100644
--- a/src/mongo/db/commands/connection_status.cpp
+++ b/src/mongo/db/commands/connection_status.cpp
@@ -65,7 +65,7 @@ public:
Status status =
bsonExtractBooleanFieldWithDefault(cmdObj, "showPrivileges", false, &showPrivileges);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
BSONObjBuilder authInfo(result.subobjStart("authInfo"));
diff --git a/src/mongo/db/commands/copydb.cpp b/src/mongo/db/commands/copydb.cpp
index ad6faa61308..4f3c00fdab0 100644
--- a/src/mongo/db/commands/copydb.cpp
+++ b/src/mongo/db/commands/copydb.cpp
@@ -183,7 +183,7 @@ public:
}
if (!ret["done"].Bool()) {
- filterCommandReplyForPassthrough(ret, &result);
+ CommandHelpers::filterCommandReplyForPassthrough(ret, &result);
return true;
}
diff --git a/src/mongo/db/commands/copydb_start_commands.cpp b/src/mongo/db/commands/copydb_start_commands.cpp
index 03bd2f3b831..3d1cd9786d2 100644
--- a/src/mongo/db/commands/copydb_start_commands.cpp
+++ b/src/mongo/db/commands/copydb_start_commands.cpp
@@ -127,7 +127,7 @@ public:
BSONElement mechanismElement;
Status status = bsonExtractField(cmdObj, saslCommandMechanismFieldName, &mechanismElement);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
BSONElement payloadElement;
@@ -147,10 +147,10 @@ public:
if (!authConn->runCommand(
fromDb, BSON("saslStart" << 1 << mechanismElement << payloadElement), ret)) {
authConn.reset();
- return appendCommandStatus(result, getStatusFromCommandResult(ret));
+ return CommandHelpers::appendCommandStatus(result, getStatusFromCommandResult(ret));
}
- filterCommandReplyForPassthrough(ret, &result);
+ CommandHelpers::filterCommandReplyForPassthrough(ret, &result);
return true;
}
diff --git a/src/mongo/db/commands/count_cmd.cpp b/src/mongo/db/commands/count_cmd.cpp
index d70c9ce6c80..6cefbbccf9f 100644
--- a/src/mongo/db/commands/count_cmd.cpp
+++ b/src/mongo/db/commands/count_cmd.cpp
@@ -101,7 +101,7 @@ public:
return Status(ErrorCodes::Unauthorized, "Unauthorized");
}
- const NamespaceString nss(parseNsOrUUID(opCtx, dbname, cmdObj));
+ const NamespaceString nss(CommandHelpers::parseNsOrUUID(opCtx, dbname, cmdObj));
if (!authSession->isAuthorizedForActionsOnNamespace(nss, ActionType::find)) {
return Status(ErrorCodes::Unauthorized, "Unauthorized");
}
@@ -116,7 +116,7 @@ public:
BSONObjBuilder* out) const {
const bool isExplain = true;
Lock::DBLock dbLock(opCtx, dbname, MODE_IS);
- auto nss = parseNsOrUUID(opCtx, dbname, cmdObj);
+ auto nss = CommandHelpers::parseNsOrUUID(opCtx, dbname, cmdObj);
auto request = CountRequest::parseFromBSON(nss, cmdObj, isExplain);
if (!request.isOK()) {
return request.getStatus();
@@ -174,10 +174,10 @@ public:
BSONObjBuilder& result) {
const bool isExplain = false;
Lock::DBLock dbLock(opCtx, dbname, MODE_IS);
- auto nss = parseNsOrUUID(opCtx, dbname, cmdObj);
+ auto nss = CommandHelpers::parseNsOrUUID(opCtx, dbname, cmdObj);
auto request = CountRequest::parseFromBSON(nss, cmdObj, isExplain);
if (!request.isOK()) {
- return appendCommandStatus(result, request.getStatus());
+ return CommandHelpers::appendCommandStatus(result, request.getStatus());
}
AutoGetCollectionOrViewForReadCommand ctx(
@@ -189,10 +189,10 @@ public:
auto viewAggregation = request.getValue().asAggregationCommand();
if (!viewAggregation.isOK()) {
- return appendCommandStatus(result, viewAggregation.getStatus());
+ return CommandHelpers::appendCommandStatus(result, viewAggregation.getStatus());
}
- BSONObj aggResult = Command::runCommandDirectly(
+ BSONObj aggResult = CommandHelpers::runCommandDirectly(
opCtx, OpMsgRequest::fromDBAndBody(dbname, std::move(viewAggregation.getValue())));
if (ResolvedView::isResolvedViewErrorResponse(aggResult)) {
@@ -203,7 +203,7 @@ public:
ViewResponseFormatter formatter(aggResult);
Status formatStatus = formatter.appendAsCountResponse(&result);
if (!formatStatus.isOK()) {
- return appendCommandStatus(result, formatStatus);
+ return CommandHelpers::appendCommandStatus(result, formatStatus);
}
return true;
}
@@ -219,7 +219,7 @@ public:
false, // !explain
PlanExecutor::YIELD_AUTO);
if (!statusWithPlanExecutor.isOK()) {
- return appendCommandStatus(result, statusWithPlanExecutor.getStatus());
+ return CommandHelpers::appendCommandStatus(result, statusWithPlanExecutor.getStatus());
}
auto exec = std::move(statusWithPlanExecutor.getValue());
@@ -233,7 +233,7 @@ public:
Status execPlanStatus = exec->executePlan();
if (!execPlanStatus.isOK()) {
- return appendCommandStatus(result, execPlanStatus);
+ return CommandHelpers::appendCommandStatus(result, execPlanStatus);
}
PlanSummaryStats summaryStats;
diff --git a/src/mongo/db/commands/create_indexes.cpp b/src/mongo/db/commands/create_indexes.cpp
index 52c3d08fc1e..79c38e4424b 100644
--- a/src/mongo/db/commands/create_indexes.cpp
+++ b/src/mongo/db/commands/create_indexes.cpp
@@ -129,7 +129,7 @@ StatusWith<std::vector<BSONObj>> parseAndValidateIndexSpecs(
hasIndexesField = true;
} else if (kCommandName == cmdElemFieldName ||
- Command::isGenericArgument(cmdElemFieldName)) {
+ CommandHelpers::isGenericArgument(cmdElemFieldName)) {
continue;
} else {
return {ErrorCodes::BadValue,
@@ -236,11 +236,11 @@ public:
const BSONObj& cmdObj,
string& errmsg,
BSONObjBuilder& result) {
- const NamespaceString ns(parseNsCollectionRequired(dbname, cmdObj));
+ const NamespaceString ns(CommandHelpers::parseNsCollectionRequired(dbname, cmdObj));
Status status = userAllowedWriteNS(ns);
if (!status.isOK())
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
// Disallow users from creating new indexes on config.transactions since the sessions
// code was optimized to not update indexes.
@@ -251,7 +251,7 @@ public:
auto specsWithStatus =
parseAndValidateIndexSpecs(opCtx, ns, cmdObj, serverGlobalParams.featureCompatibility);
if (!specsWithStatus.isOK()) {
- return appendCommandStatus(result, specsWithStatus.getStatus());
+ return CommandHelpers::appendCommandStatus(result, specsWithStatus.getStatus());
}
auto specs = std::move(specsWithStatus.getValue());
@@ -259,7 +259,7 @@ public:
// Note: createIndexes command does not currently respect shard versioning.
Lock::DBLock dbLock(opCtx, ns.db(), MODE_X);
if (!repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(opCtx, ns)) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
Status(ErrorCodes::NotMaster,
str::stream() << "Not primary while creating indexes in " << ns.ns()));
@@ -276,7 +276,8 @@ public:
} else {
if (db->getViewCatalog()->lookup(opCtx, ns.ns())) {
errmsg = "Cannot create indexes on a view";
- return appendCommandStatus(result, {ErrorCodes::CommandNotSupportedOnView, errmsg});
+ return CommandHelpers::appendCommandStatus(
+ result, {ErrorCodes::CommandNotSupportedOnView, errmsg});
}
writeConflictRetry(opCtx, kCommandName, ns.ns(), [&] {
@@ -291,7 +292,7 @@ public:
auto indexSpecsWithDefaults =
resolveCollectionDefaultProperties(opCtx, collection, std::move(specs));
if (!indexSpecsWithDefaults.isOK()) {
- return appendCommandStatus(result, indexSpecsWithDefaults.getStatus());
+ return CommandHelpers::appendCommandStatus(result, indexSpecsWithDefaults.getStatus());
}
specs = std::move(indexSpecsWithDefaults.getValue());
@@ -321,7 +322,7 @@ public:
status = checkUniqueIndexConstraints(opCtx, ns.ns(), spec["key"].Obj());
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
}
}
@@ -337,7 +338,7 @@ public:
opCtx->recoveryUnit()->abandonSnapshot();
dbLock.relockWithMode(MODE_IX);
if (!repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(opCtx, ns)) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
Status(ErrorCodes::NotMaster,
str::stream() << "Not primary while creating background indexes in "
@@ -359,7 +360,7 @@ public:
opCtx->recoveryUnit()->abandonSnapshot();
dbLock.relockWithMode(MODE_X);
if (!repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(opCtx, ns)) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
Status(ErrorCodes::NotMaster,
str::stream()
diff --git a/src/mongo/db/commands/current_op.cpp b/src/mongo/db/commands/current_op.cpp
index 838f01c44f0..c742e9fba31 100644
--- a/src/mongo/db/commands/current_op.cpp
+++ b/src/mongo/db/commands/current_op.cpp
@@ -76,7 +76,7 @@ public:
return status;
}
- appendCommandStatus(responseBuilder, Status::OK());
+ CommandHelpers::appendCommandStatus(responseBuilder, Status::OK());
return CursorResponse::parseFromBSON(responseBuilder.obj());
}
diff --git a/src/mongo/db/commands/current_op_common.cpp b/src/mongo/db/commands/current_op_common.cpp
index 2e2a1ec799d..7f85aa2952d 100644
--- a/src/mongo/db/commands/current_op_common.cpp
+++ b/src/mongo/db/commands/current_op_common.cpp
@@ -66,7 +66,7 @@ bool CurrentOpCommandBase::run(OperationContext* opCtx,
const auto fieldName = elt.fieldNameStringData();
if (0 == idx++ || fieldName == "$all" || fieldName == "$ownOps" ||
- Command::isGenericArgument(fieldName)) {
+ CommandHelpers::isGenericArgument(fieldName)) {
continue;
}
@@ -111,7 +111,7 @@ bool CurrentOpCommandBase::run(OperationContext* opCtx,
// Make any final custom additions to the response object.
appendToResponse(&result);
- return appendCommandStatus(result, Status::OK());
+ return CommandHelpers::appendCommandStatus(result, Status::OK());
}
} // namespace mongo
diff --git a/src/mongo/db/commands/dbcheck.cpp b/src/mongo/db/commands/dbcheck.cpp
index 9adf39f6b5e..8642c381f1e 100644
--- a/src/mongo/db/commands/dbcheck.cpp
+++ b/src/mongo/db/commands/dbcheck.cpp
@@ -155,7 +155,7 @@ std::unique_ptr<DbCheckRun> getRun(OperationContext* opCtx,
// Get rid of generic command fields.
for (const auto& elem : obj) {
- if (!Command::isGenericArgument(elem.fieldNameStringData())) {
+ if (!CommandHelpers::isGenericArgument(elem.fieldNameStringData())) {
builder.append(elem);
}
}
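dbcheck.cpp, current_op_common.cpp and create_indexes.cpp above all use the same idiom: skip generic top-level arguments when walking a command object, now via CommandHelpers::isGenericArgument. A self-contained sketch of that filtering loop; stripGenericArguments is an assumed name for this illustration, not a helper added by the patch.

// Sketch only: drop generic top-level arguments before re-parsing a command object,
// mirroring the loop in the dbcheck.cpp hunk above.
BSONObj stripGenericArguments(const BSONObj& cmdObj) {
    BSONObjBuilder builder;
    for (const auto& elem : cmdObj) {
        // Was: Command::isGenericArgument(...).
        if (!CommandHelpers::isGenericArgument(elem.fieldNameStringData())) {
            builder.append(elem);
        }
    }
    return builder.obj();
}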
diff --git a/src/mongo/db/commands/dbcommands.cpp b/src/mongo/db/commands/dbcommands.cpp
index 693bb58d5a0..c71161bcb2b 100644
--- a/src/mongo/db/commands/dbcommands.cpp
+++ b/src/mongo/db/commands/dbcommands.cpp
@@ -132,7 +132,7 @@ public:
Status status = repl::getGlobalReplicationCoordinator()->stepDown(
opCtx, force, Seconds(timeoutSecs), Seconds(120));
if (!status.isOK() && status.code() != ErrorCodes::NotMaster) { // ignore not master
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
// Never returns
@@ -173,16 +173,17 @@ public:
// disallow dropping the config database
if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer &&
(dbname == NamespaceString::kConfigDb)) {
- return appendCommandStatus(result,
- Status(ErrorCodes::IllegalOperation,
- "Cannot drop 'config' database if mongod started "
- "with --configsvr"));
+ return CommandHelpers::appendCommandStatus(
+ result,
+ Status(ErrorCodes::IllegalOperation,
+ "Cannot drop 'config' database if mongod started "
+ "with --configsvr"));
}
if ((repl::getGlobalReplicationCoordinator()->getReplicationMode() !=
repl::ReplicationCoordinator::modeNone) &&
((dbname == NamespaceString::kLocalDb) || (dbname == NamespaceString::kAdminDb))) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
Status(ErrorCodes::IllegalOperation,
str::stream() << "Cannot drop '" << dbname
@@ -191,18 +192,18 @@ public:
BSONElement e = cmdObj.firstElement();
int p = (int)e.number();
if (p != 1) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result, Status(ErrorCodes::IllegalOperation, "have to pass 1 as db parameter"));
}
Status status = dropDatabase(opCtx, dbname);
if (status == ErrorCodes::NamespaceNotFound) {
- return appendCommandStatus(result, Status::OK());
+ return CommandHelpers::appendCommandStatus(result, Status::OK());
}
if (status.isOK()) {
result.append("dropped", dbname);
}
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
} cmdDropDatabase;
@@ -284,7 +285,7 @@ public:
// Open database before returning
dbHolder().openDb(opCtx, dbname);
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
} cmdRepairDatabase;
@@ -422,7 +423,7 @@ public:
const BSONObj& cmdObj,
string& errmsg,
BSONObjBuilder& result) {
- const NamespaceString nsToDrop = parseNsCollectionRequired(dbname, cmdObj);
+ const NamespaceString nsToDrop = CommandHelpers::parseNsCollectionRequired(dbname, cmdObj);
if (NamespaceString::virtualized(nsToDrop.ns())) {
errmsg = "can't drop a virtual collection";
@@ -436,7 +437,7 @@ public:
return false;
}
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
dropCollection(opCtx,
nsToDrop,
@@ -478,7 +479,7 @@ public:
const string& dbname,
const BSONObj& cmdObj,
BSONObjBuilder& result) {
- const NamespaceString ns(parseNsCollectionRequired(dbname, cmdObj));
+ const NamespaceString ns(CommandHelpers::parseNsCollectionRequired(dbname, cmdObj));
if (cmdObj.hasField("autoIndexId")) {
const char* deprecationWarning =
@@ -490,21 +491,21 @@ public:
// Validate _id index spec and fill in missing fields.
if (auto idIndexElem = cmdObj["idIndex"]) {
if (cmdObj["viewOn"]) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
{ErrorCodes::InvalidOptions,
str::stream() << "'idIndex' is not allowed with 'viewOn': " << idIndexElem});
}
if (cmdObj["autoIndexId"]) {
- return appendCommandStatus(result,
- {ErrorCodes::InvalidOptions,
- str::stream()
- << "'idIndex' is not allowed with 'autoIndexId': "
- << idIndexElem});
+ return CommandHelpers::appendCommandStatus(
+ result,
+ {ErrorCodes::InvalidOptions,
+ str::stream() << "'idIndex' is not allowed with 'autoIndexId': "
+ << idIndexElem});
}
if (idIndexElem.type() != BSONType::Object) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
{ErrorCodes::TypeMismatch,
str::stream() << "'idIndex' has to be a document: " << idIndexElem});
@@ -521,7 +522,7 @@ public:
std::unique_ptr<CollatorInterface> defaultCollator;
if (auto collationElem = cmdObj["collation"]) {
if (collationElem.type() != BSONType::Object) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
{ErrorCodes::TypeMismatch,
str::stream() << "'collation' has to be a document: " << collationElem});
@@ -529,7 +530,7 @@ public:
auto collatorStatus = CollatorFactoryInterface::get(opCtx->getServiceContext())
->makeFromBSON(collationElem.Obj());
if (!collatorStatus.isOK()) {
- return appendCommandStatus(result, collatorStatus.getStatus());
+ return CommandHelpers::appendCommandStatus(result, collatorStatus.getStatus());
}
defaultCollator = std::move(collatorStatus.getValue());
}
@@ -545,7 +546,7 @@ public:
idIndexCollator = std::move(collatorStatus.getValue());
}
if (!CollatorInterface::collatorsMatch(defaultCollator.get(), idIndexCollator.get())) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
{ErrorCodes::BadValue,
"'idIndex' must have the same collation as the collection."});
@@ -554,12 +555,13 @@ public:
// Remove "idIndex" field from command.
auto resolvedCmdObj = cmdObj.removeField("idIndex");
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result, createCollection(opCtx, dbname, resolvedCmdObj, idIndexSpec));
}
BSONObj idIndexSpec;
- return appendCommandStatus(result, createCollection(opCtx, dbname, cmdObj, idIndexSpec));
+ return CommandHelpers::appendCommandStatus(
+ result, createCollection(opCtx, dbname, cmdObj, idIndexSpec));
}
} cmdCreate;
@@ -701,11 +703,11 @@ public:
}
if (PlanExecutor::DEAD == state || PlanExecutor::FAILURE == state) {
- return appendCommandStatus(result,
- Status(ErrorCodes::OperationFailed,
- str::stream()
- << "Executor error during filemd5 command: "
- << WorkingSetCommon::toStatusString(obj)));
+ return CommandHelpers::appendCommandStatus(
+ result,
+ Status(ErrorCodes::OperationFailed,
+ str::stream() << "Executor error during filemd5 command: "
+ << WorkingSetCommon::toStatusString(obj)));
}
if (partialOk)
@@ -739,7 +741,7 @@ public:
class CmdDatasize : public ErrmsgCommandDeprecated {
virtual string parseNs(const string& dbname, const BSONObj& cmdObj) const {
- return parseNsFullyQualified(dbname, cmdObj);
+ return CommandHelpers::parseNsFullyQualified(dbname, cmdObj);
}
public:
@@ -870,7 +872,7 @@ public:
if (PlanExecutor::FAILURE == state || PlanExecutor::DEAD == state) {
warning() << "Internal error while reading " << ns;
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
Status(ErrorCodes::OperationFailed,
str::stream() << "Executor error while reading during dataSize command: "
@@ -920,7 +922,7 @@ public:
const BSONObj& jsobj,
string& errmsg,
BSONObjBuilder& result) {
- const NamespaceString nss(parseNsCollectionRequired(dbname, jsobj));
+ const NamespaceString nss(CommandHelpers::parseNsCollectionRequired(dbname, jsobj));
if (nss.coll().empty()) {
errmsg = "No collection name specified";
@@ -967,8 +969,8 @@ public:
const string& dbname,
const BSONObj& jsobj,
BSONObjBuilder& result) {
- const NamespaceString nss(parseNsCollectionRequired(dbname, jsobj));
- return appendCommandStatus(result, collMod(opCtx, nss, jsobj, &result));
+ const NamespaceString nss(CommandHelpers::parseNsCollectionRequired(dbname, jsobj));
+ return CommandHelpers::appendCommandStatus(result, collMod(opCtx, nss, jsobj, &result));
}
} collectionModCommand;
diff --git a/src/mongo/db/commands/distinct.cpp b/src/mongo/db/commands/distinct.cpp
index 96f08dc80ed..a17c3bd3a23 100644
--- a/src/mongo/db/commands/distinct.cpp
+++ b/src/mongo/db/commands/distinct.cpp
@@ -116,7 +116,7 @@ public:
const BSONObj& cmdObj,
ExplainOptions::Verbosity verbosity,
BSONObjBuilder* out) const {
- const NamespaceString nss(parseNsCollectionRequired(dbname, cmdObj));
+ const NamespaceString nss(CommandHelpers::parseNsCollectionRequired(dbname, cmdObj));
const ExtensionsCallbackReal extensionsCallback(opCtx, &nss);
auto parsedDistinct = ParsedDistinct::parse(opCtx, nss, cmdObj, extensionsCallback, true);
@@ -159,12 +159,12 @@ public:
const string& dbname,
const BSONObj& cmdObj,
BSONObjBuilder& result) {
- const NamespaceString nss(parseNsCollectionRequired(dbname, cmdObj));
+ const NamespaceString nss(CommandHelpers::parseNsCollectionRequired(dbname, cmdObj));
const ExtensionsCallbackReal extensionsCallback(opCtx, &nss);
auto parsedDistinct = ParsedDistinct::parse(opCtx, nss, cmdObj, extensionsCallback, false);
if (!parsedDistinct.isOK()) {
- return appendCommandStatus(result, parsedDistinct.getStatus());
+ return CommandHelpers::appendCommandStatus(result, parsedDistinct.getStatus());
}
AutoGetCollectionOrViewForReadCommand ctx(opCtx, nss);
@@ -175,10 +175,10 @@ public:
auto viewAggregation = parsedDistinct.getValue().asAggregationCommand();
if (!viewAggregation.isOK()) {
- return appendCommandStatus(result, viewAggregation.getStatus());
+ return CommandHelpers::appendCommandStatus(result, viewAggregation.getStatus());
}
- BSONObj aggResult = Command::runCommandDirectly(
+ BSONObj aggResult = CommandHelpers::runCommandDirectly(
opCtx, OpMsgRequest::fromDBAndBody(dbname, std::move(viewAggregation.getValue())));
if (ResolvedView::isResolvedViewErrorResponse(aggResult)) {
@@ -189,7 +189,7 @@ public:
ViewResponseFormatter formatter(aggResult);
Status formatStatus = formatter.appendAsDistinctResponse(&result);
if (!formatStatus.isOK()) {
- return appendCommandStatus(result, formatStatus);
+ return CommandHelpers::appendCommandStatus(result, formatStatus);
}
return true;
}
@@ -197,7 +197,7 @@ public:
auto executor = getExecutorDistinct(
opCtx, collection, nss.ns(), &parsedDistinct.getValue(), PlanExecutor::YIELD_AUTO);
if (!executor.isOK()) {
- return appendCommandStatus(result, executor.getStatus());
+ return CommandHelpers::appendCommandStatus(result, executor.getStatus());
}
{
@@ -249,11 +249,11 @@ public:
<< redact(PlanExecutor::statestr(state))
<< ", stats: " << redact(Explain::getWinningPlanStats(executor.getValue().get()));
- return appendCommandStatus(result,
- Status(ErrorCodes::OperationFailed,
- str::stream()
- << "Executor error during distinct command: "
- << WorkingSetCommon::toStatusString(obj)));
+ return CommandHelpers::appendCommandStatus(
+ result,
+ Status(ErrorCodes::OperationFailed,
+ str::stream() << "Executor error during distinct command: "
+ << WorkingSetCommon::toStatusString(obj)));
}
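count_cmd.cpp and distinct.cpp share the view path shown above: the request is rewritten as an aggregation and dispatched in-process, now through CommandHelpers::runCommandDirectly. A sketch of that hand-off; runViewAggregation is an assumed wrapper name, error handling is elided, and the reply handling here borrows the passthrough filtering seen in an earlier hunk rather than the ViewResponseFormatter path used by count and distinct.

// Sketch only: dispatch a rewritten view aggregation in-process and copy the
// filtered reply into the caller's result builder.
bool runViewAggregation(OperationContext* opCtx,
                        const std::string& dbname,
                        const BSONObj& viewAggCmd,
                        BSONObjBuilder& result) {
    // Was: Command::runCommandDirectly(...).
    BSONObj aggResult = CommandHelpers::runCommandDirectly(
        opCtx, OpMsgRequest::fromDBAndBody(dbname, viewAggCmd));
    // Was: filterCommandReplyForPassthrough(...), inherited from Command.
    CommandHelpers::filterCommandReplyForPassthrough(aggResult, &result);
    return true;
}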
diff --git a/src/mongo/db/commands/do_txn_cmd.cpp b/src/mongo/db/commands/do_txn_cmd.cpp
index 58965f0283f..f695470f034 100644
--- a/src/mongo/db/commands/do_txn_cmd.cpp
+++ b/src/mongo/db/commands/do_txn_cmd.cpp
@@ -163,7 +163,7 @@ public:
auto status = OplogApplicationChecks::checkOperationArray(cmdObj.firstElement());
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
// TODO (SERVER-30217): When a write concern is provided to the doTxn command, we
@@ -176,7 +176,8 @@ public:
// was acknowledged. To fix this, we should wait for replication of the node’s last applied
// OpTime if the last write operation was a no-op write.
- auto doTxnStatus = appendCommandStatus(result, doTxn(opCtx, dbname, cmdObj, &result));
+ auto doTxnStatus =
+ CommandHelpers::appendCommandStatus(result, doTxn(opCtx, dbname, cmdObj, &result));
return doTxnStatus;
}
diff --git a/src/mongo/db/commands/drop_indexes.cpp b/src/mongo/db/commands/drop_indexes.cpp
index 7f73d58557a..14a7a5d91a2 100644
--- a/src/mongo/db/commands/drop_indexes.cpp
+++ b/src/mongo/db/commands/drop_indexes.cpp
@@ -88,8 +88,8 @@ public:
const string& dbname,
const BSONObj& jsobj,
BSONObjBuilder& result) {
- const NamespaceString nss = parseNsCollectionRequired(dbname, jsobj);
- return appendCommandStatus(result, dropIndexes(opCtx, nss, jsobj, &result));
+ const NamespaceString nss = CommandHelpers::parseNsCollectionRequired(dbname, jsobj);
+ return CommandHelpers::appendCommandStatus(result, dropIndexes(opCtx, nss, jsobj, &result));
}
} cmdDropIndexes;
@@ -121,7 +121,8 @@ public:
BSONObjBuilder& result) {
DBDirectClient db(opCtx);
- const NamespaceString toReIndexNs = parseNsCollectionRequired(dbname, jsobj);
+ const NamespaceString toReIndexNs =
+ CommandHelpers::parseNsCollectionRequired(dbname, jsobj);
LOG(0) << "CMD: reIndex " << toReIndexNs;
@@ -131,10 +132,10 @@ public:
Collection* collection = ctx.db()->getCollection(opCtx, toReIndexNs);
if (!collection) {
if (ctx.db()->getViewCatalog()->lookup(opCtx, toReIndexNs.ns()))
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result, {ErrorCodes::CommandNotSupportedOnView, "can't re-index a view"});
else
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result, {ErrorCodes::NamespaceNotFound, "collection does not exist"});
}
@@ -198,12 +199,12 @@ public:
auto indexInfoObjs = indexer.init(all);
if (!indexInfoObjs.isOK()) {
- return appendCommandStatus(result, indexInfoObjs.getStatus());
+ return CommandHelpers::appendCommandStatus(result, indexInfoObjs.getStatus());
}
auto status = indexer.insertAllDocumentsInCollection();
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
{
diff --git a/src/mongo/db/commands/end_sessions_command.cpp b/src/mongo/db/commands/end_sessions_command.cpp
index fdaf5f37187..8f6bb1ded29 100644
--- a/src/mongo/db/commands/end_sessions_command.cpp
+++ b/src/mongo/db/commands/end_sessions_command.cpp
@@ -84,7 +84,8 @@ public:
if (serverGlobalParams.featureCompatibility.getVersion() !=
ServerGlobalParams::FeatureCompatibility::Version::kFullyUpgradedTo36) {
- return appendCommandStatus(result, SessionsCommandFCV34Status(getName()));
+ return CommandHelpers::appendCommandStatus(result,
+ SessionsCommandFCV34Status(getName()));
}
auto lsCache = LogicalSessionCache::get(opCtx);
diff --git a/src/mongo/db/commands/explain_cmd.cpp b/src/mongo/db/commands/explain_cmd.cpp
index d0a218a2e1a..a2adaef7d38 100644
--- a/src/mongo/db/commands/explain_cmd.cpp
+++ b/src/mongo/db/commands/explain_cmd.cpp
@@ -88,7 +88,7 @@ public:
Object == cmdObj.firstElement().type());
auto explainObj = cmdObj.firstElement().Obj();
- Command* commToExplain = Command::findCommand(explainObj.firstElementFieldName());
+ Command* commToExplain = CommandHelpers::findCommand(explainObj.firstElementFieldName());
uassert(ErrorCodes::CommandNotFound,
str::stream() << "explain failed due to unknown command: "
<< explainObj.firstElementFieldName(),
@@ -110,7 +110,7 @@ public:
BSONObj explainObj = cmdObj.firstElement().Obj();
- Command* commToExplain = Command::findCommand(explainObj.firstElementFieldName());
+ Command* commToExplain = CommandHelpers::findCommand(explainObj.firstElementFieldName());
if (NULL == commToExplain) {
mongoutils::str::stream ss;
ss << "unknown command: " << explainObj.firstElementFieldName();
@@ -127,7 +127,7 @@ public:
BSONObjBuilder& result) {
auto verbosity = ExplainOptions::parseCmdBSON(cmdObj);
if (!verbosity.isOK()) {
- return appendCommandStatus(result, verbosity.getStatus());
+ return CommandHelpers::appendCommandStatus(result, verbosity.getStatus());
}
// This is the nested command which we are explaining.
@@ -141,12 +141,12 @@ public:
innerDb.checkAndGetStringData() == dbname);
}
- Command* commToExplain = Command::findCommand(explainObj.firstElementFieldName());
+ Command* commToExplain = CommandHelpers::findCommand(explainObj.firstElementFieldName());
if (NULL == commToExplain) {
mongoutils::str::stream ss;
ss << "Explain failed due to unknown command: " << explainObj.firstElementFieldName();
Status explainStatus(ErrorCodes::CommandNotFound, ss);
- return appendCommandStatus(result, explainStatus);
+ return CommandHelpers::appendCommandStatus(result, explainStatus);
}
// Check whether the child command is allowed to run here. TODO: this logic is
@@ -167,7 +167,7 @@ public:
mongoutils::str::stream ss;
ss << "Explain's child command cannot run on this node. "
<< "Are you explaining a write command on a secondary?";
- appendCommandStatus(result, false, ss);
+ CommandHelpers::appendCommandStatus(result, false, ss);
return false;
}
@@ -175,7 +175,7 @@ public:
Status explainStatus =
commToExplain->explain(opCtx, dbname, explainObj, verbosity.getValue(), &result);
if (!explainStatus.isOK()) {
- return appendCommandStatus(result, explainStatus);
+ return CommandHelpers::appendCommandStatus(result, explainStatus);
}
return true;
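explain_cmd.cpp, and later oplog_application_checks.cpp, now resolve a nested command name through CommandHelpers::findCommand. A sketch of that lookup; resolveNestedCommand is an assumed name for this illustration only.

// Sketch only: look up the command object for the nested command being explained.
Status resolveNestedCommand(const BSONObj& innerCmd, Command** commandOut) {
    const char* name = innerCmd.firstElementFieldName();
    // Was: Command::findCommand(name).
    Command* command = CommandHelpers::findCommand(name);
    if (!command) {
        return Status(ErrorCodes::CommandNotFound,
                      str::stream() << "unknown command: " << name);
    }
    *commandOut = command;
    return Status::OK();
}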
diff --git a/src/mongo/db/commands/feature_compatibility_version_command_parser.cpp b/src/mongo/db/commands/feature_compatibility_version_command_parser.cpp
index e5cbe4bd137..be841f8baf8 100644
--- a/src/mongo/db/commands/feature_compatibility_version_command_parser.cpp
+++ b/src/mongo/db/commands/feature_compatibility_version_command_parser.cpp
@@ -72,7 +72,7 @@ StatusWith<std::string> FeatureCompatibilityVersionCommandParser::extractVersion
// Ensure that the command does not contain any unrecognized parameters
for (const auto& cmdElem : cmdObj) {
const auto fieldName = cmdElem.fieldNameStringData();
- if (fieldName == commandName || Command::isGenericArgument(fieldName)) {
+ if (fieldName == commandName || CommandHelpers::isGenericArgument(fieldName)) {
continue;
}
diff --git a/src/mongo/db/commands/find_and_modify.cpp b/src/mongo/db/commands/find_and_modify.cpp
index dbbd28447bf..2a9cb589c64 100644
--- a/src/mongo/db/commands/find_and_modify.cpp
+++ b/src/mongo/db/commands/find_and_modify.cpp
@@ -242,7 +242,7 @@ public:
const BSONObj& cmdObj,
ExplainOptions::Verbosity verbosity,
BSONObjBuilder* out) const override {
- const NamespaceString fullNs = parseNsCollectionRequired(dbName, cmdObj);
+ const NamespaceString fullNs = CommandHelpers::parseNsCollectionRequired(dbName, cmdObj);
Status allowedWriteStatus = userAllowedWriteNS(fullNs.ns());
if (!allowedWriteStatus.isOK()) {
return allowedWriteStatus;
@@ -330,16 +330,16 @@ public:
BSONObjBuilder& result) override {
// findAndModify command is not replicated directly.
invariant(opCtx->writesAreReplicated());
- const NamespaceString fullNs = parseNsCollectionRequired(dbName, cmdObj);
+ const NamespaceString fullNs = CommandHelpers::parseNsCollectionRequired(dbName, cmdObj);
Status allowedWriteStatus = userAllowedWriteNS(fullNs.ns());
if (!allowedWriteStatus.isOK()) {
- return appendCommandStatus(result, allowedWriteStatus);
+ return CommandHelpers::appendCommandStatus(result, allowedWriteStatus);
}
StatusWith<FindAndModifyRequest> parseStatus =
FindAndModifyRequest::parseFromBSON(NamespaceString(fullNs.ns()), cmdObj);
if (!parseStatus.isOK()) {
- return appendCommandStatus(result, parseStatus.getStatus());
+ return CommandHelpers::appendCommandStatus(result, parseStatus.getStatus());
}
const FindAndModifyRequest& args = parseStatus.getValue();
@@ -378,7 +378,7 @@ public:
ParsedDelete parsedDelete(opCtx, &request);
Status parsedDeleteStatus = parsedDelete.parseRequest();
if (!parsedDeleteStatus.isOK()) {
- appendCommandStatus(result, parsedDeleteStatus);
+ CommandHelpers::appendCommandStatus(result, parsedDeleteStatus);
return false;
}
@@ -397,21 +397,21 @@ public:
Status isPrimary = checkCanAcceptWritesForDatabase(opCtx, nsString);
if (!isPrimary.isOK()) {
- appendCommandStatus(result, isPrimary);
+ CommandHelpers::appendCommandStatus(result, isPrimary);
return false;
}
Collection* const collection = autoDb.getDb()->getCollection(opCtx, nsString);
if (!collection && autoDb.getDb()->getViewCatalog()->lookup(opCtx, nsString.ns())) {
- appendCommandStatus(result,
- {ErrorCodes::CommandNotSupportedOnView,
- "findAndModify not supported on a view"});
+ CommandHelpers::appendCommandStatus(result,
+ {ErrorCodes::CommandNotSupportedOnView,
+ "findAndModify not supported on a view"});
return false;
}
auto statusWithPlanExecutor =
getExecutorDelete(opCtx, opDebug, collection, &parsedDelete);
if (!statusWithPlanExecutor.isOK()) {
- appendCommandStatus(result, statusWithPlanExecutor.getStatus());
+ CommandHelpers::appendCommandStatus(result, statusWithPlanExecutor.getStatus());
return false;
}
const auto exec = std::move(statusWithPlanExecutor.getValue());
@@ -424,7 +424,7 @@ public:
StatusWith<boost::optional<BSONObj>> advanceStatus =
advanceExecutor(opCtx, exec.get(), args.isRemove());
if (!advanceStatus.isOK()) {
- appendCommandStatus(result, advanceStatus.getStatus());
+ CommandHelpers::appendCommandStatus(result, advanceStatus.getStatus());
return false;
}
// Nothing after advancing the plan executor should throw a WriteConflictException,
@@ -463,7 +463,7 @@ public:
ParsedUpdate parsedUpdate(opCtx, &request);
Status parsedUpdateStatus = parsedUpdate.parseRequest();
if (!parsedUpdateStatus.isOK()) {
- appendCommandStatus(result, parsedUpdateStatus);
+ CommandHelpers::appendCommandStatus(result, parsedUpdateStatus);
return false;
}
@@ -482,15 +482,15 @@ public:
Status isPrimary = checkCanAcceptWritesForDatabase(opCtx, nsString);
if (!isPrimary.isOK()) {
- appendCommandStatus(result, isPrimary);
+ CommandHelpers::appendCommandStatus(result, isPrimary);
return false;
}
Collection* collection = autoDb.getDb()->getCollection(opCtx, nsString.ns());
if (!collection && autoDb.getDb()->getViewCatalog()->lookup(opCtx, nsString.ns())) {
- appendCommandStatus(result,
- {ErrorCodes::CommandNotSupportedOnView,
- "findAndModify not supported on a view"});
+ CommandHelpers::appendCommandStatus(result,
+ {ErrorCodes::CommandNotSupportedOnView,
+ "findAndModify not supported on a view"});
return false;
}
@@ -503,7 +503,7 @@ public:
collection = autoDb.getDb()->getCollection(opCtx, nsString);
Status isPrimaryAfterRelock = checkCanAcceptWritesForDatabase(opCtx, nsString);
if (!isPrimaryAfterRelock.isOK()) {
- appendCommandStatus(result, isPrimaryAfterRelock);
+ CommandHelpers::appendCommandStatus(result, isPrimaryAfterRelock);
return false;
}
@@ -514,7 +514,7 @@ public:
Status createCollStatus =
userCreateNS(opCtx, autoDb.getDb(), nsString.ns(), BSONObj());
if (!createCollStatus.isOK()) {
- appendCommandStatus(result, createCollStatus);
+ CommandHelpers::appendCommandStatus(result, createCollStatus);
return false;
}
wuow.commit();
@@ -527,7 +527,7 @@ public:
auto statusWithPlanExecutor =
getExecutorUpdate(opCtx, opDebug, collection, &parsedUpdate);
if (!statusWithPlanExecutor.isOK()) {
- appendCommandStatus(result, statusWithPlanExecutor.getStatus());
+ CommandHelpers::appendCommandStatus(result, statusWithPlanExecutor.getStatus());
return false;
}
const auto exec = std::move(statusWithPlanExecutor.getValue());
@@ -540,7 +540,7 @@ public:
StatusWith<boost::optional<BSONObj>> advanceStatus =
advanceExecutor(opCtx, exec.get(), args.isRemove());
if (!advanceStatus.isOK()) {
- appendCommandStatus(result, advanceStatus.getStatus());
+ CommandHelpers::appendCommandStatus(result, advanceStatus.getStatus());
return false;
}
// Nothing after advancing the plan executor should throw a WriteConflictException,
diff --git a/src/mongo/db/commands/find_cmd.cpp b/src/mongo/db/commands/find_cmd.cpp
index 9d73ecc39fa..b3294bf8d6c 100644
--- a/src/mongo/db/commands/find_cmd.cpp
+++ b/src/mongo/db/commands/find_cmd.cpp
@@ -121,7 +121,7 @@ public:
if (!authSession->isAuthorizedToParseNamespaceElement(cmdObj.firstElement())) {
return Status(ErrorCodes::Unauthorized, "Unauthorized");
}
- const NamespaceString nss(parseNsOrUUID(opCtx, dbname, cmdObj));
+ const NamespaceString nss(CommandHelpers::parseNsOrUUID(opCtx, dbname, cmdObj));
auto hasTerm = cmdObj.hasField(kTermField);
return authSession->checkAuthForFind(nss, hasTerm);
}
@@ -233,7 +233,7 @@ public:
auto qrStatus = QueryRequest::makeFromFindCommand(
NamespaceString(parseNs(dbname, cmdObj)), cmdObj, isExplain);
if (!qrStatus.isOK()) {
- return appendCommandStatus(result, qrStatus.getStatus());
+ return CommandHelpers::appendCommandStatus(result, qrStatus.getStatus());
}
auto& qr = qrStatus.getValue();
@@ -244,14 +244,14 @@ public:
Status status = replCoord->updateTerm(opCtx, *term);
// Note: updateTerm returns ok if term stayed the same.
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
}
// Acquire locks. If the query is on a view, we release our locks and convert the query
// request into an aggregation command.
Lock::DBLock dbSLock(opCtx, dbname, MODE_IS);
- const NamespaceString nss(parseNsOrUUID(opCtx, dbname, cmdObj));
+ const NamespaceString nss(CommandHelpers::parseNsOrUUID(opCtx, dbname, cmdObj));
qr->refreshNSS(opCtx);
// Fill out curop information.
@@ -274,7 +274,7 @@ public:
MatchExpressionParser::kAllowAllSpecialFeatures &
~MatchExpressionParser::AllowedFeatures::kIsolated);
if (!statusWithCQ.isOK()) {
- return appendCommandStatus(result, statusWithCQ.getStatus());
+ return CommandHelpers::appendCommandStatus(result, statusWithCQ.getStatus());
}
std::unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
@@ -289,14 +289,15 @@ public:
const auto& qr = cq->getQueryRequest();
auto viewAggregationCommand = qr.asAggregationCommand();
if (!viewAggregationCommand.isOK())
- return appendCommandStatus(result, viewAggregationCommand.getStatus());
+ return CommandHelpers::appendCommandStatus(result,
+ viewAggregationCommand.getStatus());
- BSONObj aggResult = Command::runCommandDirectly(
+ BSONObj aggResult = CommandHelpers::runCommandDirectly(
opCtx,
OpMsgRequest::fromDBAndBody(dbname, std::move(viewAggregationCommand.getValue())));
auto status = getStatusFromCommandResult(aggResult);
if (status.code() == ErrorCodes::InvalidPipelineOperator) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
{ErrorCodes::InvalidPipelineOperator,
str::stream() << "Unsupported in view pipeline: " << status.reason()});
@@ -310,7 +311,7 @@ public:
auto statusWithPlanExecutor =
getExecutorFind(opCtx, collection, nss, std::move(cq), PlanExecutor::YIELD_AUTO);
if (!statusWithPlanExecutor.isOK()) {
- return appendCommandStatus(result, statusWithPlanExecutor.getStatus());
+ return CommandHelpers::appendCommandStatus(result, statusWithPlanExecutor.getStatus());
}
auto exec = std::move(statusWithPlanExecutor.getValue());
@@ -356,11 +357,11 @@ public:
error() << "Plan executor error during find command: " << PlanExecutor::statestr(state)
<< ", stats: " << redact(Explain::getWinningPlanStats(exec.get()));
- return appendCommandStatus(result,
- Status(ErrorCodes::OperationFailed,
- str::stream()
- << "Executor error during find command: "
- << WorkingSetCommon::toStatusString(obj)));
+ return CommandHelpers::appendCommandStatus(
+ result,
+ Status(ErrorCodes::OperationFailed,
+ str::stream() << "Executor error during find command: "
+ << WorkingSetCommon::toStatusString(obj)));
}
// Before saving the cursor, ensure that whatever plan we established happened with the
diff --git a/src/mongo/db/commands/fsync.cpp b/src/mongo/db/commands/fsync.cpp
index e4c3aca53bd..fe5945e4589 100644
--- a/src/mongo/db/commands/fsync.cpp
+++ b/src/mongo/db/commands/fsync.cpp
@@ -184,7 +184,7 @@ public:
if (!status.isOK()) {
releaseLock();
warning() << "fsyncLock failed. Lock count reset to 0. Status: " << status;
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
}
diff --git a/src/mongo/db/commands/generic.cpp b/src/mongo/db/commands/generic.cpp
index f047d9fd70d..5975a311fcc 100644
--- a/src/mongo/db/commands/generic.cpp
+++ b/src/mongo/db/commands/generic.cpp
@@ -358,7 +358,7 @@ public:
BSONObjBuilder& result) {
BSONElement val = cmdObj.firstElement();
if (val.type() != String) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
Status(ErrorCodes::TypeMismatch,
str::stream() << "Argument to getLog must be of type String; found "
@@ -429,11 +429,11 @@ public:
std::string logName;
Status status = bsonExtractStringField(cmdObj, "clearLog", &logName);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
if (logName != "global") {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result, Status(ErrorCodes::InvalidOptions, "Only the 'global' log can be cleared"));
}
RamLog* ramlog = RamLog::getIfExists(logName);
diff --git a/src/mongo/db/commands/geo_near_cmd.cpp b/src/mongo/db/commands/geo_near_cmd.cpp
index b6c957eb64e..970bdd6871a 100644
--- a/src/mongo/db/commands/geo_near_cmd.cpp
+++ b/src/mongo/db/commands/geo_near_cmd.cpp
@@ -123,7 +123,7 @@ public:
return false;
}
- const NamespaceString nss(parseNsCollectionRequired(dbname, cmdObj));
+ const NamespaceString nss(CommandHelpers::parseNsCollectionRequired(dbname, cmdObj));
AutoGetCollectionForReadCommand ctx(opCtx, nss);
Collection* collection = ctx.getCollection();
@@ -178,7 +178,7 @@ public:
Status collationEltStatus =
bsonExtractTypedField(cmdObj, "collation", BSONType::Object, &collationElt);
if (!collationEltStatus.isOK() && (collationEltStatus != ErrorCodes::NoSuchKey)) {
- return appendCommandStatus(result, collationEltStatus);
+ return CommandHelpers::appendCommandStatus(result, collationEltStatus);
}
if (collationEltStatus.isOK()) {
collation = collationElt.Obj();
@@ -303,11 +303,11 @@ public:
log() << "Plan executor error during geoNear command: " << PlanExecutor::statestr(state)
<< ", stats: " << redact(Explain::getWinningPlanStats(exec.get()));
- return appendCommandStatus(result,
- Status(ErrorCodes::OperationFailed,
- str::stream()
- << "Executor error during geoNear command: "
- << WorkingSetCommon::toStatusString(currObj)));
+ return CommandHelpers::appendCommandStatus(
+ result,
+ Status(ErrorCodes::OperationFailed,
+ str::stream() << "Executor error during geoNear command: "
+ << WorkingSetCommon::toStatusString(currObj)));
}
PlanSummaryStats summary;
diff --git a/src/mongo/db/commands/get_last_error.cpp b/src/mongo/db/commands/get_last_error.cpp
index bcf4387d7c9..a0b66d375ef 100644
--- a/src/mongo/db/commands/get_last_error.cpp
+++ b/src/mongo/db/commands/get_last_error.cpp
@@ -176,10 +176,10 @@ public:
Status status = bsonExtractOpTimeField(cmdObj, "wOpTime", &lastOpTime);
if (!status.isOK()) {
result.append("badGLE", cmdObj);
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
} else {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
Status(ErrorCodes::TypeMismatch,
str::stream() << "Expected \"wOpTime\" field in getLastError to "
@@ -194,7 +194,7 @@ public:
FieldParser::extract(cmdObj, wElectionIdField, &electionId, &errmsg);
if (!extracted) {
result.append("badGLE", cmdObj);
- appendCommandStatus(result, false, errmsg);
+ CommandHelpers::appendCommandStatus(result, false, errmsg);
return false;
}
@@ -213,7 +213,7 @@ public:
BSONObj writeConcernDoc = ([&] {
BSONObjBuilder bob;
for (auto&& elem : cmdObj) {
- if (!Command::isGenericArgument(elem.fieldNameStringData()))
+ if (!CommandHelpers::isGenericArgument(elem.fieldNameStringData()))
bob.append(elem);
}
return bob.obj();
@@ -244,7 +244,7 @@ public:
if (!status.isOK()) {
result.append("badGLE", writeConcernDoc);
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
// Don't wait for replication if there was an error reported - this matches 2.4 behavior
@@ -300,7 +300,7 @@ public:
return true;
}
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
} cmdGetLastError;
diff --git a/src/mongo/db/commands/getmore_cmd.cpp b/src/mongo/db/commands/getmore_cmd.cpp
index 4bab002396f..360dc810f5e 100644
--- a/src/mongo/db/commands/getmore_cmd.cpp
+++ b/src/mongo/db/commands/getmore_cmd.cpp
@@ -164,7 +164,7 @@ public:
Status status = replCoord->updateTerm(opCtx, *request.term);
// Note: updateTerm returns ok if term stayed the same.
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
}
@@ -211,16 +211,17 @@ public:
Collection* collection = readLock->getCollection();
if (!collection) {
- return appendCommandStatus(result,
- Status(ErrorCodes::OperationFailed,
- "collection dropped between getMore calls"));
+ return CommandHelpers::appendCommandStatus(
+ result,
+ Status(ErrorCodes::OperationFailed,
+ "collection dropped between getMore calls"));
}
cursorManager = collection->getCursorManager();
}
auto ccPin = cursorManager->pinCursor(opCtx, request.cursorid);
if (!ccPin.isOK()) {
- return appendCommandStatus(result, ccPin.getStatus());
+ return CommandHelpers::appendCommandStatus(result, ccPin.getStatus());
}
ClientCursor* cursor = ccPin.getValue().getCursor();
@@ -241,7 +242,7 @@ public:
// authenticated in order to run getMore on the cursor.
if (!AuthorizationSession::get(opCtx->getClient())
->isCoauthorizedWith(cursor->getAuthenticatedUsers())) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
Status(ErrorCodes::Unauthorized,
str::stream() << "cursor id " << request.cursorid
@@ -249,7 +250,7 @@ public:
}
if (request.nss != cursor->nss()) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
Status(ErrorCodes::Unauthorized,
str::stream() << "Requested getMore on namespace '" << request.nss.ns()
@@ -258,7 +259,7 @@ public:
}
if (request.nss.isOplog() && MONGO_FAIL_POINT(rsStopGetMoreCmd)) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
Status(ErrorCodes::CommandFailed,
str::stream() << "getMore on " << request.nss.ns()
@@ -273,7 +274,7 @@ public:
if (request.awaitDataTimeout && !cursor->isAwaitData()) {
Status status(ErrorCodes::BadValue,
"cannot set maxTimeMS on getMore command for a non-awaitData cursor");
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
// On early return, get rid of the cursor.
@@ -343,7 +344,7 @@ public:
Status batchStatus = generateBatch(opCtx, cursor, request, &nextBatch, &state, &numResults);
if (!batchStatus.isOK()) {
- return appendCommandStatus(result, batchStatus);
+ return CommandHelpers::appendCommandStatus(result, batchStatus);
}
PlanSummaryStats postExecutionStats;
@@ -397,7 +398,7 @@ public:
StatusWith<GetMoreRequest> parsedRequest = GetMoreRequest::parseFromBSON(dbname, cmdObj);
if (!parsedRequest.isOK()) {
- return appendCommandStatus(result, parsedRequest.getStatus());
+ return CommandHelpers::appendCommandStatus(result, parsedRequest.getStatus());
}
auto request = parsedRequest.getValue();
return runParsed(opCtx, request.nss, request, cmdObj, result);
diff --git a/src/mongo/db/commands/group_cmd.cpp b/src/mongo/db/commands/group_cmd.cpp
index 1bdcb3e70a9..5eb751a2991 100644
--- a/src/mongo/db/commands/group_cmd.cpp
+++ b/src/mongo/db/commands/group_cmd.cpp
@@ -159,7 +159,7 @@ private:
GroupRequest groupRequest;
Status parseRequestStatus = _parseRequest(dbname, cmdObj, &groupRequest);
if (!parseRequestStatus.isOK()) {
- return appendCommandStatus(result, parseRequestStatus);
+ return CommandHelpers::appendCommandStatus(result, parseRequestStatus);
}
AutoGetCollectionForReadCommand ctx(opCtx, groupRequest.ns);
@@ -168,7 +168,7 @@ private:
auto statusWithPlanExecutor =
getExecutorGroup(opCtx, coll, groupRequest, PlanExecutor::YIELD_AUTO);
if (!statusWithPlanExecutor.isOK()) {
- return appendCommandStatus(result, statusWithPlanExecutor.getStatus());
+ return CommandHelpers::appendCommandStatus(result, statusWithPlanExecutor.getStatus());
}
auto planExecutor = std::move(statusWithPlanExecutor.getValue());
@@ -186,13 +186,15 @@ private:
invariant(PlanExecutor::FAILURE == state || PlanExecutor::DEAD == state);
if (WorkingSetCommon::isValidStatusMemberObject(retval)) {
- return appendCommandStatus(result, WorkingSetCommon::getMemberObjectStatus(retval));
+ return CommandHelpers::appendCommandStatus(
+ result, WorkingSetCommon::getMemberObjectStatus(retval));
}
- return appendCommandStatus(result,
- Status(ErrorCodes::BadValue,
- str::stream() << "error encountered during group "
- << "operation, executor returned "
- << PlanExecutor::statestr(state)));
+ return CommandHelpers::appendCommandStatus(
+ result,
+ Status(ErrorCodes::BadValue,
+ str::stream() << "error encountered during group "
+ << "operation, executor returned "
+ << PlanExecutor::statestr(state)));
}
invariant(planExecutor->isEOF());
diff --git a/src/mongo/db/commands/haystack.cpp b/src/mongo/db/commands/haystack.cpp
index de048b6f4c6..1f7dd0fcdf6 100644
--- a/src/mongo/db/commands/haystack.cpp
+++ b/src/mongo/db/commands/haystack.cpp
@@ -101,7 +101,7 @@ public:
const BSONObj& cmdObj,
string& errmsg,
BSONObjBuilder& result) {
- const NamespaceString nss = parseNsCollectionRequired(dbname, cmdObj);
+ const NamespaceString nss = CommandHelpers::parseNsCollectionRequired(dbname, cmdObj);
AutoGetCollectionForReadCommand ctx(opCtx, nss);
diff --git a/src/mongo/db/commands/index_filter_commands.cpp b/src/mongo/db/commands/index_filter_commands.cpp
index 9945beb82f5..d8187c00861 100644
--- a/src/mongo/db/commands/index_filter_commands.cpp
+++ b/src/mongo/db/commands/index_filter_commands.cpp
@@ -119,9 +119,9 @@ bool IndexFilterCommand::run(OperationContext* opCtx,
const string& dbname,
const BSONObj& cmdObj,
BSONObjBuilder& result) {
- const NamespaceString nss(parseNsCollectionRequired(dbname, cmdObj));
+ const NamespaceString nss(CommandHelpers::parseNsCollectionRequired(dbname, cmdObj));
Status status = runIndexFilterCommand(opCtx, nss.ns(), cmdObj, &result);
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
diff --git a/src/mongo/db/commands/kill_all_sessions_by_pattern_command.cpp b/src/mongo/db/commands/kill_all_sessions_by_pattern_command.cpp
index d0f7b0d1368..45b01c712be 100644
--- a/src/mongo/db/commands/kill_all_sessions_by_pattern_command.cpp
+++ b/src/mongo/db/commands/kill_all_sessions_by_pattern_command.cpp
@@ -94,7 +94,8 @@ public:
if (serverGlobalParams.featureCompatibility.getVersion() !=
ServerGlobalParams::FeatureCompatibility::Version::kFullyUpgradedTo36) {
- return appendCommandStatus(result, SessionsCommandFCV34Status(getName()));
+ return CommandHelpers::appendCommandStatus(result,
+ SessionsCommandFCV34Status(getName()));
}
IDLParserErrorContext ctx("KillAllSessionsByPatternCmd");
@@ -113,7 +114,7 @@ public:
for (const auto& pattern : ksc.getKillAllSessionsByPattern()) {
if (pattern.getUsers() || pattern.getRoles()) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
Status(ErrorCodes::Unauthorized,
"Not authorized to impersonate in killAllSessionsByPattern"));
@@ -125,7 +126,8 @@ public:
KillAllSessionsByPatternSet patterns{ksc.getKillAllSessionsByPattern().begin(),
ksc.getKillAllSessionsByPattern().end()};
- return appendCommandStatus(result, killSessionsCmdHelper(opCtx, result, patterns));
+ return CommandHelpers::appendCommandStatus(result,
+ killSessionsCmdHelper(opCtx, result, patterns));
}
} killAllSessionsByPatternCommand;
diff --git a/src/mongo/db/commands/kill_all_sessions_command.cpp b/src/mongo/db/commands/kill_all_sessions_command.cpp
index d0ef93cb466..907ffbe51d1 100644
--- a/src/mongo/db/commands/kill_all_sessions_command.cpp
+++ b/src/mongo/db/commands/kill_all_sessions_command.cpp
@@ -94,7 +94,8 @@ public:
if (serverGlobalParams.featureCompatibility.getVersion() !=
ServerGlobalParams::FeatureCompatibility::Version::kFullyUpgradedTo36) {
- return appendCommandStatus(result, SessionsCommandFCV34Status(getName()));
+ return CommandHelpers::appendCommandStatus(result,
+ SessionsCommandFCV34Status(getName()));
}
IDLParserErrorContext ctx("KillAllSessionsCmd");
@@ -113,7 +114,8 @@ public:
}
}
- return appendCommandStatus(result, killSessionsCmdHelper(opCtx, result, patterns));
+ return CommandHelpers::appendCommandStatus(result,
+ killSessionsCmdHelper(opCtx, result, patterns));
}
} killAllSessionsCommand;
diff --git a/src/mongo/db/commands/kill_sessions_command.cpp b/src/mongo/db/commands/kill_sessions_command.cpp
index 9336cf1f588..7b0e5b1375a 100644
--- a/src/mongo/db/commands/kill_sessions_command.cpp
+++ b/src/mongo/db/commands/kill_sessions_command.cpp
@@ -117,7 +117,8 @@ public:
if (serverGlobalParams.featureCompatibility.getVersion() !=
ServerGlobalParams::FeatureCompatibility::Version::kFullyUpgradedTo36) {
- return appendCommandStatus(result, SessionsCommandFCV34Status(getName()));
+ return CommandHelpers::appendCommandStatus(result,
+ SessionsCommandFCV34Status(getName()));
}
IDLParserErrorContext ctx("KillSessionsCmd");
@@ -139,7 +140,8 @@ public:
}
}
- return appendCommandStatus(result, killSessionsCmdHelper(opCtx, result, patterns));
+ return CommandHelpers::appendCommandStatus(result,
+ killSessionsCmdHelper(opCtx, result, patterns));
}
} killSessionsCommand;
diff --git a/src/mongo/db/commands/killcursors_common.cpp b/src/mongo/db/commands/killcursors_common.cpp
index 39edb0a7c6f..51c7909f107 100644
--- a/src/mongo/db/commands/killcursors_common.cpp
+++ b/src/mongo/db/commands/killcursors_common.cpp
@@ -71,7 +71,7 @@ bool KillCursorsCmdBase::run(OperationContext* opCtx,
BSONObjBuilder& result) {
auto statusWithRequest = KillCursorsRequest::parseFromBSON(dbname, cmdObj);
if (!statusWithRequest.isOK()) {
- return appendCommandStatus(result, statusWithRequest.getStatus());
+ return CommandHelpers::appendCommandStatus(result, statusWithRequest.getStatus());
}
auto killCursorsRequest = std::move(statusWithRequest.getValue());
diff --git a/src/mongo/db/commands/list_collections.cpp b/src/mongo/db/commands/list_collections.cpp
index 20560d3a4c9..d596a1644fc 100644
--- a/src/mongo/db/commands/list_collections.cpp
+++ b/src/mongo/db/commands/list_collections.cpp
@@ -239,7 +239,7 @@ public:
BSONElement filterElt = jsobj["filter"];
if (!filterElt.eoo()) {
if (filterElt.type() != mongo::Object) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result, Status(ErrorCodes::BadValue, "\"filter\" must be an object"));
}
// The collator is null because collection objects are compared using binary comparison.
@@ -248,7 +248,7 @@ public:
StatusWithMatchExpression statusWithMatcher =
MatchExpressionParser::parse(filterElt.Obj(), std::move(expCtx));
if (!statusWithMatcher.isOK()) {
- return appendCommandStatus(result, statusWithMatcher.getStatus());
+ return CommandHelpers::appendCommandStatus(result, statusWithMatcher.getStatus());
}
matcher = std::move(statusWithMatcher.getValue());
}
@@ -258,7 +258,7 @@ public:
Status parseCursorStatus =
CursorRequest::parseCommandCursorOptions(jsobj, defaultBatchSize, &batchSize);
if (!parseCursorStatus.isOK()) {
- return appendCommandStatus(result, parseCursorStatus);
+ return CommandHelpers::appendCommandStatus(result, parseCursorStatus);
}
// Check for 'includePendingDrops' flag. The default is to not include drop-pending
@@ -268,7 +268,7 @@ public:
jsobj, "includePendingDrops", false, &includePendingDrops);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
AutoGetDb autoDb(opCtx, dbname, MODE_S);
@@ -316,7 +316,7 @@ public:
auto statusWithPlanExecutor = PlanExecutor::make(
opCtx, std::move(ws), std::move(root), cursorNss, PlanExecutor::NO_YIELD);
if (!statusWithPlanExecutor.isOK()) {
- return appendCommandStatus(result, statusWithPlanExecutor.getStatus());
+ return CommandHelpers::appendCommandStatus(result, statusWithPlanExecutor.getStatus());
}
auto exec = std::move(statusWithPlanExecutor.getValue());
diff --git a/src/mongo/db/commands/list_databases.cpp b/src/mongo/db/commands/list_databases.cpp
index aa5130c4410..0012e114c90 100644
--- a/src/mongo/db/commands/list_databases.cpp
+++ b/src/mongo/db/commands/list_databases.cpp
@@ -95,11 +95,11 @@ public:
std::unique_ptr<MatchExpression> filter;
if (auto filterElt = jsobj[kFilterField]) {
if (filterElt.type() != BSONType::Object) {
- return appendCommandStatus(result,
- {ErrorCodes::TypeMismatch,
- str::stream() << "Field '" << kFilterField
- << "' must be of type Object in: "
- << jsobj});
+ return CommandHelpers::appendCommandStatus(
+ result,
+ {ErrorCodes::TypeMismatch,
+ str::stream() << "Field '" << kFilterField << "' must be of type Object in: "
+ << jsobj});
}
// The collator is null because database metadata objects are compared using simple
// binary comparison.
@@ -108,7 +108,7 @@ public:
auto statusWithMatcher =
MatchExpressionParser::parse(filterElt.Obj(), std::move(expCtx));
if (!statusWithMatcher.isOK()) {
- return appendCommandStatus(result, statusWithMatcher.getStatus());
+ return CommandHelpers::appendCommandStatus(result, statusWithMatcher.getStatus());
}
filter = std::move(statusWithMatcher.getValue());
}
diff --git a/src/mongo/db/commands/list_indexes.cpp b/src/mongo/db/commands/list_indexes.cpp
index d94c798db00..10e4be8af33 100644
--- a/src/mongo/db/commands/list_indexes.cpp
+++ b/src/mongo/db/commands/list_indexes.cpp
@@ -104,7 +104,8 @@ public:
// Check for the listIndexes ActionType on the database, or find on system.indexes for pre
// 3.0 systems.
- const NamespaceString ns(parseNsOrUUID(client->getOperationContext(), dbname, cmdObj));
+ const NamespaceString ns(
+ CommandHelpers::parseNsOrUUID(client->getOperationContext(), dbname, cmdObj));
if (authzSession->isAuthorizedForActionsOnResource(ResourcePattern::forExactNamespace(ns),
ActionType::listIndexes) ||
authzSession->isAuthorizedForActionsOnResource(
@@ -125,25 +126,25 @@ public:
const BSONObj& cmdObj,
BSONObjBuilder& result) {
Lock::DBLock dbSLock(opCtx, dbname, MODE_IS);
- const NamespaceString ns(parseNsOrUUID(opCtx, dbname, cmdObj));
+ const NamespaceString ns(CommandHelpers::parseNsOrUUID(opCtx, dbname, cmdObj));
const long long defaultBatchSize = std::numeric_limits<long long>::max();
long long batchSize;
Status parseCursorStatus =
CursorRequest::parseCommandCursorOptions(cmdObj, defaultBatchSize, &batchSize);
if (!parseCursorStatus.isOK()) {
- return appendCommandStatus(result, parseCursorStatus);
+ return CommandHelpers::appendCommandStatus(result, parseCursorStatus);
}
AutoGetCollectionForReadCommand autoColl(opCtx, ns, std::move(dbSLock));
if (!autoColl.getDb()) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
Status(ErrorCodes::NamespaceNotFound, "Database " + ns.db() + " doesn't exist"));
}
const Collection* collection = autoColl.getCollection();
if (!collection) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
Status(ErrorCodes::NamespaceNotFound, "Collection " + ns.ns() + " doesn't exist"));
}
@@ -181,7 +182,7 @@ public:
auto statusWithPlanExecutor = PlanExecutor::make(
opCtx, std::move(ws), std::move(root), cursorNss, PlanExecutor::NO_YIELD);
if (!statusWithPlanExecutor.isOK()) {
- return appendCommandStatus(result, statusWithPlanExecutor.getStatus());
+ return CommandHelpers::appendCommandStatus(result, statusWithPlanExecutor.getStatus());
}
auto exec = std::move(statusWithPlanExecutor.getValue());
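list_indexes.cpp, like count_cmd.cpp and find_cmd.cpp earlier, resolves its target namespace from either a collection name or a UUID via CommandHelpers::parseNsOrUUID; in the hunks above the database lock is taken before the resolution. A sketch; resolveTargetNss is an assumed name, and the caller is presumed to hold the lock.

// Sketch only: resolve the target namespace from a name or UUID, as the
// listIndexes hunk above does after taking its MODE_IS database lock.
NamespaceString resolveTargetNss(OperationContext* opCtx,
                                 const std::string& dbname,
                                 const BSONObj& cmdObj) {
    // Was: parseNsOrUUID(opCtx, dbname, cmdObj), inherited from Command.
    return CommandHelpers::parseNsOrUUID(opCtx, dbname, cmdObj);
}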
diff --git a/src/mongo/db/commands/mr.cpp b/src/mongo/db/commands/mr.cpp
index 90ee22d85a0..03196350ca2 100644
--- a/src/mongo/db/commands/mr.cpp
+++ b/src/mongo/db/commands/mr.cpp
@@ -1395,7 +1395,7 @@ public:
auto client = opCtx->getClient();
if (client->isInDirectClient()) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
Status(ErrorCodes::IllegalOperation, "Cannot run mapReduce command from eval()"));
}
@@ -1422,7 +1422,7 @@ public:
BSONObjBuilder timingBuilder;
State state(opCtx, config);
if (!state.sourceExists()) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
Status(ErrorCodes::NamespaceNotFound,
str::stream() << "namespace does not exist: " << config.nss.ns()));
@@ -1565,7 +1565,7 @@ public:
auto restoreStatus = exec->restoreState();
if (!restoreStatus.isOK()) {
- return appendCommandStatus(result, restoreStatus);
+ return CommandHelpers::appendCommandStatus(result, restoreStatus);
}
reduceTime += t.micros();
@@ -1580,7 +1580,7 @@ public:
}
if (PlanExecutor::DEAD == execState || PlanExecutor::FAILURE == execState) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
Status(ErrorCodes::OperationFailed,
str::stream() << "Executor error during mapReduce command: "
@@ -1712,7 +1712,7 @@ public:
const BSONObj& cmdObj,
BSONObjBuilder& result) {
if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
Status(ErrorCodes::CommandNotSupported,
str::stream() << "Can not execute mapReduce with output database " << dbname
@@ -1791,7 +1791,8 @@ public:
auto outRoutingInfoStatus = Grid::get(opCtx)->catalogCache()->getCollectionRoutingInfo(
opCtx, config.outputOptions.finalNamespace);
if (!outRoutingInfoStatus.isOK()) {
- return appendCommandStatus(result, outRoutingInfoStatus.getStatus());
+ return CommandHelpers::appendCommandStatus(result,
+ outRoutingInfoStatus.getStatus());
}
if (auto cm = outRoutingInfoStatus.getValue().cm()) {
diff --git a/src/mongo/db/commands/oplog_application_checks.cpp b/src/mongo/db/commands/oplog_application_checks.cpp
index 1fbbe738a33..4ef23c19a2c 100644
--- a/src/mongo/db/commands/oplog_application_checks.cpp
+++ b/src/mongo/db/commands/oplog_application_checks.cpp
@@ -76,7 +76,7 @@ Status OplogApplicationChecks::checkOperationAuthorization(OperationContext* opC
if (opType == "c"_sd) {
StringData commandName = o.firstElement().fieldNameStringData();
- Command* commandInOplogEntry = Command::findCommand(commandName);
+ Command* commandInOplogEntry = CommandHelpers::findCommand(commandName);
if (!commandInOplogEntry) {
return Status(ErrorCodes::FailedToParse, "Unrecognized command in op");
}
diff --git a/src/mongo/db/commands/oplog_note.cpp b/src/mongo/db/commands/oplog_note.cpp
index 914da7721ff..6ebbc1e28bd 100644
--- a/src/mongo/db/commands/oplog_note.cpp
+++ b/src/mongo/db/commands/oplog_note.cpp
@@ -119,15 +119,16 @@ public:
BSONObjBuilder& result) {
auto replCoord = repl::ReplicationCoordinator::get(opCtx);
if (!replCoord->isReplEnabled()) {
- return appendCommandStatus(result,
- {ErrorCodes::NoReplicationEnabled,
- "Must have replication set up to run \"appendOplogNote\""});
+ return CommandHelpers::appendCommandStatus(
+ result,
+ {ErrorCodes::NoReplicationEnabled,
+ "Must have replication set up to run \"appendOplogNote\""});
}
BSONElement dataElement;
auto dataStatus = bsonExtractTypedField(cmdObj, "data", Object, &dataElement);
if (!dataStatus.isOK()) {
- return appendCommandStatus(result, dataStatus);
+ return CommandHelpers::appendCommandStatus(result, dataStatus);
}
Timestamp maxClusterTime;
@@ -136,22 +137,23 @@ public:
if (!maxClusterTimeStatus.isOK()) {
if (maxClusterTimeStatus == ErrorCodes::NoSuchKey) { // no need to use maxClusterTime
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result, _performNoopWrite(opCtx, dataElement.Obj(), "appendOpLogNote"));
}
- return appendCommandStatus(result, maxClusterTimeStatus);
+ return CommandHelpers::appendCommandStatus(result, maxClusterTimeStatus);
}
auto lastAppliedOpTime = replCoord->getMyLastAppliedOpTime().getTimestamp();
if (maxClusterTime > lastAppliedOpTime) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result, _performNoopWrite(opCtx, dataElement.Obj(), "appendOpLogNote"));
} else {
std::stringstream ss;
ss << "Requested maxClusterTime " << LogicalTime(maxClusterTime).toString()
<< " is less or equal to the last primary OpTime: "
<< LogicalTime(lastAppliedOpTime).toString();
- return appendCommandStatus(result, {ErrorCodes::StaleClusterTime, ss.str()});
+ return CommandHelpers::appendCommandStatus(result,
+ {ErrorCodes::StaleClusterTime, ss.str()});
}
}
};
diff --git a/src/mongo/db/commands/parallel_collection_scan.cpp b/src/mongo/db/commands/parallel_collection_scan.cpp
index 533daa6ab94..ec72bc03324 100644
--- a/src/mongo/db/commands/parallel_collection_scan.cpp
+++ b/src/mongo/db/commands/parallel_collection_scan.cpp
@@ -83,7 +83,7 @@ public:
return Status(ErrorCodes::Unauthorized, "Unauthorized");
}
- const NamespaceString ns(parseNsOrUUID(opCtx, dbname, cmdObj));
+ const NamespaceString ns(CommandHelpers::parseNsOrUUID(opCtx, dbname, cmdObj));
if (!authSession->isAuthorizedForActionsOnNamespace(ns, ActionType::find)) {
return Status(ErrorCodes::Unauthorized, "Unauthorized");
}
@@ -96,25 +96,26 @@ public:
const BSONObj& cmdObj,
BSONObjBuilder& result) {
Lock::DBLock dbSLock(opCtx, dbname, MODE_IS);
- const NamespaceString ns(parseNsOrUUID(opCtx, dbname, cmdObj));
+ const NamespaceString ns(CommandHelpers::parseNsOrUUID(opCtx, dbname, cmdObj));
AutoGetCollectionForReadCommand ctx(opCtx, ns, std::move(dbSLock));
Collection* collection = ctx.getCollection();
if (!collection)
- return appendCommandStatus(result,
- Status(ErrorCodes::NamespaceNotFound,
- str::stream() << "ns does not exist: " << ns.ns()));
+ return CommandHelpers::appendCommandStatus(
+ result,
+ Status(ErrorCodes::NamespaceNotFound,
+ str::stream() << "ns does not exist: " << ns.ns()));
size_t numCursors = static_cast<size_t>(cmdObj["numCursors"].numberInt());
if (numCursors == 0 || numCursors > 10000)
- return appendCommandStatus(result,
- Status(ErrorCodes::BadValue,
- str::stream()
- << "numCursors has to be between 1 and 10000"
- << " was: "
- << numCursors));
+ return CommandHelpers::appendCommandStatus(
+ result,
+ Status(ErrorCodes::BadValue,
+ str::stream() << "numCursors has to be between 1 and 10000"
+ << " was: "
+ << numCursors));
std::vector<std::unique_ptr<RecordCursor>> iterators;
// Opening multiple cursors on a capped collection and reading them in parallel can produce
diff --git a/src/mongo/db/commands/parameters.cpp b/src/mongo/db/commands/parameters.cpp
index f75a440bb77..224b0cb0fcc 100644
--- a/src/mongo/db/commands/parameters.cpp
+++ b/src/mongo/db/commands/parameters.cpp
@@ -162,7 +162,7 @@ public:
while (parameterCheckIterator.more()) {
BSONElement parameter = parameterCheckIterator.next();
std::string parameterName = parameter.fieldName();
- if (Command::isGenericArgument(parameterName))
+ if (CommandHelpers::isGenericArgument(parameterName))
continue;
ServerParameter::Map::const_iterator foundParameter = parameterMap.find(parameterName);
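
The parameters.cpp hunk applies the same move to isGenericArgument, which the setParameter validation loop uses to skip generic top-level command fields while scanning for server parameters. A minimal sketch of that filtering loop follows; the field list in it is an abbreviated, hypothetical stand-in for the server's own table behind CommandHelpers::isGenericArgument.

    // Sketch only: kGenericFieldsSketch is a hypothetical, abbreviated list.
    #include <iostream>
    #include <set>
    #include <string>
    #include <utility>
    #include <vector>

    static const std::set<std::string> kGenericFieldsSketch = {
        "maxTimeMS", "writeConcern", "readConcern", "lsid"};

    bool isGenericArgumentSketch(const std::string& fieldName) {
        return kGenericFieldsSketch.count(fieldName) > 0;
    }

    int main() {
        // Stand-in for iterating the fields of a setParameter command object.
        std::vector<std::pair<std::string, std::string>> cmdFields = {
            {"setParameter", "1"}, {"logLevel", "2"}, {"maxTimeMS", "5000"}};
        for (const auto& field : cmdFields) {
            if (isGenericArgumentSketch(field.first))
                continue;  // generic arguments are not server parameters
            std::cout << "would look up server parameter: " << field.first << "\n";
        }
        return 0;
    }
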
diff --git a/src/mongo/db/commands/pipeline_command.cpp b/src/mongo/db/commands/pipeline_command.cpp
index 68c74a0e865..7f2586f5827 100644
--- a/src/mongo/db/commands/pipeline_command.cpp
+++ b/src/mongo/db/commands/pipeline_command.cpp
@@ -88,12 +88,13 @@ public:
const auto aggregationRequest =
uassertStatusOK(AggregationRequest::parseFromBSON(dbname, cmdObj, boost::none));
- return appendCommandStatus(result,
- runAggregate(opCtx,
- aggregationRequest.getNamespaceString(),
- aggregationRequest,
- cmdObj,
- result));
+ return CommandHelpers::appendCommandStatus(
+ result,
+ runAggregate(opCtx,
+ aggregationRequest.getNamespaceString(),
+ aggregationRequest,
+ cmdObj,
+ result));
}
Status explain(OperationContext* opCtx,
diff --git a/src/mongo/db/commands/plan_cache_commands.cpp b/src/mongo/db/commands/plan_cache_commands.cpp
index 002a4e9d94d..fc8ef25f39e 100644
--- a/src/mongo/db/commands/plan_cache_commands.cpp
+++ b/src/mongo/db/commands/plan_cache_commands.cpp
@@ -114,9 +114,9 @@ bool PlanCacheCommand::run(OperationContext* opCtx,
const string& dbname,
const BSONObj& cmdObj,
BSONObjBuilder& result) {
- const NamespaceString nss(parseNsCollectionRequired(dbname, cmdObj));
+ const NamespaceString nss(CommandHelpers::parseNsCollectionRequired(dbname, cmdObj));
Status status = runPlanCacheCommand(opCtx, nss.ns(), cmdObj, &result);
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
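
plan_cache_commands.cpp gives the namespace-parsing helper the same treatment: CommandHelpers::parseNsCollectionRequired builds the target namespace from the database name plus the collection named by the command's first element, and fails when no collection is supplied. A simplified sketch of that assembly is below; the real helper validates through uassert rather than throwing a standard-library exception.

    // Hypothetical sketch of the namespace assembly only.
    #include <iostream>
    #include <stdexcept>
    #include <string>

    std::string parseNsCollectionRequiredSketch(const std::string& dbname,
                                                const std::string& firstFieldValue) {
        if (firstFieldValue.empty())
            throw std::invalid_argument("a collection name must be provided");
        return dbname + "." + firstFieldValue;  // e.g. "test" + "coll" -> "test.coll"
    }

    int main() {
        std::cout << parseNsCollectionRequiredSketch("test", "coll") << "\n";  // test.coll
        return 0;
    }
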
diff --git a/src/mongo/db/commands/reap_logical_session_cache_now.cpp b/src/mongo/db/commands/reap_logical_session_cache_now.cpp
index a1817d4f24e..50abcd781ad 100644
--- a/src/mongo/db/commands/reap_logical_session_cache_now.cpp
+++ b/src/mongo/db/commands/reap_logical_session_cache_now.cpp
@@ -76,7 +76,7 @@ public:
auto res = cache->reapNow(client);
if (!res.isOK()) {
- return appendCommandStatus(result, res);
+ return CommandHelpers::appendCommandStatus(result, res);
}
return true;
diff --git a/src/mongo/db/commands/refresh_logical_session_cache_now.cpp b/src/mongo/db/commands/refresh_logical_session_cache_now.cpp
index 8629d98e287..7f21f14b952 100644
--- a/src/mongo/db/commands/refresh_logical_session_cache_now.cpp
+++ b/src/mongo/db/commands/refresh_logical_session_cache_now.cpp
@@ -81,7 +81,8 @@ public:
if (serverGlobalParams.featureCompatibility.getVersion() !=
ServerGlobalParams::FeatureCompatibility::Version::kFullyUpgradedTo36) {
- return appendCommandStatus(result, SessionsCommandFCV34Status(getName()));
+ return CommandHelpers::appendCommandStatus(result,
+ SessionsCommandFCV34Status(getName()));
}
auto cache = LogicalSessionCache::get(opCtx);
@@ -89,7 +90,7 @@ public:
auto res = cache->refreshNow(client);
if (!res.isOK()) {
- return appendCommandStatus(result, res);
+ return CommandHelpers::appendCommandStatus(result, res);
}
return true;
diff --git a/src/mongo/db/commands/refresh_sessions_command.cpp b/src/mongo/db/commands/refresh_sessions_command.cpp
index 8e65c32fa9d..3f1d3d932cd 100644
--- a/src/mongo/db/commands/refresh_sessions_command.cpp
+++ b/src/mongo/db/commands/refresh_sessions_command.cpp
@@ -86,7 +86,8 @@ public:
if (serverGlobalParams.featureCompatibility.getVersion() !=
ServerGlobalParams::FeatureCompatibility::Version::kFullyUpgradedTo36) {
- return appendCommandStatus(result, SessionsCommandFCV34Status(getName()));
+ return CommandHelpers::appendCommandStatus(result,
+ SessionsCommandFCV34Status(getName()));
}
IDLParserErrorContext ctx("RefreshSessionsCmdFromClient");
@@ -94,7 +95,7 @@ public:
auto res =
LogicalSessionCache::get(opCtx->getServiceContext())->refreshSessions(opCtx, cmd);
if (!res.isOK()) {
- return appendCommandStatus(result, res);
+ return CommandHelpers::appendCommandStatus(result, res);
}
return true;
diff --git a/src/mongo/db/commands/refresh_sessions_command_internal.cpp b/src/mongo/db/commands/refresh_sessions_command_internal.cpp
index 1ce8702a1db..74e36cf5462 100644
--- a/src/mongo/db/commands/refresh_sessions_command_internal.cpp
+++ b/src/mongo/db/commands/refresh_sessions_command_internal.cpp
@@ -79,7 +79,7 @@ public:
auto res =
LogicalSessionCache::get(opCtx->getServiceContext())->refreshSessions(opCtx, cmd);
if (!res.isOK()) {
- return appendCommandStatus(result, res);
+ return CommandHelpers::appendCommandStatus(result, res);
}
return true;
diff --git a/src/mongo/db/commands/rename_collection_cmd.cpp b/src/mongo/db/commands/rename_collection_cmd.cpp
index 6385dcc5d0f..c4217c02e6e 100644
--- a/src/mongo/db/commands/rename_collection_cmd.cpp
+++ b/src/mongo/db/commands/rename_collection_cmd.cpp
@@ -145,16 +145,18 @@ public:
}
if (source.isAdminDotSystemDotVersion()) {
- appendCommandStatus(result,
- Status(ErrorCodes::IllegalOperation,
- "renaming admin.system.version is not allowed"));
+ CommandHelpers::appendCommandStatus(
+ result,
+ Status(ErrorCodes::IllegalOperation,
+ "renaming admin.system.version is not allowed"));
return false;
}
RenameCollectionOptions options;
options.dropTarget = cmdObj["dropTarget"].trueValue();
options.stayTemp = cmdObj["stayTemp"].trueValue();
- return appendCommandStatus(result, renameCollection(opCtx, source, target, options));
+ return CommandHelpers::appendCommandStatus(
+ result, renameCollection(opCtx, source, target, options));
}
} cmdrenamecollection;
diff --git a/src/mongo/db/commands/repair_cursor.cpp b/src/mongo/db/commands/repair_cursor.cpp
index 5154731e423..51567abbabc 100644
--- a/src/mongo/db/commands/repair_cursor.cpp
+++ b/src/mongo/db/commands/repair_cursor.cpp
@@ -77,13 +77,13 @@ public:
Collection* collection = ctx.getCollection();
if (!collection) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result, Status(ErrorCodes::NamespaceNotFound, "ns does not exist: " + ns.ns()));
}
auto cursor = collection->getRecordStore()->getCursorForRepair(opCtx);
if (!cursor) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result, Status(ErrorCodes::CommandNotSupported, "repair iterator not supported"));
}
diff --git a/src/mongo/db/commands/resize_oplog.cpp b/src/mongo/db/commands/resize_oplog.cpp
index e8d34e251a9..7c85812ba14 100644
--- a/src/mongo/db/commands/resize_oplog.cpp
+++ b/src/mongo/db/commands/resize_oplog.cpp
@@ -89,20 +89,20 @@ public:
Lock::GlobalWrite global(opCtx);
Database* database = dbHolder().get(opCtx, nss.db());
if (!database) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result, Status(ErrorCodes::NamespaceNotFound, "database local does not exist"));
}
Collection* coll = database->getCollection(opCtx, nss);
if (!coll) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result, Status(ErrorCodes::NamespaceNotFound, "oplog does not exist"));
}
if (!coll->isCapped()) {
- return appendCommandStatus(result,
- Status(ErrorCodes::IllegalOperation, "oplog isn't capped"));
+ return CommandHelpers::appendCommandStatus(
+ result, Status(ErrorCodes::IllegalOperation, "oplog isn't capped"));
}
if (!jsobj["size"].isNumber()) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
Status(ErrorCodes::InvalidOptions, "invalid size field, size should be a number"));
}
@@ -110,19 +110,19 @@ public:
long long sizeMb = jsobj["size"].numberLong();
long long size = sizeMb * 1024 * 1024;
if (sizeMb < 990L) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result, Status(ErrorCodes::InvalidOptions, "oplog size should be 990MB at least"));
}
WriteUnitOfWork wunit(opCtx);
Status status = coll->getRecordStore()->updateCappedSize(opCtx, size);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
CollectionCatalogEntry* entry = coll->getCatalogEntry();
entry->updateCappedSize(opCtx, size);
wunit.commit();
LOG(0) << "replSetResizeOplog success, currentSize:" << size;
- return appendCommandStatus(result, Status::OK());
+ return CommandHelpers::appendCommandStatus(result, Status::OK());
}
} cmdReplSetResizeOplog;
}
diff --git a/src/mongo/db/commands/set_feature_compatibility_version_command.cpp b/src/mongo/db/commands/set_feature_compatibility_version_command.cpp
index edf382aede7..0819e15656f 100644
--- a/src/mongo/db/commands/set_feature_compatibility_version_command.cpp
+++ b/src/mongo/db/commands/set_feature_compatibility_version_command.cpp
@@ -127,7 +127,7 @@ public:
WriteConcernOptions(
WriteConcernOptions::kMajority, WriteConcernOptions::SyncMode::UNSET, timeout),
&res);
- Command::appendCommandWCStatus(result, waitForWCStatus, res);
+ CommandHelpers::appendCommandWCStatus(result, waitForWCStatus, res);
});
// Only allow one instance of setFeatureCompatibilityVersion to run at a time.
@@ -168,9 +168,11 @@ public:
uassertStatusOK(
ShardingCatalogManager::get(opCtx)->setFeatureCompatibilityVersionOnShards(
opCtx,
- Command::appendMajorityWriteConcern(Command::appendPassthroughFields(
- cmdObj,
- BSON(FeatureCompatibilityVersion::kCommandName << requestedVersion)))));
+ CommandHelpers::appendMajorityWriteConcern(
+ CommandHelpers::appendPassthroughFields(
+ cmdObj,
+ BSON(FeatureCompatibilityVersion::kCommandName
+ << requestedVersion)))));
}
if (ShardingState::get(opCtx)->enabled()) {
@@ -216,9 +218,11 @@ public:
uassertStatusOK(
ShardingCatalogManager::get(opCtx)->setFeatureCompatibilityVersionOnShards(
opCtx,
- Command::appendMajorityWriteConcern(Command::appendPassthroughFields(
- cmdObj,
- BSON(FeatureCompatibilityVersion::kCommandName << requestedVersion)))));
+ CommandHelpers::appendMajorityWriteConcern(
+ CommandHelpers::appendPassthroughFields(
+ cmdObj,
+ BSON(FeatureCompatibilityVersion::kCommandName
+ << requestedVersion)))));
// Stop the background key generator thread from running before trying to drop the
// collection so we know the key won't just be recreated.
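
In set_feature_compatibility_version_command.cpp the command forwarded to the shards is rebuilt through two of the moved helpers: appendPassthroughFields carries generic fields from the original cmdObj onto a fresh command body, and appendMajorityWriteConcern then forces a majority writeConcern on the result. The sketch below composes the two over std::map stand-ins for BSONObj; the copy rules and the generic-field list are simplified assumptions, not the server's exact behaviour.

    // Sketch with std::map stand-ins for BSONObj; rules are simplified assumptions.
    #include <iostream>
    #include <map>
    #include <set>
    #include <string>

    using Doc = std::map<std::string, std::string>;

    Doc appendPassthroughFieldsSketch(const Doc& original, Doc newBody) {
        static const std::set<std::string> genericFields = {"maxTimeMS", "lsid"};
        for (const auto& kv : original)
            if (genericFields.count(kv.first) && !newBody.count(kv.first))
                newBody[kv.first] = kv.second;  // carry generic args over
        return newBody;
    }

    Doc appendMajorityWriteConcernSketch(Doc cmd) {
        cmd["writeConcern"] = "{ w: \"majority\" }";  // force majority write concern
        return cmd;
    }

    int main() {
        Doc original = {{"setFeatureCompatibilityVersion", "3.6"}, {"maxTimeMS", "60000"}};
        Doc forwarded = appendMajorityWriteConcernSketch(
            appendPassthroughFieldsSketch(original, {{"setFeatureCompatibilityVersion", "3.6"}}));
        for (const auto& kv : forwarded)
            std::cout << kv.first << " = " << kv.second << "\n";
        return 0;
    }
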
diff --git a/src/mongo/db/commands/snapshot_management.cpp b/src/mongo/db/commands/snapshot_management.cpp
index 5bad41d15eb..349befd56f1 100644
--- a/src/mongo/db/commands/snapshot_management.cpp
+++ b/src/mongo/db/commands/snapshot_management.cpp
@@ -72,7 +72,8 @@ public:
auto snapshotManager =
getGlobalServiceContext()->getGlobalStorageEngine()->getSnapshotManager();
if (!snapshotManager) {
- return appendCommandStatus(result, {ErrorCodes::CommandNotSupported, ""});
+ return CommandHelpers::appendCommandStatus(result,
+ {ErrorCodes::CommandNotSupported, ""});
}
Lock::GlobalLock lk(opCtx, MODE_IX, UINT_MAX);
@@ -82,7 +83,7 @@ public:
const auto name = repl::ReplicationCoordinator::get(opCtx)->reserveSnapshotName(opCtx);
result.append("name", static_cast<long long>(name.asULL()));
}
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
};
@@ -118,7 +119,8 @@ public:
auto snapshotManager =
getGlobalServiceContext()->getGlobalStorageEngine()->getSnapshotManager();
if (!snapshotManager) {
- return appendCommandStatus(result, {ErrorCodes::CommandNotSupported, ""});
+ return CommandHelpers::appendCommandStatus(result,
+ {ErrorCodes::CommandNotSupported, ""});
}
Lock::GlobalLock lk(opCtx, MODE_IX, UINT_MAX);
diff --git a/src/mongo/db/commands/start_session_command.cpp b/src/mongo/db/commands/start_session_command.cpp
index 70b153b6789..9d03da70fbe 100644
--- a/src/mongo/db/commands/start_session_command.cpp
+++ b/src/mongo/db/commands/start_session_command.cpp
@@ -82,7 +82,8 @@ public:
if (serverGlobalParams.featureCompatibility.getVersion() !=
ServerGlobalParams::FeatureCompatibility::Version::kFullyUpgradedTo36) {
- return appendCommandStatus(result, SessionsCommandFCV34Status(getName()));
+ return CommandHelpers::appendCommandStatus(result,
+ SessionsCommandFCV34Status(getName()));
}
auto client = opCtx->getClient();
@@ -96,7 +97,7 @@ public:
} catch (...) {
auto status = exceptionToStatus();
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
lsCache->startSession(opCtx, record.get());
diff --git a/src/mongo/db/commands/test_commands.cpp b/src/mongo/db/commands/test_commands.cpp
index 0b3d1e2091d..2dd96eeee8c 100644
--- a/src/mongo/db/commands/test_commands.cpp
+++ b/src/mongo/db/commands/test_commands.cpp
@@ -80,7 +80,7 @@ public:
const BSONObj& cmdObj,
string& errmsg,
BSONObjBuilder& result) {
- const NamespaceString nss(parseNsCollectionRequired(dbname, cmdObj));
+ const NamespaceString nss(CommandHelpers::parseNsCollectionRequired(dbname, cmdObj));
log() << "test only command godinsert invoked coll:" << nss.coll();
BSONObj obj = cmdObj["obj"].embeddedObjectUserCheck();
@@ -103,7 +103,7 @@ public:
if (status.isOK()) {
wunit.commit();
}
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
};
@@ -215,9 +215,9 @@ public:
const string& dbname,
const BSONObj& cmdObj,
BSONObjBuilder& result) {
- const NamespaceString fullNs = parseNsCollectionRequired(dbname, cmdObj);
+ const NamespaceString fullNs = CommandHelpers::parseNsCollectionRequired(dbname, cmdObj);
if (!fullNs.isValid()) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
{ErrorCodes::InvalidNamespace,
str::stream() << "collection name " << fullNs.ns() << " is not valid"});
@@ -227,23 +227,23 @@ public:
bool inc = cmdObj.getBoolField("inc"); // inclusive range?
if (n <= 0) {
- return appendCommandStatus(result,
- {ErrorCodes::BadValue, "n must be a positive integer"});
+ return CommandHelpers::appendCommandStatus(
+ result, {ErrorCodes::BadValue, "n must be a positive integer"});
}
// Lock the database in mode IX and lock the collection exclusively.
AutoGetCollection autoColl(opCtx, fullNs, MODE_IX, MODE_X);
Collection* collection = autoColl.getCollection();
if (!collection) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
{ErrorCodes::NamespaceNotFound,
str::stream() << "collection " << fullNs.ns() << " does not exist"});
}
if (!collection->isCapped()) {
- return appendCommandStatus(result,
- {ErrorCodes::IllegalOperation, "collection must be capped"});
+ return CommandHelpers::appendCommandStatus(
+ result, {ErrorCodes::IllegalOperation, "collection must be capped"});
}
RecordId end;
@@ -257,7 +257,7 @@ public:
for (int i = 0; i < n + 1; ++i) {
PlanExecutor::ExecState state = exec->getNext(nullptr, &end);
if (PlanExecutor::ADVANCED != state) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
{ErrorCodes::IllegalOperation,
str::stream() << "invalid n, collection contains fewer than " << n
@@ -291,9 +291,9 @@ public:
const string& dbname,
const BSONObj& cmdObj,
BSONObjBuilder& result) {
- const NamespaceString nss = parseNsCollectionRequired(dbname, cmdObj);
+ const NamespaceString nss = CommandHelpers::parseNsCollectionRequired(dbname, cmdObj);
- return appendCommandStatus(result, emptyCapped(opCtx, nss));
+ return CommandHelpers::appendCommandStatus(result, emptyCapped(opCtx, nss));
}
};
diff --git a/src/mongo/db/commands/touch.cpp b/src/mongo/db/commands/touch.cpp
index fe7dd6b0cf5..2d430378475 100644
--- a/src/mongo/db/commands/touch.cpp
+++ b/src/mongo/db/commands/touch.cpp
@@ -87,7 +87,7 @@ public:
const BSONObj& cmdObj,
string& errmsg,
BSONObjBuilder& result) {
- const NamespaceString nss = parseNsCollectionRequired(dbname, cmdObj);
+ const NamespaceString nss = CommandHelpers::parseNsCollectionRequired(dbname, cmdObj);
if (!nss.isNormal()) {
errmsg = "bad namespace name";
return false;
@@ -109,8 +109,8 @@ public:
return false;
}
- return appendCommandStatus(result,
- collection->touch(opCtx, touch_data, touch_indexes, &result));
+ return CommandHelpers::appendCommandStatus(
+ result, collection->touch(opCtx, touch_data, touch_indexes, &result));
}
};
static TouchCmd touchCmd;
diff --git a/src/mongo/db/commands/user_management_commands.cpp b/src/mongo/db/commands/user_management_commands.cpp
index 5a4419bedff..0ad3dff7fc2 100644
--- a/src/mongo/db/commands/user_management_commands.cpp
+++ b/src/mongo/db/commands/user_management_commands.cpp
@@ -630,16 +630,16 @@ public:
auth::CreateOrUpdateUserArgs args;
Status status = auth::parseCreateOrUpdateUserCommands(cmdObj, "createUser", dbname, &args);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
if (args.userName.getDB() == "local") {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result, Status(ErrorCodes::BadValue, "Cannot create users in the local database"));
}
if (!args.hasHashedPassword && args.userName.getDB() != "$external") {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
Status(ErrorCodes::BadValue,
"Must provide a 'pwd' field for all user documents, except those"
@@ -647,7 +647,7 @@ public:
}
if ((args.hasHashedPassword) && args.userName.getDB() == "$external") {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
Status(ErrorCodes::BadValue,
"Cannot set the password for users defined on the '$external' "
@@ -655,7 +655,7 @@ public:
}
if (!args.hasRoles) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
Status(ErrorCodes::BadValue, "\"createUser\" command requires a \"roles\" array"));
}
@@ -663,11 +663,12 @@ public:
#ifdef MONGO_CONFIG_SSL
if (args.userName.getDB() == "$external" && getSSLManager() &&
getSSLManager()->getSSLConfiguration().isClusterMember(args.userName.getUser())) {
- return appendCommandStatus(result,
- Status(ErrorCodes::BadValue,
- "Cannot create an x.509 user with a subjectname "
- "that would be recognized as an internal "
- "cluster member."));
+ return CommandHelpers::appendCommandStatus(
+ result,
+ Status(ErrorCodes::BadValue,
+ "Cannot create an x.509 user with a subjectname "
+ "that would be recognized as an internal "
+ "cluster member."));
}
#endif
@@ -682,7 +683,7 @@ public:
int authzVersion;
status = authzManager->getAuthorizationVersion(opCtx, &authzVersion);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
BSONObjBuilder credentialsBuilder(userObjBuilder.subobjStart("credentials"));
@@ -712,14 +713,14 @@ public:
V2UserDocumentParser parser;
status = parser.checkValidUserDocument(userObj);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
stdx::lock_guard<stdx::mutex> lk(getAuthzDataMutex(serviceContext));
status = requireWritableAuthSchema28SCRAM(opCtx, authzManager);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
// Role existence has to be checked after acquiring the update lock
@@ -727,7 +728,7 @@ public:
BSONObj ignored;
status = authzManager->getRoleDescription(opCtx, args.roles[i], &ignored);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
}
@@ -738,7 +739,7 @@ public:
args.roles,
args.authenticationRestrictions);
status = insertPrivilegeDocument(opCtx, userObj);
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
virtual void redactForLogging(mutablebson::Document* cmdObj) {
@@ -776,19 +777,19 @@ public:
auth::CreateOrUpdateUserArgs args;
Status status = auth::parseCreateOrUpdateUserCommands(cmdObj, "updateUser", dbname, &args);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
if (!args.hasHashedPassword && !args.hasCustomData && !args.hasRoles &&
!args.authenticationRestrictions) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
Status(ErrorCodes::BadValue,
"Must specify at least one field to update in updateUser"));
}
if (args.hasHashedPassword && args.userName.getDB() == "$external") {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
Status(ErrorCodes::BadValue,
"Cannot set the password for users defined on the '$external' "
@@ -819,7 +820,8 @@ public:
auto swParsedRestrictions =
parseAuthenticationRestriction(*args.authenticationRestrictions);
if (!swParsedRestrictions.isOK()) {
- return appendCommandStatus(result, swParsedRestrictions.getStatus());
+ return CommandHelpers::appendCommandStatus(result,
+ swParsedRestrictions.getStatus());
}
updateSetBuilder.append("authenticationRestrictions",
@@ -847,7 +849,7 @@ public:
AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
status = requireWritableAuthSchema28SCRAM(opCtx, authzManager);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
// Role existence has to be checked after acquiring the update lock
@@ -856,7 +858,7 @@ public:
BSONObj ignored;
status = authzManager->getRoleDescription(opCtx, args.roles[i], &ignored);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
}
}
@@ -871,7 +873,7 @@ public:
status = updatePrivilegeDocument(opCtx, args.userName, updateDocumentBuilder.done());
// Must invalidate even on bad status - what if the write succeeded but the GLE failed?
authzManager->invalidateUserByName(args.userName);
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
virtual void redactForLogging(mutablebson::Document* cmdObj) {
@@ -909,7 +911,7 @@ public:
UserName userName;
Status status = auth::parseAndValidateDropUserCommand(cmdObj, dbname, &userName);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
ServiceContext* serviceContext = opCtx->getClient()->getServiceContext();
@@ -917,7 +919,7 @@ public:
AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
status = requireWritableAuthSchema28SCRAM(opCtx, authzManager);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
audit::logDropUser(Client::getCurrent(), userName);
@@ -932,11 +934,11 @@ public:
// Must invalidate even on bad status - what if the write succeeded but the GLE failed?
authzManager->invalidateUserByName(userName);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
if (nMatched == 0) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
Status(ErrorCodes::UserNotFound,
str::stream() << "User '" << userName.getFullName() << "' not found"));
@@ -975,7 +977,7 @@ public:
BSONObjBuilder& result) {
Status status = auth::parseAndValidateDropAllUsersFromDatabaseCommand(cmdObj, dbname);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
ServiceContext* serviceContext = opCtx->getClient()->getServiceContext();
stdx::lock_guard<stdx::mutex> lk(getAuthzDataMutex(serviceContext));
@@ -983,7 +985,7 @@ public:
AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
status = requireWritableAuthSchema28SCRAM(opCtx, authzManager);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
audit::logDropAllUsersFromDatabase(Client::getCurrent(), dbname);
@@ -994,7 +996,7 @@ public:
// Must invalidate even on bad status - what if the write succeeded but the GLE failed?
authzManager->invalidateUsersFromDB(dbname);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
result.append("n", numRemoved);
@@ -1034,7 +1036,7 @@ public:
Status status = auth::parseRolePossessionManipulationCommands(
cmdObj, "grantRolesToUser", dbname, &userNameString, &roles);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
ServiceContext* serviceContext = opCtx->getClient()->getServiceContext();
@@ -1043,14 +1045,14 @@ public:
AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
status = requireWritableAuthSchema28SCRAM(opCtx, authzManager);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
UserName userName(userNameString, dbname);
unordered_set<RoleName> userRoles;
status = getCurrentUserRoles(opCtx, authzManager, userName, &userRoles);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
for (vector<RoleName>::iterator it = roles.begin(); it != roles.end(); ++it) {
@@ -1058,7 +1060,7 @@ public:
BSONObj roleDoc;
status = authzManager->getRoleDescription(opCtx, roleName, &roleDoc);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
userRoles.insert(roleName);
@@ -1070,7 +1072,7 @@ public:
opCtx, userName, BSON("$set" << BSON("roles" << newRolesBSONArray)));
// Must invalidate even on bad status - what if the write succeeded but the GLE failed?
authzManager->invalidateUserByName(userName);
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
} cmdGrantRolesToUser;
@@ -1106,7 +1108,7 @@ public:
Status status = auth::parseRolePossessionManipulationCommands(
cmdObj, "revokeRolesFromUser", dbname, &userNameString, &roles);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
ServiceContext* serviceContext = opCtx->getClient()->getServiceContext();
@@ -1115,14 +1117,14 @@ public:
AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
status = requireWritableAuthSchema28SCRAM(opCtx, authzManager);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
UserName userName(userNameString, dbname);
unordered_set<RoleName> userRoles;
status = getCurrentUserRoles(opCtx, authzManager, userName, &userRoles);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
for (vector<RoleName>::iterator it = roles.begin(); it != roles.end(); ++it) {
@@ -1130,7 +1132,7 @@ public:
BSONObj roleDoc;
status = authzManager->getRoleDescription(opCtx, roleName, &roleDoc);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
userRoles.erase(roleName);
@@ -1142,7 +1144,7 @@ public:
opCtx, userName, BSON("$set" << BSON("roles" << newRolesBSONArray)));
// Must invalidate even on bad status - what if the write succeeded but the GLE failed?
authzManager->invalidateUserByName(userName);
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
} cmdRevokeRolesFromUser;
@@ -1180,18 +1182,18 @@ public:
auth::UsersInfoArgs args;
Status status = auth::parseUsersInfoCommand(cmdObj, dbname, &args);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
status = requireReadableAuthSchema26Upgrade(opCtx, getGlobalAuthorizationManager());
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
if (args.allForDB &&
(args.showPrivileges ||
args.authenticationRestrictionsFormat == AuthenticationRestrictionsFormat::kShow)) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
Status(ErrorCodes::IllegalOperation,
"Can only get privilege or restriction details on exact-match usersInfo "
@@ -1211,7 +1213,7 @@ public:
continue;
}
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
// getUserDescription always includes credentials and restrictions, which may need
@@ -1264,7 +1266,7 @@ public:
projection.done(),
[&](const BSONObj& obj) { usersArrayBuilder.append(obj); });
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
}
result.append("users", usersArrayBuilder.arr());
@@ -1302,40 +1304,40 @@ public:
auth::CreateOrUpdateRoleArgs args;
Status status = auth::parseCreateOrUpdateRoleCommands(cmdObj, "createRole", dbname, &args);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
if (args.roleName.getRole().empty()) {
- return appendCommandStatus(result,
- Status(ErrorCodes::BadValue, "Role name must be non-empty"));
+ return CommandHelpers::appendCommandStatus(
+ result, Status(ErrorCodes::BadValue, "Role name must be non-empty"));
}
if (args.roleName.getDB() == "local") {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result, Status(ErrorCodes::BadValue, "Cannot create roles in the local database"));
}
if (args.roleName.getDB() == "$external") {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
Status(ErrorCodes::BadValue, "Cannot create roles in the $external database"));
}
if (RoleGraph::isBuiltinRole(args.roleName)) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
Status(ErrorCodes::BadValue,
"Cannot create roles with the same name as a built-in role"));
}
if (!args.hasRoles) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
Status(ErrorCodes::BadValue, "\"createRole\" command requires a \"roles\" array"));
}
if (!args.hasPrivileges) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
Status(ErrorCodes::BadValue,
"\"createRole\" command requires a \"privileges\" array"));
@@ -1351,7 +1353,7 @@ public:
BSONArray privileges;
status = privilegeVectorToBSONArray(args.privileges, &privileges);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
roleObjBuilder.append("privileges", privileges);
@@ -1368,18 +1370,18 @@ public:
AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
status = requireWritableAuthSchema28SCRAM(opCtx, authzManager);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
// Role existence has to be checked after acquiring the update lock
status = checkOkayToGrantRolesToRole(opCtx, args.roleName, args.roles, authzManager);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
status = checkOkayToGrantPrivilegesToRole(args.roleName, args.privileges);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
audit::logCreateRole(Client::getCurrent(),
@@ -1389,7 +1391,7 @@ public:
args.authenticationRestrictions);
status = insertRoleDocument(opCtx, roleObjBuilder.done());
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
} cmdCreateRole;
@@ -1423,11 +1425,11 @@ public:
auth::CreateOrUpdateRoleArgs args;
Status status = auth::parseCreateOrUpdateRoleCommands(cmdObj, "updateRole", dbname, &args);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
if (!args.hasPrivileges && !args.hasRoles && !args.authenticationRestrictions) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
Status(ErrorCodes::BadValue,
"Must specify at least one field to update in updateRole"));
@@ -1440,7 +1442,7 @@ public:
BSONArray privileges;
status = privilegeVectorToBSONArray(args.privileges, &privileges);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
updateSetBuilder.append("privileges", privileges);
}
@@ -1464,27 +1466,27 @@ public:
AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
status = requireWritableAuthSchema28SCRAM(opCtx, authzManager);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
// Role existence has to be checked after acquiring the update lock
BSONObj ignored;
status = authzManager->getRoleDescription(opCtx, args.roleName, &ignored);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
if (args.hasRoles) {
status = checkOkayToGrantRolesToRole(opCtx, args.roleName, args.roles, authzManager);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
}
if (args.hasPrivileges) {
status = checkOkayToGrantPrivilegesToRole(args.roleName, args.privileges);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
}
@@ -1507,7 +1509,7 @@ public:
status = updateRoleDocument(opCtx, args.roleName, updateDocumentBuilder.obj());
// Must invalidate even on bad status - what if the write succeeded but the GLE failed?
authzManager->invalidateUserCache();
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
} cmdUpdateRole;
@@ -1543,7 +1545,7 @@ public:
Status status = auth::parseAndValidateRolePrivilegeManipulationCommands(
cmdObj, "grantPrivilegesToRole", dbname, &roleName, &privilegesToAdd);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
ServiceContext* serviceContext = opCtx->getClient()->getServiceContext();
@@ -1552,11 +1554,11 @@ public:
AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
status = requireWritableAuthSchema28SCRAM(opCtx, authzManager);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
if (RoleGraph::isBuiltinRole(roleName)) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
Status(ErrorCodes::InvalidRoleModification,
str::stream() << roleName.getFullName()
@@ -1565,7 +1567,7 @@ public:
status = checkOkayToGrantPrivilegesToRole(roleName, privilegesToAdd);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
BSONObj roleDoc;
@@ -1575,7 +1577,7 @@ public:
AuthenticationRestrictionsFormat::kOmit,
&roleDoc);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
PrivilegeVector privileges;
@@ -1583,7 +1585,7 @@ public:
&privileges);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
for (PrivilegeVector::iterator it = privilegesToAdd.begin(); it != privilegesToAdd.end();
@@ -1596,16 +1598,16 @@ public:
mutablebson::Element setElement = updateObj.makeElementObject("$set");
status = updateObj.root().pushBack(setElement);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
mutablebson::Element privilegesElement = updateObj.makeElementArray("privileges");
status = setElement.pushBack(privilegesElement);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
status = authzManager->getBSONForPrivileges(privileges, privilegesElement);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
BSONObjBuilder updateBSONBuilder;
@@ -1616,7 +1618,7 @@ public:
status = updateRoleDocument(opCtx, roleName, updateBSONBuilder.done());
// Must invalidate even on bad status - what if the write succeeded but the GLE failed?
authzManager->invalidateUserCache();
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
} cmdGrantPrivilegesToRole;
@@ -1652,7 +1654,7 @@ public:
Status status = auth::parseAndValidateRolePrivilegeManipulationCommands(
cmdObj, "revokePrivilegesFromRole", dbname, &roleName, &privilegesToRemove);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
ServiceContext* serviceContext = opCtx->getClient()->getServiceContext();
@@ -1661,11 +1663,11 @@ public:
AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
status = requireWritableAuthSchema28SCRAM(opCtx, authzManager);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
if (RoleGraph::isBuiltinRole(roleName)) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
Status(ErrorCodes::InvalidRoleModification,
str::stream() << roleName.getFullName()
@@ -1679,14 +1681,14 @@ public:
AuthenticationRestrictionsFormat::kOmit,
&roleDoc);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
PrivilegeVector privileges;
status = auth::parseAndValidatePrivilegeArray(BSONArray(roleDoc["privileges"].Obj()),
&privileges);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
for (PrivilegeVector::iterator itToRm = privilegesToRemove.begin();
@@ -1709,16 +1711,16 @@ public:
mutablebson::Element setElement = updateObj.makeElementObject("$set");
status = updateObj.root().pushBack(setElement);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
mutablebson::Element privilegesElement = updateObj.makeElementArray("privileges");
status = setElement.pushBack(privilegesElement);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
status = authzManager->getBSONForPrivileges(privileges, privilegesElement);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
audit::logRevokePrivilegesFromRole(Client::getCurrent(), roleName, privilegesToRemove);
@@ -1728,7 +1730,7 @@ public:
status = updateRoleDocument(opCtx, roleName, updateBSONBuilder.done());
// Must invalidate even on bad status - what if the write succeeded but the GLE failed?
authzManager->invalidateUserCache();
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
} cmdRevokePrivilegesFromRole;
@@ -1764,12 +1766,12 @@ public:
Status status = auth::parseRolePossessionManipulationCommands(
cmdObj, "grantRolesToRole", dbname, &roleNameString, &rolesToAdd);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
RoleName roleName(roleNameString, dbname);
if (RoleGraph::isBuiltinRole(roleName)) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
Status(ErrorCodes::InvalidRoleModification,
str::stream() << roleName.getFullName()
@@ -1782,20 +1784,20 @@ public:
AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
status = requireWritableAuthSchema28SCRAM(opCtx, authzManager);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
// Role existence has to be checked after acquiring the update lock
BSONObj roleDoc;
status = authzManager->getRoleDescription(opCtx, roleName, &roleDoc);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
// Check for cycles
status = checkOkayToGrantRolesToRole(opCtx, roleName, rolesToAdd, authzManager);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
// Add new roles to existing roles
@@ -1803,7 +1805,7 @@ public:
status = auth::parseRoleNamesFromBSONArray(
BSONArray(roleDoc["roles"].Obj()), roleName.getDB(), &directRoles);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
for (vector<RoleName>::iterator it = rolesToAdd.begin(); it != rolesToAdd.end(); ++it) {
const RoleName& roleToAdd = *it;
@@ -1817,7 +1819,7 @@ public:
opCtx, roleName, BSON("$set" << BSON("roles" << rolesVectorToBSONArray(directRoles))));
// Must invalidate even on bad status - what if the write succeeded but the GLE failed?
authzManager->invalidateUserCache();
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
} cmdGrantRolesToRole;
@@ -1853,7 +1855,7 @@ public:
Status status = auth::parseRolePossessionManipulationCommands(
cmdObj, "revokeRolesFromRole", dbname, &roleNameString, &rolesToRemove);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
ServiceContext* serviceContext = opCtx->getClient()->getServiceContext();
@@ -1862,12 +1864,12 @@ public:
AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
status = requireWritableAuthSchema28SCRAM(opCtx, authzManager);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
RoleName roleName(roleNameString, dbname);
if (RoleGraph::isBuiltinRole(roleName)) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
Status(ErrorCodes::InvalidRoleModification,
str::stream() << roleName.getFullName()
@@ -1877,14 +1879,14 @@ public:
BSONObj roleDoc;
status = authzManager->getRoleDescription(opCtx, roleName, &roleDoc);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
std::vector<RoleName> roles;
status = auth::parseRoleNamesFromBSONArray(
BSONArray(roleDoc["roles"].Obj()), roleName.getDB(), &roles);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
for (vector<RoleName>::const_iterator it = rolesToRemove.begin(); it != rolesToRemove.end();
@@ -1901,7 +1903,7 @@ public:
opCtx, roleName, BSON("$set" << BSON("roles" << rolesVectorToBSONArray(roles))));
// Must invalidate even on bad status - what if the write succeeded but the GLE failed?
authzManager->invalidateUserCache();
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
} cmdRevokeRolesFromRole;
@@ -1939,7 +1941,7 @@ public:
RoleName roleName;
Status status = auth::parseDropRoleCommand(cmdObj, dbname, &roleName);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
ServiceContext* serviceContext = opCtx->getClient()->getServiceContext();
@@ -1948,11 +1950,11 @@ public:
AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
status = requireWritableAuthSchema28SCRAM(opCtx, authzManager);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
if (RoleGraph::isBuiltinRole(roleName)) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
Status(ErrorCodes::InvalidRoleModification,
str::stream() << roleName.getFullName()
@@ -1962,7 +1964,7 @@ public:
BSONObj roleDoc;
status = authzManager->getRoleDescription(opCtx, roleName, &roleDoc);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
// Remove this role from all users
@@ -1987,12 +1989,12 @@ public:
ErrorCodes::Error code = status.code() == ErrorCodes::UnknownError
? ErrorCodes::UserModificationFailed
: status.code();
- return appendCommandStatus(result,
- Status(code,
- str::stream() << "Failed to remove role "
- << roleName.getFullName()
- << " from all users: "
- << status.reason()));
+ return CommandHelpers::appendCommandStatus(
+ result,
+ Status(code,
+ str::stream() << "Failed to remove role " << roleName.getFullName()
+ << " from all users: "
+ << status.reason()));
}
// Remove this role from all other roles
@@ -2016,7 +2018,7 @@ public:
ErrorCodes::Error code = status.code() == ErrorCodes::UnknownError
? ErrorCodes::RoleModificationFailed
: status.code();
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
Status(code,
str::stream() << "Removed role " << roleName.getFullName()
@@ -2035,7 +2037,7 @@ public:
// Must invalidate even on bad status - what if the write succeeded but the GLE failed?
authzManager->invalidateUserCache();
if (!status.isOK()) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
Status(status.code(),
str::stream() << "Removed role " << roleName.getFullName()
@@ -2046,7 +2048,7 @@ public:
dassert(nMatched == 0 || nMatched == 1);
if (nMatched == 0) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
Status(ErrorCodes::RoleNotFound,
str::stream() << "Role '" << roleName.getFullName() << "' not found"));
@@ -2090,7 +2092,7 @@ public:
BSONObjBuilder& result) {
Status status = auth::parseDropAllRolesFromDatabaseCommand(cmdObj, dbname);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
ServiceContext* serviceContext = opCtx->getClient()->getServiceContext();
@@ -2099,7 +2101,7 @@ public:
AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
status = requireWritableAuthSchema28SCRAM(opCtx, authzManager);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
// Remove these roles from all users
@@ -2119,12 +2121,12 @@ public:
ErrorCodes::Error code = status.code() == ErrorCodes::UnknownError
? ErrorCodes::UserModificationFailed
: status.code();
- return appendCommandStatus(result,
- Status(code,
- str::stream() << "Failed to remove roles from \""
- << dbname
- << "\" db from all users: "
- << status.reason()));
+ return CommandHelpers::appendCommandStatus(
+ result,
+ Status(code,
+ str::stream() << "Failed to remove roles from \"" << dbname
+ << "\" db from all users: "
+ << status.reason()));
}
// Remove these roles from all other roles
@@ -2145,12 +2147,12 @@ public:
ErrorCodes::Error code = status.code() == ErrorCodes::UnknownError
? ErrorCodes::RoleModificationFailed
: status.code();
- return appendCommandStatus(result,
- Status(code,
- str::stream() << "Failed to remove roles from \""
- << dbname
- << "\" db from all roles: "
- << status.reason()));
+ return CommandHelpers::appendCommandStatus(
+ result,
+ Status(code,
+ str::stream() << "Failed to remove roles from \"" << dbname
+ << "\" db from all roles: "
+ << status.reason()));
}
audit::logDropAllRolesFromDatabase(Client::getCurrent(), dbname);
@@ -2160,7 +2162,7 @@ public:
// Must invalidate even on bad status - what if the write succeeded but the GLE failed?
authzManager->invalidateUserCache();
if (!status.isOK()) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
Status(status.code(),
str::stream() << "Removed roles from \"" << dbname
@@ -2233,12 +2235,12 @@ public:
auth::RolesInfoArgs args;
Status status = auth::parseRolesInfoCommand(cmdObj, dbname, &args);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
status = requireReadableAuthSchema26Upgrade(opCtx, getGlobalAuthorizationManager());
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
if (args.allForDB) {
@@ -2251,11 +2253,11 @@ public:
args.showBuiltinRoles,
&rolesDocs);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
if (args.privilegeFormat == PrivilegeFormat::kShowAsUserFragment) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
Status(ErrorCodes::IllegalOperation,
"Cannot get user fragment for all roles in a database"));
@@ -2274,7 +2276,7 @@ public:
args.authenticationRestrictionsFormat,
&roleDetails);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
if (args.privilegeFormat == PrivilegeFormat::kShowAsUserFragment) {
@@ -2721,11 +2723,11 @@ public:
auth::MergeAuthzCollectionsArgs args;
Status status = auth::parseMergeAuthzCollectionsCommand(cmdObj, &args);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
if (args.usersCollName.empty() && args.rolesCollName.empty()) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
Status(ErrorCodes::BadValue,
"Must provide at least one of \"tempUsersCollection\" and "
@@ -2738,14 +2740,14 @@ public:
AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
status = requireWritableAuthSchema28SCRAM(opCtx, authzManager);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
if (!args.usersCollName.empty()) {
Status status =
processUsers(opCtx, authzManager, args.usersCollName, args.db, args.drop);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
}
@@ -2753,7 +2755,7 @@ public:
Status status =
processRoles(opCtx, authzManager, args.rolesCollName, args.db, args.drop);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
}
diff --git a/src/mongo/db/commands/validate.cpp b/src/mongo/db/commands/validate.cpp
index d181021fc84..29472a20c47 100644
--- a/src/mongo/db/commands/validate.cpp
+++ b/src/mongo/db/commands/validate.cpp
@@ -100,7 +100,7 @@ public:
return true;
}
- const NamespaceString nss(parseNsCollectionRequired(dbname, cmdObj));
+ const NamespaceString nss(CommandHelpers::parseNsCollectionRequired(dbname, cmdObj));
const bool full = cmdObj["full"].trueValue();
const bool scanData = cmdObj["scandata"].trueValue();
@@ -114,7 +114,7 @@ public:
}
if (!nss.isNormal() && full) {
- appendCommandStatus(
+ CommandHelpers::appendCommandStatus(
result,
{ErrorCodes::CommandFailed, "Can only run full validate on a regular collection"});
return false;
@@ -129,11 +129,12 @@ public:
Collection* collection = ctx.getDb() ? ctx.getDb()->getCollection(opCtx, nss) : NULL;
if (!collection) {
if (ctx.getDb() && ctx.getDb()->getViewCatalog()->lookup(opCtx, nss.ns())) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result, {ErrorCodes::CommandNotSupportedOnView, "Cannot validate a view"});
}
- appendCommandStatus(result, {ErrorCodes::NamespaceNotFound, "ns not found"});
+ CommandHelpers::appendCommandStatus(result,
+ {ErrorCodes::NamespaceNotFound, "ns not found"});
return false;
}
@@ -175,7 +176,7 @@ public:
opCtx->waitForConditionOrInterrupt(_validationNotifier, lock);
}
} catch (AssertionException& e) {
- appendCommandStatus(
+ CommandHelpers::appendCommandStatus(
result,
{ErrorCodes::CommandFailed,
str::stream() << "Exception during validation: " << e.toString()});
@@ -195,7 +196,7 @@ public:
Status status =
collection->validate(opCtx, level, background, std::move(collLk), &results, &result);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
CollectionCatalogEntry* catalogEntry = collection->getCatalogEntry();
diff --git a/src/mongo/db/commands_test.cpp b/src/mongo/db/commands_test.cpp
index 85fd263d4c2..b4ebf32a464 100644
--- a/src/mongo/db/commands_test.cpp
+++ b/src/mongo/db/commands_test.cpp
@@ -38,7 +38,7 @@ namespace mongo {
TEST(Commands, appendCommandStatusOK) {
BSONObjBuilder actualResult;
- Command::appendCommandStatus(actualResult, Status::OK());
+ CommandHelpers::appendCommandStatus(actualResult, Status::OK());
BSONObjBuilder expectedResult;
expectedResult.append("ok", 1.0);
@@ -49,7 +49,7 @@ TEST(Commands, appendCommandStatusOK) {
TEST(Commands, appendCommandStatusError) {
BSONObjBuilder actualResult;
const Status status(ErrorCodes::InvalidLength, "Response payload too long");
- Command::appendCommandStatus(actualResult, status);
+ CommandHelpers::appendCommandStatus(actualResult, status);
BSONObjBuilder expectedResult;
expectedResult.append("ok", 0.0);
@@ -66,7 +66,7 @@ TEST(Commands, appendCommandStatusNoOverwrite) {
actualResult.append("c", "d");
actualResult.append("ok", "not ok");
const Status status(ErrorCodes::InvalidLength, "Response payload too long");
- Command::appendCommandStatus(actualResult, status);
+ CommandHelpers::appendCommandStatus(actualResult, status);
BSONObjBuilder expectedResult;
expectedResult.append("a", "b");
@@ -82,7 +82,7 @@ TEST(Commands, appendCommandStatusNoOverwrite) {
TEST(Commands, appendCommandStatusErrorExtraInfo) {
BSONObjBuilder actualResult;
const Status status(ErrorExtraInfoExample(123), "not again!");
- Command::appendCommandStatus(actualResult, status);
+ CommandHelpers::appendCommandStatus(actualResult, status);
BSONObjBuilder expectedResult;
expectedResult.append("ok", 0.0);
@@ -109,26 +109,27 @@ public:
TEST_F(ParseNsOrUUID, FailWrongType) {
auto cmd = BSON("query" << BSON("a" << BSON("$gte" << 11)));
ASSERT_THROWS_CODE(
- Command::parseNsOrUUID(opCtx, "db", cmd), DBException, ErrorCodes::InvalidNamespace);
+ CommandHelpers::parseNsOrUUID(opCtx, "db", cmd), DBException, ErrorCodes::InvalidNamespace);
}
TEST_F(ParseNsOrUUID, FailEmptyDbName) {
auto cmd = BSON("query"
<< "coll");
ASSERT_THROWS_CODE(
- Command::parseNsOrUUID(opCtx, "", cmd), DBException, ErrorCodes::InvalidNamespace);
+ CommandHelpers::parseNsOrUUID(opCtx, "", cmd), DBException, ErrorCodes::InvalidNamespace);
}
TEST_F(ParseNsOrUUID, FailInvalidDbName) {
auto cmd = BSON("query"
<< "coll");
- ASSERT_THROWS_CODE(
- Command::parseNsOrUUID(opCtx, "test.coll", cmd), DBException, ErrorCodes::InvalidNamespace);
+ ASSERT_THROWS_CODE(CommandHelpers::parseNsOrUUID(opCtx, "test.coll", cmd),
+ DBException,
+ ErrorCodes::InvalidNamespace);
}
TEST_F(ParseNsOrUUID, ParseUnknownUUID) {
auto cmd = BSON("query" << UUID::gen());
- ASSERT_THROWS_CODE(Command::parseNsOrUUID(opCtx, "test.coll", cmd),
+ ASSERT_THROWS_CODE(CommandHelpers::parseNsOrUUID(opCtx, "test.coll", cmd),
DBException,
ErrorCodes::NamespaceNotFound);
}
@@ -136,7 +137,7 @@ TEST_F(ParseNsOrUUID, ParseUnknownUUID) {
TEST_F(ParseNsOrUUID, ParseValidColl) {
auto cmd = BSON("query"
<< "coll");
- auto parsedNss = Command::parseNsOrUUID(opCtx, "test", cmd);
+ auto parsedNss = CommandHelpers::parseNsOrUUID(opCtx, "test", cmd);
ASSERT_EQ(parsedNss, NamespaceString("test.coll"));
}
@@ -149,7 +150,7 @@ TEST_F(ParseNsOrUUID, ParseValidUUID) {
catalog.onCreateCollection(opCtx, &coll, uuid);
auto cmd = BSON("query" << uuid);
- auto parsedNss = Command::parseNsOrUUID(opCtx, "test", cmd);
+ auto parsedNss = CommandHelpers::parseNsOrUUID(opCtx, "test", cmd);
ASSERT_EQUALS(nss, parsedNss);
}
} // namespace mongo
diff --git a/src/mongo/db/dbdirectclient.cpp b/src/mongo/db/dbdirectclient.cpp
index 132a01131ce..db170ac90e8 100644
--- a/src/mongo/db/dbdirectclient.cpp
+++ b/src/mongo/db/dbdirectclient.cpp
@@ -176,7 +176,7 @@ unsigned long long DBDirectClient::count(
NamespaceString nsString(ns);
- auto result = Command::runCommandDirectly(
+ auto result = CommandHelpers::runCommandDirectly(
_opCtx, OpMsgRequest::fromDBAndBody(nsString.db(), std::move(cmdObj)));
uassertStatusOK(getStatusFromCommandResult(result));
diff --git a/src/mongo/db/exec/stagedebug_cmd.cpp b/src/mongo/db/exec/stagedebug_cmd.cpp
index 6e65094764c..d0b20203eff 100644
--- a/src/mongo/db/exec/stagedebug_cmd.cpp
+++ b/src/mongo/db/exec/stagedebug_cmd.cpp
@@ -210,12 +210,12 @@ public:
<< PlanExecutor::statestr(state)
<< ", stats: " << redact(Explain::getWinningPlanStats(exec.get()));
- return appendCommandStatus(result,
- Status(ErrorCodes::OperationFailed,
- str::stream()
- << "Executor error during "
- << "StageDebug command: "
- << WorkingSetCommon::toStatusString(obj)));
+ return CommandHelpers::appendCommandStatus(
+ result,
+ Status(ErrorCodes::OperationFailed,
+ str::stream() << "Executor error during "
+ << "StageDebug command: "
+ << WorkingSetCommon::toStatusString(obj)));
}
return true;
diff --git a/src/mongo/db/ftdc/ftdc_server.cpp b/src/mongo/db/ftdc/ftdc_server.cpp
index a4bb5572c7c..5ffb7a126e9 100644
--- a/src/mongo/db/ftdc/ftdc_server.cpp
+++ b/src/mongo/db/ftdc/ftdc_server.cpp
@@ -249,11 +249,11 @@ FTDCSimpleInternalCommandCollector::FTDCSimpleInternalCommandCollector(StringDat
BSONObj cmdObj)
: _name(name.toString()), _request(OpMsgRequest::fromDBAndBody(ns, std::move(cmdObj))) {
invariant(command == _request.getCommandName());
- invariant(Command::findCommand(command)); // Fail early if it doesn't exist.
+ invariant(CommandHelpers::findCommand(command)); // Fail early if it doesn't exist.
}
void FTDCSimpleInternalCommandCollector::collect(OperationContext* opCtx, BSONObjBuilder& builder) {
- auto result = Command::runCommandDirectly(opCtx, _request);
+ auto result = CommandHelpers::runCommandDirectly(opCtx, _request);
builder.appendElements(result);
}
diff --git a/src/mongo/db/ops/write_ops_exec.cpp b/src/mongo/db/ops/write_ops_exec.cpp
index 27e53a898c9..64464bbf189 100644
--- a/src/mongo/db/ops/write_ops_exec.cpp
+++ b/src/mongo/db/ops/write_ops_exec.cpp
@@ -255,7 +255,7 @@ SingleWriteResult createIndex(OperationContext* opCtx,
cmdBuilder << "createIndexes" << ns.coll();
cmdBuilder << "indexes" << BSON_ARRAY(spec);
- auto cmdResult = Command::runCommandDirectly(
+ auto cmdResult = CommandHelpers::runCommandDirectly(
opCtx, OpMsgRequest::fromDBAndBody(systemIndexes.db(), cmdBuilder.obj()));
uassertStatusOK(getStatusFromCommandResult(cmdResult));
diff --git a/src/mongo/db/pipeline/aggregation_request.cpp b/src/mongo/db/pipeline/aggregation_request.cpp
index e944be2f03e..8eb646a7599 100644
--- a/src/mongo/db/pipeline/aggregation_request.cpp
+++ b/src/mongo/db/pipeline/aggregation_request.cpp
@@ -214,7 +214,7 @@ StatusWith<AggregationRequest> AggregationRequest::parseFromBSON(
request.setAllowDiskUse(elem.Bool());
} else if (bypassDocumentValidationCommandOption() == fieldName) {
request.setBypassDocumentValidation(elem.trueValue());
- } else if (!Command::isGenericArgument(fieldName)) {
+ } else if (!CommandHelpers::isGenericArgument(fieldName)) {
return {ErrorCodes::FailedToParse,
str::stream() << "unrecognized field '" << elem.fieldName() << "'"};
}
diff --git a/src/mongo/db/query/getmore_request.cpp b/src/mongo/db/query/getmore_request.cpp
index 6c738c4da67..21db56e4274 100644
--- a/src/mongo/db/query/getmore_request.cpp
+++ b/src/mongo/db/query/getmore_request.cpp
@@ -158,7 +158,7 @@ StatusWith<GetMoreRequest> GetMoreRequest::parseFromBSON(const std::string& dbna
return status;
}
lastKnownCommittedOpTime = ot;
- } else if (!Command::isGenericArgument(fieldName)) {
+ } else if (!CommandHelpers::isGenericArgument(fieldName)) {
return {ErrorCodes::FailedToParse,
str::stream() << "Failed to parse: " << cmdObj << ". "
<< "Unrecognized field '"
diff --git a/src/mongo/db/query/query_request.cpp b/src/mongo/db/query/query_request.cpp
index 074bf7511fa..512a78f9cb3 100644
--- a/src/mongo/db/query/query_request.cpp
+++ b/src/mongo/db/query/query_request.cpp
@@ -382,7 +382,7 @@ StatusWith<unique_ptr<QueryRequest>> QueryRequest::parseFromFindCommand(unique_p
return status;
}
qr->_replicationTerm = el._numberLong();
- } else if (!Command::isGenericArgument(fieldName)) {
+ } else if (!CommandHelpers::isGenericArgument(fieldName)) {
return Status(ErrorCodes::FailedToParse,
str::stream() << "Failed to parse: " << cmdObj.toString() << ". "
<< "Unrecognized field '"
diff --git a/src/mongo/db/repl/master_slave.cpp b/src/mongo/db/repl/master_slave.cpp
index 5ca2ed2f0f0..5cac46ad48c 100644
--- a/src/mongo/db/repl/master_slave.cpp
+++ b/src/mongo/db/repl/master_slave.cpp
@@ -389,13 +389,13 @@ public:
HandshakeArgs handshake;
Status status = handshake.initialize(cmdObj);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
ReplClientInfo::forClient(opCtx->getClient()).setRemoteID(handshake.getRid());
status = getGlobalReplicationCoordinator()->processHandshake(opCtx, handshake);
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
} handshakeCmd;
diff --git a/src/mongo/db/repl/oplog.cpp b/src/mongo/db/repl/oplog.cpp
index 001c20af84f..91aa0e601f3 100644
--- a/src/mongo/db/repl/oplog.cpp
+++ b/src/mongo/db/repl/oplog.cpp
@@ -1616,7 +1616,7 @@ Status applyCommand_inlock(OperationContext* opCtx,
case ErrorCodes::BackgroundOperationInProgressForNamespace: {
Lock::TempRelease release(opCtx->lockState());
- Command* cmd = Command::findCommand(o.firstElement().fieldName());
+ Command* cmd = CommandHelpers::findCommand(o.firstElement().fieldName());
invariant(cmd);
BackgroundOperation::awaitNoBgOpInProgForNs(cmd->parseNs(nss.db().toString(), o));
opCtx->recoveryUnit()->abandonSnapshot();
diff --git a/src/mongo/db/repl/repl_set_commands.cpp b/src/mongo/db/repl/repl_set_commands.cpp
index ebee2285b3e..e2d28cadb6f 100644
--- a/src/mongo/db/repl/repl_set_commands.cpp
+++ b/src/mongo/db/repl/repl_set_commands.cpp
@@ -108,19 +108,19 @@ public:
long long stateVal;
auto status = bsonExtractIntegerField(cmdObj, "waitForMemberState", &stateVal);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
const auto swMemberState = MemberState::create(stateVal);
if (!swMemberState.isOK()) {
- return appendCommandStatus(result, swMemberState.getStatus());
+ return CommandHelpers::appendCommandStatus(result, swMemberState.getStatus());
}
const auto expectedState = swMemberState.getValue();
long long timeoutMillis;
status = bsonExtractIntegerField(cmdObj, "timeoutMillis", &timeoutMillis);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
Milliseconds timeout(timeoutMillis);
log() << "replSetTest: waiting " << timeout << " for member state to become "
@@ -128,23 +128,23 @@ public:
status = replCoord->waitForMemberState(expectedState, timeout);
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
} else if (cmdObj.hasElement("waitForDrainFinish")) {
long long timeoutMillis;
auto status = bsonExtractIntegerField(cmdObj, "waitForDrainFinish", &timeoutMillis);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
Milliseconds timeout(timeoutMillis);
log() << "replSetTest: waiting " << timeout << " for applier buffer to finish draining";
status = replCoord->waitForDrainFinish(timeout);
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
Status status = replCoord->checkReplEnabledForCommand(&result);
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
};
@@ -169,7 +169,7 @@ public:
BSONObjBuilder& result) {
Status status = getGlobalReplicationCoordinator()->checkReplEnabledForCommand(&result);
if (!status.isOK())
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
auto rbid = ReplicationProcess::get(opCtx)->getRollbackID(opCtx);
@@ -177,7 +177,7 @@ public:
fassertStatusOK(40426, rbid.getStatus());
result.append("rbid", rbid.getValue());
- return appendCommandStatus(result, Status::OK());
+ return CommandHelpers::appendCommandStatus(result, Status::OK());
}
} cmdReplSetRBID;
@@ -198,13 +198,13 @@ public:
Status status = getGlobalReplicationCoordinator()->checkReplEnabledForCommand(&result);
if (!status.isOK())
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
bool includeInitialSync = false;
Status initialSyncStatus =
bsonExtractBooleanFieldWithDefault(cmdObj, "initialSync", false, &includeInitialSync);
if (!initialSyncStatus.isOK()) {
- return appendCommandStatus(result, initialSyncStatus);
+ return CommandHelpers::appendCommandStatus(result, initialSyncStatus);
}
auto responseStyle = ReplicationCoordinator::ReplSetGetStatusResponseStyle::kBasic;
@@ -212,7 +212,7 @@ public:
responseStyle = ReplicationCoordinator::ReplSetGetStatusResponseStyle::kInitialSync;
}
status = getGlobalReplicationCoordinator()->processReplSetGetStatus(&result, responseStyle);
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
private:
@@ -235,7 +235,7 @@ public:
BSONObjBuilder& result) {
Status status = getGlobalReplicationCoordinator()->checkReplEnabledForCommand(&result);
if (!status.isOK())
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
getGlobalReplicationCoordinator()->processReplSetGetConfig(&result);
return true;
@@ -357,9 +357,10 @@ public:
std::string replSetString =
ReplicationCoordinator::get(opCtx)->getSettings().getReplSetString();
if (replSetString.empty()) {
- return appendCommandStatus(result,
- Status(ErrorCodes::NoReplicationEnabled,
- "This node was not started with the replSet option"));
+ return CommandHelpers::appendCommandStatus(
+ result,
+ Status(ErrorCodes::NoReplicationEnabled,
+ "This node was not started with the replSet option"));
}
if (configObj.isEmpty()) {
@@ -403,7 +404,7 @@ public:
Status status =
getGlobalReplicationCoordinator()->processReplSetInitiate(opCtx, configObj, &result);
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
private:
@@ -426,7 +427,7 @@ public:
BSONObjBuilder& result) {
Status status = getGlobalReplicationCoordinator()->checkReplEnabledForCommand(&result);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
if (cmdObj["replSetReconfig"].type() != Object) {
@@ -455,7 +456,7 @@ public:
}
wuow.commit();
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
private:
@@ -483,10 +484,10 @@ public:
BSONObjBuilder& result) {
Status status = getGlobalReplicationCoordinator()->checkReplEnabledForCommand(&result);
if (!status.isOK())
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
int secs = (int)cmdObj.firstElement().numberInt();
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result, getGlobalReplicationCoordinator()->processReplSetFreeze(secs, &result));
}
@@ -513,7 +514,7 @@ public:
BSONObjBuilder& result) {
Status status = getGlobalReplicationCoordinator()->checkReplEnabledForCommand(&result);
if (!status.isOK())
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
const bool force = cmdObj["force"].trueValue();
@@ -522,7 +523,7 @@ public:
stepDownForSecs = 60;
} else if (stepDownForSecs < 0) {
status = Status(ErrorCodes::BadValue, "stepdown period must be a positive integer");
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
long long secondaryCatchUpPeriodSecs;
@@ -536,26 +537,26 @@ public:
secondaryCatchUpPeriodSecs = 10;
}
} else if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
if (secondaryCatchUpPeriodSecs < 0) {
status = Status(ErrorCodes::BadValue,
"secondaryCatchUpPeriodSecs period must be a positive or absent");
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
if (stepDownForSecs < secondaryCatchUpPeriodSecs) {
status = Status(ErrorCodes::BadValue,
"stepdown period must be longer than secondaryCatchUpPeriodSecs");
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
log() << "Attempting to step down in response to replSetStepDown command";
status = getGlobalReplicationCoordinator()->stepDown(
opCtx, force, Seconds(secondaryCatchUpPeriodSecs), Seconds(stepDownForSecs));
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
private:
@@ -577,11 +578,12 @@ public:
BSONObjBuilder& result) {
Status status = getGlobalReplicationCoordinator()->checkReplEnabledForCommand(&result);
if (!status.isOK())
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
- return appendCommandStatus(result,
- getGlobalReplicationCoordinator()->setMaintenanceMode(
- cmdObj["replSetMaintenance"].trueValue()));
+ return CommandHelpers::appendCommandStatus(
+ result,
+ getGlobalReplicationCoordinator()->setMaintenanceMode(
+ cmdObj["replSetMaintenance"].trueValue()));
}
private:
@@ -604,16 +606,17 @@ public:
BSONObjBuilder& result) {
Status status = getGlobalReplicationCoordinator()->checkReplEnabledForCommand(&result);
if (!status.isOK())
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
HostAndPort targetHostAndPort;
status = targetHostAndPort.initialize(cmdObj["replSetSyncFrom"].valuestrsafe());
if (!status.isOK())
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
- return appendCommandStatus(result,
- getGlobalReplicationCoordinator()->processReplSetSyncFrom(
- opCtx, targetHostAndPort, &result));
+ return CommandHelpers::appendCommandStatus(
+ result,
+ getGlobalReplicationCoordinator()->processReplSetSyncFrom(
+ opCtx, targetHostAndPort, &result));
}
private:
@@ -633,7 +636,7 @@ public:
Status status = replCoord->checkReplEnabledForCommand(&result);
if (!status.isOK())
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
// accept and ignore handshakes sent from old (3.0-series) nodes without erroring to
// enable mixed-version operation, since we no longer use the handshakes
@@ -661,10 +664,10 @@ public:
if (status == ErrorCodes::InvalidReplicaSetConfig) {
result.append("configVersion", configVersion);
}
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
} else {
// Parsing error from UpdatePositionArgs.
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
}
} cmdReplSetUpdatePosition;
@@ -726,7 +729,7 @@ public:
checks many things that are pre-initialization. */
if (!getGlobalReplicationCoordinator()->getSettings().usingReplSets()) {
status = Status(ErrorCodes::NoReplicationEnabled, "not running with --replSet");
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
// Process heartbeat based on the version of request. The missing fields in mismatched
@@ -743,7 +746,7 @@ public:
LOG_FOR_HEARTBEATS(2) << "Processed heartbeat from "
<< cmdObj.getStringField("from")
<< " and generated response, " << response;
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
// else: fall through to old heartbeat protocol as it is likely that
// a new node just joined the set
@@ -752,7 +755,7 @@ public:
ReplSetHeartbeatArgs args;
status = args.initialize(cmdObj);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
// ugh.
@@ -767,7 +770,7 @@ public:
LOG_FOR_HEARTBEATS(2) << "Processed heartbeat from " << cmdObj.getStringField("from")
<< " and generated response, " << response;
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
} cmdReplSetHeartbeat;
@@ -784,7 +787,7 @@ public:
BSONObjBuilder& result) {
Status status = getGlobalReplicationCoordinator()->checkReplEnabledForCommand(&result);
if (!status.isOK())
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
ReplicationCoordinator::ReplSetFreshArgs parsedArgs;
parsedArgs.id = cmdObj["id"].Int();
@@ -800,7 +803,7 @@ public:
parsedArgs.opTime = Timestamp(cmdObj["opTime"].Date());
status = getGlobalReplicationCoordinator()->processReplSetFresh(parsedArgs, &result);
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
} cmdReplSetFresh;
@@ -818,7 +821,7 @@ private:
Status status = getGlobalReplicationCoordinator()->checkReplEnabledForCommand(&result);
if (!status.isOK())
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
ReplicationCoordinator::ReplSetElectArgs parsedArgs;
parsedArgs.set = cmdObj["set"].String();
@@ -833,7 +836,7 @@ private:
parsedArgs.round = cmdObj["round"].OID();
status = getGlobalReplicationCoordinator()->processReplSetElect(parsedArgs, &result);
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
} cmdReplSetElect;
@@ -847,7 +850,7 @@ public:
BSONObjBuilder& result) {
Status status = getGlobalReplicationCoordinator()->checkReplEnabledForCommand(&result);
if (!status.isOK())
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
log() << "Received replSetStepUp request";
@@ -857,7 +860,7 @@ public:
log() << "replSetStepUp request failed" << causedBy(status);
}
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
private:
@@ -882,14 +885,14 @@ public:
BSONObjBuilder& result) override {
Status status = getGlobalReplicationCoordinator()->checkReplEnabledForCommand(&result);
if (!status.isOK())
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
log() << "Received replSetAbortPrimaryCatchUp request";
status = getGlobalReplicationCoordinator()->abortCatchupIfNeeded();
if (!status.isOK()) {
log() << "replSetAbortPrimaryCatchUp request failed" << causedBy(status);
}
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
private:
diff --git a/src/mongo/db/repl/repl_set_request_votes.cpp b/src/mongo/db/repl/repl_set_request_votes.cpp
index 903533f703e..70b7c3d9b4b 100644
--- a/src/mongo/db/repl/repl_set_request_votes.cpp
+++ b/src/mongo/db/repl/repl_set_request_votes.cpp
@@ -53,20 +53,20 @@ private:
BSONObjBuilder& result) final {
Status status = getGlobalReplicationCoordinator()->checkReplEnabledForCommand(&result);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
ReplSetRequestVotesArgs parsedArgs;
status = parsedArgs.initialize(cmdObj);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
ReplSetRequestVotesResponse response;
status = getGlobalReplicationCoordinator()->processReplSetRequestVotes(
opCtx, parsedArgs, &response);
response.addToBSON(&result);
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
} cmdReplSetRequestVotes;
diff --git a/src/mongo/db/repl/replication_info.cpp b/src/mongo/db/repl/replication_info.cpp
index 76388589f95..7b5a5288dce 100644
--- a/src/mongo/db/repl/replication_info.cpp
+++ b/src/mongo/db/repl/replication_info.cpp
@@ -261,7 +261,7 @@ public:
BSONElement element = cmdObj[kMetadataDocumentName];
if (!element.eoo()) {
if (seenIsMaster) {
- return Command::appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
Status(ErrorCodes::ClientMetadataCannotBeMutated,
"The client metadata document may only be sent in the first isMaster"));
@@ -270,7 +270,8 @@ public:
auto swParseClientMetadata = ClientMetadata::parse(element);
if (!swParseClientMetadata.getStatus().isOK()) {
- return Command::appendCommandStatus(result, swParseClientMetadata.getStatus());
+ return CommandHelpers::appendCommandStatus(result,
+ swParseClientMetadata.getStatus());
}
invariant(swParseClientMetadata.getValue());
diff --git a/src/mongo/db/repl/resync.cpp b/src/mongo/db/repl/resync.cpp
index a1e60050652..2b7ba1b90b7 100644
--- a/src/mongo/db/repl/resync.cpp
+++ b/src/mongo/db/repl/resync.cpp
@@ -84,7 +84,7 @@ public:
if (getGlobalReplicationCoordinator()->getSettings().usingReplSets()) {
// Resync is disabled in production on replica sets until it stabilizes (SERVER-27081).
if (!Command::testCommandsEnabled) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
Status(ErrorCodes::OperationFailed,
"Replica sets do not support the resync command"));
@@ -96,16 +96,16 @@ public:
const MemberState memberState = replCoord->getMemberState();
if (memberState.startup()) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result, Status(ErrorCodes::NotYetInitialized, "no replication yet active"));
}
if (memberState.primary()) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result, Status(ErrorCodes::NotSecondary, "primaries cannot resync"));
}
auto status = replCoord->setFollowerMode(MemberState::RS_STARTUP2);
if (!status.isOK()) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
Status(status.code(),
str::stream()
diff --git a/src/mongo/db/repl/rs_rollback_no_uuid.cpp b/src/mongo/db/repl/rs_rollback_no_uuid.cpp
index 256719f04cd..7b87b8dbc1c 100644
--- a/src/mongo/db/repl/rs_rollback_no_uuid.cpp
+++ b/src/mongo/db/repl/rs_rollback_no_uuid.cpp
@@ -234,7 +234,7 @@ Status rollback_internal_no_uuid::updateFixUpInfoFromLocalOplogEntry(FixUpInfo&
// Retrieves the command name, so out of {renameCollection: "test.x"} it returns
// "renameCollection".
string cmdname = first.fieldName();
- Command* cmd = Command::findCommand(cmdname.c_str());
+ Command* cmd = CommandHelpers::findCommand(cmdname.c_str());
if (cmd == NULL) {
severe() << "Rollback no such command " << first.fieldName();
return Status(ErrorCodes::UnrecoverableRollbackError,
diff --git a/src/mongo/db/s/balancer/migration_manager_test.cpp b/src/mongo/db/s/balancer/migration_manager_test.cpp
index fb4ed4905ea..82be67e10a4 100644
--- a/src/mongo/db/s/balancer/migration_manager_test.cpp
+++ b/src/mongo/db/s/balancer/migration_manager_test.cpp
@@ -283,7 +283,7 @@ void MigrationManagerTest::expectMoveChunkCommand(const ChunkType& chunk,
const ShardId& toShardId,
const Status& returnStatus) {
BSONObjBuilder resultBuilder;
- Command::appendCommandStatus(resultBuilder, returnStatus);
+ CommandHelpers::appendCommandStatus(resultBuilder, returnStatus);
expectMoveChunkCommand(chunk, toShardId, resultBuilder.obj());
}
diff --git a/src/mongo/db/s/check_sharding_index_command.cpp b/src/mongo/db/s/check_sharding_index_command.cpp
index 6955d93db83..2d1e0e674da 100644
--- a/src/mongo/db/s/check_sharding_index_command.cpp
+++ b/src/mongo/db/s/check_sharding_index_command.cpp
@@ -80,7 +80,7 @@ public:
}
virtual std::string parseNs(const std::string& dbname, const BSONObj& cmdObj) const {
- return parseNsFullyQualified(dbname, cmdObj);
+ return CommandHelpers::parseNsFullyQualified(dbname, cmdObj);
}
bool errmsgRun(OperationContext* opCtx,
@@ -200,7 +200,7 @@ public:
}
if (PlanExecutor::DEAD == state || PlanExecutor::FAILURE == state) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
Status(ErrorCodes::OperationFailed,
str::stream() << "Executor error while checking sharding index: "
diff --git a/src/mongo/db/s/config/configsvr_add_shard_command.cpp b/src/mongo/db/s/config/configsvr_add_shard_command.cpp
index 214e109ebb3..329c24127ce 100644
--- a/src/mongo/db/s/config/configsvr_add_shard_command.cpp
+++ b/src/mongo/db/s/config/configsvr_add_shard_command.cpp
@@ -92,7 +92,7 @@ public:
const BSONObj& cmdObj,
BSONObjBuilder& result) override {
if (serverGlobalParams.clusterRole != ClusterRole::ConfigServer) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
Status(ErrorCodes::IllegalOperation,
"_configsvrAddShard can only be run on config servers"));
@@ -104,7 +104,7 @@ public:
auto swParsedRequest = AddShardRequest::parseFromConfigCommand(cmdObj);
if (!swParsedRequest.isOK()) {
- return appendCommandStatus(result, swParsedRequest.getStatus());
+ return CommandHelpers::appendCommandStatus(result, swParsedRequest.getStatus());
}
auto parsedRequest = std::move(swParsedRequest.getValue());
@@ -113,7 +113,7 @@ public:
auto validationStatus = parsedRequest.validate(rsConfig.isLocalHostAllowed());
if (!validationStatus.isOK()) {
- return appendCommandStatus(result, validationStatus);
+ return CommandHelpers::appendCommandStatus(result, validationStatus);
}
uassert(ErrorCodes::InvalidOptions,
@@ -136,7 +136,7 @@ public:
if (!addShardResult.isOK()) {
log() << "addShard request '" << parsedRequest << "'"
<< "failed" << causedBy(addShardResult.getStatus());
- return appendCommandStatus(result, addShardResult.getStatus());
+ return CommandHelpers::appendCommandStatus(result, addShardResult.getStatus());
}
result << "shardAdded" << addShardResult.getValue();
diff --git a/src/mongo/db/s/config/configsvr_commit_chunk_migration_command.cpp b/src/mongo/db/s/config/configsvr_commit_chunk_migration_command.cpp
index 7be915ca4f4..49b01f5454f 100644
--- a/src/mongo/db/s/config/configsvr_commit_chunk_migration_command.cpp
+++ b/src/mongo/db/s/config/configsvr_commit_chunk_migration_command.cpp
@@ -114,7 +114,7 @@ public:
}
std::string parseNs(const std::string& dbname, const BSONObj& cmdObj) const override {
- return parseNsFullyQualified(dbname, cmdObj);
+ return CommandHelpers::parseNsFullyQualified(dbname, cmdObj);
}
bool run(OperationContext* opCtx,
@@ -136,7 +136,7 @@ public:
commitRequest.getFromShard(),
commitRequest.getToShard());
if (!response.isOK()) {
- return appendCommandStatus(result, response.getStatus());
+ return CommandHelpers::appendCommandStatus(result, response.getStatus());
}
result.appendElements(response.getValue());
return true;
diff --git a/src/mongo/db/s/config/configsvr_create_database_command.cpp b/src/mongo/db/s/config/configsvr_create_database_command.cpp
index 86690de4e2e..a95c6748860 100644
--- a/src/mongo/db/s/config/configsvr_create_database_command.cpp
+++ b/src/mongo/db/s/config/configsvr_create_database_command.cpp
@@ -97,7 +97,7 @@ public:
BSONObjBuilder& result) {
if (serverGlobalParams.clusterRole != ClusterRole::ConfigServer) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
Status(ErrorCodes::IllegalOperation,
"_configsvrCreateDatabase can only be run on config servers"));
diff --git a/src/mongo/db/s/config/configsvr_drop_collection_command.cpp b/src/mongo/db/s/config/configsvr_drop_collection_command.cpp
index e186584a120..2b3987585bc 100644
--- a/src/mongo/db/s/config/configsvr_drop_collection_command.cpp
+++ b/src/mongo/db/s/config/configsvr_drop_collection_command.cpp
@@ -87,7 +87,7 @@ public:
}
std::string parseNs(const std::string& dbname, const BSONObj& cmdObj) const override {
- return parseNsFullyQualified(dbname, cmdObj);
+ return CommandHelpers::parseNsFullyQualified(dbname, cmdObj);
}
bool run(OperationContext* opCtx,
@@ -96,7 +96,7 @@ public:
BSONObjBuilder& result) override {
if (serverGlobalParams.clusterRole != ClusterRole::ConfigServer) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
Status(ErrorCodes::IllegalOperation,
"_configsvrDropCollection can only be run on config servers"));
diff --git a/src/mongo/db/s/config/configsvr_drop_database_command.cpp b/src/mongo/db/s/config/configsvr_drop_database_command.cpp
index 0439cf35cf2..c772c34cf97 100644
--- a/src/mongo/db/s/config/configsvr_drop_database_command.cpp
+++ b/src/mongo/db/s/config/configsvr_drop_database_command.cpp
@@ -91,7 +91,7 @@ public:
const BSONObj& cmdObj,
BSONObjBuilder& result) {
if (serverGlobalParams.clusterRole != ClusterRole::ConfigServer) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
Status(ErrorCodes::IllegalOperation,
"_configsvrDropDatabase can only be run on config servers"));
diff --git a/src/mongo/db/s/config/configsvr_enable_sharding_command.cpp b/src/mongo/db/s/config/configsvr_enable_sharding_command.cpp
index 62b1ff60606..cc60ec19b77 100644
--- a/src/mongo/db/s/config/configsvr_enable_sharding_command.cpp
+++ b/src/mongo/db/s/config/configsvr_enable_sharding_command.cpp
@@ -99,7 +99,7 @@ public:
BSONObjBuilder& result) {
if (serverGlobalParams.clusterRole != ClusterRole::ConfigServer) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
Status(ErrorCodes::IllegalOperation,
"_configsvrEnableSharding can only be run on config servers"));
@@ -113,9 +113,10 @@ public:
NamespaceString::validDBName(dbname, NamespaceString::DollarInDbNameBehavior::Allow));
if (dbname == NamespaceString::kAdminDb || dbname == NamespaceString::kLocalDb) {
- return appendCommandStatus(result,
- {ErrorCodes::InvalidOptions,
- str::stream() << "can't shard " + dbname + " database"});
+ return CommandHelpers::appendCommandStatus(
+ result,
+ {ErrorCodes::InvalidOptions,
+ str::stream() << "can't shard " + dbname + " database"});
}
uassert(ErrorCodes::InvalidOptions,
diff --git a/src/mongo/db/s/config/configsvr_merge_chunk_command.cpp b/src/mongo/db/s/config/configsvr_merge_chunk_command.cpp
index 07d4af626f3..4f7f06c5176 100644
--- a/src/mongo/db/s/config/configsvr_merge_chunk_command.cpp
+++ b/src/mongo/db/s/config/configsvr_merge_chunk_command.cpp
@@ -95,7 +95,7 @@ public:
}
std::string parseNs(const std::string& dbname, const BSONObj& cmdObj) const override {
- return parseNsFullyQualified(dbname, cmdObj);
+ return CommandHelpers::parseNsFullyQualified(dbname, cmdObj);
}
bool run(OperationContext* opCtx,
@@ -117,7 +117,7 @@ public:
parsedRequest.getShardName());
if (!mergeChunkResult.isOK()) {
- return appendCommandStatus(result, mergeChunkResult);
+ return CommandHelpers::appendCommandStatus(result, mergeChunkResult);
}
return true;
diff --git a/src/mongo/db/s/config/configsvr_move_primary_command.cpp b/src/mongo/db/s/config/configsvr_move_primary_command.cpp
index 3d1c85fa85f..3ccde97a393 100644
--- a/src/mongo/db/s/config/configsvr_move_primary_command.cpp
+++ b/src/mongo/db/s/config/configsvr_move_primary_command.cpp
@@ -110,7 +110,7 @@ public:
BSONObjBuilder& result) override {
if (serverGlobalParams.clusterRole != ClusterRole::ConfigServer) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
Status(ErrorCodes::IllegalOperation,
"_configsvrMovePrimary can only be run on config servers"));
@@ -127,7 +127,7 @@ public:
if (dbname == NamespaceString::kAdminDb || dbname == NamespaceString::kConfigDb ||
dbname == NamespaceString::kLocalDb) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
{ErrorCodes::InvalidOptions,
str::stream() << "Can't move primary for " << dbname << " database"});
@@ -159,7 +159,7 @@ public:
const std::string to = movePrimaryRequest.getTo().toString();
if (to.empty()) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
{ErrorCodes::InvalidOptions,
str::stream() << "you have to specify where you want to move it"});
@@ -231,7 +231,7 @@ public:
if (!worked) {
log() << "clone failed" << redact(cloneRes);
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result, {ErrorCodes::OperationFailed, str::stream() << "clone failed"});
}
diff --git a/src/mongo/db/s/config/configsvr_remove_shard_command.cpp b/src/mongo/db/s/config/configsvr_remove_shard_command.cpp
index d16fa7950c0..ae0b3e070aa 100644
--- a/src/mongo/db/s/config/configsvr_remove_shard_command.cpp
+++ b/src/mongo/db/s/config/configsvr_remove_shard_command.cpp
@@ -109,7 +109,8 @@ public:
std::string msg(str::stream() << "Could not drop shard '" << target
<< "' because it does not exist");
log() << msg;
- return appendCommandStatus(result, Status(ErrorCodes::ShardNotFound, msg));
+ return CommandHelpers::appendCommandStatus(result,
+ Status(ErrorCodes::ShardNotFound, msg));
}
const auto& shard = shardStatus.getValue();
@@ -156,7 +157,7 @@ public:
nullptr,
repl::ReadConcernLevel::kMajorityReadConcern);
if (!swChunks.isOK()) {
- return appendCommandStatus(result, swChunks.getStatus());
+ return CommandHelpers::appendCommandStatus(result, swChunks.getStatus());
}
const auto& chunks = swChunks.getValue();
diff --git a/src/mongo/db/s/config/configsvr_shard_collection_command.cpp b/src/mongo/db/s/config/configsvr_shard_collection_command.cpp
index 49d09e326e5..64cc9986f58 100644
--- a/src/mongo/db/s/config/configsvr_shard_collection_command.cpp
+++ b/src/mongo/db/s/config/configsvr_shard_collection_command.cpp
@@ -725,7 +725,7 @@ public:
}
std::string parseNs(const std::string& dbname, const BSONObj& cmdObj) const override {
- return parseNsFullyQualified(dbname, cmdObj);
+ return CommandHelpers::parseNsFullyQualified(dbname, cmdObj);
}
bool run(OperationContext* opCtx,
diff --git a/src/mongo/db/s/config/configsvr_split_chunk_command.cpp b/src/mongo/db/s/config/configsvr_split_chunk_command.cpp
index 23e243384dd..4740525ee23 100644
--- a/src/mongo/db/s/config/configsvr_split_chunk_command.cpp
+++ b/src/mongo/db/s/config/configsvr_split_chunk_command.cpp
@@ -93,7 +93,7 @@ public:
}
std::string parseNs(const std::string& dbname, const BSONObj& cmdObj) const override {
- return parseNsFullyQualified(dbname, cmdObj);
+ return CommandHelpers::parseNsFullyQualified(dbname, cmdObj);
}
bool run(OperationContext* opCtx,
diff --git a/src/mongo/db/s/flush_routing_table_cache_updates_command.cpp b/src/mongo/db/s/flush_routing_table_cache_updates_command.cpp
index 3bdf4d5556f..e24808b412c 100644
--- a/src/mongo/db/s/flush_routing_table_cache_updates_command.cpp
+++ b/src/mongo/db/s/flush_routing_table_cache_updates_command.cpp
@@ -79,7 +79,7 @@ public:
}
std::string parseNs(const std::string& dbname, const BSONObj& cmdObj) const override {
- return parseNsFullyQualified(dbname, cmdObj);
+ return CommandHelpers::parseNsFullyQualified(dbname, cmdObj);
}
Status checkAuthForCommand(Client* client,
diff --git a/src/mongo/db/s/get_shard_version_command.cpp b/src/mongo/db/s/get_shard_version_command.cpp
index 7cdfa33f98c..65ab0f0b652 100644
--- a/src/mongo/db/s/get_shard_version_command.cpp
+++ b/src/mongo/db/s/get_shard_version_command.cpp
@@ -78,7 +78,7 @@ public:
}
std::string parseNs(const std::string& dbname, const BSONObj& cmdObj) const override {
- return parseNsFullyQualified(dbname, cmdObj);
+ return CommandHelpers::parseNsFullyQualified(dbname, cmdObj);
}
bool run(OperationContext* opCtx,
diff --git a/src/mongo/db/s/merge_chunks_command.cpp b/src/mongo/db/s/merge_chunks_command.cpp
index aba860514cb..aa36b6dfdf9 100644
--- a/src/mongo/db/s/merge_chunks_command.cpp
+++ b/src/mongo/db/s/merge_chunks_command.cpp
@@ -333,7 +333,7 @@ public:
}
std::string parseNs(const std::string& dbname, const BSONObj& cmdObj) const override {
- return parseNsFullyQualified(dbname, cmdObj);
+ return CommandHelpers::parseNsFullyQualified(dbname, cmdObj);
}
bool adminOnly() const override {
@@ -404,7 +404,7 @@ public:
}
auto mergeStatus = mergeChunks(opCtx, NamespaceString(ns), minKey, maxKey, epoch);
- return appendCommandStatus(result, mergeStatus);
+ return CommandHelpers::appendCommandStatus(result, mergeStatus);
}
} mergeChunksCmd;
diff --git a/src/mongo/db/s/migration_destination_manager_legacy_commands.cpp b/src/mongo/db/s/migration_destination_manager_legacy_commands.cpp
index 1024433b5d6..007288ff96d 100644
--- a/src/mongo/db/s/migration_destination_manager_legacy_commands.cpp
+++ b/src/mongo/db/s/migration_destination_manager_legacy_commands.cpp
@@ -232,7 +232,7 @@ public:
mdm->report(result);
if (!status.isOK()) {
log() << status.reason();
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
return true;
}
@@ -281,7 +281,7 @@ public:
mdm->report(result);
if (!status.isOK()) {
log() << status.reason();
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
} else if (migrationSessionIdStatus == ErrorCodes::NoSuchKey) {
mdm->abortWithoutSessionIdCheck();
diff --git a/src/mongo/db/s/move_chunk_command.cpp b/src/mongo/db/s/move_chunk_command.cpp
index 235e053a3f8..f736f1b8d2e 100644
--- a/src/mongo/db/s/move_chunk_command.cpp
+++ b/src/mongo/db/s/move_chunk_command.cpp
@@ -103,7 +103,7 @@ public:
}
std::string parseNs(const std::string& dbname, const BSONObj& cmdObj) const override {
- return parseNsFullyQualified(dbname, cmdObj);
+ return CommandHelpers::parseNsFullyQualified(dbname, cmdObj);
}
bool run(OperationContext* opCtx,
@@ -151,7 +151,7 @@ public:
// and the 3.4 shard, which failed to set the ChunkTooBig status code.
// TODO: Remove after 3.6 is released.
result.appendBool("chunkTooBig", true);
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
uassertStatusOK(status);
diff --git a/src/mongo/db/s/split_chunk_command.cpp b/src/mongo/db/s/split_chunk_command.cpp
index bd783fd6b84..4d0060cb941 100644
--- a/src/mongo/db/s/split_chunk_command.cpp
+++ b/src/mongo/db/s/split_chunk_command.cpp
@@ -87,7 +87,7 @@ public:
}
std::string parseNs(const std::string& dbname, const BSONObj& cmdObj) const override {
- return parseNsFullyQualified(dbname, cmdObj);
+ return CommandHelpers::parseNsFullyQualified(dbname, cmdObj);
}
bool errmsgRun(OperationContext* opCtx,
@@ -126,7 +126,7 @@ public:
string shardName;
auto parseShardNameStatus = bsonExtractStringField(cmdObj, "from", &shardName);
if (!parseShardNameStatus.isOK())
- return appendCommandStatus(result, parseShardNameStatus);
+ return CommandHelpers::appendCommandStatus(result, parseShardNameStatus);
log() << "received splitChunk request: " << redact(cmdObj);
diff --git a/src/mongo/db/s/split_vector_command.cpp b/src/mongo/db/s/split_vector_command.cpp
index 5f2ec7da886..3a6fed519ec 100644
--- a/src/mongo/db/s/split_vector_command.cpp
+++ b/src/mongo/db/s/split_vector_command.cpp
@@ -80,7 +80,7 @@ public:
}
std::string parseNs(const string& dbname, const BSONObj& cmdObj) const override {
- return parseNsFullyQualified(dbname, cmdObj);
+ return CommandHelpers::parseNsFullyQualified(dbname, cmdObj);
}
bool errmsgRun(OperationContext* opCtx,
@@ -145,7 +145,7 @@ public:
maxChunkSize,
maxChunkSizeBytes);
if (!statusWithSplitKeys.isOK()) {
- return appendCommandStatus(result, statusWithSplitKeys.getStatus());
+ return CommandHelpers::appendCommandStatus(result, statusWithSplitKeys.getStatus());
}
result.append("splitKeys", statusWithSplitKeys.getValue());
diff --git a/src/mongo/db/service_entry_point_mongod.cpp b/src/mongo/db/service_entry_point_mongod.cpp
index c6acd32c214..d875e12e0a5 100644
--- a/src/mongo/db/service_entry_point_mongod.cpp
+++ b/src/mongo/db/service_entry_point_mongod.cpp
@@ -362,14 +362,14 @@ void _waitForWriteConcernAndAddToCommandResponse(OperationContext* opCtx,
WriteConcernResult res;
auto waitForWCStatus =
waitForWriteConcern(opCtx, lastOpAfterRun, opCtx->getWriteConcern(), &res);
- Command::appendCommandWCStatus(*commandResponseBuilder, waitForWCStatus, res);
+ CommandHelpers::appendCommandWCStatus(*commandResponseBuilder, waitForWCStatus, res);
// SERVER-22421: This code is to ensure error response backwards compatibility with the
// user management commands. This can be removed in 3.6.
- if (!waitForWCStatus.isOK() && Command::isUserManagementCommand(commandName)) {
+ if (!waitForWCStatus.isOK() && CommandHelpers::isUserManagementCommand(commandName)) {
BSONObj temp = commandResponseBuilder->asTempObj().copy();
commandResponseBuilder->resetToEmpty();
- Command::appendCommandStatus(*commandResponseBuilder, waitForWCStatus);
+ CommandHelpers::appendCommandStatus(*commandResponseBuilder, waitForWCStatus);
commandResponseBuilder->appendElementsUnique(temp);
}
}
@@ -457,7 +457,7 @@ bool runCommandImpl(OperationContext* opCtx,
<< redact(command->getRedactedCopyForLogging(request.body));
}
- auto result = Command::appendCommandStatus(inPlaceReplyBob, rcStatus);
+ auto result = CommandHelpers::appendCommandStatus(inPlaceReplyBob, rcStatus);
inPlaceReplyBob.doneFast();
BSONObjBuilder metadataBob;
appendReplyMetadataOnError(opCtx, &metadataBob);
@@ -468,7 +468,7 @@ bool runCommandImpl(OperationContext* opCtx,
bool result;
if (!command->supportsWriteConcern(cmd)) {
if (commandSpecifiesWriteConcern(cmd)) {
- auto result = Command::appendCommandStatus(
+ auto result = CommandHelpers::appendCommandStatus(
inPlaceReplyBob,
{ErrorCodes::InvalidOptions, "Command does not support writeConcern"});
inPlaceReplyBob.doneFast();
@@ -482,7 +482,8 @@ bool runCommandImpl(OperationContext* opCtx,
} else {
auto wcResult = extractWriteConcern(opCtx, cmd, db);
if (!wcResult.isOK()) {
- auto result = Command::appendCommandStatus(inPlaceReplyBob, wcResult.getStatus());
+ auto result =
+ CommandHelpers::appendCommandStatus(inPlaceReplyBob, wcResult.getStatus());
inPlaceReplyBob.doneFast();
BSONObjBuilder metadataBob;
appendReplyMetadataOnError(opCtx, &metadataBob);
@@ -519,7 +520,8 @@ bool runCommandImpl(OperationContext* opCtx,
if (!linearizableReadStatus.isOK()) {
inPlaceReplyBob.resetToEmpty();
- auto result = Command::appendCommandStatus(inPlaceReplyBob, linearizableReadStatus);
+ auto result =
+ CommandHelpers::appendCommandStatus(inPlaceReplyBob, linearizableReadStatus);
inPlaceReplyBob.doneFast();
BSONObjBuilder metadataBob;
appendReplyMetadataOnError(opCtx, &metadataBob);
@@ -528,7 +530,7 @@ bool runCommandImpl(OperationContext* opCtx,
}
}
- Command::appendCommandStatus(inPlaceReplyBob, result);
+ CommandHelpers::appendCommandStatus(inPlaceReplyBob, result);
auto operationTime = computeOperationTime(
opCtx, startOperationTime, repl::ReadConcernArgs::get(opCtx).getLevel());
@@ -607,7 +609,7 @@ void execCommandDatabase(OperationContext* opCtx,
cmdOptionMaxTimeMSField = element;
} else if (fieldName == "allowImplicitCollectionCreation") {
allowImplicitCollectionCreationField = element;
- } else if (fieldName == Command::kHelpFieldName) {
+ } else if (fieldName == CommandHelpers::kHelpFieldName) {
helpField = element;
} else if (fieldName == ChunkVersion::kShardVersionField) {
shardVersionFieldIdx = element;
@@ -621,7 +623,7 @@ void execCommandDatabase(OperationContext* opCtx,
topLevelFields[fieldName]++ == 0);
}
- if (Command::isHelpRequest(helpField)) {
+ if (CommandHelpers::isHelpRequest(helpField)) {
CurOp::get(opCtx)->ensureStarted();
// We disable last-error for help requests due to SERVER-11492, because config servers
// use help requests to determine which commands are database writes, and so must be
@@ -856,7 +858,7 @@ DbResponse runCommands(OperationContext* opCtx, const Message& message) {
// to avoid displaying potentially sensitive information in the logs,
// we restrict the log message to the name of the unrecognized command.
// However, the complete command object will still be echoed to the client.
- if (!(c = Command::findCommand(request.getCommandName()))) {
+ if (!(c = CommandHelpers::findCommand(request.getCommandName()))) {
globalCommandRegistry()->incrementUnknownCommands();
std::string msg = str::stream() << "no such command: '" << request.getCommandName()
<< "'";
diff --git a/src/mongo/db/sessions_collection_config_server.cpp b/src/mongo/db/sessions_collection_config_server.cpp
index 104e787981e..2ea43051aff 100644
--- a/src/mongo/db/sessions_collection_config_server.cpp
+++ b/src/mongo/db/sessions_collection_config_server.cpp
@@ -70,7 +70,7 @@ Status SessionsCollectionConfigServer::_shardCollectionIfNeeded(OperationContext
DBDirectClient client(opCtx);
BSONObj info;
if (!client.runCommand(
- "admin", Command::appendMajorityWriteConcern(shardCollection.toBSON()), info)) {
+ "admin", CommandHelpers::appendMajorityWriteConcern(shardCollection.toBSON()), info)) {
return getStatusFromCommandResult(info);
}
diff --git a/src/mongo/executor/network_test_env.cpp b/src/mongo/executor/network_test_env.cpp
index 79dbd27a992..a1b16aa192e 100644
--- a/src/mongo/executor/network_test_env.cpp
+++ b/src/mongo/executor/network_test_env.cpp
@@ -51,7 +51,7 @@ void NetworkTestEnv::onCommand(OnCommandFunction func) {
if (resultStatus.isOK()) {
BSONObjBuilder result(std::move(resultStatus.getValue()));
- Command::appendCommandStatus(result, resultStatus.getStatus());
+ CommandHelpers::appendCommandStatus(result, resultStatus.getStatus());
const RemoteCommandResponse response(result.obj(), BSONObj(), Milliseconds(1));
_mockNetwork->scheduleResponse(noi, _mockNetwork->now(), response);
@@ -74,7 +74,7 @@ void NetworkTestEnv::onCommandWithMetadata(OnCommandWithMetadataFunction func) {
if (cmdResponseStatus.isOK()) {
BSONObjBuilder result(std::move(cmdResponseStatus.data));
- Command::appendCommandStatus(result, cmdResponseStatus.status);
+ CommandHelpers::appendCommandStatus(result, cmdResponseStatus.status);
const RemoteCommandResponse response(
result.obj(), cmdResponseStatus.metadata, Milliseconds(1));
diff --git a/src/mongo/idl/idl_parser.cpp b/src/mongo/idl/idl_parser.cpp
index 888ea2894bd..a8b591a43db 100644
--- a/src/mongo/idl/idl_parser.cpp
+++ b/src/mongo/idl/idl_parser.cpp
@@ -247,7 +247,7 @@ void IDLParserErrorContext::appendGenericCommandArguments(
StringData name = element.fieldNameStringData();
// Include a passthrough field as long the IDL class has not defined it.
- if (Command::isGenericArgument(name) &&
+ if (CommandHelpers::isGenericArgument(name) &&
std::find(knownFields.begin(), knownFields.end(), name) == knownFields.end()) {
builder->append(element);
}
diff --git a/src/mongo/rpc/command_request_builder.cpp b/src/mongo/rpc/command_request_builder.cpp
index 9ab4e0ce2e4..051989015d5 100644
--- a/src/mongo/rpc/command_request_builder.cpp
+++ b/src/mongo/rpc/command_request_builder.cpp
@@ -40,7 +40,7 @@ namespace rpc {
namespace {
// OP_COMMAND put some generic arguments in the metadata and some in the body.
bool fieldGoesInMetadata(StringData commandName, StringData field) {
- if (!Command::isGenericArgument(field))
+ if (!CommandHelpers::isGenericArgument(field))
return false; // All non-generic arguments go to the body.
// For some reason this goes in the body only for a single command...
diff --git a/src/mongo/s/catalog/sharding_catalog_client_impl.cpp b/src/mongo/s/catalog/sharding_catalog_client_impl.cpp
index 37fc16f3b7a..851b753cff9 100644
--- a/src/mongo/s/catalog/sharding_catalog_client_impl.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_client_impl.cpp
@@ -832,12 +832,12 @@ bool ShardingCatalogClientImpl::runUserManagementWriteCommand(OperationContext*
if (initialCmdHadWriteConcern) {
Status status = writeConcern.parse(writeConcernElement.Obj());
if (!status.isOK()) {
- return Command::appendCommandStatus(*result, status);
+ return CommandHelpers::appendCommandStatus(*result, status);
}
if (!(writeConcern.wNumNodes == 1 ||
writeConcern.wMode == WriteConcernOptions::kMajority)) {
- return Command::appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
*result,
{ErrorCodes::InvalidOptions,
str::stream() << "Invalid replication write concern. User management write "
@@ -876,16 +876,16 @@ bool ShardingCatalogClientImpl::runUserManagementWriteCommand(OperationContext*
Shard::RetryPolicy::kNotIdempotent);
if (!response.isOK()) {
- return Command::appendCommandStatus(*result, response.getStatus());
+ return CommandHelpers::appendCommandStatus(*result, response.getStatus());
}
if (!response.getValue().commandStatus.isOK()) {
- return Command::appendCommandStatus(*result, response.getValue().commandStatus);
+ return CommandHelpers::appendCommandStatus(*result, response.getValue().commandStatus);
}
if (!response.getValue().writeConcernStatus.isOK()) {
- return Command::appendCommandStatus(*result, response.getValue().writeConcernStatus);
+ return CommandHelpers::appendCommandStatus(*result, response.getValue().writeConcernStatus);
}
- Command::filterCommandReplyForPassthrough(response.getValue().response, result);
+ CommandHelpers::filterCommandReplyForPassthrough(response.getValue().response, result);
return true;
}
@@ -905,7 +905,7 @@ bool ShardingCatalogClientImpl::runReadCommandForTest(OperationContext* opCtx,
return resultStatus.getValue().commandStatus.isOK();
}
- return Command::appendCommandStatus(*result, resultStatus.getStatus());
+ return CommandHelpers::appendCommandStatus(*result, resultStatus.getStatus());
}
bool ShardingCatalogClientImpl::runUserManagementReadCommand(OperationContext* opCtx,
@@ -921,11 +921,11 @@ bool ShardingCatalogClientImpl::runUserManagementReadCommand(OperationContext* o
Shard::kDefaultConfigCommandTimeout,
Shard::RetryPolicy::kIdempotent);
if (resultStatus.isOK()) {
- Command::filterCommandReplyForPassthrough(resultStatus.getValue().response, result);
+ CommandHelpers::filterCommandReplyForPassthrough(resultStatus.getValue().response, result);
return resultStatus.getValue().commandStatus.isOK();
}
- return Command::appendCommandStatus(*result, resultStatus.getStatus());
+ return CommandHelpers::appendCommandStatus(*result, resultStatus.getStatus());
}
Status ShardingCatalogClientImpl::applyChunkOpsDeprecated(OperationContext* opCtx,
diff --git a/src/mongo/s/catalog/sharding_catalog_log_change_test.cpp b/src/mongo/s/catalog/sharding_catalog_log_change_test.cpp
index 020371a23c3..1dffcecf95e 100644
--- a/src/mongo/s/catalog/sharding_catalog_log_change_test.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_log_change_test.cpp
@@ -109,8 +109,8 @@ protected:
});
BSONObjBuilder createResponseBuilder;
- Command::appendCommandStatus(createResponseBuilder,
- Status(ErrorCodes::NamespaceExists, "coll already exists"));
+ CommandHelpers::appendCommandStatus(
+ createResponseBuilder, Status(ErrorCodes::NamespaceExists, "coll already exists"));
expectConfigCollectionCreate(
configHost, getConfigCollName(), _cappedSize, createResponseBuilder.obj());
expectConfigCollectionInsert(configHost,
@@ -146,8 +146,8 @@ protected:
});
BSONObjBuilder createResponseBuilder;
- Command::appendCommandStatus(createResponseBuilder,
- Status(ErrorCodes::ExceededTimeLimit, "operation timed out"));
+ CommandHelpers::appendCommandStatus(
+ createResponseBuilder, Status(ErrorCodes::ExceededTimeLimit, "operation timed out"));
expectConfigCollectionCreate(
configHost, getConfigCollName(), _cappedSize, createResponseBuilder.obj());
diff --git a/src/mongo/s/catalog/sharding_catalog_shard_collection_test.cpp b/src/mongo/s/catalog/sharding_catalog_shard_collection_test.cpp
index 3f0161c1783..6170ce5bcb7 100644
--- a/src/mongo/s/catalog/sharding_catalog_shard_collection_test.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_shard_collection_test.cpp
@@ -100,7 +100,7 @@ public:
}
BSONObjBuilder responseBuilder;
- Command::appendCommandStatus(responseBuilder, response.getStatus());
+ CommandHelpers::appendCommandStatus(responseBuilder, response.getStatus());
return responseBuilder.obj();
});
}
diff --git a/src/mongo/s/catalog/sharding_catalog_test.cpp b/src/mongo/s/catalog/sharding_catalog_test.cpp
index 970037bfdfa..5055674bc26 100644
--- a/src/mongo/s/catalog/sharding_catalog_test.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_test.cpp
@@ -593,8 +593,8 @@ TEST_F(ShardingCatalogClientTest, RunUserManagementWriteCommandSuccess) {
rpc::TrackingMetadata::removeTrackingData(request.metadata));
BSONObjBuilder responseBuilder;
- Command::appendCommandStatus(responseBuilder,
- Status(ErrorCodes::UserNotFound, "User test@test not found"));
+ CommandHelpers::appendCommandStatus(
+ responseBuilder, Status(ErrorCodes::UserNotFound, "User test@test not found"));
return responseBuilder.obj();
});
@@ -666,8 +666,8 @@ TEST_F(ShardingCatalogClientTest, RunUserManagementWriteCommandRewriteWriteConce
rpc::TrackingMetadata::removeTrackingData(request.metadata));
BSONObjBuilder responseBuilder;
- Command::appendCommandStatus(responseBuilder,
- Status(ErrorCodes::UserNotFound, "User test@test not found"));
+ CommandHelpers::appendCommandStatus(
+ responseBuilder, Status(ErrorCodes::UserNotFound, "User test@test not found"));
return responseBuilder.obj();
});
@@ -695,8 +695,8 @@ TEST_F(ShardingCatalogClientTest, RunUserManagementWriteCommandNotMaster) {
for (int i = 0; i < 3; ++i) {
onCommand([](const RemoteCommandRequest& request) {
BSONObjBuilder responseBuilder;
- Command::appendCommandStatus(responseBuilder,
- Status(ErrorCodes::NotMaster, "not master"));
+ CommandHelpers::appendCommandStatus(responseBuilder,
+ Status(ErrorCodes::NotMaster, "not master"));
return responseBuilder.obj();
});
}
@@ -729,7 +729,8 @@ TEST_F(ShardingCatalogClientTest, RunUserManagementWriteCommandNotMasterRetrySuc
ASSERT_EQUALS(host1, request.target);
BSONObjBuilder responseBuilder;
- Command::appendCommandStatus(responseBuilder, Status(ErrorCodes::NotMaster, "not master"));
+ CommandHelpers::appendCommandStatus(responseBuilder,
+ Status(ErrorCodes::NotMaster, "not master"));
// Ensure that when the catalog manager tries to retarget after getting the
// NotMaster response, it will get back a new target.
@@ -1233,8 +1234,8 @@ TEST_F(ShardingCatalogClientTest, ApplyChunkOpsDeprecatedSuccessfulWithCheck) {
onCommand([&](const RemoteCommandRequest& request) {
BSONObjBuilder responseBuilder;
- Command::appendCommandStatus(responseBuilder,
- Status(ErrorCodes::DuplicateKey, "precondition failed"));
+ CommandHelpers::appendCommandStatus(
+ responseBuilder, Status(ErrorCodes::DuplicateKey, "precondition failed"));
return responseBuilder.obj();
});
@@ -1281,8 +1282,8 @@ TEST_F(ShardingCatalogClientTest, ApplyChunkOpsDeprecatedFailedWithCheck) {
onCommand([&](const RemoteCommandRequest& request) {
BSONObjBuilder responseBuilder;
- Command::appendCommandStatus(responseBuilder,
- Status(ErrorCodes::NoMatchingDocument, "some error"));
+ CommandHelpers::appendCommandStatus(responseBuilder,
+ Status(ErrorCodes::NoMatchingDocument, "some error"));
return responseBuilder.obj();
});
diff --git a/src/mongo/s/commands/cluster_add_shard_cmd.cpp b/src/mongo/s/commands/cluster_add_shard_cmd.cpp
index e3a35c12e1b..1fee4f8cfb9 100644
--- a/src/mongo/s/commands/cluster_add_shard_cmd.cpp
+++ b/src/mongo/s/commands/cluster_add_shard_cmd.cpp
@@ -98,11 +98,11 @@ public:
opCtx,
kPrimaryOnlyReadPreference,
"admin",
- Command::appendMajorityWriteConcern(
- Command::appendPassthroughFields(cmdObj, parsedRequest.toCommandForConfig())),
+ CommandHelpers::appendMajorityWriteConcern(CommandHelpers::appendPassthroughFields(
+ cmdObj, parsedRequest.toCommandForConfig())),
Shard::RetryPolicy::kIdempotent));
- Command::filterCommandReplyForPassthrough(cmdResponse.response, &result);
+ CommandHelpers::filterCommandReplyForPassthrough(cmdResponse.response, &result);
return true;
}
diff --git a/src/mongo/s/commands/cluster_aggregate.cpp b/src/mongo/s/commands/cluster_aggregate.cpp
index 60fe5bfbb11..f7e55ee4965 100644
--- a/src/mongo/s/commands/cluster_aggregate.cpp
+++ b/src/mongo/s/commands/cluster_aggregate.cpp
@@ -110,7 +110,7 @@ Status appendCursorResponseToCommandResult(const ShardId& shardId,
}
// Pass the results from the remote shard into our command response.
- result->appendElementsUnique(Command::filterCommandReplyForPassthrough(cursorResponse));
+ result->appendElementsUnique(CommandHelpers::filterCommandReplyForPassthrough(cursorResponse));
return getStatusFromCommandResult(result->asTempObj());
}
@@ -322,7 +322,7 @@ Status ClusterAggregate::runAggregate(OperationContext* opCtx,
liteParsedPipeline,
std::move(pipeline),
{});
- Command::filterCommandReplyForPassthrough(cursorResponse, result);
+ CommandHelpers::filterCommandReplyForPassthrough(cursorResponse, result);
return getStatusFromCommandResult(result->asTempObj());
}
@@ -382,7 +382,7 @@ Status ClusterAggregate::runAggregate(OperationContext* opCtx,
// We don't need to storePossibleCursor or propagate writeConcern errors; an $out pipeline
// can never run on mongoS. Filter the command response and return immediately.
- Command::filterCommandReplyForPassthrough(cursorResponse, result);
+ CommandHelpers::filterCommandReplyForPassthrough(cursorResponse, result);
return getStatusFromCommandResult(result->asTempObj());
}
@@ -464,7 +464,7 @@ Status ClusterAggregate::aggPassthrough(OperationContext* opCtx,
// Format the command for the shard. This adds the 'fromMongos' field, wraps the command as an
// explain if necessary, and rewrites the result into a format safe to forward to shards.
- cmdObj = Command::filterCommandRequestForPassthrough(
+ cmdObj = CommandHelpers::filterCommandRequestForPassthrough(
PipelineS::createCommandForTargetedShards(aggRequest, cmdObj, nullptr));
auto cmdResponse = uassertStatusOK(shard->runCommandWithFixedRetryAttempts(
@@ -505,7 +505,7 @@ Status ClusterAggregate::aggPassthrough(OperationContext* opCtx,
appendWriteConcernErrorToCmdResponse(shard->getId(), wcErrorElem, *out);
}
- out->appendElementsUnique(Command::filterCommandReplyForPassthrough(result));
+ out->appendElementsUnique(CommandHelpers::filterCommandReplyForPassthrough(result));
BSONObj responseObj = out->asTempObj();
if (ResolvedView::isResolvedViewErrorResponse(responseObj)) {
diff --git a/src/mongo/s/commands/cluster_commands_helpers.cpp b/src/mongo/s/commands/cluster_commands_helpers.cpp
index 0e0a6ccba31..106a0dd84c5 100644
--- a/src/mongo/s/commands/cluster_commands_helpers.cpp
+++ b/src/mongo/s/commands/cluster_commands_helpers.cpp
@@ -301,7 +301,7 @@ bool appendRawResponses(OperationContext* opCtx,
// Convert the error status back into the form of a command result and append it as the
// raw response.
BSONObjBuilder statusObjBob;
- Command::appendCommandStatus(statusObjBob, sendStatus);
+ CommandHelpers::appendCommandStatus(statusObjBob, sendStatus);
subobj.append(shardConnStr, statusObjBob.obj());
errors.push_back(std::make_pair(shardConnStr, sendStatus));
@@ -313,7 +313,7 @@ bool appendRawResponses(OperationContext* opCtx,
auto& resObj = shardResponse.swResponse.getValue().data;
// Append the shard's raw response.
- subobj.append(shardConnStr, Command::filterCommandReplyForPassthrough(resObj));
+ subobj.append(shardConnStr, CommandHelpers::filterCommandReplyForPassthrough(resObj));
auto commandStatus = getStatusFromCommandResult(resObj);
if (!commandStatus.isOK()) {
@@ -409,7 +409,7 @@ bool appendEmptyResultSet(BSONObjBuilder& result, Status status, const std::stri
return true;
}
- return Command::appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
CachedCollectionRoutingInfo getShardedCollection(OperationContext* opCtx,
@@ -432,13 +432,13 @@ StatusWith<CachedDatabaseInfo> createShardDatabase(OperationContext* opCtx, Stri
auto configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard();
auto createDbStatus =
- uassertStatusOK(
- configShard->runCommandWithFixedRetryAttempts(
- opCtx,
- ReadPreferenceSetting(ReadPreference::PrimaryOnly),
- "admin",
- Command::appendMajorityWriteConcern(configCreateDatabaseRequest.toBSON()),
- Shard::RetryPolicy::kIdempotent))
+ uassertStatusOK(configShard->runCommandWithFixedRetryAttempts(
+ opCtx,
+ ReadPreferenceSetting(ReadPreference::PrimaryOnly),
+ "admin",
+ CommandHelpers::appendMajorityWriteConcern(
+ configCreateDatabaseRequest.toBSON()),
+ Shard::RetryPolicy::kIdempotent))
.commandStatus;
if (createDbStatus.isOK() || createDbStatus == ErrorCodes::NamespaceExists) {
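
Reviewer note: the config-server forwarding pattern that recurs in the cluster_*_cmd.cpp files below is easier to read in isolation. A minimal sketch, assuming only what these call sites imply (appendPassthroughFields and appendMajorityWriteConcern each return a rewritten BSONObj); the _configsvrDropDatabase spelling is taken from the drop-database hunk and stands in for any of the _configsvr* commands used below:

    // Sketch: rewrite the user command for the config server, preserve passthrough
    // fields, force majority write concern, then copy the filtered reply back into
    // this command's result builder.
    auto cmdResponse = uassertStatusOK(configShard->runCommandWithFixedRetryAttempts(
        opCtx,
        ReadPreferenceSetting(ReadPreference::PrimaryOnly),
        "admin",
        CommandHelpers::appendMajorityWriteConcern(CommandHelpers::appendPassthroughFields(
            cmdObj, BSON("_configsvrDropDatabase" << dbname))),
        Shard::RetryPolicy::kIdempotent));
    CommandHelpers::filterCommandReplyForPassthrough(cmdResponse.response, &result);
    return true;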
diff --git a/src/mongo/s/commands/cluster_control_balancer_cmd.cpp b/src/mongo/s/commands/cluster_control_balancer_cmd.cpp
index 65d955efc8f..17f88f3d655 100644
--- a/src/mongo/s/commands/cluster_control_balancer_cmd.cpp
+++ b/src/mongo/s/commands/cluster_control_balancer_cmd.cpp
@@ -91,7 +91,7 @@ public:
uassertStatusOK(cmdResponse.commandStatus);
// Append any return value from the response, which the config server returned
- filterCommandReplyForPassthrough(cmdResponse.response, &result);
+ CommandHelpers::filterCommandReplyForPassthrough(cmdResponse.response, &result);
return true;
}
diff --git a/src/mongo/s/commands/cluster_count_cmd.cpp b/src/mongo/s/commands/cluster_count_cmd.cpp
index e88eb8c00cc..3c34593cc95 100644
--- a/src/mongo/s/commands/cluster_count_cmd.cpp
+++ b/src/mongo/s/commands/cluster_count_cmd.cpp
@@ -110,7 +110,7 @@ public:
if (status.isOK()) {
collation = collationElement.Obj();
} else if (status != ErrorCodes::NoSuchKey) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
if (cmdObj["limit"].isNumber()) {
@@ -154,7 +154,7 @@ public:
if (ErrorCodes::CommandOnShardedViewNotSupportedOnMongod == swShardResponses.getStatus()) {
if (viewDefinition.isEmpty()) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
{ErrorCodes::InternalError,
str::stream()
@@ -166,17 +166,17 @@ public:
auto countRequest = CountRequest::parseFromBSON(nss, cmdObj, false);
if (!countRequest.isOK()) {
- return appendCommandStatus(result, countRequest.getStatus());
+ return CommandHelpers::appendCommandStatus(result, countRequest.getStatus());
}
auto aggCmdOnView = countRequest.getValue().asAggregationCommand();
if (!aggCmdOnView.isOK()) {
- return appendCommandStatus(result, aggCmdOnView.getStatus());
+ return CommandHelpers::appendCommandStatus(result, aggCmdOnView.getStatus());
}
auto aggRequestOnView = AggregationRequest::parseFromBSON(nss, aggCmdOnView.getValue());
if (!aggRequestOnView.isOK()) {
- return appendCommandStatus(result, aggRequestOnView.getStatus());
+ return CommandHelpers::appendCommandStatus(result, aggRequestOnView.getStatus());
}
auto resolvedView = ResolvedView::fromBSON(viewDefinition);
@@ -184,14 +184,14 @@ public:
resolvedView.asExpandedViewAggregation(aggRequestOnView.getValue());
auto resolvedAggCmd = resolvedAggRequest.serializeToCommandObj().toBson();
- BSONObj aggResult = Command::runCommandDirectly(
+ BSONObj aggResult = CommandHelpers::runCommandDirectly(
opCtx, OpMsgRequest::fromDBAndBody(dbname, std::move(resolvedAggCmd)));
result.resetToEmpty();
ViewResponseFormatter formatter(aggResult);
auto formatStatus = formatter.appendAsCountResponse(&result);
if (!formatStatus.isOK()) {
- return appendCommandStatus(result, formatStatus);
+ return CommandHelpers::appendCommandStatus(result, formatStatus);
}
return true;
@@ -228,7 +228,7 @@ public:
auto errorWithContext = Status(status.code(),
str::stream() << "failed on: " << response.shardId
<< causedBy(status.reason()));
- return appendCommandStatus(result, errorWithContext);
+ return CommandHelpers::appendCommandStatus(result, errorWithContext);
}
shardSubTotal.doneFast();
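
Reviewer note: the view branch above is the one non-mechanical control flow in this file. A compressed sketch of that branch, assuming the parse/serialize helpers behave as their uses here suggest; error handling is collapsed to uassertStatusOK for brevity, whereas the real code appends each failing Status to the reply and returns false:

    // Sketch: count on a view is rewritten as an aggregation and re-dispatched locally.
    auto countRequest = uassertStatusOK(CountRequest::parseFromBSON(nss, cmdObj, false));
    auto aggCmdOnView = uassertStatusOK(countRequest.asAggregationCommand());
    auto aggRequestOnView =
        uassertStatusOK(AggregationRequest::parseFromBSON(nss, aggCmdOnView));
    auto resolvedAggRequest =
        ResolvedView::fromBSON(viewDefinition).asExpandedViewAggregation(aggRequestOnView);
    BSONObj aggResult = CommandHelpers::runCommandDirectly(
        opCtx,
        OpMsgRequest::fromDBAndBody(dbname,
                                    resolvedAggRequest.serializeToCommandObj().toBson()));
    // Reformat the aggregation reply so the caller still sees a count-shaped response.
    result.resetToEmpty();
    ViewResponseFormatter formatter(aggResult);
    uassertStatusOK(formatter.appendAsCountResponse(&result));
    return true;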
diff --git a/src/mongo/s/commands/cluster_current_op.cpp b/src/mongo/s/commands/cluster_current_op.cpp
index 3dad4b0c6bd..4cc75ffc87b 100644
--- a/src/mongo/s/commands/cluster_current_op.cpp
+++ b/src/mongo/s/commands/cluster_current_op.cpp
@@ -85,7 +85,7 @@ private:
return status;
}
- appendCommandStatus(responseBuilder, Status::OK());
+ CommandHelpers::appendCommandStatus(responseBuilder, Status::OK());
return CursorResponse::parseFromBSON(responseBuilder.obj());
}
diff --git a/src/mongo/s/commands/cluster_db_stats_cmd.cpp b/src/mongo/s/commands/cluster_db_stats_cmd.cpp
index 54edc5c9db4..4f11dc5c146 100644
--- a/src/mongo/s/commands/cluster_db_stats_cmd.cpp
+++ b/src/mongo/s/commands/cluster_db_stats_cmd.cpp
@@ -68,13 +68,13 @@ public:
const BSONObj& cmdObj,
std::string& errmsg,
BSONObjBuilder& output) override {
- auto shardResponses = uassertStatusOK(
- scatterGatherUnversionedTargetAllShards(opCtx,
- dbName,
- boost::none,
- filterCommandRequestForPassthrough(cmdObj),
- ReadPreferenceSetting::get(opCtx),
- Shard::RetryPolicy::kIdempotent));
+ auto shardResponses = uassertStatusOK(scatterGatherUnversionedTargetAllShards(
+ opCtx,
+ dbName,
+ boost::none,
+ CommandHelpers::filterCommandRequestForPassthrough(cmdObj),
+ ReadPreferenceSetting::get(opCtx),
+ Shard::RetryPolicy::kIdempotent));
if (!appendRawResponses(opCtx, &errmsg, &output, shardResponses)) {
return false;
}
diff --git a/src/mongo/s/commands/cluster_drop_cmd.cpp b/src/mongo/s/commands/cluster_drop_cmd.cpp
index feb0d75455e..6a8081c20c2 100644
--- a/src/mongo/s/commands/cluster_drop_cmd.cpp
+++ b/src/mongo/s/commands/cluster_drop_cmd.cpp
@@ -70,7 +70,7 @@ public:
const BSONObj& cmdObj,
BSONObjBuilder& result) override {
- const NamespaceString nss(parseNsCollectionRequired(dbname, cmdObj));
+ const NamespaceString nss(CommandHelpers::parseNsCollectionRequired(dbname, cmdObj));
// Invalidate the routing table cache entry for this collection so that we reload it the
// next time it is accessed, even if sending the command to the config server fails due
@@ -83,11 +83,11 @@ public:
opCtx,
ReadPreferenceSetting(ReadPreference::PrimaryOnly),
"admin",
- Command::appendMajorityWriteConcern(Command::appendPassthroughFields(
+ CommandHelpers::appendMajorityWriteConcern(CommandHelpers::appendPassthroughFields(
cmdObj, BSON("_configsvrDropCollection" << nss.toString()))),
Shard::RetryPolicy::kIdempotent));
- Command::filterCommandReplyForPassthrough(cmdResponse.response, &result);
+ CommandHelpers::filterCommandReplyForPassthrough(cmdResponse.response, &result);
return true;
}
diff --git a/src/mongo/s/commands/cluster_drop_database_cmd.cpp b/src/mongo/s/commands/cluster_drop_database_cmd.cpp
index 6285acdec75..da0380565eb 100644
--- a/src/mongo/s/commands/cluster_drop_database_cmd.cpp
+++ b/src/mongo/s/commands/cluster_drop_database_cmd.cpp
@@ -88,11 +88,11 @@ public:
opCtx,
ReadPreferenceSetting(ReadPreference::PrimaryOnly),
"admin",
- Command::appendMajorityWriteConcern(
- Command::appendPassthroughFields(cmdObj, BSON("_configsvrDropDatabase" << dbname))),
+ CommandHelpers::appendMajorityWriteConcern(CommandHelpers::appendPassthroughFields(
+ cmdObj, BSON("_configsvrDropDatabase" << dbname))),
Shard::RetryPolicy::kIdempotent));
- Command::filterCommandReplyForPassthrough(cmdResponse.response, &result);
+ CommandHelpers::filterCommandReplyForPassthrough(cmdResponse.response, &result);
return true;
}
diff --git a/src/mongo/s/commands/cluster_enable_sharding_cmd.cpp b/src/mongo/s/commands/cluster_enable_sharding_cmd.cpp
index 3ef6624984b..fd5e6b566b6 100644
--- a/src/mongo/s/commands/cluster_enable_sharding_cmd.cpp
+++ b/src/mongo/s/commands/cluster_enable_sharding_cmd.cpp
@@ -102,11 +102,11 @@ public:
opCtx,
ReadPreferenceSetting(ReadPreference::PrimaryOnly),
"admin",
- Command::appendMajorityWriteConcern(
- Command::appendPassthroughFields(cmdObj, BSON("_configsvrEnableSharding" << db))),
+ CommandHelpers::appendMajorityWriteConcern(CommandHelpers::appendPassthroughFields(
+ cmdObj, BSON("_configsvrEnableSharding" << db))),
Shard::RetryPolicy::kIdempotent));
- Command::filterCommandReplyForPassthrough(cmdResponse.response, &result);
+ CommandHelpers::filterCommandReplyForPassthrough(cmdResponse.response, &result);
return true;
}
diff --git a/src/mongo/s/commands/cluster_explain.cpp b/src/mongo/s/commands/cluster_explain.cpp
index 33dabb52f74..c18ba90aedc 100644
--- a/src/mongo/s/commands/cluster_explain.cpp
+++ b/src/mongo/s/commands/cluster_explain.cpp
@@ -118,7 +118,7 @@ std::vector<Strategy::CommandResult> ClusterExplain::downconvert(
}
// Convert the error status back into the format of a command result.
BSONObjBuilder statusObjBob;
- Command::appendCommandStatus(statusObjBob, status);
+ CommandHelpers::appendCommandStatus(statusObjBob, status);
// Get the Shard object in order to get the ConnectionString.
auto shard =
@@ -130,7 +130,7 @@ std::vector<Strategy::CommandResult> ClusterExplain::downconvert(
// static
BSONObj ClusterExplain::wrapAsExplain(const BSONObj& cmdObj, ExplainOptions::Verbosity verbosity) {
- auto filtered = Command::filterCommandRequestForPassthrough(cmdObj);
+ auto filtered = CommandHelpers::filterCommandRequestForPassthrough(cmdObj);
BSONObjBuilder out;
out.append("explain", filtered);
out.append("verbosity", ExplainOptions::verbosityString(verbosity));
@@ -138,7 +138,7 @@ BSONObj ClusterExplain::wrapAsExplain(const BSONObj& cmdObj, ExplainOptions::Ver
// Propagate all generic arguments out of the inner command since the shards will only process
// them at the top level.
for (auto elem : filtered) {
- if (Command::isGenericArgument(elem.fieldNameStringData())) {
+ if (CommandHelpers::isGenericArgument(elem.fieldNameStringData())) {
out.append(elem);
}
}
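
Reviewer note: wrapAsExplain is the one helper in this file whose behavior is easy to lose in the rename noise. A minimal sketch of what it builds, using only the calls visible in these hunks:

    // Sketch: wrap the filtered command as {explain: <cmd>, verbosity: <level>} and
    // hoist generic arguments (e.g. maxTimeMS) to the top level, where shards expect
    // to process them.
    auto filtered = CommandHelpers::filterCommandRequestForPassthrough(cmdObj);
    BSONObjBuilder out;
    out.append("explain", filtered);
    out.append("verbosity", ExplainOptions::verbosityString(verbosity));
    for (auto elem : filtered) {
        if (CommandHelpers::isGenericArgument(elem.fieldNameStringData())) {
            out.append(elem);
        }
    }
    return out.obj();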
diff --git a/src/mongo/s/commands/cluster_explain_cmd.cpp b/src/mongo/s/commands/cluster_explain_cmd.cpp
index 0b7897f416c..a02f11943c6 100644
--- a/src/mongo/s/commands/cluster_explain_cmd.cpp
+++ b/src/mongo/s/commands/cluster_explain_cmd.cpp
@@ -95,7 +95,7 @@ public:
BSONObj explainObj = cmdObj.firstElement().Obj();
- Command* commToExplain = Command::findCommand(explainObj.firstElementFieldName());
+ Command* commToExplain = CommandHelpers::findCommand(explainObj.firstElementFieldName());
if (NULL == commToExplain) {
mongoutils::str::stream ss;
ss << "unknown command: " << explainObj.firstElementFieldName();
@@ -112,7 +112,7 @@ public:
BSONObjBuilder& result) {
auto verbosity = ExplainOptions::parseCmdBSON(cmdObj);
if (!verbosity.isOK()) {
- return appendCommandStatus(result, verbosity.getStatus());
+ return CommandHelpers::appendCommandStatus(result, verbosity.getStatus());
}
// This is the nested command which we are explaining. We need to propagate generic
@@ -134,7 +134,7 @@ public:
// If the argument is in both the inner and outer command, we currently let the
// inner version take precedence.
const auto name = outerElem.fieldNameStringData();
- if (Command::isGenericArgument(name) && !innerObj.hasField(name)) {
+ if (CommandHelpers::isGenericArgument(name) && !innerObj.hasField(name)) {
bob.append(outerElem);
}
}
@@ -142,9 +142,9 @@ public:
}());
const std::string cmdName = explainObj.firstElementFieldName();
- Command* commToExplain = Command::findCommand(cmdName);
+ Command* commToExplain = CommandHelpers::findCommand(cmdName);
if (!commToExplain) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
Status{ErrorCodes::CommandNotFound,
str::stream() << "Explain failed due to unknown command: " << cmdName});
@@ -154,7 +154,7 @@ public:
Status explainStatus =
commToExplain->explain(opCtx, dbName, explainObj, verbosity.getValue(), &result);
if (!explainStatus.isOK()) {
- return appendCommandStatus(result, explainStatus);
+ return CommandHelpers::appendCommandStatus(result, explainStatus);
}
return true;
diff --git a/src/mongo/s/commands/cluster_find_and_modify_cmd.cpp b/src/mongo/s/commands/cluster_find_and_modify_cmd.cpp
index a07a4ae5b88..5dbc3f994b4 100644
--- a/src/mongo/s/commands/cluster_find_and_modify_cmd.cpp
+++ b/src/mongo/s/commands/cluster_find_and_modify_cmd.cpp
@@ -101,7 +101,7 @@ public:
const BSONObj& cmdObj,
ExplainOptions::Verbosity verbosity,
BSONObjBuilder* out) const override {
- const NamespaceString nss(parseNsCollectionRequired(dbName, cmdObj));
+ const NamespaceString nss(CommandHelpers::parseNsCollectionRequired(dbName, cmdObj));
auto routingInfo =
uassertStatusOK(Grid::get(opCtx)->catalogCache()->getCollectionRoutingInfo(opCtx, nss));
@@ -152,7 +152,7 @@ public:
const std::string& dbName,
const BSONObj& cmdObj,
BSONObjBuilder& result) override {
- const NamespaceString nss(parseNsCollectionRequired(dbName, cmdObj));
+ const NamespaceString nss(CommandHelpers::parseNsCollectionRequired(dbName, cmdObj));
// findAndModify should only be creating database if upsert is true, but this would require
// that the parsing be pulled into this function.
@@ -196,7 +196,8 @@ private:
std::vector<AsyncRequestsSender::Request> requests;
requests.emplace_back(
shardId,
- appendShardVersion(filterCommandRequestForPassthrough(cmdObj), shardVersion));
+ appendShardVersion(CommandHelpers::filterCommandRequestForPassthrough(cmdObj),
+ shardVersion));
AsyncRequestsSender ars(opCtx,
Grid::get(opCtx)->getExecutorPool()->getArbitraryExecutor(),
@@ -226,7 +227,8 @@ private:
appendWriteConcernErrorToCmdResponse(shardId, wcErrorElem, *result);
}
- result->appendElementsUnique(filterCommandReplyForPassthrough(response.data));
+ result->appendElementsUnique(
+ CommandHelpers::filterCommandReplyForPassthrough(response.data));
}
} findAndModifyCmd;
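
Reviewer note: the targeted-write path in this file is a compact example of the request/reply filtering pair. A sketch of the single-shard dispatch, assuming appendShardVersion(BSONObj, ChunkVersion) attaches the shardVersion field as its call site here implies; the AsyncRequestsSender plumbing between the two halves is elided:

    // Sketch: strip generic fields, stamp the shard version, send to one shard, then
    // merge the filtered reply (plus any writeConcernError) into the local result.
    std::vector<AsyncRequestsSender::Request> requests;
    requests.emplace_back(
        shardId,
        appendShardVersion(CommandHelpers::filterCommandRequestForPassthrough(cmdObj),
                           shardVersion));
    // ... dispatch via AsyncRequestsSender, then for the single response received:
    if (auto wcErrorElem = response.data["writeConcernError"]) {
        appendWriteConcernErrorToCmdResponse(shardId, wcErrorElem, *result);
    }
    result->appendElementsUnique(
        CommandHelpers::filterCommandReplyForPassthrough(response.data));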
diff --git a/src/mongo/s/commands/cluster_find_cmd.cpp b/src/mongo/s/commands/cluster_find_cmd.cpp
index 51d796f21fb..eb54af9caaf 100644
--- a/src/mongo/s/commands/cluster_find_cmd.cpp
+++ b/src/mongo/s/commands/cluster_find_cmd.cpp
@@ -106,7 +106,7 @@ public:
const BSONObj& cmdObj,
ExplainOptions::Verbosity verbosity,
BSONObjBuilder* out) const final {
- const NamespaceString nss(parseNsCollectionRequired(dbname, cmdObj));
+ const NamespaceString nss(CommandHelpers::parseNsCollectionRequired(dbname, cmdObj));
// Parse the command BSON to a QueryRequest.
bool isExplain = true;
auto qr = QueryRequest::makeFromFindCommand(std::move(nss), cmdObj, isExplain);
@@ -142,7 +142,7 @@ public:
auto status = ClusterAggregate::runAggregate(
opCtx, nsStruct, resolvedAggRequest, resolvedAggCmd, out);
- appendCommandStatus(*out, status);
+ CommandHelpers::appendCommandStatus(*out, status);
return status;
}
@@ -156,12 +156,12 @@ public:
// We count find command as a query op.
globalOpCounters.gotQuery();
- const NamespaceString nss(parseNsCollectionRequired(dbname, cmdObj));
+ const NamespaceString nss(CommandHelpers::parseNsCollectionRequired(dbname, cmdObj));
const bool isExplain = false;
auto qr = QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain);
if (!qr.isOK()) {
- return appendCommandStatus(result, qr.getStatus());
+ return CommandHelpers::appendCommandStatus(result, qr.getStatus());
}
const boost::intrusive_ptr<ExpressionContext> expCtx;
@@ -171,7 +171,7 @@ public:
ExtensionsCallbackNoop(),
MatchExpressionParser::kAllowAllSpecialFeatures);
if (!cq.isOK()) {
- return appendCommandStatus(result, cq.getStatus());
+ return CommandHelpers::appendCommandStatus(result, cq.getStatus());
}
// Do the work to generate the first batch of results. This blocks waiting to get responses
@@ -184,13 +184,14 @@ public:
if (cursorId.getStatus() == ErrorCodes::CommandOnShardedViewNotSupportedOnMongod) {
auto aggCmdOnView = cq.getValue()->getQueryRequest().asAggregationCommand();
if (!aggCmdOnView.isOK()) {
- return appendCommandStatus(result, aggCmdOnView.getStatus());
+ return CommandHelpers::appendCommandStatus(result, aggCmdOnView.getStatus());
}
auto aggRequestOnView =
AggregationRequest::parseFromBSON(nss, aggCmdOnView.getValue());
if (!aggRequestOnView.isOK()) {
- return appendCommandStatus(result, aggRequestOnView.getStatus());
+ return CommandHelpers::appendCommandStatus(result,
+ aggRequestOnView.getStatus());
}
auto resolvedView = ResolvedView::fromBSON(viewDefinition);
@@ -208,11 +209,11 @@ public:
auto status = ClusterAggregate::runAggregate(
opCtx, nsStruct, resolvedAggRequest, resolvedAggCmd, &result);
- appendCommandStatus(result, status);
+ CommandHelpers::appendCommandStatus(result, status);
return status.isOK();
}
- return appendCommandStatus(result, cursorId.getStatus());
+ return CommandHelpers::appendCommandStatus(result, cursorId.getStatus());
}
// Build the response document.
diff --git a/src/mongo/s/commands/cluster_get_last_error_cmd.cpp b/src/mongo/s/commands/cluster_get_last_error_cmd.cpp
index 53fb6d5f7e8..e7150e595b3 100644
--- a/src/mongo/s/commands/cluster_get_last_error_cmd.cpp
+++ b/src/mongo/s/commands/cluster_get_last_error_cmd.cpp
@@ -247,8 +247,12 @@ public:
const HostOpTimeMap hostOpTimes(ClusterLastErrorInfo::get(cc())->getPrevHostOpTimes());
std::vector<LegacyWCResponse> wcResponses;
- auto status = enforceLegacyWriteConcern(
- opCtx, dbname, filterCommandRequestForPassthrough(cmdObj), hostOpTimes, &wcResponses);
+ auto status =
+ enforceLegacyWriteConcern(opCtx,
+ dbname,
+ CommandHelpers::filterCommandRequestForPassthrough(cmdObj),
+ hostOpTimes,
+ &wcResponses);
// Don't forget about our last hosts, reset the client info
ClusterLastErrorInfo::get(cc())->disableForCommand();
@@ -308,7 +312,7 @@ public:
if (numWCErrors == 1) {
// Return the single write concern error we found, err should be set or not
// from gle response
- filterCommandReplyForPassthrough(lastErrResponse->gleResponse, &result);
+ CommandHelpers::filterCommandReplyForPassthrough(lastErrResponse->gleResponse, &result);
return lastErrResponse->gleResponse["ok"].trueValue();
} else {
// Return a generic combined WC error message
@@ -318,7 +322,7 @@ public:
// Need to always return err
result.appendNull("err");
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
Status(ErrorCodes::WriteConcernFailed, "multiple write concern errors occurred"));
}
diff --git a/src/mongo/s/commands/cluster_get_shard_version_cmd.cpp b/src/mongo/s/commands/cluster_get_shard_version_cmd.cpp
index 6758342d307..4ceb6cbcf64 100644
--- a/src/mongo/s/commands/cluster_get_shard_version_cmd.cpp
+++ b/src/mongo/s/commands/cluster_get_shard_version_cmd.cpp
@@ -76,7 +76,7 @@ public:
}
std::string parseNs(const std::string& dbname, const BSONObj& cmdObj) const override {
- return parseNsFullyQualified(dbname, cmdObj);
+ return CommandHelpers::parseNsFullyQualified(dbname, cmdObj);
}
bool run(OperationContext* opCtx,
diff --git a/src/mongo/s/commands/cluster_getmore_cmd.cpp b/src/mongo/s/commands/cluster_getmore_cmd.cpp
index dcf15e263ac..c90eeaff5ad 100644
--- a/src/mongo/s/commands/cluster_getmore_cmd.cpp
+++ b/src/mongo/s/commands/cluster_getmore_cmd.cpp
@@ -100,13 +100,13 @@ public:
StatusWith<GetMoreRequest> parseStatus = GetMoreRequest::parseFromBSON(dbname, cmdObj);
if (!parseStatus.isOK()) {
- return appendCommandStatus(result, parseStatus.getStatus());
+ return CommandHelpers::appendCommandStatus(result, parseStatus.getStatus());
}
const GetMoreRequest& request = parseStatus.getValue();
auto response = ClusterFind::runGetMore(opCtx, request);
if (!response.isOK()) {
- return appendCommandStatus(result, response.getStatus());
+ return CommandHelpers::appendCommandStatus(result, response.getStatus());
}
response.getValue().addToBSON(CursorResponse::ResponseType::SubsequentResponse, &result);
diff --git a/src/mongo/s/commands/cluster_index_filter_cmd.cpp b/src/mongo/s/commands/cluster_index_filter_cmd.cpp
index b050368e3a0..97437c8bbef 100644
--- a/src/mongo/s/commands/cluster_index_filter_cmd.cpp
+++ b/src/mongo/s/commands/cluster_index_filter_cmd.cpp
@@ -107,7 +107,7 @@ public:
const BSONObj query;
Strategy::commandOp(opCtx,
dbname,
- filterCommandRequestForPassthrough(cmdObj),
+ CommandHelpers::filterCommandRequestForPassthrough(cmdObj),
nss.ns(),
query,
CollationSpec::kSimpleSpec,
@@ -124,7 +124,7 @@ public:
// XXX: In absence of sensible aggregation strategy,
// promote first shard's result to top level.
if (i == results.begin()) {
- filterCommandReplyForPassthrough(cmdResult.result, &result);
+ CommandHelpers::filterCommandReplyForPassthrough(cmdResult.result, &result);
clusterCmdResult = cmdResult.result["ok"].trueValue();
}
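
Reviewer note: this file and cluster_plan_cache_cmd.cpp below share the same reply handling, which the hunks only show in fragments. A sketch reconstructed from the visible lines, under the assumption that Strategy::commandOp has already filled `results` with one CommandResult per targeted shard; handling of the remaining shards' results is elided here:

    // Sketch: lacking a real merge strategy, promote the first shard's filtered reply
    // to the top level of the mongos response and use its "ok" field as the verdict.
    bool clusterCmdResult = true;
    for (auto i = results.begin(); i != results.end(); ++i) {
        const Strategy::CommandResult& cmdResult = *i;
        if (i == results.begin()) {
            CommandHelpers::filterCommandReplyForPassthrough(cmdResult.result, &result);
            clusterCmdResult = cmdResult.result["ok"].trueValue();
        }
    }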
diff --git a/src/mongo/s/commands/cluster_is_master_cmd.cpp b/src/mongo/s/commands/cluster_is_master_cmd.cpp
index fa4d556d922..2e36478d263 100644
--- a/src/mongo/s/commands/cluster_is_master_cmd.cpp
+++ b/src/mongo/s/commands/cluster_is_master_cmd.cpp
@@ -81,7 +81,7 @@ public:
BSONElement element = cmdObj[kMetadataDocumentName];
if (!element.eoo()) {
if (seenIsMaster) {
- return Command::appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
Status(ErrorCodes::ClientMetadataCannotBeMutated,
"The client metadata document may only be sent in the first isMaster"));
@@ -90,7 +90,8 @@ public:
auto swParseClientMetadata = ClientMetadata::parse(element);
if (!swParseClientMetadata.getStatus().isOK()) {
- return Command::appendCommandStatus(result, swParseClientMetadata.getStatus());
+ return CommandHelpers::appendCommandStatus(result,
+ swParseClientMetadata.getStatus());
}
invariant(swParseClientMetadata.getValue());
diff --git a/src/mongo/s/commands/cluster_kill_op.cpp b/src/mongo/s/commands/cluster_kill_op.cpp
index 9ede8a5661a..bfeb02ea493 100644
--- a/src/mongo/s/commands/cluster_kill_op.cpp
+++ b/src/mongo/s/commands/cluster_kill_op.cpp
@@ -103,7 +103,7 @@ public:
// Will throw if shard id is not found
auto shardStatus = grid.shardRegistry()->getShard(opCtx, shardIdent);
if (!shardStatus.isOK()) {
- return appendCommandStatus(result, shardStatus.getStatus());
+ return CommandHelpers::appendCommandStatus(result, shardStatus.getStatus());
}
auto shard = shardStatus.getValue();
diff --git a/src/mongo/s/commands/cluster_list_databases_cmd.cpp b/src/mongo/s/commands/cluster_list_databases_cmd.cpp
index 3dbbf2f6e9b..1e0d0c16778 100644
--- a/src/mongo/s/commands/cluster_list_databases_cmd.cpp
+++ b/src/mongo/s/commands/cluster_list_databases_cmd.cpp
@@ -102,7 +102,7 @@ public:
grid.shardRegistry()->getAllShardIds(&shardIds);
shardIds.emplace_back(ShardRegistry::kConfigServerShardId);
- auto filteredCmd = filterCommandRequestForPassthrough(cmdObj);
+ auto filteredCmd = CommandHelpers::filterCommandRequestForPassthrough(cmdObj);
for (const ShardId& shardId : shardIds) {
const auto shardStatus = grid.shardRegistry()->getShard(opCtx, shardId);
diff --git a/src/mongo/s/commands/cluster_map_reduce_cmd.cpp b/src/mongo/s/commands/cluster_map_reduce_cmd.cpp
index 88ad5925067..8b1376ad3aa 100644
--- a/src/mongo/s/commands/cluster_map_reduce_cmd.cpp
+++ b/src/mongo/s/commands/cluster_map_reduce_cmd.cpp
@@ -93,7 +93,7 @@ BSONObj fixForShards(const BSONObj& orig,
b.append(e);
} else if (fn == "out" || fn == "finalize" || fn == "writeConcern") {
// We don't want to copy these
- } else if (!Command::isGenericArgument(fn)) {
+ } else if (!CommandHelpers::isGenericArgument(fn)) {
badShardedField = fn.toString();
return BSONObj();
}
@@ -160,7 +160,7 @@ public:
}
std::string parseNs(const std::string& dbname, const BSONObj& cmdObj) const override {
- return parseNsCollectionRequired(dbname, cmdObj).ns();
+ return CommandHelpers::parseNsCollectionRequired(dbname, cmdObj).ns();
}
bool supportsWriteConcern(const BSONObj& cmd) const override {
@@ -283,7 +283,8 @@ public:
ShardConnection conn(inputRoutingInfo.primary()->getConnString(), "");
BSONObj res;
- bool ok = conn->runCommand(dbname, filterCommandRequestForPassthrough(cmdObj), res);
+ bool ok = conn->runCommand(
+ dbname, CommandHelpers::filterCommandRequestForPassthrough(cmdObj), res);
conn.done();
if (auto wcErrorElem = res["writeConcernError"]) {
@@ -291,7 +292,7 @@ public:
inputRoutingInfo.primary()->getId(), wcErrorElem, result);
}
- result.appendElementsUnique(filterCommandReplyForPassthrough(res));
+ result.appendElementsUnique(CommandHelpers::filterCommandReplyForPassthrough(res));
return ok;
}
@@ -493,7 +494,7 @@ public:
auto scopedDistLock = catalogClient->getDistLockManager()->lock(
opCtx, outputCollNss.ns(), "mr-post-process", kNoDistLockTimeout);
if (!scopedDistLock.isOK()) {
- return appendCommandStatus(result, scopedDistLock.getStatus());
+ return CommandHelpers::appendCommandStatus(result, scopedDistLock.getStatus());
}
BSONObj finalCmdObj = finalCmd.obj();
diff --git a/src/mongo/s/commands/cluster_merge_chunks_cmd.cpp b/src/mongo/s/commands/cluster_merge_chunks_cmd.cpp
index db51420bf21..463203917df 100644
--- a/src/mongo/s/commands/cluster_merge_chunks_cmd.cpp
+++ b/src/mongo/s/commands/cluster_merge_chunks_cmd.cpp
@@ -73,7 +73,7 @@ public:
}
std::string parseNs(const std::string& dbname, const BSONObj& cmdObj) const override {
- return parseNsFullyQualified(dbname, cmdObj);
+ return CommandHelpers::parseNsFullyQualified(dbname, cmdObj);
}
bool adminOnly() const override {
@@ -167,7 +167,7 @@ public:
const auto shardStatus =
Grid::get(opCtx)->shardRegistry()->getShard(opCtx, firstChunk->getShardId());
if (!shardStatus.isOK()) {
- return appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
result,
Status(ErrorCodes::ShardNotFound,
str::stream() << "Can't find shard for chunk: " << firstChunk->toString()));
@@ -179,7 +179,7 @@ public:
Grid::get(opCtx)->catalogCache()->onStaleConfigError(std::move(routingInfo));
- filterCommandReplyForPassthrough(remoteResult, &result);
+ CommandHelpers::filterCommandReplyForPassthrough(remoteResult, &result);
return ok;
}
diff --git a/src/mongo/s/commands/cluster_move_chunk_cmd.cpp b/src/mongo/s/commands/cluster_move_chunk_cmd.cpp
index 0af81436129..954da092625 100644
--- a/src/mongo/s/commands/cluster_move_chunk_cmd.cpp
+++ b/src/mongo/s/commands/cluster_move_chunk_cmd.cpp
@@ -88,7 +88,7 @@ public:
}
std::string parseNs(const std::string& dbname, const BSONObj& cmdObj) const override {
- return parseNsFullyQualified(dbname, cmdObj);
+ return CommandHelpers::parseNsFullyQualified(dbname, cmdObj);
}
bool errmsgRun(OperationContext* opCtx,
@@ -122,7 +122,8 @@ public:
<< toString
<< "' because that shard does not exist");
log() << msg;
- return appendCommandStatus(result, Status(ErrorCodes::ShardNotFound, msg));
+ return CommandHelpers::appendCommandStatus(result,
+ Status(ErrorCodes::ShardNotFound, msg));
}
const auto to = toStatus.getValue();
diff --git a/src/mongo/s/commands/cluster_move_primary_cmd.cpp b/src/mongo/s/commands/cluster_move_primary_cmd.cpp
index 4f244b9d67e..a15aed40fd4 100644
--- a/src/mongo/s/commands/cluster_move_primary_cmd.cpp
+++ b/src/mongo/s/commands/cluster_move_primary_cmd.cpp
@@ -116,11 +116,11 @@ public:
opCtx,
ReadPreferenceSetting(ReadPreference::PrimaryOnly),
"admin",
- Command::appendMajorityWriteConcern(
- Command::appendPassthroughFields(cmdObj, configMovePrimaryRequest.toBSON())),
+ CommandHelpers::appendMajorityWriteConcern(
+ CommandHelpers::appendPassthroughFields(cmdObj, configMovePrimaryRequest.toBSON())),
Shard::RetryPolicy::kIdempotent));
- Command::filterCommandReplyForPassthrough(cmdResponse.response, &result);
+ CommandHelpers::filterCommandReplyForPassthrough(cmdResponse.response, &result);
return true;
}
diff --git a/src/mongo/s/commands/cluster_multicast.cpp b/src/mongo/s/commands/cluster_multicast.cpp
index 56af56e255a..3a92b006b9b 100644
--- a/src/mongo/s/commands/cluster_multicast.cpp
+++ b/src/mongo/s/commands/cluster_multicast.cpp
@@ -132,7 +132,7 @@ public:
{
BSONObjBuilder subbob(bob.subobjStart(host.toString()));
- if (appendCommandStatus(subbob, response.status)) {
+ if (CommandHelpers::appendCommandStatus(subbob, response.status)) {
subbob.append("data", response.data);
subbob.append("metadata", response.metadata);
if (response.elapsedMillis) {
diff --git a/src/mongo/s/commands/cluster_pipeline_cmd.cpp b/src/mongo/s/commands/cluster_pipeline_cmd.cpp
index e433c6d252d..90a641dfd97 100644
--- a/src/mongo/s/commands/cluster_pipeline_cmd.cpp
+++ b/src/mongo/s/commands/cluster_pipeline_cmd.cpp
@@ -70,8 +70,8 @@ public:
const std::string& dbname,
const BSONObj& cmdObj,
BSONObjBuilder& result) override {
- return appendCommandStatus(result,
- _runAggCommand(opCtx, dbname, cmdObj, boost::none, &result));
+ return CommandHelpers::appendCommandStatus(
+ result, _runAggCommand(opCtx, dbname, cmdObj, boost::none, &result));
}
Status explain(OperationContext* opCtx,
diff --git a/src/mongo/s/commands/cluster_plan_cache_cmd.cpp b/src/mongo/s/commands/cluster_plan_cache_cmd.cpp
index 748459fe163..76a4a7af53f 100644
--- a/src/mongo/s/commands/cluster_plan_cache_cmd.cpp
+++ b/src/mongo/s/commands/cluster_plan_cache_cmd.cpp
@@ -71,7 +71,7 @@ public:
}
std::string parseNs(const std::string& dbname, const BSONObj& cmdObj) const override {
- return parseNsCollectionRequired(dbname, cmdObj).ns();
+ return CommandHelpers::parseNsCollectionRequired(dbname, cmdObj).ns();
}
Status checkAuthForCommand(Client* client, const std::string& dbname, const BSONObj& cmdObj) {
@@ -112,7 +112,7 @@ bool ClusterPlanCacheCmd::run(OperationContext* opCtx,
const std::string& dbName,
const BSONObj& cmdObj,
BSONObjBuilder& result) {
- const NamespaceString nss(parseNsCollectionRequired(dbName, cmdObj));
+ const NamespaceString nss(CommandHelpers::parseNsCollectionRequired(dbName, cmdObj));
// Dispatch command to all the shards.
// Targeted shard commands are generally data-dependent but plan cache
@@ -121,7 +121,7 @@ bool ClusterPlanCacheCmd::run(OperationContext* opCtx,
const BSONObj query;
Strategy::commandOp(opCtx,
dbName,
- filterCommandRequestForPassthrough(cmdObj),
+ CommandHelpers::filterCommandRequestForPassthrough(cmdObj),
nss.ns(),
query,
CollationSpec::kSimpleSpec,
@@ -137,7 +137,7 @@ bool ClusterPlanCacheCmd::run(OperationContext* opCtx,
// XXX: In absence of sensible aggregation strategy,
// promote first shard's result to top level.
if (i == results.begin()) {
- filterCommandReplyForPassthrough(cmdResult.result, &result);
+ CommandHelpers::filterCommandReplyForPassthrough(cmdResult.result, &result);
clusterCmdResult = cmdResult.result["ok"].trueValue();
}
diff --git a/src/mongo/s/commands/cluster_remove_shard_cmd.cpp b/src/mongo/s/commands/cluster_remove_shard_cmd.cpp
index f07e886ce86..4137466e5a5 100644
--- a/src/mongo/s/commands/cluster_remove_shard_cmd.cpp
+++ b/src/mongo/s/commands/cluster_remove_shard_cmd.cpp
@@ -84,12 +84,12 @@ public:
opCtx,
ReadPreferenceSetting(ReadPreference::PrimaryOnly),
"admin",
- Command::appendMajorityWriteConcern(
- Command::appendPassthroughFields(cmdObj, BSON("_configsvrRemoveShard" << target))),
+ CommandHelpers::appendMajorityWriteConcern(CommandHelpers::appendPassthroughFields(
+ cmdObj, BSON("_configsvrRemoveShard" << target))),
Shard::RetryPolicy::kIdempotent));
uassertStatusOK(cmdResponseStatus.commandStatus);
- Command::filterCommandReplyForPassthrough(cmdResponseStatus.response, &result);
+ CommandHelpers::filterCommandReplyForPassthrough(cmdResponseStatus.response, &result);
return true;
}
diff --git a/src/mongo/s/commands/cluster_reset_error_cmd.cpp b/src/mongo/s/commands/cluster_reset_error_cmd.cpp
index 8fbe4c7a78b..50ba801ba37 100644
--- a/src/mongo/s/commands/cluster_reset_error_cmd.cpp
+++ b/src/mongo/s/commands/cluster_reset_error_cmd.cpp
@@ -75,7 +75,8 @@ public:
BSONObj res;
// Don't care about result from shards.
- conn->runCommand(dbname, filterCommandRequestForPassthrough(cmdObj), res);
+ conn->runCommand(
+ dbname, CommandHelpers::filterCommandRequestForPassthrough(cmdObj), res);
conn.done();
}
diff --git a/src/mongo/s/commands/cluster_set_feature_compatibility_version_cmd.cpp b/src/mongo/s/commands/cluster_set_feature_compatibility_version_cmd.cpp
index a26c773f78a..4558c3a707f 100644
--- a/src/mongo/s/commands/cluster_set_feature_compatibility_version_cmd.cpp
+++ b/src/mongo/s/commands/cluster_set_feature_compatibility_version_cmd.cpp
@@ -98,7 +98,7 @@ public:
opCtx,
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
dbname,
- Command::appendMajorityWriteConcern(Command::appendPassthroughFields(
+ CommandHelpers::appendMajorityWriteConcern(CommandHelpers::appendPassthroughFields(
cmdObj, BSON("setFeatureCompatibilityVersion" << version))),
Shard::RetryPolicy::kIdempotent));
uassertStatusOK(response.commandStatus);
diff --git a/src/mongo/s/commands/cluster_shard_collection_cmd.cpp b/src/mongo/s/commands/cluster_shard_collection_cmd.cpp
index 2ec79a764a0..0c9df731b06 100644
--- a/src/mongo/s/commands/cluster_shard_collection_cmd.cpp
+++ b/src/mongo/s/commands/cluster_shard_collection_cmd.cpp
@@ -101,7 +101,7 @@ public:
}
std::string parseNs(const std::string& dbname, const BSONObj& cmdObj) const override {
- return parseNsFullyQualified(dbname, cmdObj);
+ return CommandHelpers::parseNsFullyQualified(dbname, cmdObj);
}
bool run(OperationContext* opCtx,
@@ -129,11 +129,11 @@ public:
opCtx,
ReadPreferenceSetting(ReadPreference::PrimaryOnly),
"admin",
- Command::appendMajorityWriteConcern(
- Command::appendPassthroughFields(cmdObj, configShardCollRequest.toBSON())),
+ CommandHelpers::appendMajorityWriteConcern(
+ CommandHelpers::appendPassthroughFields(cmdObj, configShardCollRequest.toBSON())),
Shard::RetryPolicy::kIdempotent));
- Command::filterCommandReplyForPassthrough(cmdResponse.response, &result);
+ CommandHelpers::filterCommandReplyForPassthrough(cmdResponse.response, &result);
return true;
}
diff --git a/src/mongo/s/commands/cluster_split_cmd.cpp b/src/mongo/s/commands/cluster_split_cmd.cpp
index e29e1a7c412..44f8388cfa1 100644
--- a/src/mongo/s/commands/cluster_split_cmd.cpp
+++ b/src/mongo/s/commands/cluster_split_cmd.cpp
@@ -120,7 +120,7 @@ public:
}
std::string parseNs(const std::string& dbname, const BSONObj& cmdObj) const override {
- return parseNsFullyQualified(dbname, cmdObj);
+ return CommandHelpers::parseNsFullyQualified(dbname, cmdObj);
}
bool errmsgRun(OperationContext* opCtx,
diff --git a/src/mongo/s/commands/cluster_user_management_commands.cpp b/src/mongo/s/commands/cluster_user_management_commands.cpp
index d82af362143..24503042928 100644
--- a/src/mongo/s/commands/cluster_user_management_commands.cpp
+++ b/src/mongo/s/commands/cluster_user_management_commands.cpp
@@ -91,7 +91,11 @@ public:
const BSONObj& cmdObj,
BSONObjBuilder& result) {
return Grid::get(opCtx)->catalogClient()->runUserManagementWriteCommand(
- opCtx, getName(), dbname, filterCommandRequestForPassthrough(cmdObj), &result);
+ opCtx,
+ getName(),
+ dbname,
+ CommandHelpers::filterCommandRequestForPassthrough(cmdObj),
+ &result);
}
virtual void redactForLogging(mutablebson::Document* cmdObj) {
@@ -130,10 +134,14 @@ public:
auth::CreateOrUpdateUserArgs args;
Status status = auth::parseCreateOrUpdateUserCommands(cmdObj, getName(), dbname, &args);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
const bool ok = Grid::get(opCtx)->catalogClient()->runUserManagementWriteCommand(
- opCtx, getName(), dbname, filterCommandRequestForPassthrough(cmdObj), &result);
+ opCtx,
+ getName(),
+ dbname,
+ CommandHelpers::filterCommandRequestForPassthrough(cmdObj),
+ &result);
AuthorizationManager* authzManager = getGlobalAuthorizationManager();
invariant(authzManager);
@@ -178,10 +186,14 @@ public:
UserName userName;
Status status = auth::parseAndValidateDropUserCommand(cmdObj, dbname, &userName);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
const bool ok = Grid::get(opCtx)->catalogClient()->runUserManagementWriteCommand(
- opCtx, getName(), dbname, filterCommandRequestForPassthrough(cmdObj), &result);
+ opCtx,
+ getName(),
+ dbname,
+ CommandHelpers::filterCommandRequestForPassthrough(cmdObj),
+ &result);
AuthorizationManager* authzManager = getGlobalAuthorizationManager();
invariant(authzManager);
@@ -220,7 +232,11 @@ public:
const BSONObj& cmdObj,
BSONObjBuilder& result) {
const bool ok = Grid::get(opCtx)->catalogClient()->runUserManagementWriteCommand(
- opCtx, getName(), dbname, filterCommandRequestForPassthrough(cmdObj), &result);
+ opCtx,
+ getName(),
+ dbname,
+ CommandHelpers::filterCommandRequestForPassthrough(cmdObj),
+ &result);
AuthorizationManager* authzManager = getGlobalAuthorizationManager();
invariant(authzManager);
@@ -263,10 +279,14 @@ public:
Status status = auth::parseRolePossessionManipulationCommands(
cmdObj, getName(), dbname, &userNameString, &roles);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
const bool ok = Grid::get(opCtx)->catalogClient()->runUserManagementWriteCommand(
- opCtx, getName(), dbname, filterCommandRequestForPassthrough(cmdObj), &result);
+ opCtx,
+ getName(),
+ dbname,
+ CommandHelpers::filterCommandRequestForPassthrough(cmdObj),
+ &result);
AuthorizationManager* authzManager = getGlobalAuthorizationManager();
invariant(authzManager);
@@ -309,10 +329,14 @@ public:
Status status = auth::parseRolePossessionManipulationCommands(
cmdObj, getName(), dbname, &userNameString, &unusedRoles);
if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ return CommandHelpers::appendCommandStatus(result, status);
}
const bool ok = Grid::get(opCtx)->catalogClient()->runUserManagementWriteCommand(
- opCtx, getName(), dbname, filterCommandRequestForPassthrough(cmdObj), &result);
+ opCtx,
+ getName(),
+ dbname,
+ CommandHelpers::filterCommandRequestForPassthrough(cmdObj),
+ &result);
AuthorizationManager* authzManager = getGlobalAuthorizationManager();
invariant(authzManager);
@@ -355,7 +379,7 @@ public:
const BSONObj& cmdObj,
BSONObjBuilder& result) {
return Grid::get(opCtx)->catalogClient()->runUserManagementReadCommand(
- opCtx, dbname, filterCommandRequestForPassthrough(cmdObj), &result);
+ opCtx, dbname, CommandHelpers::filterCommandRequestForPassthrough(cmdObj), &result);
}
} cmdUsersInfo;
@@ -388,7 +412,11 @@ public:
const BSONObj& cmdObj,
BSONObjBuilder& result) {
return Grid::get(opCtx)->catalogClient()->runUserManagementWriteCommand(
- opCtx, getName(), dbname, filterCommandRequestForPassthrough(cmdObj), &result);
+ opCtx,
+ getName(),
+ dbname,
+ CommandHelpers::filterCommandRequestForPassthrough(cmdObj),
+ &result);
}
} cmdCreateRole;
@@ -421,7 +449,11 @@ public:
const BSONObj& cmdObj,
BSONObjBuilder& result) {
const bool ok = Grid::get(opCtx)->catalogClient()->runUserManagementWriteCommand(
- opCtx, getName(), dbname, filterCommandRequestForPassthrough(cmdObj), &result);
+ opCtx,
+ getName(),
+ dbname,
+ CommandHelpers::filterCommandRequestForPassthrough(cmdObj),
+ &result);
AuthorizationManager* authzManager = getGlobalAuthorizationManager();
invariant(authzManager);
@@ -460,7 +492,11 @@ public:
const BSONObj& cmdObj,
BSONObjBuilder& result) {
const bool ok = Grid::get(opCtx)->catalogClient()->runUserManagementWriteCommand(
- opCtx, getName(), dbname, filterCommandRequestForPassthrough(cmdObj), &result);
+ opCtx,
+ getName(),
+ dbname,
+ CommandHelpers::filterCommandRequestForPassthrough(cmdObj),
+ &result);
AuthorizationManager* authzManager = getGlobalAuthorizationManager();
invariant(authzManager);
@@ -499,7 +535,11 @@ public:
const BSONObj& cmdObj,
BSONObjBuilder& result) {
const bool ok = Grid::get(opCtx)->catalogClient()->runUserManagementWriteCommand(
- opCtx, getName(), dbname, filterCommandRequestForPassthrough(cmdObj), &result);
+ opCtx,
+ getName(),
+ dbname,
+ CommandHelpers::filterCommandRequestForPassthrough(cmdObj),
+ &result);
AuthorizationManager* authzManager = getGlobalAuthorizationManager();
invariant(authzManager);
@@ -538,7 +578,11 @@ public:
const BSONObj& cmdObj,
BSONObjBuilder& result) {
const bool ok = Grid::get(opCtx)->catalogClient()->runUserManagementWriteCommand(
- opCtx, getName(), dbname, filterCommandRequestForPassthrough(cmdObj), &result);
+ opCtx,
+ getName(),
+ dbname,
+ CommandHelpers::filterCommandRequestForPassthrough(cmdObj),
+ &result);
AuthorizationManager* authzManager = getGlobalAuthorizationManager();
invariant(authzManager);
@@ -577,7 +621,11 @@ public:
const BSONObj& cmdObj,
BSONObjBuilder& result) {
const bool ok = Grid::get(opCtx)->catalogClient()->runUserManagementWriteCommand(
- opCtx, getName(), dbname, filterCommandRequestForPassthrough(cmdObj), &result);
+ opCtx,
+ getName(),
+ dbname,
+ CommandHelpers::filterCommandRequestForPassthrough(cmdObj),
+ &result);
AuthorizationManager* authzManager = getGlobalAuthorizationManager();
invariant(authzManager);
@@ -619,7 +667,11 @@ public:
const BSONObj& cmdObj,
BSONObjBuilder& result) {
const bool ok = Grid::get(opCtx)->catalogClient()->runUserManagementWriteCommand(
- opCtx, getName(), dbname, filterCommandRequestForPassthrough(cmdObj), &result);
+ opCtx,
+ getName(),
+ dbname,
+ CommandHelpers::filterCommandRequestForPassthrough(cmdObj),
+ &result);
AuthorizationManager* authzManager = getGlobalAuthorizationManager();
invariant(authzManager);
@@ -662,7 +714,11 @@ public:
const BSONObj& cmdObj,
BSONObjBuilder& result) {
const bool ok = Grid::get(opCtx)->catalogClient()->runUserManagementWriteCommand(
- opCtx, getName(), dbname, filterCommandRequestForPassthrough(cmdObj), &result);
+ opCtx,
+ getName(),
+ dbname,
+ CommandHelpers::filterCommandRequestForPassthrough(cmdObj),
+ &result);
AuthorizationManager* authzManager = getGlobalAuthorizationManager();
invariant(authzManager);
@@ -705,7 +761,7 @@ public:
const BSONObj& cmdObj,
BSONObjBuilder& result) {
return Grid::get(opCtx)->catalogClient()->runUserManagementReadCommand(
- opCtx, dbname, filterCommandRequestForPassthrough(cmdObj), &result);
+ opCtx, dbname, CommandHelpers::filterCommandRequestForPassthrough(cmdObj), &result);
}
} cmdRolesInfo;
@@ -791,7 +847,11 @@ public:
const BSONObj& cmdObj,
BSONObjBuilder& result) {
return Grid::get(opCtx)->catalogClient()->runUserManagementWriteCommand(
- opCtx, getName(), dbname, filterCommandRequestForPassthrough(cmdObj), &result);
+ opCtx,
+ getName(),
+ dbname,
+ CommandHelpers::filterCommandRequestForPassthrough(cmdObj),
+ &result);
}
} cmdMergeAuthzCollections;
diff --git a/src/mongo/s/commands/commands_public.cpp b/src/mongo/s/commands/commands_public.cpp
index cfedc3a8f2e..cbc8d772b79 100644
--- a/src/mongo/s/commands/commands_public.cpp
+++ b/src/mongo/s/commands/commands_public.cpp
@@ -90,15 +90,15 @@ bool cursorCommandPassthrough(OperationContext* opCtx,
BSONObjBuilder* out) {
const auto shardStatus = Grid::get(opCtx)->shardRegistry()->getShard(opCtx, shardId);
if (!shardStatus.isOK()) {
- return Command::appendCommandStatus(*out, shardStatus.getStatus());
+ return CommandHelpers::appendCommandStatus(*out, shardStatus.getStatus());
}
const auto shard = shardStatus.getValue();
ScopedDbConnection conn(shard->getConnString());
auto cursor = conn->query(str::stream() << dbName << ".$cmd",
- Command::filterCommandRequestForPassthrough(cmdObj),
+ CommandHelpers::filterCommandRequestForPassthrough(cmdObj),
/* nToReturn=*/-1);
if (!cursor || !cursor->more()) {
- return Command::appendCommandStatus(
+ return CommandHelpers::appendCommandStatus(
*out, {ErrorCodes::OperationFailed, "failed to read command response from shard"});
}
BSONObj response = cursor->nextSafe().getOwned();
@@ -108,7 +108,7 @@ bool cursorCommandPassthrough(OperationContext* opCtx,
throw StaleConfigException("command failed because of stale config", response);
}
if (!status.isOK()) {
- return Command::appendCommandStatus(*out, status);
+ return CommandHelpers::appendCommandStatus(*out, status);
}
StatusWith<BSONObj> transformedResponse =
@@ -120,9 +120,9 @@ bool cursorCommandPassthrough(OperationContext* opCtx,
Grid::get(opCtx)->getExecutorPool()->getArbitraryExecutor(),
Grid::get(opCtx)->getCursorManager());
if (!transformedResponse.isOK()) {
- return Command::appendCommandStatus(*out, transformedResponse.getStatus());
+ return CommandHelpers::appendCommandStatus(*out, transformedResponse.getStatus());
}
- Command::filterCommandReplyForPassthrough(transformedResponse.getValue(), out);
+ CommandHelpers::filterCommandReplyForPassthrough(transformedResponse.getValue(), out);
return true;
}
@@ -177,7 +177,8 @@ protected:
ShardConnection conn(shard->getConnString(), "");
BSONObj res;
- bool ok = conn->runCommand(db, filterCommandRequestForPassthrough(cmdObj), res);
+ bool ok =
+ conn->runCommand(db, CommandHelpers::filterCommandRequestForPassthrough(cmdObj), res);
conn.done();
// First append the properly constructed writeConcernError. It will then be skipped
@@ -185,7 +186,7 @@ protected:
if (auto wcErrorElem = res["writeConcernError"]) {
appendWriteConcernErrorToCmdResponse(shard->getId(), wcErrorElem, result);
}
- result.appendElementsUnique(filterCommandReplyForPassthrough(res));
+ result.appendElementsUnique(CommandHelpers::filterCommandReplyForPassthrough(res));
return ok;
}
};
@@ -212,7 +213,7 @@ protected:
// Here, we first filter the command before appending an UNSHARDED shardVersion, because
// "shardVersion" is one of the fields that gets filtered out.
- BSONObj filteredCmdObj(Command::filterCommandRequestForPassthrough(cmdObj));
+ BSONObj filteredCmdObj(CommandHelpers::filterCommandRequestForPassthrough(cmdObj));
BSONObj filteredCmdObjWithVersion(
appendShardVersion(filteredCmdObj, ChunkVersion::UNSHARDED()));
@@ -234,7 +235,7 @@ protected:
primaryShardId, commandResponse.response["writeConcernError"], result);
}
result.appendElementsUnique(
- filterCommandReplyForPassthrough(std::move(commandResponse.response)));
+ CommandHelpers::filterCommandReplyForPassthrough(std::move(commandResponse.response)));
return true;
}
@@ -271,16 +272,16 @@ public:
const BSONObj& cmdObj,
std::string& errmsg,
BSONObjBuilder& output) override {
- const NamespaceString nss(parseNsCollectionRequired(dbName, cmdObj));
+ const NamespaceString nss(CommandHelpers::parseNsCollectionRequired(dbName, cmdObj));
LOG(1) << "dropIndexes: " << nss << " cmd:" << redact(cmdObj);
- auto shardResponses = uassertStatusOK(
- scatterGatherOnlyVersionIfUnsharded(opCtx,
- dbName,
- nss,
- filterCommandRequestForPassthrough(cmdObj),
- ReadPreferenceSetting::get(opCtx),
- Shard::RetryPolicy::kNotIdempotent));
+ auto shardResponses = uassertStatusOK(scatterGatherOnlyVersionIfUnsharded(
+ opCtx,
+ dbName,
+ nss,
+ CommandHelpers::filterCommandRequestForPassthrough(cmdObj),
+ ReadPreferenceSetting::get(opCtx),
+ Shard::RetryPolicy::kNotIdempotent));
return appendRawResponses(
opCtx, &errmsg, &output, std::move(shardResponses), {ErrorCodes::NamespaceNotFound});
}
@@ -315,18 +316,18 @@ public:
const BSONObj& cmdObj,
std::string& errmsg,
BSONObjBuilder& output) override {
- const NamespaceString nss(parseNsCollectionRequired(dbName, cmdObj));
+ const NamespaceString nss(CommandHelpers::parseNsCollectionRequired(dbName, cmdObj));
LOG(1) << "createIndexes: " << nss << " cmd:" << redact(cmdObj);
uassertStatusOK(createShardDatabase(opCtx, dbName));
- auto shardResponses = uassertStatusOK(
- scatterGatherOnlyVersionIfUnsharded(opCtx,
- dbName,
- nss,
- filterCommandRequestForPassthrough(cmdObj),
- ReadPreferenceSetting::get(opCtx),
- Shard::RetryPolicy::kNoRetry));
+ auto shardResponses = uassertStatusOK(scatterGatherOnlyVersionIfUnsharded(
+ opCtx,
+ dbName,
+ nss,
+ CommandHelpers::filterCommandRequestForPassthrough(cmdObj),
+ ReadPreferenceSetting::get(opCtx),
+ Shard::RetryPolicy::kNoRetry));
return appendRawResponses(opCtx,
&errmsg,
&output,
@@ -364,16 +365,16 @@ public:
const BSONObj& cmdObj,
std::string& errmsg,
BSONObjBuilder& output) override {
- const NamespaceString nss(parseNsCollectionRequired(dbName, cmdObj));
+ const NamespaceString nss(CommandHelpers::parseNsCollectionRequired(dbName, cmdObj));
LOG(1) << "reIndex: " << nss << " cmd:" << redact(cmdObj);
- auto shardResponses = uassertStatusOK(
- scatterGatherOnlyVersionIfUnsharded(opCtx,
- dbName,
- nss,
- filterCommandRequestForPassthrough(cmdObj),
- ReadPreferenceSetting::get(opCtx),
- Shard::RetryPolicy::kNoRetry));
+ auto shardResponses = uassertStatusOK(scatterGatherOnlyVersionIfUnsharded(
+ opCtx,
+ dbName,
+ nss,
+ CommandHelpers::filterCommandRequestForPassthrough(cmdObj),
+ ReadPreferenceSetting::get(opCtx),
+ Shard::RetryPolicy::kNoRetry));
return appendRawResponses(
opCtx, &errmsg, &output, std::move(shardResponses), {ErrorCodes::NamespaceNotFound});
}
@@ -394,7 +395,7 @@ public:
virtual Status checkAuthForCommand(Client* client,
const std::string& dbname,
const BSONObj& cmdObj) {
- const NamespaceString nss(parseNsCollectionRequired(dbname, cmdObj));
+ const NamespaceString nss(CommandHelpers::parseNsCollectionRequired(dbname, cmdObj));
return AuthorizationSession::get(client)->checkAuthForCollMod(nss, cmdObj, true);
}
@@ -407,16 +408,16 @@ public:
const BSONObj& cmdObj,
std::string& errmsg,
BSONObjBuilder& output) override {
- const NamespaceString nss(parseNsCollectionRequired(dbName, cmdObj));
+ const NamespaceString nss(CommandHelpers::parseNsCollectionRequired(dbName, cmdObj));
LOG(1) << "collMod: " << nss << " cmd:" << redact(cmdObj);
- auto shardResponses = uassertStatusOK(
- scatterGatherOnlyVersionIfUnsharded(opCtx,
- dbName,
- nss,
- filterCommandRequestForPassthrough(cmdObj),
- ReadPreferenceSetting::get(opCtx),
- Shard::RetryPolicy::kNoRetry));
+ auto shardResponses = uassertStatusOK(scatterGatherOnlyVersionIfUnsharded(
+ opCtx,
+ dbName,
+ nss,
+ CommandHelpers::filterCommandRequestForPassthrough(cmdObj),
+ ReadPreferenceSetting::get(opCtx),
+ Shard::RetryPolicy::kNoRetry));
return appendRawResponses(
opCtx, &errmsg, &output, std::move(shardResponses), {ErrorCodes::NamespaceNotFound});
}
@@ -442,7 +443,7 @@ public:
const string& dbName,
const BSONObj& cmdObj,
BSONObjBuilder& output) {
- const NamespaceString nss(parseNsCollectionRequired(dbName, cmdObj));
+ const NamespaceString nss(CommandHelpers::parseNsCollectionRequired(dbName, cmdObj));
auto routingInfo =
uassertStatusOK(Grid::get(opCtx)->catalogCache()->getCollectionRoutingInfo(opCtx, nss));
@@ -456,7 +457,7 @@ public:
const BSONObj query;
Strategy::commandOp(opCtx,
dbName,
- filterCommandRequestForPassthrough(cmdObj),
+ CommandHelpers::filterCommandRequestForPassthrough(cmdObj),
cm->getns(),
query,
CollationSpec::kSimpleSpec,
@@ -505,7 +506,7 @@ public:
Status checkAuthForCommand(Client* client,
const std::string& dbname,
const BSONObj& cmdObj) override {
- const NamespaceString nss(parseNsCollectionRequired(dbname, cmdObj));
+ const NamespaceString nss(CommandHelpers::parseNsCollectionRequired(dbname, cmdObj));
return AuthorizationSession::get(client)->checkAuthForCreate(nss, cmdObj, true);
}
@@ -640,7 +641,7 @@ public:
!fromDbInfo.shardingEnabled());
BSONObjBuilder b;
- BSONForEach(e, filterCommandRequestForPassthrough(cmdObj)) {
+ BSONForEach(e, CommandHelpers::filterCommandRequestForPassthrough(cmdObj)) {
if (strcmp(e.fieldName(), "fromhost") != 0) {
b.append(e);
}
@@ -677,7 +678,7 @@ public:
const string& dbName,
const BSONObj& cmdObj,
BSONObjBuilder& result) override {
- const NamespaceString nss(parseNsCollectionRequired(dbName, cmdObj));
+ const NamespaceString nss(CommandHelpers::parseNsCollectionRequired(dbName, cmdObj));
auto routingInfo =
uassertStatusOK(Grid::get(opCtx)->catalogCache()->getCollectionRoutingInfo(opCtx, nss));
@@ -713,7 +714,8 @@ public:
BSONObj res;
{
ScopedDbConnection conn(shard->getConnString());
- if (!conn->runCommand(dbName, filterCommandRequestForPassthrough(cmdObj), res)) {
+ if (!conn->runCommand(
+ dbName, CommandHelpers::filterCommandRequestForPassthrough(cmdObj), res)) {
if (!res["code"].eoo()) {
result.append(res["code"]);
}
@@ -837,7 +839,7 @@ public:
DataSizeCmd() : PublicGridCommand("dataSize", "datasize") {}
std::string parseNs(const std::string& dbname, const BSONObj& cmdObj) const override {
- return parseNsFullyQualified(dbname, cmdObj);
+ return CommandHelpers::parseNsFullyQualified(dbname, cmdObj);
}
void addRequiredPrivileges(const std::string& dbname,
@@ -900,11 +902,12 @@ public:
ScopedDbConnection conn(shardStatus.getValue()->getConnString());
BSONObj res;
- bool ok = conn->runCommand(dbName, filterCommandRequestForPassthrough(cmdObj), res);
+ bool ok = conn->runCommand(
+ dbName, CommandHelpers::filterCommandRequestForPassthrough(cmdObj), res);
conn.done();
if (!ok) {
- filterCommandReplyForPassthrough(res, &result);
+ CommandHelpers::filterCommandReplyForPassthrough(res, &result);
return false;
}
@@ -938,7 +941,7 @@ public:
}
std::string parseNs(const std::string& dbname, const BSONObj& cmdObj) const override {
- return parseNsCollectionRequired(dbname, cmdObj).ns();
+ return CommandHelpers::parseNsCollectionRequired(dbname, cmdObj).ns();
}
} convertToCappedCmd;
@@ -1041,7 +1044,7 @@ public:
}
std::string parseNs(const string& dbname, const BSONObj& cmdObj) const override {
- return parseNsFullyQualified(dbname, cmdObj);
+ return CommandHelpers::parseNsFullyQualified(dbname, cmdObj);
}
bool run(OperationContext* opCtx,
@@ -1082,7 +1085,7 @@ public:
const string& dbName,
const BSONObj& cmdObj,
BSONObjBuilder& result) override {
- const NamespaceString nss(parseNsCollectionRequired(dbName, cmdObj));
+ const NamespaceString nss(CommandHelpers::parseNsCollectionRequired(dbName, cmdObj));
auto query = getQuery(cmdObj);
@@ -1110,16 +1113,16 @@ public:
uassertStatusOK(Grid::get(opCtx)->catalogCache()->getCollectionRoutingInfo(opCtx, nss));
BSONObj viewDefinition;
- auto swShardResponses =
- scatterGatherVersionedTargetByRoutingTable(opCtx,
- dbName,
- nss,
- filterCommandRequestForPassthrough(cmdObj),
- ReadPreferenceSetting::get(opCtx),
- Shard::RetryPolicy::kIdempotent,
- query,
- collation,
- &viewDefinition);
+ auto swShardResponses = scatterGatherVersionedTargetByRoutingTable(
+ opCtx,
+ dbName,
+ nss,
+ CommandHelpers::filterCommandRequestForPassthrough(cmdObj),
+ ReadPreferenceSetting::get(opCtx),
+ Shard::RetryPolicy::kIdempotent,
+ query,
+ collation,
+ &viewDefinition);
if (ErrorCodes::CommandOnShardedViewNotSupportedOnMongod == swShardResponses.getStatus()) {
uassert(ErrorCodes::InternalError,
@@ -1131,30 +1134,30 @@ public:
auto parsedDistinct = ParsedDistinct::parse(
opCtx, resolvedView.getNamespace(), cmdObj, ExtensionsCallbackNoop(), true);
if (!parsedDistinct.isOK()) {
- return appendCommandStatus(result, parsedDistinct.getStatus());
+ return CommandHelpers::appendCommandStatus(result, parsedDistinct.getStatus());
}
auto aggCmdOnView = parsedDistinct.getValue().asAggregationCommand();
if (!aggCmdOnView.isOK()) {
- return appendCommandStatus(result, aggCmdOnView.getStatus());
+ return CommandHelpers::appendCommandStatus(result, aggCmdOnView.getStatus());
}
auto aggRequestOnView = AggregationRequest::parseFromBSON(nss, aggCmdOnView.getValue());
if (!aggRequestOnView.isOK()) {
- return appendCommandStatus(result, aggRequestOnView.getStatus());
+ return CommandHelpers::appendCommandStatus(result, aggRequestOnView.getStatus());
}
auto resolvedAggRequest =
resolvedView.asExpandedViewAggregation(aggRequestOnView.getValue());
auto resolvedAggCmd = resolvedAggRequest.serializeToCommandObj().toBson();
- BSONObj aggResult = Command::runCommandDirectly(
+ BSONObj aggResult = CommandHelpers::runCommandDirectly(
opCtx, OpMsgRequest::fromDBAndBody(dbName, std::move(resolvedAggCmd)));
ViewResponseFormatter formatter(aggResult);
auto formatStatus = formatter.appendAsDistinctResponse(&result);
if (!formatStatus.isOK()) {
- return appendCommandStatus(result, formatStatus);
+ return CommandHelpers::appendCommandStatus(result, formatStatus);
}
return true;
}
@@ -1201,7 +1204,7 @@ public:
const BSONObj& cmdObj,
ExplainOptions::Verbosity verbosity,
BSONObjBuilder* out) const {
- const NamespaceString nss(parseNsCollectionRequired(dbname, cmdObj));
+ const NamespaceString nss(CommandHelpers::parseNsCollectionRequired(dbname, cmdObj));
// Extract the targeting query.
BSONObj targetingQuery;
@@ -1346,7 +1349,7 @@ public:
vector<Strategy::CommandResult> results;
Strategy::commandOp(opCtx,
dbName,
- filterCommandRequestForPassthrough(cmdObj),
+ CommandHelpers::filterCommandRequestForPassthrough(cmdObj),
nss.ns(),
finder,
CollationSpec::kSimpleSpec,
@@ -1354,7 +1357,7 @@ public:
verify(results.size() == 1); // querying on shard key so should only talk to one shard
BSONObj res = results.begin()->result;
- filterCommandReplyForPassthrough(res, &result);
+ CommandHelpers::filterCommandReplyForPassthrough(res, &result);
return res["ok"].trueValue();
} else if (SimpleBSONObjComparator::kInstance.evaluate(cm->getShardKeyPattern().toBSON() ==
BSON("files_id" << 1 << "n" << 1))) {
@@ -1370,7 +1373,7 @@ public:
// long as we keep getting more chunks. The end condition is when we go to
// look for chunk n and it doesn't exist. This means that the file's last
// chunk is n-1, so we return the computed md5 results.
- BSONObjBuilder bb(filterCommandRequestForPassthrough(cmdObj));
+ BSONObjBuilder bb(CommandHelpers::filterCommandRequestForPassthrough(cmdObj));
bb.appendBool("partialOk", true);
bb.append("startAt", n);
if (!lastResult.isEmpty()) {
@@ -1431,7 +1434,7 @@ public:
if (n == nNext) {
// no new data means we've reached the end of the file
- filterCommandReplyForPassthrough(res, &result);
+ CommandHelpers::filterCommandReplyForPassthrough(res, &result);
return true;
}
@@ -1487,7 +1490,7 @@ public:
"http://dochub.mongodb.org/core/geoNear-deprecation.";
}
- const NamespaceString nss(parseNsCollectionRequired(dbName, cmdObj));
+ const NamespaceString nss(CommandHelpers::parseNsCollectionRequired(dbName, cmdObj));
auto routingInfo =
@@ -1516,7 +1519,8 @@ public:
vector<AsyncRequestsSender::Request> requests;
BSONArrayBuilder shardArray;
for (const ShardId& shardId : shardIds) {
- requests.emplace_back(shardId, filterCommandRequestForPassthrough(cmdObj));
+ requests.emplace_back(shardId,
+ CommandHelpers::filterCommandRequestForPassthrough(cmdObj));
shardArray.append(shardId.toString());
}
@@ -1682,7 +1686,7 @@ public:
// Check for the listIndexes ActionType on the database, or find on system.indexes for pre
// 3.0 systems.
- const NamespaceString ns(parseNsCollectionRequired(dbname, cmdObj));
+ const NamespaceString ns(CommandHelpers::parseNsCollectionRequired(dbname, cmdObj));
if (authzSession->isAuthorizedForActionsOnResource(ResourcePattern::forExactNamespace(ns),
ActionType::listIndexes) ||
@@ -1705,7 +1709,7 @@ public:
const string& dbName,
const BSONObj& cmdObj,
BSONObjBuilder& result) final {
- const NamespaceString nss(parseNsCollectionRequired(dbName, cmdObj));
+ const NamespaceString nss(CommandHelpers::parseNsCollectionRequired(dbName, cmdObj));
const auto routingInfo =
uassertStatusOK(Grid::get(opCtx)->catalogCache()->getCollectionRoutingInfo(opCtx, nss));
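
For readers skimming the hunks above: the mechanical pattern is that helpers such as parseNsCollectionRequired and filterCommandRequestForPassthrough are no longer reachable as unqualified (inherited) static calls and must be qualified with CommandHelpers::. The sketch below is a toy, self-contained model of that shape only; CommandHelpers, Command, CollModCmd and parseNs here are simplified stand-ins for illustration, not the real mongo classes or signatures.

    // Toy model (hypothetical types): once the static helpers live on a separate
    // helper struct rather than the Command base class, call sites inside command
    // implementations must spell out the CommandHelpers:: qualifier explicitly.
    #include <iostream>
    #include <string>

    struct CommandHelpers {                        // stand-in for the real helper class
        static std::string parseNs(const std::string& dbname, const std::string& coll) {
            return dbname + "." + coll;            // toy namespace parsing
        }
    };

    struct Command {                               // stand-in base class: no helpers here anymore
        virtual ~Command() = default;
        virtual bool run(const std::string& dbname) = 0;
    };

    struct CollModCmd : Command {
        bool run(const std::string& dbname) override {
            // After the refactor the helper call is explicitly qualified:
            const std::string nss = CommandHelpers::parseNs(dbname, "coll");
            std::cout << "collMod on " << nss << '\n';
            return true;
        }
    };

    int main() {
        CollModCmd cmd;
        return cmd.run("test") ? 0 : 1;
    }
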
diff --git a/src/mongo/s/commands/pipeline_s.cpp b/src/mongo/s/commands/pipeline_s.cpp
index 3aef12ca6c2..45662b6aa55 100644
--- a/src/mongo/s/commands/pipeline_s.cpp
+++ b/src/mongo/s/commands/pipeline_s.cpp
@@ -368,7 +368,7 @@ BSONObj PipelineS::establishMergingMongosCursor(
responseBuilder.done(clusterCursorId, requestedNss.ns());
- Command::appendCommandStatus(cursorResponse, Status::OK());
+ CommandHelpers::appendCommandStatus(cursorResponse, Status::OK());
return cursorResponse.obj();
}
diff --git a/src/mongo/s/commands/strategy.cpp b/src/mongo/s/commands/strategy.cpp
index 0a66edcbcb0..b10366b5a1e 100644
--- a/src/mongo/s/commands/strategy.cpp
+++ b/src/mongo/s/commands/strategy.cpp
@@ -164,7 +164,7 @@ void execCommandClient(OperationContext* opCtx,
help << "help for: " << c->getName() << " ";
c->help(help);
result.append("help", help.str());
- Command::appendCommandStatus(result, true, "");
+ CommandHelpers::appendCommandStatus(result, true, "");
return;
}
@@ -176,7 +176,7 @@ void execCommandClient(OperationContext* opCtx,
Status status = Command::checkAuthorization(c, opCtx, request);
if (!status.isOK()) {
- Command::appendCommandStatus(result, status);
+ CommandHelpers::appendCommandStatus(result, status);
return;
}
@@ -189,7 +189,7 @@ void execCommandClient(OperationContext* opCtx,
StatusWith<WriteConcernOptions> wcResult =
WriteConcernOptions::extractWCFromCommand(request.body, dbname);
if (!wcResult.isOK()) {
- Command::appendCommandStatus(result, wcResult.getStatus());
+ CommandHelpers::appendCommandStatus(result, wcResult.getStatus());
return;
}
@@ -198,7 +198,7 @@ void execCommandClient(OperationContext* opCtx,
// This command doesn't do writes so it should not be passed a writeConcern.
// If we did not use the default writeConcern, one was provided when it shouldn't have
// been by the user.
- Command::appendCommandStatus(
+ CommandHelpers::appendCommandStatus(
result, Status(ErrorCodes::InvalidOptions, "Command does not support writeConcern"));
return;
}
@@ -210,7 +210,7 @@ void execCommandClient(OperationContext* opCtx,
auto metadataStatus = processCommandMetadata(opCtx, request.body);
if (!metadataStatus.isOK()) {
- Command::appendCommandStatus(result, metadataStatus);
+ CommandHelpers::appendCommandStatus(result, metadataStatus);
return;
}
@@ -228,7 +228,7 @@ void execCommandClient(OperationContext* opCtx,
if (!ok) {
c->incrementCommandsFailed();
}
- Command::appendCommandStatus(result, ok);
+ CommandHelpers::appendCommandStatus(result, ok);
}
void runCommand(OperationContext* opCtx, const OpMsgRequest& request, BSONObjBuilder&& builder) {
@@ -245,10 +245,10 @@ void runCommand(OperationContext* opCtx, const OpMsgRequest& request, BSONObjBui
}
auto const commandName = request.getCommandName();
- auto const command = Command::findCommand(commandName);
+ auto const command = CommandHelpers::findCommand(commandName);
if (!command) {
ON_BLOCK_EXIT([opCtx, &builder] { appendRequiredFieldsToResponse(opCtx, &builder); });
- Command::appendCommandStatus(
+ CommandHelpers::appendCommandStatus(
builder,
{ErrorCodes::CommandNotFound, str::stream() << "no such cmd: " << commandName});
globalCommandRegistry()->incrementUnknownCommands();
@@ -291,7 +291,7 @@ void runCommand(OperationContext* opCtx, const OpMsgRequest& request, BSONObjBui
ON_BLOCK_EXIT([opCtx, &builder] { appendRequiredFieldsToResponse(opCtx, &builder); });
builder.resetToEmpty();
command->incrementCommandsFailed();
- Command::appendCommandStatus(builder, e.toStatus());
+ CommandHelpers::appendCommandStatus(builder, e.toStatus());
LastError::get(opCtx->getClient()).setLastError(e.code(), e.reason());
return;
}
@@ -412,7 +412,7 @@ DbResponse Strategy::clientCommand(OperationContext* opCtx, const Message& m) {
LOG(1) << "Exception thrown while parsing command " << causedBy(redact(ex));
reply->reset();
auto bob = reply->getInPlaceReplyBuilder(0);
- Command::appendCommandStatus(bob, ex.toStatus());
+ CommandHelpers::appendCommandStatus(bob, ex.toStatus());
appendRequiredFieldsToResponse(opCtx, &bob);
return; // From lambda. Don't try executing if parsing failed.
@@ -428,7 +428,7 @@ DbResponse Strategy::clientCommand(OperationContext* opCtx, const Message& m) {
reply->reset();
auto bob = reply->getInPlaceReplyBuilder(0);
- Command::appendCommandStatus(bob, ex.toStatus());
+ CommandHelpers::appendCommandStatus(bob, ex.toStatus());
appendRequiredFieldsToResponse(opCtx, &bob);
}
}();
diff --git a/src/mongo/s/shard_server_test_fixture.cpp b/src/mongo/s/shard_server_test_fixture.cpp
index de22375398d..0efca5710f6 100644
--- a/src/mongo/s/shard_server_test_fixture.cpp
+++ b/src/mongo/s/shard_server_test_fixture.cpp
@@ -62,7 +62,7 @@ void ShardServerTestFixture::expectFindOnConfigSendErrorCode(ErrorCodes::Error c
ASSERT_EQ(request.target, kConfigHostAndPort);
ASSERT_EQ(request.dbname, "config");
BSONObjBuilder responseBuilder;
- Command::appendCommandStatus(responseBuilder, Status(code, ""));
+ CommandHelpers::appendCommandStatus(responseBuilder, Status(code, ""));
return responseBuilder.obj();
});
}
diff --git a/src/mongo/s/sharding_test_fixture.cpp b/src/mongo/s/sharding_test_fixture.cpp
index 9f4583f485a..cf2e5ea9604 100644
--- a/src/mongo/s/sharding_test_fixture.cpp
+++ b/src/mongo/s/sharding_test_fixture.cpp
@@ -517,7 +517,7 @@ void ShardingTestFixture::expectCount(const HostAndPort& configHost,
checkReadConcern(request.cmdObj, Timestamp(0, 0), repl::OpTime::kUninitializedTerm);
BSONObjBuilder responseBuilder;
- Command::appendCommandStatus(responseBuilder, response.getStatus());
+ CommandHelpers::appendCommandStatus(responseBuilder, response.getStatus());
return responseBuilder.obj();
});
}
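
For context on what the test fixtures above fabricate: an appendCommandStatus-style helper folds a Status into the reply document, roughly ok:1 on success and ok:0 plus code/errmsg on failure. The snippet below is a toy, self-contained approximation of that behavior using a plain map in place of BSONObjBuilder; the Status type, field names as strings, and the sample error are illustrative assumptions, not the real API.

    #include <iostream>
    #include <map>
    #include <string>

    struct Status {                                // stand-in for mongo::Status
        int code;                                  // 0 means OK
        std::string reason;
        bool isOK() const { return code == 0; }
    };

    // Toy analogue of CommandHelpers::appendCommandStatus(builder, status):
    // mark the reply ok/not-ok and, on failure, record code and errmsg.
    void appendCommandStatus(std::map<std::string, std::string>& reply, const Status& s) {
        reply["ok"] = s.isOK() ? "1" : "0";
        if (!s.isOK()) {
            reply["code"] = std::to_string(s.code);
            reply["errmsg"] = s.reason;
        }
    }

    int main() {
        std::map<std::string, std::string> reply;
        // Shaped like the "no such cmd" reply built in runCommand above (error code illustrative).
        appendCommandStatus(reply, Status{59, "no such cmd: fooBar"});
        for (const auto& kv : reply)
            std::cout << kv.first << ": " << kv.second << '\n';
        return 0;
    }
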