author     Sophia Tan <sophia_tll@hotmail.com>  2023-01-23 16:23:20 +0000
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>  2023-01-23 18:12:53 +0000
commit     056b0578ea08a530dbee00986c0f4a651771628a (patch)
tree       2f75472d62f86b9c2d117e5531f8ea5e2a720887 /src/mongo
parent     368ae38dea1386062c6c7a1591c469c7bc9d56fe (diff)
download   mongo-056b0578ea08a530dbee00986c0f4a651771628a.tar.gz
SERVER-72948 Change DBClient runCommand function to use DatabaseName object
Diffstat (limited to 'src/mongo')
-rw-r--r--  src/mongo/client/dbclient_base.cpp | 40
-rw-r--r--  src/mongo/client/dbclient_base.h | 19
-rw-r--r--  src/mongo/client/dbclient_connection.cpp | 2
-rw-r--r--  src/mongo/client/dbclient_connection_integration_test.cpp | 9
-rw-r--r--  src/mongo/client/dbclient_cursor.cpp | 2
-rw-r--r--  src/mongo/db/commands/analyze_cmd.cpp | 2
-rw-r--r--  src/mongo/db/commands/create_indexes_test.cpp | 2
-rw-r--r--  src/mongo/db/commands/external_data_source_commands_test.cpp | 6
-rw-r--r--  src/mongo/db/commands/feature_compatibility_version.cpp | 2
-rw-r--r--  src/mongo/db/keys_collection_cache_test.cpp | 4
-rw-r--r--  src/mongo/db/pipeline/aggregation_request_helper.cpp | 8
-rw-r--r--  src/mongo/db/pipeline/aggregation_request_helper.h | 2
-rw-r--r--  src/mongo/db/pipeline/aggregation_request_test.cpp | 4
-rw-r--r--  src/mongo/db/repl/all_database_cloner.cpp | 4
-rw-r--r--  src/mongo/db/repl/collection_cloner.cpp | 2
-rw-r--r--  src/mongo/db/repl/initial_sync_base_cloner.cpp | 3
-rw-r--r--  src/mongo/db/repl/isself.cpp | 2
-rw-r--r--  src/mongo/db/repl/oplog_applier_impl_test.cpp | 10
-rw-r--r--  src/mongo/db/repl/primary_only_service_test.cpp | 2
-rw-r--r--  src/mongo/db/repl/rollback_source_impl.cpp | 3
-rw-r--r--  src/mongo/db/repl/storage_timestamp_test.cpp | 10
-rw-r--r--  src/mongo/db/repl/tenant_all_database_cloner.cpp | 9
-rw-r--r--  src/mongo/db/repl/tenant_collection_cloner.cpp | 6
-rw-r--r--  src/mongo/db/repl/tenant_database_cloner.cpp | 7
-rw-r--r--  src/mongo/db/repl/tenant_migration_donor_service.cpp | 4
-rw-r--r--  src/mongo/db/repl/tenant_migration_recipient_service.cpp | 5
-rw-r--r--  src/mongo/db/s/add_shard_cmd.cpp | 3
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager.cpp | 6
-rw-r--r--  src/mongo/db/s/create_collection_coordinator.cpp | 2
-rw-r--r--  src/mongo/db/s/global_index/global_index_cloning_service_test.cpp | 3
-rw-r--r--  src/mongo/db/s/global_index/global_index_inserter_test.cpp | 3
-rw-r--r--  src/mongo/db/s/migration_destination_manager.cpp | 2
-rw-r--r--  src/mongo/db/s/move_primary_coordinator.cpp | 2
-rw-r--r--  src/mongo/db/s/move_primary_source_manager.cpp | 2
-rw-r--r--  src/mongo/db/s/resharding/resharding_coordinator_service.cpp | 4
-rw-r--r--  src/mongo/db/s/resharding/resharding_oplog_fetcher_test.cpp | 2
-rw-r--r--  src/mongo/db/s/shard_key_util.cpp | 4
-rw-r--r--  src/mongo/db/serverless/shard_split_donor_service.cpp | 4
-rw-r--r--  src/mongo/db/session/sessions_collection.cpp | 6
-rw-r--r--  src/mongo/db/session/sessions_collection_rs.cpp | 51
-rw-r--r--  src/mongo/db/session/sessions_collection_standalone.cpp | 3
-rw-r--r--  src/mongo/db/transaction/transaction_api.cpp | 2
-rw-r--r--  src/mongo/db/transaction/transaction_participant_retryable_writes_test.cpp | 2
-rw-r--r--  src/mongo/dbtests/clienttests.cpp | 2
-rw-r--r--  src/mongo/dbtests/commandtests.cpp | 8
-rw-r--r--  src/mongo/dbtests/directclienttests.cpp | 2
-rw-r--r--  src/mongo/dbtests/indexupdatetests.cpp | 2
-rw-r--r--  src/mongo/dbtests/mock_dbclient_conn_test.cpp | 28
-rw-r--r--  src/mongo/dbtests/mock_replica_set_test.cpp | 33
-rw-r--r--  src/mongo/dbtests/plan_executor_invalidation_test.cpp | 2
-rw-r--r--  src/mongo/dbtests/querytests.cpp | 24
-rw-r--r--  src/mongo/rpc/op_legacy_integration_test.cpp | 8
-rw-r--r--  src/mongo/rpc/op_msg_integration_test.cpp | 32
-rw-r--r--  src/mongo/s/analyze_shard_key_util.cpp | 6
-rw-r--r--  src/mongo/s/client/sharding_connection_hook.cpp | 3
-rw-r--r--  src/mongo/scripting/mozjs/session.cpp | 6
-rw-r--r--  src/mongo/shell/bench.cpp | 10
-rw-r--r--  src/mongo/shell/encrypted_dbclient_base.cpp | 2
-rw-r--r--  src/mongo/shell/shell_utils.cpp | 7
-rw-r--r--  src/mongo/shell/shell_utils_launcher.cpp | 2
60 files changed, 242 insertions, 205 deletions
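
The heart of the change is the DBClientBase::runCommand overload, which now takes a DatabaseName instead of a std::string database name (see dbclient_base.h below); every call site is updated accordingly. The following sketch is illustrative only and is not part of the commit; it assumes a DBClientBase reference and reuses the DatabaseName(boost::none, ...) construction and the nss.dbName() accessor that appear throughout the diff.

    // Illustrative sketch (not part of this commit) of how call sites migrate.
    #include "mongo/bson/bsonobjbuilder.h"
    #include "mongo/client/dbclient_base.h"
    #include "mongo/db/database_name.h"
    #include "mongo/db/namespace_string.h"

    namespace mongo {

    void runCommandsAfterChange(DBClientBase& conn) {
        BSONObj info;

        // Old overload: conn.runCommand("admin", BSON("ping" << 1), info);
        // New overload: a bare database-name string is wrapped in a DatabaseName
        // with a boost::none tenant id (follow-up tickets such as SERVER-72977
        // track passing a real DatabaseName through instead).
        conn.runCommand(DatabaseName(boost::none, "admin"), BSON("ping" << 1), info);

        // Call sites that already hold a NamespaceString pass nss.dbName()
        // directly rather than serializing it back to a string.
        NamespaceString nss("test.coll");
        conn.runCommand(nss.dbName(), BSON("collStats" << nss.coll()), info);
    }

    }  // namespace mongo

As suggested by the dollarTenant/tenantId handling elsewhere in the diff, the intent is to carry the DatabaseName (including any tenant id) end to end instead of flattening it to a string at the client boundary.
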
diff --git a/src/mongo/client/dbclient_base.cpp b/src/mongo/client/dbclient_base.cpp
index 438e6a9eade..038eef80dae 100644
--- a/src/mongo/client/dbclient_base.cpp
+++ b/src/mongo/client/dbclient_base.cpp
@@ -275,8 +275,9 @@ std::tuple<bool, std::shared_ptr<DBClientBase>> DBClientBase::runCommandWithTarg
return std::make_tuple(isOk(info), result.second);
}
-bool DBClientBase::runCommand(const string& dbname, BSONObj cmd, BSONObj& info, int options) {
- auto res = runCommandWithTarget(dbname, std::move(cmd), info, options);
+bool DBClientBase::runCommand(const DatabaseName& dbName, BSONObj cmd, BSONObj& info, int options) {
+ auto res =
+ runCommandWithTarget(DatabaseNameUtil::serialize(dbName), std::move(cmd), info, options);
return std::get<0>(res);
}
@@ -296,7 +297,7 @@ long long DBClientBase::count(const NamespaceStringOrUUID nsOrUuid,
BSONObj cmd = _countCmd(nsOrUuid, query, options, limit, skip, readConcernObj, dollarTenant);
BSONObj res;
- if (!runCommand(DatabaseNameUtil::serialize(*dbName), cmd, res, options)) {
+ if (!runCommand(*dbName, cmd, res, options)) {
auto status = getStatusFromCommandResult(res);
uassertStatusOK(status.withContext("count fails:"));
}
@@ -460,7 +461,8 @@ bool DBClientBase::auth(const string& dbname,
}
void DBClientBase::logout(const string& dbname, BSONObj& info) {
- runCommand(dbname, BSON("logout" << 1), info);
+ // TODO SERVER-72977: Use dbname which is DatabaseName object already.
+ runCommand(DatabaseName(boost::none, dbname), BSON("logout" << 1), info);
}
bool DBClientBase::isPrimary(bool& isPrimary, BSONObj* info) {
@@ -473,7 +475,7 @@ bool DBClientBase::isPrimary(bool& isPrimary, BSONObj* info) {
BSONObj o;
if (info == nullptr)
info = &o;
- bool ok = runCommand("admin", bob.obj(), *info);
+ bool ok = runCommand(DatabaseName(boost::none, "admin"), bob.obj(), *info);
isPrimary =
info->getField(_apiParameters.getVersion() ? "isWritablePrimary" : "ismaster").trueValue();
return ok;
@@ -501,7 +503,8 @@ bool DBClientBase::createCollection(const string& ns,
if (writeConcernObj) {
b.append(WriteConcernOptions::kWriteConcernField, *writeConcernObj);
}
- return runCommand(db.c_str(), b.done(), *info);
+ // TODO SERVER-72942: Use ns.dbName() which is DatabaseName object already.
+ return runCommand(DatabaseName(boost::none, db), b.done(), *info);
}
list<BSONObj> DBClientBase::getCollectionInfos(const std::string& db, const BSONObj& filter) {
@@ -525,7 +528,7 @@ list<BSONObj> DBClientBase::getCollectionInfos(const DatabaseName& dbName,
}
BSONObj res;
- if (runCommand(DatabaseNameUtil::serialize(dbName), b.obj(), res, QueryOption_SecondaryOk)) {
+ if (runCommand(dbName, b.obj(), res, QueryOption_SecondaryOk)) {
BSONObj cursorObj = res["cursor"].Obj();
BSONObj collections = cursorObj["firstBatch"].Obj();
BSONObjIterator it(collections);
@@ -582,7 +585,7 @@ vector<BSONObj> DBClientBase::getDatabaseInfos(const BSONObj& filter,
BSONObj cmd = bob.done();
BSONObj res;
- if (runCommand("admin", cmd, res, QueryOption_SecondaryOk)) {
+ if (runCommand(DatabaseName(boost::none, "admin"), cmd, res, QueryOption_SecondaryOk)) {
BSONObj dbs = res["databases"].Obj();
BSONObjIterator it(dbs);
while (it.more()) {
@@ -836,7 +839,7 @@ std::list<BSONObj> DBClientBase::_getIndexSpecs(const NamespaceStringOrUUID& nsO
(dbName->tenantId() && dollarTenant) ? (dbName->tenantId() == dollarTenant) : true);
BSONObj res;
- if (runCommand(DatabaseNameUtil::serialize(*dbName), cmd, res, options)) {
+ if (runCommand(*dbName, cmd, res, options)) {
BSONObj cursorObj = res["cursor"].Obj();
BSONObjIterator i(cursorObj["firstBatch"].Obj());
while (i.more()) {
@@ -901,7 +904,8 @@ void DBClientBase::dropIndex(const string& ns,
}
BSONObj info;
- if (!runCommand(nsToDatabase(ns), cmdBuilder.obj(), info)) {
+ // TODO SERVER-72946: Use ns.dbName() which is DatabaseName object already.
+ if (!runCommand(DatabaseName(boost::none, nsToDatabase(ns)), cmdBuilder.obj(), info)) {
LOGV2_DEBUG(20118,
_logLevel.toInt(),
"dropIndex failed: {info}",
@@ -919,14 +923,20 @@ void DBClientBase::dropIndexes(const string& ns, boost::optional<BSONObj> writeC
cmdBuilder.append(WriteConcernOptions::kWriteConcernField, *writeConcernObj);
}
BSONObj info;
- uassert(10008, "dropIndexes failed", runCommand(nsToDatabase(ns), cmdBuilder.obj(), info));
+ // TODO SERVER-72946: Use ns.dbName() which is DatabaseName object already.
+ uassert(10008,
+ "dropIndexes failed",
+ runCommand(DatabaseName(boost::none, nsToDatabase(ns)), cmdBuilder.obj(), info));
}
void DBClientBase::reIndex(const string& ns) {
BSONObj info;
+ // TODO SERVER-72946: Use ns.dbName() which is DatabaseName object already.
uassert(18908,
str::stream() << "reIndex failed: " << info,
- runCommand(nsToDatabase(ns), BSON("reIndex" << nsToCollectionSubstring(ns)), info));
+ runCommand(DatabaseName(boost::none, nsToDatabase(ns)),
+ BSON("reIndex" << nsToCollectionSubstring(ns)),
+ info));
}
@@ -973,7 +983,8 @@ void DBClientBase::createIndexes(StringData ns,
const BSONObj commandObj = command.done();
BSONObj infoObj;
- if (!runCommand(nsToDatabase(ns), commandObj, infoObj)) {
+ // TODO SERVER-72946: Use the ns.dbName() which is DatabaseName object already.
+ if (!runCommand(DatabaseName(boost::none, nsToDatabase(ns)), commandObj, infoObj)) {
Status runCommandStatus = getStatusFromCommandResult(infoObj);
invariant(!runCommandStatus.isOK());
uassertStatusOK(runCommandStatus);
@@ -1002,7 +1013,8 @@ void DBClientBase::createIndexes(StringData ns,
const BSONObj commandObj = command.done();
BSONObj infoObj;
- if (!runCommand(nsToDatabase(ns), commandObj, infoObj)) {
+ // TODO SERVER-72946: Use the ns.dbName() which is DatabaseName object already.
+ if (!runCommand(DatabaseName(boost::none, nsToDatabase(ns)), commandObj, infoObj)) {
Status runCommandStatus = getStatusFromCommandResult(infoObj);
invariant(!runCommandStatus.isOK());
uassertStatusOK(runCommandStatus);
diff --git a/src/mongo/client/dbclient_base.h b/src/mongo/client/dbclient_base.h
index bc4a4d70e26..f820db28645 100644
--- a/src/mongo/client/dbclient_base.h
+++ b/src/mongo/client/dbclient_base.h
@@ -234,7 +234,7 @@ public:
* commands have prebuilt helper functions -- see below. If a helper is not available you can
* directly call runCommand.
*
- * 'dbname': Database name. Use "admin" for global administrative commands.
+ * 'dbName': Database name. Use "admin" for global administrative commands.
* 'cmd': The command object to execute. For example, { hello : 1 }.
* 'info': The result object the database returns. Typically has { ok : ..., errmsg : ... }
* fields set.
@@ -242,7 +242,7 @@ public:
*
* Returns true if the command returned "ok".
*/
- bool runCommand(const std::string& dbname, BSONObj cmd, BSONObj& info, int options = 0);
+ bool runCommand(const DatabaseName& dbName, BSONObj cmd, BSONObj& info, int options = 0);
/*
 * Wraps up the runCommand function above, but returns the DBClient that actually ran the
@@ -363,8 +363,10 @@ public:
info = &temp;
}
- bool res = runCommand(
- db.c_str(), BSON("drop" << coll << "writeConcern" << writeConcern.toBSON()), *info);
+ // TODO SERVER-72942: Use ns.dbName() which is DatabaseName object already.
+ bool res = runCommand(DatabaseName(boost::none, db),
+ BSON("drop" << coll << "writeConcern" << writeConcern.toBSON()),
+ *info);
return res;
}
@@ -375,7 +377,8 @@ public:
bool validate(const std::string& ns) {
BSONObj cmd = BSON("validate" << nsGetCollection(ns));
BSONObj info;
- return runCommand(nsGetDB(ns).c_str(), cmd, info);
+ // TODO SERVER-72943: Use ns.dbName() which is DatabaseName object already.
+ return runCommand(DatabaseName(boost::none, nsGetDB(ns)), cmd, info);
}
/**
@@ -406,8 +409,10 @@ public:
BSONObj o;
if (info == nullptr)
info = &o;
- return runCommand(
- dbname, BSON("dropDatabase" << 1 << "writeConcern" << writeConcern.toBSON()), *info);
+ // TODO SERVER-72944: Use dbname which is DatabaseName object already.
+ return runCommand(DatabaseName(boost::none, dbname),
+ BSON("dropDatabase" << 1 << "writeConcern" << writeConcern.toBSON()),
+ *info);
}
/**
diff --git a/src/mongo/client/dbclient_connection.cpp b/src/mongo/client/dbclient_connection.cpp
index 7cb1aa813a9..1f5f4070d9e 100644
--- a/src/mongo/client/dbclient_connection.cpp
+++ b/src/mongo/client/dbclient_connection.cpp
@@ -435,7 +435,7 @@ Status DBClientConnection::connectSocketOnly(
void DBClientConnection::logout(const string& dbname, BSONObj& info) {
authCache.erase(dbname);
_internalAuthOnReconnect = false;
- runCommand(dbname, BSON("logout" << 1), info);
+ runCommand(DatabaseName(boost::none, dbname), BSON("logout" << 1), info);
}
std::pair<rpc::UniqueReply, DBClientBase*> DBClientConnection::runCommandWithTarget(
diff --git a/src/mongo/client/dbclient_connection_integration_test.cpp b/src/mongo/client/dbclient_connection_integration_test.cpp
index 34b13adda99..65b3311583b 100644
--- a/src/mongo/client/dbclient_connection_integration_test.cpp
+++ b/src/mongo/client/dbclient_connection_integration_test.cpp
@@ -58,7 +58,7 @@ public:
auto conn = makeConn(kAppName + "-cleanup");
BSONObj currOp;
- if (!conn->runCommand("admin", BSON("currentOp" << 1), currOp))
+ if (!conn->runCommand({boost::none, "admin"}, BSON("currentOp" << 1), currOp))
uassertStatusOK(getStatusFromCommandResult(currOp));
for (auto&& op : currOp["inprog"].Obj()) {
@@ -74,7 +74,8 @@ public:
// Ignore failures to clean up.
BSONObj ignored;
- (void)conn->runCommand("admin", BSON("killOp" << 1 << "op" << op["opid"]), ignored);
+ (void)conn->runCommand(
+ {boost::none, "admin"}, BSON("killOp" << 1 << "op" << op["opid"]), ignored);
}
}
};
@@ -85,7 +86,7 @@ TEST_F(DBClientConnectionFixture, shutdownWorksIfCalledFirst) {
conn->shutdownAndDisallowReconnect();
BSONObj reply;
- ASSERT_THROWS(conn->runCommand("admin", sleepCmd, reply),
+ ASSERT_THROWS(conn->runCommand({boost::none, "admin"}, sleepCmd, reply),
ExceptionForCat<ErrorCategory::NetworkError>); // Currently SocketException.
}
@@ -100,7 +101,7 @@ TEST_F(DBClientConnectionFixture, shutdownWorksIfRunCommandInProgress) {
ON_BLOCK_EXIT([&] { shutdownThread.join(); });
BSONObj reply;
- ASSERT_THROWS(conn->runCommand("admin", sleepCmd, reply),
+ ASSERT_THROWS(conn->runCommand({boost::none, "admin"}, sleepCmd, reply),
ExceptionForCat<ErrorCategory::NetworkError>); // Currently HostUnreachable.
}
diff --git a/src/mongo/client/dbclient_cursor.cpp b/src/mongo/client/dbclient_cursor.cpp
index 9ecc2ce069e..c15183e2c25 100644
--- a/src/mongo/client/dbclient_cursor.cpp
+++ b/src/mongo/client/dbclient_cursor.cpp
@@ -371,7 +371,7 @@ StatusWith<std::unique_ptr<DBClientCursor>> DBClientCursor::fromAggregationReque
DBClientBase* client, AggregateCommandRequest aggRequest, bool secondaryOk, bool useExhaust) {
BSONObj ret;
try {
- if (!client->runCommand(aggRequest.getNamespace().db().toString(),
+ if (!client->runCommand(aggRequest.getNamespace().dbName(),
aggregation_request_helper::serializeToCommandObj(aggRequest),
ret,
secondaryOk ? QueryOption_SecondaryOk : 0)) {
diff --git a/src/mongo/db/commands/analyze_cmd.cpp b/src/mongo/db/commands/analyze_cmd.cpp
index cad6d7c8b57..8461f923181 100644
--- a/src/mongo/db/commands/analyze_cmd.cpp
+++ b/src/mongo/db/commands/analyze_cmd.cpp
@@ -214,7 +214,7 @@ public:
// Run Aggregate
BSONObj analyzeResult;
- client.runCommand(nss.db().toString(),
+ client.runCommand(nss.dbName(),
analyzeCommandAsAggregationCommand(opCtx,
nss.db(),
nss.coll(),
diff --git a/src/mongo/db/commands/create_indexes_test.cpp b/src/mongo/db/commands/create_indexes_test.cpp
index b7977ef5e67..1477ab3dcb0 100644
--- a/src/mongo/db/commands/create_indexes_test.cpp
+++ b/src/mongo/db/commands/create_indexes_test.cpp
@@ -68,7 +68,7 @@ TEST_F(CreateIndexesTest, CreateIndexesFailsWhenIndexBuildsCollectionIsMissing)
"createIndexes" << nss.coll() << "indexes" << BSON_ARRAY(index) << "commitQuorum" << 0);
BSONObj result;
// This should fail since config.system.indexBuilds does not exist.
- ASSERT_FALSE(client.runCommand(nss.db().toString(), createIndexesCmdObj, result)) << result;
+ ASSERT_FALSE(client.runCommand(nss.dbName(), createIndexesCmdObj, result)) << result;
ASSERT(result.hasField("code"));
ASSERT_EQ(result.getIntField("code"), 6325700);
}
diff --git a/src/mongo/db/commands/external_data_source_commands_test.cpp b/src/mongo/db/commands/external_data_source_commands_test.cpp
index 5303c77e757..217773fa023 100644
--- a/src/mongo/db/commands/external_data_source_commands_test.cpp
+++ b/src/mongo/db/commands/external_data_source_commands_test.cpp
@@ -31,6 +31,7 @@
#include "mongo/bson/bsonobjbuilder.h"
#include "mongo/client/dbclient_cursor.h"
+#include "mongo/db/database_name.h"
#include "mongo/db/dbdirectclient.h"
#include "mongo/db/pipeline/aggregation_request_helper.h"
#include "mongo/db/query/query_knobs_gen.h"
@@ -132,9 +133,12 @@ protected:
ServiceContext::UniqueOperationContext _uniqueOpCtx{makeOperationContext()};
OperationContext* _opCtx{_uniqueOpCtx.get()};
- static constexpr auto kDatabaseName = "external_data_source";
+ static const DatabaseName kDatabaseName;
};
+const DatabaseName ExternalDataSourceCommandsTest::kDatabaseName =
+ DatabaseName(boost::none, "external_data_source");
+
TEST_F(ExternalDataSourceCommandsTest, SimpleScanAggRequest) {
const auto nDocs = _random.nextInt32(100) + 1;
std::vector<BSONObj> srcDocs = generateRandomSimpleDocs(nDocs);
diff --git a/src/mongo/db/commands/feature_compatibility_version.cpp b/src/mongo/db/commands/feature_compatibility_version.cpp
index dfa4471bacf..8c9f621f0c2 100644
--- a/src/mongo/db/commands/feature_compatibility_version.cpp
+++ b/src/mongo/db/commands/feature_compatibility_version.cpp
@@ -252,7 +252,7 @@ void runUpdateCommand(OperationContext* opCtx, const FeatureCompatibilityVersion
// Update the featureCompatibilityVersion document stored in the server configuration
// collection.
BSONObj updateResult;
- client.runCommand(nss.db().toString(), updateCmd.obj(), updateResult);
+ client.runCommand(nss.dbName(), updateCmd.obj(), updateResult);
uassertStatusOK(getStatusFromWriteCommandReply(updateResult));
}
diff --git a/src/mongo/db/keys_collection_cache_test.cpp b/src/mongo/db/keys_collection_cache_test.cpp
index 0e2f179000e..b260dbb82e9 100644
--- a/src/mongo/db/keys_collection_cache_test.cpp
+++ b/src/mongo/db/keys_collection_cache_test.cpp
@@ -93,7 +93,7 @@ protected:
DBDirectClient client(opCtx);
BSONObj result;
- client.runCommand(nss.db().toString(), cmdObj, result);
+ client.runCommand(nss.dbName(), cmdObj, result);
ASSERT_OK(getStatusFromWriteCommandReply(result));
}
@@ -114,7 +114,7 @@ protected:
DBDirectClient client(opCtx);
BSONObj result;
- client.runCommand(nss.db().toString(), cmdObj, result);
+ client.runCommand(nss.dbName(), cmdObj, result);
ASSERT_OK(getStatusFromWriteCommandReply(result));
}
diff --git a/src/mongo/db/pipeline/aggregation_request_helper.cpp b/src/mongo/db/pipeline/aggregation_request_helper.cpp
index 1af17053607..60f19b9cf2e 100644
--- a/src/mongo/db/pipeline/aggregation_request_helper.cpp
+++ b/src/mongo/db/pipeline/aggregation_request_helper.cpp
@@ -75,17 +75,13 @@ StatusWith<AggregateCommandRequest> parseFromBSONForTests(
}
StatusWith<AggregateCommandRequest> parseFromBSONForTests(
- const std::string& dbName,
+ const DatabaseName& dbName,
const BSONObj& cmdObj,
boost::optional<ExplainOptions::Verbosity> explainVerbosity,
bool apiStrict) {
try {
return parseFromBSON(
- /*opCtx=*/nullptr,
- DatabaseName(boost::none, dbName),
- cmdObj,
- explainVerbosity,
- apiStrict);
+ /*opCtx=*/nullptr, dbName, cmdObj, explainVerbosity, apiStrict);
} catch (const AssertionException&) {
return exceptionToStatus();
}
diff --git a/src/mongo/db/pipeline/aggregation_request_helper.h b/src/mongo/db/pipeline/aggregation_request_helper.h
index 7c7c7c2453c..dff875ff7f5 100644
--- a/src/mongo/db/pipeline/aggregation_request_helper.h
+++ b/src/mongo/db/pipeline/aggregation_request_helper.h
@@ -90,7 +90,7 @@ AggregateCommandRequest parseFromBSON(OperationContext* opCtx,
bool apiStrict);
StatusWith<AggregateCommandRequest> parseFromBSONForTests(
- const std::string& dbName,
+ const DatabaseName& dbName,
const BSONObj& cmdObj,
boost::optional<ExplainOptions::Verbosity> explainVerbosity = boost::none,
bool apiStrict = false);
diff --git a/src/mongo/db/pipeline/aggregation_request_test.cpp b/src/mongo/db/pipeline/aggregation_request_test.cpp
index f2512b6a1c0..ba75b6f28ca 100644
--- a/src/mongo/db/pipeline/aggregation_request_test.cpp
+++ b/src/mongo/db/pipeline/aggregation_request_test.cpp
@@ -699,8 +699,8 @@ TEST(AggregationRequestTest, ParseFromBSONOverloadsShouldProduceIdenticalRequest
"'a'}");
NamespaceString nss("a.collection");
- auto aggReqDBName =
- unittest::assertGet(aggregation_request_helper::parseFromBSONForTests("a", inputBSON));
+ auto aggReqDBName = unittest::assertGet(
+ aggregation_request_helper::parseFromBSONForTests(nss.dbName(), inputBSON));
auto aggReqNSS =
unittest::assertGet(aggregation_request_helper::parseFromBSONForTests(nss, inputBSON));
diff --git a/src/mongo/db/repl/all_database_cloner.cpp b/src/mongo/db/repl/all_database_cloner.cpp
index 12ba9a0c33f..399ad24f608 100644
--- a/src/mongo/db/repl/all_database_cloner.cpp
+++ b/src/mongo/db/repl/all_database_cloner.cpp
@@ -227,8 +227,6 @@ void AllDatabaseCloner::postStage() {
_stats.databaseStats.emplace_back();
_stats.databaseStats.back().dbname = dbName;
- auto db = DatabaseNameUtil::serialize(dbName);
-
BSONObj cmdObj = BSON("dbStats" << 1);
BSONObjBuilder b(cmdObj);
if (gMultitenancySupport &&
@@ -239,7 +237,7 @@ void AllDatabaseCloner::postStage() {
}
BSONObj res;
- getClient()->runCommand(db, b.obj(), res);
+ getClient()->runCommand(dbName, b.obj(), res);
// It is possible for the call to 'dbStats' to fail if the sync source contains invalid
// views. We should not fail initial sync in this case due to the situation where the
diff --git a/src/mongo/db/repl/collection_cloner.cpp b/src/mongo/db/repl/collection_cloner.cpp
index b8bd3ba7331..216eec53a0a 100644
--- a/src/mongo/db/repl/collection_cloner.cpp
+++ b/src/mongo/db/repl/collection_cloner.cpp
@@ -138,7 +138,7 @@ void CollectionCloner::preStage() {
}
BSONObj res;
- getClient()->runCommand(DatabaseNameUtil::serialize(_sourceNss.dbName()), b.obj(), res);
+ getClient()->runCommand(_sourceNss.dbName(), b.obj(), res);
if (auto status = getStatusFromCommandResult(res); status.isOK()) {
_stats.bytesToCopy = res.getField("size").safeNumberLong();
if (_stats.bytesToCopy > 0) {
diff --git a/src/mongo/db/repl/initial_sync_base_cloner.cpp b/src/mongo/db/repl/initial_sync_base_cloner.cpp
index cf979b02f9e..6f6888fe9ce 100644
--- a/src/mongo/db/repl/initial_sync_base_cloner.cpp
+++ b/src/mongo/db/repl/initial_sync_base_cloner.cpp
@@ -145,7 +145,8 @@ Status InitialSyncBaseCloner::checkInitialSyncIdIsUnchanged() {
Status InitialSyncBaseCloner::checkRollBackIdIsUnchanged() {
BSONObj info;
try {
- getClient()->runCommand("admin", BSON("replSetGetRBID" << 1), info);
+ getClient()->runCommand(
+ DatabaseName(boost::none, "admin"), BSON("replSetGetRBID" << 1), info);
} catch (DBException& e) {
if (ErrorCodes::isRetriableError(e)) {
static constexpr char errorMsg[] =
diff --git a/src/mongo/db/repl/isself.cpp b/src/mongo/db/repl/isself.cpp
index 1ce196edc69..cc35bcacf4b 100644
--- a/src/mongo/db/repl/isself.cpp
+++ b/src/mongo/db/repl/isself.cpp
@@ -293,7 +293,7 @@ bool isSelfSlowPath(const HostAndPort& hostAndPort,
}
}
BSONObj out;
- bool ok = conn.runCommand("admin", BSON("_isSelf" << 1), out);
+ bool ok = conn.runCommand(DatabaseName(boost::none, "admin"), BSON("_isSelf" << 1), out);
bool me = ok && out["id"].type() == jstOID && instanceId == out["id"].OID();
return me;
diff --git a/src/mongo/db/repl/oplog_applier_impl_test.cpp b/src/mongo/db/repl/oplog_applier_impl_test.cpp
index 7252aabc100..e75c20b2707 100644
--- a/src/mongo/db/repl/oplog_applier_impl_test.cpp
+++ b/src/mongo/db/repl/oplog_applier_impl_test.cpp
@@ -3423,7 +3423,7 @@ public:
DBDirectClient client(_opCtx.get());
BSONObj result;
- ASSERT(client.runCommand(kNs.db().toString(), BSON("create" << kNs.coll()), result));
+ ASSERT(client.runCommand(kNs.dbName(), BSON("create" << kNs.coll()), result));
}
/**
@@ -3833,10 +3833,10 @@ TEST_F(OplogApplierImplTxnTableTest, MultiApplyUpdatesTheTransactionTable) {
DBDirectClient client(_opCtx.get());
BSONObj result;
- ASSERT(client.runCommand(ns0.db().toString(), BSON("create" << ns0.coll()), result));
- ASSERT(client.runCommand(ns1.db().toString(), BSON("create" << ns1.coll()), result));
- ASSERT(client.runCommand(ns2.db().toString(), BSON("create" << ns2.coll()), result));
- ASSERT(client.runCommand(ns3.db().toString(), BSON("create" << ns3.coll()), result));
+ ASSERT(client.runCommand(ns0.dbName(), BSON("create" << ns0.coll()), result));
+ ASSERT(client.runCommand(ns1.dbName(), BSON("create" << ns1.coll()), result));
+ ASSERT(client.runCommand(ns2.dbName(), BSON("create" << ns2.coll()), result));
+ ASSERT(client.runCommand(ns3.dbName(), BSON("create" << ns3.coll()), result));
auto uuid0 = [&] {
return AutoGetCollectionForRead(_opCtx.get(), ns0).getCollection()->uuid();
}();
diff --git a/src/mongo/db/repl/primary_only_service_test.cpp b/src/mongo/db/repl/primary_only_service_test.cpp
index f32dae2ffd4..2b019c61879 100644
--- a/src/mongo/db/repl/primary_only_service_test.cpp
+++ b/src/mongo/db/repl/primary_only_service_test.cpp
@@ -289,7 +289,7 @@ private:
auto opCtx = opCtxHolder.get();
DBDirectClient client(opCtx);
BSONObj result;
- client.runCommand(nss.db().toString(),
+ client.runCommand(nss.dbName(),
BSON("createIndexes"
<< nss.coll().toString() << "indexes"
<< BSON_ARRAY(BSON("key" << BSON("x" << 1) << "name"
diff --git a/src/mongo/db/repl/rollback_source_impl.cpp b/src/mongo/db/repl/rollback_source_impl.cpp
index 8b427be197c..315de2df6af 100644
--- a/src/mongo/db/repl/rollback_source_impl.cpp
+++ b/src/mongo/db/repl/rollback_source_impl.cpp
@@ -61,7 +61,8 @@ const HostAndPort& RollbackSourceImpl::getSource() const {
int RollbackSourceImpl::getRollbackId() const {
bo info;
- _getConnection()->runCommand("admin", BSON("replSetGetRBID" << 1), info);
+ _getConnection()->runCommand(
+ DatabaseName(boost::none, "admin"), BSON("replSetGetRBID" << 1), info);
return info["rbid"].numberInt();
}
diff --git a/src/mongo/db/repl/storage_timestamp_test.cpp b/src/mongo/db/repl/storage_timestamp_test.cpp
index 466fdf3de34..26bf7b1cdbe 100644
--- a/src/mongo/db/repl/storage_timestamp_test.cpp
+++ b/src/mongo/db/repl/storage_timestamp_test.cpp
@@ -2073,7 +2073,7 @@ TEST_F(StorageTimestampTest, TimestampMultiIndexBuilds) {
BSON("createIndexes" << nss.coll() << "indexes" << BSON_ARRAY(index1 << index2)
<< "commitQuorum" << 0);
BSONObj result;
- ASSERT(client.runCommand(nss.db().toString(), createIndexesCmdObj, result)) << result;
+ ASSERT(client.runCommand(nss.dbName(), createIndexesCmdObj, result)) << result;
}
auto indexCreateInitTs = queryOplog(BSON("op"
@@ -2172,7 +2172,7 @@ TEST_F(StorageTimestampTest, TimestampMultiIndexBuildsDuringRename) {
BSON("createIndexes" << nss.coll() << "indexes" << BSON_ARRAY(index1 << index2)
<< "commitQuorum" << 0);
BSONObj result;
- ASSERT(client.runCommand(nss.db().toString(), createIndexesCmdObj, result)) << result;
+ ASSERT(client.runCommand(nss.dbName(), createIndexesCmdObj, result)) << result;
}
AutoGetCollection autoColl(_opCtx, nss, LockMode::MODE_X);
@@ -2187,7 +2187,7 @@ TEST_F(StorageTimestampTest, TimestampMultiIndexBuildsDuringRename) {
// Rename collection.
BSONObj renameResult;
ASSERT(client.runCommand(
- "admin",
+ DatabaseName(boost::none, "admin"),
BSON("renameCollection" << nss.ns() << "to" << renamedNss.ns() << "dropTarget" << true),
renameResult))
<< renameResult;
@@ -2298,7 +2298,7 @@ TEST_F(StorageTimestampTest, TimestampAbortIndexBuild) {
DBDirectClient client(_opCtx);
BSONObj result;
- ASSERT_FALSE(client.runCommand(nss.db().toString(), createIndexesCmdObj, result));
+ ASSERT_FALSE(client.runCommand(nss.dbName(), createIndexesCmdObj, result));
ASSERT_EQUALS(ErrorCodes::DuplicateKey, getStatusFromCommandResult(result));
}
@@ -3076,7 +3076,7 @@ TEST_F(StorageTimestampTest, MultipleTimestampsForMultikeyWrites) {
BSON("createIndexes" << nss.coll() << "indexes" << BSON_ARRAY(index1 << index2)
<< "commitQuorum" << 0);
BSONObj result;
- ASSERT(client.runCommand(nss.db().toString(), createIndexesCmdObj, result)) << result;
+ ASSERT(client.runCommand(nss.dbName(), createIndexesCmdObj, result)) << result;
}
AutoGetCollection autoColl(_opCtx, nss, LockMode::MODE_IX);
diff --git a/src/mongo/db/repl/tenant_all_database_cloner.cpp b/src/mongo/db/repl/tenant_all_database_cloner.cpp
index 70c55eb373a..ab56852ed99 100644
--- a/src/mongo/db/repl/tenant_all_database_cloner.cpp
+++ b/src/mongo/db/repl/tenant_all_database_cloner.cpp
@@ -99,7 +99,8 @@ BaseCloner::AfterStageBehavior TenantAllDatabaseCloner::listDatabasesStage() {
BSONObj readResult;
BSONObj cmd = ClonerUtils::buildMajorityWaitRequest(_operationTime);
- getClient()->runCommand("admin", cmd, readResult, QueryOption_SecondaryOk);
+ getClient()->runCommand(
+ DatabaseName(boost::none, "admin"), cmd, readResult, QueryOption_SecondaryOk);
uassertStatusOKWithContext(
getStatusFromCommandResult(readResult),
"TenantAllDatabaseCloner failed to get listDatabases result majority-committed");
@@ -162,7 +163,8 @@ BaseCloner::AfterStageBehavior TenantAllDatabaseCloner::listExistingDatabasesSta
clonedDatabases.emplace_back(dbName);
BSONObj res;
- client.runCommand(dbName, BSON("dbStats" << 1), res);
+ // TODO SERVER-72945: Use dbName which is DatabaseName object already.
+ client.runCommand(DatabaseName(boost::none, dbName), BSON("dbStats" << 1), res);
if (auto status = getStatusFromCommandResult(res); !status.isOK()) {
LOGV2_WARNING(5522900,
"Skipping recording of data size metrics for database due to failure "
@@ -232,7 +234,8 @@ BaseCloner::AfterStageBehavior TenantAllDatabaseCloner::initializeStatsStage() {
long long approxTotalDataSizeLeftOnRemote = 0;
for (const auto& dbName : _databases) {
BSONObj res;
- getClient()->runCommand(dbName, BSON("dbStats" << 1), res);
+ // TODO SERVER-72945: Use dbName which is DatabaseName object already.
+ getClient()->runCommand(DatabaseName(boost::none, dbName), BSON("dbStats" << 1), res);
if (auto status = getStatusFromCommandResult(res); !status.isOK()) {
LOGV2_WARNING(5426600,
"Skipping recording of data size metrics for database due to failure "
diff --git a/src/mongo/db/repl/tenant_collection_cloner.cpp b/src/mongo/db/repl/tenant_collection_cloner.cpp
index 834fb288f47..de558fc74fd 100644
--- a/src/mongo/db/repl/tenant_collection_cloner.cpp
+++ b/src/mongo/db/repl/tenant_collection_cloner.cpp
@@ -163,8 +163,7 @@ BaseCloner::AfterStageBehavior TenantCollectionCloner::countStage() {
}
BSONObj res;
- getClient()->runCommand(
- _sourceNss.db().toString(), BSON("collStats" << _sourceNss.coll()), res);
+ getClient()->runCommand(_sourceNss.dbName(), BSON("collStats" << _sourceNss.coll()), res);
auto status = getStatusFromCommandResult(res);
if (!status.isOK()) {
LOGV2_WARNING(5426601,
@@ -245,7 +244,8 @@ BaseCloner::AfterStageBehavior TenantCollectionCloner::listIndexesStage() {
BSONObj readResult;
BSONObj cmd = ClonerUtils::buildMajorityWaitRequest(_operationTime);
- getClient()->runCommand("admin", cmd, readResult, QueryOption_SecondaryOk);
+ getClient()->runCommand(
+ DatabaseName(boost::none, "admin"), cmd, readResult, QueryOption_SecondaryOk);
uassertStatusOKWithContext(
getStatusFromCommandResult(readResult),
"TenantCollectionCloner failed to get listIndexes result majority-committed");
diff --git a/src/mongo/db/repl/tenant_database_cloner.cpp b/src/mongo/db/repl/tenant_database_cloner.cpp
index 91462a8eae8..b326c1128e3 100644
--- a/src/mongo/db/repl/tenant_database_cloner.cpp
+++ b/src/mongo/db/repl/tenant_database_cloner.cpp
@@ -115,7 +115,8 @@ BaseCloner::AfterStageBehavior TenantDatabaseCloner::listCollectionsStage() {
BSONObj readResult;
BSONObj cmd = ClonerUtils::buildMajorityWaitRequest(_operationTime);
- getClient()->runCommand("admin", cmd, readResult, QueryOption_SecondaryOk);
+ getClient()->runCommand(
+ DatabaseName(boost::none, "admin"), cmd, readResult, QueryOption_SecondaryOk);
uassertStatusOKWithContext(
getStatusFromCommandResult(readResult),
"TenantDatabaseCloner failed to get listCollections result majority-committed");
@@ -224,7 +225,9 @@ BaseCloner::AfterStageBehavior TenantDatabaseCloner::listExistingCollectionsStag
clonedCollectionUUIDs.emplace_back(result.getInfo().getUuid());
BSONObj res;
- client.runCommand(_dbName, BSON("collStats" << result.getName()), res);
+ // TODO SERVER-72945: Use the _dbName which is DatabaseName object already.
+ client.runCommand(
+ DatabaseName(boost::none, _dbName), BSON("collStats" << result.getName()), res);
if (auto status = getStatusFromCommandResult(res); !status.isOK()) {
LOGV2_WARNING(5522901,
"Skipping recording of data size metrics for database due to failure "
diff --git a/src/mongo/db/repl/tenant_migration_donor_service.cpp b/src/mongo/db/repl/tenant_migration_donor_service.cpp
index 51c305c646e..df716176ecf 100644
--- a/src/mongo/db/repl/tenant_migration_donor_service.cpp
+++ b/src/mongo/db/repl/tenant_migration_donor_service.cpp
@@ -263,7 +263,7 @@ ExecutorFuture<void> TenantMigrationDonorService::createStateDocumentTTLIndex(
BSONObj result;
client.runCommand(
- nss.db().toString(),
+ nss.dbName(),
BSON("createIndexes"
<< nss.coll().toString() << "indexes"
<< BSON_ARRAY(BSON("key" << BSON("expireAt" << 1) << "name" << kTTLIndexName
@@ -290,7 +290,7 @@ ExecutorFuture<void> TenantMigrationDonorService::createExternalKeysTTLIndex(
BSONObj result;
client.runCommand(
- nss.db().toString(),
+ nss.dbName(),
BSON("createIndexes"
<< nss.coll().toString() << "indexes"
<< BSON_ARRAY(BSON("key" << BSON("ttlExpiresAt" << 1) << "name"
diff --git a/src/mongo/db/repl/tenant_migration_recipient_service.cpp b/src/mongo/db/repl/tenant_migration_recipient_service.cpp
index cf87a25bc51..30a92773e96 100644
--- a/src/mongo/db/repl/tenant_migration_recipient_service.cpp
+++ b/src/mongo/db/repl/tenant_migration_recipient_service.cpp
@@ -320,7 +320,7 @@ ExecutorFuture<void> TenantMigrationRecipientService::_rebuildService(
BSONObj result;
client.runCommand(
- nss.db().toString(),
+ nss.dbName(),
BSON("createIndexes"
<< nss.coll().toString() << "indexes"
<< BSON_ARRAY(BSON("key" << BSON("expireAt" << 1) << "name" << kTTLIndexName
@@ -1762,7 +1762,8 @@ TenantMigrationRecipientService::Instance::_fetchRetryableWritesOplogBeforeStart
BSONObj readResult;
BSONObj cmd = ClonerUtils::buildMajorityWaitRequest(*operationTime);
- _client.get()->runCommand("admin", cmd, readResult, QueryOption_SecondaryOk);
+ _client.get()->runCommand(
+ DatabaseName(boost::none, "admin"), cmd, readResult, QueryOption_SecondaryOk);
uassertStatusOKWithContext(
getStatusFromCommandResult(readResult),
"Failed to wait for retryable writes pre-fetch result majority committed");
diff --git a/src/mongo/db/s/add_shard_cmd.cpp b/src/mongo/db/s/add_shard_cmd.cpp
index e839049e525..82b4077f619 100644
--- a/src/mongo/db/s/add_shard_cmd.cpp
+++ b/src/mongo/db/s/add_shard_cmd.cpp
@@ -77,7 +77,8 @@ public:
DBDirectClient localClient(opCtx);
BSONObj res;
- localClient.runCommand(NamespaceString::kAdminDb.toString(), shardIdUpsertCmd, res);
+ localClient.runCommand(
+ DatabaseName(boost::none, NamespaceString::kAdminDb), shardIdUpsertCmd, res);
uassertStatusOK(getStatusFromCommandResult(res));
diff --git a/src/mongo/db/s/config/sharding_catalog_manager.cpp b/src/mongo/db/s/config/sharding_catalog_manager.cpp
index ae1d331d192..cf04ddd7b24 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager.cpp
@@ -114,7 +114,7 @@ BSONObj executeConfigRequest(OperationContext* opCtx,
invariant(nss.db() == NamespaceString::kConfigDb);
DBDirectClient client(opCtx);
BSONObj result;
- client.runCommand(nss.db().toString(), request.toBSON(), result);
+ client.runCommand(nss.dbName(), request.toBSON(), result);
return result;
}
@@ -489,7 +489,7 @@ Status ShardingCatalogManager::_initConfigCollections(OperationContext* opCtx) {
BSONObj cmd = BSON("create" << CollectionType::ConfigNS.coll());
BSONObj result;
- const bool ok = client.runCommand(CollectionType::ConfigNS.db().toString(), cmd, result);
+ const bool ok = client.runCommand(CollectionType::ConfigNS.dbName(), cmd, result);
if (!ok) { // create returns error NamespaceExists if collection already exists
Status status = getStatusFromCommandResult(result);
if (status != ErrorCodes::NamespaceExists) {
@@ -536,7 +536,7 @@ Status ShardingCatalogManager::_initConfigSettings(OperationContext* opCtx) {
BSONObj cmd = BSON("create" << NamespaceString::kConfigSettingsNamespace.coll());
BSONObj result;
const bool ok =
- client.runCommand(NamespaceString::kConfigSettingsNamespace.db().toString(), cmd, result);
+ client.runCommand(NamespaceString::kConfigSettingsNamespace.dbName(), cmd, result);
if (!ok) { // create returns error NamespaceExists if collection already exists
Status status = getStatusFromCommandResult(result);
if (status != ErrorCodes::NamespaceExists) {
diff --git a/src/mongo/db/s/create_collection_coordinator.cpp b/src/mongo/db/s/create_collection_coordinator.cpp
index 131a7035262..ddd383991b0 100644
--- a/src/mongo/db/s/create_collection_coordinator.cpp
+++ b/src/mongo/db/s/create_collection_coordinator.cpp
@@ -1094,7 +1094,7 @@ void CreateCollectionCoordinator::_createCollectionAndIndexes(
BSONObj createRes;
DBDirectClient localClient(opCtx);
- localClient.runCommand(nss().db().toString(), createCmd, createRes);
+ localClient.runCommand(nss().dbName(), createCmd, createRes);
auto createStatus = getStatusFromCommandResult(createRes);
if (!createStatus.isOK() && createStatus.code() == ErrorCodes::NamespaceExists) {
diff --git a/src/mongo/db/s/global_index/global_index_cloning_service_test.cpp b/src/mongo/db/s/global_index/global_index_cloning_service_test.cpp
index 01cf6a8ac6c..0e21d2a6b7e 100644
--- a/src/mongo/db/s/global_index/global_index_cloning_service_test.cpp
+++ b/src/mongo/db/s/global_index/global_index_cloning_service_test.cpp
@@ -309,7 +309,8 @@ public:
CreateGlobalIndex createGlobalIndex(_indexCollectionUUID);
createGlobalIndex.setDbName({boost::none, "admin"});
BSONObj cmdResult;
- auto success = client.runCommand("admin", createGlobalIndex.toBSON({}), cmdResult);
+ auto success =
+ client.runCommand({boost::none, "admin"}, createGlobalIndex.toBSON({}), cmdResult);
ASSERT(success) << "createGlobalIndex cmd failed with result: " << cmdResult;
}
diff --git a/src/mongo/db/s/global_index/global_index_inserter_test.cpp b/src/mongo/db/s/global_index/global_index_inserter_test.cpp
index 2ffc1eae4a3..760886964f9 100644
--- a/src/mongo/db/s/global_index/global_index_inserter_test.cpp
+++ b/src/mongo/db/s/global_index/global_index_inserter_test.cpp
@@ -78,7 +78,8 @@ public:
CreateGlobalIndex createGlobalIndex(_indexUUID);
createGlobalIndex.setDbName({boost::none, "admin"});
BSONObj cmdResult;
- auto success = client.runCommand("admin", createGlobalIndex.toBSON({}), cmdResult);
+ auto success =
+ client.runCommand({boost::none, "admin"}, createGlobalIndex.toBSON({}), cmdResult);
ASSERT(success) << "createGlobalIndex cmd failed with result: " << cmdResult;
}
diff --git a/src/mongo/db/s/migration_destination_manager.cpp b/src/mongo/db/s/migration_destination_manager.cpp
index 8a7851ba3c8..54a7b3abbff 100644
--- a/src/mongo/db/s/migration_destination_manager.cpp
+++ b/src/mongo/db/s/migration_destination_manager.cpp
@@ -972,7 +972,7 @@ void MigrationDestinationManager::_dropLocalIndexesIfNecessary(
recipientIndex[IndexDescriptor::kKeyPatternFieldName].Obj())) {
BSONObj info;
if (!client.runCommand(
- nss.db().toString(),
+ nss.dbName(),
BSON("dropIndexes" << nss.coll() << "index" << indexNameElem),
info))
uassertStatusOK(getStatusFromCommandResult(info));
diff --git a/src/mongo/db/s/move_primary_coordinator.cpp b/src/mongo/db/s/move_primary_coordinator.cpp
index 84e29628587..3f95ebc25cd 100644
--- a/src/mongo/db/s/move_primary_coordinator.cpp
+++ b/src/mongo/db/s/move_primary_coordinator.cpp
@@ -463,7 +463,7 @@ void MovePrimaryCoordinator::dropStaleDataOnDonor(OperationContext* opCtx) const
for (const auto& nss : *_doc.getCollectionsToClone()) {
const auto dropStatus = [&] {
BSONObj dropResult;
- dbClient.runCommand(_dbName.toString(), BSON("drop" << nss.coll()), dropResult);
+ dbClient.runCommand(_dbName, BSON("drop" << nss.coll()), dropResult);
return getStatusFromCommandResult(dropResult);
}();
diff --git a/src/mongo/db/s/move_primary_source_manager.cpp b/src/mongo/db/s/move_primary_source_manager.cpp
index 0df7fc1fad9..add1bd585eb 100644
--- a/src/mongo/db/s/move_primary_source_manager.cpp
+++ b/src/mongo/db/s/move_primary_source_manager.cpp
@@ -465,7 +465,7 @@ Status MovePrimarySourceManager::cleanStaleData(OperationContext* opCtx) {
DBDirectClient client(opCtx);
for (auto& coll : _clonedColls) {
BSONObj dropCollResult;
- client.runCommand(_dbname.toString(), BSON("drop" << coll.coll()), dropCollResult);
+ client.runCommand(_dbname, BSON("drop" << coll.coll()), dropCollResult);
Status dropStatus = getStatusFromCommandResult(dropCollResult);
if (!dropStatus.isOK()) {
LOGV2(22045,
diff --git a/src/mongo/db/s/resharding/resharding_coordinator_service.cpp b/src/mongo/db/s/resharding/resharding_coordinator_service.cpp
index 021699d4adc..52d006f5308 100644
--- a/src/mongo/db/s/resharding/resharding_coordinator_service.cpp
+++ b/src/mongo/db/s/resharding/resharding_coordinator_service.cpp
@@ -740,7 +740,7 @@ void updateTagsDocsForTempNss(OperationContext* opCtx,
// currently have 'ns' as the temporary collection namespace.
DBDirectClient client(opCtx);
BSONObj tagsRes;
- client.runCommand(tagsRequest.getNS().db().toString(), tagsRequest.toBSON(), tagsRes);
+ client.runCommand(tagsRequest.getNS().dbName(), tagsRequest.toBSON(), tagsRes);
uassertStatusOK(getStatusFromWriteCommandReply(tagsRes));
}
@@ -1071,7 +1071,7 @@ ExecutorFuture<void> ReshardingCoordinatorService::_rebuildService(
DBDirectClient client(opCtx);
BSONObj result;
client.runCommand(
- nss.db().toString(),
+ nss.dbName(),
BSON("createIndexes"
<< nss.coll().toString() << "indexes"
<< BSON_ARRAY(BSON("key" << BSON("active" << 1) << "name"
diff --git a/src/mongo/db/s/resharding/resharding_oplog_fetcher_test.cpp b/src/mongo/db/s/resharding/resharding_oplog_fetcher_test.cpp
index d620bee286d..62b408da0b7 100644
--- a/src/mongo/db/s/resharding/resharding_oplog_fetcher_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_oplog_fetcher_test.cpp
@@ -248,7 +248,7 @@ public:
onCommand([&](const executor::RemoteCommandRequest& request) -> StatusWith<BSONObj> {
DBDirectClient client(cc().getOperationContext());
BSONObj result;
- bool res = client.runCommand(request.dbname, request.cmdObj, result);
+ bool res = client.runCommand({boost::none, request.dbname}, request.cmdObj, result);
if (res == false || result.hasField("cursorsKilled") ||
result["cursor"]["id"].Long() == 0) {
hasMore = false;
diff --git a/src/mongo/db/s/shard_key_util.cpp b/src/mongo/db/s/shard_key_util.cpp
index 8bc0929970a..7c585ff9677 100644
--- a/src/mongo/db/s/shard_key_util.cpp
+++ b/src/mongo/db/s/shard_key_util.cpp
@@ -289,7 +289,7 @@ void ValidationBehaviorsShardCollection::verifyUsefulNonMultiKeyIndex(
const NamespaceString& nss, const BSONObj& proposedKey) const {
BSONObj res;
auto success = _localClient->runCommand(
- "admin",
+ DatabaseName(boost::none, "admin"),
BSON(kCheckShardingIndexCmdName << nss.ns() << kKeyPatternField << proposedKey),
res);
uassert(ErrorCodes::InvalidOptions, res["errmsg"].str(), success);
@@ -313,7 +313,7 @@ void ValidationBehaviorsShardCollection::createShardKeyIndex(
defaultCollation && !defaultCollation->isEmpty() ? CollationSpec::kSimpleSpec : BSONObj();
auto createIndexesCmd = makeCreateIndexesCmd(nss, proposedKey, collation, unique);
BSONObj res;
- _localClient->runCommand(nss.db().toString(), createIndexesCmd, res);
+ _localClient->runCommand(nss.dbName(), createIndexesCmd, res);
uassertStatusOK(getStatusFromCommandResult(res));
}
diff --git a/src/mongo/db/serverless/shard_split_donor_service.cpp b/src/mongo/db/serverless/shard_split_donor_service.cpp
index ed52e344a70..afa630bc9d8 100644
--- a/src/mongo/db/serverless/shard_split_donor_service.cpp
+++ b/src/mongo/db/serverless/shard_split_donor_service.cpp
@@ -728,7 +728,7 @@ ExecutorFuture<void> ShardSplitDonorService::DonorStateMachine::_applySplitConfi
DBDirectClient client(opCtxHolder.get());
BSONObj result;
const bool returnValue =
- client.runCommand(NamespaceString::kAdminDb.toString(),
+ client.runCommand(DatabaseName(boost::none, NamespaceString::kAdminDb),
BSON("replSetReconfig" << splitConfig.toBSON()),
result);
uassert(ErrorCodes::BadValue,
@@ -1219,7 +1219,7 @@ ExecutorFuture<void> ShardSplitDonorService::DonorStateMachine::_removeSplitConf
BSONObj result;
const bool returnValue =
- client.runCommand(NamespaceString::kAdminDb.toString(),
+ client.runCommand(DatabaseName(boost::none, NamespaceString::kAdminDb),
BSON("replSetReconfig" << newConfigBob.obj()),
result);
uassert(
diff --git a/src/mongo/db/session/sessions_collection.cpp b/src/mongo/db/session/sessions_collection.cpp
index 4fc79189206..78492298140 100644
--- a/src/mongo/db/session/sessions_collection.cpp
+++ b/src/mongo/db/session/sessions_collection.cpp
@@ -156,7 +156,7 @@ SessionsCollection::SendBatchFn SessionsCollection::makeSendFnForBatchWrite(
const NamespaceString& ns, DBClientBase* client) {
auto send = [client, ns](BSONObj batch) {
BSONObj res;
- if (!client->runCommand(ns.db().toString(), batch, res)) {
+ if (!client->runCommand(ns.dbName(), batch, res)) {
uassertStatusOK(getStatusFromCommandResult(res));
}
};
@@ -168,7 +168,7 @@ SessionsCollection::SendBatchFn SessionsCollection::makeSendFnForCommand(const N
DBClientBase* client) {
auto send = [client, ns](BSONObj cmd) {
BSONObj res;
- if (!client->runCommand(ns.db().toString(), cmd, res)) {
+ if (!client->runCommand(ns.dbName(), cmd, res)) {
uassertStatusOK(getStatusFromCommandResult(res));
}
};
@@ -180,7 +180,7 @@ SessionsCollection::FindBatchFn SessionsCollection::makeFindFnForCommand(const N
DBClientBase* client) {
auto send = [client, ns](BSONObj cmd) -> BSONObj {
BSONObj res;
- if (!client->runCommand(ns.db().toString(), cmd, res)) {
+ if (!client->runCommand(ns.dbName(), cmd, res)) {
uassertStatusOK(getStatusFromCommandResult(res));
}
diff --git a/src/mongo/db/session/sessions_collection_rs.cpp b/src/mongo/db/session/sessions_collection_rs.cpp
index 865f0b825e9..5483466eaa7 100644
--- a/src/mongo/db/session/sessions_collection_rs.cpp
+++ b/src/mongo/db/session/sessions_collection_rs.cpp
@@ -117,32 +117,31 @@ auto SessionsCollectionRS::_dispatch(const NamespaceString& ns,
}
void SessionsCollectionRS::setupSessionsCollection(OperationContext* opCtx) {
- _dispatch(
- NamespaceString::kLogicalSessionsNamespace,
- opCtx,
- [&] {
- try {
- checkSessionsCollectionExists(opCtx);
- } catch (const DBException& ex) {
-
- DBDirectClient client(opCtx);
- BSONObj cmd;
-
- if (ex.code() == ErrorCodes::IndexOptionsConflict) {
- cmd = generateCollModCmd();
- } else {
- // Creating the TTL index will auto-generate the collection.
- cmd = generateCreateIndexesCmd();
- }
-
- BSONObj info;
- if (!client.runCommand(
- NamespaceString::kLogicalSessionsNamespace.db().toString(), cmd, info)) {
- uassertStatusOK(getStatusFromCommandResult(info));
- }
- }
- },
- [&](DBClientBase*) { checkSessionsCollectionExists(opCtx); });
+ _dispatch(NamespaceString::kLogicalSessionsNamespace,
+ opCtx,
+ [&] {
+ try {
+ checkSessionsCollectionExists(opCtx);
+ } catch (const DBException& ex) {
+
+ DBDirectClient client(opCtx);
+ BSONObj cmd;
+
+ if (ex.code() == ErrorCodes::IndexOptionsConflict) {
+ cmd = generateCollModCmd();
+ } else {
+ // Creating the TTL index will auto-generate the collection.
+ cmd = generateCreateIndexesCmd();
+ }
+
+ BSONObj info;
+ if (!client.runCommand(
+ NamespaceString::kLogicalSessionsNamespace.dbName(), cmd, info)) {
+ uassertStatusOK(getStatusFromCommandResult(info));
+ }
+ }
+ },
+ [&](DBClientBase*) { checkSessionsCollectionExists(opCtx); });
}
void SessionsCollectionRS::checkSessionsCollectionExists(OperationContext* opCtx) {
diff --git a/src/mongo/db/session/sessions_collection_standalone.cpp b/src/mongo/db/session/sessions_collection_standalone.cpp
index 94fb61cfc75..ef63bd4b2e0 100644
--- a/src/mongo/db/session/sessions_collection_standalone.cpp
+++ b/src/mongo/db/session/sessions_collection_standalone.cpp
@@ -59,8 +59,7 @@ void SessionsCollectionStandalone::setupSessionsCollection(OperationContext* opC
}
BSONObj info;
- if (!client.runCommand(
- NamespaceString::kLogicalSessionsNamespace.db().toString(), cmd, info)) {
+ if (!client.runCommand(NamespaceString::kLogicalSessionsNamespace.dbName(), cmd, info)) {
uassertStatusOKWithContext(getStatusFromCommandResult(info),
str::stream() << "Failed to create "
<< NamespaceString::kLogicalSessionsNamespace);
diff --git a/src/mongo/db/transaction/transaction_api.cpp b/src/mongo/db/transaction/transaction_api.cpp
index 313b3825991..484c522c7f2 100644
--- a/src/mongo/db/transaction/transaction_api.cpp
+++ b/src/mongo/db/transaction/transaction_api.cpp
@@ -569,7 +569,7 @@ SemiFuture<BSONObj> Transaction::_commitOrAbort(StringData dbName, StringData cm
}
return ExecutorFuture<void>(_executor)
- .then([this, dbNameCopy = dbName.toString(), cmdObj = cmdBuilder.obj()] {
+ .then([this, dbNameCopy = dbName, cmdObj = cmdBuilder.obj()] {
return _txnClient->runCommand(dbNameCopy, cmdObj);
})
// Safe to inline because the continuation only holds state.
diff --git a/src/mongo/db/transaction/transaction_participant_retryable_writes_test.cpp b/src/mongo/db/transaction/transaction_participant_retryable_writes_test.cpp
index b91e93dd336..3a5a784b4a9 100644
--- a/src/mongo/db/transaction/transaction_participant_retryable_writes_test.cpp
+++ b/src/mongo/db/transaction/transaction_participant_retryable_writes_test.cpp
@@ -498,7 +498,7 @@ TEST_F(TransactionParticipantRetryableWritesTest, SessionTransactionsCollectionN
BSONObj dropResult;
DBDirectClient client(opCtx());
const auto& nss = NamespaceString::kSessionTransactionsTableNamespace;
- ASSERT(client.runCommand(nss.db().toString(), BSON("drop" << nss.coll()), dropResult));
+ ASSERT(client.runCommand(nss.dbName(), BSON("drop" << nss.coll()), dropResult));
const TxnNumber txnNum = 21;
txnParticipant.beginOrContinue(
diff --git a/src/mongo/dbtests/clienttests.cpp b/src/mongo/dbtests/clienttests.cpp
index 3abf5681950..2dfe5432423 100644
--- a/src/mongo/dbtests/clienttests.cpp
+++ b/src/mongo/dbtests/clienttests.cpp
@@ -226,7 +226,7 @@ public:
db.createCollection("unittests.clienttests.create");
BSONObj info;
- ASSERT(db.runCommand("unittests",
+ ASSERT(db.runCommand({boost::none, "unittests"},
BSON("collstats"
<< "clienttests.create"),
info));
diff --git a/src/mongo/dbtests/commandtests.cpp b/src/mongo/dbtests/commandtests.cpp
index 3122f5ff427..e7440c4309a 100644
--- a/src/mongo/dbtests/commandtests.cpp
+++ b/src/mongo/dbtests/commandtests.cpp
@@ -88,8 +88,8 @@ public:
NamespaceString nss() {
return NamespaceString("test.testCollection");
}
- const char* nsDb() {
- return "test";
+ DatabaseName nsDb() {
+ return {boost::none, "test"};
}
const char* nsColl() {
return "testCollection";
@@ -136,7 +136,7 @@ struct Type0 : Base {
}
BSONObj result;
- ASSERT(db.runCommand("test", BSON("filemd5" << 0), result));
+ ASSERT(db.runCommand({boost::none, "test"}, BSON("filemd5" << 0), result));
ASSERT_EQUALS(string("5eb63bbbe01eeed093cb22bb8f5acdc3"), result.getStringField("md5"));
}
};
@@ -160,7 +160,7 @@ struct Type2 : Base {
}
BSONObj result;
- ASSERT(db.runCommand("test", BSON("filemd5" << 0), result));
+ ASSERT(db.runCommand({boost::none, "test"}, BSON("filemd5" << 0), result));
ASSERT_EQUALS(string("5eb63bbbe01eeed093cb22bb8f5acdc3"), result.getStringField("md5"));
}
};
diff --git a/src/mongo/dbtests/directclienttests.cpp b/src/mongo/dbtests/directclienttests.cpp
index f63b0e85cba..9d8c827cfae 100644
--- a/src/mongo/dbtests/directclienttests.cpp
+++ b/src/mongo/dbtests/directclienttests.cpp
@@ -81,7 +81,7 @@ public:
BSONObj result;
BSONObj cmdObj = BSON("count"
<< "");
- ASSERT(!client.runCommand("", cmdObj, result)) << result;
+ ASSERT(!client.runCommand({boost::none, ""}, cmdObj, result)) << result;
ASSERT_EQ(getStatusFromCommandResult(result), ErrorCodes::InvalidNamespace);
}
};
diff --git a/src/mongo/dbtests/indexupdatetests.cpp b/src/mongo/dbtests/indexupdatetests.cpp
index fc9b1f82f7e..056cb77e490 100644
--- a/src/mongo/dbtests/indexupdatetests.cpp
+++ b/src/mongo/dbtests/indexupdatetests.cpp
@@ -657,7 +657,7 @@ public:
DBDirectClient client(opCtx.get());
client.dropCollection(_ns);
BSONObj cmdResult;
- ASSERT_TRUE(client.runCommand("unittests",
+ ASSERT_TRUE(client.runCommand({boost::none, "unittests"},
BSON("create"
<< "indexupdate"
<< "collation"
diff --git a/src/mongo/dbtests/mock_dbclient_conn_test.cpp b/src/mongo/dbtests/mock_dbclient_conn_test.cpp
index 484c479da7d..c7be3c00343 100644
--- a/src/mongo/dbtests/mock_dbclient_conn_test.cpp
+++ b/src/mongo/dbtests/mock_dbclient_conn_test.cpp
@@ -435,7 +435,7 @@ TEST(MockDBClientConnTest, SetCmdReply) {
{
MockDBClientConnection conn(&server);
BSONObj response;
- ASSERT(conn.runCommand("foo", BSON("serverStatus" << 1), response));
+ ASSERT(conn.runCommand({boost::none, "foo"}, BSON("serverStatus" << 1), response));
ASSERT_EQUALS(1, response["ok"].numberInt());
ASSERT_EQUALS("local", response["host"].str());
@@ -446,7 +446,7 @@ TEST(MockDBClientConnTest, SetCmdReply) {
{
MockDBClientConnection conn(&server);
BSONObj response;
- ASSERT(conn.runCommand("foo", BSON("serverStatus" << 1), response));
+ ASSERT(conn.runCommand({boost::none, "foo"}, BSON("serverStatus" << 1), response));
ASSERT_EQUALS(1, response["ok"].numberInt());
ASSERT_EQUALS("local", response["host"].str());
@@ -456,7 +456,7 @@ TEST(MockDBClientConnTest, SetCmdReply) {
{
MockDBClientConnection conn(&server);
BSONObj response;
- ASSERT(conn.runCommand("foo", BSON("serverStatus" << 1), response));
+ ASSERT(conn.runCommand({boost::none, "foo"}, BSON("serverStatus" << 1), response));
ASSERT_EQUALS(1, response["ok"].numberInt());
ASSERT_EQUALS("local", response["host"].str());
@@ -481,7 +481,7 @@ TEST(MockDBClientConnTest, CyclingCmd) {
{
MockDBClientConnection conn(&server);
BSONObj response;
- ASSERT(conn.runCommand("foo", BSON("isMaster" << 1), response));
+ ASSERT(conn.runCommand({boost::none, "foo"}, BSON("isMaster" << 1), response));
ASSERT_EQUALS(1, response["ok"].numberInt());
ASSERT_EQUALS("a", response["set"].str());
ASSERT(response["isMaster"].trueValue());
@@ -492,7 +492,7 @@ TEST(MockDBClientConnTest, CyclingCmd) {
{
MockDBClientConnection conn(&server);
BSONObj response;
- ASSERT(conn.runCommand("foo", BSON("isMaster" << 1), response));
+ ASSERT(conn.runCommand({boost::none, "foo"}, BSON("isMaster" << 1), response));
ASSERT_EQUALS(1, response["ok"].numberInt());
ASSERT_EQUALS("a", response["set"].str());
ASSERT(!response["isMaster"].trueValue());
@@ -503,7 +503,7 @@ TEST(MockDBClientConnTest, CyclingCmd) {
{
MockDBClientConnection conn(&server);
BSONObj response;
- ASSERT(conn.runCommand("foo", BSON("isMaster" << 1), response));
+ ASSERT(conn.runCommand({boost::none, "foo"}, BSON("isMaster" << 1), response));
ASSERT_EQUALS(1, response["ok"].numberInt());
ASSERT_EQUALS("a", response["set"].str());
ASSERT(response["isMaster"].trueValue());
@@ -520,7 +520,7 @@ TEST(MockDBClientConnTest, MultipleStoredResponse) {
MockDBClientConnection conn(&server);
{
BSONObj response;
- ASSERT(conn.runCommand("foo",
+ ASSERT(conn.runCommand({boost::none, "foo"},
BSON("isMaster"
<< "abc"),
response));
@@ -529,7 +529,7 @@ TEST(MockDBClientConnTest, MultipleStoredResponse) {
{
BSONObj response;
- ASSERT(!conn.runCommand("a", BSON("serverStatus" << 1), response));
+ ASSERT(!conn.runCommand({boost::none, "a"}, BSON("serverStatus" << 1), response));
}
}
@@ -542,14 +542,14 @@ TEST(MockDBClientConnTest, CmdCount) {
{
MockDBClientConnection conn(&server);
BSONObj response;
- ASSERT(conn.runCommand("foo", BSON("serverStatus" << 1), response));
+ ASSERT(conn.runCommand({boost::none, "foo"}, BSON("serverStatus" << 1), response));
ASSERT_EQUALS(1U, server.getCmdCount());
}
{
MockDBClientConnection conn(&server);
BSONObj response;
- ASSERT(conn.runCommand("baz", BSON("serverStatus" << 1), response));
+ ASSERT(conn.runCommand({boost::none, "baz"}, BSON("serverStatus" << 1), response));
ASSERT_EQUALS(2U, server.getCmdCount());
}
}
@@ -572,7 +572,7 @@ TEST(MockDBClientConnTest, Shutdown) {
{
MockDBClientConnection conn(&server);
BSONObj response;
- ASSERT_THROWS(conn.runCommand("test", BSON("serverStatus" << 1), response),
+ ASSERT_THROWS(conn.runCommand({boost::none, "test"}, BSON("serverStatus" << 1), response),
mongo::NetworkException);
}
@@ -590,7 +590,7 @@ TEST(MockDBClientConnTest, Restart) {
// new instance still has it
conn1.find(FindCommandRequest(NamespaceString("test.user")));
BSONObj response;
- conn1.runCommand("test", BSON("serverStatus" << 1), response);
+ conn1.runCommand({boost::none, "test"}, BSON("serverStatus" << 1), response);
server.shutdown();
ASSERT_THROWS(conn1.find(FindCommandRequest(NamespaceString("test.user"))),
@@ -629,7 +629,7 @@ TEST(MockDBClientConnTest, ClearCounter) {
MockDBClientConnection conn(&server);
conn.find(FindCommandRequest(FindCommandRequest(NamespaceString("test.user"))));
BSONObj response;
- conn.runCommand("test", BSON("serverStatus" << 1), response);
+ conn.runCommand({boost::none, "test"}, BSON("serverStatus" << 1), response);
server.clearCounters();
ASSERT_EQUALS(0U, server.getQueryCount());
@@ -656,7 +656,7 @@ TEST(MockDBClientConnTest, Delay) {
{
mongo::Timer timer;
BSONObj response;
- conn.runCommand("x", BSON("serverStatus" << 1), response);
+ conn.runCommand({boost::none, "x"}, BSON("serverStatus" << 1), response);
const int nowInMilliSec = timer.millis();
ASSERT_GREATER_THAN_OR_EQUALS(nowInMilliSec, 130);
}
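The hunks above show the recurring shape of this change at test call sites: runCommand no longer takes the database as a plain string but as a DatabaseName, brace-initialized from an optional tenant id (boost::none for the untenanted case) and the database string. A minimal standalone sketch of that calling convention, using simplified stand-in types rather than the real MongoDB classes:

    // Sketch only: TenantId, DatabaseName and runCommand here are
    // simplified stand-ins, not the MongoDB implementations.
    #include <boost/optional.hpp>
    #include <iostream>
    #include <string>

    struct TenantId {
        std::string value;
    };

    struct DatabaseName {
        boost::optional<TenantId> tenantId;  // boost::none => no tenant
        std::string db;
    };

    // Old shape:  bool runCommand(const std::string& dbname, ...)
    // New shape:  bool runCommand(const DatabaseName& dbName, ...)
    bool runCommand(const DatabaseName& dbName, const std::string& cmdName) {
        std::cout << "run '" << cmdName << "' against '" << dbName.db << "'\n";
        return true;
    }

    int main() {
        // Brace initialization mirrors the updated call sites above.
        return runCommand({boost::none, "foo"}, "serverStatus") ? 0 : 1;
    }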
diff --git a/src/mongo/dbtests/mock_replica_set_test.cpp b/src/mongo/dbtests/mock_replica_set_test.cpp
index f82063033c4..1d2630d99a5 100644
--- a/src/mongo/dbtests/mock_replica_set_test.cpp
+++ b/src/mongo/dbtests/mock_replica_set_test.cpp
@@ -80,7 +80,8 @@ TEST(MockReplicaSetTest, IsMasterNode0) {
BSONObj cmdResponse;
MockRemoteDBServer* node = replSet.getNode("$n0:27017");
- bool ok = MockDBClientConnection(node).runCommand("foo", BSON("ismaster" << 1), cmdResponse);
+ bool ok = MockDBClientConnection(node).runCommand(
+ {boost::none, "foo"}, BSON("ismaster" << 1), cmdResponse);
ASSERT(ok);
ASSERT(cmdResponse["ismaster"].trueValue());
@@ -107,7 +108,8 @@ TEST(MockReplicaSetTest, IsMasterNode1) {
BSONObj cmdResponse;
MockRemoteDBServer* node = replSet.getNode("$n1:27017");
- bool ok = MockDBClientConnection(node).runCommand("foo", BSON("ismaster" << 1), cmdResponse);
+ bool ok = MockDBClientConnection(node).runCommand(
+ {boost::none, "foo"}, BSON("ismaster" << 1), cmdResponse);
ASSERT(ok);
ASSERT(!cmdResponse["ismaster"].trueValue());
@@ -134,7 +136,8 @@ TEST(MockReplicaSetTest, IsMasterNode2) {
BSONObj cmdResponse;
MockRemoteDBServer* node = replSet.getNode("$n2:27017");
- bool ok = MockDBClientConnection(node).runCommand("foo", BSON("ismaster" << 1), cmdResponse);
+ bool ok = MockDBClientConnection(node).runCommand(
+ {boost::none, "foo"}, BSON("ismaster" << 1), cmdResponse);
ASSERT(ok);
ASSERT(!cmdResponse["ismaster"].trueValue());
@@ -161,8 +164,8 @@ TEST(MockReplicaSetTest, ReplSetGetStatusNode0) {
BSONObj cmdResponse;
MockRemoteDBServer* node = replSet.getNode("$n0:27017");
- bool ok =
- MockDBClientConnection(node).runCommand("foo", BSON("replSetGetStatus" << 1), cmdResponse);
+ bool ok = MockDBClientConnection(node).runCommand(
+ {boost::none, "foo"}, BSON("replSetGetStatus" << 1), cmdResponse);
ASSERT(ok);
ASSERT_EQUALS("n", cmdResponse["set"].str());
@@ -194,8 +197,8 @@ TEST(MockReplicaSetTest, ReplSetGetStatusNode1) {
BSONObj cmdResponse;
MockRemoteDBServer* node = replSet.getNode("$n1:27017");
- bool ok =
- MockDBClientConnection(node).runCommand("foo", BSON("replSetGetStatus" << 1), cmdResponse);
+ bool ok = MockDBClientConnection(node).runCommand(
+ {boost::none, "foo"}, BSON("replSetGetStatus" << 1), cmdResponse);
ASSERT(ok);
ASSERT_EQUALS("n", cmdResponse["set"].str());
@@ -229,8 +232,8 @@ TEST(MockReplicaSetTest, ReplSetGetStatusNode2) {
BSONObj cmdResponse;
MockRemoteDBServer* node = replSet.getNode("$n2:27017");
- bool ok =
- MockDBClientConnection(node).runCommand("foo", BSON("replSetGetStatus" << 1), cmdResponse);
+ bool ok = MockDBClientConnection(node).runCommand(
+ {boost::none, "foo"}, BSON("replSetGetStatus" << 1), cmdResponse);
ASSERT(ok);
ASSERT_EQUALS("n", cmdResponse["set"].str());
@@ -299,8 +302,8 @@ TEST(MockReplicaSetTest, IsMasterReconfigNodeRemoved) {
// Check isMaster for node still in set
BSONObj cmdResponse;
MockRemoteDBServer* node = replSet.getNode("$n0:27017");
- bool ok =
- MockDBClientConnection(node).runCommand("foo", BSON("ismaster" << 1), cmdResponse);
+ bool ok = MockDBClientConnection(node).runCommand(
+ {boost::none, "foo"}, BSON("ismaster" << 1), cmdResponse);
ASSERT(ok);
ASSERT(cmdResponse["ismaster"].trueValue());
@@ -327,8 +330,8 @@ TEST(MockReplicaSetTest, IsMasterReconfigNodeRemoved) {
// Check isMaster for node still not in set anymore
BSONObj cmdResponse;
MockRemoteDBServer* node = replSet.getNode(hostToRemove);
- bool ok =
- MockDBClientConnection(node).runCommand("foo", BSON("ismaster" << 1), cmdResponse);
+ bool ok = MockDBClientConnection(node).runCommand(
+ {boost::none, "foo"}, BSON("ismaster" << 1), cmdResponse);
ASSERT(ok);
ASSERT(!cmdResponse["ismaster"].trueValue());
@@ -351,7 +354,7 @@ TEST(MockReplicaSetTest, replSetGetStatusReconfigNodeRemoved) {
BSONObj cmdResponse;
MockRemoteDBServer* node = replSet.getNode("$n2:27017");
bool ok = MockDBClientConnection(node).runCommand(
- "foo", BSON("replSetGetStatus" << 1), cmdResponse);
+ {boost::none, "foo"}, BSON("replSetGetStatus" << 1), cmdResponse);
ASSERT(ok);
ASSERT_EQUALS("n", cmdResponse["set"].str());
@@ -384,7 +387,7 @@ TEST(MockReplicaSetTest, replSetGetStatusReconfigNodeRemoved) {
BSONObj cmdResponse;
MockRemoteDBServer* node = replSet.getNode(hostToRemove);
bool ok = MockDBClientConnection(node).runCommand(
- "foo", BSON("replSetGetStatus" << 1), cmdResponse);
+ {boost::none, "foo"}, BSON("replSetGetStatus" << 1), cmdResponse);
ASSERT(ok);
ASSERT_EQUALS("n", cmdResponse["set"].str());
diff --git a/src/mongo/dbtests/plan_executor_invalidation_test.cpp b/src/mongo/dbtests/plan_executor_invalidation_test.cpp
index 99ece768657..8a823f04432 100644
--- a/src/mongo/dbtests/plan_executor_invalidation_test.cpp
+++ b/src/mongo/dbtests/plan_executor_invalidation_test.cpp
@@ -146,7 +146,7 @@ public:
void renameCollection(const std::string& to) {
BSONObj info;
ASSERT_TRUE(_client.runCommand(
- "admin",
+ DatabaseName(boost::none, "admin"),
BSON("renameCollection" << nss.ns() << "to" << to << "dropTarget" << true),
info));
_refreshCollection();
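The renameCollection helper above spells the same thing with the explicit constructor, DatabaseName(boost::none, "admin"), which is interchangeable with the terser brace form used in the test files. A compact stand-in illustrating the equivalence (again, not the real class):

    #include <boost/optional.hpp>
    #include <string>

    struct TenantId {
        std::string value;
    };

    class DatabaseName {  // illustrative stand-in, not the real class
    public:
        DatabaseName(boost::optional<TenantId> tenantId, std::string db)
            : _tenantId(std::move(tenantId)), _db(std::move(db)) {}

        const boost::optional<TenantId>& tenantId() const {
            return _tenantId;
        }
        const std::string& db() const {
            return _db;
        }

    private:
        boost::optional<TenantId> _tenantId;
        std::string _db;
    };

    bool runCommand(const DatabaseName& dbName, const std::string& cmdName) {
        return dbName.db() == "admin" && !cmdName.empty();
    }

    int main() {
        // Both spellings construct the same value.
        bool a = runCommand(DatabaseName(boost::none, "admin"), "renameCollection");
        bool b = runCommand({boost::none, "admin"}, "whatsmyuri");
        return (a && b) ? 0 : 1;
    }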
diff --git a/src/mongo/dbtests/querytests.cpp b/src/mongo/dbtests/querytests.cpp
index ed00510caa0..934fa153a4c 100644
--- a/src/mongo/dbtests/querytests.cpp
+++ b/src/mongo/dbtests/querytests.cpp
@@ -219,7 +219,7 @@ public:
DBDirectClient cl(&_opCtx);
BSONObj info;
- bool ok = cl.runCommand("unittests",
+ bool ok = cl.runCommand({boost::none, "unittests"},
BSON("godinsert"
<< "querytests"
<< "obj" << BSONObj()),
@@ -635,7 +635,7 @@ public:
const char* ns = "unittests.querytests.TailableQueryOnId";
BSONObj info;
- _client.runCommand("unittests",
+ _client.runCommand({boost::none, "unittests"},
BSON("create"
<< "querytests.TailableQueryOnId"
<< "capped" << true << "size" << 8192 << "autoIndexId" << true),
@@ -690,7 +690,7 @@ public:
// to truncating the oplog instead.
if (_opCtx.getServiceContext()->getStorageEngine()->supportsRecoveryTimestamp()) {
BSONObj info;
- _client.runCommand("local",
+ _client.runCommand({boost::none, "local"},
BSON("emptycapped"
<< "oplog.querytests.OplogScanWithGtTimstampPred"),
info);
@@ -742,7 +742,7 @@ public:
// to truncating the oplog instead.
if (_opCtx.getServiceContext()->getStorageEngine()->supportsRecoveryTimestamp()) {
BSONObj info;
- _client.runCommand("local",
+ _client.runCommand({boost::none, "local"},
BSON("emptycapped"
<< "oplog.querytests.OplogScanGtTsExplain"),
info);
@@ -1481,7 +1481,7 @@ public:
BSONObj info;
// Must use local db so that the collection is not replicated, to allow autoIndexId:false.
- _client.runCommand("local",
+ _client.runCommand({boost::none, "local"},
BSON("create"
<< "oplog.querytests.findingstart"
<< "capped" << true << "size" << 4096 << "autoIndexId" << false),
@@ -1493,7 +1493,7 @@ public:
// To ensure we are working with a clean oplog (an oplog without entries), we resort
// to truncating the oplog instead.
if (_opCtx.getServiceContext()->getStorageEngine()->supportsRecoveryTimestamp()) {
- _client.runCommand("local",
+ _client.runCommand({boost::none, "local"},
BSON("emptycapped"
<< "oplog.querytests.findingstart"),
info);
@@ -1550,7 +1550,7 @@ public:
BSONObj info;
// Must use local db so that the collection is not replicated, to allow autoIndexId:false.
- _client.runCommand("local",
+ _client.runCommand({boost::none, "local"},
BSON("create"
<< "oplog.querytests.findingstart"
<< "capped" << true << "size" << 4096 << "autoIndexId" << false),
@@ -1562,7 +1562,7 @@ public:
// To ensure we are working with a clean oplog (an oplog without entries), we resort
// to truncating the oplog instead.
if (_opCtx.getServiceContext()->getStorageEngine()->supportsRecoveryTimestamp()) {
- _client.runCommand("local",
+ _client.runCommand({boost::none, "local"},
BSON("emptycapped"
<< "oplog.querytests.findingstart"),
info);
@@ -1621,7 +1621,7 @@ public:
BSONObj info;
// Must use local db so that the collection is not replicated, to allow autoIndexId:false.
- _client.runCommand("local",
+ _client.runCommand({boost::none, "local"},
BSON("create"
<< "oplog.querytests.findingstart"
<< "capped" << true << "size" << 4096 << "autoIndexId" << false),
@@ -1633,7 +1633,7 @@ public:
// To ensure we are working with a clean oplog (an oplog without entries), we resort
// to truncating the oplog instead.
if (_opCtx.getServiceContext()->getStorageEngine()->supportsRecoveryTimestamp()) {
- _client.runCommand("local",
+ _client.runCommand({boost::none, "local"},
BSON("emptycapped"
<< "oplog.querytests.findingstart"),
info);
@@ -1664,7 +1664,7 @@ public:
WhatsMyUri() : CollectionBase("whatsmyuri") {}
void run() {
BSONObj result;
- _client.runCommand("admin", BSON("whatsmyuri" << 1), result);
+ _client.runCommand({boost::none, "admin"}, BSON("whatsmyuri" << 1), result);
ASSERT_EQUALS("", result["you"].str());
}
};
@@ -1674,7 +1674,7 @@ public:
WhatsMySni() : CollectionBase("whatsmysni") {}
void run() {
BSONObj result;
- _client.runCommand("admin", BSON("whatsmysni" << 1), result);
+ _client.runCommand({boost::none, "admin"}, BSON("whatsmysni" << 1), result);
ASSERT_EQUALS("", result["sni"].str());
}
};
diff --git a/src/mongo/rpc/op_legacy_integration_test.cpp b/src/mongo/rpc/op_legacy_integration_test.cpp
index 7806e1aa866..4c8b1e67e5c 100644
--- a/src/mongo/rpc/op_legacy_integration_test.cpp
+++ b/src/mongo/rpc/op_legacy_integration_test.cpp
@@ -129,7 +129,7 @@ TEST(OpLegacy, GetLastError) {
static const auto getLastErrorCommand = fromjson(R"({"getlasterror": 1})");
BSONObj replyObj;
- conn->runCommand("admin", getLastErrorCommand, replyObj);
+ conn->runCommand({boost::none, "admin"}, getLastErrorCommand, replyObj);
// 'getLastError' command is no longer supported and will always fail.
auto status = getStatusFromCommandResult(replyObj);
@@ -181,7 +181,7 @@ TEST(OpLegacy, UnsupportedReadOps) {
documents: [ {a: 1},{a: 2},{a: 3},{a: 4},{a: 5},{a: 6},{a: 7} ]
})");
BSONObj ignoreResponse;
- ASSERT(conn->runCommand("testOpLegacy", insert, ignoreResponse));
+ ASSERT(conn->runCommand({boost::none, "testOpLegacy"}, insert, ignoreResponse));
// Issue the unsupported requests. They all should fail one way or another.
Message opQueryRequest = makeUnsupportedOpQueryMessage(ns,
@@ -241,7 +241,7 @@ void testAllowedCommand(const char* command,
auto serverStatusCmd = fromjson("{serverStatus: 1}");
BSONObj serverStatus;
- ASSERT(conn->runCommand("admin", serverStatusCmd, serverStatus));
+ ASSERT(conn->runCommand({boost::none, "admin"}, serverStatusCmd, serverStatus));
auto opCountersPrior = serverStatus["opcounters"]["deprecated"];
const auto queryCountPrior = opCountersPrior ? opCountersPrior["query"].Long() : 0;
@@ -253,7 +253,7 @@ void testAllowedCommand(const char* command,
auto status = getStatusFromCommandResult(obj);
ASSERT_EQ(status.code(), code);
- ASSERT(conn->runCommand("admin", serverStatusCmd, serverStatus));
+ ASSERT(conn->runCommand({boost::none, "admin"}, serverStatusCmd, serverStatus));
auto opCounters = serverStatus["opcounters"]["deprecated"];
const auto queryCount = opCounters ? opCounters["query"].Long() : 0;
diff --git a/src/mongo/rpc/op_msg_integration_test.cpp b/src/mongo/rpc/op_msg_integration_test.cpp
index fa9f9eedf1a..d73c1fc3314 100644
--- a/src/mongo/rpc/op_msg_integration_test.cpp
+++ b/src/mongo/rpc/op_msg_integration_test.cpp
@@ -239,7 +239,7 @@ TEST(OpMsg, CloseConnectionOnFireAndForgetNotWritablePrimaryError) {
// Disable eager checking of primary to simulate a stepdown occurring after the check. This
// should respect w:0.
BSONObj output;
- ASSERT(conn.runCommand("admin",
+ ASSERT(conn.runCommand({boost::none, "admin"},
fromjson(R"({
configureFailPoint: 'skipCheckingForNotPrimaryInCommandDispatch',
mode: 'alwaysOn'
@@ -248,7 +248,7 @@ TEST(OpMsg, CloseConnectionOnFireAndForgetNotWritablePrimaryError) {
<< output;
ON_BLOCK_EXIT([&] {
uassertStatusOK(conn.connect(host, "integration_test-cleanup", boost::none));
- ASSERT(conn.runCommand("admin",
+ ASSERT(conn.runCommand({boost::none, "admin"},
fromjson(R"({
configureFailPoint:
'skipCheckingForNotPrimaryInCommandDispatch',
@@ -779,7 +779,7 @@ void serverStatusCorrectlyShowsExhaustMetrics(std::string commandName) {
ASSERT(waitForCondition([&] {
auto serverStatusCmd = BSON("serverStatus" << 1);
BSONObj serverStatusReply;
- ASSERT(conn->runCommand("admin", serverStatusCmd, serverStatusReply));
+ ASSERT(conn->runCommand({boost::none, "admin"}, serverStatusCmd, serverStatusReply));
return serverStatusReply["connections"]["exhaustIsMaster"].numberInt() == 0 &&
serverStatusReply["connections"]["exhaustHello"].numberInt() == 0;
}));
@@ -814,7 +814,7 @@ void serverStatusCorrectlyShowsExhaustMetrics(std::string commandName) {
auto serverStatusCmd = BSON("serverStatus" << 1);
BSONObj serverStatusReply;
- ASSERT(conn2->runCommand("admin", serverStatusCmd, serverStatusReply));
+ ASSERT(conn2->runCommand({boost::none, "admin"}, serverStatusCmd, serverStatusReply));
if (useLegacyCommandName) {
ASSERT_EQUALS(1, serverStatusReply["connections"]["exhaustIsMaster"].numberInt());
ASSERT_EQUALS(0, serverStatusReply["connections"]["exhaustHello"].numberInt());
@@ -853,7 +853,7 @@ void exhaustMetricSwitchingCommandNames(bool useLegacyCommandNameAtStart) {
ASSERT(waitForCondition([&] {
auto serverStatusCmd = BSON("serverStatus" << 1);
BSONObj serverStatusReply;
- ASSERT(conn1->runCommand("admin", serverStatusCmd, serverStatusReply));
+ ASSERT(conn1->runCommand({boost::none, "admin"}, serverStatusCmd, serverStatusReply));
return serverStatusReply["connections"]["exhaustIsMaster"].numberInt() == 0 &&
serverStatusReply["connections"]["exhaustHello"].numberInt() == 0;
}));
@@ -900,7 +900,7 @@ void exhaustMetricSwitchingCommandNames(bool useLegacyCommandNameAtStart) {
auto serverStatusCmd = BSON("serverStatus" << 1);
BSONObj serverStatusReply;
- ASSERT(conn2->runCommand("admin", serverStatusCmd, serverStatusReply));
+ ASSERT(conn2->runCommand({boost::none, "admin"}, serverStatusCmd, serverStatusReply));
if (useLegacyCommandNameAtStart) {
ASSERT_EQUALS(1, serverStatusReply["connections"]["exhaustIsMaster"].numberInt());
ASSERT_EQUALS(0, serverStatusReply["connections"]["exhaustHello"].numberInt());
@@ -927,7 +927,7 @@ void exhaustMetricSwitchingCommandNames(bool useLegacyCommandNameAtStart) {
}));
// Terminating the exhaust stream should not decrement the number of exhaust connections.
- ASSERT(conn2->runCommand("admin", serverStatusCmd, serverStatusReply));
+ ASSERT(conn2->runCommand({boost::none, "admin"}, serverStatusCmd, serverStatusReply));
if (useLegacyCommandNameAtStart) {
ASSERT_EQUALS(1, serverStatusReply["connections"]["exhaustIsMaster"].numberInt());
ASSERT_EQUALS(0, serverStatusReply["connections"]["exhaustHello"].numberInt());
@@ -955,7 +955,7 @@ void exhaustMetricSwitchingCommandNames(bool useLegacyCommandNameAtStart) {
// exhaust metric should decrease for the exhaust type that was closed, and increase for the
// exhaust type that was just opened.
- ASSERT(conn2->runCommand("admin", serverStatusCmd, serverStatusReply));
+ ASSERT(conn2->runCommand({boost::none, "admin"}, serverStatusCmd, serverStatusReply));
if (useLegacyCommandNameAtStart) {
ASSERT_EQUALS(0, serverStatusReply["connections"]["exhaustIsMaster"].numberInt());
ASSERT_EQUALS(1, serverStatusReply["connections"]["exhaustHello"].numberInt());
@@ -990,7 +990,7 @@ void exhaustMetricDecrementsOnNewOpAfterTerminatingExhaustStream(bool useLegacyC
ASSERT(waitForCondition([&] {
auto serverStatusCmd = BSON("serverStatus" << 1);
BSONObj serverStatusReply;
- ASSERT(conn1->runCommand("admin", serverStatusCmd, serverStatusReply));
+ ASSERT(conn1->runCommand({boost::none, "admin"}, serverStatusCmd, serverStatusReply));
return serverStatusReply["connections"]["exhaustIsMaster"].numberInt() == 0 &&
serverStatusReply["connections"]["exhaustHello"].numberInt() == 0;
}));
@@ -1036,7 +1036,7 @@ void exhaustMetricDecrementsOnNewOpAfterTerminatingExhaustStream(bool useLegacyC
auto serverStatusCmd = BSON("serverStatus" << 1);
BSONObj serverStatusReply;
- ASSERT(conn2->runCommand("admin", serverStatusCmd, serverStatusReply));
+ ASSERT(conn2->runCommand({boost::none, "admin"}, serverStatusCmd, serverStatusReply));
if (useLegacyCommandName) {
ASSERT_EQUALS(1, serverStatusReply["connections"]["exhaustIsMaster"].numberInt());
ASSERT_EQUALS(0, serverStatusReply["connections"]["exhaustHello"].numberInt());
@@ -1063,7 +1063,7 @@ void exhaustMetricDecrementsOnNewOpAfterTerminatingExhaustStream(bool useLegacyC
}));
// Terminating the exhaust stream should not decrement the number of exhaust connections.
- ASSERT(conn2->runCommand("admin", serverStatusCmd, serverStatusReply));
+ ASSERT(conn2->runCommand({boost::none, "admin"}, serverStatusCmd, serverStatusReply));
if (useLegacyCommandName) {
ASSERT_EQUALS(1, serverStatusReply["connections"]["exhaustIsMaster"].numberInt());
ASSERT_EQUALS(0, serverStatusReply["connections"]["exhaustHello"].numberInt());
@@ -1074,7 +1074,7 @@ void exhaustMetricDecrementsOnNewOpAfterTerminatingExhaustStream(bool useLegacyC
// exhaust metric should now decrement after calling serverStatus on the connection that used
// to have the exhaust stream.
- ASSERT(conn1->runCommand("admin", serverStatusCmd, serverStatusReply));
+ ASSERT(conn1->runCommand({boost::none, "admin"}, serverStatusCmd, serverStatusReply));
ASSERT_EQUALS(0, serverStatusReply["connections"]["exhaustIsMaster"].numberInt());
ASSERT_EQUALS(0, serverStatusReply["connections"]["exhaustHello"].numberInt());
}
@@ -1103,7 +1103,7 @@ void exhaustMetricOnNewExhaustAfterTerminatingExhaustStream(bool useLegacyComman
ASSERT(waitForCondition([&] {
auto serverStatusCmd = BSON("serverStatus" << 1);
BSONObj serverStatusReply;
- ASSERT(conn1->runCommand("admin", serverStatusCmd, serverStatusReply));
+ ASSERT(conn1->runCommand({boost::none, "admin"}, serverStatusCmd, serverStatusReply));
return serverStatusReply["connections"]["exhaustIsMaster"].numberInt() == 0 &&
serverStatusReply["connections"]["exhaustHello"].numberInt() == 0;
}));
@@ -1149,7 +1149,7 @@ void exhaustMetricOnNewExhaustAfterTerminatingExhaustStream(bool useLegacyComman
auto serverStatusCmd = BSON("serverStatus" << 1);
BSONObj serverStatusReply;
- ASSERT(conn2->runCommand("admin", serverStatusCmd, serverStatusReply));
+ ASSERT(conn2->runCommand({boost::none, "admin"}, serverStatusCmd, serverStatusReply));
if (useLegacyCommandName) {
ASSERT_EQUALS(1, serverStatusReply["connections"]["exhaustIsMaster"].numberInt());
ASSERT_EQUALS(0, serverStatusReply["connections"]["exhaustHello"].numberInt());
@@ -1176,7 +1176,7 @@ void exhaustMetricOnNewExhaustAfterTerminatingExhaustStream(bool useLegacyComman
}));
// Terminating the exhaust stream should not decrement the number of exhaust connections.
- ASSERT(conn2->runCommand("admin", serverStatusCmd, serverStatusReply));
+ ASSERT(conn2->runCommand({boost::none, "admin"}, serverStatusCmd, serverStatusReply));
if (useLegacyCommandName) {
ASSERT_EQUALS(1, serverStatusReply["connections"]["exhaustIsMaster"].numberInt());
ASSERT_EQUALS(0, serverStatusReply["connections"]["exhaustHello"].numberInt());
@@ -1196,7 +1196,7 @@ void exhaustMetricOnNewExhaustAfterTerminatingExhaustStream(bool useLegacyComman
ASSERT_OK(getStatusFromCommandResult(res));
// exhaust metric should not increment or decrement after initiating a new exhaust stream.
- ASSERT(conn2->runCommand("admin", serverStatusCmd, serverStatusReply));
+ ASSERT(conn2->runCommand({boost::none, "admin"}, serverStatusCmd, serverStatusReply));
if (useLegacyCommandName) {
ASSERT_EQUALS(1, serverStatusReply["connections"]["exhaustIsMaster"].numberInt());
ASSERT_EQUALS(0, serverStatusReply["connections"]["exhaustHello"].numberInt());
diff --git a/src/mongo/s/analyze_shard_key_util.cpp b/src/mongo/s/analyze_shard_key_util.cpp
index 8fc280b5752..ac4ce4ce0a3 100644
--- a/src/mongo/s/analyze_shard_key_util.cpp
+++ b/src/mongo/s/analyze_shard_key_util.cpp
@@ -203,7 +203,7 @@ bool canAcceptWrites(OperationContext* opCtx, const NamespaceString& nss) {
* Returns the command response.
*/
BSONObj executeWriteCommandLocal(OperationContext* opCtx,
- const std::string dbName,
+ const DatabaseName& dbName,
const BSONObj& cmdObj,
const std::function<void(const BSONObj&)>& uassertWriteStatusFn) {
DBDirectClient client(opCtx);
@@ -224,7 +224,7 @@ BSONObj executeWriteCommandLocal(OperationContext* opCtx,
* Returns the command response.
*/
BSONObj executeWriteCommandRemote(OperationContext* opCtx,
- const std::string dbName,
+ const DatabaseName& dbName,
const BSONObj& cmdObj,
const std::function<void(const BSONObj&)>& uassertWriteStatusFn) {
auto hostAndPort = repl::ReplicationCoordinator::get(opCtx)->getCurrentPrimaryHostAndPort();
@@ -269,7 +269,7 @@ BSONObj executeWriteCommand(OperationContext* opCtx,
const NamespaceString& nss,
const BSONObj& cmdObj,
const std::function<void(const BSONObj&)>& uassertWriteStatusFn) {
- const auto dbName = nss.db().toString();
+ const auto dbName = nss.dbName();
auto numRetries = 0;
while (true) {
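Beyond the call sites, the helpers in this file change their parameter from a std::string taken by value to a DatabaseName taken by const reference, and the caller now derives it from the namespace with nss.dbName() instead of nss.db().toString(), so the tenant id (when present) travels together with the database name. A rough standalone sketch of that shape, with simplified stand-ins for the real types:

    #include <boost/optional.hpp>
    #include <string>

    struct TenantId {
        std::string value;
    };

    struct DatabaseName {  // illustrative stand-in
        boost::optional<TenantId> tenantId;
        std::string db;
    };

    // Stand-in for NamespaceString: "<db>.<collection>".
    class NamespaceString {
    public:
        explicit NamespaceString(std::string ns) : _ns(std::move(ns)) {}

        // Older accessor: only the database portion, as text.
        std::string db() const {
            return _ns.substr(0, _ns.find('.'));
        }

        // Newer accessor: database portion plus (here, absent) tenant id.
        DatabaseName dbName() const {
            return DatabaseName{boost::none, db()};
        }

    private:
        std::string _ns;
    };

    // Was: executeWriteCommandLocal(const std::string dbName, ...)
    void executeWriteCommandLocal(const DatabaseName& dbName) {
        (void)dbName;  // real code would issue the write here
    }

    int main() {
        NamespaceString nss("test.user");
        executeWriteCommandLocal(nss.dbName());  // was nss.db().toString()
        return 0;
    }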
diff --git a/src/mongo/s/client/sharding_connection_hook.cpp b/src/mongo/s/client/sharding_connection_hook.cpp
index afeaca5ec50..be6498ccc6f 100644
--- a/src/mongo/s/client/sharding_connection_hook.cpp
+++ b/src/mongo/s/client/sharding_connection_hook.cpp
@@ -76,7 +76,8 @@ void ShardingConnectionHook::onCreate(DBClientBase* conn) {
if (conn->type() == ConnectionString::ConnectionType::kStandalone) {
BSONObj isMasterResponse;
- if (!conn->runCommand("admin", BSON("ismaster" << 1), isMasterResponse)) {
+ if (!conn->runCommand(
+ DatabaseName(boost::none, "admin"), BSON("ismaster" << 1), isMasterResponse)) {
uassertStatusOK(getStatusFromCommandResult(isMasterResponse));
}
diff --git a/src/mongo/scripting/mozjs/session.cpp b/src/mongo/scripting/mozjs/session.cpp
index 61623822cba..3a18c6c401c 100644
--- a/src/mongo/scripting/mozjs/session.cpp
+++ b/src/mongo/scripting/mozjs/session.cpp
@@ -128,14 +128,16 @@ void endSession(SessionHolder* holder) {
BSONObj abortObj = BSON("abortTransaction" << 1 << "lsid" << holder->lsid << "txnNumber"
<< holder->txnNumber << "autocommit" << false);
- [[maybe_unused]] auto ignored = holder->client->runCommand("admin", abortObj, out);
+ [[maybe_unused]] auto ignored =
+ holder->client->runCommand(DatabaseName(boost::none, "admin"), abortObj, out);
}
EndSessions es;
es.setEndSessions({holder->lsid});
- [[maybe_unused]] auto ignored = holder->client->runCommand("admin", es.toBSON(), out);
+ [[maybe_unused]] auto ignored =
+ holder->client->runCommand(DatabaseName(boost::none, "admin"), es.toBSON(), out);
holder->client.reset();
}
diff --git a/src/mongo/shell/bench.cpp b/src/mongo/shell/bench.cpp
index 95311481c83..d58509279b5 100644
--- a/src/mongo/shell/bench.cpp
+++ b/src/mongo/shell/bench.cpp
@@ -126,7 +126,8 @@ bool runCommandWithSession(DBClientBase* conn,
BSONObj* result) {
if (!lsid) {
invariant(!txnNumber);
- return conn->runCommand(dbname, cmdObj, *result);
+ // Shell is not tenant aware, so use boost::none here.
+ return conn->runCommand(DatabaseName(boost::none, dbname), cmdObj, *result);
}
BSONObjBuilder cmdObjWithLsidBuilder;
@@ -161,7 +162,9 @@ bool runCommandWithSession(DBClientBase* conn,
cmdObjWithLsidBuilder.append("startTransaction", true);
}
- return conn->runCommand(dbname, cmdObjWithLsidBuilder.done(), *result);
+ // Shell is not tenant aware, so use boost::none here.
+ return conn->runCommand(
+ DatabaseName(boost::none, dbname), cmdObjWithLsidBuilder.done(), *result);
}
bool runCommandWithSession(DBClientBase* conn,
@@ -929,7 +932,8 @@ void BenchRunWorker::generateLoadOnConnection(DBClientBase* conn) {
BSONObj result;
uassert(40640,
str::stream() << "Unable to create session due to error " << result,
- conn->runCommand("admin", BSON("startSession" << 1), result));
+ conn->runCommand(
+ DatabaseName(boost::none, "admin"), BSON("startSession" << 1), result));
lsid.emplace(LogicalSessionIdToClient::parse(IDLParserContext("lsid"), result["id"].Obj()));
}
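In the shell's benchmarking path the database still arrives as a plain string, so the call sites wrap it on the way in; as the added comments note, the shell is not tenant aware, which is why boost::none is supplied for the tenant id. A small stand-in sketch of that wrapping (illustrative types only, not the real shell code):

    #include <boost/optional.hpp>
    #include <string>

    struct TenantId {
        std::string value;
    };

    struct DatabaseName {  // illustrative stand-in
        boost::optional<TenantId> tenantId;
        std::string db;
    };

    bool runCommand(const DatabaseName& dbName, const std::string& cmdName) {
        return !dbName.db.empty() && !cmdName.empty();
    }

    // Callers that only know the database as a string wrap it, leaving the
    // tenant id empty because the shell has no tenant to supply.
    bool runCommandWithSession(const std::string& dbname, const std::string& cmdName) {
        return runCommand(DatabaseName{boost::none, dbname}, cmdName);
    }

    int main() {
        return runCommandWithSession("admin", "startSession") ? 0 : 1;
    }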
diff --git a/src/mongo/shell/encrypted_dbclient_base.cpp b/src/mongo/shell/encrypted_dbclient_base.cpp
index a2e2ed037b0..74f124da8ac 100644
--- a/src/mongo/shell/encrypted_dbclient_base.cpp
+++ b/src/mongo/shell/encrypted_dbclient_base.cpp
@@ -575,7 +575,7 @@ void EncryptedDBClientBase::compact(JSContext* cx, JS::CallArgs args) {
efc ? FLEClientCrypto::generateCompactionTokens(*efc, this) : BSONObj());
BSONObj reply;
- runCommand(nss.db().toString(), builder.obj(), reply, 0);
+ runCommand(nss.dbName(), builder.obj(), reply, 0);
reply = reply.getOwned();
mozjs::ValueReader(cx, args.rval()).fromBSON(reply, nullptr, false);
}
diff --git a/src/mongo/shell/shell_utils.cpp b/src/mongo/shell/shell_utils.cpp
index f770137b2cd..698bb89292b 100644
--- a/src/mongo/shell/shell_utils.cpp
+++ b/src/mongo/shell/shell_utils.cpp
@@ -750,7 +750,7 @@ void ConnectionRegistry::registerConnection(DBClientBase& client, StringData uri
command = BSON("whatsmyuri" << 1);
}
- if (client.runCommand("admin", command, info)) {
+ if (client.runCommand({boost::none, "admin"}, command, info)) {
stdx::lock_guard<Latch> lk(_mutex);
_connectionUris[uri.toString()].insert(info["you"].str());
}
@@ -771,7 +771,7 @@ void ConnectionRegistry::killOperationsOnAllConnections(bool withPrompt) const {
const std::set<std::string>& uris = connection.second;
BSONObj currentOpRes;
- conn->runCommand("admin", BSON("currentOp" << 1), currentOpRes);
+ conn->runCommand({boost::none, "admin"}, BSON("currentOp" << 1), currentOpRes);
if (!currentOpRes["inprog"].isABSONObj()) {
// We don't have permissions (or the call didn't succeed) - go to the next connection.
continue;
@@ -807,7 +807,8 @@ void ConnectionRegistry::killOperationsOnAllConnections(bool withPrompt) const {
if (uris.count(client)) {
if (!withPrompt || prompter.confirm()) {
BSONObj info;
- conn->runCommand("admin", BSON("killOp" << 1 << "op" << op["opid"]), info);
+ conn->runCommand(
+ {boost::none, "admin"}, BSON("killOp" << 1 << "op" << op["opid"]), info);
} else {
return;
}
diff --git a/src/mongo/shell/shell_utils_launcher.cpp b/src/mongo/shell/shell_utils_launcher.cpp
index 5a60c5ed2e6..30a04df5a89 100644
--- a/src/mongo/shell/shell_utils_launcher.cpp
+++ b/src/mongo/shell/shell_utils_launcher.cpp
@@ -410,7 +410,7 @@ inline void kill_wrapper(ProcessId pid, int sig, int port, const BSONObj& opt) {
BSONObjBuilder b;
b.append("shutdown", 1);
b.append("force", 1);
- conn.runCommand("admin", b.done(), info);
+ conn.runCommand(DatabaseName(boost::none, "admin"), b.done(), info);
} catch (...) {
// Do nothing. This command never returns data to the client and the driver
// doesn't like that.