author     Hugh Tong <hugh.tong@mongodb.com>  2022-07-26 19:20:58 +0000
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>  2022-07-26 21:00:49 +0000
commit     044d2f734e0dba40b9dfb02cc49c2bff8f575cd5 (patch)
tree       eb2d17564a0c99d2ee9ea3d2f95c81c46bcfcee1 /src/mongo/s
parent     921bba175902f9b9f29751a466383c3d7e80df7b (diff)
download   mongo-044d2f734e0dba40b9dfb02cc49c2bff8f575cd5.tar.gz
SERVER-67824 Rename IDLParserErrorContext to IDLParserContext
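The rename is mechanical: every construction of the old IDLParserErrorContext becomes IDLParserContext, with the constructor arguments and the generated parse() entry points unchanged. A minimal before/after sketch using the DatabaseType call from the first hunk below; the wrapping function and the two include paths are illustrative assumptions, not part of this commit:

    #include "mongo/bson/bsonobj.h"
    #include "mongo/idl/idl_parser.h"               // assumed header declaring IDLParserContext
    #include "mongo/s/catalog/type_database_gen.h"  // assumed IDL-generated header for DatabaseType

    namespace mongo {
    DatabaseType parseDatabaseDoc(const BSONObj& doc) {  // illustrative helper
        // Before this commit: DatabaseType::parse(IDLParserErrorContext("DatabaseType"), doc);
        // After: identical arguments, new context type name; the string is echoed in parse errors.
        return DatabaseType::parse(IDLParserContext("DatabaseType"), doc);
    }
    }  // namespace mongo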
Diffstat (limited to 'src/mongo/s')
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_client_impl.cpp | 8
-rw-r--r--  src/mongo/s/catalog/type_chunk.cpp | 2
-rw-r--r--  src/mongo/s/catalog/type_collection.cpp | 2
-rw-r--r--  src/mongo/s/catalog/type_database_test.cpp | 10
-rw-r--r--  src/mongo/s/chunk_version.cpp | 2
-rw-r--r--  src/mongo/s/cluster_ddl.cpp | 6
-rw-r--r--  src/mongo/s/commands/cluster_abort_transaction_cmd.h | 2
-rw-r--r--  src/mongo/s/commands/cluster_analyze.cpp | 2
-rw-r--r--  src/mongo/s/commands/cluster_balancer_collection_status_cmd.cpp | 2
-rw-r--r--  src/mongo/s/commands/cluster_collection_mod_cmd.cpp | 4
-rw-r--r--  src/mongo/s/commands/cluster_commit_transaction_cmd.h | 2
-rw-r--r--  src/mongo/s/commands/cluster_count_cmd.cpp | 8
-rw-r--r--  src/mongo/s/commands/cluster_create_indexes_cmd.cpp | 4
-rw-r--r--  src/mongo/s/commands/cluster_drop_indexes_cmd.cpp | 2
-rw-r--r--  src/mongo/s/commands/cluster_enable_sharding_cmd.cpp | 2
-rw-r--r--  src/mongo/s/commands/cluster_explain_cmd.cpp | 4
-rw-r--r--  src/mongo/s/commands/cluster_find_and_modify_cmd.cpp | 6
-rw-r--r--  src/mongo/s/commands/cluster_list_collections_cmd.cpp | 5
-rw-r--r--  src/mongo/s/commands/cluster_map_reduce_agg.cpp | 2
-rw-r--r--  src/mongo/s/commands/cluster_move_primary_cmd.cpp | 2
-rw-r--r--  src/mongo/s/commands/cluster_multicast_cmd.cpp | 2
-rw-r--r--  src/mongo/s/commands/cluster_rename_collection_cmd.cpp | 2
-rw-r--r--  src/mongo/s/commands/cluster_rwc_defaults_commands.cpp | 6
-rw-r--r--  src/mongo/s/commands/cluster_shard_collection_cmd.cpp | 3
-rw-r--r--  src/mongo/s/commands/cluster_user_management_commands.cpp | 2
-rw-r--r--  src/mongo/s/commands/cluster_validate_db_metadata_cmd.cpp | 2
-rw-r--r--  src/mongo/s/commands/internal_transactions_test_command.h | 2
-rw-r--r--  src/mongo/s/database_version.h | 2
-rw-r--r--  src/mongo/s/query/async_results_merger_test.cpp | 2
-rw-r--r--  src/mongo/s/query/document_source_merge_cursors.cpp | 2
-rw-r--r--  src/mongo/s/read_write_concern_defaults_cache_lookup_mongos.cpp | 4
-rw-r--r--  src/mongo/s/request_types/merge_chunks_request_test.cpp | 2
-rw-r--r--  src/mongo/s/shard_util.cpp | 2
33 files changed, 53 insertions, 57 deletions
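Three context shapes recur in the command hunks below, and each keeps its arguments across the rename: a bare name, a name plus an apiStrict flag, and a name plus a pointer to an enclosing context used when validating nested raw shard replies. A condensed sketch of those call shapes, assuming the same idl_parser.h header as above; the wrapper function, the "explain" string, and the "raw" literal (standing in for kRawFieldName) are illustrative:

    #include "mongo/idl/idl_parser.h"  // assumed header declaring IDLParserContext

    namespace mongo {
    void contextShapesSketch() {  // illustrative, not from the commit
        auto ctx = IDLParserContext("CollModReply");                         // bare name
        auto strictCtx = IDLParserContext("explain", true /* apiStrict */);  // name + apiStrict flag
        auto rawCtx = IDLParserContext("raw", &ctx);                         // nested context reporting through its parent
        (void)strictCtx;  // silence unused-variable warnings in this sketch
        (void)rawCtx;
    }
    }  // namespace mongo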
diff --git a/src/mongo/s/catalog/sharding_catalog_client_impl.cpp b/src/mongo/s/catalog/sharding_catalog_client_impl.cpp
index 5941d8b3499..0e3e603a269 100644
--- a/src/mongo/s/catalog/sharding_catalog_client_impl.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_client_impl.cpp
@@ -377,7 +377,7 @@ std::vector<DatabaseType> ShardingCatalogClientImpl::getAllDBs(OperationContext*
std::vector<DatabaseType> databases;
databases.reserve(dbs.size());
for (const BSONObj& doc : dbs) {
- databases.emplace_back(DatabaseType::parse(IDLParserErrorContext("DatabaseType"), doc));
+ databases.emplace_back(DatabaseType::parse(IDLParserContext("DatabaseType"), doc));
}
return databases;
@@ -409,8 +409,8 @@ StatusWith<repl::OpTimeWith<DatabaseType>> ShardingCatalogClientImpl::_fetchData
invariant(docsWithOpTime.value.size() == 1);
try {
- auto db = DatabaseType::parse(IDLParserErrorContext("DatabaseType"),
- docsWithOpTime.value.front());
+ auto db =
+ DatabaseType::parse(IDLParserContext("DatabaseType"), docsWithOpTime.value.front());
return repl::OpTimeWith<DatabaseType>(db, docsWithOpTime.opTime);
} catch (const DBException& e) {
return e.toStatus("Failed to parse DatabaseType");
@@ -1106,7 +1106,7 @@ StatusWith<std::vector<KeysCollectionDocument>> ShardingCatalogClientImpl::getNe
keys.reserve(keyDocs.size());
for (auto&& keyDoc : keyDocs) {
try {
- keys.push_back(KeysCollectionDocument::parse(IDLParserErrorContext("keyDoc"), keyDoc));
+ keys.push_back(KeysCollectionDocument::parse(IDLParserContext("keyDoc"), keyDoc));
} catch (...) {
return exceptionToStatus();
}
diff --git a/src/mongo/s/catalog/type_chunk.cpp b/src/mongo/s/catalog/type_chunk.cpp
index 7ed4cc739cc..3ad4047d3c1 100644
--- a/src/mongo/s/catalog/type_chunk.cpp
+++ b/src/mongo/s/catalog/type_chunk.cpp
@@ -193,7 +193,7 @@ StatusWith<std::vector<ChunkHistory>> ChunkHistory::fromBSON(const BSONArray& so
for (const auto& arrayElement : source) {
if (arrayElement.type() == Object) {
- IDLParserErrorContext tempContext("chunk history array");
+ IDLParserContext tempContext("chunk history array");
values.emplace_back(ChunkHistoryBase::parse(tempContext, arrayElement.Obj()));
} else {
return {ErrorCodes::BadValue,
diff --git a/src/mongo/s/catalog/type_collection.cpp b/src/mongo/s/catalog/type_collection.cpp
index 5cc64eca8b5..86854f7a257 100644
--- a/src/mongo/s/catalog/type_collection.cpp
+++ b/src/mongo/s/catalog/type_collection.cpp
@@ -59,7 +59,7 @@ CollectionType::CollectionType(NamespaceString nss,
}
CollectionType::CollectionType(const BSONObj& obj) {
- CollectionType::parseProtected(IDLParserErrorContext("CollectionType"), obj);
+ CollectionType::parseProtected(IDLParserContext("CollectionType"), obj);
invariant(getTimestamp() != Timestamp(0, 0));
uassert(ErrorCodes::BadValue,
str::stream() << "Invalid namespace " << getNss(),
diff --git a/src/mongo/s/catalog/type_database_test.cpp b/src/mongo/s/catalog/type_database_test.cpp
index 73197980f55..f81016bae22 100644
--- a/src/mongo/s/catalog/type_database_test.cpp
+++ b/src/mongo/s/catalog/type_database_test.cpp
@@ -42,7 +42,7 @@ using std::string;
TEST(DatabaseType, Empty) {
// Constructing from empty BSON must fail
- ASSERT_THROWS(DatabaseType::parse(IDLParserErrorContext("DatabaseType"), BSONObj()),
+ ASSERT_THROWS(DatabaseType::parse(IDLParserContext("DatabaseType"), BSONObj()),
AssertionException);
}
@@ -55,7 +55,7 @@ TEST(DatabaseType, Basic) {
<< DatabaseType::kShardedFieldName << true << DatabaseType::kVersionFieldName
<< BSON("uuid" << uuid << "lastMod" << 0 << "timestamp" << timestamp));
- const auto db = DatabaseType::parse(IDLParserErrorContext("DatabaseType"), dbObj);
+ const auto db = DatabaseType::parse(IDLParserContext("DatabaseType"), dbObj);
ASSERT_EQUALS(db.getName(), "mydb");
ASSERT_EQUALS(db.getPrimary(), "shard");
ASSERT_TRUE(db.getSharded());
@@ -66,15 +66,13 @@ TEST(DatabaseType, Basic) {
TEST(DatabaseType, BadType) {
// Constructing from a BSON object with a malformed database must fail
const auto dbObj = BSON(DatabaseType::kNameFieldName << 0);
- ASSERT_THROWS(DatabaseType::parse(IDLParserErrorContext("DatabaseType"), dbObj),
- AssertionException);
+ ASSERT_THROWS(DatabaseType::parse(IDLParserContext("DatabaseType"), dbObj), AssertionException);
}
TEST(DatabaseType, MissingRequired) {
// Constructing from a BSON object without all the required fields must fail
const auto dbObj = BSON(DatabaseType::kNameFieldName << "mydb");
- ASSERT_THROWS(DatabaseType::parse(IDLParserErrorContext("DatabaseType"), dbObj),
- AssertionException);
+ ASSERT_THROWS(DatabaseType::parse(IDLParserContext("DatabaseType"), dbObj), AssertionException);
}
} // unnamed namespace
diff --git a/src/mongo/s/chunk_version.cpp b/src/mongo/s/chunk_version.cpp
index ca260776a34..88d7833f10e 100644
--- a/src/mongo/s/chunk_version.cpp
+++ b/src/mongo/s/chunk_version.cpp
@@ -58,7 +58,7 @@ std::string CollectionGeneration::toString() const {
ChunkVersion ChunkVersion::parse(const BSONElement& element) {
auto parsedVersion =
- ChunkVersion60Format::parse(IDLParserErrorContext("ChunkVersion"), element.Obj());
+ ChunkVersion60Format::parse(IDLParserContext("ChunkVersion"), element.Obj());
auto version = parsedVersion.getVersion();
return ChunkVersion({parsedVersion.getEpoch(), parsedVersion.getTimestamp()},
{version.getSecs(), version.getInc()});
diff --git a/src/mongo/s/cluster_ddl.cpp b/src/mongo/s/cluster_ddl.cpp
index 5010575774f..99d186fef90 100644
--- a/src/mongo/s/cluster_ddl.cpp
+++ b/src/mongo/s/cluster_ddl.cpp
@@ -109,7 +109,7 @@ CachedDatabaseInfo createDatabase(OperationContext* opCtx,
<< "Database " << dbName << " could not be created");
auto createDbResponse = ConfigsvrCreateDatabaseResponse::parse(
- IDLParserErrorContext("configsvrCreateDatabaseResponse"), response.response);
+ IDLParserContext("configsvrCreateDatabaseResponse"), response.response);
catalogCache->onStaleDatabaseVersion(dbName, createDbResponse.getDatabaseVersion());
dbStatus = catalogCache->getDatabase(opCtx, dbName);
@@ -133,8 +133,8 @@ void createCollection(OperationContext* opCtx, const ShardsvrCreateCollection& r
const auto remoteResponse = uassertStatusOK(cmdResponse.swResponse);
uassertStatusOK(getStatusFromCommandResult(remoteResponse.data));
- auto createCollResp = CreateCollectionResponse::parse(IDLParserErrorContext("createCollection"),
- remoteResponse.data);
+ auto createCollResp =
+ CreateCollectionResponse::parse(IDLParserContext("createCollection"), remoteResponse.data);
auto catalogCache = Grid::get(opCtx)->catalogCache();
catalogCache->invalidateShardOrEntireCollectionEntryForShardedCollection(
diff --git a/src/mongo/s/commands/cluster_abort_transaction_cmd.h b/src/mongo/s/commands/cluster_abort_transaction_cmd.h
index 04c70945e35..734e3363f90 100644
--- a/src/mongo/s/commands/cluster_abort_transaction_cmd.h
+++ b/src/mongo/s/commands/cluster_abort_transaction_cmd.h
@@ -56,7 +56,7 @@ public:
typename BasicCommandWithRequestParser<ClusterAbortTransactionCmdBase<Impl>>::RequestParser;
void validateResult(const BSONObj& resultObj) final {
- auto ctx = IDLParserErrorContext("AbortReply");
+ auto ctx = IDLParserContext("AbortReply");
if (!BaseType::checkIsErrorStatus(resultObj, ctx)) {
// Will throw if the result doesn't match the abortReply.
Reply::parse(ctx, resultObj);
diff --git a/src/mongo/s/commands/cluster_analyze.cpp b/src/mongo/s/commands/cluster_analyze.cpp
index 4eca4a1238f..2575982722d 100644
--- a/src/mongo/s/commands/cluster_analyze.cpp
+++ b/src/mongo/s/commands/cluster_analyze.cpp
@@ -129,7 +129,7 @@ public:
}
void validateResult(const BSONObj& result) final {
- auto ctx = IDLParserErrorContext("AnalyzeCommandReply");
+ auto ctx = IDLParserContext("AnalyzeCommandReply");
if (checkIsErrorStatus(result, ctx)) {
return;
}
diff --git a/src/mongo/s/commands/cluster_balancer_collection_status_cmd.cpp b/src/mongo/s/commands/cluster_balancer_collection_status_cmd.cpp
index 50e654f5008..f0903d3894c 100644
--- a/src/mongo/s/commands/cluster_balancer_collection_status_cmd.cpp
+++ b/src/mongo/s/commands/cluster_balancer_collection_status_cmd.cpp
@@ -77,7 +77,7 @@ public:
uassertStatusOK(cmdResponse.commandStatus);
- return Response::parse(IDLParserErrorContext("BalancerCollectionStatusResponse"),
+ return Response::parse(IDLParserContext("BalancerCollectionStatusResponse"),
cmdResponse.response);
}
diff --git a/src/mongo/s/commands/cluster_collection_mod_cmd.cpp b/src/mongo/s/commands/cluster_collection_mod_cmd.cpp
index 38e62973687..a1b8cb4a250 100644
--- a/src/mongo/s/commands/cluster_collection_mod_cmd.cpp
+++ b/src/mongo/s/commands/cluster_collection_mod_cmd.cpp
@@ -127,7 +127,7 @@ public:
}
void validateResult(const BSONObj& resultObj) final {
- auto ctx = IDLParserErrorContext("CollModReply");
+ auto ctx = IDLParserContext("CollModReply");
if (checkIsErrorStatus(resultObj, ctx)) {
return;
}
@@ -148,7 +148,7 @@ public:
return;
}
- auto rawCtx = IDLParserErrorContext(kRawFieldName, &ctx);
+ auto rawCtx = IDLParserContext(kRawFieldName, &ctx);
for (const auto& element : rawData.Obj()) {
if (!rawCtx.checkAndAssertType(element, Object)) {
return;
diff --git a/src/mongo/s/commands/cluster_commit_transaction_cmd.h b/src/mongo/s/commands/cluster_commit_transaction_cmd.h
index 72d21d6111e..926c2bbe6eb 100644
--- a/src/mongo/s/commands/cluster_commit_transaction_cmd.h
+++ b/src/mongo/s/commands/cluster_commit_transaction_cmd.h
@@ -55,7 +55,7 @@ public:
ClusterCommitTransactionCmdBase<Impl>>::RequestParser;
void validateResult(const BSONObj& resultObj) final {
- auto ctx = IDLParserErrorContext("CommitReply");
+ auto ctx = IDLParserContext("CommitReply");
if (!BaseType::checkIsErrorStatus(resultObj, ctx)) {
// Will throw if the result doesn't match the commitReply.
Reply::parse(ctx, resultObj);
diff --git a/src/mongo/s/commands/cluster_count_cmd.cpp b/src/mongo/s/commands/cluster_count_cmd.cpp
index 539717d9de7..bfb24f62ce4 100644
--- a/src/mongo/s/commands/cluster_count_cmd.cpp
+++ b/src/mongo/s/commands/cluster_count_cmd.cpp
@@ -100,7 +100,7 @@ public:
std::vector<AsyncRequestsSender::Response> shardResponses;
try {
- auto countRequest = CountCommandRequest::parse(IDLParserErrorContext("count"), cmdObj);
+ auto countRequest = CountCommandRequest::parse(IDLParserContext("count"), cmdObj);
if (shouldDoFLERewrite(countRequest)) {
processFLECountS(opCtx, nss, &countRequest);
}
@@ -134,7 +134,7 @@ public:
collation);
} catch (const ExceptionFor<ErrorCodes::CommandOnShardedViewNotSupportedOnMongod>& ex) {
// Rewrite the count command as an aggregation.
- auto countRequest = CountCommandRequest::parse(IDLParserErrorContext("count"), cmdObj);
+ auto countRequest = CountCommandRequest::parse(IDLParserContext("count"), cmdObj);
auto aggCmdOnView =
uassertStatusOK(countCommandAsAggregationCommand(countRequest, nss));
auto aggCmdOnViewObj = OpMsgRequest::fromDBAndBody(nss.db(), aggCmdOnView).body;
@@ -200,7 +200,7 @@ public:
CountCommandRequest countRequest(NamespaceStringOrUUID(NamespaceString{}));
try {
- countRequest = CountCommandRequest::parse(IDLParserErrorContext("count"), request);
+ countRequest = CountCommandRequest::parse(IDLParserContext("count"), request);
} catch (...) {
return exceptionToStatus();
}
@@ -240,7 +240,7 @@ public:
} catch (const ExceptionFor<ErrorCodes::CommandOnShardedViewNotSupportedOnMongod>& ex) {
CountCommandRequest countRequest(NamespaceStringOrUUID(NamespaceString{}));
try {
- countRequest = CountCommandRequest::parse(IDLParserErrorContext("count"), cmdObj);
+ countRequest = CountCommandRequest::parse(IDLParserContext("count"), cmdObj);
} catch (...) {
return exceptionToStatus();
}
diff --git a/src/mongo/s/commands/cluster_create_indexes_cmd.cpp b/src/mongo/s/commands/cluster_create_indexes_cmd.cpp
index 9b2246e571b..ef3aacd159c 100644
--- a/src/mongo/s/commands/cluster_create_indexes_cmd.cpp
+++ b/src/mongo/s/commands/cluster_create_indexes_cmd.cpp
@@ -140,7 +140,7 @@ public:
* 'code' & 'codeName' are permitted in either scenario, but non-zero 'code' indicates "not ok".
*/
void validateResult(const BSONObj& result) final {
- auto ctx = IDLParserErrorContext("createIndexesReply");
+ auto ctx = IDLParserContext("createIndexesReply");
if (checkIsErrorStatus(result, ctx)) {
return;
}
@@ -159,7 +159,7 @@ public:
return;
}
- auto rawCtx = IDLParserErrorContext(kRawFieldName, &ctx);
+ auto rawCtx = IDLParserContext(kRawFieldName, &ctx);
for (const auto& element : rawData.Obj()) {
if (!rawCtx.checkAndAssertType(element, Object)) {
return;
diff --git a/src/mongo/s/commands/cluster_drop_indexes_cmd.cpp b/src/mongo/s/commands/cluster_drop_indexes_cmd.cpp
index a4fbf12625e..45b20abae15 100644
--- a/src/mongo/s/commands/cluster_drop_indexes_cmd.cpp
+++ b/src/mongo/s/commands/cluster_drop_indexes_cmd.cpp
@@ -71,7 +71,7 @@ public:
}
void validateResult(const BSONObj& resultObj) final {
- auto ctx = IDLParserErrorContext("DropIndexesReply");
+ auto ctx = IDLParserContext("DropIndexesReply");
if (!checkIsErrorStatus(resultObj, ctx)) {
Reply::parse(ctx, resultObj.removeField(kRawFieldName));
if (resultObj.hasField(kRawFieldName)) {
diff --git a/src/mongo/s/commands/cluster_enable_sharding_cmd.cpp b/src/mongo/s/commands/cluster_enable_sharding_cmd.cpp
index c4fadfc733d..ac5f87a6d85 100644
--- a/src/mongo/s/commands/cluster_enable_sharding_cmd.cpp
+++ b/src/mongo/s/commands/cluster_enable_sharding_cmd.cpp
@@ -90,7 +90,7 @@ public:
uassertStatusOK(response.writeConcernStatus);
auto createDbResponse = ConfigsvrCreateDatabaseResponse::parse(
- IDLParserErrorContext("configsvrCreateDatabaseResponse"), response.response);
+ IDLParserContext("configsvrCreateDatabaseResponse"), response.response);
catalogCache->onStaleDatabaseVersion(dbName, createDbResponse.getDatabaseVersion());
purgeDatabaseOnExit.dismiss();
}
diff --git a/src/mongo/s/commands/cluster_explain_cmd.cpp b/src/mongo/s/commands/cluster_explain_cmd.cpp
index 00ed9bfd521..8b8f03db407 100644
--- a/src/mongo/s/commands/cluster_explain_cmd.cpp
+++ b/src/mongo/s/commands/cluster_explain_cmd.cpp
@@ -167,8 +167,8 @@ std::unique_ptr<CommandInvocation> ClusterExplainCmd::parse(OperationContext* op
// To enforce API versioning
auto cmdObj = ExplainCommandRequest::parse(
- IDLParserErrorContext(ExplainCommandRequest::kCommandName,
- APIParameters::get(opCtx).getAPIStrict().value_or(false)),
+ IDLParserContext(ExplainCommandRequest::kCommandName,
+ APIParameters::get(opCtx).getAPIStrict().value_or(false)),
request.body);
std::string dbName = cmdObj.getDbName().toString();
ExplainOptions::Verbosity verbosity = cmdObj.getVerbosity();
diff --git a/src/mongo/s/commands/cluster_find_and_modify_cmd.cpp b/src/mongo/s/commands/cluster_find_and_modify_cmd.cpp
index afa76fd80cd..9d509016a82 100644
--- a/src/mongo/s/commands/cluster_find_and_modify_cmd.cpp
+++ b/src/mongo/s/commands/cluster_find_and_modify_cmd.cpp
@@ -119,7 +119,7 @@ boost::optional<BSONObj> getLet(const BSONObj& cmdObj) {
boost::optional<LegacyRuntimeConstants> getLegacyRuntimeConstants(const BSONObj& cmdObj) {
if (auto rcElem = cmdObj.getField("runtimeConstants"_sd); rcElem.type() == BSONType::Object) {
- IDLParserErrorContext ctx("internalLegacyRuntimeConstants");
+ IDLParserContext ctx("internalLegacyRuntimeConstants");
return LegacyRuntimeConstants::parse(ctx, rcElem.embeddedObject());
}
return boost::none;
@@ -375,7 +375,7 @@ public:
const BSONObj& cmdObj = [&]() {
// Check whether the query portion needs to be rewritten for FLE.
auto findAndModifyRequest = write_ops::FindAndModifyCommandRequest::parse(
- IDLParserErrorContext("ClusterFindAndModify"), request.body);
+ IDLParserContext("ClusterFindAndModify"), request.body);
if (shouldDoFLERewrite(findAndModifyRequest)) {
auto newRequest = processFLEFindAndModifyExplainMongos(opCtx, findAndModifyRequest);
return newRequest.first.toBSON(request.body);
@@ -565,7 +565,7 @@ private:
if (feature_flags::gFeatureFlagUpdateDocumentShardKeyUsingTransactionApi.isEnabled(
serverGlobalParams.featureCompatibility)) {
auto parsedRequest = write_ops::FindAndModifyCommandRequest::parse(
- IDLParserErrorContext("ClusterFindAndModify"), cmdObj);
+ IDLParserContext("ClusterFindAndModify"), cmdObj);
// Strip write concern because this command will be sent as part of a
// transaction and the write concern has already been loaded onto the opCtx and
// will be picked up by the transaction API.
diff --git a/src/mongo/s/commands/cluster_list_collections_cmd.cpp b/src/mongo/s/commands/cluster_list_collections_cmd.cpp
index 64093b61191..0f1a1eab2f2 100644
--- a/src/mongo/s/commands/cluster_list_collections_cmd.cpp
+++ b/src/mongo/s/commands/cluster_list_collections_cmd.cpp
@@ -179,8 +179,7 @@ BSONObj rewriteCommandForListingOwnCollections(OperationContext* opCtx,
// testing because an error while parsing indicates an internal error, not something that should
// surface to a user.
if (getTestCommandsEnabled()) {
- ListCollections::parse(IDLParserErrorContext("ListCollectionsForOwnCollections"),
- rewrittenCmd);
+ ListCollections::parse(IDLParserContext("ListCollectionsForOwnCollections"), rewrittenCmd);
}
return rewrittenCmd;
@@ -258,7 +257,7 @@ public:
void validateResult(const BSONObj& result) final {
StringDataSet ignorableFields({ErrorReply::kOkFieldName});
- ListCollectionsReply::parse(IDLParserErrorContext("ListCollectionsReply"),
+ ListCollectionsReply::parse(IDLParserContext("ListCollectionsReply"),
result.removeFields(ignorableFields));
}
diff --git a/src/mongo/s/commands/cluster_map_reduce_agg.cpp b/src/mongo/s/commands/cluster_map_reduce_agg.cpp
index 2ea18a9500e..d25acff8bc7 100644
--- a/src/mongo/s/commands/cluster_map_reduce_agg.cpp
+++ b/src/mongo/s/commands/cluster_map_reduce_agg.cpp
@@ -148,7 +148,7 @@ bool runAggregationMapReduce(OperationContext* opCtx,
const BSONObj& cmd,
BSONObjBuilder& result,
boost::optional<ExplainOptions::Verbosity> verbosity) {
- auto parsedMr = MapReduceCommandRequest::parse(IDLParserErrorContext("mapReduce"), cmd);
+ auto parsedMr = MapReduceCommandRequest::parse(IDLParserContext("mapReduce"), cmd);
stdx::unordered_set<NamespaceString> involvedNamespaces{parsedMr.getNamespace()};
auto hasOutDB = parsedMr.getOutOptions().getDatabaseName();
auto resolvedOutNss = NamespaceString{hasOutDB ? *hasOutDB : parsedMr.getNamespace().db(),
diff --git a/src/mongo/s/commands/cluster_move_primary_cmd.cpp b/src/mongo/s/commands/cluster_move_primary_cmd.cpp
index c24272ef7ac..3c1c48cb609 100644
--- a/src/mongo/s/commands/cluster_move_primary_cmd.cpp
+++ b/src/mongo/s/commands/cluster_move_primary_cmd.cpp
@@ -93,7 +93,7 @@ public:
const std::string& dbname,
const BSONObj& cmdObj,
BSONObjBuilder& result) {
- auto request = MovePrimary::parse(IDLParserErrorContext("MovePrimary"), cmdObj);
+ auto request = MovePrimary::parse(IDLParserContext("MovePrimary"), cmdObj);
const string db = parseNs("", cmdObj);
const StringData toShard(request.getTo());
diff --git a/src/mongo/s/commands/cluster_multicast_cmd.cpp b/src/mongo/s/commands/cluster_multicast_cmd.cpp
index 834a786769f..4a38364c63b 100644
--- a/src/mongo/s/commands/cluster_multicast_cmd.cpp
+++ b/src/mongo/s/commands/cluster_multicast_cmd.cpp
@@ -91,7 +91,7 @@ public:
const std::string& dbname,
const BSONObj& cmdObj,
BSONObjBuilder& result) override {
- IDLParserErrorContext ctx("ClusterMulticast");
+ IDLParserContext ctx("ClusterMulticast");
auto args = ClusterMulticast::parse(ctx, cmdObj);
// Grab an arbitrary executor.
diff --git a/src/mongo/s/commands/cluster_rename_collection_cmd.cpp b/src/mongo/s/commands/cluster_rename_collection_cmd.cpp
index 1df93f9458e..b109fdcc556 100644
--- a/src/mongo/s/commands/cluster_rename_collection_cmd.cpp
+++ b/src/mongo/s/commands/cluster_rename_collection_cmd.cpp
@@ -125,7 +125,7 @@ public:
uassertStatusOK(cmdResponse.commandStatus);
auto renameCollResp = RenameCollectionResponse::parse(
- IDLParserErrorContext("renameCollection"), cmdResponse.response);
+ IDLParserContext("renameCollection"), cmdResponse.response);
catalogCache->invalidateShardOrEntireCollectionEntryForShardedCollection(
toNss, renameCollResp.getCollectionVersion(), dbInfo->getPrimary());
diff --git a/src/mongo/s/commands/cluster_rwc_defaults_commands.cpp b/src/mongo/s/commands/cluster_rwc_defaults_commands.cpp
index 8db311e9c0b..58d6c63891a 100644
--- a/src/mongo/s/commands/cluster_rwc_defaults_commands.cpp
+++ b/src/mongo/s/commands/cluster_rwc_defaults_commands.cpp
@@ -71,8 +71,8 @@ public:
uassertStatusOK(cmdResponse.writeConcernStatus);
// Quickly pick up the new defaults by setting them in the cache.
- auto newDefaults = RWConcernDefault::parse(
- IDLParserErrorContext("ClusterSetDefaultRWConcern"), cmdResponse.response);
+ auto newDefaults = RWConcernDefault::parse(IDLParserContext("ClusterSetDefaultRWConcern"),
+ cmdResponse.response);
if (auto optWC = newDefaults.getDefaultWriteConcern()) {
if (optWC->hasCustomWriteMode()) {
LOGV2_WARNING(
@@ -155,7 +155,7 @@ public:
uassertStatusOK(cmdResponse.commandStatus);
return GetDefaultRWConcernResponse::parse(
- IDLParserErrorContext("ClusterGetDefaultRWConcernResponse"), cmdResponse.response);
+ IDLParserContext("ClusterGetDefaultRWConcernResponse"), cmdResponse.response);
}
private:
diff --git a/src/mongo/s/commands/cluster_shard_collection_cmd.cpp b/src/mongo/s/commands/cluster_shard_collection_cmd.cpp
index 4741bebba5e..f5245136e2c 100644
--- a/src/mongo/s/commands/cluster_shard_collection_cmd.cpp
+++ b/src/mongo/s/commands/cluster_shard_collection_cmd.cpp
@@ -96,8 +96,7 @@ public:
"Sharding a Queryable Encryption state collection is not allowed",
!nss.isFLE2StateCollection());
- auto shardCollRequest =
- ShardCollection::parse(IDLParserErrorContext("ShardCollection"), cmdObj);
+ auto shardCollRequest = ShardCollection::parse(IDLParserContext("ShardCollection"), cmdObj);
ShardsvrCreateCollection shardsvrCollRequest(nss);
CreateCollectionRequest requestParamsObj;
diff --git a/src/mongo/s/commands/cluster_user_management_commands.cpp b/src/mongo/s/commands/cluster_user_management_commands.cpp
index 210d1e20b00..f6384772b80 100644
--- a/src/mongo/s/commands/cluster_user_management_commands.cpp
+++ b/src/mongo/s/commands/cluster_user_management_commands.cpp
@@ -77,7 +77,7 @@ void uassertEmptyReply(BSONObj obj) {
template <typename Request, typename Reply>
Reply parseUMCReply(BSONObj obj) try {
- return Reply::parse(IDLParserErrorContext(Request::kCommandName), obj);
+ return Reply::parse(IDLParserContext(Request::kCommandName), obj);
} catch (const AssertionException& ex) {
uasserted(ex.code(),
"Received invalid response from {} command: {}, error: {}"_format(
diff --git a/src/mongo/s/commands/cluster_validate_db_metadata_cmd.cpp b/src/mongo/s/commands/cluster_validate_db_metadata_cmd.cpp
index 9af5f532a74..ea2e17ee350 100644
--- a/src/mongo/s/commands/cluster_validate_db_metadata_cmd.cpp
+++ b/src/mongo/s/commands/cluster_validate_db_metadata_cmd.cpp
@@ -105,7 +105,7 @@ public:
"The array element in 'apiVersionErrors' should be object",
error.type() == Object);
ErrorReplyElement apiVersionError = ErrorReplyElement::parse(
- IDLParserErrorContext("ErrorReplyElement"), error.Obj());
+ IDLParserContext("ErrorReplyElement"), error.Obj());
// Ensure that the final output doesn't exceed max BSON size.
apiVersionError.setShard(StringData(shardRes.shardId.toString()));
diff --git a/src/mongo/s/commands/internal_transactions_test_command.h b/src/mongo/s/commands/internal_transactions_test_command.h
index bbbea80de38..a605722c7f5 100644
--- a/src/mongo/s/commands/internal_transactions_test_command.h
+++ b/src/mongo/s/commands/internal_transactions_test_command.h
@@ -91,7 +91,7 @@ public:
// from the command to append $db, which FindCommandRequest expects.
auto findOpMsgRequest = OpMsgRequest::fromDBAndBody(dbName, command);
auto findCommand = FindCommandRequest::parse(
- IDLParserErrorContext("FindCommandRequest", false /* apiStrict */),
+ IDLParserContext("FindCommandRequest", false /* apiStrict */),
findOpMsgRequest.body);
auto docs = txnClient.exhaustiveFind(findCommand).get();
diff --git a/src/mongo/s/database_version.h b/src/mongo/s/database_version.h
index 5de01497318..3868c3b370b 100644
--- a/src/mongo/s/database_version.h
+++ b/src/mongo/s/database_version.h
@@ -54,7 +54,7 @@ public:
DatabaseVersion() = default;
explicit DatabaseVersion(const BSONObj& obj) {
- DatabaseVersionBase::parseProtected(IDLParserErrorContext("DatabaseVersion"), obj);
+ DatabaseVersionBase::parseProtected(IDLParserContext("DatabaseVersion"), obj);
}
explicit DatabaseVersion(const DatabaseVersionBase& dbv) : DatabaseVersionBase(dbv) {}
diff --git a/src/mongo/s/query/async_results_merger_test.cpp b/src/mongo/s/query/async_results_merger_test.cpp
index e20c06538f9..ef7817b0961 100644
--- a/src/mongo/s/query/async_results_merger_test.cpp
+++ b/src/mongo/s/query/async_results_merger_test.cpp
@@ -50,7 +50,7 @@ namespace mongo {
namespace {
LogicalSessionId parseSessionIdFromCmd(BSONObj cmdObj) {
- return LogicalSessionId::parse(IDLParserErrorContext("lsid"), cmdObj["lsid"].Obj());
+ return LogicalSessionId::parse(IDLParserContext("lsid"), cmdObj["lsid"].Obj());
}
BSONObj makePostBatchResumeToken(Timestamp clusterTime) {
diff --git a/src/mongo/s/query/document_source_merge_cursors.cpp b/src/mongo/s/query/document_source_merge_cursors.cpp
index c6f8f3fbaf6..b2881d81415 100644
--- a/src/mongo/s/query/document_source_merge_cursors.cpp
+++ b/src/mongo/s/query/document_source_merge_cursors.cpp
@@ -129,7 +129,7 @@ boost::intrusive_ptr<DocumentSource> DocumentSourceMergeCursors::createFromBson(
"$mergeCursors stage expected an object as argument",
elem.type() == BSONType::Object);
auto ownedObj = elem.embeddedObject().getOwned();
- auto armParams = AsyncResultsMergerParams::parse(IDLParserErrorContext(kStageName), ownedObj);
+ auto armParams = AsyncResultsMergerParams::parse(IDLParserContext(kStageName), ownedObj);
return new DocumentSourceMergeCursors(expCtx, std::move(armParams), std::move(ownedObj));
}
diff --git a/src/mongo/s/read_write_concern_defaults_cache_lookup_mongos.cpp b/src/mongo/s/read_write_concern_defaults_cache_lookup_mongos.cpp
index 7a2c6b5b737..a506a7c30c0 100644
--- a/src/mongo/s/read_write_concern_defaults_cache_lookup_mongos.cpp
+++ b/src/mongo/s/read_write_concern_defaults_cache_lookup_mongos.cpp
@@ -51,8 +51,8 @@ boost::optional<RWConcernDefault> readWriteConcernDefaultsCacheLookupMongoS(
uassertStatusOK(cmdResponse.commandStatus);
- return RWConcernDefault::parse(
- IDLParserErrorContext("readWriteConcernDefaultsCacheLookupMongoS"), cmdResponse.response);
+ return RWConcernDefault::parse(IDLParserContext("readWriteConcernDefaultsCacheLookupMongoS"),
+ cmdResponse.response);
}
} // namespace mongo
diff --git a/src/mongo/s/request_types/merge_chunks_request_test.cpp b/src/mongo/s/request_types/merge_chunks_request_test.cpp
index b631ca1dffa..ff7cac5d91c 100644
--- a/src/mongo/s/request_types/merge_chunks_request_test.cpp
+++ b/src/mongo/s/request_types/merge_chunks_request_test.cpp
@@ -40,7 +40,7 @@ namespace {
using unittest::assertGet;
ChunkRange chunkRange(BSON("a" << 1), BSON("a" << 10));
-IDLParserErrorContext ctx("_configsvrCommitChunksMerge");
+IDLParserContext ctx("_configsvrCommitChunksMerge");
TEST(ConfigSvrMergeChunks, BasicValidConfigCommand) {
auto collUUID = UUID::gen();
diff --git a/src/mongo/s/shard_util.cpp b/src/mongo/s/shard_util.cpp
index 2824eabacc2..77c0ec29481 100644
--- a/src/mongo/s/shard_util.cpp
+++ b/src/mongo/s/shard_util.cpp
@@ -156,7 +156,7 @@ StatusWith<std::vector<BSONObj>> selectChunkSplitPoints(OperationContext* opCtx,
}
const auto response = AutoSplitVectorResponse::parse(
- IDLParserErrorContext("AutoSplitVectorResponse"), std::move(cmdStatus.getValue().response));
+ IDLParserContext("AutoSplitVectorResponse"), std::move(cmdStatus.getValue().response));
return response.getSplitKeys();
}
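The same rename also reaches hand-written classes that derive from their IDL-generated bases and re-parse themselves in a BSON constructor via parseProtected, as CollectionType (type_collection.cpp) and DatabaseVersion (database_version.h) do above. A minimal sketch of that pattern under assumed names; the base class here is a stand-in for a real IDL-generated type and only declares the signature used above:

    #include "mongo/bson/bsonobj.h"
    #include "mongo/idl/idl_parser.h"  // assumed header declaring IDLParserContext

    namespace mongo {
    class MyTypeBase {  // stand-in for an IDL-generated base class
    protected:
        // Fills the base-class fields in place; throws on malformed input.
        void parseProtected(const IDLParserContext& ctx, const BSONObj& obj);
    };

    class MyType : public MyTypeBase {  // illustrative hand-written wrapper
    public:
        explicit MyType(const BSONObj& obj) {
            MyTypeBase::parseProtected(IDLParserContext("MyType"), obj);
        }
    };
    }  // namespace mongo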