author    Hugh Tong <hugh.tong@mongodb.com>  2022-07-26 19:20:58 +0000
committer Evergreen Agent <no-reply@evergreen.mongodb.com>  2022-07-26 21:00:49 +0000
commit    044d2f734e0dba40b9dfb02cc49c2bff8f575cd5
tree      eb2d17564a0c99d2ee9ea3d2f95c81c46bcfcee1 /src/mongo/db/s
parent    921bba175902f9b9f29751a466383c3d7e80df7b
SERVER-67824 Rename IDLParserErrorContext to IDLParserContext
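
The rename is mechanical: every call site constructs an IDLParserContext where it previously constructed an IDLParserErrorContext, with the same arguments and usage. A minimal sketch of the before/after pattern (MyIdlType and bsonObj are illustrative placeholders, not names taken from this diff):

    // Before SERVER-67824: the parser context type carried "Error" in its name.
    auto before = MyIdlType::parse(IDLParserErrorContext("MyIdlType"), bsonObj);

    // After SERVER-67824: same constructor argument and parse call, new type name.
    auto after = MyIdlType::parse(IDLParserContext("MyIdlType"), bsonObj);
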
Diffstat (limited to 'src/mongo/db/s')
-rw-r--r--  src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp | 2
-rw-r--r--  src/mongo/db/s/balancer/balancer_commands_scheduler_impl.cpp | 2
-rw-r--r--  src/mongo/db/s/balancer/balancer_defragmentation_policy_test.cpp | 2
-rw-r--r--  src/mongo/db/s/balancer/type_migration.cpp | 4
-rw-r--r--  src/mongo/db/s/clone_catalog_data_command.cpp | 2
-rw-r--r--  src/mongo/db/s/collmod_coordinator.cpp | 2
-rw-r--r--  src/mongo/db/s/config/config_server_test_fixture.cpp | 2
-rw-r--r--  src/mongo/db/s/config/configsvr_coordinator.cpp | 4
-rw-r--r--  src/mongo/db/s/config/set_cluster_parameter_coordinator.cpp | 4
-rw-r--r--  src/mongo/db/s/config/set_cluster_parameter_coordinator.h | 2
-rw-r--r--  src/mongo/db/s/config/set_user_write_block_mode_coordinator.cpp | 4
-rw-r--r--  src/mongo/db/s/config/set_user_write_block_mode_coordinator.h | 2
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_add_shard_test.cpp | 4
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_database_operations.cpp | 4
-rw-r--r--  src/mongo/db/s/create_collection_coordinator.cpp | 4
-rw-r--r--  src/mongo/db/s/forwardable_operation_metadata.cpp | 2
-rw-r--r--  src/mongo/db/s/migration_util.cpp | 2
-rw-r--r--  src/mongo/db/s/move_primary_coordinator.cpp | 2
-rw-r--r--  src/mongo/db/s/move_primary_source_manager.cpp | 2
-rw-r--r--  src/mongo/db/s/persistent_task_queue.h | 2
-rw-r--r--  src/mongo/db/s/persistent_task_queue_test.cpp | 2
-rw-r--r--  src/mongo/db/s/recoverable_critical_section_service.cpp | 6
-rw-r--r--  src/mongo/db/s/refine_collection_shard_key_coordinator.cpp | 2
-rw-r--r--  src/mongo/db/s/rename_collection_coordinator.cpp | 2
-rw-r--r--  src/mongo/db/s/rename_collection_participant_service.cpp | 2
-rw-r--r--  src/mongo/db/s/rename_collection_participant_service.h | 2
-rw-r--r--  src/mongo/db/s/reshard_collection_coordinator.cpp | 2
-rw-r--r--  src/mongo/db/s/resharding/document_source_resharding_iterate_transaction.cpp | 3
-rw-r--r--  src/mongo/db/s/resharding/resharding_agg_test.cpp | 4
-rw-r--r--  src/mongo/db/s/resharding/resharding_coordinator_service.cpp | 2
-rw-r--r--  src/mongo/db/s/resharding/resharding_coordinator_service_test.cpp | 6
-rw-r--r--  src/mongo/db/s/resharding/resharding_coordinator_test.cpp | 2
-rw-r--r--  src/mongo/db/s/resharding/resharding_destined_recipient_test.cpp | 6
-rw-r--r--  src/mongo/db/s/resharding/resharding_donor_oplog_iterator.cpp | 2
-rw-r--r--  src/mongo/db/s/resharding/resharding_donor_service_test.cpp | 2
-rw-r--r--  src/mongo/db/s/resharding/resharding_op_observer.cpp | 4
-rw-r--r--  src/mongo/db/s/resharding/resharding_oplog_applier.cpp | 2
-rw-r--r--  src/mongo/db/s/resharding/resharding_recipient_service.cpp | 2
-rw-r--r--  src/mongo/db/s/resharding/resharding_recipient_service_test.cpp | 2
-rw-r--r--  src/mongo/db/s/resharding/resharding_txn_cloner_test.cpp | 8
-rw-r--r--  src/mongo/db/s/session_catalog_migration_source.cpp | 6
-rw-r--r--  src/mongo/db/s/set_allow_migrations_coordinator.cpp | 2
-rw-r--r--  src/mongo/db/s/shard_metadata_util.cpp | 2
-rw-r--r--  src/mongo/db/s/shard_server_op_observer.cpp | 14
-rw-r--r--  src/mongo/db/s/sharding_ddl_coordinator.cpp | 4
-rw-r--r--  src/mongo/db/s/sharding_ddl_coordinator.h | 2
-rw-r--r--  src/mongo/db/s/shardsvr_collmod_command.cpp | 2
-rw-r--r--  src/mongo/db/s/shardsvr_collmod_participant_command.cpp | 2
-rw-r--r--  src/mongo/db/s/shardsvr_move_primary_command.cpp | 2
-rw-r--r--  src/mongo/db/s/start_chunk_clone_request.cpp | 2
-rw-r--r--  src/mongo/db/s/transaction_coordinator_structures_test.cpp | 2
-rw-r--r--  src/mongo/db/s/transaction_coordinator_test.cpp | 2
-rw-r--r--  src/mongo/db/s/transaction_coordinator_util.cpp | 2
-rw-r--r--  src/mongo/db/s/type_shard_collection.cpp | 2
-rw-r--r--  src/mongo/db/s/type_shard_identity.cpp | 2
-rw-r--r--  src/mongo/db/s/user_writes_recoverable_critical_section_service.cpp | 8
56 files changed, 86 insertions, 87 deletions
diff --git a/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp b/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp
index 208f96a13ef..aca57e115c9 100644
--- a/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp
+++ b/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp
@@ -152,7 +152,7 @@ getDataSizeInfoForCollections(OperationContext* opCtx,
const ShardsvrGetStatsForBalancingReply reply =
ShardsvrGetStatsForBalancingReply::parse(
- IDLParserErrorContext("ShardsvrGetStatsForBalancingReply"),
+ IDLParserContext("ShardsvrGetStatsForBalancingReply"),
std::move(responseValue.data));
const auto collStatsFromShard = reply.getStats();
diff --git a/src/mongo/db/s/balancer/balancer_commands_scheduler_impl.cpp b/src/mongo/db/s/balancer/balancer_commands_scheduler_impl.cpp
index 7ebe9dac42c..3bc4f16bc89 100644
--- a/src/mongo/db/s/balancer/balancer_commands_scheduler_impl.cpp
+++ b/src/mongo/db/s/balancer/balancer_commands_scheduler_impl.cpp
@@ -328,7 +328,7 @@ SemiFuture<AutoSplitVectorResponse> BalancerCommandsSchedulerImpl::requestAutoSp
if (!responseStatus.isOK()) {
return responseStatus;
}
- return AutoSplitVectorResponse::parse(IDLParserErrorContext("AutoSplitVectorResponse"),
+ return AutoSplitVectorResponse::parse(IDLParserContext("AutoSplitVectorResponse"),
std::move(remoteResponse.data));
})
.semi();
diff --git a/src/mongo/db/s/balancer/balancer_defragmentation_policy_test.cpp b/src/mongo/db/s/balancer/balancer_defragmentation_policy_test.cpp
index a34c79ffc33..1512beb054c 100644
--- a/src/mongo/db/s/balancer/balancer_defragmentation_policy_test.cpp
+++ b/src/mongo/db/s/balancer/balancer_defragmentation_policy_test.cpp
@@ -170,7 +170,7 @@ protected:
.getValue();
if (expectedPhase.has_value()) {
auto storedDefragmentationPhase = DefragmentationPhase_parse(
- IDLParserErrorContext("BalancerDefragmentationPolicyTest"),
+ IDLParserContext("BalancerDefragmentationPolicyTest"),
configDoc.getStringField(CollectionType::kDefragmentationPhaseFieldName));
ASSERT_TRUE(storedDefragmentationPhase == *expectedPhase);
} else {
diff --git a/src/mongo/db/s/balancer/type_migration.cpp b/src/mongo/db/s/balancer/type_migration.cpp
index cd50dce077d..4f1a4ac71b1 100644
--- a/src/mongo/db/s/balancer/type_migration.cpp
+++ b/src/mongo/db/s/balancer/type_migration.cpp
@@ -136,8 +136,8 @@ StatusWith<MigrationType> MigrationType::fromBSON(const BSONObj& source) {
if (!status.isOK())
return status;
- migrationType._forceJumbo = ForceJumbo_parse(IDLParserErrorContext("ForceJumbo"),
- static_cast<int32_t>(forceJumboVal));
+ migrationType._forceJumbo =
+ ForceJumbo_parse(IDLParserContext("ForceJumbo"), static_cast<int32_t>(forceJumboVal));
}
{
diff --git a/src/mongo/db/s/clone_catalog_data_command.cpp b/src/mongo/db/s/clone_catalog_data_command.cpp
index 692bed243fd..b0df5c0cb98 100644
--- a/src/mongo/db/s/clone_catalog_data_command.cpp
+++ b/src/mongo/db/s/clone_catalog_data_command.cpp
@@ -100,7 +100,7 @@ public:
CommandHelpers::uassertCommandRunWithMajority(getName(), opCtx->getWriteConcern());
const auto cloneCatalogDataRequest =
- CloneCatalogData::parse(IDLParserErrorContext("_shardsvrCloneCatalogData"), cmdObj);
+ CloneCatalogData::parse(IDLParserContext("_shardsvrCloneCatalogData"), cmdObj);
const auto dbname = cloneCatalogDataRequest.getCommandParameter().toString();
uassert(
diff --git a/src/mongo/db/s/collmod_coordinator.cpp b/src/mongo/db/s/collmod_coordinator.cpp
index 06e8cd028dd..b7b0f160035 100644
--- a/src/mongo/db/s/collmod_coordinator.cpp
+++ b/src/mongo/db/s/collmod_coordinator.cpp
@@ -82,7 +82,7 @@ CollModCoordinator::CollModCoordinator(ShardingDDLCoordinatorService* service,
void CollModCoordinator::checkIfOptionsConflict(const BSONObj& doc) const {
const auto otherDoc =
- CollModCoordinatorDocument::parse(IDLParserErrorContext("CollModCoordinatorDocument"), doc);
+ CollModCoordinatorDocument::parse(IDLParserContext("CollModCoordinatorDocument"), doc);
const auto& selfReq = _request.toBSON();
const auto& otherReq = otherDoc.getCollModRequest().toBSON();
diff --git a/src/mongo/db/s/config/config_server_test_fixture.cpp b/src/mongo/db/s/config/config_server_test_fixture.cpp
index 15a25e430f8..2d845151764 100644
--- a/src/mongo/db/s/config/config_server_test_fixture.cpp
+++ b/src/mongo/db/s/config/config_server_test_fixture.cpp
@@ -445,7 +445,7 @@ std::vector<KeysCollectionDocument> ConfigServerTestFixture::getKeys(OperationCo
std::vector<KeysCollectionDocument> keys;
const auto& docs = findStatus.getValue().docs;
for (const auto& doc : docs) {
- auto key = KeysCollectionDocument::parse(IDLParserErrorContext("keyDoc"), doc);
+ auto key = KeysCollectionDocument::parse(IDLParserContext("keyDoc"), doc);
keys.push_back(std::move(key));
}
diff --git a/src/mongo/db/s/config/configsvr_coordinator.cpp b/src/mongo/db/s/config/configsvr_coordinator.cpp
index 8952273e8eb..0cbfecadac1 100644
--- a/src/mongo/db/s/config/configsvr_coordinator.cpp
+++ b/src/mongo/db/s/config/configsvr_coordinator.cpp
@@ -50,8 +50,8 @@ const Backoff kExponentialBackoff(Seconds(1), Milliseconds::max());
} // namespace
ConfigsvrCoordinatorMetadata extractConfigsvrCoordinatorMetadata(const BSONObj& stateDoc) {
- return ConfigsvrCoordinatorMetadata::parse(
- IDLParserErrorContext("ConfigsvrCoordinatorMetadata"), stateDoc);
+ return ConfigsvrCoordinatorMetadata::parse(IDLParserContext("ConfigsvrCoordinatorMetadata"),
+ stateDoc);
}
ConfigsvrCoordinator::ConfigsvrCoordinator(const BSONObj& stateDoc)
diff --git a/src/mongo/db/s/config/set_cluster_parameter_coordinator.cpp b/src/mongo/db/s/config/set_cluster_parameter_coordinator.cpp
index 36543c7cbaa..2eeaacf5ea0 100644
--- a/src/mongo/db/s/config/set_cluster_parameter_coordinator.cpp
+++ b/src/mongo/db/s/config/set_cluster_parameter_coordinator.cpp
@@ -58,8 +58,8 @@ const WriteConcernOptions kMajorityWriteConcern{WriteConcernOptions::kMajority,
}
bool SetClusterParameterCoordinator::hasSameOptions(const BSONObj& otherDocBSON) const {
- const auto otherDoc = StateDoc::parse(
- IDLParserErrorContext("SetClusterParameterCoordinatorDocument"), otherDocBSON);
+ const auto otherDoc =
+ StateDoc::parse(IDLParserContext("SetClusterParameterCoordinatorDocument"), otherDocBSON);
return SimpleBSONObjComparator::kInstance.evaluate(_doc.getParameter() ==
otherDoc.getParameter());
}
diff --git a/src/mongo/db/s/config/set_cluster_parameter_coordinator.h b/src/mongo/db/s/config/set_cluster_parameter_coordinator.h
index 2e172991a00..167898c2ecd 100644
--- a/src/mongo/db/s/config/set_cluster_parameter_coordinator.h
+++ b/src/mongo/db/s/config/set_cluster_parameter_coordinator.h
@@ -42,7 +42,7 @@ public:
explicit SetClusterParameterCoordinator(const BSONObj& stateDoc)
: ConfigsvrCoordinator(stateDoc),
- _doc(StateDoc::parse(IDLParserErrorContext("SetClusterParameterCoordinatorDocument"),
+ _doc(StateDoc::parse(IDLParserContext("SetClusterParameterCoordinatorDocument"),
stateDoc)) {}
bool hasSameOptions(const BSONObj& participantDoc) const override;
diff --git a/src/mongo/db/s/config/set_user_write_block_mode_coordinator.cpp b/src/mongo/db/s/config/set_user_write_block_mode_coordinator.cpp
index 2c3139f0cff..ecd04057546 100644
--- a/src/mongo/db/s/config/set_user_write_block_mode_coordinator.cpp
+++ b/src/mongo/db/s/config/set_user_write_block_mode_coordinator.cpp
@@ -84,8 +84,8 @@ void sendSetUserWriteBlockModeCmdToAllShards(OperationContext* opCtx,
} // namespace
bool SetUserWriteBlockModeCoordinator::hasSameOptions(const BSONObj& otherDocBSON) const {
- const auto otherDoc = StateDoc::parse(
- IDLParserErrorContext("SetUserWriteBlockModeCoordinatorDocument"), otherDocBSON);
+ const auto otherDoc =
+ StateDoc::parse(IDLParserContext("SetUserWriteBlockModeCoordinatorDocument"), otherDocBSON);
return _doc.getBlock() == otherDoc.getBlock();
}
diff --git a/src/mongo/db/s/config/set_user_write_block_mode_coordinator.h b/src/mongo/db/s/config/set_user_write_block_mode_coordinator.h
index be51f32f014..8ac13d5aca6 100644
--- a/src/mongo/db/s/config/set_user_write_block_mode_coordinator.h
+++ b/src/mongo/db/s/config/set_user_write_block_mode_coordinator.h
@@ -42,7 +42,7 @@ public:
explicit SetUserWriteBlockModeCoordinator(const BSONObj& stateDoc)
: ConfigsvrCoordinator(stateDoc),
- _doc(StateDoc::parse(IDLParserErrorContext("SetUserWriteBlockModeCoordinatorDocument"),
+ _doc(StateDoc::parse(IDLParserContext("SetUserWriteBlockModeCoordinatorDocument"),
stateDoc)) {}
bool hasSameOptions(const BSONObj& participantDoc) const override;
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_add_shard_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_add_shard_test.cpp
index bfef69bcb9f..3ae9a91fce5 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_add_shard_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_add_shard_test.cpp
@@ -283,8 +283,8 @@ protected:
const auto addShardOpMsgRequest =
OpMsgRequest::fromDBAndBody(request.dbname, request.cmdObj);
- auto addShardCmd = AddShard::parse(IDLParserErrorContext(AddShard::kCommandName),
- addShardOpMsgRequest);
+ auto addShardCmd =
+ AddShard::parse(IDLParserContext(AddShard::kCommandName), addShardOpMsgRequest);
const auto& updateOpField =
add_shard_util::createShardIdentityUpsertForAddShard(addShardCmd);
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_database_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_database_operations.cpp
index 2af9f43bc2d..f3f2a6fd4a6 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_database_operations.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_database_operations.cpp
@@ -134,7 +134,7 @@ DatabaseType ShardingCatalogManager::createDatabase(
auto dbObj = client.findOne(NamespaceString::kConfigDatabasesNamespace, dbMatchFilter);
if (!dbObj.isEmpty()) {
replClient.setLastOpToSystemLastOpTime(opCtx);
- return DatabaseType::parse(IDLParserErrorContext("DatabaseType"), std::move(dbObj));
+ return DatabaseType::parse(IDLParserContext("DatabaseType"), std::move(dbObj));
}
if (dbLock) {
@@ -160,7 +160,7 @@ DatabaseType ShardingCatalogManager::createDatabase(
auto dbDoc = client.findOne(NamespaceString::kConfigDatabasesNamespace, queryBuilder.obj());
auto const [primaryShardPtr, database] = [&] {
if (!dbDoc.isEmpty()) {
- auto actualDb = DatabaseType::parse(IDLParserErrorContext("DatabaseType"), dbDoc);
+ auto actualDb = DatabaseType::parse(IDLParserContext("DatabaseType"), dbDoc);
uassert(ErrorCodes::DatabaseDifferCase,
str::stream() << "can't have 2 databases that just differ on case "
diff --git a/src/mongo/db/s/create_collection_coordinator.cpp b/src/mongo/db/s/create_collection_coordinator.cpp
index 23be2f5d94f..f023a4457df 100644
--- a/src/mongo/db/s/create_collection_coordinator.cpp
+++ b/src/mongo/db/s/create_collection_coordinator.cpp
@@ -323,7 +323,7 @@ const NamespaceString& CreateCollectionCoordinator::nss() const {
void CreateCollectionCoordinator::checkIfOptionsConflict(const BSONObj& doc) const {
// If we have two shard collections on the same namespace, then the arguments must be the same.
const auto otherDoc = CreateCollectionCoordinatorDocument::parse(
- IDLParserErrorContext("CreateCollectionCoordinatorDocument"), doc);
+ IDLParserContext("CreateCollectionCoordinatorDocument"), doc);
uassert(ErrorCodes::ConflictingOperationInProgress,
"Another create collection with different arguments is already running for the same "
@@ -601,7 +601,7 @@ void CreateCollectionCoordinator::_createCollectionAndIndexes(OperationContext*
boost::optional<Collation> collation;
if (!collationBSON.isEmpty()) {
collation.emplace(
- Collation::parse(IDLParserErrorContext("CreateCollectionCoordinator"), collationBSON));
+ Collation::parse(IDLParserContext("CreateCollectionCoordinator"), collationBSON));
}
// We need to implicitly create a timeseries view and underlying bucket collection.
diff --git a/src/mongo/db/s/forwardable_operation_metadata.cpp b/src/mongo/db/s/forwardable_operation_metadata.cpp
index 1a83a2eb1f1..51c06f80347 100644
--- a/src/mongo/db/s/forwardable_operation_metadata.cpp
+++ b/src/mongo/db/s/forwardable_operation_metadata.cpp
@@ -39,7 +39,7 @@ namespace mongo {
ForwardableOperationMetadata::ForwardableOperationMetadata(const BSONObj& obj) {
ForwardableOperationMetadataBase::parseProtected(
- IDLParserErrorContext("ForwardableOperationMetadataBase"), obj);
+ IDLParserContext("ForwardableOperationMetadataBase"), obj);
}
ForwardableOperationMetadata::ForwardableOperationMetadata(OperationContext* opCtx) {
diff --git a/src/mongo/db/s/migration_util.cpp b/src/mongo/db/s/migration_util.cpp
index f0d85e91d0a..52fc0fdaa7a 100644
--- a/src/mongo/db/s/migration_util.cpp
+++ b/src/mongo/db/s/migration_util.cpp
@@ -525,7 +525,7 @@ void resubmitRangeDeletionsOnStepUp(ServiceContext* serviceContext) {
while (cursor->more()) {
retFuture = migrationutil::submitRangeDeletionTask(
opCtx.get(),
- RangeDeletionTask::parse(IDLParserErrorContext("rangeDeletionRecovery"),
+ RangeDeletionTask::parse(IDLParserContext("rangeDeletionRecovery"),
cursor->next()));
rangeDeletionsMarkedAsProcessing++;
}
diff --git a/src/mongo/db/s/move_primary_coordinator.cpp b/src/mongo/db/s/move_primary_coordinator.cpp
index 863a4c17b9d..95f4b851cc5 100644
--- a/src/mongo/db/s/move_primary_coordinator.cpp
+++ b/src/mongo/db/s/move_primary_coordinator.cpp
@@ -54,7 +54,7 @@ void MovePrimaryCoordinator::appendCommandInfo(BSONObjBuilder* cmdInfoBuilder) c
void MovePrimaryCoordinator::checkIfOptionsConflict(const BSONObj& doc) const {
// If we have two shard collections on the same namespace, then the arguments must be the same.
const auto otherDoc = MovePrimaryCoordinatorDocument::parse(
- IDLParserErrorContext("MovePrimaryCoordinatorDocument"), doc);
+ IDLParserContext("MovePrimaryCoordinatorDocument"), doc);
uassert(
ErrorCodes::ConflictingOperationInProgress,
diff --git a/src/mongo/db/s/move_primary_source_manager.cpp b/src/mongo/db/s/move_primary_source_manager.cpp
index e016d060f09..5292e7ce66d 100644
--- a/src/mongo/db/s/move_primary_source_manager.cpp
+++ b/src/mongo/db/s/move_primary_source_manager.cpp
@@ -345,7 +345,7 @@ Status MovePrimarySourceManager::_commitOnConfig(OperationContext* opCtx) {
!databasesVector.empty());
const auto dbType =
- DatabaseType::parse(IDLParserErrorContext("DatabaseType"), databasesVector.front());
+ DatabaseType::parse(IDLParserContext("DatabaseType"), databasesVector.front());
if (dbType.getPrimary() == _toShard) {
return Status::OK();
diff --git a/src/mongo/db/s/persistent_task_queue.h b/src/mongo/db/s/persistent_task_queue.h
index c1802278afe..32a1b4210fc 100644
--- a/src/mongo/db/s/persistent_task_queue.h
+++ b/src/mongo/db/s/persistent_task_queue.h
@@ -226,7 +226,7 @@ PersistentTaskQueue<T>::_loadNextRecord(DBDirectClient& client) {
if (!bson.isEmpty()) {
result = typename PersistentTaskQueue<T>::Record{
bson.getField("_id").Long(),
- T::parse(IDLParserErrorContext("PersistentTaskQueue:" + _storageNss.toString()),
+ T::parse(IDLParserContext("PersistentTaskQueue:" + _storageNss.toString()),
bson.getObjectField("task"))};
}
diff --git a/src/mongo/db/s/persistent_task_queue_test.cpp b/src/mongo/db/s/persistent_task_queue_test.cpp
index 55f490d9a7f..17b7a568613 100644
--- a/src/mongo/db/s/persistent_task_queue_test.cpp
+++ b/src/mongo/db/s/persistent_task_queue_test.cpp
@@ -49,7 +49,7 @@ struct TestTask {
TestTask(BSONObj bson)
: key(bson.getField("key").String()), val(bson.getField("value").Int()) {}
- static TestTask parse(IDLParserErrorContext, BSONObj bson) {
+ static TestTask parse(IDLParserContext, BSONObj bson) {
return TestTask{bson};
}
diff --git a/src/mongo/db/s/recoverable_critical_section_service.cpp b/src/mongo/db/s/recoverable_critical_section_service.cpp
index dbf5b492f71..57bf7501caa 100644
--- a/src/mongo/db/s/recoverable_critical_section_service.cpp
+++ b/src/mongo/db/s/recoverable_critical_section_service.cpp
@@ -112,7 +112,7 @@ void RecoverableCriticalSectionService::acquireRecoverableCriticalSectionBlockWr
if (cursor->more()) {
const auto bsonObj = cursor->next();
const auto collCSDoc = CollectionCriticalSectionDocument::parse(
- IDLParserErrorContext("AcquireRecoverableCSBW"), bsonObj);
+ IDLParserContext("AcquireRecoverableCSBW"), bsonObj);
invariant(collCSDoc.getReason().woCompare(reason) == 0,
str::stream()
@@ -203,7 +203,7 @@ void RecoverableCriticalSectionService::promoteRecoverableCriticalSectionToBlock
<< " but the critical section wasn't acquired first blocking writers.");
BSONObj bsonObj = cursor->next();
const auto collCSDoc = CollectionCriticalSectionDocument::parse(
- IDLParserErrorContext("AcquireRecoverableCSBR"), bsonObj);
+ IDLParserContext("AcquireRecoverableCSBR"), bsonObj);
invariant(
collCSDoc.getReason().woCompare(reason) == 0,
@@ -313,7 +313,7 @@ void RecoverableCriticalSectionService::releaseRecoverableCriticalSection(
BSONObj bsonObj = cursor->next();
const auto collCSDoc = CollectionCriticalSectionDocument::parse(
- IDLParserErrorContext("ReleaseRecoverableCS"), bsonObj);
+ IDLParserContext("ReleaseRecoverableCS"), bsonObj);
invariant(
collCSDoc.getReason().woCompare(reason) == 0,
diff --git a/src/mongo/db/s/refine_collection_shard_key_coordinator.cpp b/src/mongo/db/s/refine_collection_shard_key_coordinator.cpp
index bfeec770791..ae507094e83 100644
--- a/src/mongo/db/s/refine_collection_shard_key_coordinator.cpp
+++ b/src/mongo/db/s/refine_collection_shard_key_coordinator.cpp
@@ -94,7 +94,7 @@ RefineCollectionShardKeyCoordinator::RefineCollectionShardKeyCoordinator(
void RefineCollectionShardKeyCoordinator::checkIfOptionsConflict(const BSONObj& doc) const {
// If we have two refine collections on the same namespace, then the arguments must be the same.
const auto otherDoc = RefineCollectionShardKeyCoordinatorDocument::parse(
- IDLParserErrorContext("RefineCollectionShardKeyCoordinatorDocument"), doc);
+ IDLParserContext("RefineCollectionShardKeyCoordinatorDocument"), doc);
uassert(ErrorCodes::ConflictingOperationInProgress,
"Another refine collection with different arguments is already running for the same "
diff --git a/src/mongo/db/s/rename_collection_coordinator.cpp b/src/mongo/db/s/rename_collection_coordinator.cpp
index bec561b6a2b..11a00d34f04 100644
--- a/src/mongo/db/s/rename_collection_coordinator.cpp
+++ b/src/mongo/db/s/rename_collection_coordinator.cpp
@@ -97,7 +97,7 @@ RenameCollectionCoordinator::RenameCollectionCoordinator(ShardingDDLCoordinatorS
void RenameCollectionCoordinator::checkIfOptionsConflict(const BSONObj& doc) const {
const auto otherDoc = RenameCollectionCoordinatorDocument::parse(
- IDLParserErrorContext("RenameCollectionCoordinatorDocument"), doc);
+ IDLParserContext("RenameCollectionCoordinatorDocument"), doc);
const auto& selfReq = _request.toBSON();
const auto& otherReq = otherDoc.getRenameCollectionRequest().toBSON();
diff --git a/src/mongo/db/s/rename_collection_participant_service.cpp b/src/mongo/db/s/rename_collection_participant_service.cpp
index 58ced2cc9ac..6fc4a2bed2f 100644
--- a/src/mongo/db/s/rename_collection_participant_service.cpp
+++ b/src/mongo/db/s/rename_collection_participant_service.cpp
@@ -155,7 +155,7 @@ RenameParticipantInstance::~RenameParticipantInstance() {
bool RenameParticipantInstance::hasSameOptions(const BSONObj& participantDoc) {
const auto otherDoc = RenameCollectionParticipantDocument::parse(
- IDLParserErrorContext("RenameCollectionParticipantDocument"), participantDoc);
+ IDLParserContext("RenameCollectionParticipantDocument"), participantDoc);
const auto& selfReq = _doc.getRenameCollectionRequest().toBSON();
const auto& otherReq = otherDoc.getRenameCollectionRequest().toBSON();
diff --git a/src/mongo/db/s/rename_collection_participant_service.h b/src/mongo/db/s/rename_collection_participant_service.h
index 73da51667ec..0f683458122 100644
--- a/src/mongo/db/s/rename_collection_participant_service.h
+++ b/src/mongo/db/s/rename_collection_participant_service.h
@@ -86,7 +86,7 @@ public:
explicit RenameParticipantInstance(const BSONObj& participantDoc)
: _doc(RenameCollectionParticipantDocument::parse(
- IDLParserErrorContext("RenameCollectionParticipantDocument"), participantDoc)) {}
+ IDLParserContext("RenameCollectionParticipantDocument"), participantDoc)) {}
~RenameParticipantInstance();
diff --git a/src/mongo/db/s/reshard_collection_coordinator.cpp b/src/mongo/db/s/reshard_collection_coordinator.cpp
index da1c65e01d3..afb1a0f7ab4 100644
--- a/src/mongo/db/s/reshard_collection_coordinator.cpp
+++ b/src/mongo/db/s/reshard_collection_coordinator.cpp
@@ -112,7 +112,7 @@ ReshardCollectionCoordinator::ReshardCollectionCoordinator(ShardingDDLCoordinato
void ReshardCollectionCoordinator::checkIfOptionsConflict(const BSONObj& doc) const {
const auto otherDoc = ReshardCollectionCoordinatorDocument::parse(
- IDLParserErrorContext("ReshardCollectionCoordinatorDocument"), doc);
+ IDLParserContext("ReshardCollectionCoordinatorDocument"), doc);
uassert(ErrorCodes::ConflictingOperationInProgress,
"Another reshard collection with different arguments is already running for the same "
diff --git a/src/mongo/db/s/resharding/document_source_resharding_iterate_transaction.cpp b/src/mongo/db/s/resharding/document_source_resharding_iterate_transaction.cpp
index 60f034dcfea..c37eb05fc80 100644
--- a/src/mongo/db/s/resharding/document_source_resharding_iterate_transaction.cpp
+++ b/src/mongo/db/s/resharding/document_source_resharding_iterate_transaction.cpp
@@ -192,8 +192,7 @@ DocumentSource::GetNextResult DocumentSourceReshardingIterateTransaction::doGetN
bool DocumentSourceReshardingIterateTransaction::_isTransactionOplogEntry(const Document& doc) {
auto op = doc[repl::OplogEntry::kOpTypeFieldName];
- auto opType =
- repl::OpType_parse(IDLParserErrorContext("ReshardingEntry.op"), op.getStringData());
+ auto opType = repl::OpType_parse(IDLParserContext("ReshardingEntry.op"), op.getStringData());
auto commandVal = doc["o"];
if (opType != repl::OpTypeEnum::kCommand || doc["txnNumber"].missing() ||
diff --git a/src/mongo/db/s/resharding/resharding_agg_test.cpp b/src/mongo/db/s/resharding/resharding_agg_test.cpp
index c49467f79f9..9c2f9fcba23 100644
--- a/src/mongo/db/s/resharding/resharding_agg_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_agg_test.cpp
@@ -257,7 +257,7 @@ bool validateOplogId(const Timestamp& clusterTime,
const mongo::Document& sourceDoc,
const repl::OplogEntry& oplogEntry) {
auto oplogIdExpected = ReshardingDonorOplogId{clusterTime, sourceDoc["ts"].getTimestamp()};
- auto oplogId = ReshardingDonorOplogId::parse(IDLParserErrorContext("ReshardingAggTest"),
+ auto oplogId = ReshardingDonorOplogId::parse(IDLParserContext("ReshardingAggTest"),
oplogEntry.get_id()->getDocument().toBson());
return oplogIdExpected == oplogId;
}
@@ -351,7 +351,7 @@ protected:
ReshardingDonorOplogId getOplogId(const repl::MutableOplogEntry& oplog) {
- return ReshardingDonorOplogId::parse(IDLParserErrorContext("ReshardingAggTest::getOplogId"),
+ return ReshardingDonorOplogId::parse(IDLParserContext("ReshardingAggTest::getOplogId"),
oplog.get_id()->getDocument().toBson());
}
diff --git a/src/mongo/db/s/resharding/resharding_coordinator_service.cpp b/src/mongo/db/s/resharding/resharding_coordinator_service.cpp
index 00a5310b176..181024995d2 100644
--- a/src/mongo/db/s/resharding/resharding_coordinator_service.cpp
+++ b/src/mongo/db/s/resharding/resharding_coordinator_service.cpp
@@ -980,7 +980,7 @@ std::shared_ptr<repl::PrimaryOnlyService::Instance> ReshardingCoordinatorService
BSONObj initialState) {
return std::make_shared<ReshardingCoordinator>(
this,
- ReshardingCoordinatorDocument::parse(IDLParserErrorContext("ReshardingCoordinatorStateDoc"),
+ ReshardingCoordinatorDocument::parse(IDLParserContext("ReshardingCoordinatorStateDoc"),
std::move(initialState)),
std::make_shared<ReshardingCoordinatorExternalStateImpl>(),
_serviceContext);
diff --git a/src/mongo/db/s/resharding/resharding_coordinator_service_test.cpp b/src/mongo/db/s/resharding/resharding_coordinator_service_test.cpp
index fc4c6b722d9..a849cc5ca87 100644
--- a/src/mongo/db/s/resharding/resharding_coordinator_service_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_coordinator_service_test.cpp
@@ -116,8 +116,8 @@ public:
std::shared_ptr<PrimaryOnlyService::Instance> constructInstance(BSONObj initialState) override {
return std::make_shared<ReshardingCoordinator>(
this,
- ReshardingCoordinatorDocument::parse(
- IDLParserErrorContext("ReshardingCoordinatorStateDoc"), std::move(initialState)),
+ ReshardingCoordinatorDocument::parse(IDLParserContext("ReshardingCoordinatorStateDoc"),
+ std::move(initialState)),
std::make_shared<ExternalStateForTest>(),
_serviceContext);
}
@@ -231,7 +231,7 @@ public:
DBDirectClient client(opCtx);
auto doc = client.findOne(NamespaceString::kConfigReshardingOperationsNamespace, BSONObj{});
- IDLParserErrorContext errCtx("reshardingCoordFromTest");
+ IDLParserContext errCtx("reshardingCoordFromTest");
return ReshardingCoordinatorDocument::parse(errCtx, doc);
}
diff --git a/src/mongo/db/s/resharding/resharding_coordinator_test.cpp b/src/mongo/db/s/resharding/resharding_coordinator_test.cpp
index 35ffa75b31a..a5236d91c5b 100644
--- a/src/mongo/db/s/resharding/resharding_coordinator_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_coordinator_test.cpp
@@ -248,7 +248,7 @@ protected:
BSON("ns" << expectedCoordinatorDoc.getSourceNss().ns()));
auto coordinatorDoc = ReshardingCoordinatorDocument::parse(
- IDLParserErrorContext("ReshardingCoordinatorTest"), doc);
+ IDLParserContext("ReshardingCoordinatorTest"), doc);
ASSERT_EQUALS(coordinatorDoc.getReshardingUUID(),
expectedCoordinatorDoc.getReshardingUUID());
diff --git a/src/mongo/db/s/resharding/resharding_destined_recipient_test.cpp b/src/mongo/db/s/resharding/resharding_destined_recipient_test.cpp
index 859450a3b6d..d81662f968c 100644
--- a/src/mongo/db/s/resharding/resharding_destined_recipient_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_destined_recipient_test.cpp
@@ -355,7 +355,7 @@ TEST_F(DestinedRecipientTest, TestOpObserverSetsDestinedRecipientOnInsertsInTran
auto info = repl::ApplyOpsCommandInfo::parse(entry.getOperationToApply());
auto ops = info.getOperations();
- auto replOp = repl::ReplOperation::parse(IDLParserErrorContext("insertOp"), ops[0]);
+ auto replOp = repl::ReplOperation::parse(IDLParserContext("insertOp"), ops[0]);
ASSERT_EQ(replOp.getNss(), kNss);
auto recipShard = replOp.getDestinedRecipient();
@@ -442,7 +442,7 @@ TEST_F(DestinedRecipientTest, TestOpObserverSetsDestinedRecipientOnUpdatesInTran
auto info = repl::ApplyOpsCommandInfo::parse(entry.getOperationToApply());
auto ops = info.getOperations();
- auto replOp = repl::ReplOperation::parse(IDLParserErrorContext("insertOp"), ops[0]);
+ auto replOp = repl::ReplOperation::parse(IDLParserContext("insertOp"), ops[0]);
ASSERT_EQ(replOp.getNss(), kNss);
auto recipShard = replOp.getDestinedRecipient();
@@ -487,7 +487,7 @@ TEST_F(DestinedRecipientTest, TestOpObserverSetsDestinedRecipientOnDeletesInTran
auto info = repl::ApplyOpsCommandInfo::parse(entry.getOperationToApply());
auto ops = info.getOperations();
- auto replOp = repl::ReplOperation::parse(IDLParserErrorContext("deleteOp"), ops[0]);
+ auto replOp = repl::ReplOperation::parse(IDLParserContext("deleteOp"), ops[0]);
ASSERT_EQ(replOp.getNss(), kNss);
auto recipShard = replOp.getDestinedRecipient();
diff --git a/src/mongo/db/s/resharding/resharding_donor_oplog_iterator.cpp b/src/mongo/db/s/resharding/resharding_donor_oplog_iterator.cpp
index 0a9027deea2..5a9c8343fa2 100644
--- a/src/mongo/db/s/resharding/resharding_donor_oplog_iterator.cpp
+++ b/src/mongo/db/s/resharding/resharding_donor_oplog_iterator.cpp
@@ -66,7 +66,7 @@ namespace {
*/
ReshardingDonorOplogId getId(const repl::OplogEntry& oplog) {
return ReshardingDonorOplogId::parse(
- IDLParserErrorContext("ReshardingDonorOplogIterator::getOplogId"),
+ IDLParserContext("ReshardingDonorOplogIterator::getOplogId"),
oplog.get_id()->getDocument().toBson());
}
diff --git a/src/mongo/db/s/resharding/resharding_donor_service_test.cpp b/src/mongo/db/s/resharding/resharding_donor_service_test.cpp
index 92f848d2d17..d4f5046e340 100644
--- a/src/mongo/db/s/resharding/resharding_donor_service_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_donor_service_test.cpp
@@ -289,7 +289,7 @@ TEST_F(ReshardingDonorServiceTest, WritesNoOpOplogEntryOnReshardingBegin) {
ReshardBeginChangeEventO2Field expectedChangeEvent{sourceNss, doc.getReshardingUUID()};
auto receivedChangeEvent = ReshardBeginChangeEventO2Field::parse(
- IDLParserErrorContext("ReshardBeginChangeEventO2Field"), *op.getObject2());
+ IDLParserContext("ReshardBeginChangeEventO2Field"), *op.getObject2());
ASSERT_EQ(OpType_serializer(op.getOpType()), OpType_serializer(repl::OpTypeEnum::kNoop))
<< op.getEntry();
diff --git a/src/mongo/db/s/resharding/resharding_op_observer.cpp b/src/mongo/db/s/resharding/resharding_op_observer.cpp
index 9fbfa40bf55..3441f7c1eea 100644
--- a/src/mongo/db/s/resharding/resharding_op_observer.cpp
+++ b/src/mongo/db/s/resharding/resharding_op_observer.cpp
@@ -60,7 +60,7 @@ std::shared_ptr<ReshardingCoordinatorObserver> getReshardingCoordinatorObserver(
}
boost::optional<Timestamp> parseNewMinFetchTimestampValue(const BSONObj& obj) {
- auto doc = ReshardingDonorDocument::parse(IDLParserErrorContext("Resharding"), obj);
+ auto doc = ReshardingDonorDocument::parse(IDLParserContext("Resharding"), obj);
if (doc.getMutableState().getState() == DonorStateEnum::kDonatingInitialData) {
return doc.getMutableState().getMinFetchTimestamp().get();
} else {
@@ -218,7 +218,7 @@ void ReshardingOpObserver::onUpdate(OperationContext* opCtx, const OplogUpdateEn
if (args.nss == NamespaceString::kConfigReshardingOperationsNamespace) {
auto newCoordinatorDoc = ReshardingCoordinatorDocument::parse(
- IDLParserErrorContext("reshardingCoordinatorDoc"), args.updateArgs->updatedDoc);
+ IDLParserContext("reshardingCoordinatorDoc"), args.updateArgs->updatedDoc);
opCtx->recoveryUnit()->onCommit([opCtx, newCoordinatorDoc = std::move(newCoordinatorDoc)](
boost::optional<Timestamp> unusedCommitTime) mutable {
try {
diff --git a/src/mongo/db/s/resharding/resharding_oplog_applier.cpp b/src/mongo/db/s/resharding/resharding_oplog_applier.cpp
index d9edf786371..470b5b382d2 100644
--- a/src/mongo/db/s/resharding/resharding_oplog_applier.cpp
+++ b/src/mongo/db/s/resharding/resharding_oplog_applier.cpp
@@ -220,7 +220,7 @@ boost::optional<ReshardingOplogApplierProgress> ReshardingOplogApplier::checkSto
return boost::none;
}
- IDLParserErrorContext ctx("ReshardingOplogApplierProgress");
+ IDLParserContext ctx("ReshardingOplogApplierProgress");
return ReshardingOplogApplierProgress::parse(ctx, doc);
}
diff --git a/src/mongo/db/s/resharding/resharding_recipient_service.cpp b/src/mongo/db/s/resharding/resharding_recipient_service.cpp
index 21b3042fef6..9a63fe0daea 100644
--- a/src/mongo/db/s/resharding/resharding_recipient_service.cpp
+++ b/src/mongo/db/s/resharding/resharding_recipient_service.cpp
@@ -1200,7 +1200,7 @@ void ReshardingRecipientService::RecipientStateMachine::_restoreMetrics(
if (!result.isEmpty()) {
progressDoc = ReshardingOplogApplierProgress::parse(
- IDLParserErrorContext("resharding-recipient-service-progress-doc"), result);
+ IDLParserContext("resharding-recipient-service-progress-doc"), result);
oplogEntriesApplied += progressDoc->getNumEntriesApplied();
}
}
diff --git a/src/mongo/db/s/resharding/resharding_recipient_service_test.cpp b/src/mongo/db/s/resharding/resharding_recipient_service_test.cpp
index 26217cee42f..d054ae355ab 100644
--- a/src/mongo/db/s/resharding/resharding_recipient_service_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_recipient_service_test.cpp
@@ -647,7 +647,7 @@ TEST_F(ReshardingRecipientServiceTest, WritesNoopOplogEntryOnReshardDoneCatchUp)
ReshardDoneCatchUpChangeEventO2Field expectedChangeEvent{sourceNss, doc.getReshardingUUID()};
auto receivedChangeEvent = ReshardDoneCatchUpChangeEventO2Field::parse(
- IDLParserErrorContext("ReshardDoneCatchUpChangeEventO2Field"), *op.getObject2());
+ IDLParserContext("ReshardDoneCatchUpChangeEventO2Field"), *op.getObject2());
ASSERT_EQ(OpType_serializer(op.getOpType()), OpType_serializer(repl::OpTypeEnum::kNoop))
<< op.getEntry();
diff --git a/src/mongo/db/s/resharding/resharding_txn_cloner_test.cpp b/src/mongo/db/s/resharding/resharding_txn_cloner_test.cpp
index 24045678550..223da448f54 100644
--- a/src/mongo/db/s/resharding/resharding_txn_cloner_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_txn_cloner_test.cpp
@@ -179,7 +179,7 @@ protected:
}
LogicalSessionId getTxnRecordLsid(BSONObj txnRecord) {
- return SessionTxnRecord::parse(IDLParserErrorContext("ReshardingTxnClonerTest"), txnRecord)
+ return SessionTxnRecord::parse(IDLParserContext("ReshardingTxnClonerTest"), txnRecord)
.getSessionId();
}
@@ -272,7 +272,7 @@ protected:
BSON(SessionTxnRecord::kSessionIdFieldName << sessionId.toBSON()));
ASSERT(!bsonTxn.isEmpty());
auto txn = SessionTxnRecord::parse(
- IDLParserErrorContext("resharding config transactions cloning test"), bsonTxn);
+ IDLParserContext("resharding config transactions cloning test"), bsonTxn);
ASSERT_EQ(txn.getTxnNum(), txnNum);
ASSERT_EQ(txn.getLastWriteOpTime(), oplogEntry.getOpTime());
}
@@ -303,8 +303,8 @@ protected:
return boost::none;
}
- return ReshardingTxnClonerProgress::parse(
- IDLParserErrorContext("ReshardingTxnClonerProgress"), progressDoc);
+ return ReshardingTxnClonerProgress::parse(IDLParserContext("ReshardingTxnClonerProgress"),
+ progressDoc);
}
boost::optional<LogicalSessionId> getProgressLsid(const ReshardingSourceId& sourceId) {
diff --git a/src/mongo/db/s/session_catalog_migration_source.cpp b/src/mongo/db/s/session_catalog_migration_source.cpp
index e97005571ef..afa613481c6 100644
--- a/src/mongo/db/s/session_catalog_migration_source.cpp
+++ b/src/mongo/db/s/session_catalog_migration_source.cpp
@@ -78,7 +78,7 @@ boost::optional<repl::OplogEntry> forgeNoopEntryFromImageCollection(
return boost::none;
}
- auto image = repl::ImageEntry::parse(IDLParserErrorContext("image entry"), imageObj);
+ auto image = repl::ImageEntry::parse(IDLParserContext("image entry"), imageObj);
if (image.getTxnNumber() != retryableFindAndModifyOplogEntry.getTxnNumber()) {
// In our snapshot, fetch the current transaction number for a session. If that transaction
// number doesn't match what's found on the image lookup, it implies that the image is not
@@ -243,8 +243,8 @@ SessionCatalogMigrationSource::SessionCatalogMigrationSource(OperationContext* o
boost::optional<LastTxnSession> lastTxnSession;
while (cursor->more()) {
- const auto txnRecord = SessionTxnRecord::parse(
- IDLParserErrorContext("Session migration cloning"), cursor->next());
+ const auto txnRecord =
+ SessionTxnRecord::parse(IDLParserContext("Session migration cloning"), cursor->next());
const auto sessionId = txnRecord.getSessionId();
const auto parentSessionId = castToParentSessionId(sessionId);
diff --git a/src/mongo/db/s/set_allow_migrations_coordinator.cpp b/src/mongo/db/s/set_allow_migrations_coordinator.cpp
index d8cb15afb2e..ed1e55da18f 100644
--- a/src/mongo/db/s/set_allow_migrations_coordinator.cpp
+++ b/src/mongo/db/s/set_allow_migrations_coordinator.cpp
@@ -54,7 +54,7 @@ void SetAllowMigrationsCoordinator::checkIfOptionsConflict(const BSONObj& doc) c
// If we have two set allow migrations on the same namespace, then the arguments must be the
// same.
const auto otherDoc = SetAllowMigrationsCoordinatorDocument::parse(
- IDLParserErrorContext("SetAllowMigrationsCoordinatorDocument"), doc);
+ IDLParserContext("SetAllowMigrationsCoordinatorDocument"), doc);
uassert(ErrorCodes::ConflictingOperationInProgress,
"Another set allow migrations with different arguments is already running for the same "
diff --git a/src/mongo/db/s/shard_metadata_util.cpp b/src/mongo/db/s/shard_metadata_util.cpp
index e52a5e28d1a..ae0dedc5cfe 100644
--- a/src/mongo/db/s/shard_metadata_util.cpp
+++ b/src/mongo/db/s/shard_metadata_util.cpp
@@ -189,7 +189,7 @@ StatusWith<ShardDatabaseType> readShardDatabasesEntry(OperationContext* opCtx, S
}
BSONObj document = cursor->nextSafe();
- return ShardDatabaseType::parse(IDLParserErrorContext("ShardDatabaseType"), document);
+ return ShardDatabaseType::parse(IDLParserContext("ShardDatabaseType"), document);
} catch (const DBException& ex) {
return ex.toStatus(str::stream()
<< "Failed to read the '" << dbName.toString() << "' entry locally from "
diff --git a/src/mongo/db/s/shard_server_op_observer.cpp b/src/mongo/db/s/shard_server_op_observer.cpp
index 9179504e4f2..5069ac4613c 100644
--- a/src/mongo/db/s/shard_server_op_observer.cpp
+++ b/src/mongo/db/s/shard_server_op_observer.cpp
@@ -268,8 +268,8 @@ void ShardServerOpObserver::onInserts(OperationContext* opCtx,
return;
}
- auto deletionTask = RangeDeletionTask::parse(
- IDLParserErrorContext("ShardServerOpObserver"), insertedDoc);
+ auto deletionTask =
+ RangeDeletionTask::parse(IDLParserContext("ShardServerOpObserver"), insertedDoc);
if (!deletionTask.getPending()) {
opCtx->recoveryUnit()->registerChange(
@@ -284,7 +284,7 @@ void ShardServerOpObserver::onInserts(OperationContext* opCtx,
if (nss == NamespaceString::kCollectionCriticalSectionsNamespace &&
!recoverable_critical_section_util::inRecoveryMode(opCtx)) {
const auto collCSDoc = CollectionCriticalSectionDocument::parse(
- IDLParserErrorContext("ShardServerOpObserver"), insertedDoc);
+ IDLParserContext("ShardServerOpObserver"), insertedDoc);
opCtx->recoveryUnit()->onCommit([opCtx,
insertedNss = collCSDoc.getNss(),
reason = collCSDoc.getReason().getOwned()](
@@ -411,8 +411,8 @@ void ShardServerOpObserver::onUpdate(OperationContext* opCtx, const OplogUpdateE
update_oplog_entry::isFieldRemovedByUpdate(args.updateArgs->update, "pending");
if (pendingFieldRemovedStatus == update_oplog_entry::FieldRemovedStatus::kFieldRemoved) {
- auto deletionTask = RangeDeletionTask::parse(
- IDLParserErrorContext("ShardServerOpObserver"), args.updateArgs->updatedDoc);
+ auto deletionTask = RangeDeletionTask::parse(IDLParserContext("ShardServerOpObserver"),
+ args.updateArgs->updatedDoc);
if (deletionTask.getDonorShardId() != ShardingState::get(opCtx)->shardId()) {
// Range deletion tasks for moved away chunks are scheduled through the
@@ -426,7 +426,7 @@ void ShardServerOpObserver::onUpdate(OperationContext* opCtx, const OplogUpdateE
if (args.nss == NamespaceString::kCollectionCriticalSectionsNamespace &&
!recoverable_critical_section_util::inRecoveryMode(opCtx)) {
const auto collCSDoc = CollectionCriticalSectionDocument::parse(
- IDLParserErrorContext("ShardServerOpObserver"), args.updateArgs->updatedDoc);
+ IDLParserContext("ShardServerOpObserver"), args.updateArgs->updatedDoc);
opCtx->recoveryUnit()->onCommit(
[opCtx, updatedNss = collCSDoc.getNss(), reason = collCSDoc.getReason().getOwned()](
@@ -522,7 +522,7 @@ void ShardServerOpObserver::onDelete(OperationContext* opCtx,
!recoverable_critical_section_util::inRecoveryMode(opCtx)) {
const auto& deletedDoc = documentId;
const auto collCSDoc = CollectionCriticalSectionDocument::parse(
- IDLParserErrorContext("ShardServerOpObserver"), deletedDoc);
+ IDLParserContext("ShardServerOpObserver"), deletedDoc);
opCtx->recoveryUnit()->onCommit(
[opCtx, deletedNss = collCSDoc.getNss(), reason = collCSDoc.getReason().getOwned()](
diff --git a/src/mongo/db/s/sharding_ddl_coordinator.cpp b/src/mongo/db/s/sharding_ddl_coordinator.cpp
index 4795370b17c..7d880424df3 100644
--- a/src/mongo/db/s/sharding_ddl_coordinator.cpp
+++ b/src/mongo/db/s/sharding_ddl_coordinator.cpp
@@ -74,8 +74,8 @@ bool isRetriableErrorForDDLCoordinator(const Status& status) {
} // namespace
ShardingDDLCoordinatorMetadata extractShardingDDLCoordinatorMetadata(const BSONObj& coorDoc) {
- return ShardingDDLCoordinatorMetadata::parse(
- IDLParserErrorContext("ShardingDDLCoordinatorMetadata"), coorDoc);
+ return ShardingDDLCoordinatorMetadata::parse(IDLParserContext("ShardingDDLCoordinatorMetadata"),
+ coorDoc);
}
ShardingDDLCoordinator::ShardingDDLCoordinator(ShardingDDLCoordinatorService* service,
diff --git a/src/mongo/db/s/sharding_ddl_coordinator.h b/src/mongo/db/s/sharding_ddl_coordinator.h
index 9119d44583d..66ec5706030 100644
--- a/src/mongo/db/s/sharding_ddl_coordinator.h
+++ b/src/mongo/db/s/sharding_ddl_coordinator.h
@@ -189,7 +189,7 @@ protected:
: ShardingDDLCoordinator(service, initialStateDoc),
_coordinatorName(name),
_initialState(initialStateDoc.getOwned()),
- _doc(StateDoc::parse(IDLParserErrorContext("CoordinatorDocument"), _initialState)) {}
+ _doc(StateDoc::parse(IDLParserContext("CoordinatorDocument"), _initialState)) {}
ShardingDDLCoordinatorMetadata const& metadata() const override {
return _doc.getShardingDDLCoordinatorMetadata();
diff --git a/src/mongo/db/s/shardsvr_collmod_command.cpp b/src/mongo/db/s/shardsvr_collmod_command.cpp
index 96400a31776..b1bef41021e 100644
--- a/src/mongo/db/s/shardsvr_collmod_command.cpp
+++ b/src/mongo/db/s/shardsvr_collmod_command.cpp
@@ -127,7 +127,7 @@ public:
void validateResult(const BSONObj& resultObj) final {
StringDataSet ignorableFields({"raw", "ok", "errmsg"});
- auto reply = Response::parse(IDLParserErrorContext("CollModReply"),
+ auto reply = Response::parse(IDLParserContext("CollModReply"),
resultObj.removeFields(ignorableFields));
coll_mod_reply_validation::validateReply(reply);
}
diff --git a/src/mongo/db/s/shardsvr_collmod_participant_command.cpp b/src/mongo/db/s/shardsvr_collmod_participant_command.cpp
index 2a7e78886b2..64d7c97b798 100644
--- a/src/mongo/db/s/shardsvr_collmod_participant_command.cpp
+++ b/src/mongo/db/s/shardsvr_collmod_participant_command.cpp
@@ -122,7 +122,7 @@ public:
auto performViewChange = request().getPerformViewChange();
uassertStatusOK(timeseries::processCollModCommandWithTimeSeriesTranslation(
opCtx, ns(), cmd, performViewChange, &builder));
- return CollModReply::parse(IDLParserErrorContext("CollModReply"), builder.obj());
+ return CollModReply::parse(IDLParserContext("CollModReply"), builder.obj());
}
private:
diff --git a/src/mongo/db/s/shardsvr_move_primary_command.cpp b/src/mongo/db/s/shardsvr_move_primary_command.cpp
index 2fbb2f9362b..9cde19c4ee4 100644
--- a/src/mongo/db/s/shardsvr_move_primary_command.cpp
+++ b/src/mongo/db/s/shardsvr_move_primary_command.cpp
@@ -93,7 +93,7 @@ public:
uassertStatusOK(ShardingState::get(opCtx)->canAcceptShardedCommands());
const auto movePrimaryRequest =
- ShardMovePrimary::parse(IDLParserErrorContext("_shardsvrMovePrimary"), cmdObj);
+ ShardMovePrimary::parse(IDLParserContext("_shardsvrMovePrimary"), cmdObj);
const auto dbname = parseNs("", cmdObj);
const NamespaceString dbNss(dbname);
diff --git a/src/mongo/db/s/start_chunk_clone_request.cpp b/src/mongo/db/s/start_chunk_clone_request.cpp
index 86567dce161..96854b8dd8a 100644
--- a/src/mongo/db/s/start_chunk_clone_request.cpp
+++ b/src/mongo/db/s/start_chunk_clone_request.cpp
@@ -154,7 +154,7 @@ StatusWith<StartChunkCloneRequest> StartChunkCloneRequest::createFromCommand(Nam
request._migrationId = UUID::parse(obj);
request._lsid =
- LogicalSessionId::parse(IDLParserErrorContext("StartChunkCloneRequest"), obj[kLsid].Obj());
+ LogicalSessionId::parse(IDLParserContext("StartChunkCloneRequest"), obj[kLsid].Obj());
request._txnNumber = obj.getField(kTxnNumber).Long();
return request;
diff --git a/src/mongo/db/s/transaction_coordinator_structures_test.cpp b/src/mongo/db/s/transaction_coordinator_structures_test.cpp
index df1d3cc2ade..587c8d16ae0 100644
--- a/src/mongo/db/s/transaction_coordinator_structures_test.cpp
+++ b/src/mongo/db/s/transaction_coordinator_structures_test.cpp
@@ -64,7 +64,7 @@ TEST(CoordinatorCommitDecisionTest, SerializeAbortHasNoTimestampAndAbortStatus)
ASSERT_BSONOBJ_EQ(expectedObj, obj);
auto deserializedDecision =
- CoordinatorCommitDecision::parse(IDLParserErrorContext("AbortTest"), expectedObj);
+ CoordinatorCommitDecision::parse(IDLParserContext("AbortTest"), expectedObj);
ASSERT_BSONOBJ_EQ(obj, deserializedDecision.toBSON());
}
diff --git a/src/mongo/db/s/transaction_coordinator_test.cpp b/src/mongo/db/s/transaction_coordinator_test.cpp
index b4a643339e6..9c02733fdb0 100644
--- a/src/mongo/db/s/transaction_coordinator_test.cpp
+++ b/src/mongo/db/s/transaction_coordinator_test.cpp
@@ -160,7 +160,7 @@ protected:
TransactionCoordinatorDocument doc;
do {
doc = TransactionCoordinatorDocument::parse(
- IDLParserErrorContext("dummy"),
+ IDLParserContext("dummy"),
dbClient.findOne(NamespaceString::kTransactionCoordinatorsNamespace, BSONObj{}));
} while (!doc.getDecision());
}
diff --git a/src/mongo/db/s/transaction_coordinator_util.cpp b/src/mongo/db/s/transaction_coordinator_util.cpp
index 252ec91dd15..c591bf839da 100644
--- a/src/mongo/db/s/transaction_coordinator_util.cpp
+++ b/src/mongo/db/s/transaction_coordinator_util.cpp
@@ -664,7 +664,7 @@ std::vector<TransactionCoordinatorDocument> readAllCoordinatorDocs(OperationCont
while (coordinatorDocsCursor->more()) {
// TODO (SERVER-38307): Try/catch around parsing the document and skip the document if it
// fails to parse.
- auto nextDecision = TransactionCoordinatorDocument::parse(IDLParserErrorContext(""),
+ auto nextDecision = TransactionCoordinatorDocument::parse(IDLParserContext(""),
coordinatorDocsCursor->next());
allCoordinatorDocs.push_back(nextDecision);
}
diff --git a/src/mongo/db/s/type_shard_collection.cpp b/src/mongo/db/s/type_shard_collection.cpp
index 2628297e0d7..480eb393403 100644
--- a/src/mongo/db/s/type_shard_collection.cpp
+++ b/src/mongo/db/s/type_shard_collection.cpp
@@ -47,7 +47,7 @@ ShardCollectionType::ShardCollectionType(NamespaceString nss,
unique) {}
ShardCollectionType::ShardCollectionType(const BSONObj& obj) {
- ShardCollectionTypeBase::parseProtected(IDLParserErrorContext("ShardCollectionType"), obj);
+ ShardCollectionTypeBase::parseProtected(IDLParserContext("ShardCollectionType"), obj);
uassert(ErrorCodes::ShardKeyNotFound,
str::stream() << "Empty shard key. Failed to parse: " << obj.toString(),
diff --git a/src/mongo/db/s/type_shard_identity.cpp b/src/mongo/db/s/type_shard_identity.cpp
index 673636f01b1..07519bf5637 100644
--- a/src/mongo/db/s/type_shard_identity.cpp
+++ b/src/mongo/db/s/type_shard_identity.cpp
@@ -49,7 +49,7 @@ StatusWith<ShardIdentityType> ShardIdentityType::fromShardIdentityDocument(const
try {
ShardIdentityType shardIdentity =
- ShardIdentity::parse(IDLParserErrorContext("ShardIdentity"), shardIdentityBSON);
+ ShardIdentity::parse(IDLParserContext("ShardIdentity"), shardIdentityBSON);
const auto& configsvrConnStr = shardIdentity.getConfigsvrConnectionString();
if (configsvrConnStr.type() != ConnectionString::ConnectionType::kReplicaSet) {
diff --git a/src/mongo/db/s/user_writes_recoverable_critical_section_service.cpp b/src/mongo/db/s/user_writes_recoverable_critical_section_service.cpp
index 4fa8a1d5bd2..be1cc7c2375 100644
--- a/src/mongo/db/s/user_writes_recoverable_critical_section_service.cpp
+++ b/src/mongo/db/s/user_writes_recoverable_critical_section_service.cpp
@@ -111,7 +111,7 @@ void acquireRecoverableCriticalSection(OperationContext* opCtx,
const auto bsonObj = findRecoverableCriticalSectionDoc(opCtx, nss);
if (!bsonObj.isEmpty()) {
const auto collCSDoc = UserWriteBlockingCriticalSectionDocument::parse(
- IDLParserErrorContext("AcquireUserWritesCS"), bsonObj);
+ IDLParserContext("AcquireUserWritesCS"), bsonObj);
uassert(ErrorCodes::IllegalOperation,
str::stream() << "Cannot acquire user writes critical section with different "
@@ -225,7 +225,7 @@ void UserWritesRecoverableCriticalSectionService::
!bsonObj.isEmpty());
const auto collCSDoc = UserWriteBlockingCriticalSectionDocument::parse(
- IDLParserErrorContext("PromoteUserWritesCS"), bsonObj);
+ IDLParserContext("PromoteUserWritesCS"), bsonObj);
uassert(ErrorCodes::IllegalOperation,
"Cannot promote user writes critical section to block user writes if sharded DDL "
@@ -284,7 +284,7 @@ void UserWritesRecoverableCriticalSectionService::
}
const auto collCSDoc = UserWriteBlockingCriticalSectionDocument::parse(
- IDLParserErrorContext("DemoteUserWritesCS"), bsonObj);
+ IDLParserContext("DemoteUserWritesCS"), bsonObj);
// If we are not currently blocking user writes, then we are done.
if (!collCSDoc.getBlockUserWrites()) {
@@ -332,7 +332,7 @@ void UserWritesRecoverableCriticalSectionService::releaseRecoverableCriticalSect
}
const auto collCSDoc = UserWriteBlockingCriticalSectionDocument::parse(
- IDLParserErrorContext("ReleaseUserWritesCS"), bsonObj);
+ IDLParserContext("ReleaseUserWritesCS"), bsonObj);
// Release the critical section by deleting the critical section document. The OpObserver
// will release the in-memory CS when reacting to the delete event.