Diffstat (limited to 'src/mongo/db/commands')
34 files changed, 360 insertions, 337 deletions
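The bulk of this patch is a mechanical rename of the IDL-generated command types: request classes gain an explicit CommandRequest suffix (CountCommand -> CountCommandRequest, AggregateCommand -> AggregateCommandRequest, write_ops::Insert -> write_ops::InsertCommandRequest, and so on) and reply classes gain a CommandReply suffix. A minimal sketch of the call-site change, taken from the first count_cmd.cpp hunk below; everything other than the renamed identifiers is unchanged:

    // Sketch only: opMsgRequest is the incoming OpMsgRequest handled by the command.
    CountCommandRequest request(NamespaceStringOrUUID(NamespaceString{}));  // was: CountCommand
    try {
        request = CountCommandRequest::parse(IDLParserErrorContext("count"), opMsgRequest);
    } catch (...) {
        return exceptionToStatus();
    }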
diff --git a/src/mongo/db/commands/count_cmd.cpp b/src/mongo/db/commands/count_cmd.cpp index 1347c0af83c..c53ffb44608 100644 --- a/src/mongo/db/commands/count_cmd.cpp +++ b/src/mongo/db/commands/count_cmd.cpp @@ -146,9 +146,9 @@ public: AutoGetCollectionViewMode::kViewsPermitted); const auto nss = ctx->getNss(); - CountCommand request(NamespaceStringOrUUID(NamespaceString{})); + CountCommandRequest request(NamespaceStringOrUUID(NamespaceString{})); try { - request = CountCommand::parse(IDLParserErrorContext("count"), opMsgRequest); + request = CountCommandRequest::parse(IDLParserErrorContext("count"), opMsgRequest); } catch (...) { return exceptionToStatus(); } @@ -225,7 +225,7 @@ public: CurOpFailpointHelpers::waitWhileFailPointEnabled( &hangBeforeCollectionCount, opCtx, "hangBeforeCollectionCount", []() {}, nss); - auto request = CountCommand::parse(IDLParserErrorContext("count"), cmdObj); + auto request = CountCommandRequest::parse(IDLParserErrorContext("count"), cmdObj); // Check whether we are allowed to read from this node after acquiring our locks. auto replCoord = repl::ReplicationCoordinator::get(opCtx); diff --git a/src/mongo/db/commands/current_op.cpp b/src/mongo/db/commands/current_op.cpp index 6c6e542bdfc..1fd9200f9da 100644 --- a/src/mongo/db/commands/current_op.cpp +++ b/src/mongo/db/commands/current_op.cpp @@ -66,8 +66,8 @@ public: return Status(ErrorCodes::Unauthorized, "Unauthorized"); } - virtual StatusWith<CursorResponse> runAggregation(OperationContext* opCtx, - const AggregateCommand& request) const final { + virtual StatusWith<CursorResponse> runAggregation( + OperationContext* opCtx, const AggregateCommandRequest& request) const final { auto aggCmdObj = aggregation_request_helper::serializeToCommandObj(request); rpc::OpMsgReplyBuilder replyBuilder; diff --git a/src/mongo/db/commands/current_op_common.cpp b/src/mongo/db/commands/current_op_common.cpp index ccec9928924..d44d5c066d2 100644 --- a/src/mongo/db/commands/current_op_common.cpp +++ b/src/mongo/db/commands/current_op_common.cpp @@ -109,9 +109,9 @@ bool CurrentOpCommandBase::run(OperationContext* opCtx, pipeline.push_back(groupBuilder.obj()); - // Pipeline is complete; create an AggregateCommand for $currentOp. - const AggregateCommand request(NamespaceString::makeCollectionlessAggregateNSS("admin"), - std::move(pipeline)); + // Pipeline is complete; create an AggregateCommandRequest for $currentOp. + const AggregateCommandRequest request(NamespaceString::makeCollectionlessAggregateNSS("admin"), + std::move(pipeline)); // Run the pipeline and obtain a CursorResponse. auto aggResults = uassertStatusOK(runAggregation(opCtx, request)); diff --git a/src/mongo/db/commands/current_op_common.h b/src/mongo/db/commands/current_op_common.h index 8331f44d990..31ff95ac764 100644 --- a/src/mongo/db/commands/current_op_common.h +++ b/src/mongo/db/commands/current_op_common.h @@ -70,11 +70,11 @@ private: virtual void modifyPipeline(std::vector<BSONObj>* pipeline) const {}; /** - * Runs the aggregation specified by the supplied AggregateCommand, returning a CursorResponse - * if successful or a Status containing the error otherwise. + * Runs the aggregation specified by the supplied AggregateCommandRequest, returning a + * CursorResponse if successful or a Status containing the error otherwise. 
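The $currentOp plumbing follows the same convention for aggregation: the request type is now AggregateCommandRequest. A condensed sketch of the renamed construction in current_op_common.cpp, assuming 'pipeline' is the std::vector<BSONObj> assembled earlier in CurrentOpCommandBase::run():

    // Sketch only: 'pipeline' is the stage vector built up earlier in run().
    const AggregateCommandRequest request(
        NamespaceString::makeCollectionlessAggregateNSS("admin"), std::move(pipeline));

    // Run the pipeline and obtain a CursorResponse.
    auto aggResults = uassertStatusOK(runAggregation(opCtx, request));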
*/ - virtual StatusWith<CursorResponse> runAggregation(OperationContext* opCtx, - const AggregateCommand& request) const = 0; + virtual StatusWith<CursorResponse> runAggregation( + OperationContext* opCtx, const AggregateCommandRequest& request) const = 0; /** * Allows overriders to optionally write additional data to the response object before the final diff --git a/src/mongo/db/commands/dbcommands_d.cpp b/src/mongo/db/commands/dbcommands_d.cpp index 6b71f0cc36f..4184a0af2ce 100644 --- a/src/mongo/db/commands/dbcommands_d.cpp +++ b/src/mongo/db/commands/dbcommands_d.cpp @@ -282,7 +282,7 @@ public: BSONObj sort = BSON("files_id" << 1 << "n" << 1); return writeConflictRetry(opCtx, "filemd5", dbname, [&] { - auto findCommand = std::make_unique<FindCommand>(nss); + auto findCommand = std::make_unique<FindCommandRequest>(nss); findCommand->setFilter(query.getOwned()); findCommand->setSort(sort.getOwned()); diff --git a/src/mongo/db/commands/explain_cmd.cpp b/src/mongo/db/commands/explain_cmd.cpp index 8f754f6e1d4..318029aef75 100644 --- a/src/mongo/db/commands/explain_cmd.cpp +++ b/src/mongo/db/commands/explain_cmd.cpp @@ -154,8 +154,8 @@ std::unique_ptr<CommandInvocation> CmdExplain::parse(OperationContext* opCtx, CommandHelpers::uassertNoDocumentSequences(getName(), request); // To enforce API versioning - auto cmdObj = ExplainCmd::parse( - IDLParserErrorContext(ExplainCmd::kCommandName, + auto cmdObj = ExplainCommandRequest::parse( + IDLParserErrorContext(ExplainCommandRequest::kCommandName, APIParameters::get(opCtx).getAPIStrict().value_or(false)), request.body); std::string dbname = cmdObj.getDbName().toString(); diff --git a/src/mongo/db/commands/find_and_modify.cpp b/src/mongo/db/commands/find_and_modify.cpp index 1c5da6a7c98..95968d013b4 100644 --- a/src/mongo/db/commands/find_and_modify.cpp +++ b/src/mongo/db/commands/find_and_modify.cpp @@ -87,7 +87,7 @@ MONGO_FAIL_POINT_DEFINE(hangBeforeFindAndModifyPerformsUpdate); * boost::none if no matching document to update/remove was found. If the operation failed, throws. 
*/ boost::optional<BSONObj> advanceExecutor(OperationContext* opCtx, - const write_ops::FindAndModifyCommand& request, + const write_ops::FindAndModifyCommandRequest& request, PlanExecutor* exec, bool isRemove) { BSONObj value; @@ -117,7 +117,7 @@ boost::optional<BSONObj> advanceExecutor(OperationContext* opCtx, return boost::none; } -void validate(const write_ops::FindAndModifyCommand& request) { +void validate(const write_ops::FindAndModifyCommandRequest& request) { uassert(ErrorCodes::FailedToParse, "Either an update or remove=true must be specified", request.getRemove().value_or(false) || request.getUpdate()); @@ -148,7 +148,7 @@ void validate(const write_ops::FindAndModifyCommand& request) { } void makeUpdateRequest(OperationContext* opCtx, - const write_ops::FindAndModifyCommand& request, + const write_ops::FindAndModifyCommandRequest& request, boost::optional<ExplainOptions::Verbosity> explain, UpdateRequest* requestOut) { requestOut->setQuery(request.getQuery()); @@ -174,7 +174,7 @@ void makeUpdateRequest(OperationContext* opCtx, } void makeDeleteRequest(OperationContext* opCtx, - const write_ops::FindAndModifyCommand& request, + const write_ops::FindAndModifyCommandRequest& request, bool explain, DeleteRequest* requestOut) { requestOut->setQuery(request.getQuery()); @@ -194,9 +194,9 @@ void makeDeleteRequest(OperationContext* opCtx, : PlanYieldPolicy::YieldPolicy::YIELD_AUTO); } -write_ops::FindAndModifyReply buildResponse(const PlanExecutor* exec, - bool isRemove, - const boost::optional<BSONObj>& value) { +write_ops::FindAndModifyCommandReply buildResponse(const PlanExecutor* exec, + bool isRemove, + const boost::optional<BSONObj>& value) { write_ops::FindAndModifyLastError lastError; if (isRemove) { lastError.setNumDocs(value ? 1 : 0); @@ -212,7 +212,7 @@ write_ops::FindAndModifyReply buildResponse(const PlanExecutor* exec, } } - write_ops::FindAndModifyReply result; + write_ops::FindAndModifyCommandReply result; result.setLastErrorObject(std::move(lastError)); result.setValue(value); return result; @@ -304,19 +304,19 @@ public: void appendMirrorableRequest(BSONObjBuilder* bob) const final; private: - static write_ops::FindAndModifyReply writeConflictRetryRemove( + static write_ops::FindAndModifyCommandReply writeConflictRetryRemove( OperationContext* opCtx, const NamespaceString& nsString, - const write_ops::FindAndModifyCommand& request, + const write_ops::FindAndModifyCommandRequest& request, int stmtId, CurOp* curOp, OpDebug* opDebug, bool inTransaction); - static write_ops::FindAndModifyReply writeConflictRetryUpsert( + static write_ops::FindAndModifyCommandReply writeConflictRetryUpsert( OperationContext* opCtx, const NamespaceString& nsString, - const write_ops::FindAndModifyCommand& request, + const write_ops::FindAndModifyCommandRequest& request, CurOp* curOp, OpDebug* opDebug, bool inTransaction, @@ -330,10 +330,10 @@ private: UpdateMetrics CmdFindAndModify::_updateMetrics{"findAndModify"}; -write_ops::FindAndModifyReply CmdFindAndModify::Invocation::writeConflictRetryRemove( +write_ops::FindAndModifyCommandReply CmdFindAndModify::Invocation::writeConflictRetryRemove( OperationContext* opCtx, const NamespaceString& nsString, - const write_ops::FindAndModifyCommand& request, + const write_ops::FindAndModifyCommandRequest& request, int stmtId, CurOp* curOp, OpDebug* const opDebug, @@ -406,10 +406,10 @@ write_ops::FindAndModifyReply CmdFindAndModify::Invocation::writeConflictRetryRe return buildResponse(exec.get(), request.getRemove().value_or(false), docFound); } 
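Reply types are renamed on the same pattern (FindAndModifyReply -> write_ops::FindAndModifyCommandReply). A condensed sketch of the buildResponse() shape from the hunks above, showing only the remove path; 'value' is the boost::optional<BSONObj> produced by advanceExecutor():

    // Sketch condensed from buildResponse(); the update path fills in
    // additional lastError fields not shown in this diff.
    write_ops::FindAndModifyLastError lastError;
    lastError.setNumDocs(value ? 1 : 0);

    write_ops::FindAndModifyCommandReply result;  // was: write_ops::FindAndModifyReply
    result.setLastErrorObject(std::move(lastError));
    result.setValue(value);
    return result;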
-write_ops::FindAndModifyReply CmdFindAndModify::Invocation::writeConflictRetryUpsert( +write_ops::FindAndModifyCommandReply CmdFindAndModify::Invocation::writeConflictRetryUpsert( OperationContext* opCtx, const NamespaceString& nsString, - const write_ops::FindAndModifyCommand& request, + const write_ops::FindAndModifyCommandRequest& request, CurOp* curOp, OpDebug* opDebug, bool inTransaction, @@ -597,7 +597,8 @@ void CmdFindAndModify::Invocation::explain(OperationContext* opCtx, } } -write_ops::FindAndModifyReply CmdFindAndModify::Invocation::typedRun(OperationContext* opCtx) { +write_ops::FindAndModifyCommandReply CmdFindAndModify::Invocation::typedRun( + OperationContext* opCtx) { const auto& req = request(); validate(req); @@ -708,16 +709,17 @@ write_ops::FindAndModifyReply CmdFindAndModify::Invocation::typedRun(OperationCo void CmdFindAndModify::Invocation::appendMirrorableRequest(BSONObjBuilder* bob) const { const auto& req = request(); - bob->append(FindCommand::kCommandName, req.getNamespace().coll()); + bob->append(FindCommandRequest::kCommandName, req.getNamespace().coll()); if (!req.getQuery().isEmpty()) { - bob->append(FindCommand::kFilterFieldName, req.getQuery()); + bob->append(FindCommandRequest::kFilterFieldName, req.getQuery()); } if (req.getSort()) { - bob->append(write_ops::FindAndModifyCommand::kSortFieldName, *req.getSort()); + bob->append(write_ops::FindAndModifyCommandRequest::kSortFieldName, *req.getSort()); } if (req.getCollation()) { - bob->append(write_ops::FindAndModifyCommand::kCollationFieldName, *req.getCollation()); + bob->append(write_ops::FindAndModifyCommandRequest::kCollationFieldName, + *req.getCollation()); } // Prevent the find from returning multiple documents since we can diff --git a/src/mongo/db/commands/find_cmd.cpp b/src/mongo/db/commands/find_cmd.cpp index f9b858d0b49..669f133fab5 100644 --- a/src/mongo/db/commands/find_cmd.cpp +++ b/src/mongo/db/commands/find_cmd.cpp @@ -65,11 +65,11 @@ namespace { const auto kTermField = "term"_sd; -// Parses the command object to a FindCommand. If the client request did not specify any runtime -// constants, make them available to the query here. -std::unique_ptr<FindCommand> parseCmdObjectToFindCommand(OperationContext* opCtx, - NamespaceString nss, - BSONObj cmdObj) { +// Parses the command object to a FindCommandRequest. If the client request did not specify any +// runtime constants, make them available to the query here. +std::unique_ptr<FindCommandRequest> parseCmdObjectToFindCommandRequest(OperationContext* opCtx, + NamespaceString nss, + BSONObj cmdObj) { auto findCommand = query_request_helper::makeFromFindCommand( std::move(cmdObj), std::move(nss), @@ -82,7 +82,7 @@ std::unique_ptr<FindCommand> parseCmdObjectToFindCommand(OperationContext* opCtx boost::intrusive_ptr<ExpressionContext> makeExpressionContext( OperationContext* opCtx, - const FindCommand& findCommand, + const FindCommandRequest& findCommand, boost::optional<ExplainOptions::Verbosity> verbosity) { std::unique_ptr<CollatorInterface> collator; if (!findCommand.getCollation().isEmpty()) { @@ -248,10 +248,10 @@ public: AutoGetCollectionViewMode::kViewsPermitted); const auto nss = ctx->getNss(); - // Parse the command BSON to a FindCommand. - auto findCommand = parseCmdObjectToFindCommand(opCtx, nss, _request.body); + // Parse the command BSON to a FindCommandRequest. + auto findCommand = parseCmdObjectToFindCommandRequest(opCtx, nss, _request.body); - // Finish the parsing step by using the FindCommand to create a CanonicalQuery. 
+ // Finish the parsing step by using the FindCommandRequest to create a CanonicalQuery. const ExtensionsCallbackReal extensionsCallback(opCtx, &nss); auto expCtx = makeExpressionContext(opCtx, *findCommand, verbosity); const bool isExplain = true; @@ -269,7 +269,7 @@ public: // Convert the find command into an aggregation using $match (and other stages, as // necessary), if possible. - const auto& findCommand = cq->getFindCommand(); + const auto& findCommand = cq->getFindCommandRequest(); auto viewAggregationCommand = uassertStatusOK(query_request_helper::asAggregationCommand(findCommand)); @@ -329,12 +329,13 @@ public: const BSONObj& cmdObj = _request.body; - // Parse the command BSON to a FindCommand. Pass in the parsedNss in case cmdObj does - // not have a UUID. + // Parse the command BSON to a FindCommandRequest. Pass in the parsedNss in case cmdObj + // does not have a UUID. auto parsedNss = NamespaceString{CommandHelpers::parseNsFromCommand(_dbName, cmdObj)}; const bool isExplain = false; const bool isOplogNss = (parsedNss == NamespaceString::kRsOplogNamespace); - auto findCommand = parseCmdObjectToFindCommand(opCtx, std::move(parsedNss), cmdObj); + auto findCommand = + parseCmdObjectToFindCommandRequest(opCtx, std::move(parsedNss), cmdObj); // Only allow speculative majority for internal commands that specify the correct flag. uassert(ErrorCodes::ReadConcernMajorityNotEnabled, @@ -402,7 +403,7 @@ public: const int ntoskip = -1; beginQueryOp(opCtx, nss, _request.body, ntoreturn, ntoskip); - // Finish the parsing step by using the FindCommand to create a CanonicalQuery. + // Finish the parsing step by using the FindCommandRequest to create a CanonicalQuery. const ExtensionsCallbackReal extensionsCallback(opCtx, &nss); auto expCtx = makeExpressionContext(opCtx, *findCommand, boost::none /* verbosity */); auto cq = uassertStatusOK( @@ -419,7 +420,7 @@ public: // Convert the find command into an aggregation using $match (and other stages, as // necessary), if possible. - const auto& findCommand = cq->getFindCommand(); + const auto& findCommand = cq->getFindCommandRequest(); auto viewAggregationCommand = uassertStatusOK(query_request_helper::asAggregationCommand(findCommand)); @@ -437,7 +438,7 @@ public: const auto& collection = ctx->getCollection(); - if (cq->getFindCommand().getReadOnce()) { + if (cq->getFindCommandRequest().getReadOnce()) { // The readOnce option causes any storage-layer cursors created during plan // execution to assume read data will not be needed again and need not be cached. opCtx->recoveryUnit()->setReadOnce(true); @@ -466,7 +467,8 @@ public: FindCommon::waitInFindBeforeMakingBatch(opCtx, *exec->getCanonicalQuery()); - const FindCommand& originalFC = exec->getCanonicalQuery()->getFindCommand(); + const FindCommandRequest& originalFC = + exec->getCanonicalQuery()->getFindCommandRequest(); // Stream query results, adding them to a BSONArray as we go. 
CursorResponseBuilder::Options options; diff --git a/src/mongo/db/commands/getmore_cmd.cpp b/src/mongo/db/commands/getmore_cmd.cpp index e5a68078df4..15f42dfe249 100644 --- a/src/mongo/db/commands/getmore_cmd.cpp +++ b/src/mongo/db/commands/getmore_cmd.cpp @@ -188,7 +188,7 @@ void applyCursorReadConcern(OperationContext* opCtx, repl::ReadConcernArgs rcArg */ void setUpOperationDeadline(OperationContext* opCtx, const ClientCursor& cursor, - const GetMoreCommand& cmd, + const GetMoreCommandRequest& cmd, bool disableAwaitDataFailpointActive) { // We assume that cursors created through a DBDirectClient are always used from their @@ -214,7 +214,7 @@ void setUpOperationDeadline(OperationContext* opCtx, */ void setUpOperationContextStateForGetMore(OperationContext* opCtx, const ClientCursor& cursor, - const GetMoreCommand& cmd, + const GetMoreCommandRequest& cmd, bool disableAwaitDataFailpointActive) { applyCursorReadConcern(opCtx, cursor.getReadConcernArgs()); opCtx->setWriteConcern(cursor.getWriteConcernOptions()); @@ -254,7 +254,8 @@ public: class Invocation final : public CommandInvocation { public: Invocation(Command* cmd, const OpMsgRequest& request) - : CommandInvocation(cmd), _cmd(GetMoreCommand::parse({"getMore"}, request.body)) { + : CommandInvocation(cmd), + _cmd(GetMoreCommandRequest::parse({"getMore"}, request.body)) { NamespaceString nss(_cmd.getDbName(), _cmd.getCollection()); uassert(ErrorCodes::InvalidNamespace, str::stream() << "Invalid namespace for getMore: " << nss.ns(), @@ -303,7 +304,7 @@ public: */ bool generateBatch(OperationContext* opCtx, ClientCursor* cursor, - const GetMoreCommand& cmd, + const GetMoreCommandRequest& cmd, const bool isTailable, CursorResponseBuilder* nextBatch, std::uint64_t* numResults, @@ -520,7 +521,7 @@ public: PlanExecutor* exec = cursorPin->getExecutor(); const auto* cq = exec->getCanonicalQuery(); - if (cq && cq->getFindCommand().getReadOnce()) { + if (cq && cq->getFindCommandRequest().getReadOnce()) { // The readOnce option causes any storage-layer cursors created during plan // execution to assume read data will not be needed again and need not be cached. opCtx->recoveryUnit()->setReadOnce(true); @@ -754,7 +755,7 @@ public: CursorGetMoreReply::parse({"CursorGetMoreReply"}, ret.removeField("ok")); } - const GetMoreCommand _cmd; + const GetMoreCommandRequest _cmd; }; bool maintenanceOk() const override { diff --git a/src/mongo/db/commands/index_filter_commands.cpp b/src/mongo/db/commands/index_filter_commands.cpp index 286b3595056..bc2498acd60 100644 --- a/src/mongo/db/commands/index_filter_commands.cpp +++ b/src/mongo/db/commands/index_filter_commands.cpp @@ -303,7 +303,7 @@ Status ClearFilters::clear(OperationContext* opCtx, AllowedIndexEntry entry = *i; // Create canonical query. - auto findCommand = std::make_unique<FindCommand>(nss); + auto findCommand = std::make_unique<FindCommandRequest>(nss); findCommand->setFilter(entry.query); findCommand->setSort(entry.sort); findCommand->setProjection(entry.projection); diff --git a/src/mongo/db/commands/index_filter_commands_test.cpp b/src/mongo/db/commands/index_filter_commands_test.cpp index 760ccfbf9d2..1293a0d0db3 100644 --- a/src/mongo/db/commands/index_filter_commands_test.cpp +++ b/src/mongo/db/commands/index_filter_commands_test.cpp @@ -130,7 +130,7 @@ void addQueryShapeToPlanCache(OperationContext* opCtx, const char* projectionStr, const char* collationStr) { // Create canonical query. 
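The canonical-query construction pattern that recurs in dbcommands_d.cpp and index_filter_commands.cpp above, in the test hunks that continue below, and in plan_cache_commands.cpp further down changes only in the type name. A minimal sketch, assuming nss and the BSON filter/sort/projection objects are already in scope:

    // Sketch only: nss, filter, sort and projection are assumed to be in scope.
    auto findCommand = std::make_unique<FindCommandRequest>(nss);  // was: FindCommand
    findCommand->setFilter(filter.getOwned());
    findCommand->setSort(sort.getOwned());
    findCommand->setProjection(projection.getOwned());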
- auto findCommand = std::make_unique<FindCommand>(nss); + auto findCommand = std::make_unique<FindCommandRequest>(nss); findCommand->setFilter(fromjson(queryStr)); findCommand->setSort(fromjson(sortStr)); findCommand->setProjection(fromjson(projectionStr)); @@ -161,7 +161,7 @@ bool planCacheContains(OperationContext* opCtx, const char* collationStr) { // Create canonical query. - auto findCommand = std::make_unique<FindCommand>(nss); + auto findCommand = std::make_unique<FindCommandRequest>(nss); findCommand->setFilter(fromjson(queryStr)); findCommand->setSort(fromjson(sortStr)); findCommand->setProjection(fromjson(projectionStr)); @@ -181,7 +181,7 @@ bool planCacheContains(OperationContext* opCtx, // Canonicalize the query shape stored in the cache entry in order to get the plan cache // key. - auto findCommand = std::make_unique<FindCommand>(nss); + auto findCommand = std::make_unique<FindCommandRequest>(nss); findCommand->setFilter(createdFromQuery.filter); findCommand->setSort(createdFromQuery.sort); findCommand->setProjection(createdFromQuery.projection); diff --git a/src/mongo/db/commands/killcursors_common.h b/src/mongo/db/commands/killcursors_common.h index 900038fad59..4d906b66777 100644 --- a/src/mongo/db/commands/killcursors_common.h +++ b/src/mongo/db/commands/killcursors_common.h @@ -104,7 +104,7 @@ public: } } - KillCursorsReply typedRun(OperationContext* opCtx) final { + KillCursorsCommandReply typedRun(OperationContext* opCtx) final { auto killCursorsRequest = this->request(); std::vector<CursorId> cursorsKilled; @@ -122,7 +122,7 @@ public: } } - KillCursorsReply reply; + KillCursorsCommandReply reply; reply.setCursorsKilled(std::move(cursorsKilled)); reply.setCursorsNotFound(std::move(cursorsNotFound)); reply.setCursorsAlive(std::move(cursorsAlive)); diff --git a/src/mongo/db/commands/map_reduce.idl b/src/mongo/db/commands/map_reduce.idl index b2e8cbf22f6..cf74f858a39 100644 --- a/src/mongo/db/commands/map_reduce.idl +++ b/src/mongo/db/commands/map_reduce.idl @@ -68,9 +68,10 @@ types: commands: - MapReduce: + mapReduce: description: "The MapReduce command." - command_name: "MapReduce" + command_name: "mapReduce" + cpp_name: MapReduceCommandRequest namespace: concatenate_with_db api_version: "" strict: true diff --git a/src/mongo/db/commands/map_reduce_agg.cpp b/src/mongo/db/commands/map_reduce_agg.cpp index 557c199b788..d86b4679814 100644 --- a/src/mongo/db/commands/map_reduce_agg.cpp +++ b/src/mongo/db/commands/map_reduce_agg.cpp @@ -59,7 +59,7 @@ namespace mongo::map_reduce_agg { namespace { auto makeExpressionContext(OperationContext* opCtx, - const MapReduce& parsedMr, + const MapReduceCommandRequest& parsedMr, boost::optional<ExplainOptions::Verbosity> verbosity) { // AutoGetCollectionForReadCommand will throw if the sharding version for this connection is // out of date. 
@@ -123,7 +123,7 @@ bool runAggregationMapReduce(OperationContext* opCtx, Timer cmdTimer; - auto parsedMr = MapReduce::parse(IDLParserErrorContext("MapReduce"), cmd); + auto parsedMr = MapReduceCommandRequest::parse(IDLParserErrorContext("mapReduce"), cmd); auto expCtx = makeExpressionContext(opCtx, parsedMr, verbosity); auto runnablePipeline = [&]() { auto pipeline = map_reduce_common::translateFromMR(parsedMr, expCtx); diff --git a/src/mongo/db/commands/map_reduce_agg_test.cpp b/src/mongo/db/commands/map_reduce_agg_test.cpp index a5859d040ca..52ee67c416b 100644 --- a/src/mongo/db/commands/map_reduce_agg_test.cpp +++ b/src/mongo/db/commands/map_reduce_agg_test.cpp @@ -70,10 +70,11 @@ constexpr auto finalizeJavascript = "finalize!"_sd; TEST(MapReduceAggTest, testBasicTranslate) { auto nss = NamespaceString{"db", "coll"}; - auto mr = MapReduce{nss, - MapReduceJavascriptCode{mapJavascript.toString()}, - MapReduceJavascriptCode{reduceJavascript.toString()}, - MapReduceOutOptions{boost::none, "", OutputType::InMemory, false}}; + auto mr = + MapReduceCommandRequest{nss, + MapReduceJavascriptCode{mapJavascript.toString()}, + MapReduceJavascriptCode{reduceJavascript.toString()}, + MapReduceOutOptions{boost::none, "", OutputType::InMemory, false}}; boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest(nss)); auto pipeline = map_reduce_common::translateFromMR(mr, expCtx); auto& sources = pipeline->getSources(); @@ -86,10 +87,11 @@ TEST(MapReduceAggTest, testBasicTranslate) { TEST(MapReduceAggTest, testSortWithoutLimit) { auto nss = NamespaceString{"db", "coll"}; - auto mr = MapReduce{nss, - MapReduceJavascriptCode{mapJavascript.toString()}, - MapReduceJavascriptCode{reduceJavascript.toString()}, - MapReduceOutOptions{boost::none, "", OutputType::InMemory, false}}; + auto mr = + MapReduceCommandRequest{nss, + MapReduceJavascriptCode{mapJavascript.toString()}, + MapReduceJavascriptCode{reduceJavascript.toString()}, + MapReduceOutOptions{boost::none, "", OutputType::InMemory, false}}; mr.setSort(BSON("foo" << 1)); boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest(nss)); auto pipeline = map_reduce_common::translateFromMR(mr, expCtx); @@ -106,10 +108,11 @@ TEST(MapReduceAggTest, testSortWithoutLimit) { TEST(MapReduceAggTest, testSortWithLimit) { auto nss = NamespaceString{"db", "coll"}; - auto mr = MapReduce{nss, - MapReduceJavascriptCode{mapJavascript.toString()}, - MapReduceJavascriptCode{reduceJavascript.toString()}, - MapReduceOutOptions{boost::none, "", OutputType::InMemory, false}}; + auto mr = + MapReduceCommandRequest{nss, + MapReduceJavascriptCode{mapJavascript.toString()}, + MapReduceJavascriptCode{reduceJavascript.toString()}, + MapReduceOutOptions{boost::none, "", OutputType::InMemory, false}}; mr.setSort(BSON("foo" << 1)); mr.setLimit(23); boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest(nss)); @@ -130,10 +133,11 @@ TEST(MapReduceAggTest, testSortWithLimit) { TEST(MapReduceAggTest, testLimitNoSort) { auto nss = NamespaceString{"db", "coll"}; - auto mr = MapReduce{nss, - MapReduceJavascriptCode{mapJavascript.toString()}, - MapReduceJavascriptCode{reduceJavascript.toString()}, - MapReduceOutOptions{boost::none, "", OutputType::InMemory, false}}; + auto mr = + MapReduceCommandRequest{nss, + MapReduceJavascriptCode{mapJavascript.toString()}, + MapReduceJavascriptCode{reduceJavascript.toString()}, + MapReduceOutOptions{boost::none, "", OutputType::InMemory, false}}; mr.setLimit(23); 
boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest(nss)); auto pipeline = map_reduce_common::translateFromMR(mr, expCtx); @@ -150,7 +154,7 @@ TEST(MapReduceAggTest, testLimitNoSort) { TEST(MapReduceAggTest, testFeatureLadenTranslate) { auto nss = NamespaceString{"db", "coll"}; - auto mr = MapReduce{ + auto mr = MapReduceCommandRequest{ nss, MapReduceJavascriptCode{mapJavascript.toString()}, MapReduceJavascriptCode{reduceJavascript.toString()}, @@ -176,7 +180,7 @@ TEST(MapReduceAggTest, testFeatureLadenTranslate) { TEST(MapReduceAggTest, testOutMergeTranslate) { auto nss = NamespaceString{"db", "coll"}; - auto mr = MapReduce{ + auto mr = MapReduceCommandRequest{ nss, MapReduceJavascriptCode{mapJavascript.toString()}, MapReduceJavascriptCode{reduceJavascript.toString()}, @@ -196,7 +200,7 @@ TEST(MapReduceAggTest, testOutMergeTranslate) { TEST(MapReduceAggTest, testOutReduceTranslate) { auto nss = NamespaceString{"db", "coll"}; - auto mr = MapReduce{ + auto mr = MapReduceCommandRequest{ nss, MapReduceJavascriptCode{mapJavascript.toString()}, MapReduceJavascriptCode{reduceJavascript.toString()}, @@ -218,7 +222,7 @@ TEST(MapReduceAggTest, testOutReduceTranslate) { TEST(MapReduceAggTest, testOutSameCollection) { auto nss = NamespaceString{"db", "coll"}; - auto mr = MapReduce{ + auto mr = MapReduceCommandRequest{ nss, MapReduceJavascriptCode{mapJavascript.toString()}, MapReduceJavascriptCode{reduceJavascript.toString()}, @@ -236,7 +240,7 @@ TEST(MapReduceAggTest, testOutSameCollection) { TEST(MapReduceAggTest, testSourceDestinationCollectionsEqualMergeDoesNotFail) { auto nss = NamespaceString{"db", "coll"}; - auto mr = MapReduce{ + auto mr = MapReduceCommandRequest{ nss, MapReduceJavascriptCode{mapJavascript.toString()}, MapReduceJavascriptCode{reduceJavascript.toString()}, @@ -247,7 +251,7 @@ TEST(MapReduceAggTest, testSourceDestinationCollectionsEqualMergeDoesNotFail) { TEST(MapReduceAggTest, testSourceDestinationCollectionsNotEqualMergeDoesNotFail) { auto nss = NamespaceString{"db", "coll"}; - auto mr = MapReduce{ + auto mr = MapReduceCommandRequest{ nss, MapReduceJavascriptCode{mapJavascript.toString()}, MapReduceJavascriptCode{reduceJavascript.toString()}, @@ -258,7 +262,7 @@ TEST(MapReduceAggTest, testSourceDestinationCollectionsNotEqualMergeDoesNotFail) TEST(MapReduceAggTest, testShardedTrueWithReplaceActionIsNotAllowed) { auto nss = NamespaceString{"db", "coll"}; - auto mr = MapReduce{ + auto mr = MapReduceCommandRequest{ nss, MapReduceJavascriptCode{mapJavascript.toString()}, MapReduceJavascriptCode{reduceJavascript.toString()}, @@ -272,7 +276,7 @@ TEST(MapReduceAggTest, testErrorMessagesTranslated) { // Verifies that agg specific error messages are translated to be mapReduce specific. auto nss = NamespaceString{"db", "coll1"}; - auto mr = MapReduce{ + auto mr = MapReduceCommandRequest{ nss, MapReduceJavascriptCode{mapJavascript.toString()}, MapReduceJavascriptCode{reduceJavascript.toString()}, diff --git a/src/mongo/db/commands/map_reduce_parse_test.cpp b/src/mongo/db/commands/map_reduce_parse_test.cpp index fc7f7e1ae9d..f89040dbf2e 100644 --- a/src/mongo/db/commands/map_reduce_parse_test.cpp +++ b/src/mongo/db/commands/map_reduce_parse_test.cpp @@ -50,201 +50,211 @@ constexpr auto finalizeJavascript = "finalize!"_sd; TEST(MapReduceParseTest, failedParse) { auto ctx = IDLParserErrorContext("mapReduce"); // Missing fields. 
- ASSERT_THROWS(MapReduce::parse(ctx, - BSON("" - << "" - << "$db" - << "db")), - DBException); - ASSERT_THROWS(MapReduce::parse(ctx, - BSON("mapReduce" - << "foo" - << "$db" - << "db")), + ASSERT_THROWS(MapReduceCommandRequest::parse(ctx, + BSON("" + << "" + << "$db" + << "db")), DBException); - ASSERT_THROWS(MapReduce::parse(ctx, - BSON("map" << mapJavascript << "reduce" << reduceJavascript - << "out" << BSON("inline" << 1) << "$db" - << "db")), + ASSERT_THROWS(MapReduceCommandRequest::parse(ctx, + BSON("mapReduce" + << "foo" + << "$db" + << "db")), DBException); + ASSERT_THROWS( + MapReduceCommandRequest::parse(ctx, + BSON("map" << mapJavascript << "reduce" << reduceJavascript + << "out" << BSON("inline" << 1) << "$db" + << "db")), + DBException); // Extra fields. - ASSERT_THROWS(MapReduce::parse(ctx, - BSON("mapReduce" - << "theSource" - << "map" << mapJavascript << "reduce" << reduceJavascript - << "out" << BSON("inline" << 1) << "alloy" - << "chromium steel" - << "$db" - << "db")), + ASSERT_THROWS(MapReduceCommandRequest::parse(ctx, + BSON("mapReduce" + << "theSource" + << "map" << mapJavascript << "reduce" + << reduceJavascript << "out" + << BSON("inline" << 1) << "alloy" + << "chromium steel" + << "$db" + << "db")), DBException); - ASSERT_THROWS(MapReduce::parse(ctx, - BSON("mapReduce" - << "theSource" - << "map" << mapJavascript << "reduce" << reduceJavascript - << "out" << BSON("inline" << 1 << "notinline" << 0) << "$db" - << "db")), + ASSERT_THROWS(MapReduceCommandRequest::parse( + ctx, + BSON("mapReduce" + << "theSource" + << "map" << mapJavascript << "reduce" << reduceJavascript << "out" + << BSON("inline" << 1 << "notinline" << 0) << "$db" + << "db")), DBException); } TEST(MapReduceParseTest, failsToParseCodeWithScope) { auto ctx = IDLParserErrorContext("mapReduce"); - ASSERT_THROWS(MapReduce::parse(ctx, - BSON("mapReduce" - << "theSource" - << "map" << mapJavascript << "reduce" - << BSONCodeWScope("var x = 3", BSONObj()) << "out" - << BSON("inline" << 1) << "$db" - << "db")), + ASSERT_THROWS(MapReduceCommandRequest::parse(ctx, + BSON("mapReduce" + << "theSource" + << "map" << mapJavascript << "reduce" + << BSONCodeWScope("var x = 3", BSONObj()) + << "out" << BSON("inline" << 1) << "$db" + << "db")), + DBException); + ASSERT_THROWS(MapReduceCommandRequest::parse( + ctx, + BSON("mapReduce" + << "theSource" + << "map" << BSONCodeWScope("var x = 3", BSONObj()) << "reduce" + << reduceJavascript << "out" << BSON("inline" << 1) << "$db" + << "db")), DBException); - ASSERT_THROWS( - MapReduce::parse(ctx, - BSON("mapReduce" - << "theSource" - << "map" << BSONCodeWScope("var x = 3", BSONObj()) << "reduce" - << reduceJavascript << "out" << BSON("inline" << 1) << "$db" - << "db")), - DBException); } TEST(MapReduceParseTest, parseOutputTypes) { auto ctx = IDLParserErrorContext("mapReduce"); - MapReduce::parse(ctx, - BSON("mapReduce" - << "theSource" - << "map" << mapJavascript << "reduce" << reduceJavascript << "out" - << BSON("inline" << 1) << "$db" - << "db")); - MapReduce::parse(ctx, - BSON("mapReduce" - << "theSource" - << "map" << mapJavascript << "reduce" << reduceJavascript << "out" - << "theSink" - << "$db" - << "db")); - MapReduce::parse(ctx, - BSON("mapReduce" - << "theSource" - << "map" << mapJavascript << "reduce" << reduceJavascript << "out" - << BSON("replace" - << "theSink" - << "db" - << "myDb") - << "$db" - << "db")); - MapReduce::parse(ctx, - BSON("mapReduce" - << "theSource" - << "map" << mapJavascript << "reduce" << reduceJavascript << "out" - << BSON("merge" - << 
"theSink") - << "$db" - << "db")); - MapReduce::parse(ctx, - BSON("mapReduce" - << "theSource" - << "map" << mapJavascript << "reduce" << reduceJavascript << "out" - << BSON("reduce" - << "theSink" - << "db" - << "myDb" - << "sharded" << true) - << "$db" - << "db")); + MapReduceCommandRequest::parse(ctx, + BSON("mapReduce" + << "theSource" + << "map" << mapJavascript << "reduce" << reduceJavascript + << "out" << BSON("inline" << 1) << "$db" + << "db")); + MapReduceCommandRequest::parse(ctx, + BSON("mapReduce" + << "theSource" + << "map" << mapJavascript << "reduce" << reduceJavascript + << "out" + << "theSink" + << "$db" + << "db")); + MapReduceCommandRequest::parse(ctx, + BSON("mapReduce" + << "theSource" + << "map" << mapJavascript << "reduce" << reduceJavascript + << "out" + << BSON("replace" + << "theSink" + << "db" + << "myDb") + << "$db" + << "db")); + MapReduceCommandRequest::parse(ctx, + BSON("mapReduce" + << "theSource" + << "map" << mapJavascript << "reduce" << reduceJavascript + << "out" + << BSON("merge" + << "theSink") + << "$db" + << "db")); + MapReduceCommandRequest::parse(ctx, + BSON("mapReduce" + << "theSource" + << "map" << mapJavascript << "reduce" << reduceJavascript + << "out" + << BSON("reduce" + << "theSink" + << "db" + << "myDb" + << "sharded" << true) + << "$db" + << "db")); ASSERT(true); } TEST(MapReduceParseTest, parseAllOptionalFields) { auto ctx = IDLParserErrorContext("mapReduce"); - MapReduce::parse(ctx, - BSON("mapReduce" - << "theSource" - << "map" << mapJavascript << "reduce" << reduceJavascript << "out" - << BSON("inline" << 1) << "query" - << BSON("author" - << "dave") - << "sort" << BSON("bottlecaps" << 1) << "collation" - << BSON("locale" - << "zh@collation=pinyin") - << "limit" << 86 << "finalize" << finalizeJavascript << "scope" - << BSON("global" << initJavascript) << "verbose" << false - << "bypassDocumentValidation" << true << "writeConcern" - << BSON("w" << 1 << "j" << false << "wtimeout" << 1498) << "$db" - << "db")); + MapReduceCommandRequest::parse(ctx, + BSON("mapReduce" + << "theSource" + << "map" << mapJavascript << "reduce" << reduceJavascript + << "out" << BSON("inline" << 1) << "query" + << BSON("author" + << "dave") + << "sort" << BSON("bottlecaps" << 1) << "collation" + << BSON("locale" + << "zh@collation=pinyin") + << "limit" << 86 << "finalize" << finalizeJavascript + << "scope" << BSON("global" << initJavascript) << "verbose" + << false << "bypassDocumentValidation" << true + << "writeConcern" + << BSON("w" << 1 << "j" << false << "wtimeout" << 1498) + << "$db" + << "db")); } TEST(MapReduceParseTest, deprecatedOptions) { auto ctx = IDLParserErrorContext("mapReduce"); // jsMode can be true or false - MapReduce::parse(ctx, - BSON("mapReduce" - << "theSource" - << "map" << mapJavascript << "reduce" << reduceJavascript << "out" - << BSON("inline" << 1) << "$db" - << "db" - << "jsMode" << true)); - MapReduce::parse(ctx, - BSON("mapReduce" - << "theSource" - << "map" << mapJavascript << "reduce" << reduceJavascript << "out" - << BSON("inline" << 1) << "$db" - << "db" - << "jsMode" << false)); + MapReduceCommandRequest::parse(ctx, + BSON("mapReduce" + << "theSource" + << "map" << mapJavascript << "reduce" << reduceJavascript + << "out" << BSON("inline" << 1) << "$db" + << "db" + << "jsMode" << true)); + MapReduceCommandRequest::parse(ctx, + BSON("mapReduce" + << "theSource" + << "map" << mapJavascript << "reduce" << reduceJavascript + << "out" << BSON("inline" << 1) << "$db" + << "db" + << "jsMode" << false)); // nonAtomic can be true but 
not false - MapReduce::parse(ctx, - BSON("mapReduce" - << "theSource" - << "map" << mapJavascript << "reduce" << reduceJavascript << "out" - << BSON("reduce" - << "theSink" - << "db" - << "myDb" - << "nonAtomic" << true) - << "$db" - << "db")); - ASSERT_THROWS( - MapReduce::parse(ctx, - BSON("mapReduce" - << "theSource" - << "map" << mapJavascript << "reduce" << reduceJavascript << "out" - << BSON("reduce" - << "theSink" - << "db" - << "myDb" - << "nonAtomic" << false) - << "$db" - << "db")), - DBException); - ASSERT_THROWS( - MapReduce::parse(ctx, - BSON("mapReduce" - << "theSource" - << "map" << mapJavascript << "reduce" << reduceJavascript << "out" - << BSON("reduce" - << "theSink" - << "db" - << "myDb" - << "nonAtomic" << false) - << "$db" - << "db")), - DBException); + MapReduceCommandRequest::parse(ctx, + BSON("mapReduce" + << "theSource" + << "map" << mapJavascript << "reduce" << reduceJavascript + << "out" + << BSON("reduce" + << "theSink" + << "db" + << "myDb" + << "nonAtomic" << true) + << "$db" + << "db")); + ASSERT_THROWS(MapReduceCommandRequest::parse(ctx, + BSON("mapReduce" + << "theSource" + << "map" << mapJavascript << "reduce" + << reduceJavascript << "out" + << BSON("reduce" + << "theSink" + << "db" + << "myDb" + << "nonAtomic" << false) + << "$db" + << "db")), + DBException); + ASSERT_THROWS(MapReduceCommandRequest::parse(ctx, + BSON("mapReduce" + << "theSource" + << "map" << mapJavascript << "reduce" + << reduceJavascript << "out" + << BSON("reduce" + << "theSink" + << "db" + << "myDb" + << "nonAtomic" << false) + << "$db" + << "db")), + DBException); // out.sharded cannot be false - ASSERT_THROWS( - MapReduce::parse(ctx, - BSON("mapReduce" - << "theSource" - << "map" << mapJavascript << "reduce" << reduceJavascript << "out" - << BSON("reduce" - << "theSink" - << "db" - << "myDb" - << "sharded" << false) - << "$db" - << "db")), - DBException); + ASSERT_THROWS(MapReduceCommandRequest::parse(ctx, + BSON("mapReduce" + << "theSource" + << "map" << mapJavascript << "reduce" + << reduceJavascript << "out" + << BSON("reduce" + << "theSink" + << "db" + << "myDb" + << "sharded" << false) + << "$db" + << "db")), + DBException); } } // namespace diff --git a/src/mongo/db/commands/mr_common.cpp b/src/mongo/db/commands/mr_common.cpp index db1708a8715..46a0a0e6284 100644 --- a/src/mongo/db/commands/mr_common.cpp +++ b/src/mongo/db/commands/mr_common.cpp @@ -62,7 +62,7 @@ namespace { using namespace std::string_literals; -Status interpretTranslationError(DBException* ex, const MapReduce& parsedMr) { +Status interpretTranslationError(DBException* ex, const MapReduceCommandRequest& parsedMr) { auto status = ex->toStatus(); auto outOptions = parsedMr.getOutOptions(); auto outNss = NamespaceString{outOptions.getDatabaseName() ? *outOptions.getDatabaseName() @@ -377,7 +377,7 @@ bool mrSupportsWriteConcern(const BSONObj& cmd) { } std::unique_ptr<Pipeline, PipelineDeleter> translateFromMR( - MapReduce parsedMr, boost::intrusive_ptr<ExpressionContext> expCtx) { + MapReduceCommandRequest parsedMr, boost::intrusive_ptr<ExpressionContext> expCtx) { auto outNss = NamespaceString{parsedMr.getOutOptions().getDatabaseName() ? 
*parsedMr.getOutOptions().getDatabaseName() : parsedMr.getNamespace().db(), diff --git a/src/mongo/db/commands/mr_common.h b/src/mongo/db/commands/mr_common.h index 5e5c6dc55e7..344d819841f 100644 --- a/src/mongo/db/commands/mr_common.h +++ b/src/mongo/db/commands/mr_common.h @@ -66,6 +66,6 @@ bool mrSupportsWriteConcern(const BSONObj& cmd); * returned pipeline does *not* contain a $cursor stage and thus is not runnable. */ std::unique_ptr<Pipeline, PipelineDeleter> translateFromMR( - MapReduce parsedMr, boost::intrusive_ptr<ExpressionContext> expCtx); + MapReduceCommandRequest parsedMr, boost::intrusive_ptr<ExpressionContext> expCtx); } // namespace mongo::map_reduce_common diff --git a/src/mongo/db/commands/pipeline_command.cpp b/src/mongo/db/commands/pipeline_command.cpp index 507ac3a9301..beed004686e 100644 --- a/src/mongo/db/commands/pipeline_command.cpp +++ b/src/mongo/db/commands/pipeline_command.cpp @@ -101,7 +101,7 @@ public: public: Invocation(Command* cmd, const OpMsgRequest& request, - const AggregateCommand aggregationRequest, + const AggregateCommandRequest aggregationRequest, PrivilegeVector privileges) : CommandInvocation(cmd), _request(request), @@ -180,7 +180,7 @@ public: const OpMsgRequest& _request; const std::string _dbName; - const AggregateCommand _aggregationRequest; + const AggregateCommandRequest _aggregationRequest; const LiteParsedPipeline _liteParsedPipeline; const PrivilegeVector _privileges; }; @@ -201,7 +201,7 @@ public: } const AuthorizationContract* getAuthorizationContract() const final { - return &::mongo::AggregateCommand::kAuthorizationContract; + return &::mongo::AggregateCommandRequest::kAuthorizationContract; } } pipelineCmd; diff --git a/src/mongo/db/commands/plan_cache_clear_command.cpp b/src/mongo/db/commands/plan_cache_clear_command.cpp index 69c632bdc0a..b49a54fe416 100644 --- a/src/mongo/db/commands/plan_cache_clear_command.cpp +++ b/src/mongo/db/commands/plan_cache_clear_command.cpp @@ -86,9 +86,9 @@ Status clear(OperationContext* opCtx, "Query shape doesn't exist in PlanCache", "namespace"_attr = ns, "query"_attr = redact(cq->getQueryObj()), - "sort"_attr = cq->getFindCommand().getSort(), - "projection"_attr = cq->getFindCommand().getProjection(), - "collation"_attr = cq->getFindCommand().getCollation()); + "sort"_attr = cq->getFindCommandRequest().getSort(), + "projection"_attr = cq->getFindCommandRequest().getProjection(), + "collation"_attr = cq->getFindCommandRequest().getCollation()); return Status::OK(); } @@ -99,9 +99,9 @@ Status clear(OperationContext* opCtx, "Removed plan cache entry", "namespace"_attr = ns, "query"_attr = redact(cq->getQueryObj()), - "sort"_attr = cq->getFindCommand().getSort(), - "projection"_attr = cq->getFindCommand().getProjection(), - "collation"_attr = cq->getFindCommand().getCollation()); + "sort"_attr = cq->getFindCommandRequest().getSort(), + "projection"_attr = cq->getFindCommandRequest().getProjection(), + "collation"_attr = cq->getFindCommandRequest().getCollation()); return Status::OK(); } diff --git a/src/mongo/db/commands/plan_cache_commands.cpp b/src/mongo/db/commands/plan_cache_commands.cpp index 7ac23d5911b..9aa4ca30bb7 100644 --- a/src/mongo/db/commands/plan_cache_commands.cpp +++ b/src/mongo/db/commands/plan_cache_commands.cpp @@ -85,7 +85,7 @@ StatusWith<std::unique_ptr<CanonicalQuery>> canonicalize(OperationContext* opCtx } // Create canonical query - auto findCommand = std::make_unique<FindCommand>(NamespaceString{ns}); + auto findCommand = 
std::make_unique<FindCommandRequest>(NamespaceString{ns}); findCommand->setFilter(queryObj.getOwned()); findCommand->setSort(sortObj.getOwned()); findCommand->setProjection(projObj.getOwned()); diff --git a/src/mongo/db/commands/run_aggregate.cpp b/src/mongo/db/commands/run_aggregate.cpp index 96da0fda23a..7ba72ab079d 100644 --- a/src/mongo/db/commands/run_aggregate.cpp +++ b/src/mongo/db/commands/run_aggregate.cpp @@ -101,7 +101,7 @@ namespace { */ bool canOptimizeAwayPipeline(const Pipeline* pipeline, const PlanExecutor* exec, - const AggregateCommand& request, + const AggregateCommandRequest& request, bool hasGeoNearStage, bool hasChangeStreamStage) { return pipeline && exec && !hasGeoNearStage && !hasChangeStreamStage && @@ -121,7 +121,7 @@ bool handleCursorCommand(OperationContext* opCtx, boost::intrusive_ptr<ExpressionContext> expCtx, const NamespaceString& nsForCursor, std::vector<ClientCursor*> cursors, - const AggregateCommand& request, + const AggregateCommandRequest& request, const BSONObj& cmdObj, rpc::ReplyBuilderInterface* result) { invariant(!cursors.empty()); @@ -266,7 +266,7 @@ bool handleCursorCommand(OperationContext* opCtx, } StatusWith<StringMap<ExpressionContext::ResolvedNamespace>> resolveInvolvedNamespaces( - OperationContext* opCtx, const AggregateCommand& request) { + OperationContext* opCtx, const AggregateCommandRequest& request) { const LiteParsedPipeline liteParsedPipeline(request); const auto& pipelineInvolvedNamespaces = liteParsedPipeline.getInvolvedNamespaces(); @@ -418,7 +418,7 @@ Status collatorCompatibleWithPipeline(OperationContext* opCtx, // versioned. This can happen in the case where we are running in a cluster with a 4.4 mongoS, which // does not set any shard version on a $mergeCursors pipeline. void setIgnoredShardVersionForMergeCursors(OperationContext* opCtx, - const AggregateCommand& request) { + const AggregateCommandRequest& request) { auto isMergeCursors = request.getFromMongos() && request.getPipeline().size() > 0 && request.getPipeline().front().firstElementFieldNameStringData() == "$mergeCursors"_sd; if (isMergeCursors && !OperationShardingState::isOperationVersioned(opCtx)) { @@ -429,7 +429,7 @@ void setIgnoredShardVersionForMergeCursors(OperationContext* opCtx, boost::intrusive_ptr<ExpressionContext> makeExpressionContext( OperationContext* opCtx, - const AggregateCommand& request, + const AggregateCommandRequest& request, std::unique_ptr<CollatorInterface> collator, boost::optional<UUID> uuid) { setIgnoredShardVersionForMergeCursors(opCtx, request); @@ -489,7 +489,7 @@ void _adjustChangeStreamReadConcern(OperationContext* opCtx) { std::vector<std::unique_ptr<Pipeline, PipelineDeleter>> createExchangePipelinesIfNeeded( OperationContext* opCtx, boost::intrusive_ptr<ExpressionContext> expCtx, - const AggregateCommand& request, + const AggregateCommandRequest& request, std::unique_ptr<Pipeline, PipelineDeleter> pipeline, boost::optional<UUID> uuid) { std::vector<std::unique_ptr<Pipeline, PipelineDeleter>> pipelines; @@ -526,11 +526,11 @@ std::vector<std::unique_ptr<Pipeline, PipelineDeleter>> createExchangePipelinesI * Performs validations related to API versioning and time-series stages. 
* Throws UserAssertion if any of the validations fails * - validation of API versioning on each stage on the pipeline - * - validation of API versioning on 'AggregateCommand' request + * - validation of API versioning on 'AggregateCommandRequest' request * - validation of time-series related stages */ void performValidationChecks(const OperationContext* opCtx, - const AggregateCommand& request, + const AggregateCommandRequest& request, const LiteParsedPipeline& liteParsedPipeline) { liteParsedPipeline.validate(opCtx); aggregation_request_helper::validateRequestForAPIVersion(opCtx, request); @@ -540,7 +540,7 @@ void performValidationChecks(const OperationContext* opCtx, Status runAggregate(OperationContext* opCtx, const NamespaceString& nss, - const AggregateCommand& request, + const AggregateCommandRequest& request, const BSONObj& cmdObj, const PrivilegeVector& privileges, rpc::ReplyBuilderInterface* result) { @@ -549,7 +549,7 @@ Status runAggregate(OperationContext* opCtx, Status runAggregate(OperationContext* opCtx, const NamespaceString& origNss, - const AggregateCommand& request, + const AggregateCommandRequest& request, const LiteParsedPipeline& liteParsedPipeline, const BSONObj& cmdObj, const PrivilegeVector& privileges, @@ -592,7 +592,7 @@ Status runAggregate(OperationContext* opCtx, // If this is a change stream, perform special checks and change the execution namespace. if (liteParsedPipeline.hasChangeStream()) { uassert(4928900, - str::stream() << AggregateCommand::kCollectionUUIDFieldName + str::stream() << AggregateCommandRequest::kCollectionUUIDFieldName << " is not supported for a change stream", !request.getCollectionUUID()); @@ -629,7 +629,7 @@ Status runAggregate(OperationContext* opCtx, ctx.emplace(opCtx, nss, AutoGetCollectionViewMode::kViewsForbidden); } else if (nss.isCollectionlessAggregateNS() && pipelineInvolvedNamespaces.empty()) { uassert(4928901, - str::stream() << AggregateCommand::kCollectionUUIDFieldName + str::stream() << AggregateCommandRequest::kCollectionUUIDFieldName << " is not supported for a collectionless aggregation", !request.getCollectionUUID()); @@ -662,7 +662,7 @@ Status runAggregate(OperationContext* opCtx, invariant(nss != NamespaceString::kRsOplogNamespace); invariant(!nss.isCollectionlessAggregateNS()); uassert(ErrorCodes::OptionNotSupportedOnView, - str::stream() << AggregateCommand::kCollectionUUIDFieldName + str::stream() << AggregateCommandRequest::kCollectionUUIDFieldName << " is not supported against a view", !request.getCollectionUUID()); diff --git a/src/mongo/db/commands/run_aggregate.h b/src/mongo/db/commands/run_aggregate.h index 30376367e83..ea7873dade6 100644 --- a/src/mongo/db/commands/run_aggregate.h +++ b/src/mongo/db/commands/run_aggregate.h @@ -53,7 +53,7 @@ namespace mongo { */ Status runAggregate(OperationContext* opCtx, const NamespaceString& nss, - const AggregateCommand& request, + const AggregateCommandRequest& request, const LiteParsedPipeline& liteParsedPipeline, const BSONObj& cmdObj, const PrivilegeVector& privileges, @@ -64,7 +64,7 @@ Status runAggregate(OperationContext* opCtx, */ Status runAggregate(OperationContext* opCtx, const NamespaceString& nss, - const AggregateCommand& request, + const AggregateCommandRequest& request, const BSONObj& cmdObj, const PrivilegeVector& privileges, rpc::ReplyBuilderInterface* result); diff --git a/src/mongo/db/commands/rwc_defaults_commands.cpp b/src/mongo/db/commands/rwc_defaults_commands.cpp index 90f817f29c5..ef2ce1272d2 100644 --- 
a/src/mongo/db/commands/rwc_defaults_commands.cpp +++ b/src/mongo/db/commands/rwc_defaults_commands.cpp @@ -54,7 +54,7 @@ namespace { void updatePersistedDefaultRWConcernDocument(OperationContext* opCtx, const RWConcernDefault& rw) { DBDirectClient client(opCtx); const auto commandResponse = client.runCommand([&] { - write_ops::Update updateOp(NamespaceString::kConfigSettingsNamespace); + write_ops::UpdateCommandRequest updateOp(NamespaceString::kConfigSettingsNamespace); updateOp.setUpdates({[&] { write_ops::UpdateOpEntry entry; entry.setQ(BSON("_id" << ReadWriteConcernDefaults::kPersistedDocumentId)); diff --git a/src/mongo/db/commands/set_feature_compatibility_version_command.cpp b/src/mongo/db/commands/set_feature_compatibility_version_command.cpp index 38198ad1aff..1869090bc4e 100644 --- a/src/mongo/db/commands/set_feature_compatibility_version_command.cpp +++ b/src/mongo/db/commands/set_feature_compatibility_version_command.cpp @@ -81,7 +81,7 @@ MONGO_FAIL_POINT_DEFINE(hangWhileDowngrading); void deletePersistedDefaultRWConcernDocument(OperationContext* opCtx) { DBDirectClient client(opCtx); const auto commandResponse = client.runCommand([&] { - write_ops::Delete deleteOp(NamespaceString::kConfigSettingsNamespace); + write_ops::DeleteCommandRequest deleteOp(NamespaceString::kConfigSettingsNamespace); deleteOp.setDeletes({[&] { write_ops::DeleteOpEntry entry; entry.setQ(BSON("_id" << ReadWriteConcernDefaults::kPersistedDocumentId)); diff --git a/src/mongo/db/commands/update_metrics.cpp b/src/mongo/db/commands/update_metrics.cpp index 938a29944aa..357920aeb7e 100644 --- a/src/mongo/db/commands/update_metrics.cpp +++ b/src/mongo/db/commands/update_metrics.cpp @@ -58,7 +58,7 @@ void UpdateMetrics::collectMetrics(const BSONObj& cmdObj) { } } -void UpdateMetrics::collectMetrics(const write_ops::FindAndModifyCommand& cmd) { +void UpdateMetrics::collectMetrics(const write_ops::FindAndModifyCommandRequest& cmd) { if (auto update = cmd.getUpdate()) { if (update->type() == write_ops::UpdateModification::Type::kPipeline) { _commandsWithAggregationPipeline.increment(); diff --git a/src/mongo/db/commands/update_metrics.h b/src/mongo/db/commands/update_metrics.h index b237c9632a2..c6732762778 100644 --- a/src/mongo/db/commands/update_metrics.h +++ b/src/mongo/db/commands/update_metrics.h @@ -68,7 +68,7 @@ public: /** * Increments update metrics corresponding to the supplied parameters. 
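The write_ops request types (Insert/Update/Delete) get the same treatment, as in the rwc_defaults and setFeatureCompatibilityVersion hunks above and the user_management hunks below. A condensed sketch of the renamed update path; nss, query, and the classic update document 'update' are assumed to be in scope:

    // Sketch only: condensed from the update paths shown in this patch.
    write_ops::UpdateCommandRequest updateOp(nss);  // was: write_ops::Update
    write_ops::UpdateOpEntry entry;
    entry.setQ(query);
    entry.setU(write_ops::UpdateModification::parseFromClassicUpdate(update));
    entry.setMulti(true);
    updateOp.setUpdates({entry});
    auto cmd = updateOp.toBSON({});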
*/ - void collectMetrics(const write_ops::FindAndModifyCommand& cmd); + void collectMetrics(const write_ops::FindAndModifyCommandRequest& cmd); private: // A counter for how many times this command has been executed with an aggregation diff --git a/src/mongo/db/commands/user_management_commands.cpp b/src/mongo/db/commands/user_management_commands.cpp index adae82a5251..855c63acb60 100644 --- a/src/mongo/db/commands/user_management_commands.cpp +++ b/src/mongo/db/commands/user_management_commands.cpp @@ -243,7 +243,7 @@ Status insertAuthzDocument(OperationContext* opCtx, BSONObj res; client.runCommand(collectionName.db().toString(), [&] { - write_ops::Insert insertOp(collectionName); + write_ops::InsertCommandRequest insertOp(collectionName); insertOp.setDocuments({document}); return insertOp.toBSON({}); }(), @@ -279,7 +279,7 @@ Status updateAuthzDocuments(OperationContext* opCtx, BSONObj res; client.runCommand(collectionName.db().toString(), [&] { - write_ops::Update updateOp(collectionName); + write_ops::UpdateCommandRequest updateOp(collectionName); updateOp.setUpdates({[&] { write_ops::UpdateOpEntry entry; entry.setQ(query); @@ -353,7 +353,7 @@ Status removeAuthzDocuments(OperationContext* opCtx, BSONObj res; client.runCommand(collectionName.db().toString(), [&] { - write_ops::Delete deleteOp(collectionName); + write_ops::DeleteCommandRequest deleteOp(collectionName); deleteOp.setDeletes({[&] { write_ops::DeleteOpEntry entry; entry.setQ(query); @@ -790,7 +790,7 @@ public: StatusWith<std::uint32_t> insert(const NamespaceString& nss, const std::vector<BSONObj>& docs) { dassert(nss.db() == kAdminDB); - write_ops::Insert op(nss); + write_ops::InsertCommandRequest op(nss); op.setDocuments(docs); return doCrudOp(op.toBSON({})); } @@ -801,7 +801,7 @@ public: entry.setQ(query); entry.setU(write_ops::UpdateModification::parseFromClassicUpdate(update)); entry.setMulti(true); - write_ops::Update op(nss); + write_ops::UpdateCommandRequest op(nss); op.setUpdates({entry}); return doCrudOp(op.toBSON({})); } @@ -811,7 +811,7 @@ public: write_ops::DeleteOpEntry entry; entry.setQ(query); entry.setMulti(true); - write_ops::Delete op(nss); + write_ops::DeleteCommandRequest op(nss); op.setDeletes({entry}); return doCrudOp(op.toBSON({})); } @@ -1420,8 +1420,8 @@ UsersInfoReply CmdUMCTyped<UsersInfoCommand, UsersInfoReply, UMCInfoParams>::Inv DBDirectClient client(opCtx); rpc::OpMsgReplyBuilder replyBuilder; - AggregateCommand aggRequest(AuthorizationManager::usersCollectionNamespace, - std::move(pipeline)); + AggregateCommandRequest aggRequest(AuthorizationManager::usersCollectionNamespace, + std::move(pipeline)); // Impose no cursor privilege requirements, as cursor is drained internally uassertStatusOK(runAggregate(opCtx, AuthorizationManager::usersCollectionNamespace, diff --git a/src/mongo/db/commands/validate_db_metadata.idl b/src/mongo/db/commands/validate_db_metadata.idl index c44a9dd1807..b66e3ac557c 100644 --- a/src/mongo/db/commands/validate_db_metadata.idl +++ b/src/mongo/db/commands/validate_db_metadata.idl @@ -46,7 +46,7 @@ structs: codeName: string errmsg: string - ValidateDBMetadataReply: + ValidateDBMetadataCommandReply: description: "The validateDBMetadata command's reply." fields: apiVersionErrors: @@ -68,9 +68,10 @@ commands: validateDBMetadata: description: "Input request for validateDBMetadata command." 
command_name: validateDBMetadata + cpp_name: ValidateDBMetadataCommandRequest namespace: ignored api_version: "" - reply_type: ValidateDBMetadataReply + reply_type: ValidateDBMetadataCommandReply fields: db: type: string diff --git a/src/mongo/db/commands/validate_db_metadata_cmd.cpp b/src/mongo/db/commands/validate_db_metadata_cmd.cpp index 6ed59358880..8ef1d91fd7a 100644 --- a/src/mongo/db/commands/validate_db_metadata_cmd.cpp +++ b/src/mongo/db/commands/validate_db_metadata_cmd.cpp @@ -68,8 +68,8 @@ class ValidateDBMetadataCmd : public TypedCommand<ValidateDBMetadataCmd> { typename TypedCommand<ValidateDBMetadataCmd>::InvocationBase; public: - using Request = ValidateDBMetadata; - using Reply = ValidateDBMetadataReply; + using Request = ValidateDBMetadataCommandRequest; + using Reply = ValidateDBMetadataCommandReply; AllowedOnSecondary secondaryAllowed(ServiceContext*) const override { return AllowedOnSecondary::kAlways; @@ -236,7 +236,7 @@ public: ValidateDBMetadataSizeTracker _sizeTracker; std::vector<ErrorReplyElement> apiVersionErrors; - ValidateDBMetadataReply _reply; + ValidateDBMetadataCommandReply _reply; }; } validateDBMetadataCmd; } // namespace mongo diff --git a/src/mongo/db/commands/validate_db_metadata_common.h b/src/mongo/db/commands/validate_db_metadata_common.h index 52e336b0dbe..a3b76ab6ab5 100644 --- a/src/mongo/db/commands/validate_db_metadata_common.h +++ b/src/mongo/db/commands/validate_db_metadata_common.h @@ -49,7 +49,8 @@ private: size_t currentSize = 0; }; -void assertUserCanRunValidate(OperationContext* opCtx, const ValidateDBMetadata& request) { +void assertUserCanRunValidate(OperationContext* opCtx, + const ValidateDBMetadataCommandRequest& request) { const auto resource = request.getDb() ? ResourcePattern::forDatabaseName(*request.getDb()) : ResourcePattern::forAnyNormalResource(); uassert(ErrorCodes::Unauthorized, diff --git a/src/mongo/db/commands/write_commands.cpp b/src/mongo/db/commands/write_commands.cpp index 33a5598bf02..241fe8f44d9 100644 --- a/src/mongo/db/commands/write_commands.cpp +++ b/src/mongo/db/commands/write_commands.cpp @@ -401,7 +401,7 @@ void populateReply(OperationContext* opCtx, hooks->singleWriteResultHandler(opResult, i); } - auto& replyBase = cmdReply->getWriteReplyBase(); + auto& replyBase = cmdReply->getWriteCommandReplyBase(); replyBase.setN(nVal); if (!errors.empty()) { @@ -485,9 +485,9 @@ public: return request().getNamespace(); } - write_ops::InsertReply typedRun(OperationContext* opCtx) final try { + write_ops::InsertCommandReply typedRun(OperationContext* opCtx) final try { transactionChecks(opCtx, ns()); - write_ops::InsertReply insertReply; + write_ops::InsertCommandReply insertReply; if (isTimeseries(opCtx, ns())) { // Re-throw parsing exceptions to be consistent with CmdInsert::Invocation's @@ -504,7 +504,7 @@ public: auto reply = write_ops_exec::performInserts(opCtx, request()); populateReply(opCtx, - !request().getWriteCommandBase().getOrdered(), + !request().getWriteCommandRequestBase().getOrdered(), request().getDocuments().size(), std::move(reply), &insertReply); @@ -547,21 +547,22 @@ public: auto bucketsNs = ns().makeTimeseriesBucketsNamespace(); BSONObjBuilder builder; - builder.append(write_ops::Insert::kCommandName, bucketsNs.coll()); + builder.append(write_ops::InsertCommandRequest::kCommandName, bucketsNs.coll()); // The schema validation configured in the bucket collection is intended for direct // operations by end users and is not applicable here. 
-        builder.append(write_ops::Insert::kBypassDocumentValidationFieldName, true);
+        builder.append(write_ops::InsertCommandRequest::kBypassDocumentValidationFieldName,
+                       true);

        if (stmtIds) {
-            builder.append(write_ops::Insert::kStmtIdsFieldName, *stmtIds);
+            builder.append(write_ops::InsertCommandRequest::kStmtIdsFieldName, *stmtIds);
        }

-        builder.append(write_ops::Insert::kDocumentsFieldName,
+        builder.append(write_ops::InsertCommandRequest::kDocumentsFieldName,
                       makeTimeseriesInsertDocument(batch, metadata));

        auto request = OpMsgRequest::fromDBAndBody(bucketsNs.db(), builder.obj());
-        auto timeseriesInsertBatch =
-            write_ops::Insert::parse({"CmdInsert::_performTimeseriesInsert"}, request);
+        auto timeseriesInsertBatch = write_ops::InsertCommandRequest::parse(
+            {"CmdInsert::_performTimeseriesInsert"}, request);

        return _getTimeseriesSingleWriteResult(write_ops_exec::performInserts(
            opCtx, timeseriesInsertBatch, OperationSource::kTimeseries));
@@ -578,10 +579,10 @@ public:
        }

        auto update = makeTimeseriesUpdateOpEntry(batch, metadata);
-        write_ops::Update timeseriesUpdateBatch(ns().makeTimeseriesBucketsNamespace(),
-                                                {update});
+        write_ops::UpdateCommandRequest timeseriesUpdateBatch(
+            ns().makeTimeseriesBucketsNamespace(), {update});

-        write_ops::WriteCommandBase writeCommandBase;
+        write_ops::WriteCommandRequestBase writeCommandBase;
        // The schema validation configured in the bucket collection is intended for direct
        // operations by end users and is not applicable here.
        writeCommandBase.setBypassDocumentValidation(true);
@@ -590,7 +591,7 @@
            writeCommandBase.setStmtIds(*stmtIds);
        }

-        timeseriesUpdateBatch.setWriteCommandBase(std::move(writeCommandBase));
+        timeseriesUpdateBatch.setWriteCommandRequestBase(std::move(writeCommandBase));

        return _getTimeseriesSingleWriteResult(write_ops_exec::performUpdates(
            opCtx, timeseriesUpdateBatch, OperationSource::kTimeseries));
@@ -805,7 +806,7 @@ public:
    }

    void _performTimeseriesWrites(OperationContext* opCtx,
-                                  write_ops::InsertReply* insertReply) const {
+                                  write_ops::InsertCommandReply* insertReply) const {
        auto& curOp = *CurOp::get(opCtx);
        ON_BLOCK_EXIT([&] {
            // This is the only part of finishCurOp we need to do for inserts because they reuse
@@ -826,7 +827,7 @@ public:
        boost::optional<OID> electionId;
        bool containsRetry = false;

-        auto& baseReply = insertReply->getWriteReplyBase();
+        auto& baseReply = insertReply->getWriteCommandReplyBase();

        if (request().getOrdered()) {
            baseReply.setN(request().getDocuments().size());
@@ -952,10 +953,10 @@ public:
            bob->append("singleBatch", true);
        }

-    write_ops::UpdateReply typedRun(OperationContext* opCtx) final try {
+    write_ops::UpdateCommandReply typedRun(OperationContext* opCtx) final try {
        transactionChecks(opCtx, ns());

-        write_ops::UpdateReply updateReply;
+        write_ops::UpdateCommandReply updateReply;
        long long nModified = 0;

        // Tracks the upserted information. The memory of this variable gets moved in the
@@ -981,7 +982,7 @@ public:
        };

        populateReply(opCtx,
-                      !request().getWriteCommandBase().getOrdered(),
+                      !request().getWriteCommandRequestBase().getOrdered(),
                      request().getUpdates().size(),
                      std::move(reply),
                      &updateReply,
@@ -993,8 +994,8 @@ public:
            // which stages were being used.
            auto& updateMod = update.getU();
            if (updateMod.type() == write_ops::UpdateModification::Type::kPipeline) {
-                AggregateCommand aggCmd(request().getNamespace(),
-                                        updateMod.getUpdatePipeline());
+                AggregateCommandRequest aggCmd(request().getNamespace(),
+                                               updateMod.getUpdatePipeline());
                LiteParsedPipeline pipeline(aggCmd);
                pipeline.tickGlobalStageCounters();
                CmdUpdate::updateMetrics.incrementExecutedWithAggregationPipeline();
@@ -1114,14 +1115,14 @@ public:
        return request().getNamespace();
    }

-    write_ops::DeleteReply typedRun(OperationContext* opCtx) final try {
+    write_ops::DeleteCommandReply typedRun(OperationContext* opCtx) final try {
        transactionChecks(opCtx, ns());

-        write_ops::DeleteReply deleteReply;
+        write_ops::DeleteCommandReply deleteReply;

        auto reply = write_ops_exec::performDeletes(opCtx, request());

        populateReply(opCtx,
-                      !request().getWriteCommandBase().getOrdered(),
+                      !request().getWriteCommandRequestBase().getOrdered(),
                      request().getDeletes().size(),
                      std::move(reply),
                      &deleteReply);
diff --git a/src/mongo/db/commands/write_commands_common.cpp b/src/mongo/db/commands/write_commands_common.cpp
index bb16789da55..ac39adca706 100644
--- a/src/mongo/db/commands/write_commands_common.cpp
+++ b/src/mongo/db/commands/write_commands_common.cpp
@@ -63,13 +63,13 @@ NamespaceString _getIndexedNss(const std::vector<BSONObj>& documents) {
    return NamespaceString(std::move(ns));
}

-void fillPrivileges(const write_ops::Insert& op,
+void fillPrivileges(const write_ops::InsertCommandRequest& op,
                    std::vector<Privilege>* privileges,
                    ActionSet* actions) {
    actions->addAction(ActionType::insert);
}

-void fillPrivileges(const write_ops::Update& op,
+void fillPrivileges(const write_ops::UpdateCommandRequest& op,
                    std::vector<Privilege>* privileges,
                    ActionSet* actions) {
    actions->addAction(ActionType::update);
@@ -80,7 +80,7 @@ void fillPrivileges(const write_ops::Update& op,
    }
}

-void fillPrivileges(const write_ops::Delete& op,
+void fillPrivileges(const write_ops::DeleteCommandRequest& op,
                    std::vector<Privilege>* privileges,
                    ActionSet* actions) {
    actions->addAction(ActionType::remove);
@@ -109,19 +109,19 @@ void checkAuthorizationImpl(AuthorizationSession* authzSession,

void checkAuthForInsertCommand(AuthorizationSession* authzSession,
                               bool withDocumentValidationBypass,
-                               const write_ops::Insert& op) {
+                               const write_ops::InsertCommandRequest& op) {
    checkAuthorizationImpl(authzSession, withDocumentValidationBypass, op);
}

void checkAuthForUpdateCommand(AuthorizationSession* authzSession,
                               bool withDocumentValidationBypass,
-                               const write_ops::Update& op) {
+                               const write_ops::UpdateCommandRequest& op) {
    checkAuthorizationImpl(authzSession, withDocumentValidationBypass, op);
}

void checkAuthForDeleteCommand(AuthorizationSession* authzSession,
                               bool withDocumentValidationBypass,
-                               const write_ops::Delete& op) {
+                               const write_ops::DeleteCommandRequest& op) {
    checkAuthorizationImpl(authzSession, withDocumentValidationBypass, op);
}

diff --git a/src/mongo/db/commands/write_commands_common.h b/src/mongo/db/commands/write_commands_common.h
index 62561d7bb31..088c0c8f690 100644
--- a/src/mongo/db/commands/write_commands_common.h
+++ b/src/mongo/db/commands/write_commands_common.h
@@ -43,13 +43,13 @@ namespace auth {
void checkAuthForInsertCommand(AuthorizationSession* authzSession,
                               bool withDocumentValidationBypass,
-                               const write_ops::Insert& op);
+                               const write_ops::InsertCommandRequest& op);

void checkAuthForUpdateCommand(AuthorizationSession* authzSession,
                               bool withDocumentValidationBypass,
-                               const write_ops::Update& op);
+                               const write_ops::UpdateCommandRequest& op);

void checkAuthForDeleteCommand(AuthorizationSession* authzSession,
                               bool withDocumentValidationBypass,
-                               const write_ops::Delete& op);
+                               const write_ops::DeleteCommandRequest& op);
} // namespace auth
} // namespace mongo
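
Taken together, these hunks are a mechanical rename of the IDL-generated command types: the write_ops::Insert/Update/Delete requests become InsertCommandRequest/UpdateCommandRequest/DeleteCommandRequest, the matching replies gain a CommandReply suffix, and accessors such as getWriteCommandBase()/getWriteReplyBase() follow suit as getWriteCommandRequestBase()/getWriteCommandReplyBase(). The construction, setter, and toBSON()/parse() surface is unchanged. Below is a minimal sketch of the calling pattern the diff rewrites repeatedly, mirroring the insertAuthzDocument() hunk above; the header paths and the "test.coll" namespace are assumptions for illustration, not taken from the diff.

#include <vector>

#include "mongo/bson/bsonobj.h"         // assumed header path
#include "mongo/db/namespace_string.h"  // assumed header path
#include "mongo/db/ops/write_ops.h"     // assumed header path for the IDL-generated write_ops types

namespace mongo {

// Build an insert command body the same way insertAuthzDocument() does above,
// using the renamed request type.
BSONObj buildInsertCommand(const std::vector<BSONObj>& docs) {
    const NamespaceString nss("test.coll");  // hypothetical target namespace

    write_ops::InsertCommandRequest insertOp(nss);  // formerly write_ops::Insert
    insertOp.setDocuments(docs);
    return insertOp.toBSON({});  // serialization is unchanged by the rename
}

}  // namespace mongo

The reply side follows the same pattern: code that previously called getWriteReplyBase() on an InsertReply/UpdateReply/DeleteReply now calls getWriteCommandReplyBase() on the corresponding *CommandReply type, with no behavioral change implied by this diff.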