author | Waley Chen <waleycz@gmail.com> | 2016-08-03 15:54:57 -0400
committer | Waley Chen <waleycz@gmail.com> | 2016-08-03 15:54:57 -0400
commit | 05e1c33649e08ec3736121254da7b29a73934788 (patch)
tree | 591de8a744ed1e31535038474247102e540c209d /src/mongo/db
parent | 1aeb9f04c0cdaaa4832ada812797b50456986baf (diff)
download | mongo-05e1c33649e08ec3736121254da7b29a73934788.tar.gz
SERVER-24067 TaskExecutor RemoteCommandCallbackArgs should include elapsedMS and metadata
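A minimal sketch (not part of this commit) of the callback pattern the diff below converges on: `RemoteCommandCallbackArgs` now carries the `executor::RemoteCommandResponse` directly, so handlers read `.status`, `.data`, `.metadata`, and `.elapsedMillis` instead of going through `StatusWith<...>::getStatus()/getValue()`. The handler name, log messages, and include/define choices here are illustrative assumptions, not code from the change itself.

```cpp
// Hypothetical consumer of the new RemoteCommandCallbackArgs shape (sketch only).
#define MONGO_LOG_DEFAULT_COMPONENT ::mongo::logger::LogComponent::kReplication

#include "mongo/executor/task_executor.h"
#include "mongo/util/log.h"

namespace mongo {
namespace {

void onRemoteCommandFinished(const executor::TaskExecutor::RemoteCommandCallbackArgs& cbData) {
    // The outcome of the remote command is now a plain Status member.
    if (cbData.response.status == ErrorCodes::CallbackCanceled) {
        return;  // executor shut down or the command was cancelled
    }
    if (!cbData.response.isOK()) {
        warning() << "remote command to " << cbData.request.target
                  << " failed: " << cbData.response.status;
        return;
    }
    // Command reply, reply metadata, and round-trip time are available directly.
    const BSONObj& reply = cbData.response.data;
    const BSONObj& metadata = cbData.response.metadata;
    const Milliseconds rtt = cbData.response.elapsedMillis.value_or(Milliseconds{0});
    LOG(2) << "reply: " << reply << " metadata: " << metadata
           << " took: " << durationCount<Milliseconds>(rtt) << "ms";
}

}  // namespace
}  // namespace mongo
```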
Diffstat (limited to 'src/mongo/db')
-rw-r--r-- | src/mongo/db/repl/base_cloner_test_fixture.cpp | 2
-rw-r--r-- | src/mongo/db/repl/check_quorum_for_config_change.cpp | 8
-rw-r--r-- | src/mongo/db/repl/check_quorum_for_config_change_test.cpp | 22
-rw-r--r-- | src/mongo/db/repl/databases_cloner.cpp | 6
-rw-r--r-- | src/mongo/db/repl/elect_cmd_runner.cpp | 4
-rw-r--r-- | src/mongo/db/repl/freshness_checker.cpp | 5
-rw-r--r-- | src/mongo/db/repl/freshness_scanner.cpp | 4
-rw-r--r-- | src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp | 14
-rw-r--r-- | src/mongo/db/repl/replication_executor.cpp | 2
-rw-r--r-- | src/mongo/db/repl/replication_executor.h | 2
-rw-r--r-- | src/mongo/db/repl/reporter.cpp | 4
-rw-r--r-- | src/mongo/db/repl/reporter_test.cpp | 16
-rw-r--r-- | src/mongo/db/repl/rollback_checker.cpp | 12
-rw-r--r-- | src/mongo/db/repl/scatter_gather_runner.cpp | 2
-rw-r--r-- | src/mongo/db/repl/vote_requester.cpp | 11
-rw-r--r-- | src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp | 8
16 files changed, 58 insertions, 64 deletions
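On the test side, the diff below also shows the producer half of the same API: mock responses for `NetworkInterfaceMock` are now built directly as `ResponseStatus` values. The helper below is a hypothetical sketch of that pattern, not part of the commit; the function name is invented, and the includes are assumed to match the fixtures touched below.

```cpp
// Hypothetical test-side sketch of scheduling mock remote-command responses.
#include "mongo/db/jsobj.h"
#include "mongo/executor/network_interface_mock.h"
#include "mongo/executor/task_executor.h"

namespace mongo {
namespace {

using executor::NetworkInterfaceMock;
using executor::RemoteCommandResponse;
using ResponseStatus = executor::TaskExecutor::ResponseStatus;

// Answers one ready request with an error and another with a successful reply,
// mirroring the quorum-check and reporter tests in this change.
void scheduleMockReplies(NetworkInterfaceMock* net, Date_t when) {
    // Error path: ResponseStatus brace-initialized from an error code and reason.
    net->scheduleResponse(net->getNextReadyRequest(), when, {ErrorCodes::NoSuchKey, "No response"});
    // Success path: command result BSON, reply metadata, and a simulated elapsed time.
    net->scheduleResponse(
        net->getNextReadyRequest(),
        when,
        ResponseStatus(RemoteCommandResponse(BSON("ok" << 1), BSONObj(), Milliseconds(8))));
}

}  // namespace
}  // namespace mongo
```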
diff --git a/src/mongo/db/repl/base_cloner_test_fixture.cpp b/src/mongo/db/repl/base_cloner_test_fixture.cpp
index ac926addcd9..399be148782 100644
--- a/src/mongo/db/repl/base_cloner_test_fixture.cpp
+++ b/src/mongo/db/repl/base_cloner_test_fixture.cpp
@@ -146,7 +146,7 @@ void BaseClonerTest::scheduleNetworkResponse(NetworkOperationIterator noi,
     auto net = getNet();
     executor::TaskExecutor::ResponseStatus responseStatus(code, reason);
     log() << "Scheduling error response to request:" << noi->getDiagnosticString()
-          << " -- status:" << responseStatus.getStatus().toString();
+          << " -- status:" << responseStatus.status.toString();
     net->scheduleResponse(noi, net->now(), responseStatus);
 }
diff --git a/src/mongo/db/repl/check_quorum_for_config_change.cpp b/src/mongo/db/repl/check_quorum_for_config_change.cpp
index bd3740ed410..423ea40cc94 100644
--- a/src/mongo/db/repl/check_quorum_for_config_change.cpp
+++ b/src/mongo/db/repl/check_quorum_for_config_change.cpp
@@ -181,12 +181,12 @@ void QuorumChecker::_tabulateHeartbeatResponse(const RemoteCommandRequest& reque
     ++_numResponses;
     if (!response.isOK()) {
         warning() << "Failed to complete heartbeat request to " << request.target << "; "
-                  << response.getStatus();
-        _badResponses.push_back(std::make_pair(request.target, response.getStatus()));
+                  << response.status;
+        _badResponses.push_back(std::make_pair(request.target, response.status));
         return;
     }
 
-    BSONObj resBSON = response.getValue().data;
+    BSONObj resBSON = response.data;
     ReplSetHeartbeatResponse hbResp;
     Status hbStatus = hbResp.initialize(resBSON, 0);
 
@@ -219,7 +219,7 @@ void QuorumChecker::_tabulateHeartbeatResponse(const RemoteCommandRequest& reque
 
     if (_rsConfig->hasReplicaSetId()) {
         StatusWith<rpc::ReplSetMetadata> replMetadata =
-            rpc::ReplSetMetadata::readFromMetadata(response.getValue().metadata);
+            rpc::ReplSetMetadata::readFromMetadata(response.metadata);
         if (replMetadata.isOK() && replMetadata.getValue().getReplicaSetId().isSet() &&
             _rsConfig->getReplicaSetId() != replMetadata.getValue().getReplicaSetId()) {
             std::string message = str::stream()
diff --git a/src/mongo/db/repl/check_quorum_for_config_change_test.cpp b/src/mongo/db/repl/check_quorum_for_config_change_test.cpp
index 86527def28a..610dbad4cf7 100644
--- a/src/mongo/db/repl/check_quorum_for_config_change_test.cpp
+++ b/src/mongo/db/repl/check_quorum_for_config_change_test.cpp
@@ -203,7 +203,7 @@ TEST_F(CheckQuorumForInitiate, QuorumCheckFailedDueToSeveralDownNodes) {
     for (int i = 0; i < numCommandsExpected; ++i) {
         _net->scheduleResponse(_net->getNextReadyRequest(),
                                startDate + Milliseconds(10),
-                               ResponseStatus(ErrorCodes::NoSuchKey, "No reply"));
+                               {ErrorCodes::NoSuchKey, "No reply"});
     }
     _net->runUntil(startDate + Milliseconds(10));
     _net->exitNetwork();
@@ -320,9 +320,8 @@ TEST_F(CheckQuorumForInitiate, QuorumCheckFailedDueToOneDownNode) {
         ASSERT(seenHosts.insert(request.target).second) << "Already saw "
                                                         << request.target.toString();
         if (request.target == HostAndPort("h2", 1)) {
-            _net->scheduleResponse(noi,
-                                   startDate + Milliseconds(10),
-                                   ResponseStatus(ErrorCodes::NoSuchKey, "No response"));
+            _net->scheduleResponse(
+                noi, startDate + Milliseconds(10), {ErrorCodes::NoSuchKey, "No response"});
         } else {
             _net->scheduleResponse(
                 noi,
@@ -766,9 +765,8 @@ TEST_F(CheckQuorumForReconfig, QuorumCheckVetoedDueToIncompatibleSetName) {
                 ResponseStatus(RemoteCommandResponse(
                     BSON("ok" << 0 << "mismatch" << true), BSONObj(), Milliseconds(8))));
         } else {
-            _net->scheduleResponse(noi,
-                                   startDate + Milliseconds(10),
-                                   ResponseStatus(ErrorCodes::NoSuchKey, "No response"));
+            _net->scheduleResponse(
+                noi, startDate + Milliseconds(10), {ErrorCodes::NoSuchKey, "No response"});
         }
     }
     _net->runUntil(startDate + Milliseconds(10));
@@ -832,9 +830,8 @@ TEST_F(CheckQuorumForReconfig, QuorumCheckFailsDueToInsufficientVoters) {
                 startDate + Milliseconds(10),
                 ResponseStatus(RemoteCommandResponse(BSON("ok" << 1), BSONObj(), Milliseconds(8))));
         } else {
-            _net->scheduleResponse(noi,
-                                   startDate + Milliseconds(10),
-                                   ResponseStatus(ErrorCodes::NoSuchKey, "No response"));
+            _net->scheduleResponse(
+                noi, startDate + Milliseconds(10), {ErrorCodes::NoSuchKey, "No response"});
         }
     }
     _net->runUntil(startDate + Milliseconds(10));
@@ -894,9 +891,8 @@ TEST_F(CheckQuorumForReconfig, QuorumCheckFailsDueToNoElectableNodeResponding) {
                 startDate + Milliseconds(10),
                 ResponseStatus(RemoteCommandResponse(BSON("ok" << 1), BSONObj(), Milliseconds(8))));
         } else {
-            _net->scheduleResponse(noi,
-                                   startDate + Milliseconds(10),
-                                   ResponseStatus(ErrorCodes::NoSuchKey, "No response"));
+            _net->scheduleResponse(
+                noi, startDate + Milliseconds(10), {ErrorCodes::NoSuchKey, "No response"});
         }
     }
     _net->runUntil(startDate + Milliseconds(10));
diff --git a/src/mongo/db/repl/databases_cloner.cpp b/src/mongo/db/repl/databases_cloner.cpp
index 3b01a07e139..cbc8d763490 100644
--- a/src/mongo/db/repl/databases_cloner.cpp
+++ b/src/mongo/db/repl/databases_cloner.cpp
@@ -191,9 +191,9 @@ void DatabasesCloner::setScheduleDbWorkFn_forTest(const CollectionCloner::Schedu
 }
 
 void DatabasesCloner::_onListDatabaseFinish(const CommandCallbackArgs& cbd) {
-    Status respStatus = cbd.response.getStatus();
+    Status respStatus = cbd.response.status;
     if (respStatus.isOK()) {
-        respStatus = getStatusFromCommandResult(cbd.response.getValue().data);
+        respStatus = getStatusFromCommandResult(cbd.response.data);
     }
 
     UniqueLock lk(_mutex);
@@ -204,7 +204,7 @@ void DatabasesCloner::_onListDatabaseFinish(const CommandCallbackArgs& cbd) {
         return;
     }
 
-    const auto respBSON = cbd.response.getValue().data;
+    const auto respBSON = cbd.response.data;
     // There should not be any cloners yet
     invariant(_databaseCloners.size() == 0);
     const auto dbsElem = respBSON["databases"].Obj();
diff --git a/src/mongo/db/repl/elect_cmd_runner.cpp b/src/mongo/db/repl/elect_cmd_runner.cpp
index afe28960c1f..907f396c23c 100644
--- a/src/mongo/db/repl/elect_cmd_runner.cpp
+++ b/src/mongo/db/repl/elect_cmd_runner.cpp
@@ -108,7 +108,7 @@ void ElectCmdRunner::Algorithm::processResponse(const RemoteCommandRequest& requ
     ++_actualResponses;
 
     if (response.isOK()) {
-        BSONObj res = response.getValue().data;
+        BSONObj res = response.data;
         log() << "received " << res["vote"] << " votes from " << request.target;
         LOG(1) << "full elect res: " << res.toString();
         BSONElement vote(res["vote"]);
@@ -121,7 +121,7 @@ void ElectCmdRunner::Algorithm::processResponse(const RemoteCommandRequest& requ
 
         _receivedVotes += vote._numberInt();
     } else {
-        warning() << "elect command to " << request.target << " failed: " << response.getStatus();
+        warning() << "elect command to " << request.target << " failed: " << response.status;
     }
 }
 
diff --git a/src/mongo/db/repl/freshness_checker.cpp b/src/mongo/db/repl/freshness_checker.cpp
index d3927c33102..1b74c3810b5 100644
--- a/src/mongo/db/repl/freshness_checker.cpp
+++ b/src/mongo/db/repl/freshness_checker.cpp
@@ -128,8 +128,7 @@ void FreshnessChecker::Algorithm::processResponse(const RemoteCommandRequest& re
 
     Status status = Status::OK();
 
-    if (!response.isOK() ||
-        !((status = getStatusFromCommandResult(response.getValue().data)).isOK())) {
+    if (!response.isOK() || !((status = getStatusFromCommandResult(response.data)).isOK())) {
         if (votingMember) {
             ++_failedVoterResponses;
             if (hadTooManyFailedVoterResponses()) {
@@ -145,7 +144,7 @@ void FreshnessChecker::Algorithm::processResponse(const RemoteCommandRequest& re
         return;
     }
 
-    const BSONObj res = response.getValue().data;
+    const BSONObj res = response.data;
 
     LOG(2) << "FreshnessChecker: Got response from " << request.target << " of " << res;
 
diff --git a/src/mongo/db/repl/freshness_scanner.cpp b/src/mongo/db/repl/freshness_scanner.cpp
index 87182309a39..2623fb32b88 100644
--- a/src/mongo/db/repl/freshness_scanner.cpp
+++ b/src/mongo/db/repl/freshness_scanner.cpp
@@ -74,9 +74,9 @@ void FreshnessScanner::Algorithm::processResponse(const RemoteCommandRequest& re
     _responsesProcessed++;
     if (!response.isOK()) {  // failed response
         LOG(2) << "FreshnessScanner: Got failed response from " << request.target << ": "
-               << response.getStatus();
+               << response.status;
     } else {
-        BSONObj opTimesObj = response.getValue().data.getObjectField("optimes");
+        BSONObj opTimesObj = response.data.getObjectField("optimes");
         OpTime lastOpTime;
         Status status = bsonExtractOpTimeField(opTimesObj, "appliedOpTime", &lastOpTime);
         if (!status.isOK()) {
diff --git a/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp b/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp
index e56fa347fcd..d0fe0aa4e58 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp
@@ -127,7 +127,7 @@ void ReplicationCoordinatorImpl::_handleHeartbeatResponse(
 
     // Parse and validate the response.  At the end of this step, if responseStatus is OK then
     // hbResponse is valid.
-    Status responseStatus = cbData.response.getStatus();
+    Status responseStatus = cbData.response.status;
     if (responseStatus == ErrorCodes::CallbackCanceled) {
         return;
     }
@@ -136,10 +136,10 @@ void ReplicationCoordinatorImpl::_handleHeartbeatResponse(
     ReplSetHeartbeatResponse hbResponse;
     BSONObj resp;
     if (responseStatus.isOK()) {
-        resp = cbData.response.getValue().data;
+        resp = cbData.response.data;
         responseStatus = hbResponse.initialize(resp, _topCoord->getTerm());
         StatusWith<rpc::ReplSetMetadata> replMetadata =
-            rpc::ReplSetMetadata::readFromMetadata(cbData.response.getValue().metadata);
+            rpc::ReplSetMetadata::readFromMetadata(cbData.response.metadata);
 
         // Reject heartbeat responses (and metadata) from nodes with mismatched replica set IDs.
         // It is problematic to perform this check in the heartbeat reconfiguring logic because it
@@ -170,7 +170,7 @@ void ReplicationCoordinatorImpl::_handleHeartbeatResponse(
     StatusWith<ReplSetHeartbeatResponse> hbStatusResponse(hbResponse);
 
     if (responseStatus.isOK()) {
-        networkTime = cbData.response.getValue().elapsedMillis;
+        networkTime = cbData.response.elapsedMillis.value_or(Milliseconds{0});
         // TODO(sz) Because the term is duplicated in ReplSetMetaData, we can get rid of this
         // and update tests.
         _updateTerm_incallback(hbStatusResponse.getValue().getTerm());
@@ -288,17 +288,17 @@ namespace {
 * This callback is purely for logging and has no effect on any other operations
 */
 void remoteStepdownCallback(const ReplicationExecutor::RemoteCommandCallbackArgs& cbData) {
-    const Status status = cbData.response.getStatus();
+    const Status status = cbData.response.status;
     if (status == ErrorCodes::CallbackCanceled) {
         return;
     }
 
     if (status.isOK()) {
         LOG(1) << "stepdown of primary(" << cbData.request.target << ") succeeded with response -- "
-               << cbData.response.getValue().data;
+               << cbData.response.data;
     } else {
         warning() << "stepdown of primary(" << cbData.request.target << ") failed due to "
-                  << cbData.response.getStatus();
+                  << cbData.response.status;
     }
 }
 }  // namespace
diff --git a/src/mongo/db/repl/replication_executor.cpp b/src/mongo/db/repl/replication_executor.cpp
index 6a076aff54d..dd077167b0e 100644
--- a/src/mongo/db/repl/replication_executor.cpp
+++ b/src/mongo/db/repl/replication_executor.cpp
@@ -317,7 +317,7 @@ void ReplicationExecutor::_finishRemoteCommand(const RemoteCommandRequest& reque
     }
 
     LOG(4) << "Received remote response: "
-           << (response.isOK() ? response.getValue().toString() : response.getStatus().toString());
+           << (response.isOK() ? response.toString() : response.status.toString());
 
     callback->_callbackFn =
         stdx::bind(remoteCommandFinished, stdx::placeholders::_1, cb, request, response);
diff --git a/src/mongo/db/repl/replication_executor.h b/src/mongo/db/repl/replication_executor.h
index ffb90fb78ea..be231372c8a 100644
--- a/src/mongo/db/repl/replication_executor.h
+++ b/src/mongo/db/repl/replication_executor.h
@@ -263,7 +263,7 @@ private:
     void finishShutdown();
 
     void _finishRemoteCommand(const executor::RemoteCommandRequest& request,
-                              const StatusWith<executor::RemoteCommandResponse>& response,
+                              const executor::RemoteCommandResponse& response,
                               const CallbackHandle& cbHandle,
                               const uint64_t expectedHandleGeneration,
                               const RemoteCommandCallbackFn& cb);
diff --git a/src/mongo/db/repl/reporter.cpp b/src/mongo/db/repl/reporter.cpp
index 2e19e804108..64b7dd27e12 100644
--- a/src/mongo/db/repl/reporter.cpp
+++ b/src/mongo/db/repl/reporter.cpp
@@ -257,14 +257,14 @@ void Reporter::_processResponseCallback(
         return;
     }
 
-    _status = rcbd.response.getStatus();
+    _status = rcbd.response.status;
     if (!_status.isOK()) {
         _onShutdown_inlock();
         return;
     }
 
     // Override _status with the one embedded in the command result.
-    const auto& commandResult = rcbd.response.getValue().data;
+    const auto& commandResult = rcbd.response.data;
     _status = getStatusFromCommandResult(commandResult);
 
     // Some error types are OK and should not cause the reporter to stop sending updates to the
diff --git a/src/mongo/db/repl/reporter_test.cpp b/src/mongo/db/repl/reporter_test.cpp
index 9d89e00bf95..42ee28c6aa5 100644
--- a/src/mongo/db/repl/reporter_test.cpp
+++ b/src/mongo/db/repl/reporter_test.cpp
@@ -46,6 +46,8 @@ using executor::NetworkInterfaceMock;
 using executor::RemoteCommandRequest;
 using executor::RemoteCommandResponse;
 
+using ResponseStatus = mongo::executor::TaskExecutor::ResponseStatus;
+
 class MockProgressManager {
 public:
     void updateMap(int memberId, const OpTime& lastDurableOpTime, const OpTime& lastAppliedOpTime) {
@@ -120,8 +122,7 @@ public:
      */
     BSONObj processNetworkResponse(const BSONObj& obj,
                                    bool expectReadyRequestsAfterProcessing = false);
-    BSONObj processNetworkResponse(ErrorCodes::Error code,
-                                   const std::string& reason,
+    BSONObj processNetworkResponse(const ResponseStatus rs,
                                    bool expectReadyRequestsAfterProcessing = false);
 
     void runUntil(Date_t when, bool expectReadyRequestsAfterAdvancingClock = false);
@@ -210,12 +211,11 @@ BSONObj ReporterTest::processNetworkResponse(const BSONObj& obj,
     return cmdObj;
 }
 
-BSONObj ReporterTest::processNetworkResponse(ErrorCodes::Error code,
-                                             const std::string& reason,
+BSONObj ReporterTest::processNetworkResponse(const ResponseStatus rs,
                                              bool expectReadyRequestsAfterProcessing) {
     auto net = getNet();
     net->enterNetwork();
-    auto cmdObj = net->scheduleErrorResponse({code, reason}).cmdObj;
+    auto cmdObj = net->scheduleErrorResponse(rs).cmdObj;
     net->runReadyNetworkOperations();
     ASSERT_EQUALS(expectReadyRequestsAfterProcessing, net->hasReadyRequests());
     net->exitNetwork();
@@ -320,7 +320,7 @@ TEST_F(ReporterTest, TaskExecutorAndNetworkErrorsStopTheReporter) {
     ASSERT_TRUE(reporter->isActive());
     ASSERT_TRUE(reporter->isWaitingToSendReport());
 
-    processNetworkResponse(ErrorCodes::NoSuchKey, "waaaah");
+    processNetworkResponse({ErrorCodes::NoSuchKey, "waaaah", Milliseconds(0)});
 
     ASSERT_EQUALS(ErrorCodes::NoSuchKey, reporter->join());
     assertReporterDone();
@@ -561,7 +561,7 @@ TEST_F(ReporterTest,
 }
 
 TEST_F(ReporterTest, FailedUpdateShouldNotRescheduleUpdate) {
-    processNetworkResponse(ErrorCodes::OperationFailed, "update failed");
+    processNetworkResponse({ErrorCodes::OperationFailed, "update failed", Milliseconds(0)});
 
     ASSERT_EQUALS(ErrorCodes::OperationFailed, reporter->join());
     assertReporterDone();
@@ -576,7 +576,7 @@ TEST_F(ReporterTest, SuccessfulUpdateShouldRescheduleUpdate) {
 
     runUntil(until, true);
 
-    processNetworkResponse(ErrorCodes::OperationFailed, "update failed");
+    processNetworkResponse({ErrorCodes::OperationFailed, "update failed", Milliseconds(0)});
 
     ASSERT_EQUALS(ErrorCodes::OperationFailed, reporter->join());
     assertReporterDone();
diff --git a/src/mongo/db/repl/rollback_checker.cpp b/src/mongo/db/repl/rollback_checker.cpp
index 2a1685d51ab..ffca29bc211 100644
--- a/src/mongo/db/repl/rollback_checker.cpp
+++ b/src/mongo/db/repl/rollback_checker.cpp
@@ -51,14 +51,14 @@ RollbackChecker::~RollbackChecker() {}
 RollbackChecker::CallbackHandle RollbackChecker::checkForRollback(const CallbackFn& nextAction) {
     return _scheduleGetRollbackId(
         [this, nextAction](const RemoteCommandCallbackArgs& args) {
-            if (args.response.getStatus() == ErrorCodes::CallbackCanceled) {
+            if (args.response.status == ErrorCodes::CallbackCanceled) {
                 return;
             }
             if (!args.response.isOK()) {
-                nextAction(args.response.getStatus());
+                nextAction(args.response.status);
                 return;
             }
-            if (auto rbidElement = args.response.getValue().data["rbid"]) {
+            if (auto rbidElement = args.response.data["rbid"]) {
                 int remoteRBID = rbidElement.numberInt();
 
                 UniqueLock lk(_mutex);
@@ -97,14 +97,14 @@ StatusWith<bool> RollbackChecker::hasHadRollback() {
 RollbackChecker::CallbackHandle RollbackChecker::reset(const CallbackFn& nextAction) {
     return _scheduleGetRollbackId(
         [this, nextAction](const RemoteCommandCallbackArgs& args) {
-            if (args.response.getStatus() == ErrorCodes::CallbackCanceled) {
+            if (args.response.status == ErrorCodes::CallbackCanceled) {
                 return;
             }
            if (!args.response.isOK()) {
-                nextAction(args.response.getStatus());
+                nextAction(args.response.status);
                 return;
            }
-            if (auto rbidElement = args.response.getValue().data["rbid"]) {
+            if (auto rbidElement = args.response.data["rbid"]) {
                 int newRBID = rbidElement.numberInt();
 
                 UniqueLock lk(_mutex);
diff --git a/src/mongo/db/repl/scatter_gather_runner.cpp b/src/mongo/db/repl/scatter_gather_runner.cpp
index 90a16ea470a..597239179e0 100644
--- a/src/mongo/db/repl/scatter_gather_runner.cpp
+++ b/src/mongo/db/repl/scatter_gather_runner.cpp
@@ -138,7 +138,7 @@ void ScatterGatherRunner::RunnerImpl::processResponse(
     std::swap(*iter, _callbacks.back());
     _callbacks.pop_back();
 
-    if (cbData.response.getStatus() == ErrorCodes::CallbackCanceled) {
+    if (cbData.response.status == ErrorCodes::CallbackCanceled) {
         return;
     }
 
diff --git a/src/mongo/db/repl/vote_requester.cpp b/src/mongo/db/repl/vote_requester.cpp
index b02b3bb7c0c..4f710827884 100644
--- a/src/mongo/db/repl/vote_requester.cpp
+++ b/src/mongo/db/repl/vote_requester.cpp
@@ -93,24 +93,23 @@ void VoteRequester::Algorithm::processResponse(const RemoteCommandRequest& reque
     _responsesProcessed++;
     if (!response.isOK()) {  // failed response
         log() << "VoteRequester: Got failed response from " << request.target << ": "
-              << response.getStatus();
+              << response.status;
     } else {
         _responders.insert(request.target);
         ReplSetRequestVotesResponse voteResponse;
-        const auto status = voteResponse.initialize(response.getValue().data);
+        const auto status = voteResponse.initialize(response.data);
         if (!status.isOK()) {
             log() << "VoteRequester: Got error processing response with status: " << status
-                  << ", resp:" << response.getValue().data;
+                  << ", resp:" << response.data;
         }
 
         if (voteResponse.getVoteGranted()) {
             LOG(3) << "VoteRequester: Got yes vote from " << request.target
-                   << ", resp:" << response.getValue().data;
+                   << ", resp:" << response.data;
             _votes++;
         } else {
             log() << "VoteRequester: Got no vote from " << request.target
-                  << " because: " << voteResponse.getReason()
-                  << ", resp:" << response.getValue().data;
+                  << " because: " << voteResponse.getReason() << ", resp:" << response.data;
         }
 
         if (voteResponse.getTerm() > _term) {
diff --git a/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp b/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
index 500b11e57e7..08721b06419 100644
--- a/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
+++ b/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
@@ -467,7 +467,7 @@ void MigrationChunkClonerSourceLegacy::_cleanup(OperationContext* txn) {
 }
 
 StatusWith<BSONObj> MigrationChunkClonerSourceLegacy::_callRecipient(const BSONObj& cmdObj) {
-    StatusWith<executor::RemoteCommandResponse> responseStatus(
+    executor::RemoteCommandResponse responseStatus(
         Status{ErrorCodes::InternalError, "Uninitialized value"});
 
     auto executor = grid.getExecutorPool()->getArbitraryExecutor();
@@ -485,15 +485,15 @@ StatusWith<BSONObj> MigrationChunkClonerSourceLegacy::_callRecipient(const BSONO
     executor->wait(scheduleStatus.getValue());
 
     if (!responseStatus.isOK()) {
-        return responseStatus.getStatus();
+        return responseStatus.status;
     }
 
-    Status commandStatus = getStatusFromCommandResult(responseStatus.getValue().data);
+    Status commandStatus = getStatusFromCommandResult(responseStatus.data);
     if (!commandStatus.isOK()) {
         return commandStatus;
     }
 
-    return responseStatus.getValue().data.getOwned();
+    return responseStatus.data.getOwned();
 }
 
 Status MigrationChunkClonerSourceLegacy::_storeCurrentLocs(OperationContext* txn) {