path: root/src/mongo/s/query/async_results_merger.cpp
Diffstat (limited to 'src/mongo/s/query/async_results_merger.cpp')
-rw-r--r--  src/mongo/s/query/async_results_merger.cpp | 24 ++++++++++++------------
 1 file changed, 12 insertions(+), 12 deletions(-)
diff --git a/src/mongo/s/query/async_results_merger.cpp b/src/mongo/s/query/async_results_merger.cpp
index afcf5257db0..614002978f1 100644
--- a/src/mongo/s/query/async_results_merger.cpp
+++ b/src/mongo/s/query/async_results_merger.cpp
@@ -253,7 +253,7 @@ ClusterQueryResult AsyncResultsMerger::nextReadyUnsorted() {
return {};
}
-Status AsyncResultsMerger::askForNextBatch_inlock(OperationContext* txn, size_t remoteIndex) {
+Status AsyncResultsMerger::askForNextBatch_inlock(OperationContext* opCtx, size_t remoteIndex) {
auto& remote = _remotes[remoteIndex];
invariant(!remote.cbHandle.isValid());
@@ -291,14 +291,14 @@ Status AsyncResultsMerger::askForNextBatch_inlock(OperationContext* txn, size_t
}
executor::RemoteCommandRequest request(
- remote.getTargetHost(), _params->nsString.db().toString(), cmdObj, _metadataObj, txn);
+ remote.getTargetHost(), _params->nsString.db().toString(), cmdObj, _metadataObj, opCtx);
auto callbackStatus =
_executor->scheduleRemoteCommand(request,
stdx::bind(&AsyncResultsMerger::handleBatchResponse,
this,
stdx::placeholders::_1,
- txn,
+ opCtx,
remoteIndex));
if (!callbackStatus.isOK()) {
return callbackStatus.getStatus();
@@ -317,7 +317,7 @@ Status AsyncResultsMerger::askForNextBatch_inlock(OperationContext* txn, size_t
* 3. Remotes that reached maximum retries will be in 'exhausted' state.
*/
StatusWith<executor::TaskExecutor::EventHandle> AsyncResultsMerger::nextEvent(
- OperationContext* txn) {
+ OperationContext* opCtx) {
stdx::lock_guard<stdx::mutex> lk(_mutex);
if (_lifecycleState != kAlive) {
@@ -345,7 +345,7 @@ StatusWith<executor::TaskExecutor::EventHandle> AsyncResultsMerger::nextEvent(
// If we already have established a cursor with this remote, and there is no outstanding
// request for which we have a valid callback handle, then schedule work to retrieve the
// next batch.
- auto nextBatchStatus = askForNextBatch_inlock(txn, i);
+ auto nextBatchStatus = askForNextBatch_inlock(opCtx, i);
if (!nextBatchStatus.isOK()) {
return nextBatchStatus;
}
@@ -391,7 +391,7 @@ StatusWith<CursorResponse> AsyncResultsMerger::parseCursorResponse(const BSONObj
void AsyncResultsMerger::handleBatchResponse(
const executor::TaskExecutor::RemoteCommandCallbackArgs& cbData,
- OperationContext* txn,
+ OperationContext* opCtx,
size_t remoteIndex) {
stdx::lock_guard<stdx::mutex> lk(_mutex);
@@ -426,7 +426,7 @@ void AsyncResultsMerger::handleBatchResponse(
// If the event handle is invalid, then the executor is in the middle of shutting down,
// and we can't schedule any more work for it to complete.
if (_killCursorsScheduledEvent.isValid()) {
- scheduleKillCursors_inlock(txn);
+ scheduleKillCursors_inlock(opCtx);
_executor->signalEvent(_killCursorsScheduledEvent);
}
@@ -571,7 +571,7 @@ void AsyncResultsMerger::handleBatchResponse(
// We do not ask for the next batch if the cursor is tailable, as batches received from remote
// tailable cursors should be passed through to the client without asking for more batches.
if (!_params->isTailable && !remote.hasNext() && !remote.exhausted()) {
- remote.status = askForNextBatch_inlock(txn, remoteIndex);
+ remote.status = askForNextBatch_inlock(opCtx, remoteIndex);
if (!remote.status.isOK()) {
return;
}
@@ -602,7 +602,7 @@ bool AsyncResultsMerger::haveOutstandingBatchRequests_inlock() {
return false;
}
-void AsyncResultsMerger::scheduleKillCursors_inlock(OperationContext* txn) {
+void AsyncResultsMerger::scheduleKillCursors_inlock(OperationContext* opCtx) {
invariant(_lifecycleState == kKillStarted);
invariant(_killCursorsScheduledEvent.isValid());
@@ -613,7 +613,7 @@ void AsyncResultsMerger::scheduleKillCursors_inlock(OperationContext* txn) {
BSONObj cmdObj = KillCursorsRequest(_params->nsString, {*remote.cursorId}).toBSON();
executor::RemoteCommandRequest request(
- remote.getTargetHost(), _params->nsString.db().toString(), cmdObj, txn);
+ remote.getTargetHost(), _params->nsString.db().toString(), cmdObj, opCtx);
_executor->scheduleRemoteCommand(
request,
@@ -627,7 +627,7 @@ void AsyncResultsMerger::handleKillCursorsResponse(
// We just ignore any killCursors command responses.
}
-executor::TaskExecutor::EventHandle AsyncResultsMerger::kill(OperationContext* txn) {
+executor::TaskExecutor::EventHandle AsyncResultsMerger::kill(OperationContext* opCtx) {
stdx::lock_guard<stdx::mutex> lk(_mutex);
if (_killCursorsScheduledEvent.isValid()) {
invariant(_lifecycleState != kAlive);
@@ -653,7 +653,7 @@ executor::TaskExecutor::EventHandle AsyncResultsMerger::kill(OperationContext* t
// remotes now. Otherwise, we have to wait until all responses are back, and then we can kill
// the remote cursors.
if (!haveOutstandingBatchRequests_inlock()) {
- scheduleKillCursors_inlock(txn);
+ scheduleKillCursors_inlock(opCtx);
_lifecycleState = kKillComplete;
_executor->signalEvent(_killCursorsScheduledEvent);
}
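
The diff above is a mechanical rename of the OperationContext parameter from 'txn' to 'opCtx' along AsyncResultsMerger's getMore and killCursors paths: the pointer is placed on each RemoteCommandRequest and bound into the response callback, so the same operation context follows the remote command from scheduling through to response handling. Below is a minimal, self-contained C++ sketch of that pattern. The types here (OperationContext, RemoteCommandRequest, TaskExecutor, ResultsMerger) are simplified stand-ins, not the real MongoDB classes, and the toy executor runs callbacks synchronously purely for illustration.

#include <cstddef>
#include <functional>
#include <iostream>
#include <string>

struct OperationContext {
    std::string id;  // stand-in for per-operation state carried by the context
};

struct RemoteCommandRequest {
    std::string target;
    std::string dbName;
    std::string cmd;
    OperationContext* opCtx;  // the operation context travels with the request
};

class TaskExecutor {
public:
    using Callback = std::function<void()>;

    // Toy scheduler: runs the callback immediately; a real executor would
    // dispatch the command over the network and invoke the callback later.
    void scheduleRemoteCommand(const RemoteCommandRequest& request, Callback cb) {
        std::cout << "sending '" << request.cmd << "' to " << request.target
                  << " for op " << request.opCtx->id << "\n";
        cb();
    }
};

class ResultsMerger {
public:
    explicit ResultsMerger(TaskExecutor* executor) : _executor(executor) {}

    // Mirrors the shape of askForNextBatch_inlock: the caller's opCtx is
    // stored on the request and captured into the response handler.
    void askForNextBatch(OperationContext* opCtx, std::size_t remoteIndex) {
        RemoteCommandRequest request{
            "shard-" + std::to_string(remoteIndex), "testdb", "getMore", opCtx};
        _executor->scheduleRemoteCommand(request, [this, opCtx, remoteIndex] {
            handleBatchResponse(opCtx, remoteIndex);
        });
    }

private:
    void handleBatchResponse(OperationContext* opCtx, std::size_t remoteIndex) {
        std::cout << "got batch from remote " << remoteIndex << " for op "
                  << opCtx->id << "\n";
    }

    TaskExecutor* _executor;
};

int main() {
    TaskExecutor executor;
    ResultsMerger merger(&executor);
    OperationContext opCtx{"op-42"};
    merger.askForNextBatch(&opCtx, 0);
    return 0;
}

The rename itself does not change behavior; what matters in the original code is that the context is threaded through both the request object and the bound callback, so whatever per-operation state the context carries stays associated with each outstanding remote batch or killCursors request.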