Diffstat (limited to 'src/mongo/s/query')
-rw-r--r--  src/mongo/s/query/async_results_merger.cpp        | 24
-rw-r--r--  src/mongo/s/query/async_results_merger.h          | 10
-rw-r--r--  src/mongo/s/query/cluster_client_cursor.h         |  4
-rw-r--r--  src/mongo/s/query/cluster_client_cursor_impl.cpp  | 18
-rw-r--r--  src/mongo/s/query/cluster_client_cursor_impl.h    | 10
-rw-r--r--  src/mongo/s/query/cluster_client_cursor_mock.cpp  |  4
-rw-r--r--  src/mongo/s/query/cluster_client_cursor_mock.h    |  4
-rw-r--r--  src/mongo/s/query/cluster_cursor_manager.cpp      | 10
-rw-r--r--  src/mongo/s/query/cluster_cursor_manager.h        |  6
-rw-r--r--  src/mongo/s/query/cluster_find.cpp                | 41
-rw-r--r--  src/mongo/s/query/cluster_find.h                  |  4
-rw-r--r--  src/mongo/s/query/router_exec_stage.h             |  4
-rw-r--r--  src/mongo/s/query/router_stage_limit.cpp          |  8
-rw-r--r--  src/mongo/s/query/router_stage_limit.h            |  4
-rw-r--r--  src/mongo/s/query/router_stage_merge.cpp          |  8
-rw-r--r--  src/mongo/s/query/router_stage_merge.h            |  4
-rw-r--r--  src/mongo/s/query/router_stage_mock.cpp           |  4
-rw-r--r--  src/mongo/s/query/router_stage_mock.h             |  4
-rw-r--r--  src/mongo/s/query/router_stage_remove_sortkey.cpp |  8
-rw-r--r--  src/mongo/s/query/router_stage_remove_sortkey.h   |  4
-rw-r--r--  src/mongo/s/query/router_stage_skip.cpp           | 10
-rw-r--r--  src/mongo/s/query/router_stage_skip.h             |  4
-rw-r--r--  src/mongo/s/query/store_possible_cursor.cpp       |  6
-rw-r--r--  src/mongo/s/query/store_possible_cursor.h         |  2
24 files changed, 105 insertions, 100 deletions
diff --git a/src/mongo/s/query/async_results_merger.cpp b/src/mongo/s/query/async_results_merger.cpp
index afcf5257db0..614002978f1 100644
--- a/src/mongo/s/query/async_results_merger.cpp
+++ b/src/mongo/s/query/async_results_merger.cpp
@@ -253,7 +253,7 @@ ClusterQueryResult AsyncResultsMerger::nextReadyUnsorted() {
return {};
}
-Status AsyncResultsMerger::askForNextBatch_inlock(OperationContext* txn, size_t remoteIndex) {
+Status AsyncResultsMerger::askForNextBatch_inlock(OperationContext* opCtx, size_t remoteIndex) {
auto& remote = _remotes[remoteIndex];
invariant(!remote.cbHandle.isValid());
@@ -291,14 +291,14 @@ Status AsyncResultsMerger::askForNextBatch_inlock(OperationContext* txn, size_t
}
executor::RemoteCommandRequest request(
- remote.getTargetHost(), _params->nsString.db().toString(), cmdObj, _metadataObj, txn);
+ remote.getTargetHost(), _params->nsString.db().toString(), cmdObj, _metadataObj, opCtx);
auto callbackStatus =
_executor->scheduleRemoteCommand(request,
stdx::bind(&AsyncResultsMerger::handleBatchResponse,
this,
stdx::placeholders::_1,
- txn,
+ opCtx,
remoteIndex));
if (!callbackStatus.isOK()) {
return callbackStatus.getStatus();
@@ -317,7 +317,7 @@ Status AsyncResultsMerger::askForNextBatch_inlock(OperationContext* txn, size_t
* 3. Remotes that reached maximum retries will be in 'exhausted' state.
*/
StatusWith<executor::TaskExecutor::EventHandle> AsyncResultsMerger::nextEvent(
- OperationContext* txn) {
+ OperationContext* opCtx) {
stdx::lock_guard<stdx::mutex> lk(_mutex);
if (_lifecycleState != kAlive) {
@@ -345,7 +345,7 @@ StatusWith<executor::TaskExecutor::EventHandle> AsyncResultsMerger::nextEvent(
// If we already have established a cursor with this remote, and there is no outstanding
// request for which we have a valid callback handle, then schedule work to retrieve the
// next batch.
- auto nextBatchStatus = askForNextBatch_inlock(txn, i);
+ auto nextBatchStatus = askForNextBatch_inlock(opCtx, i);
if (!nextBatchStatus.isOK()) {
return nextBatchStatus;
}
@@ -391,7 +391,7 @@ StatusWith<CursorResponse> AsyncResultsMerger::parseCursorResponse(const BSONObj
void AsyncResultsMerger::handleBatchResponse(
const executor::TaskExecutor::RemoteCommandCallbackArgs& cbData,
- OperationContext* txn,
+ OperationContext* opCtx,
size_t remoteIndex) {
stdx::lock_guard<stdx::mutex> lk(_mutex);
@@ -426,7 +426,7 @@ void AsyncResultsMerger::handleBatchResponse(
// If the event handle is invalid, then the executor is in the middle of shutting down,
// and we can't schedule any more work for it to complete.
if (_killCursorsScheduledEvent.isValid()) {
- scheduleKillCursors_inlock(txn);
+ scheduleKillCursors_inlock(opCtx);
_executor->signalEvent(_killCursorsScheduledEvent);
}
@@ -571,7 +571,7 @@ void AsyncResultsMerger::handleBatchResponse(
// We do not ask for the next batch if the cursor is tailable, as batches received from remote
// tailable cursors should be passed through to the client without asking for more batches.
if (!_params->isTailable && !remote.hasNext() && !remote.exhausted()) {
- remote.status = askForNextBatch_inlock(txn, remoteIndex);
+ remote.status = askForNextBatch_inlock(opCtx, remoteIndex);
if (!remote.status.isOK()) {
return;
}
@@ -602,7 +602,7 @@ bool AsyncResultsMerger::haveOutstandingBatchRequests_inlock() {
return false;
}
-void AsyncResultsMerger::scheduleKillCursors_inlock(OperationContext* txn) {
+void AsyncResultsMerger::scheduleKillCursors_inlock(OperationContext* opCtx) {
invariant(_lifecycleState == kKillStarted);
invariant(_killCursorsScheduledEvent.isValid());
@@ -613,7 +613,7 @@ void AsyncResultsMerger::scheduleKillCursors_inlock(OperationContext* txn) {
BSONObj cmdObj = KillCursorsRequest(_params->nsString, {*remote.cursorId}).toBSON();
executor::RemoteCommandRequest request(
- remote.getTargetHost(), _params->nsString.db().toString(), cmdObj, txn);
+ remote.getTargetHost(), _params->nsString.db().toString(), cmdObj, opCtx);
_executor->scheduleRemoteCommand(
request,
@@ -627,7 +627,7 @@ void AsyncResultsMerger::handleKillCursorsResponse(
// We just ignore any killCursors command responses.
}
-executor::TaskExecutor::EventHandle AsyncResultsMerger::kill(OperationContext* txn) {
+executor::TaskExecutor::EventHandle AsyncResultsMerger::kill(OperationContext* opCtx) {
stdx::lock_guard<stdx::mutex> lk(_mutex);
if (_killCursorsScheduledEvent.isValid()) {
invariant(_lifecycleState != kAlive);
@@ -653,7 +653,7 @@ executor::TaskExecutor::EventHandle AsyncResultsMerger::kill(OperationContext* t
// remotes now. Otherwise, we have to wait until all responses are back, and then we can kill
// the remote cursors.
if (!haveOutstandingBatchRequests_inlock()) {
- scheduleKillCursors_inlock(txn);
+ scheduleKillCursors_inlock(opCtx);
_lifecycleState = kKillComplete;
_executor->signalEvent(_killCursorsScheduledEvent);
}
diff --git a/src/mongo/s/query/async_results_merger.h b/src/mongo/s/query/async_results_merger.h
index 3252a22bf0e..e6766a4faa3 100644
--- a/src/mongo/s/query/async_results_merger.h
+++ b/src/mongo/s/query/async_results_merger.h
@@ -154,7 +154,7 @@ public:
* the caller should call nextEvent() to retry the request on the hosts that errored. If
* ready() is true, then either the error was not retriable or it has exhausted max retries.
*/
- StatusWith<executor::TaskExecutor::EventHandle> nextEvent(OperationContext* txn);
+ StatusWith<executor::TaskExecutor::EventHandle> nextEvent(OperationContext* opCtx);
/**
* Starts shutting down this ARM. Returns a handle to an event which is signaled when this
@@ -169,7 +169,7 @@ public:
*
* May be called multiple times (idempotent).
*/
- executor::TaskExecutor::EventHandle kill(OperationContext* txn);
+ executor::TaskExecutor::EventHandle kill(OperationContext* opCtx);
private:
/**
@@ -291,7 +291,7 @@ private:
*
* Returns success if the command to retrieve the next batch was scheduled successfully.
*/
- Status askForNextBatch_inlock(OperationContext* txn, size_t remoteIndex);
+ Status askForNextBatch_inlock(OperationContext* opCtx, size_t remoteIndex);
/**
* Checks whether or not the remote cursors are all exhausted.
@@ -322,7 +322,7 @@ private:
* buffered.
*/
void handleBatchResponse(const executor::TaskExecutor::RemoteCommandCallbackArgs& cbData,
- OperationContext* txn,
+ OperationContext* opCtx,
size_t remoteIndex);
/**
@@ -342,7 +342,7 @@ private:
/**
* Schedules a killCursors command to be run on all remote hosts that have open cursors.
*/
- void scheduleKillCursors_inlock(OperationContext* txn);
+ void scheduleKillCursors_inlock(OperationContext* opCtx);
// Not owned here.
executor::TaskExecutor* _executor;
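
The contract documented above is event-driven: callers poll ready(), and when no result is buffered yet they call nextEvent() and block on the returned event handle before polling again. A minimal consumer sketch follows, mirroring the RouterStageMerge::next() loop that appears later in this diff; it assumes 'arm' and 'executor' are valid, and uses the TaskExecutor's standard waitForEvent() blocking wait.

// Minimal sketch: drain one merged result from an AsyncResultsMerger.
// Mirrors RouterStageMerge::next() below; assumes 'arm' and 'executor' are valid.
StatusWith<ClusterQueryResult> nextMerged(OperationContext* opCtx,
                                          executor::TaskExecutor* executor,
                                          AsyncResultsMerger* arm) {
    while (!arm->ready()) {
        // Schedules getMore commands on remotes that need them (see
        // askForNextBatch_inlock above) and returns an event to wait on.
        auto nextEventStatus = arm->nextEvent(opCtx);
        if (!nextEventStatus.isOK()) {
            return nextEventStatus.getStatus();
        }
        // Block until at least one remote response has been handled.
        executor->waitForEvent(nextEventStatus.getValue());
    }
    return arm->nextReady();
}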
diff --git a/src/mongo/s/query/cluster_client_cursor.h b/src/mongo/s/query/cluster_client_cursor.h
index bea7bbba7aa..bd34689e62f 100644
--- a/src/mongo/s/query/cluster_client_cursor.h
+++ b/src/mongo/s/query/cluster_client_cursor.h
@@ -64,7 +64,7 @@ public:
*
* A non-ok status is returned in case of any error.
*/
- virtual StatusWith<ClusterQueryResult> next(OperationContext* txn) = 0;
+ virtual StatusWith<ClusterQueryResult> next(OperationContext* opCtx) = 0;
/**
* Must be called before destruction to abandon a not-yet-exhausted cursor. If next() has
@@ -72,7 +72,7 @@ public:
*
* May block waiting for responses from remote hosts.
*/
- virtual void kill(OperationContext* txn) = 0;
+ virtual void kill(OperationContext* opCtx) = 0;
/**
* Returns whether or not this cursor is tailing a capped collection on a shard.
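
The two virtuals above carry a lifecycle obligation: a cursor that is not yet exhausted must be kill()'d before destruction, and kill() may block on remote hosts. A hedged sketch of a caller honoring that contract, where 'consume' is a hypothetical per-result callback:

// Sketch: iterate a ClusterClientCursor to EOF, honoring the kill() contract.
Status drainCursor(OperationContext* opCtx,
                   ClusterClientCursor* cursor,
                   const std::function<void(const ClusterQueryResult&)>& consume) {
    while (true) {
        auto next = cursor->next(opCtx);
        if (!next.isOK()) {
            cursor->kill(opCtx);  // Not exhausted: abandon remote state explicitly.
            return next.getStatus();
        }
        if (next.getValue().isEOF()) {
            return Status::OK();  // Exhausted: destruction is now safe without kill().
        }
        consume(next.getValue());
    }
}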
diff --git a/src/mongo/s/query/cluster_client_cursor_impl.cpp b/src/mongo/s/query/cluster_client_cursor_impl.cpp
index 9f3157651b8..24ffc0b220a 100644
--- a/src/mongo/s/query/cluster_client_cursor_impl.cpp
+++ b/src/mongo/s/query/cluster_client_cursor_impl.cpp
@@ -41,13 +41,13 @@
namespace mongo {
-ClusterClientCursorGuard::ClusterClientCursorGuard(OperationContext* txn,
+ClusterClientCursorGuard::ClusterClientCursorGuard(OperationContext* opCtx,
std::unique_ptr<ClusterClientCursor> ccc)
- : _txn(txn), _ccc(std::move(ccc)) {}
+ : _opCtx(opCtx), _ccc(std::move(ccc)) {}
ClusterClientCursorGuard::~ClusterClientCursorGuard() {
if (_ccc && !_ccc->remotesExhausted()) {
- _ccc->kill(_txn);
+ _ccc->kill(_opCtx);
}
}
@@ -59,12 +59,12 @@ std::unique_ptr<ClusterClientCursor> ClusterClientCursorGuard::releaseCursor() {
return std::move(_ccc);
}
-ClusterClientCursorGuard ClusterClientCursorImpl::make(OperationContext* txn,
+ClusterClientCursorGuard ClusterClientCursorImpl::make(OperationContext* opCtx,
executor::TaskExecutor* executor,
ClusterClientCursorParams&& params) {
std::unique_ptr<ClusterClientCursor> cursor(
new ClusterClientCursorImpl(executor, std::move(params)));
- return ClusterClientCursorGuard(txn, std::move(cursor));
+ return ClusterClientCursorGuard(opCtx, std::move(cursor));
}
ClusterClientCursorImpl::ClusterClientCursorImpl(executor::TaskExecutor* executor,
@@ -75,7 +75,7 @@ ClusterClientCursorImpl::ClusterClientCursorImpl(std::unique_ptr<RouterStageMock
ClusterClientCursorParams&& params)
: _params(std::move(params)), _root(std::move(root)) {}
-StatusWith<ClusterQueryResult> ClusterClientCursorImpl::next(OperationContext* txn) {
+StatusWith<ClusterQueryResult> ClusterClientCursorImpl::next(OperationContext* opCtx) {
// First return stashed results, if there are any.
if (!_stash.empty()) {
auto front = std::move(_stash.front());
@@ -84,15 +84,15 @@ StatusWith<ClusterQueryResult> ClusterClientCursorImpl::next(OperationContext* t
return {front};
}
- auto next = _root->next(txn);
+ auto next = _root->next(opCtx);
if (next.isOK() && !next.getValue().isEOF()) {
++_numReturnedSoFar;
}
return next;
}
-void ClusterClientCursorImpl::kill(OperationContext* txn) {
- _root->kill(txn);
+void ClusterClientCursorImpl::kill(OperationContext* opCtx) {
+ _root->kill(opCtx);
}
bool ClusterClientCursorImpl::isTailable() const {
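
ClusterClientCursorGuard is what keeps error paths safe here: if the guarded cursor still has unexhausted remote cursors when the guard is destroyed, the destructor kills them using the stashed _opCtx. A sketch of the typical make()/releaseCursor() flow, following the shape of runQueryWithoutRetrying() and storePossibleCursor() later in this diff; the namespace, cursor-type, and lifetime arguments are illustrative.

// Sketch: guard lifecycle from creation to ownership transfer.
StatusWith<CursorId> makeAndRegister(OperationContext* opCtx,
                                     executor::TaskExecutor* executor,
                                     ClusterClientCursorParams&& params,
                                     ClusterCursorManager* cursorManager,
                                     const NamespaceString& nss) {
    auto ccc = ClusterClientCursorImpl::make(opCtx, executor, std::move(params));

    auto first = ccc->next(opCtx);
    if (!first.isOK()) {
        // Early return is safe: ~ClusterClientCursorGuard() kills the remote
        // cursors because they are not exhausted.
        return first.getStatus();
    }

    // Success: transfer ownership to the manager. After releaseCursor() the
    // guard owns nothing, so its destructor is a no-op.
    return cursorManager->registerCursor(
        opCtx,
        ccc.releaseCursor(),
        nss,
        ClusterCursorManager::CursorType::NamespaceNotSharded,
        ClusterCursorManager::CursorLifetime::Mortal);
}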
diff --git a/src/mongo/s/query/cluster_client_cursor_impl.h b/src/mongo/s/query/cluster_client_cursor_impl.h
index 929cf655849..de4e09d0950 100644
--- a/src/mongo/s/query/cluster_client_cursor_impl.h
+++ b/src/mongo/s/query/cluster_client_cursor_impl.h
@@ -50,7 +50,7 @@ class ClusterClientCursorGuard final {
MONGO_DISALLOW_COPYING(ClusterClientCursorGuard);
public:
- ClusterClientCursorGuard(OperationContext* txn, std::unique_ptr<ClusterClientCursor> ccc);
+ ClusterClientCursorGuard(OperationContext* opCtx, std::unique_ptr<ClusterClientCursor> ccc);
/**
* If a cursor is owned, safely destroys the cursor, cleaning up remote cursor state if
@@ -74,7 +74,7 @@ public:
std::unique_ptr<ClusterClientCursor> releaseCursor();
private:
- OperationContext* _txn;
+ OperationContext* _opCtx;
std::unique_ptr<ClusterClientCursor> _ccc;
};
@@ -85,7 +85,7 @@ public:
/**
* Constructs a CCC whose safe cleanup is ensured by an RAII object.
*/
- static ClusterClientCursorGuard make(OperationContext* txn,
+ static ClusterClientCursorGuard make(OperationContext* opCtx,
executor::TaskExecutor* executor,
ClusterClientCursorParams&& params);
@@ -95,9 +95,9 @@ public:
ClusterClientCursorImpl(std::unique_ptr<RouterStageMock> root,
ClusterClientCursorParams&& params);
- StatusWith<ClusterQueryResult> next(OperationContext* txn) final;
+ StatusWith<ClusterQueryResult> next(OperationContext* opCtx) final;
- void kill(OperationContext* txn) final;
+ void kill(OperationContext* opCtx) final;
bool isTailable() const final;
diff --git a/src/mongo/s/query/cluster_client_cursor_mock.cpp b/src/mongo/s/query/cluster_client_cursor_mock.cpp
index 533773742e7..28a4f2643f3 100644
--- a/src/mongo/s/query/cluster_client_cursor_mock.cpp
+++ b/src/mongo/s/query/cluster_client_cursor_mock.cpp
@@ -43,7 +43,7 @@ ClusterClientCursorMock::~ClusterClientCursorMock() {
invariant(_exhausted || _killed);
}
-StatusWith<ClusterQueryResult> ClusterClientCursorMock::next(OperationContext* txn) {
+StatusWith<ClusterQueryResult> ClusterClientCursorMock::next(OperationContext* opCtx) {
invariant(!_killed);
if (_resultsQueue.empty()) {
@@ -66,7 +66,7 @@ long long ClusterClientCursorMock::getNumReturnedSoFar() const {
return _numReturnedSoFar;
}
-void ClusterClientCursorMock::kill(OperationContext* txn) {
+void ClusterClientCursorMock::kill(OperationContext* opCtx) {
_killed = true;
if (_killCallback) {
_killCallback();
diff --git a/src/mongo/s/query/cluster_client_cursor_mock.h b/src/mongo/s/query/cluster_client_cursor_mock.h
index 7011911ce67..baea6660535 100644
--- a/src/mongo/s/query/cluster_client_cursor_mock.h
+++ b/src/mongo/s/query/cluster_client_cursor_mock.h
@@ -43,9 +43,9 @@ public:
~ClusterClientCursorMock();
- StatusWith<ClusterQueryResult> next(OperationContext* txn) final;
+ StatusWith<ClusterQueryResult> next(OperationContext* opCtx) final;
- void kill(OperationContext* txn) final;
+ void kill(OperationContext* opCtx) final;
bool isTailable() const final;
diff --git a/src/mongo/s/query/cluster_cursor_manager.cpp b/src/mongo/s/query/cluster_cursor_manager.cpp
index 2b4e68ac2cf..85d396490c6 100644
--- a/src/mongo/s/query/cluster_cursor_manager.cpp
+++ b/src/mongo/s/query/cluster_cursor_manager.cpp
@@ -110,9 +110,9 @@ ClusterCursorManager::PinnedCursor& ClusterCursorManager::PinnedCursor::operator
return *this;
}
-StatusWith<ClusterQueryResult> ClusterCursorManager::PinnedCursor::next(OperationContext* txn) {
+StatusWith<ClusterQueryResult> ClusterCursorManager::PinnedCursor::next(OperationContext* opCtx) {
invariant(_cursor);
- return _cursor->next(txn);
+ return _cursor->next(opCtx);
}
bool ClusterCursorManager::PinnedCursor::isTailable() const {
@@ -187,7 +187,7 @@ void ClusterCursorManager::shutdown() {
}
StatusWith<CursorId> ClusterCursorManager::registerCursor(
- OperationContext* txn,
+ OperationContext* opCtx,
std::unique_ptr<ClusterClientCursor> cursor,
const NamespaceString& nss,
CursorType cursorType,
@@ -199,7 +199,7 @@ StatusWith<CursorId> ClusterCursorManager::registerCursor(
if (_inShutdown) {
lk.unlock();
- cursor->kill(txn);
+ cursor->kill(opCtx);
return Status(ErrorCodes::ShutdownInProgress,
"Cannot register new cursors as we are in the process of shutting down");
}
@@ -246,7 +246,7 @@ StatusWith<CursorId> ClusterCursorManager::registerCursor(
}
StatusWith<ClusterCursorManager::PinnedCursor> ClusterCursorManager::checkOutCursor(
- const NamespaceString& nss, CursorId cursorId, OperationContext* txn) {
+ const NamespaceString& nss, CursorId cursorId, OperationContext* opCtx) {
// Read the clock out of the lock.
const auto now = _clockSource->now();
diff --git a/src/mongo/s/query/cluster_cursor_manager.h b/src/mongo/s/query/cluster_cursor_manager.h
index 6126ef0757e..ad320452b3b 100644
--- a/src/mongo/s/query/cluster_cursor_manager.h
+++ b/src/mongo/s/query/cluster_cursor_manager.h
@@ -154,7 +154,7 @@ public:
*
* Can block.
*/
- StatusWith<ClusterQueryResult> next(OperationContext* txn);
+ StatusWith<ClusterQueryResult> next(OperationContext* opCtx);
/**
* Returns whether or not the underlying cursor is tailing a capped collection. Cannot be
@@ -261,7 +261,7 @@ public:
*
* Does not block.
*/
- StatusWith<CursorId> registerCursor(OperationContext* txn,
+ StatusWith<CursorId> registerCursor(OperationContext* opCtx,
std::unique_ptr<ClusterClientCursor> cursor,
const NamespaceString& nss,
CursorType cursorType,
@@ -282,7 +282,7 @@ public:
*/
StatusWith<PinnedCursor> checkOutCursor(const NamespaceString& nss,
CursorId cursorId,
- OperationContext* txn);
+ OperationContext* opCtx);
/**
* Informs the manager that the given cursor should be killed. The cursor need not necessarily
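
The manager's id-based handoff works in two steps: registerCursor() returns a CursorId to ship to the client, and a later checkOutCursor() exchanges that id for an exclusive PinnedCursor whose next() forwards to the underlying cursor. A sketch following the shape of ClusterFind::runGetMore() below; note that checking the pinned cursor back in is part of the real API but is not shown in this excerpt.

// Sketch: check out a registered cursor by id and read one result.
Status readOneResult(OperationContext* opCtx,
                     ClusterCursorManager* cursorManager,
                     const NamespaceString& nss,
                     CursorId cursorId) {
    auto pinnedCursor = cursorManager->checkOutCursor(nss, cursorId, opCtx);
    if (!pinnedCursor.isOK()) {
        return pinnedCursor.getStatus();  // Unknown id, wrong namespace, etc.
    }
    auto next = pinnedCursor.getValue().next(opCtx);  // May block on remotes.
    if (!next.isOK()) {
        return next.getStatus();
    }
    // ... use next.getValue(). A real caller must also check the cursor back
    // in to the manager; that part of the API is not shown in this diff.
    return Status::OK();
}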
diff --git a/src/mongo/s/query/cluster_find.cpp b/src/mongo/s/query/cluster_find.cpp
index c8790a8cf83..d944954635a 100644
--- a/src/mongo/s/query/cluster_find.cpp
+++ b/src/mongo/s/query/cluster_find.cpp
@@ -149,14 +149,14 @@ StatusWith<std::unique_ptr<QueryRequest>> transformQueryForShards(const QueryReq
return std::move(newQR);
}
-StatusWith<CursorId> runQueryWithoutRetrying(OperationContext* txn,
+StatusWith<CursorId> runQueryWithoutRetrying(OperationContext* opCtx,
const CanonicalQuery& query,
const ReadPreferenceSetting& readPref,
ChunkManager* chunkManager,
std::shared_ptr<Shard> primary,
std::vector<BSONObj>* results,
BSONObj* viewDefinition) {
- auto shardRegistry = Grid::get(txn)->shardRegistry();
+ auto shardRegistry = Grid::get(opCtx)->shardRegistry();
// Get the set of shards on which we will run the query.
std::vector<std::shared_ptr<Shard>> shards;
@@ -166,13 +166,13 @@ StatusWith<CursorId> runQueryWithoutRetrying(OperationContext* txn,
invariant(chunkManager);
std::set<ShardId> shardIds;
- chunkManager->getShardIdsForQuery(txn,
+ chunkManager->getShardIdsForQuery(opCtx,
query.getQueryRequest().getFilter(),
query.getQueryRequest().getCollation(),
&shardIds);
for (auto id : shardIds) {
- auto shardStatus = shardRegistry->getShard(txn, id);
+ auto shardStatus = shardRegistry->getShard(opCtx, id);
if (!shardStatus.isOK()) {
return shardStatus.getStatus();
}
@@ -231,12 +231,12 @@ StatusWith<CursorId> runQueryWithoutRetrying(OperationContext* txn,
}
auto ccc = ClusterClientCursorImpl::make(
- txn, Grid::get(txn)->getExecutorPool()->getArbitraryExecutor(), std::move(params));
+ opCtx, Grid::get(opCtx)->getExecutorPool()->getArbitraryExecutor(), std::move(params));
auto cursorState = ClusterCursorManager::CursorState::NotExhausted;
int bytesBuffered = 0;
while (!FindCommon::enoughForFirstBatch(query.getQueryRequest(), results->size())) {
- auto next = ccc->next(txn);
+ auto next = ccc->next(opCtx);
if (!next.isOK()) {
if (viewDefinition &&
@@ -289,21 +289,21 @@ StatusWith<CursorId> runQueryWithoutRetrying(OperationContext* txn,
}
// Register the cursor with the cursor manager.
- auto cursorManager = Grid::get(txn)->getCursorManager();
+ auto cursorManager = Grid::get(opCtx)->getCursorManager();
const auto cursorType = chunkManager ? ClusterCursorManager::CursorType::NamespaceSharded
: ClusterCursorManager::CursorType::NamespaceNotSharded;
const auto cursorLifetime = query.getQueryRequest().isNoCursorTimeout()
? ClusterCursorManager::CursorLifetime::Immortal
: ClusterCursorManager::CursorLifetime::Mortal;
return cursorManager->registerCursor(
- txn, ccc.releaseCursor(), query.nss(), cursorType, cursorLifetime);
+ opCtx, ccc.releaseCursor(), query.nss(), cursorType, cursorLifetime);
}
} // namespace
const size_t ClusterFind::kMaxStaleConfigRetries = 10;
-StatusWith<CursorId> ClusterFind::runQuery(OperationContext* txn,
+StatusWith<CursorId> ClusterFind::runQuery(OperationContext* opCtx,
const CanonicalQuery& query,
const ReadPreferenceSetting& readPref,
std::vector<BSONObj>* results,
@@ -322,7 +322,7 @@ StatusWith<CursorId> ClusterFind::runQuery(OperationContext* txn,
// Re-target and re-send the initial find command to the shards until we have established the
// shard version.
for (size_t retries = 1; retries <= kMaxStaleConfigRetries; ++retries) {
- auto scopedCMStatus = ScopedChunkManager::get(txn, query.nss());
+ auto scopedCMStatus = ScopedChunkManager::get(opCtx, query.nss());
if (scopedCMStatus == ErrorCodes::NamespaceNotFound) {
// If the database doesn't exist, we successfully return an empty result set without
// creating a cursor.
@@ -333,8 +333,13 @@ StatusWith<CursorId> ClusterFind::runQuery(OperationContext* txn,
const auto& scopedCM = scopedCMStatus.getValue();
- auto cursorId = runQueryWithoutRetrying(
- txn, query, readPref, scopedCM.cm().get(), scopedCM.primary(), results, viewDefinition);
+ auto cursorId = runQueryWithoutRetrying(opCtx,
+ query,
+ readPref,
+ scopedCM.cm().get(),
+ scopedCM.primary(),
+ results,
+ viewDefinition);
if (cursorId.isOK()) {
return cursorId;
}
@@ -353,9 +358,9 @@ StatusWith<CursorId> ClusterFind::runQuery(OperationContext* txn,
<< redact(status);
if (status == ErrorCodes::StaleEpoch) {
- Grid::get(txn)->catalogCache()->invalidate(query.nss().db().toString());
+ Grid::get(opCtx)->catalogCache()->invalidate(query.nss().db().toString());
} else {
- scopedCM.db()->getChunkManagerIfExists(txn, query.nss().ns(), true);
+ scopedCM.db()->getChunkManagerIfExists(opCtx, query.nss().ns(), true);
}
}
@@ -364,11 +369,11 @@ StatusWith<CursorId> ClusterFind::runQuery(OperationContext* txn,
<< " times without successfully establishing shard version."};
}
-StatusWith<CursorResponse> ClusterFind::runGetMore(OperationContext* txn,
+StatusWith<CursorResponse> ClusterFind::runGetMore(OperationContext* opCtx,
const GetMoreRequest& request) {
- auto cursorManager = Grid::get(txn)->getCursorManager();
+ auto cursorManager = Grid::get(opCtx)->getCursorManager();
- auto pinnedCursor = cursorManager->checkOutCursor(request.nss, request.cursorid, txn);
+ auto pinnedCursor = cursorManager->checkOutCursor(request.nss, request.cursorid, opCtx);
if (!pinnedCursor.isOK()) {
return pinnedCursor.getStatus();
}
@@ -391,7 +396,7 @@ StatusWith<CursorResponse> ClusterFind::runGetMore(OperationContext* txn,
long long startingFrom = pinnedCursor.getValue().getNumReturnedSoFar();
auto cursorState = ClusterCursorManager::CursorState::NotExhausted;
while (!FindCommon::enoughForGetMore(batchSize, batch.size())) {
- auto next = pinnedCursor.getValue().next(txn);
+ auto next = pinnedCursor.getValue().next(opCtx);
if (!next.isOK()) {
return next.getStatus();
}
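
The control flow above is worth isolating: runQuery() makes up to kMaxStaleConfigRetries attempts, refreshing routing state between attempts (invalidating the database's catalog-cache entry on StaleEpoch, otherwise forcing a chunk-manager reload). A condensed skeleton, where runOnce() and isRetriableStaleError() are hypothetical stand-ins for runQueryWithoutRetrying() and the stale-config error check:

// Condensed sketch of the retry skeleton in ClusterFind::runQuery().
StatusWith<CursorId> runWithRetries(OperationContext* opCtx, const NamespaceString& nss) {
    for (size_t retries = 1; retries <= ClusterFind::kMaxStaleConfigRetries; ++retries) {
        auto cursorId = runOnce(opCtx, nss);  // hypothetical wrapper
        if (cursorId.isOK()) {
            return cursorId;
        }
        auto status = cursorId.getStatus();
        if (!isRetriableStaleError(status)) {  // hypothetical predicate
            return status;  // Not a shard-version problem: fail immediately.
        }
        // Refresh routing data before re-targeting, per the branches above.
        if (status == ErrorCodes::StaleEpoch) {
            Grid::get(opCtx)->catalogCache()->invalidate(nss.db().toString());
        }
    }
    return Status(ErrorCodes::StaleShardVersion,
                  "Exhausted retries without establishing shard version");
}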
diff --git a/src/mongo/s/query/cluster_find.h b/src/mongo/s/query/cluster_find.h
index 22d7ad89b04..5a011d27958 100644
--- a/src/mongo/s/query/cluster_find.h
+++ b/src/mongo/s/query/cluster_find.h
@@ -66,7 +66,7 @@ public:
* If a CommandOnShardedViewNotSupportedOnMongod error is returned, then 'viewDefinition', if
* not null, will contain a view definition.
*/
- static StatusWith<CursorId> runQuery(OperationContext* txn,
+ static StatusWith<CursorId> runQuery(OperationContext* opCtx,
const CanonicalQuery& query,
const ReadPreferenceSetting& readPref,
std::vector<BSONObj>* results,
@@ -75,7 +75,7 @@ public:
/**
* Executes the getMore request 'request', and on success returns a CursorResponse.
*/
- static StatusWith<CursorResponse> runGetMore(OperationContext* txn,
+ static StatusWith<CursorResponse> runGetMore(OperationContext* opCtx,
const GetMoreRequest& request);
/**
diff --git a/src/mongo/s/query/router_exec_stage.h b/src/mongo/s/query/router_exec_stage.h
index 5fcb6053e58..f6128a53e43 100644
--- a/src/mongo/s/query/router_exec_stage.h
+++ b/src/mongo/s/query/router_exec_stage.h
@@ -66,13 +66,13 @@ public:
* holding on to a subset of the returned results and need to minimize memory usage, call copy()
* on the BSONObjs.
*/
- virtual StatusWith<ClusterQueryResult> next(OperationContext* txn) = 0;
+ virtual StatusWith<ClusterQueryResult> next(OperationContext* opCtx) = 0;
/**
* Must be called before destruction to abandon a not-yet-exhausted plan. May block waiting for
* responses from remote hosts.
*/
- virtual void kill(OperationContext* txn) = 0;
+ virtual void kill(OperationContext* opCtx) = 0;
/**
* Returns whether or not all the remote cursors are exhausted.
diff --git a/src/mongo/s/query/router_stage_limit.cpp b/src/mongo/s/query/router_stage_limit.cpp
index 4a1a428a533..ea90251eef6 100644
--- a/src/mongo/s/query/router_stage_limit.cpp
+++ b/src/mongo/s/query/router_stage_limit.cpp
@@ -39,12 +39,12 @@ RouterStageLimit::RouterStageLimit(std::unique_ptr<RouterExecStage> child, long
invariant(limit > 0);
}
-StatusWith<ClusterQueryResult> RouterStageLimit::next(OperationContext* txn) {
+StatusWith<ClusterQueryResult> RouterStageLimit::next(OperationContext* opCtx) {
if (_returnedSoFar >= _limit) {
return {ClusterQueryResult()};
}
- auto childResult = getChildStage()->next(txn);
+ auto childResult = getChildStage()->next(opCtx);
if (!childResult.isOK()) {
return childResult;
}
@@ -55,8 +55,8 @@ StatusWith<ClusterQueryResult> RouterStageLimit::next(OperationContext* txn) {
return childResult;
}
-void RouterStageLimit::kill(OperationContext* txn) {
- getChildStage()->kill(txn);
+void RouterStageLimit::kill(OperationContext* opCtx) {
+ getChildStage()->kill(opCtx);
}
bool RouterStageLimit::remotesExhausted() {
diff --git a/src/mongo/s/query/router_stage_limit.h b/src/mongo/s/query/router_stage_limit.h
index 29fb85dd458..42223902cc1 100644
--- a/src/mongo/s/query/router_stage_limit.h
+++ b/src/mongo/s/query/router_stage_limit.h
@@ -39,9 +39,9 @@ class RouterStageLimit final : public RouterExecStage {
public:
RouterStageLimit(std::unique_ptr<RouterExecStage> child, long long limit);
- StatusWith<ClusterQueryResult> next(OperationContext* txn) final;
+ StatusWith<ClusterQueryResult> next(OperationContext* opCtx) final;
- void kill(OperationContext* txn) final;
+ void kill(OperationContext* opCtx) final;
bool remotesExhausted() final;
diff --git a/src/mongo/s/query/router_stage_merge.cpp b/src/mongo/s/query/router_stage_merge.cpp
index e66aaf91fc4..90a80e7161b 100644
--- a/src/mongo/s/query/router_stage_merge.cpp
+++ b/src/mongo/s/query/router_stage_merge.cpp
@@ -40,9 +40,9 @@ RouterStageMerge::RouterStageMerge(executor::TaskExecutor* executor,
ClusterClientCursorParams* params)
: _executor(executor), _arm(executor, params) {}
-StatusWith<ClusterQueryResult> RouterStageMerge::next(OperationContext* txn) {
+StatusWith<ClusterQueryResult> RouterStageMerge::next(OperationContext* opCtx) {
while (!_arm.ready()) {
- auto nextEventStatus = _arm.nextEvent(txn);
+ auto nextEventStatus = _arm.nextEvent(opCtx);
if (!nextEventStatus.isOK()) {
return nextEventStatus.getStatus();
}
@@ -55,8 +55,8 @@ StatusWith<ClusterQueryResult> RouterStageMerge::next(OperationContext* txn) {
return _arm.nextReady();
}
-void RouterStageMerge::kill(OperationContext* txn) {
- auto killEvent = _arm.kill(txn);
+void RouterStageMerge::kill(OperationContext* opCtx) {
+ auto killEvent = _arm.kill(opCtx);
if (!killEvent) {
// Mongos is shutting down.
return;
diff --git a/src/mongo/s/query/router_stage_merge.h b/src/mongo/s/query/router_stage_merge.h
index 58a8061355e..428a405b401 100644
--- a/src/mongo/s/query/router_stage_merge.h
+++ b/src/mongo/s/query/router_stage_merge.h
@@ -45,9 +45,9 @@ class RouterStageMerge final : public RouterExecStage {
public:
RouterStageMerge(executor::TaskExecutor* executor, ClusterClientCursorParams* params);
- StatusWith<ClusterQueryResult> next(OperationContext* txn) final;
+ StatusWith<ClusterQueryResult> next(OperationContext* opCtx) final;
- void kill(OperationContext* txn) final;
+ void kill(OperationContext* opCtx) final;
bool remotesExhausted() final;
diff --git a/src/mongo/s/query/router_stage_mock.cpp b/src/mongo/s/query/router_stage_mock.cpp
index c348018fe6f..e134340713a 100644
--- a/src/mongo/s/query/router_stage_mock.cpp
+++ b/src/mongo/s/query/router_stage_mock.cpp
@@ -50,7 +50,7 @@ void RouterStageMock::markRemotesExhausted() {
_remotesExhausted = true;
}
-StatusWith<ClusterQueryResult> RouterStageMock::next(OperationContext* txn) {
+StatusWith<ClusterQueryResult> RouterStageMock::next(OperationContext* opCtx) {
if (_resultsQueue.empty()) {
return {ClusterQueryResult()};
}
@@ -60,7 +60,7 @@ StatusWith<ClusterQueryResult> RouterStageMock::next(OperationContext* txn) {
return out;
}
-void RouterStageMock::kill(OperationContext* txn) {
+void RouterStageMock::kill(OperationContext* opCtx) {
// No child to kill.
}
diff --git a/src/mongo/s/query/router_stage_mock.h b/src/mongo/s/query/router_stage_mock.h
index dce077d8122..7cba32a81f6 100644
--- a/src/mongo/s/query/router_stage_mock.h
+++ b/src/mongo/s/query/router_stage_mock.h
@@ -44,9 +44,9 @@ class RouterStageMock final : public RouterExecStage {
public:
~RouterStageMock() final {}
- StatusWith<ClusterQueryResult> next(OperationContext* txn) final;
+ StatusWith<ClusterQueryResult> next(OperationContext* opCtx) final;
- void kill(OperationContext* txn) final;
+ void kill(OperationContext* opCtx) final;
bool remotesExhausted() final;
diff --git a/src/mongo/s/query/router_stage_remove_sortkey.cpp b/src/mongo/s/query/router_stage_remove_sortkey.cpp
index 9c58e489b13..9cb1e4d26c9 100644
--- a/src/mongo/s/query/router_stage_remove_sortkey.cpp
+++ b/src/mongo/s/query/router_stage_remove_sortkey.cpp
@@ -41,8 +41,8 @@ namespace mongo {
RouterStageRemoveSortKey::RouterStageRemoveSortKey(std::unique_ptr<RouterExecStage> child)
: RouterExecStage(std::move(child)) {}
-StatusWith<ClusterQueryResult> RouterStageRemoveSortKey::next(OperationContext* txn) {
- auto childResult = getChildStage()->next(txn);
+StatusWith<ClusterQueryResult> RouterStageRemoveSortKey::next(OperationContext* opCtx) {
+ auto childResult = getChildStage()->next(opCtx);
if (!childResult.isOK() || !childResult.getValue().getResult()) {
return childResult;
}
@@ -59,8 +59,8 @@ StatusWith<ClusterQueryResult> RouterStageRemoveSortKey::next(OperationContext*
return {builder.obj()};
}
-void RouterStageRemoveSortKey::kill(OperationContext* txn) {
- getChildStage()->kill(txn);
+void RouterStageRemoveSortKey::kill(OperationContext* opCtx) {
+ getChildStage()->kill(opCtx);
}
bool RouterStageRemoveSortKey::remotesExhausted() {
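
Between the two hunks above sits the elided body of next(): it rebuilds each document, copying every field except the sort key that mongos injected for merging. A hedged sketch of that filtering step; the field name "$sortKey" is an assumption here, since the elided lines do not appear in this diff.

// Sketch of the elided filter: rebuild 'doc' without the sort-key field.
// The "$sortKey" name is assumed; the actual constant is not shown here.
BSONObj removeSortKey(const BSONObj& doc) {
    BSONObjBuilder builder;
    for (auto&& elt : doc) {
        if (elt.fieldNameStringData() != "$sortKey") {
            builder.append(elt);
        }
    }
    return builder.obj();
}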
diff --git a/src/mongo/s/query/router_stage_remove_sortkey.h b/src/mongo/s/query/router_stage_remove_sortkey.h
index 291cf01a803..e3599a3e9b0 100644
--- a/src/mongo/s/query/router_stage_remove_sortkey.h
+++ b/src/mongo/s/query/router_stage_remove_sortkey.h
@@ -41,9 +41,9 @@ class RouterStageRemoveSortKey final : public RouterExecStage {
public:
RouterStageRemoveSortKey(std::unique_ptr<RouterExecStage> child);
- StatusWith<ClusterQueryResult> next(OperationContext* txn) final;
+ StatusWith<ClusterQueryResult> next(OperationContext* opCtx) final;
- void kill(OperationContext* txn) final;
+ void kill(OperationContext* opCtx) final;
bool remotesExhausted() final;
diff --git a/src/mongo/s/query/router_stage_skip.cpp b/src/mongo/s/query/router_stage_skip.cpp
index a6bec5c8733..6763ca5808b 100644
--- a/src/mongo/s/query/router_stage_skip.cpp
+++ b/src/mongo/s/query/router_stage_skip.cpp
@@ -39,9 +39,9 @@ RouterStageSkip::RouterStageSkip(std::unique_ptr<RouterExecStage> child, long lo
invariant(skip > 0);
}
-StatusWith<ClusterQueryResult> RouterStageSkip::next(OperationContext* txn) {
+StatusWith<ClusterQueryResult> RouterStageSkip::next(OperationContext* opCtx) {
while (_skippedSoFar < _skip) {
- auto next = getChildStage()->next(txn);
+ auto next = getChildStage()->next(opCtx);
if (!next.isOK()) {
return next;
}
@@ -53,11 +53,11 @@ StatusWith<ClusterQueryResult> RouterStageSkip::next(OperationContext* txn) {
++_skippedSoFar;
}
- return getChildStage()->next(txn);
+ return getChildStage()->next(opCtx);
}
-void RouterStageSkip::kill(OperationContext* txn) {
- getChildStage()->kill(txn);
+void RouterStageSkip::kill(OperationContext* opCtx) {
+ getChildStage()->kill(opCtx);
}
bool RouterStageSkip::remotesExhausted() {
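
RouterStageSkip has the same decorator shape as RouterStageLimit and RouterStageRemoveSortKey: each stage wraps a child and threads the OperationContext through next() and kill(). A sketch composing a small pipeline from the constructors in this diff; RouterStageMock's default constructor and its role as a canned result source are assumed from its test-support purpose.

// Sketch: compose router stages into a pipeline (mock source -> skip -> limit).
void pipelineExample(OperationContext* opCtx) {
    auto source = stdx::make_unique<RouterStageMock>();  // assumed default ctor
    auto skipped = stdx::make_unique<RouterStageSkip>(std::move(source), 2 /* skip */);
    auto root = stdx::make_unique<RouterStageLimit>(std::move(skipped), 5 /* limit */);

    // Each next() pulls from its child: skip discards the first two child
    // results, limit returns EOF after five, and kill() propagates down.
    auto result = root->next(opCtx);
    root->kill(opCtx);
}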
diff --git a/src/mongo/s/query/router_stage_skip.h b/src/mongo/s/query/router_stage_skip.h
index c949271f79e..773220d4fe6 100644
--- a/src/mongo/s/query/router_stage_skip.h
+++ b/src/mongo/s/query/router_stage_skip.h
@@ -39,9 +39,9 @@ class RouterStageSkip final : public RouterExecStage {
public:
RouterStageSkip(std::unique_ptr<RouterExecStage> child, long long skip);
- StatusWith<ClusterQueryResult> next(OperationContext* txn) final;
+ StatusWith<ClusterQueryResult> next(OperationContext* opCtx) final;
- void kill(OperationContext* txn) final;
+ void kill(OperationContext* opCtx) final;
bool remotesExhausted() final;
diff --git a/src/mongo/s/query/store_possible_cursor.cpp b/src/mongo/s/query/store_possible_cursor.cpp
index 1e3b7d03306..8647871b6a7 100644
--- a/src/mongo/s/query/store_possible_cursor.cpp
+++ b/src/mongo/s/query/store_possible_cursor.cpp
@@ -39,7 +39,7 @@
namespace mongo {
-StatusWith<BSONObj> storePossibleCursor(OperationContext* txn,
+StatusWith<BSONObj> storePossibleCursor(OperationContext* opCtx,
const HostAndPort& server,
const BSONObj& cmdResult,
const NamespaceString& requestedNss,
@@ -62,10 +62,10 @@ StatusWith<BSONObj> storePossibleCursor(OperationContext* txn,
params.remotes.emplace_back(server, incomingCursorResponse.getValue().getCursorId());
- auto ccc = ClusterClientCursorImpl::make(txn, executor, std::move(params));
+ auto ccc = ClusterClientCursorImpl::make(opCtx, executor, std::move(params));
auto clusterCursorId =
- cursorManager->registerCursor(txn,
+ cursorManager->registerCursor(opCtx,
ccc.releaseCursor(),
requestedNss,
ClusterCursorManager::CursorType::NamespaceNotSharded,
diff --git a/src/mongo/s/query/store_possible_cursor.h b/src/mongo/s/query/store_possible_cursor.h
index f06c959b41c..03d61ac4d33 100644
--- a/src/mongo/s/query/store_possible_cursor.h
+++ b/src/mongo/s/query/store_possible_cursor.h
@@ -57,7 +57,7 @@ class TaskExecutor;
* BSONObj response document describing the newly-created cursor, which is suitable for returning to
* the client.
*/
-StatusWith<BSONObj> storePossibleCursor(OperationContext* txn,
+StatusWith<BSONObj> storePossibleCursor(OperationContext* opCtx,
const HostAndPort& server,
const BSONObj& cmdResult,
const NamespaceString& requestedNss,