-rw-r--r-- | jstests/noPassthrough/read_concern_snapshot_yielding.js | 192
-rw-r--r-- | src/mongo/db/commands/find_cmd.cpp                      |  14
-rw-r--r-- | src/mongo/db/exec/cached_plan.cpp                       |   4
-rw-r--r-- | src/mongo/db/exec/multi_plan.cpp                        |   4
-rw-r--r-- | src/mongo/db/query/get_executor.cpp                     |   3
-rw-r--r-- | src/mongo/db/query/mock_yield_policies.h                |  16
-rw-r--r-- | src/mongo/db/query/plan_executor.cpp                    |  11
-rw-r--r-- | src/mongo/db/query/plan_executor.h                      |   3
-rw-r--r-- | src/mongo/db/query/plan_yield_policy.cpp                |  35
-rw-r--r-- | src/mongo/db/query/plan_yield_policy.h                  |  41
10 files changed, 284 insertions, 39 deletions
diff --git a/jstests/noPassthrough/read_concern_snapshot_yielding.js b/jstests/noPassthrough/read_concern_snapshot_yielding.js
new file mode 100644
index 00000000000..1b800322f44
--- /dev/null
+++ b/jstests/noPassthrough/read_concern_snapshot_yielding.js
@@ -0,0 +1,192 @@
+// Test that the read concern level 'snapshot' exhibits the correct yielding behavior for find and
+// getMore. That is, snapshot reads check for interrupt but do not yield locks.
+(function() {
+    "use strict";
+
+    load("jstests/libs/profiler.js");  // For getLatestProfilerEntry.
+
+    const dbName = "test";
+    const collName = "coll";
+
+    const rst = new ReplSetTest({nodes: 1});
+    rst.startSet();
+    rst.initiate();
+    const db = rst.getPrimary().getDB(dbName);
+    const adminDB = db.getSiblingDB("admin");
+    const coll = db.coll;
+    assert.commandWorked(db.adminCommand({setParameter: 1, internalQueryExecYieldIterations: 1}));
+    db.setProfilingLevel(2);
+
+    if (!db.serverStatus().storageEngine.supportsSnapshotReadConcern) {
+        rst.stopSet();
+        return;
+    }
+
+    function waitForOpId(curOpFilter) {
+        let opId;
+        assert.soon(
+            function() {
+                const res = adminDB
+                                .aggregate([
+                                    {$currentOp: {}},
+                                    {$match: {$and: [{ns: coll.getFullName()}, curOpFilter]}}
+                                ])
+                                .toArray();
+                if (res.length === 1) {
+                    opId = res[0].opid;
+                    return true;
+                }
+                return false;
+            },
+            function() {
+                return "Failed to find operation in $currentOp output: " +
+                    tojson(adminDB.aggregate([{$currentOp: {}}, {$match: {ns: coll.getFullName()}}])
+                               .toArray());
+            });
+        return opId;
+    }
+
+    function assertKillPending(opId) {
+        const res =
+            adminDB.aggregate([{$currentOp: {}}, {$match: {ns: coll.getFullName(), opid: opId}}])
+                .toArray();
+        assert.eq(res.length,
+                  1,
+                  tojson(adminDB.aggregate([{$currentOp: {}}, {$match: {ns: coll.getFullName()}}])
+                             .toArray()));
+        assert(res[0].hasOwnProperty("killPending"), tojson(res));
+        assert.eq(true, res[0].killPending, tojson(res));
+    }
+
+    function awaitFindFn() {
+        const session = db.getMongo().startSession({causalConsistency: false});
+        const sessionDB = session.getDatabase("test");
+        assert.commandWorked(sessionDB.runCommand({
+            find: "coll",
+            filter: {x: 1},
+            readConcern: {level: "snapshot"},
+            txnNumber: NumberLong(0)
+        }));
+    }
+
+    function awaitGetMoreFn() {
+        const session = db.getMongo().startSession({causalConsistency: false});
+        const sessionDB = session.getDatabase("test");
+        const cursorId = assert
+                             .commandWorked(sessionDB.runCommand({
+                                 find: "coll",
+                                 filter: {x: 1},
+                                 batchSize: 2,
+                                 readConcern: {level: "snapshot"},
+                                 txnNumber: NumberLong(0)
+                             }))
+                             .cursor.id;
+        assert.commandWorked(sessionDB.adminCommand(
+            {configureFailPoint: "setCheckForInterruptHang", mode: "alwaysOn"}));
+        assert.commandWorked(sessionDB.runCommand(
+            {getMore: NumberLong(cursorId), collection: "coll", txnNumber: NumberLong(0)}));
+    }
+
+    for (let i = 0; i < 4; i++) {
+        assert.commandWorked(db.coll.insert({_id: i, x: 1}));
+    }
+
+    //
+    // Snapshot finds can be killed.
+    //
+
+    // Start a find command that hangs before checking for interrupt.
+    assert.commandWorked(
+        db.adminCommand({configureFailPoint: "setCheckForInterruptHang", mode: "alwaysOn"}));
+    let awaitFind = startParallelShell(awaitFindFn, rst.ports[0]);
+
+    // Kill the command, and check that it is set to killPending.
+    let opId = waitForOpId({"command.filter": {x: 1}});
+    assert.commandWorked(db.killOp(opId));
+    assertKillPending(opId);
+
+    // Remove the hang, and check that the command is killed.
+    assert.commandWorked(
+        db.adminCommand({configureFailPoint: "setCheckForInterruptHang", mode: "off"}));
+    let exitCode = awaitFind({checkExitSuccess: false});
+    assert.neq(0, exitCode, "Expected shell to exit with failure due to operation kill");
+
+    //
+    // Snapshot getMores can be killed.
+    //
+
+    // Start a getMore command that hangs before checking for interrupt.
+    let awaitGetMore = startParallelShell(awaitGetMoreFn, rst.ports[0]);
+
+    // Kill the command, and check that it is set to killPending.
+    opId = waitForOpId({"originatingCommand.filter": {x: 1}});
+    assert.commandWorked(db.killOp(opId));
+    assertKillPending(opId);
+
+    // Remove the hang, and check that the command is killed.
+    assert.commandWorked(
+        db.adminCommand({configureFailPoint: "setCheckForInterruptHang", mode: "off"}));
+    exitCode = awaitGetMore({checkExitSuccess: false});
+    assert.neq(0, exitCode, "Expected shell to exit with failure due to operation kill");
+
+    //
+    // Snapshot finds do not yield locks.
+    //
+
+    // Start a find command that hangs before checking for interrupt.
+    assert.commandWorked(
+        db.adminCommand({configureFailPoint: "setCheckForInterruptHang", mode: "alwaysOn"}));
+    awaitFind = startParallelShell(awaitFindFn, rst.ports[0]);
+    waitForOpId({"command.filter": {x: 1}});
+
+    // Start a drop. This should block behind the find, since the find does not yield locks.
+    let awaitDrop = startParallelShell(function() {
+        db.getSiblingDB("test").coll.drop();
+    }, rst.ports[0]);
+
+    // Remove the hang. The find should complete successfully.
+    assert.commandWorked(
+        db.adminCommand({configureFailPoint: "setCheckForInterruptHang", mode: "off"}));
+    awaitFind();
+
+    // Now the drop can complete.
+    awaitDrop();
+
+    // Confirm that the find did not yield.
+    let profilerEntry = getLatestProfilerEntry(db, {op: "query"});
+    assert(profilerEntry.hasOwnProperty("numYield"), tojson(profilerEntry));
+    assert.eq(0, profilerEntry.numYield, tojson(profilerEntry));
+
+    // Restore the collection.
+    for (let i = 0; i < 4; i++) {
+        assert.commandWorked(db.coll.insert({_id: i, x: 1}));
+    }
+
+    //
+    // Snapshot getMores do not yield locks.
+    //
+
+    // Start a getMore command that hangs before checking for interrupt.
+    awaitGetMore = startParallelShell(awaitGetMoreFn, rst.ports[0]);
+    waitForOpId({"originatingCommand.filter": {x: 1}});
+
+    // Start a drop. This should block behind the getMore, since the getMore does not yield locks.
+    awaitDrop = startParallelShell(function() {
+        db.getSiblingDB("test").coll.drop();
+    }, rst.ports[0]);
+
+    // Remove the hang. The getMore should complete successfully.
+    assert.commandWorked(
+        db.adminCommand({configureFailPoint: "setCheckForInterruptHang", mode: "off"}));
+    awaitGetMore();
+
+    // Now the drop can complete.
+    awaitDrop();
+
+    // Confirm that the getMore did not yield.
+    profilerEntry = getLatestProfilerEntry(db, {op: "getmore"});
+    assert(profilerEntry.hasOwnProperty("numYield"), tojson(profilerEntry));
+    assert.eq(0, profilerEntry.numYield, tojson(profilerEntry));
+
+    rst.stopSet();
+}());
diff --git a/src/mongo/db/commands/find_cmd.cpp b/src/mongo/db/commands/find_cmd.cpp
index c337efc9389..b5830b08a09 100644
--- a/src/mongo/db/commands/find_cmd.cpp
+++ b/src/mongo/db/commands/find_cmd.cpp
@@ -196,8 +196,13 @@ public:
         Collection* collection = ctx.getCollection();
 
         // We have a parsed query. Time to get the execution plan for it.
+        auto readConcernArgs = repl::ReadConcernArgs::get(opCtx);
+        auto yieldPolicy =
+            readConcernArgs.getLevel() == repl::ReadConcernLevel::kSnapshotReadConcern
+            ? PlanExecutor::INTERRUPT_ONLY
+            : PlanExecutor::YIELD_AUTO;
         auto statusWithPlanExecutor =
-            getExecutorFind(opCtx, collection, nss, std::move(cq), PlanExecutor::YIELD_AUTO);
+            getExecutorFind(opCtx, collection, nss, std::move(cq), yieldPolicy);
         if (!statusWithPlanExecutor.isOK()) {
             return statusWithPlanExecutor.getStatus();
         }
@@ -305,8 +310,13 @@ public:
         }
 
         // Get the execution plan for the query.
+        auto readConcernArgs = repl::ReadConcernArgs::get(opCtx);
+        auto yieldPolicy =
+            readConcernArgs.getLevel() == repl::ReadConcernLevel::kSnapshotReadConcern
+            ? PlanExecutor::INTERRUPT_ONLY
+            : PlanExecutor::YIELD_AUTO;
         auto statusWithPlanExecutor =
-            getExecutorFind(opCtx, collection, nss, std::move(cq), PlanExecutor::YIELD_AUTO);
+            getExecutorFind(opCtx, collection, nss, std::move(cq), yieldPolicy);
         if (!statusWithPlanExecutor.isOK()) {
             return CommandHelpers::appendCommandStatus(result, statusWithPlanExecutor.getStatus());
         }
diff --git a/src/mongo/db/exec/cached_plan.cpp b/src/mongo/db/exec/cached_plan.cpp
index b1bf5d16cbd..b6d185c4032 100644
--- a/src/mongo/db/exec/cached_plan.cpp
+++ b/src/mongo/db/exec/cached_plan.cpp
@@ -179,9 +179,9 @@ Status CachedPlanStage::tryYield(PlanYieldPolicy* yieldPolicy) {
     // 2) some stage requested a yield due to a document fetch, or
     // 3) we need to yield and retry due to a WriteConflictException.
     // In all cases, the actual yielding happens here.
-    if (yieldPolicy->shouldYield()) {
+    if (yieldPolicy->shouldYieldOrInterrupt()) {
         // Here's where we yield.
-        return yieldPolicy->yield(_fetcher.get());
+        return yieldPolicy->yieldOrInterrupt(_fetcher.get());
     }
 
     // We're done using the fetcher, so it should be freed. We don't want to
diff --git a/src/mongo/db/exec/multi_plan.cpp b/src/mongo/db/exec/multi_plan.cpp
index 34d7e6d5cc8..5dc4bd0b863 100644
--- a/src/mongo/db/exec/multi_plan.cpp
+++ b/src/mongo/db/exec/multi_plan.cpp
@@ -151,8 +151,8 @@ Status MultiPlanStage::tryYield(PlanYieldPolicy* yieldPolicy) {
     // 2) some stage requested a yield due to a document fetch, or
     // 3) we need to yield and retry due to a WriteConflictException.
     // In all cases, the actual yielding happens here.
-    if (yieldPolicy->shouldYield()) {
-        auto yieldStatus = yieldPolicy->yield(_fetcher.get());
+    if (yieldPolicy->shouldYieldOrInterrupt()) {
+        auto yieldStatus = yieldPolicy->yieldOrInterrupt(_fetcher.get());
 
         if (!yieldStatus.isOK()) {
             _failure = true;
diff --git a/src/mongo/db/query/get_executor.cpp b/src/mongo/db/query/get_executor.cpp
index 84ab8649f2f..dd6af8d52cc 100644
--- a/src/mongo/db/query/get_executor.cpp
+++ b/src/mongo/db/query/get_executor.cpp
@@ -657,8 +657,7 @@ StatusWith<unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorFind(
     if (ShardingState::get(opCtx)->needCollectionMetadata(opCtx, nss.ns())) {
         plannerOptions |= QueryPlannerParams::INCLUDE_SHARD_FILTER;
     }
-    return getExecutor(
-        opCtx, collection, std::move(canonicalQuery), PlanExecutor::YIELD_AUTO, plannerOptions);
+    return getExecutor(opCtx, collection, std::move(canonicalQuery), yieldPolicy, plannerOptions);
 }
 
 namespace {
diff --git a/src/mongo/db/query/mock_yield_policies.h b/src/mongo/db/query/mock_yield_policies.h
index 2a690853799..dc3f5c1ef26 100644
--- a/src/mongo/db/query/mock_yield_policies.h
+++ b/src/mongo/db/query/mock_yield_policies.h
@@ -45,16 +45,16 @@ public:
     AlwaysTimeOutYieldPolicy(ClockSource* cs)
         : PlanYieldPolicy(PlanExecutor::YieldPolicy::ALWAYS_TIME_OUT, cs) {}
 
-    bool shouldYield() override {
+    bool shouldYieldOrInterrupt() override {
         return true;
     }
 
-    Status yield(RecordFetcher* recordFetcher) override {
+    Status yieldOrInterrupt(RecordFetcher* recordFetcher) override {
         return {ErrorCodes::ExceededTimeLimit, "Using AlwaysTimeOutYieldPolicy"};
     }
 
-    Status yield(stdx::function<void()> beforeYieldingFn,
-                 stdx::function<void()> whileYieldingFn) override {
+    Status yieldOrInterrupt(stdx::function<void()> beforeYieldingFn,
+                            stdx::function<void()> whileYieldingFn) override {
         return {ErrorCodes::ExceededTimeLimit, "Using AlwaysTimeOutYieldPolicy"};
     }
 };
@@ -71,16 +71,16 @@ public:
     AlwaysPlanKilledYieldPolicy(ClockSource* cs)
         : PlanYieldPolicy(PlanExecutor::YieldPolicy::ALWAYS_MARK_KILLED, cs) {}
 
-    bool shouldYield() override {
+    bool shouldYieldOrInterrupt() override {
         return true;
     }
 
-    Status yield(RecordFetcher* recordFetcher) override {
+    Status yieldOrInterrupt(RecordFetcher* recordFetcher) override {
         return {ErrorCodes::QueryPlanKilled, "Using AlwaysPlanKilledYieldPolicy"};
     }
 
-    Status yield(stdx::function<void()> beforeYieldingFn,
-                 stdx::function<void()> whileYieldingFn) override {
+    Status yieldOrInterrupt(stdx::function<void()> beforeYieldingFn,
+                            stdx::function<void()> whileYieldingFn) override {
         return {ErrorCodes::QueryPlanKilled, "Using AlwaysPlanKilledYieldPolicy"};
     }
 };
diff --git a/src/mongo/db/query/plan_executor.cpp b/src/mongo/db/query/plan_executor.cpp
index b2619986a46..b3ad1aa08d0 100644
--- a/src/mongo/db/query/plan_executor.cpp
+++ b/src/mongo/db/query/plan_executor.cpp
@@ -84,7 +84,8 @@ std::unique_ptr<PlanYieldPolicy> makeYieldPolicy(PlanExecutor* exec,
         case PlanExecutor::YieldPolicy::YIELD_AUTO:
         case PlanExecutor::YieldPolicy::YIELD_MANUAL:
         case PlanExecutor::YieldPolicy::NO_YIELD:
-        case PlanExecutor::YieldPolicy::WRITE_CONFLICT_RETRY_ONLY: {
+        case PlanExecutor::YieldPolicy::WRITE_CONFLICT_RETRY_ONLY:
+        case PlanExecutor::YieldPolicy::INTERRUPT_ONLY: {
            return stdx::make_unique<PlanYieldPolicy>(exec, policy);
        }
        case PlanExecutor::YieldPolicy::ALWAYS_TIME_OUT: {
@@ -357,7 +358,7 @@ Status PlanExecutor::restoreState() {
            throw;
 
        // Handles retries by calling restoreStateWithoutRetrying() in a loop.
-        return _yieldPolicy->yield();
+        return _yieldPolicy->yieldOrInterrupt();
     }
 }
@@ -469,7 +470,7 @@ PlanExecutor::ExecState PlanExecutor::waitForInserts(CappedInsertNotifierData* n
     ON_BLOCK_EXIT([curOp] { curOp->resumeTimer(); });
     auto opCtx = _opCtx;
     uint64_t currentNotifierVersion = notifierData->notifier->getVersion();
-    auto yieldResult = _yieldPolicy->yield(nullptr, [opCtx, notifierData] {
+    auto yieldResult = _yieldPolicy->yieldOrInterrupt(nullptr, [opCtx, notifierData] {
         const auto deadline = awaitDataState(opCtx).waitForInsertsDeadline;
         notifierData->notifier->waitUntil(notifierData->lastEOFVersion, deadline);
     });
@@ -537,8 +538,8 @@ PlanExecutor::ExecState PlanExecutor::getNextImpl(Snapshotted<BSONObj>* objOut,
         // 2) some stage requested a yield due to a document fetch, or
         // 3) we need to yield and retry due to a WriteConflictException.
         // In all cases, the actual yielding happens here.
-        if (_yieldPolicy->shouldYield()) {
-            auto yieldStatus = _yieldPolicy->yield(fetcher.get());
+        if (_yieldPolicy->shouldYieldOrInterrupt()) {
+            auto yieldStatus = _yieldPolicy->yieldOrInterrupt(fetcher.get());
             if (!yieldStatus.isOK()) {
                 if (objOut) {
                     *objOut = Snapshotted<BSONObj>(
diff --git a/src/mongo/db/query/plan_executor.h b/src/mongo/db/query/plan_executor.h
index c0bd8b8a2c7..9ce62619e2d 100644
--- a/src/mongo/db/query/plan_executor.h
+++ b/src/mongo/db/query/plan_executor.h
@@ -122,6 +122,9 @@ public:
         // which is being used to execute an aggregation pipeline.
         NO_YIELD,
 
+        // Will not yield locks or storage engine resources, but will check for interrupt.
+        INTERRUPT_ONLY,
+
         // Used for testing, this yield policy will cause the PlanExecutor to time out on the first
         // yield, returning DEAD with an error object encoding a ErrorCodes::ExceededTimeLimit
         // message.
diff --git a/src/mongo/db/query/plan_yield_policy.cpp b/src/mongo/db/query/plan_yield_policy.cpp
index 13bded7f1b4..4c3a96282d4 100644
--- a/src/mongo/db/query/plan_yield_policy.cpp
+++ b/src/mongo/db/query/plan_yield_policy.cpp
@@ -36,11 +36,16 @@
 #include "mongo/db/query/query_knobs.h"
 #include "mongo/db/query/query_yield.h"
 #include "mongo/db/service_context.h"
+#include "mongo/util/fail_point_service.h"
 #include "mongo/util/scopeguard.h"
 #include "mongo/util/time_support.h"
 
 namespace mongo {
 
+namespace {
+MONGO_FP_DECLARE(setCheckForInterruptHang);
+}  // namespace
+
 PlanYieldPolicy::PlanYieldPolicy(PlanExecutor* exec, PlanExecutor::YieldPolicy policy)
     : _policy(exec->getOpCtx()->lockState()->isGlobalLockedRecursively() ?
                   PlanExecutor::NO_YIELD : policy),
@@ -59,6 +64,13 @@ PlanYieldPolicy::PlanYieldPolicy(PlanExecutor::YieldPolicy policy, ClockSource*
                       Milliseconds(internalQueryExecYieldPeriodMS.load())),
       _planYielding(nullptr) {}
 
+bool PlanYieldPolicy::shouldYieldOrInterrupt() {
+    if (_policy == PlanExecutor::INTERRUPT_ONLY) {
+        return _elapsedTracker.intervalHasElapsed();
+    }
+    return shouldYield();
+}
+
 bool PlanYieldPolicy::shouldYield() {
     if (!canAutoYield())
         return false;
@@ -72,15 +84,28 @@ void PlanYieldPolicy::resetTimer() {
     _elapsedTracker.resetLastTime();
 }
 
-Status PlanYieldPolicy::yield(RecordFetcher* recordFetcher) {
+Status PlanYieldPolicy::yieldOrInterrupt(RecordFetcher* recordFetcher) {
     invariant(_planYielding);
     if (recordFetcher) {
         OperationContext* opCtx = _planYielding->getOpCtx();
-        return yield([recordFetcher, opCtx] { recordFetcher->setup(opCtx); },
-                     [recordFetcher] { recordFetcher->fetch(); });
+        return yieldOrInterrupt([recordFetcher, opCtx] { recordFetcher->setup(opCtx); },
+                                [recordFetcher] { recordFetcher->fetch(); });
     } else {
-        return yield(nullptr, nullptr);
+        return yieldOrInterrupt(nullptr, nullptr);
+    }
+}
+
+Status PlanYieldPolicy::yieldOrInterrupt(stdx::function<void()> beforeYieldingFn,
+                                         stdx::function<void()> whileYieldingFn) {
+    if (_policy == PlanExecutor::INTERRUPT_ONLY) {
+        ON_BLOCK_EXIT([this]() { resetTimer(); });
+        OperationContext* opCtx = _planYielding->getOpCtx();
+        invariant(opCtx);
+        MONGO_FAIL_POINT_PAUSE_WHILE_SET(setCheckForInterruptHang);
+        return opCtx->checkForInterruptNoAssert();
     }
+
+    return yield(beforeYieldingFn, whileYieldingFn);
 }
 
 Status PlanYieldPolicy::yield(stdx::function<void()> beforeYieldingFn,
@@ -106,6 +131,8 @@ Status PlanYieldPolicy::yield(stdx::function<void()> beforeYieldingFn,
         // that it's time to yield. Whether or not we will actually yield, we need to check
         // if this operation has been interrupted.
         if (_policy == PlanExecutor::YIELD_AUTO) {
+            MONGO_FAIL_POINT_PAUSE_WHILE_SET(setCheckForInterruptHang);
+
             auto interruptStatus = opCtx->checkForInterruptNoAssert();
             if (!interruptStatus.isOK()) {
                 return interruptStatus;
diff --git a/src/mongo/db/query/plan_yield_policy.h b/src/mongo/db/query/plan_yield_policy.h
index 154a4b13d74..80be6e11095 100644
--- a/src/mongo/db/query/plan_yield_policy.h
+++ b/src/mongo/db/query/plan_yield_policy.h
@@ -52,20 +52,22 @@ public:
     PlanYieldPolicy(PlanExecutor::YieldPolicy policy, ClockSource* cs);
 
     /**
-     * Used by YIELD_AUTO plan executors in order to check whether it is time to yield.
-     * PlanExecutors give up their locks periodically in order to be fair to other
-     * threads.
+     * Periodically returns true to indicate that it is time to check for interrupt (in the case of
+     * YIELD_AUTO and INTERRUPT_ONLY) or release locks or storage engine state (in the case of
+     * auto-yielding plans).
      */
-    virtual bool shouldYield();
+    virtual bool shouldYieldOrInterrupt();
 
     /**
-     * Resets the yield timer so that we wait for a while before yielding again.
+     * Resets the yield timer so that we wait for a while before yielding/interrupting again.
      */
     void resetTimer();
 
     /**
-     * Used to cause a plan executor to release locks or storage engine state. The PlanExecutor must
-     * *not* be in saved state. Handles calls to save/restore state internally.
+     * Used to cause a plan executor to check for interrupt (in the case of YIELD_AUTO and
+     * INTERRUPT_ONLY) or release locks or storage engine state (in the case of auto-yielding
+     * plans). The PlanExecutor must *not* be in saved state. Handles calls to save/restore state
+     * internally.
      *
      * If 'fetcher' is non-NULL, then we are yielding because the storage engine told us
     * that we will page fault on this record. We use 'fetcher' to retrieve the record
     * after we yield.
     *
     * ErrorCodes::QueryPlanKilled if the executor got killed during yield, and
     * ErrorCodes::ExceededTimeLimit if the operation has exceeded the time limit.
     */
-    virtual Status yield(RecordFetcher* fetcher = NULL);
+    virtual Status yieldOrInterrupt(RecordFetcher* fetcher = nullptr);
 
     /**
-     * More generic version of yield() above. This version calls 'beforeYieldingFn' immediately
-     * before locks are yielded (if they are), and 'whileYieldingFn' before locks are restored.
+     * More generic version of yieldOrInterrupt() above. This version calls 'beforeYieldingFn'
+     * immediately before locks are yielded (if they are), and 'whileYieldingFn' before locks are
+     * restored.
      */
-    virtual Status yield(stdx::function<void()> beforeYieldingFn,
-                         stdx::function<void()> whileYieldingFn);
+    virtual Status yieldOrInterrupt(stdx::function<void()> beforeYieldingFn,
+                                    stdx::function<void()> whileYieldingFn);
 
     /**
-     * All calls to shouldYield() will return true until the next call to yield.
+     * All calls to shouldYieldOrInterrupt() will return true until the next call to
+     * yieldOrInterrupt(). This must only be called for auto-yielding plans, to force a yield. It
+     * cannot be used to force an interrupt for INTERRUPT_ONLY plans.
      */
     void forceYield() {
         dassert(canAutoYield());
@@ -105,7 +110,8 @@ public:
                return true;
            }
            case PlanExecutor::NO_YIELD:
-           case PlanExecutor::WRITE_CONFLICT_RETRY_ONLY: {
+           case PlanExecutor::WRITE_CONFLICT_RETRY_ONLY:
+           case PlanExecutor::INTERRUPT_ONLY: {
                return false;
            }
        }
@@ -127,6 +133,7 @@ public:
            }
            case PlanExecutor::NO_YIELD:
            case PlanExecutor::YIELD_MANUAL:
+           case PlanExecutor::INTERRUPT_ONLY:
                return false;
        }
        MONGO_UNREACHABLE;
@@ -145,6 +152,12 @@ private:
    // The plan executor which this yield policy is responsible for yielding. Must
    // not outlive the plan executor.
    PlanExecutor* const _planYielding;
+
+    // Returns true to indicate it's time to release locks or storage engine state.
+    bool shouldYield();
+
+    // Releases locks or storage engine state.
+    Status yield(stdx::function<void()> beforeYieldingFn, stdx::function<void()> whileYieldingFn);
 };
 
 }  // namespace mongo
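
For reference, a minimal sketch (not part of the patch) of how a caller is expected to pick between the two policies, mirroring the find_cmd.cpp hunks above; the helper name chooseYieldPolicyForReadConcern is hypothetical and used only for illustration:

    // Hypothetical sketch only -- restates the selection logic added to find_cmd.cpp.
    // Snapshot reads keep their locks and storage engine snapshot for the life of the
    // plan, so they get a policy that only checks for interrupt; all other reads keep
    // the auto-yielding behavior.
    #include "mongo/db/operation_context.h"
    #include "mongo/db/query/plan_executor.h"
    #include "mongo/db/repl/read_concern_args.h"

    namespace mongo {

    inline PlanExecutor::YieldPolicy chooseYieldPolicyForReadConcern(OperationContext* opCtx) {
        const auto& readConcernArgs = repl::ReadConcernArgs::get(opCtx);
        return readConcernArgs.getLevel() == repl::ReadConcernLevel::kSnapshotReadConcern
            ? PlanExecutor::INTERRUPT_ONLY  // check for interrupt, never release locks
            : PlanExecutor::YIELD_AUTO;     // periodically release locks and storage state
    }

    }  // namespace mongo

A PlanExecutor constructed with INTERRUPT_ONLY then routes every yield point through PlanYieldPolicy::yieldOrInterrupt(), which calls checkForInterruptNoAssert() instead of releasing locks, as shown in the plan_yield_policy.cpp hunks above.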