author     Tess Avitabile <tess.avitabile@mongodb.com>  2018-02-05 17:21:06 -0500
committer  Tess Avitabile <tess.avitabile@mongodb.com>  2018-02-08 10:31:50 -0500
commit     b730e13c75d21ee86fea0622a55410d790a23224 (patch)
tree       5a2aeec9dce06a77fbd0070261b1b6eea17ed248 /src/mongo/db
parent     c556e377094792e7253a95cb5fedcd703a99bf2c (diff)
download   mongo-b730e13c75d21ee86fea0622a55410d790a23224.tar.gz
SERVER-32685 Disable lock and WT transaction yielding under readConcern snapshot for find and getMore
Diffstat (limited to 'src/mongo/db')
-rw-r--r--  src/mongo/db/commands/find_cmd.cpp        | 14
-rw-r--r--  src/mongo/db/exec/cached_plan.cpp         |  4
-rw-r--r--  src/mongo/db/exec/multi_plan.cpp          |  4
-rw-r--r--  src/mongo/db/query/get_executor.cpp       |  3
-rw-r--r--  src/mongo/db/query/mock_yield_policies.h  | 16
-rw-r--r--  src/mongo/db/query/plan_executor.cpp      | 11
-rw-r--r--  src/mongo/db/query/plan_executor.h        |  3
-rw-r--r--  src/mongo/db/query/plan_yield_policy.cpp  | 35
-rw-r--r--  src/mongo/db/query/plan_yield_policy.h    | 41
9 files changed, 92 insertions(+), 39 deletions(-)
diff --git a/src/mongo/db/commands/find_cmd.cpp b/src/mongo/db/commands/find_cmd.cpp
index c337efc9389..b5830b08a09 100644
--- a/src/mongo/db/commands/find_cmd.cpp
+++ b/src/mongo/db/commands/find_cmd.cpp
@@ -196,8 +196,13 @@ public:
Collection* collection = ctx.getCollection();
// We have a parsed query. Time to get the execution plan for it.
+ auto readConcernArgs = repl::ReadConcernArgs::get(opCtx);
+ auto yieldPolicy =
+ readConcernArgs.getLevel() == repl::ReadConcernLevel::kSnapshotReadConcern
+ ? PlanExecutor::INTERRUPT_ONLY
+ : PlanExecutor::YIELD_AUTO;
auto statusWithPlanExecutor =
- getExecutorFind(opCtx, collection, nss, std::move(cq), PlanExecutor::YIELD_AUTO);
+ getExecutorFind(opCtx, collection, nss, std::move(cq), yieldPolicy);
if (!statusWithPlanExecutor.isOK()) {
return statusWithPlanExecutor.getStatus();
}
@@ -305,8 +310,13 @@ public:
}
// Get the execution plan for the query.
+ auto readConcernArgs = repl::ReadConcernArgs::get(opCtx);
+ auto yieldPolicy =
+ readConcernArgs.getLevel() == repl::ReadConcernLevel::kSnapshotReadConcern
+ ? PlanExecutor::INTERRUPT_ONLY
+ : PlanExecutor::YIELD_AUTO;
auto statusWithPlanExecutor =
- getExecutorFind(opCtx, collection, nss, std::move(cq), PlanExecutor::YIELD_AUTO);
+ getExecutorFind(opCtx, collection, nss, std::move(cq), yieldPolicy);
if (!statusWithPlanExecutor.isOK()) {
return CommandHelpers::appendCommandStatus(result, statusWithPlanExecutor.getStatus());
}
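
The two find_cmd.cpp hunks above apply the same rule at both call sites: if the operation runs at snapshot read concern, the executor is built with INTERRUPT_ONLY instead of YIELD_AUTO. A minimal, self-contained sketch of that rule follows; the enum values and the chooseYieldPolicy helper are simplified stand-ins rather than MongoDB's headers (the real identifiers are repl::ReadConcernLevel::kSnapshotReadConcern and PlanExecutor::INTERRUPT_ONLY, as shown in the hunks).

    #include <cassert>

    // Simplified stand-ins for repl::ReadConcernLevel and PlanExecutor::YieldPolicy.
    enum class ReadConcernLevel { kLocal, kMajority, kSnapshot };
    enum class YieldPolicy { YIELD_AUTO, INTERRUPT_ONLY };

    // Snapshot reads keep the storage engine snapshot open for the life of the
    // cursor, so the plan must hold on to its locks and WT transaction; it may
    // only check for interrupt.
    YieldPolicy chooseYieldPolicy(ReadConcernLevel level) {
        return level == ReadConcernLevel::kSnapshot ? YieldPolicy::INTERRUPT_ONLY
                                                    : YieldPolicy::YIELD_AUTO;
    }

    int main() {
        assert(chooseYieldPolicy(ReadConcernLevel::kSnapshot) == YieldPolicy::INTERRUPT_ONLY);
        assert(chooseYieldPolicy(ReadConcernLevel::kMajority) == YieldPolicy::YIELD_AUTO);
        return 0;
    }

Keeping the WiredTiger snapshot pinned is what makes snapshot reads repeatable, which is why these plans cannot give up locks or storage engine state mid-query.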
diff --git a/src/mongo/db/exec/cached_plan.cpp b/src/mongo/db/exec/cached_plan.cpp
index b1bf5d16cbd..b6d185c4032 100644
--- a/src/mongo/db/exec/cached_plan.cpp
+++ b/src/mongo/db/exec/cached_plan.cpp
@@ -179,9 +179,9 @@ Status CachedPlanStage::tryYield(PlanYieldPolicy* yieldPolicy) {
// 2) some stage requested a yield due to a document fetch, or
// 3) we need to yield and retry due to a WriteConflictException.
// In all cases, the actual yielding happens here.
- if (yieldPolicy->shouldYield()) {
+ if (yieldPolicy->shouldYieldOrInterrupt()) {
// Here's where we yield.
- return yieldPolicy->yield(_fetcher.get());
+ return yieldPolicy->yieldOrInterrupt(_fetcher.get());
}
// We're done using the fetcher, so it should be freed. We don't want to
diff --git a/src/mongo/db/exec/multi_plan.cpp b/src/mongo/db/exec/multi_plan.cpp
index 34d7e6d5cc8..5dc4bd0b863 100644
--- a/src/mongo/db/exec/multi_plan.cpp
+++ b/src/mongo/db/exec/multi_plan.cpp
@@ -151,8 +151,8 @@ Status MultiPlanStage::tryYield(PlanYieldPolicy* yieldPolicy) {
// 2) some stage requested a yield due to a document fetch, or
// 3) we need to yield and retry due to a WriteConflictException.
// In all cases, the actual yielding happens here.
- if (yieldPolicy->shouldYield()) {
- auto yieldStatus = yieldPolicy->yield(_fetcher.get());
+ if (yieldPolicy->shouldYieldOrInterrupt()) {
+ auto yieldStatus = yieldPolicy->yieldOrInterrupt(_fetcher.get());
if (!yieldStatus.isOK()) {
_failure = true;
diff --git a/src/mongo/db/query/get_executor.cpp b/src/mongo/db/query/get_executor.cpp
index 84ab8649f2f..dd6af8d52cc 100644
--- a/src/mongo/db/query/get_executor.cpp
+++ b/src/mongo/db/query/get_executor.cpp
@@ -657,8 +657,7 @@ StatusWith<unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorFind(
if (ShardingState::get(opCtx)->needCollectionMetadata(opCtx, nss.ns())) {
plannerOptions |= QueryPlannerParams::INCLUDE_SHARD_FILTER;
}
- return getExecutor(
- opCtx, collection, std::move(canonicalQuery), PlanExecutor::YIELD_AUTO, plannerOptions);
+ return getExecutor(opCtx, collection, std::move(canonicalQuery), yieldPolicy, plannerOptions);
}
namespace {
diff --git a/src/mongo/db/query/mock_yield_policies.h b/src/mongo/db/query/mock_yield_policies.h
index 2a690853799..dc3f5c1ef26 100644
--- a/src/mongo/db/query/mock_yield_policies.h
+++ b/src/mongo/db/query/mock_yield_policies.h
@@ -45,16 +45,16 @@ public:
AlwaysTimeOutYieldPolicy(ClockSource* cs)
: PlanYieldPolicy(PlanExecutor::YieldPolicy::ALWAYS_TIME_OUT, cs) {}
- bool shouldYield() override {
+ bool shouldYieldOrInterrupt() override {
return true;
}
- Status yield(RecordFetcher* recordFetcher) override {
+ Status yieldOrInterrupt(RecordFetcher* recordFetcher) override {
return {ErrorCodes::ExceededTimeLimit, "Using AlwaysTimeOutYieldPolicy"};
}
- Status yield(stdx::function<void()> beforeYieldingFn,
- stdx::function<void()> whileYieldingFn) override {
+ Status yieldOrInterrupt(stdx::function<void()> beforeYieldingFn,
+ stdx::function<void()> whileYieldingFn) override {
return {ErrorCodes::ExceededTimeLimit, "Using AlwaysTimeOutYieldPolicy"};
}
};
@@ -71,16 +71,16 @@ public:
AlwaysPlanKilledYieldPolicy(ClockSource* cs)
: PlanYieldPolicy(PlanExecutor::YieldPolicy::ALWAYS_MARK_KILLED, cs) {}
- bool shouldYield() override {
+ bool shouldYieldOrInterrupt() override {
return true;
}
- Status yield(RecordFetcher* recordFetcher) override {
+ Status yieldOrInterrupt(RecordFetcher* recordFetcher) override {
return {ErrorCodes::QueryPlanKilled, "Using AlwaysPlanKilledYieldPolicy"};
}
- Status yield(stdx::function<void()> beforeYieldingFn,
- stdx::function<void()> whileYieldingFn) override {
+ Status yieldOrInterrupt(stdx::function<void()> beforeYieldingFn,
+ stdx::function<void()> whileYieldingFn) override {
return {ErrorCodes::QueryPlanKilled, "Using AlwaysPlanKilledYieldPolicy"};
}
};
diff --git a/src/mongo/db/query/plan_executor.cpp b/src/mongo/db/query/plan_executor.cpp
index b2619986a46..b3ad1aa08d0 100644
--- a/src/mongo/db/query/plan_executor.cpp
+++ b/src/mongo/db/query/plan_executor.cpp
@@ -84,7 +84,8 @@ std::unique_ptr<PlanYieldPolicy> makeYieldPolicy(PlanExecutor* exec,
case PlanExecutor::YieldPolicy::YIELD_AUTO:
case PlanExecutor::YieldPolicy::YIELD_MANUAL:
case PlanExecutor::YieldPolicy::NO_YIELD:
- case PlanExecutor::YieldPolicy::WRITE_CONFLICT_RETRY_ONLY: {
+ case PlanExecutor::YieldPolicy::WRITE_CONFLICT_RETRY_ONLY:
+ case PlanExecutor::YieldPolicy::INTERRUPT_ONLY: {
return stdx::make_unique<PlanYieldPolicy>(exec, policy);
}
case PlanExecutor::YieldPolicy::ALWAYS_TIME_OUT: {
@@ -357,7 +358,7 @@ Status PlanExecutor::restoreState() {
throw;
// Handles retries by calling restoreStateWithoutRetrying() in a loop.
- return _yieldPolicy->yield();
+ return _yieldPolicy->yieldOrInterrupt();
}
}
@@ -469,7 +470,7 @@ PlanExecutor::ExecState PlanExecutor::waitForInserts(CappedInsertNotifierData* n
ON_BLOCK_EXIT([curOp] { curOp->resumeTimer(); });
auto opCtx = _opCtx;
uint64_t currentNotifierVersion = notifierData->notifier->getVersion();
- auto yieldResult = _yieldPolicy->yield(nullptr, [opCtx, notifierData] {
+ auto yieldResult = _yieldPolicy->yieldOrInterrupt(nullptr, [opCtx, notifierData] {
const auto deadline = awaitDataState(opCtx).waitForInsertsDeadline;
notifierData->notifier->waitUntil(notifierData->lastEOFVersion, deadline);
});
@@ -537,8 +538,8 @@ PlanExecutor::ExecState PlanExecutor::getNextImpl(Snapshotted<BSONObj>* objOut,
// 2) some stage requested a yield due to a document fetch, or
// 3) we need to yield and retry due to a WriteConflictException.
// In all cases, the actual yielding happens here.
- if (_yieldPolicy->shouldYield()) {
- auto yieldStatus = _yieldPolicy->yield(fetcher.get());
+ if (_yieldPolicy->shouldYieldOrInterrupt()) {
+ auto yieldStatus = _yieldPolicy->yieldOrInterrupt(fetcher.get());
if (!yieldStatus.isOK()) {
if (objOut) {
*objOut = Snapshotted<BSONObj>(
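
The plan_executor.cpp hunks above mostly rename calls, but the shape of the interaction is worth spelling out: once per work cycle the executor asks the policy whether it is time to act, and a non-OK status from yieldOrInterrupt() ends the plan with an error object. A rough, self-contained sketch of that loop step, using hypothetical stand-in types rather than MongoDB's PlanExecutor and PlanYieldPolicy:

    #include <optional>
    #include <string>

    // Hypothetical stand-in for mongo::Status.
    struct Status {
        bool ok = true;
        std::string reason;
    };

    // Hypothetical stand-in for the PlanYieldPolicy interface used by getNextImpl().
    struct YieldPolicyLike {
        virtual ~YieldPolicyLike() = default;
        virtual bool shouldYieldOrInterrupt() = 0;
        virtual Status yieldOrInterrupt() = 0;
    };

    // One iteration of a getNext-style loop: if the policy says it is time, run
    // the yield/interrupt step and propagate a failure; otherwise keep executing.
    std::optional<Status> maybeYieldOrInterrupt(YieldPolicyLike& policy) {
        if (policy.shouldYieldOrInterrupt()) {
            Status s = policy.yieldOrInterrupt();
            if (!s.ok) {
                return s;  // the real code returns an error state with this status
                           // encoded as an error object, as in the hunk above
            }
        }
        return std::nullopt;  // no error; continue working the plan
    }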
diff --git a/src/mongo/db/query/plan_executor.h b/src/mongo/db/query/plan_executor.h
index c0bd8b8a2c7..9ce62619e2d 100644
--- a/src/mongo/db/query/plan_executor.h
+++ b/src/mongo/db/query/plan_executor.h
@@ -122,6 +122,9 @@ public:
// which is being used to execute an aggregation pipeline.
NO_YIELD,
+ // Will not yield locks or storage engine resources, but will check for interrupt.
+ INTERRUPT_ONLY,
+
// Used for testing, this yield policy will cause the PlanExecutor to time out on the first
// yield, returning DEAD with an error object encoding a ErrorCodes::ExceededTimeLimit
// message.
diff --git a/src/mongo/db/query/plan_yield_policy.cpp b/src/mongo/db/query/plan_yield_policy.cpp
index 13bded7f1b4..4c3a96282d4 100644
--- a/src/mongo/db/query/plan_yield_policy.cpp
+++ b/src/mongo/db/query/plan_yield_policy.cpp
@@ -36,11 +36,16 @@
#include "mongo/db/query/query_knobs.h"
#include "mongo/db/query/query_yield.h"
#include "mongo/db/service_context.h"
+#include "mongo/util/fail_point_service.h"
#include "mongo/util/scopeguard.h"
#include "mongo/util/time_support.h"
namespace mongo {
+namespace {
+MONGO_FP_DECLARE(setCheckForInterruptHang);
+} // namespace
+
PlanYieldPolicy::PlanYieldPolicy(PlanExecutor* exec, PlanExecutor::YieldPolicy policy)
: _policy(exec->getOpCtx()->lockState()->isGlobalLockedRecursively() ? PlanExecutor::NO_YIELD
: policy),
@@ -59,6 +64,13 @@ PlanYieldPolicy::PlanYieldPolicy(PlanExecutor::YieldPolicy policy, ClockSource*
Milliseconds(internalQueryExecYieldPeriodMS.load())),
_planYielding(nullptr) {}
+bool PlanYieldPolicy::shouldYieldOrInterrupt() {
+ if (_policy == PlanExecutor::INTERRUPT_ONLY) {
+ return _elapsedTracker.intervalHasElapsed();
+ }
+ return shouldYield();
+}
+
bool PlanYieldPolicy::shouldYield() {
if (!canAutoYield())
return false;
@@ -72,15 +84,28 @@ void PlanYieldPolicy::resetTimer() {
_elapsedTracker.resetLastTime();
}
-Status PlanYieldPolicy::yield(RecordFetcher* recordFetcher) {
+Status PlanYieldPolicy::yieldOrInterrupt(RecordFetcher* recordFetcher) {
invariant(_planYielding);
if (recordFetcher) {
OperationContext* opCtx = _planYielding->getOpCtx();
- return yield([recordFetcher, opCtx] { recordFetcher->setup(opCtx); },
- [recordFetcher] { recordFetcher->fetch(); });
+ return yieldOrInterrupt([recordFetcher, opCtx] { recordFetcher->setup(opCtx); },
+ [recordFetcher] { recordFetcher->fetch(); });
} else {
- return yield(nullptr, nullptr);
+ return yieldOrInterrupt(nullptr, nullptr);
+ }
+}
+
+Status PlanYieldPolicy::yieldOrInterrupt(stdx::function<void()> beforeYieldingFn,
+ stdx::function<void()> whileYieldingFn) {
+ if (_policy == PlanExecutor::INTERRUPT_ONLY) {
+ ON_BLOCK_EXIT([this]() { resetTimer(); });
+ OperationContext* opCtx = _planYielding->getOpCtx();
+ invariant(opCtx);
+ MONGO_FAIL_POINT_PAUSE_WHILE_SET(setCheckForInterruptHang);
+ return opCtx->checkForInterruptNoAssert();
}
+
+ return yield(beforeYieldingFn, whileYieldingFn);
}
Status PlanYieldPolicy::yield(stdx::function<void()> beforeYieldingFn,
@@ -106,6 +131,8 @@ Status PlanYieldPolicy::yield(stdx::function<void()> beforeYieldingFn,
// that it's time to yield. Whether or not we will actually yield, we need to check
// if this operation has been interrupted.
if (_policy == PlanExecutor::YIELD_AUTO) {
+ MONGO_FAIL_POINT_PAUSE_WHILE_SET(setCheckForInterruptHang);
+
auto interruptStatus = opCtx->checkForInterruptNoAssert();
if (!interruptStatus.isOK()) {
return interruptStatus;
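
The two new branches above are the heart of the change: for INTERRUPT_ONLY, shouldYieldOrInterrupt() is purely timer-driven, and yieldOrInterrupt() never releases anything; it only checks whether the operation was killed and resets the timer. A self-contained sketch of that behavior with simplified stand-in types (std::chrono in place of ElapsedTracker, an atomic kill flag in place of OperationContext::checkForInterruptNoAssert()):

    #include <atomic>
    #include <chrono>
    #include <string>

    // Hypothetical stand-in for mongo::Status.
    struct Status {
        bool ok = true;
        std::string reason;
    };

    // Sketch of the INTERRUPT_ONLY behavior: no locks or storage engine state are
    // ever released; the policy only checks for interrupt on a periodic timer.
    class InterruptOnlyPolicySketch {
        using Clock = std::chrono::steady_clock;

    public:
        explicit InterruptOnlyPolicySketch(std::chrono::milliseconds period)
            : _period(period), _lastCheck(Clock::now()) {}

        // Analogue of shouldYieldOrInterrupt(): purely elapsed-time driven.
        bool shouldYieldOrInterrupt() const {
            return Clock::now() - _lastCheck >= _period;
        }

        // Analogue of yieldOrInterrupt(): reset the timer (the real code does this
        // via ON_BLOCK_EXIT) and report whether the operation was interrupted.
        Status yieldOrInterrupt(const std::atomic<bool>& killed) {
            _lastCheck = Clock::now();
            if (killed.load()) {
                return {false, "operation was interrupted"};
            }
            return {};
        }

    private:
        const std::chrono::milliseconds _period;
        Clock::time_point _lastCheck;
    };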
diff --git a/src/mongo/db/query/plan_yield_policy.h b/src/mongo/db/query/plan_yield_policy.h
index 154a4b13d74..80be6e11095 100644
--- a/src/mongo/db/query/plan_yield_policy.h
+++ b/src/mongo/db/query/plan_yield_policy.h
@@ -52,20 +52,22 @@ public:
PlanYieldPolicy(PlanExecutor::YieldPolicy policy, ClockSource* cs);
/**
- * Used by YIELD_AUTO plan executors in order to check whether it is time to yield.
- * PlanExecutors give up their locks periodically in order to be fair to other
- * threads.
+ * Periodically returns true to indicate that it is time to check for interrupt (in the case of
+ * YIELD_AUTO and INTERRUPT_ONLY) or release locks or storage engine state (in the case of
+ * auto-yielding plans).
*/
- virtual bool shouldYield();
+ virtual bool shouldYieldOrInterrupt();
/**
- * Resets the yield timer so that we wait for a while before yielding again.
+ * Resets the yield timer so that we wait for a while before yielding/interrupting again.
*/
void resetTimer();
/**
- * Used to cause a plan executor to release locks or storage engine state. The PlanExecutor must
- * *not* be in saved state. Handles calls to save/restore state internally.
+ * Used to cause a plan executor to check for interrupt (in the case of YIELD_AUTO and
+ * INTERRUPT_ONLY) or release locks or storage engine state (in the case of auto-yielding
+ * plans). The PlanExecutor must *not* be in saved state. Handles calls to save/restore state
+ * internally.
*
* If 'fetcher' is non-NULL, then we are yielding because the storage engine told us
* that we will page fault on this record. We use 'fetcher' to retrieve the record
@@ -75,17 +77,20 @@ public:
* ErrorCodes::QueryPlanKilled if the executor got killed during yield, and
* ErrorCodes::ExceededTimeLimit if the operation has exceeded the time limit.
*/
- virtual Status yield(RecordFetcher* fetcher = NULL);
+ virtual Status yieldOrInterrupt(RecordFetcher* fetcher = nullptr);
/**
- * More generic version of yield() above. This version calls 'beforeYieldingFn' immediately
- * before locks are yielded (if they are), and 'whileYieldingFn' before locks are restored.
+ * More generic version of yieldOrInterrupt() above. This version calls 'beforeYieldingFn'
+ * immediately before locks are yielded (if they are), and 'whileYieldingFn' before locks are
+ * restored.
*/
- virtual Status yield(stdx::function<void()> beforeYieldingFn,
- stdx::function<void()> whileYieldingFn);
+ virtual Status yieldOrInterrupt(stdx::function<void()> beforeYieldingFn,
+ stdx::function<void()> whileYieldingFn);
/**
- * All calls to shouldYield() will return true until the next call to yield.
+ * All calls to shouldYieldOrInterrupt() will return true until the next call to
+ * yieldOrInterrupt(). This must only be called for auto-yielding plans, to force a yield. It
+ * cannot be used to force an interrupt for INTERRUPT_ONLY plans.
*/
void forceYield() {
dassert(canAutoYield());
@@ -105,7 +110,8 @@ public:
return true;
}
case PlanExecutor::NO_YIELD:
- case PlanExecutor::WRITE_CONFLICT_RETRY_ONLY: {
+ case PlanExecutor::WRITE_CONFLICT_RETRY_ONLY:
+ case PlanExecutor::INTERRUPT_ONLY: {
return false;
}
}
@@ -127,6 +133,7 @@ public:
}
case PlanExecutor::NO_YIELD:
case PlanExecutor::YIELD_MANUAL:
+ case PlanExecutor::INTERRUPT_ONLY:
return false;
}
MONGO_UNREACHABLE;
@@ -145,6 +152,12 @@ private:
// The plan executor which this yield policy is responsible for yielding. Must
// not outlive the plan executor.
PlanExecutor* const _planYielding;
+
+ // Returns true to indicate it's time to release locks or storage engine state.
+ bool shouldYield();
+
+ // Releases locks or storage engine state.
+ Status yield(stdx::function<void()> beforeYieldingFn, stdx::function<void()> whileYieldingFn);
};
} // namespace mongo
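
Taken together, the header hunks slot INTERRUPT_ONLY into both capability checks as a non-yielding policy. A rough, self-contained rendering of that mapping follows; only the false-returning case labels are taken from the hunks above, while the function names and the policies covered by the default branches are my reading of the surrounding header (canAutoYield is the check referenced by forceYield()'s dassert; the other name is assumed).

    #include <cassert>

    // Simplified stand-in for PlanExecutor::YieldPolicy.
    enum class Policy { YIELD_AUTO, YIELD_MANUAL, NO_YIELD, WRITE_CONFLICT_RETRY_ONLY, INTERRUPT_ONLY };

    // First capability switch: INTERRUPT_ONLY joins NO_YIELD and
    // WRITE_CONFLICT_RETRY_ONLY in never releasing locks during execution.
    bool canReleaseLocksDuringExecution(Policy p) {
        switch (p) {
            case Policy::NO_YIELD:
            case Policy::WRITE_CONFLICT_RETRY_ONLY:
            case Policy::INTERRUPT_ONLY:
                return false;
            default:
                return true;
        }
    }

    // Second capability switch: INTERRUPT_ONLY joins NO_YIELD and YIELD_MANUAL as
    // a policy that never auto-yields, which is why forceYield() must not be
    // called on INTERRUPT_ONLY plans.
    bool canAutoYield(Policy p) {
        switch (p) {
            case Policy::NO_YIELD:
            case Policy::YIELD_MANUAL:
            case Policy::INTERRUPT_ONLY:
                return false;
            default:
                return true;
        }
    }

    int main() {
        assert(!canReleaseLocksDuringExecution(Policy::INTERRUPT_ONLY));
        assert(!canAutoYield(Policy::INTERRUPT_ONLY));
        return 0;
    }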