diff options
author | David Storch <david.storch@10gen.com> | 2018-10-26 17:21:58 -0400 |
---|---|---|
committer | David Storch <david.storch@10gen.com> | 2018-10-31 17:26:35 -0400 |
commit | d6c618fc94ebdfdba5d270e396a084290a54d360 (patch) | |
tree | 81f42ec3ebe8522ebbca4e95a6c1615fccfbdb26 /src/mongo/db/query | |
parent | 5180b48eedec5e57e7f12f734d173184bbff2af7 (diff) | |
download | mongo-d6c618fc94ebdfdba5d270e396a084290a54d360.tar.gz |
SERVER-37444 Added RequiresCollectionStage and used it for COLLSCAN.
This is a pure refactor with no user-facing changes. It is
the first step in making PlanExecutors check their own
validity during yield recovery, rather than requiring the
invalidating actor to issue a kill notification.
Diffstat (limited to 'src/mongo/db/query')
-rw-r--r-- | src/mongo/db/query/find.cpp | 4 | ||||
-rw-r--r-- | src/mongo/db/query/get_executor.cpp | 3 | ||||
-rw-r--r-- | src/mongo/db/query/internal_plans.cpp | 3 | ||||
-rw-r--r-- | src/mongo/db/query/plan_executor.h | 22 | ||||
-rw-r--r-- | src/mongo/db/query/plan_executor_impl.cpp | 10 | ||||
-rw-r--r-- | src/mongo/db/query/plan_executor_impl.h | 4 | ||||
-rw-r--r-- | src/mongo/db/query/plan_yield_policy.cpp | 7 | ||||
-rw-r--r-- | src/mongo/db/query/stage_builder.cpp | 3 |
8 files changed, 28 insertions, 28 deletions
diff --git a/src/mongo/db/query/find.cpp b/src/mongo/db/query/find.cpp index a4eb53eb058..08d43c05df0 100644 --- a/src/mongo/db/query/find.cpp +++ b/src/mongo/db/query/find.cpp @@ -398,7 +398,7 @@ Message getMore(OperationContext* opCtx, PlanExecutor* exec = cc->getExecutor(); exec->reattachToOperationContext(opCtx); - uassertStatusOK(exec->restoreState()); + exec->restoreState(); auto planSummary = Explain::getPlanSummary(exec); { @@ -447,7 +447,7 @@ Message getMore(OperationContext* opCtx, // Reacquiring locks. readLock.emplace(opCtx, nss); - uassertStatusOK(exec->restoreState()); + exec->restoreState(); // We woke up because either the timed_wait expired, or there was more data. Either // way, attempt to generate another batch of results. diff --git a/src/mongo/db/query/get_executor.cpp b/src/mongo/db/query/get_executor.cpp index 8e74fd72cbc..dca519ec764 100644 --- a/src/mongo/db/query/get_executor.cpp +++ b/src/mongo/db/query/get_executor.cpp @@ -630,7 +630,6 @@ StatusWith<unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getOplogStartHack( // Build our collection scan. 
CollectionScanParams params; - params.collection = collection; if (startLoc) { LOG(3) << "Using direct oplog seek"; params.start = *startLoc; @@ -652,7 +651,7 @@ StatusWith<unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getOplogStartHack( } auto ws = make_unique<WorkingSet>(); - auto cs = make_unique<CollectionScan>(opCtx, params, ws.get(), cq->root()); + auto cs = make_unique<CollectionScan>(opCtx, collection, params, ws.get(), cq->root()); return PlanExecutor::make( opCtx, std::move(ws), std::move(cs), std::move(cq), collection, PlanExecutor::YIELD_AUTO); } diff --git a/src/mongo/db/query/internal_plans.cpp b/src/mongo/db/query/internal_plans.cpp index 5d72b5f93eb..638dcf68662 100644 --- a/src/mongo/db/query/internal_plans.cpp +++ b/src/mongo/db/query/internal_plans.cpp @@ -180,7 +180,6 @@ std::unique_ptr<PlanStage> InternalPlanner::_collectionScan(OperationContext* op invariant(collection); CollectionScanParams params; - params.collection = collection; params.start = startLoc; params.shouldWaitForOplogVisibility = shouldWaitForOplogVisibility(opCtx, collection, false); @@ -190,7 +189,7 @@ std::unique_ptr<PlanStage> InternalPlanner::_collectionScan(OperationContext* op params.direction = CollectionScanParams::BACKWARD; } - return stdx::make_unique<CollectionScan>(opCtx, params, ws, nullptr); + return stdx::make_unique<CollectionScan>(opCtx, collection, params, ws, nullptr); } std::unique_ptr<PlanStage> InternalPlanner::_indexScan(OperationContext* opCtx, diff --git a/src/mongo/db/query/plan_executor.h b/src/mongo/db/query/plan_executor.h index 9c97d30903d..86480562f98 100644 --- a/src/mongo/db/query/plan_executor.h +++ b/src/mongo/db/query/plan_executor.h @@ -306,20 +306,18 @@ public: virtual void saveState() = 0; /** - * Restores the state saved by a saveState() call. + * Restores the state saved by a saveState() call. When this method returns successfully, the + * execution tree can once again be executed via work(). 
* - * Returns Status::OK() if the state was successfully restored and the execution tree can be - * work()'d. + * Throws a UserException if the state cannot be successfully restored (e.g. a collection was + * dropped or the position of a capped cursor was lost during a yield). If restore fails, it is + * only safe to call dispose(), detachFromOperationContext(), or the destructor. * - * Returns ErrorCodes::QueryPlanKilled if the PlanExecutor was killed while saved. - * - * If allowed, will yield and retry if a WriteConflictException is encountered. If the time - * limit is exceeded during this retry process, returns ErrorCodes::MaxTimeMSExpired. If this - * PlanExecutor is killed during this retry process, returns ErrorCodes::QueryPlanKilled. In - * this scenario, locks will have been released, and will not be held when control returns to - * the caller. + * If allowed by the executor's yield policy, will yield and retry internally if a + * WriteConflictException is encountered. If the time limit is exceeded during this retry + * process, throws ErrorCodes::MaxTimeMSExpired. */ - virtual Status restoreState() = 0; + virtual void restoreState() = 0; /** * Detaches from the OperationContext and releases any storage-engine state. @@ -344,7 +342,7 @@ public: * * This is only public for PlanYieldPolicy. DO NOT CALL ANYWHERE ELSE. 
*/ - virtual Status restoreStateWithoutRetrying() = 0; + virtual void restoreStateWithoutRetrying() = 0; // // Running Support diff --git a/src/mongo/db/query/plan_executor_impl.cpp b/src/mongo/db/query/plan_executor_impl.cpp index 037df73f515..23bbd3e5428 100644 --- a/src/mongo/db/query/plan_executor_impl.cpp +++ b/src/mongo/db/query/plan_executor_impl.cpp @@ -355,19 +355,19 @@ void PlanExecutorImpl::saveState() { _currentState = kSaved; } -Status PlanExecutorImpl::restoreState() { +void PlanExecutorImpl::restoreState() { try { - return restoreStateWithoutRetrying(); + restoreStateWithoutRetrying(); } catch (const WriteConflictException&) { if (!_yieldPolicy->canAutoYield()) throw; // Handles retries by calling restoreStateWithoutRetrying() in a loop. - return _yieldPolicy->yieldOrInterrupt(); + uassertStatusOK(_yieldPolicy->yieldOrInterrupt()); } } -Status PlanExecutorImpl::restoreStateWithoutRetrying() { +void PlanExecutorImpl::restoreStateWithoutRetrying() { invariant(_currentState == kSaved); if (!isMarkedAsKilled()) { @@ -375,7 +375,7 @@ Status PlanExecutorImpl::restoreStateWithoutRetrying() { } _currentState = kUsable; - return _killStatus; + uassertStatusOK(_killStatus); } void PlanExecutorImpl::detachFromOperationContext() { diff --git a/src/mongo/db/query/plan_executor_impl.h b/src/mongo/db/query/plan_executor_impl.h index 6447d014cdc..0a13cc25b57 100644 --- a/src/mongo/db/query/plan_executor_impl.h +++ b/src/mongo/db/query/plan_executor_impl.h @@ -62,10 +62,10 @@ public: const NamespaceString& nss() const final; OperationContext* getOpCtx() const final; void saveState() final; - Status restoreState() final; + void restoreState() final; void detachFromOperationContext() final; void reattachToOperationContext(OperationContext* opCtx) final; - Status restoreStateWithoutRetrying() final; + void restoreStateWithoutRetrying() final; ExecState getNextSnapshotted(Snapshotted<BSONObj>* objOut, RecordId* dlOut) final; ExecState getNext(BSONObj* objOut, RecordId* 
dlOut) final; bool isEOF() final; diff --git a/src/mongo/db/query/plan_yield_policy.cpp b/src/mongo/db/query/plan_yield_policy.cpp index 3091d7e78cd..030d5102733 100644 --- a/src/mongo/db/query/plan_yield_policy.cpp +++ b/src/mongo/db/query/plan_yield_policy.cpp @@ -151,12 +151,17 @@ Status PlanYieldPolicy::yield(stdx::function<void()> whileYieldingFn) { QueryYield::yieldAllLocks(opCtx, whileYieldingFn, _planYielding->nss()); } - return _planYielding->restoreStateWithoutRetrying(); + _planYielding->restoreStateWithoutRetrying(); + return Status::OK(); } catch (const WriteConflictException&) { CurOp::get(opCtx)->debug().additiveMetrics.incrementWriteConflicts(1); WriteConflictException::logAndBackoff( attempt, "plan execution restoreState", _planYielding->nss().ns()); // retry + } catch (...) { + // Errors other than write conflicts don't get retried, and should instead result in the + // PlanExecutor dying. We propagate all such errors as status codes. + return exceptionToStatus(); } } } diff --git a/src/mongo/db/query/stage_builder.cpp b/src/mongo/db/query/stage_builder.cpp index f67af999503..a7b2bb23510 100644 --- a/src/mongo/db/query/stage_builder.cpp +++ b/src/mongo/db/query/stage_builder.cpp @@ -77,13 +77,12 @@ PlanStage* buildStages(OperationContext* opCtx, case STAGE_COLLSCAN: { const CollectionScanNode* csn = static_cast<const CollectionScanNode*>(root); CollectionScanParams params; - params.collection = collection; params.tailable = csn->tailable; params.shouldTrackLatestOplogTimestamp = csn->shouldTrackLatestOplogTimestamp; params.direction = (csn->direction == 1) ? CollectionScanParams::FORWARD : CollectionScanParams::BACKWARD; params.shouldWaitForOplogVisibility = csn->shouldWaitForOplogVisibility; - return new CollectionScan(opCtx, params, ws, csn->filter.get()); + return new CollectionScan(opCtx, collection, params, ws, csn->filter.get()); } case STAGE_IXSCAN: { const IndexScanNode* ixn = static_cast<const IndexScanNode*>(root); |