author    | David Storch <david.storch@10gen.com> | 2018-10-26 17:21:58 -0400
committer | David Storch <david.storch@10gen.com> | 2018-10-31 17:26:35 -0400
commit    | d6c618fc94ebdfdba5d270e396a084290a54d360 (patch)
tree      | 81f42ec3ebe8522ebbca4e95a6c1615fccfbdb26
parent    | 5180b48eedec5e57e7f12f734d173184bbff2af7 (diff)
download  | mongo-d6c618fc94ebdfdba5d270e396a084290a54d360.tar.gz
SERVER-37444 Add RequiresCollectionStage and use it for COLLSCAN.
This is a pure refactor with no user-facing changes. It is
the first step in making PlanExecutors check their own
validity during yield recovery, rather than requiring the
invalidating actor to issue a kill notification.
37 files changed, 330 insertions, 227 deletions
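The core of the change is the new `RequiresCollectionStage` base class (`src/mongo/db/exec/requires_collection_stage.h/.cpp` in the diff below): on save it drops its cached `Collection*`, and on restore it re-resolves the collection by UUID and throws `QueryPlanKilled` itself if the collection no longer exists, instead of relying on an external kill notification. Below is a minimal, self-contained sketch of that contract; the `Collection`, `UUID`, and `UUIDCatalog` types are simplified stand-ins for illustration, not the real MongoDB classes.

```cpp
#include <map>
#include <stdexcept>
#include <string>
#include <utility>

// Simplified stand-ins for the real Collection, UUID, and UUIDCatalog types.
struct Collection {
    std::string ns;
};
using UUID = std::string;

struct UUIDCatalog {
    std::map<UUID, Collection*> entries;
    Collection* lookupCollectionByUUID(const UUID& uuid) const {
        auto it = entries.find(uuid);
        return it == entries.end() ? nullptr : it->second;
    }
};

// Mirrors the shape of RequiresCollectionStage: the base class owns the
// save/restore plumbing, and subclasses only implement the tagged hooks.
class RequiresCollectionStage {
public:
    RequiresCollectionStage(const UUIDCatalog* catalog, const Collection* coll, UUID uuid)
        : _catalog(catalog), _collection(coll), _collectionUUID(std::move(uuid)) {}
    virtual ~RequiresCollectionStage() = default;

    void doSaveState() {
        // A stage may not access storage while in a saved state.
        _collection = nullptr;
        saveState(RequiresCollTag{});
    }

    void doRestoreState() {
        // Re-resolve the collection by UUID; if it was dropped during the
        // yield, the stage kills itself by throwing rather than waiting for
        // an external kill notification.
        _collection = _catalog->lookupCollectionByUUID(_collectionUUID);
        if (!_collection) {
            throw std::runtime_error("QueryPlanKilled: collection dropped during yield");
        }
        restoreState(RequiresCollTag{});
    }

protected:
    struct RequiresCollTag {};

    // Subclass-specific yield preparation and recovery.
    virtual void saveState(RequiresCollTag) = 0;
    virtual void restoreState(RequiresCollTag) = 0;

    const Collection* collection() const { return _collection; }

private:
    const UUIDCatalog* _catalog;
    const Collection* _collection;
    const UUID _collectionUUID;
};
```

A subclass such as `CollectionScan` then implements only the tagged `saveState(RequiresCollTag)` / `restoreState(RequiresCollTag)` hooks for its own cursor handling, as the COLLSCAN hunks below show.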
diff --git a/src/mongo/db/SConscript b/src/mongo/db/SConscript index 580b995e181..2921ba61b33 100644 --- a/src/mongo/db/SConscript +++ b/src/mongo/db/SConscript @@ -1038,6 +1038,7 @@ env.Library( 'exec/projection.cpp', 'exec/projection_exec.cpp', 'exec/queued_data_stage.cpp', + 'exec/requires_collection_stage.cpp', 'exec/shard_filter.cpp', 'exec/skip.cpp', 'exec/sort.cpp', diff --git a/src/mongo/db/catalog/capped_utils.cpp b/src/mongo/db/catalog/capped_utils.cpp index 38b54311042..431e23fa6ff 100644 --- a/src/mongo/db/catalog/capped_utils.cpp +++ b/src/mongo/db/catalog/capped_utils.cpp @@ -112,39 +112,37 @@ mongo::Status mongo::emptyCapped(OperationContext* opCtx, const NamespaceString& return Status::OK(); } -mongo::Status mongo::cloneCollectionAsCapped(OperationContext* opCtx, - Database* db, - const std::string& shortFrom, - const std::string& shortTo, - long long size, - bool temp) { +void mongo::cloneCollectionAsCapped(OperationContext* opCtx, + Database* db, + const std::string& shortFrom, + const std::string& shortTo, + long long size, + bool temp) { NamespaceString fromNss(db->name(), shortFrom); NamespaceString toNss(db->name(), shortTo); Collection* fromCollection = db->getCollection(opCtx, fromNss); if (!fromCollection) { - if (db->getViewCatalog()->lookup(opCtx, fromNss.ns())) { - return Status(ErrorCodes::CommandNotSupportedOnView, - str::stream() << "cloneCollectionAsCapped not supported for views: " - << fromNss.ns()); - } - return Status(ErrorCodes::NamespaceNotFound, - str::stream() << "source collection " << fromNss.ns() << " does not exist"); - } + uassert(ErrorCodes::CommandNotSupportedOnView, + str::stream() << "cloneCollectionAsCapped not supported for views: " + << fromNss.ns(), + !db->getViewCatalog()->lookup(opCtx, fromNss.ns())); - if (fromNss.isDropPendingNamespace()) { - return Status(ErrorCodes::NamespaceNotFound, - str::stream() << "source collection " << fromNss.ns() - << " is currently in a drop-pending state."); + uasserted(ErrorCodes::NamespaceNotFound, + str::stream() << "source collection " << fromNss.ns() << " does not exist"); } - if (db->getCollection(opCtx, toNss)) { - return Status(ErrorCodes::NamespaceExists, - str::stream() << "cloneCollectionAsCapped failed - destination collection " - << toNss.ns() - << " already exists. source collection: " - << fromNss.ns()); - } + uassert(ErrorCodes::NamespaceNotFound, + str::stream() << "source collection " << fromNss.ns() + << " is currently in a drop-pending state.", + !fromNss.isDropPendingNamespace()); + + uassert(ErrorCodes::NamespaceExists, + str::stream() << "cloneCollectionAsCapped failed - destination collection " + << toNss.ns() + << " already exists. source collection: " + << fromNss.ns(), + !db->getCollection(opCtx, toNss)); // create new collection { @@ -160,9 +158,7 @@ mongo::Status mongo::cloneCollectionAsCapped(OperationContext* opCtx, BSONObjBuilder cmd; cmd.append("create", toNss.coll()); cmd.appendElements(options.toBSON()); - Status status = createCollection(opCtx, toNss.db().toString(), cmd.done()); - if (!status.isOK()) - return status; + uassertStatusOK(createCollection(opCtx, toNss.db().toString(), cmd.done())); } Collection* toCollection = db->getCollection(opCtx, toNss); @@ -197,7 +193,7 @@ mongo::Status mongo::cloneCollectionAsCapped(OperationContext* opCtx, switch (state) { case PlanExecutor::IS_EOF: - return Status::OK(); + return; case PlanExecutor::ADVANCED: { if (excessSize > 0) { // 4x is for padding, power of 2, etc... 
@@ -242,19 +238,16 @@ mongo::Status mongo::cloneCollectionAsCapped(OperationContext* opCtx, // abandonSnapshot. exec->saveState(); opCtx->recoveryUnit()->abandonSnapshot(); - auto restoreStatus = exec->restoreState(); // Handles any WCEs internally. - if (!restoreStatus.isOK()) { - return restoreStatus; - } + exec->restoreState(); // Handles any WCEs internally. } } MONGO_UNREACHABLE; } -mongo::Status mongo::convertToCapped(OperationContext* opCtx, - const NamespaceString& collectionName, - long long size) { +void mongo::convertToCapped(OperationContext* opCtx, + const NamespaceString& collectionName, + long long size) { StringData dbname = collectionName.db(); StringData shortSource = collectionName.coll(); @@ -263,42 +256,33 @@ mongo::Status mongo::convertToCapped(OperationContext* opCtx, bool userInitiatedWritesAndNotPrimary = opCtx->writesAreReplicated() && !repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesFor(opCtx, collectionName); - if (userInitiatedWritesAndNotPrimary) { - return Status(ErrorCodes::NotMaster, - str::stream() << "Not primary while converting " << collectionName.ns() - << " to a capped collection"); - } + uassert(ErrorCodes::NotMaster, + str::stream() << "Not primary while converting " << collectionName.ns() + << " to a capped collection", + !userInitiatedWritesAndNotPrimary); Database* const db = autoDb.getDb(); - if (!db) { - return Status(ErrorCodes::NamespaceNotFound, - str::stream() << "database " << dbname << " not found"); - } + uassert( + ErrorCodes::NamespaceNotFound, str::stream() << "database " << dbname << " not found", db); BackgroundOperation::assertNoBgOpInProgForDb(dbname); // Generate a temporary collection name that will not collide with any existing collections. auto tmpNameResult = db->makeUniqueCollectionNamespace(opCtx, "tmp%%%%%.convertToCapped." + shortSource); - if (!tmpNameResult.isOK()) { - return tmpNameResult.getStatus().withContext( - str::stream() << "Cannot generate temporary collection namespace to convert " - << collectionName.ns() - << " to a capped collection"); - } + uassertStatusOKWithContext(tmpNameResult, + str::stream() + << "Cannot generate temporary collection namespace to convert " + << collectionName.ns() + << " to a capped collection"); + const auto& longTmpName = tmpNameResult.getValue(); const auto shortTmpName = longTmpName.coll().toString(); - { - Status status = - cloneCollectionAsCapped(opCtx, db, shortSource.toString(), shortTmpName, size, true); - - if (!status.isOK()) - return status; - } + cloneCollectionAsCapped(opCtx, db, shortSource.toString(), shortTmpName, size, true); RenameCollectionOptions options; options.dropTarget = true; options.stayTemp = false; - return renameCollection(opCtx, longTmpName, collectionName, options); + uassertStatusOK(renameCollection(opCtx, longTmpName, collectionName, options)); } diff --git a/src/mongo/db/catalog/capped_utils.h b/src/mongo/db/catalog/capped_utils.h index a4c683dc632..a100d5fd3a9 100644 --- a/src/mongo/db/catalog/capped_utils.h +++ b/src/mongo/db/catalog/capped_utils.h @@ -43,17 +43,17 @@ Status emptyCapped(OperationContext* opCtx, const NamespaceString& collectionNam /** * Clones the collection "shortFrom" to the capped collection "shortTo" with a size of "size". 
*/ -Status cloneCollectionAsCapped(OperationContext* opCtx, - Database* db, - const std::string& shortFrom, - const std::string& shortTo, - long long size, - bool temp); +void cloneCollectionAsCapped(OperationContext* opCtx, + Database* db, + const std::string& shortFrom, + const std::string& shortTo, + long long size, + bool temp); /** * Converts the collection "collectionName" to a capped collection with a size of "size". */ -Status convertToCapped(OperationContext* opCtx, - const NamespaceString& collectionName, - long long size); +void convertToCapped(OperationContext* opCtx, + const NamespaceString& collectionName, + long long size); } // namespace mongo diff --git a/src/mongo/db/catalog/capped_utils_test.cpp b/src/mongo/db/catalog/capped_utils_test.cpp index 5cda6319aa3..2e23dcccd10 100644 --- a/src/mongo/db/catalog/capped_utils_test.cpp +++ b/src/mongo/db/catalog/capped_utils_test.cpp @@ -111,7 +111,8 @@ TEST_F(CappedUtilsTest, ConvertToCappedReturnsNamespaceNotFoundIfCollectionIsMis NamespaceString nss("test.t"); auto opCtx = makeOpCtx(); ASSERT_FALSE(collectionExists(opCtx.get(), nss)); - ASSERT_EQUALS(ErrorCodes::NamespaceNotFound, convertToCapped(opCtx.get(), nss, 1000.0)); + ASSERT_THROWS_CODE( + convertToCapped(opCtx.get(), nss, 1000.0), DBException, ErrorCodes::NamespaceNotFound); } TEST_F(CappedUtilsTest, ConvertToCappedUpdatesCollectionOptionsOnSuccess) { @@ -122,7 +123,7 @@ TEST_F(CappedUtilsTest, ConvertToCappedUpdatesCollectionOptionsOnSuccess) { auto options = getCollectionOptions(opCtx.get(), nss); ASSERT_FALSE(options.capped); - ASSERT_OK(convertToCapped(opCtx.get(), nss, cappedCollectionSize)); + convertToCapped(opCtx.get(), nss, cappedCollectionSize); options = getCollectionOptions(opCtx.get(), nss); ASSERT_TRUE(options.capped); ASSERT_APPROX_EQUAL(cappedCollectionSize, options.cappedSize, 0.001) @@ -139,8 +140,9 @@ TEST_F(CappedUtilsTest, ConvertToCappedReturnsNamespaceNotFoundIfCollectionIsDro auto options = getCollectionOptions(opCtx.get(), dropPendingNss); ASSERT_FALSE(options.capped); - ASSERT_EQUALS(ErrorCodes::NamespaceNotFound, - convertToCapped(opCtx.get(), dropPendingNss, cappedCollectionSize)); + ASSERT_THROWS_CODE(convertToCapped(opCtx.get(), dropPendingNss, cappedCollectionSize), + DBException, + ErrorCodes::NamespaceNotFound); options = getCollectionOptions(opCtx.get(), dropPendingNss); ASSERT_FALSE(options.capped); } diff --git a/src/mongo/db/catalog/multi_index_block_impl.cpp b/src/mongo/db/catalog/multi_index_block_impl.cpp index 87a851f9c1d..32be5a0e4a7 100644 --- a/src/mongo/db/catalog/multi_index_block_impl.cpp +++ b/src/mongo/db/catalog/multi_index_block_impl.cpp @@ -400,9 +400,10 @@ Status MultiIndexBlockImpl::insertAllDocumentsInCollection() { } wunit.commit(); if (_buildInBackground) { - auto restoreStatus = exec->restoreState(); // Handles any WCEs internally. - if (!restoreStatus.isOK()) { - return restoreStatus; + try { + exec->restoreState(); // Handles any WCEs internally. + } catch (...) { + return exceptionToStatus(); } } @@ -422,9 +423,10 @@ Status MultiIndexBlockImpl::insertAllDocumentsInCollection() { // abandonSnapshot. exec->saveState(); _opCtx->recoveryUnit()->abandonSnapshot(); - auto restoreStatus = exec->restoreState(); // Handles any WCEs internally. - if (!restoreStatus.isOK()) { - return restoreStatus; + try { + exec->restoreState(); // Handles any WCEs internally. + } catch (...) 
{ + return exceptionToStatus(); } } } diff --git a/src/mongo/db/catalog/uuid_catalog.h b/src/mongo/db/catalog/uuid_catalog.h index 9fd0b518aa8..9a045be4b5f 100644 --- a/src/mongo/db/catalog/uuid_catalog.h +++ b/src/mongo/db/catalog/uuid_catalog.h @@ -171,10 +171,11 @@ public: Collection* removeUUIDCatalogEntry(CollectionUUID uuid); /** - * This function gets the Collection* pointer that corresponds to - * CollectionUUID uuid. The required locks should be obtained prior - * to calling this function, or else the found Collection pointer - * might no longer be valid when the call returns. + * This function gets the Collection pointer that corresponds to the CollectionUUID. The + * required locks should be obtained prior to calling this function, or else the found + * Collection pointer might no longer be valid when the call returns. + * + * Returns nullptr if the 'uuid' is not known. */ Collection* lookupCollectionByUUID(CollectionUUID uuid) const; diff --git a/src/mongo/db/commands/collection_to_capped.cpp b/src/mongo/db/commands/collection_to_capped.cpp index a7b3530d0e9..816882ad0fb 100644 --- a/src/mongo/db/commands/collection_to_capped.cpp +++ b/src/mongo/db/commands/collection_to_capped.cpp @@ -135,9 +135,7 @@ public: str::stream() << "database " << dbname << " not found"); } - Status status = - cloneCollectionAsCapped(opCtx, db, from.toString(), to.toString(), size, temp); - uassertStatusOK(status); + cloneCollectionAsCapped(opCtx, db, from.toString(), to.toString(), size, temp); return true; } } cmdCloneCollectionAsCapped; @@ -180,7 +178,7 @@ public: return false; } - uassertStatusOK(convertToCapped(opCtx, nss, size)); + convertToCapped(opCtx, nss, size); return true; } diff --git a/src/mongo/db/commands/dbcommands_d.cpp b/src/mongo/db/commands/dbcommands_d.cpp index 111b693e865..7c715eeacb4 100644 --- a/src/mongo/db/commands/dbcommands_d.cpp +++ b/src/mongo/db/commands/dbcommands_d.cpp @@ -315,12 +315,8 @@ public: break; } - // Have the lock again. See if we were killed. - if (!exec->restoreState().isOK()) { - if (!partialOk) { - uasserted(13281, "File deleted during filemd5 command"); - } - } + // Now that we have the lock again, we can restore the PlanExecutor. 
+ exec->restoreState(); } if (PlanExecutor::DEAD == state || PlanExecutor::FAILURE == state) { diff --git a/src/mongo/db/commands/getmore_cmd.cpp b/src/mongo/db/commands/getmore_cmd.cpp index d3484500099..31b465475de 100644 --- a/src/mongo/db/commands/getmore_cmd.cpp +++ b/src/mongo/db/commands/getmore_cmd.cpp @@ -411,7 +411,7 @@ public: PlanExecutor* exec = cursor->getExecutor(); exec->reattachToOperationContext(opCtx); - uassertStatusOK(exec->restoreState()); + exec->restoreState(); auto planSummary = Explain::getPlanSummary(exec); { diff --git a/src/mongo/db/commands/mr.cpp b/src/mongo/db/commands/mr.cpp index 7fca7451fb9..9b039f98e96 100644 --- a/src/mongo/db/commands/mr.cpp +++ b/src/mongo/db/commands/mr.cpp @@ -1207,7 +1207,7 @@ void State::finalReduce(OperationContext* opCtx, CurOp* curOp, ProgressMeterHold all.push_back(o); _opCtx->checkForInterrupt(); - uassertStatusOK(exec->restoreState()); + exec->restoreState(); } uassert(34428, @@ -1553,8 +1553,7 @@ public: state.reduceAndSpillInMemoryStateIfNeeded(); scopedAutoColl.emplace(opCtx, config.nss, MODE_S); - auto restoreStatus = exec->restoreState(); - uassertStatusOK(restoreStatus); + exec->restoreState(); reduceTime += t.micros(); diff --git a/src/mongo/db/exec/collection_scan.cpp b/src/mongo/db/exec/collection_scan.cpp index ad040cdbd24..3f395d6fa21 100644 --- a/src/mongo/db/exec/collection_scan.cpp +++ b/src/mongo/db/exec/collection_scan.cpp @@ -57,18 +57,18 @@ using stdx::make_unique; const char* CollectionScan::kStageType = "COLLSCAN"; CollectionScan::CollectionScan(OperationContext* opCtx, + const Collection* collection, const CollectionScanParams& params, WorkingSet* workingSet, const MatchExpression* filter) - : PlanStage(kStageType, opCtx), + : RequiresCollectionStage(kStageType, opCtx, collection), _workingSet(workingSet), _filter(filter), - _params(params), - _isDead(false) { + _params(params) { // Explain reports the direction of the collection scan. _specificStats.direction = params.direction; _specificStats.maxTs = params.maxTs; - invariant(!_params.shouldTrackLatestOplogTimestamp || _params.collection->ns().isOplog()); + invariant(!_params.shouldTrackLatestOplogTimestamp || collection->ns().isOplog()); if (params.maxTs) { _endConditionBSON = BSON("$gte" << *(params.maxTs)); @@ -78,17 +78,6 @@ CollectionScan::CollectionScan(OperationContext* opCtx, } PlanStage::StageState CollectionScan::doWork(WorkingSetID* out) { - if (_isDead) { - Status status( - ErrorCodes::CappedPositionLost, - str::stream() - << "CollectionScan died due to position in capped collection being deleted. " - << "Last seen record id: " - << _lastSeenId); - *out = WorkingSetCommon::allocateStatusMember(_workingSet, status); - return PlanStage::DEAD; - } - if (_commonStats.isEOF) { return PlanStage::IS_EOF; } @@ -110,14 +99,13 @@ PlanStage::StageState CollectionScan::doWork(WorkingSetID* out) { // the cursor. Also call abandonSnapshot to make sure that we are using a fresh // storage engine snapshot while waiting. Otherwise, we will end up reading from the // snapshot where the oplog entries are not yet visible even after the wait. 
- invariant(!_params.tailable && _params.collection->ns().isOplog()); + invariant(!_params.tailable && collection()->ns().isOplog()); getOpCtx()->recoveryUnit()->abandonSnapshot(); - _params.collection->getRecordStore()->waitForAllEarlierOplogWritesToBeVisible( - getOpCtx()); + collection()->getRecordStore()->waitForAllEarlierOplogWritesToBeVisible(getOpCtx()); } - _cursor = _params.collection->getCursor(getOpCtx(), forward); + _cursor = collection()->getCursor(getOpCtx(), forward); if (!_lastSeenId.isNull()) { invariant(_params.tailable); @@ -128,7 +116,6 @@ PlanStage::StageState CollectionScan::doWork(WorkingSetID* out) { // returned this one. This is only possible in the tailing case because that is the // only time we'd need to create a cursor after already getting a record out of it. if (!_cursor->seekExact(_lastSeenId)) { - _isDead = true; Status status(ErrorCodes::CappedPositionLost, str::stream() << "CollectionScan died due to failure to restore " << "tailable cursor position. " @@ -221,20 +208,24 @@ PlanStage::StageState CollectionScan::returnIfMatches(WorkingSetMember* member, } bool CollectionScan::isEOF() { - return _commonStats.isEOF || _isDead; + return _commonStats.isEOF; } -void CollectionScan::doSaveState() { +void CollectionScan::saveState(RequiresCollTag) { if (_cursor) { _cursor->save(); } } -void CollectionScan::doRestoreState() { +void CollectionScan::restoreState(RequiresCollTag) { if (_cursor) { - if (!_cursor->restore()) { - _isDead = true; - } + const bool couldRestore = _cursor->restore(); + uassert(ErrorCodes::CappedPositionLost, + str::stream() + << "CollectionScan died due to position in capped collection being deleted. " + << "Last seen record id: " + << _lastSeenId, + couldRestore); } } diff --git a/src/mongo/db/exec/collection_scan.h b/src/mongo/db/exec/collection_scan.h index 5fe56a6b138..1c7138256e0 100644 --- a/src/mongo/db/exec/collection_scan.h +++ b/src/mongo/db/exec/collection_scan.h @@ -33,7 +33,7 @@ #include <memory> #include "mongo/db/exec/collection_scan_common.h" -#include "mongo/db/exec/plan_stage.h" +#include "mongo/db/exec/requires_collection_stage.h" #include "mongo/db/matcher/expression_leaf.h" #include "mongo/db/record_id.h" @@ -50,9 +50,12 @@ class OperationContext; * * Preconditions: Valid RecordId. */ -class CollectionScan final : public PlanStage { +class CollectionScan final : public RequiresCollectionStage { public: + static const char* kStageType; + CollectionScan(OperationContext* opCtx, + const Collection* collection, const CollectionScanParams& params, WorkingSet* workingSet, const MatchExpression* filter); @@ -60,8 +63,6 @@ public: StageState doWork(WorkingSetID* out) final; bool isEOF() final; - void doSaveState() final; - void doRestoreState() final; void doDetachFromOperationContext() final; void doReattachToOperationContext() final; @@ -77,7 +78,10 @@ public: const SpecificStats* getSpecificStats() const final; - static const char* kStageType; +protected: + void saveState(RequiresCollTag) final; + + void restoreState(RequiresCollTag) final; private: /** @@ -108,8 +112,6 @@ private: CollectionScanParams _params; - bool _isDead; - RecordId _lastSeenId; // Null if nothing has been returned from _cursor yet. 
// If _params.shouldTrackLatestOplogTimestamp is set and the collection is the oplog, the latest diff --git a/src/mongo/db/exec/collection_scan_common.h b/src/mongo/db/exec/collection_scan_common.h index 108f76154d5..af62b586898 100644 --- a/src/mongo/db/exec/collection_scan_common.h +++ b/src/mongo/db/exec/collection_scan_common.h @@ -43,10 +43,6 @@ struct CollectionScanParams { BACKWARD = -1, }; - // What collection? - // not owned - const Collection* collection = nullptr; - // The RecordId to which we should seek to as the first document of the scan. RecordId start; diff --git a/src/mongo/db/exec/plan_stage.h b/src/mongo/db/exec/plan_stage.h index 9bee44f2900..cd3f1963962 100644 --- a/src/mongo/db/exec/plan_stage.h +++ b/src/mongo/db/exec/plan_stage.h @@ -223,6 +223,10 @@ public: * Can only be called while the stage in is the "saved" state. * * Propagates to all children, then calls doRestoreState(). + * + * Throws a UserException on failure to restore due to a conflicting event such as a + * collection drop. May throw a WriteConflictException, in which case the caller may choose to + * retry. */ void restoreState(); diff --git a/src/mongo/db/exec/requires_collection_stage.cpp b/src/mongo/db/exec/requires_collection_stage.cpp new file mode 100644 index 00000000000..106767eed73 --- /dev/null +++ b/src/mongo/db/exec/requires_collection_stage.cpp @@ -0,0 +1,57 @@ +/** + * Copyright (C) 2018-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * <http://www.mongodb.com/licensing/server-side-public-license>. + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ + +#include "mongo/platform/basic.h" + +#include "mongo/db/exec/requires_collection_stage.h" + +#include "mongo/db/catalog/uuid_catalog.h" + +namespace mongo { + +void RequiresCollectionStage::doSaveState() { + // A stage may not access storage while in a saved state. 
+ _collection = nullptr; + + saveState(RequiresCollTag{}); +} + +void RequiresCollectionStage::doRestoreState() { + invariant(!_collection); + + const UUIDCatalog& catalog = UUIDCatalog::get(getOpCtx()); + _collection = catalog.lookupCollectionByUUID(_collectionUUID); + uassert(ErrorCodes::QueryPlanKilled, + str::stream() << "UUID " << _collectionUUID << " no longer exists.", + _collection); + + restoreState(RequiresCollTag{}); +} + +} // namespace mongo diff --git a/src/mongo/db/exec/requires_collection_stage.h b/src/mongo/db/exec/requires_collection_stage.h new file mode 100644 index 00000000000..837b98ec702 --- /dev/null +++ b/src/mongo/db/exec/requires_collection_stage.h @@ -0,0 +1,86 @@ +/** + * Copyright (C) 2018-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * <http://www.mongodb.com/licensing/server-side-public-license>. + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ + +#pragma once + +#include "mongo/db/catalog/collection.h" +#include "mongo/db/exec/plan_stage.h" +#include "mongo/util/uuid.h" + +namespace mongo { + +/** + * A base class for plan stages which access a collection. In addition to providing derived classes + * access to the Collection pointer, the primary purpose of this class is to assume responsibility + * for checking that the collection is still valid (e.g. has not been dropped) when recovering from + * yield. + * + * Subclasses must implement the saveStage() and restoreState() variants tagged with RequiresCollTag + * in order to supply custom yield preparation or yield recovery logic. + */ +class RequiresCollectionStage : public PlanStage { +public: + RequiresCollectionStage(const char* stageType, OperationContext* opCtx, const Collection* coll) + : PlanStage(stageType, opCtx), + _collection(coll), + _collectionUUID(_collection->uuid().get()) {} + + virtual ~RequiresCollectionStage() = default; + +protected: + struct RequiresCollTag {}; + + void doSaveState() final; + + void doRestoreState() final; + + /** + * Performs yield preparation specific to a stage which subclasses from RequiresCollectionStage. + */ + virtual void saveState(RequiresCollTag) = 0; + + /** + * Performs yield recovery specific to a stage which subclasses from RequiresCollectionStage. 
+ */ + virtual void restoreState(RequiresCollTag) = 0; + + const Collection* collection() { + return _collection; + } + + UUID uuid() { + return _collectionUUID; + } + +private: + const Collection* _collection; + const UUID _collectionUUID; +}; + +} // namespace mongo diff --git a/src/mongo/db/exec/stagedebug_cmd.cpp b/src/mongo/db/exec/stagedebug_cmd.cpp index d0738a8330f..2cb843ea293 100644 --- a/src/mongo/db/exec/stagedebug_cmd.cpp +++ b/src/mongo/db/exec/stagedebug_cmd.cpp @@ -412,7 +412,6 @@ public: return new SkipStage(opCtx, nodeArgs["num"].numberInt(), workingSet, subNode); } else if ("cscan" == nodeName) { CollectionScanParams params; - params.collection = collection; // What direction? uassert(16963, @@ -424,7 +423,7 @@ public: params.direction = CollectionScanParams::BACKWARD; } - return new CollectionScan(opCtx, params, workingSet, matcher); + return new CollectionScan(opCtx, collection, params, workingSet, matcher); } // sort is disabled for now. #if 0 diff --git a/src/mongo/db/pipeline/document_source_cursor.cpp b/src/mongo/db/pipeline/document_source_cursor.cpp index 2457d0f3635..cd0783eb5ac 100644 --- a/src/mongo/db/pipeline/document_source_cursor.cpp +++ b/src/mongo/db/pipeline/document_source_cursor.cpp @@ -93,7 +93,7 @@ void DocumentSourceCursor::loadBatch() { uassertStatusOK(repl::ReplicationCoordinator::get(pExpCtx->opCtx) ->checkCanServeReadsFor(pExpCtx->opCtx, _exec->nss(), true)); - uassertStatusOK(_exec->restoreState()); + _exec->restoreState(); int memUsageBytes = 0; { diff --git a/src/mongo/db/query/find.cpp b/src/mongo/db/query/find.cpp index a4eb53eb058..08d43c05df0 100644 --- a/src/mongo/db/query/find.cpp +++ b/src/mongo/db/query/find.cpp @@ -398,7 +398,7 @@ Message getMore(OperationContext* opCtx, PlanExecutor* exec = cc->getExecutor(); exec->reattachToOperationContext(opCtx); - uassertStatusOK(exec->restoreState()); + exec->restoreState(); auto planSummary = Explain::getPlanSummary(exec); { @@ -447,7 +447,7 @@ Message getMore(OperationContext* opCtx, // Reacquiring locks. readLock.emplace(opCtx, nss); - uassertStatusOK(exec->restoreState()); + exec->restoreState(); // We woke up because either the timed_wait expired, or there was more data. Either // way, attempt to generate another batch of results. diff --git a/src/mongo/db/query/get_executor.cpp b/src/mongo/db/query/get_executor.cpp index 8e74fd72cbc..dca519ec764 100644 --- a/src/mongo/db/query/get_executor.cpp +++ b/src/mongo/db/query/get_executor.cpp @@ -630,7 +630,6 @@ StatusWith<unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getOplogStartHack( // Build our collection scan. 
CollectionScanParams params; - params.collection = collection; if (startLoc) { LOG(3) << "Using direct oplog seek"; params.start = *startLoc; @@ -652,7 +651,7 @@ StatusWith<unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getOplogStartHack( } auto ws = make_unique<WorkingSet>(); - auto cs = make_unique<CollectionScan>(opCtx, params, ws.get(), cq->root()); + auto cs = make_unique<CollectionScan>(opCtx, collection, params, ws.get(), cq->root()); return PlanExecutor::make( opCtx, std::move(ws), std::move(cs), std::move(cq), collection, PlanExecutor::YIELD_AUTO); } diff --git a/src/mongo/db/query/internal_plans.cpp b/src/mongo/db/query/internal_plans.cpp index 5d72b5f93eb..638dcf68662 100644 --- a/src/mongo/db/query/internal_plans.cpp +++ b/src/mongo/db/query/internal_plans.cpp @@ -180,7 +180,6 @@ std::unique_ptr<PlanStage> InternalPlanner::_collectionScan(OperationContext* op invariant(collection); CollectionScanParams params; - params.collection = collection; params.start = startLoc; params.shouldWaitForOplogVisibility = shouldWaitForOplogVisibility(opCtx, collection, false); @@ -190,7 +189,7 @@ std::unique_ptr<PlanStage> InternalPlanner::_collectionScan(OperationContext* op params.direction = CollectionScanParams::BACKWARD; } - return stdx::make_unique<CollectionScan>(opCtx, params, ws, nullptr); + return stdx::make_unique<CollectionScan>(opCtx, collection, params, ws, nullptr); } std::unique_ptr<PlanStage> InternalPlanner::_indexScan(OperationContext* opCtx, diff --git a/src/mongo/db/query/plan_executor.h b/src/mongo/db/query/plan_executor.h index 9c97d30903d..86480562f98 100644 --- a/src/mongo/db/query/plan_executor.h +++ b/src/mongo/db/query/plan_executor.h @@ -306,20 +306,18 @@ public: virtual void saveState() = 0; /** - * Restores the state saved by a saveState() call. + * Restores the state saved by a saveState() call. When this method returns successfully, the + * execution tree can once again be executed via work(). * - * Returns Status::OK() if the state was successfully restored and the execution tree can be - * work()'d. + * Throws a UserException if the state cannot be successfully restored (e.g. a collection was + * dropped or the position of a capped cursor was lost during a yield). If restore fails, it is + * only safe to call dispose(), detachFromOperationContext(), or the destructor. * - * Returns ErrorCodes::QueryPlanKilled if the PlanExecutor was killed while saved. - * - * If allowed, will yield and retry if a WriteConflictException is encountered. If the time - * limit is exceeded during this retry process, returns ErrorCodes::MaxTimeMSExpired. If this - * PlanExecutor is killed during this retry process, returns ErrorCodes::QueryPlanKilled. In - * this scenario, locks will have been released, and will not be held when control returns to - * the caller. + * If allowed by the executor's yield policy, will yield and retry internally if a + * WriteConflictException is encountered. If the time limit is exceeded during this retry + * process, throws ErrorCodes::MaxTimeMSExpired. */ - virtual Status restoreState() = 0; + virtual void restoreState() = 0; /** * Detaches from the OperationContext and releases any storage-engine state. @@ -344,7 +342,7 @@ public: * * This is only public for PlanYieldPolicy. DO NOT CALL ANYWHERE ELSE. 
*/ - virtual Status restoreStateWithoutRetrying() = 0; + virtual void restoreStateWithoutRetrying() = 0; // // Running Support diff --git a/src/mongo/db/query/plan_executor_impl.cpp b/src/mongo/db/query/plan_executor_impl.cpp index 037df73f515..23bbd3e5428 100644 --- a/src/mongo/db/query/plan_executor_impl.cpp +++ b/src/mongo/db/query/plan_executor_impl.cpp @@ -355,19 +355,19 @@ void PlanExecutorImpl::saveState() { _currentState = kSaved; } -Status PlanExecutorImpl::restoreState() { +void PlanExecutorImpl::restoreState() { try { - return restoreStateWithoutRetrying(); + restoreStateWithoutRetrying(); } catch (const WriteConflictException&) { if (!_yieldPolicy->canAutoYield()) throw; // Handles retries by calling restoreStateWithoutRetrying() in a loop. - return _yieldPolicy->yieldOrInterrupt(); + uassertStatusOK(_yieldPolicy->yieldOrInterrupt()); } } -Status PlanExecutorImpl::restoreStateWithoutRetrying() { +void PlanExecutorImpl::restoreStateWithoutRetrying() { invariant(_currentState == kSaved); if (!isMarkedAsKilled()) { @@ -375,7 +375,7 @@ Status PlanExecutorImpl::restoreStateWithoutRetrying() { } _currentState = kUsable; - return _killStatus; + uassertStatusOK(_killStatus); } void PlanExecutorImpl::detachFromOperationContext() { diff --git a/src/mongo/db/query/plan_executor_impl.h b/src/mongo/db/query/plan_executor_impl.h index 6447d014cdc..0a13cc25b57 100644 --- a/src/mongo/db/query/plan_executor_impl.h +++ b/src/mongo/db/query/plan_executor_impl.h @@ -62,10 +62,10 @@ public: const NamespaceString& nss() const final; OperationContext* getOpCtx() const final; void saveState() final; - Status restoreState() final; + void restoreState() final; void detachFromOperationContext() final; void reattachToOperationContext(OperationContext* opCtx) final; - Status restoreStateWithoutRetrying() final; + void restoreStateWithoutRetrying() final; ExecState getNextSnapshotted(Snapshotted<BSONObj>* objOut, RecordId* dlOut) final; ExecState getNext(BSONObj* objOut, RecordId* dlOut) final; bool isEOF() final; diff --git a/src/mongo/db/query/plan_yield_policy.cpp b/src/mongo/db/query/plan_yield_policy.cpp index 3091d7e78cd..030d5102733 100644 --- a/src/mongo/db/query/plan_yield_policy.cpp +++ b/src/mongo/db/query/plan_yield_policy.cpp @@ -151,12 +151,17 @@ Status PlanYieldPolicy::yield(stdx::function<void()> whileYieldingFn) { QueryYield::yieldAllLocks(opCtx, whileYieldingFn, _planYielding->nss()); } - return _planYielding->restoreStateWithoutRetrying(); + _planYielding->restoreStateWithoutRetrying(); + return Status::OK(); } catch (const WriteConflictException&) { CurOp::get(opCtx)->debug().additiveMetrics.incrementWriteConflicts(1); WriteConflictException::logAndBackoff( attempt, "plan execution restoreState", _planYielding->nss().ns()); // retry + } catch (...) { + // Errors other than write conflicts don't get retried, and should instead result in the + // PlanExecutor dying. We propagate all such errors as status codes. 
+ return exceptionToStatus(); } } } diff --git a/src/mongo/db/query/stage_builder.cpp b/src/mongo/db/query/stage_builder.cpp index f67af999503..a7b2bb23510 100644 --- a/src/mongo/db/query/stage_builder.cpp +++ b/src/mongo/db/query/stage_builder.cpp @@ -77,13 +77,12 @@ PlanStage* buildStages(OperationContext* opCtx, case STAGE_COLLSCAN: { const CollectionScanNode* csn = static_cast<const CollectionScanNode*>(root); CollectionScanParams params; - params.collection = collection; params.tailable = csn->tailable; params.shouldTrackLatestOplogTimestamp = csn->shouldTrackLatestOplogTimestamp; params.direction = (csn->direction == 1) ? CollectionScanParams::FORWARD : CollectionScanParams::BACKWARD; params.shouldWaitForOplogVisibility = csn->shouldWaitForOplogVisibility; - return new CollectionScan(opCtx, params, ws, csn->filter.get()); + return new CollectionScan(opCtx, collection, params, ws, csn->filter.get()); } case STAGE_IXSCAN: { const IndexScanNode* ixn = static_cast<const IndexScanNode*>(root); diff --git a/src/mongo/db/repl/oplog.cpp b/src/mongo/db/repl/oplog.cpp index e2c497de5f7..47fd238871b 100644 --- a/src/mongo/db/repl/oplog.cpp +++ b/src/mongo/db/repl/oplog.cpp @@ -995,7 +995,8 @@ std::map<std::string, ApplyOpMetadata> opsMap = { const OpTime& opTime, const OplogEntry& entry, OplogApplication::Mode mode) -> Status { - return convertToCapped(opCtx, parseUUIDorNs(opCtx, ns, ui, cmd), cmd["size"].number()); + convertToCapped(opCtx, parseUUIDorNs(opCtx, ns, ui, cmd), cmd["size"].number()); + return Status::OK(); }, {ErrorCodes::NamespaceNotFound}}}, {"emptycapped", diff --git a/src/mongo/db/s/collection_range_deleter.cpp b/src/mongo/db/s/collection_range_deleter.cpp index e70f949424a..408d5afafee 100644 --- a/src/mongo/db/s/collection_range_deleter.cpp +++ b/src/mongo/db/s/collection_range_deleter.cpp @@ -382,12 +382,14 @@ StatusWith<int> CollectionRangeDeleter::_doDeletion(OperationContext* opCtx, collection->deleteDocument(opCtx, kUninitializedStmtId, rloc, nullptr, true); wuow.commit(); }); - auto restoreStateStatus = exec->restoreState(); - if (!restoreStateStatus.isOK()) { + + try { + exec->restoreState(); + } catch (const DBException& ex) { warning() << "error restoring cursor state while trying to delete " << redact(min) << " to " << redact(max) << " in " << nss << ", stats: " << Explain::getWinningPlanStats(exec.get()) << ": " - << redact(restoreStateStatus); + << redact(ex.toStatus()); break; } ShardingStatistics::get(opCtx).countDocsDeletedOnDonor.addAndFetch(1); diff --git a/src/mongo/dbtests/documentsourcetests.cpp b/src/mongo/dbtests/documentsourcetests.cpp index bd37bc31416..40e666ad92c 100644 --- a/src/mongo/dbtests/documentsourcetests.cpp +++ b/src/mongo/dbtests/documentsourcetests.cpp @@ -368,12 +368,11 @@ TEST_F(DocumentSourceCursorTest, TailableAwaitDataCursorShouldErrorAfterTimeout) AutoGetCollectionForRead readLock(opCtx(), nss); auto workingSet = stdx::make_unique<WorkingSet>(); CollectionScanParams collScanParams; - collScanParams.collection = readLock.getCollection(); collScanParams.tailable = true; auto filter = BSON("a" << 1); auto matchExpression = uassertStatusOK(MatchExpressionParser::parse(filter, ctx())); auto collectionScan = stdx::make_unique<CollectionScan>( - opCtx(), collScanParams, workingSet.get(), matchExpression.get()); + opCtx(), readLock.getCollection(), collScanParams, workingSet.get(), matchExpression.get()); auto queryRequest = stdx::make_unique<QueryRequest>(nss); queryRequest->setFilter(filter); 
queryRequest->setTailableMode(TailableModeEnum::kTailableAndAwaitData); @@ -408,11 +407,10 @@ TEST_F(DocumentSourceCursorTest, NonAwaitDataCursorShouldErrorAfterTimeout) { AutoGetCollectionForRead readLock(opCtx(), nss); auto workingSet = stdx::make_unique<WorkingSet>(); CollectionScanParams collScanParams; - collScanParams.collection = readLock.getCollection(); auto filter = BSON("a" << 1); auto matchExpression = uassertStatusOK(MatchExpressionParser::parse(filter, ctx())); auto collectionScan = stdx::make_unique<CollectionScan>( - opCtx(), collScanParams, workingSet.get(), matchExpression.get()); + opCtx(), readLock.getCollection(), collScanParams, workingSet.get(), matchExpression.get()); auto queryRequest = stdx::make_unique<QueryRequest>(nss); queryRequest->setFilter(filter); auto canonicalQuery = unittest::assertGet( @@ -453,12 +451,11 @@ TEST_F(DocumentSourceCursorTest, TailableAwaitDataCursorShouldErrorAfterBeingKil AutoGetCollectionForRead readLock(opCtx(), nss); auto workingSet = stdx::make_unique<WorkingSet>(); CollectionScanParams collScanParams; - collScanParams.collection = readLock.getCollection(); collScanParams.tailable = true; auto filter = BSON("a" << 1); auto matchExpression = uassertStatusOK(MatchExpressionParser::parse(filter, ctx())); auto collectionScan = stdx::make_unique<CollectionScan>( - opCtx(), collScanParams, workingSet.get(), matchExpression.get()); + opCtx(), readLock.getCollection(), collScanParams, workingSet.get(), matchExpression.get()); auto queryRequest = stdx::make_unique<QueryRequest>(nss); queryRequest->setFilter(filter); queryRequest->setTailableMode(TailableModeEnum::kTailableAndAwaitData); @@ -492,11 +489,10 @@ TEST_F(DocumentSourceCursorTest, NormalCursorShouldErrorAfterBeingKilled) { AutoGetCollectionForRead readLock(opCtx(), nss); auto workingSet = stdx::make_unique<WorkingSet>(); CollectionScanParams collScanParams; - collScanParams.collection = readLock.getCollection(); auto filter = BSON("a" << 1); auto matchExpression = uassertStatusOK(MatchExpressionParser::parse(filter, ctx())); auto collectionScan = stdx::make_unique<CollectionScan>( - opCtx(), collScanParams, workingSet.get(), matchExpression.get()); + opCtx(), readLock.getCollection(), collScanParams, workingSet.get(), matchExpression.get()); auto queryRequest = stdx::make_unique<QueryRequest>(nss); queryRequest->setFilter(filter); auto canonicalQuery = unittest::assertGet( diff --git a/src/mongo/dbtests/executor_registry.cpp b/src/mongo/dbtests/executor_registry.cpp index ea0934836bd..b53ec260a8e 100644 --- a/src/mongo/dbtests/executor_registry.cpp +++ b/src/mongo/dbtests/executor_registry.cpp @@ -72,10 +72,10 @@ public: std::unique_ptr<PlanExecutor, PlanExecutor::Deleter> getCollscan() { unique_ptr<WorkingSet> ws(new WorkingSet()); CollectionScanParams params; - params.collection = collection(); params.direction = CollectionScanParams::FORWARD; params.tailable = false; - unique_ptr<CollectionScan> scan(new CollectionScan(&_opCtx, params, ws.get(), NULL)); + unique_ptr<CollectionScan> scan( + new CollectionScan(&_opCtx, collection(), params, ws.get(), NULL)); // Create a plan executor to hold it auto qr = stdx::make_unique<QueryRequest>(nss); @@ -139,7 +139,7 @@ public: // At this point, we're done yielding. We recover our lock. // And clean up anything that happened before. - ASSERT_OK(exec->restoreState()); + exec->restoreState(); // Make sure that the PlanExecutor moved forward over the deleted data. 
We don't see // foo==10 @@ -171,7 +171,7 @@ public: // Drop a collection that's not ours. _client.dropCollection("unittests.someboguscollection"); - ASSERT_OK(exec->restoreState()); + exec->restoreState(); ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&obj, NULL)); ASSERT_EQUALS(10, obj["foo"].numberInt()); @@ -180,7 +180,7 @@ public: _client.dropCollection(nss.ns()); - ASSERT_EQUALS(ErrorCodes::QueryPlanKilled, exec->restoreState()); + ASSERT_THROWS_CODE(exec->restoreState(), DBException, ErrorCodes::QueryPlanKilled); } }; @@ -201,7 +201,7 @@ public: exec->saveState(); _client.dropIndexes(nss.ns()); - ASSERT_EQUALS(ErrorCodes::QueryPlanKilled, exec->restoreState()); + ASSERT_THROWS_CODE(exec->restoreState(), DBException, ErrorCodes::QueryPlanKilled); } }; @@ -222,7 +222,7 @@ public: exec->saveState(); _client.dropIndex(nss.ns(), BSON("foo" << 1)); - ASSERT_EQUALS(ErrorCodes::QueryPlanKilled, exec->restoreState()); + ASSERT_THROWS_CODE(exec->restoreState(), DBException, ErrorCodes::QueryPlanKilled); } }; @@ -246,7 +246,7 @@ public: _ctx.reset(); _client.dropDatabase("somesillydb"); _ctx.reset(new dbtests::WriteContextForTests(&_opCtx, nss.ns())); - ASSERT_OK(exec->restoreState()); + exec->restoreState(); ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&obj, NULL)); ASSERT_EQUALS(10, obj["foo"].numberInt()); @@ -257,7 +257,7 @@ public: _ctx.reset(); _client.dropDatabase("unittests"); _ctx.reset(new dbtests::WriteContextForTests(&_opCtx, nss.ns())); - ASSERT_EQUALS(ErrorCodes::QueryPlanKilled, exec->restoreState()); + ASSERT_THROWS_CODE(exec->restoreState(), DBException, ErrorCodes::QueryPlanKilled); } }; diff --git a/src/mongo/dbtests/query_plan_executor.cpp b/src/mongo/dbtests/query_plan_executor.cpp index a5c968c5f03..7aaf6145e87 100644 --- a/src/mongo/dbtests/query_plan_executor.cpp +++ b/src/mongo/dbtests/query_plan_executor.cpp @@ -103,7 +103,6 @@ public: PlanExecutor::YieldPolicy yieldPolicy = PlanExecutor::YieldPolicy::YIELD_MANUAL, TailableModeEnum tailableMode = TailableModeEnum::kNormal) { CollectionScanParams csparams; - csparams.collection = coll; csparams.direction = CollectionScanParams::FORWARD; unique_ptr<WorkingSet> ws(new WorkingSet()); @@ -118,7 +117,7 @@ public: // Make the stage. unique_ptr<PlanStage> root( - new CollectionScan(&_opCtx, csparams, ws.get(), cq.get()->root())); + new CollectionScan(&_opCtx, coll, csparams, ws.get(), cq.get()->root())); // Hand the plan off to the executor. auto statusWithPlanExecutor = PlanExecutor::make( diff --git a/src/mongo/dbtests/query_stage_collscan.cpp b/src/mongo/dbtests/query_stage_collscan.cpp index e11bbe5eb4c..5fa7248fbbb 100644 --- a/src/mongo/dbtests/query_stage_collscan.cpp +++ b/src/mongo/dbtests/query_stage_collscan.cpp @@ -86,10 +86,10 @@ public: int countResults(CollectionScanParams::Direction direction, const BSONObj& filterObj) { AutoGetCollectionForReadCommand ctx(&_opCtx, nss); + auto collection = ctx.getCollection(); // Configure the scan. CollectionScanParams params; - params.collection = ctx.getCollection(); params.direction = direction; params.tailable = false; @@ -105,10 +105,10 @@ public: // Make a scan and have the runner own it. 
unique_ptr<WorkingSet> ws = make_unique<WorkingSet>(); unique_ptr<PlanStage> ps = - make_unique<CollectionScan>(&_opCtx, params, ws.get(), filterExpr.get()); + make_unique<CollectionScan>(&_opCtx, collection, params, ws.get(), filterExpr.get()); auto statusWithPlanExecutor = PlanExecutor::make( - &_opCtx, std::move(ws), std::move(ps), params.collection, PlanExecutor::NO_YIELD); + &_opCtx, std::move(ws), std::move(ps), collection, PlanExecutor::NO_YIELD); ASSERT_OK(statusWithPlanExecutor.getStatus()); auto exec = std::move(statusWithPlanExecutor.getValue()); @@ -128,11 +128,10 @@ public: WorkingSet ws; CollectionScanParams params; - params.collection = collection; params.direction = direction; params.tailable = false; - unique_ptr<CollectionScan> scan(new CollectionScan(&_opCtx, params, &ws, NULL)); + unique_ptr<CollectionScan> scan(new CollectionScan(&_opCtx, collection, params, &ws, NULL)); while (!scan->isEOF()) { WorkingSetID id = WorkingSet::INVALID_ID; PlanStage::StageState state = scan->work(&id); @@ -210,19 +209,20 @@ class QueryStageCollscanObjectsInOrderForward : public QueryStageCollectionScanB public: void run() { AutoGetCollectionForReadCommand ctx(&_opCtx, nss); + auto collection = ctx.getCollection(); // Configure the scan. CollectionScanParams params; - params.collection = ctx.getCollection(); params.direction = CollectionScanParams::FORWARD; params.tailable = false; // Make a scan and have the runner own it. unique_ptr<WorkingSet> ws = make_unique<WorkingSet>(); - unique_ptr<PlanStage> ps = make_unique<CollectionScan>(&_opCtx, params, ws.get(), nullptr); + unique_ptr<PlanStage> ps = + make_unique<CollectionScan>(&_opCtx, collection, params, ws.get(), nullptr); auto statusWithPlanExecutor = PlanExecutor::make( - &_opCtx, std::move(ws), std::move(ps), params.collection, PlanExecutor::NO_YIELD); + &_opCtx, std::move(ws), std::move(ps), collection, PlanExecutor::NO_YIELD); ASSERT_OK(statusWithPlanExecutor.getStatus()); auto exec = std::move(statusWithPlanExecutor.getValue()); @@ -246,17 +246,18 @@ class QueryStageCollscanObjectsInOrderBackward : public QueryStageCollectionScan public: void run() { AutoGetCollectionForReadCommand ctx(&_opCtx, nss); + auto collection = ctx.getCollection(); CollectionScanParams params; - params.collection = ctx.getCollection(); params.direction = CollectionScanParams::BACKWARD; params.tailable = false; unique_ptr<WorkingSet> ws = make_unique<WorkingSet>(); - unique_ptr<PlanStage> ps = make_unique<CollectionScan>(&_opCtx, params, ws.get(), nullptr); + unique_ptr<PlanStage> ps = + make_unique<CollectionScan>(&_opCtx, collection, params, ws.get(), nullptr); auto statusWithPlanExecutor = PlanExecutor::make( - &_opCtx, std::move(ws), std::move(ps), params.collection, PlanExecutor::NO_YIELD); + &_opCtx, std::move(ws), std::move(ps), collection, PlanExecutor::NO_YIELD); ASSERT_OK(statusWithPlanExecutor.getStatus()); auto exec = std::move(statusWithPlanExecutor.getValue()); @@ -289,12 +290,11 @@ public: // Configure the scan. CollectionScanParams params; - params.collection = coll; params.direction = CollectionScanParams::FORWARD; params.tailable = false; WorkingSet ws; - unique_ptr<CollectionScan> scan(new CollectionScan(&_opCtx, params, &ws, NULL)); + unique_ptr<PlanStage> scan(new CollectionScan(&_opCtx, coll, params, &ws, NULL)); int count = 0; while (count < 10) { @@ -349,12 +349,11 @@ public: // Configure the scan. 
CollectionScanParams params; - params.collection = coll; params.direction = CollectionScanParams::BACKWARD; params.tailable = false; WorkingSet ws; - unique_ptr<CollectionScan> scan(new CollectionScan(&_opCtx, params, &ws, NULL)); + unique_ptr<PlanStage> scan(new CollectionScan(&_opCtx, coll, params, &ws, NULL)); int count = 0; while (count < 10) { diff --git a/src/mongo/dbtests/query_stage_count.cpp b/src/mongo/dbtests/query_stage_count.cpp index c6ab01e62c8..82d7cef339e 100644 --- a/src/mongo/dbtests/query_stage_count.cpp +++ b/src/mongo/dbtests/query_stage_count.cpp @@ -91,11 +91,10 @@ public: WorkingSet ws; CollectionScanParams params; - params.collection = _coll; params.direction = CollectionScanParams::FORWARD; params.tailable = false; - unique_ptr<CollectionScan> scan(new CollectionScan(&_opCtx, params, &ws, NULL)); + unique_ptr<CollectionScan> scan(new CollectionScan(&_opCtx, _coll, params, &ws, NULL)); while (!scan->isEOF()) { WorkingSetID id = WorkingSet::INVALID_ID; PlanStage::StageState state = scan->work(&id); @@ -221,10 +220,9 @@ public: CollectionScan* createCollScan(MatchExpression* expr, WorkingSet* ws) { CollectionScanParams params; - params.collection = _coll; // This child stage gets owned and freed by its parent CountStage - return new CollectionScan(&_opCtx, params, ws, expr); + return new CollectionScan(&_opCtx, _coll, params, ws, expr); } static const char* ns() { diff --git a/src/mongo/dbtests/query_stage_delete.cpp b/src/mongo/dbtests/query_stage_delete.cpp index 5d86035a58e..e22ee615b3f 100644 --- a/src/mongo/dbtests/query_stage_delete.cpp +++ b/src/mongo/dbtests/query_stage_delete.cpp @@ -88,11 +88,10 @@ public: WorkingSet ws; CollectionScanParams params; - params.collection = collection; params.direction = direction; params.tailable = false; - unique_ptr<CollectionScan> scan(new CollectionScan(&_opCtx, params, &ws, NULL)); + unique_ptr<CollectionScan> scan(new CollectionScan(&_opCtx, collection, params, &ws, NULL)); while (!scan->isEOF()) { WorkingSetID id = WorkingSet::INVALID_ID; PlanStage::StageState state = scan->work(&id); @@ -139,7 +138,6 @@ public: // Configure the scan. CollectionScanParams collScanParams; - collScanParams.collection = coll; collScanParams.direction = CollectionScanParams::FORWARD; collScanParams.tailable = false; @@ -152,7 +150,7 @@ public: deleteStageParams, &ws, coll, - new CollectionScan(&_opCtx, collScanParams, &ws, NULL)); + new CollectionScan(&_opCtx, coll, collScanParams, &ws, NULL)); const DeleteStats* stats = static_cast<const DeleteStats*>(deleteStage.getSpecificStats()); diff --git a/src/mongo/dbtests/query_stage_multiplan.cpp b/src/mongo/dbtests/query_stage_multiplan.cpp index 3c2cc9d27d4..d4cdeb8048f 100644 --- a/src/mongo/dbtests/query_stage_multiplan.cpp +++ b/src/mongo/dbtests/query_stage_multiplan.cpp @@ -171,10 +171,9 @@ unique_ptr<PlanStage> getCollScanPlan(OperationContext* opCtx, WorkingSet* sharedWs, MatchExpression* matchExpr) { CollectionScanParams csparams; - csparams.collection = coll; csparams.direction = CollectionScanParams::FORWARD; - unique_ptr<PlanStage> root(new CollectionScan(opCtx, csparams, sharedWs, matchExpr)); + unique_ptr<PlanStage> root(new CollectionScan(opCtx, coll, csparams, sharedWs, matchExpr)); return root; } @@ -581,11 +580,6 @@ TEST_F(QueryStageMultiPlanTest, ShouldReportErrorIfExceedsTimeLimitDuringPlannin unique_ptr<WorkingSet> sharedWs(new WorkingSet()); unique_ptr<PlanStage> ixScanRoot = getIxScanPlan(_opCtx.get(), coll, sharedWs.get(), 7); - // Plan 1: CollScan with matcher. 
- CollectionScanParams csparams; - csparams.collection = coll; - csparams.direction = CollectionScanParams::FORWARD; - // Make the filter. BSONObj filterObj = BSON("foo" << 7); unique_ptr<MatchExpression> filter = makeMatchExpressionFromFilter(_opCtx.get(), filterObj); diff --git a/src/mongo/dbtests/query_stage_sort.cpp b/src/mongo/dbtests/query_stage_sort.cpp index 62b87d30c24..dec254e9a50 100644 --- a/src/mongo/dbtests/query_stage_sort.cpp +++ b/src/mongo/dbtests/query_stage_sort.cpp @@ -366,7 +366,7 @@ public: coll->updateDocument(&_opCtx, *it, oldDoc, newDoc(oldDoc), false, NULL, &args); wuow.commit(); } - ASSERT_OK(exec->restoreState()); + exec->restoreState(); // Read the rest of the data from the queued data stage. while (!queuedDataStage->isEOF()) { @@ -385,7 +385,7 @@ public: wuow.commit(); } } - ASSERT_OK(exec->restoreState()); + exec->restoreState(); // Verify that it's sorted, the right number of documents are returned, and they're all // in the expected range. @@ -465,7 +465,7 @@ public: coll->deleteDocument(&_opCtx, kUninitializedStmtId, *it++, nullOpDebug); wuow.commit(); } - ASSERT_OK(exec->restoreState()); + exec->restoreState(); // Read the rest of the data from the queued data stage. while (!queuedDataStage->isEOF()) { @@ -482,7 +482,7 @@ public: wuow.commit(); } } - ASSERT_OK(exec->restoreState()); + exec->restoreState(); // Regardless of storage engine, all the documents should come back with their objects int count = 0; diff --git a/src/mongo/dbtests/query_stage_update.cpp b/src/mongo/dbtests/query_stage_update.cpp index b5e153294d3..bc07c4ec803 100644 --- a/src/mongo/dbtests/query_stage_update.cpp +++ b/src/mongo/dbtests/query_stage_update.cpp @@ -129,11 +129,10 @@ public: WorkingSet ws; CollectionScanParams params; - params.collection = collection; params.direction = CollectionScanParams::FORWARD; params.tailable = false; - unique_ptr<CollectionScan> scan(new CollectionScan(&_opCtx, params, &ws, NULL)); + unique_ptr<CollectionScan> scan(new CollectionScan(&_opCtx, collection, params, &ws, NULL)); while (!scan->isEOF()) { WorkingSetID id = WorkingSet::INVALID_ID; PlanStage::StageState state = scan->work(&id); @@ -151,11 +150,10 @@ public: WorkingSet ws; CollectionScanParams params; - params.collection = collection; params.direction = direction; params.tailable = false; - unique_ptr<CollectionScan> scan(new CollectionScan(&_opCtx, params, &ws, NULL)); + unique_ptr<CollectionScan> scan(new CollectionScan(&_opCtx, collection, params, &ws, NULL)); while (!scan->isEOF()) { WorkingSetID id = WorkingSet::INVALID_ID; PlanStage::StageState state = scan->work(&id); @@ -300,7 +298,6 @@ public: // Configure the scan. CollectionScanParams collScanParams; - collScanParams.collection = coll; collScanParams.direction = CollectionScanParams::FORWARD; collScanParams.tailable = false; @@ -310,7 +307,8 @@ public: updateParams.canonicalQuery = cq.get(); auto ws = make_unique<WorkingSet>(); - auto cs = make_unique<CollectionScan>(&_opCtx, collScanParams, ws.get(), cq->root()); + auto cs = + make_unique<CollectionScan>(&_opCtx, coll, collScanParams, ws.get(), cq->root()); auto updateStage = make_unique<UpdateStage>(&_opCtx, updateParams, ws.get(), coll, cs.release()); |
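Across the call sites above, the migration follows one of two patterns: code that already runs in an exception-friendly context simply calls `exec->restoreState()` and lets the `DBException` propagate, while `Status`-returning boundaries (for example `MultiIndexBlockImpl::insertAllDocumentsInCollection()` and `PlanYieldPolicy::yield()`) catch and convert. The following is a rough, self-contained sketch of the latter pattern, using hypothetical `Status` and `Executor` stand-ins rather than the real `mongo::Status` and `PlanExecutor`.

```cpp
#include <exception>
#include <stdexcept>
#include <string>

// Hypothetical stand-ins used only to illustrate the call-site pattern.
struct Status {
    bool ok;
    std::string reason;
};

struct Executor {
    bool collectionStillExists = true;

    // After this commit, restoreState() reports failure by throwing rather
    // than by returning a non-OK Status.
    void restoreState() {
        if (!collectionStillExists)
            throw std::runtime_error("QueryPlanKilled: collection dropped during yield");
    }
};

// A Status-returning boundary converts the exception explicitly, analogous to
// the `try { exec->restoreState(); } catch (...) { return exceptionToStatus(); }`
// hunks in the diff.
Status restoreAtStatusBoundary(Executor& exec) {
    try {
        exec.restoreState();
        return {true, ""};
    } catch (const std::exception& ex) {
        return {false, ex.what()};
    }
}
```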