-rw-r--r-- buildscripts/resmokeconfig/suites/concurrency_replication_multi_stmt_txn.yml | 4
-rw-r--r-- buildscripts/resmokeconfig/suites/concurrency_replication_multi_stmt_txn_ubsan.yml | 4
-rw-r--r-- jstests/concurrency/fsm_workloads/drop_index_during_replan.js | 79
-rw-r--r-- jstests/concurrency/fsm_workloads/kill_rooted_or.js | 9
-rw-r--r-- src/mongo/db/SConscript | 1
-rw-r--r-- src/mongo/db/catalog/index_catalog.h | 6
-rw-r--r-- src/mongo/db/catalog/index_catalog_entry.h | 4
-rw-r--r-- src/mongo/db/catalog/index_catalog_impl.cpp | 5
-rw-r--r-- src/mongo/db/catalog/index_catalog_impl.h | 2
-rw-r--r-- src/mongo/db/catalog/index_catalog_noop.h | 5
-rw-r--r-- src/mongo/db/exec/cached_plan.cpp | 10
-rw-r--r-- src/mongo/db/exec/cached_plan.h | 20
-rw-r--r-- src/mongo/db/exec/requires_all_indices_stage.cpp | 46
-rw-r--r-- src/mongo/db/exec/requires_all_indices_stage.h | 81
-rw-r--r-- src/mongo/db/exec/requires_collection_stage.cpp | 11
-rw-r--r-- src/mongo/db/exec/requires_collection_stage.h | 7
-rw-r--r-- src/mongo/db/exec/requires_index_stage.cpp | 3
-rw-r--r-- src/mongo/db/exec/subplan.cpp | 11
-rw-r--r-- src/mongo/db/exec/subplan.h | 9
-rw-r--r-- src/mongo/dbtests/query_stage_cached_plan.cpp | 88
-rw-r--r-- src/mongo/dbtests/query_stage_subplan.cpp | 90
21 files changed, 468 insertions, 27 deletions
diff --git a/buildscripts/resmokeconfig/suites/concurrency_replication_multi_stmt_txn.yml b/buildscripts/resmokeconfig/suites/concurrency_replication_multi_stmt_txn.yml
index 79427047760..7e2f381f67d 100644
--- a/buildscripts/resmokeconfig/suites/concurrency_replication_multi_stmt_txn.yml
+++ b/buildscripts/resmokeconfig/suites/concurrency_replication_multi_stmt_txn.yml
@@ -31,6 +31,10 @@ selector:
- jstests/concurrency/fsm_workloads/multi_statement_transaction_all_commands_same_session.js
- jstests/concurrency/fsm_workloads/snapshot_read_kill_operations.js
+ # Expects reads to die with a particular error, but other errors are possible if the read is part
+ # of a transaction (e.g. ErrorCodes.LockTimeout).
+ - jstests/concurrency/fsm_workloads/drop_index_during_replan.js
+
exclude_with_any_tags:
- requires_sharding
diff --git a/buildscripts/resmokeconfig/suites/concurrency_replication_multi_stmt_txn_ubsan.yml b/buildscripts/resmokeconfig/suites/concurrency_replication_multi_stmt_txn_ubsan.yml
index 49229732693..64acf2f107e 100644
--- a/buildscripts/resmokeconfig/suites/concurrency_replication_multi_stmt_txn_ubsan.yml
+++ b/buildscripts/resmokeconfig/suites/concurrency_replication_multi_stmt_txn_ubsan.yml
@@ -31,6 +31,10 @@ selector:
- jstests/concurrency/fsm_workloads/multi_statement_transaction_all_commands_same_session.js
- jstests/concurrency/fsm_workloads/snapshot_read_kill_operations.js
+ # Expects reads to die with a particular error, but other errors are possible if the read is part
+ # of a transaction (e.g. ErrorCodes.LockTimeout).
+ - jstests/concurrency/fsm_workloads/drop_index_during_replan.js
+
exclude_with_any_tags:
- requires_sharding
diff --git a/jstests/concurrency/fsm_workloads/drop_index_during_replan.js b/jstests/concurrency/fsm_workloads/drop_index_during_replan.js
new file mode 100644
index 00000000000..637fbbd4954
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/drop_index_during_replan.js
@@ -0,0 +1,79 @@
+'use strict';
+
+/**
+ * drop_index_during_replan.js
+ *
+ * Sets up a situation where there is an active plan cache entry for an index scan plan using
+ * {a: 1}. Then provokes a replan by running a query of the same shape for which index {b: 1} is
+ * the better choice. At the same time, other threads may be dropping {b: 1}. This tests that the
+ * replanning process is robust to concurrent index drops.
+ */
+var $config = (function() {
+
+ let data = {
+ collName: 'drop_index_during_replan',
+ indexSpecs: [
+ {a: 1},
+ {b: 1},
+ ],
+ };
+
+ let states = {
+ query: function query(db, collName) {
+ const coll = db[collName];
+ try {
+ // By running this query multiple times, we expect to create an active plan cache
+ // entry whose plan uses index {a: 1}.
+ for (let i = 0; i < 2; ++i) {
+ assertAlways.eq(
+ coll.find({a: "common_value_a", b: "unique_value_15"}).itcount(), 1);
+ }
+
+ // Run a query with the same shape, but with different parameters. For this query,
+ // we expect the {a: 1} plan to be evicted during replanning, in favor of {b: 1}.
+ // The query may fail due to a concurrent index drop.
+ assertAlways.eq(coll.find({a: "unique_value_15", b: "common_value_b"}).itcount(),
+ 1);
+ } catch (e) {
+ // We expect any errors to be due to the query getting killed.
+ assertAlways.eq(e.code, ErrorCodes.QueryPlanKilled);
+ }
+ },
+
+ dropIndex: function dropIndex(db, collName) {
+ // Drop the index which we expect to be selected after replanning completes. We don't
+ // assert that the drop succeeded, because it's possible that another thread has
+ // already dropped this index.
+ db[collName].dropIndex({b: 1});
+
+ // Recreate the index that was dropped.
+ assertAlways.commandWorked(db[collName].createIndex({b: 1}));
+ }
+ };
+
+ let transitions = {query: {query: 0.8, dropIndex: 0.2}, dropIndex: {query: 1}};
+
+ function setup(db, collName, cluster) {
+ this.indexSpecs.forEach(indexSpec => {
+ assertAlways.commandWorked(db[collName].createIndex(indexSpec));
+ });
+
+ for (let i = 0; i < 200; ++i) {
+ assertAlways.commandWorked(
+ db[collName].insert({a: "common_value_a", b: "unique_value_" + i}));
+ assertAlways.commandWorked(
+ db[collName].insert({a: "unique_value_" + i, b: "common_value_b"}));
+ }
+ }
+
+ return {
+ threadCount: 10,
+ iterations: 50,
+ data: data,
+ states: states,
+ startState: 'query',
+ transitions: transitions,
+ setup: setup
+ };
+
+})();
diff --git a/jstests/concurrency/fsm_workloads/kill_rooted_or.js b/jstests/concurrency/fsm_workloads/kill_rooted_or.js
index c74e3b05e87..dab63ba8938 100644
--- a/jstests/concurrency/fsm_workloads/kill_rooted_or.js
+++ b/jstests/concurrency/fsm_workloads/kill_rooted_or.js
@@ -22,15 +22,15 @@ var $config = (function() {
{a: 1, c: 1},
{b: 1},
{b: 1, c: 1},
- ]
+ ],
+ numDocs: 200,
};
var states = {
query: function query(db, collName) {
var cursor = db[this.collName].find({$or: [{a: 0}, {b: 0}]});
try {
- // No documents are ever inserted into the collection.
- assertAlways.eq(0, cursor.itcount());
+ assertAlways.eq(this.numDocs, cursor.itcount());
} catch (e) {
// Ignore errors due to the plan executor being killed.
}
@@ -67,6 +67,9 @@ var $config = (function() {
this.indexSpecs.forEach(indexSpec => {
assertAlways.commandWorked(db[this.collName].createIndex(indexSpec));
});
+ for (let i = 0; i < this.numDocs; ++i) {
+ assertAlways.commandWorked(db[this.collName].insert({a: 0, b: 0, c: 0}));
+ }
}
return {
diff --git a/src/mongo/db/SConscript b/src/mongo/db/SConscript
index 2bc852aa9a0..8c76d6ee9c4 100644
--- a/src/mongo/db/SConscript
+++ b/src/mongo/db/SConscript
@@ -1100,6 +1100,7 @@ env.Library(
'exec/projection_exec.cpp',
'exec/queued_data_stage.cpp',
'exec/record_store_fast_count.cpp',
+ 'exec/requires_all_indices_stage.cpp',
'exec/requires_collection_stage.cpp',
'exec/requires_index_stage.cpp',
'exec/shard_filter.cpp',
diff --git a/src/mongo/db/catalog/index_catalog.h b/src/mongo/db/catalog/index_catalog.h
index 4e8f70d6d66..68ab42b8c92 100644
--- a/src/mongo/db/catalog/index_catalog.h
+++ b/src/mongo/db/catalog/index_catalog.h
@@ -292,6 +292,12 @@ public:
virtual std::shared_ptr<const IndexCatalogEntry> getEntryShared(
const IndexDescriptor*) const = 0;
+ /**
+ * Returns a vector of shared pointers to all index entries. Excludes unfinished indexes.
+ */
+ virtual std::vector<std::shared_ptr<const IndexCatalogEntry>> getAllReadyEntriesShared()
+ const = 0;
+
virtual IndexAccessMethod* getIndex(const IndexDescriptor* const desc) = 0;
virtual const IndexAccessMethod* getIndex(const IndexDescriptor* const desc) const = 0;
diff --git a/src/mongo/db/catalog/index_catalog_entry.h b/src/mongo/db/catalog/index_catalog_entry.h
index 192788e3948..1e52a6ac447 100644
--- a/src/mongo/db/catalog/index_catalog_entry.h
+++ b/src/mongo/db/catalog/index_catalog_entry.h
@@ -207,6 +207,10 @@ public:
_entries.push_back(std::move(entry));
}
+ std::vector<std::shared_ptr<const IndexCatalogEntry>> getAllEntries() const {
+ return {_entries.begin(), _entries.end()};
+ }
+
private:
std::vector<std::shared_ptr<IndexCatalogEntry>> _entries;
};
diff --git a/src/mongo/db/catalog/index_catalog_impl.cpp b/src/mongo/db/catalog/index_catalog_impl.cpp
index 5d0c9542dd7..88805d030fd 100644
--- a/src/mongo/db/catalog/index_catalog_impl.cpp
+++ b/src/mongo/db/catalog/index_catalog_impl.cpp
@@ -1109,6 +1109,11 @@ std::shared_ptr<const IndexCatalogEntry> IndexCatalogImpl::getEntryShared(
return _buildingIndexes.findShared(indexDescriptor);
}
+std::vector<std::shared_ptr<const IndexCatalogEntry>> IndexCatalogImpl::getAllReadyEntriesShared()
+ const {
+ return _readyIndexes.getAllEntries();
+}
+
const IndexDescriptor* IndexCatalogImpl::refreshEntry(OperationContext* opCtx,
const IndexDescriptor* oldDesc) {
invariant(opCtx->lockState()->isCollectionLockedForMode(_collection->ns().ns(), MODE_X));
diff --git a/src/mongo/db/catalog/index_catalog_impl.h b/src/mongo/db/catalog/index_catalog_impl.h
index 121bcf69bc5..ead5634dbbb 100644
--- a/src/mongo/db/catalog/index_catalog_impl.h
+++ b/src/mongo/db/catalog/index_catalog_impl.h
@@ -166,6 +166,8 @@ public:
std::shared_ptr<const IndexCatalogEntry> getEntryShared(const IndexDescriptor*) const override;
+ std::vector<std::shared_ptr<const IndexCatalogEntry>> getAllReadyEntriesShared() const override;
+
IndexAccessMethod* getIndex(const IndexDescriptor* desc) override;
const IndexAccessMethod* getIndex(const IndexDescriptor* desc) const override;
diff --git a/src/mongo/db/catalog/index_catalog_noop.h b/src/mongo/db/catalog/index_catalog_noop.h
index 34ea94afee2..9ef0f7d1fc4 100644
--- a/src/mongo/db/catalog/index_catalog_noop.h
+++ b/src/mongo/db/catalog/index_catalog_noop.h
@@ -119,6 +119,11 @@ public:
return nullptr;
}
+ std::vector<std::shared_ptr<const IndexCatalogEntry>> getAllReadyEntriesShared()
+ const override {
+ return {};
+ }
+
IndexAccessMethod* getIndex(const IndexDescriptor* const desc) override {
return nullptr;
}
diff --git a/src/mongo/db/exec/cached_plan.cpp b/src/mongo/db/exec/cached_plan.cpp
index 2955132451e..5f348ae3533 100644
--- a/src/mongo/db/exec/cached_plan.cpp
+++ b/src/mongo/db/exec/cached_plan.cpp
@@ -63,7 +63,7 @@ CachedPlanStage::CachedPlanStage(OperationContext* opCtx,
const QueryPlannerParams& params,
size_t decisionWorks,
PlanStage* root)
- : RequiresCollectionStage(kStageType, opCtx, collection),
+ : RequiresAllIndicesStage(kStageType, opCtx, collection),
_ws(ws),
_canonicalQuery(cq),
_plannerParams(params),
@@ -77,6 +77,14 @@ Status CachedPlanStage::pickBestPlan(PlanYieldPolicy* yieldPolicy) {
// make sense.
ScopedTimer timer(getClock(), &_commonStats.executionTimeMillis);
+ // During plan selection, the list of indices we are using to plan must remain stable, so the
+ // query will die during yield recovery if any index has been dropped. However, once plan
+ // selection completes successfully, we no longer need all indices to stick around. The selected
+ // plan should safely die on yield recovery if it is using an index that gets dropped.
+ //
+ // Dismiss the requirement that no indices can be dropped when this method returns.
+ ON_BLOCK_EXIT([this] { releaseAllIndicesRequirement(); });
+
// If we work this many times during the trial period, then we will replan the
// query from scratch.
size_t maxWorksBeforeReplan =
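The ON_BLOCK_EXIT call added above is a scope guard: the lambda runs when pickBestPlan() returns, whether it returns normally or unwinds because plan selection threw, so the requirement that all indices survive is always dismissed exactly once. A minimal standalone sketch of the idiom, using an illustrative guard class and stage (not the server's real ScopeGuard or CachedPlanStage):

    #include <functional>
    #include <iostream>
    #include <utility>

    // Minimal scope guard: runs the stored callback when it goes out of scope.
    class ScopeGuard {
    public:
        explicit ScopeGuard(std::function<void()> fn) : _fn(std::move(fn)) {}
        ~ScopeGuard() {
            _fn();
        }

    private:
        std::function<void()> _fn;
    };

    struct FakeStage {
        bool requiresAllIndices = true;

        void releaseAllIndicesRequirement() {
            requiresAllIndices = false;
        }

        void pickBestPlan() {
            // Dismiss the requirement when this method returns, even if the
            // trial period or replanning below were to throw.
            ScopeGuard dismiss([this] { releaseAllIndicesRequirement(); });

            // ... trial period / replanning would run here ...
        }
    };

    int main() {
        FakeStage stage;
        stage.pickBestPlan();
        std::cout << std::boolalpha << stage.requiresAllIndices << std::endl;  // prints: false
    }

A guard is preferable to a call at the end of the function here because pickBestPlan() has multiple exit paths, including exceptions thrown during yield recovery.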
diff --git a/src/mongo/db/exec/cached_plan.h b/src/mongo/db/exec/cached_plan.h
index d1c6fcaaf8f..2ea991517cd 100644
--- a/src/mongo/db/exec/cached_plan.h
+++ b/src/mongo/db/exec/cached_plan.h
@@ -33,7 +33,7 @@
#include <memory>
#include <queue>
-#include "mongo/db/exec/requires_collection_stage.h"
+#include "mongo/db/exec/requires_all_indices_stage.h"
#include "mongo/db/exec/working_set.h"
#include "mongo/db/jsobj.h"
#include "mongo/db/query/canonical_query.h"
@@ -46,13 +46,16 @@ namespace mongo {
class PlanYieldPolicy;
/**
- * This stage outputs its mainChild, and possibly its backup child
- * and also updates the cache.
- *
- * Preconditions: Valid RecordId.
+ * Runs a trial period in order to evaluate the cost of a cached plan. If the cost is unexpectedly
+ * high, the plan cache entry is deactivated and we use multi-planning to select an entirely new
+ * winning plan. This process is called "replanning".
*
+ * This stage requires all indices to stay intact during the trial period so that replanning can
+ * occur with the set of indices in 'params'. As a future improvement, we could instead refresh the
+ * list of indices in 'params' prior to replanning, and thus avoid inheriting from
+ * RequiresAllIndicesStage.
*/
-class CachedPlanStage final : public RequiresCollectionStage {
+class CachedPlanStage final : public RequiresAllIndicesStage {
public:
CachedPlanStage(OperationContext* opCtx,
Collection* collection,
@@ -86,11 +89,6 @@ public:
*/
Status pickBestPlan(PlanYieldPolicy* yieldPolicy);
-protected:
- void doSaveStateRequiresCollection() final {}
-
- void doRestoreStateRequiresCollection() final {}
-
private:
/**
* Passes stats from the trial period run of the cached plan to the plan cache.
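The new class comment above describes replanning in prose: the cached plan is given a trial period whose budget is derived from 'decisionWorks' (the work count recorded in the plan cache entry), and if the trial exceeds that budget the entry is deactivated and multi-planning selects a new winner. A rough sketch of just that budget decision, with invented names (cachedPlanStillGood, evictionRatio) standing in for the stage's internals:

    #include <cstddef>
    #include <iostream>

    // Illustrative only: models the replan decision described above. The cached
    // plan is allowed a multiple of 'decisionWorks' units of work during its
    // trial period before we give up on it and replan from scratch.
    bool cachedPlanStillGood(std::size_t decisionWorks,
                             double evictionRatio,
                             std::size_t worksNeededThisTime) {
        const auto maxWorksBeforeReplan =
            static_cast<std::size_t>(evictionRatio * decisionWorks);
        return worksNeededThisTime <= maxWorksBeforeReplan;
    }

    int main() {
        // A cache entry built at 10 works, a 10x budget, and a run that needs 150
        // works: the cost is unexpectedly high, so the entry would be deactivated
        // and the query replanned.
        std::cout << std::boolalpha << cachedPlanStillGood(10, 10.0, 150) << std::endl;
    }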
diff --git a/src/mongo/db/exec/requires_all_indices_stage.cpp b/src/mongo/db/exec/requires_all_indices_stage.cpp
new file mode 100644
index 00000000000..be26a5a0402
--- /dev/null
+++ b/src/mongo/db/exec/requires_all_indices_stage.cpp
@@ -0,0 +1,46 @@
+/**
+ * Copyright (C) 2018-present MongoDB, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the Server Side Public License, version 1,
+ * as published by MongoDB, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * Server Side Public License for more details.
+ *
+ * You should have received a copy of the Server Side Public License
+ * along with this program. If not, see
+ * <http://www.mongodb.com/licensing/server-side-public-license>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the Server Side Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#include "mongo/platform/basic.h"
+
+#include "mongo/db/exec/requires_all_indices_stage.h"
+
+namespace mongo {
+
+void RequiresAllIndicesStage::doRestoreStateRequiresCollection() {
+ size_t i = 0;
+ for (auto&& index : _indexCatalogEntries) {
+ uassert(ErrorCodes::QueryPlanKilled,
+ str::stream() << "query plan killed :: index '" << _indexNames[i] << "' dropped",
+ index.lock());
+ ++i;
+ }
+}
+
+} // namespace mongo
diff --git a/src/mongo/db/exec/requires_all_indices_stage.h b/src/mongo/db/exec/requires_all_indices_stage.h
new file mode 100644
index 00000000000..f10492d160b
--- /dev/null
+++ b/src/mongo/db/exec/requires_all_indices_stage.h
@@ -0,0 +1,81 @@
+/**
+ * Copyright (C) 2018-present MongoDB, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the Server Side Public License, version 1,
+ * as published by MongoDB, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * Server Side Public License for more details.
+ *
+ * You should have received a copy of the Server Side Public License
+ * along with this program. If not, see
+ * <http://www.mongodb.com/licensing/server-side-public-license>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the Server Side Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#pragma once
+
+#include "mongo/db/exec/requires_collection_stage.h"
+
+namespace mongo {
+
+/**
+ * A base class for plan stages which require access to _all_ indices of a collection, and should
+ * cause the query to die on yield recovery if any index is dropped. Plan stages which depend on a
+ * single index, such as IXSCAN, should instead use RequiresIndexStage.
+ */
+class RequiresAllIndicesStage : public RequiresCollectionStage {
+public:
+ RequiresAllIndicesStage(const char* stageType, OperationContext* opCtx, const Collection* coll)
+ : RequiresCollectionStage(stageType, opCtx, coll) {
+ auto allEntriesShared = coll->getIndexCatalog()->getAllReadyEntriesShared();
+ _indexCatalogEntries.reserve(allEntriesShared.size());
+ _indexNames.reserve(allEntriesShared.size());
+ for (auto&& index : allEntriesShared) {
+ _indexCatalogEntries.emplace_back(index);
+ _indexNames.push_back(index->descriptor()->indexName());
+ }
+ }
+
+ virtual ~RequiresAllIndicesStage() = default;
+
+protected:
+ void doSaveStateRequiresCollection() override final {}
+
+ void doRestoreStateRequiresCollection() override final;
+
+ /**
+ * Subclasses may call this to indicate that they no longer require all indices on the
+ * collection to survive. After calling this, yield recovery will no longer fail due to an
+ * index drop.
+ */
+ void releaseAllIndicesRequirement() {
+ _indexCatalogEntries.clear();
+ _indexNames.clear();
+ }
+
+private:
+ // This stage holds weak pointers to all of the index catalog entries known at the time of
+ // construction. During yield recovery, we attempt to lock each weak pointer in order to
+ // determine whether an index we rely on has been destroyed. If any index has been destroyed,
+ // then we throw a query-fatal exception during restore.
+ std::vector<std::weak_ptr<const IndexCatalogEntry>> _indexCatalogEntries;
+
+ // The names of the indices above. Used for error reporting.
+ std::vector<std::string> _indexNames;
+};
+
+} // namespace mongo
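The weak-pointer bookkeeping above can be seen in isolation: the stage captures std::weak_ptr copies of the catalog's shared_ptr-owned entries at construction, and a failed lock() during yield recovery means the catalog destroyed that entry (the index was dropped) while the plan was yielded. A self-contained sketch using plain standard-library types rather than IndexCatalogEntry:

    #include <iostream>
    #include <memory>
    #include <string>
    #include <vector>

    struct IndexEntry {
        std::string name;
    };

    int main() {
        // The "catalog" owns the entries via shared_ptr.
        auto entryA = std::make_shared<IndexEntry>(IndexEntry{"a_1"});
        auto entryB = std::make_shared<IndexEntry>(IndexEntry{"b_1"});

        // The stage records only weak_ptrs, so it does not keep dropped indexes alive.
        std::vector<std::weak_ptr<IndexEntry>> observed{entryA, entryB};

        // Simulate an index drop: the catalog releases its ownership of "b_1".
        entryB.reset();

        // "Yield recovery": lock each weak_ptr. A failed lock() means that index
        // was destroyed while we were yielded, so the query plan must be killed.
        for (const auto& weak : observed) {
            if (auto locked = weak.lock()) {
                std::cout << "index '" << locked->name << "' still valid\n";
            } else {
                std::cout << "an observed index was dropped; kill the query plan\n";
            }
        }
    }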
diff --git a/src/mongo/db/exec/requires_collection_stage.cpp b/src/mongo/db/exec/requires_collection_stage.cpp
index 5825485dea6..c4794c3bfd2 100644
--- a/src/mongo/db/exec/requires_collection_stage.cpp
+++ b/src/mongo/db/exec/requires_collection_stage.cpp
@@ -50,9 +50,18 @@ void RequiresCollectionStageBase<CollectionT>::doRestoreState() {
const UUIDCatalog& catalog = UUIDCatalog::get(getOpCtx());
_collection = catalog.lookupCollectionByUUID(_collectionUUID);
uassert(ErrorCodes::QueryPlanKilled,
- str::stream() << "UUID " << _collectionUUID << " no longer exists.",
+ str::stream() << "Collection dropped. UUID " << _collectionUUID << " no longer exists.",
_collection);
+ // TODO SERVER-31695: Allow queries to survive collection rename, rather than throwing here when
+ // a rename has happened during yield.
+ uassert(ErrorCodes::QueryPlanKilled,
+ str::stream() << "Collection with UUID " << _collectionUUID << " was renamed from '"
+ << _nss.ns()
+ << "' to '"
+ << _collection->ns().ns()
+ << "'",
+ _nss == _collection->ns());
+
doRestoreStateRequiresCollection();
}
diff --git a/src/mongo/db/exec/requires_collection_stage.h b/src/mongo/db/exec/requires_collection_stage.h
index 9ebd96f84f0..f57e2bf8987 100644
--- a/src/mongo/db/exec/requires_collection_stage.h
+++ b/src/mongo/db/exec/requires_collection_stage.h
@@ -56,7 +56,8 @@ public:
RequiresCollectionStageBase(const char* stageType, OperationContext* opCtx, CollectionT coll)
: PlanStage(stageType, opCtx),
_collection(coll),
- _collectionUUID(_collection->uuid().get()) {
+ _collectionUUID(_collection->uuid().get()),
+ _nss(_collection->ns()) {
invariant(_collection);
}
@@ -88,6 +89,10 @@ protected:
private:
CollectionT _collection;
const UUID _collectionUUID;
+
+ // TODO SERVER-31695: The namespace will no longer be needed once queries can survive collection
+ // renames.
+ const NamespaceString _nss;
};
// Type alias for use by PlanStages that read a Collection.
diff --git a/src/mongo/db/exec/requires_index_stage.cpp b/src/mongo/db/exec/requires_index_stage.cpp
index 4c7150e1920..8718f98d1f4 100644
--- a/src/mongo/db/exec/requires_index_stage.cpp
+++ b/src/mongo/db/exec/requires_index_stage.cpp
@@ -51,8 +51,7 @@ void RequiresIndexStage::doRestoreStateRequiresCollection() {
// our index is no longer valid, and the query should die.
_indexCatalogEntry = _weakIndexCatalogEntry.lock();
uassert(ErrorCodes::QueryPlanKilled,
- str::stream() << "query plan killed :: index named '" << _indexName
- << "' is no longer valid",
+ str::stream() << "query plan killed :: index '" << _indexName << "' dropped",
_indexCatalogEntry);
_indexDescriptor = _indexCatalogEntry->descriptor();
diff --git a/src/mongo/db/exec/subplan.cpp b/src/mongo/db/exec/subplan.cpp
index 2ac55e6870c..200caa778b7 100644
--- a/src/mongo/db/exec/subplan.cpp
+++ b/src/mongo/db/exec/subplan.cpp
@@ -40,6 +40,7 @@
#include "mongo/db/exec/multi_plan.h"
#include "mongo/db/exec/scoped_timer.h"
#include "mongo/db/matcher/extensions_callback_real.h"
+#include "mongo/db/query/get_executor.h"
#include "mongo/db/query/plan_executor.h"
#include "mongo/db/query/planner_access.h"
#include "mongo/db/query/planner_analysis.h"
@@ -65,7 +66,7 @@ SubplanStage::SubplanStage(OperationContext* opCtx,
WorkingSet* ws,
const QueryPlannerParams& params,
CanonicalQuery* cq)
- : RequiresCollectionStage(kStageType, opCtx, collection),
+ : RequiresAllIndicesStage(kStageType, opCtx, collection),
_ws(ws),
_plannerParams(params),
_query(cq) {
@@ -435,6 +436,14 @@ Status SubplanStage::pickBestPlan(PlanYieldPolicy* yieldPolicy) {
// work that happens here, so this is needed for the time accounting to make sense.
ScopedTimer timer(getClock(), &_commonStats.executionTimeMillis);
+ // During plan selection, the list of indices we are using to plan must remain stable, so the
+ // query will die during yield recovery if any index has been dropped. However, once plan
+ // selection completes successfully, we no longer need all indices to stick around. The selected
+ // plan should safely die on yield recovery if it is using an index that gets dropped.
+ //
+ // Dismiss the requirement that no indices can be dropped when this method returns.
+ ON_BLOCK_EXIT([this] { releaseAllIndicesRequirement(); });
+
// Plan each branch of the $or.
Status subplanningStatus = planSubqueries();
if (!subplanningStatus.isOK()) {
diff --git a/src/mongo/db/exec/subplan.h b/src/mongo/db/exec/subplan.h
index 3f589a2b8ef..a56c47532e2 100644
--- a/src/mongo/db/exec/subplan.h
+++ b/src/mongo/db/exec/subplan.h
@@ -37,7 +37,7 @@
#include "mongo/base/owned_pointer_vector.h"
#include "mongo/base/status.h"
#include "mongo/base/string_data.h"
-#include "mongo/db/exec/requires_collection_stage.h"
+#include "mongo/db/exec/requires_all_indices_stage.h"
#include "mongo/db/query/canonical_query.h"
#include "mongo/db/query/plan_cache.h"
#include "mongo/db/query/plan_yield_policy.h"
@@ -69,7 +69,7 @@ class OperationContext;
*
* --Plans for entire rooted $or queries are neither written to nor read from the plan cache.
*/
-class SubplanStage final : public RequiresCollectionStage {
+class SubplanStage final : public RequiresAllIndicesStage {
public:
SubplanStage(OperationContext* opCtx,
const Collection* collection,
@@ -127,11 +127,6 @@ public:
return _compositeSolution.get();
}
-protected:
- void doSaveStateRequiresCollection() final {}
-
- void doRestoreStateRequiresCollection() final {}
-
private:
/**
* A class used internally in order to keep track of the results of planning
diff --git a/src/mongo/dbtests/query_stage_cached_plan.cpp b/src/mongo/dbtests/query_stage_cached_plan.cpp
index 72b62e1c8dd..fac004bd41d 100644
--- a/src/mongo/dbtests/query_stage_cached_plan.cpp
+++ b/src/mongo/dbtests/query_stage_cached_plan.cpp
@@ -36,6 +36,7 @@
#include "mongo/db/catalog/database_holder.h"
#include "mongo/db/client.h"
#include "mongo/db/db_raii.h"
+#include "mongo/db/dbdirectclient.h"
#include "mongo/db/exec/cached_plan.h"
#include "mongo/db/exec/queued_data_stage.h"
#include "mongo/db/jsobj.h"
@@ -68,6 +69,8 @@ std::unique_ptr<CanonicalQuery> canonicalQueryFromFilterObj(OperationContext* op
class QueryStageCachedPlan : public unittest::Test {
public:
+ QueryStageCachedPlan() : _client(&_opCtx) {}
+
void setUp() {
// If collection exists already, we need to drop it.
dropCollection();
@@ -90,6 +93,10 @@ public:
ASSERT_OK(dbtests::createIndex(&_opCtx, nss.ns(), obj));
}
+ void dropIndex(BSONObj keyPattern) {
+ _client.dropIndex(nss.ns(), std::move(keyPattern));
+ }
+
void dropCollection() {
Lock::DBLock dbLock(&_opCtx, nss.db(), MODE_X);
Database* database = DatabaseHolder::getDatabaseHolder().get(&_opCtx, nss.db());
@@ -162,6 +169,7 @@ protected:
const ServiceContext::UniqueOperationContext _opCtxPtr = cc().makeOperationContext();
OperationContext& _opCtx = *_opCtxPtr;
WorkingSet _ws;
+ DBDirectClient _client{&_opCtx};
};
/**
@@ -419,4 +427,84 @@ TEST_F(QueryStageCachedPlan, EntriesAreNotDeactivatedWhenInactiveEntriesDisabled
ASSERT_EQ(cache->get(*shapeCq).state, PlanCache::CacheEntryState::kPresentActive);
}
+TEST_F(QueryStageCachedPlan, ThrowsOnYieldRecoveryWhenIndexIsDroppedBeforePlanSelection) {
+ // Create an index which we will drop later on.
+ BSONObj keyPattern = BSON("c" << 1);
+ addIndex(keyPattern);
+
+ Collection* collection = AutoGetCollectionForReadCommand{&_opCtx, nss}.getCollection();
+ ASSERT(collection);
+
+ // Canonicalize a match-all query against the collection.
+ auto qr = stdx::make_unique<QueryRequest>(nss);
+ auto statusWithCQ = CanonicalQuery::canonicalize(opCtx(), std::move(qr));
+ ASSERT_OK(statusWithCQ.getStatus());
+ const std::unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
+
+ // We shouldn't have anything in the plan cache for this shape yet.
+ PlanCache* cache = collection->infoCache()->getPlanCache();
+ ASSERT(cache);
+
+ // Get planner params.
+ QueryPlannerParams plannerParams;
+ fillOutPlannerParams(&_opCtx, collection, cq.get(), &plannerParams);
+
+ const size_t decisionWorks = 10;
+ CachedPlanStage cachedPlanStage(&_opCtx,
+ collection,
+ &_ws,
+ cq.get(),
+ plannerParams,
+ decisionWorks,
+ new QueuedDataStage(&_opCtx, &_ws));
+
+ // Drop an index while the CachedPlanStage is in a saved state. Restoring should fail, since we
+ // may still need the dropped index for plan selection.
+ cachedPlanStage.saveState();
+ dropIndex(keyPattern);
+ ASSERT_THROWS_CODE(cachedPlanStage.restoreState(), DBException, ErrorCodes::QueryPlanKilled);
+}
+
+TEST_F(QueryStageCachedPlan, DoesNotThrowOnYieldRecoveryWhenIndexIsDroppedAfterPlanSelection) {
+ // Create an index which we will drop later on.
+ BSONObj keyPattern = BSON("c" << 1);
+ addIndex(keyPattern);
+
+ Collection* collection = AutoGetCollectionForReadCommand{&_opCtx, nss}.getCollection();
+ ASSERT(collection);
+
+ // Canonicalize a match-all query against the collection.
+ auto qr = stdx::make_unique<QueryRequest>(nss);
+ auto statusWithCQ = CanonicalQuery::canonicalize(opCtx(), std::move(qr));
+ ASSERT_OK(statusWithCQ.getStatus());
+ const std::unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
+
+ // We shouldn't have anything in the plan cache for this shape yet.
+ PlanCache* cache = collection->infoCache()->getPlanCache();
+ ASSERT(cache);
+
+ // Get planner params.
+ QueryPlannerParams plannerParams;
+ fillOutPlannerParams(&_opCtx, collection, cq.get(), &plannerParams);
+
+ const size_t decisionWorks = 10;
+ CachedPlanStage cachedPlanStage(&_opCtx,
+ collection,
+ &_ws,
+ cq.get(),
+ plannerParams,
+ decisionWorks,
+ new QueuedDataStage(&_opCtx, &_ws));
+
+ PlanYieldPolicy yieldPolicy(PlanExecutor::YIELD_MANUAL,
+ _opCtx.getServiceContext()->getFastClockSource());
+ ASSERT_OK(cachedPlanStage.pickBestPlan(&yieldPolicy));
+
+ // Drop an index while the CachedPlanStage is in a saved state. We should be able to restore
+ // successfully.
+ cachedPlanStage.saveState();
+ dropIndex(keyPattern);
+ cachedPlanStage.restoreState();
+}
+
} // namespace QueryStageCachedPlan
diff --git a/src/mongo/dbtests/query_stage_subplan.cpp b/src/mongo/dbtests/query_stage_subplan.cpp
index b413f6908a7..8b8f94fb9b3 100644
--- a/src/mongo/dbtests/query_stage_subplan.cpp
+++ b/src/mongo/dbtests/query_stage_subplan.cpp
@@ -68,6 +68,10 @@ public:
ASSERT_OK(dbtests::createIndex(opCtx(), nss.ns(), obj));
}
+ void dropIndex(BSONObj keyPattern) {
+ _client.dropIndex(nss.ns(), std::move(keyPattern));
+ }
+
void insert(const BSONObj& doc) {
_client.insert(nss.ns(), doc);
}
@@ -566,5 +570,91 @@ TEST_F(QueryStageSubplanTest, ShouldReportErrorIfKilledDuringPlanning) {
ASSERT_EQ(ErrorCodes::QueryPlanKilled, subplanStage.pickBestPlan(&alwaysPlanKilledYieldPolicy));
}
+TEST_F(QueryStageSubplanTest, ShouldThrowOnRestoreIfIndexDroppedBeforePlanSelection) {
+ Collection* collection = nullptr;
+ {
+ dbtests::WriteContextForTests ctx{opCtx(), nss.ns()};
+ addIndex(BSON("p1" << 1 << "opt1" << 1));
+ addIndex(BSON("p1" << 1 << "opt2" << 1));
+ addIndex(BSON("p2" << 1 << "opt1" << 1));
+ addIndex(BSON("p2" << 1 << "opt2" << 1));
+ addIndex(BSON("irrelevant" << 1));
+
+ collection = ctx.getCollection();
+ ASSERT(collection);
+ }
+
+ // Build a query with a rooted $or.
+ auto queryRequest = stdx::make_unique<QueryRequest>(nss);
+ queryRequest->setFilter(BSON("$or" << BSON_ARRAY(BSON("p1" << 1) << BSON("p2" << 2))));
+ auto canonicalQuery =
+ uassertStatusOK(CanonicalQuery::canonicalize(opCtx(), std::move(queryRequest)));
+
+ // Fill out planner params using the five indices created above: two candidates for each
+ // $or branch, plus one index that is irrelevant to the query.
+ QueryPlannerParams params;
+ fillOutPlannerParams(opCtx(), collection, canonicalQuery.get(), &params);
+
+ // Create the SubplanStage.
+ WorkingSet workingSet;
+ SubplanStage subplanStage(opCtx(), collection, &workingSet, params, canonicalQuery.get());
+
+ // Mimic a yield by saving the state of the subplan stage. Then, while yielded, drop an
+ // index that the query does not use.
+ subplanStage.saveState();
+ dropIndex(BSON("irrelevant" << 1));
+
+ // Attempt to restore state. This should throw due to the index drop. As a future improvement, we
+ // may wish to make the subplan stage tolerate drops of indices it is not using.
+ ASSERT_THROWS_CODE(subplanStage.restoreState(), DBException, ErrorCodes::QueryPlanKilled);
+}
+
+TEST_F(QueryStageSubplanTest, ShouldNotThrowOnRestoreIfIndexDroppedAfterPlanSelection) {
+ Collection* collection = nullptr;
+ {
+ dbtests::WriteContextForTests ctx{opCtx(), nss.ns()};
+ addIndex(BSON("p1" << 1 << "opt1" << 1));
+ addIndex(BSON("p1" << 1 << "opt2" << 1));
+ addIndex(BSON("p2" << 1 << "opt1" << 1));
+ addIndex(BSON("p2" << 1 << "opt2" << 1));
+ addIndex(BSON("irrelevant" << 1));
+
+ collection = ctx.getCollection();
+ ASSERT(collection);
+ }
+
+ // Build a query with a rooted $or.
+ auto queryRequest = stdx::make_unique<QueryRequest>(nss);
+ queryRequest->setFilter(BSON("$or" << BSON_ARRAY(BSON("p1" << 1) << BSON("p2" << 2))));
+ auto canonicalQuery =
+ uassertStatusOK(CanonicalQuery::canonicalize(opCtx(), std::move(queryRequest)));
+
+ // Fill out planner params using the five indices created above: two candidates for each
+ // $or branch, plus one index that is irrelevant to the query.
+ QueryPlannerParams params;
+ fillOutPlannerParams(opCtx(), collection, canonicalQuery.get(), &params);
+
+ boost::optional<AutoGetCollectionForReadCommand> collLock;
+ collLock.emplace(opCtx(), nss);
+
+ // Create the SubplanStage.
+ WorkingSet workingSet;
+ SubplanStage subplanStage(opCtx(), collection, &workingSet, params, canonicalQuery.get());
+
+ PlanYieldPolicy yieldPolicy(PlanExecutor::YIELD_MANUAL, serviceContext()->getFastClockSource());
+ ASSERT_OK(subplanStage.pickBestPlan(&yieldPolicy));
+
+ // Mimic a yield by saving the state of the subplan stage and dropping our lock. Then, while
+ // yielded, drop an index that the selected plan does not use.
+ subplanStage.saveState();
+ collLock.reset();
+ dropIndex(BSON("irrelevant" << 1));
+
+ // Restoring state should succeed, since the plan selected by pickBestPlan() does not use the
+ // index {irrelevant: 1}.
+ collLock.emplace(opCtx(), nss);
+ subplanStage.restoreState();
+}
+
} // namespace
} // namespace mongo