author     Max Hirschhorn <max.hirschhorn@mongodb.com>  2016-07-15 09:31:01 -0400
committer  Max Hirschhorn <max.hirschhorn@mongodb.com>  2016-07-15 09:31:01 -0400
commit     3c1de5d668f3ebef750c6a4745d311d1b65bcbbc
tree       10da251b8074ddf0ad0a94cb2c6958652764212a
parent     f070dd1fe27fa811778348475044cb1f14b3f0ef
SERVER-24761 Abort entire query plan when a catalog operation occurs.
Plan execution cannot proceed if the collection or a candidate index was dropped during a
yield. This prevents the subplanner from trying to build plan stages when the collection
and indexes no longer exist.

(cherry picked from commit a7e0e028e73c0b4f543c1ded1f4af0673630617a)
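For context, the race this commit guards against can be sketched at the mongo shell level. This is a minimal illustration, not part of the commit; the collection name and field values are arbitrary:

    // Connection 1: a rooted $or query whose branches are each indexed, so plan
    // selection goes through the subplanner and may yield while choosing a plan.
    db.c.createIndex({a: 1});
    db.c.createIndex({b: 1});
    db.c.find({$or: [{a: 0}, {b: 0}]}).itcount();

    // Connection 2, concurrently: a catalog operation invalidates the candidate plans.
    db.c.dropIndex({a: 1});  // or: db.c.drop();

    // With this change, the query on connection 1 fails (ErrorCodes::QueryPlanKilled)
    // instead of the subplanner continuing to build plan stages against a dropped
    // collection or index.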
-rw-r--r--  jstests/concurrency/fsm_workloads/kill_rooted_or.js   81
-rw-r--r--  src/mongo/base/error_codes.err                          1
-rw-r--r--  src/mongo/db/exec/cached_plan.cpp                       2
-rw-r--r--  src/mongo/db/exec/cached_plan.h                         3
-rw-r--r--  src/mongo/db/exec/multi_plan.cpp                        2
-rw-r--r--  src/mongo/db/exec/multi_plan.h                          3
-rw-r--r--  src/mongo/db/exec/subplan.cpp                          10
-rw-r--r--  src/mongo/db/exec/subplan.h                             3
-rw-r--r--  src/mongo/db/query/plan_executor.h                      4
9 files changed, 104 insertions, 5 deletions
diff --git a/jstests/concurrency/fsm_workloads/kill_rooted_or.js b/jstests/concurrency/fsm_workloads/kill_rooted_or.js
new file mode 100644
index 00000000000..efc3c5eabf2
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/kill_rooted_or.js
@@ -0,0 +1,81 @@
+'use strict';
+
+/**
+ * kill_rooted_or.js
+ *
+ * Queries using a rooted $or predicate to cause plan selection to use the subplanner. Tests that
+ * the subplanner correctly halts plan execution when the collection is dropped or a candidate index
+ * is dropped.
+ *
+ * This workload was designed to reproduce SERVER-24761.
+ */
+var $config = (function() {
+
+    // Use the workload name as the collection name, since the workload name is assumed to be
+    // unique.
+    var uniqueCollectionName = 'kill_rooted_or';
+
+    var data = {
+        collName: uniqueCollectionName,
+        indexSpecs: [
+            {a: 1},
+            {a: 1, c: 1},
+            {b: 1},
+            {b: 1, c: 1},
+        ]
+    };
+
+    var states = {
+        query: function query(db, collName) {
+            var cursor = db[this.collName].find({$or: [{a: 0}, {b: 0}]});
+            try {
+                assert.eq(0, cursor.itcount());
+            } catch (e) {
+                // Ignore errors due to the plan executor being killed.
+            }
+        },
+
+        dropCollection: function dropCollection(db, collName) {
+            db[this.collName].drop();
+
+            // Recreate all of the indexes on the collection.
+            this.indexSpecs.forEach(function(indexSpec) {
+                assertAlways.commandWorked(db[this.collName].createIndex(indexSpec));
+            }, this);
+        },
+
+        dropIndex: function dropIndex(db, collName) {
+            var indexSpec = this.indexSpecs[Random.randInt(this.indexSpecs.length)];
+
+            // We don't assert that the command succeeded when dropping an index because it's
+            // possible another thread has already dropped this index.
+            db[this.collName].dropIndex(indexSpec);
+
+            // Recreate the index that was dropped.
+            assertAlways.commandWorked(db[this.collName].createIndex(indexSpec));
+        }
+    };
+
+    var transitions = {
+        query: {query: 0.8, dropCollection: 0.1, dropIndex: 0.1},
+        dropCollection: {query: 1},
+        dropIndex: {query: 1}
+    };
+
+    function setup(db, collName, cluster) {
+        this.indexSpecs.forEach(function(indexSpec) {
+            assertAlways.commandWorked(db[this.collName].createIndex(indexSpec));
+        }, this);
+    }
+
+    return {
+        threadCount: 10,
+        iterations: 50,
+        data: data,
+        states: states,
+        startState: 'query',
+        transitions: transitions,
+        setup: setup
+    };
+
+})();
diff --git a/src/mongo/base/error_codes.err b/src/mongo/base/error_codes.err
index 83459e8b7e3..03a24b6fb60 100644
--- a/src/mongo/base/error_codes.err
+++ b/src/mongo/base/error_codes.err
@@ -122,6 +122,7 @@ error_code("FailedToSatisfyReadPreference", 133)
# backported
error_code("OplogStartMissing", 120)
error_code("CappedPositionLost", 121)
+error_code("QueryPlanKilled", 173)
# Non-sequential error codes (for compatibility only)
error_code("NotMaster", 10107) #this comes from assert_util.h
diff --git a/src/mongo/db/exec/cached_plan.cpp b/src/mongo/db/exec/cached_plan.cpp
index d649cdcadfb..73e109669b7 100644
--- a/src/mongo/db/exec/cached_plan.cpp
+++ b/src/mongo/db/exec/cached_plan.cpp
@@ -203,7 +203,7 @@ Status CachedPlanStage::tryYield(PlanYieldPolicy* yieldPolicy) {
        bool alive = yieldPolicy->yield(_fetcher.get());
        if (!alive) {
-            return Status(ErrorCodes::OperationFailed,
+            return Status(ErrorCodes::QueryPlanKilled,
                          "PlanExecutor killed during cached plan trial period");
        }
    }
diff --git a/src/mongo/db/exec/cached_plan.h b/src/mongo/db/exec/cached_plan.h
index 751cce976bc..bdb7dc0d315 100644
--- a/src/mongo/db/exec/cached_plan.h
+++ b/src/mongo/db/exec/cached_plan.h
@@ -109,7 +109,8 @@ private:
    /**
     * May yield during the cached plan stage's trial period or replanning phases.
     *
-     * Returns a non-OK status if the plan was killed during a yield.
+     * Returns a non-OK status if query planning fails. In particular, this function returns
+     * ErrorCodes::QueryPlanKilled if the query plan was killed during a yield.
     */
    Status tryYield(PlanYieldPolicy* yieldPolicy);
diff --git a/src/mongo/db/exec/multi_plan.cpp b/src/mongo/db/exec/multi_plan.cpp
index 46503849045..9808edcd4a4 100644
--- a/src/mongo/db/exec/multi_plan.cpp
+++ b/src/mongo/db/exec/multi_plan.cpp
@@ -171,7 +171,7 @@ Status MultiPlanStage::tryYield(PlanYieldPolicy* yieldPolicy) {
        if (!alive) {
            _failure = true;
-            Status failStat(ErrorCodes::OperationFailed,
+            Status failStat(ErrorCodes::QueryPlanKilled,
                            "PlanExecutor killed during plan selection");
            _statusMemberId = WorkingSetCommon::allocateStatusMember(_candidates[0].ws, failStat);
            return failStat;
diff --git a/src/mongo/db/exec/multi_plan.h b/src/mongo/db/exec/multi_plan.h
index 600e7130017..94bd0ef9033 100644
--- a/src/mongo/db/exec/multi_plan.h
+++ b/src/mongo/db/exec/multi_plan.h
@@ -100,7 +100,8 @@ public:
     * works of the candidate plans. By default, 'yieldPolicy' is NULL and no yielding will
     * take place.
     *
-     * Returns a non-OK status if the plan was killed during yield.
+     * Returns a non-OK status if query planning fails. In particular, this function returns
+     * ErrorCodes::QueryPlanKilled if the query plan was killed during a yield.
     */
    Status pickBestPlan(PlanYieldPolicy* yieldPolicy);
diff --git a/src/mongo/db/exec/subplan.cpp b/src/mongo/db/exec/subplan.cpp
index 379cbb67081..9249f066618 100644
--- a/src/mongo/db/exec/subplan.cpp
+++ b/src/mongo/db/exec/subplan.cpp
@@ -423,6 +423,11 @@ Status SubplanStage::pickBestPlan(PlanYieldPolicy* yieldPolicy) {
    // Plan each branch of the $or.
    Status subplanningStatus = planSubqueries();
    if (!subplanningStatus.isOK()) {
+        if (subplanningStatus == ErrorCodes::QueryPlanKilled) {
+            // Query planning cannot continue if the plan for one of the subqueries was killed
+            // because the collection or a candidate index may have been dropped.
+            return subplanningStatus;
+        }
        return choosePlanWholeQuery(yieldPolicy);
    }
@@ -430,6 +435,11 @@ Status SubplanStage::pickBestPlan(PlanYieldPolicy* yieldPolicy) {
    // the overall winning plan from the resulting index tags.
    Status subplanSelectStat = choosePlanForSubqueries(yieldPolicy);
    if (!subplanSelectStat.isOK()) {
+        if (subplanSelectStat == ErrorCodes::QueryPlanKilled) {
+            // Query planning cannot continue if the plan was killed because the collection or a
+            // candidate index may have been dropped.
+            return subplanSelectStat;
+        }
        return choosePlanWholeQuery(yieldPolicy);
    }
diff --git a/src/mongo/db/exec/subplan.h b/src/mongo/db/exec/subplan.h
index bd22193310c..2099f168131 100644
--- a/src/mongo/db/exec/subplan.h
+++ b/src/mongo/db/exec/subplan.h
@@ -106,7 +106,8 @@ public:
     * works of the candidate plans. By default, 'yieldPolicy' is NULL and no yielding will
     * take place.
     *
-     * Returns a non-OK status if the plan was killed during yield or if planning fails.
+     * Returns a non-OK status if query planning fails. In particular, this function returns
+     * ErrorCodes::QueryPlanKilled if the query plan was killed during a yield.
     */
    Status pickBestPlan(PlanYieldPolicy* yieldPolicy);
diff --git a/src/mongo/db/query/plan_executor.h b/src/mongo/db/query/plan_executor.h
index 66b78a4ba1a..2c6d13bc071 100644
--- a/src/mongo/db/query/plan_executor.h
+++ b/src/mongo/db/query/plan_executor.h
@@ -369,6 +369,10 @@ private:
     * this calls into their underlying plan selection facilities. Otherwise, does nothing.
     *
     * If a YIELD_AUTO policy is set then locks are yielded during plan selection.
+     *
+     * Returns a non-OK status if query planning fails. In particular, this function returns
+     * ErrorCodes::QueryPlanKilled if plan execution cannot proceed due to a concurrent write or
+     * catalog operation.
     */
    Status pickBestPlan(YieldPolicy policy);