author     David Storch <david.storch@10gen.com>          2015-02-04 10:22:40 -0500
committer  Ramon Fernandez <ramon.fernandez@mongodb.com>  2015-02-05 11:33:03 -0500
commit     3e9b48ff434a6ccda0890e752f2034845c042f21
tree       fe60b1ed8431922e8ea1253fa52afc886160f979
parent     f2372f36bd5390704be6ddf4a1556deff49f0d69
download   mongo-3e9b48ff434a6ccda0890e752f2034845c042f21.tar.gz
SERVER-17117 propagate kill notifications to CachedPlanStage
(cherry picked from commit 5936e08901898c8c8cc3e31de96cfe4264c171c6)
-rw-r--r--  jstests/concurrency/fsm_workloads/plan_cache_drop_database.js | 100
-rw-r--r--  src/mongo/db/exec/cached_plan.cpp                              |  21
-rw-r--r--  src/mongo/db/exec/cached_plan.h                                |   5
-rw-r--r--  src/mongo/db/query/plan_executor.cpp                           |  24
4 files changed, 141 insertions(+), 9 deletions(-)
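
In brief: CachedPlanStage writes plan feedback into the PlanCache from its destructor, and that cache lives inside the Collection, which a concurrent dropDatabase can destroy first. This patch propagates the executor's kill notification down to the stage so the destructor knows to leave the cache alone. A minimal, self-contained sketch of that shape (hypothetical names, not the server's actual classes):

#include <memory>

// Hypothetical sketch of the SERVER-17117 hazard. The cache is owned by the
// Collection; the stage keeps a raw pointer to it and writes feedback from
// its destructor. If a drop destroys the Collection first, that write is a
// use-after-free unless a kill notification tells the stage to stand down.
struct PlanCacheSketch {
    int feedbackEntries = 0;
};

class CachedPlanStageSketch {
public:
    explicit CachedPlanStageSketch(PlanCacheSketch* cache) : _cache(cache) {}

    void kill() { _killed = true; }  // called when the collection goes away

    ~CachedPlanStageSketch() {
        // Without the _killed guard, _cache may dangle after a drop.
        if (!_killed) {
            _cache->feedbackEntries++;
        }
    }

private:
    PlanCacheSketch* _cache;  // not owned; lifetime tied to the Collection
    bool _killed = false;
};

int main() {
    auto cache = std::make_unique<PlanCacheSketch>();
    CachedPlanStageSketch stage(cache.get());
    cache.reset();  // the Collection (and its cache) is dropped
    stage.kill();   // the kill notification keeps ~stage from touching it
    return 0;
}

The patch below implements the same idea with a _killed flag set through a new CachedPlanStage::kill() method.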
diff --git a/jstests/concurrency/fsm_workloads/plan_cache_drop_database.js b/jstests/concurrency/fsm_workloads/plan_cache_drop_database.js
new file mode 100644
index 00000000000..19da2dcbf88
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/plan_cache_drop_database.js
@@ -0,0 +1,100 @@
+'use strict';
+
+/**
+ * plan_cache_drop_database.js
+ *
+ * Repeatedly executes count queries with limits against a collection that
+ * is periodically dropped (as part of a database drop). This combination of
+ * events triggers the concurrent destruction of a Collection object and
+ * the updating of said object's PlanCache (SERVER-17117).
+ */
+
+var $config = (function() {
+
+    var data = {
+        // Use the workload name as the database name because the workload name
+        // is assumed to be unique and we'll be dropping the database as part
+        // of our workload.
+        dbName: 'plan_cache_drop_database'
+    };
+
+    function populateData(db, collName) {
+        var coll = db[collName];
+
+        var bulk = coll.initializeUnorderedBulkOp();
+        for (var i = 0; i < 1000; ++i) {
+            bulk.insert({ a: 1, b: Random.rand() });
+        }
+        var res = bulk.execute();
+        assertAlways.writeOK(res);
+
+        // Create two indexes so that there are multiple candidate plans, which
+        // forces plan caching: the plan using the {a: 1} index wins because we
+        // query a single value of 'a' and a range of 'b' values.
+        assertAlways.commandWorked(coll.ensureIndex({ a: 1 }));
+        assertAlways.commandWorked(coll.ensureIndex({ b: 1 }));
+    }
+
+    var states = (function() {
+
+        function count(db, collName) {
+            var coll = db.getSiblingDB(this.dbName)[collName];
+
+            var cmdObj = {
+                query: { a: 1, b: { $gt: Random.rand() } },
+                limit: Random.randInt(10)
+            };
+
+            // We can't use assertAlways.commandWorked here because the plan
+            // executor can be killed during the count.
+            coll.runCommand('count', cmdObj);
+        }
+
+        function dropDB(db, collName) {
+            var myDB = db.getSiblingDB(this.dbName);
+            // We can't assert anything about the dropDatabase return value
+            // because the database might not exist due to other threads
+            // calling dropDB.
+            myDB.dropDatabase();
+
+            // Re-populate the data to make plan caching possible.
+            populateData(myDB, collName);
+        }
+
+        return {
+            count: count,
+            dropDB: dropDB
+        };
+
+    })();
+
+    var transitions = {
+        count: { count: 0.95, dropDB: 0.05 },
+        dropDB: { count: 0.95, dropDB: 0.05 }
+    };
+
+    function setup(db, collName) {
+        var myDB = db.getSiblingDB(this.dbName);
+        populateData(myDB, collName);
+    }
+
+    function teardown(db, collName) {
+        var myDB = db.getSiblingDB(this.dbName);
+
+        // We can't assert anything about the dropDatabase return value because
+        // the database won't exist if the dropDB state is executed last.
+        myDB.dropDatabase();
+    }
+
+    return {
+        threadCount: 10,
+        iterations: 50,
+        data: data,
+        states: states,
+        startState: 'count',
+        transitions: transitions,
+        setup: setup,
+        teardown: teardown
+    };
+
+})();
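
The transitions table above drives the workload: after each state the FSM runner samples the next state by weight, so roughly 1 in 20 steps drops the database out from under the concurrent count queries. For illustration only, a sketch of sampling such a weighted table (written in C++ for consistency with the rest of this page; the actual runner is the JS concurrency framework under jstests/concurrency):

#include <random>
#include <string>
#include <utility>
#include <vector>

// Sketch of weighted next-state selection, as implied by a transitions table
// like { count: 0.95, dropDB: 0.05 }. Illustration only, not the real runner.
std::string nextState(const std::vector<std::pair<std::string, double>>& transitions,
                      std::mt19937& rng) {
    std::uniform_real_distribution<double> uniform(0.0, 1.0);
    double roll = uniform(rng);
    double cumulative = 0.0;
    for (const auto& t : transitions) {
        cumulative += t.second;  // walk the cumulative distribution
        if (roll < cumulative) {
            return t.first;
        }
    }
    return transitions.back().first;  // guard against rounding fallthrough
}

int main() {
    std::mt19937 rng(42);
    const std::vector<std::pair<std::string, double>> fromCount = {
        {"count", 0.95}, {"dropDB", 0.05}};
    std::string next = nextState(fromCount, rng);  // "count" about 95% of the time
    return next.empty() ? 1 : 0;
}

With 10 threads at 50 iterations each, the workload averages roughly 25 database drops per run, enough to make the race likely.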
diff --git a/src/mongo/db/exec/cached_plan.cpp b/src/mongo/db/exec/cached_plan.cpp
index f4e026f740c..cd4f0aca42d 100644
--- a/src/mongo/db/exec/cached_plan.cpp
+++ b/src/mongo/db/exec/cached_plan.cpp
@@ -62,17 +62,23 @@ namespace mongo {
           _usingBackupChild(false),
           _alreadyProduced(false),
           _updatedCache(false),
+          _killed(false),
           _commonStats(kStageType) {}

     CachedPlanStage::~CachedPlanStage() {
-        // We may have produced all necessary results without hitting EOF.
-        // In this case, we still want to update the cache with feedback.
-        if (!_updatedCache) {
+        // We may have produced all necessary results without hitting EOF. In this case, we still
+        // want to update the cache with feedback.
+        //
+        // We can't touch the plan cache if we've been killed.
+        if (!_updatedCache && !_killed) {
             updateCache();
         }
     }

-    bool CachedPlanStage::isEOF() { return getActiveChild()->isEOF(); }
+    bool CachedPlanStage::isEOF() {
+        invariant(!_killed);
+        return getActiveChild()->isEOF();
+    }

     PlanStage::StageState CachedPlanStage::work(WorkingSetID* out) {
         ++_commonStats.works;
@@ -80,6 +86,9 @@ namespace mongo {
         // Adds the amount of time taken by work() to executionTimeMillis.
         ScopedTimer timer(&_commonStats.executionTimeMillis);

+        // We shouldn't be trying to work a dead plan.
+        invariant(!_killed);
+
         if (isEOF()) { return PlanStage::IS_EOF; }

         StageState childStatus = getActiveChild()->work(out);
@@ -201,4 +210,8 @@ namespace mongo {
         return _usingBackupChild ? _backupChildPlan.get() : _mainChildPlan.get();
     }

+    void CachedPlanStage::kill() {
+        _killed = true;
+    }
+
 }  // namespace mongo
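
Note the two-flag guard this hunk leaves in the destructor: feedback is written at most once, either eagerly once the plan has run its course or as a backstop when the caller discards the stage early (e.g. a count with a limit), and never after a kill. A condensed sketch of that contract, with hypothetical names:

#include <cassert>

// Condensed, hypothetical sketch of the destructor guard above. Feedback is
// recorded at most once: eagerly when the plan finishes, or as a destructor
// backstop when the stage is discarded early, and never after a kill, when
// the cache may already be gone.
class FeedbackGuardSketch {
public:
    void onPlanFinished() { updateCache(); }  // normal, eager path
    void kill() { _killed = true; }           // cache is no longer safe

    ~FeedbackGuardSketch() {
        // Backstop: a query with a limit can stop before EOF, so the eager
        // path never ran, but feedback should still be recorded.
        if (!_updatedCache && !_killed) {
            updateCache();
        }
    }

private:
    void updateCache() {
        assert(!_killed);      // stands in for MongoDB's invariant()
        _updatedCache = true;  // placeholder for the real cache write
    }

    bool _updatedCache = false;
    bool _killed = false;
};

int main() {
    FeedbackGuardSketch guard;
    guard.onPlanFinished();  // the destructor will then skip the backstop
    return 0;
}

In the real stage, work() and isEOF() also invariant on !_killed, turning any use of a dead plan into a hard failure rather than a silent cache write.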
diff --git a/src/mongo/db/exec/cached_plan.h b/src/mongo/db/exec/cached_plan.h
index 5f9fe877e3f..ceb63e1e190 100644
--- a/src/mongo/db/exec/cached_plan.h
+++ b/src/mongo/db/exec/cached_plan.h
@@ -78,6 +78,8 @@ namespace mongo {

         static const char* kStageType;

+        void kill();
+
     private:
         PlanStage* getActiveChild() const;
         void updateCache();
@@ -108,6 +110,9 @@ namespace mongo {
         // Have we updated the cache with our plan stats yet?
         bool _updatedCache;

+        // Has this query been killed?
+        bool _killed;
+
         // Stats
         CommonStats _commonStats;
         CachedPlanStats _specificStats;
diff --git a/src/mongo/db/query/plan_executor.cpp b/src/mongo/db/query/plan_executor.cpp
index 069a569653a..b3e329a354e 100644
--- a/src/mongo/db/query/plan_executor.cpp
+++ b/src/mongo/db/query/plan_executor.cpp
@@ -31,6 +31,7 @@
 #include <boost/shared_ptr.hpp>

 #include "mongo/db/catalog/collection.h"
+#include "mongo/db/exec/cached_plan.h"
 #include "mongo/db/exec/multi_plan.h"
 #include "mongo/db/exec/pipeline_proxy.h"
 #include "mongo/db/exec/plan_stage.h"
@@ -425,12 +426,25 @@ namespace mongo {
         // the "inner" executor. This is bad, and hopefully can be fixed down the line with the
         // unification of agg and query.
         //
+        // The CachedPlanStage is another special case. It needs to update the plan cache from
+        // its destructor. It needs to know whether it has been killed so that it can avoid
+        // touching a potentially invalid plan cache in this case.
+        //
         // TODO: get rid of this code block.
-        if (STAGE_PIPELINE_PROXY == _root->stageType()) {
-            PipelineProxyStage* proxyStage = static_cast<PipelineProxyStage*>(_root.get());
-            shared_ptr<PlanExecutor> childExec = proxyStage->getChildExecutor();
-            if (childExec) {
-                childExec->kill();
+        {
+            PlanStage* foundStage = getStageByType(_root.get(), STAGE_PIPELINE_PROXY);
+            if (foundStage) {
+                PipelineProxyStage* proxyStage = static_cast<PipelineProxyStage*>(foundStage);
+                shared_ptr<PlanExecutor> childExec = proxyStage->getChildExecutor();
+                if (childExec) {
+                    childExec->kill();
+                }
+            }
+
+            foundStage = getStageByType(_root.get(), STAGE_CACHED_PLAN);
+            if (foundStage) {
+                CachedPlanStage* cacheStage = static_cast<CachedPlanStage*>(foundStage);
+                cacheStage->kill();
             }
         }
     }
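
The new block looks stages up by type instead of assuming the stage of interest sits at the root. The diff does not show getStageByType itself; assuming each stage can enumerate its children, a plausible sketch is a preorder search of the stage tree (the Stage struct and child accessor here are assumptions, not the real helper):

#include <vector>

// Hypothetical sketch of a getStageByType-style lookup: a preorder search of
// the stage tree for the first stage with a matching type tag. The Stage
// struct and its child list are assumptions; the diff does not include the
// real helper's implementation.
enum StageType { STAGE_CACHED_PLAN, STAGE_PIPELINE_PROXY, STAGE_OTHER };

struct Stage {
    StageType type;
    std::vector<Stage*> children;
};

Stage* getStageByType(Stage* root, StageType type) {
    if (root->type == type) {
        return root;  // first match wins, preorder
    }
    for (Stage* child : root->children) {
        if (Stage* found = getStageByType(child, type)) {
            return found;
        }
    }
    return nullptr;  // no stage of that type in this subtree
}

int main() {
    Stage cached{STAGE_CACHED_PLAN, {}};
    Stage root{STAGE_OTHER, {&cached}};
    Stage* found = getStageByType(&root, STAGE_CACHED_PLAN);
    return found == &cached ? 0 : 1;
}

Searching by type rather than testing the root also covers plans where the pipeline proxy or cached-plan stage is buried beneath other stages.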