-rw-r--r--  jstests/replsets/explain_slaveok.js                        132
-rw-r--r--  src/mongo/db/commands/explain_cmd.cpp                       39
-rw-r--r--  src/mongo/db/commands/explain_cmd.h                          7
-rw-r--r--  src/mongo/db/commands/write_commands/batch_executor.cpp     28
-rw-r--r--  src/mongo/db/commands/write_commands/batch_executor.h        7
-rw-r--r--  src/mongo/db/commands/write_commands/write_commands.cpp     81
-rw-r--r--  src/mongo/db/commands/write_commands/write_commands.h        7
-rw-r--r--  src/mongo/db/exec/update.cpp                                 55
-rw-r--r--  src/mongo/db/ops/update.cpp                                 107
-rw-r--r--  src/mongo/db/ops/update.h                                    14
-rw-r--r--  src/mongo/db/ops/update_executor.cpp                        127
-rw-r--r--  src/mongo/db/ops/update_executor.h                           20
-rw-r--r--  src/mongo/db/ops/update_request.h                            19
-rw-r--r--  src/mongo/db/query/explain.cpp                               14
14 files changed, 494 insertions(+), 163 deletions(-)
diff --git a/jstests/replsets/explain_slaveok.js b/jstests/replsets/explain_slaveok.js
new file mode 100644
index 00000000000..311a374fc97
--- /dev/null
+++ b/jstests/replsets/explain_slaveok.js
@@ -0,0 +1,132 @@
+// Test the explain command on the primary and on secondaries:
+//
+// 1) Explain of read operations should work on the secondaries iff slaveOk is set.
+//
+// 2) Explain of write operations should
+// --fail on secondaries, even if slaveOk is set,
+// --succeed on primary without applying any writes.
+
+var name = "explain_slaveok";
+
+print("Start replica set with two nodes");
+var replTest = new ReplSetTest({name: name, nodes: 2});
+var nodes = replTest.startSet();
+replTest.initiate();
+var primary = replTest.getMaster();
+
+// Insert a document and let it sync to the secondary.
+print("Initial sync");
+primary.getDB("test").explain_slaveok.insert({a: 1});
+replTest.awaitReplication();
+
+// Check that the document is present on the primary.
+assert.eq(1, primary.getDB("test").explain_slaveok.findOne({a: 1})["a"]);
+
+// We shouldn't be able to read from the secondary with slaveOk off.
+var secondary = replTest.getSecondary();
+secondary.getDB("test").getMongo().setSlaveOk(false);
+assert.throws(function() {
+ secondary.getDB("test").explain_slaveok.findOne({a: 1});
+});
+
+// With slaveOk on, we should be able to read from the secondary.
+secondary.getDB("test").getMongo().setSlaveOk(true);
+assert.eq(1, secondary.getDB("test").explain_slaveok.findOne({a: 1})["a"]);
+
+//
+// Test explains on primary.
+//
+
+// Explain a count on the primary.
+var explainOut = primary.getDB("test").runCommand({
+ explain: {
+ count: "explain_slaveok",
+ query: {a: 1}
+ },
+ verbosity: "executionStats"
+});
+printjson(explainOut);
+assert.commandWorked(explainOut, "explain read op on primary");
+
+// Explain an update on the primary.
+explainOut = primary.getDB("test").runCommand({
+ explain: {
+ update: "explain_slaveok",
+ updates: [
+ {q: {a: 1}, u: {$set: {a: 5}}}
+ ]
+ },
+ verbosity: "executionStats"
+});
+printjson(explainOut);
+assert.commandWorked(explainOut, "explain write op on primary");
+
+// Plan should have an update stage at its root, reporting that it would
+// modify a single document.
+var stages = explainOut.executionStats.executionStages;
+assert.eq("UPDATE", stages.stage);
+assert.eq(1, stages.nWouldModify);
+
+// Confirm that the document did not actually get modified on the primary
+// or on the secondary.
+assert.eq(1, primary.getDB("test").explain_slaveok.findOne({a: 1})["a"]);
+secondary.getDB("test").getMongo().setSlaveOk(true);
+assert.eq(1, secondary.getDB("test").explain_slaveok.findOne({a: 1})["a"]);
+
+//
+// Test explains on secondary.
+//
+
+// Explain a count on the secondary with slaveOk off. Should fail because
+// slaveOk is required for explains on a secondary.
+secondary.getDB("test").getMongo().setSlaveOk(false);
+explainOut = secondary.getDB("test").runCommand({
+ explain: {
+ count: "explain_slaveok",
+ query: {a: 1}
+ },
+ verbosity: "executionStats"
+});
+printjson(explainOut);
+assert.commandFailed(explainOut, "explain read op on secondary, slaveOk false");
+
+// Explain of count should succeed once slaveOk is true.
+secondary.getDB("test").getMongo().setSlaveOk(true);
+explainOut = secondary.getDB("test").runCommand({
+ explain: {
+ count: "explain_slaveok",
+ query: {a: 1}
+ },
+ verbosity: "executionStats"
+});
+printjson(explainOut);
+assert.commandWorked(explainOut, "explain read op on secondary, slaveOk true");
+
+// Explain an update on the secondary with slaveOk off. Should fail because
+// slaveOk is required for explains on a secondary.
+secondary.getDB("test").getMongo().setSlaveOk(false);
+explainOut = secondary.getDB("test").runCommand({
+ explain: {
+ update: "explain_slaveok",
+ updates: [
+ {q: {a: 1}, u: {$set: {a: 5}}}
+ ]
+ },
+ verbosity: "executionStats"
+});
+printjson(explainOut);
+assert.commandFailed(explainOut, "explain write op on secondary, slaveOk false");
+
+// Explain of the update should also fail with slaveOk on.
+secondary.getDB("test").getMongo().setSlaveOk(true);
+explainOut = secondary.getDB("test").runCommand({
+ explain: {
+ update: "explain_slaveok",
+ updates: [
+ {q: {a: 1}, u: {$set: {a: 5}}}
+ ]
+ },
+ verbosity: "executionStats"
+});
+printjson(explainOut);
+assert.commandFailed(explainOut, "explain write op on secondary, slaveOk true");
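
For illustration, a minimal sketch (not part of the patch) of reading the new update-stage
statistics out of the explain output, against the same collection and 'primary' connection
used by the test above. Only stage and nWouldModify are asserted by the test; the nMatched
and wouldInsert values in the comments are expected values, not asserted ones.

    // Sketch: explain the update on the primary and inspect the UPDATE stage stats.
    var out = primary.getDB("test").runCommand({
        explain: {update: "explain_slaveok", updates: [{q: {a: 1}, u: {$set: {a: 5}}}]},
        verbosity: "executionStats"
    });
    var stages = out.executionStats.executionStages;
    print(stages.stage);         // "UPDATE"
    print(stages.nMatched);      // expected: 1 (one document matches {a: 1})
    print(stages.nWouldModify);  // expected: 1 (reported, but the write is not applied)
    print(stages.wouldInsert);   // expected: false (this is not an upsert)
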
diff --git a/src/mongo/db/commands/explain_cmd.cpp b/src/mongo/db/commands/explain_cmd.cpp
index a895769453f..5fc492edb4d 100644
--- a/src/mongo/db/commands/explain_cmd.cpp
+++ b/src/mongo/db/commands/explain_cmd.cpp
@@ -30,10 +30,12 @@
#include "mongo/db/commands/explain_cmd.h"
+#include "mongo/client/dbclientinterface.h"
#include "mongo/db/catalog/database.h"
#include "mongo/db/client.h"
#include "mongo/db/commands.h"
#include "mongo/db/query/explain.h"
+#include "mongo/db/repl/repl_coordinator_global.h"
#include "mongo/util/mongoutils/str.h"
namespace mongo {
@@ -65,6 +67,14 @@ namespace mongo {
string& errmsg,
BSONObjBuilder& result,
bool fromRepl) {
+ // Should never get explain commands issued from replication.
+ if (fromRepl) {
+ Status commandStat(ErrorCodes::IllegalOperation,
+ "explain command should not be from repl");
+ appendCommandStatus(result, commandStat);
+ return false;
+ }
+
// Get the verbosity.
Explain::Verbosity verbosity = Explain::QUERY_PLANNER;
if (!cmdObj["verbosity"].eoo()) {
@@ -79,14 +89,18 @@ namespace mongo {
verbosity = Explain::FULL;
}
else if (!mongoutils::str::equals(verbStr, "queryPlanner")) {
- errmsg = "verbosity string must be one of "
- "{'queryPlanner', 'executionStats', 'allPlansExecution'}";
- return false;
+ Status commandStat(ErrorCodes::BadValue,
+ "verbosity string must be one of "
+ "{'queryPlanner', 'executionStats', 'allPlansExecution'}");
+ appendCommandStatus(result, commandStat);
+ return false;
}
}
if (Object != cmdObj.firstElement().type()) {
- errmsg = "explain command requires a nested object";
+ Status commandStat(ErrorCodes::BadValue,
+ "explain command requires a nested object");
+ appendCommandStatus(result, commandStat);
return false;
}
@@ -101,6 +115,23 @@ namespace mongo {
return appendCommandStatus(result, explainStatus);
}
+ // Check whether the child command is allowed to run here. TODO: this logic is
+ // copied from Command::execCommand and should be abstracted. Until then, make
+ // sure to keep it up to date.
+ repl::ReplicationCoordinator* replCoord = repl::getGlobalReplicationCoordinator();
+ bool canRunHere =
+ replCoord->canAcceptWritesForDatabase(dbname) ||
+ commToExplain->slaveOk() ||
+ (commToExplain->slaveOverrideOk() && (options & QueryOption_SlaveOk));
+
+ if (!canRunHere) {
+ mongoutils::str::stream ss;
+ ss << "Explain's child command cannot run on this node. "
+ << "Are you explaining a write command on a secondary?";
+ appendCommandStatus(result, false, ss);
+ return false;
+ }
+
// Actually call the nested command's explain(...) method.
Status explainStatus = commToExplain->explain(txn, dbname, explainObj, verbosity, &result);
if (!explainStatus.isOK()) {
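
A sketch (not part of the patch) of how the stricter error handling above surfaces in the
shell. The errmsg text is quoted from the code; the surrounding output shape and the 'db'
connection are illustrative.

    // Omitting "verbosity" still defaults to the "queryPlanner" level.
    db.runCommand({explain: {count: "explain_slaveok", query: {a: 1}}});

    // An unrecognized verbosity string now fails with a BadValue command status
    // rather than a bare errmsg string.
    var res = db.runCommand({
        explain: {count: "explain_slaveok", query: {a: 1}},
        verbosity: "bogus"
    });
    assert.commandFailed(res);
    // res.errmsg: "verbosity string must be one of
    //              {'queryPlanner', 'executionStats', 'allPlansExecution'}"
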
diff --git a/src/mongo/db/commands/explain_cmd.h b/src/mongo/db/commands/explain_cmd.h
index c30c405cd0f..bfb8dd5cf1a 100644
--- a/src/mongo/db/commands/explain_cmd.h
+++ b/src/mongo/db/commands/explain_cmd.h
@@ -51,10 +51,15 @@ namespace mongo {
virtual bool isWriteCommandForConfigServer() const { return false; }
- // TODO: make slave ok true, test explains on secondaries.
+ /**
+ * Running an explain on a secondary requires explicitly setting slaveOk.
+ */
virtual bool slaveOk() const {
return false;
}
+ virtual bool slaveOverrideOk() const {
+ return true;
+ }
virtual bool maintenanceOk() const { return false; }
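
With slaveOk() false but slaveOverrideOk() true, explain behaves like other reads: a
secondary refuses it unless the client sets the slaveOk flag, as the jstest above
exercises. A minimal sketch, assuming the 'replTest' fixture from that test:

    var secDB = replTest.getSecondary().getDB("test");
    secDB.getMongo().setSlaveOk(true);   // shell sets QueryOption_SlaveOk on the connection
    assert.commandWorked(secDB.runCommand({
        explain: {count: "explain_slaveok", query: {a: 1}},
        verbosity: "queryPlanner"
    }));
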
diff --git a/src/mongo/db/commands/write_commands/batch_executor.cpp b/src/mongo/db/commands/write_commands/batch_executor.cpp
index efada43e780..ce4609ac4af 100644
--- a/src/mongo/db/commands/write_commands/batch_executor.cpp
+++ b/src/mongo/db/commands/write_commands/batch_executor.cpp
@@ -48,7 +48,6 @@
#include "mongo/db/ops/insert.h"
#include "mongo/db/ops/update_executor.h"
#include "mongo/db/ops/update_lifecycle_impl.h"
-#include "mongo/db/ops/update_request.h"
#include "mongo/db/repl/oplog.h"
#include "mongo/db/repl/repl_coordinator_global.h"
#include "mongo/db/repl/repl_settings.h"
@@ -147,30 +146,39 @@ namespace mongo {
staleError->setErrInfo( builder.obj() );
}
- void WriteBatchExecutor::executeBatch( const BatchedCommandRequest& request,
- BatchedCommandResponse* response ) {
+ // static
+ Status WriteBatchExecutor::validateBatch( const BatchedCommandRequest& request ) {
// Validate namespace
const NamespaceString nss = NamespaceString( request.getNS() );
if ( !nss.isValid() ) {
- toBatchError( Status( ErrorCodes::InvalidNamespace,
- nss.ns() + " is not a valid namespace" ),
- response );
- return;
+ return Status( ErrorCodes::InvalidNamespace,
+ nss.ns() + " is not a valid namespace" );
}
// Make sure we can write to the namespace
Status allowedStatus = userAllowedWriteNS( nss );
if ( !allowedStatus.isOK() ) {
- toBatchError( allowedStatus, response );
- return;
+ return allowedStatus;
}
// Validate insert index requests
// TODO: Push insert index requests through createIndex once all upgrade paths support it
string errMsg;
if ( request.isInsertIndexRequest() && !request.isValidIndexRequest( &errMsg ) ) {
- toBatchError( Status( ErrorCodes::InvalidOptions, errMsg ), response );
+ return Status( ErrorCodes::InvalidOptions, errMsg );
+ }
+
+ return Status::OK();
+ }
+
+ void WriteBatchExecutor::executeBatch( const BatchedCommandRequest& request,
+ BatchedCommandResponse* response ) {
+
+ // Validate the batch request (namespace, writability, index requests)
+ Status isValid = validateBatch(request);
+ if (!isValid.isOK()) {
+ toBatchError( isValid, response );
return;
}
diff --git a/src/mongo/db/commands/write_commands/batch_executor.h b/src/mongo/db/commands/write_commands/batch_executor.h
index 82380962c17..3114b8803d0 100644
--- a/src/mongo/db/commands/write_commands/batch_executor.h
+++ b/src/mongo/db/commands/write_commands/batch_executor.h
@@ -31,6 +31,7 @@
#include <string>
#include "mongo/base/disallow_copying.h"
+#include "mongo/db/ops/update_request.h"
#include "mongo/s/write_ops/batched_command_request.h"
#include "mongo/s/write_ops/batched_command_response.h"
#include "mongo/s/write_ops/batched_delete_document.h"
@@ -71,6 +72,12 @@ namespace mongo {
const WriteBatchStats& getStats() const;
+ /**
+ * Does basic validation of the batch request. Returns a non-OK status if
+ * any problems with the batch are found.
+ */
+ static Status validateBatch( const BatchedCommandRequest& request );
+
private:
/**
* Executes the writes in the batch and returns upserted _ids and write errors.
diff --git a/src/mongo/db/commands/write_commands/write_commands.cpp b/src/mongo/db/commands/write_commands/write_commands.cpp
index 28a9bcfd619..be49326cf48 100644
--- a/src/mongo/db/commands/write_commands/write_commands.cpp
+++ b/src/mongo/db/commands/write_commands/write_commands.cpp
@@ -36,6 +36,10 @@
#include "mongo/db/curop.h"
#include "mongo/db/json.h"
#include "mongo/db/lasterror.h"
+#include "mongo/db/ops/update_executor.h"
+#include "mongo/db/ops/update_lifecycle_impl.h"
+#include "mongo/db/query/explain.h"
+#include "mongo/db/query/get_executor.h"
#include "mongo/db/repl/repl_coordinator_global.h"
#include "mongo/db/server_parameters.h"
#include "mongo/db/stats/counters.h"
@@ -138,6 +142,83 @@ namespace mongo {
return response.getOk();
}
+ Status WriteCmd::explain(OperationContext* txn,
+ const std::string& dbname,
+ const BSONObj& cmdObj,
+ Explain::Verbosity verbosity,
+ BSONObjBuilder* out) const {
+ // For now we only explain updates.
+ if ( BatchedCommandRequest::BatchType_Update != _writeType ) {
+ return Status( ErrorCodes::IllegalOperation,
+ "Non-update write ops cannot yet be explained" );
+ }
+
+ // Parse the batch request.
+ BatchedCommandRequest request( _writeType );
+ std::string errMsg;
+ if ( !request.parseBSON( cmdObj, &errMsg ) || !request.isValid( &errMsg ) ) {
+ return Status( ErrorCodes::FailedToParse, errMsg );
+ }
+
+ // Note that this is a runCommand, and therefore the database and the collection name
+ // are in different parts of the grammar for the command. But it's more convenient to
+ // work with a NamespaceString. We build it here and replace it in the parsed command.
+ // Internally, everything works with the namespace string as opposed to just the
+ // collection name.
+ NamespaceString nsString(dbname, request.getNS());
+ request.setNS(nsString.ns());
+
+ // Do the validation of the batch that is shared with non-explained write batches.
+ Status isValid = WriteBatchExecutor::validateBatch( request );
+ if (!isValid.isOK()) {
+ return isValid;
+ }
+
+ // Explain must do one additional piece of validation: For now we only explain
+ // singleton batches.
+ if ( request.sizeWriteOps() != 1u ) {
+ return Status( ErrorCodes::InvalidLength,
+ "explained write batches must be of size 1" );
+ }
+
+ // Get a reference to the singleton batch item (it's the 0th item in the batch).
+ BatchItemRef batchItem( &request, 0 );
+
+ // Create the update request.
+ UpdateRequest updateRequest( txn, nsString );
+ updateRequest.setQuery( batchItem.getUpdate()->getQuery() );
+ updateRequest.setUpdates( batchItem.getUpdate()->getUpdateExpr() );
+ updateRequest.setMulti( batchItem.getUpdate()->getMulti() );
+ updateRequest.setUpsert( batchItem.getUpdate()->getUpsert() );
+ updateRequest.setUpdateOpLog( true );
+ UpdateLifecycleImpl updateLifecycle( true, updateRequest.getNamespaceString() );
+ updateRequest.setLifecycle( &updateLifecycle );
+ updateRequest.setExplain();
+
+ // Use the request to create an UpdateExecutor, and from it extract the
+ // plan tree which will be used to execute this update.
+ UpdateExecutor updateExecutor( &updateRequest, &txn->getCurOp()->debug() );
+ Status prepStatus = updateExecutor.prepare();
+ if ( !prepStatus.isOK() ) {
+ return prepStatus;
+ }
+
+ // Explains of write commands are read-only, but we take a write lock so that timing info
+ // is more accurate.
+ Client::WriteContext ctx( txn, nsString );
+
+ Status prepInLockStatus = updateExecutor.prepareInLock( ctx.ctx().db() );
+ if ( !prepInLockStatus.isOK() ) {
+ return prepInLockStatus;
+ }
+
+ PlanExecutor* exec = updateExecutor.getPlanExecutor();
+ const ScopedExecutorRegistration safety( exec );
+
+ // Explain the plan tree.
+ return Explain::explainStages( exec, verbosity, out );
+ }
+
CmdInsert::CmdInsert() :
WriteCmd( "insert", BatchedCommandRequest::BatchType_Insert ) {
}
diff --git a/src/mongo/db/commands/write_commands/write_commands.h b/src/mongo/db/commands/write_commands/write_commands.h
index c5f5da37cac..016427816f3 100644
--- a/src/mongo/db/commands/write_commands/write_commands.h
+++ b/src/mongo/db/commands/write_commands/write_commands.h
@@ -81,6 +81,13 @@ namespace mongo {
BSONObjBuilder& result,
bool fromRepl);
+ // Write commands can be explained.
+ virtual Status explain(OperationContext* txn,
+ const std::string& dbname,
+ const BSONObj& cmdObj,
+ Explain::Verbosity verbosity,
+ BSONObjBuilder* out) const;
+
// Type of batch (e.g. insert).
BatchedCommandRequest::BatchType _writeType;
};
diff --git a/src/mongo/db/exec/update.cpp b/src/mongo/db/exec/update.cpp
index 53e13469f33..087b7e84bcd 100644
--- a/src/mongo/db/exec/update.cpp
+++ b/src/mongo/db/exec/update.cpp
@@ -499,7 +499,11 @@ namespace mongo {
// If a set of modifiers were all no-ops, we are still 'in place', but there
// is no work to do, in which case we want to consider the object unchanged.
if (!_damages.empty() ) {
- _collection->updateDocumentWithDamages(request->getOpCtx(), loc, source, _damages);
+ // Don't actually do the write if this is an explain.
+ if (!request->isExplain()) {
+ _collection->updateDocumentWithDamages(request->getOpCtx(), loc, source,
+ _damages);
+ }
docWasModified = true;
_specificStats.fastmod = true;
}
@@ -518,27 +522,32 @@ namespace mongo {
str::stream() << "Resulting document after update is larger than "
<< BSONObjMaxUserSize,
newObj.objsize() <= BSONObjMaxUserSize);
- StatusWith<DiskLoc> res = _collection->updateDocument(request->getOpCtx(),
- loc,
- newObj,
- true,
- _params.opDebug);
- uassertStatusOK(res.getStatus());
- DiskLoc newLoc = res.getValue();
docWasModified = true;
- // If the document moved, we might see it again in a collection scan (maybe it's
- // a document after our current document).
- //
- // If the document is indexed and the mod changes an indexed value, we might see it
- // again. For an example, see the comment above near declaration of updatedLocs.
- if (_updatedLocs && (newLoc != loc || driver->modsAffectIndices())) {
- _updatedLocs->insert(newLoc);
+ // Don't actually do the write if this is an explain.
+ if (!request->isExplain()) {
+ StatusWith<DiskLoc> res = _collection->updateDocument(request->getOpCtx(),
+ loc,
+ newObj,
+ true,
+ _params.opDebug);
+ uassertStatusOK(res.getStatus());
+ DiskLoc newLoc = res.getValue();
+
+ // If the document moved, we might see it again in a collection scan (maybe it's
+ // a document after our current document).
+ //
+ // If the document is indexed and the mod changes an indexed value, we might see
+ // it again. For an example, see the comment above near declaration of
+ // updatedLocs.
+ if (_updatedLocs && (newLoc != loc || driver->modsAffectIndices())) {
+ _updatedLocs->insert(newLoc);
+ }
}
}
- // Call logOp if requested.
- if (request->shouldCallLogOp() && !logObj.isEmpty()) {
+ // Call logOp if requested, and we're not an explain.
+ if (request->shouldCallLogOp() && !logObj.isEmpty() && !request->isExplain()) {
BSONObj idQuery = driver->makeOplogEntryQuery(newObj, request->isMulti());
repl::logOp(request->getOpCtx(),
"u",
@@ -559,7 +568,8 @@ namespace mongo {
restoreState(request->getOpCtx());
- // Only record doc modifications if they wrote (exclude no-ops)
+ // Only record doc modifications if they wrote (exclude no-ops). Explains get
+ // recorded as if they wrote.
if (docWasModified) {
_specificStats.nModified++;
}
@@ -641,6 +651,13 @@ namespace mongo {
str::stream() << "Document to upsert is larger than " << BSONObjMaxUserSize,
newObj.objsize() <= BSONObjMaxUserSize);
+ _specificStats.objInserted = newObj;
+
+ // If this is an explain, bail out now without doing the insert.
+ if (request->isExplain()) {
+ return;
+ }
+
WriteUnitOfWork wunit(request->getOpCtx());
// Only create the collection if the doc will be inserted.
if (!_collection) {
@@ -668,8 +685,6 @@ namespace mongo {
}
wunit.commit();
-
- _specificStats.objInserted = newObj;
}
bool UpdateStage::doneUpdating() {
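
The net effect of the isExplain() guards above is that an explained update runs the full
plan but skips every write: no document update, no insert on upsert, and no oplog entry.
A minimal sketch of the observable behavior (expected values in the comments are
illustrative, assuming a 'db' connection to the test database):

    // Explain an upsert that matches nothing: the stage reports wouldInsert,
    // but nothing is written.
    var out = db.runCommand({
        explain: {update: "explain_slaveok",
                  updates: [{q: {_id: "missing"}, u: {$set: {a: 7}}, upsert: true}]},
        verbosity: "executionStats"
    });
    print(out.executionStats.executionStages.wouldInsert);  // expected: true
    print(db.explain_slaveok.count({_id: "missing"}));      // expected: 0, nothing inserted
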
diff --git a/src/mongo/db/ops/update.cpp b/src/mongo/db/ops/update.cpp
index 301e89a13fd..435d4104df6 100644
--- a/src/mongo/db/ops/update.cpp
+++ b/src/mongo/db/ops/update.cpp
@@ -50,25 +50,6 @@
namespace mongo {
- namespace {
-
- // TODO: Make this a function on NamespaceString, or make it cleaner.
- inline void validateUpdate(const char* ns ,
- const BSONObj& updateobj,
- const BSONObj& patternOrig) {
- uassert(10155 , "cannot update reserved $ collection", strchr(ns, '$') == 0);
- if (strstr(ns, ".system.")) {
- /* dm: it's very important that system.indexes is never updated as IndexDetails
- has pointers into it */
- uassert(10156,
- str::stream() << "cannot update system collection: "
- << ns << " q: " << patternOrig << " u: " << updateobj,
- legalClientSystemNS(ns , true));
- }
- }
-
- } // namespace
-
UpdateResult update(Database* db,
const UpdateRequest& request,
OpDebug* opDebug) {
@@ -77,94 +58,6 @@ namespace mongo {
return executor.execute(db);
}
- UpdateResult update(Database* db,
- const UpdateRequest& request,
- OpDebug* opDebug,
- UpdateDriver* driver,
- CanonicalQuery* cq) {
-
- LOG(3) << "processing update : " << request;
-
- std::auto_ptr<CanonicalQuery> cqHolder(cq);
- const NamespaceString& nsString = request.getNamespaceString();
- UpdateLifecycle* lifecycle = request.getLifecycle();
-
- Collection* collection = db->getCollection(request.getOpCtx(), nsString.ns());
-
- validateUpdate(nsString.ns().c_str(), request.getUpdates(), request.getQuery());
-
-
- // TODO: This seems a bit circuitious.
- opDebug->updateobj = request.getUpdates();
-
- if (lifecycle) {
- lifecycle->setCollection(collection);
- driver->refreshIndexKeys(lifecycle->getIndexKeys(request.getOpCtx()));
- }
-
- PlanExecutor* rawExec;
- Status status = Status::OK();
- if (cq) {
- // This is the regular path for when we have a CanonicalQuery.
- status = getExecutorUpdate(request.getOpCtx(), db, cqHolder.release(), &request, driver,
- opDebug, &rawExec);
- }
- else {
- // This is the idhack fast-path for getting a PlanExecutor without doing the work
- // to create a CanonicalQuery.
- status = getExecutorUpdate(request.getOpCtx(), db, nsString.ns(), &request, driver,
- opDebug, &rawExec);
- }
-
- uassert(17243,
- "could not get executor" + request.getQuery().toString() + "; " + causedBy(status),
- status.isOK());
-
- // Create the plan executor and setup all deps.
- scoped_ptr<PlanExecutor> exec(rawExec);
-
- // Register executor with the collection cursor cache.
- const ScopedExecutorRegistration safety(exec.get());
-
- // Run the plan (don't need to collect results because UpdateStage always returns
- // NEED_TIME).
- uassertStatusOK(exec->executePlan());
-
- // Get stats from the root stage.
- invariant(exec->getRootStage()->stageType() == STAGE_UPDATE);
- UpdateStage* updateStage = static_cast<UpdateStage*>(exec->getRootStage());
- const UpdateStats* updateStats =
- static_cast<const UpdateStats*>(updateStage->getSpecificStats());
-
- // Use stats from the root stage to fill out opDebug.
- opDebug->nMatched = updateStats->nMatched;
- opDebug->nModified = updateStats->nModified;
- opDebug->upsert = updateStats->inserted;
- opDebug->fastmodinsert = updateStats->fastmodinsert;
- opDebug->fastmod = updateStats->fastmod;
-
- // Historically, 'opDebug' considers 'nMatched' and 'nModified' to be 1 (rather than 0) if
- // there is an upsert that inserts a document. The UpdateStage does not participate in this
- // madness in order to have saner stats reporting for explain. This means that we have to
- // set these values "manually" in the case of an insert.
- if (updateStats->inserted) {
- opDebug->nMatched = 1;
- opDebug->nModified = 1;
- }
-
- // Get summary information about the plan.
- PlanSummaryStats stats;
- Explain::getSummaryStats(exec.get(), &stats);
- opDebug->nscanned = stats.totalKeysExamined;
- opDebug->nscannedObjects = stats.totalDocsExamined;
-
- return UpdateResult(updateStats->nMatched > 0 /* Did we update at least one obj? */,
- !driver->isDocReplacement() /* $mod or obj replacement */,
- opDebug->nModified /* number of modified docs, no no-ops */,
- opDebug->nMatched /* # of docs matched/updated, even no-ops */,
- updateStats->objInserted);
- }
-
BSONObj applyUpdateOperators(const BSONObj& from, const BSONObj& operators) {
UpdateDriver::Options opts;
UpdateDriver driver(opts);
diff --git a/src/mongo/db/ops/update.h b/src/mongo/db/ops/update.h
index cff79a0d90b..b28d722b28b 100644
--- a/src/mongo/db/ops/update.h
+++ b/src/mongo/db/ops/update.h
@@ -51,20 +51,6 @@ namespace mongo {
OpDebug* opDebug);
/**
- * Execute the update described by "request", using the given already-parsed
- * driver and canonical query.
- *
- * NOTE: This function is really a utility method for UpdateExecutor.
- *
- * TODO: Move this into a private method of UpdateExecutor.
- */
- UpdateResult update(Database* db,
- const UpdateRequest& request,
- OpDebug* opDebug,
- UpdateDriver* driver,
- CanonicalQuery* cq);
-
- /**
* takes the from document and returns a new document
* after apply all the operators
* e.g.
diff --git a/src/mongo/db/ops/update_executor.cpp b/src/mongo/db/ops/update_executor.cpp
index 268e7616d3c..b2f96860bb6 100644
--- a/src/mongo/db/ops/update_executor.cpp
+++ b/src/mongo/db/ops/update_executor.cpp
@@ -30,13 +30,38 @@
#include "mongo/db/ops/update_executor.h"
+#include "mongo/db/catalog/database.h"
+#include "mongo/db/exec/update.h"
#include "mongo/db/ops/update.h"
+#include "mongo/db/ops/update_driver.h"
+#include "mongo/db/ops/update_lifecycle.h"
#include "mongo/db/ops/update_request.h"
#include "mongo/db/query/canonical_query.h"
+#include "mongo/db/query/get_executor.h"
#include "mongo/util/assert_util.h"
+#include "mongo/util/log.h"
namespace mongo {
+ namespace {
+
+ // TODO: Make this a function on NamespaceString, or make it cleaner.
+ inline void validateUpdate(const char* ns ,
+ const BSONObj& updateobj,
+ const BSONObj& patternOrig) {
+ uassert(10155 , "cannot update reserved $ collection", strchr(ns, '$') == 0);
+ if (strstr(ns, ".system.")) {
+ /* dm: it's very important that system.indexes is never updated as IndexDetails
+ has pointers into it */
+ uassert(10156,
+ str::stream() << "cannot update system collection: "
+ << ns << " q: " << patternOrig << " u: " << updateobj,
+ legalClientSystemNS(ns , true));
+ }
+ }
+
+ } // namespace
+
UpdateExecutor::UpdateExecutor(const UpdateRequest* request, OpDebug* opDebug) :
_request(request),
_opDebug(opDebug),
@@ -62,13 +87,105 @@ namespace mongo {
return Status::OK();
}
+ PlanExecutor* UpdateExecutor::getPlanExecutor() {
+ return _exec.get();
+ }
+
+ Status UpdateExecutor::prepareInLock(Database* db) {
+ // If we have a non-NULL PlanExecutor, then we've already done the in-lock preparation.
+ if (_exec.get()) {
+ return Status::OK();
+ }
+
+ const NamespaceString& nsString = _request->getNamespaceString();
+ UpdateLifecycle* lifecycle = _request->getLifecycle();
+
+ Collection* collection = db->getCollection(_request->getOpCtx(), nsString.ns());
+
+ validateUpdate(nsString.ns().c_str(), _request->getUpdates(), _request->getQuery());
+
+ // TODO: This seems a bit circuitous.
+ _opDebug->updateobj = _request->getUpdates();
+
+ if (lifecycle) {
+ lifecycle->setCollection(collection);
+ _driver.refreshIndexKeys(lifecycle->getIndexKeys(_request->getOpCtx()));
+ }
+
+ PlanExecutor* rawExec = NULL;
+ Status getExecStatus = Status::OK();
+ if (_canonicalQuery.get()) {
+ // This is the regular path for when we have a CanonicalQuery.
+ getExecStatus = getExecutorUpdate(_request->getOpCtx(), db, _canonicalQuery.release(),
+ _request, &_driver, _opDebug, &rawExec);
+ }
+ else {
+ // This is the idhack fast-path for getting a PlanExecutor without doing the work
+ // to create a CanonicalQuery.
+ getExecStatus = getExecutorUpdate(_request->getOpCtx(), db, nsString.ns(), _request,
+ &_driver, _opDebug, &rawExec);
+ }
+
+ if (getExecStatus.isOK()) {
+ invariant(rawExec);
+ _exec.reset(rawExec);
+ }
+
+ return getExecStatus;
+ }
+
UpdateResult UpdateExecutor::execute(Database* db) {
uassertStatusOK(prepare());
- return update(db,
- *_request,
- _opDebug,
- &_driver,
- _canonicalQuery.release());
+
+ LOG(3) << "processing update : " << *_request;
+
+ // If we've already done the in-lock preparation, this is a no-op.
+ Status status = prepareInLock(db);
+ uassert(17243,
+ "could not get executor " + _request->getQuery().toString()
+ + "; " + causedBy(status),
+ status.isOK());
+
+ // Register executor with the collection cursor cache.
+ const ScopedExecutorRegistration safety(_exec.get());
+
+ // Run the plan (don't need to collect results because UpdateStage always returns
+ // NEED_TIME).
+ uassertStatusOK(_exec->executePlan());
+
+ // Get stats from the root stage.
+ invariant(_exec->getRootStage()->stageType() == STAGE_UPDATE);
+ UpdateStage* updateStage = static_cast<UpdateStage*>(_exec->getRootStage());
+ const UpdateStats* updateStats =
+ static_cast<const UpdateStats*>(updateStage->getSpecificStats());
+
+ // Use stats from the root stage to fill out opDebug.
+ _opDebug->nMatched = updateStats->nMatched;
+ _opDebug->nModified = updateStats->nModified;
+ _opDebug->upsert = updateStats->inserted;
+ _opDebug->fastmodinsert = updateStats->fastmodinsert;
+ _opDebug->fastmod = updateStats->fastmod;
+
+ // Historically, 'opDebug' considers 'nMatched' and 'nModified' to be 1 (rather than 0) if
+ // there is an upsert that inserts a document. The UpdateStage does not participate in this
+ // madness in order to have saner stats reporting for explain. This means that we have to
+ // set these values "manually" in the case of an insert.
+ if (updateStats->inserted) {
+ _opDebug->nMatched = 1;
+ _opDebug->nModified = 1;
+ }
+
+ // Get summary information about the plan.
+ PlanSummaryStats stats;
+ Explain::getSummaryStats(_exec.get(), &stats);
+ _opDebug->nscanned = stats.totalKeysExamined;
+ _opDebug->nscannedObjects = stats.totalDocsExamined;
+
+ return UpdateResult(updateStats->nMatched > 0 /* Did we update at least one obj? */,
+ !_driver.isDocReplacement() /* $mod or obj replacement */,
+ _opDebug->nModified /* number of modified docs, no no-ops */,
+ _opDebug->nMatched /* # of docs matched/updated, even no-ops */,
+ updateStats->objInserted);
}
Status UpdateExecutor::parseQuery() {
diff --git a/src/mongo/db/ops/update_executor.h b/src/mongo/db/ops/update_executor.h
index f4a62224c59..9294de770d8 100644
--- a/src/mongo/db/ops/update_executor.h
+++ b/src/mongo/db/ops/update_executor.h
@@ -34,6 +34,7 @@
#include "mongo/base/status.h"
#include "mongo/db/ops/update_driver.h"
#include "mongo/db/ops/update_result.h"
+#include "mongo/db/query/plan_executor.h"
namespace mongo {
@@ -88,6 +89,22 @@ namespace mongo {
Status prepare();
/**
+ * Performs preparatory work that *does* require the appropriate database lock. This
+ * preparation involves construction of a PlanExecutor. Construction of a PlanExecutor
+ * requires the database lock because it goes through query planning and optimization,
+ * which may involve partial execution of the update plan tree.
+ *
+ * On success, a non-NULL PlanExecutor will be available via getPlanExecutor().
+ */
+ Status prepareInLock(Database* db);
+
+ /**
+ * Retrieve the PlanExecutor that will be used to execute this update upon calling
+ * execute(). Returns NULL if no PlanExecutor has been created.
+ */
+ PlanExecutor* getPlanExecutor();
+
+ /**
* Execute an update. Requires the caller to hold the database lock on the
* appropriate resources for the request.
*/
@@ -116,6 +133,9 @@ namespace mongo {
/// Parsed query object, or NULL if the query proves to be an id hack query.
std::auto_ptr<CanonicalQuery> _canonicalQuery;
+ // The tree of execution stages which will be used to execute the update.
+ boost::scoped_ptr<PlanExecutor> _exec;
+
/// Flag indicating if the query has been successfully parsed.
bool _isQueryParsed;
diff --git a/src/mongo/db/ops/update_request.h b/src/mongo/db/ops/update_request.h
index 5ed6eca209a..8bc42db69d4 100644
--- a/src/mongo/db/ops/update_request.h
+++ b/src/mongo/db/ops/update_request.h
@@ -31,6 +31,7 @@
#include "mongo/db/jsobj.h"
#include "mongo/db/curop.h"
#include "mongo/db/namespace_string.h"
+#include "mongo/db/query/explain.h"
#include "mongo/util/mongoutils/str.h"
namespace mongo {
@@ -51,7 +52,8 @@ namespace mongo {
, _callLogOp(false)
, _fromMigration(false)
, _fromReplication(false)
- , _lifecycle(NULL) {}
+ , _lifecycle(NULL)
+ , _isExplain(false) {}
const NamespaceString& getNamespaceString() const {
return _nsString;
@@ -136,6 +138,14 @@ namespace mongo {
return _txn;
}
+ inline void setExplain(bool value = true) {
+ _isExplain = value;
+ }
+
+ inline bool isExplain() const {
+ return _isExplain;
+ }
+
const std::string toString() const {
return str::stream()
<< " query: " << _query
@@ -145,7 +155,8 @@ namespace mongo {
<< " multi: " << _multi
<< " callLogOp: " << _callLogOp
<< " fromMigration: " << _fromMigration
- << " fromReplications: " << _fromReplication;
+ << " fromReplications: " << _fromReplication
+ << " isExplain: " << _isExplain;
}
private:
@@ -183,6 +194,10 @@ namespace mongo {
// The lifecycle data, and events used during the update request.
UpdateLifecycle* _lifecycle;
+
+ // Whether or not we are requesting an explained update. Explained updates are read-only.
+ bool _isExplain;
+
};
} // namespace mongo
diff --git a/src/mongo/db/query/explain.cpp b/src/mongo/db/query/explain.cpp
index 68ebe4f86b7..633f869e832 100644
--- a/src/mongo/db/query/explain.cpp
+++ b/src/mongo/db/query/explain.cpp
@@ -401,6 +401,20 @@ namespace mongo {
bob->append("indexPrefix", spec->indexPrefix);
bob->append("parsedTextQuery", spec->parsedTextQuery);
}
+ else if (STAGE_UPDATE == stats.stageType) {
+ UpdateStats* spec = static_cast<UpdateStats*>(stats.specific.get());
+
+ if (verbosity >= Explain::EXEC_STATS) {
+ bob->appendNumber("nMatched", spec->nMatched);
+ bob->appendNumber("nWouldModify", spec->nModified);
+ bob->appendBool("wouldInsert", spec->inserted);
+ }
+
+ if (verbosity == Explain::FULL) {
+ bob->appendBool("fastmod", spec->fastmod);
+ bob->appendBool("fastmodinsert", spec->fastmodinsert);
+ }
+ }
// We're done if there are no children.
if (stats.children.empty()) {
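
A sketch of how the verbosity levels affect the new update-stage section: per the code
above, nMatched, nWouldModify and wouldInsert appear at "executionStats" and higher, while
fastmod and fastmodinsert appear only at "allPlansExecution" (Explain::FULL). The exact
output location and the values printed are illustrative.

    var full = db.runCommand({
        explain: {update: "explain_slaveok", updates: [{q: {a: 1}, u: {$set: {a: 5}}}]},
        verbosity: "allPlansExecution"
    });
    var stages = full.executionStats.executionStages;
    printjson({nMatched: stages.nMatched, nWouldModify: stages.nWouldModify,
               wouldInsert: stages.wouldInsert, fastmod: stages.fastmod,
               fastmodinsert: stages.fastmodinsert});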