summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--jstests/core/explain_delete.js75
-rw-r--r--jstests/slow2/sharding_jscore_passthrough.js1
-rw-r--r--src/mongo/db/commands/write_commands/write_commands.cpp107
-rw-r--r--src/mongo/db/exec/delete.cpp27
-rw-r--r--src/mongo/db/exec/delete.h9
-rw-r--r--src/mongo/db/ops/delete_executor.cpp69
-rw-r--r--src/mongo/db/ops/delete_executor.h20
-rw-r--r--src/mongo/db/ops/delete_request.h6
-rw-r--r--src/mongo/db/query/explain.cpp7
-rw-r--r--src/mongo/db/query/get_executor.cpp8
-rw-r--r--src/mongo/db/query/get_executor.h2
11 files changed, 261 insertions, 70 deletions
diff --git a/jstests/core/explain_delete.js b/jstests/core/explain_delete.js
new file mode 100644
index 00000000000..31fd4a002b3
--- /dev/null
+++ b/jstests/core/explain_delete.js
@@ -0,0 +1,75 @@
+// Tests for explaining the delete command.
+
+var collName = "jstests_explain_delete";
+var t = db[collName];
+t.drop();
+
+var explain;
+
+/**
+ * Verify that the explain command output 'explain' shows a DELETE stage with an nWouldDelete
+ * value equal to 'nWouldDelete'.
+ */
+function checkNWouldDelete(explain, nWouldDelete) {
+ printjson(explain);
+ assert.commandWorked(explain);
+ assert("executionStats" in explain);
+ var executionStats = explain.executionStats;
+ assert("executionStages" in executionStats);
+ var rootStage = executionStats.executionStages;
+ assert.eq(rootStage.stage, "DELETE");
+ assert.eq(rootStage.nWouldDelete, nWouldDelete);
+}
+
+// Explain delete against an empty collection.
+explain = db.runCommand({
+ explain: {
+ delete: collName,
+ deletes: [
+ {q: {a: 1}, limit: 0}
+ ]
+ }
+});
+checkNWouldDelete(explain, 0);
+
+// Add an index but no data, and check that the explain still works.
+t.ensureIndex({a: 1});
+explain = db.runCommand({
+ explain: {
+ delete: collName,
+ deletes: [
+ {q: {a: 1}, limit: 0}
+ ]
+ }
+});
+checkNWouldDelete(explain, 0);
+
+// Add some copies of the same document.
+for (var i = 0; i < 10; i++) {
+ t.insert({a: 1});
+}
+assert.eq(10, t.count());
+
+// Run an explain which shows that all 10 documents *would* be deleted.
+explain = db.runCommand({
+ explain: {
+ delete: collName,
+ deletes: [
+ {q: {a: 1}, limit: 0}
+ ]
+ }
+});
+checkNWouldDelete(explain, 10);
+
+// Make sure all 10 documents are still there.
+assert.eq(10, t.count());
+
+// If we run the same thing without the explain, then all 10 docs should be deleted.
+var deleteResult = db.runCommand({
+ delete: collName,
+ deletes: [
+ {q: {a: 1}, limit: 0}
+ ]
+});
+assert.commandWorked(deleteResult);
+assert.eq(0, t.count());
diff --git a/jstests/slow2/sharding_jscore_passthrough.js b/jstests/slow2/sharding_jscore_passthrough.js
index 985a8d24b21..15a7636da17 100644
--- a/jstests/slow2/sharding_jscore_passthrough.js
+++ b/jstests/slow2/sharding_jscore_passthrough.js
@@ -106,6 +106,7 @@ files.forEach(function(x) {
'dbadmin|' +
'error1|' +
'explain_count|' + // TODO: allow passthrough once explain is implemented on mongos
+ 'explain_delete|' + // TODO: allow passthrough once explain is implemented on mongos
'explain_find|' + // TODO: allow passthrough once explain is implemented on mongos
'fsync|' +
'fsync2|' +
diff --git a/src/mongo/db/commands/write_commands/write_commands.cpp b/src/mongo/db/commands/write_commands/write_commands.cpp
index be49326cf48..11aeafe7be2 100644
--- a/src/mongo/db/commands/write_commands/write_commands.cpp
+++ b/src/mongo/db/commands/write_commands/write_commands.cpp
@@ -36,6 +36,8 @@
#include "mongo/db/curop.h"
#include "mongo/db/json.h"
#include "mongo/db/lasterror.h"
+#include "mongo/db/ops/delete_executor.h"
+#include "mongo/db/ops/delete_request.h"
#include "mongo/db/ops/update_executor.h"
#include "mongo/db/ops/update_lifecycle_impl.h"
#include "mongo/db/query/explain.h"
@@ -147,10 +149,11 @@ namespace mongo {
const BSONObj& cmdObj,
Explain::Verbosity verbosity,
BSONObjBuilder* out) const {
- // For now we only explain updates.
- if ( BatchedCommandRequest::BatchType_Update != _writeType ) {
+ // For now we only explain update and delete write commands.
+ if ( BatchedCommandRequest::BatchType_Update != _writeType &&
+ BatchedCommandRequest::BatchType_Delete != _writeType ) {
return Status( ErrorCodes::IllegalOperation,
- "Non-update write ops cannot yet be explained" );
+ "Only update and delete write ops can be explained" );
}
// Parse the batch request.
@@ -184,39 +187,75 @@ namespace mongo {
// Get a reference to the singleton batch item (it's the 0th item in the batch).
BatchItemRef batchItem( &request, 0 );
- // Create the update request.
- UpdateRequest updateRequest( txn, nsString );
- updateRequest.setQuery( batchItem.getUpdate()->getQuery() );
- updateRequest.setUpdates( batchItem.getUpdate()->getUpdateExpr() );
- updateRequest.setMulti( batchItem.getUpdate()->getMulti() );
- updateRequest.setUpsert( batchItem.getUpdate()->getUpsert() );
- updateRequest.setUpdateOpLog( true );
- UpdateLifecycleImpl updateLifecycle( true, updateRequest.getNamespaceString() );
- updateRequest.setLifecycle( &updateLifecycle );
- updateRequest.setExplain();
-
- // Use the request to create an UpdateExecutor, and from it extract the
- // plan tree which will be used to execute this update.
- UpdateExecutor updateExecutor( &updateRequest, &txn->getCurOp()->debug() );
- Status prepStatus = updateExecutor.prepare();
- if ( !prepStatus.isOK() ) {
- return prepStatus;
+ if ( BatchedCommandRequest::BatchType_Update == _writeType ) {
+ // Create the update request.
+ UpdateRequest updateRequest( txn, nsString );
+ updateRequest.setQuery( batchItem.getUpdate()->getQuery() );
+ updateRequest.setUpdates( batchItem.getUpdate()->getUpdateExpr() );
+ updateRequest.setMulti( batchItem.getUpdate()->getMulti() );
+ updateRequest.setUpsert( batchItem.getUpdate()->getUpsert() );
+ updateRequest.setUpdateOpLog( true );
+ UpdateLifecycleImpl updateLifecycle( true, updateRequest.getNamespaceString() );
+ updateRequest.setLifecycle( &updateLifecycle );
+ updateRequest.setExplain();
+
+ // Use the request to create an UpdateExecutor, and from it extract the
+ // plan tree which will be used to execute this update.
+ UpdateExecutor updateExecutor( &updateRequest, &txn->getCurOp()->debug() );
+ Status prepStatus = updateExecutor.prepare();
+ if ( !prepStatus.isOK() ) {
+ return prepStatus;
+ }
+
+ // Explains of write commands are read-only, but we take a write lock so that timing
+ // info is more accurate.
+ Client::WriteContext ctx( txn, nsString );
+
+ Status prepInLockStatus = updateExecutor.prepareInLock( ctx.ctx().db() );
+ if ( !prepInLockStatus.isOK() ) {
+ return prepInLockStatus;
+ }
+
+ PlanExecutor* exec = updateExecutor.getPlanExecutor();
+ const ScopedExecutorRegistration safety( exec );
+
+ // Explain the plan tree.
+ return Explain::explainStages( exec, verbosity, out );
}
-
- // Explains of write commands are read-only, but we take a write lock so that timing info
- // is more accurate.
- Client::WriteContext ctx( txn, nsString );
-
- Status prepInLockStatus = updateExecutor.prepareInLock( ctx.ctx().db() );
- if ( !prepInLockStatus.isOK() ) {
- return prepInLockStatus;
+ else {
+ invariant( BatchedCommandRequest::BatchType_Delete == _writeType );
+
+ // Create the delete request.
+ DeleteRequest deleteRequest( txn, nsString );
+ deleteRequest.setQuery( batchItem.getDelete()->getQuery() );
+ deleteRequest.setMulti( batchItem.getDelete()->getLimit() != 1 );
+ deleteRequest.setUpdateOpLog(true);
+ deleteRequest.setGod( false );
+ deleteRequest.setExplain();
+
+ // Use the request to create a DeleteExecutor, and from it extract the
+            // plan tree which will be used to execute this delete.
+ DeleteExecutor deleteExecutor( &deleteRequest );
+ Status prepStatus = deleteExecutor.prepare();
+ if ( !prepStatus.isOK() ) {
+ return prepStatus;
+ }
+
+ // Explains of write commands are read-only, but we take a write lock so that timing
+ // info is more accurate.
+ Client::WriteContext ctx( txn, nsString );
+
+ Status prepInLockStatus = deleteExecutor.prepareInLock( ctx.ctx().db() );
+ if ( !prepInLockStatus.isOK()) {
+ return prepInLockStatus;
+ }
+
+ PlanExecutor* exec = deleteExecutor.getPlanExecutor();
+ const ScopedExecutorRegistration safety( exec );
+
+ // Explain the plan tree.
+ return Explain::explainStages( exec, verbosity, out );
}
-
- PlanExecutor* exec = updateExecutor.getPlanExecutor();
- const ScopedExecutorRegistration safety( exec );
-
- // Explain the plan tree.
- return Explain::explainStages( exec, verbosity, out );
}
CmdInsert::CmdInsert() :
diff --git a/src/mongo/db/exec/delete.cpp b/src/mongo/db/exec/delete.cpp
index d7e689e62f6..b8c4b8c0d93 100644
--- a/src/mongo/db/exec/delete.cpp
+++ b/src/mongo/db/exec/delete.cpp
@@ -100,20 +100,25 @@ namespace mongo {
const bool deleteCappedOK = false;
const bool deleteNoWarn = false;
- _collection->deleteDocument(_txn, rloc, deleteCappedOK, deleteNoWarn,
- _params.shouldCallLogOp ? &deletedDoc : NULL);
- if (_params.shouldCallLogOp) {
- if (deletedDoc.isEmpty()) {
- log() << "Deleted object without id in collection " << _collection->ns()
- << ", not logging.";
- }
- else {
- bool replJustOne = true;
- repl::logOp(_txn, "d", _collection->ns().ns().c_str(), deletedDoc, 0,
- &replJustOne, _params.fromMigrate);
+ // Do the write, unless this is an explain.
+ if (!_params.isExplain) {
+ _collection->deleteDocument(_txn, rloc, deleteCappedOK, deleteNoWarn,
+ _params.shouldCallLogOp ? &deletedDoc : NULL);
+
+ if (_params.shouldCallLogOp) {
+ if (deletedDoc.isEmpty()) {
+ log() << "Deleted object without id in collection " << _collection->ns()
+ << ", not logging.";
+ }
+ else {
+ bool replJustOne = true;
+ repl::logOp(_txn, "d", _collection->ns().ns().c_str(), deletedDoc, 0,
+ &replJustOne, _params.fromMigrate);
+ }
}
}
+
wunit.commit();
}
diff --git a/src/mongo/db/exec/delete.h b/src/mongo/db/exec/delete.h
index 48a783c9b85..17dbc79a316 100644
--- a/src/mongo/db/exec/delete.h
+++ b/src/mongo/db/exec/delete.h
@@ -36,7 +36,11 @@ namespace mongo {
class OperationContext;
struct DeleteStageParams {
- DeleteStageParams() : isMulti(false), shouldCallLogOp(false), fromMigrate(false) { }
+ DeleteStageParams() :
+ isMulti(false),
+ shouldCallLogOp(false),
+ fromMigrate(false),
+ isExplain(false) { }
// Should we delete all documents returned from the child (a "multi delete"), or at most one
// (a "single delete")?
@@ -48,6 +52,9 @@ namespace mongo {
// Is this delete part of a migrate operation that is essentially like a no-op
// when the cluster is observed by an external client.
bool fromMigrate;
+
+ // Are we explaining a delete command rather than actually executing it?
+ bool isExplain;
};
/**
diff --git a/src/mongo/db/ops/delete_executor.cpp b/src/mongo/db/ops/delete_executor.cpp
index ddb0c29b025..25f603ce7f6 100644
--- a/src/mongo/db/ops/delete_executor.cpp
+++ b/src/mongo/db/ops/delete_executor.cpp
@@ -78,8 +78,16 @@ namespace mongo {
return status;
}
- long long DeleteExecutor::execute(Database* db) {
- uassertStatusOK(prepare());
+ PlanExecutor* DeleteExecutor::getPlanExecutor() {
+ return _exec.get();
+ }
+
+ Status DeleteExecutor::prepareInLock(Database* db) {
+ // If we have a non-NULL PlanExecutor, then we've already done the in-lock preparation.
+ if (_exec.get()) {
+ return Status::OK();
+ }
+
uassert(17417,
mongoutils::str::stream() <<
"DeleteExecutor::prepare() failed to parse query " << _request->getQuery(),
@@ -98,52 +106,71 @@ namespace mongo {
}
}
+        // Note that 'collection' may be NULL in the case that the collection we are trying to
+ // delete from does not exist. NULL 'collection' is handled by getExecutorDelete(); we
+ // expect to get back a plan executor whose plan is a DeleteStage on top of an EOFStage.
Collection* collection = db->getCollection(_request->getOpCtx(), ns.ns());
- if (NULL == collection) {
- return 0;
- }
- uassert(10101,
- str::stream() << "cannot remove from a capped collection: " << ns.ns(),
- !collection->isCapped());
+ if (collection && collection->isCapped()) {
+ return Status(ErrorCodes::IllegalOperation,
+ str::stream() << "cannot remove from a capped collection: " << ns.ns());
+ }
- uassert(ErrorCodes::NotMaster,
- str::stream() << "Not primary while removing from " << ns.ns(),
- !_request->shouldCallLogOp() ||
- repl::getGlobalReplicationCoordinator()->canAcceptWritesForDatabase(ns.db()));
+ if (_request->shouldCallLogOp() &&
+ !repl::getGlobalReplicationCoordinator()->canAcceptWritesForDatabase(ns.db())) {
+ return Status(ErrorCodes::NotMaster,
+ str::stream() << "Not primary while removing from " << ns.ns());
+ }
PlanExecutor* rawExec;
+ Status getExecStatus = Status::OK();
if (_canonicalQuery.get()) {
// This is the non-idhack branch.
- uassertStatusOK(getExecutorDelete(_request->getOpCtx(),
+ getExecStatus = getExecutorDelete(_request->getOpCtx(),
collection,
_canonicalQuery.release(),
_request->isMulti(),
_request->shouldCallLogOp(),
_request->isFromMigrate(),
- &rawExec));
+ _request->isExplain(),
+ &rawExec);
}
else {
// This is the idhack branch.
- uassertStatusOK(getExecutorDelete(_request->getOpCtx(),
+ getExecStatus = getExecutorDelete(_request->getOpCtx(),
collection,
ns.ns(),
_request->getQuery(),
_request->isMulti(),
_request->shouldCallLogOp(),
_request->isFromMigrate(),
- &rawExec));
+ _request->isExplain(),
+ &rawExec);
+ }
+
+ if (getExecStatus.isOK()) {
+ invariant(rawExec);
+ _exec.reset(rawExec);
}
- scoped_ptr<PlanExecutor> exec(rawExec);
+
+ return getExecStatus;
+ }
+
+ long long DeleteExecutor::execute(Database* db) {
+ uassertStatusOK(prepare());
+
+ // If we've already done the in-lock preparation, this is a no-op.
+ uassertStatusOK(prepareInLock(db));
+ invariant(_exec.get());
// Concurrently mutating state (by us) so we need to register 'exec'.
- const ScopedExecutorRegistration safety(exec.get());
+ const ScopedExecutorRegistration safety(_exec.get());
- uassertStatusOK(exec->executePlan());
+ uassertStatusOK(_exec->executePlan());
// Extract the number of documents deleted from the DeleteStage stats.
- invariant(exec->getRootStage()->stageType() == STAGE_DELETE);
- DeleteStage* deleteStage = static_cast<DeleteStage*>(exec->getRootStage());
+ invariant(_exec->getRootStage()->stageType() == STAGE_DELETE);
+ DeleteStage* deleteStage = static_cast<DeleteStage*>(_exec->getRootStage());
const DeleteStats* deleteStats =
static_cast<const DeleteStats*>(deleteStage->getSpecificStats());
return deleteStats->docsDeleted;
diff --git a/src/mongo/db/ops/delete_executor.h b/src/mongo/db/ops/delete_executor.h
index c0324b14901..e4f5c2f2fe4 100644
--- a/src/mongo/db/ops/delete_executor.h
+++ b/src/mongo/db/ops/delete_executor.h
@@ -32,6 +32,7 @@
#include "mongo/base/disallow_copying.h"
#include "mongo/base/status.h"
+#include "mongo/db/query/plan_executor.h"
namespace mongo {
@@ -87,6 +88,22 @@ namespace mongo {
Status prepare();
/**
+ * Performs preparatory work that *does* require the appropriate database lock. This
+ * preparation involves construction of a PlanExecutor. Construction of a PlanExecutor
+ * requires the database lock because it goes through query planning and optimization,
+ * which may involve partial execution of the delete plan tree.
+ *
+ * On success, a non-NULL PlanExecutor will be available via getPlanExecutor().
+ */
+ Status prepareInLock(Database* db);
+
+ /**
+ * Retrieve the PlanExecutor that will be used to execute this delete upon calling
+ * execute(). Returns NULL if no PlanExecutor has been created.
+ */
+ PlanExecutor* getPlanExecutor();
+
+ /**
* Execute a delete. Requires the caller to hold the database lock on the
* appropriate resources for the request.
*
@@ -101,6 +118,9 @@ namespace mongo {
/// Parsed query object, or NULL if the query proves to be an id hack query.
std::auto_ptr<CanonicalQuery> _canonicalQuery;
+        // The tree of execution stages which will be used to execute the delete.
+ boost::scoped_ptr<PlanExecutor> _exec;
+
/// Flag indicating if the query has been successfully parsed.
bool _isQueryParsed;
diff --git a/src/mongo/db/ops/delete_request.h b/src/mongo/db/ops/delete_request.h
index 53f8d13d475..ee58c59d476 100644
--- a/src/mongo/db/ops/delete_request.h
+++ b/src/mongo/db/ops/delete_request.h
@@ -45,13 +45,15 @@ namespace mongo {
_multi(false),
_logop(false),
_god(false),
- _fromMigrate(false) {}
+ _fromMigrate(false),
+ _isExplain(false) {}
void setQuery(const BSONObj& query) { _query = query; }
void setMulti(bool multi = true) { _multi = multi; }
void setUpdateOpLog(bool logop = true) { _logop = logop; }
void setGod(bool god = true) { _god = god; }
void setFromMigrate(bool fromMigrate = true) { _fromMigrate = fromMigrate; }
+ void setExplain(bool isExplain = true) { _isExplain = isExplain; }
const NamespaceString& getNamespaceString() const { return _nsString; }
const BSONObj& getQuery() const { return _query; }
@@ -59,6 +61,7 @@ namespace mongo {
bool shouldCallLogOp() const { return _logop; }
bool isGod() const { return _god; }
bool isFromMigrate() const { return _fromMigrate; }
+ bool isExplain() const { return _isExplain; }
OperationContext* getOpCtx() const { return _txn; }
std::string toString() const;
@@ -71,6 +74,7 @@ namespace mongo {
bool _logop;
bool _god;
bool _fromMigrate;
+ bool _isExplain;
};
} // namespace mongo
diff --git a/src/mongo/db/query/explain.cpp b/src/mongo/db/query/explain.cpp
index 2f5256461a1..773e97207b9 100644
--- a/src/mongo/db/query/explain.cpp
+++ b/src/mongo/db/query/explain.cpp
@@ -289,6 +289,13 @@ namespace mongo {
bob->append("keyPattern", spec->keyPattern);
bob->appendBool("isMultiKey", spec->isMultiKey);
}
+ else if (STAGE_DELETE == stats.stageType) {
+ DeleteStats* spec = static_cast<DeleteStats*>(stats.specific.get());
+
+ if (verbosity >= Explain::EXEC_STATS) {
+ bob->appendNumber("nWouldDelete", spec->docsDeleted);
+ }
+ }
else if (STAGE_FETCH == stats.stageType) {
FetchStats* spec = static_cast<FetchStats*>(stats.specific.get());
if (verbosity >= Explain::EXEC_STATS) {
diff --git a/src/mongo/db/query/get_executor.cpp b/src/mongo/db/query/get_executor.cpp
index d80c4c0ae01..f4244139a9a 100644
--- a/src/mongo/db/query/get_executor.cpp
+++ b/src/mongo/db/query/get_executor.cpp
@@ -474,6 +474,7 @@ namespace mongo {
bool isMulti,
bool shouldCallLogOp,
bool fromMigrate,
+ bool isExplain,
PlanExecutor** out) {
auto_ptr<CanonicalQuery> canonicalQuery(rawCanonicalQuery);
auto_ptr<WorkingSet> ws(new WorkingSet());
@@ -490,6 +491,7 @@ namespace mongo {
deleteStageParams.isMulti = isMulti;
deleteStageParams.shouldCallLogOp = shouldCallLogOp;
deleteStageParams.fromMigrate = fromMigrate;
+ deleteStageParams.isExplain = isExplain;
root = new DeleteStage(txn, deleteStageParams, ws.get(), collection, root);
// We must have a tree of stages in order to have a valid plan executor, but the query
// solution may be null.
@@ -505,12 +507,14 @@ namespace mongo {
bool isMulti,
bool shouldCallLogOp,
bool fromMigrate,
+ bool isExplain,
PlanExecutor** out) {
auto_ptr<WorkingSet> ws(new WorkingSet());
DeleteStageParams deleteStageParams;
deleteStageParams.isMulti = isMulti;
deleteStageParams.shouldCallLogOp = shouldCallLogOp;
deleteStageParams.fromMigrate = fromMigrate;
+ deleteStageParams.isExplain = isExplain;
if (!collection) {
// Treat collections that do not exist as empty collections. Note that the explain
// reporting machinery always assumes that the root stage for a delete operation is a
@@ -543,8 +547,8 @@ namespace mongo {
return status;
// Takes ownership of 'cq'.
- return getExecutorDelete(txn, collection, cq, isMulti,
- shouldCallLogOp, fromMigrate, out);
+ return getExecutorDelete(txn, collection, cq, isMulti, shouldCallLogOp,
+ fromMigrate, isExplain, out);
}
//
diff --git a/src/mongo/db/query/get_executor.h b/src/mongo/db/query/get_executor.h
index b0e01d4a2a7..fa8b89bc9fb 100644
--- a/src/mongo/db/query/get_executor.h
+++ b/src/mongo/db/query/get_executor.h
@@ -148,6 +148,7 @@ namespace mongo {
bool isMulti,
bool shouldCallLogOp,
bool fromMigrate,
+ bool isExplain,
PlanExecutor** execOut);
/**
@@ -166,6 +167,7 @@ namespace mongo {
bool isMulti,
bool shouldCallLogOp,
bool fromMigrate,
+ bool isExplain,
PlanExecutor** execOut);
//