author     Martin Bligh <mbligh@mongodb.com>   2015-10-26 10:19:39 -0400
committer  Martin Bligh <mbligh@mongodb.com>   2015-10-26 10:21:05 -0400
commit     b9c5d9f97d63429c36cb9027b6fc1594b67373a9 (patch)
tree       3e2dc520f6f6d15c2c2f4fc1687c4b32b435af70
parent     b82f1a20b8c3b44f84e5744fcf9ed062341e7d58 (diff)
download   mongo-b9c5d9f97d63429c36cb9027b6fc1594b67373a9.tar.gz
SERVER-21093: Make ordered bulk insert operation error out properly
-rw-r--r--  jstests/core/batch_write_command_insert.js                16
-rw-r--r--  src/mongo/db/commands/write_commands/batch_executor.cpp   11
-rw-r--r--  src/mongo/db/commands/write_commands/batch_executor.h      3
3 files changed, 26 insertions(+), 4 deletions(-)
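
What "error out properly" means here: when a bulk operation is ordered, the server must stop at the first write error and leave every later document uninserted; an unordered bulk operation reports the error but keeps processing the rest of the batch. A minimal mongo shell sketch of that contrast (the collection name t and the _id values are illustrative only, not part of this change):

    // Ordered: execution halts at the first duplicate key.
    db.t.drop();
    db.t.insert({_id: 3});                     // pre-existing doc forces a duplicate key
    var ordered = db.t.initializeOrderedBulkOp();
    ordered.insert({_id: 1});
    ordered.insert({_id: 2});
    ordered.insert({_id: 3});                  // fails here
    ordered.insert({_id: 4});                  // never attempted
    try { ordered.execute(); } catch (e) { print(e); }
    print(db.t.count());                       // 3: the pre-existing doc plus _id 1 and 2

    // Unordered: the duplicate is reported, but the remaining inserts still run.
    db.t.drop();
    db.t.insert({_id: 3});
    var unordered = db.t.initializeUnorderedBulkOp();
    unordered.insert({_id: 1});
    unordered.insert({_id: 2});
    unordered.insert({_id: 3});                // fails, but processing continues
    unordered.insert({_id: 4});                // still inserted
    try { unordered.execute(); } catch (e) { print(e); }
    print(db.t.count());                       // 4
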
diff --git a/jstests/core/batch_write_command_insert.js b/jstests/core/batch_write_command_insert.js
index 54ed12d846b..a9c271f3d24 100644
--- a/jstests/core/batch_write_command_insert.js
+++ b/jstests/core/batch_write_command_insert.js
@@ -302,6 +302,22 @@ assert(!result.ok, tojson(result));
assert.eq(coll.getIndexes().length, 0);
//
+// Ensure we error out correctly in the middle of a batch
+coll.drop();
+coll.insert({_id: 50}); // Create a document to force a duplicate key exception.
+
+var bulk = coll.initializeOrderedBulkOp();
+for (var i = 1; i < 100; i++) {
+    bulk.insert({_id: i});
+}
+try {
+ bulk.execute();
+ assert(false, "should have failed due to duplicate key");
+} catch(err) {
+ assert.eq(coll.count(), 50, "Unexpected number inserted by bulk write: " + coll.count());
+}
+
+//
// Background index creation
// Note: due to SERVER-13304 this test is at the end of this file, and we don't drop
// the collection afterwards.
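
A caveat on the try/catch shape in the hunk above: if bulk.execute() ever succeeded, the assert(false, ...) would itself be caught by the same catch block, so the failure would surface through the count check rather than the "should have failed" message. An equivalent, slightly tighter formulation using the shell's assert.throws helper (a sketch only, not part of the commit):

    coll.drop();
    coll.insert({_id: 50});                        // pre-existing doc forces the duplicate key
    var bulk = coll.initializeOrderedBulkOp();
    for (var i = 1; i < 100; i++) {
        bulk.insert({_id: i});
    }
    assert.throws(function() { bulk.execute(); }); // the ordered bulk must abort on the duplicate
    assert.eq(coll.count(), 50, "ordered bulk should stop after hitting the duplicate at _id 50");
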
diff --git a/src/mongo/db/commands/write_commands/batch_executor.cpp b/src/mongo/db/commands/write_commands/batch_executor.cpp
index 753b6917b3d..184807edea6 100644
--- a/src/mongo/db/commands/write_commands/batch_executor.cpp
+++ b/src/mongo/db/commands/write_commands/batch_executor.cpp
@@ -753,7 +753,8 @@ static void normalizeInserts(const BatchedCommandRequest& request,
static void insertOne(WriteBatchExecutor::ExecInsertsState* state, WriteOpResult* result);
// Loops over the specified subset of the batch, processes one document at a time.
-void WriteBatchExecutor::insertMany(WriteBatchExecutor::ExecInsertsState* state,
+// Returns true if the caller should stop inserting (an ordered batch hit an error), false otherwise.
+bool WriteBatchExecutor::insertMany(WriteBatchExecutor::ExecInsertsState* state,
size_t startIndex,
size_t endIndex,
CurOp* currentOp,
@@ -791,11 +792,12 @@ void WriteBatchExecutor::insertMany(WriteBatchExecutor::ExecInsertsState* state,
CurOp* const currentOp = CurOp::get(_txn);
logCurOpError(currentOp, error);
if (ordered)
- break;
+ return true;
} else {
_le->recordInsert(nInserted);
}
}
+ return false;
}
// Instantiates an ExecInsertsState, which represents all of the state for the batch.
@@ -832,7 +834,8 @@ void WriteBatchExecutor::execInserts(const BatchedCommandRequest& request,
if ((chunkCount >= chunkMaxCount) || (chunkBytes >= insertVectorMaxBytes) ||
(i == maxIndex)) {
- insertMany(&state, startIndex, i + 1, currentOp, errors, request.getOrdered());
+ const bool stop =
+     insertMany(&state, startIndex, i + 1, currentOp, errors, request.getOrdered());
startIndex = i + 1;
chunkCount = 0;
chunkBytes = 0;
@@ -846,6 +849,8 @@ void WriteBatchExecutor::execInserts(const BatchedCommandRequest& request,
// Since the lock manager guarantees FIFO queues waiting on locks,
// there is no need to explicitly sleep or give up control of the processor here.
}
+ if (stop)
+ break;
}
}
}
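
Reduced to its control flow, the server-side change is a stop signal threaded through two loops: insertMany used to break out of its own per-document loop on an ordered-batch error, but execInserts then carried on feeding the next chunk; now insertMany returns true and execInserts breaks out of the chunking loop as well. A shell JavaScript mirror of that shape (processChunk, runBatch, and the collection t are illustrative names, not the server code):

    // Returns true if the caller should stop, i.e. an ordered batch hit a write error.
    function processChunk(docs, start, end, ordered) {
        for (var i = start; i < end; i++) {
            var res = db.t.insert(docs[i]);
            if (res.hasWriteError() && ordered)
                return true;        // abandon the rest of the batch
        }
        return false;
    }

    function runBatch(docs, chunkSize, ordered) {
        for (var start = 0; start < docs.length; start += chunkSize) {
            var end = Math.min(start + chunkSize, docs.length);
            if (processChunk(docs, start, end, ordered))
                break;              // the fix: propagate the stop signal past the chunk boundary
        }
    }
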
diff --git a/src/mongo/db/commands/write_commands/batch_executor.h b/src/mongo/db/commands/write_commands/batch_executor.h
index 78ab8028449..945adfca021 100644
--- a/src/mongo/db/commands/write_commands/batch_executor.h
+++ b/src/mongo/db/commands/write_commands/batch_executor.h
@@ -85,8 +85,9 @@ private:
/**
* Inserts a subset of an insert batch.
+ * Returns true if the caller should stop inserting (an ordered batch hit an error), false otherwise.
*/
- void insertMany(WriteBatchExecutor::ExecInsertsState* state,
+ bool insertMany(WriteBatchExecutor::ExecInsertsState* state,
size_t startIndex,
size_t endIndex,
CurOp* currentOp,