author     Greg Studer <greg@10gen.com>        2014-03-14 11:59:18 -0400
committer  Greg Studer <greg@10gen.com>        2014-03-17 10:02:22 -0400
commit     b5756056a7b5acba550a7d31a587f3a5b7651e00 (patch)
tree       c74127db66b4845b2a9ff44b468551dadfeb75f6
parent     ecc00b5bfba8961e0884e591ba1866eeb0cb67ab (diff)
download   mongo-b5756056a7b5acba550a7d31a587f3a5b7651e00.tar.gz
SERVER-12977 disallow empty write batches
Also fix shell batch processing to avoid extra empty batches.
-rw-r--r--   jstests/core/batch_write_command_insert.js                 8
-rw-r--r--   src/mongo/db/commands/write_commands/batch_executor.cpp    9
-rw-r--r--   src/mongo/s/cluster_write.cpp                               9
-rw-r--r--   src/mongo/shell/bulk_api.js                                36
4 files changed, 40 insertions, 22 deletions
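For illustration, a hypothetical mongo shell session showing the new server-side check; the collection name is made up, and asserting on result.ok mirrors what resultNOK() verifies in the jstest included in this patch:

    // Hypothetical shell session; collection name is illustrative.
    var coll = db.batch_write_insert_empty;
    coll.remove({});

    // An insert command whose documents array is empty is now rejected
    // up front with an InvalidLength error instead of being accepted.
    var result = coll.runCommand({ insert: coll.getName(), documents: [] });
    assert.eq(0, result.ok);      // command fails...
    assert.eq(0, coll.count());   // ...and nothing was written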
diff --git a/jstests/core/batch_write_command_insert.js b/jstests/core/batch_write_command_insert.js
index 5f7482d671d..542d24d9dc3 100644
--- a/jstests/core/batch_write_command_insert.js
+++ b/jstests/core/batch_write_command_insert.js
@@ -123,6 +123,14 @@ assert(resultNOK(result));
 assert.eq(coll.count(), 0);
 
 //
+// Batch of size zero should fail to insert
+coll.remove({});
+printjson( request = {insert : coll.getName(),
+                      documents: [] } );
+printjson( result = coll.runCommand(request) );
+assert(resultNOK(result));
+
+//
 //
 // Unique index tests
 coll.remove({});
diff --git a/src/mongo/db/commands/write_commands/batch_executor.cpp b/src/mongo/db/commands/write_commands/batch_executor.cpp
index 06ca5479e9d..213ff3d9db5 100644
--- a/src/mongo/db/commands/write_commands/batch_executor.cpp
+++ b/src/mongo/db/commands/write_commands/batch_executor.cpp
@@ -179,9 +179,16 @@ namespace mongo {
             return;
         }
 
+        if ( request.sizeWriteOps() == 0u ) {
+            toBatchError( Status( ErrorCodes::InvalidLength,
+                                  "no write ops were included in the batch" ),
+                          response );
+            return;
+        }
+
         // Validate batch size
         if ( request.sizeWriteOps() > BatchedCommandRequest::kMaxWriteBatchSize ) {
-            toBatchError( Status( ErrorCodes::FailedToParse,
+            toBatchError( Status( ErrorCodes::InvalidLength,
                                   stream() << "exceeded maximum write batch size of "
                                            << BatchedCommandRequest::kMaxWriteBatchSize ),
                           response );
diff --git a/src/mongo/s/cluster_write.cpp b/src/mongo/s/cluster_write.cpp
index 5a8e690d61e..8b2008f02b8 100644
--- a/src/mongo/s/cluster_write.cpp
+++ b/src/mongo/s/cluster_write.cpp
@@ -333,8 +333,15 @@ namespace mongo {
             return;
         }
 
+        if ( request.sizeWriteOps() == 0u ) {
+            toBatchError( Status( ErrorCodes::InvalidLength,
+                                  "no write ops were included in the batch" ),
+                          response );
+            return;
+        }
+
         if ( request.sizeWriteOps() > BatchedCommandRequest::kMaxWriteBatchSize ) {
-            toBatchError( Status( ErrorCodes::FailedToParse,
+            toBatchError( Status( ErrorCodes::InvalidLength,
                                   str::stream() << "exceeded maximum write batch size of "
                                                 << BatchedCommandRequest::kMaxWriteBatchSize ),
                           response );
diff --git a/src/mongo/shell/bulk_api.js b/src/mongo/shell/bulk_api.js
index 5af814b81d9..2dd2bb94ff2 100644
--- a/src/mongo/shell/bulk_api.js
+++ b/src/mongo/shell/bulk_api.js
@@ -431,34 +431,30 @@ var _bulk_api_module = (function() {
 
     // Add to internal list of documents
     var addToOperationsList = function(docType, document) {
+
+      if (Array.isArray(document))
+        throw Error("operation passed in cannot be an Array");
+
       // Get the bsonSize
       var bsonSize = Object.bsonsize(document);
+
       // Create a new batch object if we don't have a current one
      if(currentBatch == null) currentBatch = new Batch(docType, currentIndex);
+      // Finalize and create a new batch if this op would take us over the
+      // limits *or* if this op is of a different type
+      if(currentBatchSize + 1 > maxNumberOfDocsInBatch
+          || (currentBatchSize > 0 &&
+              currentBatchSizeBytes + bsonSize >= maxBatchSizeBytes)
+          || currentBatch.batchType != docType) {
+        finalizeBatch(docType);
+      }
+
+      currentBatch.operations.push(document);
+      currentIndex = currentIndex + 1;
 
       // Update current batch size
       currentBatchSize = currentBatchSize + 1;
       currentBatchSizeBytes = currentBatchSizeBytes + bsonSize;
-
-      // Finalize and create a new batch if we have a new operation type
-      if (currentBatch.batchType != docType) {
-        finalizeBatch(docType);
-      }
-
-      // We have an array of documents
-      if(Array.isArray(document)) {
-        throw Error("operation passed in cannot be an Array");
-      } else {
-        currentBatch.operations.push(document)
-        currentIndex = currentIndex + 1;
-      }
-
-      // Check if the batch exceeds one of the size limits
-      if((currentBatchSize >= maxNumberOfDocsInBatch)
-        || (currentBatchSizeBytes >= maxBatchSizeBytes)) {
-        finalizeBatch(docType);
-      }
-
     };
 
     /**
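The bulk_api.js change can also be seen from the shell. A rough usage sketch follows; the collection name and op mix are illustrative, not part of the commit. With the reordered checks, a batch is only finalized when the next op actually needs a new one (size limit or type change), and the op is always pushed into the current batch afterwards, so no trailing empty batch is left to be sent to the server:

    // Hypothetical bulk op mixing operation types; names are illustrative.
    var bulk = db.bulk_batching_demo.initializeOrderedBulkOp();

    // First op: a batch is created and the insert is pushed into it right
    // away, so the batch is never empty when it is later finalized.
    bulk.insert({ _id: 1 });

    // Switching op type finalizes the (non-empty) insert batch and starts
    // an update batch for this op.
    bulk.find({ _id: 1 }).updateOne({ $set: { x: 1 } });

    printjson(bulk.execute());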