summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
author    Max Hirschhorn <max.hirschhorn@mongodb.com>    2016-01-15 15:10:24 -0500
committer Ramon Fernandez <ramon@mongodb.com>            2016-03-29 17:42:03 -0400
commit   752031f3a0186167997631d64a2aea2409ab0f1a (patch)
tree     34ed5be0d9dc8fbcf9b0576bcbe9659b4ec3353b
parent   36b4c60e7ccc96e3c05479136d264109d61a5339 (diff)
download mongo-752031f3a0186167997631d64a2aea2409ab0f1a.tar.gz
SERVER-21863 Prevent map-reduce from inserting >16MB documents.
(cherry picked from commit 64a7daba1746dcda0f7d25eab82d35e2c093d54f)
-rw-r--r--  jstests/core/mr_bigobject_replace.js  | 54
-rw-r--r--  src/mongo/db/commands/mr.cpp          | 20
2 files changed, 74 insertions(+), 0 deletions(-)
diff --git a/jstests/core/mr_bigobject_replace.js b/jstests/core/mr_bigobject_replace.js
new file mode 100644
index 00000000000..dbed4664e7a
--- /dev/null
+++ b/jstests/core/mr_bigobject_replace.js
@@ -0,0 +1,54 @@
+/**
+ * Test that the server returns an error response for map-reduce operations that attempt to insert a
+ * document larger than 16MB as a result of the reduce() or finalize() functions and using the
+ * "replace" action for the out collection.
+ */
+(function() {
+ function mapper() {
+ // Emit multiple values to ensure that the reducer gets called.
+ emit(this._id, 1);
+ emit(this._id, 1);
+ }
+
+ function createBigDocument() {
+ // Returns a document of the form { _id: ObjectId(...), value: '...' } with the specified
+ // 'targetSize' in bytes.
+ function makeDocWithSize(targetSize) {
+ var doc = {_id: new ObjectId(), value: ''};
+
+ var size = Object.bsonsize(doc);
+ assert.gte(targetSize, size);
+
+ // Set 'value' as a string with enough characters to make the whole document 'size'
+ // bytes long.
+ doc.value = new Array(targetSize - size + 1).join('x');
+ assert.eq(targetSize, Object.bsonsize(doc));
+
+ return doc;
+ }
+
+ var maxDocSize = 16 * 1024 * 1024;
+ return makeDocWithSize(maxDocSize + 1).value;
+ }
+
+ function runTest(testOptions) {
+ db.input.drop();
+ db.mr_bigobject_replace.drop();
+
+ // Insert a document so the mapper gets run.
+ assert.writeOK(db.input.insert({}));
+
+ var res = db.runCommand(Object.extend({
+ mapReduce: "input",
+ map: mapper,
+ out: {replace: "mr_bigobject_replace"},
+ }, testOptions));
+
+ assert.commandFailed(res, "creating a document larger than 16MB didn't fail");
+ assert.lte(0, res.errmsg.indexOf("object to insert too large"),
+ "map-reduce command failed for a reason other than inserting a large document");
+ }
+
+ runTest({reduce: createBigDocument});
+ runTest({reduce: function() { return 1; }, finalize: createBigDocument});
+})();
diff --git a/src/mongo/db/commands/mr.cpp b/src/mongo/db/commands/mr.cpp
index d04e91b5db1..f93dee2b554 100644
--- a/src/mongo/db/commands/mr.cpp
+++ b/src/mongo/db/commands/mr.cpp
@@ -32,6 +32,8 @@
#include "mongo/db/commands/mr.h"
+#include "mongo/base/status_with.h"
+#include "mongo/bson/util/builder.h"
#include "mongo/client/connpool.h"
#include "mongo/client/parallel.h"
#include "mongo/db/auth/authorization_session.h"
@@ -54,6 +56,7 @@
#include "mongo/db/namespace_string.h"
#include "mongo/db/op_observer.h"
#include "mongo/db/operation_context_impl.h"
+#include "mongo/db/ops/insert.h"
#include "mongo/db/query/get_executor.h"
#include "mongo/db/query/query_planner.h"
#include "mongo/db/query/find_common.h"
@@ -74,6 +77,7 @@
#include "mongo/scripting/engine.h"
#include "mongo/stdx/mutex.h"
#include "mongo/util/log.h"
+#include "mongo/util/mongoutils/str.h"
#include "mongo/util/scopeguard.h"
namespace mongo {
@@ -697,6 +701,11 @@ void State::insert(const string& ns, const BSONObj& o) {
b.appendElements(o);
BSONObj bo = b.obj();
+ StatusWith<BSONObj> res = fixDocumentForInsert(bo);
+ uassertStatusOK(res.getStatus());
+ if (!res.getValue().isEmpty()) {
+ bo = res.getValue();
+ }
uassertStatusOK(coll->insertDocument(_txn, bo, true));
wuow.commit();
}
@@ -716,6 +725,17 @@ void State::_insertToInc(BSONObj& o) {
bool shouldReplicateWrites = _txn->writesAreReplicated();
_txn->setReplicatedWrites(false);
ON_BLOCK_EXIT(&OperationContext::setReplicatedWrites, _txn, shouldReplicateWrites);
+
+ // The documents inserted into the incremental collection are of the form
+ // {"0": <key>, "1": <value>}, so we cannot call fixDocumentForInsert(o) here because the
+ // check that the document has an "_id" field would fail. Instead, we directly verify that
+ // the size of the document to insert is smaller than 16MB.
+ if (o.objsize() > BSONObjMaxUserSize) {
+ uasserted(ErrorCodes::BadValue,
+ str::stream() << "object to insert too large for incremental collection"
+ << ". size in bytes: " << o.objsize()
+ << ", max size: " << BSONObjMaxUserSize);
+ }
uassertStatusOK(coll->insertDocument(_txn, o, true, false));
wuow.commit();
}