diff options
author | Kyle Suarez <kyle.suarez@mongodb.com> | 2017-03-24 11:58:32 -0400 |
---|---|---|
committer | Kyle Suarez <kyle.suarez@mongodb.com> | 2017-03-24 12:03:31 -0400 |
commit | 59bf804f975b8128557215c61ca2447ca630abdc (patch) | |
tree | 7e2e4acc48ee4c8ae5def14a55dd7fd3df81dbbf /src | |
parent | 921f12c9aa005488ac93b672d74a63af51b93139 (diff) | |
download | mongo-59bf804f975b8128557215c61ca2447ca630abdc.tar.gz |
SERVER-28347 enforce storage depth limit for user documents
Introduces a nesting depth limit for document storage, which is lower
than the hard limit for general BSONObjects. Users cannot insert
documents exceeding this limit, nor can they update a document to exceed
it.
Diffstat (limited to 'src')
-rw-r--r-- | src/mongo/bson/bson_depth.cpp | 5 | ||||
-rw-r--r-- | src/mongo/bson/bson_depth.h | 15 | ||||
-rw-r--r-- | src/mongo/db/SConscript | 13 | ||||
-rw-r--r-- | src/mongo/db/exec/update.cpp | 166 | ||||
-rw-r--r-- | src/mongo/db/nesting_depth_test.cpp | 333 | ||||
-rw-r--r-- | src/mongo/db/ops/insert.cpp | 42 | ||||
-rw-r--r-- | src/mongo/db/ops/insert.h | 8 | ||||
-rw-r--r-- | src/mongo/dbtests/SConscript | 1 | ||||
-rw-r--r-- | src/mongo/dbtests/insert_test.cpp | 100 | ||||
-rw-r--r-- | src/mongo/executor/network_interface_asio_integration_fixture.cpp | 19 | ||||
-rw-r--r-- | src/mongo/executor/network_interface_asio_integration_fixture.h | 8 |
11 files changed, 661 insertions, 49 deletions
diff --git a/src/mongo/bson/bson_depth.cpp b/src/mongo/bson/bson_depth.cpp index 153d53d85ef..b131158c350 100644 --- a/src/mongo/bson/bson_depth.cpp +++ b/src/mongo/bson/bson_depth.cpp @@ -40,4 +40,9 @@ std::int32_t BSONDepth::maxAllowableDepth = BSONDepth::kDefaultMaxAllowableDepth std::uint32_t BSONDepth::getMaxAllowableDepth() { return static_cast<std::uint32_t>(BSONDepth::maxAllowableDepth); } + +std::uint32_t BSONDepth::getMaxDepthForUserStorage() { + return static_cast<std::uint32_t>(BSONDepth::maxAllowableDepth - + BSONDepth::kExtraSystemDepthLevels); +} } // namespace mongo diff --git a/src/mongo/bson/bson_depth.h b/src/mongo/bson/bson_depth.h index ef8ffc3631d..d2caef3979f 100644 --- a/src/mongo/bson/bson_depth.h +++ b/src/mongo/bson/bson_depth.h @@ -44,13 +44,26 @@ struct BSONDepth { // The maximum allowable value for the BSON depth parameter. static constexpr std::int32_t kBSONDepthParameterCeiling = 1000; + // The number of extra levels of nesting above the storage depth limit that the server will + // tolerate. + static constexpr std::uint32_t kExtraSystemDepthLevels = 20; + // The depth of BSON accepted by the server. Configurable via the 'maxBSONDepth' server // parameter. static std::int32_t maxAllowableDepth; /** - * Returns the maximum allowable BSON depth as an unsigned integer. + * Returns the maximum allowable BSON depth as an unsigned integer. Note that this is a hard + * limit -- any BSON document that exceeds this limit should be considered invalid. */ static std::uint32_t getMaxAllowableDepth(); + + /** + * Returns the BSON nesting depth limit for stored objects. User documents that exceed this + * depth are not valid for storage. This limit is slightly lower than the hard limit in + * getMaxAllowableDepth(), since we may generate things like oplog entries from these documents + * that contain extra levels of nesting. 
+ */ + static std::uint32_t getMaxDepthForUserStorage(); }; } // namespace mongo diff --git a/src/mongo/db/SConscript b/src/mongo/db/SConscript index 56792ae813d..40c9eabc16d 100644 --- a/src/mongo/db/SConscript +++ b/src/mongo/db/SConscript @@ -1144,3 +1144,16 @@ env.Library( '$BUILD_DIR/mongo/base', ], ) + +asioEnv = env.Clone() +asioEnv.InjectThirdPartyIncludePaths('asio') + +asioEnv.CppIntegrationTest( + target='nesting_depth_test', + source=[ + 'nesting_depth_test.cpp', + ], + LIBDEPS=[ + '$BUILD_DIR/mongo/executor/network_interface_asio_fixture', + ], +) diff --git a/src/mongo/db/exec/update.cpp b/src/mongo/db/exec/update.cpp index 1c67f79ba08..9a9b6c36a07 100644 --- a/src/mongo/db/exec/update.cpp +++ b/src/mongo/db/exec/update.cpp @@ -32,6 +32,10 @@ #include "mongo/db/exec/update.h" +#include <algorithm> + +#include "mongo/base/status_with.h" +#include "mongo/bson/bson_depth.h" #include "mongo/bson/mutable/algorithm.h" #include "mongo/db/bson/dotted_path_support.h" #include "mongo/db/concurrency/write_conflict_exception.h" @@ -65,15 +69,34 @@ namespace { const char idFieldName[] = "_id"; const FieldRef idFieldRef(idFieldName); -Status storageValid(const mb::Document&, const bool = true); -Status storageValid(const mb::ConstElement&, const bool = true); -Status storageValidChildren(const mb::ConstElement&, const bool = true); +StatusWith<std::uint32_t> storageValid(const mb::Document&, + bool deep, + std::uint32_t recursionLevel); +StatusWith<std::uint32_t> storageValid(const mb::ConstElement&, + bool deep, + std::uint32_t recursionLevel); +StatusWith<std::uint32_t> storageValidChildren(const mb::ConstElement&, + bool deep, + std::uint32_t recursionLevel); /** - * mutable::document storageValid check -- like BSONObj::_okForStorage + * Validates that the MutableBSON document 'doc' is acceptable for storage in a collection. If + * 'deep' is true, the check is performed recursively on subdocuments. 
+ * + * An error is returned if the validation fails or if 'recursionLevel' exceeds the maximum allowable + * depth. On success, an integer is returned that represents the nesting depth of this document. */ -Status storageValid(const mb::Document& doc, const bool deep) { +StatusWith<std::uint32_t> storageValid(const mb::Document& doc, + bool deep, + std::uint32_t recursionLevel) { + if (recursionLevel >= BSONDepth::getMaxDepthForUserStorage()) { + return Status(ErrorCodes::Overflow, + str::stream() << "Document exceeds maximum nesting depth of " + << BSONDepth::getMaxDepthForUserStorage()); + } + mb::ConstElement currElem = doc.root().leftChild(); + std::uint32_t greatestDepth = recursionLevel; while (currElem.ok()) { if (currElem.getFieldName() == idFieldName) { switch (currElem.getType()) { @@ -87,13 +110,20 @@ Status storageValid(const mb::Document& doc, const bool deep) { break; } } - Status s = storageValid(currElem, deep); - if (!s.isOK()) - return s; + + // Get the nesting depth of this child element. + auto depth = storageValid(currElem, deep, recursionLevel + 1); + if (!depth.isOK()) { + return depth; + } + + // The depth of this document is the depth of its deepest child, so we only keep track of + // the maximum depth seen so far. + greatestDepth = std::max(greatestDepth, depth.getValue()); currElem = currElem.rightSibling(); } - return Status::OK(); + return greatestDepth; } /** @@ -123,9 +153,13 @@ Status validateDollarPrefixElement(const mb::ConstElement elem, const bool deep) // Found a $id field if (currName == "$id") { - Status s = storageValidChildren(curr, deep); - if (!s.isOK()) - return s; + // We don't care about the recursion level being accurate, as the validate() command will + // perform full validation of the updated object. 
+ const uint32_t recursionLevel = 0; + auto depth = storageValidChildren(curr, deep, recursionLevel); + if (!depth.isOK()) { + return depth.getStatus(); + } curr = curr.leftSibling(); if (!curr.ok() || (curr.getFieldName() != "$ref")) { @@ -160,30 +194,50 @@ Status validateDollarPrefixElement(const mb::ConstElement elem, const bool deep) } /** - * Checks that all parents, of the element passed in, are valid for storage + * Checks that all of the parents of the MutableBSON element 'elem' are valid for storage. Note that + * 'elem' must be in a valid state when using this function. * - * Note: The elem argument must be in a valid state when using this function + * An error is returned if the validation fails, or if 'recursionLevel' exceeds the maximum + * allowable depth. On success, an integer is returned that represents the number of steps from this + * element to the root through ancestor nodes. */ -Status storageValidParents(const mb::ConstElement& elem) { +StatusWith<std::uint32_t> storageValidParents(const mb::ConstElement& elem, + std::uint32_t recursionLevel) { + if (recursionLevel >= BSONDepth::getMaxDepthForUserStorage()) { + return Status(ErrorCodes::Overflow, + str::stream() << "Document exceeds maximum nesting depth of " + << BSONDepth::getMaxDepthForUserStorage()); + } + const mb::ConstElement& root = elem.getDocument().root(); if (elem != root) { const mb::ConstElement& parent = elem.parent(); if (parent.ok() && parent != root) { - Status s = storageValid(parent, false); - if (s.isOK()) { - s = storageValidParents(parent); + const bool doRecursiveCheck = false; + const uint32_t parentsRecursionLevel = 0; + auto height = storageValid(parent, doRecursiveCheck, parentsRecursionLevel); + if (height.isOK()) { + height = storageValidParents(parent, recursionLevel + 1); } - - return s; + return height; } + return recursionLevel + 1; } - return Status::OK(); + return recursionLevel; } -Status storageValid(const mb::ConstElement& elem, const bool deep) { 
+StatusWith<std::uint32_t> storageValid(const mb::ConstElement& elem, + const bool deep, + std::uint32_t recursionLevel) { if (!elem.ok()) return Status(ErrorCodes::BadValue, "Invalid elements cannot be stored."); + if (recursionLevel >= BSONDepth::getMaxDepthForUserStorage()) { + return Status(ErrorCodes::Overflow, + str::stream() << "Document exceeds maximum nesting depth of " + << BSONDepth::getMaxDepthForUserStorage()); + } + // Field names of elements inside arrays are not meaningful in mutable bson, // so we do not want to validate them. // @@ -211,27 +265,38 @@ Status storageValid(const mb::ConstElement& elem, const bool deep) { if (deep) { // Check children if there are any. - Status s = storageValidChildren(elem, deep); - if (!s.isOK()) - return s; + auto depth = storageValidChildren(elem, deep, recursionLevel); + if (!depth.isOK()) { + return depth; + } + invariant(depth.getValue() >= recursionLevel); + return depth.getValue(); } - return Status::OK(); + return recursionLevel; } -Status storageValidChildren(const mb::ConstElement& elem, const bool deep) { - if (!elem.hasChildren()) - return Status::OK(); +StatusWith<std::uint32_t> storageValidChildren(const mb::ConstElement& elem, + const bool deep, + std::uint32_t recursionLevel) { + if (!elem.hasChildren()) { + return recursionLevel; + } + std::uint32_t greatestDepth = recursionLevel; mb::ConstElement curr = elem.leftChild(); while (curr.ok()) { - Status s = storageValid(curr, deep); - if (!s.isOK()) - return s; + auto depth = storageValid(curr, deep, recursionLevel + 1); + if (!depth.isOK()) { + return depth.getStatus(); + } + + // Find the maximum depth amongst all of the children of 'elem'. 
+ greatestDepth = std::max(greatestDepth, depth.getValue()); curr = curr.rightSibling(); } - return Status::OK(); + return greatestDepth; } /** @@ -262,9 +327,12 @@ inline Status validate(const BSONObj& original, if (updatedFields.empty() || !opts.enforceOkForStorage) { if (opts.enforceOkForStorage) { // No specific fields were updated so the whole doc must be checked - Status s = storageValid(updated, true); - if (!s.isOK()) - return s; + const bool doRecursiveCheck = true; + const std::uint32_t recursionLevel = 1; + auto documentDepth = storageValid(updated, doRecursiveCheck, recursionLevel); + if (!documentDepth.isOK()) { + return documentDepth.getStatus(); + } } // Check all immutable fields @@ -295,14 +363,28 @@ inline Status validate(const BSONObj& original, // newElem might be missing if $unset/$renamed-away if (newElem.ok()) { // Check element, and its children - Status s = storageValid(newElem, true); - if (!s.isOK()) - return s; + const bool doRecursiveCheck = true; + const std::uint32_t recursionLevel = 0; + auto newElemDepth = storageValid(newElem, doRecursiveCheck, recursionLevel); + if (!newElemDepth.isOK()) { + return newElemDepth.getStatus(); + } // Check parents to make sure they are valid as well. - s = storageValidParents(newElem); - if (!s.isOK()) - return s; + auto parentsDepth = storageValidParents(newElem, recursionLevel); + if (!parentsDepth.isOK()) { + return parentsDepth.getStatus(); + } + + // Ensure that the combined depths of both the new element and its parents do not + // exceed the maximum BSON depth. 
+ if (newElemDepth.getValue() + parentsDepth.getValue() > + BSONDepth::getMaxDepthForUserStorage()) { + return {ErrorCodes::Overflow, + str::stream() << "Update operation causes document to exceed maximum " + "nesting depth of " + << BSONDepth::getMaxDepthForUserStorage()}; + } } // Check if the updated field conflicts with immutable fields immutableFieldRef.findConflicts(&current, &changedImmutableFields); diff --git a/src/mongo/db/nesting_depth_test.cpp b/src/mongo/db/nesting_depth_test.cpp new file mode 100644 index 00000000000..139dddfa35e --- /dev/null +++ b/src/mongo/db/nesting_depth_test.cpp @@ -0,0 +1,333 @@ +/** + * Copyright (C) 2017 MongoDB Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License, version 3, + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the GNU Affero General Public License in all respects + * for all of the code used other than as permitted herein. If you modify + * file(s) with this exception, you may extend this exception to your + * version of the file(s), but you are not obligated to do so. If you do not + * wish to do so, delete this exception statement from your version. 
If you + * delete this exception statement from all source files in the program, + * then also delete it in the license file. + */ + +#include "mongo/platform/basic.h" + +#include <exception> + +#include "mongo/bson/bson_depth.h" +#include "mongo/bson/bson_validate.h" +#include "mongo/bson/json.h" +#include "mongo/client/connection_string.h" +#include "mongo/executor/network_interface_asio_integration_fixture.h" +#include "mongo/util/concurrency/thread_pool.h" + +namespace mongo { +namespace executor { +namespace { +class NestingDepthFixture : public NetworkInterfaceASIOIntegrationFixture { +public: + void setUp() final { + startNet(); + } +}; + +constexpr auto kCollectionName = "depthTest"; + +/** + * Appends an object to 'builder' that is nested 'depth' levels deep. + */ +void appendNestedObject(BSONObjBuilder* builder, size_t depth) { + if (depth == 1) { + builder->append("a", 1); + } else { + BSONObjBuilder subobj(builder->subobjStart("a")); + appendNestedObject(&subobj, depth - 1); + subobj.doneFast(); + } +} + +/** + * Appends a command to 'builder' that inserts a document nested 'depth' levels deep. Calling + * obj() on the builder returns the completed insert command as a BSONObj. + */ +void appendInsertCommandWithNestedDocument(BSONObjBuilder* builder, size_t depth) { + builder->append("insert", kCollectionName); + { + BSONArrayBuilder array(builder->subarrayStart("documents")); + { + BSONObjBuilder document(array.subobjStart()); + appendNestedObject(&document, depth); + document.doneFast(); + } + array.doneFast(); + } + builder->doneFast(); +} + +TEST_F(NestingDepthFixture, CanInsertLargeNestedDocumentAtOrUnderDepthLimit) { + BSONObjBuilder insertDocumentOneLessThanLimit; + appendInsertCommandWithNestedDocument(&insertDocumentOneLessThanLimit, + BSONDepth::getMaxDepthForUserStorage() - 1); + assertCommandOK(kCollectionName, insertDocumentOneLessThanLimit.obj()); + + // Insert a document exactly at the BSON nesting limit. 
+ BSONObjBuilder insertCommandExactLimit; + appendInsertCommandWithNestedDocument(&insertCommandExactLimit, + BSONDepth::getMaxDepthForUserStorage()); + assertCommandOK(kCollectionName, insertCommandExactLimit.obj()); +} + +TEST_F(NestingDepthFixture, CannotInsertLargeNestedDocumentExceedingDepthLimit) { + BSONObjBuilder insertCmd; + appendInsertCommandWithNestedDocument(&insertCmd, BSONDepth::getMaxDepthForUserStorage() + 1); + assertWriteError(kCollectionName, insertCmd.obj(), ErrorCodes::Overflow); +} + +/** + * Appends an array to 'builder' that is nested 'depth' levels deep. + */ +void appendNestedArray(BSONArrayBuilder* builder, size_t depth) { + if (depth == 1) { + builder->append(1); + } else { + BSONArrayBuilder subarr(builder->subarrayStart()); + appendNestedArray(&subarr, depth - 1); + subarr.doneFast(); + } +} + +/** + * Appends a command to 'builder' that inserts a document with an array nested 'depth' levels deep. + * Calling obj() on the builder returns the completed insert command as a BSONObj. + */ +void appendInsertCommandWithNestedArray(BSONObjBuilder* builder, size_t depth) { + builder->append("insert", kCollectionName); + { + BSONArrayBuilder documentsBuilder(builder->subarrayStart("documents")); + { + BSONObjBuilder docBuilder(documentsBuilder.subobjStart()); + { + BSONArrayBuilder arrayBuilder(docBuilder.subarrayStart("a")); + appendNestedArray(&arrayBuilder, depth - 1); + arrayBuilder.doneFast(); + } + docBuilder.doneFast(); + } + documentsBuilder.doneFast(); + } + builder->doneFast(); +} + +TEST_F(NestingDepthFixture, CanInsertLargeNestedArrayAtOrUnderDepthLimit) { + BSONObjBuilder insertDocumentOneLessThanLimit; + appendInsertCommandWithNestedArray(&insertDocumentOneLessThanLimit, + BSONDepth::getMaxDepthForUserStorage() - 1); + assertCommandOK(kCollectionName, insertDocumentOneLessThanLimit.obj()); + + // Insert a document exactly at the BSON nesting limit. 
+ BSONObjBuilder insertCommandExactLimit; + appendInsertCommandWithNestedArray(&insertCommandExactLimit, + BSONDepth::getMaxDepthForUserStorage()); + assertCommandOK(kCollectionName, insertCommandExactLimit.obj()); +} + +TEST_F(NestingDepthFixture, CannotInsertLargeNestedArrayExceedingDepthLimit) { + BSONObjBuilder insertCmd; + appendInsertCommandWithNestedArray(&insertCmd, BSONDepth::getMaxDepthForUserStorage() + 1); + assertWriteError(kCollectionName, insertCmd.obj(), ErrorCodes::Overflow); +} + +/** + * Creates a field name string that represents a document nested 'depth' levels deep. + */ +std::string getRepeatedFieldName(size_t depth) { + ASSERT_GT(depth, 0U); + + StringBuilder builder; + for (size_t i = 0U; i < depth - 1; i++) { + builder << "a."; + } + builder << "a"; + return builder.str(); +} + +/** + * Appends a command to 'builder' that updates a document with nesting depth 'originalNestingDepth' + * to be 'newNestingDepth' levels deep. For example, + * + * BSONObjBuilder b; + * appendUpdateCommandWithNestedDocuments(&b, 1, 2); + * + * appends an update to 'b' that updates { a: 1 } to be { a: { a: 1 } }. 
+ */ +void appendUpdateCommandWithNestedDocuments(BSONObjBuilder* builder, + size_t originalNestingDepth, + size_t newNestingDepth) { + ASSERT_GT(newNestingDepth, originalNestingDepth); + + auto originalFieldName = getRepeatedFieldName(originalNestingDepth); + builder->append("update", kCollectionName); + { + BSONArrayBuilder updates(builder->subarrayStart("updates")); + { + BSONObjBuilder updateDoc(updates.subobjStart()); + { + BSONObjBuilder query(updateDoc.subobjStart("q")); + query.append(originalFieldName, 1); + query.doneFast(); + } + { + BSONObjBuilder update(updateDoc.subobjStart("u")); + BSONObjBuilder set(update.subobjStart("$set")); + BSONObjBuilder field(set.subobjStart(originalFieldName)); + appendNestedObject(&field, newNestingDepth - originalNestingDepth); + field.doneFast(); + set.doneFast(); + update.doneFast(); + } + updateDoc.doneFast(); + } + } + builder->doneFast(); +} + +TEST_F(NestingDepthFixture, CanUpdateDocumentIfItStaysWithinDepthLimit) { + BSONObjBuilder insertCmd; + appendInsertCommandWithNestedDocument(&insertCmd, 3); + assertCommandOK(kCollectionName, insertCmd.obj()); + + BSONObjBuilder updateCmd; + appendUpdateCommandWithNestedDocuments(&updateCmd, 3, 5); + assertCommandOK(kCollectionName, updateCmd.obj()); +} + +TEST_F(NestingDepthFixture, CanUpdateDocumentToBeExactlyAtDepthLimit) { + const auto largeButValidDepth = BSONDepth::getMaxDepthForUserStorage() - 2; + BSONObjBuilder insertCmd; + appendInsertCommandWithNestedDocument(&insertCmd, largeButValidDepth); + assertCommandOK(kCollectionName, insertCmd.obj()); + + BSONObjBuilder updateCmd; + appendUpdateCommandWithNestedDocuments( + &updateCmd, largeButValidDepth, BSONDepth::getMaxDepthForUserStorage()); + assertCommandOK(kCollectionName, updateCmd.obj()); +} + +TEST_F(NestingDepthFixture, CannotUpdateDocumentToExceedDepthLimit) { + const auto largeButValidDepth = BSONDepth::getMaxDepthForUserStorage() - 3; + BSONObjBuilder insertCmd; + 
appendInsertCommandWithNestedDocument(&insertCmd, largeButValidDepth); + assertCommandOK(kCollectionName, insertCmd.obj()); + + BSONObjBuilder updateCmd; + appendUpdateCommandWithNestedDocuments( + &updateCmd, largeButValidDepth, BSONDepth::getMaxDepthForUserStorage() + 1); + assertWriteError(kCollectionName, updateCmd.obj(), ErrorCodes::Overflow); +} + +/** + * Creates a field name string that represents an array nested 'depth' levels deep. + */ +std::string getRepeatedArrayPath(size_t depth) { + ASSERT_GT(depth, 0U); + + StringBuilder builder; + builder << "a"; + for (size_t i = 0U; i < depth - 1; i++) { + builder << ".0"; + } + return builder.str(); +} + +/** + * Appends a command to 'builder' that updates a document with an array nested + * 'originalNestingDepth' levels deep to be 'newNestingDepth' levels deep. For example, + * + * BSONObjBuilder b; + * appendUpdateCommandWithNestedDocuments(&b, 3, 4); + * + * appends an update to 'b' that updates { a: [[1]] } to be { a: [[[1]]] }. + */ +void appendUpdateCommandWithNestedArrays(BSONObjBuilder* builder, + size_t originalNestingDepth, + size_t newNestingDepth) { + ASSERT_GT(newNestingDepth, originalNestingDepth); + + auto originalFieldName = getRepeatedArrayPath(originalNestingDepth); + builder->append("update", kCollectionName); + { + BSONArrayBuilder updates(builder->subarrayStart("updates")); + { + BSONObjBuilder updateDoc(updates.subobjStart()); + { + BSONObjBuilder query(updateDoc.subobjStart("q")); + query.append(originalFieldName, 1); + query.doneFast(); + } + { + BSONObjBuilder update(updateDoc.subobjStart("u")); + BSONObjBuilder set(update.subobjStart("$set")); + BSONArrayBuilder field(set.subobjStart(originalFieldName)); + appendNestedArray(&field, newNestingDepth - originalNestingDepth); + field.doneFast(); + set.doneFast(); + update.doneFast(); + } + updateDoc.doneFast(); + } + } + builder->doneFast(); +} + +TEST_F(NestingDepthFixture, CanUpdateArrayIfItStaysWithinDepthLimit) { + BSONObjBuilder 
insertCmd; + appendInsertCommandWithNestedArray(&insertCmd, 3); + assertCommandOK(kCollectionName, insertCmd.obj()); + + BSONObjBuilder updateCmd; + appendUpdateCommandWithNestedArrays(&updateCmd, 3, 5); + assertCommandOK(kCollectionName, updateCmd.obj()); +} + +TEST_F(NestingDepthFixture, CanUpdateArrayToBeExactlyAtDepthLimit) { + const auto largeButValidDepth = BSONDepth::getMaxDepthForUserStorage() - 1; + BSONObjBuilder insertCmd; + appendInsertCommandWithNestedArray(&insertCmd, largeButValidDepth); + assertCommandOK(kCollectionName, insertCmd.obj()); + + BSONObjBuilder updateCmd; + appendUpdateCommandWithNestedArrays( + &updateCmd, largeButValidDepth, BSONDepth::getMaxDepthForUserStorage()); + assertCommandOK(kCollectionName, updateCmd.obj()); +} + +TEST_F(NestingDepthFixture, CannotUpdateArrayToExceedDepthLimit) { + const auto largeButValidDepth = BSONDepth::getMaxDepthForUserStorage() - 4; + BSONObjBuilder insertCmd; + appendInsertCommandWithNestedArray(&insertCmd, largeButValidDepth); + assertCommandOK(kCollectionName, insertCmd.obj()); + + BSONObjBuilder updateCmd; + appendUpdateCommandWithNestedArrays( + &updateCmd, largeButValidDepth, BSONDepth::getMaxDepthForUserStorage() + 1); + assertWriteError(kCollectionName, updateCmd.obj(), ErrorCodes::Overflow); +} +} // namespace +} // namespace executor +} // namespace mongo diff --git a/src/mongo/db/ops/insert.cpp b/src/mongo/db/ops/insert.cpp index 1928ceb3661..f856c227138 100644 --- a/src/mongo/db/ops/insert.cpp +++ b/src/mongo/db/ops/insert.cpp @@ -27,8 +27,13 @@ * exception statement from all source files in the program, then also delete * it in the license file. 
*/ +#include "mongo/platform/basic.h" #include "mongo/db/ops/insert.h" + +#include <vector> + +#include "mongo/bson/bson_depth.h" #include "mongo/db/logical_clock.h" #include "mongo/db/logical_time.h" #include "mongo/db/views/durable_view_catalog.h" @@ -40,6 +45,38 @@ using std::string; using namespace mongoutils; +namespace { +/** + * Validates the nesting depth of 'obj', returning a non-OK status if it exceeds the limit. + */ +Status validateDepth(const BSONObj& obj) { + std::vector<BSONObjIterator> frames; + frames.reserve(16); + frames.emplace_back(obj); + + while (!frames.empty()) { + const auto elem = frames.back().next(); + if (elem.type() == BSONType::Object || elem.type() == BSONType::Array) { + if (MONGO_unlikely(frames.size() == BSONDepth::getMaxDepthForUserStorage())) { + // We're exactly at the limit, so descending to the next level would exceed + // the maximum depth. + return {ErrorCodes::Overflow, + str::stream() << "cannot insert document because it exceeds " + << BSONDepth::getMaxDepthForUserStorage() + << " levels of nesting"}; + } + frames.emplace_back(elem.embeddedObject()); + } + + if (!frames.back().more()) { + frames.pop_back(); + } + } + + return Status::OK(); +} +} // namespace + StatusWith<BSONObj> fixDocumentForInsert(ServiceContext* service, const BSONObj& doc) { if (doc.objsize() > BSONObjMaxUserSize) return StatusWith<BSONObj>(ErrorCodes::BadValue, @@ -49,6 +86,11 @@ StatusWith<BSONObj> fixDocumentForInsert(ServiceContext* service, const BSONObj& << ", max size: " << BSONObjMaxUserSize); + auto depthStatus = validateDepth(doc); + if (!depthStatus.isOK()) { + return depthStatus; + } + bool firstElementIsId = false; bool hasTimestampToFix = false; bool hadId = false; diff --git a/src/mongo/db/ops/insert.h b/src/mongo/db/ops/insert.h index 52233313571..187a3aa7e32 100644 --- a/src/mongo/db/ops/insert.h +++ b/src/mongo/db/ops/insert.h @@ -36,8 +36,12 @@ namespace mongo { class ServiceContext; /** - * if doc is ok, then return is BSONObj() 
- * otherwise, BSONObj is what should be inserted instead + * Validates that 'doc' is legal for insertion, possibly with some modifications. + * + * This function returns: + * - a non-OK status if 'doc' is not valid; + * - an empty BSONObj if 'doc' can be inserted as-is; or + * - a non-empty BSONObj representing what should be inserted instead of 'doc'. */ StatusWith<BSONObj> fixDocumentForInsert(ServiceContext* service, const BSONObj& doc); diff --git a/src/mongo/dbtests/SConscript b/src/mongo/dbtests/SConscript index 38dcf788f09..e99b08dab9c 100644 --- a/src/mongo/dbtests/SConscript +++ b/src/mongo/dbtests/SConscript @@ -67,6 +67,7 @@ dbtest = env.Program( 'index_access_method_test.cpp', 'indexcatalogtests.cpp', 'indexupdatetests.cpp', + 'insert_test.cpp', 'jsobjtests.cpp', 'jsontests.cpp', 'jstests.cpp', diff --git a/src/mongo/dbtests/insert_test.cpp b/src/mongo/dbtests/insert_test.cpp new file mode 100644 index 00000000000..6c234b4b1da --- /dev/null +++ b/src/mongo/dbtests/insert_test.cpp @@ -0,0 +1,100 @@ +/** + * Copyright (C) 2017 MongoDB Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License, version 3, + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. 
You + * must comply with the GNU Affero General Public License in all respects + * for all of the code used other than as permitted herein. If you modify + * file(s) with this exception, you may extend this exception to your + * version of the file(s), but you are not obligated to do so. If you do not + * wish to do so, delete this exception statement from your version. If you + * delete this exception statement from all source files in the program, + * then also delete it in the license file. + */ + +#include "mongo/platform/basic.h" + +#include "mongo/bson/bson_depth.h" +#include "mongo/db/client.h" +#include "mongo/db/db_raii.h" +#include "mongo/db/ops/insert.h" +#include "mongo/unittest/unittest.h" + +namespace mongo { +namespace { +const auto kInsertTestNss = NamespaceString{"dbtests.InsertTest"}; + +class InsertTest : public unittest::Test { +public: + InsertTest() + : _opCtx(cc().makeOperationContext()), + _lock(_opCtx.get()), + _autoColl(_opCtx.get(), kInsertTestNss, MODE_IX) {} + + const OperationContext* getOperationContext() const { + return _opCtx.get(); + } + +private: + ServiceContext::UniqueOperationContext _opCtx; + Lock::GlobalWrite _lock; + AutoGetCollection _autoColl; +}; + +BSONObj makeNestedObject(size_t depth) { + ASSERT_GTE(depth, 1U); + + auto obj = BSON("a" << 1); + while (--depth) { + obj = BSON("a" << obj); + } + + return obj; +} + +BSONObj makeNestedArray(size_t depth) { + ASSERT_GTE(depth, 1U); + + auto obj = BSON_ARRAY(1); + while (--depth) { + obj = BSON_ARRAY(obj); + } + + return obj; +} + +TEST_F(InsertTest, FixDocumentForInsertAcceptsEmptyDocuments) { + ASSERT_OK(fixDocumentForInsert(getOperationContext()->getServiceContext(), BSONObj())); +} + +TEST_F(InsertTest, FixDocumentForInsertAcceptsDocumentsAtStorageDepthLimit) { + ASSERT_OK(fixDocumentForInsert(getOperationContext()->getServiceContext(), + makeNestedObject(BSONDepth::getMaxDepthForUserStorage()))); + 
ASSERT_OK(fixDocumentForInsert(getOperationContext()->getServiceContext(), + makeNestedArray(BSONDepth::getMaxDepthForUserStorage()))); +} + +TEST_F(InsertTest, FixDocumentForInsertFailsOnDeeplyNestedDocuments) { + ASSERT_EQ(fixDocumentForInsert(getOperationContext()->getServiceContext(), + makeNestedObject(BSONDepth::getMaxDepthForUserStorage() + 1)), + ErrorCodes::Overflow); + ASSERT_EQ(fixDocumentForInsert(getOperationContext()->getServiceContext(), + makeNestedArray(BSONDepth::getMaxDepthForUserStorage() + 1)), + ErrorCodes::Overflow); +} +} // namespace +} // namespace mongo diff --git a/src/mongo/executor/network_interface_asio_integration_fixture.cpp b/src/mongo/executor/network_interface_asio_integration_fixture.cpp index 388de25f543..76f0e451a71 100644 --- a/src/mongo/executor/network_interface_asio_integration_fixture.cpp +++ b/src/mongo/executor/network_interface_asio_integration_fixture.cpp @@ -119,6 +119,7 @@ void NetworkInterfaceASIOIntegrationFixture::assertCommandOK(StringData db, auto res = runCommandSync(request); ASSERT_OK(res.status); ASSERT_OK(getStatusFromCommandResult(res.data)); + ASSERT(!res.data["writeErrors"]); } void NetworkInterfaceASIOIntegrationFixture::assertCommandFailsOnClient( @@ -138,5 +139,21 @@ void NetworkInterfaceASIOIntegrationFixture::assertCommandFailsOnServer( auto serverStatus = getStatusFromCommandResult(res.data); ASSERT_EQ(reason, serverStatus); } + +void NetworkInterfaceASIOIntegrationFixture::assertWriteError(StringData db, + const BSONObj& cmd, + ErrorCodes::Error reason, + Milliseconds timeoutMillis) { + RemoteCommandRequest request{ + fixture().getServers()[0], db.toString(), cmd, BSONObj(), nullptr, timeoutMillis}; + auto res = runCommandSync(request); + ASSERT_OK(res.status); + ASSERT_OK(getStatusFromCommandResult(res.data)); + ASSERT(res.data["writeErrors"]); + auto firstWriteError = res.data["writeErrors"].embeddedObject().firstElement().embeddedObject(); + Status 
writeErrorStatus(ErrorCodes::fromInt(firstWriteError.getIntField("code")), + firstWriteError.getStringField("errmsg")); + ASSERT_EQ(reason, writeErrorStatus); } -} +} // namespace executor +} // namespace mongo diff --git a/src/mongo/executor/network_interface_asio_integration_fixture.h b/src/mongo/executor/network_interface_asio_integration_fixture.h index 3e62932b153..f6207e7a12c 100644 --- a/src/mongo/executor/network_interface_asio_integration_fixture.h +++ b/src/mongo/executor/network_interface_asio_integration_fixture.h @@ -25,10 +25,8 @@ * exception statement from all source files in the program, then also delete * it in the license file. */ - #pragma once - #include "mongo/unittest/unittest.h" #include "mongo/executor/network_interface_asio.h" @@ -78,10 +76,14 @@ public: ErrorCodes::Error reason, Milliseconds timeoutMillis = Minutes(5)); + void assertWriteError(StringData db, + const BSONObj& cmd, + ErrorCodes::Error reason, + Milliseconds timeoutMillis = Minutes(5)); + private: std::unique_ptr<NetworkInterfaceASIO> _net; PseudoRandom* _rng = nullptr; }; - } // namespace executor } // namespace mongo |