Diffstat (limited to 'src')
-rw-r--r--  src/mongo/db/auth/authz_manager_external_state_mock.cpp  |   3
-rw-r--r--  src/mongo/db/auth/role_graph_update.cpp                   |   5
-rw-r--r--  src/mongo/db/commands/write_commands/write_commands.cpp   |   1
-rw-r--r--  src/mongo/db/exec/update.cpp                              |   8
-rw-r--r--  src/mongo/db/nesting_depth_test.cpp                       | 143
-rw-r--r--  src/mongo/db/ops/parsed_update.cpp                        |  15
-rw-r--r--  src/mongo/db/ops/update.cpp                               |   3
-rw-r--r--  src/mongo/db/ops/update.h                                 |   4
-rw-r--r--  src/mongo/db/repl/storage_interface_impl_test.cpp         |  11
-rw-r--r--  src/mongo/db/update/arithmetic_node.cpp                   |   8
-rw-r--r--  src/mongo/db/update/update_driver.cpp                     | 284
-rw-r--r--  src/mongo/db/update/update_driver.h                       |  22
-rw-r--r--  src/mongo/db/update/update_driver_test.cpp                |  72
-rw-r--r--  src/mongo/dbtests/query_stage_update.cpp                  |  20
14 files changed, 450 insertions(+), 149 deletions(-)
diff --git a/src/mongo/db/auth/authz_manager_external_state_mock.cpp b/src/mongo/db/auth/authz_manager_external_state_mock.cpp
index 933f0aff752..8bdd5e71191 100644
--- a/src/mongo/db/auth/authz_manager_external_state_mock.cpp
+++ b/src/mongo/db/auth/authz_manager_external_state_mock.cpp
@@ -179,7 +179,8 @@ Status AuthzManagerExternalStateMock::updateOne(OperationContext* opCtx,
namespace mmb = mutablebson;
UpdateDriver::Options updateOptions;
UpdateDriver driver(updateOptions);
- Status status = driver.parse(updatePattern);
+ std::map<StringData, std::unique_ptr<ArrayFilter>> arrayFilters;
+ Status status = driver.parse(updatePattern, arrayFilters);
if (!status.isOK())
return status;
diff --git a/src/mongo/db/auth/role_graph_update.cpp b/src/mongo/db/auth/role_graph_update.cpp
index d1b45e3b98d..d8148f56132 100644
--- a/src/mongo/db/auth/role_graph_update.cpp
+++ b/src/mongo/db/auth/role_graph_update.cpp
@@ -175,7 +175,10 @@ Status handleOplogUpdate(OperationContext* opCtx,
UpdateDriver::Options updateOptions;
UpdateDriver driver(updateOptions);
- status = driver.parse(updatePattern);
+
+ // Oplog updates do not have array filters.
+ std::map<StringData, std::unique_ptr<ArrayFilter>> arrayFilters;
+ status = driver.parse(updatePattern, arrayFilters);
if (!status.isOK())
return status;
diff --git a/src/mongo/db/commands/write_commands/write_commands.cpp b/src/mongo/db/commands/write_commands/write_commands.cpp
index 7ddf04a7aff..b9e92e13947 100644
--- a/src/mongo/db/commands/write_commands/write_commands.cpp
+++ b/src/mongo/db/commands/write_commands/write_commands.cpp
@@ -313,6 +313,7 @@ public:
updateRequest.setQuery(batch.updates[0].query);
updateRequest.setCollation(batch.updates[0].collation);
updateRequest.setUpdates(batch.updates[0].update);
+ updateRequest.setArrayFilters(batch.updates[0].arrayFilters);
updateRequest.setMulti(batch.updates[0].multi);
updateRequest.setUpsert(batch.updates[0].upsert);
updateRequest.setYieldPolicy(PlanExecutor::YIELD_AUTO);
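For context on what the new setArrayFilters() call plumbs through: a hedged sketch of an update command whose batch.updates[0].arrayFilters entry would be non-empty, using the 3.6 filtered-positional syntax. The collection, field names, and filter values are illustrative only, not taken from this patch.

    #include "mongo/db/json.h"

    // Each filter names an identifier ("i") that the update path refers to via $[i].
    const mongo::BSONObj updateCmd = mongo::fromjson(
        "{update: 'coll', updates: [{q: {}, u: {$set: {'grades.$[i]': 100}},"
        " arrayFilters: [{i: {$lt: 60}}]}]}");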
diff --git a/src/mongo/db/exec/update.cpp b/src/mongo/db/exec/update.cpp
index 90cd46ab423..87145233e3d 100644
--- a/src/mongo/db/exec/update.cpp
+++ b/src/mongo/db/exec/update.cpp
@@ -89,7 +89,7 @@ StatusWith<std::uint32_t> storageValidChildren(const mb::ConstElement&,
StatusWith<std::uint32_t> storageValid(const mb::Document& doc,
bool deep,
std::uint32_t recursionLevel) {
- if (recursionLevel >= BSONDepth::getMaxDepthForUserStorage()) {
+ if (recursionLevel > BSONDepth::getMaxDepthForUserStorage()) {
return Status(ErrorCodes::Overflow,
str::stream() << "Document exceeds maximum nesting depth of "
<< BSONDepth::getMaxDepthForUserStorage());
@@ -203,7 +203,7 @@ Status validateDollarPrefixElement(const mb::ConstElement elem, const bool deep)
*/
StatusWith<std::uint32_t> storageValidParents(const mb::ConstElement& elem,
std::uint32_t recursionLevel) {
- if (recursionLevel >= BSONDepth::getMaxDepthForUserStorage()) {
+ if (recursionLevel > BSONDepth::getMaxDepthForUserStorage()) {
return Status(ErrorCodes::Overflow,
str::stream() << "Document exceeds maximum nesting depth of "
<< BSONDepth::getMaxDepthForUserStorage());
@@ -232,7 +232,7 @@ StatusWith<std::uint32_t> storageValid(const mb::ConstElement& elem,
if (!elem.ok())
return Status(ErrorCodes::BadValue, "Invalid elements cannot be stored.");
- if (recursionLevel >= BSONDepth::getMaxDepthForUserStorage()) {
+ if (recursionLevel > BSONDepth::getMaxDepthForUserStorage()) {
return Status(ErrorCodes::Overflow,
str::stream() << "Document exceeds maximum nesting depth of "
<< BSONDepth::getMaxDepthForUserStorage());
@@ -322,7 +322,7 @@ inline Status validate(const BSONObj& original,
if (opts.enforceOkForStorage) {
// No specific fields were updated so the whole doc must be checked
const bool doRecursiveCheck = true;
- const std::uint32_t recursionLevel = 1;
+ const std::uint32_t recursionLevel = 0;
auto documentDepth = storageValid(updated, doRecursiveCheck, recursionLevel);
if (!documentDepth.isOK()) {
return documentDepth.getStatus();
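The three depth checks above relax from '>=' to '>' and the initial recursionLevel in validate() drops from 1 to 0, so a replacement document whose deepest element sits exactly at BSONDepth::getMaxDepthForUserStorage() is now accepted and only strictly deeper documents are rejected. A minimal standalone sketch of that counting convention (not the server's storageValid() itself):

    #include <cstdint>
    #include <vector>

    // Toy nested value: a node is either a scalar (no children) or a container.
    struct Node {
        std::vector<Node> children;
    };

    // The root is passed in at level 0; a value sitting exactly at 'maxDepth' is allowed,
    // and only strictly deeper values fail, mirroring the '>' comparison above.
    bool withinDepthLimit(const Node& node, std::uint32_t level, std::uint32_t maxDepth) {
        if (level > maxDepth) {
            return false;
        }
        for (const Node& child : node.children) {
            if (!withinDepthLimit(child, level + 1, maxDepth)) {
                return false;
            }
        }
        return true;
    }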
diff --git a/src/mongo/db/nesting_depth_test.cpp b/src/mongo/db/nesting_depth_test.cpp
index 139dddfa35e..fd14418024d 100644
--- a/src/mongo/db/nesting_depth_test.cpp
+++ b/src/mongo/db/nesting_depth_test.cpp
@@ -207,6 +207,42 @@ void appendUpdateCommandWithNestedDocuments(BSONObjBuilder* builder,
builder->doneFast();
}
+/**
+ * Appends a command to 'builder' that replaces a document nested 'originalNestingDepth' levels
+ * deep with a document nested 'newNestingDepth' levels deep. For example,
+ *
+ * BSONObjBuilder b;
+ * appendUpdateReplaceCommandWithNestedDocuments(&b, 1, 2);
+ *
+ * appends an update to 'b' that replaces { a: 1 } with { a: { a: 1 } }.
+ */
+void appendUpdateReplaceCommandWithNestedDocuments(BSONObjBuilder* builder,
+ size_t originalNestingDepth,
+ size_t newNestingDepth) {
+ ASSERT_GT(newNestingDepth, originalNestingDepth);
+
+ auto originalFieldName = getRepeatedFieldName(originalNestingDepth);
+ builder->append("update", kCollectionName);
+ {
+ BSONArrayBuilder updates(builder->subarrayStart("updates"));
+ {
+ BSONObjBuilder updateDoc(updates.subobjStart());
+ {
+ BSONObjBuilder query(updateDoc.subobjStart("q"));
+ query.append(originalFieldName, 1);
+ query.doneFast();
+ }
+ {
+ BSONObjBuilder update(updateDoc.subobjStart("u"));
+ appendNestedObject(&update, newNestingDepth);
+ update.doneFast();
+ }
+ updateDoc.doneFast();
+ }
+ }
+ builder->doneFast();
+}
+
TEST_F(NestingDepthFixture, CanUpdateDocumentIfItStaysWithinDepthLimit) {
BSONObjBuilder insertCmd;
appendInsertCommandWithNestedDocument(&insertCmd, 3);
@@ -241,6 +277,40 @@ TEST_F(NestingDepthFixture, CannotUpdateDocumentToExceedDepthLimit) {
assertWriteError(kCollectionName, updateCmd.obj(), ErrorCodes::Overflow);
}
+TEST_F(NestingDepthFixture, CanReplaceDocumentIfItStaysWithinDepthLimit) {
+ BSONObjBuilder insertCmd;
+ appendInsertCommandWithNestedDocument(&insertCmd, 3);
+ assertCommandOK(kCollectionName, insertCmd.obj());
+
+ BSONObjBuilder updateCmd;
+ appendUpdateReplaceCommandWithNestedDocuments(&updateCmd, 3, 5);
+ assertCommandOK(kCollectionName, updateCmd.obj());
+}
+
+TEST_F(NestingDepthFixture, CanReplaceDocumentToBeExactlyAtDepthLimit) {
+ const auto largeButValidDepth = BSONDepth::getMaxDepthForUserStorage() - 2;
+ BSONObjBuilder insertCmd;
+ appendInsertCommandWithNestedDocument(&insertCmd, largeButValidDepth);
+ assertCommandOK(kCollectionName, insertCmd.obj());
+
+ BSONObjBuilder updateCmd;
+ appendUpdateReplaceCommandWithNestedDocuments(
+ &updateCmd, largeButValidDepth, BSONDepth::getMaxDepthForUserStorage());
+ assertCommandOK(kCollectionName, updateCmd.obj());
+}
+
+TEST_F(NestingDepthFixture, CannotReplaceDocumentToExceedDepthLimit) {
+ const auto largeButValidDepth = BSONDepth::getMaxDepthForUserStorage() - 3;
+ BSONObjBuilder insertCmd;
+ appendInsertCommandWithNestedDocument(&insertCmd, largeButValidDepth);
+ assertCommandOK(kCollectionName, insertCmd.obj());
+
+ BSONObjBuilder updateCmd;
+ appendUpdateReplaceCommandWithNestedDocuments(
+ &updateCmd, largeButValidDepth, BSONDepth::getMaxDepthForUserStorage() + 1);
+ assertWriteError(kCollectionName, updateCmd.obj(), ErrorCodes::Overflow);
+}
+
/**
* Creates a field name string that represents an array nested 'depth' levels deep.
*/
@@ -295,6 +365,45 @@ void appendUpdateCommandWithNestedArrays(BSONObjBuilder* builder,
builder->doneFast();
}
+/**
+ * Appends a command to 'builder' that replaces a document containing an array nested
+ * 'originalNestingDepth' levels deep with one containing an array nested 'newNestingDepth'
+ * levels deep. For example,
+ *
+ * BSONObjBuilder b;
+ * appendUpdateReplaceCommandWithNestedArrays(&b, 3, 4);
+ *
+ * appends an update to 'b' that replaces { a: [[1]] } with { a: [[[1]]] }.
+ */
+void appendUpdateReplaceCommandWithNestedArrays(BSONObjBuilder* builder,
+ size_t originalNestingDepth,
+ size_t newNestingDepth) {
+ ASSERT_GT(newNestingDepth, originalNestingDepth);
+
+ auto originalFieldName = getRepeatedArrayPath(originalNestingDepth);
+ builder->append("update", kCollectionName);
+ {
+ BSONArrayBuilder updates(builder->subarrayStart("updates"));
+ {
+ BSONObjBuilder updateDoc(updates.subobjStart());
+ {
+ BSONObjBuilder query(updateDoc.subobjStart("q"));
+ query.append(originalFieldName, 1);
+ query.doneFast();
+ }
+ {
+ BSONObjBuilder update(updateDoc.subobjStart("u"));
+ BSONArrayBuilder field(update.subobjStart(originalFieldName));
+ appendNestedArray(&field, newNestingDepth - 1);
+ field.doneFast();
+ update.doneFast();
+ }
+ updateDoc.doneFast();
+ }
+ }
+ builder->doneFast();
+}
+
TEST_F(NestingDepthFixture, CanUpdateArrayIfItStaysWithinDepthLimit) {
BSONObjBuilder insertCmd;
appendInsertCommandWithNestedArray(&insertCmd, 3);
@@ -328,6 +437,40 @@ TEST_F(NestingDepthFixture, CannotUpdateArrayToExceedDepthLimit) {
&updateCmd, largeButValidDepth, BSONDepth::getMaxDepthForUserStorage() + 1);
assertWriteError(kCollectionName, updateCmd.obj(), ErrorCodes::Overflow);
}
+
+TEST_F(NestingDepthFixture, CanReplaceArrayIfItStaysWithinDepthLimit) {
+ BSONObjBuilder insertCmd;
+ appendInsertCommandWithNestedArray(&insertCmd, 3);
+ assertCommandOK(kCollectionName, insertCmd.obj());
+
+ BSONObjBuilder updateCmd;
+ appendUpdateReplaceCommandWithNestedArrays(&updateCmd, 3, 5);
+ assertCommandOK(kCollectionName, updateCmd.obj());
+}
+
+TEST_F(NestingDepthFixture, CanReplaceArrayToBeExactlyAtDepthLimit) {
+ const auto largeButValidDepth = BSONDepth::getMaxDepthForUserStorage() - 1;
+ BSONObjBuilder insertCmd;
+ appendInsertCommandWithNestedArray(&insertCmd, largeButValidDepth);
+ assertCommandOK(kCollectionName, insertCmd.obj());
+
+ BSONObjBuilder updateCmd;
+ appendUpdateReplaceCommandWithNestedArrays(
+ &updateCmd, largeButValidDepth, BSONDepth::getMaxDepthForUserStorage());
+ assertCommandOK(kCollectionName, updateCmd.obj());
+}
+
+TEST_F(NestingDepthFixture, CannotReplaceArrayToExceedDepthLimit) {
+ const auto largeButValidDepth = BSONDepth::getMaxDepthForUserStorage() - 4;
+ BSONObjBuilder insertCmd;
+ appendInsertCommandWithNestedArray(&insertCmd, largeButValidDepth);
+ assertCommandOK(kCollectionName, insertCmd.obj());
+
+ BSONObjBuilder updateCmd;
+ appendUpdateReplaceCommandWithNestedArrays(
+ &updateCmd, largeButValidDepth, BSONDepth::getMaxDepthForUserStorage() + 1);
+ assertWriteError(kCollectionName, updateCmd.obj(), ErrorCodes::Overflow);
+}
} // namespace
} // namespace executor
} // namespace mongo
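To make the new replace-style helpers concrete: per the doc comment on appendUpdateReplaceCommandWithNestedDocuments, the call below produces a command shaped roughly as shown in the trailing comment (the collection name is whatever kCollectionName is; this is an illustration, not code from the patch).

    BSONObjBuilder b;
    appendUpdateReplaceCommandWithNestedDocuments(&b, 1, 2);
    // b.obj() is shaped like:
    //   { update: <kCollectionName>, updates: [ { q: { a: 1 }, u: { a: { a: 1 } } } ] }
    // Because 'u' contains no $-operators this is a replacement-style update, so it exercises
    // the storageValid() depth checks in exec/update.cpp rather than the modifier code path.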
diff --git a/src/mongo/db/ops/parsed_update.cpp b/src/mongo/db/ops/parsed_update.cpp
index 2d3de40342a..b5979baed00 100644
--- a/src/mongo/db/ops/parsed_update.cpp
+++ b/src/mongo/db/ops/parsed_update.cpp
@@ -35,6 +35,7 @@
#include "mongo/db/query/canonical_query.h"
#include "mongo/db/query/collation/collator_factory_interface.h"
#include "mongo/db/query/query_planner_common.h"
+#include "mongo/db/server_options.h"
namespace mongo {
@@ -132,12 +133,20 @@ Status ParsedUpdate::parseUpdate() {
_driver.setModOptions(ModifierInterface::Options(
!_opCtx->writesAreReplicated(), shouldValidate, _collator.get()));
- return _driver.parse(_request->getUpdates(), _request->isMulti());
+ return _driver.parse(_request->getUpdates(), _arrayFilters, _request->isMulti());
}
Status ParsedUpdate::parseArrayFilters() {
const ExtensionsCallbackReal extensionsCallback(_opCtx, &_request->getNamespaceString());
+ if (!_request->getArrayFilters().empty() &&
+ serverGlobalParams.featureCompatibility.version.load() ==
+ ServerGlobalParams::FeatureCompatibility::Version::k34) {
+ return Status(ErrorCodes::InvalidOptions,
+ "The featureCompatibilityVersion must be 3.6 to use arrayFilters. See "
+ "http://dochub.mongodb.org/core/3.6-feature-compatibility.");
+ }
+
for (auto rawArrayFilter : _request->getArrayFilters()) {
auto arrayFilterStatus =
ArrayFilter::parse(rawArrayFilter, extensionsCallback, _collator.get());
@@ -195,6 +204,10 @@ void ParsedUpdate::setCollator(std::unique_ptr<CollatorInterface> collator) {
_collator = std::move(collator);
_driver.setCollator(_collator.get());
+
+ for (auto&& arrayFilter : _arrayFilters) {
+ arrayFilter.second->getFilter()->setCollator(_collator.get());
+ }
}
} // namespace mongo
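A minimal sketch of the compatibility gate added to parseArrayFilters(), factored into a hypothetical free function (checkArrayFiltersAllowed is not part of this patch) that depends only on the raw filters and the global feature compatibility state:

    #include <vector>

    #include "mongo/base/status.h"
    #include "mongo/bson/bsonobj.h"
    #include "mongo/db/server_options.h"

    namespace mongo {

    // Reject a non-empty arrayFilters list while the cluster is still in 3.4 mode.
    Status checkArrayFiltersAllowed(const std::vector<BSONObj>& rawArrayFilters) {
        if (rawArrayFilters.empty()) {
            return Status::OK();
        }
        if (serverGlobalParams.featureCompatibility.version.load() ==
            ServerGlobalParams::FeatureCompatibility::Version::k34) {
            return Status(ErrorCodes::InvalidOptions,
                          "The featureCompatibilityVersion must be 3.6 to use arrayFilters. See "
                          "http://dochub.mongodb.org/core/3.6-feature-compatibility.");
        }
        return Status::OK();
    }

    }  // namespace mongo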
diff --git a/src/mongo/db/ops/update.cpp b/src/mongo/db/ops/update.cpp
index 0e5a11e2a0f..2aa57637773 100644
--- a/src/mongo/db/ops/update.cpp
+++ b/src/mongo/db/ops/update.cpp
@@ -129,7 +129,8 @@ UpdateResult update(OperationContext* opCtx, Database* db, const UpdateRequest&
BSONObj applyUpdateOperators(const BSONObj& from, const BSONObj& operators) {
UpdateDriver::Options opts;
UpdateDriver driver(opts);
- Status status = driver.parse(operators);
+ std::map<StringData, std::unique_ptr<ArrayFilter>> arrayFilters;
+ Status status = driver.parse(operators, arrayFilters);
if (!status.isOK()) {
uasserted(16838, status.reason());
}
diff --git a/src/mongo/db/ops/update.h b/src/mongo/db/ops/update.h
index 2c5e0fc0f97..5b7f0a4e324 100644
--- a/src/mongo/db/ops/update.h
+++ b/src/mongo/db/ops/update.h
@@ -50,8 +50,8 @@ class UpdateDriver;
UpdateResult update(OperationContext* opCtx, Database* db, const UpdateRequest& request);
/**
- * takes the from document and returns a new document
- * after apply all the operators
+ * Takes the 'from' document and returns a new document after applying 'operators'. The
+ * 'arrayFilters' option is not supported.
* e.g.
* applyUpdateOperators( BSON( "x" << 1 ) , BSON( "$inc" << BSON( "x" << 1 ) ) );
* returns: { x : 2 }
diff --git a/src/mongo/db/repl/storage_interface_impl_test.cpp b/src/mongo/db/repl/storage_interface_impl_test.cpp
index 57eb9d4d262..9bae858342f 100644
--- a/src/mongo/db/repl/storage_interface_impl_test.cpp
+++ b/src/mongo/db/repl/storage_interface_impl_test.cpp
@@ -1890,10 +1890,13 @@ TEST_F(StorageInterfaceImplTest,
auto nss = makeNamespace(_agent);
ASSERT_OK(storage.createCollection(opCtx, nss, CollectionOptions()));
- auto status = storage.upsertById(
- opCtx, nss, BSON("" << 1).firstElement(), BSON("$unknownUpdateOp" << BSON("x" << 1000)));
- ASSERT_EQUALS(ErrorCodes::FailedToParse, status);
- ASSERT_STRING_CONTAINS(status.reason(), "Unknown modifier: $unknownUpdateOp");
+ ASSERT_THROWS_CODE_AND_WHAT(storage.upsertById(opCtx,
+ nss,
+ BSON("" << 1).firstElement(),
+ BSON("$unknownUpdateOp" << BSON("x" << 1000))),
+ UserException,
+ ErrorCodes::FailedToParse,
+ "Unknown modifier: $unknownUpdateOp");
}
TEST_F(StorageInterfaceImplTest, DeleteByFilterReturnsNamespaceNotFoundWhenDatabaseDoesNotExist) {
diff --git a/src/mongo/db/update/arithmetic_node.cpp b/src/mongo/db/update/arithmetic_node.cpp
index e040ed700d8..69c25a87da8 100644
--- a/src/mongo/db/update/arithmetic_node.cpp
+++ b/src/mongo/db/update/arithmetic_node.cpp
@@ -103,7 +103,9 @@ void ArithmeticNode::updateExistingElement(mutablebson::Element* element, bool*
if (element->getValue().ok() && valueToSet.isIdentical(originalValue)) {
*noop = true;
} else {
- invariantOK(element->setValueSafeNum(valueToSet));
+
+ // This can fail if 'valueToSet' is not representable as a 64-bit integer.
+ uassertStatusOK(element->setValueSafeNum(valueToSet));
}
}
@@ -119,7 +121,9 @@ void ArithmeticNode::setValueForNewElement(mutablebson::Element* element) const
valueToSet *= SafeNum(static_cast<int32_t>(0));
break;
}
- invariantOK(element->setValueSafeNum(valueToSet));
+
+ // This can fail if 'valueToSet' is not representable as a 64-bit integer.
+ uassertStatusOK(element->setValueSafeNum(valueToSet));
}
} // namespace mongo
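The practical effect of swapping invariantOK() for uassertStatusOK() in both hunks (a sketch reusing the names from the code above, not a standalone snippet): a failed setValueSafeNum() now surfaces as a client-visible write error rather than a server abort.

    Status status = element->setValueSafeNum(valueToSet);
    // Before: invariantOK(status) treated a non-OK Status as a programming error and aborted.
    // After:  uassertStatusOK(status) throws a UserException carrying the Status, which the
    //         command layer reports to the client as an ordinary write error.
    uassertStatusOK(status);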
diff --git a/src/mongo/db/update/update_driver.cpp b/src/mongo/db/update/update_driver.cpp
index d13118e4ed4..8cd0a05511b 100644
--- a/src/mongo/db/update/update_driver.cpp
+++ b/src/mongo/db/update/update_driver.cpp
@@ -37,6 +37,7 @@
#include "mongo/db/matcher/expression_leaf.h"
#include "mongo/db/matcher/extensions_callback_noop.h"
#include "mongo/db/ops/modifier_object_replace.h"
+#include "mongo/db/server_options.h"
#include "mongo/db/update/log_builder.h"
#include "mongo/db/update/modifier_table.h"
#include "mongo/db/update/path_support.h"
@@ -53,6 +54,82 @@ using std::vector;
using pathsupport::EqualityMatches;
+namespace {
+
+modifiertable::ModifierType validateMod(BSONElement mod) {
+ auto modType = modifiertable::getType(mod.fieldName());
+
+ uassert(ErrorCodes::FailedToParse,
+ str::stream() << "Unknown modifier: " << mod.fieldName(),
+ modType != modifiertable::MOD_UNKNOWN);
+
+ uassert(ErrorCodes::FailedToParse,
+ str::stream() << "Modifiers operate on fields but we found type "
+ << typeName(mod.type())
+ << " instead. For example: {$mod: {<field>: ...}}"
+ << " not {"
+ << mod
+ << "}",
+ mod.type() == BSONType::Object);
+
+ uassert(ErrorCodes::FailedToParse,
+ str::stream() << "'" << mod.fieldName()
+ << "' is empty. You must specify a field like so: "
+ "{"
+ << mod.fieldName()
+ << ": {<field>: ...}}",
+ !mod.embeddedObject().isEmpty());
+
+ return modType;
+}
+
+// Parses 'updateExpr' and merges it into 'root'. Returns whether 'updateExpr' is positional.
+StatusWith<bool> parseUpdateExpression(
+ BSONObj updateExpr,
+ UpdateObjectNode* root,
+ const CollatorInterface* collator,
+ const std::map<StringData, std::unique_ptr<ArrayFilter>>& arrayFilters) {
+ bool positional = false;
+ std::set<std::string> foundIdentifiers;
+ for (auto&& mod : updateExpr) {
+ auto modType = validateMod(mod);
+ for (auto&& field : mod.Obj()) {
+ auto statusWithPositional = UpdateObjectNode::parseAndMerge(
+ root, modType, field, collator, arrayFilters, foundIdentifiers);
+ if (!statusWithPositional.isOK()) {
+
+ // Check whether we failed to parse because this mod type is not yet supported by
+ // UpdateNode.
+ // TODO SERVER-28777: Skip this check.
+ auto leaf = modifiertable::makeUpdateLeafNode(modType);
+ if (!leaf) {
+ uassert(ErrorCodes::InvalidOptions,
+ str::stream() << "Cannot use array filters with modifier "
+ << mod.fieldName(),
+ arrayFilters.empty());
+ return statusWithPositional;
+ }
+
+ uassertStatusOK(statusWithPositional);
+ MONGO_UNREACHABLE;
+ }
+ positional = positional || statusWithPositional.getValue();
+ }
+ }
+
+ for (const auto& arrayFilter : arrayFilters) {
+ uassert(ErrorCodes::FailedToParse,
+ str::stream() << "The array filter for identifier '" << arrayFilter.first
+ << "' was not used in the update "
+ << updateExpr,
+ foundIdentifiers.find(arrayFilter.first.toString()) != foundIdentifiers.end());
+ }
+
+ return positional;
+}
+
+} // namespace
+
UpdateDriver::UpdateDriver(const Options& opts)
: _replacementMode(false),
_indexedFields(NULL),
@@ -65,7 +142,9 @@ UpdateDriver::~UpdateDriver() {
clear();
}
-Status UpdateDriver::parse(const BSONObj& updateExpr, const bool multi) {
+Status UpdateDriver::parse(const BSONObj& updateExpr,
+ const std::map<StringData, std::unique_ptr<ArrayFilter>>& arrayFilters,
+ const bool multi) {
clear();
// Check if the update expression is a full object replacement.
@@ -93,55 +172,41 @@ Status UpdateDriver::parse(const BSONObj& updateExpr, const bool multi) {
return Status::OK();
}
- // The update expression is made of mod operators, that is
- // { <$mod>: {...}, <$mod>: {...}, ... }
- BSONObjIterator outerIter(updateExpr);
- while (outerIter.more()) {
- BSONElement outerModElem = outerIter.next();
-
- // Check whether this is a valid mod type.
- modifiertable::ModifierType modType = modifiertable::getType(outerModElem.fieldName());
- if (modType == modifiertable::MOD_UNKNOWN) {
- return Status(ErrorCodes::FailedToParse,
- str::stream() << "Unknown modifier: " << outerModElem.fieldName());
- }
-
- // Check whether there is indeed a list of mods under this modifier.
- if (outerModElem.type() != Object) {
- return Status(ErrorCodes::FailedToParse,
- str::stream() << "Modifiers operate on fields but we found type "
- << typeName(outerModElem.type())
- << " instead. For example: {$mod: {<field>: ...}}"
- << " not {"
- << outerModElem.toString()
- << "}");
- }
+ // Register the fact that this driver is not doing a full object replacement.
+ _replacementMode = false;
- // Check whether there are indeed mods under this modifier.
- if (outerModElem.embeddedObject().isEmpty()) {
- return Status(ErrorCodes::FailedToParse,
- str::stream() << "'" << outerModElem.fieldName()
- << "' is empty. You must specify a field like so: "
- "{"
- << outerModElem.fieldName()
- << ": {<field>: ...}}");
+ // If the featureCompatibilityVersion is 3.6, parse using UpdateNode.
+ // TODO SERVER-28777: Remove the restriction that this is only done if the update is not from
+ // replication.
+ if (serverGlobalParams.featureCompatibility.version.load() ==
+ ServerGlobalParams::FeatureCompatibility::Version::k36 &&
+ !_modOptions.fromReplication) {
+ _root = stdx::make_unique<UpdateObjectNode>();
+ auto statusWithPositional =
+ parseUpdateExpression(updateExpr, _root.get(), _modOptions.collator, arrayFilters);
+ if (statusWithPositional.isOK()) {
+ _positional = statusWithPositional.getValue();
+ return Status::OK();
}
+ _root.reset();
+ }
- BSONObjIterator innerIter(outerModElem.embeddedObject());
- while (innerIter.more()) {
- BSONElement innerModElem = innerIter.next();
-
- Status status = addAndParse(modType, innerModElem);
+ // TODO SERVER-28777: This can be an else case, since we will not fall back to the old parsing
+ // if we fail to parse using the new implementation.
+ uassert(ErrorCodes::InvalidOptions,
+ "The featureCompatibilityVersion must be 3.6 to use arrayFilters. See "
+ "http://dochub.mongodb.org/core/3.6-feature-compatibility.",
+ arrayFilters.empty());
+ for (auto&& mod : updateExpr) {
+ auto modType = validateMod(mod);
+ for (auto&& field : mod.Obj()) {
+ auto status = addAndParse(modType, field);
if (!status.isOK()) {
return status;
}
}
}
- // Register the fact that there will be only $mod's in this driver -- no object
- // replacement.
- _replacementMode = false;
-
return Status::OK();
}
@@ -248,74 +313,103 @@ Status UpdateDriver::update(StringData matchedField,
_logDoc.reset();
LogBuilder logBuilder(_logDoc.root());
- // Ask each of the mods to type check whether they can operate over the current document
- // and, if so, to change that document accordingly.
- for (vector<ModifierInterface*>::iterator it = _mods.begin(); it != _mods.end(); ++it) {
- ModifierInterface::ExecInfo execInfo;
- Status status = (*it)->prepare(doc->root(), matchedField, &execInfo);
- if (!status.isOK()) {
- return status;
+ if (_root) {
+
+ // We parsed using the new UpdateNode implementation.
+ FieldRef pathToCreate;
+ FieldRef pathTaken;
+ bool indexesAffected = false;
+ bool noop = false;
+ _root->apply(doc->root(),
+ &pathToCreate,
+ &pathTaken,
+ matchedField,
+ _modOptions.fromReplication,
+ _indexedFields,
+ (_logOp && logOpRec) ? &logBuilder : nullptr,
+ &indexesAffected,
+ &noop);
+ if (indexesAffected) {
+ _affectIndices = true;
+ doc->disableInPlaceUpdates();
}
-
- // If a mod wants to be applied only if this is an upsert (or only if this is a
- // strict update), we should respect that. If a mod doesn't care, it would state
- // it is fine with ANY update context.
- const bool validContext = (execInfo.context == ModifierInterface::ExecInfo::ANY_CONTEXT ||
- execInfo.context == _context);
-
- // Nothing to do if not in a valid context.
- if (!validContext) {
- continue;
+ if (docWasModified) {
+ *docWasModified = !noop;
}
+ } else {
- // Gather which fields this mod is interested on and whether these fields were
- // "taken" by previous mods. Note that not all mods are multi-field mods. When we
- // see an empty field, we may stop looking for others.
- for (int i = 0; i < ModifierInterface::ExecInfo::MAX_NUM_FIELDS; i++) {
- if (execInfo.fieldRef[i] == 0) {
- break;
+ // We parsed using the old ModifierInterface implementation.
+ // Ask each of the mods to type check whether they can operate over the current document
+ // and, if so, to change that document accordingly.
+ for (vector<ModifierInterface*>::iterator it = _mods.begin(); it != _mods.end(); ++it) {
+ ModifierInterface::ExecInfo execInfo;
+ Status status = (*it)->prepare(doc->root(), matchedField, &execInfo);
+ if (!status.isOK()) {
+ return status;
}
- // Record each field being updated but check for conflicts first
- const FieldRef* other;
- if (!targetFields->insert(execInfo.fieldRef[i], &other)) {
- return Status(ErrorCodes::ConflictingUpdateOperators,
- str::stream() << "Cannot update '" << other->dottedField()
- << "' and '"
- << execInfo.fieldRef[i]->dottedField()
- << "' at the same time");
+ // If a mod wants to be applied only if this is an upsert (or only if this is a
+ // strict update), we should respect that. If a mod doesn't care, it would state
+ // it is fine with ANY update context.
+ const bool validContext =
+ (execInfo.context == ModifierInterface::ExecInfo::ANY_CONTEXT ||
+ execInfo.context == _context);
+
+ // Nothing to do if not in a valid context.
+ if (!validContext) {
+ continue;
}
- // We start with the expectation that a mod will be in-place. But if the mod
- // touched an indexed field and the mod will indeed be executed -- that is, it
- // is not a no-op and it is in a valid context -- then we switch back to a
- // non-in-place mode.
- //
- // TODO: make mightBeIndexed and fieldRef like each other.
- if (!_affectIndices && !execInfo.noOp && _indexedFields &&
- _indexedFields->mightBeIndexed(execInfo.fieldRef[i]->dottedField())) {
- _affectIndices = true;
- doc->disableInPlaceUpdates();
+
+ // Gather which fields this mod is interested on and whether these fields were
+ // "taken" by previous mods. Note that not all mods are multi-field mods. When we
+ // see an empty field, we may stop looking for others.
+ for (int i = 0; i < ModifierInterface::ExecInfo::MAX_NUM_FIELDS; i++) {
+ if (execInfo.fieldRef[i] == 0) {
+ break;
+ }
+
+ // Record each field being updated but check for conflicts first
+ const FieldRef* other;
+ if (!targetFields->insert(execInfo.fieldRef[i], &other)) {
+ return Status(ErrorCodes::ConflictingUpdateOperators,
+ str::stream() << "Cannot update '" << other->dottedField()
+ << "' and '"
+ << execInfo.fieldRef[i]->dottedField()
+ << "' at the same time");
+ }
+
+ // We start with the expectation that a mod will be in-place. But if the mod
+ // touched an indexed field and the mod will indeed be executed -- that is, it
+ // is not a no-op and it is in a valid context -- then we switch back to a
+ // non-in-place mode.
+ //
+ // TODO: make mightBeIndexed and fieldRef like each other.
+ if (!_affectIndices && !execInfo.noOp && _indexedFields &&
+ _indexedFields->mightBeIndexed(execInfo.fieldRef[i]->dottedField())) {
+ _affectIndices = true;
+ doc->disableInPlaceUpdates();
+ }
}
- }
- if (!execInfo.noOp) {
- status = (*it)->apply();
+ if (!execInfo.noOp) {
+ status = (*it)->apply();
- if (docWasModified)
- *docWasModified = true;
+ if (docWasModified)
+ *docWasModified = true;
- if (!status.isOK()) {
- return status;
+ if (!status.isOK()) {
+ return status;
+ }
}
- }
- // If we require a replication oplog entry for this update, go ahead and generate one.
- if (!execInfo.noOp && _logOp && logOpRec) {
- status = (*it)->log(&logBuilder);
- if (!status.isOK()) {
- return status;
+ // If we require a replication oplog entry for this update, go ahead and generate one.
+ if (!execInfo.noOp && _logOp && logOpRec) {
+ status = (*it)->log(&logBuilder);
+ if (!status.isOK()) {
+ return status;
+ }
}
}
}
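To illustrate what parseUpdateExpression() accepts (a hedged example of the 3.6 filtered-positional syntax; the field names and filter are made up, not taken from this patch): each $[<identifier>] in an update path must have a matching key in 'arrayFilters', and every entry in 'arrayFilters' must be used somewhere in the update, which is what the loop over 'arrayFilters' and 'foundIdentifiers' at the end enforces.

    // Parsing this expression with an arrayFilters map containing the key "i" succeeds;
    // parsing it with an additional, unused key (say "j") fails with FailedToParse.
    const BSONObj updateExpr = fromjson("{$set: {'grades.$[i]': 100}}");
    const BSONObj rawFilterForI = fromjson("{i: {$lt: 60}}");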
diff --git a/src/mongo/db/update/update_driver.h b/src/mongo/db/update/update_driver.h
index 6152e22cefb..65785542414 100644
--- a/src/mongo/db/update/update_driver.h
+++ b/src/mongo/db/update/update_driver.h
@@ -39,6 +39,7 @@
#include "mongo/db/ops/modifier_interface.h"
#include "mongo/db/query/canonical_query.h"
#include "mongo/db/update/modifier_table.h"
+#include "mongo/db/update/update_object_node.h"
#include "mongo/db/update_index_data.h"
namespace mongo {
@@ -54,10 +55,18 @@ public:
~UpdateDriver();
/**
- * Returns OK and fills in '_mods' if 'updateExpr' is correct. Otherwise returns an
- * error status with a corresponding description.
+ * Parses the update expression 'updateExpr'. If the featureCompatibilityVersion is 3.6,
+ * 'updateExpr' is parsed into '_root'. Otherwise, 'updateExpr' is parsed into '_mods'. This is
+ * done because applying updates via UpdateNode creates new fields in lexicographic order,
+ * whereas applying updates via ModifierInterface creates new fields in the order they are
+ * specified in 'updateExpr', so it is necessary that the whole replica set have version 3.6 in
+ * order to use UpdateNode. Uasserts if the featureCompatibilityVersion is 3.4 and
+ * 'arrayFilters' is non-empty. Uasserts or returns a non-ok status if 'updateExpr' fails to
+ * parse.
*/
- Status parse(const BSONObj& updateExpr, const bool multi = false);
+ Status parse(const BSONObj& updateExpr,
+ const std::map<StringData, std::unique_ptr<ArrayFilter>>& arrayFilters,
+ const bool multi = false);
/**
* Fills in document with any fields in the query which are valid.
@@ -157,7 +166,12 @@ private:
// Is there a list of $mod's on '_mods' or is it just full object replacement?
bool _replacementMode;
- // Collection of update mod instances. Owned here.
+ // The root of the UpdateNode tree. If the featureCompatibilityVersion is 3.6, the update
+ // expression is parsed into '_root'.
+ std::unique_ptr<UpdateObjectNode> _root;
+
+ // Collection of update mod instances. Owned here. If the featureCompatibilityVersion is 3.4,
+ // the update expression is parsed into '_mods'.
std::vector<ModifierInterface*> _mods;
// What are the list of fields in the collection over which the update is going to be
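A small illustration of the ordering difference the parse() comment describes (the literal results below restate that documented rationale; they are not output captured from this patch):

    // Applying {$set: {b: 1, a: 1}} to the empty document {}:
    //   parsed into '_root' (UpdateNode)         -> { a: 1, b: 1 }  // lexicographic order
    //   parsed into '_mods' (ModifierInterface)  -> { b: 1, a: 1 }  // order as specified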
diff --git a/src/mongo/db/update/update_driver_test.cpp b/src/mongo/db/update/update_driver_test.cpp
index e9b92433142..fb7698dc5a3 100644
--- a/src/mongo/db/update/update_driver_test.cpp
+++ b/src/mongo/db/update/update_driver_test.cpp
@@ -44,30 +44,17 @@
#include "mongo/db/update_index_data.h"
#include "mongo/unittest/unittest.h"
+namespace mongo {
namespace {
-using mongo::BSONElement;
-using mongo::BSONElementComparator;
-using mongo::BSONObj;
-using mongo::BSONObjIterator;
-using mongo::CollatorInterfaceMock;
-using mongo::FieldRef;
-using mongo::OperationContext;
-using mongo::OwnedPointerVector;
-using mongo::QueryTestServiceContext;
-using mongo::ServiceContext;
-using mongo::SimpleStringDataComparator;
-using mongo::StringData;
-using mongo::UpdateDriver;
-using mongo::UpdateIndexData;
-using mongo::fromjson;
using mongo::mutablebson::Document;
using mongoutils::str::stream;
TEST(Parse, Normal) {
UpdateDriver::Options opts;
UpdateDriver driver(opts);
- ASSERT_OK(driver.parse(fromjson("{$set:{a:1}}")));
+ std::map<StringData, std::unique_ptr<ArrayFilter>> arrayFilters;
+ ASSERT_OK(driver.parse(fromjson("{$set:{a:1}}"), arrayFilters));
ASSERT_EQUALS(driver.numMods(), 1U);
ASSERT_FALSE(driver.isDocReplacement());
}
@@ -75,7 +62,8 @@ TEST(Parse, Normal) {
TEST(Parse, MultiMods) {
UpdateDriver::Options opts;
UpdateDriver driver(opts);
- ASSERT_OK(driver.parse(fromjson("{$set:{a:1, b:1}}")));
+ std::map<StringData, std::unique_ptr<ArrayFilter>> arrayFilters;
+ ASSERT_OK(driver.parse(fromjson("{$set:{a:1, b:1}}"), arrayFilters));
ASSERT_EQUALS(driver.numMods(), 2U);
ASSERT_FALSE(driver.isDocReplacement());
}
@@ -83,7 +71,8 @@ TEST(Parse, MultiMods) {
TEST(Parse, MixingMods) {
UpdateDriver::Options opts;
UpdateDriver driver(opts);
- ASSERT_OK(driver.parse(fromjson("{$set:{a:1}, $unset:{b:1}}")));
+ std::map<StringData, std::unique_ptr<ArrayFilter>> arrayFilters;
+ ASSERT_OK(driver.parse(fromjson("{$set:{a:1}, $unset:{b:1}}"), arrayFilters));
ASSERT_EQUALS(driver.numMods(), 2U);
ASSERT_FALSE(driver.isDocReplacement());
}
@@ -91,38 +80,59 @@ TEST(Parse, MixingMods) {
TEST(Parse, ObjectReplacment) {
UpdateDriver::Options opts;
UpdateDriver driver(opts);
- ASSERT_OK(driver.parse(fromjson("{obj: \"obj replacement\"}")));
+ std::map<StringData, std::unique_ptr<ArrayFilter>> arrayFilters;
+ ASSERT_OK(driver.parse(fromjson("{obj: \"obj replacement\"}"), arrayFilters));
ASSERT_TRUE(driver.isDocReplacement());
}
TEST(Parse, EmptyMod) {
UpdateDriver::Options opts;
UpdateDriver driver(opts);
- ASSERT_NOT_OK(driver.parse(fromjson("{$set:{}}")));
+ std::map<StringData, std::unique_ptr<ArrayFilter>> arrayFilters;
+ ASSERT_THROWS_CODE_AND_WHAT(
+ driver.parse(fromjson("{$set:{}}"), arrayFilters),
+ UserException,
+ ErrorCodes::FailedToParse,
+ "'$set' is empty. You must specify a field like so: {$set: {<field>: ...}}");
}
TEST(Parse, WrongMod) {
UpdateDriver::Options opts;
UpdateDriver driver(opts);
- ASSERT_NOT_OK(driver.parse(fromjson("{$xyz:{a:1}}")));
+ std::map<StringData, std::unique_ptr<ArrayFilter>> arrayFilters;
+ ASSERT_THROWS_CODE_AND_WHAT(driver.parse(fromjson("{$xyz:{a:1}}"), arrayFilters),
+ UserException,
+ ErrorCodes::FailedToParse,
+ "Unknown modifier: $xyz");
}
TEST(Parse, WrongType) {
UpdateDriver::Options opts;
UpdateDriver driver(opts);
- ASSERT_NOT_OK(driver.parse(fromjson("{$set:[{a:1}]}")));
+ std::map<StringData, std::unique_ptr<ArrayFilter>> arrayFilters;
+ ASSERT_THROWS_CODE_AND_WHAT(driver.parse(fromjson("{$set:[{a:1}]}"), arrayFilters),
+ UserException,
+ ErrorCodes::FailedToParse,
+ "Modifiers operate on fields but we found type array instead. For "
+ "example: {$mod: {<field>: ...}} not {$set: [ { a: 1 } ]}");
}
TEST(Parse, ModsWithLaterObjReplacement) {
UpdateDriver::Options opts;
UpdateDriver driver(opts);
- ASSERT_NOT_OK(driver.parse(fromjson("{$set:{a:1}, obj: \"obj replacement\"}")));
+ std::map<StringData, std::unique_ptr<ArrayFilter>> arrayFilters;
+ ASSERT_THROWS_CODE_AND_WHAT(
+ driver.parse(fromjson("{$set:{a:1}, obj: \"obj replacement\"}"), arrayFilters),
+ UserException,
+ ErrorCodes::FailedToParse,
+ "Unknown modifier: obj");
}
TEST(Parse, PushAll) {
UpdateDriver::Options opts;
UpdateDriver driver(opts);
- ASSERT_OK(driver.parse(fromjson("{$pushAll:{a:[1,2,3]}}")));
+ std::map<StringData, std::unique_ptr<ArrayFilter>> arrayFilters;
+ ASSERT_OK(driver.parse(fromjson("{$pushAll:{a:[1,2,3]}}"), arrayFilters));
ASSERT_EQUALS(driver.numMods(), 1U);
ASSERT_FALSE(driver.isDocReplacement());
}
@@ -130,7 +140,8 @@ TEST(Parse, PushAll) {
TEST(Parse, SetOnInsert) {
UpdateDriver::Options opts;
UpdateDriver driver(opts);
- ASSERT_OK(driver.parse(fromjson("{$setOnInsert:{a:1}}")));
+ std::map<StringData, std::unique_ptr<ArrayFilter>> arrayFilters;
+ ASSERT_OK(driver.parse(fromjson("{$setOnInsert:{a:1}}"), arrayFilters));
ASSERT_EQUALS(driver.numMods(), 1U);
ASSERT_FALSE(driver.isDocReplacement());
}
@@ -140,8 +151,9 @@ TEST(Collator, SetCollationUpdatesModifierInterfaces) {
BSONObj updateDocument = fromjson("{$max: {a: 'abd'}}");
UpdateDriver::Options opts;
UpdateDriver driver(opts);
+ std::map<StringData, std::unique_ptr<ArrayFilter>> arrayFilters;
- ASSERT_OK(driver.parse(updateDocument));
+ ASSERT_OK(driver.parse(updateDocument, arrayFilters));
ASSERT_EQUALS(driver.numMods(), 1U);
bool modified = false;
@@ -164,8 +176,8 @@ public:
CreateFromQueryFixture()
: _driverOps(new UpdateDriver(UpdateDriver::Options())),
_driverRepl(new UpdateDriver(UpdateDriver::Options())) {
- _driverOps->parse(fromjson("{$set:{'_':1}}")).transitional_ignore();
- _driverRepl->parse(fromjson("{}")).transitional_ignore();
+ _driverOps->parse(fromjson("{$set:{'_':1}}"), _arrayFilters).transitional_ignore();
+ _driverRepl->parse(fromjson("{}"), _arrayFilters).transitional_ignore();
_opCtx = _serviceContext.makeOperationContext();
}
@@ -188,6 +200,7 @@ public:
private:
QueryTestServiceContext _serviceContext;
ServiceContext::UniqueOperationContext _opCtx;
+ std::map<StringData, std::unique_ptr<ArrayFilter>> _arrayFilters;
std::unique_ptr<UpdateDriver> _driverOps;
std::unique_ptr<UpdateDriver> _driverRepl;
Document _doc;
@@ -428,4 +441,5 @@ TEST_F(CreateFromQuery, NotFullShardKeyRepl) {
opCtx(), query, &immutablePaths.vector(), doc()));
}
-} // unnamed namespace
+} // namespace
+} // namespace mongo
diff --git a/src/mongo/dbtests/query_stage_update.cpp b/src/mongo/dbtests/query_stage_update.cpp
index c0c692473a6..a1b41763503 100644
--- a/src/mongo/dbtests/query_stage_update.cpp
+++ b/src/mongo/dbtests/query_stage_update.cpp
@@ -209,7 +209,9 @@ public:
request.setQuery(query);
request.setUpdates(updates);
- ASSERT_OK(driver.parse(request.getUpdates(), request.isMulti()));
+ const std::map<StringData, std::unique_ptr<ArrayFilter>> arrayFilters;
+
+ ASSERT_OK(driver.parse(request.getUpdates(), arrayFilters, request.isMulti()));
// Setup update params.
UpdateStageParams params(&request, &driver, opDebug);
@@ -280,7 +282,9 @@ public:
request.setQuery(query);
request.setUpdates(updates);
- ASSERT_OK(driver.parse(request.getUpdates(), request.isMulti()));
+ const std::map<StringData, std::unique_ptr<ArrayFilter>> arrayFilters;
+
+ ASSERT_OK(driver.parse(request.getUpdates(), arrayFilters, request.isMulti()));
// Configure the scan.
CollectionScanParams collScanParams;
@@ -393,7 +397,9 @@ public:
request.setReturnDocs(UpdateRequest::RETURN_OLD);
request.setLifecycle(&updateLifecycle);
- ASSERT_OK(driver.parse(request.getUpdates(), request.isMulti()));
+ const std::map<StringData, std::unique_ptr<ArrayFilter>> arrayFilters;
+
+ ASSERT_OK(driver.parse(request.getUpdates(), arrayFilters, request.isMulti()));
// Configure a QueuedDataStage to pass the first object in the collection back in a
// RID_AND_OBJ state.
@@ -481,7 +487,9 @@ public:
request.setReturnDocs(UpdateRequest::RETURN_NEW);
request.setLifecycle(&updateLifecycle);
- ASSERT_OK(driver.parse(request.getUpdates(), request.isMulti()));
+ const std::map<StringData, std::unique_ptr<ArrayFilter>> arrayFilters;
+
+ ASSERT_OK(driver.parse(request.getUpdates(), arrayFilters, request.isMulti()));
// Configure a QueuedDataStage to pass the first object in the collection back in a
// RID_AND_OBJ state.
@@ -559,7 +567,9 @@ public:
request.setMulti(false);
request.setLifecycle(&updateLifecycle);
- ASSERT_OK(driver.parse(request.getUpdates(), request.isMulti()));
+ const std::map<StringData, std::unique_ptr<ArrayFilter>> arrayFilters;
+
+ ASSERT_OK(driver.parse(request.getUpdates(), arrayFilters, request.isMulti()));
// Configure a QueuedDataStage to pass an OWNED_OBJ to the update stage.
auto qds = make_unique<QueuedDataStage>(&_opCtx, ws.get());