summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorTess Avitabile <tess.avitabile@mongodb.com>2017-06-15 10:01:54 -0400
committerTess Avitabile <tess.avitabile@mongodb.com>2017-06-19 10:29:10 -0400
commitab165e7a81e319cd7e99af3e1eed86e826fd34ba (patch)
tree9bfbc962946848d8bc97d208e1aabdf0e0363915
parent0d7f9a01b1ae168b8adfc02bb1eb0c1616138d38 (diff)
downloadmongo-ab165e7a81e319cd7e99af3e1eed86e826fd34ba.tar.gz
SERVER-28762 Conditionally parse an update expression as an UpdateNode tree
-rw-r--r--jstests/core/profile_findandmodify.js8
-rw-r--r--jstests/core/profile_update.js29
-rw-r--r--jstests/core/update_arrayFilters.js639
-rw-r--r--jstests/core/updateh.js2
-rw-r--r--jstests/noPassthrough/arrayFilters_feature_compatibility_version.js78
-rw-r--r--jstests/replsets/oplog_replay_on_startup.js8
-rw-r--r--jstests/replsets/oplog_replay_on_startup_with_bad_op.js8
-rw-r--r--src/mongo/db/auth/authz_manager_external_state_mock.cpp3
-rw-r--r--src/mongo/db/auth/role_graph_update.cpp5
-rw-r--r--src/mongo/db/commands/write_commands/write_commands.cpp1
-rw-r--r--src/mongo/db/exec/update.cpp8
-rw-r--r--src/mongo/db/nesting_depth_test.cpp143
-rw-r--r--src/mongo/db/ops/parsed_update.cpp15
-rw-r--r--src/mongo/db/ops/update.cpp3
-rw-r--r--src/mongo/db/ops/update.h4
-rw-r--r--src/mongo/db/repl/storage_interface_impl_test.cpp11
-rw-r--r--src/mongo/db/update/arithmetic_node.cpp8
-rw-r--r--src/mongo/db/update/update_driver.cpp284
-rw-r--r--src/mongo/db/update/update_driver.h22
-rw-r--r--src/mongo/db/update/update_driver_test.cpp72
-rw-r--r--src/mongo/dbtests/query_stage_update.cpp20
21 files changed, 1156 insertions, 215 deletions
diff --git a/jstests/core/profile_findandmodify.js b/jstests/core/profile_findandmodify.js
index bca4b995553..8b431b50cdc 100644
--- a/jstests/core/profile_findandmodify.js
+++ b/jstests/core/profile_findandmodify.js
@@ -16,13 +16,13 @@
//
coll.drop();
for (var i = 0; i < 3; i++) {
- assert.writeOK(coll.insert({_id: i, a: i, b: i}));
+ assert.writeOK(coll.insert({_id: i, a: i, b: [0]}));
}
assert.commandWorked(coll.createIndex({b: 1}));
- assert.eq({_id: 2, a: 2, b: 2}, coll.findAndModify({
+ assert.eq({_id: 2, a: 2, b: [0]}, coll.findAndModify({
query: {a: 2},
- update: {$inc: {b: 1}},
+ update: {$inc: {"b.$[i]": 1}},
collation: {locale: "fr"},
arrayFilters: [{i: 0}]
}));
@@ -32,7 +32,7 @@
assert.eq(profileObj.op, "command", tojson(profileObj));
assert.eq(profileObj.ns, coll.getFullName(), tojson(profileObj));
assert.eq(profileObj.command.query, {a: 2}, tojson(profileObj));
- assert.eq(profileObj.command.update, {$inc: {b: 1}}, tojson(profileObj));
+ assert.eq(profileObj.command.update, {$inc: {"b.$[i]": 1}}, tojson(profileObj));
assert.eq(profileObj.command.collation, {locale: "fr"}, tojson(profileObj));
assert.eq(profileObj.command.arrayFilters, [{i: 0}], tojson(profileObj));
assert.eq(profileObj.keysExamined, 0, tojson(profileObj));
diff --git a/jstests/core/profile_update.js b/jstests/core/profile_update.js
index 0690a8ec6ee..9b23d894585 100644
--- a/jstests/core/profile_update.js
+++ b/jstests/core/profile_update.js
@@ -21,20 +21,12 @@
}
assert.commandWorked(coll.createIndex({a: 1}));
- assert.writeOK(coll.update({a: {$gte: 2}},
- {$set: {c: 1}, $inc: {a: -10}},
- db.getMongo().writeMode() === "commands"
- ? {collation: {locale: "fr"}, arrayFilters: [{i: 0}]}
- : {}));
+ assert.writeOK(coll.update({a: {$gte: 2}}, {$set: {c: 1}, $inc: {a: -10}}));
var profileObj = getLatestProfilerEntry(testDB);
assert.eq(profileObj.ns, coll.getFullName(), tojson(profileObj));
assert.eq(profileObj.op, "update", tojson(profileObj));
- if (db.getMongo().writeMode() === "commands") {
- assert.eq(profileObj.command.collation, {locale: "fr"}, tojson(profileObj));
- assert.eq(profileObj.command.arrayFilters, [{i: 0}], tojson(profileObj));
- }
assert.eq(profileObj.keysExamined, 1, tojson(profileObj));
assert.eq(profileObj.docsExamined, 1, tojson(profileObj));
assert.eq(profileObj.keysInserted, 1, tojson(profileObj));
@@ -49,6 +41,25 @@
assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj));
//
+ // Confirm metrics for parameters that require "commands" mode.
+ //
+
+ if (db.getMongo().writeMode() === "commands") {
+ coll.drop();
+ assert.writeOK(coll.insert({_id: 0, a: [0]}));
+
+ assert.writeOK(coll.update(
+ {_id: 0}, {$set: {"a.$[i]": 1}}, {collation: {locale: "fr"}, arrayFilters: [{i: 0}]}));
+
+ profileObj = getLatestProfilerEntry(testDB);
+
+ assert.eq(profileObj.ns, coll.getFullName(), tojson(profileObj));
+ assert.eq(profileObj.op, "update", tojson(profileObj));
+ assert.eq(profileObj.command.collation, {locale: "fr"}, tojson(profileObj));
+ assert.eq(profileObj.command.arrayFilters, [{i: 0}], tojson(profileObj));
+ }
+
+ //
// Confirm metrics for multiple indexed document update.
//
coll.drop();
diff --git a/jstests/core/update_arrayFilters.js b/jstests/core/update_arrayFilters.js
index 4271eb8cddb..b54b981ae4d 100644
--- a/jstests/core/update_arrayFilters.js
+++ b/jstests/core/update_arrayFilters.js
@@ -1,9 +1,14 @@
+// Cannot implicitly shard accessed collections because of collection existing when none
+// expected.
+// @tags: [assumes_no_implicit_collection_creation_after_drop]
+
// Tests for the arrayFilters option to update and findAndModify.
(function() {
"use strict";
let coll = db.update_arrayFilters;
coll.drop();
+ let res;
//
// Tests for update.
@@ -11,36 +16,47 @@
if (db.getMongo().writeMode() !== "commands") {
assert.throws(function() {
- coll.update({_id: 0}, {$set: {a: 5}}, {arrayFilters: [{i: 0}]});
+ coll.update({_id: 0}, {$set: {"a.$[i]": 5}}, {arrayFilters: [{i: 0}]});
});
} else {
// Non-array arrayFilters fails to parse.
- assert.writeError(coll.update({_id: 0}, {$set: {a: 5}}, {arrayFilters: {i: 0}}),
+ assert.writeError(coll.update({_id: 0}, {$set: {"a.$[i]": 5}}, {arrayFilters: {i: 0}}),
ErrorCodes.TypeMismatch);
// Non-object array filter fails to parse.
- assert.writeError(coll.update({_id: 0}, {$set: {a: 5}}, {arrayFilters: ["bad"]}),
+ assert.writeError(coll.update({_id: 0}, {$set: {"a.$[i]": 5}}, {arrayFilters: ["bad"]}),
ErrorCodes.TypeMismatch);
// Bad array filter fails to parse.
- let res = coll.update({_id: 0}, {$set: {a: 5}}, {arrayFilters: [{i: 0, j: 0}]});
- assert.writeError(res, ErrorCodes.FailedToParse);
+ res = coll.update({_id: 0}, {$set: {"a.$[i]": 5}}, {arrayFilters: [{i: 0, j: 0}]});
+ assert.writeErrorWithCode(res, ErrorCodes.FailedToParse);
assert.neq(-1,
res.getWriteError().errmsg.indexOf(
"Each array filter must use a single top-level field name"),
"update failed for a reason other than failing to parse array filters");
// Multiple array filters with the same id fails to parse.
- res = coll.update({_id: 0}, {$set: {a: 5}}, {arrayFilters: [{i: 0}, {j: 0}, {i: 1}]});
- assert.writeError(res, ErrorCodes.FailedToParse);
+ res = coll.update(
+ {_id: 0}, {$set: {"a.$[i]": 5, "a.$[j]": 6}}, {arrayFilters: [{i: 0}, {j: 0}, {i: 1}]});
+ assert.writeErrorWithCode(res, ErrorCodes.FailedToParse);
assert.neq(
-1,
res.getWriteError().errmsg.indexOf(
"Found multiple array filters with the same top-level field name"),
"update failed for a reason other than multiple array filters with the same top-level field name");
+ // Unused array filter fails to parse.
+ res = coll.update({_id: 0}, {$set: {"a.$[i]": 5}}, {arrayFilters: [{i: 0}, {j: 0}]});
+ assert.writeErrorWithCode(res, ErrorCodes.FailedToParse);
+ assert.neq(
+ -1,
+ res.getWriteError().errmsg.indexOf(
+ "The array filter for identifier 'j' was not used in the update { $set: { a.$[i]: 5.0 } }"),
+ "update failed for a reason other than unused array filter");
+
// Good value for arrayFilters succeeds.
- assert.writeOK(coll.update({_id: 0}, {$set: {a: 5}}, {arrayFilters: [{i: 0}, {j: 0}]}));
+ assert.writeOK(coll.update(
+ {_id: 0}, {$set: {"a.$[i]": 5, "a.$[j]": 6}}, {arrayFilters: [{i: 0}, {j: 0}]}));
}
//
@@ -49,12 +65,12 @@
// Non-array arrayFilters fails to parse.
assert.throws(function() {
- coll.findAndModify({query: {_id: 0}, update: {$set: {a: 5}}, arrayFilters: {i: 0}});
+ coll.findAndModify({query: {_id: 0}, update: {$set: {"a.$[i]": 5}}, arrayFilters: {i: 0}});
});
// Non-object array filter fails to parse.
assert.throws(function() {
- coll.findAndModify({query: {_id: 0}, update: {$set: {a: 5}}, arrayFilters: ["bad"]});
+ coll.findAndModify({query: {_id: 0}, update: {$set: {"a.$[i]": 5}}, arrayFilters: ["bad"]});
});
// arrayFilters option not allowed with remove=true.
@@ -64,19 +80,31 @@
// Bad array filter fails to parse.
assert.throws(function() {
- coll.findAndModify({query: {_id: 0}, update: {$set: {a: 5}}, arrayFilters: [{i: 0, j: 0}]});
+ coll.findAndModify(
+ {query: {_id: 0}, update: {$set: {"a.$[i]": 5}}, arrayFilters: [{i: 0, j: 0}]});
});
// Multiple array filters with the same id fails to parse.
assert.throws(function() {
+ coll.findAndModify({
+ query: {_id: 0},
+ update: {$set: {"a.$[i]": 5, "a.$[j]": 6}},
+ arrayFilters: [{i: 0}, {j: 0}, {i: 1}]
+ });
+ });
+
+ // Unused array filter fails to parse.
+ assert.throws(function() {
coll.findAndModify(
- {query: {_id: 0}, update: {$set: {a: 5}}, arrayFilters: [{i: 0}, {j: 0}, {i: 1}]});
+            {query: {_id: 0}, update: {$set: {"a.$[i]": 5}}, arrayFilters: [{i: 0}, {j: 0}]});
});
// Good value for arrayFilters succeeds.
- assert.eq(null,
- coll.findAndModify(
- {query: {_id: 0}, update: {$set: {a: 5}}, arrayFilters: [{i: 0}, {j: 0}]}));
+ assert.eq(null, coll.findAndModify({
+ query: {_id: 0},
+ update: {$set: {"a.$[i]": 5, "a.$[j]": 6}},
+ arrayFilters: [{i: 0}, {j: 0}]
+ }));
//
// Tests for the bulk API.
@@ -91,22 +119,22 @@
} else {
// update().
let bulk = coll.initializeUnorderedBulkOp();
- bulk.find({}).arrayFilters("bad").update({$set: {a: 5}});
+ bulk.find({}).arrayFilters("bad").update({$set: {"a.$[i]": 5}});
assert.throws(function() {
bulk.execute();
});
bulk = coll.initializeUnorderedBulkOp();
- bulk.find({}).arrayFilters([{i: 0}]).update({$set: {a: 5}});
+ bulk.find({}).arrayFilters([{i: 0}]).update({$set: {"a.$[i]": 5}});
assert.writeOK(bulk.execute());
// updateOne().
bulk = coll.initializeUnorderedBulkOp();
- bulk.find({_id: 0}).arrayFilters("bad").updateOne({$set: {a: 5}});
+ bulk.find({_id: 0}).arrayFilters("bad").updateOne({$set: {"a.$[i]": 5}});
assert.throws(function() {
bulk.execute();
});
bulk = coll.initializeUnorderedBulkOp();
- bulk.find({_id: 0}).arrayFilters([{i: 0}]).updateOne({$set: {a: 5}});
+ bulk.find({_id: 0}).arrayFilters([{i: 0}]).updateOne({$set: {"a.$[i]": 5}});
assert.writeOK(bulk.execute());
}
@@ -116,65 +144,72 @@
// findOneAndUpdate().
assert.throws(function() {
- coll.findOneAndUpdate({_id: 0}, {$set: {a: 5}}, {arrayFilters: "bad"});
+ coll.findOneAndUpdate({_id: 0}, {$set: {"a.$[i]": 5}}, {arrayFilters: "bad"});
});
- assert.eq(null, coll.findOneAndUpdate({_id: 0}, {$set: {a: 5}}, {arrayFilters: [{i: 0}]}));
+ assert.eq(null,
+ coll.findOneAndUpdate({_id: 0}, {$set: {"a.$[i]": 5}}, {arrayFilters: [{i: 0}]}));
// updateOne().
if (db.getMongo().writeMode() !== "commands") {
assert.throws(function() {
- coll.updateOne({_id: 0}, {$set: {a: 5}}, {arrayFilters: [{i: 0}]});
+ coll.updateOne({_id: 0}, {$set: {"a.$[i]": 5}}, {arrayFilters: [{i: 0}]});
});
} else {
assert.throws(function() {
- coll.updateOne({_id: 0}, {$set: {a: 5}}, {arrayFilters: "bad"});
+ coll.updateOne({_id: 0}, {$set: {"a.$[i]": 5}}, {arrayFilters: "bad"});
});
- let res = coll.updateOne({_id: 0}, {$set: {a: 5}}, {arrayFilters: [{i: 0}]});
+ res = coll.updateOne({_id: 0}, {$set: {"a.$[i]": 5}}, {arrayFilters: [{i: 0}]});
assert.eq(0, res.modifiedCount);
}
// updateMany().
if (db.getMongo().writeMode() !== "commands") {
assert.throws(function() {
- coll.updateMany({}, {$set: {a: 5}}, {arrayFilters: [{i: 0}]});
+ coll.updateMany({}, {$set: {"a.$[i]": 5}}, {arrayFilters: [{i: 0}]});
});
} else {
assert.throws(function() {
- coll.updateMany({}, {$set: {a: 5}}, {arrayFilters: "bad"});
+ coll.updateMany({}, {$set: {"a.$[i]": 5}}, {arrayFilters: "bad"});
});
- let res = coll.updateMany({}, {$set: {a: 5}}, {arrayFilters: [{i: 0}]});
+ res = coll.updateMany({}, {$set: {"a.$[i]": 5}}, {arrayFilters: [{i: 0}]});
assert.eq(0, res.modifiedCount);
}
// updateOne with bulkWrite().
if (db.getMongo().writeMode() !== "commands") {
assert.throws(function() {
- coll.bulkWrite(
- [{updateOne: {filter: {_id: 0}, update: {$set: {a: 5}}, arrayFilters: [{i: 0}]}}]);
+ coll.bulkWrite([{
+ updateOne:
+ {filter: {_id: 0}, update: {$set: {"a.$[i]": 5}}, arrayFilters: [{i: 0}]}
+ }]);
});
} else {
assert.throws(function() {
- coll.bulkWrite(
- [{updateOne: {filter: {_id: 0}, update: {$set: {a: 5}}, arrayFilters: "bad"}}]);
+ coll.bulkWrite([{
+ updateOne:
+ {filter: {_id: 0}, update: {$set: {"a.$[i]": 5}}, arrayFilters: "bad"}
+ }]);
});
- let res = coll.bulkWrite(
- [{updateOne: {filter: {_id: 0}, update: {$set: {a: 5}}, arrayFilters: [{i: 0}]}}]);
+ res = coll.bulkWrite([{
+ updateOne: {filter: {_id: 0}, update: {$set: {"a.$[i]": 5}}, arrayFilters: [{i: 0}]}
+ }]);
assert.eq(0, res.matchedCount);
}
// updateMany with bulkWrite().
if (db.getMongo().writeMode() !== "commands") {
assert.throws(function() {
- coll.bulkWrite(
- [{updateMany: {filter: {}, update: {$set: {a: 5}}, arrayFilters: [{i: 0}]}}]);
+ coll.bulkWrite([
+ {updateMany: {filter: {}, update: {$set: {"a.$[i]": 5}}, arrayFilters: [{i: 0}]}}
+ ]);
});
} else {
assert.throws(function() {
coll.bulkWrite(
- [{updateMany: {filter: {}, update: {$set: {a: 5}}, arrayFilters: "bad"}}]);
+ [{updateMany: {filter: {}, update: {$set: {"a.$[i]": 5}}, arrayFilters: "bad"}}]);
});
- let res = coll.bulkWrite(
- [{updateMany: {filter: {}, update: {$set: {a: 5}}, arrayFilters: [{i: 0}]}}]);
+ res = coll.bulkWrite(
+ [{updateMany: {filter: {}, update: {$set: {"a.$[i]": 5}}, arrayFilters: [{i: 0}]}}]);
assert.eq(0, res.matchedCount);
}
@@ -185,21 +220,537 @@
// update().
if (db.getMongo().writeMode() !== "commands") {
assert.throws(function() {
- coll.explain().update({_id: 0}, {$set: {a: 5}}, {arrayFilters: [{i: 0}]});
+ coll.explain().update({_id: 0}, {$set: {"a.$[i]": 5}}, {arrayFilters: [{i: 0}]});
});
} else {
assert.throws(function() {
- coll.explain().update({_id: 0}, {$set: {a: 5}}, {arrayFilters: "bad"});
+ coll.explain().update({_id: 0}, {$set: {"a.$[i]": 5}}, {arrayFilters: "bad"});
});
assert.commandWorked(
- coll.explain().update({_id: 0}, {$set: {a: 5}}, {arrayFilters: [{i: 0}]}));
+ coll.explain().update({_id: 0}, {$set: {"a.$[i]": 5}}, {arrayFilters: [{i: 0}]}));
}
// findAndModify().
assert.throws(function() {
coll.explain().findAndModify(
- {query: {_id: 0}, update: {$set: {a: 5}}, arrayFilters: "bad"});
+ {query: {_id: 0}, update: {$set: {"a.$[i]": 5}}, arrayFilters: "bad"});
});
assert.commandWorked(coll.explain().findAndModify(
- {query: {_id: 0}, update: {$set: {a: 5}}, arrayFilters: [{i: 0}]}));
-})(); \ No newline at end of file
+ {query: {_id: 0}, update: {$set: {"a.$[i]": 5}}, arrayFilters: [{i: 0}]}));
+
+ //
+ // Tests for individual update modifiers.
+ //
+
+ // $set.
+ coll.drop();
+ assert.writeOK(coll.insert({_id: 0, a: [0, 1, 0, 1]}));
+ if (db.getMongo().writeMode() === "commands") {
+ assert.writeOK(coll.update({_id: 0}, {$set: {"a.$[i]": 2}}, {arrayFilters: [{i: 0}]}));
+ assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [2, 1, 2, 1]});
+ }
+ assert.writeOK(coll.update({_id: 0}, {$set: {"a.$[]": 3}}));
+ assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [3, 3, 3, 3]});
+
+ // $unset.
+ coll.drop();
+ assert.writeOK(coll.insert({_id: 0, a: [0, 1, 0, 1]}));
+ if (db.getMongo().writeMode() === "commands") {
+ assert.writeOK(coll.update({_id: 0}, {$unset: {"a.$[i]": true}}, {arrayFilters: [{i: 0}]}));
+ assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [null, 1, null, 1]});
+ }
+ assert.writeOK(coll.update({_id: 0}, {$unset: {"a.$[]": true}}));
+ assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [null, null, null, null]});
+
+ // $inc.
+ coll.drop();
+ assert.writeOK(coll.insert({_id: 0, a: [0, 1, 0, 1]}));
+ if (db.getMongo().writeMode() === "commands") {
+ assert.writeOK(coll.update({_id: 0}, {$inc: {"a.$[i]": 1}}, {arrayFilters: [{i: 1}]}));
+ assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [0, 2, 0, 2]});
+ }
+ coll.drop();
+ assert.writeOK(coll.insert({_id: 0, a: [0, 1, 0, 1]}));
+ assert.writeOK(coll.update({_id: 0}, {$inc: {"a.$[]": 1}}));
+ assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [1, 2, 1, 2]});
+
+ // $mul.
+ coll.drop();
+ assert.writeOK(coll.insert({_id: 0, a: [0, 2, 0, 2]}));
+ if (db.getMongo().writeMode() === "commands") {
+ assert.writeOK(coll.update({_id: 0}, {$mul: {"a.$[i]": 3}}, {arrayFilters: [{i: 2}]}));
+ assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [0, 6, 0, 6]});
+ }
+ coll.drop();
+ assert.writeOK(coll.insert({_id: 0, a: [1, 2, 1, 2]}));
+ assert.writeOK(coll.update({_id: 0}, {$mul: {"a.$[]": 3}}));
+ assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [3, 6, 3, 6]});
+
+ // $rename.
+ // TODO SERVER-28774: $rename should use the new update implementation. These tests should then
+ // fail because you cannot rename fields through arrays.
+ coll.drop();
+ if (db.getMongo().writeMode() === "commands") {
+ res = coll.update({_id: 0}, {$rename: {"a.$[i]": "b"}}, {arrayFilters: [{i: 0}]});
+ assert.writeErrorWithCode(res, ErrorCodes.InvalidOptions);
+ assert.neq(
+ -1,
+ res.getWriteError().errmsg.indexOf("Cannot use array filters with modifier $rename"),
+ "update failed for a reason other than using array filters with $rename");
+ }
+ assert.writeOK(coll.insert({_id: 0, a: [0], b: [0]}));
+ res = coll.update({_id: 0}, {$rename: {"a.$[]": "b"}});
+ assert.writeErrorWithCode(res, 16837);
+ assert.neq(-1,
+ res.getWriteError().errmsg.indexOf(
+ "cannot use the part (a of a.$[]) to traverse the element ({a: [ 0.0 ]})"),
+ "update failed for a reason other than using array updates with $rename");
+ res = coll.update({_id: 0}, {$rename: {"a": "b.$[]"}});
+ assert.writeErrorWithCode(res, 16837);
+ assert.neq(-1,
+ res.getWriteError().errmsg.indexOf(
+ "cannot use the part (b of b.$[]) to traverse the element ({b: [ 0.0 ]})"),
+ "update failed for a reason other than using array updates with $rename");
+
+ // $setOnInsert.
+ // TODO SERVER-28773: $setOnInsert should use the new update implementation.
+ coll.drop();
+ if (db.getMongo().writeMode() === "commands") {
+ res = coll.update({_id: 0}, {$setOnInsert: {"a.$[i]": 1}}, {arrayFilters: [{i: 0}]});
+ assert.writeErrorWithCode(res, ErrorCodes.InvalidOptions);
+ assert.neq(-1,
+ res.getWriteError().errmsg.indexOf(
+ "Cannot use array filters with modifier $setOnInsert"),
+ "update failed for a reason other than using array filters with $setOnInsert");
+ }
+ res = coll.update({_id: 0, a: [0]}, {$setOnInsert: {"a.$[]": 1}}, {upsert: true});
+ assert.writeErrorWithCode(res, 16836);
+ assert.neq(-1,
+ res.getWriteError().errmsg.indexOf(
+ "cannot use the part (a of a.$[]) to traverse the element ({a: [ 0.0 ]})"),
+ "update failed for a reason other than using array updates with $setOnInsert");
+
+ // $min.
+ // TODO SERVER-28768: $min should use the new update implementation.
+ coll.drop();
+ if (db.getMongo().writeMode() === "commands") {
+ res = coll.update({_id: 0}, {$min: {"a.$[i]": 1}}, {arrayFilters: [{i: 0}]});
+ assert.writeErrorWithCode(res, ErrorCodes.InvalidOptions);
+ assert.neq(
+ -1,
+ res.getWriteError().errmsg.indexOf("Cannot use array filters with modifier $min"),
+ "update failed for a reason other than using array filters with $min");
+ }
+ assert.writeOK(coll.insert({_id: 0, a: [0]}));
+ res = coll.update({_id: 0}, {$min: {"a.$[]": 1}});
+ assert.writeErrorWithCode(res, 16837);
+ assert.neq(-1,
+ res.getWriteError().errmsg.indexOf(
+ "cannot use the part (a of a.$[]) to traverse the element ({a: [ 0.0 ]})"),
+ "update failed for a reason other than using array updates with $min");
+
+ // $max.
+ // TODO SERVER-28768: $max should use the new update implementation.
+ coll.drop();
+ if (db.getMongo().writeMode() === "commands") {
+ res = coll.update({_id: 0}, {$max: {"a.$[i]": 1}}, {arrayFilters: [{i: 0}]});
+ assert.writeErrorWithCode(res, ErrorCodes.InvalidOptions);
+ assert.neq(
+ -1,
+ res.getWriteError().errmsg.indexOf("Cannot use array filters with modifier $max"),
+ "update failed for a reason other than using array filters with $max");
+ }
+ assert.writeOK(coll.insert({_id: 0, a: [2]}));
+ res = coll.update({_id: 0}, {$max: {"a.$[]": 1}});
+ assert.writeErrorWithCode(res, 16837);
+ assert.neq(-1,
+ res.getWriteError().errmsg.indexOf(
+ "cannot use the part (a of a.$[]) to traverse the element ({a: [ 2.0 ]})"),
+ "update failed for a reason other than using array updates with $max");
+
+ // $currentDate.
+ // TODO SERVER-28766: $currentDate should use the new update implementation.
+ coll.drop();
+ if (db.getMongo().writeMode() === "commands") {
+ res = coll.update({_id: 0}, {$currentDate: {"a.$[i]": true}}, {arrayFilters: [{i: 0}]});
+ assert.writeErrorWithCode(res, ErrorCodes.InvalidOptions);
+ assert.neq(-1,
+ res.getWriteError().errmsg.indexOf(
+ "Cannot use array filters with modifier $currentDate"),
+ "update failed for a reason other than using array filters with $currentDate");
+ }
+ assert.writeOK(coll.insert({_id: 0, a: [0]}));
+ res = coll.update({_id: 0}, {$currentDate: {"a.$[]": true}});
+ assert.writeErrorWithCode(res, 16837);
+ assert.neq(-1,
+ res.getWriteError().errmsg.indexOf(
+ "cannot use the part (a of a.$[]) to traverse the element ({a: [ 0.0 ]})"),
+ "update failed for a reason other than using array updates with $currentDate");
+
+ // $addToSet.
+ // TODO SERVER-28764: $addToSet should use the new update implementation.
+ coll.drop();
+ if (db.getMongo().writeMode() === "commands") {
+ res = coll.update({_id: 0}, {$addToSet: {"a.$[i]": ["elem"]}}, {arrayFilters: [{i: 0}]});
+ assert.writeErrorWithCode(res, ErrorCodes.InvalidOptions);
+ assert.neq(
+ -1,
+ res.getWriteError().errmsg.indexOf("Cannot use array filters with modifier $addToSet"),
+ "update failed for a reason other than using array filters with $addToSet");
+ }
+ assert.writeOK(coll.insert({_id: 0, a: [[]]}));
+ res = coll.update({_id: 0}, {$addToSet: {"a.$[]": ["elem"]}});
+ assert.writeErrorWithCode(res, 16837);
+ assert.neq(-1,
+ res.getWriteError().errmsg.indexOf(
+ "cannot use the part (a of a.$[]) to traverse the element ({a: [ [] ]})"),
+ "update failed for a reason other than using array updates with $addToSet");
+
+ // $pop.
+ // TODO SERVER-28769: $pop should use the new update implementation.
+ coll.drop();
+ if (db.getMongo().writeMode() === "commands") {
+ res = coll.update({_id: 0}, {$pop: {"a.$[i]": 1}}, {arrayFilters: [{i: 0}]});
+ assert.writeErrorWithCode(res, ErrorCodes.InvalidOptions);
+ assert.neq(
+ -1,
+ res.getWriteError().errmsg.indexOf("Cannot use array filters with modifier $pop"),
+ "update failed for a reason other than using array filters with $pop");
+ }
+ assert.writeOK(coll.insert({_id: 0, a: [[0]]}));
+ res = coll.update({_id: 0}, {$pop: {"a.$[]": 1}});
+ assert.writeErrorWithCode(res, 16837);
+ assert.neq(-1,
+ res.getWriteError().errmsg.indexOf(
+ "cannot use the part (a of a.$[]) to traverse the element ({a: [ [ 0.0 ] ]})"),
+ "update failed for a reason other than using array updates with $pop");
+
+ // $pullAll.
+ // TODO SERVER-28771: $pullAll should use the new update implementation.
+ coll.drop();
+ if (db.getMongo().writeMode() === "commands") {
+ res = coll.update({_id: 0}, {$pullAll: {"a.$[i]": [0]}}, {arrayFilters: [{i: 0}]});
+ assert.writeErrorWithCode(res, ErrorCodes.InvalidOptions);
+ assert.neq(
+ -1,
+ res.getWriteError().errmsg.indexOf("Cannot use array filters with modifier $pullAll"),
+ "update failed for a reason other than using array filters with $pullAll");
+ }
+ assert.writeOK(coll.insert({_id: 0, a: [[0]]}));
+ res = coll.update({_id: 0}, {$pullAll: {"a.$[]": [0]}});
+ assert.writeErrorWithCode(res, 16837);
+ assert.neq(-1,
+ res.getWriteError().errmsg.indexOf(
+ "cannot use the part (a of a.$[]) to traverse the element ({a: [ [ 0.0 ] ]})"),
+ "update failed for a reason other than using array updates with $pullAll");
+
+ // $pull.
+ // TODO SERVER-28770: $pull should use the new update implementation.
+ coll.drop();
+ if (db.getMongo().writeMode() === "commands") {
+ res = coll.update({_id: 0}, {$pull: {"a.$[i]": {$in: [0]}}}, {arrayFilters: [{i: 0}]});
+ assert.writeErrorWithCode(res, ErrorCodes.InvalidOptions);
+ assert.neq(
+ -1,
+ res.getWriteError().errmsg.indexOf("Cannot use array filters with modifier $pull"),
+ "update failed for a reason other than using array filters with $pull");
+ }
+ assert.writeOK(coll.insert({_id: 0, a: [[0]]}));
+ res = coll.update({_id: 0}, {$pull: {"a.$[]": {$in: [0]}}});
+ assert.writeErrorWithCode(res, 16837);
+ assert.neq(-1,
+ res.getWriteError().errmsg.indexOf(
+ "cannot use the part (a of a.$[]) to traverse the element ({a: [ [ 0.0 ] ]})"),
+ "update failed for a reason other than using array updates with $pull");
+
+ // $pushAll.
+ // TODO SERVER-28772: $pushAll should use the new update implementation.
+ coll.drop();
+ if (db.getMongo().writeMode() === "commands") {
+ res = coll.update({_id: 0}, {$pushAll: {"a.$[i]": [0]}}, {arrayFilters: [{i: 0}]});
+ assert.writeErrorWithCode(res, ErrorCodes.InvalidOptions);
+ assert.neq(
+ -1,
+ res.getWriteError().errmsg.indexOf("Cannot use array filters with modifier $pushAll"),
+ "update failed for a reason other than using array filters with $pushAll");
+ }
+ assert.writeOK(coll.insert({_id: 0, a: [[0]]}));
+ res = coll.update({_id: 0}, {$pushAll: {"a.$[]": [1]}});
+ assert.writeErrorWithCode(res, 16837);
+ assert.neq(-1,
+ res.getWriteError().errmsg.indexOf(
+ "cannot use the part (a of a.$[]) to traverse the element ({a: [ [ 0.0 ] ]})"),
+ "update failed for a reason other than using array updates with $pushAll");
+
+ // $push.
+ // TODO SERVER-28772: $push should use the new update implementation.
+ coll.drop();
+ if (db.getMongo().writeMode() === "commands") {
+ res = coll.update({_id: 0}, {$push: {"a.$[i]": 0}}, {arrayFilters: [{i: 0}]});
+ assert.writeErrorWithCode(res, ErrorCodes.InvalidOptions);
+ assert.neq(
+ -1,
+ res.getWriteError().errmsg.indexOf("Cannot use array filters with modifier $push"),
+ "update failed for a reason other than using array filters with $push");
+ }
+ assert.writeOK(coll.insert({_id: 0, a: [[0]]}));
+ res = coll.update({_id: 0}, {$push: {"a.$[]": 1}});
+ assert.writeErrorWithCode(res, 16837);
+ assert.neq(-1,
+ res.getWriteError().errmsg.indexOf(
+ "cannot use the part (a of a.$[]) to traverse the element ({a: [ [ 0.0 ] ]})"),
+ "update failed for a reason other than using array updates with $push");
+
+ // $bit.
+ // TODO SERVER-28765: $bit should use the new update implementation.
+ coll.drop();
+ if (db.getMongo().writeMode() === "commands") {
+ res = coll.update(
+ {_id: 0}, {$bit: {"a.$[i]": {or: NumberInt(10)}}}, {arrayFilters: [{i: 0}]});
+ assert.writeErrorWithCode(res, ErrorCodes.InvalidOptions);
+ assert.neq(
+ -1,
+ res.getWriteError().errmsg.indexOf("Cannot use array filters with modifier $bit"),
+ "update failed for a reason other than using array filters with $bit");
+ }
+ assert.writeOK(coll.insert({_id: 0, a: [0]}));
+ res = coll.update({_id: 0}, {$bit: {"a.$[]": {or: NumberInt(10)}}});
+ assert.writeErrorWithCode(res, 16837);
+ assert.neq(-1,
+ res.getWriteError().errmsg.indexOf(
+ "cannot use the part (a of a.$[]) to traverse the element ({a: [ 0.0 ]})"),
+ "update failed for a reason other than using array updates with $bit");
+
+ //
+ // Multi update tests.
+ //
+
+ coll.drop();
+ assert.writeOK(coll.insert({_id: 0, a: [0, 1, 0, 1]}));
+ assert.writeOK(coll.insert({_id: 1, a: [0, 2, 0, 2]}));
+ if (db.getMongo().writeMode() === "commands") {
+ assert.writeOK(
+ coll.update({}, {$set: {"a.$[i]": 3}}, {multi: true, arrayFilters: [{i: 0}]}));
+ assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [3, 1, 3, 1]});
+ assert.eq(coll.findOne({_id: 1}), {_id: 1, a: [3, 2, 3, 2]});
+ }
+ assert.writeOK(coll.update({}, {$set: {"a.$[]": 3}}, {multi: true}));
+ assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [3, 3, 3, 3]});
+ assert.eq(coll.findOne({_id: 1}), {_id: 1, a: [3, 3, 3, 3]});
+
+ //
+ // Collation tests.
+ //
+
+ if (db.getMongo().writeMode() === "commands") {
+ // arrayFilters respect operation collation.
+ coll.drop();
+ assert.writeOK(coll.insert({_id: 0, a: ["foo", "FOO"]}));
+ assert.writeOK(
+ coll.update({_id: 0},
+ {$set: {"a.$[i]": "bar"}},
+ {arrayFilters: [{i: "foo"}], collation: {locale: "en_US", strength: 2}}));
+ assert.eq(coll.findOne({_id: 0}), {_id: 0, a: ["bar", "bar"]});
+
+ // arrayFilters respect the collection default collation.
+ coll.drop();
+ assert.commandWorked(db.createCollection("update_arrayFilters",
+ {collation: {locale: "en_US", strength: 2}}));
+ coll = db.update_arrayFilters;
+ assert.writeOK(coll.insert({_id: 0, a: ["foo", "FOO"]}));
+ assert.writeOK(
+ coll.update({_id: 0}, {$set: {"a.$[i]": "bar"}}, {arrayFilters: [{i: "foo"}]}));
+ assert.eq(coll.findOne({_id: 0}), {_id: 0, a: ["bar", "bar"]});
+ }
+
+ //
+ // Examples.
+ //
+
+ // Update all documents in array.
+ coll.drop();
+ assert.writeOK(coll.insert({_id: 0, a: [{b: 0}, {b: 1}]}));
+ assert.writeOK(coll.update({_id: 0}, {$set: {"a.$[].b": 2}}));
+ assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [{b: 2}, {b: 2}]});
+
+ // Update all matching documents in array.
+ if (db.getMongo().writeMode() === "commands") {
+ coll.drop();
+ assert.writeOK(coll.insert({_id: 0, a: [{b: 0}, {b: 1}]}));
+ assert.writeOK(
+ coll.update({_id: 0}, {$set: {"a.$[i].b": 2}}, {arrayFilters: [{"i.b": 0}]}));
+ assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [{b: 2}, {b: 1}]});
+ }
+
+ // Update all matching scalars in array.
+ if (db.getMongo().writeMode() === "commands") {
+ coll.drop();
+ assert.writeOK(coll.insert({_id: 0, a: [0, 1]}));
+ assert.writeOK(coll.update({_id: 0}, {$set: {"a.$[i]": 2}}, {arrayFilters: [{i: 0}]}));
+ assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [2, 1]});
+ }
+
+ // Update all matching scalars in array of arrays.
+ if (db.getMongo().writeMode() === "commands") {
+ coll.drop();
+ assert.writeOK(coll.insert({_id: 0, a: [[0, 1], [0, 1]]}));
+ assert.writeOK(coll.update({_id: 0}, {$set: {"a.$[].$[j]": 2}}, {arrayFilters: [{j: 0}]}));
+ assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [[2, 1], [2, 1]]});
+ }
+
+ // Update all matching documents in nested array.
+ if (db.getMongo().writeMode() === "commands") {
+ coll.drop();
+ assert.writeOK(
+ coll.insert({_id: 0, a: [{b: 0, c: [{d: 0}, {d: 1}]}, {b: 1, c: [{d: 0}, {d: 1}]}]}));
+ assert.writeOK(coll.update(
+ {_id: 0}, {$set: {"a.$[i].c.$[j].d": 2}}, {arrayFilters: [{"i.b": 0}, {"j.d": 0}]}));
+ assert.eq(coll.findOne({_id: 0}),
+ {_id: 0, a: [{b: 0, c: [{d: 2}, {d: 1}]}, {b: 1, c: [{d: 0}, {d: 1}]}]});
+ }
+
+ // Update all scalars in array matching a logical predicate.
+ if (db.getMongo().writeMode() === "commands") {
+ coll.drop();
+ assert.writeOK(coll.insert({_id: 0, a: [0, 1, 3]}));
+ assert.writeOK(coll.update(
+ {_id: 0}, {$set: {"a.$[i]": 2}}, {arrayFilters: [{$or: [{i: 0}, {i: 3}]}]}));
+ assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [2, 1, 2]});
+ }
+
+ //
+ // Error cases.
+ //
+
+ // Provide an <id> with no array filter.
+ coll.drop();
+ res = coll.update({_id: 0}, {$set: {"a.$[i]": 0}});
+ assert.writeErrorWithCode(res, ErrorCodes.BadValue);
+ assert.neq(-1,
+ res.getWriteError().errmsg.indexOf(
+ "No array filter found for identifier 'i' in path 'a.$[i]'"),
+ "update failed for a reason other than missing array filter");
+
+ // Use an <id> at the same position as a $, integer, or field name.
+ if (db.getMongo().writeMode() === "commands") {
+ coll.drop();
+
+ res = coll.update({_id: 0}, {$set: {"a.$[i]": 0, "a.$": 0}}, {arrayFilters: [{i: 0}]});
+ assert.writeErrorWithCode(res, ErrorCodes.ConflictingUpdateOperators);
+ assert.neq(
+ -1,
+ res.getWriteError().errmsg.indexOf(
+ "Updating the path 'a.$' would create a conflict at 'a'"),
+ "update failed for a reason other than conflicting array update and positional operator");
+
+ res = coll.update({_id: 0}, {$set: {"a.$[i]": 0, "a.0": 0}}, {arrayFilters: [{i: 0}]});
+ assert.writeErrorWithCode(res, ErrorCodes.ConflictingUpdateOperators);
+ assert.neq(
+ -1,
+ res.getWriteError().errmsg.indexOf(
+ "Updating the path 'a.0' would create a conflict at 'a'"),
+ "update failed for a reason other than conflicting array update and integer field name");
+
+ res = coll.update({_id: 0}, {$set: {"a.$[i]": 0, "a.b": 0}}, {arrayFilters: [{i: 0}]});
+ assert.writeErrorWithCode(res, ErrorCodes.ConflictingUpdateOperators);
+ assert.neq(-1,
+ res.getWriteError().errmsg.indexOf(
+ "Updating the path 'a.b' would create a conflict at 'a'"),
+ "update failed for a reason other than conflicting array update and field name");
+ }
+
+ // Include an implicit array traversal in a path in an update modifier.
+ coll.drop();
+ assert.writeOK(coll.insert({_id: 0, a: [{b: 0}]}));
+ res = coll.update({_id: 0}, {$set: {"a.b": 1}});
+ assert.writeErrorWithCode(res, ErrorCodes.PathNotViable);
+ assert.neq(-1,
+ res.getWriteError().errmsg.indexOf(
+ "Cannot create field 'b' in element {a: [ { b: 0.0 } ]}"),
+ "update failed for a reason other than implicit array traversal");
+
+ // <id> contains special characters or does not begin with a lowercase letter.
+ if (db.getMongo().writeMode() === "commands") {
+ coll.drop();
+
+ res = coll.update({_id: 0}, {$set: {"a.$[$i]": 1}}, {arrayFilters: [{"$i": 0}]});
+ assert.writeErrorWithCode(res, ErrorCodes.BadValue);
+ assert.neq(-1,
+ res.getWriteError().errmsg.indexOf("unknown top level operator: $i"),
+ "update failed for a reason other than bad array filter identifier");
+
+ res = coll.update({_id: 0}, {$set: {"a.$[I]": 1}}, {arrayFilters: [{"I": 0}]});
+ assert.writeErrorWithCode(res, ErrorCodes.BadValue);
+ assert.neq(
+ -1,
+ res.getWriteError().errmsg.indexOf(
+ "The top-level field name in an array filter must be an alphanumeric string beginning with a lowercase letter, found 'I'"),
+ "update failed for a reason other than bad array filter identifier");
+
+ assert.writeOK(coll.insert({_id: 0, a: [0], b: [{j: 0}]}));
+ res = coll.update(
+ {_id: 0}, {$set: {"a.$[i.j]": 1, "b.$[i]": 1}}, {arrayFilters: [{"i.j": 0}]});
+ assert.writeErrorWithCode(res, ErrorCodes.PathNotViable);
+ assert.neq(
+ -1,
+ res.getWriteError().errmsg.indexOf("Cannot create field '$[i' in element {a: [ 0.0 ]}"),
+ "update failed for a reason other than bad array filter identifier");
+ }
+
+ //
+ // Nested array update conflict detection.
+ //
+
+ if (db.getMongo().writeMode() === "commands") {
+ // "a.$[i].b.$[k].c" and "a.$[j].b.$[k].d" are not a conflict, even if i and j are not
+ // disjoint.
+ coll.drop();
+ assert.writeOK(coll.insert({_id: 0, a: [{x: 0, b: [{y: 0, c: 0, d: 0}]}]}));
+ assert.writeOK(coll.update({_id: 0},
+ {$set: {"a.$[i].b.$[k].c": 1, "a.$[j].b.$[k].d": 1}},
+ {arrayFilters: [{"i.x": 0}, {"j.x": 0}, {"k.y": 0}]}));
+ assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [{x: 0, b: [{y: 0, c: 1, d: 1}]}]});
+
+ // "a.$[i].b.$[k].c" and "a.$[j].b.$[k].c" are a conflict iff i and j are not disjoint.
+ coll.drop();
+ assert.writeOK(
+ coll.insert({_id: 0, a: [{x: 0, b: [{y: 0, c: 0}]}, {x: 1, b: [{y: 0, c: 0}]}]}));
+
+ res = coll.update({_id: 0},
+ {$set: {"a.$[i].b.$[k].c": 1, "a.$[j].b.$[k].c": 2}},
+ {arrayFilters: [{"i.x": 0}, {"j.x": 0}, {"k.y": 0}]});
+ assert.writeErrorWithCode(res, ErrorCodes.ConflictingUpdateOperators);
+ assert.neq(
+ -1,
+ res.getWriteError().errmsg.indexOf("Update created a conflict at 'a.0.b.$[k].c'"),
+ "update failed for a reason other than conflicting array updates");
+
+ assert.writeOK(coll.update({_id: 0},
+ {$set: {"a.$[i].b.$[k].c": 1, "a.$[j].b.$[k].c": 2}},
+ {arrayFilters: [{"i.x": 0}, {"j.x": 1}, {"k.y": 0}]}));
+ assert.eq(coll.findOne({_id: 0}),
+ {_id: 0, a: [{x: 0, b: [{y: 0, c: 1}]}, {x: 1, b: [{y: 0, c: 2}]}]});
+
+ // "a.$[i].b.$[k].c" and "a.$[j].b.$[m].c" are a conflict iff k and m intersect for some
+ // element of a matching i and j.
+ coll.drop();
+ assert.writeOK(coll.insert(
+ {_id: 0, a: [{x: 0, b: [{y: 0, c: 0}]}, {x: 1, b: [{y: 0, c: 0}, {y: 1, c: 0}]}]}));
+
+ res = coll.update({_id: 0},
+ {$set: {"a.$[i].b.$[k].c": 1, "a.$[j].b.$[m].c": 2}},
+ {arrayFilters: [{"i.x": 0}, {"j.x": 0}, {"k.y": 0}, {"m.y": 0}]});
+ assert.writeErrorWithCode(res, ErrorCodes.ConflictingUpdateOperators);
+ assert.neq(-1,
+ res.getWriteError().errmsg.indexOf("Update created a conflict at 'a.0.b.0.c'"),
+ "update failed for a reason other than conflicting array updates");
+
+ assert.writeOK(coll.update({_id: 0}, {$set: {"a.$[i].b.$[k].c": 1, "a.$[j].b.$[m].c": 2}}, {
+ arrayFilters: [{"i.x": 1}, {"j.x": 1}, {"k.y": 0}, {"m.y": 1}]
+ }));
+ assert.eq(
+ coll.findOne({_id: 0}),
+ {_id: 0, a: [{x: 0, b: [{y: 0, c: 0}]}, {x: 1, b: [{y: 0, c: 1}, {y: 1, c: 2}]}]});
+ }
+
+})();
diff --git a/jstests/core/updateh.js b/jstests/core/updateh.js
index ea1c87582f9..412807f1041 100644
--- a/jstests/core/updateh.js
+++ b/jstests/core/updateh.js
@@ -75,7 +75,7 @@ assert.writeError(res);
// Test that '$id', '$db', and '$ref' are acceptable field names in
// the correct case ( subdoc)
// SERVER-3231
-res = t.update({n: 0}, {$set: {'x.$ref': '1', 'x.$id': 1, 'x.$db': '1'}});
+res = t.update({n: 0}, {$set: {x: {$ref: '1', $id: 1, $db: '1'}}});
assert.writeOK(res);
t.save({_id: 0, n: 0});
diff --git a/jstests/noPassthrough/arrayFilters_feature_compatibility_version.js b/jstests/noPassthrough/arrayFilters_feature_compatibility_version.js
new file mode 100644
index 00000000000..6877bc12c30
--- /dev/null
+++ b/jstests/noPassthrough/arrayFilters_feature_compatibility_version.js
@@ -0,0 +1,78 @@
+// Test that arrayFilters usage is restricted when the featureCompatibilityVersion is 3.4.
+
+(function() {
+ "use strict";
+
+ const conn = MongoRunner.runMongod({});
+ assert.neq(null, conn, "mongod was unable to start up");
+
+ let testDB = conn.getDB("arrayFilters_feature_compatibility_version");
+ assert.commandWorked(testDB.dropDatabase());
+ let coll = testDB.coll;
+
+ let adminDB = conn.getDB("admin");
+
+ let res;
+
+ //
+ // arrayFilters is not permitted when the featureCompatibilityVersion is 3.4.
+ //
+
+ assert.commandWorked(adminDB.runCommand({setFeatureCompatibilityVersion: "3.4"}));
+ res = adminDB.runCommand({getParameter: 1, featureCompatibilityVersion: 1});
+ assert.commandWorked(res);
+ assert.eq("3.4", res.featureCompatibilityVersion);
+
+ // Update.
+ res = coll.update({_id: 0}, {$set: {"a.$[i]": 5}}, {arrayFilters: [{i: 0}]});
+ assert.writeError(res, ErrorCodes.InvalidOptions);
+ assert.neq(
+ -1,
+ res.getWriteError().errmsg.indexOf(
+ "The featureCompatibilityVersion must be 3.6 to use arrayFilters. See http://dochub.mongodb.org/core/3.6-feature-compatibility."),
+ "update failed for a reason other than featureCompatibilityVersion");
+
+ // FindAndModify.
+ assert.throws(function() {
+ coll.findAndModify(
+ {query: {_id: 0}, update: {$set: {"a.$[i]": 5}}, arrayFilters: [{i: 0}]});
+ });
+
+ // Update explain.
+ assert.throws(function() {
+ coll.explain().update({_id: 0}, {$set: {"a.$[i]": 5}}, {arrayFilters: [{i: 0}]});
+ });
+
+ // FindAndModify explain.
+ assert.throws(function() {
+ coll.explain().findAndModify(
+ {query: {_id: 0}, update: {$set: {"a.$[i]": 5}}, arrayFilters: [{i: 0}]});
+ });
+
+ //
+ // arrayFilters is permitted when the featureCompatibilityVersion is 3.6.
+ //
+
+ assert.commandWorked(adminDB.runCommand({setFeatureCompatibilityVersion: "3.6"}));
+ res = adminDB.runCommand({getParameter: 1, featureCompatibilityVersion: 1});
+ assert.commandWorked(res);
+ assert.eq("3.6", res.featureCompatibilityVersion);
+
+ // Update.
+ assert.writeOK(coll.update({_id: 0}, {$set: {"a.$[i]": 5}}, {arrayFilters: [{i: 0}]}));
+
+ // FindAndModify.
+ assert.eq(null,
+ coll.findAndModify(
+ {query: {_id: 0}, update: {$set: {"a.$[i]": 5}}, arrayFilters: [{i: 0}]}));
+
+ // Update explain.
+ assert.commandWorked(
+ coll.explain().update({_id: 0}, {$set: {"a.$[i]": 5}}, {arrayFilters: [{i: 0}]}));
+
+ // FindAndModify explain.
+ assert.commandWorked(coll.explain().findAndModify(
+ {query: {_id: 0}, update: {$set: {"a.$[i]": 5}}, arrayFilters: [{i: 0}]}));
+
+ MongoRunner.stopMongod(conn);
+}());
diff --git a/jstests/replsets/oplog_replay_on_startup.js b/jstests/replsets/oplog_replay_on_startup.js
index 050cbe1cc5d..248d04aa1a3 100644
--- a/jstests/replsets/oplog_replay_on_startup.js
+++ b/jstests/replsets/oplog_replay_on_startup.js
@@ -72,10 +72,6 @@
var injectedMinValidDoc = {
_id: ObjectId(),
- // minvalid:
- ts: ts(minValid),
- t: term,
-
// appliedThrough
begin: {
ts: ts(begin),
@@ -83,6 +79,10 @@
},
oplogDeleteFromPoint: ts(deletePoint),
+
+ // minvalid:
+ t: term,
+ ts: ts(minValid),
};
// This weird mechanism is the only way to bypass mongod's attempt to fill in null
diff --git a/jstests/replsets/oplog_replay_on_startup_with_bad_op.js b/jstests/replsets/oplog_replay_on_startup_with_bad_op.js
index e238cc05955..9598f670acd 100644
--- a/jstests/replsets/oplog_replay_on_startup_with_bad_op.js
+++ b/jstests/replsets/oplog_replay_on_startup_with_bad_op.js
@@ -38,10 +38,6 @@
var injectedMinValidDoc = {
_id: ObjectId(),
- // minvalid:
- ts: newTs,
- t: term,
-
// appliedThrough
begin: {
ts: lastTs,
@@ -49,6 +45,10 @@
},
oplogDeleteFromPoint: Timestamp(),
+
+ // minvalid:
+ t: term,
+ ts: newTs,
};
// This weird mechanism is the only way to bypass mongod's attempt to fill in null
diff --git a/src/mongo/db/auth/authz_manager_external_state_mock.cpp b/src/mongo/db/auth/authz_manager_external_state_mock.cpp
index 933f0aff752..8bdd5e71191 100644
--- a/src/mongo/db/auth/authz_manager_external_state_mock.cpp
+++ b/src/mongo/db/auth/authz_manager_external_state_mock.cpp
@@ -179,7 +179,8 @@ Status AuthzManagerExternalStateMock::updateOne(OperationContext* opCtx,
namespace mmb = mutablebson;
UpdateDriver::Options updateOptions;
UpdateDriver driver(updateOptions);
- Status status = driver.parse(updatePattern);
+ std::map<StringData, std::unique_ptr<ArrayFilter>> arrayFilters;
+ Status status = driver.parse(updatePattern, arrayFilters);
if (!status.isOK())
return status;
diff --git a/src/mongo/db/auth/role_graph_update.cpp b/src/mongo/db/auth/role_graph_update.cpp
index d1b45e3b98d..d8148f56132 100644
--- a/src/mongo/db/auth/role_graph_update.cpp
+++ b/src/mongo/db/auth/role_graph_update.cpp
@@ -175,7 +175,10 @@ Status handleOplogUpdate(OperationContext* opCtx,
UpdateDriver::Options updateOptions;
UpdateDriver driver(updateOptions);
- status = driver.parse(updatePattern);
+
+ // Oplog updates do not have array filters.
+ std::map<StringData, std::unique_ptr<ArrayFilter>> arrayFilters;
+ status = driver.parse(updatePattern, arrayFilters);
if (!status.isOK())
return status;
diff --git a/src/mongo/db/commands/write_commands/write_commands.cpp b/src/mongo/db/commands/write_commands/write_commands.cpp
index 7ddf04a7aff..b9e92e13947 100644
--- a/src/mongo/db/commands/write_commands/write_commands.cpp
+++ b/src/mongo/db/commands/write_commands/write_commands.cpp
@@ -313,6 +313,7 @@ public:
updateRequest.setQuery(batch.updates[0].query);
updateRequest.setCollation(batch.updates[0].collation);
updateRequest.setUpdates(batch.updates[0].update);
+ updateRequest.setArrayFilters(batch.updates[0].arrayFilters);
updateRequest.setMulti(batch.updates[0].multi);
updateRequest.setUpsert(batch.updates[0].upsert);
updateRequest.setYieldPolicy(PlanExecutor::YIELD_AUTO);
diff --git a/src/mongo/db/exec/update.cpp b/src/mongo/db/exec/update.cpp
index 90cd46ab423..87145233e3d 100644
--- a/src/mongo/db/exec/update.cpp
+++ b/src/mongo/db/exec/update.cpp
@@ -89,7 +89,7 @@ StatusWith<std::uint32_t> storageValidChildren(const mb::ConstElement&,
StatusWith<std::uint32_t> storageValid(const mb::Document& doc,
bool deep,
std::uint32_t recursionLevel) {
- if (recursionLevel >= BSONDepth::getMaxDepthForUserStorage()) {
+ if (recursionLevel > BSONDepth::getMaxDepthForUserStorage()) {
return Status(ErrorCodes::Overflow,
str::stream() << "Document exceeds maximum nesting depth of "
<< BSONDepth::getMaxDepthForUserStorage());
@@ -203,7 +203,7 @@ Status validateDollarPrefixElement(const mb::ConstElement elem, const bool deep)
*/
StatusWith<std::uint32_t> storageValidParents(const mb::ConstElement& elem,
std::uint32_t recursionLevel) {
- if (recursionLevel >= BSONDepth::getMaxDepthForUserStorage()) {
+ if (recursionLevel > BSONDepth::getMaxDepthForUserStorage()) {
return Status(ErrorCodes::Overflow,
str::stream() << "Document exceeds maximum nesting depth of "
<< BSONDepth::getMaxDepthForUserStorage());
@@ -232,7 +232,7 @@ StatusWith<std::uint32_t> storageValid(const mb::ConstElement& elem,
if (!elem.ok())
return Status(ErrorCodes::BadValue, "Invalid elements cannot be stored.");
- if (recursionLevel >= BSONDepth::getMaxDepthForUserStorage()) {
+ if (recursionLevel > BSONDepth::getMaxDepthForUserStorage()) {
return Status(ErrorCodes::Overflow,
str::stream() << "Document exceeds maximum nesting depth of "
<< BSONDepth::getMaxDepthForUserStorage());
@@ -322,7 +322,7 @@ inline Status validate(const BSONObj& original,
if (opts.enforceOkForStorage) {
// No specific fields were updated so the whole doc must be checked
const bool doRecursiveCheck = true;
- const std::uint32_t recursionLevel = 1;
+ const std::uint32_t recursionLevel = 0;
auto documentDepth = storageValid(updated, doRecursiveCheck, recursionLevel);
if (!documentDepth.isOK()) {
return documentDepth.getStatus();
diff --git a/src/mongo/db/nesting_depth_test.cpp b/src/mongo/db/nesting_depth_test.cpp
index 139dddfa35e..fd14418024d 100644
--- a/src/mongo/db/nesting_depth_test.cpp
+++ b/src/mongo/db/nesting_depth_test.cpp
@@ -207,6 +207,42 @@ void appendUpdateCommandWithNestedDocuments(BSONObjBuilder* builder,
builder->doneFast();
}
+/**
+ * Appends a command to 'builder' that replaces a document with nesting depth 'originalNestingDepth'
+ * with a document 'newNestingDepth' levels deep. For example,
+ *
+ * BSONObjBuilder b;
+ * appendUpdateCommandWithNestedDocuments(&b, 1, 2);
+ *
+ * appends an update to 'b' that replaces { a: 1 } with { a: { a: 1 } }.
+ */
+void appendUpdateReplaceCommandWithNestedDocuments(BSONObjBuilder* builder,
+ size_t originalNestingDepth,
+ size_t newNestingDepth) {
+ ASSERT_GT(newNestingDepth, originalNestingDepth);
+
+ auto originalFieldName = getRepeatedFieldName(originalNestingDepth);
+ builder->append("update", kCollectionName);
+ {
+ BSONArrayBuilder updates(builder->subarrayStart("updates"));
+ {
+ BSONObjBuilder updateDoc(updates.subobjStart());
+ {
+ BSONObjBuilder query(updateDoc.subobjStart("q"));
+ query.append(originalFieldName, 1);
+ query.doneFast();
+ }
+ {
+ BSONObjBuilder update(updateDoc.subobjStart("u"));
+ appendNestedObject(&update, newNestingDepth);
+ update.doneFast();
+ }
+ updateDoc.doneFast();
+ }
+ }
+ builder->doneFast();
+}
+
TEST_F(NestingDepthFixture, CanUpdateDocumentIfItStaysWithinDepthLimit) {
BSONObjBuilder insertCmd;
appendInsertCommandWithNestedDocument(&insertCmd, 3);
@@ -241,6 +277,40 @@ TEST_F(NestingDepthFixture, CannotUpdateDocumentToExceedDepthLimit) {
assertWriteError(kCollectionName, updateCmd.obj(), ErrorCodes::Overflow);
}
+TEST_F(NestingDepthFixture, CanReplaceDocumentIfItStaysWithinDepthLimit) {
+ BSONObjBuilder insertCmd;
+ appendInsertCommandWithNestedDocument(&insertCmd, 3);
+ assertCommandOK(kCollectionName, insertCmd.obj());
+
+ BSONObjBuilder updateCmd;
+ appendUpdateReplaceCommandWithNestedDocuments(&updateCmd, 3, 5);
+ assertCommandOK(kCollectionName, updateCmd.obj());
+}
+
+TEST_F(NestingDepthFixture, CanReplaceDocumentToBeExactlyAtDepthLimit) {
+ const auto largeButValidDepth = BSONDepth::getMaxDepthForUserStorage() - 2;
+ BSONObjBuilder insertCmd;
+ appendInsertCommandWithNestedDocument(&insertCmd, largeButValidDepth);
+ assertCommandOK(kCollectionName, insertCmd.obj());
+
+ BSONObjBuilder updateCmd;
+ appendUpdateReplaceCommandWithNestedDocuments(
+ &updateCmd, largeButValidDepth, BSONDepth::getMaxDepthForUserStorage());
+ assertCommandOK(kCollectionName, updateCmd.obj());
+}
+
+TEST_F(NestingDepthFixture, CannotReplaceDocumentToExceedDepthLimit) {
+ const auto largeButValidDepth = BSONDepth::getMaxDepthForUserStorage() - 3;
+ BSONObjBuilder insertCmd;
+ appendInsertCommandWithNestedDocument(&insertCmd, largeButValidDepth);
+ assertCommandOK(kCollectionName, insertCmd.obj());
+
+ BSONObjBuilder updateCmd;
+ appendUpdateReplaceCommandWithNestedDocuments(
+ &updateCmd, largeButValidDepth, BSONDepth::getMaxDepthForUserStorage() + 1);
+ assertWriteError(kCollectionName, updateCmd.obj(), ErrorCodes::Overflow);
+}
+
/**
* Creates a field name string that represents an array nested 'depth' levels deep.
*/
@@ -295,6 +365,45 @@ void appendUpdateCommandWithNestedArrays(BSONObjBuilder* builder,
builder->doneFast();
}
+/**
+ * Appends a command to 'builder' that replaces a document with an array nested
+ * 'originalNestingDepth' levels deep with a document with an array nested 'newNestingDepth' levels
+ * deep. For example,
+ *
+ * BSONObjBuilder b;
+ * appendUpdateCommandWithNestedDocuments(&b, 3, 4);
+ *
+ * appends an update to 'b' that replaces { a: [[1]] } with { a: [[[1]]] }.
+ */
+void appendUpdateReplaceCommandWithNestedArrays(BSONObjBuilder* builder,
+ size_t originalNestingDepth,
+ size_t newNestingDepth) {
+ ASSERT_GT(newNestingDepth, originalNestingDepth);
+
+ auto originalFieldName = getRepeatedArrayPath(originalNestingDepth);
+ builder->append("update", kCollectionName);
+ {
+ BSONArrayBuilder updates(builder->subarrayStart("updates"));
+ {
+ BSONObjBuilder updateDoc(updates.subobjStart());
+ {
+ BSONObjBuilder query(updateDoc.subobjStart("q"));
+ query.append(originalFieldName, 1);
+ query.doneFast();
+ }
+ {
+ BSONObjBuilder update(updateDoc.subobjStart("u"));
+ BSONArrayBuilder field(update.subobjStart(originalFieldName));
+ appendNestedArray(&field, newNestingDepth - 1);
+ field.doneFast();
+ update.doneFast();
+ }
+ updateDoc.doneFast();
+ }
+ }
+ builder->doneFast();
+}
+
TEST_F(NestingDepthFixture, CanUpdateArrayIfItStaysWithinDepthLimit) {
BSONObjBuilder insertCmd;
appendInsertCommandWithNestedArray(&insertCmd, 3);
@@ -328,6 +437,40 @@ TEST_F(NestingDepthFixture, CannotUpdateArrayToExceedDepthLimit) {
&updateCmd, largeButValidDepth, BSONDepth::getMaxDepthForUserStorage() + 1);
assertWriteError(kCollectionName, updateCmd.obj(), ErrorCodes::Overflow);
}
+
+TEST_F(NestingDepthFixture, CanReplaceArrayIfItStaysWithinDepthLimit) {
+ BSONObjBuilder insertCmd;
+ appendInsertCommandWithNestedArray(&insertCmd, 3);
+ assertCommandOK(kCollectionName, insertCmd.obj());
+
+ BSONObjBuilder updateCmd;
+ appendUpdateReplaceCommandWithNestedArrays(&updateCmd, 3, 5);
+ assertCommandOK(kCollectionName, updateCmd.obj());
+}
+
+TEST_F(NestingDepthFixture, CanReplaceArrayToBeExactlyAtDepthLimit) {
+ const auto largeButValidDepth = BSONDepth::getMaxDepthForUserStorage() - 1;
+ BSONObjBuilder insertCmd;
+ appendInsertCommandWithNestedArray(&insertCmd, largeButValidDepth);
+ assertCommandOK(kCollectionName, insertCmd.obj());
+
+ BSONObjBuilder updateCmd;
+ appendUpdateReplaceCommandWithNestedArrays(
+ &updateCmd, largeButValidDepth, BSONDepth::getMaxDepthForUserStorage());
+ assertCommandOK(kCollectionName, updateCmd.obj());
+}
+
+TEST_F(NestingDepthFixture, CannotReplaceArrayToExceedDepthLimit) {
+ const auto largeButValidDepth = BSONDepth::getMaxDepthForUserStorage() - 4;
+ BSONObjBuilder insertCmd;
+ appendInsertCommandWithNestedArray(&insertCmd, largeButValidDepth);
+ assertCommandOK(kCollectionName, insertCmd.obj());
+
+ BSONObjBuilder updateCmd;
+ appendUpdateReplaceCommandWithNestedArrays(
+ &updateCmd, largeButValidDepth, BSONDepth::getMaxDepthForUserStorage() + 1);
+ assertWriteError(kCollectionName, updateCmd.obj(), ErrorCodes::Overflow);
+}
} // namespace
} // namespace executor
} // namespace mongo
diff --git a/src/mongo/db/ops/parsed_update.cpp b/src/mongo/db/ops/parsed_update.cpp
index 2d3de40342a..b5979baed00 100644
--- a/src/mongo/db/ops/parsed_update.cpp
+++ b/src/mongo/db/ops/parsed_update.cpp
@@ -35,6 +35,7 @@
#include "mongo/db/query/canonical_query.h"
#include "mongo/db/query/collation/collator_factory_interface.h"
#include "mongo/db/query/query_planner_common.h"
+#include "mongo/db/server_options.h"
namespace mongo {
@@ -132,12 +133,20 @@ Status ParsedUpdate::parseUpdate() {
_driver.setModOptions(ModifierInterface::Options(
!_opCtx->writesAreReplicated(), shouldValidate, _collator.get()));
- return _driver.parse(_request->getUpdates(), _request->isMulti());
+ return _driver.parse(_request->getUpdates(), _arrayFilters, _request->isMulti());
}
Status ParsedUpdate::parseArrayFilters() {
const ExtensionsCallbackReal extensionsCallback(_opCtx, &_request->getNamespaceString());
+ if (!_request->getArrayFilters().empty() &&
+ serverGlobalParams.featureCompatibility.version.load() ==
+ ServerGlobalParams::FeatureCompatibility::Version::k34) {
+ return Status(ErrorCodes::InvalidOptions,
+ "The featureCompatibilityVersion must be 3.6 to use arrayFilters. See "
+ "http://dochub.mongodb.org/core/3.6-feature-compatibility.");
+ }
+
for (auto rawArrayFilter : _request->getArrayFilters()) {
auto arrayFilterStatus =
ArrayFilter::parse(rawArrayFilter, extensionsCallback, _collator.get());
@@ -195,6 +204,10 @@ void ParsedUpdate::setCollator(std::unique_ptr<CollatorInterface> collator) {
_collator = std::move(collator);
_driver.setCollator(_collator.get());
+
+ for (auto&& arrayFilter : _arrayFilters) {
+ arrayFilter.second->getFilter()->setCollator(_collator.get());
+ }
}
} // namespace mongo
diff --git a/src/mongo/db/ops/update.cpp b/src/mongo/db/ops/update.cpp
index 0e5a11e2a0f..2aa57637773 100644
--- a/src/mongo/db/ops/update.cpp
+++ b/src/mongo/db/ops/update.cpp
@@ -129,7 +129,8 @@ UpdateResult update(OperationContext* opCtx, Database* db, const UpdateRequest&
BSONObj applyUpdateOperators(const BSONObj& from, const BSONObj& operators) {
UpdateDriver::Options opts;
UpdateDriver driver(opts);
- Status status = driver.parse(operators);
+ std::map<StringData, std::unique_ptr<ArrayFilter>> arrayFilters;
+ Status status = driver.parse(operators, arrayFilters);
if (!status.isOK()) {
uasserted(16838, status.reason());
}
diff --git a/src/mongo/db/ops/update.h b/src/mongo/db/ops/update.h
index 2c5e0fc0f97..5b7f0a4e324 100644
--- a/src/mongo/db/ops/update.h
+++ b/src/mongo/db/ops/update.h
@@ -50,8 +50,8 @@ class UpdateDriver;
UpdateResult update(OperationContext* opCtx, Database* db, const UpdateRequest& request);
/**
- * takes the from document and returns a new document
- * after apply all the operators
+ * Takes the 'from' document and returns a new document after applying 'operators'. arrayFilters are
+ * not supported.
* e.g.
* applyUpdateOperators( BSON( "x" << 1 ) , BSON( "$inc" << BSON( "x" << 1 ) ) );
* returns: { x : 2 }
diff --git a/src/mongo/db/repl/storage_interface_impl_test.cpp b/src/mongo/db/repl/storage_interface_impl_test.cpp
index 57eb9d4d262..9bae858342f 100644
--- a/src/mongo/db/repl/storage_interface_impl_test.cpp
+++ b/src/mongo/db/repl/storage_interface_impl_test.cpp
@@ -1890,10 +1890,13 @@ TEST_F(StorageInterfaceImplTest,
auto nss = makeNamespace(_agent);
ASSERT_OK(storage.createCollection(opCtx, nss, CollectionOptions()));
- auto status = storage.upsertById(
- opCtx, nss, BSON("" << 1).firstElement(), BSON("$unknownUpdateOp" << BSON("x" << 1000)));
- ASSERT_EQUALS(ErrorCodes::FailedToParse, status);
- ASSERT_STRING_CONTAINS(status.reason(), "Unknown modifier: $unknownUpdateOp");
+ ASSERT_THROWS_CODE_AND_WHAT(storage.upsertById(opCtx,
+ nss,
+ BSON("" << 1).firstElement(),
+ BSON("$unknownUpdateOp" << BSON("x" << 1000))),
+ UserException,
+ ErrorCodes::FailedToParse,
+ "Unknown modifier: $unknownUpdateOp");
}
TEST_F(StorageInterfaceImplTest, DeleteByFilterReturnsNamespaceNotFoundWhenDatabaseDoesNotExist) {
diff --git a/src/mongo/db/update/arithmetic_node.cpp b/src/mongo/db/update/arithmetic_node.cpp
index e040ed700d8..69c25a87da8 100644
--- a/src/mongo/db/update/arithmetic_node.cpp
+++ b/src/mongo/db/update/arithmetic_node.cpp
@@ -103,7 +103,9 @@ void ArithmeticNode::updateExistingElement(mutablebson::Element* element, bool*
if (element->getValue().ok() && valueToSet.isIdentical(originalValue)) {
*noop = true;
} else {
- invariantOK(element->setValueSafeNum(valueToSet));
+
+ // This can fail if 'valueToSet' is not representable as a 64-bit integer.
+ uassertStatusOK(element->setValueSafeNum(valueToSet));
}
}
@@ -119,7 +121,9 @@ void ArithmeticNode::setValueForNewElement(mutablebson::Element* element) const
valueToSet *= SafeNum(static_cast<int32_t>(0));
break;
}
- invariantOK(element->setValueSafeNum(valueToSet));
+
+ // This can fail if 'valueToSet' is not representable as a 64-bit integer.
+ uassertStatusOK(element->setValueSafeNum(valueToSet));
}
} // namespace mongo
diff --git a/src/mongo/db/update/update_driver.cpp b/src/mongo/db/update/update_driver.cpp
index d13118e4ed4..8cd0a05511b 100644
--- a/src/mongo/db/update/update_driver.cpp
+++ b/src/mongo/db/update/update_driver.cpp
@@ -37,6 +37,7 @@
#include "mongo/db/matcher/expression_leaf.h"
#include "mongo/db/matcher/extensions_callback_noop.h"
#include "mongo/db/ops/modifier_object_replace.h"
+#include "mongo/db/server_options.h"
#include "mongo/db/update/log_builder.h"
#include "mongo/db/update/modifier_table.h"
#include "mongo/db/update/path_support.h"
@@ -53,6 +54,82 @@ using std::vector;
using pathsupport::EqualityMatches;
+namespace {
+
+modifiertable::ModifierType validateMod(BSONElement mod) {
+ auto modType = modifiertable::getType(mod.fieldName());
+
+ uassert(ErrorCodes::FailedToParse,
+ str::stream() << "Unknown modifier: " << mod.fieldName(),
+ modType != modifiertable::MOD_UNKNOWN);
+
+ uassert(ErrorCodes::FailedToParse,
+ str::stream() << "Modifiers operate on fields but we found type "
+ << typeName(mod.type())
+ << " instead. For example: {$mod: {<field>: ...}}"
+ << " not {"
+ << mod
+ << "}",
+ mod.type() == BSONType::Object);
+
+ uassert(ErrorCodes::FailedToParse,
+ str::stream() << "'" << mod.fieldName()
+ << "' is empty. You must specify a field like so: "
+ "{"
+ << mod.fieldName()
+ << ": {<field>: ...}}",
+ !mod.embeddedObject().isEmpty());
+
+ return modType;
+}
+
+// Parses 'updateExpr' and merges it into 'root'. Returns whether 'updateExpr' is positional.
+StatusWith<bool> parseUpdateExpression(
+ BSONObj updateExpr,
+ UpdateObjectNode* root,
+ const CollatorInterface* collator,
+ const std::map<StringData, std::unique_ptr<ArrayFilter>>& arrayFilters) {
+ bool positional = false;
+ std::set<std::string> foundIdentifiers;
+ for (auto&& mod : updateExpr) {
+ auto modType = validateMod(mod);
+ for (auto&& field : mod.Obj()) {
+ auto statusWithPositional = UpdateObjectNode::parseAndMerge(
+ root, modType, field, collator, arrayFilters, foundIdentifiers);
+ if (!statusWithPositional.isOK()) {
+
+ // Check whether we failed to parse because this mod type is not yet supported by
+ // UpdateNode.
+ // TODO SERVER-28777: Skip this check.
+ auto leaf = modifiertable::makeUpdateLeafNode(modType);
+ if (!leaf) {
+ uassert(ErrorCodes::InvalidOptions,
+ str::stream() << "Cannot use array filters with modifier "
+ << mod.fieldName(),
+ arrayFilters.empty());
+ return statusWithPositional;
+ }
+
+ uassertStatusOK(statusWithPositional);
+ MONGO_UNREACHABLE;
+ }
+ positional = positional || statusWithPositional.getValue();
+ }
+ }
+
+ for (const auto& arrayFilter : arrayFilters) {
+ uassert(ErrorCodes::FailedToParse,
+ str::stream() << "The array filter for identifier '" << arrayFilter.first
+ << "' was not used in the update "
+ << updateExpr,
+ foundIdentifiers.find(arrayFilter.first.toString()) != foundIdentifiers.end());
+ }
+
+ return positional;
+}
+
+} // namespace
+
UpdateDriver::UpdateDriver(const Options& opts)
: _replacementMode(false),
_indexedFields(NULL),
@@ -65,7 +142,9 @@ UpdateDriver::~UpdateDriver() {
clear();
}
-Status UpdateDriver::parse(const BSONObj& updateExpr, const bool multi) {
+Status UpdateDriver::parse(const BSONObj& updateExpr,
+ const std::map<StringData, std::unique_ptr<ArrayFilter>>& arrayFilters,
+ const bool multi) {
clear();
// Check if the update expression is a full object replacement.
@@ -93,55 +172,41 @@ Status UpdateDriver::parse(const BSONObj& updateExpr, const bool multi) {
return Status::OK();
}
- // The update expression is made of mod operators, that is
- // { <$mod>: {...}, <$mod>: {...}, ... }
- BSONObjIterator outerIter(updateExpr);
- while (outerIter.more()) {
- BSONElement outerModElem = outerIter.next();
-
- // Check whether this is a valid mod type.
- modifiertable::ModifierType modType = modifiertable::getType(outerModElem.fieldName());
- if (modType == modifiertable::MOD_UNKNOWN) {
- return Status(ErrorCodes::FailedToParse,
- str::stream() << "Unknown modifier: " << outerModElem.fieldName());
- }
-
- // Check whether there is indeed a list of mods under this modifier.
- if (outerModElem.type() != Object) {
- return Status(ErrorCodes::FailedToParse,
- str::stream() << "Modifiers operate on fields but we found type "
- << typeName(outerModElem.type())
- << " instead. For example: {$mod: {<field>: ...}}"
- << " not {"
- << outerModElem.toString()
- << "}");
- }
+ // Register the fact that this driver is not doing a full object replacement.
+ _replacementMode = false;
- // Check whether there are indeed mods under this modifier.
- if (outerModElem.embeddedObject().isEmpty()) {
- return Status(ErrorCodes::FailedToParse,
- str::stream() << "'" << outerModElem.fieldName()
- << "' is empty. You must specify a field like so: "
- "{"
- << outerModElem.fieldName()
- << ": {<field>: ...}}");
+ // If the featureCompatibilityVersion is 3.6, parse using UpdateNode.
+ // TODO SERVER-28777: Remove the restriction that this is only done if the update is not from
+ // replication.
+ if (serverGlobalParams.featureCompatibility.version.load() ==
+ ServerGlobalParams::FeatureCompatibility::Version::k36 &&
+ !_modOptions.fromReplication) {
+ _root = stdx::make_unique<UpdateObjectNode>();
+ auto statusWithPositional =
+ parseUpdateExpression(updateExpr, _root.get(), _modOptions.collator, arrayFilters);
+ if (statusWithPositional.isOK()) {
+ _positional = statusWithPositional.getValue();
+ return Status::OK();
}
+ _root.reset();
+ }
- BSONObjIterator innerIter(outerModElem.embeddedObject());
- while (innerIter.more()) {
- BSONElement innerModElem = innerIter.next();
-
- Status status = addAndParse(modType, innerModElem);
+ // TODO SERVER-28777: This can be an else case, since we will not fall back to the old parsing
+ // if we fail to parse using the new implementation.
+ uassert(ErrorCodes::InvalidOptions,
+ "The featureCompatibilityVersion must be 3.6 to use arrayFilters. See "
+ "http://dochub.mongodb.org/core/3.6-feature-compatibility.",
+ arrayFilters.empty());
+ for (auto&& mod : updateExpr) {
+ auto modType = validateMod(mod);
+ for (auto&& field : mod.Obj()) {
+ auto status = addAndParse(modType, field);
if (!status.isOK()) {
return status;
}
}
}
- // Register the fact that there will be only $mod's in this driver -- no object
- // replacement.
- _replacementMode = false;
-
return Status::OK();
}
@@ -248,74 +313,103 @@ Status UpdateDriver::update(StringData matchedField,
_logDoc.reset();
LogBuilder logBuilder(_logDoc.root());
- // Ask each of the mods to type check whether they can operate over the current document
- // and, if so, to change that document accordingly.
- for (vector<ModifierInterface*>::iterator it = _mods.begin(); it != _mods.end(); ++it) {
- ModifierInterface::ExecInfo execInfo;
- Status status = (*it)->prepare(doc->root(), matchedField, &execInfo);
- if (!status.isOK()) {
- return status;
+ if (_root) {
+
+ // We parsed using the new UpdateNode implementation.
+ FieldRef pathToCreate;
+ FieldRef pathTaken;
+ bool indexesAffected = false;
+ bool noop = false;
+ _root->apply(doc->root(),
+ &pathToCreate,
+ &pathTaken,
+ matchedField,
+ _modOptions.fromReplication,
+ _indexedFields,
+ (_logOp && logOpRec) ? &logBuilder : nullptr,
+ &indexesAffected,
+ &noop);
+ if (indexesAffected) {
+ _affectIndices = true;
+ doc->disableInPlaceUpdates();
}
-
- // If a mod wants to be applied only if this is an upsert (or only if this is a
- // strict update), we should respect that. If a mod doesn't care, it would state
- // it is fine with ANY update context.
- const bool validContext = (execInfo.context == ModifierInterface::ExecInfo::ANY_CONTEXT ||
- execInfo.context == _context);
-
- // Nothing to do if not in a valid context.
- if (!validContext) {
- continue;
+ if (docWasModified) {
+ *docWasModified = !noop;
}
+ } else {
- // Gather which fields this mod is interested on and whether these fields were
- // "taken" by previous mods. Note that not all mods are multi-field mods. When we
- // see an empty field, we may stop looking for others.
- for (int i = 0; i < ModifierInterface::ExecInfo::MAX_NUM_FIELDS; i++) {
- if (execInfo.fieldRef[i] == 0) {
- break;
+ // We parsed using the old ModifierInterface implementation.
+ // Ask each of the mods to type check whether they can operate over the current document
+ // and, if so, to change that document accordingly.
+ for (vector<ModifierInterface*>::iterator it = _mods.begin(); it != _mods.end(); ++it) {
+ ModifierInterface::ExecInfo execInfo;
+ Status status = (*it)->prepare(doc->root(), matchedField, &execInfo);
+ if (!status.isOK()) {
+ return status;
}
- // Record each field being updated but check for conflicts first
- const FieldRef* other;
- if (!targetFields->insert(execInfo.fieldRef[i], &other)) {
- return Status(ErrorCodes::ConflictingUpdateOperators,
- str::stream() << "Cannot update '" << other->dottedField()
- << "' and '"
- << execInfo.fieldRef[i]->dottedField()
- << "' at the same time");
+ // If a mod wants to be applied only if this is an upsert (or only if this is a
+ // strict update), we should respect that. If a mod doesn't care, it would state
+ // it is fine with ANY update context.
+ const bool validContext =
+ (execInfo.context == ModifierInterface::ExecInfo::ANY_CONTEXT ||
+ execInfo.context == _context);
+
+ // Nothing to do if not in a valid context.
+ if (!validContext) {
+ continue;
}
- // We start with the expectation that a mod will be in-place. But if the mod
- // touched an indexed field and the mod will indeed be executed -- that is, it
- // is not a no-op and it is in a valid context -- then we switch back to a
- // non-in-place mode.
- //
- // TODO: make mightBeIndexed and fieldRef like each other.
- if (!_affectIndices && !execInfo.noOp && _indexedFields &&
- _indexedFields->mightBeIndexed(execInfo.fieldRef[i]->dottedField())) {
- _affectIndices = true;
- doc->disableInPlaceUpdates();
+
+ // Gather which fields this mod is interested on and whether these fields were
+ // "taken" by previous mods. Note that not all mods are multi-field mods. When we
+ // see an empty field, we may stop looking for others.
+ for (int i = 0; i < ModifierInterface::ExecInfo::MAX_NUM_FIELDS; i++) {
+ if (execInfo.fieldRef[i] == 0) {
+ break;
+ }
+
+ // Record each field being updated but check for conflicts first
+ const FieldRef* other;
+ if (!targetFields->insert(execInfo.fieldRef[i], &other)) {
+ return Status(ErrorCodes::ConflictingUpdateOperators,
+ str::stream() << "Cannot update '" << other->dottedField()
+ << "' and '"
+ << execInfo.fieldRef[i]->dottedField()
+ << "' at the same time");
+ }
+
+ // We start with the expectation that a mod will be in-place. But if the mod
+ // touched an indexed field and the mod will indeed be executed -- that is, it
+ // is not a no-op and it is in a valid context -- then we switch back to a
+ // non-in-place mode.
+ //
+ // TODO: make mightBeIndexed and fieldRef like each other.
+ if (!_affectIndices && !execInfo.noOp && _indexedFields &&
+ _indexedFields->mightBeIndexed(execInfo.fieldRef[i]->dottedField())) {
+ _affectIndices = true;
+ doc->disableInPlaceUpdates();
+ }
}
- }
- if (!execInfo.noOp) {
- status = (*it)->apply();
+ if (!execInfo.noOp) {
+ status = (*it)->apply();
- if (docWasModified)
- *docWasModified = true;
+ if (docWasModified)
+ *docWasModified = true;
- if (!status.isOK()) {
- return status;
+ if (!status.isOK()) {
+ return status;
+ }
}
- }
- // If we require a replication oplog entry for this update, go ahead and generate one.
- if (!execInfo.noOp && _logOp && logOpRec) {
- status = (*it)->log(&logBuilder);
- if (!status.isOK()) {
- return status;
+ // If we require a replication oplog entry for this update, go ahead and generate one.
+ if (!execInfo.noOp && _logOp && logOpRec) {
+ status = (*it)->log(&logBuilder);
+ if (!status.isOK()) {
+ return status;
+ }
}
}
}
diff --git a/src/mongo/db/update/update_driver.h b/src/mongo/db/update/update_driver.h
index 6152e22cefb..65785542414 100644
--- a/src/mongo/db/update/update_driver.h
+++ b/src/mongo/db/update/update_driver.h
@@ -39,6 +39,7 @@
#include "mongo/db/ops/modifier_interface.h"
#include "mongo/db/query/canonical_query.h"
#include "mongo/db/update/modifier_table.h"
+#include "mongo/db/update/update_object_node.h"
#include "mongo/db/update_index_data.h"
namespace mongo {
@@ -54,10 +55,18 @@ public:
~UpdateDriver();
/**
- * Returns OK and fills in '_mods' if 'updateExpr' is correct. Otherwise returns an
- * error status with a corresponding description.
+ * Parses the update expression 'updateExpr'. If the featurCompatibilityVersion is 3.6,
+ * 'updateExpr' is parsed into '_root'. Otherwise, 'updateExpr' is parsed into '_mods'. This is
+ * done because applying updates via UpdateNode creates new fields in lexicographic order,
+ * whereas applying updates via ModifierInterface creates new fields in the order they are
+ * specified in 'updateExpr', so it is necessary that the whole replica set have version 3.6 in
+ * order to use UpdateNode. Uasserts if the featureCompatibilityVersion is 3.4 and
+ * 'arrayFilters' is non-empty. Uasserts or returns a non-ok status if 'updateExpr' fails to
+ * parse.
*/
- Status parse(const BSONObj& updateExpr, const bool multi = false);
+ Status parse(const BSONObj& updateExpr,
+ const std::map<StringData, std::unique_ptr<ArrayFilter>>& arrayFilters,
+ const bool multi = false);
/**
* Fills in document with any fields in the query which are valid.
@@ -157,7 +166,12 @@ private:
// Is there a list of $mod's on '_mods' or is it just full object replacement?
bool _replacementMode;
- // Collection of update mod instances. Owned here.
+ // The root of the UpdateNode tree. If the featureCompatibilityVersion is 3.6, the update
+ // expression is parsed into '_root'.
+ std::unique_ptr<UpdateObjectNode> _root;
+
+ // Collection of update mod instances. Owned here. If the featureCompatibilityVersion is 3.4,
+ // the update expression is parsed into '_mods'.
std::vector<ModifierInterface*> _mods;
// What are the list of fields in the collection over which the update is going to be
diff --git a/src/mongo/db/update/update_driver_test.cpp b/src/mongo/db/update/update_driver_test.cpp
index e9b92433142..fb7698dc5a3 100644
--- a/src/mongo/db/update/update_driver_test.cpp
+++ b/src/mongo/db/update/update_driver_test.cpp
@@ -44,30 +44,17 @@
#include "mongo/db/update_index_data.h"
#include "mongo/unittest/unittest.h"
+namespace mongo {
namespace {
-using mongo::BSONElement;
-using mongo::BSONElementComparator;
-using mongo::BSONObj;
-using mongo::BSONObjIterator;
-using mongo::CollatorInterfaceMock;
-using mongo::FieldRef;
-using mongo::OperationContext;
-using mongo::OwnedPointerVector;
-using mongo::QueryTestServiceContext;
-using mongo::ServiceContext;
-using mongo::SimpleStringDataComparator;
-using mongo::StringData;
-using mongo::UpdateDriver;
-using mongo::UpdateIndexData;
-using mongo::fromjson;
using mongo::mutablebson::Document;
using mongoutils::str::stream;
TEST(Parse, Normal) {
UpdateDriver::Options opts;
UpdateDriver driver(opts);
- ASSERT_OK(driver.parse(fromjson("{$set:{a:1}}")));
+ std::map<StringData, std::unique_ptr<ArrayFilter>> arrayFilters;
+ ASSERT_OK(driver.parse(fromjson("{$set:{a:1}}"), arrayFilters));
ASSERT_EQUALS(driver.numMods(), 1U);
ASSERT_FALSE(driver.isDocReplacement());
}
@@ -75,7 +62,8 @@ TEST(Parse, Normal) {
TEST(Parse, MultiMods) {
UpdateDriver::Options opts;
UpdateDriver driver(opts);
- ASSERT_OK(driver.parse(fromjson("{$set:{a:1, b:1}}")));
+ std::map<StringData, std::unique_ptr<ArrayFilter>> arrayFilters;
+ ASSERT_OK(driver.parse(fromjson("{$set:{a:1, b:1}}"), arrayFilters));
ASSERT_EQUALS(driver.numMods(), 2U);
ASSERT_FALSE(driver.isDocReplacement());
}
@@ -83,7 +71,8 @@ TEST(Parse, MultiMods) {
TEST(Parse, MixingMods) {
UpdateDriver::Options opts;
UpdateDriver driver(opts);
- ASSERT_OK(driver.parse(fromjson("{$set:{a:1}, $unset:{b:1}}")));
+ std::map<StringData, std::unique_ptr<ArrayFilter>> arrayFilters;
+ ASSERT_OK(driver.parse(fromjson("{$set:{a:1}, $unset:{b:1}}"), arrayFilters));
ASSERT_EQUALS(driver.numMods(), 2U);
ASSERT_FALSE(driver.isDocReplacement());
}
@@ -91,38 +80,59 @@ TEST(Parse, MixingMods) {
TEST(Parse, ObjectReplacment) {
UpdateDriver::Options opts;
UpdateDriver driver(opts);
- ASSERT_OK(driver.parse(fromjson("{obj: \"obj replacement\"}")));
+ std::map<StringData, std::unique_ptr<ArrayFilter>> arrayFilters;
+ ASSERT_OK(driver.parse(fromjson("{obj: \"obj replacement\"}"), arrayFilters));
ASSERT_TRUE(driver.isDocReplacement());
}
TEST(Parse, EmptyMod) {
UpdateDriver::Options opts;
UpdateDriver driver(opts);
- ASSERT_NOT_OK(driver.parse(fromjson("{$set:{}}")));
+ std::map<StringData, std::unique_ptr<ArrayFilter>> arrayFilters;
+ ASSERT_THROWS_CODE_AND_WHAT(
+ driver.parse(fromjson("{$set:{}}"), arrayFilters),
+ UserException,
+ ErrorCodes::FailedToParse,
+ "'$set' is empty. You must specify a field like so: {$set: {<field>: ...}}");
}
TEST(Parse, WrongMod) {
UpdateDriver::Options opts;
UpdateDriver driver(opts);
- ASSERT_NOT_OK(driver.parse(fromjson("{$xyz:{a:1}}")));
+ std::map<StringData, std::unique_ptr<ArrayFilter>> arrayFilters;
+ ASSERT_THROWS_CODE_AND_WHAT(driver.parse(fromjson("{$xyz:{a:1}}"), arrayFilters),
+ UserException,
+ ErrorCodes::FailedToParse,
+ "Unknown modifier: $xyz");
}
TEST(Parse, WrongType) {
UpdateDriver::Options opts;
UpdateDriver driver(opts);
- ASSERT_NOT_OK(driver.parse(fromjson("{$set:[{a:1}]}")));
+ std::map<StringData, std::unique_ptr<ArrayFilter>> arrayFilters;
+ ASSERT_THROWS_CODE_AND_WHAT(driver.parse(fromjson("{$set:[{a:1}]}"), arrayFilters),
+ UserException,
+ ErrorCodes::FailedToParse,
+ "Modifiers operate on fields but we found type array instead. For "
+ "example: {$mod: {<field>: ...}} not {$set: [ { a: 1 } ]}");
}
TEST(Parse, ModsWithLaterObjReplacement) {
UpdateDriver::Options opts;
UpdateDriver driver(opts);
- ASSERT_NOT_OK(driver.parse(fromjson("{$set:{a:1}, obj: \"obj replacement\"}")));
+ std::map<StringData, std::unique_ptr<ArrayFilter>> arrayFilters;
+ ASSERT_THROWS_CODE_AND_WHAT(
+ driver.parse(fromjson("{$set:{a:1}, obj: \"obj replacement\"}"), arrayFilters),
+ UserException,
+ ErrorCodes::FailedToParse,
+ "Unknown modifier: obj");
}
TEST(Parse, PushAll) {
UpdateDriver::Options opts;
UpdateDriver driver(opts);
- ASSERT_OK(driver.parse(fromjson("{$pushAll:{a:[1,2,3]}}")));
+ std::map<StringData, std::unique_ptr<ArrayFilter>> arrayFilters;
+ ASSERT_OK(driver.parse(fromjson("{$pushAll:{a:[1,2,3]}}"), arrayFilters));
ASSERT_EQUALS(driver.numMods(), 1U);
ASSERT_FALSE(driver.isDocReplacement());
}
@@ -130,7 +140,8 @@ TEST(Parse, PushAll) {
TEST(Parse, SetOnInsert) {
UpdateDriver::Options opts;
UpdateDriver driver(opts);
- ASSERT_OK(driver.parse(fromjson("{$setOnInsert:{a:1}}")));
+ std::map<StringData, std::unique_ptr<ArrayFilter>> arrayFilters;
+ ASSERT_OK(driver.parse(fromjson("{$setOnInsert:{a:1}}"), arrayFilters));
ASSERT_EQUALS(driver.numMods(), 1U);
ASSERT_FALSE(driver.isDocReplacement());
}
@@ -140,8 +151,9 @@ TEST(Collator, SetCollationUpdatesModifierInterfaces) {
BSONObj updateDocument = fromjson("{$max: {a: 'abd'}}");
UpdateDriver::Options opts;
UpdateDriver driver(opts);
+ std::map<StringData, std::unique_ptr<ArrayFilter>> arrayFilters;
- ASSERT_OK(driver.parse(updateDocument));
+ ASSERT_OK(driver.parse(updateDocument, arrayFilters));
ASSERT_EQUALS(driver.numMods(), 1U);
bool modified = false;
@@ -164,8 +176,8 @@ public:
CreateFromQueryFixture()
: _driverOps(new UpdateDriver(UpdateDriver::Options())),
_driverRepl(new UpdateDriver(UpdateDriver::Options())) {
- _driverOps->parse(fromjson("{$set:{'_':1}}")).transitional_ignore();
- _driverRepl->parse(fromjson("{}")).transitional_ignore();
+ _driverOps->parse(fromjson("{$set:{'_':1}}"), _arrayFilters).transitional_ignore();
+ _driverRepl->parse(fromjson("{}"), _arrayFilters).transitional_ignore();
_opCtx = _serviceContext.makeOperationContext();
}
@@ -188,6 +200,7 @@ public:
private:
QueryTestServiceContext _serviceContext;
ServiceContext::UniqueOperationContext _opCtx;
+ std::map<StringData, std::unique_ptr<ArrayFilter>> _arrayFilters;
std::unique_ptr<UpdateDriver> _driverOps;
std::unique_ptr<UpdateDriver> _driverRepl;
Document _doc;
@@ -428,4 +441,5 @@ TEST_F(CreateFromQuery, NotFullShardKeyRepl) {
opCtx(), query, &immutablePaths.vector(), doc()));
}
-} // unnamed namespace
+} // namespace
+} // namespace mongo
diff --git a/src/mongo/dbtests/query_stage_update.cpp b/src/mongo/dbtests/query_stage_update.cpp
index c0c692473a6..a1b41763503 100644
--- a/src/mongo/dbtests/query_stage_update.cpp
+++ b/src/mongo/dbtests/query_stage_update.cpp
@@ -209,7 +209,9 @@ public:
request.setQuery(query);
request.setUpdates(updates);
- ASSERT_OK(driver.parse(request.getUpdates(), request.isMulti()));
+ const std::map<StringData, std::unique_ptr<ArrayFilter>> arrayFilters;
+
+ ASSERT_OK(driver.parse(request.getUpdates(), arrayFilters, request.isMulti()));
// Setup update params.
UpdateStageParams params(&request, &driver, opDebug);
@@ -280,7 +282,9 @@ public:
request.setQuery(query);
request.setUpdates(updates);
- ASSERT_OK(driver.parse(request.getUpdates(), request.isMulti()));
+ const std::map<StringData, std::unique_ptr<ArrayFilter>> arrayFilters;
+
+ ASSERT_OK(driver.parse(request.getUpdates(), arrayFilters, request.isMulti()));
// Configure the scan.
CollectionScanParams collScanParams;
@@ -393,7 +397,9 @@ public:
request.setReturnDocs(UpdateRequest::RETURN_OLD);
request.setLifecycle(&updateLifecycle);
- ASSERT_OK(driver.parse(request.getUpdates(), request.isMulti()));
+ const std::map<StringData, std::unique_ptr<ArrayFilter>> arrayFilters;
+
+ ASSERT_OK(driver.parse(request.getUpdates(), arrayFilters, request.isMulti()));
// Configure a QueuedDataStage to pass the first object in the collection back in a
// RID_AND_OBJ state.
@@ -481,7 +487,9 @@ public:
request.setReturnDocs(UpdateRequest::RETURN_NEW);
request.setLifecycle(&updateLifecycle);
- ASSERT_OK(driver.parse(request.getUpdates(), request.isMulti()));
+ const std::map<StringData, std::unique_ptr<ArrayFilter>> arrayFilters;
+
+ ASSERT_OK(driver.parse(request.getUpdates(), arrayFilters, request.isMulti()));
// Configure a QueuedDataStage to pass the first object in the collection back in a
// RID_AND_OBJ state.
@@ -559,7 +567,9 @@ public:
request.setMulti(false);
request.setLifecycle(&updateLifecycle);
- ASSERT_OK(driver.parse(request.getUpdates(), request.isMulti()));
+ const std::map<StringData, std::unique_ptr<ArrayFilter>> arrayFilters;
+
+ ASSERT_OK(driver.parse(request.getUpdates(), arrayFilters, request.isMulti()));
// Configure a QueuedDataStage to pass an OWNED_OBJ to the update stage.
auto qds = make_unique<QueuedDataStage>(&_opCtx, ws.get());