summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorJustin Seyster <justin.seyster@mongodb.com>2017-09-14 12:14:54 -0400
committerJustin Seyster <justin.seyster@mongodb.com>2017-09-14 12:14:54 -0400
commit390e5f47f00dcf133f361e3f9027e4da7d08d628 (patch)
tree901690cdc5bc1b681e1e2b95c30061784a15187e
parent5767ee2421fa6c7934a90e9083f07743a83dcf71 (diff)
downloadmongo-390e5f47f00dcf133f361e3f9027e4da7d08d628.tar.gz
SERVER-30705 Add $v field for update semantics in oplog updates.
With the new UpdateNodes class hierarchy, there are two code paths for applying an update to a document that have slightly different semantics. The order of fields in the resulting document can vary depending on which code path is used to apply an update. A difference in ordering between documents in a replica set is considered a "mismatch," so we need to ensure that secondaries always apply updates using the same update system that the primary uses. When an update executes as part of the application of an oplog entry, the update is now allowed to have a $v field, which allows it to specify which semantics were used by the operation that we are replicating by applying the entry. When the primary uses the new semantics (because it is a 3.6 mongod with featureCompatibilityVersion set to 3.6), it includes {$v: 1} in the oplog's update document to indicate that the secondary should apply with the newer 'UpdateNode' semantics. There are two other places where we need this behavior: 1) In role_graph_update.cpp, where the handleOplogUpdate observer needs to update its in-memory BSON representation of a role to reflect an update in the admin database and 2) in the applyOps command, which is used for testing how oplog entries get applied. Both these code paths set the fromOplogApplication flag, which replaces the old fromReplication flag, and they also gain behavior that used to be exclusive to oplog applications from replication. (Specifically, they skip update validation checks, which should have already passed before the oplog entry was created.)
-rw-r--r--jstests/concurrency/fsm_all_simultaneous.js2
-rw-r--r--jstests/concurrency/fsm_workloads/toggle_feature_compatibility.js64
-rw-r--r--jstests/core/apply_ops1.js39
-rw-r--r--jstests/multiVersion/updates_in_heterogeneous_repl_set.js75
-rw-r--r--jstests/replsets/oplog_format.js34
-rw-r--r--src/mongo/db/auth/role_graph_update.cpp1
-rw-r--r--src/mongo/db/ops/modifier_interface.h6
-rw-r--r--src/mongo/db/ops/modifier_set.cpp8
-rw-r--r--src/mongo/db/ops/parsed_update.cpp4
-rw-r--r--src/mongo/db/ops/update_request.h13
-rw-r--r--src/mongo/db/repl/oplog.cpp2
-rw-r--r--src/mongo/db/update/arithmetic_node_test.cpp2
-rw-r--r--src/mongo/db/update/log_builder.cpp28
-rw-r--r--src/mongo/db/update/log_builder.h29
-rw-r--r--src/mongo/db/update/modifier_node.cpp6
-rw-r--r--src/mongo/db/update/set_node_test.cpp16
-rw-r--r--src/mongo/db/update/update_array_node_test.cpp2
-rw-r--r--src/mongo/db/update/update_driver.cpp120
-rw-r--r--src/mongo/db/update/update_driver.h21
-rw-r--r--src/mongo/db/update/update_node.h2
-rw-r--r--src/mongo/db/update/update_node_test_fixture.h10
-rw-r--r--src/mongo/db/update/update_object_node_test.cpp2
22 files changed, 410 insertions, 76 deletions
diff --git a/jstests/concurrency/fsm_all_simultaneous.js b/jstests/concurrency/fsm_all_simultaneous.js
index bf4fdfe3d02..f390f262e97 100644
--- a/jstests/concurrency/fsm_all_simultaneous.js
+++ b/jstests/concurrency/fsm_all_simultaneous.js
@@ -14,6 +14,8 @@ var blacklist = [
'agg_group_external.js', // uses >100MB of data, which can overwhelm test hosts
'agg_sort_external.js', // uses >100MB of data, which can overwhelm test hosts
+
+ 'toggle_feature_compatibility.js', // Sets FCV to 3.4, which could interfere with other tests.
].map(function(file) {
return dir + '/' + file;
});
diff --git a/jstests/concurrency/fsm_workloads/toggle_feature_compatibility.js b/jstests/concurrency/fsm_workloads/toggle_feature_compatibility.js
new file mode 100644
index 00000000000..0c0ed9297fd
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/toggle_feature_compatibility.js
@@ -0,0 +1,64 @@
+"use strict";
+
+/**
+ * toggle_feature_compatibility.js
+ *
+ * Adds and updates documents in some threads while rapidly toggling the feature
+ * compatibility version between 3.4 and 3.6 in other threads, triggering the
+ * failure in SERVER-30705.
+ */
+var $config = (function() {
+
+ var states = (function() {
+
+ function init(db, collName) {
+ }
+
+ function featureCompatibilityVersion34(db, collName) {
+ assert.commandWorked(db.adminCommand({setFeatureCompatibilityVersion: "3.4"}));
+ }
+
+ function featureCompatibilityVersion36(db, collName) {
+ assert.commandWorked(db.adminCommand({setFeatureCompatibilityVersion: "3.6"}));
+ }
+
+ function insertAndUpdate(db, collName) {
+ let insertID = Random.randInt(1000000000);
+ let res = db[collName].insert({_id: insertID});
+
+ // Fail the test on any write error, except for a duplicate key error, which can
+ // (rarely) happen when we accidentally choose the same random key more than once.
+ assert(!res.hasWriteError() || res.getWriteError().code == ErrorCodes.DuplicateKey);
+ assert.writeOK(db[collName].update({_id: insertID}, {$set: {b: 1, a: 1}}));
+ }
+
+ return {
+ init: init,
+ featureCompatibilityVersion34: featureCompatibilityVersion34,
+ featureCompatibilityVersion36: featureCompatibilityVersion36,
+ insertAndUpdate: insertAndUpdate
+ };
+
+ })();
+
+ var transitions = {
+ init: {featureCompatibilityVersion34: 0.5, insertAndUpdate: 0.5},
+ featureCompatibilityVersion34: {featureCompatibilityVersion36: 1},
+ featureCompatibilityVersion36: {featureCompatibilityVersion34: 1},
+ insertAndUpdate: {insertAndUpdate: 1}
+ };
+
+ function teardown(db, collName, cluster) {
+ assert.commandWorked(db.adminCommand({setFeatureCompatibilityVersion: "3.6"}));
+ assertWhenOwnColl(db[collName].drop());
+ }
+
+ return {
+ threadCount: 8,
+ iterations: 1000,
+ data: null,
+ states: states,
+ transitions: transitions,
+ teardown: teardown
+ };
+})();
diff --git a/jstests/core/apply_ops1.js b/jstests/core/apply_ops1.js
index d5729f616cc..cb8ea228501 100644
--- a/jstests/core/apply_ops1.js
+++ b/jstests/core/apply_ops1.js
@@ -450,4 +450,43 @@
spec = GetIndexHelpers.findByName(allIndexes, "c_1");
assert.neq(null, spec, "Foreground index 'c_1' not found: " + tojson(allIndexes));
assert.eq(2, spec.v, "Expected v=2 index to be built");
+
+ // When applying a "u" (update) op, we default to 'ModifierInterface' update semantics, and
+ // $set operations get performed in user order.
+ res = assert.commandWorked(db.adminCommand({
+ applyOps: [
+ {"op": "i", "ns": t.getFullName(), "o": {_id: 6}},
+ {"op": "u", "ns": t.getFullName(), "o2": {_id: 6}, "o": {$set: {z: 1, a: 2}}}
+ ]
+ }));
+ assert.eq(t.findOne({_id: 6}), {_id: 6, z: 1, a: 2});
+
+ // When we explicitly specify {$v: 0}, we should also get 'ModifierInterface' update semantics.
+ res = assert.commandWorked(db.adminCommand({
+ applyOps: [
+ {"op": "i", "ns": t.getFullName(), "o": {_id: 7}},
+ {
+ "op": "u",
+ "ns": t.getFullName(),
+ "o2": {_id: 7},
+ "o": {$v: NumberLong(0), $set: {z: 1, a: 2}}
+ }
+ ]
+ }));
+ assert.eq(t.findOne({_id: 7}), {_id: 7, z: 1, a: 2});
+
+ // When we explicitly specify {$v: 1}, we should get 'UpdateNode' update semantics, and $set
+ // operations get performed in lexicographic order.
+ res = assert.commandWorked(db.adminCommand({
+ applyOps: [
+ {"op": "i", "ns": t.getFullName(), "o": {_id: 8}},
+ {
+ "op": "u",
+ "ns": t.getFullName(),
+ "o2": {_id: 8},
+ "o": {$v: NumberLong(1), $set: {z: 1, a: 2}}
+ }
+ ]
+ }));
+ assert.eq(t.findOne({_id: 8}), {_id: 8, a: 2, z: 1}); // Note: 'a' and 'z' have been sorted.
})();
diff --git a/jstests/multiVersion/updates_in_heterogeneous_repl_set.js b/jstests/multiVersion/updates_in_heterogeneous_repl_set.js
new file mode 100644
index 00000000000..1f4530889aa
--- /dev/null
+++ b/jstests/multiVersion/updates_in_heterogeneous_repl_set.js
@@ -0,0 +1,75 @@
+// Create a replica set with feature compatibility version set to 3.4, add a
+// binVersion 3.4 member, and then update documents with each member as the
+// primary. Finally, upgrade the 3.4 member to 3.6, upgrade the replica set to
+// feature compatibility version 3.6, and again update documents with each
+// member as the primary.
+
+const testName = "updates_in_heterogeneous_repl_set";
+
+(function() {
+ "use strict";
+
+ // Initialize the binVersion 3.6 versions of the replica set.
+ let replTest =
+ new ReplSetTest({name: testName, nodes: [{binVersion: "latest"}, {binVersion: "latest"}]});
+
+ replTest.startSet();
+ replTest.initiate();
+
+ let primary = replTest.getPrimary();
+
+ // Set the feature compatibility version to 3.4.
+ assert.commandWorked(primary.adminCommand({setFeatureCompatibilityVersion: "3.4"}));
+
+ // Add the binVersion 3.4 member to the replica set.
+ let binVersion34Node = replTest.add({binVersion: "3.4"});
+ replTest.reInitiate();
+ replTest.awaitSecondaryNodes();
+
+ // Give each member a chance to be primary while updating documents.
+ let collIndex = 0;
+ replTest.nodes.forEach(function(node) {
+ replTest.awaitReplication();
+ replTest.stepUp(node);
+
+ let coll = node.getDB("test")["coll" + (collIndex++)];
+
+ for (let id = 0; id < 1000; id++) {
+ assert.writeOK(coll.insert({_id: id}));
+ assert.writeOK(coll.update({_id: id}, {$set: {z: 1, a: 2}}));
+
+ // Because we are using the update system from earlier MongoDB
+ // versions (as a result of using feature compatibility version
+ // 3.4), we expect to see the new 'z' and 'a' fields to get added in
+ // the same order as they appeared in the update document.
+ assert.eq(coll.findOne({_id: id}), {_id: id, z: 1, a: 2});
+ }
+ });
+
+ // Upgrade the binVersion 3.4 member to binVersion 3.6.
+ replTest.restart(binVersion34Node, {binVersion: "latest"});
+ replTest.awaitSecondaryNodes();
+
+ // Set the replica set feature compatibility version to 3.6.
+ primary = replTest.getPrimary();
+ assert.commandWorked(primary.adminCommand({setFeatureCompatibilityVersion: "3.6"}));
+
+ // Again, give each member a chance to be primary while updating documents.
+ replTest.nodes.forEach(function(node) {
+ replTest.stepUp(node);
+
+ let coll = node.getDB("test")["coll" + (collIndex++)];
+
+ for (let id = 0; id < 1000; id++) {
+ assert.writeOK(coll.insert({_id: id}));
+ assert.writeOK(coll.update({_id: id}, {$set: {z: 1, a: 2}}));
+
+ // We can tell that we are using the new 3.6 update system, because
+ // it inserts new fields in lexicographic order, causing the 'a' and
+ // 'z' fields to be swapped.
+ assert.eq(coll.findOne({_id: id}), {_id: id, a: 2, z: 1});
+ }
+ });
+
+ replTest.stopSet();
+})();
diff --git a/jstests/replsets/oplog_format.js b/jstests/replsets/oplog_format.js
index 5604ae1278b..2fd87e2d665 100644
--- a/jstests/replsets/oplog_format.js
+++ b/jstests/replsets/oplog_format.js
@@ -37,7 +37,7 @@ assertLastOplog({_id: 1, a: 2}, {_id: 1}, "save " + msg);
var res = assert.writeOK(coll.update({}, {$inc: {a: 1}, $set: {b: 2}}));
assert.eq(res.nModified, 1, "update failed for '" + msg + "': " + res.toString());
assert.docEq({_id: 1, a: 3, b: 2}, coll.findOne({}), msg);
-assertLastOplog({$set: {a: 3, b: 2}}, {_id: 1}, msg);
+assertLastOplog({$v: 1, $set: {a: 3, b: 2}}, {_id: 1}, msg);
var msg = "IncRewriteNonExistingField: $inc $set";
coll.save({_id: 1, c: 0});
@@ -45,7 +45,7 @@ assertLastOplog({_id: 1, c: 0}, {_id: 1}, "save " + msg);
res = assert.writeOK(coll.update({}, {$inc: {a: 1}, $set: {b: 2}}));
assert.eq(res.nModified, 1, "update failed for '" + msg + "': " + res.toString());
assert.docEq({_id: 1, c: 0, a: 1, b: 2}, coll.findOne({}), msg);
-assertLastOplog({$set: {a: 1, b: 2}}, {_id: 1}, msg);
+assertLastOplog({$v: 1, $set: {a: 1, b: 2}}, {_id: 1}, msg);
var msg = "TwoNestedPulls: two $pull";
coll.save({_id: 1, a: {b: [1, 2], c: [1, 2]}});
@@ -53,7 +53,7 @@ assertLastOplog({_id: 1, a: {b: [1, 2], c: [1, 2]}}, {_id: 1}, "save " + msg);
res = assert.writeOK(coll.update({}, {$pull: {'a.b': 2, 'a.c': 2}}));
assert.eq(res.nModified, 1, "update failed for '" + msg + "': " + res.toString());
assert.docEq({_id: 1, a: {b: [1], c: [1]}}, coll.findOne({}), msg);
-assertLastOplog({$set: {'a.b': [1], 'a.c': [1]}}, {_id: 1}, msg);
+assertLastOplog({$v: 1, $set: {'a.b': [1], 'a.c': [1]}}, {_id: 1}, msg);
var msg = "MultiSets: two $set";
coll.save({_id: 1, a: 1, b: 1});
@@ -61,7 +61,7 @@ assertLastOplog({_id: 1, a: 1, b: 1}, {_id: 1}, "save " + msg);
res = assert.writeOK(coll.update({}, {$set: {a: 2, b: 2}}));
assert.eq(res.nModified, 1, "update failed for '" + msg + "': " + res.toString());
assert.docEq({_id: 1, a: 2, b: 2}, coll.findOne({}), msg);
-assertLastOplog({$set: {a: 2, b: 2}}, {_id: 1}, msg);
+assertLastOplog({$v: 1, $set: {a: 2, b: 2}}, {_id: 1}, msg);
// More tests to validate the oplog format and correct execution
@@ -71,19 +71,19 @@ assertLastOplog({_id: 1, a: 1}, {_id: 1}, "save " + msg);
res = assert.writeOK(coll.update({}, {$set: {a: 2}}));
assert.eq(res.nModified, 1, "update failed for '" + msg + "': " + res.toString());
assert.docEq({_id: 1, a: 2}, coll.findOne({}), msg);
-assertLastOplog({$set: {a: 2}}, {_id: 1}, msg);
+assertLastOplog({$v: 1, $set: {a: 2}}, {_id: 1}, msg);
var msg = "bad single $inc";
res = assert.writeOK(coll.update({}, {$inc: {a: 1}}));
assert.eq(res.nModified, 1, "update failed for '" + msg + "': " + res.toString());
assert.docEq({_id: 1, a: 3}, coll.findOne({}), msg);
-assertLastOplog({$set: {a: 3}}, {_id: 1}, msg);
+assertLastOplog({$v: 1, $set: {a: 3}}, {_id: 1}, msg);
var msg = "bad double $set";
res = assert.writeOK(coll.update({}, {$set: {a: 2, b: 2}}));
assert.eq(res.nModified, 1, "update failed for '" + msg + "': " + res.toString());
assert.docEq({_id: 1, a: 2, b: 2}, coll.findOne({}), msg);
-assertLastOplog({$set: {a: 2, b: 2}}, {_id: 1}, msg);
+assertLastOplog({$v: 1, $set: {a: 2, b: 2}}, {_id: 1}, msg);
var msg = "bad save";
assert.writeOK(coll.save({_id: 1, a: [2]}));
@@ -94,14 +94,14 @@ var msg = "bad array $inc";
res = assert.writeOK(coll.update({}, {$inc: {"a.0": 1}}));
assert.eq(res.nModified, 1, "update failed for '" + msg + "': " + res.toString());
assert.docEq({_id: 1, a: [3]}, coll.findOne({}), msg);
-var lastTS = assertLastOplog({$set: {"a.0": 3}}, {_id: 1}, msg);
+var lastTS = assertLastOplog({$v: 1, $set: {"a.0": 3}}, {_id: 1}, msg);
var msg = "bad $setOnInsert";
res = assert.writeOK(coll.update({}, {$setOnInsert: {a: -1}}));
assert.eq(res.nMatched, 1, "update failed for '" + msg + "': " + res.toString());
-assert.docEq({_id: 1, a: [3]}, coll.findOne({}), msg); // No-op
-var otherTS = assertLastOplog({$set: {"a.0": 3}}, {_id: 1}, msg); // Nothing new
-assert.eq(lastTS, otherTS, "new oplog was not expected -- " + msg); // No new oplog entry
+assert.docEq({_id: 1, a: [3]}, coll.findOne({}), msg); // No-op
+var otherTS = assertLastOplog({$v: 1, $set: {"a.0": 3}}, {_id: 1}, msg); // Nothing new
+assert.eq(lastTS, otherTS, "new oplog was not expected -- " + msg); // No new oplog entry
coll.remove({});
assert.eq(coll.find().itcount(), 0, "collection not empty");
@@ -134,14 +134,14 @@ coll.save({_id: 1, a: "foo"});
res = assert.writeOK(coll.update({}, {$push: {c: 18}}));
assert.eq(res.nModified, 1, "update failed for '" + msg + "': " + res.toString());
assert.docEq({_id: 1, a: "foo", c: [18]}, coll.findOne({}), msg);
-assertLastOplog({$set: {"c": [18]}}, {_id: 1}, msg);
+assertLastOplog({$v: 1, $set: {"c": [18]}}, {_id: 1}, msg);
var msg = "bad array $push $slice";
coll.save({_id: 1, a: {b: [18]}});
res = assert.writeOK(coll.update({_id: {$gt: 0}}, {$push: {"a.b": {$each: [1, 2], $slice: -2}}}));
assert.eq(res.nModified, 1, "update failed for '" + msg + "': " + res.toString());
assert.docEq({_id: 1, a: {b: [1, 2]}}, coll.findOne({}), msg);
-assertLastOplog({$set: {"a.b": [1, 2]}}, {_id: 1}, msg);
+assertLastOplog({$v: 1, $set: {"a.b": [1, 2]}}, {_id: 1}, msg);
var msg = "bad array $push $sort ($slice -100)";
coll.save({_id: 1, a: {b: [{c: 2}, {c: 1}]}});
@@ -149,7 +149,7 @@ res = assert.writeOK(
coll.update({}, {$push: {"a.b": {$each: [{c: -1}], $sort: {c: 1}, $slice: -100}}}));
assert.eq(res.nModified, 1, "update failed for '" + msg + "': " + res.toString());
assert.docEq({_id: 1, a: {b: [{c: -1}, {c: 1}, {c: 2}]}}, coll.findOne({}), msg);
-assertLastOplog({$set: {"a.b": [{c: -1}, {c: 1}, {c: 2}]}}, {_id: 1}, msg);
+assertLastOplog({$v: 1, $set: {"a.b": [{c: -1}, {c: 1}, {c: 2}]}}, {_id: 1}, msg);
var msg = "bad array $push $slice $sort";
coll.save({_id: 1, a: [{b: 2}, {b: 1}]});
@@ -157,7 +157,7 @@ res = assert.writeOK(
coll.update({_id: {$gt: 0}}, {$push: {a: {$each: [{b: -1}], $slice: -2, $sort: {b: 1}}}}));
assert.eq(res.nModified, 1, "update failed for '" + msg + "': " + res.toString());
assert.docEq({_id: 1, a: [{b: 1}, {b: 2}]}, coll.findOne({}), msg);
-assertLastOplog({$set: {a: [{b: 1}, {b: 2}]}}, {_id: 1}, msg);
+assertLastOplog({$v: 1, $set: {a: [{b: 1}, {b: 2}]}}, {_id: 1}, msg);
var msg = "bad array $push $slice $sort first two";
coll.save({_id: 1, a: {b: [{c: 2}, {c: 1}]}});
@@ -165,7 +165,7 @@ res = assert.writeOK(
coll.update({_id: {$gt: 0}}, {$push: {"a.b": {$each: [{c: -1}], $slice: -2, $sort: {c: 1}}}}));
assert.eq(res.nModified, 1, "update failed for '" + msg + "': " + res.toString());
assert.docEq({_id: 1, a: {b: [{c: 1}, {c: 2}]}}, coll.findOne({}), msg);
-assertLastOplog({$set: {"a.b": [{c: 1}, {c: 2}]}}, {_id: 1}, msg);
+assertLastOplog({$v: 1, $set: {"a.b": [{c: 1}, {c: 2}]}}, {_id: 1}, msg);
var msg = "bad array $push $slice $sort reversed first two";
coll.save({_id: 1, a: {b: [{c: 1}, {c: 2}]}});
@@ -173,6 +173,6 @@ res = assert.writeOK(
coll.update({_id: {$gt: 0}}, {$push: {"a.b": {$each: [{c: -1}], $slice: -2, $sort: {c: -1}}}}));
assert.eq(res.nModified, 1, "update failed for '" + msg + "': " + res.toString());
assert.docEq({_id: 1, a: {b: [{c: 1}, {c: -1}]}}, coll.findOne({}), msg);
-assertLastOplog({$set: {"a.b": [{c: 1}, {c: -1}]}}, {_id: 1}, msg);
+assertLastOplog({$v: 1, $set: {"a.b": [{c: 1}, {c: -1}]}}, {_id: 1}, msg);
replTest.stopSet();
diff --git a/src/mongo/db/auth/role_graph_update.cpp b/src/mongo/db/auth/role_graph_update.cpp
index 692529feb5c..9b5629cd3ab 100644
--- a/src/mongo/db/auth/role_graph_update.cpp
+++ b/src/mongo/db/auth/role_graph_update.cpp
@@ -203,6 +203,7 @@ Status handleOplogUpdate(OperationContext* opCtx,
return status;
UpdateDriver::Options updateOptions;
+ updateOptions.modOptions.fromOplogApplication = true;
UpdateDriver driver(updateOptions);
// Oplog updates do not have array filters.
diff --git a/src/mongo/db/ops/modifier_interface.h b/src/mongo/db/ops/modifier_interface.h
index d833dcf07a1..77e85a2d447 100644
--- a/src/mongo/db/ops/modifier_interface.h
+++ b/src/mongo/db/ops/modifier_interface.h
@@ -169,8 +169,8 @@ public:
*/
struct ModifierInterface::Options {
Options() = default;
- Options(bool repl, bool ofs, const CollatorInterface* collator)
- : fromReplication(repl), enforceOkForStorage(ofs), collator(collator) {}
+ Options(bool fromOpLog, bool ofs, const CollatorInterface* collator)
+ : fromOplogApplication(fromOpLog), enforceOkForStorage(ofs), collator(collator) {}
static Options normal(const CollatorInterface* collator = nullptr) {
return Options(false, true, collator);
@@ -179,7 +179,7 @@ struct ModifierInterface::Options {
return Options(true, false, collator);
}
- bool fromReplication = false;
+ bool fromOplogApplication = false;
bool enforceOkForStorage = true;
const CollatorInterface* collator = nullptr;
};
diff --git a/src/mongo/db/ops/modifier_set.cpp b/src/mongo/db/ops/modifier_set.cpp
index 86796a31314..fa0e11126c9 100644
--- a/src/mongo/db/ops/modifier_set.cpp
+++ b/src/mongo/db/ops/modifier_set.cpp
@@ -133,9 +133,9 @@ Status ModifierSet::prepare(mutablebson::Element root,
// proceed.
if (status.code() == ErrorCodes::NonExistentPath) {
_preparedState->elemFound = root.getDocument().end();
- } else if (_modOptions.fromReplication && status.code() == ErrorCodes::PathNotViable) {
- // If we are coming from replication and it is an invalid path,
- // then push on indicating that we had a blocking element, which we stopped at
+ } else if (_modOptions.fromOplogApplication && status.code() == ErrorCodes::PathNotViable) {
+ // If we are applying an oplog entry and it is an invalid path, then push on indicating that
+ // we had a blocking element, which we stopped at
_preparedState->elemIsBlocking = true;
} else if (!status.isOK()) {
return status;
@@ -211,7 +211,7 @@ Status ModifierSet::apply() const {
}
// Remove the blocking element, if we are from replication applier. See comment below.
- if (_modOptions.fromReplication && !destExists && _preparedState->elemFound.ok() &&
+ if (_modOptions.fromOplogApplication && !destExists && _preparedState->elemFound.ok() &&
_preparedState->elemIsBlocking && (!(_preparedState->elemFound.isType(Array)) ||
!(_preparedState->elemFound.isType(Object)))) {
/**
diff --git a/src/mongo/db/ops/parsed_update.cpp b/src/mongo/db/ops/parsed_update.cpp
index 5d1188d7493..576147bdcee 100644
--- a/src/mongo/db/ops/parsed_update.cpp
+++ b/src/mongo/db/ops/parsed_update.cpp
@@ -135,11 +135,11 @@ Status ParsedUpdate::parseUpdate() {
// Config db docs shouldn't get checked for valid field names since the shard key can have
// a dot (".") in it.
const bool shouldValidate =
- !(!_opCtx->writesAreReplicated() || ns.isConfigDB() || _request->isFromMigration());
+ !(_request->isFromOplogApplication() || ns.isConfigDB() || _request->isFromMigration());
_driver.setLogOp(true);
_driver.setModOptions(ModifierInterface::Options(
- !_opCtx->writesAreReplicated(), shouldValidate, _collator.get()));
+ _request->isFromOplogApplication(), shouldValidate, _collator.get()));
return _driver.parse(_request->getUpdates(), _arrayFilters, _request->isMulti());
}
diff --git a/src/mongo/db/ops/update_request.h b/src/mongo/db/ops/update_request.h
index 0e2e7e92210..6ae19c94d76 100644
--- a/src/mongo/db/ops/update_request.h
+++ b/src/mongo/db/ops/update_request.h
@@ -62,6 +62,7 @@ public:
_upsert(false),
_multi(false),
_fromMigration(false),
+ _fromOplogApplication(false),
_lifecycle(NULL),
_isExplain(false),
_returnDocs(ReturnDocOption::RETURN_NONE),
@@ -154,6 +155,14 @@ public:
return _fromMigration;
}
+ inline void setFromOplogApplication(bool value = true) {
+ _fromOplogApplication = value;
+ }
+
+ bool isFromOplogApplication() const {
+ return _fromOplogApplication;
+ }
+
inline void setLifecycle(UpdateLifecycle* value) {
_lifecycle = value;
}
@@ -226,6 +235,7 @@ public:
builder << " upsert: " << _upsert;
builder << " multi: " << _multi;
builder << " fromMigration: " << _fromMigration;
+ builder << " fromOplogApplication: " << _fromOplogApplication;
builder << " isExplain: " << _isExplain;
return builder.str();
}
@@ -269,6 +279,9 @@ private:
// True if this update is on behalf of a chunk migration.
bool _fromMigration;
+ // True if this update was triggered by the application of an oplog entry.
+ bool _fromOplogApplication;
+
// The lifecycle data, and events used during the update request.
UpdateLifecycle* _lifecycle;
diff --git a/src/mongo/db/repl/oplog.cpp b/src/mongo/db/repl/oplog.cpp
index cf8c0539ff4..6cea52c076a 100644
--- a/src/mongo/db/repl/oplog.cpp
+++ b/src/mongo/db/repl/oplog.cpp
@@ -1087,6 +1087,7 @@ Status applyOperation_inlock(OperationContext* opCtx,
request.setQuery(b.done());
request.setUpdates(o);
request.setUpsert();
+ request.setFromOplogApplication(true);
UpdateLifecycleImpl updateLifecycle(requestNss);
request.setLifecycle(&updateLifecycle);
@@ -1117,6 +1118,7 @@ Status applyOperation_inlock(OperationContext* opCtx,
request.setQuery(updateCriteria);
request.setUpdates(o);
request.setUpsert(upsert);
+ request.setFromOplogApplication(true);
UpdateLifecycleImpl updateLifecycle(requestNss);
request.setLifecycle(&updateLifecycle);
diff --git a/src/mongo/db/update/arithmetic_node_test.cpp b/src/mongo/db/update/arithmetic_node_test.cpp
index 105f2019057..64f1ad3c8d3 100644
--- a/src/mongo/db/update/arithmetic_node_test.cpp
+++ b/src/mongo/db/update/arithmetic_node_test.cpp
@@ -273,7 +273,7 @@ TEST_F(ArithmeticNodeTest, ApplyNonViablePathToCreateFromReplicationIsNoOp) {
setPathToCreate("b");
setPathTaken("a");
addIndexedPath("a");
- setFromReplication(true);
+ setFromOplogApplication(true);
auto result = node.apply(getApplyParams(doc.root()["a"]));
ASSERT_TRUE(result.noop);
ASSERT_FALSE(result.indexesAffected);
diff --git a/src/mongo/db/update/log_builder.cpp b/src/mongo/db/update/log_builder.cpp
index 173c8afb3f9..36c77cee99f 100644
--- a/src/mongo/db/update/log_builder.cpp
+++ b/src/mongo/db/update/log_builder.cpp
@@ -39,6 +39,8 @@ const char kSet[] = "$set";
const char kUnset[] = "$unset";
} // namespace
+constexpr StringData LogBuilder::kUpdateSemanticsFieldName;
+
inline Status LogBuilder::addToSection(Element newElt, Element* section, const char* sectionName) {
// If we don't already have this section, try to create it now.
if (!section->ok()) {
@@ -128,6 +130,26 @@ Status LogBuilder::addToUnsets(StringData path) {
return addToSection(logElement, &_unsetAccumulator, kUnset);
}
+Status LogBuilder::setUpdateSemantics(UpdateSemantics updateSemantics) {
+ if (hasObjectReplacement()) {
+ return Status(ErrorCodes::IllegalOperation,
+ "LogBuilder: Invalid attempt to add a $v entry to a log with an existing "
+ "object replacement");
+ }
+
+ if (_updateSemantics.ok()) {
+ return Status(ErrorCodes::IllegalOperation, "LogBuilder: Invalid attempt to set $v twice.");
+ }
+
+ mutablebson::Document& doc = _logRoot.getDocument();
+ _updateSemantics =
+ doc.makeElementInt(kUpdateSemanticsFieldName, static_cast<int>(updateSemantics));
+
+ dassert(_logRoot[kUpdateSemanticsFieldName] == doc.end());
+
+ return _logRoot.pushFront(_updateSemantics);
+}
+
Status LogBuilder::getReplacementObject(Element* outElt) {
// If the replacement accumulator is not ok, we must have started a $set or $unset
// already, so an object replacement is not permitted.
@@ -143,6 +165,12 @@ Status LogBuilder::getReplacementObject(Element* outElt) {
"LogBuilder: Invalid attempt to acquire the replacement object "
"in a log with existing object replacement data");
+ if (_updateSemantics.ok()) {
+ return Status(ErrorCodes::IllegalOperation,
+ "LogBuilder: Invalid attempt to acquire the replacement object in a log with "
+ "an update semantics value");
+ }
+
// OK to enqueue object replacement items.
*outElt = _objectReplacementAccumulator;
return Status::OK();
diff --git a/src/mongo/db/update/log_builder.h b/src/mongo/db/update/log_builder.h
index 0d667f94151..9d7b1954365 100644
--- a/src/mongo/db/update/log_builder.h
+++ b/src/mongo/db/update/log_builder.h
@@ -33,6 +33,24 @@
namespace mongo {
+/**
+ * There are two update subsystems in MongoDB with slightly different semantics.
+ */
+enum class UpdateSemantics {
+ // The update system that was in use up until v3.4, which is implemented in ModifierInterface
+ // and its subclasses. When a single update adds multiple fields, those fields are added in the
+ // same order as they are specified in the update document.
+ kModifierInterface = 0,
+
+ // The update system introduced in v3.6, which is implemented in UpdateNode and its subclasses.
+ // When a single update adds multiple fields, those fields are added in lexicographic order by
+ // field name. This system introduces support for arrayFilters and $[] syntax.
+ kUpdateNode = 1,
+
+ // Must be last.
+ kNumUpdateSemantics
+};
+
/** LogBuilder abstracts away some of the details of producing a properly constructed oplog
* update entry. It manages separate regions into which it accumulates $set and $unset
* operations, and distinguishes object replacement style oplog generation from
@@ -40,6 +58,8 @@ namespace mongo {
*/
class LogBuilder {
public:
+ static constexpr StringData kUpdateSemanticsFieldName = "$v"_sd;
+
/** Construct a new LogBuilder. Log entries will be recorded as new children under the
* 'logRoot' Element, which must be of type mongo::Object and have no children.
*/
@@ -47,7 +67,8 @@ public:
: _logRoot(logRoot),
_objectReplacementAccumulator(_logRoot),
_setAccumulator(_logRoot.getDocument().end()),
- _unsetAccumulator(_setAccumulator) {
+ _unsetAccumulator(_setAccumulator),
+ _updateSemantics(_setAccumulator) {
dassert(logRoot.isType(mongo::Object));
dassert(!logRoot.hasChildren());
}
@@ -95,6 +116,11 @@ public:
*/
Status addToUnsets(StringData path);
+ /**
+ * Add a "$v" field to the log. Fails if there is already a "$v" field.
+ */
+ Status setUpdateSemantics(UpdateSemantics updateSemantics);
+
/** Obtain, via the out parameter 'outElt', a pointer to the mongo::Object type Element
* to which the components of an object replacement should be recorded. It is an error
* to call this if any Elements have been added by calling either addToSets or
@@ -116,6 +142,7 @@ private:
mutablebson::Element _objectReplacementAccumulator;
mutablebson::Element _setAccumulator;
mutablebson::Element _unsetAccumulator;
+ mutablebson::Element _updateSemantics;
};
} // namespace mongo
diff --git a/src/mongo/db/update/modifier_node.cpp b/src/mongo/db/update/modifier_node.cpp
index 7de8ff73ac5..82b66014a51 100644
--- a/src/mongo/db/update/modifier_node.cpp
+++ b/src/mongo/db/update/modifier_node.cpp
@@ -230,10 +230,10 @@ UpdateNode::ApplyResult ModifierNode::applyToNonexistentElement(ApplyParams appl
// {$set: {a: 0}}
// Setting 'a.b' will fail the second time, but we must still set 'c'.
// (There are modifiers besides $set that use this code path, but they are not used for
- // replication, so we are not concerned with their behavior when "fromReplication" is
- // true.)
+ // replication, so we are not concerned with their behavior when "fromOplogApplication"
+ // is true.)
if (statusWithFirstCreatedElem.getStatus().code() == ErrorCodes::PathNotViable &&
- applyParams.fromReplication) {
+ applyParams.fromOplogApplication) {
return ApplyResult::noopResult();
}
uassertStatusOK(statusWithFirstCreatedElem);
diff --git a/src/mongo/db/update/set_node_test.cpp b/src/mongo/db/update/set_node_test.cpp
index ec51b1b1e9c..5363c7ea654 100644
--- a/src/mongo/db/update/set_node_test.cpp
+++ b/src/mongo/db/update/set_node_test.cpp
@@ -171,7 +171,7 @@ TEST_F(SetNodeTest, ApplyNonViablePathToCreateFromReplicationIsNoOp) {
setPathToCreate("b");
setPathTaken("a");
addIndexedPath("a");
- setFromReplication(true);
+ setFromOplogApplication(true);
auto result = node.apply(getApplyParams(doc.root()["a"]));
ASSERT_TRUE(result.noop);
ASSERT_FALSE(result.indexesAffected);
@@ -804,7 +804,7 @@ TEST_F(SetNodeTest, SingleFieldFromReplication) {
setPathToCreate("1.b");
setPathTaken("a");
addIndexedPath("a.1.b");
- setFromReplication(true);
+ setFromOplogApplication(true);
auto result = node.apply(getApplyParams(doc.root()["a"]));
ASSERT_TRUE(result.noop);
ASSERT_FALSE(result.indexesAffected);
@@ -822,7 +822,7 @@ TEST_F(SetNodeTest, SingleFieldNoIdFromReplication) {
setPathToCreate("1.b");
setPathTaken("a");
addIndexedPath("a.1.b");
- setFromReplication(true);
+ setFromOplogApplication(true);
auto result = node.apply(getApplyParams(doc.root()["a"]));
ASSERT_TRUE(result.noop);
ASSERT_FALSE(result.indexesAffected);
@@ -840,7 +840,7 @@ TEST_F(SetNodeTest, NestedFieldFromReplication) {
setPathToCreate("1.b");
setPathTaken("a.a");
addIndexedPath("a.a.1.b");
- setFromReplication(true);
+ setFromOplogApplication(true);
auto result = node.apply(getApplyParams(doc.root()["a"]["a"]));
ASSERT_TRUE(result.noop);
ASSERT_FALSE(result.indexesAffected);
@@ -858,7 +858,7 @@ TEST_F(SetNodeTest, DoubleNestedFieldFromReplication) {
setPathToCreate("d");
setPathTaken("a.b.c");
addIndexedPath("a.b.c.d");
- setFromReplication(true);
+ setFromOplogApplication(true);
auto result = node.apply(getApplyParams(doc.root()["a"]["b"]["c"]));
ASSERT_TRUE(result.noop);
ASSERT_FALSE(result.indexesAffected);
@@ -876,7 +876,7 @@ TEST_F(SetNodeTest, NestedFieldNoIdFromReplication) {
setPathToCreate("1.b");
setPathTaken("a.a");
addIndexedPath("a.a.1.b");
- setFromReplication(true);
+ setFromOplogApplication(true);
auto result = node.apply(getApplyParams(doc.root()["a"]["a"]));
ASSERT_TRUE(result.noop);
ASSERT_FALSE(result.indexesAffected);
@@ -894,7 +894,7 @@ TEST_F(SetNodeTest, ReplayArrayFieldNotAppendedIntermediateFromReplication) {
setPathToCreate("b");
setPathTaken("a.0");
addIndexedPath("a.1.b");
- setFromReplication(true);
+ setFromOplogApplication(true);
auto result = node.apply(getApplyParams(doc.root()["a"]["0"]));
ASSERT_TRUE(result.noop);
ASSERT_FALSE(result.indexesAffected);
@@ -929,7 +929,7 @@ TEST_F(SetNodeTest, Set6FromRepl) {
mutablebson::Document doc(fromjson("{_id: 1, r: {a:1, b:2}}"));
setPathTaken("r.a");
addIndexedPath("r.a");
- setFromReplication(true);
+ setFromOplogApplication(true);
auto result = node.apply(getApplyParams(doc.root()["r"]["a"]));
ASSERT_FALSE(result.noop);
ASSERT_TRUE(result.indexesAffected);
diff --git a/src/mongo/db/update/update_array_node_test.cpp b/src/mongo/db/update/update_array_node_test.cpp
index fc76b08e418..cd573a7dc80 100644
--- a/src/mongo/db/update/update_array_node_test.cpp
+++ b/src/mongo/db/update/update_array_node_test.cpp
@@ -632,7 +632,7 @@ TEST_F(UpdateArrayNodeTest, ApplyArrayUpdateFromReplication) {
mutablebson::Document doc(fromjson("{a: [0]}"));
addIndexedPath("a");
- setFromReplication(true);
+ setFromOplogApplication(true);
auto result = root.apply(getApplyParams(doc.root()));
ASSERT_FALSE(result.indexesAffected);
ASSERT_TRUE(result.noop);
diff --git a/src/mongo/db/update/update_driver.cpp b/src/mongo/db/update/update_driver.cpp
index e8b33765444..3c6124e4d8e 100644
--- a/src/mongo/db/update/update_driver.cpp
+++ b/src/mongo/db/update/update_driver.cpp
@@ -59,6 +59,23 @@ using pathsupport::EqualityMatches;
namespace {
+StatusWith<UpdateSemantics> updateSemanticsFromElement(BSONElement element) {
+ if (element.type() != BSONType::NumberInt && element.type() != BSONType::NumberLong) {
+ return {ErrorCodes::BadValue, "'$v' (UpdateSemantics) field must be an integer."};
+ }
+
+ auto updateSemantics = element.numberLong();
+
+ if (updateSemantics < 0 ||
+ updateSemantics >= static_cast<int>(UpdateSemantics::kNumUpdateSemantics)) {
+ return {ErrorCodes::BadValue,
+ str::stream() << "Unrecognized value for '$v' (UpdateSemantics) field: "
+ << updateSemantics};
+ }
+
+ return static_cast<UpdateSemantics>(updateSemantics);
+}
+
modifiertable::ModifierType validateMod(BSONElement mod) {
auto modType = modifiertable::getType(mod.fieldName());
@@ -94,7 +111,19 @@ bool parseUpdateExpression(
const std::map<StringData, std::unique_ptr<ExpressionWithPlaceholder>>& arrayFilters) {
bool positional = false;
std::set<std::string> foundIdentifiers;
+ bool foundUpdateSemanticsField = false;
for (auto&& mod : updateExpr) {
+ // If there is a "$v" field among the modifiers, it should have already been used by the
+ // caller to determine that this is the correct parsing function.
+ if (mod.fieldNameStringData() == LogBuilder::kUpdateSemanticsFieldName) {
+ uassert(ErrorCodes::BadValue,
+ "Duplicate $v in oplog update document",
+ !foundUpdateSemanticsField);
+ foundUpdateSemanticsField = true;
+ invariant(mod.numberLong() == static_cast<long long>(UpdateSemantics::kUpdateNode));
+ continue;
+ }
+
auto modType = validateMod(mod);
for (auto&& field : mod.Obj()) {
auto statusWithPositional = UpdateObjectNode::parseAndMerge(
@@ -152,29 +181,70 @@ Status UpdateDriver::parse(
// Register the fact that this driver is not doing a full object replacement.
_replacementMode = false;
- // If the featureCompatibilityVersion is 3.4, parse using the ModifierInterfaces.
- if (serverGlobalParams.featureCompatibility.version.load() ==
- ServerGlobalParams::FeatureCompatibility::Version::k34) {
- uassert(
- ErrorCodes::InvalidOptions,
- str::stream() << "The featureCompatibilityVersion must be 3.6 to use arrayFilters. See "
- << feature_compatibility_version::kDochubLink
- << ".",
- arrayFilters.empty());
- for (auto&& mod : updateExpr) {
- auto modType = validateMod(mod);
- for (auto&& field : mod.Obj()) {
- auto status = addAndParse(modType, field);
- if (!status.isOK()) {
- return status;
+ // Decide which update semantics to use, using the criteria outlined in the comment above this
+ // function's declaration.
+ BSONElement updateSemanticsElement = updateExpr[LogBuilder::kUpdateSemanticsFieldName];
+ UpdateSemantics updateSemantics;
+ if (updateSemanticsElement) {
+ if (!_modOptions.fromOplogApplication) {
+ return {ErrorCodes::FailedToParse, "The $v update field is only recognized internally"};
+ }
+ auto statusWithUpdateSemantics = updateSemanticsFromElement(updateSemanticsElement);
+ if (!statusWithUpdateSemantics.isOK()) {
+ return statusWithUpdateSemantics.getStatus();
+ }
+
+ updateSemantics = statusWithUpdateSemantics.getValue();
+ } else if (_modOptions.fromOplogApplication) {
+ updateSemantics = UpdateSemantics::kModifierInterface;
+ } else {
+ updateSemantics = (serverGlobalParams.featureCompatibility.version.load() ==
+ ServerGlobalParams::FeatureCompatibility::Version::k34)
+ ? UpdateSemantics::kModifierInterface
+ : UpdateSemantics::kUpdateNode;
+ }
+
+ switch (updateSemantics) {
+ case UpdateSemantics::kModifierInterface: {
+ uassert(ErrorCodes::InvalidOptions,
+ str::stream()
+ << "The featureCompatibilityVersion must be 3.6 to use arrayFilters. See "
+ << feature_compatibility_version::kDochubLink
+ << ".",
+ arrayFilters.empty());
+ bool foundUpdateSemanticsField = false;
+ for (auto&& mod : updateExpr) {
+ // If there is a "$v" field among the modifiers, we have already used it to
+ // determine that this is the correct parsing code.
+ if (mod.fieldNameStringData() == LogBuilder::kUpdateSemanticsFieldName) {
+ uassert(ErrorCodes::BadValue,
+ "Duplicate $v in oplog update document",
+ !foundUpdateSemanticsField);
+ foundUpdateSemanticsField = true;
+ invariant(mod.numberLong() ==
+ static_cast<long long>(UpdateSemantics::kModifierInterface));
+ continue;
+ }
+
+ auto modType = validateMod(mod);
+ for (auto&& field : mod.Obj()) {
+ auto status = addAndParse(modType, field);
+ if (!status.isOK()) {
+ return status;
+ }
}
}
+ break;
}
- } else {
- auto root = stdx::make_unique<UpdateObjectNode>();
- _positional =
- parseUpdateExpression(updateExpr, root.get(), _modOptions.collator, arrayFilters);
- _root = std::move(root);
+ case UpdateSemantics::kUpdateNode: {
+ auto root = stdx::make_unique<UpdateObjectNode>();
+ _positional =
+ parseUpdateExpression(updateExpr, root.get(), _modOptions.collator, arrayFilters);
+ _root = std::move(root);
+ break;
+ }
+ default:
+ MONGO_UNREACHABLE;
}
return Status::OK();
@@ -276,7 +346,7 @@ Status UpdateDriver::update(StringData matchedField,
UpdateNode::ApplyParams applyParams(doc->root(), immutablePaths);
applyParams.matchedField = matchedField;
applyParams.insert = _insert;
- applyParams.fromReplication = _modOptions.fromReplication;
+ applyParams.fromOplogApplication = _modOptions.fromOplogApplication;
applyParams.validateForStorage = validateForStorage;
applyParams.indexData = _indexedFields;
if (_logOp && logOpRec) {
@@ -290,6 +360,14 @@ Status UpdateDriver::update(StringData matchedField,
if (docWasModified) {
*docWasModified = !applyResult.noop;
}
+ if (!_replacementMode && _logOp && logOpRec) {
+ // When using kUpdateNode update semantics on the primary, we must include a "$v" field
+ // in the update document so that the secondary knows to apply the update with
+ // kUpdateNode semantics. If this is a full document replacement, we don't need to
+ // specify the semantics (and there would be no place to put a "$v" field in the update
+ // document).
+ invariantOK(logBuilder.setUpdateSemantics(UpdateSemantics::kUpdateNode));
+ }
} else {
diff --git a/src/mongo/db/update/update_driver.h b/src/mongo/db/update/update_driver.h
index 52a9f77e912..823e8eafeeb 100644
--- a/src/mongo/db/update/update_driver.h
+++ b/src/mongo/db/update/update_driver.h
@@ -55,14 +55,19 @@ public:
~UpdateDriver();
/**
- * Parses the update expression 'updateExpr'. If the featurCompatibilityVersion is 3.6,
- * 'updateExpr' is parsed into '_root'. Otherwise, 'updateExpr' is parsed into '_mods'. This is
- * done because applying updates via UpdateNode creates new fields in lexicographic order,
- * whereas applying updates via ModifierInterface creates new fields in the order they are
- * specified in 'updateExpr', so it is necessary that the whole replica set have version 3.6 in
- * order to use UpdateNode. Uasserts if the featureCompatibilityVersion is 3.4 and
- * 'arrayFilters' is non-empty. Uasserts or returns a non-ok status if 'updateExpr' fails to
- * parse.
+ * Parses the 'updateExpr' update expression. When parsing with the kUpdateNode update
+ * semantics, 'updateExpr' is parsed into the '_root' member variable, and when parsing with the
+ * kModifierInterface update semantics, 'updateExpr' is parsed into the '_mods' member variable.
+ * It is important that the secondary applies updates with the same semantics that the primary
+ * used.
+ * - The primary can add a "$v" field with an integer value (storing an UpdateSemantics enum
+ * value) that this function will use to determine which update semantics to use.
+ * - When applying an oplog entry that has no "$v" field, this function assumes
+ * kModifierInterface semantics.
+ * - When applying an update on the primary, this function uses the feature compatibility
+ * version to determine which update semantics to use.
+ * Uasserts if the featureCompatibilityVersion is 3.4 and 'arrayFilters' is non-empty. Uasserts
+ * or returns a non-ok status if 'updateExpr' fails to parse.
*/
Status parse(
const BSONObj& updateExpr,
diff --git a/src/mongo/db/update/update_node.h b/src/mongo/db/update/update_node.h
index b8cb17dbb9a..7b73383eabc 100644
--- a/src/mongo/db/update/update_node.h
+++ b/src/mongo/db/update/update_node.h
@@ -110,7 +110,7 @@ public:
// This is provided because some modifiers may ignore certain errors when the update is from
// replication.
- bool fromReplication = false;
+ bool fromOplogApplication = false;
// If true, UpdateNode::apply ensures that modified elements do not violate depth or DBRef
// constraints.
diff --git a/src/mongo/db/update/update_node_test_fixture.h b/src/mongo/db/update/update_node_test_fixture.h
index b434d93a99e..7a3e47636c1 100644
--- a/src/mongo/db/update/update_node_test_fixture.h
+++ b/src/mongo/db/update/update_node_test_fixture.h
@@ -56,7 +56,7 @@ protected:
_pathTaken = std::make_shared<FieldRef>();
_matchedField = StringData();
_insert = false;
- _fromReplication = false;
+ _fromOplogApplication = false;
_validateForStorage = true;
_indexData.reset();
_logDoc.reset();
@@ -69,7 +69,7 @@ protected:
applyParams.pathTaken = _pathTaken;
applyParams.matchedField = _matchedField;
applyParams.insert = _insert;
- applyParams.fromReplication = _fromReplication;
+ applyParams.fromOplogApplication = _fromOplogApplication;
applyParams.validateForStorage = _validateForStorage;
applyParams.indexData = _indexData.get();
applyParams.logBuilder = _logBuilder.get();
@@ -100,8 +100,8 @@ protected:
_insert = insert;
}
- void setFromReplication(bool fromReplication) {
- _fromReplication = fromReplication;
+ void setFromOplogApplication(bool fromOplogApplication) {
+ _fromOplogApplication = fromOplogApplication;
}
void setValidateForStorage(bool validateForStorage) {
@@ -130,7 +130,7 @@ private:
std::shared_ptr<FieldRef> _pathTaken;
StringData _matchedField;
bool _insert;
- bool _fromReplication;
+ bool _fromOplogApplication;
bool _validateForStorage;
std::unique_ptr<UpdateIndexData> _indexData;
mutablebson::Document _logDoc;
diff --git a/src/mongo/db/update/update_object_node_test.cpp b/src/mongo/db/update/update_object_node_test.cpp
index 37569cdea05..1e699326587 100644
--- a/src/mongo/db/update/update_object_node_test.cpp
+++ b/src/mongo/db/update/update_object_node_test.cpp
@@ -2114,7 +2114,7 @@ TEST_F(UpdateObjectNodeTest, ApplyBlockingElementFromReplication) {
mutablebson::Document doc(fromjson("{a: 0}"));
addIndexedPath("a");
- setFromReplication(true);
+ setFromOplogApplication(true);
auto result = root.apply(getApplyParams(doc.root()));
ASSERT_FALSE(result.indexesAffected);
ASSERT_FALSE(result.noop);