author      Justin Seyster <justin.seyster@mongodb.com>    2017-09-14 12:14:54 -0400
committer   Justin Seyster <justin.seyster@mongodb.com>    2017-09-14 12:14:54 -0400
commit      390e5f47f00dcf133f361e3f9027e4da7d08d628
tree        901690cdc5bc1b681e1e2b95c30061784a15187e /jstests/concurrency
parent      5767ee2421fa6c7934a90e9083f07743a83dcf71
download    mongo-390e5f47f00dcf133f361e3f9027e4da7d08d628.tar.gz
SERVER-30705 Add $v field for update semantics in oplog updates.
With the new UpdateNode class hierarchy, there are two code paths for
applying an update to a document that have slightly different
semantics. The order of fields in the resulting document can vary
depending on which code path is used to apply an update. A difference
in ordering between documents in a replica set is considered a
"mismatch," so we need to ensure that secondaries always apply updates
using the same update system that the primary uses.
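
As a rough illustration (the collection name is invented, and exactly
which path produces which ordering is an assumption here, not taken
from the server source), a $set that names fields out of order can
expose the difference:

// Shell sketch: the same update applied under the two update systems.
// Assumption: one path appends the $set fields in the order they appear
// in the update document, the other appends them in sorted field order.
db.coll.insert({_id: 1});
db.coll.update({_id: 1}, {$set: {b: 1, a: 1}});
// One path yields:   {_id: 1, b: 1, a: 1}
// The other yields:  {_id: 1, a: 1, b: 1}
// If the primary and a secondary apply the update with different systems,
// the resulting documents differ and a consistency check reports a mismatch.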
When an update executes as part of applying an oplog entry, the update
document may now include a $v field, which specifies the update
semantics used by the operation being replicated. When the primary
uses the new semantics (because it is a 3.6 mongod with
featureCompatibilityVersion set to 3.6), it includes {$v: 1} in the
oplog's update document to indicate that the secondary should apply
the update with the newer 'UpdateNode' semantics.
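
For illustration only, such an update entry might look roughly like the
following (namespace and values are invented; only the standard
op/ns/o2/o oplog fields are shown):

// Hypothetical oplog entry for an update replicated from a 3.6 primary
// running with featureCompatibilityVersion 3.6.
{
    op: "u",                        // update operation
    ns: "test.coll",                // target namespace (made up)
    o2: {_id: 1},                   // identifies the document to update
    o: {$v: 1, $set: {a: 1, b: 1}}  // $v: 1 => apply with UpdateNode semantics
}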
There are two other places where we need this behavior:
1) In role_graph_update.cpp, where the handleOplogUpdate observer
needs to update its in-memory BSON representation of a role to
reflect an update in the admin database, and
2) in the applyOps command, which is used for testing how oplog
entries get applied.
Both these code paths set the fromOplogApplication flag, which
replaces the old fromReplication flag, and they also gain behavior
that used to be exclusive to oplog applications from
replication. (Specifically, they skip update validation checks, which
should have already passed before the oplog entry was created.)
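
For example, a test can replay such an update through applyOps, $v
field included. A minimal sketch, with an invented namespace and
values:

// Replays one update entry the same way oplog application would.
assert.commandWorked(db.adminCommand({
    applyOps: [{
        op: "u",
        ns: db.getName() + ".coll",
        o2: {_id: 1},
        o: {$v: 1, $set: {a: 1, b: 1}}
    }]
}));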
Diffstat (limited to 'jstests/concurrency')
-rw-r--r--   jstests/concurrency/fsm_all_simultaneous.js                        |  2
-rw-r--r--   jstests/concurrency/fsm_workloads/toggle_feature_compatibility.js  | 64
2 files changed, 66 insertions, 0 deletions
diff --git a/jstests/concurrency/fsm_all_simultaneous.js b/jstests/concurrency/fsm_all_simultaneous.js
index bf4fdfe3d02..f390f262e97 100644
--- a/jstests/concurrency/fsm_all_simultaneous.js
+++ b/jstests/concurrency/fsm_all_simultaneous.js
@@ -14,6 +14,8 @@ var blacklist = [
     'agg_group_external.js',  // uses >100MB of data, which can overwhelm test hosts
     'agg_sort_external.js',   // uses >100MB of data, which can overwhelm test hosts
+
+    'toggle_feature_compatibility.js',  // Sets FCV to 3.4, which could interfere with other tests.
 ].map(function(file) {
     return dir + '/' + file;
 });
diff --git a/jstests/concurrency/fsm_workloads/toggle_feature_compatibility.js b/jstests/concurrency/fsm_workloads/toggle_feature_compatibility.js
new file mode 100644
index 00000000000..0c0ed9297fd
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/toggle_feature_compatibility.js
@@ -0,0 +1,64 @@
+"use strict";
+
+/**
+ * toggle_feature_compatibility.js
+ *
+ * Adds and updates documents in some threads while rapidly toggling the feature
+ * compatibility version between 3.4 and 3.6 in other threads, triggering the
+ * failure in SERVER-30705.
+ */
+var $config = (function() {
+
+    var states = (function() {
+
+        function init(db, collName) {
+        }
+
+        function featureCompatibilityVersion34(db, collName) {
+            assert.commandWorked(db.adminCommand({setFeatureCompatibilityVersion: "3.4"}));
+        }
+
+        function featureCompatibilityVersion36(db, collName) {
+            assert.commandWorked(db.adminCommand({setFeatureCompatibilityVersion: "3.6"}));
+        }
+
+        function insertAndUpdate(db, collName) {
+            let insertID = Random.randInt(1000000000);
+            let res = db[collName].insert({_id: insertID});
+
+            // Fail the test on any write error, except for a duplicate key error, which can
+            // (rarely) happen when we accidentally choose the same random key more than once.
+            assert(!res.hasWriteError() || res.getWriteError().code == ErrorCodes.DuplicateKey);
+            assert.writeOK(db[collName].update({_id: insertID}, {$set: {b: 1, a: 1}}));
+        }
+
+        return {
+            init: init,
+            featureCompatibilityVersion34: featureCompatibilityVersion34,
+            featureCompatibilityVersion36: featureCompatibilityVersion36,
+            insertAndUpdate: insertAndUpdate
+        };
+
+    })();
+
+    var transitions = {
+        init: {featureCompatibilityVersion34: 0.5, insertAndUpdate: 0.5},
+        featureCompatibilityVersion34: {featureCompatibilityVersion36: 1},
+        featureCompatibilityVersion36: {featureCompatibilityVersion34: 1},
+        insertAndUpdate: {insertAndUpdate: 1}
+    };
+
+    function teardown(db, collName, cluster) {
+        assert.commandWorked(db.adminCommand({setFeatureCompatibilityVersion: "3.6"}));
+        assertWhenOwnColl(db[collName].drop());
+    }
+
+    return {
+        threadCount: 8,
+        iterations: 1000,
+        data: null,
+        states: states,
+        transitions: transitions,
+        teardown: teardown
+    };
+})();