author    Max Hirschhorn <max.hirschhorn@mongodb.com>    2015-05-04 17:39:51 -0400
committer Max Hirschhorn <max.hirschhorn@mongodb.com>    2015-05-04 17:41:05 -0400
commit    feef878308e0ee990a17826073d8e4db9ec1657b (patch)
tree      cad901a660f04beae391c158b0f2b5d125149fa1
parent    7de337ef2b7d1a179bbd599550f417fd75ebdea0 (diff)
download  mongo-feef878308e0ee990a17826073d8e4db9ec1657b.tar.gz
SERVER-18304 Only call _appendHelper() if update/remove succeeds.
Otherwise the response will contain the "value" field multiple times, because the command result is mutated even when the operation does not succeed (due to a WriteConflictException). Depending on the client logic, e.g. if it only reads the first occurrence of the field, the client may end up claiming to have deleted a document that was actually deleted by another client.
-rw-r--r--  jstests/concurrency/fsm_workloads/findAndModify_remove_queue.js  181
-rw-r--r--  jstests/concurrency/fsm_workloads/findAndModify_update_queue.js   66
-rw-r--r--  src/mongo/db/commands/find_and_modify.cpp                           9
3 files changed, 254 insertions(+), 2 deletions(-)
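
Why the ordering matters: the findAndModify write runs inside a retry loop, and a WriteConflictException restarts the attempt from the top. Before this change, the matched document was appended to the command result before the write was known to have succeeded, so a retried attempt appended "value" a second time. The following is a minimal, standalone sketch of that failure mode, not the server code itself; the command result is simulated as an array of [field, value] pairs, since appending the same field name twice is exactly how the duplicate "value" entries arose. It is runnable in the mongo shell (print and tojson are shell built-ins).

// Sketch only: simulates the old vs. fixed append ordering around a retried write conflict.
function simulateFindAndModifyRemove(appendBeforeWrite) {
    var result = [];    // stands in for the command's BSONObjBuilder
    // Two attempts at the delete: the first loses a write conflict to another client
    // (which removes _id 42 itself), and the retry matches a different document.
    var attempts = [
        { doc: { _id: 42 }, outcome: 'WriteConflictException' },
        { doc: { _id: 43 }, outcome: 'ok' }
    ];

    for (var i = 0; i < attempts.length; ++i) {
        if (appendBeforeWrite) {
            // Old behavior: mutate the result before knowing whether the delete succeeds.
            result.push(['value', attempts[i].doc]);
        }
        if (attempts[i].outcome === 'WriteConflictException') {
            continue;    // the whole attempt is retried from the top
        }
        if (!appendBeforeWrite) {
            // Fixed behavior: append only after the delete has actually succeeded.
            result.push(['value', attempts[i].doc]);
        }
        break;
    }
    return result;
}

// Old ordering: "value" appears twice, and a client reading the first occurrence would
// claim _id 42 even though this operation actually removed _id 43.
print(tojson(simulateFindAndModifyRemove(true)));
// Fixed ordering: exactly one "value" field, describing the document that was removed.
print(tojson(simulateFindAndModifyRemove(false)));

A client that reads only the first occurrence of "value" would report the document from the conflicted attempt as removed, even though another client was the one that actually deleted it; the two new workloads below are designed to catch exactly that.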
diff --git a/jstests/concurrency/fsm_workloads/findAndModify_remove_queue.js b/jstests/concurrency/fsm_workloads/findAndModify_remove_queue.js
new file mode 100644
index 00000000000..3265963a94f
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/findAndModify_remove_queue.js
@@ -0,0 +1,181 @@
+'use strict';
+
+/**
+ * findAndModify_remove_queue.js
+ *
+ * A large number of documents are inserted during the workload setup. Each thread repeatedly
+ * removes a document from the collection using the findAndModify command and stores the _id field
+ * of that document in another database. At the end of the workload, the contents of the other
+ * database are checked to verify that no two threads removed the same document.
+ *
+ * This workload was designed to reproduce SERVER-18304.
+ */
+var $config = (function() {
+
+ var data = {
+ numDocs: 1000,
+
+ // Use the workload name as the database name, since the workload name is assumed to be
+ // unique.
+ uniqueDBName: 'findAndModify_remove_queue',
+
+ newDocForInsert: function newDocForInsert(i) {
+ return { _id: i, rand: Random.rand() };
+ },
+
+ getIndexSpec: function getIndexSpec() {
+ return { rand: 1 };
+ },
+
+ opName: 'removed',
+
+ saveDocId: function saveDocId(db, collName, id) {
+ // Use a separate database to avoid conflicts with other FSM workloads.
+ var ownedDB = db.getSiblingDB(this.uniqueDBName);
+
+ var updateDoc = { $push: {} };
+ updateDoc.$push[this.opName] = id;
+
+ var res = ownedDB[collName].update({ _id: this.tid }, updateDoc, { upsert: true });
+ assertAlways.writeOK(res);
+
+ assertAlways.contains(res.nMatched, [0, 1], tojson(res));
+ if (res.nMatched === 0) {
+ if (ownedDB.getMongo().writeMode() === 'commands') {
+ assertAlways.eq(0, res.nModified, tojson(res));
+ }
+ assertAlways.eq(1, res.nUpserted, tojson(res));
+ }
+ else {
+ if (ownedDB.getMongo().writeMode() === 'commands') {
+ assertAlways.eq(1, res.nModified, tojson(res));
+ }
+ assertAlways.eq(0, res.nUpserted, tojson(res));
+ }
+ }
+ };
+
+ var states = (function() {
+
+ function remove(db, collName) {
+ var res = db.runCommand({
+ findAndModify: db[collName].getName(),
+ query: {},
+ sort: { rand: -1 },
+ remove: true
+ });
+ assertAlways.commandWorked(res);
+
+ var doc = res.value;
+ if (doc !== null) {
+ this.saveDocId.call(this, db, collName, doc._id);
+ }
+ }
+
+ return {
+ remove: remove
+ };
+
+ })();
+
+ var transitions = {
+ remove: { remove: 1 }
+ };
+
+ function setup(db, collName, cluster) {
+ var bulk = db[collName].initializeUnorderedBulkOp();
+ for (var i = 0; i < this.numDocs; ++i) {
+ var doc = this.newDocForInsert(i);
+ // Require that documents inserted by this workload use _id values that can be compared
+ // using the default JS comparator.
+ assertAlways.neq(typeof doc._id, 'object', 'default comparator of' +
+ ' Array.prototype.sort() is not well-ordered for JS objects');
+ bulk.insert(doc);
+ }
+ var res = bulk.execute();
+ assertAlways.writeOK(res);
+ assertAlways.eq(this.numDocs, res.nInserted);
+
+ assertAlways.commandWorked(db[collName].ensureIndex(this.getIndexSpec()));
+ }
+
+ function teardown(db, collName, cluster) {
+ var ownedDB = db.getSiblingDB(this.uniqueDBName);
+
+ assertWhenOwnColl(function() {
+ var docs = ownedDB[collName].find().toArray();
+ var ids = [];
+
+ for (var i = 0; i < docs.length; ++i) {
+ ids.push(docs[i][this.opName].sort());
+ }
+
+ checkForDuplicateIds(ids, this.opName);
+ }.bind(this));
+
+ var res = ownedDB.dropDatabase();
+ assertAlways.commandWorked(res);
+ assertAlways.eq(this.uniqueDBName, res.dropped);
+
+ function checkForDuplicateIds(ids, opName) {
+ var indices = new Array(ids.length);
+ for (var i = 0; i < indices.length; ++i) {
+ indices[i] = 0;
+ }
+
+ while (true) {
+ var smallest = findSmallest(ids, indices);
+ if (smallest === null) {
+ break;
+ }
+
+ var msg = 'threads ' + tojson(smallest.indices) +
+ ' claim to have ' + opName +
+ ' a document with _id = ' + tojson(smallest.value);
+ assertWhenOwnColl.eq(1, smallest.indices.length, msg);
+
+ indices[smallest.indices[0]]++;
+ }
+ }
+
+ function findSmallest(arrays, indices) {
+ var smallestValueIsSet = false;
+ var smallestValue;
+ var smallestIndices;
+
+ for (var i = 0; i < indices.length; ++i) {
+ if (indices[i] >= arrays[i].length) {
+ continue;
+ }
+
+ var value = arrays[i][indices[i]];
+ if (!smallestValueIsSet || value < smallestValue) {
+ smallestValueIsSet = true;
+ smallestValue = value;
+ smallestIndices = [i];
+ }
+ else if (value === smallestValue) {
+ smallestIndices.push(i);
+ }
+ }
+
+ if (!smallestValueIsSet) {
+ return null;
+ }
+ return { value: smallestValue, indices: smallestIndices };
+ }
+ }
+
+ var threadCount = 10;
+ return {
+ threadCount: threadCount,
+ iterations: Math.floor(data.numDocs / threadCount),
+ data: data,
+ startState: 'remove',
+ states: states,
+ transitions: transitions,
+ setup: setup,
+ teardown: teardown
+ };
+
+})();
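
For reference, the teardown check above amounts to a k-way merge over the per-thread sorted arrays of claimed _ids: repeatedly take the smallest not-yet-consumed value and count how many threads hold it. Below is a hedged, standalone restatement of that idea with made-up sample data; it collects conflicts instead of asserting, and can be run in the mongo shell to see how a doubly-claimed _id would be reported.

// Sketch only: k-way merge scan over per-thread sorted _id arrays, flagging duplicates.
function findDuplicateIds(idsPerThread) {
    var indices = idsPerThread.map(function() { return 0; });
    var duplicates = [];

    while (true) {
        // Find the smallest not-yet-consumed value and every thread that holds it.
        var smallest = null;
        var holders = [];
        for (var i = 0; i < idsPerThread.length; ++i) {
            if (indices[i] >= idsPerThread[i].length) {
                continue;
            }
            var value = idsPerThread[i][indices[i]];
            if (smallest === null || value < smallest) {
                smallest = value;
                holders = [i];
            }
            else if (value === smallest) {
                holders.push(i);
            }
        }
        if (smallest === null) {
            break;    // every array has been fully consumed
        }
        if (holders.length > 1) {
            duplicates.push({ _id: smallest, threads: holders });
        }
        holders.forEach(function(i) { indices[i]++; });
    }
    return duplicates;
}

// Threads 0 and 2 both claim _id 7, so the check reports exactly that conflict.
print(tojson(findDuplicateIds([[1, 4, 7], [2, 5], [3, 7, 9]])));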
diff --git a/jstests/concurrency/fsm_workloads/findAndModify_update_queue.js b/jstests/concurrency/fsm_workloads/findAndModify_update_queue.js
new file mode 100644
index 00000000000..e765ccd4ece
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/findAndModify_update_queue.js
@@ -0,0 +1,66 @@
+'use strict';
+
+/**
+ * findAndModify_update_queue.js
+ *
+ * A large number of documents are inserted during the workload setup. Each thread repeatedly
+ * updates a document in the collection using the findAndModify command and stores the _id field
+ * of that document in another database. At the end of the workload, the contents of the other
+ * database are checked to verify that no two threads updated the same document.
+ *
+ * This workload was designed to reproduce an issue similar to SERVER-18304 for update operations
+ * using the findAndModify command where the old version of the document is returned.
+ */
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/findAndModify_remove_queue.js'); // for $config
+
+var $config = extendWorkload($config, function($config, $super) {
+
+ // Use the workload name as the database name, since the workload name is assumed to be unique.
+ $config.data.uniqueDBName = 'findAndModify_update_queue';
+
+ $config.data.newDocForInsert = function newDocForInsert(i) {
+ return { _id: i, rand: Random.rand(), counter: 0 };
+ };
+
+ $config.data.getIndexSpec = function getIndexSpec() {
+ return { counter: 1, rand: -1 };
+ };
+
+ $config.data.opName = 'updated';
+
+ var states = (function() {
+
+ function update(db, collName) {
+ // Update the counter field to avoid matching the same document again.
+ var res = db.runCommand({
+ findAndModify: db[collName].getName(),
+ query: { counter: 0 },
+ sort: { rand: -1 },
+ update: { $inc: { counter: 1 } },
+ new: false
+ });
+ assertAlways.commandWorked(res);
+
+ var doc = res.value;
+ if (doc !== null) {
+ this.saveDocId.call(this, db, collName, doc._id);
+ }
+ }
+
+ return {
+ update: update
+ };
+
+ })();
+
+ var transitions = {
+ update: { update: 1 }
+ };
+
+ $config.startState = 'update';
+ $config.states = states;
+ $config.transitions = transitions;
+
+ return $config;
+});
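
The update variant relies on a common "claim a work item" pattern: match only unclaimed documents (counter: 0), bump the counter so the same document cannot be matched again, and read the pre-image (new: false) to learn which item was claimed. A hedged mongo shell sketch of that pattern follows; it assumes a running server and a hypothetical 'work_queue' collection, neither of which is part of this commit.

// Sketch only: claim one unclaimed document and learn its _id from the pre-image.
var res = db.runCommand({
    findAndModify: 'work_queue',
    query: { counter: 0 },               // only documents no thread has claimed yet
    sort: { rand: -1 },                  // pairs with an index like { counter: 1, rand: -1 }
    update: { $inc: { counter: 1 } },    // marks the document as claimed
    new: false                           // return the document as it was before the update
});

if (res.ok && res.value !== null) {
    // res.value is the pre-image; its _id identifies the item this client now owns.
    print('claimed work item with _id ' + tojson(res.value._id));
}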
diff --git a/src/mongo/db/commands/find_and_modify.cpp b/src/mongo/db/commands/find_and_modify.cpp
index 0f23f2fc6a1..5399595d76b 100644
--- a/src/mongo/db/commands/find_and_modify.cpp
+++ b/src/mongo/db/commands/find_and_modify.cpp
@@ -373,7 +373,7 @@ namespace mongo {
}
if ( remove ) {
- _appendHelper(result, doc, found, fields, whereCallback);
+ BSONObj oldDoc = doc.getOwned();
if ( found ) {
deleteObjects(txn, ctx.db(), ns, queryModified, PlanExecutor::YIELD_MANUAL,
true, true);
@@ -389,6 +389,7 @@ namespace mongo {
le.appendNumber( "n" , 1 );
le.done();
}
+ _appendHelper(result, oldDoc, found, fields, whereCallback);
}
else {
// update
@@ -458,8 +459,9 @@ namespace mongo {
else {
// we found it or we're updating
+ BSONObj oldDoc;
if ( ! returnNew ) {
- _appendHelper(result, doc, found, fields, whereCallback);
+ oldDoc = doc.getOwned();
}
const NamespaceString requestNs(ns);
@@ -497,6 +499,9 @@ namespace mongo {
dassert(!res.newObj.isEmpty());
_appendHelper(result, res.newObj, true, fields, whereCallback);
}
+ else {
+ _appendHelper(result, oldDoc, found, fields, whereCallback);
+ }
BSONObjBuilder le( result.subobjStart( "lastErrorObject" ) );
le.appendBool( "updatedExisting" , res.existing );