-rw-r--r-- jstests/concurrency/fsm_all.js | 20
-rw-r--r-- jstests/concurrency/fsm_all_composed.js | 33
-rw-r--r-- jstests/concurrency/fsm_all_master_slave.js | 20
-rw-r--r-- jstests/concurrency/fsm_all_replication.js | 20
-rw-r--r-- jstests/concurrency/fsm_all_sharded.js | 38
-rw-r--r-- jstests/concurrency/fsm_all_sharded_replication.js | 38
-rw-r--r-- jstests/concurrency/fsm_all_simultaneous.js | 19
-rw-r--r-- jstests/concurrency/fsm_workloads/agg_base.js | 89
-rw-r--r-- jstests/concurrency/fsm_workloads/agg_group_external.js | 54
-rw-r--r-- jstests/concurrency/fsm_workloads/agg_match.js | 38
-rw-r--r-- jstests/concurrency/fsm_workloads/agg_sort.js | 39
-rw-r--r-- jstests/concurrency/fsm_workloads/agg_sort_external.js | 50
-rw-r--r-- jstests/concurrency/fsm_workloads/auth_create_role.js | 79
-rw-r--r-- jstests/concurrency/fsm_workloads/auth_create_user.js | 72
-rw-r--r-- jstests/concurrency/fsm_workloads/auth_drop_role.js | 74
-rw-r--r-- jstests/concurrency/fsm_workloads/auth_drop_user.js | 65
-rw-r--r-- jstests/concurrency/fsm_workloads/create_capped_collection.js | 149
-rw-r--r-- jstests/concurrency/fsm_workloads/create_capped_collection_maxdocs.js | 76
-rw-r--r-- jstests/concurrency/fsm_workloads/create_collection.js | 61
-rw-r--r-- jstests/concurrency/fsm_workloads/drop_collection.js | 53
-rw-r--r-- jstests/concurrency/fsm_workloads/drop_database.js | 39
-rw-r--r-- jstests/concurrency/fsm_workloads/findAndModify_remove.js | 60
-rw-r--r-- jstests/concurrency/fsm_workloads/findAndModify_update.js | 101
-rw-r--r-- jstests/concurrency/fsm_workloads/findAndModify_update_collscan.js | 23
-rw-r--r-- jstests/concurrency/fsm_workloads/findAndModify_update_grow.js | 123
-rw-r--r-- jstests/concurrency/fsm_workloads/findAndModify_upsert.js | 126
-rw-r--r-- jstests/concurrency/fsm_workloads/findAndModify_upsert_collscan.js | 21
-rw-r--r-- jstests/concurrency/fsm_workloads/group.js | 99
-rw-r--r-- jstests/concurrency/fsm_workloads/group_cond.js | 40
-rw-r--r-- jstests/concurrency/fsm_workloads/map_reduce_inline.js | 107
-rw-r--r-- jstests/concurrency/fsm_workloads/map_reduce_merge.js | 65
-rw-r--r-- jstests/concurrency/fsm_workloads/map_reduce_merge_nonatomic.js | 63
-rw-r--r-- jstests/concurrency/fsm_workloads/map_reduce_reduce.js | 55
-rw-r--r-- jstests/concurrency/fsm_workloads/map_reduce_reduce_nonatomic.js | 61
-rw-r--r-- jstests/concurrency/fsm_workloads/map_reduce_replace.js | 57
-rw-r--r-- jstests/concurrency/fsm_workloads/map_reduce_replace_nonexistent.js | 50
-rw-r--r-- jstests/concurrency/fsm_workloads/remove_multiple_documents.js | 56
-rw-r--r-- jstests/concurrency/fsm_workloads/remove_single_document.js | 68
-rw-r--r-- jstests/concurrency/fsm_workloads/remove_single_document_eval.js | 37
-rw-r--r-- jstests/concurrency/fsm_workloads/remove_single_document_eval_nolock.js | 16
-rw-r--r-- jstests/concurrency/fsm_workloads/rename_capped_collection_chain.js | 73
-rw-r--r-- jstests/concurrency/fsm_workloads/rename_capped_collection_dbname_chain.js | 86
-rw-r--r-- jstests/concurrency/fsm_workloads/rename_capped_collection_dbname_droptarget.js | 109
-rw-r--r-- jstests/concurrency/fsm_workloads/rename_capped_collection_droptarget.js | 101
-rw-r--r-- jstests/concurrency/fsm_workloads/rename_collection_chain.js | 65
-rw-r--r-- jstests/concurrency/fsm_workloads/rename_collection_dbname_chain.js | 78
-rw-r--r-- jstests/concurrency/fsm_workloads/rename_collection_dbname_droptarget.js | 101
-rw-r--r-- jstests/concurrency/fsm_workloads/rename_collection_droptarget.js | 93
-rw-r--r-- jstests/concurrency/fsm_workloads/server_status.js | 37
-rw-r--r-- jstests/concurrency/fsm_workloads/update_array.js | 112
-rw-r--r-- jstests/concurrency/fsm_workloads/update_array_noindex.js | 13
-rw-r--r-- jstests/concurrency/fsm_workloads/update_multifield.js | 97
-rw-r--r-- jstests/concurrency/fsm_workloads/update_multifield_isolated_multiupdate.js | 38
-rw-r--r-- jstests/concurrency/fsm_workloads/update_multifield_isolated_multiupdate_noindex.js | 13
-rw-r--r-- jstests/concurrency/fsm_workloads/update_multifield_multiupdate.js | 52
-rw-r--r-- jstests/concurrency/fsm_workloads/update_multifield_multiupdate_noindex.js | 13
-rw-r--r-- jstests/concurrency/fsm_workloads/update_multifield_noindex.js | 13
-rw-r--r-- jstests/concurrency/fsm_workloads/update_rename.js | 74
-rw-r--r-- jstests/concurrency/fsm_workloads/update_rename_noindex.js | 13
-rw-r--r-- jstests/concurrency/fsm_workloads/update_replace.js | 80
-rw-r--r-- jstests/concurrency/fsm_workloads/update_replace_noindex.js | 13
-rw-r--r-- jstests/concurrency/fsm_workloads/update_simple.js | 86
-rw-r--r-- jstests/concurrency/fsm_workloads/update_simple_capped.js | 12
-rw-r--r-- jstests/concurrency/fsm_workloads/update_simple_eval.js | 33
-rw-r--r-- jstests/concurrency/fsm_workloads/update_simple_eval_nolock.js | 16
-rw-r--r-- jstests/concurrency/fsm_workloads/update_simple_noindex.js | 13
-rw-r--r-- jstests/concurrency/fsm_workloads/update_upsert_multi.js | 83
-rw-r--r-- jstests/concurrency/fsm_workloads/update_upsert_multi_noindex.js | 13
68 files changed, 3875 insertions(+), 9 deletions(-)
diff --git a/jstests/concurrency/fsm_all.js b/jstests/concurrency/fsm_all.js
index 34b0d1f5af0..ad78b6de42b 100644
--- a/jstests/concurrency/fsm_all.js
+++ b/jstests/concurrency/fsm_all.js
@@ -1,9 +1,27 @@
+'use strict';
+
load('jstests/concurrency/fsm_libs/runner.js');
var dir = 'jstests/concurrency/fsm_workloads';
var blacklist = [
- 'indexed_insert_multikey.js' // SERVER-16143
+ // Disabled due to known bugs
+ 'agg_sort_external.js', // SERVER-16700 Deadlock on WiredTiger LSM
+
+ // Disabled due to MongoDB restrictions and/or workload restrictions
+
+ // These workloads sometimes trigger 'Could not lock auth data update lock'
+ // errors because the AuthorizationManager currently waits for only five
+ // seconds to acquire the lock for authorization documents
+ 'auth_create_role.js',
+ 'auth_create_user.js',
+ 'auth_drop_role.js',
+ 'auth_drop_user.js', // SERVER-16739 OpenSSL libcrypto crash
+
+ // These workloads are disabled because of recent changes in capped
+ // collection behavior with wiredTiger (see: SERVER-16235)
+ 'create_capped_collection.js',
+ 'create_capped_collection_maxdocs.js',
].map(function(file) { return dir + '/' + file; });
runWorkloadsSerially(ls(dir).filter(function(file) {
diff --git a/jstests/concurrency/fsm_all_composed.js b/jstests/concurrency/fsm_all_composed.js
index a9ae736c27e..63390989ecd 100644
--- a/jstests/concurrency/fsm_all_composed.js
+++ b/jstests/concurrency/fsm_all_composed.js
@@ -1,13 +1,40 @@
+'use strict';
+
load('jstests/concurrency/fsm_libs/runner.js');
var dir = 'jstests/concurrency/fsm_workloads';
var blacklist = [
- 'indexed_insert_multikey.js', // SERVER-16143
- 'indexed_insert_multikey_noindex.js' // SERVER-16143
+ // Disabled due to known bugs
+ 'agg_sort_external.js', // SERVER-16700 Deadlock on WiredTiger LSM
+
+ // Disabled due to MongoDB restrictions and/or workload restrictions
+
+ // These workloads sometimes trigger 'Could not lock auth data update lock'
+ // errors because the AuthorizationManager currently waits for only five
+ // seconds to acquire the lock for authorization documents
+ 'auth_create_role.js',
+ 'auth_create_user.js',
+ 'auth_drop_role.js',
+ 'auth_drop_user.js', // SERVER-16739 OpenSSL libcrypto crash
+
+ // These workloads are disabled because of recent changes in capped
+ // collection behavior with wiredTiger (see: SERVER-16235)
+ 'create_capped_collection.js',
+ 'create_capped_collection_maxdocs.js',
+
+ // These workloads take too long when composed because eval takes a
+ // global lock and the composer doesn't honor iteration counts:
+ 'remove_single_document_eval.js',
+ 'update_simple_eval.js',
+
+ // These workloads take too long when composed because server-side JS
+ // is slow and the composer doesn't honor iteration counts:
+ 'remove_single_document_eval_nolock.js',
+ 'update_simple_eval_nolock.js',
].map(function(file) { return dir + '/' + file; });
// SERVER-16196 re-enable executing workloads
-// runMixtureOfWorkloads(ls(dir).filter(function(file) {
+// runCompositionOfWorkloads(ls(dir).filter(function(file) {
// return !Array.contains(blacklist, file);
// }));
diff --git a/jstests/concurrency/fsm_all_master_slave.js b/jstests/concurrency/fsm_all_master_slave.js
index 085c07db8dc..48340d6846a 100644
--- a/jstests/concurrency/fsm_all_master_slave.js
+++ b/jstests/concurrency/fsm_all_master_slave.js
@@ -1,9 +1,27 @@
+'use strict';
+
load('jstests/concurrency/fsm_libs/runner.js');
var dir = 'jstests/concurrency/fsm_workloads';
var blacklist = [
- 'indexed_insert_multikey.js' // SERVER-16143
+ // Disabled due to known bugs
+ 'agg_sort_external.js', // SERVER-16700 Deadlock on WiredTiger LSM
+
+ // Disabled due to MongoDB restrictions and/or workload restrictions
+
+ // These workloads sometimes trigger 'Could not lock auth data update lock'
+ // errors because the AuthorizationManager currently waits for only five
+ // seconds to acquire the lock for authorization documents
+ 'auth_create_role.js',
+ 'auth_create_user.js',
+ 'auth_drop_role.js',
+ 'auth_drop_user.js', // SERVER-16739 OpenSSL libcrypto crash
+
+ // These workloads are disabled because of recent changes in capped
+ // collection behavior with wiredTiger (see: SERVER-16235)
+ 'create_capped_collection.js',
+ 'create_capped_collection_maxdocs.js',
].map(function(file) { return dir + '/' + file; });
// SERVER-16196 re-enable executing workloads with master-slave replication
diff --git a/jstests/concurrency/fsm_all_replication.js b/jstests/concurrency/fsm_all_replication.js
index 27f9395f3ed..5b6093ec1f3 100644
--- a/jstests/concurrency/fsm_all_replication.js
+++ b/jstests/concurrency/fsm_all_replication.js
@@ -1,9 +1,27 @@
+'use strict';
+
load('jstests/concurrency/fsm_libs/runner.js');
var dir = 'jstests/concurrency/fsm_workloads';
var blacklist = [
- 'indexed_insert_multikey.js' // SERVER-16143
+ // Disabled due to known bugs
+ 'agg_sort_external.js', // SERVER-16700 Deadlock on WiredTiger LSM
+
+ // Disabled due to MongoDB restrictions and/or workload restrictions
+
+ // These workloads sometimes trigger 'Could not lock auth data update lock'
+ // errors because the AuthorizationManager currently waits for only five
+ // seconds to acquire the lock for authorization documents
+ 'auth_create_role.js',
+ 'auth_create_user.js',
+ 'auth_drop_role.js',
+ 'auth_drop_user.js', // SERVER-16739 OpenSSL libcrypto crash
+
+ // These workloads are disabled because of recent changes in capped
+ // collection behavior with wiredTiger (see: SERVER-16235)
+ 'create_capped_collection.js',
+ 'create_capped_collection_maxdocs.js',
].map(function(file) { return dir + '/' + file; });
// SERVER-16196 re-enable executing workloads against replica sets
diff --git a/jstests/concurrency/fsm_all_sharded.js b/jstests/concurrency/fsm_all_sharded.js
index 0fe5a2a1189..decb64f4bff 100644
--- a/jstests/concurrency/fsm_all_sharded.js
+++ b/jstests/concurrency/fsm_all_sharded.js
@@ -1,9 +1,45 @@
+'use strict';
+
load('jstests/concurrency/fsm_libs/runner.js');
var dir = 'jstests/concurrency/fsm_workloads';
var blacklist = [
- 'indexed_insert_multikey.js' // SERVER-16143
+ // Disabled due to known bugs
+ 'agg_match.js', // SERVER-3645 .count() can be wrong on sharded collections
+
+ // Disabled due to MongoDB restrictions and/or workload restrictions
+
+ // These workloads sometimes trigger 'Could not lock auth data update lock'
+ // errors because the AuthorizationManager currently waits for only five
+ // seconds to acquire the lock for authorization documents
+ 'auth_create_role.js',
+ 'auth_create_user.js',
+ 'auth_drop_role.js',
+ 'auth_drop_user.js', // SERVER-16739 OpenSSL libcrypto crash
+
+ // These workloads are disabled because of recent changes in capped
+ // collection behavior with wiredTiger (see: SERVER-16235)
+ 'create_capped_collection.js',
+ 'create_capped_collection_maxdocs.js',
+
+ 'agg_group_external.js', // uses >100MB of data, and is flaky
+ 'agg_sort_external.js', // uses >100MB of data, and is flaky
+ 'findAndModify_remove.js', // our findAndModify queries lack shard keys
+ 'findAndModify_update.js', // our findAndModify queries lack shard keys
+ 'findAndModify_update_collscan.js', // our findAndModify queries lack shard keys
+ 'findAndModify_update_grow.js', // our findAndModify queries lack shard keys
+ 'findAndModify_upsert.js', // our findAndModify queries lack shard keys
+ 'findAndModify_upsert_collscan.js', // our findAndModify queries lack shard keys
+ 'group.js', // the group command cannot be issued against a sharded cluster
+ 'group_cond.js', // the group command cannot be issued against a sharded cluster
+ 'indexed_insert_eval.js', // eval doesn't work with sharded collections
+ 'indexed_insert_eval_nolock.js', // eval doesn't work with sharded collections
+ 'remove_single_document.js', // our .remove(query, {justOne: true}) calls lack shard keys
+ 'remove_single_document_eval.js', // eval doesn't work with sharded collections
+ 'update_simple_eval.js', // eval doesn't work with sharded collections
+ 'update_simple_eval_nolock.js', // eval doesn't work with sharded collections
+ 'update_upsert_multi.js', // our update queries lack shard keys
].map(function(file) { return dir + '/' + file; });
// SERVER-16196 re-enable executing workloads against sharded clusters
diff --git a/jstests/concurrency/fsm_all_sharded_replication.js b/jstests/concurrency/fsm_all_sharded_replication.js
index ab4868f1153..0ffcb80d9ff 100644
--- a/jstests/concurrency/fsm_all_sharded_replication.js
+++ b/jstests/concurrency/fsm_all_sharded_replication.js
@@ -1,9 +1,45 @@
+'use strict';
+
load('jstests/concurrency/fsm_libs/runner.js');
var dir = 'jstests/concurrency/fsm_workloads';
var blacklist = [
- 'indexed_insert_multikey.js' // SERVER-16143
+ // Disabled due to known bugs
+ 'agg_match.js', // SERVER-3645 .count() can be wrong on sharded collections
+
+ // Disabled due to MongoDB restrictions and/or workload restrictions
+
+ // These workloads sometimes trigger 'Could not lock auth data update lock'
+ // errors because the AuthorizationManager currently waits for only five
+ // seconds to acquire the lock for authorization documents
+ 'auth_create_role.js',
+ 'auth_create_user.js',
+ 'auth_drop_role.js',
+ 'auth_drop_user.js', // SERVER-16739 OpenSSL libcrypto crash
+
+ // These workloads are disabled because of recent changes in capped
+ // collection behavior with wiredTiger (see: SERVER-16235)
+ 'create_capped_collection.js',
+ 'create_capped_collection_maxdocs.js',
+
+ 'agg_group_external.js', // uses >100MB of data, and is flaky
+ 'agg_sort_external.js', // uses >100MB of data, and is flaky
+ 'findAndModify_remove.js', // our findAndModify queries lack shard keys
+ 'findAndModify_update.js', // our findAndModify queries lack shard keys
+ 'findAndModify_update_collscan.js', // our findAndModify queries lack shard keys
+ 'findAndModify_update_grow.js', // our findAndModify queries lack shard keys
+ 'findAndModify_upsert.js', // our findAndModify queries lack shard keys
+ 'findAndModify_upsert_collscan.js', // our findAndModify queries lack shard keys
+ 'group.js', // the group command cannot be issued against a sharded cluster
+ 'group_cond.js', // the group command cannot be issued against a sharded cluster
+ 'indexed_insert_eval.js', // eval doesn't work with sharded collections
+ 'indexed_insert_eval_nolock.js', // eval doesn't work with sharded collections
+ 'remove_single_document.js', // our .remove(query, {justOne: true}) calls lack shard keys
+ 'remove_single_document_eval.js', // eval doesn't work with sharded collections
+ 'update_simple_eval.js', // eval doesn't work with sharded collections
+ 'update_simple_eval_nolock.js', // eval doesn't work with sharded collections
+ 'update_upsert_multi.js', // our update queries lack shard keys
].map(function(file) { return dir + '/' + file; });
// SERVER-16196 re-enable executing workloads against sharded replica sets
diff --git a/jstests/concurrency/fsm_all_simultaneous.js b/jstests/concurrency/fsm_all_simultaneous.js
index 018b5239939..a5583e6c395 100644
--- a/jstests/concurrency/fsm_all_simultaneous.js
+++ b/jstests/concurrency/fsm_all_simultaneous.js
@@ -1,9 +1,26 @@
+'use strict';
+
load('jstests/concurrency/fsm_libs/runner.js');
var dir = 'jstests/concurrency/fsm_workloads';
var blacklist = [
- 'indexed_insert_multikey.js' // SERVER-16143
+ // Disabled due to known bugs
+
+ // Disabled due to MongoDB restrictions and/or workload restrictions
+
+ // These workloads sometimes trigger 'Could not lock auth data update lock'
+ // errors because the AuthorizationManager currently waits for only five
+ // seconds to acquire the lock for authorization documents
+ 'auth_create_role.js',
+ 'auth_create_user.js',
+ 'auth_drop_role.js',
+ 'auth_drop_user.js', // SERVER-16739 OpenSSL libcrypto crash
+
+ // These workloads are disabled because of recent changes in capped
+ // collection behavior with wiredTiger (see: SERVER-16235)
+ 'create_capped_collection.js',
+ 'create_capped_collection_maxdocs.js',
].map(function(file) { return dir + '/' + file; });
// SERVER-16196 re-enable executing workloads
diff --git a/jstests/concurrency/fsm_workloads/agg_base.js b/jstests/concurrency/fsm_workloads/agg_base.js
new file mode 100644
index 00000000000..e701682e028
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/agg_base.js
@@ -0,0 +1,89 @@
+'use strict';
+
+/**
+ * agg_base.js
+ *
+ * Base workload for aggregation. Inserts a bunch of documents in its setup,
+ * then each thread runs an aggregation with an empty pipeline.
+ */
+var $config = (function() {
+
+ var data = {
+ numDocs: 1000,
+ // Use 12KB documents by default. This number is useful because 12,000 documents each of
+ // size 12KB take up more than 100MB in total, and 100MB is the in-memory limit for $sort
+ // and $group.
+ docSize: 12 * 1000
+ };
+
+ var getStringOfLength = (function() {
+ var cache = {};
+ return function getStringOfLength(size) {
+ if (!cache[size]) {
+ cache[size] = new Array(size + 1).join('x');
+ }
+ return cache[size];
+ };
+ })();
+
+ function padDoc(doc, size) {
+ // first set doc.padding so that Object.bsonsize will include the field name and other
+ // overhead
+ doc.padding = "";
+ var paddingLength = size - Object.bsonsize(doc);
+ assertAlways.lte(0, paddingLength,
+ 'document is already bigger than ' + size + ' bytes: ' + tojson(doc));
+ doc.padding = getStringOfLength(paddingLength);
+ assertAlways.eq(size, Object.bsonsize(doc));
+ return doc;
+ }
+
+ var states = {
+ query: function query(db, collName) {
+ var count = db[collName].aggregate([]).itcount();
+ assertWhenOwnColl.eq(count, this.numDocs);
+ }
+ };
+
+ var transitions = {
+ query: { query: 1 }
+ };
+
+ function setup(db, collName) {
+ // load example data
+ var bulk = db[collName].initializeUnorderedBulkOp();
+ for (var i = 0; i < this.numDocs; ++i) {
+ // note: padDoc caches the large string after allocating it once, so it's ok to call it
+ // in this loop
+ bulk.insert(padDoc({
+ flag: i % 2 ? true : false,
+ rand: Random.rand(),
+ randInt: Random.randInt(this.numDocs)
+ }, this.docSize));
+ }
+ var res = bulk.execute();
+ assertWhenOwnColl.writeOK(res);
+ assertWhenOwnColl.eq(this.numDocs, res.nInserted);
+ assertWhenOwnColl.eq(this.numDocs, db[collName].find().itcount());
+ assertWhenOwnColl.eq(this.numDocs / 2, db[collName].find({ flag: false }).itcount());
+ assertWhenOwnColl.eq(this.numDocs / 2, db[collName].find({ flag: true }).itcount());
+ }
+
+ function teardown(db, collName) {
+ assertWhenOwnColl(db[collName].drop());
+ }
+
+ return {
+ // Using few threads and iterations because each iteration is fairly expensive compared to
+ // other workloads' iterations. (Each does a collection scan over a few thousand documents
+ // rather than a few dozen documents.)
+ threadCount: 5,
+ iterations: 10,
+ states: states,
+ startState: 'query',
+ transitions: transitions,
+ data: data,
+ setup: setup,
+ teardown: teardown
+ };
+})();
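
All of the workloads added in this patch follow the $config contract that agg_base.js demonstrates: a data object copied onto each thread, named state functions, a transition table of probabilities, and optional setup/teardown hooks. As a point of reference, a minimal workload in this style might look like the sketch below (illustrative only, not part of this patch; it would run under fsm_libs/runner.js like the files here):

    'use strict';

    // Minimal illustrative workload: 4 threads start in 'insert' and then
    // move between the two states with equal probability for 10 iterations.
    var $config = (function() {
        var states = {
            insert: function insert(db, collName) {
                assertAlways.writeOK(db[collName].insert({ tid: this.tid }));
            },
            count: function count(db, collName) {
                // Each thread sees at least the documents it inserted itself
                assertWhenOwnColl.lte(1, db[collName].find({ tid: this.tid }).itcount());
            }
        };

        var transitions = {
            insert: { insert: 0.5, count: 0.5 },
            count: { insert: 0.5, count: 0.5 }
        };

        return {
            threadCount: 4,
            iterations: 10,
            states: states,
            startState: 'insert',
            transitions: transitions
        };
    })();
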
diff --git a/jstests/concurrency/fsm_workloads/agg_group_external.js b/jstests/concurrency/fsm_workloads/agg_group_external.js
new file mode 100644
index 00000000000..9a1b9ba5fbb
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/agg_group_external.js
@@ -0,0 +1,54 @@
+'use strict';
+
+/**
+ * agg_group_external.js
+ *
+ * Runs an aggregation with a $group.
+ *
+ * The data passed to the $group is greater than 100MB, which should force
+ * disk to be used.
+ */
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/agg_base.js'); // for $config
+load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropCollections
+
+var $config = extendWorkload($config, function($config, $super) {
+
+ // use enough docs to exceed 100MB, the in-memory limit for $sort and $group
+ $config.data.numDocs = 24 * 1000;
+ var MB = 1024 * 1024; // bytes
+ assertAlways.lte(100 * MB, $config.data.numDocs * $config.data.docSize);
+
+ // assume no other workload will manipulate collections with this prefix
+ $config.data.getOutputCollPrefix = function getOutputCollPrefix(collName) {
+ return collName + '_out_agg_group_external_';
+ };
+
+ $config.states.query = function query(db, collName) {
+ var otherCollName = this.getOutputCollPrefix(collName) + this.tid;
+ var cursor = db[collName].aggregate([
+ { $group: { _id: '$randInt', count: { $sum: 1 } } },
+ { $out: otherCollName }
+ ], {
+ allowDiskUse: true
+ });
+ assertAlways.eq(0, cursor.itcount());
+ assertWhenOwnColl(function() {
+ // sum the .count fields in the output coll
+ var sum = db[otherCollName].aggregate([
+ { $group: { _id: null, totalCount: { $sum: '$count' } } }
+ ]).toArray()[0].totalCount;
+ assertWhenOwnColl.eq(this.numDocs, sum);
+ }.bind(this));
+ };
+
+ $config.teardown = function teardown(db, collName) {
+ $super.teardown.apply(this, arguments);
+
+ // drop all collections with this workload's assumed-to-be-unique prefix
+ // NOTE: assumes the prefix contains no special regex chars
+ dropCollections(db, new RegExp('^' + this.getOutputCollPrefix(collName)));
+ };
+
+ return $config;
+});
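
agg_group_external.js is the first workload in the patch to build on another via extendWorkload. Sketched below, under the assumption that extendWorkload hands the parent $config to the callback both as the object to mutate and as $super for delegation, is how a child can tweak data fields and wrap individual states (illustrative only):

    load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
    load('jstests/concurrency/fsm_workloads/agg_base.js');   // for $config

    var $config = extendWorkload($config, function($config, $super) {
        $config.data.numDocs = 100; // override a field from the parent's data

        $config.states.query = function query(db, collName) {
            $super.states.query.apply(this, arguments); // delegate to agg_base.js
            assertWhenOwnColl.eq(this.numDocs, db[collName].find().itcount());
        };

        return $config;
    });
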
diff --git a/jstests/concurrency/fsm_workloads/agg_match.js b/jstests/concurrency/fsm_workloads/agg_match.js
new file mode 100644
index 00000000000..a8961db5a68
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/agg_match.js
@@ -0,0 +1,38 @@
+'use strict';
+
+/**
+ * agg_match.js
+ *
+ * Runs an aggregation with a $match that returns half the documents.
+ */
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/agg_base.js'); // for $config
+
+var $config = extendWorkload($config, function($config, $super) {
+
+ $config.data.getOutCollName = function getOutCollName(collName) {
+ return collName + '_out_agg_match';
+ };
+
+ $config.states.query = function query(db, collName) {
+ // note that all threads output to the same collection
+ var otherCollName = this.getOutCollName(collName);
+ var cursor = db[collName].aggregate([
+ { $match: { flag: true } },
+ { $out: otherCollName }
+ ]);
+ assertAlways.eq(0, cursor.itcount(), 'cursor returned by $out should always be empty');
+ // NOTE: This relies on the fast-path for .count() with no query being isolated.
+ // NOTE: There's a bug, SERVER-3645, where .count() is wrong on sharded collections, so we
+ // blacklisted this test for sharded clusters.
+ assertWhenOwnColl.eq(db[collName].count() / 2, db[otherCollName].count());
+ };
+
+ $config.teardown = function teardown(db, collName) {
+ $super.teardown.apply(this, arguments);
+
+ assertWhenOwnColl(db[this.getOutCollName(collName)].drop());
+ };
+
+ return $config;
+});
diff --git a/jstests/concurrency/fsm_workloads/agg_sort.js b/jstests/concurrency/fsm_workloads/agg_sort.js
new file mode 100644
index 00000000000..322ed5297e8
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/agg_sort.js
@@ -0,0 +1,39 @@
+'use strict';
+
+/**
+ * agg_sort.js
+ *
+ * Runs an aggregation with a $match that returns half the documents followed
+ * by a $sort on a field containing a random float.
+ */
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/agg_base.js'); // for $config
+load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropCollections
+
+var $config = extendWorkload($config, function($config, $super) {
+
+ $config.data.getOutputCollPrefix = function getOutputCollPrefix(collName) {
+ return collName + '_out_agg_sort_';
+ };
+
+ $config.states.query = function query(db, collName) {
+ var otherCollName = this.getOutputCollPrefix(collName) + this.tid;
+ var cursor = db[collName].aggregate([
+ { $match: { flag: true } },
+ { $sort: { rand: 1 } },
+ { $out: otherCollName }
+ ]);
+ assertAlways.eq(0, cursor.itcount());
+ assertWhenOwnColl.eq(db[collName].find().itcount() / 2, db[otherCollName].find().itcount());
+ };
+
+ $config.teardown = function teardown(db, collName) {
+ $super.teardown.apply(this, arguments);
+
+ // drop all collections with this workload's assumed-to-be-unique prefix
+ // NOTE: assumes the prefix contains no special regex chars
+ dropCollections(db, new RegExp('^' + this.getOutputCollPrefix(collName)));
+ };
+
+ return $config;
+});
diff --git a/jstests/concurrency/fsm_workloads/agg_sort_external.js b/jstests/concurrency/fsm_workloads/agg_sort_external.js
new file mode 100644
index 00000000000..65bfdb264bc
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/agg_sort_external.js
@@ -0,0 +1,50 @@
+'use strict';
+
+/**
+ * agg_sort_external.js
+ *
+ * Runs an aggregation with a $match that returns half the documents followed
+ * by a $sort on a field containing a random float.
+ *
+ * The data returned by the $match is greater than 100MB, which should force an external sort.
+ */
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/agg_base.js'); // for $config
+load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropCollections
+
+var $config = extendWorkload($config, function($config, $super) {
+
+ // use enough docs to exceed 100MB, the in-memory limit for $sort and $group
+ $config.data.numDocs = 24 * 1000;
+ var MB = 1024 * 1024; // bytes
+ // assert that *half* the docs exceed the in-memory limit, because the $match stage will only
+ // pass half the docs in the collection on to the $sort stage.
+ assertAlways.lte(100 * MB, $config.data.numDocs * $config.data.docSize / 2);
+
+ $config.data.getOutputCollPrefix = function getOutputCollPrefix(collName) {
+ return collName + '_out_agg_sort_external_';
+ };
+
+ $config.states.query = function query(db, collName) {
+ var otherCollName = this.getOutputCollPrefix(collName) + this.tid;
+ var cursor = db[collName].aggregate([
+ { $match: { flag: true } },
+ { $sort: { rand: 1 } },
+ { $out: otherCollName }
+ ], {
+ allowDiskUse: true
+ });
+ assertAlways.eq(0, cursor.itcount());
+ assertWhenOwnColl.eq(db[collName].find().itcount() / 2, db[otherCollName].find().itcount());
+ };
+
+ $config.teardown = function teardown(db, collName) {
+ $super.teardown.apply(this, arguments);
+
+ // drop all collections with this workload's assumed-to-be-unique prefix
+ // NOTE: assumes the prefix contains no special regex chars
+ dropCollections(db, new RegExp('^' + this.getOutputCollPrefix(collName)));
+ };
+
+ return $config;
+});
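
Both *_external workloads hinge on the allowDiskUse option: once the data flowing into a blocking stage such as $sort or $group exceeds the 100MB in-memory limit, the aggregation fails unless it is allowed to spill to disk. In the shell (collection name illustrative):

    // Without allowDiskUse, a $sort over more than 100MB of input aborts
    // with a memory-limit error rather than spilling to disk.
    var pipeline = [ { $sort: { rand: 1 } } ];
    db.example.aggregate(pipeline, { allowDiskUse: true });
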
diff --git a/jstests/concurrency/fsm_workloads/auth_create_role.js b/jstests/concurrency/fsm_workloads/auth_create_role.js
new file mode 100644
index 00000000000..53f1f03cab0
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/auth_create_role.js
@@ -0,0 +1,79 @@
+'use strict';
+
+/**
+ * auth_create_role.js
+ *
+ * Repeatedly creates new roles on a database.
+ */
+load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropRoles
+
+var $config = (function() {
+
+ var data = {
+ // Use the workload name as a prefix for the role name,
+ // since the workload name is assumed to be unique.
+ prefix: 'auth_create_role'
+ };
+
+ var states = (function() {
+
+ function uniqueRoleName(prefix, tid, num) {
+ return prefix + tid + '_' + num;
+ }
+
+ function init(db, collName) {
+ this.num = 0;
+ }
+
+ function createRole(db, collName) {
+ var roleName = uniqueRoleName(this.prefix, this.tid, this.num++);
+ db.createRole({
+ role: roleName,
+ privileges: [
+ {
+ resource: { db: db.getName(), collection: collName },
+ actions: ['update']
+ }
+ ],
+ roles: [
+ { role: 'read', db: db.getName() }
+ ]
+ });
+
+ // Verify the newly created role exists, as well as all previously created roles
+ for (var i = 0; i < this.num; ++i) {
+ var name = uniqueRoleName(this.prefix, this.tid, i);
+ var res = db.getRole(name);
+ assertAlways(res !== null, "role '" + name + "' should exist");
+ assertAlways.eq(name, res.role);
+ assertAlways(!res.isBuiltin, 'role should be user-defined');
+ }
+ }
+
+ return {
+ init: init,
+ createRole: createRole
+ };
+
+ })();
+
+ var transitions = {
+ init: { createRole: 1 },
+ createRole: { createRole: 1 }
+ };
+
+ function teardown(db, collName) {
+ var pattern = new RegExp('^' + this.prefix + '\\d+_\\d+$');
+ dropRoles(db, pattern);
+ }
+
+ return {
+ threadCount: 10,
+ iterations: 20,
+ data: data,
+ states: states,
+ transitions: transitions,
+ teardown: teardown
+ };
+
+})();
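
The teardown regex pairs with uniqueRoleName: every role a thread creates is named prefix + tid + '_' + num, so '^' + prefix + '\\d+_\\d+$' matches exactly those roles and nothing else. For example (plain JavaScript, illustrative values):

    var prefix = 'auth_create_role';
    var name = prefix + 3 + '_' + 7;                       // 'auth_create_role3_7'
    var pattern = new RegExp('^' + prefix + '\\d+_\\d+$');
    print(pattern.test(name));                             // true
    print(pattern.test(prefix + '_leftover'));             // false: no tid/num pair
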
diff --git a/jstests/concurrency/fsm_workloads/auth_create_user.js b/jstests/concurrency/fsm_workloads/auth_create_user.js
new file mode 100644
index 00000000000..c50dc65afb4
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/auth_create_user.js
@@ -0,0 +1,72 @@
+'use strict';
+
+/**
+ * auth_create_user.js
+ *
+ * Repeatedly creates new users on a database.
+ */
+load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropUsers
+
+var $config = (function() {
+
+ var data = {
+ // Use the workload name as a prefix for the username,
+ // since the workload name is assumed to be unique.
+ prefix: 'auth_create_user'
+ };
+
+ var states = (function() {
+
+ function uniqueUsername(prefix, tid, num) {
+ return prefix + tid + '_' + num;
+ }
+
+ function init(db, collName) {
+ this.num = 0;
+ }
+
+ function createUser(db, collName) {
+ var username = uniqueUsername(this.prefix, this.tid, this.num++);
+ db.createUser({
+ user: username,
+ pwd: 'password',
+ roles: ['readWrite', 'dbAdmin']
+ });
+
+ // Verify the newly created user exists, as well as all previously created users
+ for (var i = 0; i < this.num; ++i) {
+ var name = uniqueUsername(this.prefix, this.tid, i);
+            var res = db.getUser(name);
+            assertAlways(res !== null, "user '" + name + "' should exist");
+            assertAlways.eq(name, res.user);
+ assertAlways.eq(db.getName(), res.db);
+ }
+ }
+
+ return {
+ init: init,
+ createUser: createUser
+ };
+
+ })();
+
+ var transitions = {
+ init: { createUser: 1 },
+ createUser: { createUser: 1 }
+ };
+
+ function teardown(db, collName) {
+ var pattern = new RegExp('^' + this.prefix + '\\d+_\\d+$');
+ dropUsers(db, pattern);
+ }
+
+ return {
+ threadCount: 10,
+ iterations: 20,
+ data: data,
+ states: states,
+ transitions: transitions,
+ teardown: teardown
+ };
+
+})();
diff --git a/jstests/concurrency/fsm_workloads/auth_drop_role.js b/jstests/concurrency/fsm_workloads/auth_drop_role.js
new file mode 100644
index 00000000000..262de710fa2
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/auth_drop_role.js
@@ -0,0 +1,74 @@
+'use strict';
+
+/**
+ * auth_drop_role.js
+ *
+ * Repeatedly creates a new role on a database, and subsequently
+ * drops it from the database.
+ */
+load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropRoles
+
+var $config = (function() {
+
+ var data = {
+ // Use the workload name as a prefix for the role name,
+ // since the workload name is assumed to be unique.
+ prefix: 'auth_drop_role'
+ };
+
+ var states = (function() {
+
+ function uniqueRoleName(prefix, tid, num) {
+ return prefix + tid + '_' + num;
+ }
+
+ function init(db, collName) {
+ this.num = 0;
+ }
+
+ function createAndDropRole(db, collName) {
+ var roleName = uniqueRoleName(this.prefix, this.tid, this.num++);
+ db.createRole({
+ role: roleName,
+ privileges: [
+ {
+ resource: { db: db.getName(), collection: collName },
+ actions: ['remove']
+ }
+ ],
+ roles: [
+ { role: 'read', db: db.getName() }
+ ]
+ });
+
+ var res = db.getRole(roleName);
+ assertAlways(res !== null, "role '" + roleName + "' should exist");
+ assertAlways.eq(roleName, res.role);
+ assertAlways(!res.isBuiltin, 'role should be user-defined');
+
+ assertAlways(db.dropRole(roleName));
+ assertAlways.isnull(db.getRole(roleName),
+ "role '" + roleName + "' should not exist");
+ }
+
+ return {
+ init: init,
+ createAndDropRole: createAndDropRole
+ };
+
+ })();
+
+ var transitions = {
+ init: { createAndDropRole: 1 },
+ createAndDropRole: { createAndDropRole: 1 }
+ };
+
+ return {
+ threadCount: 10,
+ iterations: 20,
+ data: data,
+ states: states,
+ transitions: transitions
+ };
+
+})();
diff --git a/jstests/concurrency/fsm_workloads/auth_drop_user.js b/jstests/concurrency/fsm_workloads/auth_drop_user.js
new file mode 100644
index 00000000000..96f41eb4160
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/auth_drop_user.js
@@ -0,0 +1,65 @@
+'use strict';
+
+/**
+ * auth_drop_user.js
+ *
+ * Repeatedly creates a new user on a database, and subsequently
+ * drops the user from the database.
+ */
+var $config = (function() {
+
+ var data = {
+ // Use the workload name as a prefix for the username,
+ // since the workload name is assumed to be unique.
+ prefix: 'auth_drop_user'
+ };
+
+ var states = (function() {
+
+ function uniqueUsername(prefix, tid, num) {
+ return prefix + tid + '_' + num;
+ }
+
+ function init(db, collName) {
+ this.num = 0;
+ }
+
+ function createAndDropUser(db, collName) {
+ var username = uniqueUsername(this.prefix, this.tid, this.num++);
+ db.createUser({
+ user: username,
+ pwd: 'password',
+ roles: ['readWrite', 'dbAdmin']
+ });
+
+ var res = db.getUser(username);
+ assertAlways(res !== null, "user '" + username + "' should exist");
+ assertAlways.eq(username, res.user);
+ assertAlways.eq(db.getName(), res.db);
+
+ assertAlways(db.dropUser(username));
+ assertAlways.isnull(db.getUser(username),
+ "user '" + username + "' should not exist");
+ }
+
+ return {
+ init: init,
+ createAndDropUser: createAndDropUser
+ };
+
+ })();
+
+ var transitions = {
+ init: { createAndDropUser: 1 },
+ createAndDropUser: { createAndDropUser: 1 }
+ };
+
+ return {
+ threadCount: 10,
+ iterations: 20,
+ data: data,
+ states: states,
+ transitions: transitions
+ };
+
+})();
diff --git a/jstests/concurrency/fsm_workloads/create_capped_collection.js b/jstests/concurrency/fsm_workloads/create_capped_collection.js
new file mode 100644
index 00000000000..882ef3d2239
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/create_capped_collection.js
@@ -0,0 +1,149 @@
+'use strict';
+
+/**
+ * create_capped_collection.js
+ *
+ * Repeatedly creates a capped collection. Also verifies that truncation
+ * occurs once the collection reaches a certain size.
+ */
+load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropCollections
+
+var $config = (function() {
+
+ // Returns a document of the form { _id: ObjectId(...), field: '...' }
+ // with specified BSON size.
+ function makeDocWithSize(targetSize) {
+ var doc = { _id: new ObjectId(), field: '' };
+
+ var size = Object.bsonsize(doc);
+ assertAlways.gte(targetSize, size);
+
+ // Set 'field' as a string with enough characters
+ // to make the whole document 'size' bytes long
+ doc.field = new Array(targetSize - size + 1).join('x');
+ assertAlways.eq(targetSize, Object.bsonsize(doc));
+
+ return doc;
+ }
+
+ // Inserts a document of a certain size into the specified collection
+ // and returns its _id field.
+ function insert(db, collName, targetSize) {
+ var doc = makeDocWithSize(targetSize);
+
+ var res = db[collName].insert(doc);
+ assertAlways.writeOK(res);
+ assertAlways.eq(1, res.nInserted);
+
+ return doc._id;
+ }
+
+ // Returns an array containing the _id fields of all the documents
+ // in the collection, sorted according to their insertion order.
+ function getObjectIds(db, collName) {
+ return db[collName].find({}, { _id: 1 }).map(function(doc) {
+ return doc._id;
+ });
+ }
+
+ var data = {
+ // Use the workload name as a prefix for the collection name,
+ // since the workload name is assumed to be unique.
+ prefix: 'create_capped_collection',
+ insert: insert,
+ getObjectIds: getObjectIds
+ };
+
+ var states = (function() {
+
+ var options = {
+ capped: true,
+ size: 8192 // multiple of 256; larger than 4096 default
+ };
+
+ function uniqueCollectionName(prefix, tid, num) {
+ return prefix + tid + '_' + num;
+ }
+
+ function init(db, collName) {
+ this.num = 0;
+ }
+
+ // TODO: how to avoid having too many files open?
+ function create(db, collName) {
+ var myCollName = uniqueCollectionName(this.prefix, this.tid, this.num++);
+ assertAlways.commandWorked(db.createCollection(myCollName, options));
+
+ // Define a large document to be half the size of the capped collection,
+ // and a small document to be an eighth the size of the capped collection.
+ var largeDocSize = Math.floor(options.size / 2) - 1;
+ var smallDocSize = Math.floor(options.size / 8) - 1;
+
+ var ids = [];
+ var count;
+
+ ids.push(this.insert(db, myCollName, largeDocSize));
+ ids.push(this.insert(db, myCollName, largeDocSize));
+
+ assertWhenOwnDB.contains(db[myCollName].find().itcount(), [1, 2]);
+
+ // Insert another large document and verify that at least one
+ // truncation has occurred. There may be 1 or 2 documents in
+ // the collection, depending on the storage engine, but they
+ // should always be the most recently inserted documents.
+
+ ids.push(this.insert(db, myCollName, largeDocSize));
+
+ count = db[myCollName].find().itcount();
+ assertWhenOwnDB.contains(count, [1, 2], 'expected truncation to occur');
+ assertWhenOwnDB.eq(ids.slice(ids.length - count), this.getObjectIds(db, myCollName));
+
+ // Insert multiple small documents and verify that at least one
+ // truncation has occurred. There may be 4 or 5 documents in
+ // the collection, depending on the storage engine, but they
+ // should always be the most recently inserted documents.
+
+ ids.push(this.insert(db, myCollName, smallDocSize));
+ ids.push(this.insert(db, myCollName, smallDocSize));
+ ids.push(this.insert(db, myCollName, smallDocSize));
+ ids.push(this.insert(db, myCollName, smallDocSize));
+
+ var prevCount = count;
+ count = db[myCollName].find().itcount();
+
+ if (prevCount === 1) {
+ assertWhenOwnDB.eq(4, count, 'expected truncation to occur');
+ } else { // prevCount === 2
+ assertWhenOwnDB.eq(5, count, 'expected truncation to occur');
+ }
+
+ assertWhenOwnDB.eq(ids.slice(ids.length - count), this.getObjectIds(db, myCollName));
+ }
+
+ return {
+ init: init,
+ create: create
+ };
+
+ })();
+
+ var transitions = {
+ init: { create: 1 },
+ create: { create: 1 }
+ };
+
+ function teardown(db, collName) {
+ var pattern = new RegExp('^' + this.prefix + '\\d+_\\d+$');
+ dropCollections(db, pattern);
+ }
+
+ return {
+ threadCount: 5,
+ iterations: 20,
+ data: data,
+ states: states,
+ transitions: transitions,
+ teardown: teardown
+ };
+
+})();
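
The counts asserted in the create state follow from arithmetic on the 8192-byte cap (worked through below, purely for illustration):

    var size = 8192;
    var largeDocSize = Math.floor(size / 2) - 1; // 4095 bytes
    var smallDocSize = Math.floor(size / 8) - 1; // 1023 bytes
    // Two large docs fit: 2 * 4095 = 8190 <= 8192. A third does not:
    // 3 * 4095 = 12285 > 8192, so inserting it truncates at least one,
    // leaving 1 or 2 documents depending on how the engine reclaims space.
    print(2 * largeDocSize, 3 * largeDocSize);
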
diff --git a/jstests/concurrency/fsm_workloads/create_capped_collection_maxdocs.js b/jstests/concurrency/fsm_workloads/create_capped_collection_maxdocs.js
new file mode 100644
index 00000000000..7153a78c2c7
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/create_capped_collection_maxdocs.js
@@ -0,0 +1,76 @@
+'use strict';
+
+/**
+ * create_capped_collection_maxdocs.js
+ *
+ * Repeatedly creates a capped collection. Also verifies that truncation
+ * occurs once the collection reaches a certain size or contains a
+ * certain number of documents.
+ */
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/create_capped_collection.js'); // for $config
+
+var $config = extendWorkload($config, function($config, $super) {
+
+ // Use the workload name as a prefix for the collection name,
+ // since the workload name is assumed to be unique.
+ $config.data.prefix = 'create_capped_collection_maxdocs';
+
+ var options = {
+ capped: true,
+ size: 8192, // multiple of 256; larger than 4096 default
+ max: 3
+ };
+
+ function uniqueCollectionName(prefix, tid, num) {
+ return prefix + tid + '_' + num;
+ }
+
+ // TODO: how to avoid having too many files open?
+ function create(db, collName) {
+ var myCollName = uniqueCollectionName(this.prefix, this.tid, this.num++);
+ assertAlways.commandWorked(db.createCollection(myCollName, options));
+
+ // Define a large document to be half the size of the capped collection,
+ // and a small document to be an eighth the size of the capped collection.
+ var largeDocSize = Math.floor(options.size / 2) - 1;
+ var smallDocSize = Math.floor(options.size / 8) - 1;
+
+ var ids = [];
+ var count;
+
+ ids.push(this.insert(db, myCollName, largeDocSize));
+ ids.push(this.insert(db, myCollName, largeDocSize));
+
+ assertWhenOwnDB.contains(db[myCollName].find().itcount(), [1, 2]);
+
+ // Insert another large document and verify that at least one
+ // truncation has occurred. There may be 1 or 2 documents in
+ // the collection, depending on the storage engine, but they
+ // should always be the most recently inserted documents.
+
+ ids.push(this.insert(db, myCollName, largeDocSize));
+
+ count = db[myCollName].find().itcount();
+ assertWhenOwnDB.contains(count, [1, 2], 'expected truncation to occur due to size');
+ assertWhenOwnDB.eq(ids.slice(ids.length - count), this.getObjectIds(db, myCollName));
+
+ // Insert multiple small documents and verify that at least one
+ // truncation has occurred. There should be 3 documents in the
+ // collection, regardless of the storage engine. They should
+ // always be the most recently inserted documents.
+
+ ids.push(this.insert(db, myCollName, smallDocSize));
+ ids.push(this.insert(db, myCollName, smallDocSize));
+ ids.push(this.insert(db, myCollName, smallDocSize));
+ ids.push(this.insert(db, myCollName, smallDocSize));
+
+ count = db[myCollName].find().itcount();
+ assertWhenOwnDB.eq(3, count, 'expected truncation to occur due to number of docs');
+ assertWhenOwnDB.eq(ids.slice(ids.length - count), this.getObjectIds(db, myCollName));
+ }
+
+ $config.states.create = create;
+
+ return $config;
+});
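
The difference from the parent workload is the max: 3 cap, which truncates by document count before the byte-size cap comes into play. A shell illustration (collection name made up):

    db.capped_example.drop();
    db.createCollection('capped_example', { capped: true, size: 8192, max: 3 });
    for (var i = 0; i < 5; ++i) {
        assert.writeOK(db.capped_example.insert({ i: i }));
    }
    print(db.capped_example.find().itcount()); // 3: only the newest documents survive
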
diff --git a/jstests/concurrency/fsm_workloads/create_collection.js b/jstests/concurrency/fsm_workloads/create_collection.js
new file mode 100644
index 00000000000..47e0534b2cf
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/create_collection.js
@@ -0,0 +1,61 @@
+'use strict';
+
+/**
+ * create_collection.js
+ *
+ * Repeatedly creates a collection.
+ */
+load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropCollections
+
+var $config = (function() {
+
+ var data = {
+ // Use the workload name as a prefix for the collection name,
+ // since the workload name is assumed to be unique.
+ prefix: 'create_collection'
+ };
+
+ var states = (function() {
+
+ function uniqueCollectionName(prefix, tid, num) {
+ return prefix + tid + '_' + num;
+ }
+
+ function init(db, collName) {
+ this.num = 0;
+ }
+
+ // TODO: how to avoid having too many files open?
+ function create(db, collName) {
+ // TODO: should we ever do something different?
+ var myCollName = uniqueCollectionName(this.prefix, this.tid, this.num++);
+ assertAlways.commandWorked(db.createCollection(myCollName));
+ }
+
+ return {
+ init: init,
+ create: create
+ };
+
+ })();
+
+ var transitions = {
+ init: { create: 1 },
+ create: { create: 1 }
+ };
+
+ function teardown(db, collName) {
+ var pattern = new RegExp('^' + this.prefix + '\\d+_\\d+$');
+ dropCollections(db, pattern);
+ }
+
+ return {
+ threadCount: 5,
+ iterations: 20,
+ data: data,
+ states: states,
+ transitions: transitions,
+ teardown: teardown
+ };
+
+})();
diff --git a/jstests/concurrency/fsm_workloads/drop_collection.js b/jstests/concurrency/fsm_workloads/drop_collection.js
new file mode 100644
index 00000000000..1f92541e9fe
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/drop_collection.js
@@ -0,0 +1,53 @@
+'use strict';
+
+/**
+ * drop_collection.js
+ *
+ * Repeatedly creates and drops a collection.
+ */
+var $config = (function() {
+
+ var data = {
+ // Use the workload name as a prefix for the collection name,
+ // since the workload name is assumed to be unique.
+ prefix: 'drop_collection'
+ };
+
+ var states = (function() {
+
+ function uniqueCollectionName(prefix, tid, num) {
+ return prefix + tid + '_' + num;
+ }
+
+ function init(db, collName) {
+ this.num = 0;
+ }
+
+ function createAndDrop(db, collName) {
+ // TODO: should we ever do something different?
+ var myCollName = uniqueCollectionName(this.prefix, this.tid, this.num++);
+ assertAlways.commandWorked(db.createCollection(myCollName));
+ assertAlways(db[myCollName].drop());
+ }
+
+ return {
+ init: init,
+ createAndDrop: createAndDrop
+ };
+
+ })();
+
+ var transitions = {
+ init: { createAndDrop: 1 },
+ createAndDrop: { createAndDrop: 1 }
+ };
+
+ return {
+ threadCount: 10,
+ iterations: 10,
+ data: data,
+ states: states,
+ transitions: transitions
+ };
+
+})();
diff --git a/jstests/concurrency/fsm_workloads/drop_database.js b/jstests/concurrency/fsm_workloads/drop_database.js
new file mode 100644
index 00000000000..d98b7cc18ff
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/drop_database.js
@@ -0,0 +1,39 @@
+'use strict';
+
+/**
+ * drop_database.js
+ *
+ * Repeatedly creates and drops a database.
+ */
+var $config = (function() {
+
+ var states = {
+ init: function init(db, collName) {
+ this.uniqueDBName = 'drop_database' + this.tid;
+ },
+
+ createAndDrop: function createAndDrop(db, collName) {
+ // TODO: should we ever do something different?
+ // e.g. create multiple collections on the database and then drop?
+ var myDB = db.getSiblingDB(this.uniqueDBName);
+ assertAlways.commandWorked(myDB.createCollection(collName));
+
+ var res = myDB.dropDatabase();
+ assertAlways.commandWorked(res);
+ assertAlways.eq(this.uniqueDBName, res.dropped);
+ }
+ };
+
+ var transitions = {
+ init: { createAndDrop: 1 },
+ createAndDrop: { createAndDrop: 1 }
+ };
+
+ return {
+ threadCount: 10,
+ iterations: 10,
+ states: states,
+ transitions: transitions
+ };
+
+})();
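
createAndDrop leans on the shape of the dropDatabase result, whose 'dropped' field echoes the database name. Illustration (database name made up):

    var myDB = db.getSiblingDB('drop_database_example');
    assert.commandWorked(myDB.createCollection('c'));
    var res = myDB.dropDatabase();
    printjson(res); // e.g. { "dropped" : "drop_database_example", "ok" : 1 }
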
diff --git a/jstests/concurrency/fsm_workloads/findAndModify_remove.js b/jstests/concurrency/fsm_workloads/findAndModify_remove.js
new file mode 100644
index 00000000000..146829bfbca
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/findAndModify_remove.js
@@ -0,0 +1,60 @@
+'use strict';
+
+/**
+ * findAndModify_remove.js
+ *
+ * Each thread repeatedly inserts a document, and subsequently performs
+ * the findAndModify command to remove it.
+ */
+var $config = (function() {
+
+ var states = (function() {
+
+ function init(db, collName) {
+ this.iter = 0;
+ }
+
+ function insertAndRemove(db, collName) {
+ var res = db[collName].insert({ tid: this.tid, value: this.iter });
+ assertAlways.writeOK(res);
+ assertAlways.eq(1, res.nInserted);
+
+ res = db.runCommand({
+ findandmodify: db[collName].getName(),
+ query: { tid: this.tid },
+ sort: { iter: -1 },
+ remove: true
+ });
+ assertAlways.commandWorked(res);
+
+ var doc = res.value;
+ assertWhenOwnColl(doc !== null, 'query spec should have matched a document');
+
+ if (doc !== null) {
+ assertAlways.eq(this.tid, doc.tid);
+ assertWhenOwnColl.eq(this.iter, doc.value);
+ }
+
+ this.iter++;
+ }
+
+ return {
+ init: init,
+ insertAndRemove: insertAndRemove
+ };
+
+ })();
+
+ var transitions = {
+ init: { insertAndRemove: 1 },
+ insertAndRemove: { insertAndRemove: 1 }
+ };
+
+ return {
+ threadCount: 20,
+ iterations: 20,
+ states: states,
+ transitions: transitions
+ };
+
+})();
diff --git a/jstests/concurrency/fsm_workloads/findAndModify_update.js b/jstests/concurrency/fsm_workloads/findAndModify_update.js
new file mode 100644
index 00000000000..95e168f7482
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/findAndModify_update.js
@@ -0,0 +1,101 @@
+'use strict';
+
+/**
+ * findAndModify_update.js
+ *
+ * Each thread inserts multiple documents into a collection, and then
+ * repeatedly performs the findAndModify command. A single document is
+ * selected based on 'query' and 'sort' specifications, and updated
+ * using either the $min or $max operator.
+ */
+var $config = (function() {
+
+ var data = {
+ numDocsPerThread: 3, // >1 for 'sort' to be meaningful
+ };
+
+ var states = (function() {
+
+ function makeDoc(tid) {
+ return { _id: new ObjectId(), tid: tid, value: 0 };
+ }
+
+ function init(db, collName) {
+ for (var i = 0; i < this.numDocsPerThread; ++i) {
+ var res = db[collName].insert(makeDoc(this.tid));
+ assertAlways.writeOK(res);
+ assertAlways.eq(1, res.nInserted);
+ }
+ }
+
+ function findAndModifyAscending(db, collName) {
+ var updatedValue = this.tid;
+
+ var res = db.runCommand({
+ findandmodify: db[collName].getName(),
+ query: { tid: this.tid },
+ sort: { value: 1 },
+ update: { $max: { value: updatedValue } },
+ new: true
+ });
+ assertAlways.commandWorked(res);
+
+ var doc = res.value;
+ assertWhenOwnColl(doc !== null, 'query spec should have matched a document');
+
+ if (doc !== null) {
+ assertAlways.eq(this.tid, doc.tid);
+ assertWhenOwnColl.eq(updatedValue, doc.value);
+ }
+ }
+
+ function findAndModifyDescending(db, collName) {
+ var updatedValue = -this.tid;
+
+ var res = db.runCommand({
+ findandmodify: db[collName].getName(),
+ query: { tid: this.tid },
+ sort: { value: -1 },
+ update: { $min: { value: updatedValue } },
+ new: true
+ });
+ assertAlways.commandWorked(res);
+
+ var doc = res.value;
+ assertWhenOwnColl(doc !== null, 'query spec should have matched a document');
+
+ if (doc !== null) {
+ assertAlways.eq(this.tid, doc.tid);
+ assertWhenOwnColl.eq(updatedValue, doc.value);
+ }
+ }
+
+ return {
+ init: init,
+ findAndModifyAscending: findAndModifyAscending,
+ findAndModifyDescending: findAndModifyDescending
+ };
+
+ })();
+
+ var transitions = {
+ init: { findAndModifyAscending: 0.5, findAndModifyDescending: 0.5 },
+ findAndModifyAscending: { findAndModifyAscending: 0.5, findAndModifyDescending: 0.5 },
+ findAndModifyDescending: { findAndModifyAscending: 0.5, findAndModifyDescending: 0.5 }
+ };
+
+ function setup(db, collName) {
+ var res = db[collName].ensureIndex({ tid: 1, value: 1 });
+ assertAlways.commandWorked(res);
+ }
+
+ return {
+ threadCount: 20,
+ iterations: 20,
+ data: data,
+ states: states,
+ transitions: transitions,
+ setup: setup
+ };
+
+})();
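
The two findAndModify states rely on $max and $min being conditional writes: the update is applied only when the new value wins the comparison, so ascending threads can only raise 'value' and descending threads can only lower it. A shell illustration (collection name made up):

    db.example.drop();
    db.example.insert({ _id: 1, value: 0 });
    db.example.update({ _id: 1 }, { $max: { value: 5 } }); // 5 > 0: value becomes 5
    db.example.update({ _id: 1 }, { $max: { value: 3 } }); // 3 < 5: no change
    db.example.update({ _id: 1 }, { $min: { value: 2 } }); // 2 < 5: value becomes 2
    printjson(db.example.findOne()); // { "_id" : 1, "value" : 2 }
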
diff --git a/jstests/concurrency/fsm_workloads/findAndModify_update_collscan.js b/jstests/concurrency/fsm_workloads/findAndModify_update_collscan.js
new file mode 100644
index 00000000000..913be623360
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/findAndModify_update_collscan.js
@@ -0,0 +1,23 @@
+'use strict';
+
+/**
+ * findAndModify_update_collscan.js
+ *
+ * Each thread inserts multiple documents into a collection, and then
+ * repeatedly performs the findAndModify command. A single document is
+ * selected based on 'query' and 'sort' specifications, and updated
+ * using either the $min or $max operator.
+ *
+ * Attempts to force a collection scan by not creating an index.
+ */
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/findAndModify_update.js'); // for $config
+
+var $config = extendWorkload($config, function($config, $super) {
+
+ // Do not create the { tid: 1, value: 1 } index so that a collection
+ // scan is performed for the query and sort operations.
+ $config.setup = function setup() { };
+
+ return $config;
+});
diff --git a/jstests/concurrency/fsm_workloads/findAndModify_update_grow.js b/jstests/concurrency/fsm_workloads/findAndModify_update_grow.js
new file mode 100644
index 00000000000..edb81e8b7f2
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/findAndModify_update_grow.js
@@ -0,0 +1,123 @@
+'use strict';
+
+/**
+ * findAndModify_update_grow.js
+ *
+ * Each thread inserts a single document into a collection, and then
+ * repeatedly performs the findAndModify command. Attempts to trigger
+ * a document move by growing the size of the inserted document using
+ * the $set and $mul update operators.
+ */
+load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isMongod and isMMAPv1
+
+var $config = (function() {
+
+ var states = (function() {
+
+ // Use the workload name as the field name (since it is assumed
+ // to be unique) to avoid any potential issues with large keys
+ // and indexes on the collection.
+ var uniqueFieldName = 'findAndModify_update_grow';
+
+ function makeStringOfLength(length) {
+ return new Array(length + 1).join('x');
+ }
+
+ function makeDoc(tid) {
+ // Use 32-bit integer for representing 'length' property
+ // to ensure $mul does integer multiplication
+ var doc = { _id: new ObjectId(), tid: tid, length: new NumberInt(1) };
+ doc[uniqueFieldName] = makeStringOfLength(doc.length);
+ return doc;
+ }
+
+ function insert(db, collName) {
+ var doc = makeDoc(this.tid);
+ this.length = doc.length;
+ this.bsonsize = Object.bsonsize(doc);
+
+ var res = db[collName].insert(doc);
+ assertAlways.writeOK(res);
+ assertAlways.eq(1, res.nInserted);
+ }
+
+ function findAndModify(db, collName) {
+            // Restart with a fresh document once the current one passes 4MB;
+            // each findAndModify roughly doubles the size, so this keeps the
+            // document safely below the 16MB BSON limit
+ if (this.bsonsize > 4 * 1024 * 1024 /* 4MB */) {
+ insert.call(this, db, collName);
+ }
+
+ // Get the DiskLoc of the document before its potential move
+ var before = db[collName].find({ tid: this.tid })
+ .showDiskLoc()
+ .sort({ length: 1 }) // fetch document of smallest size
+ .limit(1)
+ .next();
+
+ // Increase the length of the 'findAndModify_update_grow' string
+ // to double the size of the overall document
+ var factor = Math.ceil(2 * this.bsonsize / this.length);
+ var updatedLength = factor * this.length;
+ var updatedValue = makeStringOfLength(updatedLength);
+
+ var update = { $set: {}, $mul: { length: factor } };
+ update.$set[uniqueFieldName] = updatedValue;
+
+ var res = db.runCommand({
+ findandmodify: db[collName].getName(),
+ query: { tid: this.tid },
+ sort: { length: 1 }, // fetch document of smallest size
+ update: update,
+ new: true
+ });
+ assertAlways.commandWorked(res);
+
+ var doc = res.value;
+ assertWhenOwnColl(doc !== null, 'query spec should have matched a document');
+
+ if (doc === null) {
+ return;
+ }
+
+ assertAlways.eq(this.tid, doc.tid);
+ assertWhenOwnColl.eq(updatedValue, doc[uniqueFieldName]);
+ assertWhenOwnColl.eq(updatedLength, doc.length);
+
+ this.length = updatedLength;
+ this.bsonsize = Object.bsonsize(doc);
+
+ // Get the DiskLoc of the document after its potential move
+ var after = db[collName].find({ _id: before._id }).showDiskLoc().next();
+
+ var status = db.serverStatus();
+ if (isMongod(status) && isMMAPv1(status)) {
+ // Since the document has at least doubled in size, and the default
+ // allocation strategy of mmapv1 is to use power of two sizes, the
+ // document will have always moved
+ assertWhenOwnColl.neq(before.$diskLoc, after.$diskLoc,
+ 'document should have moved');
+ }
+ }
+
+ return {
+ insert: insert,
+ findAndModify: findAndModify,
+ };
+
+ })();
+
+ var transitions = {
+ insert: { findAndModify: 1 },
+ findAndModify: { findAndModify: 1 }
+ };
+
+ return {
+ threadCount: 20,
+ iterations: 20,
+ states: states,
+ startState: 'insert',
+ transitions: transitions
+ };
+
+})();
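
The factor computed in findAndModify is chosen so that the padded string alone outweighs the whole current document, roughly doubling the BSON size each round (worked example with illustrative numbers):

    var bsonsize = 1000, length = 10;              // pretend current state
    var factor = Math.ceil(2 * bsonsize / length); // 200
    var updatedLength = factor * length;           // 2000 'x' characters
    // The new string alone is about 2 * bsonsize bytes, so the document at
    // least doubles; starting near 1KB it crosses the 4MB restart threshold
    // after roughly a dozen rounds.
    print(factor, updatedLength);
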
diff --git a/jstests/concurrency/fsm_workloads/findAndModify_upsert.js b/jstests/concurrency/fsm_workloads/findAndModify_upsert.js
new file mode 100644
index 00000000000..7e3e64fb845
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/findAndModify_upsert.js
@@ -0,0 +1,126 @@
+'use strict';
+
+/**
+ * findAndModify_upsert.js
+ *
+ * Each thread repeatedly performs the findAndModify command, specifying
+ * upsert as either true or false. A single document is selected (or
+ * created) based on the 'query' specification, and updated using the
+ * $push operator.
+ */
+var $config = (function() {
+
+ var data = {
+ sort: false
+ };
+
+ var states = (function() {
+
+ // Returns true if the specified array is sorted in ascending order,
+ // and false otherwise.
+ function isSorted(arr) {
+ for (var i = 0; i < arr.length - 1; ++i) {
+ if (arr[i] > arr[i + 1]) {
+ return false;
+ }
+ }
+
+ return true;
+ }
+
+ function init(db, collName) {
+ this.iter = 0;
+
+ // Need to guarantee that an upsert has occurred prior to an update,
+ // which is not enforced by the transition table under composition
+ upsert.call(this, db, collName);
+ }
+
+ function upsert(db, collName) {
+ var updatedValue = this.iter++;
+
+ // Use a query specification that does not match any existing documents
+ var query = { _id: new ObjectId(), tid: this.tid };
+
+ var cmdObj = {
+ findandmodify: db[collName].getName(),
+ query: query,
+ update: { $setOnInsert: { values: [updatedValue] } },
+ new: true,
+ upsert: true
+ };
+
+ if (this.sort) {
+ cmdObj.sort = this.sort;
+ }
+
+ var res = db.runCommand(cmdObj);
+ assertAlways.commandWorked(res);
+
+ var doc = res.value;
+ assertAlways(doc !== null, 'a document should have been inserted');
+
+ assertAlways((function() {
+ assertAlways.eq(this.tid, doc.tid);
+ assertAlways(Array.isArray(doc.values), 'expected values to be an array');
+ assertAlways.eq(1, doc.values.length);
+ assertAlways.eq(updatedValue, doc.values[0]);
+ }).bind(this));
+ }
+
+ function update(db, collName) {
+ var updatedValue = this.iter++;
+
+ var cmdObj = {
+ findandmodify: db[collName].getName(),
+ query: { tid: this.tid },
+ update: { $push: { values: updatedValue } },
+ new: true,
+ upsert: false
+ };
+
+ if (this.sort) {
+ cmdObj.sort = this.sort;
+ }
+
+ var res = db.runCommand(cmdObj);
+ assertAlways.commandWorked(res);
+
+ var doc = res.value;
+ assertWhenOwnColl(doc !== null, 'query spec should have matched a document');
+
+ if (doc !== null) {
+ assertAlways.eq(this.tid, doc.tid);
+ assertWhenOwnColl(Array.isArray(doc.values), 'expected values to be an array');
+ assertWhenOwnColl(function() {
+ assertWhenOwnColl.gte(doc.values.length, 2);
+ assertWhenOwnColl.eq(updatedValue, doc.values[doc.values.length - 1]);
+ assertWhenOwnColl(isSorted(doc.values),
+ 'expected values to be sorted: ' + tojson(doc.values));
+ });
+ }
+ }
+
+ return {
+ init: init,
+ upsert: upsert,
+ update: update
+ };
+
+ })();
+
+ var transitions = {
+ init: { upsert: 0.1, update: 0.9 },
+ upsert: { upsert: 0.1, update: 0.9 },
+ update: { upsert: 0.1, update: 0.9 }
+ };
+
+ return {
+ threadCount: 20,
+ iterations: 20,
+ data: data,
+ states: states,
+ transitions: transitions
+ };
+
+})();
diff --git a/jstests/concurrency/fsm_workloads/findAndModify_upsert_collscan.js b/jstests/concurrency/fsm_workloads/findAndModify_upsert_collscan.js
new file mode 100644
index 00000000000..200de213235
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/findAndModify_upsert_collscan.js
@@ -0,0 +1,21 @@
+'use strict';
+
+/**
+ * findAndModify_upsert_collscan.js
+ *
+ * Each thread repeatedly performs the findAndModify command, specifying
+ * upsert as either true or false. A single document is selected (or
+ * created) based on the 'query' specification, and updated using the
+ * $push operator.
+ *
+ * Forces 'sort' to perform a collection scan by using $natural.
+ */
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/findAndModify_upsert.js'); // for $config
+
+var $config = extendWorkload($config, function($config, $super) {
+
+ $config.data.sort = { $natural: 1 };
+
+ return $config;
+});
diff --git a/jstests/concurrency/fsm_workloads/group.js b/jstests/concurrency/fsm_workloads/group.js
new file mode 100644
index 00000000000..efbe82bda57
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/group.js
@@ -0,0 +1,99 @@
+'use strict';
+
+/**
+ * group.js
+ *
+ * Inserts 1000 documents, each with a field set to a random float in [0, 1).
+ * The group command is then used to partition these documents into one of
+ * ten equal-width buckets:
+ *   [0, 0.1), [0.1, 0.2), ..., [0.8, 0.9), [0.9, 1.0)
+ *
+ * The float field is not indexed.
+ *
+ */
+
+var $config = (function() {
+
+ function generateGroupCmdObj(collName) {
+ return {
+ group: {
+ ns: collName,
+ initial: { bucketCount: 0, bucketSum: 0},
+ $keyf: function $keyf(doc) {
+ // place doc.rand into appropriate bucket
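+                    // doc.rand is uniform in [0, 1), so bucket is an integer in [1, 10]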
+ return { bucket: Math.floor(doc.rand * 10) + 1 };
+ },
+ $reduce: function $reduce(curr, result) {
+ result.bucketCount++;
+ result.bucketSum += curr.rand;
+ },
+ finalize: function finalize(result) {
+ // calculate average float value per bucket
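+                    // (bucketCount || 1) is purely defensive: every key group
+                    // that reaches finalize contains at least one document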
+ result.bucketAvg = result.bucketSum / (result.bucketCount || 1);
+ }
+ }
+ };
+ }
+
+ function sumBucketCount(arr) {
+ return arr.reduce(function(a, b) {
+ return a + b.bucketCount;
+ }, 0);
+ }
+
+ var data = {
+ numDocs: 1000,
+ generateGroupCmdObj: generateGroupCmdObj,
+ sumBucketCount: sumBucketCount
+ };
+
+ var states = (function() {
+
+ function group(db, collName) {
+ var res = db.runCommand(this.generateGroupCmdObj(collName));
+ assertWhenOwnColl.commandWorked(res);
+
+ assertWhenOwnColl.lte(res.count, this.numDocs);
+ assertWhenOwnColl.lte(res.keys, 10);
+ assertWhenOwnColl(function() {
+ assertWhenOwnColl.lte(res.retval.length, 10);
+ assertWhenOwnColl.eq(this.sumBucketCount(res.retval), res.count);
+ }.bind(this));
+ }
+
+ return {
+ group: group
+ };
+
+ })();
+
+ var transitions = {
+ group: { group: 1 }
+ };
+
+ function setup(db, collName) {
+ var bulk = db[collName].initializeUnorderedBulkOp();
+ for (var i = 0; i < this.numDocs; ++i) {
+ bulk.insert({ rand: Random.rand() });
+ }
+ var res = bulk.execute();
+ assertAlways.writeOK(res);
+ assertAlways.eq(this.numDocs, res.nInserted);
+ }
+
+ function teardown(db, collName) {
+ assertWhenOwnColl(db[collName].drop());
+ }
+
+ return {
+ threadCount: 5,
+ iterations: 10, // fewer iterations because each 'group' operation is fairly expensive
+ startState: 'group',
+ states: states,
+ transitions: transitions,
+ data: data,
+ setup: setup,
+ teardown: teardown
+ };
+
+})();
diff --git a/jstests/concurrency/fsm_workloads/group_cond.js b/jstests/concurrency/fsm_workloads/group_cond.js
new file mode 100644
index 00000000000..02a4f41703a
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/group_cond.js
@@ -0,0 +1,40 @@
+'use strict';
+
+/**
+ * group_cond.js
+ *
+ * Inserts 1000 documents, each with a field set to a random float in [0, 1).
+ * The group command is then used to partition these documents into one of
+ * ten equal-width buckets:
+ *   [0, 0.1), [0.1, 0.2), ..., [0.8, 0.9), [0.9, 1.0)
+ *
+ * To increase testing coverage, the float field is indexed and
+ * a 'cond' document is supplied to the group command.
+ *
+ */
+
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/group.js'); // for $config
+
+var $config = extendWorkload($config, function($config, $super) {
+ $config.setup = function setup(db, collName) {
+ $super.setup.apply(this, arguments);
+ assertAlways.commandWorked(db[collName].ensureIndex({ rand: 1 }));
+ };
+
+ $config.states.group = function group(db, collName) {
+ var cmdObj = this.generateGroupCmdObj(collName);
+ cmdObj.group.cond = { rand: { $gte: 0.5 } };
+ var res = db.runCommand(cmdObj);
+ assertWhenOwnColl.commandWorked(res);
+
+ assertWhenOwnColl.lte(res.count, this.numDocs);
+ assertWhenOwnColl.lte(res.keys, 5);
+ assertWhenOwnColl(function() {
+ assertWhenOwnColl.lte(res.retval.length, 5);
+ assertWhenOwnColl.eq(this.sumBucketCount(res.retval), res.count);
+ }.bind(this));
+ };
+
+ return $config;
+});
diff --git a/jstests/concurrency/fsm_workloads/map_reduce_inline.js b/jstests/concurrency/fsm_workloads/map_reduce_inline.js
new file mode 100644
index 00000000000..c535c25a372
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/map_reduce_inline.js
@@ -0,0 +1,107 @@
+'use strict';
+
+/**
+ * map_reduce_inline.js
+ *
+ * Generates some random data and inserts it into a collection. Runs a
+ * map-reduce command over the collection that computes the frequency
+ * counts of the 'value' field in memory.
+ *
+ * Used as the base workload for the other map-reduce workloads.
+ */
+var $config = (function() {
+
+ function mapper() {
+ if (this.hasOwnProperty('key') && this.hasOwnProperty('value')) {
+ var obj = {};
+ obj[this.value] = 1;
+ emit(this.key, obj);
+ }
+ }
+
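+    // Each mapped value is a partial histogram, e.g. { '3': 1 }. The reducer
+    // merges the histograms emitted for a key, so { '3': 2 } and { '7': 1 }
+    // combine to { '3': 2, '7': 1 }. Map-reduce may re-reduce partial results,
+    // so the merge must be associative and commutative, which summing counts is.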
+ function reducer(key, values) {
+ var res = {};
+
+ values.forEach(function(obj) {
+ Object.keys(obj).forEach(function(value) {
+ if (!res.hasOwnProperty(value)) {
+ res[value] = 0;
+ }
+ res[value] += obj[value];
+ });
+ });
+
+ return res;
+ }
+
+ function finalizer(key, reducedValue) {
+ return reducedValue;
+ }
+
+ var data = {
+ numDocs: 2000,
+ mapper: mapper,
+ reducer: reducer,
+ finalizer: finalizer
+ };
+
+ var states = (function() {
+
+ function init(db, collName) {
+ // no-op
+ // other workloads that extend this workload use this method
+ }
+
+ function mapReduce(db, collName) {
+ var options = {
+ finalize: this.finalizer,
+ out: { inline: 1 }
+ };
+
+ var res = db[collName].mapReduce(this.mapper, this.reducer, options);
+ assertAlways.commandWorked(res);
+ }
+
+ return {
+ init: init,
+ mapReduce: mapReduce
+ };
+
+ })();
+
+ var transitions = {
+ init: { mapReduce: 1 },
+ mapReduce: { mapReduce: 1 }
+ };
+
+ function makeDoc(keyLimit, valueLimit) {
+ return {
+ _id: new ObjectId(),
+ key: Random.randInt(keyLimit),
+ value: Random.randInt(valueLimit)
+ };
+ }
+
+ function setup(db, collName) {
+ var bulk = db[collName].initializeUnorderedBulkOp();
+ for (var i = 0; i < this.numDocs; ++i) {
+            // TODO: this assumes the collection has no unique indexes, since
+            //       the randomly generated keys and values may collide
+ var doc = makeDoc(this.numDocs / 100, this.numDocs / 10);
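+            // with numDocs = 2000, keys take ~20 distinct values and values ~200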
+ bulk.insert(doc);
+ }
+
+ var res = bulk.execute();
+ assertAlways.writeOK(res);
+ assertAlways.eq(this.numDocs, res.nInserted);
+ }
+
+ return {
+ threadCount: 5,
+ iterations: 10,
+ data: data,
+ states: states,
+ transitions: transitions,
+ setup: setup
+ };
+
+})();
diff --git a/jstests/concurrency/fsm_workloads/map_reduce_merge.js b/jstests/concurrency/fsm_workloads/map_reduce_merge.js
new file mode 100644
index 00000000000..02138304f96
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/map_reduce_merge.js
@@ -0,0 +1,65 @@
+'use strict';
+
+/**
+ * map_reduce_merge.js
+ *
+ * Generates some random data and inserts it into a collection. Runs a
+ * map-reduce command over the collection that computes the frequency
+ * counts of the 'value' field and stores the results in an existing
+ * collection on a separate database.
+ *
+ * Uses the "merge" action to combine the results with the contents
+ * of the output collection.
+ *
+ * Writes the results of each thread to the same collection.
+ */
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/map_reduce_inline.js'); // for $config
+
+var $config = extendWorkload($config, function($config, $super) {
+
+ // Use the workload name as the database name,
+ // since the workload name is assumed to be unique.
+ var uniqueDBName = 'map_reduce_merge';
+
+ $config.states.init = function init(db, collName) {
+ $super.states.init.apply(this, arguments);
+
+ this.outDBName = uniqueDBName;
+ };
+
+ $config.states.mapReduce = function mapReduce(db, collName) {
+ var outDB = db.getSiblingDB(this.outDBName);
+ var fullName = outDB[collName].getFullName();
+ assertAlways(outDB[collName].exists() !== null,
+ "output collection '" + fullName + "' should exist");
+
+ // Have all threads combine their results into the same collection
+ var options = {
+ finalize: this.finalizer,
+ out: {
+ merge: collName,
+ db: this.outDBName
+ }
+ };
+
+ var res = db[collName].mapReduce(this.mapper, this.reducer, options);
+ assertAlways.commandWorked(res);
+ };
+
+ $config.setup = function setup(db, collName) {
+ $super.setup.apply(this, arguments);
+
+ var outDB = db.getSiblingDB(uniqueDBName);
+ assertAlways.commandWorked(outDB.createCollection(collName));
+ };
+
+ $config.teardown = function teardown(db, collName) {
+ var outDB = db.getSiblingDB(uniqueDBName);
+ var res = outDB.dropDatabase();
+ assertAlways.commandWorked(res);
+ assertAlways.eq(uniqueDBName, res.dropped);
+ };
+
+ return $config;
+});
diff --git a/jstests/concurrency/fsm_workloads/map_reduce_merge_nonatomic.js b/jstests/concurrency/fsm_workloads/map_reduce_merge_nonatomic.js
new file mode 100644
index 00000000000..a54de881be7
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/map_reduce_merge_nonatomic.js
@@ -0,0 +1,63 @@
+'use strict';
+
+/**
+ * map_reduce_merge_nonatomic.js
+ *
+ * Generates some random data and inserts it into a collection. Runs a
+ * map-reduce command over the collection that computes the frequency
+ * counts of the 'value' field and stores the results in an existing
+ * collection on a separate database.
+ *
+ * Uses the "merge" action to combine the results with the contents
+ * of the output collection.
+ *
+ * Specifies nonAtomic=true.
+ */
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/map_reduce_inline.js'); // for $config
+load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropDatabases
+
+var $config = extendWorkload($config, function($config, $super) {
+
+ // Use the workload name as a prefix for the database name,
+ // since the workload name is assumed to be unique.
+ var prefix = 'map_reduce_merge_nonatomic';
+
+ function uniqueDBName(prefix, tid) {
+ return prefix + tid;
+ }
+
+ $config.states.init = function init(db, collName) {
+ $super.states.init.apply(this, arguments);
+
+ this.outDBName = uniqueDBName(prefix, this.tid);
+ var outDB = db.getSiblingDB(this.outDBName);
+ assertAlways.commandWorked(outDB.createCollection(collName));
+ };
+
+ $config.states.mapReduce = function mapReduce(db, collName) {
+ var outDB = db.getSiblingDB(this.outDBName);
+ var fullName = outDB[collName].getFullName();
+ assertAlways(outDB[collName].exists() !== null,
+ "output collection '" + fullName + "' should exist");
+
+ var options = {
+ finalize: this.finalizer,
+ out: {
+ merge: collName,
+ db: this.outDBName,
+ nonAtomic: true
+ }
+ };
+
+ var res = db[collName].mapReduce(this.mapper, this.reducer, options);
+ assertAlways.commandWorked(res);
+ };
+
+ $config.teardown = function teardown(db, collName) {
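+        // drop every database created in init (workload prefix + tid)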
+ var pattern = new RegExp('^' + prefix + '\\d+$');
+ dropDatabases(db, pattern);
+ };
+
+ return $config;
+});
diff --git a/jstests/concurrency/fsm_workloads/map_reduce_reduce.js b/jstests/concurrency/fsm_workloads/map_reduce_reduce.js
new file mode 100644
index 00000000000..6a46cbe08f0
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/map_reduce_reduce.js
@@ -0,0 +1,55 @@
+'use strict';
+
+/**
+ * map_reduce_reduce.js
+ *
+ * Generates some random data and inserts it into a collection. Runs a
+ * map-reduce command over the collection that computes the frequency
+ * counts of the 'value' field and stores the results in an existing
+ * collection.
+ *
+ * Uses the "reduce" action to combine the results with the contents
+ * of the output collection.
+ */
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/map_reduce_inline.js'); // for $config
+load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropCollections
+
+var $config = extendWorkload($config, function($config, $super) {
+
+ // Use the workload name as a prefix for the collection name,
+ // since the workload name is assumed to be unique.
+ var prefix = 'map_reduce_reduce';
+
+ function uniqueCollectionName(prefix, tid) {
+ return prefix + tid;
+ }
+
+ $config.states.init = function init(db, collName) {
+ $super.states.init.apply(this, arguments);
+
+ this.outCollName = uniqueCollectionName(prefix, this.tid);
+ assertAlways.commandWorked(db.createCollection(this.outCollName));
+ };
+
+ $config.states.mapReduce = function mapReduce(db, collName) {
+ var fullName = db[this.outCollName].getFullName();
+ assertAlways(db[this.outCollName].exists() !== null,
+ "output collection '" + fullName + "' should exist");
+
+ var options = {
+ finalize: this.finalizer,
+ out: { reduce: this.outCollName }
+ };
+
+ var res = db[collName].mapReduce(this.mapper, this.reducer, options);
+ assertAlways.commandWorked(res);
+ };
+
+ $config.teardown = function teardown(db, collName) {
+ var pattern = new RegExp('^' + prefix + '\\d+$');
+ dropCollections(db, pattern);
+ };
+
+ return $config;
+});
diff --git a/jstests/concurrency/fsm_workloads/map_reduce_reduce_nonatomic.js b/jstests/concurrency/fsm_workloads/map_reduce_reduce_nonatomic.js
new file mode 100644
index 00000000000..e63511b44d0
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/map_reduce_reduce_nonatomic.js
@@ -0,0 +1,61 @@
+'use strict';
+
+/**
+ * map_reduce_reduce_nonatomic.js
+ *
+ * Generates some random data and inserts it into a collection. Runs a
+ * map-reduce command over the collection that computes the frequency
+ * counts of the 'value' field and stores the results in an existing
+ * collection.
+ *
+ * Uses the "reduce" action to combine the results with the contents
+ * of the output collection.
+ *
+ * Specifies nonAtomic=true and writes the results of each thread to
+ * the same collection.
+ */
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/map_reduce_inline.js'); // for $config
+
+var $config = extendWorkload($config, function($config, $super) {
+
+ // Use the workload name as the collection name,
+ // since the workload name is assumed to be unique.
+ var uniqueCollectionName = 'map_reduce_reduce_nonatomic';
+
+ $config.states.init = function init(db, collName) {
+ $super.states.init.apply(this, arguments);
+
+ this.outCollName = uniqueCollectionName;
+ };
+
+ $config.states.mapReduce = function mapReduce(db, collName) {
+ var fullName = db[this.outCollName].getFullName();
+ assertAlways(db[this.outCollName].exists() !== null,
+ "output collection '" + fullName + "' should exist");
+
+ // Have all threads combine their results into the same collection
+ var options = {
+ finalize: this.finalizer,
+ out: {
+ reduce: this.outCollName,
+ nonAtomic: true
+ }
+ };
+
+ var res = db[collName].mapReduce(this.mapper, this.reducer, options);
+ assertAlways.commandWorked(res);
+ };
+
+ $config.setup = function setup(db, collName) {
+ $super.setup.apply(this, arguments);
+
+ assertAlways.commandWorked(db.createCollection(uniqueCollectionName));
+ };
+
+ $config.teardown = function teardown(db, collName) {
+ assertAlways(db[uniqueCollectionName].drop());
+ };
+
+ return $config;
+});
diff --git a/jstests/concurrency/fsm_workloads/map_reduce_replace.js b/jstests/concurrency/fsm_workloads/map_reduce_replace.js
new file mode 100644
index 00000000000..7d00614eb23
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/map_reduce_replace.js
@@ -0,0 +1,57 @@
+'use strict';
+
+/**
+ * map_reduce_replace.js
+ *
+ * Generates some random data and inserts it into a collection. Runs a
+ * map-reduce command over the collection that computes the frequency
+ * counts of the 'value' field and stores the results in an existing
+ * collection.
+ *
+ * Uses the "replace" action to overwrite the entire contents of the
+ * collection.
+ */
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/map_reduce_inline.js'); // for $config
+load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropCollections
+
+var $config = extendWorkload($config, function($config, $super) {
+
+ // Use the workload name as a prefix for the collection name,
+ // since the workload name is assumed to be unique.
+ var prefix = 'map_reduce_replace';
+
+ function uniqueCollectionName(prefix, tid) {
+ return prefix + tid;
+ }
+
+ $config.states.init = function init(db, collName) {
+ $super.states.init.apply(this, arguments);
+
+ this.outCollName = uniqueCollectionName(prefix, this.tid);
+ assertAlways.commandWorked(db.createCollection(this.outCollName));
+ };
+
+ $config.states.mapReduce = function mapReduce(db, collName) {
+ var fullName = db[this.outCollName].getFullName();
+ assertAlways(db[this.outCollName].exists() !== null,
+ "output collection '" + fullName + "' should exist");
+
+ var options = {
+ finalize: this.finalizer,
+ out: { replace: this.outCollName },
+ query: { key: { $exists: true }, value: { $exists: true } },
+ sort: { _id: -1 } // sort key must be an existing index
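+            // (the _id index is created automatically, so it always exists)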
+ };
+
+ var res = db[collName].mapReduce(this.mapper, this.reducer, options);
+ assertAlways.commandWorked(res);
+ };
+
+ $config.teardown = function teardown(db, collName) {
+ var pattern = new RegExp('^' + prefix + '\\d+$');
+ dropCollections(db, pattern);
+ };
+
+ return $config;
+});
diff --git a/jstests/concurrency/fsm_workloads/map_reduce_replace_nonexistent.js b/jstests/concurrency/fsm_workloads/map_reduce_replace_nonexistent.js
new file mode 100644
index 00000000000..89deddcd71b
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/map_reduce_replace_nonexistent.js
@@ -0,0 +1,50 @@
+'use strict';
+
+/**
+ * map_reduce_replace_nonexistent.js
+ *
+ * Generates some random data and inserts it into a collection. Runs a
+ * map-reduce command over the collection that computes the frequency
+ * counts of the 'value' field and stores the results in a new collection.
+ *
+ * Uses the "replace" action to write the results to a nonexistent
+ * output collection.
+ */
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/map_reduce_inline.js'); // for $config
+load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropCollections
+
+var $config = extendWorkload($config, function($config, $super) {
+
+ // Use the workload name as a prefix for the collection name,
+ // since the workload name is assumed to be unique.
+ var prefix = 'map_reduce_replace_nonexistent';
+
+ function uniqueCollectionName(prefix, tid) {
+ return prefix + tid;
+ }
+
+ $config.states.mapReduce = function mapReduce(db, collName) {
+ var outCollName = uniqueCollectionName(prefix, this.tid);
+ var fullName = db[outCollName].getFullName();
+ assertAlways.isnull(db[outCollName].exists(),
+ "output collection '" + fullName + "' should not exist");
+
+ var options = {
+ finalize: this.finalizer,
+ out: { replace: outCollName },
+ query: { key: { $exists: true }, value: { $exists: true } }
+ };
+
+ var res = db[collName].mapReduce(this.mapper, this.reducer, options);
+ assertAlways.commandWorked(res);
+ assertAlways(db[outCollName].drop());
+ };
+
+ $config.teardown = function teardown(db, collName) {
+ var pattern = new RegExp('^' + prefix + '\\d+$');
+ dropCollections(db, pattern);
+ };
+
+ return $config;
+});
diff --git a/jstests/concurrency/fsm_workloads/remove_multiple_documents.js b/jstests/concurrency/fsm_workloads/remove_multiple_documents.js
new file mode 100644
index 00000000000..c04cb198f50
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/remove_multiple_documents.js
@@ -0,0 +1,56 @@
+'use strict';
+
+/**
+ * remove_multiple_documents.js
+ *
+ * Each thread first inserts 200 documents, each containing its thread id and
+ * a random float. On each iteration, the thread then removes some of the
+ * documents it inserted.
+ */
+var $config = (function() {
+
+ var states = {
+ init: function init(db, collName) {
+ this.numDocs = 200;
+ for (var i = 0; i < this.numDocs; ++i) {
+ db[collName].insert({ tid: this.tid, rand: Random.rand() });
+ }
+ },
+
+ remove: function remove(db, collName) {
+ // choose a random interval to remove documents from
+ var low = Random.rand();
+ var high = low + 0.05 * Random.rand();
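+            // the interval is at most 0.05 wide, so each remove deletes only a
+            // small fraction of this thread's remaining documents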
+
+ var res = db[collName].remove({
+ tid: this.tid,
+ rand: { $gte: low, $lte: high }
+ });
+ assertAlways.gte(res.nRemoved, 0);
+ assertAlways.lte(res.nRemoved, this.numDocs);
+ this.numDocs -= res.nRemoved;
+ },
+
+ count: function count(db, collName) {
+ var numDocs = db[collName].find({ tid: this.tid }).itcount();
+ assertWhenOwnColl.eq(this.numDocs, numDocs);
+ }
+ };
+
+ var transitions = {
+ init: { count: 1 },
+ count: { remove: 1 },
+ remove: {
+ remove: 0.825,
+ count: 0.125
+ }
+ };
+
+ return {
+ threadCount: 10,
+ iterations: 20,
+ states: states,
+ transitions: transitions
+ };
+
+})();
diff --git a/jstests/concurrency/fsm_workloads/remove_single_document.js b/jstests/concurrency/fsm_workloads/remove_single_document.js
new file mode 100644
index 00000000000..167e742a193
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/remove_single_document.js
@@ -0,0 +1,68 @@
+'use strict';
+
+/**
+ * remove_single_document.js
+ *
+ * Repeatedly remove a document from the collection.
+ */
+var $config = (function() {
+
+ var states = {
+ remove: function remove(db, collName) {
+ // try removing a random document
+ var res = this.doRemove(db,
+ collName,
+ { rand: { $gte: Random.rand() } },
+ { justOne: true });
+ assertAlways.lte(res.nRemoved, 1);
+ if (res.nRemoved === 0) {
+ // The above remove() can fail to remove a document when the random value
+ // in the query is greater than any of the random values in the collection.
+ // When that situation occurs, just remove an arbitrary document instead.
+ res = this.doRemove(db,
+ collName,
+ {},
+ { justOne: true });
+ assertAlways.lte(res.nRemoved, 1);
+ }
+ this.assertResult(res);
+ }
+ };
+
+ var transitions = {
+ remove: { remove: 1 }
+ };
+
+ var threadCount = 10;
+ var iterations = 20;
+
+ function setup(db, collName) {
+ // insert enough documents so that each thread can remove exactly one per iteration
+ var num = threadCount * iterations;
+ for (var i = 0; i < num; ++i) {
+ db[collName].insert({ i: i, rand: Random.rand() });
+ }
+ assertWhenOwnColl.eq(db[collName].find().itcount(), num);
+ }
+
+ return {
+ threadCount: threadCount,
+ iterations: iterations,
+ states: states,
+ transitions: transitions,
+ setup: setup,
+ data: {
+ doRemove: function doRemove(db, collName, query, options) {
+ return db[collName].remove(query, options);
+ },
+ assertResult: function assertResult(res) {
+ assertAlways.writeOK(res);
+ // when running on its own collection,
+ // this iteration should remove exactly one document
+ assertWhenOwnColl.eq(1, res.nRemoved, tojson(res));
+ }
+ },
+ startState: 'remove'
+ };
+
+})();
diff --git a/jstests/concurrency/fsm_workloads/remove_single_document_eval.js b/jstests/concurrency/fsm_workloads/remove_single_document_eval.js
new file mode 100644
index 00000000000..ee6411c9c74
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/remove_single_document_eval.js
@@ -0,0 +1,37 @@
+'use strict';
+
+/**
+ * remove_single_document_eval.js
+ *
+ * Runs remove_single_document using the eval command.
+ */
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/remove_single_document.js'); // for $config
+
+var $config = extendWorkload($config, function($config, $super) {
+
+ $config.data.doRemove = function doRemove(db, collName, query, options) {
+ var evalResult = db.runCommand({
+ eval: function(f, collName, query, options) {
+ return tojson(f(db, collName, query, options));
+ },
+ args: [$super.data.doRemove, collName, query, options],
+ nolock: this.nolock
+ });
+ assertAlways.commandWorked(evalResult);
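+        // the WriteResult was serialized with tojson inside eval; parse it back
+        // into a plain object so callers can read fields like nRemoved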
+ var res = JSON.parse(evalResult.retval);
+ return res;
+ };
+
+ $config.data.assertResult = function assertResult(res) {
+ assertWhenOwnColl.eq(1, res.nRemoved, tojson(res));
+ };
+
+ $config.data.nolock = false;
+
+ // scale down threadCount and iterations because eval takes a global lock
+ $config.threadCount = 5;
+ $config.iterations = 10;
+
+ return $config;
+});
diff --git a/jstests/concurrency/fsm_workloads/remove_single_document_eval_nolock.js b/jstests/concurrency/fsm_workloads/remove_single_document_eval_nolock.js
new file mode 100644
index 00000000000..c5aba00523e
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/remove_single_document_eval_nolock.js
@@ -0,0 +1,16 @@
+'use strict';
+
+/**
+ * remove_single_document_eval_nolock.js
+ *
+ * Runs remove_single_document_eval with the eval option { nolock: true }.
+ */
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/remove_single_document_eval.js'); // for $config
+
+var $config = extendWorkload($config, function($config, $super) {
+
+ $config.data.nolock = true;
+
+ return $config;
+});
diff --git a/jstests/concurrency/fsm_workloads/rename_capped_collection_chain.js b/jstests/concurrency/fsm_workloads/rename_capped_collection_chain.js
new file mode 100644
index 00000000000..7dbdd25dfe8
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/rename_capped_collection_chain.js
@@ -0,0 +1,73 @@
+'use strict';
+
+/**
+ * rename_capped_collection_chain.js
+ *
+ * Creates a capped collection and then repeatedly executes the renameCollection
+ * command against it. The previous "to" namespace is used as the next "from"
+ * namespace.
+ */
+load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropCollections
+
+var $config = (function() {
+
+ var data = {
+ // Use the workload name as a prefix for the collection name,
+ // since the workload name is assumed to be unique.
+ prefix: 'rename_capped_collection_chain'
+ };
+
+ var states = (function() {
+
+ function uniqueCollectionName(prefix, tid, num) {
+ return prefix + tid + '_' + num;
+ }
+
+ function init(db, collName) {
+ this.fromCollName = uniqueCollectionName(this.prefix, this.tid, 0);
+ this.num = 1;
+
+ var options = {
+ capped: true,
+ size: 4096
+ };
+
+ assertAlways.commandWorked(db.createCollection(this.fromCollName, options));
+ assertWhenOwnDB(db[this.fromCollName].isCapped());
+ }
+
+ function rename(db, collName) {
+ var toCollName = uniqueCollectionName(this.prefix, this.tid, this.num++);
+ var res = db[this.fromCollName].renameCollection(toCollName, false /* dropTarget */);
+ assertWhenOwnDB.commandWorked(res);
+ assertWhenOwnDB(db[toCollName].isCapped());
+ this.fromCollName = toCollName;
+ }
+
+ return {
+ init: init,
+ rename: rename
+ };
+
+ })();
+
+ var transitions = {
+ init: { rename: 1 },
+ rename: { rename: 1 }
+ };
+
+ function teardown(db, collName) {
+ var pattern = new RegExp('^' + this.prefix + '\\d+_\\d+$');
+ dropCollections(db, pattern);
+ }
+
+ return {
+ threadCount: 10,
+ iterations: 20,
+ data: data,
+ states: states,
+ transitions: transitions,
+ teardown: teardown
+ };
+
+})();
diff --git a/jstests/concurrency/fsm_workloads/rename_capped_collection_dbname_chain.js b/jstests/concurrency/fsm_workloads/rename_capped_collection_dbname_chain.js
new file mode 100644
index 00000000000..b59ceddcc13
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/rename_capped_collection_dbname_chain.js
@@ -0,0 +1,86 @@
+'use strict';
+
+/**
+ * rename_capped_collection_dbname_chain.js
+ *
+ * Creates a capped collection and then repeatedly executes the renameCollection
+ * command against it, specifying a different database name in the namespace.
+ * The previous "to" namespace is used as the next "from" namespace.
+ */
+load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropDatabases
+
+var $config = (function() {
+
+ var data = {
+ // Use the workload name as a prefix for the collection name,
+ // since the workload name is assumed to be unique.
+ prefix: 'rename_capped_collection_dbname_chain'
+ };
+
+ var states = (function() {
+
+ function uniqueDBName(prefix, tid, num) {
+ return prefix + tid + '_' + num;
+ }
+
+ function init(db, collName) {
+ this.fromDBName = uniqueDBName(this.prefix, this.tid, 0);
+ this.num = 1;
+ var fromDB = db.getSiblingDB(this.fromDBName);
+
+ var options = {
+ capped: true,
+ size: 4096
+ };
+
+ assertAlways.commandWorked(fromDB.createCollection(collName, options));
+ assertAlways(fromDB[collName].isCapped());
+ }
+
+ function rename(db, collName) {
+ var toDBName = uniqueDBName(this.prefix, this.tid, this.num++);
+ var renameCommand = {
+ renameCollection: this.fromDBName + '.' + collName,
+ to: toDBName + '.' + collName,
+ dropTarget: false
+ };
+
+ assertAlways.commandWorked(db.adminCommand(renameCommand));
+ assertAlways(db.getSiblingDB(toDBName)[collName].isCapped());
+
+ // Remove any files associated with the "from" namespace
+ // to avoid having too many files open
+ var res = db.getSiblingDB(this.fromDBName).dropDatabase();
+ assertAlways.commandWorked(res);
+ assertAlways.eq(this.fromDBName, res.dropped);
+
+ this.fromDBName = toDBName;
+ }
+
+ return {
+ init: init,
+ rename: rename
+ };
+
+ })();
+
+ var transitions = {
+ init: { rename: 1 },
+ rename: { rename: 1 }
+ };
+
+ function teardown(db, collName) {
+ var pattern = new RegExp('^' + this.prefix + '\\d+_\\d+$');
+ dropDatabases(db, pattern);
+ }
+
+ return {
+ threadCount: 10,
+ iterations: 20,
+ data: data,
+ states: states,
+ transitions: transitions,
+ teardown: teardown
+ };
+
+})();
diff --git a/jstests/concurrency/fsm_workloads/rename_capped_collection_dbname_droptarget.js b/jstests/concurrency/fsm_workloads/rename_capped_collection_dbname_droptarget.js
new file mode 100644
index 00000000000..a64dbfbbd97
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/rename_capped_collection_dbname_droptarget.js
@@ -0,0 +1,109 @@
+'use strict';
+
+/**
+ * rename_capped_collection_dbname_droptarget.js
+ *
+ * Creates a capped collection and then repeatedly executes the renameCollection
+ * command against it, specifying a different database name in the namespace.
+ * Inserts documents into the "to" namespace and specifies dropTarget=true.
+ */
+load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropDatabases
+
+var $config = (function() {
+
+ var data = {
+ // Use the workload name as a prefix for the collection name,
+ // since the workload name is assumed to be unique.
+ prefix: 'rename_capped_collection_dbname_droptarget'
+ };
+
+ var states = (function() {
+
+ var options = {
+ capped: true,
+ size: 4096
+ };
+
+ function uniqueDBName(prefix, tid, num) {
+ return prefix + tid + '_' + num;
+ }
+
+ function insert(db, collName, numDocs) {
+ for (var i = 0; i < numDocs; ++i) {
+ var res = db[collName].insert({});
+ assertAlways.writeOK(res);
+ assertAlways.eq(1, res.nInserted);
+ }
+ }
+
+ function init(db, collName) {
+ var num = 0;
+ this.fromDBName = uniqueDBName(this.prefix, this.tid, num++);
+ this.toDBName = uniqueDBName(this.prefix, this.tid, num++);
+
+ var fromDB = db.getSiblingDB(this.fromDBName);
+ assertAlways.commandWorked(fromDB.createCollection(collName, options));
+ assertAlways(fromDB[collName].isCapped());
+ }
+
+ function rename(db, collName) {
+ var fromDB = db.getSiblingDB(this.fromDBName);
+ var toDB = db.getSiblingDB(this.toDBName);
+
+ // Clear out the "from" collection and insert 'fromCollCount' documents
+ var fromCollCount = 7;
+ assertAlways(fromDB[collName].drop());
+ assertAlways.commandWorked(fromDB.createCollection(collName, options));
+ assertAlways(fromDB[collName].isCapped());
+ insert(fromDB, collName, fromCollCount);
+
+ var toCollCount = 4;
+ assertAlways.commandWorked(toDB.createCollection(collName, options));
+ insert(toDB, collName, toCollCount);
+
+ // Verify that 'fromCollCount' documents exist in the "to" collection
+ // after the rename occurs
+ var renameCommand = {
+ renameCollection: this.fromDBName + '.' + collName,
+ to: this.toDBName + '.' + collName,
+ dropTarget: true
+ };
+
+ assertAlways.commandWorked(fromDB.adminCommand(renameCommand));
+ assertAlways(toDB[collName].isCapped());
+ assertAlways.eq(fromCollCount, toDB[collName].find().itcount());
+ assertAlways.eq(0, fromDB[collName].find().itcount());
+
+ // Swap "to" and "from" collections for next execution
+ var temp = this.fromDBName;
+ this.fromDBName = this.toDBName;
+ this.toDBName = temp;
+ }
+
+ return {
+ init: init,
+ rename: rename
+ };
+
+ })();
+
+ var transitions = {
+ init: { rename: 1 },
+ rename: { rename: 1 }
+ };
+
+ function teardown(db, collName) {
+ var pattern = new RegExp('^' + this.prefix + '\\d+_\\d+$');
+ dropDatabases(db, pattern);
+ }
+
+ return {
+ threadCount: 10,
+ iterations: 20,
+ data: data,
+ states: states,
+ transitions: transitions,
+ teardown: teardown
+ };
+
+})();
diff --git a/jstests/concurrency/fsm_workloads/rename_capped_collection_droptarget.js b/jstests/concurrency/fsm_workloads/rename_capped_collection_droptarget.js
new file mode 100644
index 00000000000..f7caff7a20f
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/rename_capped_collection_droptarget.js
@@ -0,0 +1,101 @@
+'use strict';
+
+/**
+ * rename_capped_collection_droptarget.js
+ *
+ * Creates a capped collection and then repeatedly executes the renameCollection
+ * command against it. Inserts documents into the "to" namespace and specifies
+ * dropTarget=true.
+ */
+load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropCollections
+
+var $config = (function() {
+
+ var data = {
+ // Use the workload name as a prefix for the collection name,
+ // since the workload name is assumed to be unique.
+ prefix: 'rename_capped_collection_droptarget'
+ };
+
+ var states = (function() {
+
+ var options = {
+ capped: true,
+ size: 4096
+ };
+
+ function uniqueCollectionName(prefix, tid, num) {
+ return prefix + tid + '_' + num;
+ }
+
+ function insert(db, collName, numDocs) {
+ for (var i = 0; i < numDocs; ++i) {
+ var res = db[collName].insert({});
+ assertAlways.writeOK(res);
+ assertAlways.eq(1, res.nInserted);
+ }
+ }
+
+ function init(db, collName) {
+ var num = 0;
+ this.fromCollName = uniqueCollectionName(this.prefix, this.tid, num++);
+ this.toCollName = uniqueCollectionName(this.prefix, this.tid, num++);
+
+ assertAlways.commandWorked(db.createCollection(this.fromCollName, options));
+ assertWhenOwnDB(db[this.fromCollName].isCapped());
+ }
+
+ function rename(db, collName) {
+ // Clear out the "from" collection and insert 'fromCollCount' documents
+ var fromCollCount = 7;
+ assertWhenOwnDB(db[this.fromCollName].drop());
+ assertAlways.commandWorked(db.createCollection(this.fromCollName, options));
+ assertWhenOwnDB(db[this.fromCollName].isCapped());
+ insert(db, this.fromCollName, fromCollCount);
+
+ var toCollCount = 4;
+ assertAlways.commandWorked(db.createCollection(this.toCollName, options));
+ insert(db, this.toCollName, toCollCount);
+
+ // Verify that 'fromCollCount' documents exist in the "to" collection
+ // after the rename occurs
+ var res = db[this.fromCollName].renameCollection(this.toCollName,
+ true /* dropTarget */);
+ assertWhenOwnDB.commandWorked(res);
+ assertWhenOwnDB(db[this.toCollName].isCapped());
+ assertWhenOwnDB.eq(fromCollCount, db[this.toCollName].find().itcount());
+ assertWhenOwnDB.eq(0, db[this.fromCollName].find().itcount());
+
+ // Swap "to" and "from" collections for next execution
+ var temp = this.fromCollName;
+ this.fromCollName = this.toCollName;
+ this.toCollName = temp;
+ }
+
+ return {
+ init: init,
+ rename: rename
+ };
+
+ })();
+
+ var transitions = {
+ init: { rename: 1 },
+ rename: { rename: 1 }
+ };
+
+ function teardown(db, collName) {
+ var pattern = new RegExp('^' + this.prefix + '\\d+_\\d+$');
+ dropCollections(db, pattern);
+ }
+
+ return {
+ threadCount: 10,
+ iterations: 20,
+ data: data,
+ states: states,
+ transitions: transitions,
+ teardown: teardown
+ };
+
+})();
diff --git a/jstests/concurrency/fsm_workloads/rename_collection_chain.js b/jstests/concurrency/fsm_workloads/rename_collection_chain.js
new file mode 100644
index 00000000000..414d9060989
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/rename_collection_chain.js
@@ -0,0 +1,65 @@
+'use strict';
+
+/**
+ * rename_collection_chain.js
+ *
+ * Creates a collection and then repeatedly executes the renameCollection
+ * command against it. The previous "to" namespace is used as the next "from"
+ * namespace.
+ */
+load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropCollections
+
+var $config = (function() {
+
+ var data = {
+ // Use the workload name as a prefix for the collection name,
+ // since the workload name is assumed to be unique.
+ prefix: 'rename_collection_chain'
+ };
+
+ var states = (function() {
+
+ function uniqueCollectionName(prefix, tid, num) {
+ return prefix + tid + '_' + num;
+ }
+
+ function init(db, collName) {
+ this.fromCollName = uniqueCollectionName(this.prefix, this.tid, 0);
+ this.num = 1;
+ assertAlways.commandWorked(db.createCollection(this.fromCollName));
+ }
+
+ function rename(db, collName) {
+ var toCollName = uniqueCollectionName(this.prefix, this.tid, this.num++);
+ var res = db[this.fromCollName].renameCollection(toCollName, false /* dropTarget */);
+ assertWhenOwnDB.commandWorked(res);
+ this.fromCollName = toCollName;
+ }
+
+ return {
+ init: init,
+ rename: rename
+ };
+
+ })();
+
+ var transitions = {
+ init: { rename: 1 },
+ rename: { rename: 1 }
+ };
+
+ function teardown(db, collName) {
+ var pattern = new RegExp('^' + this.prefix + '\\d+_\\d+$');
+ dropCollections(db, pattern);
+ }
+
+ return {
+ threadCount: 10,
+ iterations: 20,
+ data: data,
+ states: states,
+ transitions: transitions,
+ teardown: teardown
+ };
+
+})();
diff --git a/jstests/concurrency/fsm_workloads/rename_collection_dbname_chain.js b/jstests/concurrency/fsm_workloads/rename_collection_dbname_chain.js
new file mode 100644
index 00000000000..2be2b172106
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/rename_collection_dbname_chain.js
@@ -0,0 +1,78 @@
+'use strict';
+
+/**
+ * rename_collection_dbname_chain.js
+ *
+ * Creates a collection and then repeatedly executes the renameCollection
+ * command against it, specifying a different database name in the namespace.
+ * The previous "to" namespace is used as the next "from" namespace.
+ */
+load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropDatabases
+
+var $config = (function() {
+
+ var data = {
+ // Use the workload name as a prefix for the collection name,
+ // since the workload name is assumed to be unique.
+ prefix: 'rename_collection_dbname_chain'
+ };
+
+ var states = (function() {
+
+ function uniqueDBName(prefix, tid, num) {
+ return prefix + tid + '_' + num;
+ }
+
+ function init(db, collName) {
+ this.fromDBName = uniqueDBName(this.prefix, this.tid, 0);
+ this.num = 1;
+ var fromDB = db.getSiblingDB(this.fromDBName);
+ assertAlways.commandWorked(fromDB.createCollection(collName));
+ }
+
+ function rename(db, collName) {
+ var toDBName = uniqueDBName(this.prefix, this.tid, this.num++);
+ var renameCommand = {
+ renameCollection: this.fromDBName + '.' + collName,
+ to: toDBName + '.' + collName,
+ dropTarget: false
+ };
+
+ assertAlways.commandWorked(db.adminCommand(renameCommand));
+
+ // Remove any files associated with the "from" namespace
+ // to avoid having too many files open
+ var res = db.getSiblingDB(this.fromDBName).dropDatabase();
+ assertAlways.commandWorked(res);
+ assertAlways.eq(this.fromDBName, res.dropped);
+
+ this.fromDBName = toDBName;
+ }
+
+ return {
+ init: init,
+ rename: rename
+ };
+
+ })();
+
+ var transitions = {
+ init: { rename: 1 },
+ rename: { rename: 1 }
+ };
+
+ function teardown(db, collName) {
+ var pattern = new RegExp('^' + this.prefix + '\\d+_\\d+$');
+ dropDatabases(db, pattern);
+ }
+
+ return {
+ threadCount: 10,
+ iterations: 20,
+ data: data,
+ states: states,
+ transitions: transitions,
+ teardown: teardown
+ };
+
+})();
diff --git a/jstests/concurrency/fsm_workloads/rename_collection_dbname_droptarget.js b/jstests/concurrency/fsm_workloads/rename_collection_dbname_droptarget.js
new file mode 100644
index 00000000000..063cc45f1dc
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/rename_collection_dbname_droptarget.js
@@ -0,0 +1,101 @@
+'use strict';
+
+/**
+ * rename_collection_dbname_droptarget.js
+ *
+ * Creates a collection and then repeatedly executes the renameCollection
+ * command against it, specifying a different database name in the namespace.
+ * Inserts documents into the "to" namespace and specifies dropTarget=true.
+ */
+load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropDatabases
+
+var $config = (function() {
+
+ var data = {
+ // Use the workload name as a prefix for the collection name,
+ // since the workload name is assumed to be unique.
+ prefix: 'rename_collection_dbname_droptarget'
+ };
+
+ var states = (function() {
+
+ function uniqueDBName(prefix, tid, num) {
+ return prefix + tid + '_' + num;
+ }
+
+ function insert(db, collName, numDocs) {
+ for (var i = 0; i < numDocs; ++i) {
+ var res = db[collName].insert({});
+ assertAlways.writeOK(res);
+ assertAlways.eq(1, res.nInserted);
+ }
+ }
+
+ function init(db, collName) {
+ var num = 0;
+ this.fromDBName = uniqueDBName(this.prefix, this.tid, num++);
+ this.toDBName = uniqueDBName(this.prefix, this.tid, num++);
+
+ var fromDB = db.getSiblingDB(this.fromDBName);
+ assertAlways.commandWorked(fromDB.createCollection(collName));
+ }
+
+ function rename(db, collName) {
+ var fromDB = db.getSiblingDB(this.fromDBName);
+ var toDB = db.getSiblingDB(this.toDBName);
+
+ // Clear out the "from" collection and insert 'fromCollCount' documents
+ var fromCollCount = 7;
+ assertAlways(fromDB[collName].drop());
+ assertAlways.commandWorked(fromDB.createCollection(collName));
+ insert(fromDB, collName, fromCollCount);
+
+ var toCollCount = 4;
+ assertAlways.commandWorked(toDB.createCollection(collName));
+ insert(toDB, collName, toCollCount);
+
+ // Verify that 'fromCollCount' documents exist in the "to" collection
+ // after the rename occurs
+ var renameCommand = {
+ renameCollection: this.fromDBName + '.' + collName,
+ to: this.toDBName + '.' + collName,
+ dropTarget: true
+ };
+
+ assertAlways.commandWorked(fromDB.adminCommand(renameCommand));
+ assertAlways.eq(fromCollCount, toDB[collName].find().itcount());
+ assertAlways.eq(0, fromDB[collName].find().itcount());
+
+ // Swap "to" and "from" collections for next execution
+ var temp = this.fromDBName;
+ this.fromDBName = this.toDBName;
+ this.toDBName = temp;
+ }
+
+ return {
+ init: init,
+ rename: rename
+ };
+
+ })();
+
+ var transitions = {
+ init: { rename: 1 },
+ rename: { rename: 1 }
+ };
+
+ function teardown(db, collName) {
+ var pattern = new RegExp('^' + this.prefix + '\\d+_\\d+$');
+ dropDatabases(db, pattern);
+ }
+
+ return {
+ threadCount: 10,
+ iterations: 20,
+ data: data,
+ states: states,
+ transitions: transitions,
+ teardown: teardown
+ };
+
+})();
diff --git a/jstests/concurrency/fsm_workloads/rename_collection_droptarget.js b/jstests/concurrency/fsm_workloads/rename_collection_droptarget.js
new file mode 100644
index 00000000000..57e6cf0776e
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/rename_collection_droptarget.js
@@ -0,0 +1,93 @@
+'use strict';
+
+/**
+ * rename_collection_droptarget.js
+ *
+ * Creates a collection and then repeatedly executes the renameCollection
+ * command against it. Inserts documents into the "to" namespace and specifies
+ * dropTarget=true.
+ */
+load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropCollections
+
+var $config = (function() {
+
+ var data = {
+ // Use the workload name as a prefix for the collection name,
+ // since the workload name is assumed to be unique.
+ prefix: 'rename_collection_droptarget'
+ };
+
+ var states = (function() {
+
+ function uniqueCollectionName(prefix, tid, num) {
+ return prefix + tid + '_' + num;
+ }
+
+ function insert(db, collName, numDocs) {
+ for (var i = 0; i < numDocs; ++i) {
+ var res = db[collName].insert({});
+ assertAlways.writeOK(res);
+ assertAlways.eq(1, res.nInserted);
+ }
+ }
+
+ function init(db, collName) {
+ var num = 0;
+ this.fromCollName = uniqueCollectionName(this.prefix, this.tid, num++);
+ this.toCollName = uniqueCollectionName(this.prefix, this.tid, num++);
+
+ assertAlways.commandWorked(db.createCollection(this.fromCollName));
+ }
+
+ function rename(db, collName) {
+ // Clear out the "from" collection and insert 'fromCollCount' documents
+ var fromCollCount = 7;
+ assertWhenOwnDB(db[this.fromCollName].drop());
+ assertAlways.commandWorked(db.createCollection(this.fromCollName));
+ insert(db, this.fromCollName, fromCollCount);
+
+ var toCollCount = 4;
+ assertAlways.commandWorked(db.createCollection(this.toCollName));
+ insert(db, this.toCollName, toCollCount);
+
+ // Verify that 'fromCollCount' documents exist in the "to" collection
+ // after the rename occurs
+ var res = db[this.fromCollName].renameCollection(this.toCollName,
+ true /* dropTarget */);
+ assertWhenOwnDB.commandWorked(res);
+ assertWhenOwnDB.eq(fromCollCount, db[this.toCollName].find().itcount());
+ assertWhenOwnDB.eq(0, db[this.fromCollName].find().itcount());
+
+ // Swap "to" and "from" collections for next execution
+ var temp = this.fromCollName;
+ this.fromCollName = this.toCollName;
+ this.toCollName = temp;
+ }
+
+ return {
+ init: init,
+ rename: rename
+ };
+
+ })();
+
+ var transitions = {
+ init: { rename: 1 },
+ rename: { rename: 1 }
+ };
+
+ function teardown(db, collName) {
+ var pattern = new RegExp('^' + this.prefix + '\\d+_\\d+$');
+ dropCollections(db, pattern);
+ }
+
+ return {
+ threadCount: 10,
+ iterations: 20,
+ data: data,
+ states: states,
+ transitions: transitions,
+ teardown: teardown
+ };
+
+})();
diff --git a/jstests/concurrency/fsm_workloads/server_status.js b/jstests/concurrency/fsm_workloads/server_status.js
new file mode 100644
index 00000000000..70de8395f49
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/server_status.js
@@ -0,0 +1,37 @@
+'use strict';
+
+/**
+ * server_status.js
+ *
+ * Simply checks that the serverStatus command works
+ */
+var $config = (function() {
+
+ var states = {
+ status: function status(db, collName) {
+ var opts = {
+                opcountersRepl: 1,
+ oplog: 1,
+ rangeDeleter: 1,
+ repl: 1,
+ security: 1,
+ tcmalloc: 1
+ };
+            var res = db.serverStatus(opts);
+ assertAlways.commandWorked(res);
+ assertAlways(res.hasOwnProperty('version'));
+ }
+ };
+
+ var transitions = {
+ status: { status: 1 }
+ };
+
+ return {
+ threadCount: 10,
+ iterations: 20,
+ states: states,
+ startState: 'status',
+ transitions: transitions
+ };
+})();
diff --git a/jstests/concurrency/fsm_workloads/update_array.js b/jstests/concurrency/fsm_workloads/update_array.js
new file mode 100644
index 00000000000..b73aa94a258
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/update_array.js
@@ -0,0 +1,112 @@
+'use strict';
+
+/**
+ * update_array.js
+ *
+ * Each thread does a $push or $pull on a random doc, pushing or pulling its
+ * thread id. After each push or pull, the thread does a .findOne() to verify
+ * that its thread id is present or absent (respectively). This is correct even
+ * though other threads in the workload may be modifying the array between the
+ * update and the find, because thread ids are unique.
+ */
+var $config = (function() {
+
+ var states = (function() {
+
+ // db: explicitly passed to avoid accidentally using the global `db`
+ // res: WriteResult
+ // nModifiedPossibilities: array of allowed values for res.nModified
+ function assertUpdateSuccess(db, res, nModifiedPossibilities) {
+ assertAlways.eq(0, res.nUpserted, tojson(res));
+ assertWhenOwnColl.eq(1, res.nMatched, tojson(res));
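+            // legacy write mode does not report nModified, so only check it
+            // when the shell is using write commands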
+ if (db.getMongo().writeMode() === 'commands') {
+ assertWhenOwnColl.contains(res.nModified, nModifiedPossibilities, tojson(res));
+ }
+ }
+
+ function doPush(db, collName, docIndex, value) {
+ var res = db[collName].update({ _id: docIndex }, { $push: { arr: value } });
+
+ // assert the update reported success
+ assertUpdateSuccess(db, res, [1]);
+
+ // find the doc and make sure it was updated
+ var doc = db[collName].findOne({ _id: docIndex });
+ assertWhenOwnColl(function() {
+ assertWhenOwnColl.neq(null, doc);
+ assertWhenOwnColl(doc.hasOwnProperty('arr'),
+ 'doc should have contained a field named "arr": ' + tojson(doc));
+ assertWhenOwnColl.contains(value, doc.arr,
+ "doc.arr doesn't contain value (" + value +
+ ') after $push: ' + tojson(doc.arr));
+ });
+ }
+
+ function doPull(db, collName, docIndex, value) {
+ var res = db[collName].update({ _id: docIndex }, { $pull: { arr: value } });
+
+ // assert the update reported success
+ assertUpdateSuccess(db, res, [0, 1]);
+
+ // find the doc and make sure it was updated
+ var doc = db[collName].findOne({ _id: docIndex });
+ assertWhenOwnColl(function() {
+ assertWhenOwnColl.neq(null, doc);
+ assertWhenOwnColl.eq(-1, doc.arr.indexOf(value),
+ 'doc.arr contains removed value (' + value +
+ ') after $pull: ' + tojson(doc.arr));
+ });
+ }
+
+ return {
+ push: function push(db, collName) {
+ var docIndex = Random.randInt(this.numDocs);
+ var value = this.tid;
+
+ doPush(db, collName, docIndex, value);
+ },
+
+ pull: function pull(db, collName) {
+ var docIndex = Random.randInt(this.numDocs);
+ var value = this.tid;
+
+ doPull(db, collName, docIndex, value);
+ }
+ };
+
+ })();
+
+ var transitions = {
+ push: {
+ push: 0.8,
+ pull: 0.2
+ },
+ pull: {
+ push: 0.8,
+ pull: 0.2
+ }
+ };
+
+ function setup(db, collName) {
+ // index on 'arr', the field being updated
+ assertAlways.commandWorked(db[collName].ensureIndex({ arr: 1 }));
+ for (var i = 0; i < this.numDocs; ++i) {
+ var res = db[collName].insert({ _id: i, arr: [] });
+ assertWhenOwnColl.writeOK(res);
+ assertWhenOwnColl.eq(1, res.nInserted);
+ }
+ }
+
+ return {
+ threadCount: 5,
+ iterations: 10,
+ startState: 'push',
+ states: states,
+ transitions: transitions,
+ data: {
+ numDocs: 10
+ },
+ setup: setup
+ };
+
+})();
diff --git a/jstests/concurrency/fsm_workloads/update_array_noindex.js b/jstests/concurrency/fsm_workloads/update_array_noindex.js
new file mode 100644
index 00000000000..cd1b4c27129
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/update_array_noindex.js
@@ -0,0 +1,13 @@
+'use strict';
+
+/**
+ * update_array_noindex.js
+ *
+ * Executes the update_array.js workload after dropping all non-_id indexes on
+ * the collection.
+ */
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/update_array.js'); // for $config
+load('jstests/concurrency/fsm_workload_modifiers/drop_all_indexes.js'); // for dropAllIndexes
+
+var $config = extendWorkload($config, dropAllIndexes);
diff --git a/jstests/concurrency/fsm_workloads/update_multifield.js b/jstests/concurrency/fsm_workloads/update_multifield.js
new file mode 100644
index 00000000000..434dc518c51
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/update_multifield.js
@@ -0,0 +1,97 @@
+'use strict';
+
+/**
+ * update_multifield.js
+ *
+ * Does updates that affect multiple fields on a single document.
+ * The collection has an index for each field, and a compound index for all fields.
+ */
+var $config = (function() {
+
+ function makeQuery(options) {
+ var query = {};
+ if (!options.multi) {
+ query._id = Random.randInt(options.numDocs);
+ }
+
+ if (options.isolated) {
+ query.$isolated = 1;
+ }
+
+ return query;
+ }
+
+ // returns an update doc
+ function makeRandomUpdateDoc() {
+ var x = Random.randInt(5);
+ var y = Random.randInt(5);
+ // ensure z is never 0, so the $inc is never 0, so we can assert nModified === nMatched
+ var z = Random.randInt(5) + 1;
+ var set = Random.rand() > 0.5;
+ var push = Random.rand() > 0.2;
+
+ var updateDoc = {};
+ updateDoc[set ? '$set' : '$unset'] = { x: x };
+ updateDoc[push ? '$push' : '$pull'] = { y: y };
+ updateDoc.$inc = { z: z };
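+        // every update touches x, y, and z, exercising each single-field index
+        // as well as the compound index created in setup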
+
+ return updateDoc;
+ }
+
+ var states = {
+ update: function update(db, collName) {
+ // choose an update to apply
+ var updateDoc = makeRandomUpdateDoc();
+
+ // apply this update
+ var query = makeQuery({
+ multi: this.multi,
+ isolated: this.isolated,
+ numDocs: this.numDocs
+ });
+ var res = db[collName].update(query, updateDoc, { multi: this.multi });
+ this.assertResult(res, db, collName, query);
+ }
+ };
+
+ var transitions = {
+ update: { update: 1 }
+ };
+
+ function setup(db, collName) {
+ assertAlways.commandWorked(db[collName].ensureIndex({ x: 1 }));
+ assertAlways.commandWorked(db[collName].ensureIndex({ y: 1 }));
+ assertAlways.commandWorked(db[collName].ensureIndex({ z: 1 }));
+ assertAlways.commandWorked(db[collName].ensureIndex({ x: 1, y: 1, z: 1 }));
+
+ for (var i = 0; i < this.numDocs; ++i) {
+ var res = db[collName].insert({ _id: i });
+ assertWhenOwnColl.writeOK(res);
+ assertWhenOwnColl.eq(1, res.nInserted);
+ }
+ }
+
+ var threadCount = 10;
+ return {
+ threadCount: threadCount,
+ iterations: 10,
+ startState: 'update',
+ states: states,
+ transitions: transitions,
+ data: {
+ assertResult: function(res, db, collName, query) {
+ assertAlways.eq(0, res.nUpserted, tojson(res));
+ assertWhenOwnColl.eq(1, res.nMatched, tojson(res));
+ if (db.getMongo().writeMode() === 'commands') {
+ assertWhenOwnColl.eq(1, res.nModified, tojson(res));
+ }
+ },
+ multi: false,
+ isolated: false,
+ // numDocs should be much less than threadCount, to make more threads use the same docs
+ numDocs: Math.floor(threadCount / 3)
+ },
+ setup: setup
+ };
+
+})();
diff --git a/jstests/concurrency/fsm_workloads/update_multifield_isolated_multiupdate.js b/jstests/concurrency/fsm_workloads/update_multifield_isolated_multiupdate.js
new file mode 100644
index 00000000000..8c3f6704231
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/update_multifield_isolated_multiupdate.js
@@ -0,0 +1,38 @@
+'use strict';
+
+/**
+ * update_multifield_isolated_multiupdate.js
+ *
+ * Does updates that affect multiple fields on multiple documents, using $isolated.
+ * The collection has an index for each field, and a multikey index for all fields.
+ */
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/update_multifield.js'); // for $config
+
+var $config = extendWorkload($config, function($config, $super) {
+
+ $config.data.multi = true;
+ $config.data.isolated = true;
+
+ $config.data.assertResult = function assertResult(res, db, collName, query) {
+ assertAlways.eq(0, res.nUpserted, tojson(res));
+        // $isolated prevents other writers from interleaving with this scan, so no doc is moved or missed
+ assertWhenOwnColl.eq(this.numDocs, res.nMatched, tojson(res));
+ if (db.getMongo().writeMode() === 'commands') {
+ assertWhenOwnColl.eq(this.numDocs, res.nModified, tojson(res));
+ }
+
+        // Every thread only increments z, and $inc treats the initially
+        // absent z field as 0, so z should always be strictly greater
+        // than 0 after an update, even if other threads modify the doc.
+ var docs = db[collName].find().toArray();
+ assertWhenOwnColl(function() {
+ docs.forEach(function(doc) {
+ assertWhenOwnColl.eq('number', typeof doc.z);
+ assertWhenOwnColl.gt(doc.z, 0);
+ });
+ });
+ };
+
+ return $config;
+});
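
With data.multi and data.isolated both true, makeQuery() in the base workload returns only the $isolated flag, so each iteration is effectively the following; a sketch with the random update document fixed:

    // The multi-update issued by the extended workload; $isolated keeps other
    // writers from interleaving while the scan is in progress.
    db.coll.update({ $isolated: 1 },
                   { $set: { x: 3 }, $push: { y: 2 }, $inc: { z: 4 } },
                   { multi: true });
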
diff --git a/jstests/concurrency/fsm_workloads/update_multifield_isolated_multiupdate_noindex.js b/jstests/concurrency/fsm_workloads/update_multifield_isolated_multiupdate_noindex.js
new file mode 100644
index 00000000000..6ac6aeabc89
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/update_multifield_isolated_multiupdate_noindex.js
@@ -0,0 +1,13 @@
+'use strict';
+
+/**
+ * update_multifield_isolated_multiupdate_noindex.js
+ *
+ * Executes the update_multifield_isolated_multiupdate.js workload after
+ * dropping all non-_id indexes on the collection.
+ */
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/update_multifield_isolated_multiupdate.js'); // for $config
+load('jstests/concurrency/fsm_workload_modifiers/drop_all_indexes.js'); // for dropAllIndexes
+
+var $config = extendWorkload($config, dropAllIndexes);
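
drop_all_indexes.js itself is not part of this diff; a modifier of this kind would typically wrap setup() so the base workload's indexes are built and then removed. A hypothetical sketch, not the actual file:

    // Hypothetical shape of a dropAllIndexes workload modifier.
    function dropAllIndexes($config, $super) {
        $config.setup = function setup(db, collName) {
            $super.setup.apply(this, arguments); // build indexes, insert docs
            db[collName].dropIndexes();          // drops every non-_id index
        };
        return $config;
    }
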
diff --git a/jstests/concurrency/fsm_workloads/update_multifield_multiupdate.js b/jstests/concurrency/fsm_workloads/update_multifield_multiupdate.js
new file mode 100644
index 00000000000..298c35a6522
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/update_multifield_multiupdate.js
@@ -0,0 +1,52 @@
+'use strict';
+
+/**
+ * update_multifield_multiupdate.js
+ *
+ * Does updates that affect multiple fields on multiple documents.
+ * The collection has an index for each field, and a multikey index for all fields.
+ */
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/update_multifield.js'); // for $config
+load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isMongod and isMMAPv1
+
+var $config = extendWorkload($config, function($config, $super) {
+
+ $config.data.multi = true;
+
+ $config.data.assertResult = function(res, db, collName, query) {
+ assertAlways.eq(0, res.nUpserted, tojson(res));
+
+ var status = db.serverStatus();
+ if (isMongod(status)) {
+ if (isMMAPv1(status)) {
+ // If an update triggers a document to move forward, then
+ // that document can be matched multiple times. If an update
+ // triggers a document to move backwards, then that document
+ // can be missed by other threads.
+ assertAlways.gte(res.nMatched, 0, tojson(res));
+ } else { // non-mmapv1 storage engine
+ // TODO: Can we assert exact equality with WiredTiger?
+ // What about for other storage engines?
+ assertWhenOwnColl.lte(this.numDocs, res.nMatched, tojson(res));
+ }
+ } else { // mongos
+ // In a mixed cluster, it is unknown what underlying storage engine
+ // the update operations will be executed against. Thus, we can only
+ // make the weakest of all assertions above.
+ assertAlways.gte(res.nMatched, 0, tojson(res));
+ }
+
+ if (db.getMongo().writeMode() === 'commands') {
+ assertWhenOwnColl.eq(res.nMatched, res.nModified, tojson(res));
+ }
+
+ var docs = db[collName].find().toArray();
+ docs.forEach(function(doc) {
+ assertWhenOwnColl.eq('number', typeof doc.z);
+ assertWhenOwnColl.gt(doc.z, 0);
+ });
+ };
+
+ return $config;
+});
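
The isMongod/isMMAPv1 helpers are assumed here to key off the storageEngine section of serverStatus; servers that predate pluggable storage report no such section. A sketch of the branch above in isolation:

    var status = db.serverStatus();
    var engine = status.storageEngine ? status.storageEngine.name : 'mmapv1';
    var res = db.coll.update({}, { $inc: { z: 1 } }, { multi: true });
    if (engine === 'mmapv1') {
        // Document moves make stronger claims about nMatched unsafe.
        assert.gte(res.nMatched, 0);
    }
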
diff --git a/jstests/concurrency/fsm_workloads/update_multifield_multiupdate_noindex.js b/jstests/concurrency/fsm_workloads/update_multifield_multiupdate_noindex.js
new file mode 100644
index 00000000000..fe12f2e33fb
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/update_multifield_multiupdate_noindex.js
@@ -0,0 +1,13 @@
+'use strict';
+
+/**
+ * update_multifield_multiupdate_noindex.js
+ *
+ * Executes the update_multifield_multiupdate.js workload after dropping all
+ * non-_id indexes on the collection.
+ */
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/update_multifield_multiupdate.js'); // for $config
+load('jstests/concurrency/fsm_workload_modifiers/drop_all_indexes.js'); // for dropAllIndexes
+
+var $config = extendWorkload($config, dropAllIndexes);
diff --git a/jstests/concurrency/fsm_workloads/update_multifield_noindex.js b/jstests/concurrency/fsm_workloads/update_multifield_noindex.js
new file mode 100644
index 00000000000..0be46a25f6a
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/update_multifield_noindex.js
@@ -0,0 +1,13 @@
+'use strict';
+
+/**
+ * update_multifield_noindex.js
+ *
+ * Executes the update_multifield.js workload after dropping all non-_id indexes
+ * on the collection.
+ */
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/update_multifield.js'); // for $config
+load('jstests/concurrency/fsm_workload_modifiers/drop_all_indexes.js'); // for dropAllIndexes
+
+var $config = extendWorkload($config, dropAllIndexes);
diff --git a/jstests/concurrency/fsm_workloads/update_rename.js b/jstests/concurrency/fsm_workloads/update_rename.js
new file mode 100644
index 00000000000..ff0f31959ca
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/update_rename.js
@@ -0,0 +1,74 @@
+'use strict';
+
+/**
+ * update_rename.js
+ *
+ * Each thread does a $rename to cause documents to jump between indexes.
+ */
+var $config = (function() {
+
+ var fieldNames = ['update_rename_x', 'update_rename_y', 'update_rename_z'];
+
+ function choose(array) {
+ assertAlways.gt(array.length, 0, "can't choose an element of an empty array");
+ return array[Random.randInt(array.length)];
+ }
+
+ var states = {
+ update: function update(db, collName) {
+ var from = choose(fieldNames);
+ var to = choose(fieldNames.filter(function(n) { return n !== from; }));
+ var updater = { $rename: {} };
+ updater.$rename[from] = to;
+
+ var query = {};
+ query[from] = { $exists: 1 };
+
+ var res = db[collName].update(query, updater);
+
+ assertAlways.eq(0, res.nUpserted, tojson(res));
+ assertWhenOwnColl.contains(res.nMatched, [0, 1], tojson(res));
+ if (db.getMongo().writeMode() === 'commands') {
+ assertWhenOwnColl.eq(res.nMatched, res.nModified, tojson(res));
+ }
+ }
+ };
+
+ var transitions = {
+ update: { update: 1 }
+ };
+
+ function setup(db, collName) {
+        // Create an index on all but one of the field names, so renames between
+        // indexed and non-indexed fields are both exercised.
+ fieldNames.slice(1).forEach(function(fieldName) {
+ var indexSpec = {};
+ indexSpec[fieldName] = 1;
+ assertAlways.commandWorked(db[collName].ensureIndex(indexSpec));
+ });
+
+ for (var i = 0; i < this.numDocs; ++i) {
+ var fieldName = fieldNames[i % fieldNames.length];
+ var doc = {};
+ doc[fieldName] = i;
+ var res = db[collName].insert(doc);
+ assertAlways.writeOK(res);
+ assertAlways.eq(1, res.nInserted);
+ }
+ }
+
+ var threadCount = 20;
+ return {
+ threadCount: threadCount,
+ iterations: 20,
+ startState: 'update',
+ states: states,
+ transitions: transitions,
+ data: {
+            // numDocs should be much less than threadCount, so that several threads contend for the same docs
+ numDocs: Math.floor(threadCount / 5)
+ },
+ setup: setup
+ };
+
+})();
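
Concretely, one iteration with from = 'update_rename_x' and to = 'update_rename_y' issues the following; a sketch assuming a collection named 'coll':

    // A single-document $rename as issued by the update state above; without
    // { multi: true } at most one matching doc has its field renamed.
    db.coll.update({ update_rename_x: { $exists: 1 } },
                   { $rename: { update_rename_x: 'update_rename_y' } });
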
diff --git a/jstests/concurrency/fsm_workloads/update_rename_noindex.js b/jstests/concurrency/fsm_workloads/update_rename_noindex.js
new file mode 100644
index 00000000000..bbf19227865
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/update_rename_noindex.js
@@ -0,0 +1,13 @@
+'use strict';
+
+/**
+ * update_rename_noindex.js
+ *
+ * Executes the update_rename.js workload after dropping all non-_id indexes on
+ * the collection.
+ */
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/update_rename.js'); // for $config
+load('jstests/concurrency/fsm_workload_modifiers/drop_all_indexes.js'); // for dropAllIndexes
+
+var $config = extendWorkload($config, dropAllIndexes);
diff --git a/jstests/concurrency/fsm_workloads/update_replace.js b/jstests/concurrency/fsm_workloads/update_replace.js
new file mode 100644
index 00000000000..b0c621c94fe
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/update_replace.js
@@ -0,0 +1,80 @@
+'use strict';
+
+/**
+ * update_replace.js
+ *
+ * Does updates that replace an entire document.
+ * The collection has indexes on some but not all fields.
+ */
+var $config = (function() {
+
+ // explicitly pass db to avoid accidentally using the global `db`
+ function assertResult(db, res) {
+ assertAlways.eq(0, res.nUpserted, tojson(res));
+ assertWhenOwnColl.eq(1, res.nMatched, tojson(res));
+ if (db.getMongo().writeMode() === 'commands') {
+ assertWhenOwnColl.contains(res.nModified, [0, 1], tojson(res));
+ }
+ }
+
+ // returns an update doc
+ function getRandomUpdateDoc() {
+ var choices = [
+ {},
+ { x: 1, y: 1, z: 1 },
+ { a: 1, b: 1, c: 1 }
+ ];
+ return choices[Random.randInt(choices.length)];
+ }
+
+ var states = {
+ update: function update(db, collName) {
+ // choose a doc to update
+ var docIndex = Random.randInt(this.numDocs);
+
+ // choose an update to apply
+ var updateDoc = getRandomUpdateDoc();
+
+ // apply the update
+ var res = db[collName].update({ _id: docIndex }, updateDoc);
+ assertResult(db, res);
+ }
+ };
+
+ var transitions = {
+ update: { update: 1 }
+ };
+
+ function setup(db, collName) {
+ assertAlways.commandWorked(db[collName].ensureIndex({ a: 1 }));
+ assertAlways.commandWorked(db[collName].ensureIndex({ b: 1 }));
+ // no index on c
+
+ assertAlways.commandWorked(db[collName].ensureIndex({ x: 1 }));
+ assertAlways.commandWorked(db[collName].ensureIndex({ y: 1 }));
+ // no index on z
+
+ for (var i = 0; i < this.numDocs; ++i) {
+ var res = db[collName].insert({ _id: i });
+ assertWhenOwnColl.writeOK(res);
+ assertWhenOwnColl.eq(1, res.nInserted);
+ }
+
+ assertWhenOwnColl.eq(this.numDocs, db[collName].find().itcount());
+ }
+
+ var threadCount = 10;
+ return {
+ threadCount: threadCount,
+ iterations: 10,
+ startState: 'update',
+ states: states,
+ transitions: transitions,
+ data: {
+            // numDocs should be much less than threadCount, so that several threads contend for the same docs
+ numDocs: Math.floor(threadCount / 3)
+ },
+ setup: setup
+ };
+
+})();
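
Because the update document contains no operators, the server replaces the entire matched document except _id. A sketch of one iteration:

    // Whole-document replacement: any fields from an earlier replacement are
    // discarded, so the doc becomes { _id: 3, a: 1, b: 1, c: 1 }.
    db.coll.update({ _id: 3 }, { a: 1, b: 1, c: 1 });
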
diff --git a/jstests/concurrency/fsm_workloads/update_replace_noindex.js b/jstests/concurrency/fsm_workloads/update_replace_noindex.js
new file mode 100644
index 00000000000..590326a8edc
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/update_replace_noindex.js
@@ -0,0 +1,13 @@
+'use strict';
+
+/**
+ * update_replace_noindex.js
+ *
+ * Executes the update_replace.js workload after dropping all non-_id indexes
+ * on the collection.
+ */
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/update_replace.js'); // for $config
+load('jstests/concurrency/fsm_workload_modifiers/drop_all_indexes.js'); // for dropAllIndexes
+
+var $config = extendWorkload($config, dropAllIndexes);
diff --git a/jstests/concurrency/fsm_workloads/update_simple.js b/jstests/concurrency/fsm_workloads/update_simple.js
new file mode 100644
index 00000000000..a864fcec617
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/update_simple.js
@@ -0,0 +1,86 @@
+'use strict';
+
+/**
+ * update_simple.js
+ *
+ * Creates several docs. On each iteration, each thread chooses:
+ * - a random doc
+ * - whether to $set or $unset its field
+ * - what value to $set the field to
+ */
+var $config = (function() {
+
+ var states = {
+ set: function set(db, collName) {
+ this.setOrUnset(db, collName, true, this.numDocs);
+ },
+
+ unset: function unset(db, collName) {
+ this.setOrUnset(db, collName, false, this.numDocs);
+ }
+ };
+
+ var transitions = {
+ set: {
+ set: 0.5,
+ unset: 0.5
+ },
+ unset: {
+ set: 0.5,
+ unset: 0.5
+ }
+ };
+
+ function setup(db, collName) {
+ // index on 'value', the field being updated
+ assertAlways.commandWorked(db[collName].ensureIndex({ value: 1 }));
+ for (var i = 0; i < this.numDocs; ++i) {
+ // make sure the inserted docs have a 'value' field, so they won't need
+ // to grow when this workload runs against a capped collection
+ var res = db[collName].insert({ _id: i, value: 0 });
+ assertWhenOwnColl.writeOK(res);
+ assertWhenOwnColl.eq(1, res.nInserted);
+ }
+ }
+
+ var threadCount = 20;
+ return {
+ threadCount: threadCount,
+ iterations: 20,
+ startState: 'set',
+ states: states,
+ transitions: transitions,
+ data: {
+ // explicitly pass db to avoid accidentally using the global `db`
+ assertResult: function assertResult(db, res) {
+ assertAlways.eq(0, res.nUpserted, tojson(res));
+ assertWhenOwnColl.eq(1, res.nMatched, tojson(res));
+ if (db.getMongo().writeMode() === 'commands') {
+ assertWhenOwnColl.contains(res.nModified, [0, 1], tojson(res));
+ }
+ },
+
+ setOrUnset: function setOrUnset(db, collName, set, numDocs) {
+ // choose a doc and value to use in the update
+ var docIndex = Random.randInt(numDocs);
+ var value = Random.randInt(5);
+
+ var updater = {};
+ updater[set ? '$set' : '$unset'] = { value: value };
+
+ var query = { _id: docIndex };
+ var res = this.doUpdate(db, collName, query, updater);
+ this.assertResult(db, res);
+ },
+
+ doUpdate: function doUpdate(db, collName, query, updater) {
+ return db[collName].update(query, updater);
+ },
+
+            // numDocs should be much less than threadCount, so that several threads contend for the same docs
+ numDocs: Math.floor(threadCount / 5)
+ },
+ setup: setup
+ };
+
+})();
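
One setOrUnset() call reduces to one of two update shapes; a sketch with the random draws pinned:

    db.coll.update({ _id: 2 }, { $set: { value: 4 } });   // set branch
    // $unset ignores its operand; only the field name matters.
    db.coll.update({ _id: 2 }, { $unset: { value: 4 } }); // unset branch
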
diff --git a/jstests/concurrency/fsm_workloads/update_simple_capped.js b/jstests/concurrency/fsm_workloads/update_simple_capped.js
new file mode 100644
index 00000000000..d2193945d64
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/update_simple_capped.js
@@ -0,0 +1,12 @@
+'use strict';
+
+/**
+ * update_simple_capped.js
+ *
+ * Executes the update_simple.js workload on a capped collection.
+ */
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/update_simple.js'); // for $config
+load('jstests/concurrency/fsm_workload_modifiers/make_capped.js'); // for makeCapped
+
+var $config = extendWorkload($config, makeCapped);
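
make_capped.js is likewise not shown in this diff; a modifier like it would typically recreate the collection as capped before running the base setup. A hypothetical sketch, with an arbitrary size:

    // Hypothetical shape of a makeCapped workload modifier.
    function makeCapped($config, $super) {
        $config.setup = function setup(db, collName) {
            db[collName].drop();
            assertAlways.commandWorked(
                db.createCollection(collName, { capped: true, size: 16384 }));
            $super.setup.apply(this, arguments);
        };
        return $config;
    }
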
diff --git a/jstests/concurrency/fsm_workloads/update_simple_eval.js b/jstests/concurrency/fsm_workloads/update_simple_eval.js
new file mode 100644
index 00000000000..b0f0897a3eb
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/update_simple_eval.js
@@ -0,0 +1,33 @@
+'use strict';
+
+/**
+ * update_simple_eval.js
+ *
+ * Creates several docs. On each iteration, each thread chooses:
+ * - a random doc
+ * - whether to $set or $unset its field
+ * - what value to $set the field to
+ * and then applies the update using db.runCommand({ eval: ... })
+ */
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/update_simple.js'); // for $config
+
+var $config = extendWorkload($config, function($config, $super) {
+
+ $config.data.doUpdate = function doUpdate(db, collName, query, updater) {
+ var evalResult = db.runCommand({
+ eval: function(f, collName, query, updater) {
+ return tojson(f(db, collName, query, updater));
+ },
+ args: [$super.data.doUpdate, collName, query, updater],
+ nolock: this.nolock
+ });
+ assertAlways.commandWorked(evalResult);
+ var res = JSON.parse(evalResult.retval);
+ return res;
+ };
+
+ $config.data.nolock = false;
+
+ return $config;
+});
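
Stripped of the workload plumbing, the eval round-trip looks like this; a sketch (the eval command takes a global lock unless nolock is set):

    var evalResult = db.runCommand({
        eval: function(collName) {
            // Runs server-side; tojson() makes the WriteResult transportable.
            return tojson(db[collName].update({ _id: 0 }, { $set: { value: 1 } }));
        },
        args: ['coll'],
        nolock: false
    });
    assertAlways.commandWorked(evalResult);
    var res = JSON.parse(evalResult.retval);
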
diff --git a/jstests/concurrency/fsm_workloads/update_simple_eval_nolock.js b/jstests/concurrency/fsm_workloads/update_simple_eval_nolock.js
new file mode 100644
index 00000000000..0d89e509751
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/update_simple_eval_nolock.js
@@ -0,0 +1,16 @@
+'use strict';
+
+/**
+ * update_simple_eval_nolock.js
+ *
+ * Runs update_simple_eval with the eval option { nolock: true }.
+ */
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/update_simple_eval.js'); // for $config
+
+var $config = extendWorkload($config, function($config, $super) {
+
+ $config.data.nolock = true;
+
+ return $config;
+});
diff --git a/jstests/concurrency/fsm_workloads/update_simple_noindex.js b/jstests/concurrency/fsm_workloads/update_simple_noindex.js
new file mode 100644
index 00000000000..b39c71f4266
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/update_simple_noindex.js
@@ -0,0 +1,13 @@
+'use strict';
+
+/**
+ * update_simple_noindex.js
+ *
+ * Executes the update_simple.js workload after dropping all non-_id indexes on
+ * the collection.
+ */
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/update_simple.js'); // for $config
+load('jstests/concurrency/fsm_workload_modifiers/drop_all_indexes.js'); // for dropAllIndexes
+
+var $config = extendWorkload($config, dropAllIndexes);
diff --git a/jstests/concurrency/fsm_workloads/update_upsert_multi.js b/jstests/concurrency/fsm_workloads/update_upsert_multi.js
new file mode 100644
index 00000000000..7fc7a61980c
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/update_upsert_multi.js
@@ -0,0 +1,83 @@
+'use strict';
+
+/**
+ * update_upsert_multi.js
+ *
+ * Tests updates that specify both multi=true and upsert=true.
+ * The 'insert' state uses a query that will match no documents, causing an upsert.
+ * The 'update' state uses a query that will match one or more documents, causing a multi-update.
+ * Both states use { multi: true, upsert: true }, but only one option takes effect
+ * per call, depending on whether the query matches zero documents or at least one.
+ */
+var $config = (function() {
+
+ var states = {
+ insert: function insert(db, collName) {
+ var query, update, options;
+ var res = db[collName].update(
+ // The counter ensures that the query will not match any existing document.
+ query = { tid: this.tid, i: this.counter++ },
+ update = { $inc: { n: 1 } },
+ options = { multi: true, upsert: true }
+ );
+ var debugDoc = tojson({ query: query, update: update, options: options, result: res });
+ assertWhenOwnColl.eq(1, res.nUpserted, debugDoc);
+ assertWhenOwnColl.eq(0, res.nMatched, debugDoc);
+ if (db.getMongo().writeMode() === 'commands') {
+ assertWhenOwnColl.eq(0, res.nModified, debugDoc);
+ }
+ },
+
+ update: function update(db, collName) {
+ var res = db[collName].update(
+ // This query will match an existing document, since the 'insert' state
+ // always runs first.
+ { tid: this.tid },
+ { $inc: { n: 1 } },
+ { multi: true, upsert: true }
+ );
+
+ assertWhenOwnColl.eq(0, res.nUpserted, tojson(res));
+ assertWhenOwnColl.lte(1, res.nMatched, tojson(res));
+ if (db.getMongo().writeMode() === 'commands') {
+ assertWhenOwnColl.eq(res.nMatched, res.nModified, tojson(res));
+ }
+ },
+
+ assertConsistency: function assertConsistency(db, collName) {
+            // Each update operation either:
+            //  - inserts a new doc { tid: tid, i: counter++, n: 1 }
+            //    (the upsert's $inc creates n with value 1), or
+            //  - updates every doc matching { tid: tid } with { $inc: { n: 1 } }.
+            // So within each tid, walking from lower to higher values of i, the
+            // value of n should be non-increasing: docs with higher i are newer,
+            // so they have had fewer opportunities to have n incremented.
+ var prevN = Infinity;
+ db[collName].find({ tid: this.tid }).sort({ i: 1 }).forEach(function(doc) {
+ assertWhenOwnColl.gte(prevN, doc.n);
+ prevN = doc.n;
+ });
+ }
+ };
+
+ var transitions = {
+ insert: { update: 0.875, assertConsistency: 0.125 },
+ update: { insert: 0.875, assertConsistency: 0.125 },
+ assertConsistency: { insert: 0.5, update: 0.5 }
+ };
+
+ function setup(db, collName) {
+ assertAlways.commandWorked(db[collName].ensureIndex({ tid: 1, i: 1 }));
+ }
+
+ return {
+ threadCount: 10,
+ iterations: 20,
+ states: states,
+ startState: 'insert',
+ transitions: transitions,
+ data: { counter: 0 },
+ setup: setup
+ };
+
+})();
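
The property exercised here: with { multi: true, upsert: true }, exactly one of the two options takes effect per call. A sketch:

    var r1 = db.coll.update({ tid: 9, i: 0 }, { $inc: { n: 1 } },
                            { multi: true, upsert: true });
    assert.eq(1, r1.nUpserted);   // nothing matched: the upsert path ran
    var r2 = db.coll.update({ tid: 9 }, { $inc: { n: 1 } },
                            { multi: true, upsert: true });
    assert.eq(0, r2.nUpserted);   // the upserted doc matched: multi path ran
    assert.lte(1, r2.nMatched);
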
diff --git a/jstests/concurrency/fsm_workloads/update_upsert_multi_noindex.js b/jstests/concurrency/fsm_workloads/update_upsert_multi_noindex.js
new file mode 100644
index 00000000000..a463c6ba17d
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/update_upsert_multi_noindex.js
@@ -0,0 +1,13 @@
+'use strict';
+
+/**
+ * update_upsert_multi_noindex.js
+ *
+ * Executes the update_upsert_multi.js workload after dropping all non-_id
+ * indexes on the collection.
+ */
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/update_upsert_multi.js'); // for $config
+load('jstests/concurrency/fsm_workload_modifiers/drop_all_indexes.js'); // for dropAllIndexes
+
+var $config = extendWorkload($config, dropAllIndexes);