summaryrefslogtreecommitdiff
path: root/jstests/parallel
diff options
context:
space:
mode:
authorMax Hirschhorn <max.hirschhorn@mongodb.com>2014-11-19 11:54:45 -0500
committerMax Hirschhorn <max.hirschhorn@mongodb.com>2014-11-19 13:05:24 -0500
commit365cca0c47566d192ca847f0b077cedef4b3430e (patch)
tree8cdce27cfe6a34002a01ce6569f3ad0cd5deecbb /jstests/parallel
parent217151b66aefddca0a62e92aa095bb4f27dba574 (diff)
downloadmongo-365cca0c47566d192ca847f0b077cedef4b3430e.tar.gz
SERVER-16196 Add FSM-based concurrency tests to parallel suite.
The actual execution of the workloads is disabled for now.
Diffstat (limited to 'jstests/parallel')
-rw-r--r--jstests/parallel/fsm_all.js12
-rw-r--r--jstests/parallel/fsm_all_composed.js13
-rw-r--r--jstests/parallel/fsm_all_master_slave.js12
-rw-r--r--jstests/parallel/fsm_all_replication.js12
-rw-r--r--jstests/parallel/fsm_all_sharded.js12
-rw-r--r--jstests/parallel/fsm_all_sharded_replication.js12
-rw-r--r--jstests/parallel/fsm_all_simultaneous.js12
-rw-r--r--jstests/parallel/fsm_example.js72
-rw-r--r--jstests/parallel/fsm_example_inheritance.js18
-rw-r--r--jstests/parallel/fsm_libs/assert.js107
-rw-r--r--jstests/parallel/fsm_libs/composer.js77
-rw-r--r--jstests/parallel/fsm_libs/fsm.js59
-rw-r--r--jstests/parallel/fsm_libs/runner.js586
-rw-r--r--jstests/parallel/fsm_libs/utils.js19
-rw-r--r--jstests/parallel/fsm_libs/worker_thread.js74
-rw-r--r--jstests/parallel/fsm_selftests.js39
-rw-r--r--jstests/parallel/fsm_workload_helpers/indexed_noindex.js17
-rw-r--r--jstests/parallel/fsm_workloads/findAndModify_inc.js59
-rw-r--r--jstests/parallel/fsm_workloads/indexed_insert_1char.js20
-rw-r--r--jstests/parallel/fsm_workloads/indexed_insert_1char_noindex.js10
-rw-r--r--jstests/parallel/fsm_workloads/indexed_insert_base.js65
-rw-r--r--jstests/parallel/fsm_workloads/indexed_insert_base_noindex.js10
-rw-r--r--jstests/parallel/fsm_workloads/indexed_insert_heterogeneous.js49
-rw-r--r--jstests/parallel/fsm_workloads/indexed_insert_heterogeneous_noindex.js10
-rw-r--r--jstests/parallel/fsm_workloads/indexed_insert_large.js36
-rw-r--r--jstests/parallel/fsm_workloads/indexed_insert_large_noindex.js10
-rw-r--r--jstests/parallel/fsm_workloads/indexed_insert_long_fieldname.js16
-rw-r--r--jstests/parallel/fsm_workloads/indexed_insert_long_fieldname_noindex.js10
-rw-r--r--jstests/parallel/fsm_workloads/indexed_insert_multikey.js22
-rw-r--r--jstests/parallel/fsm_workloads/indexed_insert_multikey_noindex.js10
-rw-r--r--jstests/parallel/fsm_workloads/indexed_insert_ordered_bulk.js30
-rw-r--r--jstests/parallel/fsm_workloads/indexed_insert_unordered_bulk.js30
-rw-r--r--jstests/parallel/fsm_workloads/update_inc.js67
-rw-r--r--jstests/parallel/fsm_workloads/update_ordered_bulk_inc.js69
34 files changed, 1676 insertions, 0 deletions
diff --git a/jstests/parallel/fsm_all.js b/jstests/parallel/fsm_all.js
new file mode 100644
index 00000000000..4d29784e7b3
--- /dev/null
+++ b/jstests/parallel/fsm_all.js
@@ -0,0 +1,12 @@
+load('jstests/parallel/fsm_libs/runner.js');
+
+var dir = 'jstests/parallel/fsm_workloads';
+
+var blacklist = [
+ 'indexed_insert_multikey.js' // SERVER-16143
+].map(function(file) { return dir + '/' + file; });
+
+// SERVER-16196 re-enable executing workloads
+// runWorkloadsSerially(ls(dir).filter(function(file) {
+// return !Array.contains(blacklist, file);
+// }));
diff --git a/jstests/parallel/fsm_all_composed.js b/jstests/parallel/fsm_all_composed.js
new file mode 100644
index 00000000000..594ecdbac81
--- /dev/null
+++ b/jstests/parallel/fsm_all_composed.js
@@ -0,0 +1,13 @@
+load('jstests/parallel/fsm_libs/runner.js');
+
+var dir = 'jstests/parallel/fsm_workloads';
+
+var blacklist = [
+ 'indexed_insert_multikey.js', // SERVER-16143
+ 'indexed_insert_multikey_noindex.js' // SERVER-16143
+].map(function(file) { return dir + '/' + file; });
+
+// SERVER-16196 re-enable executing workloads
+// runMixtureOfWorkloads(ls(dir).filter(function(file) {
+// return !Array.contains(blacklist, file);
+// }));
diff --git a/jstests/parallel/fsm_all_master_slave.js b/jstests/parallel/fsm_all_master_slave.js
new file mode 100644
index 00000000000..c2de2d3a13d
--- /dev/null
+++ b/jstests/parallel/fsm_all_master_slave.js
@@ -0,0 +1,12 @@
+load('jstests/parallel/fsm_libs/runner.js');
+
+var dir = 'jstests/parallel/fsm_workloads';
+
+var blacklist = [
+ 'indexed_insert_multikey.js' // SERVER-16143
+].map(function(file) { return dir + '/' + file; });
+
+// SERVER-16196 re-enable executing workloads with master-slave replication
+// runWorkloadsSerially(ls(dir).filter(function(file) {
+// return !Array.contains(blacklist, file);
+// }), { masterSlave: true });
diff --git a/jstests/parallel/fsm_all_replication.js b/jstests/parallel/fsm_all_replication.js
new file mode 100644
index 00000000000..f00d28589b9
--- /dev/null
+++ b/jstests/parallel/fsm_all_replication.js
@@ -0,0 +1,12 @@
+load('jstests/parallel/fsm_libs/runner.js');
+
+var dir = 'jstests/parallel/fsm_workloads';
+
+var blacklist = [
+ 'indexed_insert_multikey.js' // SERVER-16143
+].map(function(file) { return dir + '/' + file; });
+
+// SERVER-16196 re-enable executing workloads against replica sets
+// runWorkloadsSerially(ls(dir).filter(function(file) {
+// return !Array.contains(blacklist, file);
+// }), { replication: true });
diff --git a/jstests/parallel/fsm_all_sharded.js b/jstests/parallel/fsm_all_sharded.js
new file mode 100644
index 00000000000..7cb1a47ddaf
--- /dev/null
+++ b/jstests/parallel/fsm_all_sharded.js
@@ -0,0 +1,12 @@
+load('jstests/parallel/fsm_libs/runner.js');
+
+var dir = 'jstests/parallel/fsm_workloads';
+
+var blacklist = [
+ 'indexed_insert_multikey.js' // SERVER-16143
+].map(function(file) { return dir + '/' + file; });
+
+// SERVER-16196 re-enable executing workloads against sharded clusters
+// runWorkloadsSerially(ls(dir).filter(function(file) {
+// return !Array.contains(blacklist, file);
+// }), { sharded: true });
diff --git a/jstests/parallel/fsm_all_sharded_replication.js b/jstests/parallel/fsm_all_sharded_replication.js
new file mode 100644
index 00000000000..b482252310b
--- /dev/null
+++ b/jstests/parallel/fsm_all_sharded_replication.js
@@ -0,0 +1,12 @@
+load('jstests/parallel/fsm_libs/runner.js');
+
+var dir = 'jstests/parallel/fsm_workloads';
+
+var blacklist = [
+ 'indexed_insert_multikey.js' // SERVER-16143
+].map(function(file) { return dir + '/' + file; });
+
+// SERVER-16196 re-enable executing workloads against sharded replica sets
+// runWorkloadsSerially(ls(dir).filter(function(file) {
+// return !Array.contains(blacklist, file);
+// }), { sharded: true, replication: true });
diff --git a/jstests/parallel/fsm_all_simultaneous.js b/jstests/parallel/fsm_all_simultaneous.js
new file mode 100644
index 00000000000..8cd32a0ef90
--- /dev/null
+++ b/jstests/parallel/fsm_all_simultaneous.js
@@ -0,0 +1,12 @@
+load('jstests/parallel/fsm_libs/runner.js');
+
+var dir = 'jstests/parallel/fsm_workloads';
+
+var blacklist = [
+ 'indexed_insert_multikey.js' // SERVER-16143
+].map(function(file) { return dir + '/' + file; });
+
+// SERVER-16196 re-enable executing workloads
+// runWorkloadsInParallel(ls(dir).filter(function(file) {
+// return !Array.contains(blacklist, file);
+// }));
diff --git a/jstests/parallel/fsm_example.js b/jstests/parallel/fsm_example.js
new file mode 100644
index 00000000000..6bb549258c3
--- /dev/null
+++ b/jstests/parallel/fsm_example.js
@@ -0,0 +1,72 @@
+/**
+ * fsm_example.js
+ *
+ * Includes documentation of each property on $config.
+ * Serves as a template for new workloads.
+ */
+var $config = (function() {
+
+ // 'data' is passed (copied) to each of the worker threads.
+ var data = {};
+
+ // 'states' are the different functions callable by a worker
+ // thread. The 'this' argument of any exposed function is
+ // bound as '$config.data'.
+ var states = {
+ init: function init(db, collName) {
+ this.start = 10 * this.tid;
+ },
+
+ scanGT: function scanGT(db, collName) {
+ db[collName].find({ _id: { $gt: this.start } }).itcount();
+ },
+
+ scanLTE: function scanLTE(db, collName) {
+ db[collName].find({ _id: { $lte: this.start } }).itcount();
+ },
+ };
+
+ // 'transitions' defines how the FSM should proceed from its
+ // current state to the next state. The value associated with a
+ // particular state represents the likelihood of that transition.
+ //
+ // For example, 'init: { scanGT: 0.5, scanLTE: 0.5 }' means that
+ // the worker thread will transition from the 'init' state
+ // to the 'scanGT' state with probability 0.5, and
+ // to the 'scanLTE' state with probability 0.5.
+ //
+ // All state functions should appear as keys within 'transitions'.
+ var transitions = {
+ init: { scanGT: 0.5, scanLTE: 0.5 },
+ scanGT: { scanGT: 0.8, scanLTE: 0.2 },
+ scanLTE: { scanGT: 0.2, scanLTE: 0.8 }
+ };
+
+ // 'setup' is run once by the parent thread after the cluster has
+ // been initialized, but before the worker threads have been spawned.
+ // The 'this' argument is bound as '$config.data'.
+ function setup(db, collName) {
+ // Workloads should NOT drop the collection db[collName], as
+ // doing so is handled by runner.js before 'setup' is called.
+ for (var i = 0; i < 1000; ++i) {
+ db[collName].insert({ _id: i });
+ }
+ }
+
+ // 'teardown' is run once by the parent thread before the cluster
+ // is destroyed, but after the worker threads have been reaped.
+ // The 'this' argument is bound as '$config.data'.
+ function teardown(db, collName) {}
+
+ return {
+ threadCount: 5,
+ iterations: 10,
+ startState: 'init', // optional, default 'init'
+ states: states,
+ transitions: transitions,
+ setup: setup, // optional, default empty function
+ teardown: teardown, // optional, default empty function
+ data: data // optional, default empty object
+ };
+
+})();
diff --git a/jstests/parallel/fsm_example_inheritance.js b/jstests/parallel/fsm_example_inheritance.js
new file mode 100644
index 00000000000..e8e5452da3a
--- /dev/null
+++ b/jstests/parallel/fsm_example_inheritance.js
@@ -0,0 +1,18 @@
+load('jstests/parallel/fsm_libs/runner.js'); // for extendWorkload
+load('jstests/parallel/fsm_example.js'); // for $config
+
+// extendWorkload takes a $config object and a callback, and returns an extended $config object.
+var $config = extendWorkload($config, function($config, $super) {
+ // In the callback, $super is the base workload definition we're extending,
+ // and $config is the extended workload definition we're creating.
+
+ // You can replace any properties on $config, including methods you want to override.
+ $config.setup = function(db, collName) {
+ // Overridden methods should usually call the corresponding method on $super.
+ $super.setup.apply(this, arguments);
+
+ db[collName].ensureIndex({ exampleIndexedField: 1 });
+ };
+
+ return $config;
+});
diff --git a/jstests/parallel/fsm_libs/assert.js b/jstests/parallel/fsm_libs/assert.js
new file mode 100644
index 00000000000..e0409c132b5
--- /dev/null
+++ b/jstests/parallel/fsm_libs/assert.js
@@ -0,0 +1,107 @@
+
+/**
+ * Helpers for controlling under which situations an assert is actually executed.
+ * This allows us to define workloads that will have only valid assertions,
+ * regardless of how any particular workload gets run with any others.
+ *
+ * There are 3 different assert levels:
+ * ALWAYS = these assertions are always executed
+ * OWN_COLL = these assertions are executed when workloads are run on separate collections
+ * OWN_DB = these assertions are executed when workloads are run on separate databases
+ */
+
+var AssertLevel = (function() {
+
+ function AssertLevel(level) {
+ this.level = level;
+
+ // Returns < 0 if this < other
+ // = 0 if this == other
+ // > 0 if this > other
+ this.compareTo = function(other) {
+ return this.level - other.level;
+ };
+ }
+
+ function isAssertLevel(obj) {
+ return obj instanceof AssertLevel;
+ }
+
+ return {
+ ALWAYS: new AssertLevel(0),
+ OWN_COLL: new AssertLevel(1),
+ OWN_DB: new AssertLevel(2),
+ isAssertLevel: isAssertLevel
+ };
+
+})();
+
+if (typeof globalAssertLevel === 'undefined') {
+ var globalAssertLevel = AssertLevel.ALWAYS;
+}
+
+var assertWithLevel = function(level) {
+ assert(AssertLevel.isAssertLevel(level), 'expected AssertLevel as first argument');
+
+ function quietlyDoAssert(msg) {
+ // eval if msg is a function
+ if (typeof msg === 'function') {
+ msg = msg();
+ }
+
+ throw new Error(msg);
+ }
+
+ function wrapAssertFn(fn, args) {
+ var doassertSaved = doassert;
+ try {
+ doassert = quietlyDoAssert;
+ fn.apply(assert, args); // functions typically get called on 'assert'
+ } finally {
+ doassert = doassertSaved;
+ }
+ }
+
+ var assertWithLevel = function() {
+ // Only execute assertion if level for which it was defined is
+ // a subset of the global assertion level
+ if (level.compareTo(globalAssertLevel) > 0) {
+ return;
+ }
+
+ if (arguments.length === 1 && typeof arguments[0] === 'function') {
+ // Assert against the value returned by the function
+ arguments[0] = arguments[0]();
+
+ // If a function does not explicitly return a value,
+ // then have it implicitly return true
+ if (typeof arguments[0] === 'undefined') {
+ arguments[0] = true;
+ }
+ }
+
+ wrapAssertFn(assert, arguments);
+ };
+
+ Object.keys(assert).forEach(function(fn) {
+ if (typeof assert[fn] !== 'function') {
+ return;
+ }
+
+ assertWithLevel[fn] = function() {
+ // Only execute assertion if level for which it was defined is
+ // a subset of the global assertion level
+ if (level.compareTo(globalAssertLevel) > 0) {
+ return;
+ }
+
+ wrapAssertFn(assert[fn], arguments);
+ };
+ });
+
+ return assertWithLevel;
+};
+
+var assertAlways = assertWithLevel(AssertLevel.ALWAYS);
+var assertWhenOwnColl = assertWithLevel(AssertLevel.OWN_COLL);
+var assertWhenOwnDB = assertWithLevel(AssertLevel.OWN_DB);
diff --git a/jstests/parallel/fsm_libs/composer.js b/jstests/parallel/fsm_libs/composer.js
new file mode 100644
index 00000000000..c7e2e226e73
--- /dev/null
+++ b/jstests/parallel/fsm_libs/composer.js
@@ -0,0 +1,77 @@
+load('jstests/parallel/fsm_libs/fsm.js');
+
+var composer = (function() {
+
+ function runCombinedFSM(workloads, configs, mixProb) {
+ // TODO: what if a workload depends on iterations?
+ var iterations = 100;
+
+ assert.eq(AssertLevel.ALWAYS, globalAssertLevel,
+ 'global assertion level is not set as ALWAYS');
+
+ var currentWorkload = getRandomElem(workloads, Random.rand());
+ var currentState = configs[currentWorkload].startState;
+
+ var myDB, collName;
+ var first = true;
+ workloads.forEach(function(workload) {
+ var args = configs[workload];
+ if (!first) {
+ assert.eq(myDB, args.db, 'expected all workloads to use same database');
+ assert.eq(collName, args.collName,
+ 'expected all workloads to use same collection');
+ }
+ myDB = args.db;
+ collName = args.collName;
+ first = false;
+
+ if (workload !== currentWorkload) {
+ args.states[args.startState].call(args.data, myDB, collName);
+ }
+ });
+
+ // Runs an interleaving of the specified workloads
+ for (var i = 0; i < iterations; ++i) {
+ var args = configs[currentWorkload];
+ args.states[currentState].call(args.data, myDB, collName);
+
+ // Transition to another valid state of the current workload,
+ // with probability '1 - mixProb'
+ if (Random.rand() >= mixProb) {
+ var nextState = fsm._getWeightedRandomChoice(args.transitions[currentState],
+ Random.rand());
+ currentState = nextState;
+ continue;
+ }
+
+ // Transition to a state of another workload with probability 'mixProb'
+ var otherStates = [];
+ workloads.forEach(function(workload) {
+ if (workload === currentWorkload) {
+ return;
+ }
+
+ var args = configs[workload];
+ Object.keys(args.states).forEach(function(state) {
+ if (state !== args.startState) {
+ otherStates.push({ workload: workload, state: state });
+ }
+ });
+ });
+
+ var next = getRandomElem(otherStates, Random.rand());
+ currentWorkload = next.workload;
+ currentState = next.state;
+ }
+ }
+
+ function getRandomElem(items, randVal) {
+ assert.gt(items.length, 0);
+ return items[Math.floor(randVal * items.length)];
+ }
+
+ return {
+ run: runCombinedFSM
+ };
+
+})();
diff --git a/jstests/parallel/fsm_libs/fsm.js b/jstests/parallel/fsm_libs/fsm.js
new file mode 100644
index 00000000000..e8ce2e27317
--- /dev/null
+++ b/jstests/parallel/fsm_libs/fsm.js
@@ -0,0 +1,59 @@
+var fsm = (function() {
+ // args.data = 'this' object of the state functions
+ // args.db = database object
+ // args.collName = collection name
+ // args.startState = name of initial state function
+ // args.states = state functions of the form
+ // { stateName: function(db, collName) { ... } }
+ // args.transitions = transitions between state functions of the form
+ // { stateName: { nextState1: probability,
+ // nextState2: ... } }
+ // args.iterations = number of iterations to run the FSM for
+ function runFSM(args) {
+ var currentState = args.startState;
+ for (var i = 0; i < args.iterations; ++i) {
+ args.states[currentState].call(args.data, args.db, args.collName);
+ var nextState = getWeightedRandomChoice(args.transitions[currentState], Random.rand());
+ currentState = nextState;
+ }
+ }
+
+ // doc = document of the form
+ // { nextState1: probability, nextState2: ... }
+ // randVal = a value on the interval [0, 1)
+ // returns a state, weighted by its probability,
+ // assuming randVal was chosen randomly by the caller
+ function getWeightedRandomChoice(doc, randVal) {
+ assert.gte(randVal, 0);
+ assert.lt(randVal, 1);
+
+ var states = Object.keys(doc);
+ assert.gt(states.length, 0, "transition must have at least one state to transition to");
+
+ // weights = [ 0.25, 0.5, 0.25 ]
+ // => accumulated = [ 0.25, 0.75, 1 ]
+ var weights = states.map(function(k) { return doc[k]; });
+
+ var accumulated = [];
+ var sum = weights.reduce(function(a, b, i) {
+ accumulated[i] = a + b;
+ return accumulated[i];
+ }, 0);
+
+ // Scale the random value by the sum of the weights
+ randVal *= sum; // ~ U[0, sum)
+
+ // Find the state corresponding to randVal
+ for (var i = 0; i < accumulated.length; ++i) {
+ if (randVal < accumulated[i]) {
+ return states[i];
+ }
+ }
+ assert(false, 'not reached');
+ }
+
+ return {
+ run: runFSM,
+ _getWeightedRandomChoice: getWeightedRandomChoice
+ };
+})();
diff --git a/jstests/parallel/fsm_libs/runner.js b/jstests/parallel/fsm_libs/runner.js
new file mode 100644
index 00000000000..e847feef69a
--- /dev/null
+++ b/jstests/parallel/fsm_libs/runner.js
@@ -0,0 +1,586 @@
+load('jstests/libs/parallelTester.js');
+load('jstests/parallel/fsm_libs/assert.js');
+load('jstests/parallel/fsm_libs/utils.js');
+load('jstests/parallel/fsm_libs/worker_thread.js');
+
+
+/** extendWorkload usage:
+ *
+ * $config = extendWorkload($config, function($config, $super) {
+ * // ... modify $config ...
+ * $config.foo = function() { // override a method
+ * $super.foo.call(this, arguments); // call super
+ * };
+ * return $config;
+ * });
+ */
+function extendWorkload($config, callback) {
+ assert.eq(2, arguments.length,
+ "extendWorkload must be called with 2 arguments: $config and callback");
+ assert.eq('function', typeof callback,
+ "2nd argument to extendWorkload must be a callback");
+ assert.eq(2, callback.length,
+ "2nd argument to extendWorkload must take 2 arguments: $config and $super");
+ var parsedSuperConfig = parseConfig($config);
+ var childConfig = Object.extend({}, parsedSuperConfig, true);
+ return callback(childConfig, parsedSuperConfig);
+}
+
+function runWorkloadsSerially(workloads, clusterOptions) {
+ if (typeof workloads === 'string') {
+ workloads = [workloads];
+ }
+ assert.gt(workloads.length, 0);
+ workloads.forEach(function(workload) {
+ // 'workload' is a JS file expected to set the global $config variable to an object.
+ load(workload);
+ assert.neq(typeof $config, 'undefined');
+
+ _runWorkload(workload, $config, clusterOptions);
+ });
+}
+
+function runWorkloadsInParallel(workloads, clusterOptions) {
+ assert.gt(workloads.length, 0);
+
+ var context = {};
+ workloads.forEach(function(workload) {
+ // 'workload' is a JS file expected to set the global $config variable to an object.
+ load(workload);
+ assert.neq(typeof $config, 'undefined');
+ context[workload] = { config: $config };
+ });
+
+ _runAllWorkloads(workloads, context, clusterOptions);
+}
+
+function runMixtureOfWorkloads(workloads, clusterOptions) {
+ assert.gt(workloads.length, 0);
+
+ var context = {};
+ workloads.forEach(function(workload) {
+ // 'workload' is a JS file expected to set the global $config variable to an object.
+ load(workload);
+ assert.neq(typeof $config, 'undefined');
+ context[workload] = { config: $config };
+ });
+
+ clusterOptions = Object.extend({}, clusterOptions, true); // defensive deep copy
+ clusterOptions.sameDB = true;
+ clusterOptions.sameCollection = true;
+
+ var cluster = setupCluster(clusterOptions, 'fakedb');
+ globalAssertLevel = AssertLevel.ALWAYS;
+
+ var cleanup = [];
+ var errors = [];
+
+ try {
+ prepareCollections(workloads, context, cluster, clusterOptions);
+ cleanup = setUpWorkloads(workloads, context);
+
+ var threads = makeAllThreads(workloads, context, clusterOptions, true);
+
+ joinThreads(threads).forEach(function(err) {
+ errors.push(err);
+ });
+
+ } finally {
+ // TODO: does order of calling 'config.teardown' matter?
+ cleanup.forEach(function(teardown) {
+ try {
+ teardown.fn.call(teardown.data, teardown.db, teardown.collName);
+ } catch (err) {
+ print('Teardown function threw an exception:\n' + err.stack);
+ }
+ });
+
+ cluster.teardown();
+ }
+
+ throwError(errors);
+}
+
+// Validate the config object and return a normalized copy of it.
+// Normalized means all optional parameters are set to their default values,
+// and any parameters that need to be coerced have been coerced.
+function parseConfig(config) {
+ // make a deep copy so we can mutate config without surprising the caller
+ config = Object.extend({}, config, true);
+ var allowedKeys = [
+ 'data',
+ 'iterations',
+ 'setup',
+ 'startState',
+ 'states',
+ 'teardown',
+ 'threadCount',
+ 'transitions'
+ ];
+ Object.keys(config).forEach(function(k) {
+ assert.gte(allowedKeys.indexOf(k), 0,
+ "invalid config parameter: " + k + ". valid parameters are: " +
+ tojson(allowedKeys));
+ });
+
+ assert.eq('number', typeof config.threadCount);
+
+ assert.eq('number', typeof config.iterations);
+
+ config.startState = config.startState || 'init';
+ assert.eq('string', typeof config.startState);
+
+ assert.eq('object', typeof config.states);
+ assert.gt(Object.keys(config.states).length, 0);
+ Object.keys(config.states).forEach(function(k) {
+ assert.eq('function', typeof config.states[k],
+ "config.states." + k + " is not a function");
+ assert.eq(2, config.states[k].length,
+ "state functions should accept 2 parameters: db and collName");
+ });
+
+ // assert all states mentioned in config.transitions are present in config.states
+ assert.eq('object', typeof config.transitions);
+ assert.gt(Object.keys(config.transitions).length, 0);
+ Object.keys(config.transitions).forEach(function(fromState) {
+ assert(config.states.hasOwnProperty(fromState),
+ "config.transitions contains a state not in config.states: " + fromState);
+
+ assert.gt(Object.keys(config.transitions[fromState]).length, 0);
+ Object.keys(config.transitions[fromState]).forEach(function(toState) {
+ assert(config.states.hasOwnProperty(toState),
+ "config.transitions." + fromState +
+ " contains a state not in config.states: " + toState);
+ assert.eq('number', typeof config.transitions[fromState][toState],
+ "transitions." + fromState + "." + toState + " should be a number");
+ });
+ });
+
+ config.setup = config.setup || function(){};
+ assert.eq('function', typeof config.setup);
+
+ config.teardown = config.teardown || function(){};
+ assert.eq('function', typeof config.teardown);
+
+ config.data = config.data || {};
+ assert.eq('object', typeof config.data);
+
+ return config;
+}
+
+function setupCluster(clusterOptions, dbName) {
+ var cluster = {};
+
+ var allowedKeys = [
+ 'masterSlave',
+ 'replication',
+ 'sameCollection',
+ 'sameDB',
+ 'seed',
+ 'sharded'
+ ];
+ Object.keys(clusterOptions).forEach(function(opt) {
+ assert(0 <= allowedKeys.indexOf(opt),
+ "invalid option: " + tojson(opt) + ". valid options are: " + tojson(allowedKeys));
+ });
+
+ var verbosityLevel = 1;
+ if (clusterOptions.sharded) {
+ // TODO: allow 'clusterOptions' to specify the number of shards
+ var shardConfig = {
+ shards: 2,
+ mongos: 1,
+ verbose: verbosityLevel
+ };
+
+ // TODO: allow 'clusterOptions' to specify an 'rs' config
+ if (clusterOptions.replication) {
+ shardConfig.rs = {
+ nodes: 3,
+ verbose: verbosityLevel
+ };
+ }
+
+ var st = new ShardingTest(shardConfig);
+ st.stopBalancer();
+ var mongos = st.s;
+
+ clusterOptions.addr = mongos.host;
+ cluster.db = mongos.getDB(dbName);
+ cluster.shardCollection = function() {
+ st.shardColl.apply(st, arguments);
+ };
+ cluster.teardown = function() {
+ st.stop();
+ };
+ } else if (clusterOptions.replication) {
+ // TODO: allow 'clusterOptions' to specify the number of nodes
+ var replSetConfig = {
+ nodes: 3,
+ nodeOptions: { verbose: verbosityLevel }
+ };
+
+ var rst = new ReplSetTest(replSetConfig);
+ rst.startSet();
+
+ // Send the replSetInitiate command and wait for initiation
+ rst.initiate();
+ rst.awaitSecondaryNodes();
+
+ var primary = rst.getPrimary();
+
+ clusterOptions.addr = primary.host;
+ cluster.db = primary.getDB(dbName);
+ cluster.teardown = function() {
+ rst.stopSet();
+ };
+ } else if (clusterOptions.masterSlave) {
+ var rt = new ReplTest('replTest');
+
+ var master = rt.start(true);
+ var slave = rt.start(false);
+
+ master.adminCommand({ setParameter: 1, logLevel: verbosityLevel });
+ slave.adminCommand({ setParameter: 1, logLevel: verbosityLevel });
+
+ clusterOptions.addr = master.host;
+ cluster.db = master.getDB(dbName);
+ cluster.teardown = function() {
+ rt.stop();
+ };
+ } else { // standalone server
+ cluster.db = db.getSiblingDB(dbName);
+ cluster.db.adminCommand({ setParameter: 1, logLevel: verbosityLevel });
+ cluster.teardown = function() {};
+ }
+
+ return cluster;
+}
+
+function _runWorkload(workload, config, clusterOptions) {
+ var context = {};
+ context[workload] = { config: config };
+ _runAllWorkloads([workload], context, clusterOptions);
+}
+
+// TODO: give this function a more descriptive name?
+// Calls the 'config.setup' function for each workload, and returns
+// an array of 'config.teardown' functions to execute with the appropriate
+// arguments. Note that the implementation relies on having 'db' and 'collName'
+// set as properties on context[workload].
+function setUpWorkloads(workloads, context) {
+ return workloads.map(function(workload) {
+ var myDB = context[workload].db;
+ var collName = context[workload].collName;
+
+ var config = context[workload].config;
+ config = parseConfig(config);
+ config.setup.call(config.data, myDB, collName);
+
+ return {
+ fn: config.teardown,
+ data: config.data,
+ db: myDB,
+ collName: collName
+ };
+ });
+}
+
+function prepareCollections(workloads, context, cluster, clusterOptions) {
+ var dbName, collName, myDB;
+ var firstWorkload = true;
+
+ // Clean up the state left behind by other tests in the parallel suite
+ // to avoid having too many open files
+ db.dropDatabase();
+
+ workloads.forEach(function(workload) {
+ if (firstWorkload || !clusterOptions.sameCollection) {
+ if (firstWorkload || !clusterOptions.sameDB) {
+ dbName = uniqueDBName();
+ }
+ collName = uniqueCollName();
+
+ myDB = cluster.db.getSiblingDB(dbName);
+ myDB[collName].drop();
+
+ if (clusterOptions.sharded) {
+ // TODO: allow 'clusterOptions' to specify the shard key and split
+ cluster.shardCollection(myDB[collName], { _id: 'hashed' }, false);
+ }
+ }
+
+ context[workload].db = myDB;
+ context[workload].dbName = dbName;
+ context[workload].collName = collName;
+
+ firstWorkload = false;
+ });
+}
+
+/* This is the function that most other run*Workload* functions delegate to.
+ * It takes an array of workload filenames and runs them all in parallel.
+ *
+ * TODO: document the other two parameters
+ */
+function _runAllWorkloads(workloads, context, clusterOptions) {
+ clusterOptions = Object.extend({}, clusterOptions, true); // defensive deep copy
+ var cluster = setupCluster(clusterOptions, 'fakedb');
+
+ // Determine how strong to make assertions while simultaneously executing different workloads
+ var assertLevel = AssertLevel.OWN_DB;
+ if (clusterOptions.sameDB) {
+ // The database is shared by multiple workloads, so only make the asserts
+ // that apply when the collection is owned by an individual workload
+ assertLevel = AssertLevel.OWN_COLL;
+ }
+ if (clusterOptions.sameCollection) {
+ // The collection is shared by multiple workloads, so only make the asserts
+ // that always apply
+ assertLevel = AssertLevel.ALWAYS;
+ }
+ globalAssertLevel = assertLevel;
+
+ var cleanup = [];
+ var errors = [];
+
+ try {
+ prepareCollections(workloads, context, cluster, clusterOptions);
+ cleanup = setUpWorkloads(workloads, context);
+
+ var threads = makeAllThreads(workloads, context, clusterOptions, false);
+
+ joinThreads(threads).forEach(function(err) {
+ errors.push(err);
+ });
+ } finally {
+ // TODO: does order of calling 'config.teardown' matter?
+ cleanup.forEach(function(teardown) {
+ try {
+ teardown.fn.call(teardown.data, teardown.db, teardown.collName);
+ } catch (err) {
+ print('Teardown function threw an exception:\n' + err.stack);
+ }
+ });
+
+ cluster.teardown();
+ }
+
+ throwError(errors);
+}
+
+function makeAllThreads(workloads, context, clusterOptions, compose) {
+ var threadFn, getWorkloads;
+ if (compose) {
+ // Worker threads need to load() all workloads when composed
+ threadFn = workerThread.composed;
+ getWorkloads = function() { return workloads; };
+ } else {
+ // Worker threads only need to load() the specified workload
+ threadFn = workerThread.fsm;
+ getWorkloads = function(workload) { return [workload]; };
+ }
+
+ function sumRequestedThreads() {
+ return Array.sum(workloads.map(function(wl) {
+ return context[wl].config.threadCount;
+ }));
+ }
+
+ // TODO: pick a better cap for maximum allowed threads?
+ var maxAllowedThreads = 100;
+ var requestedNumThreads = sumRequestedThreads();
+ if (requestedNumThreads > maxAllowedThreads) {
+ print('\n\ntoo many threads requested: ' + requestedNumThreads);
+ // Scale down the requested '$config.threadCount' values to make
+ // them sum to less than 'maxAllowedThreads'
+ var factor = maxAllowedThreads / requestedNumThreads;
+ workloads.forEach(function(workload) {
+ var threadCount = context[workload].config.threadCount;
+ threadCount = Math.floor(factor * threadCount);
+ threadCount = Math.max(1, threadCount); // ensure workload is executed
+ context[workload].config.threadCount = threadCount;
+ });
+ }
+ var numThreads = sumRequestedThreads();
+ print('using num threads: ' + numThreads);
+ assert.lte(numThreads, maxAllowedThreads);
+
+ var latch = new CountDownLatch(numThreads);
+
+ var threads = [];
+
+ jsTest.log(workloads.join('\n'));
+ Random.setRandomSeed(clusterOptions.seed);
+
+ var tid = 0;
+ workloads.forEach(function(workload) {
+ var workloadsToLoad = getWorkloads(workload);
+ var config = context[workload].config;
+
+ for (var i = 0; i < config.threadCount; ++i) {
+ var args = {
+ tid: tid++,
+ latch: latch,
+ dbName: context[workload].dbName,
+ collName: context[workload].collName,
+ clusterOptions: clusterOptions,
+ seed: Random.randInt(1e13), // contains range of Date.getTime()
+ globalAssertLevel: globalAssertLevel
+ };
+
+ // Wrap threadFn with try/finally to make sure it always closes the db connection
+ // that is implicitly created within the thread's scope.
+ var guardedThreadFn = function(threadFn, args) {
+ try {
+ return threadFn.apply(this, args);
+ } finally {
+ db = null;
+ gc();
+ }
+ };
+
+ var t = new ScopedThread(guardedThreadFn, threadFn, [workloadsToLoad, args]);
+ threads.push(t);
+ t.start();
+
+ // Wait a little before starting the next thread
+ // to avoid creating new connections at the same time
+ sleep(10);
+ }
+ });
+
+ var failedThreadIndexes = [];
+ while (latch.getCount() > 0) {
+ threads.forEach(function(t, i) {
+ if (t.hasFailed() && !Array.contains(failedThreadIndexes, i)) {
+ failedThreadIndexes.push(i);
+ latch.countDown();
+ }
+ });
+
+ sleep(100);
+ }
+
+ var failedThreads = failedThreadIndexes.length;
+ if (failedThreads > 0) {
+ print(failedThreads + ' thread(s) threw a JS or C++ exception while spawning');
+ }
+
+ var allowedFailure = 0.2;
+ if (failedThreads / numThreads > allowedFailure) {
+ throw new Error('Too many worker threads failed to spawn - aborting');
+ }
+
+ return threads;
+}
+
+function joinThreads(workerThreads) {
+ var workerErrs = [];
+
+ workerThreads.forEach(function(t) {
+ t.join();
+
+ var data = t.returnData();
+ if (data && !data.ok) {
+ workerErrs.push(data);
+ }
+ });
+
+ return workerErrs;
+}
+
+function throwError(workerErrs) {
+
+ // Returns an array containing all unique values from the specified array
+ // and their corresponding number of occurrences in the original array.
+ function freqCount(arr) {
+ var unique = [];
+ var freqs = [];
+
+ arr.forEach(function(item) {
+ var i = unique.indexOf(item);
+ if (i < 0) {
+ unique.push(item);
+ freqs.push(1);
+ } else {
+ freqs[i]++;
+ }
+ });
+
+ return unique.map(function(value, i) {
+ return { value: value, freq: freqs[i] };
+ });
+ }
+
+ // Indents a multiline string with the specified number of spaces.
+ function indent(str, size) {
+ var prefix = new Array(size + 1).join(' ');
+ return prefix + str.split('\n').join('\n' + prefix);
+ }
+
+ function pluralize(str, num) {
+ var suffix = num > 1 ? 's' : '';
+ return num + ' ' + str + suffix;
+ }
+
+ function prepareMsg(stackTraces) {
+ var uniqueTraces = freqCount(stackTraces);
+ var numUniqueTraces = uniqueTraces.length;
+
+ // Special case message when threads all have the same trace
+ if (numUniqueTraces === 1) {
+ return pluralize('thread', stackTraces.length) + ' threw\n\n' +
+ indent(uniqueTraces[0].value, 8);
+ }
+
+ var summary = pluralize('thread', stackTraces.length) + ' threw ' +
+ numUniqueTraces + ' different exceptions:\n\n';
+
+ return summary + uniqueTraces.map(function(obj) {
+ var line = pluralize('thread', obj.freq) + ' threw\n';
+ return indent(line + obj.value, 8);
+ }).join('\n\n');
+ }
+
+ if (workerErrs.length > 0) {
+ var stackTraces = workerErrs.map(function(e) {
+ return e.stack || e.err;
+ });
+
+ var err = new Error(prepareMsg(stackTraces) + '\n');
+
+ // Avoid having any stack traces omitted from the logs
+ var maxLogLine = 10 * 1024; // 10KB
+
+ // Check if the combined length of the error message and the stack traces
+ // exceeds the maximum line-length the shell will log
+ if (err.stack.length >= maxLogLine) {
+ print(err.stack);
+ throw new Error('stack traces would have been snipped, see logs');
+ }
+
+ throw err;
+ }
+}
+
+workerThread.fsm = function(workloads, args) {
+ load('jstests/parallel/fsm_libs/worker_thread.js'); // for workerThread.main
+ load('jstests/parallel/fsm_libs/fsm.js'); // for fsm.run
+
+ return workerThread.main(workloads, args, function(configs) {
+ var workloads = Object.keys(configs);
+ assert.eq(1, workloads.length);
+ fsm.run(configs[workloads[0]]);
+ });
+};
+
+workerThread.composed = function(workloads, args) {
+ load('jstests/parallel/fsm_libs/worker_thread.js'); // for workerThread.main
+ load('jstests/parallel/fsm_libs/composer.js'); // for composer.run
+
+ return workerThread.main(workloads, args, function(configs) {
+ // TODO: make mixing probability configurable
+ composer.run(workloads, configs, 0.1);
+ });
+};
diff --git a/jstests/parallel/fsm_libs/utils.js b/jstests/parallel/fsm_libs/utils.js
new file mode 100644
index 00000000000..a9fa8fe0603
--- /dev/null
+++ b/jstests/parallel/fsm_libs/utils.js
@@ -0,0 +1,19 @@
+// Returns a unique database name:
+// db0, db1, ...
+// The counter is private to this closure, so names are only unique
+// within a single shell invocation.
+var uniqueDBName = (function() {
+ var i = 0;
+
+ return function() {
+ return 'db' + i++;
+ };
+})();
+
+// Returns a unique collection name:
+// coll0, coll1, ...
+// The counter is private to this closure, so names are only unique
+// within a single shell invocation.
+var uniqueCollName = (function() {
+ var i = 0;
+
+ return function() {
+ return 'coll' + i++;
+ };
+})();
diff --git a/jstests/parallel/fsm_libs/worker_thread.js b/jstests/parallel/fsm_libs/worker_thread.js
new file mode 100644
index 00000000000..01dfc29f694
--- /dev/null
+++ b/jstests/parallel/fsm_libs/worker_thread.js
@@ -0,0 +1,74 @@
+var workerThread = (function() {
+
+ // workloads = list of workload filenames
+ // args.tid = the thread identifier
+ // args.latch = the CountDownLatch instance for starting all threads
+ // args.dbName = the database name
+ // args.collName = the collection name
+ // args.clusterOptions = the configuration of the cluster
+ // args.seed = seed for the random number generator
+ // args.globalAssertLevel = the global assertion level to use
+ // run = callback that takes a map of workloads to their associated $config
+ //
+ // Returns { ok: 1 } if the run callback completed, or
+ // { ok: 0, err: ..., stack: ... } if it threw after all threads started.
+ function main(workloads, args, run) {
+ var myDB;
+ var configs = {};
+
+ try {
+ load('jstests/parallel/fsm_libs/assert.js');
+ globalAssertLevel = args.globalAssertLevel;
+
+ if (args.clusterOptions.addr) {
+ // We won't use the implicit db connection created within the thread's scope, so
+ // forcibly clean it up before creating a new connection.
+ db = null;
+ gc();
+
+ myDB = new Mongo(args.clusterOptions.addr).getDB(args.dbName);
+ } else {
+ myDB = db.getSiblingDB(args.dbName);
+ }
+
+ // Each worker re-loads the workload files and re-parses $config
+ // here; only the values passed in via `args` come from the parent
+ // shell, so parent-side mutations of $config are not visible.
+ load('jstests/parallel/fsm_libs/runner.js'); // for parseConfig
+ workloads.forEach(function(workload) {
+ load(workload);
+ var config = parseConfig($config); // to normalize
+ config.data.tid = args.tid;
+ configs[workload] = {
+ data: config.data,
+ db: myDB,
+ collName: args.collName,
+ startState: config.startState,
+ states: config.states,
+ transitions: config.transitions,
+ iterations: config.iterations
+ };
+ });
+
+ args.latch.countDown();
+
+ // Converts any exceptions to a return status. In order for the
+ // parent thread to call countDown() on our behalf, we must throw
+ // an exception. Nothing prior to (and including) args.latch.countDown()
+ // should be wrapped in a try/catch statement.
+ try {
+ args.latch.await(); // wait for all threads to start
+
+ Random.setRandomSeed(args.seed);
+ run(configs);
+ return { ok: 1 };
+ } catch(e) {
+ return { ok: 0, err: e.toString(), stack: e.stack };
+ }
+ } finally {
+ // Avoid retention of connection object
+ configs = null;
+ myDB = null;
+ gc();
+ }
+ }
+
+ return {
+ main: main
+ };
+
+})();
diff --git a/jstests/parallel/fsm_selftests.js b/jstests/parallel/fsm_selftests.js
new file mode 100644
index 00000000000..910c4916be6
--- /dev/null
+++ b/jstests/parallel/fsm_selftests.js
@@ -0,0 +1,39 @@
+/*
+ * This file tests the FSM test framework.
+ */
+
+load('jstests/parallel/fsm_libs/fsm.js');
+
+(function() {
+ var getWeightedRandomChoice = fsm._getWeightedRandomChoice;
+
+ // Transition weights; they sum to 1 and carve [0, 1) into the
+ // intervals a = [0, 0.25), b = [0.25, 0.75), c = [0.75, 1).
+ var doc = {
+ a: 0.25,
+ b: 0.5,
+ c: 0.25
+ };
+
+ // NOTE: getWeightedRandomChoice calls assert internally, so it will print stack traces
+ // when assert.throws executes
+ // The supplied random value must lie in [0, 1).
+ assert.throws(function() { getWeightedRandomChoice(doc, -1); }, [],
+ 'should reject negative values');
+ assert.throws(function() { getWeightedRandomChoice(doc, 1); }, [],
+ 'should reject values == 1');
+ assert.throws(function() { getWeightedRandomChoice(doc, 2); }, [],
+ 'should reject values > 1');
+
+ assert.throws(function() { getWeightedRandomChoice({}, 0.0); }, [],
+ 'cannot choose from zero states');
+ assert.throws(function() { getWeightedRandomChoice({}, 0.5); }, [],
+ 'cannot choose from zero states');
+ assert.throws(function() { getWeightedRandomChoice({}, 0.99); }, [],
+ 'cannot choose from zero states');
+
+ // Boundary checks: each interval is closed at its low end and open
+ // at its high end.
+ assert.eq('a', getWeightedRandomChoice(doc, 0.00), '0');
+ assert.eq('a', getWeightedRandomChoice(doc, 0.24), '1');
+ assert.eq('b', getWeightedRandomChoice(doc, 0.25), '2');
+ assert.eq('b', getWeightedRandomChoice(doc, 0.50), '3');
+ assert.eq('b', getWeightedRandomChoice(doc, 0.74), '4');
+ assert.eq('c', getWeightedRandomChoice(doc, 0.75), '5');
+ assert.eq('c', getWeightedRandomChoice(doc, 0.99), '6');
+})();
diff --git a/jstests/parallel/fsm_workload_helpers/indexed_noindex.js b/jstests/parallel/fsm_workload_helpers/indexed_noindex.js
new file mode 100644
index 00000000000..7986bfd79cd
--- /dev/null
+++ b/jstests/parallel/fsm_workload_helpers/indexed_noindex.js
@@ -0,0 +1,17 @@
+/**
+ * indexed_noindex.js
+ *
+ * Defines a modifier for indexed workloads that drops the index, specified by
+ * $config.data.getIndexSpec(), at the end of the workload setup.
+ *
+ * Intended for use with extendWorkload: the returned $config runs the
+ * original setup and then removes the index, so the same states execute
+ * against an unindexed collection.
+ */
+function indexedNoindex($config, $super) {
+
+ $config.setup = function(db, collName) {
+ // Run the original workload's setup first (it creates the index).
+ $super.setup.apply(this, arguments);
+
+ // dropIndex must succeed, i.e. the index really existed.
+ var res = db[collName].dropIndex(this.getIndexSpec());
+ assertAlways.commandWorked(res);
+ };
+
+ return $config;
+}
diff --git a/jstests/parallel/fsm_workloads/findAndModify_inc.js b/jstests/parallel/fsm_workloads/findAndModify_inc.js
new file mode 100644
index 00000000000..125b7d4f474
--- /dev/null
+++ b/jstests/parallel/fsm_workloads/findAndModify_inc.js
@@ -0,0 +1,59 @@
+/**
+ * findAndModify_inc.js
+ *
+ * Inserts a single document into a collection. Each thread performs a
+ * findAndModify command to select the document and increment a particular
+ * field. Asserts that the field has the correct value based on the number
+ * of increments performed.
+ *
+ * This workload was designed to reproduce SERVER-15892.
+ */
+var $config = (function() {
+
+ var states = {
+
+ init: function init(db, collName) {
+ // Each thread increments its own field of the shared document,
+ // so the per-thread count can be verified independently.
+ this.fieldName = 't' + this.tid;
+ this.count = 0;
+ },
+
+ update: function update(db, collName) {
+ var updateDoc = { $inc: {} };
+ updateDoc.$inc[this.fieldName] = 1;
+ db[collName].findAndModify({
+ query: { _id: 'findAndModify_inc' },
+ update: updateDoc
+ });
+ ++this.count;
+ },
+
+ find: function find(db, collName) {
+ var docs = db[collName].find().toArray();
+ assertWhenOwnColl.eq(1, docs.length);
+ // NOTE(review): a bound closure is passed to assertWhenOwnColl;
+ // assumes the wrapper invokes the function only when the workload
+ // owns the collection — confirm against fsm_libs/assert.js.
+ assertWhenOwnColl((function() {
+ var doc = docs[0];
+ assertWhenOwnColl.eq(this.count, doc[this.fieldName]);
+ }).bind(this));
+ }
+
+ };
+
+ var transitions = {
+ init: { update: 1 },
+ update: { find: 1 },
+ find: { update: 1 }
+ };
+
+ function setup(db, collName) {
+ // The single document that every thread targets with findAndModify.
+ db[collName].insert({ _id: 'findAndModify_inc' });
+ }
+
+ return {
+ threadCount: 30,
+ iterations: 100,
+ states: states,
+ transitions: transitions,
+ setup: setup
+ };
+
+})();
diff --git a/jstests/parallel/fsm_workloads/indexed_insert_1char.js b/jstests/parallel/fsm_workloads/indexed_insert_1char.js
new file mode 100644
index 00000000000..3e358bbfca3
--- /dev/null
+++ b/jstests/parallel/fsm_workloads/indexed_insert_1char.js
@@ -0,0 +1,20 @@
+/**
+ * indexed_insert_1char.js
+ *
+ * Inserts multiple documents into an indexed collection. Asserts that all
+ * documents appear in both a collection scan and an index scan. The indexed
+ * value is a 1-character string based on the thread's id.
+ */
+load('jstests/parallel/fsm_libs/runner.js'); // for extendWorkload
+load('jstests/parallel/fsm_workloads/indexed_insert_base.js'); // for $config
+
+var $config = extendWorkload($config, function($config, $super) {
+
+ $config.states.init = function(db, collName) {
+ $super.states.init.apply(this, arguments);
+
+ // Map the tid onto a single printable character starting at '!'
+ // (char code 33), so each thread gets a distinct 1-char value.
+ this.indexedValue = String.fromCharCode(33 + this.tid);
+ };
+
+ return $config;
+});
diff --git a/jstests/parallel/fsm_workloads/indexed_insert_1char_noindex.js b/jstests/parallel/fsm_workloads/indexed_insert_1char_noindex.js
new file mode 100644
index 00000000000..70c70c7e8d8
--- /dev/null
+++ b/jstests/parallel/fsm_workloads/indexed_insert_1char_noindex.js
@@ -0,0 +1,10 @@
+/**
+ * indexed_insert_1char_noindex.js
+ *
+ * Executes the indexed_insert_1char.js workload after dropping its index
+ * (by applying the indexedNoindex modifier to its setup).
+ */
+load('jstests/parallel/fsm_libs/runner.js'); // for extendWorkload
+load('jstests/parallel/fsm_workloads/indexed_insert_1char.js'); // for $config
+load('jstests/parallel/fsm_workload_helpers/indexed_noindex.js'); // for indexedNoindex
+
+var $config = extendWorkload($config, indexedNoindex);
diff --git a/jstests/parallel/fsm_workloads/indexed_insert_base.js b/jstests/parallel/fsm_workloads/indexed_insert_base.js
new file mode 100644
index 00000000000..09566b8b04b
--- /dev/null
+++ b/jstests/parallel/fsm_workloads/indexed_insert_base.js
@@ -0,0 +1,65 @@
+/**
+ * indexed_insert_base.js
+ *
+ * Inserts multiple documents into an indexed collection. Asserts that all
+ * documents appear in both a collection scan and an index scan. The indexed
+ * value is the thread's id.
+ */
+var $config = (function() {
+
+ var states = {
+ init: function init(db, collName) {
+ // nInserted counts only this thread's documents; getDoc() values
+ // are unique per thread (indexedValue === tid), so the find()
+ // counts below are per-thread as well.
+ this.nInserted = 0;
+ this.indexedValue = this.tid;
+ },
+
+ insert: function insert(db, collName) {
+ var res = db[collName].insert(this.getDoc());
+ assertAlways.eq(1, res.nInserted, tojson(res));
+ this.nInserted += this.docsPerInsert;
+ },
+
+ find: function find(db, collName) {
+ // collection scan (sorting on $natural avoids using the index)
+ var count = db[collName].find(this.getDoc()).sort({ $natural: 1 }).itcount();
+ assertWhenOwnColl.eq(count, this.nInserted);
+
+ // index scan (sorting on the index spec forces use of the index)
+ count = db[collName].find(this.getDoc()).sort(this.getIndexSpec()).itcount();
+ assertWhenOwnColl.eq(count, this.nInserted);
+ }
+ };
+
+ var transitions = {
+ init: { insert: 1 },
+ insert: { find: 1 },
+ find: { insert: 1 }
+ };
+
+ function setup(db, collName) {
+ // NOTE(review): assumes setup is invoked with $config.data as `this`
+ // so getIndexSpec() is in scope — confirm against fsm_libs/runner.js.
+ db[collName].ensureIndex(this.getIndexSpec());
+ }
+
+ return {
+ threadCount: 30,
+ iterations: 100,
+ states: states,
+ transitions: transitions,
+ data: {
+ // Ascending index spec over the single indexed field, e.g. { x: 1 }.
+ getIndexSpec: function() {
+ var ixSpec = {};
+ ixSpec[this.indexedField] = 1;
+ return ixSpec;
+ },
+ // Document containing only the indexed field/value pair.
+ getDoc: function() {
+ var doc = {};
+ doc[this.indexedField] = this.indexedValue;
+ return doc;
+ },
+ indexedField: 'x',
+ docsPerInsert: 1
+ },
+ setup: setup
+ };
+
+})();
diff --git a/jstests/parallel/fsm_workloads/indexed_insert_base_noindex.js b/jstests/parallel/fsm_workloads/indexed_insert_base_noindex.js
new file mode 100644
index 00000000000..ebee79c20ca
--- /dev/null
+++ b/jstests/parallel/fsm_workloads/indexed_insert_base_noindex.js
@@ -0,0 +1,10 @@
+/**
+ * indexed_insert_base_noindex.js
+ *
+ * Executes the indexed_insert_base.js workload after dropping its index
+ * (by applying the indexedNoindex modifier to its setup).
+ */
+load('jstests/parallel/fsm_libs/runner.js'); // for extendWorkload
+load('jstests/parallel/fsm_workloads/indexed_insert_base.js'); // for $config
+load('jstests/parallel/fsm_workload_helpers/indexed_noindex.js'); // for indexedNoindex
+
+var $config = extendWorkload($config, indexedNoindex);
diff --git a/jstests/parallel/fsm_workloads/indexed_insert_heterogeneous.js b/jstests/parallel/fsm_workloads/indexed_insert_heterogeneous.js
new file mode 100644
index 00000000000..ee9470d68d4
--- /dev/null
+++ b/jstests/parallel/fsm_workloads/indexed_insert_heterogeneous.js
@@ -0,0 +1,49 @@
+/**
+ * indexed_insert_heterogeneous.js
+ *
+ * Inserts multiple documents into an indexed collection. Asserts that all
+ * documents appear in both a collection scan and an index scan. The indexed
+ * value is a different BSON type, depending on the thread's id.
+ */
+load('jstests/parallel/fsm_libs/runner.js'); // for extendWorkload
+load('jstests/parallel/fsm_workloads/indexed_insert_base.js'); // for $config
+
+var $config = extendWorkload($config, function($config, $super) {
+
+ $config.states.init = function(db, collName) {
+ $super.states.init.apply(this, arguments);
+
+ // prefix str with zeroes to make it have length len
+ function pad(len, str) {
+ var padding = new Array(len + 1).join('0');
+ return (padding + str).slice(-len);
+ }
+
+ // ObjectIds are 24 hex characters, so pad the tid out to that length.
+ function makeOID(tid) {
+ var str = pad(24, tid.toString(16));
+ return ObjectId(str);
+ }
+
+ function makeDate(tid) {
+ var d = ISODate("2000-01-01T00:00:00.000Z");
+ // setSeconds(n) where n >= 60 will just cause the minutes, hours, etc to increase,
+ // so this produces a unique date for each tid
+ d.setSeconds(tid);
+ return d;
+ }
+
+ // One candidate value per BSON type; every candidate embeds the tid,
+ // so no two threads share an indexed value.
+ var choices = [
+ this.tid, // int
+ "" + this.tid, // string
+ this.tid * 0.0001, // float
+ { tid: this.tid }, // subdocument
+ makeOID(this.tid), // objectid
+ makeDate(this.tid), // date
+ new Function('', 'return ' + this.tid + ';') // function
+ ];
+
+ this.indexedValue = choices[this.tid % choices.length];
+ };
+
+ return $config;
+});
diff --git a/jstests/parallel/fsm_workloads/indexed_insert_heterogeneous_noindex.js b/jstests/parallel/fsm_workloads/indexed_insert_heterogeneous_noindex.js
new file mode 100644
index 00000000000..02a31d56356
--- /dev/null
+++ b/jstests/parallel/fsm_workloads/indexed_insert_heterogeneous_noindex.js
@@ -0,0 +1,10 @@
+/**
+ * indexed_insert_heterogeneous_noindex.js
+ *
+ * Executes the indexed_insert_heterogeneous.js workload after dropping its
+ * index (by applying the indexedNoindex modifier to its setup).
+ */
+load('jstests/parallel/fsm_libs/runner.js'); // for extendWorkload
+load('jstests/parallel/fsm_workloads/indexed_insert_heterogeneous.js'); // for $config
+load('jstests/parallel/fsm_workload_helpers/indexed_noindex.js'); // for indexedNoindex
+
+var $config = extendWorkload($config, indexedNoindex);
diff --git a/jstests/parallel/fsm_workloads/indexed_insert_large.js b/jstests/parallel/fsm_workloads/indexed_insert_large.js
new file mode 100644
index 00000000000..fb4f34d2e0a
--- /dev/null
+++ b/jstests/parallel/fsm_workloads/indexed_insert_large.js
@@ -0,0 +1,36 @@
+/**
+ * indexed_insert_large.js
+ *
+ * Inserts multiple documents into an indexed collection. Asserts that all
+ * documents appear in both a collection scan and an index scan. The indexed
+ * value is a string large enough to make the whole index key be 1K, which is
+ * the maximum.
+ */
+load('jstests/parallel/fsm_libs/runner.js'); // for extendWorkload
+load('jstests/parallel/fsm_workloads/indexed_insert_base.js'); // for $config
+
+var $config = extendWorkload($config, function($config, $super) {
+
+ $config.states.init = function(db, collName) {
+ $super.states.init.apply(this, arguments);
+
+ // "The total size of an index entry, which can include structural overhead depending on the
+ // BSON type, must be less than 1024 bytes."
+ // http://docs.mongodb.org/manual/reference/limits/
+ var maxIndexedSize = 1023;
+
+ // Fixed per-document cost of an empty-named string field, so that
+ // bsonsize({ '': v }) === bsonOverhead + v.length for ASCII v.
+ var bsonOverhead = Object.bsonsize({ '': '' });
+
+ var bigstr = new Array(maxIndexedSize + 1).join('x');
+
+ // prefix the big string with tid to make it unique,
+ // then trim it down so that it plus bson overhead is maxIndexedSize
+
+ this.indexedValue = (this.tid + bigstr).slice(0, maxIndexedSize - bsonOverhead);
+
+ assertAlways.eq(maxIndexedSize, Object.bsonsize({ '': this.indexedValue }),
+ "buggy test: the inserted docs won't have the expected index-key size");
+ };
+
+ return $config;
+});
diff --git a/jstests/parallel/fsm_workloads/indexed_insert_large_noindex.js b/jstests/parallel/fsm_workloads/indexed_insert_large_noindex.js
new file mode 100644
index 00000000000..88350e6db08
--- /dev/null
+++ b/jstests/parallel/fsm_workloads/indexed_insert_large_noindex.js
@@ -0,0 +1,10 @@
+/**
+ * indexed_insert_large_noindex.js
+ *
+ * Executes the indexed_insert_large.js workload after dropping its index
+ * (by applying the indexedNoindex modifier to its setup).
+ */
+load('jstests/parallel/fsm_libs/runner.js'); // for extendWorkload
+load('jstests/parallel/fsm_workloads/indexed_insert_large.js'); // for $config
+load('jstests/parallel/fsm_workload_helpers/indexed_noindex.js'); // for indexedNoindex
+
+var $config = extendWorkload($config, indexedNoindex);
diff --git a/jstests/parallel/fsm_workloads/indexed_insert_long_fieldname.js b/jstests/parallel/fsm_workloads/indexed_insert_long_fieldname.js
new file mode 100644
index 00000000000..55fbdb5b42a
--- /dev/null
+++ b/jstests/parallel/fsm_workloads/indexed_insert_long_fieldname.js
@@ -0,0 +1,16 @@
+/**
+ * indexed_insert_long_fieldname.js
+ *
+ * Inserts multiple documents into an indexed collection. Asserts that all
+ * documents appear in both a collection scan and an index scan. The indexed
+ * field name is a long string.
+ */
+load('jstests/parallel/fsm_libs/runner.js'); // for extendWorkload
+load('jstests/parallel/fsm_workloads/indexed_insert_base.js'); // for $config
+
+var $config = extendWorkload($config, function($config, $super) {
+
+ // Only the field name changes; the base workload derives its index spec
+ // and document shape from data.indexedField.
+ $config.data.indexedField = 'Supercalifragilisticexpialidocious';
+
+ return $config;
+});
diff --git a/jstests/parallel/fsm_workloads/indexed_insert_long_fieldname_noindex.js b/jstests/parallel/fsm_workloads/indexed_insert_long_fieldname_noindex.js
new file mode 100644
index 00000000000..9c33b5ff876
--- /dev/null
+++ b/jstests/parallel/fsm_workloads/indexed_insert_long_fieldname_noindex.js
@@ -0,0 +1,10 @@
+/**
+ * indexed_insert_long_fieldname_noindex.js
+ *
+ * Executes the indexed_insert_long_fieldname.js workload after dropping its
+ * index (by applying the indexedNoindex modifier to its setup).
+ */
+load('jstests/parallel/fsm_libs/runner.js'); // for extendWorkload
+load('jstests/parallel/fsm_workloads/indexed_insert_long_fieldname.js'); // for $config
+load('jstests/parallel/fsm_workload_helpers/indexed_noindex.js'); // for indexedNoindex
+
+var $config = extendWorkload($config, indexedNoindex);
diff --git a/jstests/parallel/fsm_workloads/indexed_insert_multikey.js b/jstests/parallel/fsm_workloads/indexed_insert_multikey.js
new file mode 100644
index 00000000000..825c08718f7
--- /dev/null
+++ b/jstests/parallel/fsm_workloads/indexed_insert_multikey.js
@@ -0,0 +1,22 @@
+/**
+ * indexed_insert_multikey.js
+ *
+ * Inserts multiple documents into an indexed collection. Asserts that all
+ * documents appear in both a collection scan and an index scan. The indexed
+ * value is an array of numbers.
+ */
+load('jstests/parallel/fsm_libs/runner.js'); // for extendWorkload
+load('jstests/parallel/fsm_workloads/indexed_insert_base.js'); // for $config
+
+var $config = extendWorkload($config, function($config, $super) {
+
+ $config.states.init = function(db, collName) {
+ $super.states.init.apply(this, arguments);
+
+ // Multikey value: the ten numbers [tid*10, tid*10 + 9], which are
+ // disjoint across threads so each thread's documents stay distinct.
+ this.indexedValue = [0,1,2,3,4,5,6,7,8,9].map(function(n) {
+ return this.tid * 10 + n;
+ }.bind(this));
+ };
+
+ return $config;
+});
diff --git a/jstests/parallel/fsm_workloads/indexed_insert_multikey_noindex.js b/jstests/parallel/fsm_workloads/indexed_insert_multikey_noindex.js
new file mode 100644
index 00000000000..f1b7d72405e
--- /dev/null
+++ b/jstests/parallel/fsm_workloads/indexed_insert_multikey_noindex.js
@@ -0,0 +1,10 @@
+/**
+ * indexed_insert_multikey_noindex.js
+ *
+ * Executes the indexed_insert_multikey.js workload after dropping its index
+ * (by applying the indexedNoindex modifier to its setup).
+ */
+load('jstests/parallel/fsm_libs/runner.js'); // for extendWorkload
+load('jstests/parallel/fsm_workloads/indexed_insert_multikey.js'); // for $config
+load('jstests/parallel/fsm_workload_helpers/indexed_noindex.js'); // for indexedNoindex
+
+var $config = extendWorkload($config, indexedNoindex);
diff --git a/jstests/parallel/fsm_workloads/indexed_insert_ordered_bulk.js b/jstests/parallel/fsm_workloads/indexed_insert_ordered_bulk.js
new file mode 100644
index 00000000000..84d94c10e60
--- /dev/null
+++ b/jstests/parallel/fsm_workloads/indexed_insert_ordered_bulk.js
@@ -0,0 +1,30 @@
+/**
+ * indexed_insert_ordered_bulk.js
+ *
+ * Inserts multiple documents into an indexed collection. Asserts that all
+ * documents appear in both a collection scan and an index scan.
+ *
+ * Uses an ordered, bulk operation to perform the inserts.
+ */
+load('jstests/parallel/fsm_libs/runner.js'); // for extendWorkload
+load('jstests/parallel/fsm_workloads/indexed_insert_base.js'); // for $config
+
+var $config = extendWorkload($config, function($config, $super) {
+
+ // Overrides the base insert state: insert docsPerInsert copies of the
+ // document in one ordered bulk operation instead of a single insert.
+ $config.states.insert = function(db, collName) {
+ var doc = {};
+ doc[this.indexedField] = this.indexedValue;
+
+ var bulk = db[collName].initializeOrderedBulkOp();
+ for (var i = 0; i < this.docsPerInsert; ++i) {
+ bulk.insert(doc);
+ }
+ assertWhenOwnColl.writeOK(bulk.execute());
+
+ this.nInserted += this.docsPerInsert;
+ };
+
+ $config.data.docsPerInsert = 15;
+
+ return $config;
+});
diff --git a/jstests/parallel/fsm_workloads/indexed_insert_unordered_bulk.js b/jstests/parallel/fsm_workloads/indexed_insert_unordered_bulk.js
new file mode 100644
index 00000000000..468ddef6bf2
--- /dev/null
+++ b/jstests/parallel/fsm_workloads/indexed_insert_unordered_bulk.js
@@ -0,0 +1,30 @@
+/**
+ * indexed_insert_unordered_bulk.js
+ *
+ * Inserts multiple documents into an indexed collection. Asserts that all
+ * documents appear in both a collection scan and an index scan.
+ *
+ * Uses an unordered, bulk operation to perform the inserts.
+ */
+load('jstests/parallel/fsm_libs/runner.js'); // for extendWorkload
+load('jstests/parallel/fsm_workloads/indexed_insert_base.js'); // for $config
+
+var $config = extendWorkload($config, function($config, $super) {
+
+ // Overrides the base insert state: insert docsPerInsert copies of the
+ // document in one unordered bulk operation instead of a single insert.
+ $config.states.insert = function(db, collName) {
+ var doc = {};
+ doc[this.indexedField] = this.indexedValue;
+
+ var bulk = db[collName].initializeUnorderedBulkOp();
+ for (var i = 0; i < this.docsPerInsert; ++i) {
+ bulk.insert(doc);
+ }
+ assertWhenOwnColl.writeOK(bulk.execute());
+
+ this.nInserted += this.docsPerInsert;
+ };
+
+ $config.data.docsPerInsert = 15;
+
+ return $config;
+});
diff --git a/jstests/parallel/fsm_workloads/update_inc.js b/jstests/parallel/fsm_workloads/update_inc.js
new file mode 100644
index 00000000000..8590cc91130
--- /dev/null
+++ b/jstests/parallel/fsm_workloads/update_inc.js
@@ -0,0 +1,67 @@
+/**
+ * update_inc.js
+ *
+ * Inserts a single document into a collection. Each thread performs an
+ * update operation to select the document and increment a particular
+ * field. Asserts that the field has the correct value based on the number
+ * of increments performed.
+ */
+var $config = (function() {
+
+ var data = {
+ // uses the workload name as _id on the document.
+ // assumes this name will be unique.
+ id: 'update_inc'
+ };
+
+ var states = {
+ init: function init(db, collName) {
+ // Each thread increments its own field of the shared document.
+ this.fieldName = 't' + this.tid;
+ this.count = 0;
+ },
+
+ update: function update(db, collName) {
+ var updateDoc = { $inc: {} };
+ updateDoc.$inc[this.fieldName] = 1;
+
+ var res = db[collName].update({ _id: this.id }, updateDoc);
+ assertAlways.eq(0, res.nUpserted, tojson(res));
+ assertWhenOwnColl.eq(1, res.nMatched, tojson(res));
+
+ // nModified is presumably only reported when using the write
+ // commands protocol, hence the writeMode check — TODO confirm.
+ if (db.getMongo().writeMode() === 'commands') {
+ assertWhenOwnColl.eq(1, res.nModified, tojson(res));
+ }
+
+ ++this.count;
+ },
+
+ find: function find(db, collName) {
+ var docs = db[collName].find().toArray();
+ assertWhenOwnColl.eq(1, docs.length);
+ // NOTE(review): a bound closure is passed to assertWhenOwnColl;
+ // assumes the wrapper invokes the function only when the workload
+ // owns the collection — confirm against fsm_libs/assert.js.
+ assertWhenOwnColl((function() {
+ var doc = docs[0];
+ assertWhenOwnColl.eq(this.count, doc[this.fieldName]);
+ }).bind(this));
+ }
+ };
+
+ var transitions = {
+ init: { update: 1 },
+ update: { find: 1 },
+ find: { update: 1 }
+ };
+
+ function setup(db, collName) {
+ // NOTE(review): assumes setup is invoked with $config.data as `this`
+ // so this.id resolves — confirm against fsm_libs/runner.js.
+ db[collName].insert({ _id: this.id });
+ }
+
+ return {
+ threadCount: 30,
+ iterations: 100,
+ data: data,
+ states: states,
+ transitions: transitions,
+ setup: setup
+ };
+
+})();
diff --git a/jstests/parallel/fsm_workloads/update_ordered_bulk_inc.js b/jstests/parallel/fsm_workloads/update_ordered_bulk_inc.js
new file mode 100644
index 00000000000..0dceb47f28b
--- /dev/null
+++ b/jstests/parallel/fsm_workloads/update_ordered_bulk_inc.js
@@ -0,0 +1,69 @@
+/**
+ * update_ordered_bulk_inc.js
+ *
+ * Inserts multiple documents into a collection. Each thread performs a
+ * bulk update operation to select the document and increment a particular
+ * field. Asserts that the field has the correct value based on the number
+ * of increments performed.
+ *
+ * Uses an ordered, bulk operation to perform the updates.
+ */
+var $config = (function() {
+
+ var states = {
+ init: function init(db, collName) {
+ this.fieldName = 't' + this.tid;
+ },
+
+ update: function update(db, collName) {
+ var updateDoc = { $inc: {} };
+ updateDoc.$inc[this.fieldName] = 1;
+
+ var bulk = db[collName].initializeOrderedBulkOp();
+ for (var i = 0; i < this.docCount; ++i) {
+ bulk.find({ _id: i }).update(updateDoc);
+ }
+ var result = bulk.execute();
+ // TODO: this actually does assume that there are no unique indexes.
+ // but except for weird cases like that, it is valid even when other
+ // threads are modifying the same collection
+ assertAlways.eq(0, result.getWriteErrorCount());
+
+ ++this.count;
+ },
+
+ find: function find(db, collName) {
+ var docs = db[collName].find().toArray();
+ assertWhenOwnColl.eq(this.docCount, docs.length);
+
+ docs.forEach(function (doc) {
+ assertWhenOwnColl.eq(this.count, doc[this.fieldName]);
+ });
+ }
+ };
+
+ var transitions = {
+ init: { update: 1 },
+ update: { find: 1 },
+ find: { update: 1 }
+ };
+
+ function setup(db, collName) {
+ this.count = 0;
+ for (var i = 0; i < this.docCount; ++i) {
+ db[collName].insert({ _id: i });
+ }
+ }
+
+ return {
+ threadCount: 30,
+ iterations: 100,
+ states: states,
+ transitions: transitions,
+ setup: setup,
+ data: {
+ docCount: 15
+ }
+ };
+
+})();