author     alabid <alabidan@gmail.com>                         2015-01-05 09:47:07 -0500
committer  Ramon Fernandez <ramon.fernandez@mongodb.com>       2015-03-09 18:30:32 -0400
commit     747e066e97ec42cab358d9b0b328257e997b74b9 (patch)
tree       a36d2109eadd3984af1ff50bc20bbf544ee739a1
parent     d6b7c2351e47992bf350131c2b60e15644ce4b69 (diff)
SERVER-16648 Additional FSM-based concurrency workloads with some cleanup and blacklisting
Includes workloads for: explain, compact, reindex, collMod, count, distinct, touch, $where

Added to blacklists in FSM runners
Fixed the way we check for storage engines
Added two options for arbitrary function execution against the cluster (see the sketch after this message):
- Specified via ClusterOptions as setupFunctions, to be run on the cluster before workloads are run
- As part of setup and teardown, with the cluster provided as a third argument to these workload functions

(cherry picked from commit f6a65290f22f126b8a2eb616f800582c5c43b6c8)

Conflicts:
	jstests/concurrency/fsm_all_sharded.js
	jstests/concurrency/fsm_all_sharded_replication.js
	jstests/concurrency/fsm_all_simultaneous.js
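For reference, a minimal sketch of the two new hooks (hypothetical option values; the helpers and option names are the ones introduced in jstests/concurrency/fsm_libs/cluster.js below, and each function is invoked with the 'admin' DB of the corresponding node):

    // Workload side: 'cluster' is now passed as a third argument to setup/teardown.
    function setup(db, collName, cluster) {
        cluster.executeOnMongodNodes(function(db) {
            printjson(db.serverCmdLineOpts()); // runs once against every mongod in the cluster
        });
    }

    // Runner side: per-node functions can also be supplied up front through ClusterOptions;
    // they are run once the cluster is set up, before any workload starts.
    var clusterOptions = {
        sharded: true, // hypothetical cluster topology
        setupFunctions: {
            mongod: function(db) { db.adminCommand({ setParameter: 1, logLevel: 1 }); },
            mongos: function(db) { printjson(db.serverCmdLineOpts()); }
        }
    };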
-rw-r--r--  jstests/concurrency/fsm_all_sharded.js | 5
-rw-r--r--  jstests/concurrency/fsm_all_sharded_replication.js | 5
-rw-r--r--  jstests/concurrency/fsm_example.js | 18
-rw-r--r--  jstests/concurrency/fsm_example_inheritance.js | 2
-rw-r--r--  jstests/concurrency/fsm_libs/cluster.js | 136
-rw-r--r--  jstests/concurrency/fsm_libs/composer.js | 2
-rw-r--r--  jstests/concurrency/fsm_libs/parse_config.js | 2
-rw-r--r--  jstests/concurrency/fsm_libs/runner.js | 13
-rw-r--r--  jstests/concurrency/fsm_libs/thread_mgr.js | 9
-rw-r--r--  jstests/concurrency/fsm_libs/worker_thread.js | 19
-rw-r--r--  jstests/concurrency/fsm_workload_helpers/indexed_noindex.js | 2
-rw-r--r--  jstests/concurrency/fsm_workload_helpers/server_types.js | 67
-rw-r--r--  jstests/concurrency/fsm_workload_modifiers/drop_all_indexes.js | 2
-rw-r--r--  jstests/concurrency/fsm_workload_modifiers/indexed_noindex.js | 2
-rw-r--r--  jstests/concurrency/fsm_workload_modifiers/make_capped.js | 2
-rw-r--r--  jstests/concurrency/fsm_workloads/agg_base.js | 4
-rw-r--r--  jstests/concurrency/fsm_workloads/agg_group_external.js | 2
-rw-r--r--  jstests/concurrency/fsm_workloads/agg_match.js | 2
-rw-r--r--  jstests/concurrency/fsm_workloads/agg_sort.js | 2
-rw-r--r--  jstests/concurrency/fsm_workloads/agg_sort_external.js | 2
-rw-r--r--  jstests/concurrency/fsm_workloads/auth_create_role.js | 2
-rw-r--r--  jstests/concurrency/fsm_workloads/auth_create_user.js | 2
-rw-r--r--  jstests/concurrency/fsm_workloads/collmod.js | 75
-rw-r--r--  jstests/concurrency/fsm_workloads/collmod_separate_collections.js | 41
-rw-r--r--  jstests/concurrency/fsm_workloads/compact.js | 103
-rw-r--r--  jstests/concurrency/fsm_workloads/compact_simultaneous_padding_bytes.js | 33
-rw-r--r--  jstests/concurrency/fsm_workloads/convert_to_capped_collection.js | 93
-rw-r--r--  jstests/concurrency/fsm_workloads/convert_to_capped_collection_index.js | 27
-rw-r--r--  jstests/concurrency/fsm_workloads/count.js | 76
-rw-r--r--  jstests/concurrency/fsm_workloads/count_indexed.js | 38
-rw-r--r--  jstests/concurrency/fsm_workloads/count_limit_skip.js | 52
-rw-r--r--  jstests/concurrency/fsm_workloads/create_capped_collection.js | 2
-rw-r--r--  jstests/concurrency/fsm_workloads/create_collection.js | 2
-rw-r--r--  jstests/concurrency/fsm_workloads/distinct.js | 63
-rw-r--r--  jstests/concurrency/fsm_workloads/distinct_noindex.js | 59
-rw-r--r--  jstests/concurrency/fsm_workloads/distinct_projection.js | 23
-rw-r--r--  jstests/concurrency/fsm_workloads/explain.js | 83
-rw-r--r--  jstests/concurrency/fsm_workloads/explain_aggregate.js | 45
-rw-r--r--  jstests/concurrency/fsm_workloads/explain_count.js | 59
-rw-r--r--  jstests/concurrency/fsm_workloads/explain_find.js | 66
-rw-r--r--  jstests/concurrency/fsm_workloads/explain_group.js | 29
-rw-r--r--  jstests/concurrency/fsm_workloads/explain_remove.js | 43
-rw-r--r--  jstests/concurrency/fsm_workloads/explain_update.js | 67
-rw-r--r--  jstests/concurrency/fsm_workloads/findAndModify_inc.js | 2
-rw-r--r--  jstests/concurrency/fsm_workloads/findAndModify_update.js | 2
-rw-r--r--  jstests/concurrency/fsm_workloads/findAndModify_update_collscan.js | 2
-rw-r--r--  jstests/concurrency/fsm_workloads/findAndModify_update_grow.js | 3
-rw-r--r--  jstests/concurrency/fsm_workloads/group.js | 4
-rw-r--r--  jstests/concurrency/fsm_workloads/group_cond.js | 4
-rw-r--r--  jstests/concurrency/fsm_workloads/indexed_insert_base.js | 4
-rw-r--r--  jstests/concurrency/fsm_workloads/indexed_insert_text.js | 2
-rw-r--r--  jstests/concurrency/fsm_workloads/indexed_insert_ttl.js | 4
-rw-r--r--  jstests/concurrency/fsm_workloads/indexed_insert_where.js | 57
-rw-r--r--  jstests/concurrency/fsm_workloads/map_reduce_inline.js | 2
-rw-r--r--  jstests/concurrency/fsm_workloads/map_reduce_merge.js | 4
-rw-r--r--  jstests/concurrency/fsm_workloads/map_reduce_merge_nonatomic.js | 2
-rw-r--r--  jstests/concurrency/fsm_workloads/map_reduce_reduce.js | 2
-rw-r--r--  jstests/concurrency/fsm_workloads/map_reduce_reduce_nonatomic.js | 4
-rw-r--r--  jstests/concurrency/fsm_workloads/map_reduce_replace.js | 2
-rw-r--r--  jstests/concurrency/fsm_workloads/map_reduce_replace_nonexistent.js | 2
-rw-r--r--  jstests/concurrency/fsm_workloads/reindex.js | 114
-rw-r--r--  jstests/concurrency/fsm_workloads/reindex_background.js | 33
-rw-r--r--  jstests/concurrency/fsm_workloads/remove_single_document.js | 2
-rw-r--r--  jstests/concurrency/fsm_workloads/remove_where.js | 42
-rw-r--r--  jstests/concurrency/fsm_workloads/rename_capped_collection_chain.js | 2
-rw-r--r--  jstests/concurrency/fsm_workloads/rename_capped_collection_dbname_chain.js | 2
-rw-r--r--  jstests/concurrency/fsm_workloads/rename_capped_collection_dbname_droptarget.js | 2
-rw-r--r--  jstests/concurrency/fsm_workloads/rename_capped_collection_droptarget.js | 2
-rw-r--r--  jstests/concurrency/fsm_workloads/rename_collection_chain.js | 2
-rw-r--r--  jstests/concurrency/fsm_workloads/rename_collection_dbname_chain.js | 2
-rw-r--r--  jstests/concurrency/fsm_workloads/rename_collection_dbname_droptarget.js | 2
-rw-r--r--  jstests/concurrency/fsm_workloads/rename_collection_droptarget.js | 2
-rw-r--r--  jstests/concurrency/fsm_workloads/touch_base.js | 51
-rw-r--r--  jstests/concurrency/fsm_workloads/touch_data.js | 19
-rw-r--r--  jstests/concurrency/fsm_workloads/touch_index.js | 19
-rw-r--r--  jstests/concurrency/fsm_workloads/touch_no_data_no_index.js | 25
-rw-r--r--  jstests/concurrency/fsm_workloads/update_array.js | 5
-rw-r--r--  jstests/concurrency/fsm_workloads/update_inc.js | 5
-rw-r--r--  jstests/concurrency/fsm_workloads/update_multifield.js | 5
-rw-r--r--  jstests/concurrency/fsm_workloads/update_multifield_multiupdate.js | 5
-rw-r--r--  jstests/concurrency/fsm_workloads/update_ordered_bulk_inc.js | 8
-rw-r--r--  jstests/concurrency/fsm_workloads/update_rename.js | 2
-rw-r--r--  jstests/concurrency/fsm_workloads/update_replace.js | 2
-rw-r--r--  jstests/concurrency/fsm_workloads/update_simple.js | 5
-rw-r--r--  jstests/concurrency/fsm_workloads/update_upsert_multi.js | 2
-rw-r--r--  jstests/concurrency/fsm_workloads/update_where.js | 46
-rw-r--r--  jstests/concurrency/fsm_workloads/upsert_where.js | 43
87 files changed, 1783 insertions, 149 deletions
diff --git a/jstests/concurrency/fsm_all_sharded.js b/jstests/concurrency/fsm_all_sharded.js
index 8b143a268d7..fd575720c05 100644
--- a/jstests/concurrency/fsm_all_sharded.js
+++ b/jstests/concurrency/fsm_all_sharded.js
@@ -31,6 +31,10 @@ var blacklist = [
'agg_group_external.js', // uses >100MB of data, and is flaky
'agg_sort_external.js', // uses >100MB of data, and is flaky
+ 'compact.js', // compact can only be run against a standalone mongod
+ 'compact_simultaneous_padding_bytes.js', // compact can only be run against a mongod
+ 'convert_to_capped_collection.js', // convertToCapped can't be run on mongos processes
+ 'convert_to_capped_collection_index.js', // convertToCapped can't be run on mongos processes
'findAndModify_remove.js', // our findAndModify queries lack shard keys
'findAndModify_update.js', // our findAndModify queries lack shard keys
'findAndModify_update_collscan.js', // our findAndModify queries lack shard keys
@@ -43,6 +47,7 @@ var blacklist = [
'indexed_insert_eval_nolock.js', // eval doesn't work with sharded collections
'remove_single_document.js', // our .remove(query, {justOne: true}) calls lack shard keys
'remove_single_document_eval.js', // eval doesn't work with sharded collections
+ 'remove_single_document_eval_nolock.js', // eval doesn't work with sharded collections
'update_simple_eval.js', // eval doesn't work with sharded collections
'update_simple_eval_nolock.js', // eval doesn't work with sharded collections
'update_upsert_multi.js', // our update queries lack shard keys
diff --git a/jstests/concurrency/fsm_all_sharded_replication.js b/jstests/concurrency/fsm_all_sharded_replication.js
index 87fa7e663c8..ed2e1b39c42 100644
--- a/jstests/concurrency/fsm_all_sharded_replication.js
+++ b/jstests/concurrency/fsm_all_sharded_replication.js
@@ -31,6 +31,10 @@ var blacklist = [
'agg_group_external.js', // uses >100MB of data, and is flaky
'agg_sort_external.js', // uses >100MB of data, and is flaky
+ 'compact.js', // compact can only be run against a standalone mongod
+ 'compact_simultaneous_padding_bytes.js', // compact can only be run against a mongod
+ 'convert_to_capped_collection.js', // convertToCapped can't be run on mongos processes
+ 'convert_to_capped_collection_index.js', // convertToCapped can't be run on mongos processes
'findAndModify_remove.js', // our findAndModify queries lack shard keys
'findAndModify_update.js', // our findAndModify queries lack shard keys
'findAndModify_update_collscan.js', // our findAndModify queries lack shard keys
@@ -43,6 +47,7 @@ var blacklist = [
'indexed_insert_eval_nolock.js', // eval doesn't work with sharded collections
'remove_single_document.js', // our .remove(query, {justOne: true}) calls lack shard keys
'remove_single_document_eval.js', // eval doesn't work with sharded collections
+ 'remove_single_document_eval_nolock.js', // eval doesn't work with sharded collections
'update_simple_eval.js', // eval doesn't work with sharded collections
'update_simple_eval_nolock.js', // eval doesn't work with sharded collections
'update_upsert_multi.js', // our update queries lack shard keys
diff --git a/jstests/concurrency/fsm_example.js b/jstests/concurrency/fsm_example.js
index 8747e763892..a112195e1a4 100644
--- a/jstests/concurrency/fsm_example.js
+++ b/jstests/concurrency/fsm_example.js
@@ -46,19 +46,29 @@ var $config = (function() {
// 'setup' is run once by the parent thread after the cluster has
// been initialized, but before the worker threads have been spawned.
- // The 'this' argument is bound as '$config.data'.
- function setup(db, collName) {
+ // The 'this' argument is bound as '$config.data'. 'cluster' is provided
+ // to allow execution against all mongos and mongod nodes.
+ function setup(db, collName, cluster) {
// Workloads should NOT drop the collection db[collName], as
// doing so is handled by runner.js before 'setup' is called.
for (var i = 0; i < 1000; ++i) {
db[collName].insert({ _id: i });
}
+
+ cluster.executeOnMongodNodes(function(db) {
+ printjson(db.serverCmdLineOpts());
+ });
+
+ cluster.executeOnMongosNodes(function(db) {
+ printjson(db.serverCmdLineOpts());
+ });
}
// 'teardown' is run once by the parent thread before the cluster
// is destroyed, but after the worker threads have been reaped.
- // The 'this' argument is bound as '$config.data'.
- function teardown(db, collName) {}
+ // The 'this' argument is bound as '$config.data'. 'cluster' is provided
+ // to allow execution against all mongos and mongod nodes.
+ function teardown(db, collName, cluster) {}
return {
threadCount: 5,
diff --git a/jstests/concurrency/fsm_example_inheritance.js b/jstests/concurrency/fsm_example_inheritance.js
index 0664b84d077..820758f0c6a 100644
--- a/jstests/concurrency/fsm_example_inheritance.js
+++ b/jstests/concurrency/fsm_example_inheritance.js
@@ -9,7 +9,7 @@ var $config = extendWorkload($config, function($config, $super) {
// and $config is the extended workload definition we're creating.
// You can replace any properties on $config, including methods you want to override.
- $config.setup = function(db, collName) {
+ $config.setup = function(db, collName, cluster) {
// Overridden methods should usually call the corresponding method on $super.
$super.setup.apply(this, arguments);
diff --git a/jstests/concurrency/fsm_libs/cluster.js b/jstests/concurrency/fsm_libs/cluster.js
index fc7f6baff00..18e4056e5f8 100644
--- a/jstests/concurrency/fsm_libs/cluster.js
+++ b/jstests/concurrency/fsm_libs/cluster.js
@@ -9,25 +9,64 @@ var Cluster = function(options) {
return new Cluster(options);
}
- var allowedKeys = [
- 'masterSlave',
- 'replication',
- 'sameCollection',
- 'sameDB',
- 'seed',
- 'sharded'
- ];
-
- Object.keys(options).forEach(function(option) {
- assert.contains(option, allowedKeys,
- 'invalid option: ' + tojson(option) +
- '; valid options are: ' + tojson(allowedKeys));
- });
+ function validateClusterOptions(options) {
+ var allowedKeys = [
+ 'masterSlave',
+ 'replication',
+ 'sameCollection',
+ 'sameDB',
+ 'setupFunctions',
+ 'sharded',
+ ];
+
+ Object.keys(options).forEach(function(option) {
+ assert.contains(option, allowedKeys,
+ 'invalid option: ' + tojson(option) +
+ '; valid options are: ' + tojson(allowedKeys));
+ });
+
+ options.masterSlave = options.masterSlave || false;
+ assert.eq('boolean', typeof options.masterSlave);
+
+ options.replication = options.replication || false;
+ assert.eq('boolean', typeof options.replication);
+
+ options.sameCollection = options.sameCollection || false;
+ assert.eq('boolean', typeof options.sameCollection);
+
+ options.sameDB = options.sameDB || false;
+ assert.eq('boolean', typeof options.sameDB);
+
+ options.sharded = options.sharded || false;
+ assert.eq('boolean', typeof options.sharded);
+
+ options.setupFunctions = options.setupFunctions || {};
+ assert.eq('object', typeof options.setupFunctions);
+
+ options.setupFunctions.mongod = options.setupFunctions.mongod || function(db) { };
+ assert.eq('function', typeof options.setupFunctions.mongod);
+
+ options.setupFunctions.mongos = options.setupFunctions.mongos || function(db) { };
+ assert.eq('function', typeof options.setupFunctions.mongos);
+
+ assert(!options.masterSlave || !options.replication, "Both 'masterSlave' and " +
+ "'replication' cannot be true");
+ assert(!options.masterSlave || !options.sharded, "Both 'masterSlave' and 'sharded' cannot" +
+ "be true");
+ }
var conn;
var initialized = false;
+ var _conns = {
+ mongos: [],
+ mongod: []
+ };
+
+ validateClusterOptions(options);
+ Object.freeze(options);
+
this.setup = function setup() {
var verbosityLevel = 0;
@@ -64,6 +103,32 @@ var Cluster = function(options) {
st.stop();
};
+ // Save all mongos and mongod connections
+ var i = 0;
+ var mongos = st.s0;
+ var mongod = st.d0;
+ while (mongos) {
+ _conns.mongos.push(mongos);
+ ++i;
+ mongos = st['s' + i];
+ }
+ if (options.replication) {
+ var rsTest = st.rs0;
+
+ i = 0;
+ while (rsTest) {
+ this._addReplicaSetConns(rsTest);
+ ++i;
+ rsTest = st['rs' + i];
+ }
+ }
+ i = 0;
+ while (mongod) {
+ _conns.mongod.push(mongod);
+ ++i;
+ mongod = st['d' + i];
+ }
+
} else if (options.replication) {
// TODO: allow 'options' to specify the number of nodes
var replSetConfig = {
@@ -84,6 +149,8 @@ var Cluster = function(options) {
rst.stopSet();
};
+ this._addReplicaSetConns(rst);
+
} else if (options.masterSlave) {
var rt = new ReplTest('replTest');
@@ -98,14 +165,55 @@ var Cluster = function(options) {
rt.stop();
};
+ _conns.mongod = [master, slave];
+
} else { // standalone server
conn = db.getMongo();
db.adminCommand({ setParameter: 1, logLevel: verbosityLevel });
+
+ _conns.mongod = [conn];
}
initialized = true;
+
+ this.executeOnMongodNodes(options.setupFunctions.mongod);
+ this.executeOnMongosNodes(options.setupFunctions.mongos);
};
+
+ this._addReplicaSetConns = function _addReplicaSetConns(rsTest) {
+ _conns.mongod.push(rsTest.getPrimary());
+ rsTest.getSecondaries().forEach(function (secondaryConn) {
+ _conns.mongod.push(secondaryConn);
+ });
+ };
+
+ this.executeOnMongodNodes = function executeOnMongodNodes(fn) {
+ if (!initialized) {
+ throw new Error('cluster must be initialized before functions can be executed ' +
+ 'against it');
+ }
+ if (!fn || typeof(fn) !== 'function' || fn.length !== 1) {
+ throw new Error('mongod function must be a function that takes a db as an argument');
+ }
+ _conns.mongod.forEach(function(mongodConn) {
+ fn(mongodConn.getDB('admin'));
+ });
+ };
+
+ this.executeOnMongosNodes = function executeOnMongosNodes(fn) {
+ if (!initialized) {
+ throw new Error('cluster must be initialized before functions can be executed ' +
+ 'against it');
+ }
+ if (!fn || typeof(fn) !== 'function' || fn.length !== 1) {
+ throw new Error('mongos function must be a function that takes a db as an argument');
+ }
+ _conns.mongos.forEach(function(mongosConn) {
+ fn(mongosConn.getDB('admin'));
+ });
+ };
+
this.teardown = function teardown() { };
this.getDB = function getDB(dbName) {
diff --git a/jstests/concurrency/fsm_libs/composer.js b/jstests/concurrency/fsm_libs/composer.js
index bc74afdcd19..d0552f45fd0 100644
--- a/jstests/concurrency/fsm_libs/composer.js
+++ b/jstests/concurrency/fsm_libs/composer.js
@@ -44,7 +44,7 @@ var composer = (function() {
continue;
}
- // Transition to a state of another workloads with probability 'mixProb'
+ // Transition to a state of another workload with probability 'mixProb'
var otherStates = [];
workloads.forEach(function(workload) {
if (workload === currentWorkload) {
diff --git a/jstests/concurrency/fsm_libs/parse_config.js b/jstests/concurrency/fsm_libs/parse_config.js
index 6a19c071a88..fa482e83151 100644
--- a/jstests/concurrency/fsm_libs/parse_config.js
+++ b/jstests/concurrency/fsm_libs/parse_config.js
@@ -57,6 +57,8 @@ function parseConfig(config) {
' contains a state not in config.states: ' + toState);
assert.eq('number', typeof config.transitions[fromState][toState],
'transitions.' + fromState + '.' + toState + ' should be a number');
+ assert(!isNaN(config.transitions[fromState][toState]),
+ 'transitions.' + fromState + '.' + toState + ' cannot be NaN');
});
});
diff --git a/jstests/concurrency/fsm_libs/runner.js b/jstests/concurrency/fsm_libs/runner.js
index ee399eeb475..5e417ba6d9e 100644
--- a/jstests/concurrency/fsm_libs/runner.js
+++ b/jstests/concurrency/fsm_libs/runner.js
@@ -274,20 +274,20 @@ var runner = (function() {
}
}
- function setupWorkload(workload, context) {
+ function setupWorkload(workload, context, cluster) {
var myDB = context[workload].db;
var collName = context[workload].collName;
var config = context[workload].config;
- config.setup.call(config.data, myDB, collName);
+ config.setup.call(config.data, myDB, collName, cluster);
}
- function teardownWorkload(workload, context) {
+ function teardownWorkload(workload, context, cluster) {
var myDB = context[workload].db;
var collName = context[workload].collName;
var config = context[workload].config;
- config.teardown.call(config.data, myDB, collName);
+ config.teardown.call(config.data, myDB, collName, cluster);
}
function runWorkloads(workloads, clusterOptions, executionMode, executionOptions) {
@@ -303,7 +303,6 @@ var runner = (function() {
clusterOptions.sameDB = true;
clusterOptions.sameCollection = true;
}
- Object.freeze(clusterOptions);
// Determine how strong to make assertions while simultaneously executing
// different workloads
@@ -355,7 +354,7 @@ var runner = (function() {
try {
workloads.forEach(function(workload) {
- setupWorkload(workload, context);
+ setupWorkload(workload, context, cluster);
cleanup.push(workload);
});
@@ -369,7 +368,7 @@ var runner = (function() {
endTime = new Date();
cleanup.forEach(function(workload) {
try {
- teardownWorkload(workload, context);
+ teardownWorkload(workload, context, cluster);
} catch (err) {
print('Workload teardown function threw an exception:\n' + err.stack);
teardownFailed = true;
diff --git a/jstests/concurrency/fsm_libs/thread_mgr.js b/jstests/concurrency/fsm_libs/thread_mgr.js
index cb7d48a558a..03e5607ed2c 100644
--- a/jstests/concurrency/fsm_libs/thread_mgr.js
+++ b/jstests/concurrency/fsm_libs/thread_mgr.js
@@ -92,6 +92,11 @@ var ThreadManager = function(clusterOptions, executionMode) {
throw new Error('thread manager has not been initialized yet');
}
+ var workloadData = {};
+ _workloads.forEach(function(workload) {
+ workloadData[workload] = _context[workload].config.data;
+ });
+
var tid = 0;
_workloads.forEach(function(workload) {
var workloads = [workload]; // worker thread only needs to load 'workload'
@@ -102,9 +107,9 @@ var ThreadManager = function(clusterOptions, executionMode) {
var config = _context[workload].config;
for (var i = 0; i < threadCounts[workload]; ++i) {
- config.data.tid = tid++;
var args = {
- data: config.data,
+ tid: tid++,
+ data: workloadData,
host: host,
latch: latch,
dbName: _context[workload].dbName,
diff --git a/jstests/concurrency/fsm_libs/worker_thread.js b/jstests/concurrency/fsm_libs/worker_thread.js
index 466438c4360..9276daa88ef 100644
--- a/jstests/concurrency/fsm_libs/worker_thread.js
+++ b/jstests/concurrency/fsm_libs/worker_thread.js
@@ -7,8 +7,8 @@ load('jstests/concurrency/fsm_libs/parse_config.js'); // for parseConfig
var workerThread = (function() {
// workloads = list of workload filenames
- // args.data = 'this' parameter passed to the FSM state functions
- // args.data.tid = the thread identifier
+ // args.tid = the thread identifier
+ // args.data = map of workload -> 'this' parameter passed to the FSM state functions
// args.host = the address to make a new connection to
// args.latch = CountDownLatch instance for starting all threads
// args.dbName = the database name
@@ -40,10 +40,21 @@ var workerThread = (function() {
var config = parseConfig($config); // to normalize
// Copy any modifications that were made to $config.data
- // during the setup function of the workload
- var data = Object.extend({}, args.data, true);
+ // during the setup function of the workload (see caveat
+ // below).
+
+ // XXX: Changing the order of extend calls causes problems
+ // for workloads that reference $super.
+ // Suppose you have workloads A and B, where workload B extends
+ // workload A. The $config.data of workload B can define a
+ // function that closes over the $config object of workload A
+ // (known as $super to workload B). This reference is lost when
+ // the config object is serialized through BSON into the V8 isolate,
+ // which results in undefined variables in the derived workload.
+ var data = Object.extend({}, args.data[workload], true);
data = Object.extend(data, config.data, true);
+ data.tid = args.tid;
configs[workload] = {
data: data,
db: myDB,
diff --git a/jstests/concurrency/fsm_workload_helpers/indexed_noindex.js b/jstests/concurrency/fsm_workload_helpers/indexed_noindex.js
index 7986bfd79cd..47f22e0daa3 100644
--- a/jstests/concurrency/fsm_workload_helpers/indexed_noindex.js
+++ b/jstests/concurrency/fsm_workload_helpers/indexed_noindex.js
@@ -6,7 +6,7 @@
*/
function indexedNoindex($config, $super) {
- $config.setup = function(db, collName) {
+ $config.setup = function(db, collName, cluster) {
$super.setup.apply(this, arguments);
var res = db[collName].dropIndex(this.getIndexSpec());
diff --git a/jstests/concurrency/fsm_workload_helpers/server_types.js b/jstests/concurrency/fsm_workload_helpers/server_types.js
index cb95641955b..7a3665a7931 100644
--- a/jstests/concurrency/fsm_workload_helpers/server_types.js
+++ b/jstests/concurrency/fsm_workload_helpers/server_types.js
@@ -1,57 +1,35 @@
'use strict';
/**
- * Returns true if the process is a mongod, and false otherwise.
+ * Returns true if the process is a mongos, and false otherwise.
*
- * 'dbOrServerStatus' can either be a server connection,
- * or the result of the { serverStatus: 1 } command.
*/
-function isMongod(dbOrServerStatus) {
- var status = dbOrServerStatus;
-
- if (dbOrServerStatus instanceof DB) {
- var db = dbOrServerStatus;
- status = db.serverStatus();
- }
+function isMongos(db) {
+ var res = db.runCommand('ismaster');
+ assert.commandWorked(res);
- return status.process === 'mongod';
+ return 'isdbgrid' === res.msg;
}
/**
- * Returns true if the process is a mongos, and false otherwise.
+ * Returns true if the process is a mongod, and false otherwise.
*
- * 'dbOrServerStatus' can either be a server connection,
- * or the result of the { serverStatus: 1 } command.
*/
-function isMongos(dbOrServerStatus) {
- var status = dbOrServerStatus;
-
- if (dbOrServerStatus instanceof DB) {
- var db = dbOrServerStatus;
- status = db.serverStatus();
- }
-
- return status.process === 'mongos';
+function isMongod(db) {
+ return !isMongos(db);
}
/**
* Returns true if the current storage engine is mmapv1,
* and false otherwise.
*
- * 'dbOrServerStatus' must refer to a mongod connection
- * (and not a mongos connection), or the result of the
- * { serverStatus: 1 } command.
*/
-function isMMAPv1(dbOrServerStatus) {
- var status = dbOrServerStatus;
+function isMMAPv1(db) {
+ var status = db.serverStatus();
+ assert.commandWorked(status);
- if (dbOrServerStatus instanceof DB) {
- var db = dbOrServerStatus;
- status = db.serverStatus();
- }
-
- // No storage engine is reported when connected to a mongos
- assert(isMongod(status), 'expected status of mongod process');
+ assert(isMongod(db),
+ 'no storage engine is reported when connected to mongos');
assert.neq('undefined', typeof status.storageEngine,
'missing storage engine info in server status');
@@ -62,22 +40,15 @@ function isMMAPv1(dbOrServerStatus) {
* Returns true if the current storage engine is wiredTiger
* and false otherwise.
*
- * 'dbOrServerStatus' must refer to a mongod connection
- * (and not a mongos connection), or the result of the
- * { serverStatus: 1 } command.
*/
-function isWiredTiger(dbOrServerStatus) {
- var status = dbOrServerStatus;
-
- if (dbOrServerStatus instanceof DB) {
- var db = dbOrServerStatus;
- status = db.serverStatus();
- }
+function isWiredTiger(db) {
+ var status = db.serverStatus();
+ assert.commandWorked(status);
- // No storage engine is reported when connected to a mongos
- assert(isMongod(status), 'expected status of mongod process');
+ assert(isMongod(db),
+ 'no storage engine is reported when connected to mongos');
assert.neq('undefined', typeof status.storageEngine,
'missing storage engine info in server status');
- return Array.contains(['wiredTiger'], status.storageEngine.name);
+ return status.storageEngine.name === 'wiredTiger';
}
diff --git a/jstests/concurrency/fsm_workload_modifiers/drop_all_indexes.js b/jstests/concurrency/fsm_workload_modifiers/drop_all_indexes.js
index a8460ef84fa..b4d61410c4a 100644
--- a/jstests/concurrency/fsm_workload_modifiers/drop_all_indexes.js
+++ b/jstests/concurrency/fsm_workload_modifiers/drop_all_indexes.js
@@ -10,7 +10,7 @@
function dropAllIndexes($config, $super) {
- $config.setup = function setup(db, collName) {
+ $config.setup = function setup(db, collName, cluster) {
var oldIndexes = db[collName].getIndexes().map(function(ixSpec) {
return ixSpec.name;
});
diff --git a/jstests/concurrency/fsm_workload_modifiers/indexed_noindex.js b/jstests/concurrency/fsm_workload_modifiers/indexed_noindex.js
index 161ca88bd48..63c26f0526c 100644
--- a/jstests/concurrency/fsm_workload_modifiers/indexed_noindex.js
+++ b/jstests/concurrency/fsm_workload_modifiers/indexed_noindex.js
@@ -8,7 +8,7 @@
*/
function indexedNoindex($config, $super) {
- $config.setup = function(db, collName) {
+ $config.setup = function(db, collName, cluster) {
$super.setup.apply(this, arguments);
var res = db[collName].dropIndex(this.getIndexSpec());
diff --git a/jstests/concurrency/fsm_workload_modifiers/make_capped.js b/jstests/concurrency/fsm_workload_modifiers/make_capped.js
index 4bae11fae71..40ba3d09c24 100644
--- a/jstests/concurrency/fsm_workload_modifiers/make_capped.js
+++ b/jstests/concurrency/fsm_workload_modifiers/make_capped.js
@@ -15,7 +15,7 @@
function makeCapped($config, $super) {
- $config.setup = function setup(db, collName) {
+ $config.setup = function setup(db, collName, cluster) {
assertWhenOwnColl(function() {
db[collName].drop();
assertAlways.commandWorked(db.createCollection(collName, {
diff --git a/jstests/concurrency/fsm_workloads/agg_base.js b/jstests/concurrency/fsm_workloads/agg_base.js
index e701682e028..3ce16aaea31 100644
--- a/jstests/concurrency/fsm_workloads/agg_base.js
+++ b/jstests/concurrency/fsm_workloads/agg_base.js
@@ -49,7 +49,7 @@ var $config = (function() {
query: { query: 1 }
};
- function setup(db, collName) {
+ function setup(db, collName, cluster) {
// load example data
var bulk = db[collName].initializeUnorderedBulkOp();
for (var i = 0; i < this.numDocs; ++i) {
@@ -69,7 +69,7 @@ var $config = (function() {
assertWhenOwnColl.eq(this.numDocs / 2, db[collName].find({ flag: true }).itcount());
}
- function teardown(db, collName) {
+ function teardown(db, collName, cluster) {
assertWhenOwnColl(db[collName].drop());
}
diff --git a/jstests/concurrency/fsm_workloads/agg_group_external.js b/jstests/concurrency/fsm_workloads/agg_group_external.js
index 9a1b9ba5fbb..3c3cf973434 100644
--- a/jstests/concurrency/fsm_workloads/agg_group_external.js
+++ b/jstests/concurrency/fsm_workloads/agg_group_external.js
@@ -42,7 +42,7 @@ var $config = extendWorkload($config, function($config, $super) {
}.bind(this));
};
- $config.teardown = function teardown(db, collName) {
+ $config.teardown = function teardown(db, collName, cluster) {
$super.teardown.apply(this, arguments);
// drop all collections with this workload's assumed-to-be-unique prefix
diff --git a/jstests/concurrency/fsm_workloads/agg_match.js b/jstests/concurrency/fsm_workloads/agg_match.js
index a8961db5a68..00e23a24c03 100644
--- a/jstests/concurrency/fsm_workloads/agg_match.js
+++ b/jstests/concurrency/fsm_workloads/agg_match.js
@@ -28,7 +28,7 @@ var $config = extendWorkload($config, function($config, $super) {
assertWhenOwnColl.eq(db[collName].count() / 2, db[otherCollName].count());
};
- $config.teardown = function teardown(db, collName) {
+ $config.teardown = function teardown(db, collName, cluster) {
$super.teardown.apply(this, arguments);
assertWhenOwnColl(db[this.getOutCollName(collName)].drop());
diff --git a/jstests/concurrency/fsm_workloads/agg_sort.js b/jstests/concurrency/fsm_workloads/agg_sort.js
index 322ed5297e8..2f312e0adda 100644
--- a/jstests/concurrency/fsm_workloads/agg_sort.js
+++ b/jstests/concurrency/fsm_workloads/agg_sort.js
@@ -27,7 +27,7 @@ var $config = extendWorkload($config, function($config, $super) {
assertWhenOwnColl.eq(db[collName].find().itcount() / 2, db[otherCollName].find().itcount());
};
- $config.teardown = function teardown(db, collName) {
+ $config.teardown = function teardown(db, collName, cluster) {
$super.teardown.apply(this, arguments);
// drop all collections with this workload's assumed-to-be-unique prefix
diff --git a/jstests/concurrency/fsm_workloads/agg_sort_external.js b/jstests/concurrency/fsm_workloads/agg_sort_external.js
index 65bfdb264bc..161f7592d08 100644
--- a/jstests/concurrency/fsm_workloads/agg_sort_external.js
+++ b/jstests/concurrency/fsm_workloads/agg_sort_external.js
@@ -38,7 +38,7 @@ var $config = extendWorkload($config, function($config, $super) {
assertWhenOwnColl.eq(db[collName].find().itcount() / 2, db[otherCollName].find().itcount());
};
- $config.teardown = function teardown(db, collName) {
+ $config.teardown = function teardown(db, collName, cluster) {
$super.teardown.apply(this, arguments);
// drop all collections with this workload's assumed-to-be-unique prefix
diff --git a/jstests/concurrency/fsm_workloads/auth_create_role.js b/jstests/concurrency/fsm_workloads/auth_create_role.js
index 53f1f03cab0..6ad1573cb5a 100644
--- a/jstests/concurrency/fsm_workloads/auth_create_role.js
+++ b/jstests/concurrency/fsm_workloads/auth_create_role.js
@@ -62,7 +62,7 @@ var $config = (function() {
createRole: { createRole: 1 }
};
- function teardown(db, collName) {
+ function teardown(db, collName, cluster) {
var pattern = new RegExp('^' + this.prefix + '\\d+_\\d+$');
dropRoles(db, pattern);
}
diff --git a/jstests/concurrency/fsm_workloads/auth_create_user.js b/jstests/concurrency/fsm_workloads/auth_create_user.js
index c50dc65afb4..7fe71f006fb 100644
--- a/jstests/concurrency/fsm_workloads/auth_create_user.js
+++ b/jstests/concurrency/fsm_workloads/auth_create_user.js
@@ -55,7 +55,7 @@ var $config = (function() {
createUser: { createUser: 1 }
};
- function teardown(db, collName) {
+ function teardown(db, collName, cluster) {
var pattern = new RegExp('^' + this.prefix + '\\d+_\\d+$');
dropUsers(db, pattern);
}
diff --git a/jstests/concurrency/fsm_workloads/collmod.js b/jstests/concurrency/fsm_workloads/collmod.js
new file mode 100644
index 00000000000..758a02634d9
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/collmod.js
@@ -0,0 +1,75 @@
+'use strict';
+
+/**
+ * collmod.js
+ *
+ * Base workload for collMod command.
+ * Generates some random data and inserts it into a collection with a
+ * TTL index. Runs a collMod command to change the value of the
+ * expireAfterSeconds setting to a random integer.
+ *
+ * All threads update the same TTL index on the same collection.
+ */
+var $config = (function() {
+
+ var data = {
+ numDocs: 1000,
+ maxTTL: 5000 // max time to live
+ };
+
+ var states = (function() {
+
+ function collMod(db, collName) {
+ var newTTL = Random.randInt(this.maxTTL);
+ var res = db.runCommand({ collMod: this.threadCollName,
+ index: {
+ keyPattern: { createdAt: 1 },
+ expireAfterSeconds: newTTL
+ }
+ });
+ assertAlways.commandWorked(res);
+ // only assert if new expireAfterSeconds differs from old one
+ if (res.hasOwnProperty('expireAfterSeconds_new')) {
+ assertWhenOwnDB.eq(res.expireAfterSeconds_new, newTTL);
+ }
+ }
+
+ return {
+ collMod: collMod
+ };
+
+ })();
+
+ var transitions = {
+ collMod: { collMod: 1 }
+ };
+
+ function setup(db, collName, cluster) {
+ // other workloads that extend this one might have set 'this.threadCollName'
+ this.threadCollName = this.threadCollName || collName;
+ var bulk = db[this.threadCollName].initializeUnorderedBulkOp();
+ for (var i = 0; i < this.numDocs; ++i) {
+ bulk.insert({ createdAt: new Date() });
+ }
+
+ var res = bulk.execute();
+ assertAlways.writeOK(res);
+ assertAlways.eq(this.numDocs, res.nInserted);
+
+ // create TTL index
+ res = db[this.threadCollName].ensureIndex({ createdAt: 1 },
+ { expireAfterSeconds: 3600 });
+ assertAlways.commandWorked(res);
+ }
+
+ return {
+ threadCount: 10,
+ iterations: 20,
+ data: data,
+ startState: 'collMod',
+ states: states,
+ transitions: transitions,
+ setup: setup
+ };
+
+})();
diff --git a/jstests/concurrency/fsm_workloads/collmod_separate_collections.js b/jstests/concurrency/fsm_workloads/collmod_separate_collections.js
new file mode 100644
index 00000000000..6802ee72883
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/collmod_separate_collections.js
@@ -0,0 +1,41 @@
+'use strict';
+
+/**
+ * collmod_separate_collections.js
+ *
+ * Generates some random data and inserts it into a collection with a
+ * TTL index. Runs a collMod command to change the value of the
+ * expireAfterSeconds setting to a random integer.
+ *
+ * Each thread updates a TTL index on a separate collection.
+ */
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/collmod.js'); // for $config
+load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropCollections
+
+var $config = extendWorkload($config, function($config, $super) {
+ $config.data.prefix = 'collmod_separate_collections';
+
+ $config.states.init = function init(db, collName) {
+ this.threadCollName = this.prefix + '_' + this.tid;
+ $super.setup.call(this, db, this.threadCollName);
+ };
+
+ $config.transitions = Object.extend({
+ init: { collMod: 1 }
+ }, $super.transitions);
+
+ $config.setup = function setup(db, collName) {
+ // no-op: since the init state is used to setup
+ // the separate collections on a per-thread basis.
+ };
+
+ $config.teardown = function teardown(db, collName, cluster) {
+ var pattern = new RegExp('^' + this.prefix + '_\\d+$');
+ dropCollections(db, pattern);
+ $super.teardown.apply(this, arguments);
+ };
+
+ $config.startState = 'init';
+ return $config;
+});
diff --git a/jstests/concurrency/fsm_workloads/compact.js b/jstests/concurrency/fsm_workloads/compact.js
new file mode 100644
index 00000000000..f7488c5143f
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/compact.js
@@ -0,0 +1,103 @@
+'use strict';
+
+/**
+ * compact.js
+ *
+ * Bulk inserts 1000 documents and builds indexes. Then alternates between compacting the
+ * collection and verifying the number of documents and indexes. Operates on a separate collection
+ * for each thread.
+ */
+
+load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropCollections
+
+var $config = (function() {
+ var data = {
+ nDocumentsToInsert: 1000,
+ nIndexes: 3 + 1, // The number of indexes created in createIndexes + 1 for { _id: 1 }
+ prefix: 'compact' // Use filename for prefix because filename is assumed unique
+ };
+
+ var states = (function() {
+ function insertDocuments(db, collName) {
+ var bulk = db[collName].initializeUnorderedBulkOp();
+ for (var i = 0; i < this.nDocumentsToInsert; ++i) {
+ bulk.insert({
+ a: Random.randInt(10),
+ b: Random.randInt(10),
+ c: Random.randInt(10)
+ });
+ }
+ var res = bulk.execute();
+ assertAlways.writeOK(res);
+ assertAlways.eq(this.nDocumentsToInsert, res.nInserted);
+ }
+
+ function createIndexes(db, collName) {
+ // The number of indexes created here is also stored in data.nIndexes
+ var aResult = db[collName].ensureIndex({ a: 1 });
+ assertAlways.commandWorked(aResult);
+
+ var bResult = db[collName].ensureIndex({ b: 1 });
+ assertAlways.commandWorked(bResult);
+
+ var cResult = db[collName].ensureIndex({ c: 1 });
+ assertAlways.commandWorked(cResult);
+ }
+
+ // This method is independent of collectionSetup to allow it to be overridden in
+ // workloads that extend this one
+ function init(db, collName) {
+ this.threadCollName = this.prefix + '_' + this.tid;
+ }
+
+ function collectionSetup(db, collName) {
+ insertDocuments.call(this, db, this.threadCollName);
+ createIndexes.call(this, db, this.threadCollName);
+ }
+
+ function compact(db, collName) {
+ var res = db.runCommand({
+ compact: this.threadCollName,
+ paddingFactor: 1.0,
+ force: true
+ });
+ assertAlways.commandWorked(res);
+ }
+
+ function query(db, collName) {
+ var count = db[this.threadCollName].find().itcount();
+ assertWhenOwnColl.eq(count, this.nDocumentsToInsert, 'number of documents in ' +
+ 'collection should not change following a compact');
+ var indexesCount = db[this.threadCollName].getIndexes().length;
+ assertWhenOwnColl.eq(indexesCount, this.nIndexes);
+ }
+
+ return {
+ init: init,
+ collectionSetup: collectionSetup,
+ compact: compact,
+ query: query
+ };
+ })();
+
+ var transitions = {
+ init: { collectionSetup: 1 },
+ collectionSetup: { compact: 0.5, query: 0.5 },
+ compact: { compact: 0.5, query: 0.5 },
+ query: { compact: 0.5, query: 0.5 }
+ };
+
+ var teardown = function teardown(db, collName, cluster) {
+ var pattern = new RegExp('^' + this.prefix + '_\\d+$');
+ dropCollections(db, pattern);
+ };
+
+ return {
+ threadCount: 15,
+ iterations: 10,
+ states: states,
+ transitions: transitions,
+ teardown: teardown,
+ data: data
+ };
+})();
diff --git a/jstests/concurrency/fsm_workloads/compact_simultaneous_padding_bytes.js b/jstests/concurrency/fsm_workloads/compact_simultaneous_padding_bytes.js
new file mode 100644
index 00000000000..1328fdf5382
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/compact_simultaneous_padding_bytes.js
@@ -0,0 +1,33 @@
+'use strict';
+
+/**
+ * compact_simultaneous_padding_bytes.js
+ *
+ * Bulk inserts 1000 documents and builds indexes. Then alternates between compacting the
+ * collection and verifying the number of documents and indexes. Operates on a single collection
+ * for all threads. Uses paddingBytes as a parameter for compact.
+ */
+
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/compact.js'); // for $config
+
+var $config = extendWorkload($config, function($config, $super) {
+ $config.states.init = function init(db, collName) {
+ this.threadCollName = collName;
+ };
+
+ $config.states.compact = function compact(db, collName) {
+ var res = db.runCommand({
+ compact: this.threadCollName,
+ paddingBytes: 1024 * 5,
+ force: true
+ });
+ assertAlways.commandWorked(res);
+ };
+
+ // no-op the query state because querying while compacting can result in closed cursors
+ // as per SERVER-3964, as well as inaccurate counts, leaving nothing to assert.
+ $config.states.query = function query(db, collName) { };
+
+ return $config;
+});
diff --git a/jstests/concurrency/fsm_workloads/convert_to_capped_collection.js b/jstests/concurrency/fsm_workloads/convert_to_capped_collection.js
new file mode 100644
index 00000000000..ef575b17849
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/convert_to_capped_collection.js
@@ -0,0 +1,93 @@
+/**
+ * convert_to_capped_collection.js
+ *
+ * Creates a non-capped collection. Converts it to a
+ * capped collection. After each iteration, truncates the
+ * collection, ensuring that the storage size of the
+ * collection is still a multiple of 256.
+ *
+ * MongoDB raises the storage size of a capped collection
+ * to an integer multiple of 256.
+ */
+load('jstests/concurrency/fsm_workload_helpers/drop_utils.js');
+
+var $config = (function() {
+ var iter = 20;
+ var data = {
+ prefix: 'convert_to_capped_collection',
+
+ // initial size should not be a power of 256
+ size: Math.pow(2, iter + 5) + 1
+ };
+
+ var states = (function() {
+
+ function uniqueCollectionName(prefix, tid) {
+ return prefix + '_' + tid;
+ }
+
+ function isMultiple256(num) {
+ return num % 256 === 0;
+ }
+
+ function init(db, collName) {
+ this.threadCollName = uniqueCollectionName(this.prefix, this.tid);
+
+ var bulk = db[this.threadCollName].initializeUnorderedBulkOp();
+ for (var i = 0; i < (this.tid + 1) * 200; i++) {
+ bulk.insert({ i: i, rand: Random.rand() });
+ }
+
+ var res = bulk.execute();
+ assertAlways.writeOK(res);
+ assertAlways.eq((this.tid + 1) * 200, res.nInserted);
+
+ assertWhenOwnDB(!db[this.threadCollName].isCapped());
+ assertWhenOwnDB.commandWorked(db[this.threadCollName].convertToCapped(this.size));
+ assertWhenOwnDB(db[this.threadCollName].isCapped());
+ assertWhenOwnDB(isMultiple256(db[this.threadCollName].storageSize()));
+ }
+
+ function convertToCapped(db, collName) {
+ // divide size by 1.5 so that the resulting size
+ // is not a multiple of 256
+ this.size /= 1.5;
+
+ assertWhenOwnDB.commandWorked(db[this.threadCollName].convertToCapped(this.size));
+ assertWhenOwnDB(db[this.threadCollName].isCapped());
+ assertWhenOwnDB(isMultiple256(db[this.threadCollName].storageSize()));
+
+ // only the _id index should remain after running convertToCapped
+ var indexKeys = db[this.threadCollName].getIndexKeys();
+ assertWhenOwnDB.eq(1, indexKeys.length);
+ assertWhenOwnDB(function() {
+ assertWhenOwnDB.docEq({ _id: 1 }, indexKeys[0]);
+ });
+ }
+
+ return {
+ init: init,
+ convertToCapped: convertToCapped
+ };
+ })();
+
+ var transitions = {
+ init: { convertToCapped: 1 },
+ convertToCapped: { convertToCapped: 1 }
+ };
+
+ function teardown(db, collName, cluster) {
+ var pattern = new RegExp('^' + this.prefix + '_\\d+$');
+ dropCollections(db, pattern);
+ }
+
+ return {
+ threadCount: 10,
+ iterations: iter,
+ data: data,
+ states: states,
+ transitions: transitions,
+ teardown: teardown
+ };
+
+})();
diff --git a/jstests/concurrency/fsm_workloads/convert_to_capped_collection_index.js b/jstests/concurrency/fsm_workloads/convert_to_capped_collection_index.js
new file mode 100644
index 00000000000..dd6716c750d
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/convert_to_capped_collection_index.js
@@ -0,0 +1,27 @@
+/**
+ * convert_to_capped_collection_index.js
+ *
+ * Creates a non-capped collection. Converts it to a
+ * capped collection. After each iteration, truncates the
+ * collection, ensuring that the storage size of the
+ * collection is still a multiple of 256.
+ *
+ * MongoDB raises the storage size of a capped collection
+ * to an integer multiple of 256.
+ *
+ * Make sure that we can create indexes on any collection
+ * but that only the _id index remains after (re-)converting
+ * to a capped collection.
+ */
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/convert_to_capped_collection.js'); // for $config
+
+var $config = extendWorkload($config, function($config, $super) {
+ $config.states.convertToCapped = function convertToCapped(db, collName) {
+ assertWhenOwnDB.commandWorked(db[this.threadCollName].ensureIndex({ i: 1, rand: 1 }));
+ assertWhenOwnDB.eq(2, db[this.threadCollName].getIndexes().length);
+ $super.states.convertToCapped.apply(this, arguments);
+ };
+
+ return $config;
+});
diff --git a/jstests/concurrency/fsm_workloads/count.js b/jstests/concurrency/fsm_workloads/count.js
new file mode 100644
index 00000000000..626ac49a4c8
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/count.js
@@ -0,0 +1,76 @@
+'use strict';
+
+/**
+ * count.js
+ *
+ * Base workload for count.
+ * Runs count on a non-indexed field and verifies that the count
+ * is correct.
+ * Each thread picks a random 'modulus' in range [5, 10]
+ * and a random 'countPerNum' in range [50, 100]
+ * and then inserts 'modulus * countPerNum' documents. [250, 1000]
+ * All threads insert into the same collection.
+ */
+var $config = (function() {
+
+ var data = {
+ randRange: function randRange(low, high) {
+ // return random number in range [low, high]
+ assert.gt(high, low);
+ return low + Random.randInt(high - low + 1);
+ },
+ getNumDocs: function getNumDocs() {
+ return this.modulus * this.countPerNum;
+ },
+ getCount: function getCount(db, predicate) {
+ var query = Object.extend({ tid: this.tid }, predicate);
+ return db[this.threadCollName].count(query);
+ }
+ };
+
+ var states = (function() {
+
+ function init(db, collName) {
+ this.modulus = this.randRange(5, 10);
+ this.countPerNum = this.randRange(50, 100);
+
+ // workloads that extend this one might have set 'this.threadCollName'
+ this.threadCollName = this.threadCollName || collName;
+
+ var bulk = db[this.threadCollName].initializeUnorderedBulkOp();
+ for (var i = 0; i < this.getNumDocs(); ++i) {
+ bulk.insert({ i: i % this.modulus, tid: this.tid });
+ }
+ var res = bulk.execute();
+ assertAlways.writeOK(res);
+ assertAlways.eq(this.getNumDocs(), res.nInserted);
+ }
+
+ function count(db, collName) {
+ assertWhenOwnColl.eq(this.getCount(db), this.getNumDocs());
+
+ var num = Random.randInt(this.modulus);
+ assertWhenOwnColl.eq(this.getCount(db, { i: num }), this.countPerNum);
+ }
+
+ return {
+ init: init,
+ count: count
+ };
+
+ })();
+
+ var transitions = {
+ init: { count: 1 },
+ count: { count: 1 }
+ };
+
+ return {
+ data: data,
+ threadCount: 10,
+ iterations: 20,
+ states: states,
+ transitions: transitions
+ };
+
+})();
diff --git a/jstests/concurrency/fsm_workloads/count_indexed.js b/jstests/concurrency/fsm_workloads/count_indexed.js
new file mode 100644
index 00000000000..7dbe408a82a
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/count_indexed.js
@@ -0,0 +1,38 @@
+'use strict';
+
+/**
+ * count_indexed.js
+ *
+ * Runs count on an indexed field (using hint), which results in an index scan,
+ * and verifies the result.
+ * Each thread picks a random 'modulus' in range [5, 10]
+ * and a random 'countPerNum' in range [50, 100]
+ * and then inserts 'modulus * countPerNum' documents. [250, 1000]
+ * Each thread inserts docs into a unique collection.
+ */
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/count.js'); // for $config
+load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropCollections
+
+var $config = extendWorkload($config, function($config, $super) {
+ $config.data.prefix = 'count_fsm';
+
+ $config.data.getCount = function getCount(db, predicate) {
+ var query = Object.extend({ tid: this.tid }, predicate);
+ return db[this.threadCollName].find(query).hint({ tid: 1, i: 1 }).count();
+ };
+
+ $config.states.init = function init(db, collName) {
+ this.threadCollName = this.prefix + '_' + this.tid;
+ $super.states.init.apply(this, arguments);
+ assertAlways.commandWorked(db[this.threadCollName].ensureIndex({ tid: 1, i: 1 }));
+ };
+
+ $config.teardown = function teardown(db, collName) {
+ var pattern = new RegExp('^' + this.prefix + '_\\d+$');
+ dropCollections(db, pattern);
+ $super.teardown.apply(this, arguments);
+ };
+
+ return $config;
+});
diff --git a/jstests/concurrency/fsm_workloads/count_limit_skip.js b/jstests/concurrency/fsm_workloads/count_limit_skip.js
new file mode 100644
index 00000000000..b770e542382
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/count_limit_skip.js
@@ -0,0 +1,52 @@
+'use strict';
+
+/**
+ * count_limit_skip.js
+ *
+ * Runs count with skip, limit, and a query (without using hint)
+ * resulting in a collection scan and then verifies the result.
+ * Each thread picks a random 'modulus' in range [5, 10]
+ * and a random 'countPerNum' in range [50, 100]
+ * and then inserts 'modulus * countPerNum' documents. [250, 1000]
+ * Each thread inserts docs into a unique collection.
+ */
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/count.js'); // for $config
+load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropCollections
+
+var $config = extendWorkload($config, function($config, $super) {
+ $config.data.prefix = 'count_fsm_q_l_s';
+
+ $config.data.getCount = function getCount(db, predicate) {
+ var query = Object.extend({ tid: this.tid }, predicate);
+ return db[this.threadCollName].find(query)
+ .skip(this.countPerNum - 1)
+ .limit(10).count(true);
+ };
+
+ $config.states.init = function init(db, collName) {
+ this.threadCollName = this.prefix + '_' + this.tid;
+
+ $super.states.init.apply(this, arguments);
+ };
+
+ $config.states.count = function count(db, collName) {
+ assertWhenOwnColl.eq(this.getCount(db),
+ // having done 'skip(this.countPerNum - 1).limit(10)'
+ 10);
+
+ var num = Random.randInt(this.modulus);
+ assertWhenOwnColl.eq(this.getCount(db, { i: num }),
+ // having done 'skip(this.countPerNum - 1).limit(10)'
+ 1);
+ };
+
+ $config.teardown = function teardown(db, collName) {
+ var pattern = new RegExp('^' + this.prefix + '_\\d+$');
+ dropCollections(db, pattern);
+ $super.teardown.apply(this, arguments);
+ };
+
+ return $config;
+});
+
diff --git a/jstests/concurrency/fsm_workloads/create_capped_collection.js b/jstests/concurrency/fsm_workloads/create_capped_collection.js
index 882ef3d2239..8066e3e4f7d 100644
--- a/jstests/concurrency/fsm_workloads/create_capped_collection.js
+++ b/jstests/concurrency/fsm_workloads/create_capped_collection.js
@@ -132,7 +132,7 @@ var $config = (function() {
create: { create: 1 }
};
- function teardown(db, collName) {
+ function teardown(db, collName, cluster) {
var pattern = new RegExp('^' + this.prefix + '\\d+_\\d+$');
dropCollections(db, pattern);
}
diff --git a/jstests/concurrency/fsm_workloads/create_collection.js b/jstests/concurrency/fsm_workloads/create_collection.js
index 47e0534b2cf..fa2a13fb45d 100644
--- a/jstests/concurrency/fsm_workloads/create_collection.js
+++ b/jstests/concurrency/fsm_workloads/create_collection.js
@@ -44,7 +44,7 @@ var $config = (function() {
create: { create: 1 }
};
- function teardown(db, collName) {
+ function teardown(db, collName, cluster) {
var pattern = new RegExp('^' + this.prefix + '\\d+_\\d+$');
dropCollections(db, pattern);
}
diff --git a/jstests/concurrency/fsm_workloads/distinct.js b/jstests/concurrency/fsm_workloads/distinct.js
new file mode 100644
index 00000000000..a9cef6f991e
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/distinct.js
@@ -0,0 +1,63 @@
+'use strict';
+
+/**
+ * distinct.js
+ *
+ * Runs distinct on an indexed field and verifies the result.
+ * The indexed field contains unique values.
+ * Each thread operates on a separate collection.
+ */
+load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropCollections
+
+var $config = (function() {
+
+ var data = {
+ numDocs: 1000,
+ prefix: 'distinct_fsm'
+ };
+
+ var states = (function() {
+
+ function init(db, collName) {
+ this.threadCollName = this.prefix + '_' + this.tid;
+ var bulk = db[this.threadCollName].initializeUnorderedBulkOp();
+ for (var i = 0; i < this.numDocs; ++i) {
+ bulk.insert({ i: i });
+ }
+ var res = bulk.execute();
+ assertAlways.writeOK(res);
+ assertAlways.eq(this.numDocs, res.nInserted);
+ assertAlways.commandWorked(db[this.threadCollName].ensureIndex({ i: 1 }));
+ }
+
+ function distinct(db, collName) {
+ assertWhenOwnColl.eq(this.numDocs, db[this.threadCollName].distinct('i').length);
+ }
+
+ return {
+ init: init,
+ distinct: distinct
+ };
+
+ })();
+
+ var transitions = {
+ init: { distinct: 1 },
+ distinct: { distinct: 1 }
+ };
+
+ function teardown(db, collName, cluster) {
+ var pattern = new RegExp('^' + this.prefix + '_\\d+$');
+ dropCollections(db, pattern);
+ }
+
+ return {
+ data: data,
+ threadCount: 10,
+ iterations: 20,
+ states: states,
+ transitions: transitions,
+ teardown: teardown
+ };
+
+})();
diff --git a/jstests/concurrency/fsm_workloads/distinct_noindex.js b/jstests/concurrency/fsm_workloads/distinct_noindex.js
new file mode 100644
index 00000000000..6a38830f9d6
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/distinct_noindex.js
@@ -0,0 +1,59 @@
+'use strict';
+
+/**
+ * distinct_noindex.js
+ *
+ * Runs distinct on a non-indexed field and verifies the result.
+ * The field contains non-unique values.
+ * Each thread operates on the same collection.
+ */
+var $config = (function() {
+
+ var data = {
+ randRange: function randRange(low, high) {
+ assertAlways.gt(high, low);
+ return low + Random.randInt(high - low + 1);
+ },
+ numDocs: 1000
+ };
+
+ var states = (function() {
+
+ function init(db, collName) {
+ this.modulus = this.randRange(5, 15);
+
+ var bulk = db[collName].initializeUnorderedBulkOp();
+ for (var i = 0; i < this.numDocs; ++i) {
+ bulk.insert({ i: i % this.modulus, tid: this.tid });
+ }
+ var res = bulk.execute();
+ assertAlways.writeOK(res);
+ assertAlways.eq(this.numDocs, res.nInserted);
+ }
+
+ function distinct(db, collName) {
+ assertWhenOwnColl.eq(this.modulus,
+ db[collName].distinct('i', { tid: this.tid }).length);
+ }
+
+ return {
+ init: init,
+ distinct: distinct
+ };
+
+ })();
+
+ var transitions = {
+ init: { distinct: 1 },
+ distinct: { distinct: 1 }
+ };
+
+ return {
+ data: data,
+ threadCount: 10,
+ iterations: 20,
+ states: states,
+ transitions: transitions
+ };
+
+})();
diff --git a/jstests/concurrency/fsm_workloads/distinct_projection.js b/jstests/concurrency/fsm_workloads/distinct_projection.js
new file mode 100644
index 00000000000..d934c329f20
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/distinct_projection.js
@@ -0,0 +1,23 @@
+'use strict';
+
+/**
+ * distinct_projection.js
+ *
+ * Runs distinct, with a projection on an indexed field, and verifies the result.
+ * The indexed field contains unique values.
+ * Each thread operates on a separate collection.
+ */
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/distinct.js'); // for $config
+
+var $config = extendWorkload($config, function($config, $super) {
+ $config.data.prefix = 'distinct_projection_fsm';
+
+ $config.states.distinct = function distinct(db, collName) {
+ var query = { i: { $lt: this.numDocs / 2 } };
+ assertWhenOwnColl.eq(this.numDocs / 2,
+ db[this.threadCollName].distinct('i', query).length);
+ };
+
+ return $config;
+});
diff --git a/jstests/concurrency/fsm_workloads/explain.js b/jstests/concurrency/fsm_workloads/explain.js
new file mode 100644
index 00000000000..238f01fe793
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/explain.js
@@ -0,0 +1,83 @@
+'use strict';
+
+/**
+ * explain.js
+ *
+ * Runs explain() on a collection.
+ *
+ */
+var $config = (function() {
+
+ var data = {
+ collNotExist: 'donotexist__',
+ nInserted: 0,
+ assignEqualProbsToTransitions: function assignEqualProbsToTransitions(statesMap) {
+ var states = Object.keys(statesMap);
+ assertAlways.gt(states.length, 0);
+ var probs = {};
+ var pr = 1.0 / states.length;
+ states.forEach(function(s) {
+ probs[s] = pr;
+ });
+ return probs;
+ }
+ };
+
+ function setup(db, collName, cluster) {
+ assertAlways.commandWorked(db[collName].ensureIndex({ j: 1 }));
+ }
+
+ var states = (function() {
+ function insert(db, collName) {
+ db[collName].insert({
+ i: this.nInserted,
+ j: 2 * this.nInserted
+ });
+ this.nInserted++;
+ }
+
+ function explain(db, collName) {
+ // test the three verbosity levels:
+ // 'queryPlanner', 'executionStats', and 'allPlansExecution'
+ ['queryPlanner', 'executionStats', 'allPlansExecution'].forEach(
+ function(verbosity) {
+ assertAlways.commandWorked(db[collName]
+ .find({ j: this.nInserted / 2 })
+ .explain(verbosity));
+ }.bind(this)
+ );
+ }
+
+ function explainNonExistentNS(db, collName) {
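+            // Explaining a find() against a namespace that does not exist should still succeed
+            // and report a winning plan consisting of a single EOF stage.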
+ assertAlways(!db[this.collNotExist].exists());
+ var res = db[this.collNotExist].find().explain();
+ assertAlways.commandWorked(res);
+ assertAlways(res.queryPlanner, tojson(res));
+ assertAlways(res.queryPlanner.winningPlan, tojson(res));
+ assertAlways.eq(res.queryPlanner.winningPlan.stage, 'EOF',
+ tojson(res));
+ }
+
+ return {
+ insert: insert,
+ explain: explain,
+ explainNonExistentNS: explainNonExistentNS
+ };
+
+ })();
+
+ var transitions = {
+ insert: { insert: 0.1, explain: 0.8, explainNonExistentNS: 0.1 },
+ explain: { insert: 0.7, explain: 0.2, explainNonExistentNS: 0.1 },
+ explainNonExistentNS: { insert: 0.4, explain: 0.5, explainNonExistentNS: 0.1 }
+ };
+
+ return {
+ threadCount: 10,
+ iterations: 50,
+ startState: 'insert',
+ states: states,
+ transitions: transitions,
+ data: data
+ };
+})();
diff --git a/jstests/concurrency/fsm_workloads/explain_aggregate.js b/jstests/concurrency/fsm_workloads/explain_aggregate.js
new file mode 100644
index 00000000000..02a00923c0d
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/explain_aggregate.js
@@ -0,0 +1,45 @@
+'use strict';
+
+/**
+ * explain_aggregate.js
+ *
+ * Runs explain() and aggregate() on a collection.
+ *
+ */
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/explain.js'); // for $config
+
+var $config = extendWorkload($config, function($config, $super) {
+
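+    // Asserts that the explained pipeline reports 'num' stages and that the first stage is a
+    // $cursor stage exposing queryPlanner output.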
+ function assertCursorStages(num, obj) {
+ assertAlways(obj.stages, tojson(obj));
+ assertAlways.eq(num, obj.stages.length, tojson(obj.stages));
+ assertAlways(obj.stages[0].$cursor, tojson(obj.stages[0]));
+ assertAlways(obj.stages[0].$cursor.hasOwnProperty('queryPlanner'),
+ tojson(obj.stages[0].$cursor));
+ }
+
+ $config.states = Object.extend({
+ explainMatch: function explainMatch(db, collName) {
+ var res = db[collName].explain().aggregate([{ $match: { i: this.nInserted / 2 } }]);
+ assertAlways.commandWorked(res);
+
+ // stages reported: $cursor
+ assertCursorStages(1, res);
+ },
+ explainMatchProject: function explainMatchProject(db, collName) {
+ var res = db[collName].explain().aggregate([{ $match: { i: this.nInserted / 3 } },
+ { $project: { i: 1 } }]);
+ assertAlways.commandWorked(res);
+
+ // stages reported: $cursor, $project
+ assertCursorStages(2, res);
+ }
+ }, $super.states);
+
+ $config.transitions = Object.extend({
+ explain: $config.data.assignEqualProbsToTransitions($config.states)
+ }, $super.transitions);
+
+ return $config;
+});
diff --git a/jstests/concurrency/fsm_workloads/explain_count.js b/jstests/concurrency/fsm_workloads/explain_count.js
new file mode 100644
index 00000000000..0b44073195d
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/explain_count.js
@@ -0,0 +1,59 @@
+'use strict';
+
+/**
+ * explain_count.js
+ *
+ * Runs explain() and count() on a collection.
+ */
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/explain.js'); // for $config
+load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isMongos
+load('jstests/libs/analyze_plan.js'); // for planHasStage
+
+var $config = extendWorkload($config, function($config, $super) {
+
+ function assertNCounted(num, obj, db) {
+ var stage = obj.executionStats.executionStages;
+ // get sharded stage(s) if counting on mongos
+ if (isMongos(db)) {
+ stage = stage.shards[0].executionStages;
+ }
+ assertWhenOwnColl.eq(num, stage.nCounted);
+ }
+
+ $config.states = Object.extend({
+ explainBasicCount: function explainBasicCount(db, collName) {
+ var res = db[collName].explain().count();
+ assertAlways.commandWorked(res);
+ assertAlways(planHasStage(res.queryPlanner.winningPlan, 'COUNT'));
+ },
+ explainCountHint: function explainCountHint(db, collName) {
+ assertWhenOwnColl(function() {
+ var res = db[collName].explain()
+ .find({ i: this.nInserted / 2 })
+ .hint({ i: 1 }).count();
+ assertWhenOwnColl.commandWorked(res);
+ assertWhenOwnColl(planHasStage(res.queryPlanner.winningPlan, 'COUNT'));
+ assertWhenOwnColl(planHasStage(res.queryPlanner.winningPlan, 'COUNT_SCAN'));
+            }.bind(this));
+ },
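+        // cursor.count(applySkipLimit): passing false ignores the skip() above, while passing
+        // true applies it, hence the different nCounted expectations below.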
+ explainCountNoSkipLimit: function explainCountNoSkipLimit(db, collName) {
+ var res = db[collName].explain('executionStats')
+ .find({ i: this.nInserted }).skip(1).count(false);
+ assertAlways.commandWorked(res);
+ assertNCounted(1, res, db);
+ },
+ explainCountSkipLimit: function explainCountSkipLimit(db, collName) {
+ var res = db[collName].explain('executionStats')
+ .find({ i: this.nInserted }).skip(1).count(true);
+ assertAlways.commandWorked(res);
+ assertNCounted(0, res, db);
+ }
+ }, $super.states);
+
+ $config.transitions = Object.extend({
+ explain: $config.data.assignEqualProbsToTransitions($config.states)
+ }, $super.transitions);
+
+ return $config;
+});
diff --git a/jstests/concurrency/fsm_workloads/explain_find.js b/jstests/concurrency/fsm_workloads/explain_find.js
new file mode 100644
index 00000000000..acb189d24be
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/explain_find.js
@@ -0,0 +1,66 @@
+'use strict';
+
+/**
+ * explain_find.js
+ *
+ * Runs explain() and find() on a collection.
+ *
+ */
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/explain.js'); // for $config
+load('jstests/libs/analyze_plan.js'); // for planHasStage and isIxscan
+
+var $config = extendWorkload($config, function($config, $super) {
+
+ $config.states = Object.extend({
+ explainLimit: function explainLimit(db, collName) {
+ var res = db[collName].find().limit(3).explain();
+ assertAlways.commandWorked(res);
+ assertAlways(planHasStage(res.queryPlanner.winningPlan, 'LIMIT'));
+ },
+ explainBatchSize: function explainBatchSize(db, collName) {
+ var res = db[collName].find().batchSize(3).explain();
+ assertAlways.commandWorked(res);
+ },
+ explainAddOption: function explainAddOption(db, collName) {
+ var res = db[collName].explain().find().addOption(DBQuery.Option.exhaust).finish();
+ assertAlways.commandWorked(res);
+ },
+ explainSkip: function explainSkip(db, collName) {
+ var res = db[collName].explain().find().skip(3).finish();
+ assertAlways.commandWorked(res);
+ assertAlways(planHasStage(res.queryPlanner.winningPlan, 'SKIP'));
+ },
+ explainSort: function explainSort(db, collName) {
+ var res = db[collName].find().sort({ i: -1 }).explain();
+ assertAlways.commandWorked(res);
+ assertAlways(planHasStage(res.queryPlanner.winningPlan, 'SORT'));
+ },
+ explainHint: function explainHint(db, collName) {
+ assertWhenOwnColl(function() {
+ var res = db[collName].find().hint({ j: 1 }).explain();
+ assertWhenOwnColl.commandWorked(res);
+ assertWhenOwnColl(isIxscan(res.queryPlanner.winningPlan));
+ });
+ },
+ explainMaxTimeMS: function explainMaxTimeMS(db, collName) {
+ var res = db[collName].find().maxTimeMS(2000).explain();
+ assertAlways.commandWorked(res);
+ },
+ explainSnapshot: function explainSnapshot(db, collName) {
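+            // snapshot() forces the query to traverse the _id index, so the winning plan is
+            // expected to be an index scan.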
+ var res = db[collName].find().snapshot().explain();
+ assertAlways.commandWorked(res);
+ assertWhenOwnColl(isIxscan(res.queryPlanner.winningPlan));
+ }
+ }, $super.states);
+
+ $config.transitions = Object.extend({
+ explain: $config.data.assignEqualProbsToTransitions($config.states)
+ }, $super.transitions);
+
+ // doubling number of iterations so there is a higher chance we will
+ // transition to each of the 8 new states at least once
+ $config.iterations = $super.iterations * 2;
+
+ return $config;
+});
diff --git a/jstests/concurrency/fsm_workloads/explain_group.js b/jstests/concurrency/fsm_workloads/explain_group.js
new file mode 100644
index 00000000000..d99a60d7c42
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/explain_group.js
@@ -0,0 +1,29 @@
+'use strict';
+
+/**
+ * explain_group.js
+ *
+ * Runs explain() and group() on a collection.
+ *
+ */
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/explain.js'); // for $config
+load('jstests/libs/analyze_plan.js'); // for planHasStage
+
+var $config = extendWorkload($config, function($config, $super) {
+
+ $config.states = Object.extend({
+ explainBasicGroup: function explainBasicGroup(db, collName) {
+ var res = db[collName].explain().group(
+ { key: { i: 1 }, initial: {}, reduce: function() {} }
+ );
+ assertAlways.commandWorked(res);
+ }
+ }, $super.states);
+
+ $config.transitions = Object.extend({
+ explain: $config.data.assignEqualProbsToTransitions($config.states)
+ }, $super.transitions);
+
+ return $config;
+});
diff --git a/jstests/concurrency/fsm_workloads/explain_remove.js b/jstests/concurrency/fsm_workloads/explain_remove.js
new file mode 100644
index 00000000000..02620a92bea
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/explain_remove.js
@@ -0,0 +1,43 @@
+'use strict';
+
+/**
+ * explain_remove.js
+ *
+ * Runs explain() and remove() on a collection.
+ */
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/explain.js'); // for $config
+
+var $config = extendWorkload($config, function($config, $super) {
+
+ $config.states = Object.extend({
+ explainSingleRemove: function explainSingleRemove(db, collName) {
+ var res = db[collName].explain('executionStats')
+ .remove({ i: this.nInserted }, /* justOne */ true);
+ assertAlways.commandWorked(res);
+ assertWhenOwnColl(function() {
+ assertWhenOwnColl.eq(1, res.executionStats.totalDocsExamined);
+
+ // the document should not have been deleted.
+ assertWhenOwnColl.eq(1, db[collName].find({i: this.nInserted}).itcount());
+ }.bind(this));
+ },
+ explainMultiRemove: function explainMultiRemove(db, collName) {
+ var res = db[collName].explain('executionStats')
+ .remove({i: {$lte: this.nInserted / 2}});
+ assertAlways.commandWorked(res);
+ assertWhenOwnColl(function() {
+                assertWhenOwnColl.eq(this.nInserted / 2 + 1,
+                                     res.executionStats.totalDocsExamined);
+ // no documents should have been deleted
+ assertWhenOwnColl.eq(this.nInserted, db[collName].itcount());
+ }.bind(this));
+ }
+ }, $super.states);
+
+ $config.transitions = Object.extend({
+ explain: $config.data.assignEqualProbsToTransitions($config.states)
+ }, $super.transitions);
+
+ return $config;
+});
diff --git a/jstests/concurrency/fsm_workloads/explain_update.js b/jstests/concurrency/fsm_workloads/explain_update.js
new file mode 100644
index 00000000000..f72f06babb5
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/explain_update.js
@@ -0,0 +1,67 @@
+'use strict';
+
+/**
+ * explain_update.js
+ *
+ * Runs explain() and update() on a collection.
+ */
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/explain.js'); // for $config
+load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isMongos
+
+var $config = extendWorkload($config, function($config, $super) {
+
+ $config.states = Object.extend({
+ explainBasicUpdate: function explainBasicUpdate(db, collName) {
+ var res = db[collName].explain('executionStats').update({i: this.nInserted},
+ {$set: {j: 49}});
+ assertAlways.commandWorked(res);
+ assertWhenOwnColl(function() {
+                assertWhenOwnColl.eq(1, res.executionStats.totalDocsExamined);
+
+ // document should not have been updated.
+ var doc = db[collName].findOne({ i: this.nInserted });
+ assertWhenOwnColl.eq(2 * this.nInserted, doc.j);
+ }.bind(this));
+ },
+ explainUpdateUpsert: function explainUpdateUpsert(db, collName) {
+ var res = db[collName].explain('executionStats').update({i: 2 * this.nInserted + 1},
+ {$set: {j: 81}},
+ /* upsert */ true);
+ assertAlways.commandWorked(res);
+ var stage = res.executionStats.executionStages;
+
+ // if explaining a write command through mongos
+ if (isMongos(db)) {
+ stage = stage.shards[0].executionStages;
+ }
+ assertAlways.eq(stage.stage, 'UPDATE');
+ assertWhenOwnColl(stage.wouldInsert);
+
+ // make sure that the insert didn't actually happen.
+ assertWhenOwnColl.eq(this.nInserted, db[collName].find().itcount());
+ },
+ explainUpdateMulti: function explainUpdateMulti(db, collName) {
+ var res = db[collName].explain('executionStats').update({i: {$lte: 2}}, {$set: {b: 3}},
+ /* upsert */ false,
+ /* multi */ true);
+ assertAlways.commandWorked(res);
+ var stage = res.executionStats.executionStages;
+
+ // if explaining a write command through mongos
+ if (isMongos(db)) {
+ stage = stage.shards[0].executionStages;
+ }
+ assertAlways.eq(stage.stage, 'UPDATE');
+ assertWhenOwnColl(!stage.wouldInsert);
+ assertWhenOwnColl.eq(3, stage.nMatched);
+ assertWhenOwnColl.eq(3, stage.nWouldModify);
+ }
+ }, $super.states);
+
+ $config.transitions = Object.extend({
+ explain: $config.data.assignEqualProbsToTransitions($config.states)
+ }, $super.transitions);
+
+ return $config;
+});
diff --git a/jstests/concurrency/fsm_workloads/findAndModify_inc.js b/jstests/concurrency/fsm_workloads/findAndModify_inc.js
index 999cd6583cf..84aa5e3e5b9 100644
--- a/jstests/concurrency/fsm_workloads/findAndModify_inc.js
+++ b/jstests/concurrency/fsm_workloads/findAndModify_inc.js
@@ -46,7 +46,7 @@ var $config = (function() {
find: { update: 1 }
};
- function setup(db, collName) {
+ function setup(db, collName, cluster) {
db[collName].insert({ _id: 'findAndModify_inc' });
}
diff --git a/jstests/concurrency/fsm_workloads/findAndModify_update.js b/jstests/concurrency/fsm_workloads/findAndModify_update.js
index 95e168f7482..02b1b016895 100644
--- a/jstests/concurrency/fsm_workloads/findAndModify_update.js
+++ b/jstests/concurrency/fsm_workloads/findAndModify_update.js
@@ -84,7 +84,7 @@ var $config = (function() {
findAndModifyDescending: { findAndModifyAscending: 0.5, findAndModifyDescending: 0.5 }
};
- function setup(db, collName) {
+ function setup(db, collName, cluster) {
var res = db[collName].ensureIndex({ tid: 1, value: 1 });
assertAlways.commandWorked(res);
}
diff --git a/jstests/concurrency/fsm_workloads/findAndModify_update_collscan.js b/jstests/concurrency/fsm_workloads/findAndModify_update_collscan.js
index 913be623360..2e6908e979e 100644
--- a/jstests/concurrency/fsm_workloads/findAndModify_update_collscan.js
+++ b/jstests/concurrency/fsm_workloads/findAndModify_update_collscan.js
@@ -17,7 +17,7 @@ var $config = extendWorkload($config, function($config, $super) {
// Do not create the { tid: 1, value: 1 } index so that a collection
// scan is performed for the query and sort operations.
- $config.setup = function setup() { };
+ $config.setup = function setup(db, collName, cluster) { };
return $config;
});
diff --git a/jstests/concurrency/fsm_workloads/findAndModify_update_grow.js b/jstests/concurrency/fsm_workloads/findAndModify_update_grow.js
index edb81e8b7f2..c9c70fecc54 100644
--- a/jstests/concurrency/fsm_workloads/findAndModify_update_grow.js
+++ b/jstests/concurrency/fsm_workloads/findAndModify_update_grow.js
@@ -90,8 +90,7 @@ var $config = (function() {
// Get the DiskLoc of the document after its potential move
var after = db[collName].find({ _id: before._id }).showDiskLoc().next();
- var status = db.serverStatus();
- if (isMongod(status) && isMMAPv1(status)) {
+ if (isMongod(db) && isMMAPv1(db)) {
// Since the document has at least doubled in size, and the default
// allocation strategy of mmapv1 is to use power of two sizes, the
// document will have always moved
diff --git a/jstests/concurrency/fsm_workloads/group.js b/jstests/concurrency/fsm_workloads/group.js
index 6207ee8ba20..34bde848e00 100644
--- a/jstests/concurrency/fsm_workloads/group.js
+++ b/jstests/concurrency/fsm_workloads/group.js
@@ -71,7 +71,7 @@ var $config = (function() {
group: { group: 1 }
};
- function setup(db, collName) {
+ function setup(db, collName, cluster) {
var bulk = db[collName].initializeUnorderedBulkOp();
for (var i = 0; i < this.numDocs; ++i) {
bulk.insert({ rand: Random.rand() });
@@ -81,7 +81,7 @@ var $config = (function() {
assertAlways.eq(this.numDocs, res.nInserted);
}
- function teardown(db, collName) {
+ function teardown(db, collName, cluster) {
assertWhenOwnColl(db[collName].drop());
}
diff --git a/jstests/concurrency/fsm_workloads/group_cond.js b/jstests/concurrency/fsm_workloads/group_cond.js
index 02a4f41703a..7344b781d31 100644
--- a/jstests/concurrency/fsm_workloads/group_cond.js
+++ b/jstests/concurrency/fsm_workloads/group_cond.js
@@ -17,7 +17,7 @@ load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
load('jstests/concurrency/fsm_workloads/group.js'); // for $config
var $config = extendWorkload($config, function($config, $super) {
- $config.setup = function setup(db, collName) {
+ $config.setup = function setup(db, collName, cluster) {
$super.setup.apply(this, arguments);
assertAlways.commandWorked(db[collName].ensureIndex({ rand: 1 }));
};
@@ -37,4 +37,4 @@ var $config = extendWorkload($config, function($config, $super) {
};
return $config;
-})
+});
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_base.js b/jstests/concurrency/fsm_workloads/indexed_insert_base.js
index 548e00d6efb..95cc782e17e 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_base.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_base.js
@@ -52,7 +52,7 @@ var $config = (function() {
var ownColl = false;
assertWhenOwnColl(function() { ownColl = true; });
if (this.indexExists && ownColl) {
- var count = db[collName].find(this.getDoc()).hint(this.getIndexSpec()).itcount();
+ count = db[collName].find(this.getDoc()).hint(this.getIndexSpec()).itcount();
assertWhenOwnColl.eq(count, this.nInserted);
}
@@ -73,7 +73,7 @@ var $config = (function() {
find: { insert: 1 }
};
- function setup(db, collName) {
+ function setup(db, collName, cluster) {
var res = db[collName].ensureIndex(this.getIndexSpec());
assertAlways.commandWorked(res);
this.indexExists = true;
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_text.js b/jstests/concurrency/fsm_workloads/indexed_insert_text.js
index 0f2e5177cbb..b73373b1090 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_text.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_text.js
@@ -37,7 +37,7 @@ var $config = (function() {
insert: { insert: 1 }
};
- function setup(db, collName) {
+ function setup(db, collName, cluster) {
var ixSpec = {};
ixSpec[this.indexedField] = 'text';
// Only allowed to create one text index, other tests may create one.
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_ttl.js b/jstests/concurrency/fsm_workloads/indexed_insert_ttl.js
index 62d9d047092..f7ebf71ef14 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_ttl.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_ttl.js
@@ -29,14 +29,14 @@ var $config = (function() {
insert: { insert: 1 }
};
- function setup(db, collName) {
+ function setup(db, collName, cluster) {
var res = db[collName].ensureIndex(
{ indexed_insert_ttl: 1 },
{ expireAfterSeconds: this.ttlSeconds });
assertAlways.commandWorked(res);
}
- function teardown(db, collName) {
+ function teardown(db, collName, cluster) {
// The TTL thread runs every 60 seconds, so for reliability, wait more than ttlSeconds
// plus a minute.
sleep((2 * this.ttlSeconds + 70) * 1000);
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_where.js b/jstests/concurrency/fsm_workloads/indexed_insert_where.js
new file mode 100644
index 00000000000..9cb9c14c374
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_where.js
@@ -0,0 +1,57 @@
+'use strict';
+
+/**
+ * indexed_insert_where.js
+ *
+ * Bulk inserts documents in batches of 100, and then queries for the documents inserted by the
+ * thread.
+ */
+
+var $config = (function() {
+
+ var data = {
+ documentsToInsert: 100,
+ insertedDocuments: 0,
+ generateDocumentToInsert: function generateDocumentToInsert() {
+ return { tid: this.tid };
+ }
+ };
+
+ var states = {
+ insert: function insert(db, collName) {
+ var bulk = db[collName].initializeUnorderedBulkOp();
+ for (var i = 0; i < this.documentsToInsert; ++i) {
+ bulk.insert(this.generateDocumentToInsert());
+ }
+ var res = bulk.execute();
+ assertAlways.writeOK(res);
+ assertAlways.eq(this.documentsToInsert, res.nInserted);
+ this.insertedDocuments += this.documentsToInsert;
+ },
+
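+        // $where evaluates the JavaScript predicate against each document on the server.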
+ query: function query(db, collName) {
+ var count = db[collName].find({ $where: 'this.tid === ' + this.tid }).itcount();
+ assertWhenOwnColl.eq(count, this.insertedDocuments,
+ '$where query should return the number of documents this ' +
+ 'thread inserted');
+ }
+ };
+
+ var transitions = {
+ insert: { insert: 0.2, query: 0.8 },
+ query: { insert: 0.8, query: 0.2 }
+ };
+
+ var setup = function setup(db, collName, cluster) {
+ assertAlways.commandWorked(db[collName].ensureIndex({ tid: 1 }));
+ };
+
+ return {
+ threadCount: 10,
+ iterations: 10,
+ data: data,
+ states: states,
+ startState: 'insert',
+ setup: setup,
+ transitions: transitions
+ };
+})();
diff --git a/jstests/concurrency/fsm_workloads/map_reduce_inline.js b/jstests/concurrency/fsm_workloads/map_reduce_inline.js
index c535c25a372..278d9e95f25 100644
--- a/jstests/concurrency/fsm_workloads/map_reduce_inline.js
+++ b/jstests/concurrency/fsm_workloads/map_reduce_inline.js
@@ -82,7 +82,7 @@ var $config = (function() {
};
}
- function setup(db, collName) {
+ function setup(db, collName, cluster) {
var bulk = db[collName].initializeUnorderedBulkOp();
for (var i = 0; i < this.numDocs; ++i) {
// TODO: this actually does assume that there are no unique indexes
diff --git a/jstests/concurrency/fsm_workloads/map_reduce_merge.js b/jstests/concurrency/fsm_workloads/map_reduce_merge.js
index 02138304f96..c4004664189 100644
--- a/jstests/concurrency/fsm_workloads/map_reduce_merge.js
+++ b/jstests/concurrency/fsm_workloads/map_reduce_merge.js
@@ -47,14 +47,14 @@ var $config = extendWorkload($config, function($config, $super) {
assertAlways.commandWorked(res);
};
- $config.setup = function setup(db, collName) {
+ $config.setup = function setup(db, collName, cluster) {
$super.setup.apply(this, arguments);
var outDB = db.getSiblingDB(uniqueDBName);
assertAlways.commandWorked(outDB.createCollection(collName));
};
- $config.teardown = function teardown(db, collName) {
+ $config.teardown = function teardown(db, collName, cluster) {
var outDB = db.getSiblingDB(uniqueDBName);
var res = outDB.dropDatabase();
assertAlways.commandWorked(res);
diff --git a/jstests/concurrency/fsm_workloads/map_reduce_merge_nonatomic.js b/jstests/concurrency/fsm_workloads/map_reduce_merge_nonatomic.js
index a54de881be7..fcb58ec5b4c 100644
--- a/jstests/concurrency/fsm_workloads/map_reduce_merge_nonatomic.js
+++ b/jstests/concurrency/fsm_workloads/map_reduce_merge_nonatomic.js
@@ -54,7 +54,7 @@ var $config = extendWorkload($config, function($config, $super) {
assertAlways.commandWorked(res);
};
- $config.teardown = function teardown(db, collName) {
+ $config.teardown = function teardown(db, collName, cluster) {
var pattern = new RegExp('^' + prefix + '\\d+$');
dropDatabases(db, pattern);
};
diff --git a/jstests/concurrency/fsm_workloads/map_reduce_reduce.js b/jstests/concurrency/fsm_workloads/map_reduce_reduce.js
index 6a46cbe08f0..53b1246f4c6 100644
--- a/jstests/concurrency/fsm_workloads/map_reduce_reduce.js
+++ b/jstests/concurrency/fsm_workloads/map_reduce_reduce.js
@@ -46,7 +46,7 @@ var $config = extendWorkload($config, function($config, $super) {
assertAlways.commandWorked(res);
};
- $config.teardown = function teardown(db, collName) {
+ $config.teardown = function teardown(db, collName, cluster) {
var pattern = new RegExp('^' + prefix + '\\d+$');
dropCollections(db, pattern);
};
diff --git a/jstests/concurrency/fsm_workloads/map_reduce_reduce_nonatomic.js b/jstests/concurrency/fsm_workloads/map_reduce_reduce_nonatomic.js
index e63511b44d0..cb0eeb1948a 100644
--- a/jstests/concurrency/fsm_workloads/map_reduce_reduce_nonatomic.js
+++ b/jstests/concurrency/fsm_workloads/map_reduce_reduce_nonatomic.js
@@ -47,13 +47,13 @@ var $config = extendWorkload($config, function($config, $super) {
assertAlways.commandWorked(res);
};
- $config.setup = function setup(db, collName) {
+ $config.setup = function setup(db, collName, cluster) {
$super.setup.apply(this, arguments);
assertAlways.commandWorked(db.createCollection(uniqueCollectionName));
};
- $config.teardown = function teardown(db, collName) {
+ $config.teardown = function teardown(db, collName, cluster) {
assertAlways(db[uniqueCollectionName].drop());
};
diff --git a/jstests/concurrency/fsm_workloads/map_reduce_replace.js b/jstests/concurrency/fsm_workloads/map_reduce_replace.js
index 7d00614eb23..186caf5a41e 100644
--- a/jstests/concurrency/fsm_workloads/map_reduce_replace.js
+++ b/jstests/concurrency/fsm_workloads/map_reduce_replace.js
@@ -48,7 +48,7 @@ var $config = extendWorkload($config, function($config, $super) {
assertAlways.commandWorked(res);
};
- $config.teardown = function teardown(db, collName) {
+ $config.teardown = function teardown(db, collName, cluster) {
var pattern = new RegExp('^' + prefix + '\\d+$');
dropCollections(db, pattern);
};
diff --git a/jstests/concurrency/fsm_workloads/map_reduce_replace_nonexistent.js b/jstests/concurrency/fsm_workloads/map_reduce_replace_nonexistent.js
index 89deddcd71b..1cd6e18fbef 100644
--- a/jstests/concurrency/fsm_workloads/map_reduce_replace_nonexistent.js
+++ b/jstests/concurrency/fsm_workloads/map_reduce_replace_nonexistent.js
@@ -41,7 +41,7 @@ var $config = extendWorkload($config, function($config, $super) {
assertAlways(db[outCollName].drop());
};
- $config.teardown = function teardown(db, collName) {
+ $config.teardown = function teardown(db, collName, cluster) {
var pattern = new RegExp('^' + prefix + '\\d+$');
dropCollections(db, pattern);
};
diff --git a/jstests/concurrency/fsm_workloads/reindex.js b/jstests/concurrency/fsm_workloads/reindex.js
new file mode 100644
index 00000000000..51aad94c016
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/reindex.js
@@ -0,0 +1,114 @@
+'use strict';
+
+/**
+ * reindex.js
+ *
+ * Bulk inserts 1000 documents and builds indexes. Then alternates between reindexing and querying
+ * against the collection. Operates on a separate collection for each thread.
+ */
+
+load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropCollections
+
+var $config = (function() {
+ var data = {
+ nIndexes: 3 + 1, // 3 created and 1 for _id
+ nDocumentsToInsert: 1000,
+ maxInteger: 100, // Used for document values. Must be a factor of nDocumentsToInsert
+ prefix: 'reindex' // Use filename for prefix because filename is assumed unique
+ };
+
+ var states = (function() {
+ function insertDocuments(db, collName) {
+ var bulk = db[collName].initializeUnorderedBulkOp();
+ for (var i = 0; i < this.nDocumentsToInsert; ++i) {
+ bulk.insert({
+ text: 'Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do' +
+ ' eiusmod tempor incididunt ut labore et dolore magna aliqua.',
+ geo: { type: 'Point', coordinates: [(i % 50) - 25, (i % 50) - 25] },
+ integer: i % this.maxInteger
+ });
+ }
+ var res = bulk.execute();
+ assertAlways.writeOK(res);
+ assertAlways.eq(this.nDocumentsToInsert, res.nInserted);
+ }
+
+ function createIndexes(db, collName) {
+ // The number of indexes created here is also stored in data.nIndexes
+ var textResult = db[this.threadCollName].ensureIndex({ text: 'text' });
+ assertAlways.commandWorked(textResult);
+
+ var geoResult = db[this.threadCollName].ensureIndex({ geo: '2dsphere' });
+ assertAlways.commandWorked(geoResult);
+
+ var integerResult = db[this.threadCollName].ensureIndex({ integer: 1 });
+ assertAlways.commandWorked(integerResult);
+ }
+
+ function init(db, collName) {
+ this.threadCollName = this.prefix + '_' + this.tid;
+ insertDocuments.call(this, db, this.threadCollName);
+ }
+
+ function query(db, collName) {
+ var coll = db[this.threadCollName];
+ var nInsertedDocuments = this.nDocumentsToInsert;
+ var count = coll.find({ integer: Random.randInt(this.maxInteger) }).itcount();
+ assertWhenOwnColl.eq(nInsertedDocuments / this.maxInteger, count, 'number of ' +
+ 'documents returned by integer query should match the number ' +
+ 'inserted');
+
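+            // Polygon enclosing every inserted 'geo' point, so all documents should match the
+            // $geoWithin query below.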
+ var coords = [[ [-26, -26], [-26, 26], [26, 26], [26, -26], [-26, -26] ]];
+ var geoQuery = { geo: { $geoWithin: { $geometry: { type: 'Polygon',
+ coordinates: coords}}}};
+
+ // We can only perform a geo query when we own the collection and are sure a geo index
+ // is present. The same is true of text queries.
+ assertWhenOwnColl(function() {
+ count = coll.find(geoQuery).itcount();
+ assertWhenOwnColl.eq(count, nInsertedDocuments, 'number of documents returned by' +
+ ' geospatial query should match number inserted');
+
+ count = coll.find({ $text: { $search: 'ipsum' } }).itcount();
+ assertWhenOwnColl.eq(count, nInsertedDocuments, 'number of documents returned by' +
+ ' text query should match number inserted');
+ });
+
+ var indexCount = db[this.threadCollName].getIndexes().length;
+ assertWhenOwnColl.eq(indexCount, this.nIndexes);
+ }
+
+ function reIndex(db, collName) {
+ var res = db[this.threadCollName].reIndex();
+ assertAlways.commandWorked(res);
+ }
+
+ return {
+ init: init,
+ createIndexes: createIndexes,
+ reIndex: reIndex,
+ query: query
+ };
+ })();
+
+ var transitions = {
+ init: { createIndexes: 1 },
+ createIndexes: { reIndex: 0.5, query: 0.5 },
+ reIndex: { reIndex: 0.5, query: 0.5 },
+ query: { reIndex: 0.5, query: 0.5 }
+ };
+
+ var teardown = function teardown(db, collName, cluster) {
+ var pattern = new RegExp('^' + this.prefix + '_\\d+$');
+ dropCollections(db, pattern);
+ };
+
+ return {
+ threadCount: 15,
+ iterations: 10,
+ states: states,
+ transitions: transitions,
+ teardown: teardown,
+ data: data
+ };
+})();
diff --git a/jstests/concurrency/fsm_workloads/reindex_background.js b/jstests/concurrency/fsm_workloads/reindex_background.js
new file mode 100644
index 00000000000..fe4d00bb9e5
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/reindex_background.js
@@ -0,0 +1,33 @@
+'use strict';
+
+/**
+ * reindex_background.js
+ *
+ * Bulk inserts 1000 documents and builds indexes in background, then alternates between reindexing
+ * and querying against the collection. Operates on a separate collection for each thread. Note
+ * that because indexes are initially built in the background, reindexing is also done in the
+ * background.
+ */
+
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/reindex.js'); // for $config
+
+var $config = extendWorkload($config, function($config, $super) {
+ $config.data.prefix = 'reindex_background';
+
+ $config.states.createIndexes = function createIndexes(db, collName) {
+ var coll = db[this.threadCollName];
+
+ // The number of indexes created here is also stored in data.nIndexes
+ var textResult = coll.ensureIndex({ text: 'text' }, { background: true });
+ assertAlways.commandWorked(textResult);
+
+ var geoResult = coll.ensureIndex({ geo: '2dsphere' }, { background: true });
+ assertAlways.commandWorked(geoResult);
+
+        var integerResult = coll.ensureIndex({ integer: 1 }, { background: true });
+ assertAlways.commandWorked(integerResult);
+ };
+
+ return $config;
+});
diff --git a/jstests/concurrency/fsm_workloads/remove_single_document.js b/jstests/concurrency/fsm_workloads/remove_single_document.js
index 167e742a193..c54cf02ec85 100644
--- a/jstests/concurrency/fsm_workloads/remove_single_document.js
+++ b/jstests/concurrency/fsm_workloads/remove_single_document.js
@@ -36,7 +36,7 @@ var $config = (function() {
var threadCount = 10;
var iterations = 20;
- function setup(db, collName) {
+ function setup(db, collName, cluster) {
// insert enough documents so that each thread can remove exactly one per iteration
var num = threadCount * iterations;
for (var i = 0; i < num; ++i) {
diff --git a/jstests/concurrency/fsm_workloads/remove_where.js b/jstests/concurrency/fsm_workloads/remove_where.js
new file mode 100644
index 00000000000..3b3d4babef9
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/remove_where.js
@@ -0,0 +1,42 @@
+'use strict';
+
+/**
+ * remove_where.js
+ *
+ * Bulk inserts documents in batches of 100. Randomly selects ~1/10th of existing documents created
+ * by the thread and removes them. Queries by the thread that created the documents to verify
+ * counts.
+ */
+
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/indexed_insert_where.js'); // for $config
+
+var $config = extendWorkload($config, function($config, $super) {
+ $config.data.randomBound = 10;
+ $config.data.generateDocumentToInsert = function generateDocumentToInsert() {
+ return { tid: this.tid, x: Random.randInt(this.randomBound) };
+ };
+
+ $config.states.remove = function remove(db, collName) {
+ var res = db[collName].remove({
+ // Server-side JS does not support Random.randInt, so use Math.floor/random instead
+ $where: 'this.x === Math.floor(Math.random() * ' + this.randomBound + ') ' +
+ '&& this.tid === ' + this.tid
+ });
+ assertWhenOwnColl.gte(res.nRemoved, 0);
+ assertWhenOwnColl.lte(res.nRemoved, this.insertedDocuments);
+ this.insertedDocuments -= res.nRemoved;
+ };
+
+ $config.transitions = {
+ insert: { insert: 0.2, remove: 0.4, query: 0.4 },
+ remove: { insert: 0.4, remove: 0.2, query: 0.4 },
+ query: { insert: 0.4, remove: 0.4, query: 0.2 }
+ };
+
+ $config.setup = function setup(db, collName, cluster) {
+ /* no-op to prevent index from being created */
+ };
+
+ return $config;
+});
diff --git a/jstests/concurrency/fsm_workloads/rename_capped_collection_chain.js b/jstests/concurrency/fsm_workloads/rename_capped_collection_chain.js
index 7dbdd25dfe8..7b77eec792b 100644
--- a/jstests/concurrency/fsm_workloads/rename_capped_collection_chain.js
+++ b/jstests/concurrency/fsm_workloads/rename_capped_collection_chain.js
@@ -56,7 +56,7 @@ var $config = (function() {
rename: { rename: 1 }
};
- function teardown(db, collName) {
+ function teardown(db, collName, cluster) {
var pattern = new RegExp('^' + this.prefix + '\\d+_\\d+$');
dropCollections(db, pattern);
}
diff --git a/jstests/concurrency/fsm_workloads/rename_capped_collection_dbname_chain.js b/jstests/concurrency/fsm_workloads/rename_capped_collection_dbname_chain.js
index b59ceddcc13..546e98a2124 100644
--- a/jstests/concurrency/fsm_workloads/rename_capped_collection_dbname_chain.js
+++ b/jstests/concurrency/fsm_workloads/rename_capped_collection_dbname_chain.js
@@ -69,7 +69,7 @@ var $config = (function() {
rename: { rename: 1 }
};
- function teardown(db, collName) {
+ function teardown(db, collName, cluster) {
var pattern = new RegExp('^' + this.prefix + '\\d+_\\d+$');
dropDatabases(db, pattern);
}
diff --git a/jstests/concurrency/fsm_workloads/rename_capped_collection_dbname_droptarget.js b/jstests/concurrency/fsm_workloads/rename_capped_collection_dbname_droptarget.js
index a64dbfbbd97..c5e78b9b0e7 100644
--- a/jstests/concurrency/fsm_workloads/rename_capped_collection_dbname_droptarget.js
+++ b/jstests/concurrency/fsm_workloads/rename_capped_collection_dbname_droptarget.js
@@ -92,7 +92,7 @@ var $config = (function() {
rename: { rename: 1 }
};
- function teardown(db, collName) {
+ function teardown(db, collName, cluster) {
var pattern = new RegExp('^' + this.prefix + '\\d+_\\d+$');
dropDatabases(db, pattern);
}
diff --git a/jstests/concurrency/fsm_workloads/rename_capped_collection_droptarget.js b/jstests/concurrency/fsm_workloads/rename_capped_collection_droptarget.js
index f7caff7a20f..b656b004373 100644
--- a/jstests/concurrency/fsm_workloads/rename_capped_collection_droptarget.js
+++ b/jstests/concurrency/fsm_workloads/rename_capped_collection_droptarget.js
@@ -84,7 +84,7 @@ var $config = (function() {
rename: { rename: 1 }
};
- function teardown(db, collName) {
+ function teardown(db, collName, cluster) {
var pattern = new RegExp('^' + this.prefix + '\\d+_\\d+$');
dropCollections(db, pattern);
}
diff --git a/jstests/concurrency/fsm_workloads/rename_collection_chain.js b/jstests/concurrency/fsm_workloads/rename_collection_chain.js
index 414d9060989..0514fe6d075 100644
--- a/jstests/concurrency/fsm_workloads/rename_collection_chain.js
+++ b/jstests/concurrency/fsm_workloads/rename_collection_chain.js
@@ -48,7 +48,7 @@ var $config = (function() {
rename: { rename: 1 }
};
- function teardown(db, collName) {
+ function teardown(db, collName, cluster) {
var pattern = new RegExp('^' + this.prefix + '\\d+_\\d+$');
dropCollections(db, pattern);
}
diff --git a/jstests/concurrency/fsm_workloads/rename_collection_dbname_chain.js b/jstests/concurrency/fsm_workloads/rename_collection_dbname_chain.js
index 2be2b172106..7effa11bb4c 100644
--- a/jstests/concurrency/fsm_workloads/rename_collection_dbname_chain.js
+++ b/jstests/concurrency/fsm_workloads/rename_collection_dbname_chain.js
@@ -61,7 +61,7 @@ var $config = (function() {
rename: { rename: 1 }
};
- function teardown(db, collName) {
+ function teardown(db, collName, cluster) {
var pattern = new RegExp('^' + this.prefix + '\\d+_\\d+$');
dropDatabases(db, pattern);
}
diff --git a/jstests/concurrency/fsm_workloads/rename_collection_dbname_droptarget.js b/jstests/concurrency/fsm_workloads/rename_collection_dbname_droptarget.js
index 063cc45f1dc..0dbff6fb75c 100644
--- a/jstests/concurrency/fsm_workloads/rename_collection_dbname_droptarget.js
+++ b/jstests/concurrency/fsm_workloads/rename_collection_dbname_droptarget.js
@@ -84,7 +84,7 @@ var $config = (function() {
rename: { rename: 1 }
};
- function teardown(db, collName) {
+ function teardown(db, collName, cluster) {
var pattern = new RegExp('^' + this.prefix + '\\d+_\\d+$');
dropDatabases(db, pattern);
}
diff --git a/jstests/concurrency/fsm_workloads/rename_collection_droptarget.js b/jstests/concurrency/fsm_workloads/rename_collection_droptarget.js
index 57e6cf0776e..161720d019e 100644
--- a/jstests/concurrency/fsm_workloads/rename_collection_droptarget.js
+++ b/jstests/concurrency/fsm_workloads/rename_collection_droptarget.js
@@ -76,7 +76,7 @@ var $config = (function() {
rename: { rename: 1 }
};
- function teardown(db, collName) {
+ function teardown(db, collName, cluster) {
var pattern = new RegExp('^' + this.prefix + '\\d+_\\d+$');
dropCollections(db, pattern);
}
diff --git a/jstests/concurrency/fsm_workloads/touch_base.js b/jstests/concurrency/fsm_workloads/touch_base.js
new file mode 100644
index 00000000000..0f8e26976de
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/touch_base.js
@@ -0,0 +1,51 @@
+'use strict';
+
+/**
+ * touch_base.js
+ *
+ * Bulk inserts documents in batches of 100, uses the touch command on "data" and "index",
+ * and queries to verify the number of documents inserted by the thread.
+ */
+
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/indexed_insert_where.js'); // for $config
+load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isMongod and isMMAPv1
+
+var $config = extendWorkload($config, function($config, $super) {
+ $config.data.generateDocumentToInsert = function generateDocumentToInsert() {
+ return { tid: this.tid, x: Random.randInt(10) };
+ };
+
+ $config.data.generateTouchCmdObj = function generateTouchCmdObj(collName) {
+ return { touch: collName, data: true, index: true };
+ };
+
+ $config.states.touch = function touch(db, collName) {
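+        // The touch command is expected to succeed only against a mongod running the MMAPv1
+        // storage engine; otherwise it should fail.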
+ var res = db.runCommand(this.generateTouchCmdObj(collName));
+ if (isMongod(db) && isMMAPv1(db)) {
+ assertAlways.commandWorked(res);
+ } else {
+ // SERVER-16850 and SERVER-16797
+ assertAlways.commandFailed(res);
+ }
+ };
+
+ $config.states.query = function query(db, collName) {
+        var count = db[collName].find({ tid: this.tid }).itcount();
+ assertWhenOwnColl.eq(count, this.insertedDocuments,
+ 'collection scan should return the number of documents this thread' +
+ ' inserted');
+ };
+
+ $config.transitions = {
+ insert: { insert: 0.2, touch: 0.4, query: 0.4 },
+ touch: { insert: 0.4, touch: 0.2, query: 0.4 },
+ query: { insert: 0.4, touch: 0.4, query: 0.2 }
+ };
+
+ $config.setup = function setup(db, collName, cluster) {
+ assertAlways.commandWorked(db[collName].ensureIndex({ x: 1 }));
+ };
+
+ return $config;
+});
diff --git a/jstests/concurrency/fsm_workloads/touch_data.js b/jstests/concurrency/fsm_workloads/touch_data.js
new file mode 100644
index 00000000000..08130dfcf2e
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/touch_data.js
@@ -0,0 +1,19 @@
+'use strict';
+
+/**
+ * touch_data.js
+ *
+ * Bulk inserts documents in batches of 100, uses touch on "data" but not "index",
+ * and queries to verify the number of documents inserted by the thread.
+ */
+
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/touch_base.js'); // for $config
+
+var $config = extendWorkload($config, function($config, $super) {
+ $config.data.generateTouchCmdObj = function generateTouchCmdObj(collName) {
+ return { touch: collName, data: true, index: false };
+ };
+
+ return $config;
+});
diff --git a/jstests/concurrency/fsm_workloads/touch_index.js b/jstests/concurrency/fsm_workloads/touch_index.js
new file mode 100644
index 00000000000..a1cfa6db2ba
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/touch_index.js
@@ -0,0 +1,19 @@
+'use strict';
+
+/**
+ * touch_index.js
+ *
+ * Bulk inserts documents in batches of 100, uses touch on "index" but not "data",
+ * and queries to verify the number of documents inserted by the thread.
+ */
+
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/touch_base.js'); // for $config
+
+var $config = extendWorkload($config, function($config, $super) {
+ $config.data.generateTouchCmdObj = function generateTouchCmdObj(collName) {
+ return { touch: collName, data: false, index: true };
+ };
+
+ return $config;
+});
diff --git a/jstests/concurrency/fsm_workloads/touch_no_data_no_index.js b/jstests/concurrency/fsm_workloads/touch_no_data_no_index.js
new file mode 100644
index 00000000000..18cf0329b02
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/touch_no_data_no_index.js
@@ -0,0 +1,25 @@
+'use strict';
+
+/**
+ * touch_no_data_no_index.js
+ *
+ * Bulk inserts documents in batches of 100, uses touch as a no-op,
+ * and queries to verify the number of documents inserted by the thread.
+ */
+
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/touch_base.js'); // for $config
+
+var $config = extendWorkload($config, function($config, $super) {
+ $config.data.generateTouchCmdObj = function generateTouchCmdObj(collName) {
+ return { touch: collName, data: false, index: false };
+ };
+
+ $config.states.touch = function touch(db, collName) {
+ var res = db.runCommand(this.generateTouchCmdObj(collName));
+ // The command always fails because "index" and "data" are both false
+ assertAlways.commandFailed(res);
+ };
+
+ return $config;
+});
diff --git a/jstests/concurrency/fsm_workloads/update_array.js b/jstests/concurrency/fsm_workloads/update_array.js
index 280080e9726..fe741090dd6 100644
--- a/jstests/concurrency/fsm_workloads/update_array.js
+++ b/jstests/concurrency/fsm_workloads/update_array.js
@@ -21,8 +21,7 @@ var $config = (function() {
function assertUpdateSuccess(db, res, nModifiedPossibilities) {
assertAlways.eq(0, res.nUpserted, tojson(res));
- var status = db.serverStatus();
- if (isMongod(status) && !isMMAPv1(status)) {
+ if (isMongod(db) && !isMMAPv1(db)) {
assertWhenOwnColl.contains(1, nModifiedPossibilities, tojson(res));
if (db.getMongo().writeMode() === 'commands') {
assertWhenOwnColl.contains(res.nModified, nModifiedPossibilities, tojson(res));
@@ -101,7 +100,7 @@ var $config = (function() {
}
};
- function setup(db, collName) {
+ function setup(db, collName, cluster) {
// index on 'arr', the field being updated
assertAlways.commandWorked(db[collName].ensureIndex({ arr: 1 }));
for (var i = 0; i < this.numDocs; ++i) {
diff --git a/jstests/concurrency/fsm_workloads/update_inc.js b/jstests/concurrency/fsm_workloads/update_inc.js
index 842b17a7b87..6f7b5deb8c1 100644
--- a/jstests/concurrency/fsm_workloads/update_inc.js
+++ b/jstests/concurrency/fsm_workloads/update_inc.js
@@ -31,8 +31,7 @@ var $config = (function() {
var res = db[collName].update({ _id: this.id }, updateDoc);
assertAlways.eq(0, res.nUpserted, tojson(res));
- var status = db.serverStatus();
- if (isMongod(status) && !isMMAPv1(status)) {
+ if (isMongod(db) && !isMMAPv1(db)) {
// For non-mmap storage engines we can have a strong assertion that exactly one doc
// will be modified.
assertWhenOwnColl.eq(res.nMatched, 1, tojson(res));
@@ -69,7 +68,7 @@ var $config = (function() {
find: { update: 1 }
};
- function setup(db, collName) {
+ function setup(db, collName, cluster) {
db[collName].insert({ _id: this.id });
}
diff --git a/jstests/concurrency/fsm_workloads/update_multifield.js b/jstests/concurrency/fsm_workloads/update_multifield.js
index f854ad1ad29..6df238051bf 100644
--- a/jstests/concurrency/fsm_workloads/update_multifield.js
+++ b/jstests/concurrency/fsm_workloads/update_multifield.js
@@ -61,7 +61,7 @@ var $config = (function() {
update: { update: 1 }
};
- function setup(db, collName) {
+ function setup(db, collName, cluster) {
assertAlways.commandWorked(db[collName].ensureIndex({ x: 1 }));
assertAlways.commandWorked(db[collName].ensureIndex({ y: 1 }));
assertAlways.commandWorked(db[collName].ensureIndex({ z: 1 }));
@@ -85,8 +85,7 @@ var $config = (function() {
assertResult: function(res, db, collName, query) {
assertAlways.eq(0, res.nUpserted, tojson(res));
- var status = db.serverStatus();
- if (isMongod(status) && !isMMAPv1(status)) {
+ if (isMongod(db) && !isMMAPv1(db)) {
// For non-mmap storage engines we can have a strong assertion that exactly one
// doc will be modified.
assertWhenOwnColl.eq(res.nMatched, 1, tojson(res));
diff --git a/jstests/concurrency/fsm_workloads/update_multifield_multiupdate.js b/jstests/concurrency/fsm_workloads/update_multifield_multiupdate.js
index 298c35a6522..8d9f3d875cc 100644
--- a/jstests/concurrency/fsm_workloads/update_multifield_multiupdate.js
+++ b/jstests/concurrency/fsm_workloads/update_multifield_multiupdate.js
@@ -17,9 +17,8 @@ var $config = extendWorkload($config, function($config, $super) {
$config.data.assertResult = function(res, db, collName, query) {
assertAlways.eq(0, res.nUpserted, tojson(res));
- var status = db.serverStatus();
- if (isMongod(status)) {
- if (isMMAPv1(status)) {
+ if (isMongod(db)) {
+ if (isMMAPv1(db)) {
// If an update triggers a document to move forward, then
// that document can be matched multiple times. If an update
// triggers a document to move backwards, then that document
diff --git a/jstests/concurrency/fsm_workloads/update_ordered_bulk_inc.js b/jstests/concurrency/fsm_workloads/update_ordered_bulk_inc.js
index ffb4fd913b8..9bd36519229 100644
--- a/jstests/concurrency/fsm_workloads/update_ordered_bulk_inc.js
+++ b/jstests/concurrency/fsm_workloads/update_ordered_bulk_inc.js
@@ -38,16 +38,16 @@ var $config = (function() {
find: function find(db, collName) {
var docs = db[collName].find().toArray();
- var serverStatus = db.serverStatus();
+
// In MMAP v1, some documents may appear twice due to moves
- if (isMongod(serverStatus) && !isMMAPv1(serverStatus)) {
+ if (isMongod(db) && !isMMAPv1(db)) {
assertWhenOwnColl.eq(this.docCount, docs.length);
}
assertWhenOwnColl.gte(docs.length, this.docCount);
// It is possible that a document was not incremented in MMAP v1 because
// updates skip documents that were invalidated during yielding
- if (isMongod(serverStatus) && !isMMAPv1(serverStatus)) {
+ if (isMongod(db) && !isMMAPv1(db)) {
docs.forEach(function(doc) {
assertWhenOwnColl.eq(this.count, doc[this.fieldName]);
}, this);
@@ -66,7 +66,7 @@ var $config = (function() {
find: { update: 1 }
};
- function setup(db, collName) {
+ function setup(db, collName, cluster) {
this.count = 0;
for (var i = 0; i < this.docCount; ++i) {
db[collName].insert({ _id: i });
diff --git a/jstests/concurrency/fsm_workloads/update_rename.js b/jstests/concurrency/fsm_workloads/update_rename.js
index ff0f31959ca..c4ebaa3e812 100644
--- a/jstests/concurrency/fsm_workloads/update_rename.js
+++ b/jstests/concurrency/fsm_workloads/update_rename.js
@@ -38,7 +38,7 @@ var $config = (function() {
update: { update: 1 }
};
- function setup(db, collName) {
+ function setup(db, collName, cluster) {
// Create an index on all but one fieldName key to make it possible to test renames
// between indexed fields and non-indexed fields
fieldNames.slice(1).forEach(function(fieldName) {
diff --git a/jstests/concurrency/fsm_workloads/update_replace.js b/jstests/concurrency/fsm_workloads/update_replace.js
index b0c621c94fe..25a35d5255d 100644
--- a/jstests/concurrency/fsm_workloads/update_replace.js
+++ b/jstests/concurrency/fsm_workloads/update_replace.js
@@ -45,7 +45,7 @@ var $config = (function() {
update: { update: 1 }
};
- function setup(db, collName) {
+ function setup(db, collName, cluster) {
assertAlways.commandWorked(db[collName].ensureIndex({ a: 1 }));
assertAlways.commandWorked(db[collName].ensureIndex({ b: 1 }));
// no index on c
diff --git a/jstests/concurrency/fsm_workloads/update_simple.js b/jstests/concurrency/fsm_workloads/update_simple.js
index dda5d72fc52..9b116f40c23 100644
--- a/jstests/concurrency/fsm_workloads/update_simple.js
+++ b/jstests/concurrency/fsm_workloads/update_simple.js
@@ -33,7 +33,7 @@ var $config = (function() {
}
};
- function setup(db, collName) {
+ function setup(db, collName, cluster) {
// index on 'value', the field being updated
assertAlways.commandWorked(db[collName].ensureIndex({ value: 1 }));
for (var i = 0; i < this.numDocs; ++i) {
@@ -57,8 +57,7 @@ var $config = (function() {
assertResult: function assertResult(db, res) {
assertAlways.eq(0, res.nUpserted, tojson(res));
- var status = db.serverStatus();
- if (isMongod(status) && !isMMAPv1(status)) {
+ if (isMongod(db) && !isMMAPv1(db)) {
// For non-mmap storage engines we can have a strong assertion that exactly one
// doc will be modified.
assertWhenOwnColl.eq(res.nMatched, 1, tojson(res));
diff --git a/jstests/concurrency/fsm_workloads/update_upsert_multi.js b/jstests/concurrency/fsm_workloads/update_upsert_multi.js
index 7fc7a61980c..63aed616bc3 100644
--- a/jstests/concurrency/fsm_workloads/update_upsert_multi.js
+++ b/jstests/concurrency/fsm_workloads/update_upsert_multi.js
@@ -66,7 +66,7 @@ var $config = (function() {
assertConsistency: { insert: 0.5, update: 0.5 }
};
- function setup(db, collName) {
+ function setup(db, collName, cluster) {
assertAlways.commandWorked(db[collName].ensureIndex({ tid: 1, i: 1 }));
}
diff --git a/jstests/concurrency/fsm_workloads/update_where.js b/jstests/concurrency/fsm_workloads/update_where.js
new file mode 100644
index 00000000000..befc8cde972
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/update_where.js
@@ -0,0 +1,46 @@
+'use strict';
+
+/**
+ * update_where.js
+ *
+ * Bulk inserts documents in batches of 100, randomly selects ~1/10th of documents inserted by the
+ * thread and updates them. Also queries by the thread that created the documents to verify counts.
+ */
+
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/indexed_insert_where.js'); // for $config
+
+var $config = extendWorkload($config, function($config, $super) {
+ $config.data.randomBound = 10;
+ $config.data.generateDocumentToInsert = function generateDocumentToInsert() {
+ return { tid: this.tid, x: Random.randInt(this.randomBound) };
+ };
+
+ $config.states.update = function update(db, collName) {
+ var res = db[collName].update(
+ // Server-side JS does not support Random.randInt, so use Math.floor/random instead
+ { $where: 'this.x === Math.floor(Math.random() * ' + this.randomBound + ') ' +
+ '&& this.tid === ' + this.tid },
+ { $set: { x: Random.randInt(this.randomBound) } },
+ { multi: true }
+ );
+ assertAlways.writeOK(res);
+
+ if (db.getMongo().writeMode() === 'commands') {
+ assertWhenOwnColl.gte(res.nModified, 0);
+ assertWhenOwnColl.lte(res.nModified, this.insertedDocuments);
+ }
+ };
+
+ $config.transitions = {
+ insert: { insert: 0.2, update: 0.4, query: 0.4 },
+ update: { insert: 0.4, update: 0.2, query: 0.4 },
+ query: { insert: 0.4, update: 0.4, query: 0.2 }
+ };
+
+ $config.setup = function setup(db, collName, cluster) {
+ /* no-op to prevent index from being created */
+ };
+
+ return $config;
+});
diff --git a/jstests/concurrency/fsm_workloads/upsert_where.js b/jstests/concurrency/fsm_workloads/upsert_where.js
new file mode 100644
index 00000000000..e89aa56d184
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/upsert_where.js
@@ -0,0 +1,43 @@
+'use strict';
+
+/**
+ * upsert_where.js
+ *
+ * Bulk inserts documents in batches of 100, attempts to update a document that is known not to
+ * exist (triggering an upsert), and queries by the thread that created the documents to verify
+ * counts.
+ */
+
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/indexed_insert_where.js'); // for $config
+
+var $config = extendWorkload($config, function($config, $super) {
+ $config.data.randomBound = 10;
+ $config.data.generateDocumentToInsert = function generateDocumentToInsert() {
+ return { tid: this.tid, x: Random.randInt(this.randomBound)};
+ };
+
+ $config.states.upsert = function upsert(db, collName) {
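+        // 'x' is always generated in [0, randomBound), so this $where predicate never matches an
+        // existing document and the update always results in an upsert.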
+ var res = db[collName].update(
+ { $where: 'this.x === ' + this.randomBound + ' && this.tid === ' + this.tid },
+ { $set: { x: Random.randInt(this.randomBound), tid: this.tid } },
+ { upsert: true }
+ );
+ assertWhenOwnColl.eq(res.nUpserted, 1);
+ var upsertedDocument = db[collName].findOne({ _id: res.getUpsertedId()._id });
+ assertWhenOwnColl(function() {
+ assertWhenOwnColl.eq(upsertedDocument.tid, this.tid);
+ }.bind(this));
+ this.insertedDocuments += res.nUpserted;
+ };
+
+ $config.transitions = {
+ insert: { insert: 0.2, upsert: 0.4, query: 0.4 },
+ upsert: { insert: 0.4, upsert: 0.2, query: 0.4 },
+ query: { insert: 0.4, upsert: 0.4, query: 0.2 }
+ };
+
+ $config.setup = function setup(db, collName, cluster) {
+ /* no-op to prevent index from being created */
+ };
+
+ return $config;
+});